blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
63cefb41ad9dcac43fce5a68b35cc40dbd882dbe
|
Shell
|
stv2509/microservices
|
/ansible/instance/01-install_package.sh
|
UTF-8
| 1,156
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
#################################################################
# Provision an instance: install CFSSL + cfssljson, install kubectl,
# and set the default gcloud compute region/zone.
echo "##- Install CFSSL -##"
wget -c -q --https-only --timestamping \
https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 \
https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
chmod +x ./cfssl_linux-amd64 ./cfssljson_linux-amd64
sudo mv -f ./cfssl_linux-amd64 /usr/local/bin/cfssl
sudo mv -f ./cfssljson_linux-amd64 /usr/local/bin/cfssljson
echo "Success...."
#################################################################
echo "##- Install kubectl binary with curl on Linux -##"
# Inner curl resolves the current stable release tag; quoted so the URL
# survives any unexpected characters in the tag.
curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl"
chmod +x ./kubectl
sudo mv -f ./kubectl /usr/local/bin/
echo "##- Verification kubectl client -##"
# Run kubectl directly; wrapping it in `echo $(...)` only flattened its output.
kubectl version --client
echo "Success...."
#################################################################
echo "##- Google Cloud Platform-##"
echo "##- Set a Default Compute Region and Zone -##"
gcloud config set compute/region europe-west1
gcloud config set compute/zone europe-west1-b
echo "Success...."
| true
|
70bba370b474c43d5519c9dab86ec64a337a1367
|
Shell
|
billhinkle/lutronsmartthings
|
/lutronpro_pinger/lutronize
|
UTF-8
| 5,488
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
# lutronize - send commands directly to the Lutron node server for SmartThings
# don't forget to chmod +x lutronize
# n.b. do yourself a favor: keeping further options in alphabetical order. / wjh 20180117 / Bill Hinkle
# Default target URI of the Lutron node server (override with -u/--uri).
lnsuri="http://localhost:5000/"
# errcho: echo all arguments to stderr. "$@" is quoted so diagnostic text is
# not word-split or glob-expanded (the original bare $@ collapsed whitespace).
errcho(){ >&2 echo "$@"; }
# isnumeric: succeed iff $1 is one or more ASCII digits.
isnumeric() { [[ $1 =~ ^[0-9]+$ ]] ; }
# listscenes: parse the node server's raw /scenes JSON (passed as $1) and
# print one "buttonNumber "Name"" line per scene, sorted numerically.
listscenes(){
local sstr=$1
# Regex peels one match off the tail per iteration:
#   [1]=remaining head  [2]=virtual button number  [3]=quoted scene name
local srgx=\(.*\)\"\/virtualbutton\/\([0-9]+\)\",\"Name\":\(\".*\"\),
while [[ $sstr =~ $srgx ]]; do
echo ${BASH_REMATCH[2]} ${BASH_REMATCH[3]}
sstr=${BASH_REMATCH[1]}
done | sort -n
echo $sall # NOTE(review): $sall is never assigned anywhere visible — emits a blank line; confirm intent
return 0
}
# listzones: parse the node server's raw /devices JSON (passed as $1) and
# print one "zoneNumber "Name"" (or "Area:Name") line per zone, sorted
# numerically.
listzones(){
local zstr=$1
# Regex peels one match off the tail per iteration:
#   [1]=remaining head  [2]=quoted FullyQualifiedName element(s)  [3]=zone number
local zrgx=\(.*\)\"FullyQualifiedName\":\\[\(\".*\"\)\\],.*\\[\\{\"href\":\"/zone\/\([0-9]+\)\"\\}[\],]
# local zrgx=\(.*\)\"FullyQualifiedName\":\\[\(\".*\"\)\\],.*\"SerialNumber\":\([0-9]+\)[,\\}]
while [[ $zstr =~ $zrgx ]]; do
zstr=${BASH_REMATCH[1]}
local znm=${BASH_REMATCH[2]}
local zzn=${BASH_REMATCH[3]}
# Two-part fully-qualified names ("Area","Name") are re-joined as "Area:Name"
if [[ $znm =~ \"(.*)\",\"(.*)\" ]] ; then znm=\"${BASH_REMATCH[1]}:${BASH_REMATCH[2]}\"; fi
echo $zzn $znm
done | sort -n
echo $zall # NOTE(review): $zall is never assigned anywhere visible — emits a blank line; confirm intent
return 0
}
# No arguments at all: behave as if -h was given.
if [ $# -eq 0 ]
then
set -- "-h"
fi
# Hand-rolled option parser. Each case consumes its own tokens via shift;
# the loop ends at "--", at the first non-option word, or by exiting.
while :
do
case "$1" in
-h | --help)
# display_help
echo ${0##*/}' [-h] | [[[-s scene#] | [[-z zone#] [[-l level%] | [-o]]] [options]'
echo ${0##*/}' [--help] | [[[--scene scene#] | [[--zone zone#] [[--level level%] | [--on]]] [options]'
echo 'Other options: [-u | --uri] target-uri Default target uri is:' $lnsuri
echo ' [-v | --verbose] Echo the raw server response\/error'
echo 'Examples: '${0##*/}' -s 6 <-- set Lutron bridge scene 6'
echo ' '${0##*/}' --zone 2 --level 50 <-- set Lutron bridge zone (dimmer) 2 to 50%'
echo ' '${0##*/}' -z 1 --on <-- set Lutron bridge zone (dimmer) 1 to 100%'
echo ' '${0##*/}' -z 3 <-- gets Lutron bridge zone (dimmer) 3 level 0-100 in %'
echo 'v.3 Note: scene# may be specified by # or "Name"; zone# may be specified by #, "Name", or "Area:Name"'
echo ' leave scene# or zone# blank to get a list of scenes or zones'
# no shifting needed here, we're done.
exit 0
;;
-l | --level)
level="$2" # the server will check for validity and range: # only
# Normalize with 10# so leading zeros are not read as octal.
if isnumeric $level ; then level=$((10#$level)) ; else errcho Level must be numeric 0-100 ; exit 3 ; fi
shift 2
;;
-o | --on)
oncmd="on" # overrides level to 100
shift
;;
-qs) # dump scenes
hmethod="GET"
method='scenes'
shift
;;
-qz) # dump zones
hmethod="GET"
method='devices'
shift
;;
-s | --scene)
scene="$2" # the server will check for validity: # or name
# isnumeric $scene || scene=\"$scene\"
# A missing value (end of args, or the next token is another -option)
# means "list all scenes" instead of activating one.
if [[ -z $scene || $scene =~ \-.* ]]
then
hmethod="GET"
method='scenes'
scene=
shift
else
# Numeric scenes stay bare; names get JSON-style double quotes.
if isnumeric $scene ; then scene=$((10#$scene)) ; else scene=\"$scene\" ; fi
shift 2
fi
;;
-u | --uri)
lnsuri="$2" # change the target URI
shift 2
;;
-v | --verbose)
verbose="verbose"
shift
;;
-z | --zone)
zone="$2" # the server will check for validity; lutron will check for range: # only
# isnumeric $zone || zone=\"$zone\"
# As with -s: no value means "list all zones".
if [[ -z $zone || $zone =~ \-.* ]]
then
hmethod="GET"
method='devices'
zone=
shift
else
if isnumeric $zone ; then zone=$((10#$zone)) ; else zone=\"$zone\" ; fi
shift 2
fi
;;
--) # End of all options
shift
break;
;;
-*)
errcho "Error: Unknown option: $1"
exit 1
;;
*) # No more options
break
;;
esac
done
# Build the JSON payload and REST method from whatever the option parser
# collected. A scene request wins over a zone request; for a zone, --on beats
# --level, and neither means "query current status".
if [ "$scene" ]
then
payload='{"virtualButton":'$scene'}'
method='scene'
elif [ "$zone" ]
then
if [ "$oncmd" ]    # quoted (the original bare [ $oncmd ] relied on word splitting)
then
payload='{"zone":'$zone'}'
method='on'
elif [ -z "$level" ]   # quoted; flattened from the original nested if/else
then
payload='{"zone":'$zone'}'
method='status'
else
payload='{"zone":'$zone',"level":'$level'}'
method='setLevel'
fi
fi
# Ensure the target URI ends with a slash so "$lnsuri$method" forms a valid
# path. Bug fix: the original computed the last character but then appended
# "/" to $pllen (the *length* variable), so a URI supplied without a trailing
# slash was never actually repaired.
[[ $lnsuri == */ ]] || lnsuri="$lnsuri/"
# Dispatch the request. IFS is narrowed to a single space so the multi-word
# curl output is captured predictably; "Server Timeout" is injected when curl
# itself fails so the response matching below can distinguish it.
if [ "$hmethod" == "GET" ]
then
SAVEIFS=$IFS
IFS=' '
lnsresp=$(curl -s --connect-timeout 5 -X GET -H 'Content-Type: application/json' "$lnsuri"$method 2>&1 || echo Server Timeout)
IFS=$SAVEIFS
elif [ "$payload" ]
then
SAVEIFS=$IFS
IFS=' '
lnsresp=$(curl -s --connect-timeout 5 -H 'Content-Type: application/json' -d "$payload" "$lnsuri"$method 2>&1 || echo Server Timeout)
IFS=$SAVEIFS
else
# Nothing to send: no scene/zone/query was parsed.
exit 1
fi
# Interpret the server response; exit codes: 0 ok, 2 timeout, 3 error.
[[ $verbose ]] && echo $lnsresp\n # NOTE(review): plain echo prints a literal "\n" (no -e) — confirm intent
[[ $lnsresp =~ ^Accepted ]] && exit 0
[[ $lnsresp =~ ^Gateway.Timeout ]] && errcho The Lutron bridge did not respond && exit 2 # timeout @Lutron
[[ $lnsresp =~ ^Server.Timeout ]] && errcho The Lutron bridge node server did not respond && exit 2
[[ $lnsresp =~ .*\"Level\":([0-9]+) ]] && echo ${BASH_REMATCH[1]} && exit 0 # good level response
[[ $hmethod == "GET" && $method == "devices" ]] && listzones "$lnsresp" && exit 0
[[ $hmethod == "GET" && $method == "scenes" ]] && listscenes "$lnsresp" && exit 0
! [[ $lnsresp =~ ^[0-9]+$ ]] && errcho $lnsresp && exit 3 # other response
errcho Bad Request Err $lnsresp
exit 3
# End of file
| true
|
4d568de1d53822a3de90ac2a504bc09cf0063290
|
Shell
|
Colin-Ragush/AWSR
|
/scripts/08_terminate_ec2.sh
|
UTF-8
| 901
| 4.21875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Terminate a known Amazon EC2 Instance
# load local settings if not already loaded
[[ $SCR_DIR ]] || SCR_DIR="$(cd "$(dirname "$0")/."; pwd)"
[[ $PRJ_DIR ]] || source "$SCR_DIR/02_setup.sh"
# terminate a known instance
# NOTE(review): $ERROR/$INFO and the FC helper (text formatting), plus
# AWS_PRFL/AWS_REGION/EC2_INSTANCE_ID, are expected to come from 02_setup.sh —
# confirm. $AWS_PRFL is deliberately left unquoted so it can expand to
# multiple words (e.g. "--profile name") or to nothing.
if [[ -z $EC2_INSTANCE_ID ]]; then
echo -e "$ERROR No EC2 Instance ID found." \
"Please also check AWS web console."
else
echo -e "$INFO Attempting to terminate EC2 Instance ID" \
"$(FC $EC2_INSTANCE_ID) ..."
aws $AWS_PRFL ec2 terminate-instances \
--region $AWS_REGION \
--instance-ids $EC2_INSTANCE_ID \
--output table
exit_status=$?
if [ $exit_status -eq 0 ]; then
echo -e "$INFO Instance $(FC $EC2_INSTANCE_ID) is being terminated ..."
else
echo -e "$ERROR Cannot terminate Instance ID $(FC $EC2_INSTANCE_ID)." \
"Please terminate it using AWS web console."
fi
fi
| true
|
3d724093edd326f140b7e8a4ca0270f1bf46fd6f
|
Shell
|
fspieler/dotfiles
|
/fun-scripts/strjoin
|
UTF-8
| 318
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
# if first arg is -p, an extra delimiter will precede output
if [ x"$1" == x"-p" ] ; then
firstflag=false;
shift
else
firstflag=true
fi
delim=$1
final=""
for i in ${@:2} ; do
if [ $firstflag = true ] ; then
final=$i
firstflag=false
else
final=$final${delim}$i
fi
done
echo $final
| true
|
7e8d40f8f754742fff4fc23843c164c6fb214a30
|
Shell
|
nynhex/Perinoid_Linux_Project
|
/functions/privoxy/source_privoxy_functions.sh
|
UTF-8
| 915
| 2.984375
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
# Source_privoxy_functions: load all privoxy-related helper scripts into the
# caller's run-time. $1 overrides the base script directory (defaults to
# $_script_dir, set by the caller).
Source_privoxy_functions(){
_dir="${1:-$_script_dir}/functions"
source "${_dir}/privoxy/config_privoxy/privoxy_client_configs.sh"
source "${_dir}/privoxy/init_scripts/privoxy_init_client.sh"
source "${_dir}/privoxy/lib_activations/activate_privoxy_configs.sh"
source "${_dir}/privoxy/installers/aptget_privoxy_install.sh"
## source "${_dir}/privoxy/installers/source_privoxy_install.sh"
# NOTE(review): activate_privoxy_configs.sh is sourced a second time here —
# likely a copy/paste leftover; harmless only if that file just defines functions.
source "${_dir}/privoxy/lib_activations/activate_privoxy_configs.sh"
source "${_dir}/privoxy/extras/add_privoxy_user.sh"
# source "${_dir}/"
}
### Source_privoxy_functions_help source_privoxy_functions_help source_privoxy_functions.sh
# File: ${_script_dir}/functions/privoxy/source_privoxy_functions.sh
# runs [source] command to load privoxy related functions into [${_script_name}] run-time.
# This function is one of the first called when [${_script_name}] detects
# [-T="client"] as one of the options passed.
####
| true
|
308512d1f027a8748c2bdb97b0a2b76410effc9f
|
Shell
|
mooosword/myscripts
|
/bin/backup.sh
|
UTF-8
| 283
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/sh
# backup: copy a document file (pdf/doc/docx/chm) into the book folders.
if [ $# -lt 1 ]; then
echo 'Usage: backup [filename]'
exit
fi
filename=$1
# extension = text after the last dot
ext=${filename##*.}
echo "$ext"
# case is POSIX-sh idiomatic and, unlike the original unquoted
# [ $ext = 'pdf' ] || ... chain, safe for names containing spaces.
case $ext in
pdf|doc|docx|chm)
cp -- "$filename" ~/Documents/Books/
cp -- "$filename" ~/mybooks/
;;
esac
| true
|
d9f973d38c51ac4f0715ebd1a8a9bb8eba5141f3
|
Shell
|
kadensungbincho/Online_Lectures
|
/Udemy/shell_scripting/logging2.sh
|
UTF-8
| 251
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# logit LEVEL MESSAGE...
# Print a timestamped log line when LEVEL is ERROR, or when VERBOSE holds the
# word "true" (it is executed as a command, as in the original design).
# MSG and TIMESTAMP intentionally stay global, matching the original.
logit() {
  local LOG_LEVEL=$1
  shift
  MSG=$@
  TIMESTAMP=$(date +"%Y-%m-%d %T")
  # Fixes: the original unquoted [ $LOG_LEVEL = 'ERROR' ] broke on an empty
  # level, and a bare $VERBOSE expanded to a null command (exit status 0)
  # when VERBOSE was unset, so *every* message was logged; default to false.
  # NOTE(review): bash provides HOSTNAME, not HOST — confirm HOST is set elsewhere.
  if [ "$LOG_LEVEL" = 'ERROR' ] || ${VERBOSE:-false}
  then
    echo "${TIMESTAMP} ${HOST}
${PROGRAM_NAME}[${PID}]: ${LOG_LEVEL} ${MSG}"
  fi
}
| true
|
07cc85d0fd66d05849cd0ab2a86875b38422dd75
|
Shell
|
nunojun/docker-common
|
/registry/run-registry.sh
|
UTF-8
| 884
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/bash
# Run a local Docker registry (registry:2) container with deletion enabled.
# Default values for registry
PORT=5000
NAME="registry"
# Parse options
usage() {
echo "Usage: $0"
echo " -h : shows the usage"
echo " -n <registry-name> : optional. \"registry\" is the default name."
echo " -p <port-number> : optional. \"5000\" is the default port number."
exit 1
}
while getopts ":n:p:h" o;
do
case "${o}" in
h)
usage
;;
n)
# Bug fix: the values were stored in REGISTRY_NAME/PORT_NUMBER, which
# nothing ever read, so -n and -p silently had no effect.
NAME=${OPTARG}
;;
p)
PORT=${OPTARG}
;;
\?)
usage
;;
:)
usage
;;
esac
done
shift $((OPTIND-1))
# Run a docker container of registry:2
echo "Run a registry container. name=$NAME, port=$PORT"
docker run \
-d \
-e REGISTRY_STORAGE_DELETE_ENABLED=true \
-p "$PORT":5000 \
--name "$NAME" \
registry:2
| true
|
361a972d4b86762832e411dc7f65ebcf8e5b0b88
|
Shell
|
d123456ddq/FCRN-DepthPrediction
|
/tensorflow/scripts/encode_video_jet.sh
|
UTF-8
| 535
| 2.53125
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# Dependencies: sudo apt-get install ffmpeg
# Encode the prediction frames twice (raw frames and jet-colormapped depth),
# stitch the two videos side by side, then delete the intermediates.
ffmpeg -r 30 -f image2 -s 304x288 -i output/fcrn_cv/frame%06d.png -vcodec libx264 -crf 25 -pix_fmt yuv420p output/frame.mp4
ffmpeg -r 30 -f image2 -s 304x288 -i output/fcrn_cv/jet%06d.png -vcodec libx264 -crf 25 -pix_fmt yuv420p output/jet.mp4
# hstack joins the two inputs left/right; format=yuv420p keeps player compatibility
ffmpeg -i output/frame.mp4 -i output/jet.mp4 -filter_complex "hstack,format=yuv420p" -c:v libx264 -crf 25 output/output.mp4
echo "[encoding] Removing Temporary Files..."
rm output/frame.mp4
rm output/jet.mp4
echo "[encoding] Done."
| true
|
7b2024e0ae4b41ff8431308fab5e5bfd6673d628
|
Shell
|
LucChoubert/MeteoServer
|
/runDockerEnv.sh
|
UTF-8
| 1,599
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/sh
# (Re)start the whole MeteoServer docker environment on a shared bridge
# network: redis, the retriever daemon, the API server, the web UI, plus
# redis-commander and portainer as admin tools.
# Stop and remove any previous instances first (errors are harmless if a
# container does not exist yet).
docker stop redis-server
docker rm redis-server
docker stop retriever-daemon
docker rm retriever-daemon
docker stop api-server
docker rm api-server
docker stop webui-server
docker rm webui-server
docker stop redis-commander
docker rm redis-commander
docker stop portainer
docker rm portainer
#docker build -t meteoserver -f Dockerfile.meteoserver .
#docker build -t meteodaemon -f Dockerfile.meteodaemon .
#docker build -t webui -f Dockerfile.webui .
# Shared network; creation fails harmlessly if it already exists.
docker network create --driver bridge application-net
docker pull redis
#RUN the various elements of the environment
#Redis Server
docker run --name redis-server --network application-net -d -p 6379:6379 redis
#Daemon getting the data from MeteoFrance and storing in Redis
docker run --name retriever-daemon --network application-net -d meteodaemon
#Meteo API Server
docker run --name api-server --network application-net -d meteoserver
#NGINX server for the UI and facade to API server
docker run --name webui-server --network application-net -d -p 80:80 webui
## Tools Section
#Web Interface for Redis database view
docker run --name redis-commander --network application-net --env REDIS_HOSTS=PRD:redis-server:6379 -d -p 8081:8081 redis-commander:arm
#Web Interface for Docker GUI administration
docker run --name portainer --network application-net -d -p 9000:9000 -v /var/run/docker.sock:/var/run/docker.sock portainer/portainer
docker ps
# connect for debugging
# docker exec -i -t 8dfef789c123 /bin/bash
# Daemon runs like this: python3 MeteoRetrieverDaemon.py
# And stop like this: pkill -f -TERM *Daemon*
| true
|
fc90c81c0f573dfcd9d14736f4bc90c387ff3693
|
Shell
|
Opentotum/fab_diagnostic
|
/scripts/bash/network_ethernet_gw.sh
|
UTF-8
| 1,511
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# -*- coding: utf-8; -*-
#
# (c) 2017 FABtotum, http://www.fabtotum.com
#
# This file is part of FABUI.
#
# FABUI is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# FABUI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FABUI. If not, see <http://www.gnu.org/licenses/>.
TOP=$(dirname $0)
. ${TOP}/fabui.env
. ${TOP}/common.sh
#
# Template for making a test_case
#
# Diagnostic: verify that eth0 is up, that a gateway is configured for it,
# and that the gateway answers ping.
# Returns: 0 ok, 1 eth0 down, 2 no gateway, otherwise ping's exit status.
function test_case()
{
# Success
# NOTE(review): this parses human-readable ifconfig "Link " lines, i.e.
# net-tools formatting — confirm the target platform ships net-tools.
IFACE=$(ifconfig | grep "Link " | grep eth0 | awk '{print $1}')
if [ x"$IFACE" == x"eth0" ]; then
echo "Ethernet interface eth0 is up."
else
echo "Ethernet interface eth0 is down."
return 1
fi
# First routing entry for eth0; column 2 of `route -n` is the gateway.
GW=$(route -n | grep eth0 | awk 'NR==1{print $2}')
if [ x"$GW" != x"" ]; then
echo "Gateway is configured to $GW."
else
echo "No gateway is configured."
return 2
fi
echo "ping $GW -c 3"
ping $GW -c 3
RETR=$?
if [ x"$RETR" == x"0" ]; then
echo "Gateway is accessible."
else
echo "Gateway is not accessible."
fi
# Result
return $RETR
}
testcase_cleanup
# Log everything the test prints, then report through the common helpers.
test_case $@ > ${TEST_CASE_LOG} 2>&1
testcase_evaluate_result $?
| true
|
d9b4dd934afab66be03e9e92f10d1d0d875b4ca9
|
Shell
|
intel/intel-graphics-compiler
|
/scripts/buildSLT.sh
|
UTF-8
| 3,247
| 3.609375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#=========================== begin_copyright_notice ============================
#
# Copyright (C) 2022 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
#============================ end_copyright_notice =============================
set -e
# UBUNTU_VERSION supported value [ 20, 22 ] default 20
# LLVM_VERSION supported value [ 10, 11, 12, 13, 14, 15] default 11
# OWN_CMAKE_FLAGS not suported but can be use as WA (each flag should be with -D prefix) default empty
# example run: UBUNTU_VERSION=ubuntu2004 LLVM_VERSION=11 sh /home/buildSLT.sh
echo "====================BUILD SPIRV-LLVM-Translator========================="
echo "[Build Status] build script started"
# ${VAR+x} distinguishes unset from empty: fall back to defaults only when
# the caller did not export the knob at all.
if [ -z ${UBUNTU_VERSION+x} ]; then
echo "[Build Status] UBUNTU_VERSION is unset, use default 20";
UBUNTU_VERSION="20.04"
else
echo "[Build Status] UBUNTU_VERSION = ${UBUNTU_VERSION}"
fi
if [ -z ${LLVM_VERSION+x} ]; then
echo "[Build Status] LLVM_VERSION is unset, use default 14";
LLVM_VERSION="14"
else
echo "[Build Status] LLVM_VERSION = ${LLVM_VERSION}"
fi
apt-get update
apt-get install -y flex bison libz-dev cmake curl wget build-essential git software-properties-common unzip lsb-release
echo "[Build Status] flex bison libz-dev cmake curl wget build-essential git software-properties-common INSTALLED"
# Ubuntu 20.04's distro cmake is too old; replace it with Kitware's APT build.
if [ "$UBUNTU_VERSION" = "20.04" ]; then
echo "[Build Status] Download new cmake version for Ubuntu 20.04";
apt-get purge -y --auto-remove cmake
wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | gpg --dearmor - | tee /etc/apt/trusted.gpg.d/kitware.gpg >/dev/null
apt-add-repository "deb https://apt.kitware.com/ubuntu/ $(lsb_release -cs) main"
apt-get update
apt-get install -y cmake
fi
# LLVM builds newer than the distro's come from apt.llvm.org.
if ([ "$UBUNTU_VERSION" = "20.04" ] && [ "$LLVM_VERSION" -ge 14 ]) || ([ "$UBUNTU_VERSION" = "22.04" ] && [ "$LLVM_VERSION" -ge 15 ])
then
echo "[Build Status] Retrieve the LLVM archive signature for LLVM $LLVM_VERSION on Ubuntu $UBUNTU_VERSION";
wget -q https://apt.llvm.org/llvm-snapshot.gpg.key
apt-key add llvm-snapshot.gpg.key
case "$UBUNTU_VERSION" in
20.04) OS_HANDLE=focal;;
22.04) OS_HANDLE=jammy;;
esac
add-apt-repository "deb http://apt.llvm.org/$OS_HANDLE/ llvm-toolchain-$OS_HANDLE-$LLVM_VERSION main"
fi
apt-get install -y llvm-"$LLVM_VERSION" llvm-"$LLVM_VERSION"-dev clang-"$LLVM_VERSION" liblld-"$LLVM_VERSION" liblld-"$LLVM_VERSION"-dev
echo "[Build Status] LLVM INSTALLED"
LLVM_VERSION_PREFERRED="$LLVM_VERSION".0.0
echo "[Build Status] LLVM_VERSION_PREFERRED = $LLVM_VERSION_PREFERRED"
echo "[Build Status] build and install SPIRV-LLVM-Translator"
# Check out the translator branch matching the installed LLVM major version,
# then build llvm-spirv and package it as a .deb via CPack.
/usr/bin/git clone --branch llvm_release_"$LLVM_VERSION"0 https://github.com/KhronosGroup/SPIRV-LLVM-Translator
cd SPIRV-LLVM-Translator
echo 'set(CPACK_GENERATOR "DEB")' >> CMakeLists.txt && echo 'set(CPACK_DEBIAN_PACKAGE_MAINTAINER "David Doria") # required' >> CMakeLists.txt && echo 'include(CPack)' >> CMakeLists.txt
mkdir build && cd build
cmake .. -DBASE_LLVM_VERSION="$LLVM_VERSION_PREFERRED"
make llvm-spirv -j`nproc`
cpack
echo "[Build Status] SPIRV-LLVM-Translator Packed"
| true
|
e73ce813954fbca4f845478b2c37a2bfee3a3a53
|
Shell
|
rohe/fed-oidc-op
|
/ch/create_fos.sh
|
UTF-8
| 811
| 2.734375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Create a bundle of federation-operator (FO) signing keys plus their public
# counterparts under ./fo_bundle and ./public/fo_bundle. File names are the
# URL-encoded FO identifiers; existing files are never overwritten.
# (Refactor: the original repeated the same mkdir/if blocks once per FO.)
fos=(
  "https%3A%2F%2Fedugain.org%2F"
  "https%3A%2F%2Fswamid.sunet.se%2F"
)
# mkdir -p subsumes the original "[ ! -d ... ] && mkdir" checks.
mkdir -p "fo_bundle" "public/fo_bundle"
for fo in "${fos[@]}"; do
  # Full JWKS — generated once per FO (create_jwks.py comes from PATH,
  # as in the original).
  if [ ! -f "fo_bundle/$fo" ]; then
    create_jwks.py > "fo_bundle/$fo"
  fi
  # Public JWKS derived from the full one — also generated once.
  if [ ! -f "public/fo_bundle/$fo" ]; then
    ./public_jwks.py "fo_bundle/$fo" > "public/fo_bundle/$fo"
  fi
done
| true
|
10e94d3da7bf154cbf59ddda0b8a89b0b59b29b7
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/nvidia-ck/PKGBUILD
|
UTF-8
| 2,118
| 2.859375
| 3
|
[] |
no_license
|
# Maintainer: graysky <graysky AT archlnux.us>
# Contributor: Sven-Hendrik Haase <sh@lutzhaase.com>
# Contrubutor: Thomas Baechler <thomas@archlinux.org>
# PKGBUILD: proprietary NVIDIA kernel modules built against the linux-ck
# 4.12 kernel. Sourced by makepkg, which supplies $srcdir/$pkgdir/$CARCH
# and runs prepare(), build(), package() in order.
pkgname=nvidia-ck
pkgver=387.12
pkgrel=1
epoch=1
_extramodules=extramodules-4.12-ck
_pkgdesc="NVIDIA drivers for linux-ck."
pkgdesc="$_pkgdesc"
arch=('i686' 'x86_64')
url="http://www.nvidia.com/"
depends=('linux-ck>=4.12' 'linux-ck<4.13' 'libgl' "nvidia-utils=${pkgver}")
makedepends=('linux-ck-headers>=4.12' 'linux-ck-headers<4.13' 'nvidia-libgl')
conflicts=('nvidia-340xx-ck' 'nvidia-304xx-ck')
#groups=('ck-generic')
#replaces=()
license=('custom')
install=readme.install
options=(!strip)
# Different installer artifacts per architecture; makepkg selects the
# source_/sha256sums_ pair matching $CARCH.
source_i686=("http://us.download.nvidia.com/XFree86/Linux-x86/${pkgver}/NVIDIA-Linux-x86-${pkgver}.run")
source_x86_64=("http://us.download.nvidia.com/XFree86/Linux-x86_64/${pkgver}/NVIDIA-Linux-x86_64-${pkgver}-no-compat32.run")
sha256sums_i686=('aabac19b2bbc30ab9fb01954328633ff08b5aa5c2996f03722dd0806b247d536')
sha256sums_x86_64=('6951372a9e805bfe58fdddf9c728bf12a4c5d8cf38d1c8e6b7afaea492ef83f6')
[[ "$CARCH" = "i686" ]] && _pkg="NVIDIA-Linux-x86-${pkgver}"
[[ "$CARCH" = "x86_64" ]] && _pkg="NVIDIA-Linux-x86_64-${pkgver}-no-compat32"
# Unpack the self-extracting NVIDIA installer.
prepare() {
sh "${_pkg}.run" --extract-only
cd "${_pkg}"
}
# Compile the modules against the installed linux-ck headers.
build() {
_kernver="$(cat /usr/lib/modules/${_extramodules}/version)"
cd "${_pkg}/kernel"
make SYSSRC=/usr/lib/modules/"${_kernver}/build" module
}
# Install the gzipped .ko files and blacklist the nouveau driver.
package() {
install -Dm644 "${srcdir}/${_pkg}/kernel/nvidia.ko" \
"${pkgdir}/usr/lib/modules/${_extramodules}/nvidia.ko"
install -D -m644 "${srcdir}/${_pkg}/kernel/nvidia-modeset.ko" \
"${pkgdir}/usr/lib/modules/${_extramodules}/nvidia-modeset.ko"
install -D -m644 "${srcdir}/${_pkg}/kernel/nvidia-drm.ko" \
"${pkgdir}/usr/lib/modules/${_extramodules}/nvidia-drm.ko"
# nvidia-uvm exists only in the 64-bit installer
if [[ "$CARCH" = "x86_64" ]]; then
install -D -m644 "${srcdir}/${_pkg}/kernel/nvidia-uvm.ko" \
"${pkgdir}/usr/lib/modules/${_extramodules}/nvidia-uvm.ko"
fi
gzip -9 "${pkgdir}/usr/lib/modules/${_extramodules}/"*.ko
install -dm755 "${pkgdir}/usr/lib/modprobe.d"
echo "blacklist nouveau" >> "${pkgdir}/usr/lib/modprobe.d/nvidia-ck.conf"
}
# vim:set ts=2 sw=2 et:
| true
|
cadf523518fcfe98b766392b01b0fa0195bb0c08
|
Shell
|
benjamingarzon/LongitudinalMyelinMapping
|
/MyelinMapping/OrganizeData.sh
|
UTF-8
| 7,936
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
# Organize data to adopt the HCP raw data naming convention and folder structure
# Usage: print the help text and exit 1.
Usage() {
echo ""
echo "Author: Benjamin Garzon <benjamin.garzon@gmail.com>"
echo "v 1.0, May 2015"
echo " Convert the raw dicom data and organize them to adopt the HCP raw data naming convention and folder structure."
echo " Usage: `basename $0` --subject=<SUBJECT NAME> --fieldmap=<FIELDMAP DIR NAME> --T1w=<T1w DIR NAME> --T2w=<T2w DIR NAME> --MGRE=<MGRE DIR NAME> [--overwrite --mag]"
echo " "
echo " IMPORTANT: You need to set the variables DICOM_DIR and STUDY_DIR before running it, e.g."
echo " export DICOM_DIR=/home/MyUser/Data/DICOM/Subject1/"
echo " export STUDY_DIR=/home/MyUser/Data/Study/"
echo " "
echo " DICOM_DIR: directory containing the dicom files in directories."
echo " STUDY_DIR: directory where the output will be stored in a directory <SUBJECT NAME>."
echo " With the option --overwrite the subject directory is created anew."
echo " The option --mag specifies that only MAGNITUDE images are available for the MGRE sequence, instead of REAL and IMAGINARY."
echo " "
exit 1
}
# get_opt1: extract the option name (text before the first '=') from an
# "--opt=value" token. Parameter expansion replaces the original
# `echo | sed` pipeline: no subshell, and the token is preserved byte-for-byte.
get_opt1() {
printf '%s\n' "${1%%=*}"
}
# get_arg1: extract the value from an "--opt=value" token; exits the whole
# script with status 1 (as the original did) when the token has no '=' or an
# empty value. ${1##*=} keeps the original greedy `sed 's/.*=//'` semantics
# (value after the LAST '=').
get_arg1() {
case $1 in
*=*) ;;
*)
echo "Option $1 requires an argument" 1>&2
exit 1
;;
esac
local arg=${1##*=}
if [ -z "$arg" ] ; then
echo "Option $1 requires an argument" 1>&2
exit 1
fi
printf '%s\n' "$arg"
}
# Default values
# 'NONE' sentinels mean "this modality was not provided"; MGRE_TYPE 2 =
# REAL+IMAGINARY data, 3 = MAGNITUDE-only (set by --mag).
SUBJECT='NONE'
FIELDMAP='NONE'
T1w='NONE'
T2w='NONE'
MGRE='NONE'
OVERWRITE='N'
MGRE_TYPE=2
if [ $# -lt 2 ] ; then Usage; exit 0; fi
# Parse --opt=value tokens; get_opt1 yields the name, get_arg1 the value.
while [ $# -ge 1 ] ; do
iarg=`get_opt1 $1`;
case "$iarg"
in
--subject)
SUBJECT=`get_arg1 $1`;
shift;;
--fieldmap)
FIELDMAP=`get_arg1 $1`;
shift;;
--T1w)
T1w=`get_arg1 $1`;
shift;;
--T2w)
T2w=`get_arg1 $1`;
shift;;
--MGRE)
MGRE=`get_arg1 $1`;
shift;;
--overwrite)
OVERWRITE='Y';
shift;;
--mag)
MGRE_TYPE=3;
shift;;
*)
#if [ `echo $1 | sed 's/^\(.\).*/\1/'` = "-" ] ; then
echo "Unrecognised option $1" 1>&2
exit 1
#fi
#shift;;
esac
done
# Create the HCP-style output tree for this subject. With --overwrite the
# subject directory is wiped first and recreated from scratch.
# Fixes: stray debug `echo hello` removed; expansions quoted; ${VAR:?} guards
# the rm -r against empty STUDY_DIR/SUBJECT; mkdir -p replaces the chain of
# three mkdir calls (creates the same directories).
if [ "$OVERWRITE" == "Y" ]; then
rm -r "${STUDY_DIR:?}/${SUBJECT:?}"
mkdir -p "${STUDY_DIR}/${SUBJECT}/unprocessed/3T"
else
if [ ! -e "${STUDY_DIR}/${SUBJECT}" ]; then
mkdir -p "${STUDY_DIR}/${SUBJECT}/unprocessed/3T"
fi
fi
# Convert a structural DICOM series (T1w or T2w) into the HCP layout.
# (Refactor: the T1 and T2 sections were identical except for names.)
#   $1 - DICOM subdirectory under $DICOM_DIR
#   $2 - HCP modality tag, e.g. T1w_MPR1 or T2w_SPC1
convert_structural() {
local dicom_sub=$1 tag=$2
local out_dir="${STUDY_DIR}/${SUBJECT}/unprocessed/3T/${tag}"
echo "Converting ${tag} image."
mkdir "$out_dir"
cd "$out_dir"
cp "$DICOM_DIR/$dicom_sub"/*.dcm "$out_dir"
$MRICRON_DIR/dcm2nii -d N *.dcm
# Record the dwell time from the first dicom header (GE mode).
$EXEC_DIR/ReadDwellTime.py `ls *.dcm | head -n 1` GE> "$out_dir/DwellTime.txt"
# dcm2nii's reoriented output starts with "o"; keep only the renamed nifti.
mv o*.nii.gz "${SUBJECT}_3T_${tag}.nii.gz"
rm *.dcm `ls *.nii.gz | grep -v "${SUBJECT}_3T_${tag}"`
}
# T1 images (:-NONE keeps the guard safe even if the default block was skipped)
if [ "${T1w:-NONE}" != "NONE" ]; then
convert_structural "$T1w" T1w_MPR1
fi
# T2 images
if [ "${T2w:-NONE}" != "NONE" ]; then
convert_structural "$T2w" T2w_SPC1
fi
# Creating the fieldmap
if [ $FIELDMAP != "NONE" ]; then
echo "Creating fieldmap."
cd $DICOM_DIR/$FIELDMAP
# Sort the dual-echo GRE dicoms into MAG/RE/IM series and record the echo
# times and imaging frequency (mode 1 = all three series).
$EXEC_DIR/sort_dicom_GRE.py $DICOM_DIR/$FIELDMAP $DICOM_DIR/FieldMap_${SUBJECT} 1 $DICOM_DIR/FieldMap_${SUBJECT}/TEs.txt $DICOM_DIR/FieldMap_${SUBJECT}/imaging_freq.txt
cd $DICOM_DIR/FieldMap_${SUBJECT}
DATA_DIRS="MAG RE IM"
rm *.nii.gz
# Convert each echo (TE*) to nifti, then merge the echoes along time.
for d in $DATA_DIRS;
do
cd $d
for i in TE*; do echo $i; cd $i; $MRICRON_DIR/dcm2nii *.dcm; cd ..; done
for i in TE*; do echo $i; cp $i/2*.nii.gz $i.nii.gz; done
fslmerge -t data.nii.gz TE*.nii.gz
rm -r TE*
cd ..
done
# Echo-time difference used to scale the phase difference into a fieldmap.
TE0=`cat $DICOM_DIR/FieldMap_${SUBJECT}/TEs.txt | awk 'FNR == 1 {print}'`
TE1=`cat $DICOM_DIR/FieldMap_${SUBJECT}/TEs.txt | awk 'FNR == 2 {print}'`
DELTA_TE=`echo "$TE1 - $TE0"| bc -l`
echo $DELTA_TE > ${STUDY_DIR}/${SUBJECT}/unprocessed/3T/T1w_MPR1/deltaTE.txt
# Build the complex inter-echo ratio, unwrap its phase (prelude), scale to a
# fieldmap, demean inside a brain mask, then append a magnitude volume.
fslcomplex -complex RE/data IM/data data_complex
$EXEC_DIR/ComplexRatios.py data_complex.nii.gz data_ratio.nii.gz
prelude -c data_ratio -o Phase_diff #-s
fslmaths Phase_diff -div 6.28 -div $DELTA_TE -mul -1000 FieldMap_deg # in ms
fslroi MAG/data.nii.gz Mag0 0 1
bet Mag0 Mag0_brain -f 0.35 -m -R
fslmaths FieldMap_deg -sub `fslstats FieldMap_deg -k Mag0_brain_mask -P 50` FieldMap_deg
fslmerge -t GradientEchoFieldMap FieldMap_deg Mag0
mv GradientEchoFieldMap.nii.gz "${STUDY_DIR}/${SUBJECT}/unprocessed/3T/T1w_MPR1/${SUBJECT}_3T_GradientEchoFieldMap.nii.gz"
#rm -r $DICOM_DIR/FieldMap_${SUBJECT}
fi
# Converting MGRE and generating r2STAR and QSM maps
if [ $MGRE != "NONE" ]; then
echo "Computing QSM and r2star MAPS."
MGRE_DIR="${STUDY_DIR}/${SUBJECT}/unprocessed/3T/MGRE"
PROCESSING_DIR="${STUDY_DIR}/${SUBJECT}/unprocessed/3T/MGRE/proc"
cd $DICOM_DIR/$MGRE
# MGRE_TYPE 2 = RE+IM series available, 3 = magnitude-only (--mag).
$EXEC_DIR/sort_dicom_GRE.py $DICOM_DIR/$MGRE $MGRE_DIR $MGRE_TYPE $MGRE_DIR/TEs.txt $MGRE_DIR/imaging_freq.txt
cd $MGRE_DIR
mkdir $PROCESSING_DIR
mkdir $MGRE_DIR/MAG/
if [ $MGRE_TYPE -eq 2 ]; then
DATA_DIRS="RE IM"
else
DATA_DIRS="MAG"
fi
# Convert each echo (TE*) to nifti and merge along time, as for the fieldmap.
for d in $DATA_DIRS;
do
cd $d
for i in TE*; do echo $i; cd $i; $MRICRON_DIR/dcm2nii *.dcm; cd ..; done
for i in TE*; do echo $i; cp $i/2*.nii.gz $i.nii.gz; done
fslmerge -t data.nii.gz TE*.nii.gz
rm -r TE*
cd ..
done
if [ $MGRE_TYPE -eq 2 ]; then
# Combine RE/IM into complex data and derive the magnitude series.
fslcomplex -complex $MGRE_DIR/RE/data $MGRE_DIR/IM/data $PROCESSING_DIR/data_complex
fslcpgeom $MGRE_DIR/RE/data $PROCESSING_DIR/data_complex
# Create magnitude images
fslcomplex -realabs $PROCESSING_DIR/data_complex $MGRE_DIR/MAG/data
fslcpgeom $MGRE_DIR/RE/data $MGRE_DIR/MAG/data
fi
# Brain extraction
# Root-mean-square across the first 4 echoes gives a high-SNR reference image.
fslroi $MGRE_DIR/MAG/data $PROCESSING_DIR/GRE 0 4
fslmaths $PROCESSING_DIR/GRE -sqr -Tmean -sqrt $PROCESSING_DIR/GRE0
bet $PROCESSING_DIR/GRE0.nii.gz $PROCESSING_DIR/GRE0_brain -m -R
# Erode mask
fslmaths $PROCESSING_DIR/GRE0_brain_mask.nii.gz -ero -kernel sphere 2 $PROCESSING_DIR/GRE0_brain_mask.nii.gz
if [ $MGRE_TYPE -eq 2 ]; then
# QSM analysis
IMAGING_FREQ=`cat $MGRE_DIR/imaging_freq.txt`
echo "Imaging Frequency = $IMAGING_FREQ"
# Call the QSM analysis tool (MATLAB; paths supplied via environment vars)
matlab -nodesktop -nosplash -r "addpath $MEDI_TOOLBOX_PATH; addpath $RESHARP_PATH; addpath $MATLAB_FSL_PATH; QSMprocessing $PROCESSING_DIR/data_complex.nii.gz $PROCESSING_DIR/GRE0_brain_mask.nii.gz $IMAGING_FREQ $MGRE_DIR/TEs.txt $PROCESSING_DIR/QSM.nii.gz $PROCESSING_DIR/background_field.nii.gz $PROCESSING_DIR; exit;"
fslcpgeom $PROCESSING_DIR/GRE0.nii.gz $PROCESSING_DIR/QSM.nii.gz
mv $PROCESSING_DIR/QSM.nii.gz $MGRE_DIR
# Clean up
rm $PROCESSING_DIR/RDF.mat
fi
# Call relaxometry
# Fit r2* decay over the echo train from the magnitude data.
RELAXOMETRY_CONSTANT=0
$EXEC_DIR/relaxometry.py $MGRE_DIR/MAG/data.nii.gz $PROCESSING_DIR/GRE0_brain_mask.nii.gz $MGRE_DIR/TEs.txt $RELAXOMETRY_CONSTANT $PROCESSING_DIR/PD.nii.gz $PROCESSING_DIR/r2star.nii.gz $PROCESSING_DIR/relaxErr.nii.gz
fslcpgeom $PROCESSING_DIR/GRE0.nii.gz $PROCESSING_DIR/r2star.nii.gz
mv $PROCESSING_DIR/r2star.nii.gz $MGRE_DIR
fi
| true
|
8448fcd4fb564b30049ce1d7e35c338eaab4aeab
|
Shell
|
dkeg/inspin
|
/volume
|
UTF-8
| 988
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/sh
# original by z3bra
# modified by dkeg
# 2015
# additional output for mute,speaker,headphone
# volume [+|-|!|state|level|<amixer value>] — query or adjust the Master mixer.
test "$1" = "-h" && echo "usage `basename $0` [+|-|!]" && exit 0
# state: print "on"/"off" for the Master channel
state() {
amixer get Master | sed -n 's/^.*\[\(o[nf]\+\)]$/\1/p' | uniq
}
# level: print "Muted:"/"headphones:"/"speaker:" plus the volume percentage
level() {
vol=$(amixer get Master|awk 'NR==5 {print $4}'|cut -d '%' -f1 | cut -d '[' -f2) #sed -e 's/[//g')
mut=$(amixer get Master | awk 'NR==5 {print $6}')
# NOTE(review): headphone detection reads a fixed line of the codec dump —
# this is hardware-specific; confirm on the target machine.
head=$(cat /proc/asound/card0/codec#0 | awk 'NR==143 {print $2}')
if [ $mut = "[off]" ] ;then
lvl="Muted: "" "
# check for headphones
elif [ $head = "0x00:" ] ;then
lvl="headphones: "$vol" "
else
lvl="speaker: "$vol" "
fi
echo $lvl
}
# No arguments: print current level and mute state.
test $# -eq 0 && echo "`level` `state`" && exit 0
case $1 in
+) amixer set Master 5+ >/dev/null;;
-) amixer set Master 5%- >/dev/null;;
!) amixer set Master toggle >/dev/null;;
state|level) $1;;
*) amixer set Master $1 >/dev/null;;
esac
| true
|
9b259a4dd52be2622d9d4e2b3e343e7fd5e1d2bf
|
Shell
|
morningspace/kube-assist
|
/commands/pods.sh
|
UTF-8
| 2,859
| 4.375
| 4
|
[] |
no_license
|
#!/bin/bash
. $(dirname $(dirname $0))/utils.sh
# list_failed_pods: list pods that look unhealthy — not Running/Completed,
# not all containers ready, or restarted more than -r times. Prints the
# matching `kubectl get pod` lines (with header) and logs a summary via the
# logger:: helpers sourced from utils.sh.
function list_failed_pods {
local namespace
local all_namespaces
local restarts_cap
while [[ $# -gt 0 ]]; do
case "$1" in
-n|--namespace)
namespace=$2; shift 2 ;;
-A|--all-namespaces)
all_namespaces=1; shift ;;
-r|--restarts)
restarts_cap=$2; shift 2 ;;
*)
shift ;;
esac
done
local scope
local ns_flag
local pods_file
# Work out the kubectl scope and a cache file under ~/.ka for the raw output.
if [[ -n $namespace ]]; then
scope="$namespace namespace"
ns_flag="-n $namespace"
pods_file=$HOME/.ka/$namespace.pods
elif [[ -n $all_namespaces ]]; then
scope="all namespaces"
ns_flag="--all-namespaces"
pods_file=$HOME/.ka/all.pods
else
scope="$(kubectl config view --minify --output 'jsonpath={..namespace}') namespace"
pods_file=$HOME/.ka/pods
fi
logger::info "Checking pods in $scope..."
kubectl get pod $ns_flag 2>/dev/null >$pods_file || return 1
local parts
local ready
local status
local restarts
local containers_total
local containers_running
local line_num=0
local failed_pods_lines=()
while IFS= read -r pod_line; do
# Always keep the header row (line 1) so the final report stays readable.
(( line_num++ )); (( line_num == 1 )) && failed_pods_lines+=("$pod_line") && continue
parts=($pod_line)
# Column offsets shift by one when NAMESPACE is the first column (-A mode).
if [[ $scope == "all namespaces" ]]; then
ready=${parts[2]}
status=${parts[3]}
restarts=${parts[4]}
else
ready=${parts[1]}
status=${parts[2]}
restarts=${parts[3]}
fi
# READY is "running/total"
containers_total=${ready#*/}
containers_running=${ready%/*}
local is_pod_failed=0
if (( $containers_running == $containers_total )); then
[[ $status != Completed && $status != Running ]] && is_pod_failed=1
else
[[ $status != Completed ]] && is_pod_failed=1
fi
# A restarts_cap of 0 (or unset, which evaluates to 0) disables this check.
(( restarts > restarts_cap && restarts_cap != 0 )) && is_pod_failed=1
if [[ $is_pod_failed == 1 ]]; then
failed_pods_lines+=("$pod_line")
fi
done < "$pods_file"
# Only the header collected means nothing failed.
if [ ${#failed_pods_lines[@]} -le 1 ]; then
logger::info "No failed resources found in $scope."
else
logger::warn "Some failed resources found in $scope."
for failed_pod_line in "${failed_pods_lines[@]}"; do
echo "$failed_pod_line"
done
fi
}
function help {
echo "
Kuberntes Command Line Assistant: Pods
List all pods that failed to run or are not healthy
Usage:
$(dirname $(dirname $0))/ka.sh pods|pod|po [options]
$0 [options]
Options:
-n|--namespace <ns> List failed pods in a particular namespace
-A|--all-namespaces List failed pods in all namespaces
-r|--restarts <num> Take pod restarted more than <num> times as failed case
-h|--help Print the help information
"
}
handle=list_failed_pods
while [[ $# -gt 0 ]]; do
case "$1" in
-h|--help) handle=help; shift ;;
*) POSITIONAL+=("$1"); shift ;;
esac
done
${handle} ${POSITIONAL[@]}
| true
|
ad5420552224d2b1be59f3221682aaf3ce70a245
|
Shell
|
dendrites/dendrites
|
/greet.sh
|
UTF-8
| 141
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Greeter loop: forever reads one whitespace-delimited message from the
# file/FIFO named by $1 and writes "<sender> hello" to the file named by $2.
# The whole loop runs as a background job (trailing '&').
echo "" > $2
while true; do
# NOTE(review): 'read ... < $1' reopens $1 on every iteration; on a regular
# file this re-reads the first line forever — presumably $1 is a FIFO. Confirm.
read line < $1
# Word-split the line: word 0 = sender id, word 1 = message body.
cmds=( $line )
src=${cmds[0]}
msg=${cmds[1]}
# Only the sender id is used in the reply; msg is parsed but unused.
echo "$src hello" > $2
done &
| true
|
2e76b07eb203a14fefb7710d9fc74fbf4f5c69f3
|
Shell
|
jdmichaud/gziped
|
/test/generate_dataset.sh
|
UTF-8
| 361
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# Build a gzip'd dataset from the files in <folder>:
# for every regular file, write <file>.gz next to it and append the line
# "<file>.gz <md5-of-original>" to <folder>/index.txt.
if [[ $# -ne 1 ]]
then
  echo "usage: generate_dataset.sh <folder>"
  exit 1
fi

input_folder=$1

rm -f "$input_folder/index.txt"

# Glob instead of parsing 'ls' so names with spaces survive; skip
# directories and artifacts left over from a previous run (.gz, index.txt).
for path in "$input_folder"/*
do
  [[ -f "$path" ]] || continue
  file=${path##*/}
  case "$file" in
    *.gz|index.txt) continue ;;
  esac
  echo "$file"
  # First 32 chars of md5sum output = the digest itself.
  md5=$(md5sum -- "$path" | cut -c1-32)
  gzip -c -- "$path" > "$path.gz"
  printf '%s.gz %s\n' "$file" "$md5" >> "$input_folder/index.txt"
done
| true
|
b3b0d989a09945f0be0202417d726c17a8e3cb87
|
Shell
|
diogocustodio/rufles
|
/bbs/yuicompressor.minify.sh
|
UTF-8
| 183
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Minify the file given as $1 with YUI Compressor, writing the result
# beside it as <name>.min.<ext>. Exits 1 if the input does not exist.
if [ ! -e "$1" ]; then
  exit 1
fi
src="$1"
ext="${src##*.}"
dst="${src%.*}.min.${ext}"
java -jar yuicompressor-*.jar "$src" -o "$dst" --charset utf-8
| true
|
eaecb7a6108f83eef6dbd8d25aa169813f87febe
|
Shell
|
kva/cydia
|
/create.sh
|
UTF-8
| 757
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Build a Cydia .deb package from a local theme folder by using a
# jailbroken iDevice (172.20.10.1) as the dpkg build host over SSH.
# terminal prompt
echo -n "Masukkan nama folder yang ingin diproses: "
# read the folder name from the keyboard
read folder
# copy the folder to the destination on the device
scp -r $folder root@172.20.10.1:/var/mobile/Documents
# build the .deb file on the device
ssh root@172.20.10.1 dpkg -b /var/mobile/Documents/$folder
# copy the .deb from the iDevice back to the computer (use a dot to copy into the current directory)
scp root@172.20.10.1:/var/mobile/Documents/$folder.deb ~/Apps/cydia/debs
# delete the processed folder from the device — it is no longer needed
ssh root@172.20.10.1 rm -r /var/mobile/Documents/$folder
ssh root@172.20.10.1 rm /var/mobile/Documents/$folder.deb
# verification:
# the .deb exists in ~/Apps/cydia/debs
# the theme folder at root@172.20.10.1:/var/mobile/Documents/ is gone
| true
|
e5f18947a79eb61fa0abcb59aa39fe500480ea38
|
Shell
|
institutotim/fdenp-landing-web
|
/entrypoint.sh
|
UTF-8
| 471
| 3.265625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Container entrypoint: substitute the required API URLs into the served
# index.html, then exec the container command.
set -e
set -o pipefail

# Report a missing required environment variable and abort.
blow_up() {
  echo "Missing required enviroment variable '$1'. Please, take a look at the manual." >&2
  exit 1
}

# Both variables must be set and non-empty before we touch the page.
for required in ZUP_API_URL LANDING_API_URL; do
  [ "${!required}" ] || blow_up "$required"
done

# Replace the {PLACEHOLDER} tokens in the page with the live values.
for placeholder in ZUP_API_URL LANDING_API_URL; do
  sed -i "s@{$placeholder}@${!placeholder}@g" /var/www/unicef-landing-page/index.html
done

echo "UNICEF-LANDING-WEB is running."
exec "$@"
| true
|
0d7a0a5b13d0325ab8fc9bb6f67ace1f54660b6b
|
Shell
|
Azure/kubernetes-volume-drivers
|
/flexvolume/blobfuse/deployment/install-blobfuse-flexvol-ubuntu.sh
|
UTF-8
| 737
| 3.375
| 3
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
#!/bin/sh
# Install the Azure blobfuse FlexVolume driver on an Ubuntu node:
#  1) add the Microsoft apt repo matching the OS release and install
#     blobfuse + fuse + jq,
#  2) drop the flexvolume driver script into the kubelet plugin dir.
# VER is informational only — it is not referenced below.
VER="1.0.1"
echo "install blobfuse, jq packages ..."
#get Ubuntu version
OS_VERSION=`lsb_release -r | awk '{print $2}'`
echo "current OS version: $OS_VERSION"
# Download and register the Microsoft package feed for this release.
PKG_TARGET=/tmp/packages-microsoft-prod.deb
wget -O $PKG_TARGET https://packages.microsoft.com/config/ubuntu/$OS_VERSION/packages-microsoft-prod.deb
dpkg -i $PKG_TARGET
apt update
apt-get install blobfuse fuse jq -y
echo "install blobfuse flexvolume driver ..."
# 'azure~blobfuse' is the vendor~driver naming kubelet expects for flexvolume.
PLUGIN_DIR=/etc/kubernetes/volumeplugins/azure~blobfuse
mkdir -p $PLUGIN_DIR
wget -O $PLUGIN_DIR/blobfuse https://raw.githubusercontent.com/Azure/kubernetes-volume-drivers/master/flexvolume/blobfuse/deployment/blobfuse-flexvol-installer/blobfuse
chmod a+x $PLUGIN_DIR/blobfuse
echo "install complete."
| true
|
cff34128b51a4d2b6e7ca80f30c0c18ddbf8166b
|
Shell
|
07012220/latihanvspsu
|
/4Preprocessing/Preprocessing.sh
|
UTF-8
| 2,530
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
## /**
## * @file Preprocessing.sh
## * @brief Do Preprocessing: BPF, Normalize, TVG
## * @author fahdi@gm2001.net
## * @date April 2013 [update]
## * @todo Normalize basd on RMS value of Downgoing at certain window
## * @param input SU file to process
## * @param output_bpf output SU file after BPF filter
## * @param bpf Four points of BPF filter
## * @param output_norm output SU file after normalization (RMS whole window operation)
## * @param output_tvg SU file after TimeVaryingGain (
## * @param tpow TVG constant
## * @param final_output Output after all preprocessing workflow in SU format
## */
#input
input=../data/Z_picked_srd.su
output_bpf=../data/Z_picked_bpf.su # output after BPF
output_norm=../data/Z_picked_bpf_norm.su #output after BPF followed RMS Normalization
output_tvg=../data/Z_picked_bpf_norm_tvg.su #output after BPF followed RMS Normalization followed by TimeVaryingGain
final_output=../data/Z_prepro.su
#get tt pick from header
#tt_picks=../data/tt_picks_auto.txt
#gettime pick from header
sugethw < $input key=lagb,gelev,scalel \
| sed -e 's/scalel=//' -e 's/gelev=//' -e 's/lagb=//'| sed '/^$/d' > tt-header.tmp
awk '{ printf "%4f %2f\n", $1/1000, ($2/(10^($3*-1))) }' tt-header.tmp > tt-header.txt
tt_picks=tt-header.txt
#set parameter
bpf=5,8,40,50 #4 points bandpass specification
tpow=1.4 #multiply data by t^tpow
#bpf
sufilter < $input f=$bpf > $output_bpf
#normalize by dividing with RMS
sugain < $output_bpf pbal=1 > $output_norm
#run exponential gain
sugain < $output_norm tpow=$tpow > $output_tvg
cp $output_tvg $final_output #housekeeping to make naming convention
#display
nrec=($(wc -l $tt_picks | awk '{print $1}')) #housekeeping, check number of receiver
suxwigb < $input title="Input" perc=99 style=vsp key=gelev \
label2="depth" label1="twt (s)" x1beg=0.0 x1end=2.0 xbox=10 wbox=500 curve=$tt_picks npair=$nrec,1 curvecolor=red &
suxwigb < $output_bpf title="BPF: $bpf" perc=99 style=vsp key=gelev \
label2="depth" label1="twt (s)" x1beg=0.0 x1end=2.0 xbox=520 wbox=500 curve=$tt_picks npair=$nrec,1 curvecolor=red &
suxwigb < $output_norm title="BPF: $bpf + Normalize by RMS" perc=99 style=vsp key=gelev \
label2="depth" label1="twt (s)" x1beg=0.0 x1end=2.0 xbox=10 wbox=500 curve=$tt_picks npair=$nrec,1 curvecolor=red &
suxwigb < $output_tvg title="BPF: $bpf + Normalize by RMS + Gain ($tpow)" perc=99 style=vsp key=gelev \
label2="depth" label1="twt (s)" x1beg=0.0 x1end=2.0 xbox=520 wbox=500 curve=$tt_picks npair=$nrec,1 curvecolor=red &
#clean up
rm *.tmp
| true
|
96e8c596f1b30ae834b6a21400c7b04d882e200b
|
Shell
|
trebabcock/fgl-client
|
/build.sh
|
UTF-8
| 191
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Cross-compile the Windows/amd64 client and stamp version.json with the
# release version passed as $1.
if [ $# -eq 0 ]; then
  echo "Version not provided"
  exit 1
fi
# Static build (CGO disabled) targeting Windows.
CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build
# -f: don't error when version.json doesn't exist yet (first build).
rm -f version.json
echo "{\"version\":\"$1\"}" > version.json
| true
|
c5d3c029fc8f82586ec4d926ba3dd6a997f45d19
|
Shell
|
Junweiren/cs1730-syscalls
|
/man/man5/gen.sh
|
UTF-8
| 101
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Render every section-5 man page into a PDF named <page>.5.pdf in the
# current directory (uses 'man -t' for PostScript, ps2pdf to convert).
# Glob the page files directly instead of word-splitting 'ls' output.
for f in /usr/share/man/man5/*.5.gz; do
	i=$(basename "$f" .5.gz)
	man -t 5 "$i" | ps2pdf - "$i.5.pdf"
done
| true
|
fc6904f8b88d406027305f1578b03071229f2fd9
|
Shell
|
zalcobia/meapps.sh
|
/meapps.0.1.sh
|
UTF-8
| 4,225
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Descrição: Programa 'My Essential Apps' ou 'meapps' para Debian. Instala aplicações essenciais.
# Versão: 0.1
# Data: 4/Agosto/2021
# Desenvolvedor: Pedro Graça
# País: Portugal
# Contacto: xalcobia@gmail.com
# INSTRUÇÕES DE EXECUÇÂO
# 1. As LISTAS podem ser modificadas para adicionar os teus repositórios e programas
# 2. Navegar até a directório onde se encontra o script com "cd /nome_do_caminho/nome_do_diretório"
# 3. Digitar "sudo -s" antes de executar este script
# 4. Tornar o script executavel com comando "chmod +x mea.sh"
# 5. Executar com comando "./mea.sh"
# INÍCIO DAS LISTAS
# Processadores Intel apenas (descomentar no fim do script com # caso não seja necessário)
INTEL=(
intel-microcode
iucode-tool
)
# Placas Gráficas AMD apenas (descomentar no fim do script com # caso não seja necessário)
ATI_MESA=(
mesa-vulkan-drivers
libvulkan1
vulkan-tools
vulkan-utils
vulkan-validationlayers
mesa-opencl-icd
)
# Root Path Custom Fonts
ROOT_FONT_PATH="/usr/share/fonts/custom_fonts"
# Downloads Path
DOWNLOADS="/$HOME/Downloads"
# Transferências Path
TRANSFERENCIAS="/$HOME/Transferências"
# Git Clone
GIT_CLONE="git clone https://github.com/zalcobia"
# Instalação de Programas APT
PROGRAMAS=(
aptitude
curl
git
python
net-tools
build-essential
traceroute
software-properties-common
ttf-mscorefonts-installer
preload
libreoffice
gimp
inkscape
kdenlive
kazam
synaptic
manpages-pt
manpages-pt-dev
gnome-system-monitor
gnome-disk-utility
gufw
cinnamon
lightdm
vlc
audacity
steghide
nmap
wireshark
crunch
john
gedit
gedit-plugins
engrampa
zip
unzip
rar
unrar
gzip
tar
python
)
# Purge de Programas
PURGE_PROGRAMAS=(
termit
file-roller
nemo-fileroller
)
# FIM DAS LISTAS
remover_locks () {
sudo rm /var/lib/dpkg/lock-frontend
sudo rm /var/cache/apt/archives/lock
}
# Adicionar Arquitectura 32bit
adicionar_arquitetura_i386 () {
sudo dpkg --add-architecture i386
}
# Software_Github
software_github () {
$GIT_CLONE/custom_fonts_A.git
$GIT_CLONE/custom_fonts_B.git
$GIT_CLONE/dark_walls.git
}
# Instalar Programas
instalar_pacotes_apt () {
sudo apt install $PROGRAMAS -y
}
# Software_Deb
software_deb () {
wget -p https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb
wget -p https://github.com/VSCodium/vscodium/releases/download/1.59./codium_1.59.1-1629418630_amd64.deb
wget -p https://atom-installer.github.com/v1.58.0/atom-amd64.deb
}
# Instalação de Programas DEB
instalacao_debs () {
sudo chmod +x $TRANSFERENCIAS/*.deb* >& /dev/null
sudo chmod +x $DOWNLOADS/*.deb* >& /dev/null
sudo apt install https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb
sudo apt install https://github.com/VSCodium/vscodium/releases/download/1.59.1/codium_1.59.1-1629418630_amd64.deb
sudo apt install https://atom-installer.github.com/v1.58.0/atom-amd64.deb
}
# Actualizar Repositorios
actualizar_repositorios () {
sudo apt update -y
}
# Purge de Programas
purge_pacotes () {
sudo apt remove --purge $PURGE_PROGRAMAS -y
}
# Upgrade e Limpar
upgrade_limpeza () {
sudo rm -R ./*deb* >& /dev/null
sudo apt --fix-broken install -y
sudo apt upgrade -y
sudo apt autoclean
sudo apt autoremove -y
sudo systemctl enable preload
}
# Instalar Custom Fonts
custom_fonts () {
sudo mkdir -p /usr/share/fonts/custom_fonts
sudo mv $DOWNLOADS/custom_fonts_A/* $ROOT_FONT_PATH >& /dev/null
sudo mv $DOWNLOADS/custom_fonts_B/* $ROOT_FONT_PATH >& /dev/null
sudo mv $TRANSFERENCIAS/custom_fonts_A/* $ROOT_FONT_PATH >& /dev/null
sudo mv $TRANSFERENCIAS/custom_fonts_B/* $ROOT_FONT_PATH >& /dev/null
sudo chmod 755 -R /usr/share/fonts/custom_fonts
sudo fc-cache -f -v
}
# Instalar Wallpappers
wallpappers () {
sudo mv $DOWNLOADS/dark_walls/*.png /$HOME/Pictures/ >& /dev/null
sudo mv $TRANSFERENCIAS/dark_walls/*.png /$HOME/Imagens/ >& /dev/null
}
# Mensagem
mensagem () {
echo "Todos os processos foram executados com sucesso 'Pressionar CTRL+D' para sair."
echo "Reinicia o computador com o comando 'sudo reboot now'"
}
# Descomentar aqui com # caso não queira instalar algum pacote
remover_locks
adicionar_arquitetura_i386
actualizar_repositorios
instalar_pacotes_apt
software_deb
software_github
purge_pacotes
upgrade_limpeza
custom_fonts
wallpappers
mensagem
| true
|
cd4aefa7e979c791eca9003c87267e232d9e7a1c
|
Shell
|
subaohuang/easy-era5-trck
|
/post_process/resize_png.sh
|
UTF-8
| 627
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
PREFIX_ARR=( "SP_Jan16" )
SUFFIX="png"
STRT_F=0
END_F=648
N_FRM=$(( $END_F - $STRT_F ))
SCRIPT_DIR=`pwd`
WRK_DIR=../fig/
cd $WRK_DIR
echo $WRK_DIR
rm -f *noborder*
L_PREFIX=${#PREFIX_ARR[@]}
for((IPRE=0;IPRE<L_PREFIX;IPRE++))
do
PREFIX=${PREFIX_ARR[$IPRE]}
b=''
for((I=$STRT_F;I<=${END_F};I++))
do
printf "[%-50s] %d/%d \r" "$b" "$(( $I - $STRT_F ))" "$N_FRM";
b+='#'
TFSTMP=`printf "%.4d" $I`
convert ${PREFIX}.${TFSTMP}.${SUFFIX} -resize 738x200! ${PREFIX}.r.${TFSTMP}.${SUFFIX}
done
done
ffmpeg -r 15 -i ${PREFIX}.r.%04d.png -vf format=yuv420p out.mp4
| true
|
b99103ce8edc4912bc85b6d802e14e86f603d6a5
|
Shell
|
voslak/valarm
|
/valarm
|
UTF-8
| 1,975
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
VALARM_NAME=valarm
VALARM_VER=0.0.3
MUSIC_PATH=/home/pi/music/
DI_URL=http://listen.di.fm/public3/
LUB_URL=http://rzg.pl/player/
nofit_cnt=0
# Print the one-line usage string. If an exit status is supplied as $1,
# terminate the whole script with that status (callers pass -1/-3 to
# abort on bad arguments); with no argument, just print and return.
function valarm_usage {
echo Usage:$VALARM_NAME '<time>' '<music type>'
if [ -n "$1" ]; then
exit "$1"
fi
}
# Schedule playback in a detached background job.
#   $1 = seconds to sleep before playing
#   $2 = source: di (di.fm playlist), lub (rzg.pl playlist), mp3 (local file)
#   $3 = station name / filename fragment
# mplayer output is discarded; unknown sources abort via valarm_usage -3.
function valarm_play_music {
case $2 in
di)
(sleep $1; mplayer -playlist $DI_URL$3.pls > /dev/null 2>&1) &
;;
lub)
(sleep $1; mplayer -playlist $LUB_URL$3.m3u > /dev/null 2>&1) &
;;
mp3)
(sleep $1; mplayer $MUSIC_PATH*$3* > /dev/null 2>&1) &
;;
*)
valarm_usage -3
;;
esac
}
if [ -z "$1" ];
then
valarm_usage -1
fi
for i in $@; do
case $i in
--version)
echo $VALARM_NAME $VALARM_VER
echo Copyright '(C)' 2013 SEITAZ
echo License GPLv3+: GNU GPL version 3 or later '<http://gnu.org/licenses/gpl.html>'
echo This is free software: you are free to change and redistribute it.
echo There is NO WARRANTY, to the extent permitted by law.
exit 0
;;
--help)
valarm_usage
echo Report bugs to: '<http://github.com/voslak/valarm/issues>'
echo $VALARM_NAME home page: '<http://github.com/voslak/valarm>'
echo General help using GNU software: '<http://www.gnu.org/gethelp/>'
exit 0
;;
*)
let nofit_cnt++
;;
esac
done
sec_future=$(date -d "$1" +%s)
sec_now=$(date +%s)
((sec_sleep = $sec_future - $sec_now))
if [ "$sec_sleep" -le -1 ]
then
((sec_sleep = $sec_sleep +(60*60*24)))
fi
case $2 in
trance) ;&
vocaltrance) ;&
chillout) ;&
house) ;&
electro)
valarm_play_music $sec_sleep di $2
;;
rzg) ;&
rz) ;&
rg)
valarm_play_music $sec_sleep lub $2
;;
*)
valarm_play_music $sec_sleep mp3 $2
;;
esac
echo ======================================================
echo $2 will wake you up $(date -d "$1")!
echo "Exiting."
echo ======================================================
exit 0
| true
|
55ba090aa422add503603f461bd57094f007accc
|
Shell
|
inz/docker-reg
|
/docker-reg.sh
|
UTF-8
| 875
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Service registrar: every 8 seconds, publish a JSON record describing a
# running Docker container (host, port, plus extra key=value args) into
# etcd under /services/<service>/<machine>, with a 10s TTL so the entry
# expires if this script dies.
#   $1 = container name, $2 = container port, $3 = service name,
#   remaining args = additional key=value pairs to publish.
MACH=$1
PORT=$2
SERV=$3
shift
shift
shift
# etcd endpoint comes from Docker-style link env vars.
CTL="etcdctl -C http://${ETCD_PORT_10000_TCP_ADDR}:${ETCD_PORT_10000_TCP_PORT}"
KEY="/services/${SERV}/${MACH}"
# Deregister on shutdown so the key disappears immediately, not at TTL.
trap "$CTL rm $KEY; exit" SIGHUP SIGINT SIGTERM
while [ 1 ]; do
# 'docker port' prints host:port; split it into the two published fields.
DOCKER_PORTS=$(docker port $MACH $PORT)
KV=$@
KV="host=$(echo $DOCKER_PORTS | awk -F':' '{print $1}') $KV"
KV="port=$(echo $DOCKER_PORTS | awk -F':' '{print $2}') $KV"
# Hand-build a JSON object from the key=value pairs.
JSON=
i=0
for kv in $KV; do
k=$(echo $kv | awk -F'=' '{print $1}')
v=$(echo $kv | awk -F'=' '{print $2}')
echo $k $v
if [ $i -gt 0 ]; then
JSON="$JSON,"
fi
# All-digit values are emitted unquoted so they stay JSON numbers.
if [[ $v != *[!0-9]* ]]; then
# $v is an int, treat it as so
JSON="$JSON \"${k}\": $v"
else
JSON="$JSON \"${k}\": \"$v\""
fi
i=$((i+1))
done
JSON="{$JSON }"
echo $JSON
# TTL (10s) > refresh period (8s) keeps the record alive while we run.
$CTL --debug set "$KEY" "${JSON}" --ttl 10
sleep 8
done
| true
|
3c7c24415852f14331323f10f3e7deb2f52c563b
|
Shell
|
oliv5/profile
|
/.rc.d/15_zip.sh
|
UTF-8
| 1,589
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/sh
# Quick zip compress/deflate
zp() {
for SRC; do
if [ "$SRC" != "${SRC%.zip}" ]; then
zpd "." "$SRC"
else
zpa "${SRC%%/*}.zip" "$SRC"
fi
done
}
# Zip compress
zpa() {
local ARCHIVE="${1:?No archive to create...}"
shift 1
zip -r9 "$ARCHIVE" "$@"
}
# Deflate (extract) one or more zip archives into a target directory.
#   $1 = destination directory, remaining args = archives to extract.
# NOTE(review): the original header said "in place when output dir is ''",
# but "${1:?}" aborts on an empty $1, so the in-place branch implied by
# "${DST:+$DST/}" looks unreachable — confirm intended usage.
zpd() {
local DST="${1:?No output directory specified...}"
local SRC
shift
for SRC; do
unzip "$SRC" -d "${DST:+$DST/}"
done
}
# Test the integrity of each zip archive passed as an argument.
# Returns 0 when every archive checks out; otherwise the exit status of
# the most recent failing 'unzip -t'.
zpt() {
local status=0
local archive
for archive in "$@"; do
if ! unzip -tq "$archive"; then
status=$?
fi
done
return $status
}
###############################
# Quick zip > gpg compress/deflate
zpg() {
local KEY="${1:?No encryption key specified...}"
shift
for SRC; do
if [ "$SRC" != "${SRC%.zip.gpg}" ]; then
zpgd "." "$SRC"
else
zpga "$KEY" "${SRC%%/*}.zip.gpg" "$SRC"
fi
done
}
# zip > gpg compress
zpga(){
local KEY="${1:?No encryption key specified...}"
local ARCHIVE="${2:?No archive to create...}"
shift 2
zip -r9 - "$@" | gpg --encrypt --recipient "$KEY" > "$ARCHIVE"
}
# gpg > zip deflate
zpgd(){
local DST="${1:?No output directory specified...}"
local SRC
shift 1
mkdir -p "$DST"
for SRC; do
gpg --decrypt --batch "$SRC" | funzip > "$DST/$(basename "${SRC%.zip.gpg}")"
done
}
###############################
# Unit test
#~ _unittest zp 'zpg 0x95C1629C87884760'
########################################
########################################
# Last commands in file
# Execute function from command line
[ "${1#zp}" != "$1" ] && "$@" || true
| true
|
04b80cc7ae84e1c2eddb553b1c2cecbc5353080f
|
Shell
|
oukooveu/cloud-gateway
|
/tests/stop
|
UTF-8
| 106
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Tear down the test environment: destroy the Vagrant VMs and remove the
# local Python virtualenv, printing OK on success.
set -eo pipefail
# Operate relative to this script's own directory.
cd $(dirname $0)
vagrant destroy -f
# A missing .venv is fine: the failing '[ -d ]' is on the left of '&&',
# which 'set -e' does not treat as fatal.
[ -d .venv ] && rm -rf .venv
echo OK
| true
|
5893d2257bb11805859a6c601ec239f84200fa10
|
Shell
|
moenasser/bash
|
/pivot
|
UTF-8
| 909
| 4.09375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
source colors
usage(){
piv=$(bold `basename $0`);
cat <<EOF
$piv [-d delim] [-r] | [-h]
Given lines of stdin, prints them as 1 line separated by `bold delim`.
When reversed, `bold -r`, will take one long line and split it into
several lines using `bold delim` as separator.
`bold -d` Delimiter. `bold Default` ':'
`bold -r` Reverse pivot. Split on `bold delim`
`bold -h` Print help screen & exit
Example :
$> ls -1 ~/sdk/bin | $piv -d ':'
Or :
$> echo \$PATH | $piv -d ':' -r
EOF
}
delim=':'
rvrs=0
while getopts "hd:r" opt
do
case $opt in
h ) usage; exit 0;;
d ) delim=$OPTARG;;
r ) rvrs=1;;
esac
done
shift $(($OPTIND -1))
if [ "$rvrs" -eq 1 ];
then
while read line;
do
echo $line
done | sed "s/$delim/\n/g"
else
while read line;
do
echo $line
done | xargs | sed "s/ /$delim/g"
fi
## chomp off last delim...
#p=${p%${delim}$}
| true
|
194cf6f19596fc222ff3ae7510be76b831f1261b
|
Shell
|
chetnap19/FlipCoinSimulation
|
/usecase8.sh
|
UTF-8
| 622
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Read three integers, evaluate five mixed-precedence arithmetic
# expressions over them, then bubble-sort the results in descending
# order, echoing the values and indices at each stage.
echo "Enter 1st Number :"
read a
echo "Enter 2nd Number :"
read b
echo "Enter 3rd Number :"
read c
# Indexed (not associative) array: with 'declare -A' and numeric keys the
# expansion order of ${result[@]} is unspecified; -a guarantees 0..4.
declare -a result
result[0]=$(( a + b / c ))
result[1]=$(( a * b + c ))
result[2]=$(( a % b + c ))
result[3]=$(( c + a * b ))
result[4]=$(( a / b + c ))
echo "${result[@]}"
echo "${!result[@]}"
count=${#result[@]}
# Use dedicated loop indices: the original reused 'c' as a counter,
# silently clobbering the user's third input.
for (( i = 0; i < count; i++ )); do
  arr[i]=${result[i]}
done
# In-place bubble sort, descending.
for (( i = 0; i < count; i++ )); do
  for (( j = i + 1; j < count; j++ )); do
    if (( arr[i] < arr[j] )); then
      temp=${arr[i]}
      arr[i]=${arr[j]}
      arr[j]=$temp
    fi
  done
done
echo "${arr[@]}"
echo "${!arr[@]}"
| true
|
b859c06078f82039e832039b93fced78da038e10
|
Shell
|
srknzl/bash
|
/0-create_script.sh
|
UTF-8
| 364
| 4.125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Prompt for a script name, create that file containing the Sha-Bang line
# ('#!/bin/bash'), and mark it executable.
echo "What should be the name of the file(e.g 1-hello_world.sh)?"
# -r: keep backslashes literal in the typed name.
read -r scriptname
# Refuse an empty name rather than running touch/chmod with no operand.
if [ -z "$scriptname" ]; then
  echo "No file name given" >&2
  exit 1
fi
touch -- "$scriptname"
chmod +x -- "$scriptname"
echo '#!/bin/bash' > "$scriptname"
| true
|
5ebc1d88cd2d33395add3e59cdf8b809452d93e2
|
Shell
|
MontufarEric/code_snippets_bigData
|
/003_hadoop/setup.sh
|
UTF-8
| 2,436
| 2.984375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Provision a single-node Hadoop 3.1.3 pseudo-cluster: install JDK 8,
# fetch Python 3.7, set up passwordless SSH, write the Hadoop env/site
# XML files, format HDFS and start all daemons.
echo "INSTALLING JAVA"
sudo apt update
mkdir /opt
cd /opt
wget -O jdk-8u221-linux-x64.tar.gz \
-c --content-disposition \
"https://javadl.oracle.com/webapps/download/AutoDL?BundleId=239835_230deb18db3e4014bb8e3e8324f81b43"
tar -zxf jdk-8u221-linux-x64.tar.gz
rm jdk-8u221-linux-x64.tar.gz
cd
sudo touch .bash_profile.sh
# Single quotes defer expansion: these lines must be evaluated when the
# profile is sourced, not while this installer runs (the original used
# double quotes, baking in empty values for the then-unset variables).
echo 'JAVA_HOME=/opt/jdk1.8.0_221' >> .bash_profile.sh
echo 'export PATH=$PATH:$JAVA_HOME/bin' >> .bash_profile.sh
echo "JAVA READY"
echo "DOWNLOADING PYTHON"
cd /opt
wget https://www.python.org/ftp/python/3.7.5/Python-3.7.5.tgz
# was 'tar –xf' with a Unicode en-dash, which tar rejects
tar -xf Python-3.7.5.tgz
echo "PYTHON READY"
echo "INSTALLING HADOOP"
# NOTE(review): nothing below downloads the Hadoop tarball itself;
# /opt/hadoop-3.1.3 is assumed to already exist — confirm.
cd
apt-get install openssh-server -y
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 0600 ~/.ssh/authorized_keys
echo 'export HADOOP_HOME=/opt/hadoop-3.1.3' >> .bash_profile.sh
echo 'export HADOOP_INSTALL=$HADOOP_HOME' >> .bash_profile.sh
echo 'export HADOOP_MAPRED_HOME=$HADOOP_HOME' >> .bash_profile.sh
echo 'export HADOOP_COMMON_HOME=$HADOOP_HOME' >> .bash_profile.sh
echo 'export HADOOP_HDFS_HOME=$HADOOP_HOME' >> .bash_profile.sh
echo 'export YARN_HOME=$HADOOP_HOME' >> .bash_profile.sh
echo 'export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native' >> .bash_profile.sh
echo 'export PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin' >> .bash_profile.sh
# was 'source .bash_profile' — the file written above is .bash_profile.sh
source .bash_profile.sh
echo "JAVA HOME READy"
echo "SETTING UP HADOOP FILES"
# was 'cd /opt/hadoop/temp' — every later reference uses
# /opt/hadoop-3.1.3/temp, and the directory must exist before cd
mkdir -p /opt/hadoop-3.1.3/temp
cd /opt/hadoop-3.1.3/temp
mkdir namenode
mkdir datanode
cd /opt/hadoop-3.1.3/etc/hadoop
# NOTE(review): the JDK was unpacked to /opt above, but this points at
# /home/fieldengineer/opt — confirm which path is correct.
echo "export JAVA_HOME=/home/fieldengineer/opt/jdk1.8.0_221" >> hadoop-env.sh
echo "<configuration>
<property>
<name>fs.default.name</name>
<value>hdfs://localhost:9000</value>
</property>
</configuration>" >> core-site.xml
echo "<configuration>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
<property>
<name>dfs.name.dir</name>
<value>file:///opt/hadoop-3.1.3/temp/namenode</value>
</property>
<property>
<name>dfs.data.dir</name>
<value>file:///opt/hadoop-3.1.3/temp/datanode</value>
</property>
</configuration>" >> hdfs-site.xml
echo "<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>" >> mapred-site.xml
# was an unterminated double quote with no redirect, which swallowed the
# remaining commands; close the XML and write it to yarn-site.xml
echo "<configuration>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
</configuration>" >> yarn-site.xml
hdfs namenode -format
cd $HADOOP_HOME/sbin/
./start-all.sh
| true
|
570e6a07ae91ff4b8fb2c91ce577a01446165b90
|
Shell
|
AihamAbusaleh/SYSO-SS16
|
/v1/v1.sh
|
UTF-8
| 4,736
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
# Constants
OUTPUT="stdout_1.log"
OUTPUT_ERR="errorout_1.log"
VERSION="4.2.3"
CORES=$(cat /proc/cpuinfo | grep processor | wc -l)
export ARCH="x86"
export CC="ccache gcc"
function copy_files {
echo "* Copying files..."
cd "$TARGET"
if [ -e "files" ]; then
echo "* Deleting files..."
rm -r files
fi
cp -r "$DIR/files" ./files
}
function download_busybox {
echo "* Downloading Busybox..."
cd "$TARGET"
if [ ! -e "busybox" ]; then
git archive --remote=git@burns.in.htwg-konstanz.de:labworks-SYSO_SS16/syso_ss16_skeleton.git HEAD:V1 busybox | tar -x
fi
}
function download_kernel {
echo "* Downloading kernel version $VERSION..."
cd "$TARGET"
if [ ! -d "linux-$VERSION" ]; then
# Download the kernel if necessary
test -f "linux-$VERSION.tar.xz" || wget "https://kernel.org/pub/linux/kernel/v4.x/linux-$VERSION.tar.xz"
#test -f "linux-$VERSION.tar.sign" || wget "https://kernel.org/pub/linux/kernel/v4.x/linux-$VERSION.tar.sign"
#unxz "linux-$VERSION.tar.xz"
#gpg --verify "linux-$VERSION.tar.sign" || \
# echo "Bad signature. Aborting." && \
# rm -rf "linux-$VERSION.tar" && \
# exit 1
test -d "linux-$VERSION" && rm -rf "linux-$VERSION"
xz -cd "linux-$VERSION.tar.xz" | tar xvf -
#rm "linux-$VERSION.tar"
fi
}
function compile_kernel {
echo "* Compiling kernel..."
cd "$TARGET"
cp files/kernel_config "linux-$VERSION"/.config
cd "linux-$VERSION"
# Compile
make -j $CORES
}
function create_initramfs {
echo "* Creating initramfs..."
cd "$TARGET"
mkdir initramfs
cd initramfs
mkdir -p dev sbin bin usr/bin etc var tmp
cd bin
#currently in target/initramfs/bin
gcc --static -m32 ../../files/systeminfo.c -o systeminfo
cp "$TARGET/busybox" busybox
chmod 755 busybox
for bin in mount echo ls cat ps dmesg sysctl sh sleep; do
ln -s busybox $bin
done
cd ..
echo "* Using provided init file..."
cp "$TARGET/files/init.sh" init
chmod 755 init
find . | cpio -H newc -o > ../initramfs.cpio
cd ..
rm -rf initramfs
}
function start_qemu {
# TODO: replace curses with serial tty: http://nairobi-embedded.org/qemu_serial_terminal_redirection.html
echo "* Starting qemu..."
cd "$TARGET"
ARCH="i386"
qemu-system-$ARCH -kernel "linux-$VERSION/arch/x86/boot/bzImage" -initrd "initramfs.cpio" -curses
}
function clean {
echo "* Cleaning up..."
cd "$TARGET/.."
rm -r target/
mkdir target
}
function usage {
echo "Usage: $0 [OPTION]...
-a, --all do all without cleaning.
-b, --batch run all the tasks uninteractively (stdout and stderr teed to files and QEMU won't be executed).
-q, --qemu start qemu.
-h, --help show this help page, then exit.
--clean clean up the target directory.
--copy_files copy resource files.
--initramfs create the initramfs using the resources.
--download_busybox downloads the busybox from the skeleton git repository.
--compile_kernel compiles the kernel.
"
exit 0
}
function do_all {
copy_files
download_kernel
download_busybox
compile_kernel
# Create initramfs file
create_initramfs
start_qemu
}
function do_all_batch {
# Redirect stdout and stderr
exec > >(tee "$OUTPUT") 2> >(tee "$OUTPUT_ERR" >&2)
copy_files
download_kernel
download_busybox
compile_kernel
# Create initramfs file
create_initramfs
}
if [ $# -lt 1 ]; then
usage
fi
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "$DIR/.."
# echo "$PWD"
if [ ! -d "target" ]; then
echo "* Creating target folder"
mkdir target
fi
cd target
TARGET=$(pwd)
echo "* Target output directory: $TARGET"
while [ "$1" != "" ]; do
case $1 in
-a | --all ) do_all
;;
-b | --batch ) do_all_batch
;;
-q | --qemu ) start_qemu
;;
-h | --help ) usage
;;
--clean ) clean
;;
--copy_files ) copy_files
;;
--initramfs ) create_initramfs
;;
--download_busybox ) download_busybox
;;
--compile_kernel ) compile_kernel
;;
* ) usage
exit 1
esac
shift
done
| true
|
f55b70e6f8683a751ba553c2de8f822bf858a7f0
|
Shell
|
axos88/docker-run-action
|
/entrypoint.sh
|
UTF-8
| 422
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# GitHub-Action entrypoint: optionally log in to a registry, then run the
# user-supplied INPUT_RUN script inside INPUT_IMAGE through the host's
# Docker daemon.
# Log in only when credentials were provided; password is fed via stdin.
if [ ! -z $INPUT_USERNAME ];
then echo $INPUT_PASSWORD | docker login $INPUT_REGISTRY -u $INPUT_USERNAME --password-stdin
fi
# Environment for the inner container (KEY=VALUE lines).
echo "$INPUT_ENV" > env-file
# Collapse literal "\n" sequences into ';' so the multi-line run script
# becomes one shell command line for the -c invocation below.
echo "$INPUT_RUN" | sed -e 's/\\n/;/g' > semicolon_delimited_script
# Mount the Docker socket so the inner container can drive the host daemon.
exec docker run -v "/var/run/docker.sock":"/var/run/docker.sock" $INPUT_OPTIONS --entrypoint=$INPUT_SHELL --env-file env-file $INPUT_IMAGE -c "`cat semicolon_delimited_script`"
| true
|
64975ee9ac5be814981ff4744a94c2b9c9abb048
|
Shell
|
RyanZSU/SDE
|
/.bashrc
|
UTF-8
| 4,278
| 3.5
| 4
|
[] |
no_license
|
# If not running interactively, don't do anything
case $- in
*i*) ;;
*) return;;
esac
################################# Ryan Changes #################################
export TERM=xterm-256color
export EDITOR=vi
export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8
alias ls='ls -G'
alias scr='screen -aAL -t'
alias scrx='screen -x'
alias scrls='screen -ls'
alias sshaws='ssh ubuntu@54.201.216.194'
alias sshgcp='ssh tanjingpan@35.197.86.140'
alias dockerps='docker ps --format "table {{.ID}}\t{{.Names}}\t{{.Image}}\t{{.Status}}\t{{.Ports}}"'
bind Space:magic-space
# Emit " (branch)" for the current git branch, or nothing when not inside
# a repository (git's stderr is silenced); interpolated into PS1 below.
parse_git_branch() {
     git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/ (\1)/'
}
# are we an interactive shell?
if [ "$PS1" ]; then
case $TERM in
xterm*|vte*)
PROMPT_COMMAND='printf "\033]0;%s@%s\007" "${USER}" "${HOSTNAME%%.*}"'
;;
screen*)
PROMPT_COMMAND='printf "\033k%s@%s\033\\" "${USER}" "${HOSTNAME%%.*}"'
;;
*)
[ -e /etc/sysconfig/bash-prompt-default ] && PROMPT_COMMAND=/etc/sysconfig/bash-prompt-default
;;
esac
PS1="\n\[\e[32m\]\u@\h: \[\e[33m\]\$PWD\[\e[35m\]\$(parse_git_branch)\[\e[0m\]\n\\$ "
# You might want to have e.g. tty in prompt (e.g. more virtual machines)
# and console windows
# If you want to do so, just add e.g.
# if [ "$PS1" ]; then
# PS1="[\u@\h:\l \W]\\$ "
# fi
# to your custom modification shell script in /etc/profile.d/ directory
fi
################################# Ryan Changes #################################
# don't put duplicate lines or lines starting with space in the history.
# See bash(1) for more options
HISTCONTROL=ignoreboth
# append to the history file, don't overwrite it
shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=2000
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# If set, the pattern "**" used in a pathname expansion context will
# match all files and zero or more directories and subdirectories.
#shopt -s globstar
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# enable color support of ls and also add handy aliases
if [ -x /usr/bin/dircolors ]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias ls='ls --color=auto'
#alias dir='dir --color=auto'
#alias vdir='vdir --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
fi
# colored GCC warnings and errors
export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01'
# some more ls aliases
alias ll='ls -alF'
alias la='ls -A'
alias l='ls -CF'
alias lr='ls -ltr'
# Add an "alert" alias for long running commands. Use like so:
# sleep 10; alert
alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"'
# Alias definitions.
# You may want to put all your additions into a separate file like
# ~/.bash_aliases, instead of adding them here directly.
# See /usr/share/doc/bash-doc/examples in the bash-doc package.
if [ -f ~/.bash_aliases ]; then
. ~/.bash_aliases
fi
# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
if ! shopt -oq posix; then
if [ -f /usr/share/bash-completion/bash_completion ]; then
. /usr/share/bash-completion/bash_completion
elif [ -f /etc/bash_completion ]; then
. /etc/bash_completion
fi
fi
[ -f /usr/local/etc/bash_completion ] && . /usr/local/etc/bash_completion
# Setting PATH for Python 3.6
# The original version is saved in .bash_profile.pysave
PATH="${PATH}:/Library/Frameworks/Python.framework/Versions/3.6/bin/"
export PATH
# added by Anaconda3 5.0.1 installer
#export PATH="/Users/ryantan/anaconda3/bin:$PATH"
| true
|
385f60a8d07c22cbc84073ced6843ee3a43fcf2b
|
Shell
|
FieldDB/AuthenticationWebService
|
/test/routes/deprecated.sh
|
UTF-8
| 6,485
| 3.234375
| 3
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
#!/bin/bash
function coloredEcho(){
local exp=$1;
local color=$2;
if ! [[ $color =~ '^[0-9]$' ]] ; then
case $(echo $color | tr '[:upper:]' '[:lower:]') in
black) color=0 ;;
red) color=1 ;;
green) color=2 ;;
yellow) color=3 ;;
blue) color=4 ;;
magenta) color=5 ;;
cyan) color=6 ;;
white|*) color=7 ;; # white or invalid color
esac
fi
tput setaf $color;
echo $exp;
tput sgr0;
}
echo "============================================================="
echo " Running CURL tests for deprecated routes "
echo "============================================================="
TESTCOUNT=0;
TESTFAILED=0;
TESTSFAILEDSTRING="$TESTSFAILEDSTRING : $TESTNAME"
echo "$TESTSFAILEDSTRING $result " >> test_errors.log
TESTPASSED=0;
TESTCOUNTEXPECTED=32;
echo "" > test_errors.log
# Production server is using http behind nginx
SERVER="https://localhost:3183";
if [ "$NODE_ENV" == "production" ]; then
SERVER="http://localhost:3183";
fi
# SERVER="https://auth.lingsync.org";
echo ""
echo "Using $SERVER"
echo "-------------------------------------------------------------"
TESTNAME="It should return (upgraded) user details upon successful login"
echo "$TESTNAME"
TESTCOUNT=$[TESTCOUNT + 1]
result="`curl -kX POST \
-H "Content-Type: application/json" \
-d '{"username": "testingprototype", "password": "test"}' \
$SERVER/login `"
echo ""
echo "Response: $result" | grep -C 4 prefs;
if [[ $result =~ userFriendlyErrors ]]
then {
TESTFAILED=$[TESTFAILED + 1]
TESTSFAILEDSTRING="$TESTSFAILEDSTRING : $TESTNAME"
echo "$TESTSFAILEDSTRING $result should not have userFriendlyErrors " >> test_errors.log
}
fi
if [[ $result =~ "\"prefs\":" ]]
then {
echo "Details recieved, you can use this user object in your app settings for this user."
echo " success";
# echo "Response: $result";
echo " $result" | grep -C 4 "corpuses";
echo " $result" | grep -C 4 "corpora";
if [[ $result =~ "\"corpuses\":" ]]
then {
TESTFAILED=$[TESTFAILED + 1]
TESTSFAILEDSTRING="$TESTSFAILEDSTRING : $TESTNAME"
echo "$TESTSFAILEDSTRING Should not have corpuses: $result " >> test_errors.log
} else {
echo "Upgraded users corpuses to corpora."
echo " success";
}
fi
} else {
TESTFAILED=$[TESTFAILED + 1]
TESTSFAILEDSTRING="$TESTSFAILEDSTRING : $TESTNAME"
echo "$TESTSFAILEDSTRING $result should have prefs " >> test_errors.log
}
fi
echo "-------------------------------------------------------------"
TESTNAME="It should count down the password reset"
echo "$TESTNAME"
TESTCOUNT=$[TESTCOUNT + 1]
result="`curl -kX POST \
-H "Content-Type: application/json" \
-d '{"username": "testingprototype", "password": "test"}' \
$SERVER/login `"
result="`curl -kX POST \
-H "Content-Type: application/json" \
-d '{"username": "testingprototype", "password": "opps"}' \
$SERVER/login `"
result="`curl -kX POST \
-H "Content-Type: application/json" \
-d '{"username": "testingprototype", "password": "wrongpassword"}' \
$SERVER/login `"
result="`curl -kX POST \
-H "Content-Type: application/json" \
-d '{"username": "testingprototype", "password": "again"}' \
$SERVER/login `"
echo "$result"
if [[ $result =~ "You have 2 more attempts" ]]
then {
echo " success 2 more attempts";
} else {
TESTFAILED=$[TESTFAILED + 1]
TESTSFAILEDSTRING="$TESTSFAILEDSTRING : $TESTNAME"
echo "$TESTSFAILEDSTRING $result " >> test_errors.log
}
fi
result="`curl -kX POST \
-H "Content-Type: application/json" \
-d '{"username": "testingprototype", "password": "trying"}' \
$SERVER/login `"
# echo "$result"
if [[ $result =~ "You have 1 more attempts" ]]
then {
echo " success 1 more attempt";
} else {
TESTFAILED=$[TESTFAILED + 1]
TESTSFAILEDSTRING="$TESTSFAILEDSTRING : $TESTNAME"
echo "$TESTSFAILEDSTRING $result " >> test_errors.log
}
fi
result="`curl -kX POST \
-H "Content-Type: application/json" \
-d '{"username": "testingprototype", "password": "wrongpassword"}' \
$SERVER/login `"
echo "$result"
if [[ $result =~ "You have tried to log in" ]]
then {
echo " success warn user who have no email ";
} else {
TESTFAILED=$[TESTFAILED + 1]
TESTSFAILEDSTRING="$TESTSFAILEDSTRING : $TESTNAME"
echo "$TESTSFAILEDSTRING $result " >> test_errors.log
}
fi
echo "-------------------------------------------------------------"
TESTNAME="It should accept forgotpassword (and fail on the last step if on a dev server since it has no credentials to send emails)"
echo "$TESTNAME"
echo " prep: try to login with wrong password"
TESTCOUNT=$[TESTCOUNT + 1]
result="`curl -kX POST \
-H "Content-Type: application/json" \
-d '{"username": "testinguserwithemail", "password": "opps"}' \
$SERVER/login `"
echo ""
# echo "Response: $result";
result="`curl -kX POST \
-H "Content-Type: application/json" \
-d '{"email": "myemail@example.com"}' \
$SERVER/forgotpassword `"
echo ""
echo "Response: $result";
if [[ $result =~ userFriendlyErrors ]]
then {
echo " success"
if [[ $result =~ "Please report this 2823" ]]
then {
echo " server provided an informative message";
} else {
TESTFAILED=$[TESTFAILED + 1]
TESTSFAILEDSTRING="$TESTSFAILEDSTRING : $TESTNAME"
echo "$TESTSFAILEDSTRING $result " >> test_errors.log
}
fi
} else {
TESTFAILED=$[TESTFAILED + 1]
TESTSFAILEDSTRING="$TESTSFAILEDSTRING : $TESTNAME"
echo "$TESTSFAILEDSTRING $result " >> test_errors.log
}
fi
echo;
echo;
echo "============================================================="
echo "Test results for deprecated routes";
TESTPASSED=$((TESTCOUNT-TESTFAILED));
if [ $TESTPASSED = $TESTCOUNT ]; then
coloredEcho " $TESTPASSED passed of $TESTCOUNT" green
else
coloredEcho " $TESTPASSED passed of $TESTCOUNT" red
coloredEcho " $TESTFAILED failed" red
coloredEcho " $TESTSFAILEDSTRING" red
fi
if [ $TESTCOUNT = $TESTCOUNTEXPECTED ]; then
coloredEcho " Ran $TESTCOUNT of $TESTCOUNTEXPECTED expected" green
else
coloredEcho " Ran $TESTCOUNT of $TESTCOUNTEXPECTED expected" yellow
fi
echo "============================================================="
cat test_errors.log
if [ $TESTPASSED -eq $TESTCOUNT ]
then
exit $TESTFAILED;
else
exit $TESTFAILED;
fi
# ls noqata_tusunayawami.mp3 || {
# result="`curl -O --retry 999 --retry-max-time 0 -C - https://github.com/FieldDB/FieldDB/blob/master/sample_data/noqata_tusunayawami.mp3?raw=true
# mv "noqata_tusunayawami.mp3?raw=true" noqata_tusunayawami.mp3
# }
# 15602
| true
|
1da2e4c47ffe3ee9cf6e01f8275ef09d908d1c22
|
Shell
|
ghcjs/ghcjs
|
/utils/cleanPackage.sh
|
UTF-8
| 615
| 3.109375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# generic/hacky package cleaning
if [ $# -ne 1 ];
then
echo "usage: $0 <dir>"
echo ""
echo "clean cabal package build artifacts from <dir>"
exit 1
fi
(
cd "$1" || exit 1;
echo "cleaning: $PWD"
rm -rf dist dist-install autom4te.cache
rm -f config.status config.log
rm -f *.buildinfo
rm -f ghc.mk
rm -f gmp/config.mk gmp/ghc.mk gmp/gmpsrc.patch gmp/GNUmakefile
rm -f include/HsUnixConfig.h
rm -f include/HsIntegerGmp.h
rm -f include/HsBaseConfig.h
rm -f include/EventConfig.h
rm -f include/HsProcessConfig.h
rm -f HsDirectoryConfig.h
rm -f GHC/PrimopWrappers.hs
rm -f GHC/Prim.hs
)
| true
|
c5dfc26ef63072f2f22b6a48d27c3635f4ca83d3
|
Shell
|
rome1france/miron-client
|
/miron-autorun
|
UTF-8
| 622
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
#
### BEGIN INIT INFO
# Provides: miron-client
# Required-Start: $network
# Required-Stop: $network
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Start and stop miron-client
# Description: Start and stop miron-client.
### END INIT INFO
#
RUN=/usr/bin/miron-client
INFO=/etc/miron-client/info.xml
test -f $RUN || exit 0
case $1 in
start)
sudo lshw -xml > $INFO
sudo $RUN
;;
stop)
killall -SIGHUP $RUN
;;
reload | force-reload)
killall -SIGHUP $RUN
sudo lshw -xml > $INFO
sudo $RUN
;;
restart)
killall -SIGHUP $RUN
sudo $RUN
;;
esac
| true
|
e3ab7dfd803196a71626ef047c0df46226ce357a
|
Shell
|
typeorm/typeorm
|
/docker/oracle/docker-entrypoint.d/050-npm-compile.sh
|
UTF-8
| 228
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# exit when any command fails
set -e
npx rimraf build/compiled
npx tsc
cp /config/ormconfig.json build/compiled/ormconfig.json
if [ ! -f ormconfig.json ]; then
cp ormconfig.json.dist ormconfig.json
fi
| true
|
e23c7dfea6fe0e126856b0fb8ce00e7a340794bf
|
Shell
|
firstcoincom/blockchain-network-on-kubernetes
|
/debug.sh
|
UTF-8
| 491
| 3.21875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
if [ -d "${PWD}/configFiles" ]; then
KUBECONFIG_FOLDER=${PWD}/configFiles
else
echo "Configuration files are not found."
exit
fi
kubectl create -f ${KUBECONFIG_FOLDER}/peerdebug.yaml
kubectl create -f ${KUBECONFIG_FOLDER}/tooldebug.yaml
PEERDEBUG_NAME=$(kubectl get pods | grep "peerdebug" | awk '{print $1}')
TOOLDEBUG_NAME=$(kubectl get pods | grep "tooldebug" | awk '{print $1}')
echo "[INFO] peerdebug pod: ${PEERDEBUG_NAME}, tooldebug pod: ${TOOLDEBUG_NAME}"
| true
|
f01a7ccace446528bf3eecac044e2816f6710cfe
|
Shell
|
rancher/rancher
|
/tests/validation/tests/v3_api/resource/terraform/scripts/optional_write_files.sh
|
UTF-8
| 504
| 4.03125
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# This script pulls raw files and writes to specified locations.
# For example, it can be used to write HelmChartConfig or custom PSA files.
files=$1
if [ -n "$files" ]
then
file_array=($(echo "$files" | tr ' ' '\n'))
for current_file in "${file_array[@]}"; do
file_location=$(echo "$current_file" | awk -F, '{print $1}')
mkdir -p "$(dirname "$file_location")"
raw_data=$(echo "$current_file" | awk -F, '{print $2}')
curl -s "$raw_data" -o "$file_location"
done
fi
| true
|
9b22936e093a777b887e3c9893c145b9f521aebd
|
Shell
|
ppabc/dac
|
/alpine-consul-php5/root/etc/zabbix/zabbix_agentd.conf.d/scripts/php-fpm-check.sh
|
UTF-8
| 454
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
HOST="localhost"
PORT="73"
status="php-fpm_status"
function query() {
curl -s http://${HOST}:${PORT}/${status}?xml | grep "<$1>" | awk -F'>|<' '{ print $3}'
}
if [ $# == 0 ]; then
echo $"Usage $0 {pool|process-manager|start-time|start-since|accepted-conn|listen-queue|max-listen-queue|listen-queue-len|idle-processes|active-processes|total-processes|max-active-processes|max-children-reached|slow-requests}"
exit
else
query "$1"
fi
| true
|
0411a1448917ca359182a06008e8be021de25cbd
|
Shell
|
uttam47/unix-sandbox
|
/directory-operations.bash
|
UTF-8
| 710
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
# creating a directory...
echo "Enter the name of directory which is to be created:"
read directory_name
mkdir $directory_name
# first check if the directory exist or not, then create it...
echo "Create another diretory, enter the name:"
read d2
if [ -d "$d2" ] # -d command return true if the directory name provided exists.
then
echo "Directory exists."
else
mkdir $d2
fi
# deleting a file...
echo "Enter file name to remove:"
read file_name
rm -i $file_name # -i option will prompt the user for the deletion's permission.
echo "Enter file directory name to remove:"
read direc_name
rm -r $direc_name # -r opetion with rm command is used to remove directories...
| true
|
3bf1758a51d3c1ebb784ee3fd0c09a887ce7e16d
|
Shell
|
emhaye/get-repos
|
/get-repos.sh
|
UTF-8
| 568
| 2.859375
| 3
|
[] |
no_license
|
function get_repos() {
curl --silent "https://api.github.com/orgs/gohugoid/repos" | grep '"full_name":' | sed -e 's/^.*": "//g' -e 's/",.*$//g' >> repos.txt
}
get_repos
cat repos.txt
function get_clone_urls() {
curl --silent "https://api.github.com/orgs/gohugoid/repos" | grep '"clone_url":' | sed -e 's/^.*": "//g' -e 's/",.*$//g' >> clone_urls.txt
}
get_clone_urls
cat clone_urls.txt
while read repos; do
curl --silent https://api.github.com/repos/$repos/commits | jq -r '.[0] | [.commit.author, .commit.url]'
done < repos.txt
rm repos.txt
rm clone_urls.txt
| true
|
28d772704cab938147e6a036b87b77140eedccf5
|
Shell
|
Lemon080910/xiaomi_3c
|
/squashfs-root/lib/config_post_ota/netifd_config_post_ota.sh
|
UTF-8
| 1,114
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/sh
# add troubleshoot netowrk
/sbin/uci -q batch <<-EOF >/dev/null
delete network.diagnose
set network.ready=interface
set network.ready.proto=static
set network.ready.ipaddr=169.254.29.1
set network.ready.netmask=255.255.255.0
commit network
EOF
has_smartvpn_old_version=$(uci get smartvpn.settings 2>/dev/null)
[ ! -z $has_smartvpn_old_version ] && {
smartdns_conf_name="smartdns.conf"
rm "/etc/dnsmasq.d/$smartdns_conf_name"
rm "/var/etc/dnsmasq.d/$smartdns_conf_name"
rm "/tmp/etc/dnsmasq.d/$smartdns_conf_name"
/sbin/uci -q batch <<-EOF >/dev/null
delete smartvpn.settings
delete smartvpn.dest
set smartvpn.vpn=remote
set smartvpn.vpn.type=vpn
set smartvpn.vpn.domain_file=/etc/smartvpn/proxy.txt
set smartvpn.vpn.disabled=0
set smartvpn.vpn.status=off
set smartvpn.dest=dest
add_list smartvpn.dest.notnet=169.254.0.0/16
add_list smartvpn.dest.notnet=172.16.0.0/12
add_list smartvpn.dest.notnet=192.168.0.0/16
add_list smartvpn.dest.notnet=224.0.0.0/4
add_list smartvpn.dest.notnet=240.0.0.0/4
commit smartvpn
delete firewall.smartvpn
delete firewall.proxy_thirdparty
commit firewall
EOF
}
| true
|
6ebab910f955491f5a9551af26b2b15c01732e0f
|
Shell
|
hthuong09/dotfiles
|
/.bin/bar/battery
|
UTF-8
| 670
| 3.203125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
ICON_BATTERY="\ue238"
ICON_BATTERY_CHARGING="\ue200"
ICON_BATTERY_FULL="\ue1ff"
ICON_BATTERY_HALF="\ue1fe"
ICON_BATTERY_EMPTY="\ue1fd"
function icon()
{
if [[ "$2" != "" ]]; then
COLOR=$2
else
COLOR=$COLOR_ICON
fi
echo "%{F$COLOR}%{T2}$(printf $1)%{T-}%{F-}"
}
bat=$(acpi -b | awk '{print $4}' | sed 's/%//g' | sed 's/,//g')
status=$(acpi -b | awk '{print $3}' | sed 's/,//g')
if [[ "$status" = "Charging" ]]; then
icon=$(printf "\ue200")
elif [[ $bat -lt 10 ]]; then
icon=$(printf "\ue200")
elif [[ $bat -lt 50 ]]; then
icon=$(printf "\ue200")
else
icon=$(printf "\ue200")
fi
echo "${icon} ${bat}"
| true
|
6d6f6a78b1fad882347398a21fc34e1065e2fdf4
|
Shell
|
jessepav/boxtools
|
/py/setup-venv.sh
|
UTF-8
| 168
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
cd "$(realpath $(dirname "$0"))"
[[ ! -d venv ]] && python -m venv venv
[[ -f requirements.txt ]] && ./venv/bin/python -m pip install -r requirements.txt
| true
|
0720cb8d4afcd834075450e6c05885864d4c6365
|
Shell
|
theoremoon/ShellgeiBot-Image
|
/docker_image.bats
|
UTF-8
| 26,140
| 3.4375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bats
bats_require_minimum_version 1.5.0
@test "5ktrillion" {
run -0 bash -c "5ktrillion -5"
[ "$output" = '5000兆円欲しい!' ]
}
@test "abcMIDI" {
run -0 bash -c "abc2midi -ver"
[[ "$output" =~ abc2midi ]]
}
@test "agrep" {
run -0 bash -c "echo unko | agrep -2 miko"
[ "$output" = "unko" ]
}
@test "align" {
run -0 bash -c "yes シェル芸 | head -4 | awk '{print substr(\$1,1,NR)}' | align center"
[ "${lines[0]}" = ' シ ' ]
[ "${lines[1]}" = ' シェ ' ]
[ "${lines[2]}" = ' シェル ' ]
[ "${lines[3]}" = 'シェル芸' ]
}
# 不要では?
@test "apache2-utils" {
run -0 ab -V
[[ "${lines[0]}" =~ "ApacheBench" ]]
}
@test "asciinema" {
run -0 asciinema --version
[[ "${lines[0]}" =~ "asciinema " ]]
}
# /bin/ash は /bin/dash へのエイリアス, /usr/bin/ash は /usr/bin/dash へのエイリアスで、両方とも同じ
# apt install ash ではエイリアスが作成されるのみ
@test "ash" {
run -0 ash -c "echo シェル芸"
[ "$output" = シェル芸 ]
}
@test "babashka" {
# コマンドラインではbbコマンド
run -0 which bb
run -0 bb -i '(println "Hello")'
}
@test "base85" {
if [ "$(uname -m)" = "aarch64" ]; then skip "base85 is not installed on aarch64"; fi
run -0 bash -c 'echo "<~j+=c#Ju@X]X6>GN~>" | base85 -d'
[ "$output" = "シェル芸" ]
}
@test "bat" {
run -0 bat --version
[[ "$output" =~ "bat " ]]
}
@test "bbe" {
run -0 bbe -?
[[ "${lines[0]}" =~ "bbe " ]]
}
@test "bc" {
run -0 bash -c "echo 'print \"シェル芸\n\"' | bc"
[ "$output" = "シェル芸" ]
}
@test "boxes" {
run -0 bash -c "echo シェル芸 | boxes"
[[ "$output" =~ \/\*\ シェル芸\ \*\/ ]]
}
@test "Brainf*ck" {
run -0 bash -c "echo '+++++++++[>+++++++++<-]>++.<+++++++++[>++<-]>+++.---.+++++++..<+++++++++[>----<-]>-.<+++++++++[>+++<-]>+++.++++.' | hsbrainfuck"
[ "$output" = 'ShellGei' ]
}
@test "bsdgames" {
run -0 bash -c "echo '... .... . .-.. .-.. --. . .. ...-.-' | morse -d"
[ "$output" = "SHELLGEI" ]
}
@test "build-essential" {
run -0 gcc --version
[[ "${lines[0]}" =~ gcc ]]
}
@test "busybox" {
run -0 /bin/busybox echo "シェル芸"
[ "$output" = "シェル芸" ]
}
@test "cal" {
run -0 cal 12 2020
[[ "${lines[0]}" =~ "12月 2020" ]]
}
@test "ccze" {
run -0 bash -c "echo シェル芸 | ccze -A"
[[ "$output" =~ シェル芸 ]]
}
@test "chemi" {
run -0 chemi -s H
[ "${lines[2]}" = 'element : Hydrogen' ]
}
@test "clisp" {
run -0 clisp -q -x '(+ 1 2)'
[ "$output" = '3' ]
}
@test "clojure" {
run -0 which clojure
# JAVA_HOME未設定だったりランタイムがないと動かない
run -0 clojure -M -e '(println "Hello")'
[ "$output" = 'Hello' ]
}
@test "cmatrix" {
run -0 cmatrix -h
[[ "${lines[0]}" =~ 'Usage: cmatrix' ]]
}
@test "color" {
run -0 bash -c "color 1f"
[ "$output" = '[30m \x1b[30m [m[31m \x1b[31m [m[32m \x1b[32m [m[33m \x1b[33m [m[34m \x1b[34m [m[35m \x1b[35m [m[36m \x1b[36m [m[37m \x1b[37m [m' ]
}
@test "concat" {
run -0 concat cat
[ "${lines[0]}" = " /\ /" ]
[ "${lines[1]}" = "(' ) ( " ]
[ "${lines[2]}" = " ( \ )" ]
[ "${lines[3]}" = " |(__)/ " ]
}
@test "cowsay" {
run -0 cowsay シェル芸
[ "${lines[0]}" = ' __________' ]
[ "${lines[1]}" = '< シェル芸 >' ]
[ "${lines[2]}" = ' ----------' ]
[ "${lines[3]}" = ' \ ^__^' ]
[ "${lines[4]}" = ' \ (oo)\_______' ]
[ "${lines[5]}" = ' (__)\ )\/\' ]
[ "${lines[6]}" = ' ||----w |' ]
[ "${lines[7]}" = ' || ||' ]
}
@test "csharp" {
run -0 csharp -e 'print("シェル芸")'
[ "$output" = "シェル芸" ]
}
@test "csvquote" {
run -0 bash -c 'echo -e "unko,\"un,ko\"" | csvquote | cut -d "," -f 2 | csvquote -u'
[ "$output" = '"un,ko"' ]
}
@test "cureutils" {
run -0 bash -c "cure girls | head -1"
[ "$output" = "美墨なぎさ" ]
}
@test "curl" {
run -0 curl --help
[ "${lines[0]}" = "Usage: curl [options...] <url>" ]
}
@test "datamash" {
run -0 datamash --version
[[ "${lines[0]}" =~ "datamash (GNU datamash)" ]]
}
@test "dateutils" {
run -0 /usr/bin/dateutils.dtest -V
[[ "$output" =~ "datetest" ]]
}
@test "dc" {
run -0 dc -V
[[ "${lines[0]}" =~ "dc" ]]
}
@test "dotnet" {
run -0 dotnet --help
[[ "${lines[0]}" == ".NET 7.0 へようこそ!" ]]
}
@test "eachdo" {
if [ "$(uname -m)" = "aarch64" ]; then skip "eachdo is not installed on aarch64"; fi
run -0 eachdo -v
[[ "$output" =~ "eachdo command" ]]
}
@test "echo-meme" {
run -0 echo-meme シェル芸
[[ "$output" =~ "シェル芸" ]]
}
@test "edens" {
if [ "$(uname -m)" = "aarch64" ]; then skip "edens is not installed on aarch64"; fi
run -0 edens -h
}
@test "edf" {
run -0 edf words scientist
}
@test "egison" {
run -0 egison -e 'foldl (+) 0 (take 10 nats)'
[ "$output" = "55" ]
}
@test "egzact" {
run -0 bash -c "echo シェル芸 | dupl 2"
[ "${lines[0]}" = 'シェル芸' ]
[ "${lines[1]}" = 'シェル芸' ]
}
@test "eki" {
run -0 bash -c "eki | grep -q 京急川崎"
run -0 bash -c "eki line 京急川崎 | grep 大師"
[ "$output" = '京急大師線' ]
}
@test "Emacs" {
run -0 bash -c "echo シェル芸 | emacs -Q --batch --insert /dev/stdin --eval='(princ (buffer-string))'"
[ "$output" = シェル芸 ]
}
@test "faker" {
run -0 faker name
}
@test "faker-cli" {
run -0 faker-cli --help
[ "${lines[0]}" = 'Usage: faker-cli [option]' ]
}
@test "faketime" {
run -0 faketime --version
[[ "${lines[0]}" =~ 'faketime: Version' ]]
}
@test "ffmpeg" {
run -0 ffmpeg -version
[[ "${lines[0]}" =~ "ffmpeg version" ]]
}
@test "figlet" {
run -0 bash -c "echo ShellGei | figlet"
echo "lines[0]: '${lines[0]}'"
[ "${lines[0]}" = " ____ _ _ _ ____ _ " ]
[ "${lines[1]}" = "/ ___|| |__ ___| | |/ ___| ___(_)" ]
[ "${lines[2]}" = "\___ \| '_ \ / _ \ | | | _ / _ \ |" ]
[ "${lines[3]}" = " ___) | | | | __/ | | |_| | __/ |" ]
[ "${lines[4]}" = "|____/|_| |_|\___|_|_|\____|\___|_|" ]
}
@test "fish" {
run -0 fish -c "echo シェル芸"
[ "$output" = "シェル芸" ]
}
@test "fonts-ipafont" {
run -0 bash -c "fc-list | grep ipa | wc -l"
[ $output -ge 4 ]
}
@test "fonts-nanum" {
run -0 bash -c "fc-list | grep nanum | wc -l"
[ $output -ge 10 ]
}
@test "fonts-noto-color-emoji" {
run -0 bash -c "fc-list | grep NotoColorEmoji | wc -l"
[ $output -ge 1 ]
}
@test "fonts-symbola" {
run -0 bash -c "fc-list | grep Symbola | wc -l"
[ $output -ge 1 ]
}
@test "fonts-vlgothic" {
run -0 bash -c "fc-list | grep vlgothic | wc -l"
[ $output -ge 2 ]
}
@test "forest" {
run -0 bash -c "echo シェル芸 | forest"
[ "$output" = '└ ─ シェル芸' ]
}
@test "fortune" {
run -0 fortune
}
@test "fujiaire" {
run -0 fujiaire フジエアー
[ "$output" = "フピエアー" ]
}
@test "funnychar" {
run -0 funnychar -p 3 abcABC
[ "$output" = '𝑎𝑏𝑐𝐴𝐵𝐶' ]
}
@test "fx" {
run -0 bash -c "echo '{\"item\": \"unko\"}' | fx 'this.item'"
[ "$output" = 'unko' ]
}
@test "gawk" {
run -0 bash -c "echo シェル芸 | gawk '{print \$0}'"
[ "$output" = "シェル芸" ]
}
@test "gdb" {
run -0 gdb --help
[ "${lines[0]}" = "This is the GNU debugger. Usage:" ]
}
@test "Git" {
run -0 git version
[[ "$output" =~ "git version" ]]
}
@test "glue" {
run -0 bash -c 'echo echo 10 | glue /dev/stdin'
[[ "$output" =~ '10' ]]
}
@test "glueutils" {
run -2 bash -c 'flip12 ls aaaaaaaaaaa'
[ "$output" = "ls: 'aaaaaaaaaaa' にアクセスできません: そのようなファイルやディレクトリはありません" ]
}
@test "gnuplot" {
run -0 gnuplot -V
[[ "$output" =~ "gnuplot" ]]
}
@test "graphviz" {
run -0 dot -V
[[ "${lines[0]}" =~ 'dot - graphviz' ]]
}
@test "gron" {
run -0 bash -c "echo '{\"s\":\"シェル芸\"}' | gron -m"
[ "${lines[1]}" = 'json.s = "シェル芸";' ]
}
@test "gyaric" {
if [ "$(uname -m)" = "aarch64" ]; then skip "gyaric is not installed on aarch64"; fi
run -0 gyaric -h
[ "${lines[0]}" = "gyaric encode/decode a text to unreadable gyaru's text." ]
}
@test "HanazonoMincho" {
run -0 bash -c "fc-list | grep 花園明朝"
[ "${lines[0]}" == '/usr/share/fonts/truetype/hanazono/HanaMinA.ttf: 花園明朝A,HanaMinA:style=Regular' ]
[ "${lines[1]}" == '/usr/share/fonts/truetype/hanazono/HanaMinB.ttf: 花園明朝B,HanaMinB:style=Regular' ]
}
@test "Haskell" {
run -0 ghc -e 'putStrLn "シェル芸"'
[ "$output" = "シェル芸" ]
}
@test "himechat-cli" {
run -0 himechat-cli -V
[ "$output" = 'https://github.com/gyozabu/himechat-cli' ]
}
@test "home-commands" {
run -0 echo-sd シェル芸
[ "${lines[0]}" = '_人人人人人人_' ]
[ "${lines[1]}" = '> シェル芸 <' ]
[ "${lines[2]}" = ' ̄Y^Y^Y^Y^Y^Y^ ̄' ]
}
@test "horizon" {
run -0 bash -c "echo ⁃‐﹘╸―ⲻ━= | horizon -d"
[ "$output" = 'unko' ]
}
@test "idn" {
run -0 idn うんこ.com
[ "$output" = 'xn--p8j0a9n.com' ]
}
@test "ImageMagick" {
run -0 convert -version
[[ "${lines[0]}" =~ "Version: ImageMagick" ]]
}
@test "imgout" {
run -0 imgout -h
[ "$output" = 'usage: imgout [-f <font>]' ]
}
@test "ipcalc" {
run -0 ipcalc 192.168.10.55
[ "${lines[0]}" = 'Address: 192.168.10.55 11000000.10101000.00001010. 00110111' ]
}
@test "ivsteg" {
run -0 ivsteg -h
[ "${lines[0]}" = 'IVS steganography encoder or decode from standard input to standard output.' ]
}
@test "J" {
if [ "$(uname -m)" = "aarch64" ]; then skip "J is not installed on aarch64"; fi
run -0 bash -c "echo \"'シェル芸'\" | jconsole"
[ "${lines[0]}" = 'シェル芸' ]
}
@test "jq" {
run -0 bash -c "echo シェル芸 | jq -Rr '.'"
[ "$output" = シェル芸 ]
}
@test "julia" {
run -0 julia -e 'println("シェル芸")'
[ "$output" = 'シェル芸' ]
}
@test "kagome" {
run -0 kagome <<< シェル芸
[[ "${lines[0]}" =~ "名詞,一般,*,*,*,*,シェル,シェル,シェル" ]]
}
@test "kakasi" {
run -0 bash -c "echo シェル芸 | nkf -e | kakasi -JH | nkf -w"
[ "$output" = "シェルげい" ]
}
@test "kakikokera" {
run -0 bash -c "echo 柿杮杮杮柿杮柿杮柿杮杮柿杮杮杮柿柿杮杮柿杮柿杮杮柿杮杮柿杮杮杮杮 | kakikokera -d"
[ "$output" = 'unko' ]
}
@test "kana2ipa" {
run -0 kana2ipa -h
[ "${lines[0]}" = 'Usage: kana2ipa [text]' ]
}
@test "ke2daira" {
if [ "$(uname -m)" = "aarch64" ]; then skip "ke2daira is not installed on aarch64"; fi
run -0 bash -c "echo シェル 芸 | ke2daira -m"
[ "$output" = 'ゲェル シイ' ]
}
@test "kkc" {
run -0 kkc help
[[ "${lines[1]}" =~ " kkc help" ]]
}
@test "kkcw" {
run -0 kkcw <<< やまだたろう
[ "$output" = '山田太郎' ]
}
# 不要?
@test "libskk-dev" {
run -0 stat /usr/lib/$(uname -m)-linux-gnu/libskk.so
[ "${lines[0]}" = " File: /usr/lib/$(uname -m)-linux-gnu/libskk.so -> libskk.so.0.0.0" ]
}
@test "libxml2-utils" {
run -0 bash -c "echo '<?xml version=\"1.0\"?><e>ShellGei</e>' | xmllint --xpath '/e/text()' -"
[ "$output" = "ShellGei" ]
}
@test "lolcat" {
run -0 lolcat --version
[[ "${lines[0]}" =~ "lolcat" ]]
}
@test "longcat" {
run -0 longcat -i 4 -o /a.png
[ -f /a.png ]
}
@test "lua" {
run -0 lua -e 'print("シェル芸")'
[ "$output" = "シェル芸" ]
}
@test "man" {
run -0 bash -c "man シェル芸 |& cat"
[ "$output" = 'シェル芸 というマニュアルはありません' ]
}
@test "marky_markov" {
run -0 marky_markov -h
[ "${lines[0]}" = 'Usage: marky_markov COMMAND [OPTIONS]' ]
}
@test "matplotlib" {
run -0 python3 -c 'import matplotlib; print(matplotlib.__name__)'
[ "$output" = "matplotlib" ]
}
@test "matsuya" {
run -0 matsuya
}
@test "maze" {
if [ "$(uname -m)" = "aarch64" ]; then skip "maze is not installed on aarch64"; fi
run -0 maze -h
run -0 maze -v
run -0 maze
}
@test "mecab with NEologd" {
run -0 bash -c "echo シェル芸 | mecab -Owakati"
[ "$output" = "シェル芸 " ]
}
@test "mono-runtime" {
run -0 mono --version
[[ "${lines[0]}" =~ "Mono JIT compiler version" ]]
}
@test "moreutils" {
run -0 errno 1
[ "$output" = "EPERM 1 許可されていない操作です" ]
}
@test "morsed" {
if [ "$(uname -m)" = "aarch64" ]; then skip "morsed is not installed on aarch64"; fi
run -0 bash -c "morsed -p 名詞 -s 寿司 吾輩は猫である"
[ "$output" = "寿司は寿司である" ]
}
@test "morsegen" {
run -0 morsegen <(echo -n shellgei)
[ "${lines[0]}" = "... .... . .-.. .-.. --. . .." ]
}
@test "mt" {
run -0 mt -v
[[ "${lines[0]}" =~ "mt-st" ]]
}
@test "muscular" {
run -0 bash -c "muscular shout ナイスバルク | grep -P -o '\p{Katakana}'|tr -d '\n'"
[ "${lines[0]}" = 'ナイスバルク' ]
}
@test "nameko.svg" {
run -0 file nameko.svg
[ "$output" = 'nameko.svg: SVG Scalable Vector Graphics image' ]
}
@test "nginx" {
run -0 nginx -v
[[ "$output" =~ "nginx version:" ]]
}
@test "nim" {
if [ "$(uname -m)" = "aarch64" ]; then skip "nim is not installed on aarch64"; fi
run -0 nim --help
[[ "${lines[0]}" =~ 'Nim Compiler' ]]
}
@test "nise" {
run -0 bash -c "echo 私はシェル芸を嗜みます | nise"
[ "$output" = '我シェル芸嗜了' ]
}
@test "nkf" {
run -0 bash -c "echo シェル芸 | nkf"
[ "$output" = シェル芸 ]
}
@test "no-more-secrets" {
run -0 nms -v
}
@test "noc" {
run -0 noc --decode 部邊邊󠄓邊󠄓邉邉󠄊邊邊󠄒邊󠄓邊󠄓邉邉󠄊辺邉󠄊邊邊󠄓邊󠄓邉邉󠄎辺邉󠄎邊辺󠄀邉邉󠄈辺邉󠄍邊邊󠄓部
[ "$output" = 'シェル芸' ]
}
@test "python is python3" {
run -0 python --version
[[ "$output" =~ 'Python 3' ]]
}
@test "num-utils" {
run -0 numaverage -h
[ "${lines[1]}" = "numaverage : A program for finding the average of numbers." ]
}
@test "numconv" {
run -0 numconv -b 2 -B 10 <<< 101010
[ "$output" = "42" ]
}
@test "numpy" {
run -0 python3 -c 'import numpy; print(numpy.__name__)'
[ "$output" = "numpy" ]
}
@test "num2words" {
run -0 num2words 10001
[ "$output" = "ten thousand and one" ]
}
@test "nyancat" {
run -0 nyancat -h
[ "${lines[0]}" = "Terminal Nyancat" ]
}
@test "ocs" {
run -0 sh -c "seq 10 | ocs 'BEGIN{var sum=0}{sum+=int.Parse(F0)}END{Console.WriteLine(sum)}'"
[ $output -eq 55 ]
}
@test "ojichat" {
run -0 ojichat --version
[[ "${lines[0]}" =~ 'Ojisan Nanchatte (ojichat) command' ]]
}
@test "onefetch" {
run -0 bash -c "cd /ShellGeiData && onefetch | sed $'s/\033[^m]*m//g'"
[[ "${lines[2]}" =~ 'Project: ShellGeiData' ]]
}
@test "Open usp Tukubai" {
run -0 bash -c "echo シェル芸 | grep -o . | tateyoko -"
[ "$output" = 'シ ェ ル 芸' ]
}
@test "openjdk" {
run -0 javac -version
[[ "$output" =~ "javac " ]]
}
@test "opy" {
run -0 bash -c 'seq 2 | opy "F1%2==1"'
[ "$output" = "1" ]
}
@test "osquery" {
run -0 osqueryi --version
[[ "$output" =~ 'osqueryi version ' ]]
}
@test "owari" {
run -0 owari
[[ "$output" =~ '糸冬' ]]
}
@test "pandoc" {
run -0 pandoc -v
[[ "${lines[0]}" =~ "pandoc" ]]
}
@test "parallel" {
run -0 parallel --version
[[ "${lines[0]}" =~ "GNU parallel" ]]
}
@test "Perl" {
run -0 bash -c "echo シェル芸 | perl -nle 'print \$_'"
[ "$output" = "シェル芸" ]
}
@test "php" {
run -0 php -r 'echo "シェル芸\n";'
[ "$output" = "シェル芸" ]
}
@test "pillow" {
run -0 python3 -c 'import PIL; print(PIL.__name__)'
[ "$output" = "PIL" ]
}
@test "pokemonsay" {
run -0 pokemonsay --help
[ "${lines[0]}" = ' Description: Pokemonsay makes a pokémon say something to you.' ]
}
@test "ponpe" {
run -0 ponpe ponponpain haraita-i
[ "$output" = 'pͪoͣnͬpͣoͥnͭpͣa͡iͥn' ]
}
@test "postgresql" {
run -0 which psql
[ "$output" = "/usr/bin/psql" ]
}
@test "PowerShell" {
run -0 pwsh -C Write-Host シェル芸
[ "$output" = 'シェル芸' ]
}
@test "pup" {
run -0 pup --help
[ "${lines[1]}" = ' pup [flags] [selectors] [optional display function]' ]
}
@test "pwgen" {
run -1 bash -c "pwgen -h"
[[ "$output" =~ pwgen ]]
}
@test "Python3" {
run -0 python3 --version
[[ "$output" =~ 'Python 3.' ]]
}
@test "qrencode" {
run -0 qrencode -V
[[ "${lines[0]}" =~ "qrencode version" ]]
}
@test "R" {
run -0 bash -c "echo シェル芸 | R -q -e 'cat(readLines(\"stdin\"))'"
[[ "$output" =~ シェル芸 ]]
}
@test "rainbow" {
run -0 bash -c "rainbow -f ansi_f -t text"
[ "$output" = '[38;2;255;0;0mtext[m
[38;2;255;13;0mtext[m
[38;2;255;26;0mtext[m
[38;2;255;39;0mtext[m
[38;2;255;52;0mtext[m
[38;2;255;69;0mtext[m
[38;2;255;106;0mtext[m
[38;2;255;143;0mtext[m
[38;2;255;180;0mtext[m
[38;2;255;217;0mtext[m
[38;2;255;255;0mtext[m
[38;2;204;230;0mtext[m
[38;2;153;205;0mtext[m
[38;2;102;180;0mtext[m
[38;2;51;155;0mtext[m
[38;2;0;128;0mtext[m
[38;2;0;103;51mtext[m
[38;2;0;78;102mtext[m
[38;2;0;53;153mtext[m
[38;2;0;28;204mtext[m
[38;2;0;0;255mtext[m
[38;2;15;0;230mtext[m
[38;2;30;0;205mtext[m
[38;2;45;0;180mtext[m
[38;2;60;0;155mtext[m
[38;2;75;0;130mtext[m
[38;2;107;26;151mtext[m
[38;2;139;52;172mtext[m
[38;2;171;78;193mtext[m
[38;2;203;104;214mtext[m
[38;2;238;130;238mtext[m
[38;2;241;104;191mtext[m
[38;2;244;78;144mtext[m
[38;2;247;52;97mtext[m
[38;2;250;26;50mtext[m' ]
}
@test "rargs" {
run -0 rargs --help
[[ "${lines[0]}" =~ "Rargs " ]]
[ "${lines[1]}" = 'Xargs with pattern matching' ]
}
@test "rb" {
run -0 which rb
[ "$output" = '/usr/local/bin/rb' ]
}
@test "rect" {
if [ "$(uname -m)" = "aarch64" ]; then skip "rect is not installed on aarch64"; fi
run -0 rect --help
[ "${lines[0]}" = 'rect is a command to crop/paste rectangle text' ]
}
@test "reiwa" {
run -0 date -d '2019-05-01' '+%Ec'
[ "$output" = '令和元年05月01日 00時00分00秒' ]
}
@test "rename" {
run -0 rename -V
[[ "${lines[0]}" =~ "/usr/bin/rename" ]]
}
@test "rs" {
run -0 bash -c "echo シェル芸 | grep -o . | rs -T | tr -d ' '"
[ "$output" = シェル芸 ]
}
@test "rsvg-convert" {
run -0 rsvg-convert -v
[[ "${output}" =~ 'rsvg-convert version' ]]
}
@test "rubipara" {
run -0 rubipara kashikoma
[ "${lines[0]}" = ' /^v \' ]
[ "${lines[1]}" = ' _{ / |-.(`_ ̄}__' ]
[ "${lines[2]}" = " _人_ 〃⌒ ン'八{ `ノト、\`ヽ" ]
[ "${lines[3]}" = ' `Y´ {l/ / / / Vノ } ノ ( Kashikoma! )' ]
[ "${lines[4]}" = ' ,-m彡-ァ Lメ、_彡イ } }<く O' ]
[ "${lines[5]}" = " / _Uヽ⊂ニ{J:} '⌒V { l| o" ]
[ "${lines[6]}" = " / r‐='V(「\`¨, r=≪,/ { .ノノ" ]
[ "${lines[7]}" = ' / /_xヘ 人 丶- _彡イ ∧〉' ]
[ "${lines[8]}" = ' ( ノ¨フ’ `^> ‐ァァ <¨フイ' ]
[ "${lines[9]}" = " --=〉_丶/ノ { 彡' '| Everyone loves Pripara!" ]
[ "${lines[10]}" = " ^ '7^ O〉|’ ,丿" ]
[ "${lines[11]}" = '____ ___ __ _{’O 乙,_r[_ __ ___ __________________________' ]
}
# --- bats test suite for the ShellGei bot Docker image --------------------
# Each @test asserts that one tool baked into the image is installed and
# behaves as expected.  bats conventions used below:
#   run -0 CMD  : run CMD and assert it exits with status 0
#   run -N CMD  : run CMD and assert it exits with status N
#   $output     : full captured stdout; ${lines[i]} : i-th captured line
# Several tests compare ASCII/Unicode-art output byte-for-byte, so the
# quoted expected strings (including their internal spacing) must not be
# reformatted.
@test "Ruby" {
run -0 bash -c "echo シェル芸 | ruby -nle 'puts \$_'"
[ "$output" = "シェル芸" ]
}
@test "saizeriya" {
run -0 saizeriya
}
@test "sayhoozoku shoplist" {
# Only verifies the scraped fixture file exists (first line of stat output).
run -0 stat "/root/go/src/github.com/YuheiNakasaka/sayhuuzoku/scraping/shoplist.txt"
[ "${lines[0]}" = ' File: /root/go/src/github.com/YuheiNakasaka/sayhuuzoku/scraping/shoplist.txt' ]
}
@test "sayhuuzoku" {
run -0 sayhuuzoku g
}
@test "scipy" {
run -0 python3 -c 'import scipy; print(scipy.__name__)'
[ "$output" = "scipy" ]
}
@test "screen" {
run -0 bash -c "screen -v"
[[ "$output" =~ Screen ]]
}
@test "screenfetch" {
# Strip ANSI colour escapes before matching the version banner.
run -0 bash -c "screenfetch -V | sed $'s/\033\[[0-9]m//g'"
[[ "${lines[0]}" =~ "screenFetch - Version" ]]
}
@test "sel" {
run -0 bash -c "sel --version"
[[ "${output}" =~ "sel version" ]]
}
@test "shellgeibot-image" {
# build-log is expected to emit CSV: a header row then numeric build rows.
run -0 shellgeibot-image help
run -0 shellgeibot-image revision
run -0 shellgeibot-image build-log
[ "${lines[0]}" = '"build_num","vcs_revision","start_time","stop_time"' ]
[[ "${lines[1]}" =~ ^.[0-9]+.,.*$ ]]
[[ "${lines[2]}" =~ ^.[0-9]+.,.*$ ]]
[[ "${lines[3]}" =~ ^.[0-9]+.,.*$ ]]
}
@test "ShellGeiData" {
run -0 stat /ShellGeiData/README.md
[ "${lines[0]}" = ' File: /ShellGeiData/README.md' ]
}
@test "sl" {
run -0 which sl
[ "$output" = /usr/games/sl ]
}
@test "snacknomama" {
run -0 snacknomama
}
@test "super unko" {
run -0 unko.tower 2
[ "${lines[0]}" = ' 人' ]
[ "${lines[1]}" = ' ( )' ]
[ "${lines[2]}" = ' ( )' ]
}
@test "surge" {
run -0 surge --version
[[ "$output" =~ "surge" ]]
}
@test "sushiro" {
# Asserts the cache path is NOT echoed in list mode.
run -0 sushiro -l
[[ ! "${output}" =~ '/usr/local/share/sushiro_cache' ]]
}
@test "sympy" {
run -0 python3 -c 'import sympy; print(sympy.__name__)'
[ "$output" = "sympy" ]
}
@test "taishoku" {
run -0 taishoku
[ "${lines[0]}" = ' 代株 二退こ ' ]
}
@test "takarabako" {
run -0 takarabako
}
@test "tate" {
# tate renders its input vertically (Japanese tategaki); compare columns.
run -0 tate
[ "${lines[0]}" = 'ご そ ツ 気' ]
[ "${lines[1]}" = '提 ん イ 軽' ]
[ "${lines[2]}" = '供 な | に' ]
[ "${lines[3]}" = '! 素 ト ﹁' ]
[ "${lines[4]}" = '︵ 敵 で う' ]
[ "${lines[5]}" = '無 な き ん' ]
[ "${lines[6]}" = '保 ソ る こ' ]
[ "${lines[7]}" = '証 リ ︑ ﹂' ]
[ "${lines[8]}" = '︶ ュ と' ]
[ "${lines[9]}" = ' |' ]
[ "${lines[10]}" = ' シ' ]
[ "${lines[11]}" = ' ョ' ]
[ "${lines[12]}" = ' ン' ]
[ "${lines[13]}" = ' を' ]
}
@test "tcsh" {
run -0 tcsh -c "echo シェル芸"
[ "$output" = "シェル芸" ]
}
@test "teip" {
# teip -f2 applies the wrapped sed only to the 2nd whitespace field.
run -0 teip -f2 -- sed 's/.*/芸/' <<< "シェル ゲイ"
[ "$output" = "シェル 芸" ]
}
@test "telnet" {
run -0 telnet --help
[ "${lines[0]}" = "Usage: telnet [OPTION...] [HOST [PORT]]" ]
}
@test "terminal-parrot" {
run -0 terminal-parrot -h
[ "${lines[0]}" == 'Usage of terminal-parrot:' ]
}
@test "textchat" {
run -0 bash -c "textchat -n bob hello"
[ "${lines[0]}" == ".-----. .---------. " ]
[ "${lines[1]}" == "| bob | < hello | " ]
[ "${lines[2]}" == "\`-----' \`---------' " ]
}
@test "textimg" {
run -0 textimg --version
[[ "$output" =~ "textimg version " ]]
}
@test "TiMidity++" {
run -0 bash -c "timidity -v"
[[ "$output" =~ TiMidity\+\+ ]]
}
@test "tmux" {
run -0 tmux -c "echo シェル芸"
[ "$output" = "シェル芸" ]
}
@test "toilet" {
# toilet's figlet-style banner; every line is compared verbatim.
run -0 bash -c "echo シェル芸 | toilet"
[ "${lines[0]}" = ' ' ]
[ "${lines[1]}" = ' ""m m "m ' ]
[ "${lines[2]}" = ' mm # # ' ]
[ "${lines[3]}" = ' " m" mmm"" # # # ' ]
[ "${lines[4]}" = ' m" #mm m" # m" ' ]
[ "${lines[5]}" = ' "mm"" """" " m" #" ' ]
[ "${lines[6]}" = ' ' ]
[ "${lines[7]}" = ' ' ]
}
@test "trdsql" {
run -0 sh -c "trdsql --version | xxd"
[[ "$output" =~ "trdsql version" ]]
}
@test "tree" {
run -0 tree --help
[[ "${lines[0]}" =~ 'usage: tree' ]]
}
@test "ttyrec" {
# ttyrec -h exits non-zero by design; only the usage text is checked.
run -1 bash -c "ttyrec -h"
[[ "$output" =~ ttyrec ]]
}
@test "ttyrec2gif" {
run -0 ttyrec2gif -help
[ "${lines[0]}" = 'Usage of ttyrec2gif:' ]
}
@test "uconv" {
# Round-trip: hex bytes -> UTF-16BE -> UTF-8 should yield the phrase.
run -0 bash -c "echo 30b730a730eb82b8 | xxd -p -r | uconv -f utf-16be -t utf-8"
[ "$output" = "シェル芸" ]
}
@test "unicode-data" {
run -0 stat /usr/share/unicode/ReadMe.txt
[ "${lines[0]}" = " File: /usr/share/unicode/ReadMe.txt" ]
}
@test "uniname" {
# uniname -h exits with status 2; its help text goes to stderr.
run -2 uniname -h 2>&1
[ "${lines[0]}" = "Name the characters in a Unicode file." ]
}
@test "Vim" {
run -0 bash -c "echo シェル芸 | vim -es +%p +q! /dev/stdin"
[ "$output" = シェル芸 ]
}
@test "w3m" {
run -0 w3m -version
[[ "$output" =~ 'w3m version' ]]
}
@test "whiptail" {
run -0 whiptail -v
[[ "$output" =~ "whiptail" ]]
}
@test "whitespace" {
# The escaped tab/space sequence is a Whitespace-language program that
# prints "ShellGei".
run -0 bash -c "echo -e ' \t \t \t\t\n\t\n \t\t \t \n\t\n \t\t \t \t\n\t\n \t\t \t\t \n\t\n \t\t \t\t \n\t\n \t \t\t\t\n\t\n \t\t \t \t\n\t\n \t\t \t \t\n\t\n \n\n' | whitespace"
[ "$output" = 'ShellGei' ]
}
@test "wordcloud_cli" {
run -0 wordcloud_cli --version
[[ "$output" =~ "wordcloud_cli" ]]
}
@test "x11-apps" {
run -0 which xwd
[ "$output" = '/usr/bin/xwd' ]
}
@test "xdotool" {
run -0 xdotool --version
[[ "$output" =~ 'xdotool version' ]]
}
@test "xonsh" {
run -0 xonsh -c 'echo シェル芸'
[ "$output" = "シェル芸" ]
}
@test "xterm" {
run -0 xterm -v
[[ "$output" =~ 'XTerm' ]]
}
@test "xvfb" {
run -0 Xvfb -help
[ "${lines[0]}" = 'use: X [:<display>] [option]' ]
}
@test "yash" {
run -0 yash -c "echo シェル芸"
[ "$output" = シェル芸 ]
}
@test "yq" {
run -0 yq --version
[[ "${lines[0]}" =~ "yq" ]]
}
@test "yukichant" {
# Encode then decode must be the identity.
run -0 bash -c "echo -n unko | chant | chant -d"
[ "$output" = "unko" ]
}
@test "zen_to_i" {
# Converts full-width Japanese numerals to an integer.
run -0 bash -c 'ruby -rzen_to_i -pe \$_=\$_.zen_to_i <<< 三十二'
[ "${lines[0]}" = '32' ]
}
@test "zsh" {
run -0 zsh -c "echo シェル芸"
[ "$output" = "シェル芸" ]
}
@test "zws" {
# zws hides data in zero-width spaces; decode the base64'd carrier text.
run -0 bash -c "echo J+KBouKAjeKAi+KBouKAjeKAi+KAi+KAjeKAjeKBouKAjOKBouKBouKAjeKAi+KBouKAjeKAi+KAi+KAjeKAjeKAjeKAjOKBouKBouKAjeKAi+KBouKAjeKAi+KAi+KBouKAjeKAjeKAjeKBouKBouKAjeKAjeKAi+KAjeKAi+KAjeKAjeKAjeKBouKAjeKAi+KAi+KAi+KAjeKAjScK | base64 -d | zws -d"
[ "$output" = 'シェル芸' ]
}
| true
|
36fd63e10338c9334c767f1d6e8275805dd7ff55
|
Shell
|
bopopescu/Cloud-User-Management
|
/openstack/openstack-helm/liblxd/templates/bin/_liblxd.sh.tpl
|
UTF-8
| 1,972
| 2.734375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
{{/*
Copyright 2017 The Openstack-Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
# Helm template rendered into a container entrypoint: initialises the LXD
# daemon (CLI init + preseed), restarts it, then builds and runs a console
# server in the foreground.  {{ .Values.* }} placeholders are substituted
# by Helm at render time.
# set -ex
set -x
#service lxd stop
# Warn if an lxd daemon is already running on the host.
# NOTE(review): this branch only prints the error — the exec that would
# have parked the container is commented out, so execution falls through
# and init is attempted anyway.  Confirm that is intentional.
if [ -n "$(cat /proc/*/comm 2>/dev/null | grep -w lxd)" ]; then
echo "ERROR: lxd daemon already running on host" 1>&2
#cat /proc/*/comm
#exec /bin/bash -c "trap : TERM INT; sleep infinity & wait"
fi
# Give the nova service user access to the LXD socket.
usermod -G lxd -a nova
/usr/bin/lxd init \
--auto \
--network-address={{ .Values.conf.liblxd.lxd_bind_address }} \
--network-port={{ .Values.conf.liblxd.lxd_bind_port }} \
--storage-backend={{ .Values.conf.liblxd.lxd_storage_backend }}
# \
# --trust-password={{ .Values.conf.liblxd.lxd_trust_password }}
# --storage-pool={{ .Values.conf.liblxd.lxd_storage_pool }}
# Second init pass via preseed: HTTPS listener plus default bridge/profile.
cat <<EOF | lxd init --preseed
config:
core.https_address: 0.0.0.0:8843
networks:
- name: lxdbr0
type: bridge
config:
ipv4.address: auto
ipv6.address: auto
profiles:
- name: default
devices:
eth0:
nictype: bridged
parent: lxdbr0
type: nic
EOF
# lxd init will auto start service
#exec /usr/bin/lxd --group lxd --logfile=/var/log/lxd/lxd.log
service lxd restart
# exec /bin/bash -c "trap : TERM INT; sleep infinity & wait"
# instead of using loop, we start our console server
apt update
apt install git -y
# SECURITY NOTE(review): the clone URL below embeds a plaintext username
# and password (%40 is an encoded '@'); these credentials should be moved
# into a Kubernetes secret instead of being baked into the image/template.
git clone http://mr.qinlichao%40hotmail.com:241l69h302S@54.158.21.135/roamercloud/console-server.git /console-server
cd /console-server
# Runs in the foreground; the trailing exec below is only reached if the
# console server exits, and then parks the container until signalled.
./console-server
exec /bin/bash -c "trap : TERM INT; sleep infinity & wait"
| true
|
e586ccfc82f073d68c5234a13990609e4cfae0c6
|
Shell
|
hiturria/bosmarmot
|
/tests/run_pkgs_tests.sh
|
UTF-8
| 4,953
| 3.890625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# ----------------------------------------------------------
# PURPOSE
# This is the test manager for monax jobs. It will run the testing
# sequence for monax jobs referencing test fixtures in this tests directory.
# ----------------------------------------------------------
# REQUIREMENTS
# m
# ----------------------------------------------------------
# USAGE
# run_pkgs_tests.sh [appXX]
# Various required binaries locations can be provided by wrapper
bos_bin=${bos_bin:-bos}
burrow_bin=${burrow_bin:-burrow}
# currently we must use 'solc' as hardcoded by compilers
solc_bin=solc
# If false we will not try to start Burrow and expect them to be running
boot=${boot:-true}
debug=${debug:-false}
# Overall suite status; updated by run_test/perform_tests, read by teardown.
test_exit=0
# Absolute directory containing this script (robust to being sourced).
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
if [[ "$debug" = true ]]; then
set -o xtrace
fi
# ----------------------------------------------------------
# Constants
# Ports etc must match those in burrow.toml
grpc_port=20997
tendermint_port=36656
chain_dir="$script_dir/chain"
burrow_root="$chain_dir/.burrow"
# Temporary logs
burrow_log=burrow.log
#
# ----------------------------------------------------------
goto_base(){
# Return to the test fixtures directory; every fixture runs relative to it.
# BUGFIX: quote the expansion so a script_dir containing spaces does not
# word-split and break the cd.
cd "${script_dir}/jobs_fixtures"
}
pubkey_of() {
# Print the PublicKey of the named genesis account from chain/genesis.json.
# BUGFIX: pass the name via jq --arg instead of interpolating "$1" into the
# jq program text, which broke on names containing quotes or backslashes.
jq -r --arg name "$1" '.Accounts | map(select(.Name == $name))[0].PublicKey.PublicKey' chain/genesis.json
}
address_of() {
# Print the Address of the named genesis account from chain/genesis.json.
# BUGFIX: pass the name via jq --arg instead of interpolating "$1" into the
# jq program text, which broke on names containing quotes or backslashes.
jq -r --arg name "$1" '.Accounts | map(select(.Name == $name))[0].Address' chain/genesis.json
}
# Boot the test environment: print tool versions, optionally start a fresh
# Burrow chain in the background (capturing its pid in burrow_pid), and
# resolve the account keys used by the fixtures.  Exact statement order
# matters (chain wipe before start, sleep before use).
test_setup(){
echo "Setting up..."
cd "$script_dir"
echo
echo "Using binaries:"
echo " $(type ${solc_bin}) (version: $(${solc_bin} --version))"
echo " $(type ${bos_bin}) (version: $(${bos_bin} version))"
echo " $(type ${burrow_bin}) (version: $(${burrow_bin} --version))"
echo
# start test chain
if [[ "$boot" = true ]]; then
echo "Starting Burrow with tendermint port: $tendermint_port, GRPC port: $grpc_port"
# Wipe any previous chain state so every run starts from genesis.
rm -rf ${burrow_root}
# Launch burrow in a backgrounded subshell so the cd does not leak;
# stderr goes to the log file.
$(cd "$chain_dir" && ${burrow_bin} start -v0 2> "$burrow_log")&
burrow_pid=$!
else
echo "Not booting Burrow, but expecting Burrow to be running with tm RPC on port $grpc_port"
fi
# Keys used by the fixtures (looked up in chain/genesis.json).
key1_addr=$(address_of "Full_0")
key2_addr=$(address_of "Participant_0")
key2_pub=$(pubkey_of "Participant_0")
echo -e "Default Key =>\t\t\t\t$key1_addr"
echo -e "Backup Key =>\t\t\t\t$key2_addr"
sleep 4 # boot time
echo "Setup complete"
echo ""
}
# Run a single fixture directory ($1): print its readme, invoke bos with the
# chain URL and key substitutions, record the exit code in test_exit, then
# clean generated artifacts and return to the fixtures root.
run_test(){
# Run the jobs test
echo ""
echo -e "Testing $bos_bin jobs using fixture =>\t$1"
goto_base
cd $1
echo
cat readme.md
echo
# Echo the exact command first so failures are reproducible by hand.
echo ${bos_bin} --chain-url="localhost:$grpc_port" --address "$key1_addr" \
--set "addr1=$key1_addr" --set "addr2=$key2_addr" --set "addr2_pub=$key2_pub" #--debug
${bos_bin} --chain-url="localhost:$grpc_port" --address "$key1_addr" \
--set "addr1=$key1_addr" --set "addr2=$key2_addr" --set "addr2_pub=$key2_pub" #--debug
test_exit=$?
# Remove build artifacts and per-run outputs so the next fixture is clean.
git clean -fdx ../**/abi ../**/bin ./jobs_output.csv
rm ./*.output.json
# Reset for next run
goto_base
return $test_exit
}
perform_tests(){
# Run every fixture directory whose name starts with $1, repeating the
# whole pass $2 times (soak testing; default 1).  Leaves the first failing
# fixture's status in test_exit for test_teardown to report.
echo ""
goto_base
apps=($1*/)
# BUGFIX: "$apps" alone only prints the first element; show the whole list.
echo "${apps[@]}"
repeats=${2:-1}
# Useful for soak testing/generating background requests to trigger concurrency issues
for rep in `seq ${repeats}`
do
  for app in "${apps[@]}"
  do
    echo "Test: $app, Repeat: $rep"
    run_test "${app}"
    # Set exit code properly
    test_exit=$?
    if [ ${test_exit} -ne 0 ]
    then
      # BUGFIX: break out of BOTH loops on failure.  A plain `break` only
      # left the inner loop, so a later repeat could overwrite test_exit
      # with 0 and mask the failure.
      break 2
    fi
  done
done
}
# Run every fixture directory whose name starts with $1, EXPECTING each one
# to fail: a non-zero run_test status is treated as success (test_exit reset
# to 0); an unexpected pass leaves test_exit at 0 but stops the loop, so a
# wrongly-passing fixture is only detectable from the log output.
perform_tests_that_should_fail(){
echo ""
goto_base
apps=($1*/)
for app in "${apps[@]}"
do
run_test ${app}
# Set exit code properly
test_exit=$?
if [ ${test_exit} -ne 0 ]
then
# actually, this test is meant to pass
test_exit=0
else
break
fi
done
}
# EXIT-trap handler: stop the Burrow chain we booted (if any), delete its
# state, report overall suite status, and exit with test_exit.
test_teardown(){
echo "Cleaning up..."
if [[ "$boot" = true ]]; then
kill ${burrow_pid}
echo "Waiting for burrow to shutdown..."
# Backgrounded wait: reap the process without blocking teardown.
wait ${burrow_pid} 2> /dev/null &
rm -rf "$burrow_root"
fi
echo ""
if [[ "$test_exit" -eq 0 ]]
then
# Only discard the burrow log on a green run; keep it for debugging reds.
[[ "$boot" = true ]] && rm -f "$burrow_log"
echo "Tests complete! Tests are Green. :)"
else
echo "Tests complete. Tests are Red. :("
echo "Failure in: $app"
fi
exit ${test_exit}
}
# ---------------------------------------------------------------------------
# Setup
echo "Hello! I'm the marmot that tests the $bos_bin jobs tooling."
echo
echo "testing with target $bos_bin"
echo
test_setup
# ---------------------------------------------------------------------------
# Go!
# "setup" as the first argument boots the chain and stops there (no tests,
# no teardown trap — the chain keeps running for manual use).
if [[ "$1" != "setup" ]]
then
# Cleanup
trap test_teardown EXIT
if ! [ -z "$1" ]
then
echo "Running tests beginning with $1..."
perform_tests "$1" "$2"
else
# Default: negative fixtures first, then the normal app fixtures.
echo "Running tests that should fail"
perform_tests_that_should_fail expected-failure
echo "Running tests that should pass"
perform_tests app
fi
fi
| true
|
331cfcd1282222eb3431dc7389cf4b86e8b22532
|
Shell
|
alisw/AliRoot
|
/MONITOR/alistoragemanager/setupStorageDatabase.sh
|
UTF-8
| 1,307
| 2.78125
| 3
|
[] |
permissive
|
#!/bin/bash
# Provision the MySQL database/table used by the ALICE storage manager and
# define the connection/storage parameters consumed by the other tools.
# SECURITY NOTE(review): root and user passwords are passed on the mysql
# command line (visible in `ps`); consider --defaults-extra-file instead.
HOST="localhost" # IP of machine on which mysql database is located
PORT="123"
DATABASE="database"
USER="user"
PASS="pass123"
TABLE="table"
STORAGE_PATH="/some/path/"
MAX_SIZE="300000000"
MAX_OCCUPATION="100"
REMOVE_PERCENT="50"
EVENTS_IN_FILE="20"
EVENT_SERVER="localhost" # IP of machine running alieventserver
EVENT_SERVER_USER="user"
EVENT_SERVER_PORT="124"
STORAGE_SERVER="localhost" # IP of machine running alistorage
STORAGE_SERVER_USER="user"
STORAGE_SERVER_PORT="125" # server thread communication port
STORAGE_CLIENT_PORT="126" # client thread communication port
XML_SERVER_PORT="127" # server of xml files
# Create the database (idempotent) and grant the app user full rights on it.
mysql -u root -ppassword -e "create database if not exists $DATABASE;"
mysql -u root -ppassword -e "grant ALL PRIVILEGES on $DATABASE.* to '$USER'@'$HOST' identified by '$PASS';"
# NOTE(review): this `use` runs in its own mysql session and therefore has
# no effect on the following invocation; the CREATE TABLE below already
# qualifies the table name with $DATABASE, so this line is a no-op.
mysql -u root -ppassword -e "use $DATABASE;"
mysql -u root -ppassword -e "CREATE TABLE IF NOT EXISTS $DATABASE.$TABLE(\
run_number int(6) NOT NULL,\
event_number int(6) NOT NULL,\
system text(7) DEFAULT NULL,\
multiplicity int(5) DEFAULT NULL,\
permanent tinyint(1) DEFAULT NULL,\
file_path text(100) DEFAULT NULL,\
PRIMARY KEY(run_number,event_number));"
echo "-----------------------------"
echo "Databases successfuly created"
echo "-----------------------------"
| true
|
857d8f012fecf61ed1ef982f42cdfc376c98eafc
|
Shell
|
originaluko/vSphere_PowerCLI
|
/vsphere_powercli-service/build-java.sh
|
UTF-8
| 592
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Copyright (c) 2012-2018 VMware, Inc. All rights reserved.
# Mac OS script
# Note: if Ant runs out of memory try defining ANT_OPTS=-Xmx512M
# Build the Java service with Ant after validating that ANT_HOME and
# VSPHERE_SDK_HOME point at real installations.
if [ -z "$ANT_HOME" ] || [ ! -f "${ANT_HOME}"/bin/ant ]
then
echo BUILD FAILED: You must set the environment variable ANT_HOME to your Apache Ant folder
exit 1
fi
if [ -z "$VSPHERE_SDK_HOME" ] || [ ! -f "${VSPHERE_SDK_HOME}"/libs/vsphere-client-lib.jar ]
then
echo BUILD FAILED: You must set the environment variable VSPHERE_SDK_HOME to your vSphere Client SDK folder
exit 1
fi
# Delegate the actual build to the Ant project file.
"${ANT_HOME}"/bin/ant -f build-java.xml
exit 0
| true
|
b0d6310f72153d34a46056d7fc469f459b820def
|
Shell
|
idenkov/shell-scripts
|
/logs.sh
|
UTF-8
| 963
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Appends an rsyslog "imfile" stanza for every Magento *.log file found
# under a user-supplied directory to ./rsyslog.conf, forwarding them to a
# central rsyslog server.
# NOTE: this script only ever appends; if you enter something wrong you
# still have to delete the added configuration from rsyslog.conf by hand.
FIND="$(which find)"

# Prompt until the named variable is non-empty.
# BUGFIX: abort when stdin is closed instead of busy-looping forever on a
# failing `read`; also use read -r so backslashes are kept literal.
prompt_required() {
  local prompt="$1" varname="$2" value=""
  while [[ -z "$value" ]]
  do
    read -r -p "$prompt" value || exit 1
  done
  printf -v "$varname" '%s' "$value"
}

prompt_required "Server ID, e.g. qaw1, ops2 etc.: " server_id
prompt_required "Rsyslog server and port(10.142.0.15:514): " rsyslog_ip
prompt_required "Prefix for file tags(qaabc): " prefix
prompt_required "Log folder full path: " dir

# Forwarding header (written once per run).
echo "# # #Adding logs.1rw.us
*.* @@$rsyslog_ip" >> rsyslog.conf

# BUGFIX: read the find results line by line instead of word-splitting an
# unquoted variable, so log paths containing spaces survive intact.
"$FIND" "$dir" -iname "*.log" | while IFS= read -r LOGFILE
do
  LOGNAME="$(basename "$LOGFILE")"
  LOGTAG="${LOGNAME%.*}"
  # One imfile input block per log file; tag/state name is prefix-based.
  echo "
\$ModLoad imfile
\$InputFileName $LOGFILE
\$InputFileTag $prefix-magento-$LOGTAG
\$InputFileStateFile $prefix-magento-$LOGTAG
\$InputFileSeverity info
\$InputFileFacility $server_id
\$InputRunFileMonitor" >> rsyslog.conf
done
| true
|
68cbe6998417ab11e69fde46b32cf4e926ef5d91
|
Shell
|
ghdl/ghdl-cosim
|
/vhpidirect/vffi_user/crypto/run.sh
|
UTF-8
| 385
| 2.546875
| 3
|
[
"Apache-2.0",
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env sh
# Analyze, elaborate and run the VHPIDIRECT crypto testbench with GHDL,
# linking the foreign C sources against OpenSSL (-lcrypto -lssl).
cd "$(dirname $0)"
set -e
echo "> Analyze vffi_pkg"
ghdl -a --std=08 --work=ghdl ../../vffi_user.vhd ../../vffi_user-body.vhd
echo "> Analyze c/tb.vhd"
ghdl -a --std=08 c/tb.vhd
echo "> Build tb_c (with encrypt.c and c/caux.c)"
ghdl -e --std=08 -Wl,-I../.. -Wl,encrypt.c -Wl,c/caux.c -o tb_c -Wl,-lcrypto -Wl,-lssl tb
echo "> Execute tb_c"
# The testbench itself may exit non-zero; temporarily drop -e so its exit
# status does not abort the script before -e is restored.
set +e
./tb_c
set -e
| true
|
161d8ff44da2d7e8c8bd46ad8cce5f7bfee2bedb
|
Shell
|
kb5wck/IRLP
|
/newslinefeed.wb8odf
|
UTF-8
| 5,291
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Amateur Radio Newsline Feed Script with PTT and TOT Override
#
# Written for Dale W9LKI (4049)
#
# Version: 4.0
#
# Written By: Dave Parks WB8ODF Node: 8139 (wb8odf@yahoo.com)
#
# Date: Jan 22, 2013
# Update: Apr 14, 2016
# By: Dave Parks
#
#
# NOTICE: NO WARRANTY IS EXPRESSED AS TO THE SUITABILITY OR STABILITY OF THIS PROGRAM
# AND YOUR LINUX OS. IT MAY BREAK YOUR NODE. I AM AVAILABLE FOR SUPPORT AT THE E-MAIL
# ADDRESS OR NODE NUMBERS LISTED AT THE TOP OF THIS PAGE. THE INSTALL OF THIS PROGRAM
# ASSUMES YOU KNOW ENOUGH ABOUT LINUX TO MOVE ABOUT, EDIT FILES AND JUST A LITTLE OF THE
# CUSTOM_DECODE FILE. IF THIS IS YOUR FIRST INSTALL (NOT AN UPDATE) YOU WILL NEED TO HAVE
# ACCESS TO THE ROOT ACCOUNT TO EDIT THE /ETC/SUDOERS & $CUSTOM/rc.irlp FILES EXPLAINED AT
# THE BOTTOM OF THE README FILE AND NOTED IN THIS FILE. -- HOPE YOU LIKE IT!!!
#
#
# Version 1: Wrote a simple script to download, play and remove the arnewsline news.mp3
#
# Version 2: I was asked to write the script to over ride the PTT Timeout built into
# the DTMF binary of the IRLP software and to override the 3 minute TOT
# on most repeaters. I borrowed Rob's (KK7AV) PTT Timeout Override script
# and modified it a bit to include a 'drop' PTT variable. This allows my
# newslinefeed.wb8odf script to drop PTT for a given (variable) amount of
# time. (default = .5 second) I also added a Control-C Trap to help clean
# up after itself if run from the command line.
#
# Version 3: I added the News Feed Lock file so my voiceid.wb8odf program can ID
# every 10 minutes keeping you legal while the 30+ minutes News Feed runs
# by adding $RUN/News file to let the ID program know News is running.
#
#
#
# Version 4: This version is a total rewrite of the program. It is now totally
# function frendly lol I added a function that allows you to keep
# the News file all week and only remove and download the new file
# on Fridays (one time)
#
#
#
#
###############################################################
################# SOURCED LOCAL FILES ###################
###############################################################
# Refuse to run as anyone but the IRLP "repeater" user.
if [ `/usr/bin/whoami` != "repeater" ] ; then
echo
echo This program must be run as user REPEATER!
echo
exit 1
fi
# Source the IRLP environment (defines $LOCAL, $CUSTOM, $AUDIO, $SCRIPT...).
if [ -f /home/irlp/custom/environment ] ; then
. /home/irlp/custom/environment
else
echo
echo "Make sure /home/irlp/custom/environment is there"
echo
exit 1
fi
###############################################################
################# Edit Variables Here ##################
###############################################################
DISCONNECT="73" # Disconnect string to drop reflector or nodes
WHATNEWSVOLUME=".6" # News volume, may be different from CW ID
WAVNAMECW=callsign.wav # The wave file in $AUDIO used to CW ID your node
###############################################################
################## Program Funtions ####################
###############################################################
# If the node is currently connected ($LOCAL/active exists), play error
# audio, force a disconnect via the DTMF decoder, trigger a fresh download
# (mode 111) and abort with status 1.  Does nothing when not connected.
errors () {
if [ -f "$LOCAL/active" ] ; then
forcekey
$SCRIPT/wavplay error
forceunkey
echo
echo
echo "You are connected to a node or reflector, please disconnect"
echo
echo
# Key up briefly and play the spoken error announcement.
forcekey ; usleep 600000 ; mpg321 -q $AUDIO/custom/ARNL-ERROR.mp3 ; forceunkey
# Send the configured disconnect DTMF string, then re-run self in
# download mode (111) before bailing out.
decode $DISCONNECT
$CUSTOM/newslinefeed.wb8odf 111
exit 1
fi
}
# Emergency stop (mode 000): unkey, kill all helper processes (PTT
# override, player, downloader), clear the News lock, restart the voice ID
# helper if installed, and finally kill any running copy of this script.
stopall () {
forceunkey
killall noptt.wb8odf 2>&1
killall mpg321 2>&1
killall wget 2>&1
# Drop the lock that tells voiceid.wb8odf that news playback is active.
rm $LOCAL/News
if [ -f $CUSTOM/voiceid.wb8odf ]
then
killall voiceid.wb8odf 2>&1
$CUSTOM/voiceid.wb8odf 111 2>&1
forceunkey
fi
#forcekey ; usleep 600000 ; mpg321 -q $AUDIO/custom/END-ARNEWSLINE.mp3 ; forceunkey
killall newslinefeed.wb8odf 2>&1
}
# Mode 111: announce and fetch the current Amateur Radio Newsline MP3 into
# /tmp, replacing any previously cached copy.
downloads () {
cd /tmp
# Play Download .mp3
forcekey ; usleep 600000 ; mpg321 -q $AUDIO/custom/ARNL-DL.mp3 ; forceunkey
# NOTE(review): the comment below says "only if it's Friday", but the rm
# here is unconditional — verify which behaviour is intended.
# Remove the news.mp3 file and $LOCAL/New lock file only if it's Friday
rm /tmp/www.arnewsline.org/storage/audio/news.mp3 2>&1
# Download the News Line news.mp3 file
wget -q -r http://www.arnewsline.org/storage/audio/news.mp3
}
# Mode 222 playback path: set the News lock so the voice ID keeps running
# every 10 minutes, start the PTT-timeout override, play the cached
# newscast with brief PTT drops, then clean up via mode 000.
fridays () {
# Change Directory to /tmp
cd /tmp
# Lock file read by voiceid.wb8odf so IDs continue during the long cast.
touch $LOCAL/News
killall voiceid.wb8odf 2>&1
$CUSTOM/voiceid.wb8odf &
# Start up the PTT Timeout Override in background
$CUSTOM/noptt.wb8odf &
# Play Download .mp3
forcekey ; usleep 600000 ; mpg321 -q $AUDIO/custom/ARNL-DL.mp3 ; forceunkey
# Keyup, Sleep .5 second, Play the news.mp3 file, forceunkey
forcekey ; usleep 500000 ; mpg321 -q /tmp/www.arnewsline.org/storage/audio/news.mp3 ; forceunkey
# Play END-ARNEWSLINE .mp3
sleep 1
forcekey ; usleep 500000 ; mpg321 -q --g 26 $AUDIO/custom/END-ARNEWSLINE.mp3 ; forceunkey
# Kill the noptt.wb8odf PTT Override Script addded the script to keep
# the file until the following Friday then delete this version and
# download the new News file.
killall noptt.wb8odf
sleep 1
# Play your CW ID at the end of the news
if [ -f $AUDIO/"$WAVNAMECW" ]
then
forcekey
usleep 600000 ; play -v $WHATNEWSVOLUME $AUDIO/$WAVNAMECW
fi
# Hand off to mode 000 for the final teardown (also unkeys).
$CUSTOM/newslinefeed.wb8odf 000
}
# Mode dispatch based on the DTMF argument:
#   111 = download the latest newscast, 000 = stop everything,
#   222 = (after checking for an active connection) play the cached cast.
##### Go to arnewsline.org and get the new news.mp3 file
if [ "$1" = "111" ]
then
downloads
##### STOP ALL and clean up
elif [ "$1" = "000" ]
then
stopall
##### PLAY the stored news.mp3 file
elif [ "$1" = "222" ]
then
errors
fridays
fi
# Exit the script with code 0
exit 0
| true
|
0603a919eb3be49aaa571d108e28d2dc9700d3ae
|
Shell
|
aygean219/shell
|
/s8.sh
|
UTF-8
| 108
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# For every file named on the command line, find its most frequent line
# (with its occurrence count, as printed by `uniq -c`) and collect one
# record per input file in ./file.txt, sorted at the end.
main() {
  # Start from a clean slate so records from a previous run do not linger.
  : > file.txt
  for f in "$@"
  do
    # BUGFIX: append (>>) instead of truncate (>).  The original `>`
    # rewrote file.txt on every iteration, so only the LAST input file's
    # record survived and the final sort was pointless.
    sort -- "$f" | uniq -c | sort -n | tail -1 >> file.txt
  done
  # Sort the collected records in place.
  sort -o file.txt file.txt
}
main "$@"
| true
|
f7ac7441d2735856e7490833411491d72b0a88f0
|
Shell
|
legacy-codedigger/Solaris-2.6-Source-Code
|
/Solaris_2.6/os_net/src_ws/usr/src/cmd/sgs/tools/so425.sh
|
UTF-8
| 1,697
| 3.390625
| 3
|
[] |
no_license
|
#! /bin/sh
# @(#) so425 1.2 94/03/27
#
# so425: convert a 4.x so to something suitable for linking on 5.x.
#
# Reads the SunOS 4.x shared library named in $1, extracts its symbol table
# with a 4.x nm, rewrites it into an assembler stub, and compiles that stub
# into a 5.x .so usable for LINK-TIME resolution only (not for running).
NMTMP=/tmp/so425.nm.$$
ASTMP=/tmp/so425.$$.s
# Set this to point at a 4.x "nm" command
NM4=$HOME/4bin/nm
# Clean both temp files on exit or interrupt.
trap 'rm -rf $NMTMP $ASTMP' 0 HUP INT TERM
# Get the 4.x namelist from the library
$NM4 -n $1 >$NMTMP
if [ $? != "0" ]; then
exit 1
fi
# Convert the namelist to an assembler source file that will generate
# an appropriate 5.x .so that can be used for linking (but NOT for
# running, at least, not correctly!) -- use the 4.x one for that.
# The nawk program emits one .global/.type/.size stanza per symbol, sizing
# each symbol as the address gap to the next one, and strips the 4.x
# leading-underscore name mangling.
nawk '
function emit(s) {
if (symbol &&
(name != "etext") &&
(name != "edata") &&
(name != "end") &&
(name != "_GLOBAL_OFFSET_TABLE_")) {
printf("\t.global %s\n", name);
printf("%s:\n", name);
printf("\t.type %s,%s\n", name, type);
printf("\t.size %s,0x%x\n", name, s);
}
symbol = 0;
}
function settype(t) {
symbol = 1;
type = t;
}
function xtoi(s) {
sum = 0;
for (cp = 1; cp <= length(s); cp++)
sum = (sum * 16) + \
(index("0123456789abcdef", substr(s, cp, 1)) - 1);
return (sum);
}
BEGIN {
oa = 0;
symbol = 0;
}
{
na = xtoi($1);
size = na - oa;
emit(size);
oa = na;
if (substr($3, 1, 1) == "_")
name = substr($3, 2, length($3) - 1);
else
name = $3;
if ($2 == "T")
settype("#function");
else if (($2 == "D") || ($2 == "B"))
settype("#object");
else if ($2 == "A")
printf("\t.global %s\n\t%s=0x%x\n", name, $3, oa);
}
END {
emit(0);
}' $NMTMP >$ASTMP
# Compile the stub only if any symbols were emitted; the inner nawk derives
# the output name by dropping the library's minor version suffix.
if [ -s $ASTMP ]; then
cc -G $RP -o `nawk '
BEGIN {
split(ARGV[ARGC - 1], a, ".");
printf("%s.%s", a[1], a[2]);
exit 0;
}' $1` -h `basename $1` $ASTMP
fi
exit 0
| true
|
05f31f050b0fd18a5e910f76204541c5004d3a29
|
Shell
|
krugerke/lemp
|
/docker/containers/php/setup.sh
|
UTF-8
| 269
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision a PHP build container: refresh the package index, install the
# common build tool-chain, then run any executable per-project setup hooks.
apt-get update
apt-get -y install apt-utils
# BUGFIX: nano was installed without -y, which stalls (or fails) the
# non-interactive Docker build; keep the flag consistent with the rest.
apt-get -y install nano
apt-get -y install autoconf
apt-get -y install build-essential
apt-get -y install zip
apt-get -y install git
# Run each regular, executable setup hook in order.
for file in bin/setup/*; do
  [ -f "$file" ] && [ -x "$file" ] && "$file"
done
| true
|
5b81d3202918b44462ac1948b101c927cf31ef0d
|
Shell
|
miteshklio/wastemaster-inc
|
/srv/salt/mariadb/files/mysql_secure.sh
|
UTF-8
| 1,697
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# Salt-templated script ({{ pillar[...] }} is rendered before execution):
# performs a mysql_secure_installation-style hardening pass, then creates
# the application database and grants for localhost and the private ranges.
#
# Config
#
ROOT_PASS='{{ pillar['mysql']['root_pass'] }}'
APP_DB='{{ pillar['mysql']['app_db'] }}'
APP_USER='{{ pillar['mysql']['app_user'] }}'
APP_PASS='{{ pillar['mysql']['app_pass'] }}'
#
# Initial setup
#
# NOTE(review): RESULT holds either the db name (grep -o match) or the
# empty string — it can never equal the quoted "Unknown database ..."
# message, so this condition is always true and the block below runs
# unconditionally.  Verify the intended guard (likely: run only when the
# database does NOT yet exist).
RESULT=`mysqlshow --user=root --password=${ROOT_PASS} ${APP_DB}| grep -v Wildcard | grep -o ${APP_DB}`
if [ "$RESULT" != "Unknown database '${APP_DB}'" ]; then
# Create root user
mysqladmin -u root password "${ROOT_PASS}"
# Kill the anonymous users
echo "DROP USER ''@'localhost'" | mysql -uroot -p${ROOT_PASS}
# Because our hostname varies we'll use some Bash magic here.
echo "DROP USER ''@'$(hostname)'" | mysql -uroot -p${ROOT_PASS}
# Kill off the demo database
echo "DROP DATABASE test" | mysql -uroot -p${ROOT_PASS}
# Make our changes take effect
echo "FLUSH PRIVILEGES" | mysql -uroot -p${ROOT_PASS}
#
# Create database and new app user
#
echo "CREATE DATABASE ${APP_DB};" | mysql -uroot -p${ROOT_PASS}
echo "CREATE USER '${APP_USER}'@'localhost' IDENTIFIED BY '${APP_PASS}';" | mysql -uroot -p${ROOT_PASS}
echo "CREATE USER '${APP_USER}'@'192.168.%' IDENTIFIED BY '${APP_PASS}';" | mysql -uroot -p${ROOT_PASS}
echo "CREATE USER '${APP_USER}'@'10.%' IDENTIFIED BY '${APP_PASS}';" | mysql -uroot -p${ROOT_PASS}
echo "GRANT ALL PRIVILEGES ON ${APP_DB}.* TO '${APP_USER}'@'localhost';" | mysql -uroot -p${ROOT_PASS}
echo "GRANT ALL PRIVILEGES ON ${APP_DB}.* TO '${APP_USER}'@'192.168.%';" | mysql -uroot -p${ROOT_PASS}
echo "GRANT ALL PRIVILEGES ON ${APP_DB}.* TO '${APP_USER}'@'10.%';" | mysql -uroot -p${ROOT_PASS}
echo "FLUSH PRIVILEGES;" | mysql -uroot -p${ROOT_PASS}
fi
| true
|
d0a64a1af752a510dcc9a01eea2e91d34f1ee89b
|
Shell
|
jonasrk/MA-Scripts
|
/old/unrolled_countpoints.sh
|
UTF-8
| 775
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Benchmark driver: for each centroid fixture, run the Rheem/Spark unrolled
# k-means job 25 times, timestamping each run and logging per-run output.
echo "Bash version ${BASH_VERSION}..."
for j in 30centroids2.txt 30centroids3.txt 30centroids4.txt 30centroids5.txt
do
echo $j
for i in `seq 1 25`
do
echo $i
date +%Y.%m.%d-%H:%M:%S
# $i is passed as the iteration-count argument; stdout+stderr of every run
# are captured per fixture/iteration for later analysis.
time java -cp target/rheemstudy-1.0-SNAPSHOT.jar:/home/jonas.kemper/rheem/rheem-distro/target/rheem-distro-0.4.0-SNAPSHOT.jar:/home/jonas.kemper/rheem/rheem-distro/target/rheem-distro-0.4.0-SNAPSHOT-distro/rheem-distro-0.4.0-SNAPSHOT/*:/opt/spark/spark-1.6.2-bin-hadoop2.6/lib/spark-assembly-1.6.2-hadoop2.6.0.jar -Drheem.configuration=file:/home/jonas.kemper/rheemstudy/app.properties kmeansUnrolled spark -1 30 $i 0.001 hdfs://tenemhead2/data/2dpoints/kmeans_points_1m.txt hdfs://tenemhead2/data/2dpoints/$j > ~/scripts/logs/spark-count-points_$j-$i.txt 2>&1
done
done
| true
|
c08a3c384e8e7ea78ebf43738aa0182d535d477c
|
Shell
|
tawk/tawk-opencart2
|
/docker/build.sh
|
UTF-8
| 471
| 3.484375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Build the tawk.to OpenCart module folder from the composer "release"
# output, then build the docker-compose image.  Fails fast on any error.
set -e;
build_dir=$(dirname $0);
module_dir=$build_dir/bin/tawkto;
if [ -d "$module_dir" ]; then
echo "Removing existing module folder";
rm -r $module_dir;
fi
echo "Creating module folder";
mkdir -p $module_dir;
echo "Install dependencies"
# Runs the "release" composer script of the parent project, which
# populates ../tmp with the files copied below.
composer run release --working-dir=$build_dir/..
echo "Copying files to module folder";
cp -r $build_dir/../tmp/* $module_dir
echo "Done building module folder";
echo "Building docker image"
docker-compose build
| true
|
0713cf2dd58651202b201fed175b7ffa04a6de27
|
Shell
|
surajkumar0/unix_utilities
|
/git/git_commands.sh
|
UTF-8
| 1,137
| 3.546875
| 4
|
[] |
no_license
|
# Git cheat-sheet: a feature-branch workflow, step by step.  Intended to be
# read and run line by line, not executed as a script.
# This command is used to look at the current git branch.
git branch
# The following command is used to create a new branch and also switch to that branch.
# Use the above `git branch` command to verify that you're on the new branch.
git checkout -b new_branch_name
# Now make all the code changes that you want.
# This command is used to show the status of the files and also shows the branch
git status
# This command is used to add all the modified files to the current commit.
git add .
# Run `git status` again to see the files that have been added to the commit.
# Used to create a commit for the newly created code and add a commit message.
git commit -m "Commit message goes here"
# This is the general pattern to switch to an existing branch.
# DO NOT EXECUTE THIS COMMAND FOR NOW for any general branch.
git checkout branch_name
# Switch to the existing master branch
git checkout master
# Add the code that you created in the branch named `new_branch_name` as above,
# and then add that code to the master branch.
git merge new_branch_name
# Used to push the newly created code to github.
git push
| true
|
49721a95372e2dc1c892e1e234b2eac68049bc5b
|
Shell
|
yfang1644/FArm_distro
|
/script/file
|
UTF-8
| 276
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Package recipe for the "file" utility, sourced by the distro build
# framework; PKG_* metadata and buildpkg() are consumed by the framework.
PKG_NAME=$1
PKG_VERSION=5.29
PKG_DEPENDS="zlib"
PKG_MAINTAINER="Christos Zoulas (christos@astron.com)"
PKG_SECTION="tools"
PKG_SHORTDESC="File type identification utility"
# Out-of-tree configure + build + staged install.
# NOTE(review): TARGET_CONFIGURE_OPTS, MAKEFLAGS and INSTALL_PKG are
# presumably exported by the calling build framework — confirm.
buildpkg() {
../configure ${TARGET_CONFIGURE_OPTS}
make $MAKEFLAGS
make DESTDIR=$INSTALL_PKG install
}
| true
|
59eda798620c07d47074ecedce5bdb386f161ea6
|
Shell
|
hi-time/pocci
|
/bin/oneoff
|
UTF-8
| 1,709
| 3.8125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run a one-off command inside a throwaway Docker container of the given
# type, mounting the current directory at /app, optionally wiring the
# container into the PoCCI DNS and backend network, and cleaning up after.
# Usage: oneoff <container type> <command> [arg...]
set -e
BASE_DIR=$(cd $(dirname $0)/..; pwd)
source ${BASE_DIR}/bin/lib/init-env
# Build the default docker-run options from the generated .env (env file,
# plus the embedded DNS container's address when it is running).
if [ -z "${POCCIR_OPTS}" ]; then
if [ -f "${CONFIG_DIR}/.env" ]; then
POCCIR_OPTS="--env-file ${CONFIG_DIR}/.env"
export `grep DNS_ADDRESS ${CONFIG_DIR}/.env`
if [ `docker ps |grep "${DNS_CONTAINER}" |wc -l` -ne 0 ]; then
POCCIR_OPTS="${POCCIR_OPTS} --dns ${DNS_ADDRESS}"
fi
fi
fi
if [ -n "${POCCIR_OPTS_ADD}" ]; then
POCCIR_OPTS="${POCCIR_OPTS} ${POCCIR_OPTS_ADD}"
fi
if [ $# -lt 2 ]; then
echo "Usage: $0 <container type> <command> [arg...]"
echo ''
exit 1
fi
# Allocate a TTY only when we are attached to one ourselves.
if [ -t 0 ];then
TTY_OPTION="-it"
else
TTY_OPTION=" "
fi
CONTAINER_TYPE="$1"
shift
# Derive a safe container name from the image reference (/: -> _).
CONTAINER_NAME_STEM=`echo ${CONTAINER_TYPE} |sed -e 's/[\/|:]/_/g'`
CONTAINER_NAME=poccir_${CONTAINER_NAME_STEM}
# When the type contains no registry/tag separators, prefer a locally
# available image whose name matches; otherwise use the type verbatim.
if [ "${CONTAINER_TYPE}" = "${CONTAINER_NAME_STEM}" ]; then
IMAGE=`docker images |awk '{printf "%s:%s\n",$1,$2}'| grep ${CONTAINER_TYPE} |head -1`
if [ -z "${IMAGE}" ]; then
IMAGE=${CONTAINER_TYPE}
fi
else
IMAGE=${CONTAINER_TYPE}
fi
# Share the user's SSH config/keys with the container when present.
if [ -d "${CONFIG_DIR}/.ssh" ]; then
MOUNT_SSH_DIR="-v ${CONFIG_DIR}/.ssh:/tmp/user_home/.ssh"
fi
set +e
# Attach to the backend compose network when it exists.
POCCI_NETWORK_NAME=${POCCI_BACKEND_SERVICE_PREFIX}_default
if [ `docker network ls | grep ${POCCI_NETWORK_NAME} | wc -l` -eq 1 ]; then
NET_OPTION="--net ${POCCI_NETWORK_NAME}"
fi
# /dev/urandom is mounted over /dev/random to avoid entropy starvation.
docker run --name ${CONTAINER_NAME} --privileged -w /app \
-v ${PWD}:/app ${POCCIR_OPTS} \
-v /dev/urandom:/dev/random ${MOUNT_SSH_DIR} ${NET_OPTION} \
--rm ${TTY_OPTION} ${IMAGE} "$@"
RC=$?
set -e
# Belt and braces: --rm should have removed it, but clean up stragglers.
if [ `docker ps -a |grep ${CONTAINER_NAME} |wc -l` -gt 0 ]; then
docker rm -v ${CONTAINER_NAME}
fi
exit ${RC}
| true
|
0f31e3dddffef2a1b8afcf5fb475aa3d9a39a4be
|
Shell
|
Feiox/CentOS-Attachment
|
/wdcp-v2-update/root/mysql5.6.sh
|
GB18030
| 2,759
| 3
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# MySQL 5.5.36 update scripts
# Author: wdlinux
# Url: http://www.wdlinux.cn
# Modify: KenGe
# Builds MySQL 5.5.36 (default) or 5.6.28 (any first argument) from source
# with CMake, swaps it in under /www/wdlinux/mysql, migrates/upgrades the
# existing data directory and restarts the service.
# (Original file is GB18030-encoded; some comments below were mojibake and
# have been replaced by hedged English notes.)
IN_DIR="/www/wdlinux"
#cpu = `grep 'processor' /proc/cpuinfo | sort -u | wc -l`
if [ ! $1 ];then
MYS_VER=5.5.36
parameter="-DCMAKE_INSTALL_PREFIX=$IN_DIR/mysql-$MYS_VER -DSYSCONFDIR=$IN_DIR/etc -DWITH_INNOBASE_STORAGE_ENGINE=1 -DWITH_SSL=no -DWITH_DEBUG=OFF -DWITH_EXTRA_CHARSETS=complex -DENABLED_PROFILING=ON -DWITH_MYISAM_STORAGE_ENGINE=1 -DWITH_MEMORY_STORAGE_ENGINE=1"
else
MYS_VER=5.6.28
parameter="-DCMAKE_INSTALL_PREFIX=$IN_DIR/mysql-$MYS_VER -DSYSCONFDIR=$IN_DIR/etc -DWITH_INNOBASE_STORAGE_ENGINE=1 -DWITH_SSL=bundled -DWITH_DEBUG=OFF -DWITH_EXTRA_CHARSETS=complex -DENABLED_PROFILING=ON -DWITH_MYISAM_STORAGE_ENGINE=1 -DWITH_MEMORY_STORAGE_ENGINE=1 -DENABLE_DOWNLOADS=1"
fi
if [ ! -f mysql-${MYS_VER}.tar.gz ];then
wget -c http://dev.mysql.com/get/Downloads/MySQL-5.6/mysql-${MYS_VER}.tar.gz
fi
yum install -y cmake bison libmcrypt-devel libjpeg-devel libpng-devel freetype-devel curl-devel openssl-devel libxml2-devel zip unzip
tar zxvf mysql-${MYS_VER}.tar.gz
cd mysql-${MYS_VER}
cmake $parameter
[ $? != 0 ] && echo "configure err" && exit
make
[ $? != 0 ] && echo "make err" && exit
make install
[ $? != 0 ] && echo "make install err" && exit
service mysqld stop
# Back up the old data directory once before swapping installations.
if [ ! -d $IN_DIR/mysql_west_bak ];then
mkdir -p $IN_DIR/mysql_west_bak
cp -pR $IN_DIR/mysql/var/* $IN_DIR/mysql_west_bak
fi
# Repoint the "mysql" symlink at the freshly built version.
rm -f $IN_DIR/mysql
ln -sf $IN_DIR/mysql-$MYS_VER $IN_DIR/mysql
sh scripts/mysql_install_db.sh --user=mysql --basedir=$IN_DIR/mysql --datadir=$IN_DIR/mysql/data
chown -R mysql.mysql $IN_DIR/mysql/data
# Keep the new seed data aside and use the live data dir via symlink.
mv $IN_DIR/mysql/data $IN_DIR/mysql/databak
ln -s /home/wddata/var $IN_DIR/mysql/data
# 5.6-only tweaks: required timestamp default setting and stale InnoDB
# log/ibdata removal before first start.
if [ $1 ];then
sed -i "/^\[mysqld\]/a\explicit_defaults_for_timestamp=true" /home/wddata/etc/my.cnf
ls $IN_DIR/mysql/data/ib*|xargs rm -rf
fi
cp support-files/mysql.server $IN_DIR/init.d/mysqld
sed -i 's/skip-locking/skip-external-locking/g' /home/wddata/etc/my.cnf
chmod 755 $IN_DIR/init.d/mysqld
sh scripts/mysql_install_db.sh --user=mysql --basedir=$IN_DIR/mysql --datadir=$IN_DIR/mysql/data
# Start without grant tables so mysql_upgrade can migrate system tables.
/www/wdlinux/mysql/bin/mysqld_safe --skip-grant-tables &
sleep 5
/www/wdlinux/mysql/bin/mysql_upgrade -uroot -proot
service mysqld restart
#if [ -d $IN_DIR/mysql-5.1.63 ];then
# (garbled GB18030 comment) presumably: only MySQL is updated here; keep
# compatibility links for the PHP build — TODO confirm intent.
#ln -sf $IN_DIR/mysql-5.1.63/lib/mysql/libmysqlclient.so.16* /usr/lib/
ln -sf $IN_DIR/mysql/lib/libmysqlclient.so.18.0.0 /usr/lib/libmysqlclient.so.18
# (garbled GB18030 comment) presumably: PHP 5.3 may need the OLD client
# library version instead — TODO confirm before uncommenting below.
#ln -sf /www/wdlinux/mysql-5.1.61/lib/mysql/libmysqlclient.so.16.0.0 /usr/lib/libmysqlclient.so.18
#fi
sleep 2
sh $IN_DIR/tools/mysql_wdcp_chg.sh
service mysqld restart
echo
echo "MYSQL "
mysql -V
|
38ef540e3f6a79c5319af42714f8544d5a4a0330
|
Shell
|
roidayan/ovs-tests
|
/ecmp_setup_test_060_180/180/load_120.sh
|
UTF-8
| 1,245
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
P1="ens2f0"
P2="ens2f1"
modprobe -r mlx5_ib mlx5_core
modprobe -r openvswitch
sleep 1
modprobe -v openvswitch
sleep 1
modprobe -v mlx5_core
sleep 1
vms=`virsh list | grep run | awk '{print $1}'`
for i in $vms; do virsh destroy $i ; done
echo 0 > /sys/class/net/$P1/device/sriov_numvfs
echo 0 > /sys/class/net/$P2/device/sriov_numvfs
sleep 2
echo 2 > /sys/class/net/$P1/device/sriov_numvfs
echo 2 > /sys/class/net/$P2/device/sriov_numvfs
sleep 1
ip link set $P1 vf 0 mac e4:1d:2d:fa:60:8a
ip link set $P1 vf 1 mac e4:1d:2d:fb:60:8b
ip link set $P2 vf 0 mac e4:1d:2d:11:80:8c
ip link set $P2 vf 1 mac e4:1d:2d:11:80:8d
echo 0000:81:00.2 > /sys/bus/pci/drivers/mlx5_core/unbind
echo 0000:81:00.3 > /sys/bus/pci/drivers/mlx5_core/unbind
echo 0000:81:02.2 > /sys/bus/pci/drivers/mlx5_core/unbind
echo 0000:81:02.3 > /sys/bus/pci/drivers/mlx5_core/unbind
devlink dev eswitch set pci/0000:81:00.0 mode switchdev
devlink dev eswitch set pci/0000:81:00.1 mode switchdev
echo 0000:81:00.2 > /sys/bus/pci/drivers/mlx5_core/bind
echo 0000:81:00.3 > /sys/bus/pci/drivers/mlx5_core/bind
echo 0000:81:02.2 > /sys/bus/pci/drivers/mlx5_core/bind
echo 0000:81:02.3 > /sys/bus/pci/drivers/mlx5_core/bind
ip link show $P1
ip link show $P2
| true
|
50353640da92ac119176f0cc749d0f0d6702c6a4
|
Shell
|
iqlusioninc/tmkms
|
/tests/support/run-harness-tests.sh
|
UTF-8
| 1,104
| 3.671875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
TMKMS_BIN=${TMKMS_BIN:-"./target/debug/tmkms"}
TMKMS_CONFIG=${TMKMS_CONFIG:-"/harness/tmkms.toml"}
HARNESS_BIN=${HARNESS_BIN:-"tm-signer-harness"}
TMHOME=${TMHOME:-"/harness"}
# Run KMS in the background
${TMKMS_BIN} start -c ${TMKMS_CONFIG} &
TMKMS_PID=$!
# Run the test harness in the foreground
${HARNESS_BIN} run \
-addr tcp://127.0.0.1:61278 \
-tmhome ${TMHOME}
HARNESS_EXIT_CODE=$?
# Kill the KMS, if it's still running
if ps -p ${TMKMS_PID} > /dev/null
then
echo "Killing KMS (pid ${TMKMS_PID})"
kill ${TMKMS_PID}
# Wait a few seconds for KMS to die properly.
# NOTE: This also acts as a test of the KMS listening for and properly
# responding to the SIGTERM signal from `kill`.
sleep 3
# Make sure KMS has actually stopped properly now.
if ps -p ${TMKMS_PID} > /dev/null
then
echo "Failed to stop KMS!"
exit 100
fi
else
echo "KMS (pid ${TMKMS_PID}) already stopped, not killing"
fi
# Bubble the exit code up out of the script
echo "Harness tests exiting with code ${HARNESS_EXIT_CODE}"
exit ${HARNESS_EXIT_CODE}
| true
|
3a96dcac87aafe4027e5bfe7dd2b28116031bafd
|
Shell
|
EstephaniaCalvoC/AirBnB_clone_v2
|
/0-setup_web_static.sh
|
UTF-8
| 1,357
| 3.5
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Configurate Nginx with:
# Port 80
# Permanent redirection /redirect_me.
# Use custom 404 error page
# Custom header X-Served-By
# Prepare server to deploy
apt-get update
apt-get -y install nginx
# Create forlders
mkdir -p /data/web_static/{releases/test,shared}
# Create default page
echo "<html>
<head>
</head>
<body>
Holberton School
</body>
</html>" > /data/web_static/releases/test/index.html
# Create symbolic link
ln -sf /data/web_static/releases/test/ /data/web_static/current
# Change ownership
chown -R ubuntu /data
chgrp -R ubuntu /data
# Configurate server
ufw allow 'Nginx HTTP'
f_config="/etc/nginx/sites-available/default"
# Add 404 redirection
echo "Ceci n'est pas une page" > /usr/share/nginx/html/my_404.html
new_404="my_404.html"
l_new_404="/my_404.html {root /usr/share/nginx/html;\n internal;}"
sed -i "/listen 80 default_server/a error_page 404 /$new_404; location = $l_new_404" $f_config
# Add redirection
new_site="https://github.com/EstephaniaCalvoC/"
sed -i "/listen 80 default_server/a rewrite ^/redirect_me $new_site permanent;" $f_config
# Add header
sed -i "/listen 80 default_server/a add_header X-Served-By \"$HOSTNAME\";" $f_config
# Add alias
sed -i '/listen 80 default_server/a location /hbnb_static/ { alias /data/web_static/current/;}' $f_config
service nginx restart
exit 0
| true
|
dcefbf89d5d60897745abdd8e43c84371c14dc0e
|
Shell
|
MuhammadAbuBakar95/kernel-specialization
|
/occam_pipe_line/run_occam.sh
|
UTF-8
| 471
| 2.578125
| 3
|
[] |
permissive
|
# ${1} -> work directory
# ${2} -> manifest file
# ${3} -> keep.list
# ${4} -> kernel specialization home
cp ${3} ${1}/.
cp ${2} ${1}/.
cd ${4}/LLVMPasses
make build_UsedInAsm
cd ${4}/occam_pipe_line
cp generateExclusionSymbols.py ${1}/.
cd ${1}
KS_PATH=${4} python generateExclusionSymbols.py ${2} exclusion_list
cat ${3} >> exclusion_list
rm occam.log
OCCAM_LOGFILE=occam.log slash --work-dir=slashing --keep-external=exclusion_list --no-strip --no-specialize ${2}
| true
|
ca54e694f143fbb628ef36ad20aa6f7a775d90c6
|
Shell
|
arunasri/rails31mongo
|
/script/chop
|
UTF-8
| 608
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
output=$2
if [ -d $output ]; then
rm -rf $output
fi
mkdir -p $output
# create a temporary file containing the header without
# the content:
head -n 1 $1 > header.csv
# create a temporary file containing the content without
# the header:
tail -n +1 $1 > content.csv
# split the content file into multiple files of 5 lines each:
split -l 5000 content.csv "$output/data_"
# loop through the new split files, adding the header
# and a '.csv' extension:
for f in `ls $output/*`; do cat header.csv $f > $f.csv; rm $f; done;
# remove the temporary files:
rm header.csv
rm content.csv
| true
|
cfa5130d4aba9563315b7ef60b26f3db650423b5
|
Shell
|
Communica/devop-scripts
|
/installApache.sh
|
UTF-8
| 1,143
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/sh
# file: installApache.sh
# author: technocake
# desc: Installs apache2 with php5 mysql and copies backupconfig + www files on pompel
# date: 05.05.2011 15:42
###########################################################################################
echo "Installing apache mit php5 mit mysql"
sudo apt-get install apache2 mysql5-common mysql-server mysql-client php5 libphp5-mod-mysql libapache2-mod-php5
echo "Installing phpmyadmin"
sudo apt-get install libapache2-mod-auth-mysql php5-mysql phpmyadmin
echo "Slår på mysql i php.ini"
sed -i 's/;extension=mysql.so/extension=mysql.so/i' /etc/php5/apache2/php.ini
echo "Fetching all configs from backup"
#--preserve keeps file rights etc
cp -rv --preserve /root/pompel-backup/etc/apache2 /etc/
echo "Fetching all /var/www files from backup"
cp -rv --preserve /root/pompel-backup/var/www /var/
echo "Starter apache på nytttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt"
apache2ctl graceful
| true
|
ff5505ac9d8d56f4148fe55e87859f09706a1a39
|
Shell
|
JoshuaSBrown/Excimontec
|
/slurm_script.sh
|
UTF-8
| 632
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#SBATCH -J Excimontec # Job name
#SBATCH -p partition_name
#SBATCH -n 48 # Number of tasks
#SBATCH -t 01:00:00 # Maximum walltime
#SBATCH --cpus-per-task=1
version_num=v1.0-beta.3
ParameterNum=default
# Setup job directory
mkdir $SLURM_JOB_ID
cd $SLURM_JOB_ID
cp ../Excimontec.exe ./Excimontec.exe
cp ../parameters_$ParameterNum.txt ./parameters_$ParameterNum.txt
# Execute Excimontec Simulation
mpiexec -n 48 Excimontec.exe parameters_$ParameterNum.txt > output.txt
# Cleanup
rm -f Excimontec.exe
tar -zcf $SLURM_JOB_ID.tar.gz $SLURM_JOB_ID
cp $SLURM_JOB_ID.tar.gz ../$SLURM_JOB_ID.tar.gz
rm -f $SLURM_JOB_ID.tar.gz
| true
|
884806ab49024a45caff8d9cfc580261b2f10960
|
Shell
|
TangoMan75/bash_aliases
|
/src/network/get-ip.sh
|
UTF-8
| 268
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
## Get external IP
function external-ip() {
local IP
IP="$(curl -s ipv4.icanhazip.com || wget -qO - ipv4.icanhazip.com)"
if [ -z "${IP}" ]; then
IP="$(curl -s api.ipify.org || wget -qO - api.ipify.org)\n"
fi
echo "${IP}"
}
| true
|
d709ac6affcc5189e4724dbd514e5b0fd16af413
|
Shell
|
vitalinux/vx-pms-dev
|
/usr/bin/vx-git-createdir
|
UTF-8
| 535
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
# File: /usr/bin/vx-git-createdir
# Permissions: root:root 755
# Syntax: vx-git-createdir <PATH>
# Author: Alberto Gacías <alberto@migasfree.org>
# Creates a file called .createdir at empty directories from a path
_PATH=$1
if [ -z "$_PATH" ]
then
echo "Enter a directory as a parameter"
exit 1
fi
IFS="|"
for _DIR in $(find $_PATH -depth -empty -type d -printf "%h/%f$IFS")
do
if ! [[ "$_DIR" == *.git/* ]] # We exclude .git directory
then
touch "$_DIR/.createdir"
echo "CREADO $_DIR/.createdir"
fi
done
| true
|
1dc7c4c3a03617cbde520dd21ead7a28feca887f
|
Shell
|
Rob-123/linuxgsm
|
/functions/fn_details
|
UTF-8
| 4,844
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# LGSM fn_details function
# Author: Daniel Gibbs
# Website: http://danielgibbs.co.uk
# Version: 170214
# Description: Displays server infomation.
# Standard Details
# This applies to all engines
fn_details_os(){
echo -e ""
echo -e "\e[93mDistro Details\e[0m"
printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' =
echo -e "\e[34mDistro:\t\e[0m${os}" >> .fn_details_distro
echo -e "\e[34mArch:\t\e[0m${arch}" >> .fn_details_distro
echo -e "\e[34mKernel:\t\e[0m${kernel}" >> .fn_details_distro
echo -e "\e[34mHostname:\t\e[0m$HOSTNAME" >> .fn_details_distro
echo -e "\e[34mtmux:\t\e[0m${tmuxv}" >> .fn_details_distro
echo -e "\e[34mGLIBC:\t\e[0m${glibcv}" >> .fn_details_distro
column -s $'\t' -t .fn_details_distro
rm -f .fn_details_distro
}
fn_details_performance(){
echo -e ""
echo -e "\e[93mPerformance\e[0m"
printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' =
echo -e "\e[34mUptime:\t\e[0m${days}d, ${hours}h, ${minutes}m" >> .fn_details_performance
echo -e "\e[34mAvg Load:\t\e[0m${load}" >> .fn_details_performance
column -s $'\t' -t .fn_details_performance
rm -f .fn_details_performance
echo -e ""
echo -e "\e[34mMem:\t\e[34mtotal\t used\t free\e[0m" >> .fn_details_performance
echo -e "\e[34mPhysical:\t\e[0m${physmemtotal}\t${physmemused}\t${physmemfree}\e[0m" >> .fn_details_performance
echo -e "\e[34mSwap:\t\e[0m${swaptotal}\t${swapused}\t${swapfree}\e[0m" >> .fn_details_performance
column -s $'\t' -t .fn_details_performance
rm -f .fn_details_performance
}
fn_details_disk(){
echo -e ""
echo -e "\e[93mDisk Usage\e[0m"
printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' =
echo -e "\e[34mDisk available:\t\e[0m${availspace}" >> .fn_details_disk
echo -e "\e[34mServerfiles:\t\e[0m${serverfilesdu}" >> .fn_details_disk
if [ -d "${backupdir}" ]; then
echo -e "\e[34mBackups:\t\e[0m${backupdirdu}" >> .fn_details_disk
fi
column -s $'\t' -t .fn_details_disk
rm -f .fn_details_disk
}
fn_details_gameserver(){
if [ ! -e ${servercfgfullpath} ]; then
servername="\e[0;31mCONFIG FILE MISSING!\e[0m"
rcon="\e[0;31mCONFIG FILE MISSING!\e[0m"
servercfgfullpath="${servercfgfullpath} \e[0;31mCONFIG FILE MISSING!!\e[0m"
fi
echo -e ""
echo -e "\e[92m${gamename} Server Details\e[0m"
printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' =
echo -e "\e[34mServer name:\t\e[0m${servername}" >> .fn_details_gameserver
echo -e "\e[34mServer IP:\t\e[0m${ip}:${port}" >> .fn_details_gameserver
if [ ! -z "${rcon}" ]; then
echo -e "\e[34mRCON password:\t\e[0m${rcon}" >> .fn_details_gameserver
fi
pid=$(tmux list-sessions 2>&1|awk '{print $1}'|grep -E "^${servicename}:"|wc -l)
if [ "${pid}" == "0" ]; then
echo -e "\e[34mStatus:\t\e[0;31mOFFLINE\e[0m" >> .fn_details_gameserver
else
echo -e "\e[34mStatus:\t\e[0;32mONLINE\e[0m" >> .fn_details_gameserver
fi
column -s $'\t' -t .fn_details_gameserver
rm -f .fn_details_gameserver
echo -e ""
echo -e "\e[34mService name:\t\e[0m${servicename}" >> .fn_details_gameserver
echo -e "\e[34mUser:\t\e[0m$(whoami)" >> .fn_details_gameserver
echo -e "\e[34mLocation:\t\e[0m${rootdir}" >> .fn_details_gameserver
if [ ! -z "${servercfgfullpath}" ]; then
echo -e "\e[34mConfig file:\t\e[0m${servercfgfullpath}" >> .fn_details_gameserver
fi
if [ "${gamename}" == "Teamspeak 3" ]; then
echo -e "\e[34mdbplugin:\t\e[0m${dbplugin}" >> .fn_details_gameserver
fi
column -s $'\t' -t .fn_details_gameserver
rm -f .fn_details_gameserver
}
fn_details_commandlineparms(){
echo -e ""
echo -e "\e[92mCommand-line Parameters\e[0m"
printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' =
echo -e "${executable} ${parms}"
}
fn_details_statusbottom(){
echo -e ""
if [ "${gamename}" == "Teamspeak 3" ]; then
if [ "${ts3status}" = "Server seems to have died" ] || [ "${ts3status}" = "No server running (ts3server.pid is missing)" ]; then
echo -e "\e[34mStatus: \e[0;31mOFFLINE\e[0m"
else
echo -e "\e[34mStatus: \e[0;32mONLINE\e[0m"
fi
else
if [ "${pid}" == "0" ]; then
echo -e "\e[34mStatus: \e[0;31mOFFLINE\e[0m"
else
echo -e "\e[34mStatus: \e[0;32mONLINE\e[0m"
fi
fi
echo -e ""
}
# Engine Specific details
fn_details_source(){
fn_check_ip
fn_parms
fn_details_config
fn_details_distro
fn_details_os
fn_details_performance
fn_details_disk
fn_details_gameserver
fn_details_commandlineparms
echo -e ""
echo -e "\e[92mPorts\e[0m"
printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' =
echo -e "DESCRIPTION\tDIRECTION\tPORT\tPROTOCOL" >> .fn_details_ports
echo -e "> Game/RCON\tINBOUND\t${port}\ttcp/udp" >> .fn_details_ports
if [ ! -z "${sourcetvport}" ]; then
echo -e "> SourceTV\tINBOUND\t${sourcetvport}\tudp" >> .fn_details_ports
fi
echo -e "< Client\tOUTBOUND\t${clientport}\tudp" >> .fn_details_ports
column -s $'\t' -t .fn_details_ports
rm -f .fn_details_ports
fn_details_statusbottom
}
fn_details_source
| true
|
e5dbb9a9297ca278cdd983815b0acbd40bdc64c3
|
Shell
|
IdeaSynthesis/dokku-autosync
|
/install
|
UTF-8
| 622
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -eo pipefail
[[ $DOKKU_TRACE ]] && set -x
source "$PLUGIN_CORE_AVAILABLE_PATH/common/functions"
plugin-install() {
# add the global include
if [[ ! -f "${DOKKU_ROOT}/.ssh/config" ]] ; then
echo -e "Include ${DOKKU_ROOT}/*/autosync\n" >> "${DOKKU_ROOT}/.ssh/config"
chmod 644 "${DOKKU_ROOT}/.ssh/config"
fi
if [[ "$(grep -F "Include ${DOKKU_ROOT}/*/autosync" "${DOKKU_ROOT}/.ssh/config")" == "" ]]; then
echo -e "\nInclude ${DOKKU_ROOT}/*/autosync\n" >> "${DOKKU_ROOT}/.ssh/config"
chmod 644 "${DOKKU_ROOT}/.ssh/config"
fi
}
plugin-install "$@"
| true
|
09526d012c5a0165347ec879b16f57547379563b
|
Shell
|
sgn/dotfiles
|
/sh/91-term-title.zsh
|
UTF-8
| 196
| 3.21875
| 3
|
[
"0BSD"
] |
permissive
|
set_term_title () {
print -Pn "\e]0;$1\a"
}
# Reload autoloadable functions
function freload () {
while (( $# )); do
unfunction $1
autoload -U $1
shift
done
}
compdef _functions freload
| true
|
5550851e84d8e885542f0d4fc810d8a153637274
|
Shell
|
JDeuce/dotfiles
|
/.bashrc
|
UTF-8
| 1,610
| 3.171875
| 3
|
[] |
no_license
|
# If not running interactively, don't do anything
[ -z "$PS1" ] && return
# TZ/Locale settings
export TZ=/usr/share/zoneinfo/America/Winnipeg
export MM_CHARSET=utf8
export LANG=en_CA.UTF-8
export LC_ALL=en_CA.UTF-8
export LC_COLLATE=C
# Editor
if ( which vim > /dev/null )
then
alias vi=vim
export EDITOR=vim
export VISUAL=vim
else
export EDITOR=vi
export VISUAL=vi
fi
# History settings
HISTCONTROL=ignoredups:ignorespace
# append to the history file, don't overwrite it
shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=2000
# fixes problems with long lines overwriting after a win resize
shopt -s checkwinsize
if [ $EUID -ne 0 ]
then
# green username when not running as root
PS1='\[\033[01;32m\]'
else
# red username as root
PS1='\[\033[01;31m\]'
fi
PS1=$PS1'\u \033[00m\]\[\033[01;34m\]\w\[\033[00m\]'
if [ -f ~/.git-completion.bash ]; then
. ~/.git-completion.bash
PS1="${PS1}\$(__git_ps1 ' [\[\e[34;1m\]%s\[\e[0m\]]')"
GIT_PS1_SHOWDIRTYSTATE=true
GIT_PS1_SHOWUNTRACKEDFILES=true
fi
PS1="${PS1} \n\$ "
# adds color to ls in FreeBSD
export CLICOLOR='yes'
alias grep='grep --color'
alias screen='screen -U'
alias rehash='hash -r'
pushd() {
command pushd "$@" > /dev/null
}
export pushd
alias cd=pushd
export PYTHONSTARTUP=~/.pythonrc.py
markdown_to_man() {
file=${1--}
pandoc -s -f markdown_github -t man $file | man -l -
}
alias markman=markdown_to_man
# use 256 color mode
alias tmux="tmux -2"
if [ -f ~/.bashrc-local ]; then
. ~/.bashrc-local
fi
| true
|
9dd3b274c4c7f477ce224f86ca358ec7ccb95f9f
|
Shell
|
petronny/aur3-mirror
|
/plex/PKGBUILD
|
UTF-8
| 1,666
| 2.59375
| 3
|
[] |
no_license
|
# Maintainer : Anish Bhatt <anish[removethis][at]gatech[dot]edu>
pkgname=plex
_majorver=0.9.7.12
_minorver=407
_suffix=db37d6d
pkgver=${_majorver}.${_minorver}
_dver=${_majorver}.${_minorver}-${_suffix}
pkgrel=1
pkgdesc='Plex Media Server'
url='http://www.plexapp.com/linux'
arch=('i686' 'x86_64')
source=(plex-${pkgver}.rpm::http://plex.r.worldssl.net/PlexMediaServer/${_dver}/plexmediaserver-${_dver}.i386.rpm
'plex-icon.png'
'plex.desktop')
md5sums=('8f9df56355ae8c03b7727ef5f5f848dd'
'50103e17fe35809ddaaec3ae315d2d35'
'0979ec3db2a6ff39a7738d0482e64ff4')
if [[ $CARCH == x86_64 ]]; then
source[0]=plex-${pkgver}.rpm::http://plex.r.worldssl.net/PlexMediaServer/${_dver}/plexmediaserver-${_dver}.x86_64.rpm
md5sums[0]=7b0a828988bb0e9e747aa2d28143254c
fi
depends=('avahi' 'curl')
makedepends=('rpmextract')
options=(!strip)
license=('GPL')
build() {
rpmextract.sh plex-${pkgver}.rpm
}
package() {
install -d -m755 ${pkgdir}/usr/lib/plexmediaserver
cp -dpr --no-preserve=ownership usr/lib/plexmediaserver/* "${pkgdir}"/usr/lib/plexmediaserver/.
install -d ${pkgdir}/etc/security/limits.d
install -D -m644 ${srcdir}/etc/security/limits.d/plex.conf ${pkgdir}/etc/security/limits.d
install -Dm644 ${srcdir}/etc/sysconfig/PlexMediaServer "${pkgdir}"/etc/conf.d/plexmediaserver
install -Dm644 ${srcdir}/lib/systemd/system/plex.service "${pkgdir}"/usr/lib/systemd/system/plexmediaserver.service
install -d ${pkgdir}/usr/share/applications
install -D -m644 ${srcdir}/plex.desktop ${pkgdir}/usr/share/applications/
install -d ${pkgdir}/usr/share/pixmaps
install -D -m644 plex-icon.png ${pkgdir}/usr/share/pixmaps/plex-icon.png
}
| true
|
508404a1a58f29f7538cb7106779eb1e010ca048
|
Shell
|
mauri870/assembly-fun
|
/test_runner.sh
|
UTF-8
| 410
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
set -e
TESTDIR=tests
SRCDIR=src
function build_and_test() {
echo "Compiling \"$1\"..."
PROGRAM="$1" make
echo "Running test for \"$1\"..."
bats "$TESTDIR/$1.bats"
}
make clean > /dev/null
for PROGRAM in $(ls -d src/*/ | cut -f2 -d'/')
do
if [ $# -eq 1 ] && [ "$1" != "$PROGRAM" ]
then
continue
fi
build_and_test "$PROGRAM"
done
make clean > /dev/null
| true
|
e1d7f47d88a58a61a4d0c6f29b84325de7e56aaa
|
Shell
|
fbudin69500/NIRALSystemScripts
|
/UpdateSlicerExtensions.script
|
UTF-8
| 8,350
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
createNewBranch ()
{
tool=$1
mainBranch="$2"
localMainBranch="$3"
#Create new branch for update
branchName=update-${tool}${localMainBranch}
if [ `git branch | grep -c $branchName` -ne 0 ]
then
echo "Removing existing local branch: $branchName"
git branch -D $branchName
fi
git checkout -B $branchName ${mainBranch}
}
updateDescriptionFile ()
{
descriptionFile=$1
descriptRevision=$2
currentRevision=$3
#Replace hash in s4ext description file
if [ "$OSTYPE" == "linux" ]; then
sed -i "s/$descriptRevision/$currentRevision/g" $descriptionFile
else
sed -i '' "s/$descriptRevision/$currentRevision/g" $descriptionFile
fi
}
updateExtensions ()
{
tool=$1
if [ "$2" == "push" ]
then
localMainBranch="$3"
branchName=update-${tool}${localMainBranch}
if [ `git branch | grep -c "origin/$branchName"` -ne 0 ]
then
echo "Removing existing remote branch: $branchName"
git push origin :$branchName
fi
git push origin $branchName --force
fi
}
ExtensionGit()
{
sourceDir="$1"
tool="$2"
mainBranch="$3"
descriptionFile="$4"
descriptRevision="$5"
currentrevision="$6"
diffmessage="$7"
pushcleanArg="$8"
localMainBranch="$9"
cd $sourceDir
createNewBranch $tool $mainBranch $localMainBranch
updateDescriptionFile $descriptionFile $descriptRevision $currentrevision
git add $descriptionFile
git commit -m "ENH: Update $tool from revision $descriptRevision to $currentrevision" -m "" -m "$diffmessage"
#Update Extensions
updateExtensions $tool $pushcleanArg $localMainBranch
}
if ( [ $# -ne 2 ] || ( [ "$2" != "master" ] && [ "$2" != "4.3" ] && [ "$2" != "4.4" ] ) ) && ( [ $# -ne 3 ] || ([ "$3" != "push" ] && [ "$3" != "clean" ] ) )
then
echo "Usage: $0 SlicerExtensionBuildDirectory [master|4.3|4.4] [push|clean]"
exit 1
fi
mainBranch="$2"
if [ "$mainBranch" == "master" ]
then
localMainBranch=""
else
localMainBranch="-${mainBranch}"
fi
pushcleanArg="$3"
#pushclean cannot be empty
if [ "$pushcleanArg" == "" ]
then
pushcleanArg="none"
fi
currentDir=`pwd`
#Define all our tools included in ExtensionsIndex
toolsNIRALList=(CMFreg DTIAtlasBuilder DTIPrep DTIProcess DTIAtlasFiberAnalyzer FiberViewerLight \
ShapePopulationViewer SPHARM-PDM IntensitySegmenter ModelToModelDistance MeshToLabelMap \
Q3DC EasyClip AnglePlanesExtension MeshStatisticsExtension PickAndPaintExtension \
ResampleDTIlogEuclidean DeveloperToolsForExtensions DTI-Reg\
)
#Find real binary directory
cd $1
var=`more CMakeCache.txt | grep SlicerExtensions_BINARY_DIR`
binaryDir=${var#"SlicerExtensions_BINARY_DIR:STATIC="}
echo "Slicer Extension Build Directory: $binaryDir"
#Find source dir containing s4ext files
var=`more CMakeCache.txt | grep Slicer_EXTENSION_DESCRIPTION_DIR`
sourceDir=${var#"Slicer_EXTENSION_DESCRIPTION_DIR:PATH="}
echo "Slicer Extension Source Directory: $sourceDir"
#Update ExtensionsIndex source code
cd $sourceDir
#Clean if $pushcleanArg == "clean"
if [ "$pushcleanArg" == "clean" ]
then
git fetch origin
git checkout master
for tool in ${toolsNIRALList[@]}
do
branchName=`git branch | grep -e "update-${tool}${localMainBranch}$"`
name=`expr "$branchName" : ".*\(update-.*\)$"`
if [ "$name" != "" ]; then
echo "Removing existing local branch: $branchName"
git branch -D $branchName
fi
done
for tool in ${toolsNIRALList[@]}
do
branchName=`git branch -a | grep -e "origin/update-${tool}${localMainBranch}$"`
name=`expr "$branchName" : ".*\(update-.*\)$"`
if [ "$name" != "" ]; then
echo "Removing existing remote branch: $name"
git push origin :$name
fi
done
exit 0
fi
echo "Update SlicerExtensions"
if [ "`git remote | grep -c upstream`" -eq 0 ]
then
git remote add upstream git://github.com/Slicer/ExtensionsIndex
fi
git fetch upstream
##################################################################
#Checks if mainBranch already exists locally. Otherwise we need to check it out.
if [ `git branch -a |grep -e "${mainBranch}$" |grep -ve "[a-z]" |grep -c ${mainBranch}` -eq 0 ]
then
echo "Branch was not detected locally, we need to checkout it out from the remote repository"
git checkout -b ${mainBranch} origin/${mainBranch}
fi
##################################################
git checkout $mainBranch
git reset --hard upstream/$mainBranch
git push origin $mainBranch
#For all the tools we want to update, go download latest version and compare with
#version that is currently included in this project (ExtensionsIndex). If a
#newer version exists, replace current version included in this project with
#latest version
for tool in ${toolsNIRALList[@]}
do
cd $binaryDir
echo "-----------------------------------------------------"
echo "Tool: $tool"
descriptionFile=$sourceDir/${tool}.s4ext
if [ ! -e $descriptionFile ]
then
echo "Description file $descriptionFile not found. Skipping $tool"
continue
fi
scm=`grep "scm" $descriptionFile`
descriptRevision=`grep "scmrevision" $descriptionFile`
descriptRevision=`expr "$descriptRevision" : " *scmrevision *\(\([a-f]*[0-9]*\)*\) *$"`
echo "Current Description file Revision: $descriptRevision"
repository=`grep "scmurl" $descriptionFile`
repository=`expr "$repository" : " *scmurl *\([a-z].*\)$"`
echo "Repository: $repository"
if [ "`echo $scm | grep -ci git`" -gt 0 ]
then
echo "$tool is a git project"
#Get source code
if [ ! -d $tool ] || [ ! "$(ls -A $tool)" ]
then
echo "git Repository: $repository"
git clone $repository $tool
cd $binaryDir/$tool
else
cd $binaryDir/$tool
echo "Pulling"
git pull origin master
fi
#get current (=latest) commit hash
currentgithash=`git rev-parse HEAD`
echo "Current Project Git hash: $currentgithash"
timeStampDescription=`git log -1 --format="%ct" $descriptRevision`
echo "Description time stamp: `git log -1 --format="%ci" $descriptRevision` - UNIX format: $timeStampDescription"
timeStampCurrent=`git log -1 --format="%ct" $currentgithash`
echo "Current time stamp: `git log -1 --format="%ci" $currentgithash` - UNIX format: $timeStampCurrent"
#if description commit hash is older than the last commit, we update the description file
if [ "$timeStampCurrent" -gt "$timeStampDescription" ]; then
#Create github diff link
repository=`expr "$repository" : "\(.*\)\.git"`
repository=`expr "$repository" : ".*github.com\/\(.*\)"`
diffmessage="https://www.github.com/$repository/compare/$descriptRevision%E2%80%A6$currentgithash"
ExtensionGit $sourceDir $tool $mainBranch $descriptionFile $descriptRevision $currentgithash "$diffmessage" $pushcleanArg $localMainBranch
fi
continue
fi
if [ "`echo $scm | grep -ci svn`" -gt 0 ]
then
echo "$tool is an SVN project"
###get username and password from description file
svnUser=`grep "svnusername" $descriptionFile`
svnUser="--username "`expr "$svnUser" : " *svnusername *\(\([a-z]*[0-9]*\)*\) *$"`
echo "SVN User Name: $svnUser"
svnPassword=`grep "svnpassword" $descriptionFile`
svnPassword="--password "`expr "$svnPassword" : " *svnpassword *\(\([a-z]*[0-9]*\)*\) *$"`
echo "SVN Password: $svnPassword"
#checkout or update project
if [ ! -d $tool ] || [ ! "$(ls -A $tool)" ]
then
echo "SVN Repository: $repository"
svn checkout $svnUser $svnPassword $repository $tool
cd $binaryDir/$tool
else
cd $binaryDir/$tool
svn update $svnUser $svnPassword
fi
#get current revision number
currentrevision=`svnversion`
echo "Current Revision: $currentrevision"
#if mismatch, update description file revision
if [ $currentrevision -gt $descriptRevision ] ;then
#get tool logs and copy them to the ExtensionIndex commit message
diffmessage=""
for i in `seq $currentrevision -1 $((descriptRevision+1))`
do
diffmessage=$diffmessage`svn log $svnUser $svnPassword -r $i`$'\n'
done
ExtensionGit $sourceDir $tool $mainBranch $descriptionFile $descriptRevision $currentrevision "$diffmessage" $pushcleanArg $localMainBranch
fi
continue
else
echo "$tool is of an unknown type. Skipping"
fi
done
cd $SOURCE_DIR
cd $currentDir
| true
|
3a217607595c5c81eb7a4fb8b144cbfb463d735d
|
Shell
|
webmandman/docker-commandbox
|
/resources/publish.sh
|
UTF-8
| 2,457
| 3.6875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e
cd $TRAVIS_BUILD_DIR
echo "CWD: $PWD"
echo "Dockerfile: $TRAVIS_BUILD_DIR/${BUILD_IMAGE_DOCKERFILE}"
# Push Version into Images: $IMAGE_VERSION IS SET IN TRAVIS
sed -i -e "s/@version@/$IMAGE_VERSION/g" $TRAVIS_BUILD_DIR/${BUILD_IMAGE_DOCKERFILE}
# Build our deployment image fresh so that no artifacts remain
docker build --no-cache -t ${TRAVIS_COMMIT}:${TRAVIS_JOB_ID} -f $TRAVIS_BUILD_DIR/${BUILD_IMAGE_DOCKERFILE} $TRAVIS_BUILD_DIR/
echo "INFO: Docker image successfully built"
# Log in to Docker Hub
docker login -u $DOCKER_HUB_USERNAME -p "${DOCKER_HUB_PASSWORD}"
echo "INFO: Successfully logged in to Docker Hub!"
# Tag our image with the build reference
# Tag Builds
if [[ $TRAVIS_TAG ]]; then
# Strip the `v` from the start of the tag
if [[ ${BUILD_IMAGE_TAG} == 'ortussolutions/commandbox' ]]; then
BUILD_IMAGE_TAG="${BUILD_IMAGE_TAG}:${TRAVIS_TAG#v}"
else
BUILD_IMAGE_TAG="${BUILD_IMAGE_TAG}-${TRAVIS_TAG#v}"
fi
docker tag ${TRAVIS_COMMIT}:${TRAVIS_JOB_ID} ${BUILD_IMAGE_TAG}
elif [[ ${BUILD_IMAGE_TAG} == 'ortussolutions/commandbox' ]] && [[ $TRAVIS_BRANCH == 'master' ]]; then
# Master Builds
docker tag ${TRAVIS_COMMIT}:${TRAVIS_JOB_ID} ${BUILD_IMAGE_TAG}
else
# Snapshot tagging
if [[ ${BUILD_IMAGE_TAG} == 'ortussolutions/commandbox' ]] && [[ $TRAVIS_BRANCH == 'development' ]]; then
BUILD_IMAGE_TAG="${BUILD_IMAGE_TAG}:snapshot"
elif [[ $TRAVIS_BRANCH == 'development' ]]; then
BUILD_IMAGE_TAG="${BUILD_IMAGE_TAG}-snapshot"
fi
docker tag ${TRAVIS_COMMIT}:${TRAVIS_JOB_ID} ${BUILD_IMAGE_TAG}
fi
# Push our new image and tags to the registry
echo "INFO: Pushing new image to registry ${BUILD_IMAGE_TAG}"
docker push ${BUILD_IMAGE_TAG}
echo "INFO: Image ${BUILD_IMAGE_TAG} successfully published"
# Now create any suppplimentary tags
if [[ ! $TRAVIS_TAG ]] && [[ ${BUILD_IMAGE_TAG} == 'ortussolutions/commandbox' ]] && [[ $TRAVIS_BRANCH == 'master' ]]; then
# Add :latest tag, if applicable
docker tag ${TRAVIS_COMMIT}:${TRAVIS_JOB_ID} ${BUILD_IMAGE_TAG}:latest
echo "INFO: Pushing supplemental tag to registry ${BUILD_IMAGE_TAG}:latest"
docker push ${BUILD_IMAGE_TAG}:latest
# Add commandbox version tag
docker tag ${TRAVIS_COMMIT}:${TRAVIS_JOB_ID} ${BUILD_IMAGE_TAG}:commandbox-${COMMANDBOX_VERSION}
echo "INFO: Pushing supplemental tag to registry ${BUILD_IMAGE_TAG}:commandbox-${COMMANDBOX_VERSION}"
docker push ${BUILD_IMAGE_TAG}:commandbox-${COMMANDBOX_VERSION}
fi
| true
|
be1306a30b67128e5709a67efcc4da5defe1813c
|
Shell
|
shawwn/scrap
|
/mkalias2
|
UTF-8
| 366
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/sh
dir="`pwd`"
cd "$(dirname "$0")"
home="`pwd`"
cd "${dir}"
old="$1"
new="$2"
if [ -z "$new" ]; then
echo " mkalias <existing-script-name> <new-name>"
echo " (aborts if <new-name> already exists)"
exit 1
fi
if [ -e "$new" ]; then
1>&2 echo "Exists, aborting: $new"
exit 1
fi
cat <<EOF | mkmod -q "$old" "$new"
#!/bin/sh
exec "$old" "\$@"
EOF
| true
|
7c660226c0c4646d3bc4fa055616116fcba90908
|
Shell
|
martinfjant/php-blog
|
/provisioner.sh
|
UTF-8
| 2,715
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
echo "--- Installing and configuring PHP 7.0 and MySQL ---"
sudo apt-get install python-software-properties -y
sudo LC_ALL=en_US.UTF-8 add-apt-repository ppa:ondrej/php -y
sudo apt-get update
sudo apt-get install php7.0 php7.0-fpm php7.0-mysql -y
sudo apt-get --purge autoremove -y
sudo service php7.0-fpm restart
sudo debconf-set-selections <<< 'mysql-server mysql-server/root_password password root'
sudo debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password root'
sudo apt-get -y install mysql-server mysql-client
sudo service mysql start
echo "--- What developer codes without errors turned on? Not you, master. ---"
PHP_ERROR_REPORTING=${PHP_ERROR_REPORTING:-"E_ALL"}
sudo sed -ri 's/^display_errors\s*=\s*Off/display_errors = On/g' /etc/php/7.0/fpm/php.ini
sudo sed -ri 's/^display_errors\s*=\s*Off/display_errors = On/g' /etc/php/7.0/cli/php.ini
sudo sed -ri "s/^error_reporting\s*=.*$//g" /etc/php/7.0/fpm/php.ini
sudo sed -ri "s/^error_reporting\s*=.*$//g" /etc/php/7.0/cli/php.ini
echo "error_reporting = $PHP_ERROR_REPORTING" >> /etc/php/7.0/fpm/php.ini
echo "error_reporting = $PHP_ERROR_REPORTING" >> /etc/php/7.0/cli/php.ini
echo "--- Installing and configuring Xdebug ---"
sudo apt-get install -y php-xdebug
sudo cat << EOF | sudo tee -a /etc/php/7.0/mods-available/xdebug.ini
zend_extension=xdebug.so
xdebug.remote_connect_back = 0
xdebug.remote_enable = 1
xdebug.remote_handler = "dbgp"
xdebug.remote_port = 9000
xdebug.remote_host = 192.168.33.1
xdebug.var_display_max_children = 512
xdebug.var_display_max_data = 1024
xdebug.var_display_max_depth = 10
xdebug.remote_log = /var/www/xdebug.log
EOF
if [ ! -f "/etc/php/7.0/fpm/conf.d/20-xdebug.ini" ]; then
sudo ln -s /etc/php/7.0/mods-available/xdebug.ini /etc/php/7.0/fpm/conf.d/20-xdebug.ini
else
echo '20-xdebug.ini symlink exists'
fi
sudo service php7.0-fpm restart
echo "--- Installing and configuring Nginx ---"
sudo apt-get install nginx -y
sudo cat > /etc/nginx/sites-available/default <<- EOM
server {
listen 80 default_server;
listen [::]:80 default_server ipv6only=on;
root /vagrant;
index index.php index.html index.htm;
server_name server_domain_or_IP;
location / {
try_files \$uri \$uri/ /index.php?\$query_string;
}
location ~ \.php\$ {
try_files \$uri /index.php =404;
fastcgi_split_path_info ^(.+\.php)(/.+)\$;
fastcgi_pass unix:/var/run/php/php7.0-fpm.sock;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME \$document_root\$fastcgi_script_name;
include fastcgi_params;
}
}
EOM
sed -i 's/sendfile on;/sendfile off;/' /etc/nginx/nginx.conf
sudo service nginx restart
| true
|
245ff8eb6e72a326d292f07ca50bc2aff37e788c
|
Shell
|
ohak/facilities-db
|
/4_deduping.sh
|
UTF-8
| 3,709
| 2.5625
| 3
|
[] |
no_license
|
################################################################################################
## DEDUPING
################################################################################################
## NOTE: This script requires that your setup the DATABASE_URL environment variable.
## Directions are in the README.md.
## DEDUPING
# Merge Child Care and Pre-K Duplicate records
echo 'Merging and dropping Child Care and Pre-K duplicates...'
time psql -d capdb -U dbadmin -f ./4_deduping/duplicates_ccprek_acs_hhs.sql
time psql -d capdb -U dbadmin -f ./4_deduping/duplicates_removeFAKE.sql
time psql -d capdb -U dbadmin -f ./4_deduping/duplicates_ccprek_doe_acs.sql
time psql -d capdb -U dbadmin -f ./4_deduping/duplicates_removeFAKE.sql
time psql -d capdb -U dbadmin -f ./4_deduping/duplicates_ccprek_doe_dohmh.sql
time psql -d capdb -U dbadmin -f ./4_deduping/duplicates_removeFAKE.sql
time psql -d capdb -U dbadmin -f ./4_deduping/duplicates_ccprek_acs_dohmh.sql
time psql -d capdb -U dbadmin -f ./4_deduping/duplicates_removeFAKE.sql
time psql -d capdb -U dbadmin -f ./4_deduping/duplicates_ccprek_dohmh.sql
time psql -d capdb -U dbadmin -f ./4_deduping/duplicates_removeFAKE.sql
time psql -d capdb -U dbadmin -f ./4_deduping/copy_backup4.sql
echo 'Merging and dropping remaining duplicates, pre-COLP...'
time psql -d capdb -U dbadmin -f ./4_deduping/duplicates_remaining.sql
time psql -d capdb -U dbadmin -f ./4_deduping/duplicates_removeFAKE.sql
echo 'Merging and dropping remaining duplicates, pre-COLP...'
time psql -d capdb -U dbadmin -f ./4_deduping/duplicates_sfpsd_relatedlots.sql
time psql -d capdb -U dbadmin -f ./4_deduping/duplicates_removeFAKE.sql
echo 'Creating backup before merging and dropping COLP duplicates...'
time psql -d capdb -U dbadmin -f ./4_deduping/copy_backup5.sql
echo 'Merging and dropping COLP duplicates by BIN...'
time psql -d capdb -U dbadmin -f ./4_deduping/duplicates_colp_bin.sql
echo 'Cleaning up remaining dummy values used for array_agg'
time psql -d capdb -U dbadmin -f ./4_deduping/duplicates_removeFAKE.sql
echo 'Merging and dropping COLP duplicates by BBL...'
time psql -d capdb -U dbadmin -f ./4_deduping/duplicates_colp_bbl.sql
echo 'Cleaning up remaining dummy values used for array_agg'
time psql -d capdb -U dbadmin -f ./4_deduping/duplicates_removeFAKE.sql
echo 'Merging related COLP duplicates on surrounding BBLs...'
time psql -d capdb -U dbadmin -f ./4_deduping/duplicates_colp_relatedlots_merged.sql
echo 'Cleaning up remaining dummy values used for array_agg'
time psql -d capdb -U dbadmin -f ./4_deduping/duplicates_removeFAKE.sql
echo 'Merging remaining COLP duplicates on surrounding BBLs Part 1...'
time psql -d capdb -U dbadmin -f ./4_deduping/duplicates_colp_relatedlots_colponly_p1.sql
echo 'Cleaning up remaining dummy values used for array_agg'
time psql -d capdb -U dbadmin -f ./4_deduping/duplicates_removeFAKE.sql
echo 'Merging remaining COLP duplicates on surrounding BBLs Part 2...'
time psql -d capdb -U dbadmin -f ./4_deduping/duplicates_colp_relatedlots_colponly_p2.sql
echo 'Cleaning up remaining dummy values used for array_agg'
time psql -d capdb -U dbadmin -f ./4_deduping/duplicates_removeFAKE.sql
# Merge records that are exactly the same from the same data source
echo 'Merging and dropping records that are exactly the same from the same data source...'
time psql -d capdb -U dbadmin -f ./4_deduping/duplicates_exactsame.sql
time psql -d capdb -U dbadmin -f ./4_deduping/duplicates_removeFAKE.sql
echo 'Deduped!'
echo 'Cleaning up duplicates in BIN and BBl arrays...'
time psql -d capdb -U dbadmin -f ./4_deduping/removeArrayDuplicates.sql
time psql -d capdb -U dbadmin -f ./4_deduping/copy_backup6.sql
| true
|
5907b69994c457dcf29c720e1ebe58e6e5f2707f
|
Shell
|
HalaEzzat/DBEngine
|
/insert.sh
|
UTF-8
| 2,969
| 3.25
| 3
|
[] |
no_license
|
flag="false"
db=$1
tb=$2
for file in /home/hala/dbEngine/DB/$db/*
do
if [ "$file" == "/home/hala/dbEngine/DB/$db/$tb" ]
then
flag="true"
break
fi
done
if [ "$flag" == 'true' ]
then
m=1
coun=1
for c in `cut -d: -f2 "/home/hala/dbEngine/DB/$db/$tb/schema"`
do
echo "enter value for : $c"
read val
pk=`awk -F: -v var1="$c" '{if($2==var1){print $3}}' /home/hala/dbEngine/DB/$db/$tb/schema`
pos=`awk -F: -v var1="$c" '{if($2==var1){print NR}}' /home/hala/dbEngine/DB/$db/$tb/schema`
if [ "$pk" == 'primarykey' ]
then
faild2="true"
while [ "$faild2" == 'true' ]
do
if [[ -z "$val" ]]
then
echo "$c can't be null because it's the primary key"
read val
else
found=`awk -F: -v var1="$pos" -v ver2="$val" '{if($var1==ver2){print var1}}' /home/hala/dbEngine/DB/$db/$tb/data`
if [[ -z "$found" ]]
then
faild2="false"
else
echo "ERROR: primarykey exists,enter valid primarykey"
read val
fi
fi
done
fi
if [[ -z "$val" ]]
then
val="null"
else
tp=`awk -F: -v var1="$c" '{if($2==var1){print $1}}' /home/hala/dbEngine/DB/$db/$tb/schema`
if [ "$tp" == 'int' ]
then
if ! [[ "$val" =~ ^[0-9]+$ ]]
then
echo "ERROR: you must enter an integer value,insert operation failed"
break
fi
elif [ "$tp" == 'String' ]
then
if ! [[ "$val" =~ ^[a-zA-Z]+$ ]]
then
echo "ERROR: you must enter characters only,insert operation failed"
break
fi
elif [ "$tp" == 'mix' ]
then
if ! [[ "$val" =~ ^[a-zA-Z0-9]+$ ]]
then
echo "ERROR: you must enter mix only,insert operation failed"
break
fi
fi
fi
colarr[$m]=$val
(( m = m + 1 ))
done
str=''
for (( i=1; i<=${#colarr[@]}; i++))
do
mycolon=":"
if [ "$i" == "${#colarr[@]}" ]
then
str=$str${colarr[$i]}
else
str=$str${colarr[$i]}$mycolon
fi
done
echo $str >> "/home/hala/dbEngine/DB/$db/$tb/data"
echo "successfully inserted one row"
else
echo "ERROR: no such table"
fi
| true
|
5a531970c7dd85c514d286cc4fc3f6fae9933c38
|
Shell
|
gungwald/utils
|
/unix/bin/reinstall
|
UTF-8
| 237
| 3.203125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
unset ID
. /etc/os-release
if [ $ID = 'fedora' -o $ID = 'rhel' -o $ID = 'centos' -o $ID = 'sles' -o $ID = 'opensuse' ]
then
echo `basename "$0"` does not know how to 'do' that yet.
else
apt-get install --reinstall "$@"
fi
| true
|
286bf9f2943fd82168a3f30d1b7f6ddb53b653a5
|
Shell
|
mirswamp/java-assess
|
/util/create_release_bundle.sh
|
UTF-8
| 3,373
| 3.875
| 4
|
[] |
no_license
|
#! /bin/bash
p=`basename $0`
## Create a java-assess release.
## Must be run from root directory of java-assess workspace.
make_tarball=true
make_cksum=true
p_swamp=/p/swamp
while [ $# -gt 0 ] ; do
case $1 in
--swamp-root)
p_swamp=$2
shift
;;
--no-tar)
make_tarball=false
;;
--no-ck)
make_cksum=false
;;
--test)
make_tarball=false
make_cksum=false
;;
-*)
echo $p: $1: unkown optarg 1>&2
exit 1
;;
*)
break
;;
esac
shift
done
if [ $# -lt 1 -o $# -gt 2 ] ; then
echo usage: $p dest-dir '[version]' 1>&2
exit 1
fi
if false ; then
## hack for vamshi's laptop environment
if [ ! -d $p_swamp ] ; then
p_swamp=$HOME/$p_swamp
echo $p: adjusting /p/swamp for vamshi
fi
fi
if [ -n "$SWAMP_FRAMEWORK_DEPENDENCIES" ]; then
if [ ! -d "$SWAMP_FRAMEWORK_DEPENDENCIES" ]; then
echo "$p: SWAMP_FRAMEWORK_DEPENDENCIES set, but not a directory ($SWAMP_FRAMEWORK_DEPENDENCIES)" 1>&2
exit 1
fi
## SWAMP_FRAMEWORK_DEPENDENCIES overrides p_swamp & --swamp-root
## XXX all uses of p_swamp should be removed
## set p_swamp here, to prevent --swamp-root propagation
p_swamp=/p/swamp
p_swamp_fw=$SWAMP_FRAMEWORK_DEPENDENCIES
elif [ ! -d $p_swamp ] ; then
echo $p: $p_swamp: swamp root dir missing 1>&2
exit 1
else
p_swamp_fw=${p_swamp}/frameworks
fi
update_platform=$p_swamp_fw/platform/update-platform
if [ ! -x $update_platform ] ; then
echo $p: platform update tool missing/unusable 1>&2
exit 1
fi
function md5_sum {
local dest_dir="$1"
(
cd "$dest_dir"
local checksumfile="md5sum"
if test "$(uname -s)" == "Darwin"; then
local MD5EXE="md5"
elif test "$(uname -s)" == "Linux"; then
local MD5EXE="md5sum"
fi
find . -type f ! -name "$checksumfile" -exec "$MD5EXE" '{}' ';' > "$checksumfile"
)
}
version="${2:-$(git tag | sort -V | tail -n 1)}"
if [ $# -eq 1 ] ; then
echo $p: $new_version: version from git
fi
vname=java-assess-$version
echo $p: $vname
## name it something instead of just using $1 all over the place
create_dir=$1
destdir="$create_dir/$vname/noarch"
if [ ! -d "${destdir}" ] ; then
mkdir -p "${destdir}" || exit 1
fi
releasedir="$PWD/release"
if [ -d ${releasedir}/swamp-conf ] ; then
cp -r ${releasedir}/swamp-conf ${destdir}
fi
cp -r ${releasedir}/in-files ${destdir}
s=${p_swamp_fw}/java/in-files
echo $p: $s: installing:
ls $s
## this was cp -r, but that copies symlinks instead of content; this issue
## happens across many swamp installers, should have a standard tool to use
cp -p $s/* ${destdir}/in-files
echo "$version" > "${destdir}/in-files/build_assess_driver_version.txt"
cp ${releasedir}/README.txt "$create_dir/$vname"
cp ${releasedir}/RELEASE_NOTES.txt "$create_dir/$vname"
cp ${releasedir}/LICENSE.txt "$create_dir/$vname"
echo $p: create run bundle
crb=./util/create_run_bundle.sh
if [ "$p_swamp" != "/p/swamp" ] ; then
crb="${crb} --swamp-root $p_swamp"
fi
$crb "${destdir}/in-files" "$version" || exit 1
## does it's own output
if [ "$p_swamp" != "/p/swamp" ] ; then
update_platform="${update_platform} --swamp-root $p_swamp"
fi
$update_platform --framework java --dir $destdir/in-files || exit 1
if $make_cksum ; then
echo $p: checksums
md5_sum $(dirname ${destdir})
fi
if $make_tarball ; then
echo $p roll-up tarball
## binary content in tar makes compression slow
tar cf $create_dir/$vname.tar -C $create_dir $vname
fi
| true
|
828af0290619fd9d415df07e240a07426da56390
|
Shell
|
JiyeongHa/colorLGN
|
/align_LGN_ROI.sh
|
UTF-8
| 12,410
| 2.765625
| 3
|
[] |
no_license
|
###Align everything to T1@main
###T1@main is anatomy aligned to main epi data. (from ?_main.results)
###
#0429 2020
#Align LGN_?_hk2 roi to main
#LGN_?_hk2 is a bigger roi than ?_hk
SN=14
SN_DIR=/group_hpc/WMShimLab2/PSY_Color/Colorv3/${SN}/Img_data
LGN_PD_DIR=${SN_DIR}/LGN_PD
PD_id=190314KYJ
PD_DIR=/group_hpc/WMShimLab/PD/${PD_id}
#make a directory for HK2 roi
mkdir -p ${LGN_PD_DIR}/HK2
chmod -R 777 ${LGN_PD_DIR}/HK2
cd ${LGN_PD_DIR}/HK2
#copy PD_mean_al (HK2 roi background), T1_PD (which PD_mean_al is aligned to) & HK2
#make sure that LGN_?_hk2+orig. is drawn on PD_mean_al!
3dcopy ${PD_DIR}/T1_SS+orig. ${LGN_PD_DIR}/HK2/T1_PD+orig.
3dcopy ${PD_DIR}/PD_mean_al+orig. ${LGN_PD_DIR}/HK2/meanPD@T1_PD
cp ${PD_DIR}/LGN_*_hk2+orig.* ${LGN_PD_DIR}/HK2/
#copy T1@main here
3dcopy ../../T1@main+orig. ${LGN_PD_DIR}/HK2/ -overwrite
#align T1_PD @ T1@main
align_epi_anat.py -dset1 meanPD+orig -dset2 T1@main+orig -dset1to2 \
-cost mi -deoblique off -feature_size 0.5 -ginormous_move -anat_has_skull no -overwrite
#merge LGN
3dmerge -gmax -prefix LGN_hk2 LGN_l_hk2+orig. LGN_r_hk2+orig.
#align LGN @ T1@main using T1_PD_al_mat
3dAllineate -cubic -1Dmatrix_apply meanPD_al_mat.aff12.1D \
-master ../../${SN}_main.results/pb01.${SN}_main.r01.volreg+orig \
-prefix LGN_hk@main+orig.HEAD \
LGN_hk2+orig.HEAD -overwrite
cd ${SN_DIR}
#LGN_hk2@main X LGN_thresmask
run=( 1 2 3 4 )
for r in "${run[@]}"
do
@Align_Centers -no_cp -base T1@main+orig. -dset loc_${r}.nii
done
#align epi to T1 (LGN)
subj=${SN}_loc
afni_proc.py -subj_id ${subj} \
-dsets loc_?.nii \
-blocks align volreg mask regress \
-volreg_base_dset loc_1.nii['0'] \
-volreg_align_e2a \
-align_opts_aea -giant_move \
-copy_anat T1@main+orig. \
-regress_censor_motion 0.5 \
-regress_censor_outliers 0.1
tcsh -xef proc.${subj} |& tee output.proc.${subj}
cd ${subj}.results/
run=( 01 02 03 04 )
for r in "${run[@]}"
do
3dClipLevel pb01.${subj}.r${r}.volreg+orig.HEAD >> clip.txt
3dTstat -mean -prefix r.${r}.base pb01.${subj}.r${r}.volreg+orig.HEAD'[0..$]'
done
clip=$(sort -n clip.txt | sed -n '1p')
run=( 01 02 03 04 )
for r in "${run[@]}"
do
3dcalc -a pb01.${subj}.r${r}.volreg+orig. -b r.${r}.base+orig. \
-expr "(100 * a/b) * step(b-$clip)" -prefix pb01.${subj}.r${r}.scaled
3dTstat -mean -prefix r.${r}.sc_base pb01.${subj}.r${r}.scaled+orig.HEAD'[0..$]'
# detrend linear trend
3dDetrend -polort 1 -prefix pb01.${subj}.r${r}.sc_dt pb01.${subj}.r${r}.scaled+orig
# add mean after detrend
3dcalc -a pb01.${subj}.r${r}.sc_dt+orig.HEAD -b r.${r}.sc_base+orig.HEAD \
-expr 'a+b' -prefix pb01.${subj}.r${r}.sc_dt_am
# calculate mean image
3dTstat -mean -prefix r.${r}.sc_dt_base pb01.${subj}.r${r}.sc_dt_am+orig.HEAD'[0..$]'
# high-pass filter
3dBandpass -prefix pb01.${subj}.r${r}.sc_dt_hp 0.01 99999 pb01.${subj}.r${r}.sc_dt_am+orig
# add mean after hp filter
3dcalc -a pb01.${subj}.r${r}.sc_dt_hp+orig.HEAD -b r.${r}.sc_dt_base+orig.HEAD \
-expr 'a+b' -prefix pb01.${subj}.r${r}.sc_dt_hp_am
#blur
3dmerge -1blur_fwhm 3 -doall -prefix rm.pb01.$subj.r${r}.sc_dt_hp_am_blur \
pb01.${subj}.r${r}.sc_dt_hp_am+orig
# and set boundaries using anat mask
3dcalc -a rm.pb01.${subj}.r${r}.sc_dt_hp_am_blur+orig -b full_mask.${subj}+orig. \
-expr 'a*b' -prefix pb01.${subj}.r${r}.sc_dt_hp_am_blur+orig
rm -f rm.pb01*
done
onsetdir=/group_hpc/WMShimLab/PSY_ColorStudy/
3dDeconvolve -input pb01.${subj}.r*.sc_dt_hp_am_blur+orig.HEAD \
-censor censor_${subj}_combined_2.1D \
-polort 3 \
-num_stimts 9 \
-stim_times 1 ${onsetdir}loc_color.txt 'BLOCK(12,1)' \
-stim_label 1 Color \
-stim_times 2 ${onsetdir}loc_gray.txt 'BLOCK(12,1)' \
-stim_label 2 Grayscale \
-stim_times 3 ${onsetdir}loc_fix.txt 'BLOCK(12,1)' \
-stim_label 3 Fixation \
-stim_file 4 motion_demean.1D'[0]' -stim_base 4 -stim_label 4 roll \
-stim_file 5 motion_demean.1D'[1]' -stim_base 5 -stim_label 5 pitch \
-stim_file 6 motion_demean.1D'[2]' -stim_base 6 -stim_label 6 yaw \
-stim_file 7 motion_demean.1D'[3]' -stim_base 7 -stim_label 7 dS \
-stim_file 8 motion_demean.1D'[4]' -stim_base 8 -stim_label 8 dL \
-stim_file 9 motion_demean.1D'[5]' -stim_base 9 -stim_label 9 dP \
-local_times \
-gltsym 'SYM: +1*Color -1*Grayscale' \
-glt_label 1 Color-Gray \
-gltsym 'SYM: +1*Grayscale -1*Color' \
-glt_label 2 Gray-Color \
-gltsym 'SYM: +2*Grayscale -1*Color -1*Fixation' \
-glt_label 3 Gray-all \
-gltsym 'SYM: +1*Color -1*Grayscale -1*Fixation' \
-glt_label 4 Color-all \
-gltsym 'SYM: +1*Color -1*Fixation' \
-glt_label 5 Color-Fix \
-gltsym 'SYM: +1*Grayscale -1*Fixation' \
-glt_label 6 Grayscale-Fix \
-gltsym 'SYM: +1*Color +1*Grayscale -2*Fixation' \
-glt_label 7 Color+Gray-Fix \
-float \
-jobs 8 \
-fout -tout -x1D X.xmat.1D -xjpeg X.jpg \
-x1D_uncensored X.nocensor.xmat.1D \
-bucket stats.all
3dbucket stats.all+orig.HEAD[23] -prefix stat.Color-Fix
3dcalc -a stat.Color-Fix+orig. -expr 'ispositive(a-3.936)' -prefix thresmask_v3_q.001
3dcalc -a stat.Color-Fix+orig. -expr 'ispositive(a-1.648)' -prefix thresmask_v3_p.1
###
SNs=(01 03 04 05 06 07 08 09 10 11 12 13 14)
for SN in "${SNs[@]}"
do
SN_DIR=/group_hpc/WMShimLab2/PSY_Color/Colorv3/${SN}/Img_data
LGN_PD_DIR=${SN_DIR}/LGN_PD
ROI_DIR=${SN_DIR}/forwardmodel/roi
3dcalc -a ${LGN_PD_DIR}/HK2/LGN_hk@main+orig. -b ${SN_DIR}/${SN}_loc.results/thresmask_v3_p.1+orig. \
-expr 'ispositive(a*b)' -prefix ${ROI_DIR}/LGN_hk2_p.1.nii -overwrite
done
for SN in "${SNs[@]}"
do
SN_DIR=/group_hpc/WMShimLab2/PSY_Color/Colorv3/${SN}/Img_data/${SN}_main.results
LGN_PD_DIR=${SN_DIR}/LGN_PD
NII_DIR=/group_hpc/WMShimLab2/PSY_Color/Colorv3/${SN}/Img_data/forwardmodel/nii
mv ${NII_DIR}/${SN}_main_combined_sc_dt_hp_am.nii ${NII_DIR}/${SN}_main_combined_sc_dt_hp_am_old.nii
cd ${SN_DIR}
3dTcat -prefix ${NII_DIR}/${SN}_main_combined_sc_dt_hp_am.nii \
pb01.${SN}_main.r0?.sc_dt_hp_am+orig.HEAD
done
###
SN=13
rm -rf ${SN}_main.results
rm -rf ${SN}_loc.results
rm -rf proc*
rm -rf T1@main+orig.*
epi_name=main
run=( 01 02 03 04 05 06 07 08 )
for r in "${run[@]}"
do
@Align_Centers -no_cp -base T1_${epi_name}_SS+orig. -dset ${epi_name}_${r}.nii
done
afni_proc.py -subj_id ${SN}_main -dsets main_01.nii main_02.nii main_03.nii \
main_04.nii main_05.nii main_06.nii main_07.nii main_08.nii \
-volreg_base_dset 'main_05.nii[0]' -copy_anat T1_${epi_name}_SS+orig. \
-regress_censor_motion 0.5 -regress_censor_outliers 0.1 -blocks align \
volreg mask regress
tcsh -xef proc.${SN}_main |& tee output.proc.${SN}_main
cd ${SN}_main.results/
align_epi_anat.py -anat2epi -anat T1_main_SS+orig -suffix _al_do -epi external_volreg_base+orig -epi_base 0 -epi_strip 3dAutomask -volreg off -tshift off -deoblique off -anat_has_skull no -giant_move -overwrite
3drename T1_main_SS_al_do+orig. T1@main
cp T1@main+orig.* ../
#align T1_main_SS to main_05.nii[0]
align_epi_anat.py -dset1 T1_main_SS+orig -dset2 main_05.nii'[0]' -dset1to2 \
-cost mi -deoblique off -feature_size 0.5 -ginormous_move -anat_has_skull no
3drename inplane_main_SS_al+orig. inplane@main
#align T1_main_SS to main_05.nii[0]
align_epi_anat.py -dset1 ../T1_main_SS+orig -dset2 inplane@main+orig -dset1to2 \
-cost mi -deoblique off -feature_size 0.5 -ginormous_move -anat_has_skull no
3drename T1_main_SS_al+orig. T1@main
#-------------------make thresmask_v3_p.05
SNs=(01 03 04 05 06 07 08 09 10 11 12 13 14 )
for SN in "${SNs[@]}"
do
SN_DIR=/group_hpc/WMShimLab2/PSY_Color/Colorv3/${SN}/Img_data
LOC_DIR=${SN_DIR}/${SN}_loc.results
LGN_PD_DIR=${SN_DIR}/LGN_PD
ROI_DIR=${SN_DIR}/forwardmodel/roi
MAIN_DIR=${SN_DIR}/${SN}_main.results
cd ${LOC_DIR}
3dcalc -a stat.Color-Fix+orig. -expr 'ispositive(a-1.964)' -prefix thresmask_v3_p.05
3dcalc -a ${LGN_PD_DIR}/HK2/LGN_hk@main+orig. -b ${SN_DIR}/${SN}_loc.results/thresmask_v3_p.05+orig. \
-expr 'ispositive(a*b)' -prefix ${ROI_DIR}/LGN_hk2_p.05.nii -overwrite
done
#align hk3
#-------------------make thresmask_hk3_p.05
SN=13
SN_DIR=/group_hpc/WMShimLab2/PSY_Color/Colorv3/${SN}/Img_data
LGN_PD_DIR=${SN_DIR}/LGN_PD
PD_id=190107JHY
PD_DIR=/group_hpc/WMShimLab/PD/${PD_id}
mkdir -p ${LGN_PD_DIR}/HK3
cd ${LGN_PD_DIR}/HK3
chmod -R 777 ${LGN_PD_DIR}/HK3
#3dcopy ${PD_DIR}/T1_SS+orig. ${LGN_PD_DIR}/HK3/T1_PD+orig.
3dcopy ${PD_DIR}/PD_mean_al+orig. ${LGN_PD_DIR}/HK3/meanPD@T1_PD
cp ${PD_DIR}/LGN_l_hk3.nii.gz ${LGN_PD_DIR}/HK3/
cp ${PD_DIR}/LGN_r_hk3.nii.gz ${LGN_PD_DIR}/HK3/
3dcopy ../HK2/T1@main+orig. ${LGN_PD_DIR}/HK3/ -overwrite
cp ../HK2/*_al_mat.aff12.1D .
#align PD @ T1@main
#align_epi_anat.py -dset1 meanPD@T1_PD+orig -dset2 T1@main+orig -dset1to2 \
# -cost mi -deoblique off -feature_size 0.5 -ginormous_move -anat_has_skull no -overwrite
#align T1_PD @ T1@main
align_epi_anat.py -dset1 PD_mean_SS_al+orig. -dset2 T1@main+orig -dset1to2 \
-cost mi -deoblique off -feature_size 0.5 -ginormous_move -anat_has_skull no -overwrite
lr=(l r)
for r in "${lr[@]}"
do
#align LGN @ T1@main using T1_PD_al_mat
3dAllineate -final NN -1Dmatrix_apply PD_mean_SS_al_al_mat.aff12.1D \
-master ../../${SN}_main.results/pb01.${SN}_main.r01.volreg+orig \
-prefix LGN_${r}_hk3@main+orig.HEAD \
LGN_${r}_hk3.nii -overwrite
done
#merge LGN
3dmerge -gmax -prefix LGN_hk3@main LGN_l_hk3@main+orig. LGN_r_hk3@main+orig. -overwrite
SNs=(01 02 03 05 06 07 08 09 10)
for SN in "${SNs[@]}"
do
SN_DIR=/group_hpc/WMShimLab2/PSY_Color/Colorv3/${SN}/Img_data
LOC_DIR=${SN_DIR}/${SN}_loc.results
LGN_PD_DIR=${SN_DIR}/LGN_PD
ROI_DIR=${SN_DIR}/forwardmodel/roi
3dcalc -a ${LGN_PD_DIR}/HK3/LGN_hk3@main+orig. -b ${SN_DIR}/${SN}_loc.results/thresmask_v3_p.05+orig. \
-expr 'ispositive(a*b)' -prefix ${ROI_DIR}/LGN_hk3_p.05.nii
done
##############################################
#align hk3
#-------------------make thresmask_hk3_p.05
SN=10CES
SN_DIR=/group_hpc/WMShimLab2/PSY_Color/Colorv3/${SN}/Img_data
LGN_PD_DIR=${SN_DIR}/LGN_PD
PD_id=181022CES
PD_DIR=/group_hpc/WMShimLab/PD/${PD_id}
mkdir -p ${LGN_PD_DIR}/HK4
cd ${LGN_PD_DIR}/HK4
chmod -R 777 ${LGN_PD_DIR}/HK4
#3dcopy ${PD_DIR}/T1_SS+orig. ${LGN_PD_DIR}/HK3/T1_PD+orig.
3dcopy ${PD_DIR}/PD_mean_al+orig. ${LGN_PD_DIR}/HK4/meanPD@T1_PD
cp ${PD_DIR}/LGN_l_hk4_rsm.nii ${LGN_PD_DIR}/HK4/
cp ${PD_DIR}/LGN_r_hk4_rsm.nii ${LGN_PD_DIR}/HK4/
3dcopy ../HK2/T1@main+orig. ${LGN_PD_DIR}/HK4/ -overwrite
cp ../HK2/*_al_mat.aff12.1D .
#align PD @ T1@main
#align_epi_anat.py -dset1 meanPD@T1_PD+orig -dset2 T1@main+orig -dset1to2 \
# -cost mi -deoblique off -feature_size 0.5 -ginormous_move -anat_has_skull no -overwrite
lr=(l r)
for r in "${lr[@]}"
do
#align LGN @ T1@main using T1_PD_al_mat
3dAllineate -final NN -1Dmatrix_apply PD_mean_al_al_mat.aff12.1D \
-master ../../${SN}_main.results/pb01.${SN}_main.r01.volreg+orig \
-prefix LGN_${r}_hk4@main+orig.HEAD \
LGN_${r}_hk4_rsm.nii -overwrite
done
#merge LGN
3dmerge -gmax -prefix LGN_hk4@main LGN_l_hk4@main+orig. LGN_r_hk4@main+orig. -overwrite
SNs=(01 02 03 05 06 07 08 09 10)
for SN in "${SNs[@]}"
do
SN_DIR=/group_hpc/WMShimLab2/PSY_Color/Colorv3/${SN}/Img_data
LOC_DIR=${SN_DIR}/${SN}_loc.results
LGN_PD_DIR=${SN_DIR}/LGN_PD
ROI_DIR=${SN_DIR}/forwardmodel/roi
3dcalc -a ${LGN_PD_DIR}/HK4/LGN_hk4@main+orig. -b ${SN_DIR}/${SN}_loc.results/thresmask_v3_p.05+orig. \
-expr 'ispositive(a*b)' -prefix ${ROI_DIR}/LGN_hk4_p.05.nii
done
###############33
| true
|
6abec9eeade5787ea2c8510513a77127c5d88f75
|
Shell
|
KlausWogelius/bin
|
/master
|
UTF-8
| 784
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
#kommando:
# master $1 $2
#beskrivelse:
# kommandoen skaber en virtuel maskine ud fra en installations-iso-fil
# "$1.iso" iso-filens navn
# "$2.qcow2" er masterens navn
#præmis:
# iso-filen eksisterer og hedder "~/.qemu/iso/$1.iso"
#output:
# masteren kommer til at hedde "~/.qemu/vm/$2.qcow2"
# Masteren får en "HD" på 10G
#familie: qcow, vm
#bemærkninger:
# brug kommandoen "klon" for at lave en arbejdskopi af masteren
# brug kommandoen "qcow2" for at køre en master eller en arbejdskopi
#lav en fil til masteren
qemu-img create -f qcow2 ~/.qemu/vm/$2.qcow2 10G
chmod 644 ~/.qemu/vm/$2.qcow2
#installer operativsystemet i masteren vha. installations-iso'en
qemu-system-x86_64 -enable-kvm -boot d -cdrom ~/.qemu/iso/$1.iso -hda ~/.qemu/vm/$2.qcow2
| true
|
1cf10fc1b92e737bf622d99c7fb78efda4f3b8cd
|
Shell
|
tomasbasham/dotfiles
|
/.chezmoiscripts/run_onchange_before_10_compile_terminfo_descriptors.tmpl
|
UTF-8
| 483
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env sh
# -*-mode:sh-*- vim:ft=sh
# compile_terminfo_descriptors
# =============================================================================
# Compile tmux specific terminfo profiles with "tic" to support italics inside
# tmux.
#
# Some system might already have them, but in April 2021 most still don't
# provide them.
# The profiles will be compiled to ~/.terminfo.
tic "{{ .chezmoi.sourceDir }}/tmux.terminfo"
tic "{{ .chezmoi.sourceDir }}/tmux-256color.terminfo"
| true
|
ca6fe3a8218c96a62887c3bb8bd1ff90c276f8f0
|
Shell
|
adampower48/ca4022
|
/setup_scripts/hive_install.sh
|
UTF-8
| 871
| 2.671875
| 3
|
[] |
no_license
|
# Download hive
wget https://ftp.heanet.ie/mirrors/www.apache.org/dist/hive/hive-3.1.2/apache-hive-3.1.2-bin.tar.gz
tar -xvzf apache-hive-3.1.2-bin.tar.gz
# Set hive home
export HIVE_HOME=$(pwd)/apache-hive-3.1.2-bin
# Add hive vars to bashrc
echo '
export HIVE_HOME='$HIVE_HOME'
export PATH=$PATH:$HIVE_HOME/bin
' >> ~/.bashrc
# Copy default config file
cp $HIVE_HOME/conf/hive-default.xml.template $HIVE_HOME/conf/hive-site.xml
# Replace wonky values in config
sed -i 's/system:user.name/user.name/g' $HIVE_HOME/conf/hive-site.xml
sed -i 's/system:java.io.tmpdir/java.io.tmpdir/g' $HIVE_HOME/conf/hive-site.xml
sed -i 's// /' $HIVE_HOME/conf/hive-site.xml
# Replace guava jar
rm $HIVE_HOME/lib/guava*.jar
cp $HADOOP_HOME/share/hadoop/hdfs/lib/guava*.jar $HIVE_HOME/lib/
# Init database
rm -rf metastore_db
$HIVE_HOME/bin/schematool -dbType derby -initSchema
| true
|
d0c5bf6d55ef2d0add297c9d47ce5747f39959a0
|
Shell
|
Newton-Climate/CLARREO_OSSE
|
/submit_parallel
|
UTF-8
| 2,035
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/sh
#submit from the directory where the executable is
#==============================================================================
# This is a CCSM batch job script for scs
#==============================================================================
## BATCH INFO
#PBS -l nodes=4:ppn=8:lr
#PBS -l walltime=04:00:00
#PBS -q lr_batch
#PBS -A ac_radiation
#PBS -j oe
#PBS -N clarreo
#EXPORT CURRENT ENVIRONMENT
#PBS -V
#End of options
#OS SETUP
module load openmpi/1.2.8-intel
cd ${PBS_O_WORKDIR}
ulimit -s unlimited
export MKL_NUM_THREADS=1
#NAME OF INPUT FILE THAT WE WISH TO RUN WITH (not the full path and minus .nc)
input_name=b30.042a.cam2.h0.2000-07
input_name2=b30.042a.cam2.h0.2050-07
#CREATE NAMES
base_dir_path=`pwd`
full_path_binary=${base_dir_path}"/radiation"
settings_forcing="/global/scratch/drfeldma/qsub/settings_forcing.inp"
base_input_path="/global/scratch/drfeldma/esg/"
#DEBUG INFO
#echo
#echo "NAMES"
#echo "current working directory = "${base_dir_path}
#echo "full path for the executable = "${full_path_binary}
#echo "full path for the settings_forcing = "${settings_forcing}
#echo "base path to the input directory = "${base_input_path}
#echo "END NAMES"
#echo
#SETUP THE SYMBOLIC LINK
ln -v --symbolic /global/scratch/drfeldma/qsub/modroot.in .
#EXECUTABLE ARGUMENTS
input_cam=${base_input_path}${input_name}".nc"
output_cam=${input_name}".out.nc"
input_cam2=${base_input_path}${input_name2}".nc"
output_cam2=${input_name2}".out.nc"
#DEBUG INFO
#echo
#echo "FILE PATHS"
#echo "full path for the netcdf input file = "${input_cam}
#echo "name of the output netcdf file = "${output_cam}
#echo
#echo
#echo "full path for the netcdf input file = "${input_cam2}
#echo "name of the output netcdf file = "${output_cam2}
#echo "END FILE PATHS"
#echo
date
#EXECUTE
mpiexec -np 32 ${full_path_binary} ${input_cam} ${input_cam} ${output_cam} < ${settings_forcing}
date
#mpiexec -np 6 ${full_path_binary} ${input_cam2} ${input_cam2} ${output_cam2} < ${settings_forcing}
exit
| true
|
34293f41cb7d9cf204c8d81f71b3370773e97579
|
Shell
|
eisop/plume-lib
|
/.travis-build.sh
|
UTF-8
| 918
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Travis CI build driver for plume-lib.
#
# Environment:
#   TRAVIS_BUILD_DIR - checkout directory (set by Travis)
#   TYPECHECK        - "true" => run the Checker Framework type checkers,
#                      anything else => ordinary build + tests

ROOT="$TRAVIS_BUILD_DIR/.."

# Fail the whole script if any command fails
set -e

if [[ "${TYPECHECK}" != "true" ]]; then
  # Ordinary build: everything except the Emacs targets, plus the test
  # suite, with coverage reporting enabled.
  make USE_CODECOV=1 all-but-emacs check
else
  ## Build Checker Framework
  # The "cmd || cmd" pattern retries the clone once to ride out transient
  # network failures on CI.
  (cd "$ROOT" && git clone https://github.com/typetools/checker-framework.git) || (cd "$ROOT" && git clone https://github.com/typetools/checker-framework.git)
  # This also builds annotation-tools and jsr308-langtools
  (cd "$ROOT/checker-framework/" && ./.travis-build-without-test.sh downloadjdk)
  export CHECKERFRAMEWORK="$ROOT/checker-framework"

  ## No need to do this -- plume-lib already exists.
  # ## Obtain plume-lib
  # (cd "$ROOT" && git clone https://github.com/mernst/plume-lib.git) || (cd "$ROOT" && git clone https://github.com/mernst/plume-lib.git)

  # -Afilenames is to prevent the job from timing out if it goes 10 minutes without output
  make -C "$ROOT/plume-lib/java" JAVACHECK_EXTRA_ARGS=-Afilenames check-types
fi
| true
|
ebc6ad7957aa8478a3e3003043e7fabfa658997d
|
Shell
|
Datanexus/kafka
|
/roles/cruisecontrol/templates/linkedin.cruisecontrol.j2
|
UTF-8
| 2,395
| 3.453125
| 3
|
[] |
permissive
|
#!/usr/bin/env sh
# (c) 2016 DataNexus Inc. All Rights Reserved.
# Licensed software not for distribution
#
# Service wrapper for LinkedIn Cruise Control (Jinja2 template rendered by
# Ansible).  Locates the running JVM via jcmd and supports start / stop /
# restart.  The shebang is plain sh, so this file must stay POSIX-clean:
# the original used bash-only '[[ ... ]]' tests, which are false/fatal
# under dash and made "stop"/"restart" never signal the process.

if [ $# -eq 0 ] || [ "$1" = "-h" ] ; then
  printf "Usage:\\t{{ cruisecontrol.service_name}} [-h]\\thelp\\n"
  printf "Usage:\\tsudo -H -u {{ cruisecontrol_user }} {{ cruisecontrol.user_service }}/{{ cruisecontrol.service_name}} [ start | stop | restart ]\\n"
  exit 0
fi

# which jcmd to use: prefer $JAVA_HOME/bin, fall back to PATH.
if [ -z "$JAVA_HOME" ]; then
  JCMD="jcmd"
else
  JCMD="$JAVA_HOME/bin/jcmd"
fi

DATE=$(command -v date)

# Print the PID(s) of any running Cruise Control JVM (empty when none).
# May emit several PIDs, one per line, if multiple instances match.
find_pid() {
  ${JCMD} | grep com.linkedin.kafka.cruisecontrol.KafkaCruiseControlMain | /usr/bin/cut -d " " -f 1
}

if [ "$1" = "start" ] ; then
  pid=$(find_pid)
  # Only start when no instance is already running.
  if [ -z "${pid}" ]; then
    # The unquoted $(date ...) substitution intentionally splits the
    # timestamp into the two %s fields of the format string.
    printf "[%s %s] INFO Starting {{ cruisecontrol.service_name }}...\\n" $(${DATE} +'%Y-%m-%d %H:%M:%S') | /usr/bin/tee -a {{ cruisecontrol.config.log }}/{{ cruisecontrol.service_name }}.log
    cd {{ cruisecontrol.source_dir}}/{{ cruisecontrol.service_name }} || exit 1
    /usr/bin/nohup /usr/local/src/cruise-control/kafka-cruise-control-start.sh {{ cruisecontrol.config_file }} >> {{ cruisecontrol.config.log }}/{{ cruisecontrol.service_name }}.log 2>&1 &
  fi
elif [ "$1" = "stop" ] ; then
  pid=$(find_pid)
  # ${pid} is left unquoted in ps/kill on purpose: it may contain several
  # whitespace-separated PIDs that must be passed as separate arguments.
  if [ -n "${pid}" ] && [ -n "$(ps -p ${pid} -o pid=)" ]; then
    printf "[%s %s] INFO Stopping {{ cruisecontrol.service_name }}...\\n" $(${DATE} +'%Y-%m-%d %H:%M:%S') | /usr/bin/tee -a {{ cruisecontrol.config.log }}/{{ cruisecontrol.service_name }}.log
    /usr/bin/kill ${pid}
  fi
elif [ "$1" = "restart" ] ; then
  # Stop (when running), then unconditionally start a fresh instance.
  pid=$(find_pid)
  if [ -n "${pid}" ] && [ -n "$(ps -p ${pid} -o pid=)" ]; then
    printf "[%s %s] INFO Stopping {{ cruisecontrol.service_name }}...\\n" $(${DATE} +'%Y-%m-%d %H:%M:%S') | /usr/bin/tee -a {{ cruisecontrol.config.log }}/{{ cruisecontrol.service_name }}.log
    /usr/bin/kill ${pid}
  fi
  printf "[%s %s] INFO Starting {{ cruisecontrol.service_name }}...\\n" $(${DATE} +'%Y-%m-%d %H:%M:%S') | /usr/bin/tee -a {{ cruisecontrol.config.log }}/{{ cruisecontrol.service_name }}.log
  cd {{ cruisecontrol.source_dir}}/{{ cruisecontrol.service_name }} || exit 1
  /usr/bin/nohup /usr/local/src/cruise-control/kafka-cruise-control-start.sh {{ cruisecontrol.config_file }} >> {{ cruisecontrol.config.log }}/{{ cruisecontrol.service_name }}.log 2>&1 &
fi
| true
|
8acb5a038d6ad0ba8cd7412607dccb60728a4fbc
|
Shell
|
ceremcem/smith-sync
|
/btrfs-sync.bak
|
UTF-8
| 4,480
| 3.953125
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/bash
# btrfs-sync: incrementally replicate btrfs snapshots from one disk to another.
set -eu -o pipefail
# safe_source: sources the given file (if any) and then refreshes two globals:
#   _dir  - directory of the current source file (derived from BASH_SOURCE)
#   _sdir - directory of the script itself (symlinks resolved via readlink -f)
# The trailing bare call with no argument just (re)computes _dir/_sdir.
safe_source () { [[ ! -z ${1:-} ]] && source $1; _dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"; _sdir=$(dirname "$(readlink -f "$0")"); }; safe_source
# end of bash boilerplate
# Pull in the helper library used below (get_snapshot_roots, find_sent_subs,
# find_missing_subs, mount_point_of, echo_blue/yellow/green, timers, ...).
safe_source $_sdir/lib/all.sh
#######################################
# Print an optional error banner plus the usage text, then terminate.
# Arguments:
#   $1 - optional error message; when present the script exits with 1.
# Outputs:
#   error banner and usage synopsis on stdout
# Exits:
#   0 when called without a reason (plain --help), 1 when a reason is given.
#   (Fix: the original always exited 0, so usage errors reported success.)
#######################################
show_help(){
  local script
  script=$(basename "$0")
  local reason=${1:-}
  local rc=0
  if [[ -n $reason ]]; then
    rc=1
    cat <<REASON
-------------------------------
ERROR: $reason
-------------------------------
REASON
  fi
  cat <<HELP
$script [options] /path/to/source /path/to/destination
Options:
--dry-run : Dry run, don't touch anything actually
HELP
  exit "$rc"
}
# for debugging
#set -x

# Parse command line arguments
# ---------------------------
# Initialize parameters
dry_run=false
# ---------------------------
# Hand-rolled option loop: flags are consumed in place while positional
# arguments are copied into $_arg1, $_arg2, ... ; the original "$@" is
# restored afterwards from the saved copy in ${args[@]}.
args=("$@")
_count=1
while :; do
    key="${1:-}"
    case $key in
        -h|-\?|--help|'')
            show_help # Display a usage synopsis.
            exit
            ;;
        # --------------------------------------------------------
        --dry-run) shift
            dry_run=true
            ;;
        # --------------------------------------------------------
        -*)
            echo
            echo "Unknown option: $1"
            # NOTE(review): show_help terminates the script itself, so the
            # 'exit 1' below is never reached and unknown options currently
            # exit with show_help's status -- confirm the intended code.
            show_help
            exit 1
            ;;
        *) # generate the positional arguments: $_arg1, $_arg2, ...
            [[ ! -z ${1:-} ]] && declare _arg$((_count++))="$1" && shift
    esac
    [[ -z ${1:-} ]] && break
done; set -- "${args[@]}"
# use $_arg1 in place of $1, $_arg2 in place of $2 and so on, "$@" is intact

# Positional arguments: source and destination btrfs paths.
s=${_arg1:-}
d=${_arg2:-}
[[ -z $s ]] && show_help "Source can not be empty"
[[ -z $d ]] && show_help "Destination can not be empty"
# Re-exec the whole script under sudo when not already root (btrfs
# send/receive and subvolume deletion require root privileges).
[[ $(whoami) = "root" ]] || { sudo $0 "$@"; exit 0; }
# Banner: announce what is about to be synced (and whether it is a dry run).
start_timer
[[ $dry_run = true ]] && dry_run_str="(dry run)"
echo "=====================${dry_run_str:-}=========================="
echo "from $s to $d "
echo
echo "Following snapshot roots will be synced:"
for _snap_root in $(get_snapshot_roots $s); do
    echo "* $_snap_root"
done
echo "==============================================="
echo
# Fixme: Following command takes too long
#start=$SECONDS
#for i in 1; do
#    echo "pass $i"
#    find_sent_subs $s $d > x2
#done
#echo "took: $(( $SECONDS - $start ))"

# source and destination should be on different disks
require_different_disks $s $d

# Mount points anchor the snapshot paths relative to each filesystem.
src_mnt=$(mount_point_of $s)
dst_mnt=$(mount_point_of $d)

# One pass per snapshot root found under the source.
for _snap_root in $(get_snapshot_roots $s); do
    # Snapshot root path relative to the source mount point.
    snap_root=${_snap_root#$src_mnt/}
    echo_blue "Syncing $snap_root -> $dst_mnt/..."
    # create target directory structure
    mkdir -p "$dst_mnt/$snap_root"
    echo "--- already sent: ---"
    # Snapshots already present on the destination; the last entry is taken
    # as the newest (assumes find_sent_subs lists oldest-first -- TODO confirm).
    already_sent=$(find_sent_subs "$src_mnt/$snap_root" "$dst_mnt/$snap_root")
    echo "$already_sent"
    # Extract the final whitespace-separated entry of the list.
    last_sent=$(echo $already_sent | rev | cut -d " " -f 1 | rev)
    #echo "LAST SENT: $last_sent"
    echo "--- incomplete transfers ---"
    # Destination subvolumes left behind by interrupted 'btrfs receive'
    # runs are deleted so they can be re-sent cleanly.
    for incomplete in `list_subvol_below $dst_mnt/$snap_root true`; do
        if is_subvolume_incomplete $incomplete; then
            echo_yellow "Found incomplete snapshot: $incomplete"
            if [[ $dry_run = false ]]; then
                btrfs sub del $incomplete
            else
                echo "(This is dry run, won't delete anything actually)"
            fi
        fi
    done
    echo "--- missing: ---"
    snapshots=$(list_subvol_below $src_mnt/$snap_root)
    for missing in `find_missing_subs "$src_mnt/$snap_root" "$dst_mnt/$snap_root"`; do
        # Skip snapshots older than the newest one already on the destination.
        # Uses lexicographic '\<' comparison -- assumes snapshot names sort
        # chronologically (e.g. timestamped) -- TODO confirm naming scheme.
        if [[ ! -z $last_sent ]]; then
            if [ $missing \< $last_sent ]; then
                echo_yellow "Skipping older snapshot: $(basename $missing)"
                continue
            fi
        fi
        # Prefer an incremental transfer: when a previous snapshot exists,
        # pass it to 'btrfs send -p' as the parent; otherwise send in full.
        parent=$(find_prev_snap $missing $snapshots)
        if [[ -z $parent ]]; then
            echo_yellow "No parent found for $missing, sending whole snapshot"
            _parent_arg=
        else
            echo_blue "Sending $(basename $missing) (based on $(basename $parent)) "
            _parent_arg="-p $parent"
        fi
        if [[ $dry_run = true ]]; then
            echo "(This is dry run, won't send anything actually.)"
        else
            # pv shows transfer progress; receive's own output is discarded.
            btrfs send -q $_parent_arg $missing | pv | btrfs receive $dst_mnt/$snap_root/ > /dev/null
            echo_green "...$missing succesfully sent."
            # Append an audit trail entry on the source filesystem.
            echo "$missing has been sent to $dst_mnt/$snap_root" >> "$s/log.txt"
        fi
    done
    echo "end of syncing $snap_root"
done

show_timer "Completed in: "
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.