blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
42746e2bc65a79d8ad72dbfc8a3eff83e08a1678 | Shell | NunnaRupaSri/shell-scripting | /Modules/04-variables.sh | UTF-8 | 169 | 2.875 | 3 | [] | no_license | #!/usr/bin/env bash
# Demonstrate the equivalent forms of variable expansion in bash.
a=10
echo "$a"      # plain expansion
echo "${a}"    # braced expansion, same value
echo "${a}00"  # braces required to append text directly: prints 1000
# %F is the ISO date, YYYY-MM-DD
DATE=$(date +%F)
# -e was unnecessary here: the message contains no escape sequences
echo "Good Morning, Today date is $DATE"
# inside $(( )) variables do not need the $ prefix
i=$((a + 8))
echo "$i"
echo -e "value of B_E = $B_E" | true |
ee0e562ca3a81bdfea8e535294db3116114bafae | Shell | SergioDiaz90/holberton-system_engineering-devops | /0x04-loops_conditions_and_parsing/6-superstitious_numbers | UTF-8 | 303 | 3.609375 | 4 | [] | no_license | #!/usr/bin/env bash
# Print the numbers 1 through 20; after each "unlucky" number (4, 9, 17)
# print an extra line naming the country the superstition comes from.
cnt=1
until [ "$cnt" -gt 20 ]; do
	echo "$cnt"
	case "$cnt" in
	4) echo "bad luck from China" ;;
	9) echo "bad luck from Japan" ;;
	17) echo "bad luck from Italy" ;;
	esac
	cnt=$((cnt + 1))
done
| true |
07e409b1fd5a9d01b9d919e4400ba1f2fb485f2e | Shell | madhusudancs/test-infra | /experiment/bigquery/flakes.sh | UTF-8 | 1,592 | 3.28125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Usage: ./flakes.sh | tee flakes-$(date +%Y-%m-%d).json
# This script uses flakes.sql to find job flake data for the past week
# The script then filters jobs down to those which flake more than 4x/day
# And also notes any test in those jobs which flake more than 1x/day
# The bq output is cached per day in /tmp so repeated runs reuse it.
out="/tmp/flakes-$(date +%Y-%m-%d).json"
if [[ ! -f "${out}" ]]; then
  # Must abort the script itself on a missing tool: the previous
  # "(echo ... && exit 1)" only exited a throwaway subshell, so the
  # script kept running without bq installed.
  command -v bq >/dev/null || { echo 'Cannot find bq on path. Install gcloud' 1>&2; exit 1; }
  echo "Flakes results will be available at: ${out}" 1>&2
  bq query --format=prettyjson < "$(dirname "${0}")/flakes.sql" > "${out}"
fi
command -v jq >/dev/null || { echo 'Cannot find jq on path. Install jq' 1>&2; exit 1; }
echo 'Jobs flaking more than 4x/day:' 1>&2
# 28 flakes/week ~ 4/day per job; 7/week ~ 1/day per individual test.
jq '
[(.[] | select(.flakes|tonumber > 28) | {(.job): {
consistency: .commit_consistency,
flakes: .flakes,
flakiest: ([(.flakiest[] | select(.flakes|tonumber >= 7) | {
(.name): .flakes}) ])| add
}})] | add' "${out}"
echo "Full flake data saved to: ${out}" 1>&2
| true |
e94a1701dddc5d5a63f6d73c581662bcaaad39b5 | Shell | azukiapp/heroku-buildpack-elixir | /bin/compile | UTF-8 | 2,327 | 3.6875 | 4 | [] | no_license | #!/bin/sh
##
# usage: bin/compile <build-dir> <cache-dir>

# Prefix each stdin line with a space so build output nests visually
# under the surrounding "----->" status messages (sed -u = unbuffered,
# so lines appear as soon as the producer emits them).
indent() {
	sed -u -e 's/^/ /'
}
# Abort on the first failing command for the rest of the build.
set -e
# Buildpack root: two directories above this script.
bpdir=$(cd $(dirname $(dirname $0)); pwd)
mkdir -p "$1" "$2"
# Canonicalise the build and cache directories; bail out silently if
# either path could not be resolved.
build=$(cd "$1/" && pwd)
test -z ${build} && exit
cache=$(cd "$2/" && pwd)
test -z ${cache} && exit
DEFAULT_OTP_VERSION="R16B01"
# The app may pin its own OTP version via a marker file in the build dir.
if [ -f ${build}/.preferred_otp_version ]; then
OTP_VERSION=$(cat ${build}/.preferred_otp_version)
fi
# := assigns the default into OTP_VERSION if it is still unset/empty.
erl_ver=${OTP_VERSION:=${DEFAULT_OTP_VERSION}}
erl_tarball=${erl_ver}.tar.gz
OTP_TARBALL_URL="https://s3.amazonaws.com/azuki-buildpack-erlang/${erl_tarball}"
ERLROOT=${build}/otp
DEFAULT_ELIXIR_VERSION="master"
# Same pinning mechanism for the Elixir version.
if [ -f ${build}/.preferred_elixir_version ]; then
ELIXIR_VERSION=$(cat ${build}/.preferred_elixir_version)
fi
ex_ver=${ELIXIR_VERSION:=${DEFAULT_ELIXIR_VERSION}}
ex_tarball=${ex_ver}.tar.gz
ELIXIR_TARBALL_URL="https://github.com/elixir-lang/elixir/archive/${ex_tarball}"
EXROOT=${build}/ex
echo "-----> Using Erlang/OTP $erl_ver"
# Download in a subshell so the early "already cached" exit only leaves
# the subshell, not the whole script.
(
set -e
# Already cached?
test -f ${cache}/${erl_tarball} && exit
rm -rf ${cache}/* # be sure not to build up cruft
cd ${cache}
echo "-------> Fetching Erlang/OTP $erl_ver"
curl -sO ${OTP_TARBALL_URL} || exit 1
)
echo "-------> Unpacking Erlang/OTP $erl_ver"
mkdir -p ${ERLROOT}
tar zxf ${cache}/${erl_tarball} -C ${ERLROOT} --strip-components=2
echo "-------> Installing Erlang/OTP $erl_ver"
# /app is the runtime prefix on the dyno; expose the unpacked OTP there.
ln -s ${ERLROOT} /app/otp
${ERLROOT}/Install -minimal /app/otp
PATH=/app/otp/bin:$PATH
HOME=/app
# Ship the bundled rebar unless the app provides its own.
if [ ! -e "rebar" ]; then
echo "-----> Installing Rebar from buildpack"
cp ${bpdir}/opt/rebar $build
PATH=${bpdir}/opt:$PATH
fi
echo "-----> Using Elixir $ex_ver"
# Download in a subshell so the "already cached" exit only leaves the
# subshell, not the whole script.
(
set -e
# Already cached?
test -f ${cache}/${ex_tarball} && exit
rm -rf ${cache}/* # be sure not to build up cruft
cd ${cache}
echo "-------> Fetching Elixir $ex_ver"
# Fix: the original passed both -O and -o, i.e. two output options for
# one URL, which makes curl warn and ignore one of them. A single
# explicit -o names the download deterministically.
curl -Ls -o ${ex_tarball} ${ELIXIR_TARBALL_URL} || exit 1
)
echo "-------> Unpacking Elixir $ex_ver"
mkdir -p ${EXROOT}
tar xzf ${cache}/${ex_tarball} -C ${EXROOT} --strip-components=1
echo "-------> Installing Elixir $ex_ver"
(
set -e
cd ${EXROOT}
make > /dev/null
)
# Expose Elixir under the runtime prefix and on PATH.
ln -s ${EXROOT} /app/ex
PATH=/app/ex/bin:$PATH
cd $build
echo "-----> Building with Mix"
# GIT_DIR is set by the platform and would confuse mix's git usage.
unset GIT_DIR
echo "-----> Bundling dependencies"
mix deps.get 2>&1 | indent
echo "-----> Compiling app"
MIX_ENV=prod mix compile 2>&1 | indent
| true |
c611761e6f43456d691234394002efb50168dcf9 | Shell | NVIDIA/ais-k8s | /helm/ais/charts/templates/_set_initial_primary_proxy_env.sh | UTF-8 | 2,945 | 3.453125 | 3 | [
"MIT"
] | permissive | {{- define "proxy.set_initial_primary_proxy_env" -}}
#!/bin/bash
#
# Run in an initContainer on proxy pods. Does nothing if the cluster is already
# established (to avoid requiring a node labeled as initial primary in a running
# cluster)
#
# During initial cluster deployment, proxy pods wait here until exactly one node is
# labeled to host the initial primary proxy. If this pod is a proxy running on that
# chosen node then we pass a hint to the main container to startup as primary proxy.
#
# Remove any stale hint from a previous container run.
envfile="/var/ais_env/env"
rm -f $envfile
#
# On an established cluster we must not depend on the initial primary proxy hack.
# We recognize an established cluster as one for which we can retrieve an smap
# ping from *any* proxy behind the proxy clusterIP service.
#
url="http://${CLUSTERIP_PROXY_SERVICE_HOSTNAME}:${CLUSTERIP_PROXY_SERVICE_PORT}/v1/daemon?what=smap"
echo "Checking for a 200 result on ${url}"
# Poll for up to 5 seconds (one request per second).
elapsed=0
while [[ $elapsed -lt 5 ]]; do
# -w "%{http_code}" prints only the status code; the body is discarded.
code=$(curl -X GET -o /dev/null --silent -w "%{http_code}" $url)
if [[ "$code" == "200" ]]; then
echo " ... success after ${elapsed}s; this is not initial cluster deployment"
exit 0
else
echo " ... failed ($code) at ${elapsed}s, trying for up to 5s"
elapsed=$((elapsed + 1))
sleep 1
fi
done
#
# Most likely initial cluster deployment time, or a very sick cluster in which no
# proxy could answer on the clusterIP service. We'll look for a suitably labeled
# node, and if it is the current node then we'll label the current pod as
# initial primary proxy making it inclined to assume the primary role on startup
# unless on startup it discovers otherwise.
#
# Note that initial cluster deployment even the initial primary proxy will "waste"
# a total of 20s above - 10s in the ping and 10s in curl loop. We could shrink that
# but it's a once-off price to pay, and we don't want to startup as tentative
# primary too easily.
#
# NOTE(review): the helm expressions below render at chart-template time;
# "filter" does not appear to be used later in this visible script — confirm
# whether it is leftover from an older node-label lookup.
filter="{{ .Values.proxy.initialPrimaryProxyNodeLabel.name }}={{ template "ais.fullname" . }}"
if [[ "$MY_POD" == "$DEFAULT_PRIMARY_POD" ]]; then
echo "initContainer complete - this proxy pod is on the primary node $MY_POD"
#
# Indicate to subsequent containers in this pod that we started on the primary node.
#
echo "export AIS_IS_PRIMARY=true" > $envfile
else
echo "initContainer complete - not running on primary proxy node"
fi
# Use DNS as hostnames
pod_dns="${MY_POD}.${MY_SERVICE}.${K8S_NS}.svc.cluster.local"
export AIS_INTRA_HOSTNAME=${pod_dns}
export AIS_DATA_HOSTNAME=${pod_dns}
#
# Update configuration file,substitute environment variables
#
# The global config is copied verbatim; only the local config goes
# through envsubst (it references the exported AIS_* variables above).
global_conf_template="/var/ais_config_template/ais.json"
global_conf_file="/var/ais_config/ais.json"
cp ${global_conf_template} ${global_conf_file}
local_conf_template="/var/ais_config_template/ais_local.json"
local_conf_file="/var/ais_config/ais_local.json"
envsubst < ${local_conf_template} > ${local_conf_file}
{{end}}
| true |
4b51713c8f8aa13e3b1e2f196bbd1d7d6e8ae7db | Shell | pld-linux/rpm-pld-macros | /rpm-java-requires | UTF-8 | 2,850 | 4.03125 | 4 | [] | no_license | #!/bin/sh
# This script reads filenames from STDIN and outputs any relevant requires
# information that needs to be included in the package.
#
# Based on rpm-4.4.2/scripts/find-req.pl
# Authors: Elan Ruusamäe <glen@pld-linux.org>
export PATH="/sbin:/usr/sbin:/bin:/usr/bin"
# Set the prefix, unless it is overriden
# (": ${VAR=default}" assigns only when VAR is unset, so callers can override)
: ${RPM_LIBDIR=/usr/lib/rpm}
# Enable debug: JAVADEPS_DEBUG=true
: ${JAVADEPS_DEBUG=false}
# xsltproc for eclipse feature.xml
: ${xsltproc=/usr/bin/xsltproc}
# "%define java_min_classdataversion 51.0" in spec to minimum version to be 51.0
: ${MIN_CLASSDATAVERSION=}
# save $- state, to enable in functions
# (functions restore the caller's shell options with "set -$debug")
debug=$-
# Emit one "java(ClassDataVersion) >= <max>" requirement for the highest
# class-data version among the arguments; the spec-enforced minimum from
# $MIN_CLASSDATAVERSION (when set) competes as one more candidate.
javaclassversionfilter() {
	local ver
	if [ "$MIN_CLASSDATAVERSION" ]; then
		set -- "$@" "$MIN_CLASSDATAVERSION"
	fi
	for ver in "$@"; do
		printf 'java(ClassDataVersion) >= %s\n' "$ver"
	done | sort -V | tail -n 1
}
# Print the java(ClassDataVersion) requirement for the given .class files.
# Returns 1 when no class version could be extracted (caller reports error).
javaclassversion() {
	set -$debug
	local mode=$1; shift
	[ $# -gt 0 ] || return 1
	$JAVADEPS_DEBUG && echo >&2 ">> javaclassversion($mode): $*"
	# process only requires; succeed with no output for other modes.
	# Fix: this used "return $ret" with no local ret, silently relying
	# on the global "ret" initialised much later in the script.
	[ "$mode" = requires ] || return 0
	# file(1) labels compiled classes with their class-format version;
	# keep the unique version numbers only.
	local classver
	classver=$(echo "$@" | xargs -r file | grep -o 'compiled Java class data, version [0-9.]*' | awk '{print $NF}' | sort -u)
	if [ -z "$classver" ]; then
		return 1
	fi
	javaclassversionfilter $classver
	return 0
}
# Unpack a .jar into a scratch directory and recurse into any .class or
# nested .jar entries via find_javadeps. Returns non-zero if the
# recursive scan reported a failure.
javajarversion() {
	set -$debug
	local mode=$1; shift
	local jarfile=$1
	local workdir status=0

	$JAVADEPS_DEBUG && echo >&2 ">> javajarversion($mode): $jarfile"
	# check only files, symlinks could point outside buildroot
	if [ ! -f "$jarfile" ] || [ -L "$jarfile" ]; then
		return $status
	fi
	workdir=$(mktemp -d)
	unzip -q -d $workdir $jarfile >&2
	# workaround for .jar files with stupid permissions
	chmod -R u+rwX $workdir
	# scan everything that looks like bytecode or a nested archive
	find_javadeps $mode $(find $workdir -type f -regextype posix-extended -regex '^.+\.(class|jar)$') || status=1
	rm -rf $workdir
	return $status
}
# Extract dependency data from an Eclipse feature.xml through the
# bundled XSLT stylesheet. Quietly succeeds when xsltproc is absent.
eclipse_feature() {
	set -$debug
	local mode=$1; shift
	local featurefile=$1

	$JAVADEPS_DEBUG && echo >&2 ">> eclipse_feature($mode): $featurefile"
	# nothing we can do without the XSLT processor installed
	[ ! -x $xsltproc ] && return 0
	$xsltproc --stringparam mode $mode ${RPM_LIBDIR}/eclipse-feature.xslt $featurefile
}
# Dispatch each path argument to the matching dependency extractor
# (.jar / .class / feature.xml). Returns non-zero if any handler failed.
find_javadeps() {
	set -$debug
	local mode=$1; shift
	local status=0
	local entry

	$JAVADEPS_DEBUG && echo >&2 ">> find_javadeps($mode): $*"
	for entry in "$@"; do
		case $entry in
		*.jar)
			javajarversion $mode "$entry" || status=1
			;;
		*.class)
			javaclassversion $mode "$entry" || {
				echo >&2 "ERROR: Class version could not be extracted from $entry"
				status=1
			}
			;;
		*/feature.xml)
			eclipse_feature $mode "$entry" || status=1
			;;
		*)
			# unrecognised file types are silently skipped
			$JAVADEPS_DEBUG && echo >&2 ">> find_javadeps($mode): no handle: $entry"
			;;
		esac
	done
	return $status
}
ret=0
# default mode to requires for backward compat
mode=requires
# -P/-R select which dependency direction to emit.
case $1 in
-P|--provides)
mode=provides
shift
;;
-R|--requires)
mode=requires
shift
;;
esac
# Filenames arrive whitespace-separated on stdin; results are collected
# in a temp file so they can be de-duplicated before printing.
t=$(mktemp)
find_javadeps $mode $(cat -) > $t || ret=1
sort -u $t
rm -f $t
exit $ret
| true |
0c6df232d9a03e28f6cb3c76fb03ca7423ae80ac | Shell | beersj/dotfiles | /zlogin | UTF-8 | 1,392 | 3.28125 | 3 | [
"MIT"
] | permissive | GIT_THEME_PROMPT_DIRTY=" ${red}✗"
# Prompt theme fragments for the git status markers; the ${...} colour
# variables are expanded by the shell's colour setup (see "colors" below).
GIT_THEME_PROMPT_CLEAN=" ${bold_green}✓"
GIT_THEME_PROMPT_PREFIX=" ${green}|"
GIT_THEME_PROMPT_SUFFIX="${green}|"
# zsh-style prompt escapes: %{...%} marks zero-width (non-printing) text.
ZSH_THEME_GIT_PROMPT_PREFIX="%{$reset_color%}%{$fg[green]%}["
ZSH_THEME_GIT_PROMPT_SUFFIX="]%{$reset_color%}"
ZSH_THEME_GIT_PROMPT_DIRTY="%{$fg[red]%}✗%{$reset_color%}"
ZSH_THEME_GIT_PROMPT_CLEAN="%{$fg[green]%}✓%{$reset_color%}"
# Print the current branch ref, falling back to a symbolic name for a
# detached HEAD; all git errors are suppressed.
parse_git_branch() {
	{
		command git symbolic-ref -q HEAD \
			|| command git name-rev --name-only --no-undefined --always HEAD
	} 2>/dev/null
}
# Echo the dirty marker when the work tree differs from HEAD (or when we
# are not in a git repository), the clean marker otherwise.
parse_git_dirty() {
	if ! command git diff-index --quiet HEAD 2> /dev/null; then
		echo "$ZSH_THEME_GIT_PROMPT_DIRTY"
	else
		echo "$ZSH_THEME_GIT_PROMPT_CLEAN"
	fi
}
# Wrap the current ruby version (reported by rvm-prompt) in bracketed,
# zero-width-marked colour codes for the prompt.
rvm_ruby_version() {
	printf '%s\n' "[%{$fg_bold[yellow]%}$(~/.rvm/bin/rvm-prompt)%{$reset_color%}]"
}
# adds the current branch name (magenta) plus the clean/dirty marker;
# prints nothing outside a git repository
git_prompt_info() {
	ref=$(git symbolic-ref HEAD 2> /dev/null)
	[[ -n $ref ]] || return 0
	echo " [%{$fg_bold[magenta]%}${ref#refs/heads/}%{$reset_color%} $(parse_git_dirty)]"
}
# makes color constants available
# (autoload/setopt are zsh builtins; this file is a zsh login script)
autoload -U colors
colors
# enable colored output from ls, etc
export CLICOLOR=1
# expand functions in the prompt
# (promptsubst makes zsh re-evaluate $(...) in PS1 on every prompt)
setopt promptsubst
# prompt
# single-quoted so the command substitutions run at display time, not now;
# $'\n' inserts a literal newline between the info line and the arrow.
export PS1='$(rvm_ruby_version) [%{$fg_bold[blue]%}%~%{$reset_color%}]$(git_prompt_info)'$'\n''%{$fg[green]%}→%{$reset_color%} '
| true |
f886dff7523396b647db74945fcfb3a91db60ecb | Shell | YangWanjun/my-docker-files | /sales-ubuntu/fix_error_matplotlib_display.sh | UTF-8 | 440 | 3.0625 | 3 | [] | no_license | #!/bin/bash
# Workaround for matplotlib's "$DISPLAY is undefined" error: force the
# headless Agg backend in the user's matplotlibrc.
# mkdir -p creates missing parents, so one call replaces the two
# separate existence checks the script used before.
mkdir -p ~/.config/matplotlib
# Start from the system-wide default matplotlibrc.
cp /usr/local/lib/python2.7/dist-packages/matplotlib/mpl-data/matplotlibrc ~/.config/matplotlib/matplotlibrc.bak
FILENAME=~/.config/matplotlib/matplotlibrc
# Rewrite "backend : tkagg" (any spacing, any case) to the Agg backend.
sed 's/backend\s*:\s*tkagg/backend : Agg/ig' "${FILENAME}.bak" > "${FILENAME}"
rm "${FILENAME}.bak"
055d3bfb2bdd85e2925112427fab4c103626e283 | Shell | XuYingJie-z/WGBS_pipline | /WGBS.sh | UTF-8 | 2,884 | 3.328125 | 3 | [] | no_license | #!/usr/bin/bash
# Print the usage summary and abort (exit status 255, from "exit -1").
help() {
	cat <<'USAGE'
Usage:
test.sh [-f forward_fq] [-r reverse_fq] [-g reference]
Description:
-f forward_fq,the path of forward_fq,fq.gz also can be use.
-r reverse_fq,the path of reverse_fq,fq.gz also can be use.
-n sample name
-g the path of reference genome
USAGE
	exit -1
}
# Parse -f/-r/-n/-g options into the pipeline inputs.
# NOTE(review): 'h' is not in the optstring, so "-h" reaches the "?)"
# arm (getopts maps unknown options to '?'), never the "h)" arm.
while getopts 'f:r:n:g:' OPT; do
case $OPT in
f) forward_fq="$OPTARG";;
r) reverse_fq="$OPTARG";;
n) samplename="$OPTARG";;
g) reference="$OPTARG";;
h) help;;
?) help;;
esac
done
# NOTE(review): samplename is used throughout but never validated here.
if [ -z $forward_fq ] || [ -z $reverse_fq ] || [ -z $reference ]; then
echo 'error,need args'
help
exit
fi
## create the output folder layout (one directory per pipeline stage)
folder=(rawdata fastqc cutadapt trim mapping call_site conversion_rate_call_site conversion_rate_mapping)
for i in ${folder[*]} ; do
if [ ! -d ../$i ]; then
mkdir ../$i
echo creat $i
fi
done
## QC before adapter trimming (raw-read fastqc step currently disabled)
# fastqc -o ../fastqc $forward_fq $reverse_fq
# Remove Illumina adapters from both mates, then quality-trim.
cutadapt -j 2 -a AGATCGGAAGAG -A AGATCGGAAGAG -o ../cutadapt/${samplename}_cuta_1.fq.gz -p ../cutadapt/${samplename}_cuta_2.fq.gz $forward_fq $reverse_fq
trimmomatic PE -phred33 ../cutadapt/${samplename}_cuta_1.fq.gz ../cutadapt/${samplename}_cuta_2.fq.gz ../trim/${samplename}_trim_1.fq.gz ../trim/${samplename}_unpair_1.fq.gz ../trim/${samplename}_trim_2.fq.gz ../trim/${samplename}_unpair_2.fq.gz LEADING:20 TRAILING:20 SLIDINGWINDOW:4:20 MINLEN:35
## QC after adapter trimming
fastqc -t 3 -o ../fastqc ../trim/${samplename}_trim_1.fq.gz ../trim/${samplename}_trim_2.fq.gz
## First estimate the bisulfite conversion rate (align against lambda phage)
bismark --genome /public1/home/sc60357/reference/lambda/ -1 ../trim/${samplename}_trim_1.fq.gz -2 ../trim/${samplename}_trim_2.fq.gz --path_to_bowtie2 /public1/home/sc60357/miniconda3/envs/python3/bin/ -o ../conversion_rate_mapping
deduplicate_bismark --paired --outfile ${samplename} --output_dir ../conversion_rate_mapping ../conversion_rate_mapping/${samplename}_trim_1_bismark_bt2_pe.bam
bismark_methylation_extractor --paired-end --comprehensive --output ../conversion_rate_call_site --bedGraph --cytosine_report --genome_folder /public1/home/sc60357/reference/lambda/ ../conversion_rate_mapping/${samplename}.deduplicated.bam
## Align to the human genome, deduplicate, and call methylation sites
bismark --un --genome /public1/home/sc60357/reference/human/GRCh38.p13_Release_36 -1 ../trim/${samplename}_trim_1.fq.gz -2 ../trim/${samplename}_trim_2.fq.gz --path_to_bowtie2 /public1/home/sc60357/miniconda3/envs/python3/bin/ -o ../mapping
deduplicate_bismark --paired --outfile ${samplename} --output_dir ../mapping ../mapping/${samplename}_trim_1_bismark_bt2_pe.bam
bismark_methylation_extractor --paired-end --comprehensive --output ../call_site --bedGraph --cytosine_report --genome_folder /public1/home/sc60357/reference/human/GRCh38.p13_Release_36 ../mapping/${samplename}.deduplicated.bam
| true |
475cc998009b1b176833f887f67364a8075f5188 | Shell | kevwargo/sh-utils | /bashrc/completions/gentoo/findmnt | UTF-8 | 3,150 | 3.375 | 3 | [] | no_license | #!/bin/bash
# Bash programmable completion for findmnt(8).
# $1 is the command being completed (findmnt itself), which is re-invoked
# with -rno <COLS> to harvest live values (mount options, sources, targets).
_findmnt_module()
{
	local cur prev OPTS
	COMPREPLY=()
	cur="${COMP_WORDS[COMP_CWORD]}"
	prev="${COMP_WORDS[COMP_CWORD-1]}"
	# First: complete the *argument* of the option typed just before the cursor.
	case $prev in
		'-p'|'--poll')
			COMPREPLY=( $(compgen -W "=list" -- $cur) )
			return 0
			;;
		'-w'|'--timeout')
			COMPREPLY=( $(compgen -W "timeout" -- $cur) )
			return 0
			;;
		'-d'|'--direction')
			COMPREPLY=( $(compgen -W "forward backward" -- $cur) )
			return 0
			;;
		'-F'|'--tab-file')
			# plain filename completion for the alternative tab file
			local IFS=$'\n'
			compopt -o filenames
			COMPREPLY=( $(compgen -f -- $cur) )
			return 0
			;;
		'-N'|'--task')
			# collect live task IDs from /proc/<tid>/mountinfo paths
			local TID='' I ARR
			for I in /proc/*/mountinfo; do IFS=/ read -ra ARR <<< "$I"; TID+="${ARR[2]} "; done
			COMPREPLY=( $(compgen -W "$TID" -- $cur) )
			return 0
			;;
		'-O'|'--options')
			# gather every mount option currently in use, de-duplicated via
			# an associative array keyed by option name
			local MTAB_3RD I
			declare -a TMP_ARR
			declare -A MNT_OPTS
			while read MTAB_3RD; do
				IFS=',' read -ra TMP_ARR <<<"$MTAB_3RD"
				for I in ${TMP_ARR[@]}; do
					MNT_OPTS[$I]='1'
				done
			done < <($1 -rno OPTIONS)
			COMPREPLY=( $(compgen -W "${!MNT_OPTS[@]}" -- $cur) )
			return 0
			;;
		'-o'|'--output')
			# comma-separated column list: complete only the part after the
			# last comma and drop columns that already appear in the prefix
			local prefix realcur OUTPUT_ALL OUTPUT
			realcur="${cur##*,}"
			prefix="${cur%$realcur}"
			OUTPUT_ALL="SOURCE TARGET FSTYPE OPTIONS VFS-OPTIONS
				FS-OPTIONS LABEL UUID PARTLABEL PARTUUID
				MAJ\:MIN ACTION OLD-TARGET OLD-OPTIONS
				SIZE AVAIL USED USE% FSROOT TID ID
				OPT-FIELDS PROPAGATION FREQ PASSNO"
			for WORD in $OUTPUT_ALL; do
				if ! [[ $prefix == *"$WORD"* ]]; then
					OUTPUT="$WORD $OUTPUT"
				fi
			done
			compopt -o nospace
			COMPREPLY=( $(compgen -P "$prefix" -W "$OUTPUT" -S ',' -- $realcur) )
			return 0
			;;
		'-t'|'--types')
			local TYPES
			TYPES="adfs affs autofs cifs coda coherent cramfs
				debugfs devpts efs ext2 ext3 ext4 hfs
				hfsplus hpfs iso9660 jfs minix msdos
				ncpfs nfs nfs4 ntfs proc qnx4 ramfs
				reiserfs romfs squashfs smbfs sysv tmpfs
				ubifs udf ufs umsdos usbfs vfat xenix xfs"
			COMPREPLY=( $(compgen -W "$TYPES" -- $cur) )
			return 0
			;;
		'-S'|'--source')
			# only block-device sources (mounted /dev entries)
			local DEV_MPOINT
			DEV_MPOINT=$($1 -rno SOURCE | grep ^/dev)
			COMPREPLY=( $(compgen -W "$DEV_MPOINT" -- $cur) )
			return 0
			;;
		'-T'|'--target')
			local DEV_MPOINT
			DEV_MPOINT=$($1 -rno TARGET)
			COMPREPLY=( $(compgen -W "$DEV_MPOINT" -- $cur) )
			return 0
			;;
		'-M'|'--mountpoint')
			# directory completion, defaulting to / when nothing is typed
			local IFS=$'\n'
			compopt -o filenames
			COMPREPLY=( $(compgen -o dirnames -- ${cur:-"/"}) )
			return 0
			;;
		'-h'|'--help'|'-V'|'--version')
			return 0
			;;
	esac
	# Second: if the current word starts with '-', complete option names.
	case $cur in
		-*)
			OPTS="--fstab
				--mtab
				--kernel
				--poll
				--timeout
				--all
				--ascii
				--canonicalize
				--df
				--direction
				--evaluate
				--tab-file
				--first-only
				--invert
				--json
				--list
				--task
				--noheadings
				--notruncate
				--options
				--output
				--pairs
				--raw
				--types
				--nofsroot
				--submounts
				--source
				--target
				--mountpoint
				--help
				--version"
			COMPREPLY=( $(compgen -W "${OPTS[*]}" -- $cur) )
			return 0
			;;
	esac
	# Fallback: complete positional arguments with known targets/sources.
	local DEV_MPOINT
	DEV_MPOINT=$($1 -rno TARGET,SOURCE)
	COMPREPLY=( $(compgen -W "$DEV_MPOINT" -- $cur) )
	return 0
}
complete -F _findmnt_module findmnt
| true |
4940f589c1659d6f178412358d659e14e5bec1c7 | Shell | lodrantl/reversi | /ci/mac/install.sh | UTF-8 | 1,743 | 3 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -ev #echo on

# Travis exports $PYTHON; --enable-framework builds a macOS framework Python
# (required for pyinstaller-built GUI apps).
export PYENV_VERSION=$PYTHON
export PYTHON_CONFIGURE_OPTS="--enable-framework"

# See https://docs.travis-ci.com/user/osx-ci-environment/#A-note-on-upgrading-packages.
brew outdated pyenv || brew upgrade pyenv

# Import codesign certificate
# The .p12 is decrypted with Travis-provided per-repo secret env vars.
export CERTIFICATE_P12=ci/mac/ReversiBundle.p12;
export KEYCHAIN=build.keychain;
openssl aes-256-cbc -K $encrypted_80406b3fc467_key -iv $encrypted_80406b3fc467_iv -in ci/mac/ReversiBundle.p12.enc -out $CERTIFICATE_P12 -d
# NOTE(review): the literal keychain password protects only a throwaway
# CI keychain created here — confirm that is intentional.
echo "Create keychain"
security create-keychain -p mysecretpassword $KEYCHAIN;
echo "Importing certificates into $KEYCHAIN"
security import $CERTIFICATE_P12 -k $KEYCHAIN -P reversi -T /usr/bin/codesign;
echo "Unlock keychain"
security unlock-keychain -p mysecretpassword $KEYCHAIN;
echo "Increase keychain unlock timeout"
security set-keychain-settings -lut 7200 $KEYCHAIN;
echo "Add keychain to keychain-list"
security list-keychains -s $KEYCHAIN;
security default-keychain -s $KEYCHAIN;

# Install python
pyenv install $PYENV_VERSION
eval "$(pyenv init -)"

# A manual check that the correct version of Python is running.
python --version

# Upgrade pip
python -m pip install --upgrade pip virtualenv setuptools

#Install kivy dependencies
brew install --build-bottle pkg-config sdl2 sdl2_image sdl2_ttf sdl2_mixer
python -m pip install -I Cython==0.23

#Install pyinstaller and upx
brew install upx
python -m pip install pyinstaller pytest

#Install kivy master branch
#USE_OSX_FRAMEWORKS=0 python -m pip install https://github.com/kivy/kivy/archive/master.zip
#Install kivy stable
USE_OSX_FRAMEWORKS=0 python -m pip install kivy

#Workaround for https://github.com/travis-ci/travis-ci/issues/6522
#Turn off exit on failure.
set +ev
| true |
e62ffc919c6f1ee9fcf7a1dab748d1fcabea183d | Shell | SteveSatterfield/HEVf | /idea/src/hev-which/example.sh | UTF-8 | 114 | 2.734375 | 3 | [
"LicenseRef-scancode-public-domain"
] | permissive | #! /bin/sh
# example usage

# hev-which -q runs quietly and reports through its exit status;
# status 20 means the HEV IRIS environment is configured.
hev-which -q
h=$?
if [ "$h" -eq 20 ]; then
	echo "HEV IRIS environment is set up"
fi
| true |
2133e7bd3c590c5c1ac3f8d7a39d0ece2f386766 | Shell | VerKnowSys/ServeD | /bin/web | UTF-8 | 456 | 3 | 3 | [] | no_license | #!/bin/sh
# Build and run the ServeD web .war from the current checkout.
# NOTE(review): "pwd" here is a plain variable and shadows nothing, but a
# less confusable name (e.g. root) would be clearer.
pwd=$(pwd)
# Scala and project versions are read from marker files in the repo root.
scala=$(cat VERSION-SCALA)
version=$(cat VERSION)
jar="${pwd}/svd.web/target/scala-${scala}/web_${scala}-${version}.war"
# Remove any stale artifact so the existence check below is meaningful.
rm -rf "$jar"
bin/sbt "project web" compile package
if [ -e "${jar}" ]; then
echo "Executing: ${jar}"
# Forward all script arguments to the packaged application.
java -XX:+UseCompressedOops -XX:MaxPermSize=128M -XX:+UseParallelGC -Xms64m -Xmx256m -Dfile.encoding=UTF-8 -noverify -jar "${jar}" "$@"
else
echo "Assembly not found: ${jar}. Use build script!"
fi
| true |
07135af791af4e2cd013d14b52e23376d0f1a4eb | Shell | katybucsa/faculty | /Operating Systems/Lab/Lab5/b.sh | UTF-8 | 307 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# Interpret the script arguments as a ';'-separated list of commands:
# the first word after each ';' is the command name, the following words
# (up to the next ';') are its arguments.
CMD=""
ARG=""
for token in $*; do
	case "$token" in
	";")
		# separator: run whatever command has been collected so far
		[ "$CMD" = "" ] || $CMD $ARG
		CMD=""
		ARG=""
		;;
	*)
		if [ "$CMD" = "" ]; then
			CMD=$token
		else
			ARG="$ARG $token"
		fi
		;;
	esac
done
# run the trailing command (the last group needs no closing ';')
[ "$CMD" = "" ] || $CMD $ARG
| true |
a3a56b6ca37829ec07ed1fa77c64cc111a2cfec1 | Shell | arabadj/docker-hub | /ubuntu-pptpd/docker-compose.sh | UTF-8 | 1,982 | 3.390625 | 3 | [] | no_license | #!/bin/sh
#========================================================================================================================
# Validate the action argument. NOTE(review): invalid usage exits 0,
# which callers cannot distinguish from success — confirm intended.
[ "$1" != "up" ] && [ "$1" != "down" ] && echo "Usage: $(basename $0) up|down " && exit 0
#========================================================================================================================
export COMPOSE_PROJECT_NAME="vpnp"
export PATH_SERVICE="$HOME/.local/opt/$COMPOSE_PROJECT_NAME"
export PATH_DOCKER="/usr/bin"
export PATH_COMPOSE="/usr/local/bin"
#========================================================================================================================
export service_name="$COMPOSE_PROJECT_NAME"
export service_dir="$PATH_SERVICE"
#------------------------------------------------------------------------------------------------------------------------
# The VPN password is fetched from AWS SSM Parameter Store and exported
# so the j2 templates below can substitute it.
export ansible_user="$USER"
export password_vpn="$(aws ssm get-parameters --region eu-central-1 --name password_vpn \
--query Parameters[*].{Value:Value} --output text)"
#========================================================================================================================
# Render the jinja2 templates into the service directory.
mkdir -p "$PATH_SERVICE"
j2 ./templates/docker-compose.yml.j2 -o $PATH_SERVICE/docker-compose.yml
#------------------------------------------------------------------------------------------------------------------------
mkdir -p "$PATH_SERVICE/etc/ppp/"
j2 ./templates/chap-secrets.j2 -o $PATH_SERVICE/etc/ppp/chap-secrets
# chap-secrets contains credentials; restrict it to the owner.
chmod 600 $PATH_SERVICE/etc/ppp/chap-secrets
#========================================================================================================================
cd $PATH_SERVICE
[ "$1" == "up" ] && $PATH_COMPOSE/docker-compose up -d --remove-orphans
[ "$1" == "down" ] && $PATH_COMPOSE/docker-compose down --remove-orphans
# "down clean" additionally removes the rendered service directory.
[ "$1" == "down" ] && [ "$2" == "clean" ] && rm -rf $PATH_SERVICE
#========================================================================================================================
| true |
649d12dbd69cdde9badb7c8eebf14ba399a06c21 | Shell | meowmeowxw/dotfiles | /i3/change-wallpapers.sh | UTF-8 | 208 | 2.890625 | 3 | [] | no_license | #!/bin/bash
# Build a space-separated list of wallpaper filenames once at startup.
# NOTE(review): parsing ls breaks on filenames with spaces, and new files
# added after startup are never picked up — consider globbing instead.
images=$(ls $HOME/Pictures/wallpapers/slideshow | xargs)
# Cycle through the wallpapers forever, changing every 15 minutes.
while true
do
for i in $images
do
feh --bg-scale $HOME/Pictures/wallpapers/slideshow/$i
sleep 900
done
done
| true |
7493040a4634c82ccb38e062fb03ab140ef181b0 | Shell | cguerramain/graph-protein-distance-prediction | /scripts/protein_gcnn.slurm | UTF-8 | 1,744 | 2.8125 | 3 | [] | no_license | #!/bin/sh
## Give your job a name to distinguish it from other jobs you run.
#SBATCH --job-name=Antibody_GCNN
## General partitions: all-HiPri, bigmem-HiPri -- (12 hour limit)
## all-LoPri, bigmem-LoPri, gpuq (5 days limit)
## Restricted: CDS_q, CS_q, STATS_q, HH_q, GA_q, ES_q, COS_q (10 day limit)
#SBATCH --partition=gpuq
#SBATCH --gres=gpu:1
## Separate output and error messages into 2 files.
## NOTE: %u=userID, %x=jobName, %N=nodeID, %j=jobID, %A=arrayID, %a=arrayTaskID
#SBATCH --output=/scratch/%u/%x-%N-%j.out # Output file
#SBATCH --error=/scratch/%u/%x-%N-%j.err # Error file
## Slurm can send you updates via email
#SBATCH --mail-type=BEGIN,END,FAIL # ALL,NONE,BEGIN,END,FAIL,REQUEUE,..
#SBATCH --mail-user=cguerra5@gmu.edu # Put your GMU email address here
## Specify how much memory your job needs. (2G is the default)
#SBATCH --mem=2G # Total memory needed per task (units: K,M,G,T)
## Specify how much time your job needs. (default: see partition above)
#SBATCH --time=0-12:00 # Total time needed for job: Days-Hours:Minutes
## Load the relevant modules needed for the job
module load cuda/10.1
## Run your program or script
cd /home/cguerra5/graph-protein-distance-prediction/src
# Hyper-parameters, also embedded in the output filename below.
epochs=30
lr=0.0001
# NOTE(review): batch_size is only used in the save_file name; it is not
# passed to gcnn_cli.py — confirm whether that is intentional.
batch_size=64
num_blocks='10'
h5file='/home/cguerra5/graph-protein-distance-prediction/data/ab_pdbs.h5'
class_weight_file='/home/cguerra5/graph-protein-distance-prediction/data/antibody_weights.p'
# Replace the dot in the learning rate with 'p' for a filesystem-safe name.
lr_str=${lr//\./p}
python -u gcnn_cli.py --epochs $epochs --lr $lr --num_blocks $num_blocks --h5file $h5file --class_weight_file $class_weight_file \
--save_file /scratch/cguerra5/Antibody_GCNN_epochs${epochs}_lr${lr_str}_batch_size${batch_size}_num_blocks${num_blocks}.p
| true |
391ff7c4670bda8bbf9297e0270a1993951996d8 | Shell | notAlex2/Translation-Team08-IFT6759 | /scripts/custom_tokenizer_trainer.sh | UTF-8 | 1,132 | 2.71875 | 3 | [] | no_license | #!/bin/bash
#SBATCH --time=30:00
#SBATCH --gres=gpu:k80:1
#SBATCH --cpus-per-task=4
#SBATCH --mem=6G

module load python/3.7
source /home/guest140/harman_venv/bin/activate

date
echo ~~~~~~~~~~~~Tokenizing English data
echo

# change training_folder and path_to_save_tokenizer according to language tokenizer to train
# training_folder contains file only related to particular language.
# For example, if training an english tokenizer, training_folder will contain - unaligned.en and train_en.lang1
# Fix: the trailing "\" after the final option of each python command
# continued the line; only a following blank line kept the next command
# from being swallowed as an extra argument.
python ../code/custom_tokenizer_trainer.py \
--vocab_size 30000 \
--lowercase True \
--min_frequency 2 \
--training_folder ../../tokenizer_train_folder_en \
--path_to_save_tokenizer ../tokenizer_data_en_30k

echo
echo ~~~~~~~~~~~~Tokenizing French data
echo

# Note: Keep lowercase=False for french
python ../code/custom_tokenizer_trainer.py \
--vocab_size 30000 \
--lowercase False \
--min_frequency 2 \
--training_folder ../../tokenizer_train_folder_fr \
--path_to_save_tokenizer ../tokenizer_data_fr_30k
45e97dbdaa819b291798eec84a6ee944c18e61e0 | Shell | heenald/CS144 | /project2/runLoad.sh | UTF-8 | 1,062 | 3.09375 | 3 | [] | no_license | #!/bin/bash
# Run the drop.sql batch file to drop existing tables, if they exist.
mysql -u cs144 CS144 < drop.sql
# Run the create.sql batch file to create the database and tables
mysql -u cs144 CS144 < create.sql
# Compile and run the parser to generate the appropriate load files
ant
#Drop dat files if they exist, else data will be appended
rm -f item.dat item_category.dat user_bidder.dat user_seller.dat bid.dat
ant run-all
#Deduplicates the dat files - sorts and keeps unique
# NOTE(review): "sort | uniq" is equivalent to "sort -u".
sort user_bidder.dat | uniq > user_bidder_tmp.dat
sort user_seller.dat | uniq > user_seller_tmp.dat
sort item_category.dat | uniq > item_category_tmp.dat
sort item.dat | uniq > item_tmp.dat
sort bid.dat | uniq > bid_tmp.dat
# Replace the originals with the de-duplicated versions.
mv user_bidder_tmp.dat user_bidder.dat
mv user_seller_tmp.dat user_seller.dat
mv item_category_tmp.dat item_category.dat
mv item_tmp.dat item.dat
mv bid_tmp.dat bid.dat
# Run the load.sql batch file to load the data
mysql -u cs144 CS144 < load.sql
# Remove all temporary files
rm -f item.dat item_category.dat user_bidder.dat user_seller.dat bid.dat
| true |
9e9d3318e17da6e195e2c91025c3f9b1a74b7e74 | Shell | pgajdos/apache-rex | /mod_proxy_connect-basic/run.sh | UTF-8 | 517 | 2.96875 | 3 | [
"LicenseRef-scancode-other-permissive",
"MIT",
"NTP",
"LicenseRef-scancode-rsa-1990",
"LicenseRef-scancode-rsa-md4",
"Beerware",
"RSA-MD",
"HPND-sell-variant",
"Spencer-94",
"LicenseRef-scancode-zeusbench",
"metamail",
"Apache-2.0"
] | permissive | exit_code=0
# Prepare a backend document the positive test can look for.
mkdir -p $AREX_RUN_DIR/htdocs-backend
echo 'secure main index' > $AREX_RUN_DIR/htdocs-backend/index.html
echo "[1] exhibit CONNECT method"
# Positive case: the proxy on PORT2 allows CONNECT, so the backend page
# is reachable; a miss sets exit_code=1.
curl -s -k --proxy localhost:$AREX_PORT2 https://localhost:$AREX_PORT1/ \
| grep 'secure main index' || exit_code=1
echo "[2] CONNECT method disallowed for port not allowed by AllowCONNECT"
# Negative case: PORT3's proxy must refuse CONNECT with a 403.
curl -k --proxy localhost:$AREX_PORT3 https://localhost:$AREX_PORT1/ 2>&1 \
| grep 'Received HTTP code 403 from proxy after CONNECT' || exit_code=2
exit $exit_code
| true |
5afbbe37fd1d60bc21e6c48a5cf492c3a4ea3e42 | Shell | MarvellServer/ThunderX-kernel-tci | /scripts/rootfs-plugin/debian.sh | UTF-8 | 5,421 | 3.578125 | 4 | [] | no_license | # Debian plug-in routines for build-rootfs.sh.
# Dump sudo-timestamp and mount state to stderr for debugging.
# Failures inside the dump must not abort the build, hence the
# temporary 'set +e' (errexit is re-enabled before returning).
debug_check() {
	local tag=${1}

	echo "debug_check: (${tag}) vvvv" >&2
	set +e
	${sudo} true
	mount
	${sudo} ls -l /var/run/sudo/ts
	set -e
	echo "debug_check: (${tag}) ^^^^" >&2
}
# Create a Debian root filesystem at $1 with debootstrap (foreign +
# second stage, so cross-architecture bootstraps work under QEMU).
# Reads the global target_arch; honours debian_os_release/_os_mirror
# overrides from the environment.
bootstrap_rootfs() {
	local rootfs=${1}

	debug_check "${FUNCNAME[0]}:${LINENO}"

	# Map our target_arch names onto Debian architecture/release/mirror.
	case ${target_arch} in
	amd64)
		debian_arch="amd64"
		debian_os_release=${debian_os_release:-"buster"}
		debian_os_mirror=${debian_os_mirror:-"http://ftp.us.debian.org/debian"}
		;;
	arm64)
		debian_arch="arm64"
		debian_os_release=${debian_os_release:-"buster"}
		debian_os_mirror=${debian_os_mirror:-"http://ftp.us.debian.org/debian"}
		;;
	ppc32|ppc64)
		# powerpc lives on the debian-ports infrastructure.
		debian_arch="powerpc"
		debian_os_release=${debian_os_release:-"unstable"}
		debian_os_mirror=${debian_os_mirror:-"http://ftp.ports.debian.org/debian-ports"}
		debootstrap_extra="--include=debian-ports-archive-keyring --exclude=powerpc-ibm-utils,powerpc-utils"
		;;
	*)
		echo "${name}: ERROR: Unsupported target-arch '${target_arch}'." >&2
		exit 1
		;;
	esac

	# Stage 1: unpack packages only (--foreign); stage 2 runs chrooted below.
	(${sudo} debootstrap --foreign --arch ${debian_arch} --no-check-gpg \
		${debootstrap_extra} \
		${debian_os_release} ${rootfs} ${debian_os_mirror})

	debug_check "${FUNCNAME[0]}:${LINENO}"
	copy_qemu_static ${rootfs}

	# Recreate and mount /proc inside the rootfs for the second stage.
	${sudo} mount -l -t proc
	${sudo} ls -la ${rootfs}
	${sudo} find ${rootfs} -type l -exec ls -la {} \; | egrep ' -> /'
	${sudo} rm -f ${rootfs}/proc
	${sudo} mkdir -p ${rootfs}/proc
	${sudo} mount -t proc -o nosuid,nodev,noexec /proc ${rootfs}/proc
	${sudo} mount -l -t proc

	${sudo} LANG=C.UTF-8 chroot ${rootfs} /bin/sh -x <<EOF
/debootstrap/debootstrap --second-stage
EOF
	${sudo} mount -l -t proc
	# best-effort unmount: the second stage may already have released it
	${sudo} umount ${rootfs}/proc || :
	${sudo} mount -l -t proc

	clean_qemu_static ${rootfs}

	debug_check "${FUNCNAME[0]}:${LINENO}"

	# Enable the contrib and non-free components, then refresh apt.
	${sudo} sed --in-place 's/$/ contrib non-free/' \
		${rootfs}/etc/apt/sources.list

	enter_chroot ${rootfs} "
		export DEBIAN_FRONTEND=noninteractive
		apt-get update
	"
	debug_check "${FUNCNAME[0]}:${LINENO}"
}
# Shrink the finished rootfs: purge auto-installed leftovers and drop
# the cached apt package lists.
rootfs_cleanup() {
	local target=${1}

	debug_check "${FUNCNAME[0]}:${LINENO}"
	enter_chroot ${target} "
		export DEBIAN_FRONTEND=noninteractive
		apt-get -y autoremove
		rm -rf /var/lib/apt/lists/*
	"
	debug_check "${FUNCNAME[0]}:${LINENO}"
}
# Update, upgrade, and install packages inside the rootfs.
# $1: rootfs path; remaining args: package names (commas accepted as
# separators and rewritten to spaces).
setup_packages() {
	local target=${1}
	shift 1
	local pkg_list="${@//,/ }"
	debug_check "${FUNCNAME[0]}:${LINENO}"
	enter_chroot ${target} "
		export DEBIAN_FRONTEND=noninteractive
		apt-get update
		apt-get -y upgrade
		apt-get -y install ${pkg_list}
	"
	debug_check "${FUNCNAME[0]}:${LINENO}"
}
# Make the rootfs bootable as an initramfs: /init must exist, and systemd
# switches to its initrd behavior when /etc/initrd-release is present.
setup_initrd_boot() {
	local target=${1}
	${sudo} ln -sf "lib/systemd/systemd" "${target}/init"
	${sudo} cp -a "${target}/etc/os-release" "${target}/etc/initrd-release"
}
# Networking for Debian images: delegate to the shared systemd-networkd
# helper.
setup_network() {
	setup_network_systemd ${1}
}
# Set the root password and turn on root auto-login for both the serial and
# virtual-terminal getty unit templates.
# $1: rootfs path, $2: password for root.
setup_login() {
	local rootfs=${1}
	local pw=${2}
	local unit

	setup_password ${rootfs} ${pw}

	# Both unit templates get the identical patch; loop instead of
	# repeating the sed invocation.
	for unit in serial-getty@.service getty@.service; do
		${sudo} sed --in-place \
			's|-/sbin/agetty -o|-/sbin/agetty --autologin root -o|' \
			"${rootfs}/lib/systemd/system/${unit}"
	done
}
# Configure sshd in the rootfs for passwordless root login and export a copy
# of the host RSA key for the test harness.
# $1: rootfs path, $2: file to copy the server's RSA host key to.
setup_sshd() {
	local rootfs=${1}
	local srv_key=${2}

	# Set (or replace) one sshd_config directive.  Existing lines are
	# deleted -- the old 's/^key.*$//' only blanked them, so repeated runs
	# accumulated empty lines in sshd_config.
	sshd_config() {
		local key=${1}
		local value=${2}
		${sudo} sed --in-place "/^${key}/d" \
			${rootfs}/etc/ssh/sshd_config
		echo "${key} ${value}" | sudo_append "${rootfs}/etc/ssh/sshd_config"
	}

	sshd_config "PermitRootLogin" "yes"
	sshd_config "UseDNS" "no"
	sshd_config "PermitEmptyPasswords" "yes"

	if [[ ! -f "${rootfs}/etc/ssh/ssh_host_rsa_key" ]]; then
		echo "${name}: ERROR: Not found: ${rootfs}/etc/ssh/ssh_host_rsa_key" >&2
		exit 1
	fi

	${sudo} cp -f ${rootfs}/etc/ssh/ssh_host_rsa_key ${srv_key}
	echo "${name}: USER=@$(id --user --real --name)@" >&2
	#printenv
	#${sudo} chown $(id --user --real --name): ${srv_key}
}
# Install and enable the TCI relay client: write the helper script into the
# rootfs, then create a systemd unit that keeps it running once the network
# and sshd are up.
# $1: rootfs path.
setup_relay_client() {
    local rootfs=${1}
    local tci_script="/bin/tci-relay-client.sh"
    local tci_service="tci-relay-client.service"
    write_tci_client_script "${rootfs}${tci_script}"
    # Unquoted EOF is intentional: ${tci_script} must expand while the unit
    # file is written.
    sudo_write "${rootfs}/etc/systemd/system/${tci_service}" <<EOF
[Unit]
Description=TCI Relay Client Service
#Requires=network-online.target ssh.service
BindsTo=network-online.target ssh.service
After=network-online.target ssh.service default.target
[Service]
Type=simple
Restart=on-failure
RestartSec=30
StandardOutput=journal+console
StandardError=journal+console
ExecStart=${tci_script}
[Install]
WantedBy=default.target network-online.target
EOF
    # FIXME
    #[ 139.055550] systemd-networkd-wait-online[2293]: Event loop failed: Connection timed out
    #systemd-networkd-wait-online.service: Main process exited, code=exited, status=1/FAILURE
    #systemd-networkd-wait-online.service: Failed with result 'exit-code'.
    #Startup finished in 16.250s (kernel) + 0 (initrd) + 2min 2.838s (userspace) = 2min 19.089s.
    # Enable the unit (and the wait-online service it depends on) inside the
    # chroot so it starts on first boot.
    enter_chroot ${rootfs} "
        systemctl enable \
            ${tci_service} \
            systemd-networkd-wait-online.service \
    "
}
# Print the package set to preinstall as one space-separated line.
# An explicit ${debian_default_packages} overrides everything; otherwise
# arm64 targets get EFI/firmware extras on top of the common set.
get_default_packages() {
	local common="haveged login net-tools netcat-openbsd openssh-server pciutils strace tcpdump"
	local arm64_extras="efibootmgr firmware-qlogic firmware-bnx2x"

	if [[ -n ${debian_default_packages} ]]; then
		echo ${debian_default_packages}
	elif [[ ${target_arch} == "arm64" ]]; then
		echo ${common} ${arm64_extras}
	else
		echo ${common}
	fi
}
| true |
6505cbff50668230f9e6aa5b6ca0307cd4ab24c9 | Shell | haroldcarr/learn-haskell-coq-ml-etc | /haskell/course/2013-11-nicta/dodiffs | UTF-8 | 883 | 3.25 | 3 | [
"Unlicense"
] | permissive | #!/bin/bash
# Created : 2014 Jul 15 (Tue) 01:56:08 by Harold Carr.
# Last Modified : 2014 Jul 15 (Tue) 12:01:07 by Harold Carr.
#
# Compare the NICTA course files in the master checkout against the slave
# copy and report which stored diffs are out of date.

base=~/ftptmp/ebooks-music-pics/tech
master="$base/haskell-nicta-course"
slave="$base/programming-language/haskell/course/nicta"
diffDir="$slave/diffs"

s=src
sc=src/Course
files="Setup.lhs $s/Course.hs $sc/Core.hs $sc/Id.hs $sc/Optional.hs $sc/Validation.hs $sc/List.hs $sc/Functor.hs $sc/Apply.hs $sc/Applicative.hs $sc/Bind.hs $sc/Monad.hs $sc/FileIO.hs $sc/State.hs $sc/StateT.hs"

outDir=/tmp/NICTA
mkdir -p "$outDir/src/Course"

# $files is deliberately unquoted: it is a space-separated list of
# (space-free) relative paths.
for f in $files
do
    echo "--------------------------------------------------"
    echo -n "$f" " "
    # Fresh diff between master and slave for this file.
    diff -b "$master/$f" "$slave/$f" > "$outDir/$f-$$"
    # Up to date when the stored diff matches the fresh one.
    if cmp "$diffDir/$f-diff" "$outDir/$f-$$"
    then echo OK
    else echo "NEEDS UPDATE, do:"
	echo "  diff -b $master/$f $slave/$f"
    fi
done
| true |
6c18b7fe7bb6d63c992672b50449cbee3e40fa7b | Shell | lappsgrid-incubator/GalaxyMods | /rename.sh | UTF-8 | 184 | 3.125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Strip the "lapps_" prefix from every tool directory under tools/ and stage
# the renamed directory with git.

unset CDPATH
# Abort instead of renaming things in the wrong directory.
cd tools || exit 1

# Glob instead of parsing `ls`; when nothing matches, the literal pattern is
# skipped by the -d guard.
for dir in lapps_* ; do
    [ -d "$dir" ] || continue
    newname=${dir#lapps_}
    echo "Renaming $dir"
    #git rm $dir
    mv "$dir" "$newname"
    git add "$newname"
done
| true |
e027729f8bb2477e32159a6e294afd99f1a68c3c | Shell | zerami/linux | /bash-autodeploy-scripts/cent6/bin/pineauto | UTF-8 | 1,163 | 3.40625 | 3 | [] | no_license | #!/bin/sh
# Interactive admin menu: each choice dispatches to a helper script under
# /etc/pineauto/menu/.
# NOTE(review): arrays, `select`, and $(( )) on arrays are bash features;
# the file is declared #!/bin/sh -- confirm /bin/sh is bash on the target.
. /etc/pineauto/scripts.conf
clear
prompt="Input your choose:"
options=("List website" "Add website" "Delete website" "Backup code" "Grant permission webserver" "Manage ZendOpcache" "Install extra framework for PHP" "Install Databases" "Install PhpmyAdmin" "Install Cache")
printf "=========================================================================\n"
printf "                              PINE AUTO Menu\n"
printf "=========================================================================\n"
# PS3 is the prompt `select` prints; the embedded newline puts it on its own
# line below the menu.
PS3="
$prompt"
select opt in "${options[@]}" "Exit"; do
    case "$REPLY" in
    1 ) /etc/pineauto/menu/list-website;;
    2 ) /etc/pineauto/menu/add-website;;
    3 ) /etc/pineauto/menu/delete-website;;
    4 ) /etc/pineauto/menu/backup-code;;
    5 ) /etc/pineauto/menu/grant-permission-webserver;;
    6 ) /etc/pineauto/menu/m-opcache;;
    7 ) /etc/pineauto/menu/install-php-framework;;
    8 ) /etc/pineauto/menu/install-database;;
    9 ) /etc/pineauto/menu/install-phpmyadmin;;
    10 ) /etc/pineauto/menu/install-cache;;
    # The entry right after the options array is "Exit".
    $(( ${#options[@]}+1 )) ) printf "\nGoodbye...!\n"; break;;
    *) echo "Wrong input, please try again";continue;;
    esac
done
done | true |
1a4859c2a1a907ed32703efebada88857fc62b9d | Shell | eduardo-lago-aguilar/blss | /tst/lenny/it-proto | UTF-8 | 1,229 | 3.21875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Integration Tests for prototype 'env/lenny'
# Resolve script location, $0 may be a link
script="$0"
# Need this for relative symlinks
while [ -h "$script" ] ; do
    lst=$(ls -ld "$script")
    # Extract the link target after " -> " from the ls output.
    lnk=$(expr "$lst" : '.*-> \(.*\)$')
    if expr "$lnk" : '/.*' > /dev/null; then
        script="$lnk"
    else
        script=$(dirname "$script")/"$lnk"
    fi
done
BLSS_HOME=$(dirname "$script")/../..
BIN="$BLSS_HOME"/bin
ENV="$BLSS_HOME"/env
EXT="$BLSS_HOME"/ext
TST="$BLSS_HOME"/tst
# Shared TAP-style helpers: plan_tests, ok, ln_is, no_ln, ptx, ...
. "$TST"/tst-commons
# Total amount of tests to perfom
plan_tests 6
# Test Case: Tests for size
DU=$(sudo du -hs /tmp/protoitroot)
# Keep only the leading number of "<N>M<tab>/tmp/protoitroot".
DU=${DU%%M[[:space:]]*/tmp/protoitroot}
expected_sz=149
(( DU <= ${expected_sz} ))
ok $? "Verify is proto size $DU <= ${expected_sz}MB"
. "$TST"/it-proto-all
. "$TST"/it-proto-debian
. "$TST"/it-proto-lenny
verify_adm_user_config
verify_static_networking
verify_ip6_disabled
tst_mf_all_purged
# Test Case: Tests for Kernel
# ln_is / no_ln consume the piped package list line by line inside the
# brace group (same shell for all three assertions).
ptx dpkg --get-selections linux-image* | awk '{print $1" "$2}' | {
    ln_is "linux-image-"$MJ.$MN.$RV-$RS"-amd64 install" "Verify if current Kernel is the correct Linux kernel image"
    ln_is "linux-image-amd64 install"
    no_ln "Verify if no other linux-image-* package is installed"
}
| true |
7f7201109d618ffb6bf2a59cea15db8f8ff1b705 | Shell | plan44/plan44-feed | /p44pagekite/files/etc/service/pagekite/run | UTF-8 | 615 | 2.875 | 3 | [] | no_license | #!/bin/sh
# runit service script: start p44pagekite with product-specific frontend
# settings when available, falling back to the generic plan44 device config.
if [ -x /usr/bin/p44maintd ]; then
    # obtain system information vars
    p44maintd --defs >/tmp/p44defs
    # NOTE(review): `source` is a bashism; works under busybox ash too, but
    # `.` would be the portable spelling for #!/bin/sh.
    source /tmp/p44defs
fi
if [ -z "${PRODUCT_KITE_FRONTEND}" ]; then
    # no product specific config, use generic device config
    PRODUCT_KITE_FRONTEND="devices.plan44.ch"
    PRODUCT_KITE_PORT="22281"
    AUTOKITE_SEED="0x5A27127D2671"
fi
# A product config may set the frontend but omit the seed; default it.
if [ -z "${AUTOKITE_SEED}" ]; then
    AUTOKITE_SEED="0"
fi
# start p44pagekite
exec p44pagekite \
    -S -F ${PRODUCT_KITE_FRONTEND} -P ${PRODUCT_KITE_PORT} -C \
    -x ${AUTOKITE_SEED} \
    -f \
    /etc/pagekite/kite.cfg \
    /flash/kite.cfg \
    /flash/tempkite.cfg \
    >/dev/null 2>&1
| true |
6905a4be435e3a3bc3cdd94e43d2effa354c9da2 | Shell | cha63506/core-3 | /unionfs-fuse/PKGBUILD | UTF-8 | 852 | 2.546875 | 3 | [] | no_license | #
# Platform Packages for Chakra, part of chakra-project.org
#
# Maintainer: Neophytos Kolokotronis <tetris4@gmail.com>
# Contributors from Arch: Andrzej Giniewicz <gginiu@gmail.com>
#                         Smith Dhumbumroong <zodmaner at gmail dot com>
# PKGBUILD for unionfs-fuse: standard makepkg metadata plus build/package.
pkgname=unionfs-fuse
pkgver=0.26
pkgrel=2
pkgdesc="unionfs-fuse is an effort to create a userland unionfs implementation that is way more flexible than the current in-kernel based unionfs solution."
arch=('x86_64')
url="http://podgorny.cz/moin/UnionFsFuse"
license=('BSD')
depends=('fuse')
makedepends=('cmake')
source=(http://podgorny.cz/unionfs-fuse/releases/$pkgname-$pkgver.tar.xz)
md5sums=('689c636484756f6f7a728ef354cbeac2')
build() {
  cd "$srcdir"/$pkgname-$pkgver
  make
}
package() {
  cd "$srcdir"/$pkgname-$pkgver
  # PREFIX=/usr so binaries land under /usr/bin inside the package root.
  make DESTDIR="$pkgdir" PREFIX=/usr install
  # Ship the BSD license text as required for license=('BSD').
  install -Dm644 "$srcdir"/$pkgname-$pkgver/LICENSE "$pkgdir"/usr/share/licenses/$pkgname/LICENSE
}
| true |
035f9c3cb779514cab48f794946044394a135cfa | Shell | nuno-c-afonso/gesto_eval | /basho_bench/scripts/init_bench.sh | UTF-8 | 472 | 2.9375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Prepare basho_bench on all "bench" hosts: optionally pick a git branch,
# build the tool remotely, then run the local naming helpers.
if [ $# -eq 1 ]
then
    branch=$1
    Command1="cd ./basho_bench/ && git reset --hard && git fetch && git checkout $branch && git pull"
else
    Command1="cd ./basho_bench && git stash save --keep-index && git pull origin master && cp ../rebar ."
fi
# NOTE(review): Command1 is built but its execution below is commented out,
# so the branch argument currently has no effect.
# ./scripts/parallel_command.sh bench "$Command1"
Command2="cd ./basho_bench && sudo make all"
./scripts/parallel_command.sh bench "$Command2"
./scripts/change_benchname.sh
./scripts/init_name_bench.sh
| true |
f1def674e821d3bb62162eb6e75c3d2a7707e8c3 | Shell | ajdruff/git-get-test | /tests/test-git-get.sh | UTF-8 | 2,498 | 3.140625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# shunit2 suite for git-get: exercises install, clone, branch/tag checkout,
# and zip/tar archive modes.
# Pre-declare, then assign, so the assignment's exit status is not masked.
SCRIPT_DIR=
SCRIPT_DIR="$(dirname "${BASH_SOURCE[0]}")"
# shunit2 hook: start every run from a clean working directory.
oneTimeSetup() {
    cleanup
    return 0
}
# Remove every directory and archive the tests may leave behind.
# Always returns 0 so it is safe in setUp/tearDown hooks.
cleanup() {
    local dir file
    for dir in unzip_hw hellogitworld feature_image RELEASE_1.1 test_untar temp test; do
        [ -d "$dir" ] && rm -rf "$dir"
    done
    for file in hellogitworld-master.tar.gz hellogitworld-master.zip; do
        [ -f "$file" ] && rm -f "$file"
    done
    return 0
}
# shunit2 hook: leave no artifacts behind after the suite.
oneTimeTearDown() {
    cleanup
    return 0
}
# Install git-get from the master branch and check it answers -v.
test_install() {
    (curl -L https://raw.githubusercontent.com/ajdruff/git-get/master/git-install.sh master | bash) || return 1
    git get -v || return 1
    # docker run kcov/kcov /usr/bin/git-get
}
# Default mode: checkout without a .git directory.
test_clone() {
    git get https://github.com/githubtraining/hellogitworld.git
    [ -d "hellogitworld" ] || return 1
    [ -d "hellogitworld/.git" ] && return 1
    [ -f "hellogitworld/fix.txt" ] || return 1
    [ -d "hellogitworld" ] && rm -rf hellogitworld
}
# -b <branch> into a named target directory; branch-only file must exist.
test_clone_branch() {
    git get -b feature_image https://github.com/githubtraining/hellogitworld.git feature_image
    [ -d "feature_image" ] || return 1
    [ -d "feature_image/.git" ] && return 1
    [ -f "feature_image/screenshot.png" ] || return 1
}
# -b with a release tag behaves like a branch checkout.
test_clone_release() {
    git get -b RELEASE_1.1 https://github.com/githubtraining/hellogitworld.git RELEASE_1.1
    [ -d "RELEASE_1.1" ] || return 1
    [ -d "RELEASE_1.1/.git" ] && return 1
    [ -f "RELEASE_1.1/resources/labels.properties" ] || return 1
    #rm rf hellogitworld
}
# -z downloads a zip archive instead of checking out.
test_zip() {
    git get -z https://github.com/githubtraining/hellogitworld.git
    [ -f "hellogitworld-master.zip" ] || return 1
}
# The downloaded zip unpacks and contains the repo files.
test_unzip() {
    rm -rf unzip_hw 2>/dev/null
    unzip hellogitworld-master.zip -d unzip_hw
    [ -f "unzip_hw/hellogitworld/README.txt" ] || return 1
    return 0
}
# Archives must not contain git metadata.
test_unzip_git_exists() {
    [ -d "unzip_hw/hellogitworld/.git" ] && return 1
    return 0
}
# -t downloads a gzipped tarball.
test_tar() {
    git get -t https://github.com/githubtraining/hellogitworld.git
    [ -f "hellogitworld-master.tar.gz" ] || return 1
}
# The tarball unpacks and contains the repo files.
test_untar() {
    mkdir -p test_untar
    tar -xzf hellogitworld-master.tar.gz -C test_untar
    [ -f "test_untar/hellogitworld/README.txt" ] || return 1
    return 0
}
# Tarballs must not contain git metadata either.
test_untar_git_exists() {
    [ -d "test_untar/hellogitworld/.git" ] && return 1
    return 0
}
# Hand control to the shunit2 test runner.
. "$SCRIPT_DIR"/shunit2
| true |
1eeaf4dec09b4540b5338a92a30349e1bd6f14c4 | Shell | xta0/Dotfiles | /packages/brew.sh | UTF-8 | 416 | 2.796875 | 3 | [
"MIT"
] | permissive |
# Install Homebrew into ~/homebrew and a base set of command-line tools.
git clone https://github.com/Homebrew/brew ~/homebrew
export PATH="$HOME/homebrew/bin:$HOME/homebrew/sbin:$PATH"

brew update
brew upgrade

tools=(
    coreutils
    wget
    ccat
    xz
    tree
    neofetch
    dockutil
)
# Install tools
brew install "${tools[@]}"

# Extra packages (currently none).
pls=(
)
# `brew install` with no formula names is an error, so skip empty lists.
if [ "${#pls[@]}" -gt 0 ]; then
    brew install "${pls[@]}"
fi

# Give brew a moment to settle before cleaning up.
sleep 1

# Remove outdated versions from the cellar.
brew cleanup
| true |
eac6307745fc4eaf844bc34e2baaba728d4f7933 | Shell | karamelchef/karamel | /karamel-core/src/main/resources/se/kth/karamel/backend/shellscripts/install_chefdk.sh | UTF-8 | 1,076 | 3.375 | 3 | [
"Apache-2.0"
] | permissive | set -eo pipefail; mkdir -p %install_dir_path% ; cd %install_dir_path%; echo $$ > %pid_file%; echo '#!/bin/bash
RES=0
if [ %osfamily% == "redhat" ] ; then
yum list installed cinc-workstation
if [ $? -ne 0 ] ; then
chefdkfile='cinc-workstation-%chefdk_version%-1.el8.x86_64.rpm'
rm -f "$chefdkfile"
wget "https://repo.hops.works/master/$chefdkfile"
%sudo_command% yum install -y "$chefdkfile"
RES=$?
if [ $RES -ne 0 ] ; then
sleep 10
%sudo_command% yum install -y "$chefdkfile"
fi
fi
elif [ %osfamily% == "ubuntu" ] ; then
dpkg -s chefdk
if [ $? -ne 0 ] ; then
chefdkfile='cinc-workstation_%chefdk_version%-1_amd64.deb'
rm -f "$chefdkfile"
wget "https://repo.hops.works/master/$chefdkfile"
%sudo_command% dpkg -i "$chefdkfile"
RES=$?
if [ $RES -ne 0 ] ; then
sleep 10
%sudo_command% dpkg -i "$chefdkfile"
fi
fi
else
echo "Unrecognized version of linux. Not ubuntu or redhat family."
exit 1
fi
exit $RES
' > install-chefdk.sh ; chmod +x install-chefdk.sh ; ./install-chefdk.sh
| true |
ca4453c8d77ab7191a180d883e5f06a542353de6 | Shell | ciiqr/dotfiles | /home/.local/bin/largest | UTF-8 | 249 | 3.109375 | 3 | [] | no_license | #!/usr/bin/env bash
# Show disk usage of the given paths (default: every entry in the current
# directory), smallest first; K-sized entries and error noise are filtered.
set -e

targets=("$@")

# No arguments: fall back to the immediate children of the cwd.
if (( ${#targets[@]} == 0 )); then
    mapfile -t targets < <(find . -mindepth 1 -maxdepth 1)
fi

sudo du -sh "${targets[@]}" 2>&1 \
    | grep -E -v '(^[0-9\.]+K|^0\s|No such file or directory)' \
    | sort -h
| true |
7e650493048ef8f304227d1cd6698bf944c6c452 | Shell | Inpher/inpher-rest-api | /scripts/clearall-api.sh | UTF-8 | 1,299 | 2.703125 | 3 | [] | no_license | #!/bin/bash
# CGI endpoint: tear down the API's backing containers (elastic, zookeeper,
# rabbitmq, hdfs), recreate them, restart Tomcat, and emit an HTML log.
# Each step's docker output is appended to OUT with a <br> label.
OUT=""
OUT="$(echo "$OUT"; echo "<br>Stopping elastic: <br>"; docker stop elastic)"
OUT="$(echo "$OUT"; echo; docker rm -v elastic)"
OUT="$(echo "$OUT"; echo "<br>Stoppig zookeeper: <br>"; docker stop zookeeper)"
OUT="$(echo "$OUT"; echo; docker rm -v zookeeper)"
OUT="$(echo "$OUT"; echo "<br>Stopping rabbitMQ: <br>"; docker stop rabbitmq)"
OUT="$(echo "$OUT"; echo; docker rm -v rabbitmq)"
OUT="$(echo "$OUT"; echo "<br>Stopping HDFS: <br>"; docker stop hdfs)"
OUT="$(echo "$OUT"; echo; docker rm -v hdfs)"
# NOTE(review): the "Restarting rabbitMQ" label below is reused for the
# hdfs run, so the HTML log mislabels that step (and "Stoppig" above is a
# typo in the emitted HTML).
OUT="$(echo "$OUT"; echo "<br>Restarting rabbitMQ: <br>"; docker run --name hdfs -d -t -p 9000:9000 -p 50070:50070 sequenceiq/hadoop-docker)"
OUT="$(echo "$OUT"; echo "<br>Restarting rabbitMQ: <br>"; docker run --name rabbitmq -td -p 5672:5672 rabbitmq)"
OUT="$(echo "$OUT"; echo "<br>Restarting elastic: <br>"; docker run --name elastic -td -p 9300:9300 -p 9200:9200 inpher/elastic-frequency:_ultra)"
OUT="$(echo "$OUT"; echo "<br>Restarting zookeeper: <br>"; docker run -td --name zookeeper -p 2181:2181 jplock/zookeeper)"
OUT="$(echo "$OUT"; echo "<br>Restarting Tomcat: <br>")"
# Give the containers a head start before bouncing Tomcat.
sleep 5
/home/ubuntu/projects/inpher-rest-api/scripts/restart-tomcat
# CGI response: header, blank line, then the HTML body with the log.
echo "Content-type: text/html"
echo ""
echo "<h3>Cleanup completed</h3>"
echo "<br><br>Log:<br> $OUT"
echo "<br><br>"
| true |
11a7d107a14e333cbb987bfddbeae4ef2fb5e185 | Shell | Lex-2008/dedup | /sample.sh | UTF-8 | 530 | 2.859375 | 3 | [
"MIT"
] | permissive | #!/bin/busybox ash
# Deduplication pipeline driver: progressively narrow the candidate set,
# then hardlink whatever is still identical.

# Print a progress line, then run one pipeline stage.
step() {
	msg=$1
	shift
	echo "...$msg"
	"$@"
}

# Checksum one byte range of the remaining candidates and filter on it.
narrow() {
	echo "...Now, filter files based on $1"
	./checksum.sh "$2"
	./filter.sh
}

step "First, init the database" ./init.sh
step "Now, search for files" ./search.sh "$@"
step "And filter files based on sizes" ./filter.sh
narrow "first bytes" first
narrow "last bytes" last
narrow "middle bytes" middle
narrow "checksums" all
step "Now, hardlink similar files" ./hardlink.sh
| true |
10155b8085695a70780647c0c7ab49b312f86ea1 | Shell | TAIPANBOX/k8s.io | /dns/push.sh | UTF-8 | 3,719 | 3.359375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This runs as you.  It assumes you have built an image named ${USER}/octodns.

# Pushes config to zones.
# args: args to pass to octodns (e.g. --doit, --force, a list of zones)
push () {
    docker run -ti \
        -u "$(id -u)" \
        -v ~/.config/gcloud:/.config/gcloud:ro \
        -v "$(pwd)"/octodns-config.yaml:/octodns/config.yaml:ro \
        -v "$(pwd)"/zone-configs:/octodns/config:ro \
        ${USER}/octodns \
        octodns-sync \
        --config-file=/octodns/config.yaml \
        --log-stream-stdout \
        --debug \
        "$@"
}

# Assumes to be running in a checked-out git repo directory, and in the same
# subdirectory as this file.
if [[ ! -f octodns-config.yaml || ! -d zone-configs ]]; then
    echo "CWD does not appear to have the configs needed: $(pwd)"
    exit 1
fi

# Dry-run, push, and verify one set of zones; halts the script on failure.
#   $1: lowercase label for messages ("canary"/"prod")
#   $2: log file
#   $3: exit code to use when the dry-run or push fails
#   remaining args: zones
deploy_zones () {
    local label=$1
    local log=$2
    local fail_code=$3
    shift 3
    local Label="${label^}"   # capitalized variant used in status messages

    echo "Dry-run to ${label} zones"
    if ! push "$@" > "${log}" 2>&1; then
        echo "${Label} dry-run FAILED, halting; log follows:"
        echo "========================================="
        cat "${log}"
        exit "${fail_code}"
    fi

    echo "Pushing to ${label} zones"
    if ! push --doit "$@" >> "${log}" 2>&1; then
        echo "${Label} push FAILED, halting; log follows:"
        echo "========================================="
        cat "${log}"
        exit "${fail_code}"
    fi
    echo "${Label} push SUCCEEDED"

    # Verify each zone, retrying to allow for DNS propagation delay.
    local zone i
    for zone in "$@"; do
        local TRIES=12
        echo "Testing ${label} zone: ${zone}"
        for i in $(seq 1 "$TRIES"); do
            if ./check-zone.sh "${zone}" >> "${log}" 2>&1; then
                break
            fi
            if [ "$i" != "$TRIES" ]; then
                echo " test failed, might be propagation delay, will retry..."
                sleep 10
            else
                # Historical behavior: zone-verification failures exit 2 for
                # both canary and prod.
                echo "${Label} test FAILED, halting; log follows:"
                echo "========================================="
                cat "${log}"
                exit 2
            fi
        done
        echo "${Label} ${zone} SUCCEEDED"
    done
}

# Push to canaries first; only touch prod once the canaries verify.
deploy_zones canary log.canary 2 canary.k8s.io. canary.kubernetes.io.
deploy_zones prod log.prod 3 k8s.io. kubernetes.io.
| true |
dcd46982015550a31f91649ba3d71d5aa5af9006 | Shell | humlab-sead/sead_clearinghouse | /transport_system/install_transport_system.sh | UTF-8 | 1,685 | 3.84375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
set -e

# Read the default target server from the vault, if configured.
if [ -f ~/vault/.default.sead.server ]; then
    dbhost=$(head -n 1 ~/vault/.default.sead.server)
fi

script_name=$(basename "$0")

dbuser=humlab_admin
dbport=5432
dbname=
# Parsed for forward compatibility; not used by the install itself yet.
on_schema_exists=abort

# Parse --key=value style options.
for i in "$@"; do
    case $i in
        -h=*|--dbhost=*)
            dbhost="${i#*=}"; shift;;
        -p=*|--port=*)
            dbport="${i#*=}"; shift ;;
        -d=*|--dbname=*)
            dbname="${i#*=}"; shift ;;
        -u=*|--dbuser=*)
            dbuser="${i#*=}"; shift ;;
        -x|--on-schema-exists=*)
            on_schema_exists="${i#*=}"; shift ;;
        *);;
    esac
done

function usage() {
    echo "usage: $script_name [--dbhost=target-server] [--port=port] [--dbname=target-database]"
    echo ""
    echo "  Please note that this script deploys the system directly to the target DB."
    echo "  Use this only for testing. Proper install should be carried out by the SEAD CCS."
    echo "  Use ./deploy_transport_system.sh to create a change request in SEAD CCS."
    echo ""
    exit 64
}

# Refuse to run as the wrong DB user or without a host/database.
function check_install_options() {
    if [ "$dbuser" != "humlab_admin" ]; then
        echo "fatal: script must be run by user humlab_admin." >&2
        exit 64
    fi
    if [ "$dbhost" == "" ] || [ "$dbname" == "" ]; then
        usage
    fi
}

# Apply the transport-system SQL in a single transaction, aborting on error.
function install_transport_system() {
    psql --host="$dbhost" --port="$dbport" --username="$dbuser" --dbname="$dbname" --no-password -q -1 -v ON_ERROR_STOP=1 --file ./05_install_transport_system.sql
}

echo "Deploying SEAD Clearinghouse transport system using URI $dbuser@$dbhost:$dbport/$dbname"
check_install_options
echo -n "  Running install..."
install_transport_system
echo "done!"
| true |
9c3b8808f07717812dfc27b229a943374772b1f9 | Shell | malinoskj2/prompt_j2 | /tests/run_all.sh | UTF-8 | 253 | 3.40625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env zsh
# Run all tests in this folder
# ${0:a:h} is zsh modifier syntax: absolute path of $0, then its directory.
SCRIPT_DIR=${0:a:h}
echo "Running all tests."
echo "SCRIPT_DIR: $SCRIPT_DIR"
# zsh's builtin echo interprets \n escapes by default (unlike bash).
echo "------------------------------\n\n"
# Execute every file in this directory whose name starts with "test".
for file in $SCRIPT_DIR/test*; do
    echo "Running: '$file'\n"
    $file
done
| true |
283d7096258164bbd33f8276fd635bf990d4b35f | Shell | radmike/cq-scripts | /cq-import-bundles.sh | UTF-8 | 1,070 | 3.53125 | 4 | [] | no_license | #!/bin/bash
#
# This script imports all bundle projects into CQ
#
# Grab the current directory where the script was executed
CURRENT_DIR="$( pwd )"
# Figure out the actual project home
PRJ_HOME="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
# Start script execution
echo '-------------------------------------------------------------------------------'
echo 'import all bundles into cq'
echo '-------------------------------------------------------------------------------'
echo ''
echo 'auto-deploys all bundles via:'
echo ' mvn -Pauto-deploy install'
echo ''
echo 'project home: '
echo ' ' $PRJ_HOME
echo '-------------------------------------------------------------------------------'
echo
echo '-------------------------------------------------------------------------------'
echo 'auto deploying bundles...'
echo '-------------------------------------------------------------------------------'
$PRJ_HOME/scripts/cq-deploy.sh $1 clean-bundles services taglib || exit
# End script execution
# Go back to the original directory
cd $CURRENT_DIR
| true |
815bf6f11f01ea6f0971e05d860e72f90d223db9 | Shell | Varhoo/nelen-alphabet | /bin/docker-entrypoint.sh | UTF-8 | 1,039 | 2.8125 | 3 | [] | no_license | #!/usr/bin/bash
# Container entrypoint: create the app's database role, run Django
# migrations, and launch uwsgi.
# docker run -d --name "postgresql" -h "postgresql" -p 5432:5432 -e POSTGRES_PASSWORD=gun2onCogh -v /var/lib/postgresql/data:/var/lib/postgresql/data:Z postgres
# create postgres db and user for greentea
# The unquoted heredoc expands $POSTGRES_PASSWORD into the SQL below.
PGPASSWORD=$POSTGRES_PASSWORD psql -h $POSTGRES_SERVER -U postgres <<EOF
CREATE USER alphabet WITH PASSWORD '$POSTGRES_PASSWORD';
CREATE DATABASE alphabet owner alphabet;
ALTER USER alphabet CREATEDB;
EOF
mkdir -p /data/log/
source /data/env/bin/activate
# Switch to production settings when a settings/production.py exists.
PRODUCTION=$( find /data/ -name production.py | grep "settings/production.py" )
export DJANGO_SETTINGS_MODULE="alphabet.settings"
if [ ! -z $PRODUCTION ]; then
    export DJANGO_SETTINGS_MODULE="alphabet.settings.production"
fi
# python /data/manage.py migrate --fake-initial --noinput || exit
python /data/manage.py migrate --noinput || exit
python /data/manage.py collectstatic --noinput
# python manage.py compilemessages
# NOTE(review): uwsgi is not exec'd, so it runs as a child of this shell.
uwsgi --http :80 --thunder-lock --enable-threads --master --wsgi-file /data/alphabet/wsgi.py #--daemonize /data/log/uwsgi.log
| true |
d4b7d4d22a00e011e35ab8063b236cfb0b718085 | Shell | vodchella/dotfiles | /.local/bin/ping.sh | UTF-8 | 160 | 3.015625 | 3 | [] | no_license | #!/bin/bash
# Probe mail.ru once and print the round-trip time in ms, or "?" on failure.
latency=$(ping mail.ru -c1 2> /dev/null | grep 'time=' | awk '{print substr($8, 6)}')
if [ -n "$latency" ]; then
    echo $latency
else
    echo "?"
fi
| true |
ed105a03a532ee6c5f4f240b4168747d1a54939a | Shell | nerilex/steelcrypt | /tests/testvectors/convert_to_xml.sh | UTF-8 | 2,242 | 3.453125 | 3 | [] | no_license | #!/bin/bash
#cat <(tr -d '\r \t' | egrep -o '^[^#]*' - | grep '=' | egrep -v '^[[:space:]]*$')
#exit
# Convert a NIST-style block-cipher KAT file (COUNT=/KEY=/IV=/PLAINTEXT=/
# CIPHERTEXT= records, read from $1 or stdin) into the cryptolib.org
# block-cipher_kat XML format on stdout.
index=""
cat <<EOF
<?xml version="1.0"?>
<testFile
 xmlns="https://testvectors.cryptolib.org/xml-schema/v0.1/block-cipher_kat"
 xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
 xsi:schemaLocation="https://testvectors.cryptolib.org/xml-schema/v0.1/block-cipher_kat
 https://testvectors.cryptolib.org/xml-schema/v0.1/block-cipher_kat.xsd">
<header>
EOF
echo " <convertDate>$(date '+%FT%T%:z')</convertDate>"
# With a filename argument: record provenance and redirect stdin to it.
if [ -n "$1" ]; then
    orig_name="$1"
    orig_sha256=$(sha256sum "$1" | cut -f1 -d' ')
    orig_sha512=$(sha512sum "$1" | cut -f1 -d' ')
    exec < "$1"
    echo " <originalFilename>""$(basename "$orig_name")""</originalFilename>"
    echo " <originalSha256>${orig_sha256}</originalSha256>"
    echo " <originalSha512>${orig_sha512}</originalSha512>"
fi
header_done=0
# Switched to kat_vector_with_iv the first time an IV line is seen.
kat_type="kat_vector_without_iv"
while read LINE; do
    if [ "$header_done" = "0" ]; then
        # Leading '#' comments become <comment> entries; the first
        # non-comment line ends the header.
        A=$(sed 's/^#[[:space:]]*\(.*\)$/\1/1' <<< "$LINE")
        if [ -n "$A" ]; then
            echo " <comment>$A</comment>"
        else
            echo -e "</header>\n"
            header_done=1
            echo -e "<body>\n"
        fi
    else
        # Split "KEY = VALUE" after stripping blanks and tabs.
        IFS='=' read KEY VALUE < <(tr -d '\t ' <<< "$LINE")
        if [ -n "$VALUE" ]; then
            case $KEY in
            COUNT)
                index=$VALUE
                ;;
            KEY)
                secret=$VALUE
                ;;
            IV)
                iv=$VALUE
                kat_type="kat_vector_with_iv"
                ;;
            PLAINTEXT)
                # Left-pad odd-length hex strings to whole bytes.
                if [ $(( ${#VALUE} % 2)) -eq 1 ]; then
                    plaintext=0$VALUE
                else
                    plaintext=$VALUE
                fi
                ;;
            CIPHERTEXT)
                if [ $(( ${#VALUE} % 2)) -eq 1 ]; then
                    ciphertext=0$VALUE
                else
                    ciphertext=$VALUE
                fi
                ;;
            *)
                ;;
            esac
        else
            # Blank line terminates one record: emit the collected vector.
            if [ -n "$index" ]; then
                printf " <%s>\n" $kat_type
                printf " <%s>%s</%s>\n" index $index index
                printf " <%s>%s</%s>\n" key $secret key
                if [ -n "$iv" ]; then
                    printf " <%s>%s</%s>\n" iv "$iv" iv
                fi
                printf " <%s>%s</%s>\n" plaintext $plaintext plaintext
                printf " <%s>%s</%s>\n" ciphertext $ciphertext ciphertext
                printf " </%s>\n\n" $kat_type
                index=""
            fi
        fi
    fi
done < <( tr -d '\r')
cat <<EOF
</body>
</testFile>
EOF
| true |
5fcbe6158a127e6b2c8530e6ad1336113ffa15f4 | Shell | Pepech97/s.o2EV | /scriptsSO/s30.sh | UTF-8 | 759 | 3.46875 | 3 | [] | no_license | while [ $# -ne 0 ]
do
	PROCESO=$1

	# Abort when no process with this exact name is running.
	if ! ps -e | grep -q " $PROCESO$"
	then
		echo error, el proceso no existe
		exit 2
	fi

	NLINEAS=$(ps -e | grep " $PROCESO$" | wc -l)

	# Walk the matching ps lines from last to first and print PID/PPID.
	while [ "$NLINEAS" -ne 0 ]
	do
		LINEA=$(ps -el | grep " $PROCESO$" | head -n "$NLINEAS" | tail -1)
		NUMPROCESO=$(echo $LINEA | tr -s " " | cut -d " " -f4)
		PRPADRE=$(echo $LINEA | tr -s " " | cut -d " " -f5)
		if [ "$PRPADRE" -eq 0 ]
		then
			echo PID: $NUMPROCESO PPID: No tiene proceso padre $PROCESO
		else
			echo PID: $NUMPROCESO PPID: $PRPADRE $PROCESO
		fi
		NLINEAS=$((NLINEAS - 1))
	done
	shift
done
| true |
30bd07bb53e2165d97fc3aa946ea308591c993f9 | Shell | ros-noetic-arch/ros-noetic-swri-console | /PKGBUILD | UTF-8 | 1,476 | 2.90625 | 3 | [] | no_license | pkgdesc="ROS - A rosout GUI viewer developed at Southwest Research Insititute as an alternative to rqt_console."
# PKGBUILD for the swri_console ROS Noetic package.
# NOTE(review): "Insititute" in pkgdesc (on the preceding line) is a typo in
# the package description string.
url='http://ros.org/wiki/swri_console'
pkgname='ros-noetic-swri-console'
pkgver='1.1.0'
arch=('i686' 'x86_64' 'aarch64' 'armv7h' 'armv6h')
pkgrel=4
license=('BSD')
# ROS build-time dependencies, merged into makedepends below.
ros_makedepends=(
  ros-noetic-rosgraph-msgs
  ros-noetic-roscpp
  ros-noetic-rosbag-storage
  ros-noetic-catkin)
makedepends=(
  cmake
  ros-build-tools
  ${ros_makedepends[@]}
  boost
  qt5-base)
# ROS runtime dependencies, merged into depends below.
ros_depends=(
  ros-noetic-rosgraph-msgs
  ros-noetic-roscpp
  ros-noetic-rosbag-storage)
depends=(
  ${ros_depends[@]}
  boost
  qt5-base)
_dir="swri_console-${pkgver}"
source=("${pkgname}-${pkgver}.tar.gz"::"https://github.com/swri-robotics/swri_console/archive/${pkgver}.tar.gz")
sha256sums=('da27e4f92247ce7016a4325ef7ffea0ed18fa6fe671ce0b7f17652778ce9481c')
build() {
  # Use ROS environment variables
  source /usr/share/ros-build-tools/clear-ros-env.sh
  [ -f /opt/ros/noetic/setup.bash ] && source /opt/ros/noetic/setup.bash
  # Create build directory
  [ -d ${srcdir}/build ] || mkdir ${srcdir}/build
  cd ${srcdir}/build
  # Build project
  cmake ${srcdir}/${_dir} \
    -DCATKIN_BUILD_BINARY_PACKAGE=ON \
    -DCMAKE_INSTALL_PREFIX=/opt/ros/noetic \
    -DPYTHON_EXECUTABLE=/usr/bin/python \
    -DSETUPTOOLS_DEB_LAYOUT=OFF
  make
}
package() {
  cd "${srcdir}/build"
  make DESTDIR="${pkgdir}/" install
}
| true |
cf58cbdf36fecf162858720b3af4b2cff71a2ced | Shell | landalex/Scripts | /iso2img.sh | UTF-8 | 390 | 3.1875 | 3 | [] | no_license | #!/bin/bash
# Convert an ISO to a raw writable image with macOS hdiutil, then dd it onto
# a disk.  WARNING: dd overwrites the chosen disk -- verify the disk number
# in the `diskutil list` output before answering.
read -p "IMG target path: " img
read -p "ISO source path: " iso
hdiutil convert -format UDRW -o $img $iso
# hdiutil appends ".dmg"; rename the result back to the requested name.
mv $img.dmg $img
diskutil list
read -p "What number is the disk? " disk
# The disk must be unmounted before raw writes are allowed.
diskutil unmountDisk /dev/disk$disk
echo "Mounting the iso..."
sudo dd if=$img of=/dev/disk$disk bs=1m
echo "All done! Ejecting..."
diskutil eject /dev/disk$disk
echo "Until next time..."
ab6c5595dba17dc53d04684d69666e2e294d0005 | Shell | masavo/dotfiles | /sh/sgrm | UTF-8 | 201 | 3.171875 | 3 | [] | no_license | #!/bin/sh
# Interactively pick tracked files with fzf (multi-select) and `git rm` them.
sgrm() {
    # Earlier variant considered only files shown by `git status`:
    # files=$(git status --short | awk '{print $2}') &&
    files=$(git ls-files)
    selected_files=$(echo "$files" | fzf -m --preview 'git diff {}') &&
    # Deliberately unquoted so each selected line becomes its own argument.
    # NOTE(review): this breaks on paths containing spaces.
    git rm $selected_files
}
sgrm
| true |
2693133680c70cf05ddd07d66fbeef1b045dfaff | Shell | metadew/iesi | /core/bin/iesi-encrypt.sh | UTF-8 | 460 | 2.921875 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# Build a classpath from every jar under ../lib and ../plugin_lib (relative
# to this script) and launch the IESI encryption utility, restoring the
# caller's working directory afterwards.
calling_dir=$(pwd)
relative_dir=$(dirname "${BASH_SOURCE[0]}")
# NOTE(review): this assumes the script is invoked via a relative path from
# the current directory; an absolute invocation would double the prefix.
absolute_dir=$calling_dir/$relative_dir
lib_dir=$absolute_dir/../lib
plugin_lib=$absolute_dir/../plugin_lib
# Start with "*" so jars in the working directory are picked up as well.
classpath="*"
cd $lib_dir
for i in *.jar; do
    classpath="$classpath:$lib_dir/$i"
done
cd $plugin_lib
for i in *.jar; do
    classpath="$classpath:$plugin_lib/$i"
done
cd $lib_dir
# Forward all script arguments to the encryption launcher.
java -cp $classpath io.metadew.iesi.Application -launcher encryption "$@"
cd $calling_dir
| true |
bfe1309ee7a5231131d18c2c436a73033654de44 | Shell | samsucik/prosodic-lid-globalphone | /egs/yomdle_tamil/v1/local/chain/run_e2e_cnn.sh | UTF-8 | 6,669 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Copyright 2017 Hossein Hadian
# This script does end2end chain training (i.e. from scratch)
# local/chain/compare_wer.sh exp/chain/e2e_cnn_1a/
# System e2e_cnn_1a
# score_basic score_nomalized
# WER 13.64 10.6
# WER (rescored) 13.13 10.2
# CER 2.99 3.0
# CER (rescored) 2.88 2.9
# Final train prob 0.0113
# Final valid prob 0.0152
# steps/info/chain_dir_info.pl exp/chain/e2e_cnn_1a
# exp/chain/e2e_cnn_1a: num-iters=48 nj=5..8 num-params=3.0M dim=40->352 combine=0.047->0.047 (over 2) logprob:train/valid[31,47,final]=(0.002,0.008,0.011/0.008,0.013,0.015)
set -e
# configs for 'chain'
stage=0
nj=30
train_stage=-10
get_egs_stage=-10
affix=1a
# training options
tdnn_dim=450
minibatch_size=150=64,32/300=32,16/600=16,8/1200=8,4
cmvn_opts="--norm-means=false --norm-vars=false"
train_set=train
lang_decode=data/lang
lang_rescore=data/lang_rescore_6g
decode_e2e=true
# End configuration section.
echo "$0 $@" # Print the command line for logging
. ./cmd.sh
. ./path.sh
# parse_options.sh lets any of the config variables above be overridden from
# the command line (e.g. --stage 3 --affix 1b).
. ./utils/parse_options.sh
if ! cuda-compiled; then
cat <<EOF && exit 1
This script is intended to be used with GPUs but you have not compiled Kaldi with CUDA
If you want to use GPUs (and have them), go to src/, and configure and make on a machine
where "nvcc" is installed.
EOF
fi
lang=data/lang_e2e
treedir=exp/chain/e2e_monotree # it's actually just a trivial tree (no tree building)
dir=exp/chain/e2e_cnn_${affix}
# Stage 0: make a lang directory whose topology has one state per phone.
if [ $stage -le 0 ]; then
  # Create a version of the lang/ directory that has one state per phone in the
  # topo file. [note, it really has two states.. the first one is only repeated
  # once, the second one has zero or more repeats.]
  rm -rf $lang
  cp -r data/lang $lang
  silphonelist=$(cat $lang/phones/silence.csl) || exit 1;
  nonsilphonelist=$(cat $lang/phones/nonsilence.csl) || exit 1;
  steps/nnet3/chain/gen_topo.py $nonsilphonelist $silphonelist >$lang/topo
fi
# Stage 1: flat-start (end-to-end) preparation and a phone-level LM for the
# chain denominator graph, estimated from the training transcripts.
if [ $stage -le 1 ]; then
  steps/nnet3/chain/e2e/prepare_e2e.sh --nj $nj --cmd "$cmd" \
                                       --shared-phones true \
                                       --type mono \
                                       data/$train_set $lang $treedir
  $cmd $treedir/log/make_phone_lm.log \
  cat data/$train_set/text \| \
  steps/nnet3/chain/e2e/text_to_phones.py data/lang \| \
  utils/sym2int.pl -f 2- data/lang/phones.txt \| \
  chain-est-phone-lm --num-extra-lm-states=500 \
                     ark:- $treedir/phone_lm.fst
fi
# Stage 2: generate the network config (7 conv layers + 3 TDNN layers) via
# the xconfig parser.
if [ $stage -le 2 ]; then
  echo "$0: creating neural net configs using the xconfig parser";
  num_targets=$(tree-info $treedir/tree | grep num-pdfs | awk '{print $2}')
  cnn_opts="l2-regularize=0.075"
  tdnn_opts="l2-regularize=0.075"
  output_opts="l2-regularize=0.1"
  common1="$cnn_opts required-time-offsets= height-offsets=-2,-1,0,1,2 num-filters-out=36"
  common2="$cnn_opts required-time-offsets= height-offsets=-2,-1,0,1,2 num-filters-out=70"
  common3="$cnn_opts required-time-offsets= height-offsets=-1,0,1 num-filters-out=70"
  mkdir -p $dir/configs
  cat <<EOF > $dir/configs/network.xconfig
input dim=40 name=input
conv-relu-batchnorm-layer name=cnn1 height-in=40 height-out=40 time-offsets=-3,-2,-1,0,1,2,3 $common1
conv-relu-batchnorm-layer name=cnn2 height-in=40 height-out=20 time-offsets=-2,-1,0,1,2 $common1 height-subsample-out=2
conv-relu-batchnorm-layer name=cnn3 height-in=20 height-out=20 time-offsets=-4,-2,0,2,4 $common2
conv-relu-batchnorm-layer name=cnn4 height-in=20 height-out=20 time-offsets=-4,-2,0,2,4 $common2
conv-relu-batchnorm-layer name=cnn5 height-in=20 height-out=10 time-offsets=-4,-2,0,2,4 $common2 height-subsample-out=2
conv-relu-batchnorm-layer name=cnn6 height-in=10 height-out=10 time-offsets=-4,0,4 $common3
conv-relu-batchnorm-layer name=cnn7 height-in=10 height-out=10 time-offsets=-4,0,4 $common3
relu-batchnorm-layer name=tdnn1 input=Append(-4,0,4) dim=$tdnn_dim $tdnn_opts
relu-batchnorm-layer name=tdnn2 input=Append(-4,0,4) dim=$tdnn_dim $tdnn_opts
relu-batchnorm-layer name=tdnn3 input=Append(-4,0,4) dim=$tdnn_dim $tdnn_opts
## adding the layers for chain branch
relu-batchnorm-layer name=prefinal-chain dim=$tdnn_dim target-rms=0.5 $output_opts
output-layer name=output include-log-softmax=false dim=$num_targets max-change=1.5 $output_opts
EOF
  steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs
fi
# Stage 3: end-to-end LF-MMI training (no pre-existing alignments required).
if [ $stage -le 3 ]; then
  steps/nnet3/chain/e2e/train_e2e.py --stage $train_stage \
    --cmd "$cmd" \
    --feat.cmvn-opts "$cmvn_opts" \
    --chain.leaky-hmm-coefficient 0.1 \
    --chain.apply-deriv-weights true \
    --egs.stage $get_egs_stage \
    --egs.opts "--num_egs_diagnostic 100 --num_utts_subset 400" \
    --chain.frame-subsampling-factor 4 \
    --chain.alignment-subsampling-factor 4 \
    --trainer.add-option="--optimization.memory-compression-level=2" \
    --trainer.num-chunk-per-minibatch $minibatch_size \
    --trainer.frames-per-iter 1500000 \
    --trainer.num-epochs 3 \
    --trainer.optimization.momentum 0 \
    --trainer.optimization.num-jobs-initial 5 \
    --trainer.optimization.num-jobs-final 8 \
    --trainer.optimization.initial-effective-lrate 0.001 \
    --trainer.optimization.final-effective-lrate 0.0001 \
    --trainer.optimization.shrink-value 1.0 \
    --trainer.max-param-change 2.0 \
    --cleanup.remove-egs true \
    --feat-dir data/${train_set} \
    --tree-dir $treedir \
    --dir $dir || exit 1;
fi
# Stage 4: build the decoding graph from the trained model.
if [ $stage -le 4 ] && $decode_e2e; then
  # The reason we are using data/lang here, instead of $lang, is just to
  # emphasize that it's not actually important to give mkgraph.sh the
  # lang directory with the matched topology (since it gets the
  # topology file from the model). So you could give it a different
  # lang directory, one that contained a wordlist and LM of your choice,
  # as long as phones.txt was compatible.
  utils/mkgraph.sh \
    --self-loop-scale 1.0 $lang_decode \
    $dir $dir/graph || exit 1;
fi
# Stage 5: decode the test set, then rescore with the 6-gram const-arpa LM.
if [ $stage -le 5 ] && $decode_e2e; then
  # NOTE(review): $chunk_width is never set anywhere in this script, so
  # frames_per_chunk ends up empty -- and it is not used below either.
  # Looks like leftover from a template; confirm before removing.
  frames_per_chunk=$(echo $chunk_width | cut -d, -f1)
  steps/nnet3/decode.sh --acwt 1.0 --post-decode-acwt 10.0 \
    --nj 30 --cmd "$cmd" --beam 12 \
    $dir/graph data/test $dir/decode_test || exit 1;
  steps/lmrescore_const_arpa.sh --cmd "$cmd" $lang_decode $lang_rescore \
    data/test $dir/decode_test{,_rescored} || exit 1
  echo "Done. Date: $(date). Results:"
  local/chain/compare_wer.sh $dir
fi
| true |
88258e903b89eca8d24d9753aeba03baaa89140b | Shell | sravanisabbisett/codinclub | /day6/day6/forloop/foedemo2.sh | UTF-8 | 109 | 3.25 | 3 | [] | no_license | #!/bin/bash -x
# Prompt for a number N and print k*(k-1) for k = N down to 1.
read -p "number" number
k=$number
while (( k > 0 ))
do
	echo $(( k * (k - 1) ))
	k=$(( k - 1 ))
done
| true |
ec68d1a166ddaf8a23f9b1a1086cd64eb5631ea2 | Shell | MobileChromeApps/workshop-cca-eh | /workshop/copy_app_common_to_steps.sh | UTF-8 | 461 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# This script copies the known good app/ui folder into each workshop step. It
# should be rerun after any updates to the common Polymer elements and assets.
set -eu
cd "$(dirname "$0")"
cd ..
./app/ui/prepare.sh
rm -rf workshop/step*/ui
rm -rf workshop/step*/assets
for i in workshop/step*; do
  # Quote the step path: an unquoted $i would word-split / re-glob.
  echo "$i"
  # cp -R with the trailing / copies contents into ui, not ui -> ui/ui.
  cp -R app/ui "$i/"
  cp -R app/assets "$i/"
done
# success!
echo "Ok!"
| true |
85253fae100597b0ebc74b90cc3ba03860716b50 | Shell | TopHatCroat/home | /bin/publish-blog | UTF-8 | 290 | 3.78125 | 4 | [] | no_license | #!/bin/sh
# Build and push my blog
# $1 is the commit message used for both the source repo and the built site.
set -e
# Quote "$1": the unquoted test broke (too many arguments) whenever the
# commit message contained spaces -- the common case.
if [ -z "$1" ]; then
	echo "Usage: publish-blog COMMIT_MESSAGE"
	exit 1
fi
cd "$DEVDIR/blog"
hugo
# Push the source
git commit -am "$1"
git push
# Push the built files
cd public
git add .
git commit -m "$1"
git push
echo "Publish successful!"
| true |
ef5827f0878d688e8263136f50b4f94d7ccccc5f | Shell | gcmt/dotfiles | /nemo/actions/tojpg.sh | UTF-8 | 102 | 2.71875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Flatten every given image onto a white background and write it as a .jpg
# next to the original (same basename, .jpg extension).
while [ "$#" -gt 0 ]; do
	convert "$1" -background white -flatten -alpha off "${1%.*}.jpg"
	shift
done
| true |
0d18cbbc363bc5d2d1f7c6e04396b0a7789ca4a3 | Shell | ceskajak/Upsilon_production | /PYTHIA/rivet_MinBias_all_CR1/generate.sh | UTF-8 | 1,038 | 2.78125 | 3 | [] | no_license | #!/bin/bash
# Create run folders run000..run099, each with its own copy of the Rivet
# sources patched to use run-specific file names, then compile each one.
mkdir runs
cd runs
echo run folder generated
# Single loop with a zero-padded id replaces the original's two
# near-identical 0-9 / 10-99 loops.
for (( i = 0; i <= 99; i++ ))
do
	id=$(printf '%03d' "$i")
	mkdir "run$id"
	cp ../src/rivet_MinBias.cmnd "run$id"
	cp ../src/rivet_MinBias.cc "run$id"
	cp ../src/rivet_MinBias.sh "run$id/rivet_MinBias$id.sh"
	cd "run$id" || exit 1
	# Point this run's copies at run-specific output/log names.
	sed -i "s/rivet_MinBias.root/rivet_MinBias$id.root/g" rivet_MinBias.cc
	sed -i "s/output.yoda/pythia$id.yoda/g" rivet_MinBias.cc
	sed -i "s/error/error$id/g" "rivet_MinBias$id.sh"
	sed -i "s/RUN_NO/run$id/g" "rivet_MinBias$id.sh"
	/home/ceskajak/scripts/rivet_compile.sh
	cd ..
	echo "run$id generated"
done
| true |
f8f5a98e08dea60a6e2b73cd1479820b98fa6a48 | Shell | MobaLinux/gitgood | /HW2.sh | UTF-8 | 334 | 2.59375 | 3 | [] | no_license | #!/usr/bin/env bash
# HW section 2: write the user details into a hidden file and install this
# script under /etc/init.d so it runs at boot.
# ver 0.0.1
# dorshamay
name=dor
lname=shamay
ID=205592512
DOB=5.6.1994
POB=Israel
info_file=.user_info.sh
touch "$info_file" && printf '%s %s %s %s %s\n' "$name" "$lname" "$ID" "$DOB" "$POB" > "$info_file"
sudo cp /$HOME/Desktop/2.12.2017/HW2.sh /etc/init.d
sudo update-rc.d HW2.sh defaults
| true |
aadb5e0f1bf73f72f21e71b3e25b114d61221ba1 | Shell | ICHPC/hpc-portal | /remote_host_scripts/pqchem/make_tar | UTF-8 | 133 | 3.078125 | 3 | [] | no_license | #!/bin/sh
# Tars the directory specified on the cmd line and returns the tgz on stdout
# make_tar DIR -- archives DIR (relative to its parent) as gzip'd tar.
make_tar() {
	# Bail out instead of tarring whatever the current/home directory is
	# when the target cannot be entered (the original kept going).
	cd "$1" || return 1
	T=$(basename "$1")
	cd ..
	tar -zcf - "$T"
}
make_tar "$1"
| true |
4d0e1240420555677f844dc7964358118837c21a | Shell | chordstricken/wotcc | /bin/generate-splash.sh | UTF-8 | 1,633 | 2.546875 | 3 | [] | no_license | #!/bin/bash
## Generate the iOS splash-screen assets from the master res/splash.png.
## Each output is the master image center-cropped (optionally resized first)
## to the geometry the given device/orientation expects.
root=$(realpath "$(dirname "$0")/../")
splash="$root/res/splash.png"
# ios
mkdir -p "$root/res/screen/ios"
set -x
# make_screen RESIZE CROP OUTFILE -- RESIZE is "-" for no pre-resize.
# Replaces 14 copy-pasted convert invocations with one data-driven helper.
make_screen() {
    if [ "$1" = "-" ]; then
        convert "$splash" -gravity center -crop "$2" "$root/res/screen/ios/$3"
    else
        convert "$splash" -resize "$1" -gravity center -crop "$2" "$root/res/screen/ios/$3"
    fi
}
make_screen -         2048x1536+0+0 screen-ipad-landscape-2x.png
make_screen -         1024x768+0+0  screen-ipad-landscape.png
make_screen -         1536x2048+0+0 screen-ipad-portrait-2x.png
make_screen 1024x1024 768x1024+0+0  screen-ipad-portrait.png
make_screen -         960x640+0+0   screen-iphone-landscape-2x.png
make_screen 1024x1024 480x320+0+0   screen-iphone-landscape.png
make_screen 1024x1024 640x960+0+0   screen-iphone-portrait-2x.png
make_screen 1536x1536 640x1136+0+0  screen-iphone-portrait-568h-2x.png
make_screen 512x512   320x480+0+0   screen-iphone-portrait.png
make_screen -         750x1334+0+0  Default-667h.png
make_screen -         1242x2208+0+0 Default-736h.png
make_screen -         1125x2436+0+0 Default-2436h.png
make_screen -         2208x1242+0+0 Default-Landscape-736h.png
make_screen -         2436x1125+0+0 Default-Landscape-2436h.png
| true |
55aab2528ed5995eaa2ffd28f7d8d7a214a6ff0f | Shell | guymguym/noobaa-core | /src/deploy/NVA_build/fix_package_json.sh | UTF-8 | 341 | 3.421875 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | #!/bin/bash
# Strip the trailing comma that an automated edit can leave after the last
# real entry of package.json (trailing commas make the JSON invalid).
count=1
file="package.json"
while true
do
    # Inspect the last $count lines of the file; each outer iteration
    # reaches one line further up from the bottom.
    while read line
    do
        if [[ "${line}" =~ "," ]]
        then
            # First line (scanning from the end) that contains a comma:
            # delete every comma on that line, in place, and stop.
            # NOTE(review): the raw line is used verbatim as a sed pattern,
            # so this assumes it contains no sed metacharacters -- confirm.
            sed -i "s/${line}/${line//,/}/g" ${file}
            break 2
        elif [[ "${line}" =~ "\"" ]]
        then
            # Hit a quoted entry with no comma first: nothing to fix.
            # (break 2 exits both the inner read loop and the outer loop.)
            break 2
        fi
    done < <(tail -${count} ${file})
    ((count++))
    # NOTE(review): if the file contains neither a comma nor a quote this
    # loop never terminates -- presumably impossible for package.json.
done
| true |
533f4b0f8efcee481112f988f031f33f0eb5773d | Shell | MartinPavic/MartinsBlockchain | /server/updateKeystore.sh | UTF-8 | 966 | 3.015625 | 3 | [] | no_license | #!/bin/bash
# Refresh the adminPrivateKey paths in connection.yaml: Fabric regenerates
# each org's MSP keystore with a random filename, so discover the current
# file at run time and splice it into the connection profile.
#
# update_admin_key ORG -- ORG is the short org name (fer, fsb).
update_admin_key() {
	org=$1
	# NOTE: assumes exactly one key file in the keystore directory.
	keystore=$(ls "../network/organizations/peerOrganizations/$org.unizg.hr/users/Admin@$org.unizg.hr/msp/keystore/")
	updated="Admin@$org.unizg.hr/msp/keystore/$keystore"
	echo "updating connection.yaml $org adminPrivateKey path with $updated"
	# .* swallows the old (random) key filename after the fixed prefix.
	sed -i -e "s|Admin@$org.unizg.hr/msp/keystore/.*|$updated|g" connection.yaml
}
update_admin_key fer
update_admin_key fsb
| true |
c99cb40f1b60f0add234159380ba6e8b6dd094be | Shell | GeorgPandeh/Operation-Systems | /Ipr1Variant1.sh | UTF-8 | 100 | 2.75 | 3 | [] | no_license | #!/bin/bash
# Print "<permissions> <size> <name>" for every regular file under the
# directory given as $1.
# list_files DIR -- NUL-delimited find output so paths containing
# whitespace survive (the original for-in-$(find) loop split on spaces).
list_files() {
	find "$1" -type f -print0 |
	while IFS= read -r -d '' f; do
		# NB: awk's $9 is only the first word of the name; basenames with
		# spaces are truncated (same as the original behaviour).
		ls -lh "$f" | awk '{print $1, $5, $9}'
	done
}
list_files "$1"
| true |
6a47556e45cf84064b4fd73724ab740d85ef670c | Shell | breezepqf/script | /linux/chmirror.sh | UTF-8 | 874 | 3.84375 | 4 | [] | no_license | #!/bin/bash
# Interactively switch the apt mirror referenced in /etc/apt/sources.list,
# then refresh the package index.
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
if [ "$(whoami)" != "root" ]; then
	echo " Error: Run this script as root!" && exit 1
fi
list="/etc/apt/sources.list"
# Extract the host name of the mirror currently used in the list.
last_mirror=$(cat "$list" | grep -o '/[a-z]*\.[^/]*/' | grep -o '[^/]*' | sed -n 2p)
cnt_mirror=$last_mirror
# Replace every occurrence of the old mirror with the chosen one.
apply_mirror() {
	sed -i "/.*/s/${last_mirror}/${cnt_mirror}/g" "$list"
}
echo
echo "Choose a mirror site for apt-get:"
echo "[0]: $last_mirror (current)"
echo "[1]: mirrors.aliyun.com"
echo "[2]: mirrors.xjtu.edu.cn"
echo "[3]: others"
read -p "Make a choice: [0] " ans
case "$ans" in
	1)
		cnt_mirror="mirrors.aliyun.com"
		apply_mirror
		;;
	2)
		cnt_mirror="mirrors.xjtu.edu.cn"
		apply_mirror
		;;
	3)
		read -p "Enter your mirror site: " cnt_mirror
		apply_mirror
		;;
esac
apt-get update
| true |
658756d9fc4db097910cfcec27d69de18468d927 | Shell | danhtaihoang/frustrated-spin-j1j2 | /J1_J2/220feb_J1J1_transport_results/170feb_ISING_SC_J1J2_results/163feb_ISING_SC_J1J2_0.26_histo/2shell_plot_histo.script | UTF-8 | 550 | 2.65625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Run 2histo_plot.script inside each numbered result folder 1..N.
# (Original Vietnamese comments: "plots 2 or 3 graphs at once, used to look
# for an effect"; "shell script for collecting results from osaka";
# "choose the step to perform".)
echo -n "Number of folder = "
read nF
i_folder=1
while [ "$i_folder" -le "$nF" ]
do
	cp 2histo_plot.script "$i_folder"
	# Run the plotting script from inside the folder, then come back.
	# (Guarded cd: the original kept going in the wrong directory.)
	cd "$i_folder" || exit 1
	./2histo_plot.script
	cd ..
	# POSIX arithmetic instead of forking bc for a simple increment.
	i_folder=$((i_folder + 1))
done
| true |
37ec42a75d5d9b152745086e6fd0cbf7f177ab50 | Shell | tietokilta-saato/tikplay | /legacy/tikradio/util/soita.sh | UTF-8 | 610 | 2.75 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Dispatch a track to the right player.  $1 is either a stream URL or a
# local file; the player commands and their option variables come from
# ~/.tikradiorc.
set -eu
. ~/.tikradiorc
case "$1" in
	http://*ogg)
		# Mark that a stream is playing, then replace this process
		# with the player (exec: no shell left behind).
		:> "$TMP/tikplaystreaming"
		exec $OGG123 $OGG123_OPTS "$1" ;;
	http://*)
		:> "$TMP/tikplaystreaming"
		exec $MPG123 $MPG123_OPTS "$1" ;;
	*)
		# Local file: pick a player based on file(1)'s description.
		case `$FILE_CMD "$1"` in
			*Ogg*)
				exec $OGG123 $OGG123_OPTS $1 ;;
			*odule\ sound*)
				# matches "...module sound..." (tracker module files)
				exec $PLAYMOD "$1" ;;
			*SID*)
				exec $PLAYSID "$1" ;;
			*text*)
				# plain text -- presumably read aloud via festival
				exec $FESTIVAL "$1" ;;
			*)
				exec $MPG123 $MPG123_OPTS $1
		esac
		#if file "$1" |grep "Ogg" > /dev/null; then
		#	exec $OGG123 $OGG123_OPTS $1
		#else
		#	exec $MPG123 $MPG123_OPTS $1
		#fi
esac
| true |
ef6d004dce9052f9e254f9a96fa6a14631d0fd02 | Shell | mikeeq/mbp-fedora-kernel | /publish.sh | UTF-8 | 1,771 | 3.5625 | 4 | [] | no_license | #!/bin/bash
# Publish the generated documentation (docs/html) to the gh-pages branch.
# Requires: GH_TOKEN (GitHub token with public_repo scope), GIT_NAME and
# GIT_EMAIL (committer identity).  Intended to run on Travis CI from master.
set -e

# Skip silently in contexts where publishing is impossible or unwanted.
if [ "$TRAVIS" = true ] && [ "$TRAVIS_SECURE_ENV_VARS" = false ]; then
    echo "No environment variables found, skipping (probably a pull request)."
    exit 0
fi

if [ "$TRAVIS" = true ] && [ "$TRAVIS_BRANCH" != "master" ]; then
    echo "No master branch, skipping."
    exit 0
fi

if [ -z "$TRAVIS_REPO_SLUG" ]; then
    echo "No TRAVIS_REPO_SLUG value found."
    echo "Please set this if running outside Travis."
    exit 0
fi

# FIXME: Replace this with a deploy key, so you don't end up with tokens which
# potentially give people *a lot* of access to your GitHub repos.
if [ -z "$GH_TOKEN" ]; then
    echo "No GH_TOKEN value found."
    echo
    echo "Generate a GitHub token at https://github.com/settings/tokens/new"
    echo "with *only* the public_repo option."
    echo "Then go to https://travis-ci.org/$TRAVIS_REPO_SLUG/settings and"
    echo "add an 'Environment Variables' with the following;"
    echo " * Name == GH_TOKEN"
    echo " * Value == your token value from above"
    echo " * Display value in build log == OFF"
    echo
    echo "It is important that you protect this token, as it has full push"
    echo "access to your repos!"
    exit 1
fi

if [ -z "$GIT_NAME" ]; then
    echo "No GIT_NAME value found."
    echo
    echo "Then go to https://travis-ci.org/$TRAVIS_REPO_SLUG/settings and"
    echo "add an 'Environment Variables' with the following;"
    echo " * Name == GIT_NAME"
    echo " * Value == Human readable name for the commit author."
    echo "            Something like \"Tim Ansell's Robot\" is a good choice."
    echo " * Display value in build log == ON"
    exit 1
fi

if [ -z "$GIT_EMAIL" ]; then
    echo "No GIT_EMAIL value found."
    echo
    echo "Then go to https://travis-ci.org/$TRAVIS_REPO_SLUG/settings and"
    echo "add an 'Environment Variables' with the following;"
    echo " * Name == GIT_EMAIL"
    echo " * Value == Email address the commit author."
    echo "            Set up an email address, or use your own."
    echo " * Display value in build log == ON"
    exit 1
fi

TMPDIR=$(mktemp --directory)

# A shallow CI clone has no usable "git describe" output; deepen it first.
if ! git describe --always > /dev/null 2>&1; then
    echo "- Fetching non shallow to get git version"
    git fetch --unshallow && git fetch --tags
fi

ORIG_GIT_REVISION=$(git describe --always)
ORIG_COMMITTER_NAME=$(git log -1 --pretty=%an)
ORIG_COMMITTER_EMAIL=$(git log -1 --pretty=%ae)

echo "- Setting up the output"
cp -aRf docs/html/* "$TMPDIR"/
find "$TMPDIR" | sort

echo "- Switching to the gh-pages branch"
git remote set-branches --add origin gh-pages
git fetch origin gh-pages
git checkout origin/gh-pages -b gh-pages

echo "- Adding the newly generated content"
rm -rf *
cp -aRf "$TMPDIR"/* .
git add -v -A .

echo "- Committing"
# The author stays the person who made the original commit; the committer is
# the configured CI identity.
export GIT_AUTHOR_EMAIL="$ORIG_COMMITTER_EMAIL"
export GIT_AUTHOR_NAME="$ORIG_COMMITTER_NAME"
# Bug fix: these two assignments were swapped (COMMITTER_EMAIL was set from
# $GIT_NAME and COMMITTER_NAME from $GIT_EMAIL), producing malformed
# committer metadata on every published commit.
export GIT_COMMITTER_NAME="$GIT_NAME"
export GIT_COMMITTER_EMAIL="$GIT_EMAIL"
unset GIT_NAME
unset GIT_EMAIL
git commit -a -m "Travis build #$TRAVIS_BUILD_NUMBER of $ORIG_GIT_REVISION"

echo "- Pushing"
# The token is embedded in the remote URL; keep both commands quiet so it
# never appears in the build log.
git remote set-url origin https://$GH_TOKEN@github.com/$TRAVIS_REPO_SLUG.git > /dev/null 2>&1
git push origin gh-pages > /dev/null 2>&1
| true |
bb3ee70b52e7fe9b29ddff3428d1a639727f6237 | Shell | joehmchiu/tf-samples | /ec2-mongo-api/test/delete.sh | UTF-8 | 395 | 3.640625 | 4 | [] | no_license |
# Delete a record via the REST API.  The record id comes from /tmp/MID
# (cached by a previous create) or, failing that, from $1.  The service
# base URL is read from /tmp/url.  Exits 3 when either is missing.
MF=/tmp/MID
URL=$(cat /tmp/url)
if [ -f "$MF" ]; then
	ID=$(cat "$MF")
else
	ID=$1
fi
[ -z "$URL" ] && { echo "Error: URL not found"; exit 3; }
[ -z "$ID" ] && { echo "Error: ID not found"; echo "Usage: sh delete.sh [Record ID]"; exit 3; }
# echo "Delete record: $ID"
RES=$(curl -s -X DELETE "$URL$ID")
echo "$RES"
# jq prints strings quoted; strip the quotes before comparing.
MSG=$(echo "$RES" | jq '.status' | sed 's/"//g')
# Only forget the cached id once the server confirms the delete.
[ "$MSG" = "ok" ] && rm -f "$MF"
| true |
538e8a7b120f8a03ef96b4ab3535da73913af479 | Shell | frederic/rtl819x-toolchain | /users/script/wlan.sh | UTF-8 | 12,202 | 3.09375 | 3 | [] | no_license | #!/bin/sh
#
# script file to start WLAN
#
# Usage: wlan.sh <interface> -- e.g. wlan0, or a virtual AP like wlan0-va0.
# Reads MIB settings with "flash get" (which prints NAME=value, made into
# shell variables via eval) and pushes them into the driver with
# "iwpriv <if> set_mib".
if [ $# -lt 1 ]; then echo "Usage: $0 wlan_interface"; exit 1 ; fi
GETMIB="flash get $1"
SET_WLAN="iwpriv $1"
SET_WLAN_PARAM="$SET_WLAN set_mib"
IFCONFIG=ifconfig
START_WLAN_APP=wlanapp.sh
MAX_WDS_NUM=8
## Disable WLAN MAC driver and shutdown interface first ##
$IFCONFIG $1 down
eval `$GETMIB HW_RF_TYPE`
eval `$GETMIB WLAN_DISABLED`
eval `$GETMIB MODE`
# Characters 7.. of the interface name: empty for wlanN, "vaX" for a VAP.
VAP=`echo $1 | cut -b 7-`
if [ "$VAP" != "" ]; then
	# wlan0-va? interface
	$START_WLAN_APP kill $1
	if [ "$WLAN_DISABLED" = '1' ]; then
		exit 1
	fi
else
	# shutdown all WDS interface
	num=0
	while [ $num -lt $MAX_WDS_NUM ]
	do
		$IFCONFIG $1-wds$num down
		num=`expr $num + 1`
	done
	## kill wlan application daemon ##
	$START_WLAN_APP kill $1
	if [ "$WLAN_DISABLED" = '1' -o "$HW_RF_TYPE" = '0' ]; then
		exit 1
	fi
fi
## Set parameters to driver ##
eval `$GETMIB HW_REG_DOMAIN`
$SET_WLAN_PARAM regdomain=$HW_REG_DOMAIN
eval `$GETMIB OP_MODE`
eval `$GETMIB WLAN_MAC_ADDR`
# An all-zero configured MAC means "use the factory address for this
# (virtual) interface".
if [ "$WLAN_MAC_ADDR" = "000000000000" ]; then
	if [ "$VAP" = "" ]; then
		eval `$GETMIB HW_WLAN_ADDR`
		WLAN_MAC_ADDR=$HW_WLAN_ADDR
	elif [ "$VAP" = "va0" ]; then
		eval `$GETMIB HW_WLAN_ADDR1`
		WLAN_MAC_ADDR=$HW_WLAN_ADDR1
	elif [ "$VAP" = "va1" ]; then
		eval `$GETMIB HW_WLAN_ADDR2`
		WLAN_MAC_ADDR=$HW_WLAN_ADDR2
	elif [ "$VAP" = "va2" ]; then
		eval `$GETMIB HW_WLAN_ADDR3`
		WLAN_MAC_ADDR=$HW_WLAN_ADDR3
	else
		eval `$GETMIB HW_WLAN_ADDR4`
		WLAN_MAC_ADDR=$HW_WLAN_ADDR4
	fi
fi
# ifconfig all wlan interface when not in WISP
# ifconfig wlan1 later interface when in WISP mode, the wlan0 will be setup in WAN interface
if [ "$VAP" = "" ]; then
	eval `$GETMIB WISP_WAN_ID`
	# In WISP mode (OP_MODE=2) the WAN wlan interface's MAC is handled
	# elsewhere; set it here for every other case.
	if [ "$OP_MODE" != '2' ] || [ $1 != "wlan$WISP_WAN_ID" ] ;then
		$IFCONFIG $1 hw ether $WLAN_MAC_ADDR
	fi
	if [ "$OP_MODE" = '2' ]; then
		$SET_WLAN_PARAM disable_brsc=1
	fi
	eval `$GETMIB HW_LED_TYPE`
	$SET_WLAN_PARAM led_type=$HW_LED_TYPE
	## set AP/client/WDS mode ##
	# MODE: 1=client, 2=WDS only, 4..7=mesh combinations, otherwise AP.
	if [ "$MODE" = '1' ]; then
		## client mode
		eval `$GETMIB NETWORK_TYPE`
		# opmode: 16=AP (below); 8/32 are the two client network types
		# (presumably infrastructure vs ad-hoc -- confirm in driver).
		if [ "$NETWORK_TYPE" = '0' ]; then
			$SET_WLAN_PARAM opmode=8
		else
			$SET_WLAN_PARAM opmode=32
			eval `$GETMIB DEFAULT_SSID`
			$SET_WLAN_PARAM defssid="$DEFAULT_SSID"
		fi
	else
		## AP mode
		$SET_WLAN_PARAM opmode=16
	fi
	if [ "$MODE" = '2' ]; then
		## WDS only
		$SET_WLAN_PARAM wds_pure=1
	else
		$SET_WLAN_PARAM wds_pure=0
	fi
	#add for mesh
	# 802.11s set mesh parameters ====================
	# Modes 4..7 toggle the (ap_enable, portal_enable) pair:
	# 4=both, 5=portal only, 6=AP only, 7=neither.
	if [ "$MODE" = '4' ]; then
		$SET_WLAN_PARAM mesh_enable=1
		$SET_WLAN_PARAM mesh_ap_enable=1
		$SET_WLAN_PARAM mesh_portal_enable=1
	elif [ "$MODE" = '5' ]; then
		$SET_WLAN_PARAM mesh_enable=1
		$SET_WLAN_PARAM mesh_ap_enable=0
		$SET_WLAN_PARAM mesh_portal_enable=1
	elif [ "$MODE" = '6' ]; then
		$SET_WLAN_PARAM mesh_enable=1
		$SET_WLAN_PARAM mesh_ap_enable=1
		$SET_WLAN_PARAM mesh_portal_enable=0
	elif [ "$MODE" = '7' ]; then
		$SET_WLAN_PARAM mesh_enable=1
		$SET_WLAN_PARAM mesh_ap_enable=0
		$SET_WLAN_PARAM mesh_portal_enable=0
	else
		$SET_WLAN_PARAM mesh_enable=0
		$SET_WLAN_PARAM mesh_ap_enable=0
		$SET_WLAN_PARAM mesh_portal_enable=0
	fi
	if [ "$MODE" = 4 ] || [ "$MODE" = 5 ] || [ "$MODE" = 6 ] || [ "$MODE" = 7 ] ; then
		eval `$GETMIB MESH_ROOT_ENABLE`
		if [ "$MESH_ROOT_ENABLE" = 0 ]; then
			$SET_WLAN_PARAM mesh_root_enable=0
		else
			$SET_WLAN_PARAM mesh_root_enable=1
		fi
		eval `$GETMIB MESH_ID`
		$SET_WLAN_PARAM mesh_id="$MESH_ID"
		eval `$GETMIB MESH_MAX_NEIGHTBOR`
		$SET_WLAN_PARAM mesh_max_neightbor=$MESH_MAX_NEIGHTBOR
		eval `$GETMIB MIB_MESH_WPA_AUTH`
		$SET_WLAN_PARAM mesh_privacy=$MIB_MESH_WPA_AUTH
	fi
	# ====================
	#add for mesh
	# set RF parameters
	$SET_WLAN_PARAM RFChipID=$HW_RF_TYPE
	eval `$GETMIB HW_TX_POWER_CCK`
	eval `$GETMIB HW_TX_POWER_OFDM`
	$SET_WLAN_PARAM TxPowerCCK=$HW_TX_POWER_CCK
	$SET_WLAN_PARAM TxPowerOFDM=$HW_TX_POWER_OFDM
	eval `$GETMIB HW_WLAN0_11N_LOFDMPWD`
	$SET_WLAN_PARAM LOFDM_pwrdiff=$HW_WLAN0_11N_LOFDMPWD
	eval `$GETMIB HW_WLAN0_11N_ANTPWD_C`
	$SET_WLAN_PARAM antC_pwrdiff=$HW_WLAN0_11N_ANTPWD_C
	eval `$GETMIB HW_WLAN0_11N_THER_RFIC`
	$SET_WLAN_PARAM ther_rfic=$HW_WLAN0_11N_THER_RFIC
	eval `$GETMIB HW_WLAN0_11N_XCAP`
	$SET_WLAN_PARAM crystalCap=$HW_WLAN0_11N_XCAP
	eval `$GETMIB BEACON_INTERVAL`
	$SET_WLAN_PARAM bcnint=$BEACON_INTERVAL
	eval `$GETMIB CHANNEL`
	$SET_WLAN_PARAM channel=$CHANNEL
else
	# vap, set AP mode always
	$SET_WLAN_PARAM opmode=16
	$IFCONFIG $1 hw ether $WLAN_MAC_ADDR
fi
# Basic BSS parameters common to all modes.
eval `$GETMIB SSID`
$SET_WLAN_PARAM ssid="$SSID"
eval `$GETMIB BASIC_RATES`
$SET_WLAN_PARAM basicrates=$BASIC_RATES
eval `$GETMIB SUPPORTED_RATES`
$SET_WLAN_PARAM oprates=$SUPPORTED_RATES
eval `$GETMIB RATE_ADAPTIVE_ENABLED`
# A fixed rate only applies when rate adaptation is disabled.
if [ "$RATE_ADAPTIVE_ENABLED" = '0' ]; then
	$SET_WLAN_PARAM autorate=0
	eval `$GETMIB FIX_RATE`
	$SET_WLAN_PARAM fixrate=$FIX_RATE
else
	$SET_WLAN_PARAM autorate=1
fi
eval `$GETMIB RTS_THRESHOLD`
$SET_WLAN_PARAM rtsthres=$RTS_THRESHOLD
eval `$GETMIB FRAG_THRESHOLD`
$SET_WLAN_PARAM fragthres=$FRAG_THRESHOLD
eval `$GETMIB INACTIVITY_TIME`
$SET_WLAN_PARAM expired_time=$INACTIVITY_TIME
eval `$GETMIB PREAMBLE_TYPE`
$SET_WLAN_PARAM preamble=$PREAMBLE_TYPE
eval `$GETMIB HIDDEN_SSID`
$SET_WLAN_PARAM hiddenAP=$HIDDEN_SSID
eval `$GETMIB DTIM_PERIOD`
$SET_WLAN_PARAM dtimperiod=$DTIM_PERIOD
# Use the below only when using specific value
# instead of default setting
#$SET_WLAN_PARAM longretry=6
#$SET_WLAN_PARAM shortretry=6
# MAC-based access control: clear the driver list, then re-add each entry
# from the MIB table.
$SET_WLAN_PARAM aclnum=0
eval `$GETMIB MACAC_ENABLED`
$SET_WLAN_PARAM aclmode=$MACAC_ENABLED
if [ "$MACAC_ENABLED" != '0' ]; then
	eval `$GETMIB MACAC_NUM`
	if [ "$MACAC_NUM" != 0 ]; then
		num=1
		while [ $num -le $MACAC_NUM ]
		do
			# Each table row looks like "MACAC_ADDRn=<mac>,<comment>".
			AC_TBL=`$GETMIB MACAC_ADDR | grep MACAC_ADDR$num`
			addr_comment=`echo $AC_TBL | cut -f2 -d=`
			addr=`echo $addr_comment | cut -f1 -d,`
			$SET_WLAN_PARAM acladdr=$addr
			num=`expr $num + 1`
		done
	fi
fi
# Authentication / encryption.  ENCRYPT: 0=open, 1=WEP, >=2 WPA family.
eval `$GETMIB AUTH_TYPE`
eval `$GETMIB ENCRYPT`
if [ "$AUTH_TYPE" = '1' ] && [ "$ENCRYPT" != '1' ]; then
	# shared-key and not WEP enabled, force to open-system
	AUTH_TYPE=0
fi
$SET_WLAN_PARAM authtype=$AUTH_TYPE
if [ "$ENCRYPT" = '0' ]; then
	$SET_WLAN_PARAM encmode=0
elif [ "$ENCRYPT" = '1' ]; then
	### WEP mode ##
	# The WEP MIB selects the key length: 1 -> 64-bit keys (encmode=1),
	# otherwise 128-bit keys (encmode=5).
	eval `$GETMIB WEP`
	if [ "$WEP" = '1' ]; then
		eval `$GETMIB WEP64_KEY1`
		eval `$GETMIB WEP64_KEY2`
		eval `$GETMIB WEP64_KEY3`
		eval `$GETMIB WEP64_KEY4`
		eval `$GETMIB WEP_DEFAULT_KEY`
		$SET_WLAN_PARAM encmode=1
		$SET_WLAN_PARAM wepkey1=$WEP64_KEY1
		$SET_WLAN_PARAM wepkey2=$WEP64_KEY2
		$SET_WLAN_PARAM wepkey3=$WEP64_KEY3
		$SET_WLAN_PARAM wepkey4=$WEP64_KEY4
		$SET_WLAN_PARAM wepdkeyid=$WEP_DEFAULT_KEY
	else
		eval `$GETMIB WEP128_KEY1`
		eval `$GETMIB WEP128_KEY2`
		eval `$GETMIB WEP128_KEY3`
		eval `$GETMIB WEP128_KEY4`
		eval `$GETMIB WEP_DEFAULT_KEY`
		$SET_WLAN_PARAM encmode=5
		$SET_WLAN_PARAM wepkey1=$WEP128_KEY1
		$SET_WLAN_PARAM wepkey2=$WEP128_KEY2
		$SET_WLAN_PARAM wepkey3=$WEP128_KEY3
		$SET_WLAN_PARAM wepkey4=$WEP128_KEY4
		$SET_WLAN_PARAM wepdkeyid=$WEP_DEFAULT_KEY
	fi
else
	## WPA mode ##
	$SET_WLAN_PARAM encmode=2
fi
## Set 802.1x flag ##
# 802.1x is enabled for any WPA mode, or (with open/WEP) when explicit
# 802.1x or MAC authentication is configured.
_ENABLE_1X=0
if [ $ENCRYPT -lt 2 ]; then
	eval `$GETMIB ENABLE_1X`
	eval `$GETMIB MAC_AUTH_ENABLED`
	if [ "$ENABLE_1X" != 0 ] || [ "$MAC_AUTH_ENABLED" != 0 ]; then
		_ENABLE_1X=1
	fi
else
	_ENABLE_1X=1
fi
$SET_WLAN_PARAM 802_1x=$_ENABLE_1X
## set WDS ##
eval `$GETMIB WDS_ENABLED`
eval `$GETMIB WDS_NUM`
$SET_WLAN_PARAM wds_num=0
# WDS links only apply in modes 2 and 3.
if [ "$MODE" = 2 -o "$MODE" = 3 ] && [ "$WDS_ENABLED" != 0 ] && [ "$WDS_NUM" != 0 ]; then
	num=1
	while [ $num -le $WDS_NUM ]
	do
		# Table rows look like "WDSn=<mac>,<txrate>".
		WDS_TBL=`$GETMIB WDS | grep WDS$num`
		addr_comment=`echo $WDS_TBL | cut -f2 -d=`
		addr=`echo $addr_comment | cut -f1 -d,`
		txrate=`echo $addr_comment | cut -f2 -d,`
		$SET_WLAN_PARAM wds_add=$addr,$txrate
		# wds interfaces are numbered from 0 while the table starts at 1:
		# drop to num-1 for the ifconfig, then advance to the next row.
		num=`expr $num - 1`
		$IFCONFIG $1-wds$num hw ether $WLAN_MAC_ADDR
		num=`expr $num + 2`
	done
	$SET_WLAN_PARAM wds_enable=$WDS_ENABLED
else
	$SET_WLAN_PARAM wds_enable=0
fi
if [ "$MODE" = 2 -o "$MODE" = 3 ] && [ "$WDS_ENABLED" != '0' ]; then
	# WDS_ENCRYPT 0..4 maps to driver wds_encrypt codes 0/1/5/2/4 -- the
	# same code space as encmode above; exact cipher meanings live in the
	# driver headers.
	eval `$GETMIB WDS_ENCRYPT`
	if [ "$WDS_ENCRYPT" = '0' ]; then
		$SET_WLAN_PARAM wds_encrypt=0
	elif [ "$WDS_ENCRYPT" = '1' ]; then
		eval `$GETMIB WDS_WEP_KEY`
		$SET_WLAN_PARAM wds_encrypt=1
		$SET_WLAN_PARAM wds_wepkey=$WDS_WEP_KEY
	elif [ "$WDS_ENCRYPT" = '2' ]; then
		eval `$GETMIB WDS_WEP_KEY`
		$SET_WLAN_PARAM wds_encrypt=5
		$SET_WLAN_PARAM wds_wepkey=$WDS_WEP_KEY
	elif [ "$WDS_ENCRYPT" = '3' ]; then
		$SET_WLAN_PARAM wds_encrypt=2
	else
		$SET_WLAN_PARAM wds_encrypt=4
	fi
fi
# enable/disable the notification for IAPP
eval `$GETMIB IAPP_DISABLED`
if [ "$IAPP_DISABLED" = 0 ]; then
	$SET_WLAN_PARAM iapp_enable=1
else
	$SET_WLAN_PARAM iapp_enable=0
fi
#set band
eval `$GETMIB BAND`
eval `$GETMIB WIFI_SPECIFIC`
# When WIFI_SPECIFIC is set on a non-client interface, band 2 is promoted
# to 3.
if [ "$MODE" != '1' ] && [ "$WIFI_SPECIFIC" = 1 ] && [ "$BAND" = '2' ] ; then
	BAND=3
fi
# Bands 8/10 remap to 11 (the 11n selections); deny_legacy restricts which
# legacy clients may associate (driver encoding, values per branch below).
if [ "$BAND" = '8' ]; then
	BAND=11
	$SET_WLAN_PARAM deny_legacy=3
elif [ "$BAND" = '2' ]; then
	BAND=3
	$SET_WLAN_PARAM deny_legacy=1
elif [ "$BAND" = '10' ]; then
	BAND=11
	$SET_WLAN_PARAM deny_legacy=1
else
	$SET_WLAN_PARAM deny_legacy=0
fi
$SET_WLAN_PARAM band=$BAND
###Set 11n parameter
# (bands 10/11 after the remapping above are the 11n configurations)
if [ $BAND = 10 ] || [ $BAND = 11 ]; then
	eval `$GETMIB CHANNEL_BONDING`
	$SET_WLAN_PARAM use40M=$CHANNEL_BONDING
	eval `$GETMIB CONTROL_SIDEBAND`
	# 2ndchoffset: 0 when not bonding; otherwise 1 or 2 depending on
	# CONTROL_SIDEBAND (secondary channel below/above -- confirm
	# orientation in the driver docs).
	if [ "$CHANNEL_BONDING" = 0 ]; then
		$SET_WLAN_PARAM 2ndchoffset=0
	else
		if [ "$CONTROL_SIDEBAND" = 0 ]; then
			$SET_WLAN_PARAM 2ndchoffset=1
		fi
		if [ "$CONTROL_SIDEBAND" = 1 ]; then
			$SET_WLAN_PARAM 2ndchoffset=2
		fi
	fi
	eval `$GETMIB SHORT_GI`
	$SET_WLAN_PARAM shortGI20M=$SHORT_GI
	$SET_WLAN_PARAM shortGI40M=$SHORT_GI
	eval `$GETMIB AGGREGATION`
	# AGGREGATION: 0=none, 1=A-MPDU only, 2=A-MSDU only, 3=both.
	if [ "$AGGREGATION" = 0 ]; then
		$SET_WLAN_PARAM ampdu=$AGGREGATION
		$SET_WLAN_PARAM amsdu=$AGGREGATION
	elif [ "$AGGREGATION" = 1 ]; then
		$SET_WLAN_PARAM ampdu=1
		$SET_WLAN_PARAM amsdu=0
	elif [ "$AGGREGATION" = 2 ]; then
		$SET_WLAN_PARAM ampdu=0
		$SET_WLAN_PARAM amsdu=1
	elif [ "$AGGREGATION" = 3 ]; then
		$SET_WLAN_PARAM ampdu=1
		$SET_WLAN_PARAM amsdu=1
	fi
fi
##########
#set nat2.5 disable when client and mac clone is set
eval `$GETMIB MACCLONE_ENABLED`
if [ "$MACCLONE_ENABLED" = '1' -a "$MODE" = '1' ]; then
	$SET_WLAN_PARAM nat25_disable=1
	$SET_WLAN_PARAM macclone_enable=1
else
	$SET_WLAN_PARAM nat25_disable=0
	$SET_WLAN_PARAM macclone_enable=0
fi
# set nat2.5 disable and macclone disable when wireless isp mode
if [ "$OP_MODE" = '2' ] ;then
	$SET_WLAN_PARAM nat25_disable=1
	$SET_WLAN_PARAM macclone_enable=0
fi
# set 11g protection mode
eval `$GETMIB PROTECTION_DISABLED`
if [ "$PROTECTION_DISABLED" = '1' ] ;then
	$SET_WLAN_PARAM disable_protection=1
else
	$SET_WLAN_PARAM disable_protection=0
fi
# set block relay
eval `$GETMIB BLOCK_RELAY`
$SET_WLAN_PARAM block_relay=$BLOCK_RELAY
# set WiFi specific mode
eval `$GETMIB WIFI_SPECIFIC`
$SET_WLAN_PARAM wifi_specific=$WIFI_SPECIFIC
# for WMM
eval `$GETMIB WMM_ENABLED`
$SET_WLAN_PARAM qos_enable=$WMM_ENABLED
# for guest access
eval `$GETMIB ACCESS`
$SET_WLAN_PARAM guest_access=$ACCESS
#
# following settings is used when driver WPA module is included
#
eval `$GETMIB WPA_AUTH`
# Driver-side WPA-PSK: only for non-client modes with PSK auth (WPA_AUTH=2)
# and ENCRYPT 2 (WPA), 4 (WPA2) or 6 (mixed).
if [ $MODE != 1 ] && [ $ENCRYPT -ge 2 ] && [ $ENCRYPT -lt 7 ] && [ $WPA_AUTH = 2 ]; then
	# psk_enable: 1=WPA, 2=WPA2, 3=WPA+WPA2 mixed (which of the cipher
	# branches below run follows the same split).
	if [ $ENCRYPT = 2 ]; then
		ENABLE=1
	elif [ $ENCRYPT = 4 ]; then
		ENABLE=2
	elif [ $ENCRYPT = 6 ]; then
		ENABLE=3
	else
		echo "invalid ENCRYPT value!"; exit
	fi
	$SET_WLAN_PARAM psk_enable=$ENABLE
	# Cipher codes 2/8/10 are passed for MIB suite values 1/2/3
	# (presumably TKIP / AES / mixed bitmask -- confirm in driver headers).
	if [ $ENCRYPT = 2 ] || [ $ENCRYPT = 6 ]; then
		eval `$GETMIB WPA_CIPHER_SUITE`
		if [ $WPA_CIPHER_SUITE = 1 ]; then
			CIPHER=2
		elif [ $WPA_CIPHER_SUITE = 2 ]; then
			CIPHER=8
		elif [ $WPA_CIPHER_SUITE = 3 ]; then
			CIPHER=10
		else
			echo "invalid WPA_CIPHER_SUITE value!"; exit 1
		fi
	fi
	# NOTE(review): with ENCRYPT=4 (WPA2-only) the branch above is skipped,
	# so wpa_cipher is set from an unset $CIPHER -- confirm intended.
	$SET_WLAN_PARAM wpa_cipher=$CIPHER
	if [ $ENCRYPT = 4 ] || [ $ENCRYPT = 6 ]; then
		eval `$GETMIB WPA2_CIPHER_SUITE`
		if [ $WPA2_CIPHER_SUITE = 1 ]; then
			CIPHER=2
		elif [ $WPA2_CIPHER_SUITE = 2 ]; then
			CIPHER=8
		elif [ $WPA2_CIPHER_SUITE = 3 ]; then
			CIPHER=10
		else
			echo "invalid WPA2_CIPHER_SUITE value!"; exit 1
		fi
	fi
	$SET_WLAN_PARAM wpa2_cipher=$CIPHER
	eval `$GETMIB WPA_PSK`
	$SET_WLAN_PARAM passphrase=$WPA_PSK
	eval `$GETMIB WPA_GROUP_REKEY_TIME`
	$SET_WLAN_PARAM gk_rekey=$WPA_GROUP_REKEY_TIME
else
	$SET_WLAN_PARAM psk_enable=0
fi
| true |
94cc35a3d8586f315eca8d3d4822ea68af525a61 | Shell | ericsciple/generator | /script/check-health.sh | UTF-8 | 638 | 4 | 4 | [] | no_license | #!/usr/bin/env bash
# Parse arguments: -s/--scale-unit NAME and -d/--duration SPEC.
while [[ $# -gt 0 ]]
do
  case "$1" in
    -s|--scale-unit)
      SCALE_UNIT="$2"
      shift # past argument
      shift # past value
      ;;
    -d|--duration)
      DURATION="$2"
      shift # past argument
      shift # past value
      ;;
    *)
      echo "Unexpected argument '$1'"
      exit 1
      ;;
  esac
done
# Both options are mandatory.
[[ -n "$SCALE_UNIT" ]] || { echo "Missing scale unit"; exit 1; }
[[ -n "$DURATION" ]] || { echo "Missing duration"; exit 1; }
# "once" means a single pass; any other duration is appended to the banner.
suffix=""
[[ "$DURATION" == "once" ]] || suffix=" for $DURATION"
echo "Checking health for scale unit $SCALE_UNIT$suffix..."
echo "Done"
| true |
2b6b40cf17b3895873ac0771831cfc3ae97c259c | Shell | takashi247/minishell | /pipetest.sh | UTF-8 | 995 | 3.0625 | 3 | [] | no_license | #!/bin/bash
# Build libft (with the bonus targets), then compile the minishell sources
# into pipe.out for pipe/separator handling tests.
cd libft
make bonus
cd ..
gcc -g -Wall -Wextra -Werror -I./includes -I./libft \
srcs/echo.c srcs/cd.c srcs/pwd.c srcs/exit.c srcs/env.c srcs/unset.c \
srcs/export.c srcs/export_print.c srcs/export_setenv.c \
srcs/make_command.c srcs/make_token.c srcs/get_next_line.c \
srcs/init_env.c srcs/env_utils.c srcs/env_utils2.c srcs/env_sort.c srcs/env_copy.c \
srcs/minishell.c \
srcs/utils/utils.c srcs/utils/minishell_errors.c srcs/utils/command_utils.c srcs/utils/command_errors.c \
-Llibft -lft -o pipe.out
# ANSI colour escapes for the test banners.
YELLOW=$(printf '\033[33m')
CYAN=$(printf '\033[36m')
RESET=$(printf '\033[0m')
# Manual test invocations, kept for reference (commented out).
#printf "\n${CYAN}%s${RESET}\n\n" "***Test starts***"
#printf "${YELLOW}%s${RESET}\n\n" "ls | grep e; echo \$HOME"
#./pipe.out "ls | grep e; echo \$HOME"
#echo
#printf "${YELLOW}%s${RESET}\n\n" "test;test;test;test;"
#./command.out "test;test;test;test;"
#echo
#printf "${YELLOW}%s${RESET}\n\n" "test| test&test>test<;"
#./command.out "test| test&test>test<;"
#echo | true |
ac7f5b7af52bf4b635cf9a0356b64e70fc0b17d9 | Shell | nuvla/deployment | /swarm/swarm-deploy-exoscale.sh | UTF-8 | 2,781 | 3.984375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e

# On any error, tear down every docker-machine VM created so far.
# FIX: the machine list must be word-split into one argument per machine.
# The previous quoted form `rm -y "$($DM_BIN ls -q)"` passed all names as a
# single newline-joined argument, which broke cleanup whenever more than one
# machine existed, and errored when the list was empty.
cleanup() {
    local machines
    machines=$($DM_BIN ls -q)
    [ -n "$machines" ] || return 0
    # shellcheck disable=SC2086 -- intentional word-splitting of machine names
    $DM_BIN rm -y $machines
}
trap cleanup ERR
# CLI: $1 = action (deploy|terminate), $2 = swarm node count (default 1),
# $3 = Exoscale instance profile (default Small).
action_err_msg="Usage: ${0} deploy|terminate [swarm_node_count [instance_profile]]"
ACTION=${1:?$action_err_msg}
SWARM_NODE_COUNT=${2:-1}
INSTANCE_PROFILE=${3:-Small}
# Fetch a pinned docker-machine release into $HOME unless already present.
DM_VER=v0.16.1
DM_BIN=$HOME/docker-machine
if [ ! -f "$DM_BIN" ]; then
base=https://github.com/docker/machine/releases/download/$DM_VER
curl -L $base/docker-machine-"$(uname -s)"-"$(uname -m)" >"$DM_BIN"
fi
chmod +x "$DM_BIN"
# Ensure an SSH key pair exists for provisioning (generated without a
# passphrase when missing; `yes y` answers any overwrite prompt).
SSH_KEY=${SSH_KEY:-${HOME}/.ssh/id_rsa}
if [ ! -f "${SSH_KEY}" ]; then
echo "creating ${SSH_KEY}"
yes y | ssh-keygen -q -t rsa -N '' -f ${SSH_KEY} &>/dev/null
fi
#
# A "docker-machine" security group will be created. Specify
# --exoscale-security-group if you need a specific one.
#
# Unique machine-name prefix; workers append -workerN below.
MNAME=dockermachine-$(date +%s)
# Common docker-machine create arguments.  The API credentials are required
# environment variables; ${VAR:?...} aborts with a message when unset.
# NOTE: the embedded newlines rely on later unquoted expansion of $CREATE_CMD.
CREATE_CMD="create --driver exoscale
--exoscale-ssh-user root
--exoscale-ssh-key ${SSH_KEY}
--exoscale-api-key ${EXOSCALE_API_KEY:?provide EXOSCALE_API_KEY value}
--exoscale-api-secret-key ${EXOSCALE_API_SECRET:?provide EXOSCALE_API_SECRET value}
--exoscale-availability-zone ${EXOSCALE_REGION:-CH-GVA-2}
--exoscale-instance-profile ${INSTANCE_PROFILE}"
# deploy N -- provision a swarm master plus N-1 workers on Exoscale and
# initialise the swarm.  Uses globals: DM_BIN, CREATE_CMD, MNAME.
deploy() {
swarm_node_count=${1:-1}
echo "::: Provisioning master: $MNAME"
$DM_BIN $CREATE_CMD --exoscale-image 'Linux Ubuntu 16.04 LTS 64-bit' "$MNAME"
ip=$($DM_BIN ip "$MNAME")
# --force-new-cluster lets init succeed even on a previously-used host.
$DM_BIN ssh "$MNAME" "sudo docker swarm init --force-new-cluster --advertise-addr $ip"
if [ "$swarm_node_count" -gt 1 ]; then
# tr -d '\r' strips the carriage return docker-machine ssh appends.
joinToken=$($DM_BIN ssh "$MNAME" "sudo docker swarm join-token worker -q" | tr -d '\r')
for i in $(seq 1 $((swarm_node_count - 1))); do
WNAME=${MNAME}-worker${i}
echo "::: Provisioning worker: $WNAME"
$DM_BIN $CREATE_CMD --exoscale-image 'Linux Ubuntu 16.04 LTS 64-bit' "$WNAME"
$DM_BIN ssh "$WNAME" "sudo docker swarm join --token ${joinToken} ${ip}:2377"
done
fi
$DM_BIN ssh "$MNAME" "sudo docker node ls"
# Convenience `docker ... stack` command line for driving the new master.
# NOTE(review): DSTACK_CMD is assigned but never used afterwards -- possibly
# it was meant to be echoed for the operator; confirm before removing.
DSTACK_CMD="docker -H $ip:2376 --tls
--tlscacert $HOME/.docker/machine/machines/$MNAME/ca.pem
--tlskey $HOME/.docker/machine/machines/$MNAME/key.pem
--tlscert $HOME/.docker/machine/machines/$MNAME/cert.pem
stack"
echo "docker swarm master: $ip"
}
# Remove every docker-machine VM in parallel; warn when there is nothing to do.
terminate() {
    local machine_list
    mapfile -t machine_list < <($DM_BIN ls -q)
    if (( ${#machine_list[@]} == 0 )); then
        echo "WARNING: no machines to terminate"
        return
    fi
    local name
    for name in "${machine_list[@]}"; do
        $DM_BIN rm -y "$name" &
    done
    # Barrier: wait for all background removals to finish.
    wait
}
# Dispatch on the requested action.
case "$ACTION" in
    deploy)
        deploy "$SWARM_NODE_COUNT"
        ;;
    terminate)
        terminate
        ;;
    *)
        echo "$action_err_msg"
        exit 1
        ;;
esac
| true |
aa5a047def7e8b025f96a907ba110708a550d582 | Shell | frejsoya/EggsML | /concieggs/cmds/bf | UTF-8 | 272 | 3.140625 | 3 | [] | no_license | #!/bin/sh
#
# Run the first argument as a Brainfuck program, with the remaining
# arguments as its input.
. $CONCIEGGS_DIR/eggspi.lib

if [ $# -lt 1 ]; then
	# FIX: previously the script printed usage but still fell through and
	# ran the interpreter on an empty program; stop here instead.
	echo "Brug: bf <program>"
	exit 1
fi
prog="$1"
shift
# Feed the program on stdin; the remaining args are joined into the program's
# input string.  cat -v makes control characters in the output visible.
echo "$prog" | $EGGS_LIB_DIR/eggsbf "$*" | cat -v
| true |
b22d98de24b46e3da221bfcdc8c9ea3e96fe99c3 | Shell | KazAoyama/KaigoSystem | /E-LIFE/SHOKUJI/CGI/SHOKUJI_PATTERN_TOUROKU.TOUROKU | UTF-8 | 40,685 | 3.34375 | 3 | [] | no_license | #!/bin/bash
#
# SHOKUJI_NAIYOU_TOUROKU.TOUROKU
# Meal-content registration (CGI back end).
# NOTE(review): the header name differs from the file's path
# (SHOKUJI_PATTERN_TOUROKU.TOUROKU) -- possibly a stale copied header.
#
# Written by E.Okuda 20140114
#--------------------------------------------------------------
# Logging: redirect stderr to a per-host, per-invocation trace file and
# enable command tracing (set -xv) so every command is logged there.
exec 2> /home/hands/E-LIFE/SHOKUJI/APLOG/LOG.$(basename ${0}).${HOSTNAME}.$(date "+%Y%m%d%H%M%S"); set -xv
# Load application configuration and the user environment.
source /home/hands/E-LIFE/SHOKUJI/CGI/SHOKUJI.INI &> /dev/null
source /home/hands/.bashrc &> /dev/null
# Working variables: per-PID temp-file prefix and date stamps.
tmp=/var/tmp/${$}
today=$(date +%Y%m%d)
yday=$(mdate ${today}/-1)
thismonth="$(date +%Y%m)"
# Directory layout.
home_dir="/home/hands/E-LIFE"
app_dir="${home_dir}/SHOKUJI"
input_dir="${app_dir}/INPUT"
kanri_dir="/DATA/E-LIFE/SHOKUJI/KANRI"
#--------------------------------------------------------------
#--------------------------------------------------------------
# Clear any leftover temp files from a previous run with this PID.
rm -f $tmp-*
#--------------------------------------------------------------
#--------------------------------------------------------------
# Report a failure back to the CGI caller ("message <text>" followed by
# "result ng"), remove this run's temp files, and abort with status 1.
function error_exit {
	local msg="$1"
	printf 'message %s\n' "${msg}"
	printf 'result ng\n'
	rm -f $tmp-*
	exit 1
}
# Release every lock file recorded in $tmp-target-table, then fail through
# error_exit.  Globals read: tmp, tmplock_dir.
# NOTE(review): locks are actually acquired from $tmp-target-table-for-lock
# later in this script; unlocking from this (superset) table also removes
# base names that were never locked -- confirm this is intentional.
function error_unlock {
	message="$1"
	while read table base ; do
		rm -f $tmplock_dir/$base.lock
	done < $tmp-target-table
	# FIX: quote the message -- the previous unquoted expansion word-split
	# multi-word messages, so error_exit (which reads only $1) reported
	# just the first word.
	error_exit "${message}"
}
#--------------------------------------------------------------
#################################################################################################
# Editable section starts here
#################################################################################################
#--------------------------------------------------------------
# Variable definitions.  $1 is the name-format parameter file posted by the
# web form; name-source/nameread are the helpers that read it.
namedata=$1
# Import every posted name/value pair as a shell variable.
eval $(name-source ${namedata})
#--------------------------------------------------------------
# Extract the individual form fields ("_" is this system's NULL marker).
Shisetsu=$(nameread "SelectShisetsu" $namedata)
Tateya=$(nameread "SelectTateya" $namedata)
# Kyoshitsu=$(nameread "SelectKyoshitsu" $namedata)
Kyoshitsu="_"
USERID=$(nameread "USERID" $namedata)
RIYOUSHAID=$(nameread "SelectTaishousha" $namedata)
# Period dates arrive as YYYY/MM/DD; strip the slashes to YYYYMMDD.
ShinkiKikanFrom=$(nameread "ShinkiKikanFrom" $namedata | sed 's/\///g')
ShinkiKikanTo=$(nameread "ShinkiKikanTo" $namedata | sed 's/\///g')
# An open-ended period is stored as 99999999.
[ -z "${ShinkiKikanTo}" -o "${ShinkiKikanTo}" = "_" ] && ShinkiKikanTo="99999999"
KikanFrom=$(nameread "KikanFrom" $namedata)
KikanTo=$(nameread "KikanTo" $namedata | sed 's/\///g')
[ -z "${KikanTo}" -o "${KikanTo}" = "_" ] && KikanTo="99999999"
Shokuji=$(nameread "SELECTShokuji" $namedata)
Shokushu=$(nameread "SELECTShokushu" $namedata)
Taishousha=$(nameread "SELECTTaishousha" $namedata)
Menu=$(nameread "SELECTMenu" $namedata)
# Menu=$(nameread "Menu" $namedata)
# Resolve menu display name ($2) and colour ($16) from the menu master.
MenuName=$(awk '$1=="'${Menu}'"{print $2}' ${app_dir}/POMPA/${Shisetsu}/SHOKUJI_MENU )
MenuColor=$(awk '$1=="'${Menu}'"{print $16}' ${app_dir}/POMPA/${Shisetsu}/SHOKUJI_MENU)
# NyuukyoshaKakaku=$(nameread "NyuukyoshaKakaku" $namedata)
EtsuranHenkouShinki=$(nameread "EtsuranHenkouShinki" $namedata)
# SHOKUJIPATTERNID=$(nameread "SHOKUJIPATTERNID" $namedata)
# A "new pattern" request ignores any posted pattern ID.
if [ "${SHOKUJIPATTERNSHINKIFLG}" = "ok" ] ; then
SHOKUJIPATTERNID="_"
else
SHOKUJIPATTERNID=$(nameread "SHOKUJIPATTERNID" $namedata)
fi
[ -z "${SHOKUJIPATTERNID}" ] && SHOKUJIPATTERNID="_"
#[ -z "${RIYOUSHAID}" -o "${RIYOUSHAID}" = "_" ] && RIYOUSHAID=$(nameread "Nyuukyosha" $namedata)
#--------------------------------------------------------------
# Table/directory setup (continued).  lastmonth/nextmonth are presumably set
# by SHOKUJI.INI -- not defined in this file; confirm.
pompa_dir="${app_dir}/POMPA/${Shisetsu}/${Tateya}"
mkdir -p ${pompa_dir}
seikyu_dir="/DATA/E-LIFE/SEIKYU/SHISETSU/SHOKUJI/${Shisetsu}/${Tateya}"
mkdir -p ${seikyu_dir}/${lastmonth}
mkdir -p ${seikyu_dir}/${thismonth}
mkdir -p ${seikyu_dir}/${nextmonth}
touch ${seikyu_dir}/${lastmonth}/RIYOUSHA_SHOKUJI_JISSEKI_SEIKYUDATA
touch ${seikyu_dir}/${thismonth}/RIYOUSHA_SHOKUJI_JISSEKI_SEIKYUDATA
touch ${seikyu_dir}/${nextmonth}/RIYOUSHA_SHOKUJI_JISSEKI_SEIKYUDATA
# mkdir -p ${kanri_dir}/${Shisetsu}/${Tateya}/${thismonth}
# touch ${kanri_dir}/${Shisetsu}/${Tateya}/${thismonth}/RIYOUSHA_SHOKUJI_JISSEKI
#--------------------------------------------------------------
# Input validation.  error_exit aborts with the (Japanese) user-facing
# message.  Shokuji=9 means "delete", which skips menu/quantity checks.
[ -z "${RIYOUSHAID}" -o "${RIYOUSHAID}" = "_" ] && error_exit "利用者情報が不正です。"
[ "${EtsuranHenkouShinki}" = "1" -a "${SHOKUJIPATTERNID}" = "_" ] && error_exit "データが不正です。"
[ -z "${Menu}" ] && Menu="_"
[ "${Shokuji}" != "9" -a "${Menu}" = "_" ] && error_exit "メニューを選択してください。"
# Quantity check: reject anything that is not digits 1-9 (first filter) and
# reject an explicit 0 (second filter); a non-empty temp file means invalid.
echo "${Suuryou}" |
awk '$1!~/^[1-9]*$/{print $0}' > $tmp-suuryou_error
echo "${Suuryou}" |
awk '$1=="0"{print $0}' > $tmp-suuryou_error1
[ "${Shokuji}" != "9" -a -s $tmp-suuryou_error ] && error_exit "数量の値が不正です。(半角数字で、1以上の数字を入力してください。)"
[ "${Shokuji}" != "9" -a -s $tmp-suuryou_error1 ] && error_exit "数量の値が不正です。(半角数字で、1以上の数字を入力してください。)"
#--------------------------------------------------------------
# Re-check the dates against already-registered pattern headers.
# $tmp-kizondata: other live rows (deletion flag != 9) for this user/facility,
# used for overlap comparison.
awk '$1=="'${RIYOUSHAID}'"&&$2!="'${SHOKUJIPATTERNID}'"&&$3=="'${Shisetsu}'"&&$(NF-2)!="9"{print $0}' ${pompa_dir}/RIYOUSHA_SHOKUJI_PATTERN/RIYOUSHA_SHOKUJI_PATTERN_HEADER > $tmp-kizondata
# Fields: 1:user ID 2:meal-pattern ID 3:facility ID 4:building 5:room
# 6:period-from 7:period-to
# $tmp-this_data: the header row for the pattern ID being edited (if any).
awk '$1=="'${RIYOUSHAID}'"&&$2=="'${SHOKUJIPATTERNID}'"{print $0}' ${pompa_dir}/RIYOUSHA_SHOKUJI_PATTERN/RIYOUSHA_SHOKUJI_PATTERN_HEADER > $tmp-this_data
# If a row exists for this meal-pattern ID, verify below that its dates have
# not been changed out from under us.
#if [ -s $tmp-this_data ] ; then
#fi
#-----------------------------------------------------------------------
#--------------------------------------------------------------
# Price lookup.
#-------------------------------------
## Added 20140908
#Taishoubi="$(echo "${TaishouNengetsu}""01")"
## 消費税率を取得
#cat ${tbl_dir}/ZEIRITSU_GENGOU_MASTER/ZEIRITSU |
## 1:ID 2:税率 3:適用開始日 4:適用終了日 5:削除フラグ 6:更新日時 7:userid
#awk '$3<="'${Taishoubi}'"&&$4>="'${Taishoubi}'"{print $0}' |
#self 2 > ${tmp}-zeiritsu
#zeiritsu=$(cat ${tmp}-zeiritsu | awk '{print $1+"1"}')
# Re-fetch the tax-exclusive prices from the menu master (columns 6-9:
# resident / guest1 / guest2 / staff) to verify against the posted values.
NyuukyoshaKakaku=$(awk '$1=="'${Menu}'"{print $6}' /home/hands/E-LIFE/SHOKUJI/POMPA/${Shisetsu}/SHOKUJI_MENU)
Guest1Kakaku="$(awk '$1=="'${Menu}'"{print $7}' /home/hands/E-LIFE/SHOKUJI/POMPA/${Shisetsu}/SHOKUJI_MENU)"
Guest2Kakaku="$(awk '$1=="'${Menu}'"{print $8}' /home/hands/E-LIFE/SHOKUJI/POMPA/${Shisetsu}/SHOKUJI_MENU)"
StaffKakaku="$(awk '$1=="'${Menu}'"{print $9}' /home/hands/E-LIFE/SHOKUJI/POMPA/${Shisetsu}/SHOKUJI_MENU)"
# NyuukyoshaKakaku="$(echo "${NyuukyoshaKakakuZeinuki}" | lcalc '$1 * '${zeiritsu}'' | marume -sage 1.0)"
# Guest1Kakaku="$(echo "${Guest1KakakuZeinuki}" | lcalc '$1 * '${zeiritsu}'' | marume -sage 1.0)"
# Guest2Kakaku="$(echo "${Guest2KakakuZeinuki}" | lcalc '$1 * '${zeiritsu}'' | marume -sage 1.0)"
# StaffKakaku="$(echo "${StaffKakakuZeinuki}" |lcalc '$1 * '${zeiritsu}'' | marume -sage 1.0)"
#--------------------------------------------------------------
# 変更の場合
if [ "${EtsuranHenkouShinki}" = "1" ] ; then
# この食事IDのデータがあったら日付が変更されていないかちぇっく
if [ -s $tmp-this_data ] ; then
ThisDataKikanTo=$(awk '{print $7}' $tmp-this_data)
[ "${ThisDataKikanTo}" != "${KikanTo}" ] && error_exit "期間が変更されています。期間の変更をする場合は先に期間変更ボタンを押してください。"
fi
tourokubi_from=$(awk '{print $6}' $tmp-this_data)
tourokubi_to=$(echo "${KikanTo}")
if [ "${tourokubi_to}" != "99999999" ] ; then
kakunin_nengetsu="$(mdate "${tourokubi_to}"/+1 | self 1.1.6)"
else
kakunin_nengetsu="999999"
fi
# 新規の場合
else
# この食事IDのデータがあったら日付が変更されていないかちぇっく
if [ -s $tmp-this_data ] ; then
ThisDataKikanFrom=$(awk '{print $6}' $tmp-this_data)
ThisDataKikanTo=$(awk '{print $7}' $tmp-this_data)
[ "${ThisDataKikanFrom}" != "${ShinkiKikanFrom}" ] && error_exit "期間開始日が変更されています。期間の開始日は変更できないため、新規画面を一から操作して新しいデータを作成してください。"
[ "${ThisDataKikanTo}" != "${ShinkiKikanTo}" ] && error_exit "期間が変更されています。期間終了日を変更する場合はラジオボタンを「変更・削除」に切り替えて対象データの期間を入力後、期間変更ボタンを押してください。"
fi
tourokubi_from=$(echo "${ShinkiKikanFrom}")
tourokubi_to=$(echo "${ShinkiKikanTo}")
kakunin_nengetsu="$(echo "${tourokubi_from}" | self 1.1.6)"
fi
if [ -s $tmp-kizondata ] ; then
cat $tmp-kizondata |
awk '$2!="'${SHOKUJIPATTERNID}'"{print $0}' |
awk '{if($6>"'${tourokubi_from}'"&&$6<="'${tourokubi_to}'"){print $0}
else if($6<="'${tourokubi_from}'"&&$7>="'${tourokubi_to}'"){print $0}
else if($7>="'${tourokubi_from}'"&&$7<"'${tourokubi_to}'"){print $0}
else if($6>="'${tourokubi_from}'"&&$7<="'${tourokubi_to}'"){print $0}
}' > $tmp-taishoubi_choufuku_error
[ -s $tmp-taishoubi_choufuku_error -a "$(gyo $tmp-taishoubi_choufuku_error)" != "0" ] && error_exit "期間に重複するデータがあります。"
else
: > $tmp-kizondata
fi
# 入力された8桁日付が請求確定後のデータかどうか
#----------------------------
kakutei_nengetsu="$(LANG=C sort -k1,1 /DATA/E-LIFE/SEIKYU/SHISETSU/SEIKYU_KAKUTEI/SEIKYU_KAKUTEI.${shisetsu} | awk '$(NF-2)!="9"' | getlast 1 1 | self 2)"
# 登録日が確定月の最終日以外だったらえらー
[ "${kakutei_nengetsu}" -lt "${kakunin_nengetsu}" ] || error_exit "請求確定月以前のデータを入力することはできません。"
#--------------------------------------------------------------
#--------------------------------------------------------------
# 更新テーブル
# 食事パターンマスタ
# 食事実績
# 食事実績請求
#--------------------------------------------------------------
# 新規でパターンIDがなかったらパターンを登録
if [ "${EtsuranHenkouShinki}" = "2" -a "${SHOKUJIPATTERNID}" = "_" ] ; then
# 新規データならばIDをふる(食事ID)
### 管理IDの発番
# 今回取得するID数
last_no="1"
# 食事パターンID
mkdir -p ${kanri_dir}/${Shisetsu}/SHOKUJI_PATTERN_ID/${RIYOUSHAID}
touch ${kanri_dir}/${Shisetsu}/SHOKUJI_PATTERN_ID/${RIYOUSHAID}/SHOKUJI_PATTERN_ID
[ -s ${kanri_dir}/${Shisetsu}/SHOKUJI_PATTERN_ID/${RIYOUSHAID}/SHOKUJI_PATTERN_ID ] || echo "001" > ${kanri_dir}/${Shisetsu}/SHOKUJI_PATTERN_ID/${RIYOUSHAID}/SHOKUJI_PATTERN_ID
# 現状の最終番号
now_pattern_last_no=$(cat ${kanri_dir}/${Shisetsu}/SHOKUJI_PATTERN_ID/${RIYOUSHAID}/SHOKUJI_PATTERN_ID)
# 今回の発番後に、数字が限度を超えないかチェック
new_pattern_last_no=$(expr ${now_pattern_last_no} + ${last_no})
# 超えてたらリセット
[ ${new_pattern_last_no} -gt 999 ] && echo "001" > ${kanri_dir}/${Shisetsu}/SHOKUJI_PATTERN_ID/${RIYOUSHAID}/SHOKUJI_PATTERN_ID
get_no ${last_no} ${kanri_dir}/${Shisetsu}/SHOKUJI_PATTERN_ID/${RIYOUSHAID}/SHOKUJI_PATTERN_ID > $tmp-id_pattern_all
# この場合取得するのは一つなのでそのまま変数に
SHOKUJIPATTERNID="$(cat $tmp-id_pattern_all)"
#data_id="${today}${new_no}"
# パターンヘッダーテーブル
echo "${RIYOUSHAID}" "${SHOKUJIPATTERNID}" "${Shisetsu}" "${Tateya}" "${Kyoshitsu}" "${ShinkiKikanFrom}" "${ShinkiKikanTo}" "_" "_" "_" "1" "${cur_time}" "${USERID}" > $tmp-pattern_header_data_input
# 1:利用者ID 2:食事パターンID 3:施設ID 4:建屋 5:居室
# 6:期間From 7:期間To
fi
: > $tmp-pattern_data_jyunbi
cat ${namedata} |
awk '$1~/^mon/{gsub("mon","1");print $0}' >> $tmp-pattern_data_jyunbi
cat ${namedata} |
awk '$1~/^tue/{gsub("tue","2");print $0}' >> $tmp-pattern_data_jyunbi
cat ${namedata} |
awk '$1~/^wed/{gsub("wed","3");print $0}' >> $tmp-pattern_data_jyunbi
cat ${namedata} |
awk '$1~/^thu/{gsub("thu","4");print $0}' >> $tmp-pattern_data_jyunbi
cat ${namedata} |
awk '$1~/^fri/{gsub("fri","5");print $0}' >> $tmp-pattern_data_jyunbi
cat ${namedata} |
awk '$1~/^sat/{gsub("sat","6");print $0}' >> $tmp-pattern_data_jyunbi
cat ${namedata} |
awk '$1~/^sun/{gsub("sun","7");print $0}' >> $tmp-pattern_data_jyunbi
# 1:曜日番号 2:食事(朝、昼、夜・・・)
############
# 追加の場合
############
if [ "${MODE}" = "tsuika" ] ; then
[ "${Shokuji}" = "9" ] && error_exit "追加ボタンで削除はできません。もう一度やりなおしてください。"
# チェックの入ったデータがなかったら
if [ ! -s $tmp-pattern_data_jyunbi ] ; then
error_exit "追加する曜日の項目にチェックを入れてください。"
# データがあったら
else
# 新規データならばIDをふる(食事ID)
### 管理IDの発番
# 今回取得するID数
last_no="1"
# 食事ID
mkdir -p ${kanri_dir}/${Shisetsu}
touch ${kanri_dir}/${Shisetsu}/SHOKUJI_ID
[ -s ${kanri_dir}/${Shisetsu}/SHOKUJI_ID ] || echo "00000001" > ${kanri_dir}/${Shisetsu}/SHOKUJI_ID
# 現状の最終番号
now_last_no=$(cat ${kanri_dir}/${Shisetsu}/SHOKUJI_ID)
# 今回の発番後に、数字が限度を超えないかチェック
new_last_no=$(expr ${now_last_no} + ${last_no})
# 超えてたらリセット
[ ${new_last_no} -gt "99999999" ] && echo "00000001" > ${kanri_dir}/${Shisetsu}/SHOKUJI_ID
get_no ${last_no} ${kanri_dir}/${Shisetsu}/SHOKUJI_ID > $tmp-id_all
# この場合取得するのは一つなのでそのまま変数に
shokuji_id="$(cat $tmp-id_all)"
#data_id="${today}${new_no}"
cat $tmp-pattern_data_jyunbi |
sed 's/_/ /g' |
# 1:曜日(数字) 2:食事(数字) 3:番号 4:メニューID
awk '{print "'${RIYOUSHAID}'","'${SHOKUJIPATTERNID}'",$1,$2,"'${shokuji_id}'","'${Menu}'","'${MenuName}'","'${Shokushu}'","'${NyuukyoshaKakaku}'","'${Suuryou}'","'${MenuColor}'","_","_","_","_","_","1","'${cur_time}'","'${USERID}'"}' > $tmp-pattern_data_input
# 1:利用者ID 2:食事パターンID 3:曜日 4:食事(朝昼夜おやつ他) 5:食事ID
# 6:食事メニューID 7:食事メニュー名称 8:食事種類ID 9:単価(税抜き) 10:数量
# 11:色ID 12:予備項目1 13:予備項目2 14:予備項目3 15:予備項目4
# 16:予備項目5
fi
############
# 変更の場合
############
elif [ "${MODE}" = "henkou" ] ; then
# チェックの入ったデータがなかったら
if [ ! -s $tmp-pattern_data_jyunbi ] ; then
error_exit "変更する曜日の項目にチェックを入れてください。"
# チェックの入ったデータがあったら
else
# 変更元データの用意
cat $tmp-pattern_data_jyunbi |
awk '{print $0,$NF!="_"?$NF:"@"}' |
delf NF-1 |
sed 's/_/ /g' |
# 1:曜日(数字) 2:食事(数字) 3:食事ID 4:メニューID 5:価格
#> $tmp-henkou_moto_data
awk '{print "'${RIYOUSHAID}'","'${SHOKUJIPATTERNID}'",$1,$2,$3}' |
LANG=C sort > $tmp-henkou_moto_data
# 1:利用者(入居者)ID 2:食事パターンID 3:曜日 4:食事(朝昼夜おやつ他) 5:食事ID
# cp $tmp-* /home/hands/work/okuda
[ "${Shokuji}" = "9" -a "$(awk '$NF!="@"{print $0}' $tmp-henkou_moto_data | gyo)" = "0" ] && error_exit "削除対象データがありません。"
: >$tmp-pattern_data_input
cat ${pompa_dir}/RIYOUSHA_SHOKUJI_PATTERN/RIYOUSHA_SHOKUJI_PATTERN |
# 1:利用者ID 2:食事パターンID 3:曜日 4:食事(朝昼夜おやつ他) 5:食事ID
# 6:食事メニューID 7:食事メニュー名称 8:食事種類ID 9:単価(税抜き) 10:数量
# 11:色ID 12:予備項目1 13:予備項目2 14:予備項目3 15:予備項目4
# 16:予備項目5
join2 key=1/5 - $tmp-henkou_moto_data |
delf NF-2/NF |
awk '$5!="@"{print $0,"9","'${cur_time}'","'${USERID}'"}' >> $tmp-pattern_data_input
# 1:利用者ID 2:食事パターンID 3:曜日 4:食事(朝昼夜おやつ他) 5:食事ID
# 6:食事メニューID 7:食事メニュー名称 8:食事種類ID 9:単価(税抜き) 10:数量
# 11:色ID 12:予備項目1 13:予備項目2 14:予備項目3 15:予備項目4
# 16:予備項目5
#####################
# 変更登録の場合
#####################
if [ "${Shokuji}" != "9" ] ; then
# 新規データならばIDをふる(食事ID)
### 管理IDの発番
# 今回取得するID数
last_no="1"
# 食事ID
mkdir -p ${kanri_dir}/${Shisetsu}
touch ${kanri_dir}/${Shisetsu}/SHOKUJI_ID
[ -s ${kanri_dir}/${Shisetsu}/SHOKUJI_ID ] || echo "00000001" > ${kanri_dir}/${Shisetsu}/SHOKUJI_ID
# 現状の最終番号
now_last_no=$(cat ${kanri_dir}/${Shisetsu}/SHOKUJI_ID)
# 今回の発番後に、数字が限度を超えないかチェック
new_last_no=$(expr ${now_last_no} + ${last_no})
# 超えてたらリセット
[ ${new_last_no} -gt 99999999 ] && echo "00000001" > ${kanri_dir}/${Shisetsu}/SHOKUJI_ID
get_no ${last_no} ${kanri_dir}/${Shisetsu}/SHOKUJI_ID > $tmp-id_all
# この場合取得するのは一つなのでそのまま変数に
shokuji_id="$(cat $tmp-id_all)"
#data_id="${today}${new_no}"
cat $tmp-henkou_moto_data |
# 1:利用者(入居者)ID 2:食事パターンID 3:曜日 4:食事(朝昼夜おやつ他) 5:食事ID
self 1/4 |
awk '{print $0,"'${shokuji_id}'","'${Menu}'","'${MenuName}'","'${Shokushu}'","'${NyuukyoshaKakaku}'","'${Suuryou}'","'${MenuColor}'","_","_","_","_","_","1","'${cur_time}'","'${USERID}'"}' >> $tmp-pattern_data_input
# 1:利用者ID 2:食事パターンID 3:曜日 4:食事(朝昼夜おやつ他) 5:食事ID
# 6:食事メニューID 7:食事メニュー名称 8:食事種類ID 9:単価(税抜き) 10:数量
# 11:色ID 12:予備項目1 13:予備項目2 14:予備項目3 15:予備項目4
# 16:予備項目5
#####################
# 変更削除の場合
#####################
else
echo "最初に用意した$tmp-pattern_data_inputでOK"
fi
fi
fi
[ $(plus ${PIPESTATUS[@]}) -eq 0 ] || error_exit
######################
# 食事実績
######################
# 変更の場合は対象日などを既存のパターンヘッダーテーブルから
if [ -s $tmp-this_data ] ; then
TaishoubiFrom=$(awk '{print $6}' $tmp-this_data)
TaishoubiTo=$(awk '{print $7}' $tmp-this_data)
TaishouToMonth=$(echo "${TaishoubiTo}" | awk '{print substr($1,1,6)}')
TaishouFromMonth=$(echo "${TaishoubiFrom}" | awk '{print substr($1,1,6)}')
# 新規の場合は対象日は入力された日付
else
TaishoubiFrom="$(echo "${ShinkiKikanFrom}")"
TaishoubiTo="$(echo "${ShinkiKikanTo}")"
TaishouToMonth=$(echo "${TaishoubiTo}" | awk '{print substr($1,1,6)}')
TaishouFromMonth=$(echo "${TaishoubiFrom}" | awk '{print substr($1,1,6)}')
fi
# 期間Toが来月以降ならばとりあえず今月の最終日
[ "${TaishouToMonth}" -gt "${thismonth}" ] && TaishoubiTo="$(mdate month | tarr | tail -1)"
:> $tmp-nengetsu
# 期間開始日が今月なら今月のデータをつくる
mdate -a month |
awk '$1>="'${TaishoubiFrom}'"&&$1<="'${TaishoubiTo}'"{print $0}' |
self 2 1 >> $tmp-nengetsu
# 曜日 1:年月日
# 期間開始日が今月以外だったら
if [ "${TaishouFromMonth}" != "${thismonth}" ] ; then
mdate -a ${TaishouFromMonth}m |
awk '$1>="'${TaishoubiFrom}'"&&$1<="'${TaishoubiTo}'"{print $0}' |
self 2 1 >> $tmp-nengetsu
fi
cat $tmp-pattern_data_input |
# 1:利用者ID 2:食事パターンID 3:曜日 4:食事(朝昼夜おやつ他) 5:食事ID
# 6:食事メニューID 7:食事メニュー名称 8:食事種類ID 9:単価(税抜き) 10:数量
# 11:色ID 12:予備項目1 13:予備項目2 14:予備項目3 15:予備項目4
# 16:予備項目5
self 3 1 2 4/NF |
rank > $tmp-pattern_jisseki_jyunbi_jyunbi
jisseki_suu=$(gyo $tmp-pattern_jisseki_jyunbi_jyunbi)
: >> $tmp-pattern_jisseki_jyunbi
for i in $(seq 1 ${jisseki_suu}) ; do
awk '$1=="'${i}'"{print $0}' $tmp-pattern_jisseki_jyunbi_jyunbi |
delf 1 |
# 1:曜日 2:利用者ID 3:食事パターンID 4:食事(朝昼夜おやつ他) 5:食事ID
# 6:食事メニューID 7:食事メニュー名称 8:食事種類ID 9:単価(税抜き) 10:数量
# 11:色ID 12:予備項目1 13:予備項目2 14:予備項目3 15:予備項目4
# 16:予備項目5
cjoin1 key=1 - $tmp-nengetsu |
# 1:曜日 2:利用者ID 3:食事パターンID 4:食事(朝昼夜おやつ他) 5:食事ID
# 6:食事メニューID 7:食事メニュー名称 8:食事種類ID 9:単価(税抜き) 10:数量
# 11:色ID 12:予備項目1 13:予備項目2 14:予備項目3 15:予備項目4
# 16:予備項目5
# 11:日付
self NF 2/NF-1 >> $tmp-pattern_jisseki_jyunbi
# 1:日付 2:利用者ID 3:食事パターンID 4:食事(朝昼夜おやつ他) 5:食事ID
# 6:食事メニューID 7:食事メニュー名称 8:食事種類ID 9:単価(税抜き) 10:数量
# 11:色ID 12:予備項目1 13:予備項目2 14:予備項目3 15:予備項目4
# 16:予備項目5
done
cat $tmp-pattern_jisseki_jyunbi |
awk '{print "'${Shisetsu}'","'${Tateya}'","'${Kyoshitsu}'","1",$0}' |
# 1:施設ID 2:建屋 3:居室 4:入居者ゲストフラグ 5:日付
# 6:利用者ID 7:食事パターンID 8:食事(朝昼夜おやつ他) 9:食事ID 10:食事メニューID
# 11:食事メニュー名称 12:食事種類ID 13:単価(税抜き) 14:数量 15:色ID
# 16:予備項目1 17:予備項目2 18:予備項目3 19:予備項目4 20:予備項目5
self 6 1/4 5 8/NF |
LANG=C sort > $tmp-pattern_jisseki
# 期間開始日が今月以外だったら
if [ "${TaishouFromMonth}" != "${thismonth}" ] ; then
cat $tmp-pattern_jisseki |
# 1:利用者ID 2:施設ID 3:建屋 4:居室 5:入居者ゲストフラグ
# 6:日付 7:食事(朝昼夜おやつ他) 8:食事ID 9:食事メニュー 10:食事メニュー名
# 11:食事種類ID 12:単価(税抜き) 13:数量 14:メニュー色ID 15:予備項目1
# 16:予備項目2 17:予備項目3 18:予備項目4 19:予備項目5 20:削除フラグ
# 21:更新日 22:更新者
# 何月のデータか
awk '{print $0,substr($6,1,6)}' > $tmp-pattern_jisseki_month
# 今月のデータ
awk '$NF=="'${thismonth}'"{print $0}' $tmp-pattern_jisseki_month |
delf NF > $tmp-pattern_jisseki${thismonth}_input_kesshoku_nocheck
# 先月のデータ
awk '$NF=="'${TaishouFromMonth}'"{print $0}' $tmp-pattern_jisseki_month |
delf NF > $tmp-pattern_jisseki${TaishouFromMonth}_input_kesshoku_nocheck
else
cat $tmp-pattern_jisseki > $tmp-pattern_jisseki${thismonth}_input_kesshoku_nocheck
fi
echo "${thismonth}" "${TaishouFromMonth}" |
tarr |
uniq > $tmp-jisseki_ari_month
# 1:年月
#---------------------------------------------------------------------------------------
# 実績があった場合、その期間に欠食登録があれば作成したinputの更新フラグを9:削除にする
#---------------------------------------------------------------------------------------
if [ -s $tmp-pattern_jisseki${thismonth}_input_kesshoku_nocheck -o -s $tmp-pattern_jisseki${TaishouFromMonth}_input_kesshoku_nocheck ] ; then
for M in $(cat $tmp-jisseki_ari_month) ; do
cat ${seikyu_dir}/${M}/RIYOUSHA_KESSHOKU |
awk '$1=="'${RIYOUSHAID}'"&&$(NF-2)!="9"{print $1,$2,$3,$4,"1",$5,$6}' |
LANG=C sort > $tmp-kesshoku_data
# 1:利用者ID 2:施設ID 3:建屋 4:居室 5:入居者ゲストフラグ
# 6:日付 7:食事(朝昼夜おやつ他)
if [ -s $tmp-kesshoku_data -a -s $tmp-pattern_jisseki${M}_input_kesshoku_nocheck ] ; then
cat $tmp-pattern_jisseki${M}_input_kesshoku_nocheck |
LANG=C sort |
# 1:利用者ID 2:施設ID 3:建屋 4:居室 5:入居者ゲストフラグ
# 6:日付 7:食事(朝昼夜おやつ他) 8:食事ID 9:食事メニューID 10:食事メニュー名称
# 11:食事種類ID 12:単価(税抜き) 13:数量 14:色ID 15:予備項目1
# 16:予備項目2 17:予備項目3 18:予備項目4 19:予備項目5 20:削除フラグ
# 21:更新日 22:更新者
join1 +ng key=1/7 $tmp-kesshoku_data - >$tmp-kesshoku_ari 2>$tmp-kesshoku_nashi
# 欠食があったものはフラグを9へ
if [ -s $tmp-kesshoku_ari ] ; then
cat $tmp-kesshoku_ari |
delf NF-2/NF |
awk '{print $0,"9","'${cur_time}'","'${USERID}'"}' > $tmp-kesshoku_ari_result
if [ -s $tmp-kesshoku_nashi ] ; then
cat $tmp-kesshoku_ari_result $tmp-kesshoku_nashi >> $tmp-pattern_jisseki${M}_input
else
cat $tmp-kesshoku_ari_result >> $tmp-pattern_jisseki${M}_input
fi
# 欠食がない場合はそれがいんぷっとへ
else
cat $tmp-kesshoku_nashi >> $tmp-pattern_jisseki${M}_input
fi
else
cat $tmp-pattern_jisseki${M}_input_kesshoku_nocheck >> $tmp-pattern_jisseki${M}_input
fi
done
fi
################
# 請求用データ
################
for MONTH in $(cat $tmp-jisseki_ari_month) ; do
: > $tmp-zeiritsu
for i in $(mdate ${MONTH}m) ; do
# 消費税率を取得
cat ${tbl_dir}/ZEIRITSU_GENGOU_MASTER/ZEIRITSU |
# 1:ID 2:税率 3:適用開始日 4:適用終了日 5:削除フラグ 6:更新日時 7:userid
awk '$3<="'${i}'"&&$4>="'${i}'"{print "'${i}'",$2+1}' >> $tmp-zeiritsu
# 1:日付 2:税率
done
# 単価が0円じゃないもの(税計算するもの)
cat $tmp-pattern_jisseki${MONTH}_input |
# 1:利用者ID 2:施設ID 3:建屋 4:居室 5:入居者ゲストフラグ
# 6:日付 7:食事(朝昼夜おやつ他) 8:食事ID 9:食事メニュー 10:食事メニュー名
# 11:食事種類ID 12:単価(税抜き) 13:数量 14:メニュー色ID 15:予備項目1
# 16:予備項目2 17:予備項目3 18:予備項目4 19:予備項目5 20:削除フラグ
# 21:更新日 22:更新者
awk '$12!="0"{print $0}' > $tmp-shokuji_jisseki_tankaari_${MONTH}
# 単価が0円のもの(税計算しないもの)
cat $tmp-pattern_jisseki${MONTH}_input |
awk '$12=="0"{print $0}' > $tmp-shokuji_jisseki_0_${MONTH}
: >$tmp-shokuji_jisseki_seikyutaishou_input
if [ -s $tmp-shokuji_jisseki_tankaari_${MONTH} ] ; then
cat $tmp-shokuji_jisseki_tankaari_${MONTH} |
LANG=C sort -k6,6 |
join2 key=6 $tmp-zeiritsu - > $tmp-shokuji_jisseki_tankaari_zeiari_${MONTH}
# 1:利用者(入居者)ID 2:施設ID 3:建屋 4:居室 5:入居者ゲストフラグ
# 6:日付 7:税率 8:食事(朝昼夜おやつ他) 9:食事ID 10:食事メニュー
# 11:食事メニュー名 12:食事種類ID 13:単価(税抜き) 14:数量 15:メニュー色ID
# 16:予備項目1 17:予備項目2 18:予備項目3 19:予備項目4 20:予備項目5
# 21:削除フラグ 22:更新日 23:更新者
cat $tmp-shokuji_jisseki_tankaari_zeiari_${MONTH} |
lcalc '$13 * $7' |
marume -sage 1.0 |
ycat $tmp-shokuji_jisseki_tankaari_zeiari_${MONTH} - |
### 20140908 変更
awk '{print $0,$13}' |
marume -sage 25.0 |
awk '{if($(NF-1)>$NF){print $0,"1",$(NF-1)-$NF}else{print $0,"1","0"}}' |
delf NF-2 |
# awk '{print $0,"1",$13+$(NF-1)}' |
# 1:利用者(入居者)ID 2:施設ID 3:建屋 4:居室 5:入居者ゲストフラグ
# 6:日付 7:税率 8:食事(朝昼夜おやつ他) 9:食事ID 10:食事メニュー
# 11:食事メニュー名 12:食事種類ID 13:単価(税抜き) 14:数量 15:メニュー色ID
# 16:予備項目1 17:予備項目2 18:予備項目3 19:予備項目4 20:予備項目5
# 21:削除フラグ 22:更新日 23:更新者 24:税込み価格25:税区分
# 26:税
self 1/6 8/12 NF-1 7 13 NF NF-2 14 NF-5/NF-3 >> $tmp-shokuji_jisseki_seikyutaishou_tochuu_${MONTH}
# 1:利用者(入居者)ID 2:施設ID 3:建屋 4:居室 5:入居者ゲストフラグ
# 6:日付 7:食事(朝昼夜おやつ他) 8:食事ID 9:食事メニューID 10:食事メニュー名称
# 11:食事種類I 12:税区分 13:税率 14:単価(税抜き) 15:税額
# 16:税込み金額 17:数量 18:削除 19:更新日 20:更新者
# 合計金額を計算する
cat $tmp-shokuji_jisseki_seikyutaishou_tochuu_${MONTH} |
lcalc '$14 * $17','$15 * $17','$16 * $17' |
ycat $tmp-shokuji_jisseki_seikyutaishou_tochuu_${MONTH} - |
self 1/17 NF-2/NF 18/NF-3 |
awk '{print $0,"_","_","_","_","_",$(NF-2),$(NF-1),$NF}' |
# 1:利用者ID 2:施設ID 3:建屋 4:居室 5:入居者ゲストフラグ
# 6:日付 7:食事(朝昼夜おやつ他) 8:食事ID 9:食事メニューID 10:食事メニュー名称
# 11:食事種類ID 12:税区分 13:税率 14:単価(税抜き) 15:税額
# 16:税込価格 17:数量 18:合計金額(税抜) 19:合計税金額(税額のみ) 20:合計金額(税込)
# 21:削除フラグ 22:更新時間 23:更新者 24:予備項目1 25:予備項目2
# 26:予備項目3 27:予備項目4 28:予備項目5 29:削除フラグ 30:更新時間
# 31:更新者
self 1/20 24/NF >> $tmp-shokuji_jisseki_seikyutaishou${MONTH}_input
# 1:利用者ID 2:施設ID 3:建屋 4:居室 5:入居者ゲストフラグ
# 6:日付 7:食事(朝昼夜おやつ他) 8:食事ID 9:食事メニューID 10:食事メニュー名称
# 11:食事種類ID 12:税区分 13:税率 14:単価(税抜き) 15:税額
# 16:税込価格 17:数量 18:合計金額(税抜) 19:合計税金額(税額のみ) 20:合計金額(税込)
# 21:予備項目1 22:予備項目2 23:予備項目3 24:予備項目4 25:予備項目5
# 26:削除フラグ 27:更新時間 28:更新者
fi
if [ -s $tmp-shokuji_jisseki_0_${MONTH} ] ; then
cat $tmp-shokuji_jisseki_0_${MONTH} |
# 1:利用者ID 2:施設ID 3:建屋 4:居室 5:入居者ゲストフラグ
# 6:日付 7:食事(朝昼夜おやつ他) 8:食事ID 9:食事メニュー 10:食事メニュー名
# 11:食事種類ID 12:単価(税抜き) 13:数量 14:メニュー色ID 15:予備項目1
# 16:予備項目2 17:予備項目3 18:予備項目4 19:予備項目5 20:削除フラグ
# 21:更新日 22:更新者
awk '{print $0,"1","0",$12,"0","0",$13,"0","0","0","_","_","_","_","_",$(NF-2),$(NF-1),$NF}' |
self 1/11 23/NF >> $tmp-shokuji_jisseki_seikyutaishou${MONTH}_input
# 1:利用者ID 2:施設ID 3:建屋 4:居室 5:入居者ゲストフラグ
# 6:日付 7:食事(朝昼夜おやつ他) 8:食事ID 9:食事メニューID 10:食事メニュー名称
# 11:食事種類ID 12:税区分 13:税率 14:単価(税抜き) 15:税額
# 16:税込価格 17:数量 18:合計金額(税抜) 19:合計税金額(税額のみ) 20:合計金額(税込)
# 21:予備項目1 22:予備項目2 23:予備項目3 24:予備項目4 25:予備項目5
# 26:削除フラグ 27:更新時間 28:更新者
fi
done
### INPUTデータの作成
### 後で見てわかりやすいように書く
#echo "${INPUT_TEXT1} _" |
### 加工済みのデータだけを元にして一旦selfする
#self NF 1 NF NF NF NF |
# 1:ID 2:入力内容 3:入力者 4:入力者名 5:最終更新日時
# 6:最終更新者
# 最終的に更新情報を代入
#awk '{$1="'${data_id}'";
# $3="'${USER_ID}'";
# $4="'${USER_NAME}'";
# $5="'${cur_time}'";
# $6="'${USER_ID}'";
# print}' > $tmp-sample_input
### ここでのtmpファイル名は更新時に使うので注意すること
#--------------------------------------------------------------
#--------------------------------------------------------------
# 更新対象ファイルのリスト化
# 1:ファイルのパス 2:ファイル名
## ロックファイル作成用テーブル
## POMPAファイルがPOMPA直下でなく、店などのサブディレクトリ配下にいる場合には
## 1フィールド目が「${TEN_CODE}/SAMPLE_DATA」などになる
cat <<- FIN | LANG=C sort -u > $tmp-target-table
RIYOUSHA_SHOKUJI_PATTERN/RIYOUSHA_SHOKUJI_PATTERN RIYOUSHA_SHOKUJI_PATTERN
FIN
# 1:tmpファイル名 2:更新ファイル名 3:キーフィールド 4:更新時間フィールド 5:全体列数
# 6:POMPA場所 7:INPUT場所
[ -s $tmp-pattern_header_data_input ] && echo "RIYOUSHA_SHOKUJI_PATTERN/RIYOUSHA_SHOKUJI_PATTERN_HEADER RIYOUSHA_SHOKUJI_PATTERN_HEADER" >> $tmp-target-table
if [ $(cat $tmp-pattern_jisseki*_input | gyo) -gt "0" ] ; then
for MONTH in $(cat $tmp-jisseki_ari_month) ; do
# [ -s $tmp-pattern_jisseki${MONTH}_input ] && echo "${MONTH}/RIYOUSHA_SHOKUJI_JISSEKI RIYOUSHA_SHOKUJI_JISSEKI" >> $tmp-target-table
# [ -s $tmp-shokuji_jisseki_seikyutaishou${MONTH}_input ] && echo "${MONTH}/RIYOUSHA_SHOKUJI_JISSEKI_SEIKYUDATA RIYOUSHA_SHOKUJI_JISSEKI_SEIKYUDATA" >> $tmp-target-table
#20150910mod ロック単位変更
[ -s $tmp-pattern_jisseki${MONTH}_input ] && echo "${MONTH}/RIYOUSHA_SHOKUJI_JISSEKI RIYOUSHA_SHOKUJI_JISSEKI_${Shisetsu}_${MONTH}" >> $tmp-target-table
[ -s $tmp-shokuji_jisseki_seikyutaishou${MONTH}_input ] && echo "${MONTH}/RIYOUSHA_SHOKUJI_JISSEKI_SEIKYUDATA RIYOUSHA_SHOKUJI_JISSEKI_SEIKYUDATA_${Shisetsu}_${MONTH}" >> $tmp-target-table
done
fi
## 更新ファイルの作成とチェック用のテーブル
## 6/7フィールド目は、アプリ間連携で別アプリの配下にあるINPUTを更新する場合用
cat <<- FIN > $tmp-koushin_pompa
pattern_data RIYOUSHA_SHOKUJI_PATTERN 5 18 19 ${pompa_dir}/RIYOUSHA_SHOKUJI_PATTERN ${input_dir}
FIN
#--------------------------------------------------------------
[ -s $tmp-pattern_header_data_input ] && echo "pattern_header_data RIYOUSHA_SHOKUJI_PATTERN_HEADER 2 12 13 ${pompa_dir}/RIYOUSHA_SHOKUJI_PATTERN ${input_dir}" >> $tmp-koushin_pompa
if [ $(cat $tmp-pattern_jisseki*_input | gyo) -gt "0" ] ; then
for MONTH in $(cat $tmp-jisseki_ari_month) ; do
[ -s $tmp-pattern_jisseki${MONTH}_input ] && echo "pattern_jisseki${MONTH} RIYOUSHA_SHOKUJI_JISSEKI 8 21 22 ${pompa_dir}/${MONTH} ${input_dir}" >> $tmp-koushin_pompa
[ -s $tmp-shokuji_jisseki_seikyutaishou${MONTH}_input ] && echo "shokuji_jisseki_seikyutaishou${MONTH} RIYOUSHA_SHOKUJI_JISSEKI_SEIKYUDATA 8 27 28 ${pompa_dir}/${MONTH} ${input_dir}" >> $tmp-koushin_pompa
done
fi
# Back up every POMPA file that is about to be updated (dated copy in back/).
cat $tmp-target-table |
awk '$1!~/*tbl_dir*/{print $0}' |
while read file_name base ;do
mkdir -p ${pompa_dir}/back
cp ${pompa_dir}/${file_name} ${pompa_dir}/back/${file_name}.${today}
done
#################################################################################################
# Editable section ends here
#################################################################################################
#--------------------------------------------------------------
# Acquire data locks: deduplicate by lock base name (column 2), then let the
# shared lock helper create one lock file per base.
cat $tmp-target-table |
LANG=C sort -k2,2 |
getlast 2 2 > $tmp-target-table-for-lock
$function_dir/FUNCTION.LOCK_CHECK $tmp-target-table-for-lock
if [ $? -ne 0 ]; then
# Error: failed to acquire the locks.
# error_unlock "ロック処理失敗"
# 20150910mod
# If locking fails, deleting the lock files here would release locks held by
# other users, so this path was changed to error_exit.
# After the locks are acquired, any error before the unlock below must go
# through error_unlock so the lock files are reliably removed.
error_exit "ロック処理失敗"
fi
#--------------------------------------------------------------
#--------------------------------------------------------------
# Build the updated version of each table: append the input rows to the
# existing POMPA file, sort on the key fields plus the update-time field, and
# keep only the newest record per key (getlast).
cat $tmp-koushin_pompa |
while read input_name file_name sort_key time_key retu_no pompa_dir_name input_dir_name ; do
cat $tmp-${input_name}_input |
if [ -e ${pompa_dir_name}/${file_name} ] ; then
cat ${pompa_dir_name}/${file_name} -
else
cat -
fi |
LANG=C sort -k1,${sort_key} -k${time_key},${time_key} |
getlast 1 ${sort_key} > $tmp-${input_name}_new
done
#--------------------------------------------------------------
#--------------------------------------------------------------
# Column-count sanity check on every merged file; any mismatch flags $tmp-err.
cat $tmp-koushin_pompa |
while read input_name file_name sort_key time_key retu_no pompa_dir_name input_dir_name ; do
[ "$(retu $tmp-${input_name}_new)" != "${retu_no}" ] && : > $tmp-err
[ "$(retu $tmp-${input_name}_new | gyo)" != "1" ] && : > $tmp-err
[ "$(awk 'NF!="'${retu_no}'"' $tmp-${input_name}_new | gyo)" != "0" ] && : > $tmp-err
done
[ -e $tmp-err ] && error_unlock "列数エラー"
## Note: this must be error_unlock, not error_exit -- locks are already held!
#--------------------------------------------------------------
#--------------------------------------------------------------
# Abort without updating while a batch job is running (currently disabled).
#[ -e ${tmptmplock_dir}/BATCH.SAMPLE_LOCK ] && error_unlock "システム処理中"
#--------------------------------------------------------------
#--------------------------------------------------------------
# Commit: archive the raw input rows under INPUT/<today>/ and move the merged
# files into place over the live POMPA tables.
cat $tmp-koushin_pompa |
while read input_name file_name sort_key time_key retu_no pompa_dir_name input_dir_name ; do
mkdir -p ${pompa_dir_name}
mkdir -p ${input_dir_name}
mkdir -p ${input_dir_name}/${today}
mv -f $tmp-${input_name}_input ${input_dir_name}/${today}/SHOKUJI_PATTERN_TOUROKU/${Shisetsu}/${file_name}.${cur_time}.$(basename $0).$$
mv -f $tmp-${input_name}_new ${pompa_dir_name}/${file_name}
done
#20150915mod moved here from after the lock release (must run while locked):
# propagate the billing data for every month that received meal records.
for MONTH in $(cat $tmp-jisseki_ari_month) ; do
if [ -s $tmp-pattern_jisseki${MONTH}_input ] ; then
cp -p ${pompa_dir}/${MONTH}/RIYOUSHA_SHOKUJI_JISSEKI_SEIKYUDATA ${seikyu_dir}/${MONTH}/RIYOUSHA_SHOKUJI_JISSEKI_SEIKYUDATA
fi
done
#--------------------------------------------------------------
#cp $tmp-* /home/hands/work/okuda
#--------------------------------------------------------------
# Release the locks.
cat $tmp-target-table-for-lock |
while read table base ;do
rm -f $tmplock_dir/$base.lock
: ;done
#--------------------------------------------------------------
#20150915mod the copy below was moved above the unlock because it ran outside
# the locked region.
# for MONTH in $(cat $tmp-jisseki_ari_month) ; do
# if [ -s $tmp-pattern_jisseki${MONTH}_input ] ; then
# cp -p ${pompa_dir}/${MONTH}/RIYOUSHA_SHOKUJI_JISSEKI_SEIKYUDATA ${seikyu_dir}/${MONTH}/RIYOUSHA_SHOKUJI_JISSEKI_SEIKYUDATA
# fi
# done
## cp -p ${pompa_dir}/RIYOUSHA_SHOKUJI_JISSEKI_SEIKYUDATA ${seikyu_dir}/RIYOUSHA_SHOKUJI_JISSEKI_SEIKYUDATA
## cp -p ${pompa_dir}/RIYOUSHA_SHOKUJI_JISSEKI ${kanri_dir}/${Shisetsu}/${Tateya}/${thismonth}/RIYOUSHA_SHOKUJI_JISSEKI
# Success response for the CGI caller.
echo "result ok ${SHOKUJIPATTERNID}"
## Done.
rm -f $tmp-*
exit 0
| true |
eb2f9e0c84508d6e0209ba75249d875e95318d85 | Shell | suhasshettyy/shell-program | /day6pp/comp.sh | UTF-8 | 138 | 3.34375 | 3 | [] | no_license | #!/bin/bash -x
read -p "enter the number:" n
for (( i=2; i<=$n; i++ ))
do
while [ $((($n%$i))) -eq 0 ]
do n=$(($n/$i))
echo $i
done
done
| true |
60ba0e84380f5982bff5b14c593ba5fcab76cb3e | Shell | mukeshmahajan501/CodinClubBatch027 | /linux-content/temp/temp1/day5/untiConversionWithCase.sh | UTF-8 | 762 | 3.21875 | 3 | [] | no_license | #!/bin/bash -x
echo "enter your choice: "
read choice
case $choice in
1)
echo "feet to inch"
echo "enter a number in feet: "
read feet
printf %.f "$(($feet*12))"
;;
2)
echo "feet to meter"
echo "enter a number in feet: "
read feet
printf %.4f "$((1000000000 * ($feet*3048)/10000))e-9"
;;
3)
echo "inches to feet"
echo "enter a number in inches: "
read inch
printf %.4f "$((1000000000 * ($inch*1)/12))e-9"
;;
4)
echo "meter to feet "
echo "enter a number in meter: "
read meter
printf %.f "$(($feet*12))"
;;
*)
echo "wrong choice!!"
;;
esac
| true |
319a4bd358dfb1d4b3581a0d9d48dfbd6cb625d6 | Shell | michlin0825/devops-capstone | /infra/setup-k8s.sh | UTF-8 | 1,832 | 2.71875 | 3 | [] | no_license | ## set up EKS cluster
# install awscli and set aws credentials
sudo yum install python3-pip -y
sudo pip3 install awscli --upgrade
aws configure
# export AWS_ACCESS_KEY_ID=
# export AWS_SECRET_ACCESS_KEY=
# install eksctl
curl --silent --location "https://github.com/weaveworks/eksctl/releases/download/latest_release/eksctl_$(uname -s)_amd64.tar.gz" | tar xz -C /tmp
sudo mv /tmp/eksctl /usr/local/bins
eksctl version
# install kubectl
curl -o kubectl https://amazon-eks.s3-us-west-2.amazonaws.com/1.14.6/2019-08-22/bin/linux/amd64/kubectl
chmod +x ./kubectl
mkdir -p $HOME/bin && cp ./kubectl $HOME/bin/kubectl && export PATH=$HOME/bin:$PATH
echo 'export PATH=$HOME/bin:$PATH' >> ~/.bashrc
kubectl version --short --client
# create eks cluster with cloudformation
eksctl create cluster \
--name devops-capstone \
--region=us-east-1 \
--version 1.14 \
--nodegroup-name standard-workers \
--node-type t3.medium \
--nodes 3 \
--nodes-min 1 \
--nodes-max 4 \
--node-ami auto
# verify eks installation
kubectl get svc
# install aws-iam-authenticator (if kubectl commands don't work)
curl -o aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.14.6/2019-08-22/bin/linux/amd64/aws-iam-authenticator
chmod +x ./aws-iam-authenticator
mkdir -p $HOME/bin && cp ./aws-iam-authenticator $HOME/bin/aws-iam-authenticator && export PATH=$HOME/bin:$PATH
echo 'export PATH=$HOME/bin:$PATH' >> ~/.bashrc
aws-iam-authenticator help
# install aws cli
sudo yum install python3-pip -y
sudo pip3 install awscli --upgrade
# upate EKS config
aws eks update-kubeconfig --region us-east-1 --name devops-capstone
# verify eks installation
kubectl get svc
# deploy to eks cluster
kubectl apply -f webapp-deploy.yml
## other fixes
export LANG=en_US.UTF-8
export LANGUAGE=en_US.UTF-8
export LC_COLLATE=C
export LC_CTYPE=en_US.UTF-8
| true |
d4fe3ac6f9d636f77cffa506ae363d2398ddcecd | Shell | SolaceDev/pubsubplus-cf-dev | /bin/cf_env.sh | UTF-8 | 637 | 2.671875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
export SCRIPT="$( basename "${BASH_SOURCE[0]}" )"
export SCRIPTPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
export WORKSPACE=${WORKSPACE:-$SCRIPTPATH/../workspace}
CF_ADMIN_PASSWORD=$(bosh int $WORKSPACE/deployment-vars.yml --path /cf_admin_password)
export CF_ADMIN_PASSWORD=${CF_ADMIN_PASSWORD:-'admin'}
export UAA_ADMIN_CLIENT_SECRET=$(bosh int $WORKSPACE/deployment-vars.yml --path /uaa_admin_client_secret)
SYSTEM_DOMAIN=${SYSTEM_DOMAIN:-"bosh-lite.com"}
CF_API_DOMAIN=${CF_API_DOMAIN:-"api.$SYSTEM_DOMAIN"}
cf api https://$CF_API_DOMAIN --skip-ssl-validation
cf auth admin $CF_ADMIN_PASSWORD
| true |
54717728a0884eac5ed6006044cd0930490183c1 | Shell | Kaedo/MS-AIP-Attacks | /prepare.sh | UTF-8 | 2,402 | 3.953125 | 4 | [] | no_license | #!/usr/bin/env bash
function normalize_cert_and_license {
CERTs=`find input/ -iname "*.drm" | grep -v keyfile`
for FILE in $CERTs; do
echo ""
echo "Normalize the format of file: $FILE"
in="$FILE"
out=''
if [[ $FILE == *"Machine"* ]]; then
out=`echo $FILE| cut -d'/' -f2- | cut -d'.' -f1`
else
out=`echo $FILE| cut -d'/' -f2- | cut -d'-' -f1`
fi
out="processed/$out.drm.xrml"
cat $in | tr -dc '[:print:]' | sed 's/></>\n</g' > $out
done
}
function normalize_sk_and_mk {
OTHERs=`find input/ -iname "*.hex"`
for FILE in $OTHERs; do
echo ""
echo "Normalize the format of file: $FILE"
in="$FILE"
out=`echo $FILE| cut -d'/' -f2-`
out="processed/$out"
cat $in | tr -dc '[:print:]' > $out
done
}
function extract_spc_modulus {
echo "Try to extract RSA modulus from 2048 bit normalized SPC"
SPC2048=`find processed/ -iname "CERT-Machine-2048.drm.xrml"`
for FILE in $SPC2048; do
echo ""
echo "Found $FILE"
pk=`cat $FILE | grep "<NAME>Machine</NAME>" -A8 | grep '<VALUE encoding="base64" size="2048">' | cut -d'>' -f2| cut -d'<' -f1`
hex=`echo "$pk" | base64 -d | xxd -p | tr -d '\n'`
echo -e "File $FILE has 2048 RSA modulus of: $hex"
echo -e "Write modulus to $FILE.modulus"
echo "$hex" > "$FILE.modulus"
done
}
function extract_pl_authorization_data {
echo "\nTry to extract Authorization data from Publishing License"
PL=`find processed/ -iname "PL.drm.xrml"`
for FILE in $PL; do
echo ""
echo "Found $FILE"
erd=`cat $FILE | grep "Encrypted-Rights-Data" | cut -d'>' -f2- | cut -d'<' -f1`
hex=`echo "$erd" | base64 -d | xxd -p | tr -d '\n'`
echo -e "File $FILE has Encrypted-Rights-Data of: $hex"
echo -e "Write ERD to $FILE.erd"
echo "$hex" > "$FILE.erd"
done
}
function extract_enablingbits_from_cert {
echo "Try to extract ENABLING BITS element from Certificates"
CERTs=`find processed/ -iname "*drm.xrml" | grep -v CERT-Machine`
for FILE in $CERTs; do
echo ""
echo "Found $FILE"
enabits=`cat $FILE | grep "</ENABLINGBITS>" -B2 | cut -d'>' -f2| cut -d'<' -f1`
hex=`echo "$enabits" | base64 -d | xxd -p | tr -d '\n'`
echo -e "File $FILE has ENABLINGBITS element of: $hex"
echo -e "Write ENABLINGBITS to $FILE.enablingbits"
echo "$hex" > "$FILE.enablingbits"
done
}
normalize_cert_and_license
normalize_sk_and_mk
extract_spc_modulus
extract_enablingbits_from_cert
extract_pl_authorization_data
| true |
fcf9f1117f7716cb4c52c6528f89a4507a527967 | Shell | hubchenko/build_scripts | /linux/ubuntu/aws/sethostname.sh | UTF-8 | 377 | 3.6875 | 4 | [] | no_license | #!/bin/bash
##Script to set the hostname to the public DNS name in a AWS instance##
set -e
public_fqdn='' #Public FQDN
# run script only as root
if [ $(id -u) != 0 ]; then
echo "This script must be run as root"
exit 1
fi
#setting hostname
sed -i "s/^127.0.0.1.*/127.0.0.1 $public_fqdn/" /etc/hosts
sed -i "1s/.*/$public_fqdn/" /etc/hostname
hostname $public_fqdn | true |
d2cdc232886bd0645f95e742a40e5f21010b6d7d | Shell | sw1nn/prezto | /runcoms/zshrc | UTF-8 | 1,853 | 2.9375 | 3 | [
"MIT"
] | permissive | # -*- mode: shell-script; -*-
# Executes commands at the start of an interactive session.
#
# Authors:
# Sorin Ionescu <sorin.ionescu@gmail.com>
#
# Source Prezto.
if [[ -s "${ZDOTDIR:-$HOME}/.zprezto/init.zsh" ]]; then
source "${ZDOTDIR:-$HOME}/.zprezto/init.zsh"
fi
alias -s pdf=exo-open
alias -s html=browser
alias -s htm=browser
alias -s zip=file-roller
alias -s tar=file-roller
alias -s gz=file-roller
alias -s bz2=file-roller
alias -s rar=file-roller
alias -s mp4=mplayer
alias -s mov=mplayer
alias -s avi=mplayer
alias -s png=sxiv
alias -s jpg=sxiv
alias -s jpeg=sxiv
alias -s gif=sxiv
alias -s mp3=mpg123
alias -g L="| less"
alias -g EL="|& less"
alias -g B="| grep bin | less"
# Customize to your needs...
[ -f "${HOME}/.zshrc.local" ] && source ${HOME}/.zshrc.local
alias emacs="emacsclient -a '' -t"
alias vi="vim"
fpath=(~/.local/share/zsh/site-functions $fpath)
nbsp=$'\u00a0'
bindkey $nbsp kill-whole-line
[ -n "$TERM" ] && export TERM=xterm-256color
[[ -f /usr/bin/aws_zsh_completer.sh ]] && source /usr/bin/aws_zsh_completer.sh
# The next line updates PATH for the Google Cloud SDK.
if [ -f /home/neale/google-cloud-sdk/path.zsh.inc ]; then
source '/home/neale/google-cloud-sdk/path.zsh.inc'
fi
# The next line enables shell command completion for gcloud.
if [ -f /home/neale/google-cloud-sdk/completion.zsh.inc ]; then
source '/home/neale/google-cloud-sdk/completion.zsh.inc'
fi
if [[ -n $INSIDE_EMACS ]] ; then
RPROMPT=''
fi
mosh-cleanup() {
IFS=$'\n' pids=($(who | awk '/neale.*\(mosh/{gsub(/[\[\]()]/,""); print $6;}'))
[ -n "$pids" ] && for pid in ${pids}; do kill $pid; done
}
if [ -f "${HOME}/.gnupg/gpg-agent.info" ]; then
source "${HOME}/.gnupg/gpg-agent.info"
export GPG_AGENT_INFO
export SSH_AUTH_SOCK
export GPG_TTY=$(tty)
echo "UPDATESTARTUPTTY" | gpg-connect-agent >& /dev/null
return
fi
| true |
17747219e7a247eecc2aab074119a8827b60176d | Shell | Rekhajambhulkar/AllPrograms | /ForLoopPrograms/PrimeFactorization.sh | UTF-8 | 333 | 3.5 | 4 | [] | no_license | #! /bin/bash -x
echo "Enter the num:"
read Num
i=$((i*i))
for(( i=2; $i<=$Num;i++ ))
do
#check condiotion til equal to zero
while [ $((Num % i)) -eq 0 ]
do
echo $i
Num=$(( Num / i ))
done
done
#check condition nu is equal to 2
if [ $Num -gt 2 ]
then
echo $Num
fi
| true |
8ac65a594849ec6218bdd0040cf04ebc008d23f7 | Shell | InstantLaravel/InstantLaravel | /ubuntu-provisioning.sh | UTF-8 | 13,991 | 3.15625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# シェルから流す場合は、ルートユーザーになること!!!!
######################## 設定項目 #############################
# 以降の項目を環境に合わせて設定してください
# ルートページ(登録/ログインなど)ドメイン
rootDomain='top.instant.com'
# ルートページドキュメントルート
rootPageDocRoot='public'
# エディター用(Codiad)ドメイン
editorDomain="editor.instant.com"
# プレビュードメイン名
previewDomain='check.instant.com'
# プレビュー用ドキュメントルート
previewDocRoot='public'
# タイムゾーン
timezone='Asia/Tokyo'
# Codiad "base" ユーザーパスワード
basePassword='whitebase'
######################## 設定終了 #############################
#######################
# ユーザー関係・基本設定 #
#######################
# ユーザー作成
useradd --home-dir /home/home --create-home --user-group home
useradd --home-dir /home/codiad --create-home --user-group codiad
useradd --home-dir /home/base --create-home --user-group base
# SSHログイン禁止
echo "- : home : ALL" >> /etc/security/access.conf
echo "- : codiad : ALL" >> /etc/security/access.conf
echo "- : base : ALL" >> /etc/security/access.conf
# homeとbaseユーザーで作成したファイルをエディターで編集可能にする
echo "umask 002" >> /home/base/.profile
echo "umask 002" >> /home/home/.profile
# タイムゾーン設定、-fオプションで既に存在するリンクと同名のファイルを削除
ln -sf /usr/share/zoneinfo/${timezone} /etc/localtime
#############
# パッケージ #
#############
# 既存パッケージ更新
apt-get update
apt-get upgrade -y
# プライベートリポジトリ登録コマンドのインストール
apt-get install -y software-properties-common
# プライベートリポジトリの登録
apt-add-repository ppa:nginx/stable -y
# 以降のインストールに備えて、再度パッケージリストの更新
apt-get update
# 基本ツールのインストール
apt-get install -y vim unzip git
# SQLiteインストール
# シンプルにするためするため、DBはSQLiteのみ
apt-get install -y sqlite3
##################
# PHP、Nginx設定 #
##################
# PHP関係のインストール
apt-get install -y php5-cli php5-dev \
php5-json php5-curl php5-sqlite\
php5-imap php5-mcrypt
php5enmod mcrypt pdo opcache json curl
# PHPコマンドライン設定
sed -i -e "s/error_reporting = .*/error_reporting = E_ALL/" \
-e "s/display_errors = .*/display_errors = On/" \
-e "s/memory_limit = .*/memory_limit = 300M/" \
-e "s/;date.timezone.*/date.timezone = UTC/" /etc/php5/cli/php.ini
# Nginx、PHP-FPMインストール
# nginx-lightでも、今回の要件を満たしているはず
apt-get install -y nginx php5-fpm
# PHP-FPM PHPオプション設定
sed -i -e "s/error_reporting = .*/error_reporting = E_ALL/" \
-e "s/display_errors = .*/display_errors = On/" \
-e "s/;cgi.fix_pathinfo=1/cgi.fix_pathinfo=0/" \
-e "s/memory_limit = .*/memory_limit = 300M/" \
-e "s/;date.timezone.*/date.timezone = UTC/" /etc/php5/fpm/php.ini
# Nginxオプション設定
sed -i -e "s/user www-data;/user www-data;/" \
-e "s/keepalive_timeout .*/keepalive_timeout 30;/" \
-e "s/^worker_processes .*/worker_processes auto;/" \
-e "s/# server_names_hash_bucket_size .*/server_names_hash_bucket_size 64;/" /etc/nginx/nginx.conf
# PHP-FPM設定
# ホーム(新規登録)
# 一時にアクセスが集まり、その他の時間はほぼアクセスが起きない予想
cat <<EOT > /etc/php5/fpm/pool.d/home.conf
[home]
user = home
group = home
listen = /var/run/php5-fpm.home.sock
listen.owner = home
listen.group = www-data
listen.mode = 0660
pm = ondemand
pm.max_children = 10
pm.start_servers = 2
pm.min_spare_servers = 2
pm.max_spare_servers = 6
chdir = /
EOT
# エディター(Codiad)
# 小さな量のアクセスが、チュートリアル中持続する
cat <<EOT > /etc/php5/fpm/pool.d/codiad.conf
[codiad]
user = codiad
group = codiad
listen = /var/run/php5-fpm.codiad.sock
listen.owner = codiad
listen.group = www-data
listen.mode = 0660
pm = dynamic
pm.max_children = 8
pm.start_servers = 3
pm.min_spare_servers = 3
pm.max_spare_servers = 5
chdir = /
EOT
# baseユーザー(管理ユーザー)
# 専用プレビュー
cat <<EOT > /etc/php5/fpm/pool.d/base.conf
[base]
user = base
group = base
listen = /var/run/php5-fpm.base.sock
listen.owner = base
listen.group = www-data
listen.mode = 0660
pm = ondemand
pm.max_children = 2
pm.start_servers = 2
pm.min_spare_servers = 1
pm.max_spare_servers = 2
chdir = /
EOT
# Nginx 仮想ホスト設定
# トップ(認証/ログイン)仮想ホスト設定
cat <<EOT > /etc/nginx/sites-available/default
server {
listen 80 default_server;
server_name ${rootDomain};
root /home/home/top/${rootPageDocRoot};
index index.php;
location / {
try_files \$uri \$uri/ /index.php?\$query_string;
location ~ \\.php$ {
include fastcgi_params;
# SCRIPT_FILENAMEをオーバーライト
fastcgi_param SCRIPT_FILENAME \$document_root\$fastcgi_script_name;
fastcgi_split_path_info ^(.+\\.php)(/.+)$;
fastcgi_pass unix:/var/run/php5-fpm.home.sock;
fastcgi_index index.php;
}
}
location = favicon.ico { access_log off; log_not_found off; }
location = robots.txt { access_log off; log_not_found off; }
error_log /var/log/nginx/error.log error;
# rewrite_log on;
sendfile off;
}
EOT
# エディター仮想ホスト設定
cat <<EOT > /etc/nginx/sites-available/editor
server {
listen 80;
server_name ${editorDomain};
root /home/codiad;
location / {
try_files \$uri /index.php?\$query_string;
location ~ \\.php$ {
include fastcgi_params;
# SCRIPT_FILENAMEをオーバーライト
fastcgi_param SCRIPT_FILENAME \$document_root\$fastcgi_script_name;
fastcgi_split_path_info ^(.+\\.php)(/.+)$;
fastcgi_pass unix:/var/run/php5-fpm.codiad.sock;
fastcgi_index index.php;
}
}
# 直接codiaユーザーでアクセスさせると、workspace下の
# ファイルは全部変更できるため、拒否する。
location ~ ^/workspace {
return 404;
}
error_log /var/log/nginx/error.log error;
# rewrite_log on;
sendfile off;
}
EOT
# プレビュー仮想ホスト設定
cat <<EOT > /etc/nginx/sites-available/preview
server {
listen 80;
server_name ${previewDomain};
root /home/codiad/workspace;
location / {
try_files \$uri /index.html;
}
# このドメインのトップレベルではPHPを実行させない
# そのため、fastcgiへのブリッジ処理は記述しない
# 末尾のスラッシュ除去
rewrite ^/(.+)/$ /\$1;
include /etc/nginx/users.d/*;
error_log /var/log/nginx/error.log error;
# rewrite_log on;
sendfile off;
}
EOT
# 仮想ホストを有効にする
ln -sf /etc/nginx/sites-available/default /etc/nginx/sites-enabled
ln -s /etc/nginx/sites-available/editor /etc/nginx/sites-enabled
ln -s /etc/nginx/sites-available/preview /etc/nginx/sites-enabled
# 各ユーザー用の設定フォルダーを作成する
mkdir /etc/nginx/users.d
# baseユーザー用設定ファイル
cat <<EOT > /etc/nginx/users.d/base
location ~ ^/base(/(.+))?$ {
root /home/codiad/workspace/base/${previewDocRoot};
try_files \$1 /base/index.php?\$query_string;
location ~ ^/base/index.php$ {
include fastcgi_params;
# パラメーターをオーバーライト
fastcgi_param SCRIPT_FILENAME /home/codiad/workspace/base/${previewDocRoot}/index.php;
fastcgi_split_path_info ^(.+\\.php)(.+)$;
fastcgi_pass unix:/var/run/php5-fpm.base.sock;
fastcgi_index index.php;
}
}
EOT
###############
# ルートページ #
###############
# Composerインストール
curl -sS https://getcomposer.org/installer | php
mv composer.phar /usr/local/bin/composer
# ルートページインストール
# インストール先は/home/home/top
git clone https://github.com/InstantLaravel/TopPage.git /home/home/top
cd /home/home/top
composer install
cd
# インストール終了後、オーナーを変更
chown -R home:codiad /home/home/top
# codiadグループから書き込めるようにする
find /home/home/top -type d -exec chmod 2775 {} +
find /home/home/top -type f -exec chmod 0664 {} +
# 新規ユーザー作成シェルをsuduで実行するための準備
chmod 744 /home/home/top/add-new-user.sh
echo "home ALL=(ALL) NOPASSWD: /home/home/top/add-new-user.sh" > /etc/sudoers.d/home
echo "home ALL=(ALL) NOPASSWD: /usr/sbin/service" >> /etc/sudoers.d/home
echo 'Defaults:home !requiretty' >> /etc/sudoers.d/home
# ルートロジック中のリダイレクト先設定
sed -i -e "s/\*\*\* EDITOR DOMAIN \*\*\*/${editorDomain}/" /home/home/top/app/routes.php
# 新規ユーザー生成シェル中のドキュメントフォルダー設定
sed -i -e "s/\*\*\* PREVIEW DOC ROOT \*\*\*/${previewDocRoot}/" /home/home/top/add-new-user.sh
##############
# Codiad関係 #
##############
# Codiadホームにgidをセットし、新規ディレクトリー/ファイルのグループが変わらないようにする
chown codiad:codiad /home/codiad
chmod g+s /home/codiad
# Codiadインストール
# wget https://github.com/Codiad/Codiad/archive/v.2.2.8.zip
wget https://github.com/Codiad/Codiad/archive/master.zip
mkdir temp
unzip master.zip -d temp
cp -R temp/Codiad-master/* /home/codiad
rm master.zip
rm -R temp
# Codiad日本語化ファイルのインストール
git clone https://gist.github.com/b55af329ac844c985bf3.git temp
mv temp/ja.php /home/codiad/languages
rm -R temp
sed -i -e 's/"english",/"english",\n "ja" => "日本語",/' /home/codiad/languages/code.php
# Codiad初期設定
#echo "<?php/*|[\"\",{\"username\":\"base\",\"path\":\"base\",\"focused\":true}]|*/?>" > /home/codiad/data/active.php
echo "<?php/*|[\"\"]|*/?>" > /home/codiad/data/active.php
echo "<?php/*|[{\"name\":\"\u30a4\u30f3\u30b9\u30bf\u30f3\u30c8Laravel base\",\"path\":\"base\"}]|*/?>" > /home/codiad/data/projects.php
echo "<?php echo sha1(md5(\"${basePassword}\"));" > temp.php
hashedPassword=`php -f temp.php`
rm temp.php
echo "<?php/*|[{\"username\":\"base\",\"password\":\"${hashedPassword}\",\"project\":\"base\"}]|*/?>" > /home/codiad/data/users.php
sed -e "s+/path/to/codiad+/home/codiad+" \
-e "s+domain\.tld+${editorDomain}+" \
-e "s+America/Chicago+${timezone}+" /home/codiad/config.example.php > /home/codiad/config.php
chown -R codiad:codiad /home/codiad
chown home:codiad /home/codiad/data
chown home:codiad /home/codiad/data/*.php
chmod 775 /home/codiad/data
chmod 775 /home/codiad/workspace
chmod 664 /home/codiad/data/*.php
######################
# チュートリアル対象FW #
######################
# 学習対象プロジェクトインストール
# インストール先は、/home/codiad/workspace/base
# 現在CodiadはUTF8のファイル保存時に正しく保存されないため英語オリジナル版をベースとして使用
composer create-project laravel/laravel /home/codiad/workspace/base
# 日本語言語ファイルのみ日本語翻訳版からコピー
wget https://github.com/laravel-ja/laravel/archive/master.zip
unzip master.zip
mv laravel-master/app/lang/ja /home/codiad/workspace/base/app/lang/ja
rm -R laravel-master
rm master.zip
# Bootstrapをpublicへセット
wget https://github.com/twbs/bootstrap/releases/download/v3.2.0/bootstrap-3.2.0-dist.zip
unzip bootstrap-3.2.0-dist.zip -d bootstrap
mv bootstrap/bootstrap-3.2.0-dist/* /home/codiad/workspace/base/public
rm -R bootstrap*
# インストール終了後、オーナーを変更
chown -R base:codiad /home/codiad/workspace/base
# codiadグループから書き込めるようにする
find /home/codiad/workspace/base -type d -exec sudo chmod 2775 {} +
find /home/codiad/workspace/base -type f -exec sudo chmod 0664 {} +
#############
# 固定ページ #
#############
# プレビューindexページ
sed -e "s/\*\*\* PREVIEW DOMAIN \*\*\*/${previewDomain}/" /home/home/top/preview-resources/index.html > /home/codiad/workspace/index.html
# プレビュー404ページ
mv /home/home/top/preview-resources/404.html /home/codiad/workspace/
# baseへレイアウトのサンプルを用意
mv /home/home/top/preview-resources/*.blade.php /home/codiad/workspace/base/app/views/
#################################################
# プレビューのルートURLアクセス時のindex.htm用リソース #
#################################################
cp -R /home/home/top/public/css /home/codiad/workspace/
chown -R www-data:codiad /home/codiad/workspace/css
cp -R /home/home/top/public/js /home/codiad/workspace/
chown -R www-data:codiad /home/codiad/workspace/js
cp -R /home/home/top/public/fonts /home/codiad/workspace/
chown -R www-data:codiad /home/codiad/workspace/fonts
cp -R /home/home/top/public/img /home/codiad/workspace/
chown -R www-data:codiad /home/codiad/workspace/img
########################
# Nginx、php5-fpm再起動 #
########################
service nginx restart
service php5-fpm stop
service php5-fpm start
########################################
# Nginx、php5-fpm再起動要求監視シェル起動 #
########################################
chown root:root /home/home/top/restart-watchdoc.sh
chmod 744 /home/home/top/restart-watchdoc.sh
/home/home/top/restart-watchdoc.sh &
# 再起動時にも動作するようにrc.localへ登録
sed -i -e "s@^exit 0\$@/home/home/top/restart-watchdoc.sh \&> /dev/null\ \&\nexit 0@" /etc/rc.local
chmod 744 /etc/rc.local
| true |
ffb9c6d595bb57b7821f7a4895902253e1e80d58 | Shell | hyliang96/shareENV | /app/easy-git/branch.sh | UTF-8 | 6,830 | 3.234375 | 3 | [] | no_license | # -------------------------------------------------------------------------
# 分支操作
gb() # 新建分支并检出到此分支: gb 分支名
{
[ $# -ne 0 ] && git branch $1 && git checkout $1
}
alias gbmv='git branch -m' # 重命名分支: gbmv [<旧分支名>=<当前分支名>] <新分支名>
alias gbrm='git branch -D' # 删除分支:gbrm 分支名
alias gbls='git branch -avv' # 列出所有本地枝,及其关联的远枝: gbls
# alias gch='git checkout' # 切换分支:gch 分支名/历史提交编号/HEAD^/HEAD/HEAD~n/HEAD@{n}, 要先git commit一次才能gch
alias gff='git merge' # 快进式merge
alias gpk='git cherry-pick -x' # apply一个提交的增量到HEAD, -x 保留原提交message
# 在A分支, 挑选一或多次别的分支的任意提交的增量, 依次apply到HEAD, 在A分支生成一次提交
# 可能需要多次解决冲突, 每次解决冲突后`gaa`再`gcm`
# 解决完所有冲突后, 当前目录干净, 此时需执行一次 `gpkc`
# 然后目录里出现所有被挑选来的节点的文件, 均已经add, 直接`gcm '<信息>'`即完成提交
# git pick squash
alias gpks='git cherry-pick -x -n' # gpk [文件夹名]'s
alias gpkc='git cherry-pick --continue'
alias gpka='git cherry-pick --abort'
alias gpko='gmgo' # git checkout --ours [文件夹名]'s
alias gpkt='gmgt' # git checkout --theirs [文件夹名]'s
# 在[当前分支]执行 `gsmg [目标分支]`
# git squash merge
# 用于: merge而不产生横向连接
# 提交前的分支图
# t=[目标分支] -o-o-o-o-o(t)
#
# c=[当前分支] o1-o2-o3(c)
# 提交后的分支图
# t=[目标分支] -o-o-o-o---o(t) [包含了o1~o3]
#
# c=[当前分支] o1-o2-o3(c)
gsmg() # git merge --squash
{
local target="$1"
local current="$(get_node_name HEAD)"
if [ "$current" = 'HEAD' ]; then
echo "You are not at the end of a branch, please checkout to a branch before 'bow merge'."
return
fi
git checkout "$target"
git merge --squash "$current"
git commit
# 弹出vim, 编辑commit信息
git checkout "$current"
}
alias gsqc='git commit'
alias gmg='git merge --no-ff' # 将当前枝与此分支合并(非快进)
alias gmgc='git commit' # 解决完冲突, 运行此以继续merge
# alias gmgc='git merge --continue' # 老版本git没有之, 如git 2.7.4没有, git2.20.1有
alias gmga='git merge --abort'
# 在A分支, gmg B分支, 若冲突, 要留A分支的文件, 则gmgo; 要留B分支的文件, 则gmgt
gmgo() { # ours
if [ $# -eq 0 ] || [[ "$1" =~ '^(-a|--all)$' ]]; then
git checkout --ours "$(git_root)"
else
git checkout --ours "$@"
fi
}
gmgt() { # theirs
if [ $# -eq 0 ] || [[ "$1" =~ '^(-a|--all)$' ]]; then
git checkout --theirs "$(git_root)"
else
git checkout --theirs "$@"
fi
}
alias grb='git rebase'
alias grbc='git rebase --continue'
alias grba='git rebase --abort'
# 在A分支, grb B分支, 若冲突, 要留A分支的文件, 则grbo; 要留B分支的文件, 则grbt
# 原生的git rebase后, 若冲突, theirs和ours所指, 与merge的正好相反
grbo() { # ours
if [ $# -eq 0 ] || [[ "$1" =~ '^(-a|--all)$' ]]; then
git checkout --theirs "$(git_root)"
else
git checkout --theirs "$@"
fi
}
grbt() { # theirs
if [ $# -eq 0 ] || [[ "$1" =~ '^(-a|--all)$' ]]; then
git rebase --skip # 若某文件, B 分支删除 而A分支当前冲突的commit有, 则此命令会删除此文件
# git checkout --ours "$(git_root)" # 若某文件, B 分支删除 而A分支当前冲突的commit有, 则此命令不会删除此文件
else
git checkout --ours "$@"
fi
}
# 个人分支: debug 和 feature 分支 commit 前, 先 pull 或 rebase
# 提交到 公共(dev 和 master) 分支前, 先 pull 公共分支
# push 公共和个人分支 前, 先 pull
# 弯弓提交到目标分支
# 用于:
# 当前分支 目标分支
# 在 debug 分支, 提交到 master 分支: `gbmg master`
# 在 feature 分支, 提交到 dev 分支: `gbmg dev`
# 提交前的分支图
# r: 旧根, R: 新根
# t=[目标分支] -r-o-o-R(t)
# \
# c=[当前分支] o-o-o-o(c)
# 提交后的分支图
# t=[目标分支] -r-o-o-R---------o(t)
# \ /
# c=[当前分支] o-o-o-o(c)
gbmg () {
local target="$1"
local current="$(get_node_name HEAD)"
if [ "$current" = 'HEAD' ]; then
echo "You are not at the end of a branch, please checkout to a branch before 'bow merge'."
return
fi
git rebase ${target}
git checkout ${target}
git merge --no-ff --no-edit ${current}
git checkout ${current}
# git merge ${target}
}
# squash过来, 再提交到目标分支 [不推荐, 因为M(c)和M(t)处会同一个冲突解两次]
# 用于:
# 在 dev 分支, 提交 到 master 分支: `gbmg master`
# 提交前的分支图
# t=[目标分支] -o-o-o-A(t)
#
# c=[当前分支] o-o-o(c)
# 提交后的分支图
# t=[目标分支] -o-o-o-A---M(t)
# /
# c=[当前分支] o-o-o-M(c) (已经merge了A)
gsmg() {
local target="$1"
local current="$(get_node_name HEAD)"
if [ "$current" = 'HEAD' ]; then
echo "You are not at the end of a branch, please checkout to a branch before 'bow merge'."
return
fi
git merge --squash ${target}
git checkout ${target}
git merge --no-ff --no-edit ${current}
git checkout ${current}
}
# git follow
# 用于:
# 在 dev 分支, follow master 分支: `gfl master`
# 在 feature 分支, fellow dev 分支: `gfl dev`
# follow前的分支图
# t=[目标分支] -o-o-o-A(t)
#
# c=[当前分支] o-o-o(c)
# follow squash后的分支图
# t=[目标分支] -o-o-o-A(t)
#
# c=[当前分支] o-o-o-M(c) (已经merge了A)
gfls()
{
local target_branch="$1"
git merge --squash ${target_branch} && gflc "$target_branch"
}
gflsc()
{
local target_branch="$1"
git commit --no-edit -m "update following \"${target_branch}\" $(get_hash ${target_branch})"
}
# 三角形提交到目标分支
# 用于:
# 在 dev 分支, 提交 到 master 分支: `gbmg master`
# 提交前的分支图
# t=[目标分支] -o-o-o-o-o(t)
#
# c=[当前分支] o-o-o(c)
# 提交后的分支图
# t=[目标分支] -o-o-o-o---M(t)
# \ /
# c=[当前分支] o-o-o-M(c)
gtmg() {
local target="$1"
local current="$(get_node_name HEAD)"
if [ "$current" = 'HEAD' ]; then
echo "You are not at the end of a branch, please checkout to a branch before 'bow merge'."
return
fi
git merge ${target}
git checkout ${target}
git merge --no-ff --no-edit ${current}
git checkout ${current}
}
| true |
f7a522c3efdfceb0208caa99fd10885b2d8e0f6f | Shell | danielvdende/puppet-kafka | /templates/kafka.init.erb | UTF-8 | 2,208 | 3.859375 | 4 | [
"MIT"
] | permissive | #!/bin/sh
#
# Managed by puppet
# Init file for Apache Kafka
#
# chkconfig: 35 85 15
# description: Apache Kafka is a distributed publish-subscribe messaging system
# pidfile: /var/run/kafka.pid
#source function library
. /etc/rc.d/init.d/functions
NAME=<%= @service_name %>
PID_FILE=/var/run/$NAME.pid
KAFKA_USER=<%= @user %>
MAX_WAIT=30 #wait for max 30 seconds when restarting kafka, to allow for port release of JMX port.
DAEMON="<%= @startup_script_dir + "/kafka-server-start.sh"%>"
DAEMON_OPTS="<%= @conf_dir + "/server.properties"%>"
export KAFKA_JMX_OPTS="<%= @jmx_opts %>"
CMD="KAFKA_JMX_OPTS=\"$KAFKA_JMX_OPTS\" $DAEMON $DAEMON_OPTS > <%= @app_log_dir + "/server.out"%> 2> <%= @app_log_dir + "/server.err"%> &"
start() {
ulimit -n <%= @max_nofiles %>
ulimit -s <%= @max_stacksize %>
ulimit -c <%= @max_corefiles_size %>
if [ -f $PID_FILE ]
then
PID=`cat $PID_FILE`
if [ ! -z "`ps -ef | awk '{print $2}' | grep "^$PID$"`" ]
then
echo "$PID_FILE exists, process is already running"
exit 0
else
echo "$PID_FILE exists but the process is not running. Deleting $PID_FILE and re-trying"
rm -f $PID_FILE
start
fi
else
daemon --user $KAFKA_USER --check $NAME $CMD
sleep 2
PID=`ps ax | grep -E '[k]afka.Kafka' | awk '{print $1}'`
echo $PID > $PID_FILE;
echo "$NAME started $PID"
fi
}
stop() {
if [ ! -f $PID_FILE ]
then
echo "$PID_FILE does not exist, process is not running"
return 1
else
killproc -p $PID_FILE -d $MAX_WAIT
rm -f $PID_FILE;
echo "$NAME stopped"
return 0
fi
}
status() {
if [ -f $PID_FILE ]
then
PID=`cat $PID_FILE`
if [ -z "`ps -ef | awk '{print $2}' | grep "^$PID$"`" ]
then
echo "$NAME stopped but pid file exists"
exit 1
else
echo "$NAME running with pid $PID"
exit 0
fi
else
echo "$NAME stopped"
exit 1
fi
}
case "$1" in
status)
status
;;
start)
echo "Starting daemon: "$NAME
start
;;
stop)
echo "Stopping daemon: "$NAME
stop
;;
restart)
echo "Restarting daemon: "$NAME
stop
start
;;
*)
echo "Usage: "$1" {status|start|stop|restart}"
exit 1
esac
exit 0
| true |
66bab38d632cfbe58c59216c50059855309127ba | Shell | santhu2210/cmd_making | /startapp.sh | UTF-8 | 214 | 2.671875 | 3 | [] | no_license | #! /bin/bash
readonly sourceFile="./envr/bin/activate"
source ${sourceFile}
# virtualenv is now active.
nohup python app.py & > flask_details.log &
# flask server run on background and details writed in log file | true |
8a383b05b0fcfc0b23277573bdee767e95d4ac7a | Shell | MuddassirNayyer/azure-devops-bitbucket-cicd | /pipelinesSetupCli.sh | UTF-8 | 1,229 | 3.234375 | 3 | [] | no_license | #!/bin/bash
# Variables: Project Configuration
projectName=TestProjectFromCli
organization=https://dev.azure.com/exampleOrg/
# Variables: Build Pipeline Configuration
buildPipelineName=testBuildPipelineName
buildPipelineDescription=testBuildPipelineDescription
repositoryType=tfsgit # {github, tfsgit}
repositoryCloneUri=https://github.com/ExampleUserName/testRepo.git
repoBranch=master
skipFirstRun=false # {true, false}
yamlPipelinePath=/[funcAppBuildPipeline.yaml](https://github.com/MuddassirNayyer/azure-devops-bitbucket-cicd/blob/master/funcAppBuildPipeline.yaml) # yaml script to generate build pipeline, place it at root of the repository
# DevOps Extension: Install if not already installed
az extension add --name azure-devops
# Connect with DevOps account
az login
# Set Default DevOps Organization
az devops configure \
--defaults organization=$organization
# Create build pipeline
az pipelines create \
--name $buildPipelineName \
--description $buildPipelineName \
--repository $repositoryCloneUri --branch $repoBranch --repository-type $repositoryType \
--yaml-path $yamlPipelinePath \
--project $projectName \
--org $organization \
--skip-first-run $skipFirstRun
| true |
fc8ba8b7fe6c447fc37ef20675f300c521acfa1b | Shell | lo48576/dotfiles_old1 | /profiles/script-wrapper/volume-control/scripts/local/volumecontrol.sh | UTF-8 | 713 | 3.609375 | 4 | [] | no_license | #!/bin/sh
AMIXER="amixer sset Master"
SUMMARY="volume changed"
MESSAGE_BODY=
option="$1"
case "$1" in
on)
$AMIXER on
SUMMARY="Mute off"
MESSAGE_BODY=
;;
off)
$AMIXER off
SUMMARY="Mute on"
MESSAGE_BODY=
;;
toggle)
$AMIXER 1+ toggle
STATUS="$(amixer sget Master | grep -o '\[\(on\|off\)\]' | head -1)"
if [ "$STATUS" == "[on]" ] ; then
STATUS="off"
else
STATUS="on"
fi
SUMMARY="Mute $STATUS"
MESSAGE_BODY=
;;
[0-9]*%[+-])
$AMIXER "$option"
MESSAGE_BODY="current: $(amixer sget Master | grep -o '[0-9]\+%' | head -1)"
;;
[0-9]*%)
$AMIXER "$option"
MESSAGE_BODY="current: $option"
;;
esac
#notify-send --app-name="volumecontrol.sh" "$SUMMARY" "$MESSAGE_BODY"
| true |
2bd8ee2d177a0ae8e653f3ad9999b9ad72577641 | Shell | xsc/bashing | /src/lib/log.sh | UTF-8 | 479 | 3.78125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
#!require colorize
# Generic logging helpers layered on the colorize module (red/green/yellow).

# error MESSAGE... -- print a red "(ERROR)" tag plus MESSAGE to stderr.
error() {
  printf '%s ' "$(red "(ERROR)")" >&2
  echo "$@" >&2
}

# fatal MESSAGE... -- like error, but terminate the script with status 1.
fatal() {
  error "$@"
  exit 1
}

# success MESSAGE... -- print MESSAGE to stdout in green.
success() {
  echo "$(green "$@")"
}

# verbose MESSAGE... -- print MESSAGE unless verbosity was switched off with
# VERBOSE=no; an unset or empty VERBOSE counts as "on".
verbose() {
  if [[ "$VERBOSE" == "no" ]]; then
    return 0
  fi
  echo "$@"
}

# debug MESSAGE... -- print MESSAGE behind a yellow "(DEBUG)" tag, but only
# when debugging was explicitly enabled with DEBUG=yes.
debug() {
  if [[ "$DEBUG" != "yes" ]]; then
    return 0
  fi
  echo -n "$(yellow "(DEBUG) ")"
  echo "$@"
}
| true |
cfb6377b14347673d3c6a24625a1278408b2a358 | Shell | bazelbuild/bazel | /src/create_java_tools_release.sh | UTF-8 | 5,585 | 3.875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# A script that creates java_tools release candidates or release artifacts.
#
# Before creating a release candidate the script assumes that the java_tools
# binaries pipeline was previously run and generated java_tools artifacts at
# a commit hash.
#
# The script is using gsutil to copy artifacts.
#
# Mandatory flags:
#  --java_tools_version  The version number of the java_tools to be released.
#  --rc                  The release candidate number of current release.
#                        If --release true then --rc is the number of the rc to
#                        be released.
#                        If --release false then --rc is the number of the rc to
#                        be created.
#  --release             "true" if the script has to create the release artifact
#                        or "false" if the script has to create a release
#                        candidate.
#  --commit_hash         The commit hash where the java_tools binaries pipeline
#                        was run. Mandatory only if --release false.
#
# Usage examples:
#
# To create the first release candidate for a new java_tools version 2.1 and
# JDK11, that was built at commit_hash 123456:
#
# src/create_java_tools_release.sh --commit_hash 123456 \
#     --java_tools_version 2.1 --rc 1 --release false
#
# To release the release candidate created above:
#
# src/create_java_tools_release.sh \
#     --java_tools_version 2.1 --rc 1 --release true

set -euo pipefail

# Parsing the flags. Flags come in "--name value" pairs.
# NOTE: this previously used `while [[ -n "$@" ]]`, which only works because
# [[ ]] implicitly concatenates the positional parameters (ShellCheck SC2199)
# and fails with a confusing `set -u` error when a flag is missing its value.
# Use the explicit argument count and report missing values clearly.
while [[ $# -gt 0 ]]; do
  arg="$1"; shift
  if [[ $# -eq 0 ]]; then
    echo "Flag $arg requires a value." && exit 1
  fi
  val="$1"; shift
  case "$arg" in
    "--java_tools_version") java_tools_version="$val" ;;
    "--commit_hash") commit_hash="$val" ;;
    "--rc") rc="$val" ;;
    "--release") release="$val" ;;
    *) echo "Flag $arg is not recognized." && exit 1 ;;
  esac
done

# Create a tmp directory to download the artifacts from GCS and compute their
# sha256sum. The trap cleans it up on any exit path.
tmp_dir=$(mktemp -d -t 'tmp_bazel_zip_files_XXXXXX')
trap "rm -fr $tmp_dir" EXIT

gcs_bucket="gs://bazel-mirror/bazel_java_tools"

# Per-platform archives: promote each rc to a release artifact (or each tmp
# build to an rc), then print "<artifact> <sha256>" for updating Bazel.
for platform in "linux" "windows" "darwin_x86_64" "darwin_arm64"; do
  rc_url="release_candidates/java/v${java_tools_version}/java_tools_${platform}-v${java_tools_version}-rc${rc}.zip"

  if [[ $release == "true" ]]; then
    release_artifact="releases/java/v${java_tools_version}/java_tools_${platform}-v${java_tools_version}.zip"
    # Make release candidate the release artifact for the current platform.
    # Don't overwrite existing file.
    gsutil -q cp -n "${gcs_bucket}/${rc_url}" "${gcs_bucket}/${release_artifact}"
  else
    # Pick the newest build artifact for this platform at the given commit.
    tmp_url=$(gsutil ls -lh ${gcs_bucket}/tmp/build/${commit_hash}/java/java_tools_${platform}* | sort -k 2 | grep gs -m 1 | awk '{print $4}')
    # Make the generated artifact a release candidate for the current platform.
    # Don't overwrite existing file.
    gsutil -q cp -n ${tmp_url} "${gcs_bucket}/${rc_url}"
    release_artifact="${rc_url}"
  fi

  # Download the file locally to compute its sha256sum (needed to update the
  # java_tools in Bazel).
  # Don't overwrite existing file.
  local_zip="$tmp_dir/java_tools$platform.zip"
  gsutil -q cp -n ${gcs_bucket}/${rc_url} ${local_zip}
  file_hash=$(sha256sum ${local_zip} | cut -d' ' -f1)
  echo "${release_artifact} ${file_hash}"
done

# Platform-independent archive plus its sources zip, handled the same way.
rc_url="release_candidates/java/v${java_tools_version}/java_tools-v${java_tools_version}-rc${rc}.zip"
rc_sources_url="release_candidates/java/v${java_tools_version}/sources/java_tools-v${java_tools_version}-rc${rc}.zip"

if [[ $release == "true" ]]; then
  release_artifact="releases/java/v${java_tools_version}/java_tools-v${java_tools_version}.zip"
  release_sources_artifact="releases/java/v${java_tools_version}/sources/java_tools-v${java_tools_version}.zip"
  # Make release candidate the release artifact for the current platform.
  # Don't overwrite existing file.
  gsutil -q cp -n "${gcs_bucket}/${rc_url}" "${gcs_bucket}/${release_artifact}"
  # Copy the associated zip file that contains the sources of the release zip.
  # Don't overwrite existing file.
  gsutil -q cp -n "${gcs_bucket}/${rc_sources_url}" "${gcs_bucket}/${release_sources_artifact}"
else
  tmp_url=$(gsutil ls -lh ${gcs_bucket}/tmp/build/${commit_hash}/java/java_tools-* | sort -k 2 | grep gs -m 1 | awk '{print $4}')
  gsutil -q cp -n ${tmp_url} "${gcs_bucket}/${rc_url}"
  release_artifact="${rc_url}"
  # Copy the associated zip file that contains the sources of the release zip.
  # Don't overwrite existing file.
  tmp_sources_url=$(gsutil ls -lh ${gcs_bucket}/tmp/sources/${commit_hash}/java/java_tools-* | sort -k 2 | grep gs -m 1 | awk '{print $4}')
  gsutil -q cp -n ${tmp_sources_url} ${gcs_bucket}/${rc_sources_url}
fi

# Download the file locally to compute its sha256sum (needed to update the
# java_tools in Bazel).
# Don't overwrite existing file.
local_zip="$tmp_dir/java_tools.zip"
gsutil -q cp -n ${gcs_bucket}/${rc_url} ${local_zip}
file_hash=$(sha256sum ${local_zip} | cut -d' ' -f1)
echo "${release_artifact} ${file_hash}"
| true |
b43d7ade2ac89b6da1f118ad53723bb2588515c6 | Shell | LinuxA60/Linux-Shell | /shell 基本/8.sh | UTF-8 | 112 | 2.828125 | 3 | [] | no_license | #!/bin/bash //编写shell脚本,计算1-100的和
# Sum the integers 1..100 and print the total (5050).
# Fixes: initialize the accumulator, use POSIX $(( )) arithmetic instead of
# the deprecated $[ ] form, and drop the backtick `seq` subprocess.
j=0
for ((i = 1; i <= 100; i++)); do
  j=$((j + i))
done
echo "$j"
| true |
f268cd426849ccdf96a22d72ee8a90a3694c60cb | Shell | ReliefLabs/EasyTomato | /release/src/router/snmp/testing/test_kul.sh | UTF-8 | 1,248 | 3.65625 | 4 | [
"BSD-3-Clause",
"MIT-CMU",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/sh
#
# test_kul.sh
#
# Number of SUCCESSes:	2
#
# Run key localization tests and compare with data given in the USM
# documentation.
#
# FIX	CHECKEXACT() will produce 2 lines when SCAPI is built with
#	SNMP_TESTING_CODE, but only 1 line without the #define. The script
#	assumes SNMP_TESTING_CODE is defined.
#
#
# eval_tools.sh provides the harness helpers used below (VERIFY, STARTTEST,
# OUTPUT, CAPTURE, FAILED, CHECKEXACT, SUCCESS, STOPTEST, $failcount).
. eval_tools.sh
VERIFY ktest
STARTTEST
#------------------------------------ -o-
# Declarations.
#
# Data files are named data.kul-<transform>; column 1 of each file carries,
# line by line: passphrase P, master key Ku, engineID, localized key kul.
DATAFILE_PREFIX=data.kul-
DATAFILE_SUFFIXES="md5 sha1"
P=
Ku=
engineID=
kul=
#------------------------------------ -o-
# Test.
#
for dfs in $DATAFILE_SUFFIXES; do
OUTPUT "== Test of key localization correctness with transform \"$dfs\"."
# Load column 1 into $1..$n; the dummy "x" plus shift protects `set`
# from treating a leading "-" value as an option.
set x `awk '{ print $1 }' ${DATAFILE_PREFIX}$dfs`
shift
[ $# -lt 4 ] && FAILED 1 \
"Wrong number of lines ($#) in datafile \"$DATAFILE_PREFIX}$dfs\"."
P=$1
Ku=$2
engineID=$3
kul=$4
# Run the key tool, then verify both expected keys appear in its output.
# CHECKEXACT must match exactly 2 lines (see the FIX note above).
CAPTURE "ktest -l -P $P -E $engineID "
FAILED $? "ktest"
CHECKEXACT $Ku
[ $? -eq 2 ]
FAILED $? "Master key was not generated."
CHECKEXACT $kul
[ $? -eq 2 ]
FAILED $? "Localized key was not generated."
SUCCESS "Key localization correctness test with transform \"$dfs\"."
done
#------------------------------------ -o-
# Cleanup, exit.
#
STOPTEST
exit $failcount
| true |
35a9fe82d5a9afe157fe1c9b7e15f062031f55e5 | Shell | jsilbernet/wp-admin-tools | /backup/wp_backup_db | UTF-8 | 305 | 3.15625 | 3 | [] | no_license | #!/bin/bash
# Copyright (c) 2015 Jascha Silbermann [http://jsilber.net]
# Dump the WordPress database (via wp-cli) into a timestamped gzip file
# under $BACKUP_HOME/wp/db. Requires BACKUP_HOME, MAIN_SITE and WP_HOME.
backup_dir="${BACKUP_HOME}/wp/db"
now=$(date "+%Y-%m-%d_%H-%M-%S")
backup_file="${MAIN_SITE}_${now}.sql.gz"
echo "Backing up $backup_file to $backup_dir ..."
cd "$WP_HOME"
wp db export - | gzip > "${backup_dir}/${backup_file}"
echo "Done." | true |
9839eeb07b2540861fd176783a6f5c07b8ee004e | Shell | zonyao/occlic | /run.sh | UTF-8 | 1,283 | 3.625 | 4 | [] | no_license | #!/bin/bash
# Container entrypoint: install SSH keys, set the root password, fix app
# ownership, configure Apache overrides, then start Apache, sshd and Tomcat.

# Install any SSH public keys passed via $AUTHORIZED_KEYS (comma-separated);
# the sentinel "**None**" means none were provided.
if [ "${AUTHORIZED_KEYS}" != "**None**" ]; then
echo "=> Found authorized keys"
mkdir -p /root/.ssh
chmod 700 /root/.ssh
touch /root/.ssh/authorized_keys
chmod 600 /root/.ssh/authorized_keys
IFS=$'\n'
# Split the comma-separated list into one key per line.
arr=$(echo ${AUTHORIZED_KEYS} | tr "," "\n")
for x in $arr
do
# Trim surrounding spaces, then append only keys not already present.
x=$(echo $x |sed -e 's/^ *//' -e 's/ *$//')
cat /root/.ssh/authorized_keys | grep "$x" >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "=> Adding public key to /root/.ssh/authorized_keys: $x"
echo "$x" >> /root/.ssh/authorized_keys
fi
done
fi
# Generate a root password only once per container; /.root_pw_set is the marker.
if [ ! -f /.root_pw_set ]; then
/set_root_pw.sh
else
echo "Root password already set!"
cat /app/users
fi
chown www-data:www-data /app -R
# Enable .htaccess overrides unless explicitly disabled via ALLOW_OVERRIDE.
if [ "$ALLOW_OVERRIDE" = "**False**" ]; then
unset ALLOW_OVERRIDE
else
sed -i "s/AllowOverride None/AllowOverride All/g" /etc/apache2/apache2.conf
a2enmod rewrite
fi
# Drop a stale pid file from a previous run, then start background services;
# tailing the logs keeps Apache output visible on the container console.
rm -f /var/run/apache2/apache2.pid
source /etc/apache2/envvars
tail -F /var/log/apache2/* & /etc/init.d/apache2 start &
/usr/sbin/sshd -D &
# Create the Tomcat admin user once; /.tomcat_admin_created is the marker.
if [ ! -f /.tomcat_admin_created ]; then
/create_tomcat_admin_user.sh
else
echo "Tomcat admin user's password has been configured!"
cat /app/tomcat_pass
fi
# Tomcat runs in the foreground as the container's main process.
exec ${CATALINA_HOME}/bin/catalina.sh run
| true |
21f0caccebd1123fbbda71340a26e34824d1f6a9 | Shell | rheask8246/megalib | /bin/dcosima-listallinstances | UTF-8 | 3,491 | 3.984375 | 4 | [] | no_license | #!/bin/bash
# Part of the cosima cluster
# Check how many sims can be run on the machine
# Return:
# Positive: number of available simulation slots
commandhelp() {
  # Print the usage/help text for dcosima-listallinstances.
  cat <<'EOF'

dcosima-listallinstances - check how many simulation instances can be run everywhere
Copyright by Andreas Zoglauer

Usage: dcosima-listallinstances [options]

Options:
 --help: Show this help.


Example: 
 dcosima-listallinstances

EOF
}
# Store command line as array
CMD=( "$@" )
# Check for help
for C in "${CMD[@]}"; do
if [[ ${C} == *-h* ]]; then
echo ""
commandhelp
exit 0
fi
done
# Per-user configuration listing remote machines as "machine user@host:port".
CFG=~/.dcosima.cfg
# Read configuration file and extract machines
MACHINES=( `cat ${CFG} | grep "^machine" | gawk '{ print $2 }'` )
REMOTEUSER=( )
REMOTEHOST=( )
REMOTEPORT=( )
#echo " "
#echo "Remote machines setup:"
# Split each user@host:port entry into its three components.
for (( m=0; m<=$(( ${#MACHINES[*]} -1 )); m++ )); do
REMOTEUSER[$m]=`echo "${MACHINES[$m]}" | awk -F"@" '{ print $1 }'`
REMOTEHOST[$m]=`echo "${MACHINES[$m]}" | awk -F"@" '{ print $2 }' | awk -F":" '{ print $1 }'`
REMOTEPORT[$m]=`echo "${MACHINES[$m]}" | awk -F":" '{ print $2 }'`
#echo " * Found remote machine: ${MACHINES[$m]} (user: ${REMOTEUSER[$m]}, address: ${REMOTEHOST[$m]}, port: ${REMOTEPORT[$m]})"
done
echo " "
echo " "
echo "Available instances:"
# Running totals: allowed = free slots reported by dcosima-getinstances,
# possible = configured maximum from each machine's ~/.dcosima.cfg.
ALLALLOWED="0"
ALLPOSSIBLE="0"
for (( m=0; m<=$(( ${#MACHINES[*]} -1 )); m++ )); do
# Do a quick test if the machine is available:
REMOTENAME=`ssh -q -o ConnectTimeout=5 -p ${REMOTEPORT[$m]} ${REMOTEUSER[$m]}@${REMOTEHOST[$m]} "hostname"`
if [ "$?" != "0" ]; then
echo " * Machine ${REMOTEHOST[$m]}, port ${REMOTEPORT[$m]}, user ${REMOTEUSER[$m]}: NOT ACCESSIBLE"
continue
fi
# Check if cosima is installed
AVAILABLE=`ssh -q -o ConnectTimeout=5 -p ${REMOTEPORT[$m]} ${REMOTEUSER[$m]}@${REMOTEHOST[$m]} ". .bash_local; type cosima > /dev/null" 2>&1`
if [ "${AVAILABLE}" != "" ]; then
echo " * Machine ${REMOTENAME} (${REMOTEHOST[$m]}, port ${REMOTEPORT[$m]}, user ${REMOTEUSER[$m]}): Cosmia not installed"
continue
fi
# Check how many instances are allowed
ALLOWED=`ssh -q -p ${REMOTEPORT[$m]} ${REMOTEUSER[$m]}@${REMOTEHOST[$m]} ". .bash_local; dcosima-getinstances"`
if [ "$?" != "0" ]; then
echo " * Machine ${REMOTENAME} (${REMOTEHOST[$m]}, port ${REMOTEPORT[$m]}, user ${REMOTEUSER[$m]}): NOT ACCESSIBLE"
continue
fi
ALLALLOWED=$(( ${ALLALLOWED} + ${ALLOWED} ))
# Check how many instances are possible
# Reads the "instances" line from the remote ~/.dcosima.cfg, if unique.
REMOTECOMMAND='COUNT=`grep -c ^instances ~/.dcosima.cfg`; if [ ${COUNT} -eq 1 ]; then grep ^instances ~/.dcosima.cfg; fi;'
POSSIBLE=$(ssh -p ${REMOTEPORT[$m]} ${REMOTEUSER[$m]}@${REMOTEHOST[$m]} 'bash -s' <<< ${REMOTECOMMAND})
if [ "$?" != "0" ]; then
echo " * Machine ${REMOTENAME} (${REMOTEHOST[$m]}, port ${REMOTEPORT[$m]}, user ${REMOTEUSER[$m]}): Failed to read instances"
continue
fi
# The matched line is "instances <n>"; take field 2 and sanity-check it.
if [ "${POSSIBLE}" != "" ]; then
POSSIBLE=`echo ${POSSIBLE} | awk '{ print $2 }'`
if [[ ! ${POSSIBLE} =~ ^[0-9]+$ ]]; then
echo "WARNING: Cannot parse remote instances level. Assuming 0.";
POSSIBLE="0"
fi
else
POSSIBLE="0"
fi
ALLPOSSIBLE=$(( ${ALLPOSSIBLE} + ${POSSIBLE} ))
echo " * Machine ${REMOTENAME} (${REMOTEHOST[$m]}, port ${REMOTEPORT[$m]}, user ${REMOTEUSER[$m]}): ${ALLOWED} / ${POSSIBLE}"
done
echo " "
echo "Total number of instances: ${ALLALLOWED} / ${ALLPOSSIBLE}"
echo " "
exit 0
| true |
e86a89d37304c879fe67acdde08189cf17e993ae | Shell | openshift/assisted-service | /hack/display_cover_profile.sh | UTF-8 | 359 | 3.03125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail

# Render a Go coverage profile ($COVER_PROFILE, required) in the browser,
# after stripping generated-mock entries so they don't skew the view.
: "${TMP_COVER_PROFILE:=/tmp/display_coverage.out}"

exclude_patterns=("mock_.*")

cp "${COVER_PROFILE}" "${TMP_COVER_PROFILE}"
# BUG FIX: `$exclude_patterns` expands to only the first array element
# (ShellCheck SC2128); iterate the whole array so every pattern is applied.
for pattern in "${exclude_patterns[@]}"; do
  sed -i "/${pattern}/d" "${TMP_COVER_PROFILE}"
done

go tool cover -html="${TMP_COVER_PROFILE}"
rm -f "${TMP_COVER_PROFILE}"
| true |
411b42fc6082afb00c13ae258a8a4f5dd45a1459 | Shell | AsahiLinux/m1n1 | /version.sh | UTF-8 | 413 | 3.515625 | 4 | [
"MIT",
"BSD-3-Clause",
"OFL-1.1",
"GPL-2.0-only",
"CC0-1.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] | permissive | #!/bin/sh
# Emit a C "#define BUILD_TAG" line describing the m1n1 build version.
cd "$(dirname "$0")"
dirbase="$(basename "$(pwd)")"

if [ -n "$M1N1_VERSION_TAG" ]; then
    # Explicit override via the environment wins.
    version="$M1N1_VERSION_TAG"
elif [ -e ".git" ]; then
    # Git checkout: describe the current commit.
    version="$(git describe --tags --always --dirty)"
else
    case "$dirbase" in
        m1n1-*)
            # Release tarball unpacked as "m1n1-<version>": derive the
            # version from the directory name, normalised to a "v" prefix.
            version="${dirbase#m1n1-}"
            version="v${version##v}"
            ;;
        *)
            version="unknown"
            ;;
    esac
fi

echo "#define BUILD_TAG \"$version\""
| true |
21951d9beae3894c62bda4e6c801012e424787f5 | Shell | randomparity/dotfiles | /.bash_aliases | UTF-8 | 1,065 | 3.0625 | 3 | [] | no_license | # Git shortcuts
gitdone() {
git add -A
git commit -S -v -m "$1"
git push
}
function gitl() {
git log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset' --abbrev-commit
}
# Use ssht to open tmux automatically for ssh sessions
function ssht() {
ssh $* -t 'tmux a || tmux || /bin/bash'
}
# Dave's custom aliases
# alias sudo="sudo "
# alias yum='FTP3USER=$FTP3USER FTP3PASS=$FTP3PASS $HOME/.local/bin/ibm-yum.sh'
alias hg="history | grep -i "
# DPDK aliases
alias rte=_rte $@
alias bld=_bld $@
alias dbld=_dbld $@
# Microk8s aliases
alias mkctl="microk8s kubectl"
# Use Vagrant with libvirt on Ubuntu
# See https://github.com/vagrant-libvirt/vagrant-libvirt#using-docker-based-installation
alias vagrant='
mkdir -p ~/.vagrant.d/{boxes,data,tmp}; \
docker run -it --rm \
-e LIBVIRT_DEFAULT_URI \
-v /var/run/libvirt/:/var/run/libvirt/ \
-v ~/.vagrant.d:/.vagrant.d \
-v $(pwd):$(pwd) \
-w $(pwd) \
--network host \
vagrantlibvirt/vagrant-libvirt:latest \
vagrant'
| true |
00368356963ef8fe612ef2049319718b714c33a8 | Shell | alvaro3420/covid-vaccination-spain | /fetch.sh | UTF-8 | 291 | 2.671875 | 3 | [] | no_license | # Fetch today's report...
# Download today's vaccination report from the ministry site...
report_date=$(date '+%Y%m%d')
pdf_path="reports/${report_date}.pdf"
wget -O "$pdf_path" "https://www.mscbs.gob.es/profesionales/saludPublica/ccayes/alertasActual/nCov/documentos/Informe_GIV_comunicacion_${report_date}.pdf"
# ...and extract its text content alongside it.
pdftotext -enc UTF-8 -table "$pdf_path"
| true |
b17b73c23ea1019e96405ef2e1549326bc102cb9 | Shell | lse/bitetorrent | /bitetorrent | UTF-8 | 586 | 3.40625 | 3 | [] | no_license | #!/bin/sh
# Boot-time helper: download a disk image over BitTorrent directly into a
# loop device exposed by diskfile.
WORKDIR=/var/run/bitetorrent/
MOUNTDIR=mount/
# First loop device whose losetup entry mentions "torrent".
LOOP_DEV=$(losetup | grep 'torrent' | cut -d ' ' -f1)
# Extract the torrent URL from the kernel command line:
# "root=live:torrent://<host/path>" becomes "http://<host/path>".
TORRENT_URL=$(sed 's|^.*root=live:torrent://\([^ ]*\).*$|http://\1|' /proc/cmdline)
TORRENT=$(basename "$TORRENT_URL")
FILENAME=""
function start() {
mkdir -p "$WORKDIR/$MOUNTDIR"
cd "$WORKDIR"
# Expose the loop device as a regular file under $MOUNTDIR.
/usr/bin/diskfile "$LOOP_DEV" "$MOUNTDIR"
/usr/bin/curl "$TORRENT_URL" -o "$TORRENT"
# Name of the first payload file inside the torrent ("<1> <name> ...").
FILENAME=$(ctorrent -x "$TORRENT" | grep '<1>' | cut -d' ' -f2)
# Symlink the payload name to the loop-device file so ctorrent writes the
# download straight into the device.
/usr/bin/ln -fs "$MOUNTDIR/$(basename "$LOOP_DEV")" "$FILENAME"
# Download/seed indefinitely (-e-1), verifying any existing data (-c).
/usr/bin/ctorrent -e-1 -c -f "$TORRENT" >/dev/null
}
start
| true |
6473ce36eab8de4fe85e725053f3aad0759188d1 | Shell | rwang916/PEPSI | /src/vs_motif.sh | UTF-8 | 2,678 | 3.546875 | 4 | [] | no_license | #!/bin/bash
score_motif()
{
# This function calculates the change presence of putative splicing regulatory elements
# $1 is a chunk file of tab-separated variants (id, chr, pos, ref, alt);
# it appends one scored line per variant to $tmp/<chunk>.motif.
prefix=$(basename "$1")
while read line; do
variant_id=$(echo "$line" | cut -f1)
chr=$(echo "$line" | cut -f2)
pos=$(echo "$line" | cut -f3)
ref=$(echo "$line" | cut -f4)
alt=$(echo "$line" | cut -f5)
# Look up the reference/alternate sequence windows for this variant
# in the precomputed sequence file (columns 2-7).
ref_lb=$(grep "$variant_id" "$seq_file" | cut -f2)
ref_ub=$(grep "$variant_id" "$seq_file" | cut -f3)
refseq=$(grep "$variant_id" "$seq_file" | cut -f4)
alt_lb=$(grep "$variant_id" "$seq_file" | cut -f5)
alt_ub=$(grep "$variant_id" "$seq_file" | cut -f6)
altseq=$(grep "$variant_id" "$seq_file" | cut -f7)
printf "%s\t%s\t%s\t%s" "$chr" "$pos" "$ref" "$alt" >> "$tmp/$prefix.motif"
# Score the variant against each motif set (exonic splicing
# enhancers/silencers) via the helper Python script.
for group in "ESEseq" "ESSseq"; do
likelihood_score=$(python "vs_motif.py" "$refseq" "$altseq" "$data/$group/$group.list" "$prefix" "$tmp" "$ref_lb" "$ref_ub" "$alt_lb" "$alt_ub" "secondary-structure")
printf "\t%s" "$likelihood_score" >> "$tmp/$prefix.motif"
done
echo "" >> "$tmp/$prefix.motif"
done < "$1"
}
# Exported so the function and its globals are visible to GNU parallel's
# child shells.
export data
export tmp
export seq_file
export -f score_motif
# Process input arguments: (1) input file of variants, (2) path to a tmp directory to store results, (3) number of threads, (4) file of sequences
input_file="$1"
tmp="$2"
threads="$3"
seq_file="$4"
# Set up working directories
data="../data"
prefix_out=$(basename "$input_file" | cut -d '.' -f1)
# Split the variants (minus the header line) into one chunk per thread.
num_lines=$(tail -n +2 "$input_file" | wc -l)
lines_per_file=$(awk 'BEGIN {printf("%.0f",('"$num_lines"'+'"$threads"'-1)/'"$threads"')}')
tail -n +2 "$input_file" | split --lines="$lines_per_file" - "$tmp/vs_input."
# Split input file of variants for parallel processing
find "$tmp" -name "vs_input.*" -print0 | parallel -0 -n 1 -P "$threads" -I '{}' score_motif '{}'
# Combine annotations into one file
find "$tmp" -name "vs_input.*.motif" | while read file_motif; do
cat "$file_motif" >> "$tmp/vs_input.motif.unsorted"
done
# Set up header for output file
printf "%s\t%s\t%s\t%s" "chromosome" "hg19_variant_position" "reference" "variant" > "$tmp/$prefix_out.motif"
for group in "ESEseq" "ESSseq"; do
printf "\t%s" "$group" >> "$tmp/$prefix_out.motif"
done
echo "" >> "$tmp/$prefix_out.motif"
# Sort by chromosome then position and append below the header.
sort -k1,1 -k2,2n "$tmp/vs_input.motif.unsorted" >> "$tmp/$prefix_out.motif"
# Remove intermediate chunks and RNAfold by-products from the run.
rm $tmp/vs_input.*
rm *_lunp
rm *.ps
| true |
296fba3a586480a67a8ebac5c26836e45660d424 | Shell | floj/zsh_config | /home/.zsh/tools/update_tools | UTF-8 | 1,660 | 2.953125 | 3 | [] | no_license | # vim: set ft=sh:
# log MSG: print MSG wrapped in yellow ANSI color codes.
# NOTE(review): relies on the shell interpreting "\033" in echo, which zsh
# does by default but bash does not -- this file targets zsh.
log() {
echo "\033[0;33m${1}\033[0m"
}
# Homebrew formulae that should always be installed.
BREW_PACKAGES=(
boost
cmake
colordiff
diff-so-fancy
direnv
exiftool
ffmpeg
flvstreamer
git
imagemagick
jhead
jq
jump
node
nvm
openssl
pstree
python
rbenv
readline
ripgrep
ruby-build
shellcheck
sqlite
tig
unrar
watch
yarn
youtube-dl
zsh
)
# Homebrew casks (GUI apps / binary bundles) that should be installed.
BREW_CASKS=(
android-platform-tools
gpg-suite
itsycal
kdiff3
keepassxc
macdown
mattr-slate
vlc
)
# Global npm packages.
NODE_PACKAGES=(
babel-eslint
eslint
gulp
prettier
tslint
typescript
)
# Ruby gems.
RUBY_GEMS=(
bond
bundler
haml-lint
pry
pry-byebug
ripper-tags
rufo
wirb
)
log ">>> Updating zsh_config"
[ -e ~/dev/config/zsh_config ] && cd ~/dev/config/zsh_config && git pull --rebase --autostash
log ">>> Updating vim_config"
# NOTE(review): this guard tests the zsh_config path but then updates
# vim_config -- presumably it should test ~/dev/config/vim_config. Verify.
[ -e ~/dev/config/zsh_config ] && cd ~/dev/config/vim_config && git pull --rebase --autostash
# If brew list fails there are missing packages
log ">>> Installing missing homebrew packages"
brew tap caskroom/cask
brew tap universal-ctags/universal-ctags
# Check installed packages
# NOTE(review): substring matching -- a package whose name is contained in
# another installed package's name will be skipped.
INSTALLED=$(brew list -1 | tr '\n' ' ')
for p in "${BREW_PACKAGES[@]}"; do
[[ "${INSTALLED}" != *$p* ]] && brew install "${p}"
done
# Check install casks
INSTALLED=$(brew cask list -1 | tr '\n' ' ')
for p in "${BREW_CASKS[@]}"; do
[[ ${INSTALLED} != *$p* ]] && brew cask install "${p}"
done
# universal-ctags has no stable release; install from HEAD if missing.
if ! brew ls --versions universal-ctags; then
brew install --HEAD universal-ctags
fi
brew upgrade
brew cask upgrade
brew cleanup &
log ">>> Updating rubygems"
gem update --system
log ">>> Installing important gems"
gem install --silent "${RUBY_GEMS[@]}"
log ">>> Installing important JS tools"
npm install -g npm
npm install -g "${NODE_PACKAGES[@]}" --no-package-lock || true
| true |
47909a8d17a0324b940634d7f3d62b472932d21c | Shell | natebolam/coda | /src/app/rosetta/start.sh | UTF-8 | 2,553 | 3.734375 | 4 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
set -eou pipefail

# Kill all services started by this script (matched by their _build paths)
# and exit with the given status. Used both directly and as a signal trap.
function cleanup
{
CODE=${1:-0}
echo "Killing archive.exe"
kill $(ps aux | egrep '_build/default/src/app/.*archive.exe' | grep -v grep | awk '{ print $2 }') || true
echo "Killing mina.exe"
kill $(ps aux | egrep '_build/default/src/app/.*mina.exe' | grep -v grep | awk '{ print $2 }') || true
echo "Killing agent.exe"
kill $(ps aux | egrep '_build/default/src/app/rosetta/test-agent/agent.exe' | grep -v grep | awk '{ print $2 }') || true
echo "Killing rosetta.exe"
kill $(ps aux | egrep '_build/default/src/app/rosetta' | grep -v grep | awk '{ print $2 }') || true
exit $CODE
}

trap cleanup TERM
trap cleanup INT

PG_CONN=postgres://$USER:$USER@localhost:5432/archiver

# rebuild
pushd ../../../
PATH=/usr/local/bin:$PATH dune b src/app/runtime_genesis_ledger/runtime_genesis_ledger.exe src/app/cli/src/mina.exe src/app/archive/archive.exe src/app/rosetta/rosetta.exe src/app/rosetta/test-agent/agent.exe src/app/rosetta/ocaml-signer/signer.exe
popd

# make genesis (synchronously)
./make-runtime-genesis.sh

# drop tables and recreate
psql -d archiver < drop_tables.sql
psql -d archiver < create_schema.sql

# archive
../../../_build/default/src/app/archive/archive.exe run \
-postgres-uri $PG_CONN \
-log-json \
-config-file /tmp/config.json \
-server-port 3086 &

# wait for it to settle
sleep 3

# demo node
./run-demo.sh \
-external-ip 127.0.0.1 \
-archive-address 3086 \
-log-json \
-log-level debug &

# wait for it to settle
sleep 3

# rosetta
../../../_build/default/src/app/rosetta/rosetta.exe \
-archive-uri $PG_CONN \
-graphql-uri http://localhost:3085/graphql \
-log-level debug \
-log-json \
-port 3087 &

# wait for it to settle
sleep 3

# Mode switch: CURL keeps the services up for manual poking, FOREVER runs
# the agent without exiting, anything else runs the agent once.
ARG=${1:-NONE}
if [[ "$ARG" == "CURL" ]]; then
echo "Running for curl mode, no agent present"
sleep infinity
else
if [[ "$ARG" == "FOREVER" ]]; then
echo "Running forever, not exiting agent afterwards"
EXTRA_FLAGS=" -dont-exit"
else
EXTRA_FLAGS=""
fi

# test agent
../../../_build/default/src/app/rosetta/test-agent/agent.exe \
-graphql-uri http://localhost:3085/graphql \
-rosetta-uri http://localhost:3087/ \
-log-level Trace \
-log-json $EXTRA_FLAGS &

# wait for test agent to exit (asynchronously)
AGENT_PID=$!
while $(kill -0 $AGENT_PID 2> /dev/null); do
sleep 2
done

# Reap the agent without tripping errexit, so its status can be forwarded.
set +e
wait $AGENT_PID
AGENT_STATUS=$?
set -e

echo "Test finished with code $AGENT_STATUS"

# then cleanup and forward the status
cleanup $AGENT_STATUS
fi
| true |
3393186cb9d650e00973e2f9e5ac163037ff99d1 | Shell | eaudeweb/heavy-lifter | /etc/scripts/git/pre-commit-with-warnings | UTF-8 | 1,796 | 4 | 4 | [] | no_license | #!/bin/bash
# PHP CodeSniffer pre-commit hook for git:
# lints the staged PHP/Drupal files with phpcs and blocks the commit on
# violations (warnings are ignored via -n).

# path to phpcs "binary"
PHPCS_BIN=./vendor/bin/phpcs
# comma-separated list of file patterns being ignored
# Change this line according to your needs.
PHPCS_IGNORE=vendor/*,web/core/*,web/libraries/*,web/modules/contrib/*,/web/themes/contrib/*,/docroot/themes/contrib/*,/docroot/core/*,/docroot/libraries/*,/docroot/modules/contrib/*
# grep -E compatible pattern of files to be checked
PHPCS_FILE_PATTERN="\.(php|module|inc|install|theme|yml)$"

# simple check if code sniffer is set up correctly
if [ ! -x "$PHPCS_BIN" ]; then
  echo "PHP CodeSniffer bin not found or executable -> $PHPCS_BIN"
  exit 1
fi

# Files in the staging area that were added, modified or renamed
# (but no deletions etc).
FILES=$(git diff-index --name-only --cached --diff-filter=ACMR HEAD -- )
if [ "$FILES" == "" ]; then
  exit 0
fi

# Keep only files whose names match the whitelist pattern.
FILES_TO_CHECK=""
for FILE in $FILES
do
  if echo "$FILE" | grep -E -q "$PHPCS_FILE_PATTERN"; then
    FILES_TO_CHECK="$FILES_TO_CHECK $FILE"
  fi
done

if [ "$FILES_TO_CHECK" == "" ]; then
  exit 0
fi

# execute the code sniffer
if [ "$PHPCS_IGNORE" != "" ]; then
  IGNORE="--ignore=$PHPCS_IGNORE"
else
  IGNORE=""
fi

# NOTE: a previous version also ran `git diff-index --cached HEAD $FILE`
# per staged file to compute a blob ID that was never used; that dead
# subprocess-per-file loop has been removed.
STAGED_FILES="$FILES_TO_CHECK"

# Use $PHPCS_BIN -n -s $ENCODING $IGNORE $STAGED_FILES to accept warnings.
# ($ENCODING is optional and may be supplied by the environment.)
OUTPUT=$($PHPCS_BIN -n -s $ENCODING $IGNORE $STAGED_FILES)
RETVAL=$?

# On failure, echo the exact command (for reproduction) and its report.
if [ $RETVAL -ne 0 ]; then
  echo $PHPCS_BIN -n -s $ENCODING $IGNORE $STAGED_FILES
  echo "$OUTPUT"
fi
exit $RETVAL
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.