blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
2c3d368352edf7496ccbbdc2ab409f37b8a3e908
|
Shell
|
sseago/ocp-migration-scripts
|
/dockerfiles/migration-restore/image-restore.sh
|
UTF-8
| 1,349
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# image-restore.sh MIGRATION_REGISTRY [BACKUP_NAME]
# Pull an image backup tarball from S3, push every image stream it contains
# into the target cluster's internal registry, then run an ark restore for
# the backup. (Header fixed: the original named the wrong script and listed
# MIGRATION_REGISTRY twice.)
set -x
# Give mounts/sidecars a moment to settle before starting.
sleep 3
export KUBECONFIG=/.kube/config
export AWS_SHARED_CREDENTIALS_FILE=/.aws/credentials
# defines OC_USER, OC_PASSWORD, S3_BUCKET
. /migration-env.sh
MIGRATION_REGISTRY=$1
# Default the backup name when $2 is not supplied.
BACKUP_NAME=${2:-poc-backup}
BACKUP_MOUNTPOINT=/tmp
BACKUP_DIR=poc-out
BACKUP_LOCATION=$BACKUP_MOUNTPOINT/$BACKUP_DIR
rm -rf "$BACKUP_LOCATION"
cd "$BACKUP_MOUNTPOINT" || exit 1
# Fetch and unpack the backup archive; abort if either step fails instead of
# restoring from a stale/partial tree.
aws s3 cp "s3://$S3_BUCKET/$BACKUP_NAME.tar.gz" . || exit 1
tar xzvf "$BACKUP_NAME.tar.gz" || exit 1
oc login -u "$OC_USER" -p "$OC_PASSWORD"
DOCKER_ENDPOINT=$(oc registry info)
# Backup layout: $BACKUP_LOCATION/<namespace>/<imagestream>/<tag>/<digest>
# NOTE: assumes the backup contains exactly one namespace directory.
NAMESPACE=$(ls "$BACKUP_LOCATION")
oc create namespace "$NAMESPACE" || true
oc project "$NAMESPACE"
for this_name in "$BACKUP_LOCATION/$NAMESPACE"/*
do
IMAGESTREAM_NAME=${this_name##*/}
for this_tag in "$BACKUP_LOCATION/$NAMESPACE/$IMAGESTREAM_NAME"/*
do
TAG=${this_tag##*/}
OC_IMAGE_NAME=$(ls "$BACKUP_LOCATION/$NAMESPACE/$IMAGESTREAM_NAME/$TAG")
SRC_DOCKER_REF=$MIGRATION_REGISTRY/$NAMESPACE/$IMAGESTREAM_NAME@$OC_IMAGE_NAME
DEST_DOCKER_REF=$DOCKER_ENDPOINT/$NAMESPACE/$IMAGESTREAM_NAME:$TAG
# Copy by digest from the migration registry to the cluster registry by tag.
skopeo copy --dest-creds="$OC_USER:$(oc whoami -t)" --dest-tls-verify=false --src-tls-verify=false "docker://$SRC_DOCKER_REF" "docker://$DEST_DOCKER_REF"
done
done
ark restore create -w --from-backup "$BACKUP_NAME"
| true
|
adcf70cd63bea1d01034275ac6cc1f740813f797
|
Shell
|
felixrieseberg/Jaglion-Workbench
|
/core/server/bin/s5-translate.sh
|
UTF-8
| 1,335
| 2.75
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
#
# Input arguments
DATADIR="$1"
CONFIGDIR="$2"
OUTPUTBASE="$3"
# Translate in HDInsight: submit the MapReduce jar via WebHCat/Templeton and
# capture the returned job id. (%...% tokens are deployment-time placeholders.)
JOBID=$(curl -u %storageusername%:%storagepassword% -d user.name=%storageusername% -d mdt.attribute_file_delimeter=\; -d mdt.dump_index=4 -d mdt.additional_info_index_delimeter=\; -d mdt.additional_info_index=0\;2\;3 -d mdt.output_file_delimiter=\, -d mdt.input_file_delimeter=\; -d jar=$OUTPUTBASE/%storageusername%.jar -d hdInsightJobName=$OUTPUTBASE -d arg=$OUTPUTBASE/anonymized -d arg=$OUTPUTBASE/translated -d arg=$OUTPUTBASE/MemoryDumpTranslationConfiguration.xml -d arg=$OUTPUTBASE/MemoryDumpAttributeTranslation.csv -d statusdir=$OUTPUTBASE -d callback=null -d class=%storageusername%.MemoryDumpTranslator 'https://%storageusername%3.azurehdinsight.net/templeton/v1/mapreduce/jar' | jsawk 'return this.id')
# Poll the job until it reports SUCCEEDED.
# FIX: the original passed a stray "user.name=%storageusername%" token that
# curl treated as an additional URL (the parameter is already in the query
# string), and the loop busy-waited with no delay, hammering the service.
STATUS=$(curl -u %storageusername%:%storagepassword% -s "https://jaglion3.azurehdinsight.net/templeton/v1/jobs/$JOBID?user.name=%storageusername%" | jsawk 'return this.status.state')
while [ "$STATUS" != "SUCCEEDED" ]; do
sleep 10
STATUS=$(curl -u %storageusername%:%storagepassword% -s "https://jaglion3.azurehdinsight.net/templeton/v1/jobs/$JOBID?user.name=%storageusername%" | jsawk 'return this.status.state')
done
| true
|
239419169f62e5cb01503c5a18816e8c7a4d21b3
|
Shell
|
nicktrav/cilium-debugging
|
/scripts/install_load_test.sh
|
UTF-8
| 388
| 3.09375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -euo pipefail
# Install (or upgrade) the load-test Helm chart into the load-test namespace.
# $1 = value for the chart's cnp.enabled setting (required).
_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
_chart_dir="$_dir/../manifests/load-test"
# Fail early with a usage message instead of an opaque "unbound variable"
# error from set -u when the argument is missing.
_cnp_enabled="${1:?usage: $0 <cnp.enabled: true|false>}"
echo "Creating load-test namespace ..."
kubectl create ns load-test || true
echo "Installing load-test Helm chart ..."
helm upgrade \
load-test \
"$_chart_dir" \
--namespace load-test \
--set cnp.enabled="$_cnp_enabled" \
--install
| true
|
aeb2ece1d19d2a4362a4c29625f72a329fc7e765
|
Shell
|
Sunmxt/wing
|
/controller/sae/runtime/builder/runtime_image.sh
|
UTF-8
| 28,179
| 3.671875
| 4
|
[] |
no_license
|
sar_import builder/common.sh
sar_import builder/ci.sh
sar_import builder/validate.sh
sar_import settings/image.sh
sar_import utils.sh
# Compute the stash-prefix hash for a (prefix, env, tag) triple and write it
# to stdout.
# FIX: the original wrapped the call in backticks, which captured the hash and
# then tried to EXECUTE it as a command; call hash_for_key directly so its
# output reaches the caller.
_runtime_image_stash_prefix() {
local prefix=$1
local env=$2
local tag=$3
hash_for_key "$prefix" "$env" "$tag"
}
# Echo the stash prefix registered for the current build context.
# NOTE(review): reads the caller's `context` variable (not a parameter) —
# callers must have `context` set before invoking this.
_runtime_image_stash_prefix_by_context() {
eval "local stash_prefix=\$_SAR_RT_BUILD_${context}_STASH_PREFIX"
echo $stash_prefix
}
# Emit a Dockerfile RUN instruction installing the Alpine (apk) OS
# dependencies and system-level python dependencies (via pip).
# Globals read: SAR_RUNTIME_ALPINE_APK_MIRROR (optional mirror host, swapped
# into /etc/apk/repositories when non-empty), SAR_RUNTIME_ALPINE_DEPENDENCIES,
# SAR_RUNTIME_SYS_PYTHON_DEPENDENCIES.
_generate_runtime_image_dockerfile_add_os_deps_alpine() {
loginfo "[runtime_image_build] add os dependencies with apk."
echo '
RUN set -xe;\
mkdir -p /tmp/apk-cache;\
[ ! -z "'`strip $SAR_RUNTIME_ALPINE_APK_MIRROR`'" ] && sed -Ei "s/dl-cdn\.alpinelinux\.org/'$SAR_RUNTIME_ALPINE_APK_MIRROR'/g" /etc/apk/repositories;\
apk update --cache-dir /tmp/apk-cache;\
apk add '${SAR_RUNTIME_ALPINE_DEPENDENCIES[@]}' --cache-dir /tmp/apk-cache;\
rm -rf /tmp/apk-cache;\
pip install '${SAR_RUNTIME_SYS_PYTHON_DEPENDENCIES[@]}'
'
}
# Placeholder: CentOS (yum) dependency generation is not implemented yet.
# FIX: explicitly return 1 — the original returned logerror's (success)
# status, so callers' `|| return 1` never fired and the build continued
# silently without any OS dependencies.
_generate_runtime_image_dockerfile_add_os_deps_centos() {
logerror "[runtime_image_builder] Centos will be supported soon."
return 1
}
# Placeholder: Debian (apt) dependency generation is not implemented yet.
# Same fix as the CentOS stub above: fail explicitly.
_generate_runtime_image_dockerfile_add_os_deps_debian() {
logerror "[runtime_image_builder] Debian will be supported soon."
return 1
}
# Emit a supervisord [program:*] section for a long-running service.
# $1 = service name, $2 = working directory, remaining args = command line.
# Output goes to stdout; the surrounding blank lines are kept so generated
# config files stay readable.
# FIX: use one double-quoted string instead of unquoted expansions spliced
# between single-quoted fragments — the original word-split and glob-expanded
# $name/$exec/$workdir (an exec containing `*` or runs of spaces was mangled).
_generate_supervisor_system_service() {
local name=$1
local workdir=$2
shift 2
local exec="$*"
echo "
[program:$name]
command=$exec
startsecs=20
autorestart=true
stdout_logfile=/dev/stdout
stderr_logfile=/dev/stderr
directory=$workdir
"
}
# Cron-style services are not implemented: only a blank placeholder is
# emitted. Arguments are accepted (name, cron, workdir, command) so the
# caller contract is stable once this is filled in.
_generate_supervisor_cron_service() {
local name=$1
local cron=$2
local workdir=$3
shift 3
local exec="$*"
echo '
'
}
# "normal" services share the system-service template.
# FIX: forward arguments with "$@" instead of unquoted $* so arguments that
# contain whitespace reach the delegate unchanged.
_generate_supervisor_normal_service() {
_generate_supervisor_system_service "$@"
return $?
}
# Write the root supervisord config to a scratch file, emit Dockerfile COPY
# instructions for it plus one per-service config, and a RUN that creates all
# needed directories. $1 = build context name. Reads the
# _SAR_RT_BUILD_<context>_SVC* variables registered by runtime_image_add_service.
# Returns 1 when any service config cannot be generated.
_generate_runtime_image_dockerfile_add_supervisor_services() {
local context=$1
loginfo "[runtime_image_build] add supervisor services."
local supervisor_root_config=supervisor-$RANDOM$RANDOM$RANDOM.ini
echo '
[unix_http_server]
file=/run/supervisord.sock
[include]
files = /etc/supervisor.d/services/*.conf
[supervisord]
logfile=/var/log/supervisord.log
logfile_maxbytes=0
loglevel=info
pidfile=/run/runtime/supervisord.pid
nodaemon=true
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[supervisorctl]
serverurl=unix:///run/supervisord.sock
' > "$supervisor_root_config"
echo "COPY $supervisor_root_config /etc/sar_supervisor.conf"
local making_dirs="/var/log/application /run/runtime"
# generate services.
eval "local -i count=\${#_SAR_RT_BUILD_${context}_SVCS[@]}"
local -i idx=1
while [ $idx -le $count ]; do
eval "local key=\${_SAR_RT_BUILD_${context}_SVCS[$idx]}"
eval "local type=\${_SAR_RT_BUILD_${context}_SVC_${key}_TYPE}"
eval "local name=\${_SAR_RT_BUILD_${context}_SVC_${key}_NAME}"
eval "local exec=\${_SAR_RT_BUILD_${context}_SVC_${key}_EXEC}"
eval "local working_dir=\${_SAR_RT_BUILD_${context}_SVC_${key}_WORKING_DIR}"
if [ -z "$working_dir" ]; then
local working_dir=$SAR_RUNTIME_APP_DEFAULT_WORKING_DIR
fi
local making_dirs="$making_dirs '$working_dir'"
local file="supervisor-svc-$key.conf"
case $type in
cron)
# FIX: "locak" typo — the original executed `locak cron=...`, an unknown
# command, so $cron stayed empty/stale for every cron service.
eval "local cron=\${_SAR_RT_BUILD_${context}_SVC_${key}_CRON}"
if ! _generate_supervisor_cron_service "$name" "$cron" "$working_dir" $exec > "$file"; then
logerror "[runtime_image_builder] Generate cronjob $name configuration failure."
return 1
fi
;;
system)
if ! _generate_supervisor_system_service "$name" "$working_dir" $exec > "$file"; then
logerror "[runtime_image_builder] Generate system service $name configuration failure."
return 1
fi
;;
normal)
if ! _generate_supervisor_normal_service "$name" "$working_dir" $exec > "$file"; then
logerror "[runtime_image_builder] Generate normal service $name configuration failure."
return 1
fi
;;
*)
logerror "[runtime_image_builder] Unsupported service type."
return 1
;;
esac
local svc_files="$svc_files $file"
local -i idx=idx+1
done
if [ ! -z "$svc_files" ]; then
echo "COPY $svc_files /etc/supervisor.d/services/"
fi
if [ ! -z "$making_dirs" ]; then
# Deduplicate directories and single-quote each one for the RUN command.
echo '
RUN set -xe;\
mkdir -p '`echo $making_dirs | xargs -n 1 echo | sort | uniq | sed -E 's/(.*)/'\\\\\''\1'\\\\\''/g' | xargs echo`'
'
fi
}
# Emit Dockerfile COPY + RUN instructions that ship and execute the registered
# pre-build scripts (each run from its configured working directory).
# $1 = build context name. Returns 1 when any registered script is missing.
# NOTE(review): "SFRIPT" is a typo but is used consistently at both the write
# site (runtime_image_pre_build_script) and here, so it is deliberately kept.
_generate_runtime_image_dockerfile_prebuild_scripts() {
local context=$1
eval "local pre_build_script_keys=\$_SAR_RT_BUILD_${context}_PRE_BUILD_SCRIPTS"
if [ ! -z "$pre_build_script_keys" ]; then
local pre_build_script_keys=`echo $pre_build_script_keys | xargs -n 1 echo | sort | uniq | sed -E 's/(.*)/'\\\\\''\1'\\\\\''/g' | xargs echo`
local pre_build_work_dirs=
for key in `echo $pre_build_script_keys`; do
eval "local pre_build_script_path=\$_SAR_RT_BUILD_${context}_PRE_BUILD_SFRIPT_${key}_PATH"
eval "local pre_build_script_workdir=\$_SAR_RT_BUILD_${context}_PRE_BUILD_SCRIPT_${key}_WORKDIR"
if [ -z "$pre_build_script_workdir" ]; then
local pre_build_script_workdir=/
fi
local pre_build_work_dirs="$pre_build_work_dirs '$pre_build_script_workdir'"
local pre_build_scripts="$pre_build_scripts '$pre_build_script_path'"
# TODO: checksum here.
done
local pre_build_scripts=`echo $pre_build_scripts | xargs -n 1 echo | sort | uniq | sed -E 's/(.*)/'\\\\\''\1'\\\\\''/g' | xargs echo`
local pre_build_work_dirs=`echo $pre_build_work_dirs | xargs -n 1 echo | sort | uniq | sed -E 's/(.*)/'\\\\\''\1'\\\\\''/g' | xargs echo`
# run pre-build scripts.
# FIX: the original set `failure=1` inside a `( ... )` subshell, so the flag
# never propagated and missing scripts were silently ignored; test the xargs
# pipeline's exit status directly (xargs exits non-zero if any test -f fails).
if ! echo $pre_build_scripts | xargs -n 1 -I {} test -f {}; then
logerror some pre-build script missing.
return 1
fi
echo "COPY $pre_build_scripts /_sar_package/pre_build_scripts/"
echo -n '
RUN set -xe;\
cd /_sar_package/pre_build_scripts;\
chmod a+x *; mkdir -p '$pre_build_work_dirs';'
for key in `echo $pre_build_script_keys`; do
eval "local pre_build_script_path=\$_SAR_RT_BUILD_${context}_PRE_BUILD_SFRIPT_${key}_PATH"
eval "local pre_build_script_workdir=\$_SAR_RT_BUILD_${context}_PRE_BUILD_SCRIPT_${key}_WORKDIR"
if [ -z "$pre_build_script_workdir" ]; then
local pre_build_script_workdir=/
fi
local script_name=`eval "basename $pre_build_script_path"`
local script_name=`strip $script_name`
local script_name=`path_join /_sar_package/pre_build_scripts $script_name`
echo -n "cd $pre_build_script_workdir; $script_name;"
done
echo
fi
}
# Emit Dockerfile COPY + RUN instructions that ship and execute the registered
# post-build scripts (mirror of the pre-build variant above).
# $1 = build context name. Returns 1 when any registered script is missing.
# NOTE(review): "SFRIPT" is a typo kept for consistency with the write site.
_generate_runtime_image_dockerfile_postbuild_scripts() {
local context=$1
eval "local post_build_script_keys=\$_SAR_RT_BUILD_${context}_POST_BUILD_SCRIPTS"
if [ ! -z "$post_build_script_keys" ]; then
local post_build_script_keys=`echo $post_build_script_keys | xargs -n 1 echo | sort | uniq | sed -E 's/(.*)/'\\\\\''\1'\\\\\''/g' | xargs echo`
for key in `echo $post_build_script_keys`; do
eval "local post_build_script_path=\$_SAR_RT_BUILD_${context}_POST_BUILD_SFRIPT_${key}_PATH"
eval "local post_build_script_workdir=\$_SAR_RT_BUILD_${context}_POST_BUILD_SCRIPT_${key}_WORKDIR"
local post_build_scripts="$post_build_scripts '$post_build_script_path'"
if [ -z "$post_build_script_workdir" ]; then
local post_build_script_workdir=/
fi
local post_build_work_dirs="$post_build_work_dirs '$post_build_script_workdir'"
# TODO: checksum here.
done
local post_build_scripts=`echo $post_build_scripts | xargs -n 1 echo | sort | uniq | sed -E 's/(.*)/'\\\\\''\1'\\\\\''/g' | xargs echo`
local post_build_work_dirs=`echo $post_build_work_dirs | xargs -n 1 echo | sort | uniq | sed -E 's/(.*)/'\\\\\''\1'\\\\\''/g' | xargs echo`
# run post-build scripts.
# FIX: the original set `failure=1` inside a `( ... )` subshell, so the flag
# never propagated and missing scripts were silently ignored; test the xargs
# pipeline's exit status directly.
if ! echo $post_build_scripts | xargs -n 1 -I {} test -f {}; then
logerror some post-build script missing.
return 1
fi
echo "COPY $post_build_scripts /_sar_package/post_build_scripts/"
echo -n '
RUN set -xe;\
cd /_sar_package/post_build_scripts;\
chmod a+x *; mkdir -p '$post_build_work_dirs';'
for key in `echo $post_build_script_keys`; do
eval "local post_build_script_path=\$_SAR_RT_BUILD_${context}_POST_BUILD_SFRIPT_${key}_PATH"
eval "local post_build_script_workdir=\$_SAR_RT_BUILD_${context}_POST_BUILD_SCRIPT_${key}_WORKDIR"
if [ -z "$post_build_script_workdir" ]; then
local post_build_script_workdir=/
fi
local script_name=`eval "basename $post_build_script_path"`
local script_name=`strip $script_name`
local script_name=`path_join /_sar_package/post_build_scripts $script_name`
echo -n "cd $post_build_script_workdir; $script_name;"
done
echo
fi
}
# Emit the complete multi-stage Dockerfile for a runtime image on stdout.
# $1 = build context name, $2 = package ref, $3 = package env, $4 = package tag.
# Reads the _SAR_RT_BUILD_<context>_* variables registered by the
# runtime_image_* setter functions. Returns 1 on any validation failure.
_generate_runtime_image_dockerfile() {
local context=$1
local package_ref=$2
local package_env=$3
# NOTE(review): "pakcage_tag" is misspelled but used consistently below
# (PKG_TAG emission), so renaming is deliberately avoided here.
local pakcage_tag=$4
local build_id=$RANDOM$RANDOM$RANDOM$RANDOM
eval "local -i deps_count=\${#_SAR_RT_BUILD_${context}_DEPS[@]}"
local failure=0
local -i idx=1
# Pre-check: validate every registered dependency package before emitting
# anything, so a broken dependency fails the whole generation.
while [ $idx -le $deps_count ]; do
eval "local key=\${_SAR_RT_BUILD_${context}_DEPS[$idx]}"
eval "local pkg_env_name=\${_SAR_RT_BUILD_${context}_DEP_${key}_ENV}"
eval "local pkg_prefix=\${_SAR_RT_BUILD_${context}_DEP_${key}_PREFIX}"
eval "local pkg_tag=\${_SAR_RT_BUILD_${context}_DEP_${key}_TAG}"
local pkg_image_ref=`_ci_get_package_ref "$pkg_prefix" "$pkg_env_name" "$pkg_tag"`
loginfo "[runtime_image_builder][pre_check] check package: $pkg_image_ref"
# NOTE(review): "$pkg_prefix" is passed twice; the second argument looks
# like it should be "$pkg_env_name" — confirm against
# _validate_dependency_package's signature before changing.
if ! _validate_dependency_package "$pkg_prefix" "$pkg_prefix" "$pkg_tag"; then
local failure=1
fi
local -i idx=idx+1
done
if [ $failure -ne 0 ]; then
logerror "[runtime_image_builder]" dependency package validation failure.
return 1
fi
# Multi-stage image layers.
local -i idx=1
while [ $idx -le $deps_count ]; do
eval "local key=\${_SAR_RT_BUILD_${context}_DEPS[$idx]}"
eval "local pkg_env_name=\${_SAR_RT_BUILD_${context}_DEP_${key}_ENV}"
eval "local pkg_prefix=\${_SAR_RT_BUILD_${context}_DEP_${key}_PREFIX}"
eval "local pkg_tag=\${_SAR_RT_BUILD_${context}_DEP_${key}_TAG}"
local pkg_image_ref=`_ci_get_package_ref "$pkg_prefix" "$pkg_env_name" "$pkg_tag"`
loginfo "[runtime_image_builder] package $pkg_image_ref used."
echo "FROM $pkg_image_ref AS sar_stage_`hash_for_key $build_id $pkg_image_ref`"
local -i idx=idx+1
done
eval "local base_image=\$_SAR_RT_BUILD_${context}_BASE_IMAGE"
_validate_base_image "$base_image" || return 1
echo "FROM $base_image" # for now, assume the base image ships the tools the build needs
# pack pre-build and post-build scripts.
if ! _generate_runtime_image_dockerfile_prebuild_scripts $context; then
return 1
fi
# Place packages.
local -i idx=1
while [ $idx -le $deps_count ]; do
eval "local key=\${_SAR_RT_BUILD_${context}_DEPS[$idx]}"
eval "local pkg_env_name=\${_SAR_RT_BUILD_${context}_DEP_${key}_ENV}"
eval "local pkg_prefix=\${_SAR_RT_BUILD_${context}_DEP_${key}_PREFIX}"
eval "local pkg_tag=\${_SAR_RT_BUILD_${context}_DEP_${key}_TAG}"
eval "local placed_path=\${_SAR_RT_BUILD_${context}_DEP_${key}_PLACE_PATH}"
local pkg_image_ref=`_ci_get_package_ref "$pkg_prefix" "$pkg_env_name" "$pkg_tag"`
loginfo "[runtime_image_builder] place package $pkg_image_ref --> $placed_path"
echo "COPY --from=sar_stage_`hash_for_key $build_id $pkg_image_ref` /package/data \"$placed_path\""
local -i idx=idx+1
done
# add system dependencies.
local pkg_mgr=`determine_os_package_manager`
case $pkg_mgr in
apk)
_generate_runtime_image_dockerfile_add_os_deps_alpine || return 1
;;
yum)
_generate_runtime_image_dockerfile_add_os_deps_centos || return 1
;;
apt)
_generate_runtime_image_dockerfile_add_os_deps_debian || return 1
;;
*)
logerror "[runtime_image_builder] unsupported package manager type: $pkg_mgr"
return 1
;;
esac
if ! _generate_runtime_image_dockerfile_add_supervisor_services $context; then
logerror "[runtime_image_builder] failed to add supervisor services."
return 1
fi
# save runtime image metadata and install runtime.
echo '
RUN set -xe;\
[ -d '\''/_sar_package/runtime_install'\'' ] && (echo install runtime; bash /_sar_package/runtime_install/install.sh /opt/runtime );\
mkdir -p /_sar_package;\
echo PKG_REF='\\\'$package_ref\\\'' > /_sar_package/meta;\
echo PKG_ENV='\\\'$package_env\\\'' >> /_sar_package/meta;\
echo PKG_TAG='\\\'$pakcage_tag\\\'' >> /_sar_package/meta;\
echo PKG_TYPE=runtime_image >> /_sar_package/meta;\
echo PKG_APP_NAME='\\\'$application_name\\\'' >> /_sar_package/meta;
'
# run post-build scripts.
if ! _generate_runtime_image_dockerfile_postbuild_scripts $context; then
return 1
fi
echo 'CMD ["supervisord", "-c", "/etc/sar_supervisor.conf"]'
}
# Print usage for build_runtime_image. A quoted here-doc keeps the text
# literal, exactly as the original single-quoted echo did.
build_runtime_image_help() {
cat <<'EOF'

Build runtime image.
usage:
build_runtime_image <build_mode> [options] -t <tag> -e <environment_varaible_name> -r prefix -- [docker build options]
mode:
docker
gitlab-docker
options:
-c <context_name> specified build context. default: system
-s do not push image to registry.
-h <path_to_hash> use file(s) hash for tag.
-f force to build
-i
example:
build_runtime_image -t latest -e ENV -r registry.stuhome.com/mine/myproject

EOF
}
# Build (and by default push) the runtime image for a context.
# $1 = mode (docker | gitlab-docker); remaining args are short options parsed
# below, then optionally `--` followed by raw docker build options.
# NOTE(review): the unsupported-mode branch only logs and falls through —
# execution continues; confirm whether it should `return 1`.
build_runtime_image() {
local mode=$1
case $mode in
docker)
;;
gitlab-docker)
;;
*)
build_runtime_image_help
logerror unsupported mode: $mode
;;
esac
shift 1
# First pass: consume project-style long options (via next_long_opt helper).
LONGOPTIND=0
while next_long_opt opt $*; do
case $opt in
ignore-runtime)
local ignore_runtime=1
;;
esac
eval `eliminate_long_opt`
done
# Second pass: standard short options.
OPTIND=0
while getopts 't:e:r:c:sh:f' opt; do
case $opt in
t)
local ci_image_tag=$OPTARG
;;
e)
local ci_image_env_name=`_ci_get_env_value "$OPTARG"`
;;
r)
local ci_image_prefix=$OPTARG
;;
c)
local context=$OPTARG
;;
s)
local ci_no_push=1
;;
h)
local path_to_hash=$OPTARG
;;
f)
local force_to_build=1
;;
*)
build_runtime_image_help
logerror "[runtime_image_builder]" unexcepted options -$opt.
;;
esac
done
# The word at $OPTIND must be "--" (docker-option separator) or absent.
eval "local __=\${$OPTIND}"
local -i optind=$OPTIND
if [ "$__" != "--" ]; then
if [ ! -z "$__" ]; then
build_runtime_image_help
logerror "[runtime_image_builder] build_runtime_image: got unexcepted non-option argument: \"$__\"."
return 1
fi
local -i optind=optind-1
fi
if [ -z "$context" ]; then
local context=system
fi
# Outside gitlab-docker mode, tag and prefix are mandatory.
if [ "gitlab-docker" != "$mode" ]; then
if [ -z "$ci_image_tag" ]; then
build_runtime_image_help
logerror "[runtime_image_builder]" empty runtime image tag.
return 1
fi
if [ -z "$ci_image_prefix" ]; then
build_runtime_image_help
logerror "[runtime_image_builder]" empty runtime image prefix.
return 1
fi
fi
# add runtime
if [ -z "$ignore_runtime" ]; then
runtime_image_add_dependency -c "$context" -r "$SAR_RUNTIME_PKG_PREFIX" -e "$SAR_RUNTIME_PKG_ENV" -t "$SAR_RUNTIME_PKG_TAG" /_sar_package/runtime_install
fi
# Generate the Dockerfile into a scratch path, then hand off to the
# mode-specific build wrapper.
local dockerfile=/tmp/Dockerfile-RuntimeImage-$RANDOM$RANDOM$RANDOM
if ! _generate_runtime_image_dockerfile "$context" "$ci_image_prefix" "$ci_image_env_name" "$ci_image_tag" > "$dockerfile" ; then
build_runtime_image_help
logerror "[runtime_image_builder]" generate runtime image failure.
return 1
fi
# Re-collect the parsed options (minus -c and its argument, which is local
# to this function) so they can be forwarded to the build wrapper.
local -i idx=1
local -i ref=1
local -a opts
while [ $ref -le $optind ]; do
eval "local opt=\${$ref}"
local opt=${opt:1:1}
if [ "$opt" = "c" ]; then
local -i ref=ref+2
continue
fi
eval "opts[$idx]=\"\${$ref}\""
idx=idx+1
ref=ref+1
done
shift $optind
case $mode in
docker)
eval "log_exec _ci_docker_build ${opts[@]} -- -f \"$dockerfile\" $* ." || return 1
;;
gitlab-docker)
eval "log_exec _ci_gitlab_runner_docker_build ${opts[@]} -- -f \"$dockerfile\" $* ." || return 1
;;
esac
}
# Print usage for runtime_image_base_image. A quoted here-doc keeps the text
# literal, matching the original single-quoted echo byte for byte.
runtime_image_base_image_help() {
cat <<'EOF'

Set base image of runtime image.
usage:
runtime_image_base_image [options] <image reference>
options:
-c <context_name> specified build context. default: system
example:
runtime_image_base_image alpine:3.7
runtime_image_base_image -c context2 alpine:3.7

EOF
}
# Register the base image for a build context.
# Options: -c <context> (default: system). First non-option arg = image ref.
# NOTE(review): the `*)` branch logs an unexpected option but does not return,
# so parsing continues — confirm whether that is intended.
runtime_image_base_image() {
OPTIND=0
while getopts 'c:' opt; do
case $opt in
c)
local context=$OPTARG
;;
*)
runtime_image_base_image_help
logerror "[runtime_image_builder]" unexcepted options -$opt.
;;
esac
done
eval "local base_image=\${$OPTIND}"
if [ -z "$base_image" ]; then
runtime_image_base_image_help
logerror "[runtime_image_builder] base image not specifed."
return 1
fi
if [ -z "$context" ]; then
local context=system
fi
# Stored in a dynamically named global read by _generate_runtime_image_dockerfile.
eval "_SAR_RT_BUILD_${context}_BASE_IMAGE=$base_image"
}
# Print usage for runtime_image_add_dependency. A quoted here-doc keeps the
# text literal, matching the original single-quoted echo byte for byte.
runtime_image_add_dependency_help() {
cat <<'EOF'

Add package dependency. Packages will be placed to during building runtime image.
usage:
runtime_image_add_dependency -t <tag> -e <environment_varaible_name> -r prefix [options] <path>
options:
-c <context_name> specified build context. default: system
example:
runtime_image_add_dependency -t c3adea1d -e ENV -r registry.stuhome.com/be/recruitment-fe /app/statics

EOF
}
# Register a package dependency (tag/env/prefix) and the path where its
# /package/data payload will be placed inside the runtime image.
# Options: -t tag, -e env var name, -r prefix, -c context (default: system).
# First non-option arg = placement path.
runtime_image_add_dependency() {
OPTIND=0
while getopts 't:e:r:c:' opt; do
case $opt in
t)
local ci_package_tag=$OPTARG
;;
e)
local ci_package_env_name=$OPTARG
;;
r)
local ci_package_prefix=$OPTARG
;;
c)
local context=$OPTARG
;;
*)
runtime_image_add_dependency_help
logerror "[runtime_image_builder]" unexcepted options -$opt.
;;
esac
done
if [ -z "$context" ]; then
local context=system
fi
eval "local place_path=\${$OPTIND}"
if [ -z "$place_path" ]; then
runtime_image_add_dependency_help
logerror "[runtime_image_builder] runtime_image_add_dependency: Target path cannot be empty."
return 1
fi
# Key the dependency by a hash of (prefix, env, tag); registering the same
# triple twice overwrites the per-key variables but appends a new DEPS slot.
local dependency_key=`hash_for_key $ci_package_prefix "$ci_package_env_name" "$ci_package_tag"`
eval "_SAR_RT_BUILD_${context}_DEP_${dependency_key}_ENV=$ci_package_env_name"
eval "_SAR_RT_BUILD_${context}_DEP_${dependency_key}_PREFIX=$ci_package_prefix"
eval "_SAR_RT_BUILD_${context}_DEP_${dependency_key}_TAG=$ci_package_tag"
eval "_SAR_RT_BUILD_${context}_DEP_${dependency_key}_PLACE_PATH=$place_path"
eval "local -i dep_count=\${#_SAR_RT_BUILD_${context}_DEPS[@]}+1"
eval "_SAR_RT_BUILD_${context}_DEPS[$dep_count]=$dependency_key"
}
# Print usage for runtime_image_add_service. A quoted here-doc keeps the text
# (including the "5 0 * * *" cron example) literal, matching the original
# single-quoted echo byte for byte.
runtime_image_add_service_help() {
cat <<'EOF'

Add service to image. Services will be started automatically after conainter started.
usage:
runtime_image_add_service [options] <type> ...
runtime_image_add_service [options] system <service_name> <command>
runtime_image_add_service [options] normal <service_name> <command>
runtime_image_add_service [options] cron <service_name> <command>
options:
-d <path> working directory.
-c <context_name> specified build context. default: system
runtime_image_add_service system conf_updator /opt/runtime/bin/runtime_conf_update.sh
runtime_image_add_service cron runtime_conf_update "5 0 * * *"
runtime_image_add_service normal nginx /sbin/nginx

EOF
}
# Register a system/normal service in the context's service table.
# $1 = context, $2 = working dir, $3 = type, $4 = name, $5 = command string.
# Services are keyed by a hash of the name; re-adding a name overwrites its
# fields but appends a new SVCS slot.
_runtime_image_add_service() {
local context=$1
local working_dir=$2
local type=$3
local name=$4
local exec="$5"
local key=`hash_for_key $name`
eval "_SAR_RT_BUILD_${context}_SVC_${key}_TYPE=$type"
eval "_SAR_RT_BUILD_${context}_SVC_${key}_NAME=$name"
eval "_SAR_RT_BUILD_${context}_SVC_${key}_EXEC=\"$exec\""
eval "_SAR_RT_BUILD_${context}_SVC_${key}_WORKING_DIR=$working_dir"
eval "local -i count=\${#_SAR_RT_BUILD_${context}_SVCS[@]}+1"
eval "_SAR_RT_BUILD_${context}_SVCS[$count]=$key"
}
# Register a cron service: same as above plus a CRON field.
# $1 = context, $2 = working dir, $3 = name, $4 = cron spec, $5 = command.
_runtime_image_add_service_cron() {
local context=$1
local working_dir=$2
local name=$3
local cron=$4
local exec="$5"
local key=`hash_for_key $name`
eval "_SAR_RT_BUILD_${context}_SVC_${key}_TYPE=cron"
eval "_SAR_RT_BUILD_${context}_SVC_${key}_NAME=$name"
eval "_SAR_RT_BUILD_${context}_SVC_${key}_CRON=$cron"
eval "_SAR_RT_BUILD_${context}_SVC_${key}_EXEC=\"$exec\""
eval "_SAR_RT_BUILD_${context}_SVC_${key}_WORKING_DIR=$working_dir"
eval "local -i count=\${#_SAR_RT_BUILD_${context}_SVCS[@]}+1"
eval "_SAR_RT_BUILD_${context}_SVCS[$count]=$key"
}
# Public entry: register a service to start with the container.
# Options: -c context (default: system), -d working dir.
# Positional: <type> <name> <command...>; command words containing spaces are
# re-quoted so they survive later word-splitting.
runtime_image_add_service() {
OPTIND=0
while getopts 'c:d:' opt; do
case $opt in
c)
local context=$OPTARG
;;
d)
local working_dir=$OPTARG
;;
*)
runtime_image_add_service_help
logerror "[runtime_image_builder]" unexcepted options -$opt.
;;
esac
done
local -i optind=$OPTIND-1
shift $optind
if [ -z "$context" ]; then
local context=system
fi
local type=$1
local name="$2"
shift 2
# Rebuild the command string, single-quoting any argument with spaces.
local -i idx=1
while [ $idx -le $# ]; do
eval "local param=\$$idx"
if echo "$param" | grep ' ' -q ; then
local exec="$exec '$param'"
else
local exec="$exec $param"
fi
local -i idx=idx+1
done
case $type in
system)
_runtime_image_add_service $context "$working_dir" system "$name" "$exec" || return 1
;;
cron)
# NOTE(review): _runtime_image_add_service_cron expects
# (context, workdir, name, cron, exec) — only four args are passed here,
# so "$exec" lands in the cron slot and exec stays empty; confirm.
_runtime_image_add_service_cron $context "$working_dir" "$name" "$exec" || return 1
;;
normal)
_runtime_image_add_service $context "$working_dir" normal "$name" "$exec" || return 1
;;
*)
logerror "[runtime_image_builder] unknown service type: $type"
;;
esac
}
#runtime_image_pre_build_run() {
# return 1
#}
#
#runtime_image_post_build_run() {
# return 1
#}
# Print usage for runtime_image_pre_build_script. A quoted here-doc keeps the
# text literal, matching the original single-quoted echo byte for byte.
runtime_image_pre_build_script_help() {
cat <<'EOF'

Run pre-build script within building of runtime image.
usage:
runtime_image_pre_build_script [options] <script_path>
options:
-d <path> working directory.
-c <context_name> specified build context. default: system
example:
runtime_image_pre_build_script install_lnmp.sh
runtime_image_pre_build_script -c my_context install_nginx.sh

EOF
}
# Register one or more pre-build scripts for a context.
# Options: -c context (default: system), -d working directory.
# Each positional arg must be an existing file; all are recorded under a
# single key hashed from the file contents (hash_file_for_key).
runtime_image_pre_build_script() {
OPTIND=0
while getopts 'c:d:' opt; do
case $opt in
c)
local context=$OPTARG
;;
d)
local working_dir=$OPTARG
;;
*)
runtime_image_pre_build_script_help
logerror "[runtime_image_builder]" unexcepted options -$opt.
;;
esac
done
if [ -z "$context" ]; then
local context=system
fi
local -i optind=$OPTIND-1
shift $optind
local -i idx=1
local -i failure=0
local script_appended=
while [ $idx -le $# ]; do
eval "local script=\$$idx"
if ! [ -f "$script" ]; then
logerror "[runtime_image_builder] not a script file: $script"
local -i failure=1
fi
local script_appended="$script_appended '$script'"
local -i idx=idx+1
done
# NOTE(review): `strip` output is unquoted, so with several scripts this
# test gets multiple words ([ -z a b ] errors, treated as false) — works by
# accident for the "no scripts" case only; confirm before restructuring.
if [ -z `strip $script_appended` ]; then
runtime_image_pre_build_script_help
logerror "script missing."
return 1
fi
if [ $failure -gt 0 ]; then
return 1
fi
# NOTE(review): "SFRIPT" is a typo but matches the read side in
# _generate_runtime_image_dockerfile_prebuild_scripts; keep consistent.
local key=`eval "hash_file_for_key $script_appended"`
eval "_SAR_RT_BUILD_${context}_PRE_BUILD_SCRIPTS=\"\$_SAR_RT_BUILD_${context}_PRE_BUILD_SCRIPTS \$key\""
eval "_SAR_RT_BUILD_${context}_PRE_BUILD_SFRIPT_${key}_PATH=\"$script_appended\""
eval "_SAR_RT_BUILD_${context}_PRE_BUILD_SCRIPT_${key}_WORKDIR=$working_dir"
}
# Print usage for runtime_image_post_build_script. A quoted here-doc keeps the
# text literal, matching the original single-quoted echo byte for byte.
runtime_image_post_build_script_help() {
cat <<'EOF'

Run post-build script within building of runtime image.
usage:
runtime_image_post_build_script [options] <script_path>
options:
-d <path> working directory.
-c <context_name> specified build context. default: system
example:
runtime_image_post_build_script cleaning.sh
runtime_image_post_build_script -c my_context send_notification.sh

EOF
}
# Register one or more post-build scripts for a context.
# Mirror of runtime_image_pre_build_script (note: the failure/missing checks
# are ordered differently here — failure first, then "script missing").
runtime_image_post_build_script() {
OPTIND=0
while getopts 'c:d:' opt; do
case $opt in
c)
local context=$OPTARG
;;
d)
local working_dir=$OPTARG
;;
*)
runtime_image_post_build_script_help
logerror "[runtime_image_builder]" unexcepted options -$opt.
;;
esac
done
if [ -z "$context" ]; then
local context=system
fi
local -i optind=$OPTIND-1
shift $optind
local -i idx=1
local -i failure=0
local script_appended=
while [ $idx -le $# ]; do
eval "local script=\$$idx"
if ! [ -f "$script" ]; then
logerror "[runtime_image_builder] not a script file: $script"
local -i failure=1
fi
local script_appended="$script_appended '$script'"
local -i idx=idx+1
done
if [ $failure -gt 0 ]; then
return 1
fi
# NOTE(review): unquoted `strip` output — see the pre-build variant.
if [ -z `strip $script_appended` ]; then
runtime_image_post_build_script_help
logerror "script missing."
return 1
fi
# NOTE(review): "SFRIPT" typo kept — matches the read side.
local key=`eval "hash_file_for_key $script_appended"`
eval "_SAR_RT_BUILD_${context}_POST_BUILD_SCRIPTS=\"\$_SAR_RT_BUILD_${context}_POST_BUILD_SCRIPTS \$key\""
eval "_SAR_RT_BUILD_${context}_POST_BUILD_SFRIPT_${key}_PATH=\"$script_appended\""
eval "_SAR_RT_BUILD_${context}_POST_BUILD_SCRIPT_${key}_WORKDIR=$working_dir"
}
#runtime_image_health_check_script() {
# return 1
#}
| true
|
2b3280e6f6ed68291f1b0b436a216a913f5b956e
|
Shell
|
clementval/hpc-scripts
|
/ecmwf/offline.claw_install.step1_ecgate
|
UTF-8
| 1,196
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# This scripts helps to pack an offline version of the claw-compiler repository
# to be build on LXG cluster. This script needs to be executed on the ecgate.
#
COMPUTER=$(hostname)
CLAW_BRANCH="master"
CLAW_REPO="https://github.com/C2SM-RCM/claw-compiler.git"
# Load recent version of git
module load git
echo ""
echo "========================================="
echo "CLAW FORTRAN Compiler offline ECGATE step"
echo "========================================="
echo "- Computer: $COMPUTER"
echo "- Repo: $CLAW_REPO"
echo "- Branch: $CLAW_BRANCH"
echo ""
# Set up ANT variables
export ANT_HOME="/home/ms/ec_ext/extvc/install/ant/apache-ant-1.9.9"
export PATH=$PATH:${ANT_HOME}/bin
# Needed to be able to clone repository and resolve ANT dependencies
export https_proxy=http://proxy.ecmwf.int:3333
export http_proxy=http://proxy.ecmwf.int:3333
export ANT_OPTS="-Dhttp.proxyHost=proxy.ecmwf.int -Dhttp.proxyPort=3333 -Dhttps.proxyHost=proxy.ecmwf.int -Dhttps.proxyPort=3333"
# Start from a clean slate: remove any previous clone and archives.
rm -rf claw-compiler*
git clone -b $CLAW_BRANCH $CLAW_REPO
cd claw-compiler || exit 1
# Resolve/download everything needed for an offline build.
./scripts/offline
cd - || exit 1
# Pack the prepared tree (claw-compiler/* skips dotfiles at the top level,
# e.g. .git) and compress it, then drop the working clone.
tar cvf claw-compiler.tar claw-compiler/*
gzip claw-compiler.tar
rm -rf claw-compiler
| true
|
c5b0c1a69985ed135f292934458870a6ee59e550
|
Shell
|
zzzbra/dotfiles
|
/bash_profile
|
UTF-8
| 8,590
| 2.734375
| 3
|
[] |
no_license
|
# _ _ __ _ _
# | |__ __ _ ___| |__ _ __ _ __ ___ / _(_) | ___
# | '_ \ / _` / __| '_ \ | '_ \| '__/ _ \| |_| | |/ _ \
# | |_) | (_| \__ \ | | | | |_) | | | (_) | _| | | __/
# |_.__/ \__,_|___/_| |_| | .__/|_| \___/|_| |_|_|\___|
# |_|
# =====================
# SYSTEM SETTINGS
# =====================
# =====================
# Path
# =====================
# Homebrew directories
# PATH="/usr/local/bin:$PATH"
# Node Package Manager
# PATH="/usr/local/share/npm/bin:$PATH"
# Heroku Toolbelt
# PATH="/usr/local/heroku/bin:$PATH"
# assigning it its variables here
PATH="/usr/local/bin:/usr/bin:/bin:/usr/sbin:/usr/local/sbin:/sbin:/opt/X11/bin:/usr/local/git/bin"
PATH="/usr/local/share/npm/bin:$PATH"
PATH="/usr/local/heroku/bin:$PATH"
PATH="/Users/zzzbra/.rbenv/shims:$PATH"
# TODO dynamically build PATH with USER env variable
# ====================
# File Navigation
# ====================
# LS lists information about files. -F includes a slash for directories.
alias ls='ls -F'
# long list format including hidden files
alias ll='ls -la'
# Because my brain is all fucked from having to use a windows all day
alias dir='ls -la'
# Adds colors to LS
export CLICOLOR=1
# http://geoff.greer.fm/lscolors/
# Describes what color to use for which attribute (files, folders etc.)
# export LSCOLORS=faexcxdxbxegedabagacad
# go back one directory
alias b='cd ..'
# If we make a change to our bash profile we need to reload it
alias reload="clear; source ~/.bash_profile"
## Tab improvements
bind 'set completion-ignore-case on'
# make completions appear immediately after pressing TAB once
bind 'set show-all-if-ambiguous on'
# bind 'TAB: menu-complete'
# Prefer US English
export LC_ALL="en_US.UTF-8"
# use UTF-8
export LANG="en_US"
# get ip address
alias ip?="ifconfig en0 | grep 'inet'"
# =================
# History
# =================
# http://jorge.fbarr.net/2011/03/24/making-your-bash-history-more-efficient/
# elif infocmp xterm-256color >/dev/null 2>&1; then
# export TERM=xterm-256color
# fi
if tput setaf 1 &> /dev/null; then
tput sgr0
if [[ $(tput colors) -ge 256 ]] 2>/dev/null; then
# this is for xterm-256color
BLACK=$(tput setaf 0)
RED=$(tput setaf 1)
GREEN=$(tput setaf 2)
YELLOW=$(tput setaf 226)
BLUE=$(tput setaf 4)
MAGENTA=$(tput setaf 5)
CYAN=$(tput setaf 6)
WHITE=$(tput setaf 7)
ORANGE=$(tput setaf 172)
GREEN=$(tput setaf 190)
PURPLE=$(tput setaf 141)
BG_BLACK=$(tput setab 0)
BG_RED=$(tput setab 1)
BG_GREEN=$(tput setab 2)
BG_BLUE=$(tput setab 4)
BG_MAGENTA=$(tput setab 5)
BG_CYAN=$(tput setab 6)
BG_YELLOW=$(tput setab 226)
BG_ORANGE=$(tput setab 172)
BG_WHITE=$(tput setab 7)
else
MAGENTA=$(tput setaf 5)
ORANGE=$(tput setaf 4)
GREEN=$(tput setaf 2)
PURPLE=$(tput setaf 1)
WHITE=$(tput setaf 7)
fi
BOLD=$(tput bold)
RESET=$(tput sgr0)
UNDERLINE=$(tput sgr 0 1)
else
BLACK="\[\e[0;30m\]"
RED="\033[1;31m"
ORANGE="\033[1;33m"
GREEN="\033[1;32m"
PURPLE="\033[1;35m"
WHITE="\033[1;37m"
YELLOW="\[\e[0;33m\]"
CYAN="\[\e[0;36m\]"
BLUE="\[\e[0;34m\]"
BOLD=""
RESET="\033[m"
fi
# ---------------------
# Print Stats on terminal load
# ---------------------
echo ${BG_RED}${WHITE} zzzbra ${RESET}${WHITE}${BG_BLACK} Creative Technologist ${RESET}${BG_YELLOW}${BLACK} Ars longa, vita brevis ${RESET}
echo "------------------------------------------"
echo $(ruby -v)
echo $(rails -v)
echo $(git --version)
# echo $(heroku --version)
echo $(psql --version)
echo $(brew -v)
echo "npm " $(npm -v)
# if internet connection
# echo "TIL: " $(taocl)
# fi
echo "------------------------------------------"
# ---------------------
# style the git prompt
# ---------------------
style_user="\[${RESET}${WHITE}\]"
style_path="\[${RESET}${CYAN}\]"
style_chars="\[${RESET}${WHITE}\]"
style_branch="${RED}"
# ---------------------
# Build the git prompt
# ---------------------
# Example with committed changes: username ~/documents/GA/wdi on master[+]
PS1="${style_user}\u" # Username
PS1+="${style_path} \w" # Working directory
PS1+="\$(prompt_git)" # Git details
PS1+="\n" # Newline
# PS1+="⫸ \[${RESET}\]" # $ (and reset color) ######### THIS IS THE OLD CODE: ${style_chars}\$
PS1+="${style_chars}\$ \[${RESET}\]" # $ (and reset color)
# =====================
# APPLICATION SETTINGS
# =====================
# ================
# Editor
# ================
export EDITOR="vim"
# =================
# Git
# =================
# -----------------
# Aliases
# -----------------
# Undo a git push
alias undopush="git push -f origin HEAD^:master"
# undo a commit
alias uncommit="git reset --soft HEAD^"
# -----------------
# For the prompt
# -----------------
# Long git to show + ? !
# Succeed only when the current directory is inside a git work tree.
# All output is discarded; only the exit status matters.
is_git_repo() {
    git rev-parse --is-inside-work-tree &> /dev/null
}
# Succeed when the current directory is inside the .git directory itself.
# git prints the word "true" or "false"; map that onto our exit status.
is_git_dir() {
    local location
    location=$(git rev-parse --is-inside-git-dir 2> /dev/null) || return
    [[ "$location" == "true" ]]
}
# Print the current branch name, the short SHA for a detached HEAD,
# or "(unknown)" when neither can be determined (e.g. outside a repo).
get_git_branch() {
    local branch_name
    # Get the short symbolic ref
    branch_name=$(git symbolic-ref --quiet --short HEAD 2> /dev/null) ||
    # If HEAD isn't a symbolic ref, get the short SHA
    branch_name=$(git rev-parse --short HEAD 2> /dev/null) ||
    # Otherwise, just give up
    branch_name="(unknown)"
    # BUG FIX: use an explicit format string and quote the value; the old
    # `printf $branch_name` word-split the name and treated any '%' in it
    # as a printf directive.
    printf '%s' "$branch_name"
}
# Git status information
# Build the git segment of the shell prompt: " on <branch>[<state>]".
# State flags: "+" staged changes, "!" unstaged changes, "?" untracked
# files, "$" stashed changes. Prints nothing and returns 1 when not in a
# work tree (or when inside the .git directory itself).
# Reads the color globals RED / RESET / WHITE / style_branch set earlier.
prompt_git() {
    local git_info git_state uc us ut st
    if ! is_git_repo || is_git_dir; then
        return 1
    fi
    git_info=$(get_git_branch)
    # Check for uncommitted changes in the index
    if ! $(git diff --quiet --ignore-submodules --cached); then
        uc="+"
    fi
    # Check for unstaged changes
    if ! $(git diff-files --quiet --ignore-submodules --); then
        us="!"
    fi
    # Check for untracked files
    if [ -n "$(git ls-files --others --exclude-standard)" ]; then
        ut="${RED}?"
    fi
    # Check for stashed files
    if $(git rev-parse --verify refs/stash &>/dev/null); then
        st="$"
    fi
    git_state=$uc$us$ut$st
    # Combine the branch name and state information
    if [[ $git_state ]]; then
        git_info="$git_info${RESET}[$git_state${RESET}]"
    fi
    printf "${WHITE} on ${style_branch}${git_info}"
}
# Branch Autocompletion
# http://code-worrier.com/blog/autocomplete-git/
if [ -f ~/.git-completion.bash ]; then
. ~/.git-completion.bash
fi
# =================
# Shortcuts
# =================
# =================
# Aliases
# =================
# Directories
alias dev="cd ~/dev"
alias storage="cd ~/dev/libraries"
alias installs="cd /usr/local/bin"
alias cellar="cd /usr/local/Cellar"
alias scratch="cd ~/dev/scratch"
alias sites="cd ~/dev/sites"
alias dotfiles="cd ~/dotfiles"
# Edit Virtual Host or Host Files
alias hosts='sudo vim /etc/hosts'
alias vhosts='sudo vim /etc/apache2/extra/httpd-vhosts.conf'
# Apache Services
alias apache_start='sudo apachectl start'
alias apache_restart='sudo apachectl restart'
alias apache_stop='sudo apachectl stop'
alias httpd_config='sudo vim /etc/apache2/httpd.conf'
# Open this file
alias bp="vim ~/.bash_profile"
# Set Logout Message
alias exit='sh ~/seeyouspacecowboy.sh; sleep 2; exit'
# Hide or show hidden . files in MacOS
alias showFiles="defaults write com.apple.finder AppleShowAllFiles YES; killall Finder /System/Library/CoreServices/Finder.app"
alias hideFiles="defaults write com.apple.finder AppleShowAllFiles NO; killall Finder /System/Library/CoreServices/Finder.app"
# =================
# Source Files
# =================
# .bash_settings and .bash_prompt should be added to .gitignore_global
# An extra file where you can create other settings, such as your
# application usernames or API keys...
if [ -f ~/.bash_settings ]; then
source ~/.bash_settings
fi
# An extra file where you can create other settings for your prompt.
if [ -f ~/.bash_prompt ]; then
source ~/.bash_prompt
fi
# =================
# Miscellany
# =================
# Below here is an area for other commands added by outside programs or
# commands. Attempt to reserve this area for their use!
# Add GHC 7.8.3 to the PATH, via http://ghcformacosx.github.io/
export GHC_DOT_APP="/Applications/ghc-7.8.3.app"
if [ -d "$GHC_DOT_APP" ]; then
export PATH="${HOME}/.cabal/bin:${GHC_DOT_APP}/Contents/bin:${PATH}"
fi
export NVM_DIR="/Users/zzzbra/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" # This loads nvm
source /usr/local/opt/nvm/nvm.sh
# =================
# Python
# =================
export PYENV_ROOT="$HOME/.pyenv"
| true
|
17cbce9355072002018ba640d8ad2cb724a8aff2
|
Shell
|
peppelinux/rk3066a-box-4.4
|
/rootfs/debian_init.sh
|
UTF-8
| 1,699
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/sh
# Second-stage setup for a debootstrap'ed Debian (jessie) rootfs on an
# RK3066 box: finishes the bootstrap, symlinks busybox applets, and
# configures APT mirrors, locale, networking, console and SSH.
# NOTE(review): all paths below are relative (etc/...) — this assumes the
# current directory is the rootfs root; confirm against the caller.
/debootstrap/debootstrap --second-stage
# Symlink every busybox applet (except readlink) that does not already exist.
for f in $(busybox --list-full | grep -v readlink); do [ -e $f ] || ln -s /bin/busybox $f; done
# APT sources: USTC mirror for jessie plus security updates.
cat > etc/apt/sources.list << EOF
deb http://mirrors.ustc.edu.cn/debian/ jessie main contrib non-free
#deb http://mirrors.ustc.edu.cn/debian/ jessie-backports main contrib non-free
#deb http://mirrors.ustc.edu.cn/debian/ jessie-proposed-updates main contrib non-free
deb http://mirrors.ustc.edu.cn/debian-security/ jessie/updates main contrib non-free
EOF
# Default locale: US English, UTF-8 for every category.
cat > etc/default/locale << EOF
LC_ALL="en_US.UTF8"
LANG="en_US.UTF-8"
LANGUAGE="en_US:en"
LC_NUMERIC="en_US.UTF-8"
LC_TIME="en_US.UTF-8"
LC_MONETARY="en_US.UTF-8"
LC_PAPER="en_US.UTF-8"
LC_IDENTIFICATION="en_US.UTF-8"
LC_NAME="en_US.UTF-8"
LC_ADDRESS="en_US.UTF-8"
LC_TELEPHONE="en_US.UTF-8"
LC_MEASUREMENT="en_US.UTF-8"
EOF
# Convenience aliases for all interactive bash shells.
cat >> etc/bash.bashrc << EOF
alias df='df -Th'
alias free='free -h'
alias ls='ls -hF --color=auto'
alias ll='ls -AlhF --color=auto'
EOF
# Bring the system up to date and install basic networking tools.
apt-get update -qq && apt-get -y upgrade
apt-get -y install net-tools
apt-get -y install ifupdown
# Loopback plus DHCP on eth0.
cat >> etc/network/interfaces << EOF
auto lo
iface lo inet loopback
allow-hotplug eth0
iface eth0 inet dhcp
EOF
# Time zone, hostname, and a serial console on ttyS2 at 115200 baud.
ln -sf /usr/share/zoneinfo/Asia/Shanghai etc/localtime
echo "Asia/Shanghai" > etc/timezone
echo "RK3066" > etc/hostname
echo "T0:123:respawn:/sbin/getty 115200 ttyS2" >> etc/inittab
# Aliyun NTP server, boot to runlevel 3, allow root logins over SSH.
sed -i '/^NTPSERVERS=/cNTPSERVERS="ntp1.aliyun.com"' etc/default/ntpdate
sed -i 's/id:2:initdefault:/id:3:initdefault:/' etc/inittab
sed -i '/^PermitRootLogin/cPermitRootLogin yes' etc/ssh/sshd_config
# Generate the en_US.UTF-8 locale.
sed -i 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' etc/locale.gen
locale-gen en_US.UTF-8
# Default root password — intended to be changed after first login.
echo "root:admin" | chpasswd
apt-get clean
| true
|
983fd261066a380179c0b3a154b7bc5a8c90d678
|
Shell
|
Teamprojekt-HSTrier/Bidirektionales-Kommunikationssystem
|
/FAX/installFAX.sh
|
UTF-8
| 2,153
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# Install and configure everything needed to send faxes via HylaFAX on a
# Raspberry Pi: packages, modem detection, a cron autostart entry and the
# per-modem HylaFAX config file.
# Install all components required for sending faxes
sudo apt-get -y install hylafax-server gammu
sudo faxadduser -p raspberry -u 1002 root
# Determine the modem's serial interface by reading gammu-detect's output
# and stopping at the first line containing "Modem".
while read line ; do
    modem=$(echo $line | grep Modem)
    if [ -z "$modem" ]
    then
        temp=$line
    else
        break
    fi
done < <(gammu-detect)
# Extract the actual device name (third '/'-separated field of the line
# preceding the "Modem" match, e.g. "ttyACM0" from "/dev/ttyACM0").
# NOTE(review): relies on gammu-detect's output format — confirm.
seriell=$(echo $temp| cut -d'/' -f 3)
# Create the required folders and assign permissions
mkdir /home/pi/FAX
sudo chmod 777 /home/pi/FAX
sudo cp ./files/* /home/pi/FAX/
#sudo chmod 777 /home/pi/FAX/sendFAX.sh
#echo "Fuege Autostart-Eintrag hinzu..."
# Approach for appending a crontab entry:
# http://stackoverflow.com/questions/878600/how-to-create-a-cron-job-using-bash/878647#878647
# Register fax sending in the autostart (cron @reboot entry)
CRONTAB_ENTRY="@reboot /bin/bash /home/pi/FAX/CRONsendFAX.sh > /tmp/log 2>&1"
echo " Erstelle Backup der Crontab-Datei..."
crontab -l > cronbackup
if ! grep -q "$CRONTAB_ENTRY" "cronbackup"; then
    echo " Eintrag wird eingefuegt..."
    echo "@reboot /bin/bash /home/pi/FAX/CRONsendFAX.sh > /tmp/log 2>&1" >> cronbackup
    crontab cronbackup
    rm cronbackup
else
    echo "Eintrag bereits vorhanden."
fi
# Read in our own fax number (skipped when TEL_NR is already set)
if [ -z $TEL_NR ]
then
    echo -e "${farbcode}Bitte geben Sie die Telefonnummer der Fax Leitung ein, bestaetigen Sie mit Enter: "
    echo -e "Beispiel: +4965000000000${neutral}"
    read TEL_NR
    echo $TEL_NR > /home/pi/FAX/TEL_NR
fi
# Adjust the HylaFAX config template and copy it to the required location.
# From "+49...": countryCode = chars 2-3, areaCode = the following 4 digits.
countryCode=${TEL_NR:1:2}
areaCode=${TEL_NR:3:4}
cd /home/pi/FAX
body=$(cat config.ttyACM0)
such1="CountryCode: "
such2="AreaCode: "
such3="FAXNumber: "
such4="LongDistancePrefix: "
# Append the detected values after each template key ("such" = search term).
body="${body/$such1/$such1$countryCode}"
body="${body/$such2/${such2}0$areaCode}"
body="${body/$such3/$such3$TEL_NR}"
body="${body/$such4/$such4$countryCode}"
echo "$body" > /home/pi/FAX/config.$seriell.temp
sudo cp /home/pi/FAX/config.$seriell.temp /etc/hylafax/config.$seriell
rm /home/pi/FAX/config.$seriell.temp
| true
|
6aa16a39a432bb0c8171678093ef8d97ab178f25
|
Shell
|
gmoshkin/dotfiles
|
/ctrlp_cache.sh
|
UTF-8
| 521
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# Rebuild ctrlp's file-list caches for every directory listed in
# ~/.config/ctrlp_cache_dirs (one path per line, leading '~' allowed).
# find_all comes from the sourced utils.sh.
source ~/dotfiles/utils.sh

CTRLPCACHE=~/.cache/ctrlp
DIRSLISTFILE=~/.config/ctrlp_cache_dirs

# BUG FIX: read the list line by line instead of word-splitting
# $(cat ...), so directory names containing spaces now work (the old
# code carried an "XXX won't work with spaces" warning). The eval-based
# tilde expansion is replaced by a safe parameter expansion.
while IFS= read -r d; do
    # Skip blank lines.
    [ -n "$d" ] || continue
    # Expand a leading '~' to $HOME (read does not perform tilde expansion).
    d="${d/#\~/$HOME}"
    # Cache file name: collapse '//' into '/', then turn '/' into '%'.
    f="${CTRLPCACHE}/$(echo "$d" | sed 's/\/\//\//g' | sed 's/\//%/g').txt"
    if [ -d "$d" ]; then
        echo "Searching files for '$d'"
        # Subshell so the cd does not leak into later iterations.
        (cd "$d" && find_all > "$f")
        echo "Done. Written to '$f'"
    fi
done < "$DIRSLISTFILE"
| true
|
59616337022c427c5edadcbc61dd4ae1261bcc42
|
Shell
|
maryno-net/py-project
|
/debian/usr/bin/py-proj
|
UTF-8
| 335
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Run py_proj inside its virtualenv, forwarding all CLI arguments.
# Making temp folder for python eggs
[ -d ~/.python-eggs ] || mkdir -p ~/.python-eggs
# Changing rights to supress warnings
chmod g-wx,o-wx ~/.python-eggs
# Virtualenv path
PYTHON=/usr/lib/py_proj/bin/python
# Path to the script that must run under the virtualenv
SCRIPT=/usr/lib/py_proj/bin/py_proj
# BUG FIX: quote "$@" so arguments containing spaces are forwarded
# intact (unquoted $@ re-split every argument on whitespace).
# exec replaces this wrapper shell, preserving the exit status.
exec $PYTHON $SCRIPT "$@"
| true
|
2ceb5ef6e6942515f32a1ff2c90a028f3ba6ef5c
|
Shell
|
makyos/dotfiles
|
/bashrc
|
UTF-8
| 6,472
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
export BASH_SILENCE_DEPRECATION_WARNING=1
export PATH=/usr/local/sbin:$PATH
export PATH=~/bin:$PATH
export LANG=ja_JP.UTF-8
export LESSCHARSET=utf-8
export CLICOLOR=1
#export LSCOLORS=GxFxCxDxBxegedabagaced
#alias node="~/node/node-v13.12.0-linux-x64/bin/node"
#alias npm="~/node/node-v13.12.0-linux-x64/bin/npm"
function darwin() { [ $(uname) == 'Darwin' ]; }
function linux() { [ $(uname) == 'Linux' ]; }
# Ping-sweep a /24 network: `scan 192.168.1` lists hosts that answered.
# 254 pings run in parallel; only replies (lines containing "ttl") are kept.
function scan() {
    local subnet="$1"
    echo "${subnet}".{1..254} \
        | xargs -P 254 -n 1 ping -s 56 -c 1 -t 1 \
        | grep ttl
}
#### PS1
GIT_PS1_SHOWDIRTYSTATE=true # *:unstaged, +:staged
GIT_PS1_SHOWUNTRACKEDFILES=true # %:untracked
GIT_PS1_SHOWSTASHSTATE=true # $:stashed
GIT_PS1_SHOWUPSTREAM=auto # >:ahead, <:behind
GIT_PS1_STATESEPARATOR=':'
source ~/.git-completion.bash
source ~/.git-prompt.sh
function rev() { echo -n "\[\e[07m\]" ;}
function reset() { echo -n "\[\e[00m\]" ;}
function line() { echo -n "\[\e[04m\]" ;}
## \[ \] (文字数除外)で囲わないと表示が時々おかしくなるよ
#PS1="\n$(rev)[\u@$(hostname -f):\w$(__git_ps1 "(%s)")]$(reset)\n--> "
#PS1="[\u@$(hostname -f):\w$(__git_ps1 "(%s)")] "
#PS1="$(rev)[\w]$(reset) "
#PS1="$(rev)[\w]$(__git_ps1 "(%s)")$(reset) "
#PROMPT_COMMAND='__git_ps1 "\u@\h:\w" "\\\$ "'
PROMPT_COMMAND='__git_ps1 "$(rev)[${HOSTNAME}:\w]$(reset)" " "'
#### ls
darwin && alias ls='exa'
darwin && alias la='exa -a'
darwin && alias ll='exa -l'
darwin && alias lla='exa -la'
linux && alias ls='ls --color=auto'
linux && alias la='ls -A --color=auto'
linux && alias ll='ls -l --color=auto'
linux && alias lla='ls -la --color=auto'
#### emacs
alias emacs='emacs -nw'
alias e='emacs -nw'
linux && alias vim='nvim'
linux && alias v='nvim'
alias src="cd ~/src"
alias dot="cd ~/src/dotfiles"
alias junk="cd ~/src/JUNK"
alias sand="cd ~/src/sandbox"
alias bs='browser-sync start --server --files "./**/*.html" --files "./**/*.css" --files "./**/*.js"'
alias clamav="sudo freshclam;sudo clamscan -r -i"
#alias sakura='mosh --ssh="ssh -p 22000" makyos@www11364ue.sakura.ne.jp'
alias sakura='ssh -p 22000 makyos@www11364ue.sakura.ne.jp'
alias bashrc=". ~/.bashrc"
alias .bashrc="cd ~; vim .bashrc; bashrc; cd -"
alias vimrc="cd ~/.config/nvim; vim init.vim; cd -"
alias rust="evcxr"
alias tmux0="tmux a -t 0 -d"
alias @="tmux a -t 0 -d"
alias .tmux='vim ~/.tmux.conf'
darwin && alias wifi="/System/Library/PrivateFrameworks/Apple80211.framework/Versions/A/Resources/airport -s"
#darwin && alias sw="xcrun swift"
darwin && alias t="open -a /Applications/Utilities/Terminal.app ."
darwin && alias brew_cask_upgrade='for c in `brew cask list`; do ! brew cask info $c | grep -qF "Not installed" || brew cask install $c; done'
darwin && alias fsw='fswatch -o . | xargs -n1 -I{} '
#### homebrew
darwin && export NODEBREW_ROOT=$HOME/.nodebrew
darwin && . ~/.token
## sudo complete
complete -cf sudo
## DOCKER
alias dc-ps='docker container ps -a --format "table{{.ID}}\t{{.Names}}\t{{.Ports}}\t{{.Status}}"'
function dcsh() {
docker container run -e USER=$USER -u $(id -u):$(id -g) -v $PWD:$PWD -w ${PWD} --rm -it ${1} /bin/sh
}
function dcsh-su() {
docker container run -v ${PWD}:${PWD}:delegated -w ${PWD} --rm -it ${1} /bin/sh
}
function dc-exec-sh() {
docker container exec -e USER=$USER -u $(id -u):$(id -g) -it ${1} /bin/sh
}
function dc-exec-sh-su() {
docker container exec -it ${1} /bin/sh
}
function dcimg() { docker images --format "{{.Repository}}:{{.Tag}}" | sort | fzf ;}
function dcrun() {
docker container run \
-e USER=${USER} \
-u $(id -u):$(id -g) \
-v ${PWD}:${PWD}:delegated \
-w ${PWD} \
--rm -it ${@}
}
alias dox='docker exec -it `docker ps --format "{{.Names}}" | fzf` bash'
# NODE="-p 3000:3000 node:lts-alpine"
# function node() { dcrun $NODE "node ${@}" ;}
# function npm() { dcrun $NODE "npm ${@}" ;}
## RUST="rustlang/rust:nightly"
## RUST="rust:latest" ; RUSTREG="/usr/local/cargo/registry"
# RUST="ekidd/rust-musl-builder:latest"; RUSTREG="/home/rust/.cargo/registry"
# function cargo() {
# mkdir -p ${PWD}/registry
# dcrun -v ${PWD}/registry:$RUSTREG $RUST "cargo ${@}"
# }
# function rustup() { dcrun $RUST "rustup ${@}" ;}
[ -e $HOME/.cargo/env ] && source $HOME/.cargo/env
# PYTHON2="python:2-alpine"
# function py2() { dcrun $PYTHON2 python ${@} ;}
# function pip2() { dcrun $PYTHON2 pip ${@} ;}
## function pip2() { dcrun $PYTHON2 pip -t ./pip ${@} ;}
function fw() { sudo firewall-cmd --zone=public --list-all ;}
function fw-open() { sudo firewall-cmd --zone=public --add-port=${1}/tcp; fw ;}
function fw-close() { [ ${1} != "22000" ] && sudo firewall-cmd --zone=public --remove-port=${1}/tcp; fw ;}
function today() { date +%Y-%m%d ;}
function pora() { if [ -p /dev/stdin ]; then cat -; else echo ${@}; fi ;}
function killmosh() { kill $(pidof mosh-server) ;}
# Archive each given path (or a path piped in via pora) as a .tgz under
# /tmp/<hostname>__<date>/, mirroring the original directory layout.
# yyyy/mo/dd are the date helper functions defined later in this file.
# NOTE(review): the quoted "$(pora ${@})" expands to a single word, so
# several arguments collapse into one loop iteration — confirm whether
# multi-file input is expected to work.
function tgz() {
    local D=/tmp/${HOSTNAME}__$(yyyy)-$(mo)$(dd)
    for f in "$(pora ${@})"; do
        # Resolve to an absolute path.
        ff=$(readlink -f ${f})
        # Destination directory mirrors the source's absolute path.
        dd=${D}$(dirname ${ff})
        mkdir -p ${dd}
        # Archive from the parent directory so the tarball stores only
        # the basename, then return to where we were.
        cd $(dirname ${ff})
        tar zcf ${dd}/$(basename ${ff}.tgz) $(basename ${ff})
        cd - > /dev/null
        ls -lh ${dd}/$(basename ${ff}.tgz)
    done
}
function yyyy() { date "+%Y" ;}
function mo() { date "+%m" ;}
function dd() { date "+%d" ;}
function hh() { date "+%H" ;}
function mi() { date "+%M" ;}
function ss() { date "+%S" ;}
function ns() { date "+%N" ;}
function wd() { date "+%a" ;}
# Serve the current directory over HTTP on port $1 (default 8080),
# opening the firewall port for the lifetime of the server.
# NOTE(review): `python -m SimpleHTTPServer` is Python 2; on systems
# without python2 this needs `python3 -m http.server` — left unchanged
# here to preserve behavior.
function httpd() {
    # BUG FIX: the old default logic `[ ${P} != "" ] && P=8080` did the
    # opposite of its intent — it overwrote any user-supplied port with
    # 8080, and was a unary-operator error when $1 was empty. Use a
    # default-value expansion instead.
    local P="${1:-8080}"
    fw-open "${P}"
    echo "${HOSTNAME}:${P}"
    python -m SimpleHTTPServer "${P}"
    fw-close "${P}"
}
function findnew() {
inai_sec=${1}
while true; do
if [ ! -z $(find ${2} -type f -newermt "$(date "+%Y-%m-%d %H:%M:%S" --date "${inai_sec} seconds ago")") ]; then
eval "${3}"
echo; echo
fi
sleep ${inai_sec}
done
}
function cs() {
fw-open 8080
docker run --rm -it -p 8080:8080 -u $(id -u):$(id -g) -v "${HOME}/.local/share/code-server:/home/coder/.local/share/code-server" -v "$PWD:/home/coder/project" codercom/code-server:v2
fw-close 8080
}
[ -f ~/.fzf.bash ] && source ~/.fzf.bash
function red() { printf "%s\n" "$(tput setaf 1)${1}$(tput sgr0)" ;}
function green() { printf "%s\n" "$(tput setaf 2)${1}$(tput sgr0)" ;}
function yellow() { printf "%s\n" "$(tput setaf 3)${1}$(tput sgr0)" ;}
function cyan() { printf "%s\n" "$(tput setaf 6)${1}$(tput sgr0)" ;}
function code() {
docker run -it -p 0.0.0.0:8080:8080 \
-v "$PWD:/home/coder/project" \
-u "$(id -u):$(id -g)" \
codercom/code-server:latest
}
| true
|
751f7ca919657c703351f1d7588321ce1063e385
|
Shell
|
binghuan/sample_video_downloader
|
/download.sh
|
UTF-8
| 1,918
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
# BUG FIX: shebang changed from /bin/sh — the script uses (( fileIndex++ )),
# a bashism that silently breaks (infinite loop) under dash-style /bin/sh.
echo "/**********************************************"
echo "/* Author: BH_Lin"
echo "/* Tool to download videos for Demo"
echo "**********************************************/"
# Remote locations and the index range to crawl.
fileHost="https://my.cdn.tokyo-hot.com/media/samples/"
webHost="https://www.tokyo-hot.com/product/"
fileExtension=".mp4"
STARTED_INDEX_NUMBER=1157
MAX_INDEX_NUMBER=2999
fileIndex=${STARTED_INDEX_NUMBER}
prefixText=""
outputFile="sampleVideos.json"
# Start the JSON output file from scratch.
if [ -f "${outputFile}" ]; then
    rm -f "${outputFile}"
fi
echo "[" >"${outputFile}"
while [ "${fileIndex}" -le "${MAX_INDEX_NUMBER}" ]; do
    fileName="n${fileIndex}${fileExtension}"
    videoSrc="${fileHost}${fileName}"
    echo "► Ready to download: ${videoSrc}"
    [ -f "${fileName}" ] && rm -f "${fileName}"
    wget "${videoSrc}"
    # Only scrape metadata when the video actually downloaded.
    if [ -f "${fileName}" ]; then
        filePath="n${fileIndex}"
        webPage="${webHost}${filePath}"
        echo "► Ready to download ${webPage}"
        [ -f "index.html" ] && rm -f "index.html"
        wget "${webPage}" -O index.html
        # Scrape the poster URL and page title out of the product page.
        posterUrl=$(cat "index.html" | grep -i -E "poster" | sed 's/.*=\"//g' | sed 's/\">.*//g')
        posterFileName="${filePath}.jpg"
        [ -f "${posterFileName}" ] && rm -f "${posterFileName}"
        echo "► Ready to download poster ${posterUrl}"
        wget "${posterUrl}" -O "${posterFileName}"
        title=$(cat index.html | grep -i -E "<title" | sed 's/.*<title>//g' | sed 's/<\/title>//g')
        echo ">---------------------------------------------------------------->"
        echo "title: ${title}"
        echo "src: ${fileName}"
        echo "poster: ${posterFileName}"
        echo "<----------------------------------------------------------------<\n"
        # Append this record; prefixText becomes "," after the first one
        # so entries stay comma-separated.
        echo "${prefixText}{ \"title\": \"${title}\", \"src\": \"${fileName}\", \"poster\": \"${posterFileName}\"}" >>"${outputFile}"
        prefixText=","
    fi
    ((fileIndex++))
done
# BUG FIX: the closing bracket was redirected to the undefined
# ${jsonFile} (an "ambiguous redirect" error), so the JSON array was
# never closed. Write it to the real output file.
echo "]" >>"${outputFile}"
| true
|
77a4dba451f851d4046a9a90fd96b054c2b78dfa
|
Shell
|
arssher/conf
|
/bash/.bash_scripts/bin/split_pdf.sh
|
UTF-8
| 302
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# Split each pdf in curr dir into single page ones, named <name>_<page>.pdf.
# Requires qpdf (page count) and pdftk (page extraction).
for pdf in *.pdf; do
    # Guard against the literal '*.pdf' when no PDFs exist.
    [ -e "$pdf" ] || continue
    # BUG FIX: quote all expansions — unquoted ${pdf}/${numpages} broke
    # on file names containing spaces or glob characters.
    numpages=$(qpdf --show-npages "$pdf")
    for i in $(seq 1 "$numpages"); do
        outf="${pdf%.*}_${i}.pdf"
        echo "extracting page ${i} of file ${pdf} to ${outf}"
        pdftk "$pdf" cat "$i" output "$outf"
    done
done
| true
|
ae34cca0156ebddb9eccf6f899fdc90d83f66495
|
Shell
|
jgordon3/VACC_scripts
|
/run_STAR_on_SGE.sh
|
UTF-8
| 4,819
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
#$ -N STAR
#$ -cwd
#$ -j y
# Request [num] amount of [type] nodes
#$ -pe threads 8
# STAR path: /slipstream/galaxy/production/galaxy-dist/tools/star/STAR
######################## DEFAULTS #############################################
DATE=`date`; GENOME="hg38"; NAME="J. Gordon"; RECIPE="SE"; MODE="RNA";
######################## USAGE ###############################################
# Print the CLI help text for run_STAR_on_SGE.sh and exit non-zero.
# NOTE(review): $DIV1 and $DIV2 are never defined in this script, so the
# banner lines expand empty — confirm whether they were meant to be set
# in the DEFAULTS section.
usage ()
{
printf "%s\n" "" "$DIV1"
printf "%s\t" "$DIV2" "run_STAR_on_SGE.sh" "" "" "" "$DIV2"
printf "%s\n" "" "$DIV1" ""
printf "%s\n" "run_STAR_on_SGE.sh" ""
printf "%s\n" "REQUIRES (NON OPTIONAL): A fastq file to align."
printf "%s\n" "This can be providied by the -F flag with a path/to/a/fastq"
printf "%s\n" "REQUIRES: A reference genome"
printf "%s\n" "OPTIONS: -G: GENOME can be hg38 (default), hg19, mm9, mm10"
printf "%s\n" "OPTIONS: -R: RECIPE can be single end SE (default) or paired end PE"
printf "%s\n" "OPTIONS: -M: MODE can be RNA (defaut) or CHIP"
exit 1;
}
######################## FLAGS ##################################################
while getopts "F:G:t:u:R:M:h:" opt; do
case $opt in
F) FASTQ=$OPTARG;;
G) GENOME=$OPTARG;;
t) FILTER=$OPTARG;;
u) NAME=$OPTARG;;
R) RECIPE=$OPTARG;;
M) MODE=$OPTARG;;
h) usage;;
:) usage;;
*) usage;;
esac
done
shift $(($OPTIND -1))
###################### CHECKS #####################################
# FASTQ
FASTQ=$(readlink -f $FASTQ)
if [ ! -f "$FASTQ" ]; then echo "Need valid path to fastq or fastq.gz file"; fi
# PAIRED
if [[ $RECIPE == "PE" ]]; then FASTQ2=${FASTQ/_R1_/_R2_};
if [ ! -f $FASTQ2 ]; then echo "Paired end flag used (-R PE) and a matching pair could not be found."; echo 'Check that file name is in: "Some_fastq_R2_000.fastq" format'; usage;
fi
fi
# GZIP
FASTQEXT=${FASTQ##*.}
if [ $FASTQEXT = "gz" ]; then gunzip $FASTQ; FASTQ=${FASTQ%.gz}; fi
FASTQ2EXT=${FASTQ2##*.}
if [ -f $FASTQ2 ] && [[ $FASTQ2EXT = "gz" ]]; then gunzip $FASTQ2; FASTQ2=${FASTQ2%.gz}; fi
# OUTPUT FOLDER
if [ ! -d "./BAMS" ]; then mkdir BAMS; fi
###################### FILE NAMES #####################################
BASE=`basename $FASTQ`
ABRV_NAME=${BASE%.fastq}
OUTPUT_NAME=$ABRV_NAME"_STAR_ALIGNMENT.bam"
# GENOME ASSIGNMENT
if [ $GENOME == "hg38" ]; then INDEX=/slipstream/galaxy/data/hg38/star_index
elif [ $GENOME == "hg19" ]; then INDEX=/slipstream/galaxy/data/hg19/star_index
elif [ $GENOME == "mm10" ]; then INDEX=/slipstream/galaxy/data/mm10/star_index
elif [ $GENOME == "mm9" ]; then INDEX=/slipstream/galaxy/data/mm9/star_index
else echo "could not find specified index"; usage;
fi
##################### RUN STAR ##########################################################
if [ $MODE == "RNA" ]; then
STAR --genomeLoad NoSharedMemory --genomeDir $INDEX --readFilesIn $FASTQ $FASTQ2 --runThreadN 8 --seedSearchStartLmax 30 --outTmpDir $ABRV_NAME --outFileNamePrefix $ABRV_NAME --outSAMtype BAM SortedByCoordinate --outStd BAM_SortedByCoordinate > $OUTPUT_NAME
elif [ $MODE == "CHIP" ]; then
STAR --genomeLoad NoSharedMemory --genomeDir $INDEX --readFilesIn $FASTQ $FASTQ2 --runThreadN 8 --alignIntronMax 1 --seedSearchStartLmax 30 --outTmpDir $ABRV_NAME --outFileNamePrefix $ABRV_NAME --outSAMtype BAM SortedByCoordinate --outStd BAM_SortedByCoordinate > $OUTPUT_NAME
else usage
fi
##################### CLEAN UP ##########################################################
# GZIP
if [ $FASTQEXT = "gz" ]; then gzip $FASTQ; fi
if [ -f $FASTQ2 ] && [[ $FASTQ2EXT = "gz" ]]; then gzip $FASTQ2; fi
# REMOVE SORT BAMS
rm -r $ABRV_NAME
# MOVE BAM
mv $OUTPUT_NAME ./BAMS/
# REMOVE UNNECCESSARY LOGS
rm $ABRV_NAME"Log.out" $ABRV_NAME"Log.progress.out" $ABRV_NAME"Log.std.out" $ABRV_NAME"SJ.out.tab"
# CREATE SUMMARY
ALIGN_SUM="STAR_alignment_summaries.txt"
COMB_LOG="STAR_combined_logs.txt"
STAR_LOG="$ABRV_NAME""Log.final.out"
if [ ! -f $ALIGN_SUM ]; then touch $ALIGN_SUM;
printf "%s\t" "File" >> $ALIGN_SUM;
while IFS='' read -r line || [[ -n "$line" ]]; do
new=$(echo $line | awk '{$1=$1}1');
split=$(echo $new | awk -F\| '{print $1}');
printf "%s\t" ${split// /_} >> $ALIGN_SUM;
done < "$STAR_LOG"
printf "\n" >> $ALIGN_SUM
fi
printf "%s\t" "$ABRV_NAME" >> $ALIGN_SUM;
while IFS='' read -r line || [[ -n "$line" ]]; do
new=$(echo $line | awk '{$1=$1}1');
split=$(echo $new | awk -F\| '{print $2}');
printf "%s\t" ${split// /} >> $ALIGN_SUM;
done < "$STAR_LOG"
printf "\n" >> $ALIGN_SUM
echo "$ABRV_NAME" >> $COMB_LOG
cat $STAR_LOG >> $COMB_LOG
printf "\n"
rm $STAR_LOG
# TROUBLESHOOT ######
#echo "FASTQ: $FASTQ"; echo "FASTQ2: $FASTQ2"; echo "FASTQEXT: $FASTQEXT"; echo "FASTQ2EXT: $FASTQ2EXT";
#echo "BASE: $BASE"; echo "ABRV_NAME: $ABRV_NAME"; echo "OUTPUT_NAME: $OUTPUT_NAME";
#echo "RECIPE: $RECIPE"; echo "GENOME: $GENOME"; echo "MODE: $MODE";
| true
|
738f9d28a226ea24ed078777bee8b6b4404bc002
|
Shell
|
nfaction/ubuntu-install
|
/setup-ssh.sh
|
UTF-8
| 1,510
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# Prepare SSH directories, authorized_keys files and a stock ~/.ssh/config
# for both root and the current user, then optionally generate key pairs.
echo "Setting up SSH..."
###########################
# Setting up root account #
###########################
# NOTE(review): writing under /root requires running this script as root.
mkdir -p /root/.ssh;chmod 700 /root/.ssh;cd /root/.ssh;mkdir -p /root/.ssh/tmp;touch /root/.ssh/authorized_keys
###########################
# Setting up user account #
###########################
mkdir -p ~/.ssh;chmod 700 ~/.ssh;cd ~/.ssh;mkdir -p ~/.ssh/tmp;touch ~/.ssh/authorized_keys
################################
# Setting up user's SSH Config #
################################
# Enables connection multiplexing (ControlMaster via ~/.ssh/tmp sockets)
# and proxying through the tunnel hosts. "YOURUSERNAME"/"YOURMACHINE" are
# placeholders the user is expected to edit afterwards.
echo "ControlMaster auto
ControlPath ~/.ssh/tmp/%h_%p_%r
Host *
User YOURUSERNAME
Host tunnel1
HostName tunnel1.sista.arizona.edu
Host tunnel2
HostName tunnel2.sista.arizona.edu
Host dalek
ProxyCommand ssh -q -W %h:%p tunnel1
Host dalek2
HostName dalek
ProxyCommand ssh -q -W %h:%p tunnel2
#Host YOURMACHINE
#       ProxyCommand ssh -q -W %h:%p tunnel1
#       LocalForward 5999 YOURMACHINE:5900
#Host YOURMACHINE2
#       HostName YOURMACHINE
#       ProxyCommand ssh -q -W %h:%p tunnel2
#       LocalForward 5999 YOURMACHINE:5900
ForwardAgent yes" > ~/.ssh/config
##########################
# Ask to create ssh keys #
##########################
echo "Would you like to create ssh private/public keys?"
read -r -p "Are you sure? [y/N] " response
if [[ $response =~ ^([yY][eE][sS]|[yY])$ ]]
then
    echo "Creating keys for root..."
    ssh-keygen -t rsa -b 4096 -f /root/.ssh/id_rsa
    echo "Creating key for your account..."
    ssh-keygen -t rsa -b 4096 -f ~/.ssh/id_rsa
else
    echo "No keys created..."
fi
exit 0
| true
|
318994a76bb2bd6fde6ce85ad8aebffd01d4b78e
|
Shell
|
echolevel/protracker
|
/make-linux.sh
|
UTF-8
| 472
| 2.640625
| 3
|
[
"WTFPL"
] |
permissive
|
#!/bin/bash
# Build ProTracker on Linux; the finished binary lands in ./release.
warn_flags=(-Wall -Wno-unused-result -Wc++-compat -Wshadow -Winit-self
            -Wextra -Wunused -Wunreachable-code -Wredundant-decls
            -Wswitch-default)
rm release/protracker &> /dev/null
echo 'Compiling, please wait...'
gcc src/*.c src/gfx/*.c -lSDL2 -lm -ldl "${warn_flags[@]}" \
    -march=native -mtune=native -O3 -o release/protracker
rm src/*.o src/gfx/*.o &> /dev/null
echo "Done! The binary (protracker) is in the folder named 'release'."
echo 'To run it, type ./protracker in the release folder.'
| true
|
b82d427a5febbb6f942fac3d6476d1eb3049e21b
|
Shell
|
jefferickson/dotfiles
|
/languages/.esperanto-word.sh
|
UTF-8
| 642
| 2.875
| 3
|
[] |
no_license
|
#! /bin/bash
## USAGE: ./.esperanto-word.sh VORTO WORD
## Where VORTO is the Esperanto word you want to search and WORD is the English translation (for image search).
# Show the search terms and copy the Esperanto word to the macOS clipboard.
echo "Search: $1 ($2)"
echo -n $1 | pbcopy
# Open dictionary, pronunciation, image-search and translation tabs in
# Firefox (errors from Firefox are silenced).
/Applications/Firefox.app/Contents/MacOS/firefox \
    --new-tab "https://eo.wiktionary.org/wiki/$1#Esperanto" \
    --new-tab "https://forvo.com/word/$1/#eo" \
    --new-tab "https://www.google.com/search?site=&tbm=isch&source=hp&q=$2&oq=$2&gs_l=img.3..0l10.2298.3088.0.3176.5.5.0.0.0.0.60.218.4.4.0....0...1ac.1.64.img..1.4.216.JMP5u9OU3X8" \
    --new-tab "http://en.bab.la/dictionary/english-esperanto/$1" \
    2>/dev/null
| true
|
4375b6e72ca4600dcf455f18badf5c0d3cbe441a
|
Shell
|
WolverinDEV/TDFT
|
/core/src/main/cpp/compile.sh
|
UTF-8
| 558
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Build libnative.so from native.cpp for JNI use.
# Requires JAVA_INCLUDE_DIR pointing at the JDK's include directory.
cd "$(dirname "$0")" || exit 1

if [[ -z ${JAVA_INCLUDE_DIR} ]]; then
  echo "Please specify where to find the jni headers. (JAVA_INCLUDE_DIR)"
  exit 1
fi

if [[ ! -e "${JAVA_INCLUDE_DIR}/jni.h" ]]; then
  echo "Invalid java include dir: ${JAVA_INCLUDE_DIR}"
  echo "Missing jni.h"
  exit 1
fi

# BUG FIX: quote the include paths (a JAVA_INCLUDE_DIR containing spaces
# previously broke the command line) and stop on compile/link failure —
# the old script linked and deleted objects even after g++ failed.
g++ -c -Wall -Werror -fpic -std=c++17 -I"${JAVA_INCLUDE_DIR}" -I"${JAVA_INCLUDE_DIR}/linux" native.cpp || exit 1
g++ -shared -o ../resources/libnative.so -lstdc++ -static-libstdc++ -static-libgcc native.o -lstdc++fs || exit 1

# Cleanup
rm native.o
| true
|
79d182ebcfddb2ae3700d6e0d5617956dabe1baf
|
Shell
|
NVIDIA/TensorRT
|
/demo/Tacotron2/scripts/download_checkpoints.sh
|
UTF-8
| 1,211
| 3.109375
| 3
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"ISC",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Prepare the download directory
mkdir -p checkpoints && cd checkpoints || exit 1

# Download the Tacotron2 and Waveglow checkpoints.
# BUG FIX: the original tests checked "checkpoints/..." even though the
# script has already cd'ed into checkpoints/, so the files were never
# found and were re-downloaded on every run. Test paths relative to the
# current directory instead.
if [ ! -f "tacotron2_pyt_ckpt_amp_v19.09.0/nvidia_tacotron2pyt_fp16_20190427" ]; then
    echo "Downloading Tacotron2 checkpoint from NGC"
    ngc registry model download-version nvidia/tacotron2_pyt_ckpt_amp:19.09.0
fi;

if [ ! -f "waveglow_ckpt_amp_256_v19.10.0/nvidia_waveglow256pyt_fp16" ]; then
    echo "Downloading Waveglow checkpoint from NGC"
    ngc registry model download-version nvidia/waveglow_ckpt_amp_256:19.10.0
fi;
cd -
| true
|
eba2ca15206d28df45b97c45b02bd8b50ef1869e
|
Shell
|
connormckelvey/dotfiles
|
/emacs/setup
|
UTF-8
| 449
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash -e
# Install, configure and update Spacemacs (develop branch).
# NOTE(review): the helper functions below appear to be driven by the
# sourced lib/util — confirm; nothing in this file calls them directly.
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$DIR/../lib/util"
# True when Spacemacs is already present in ~/.emacs.d.
is_installed() {
    test -f ~/.emacs.d/spacemacs.mk
}
# Replace ~/.emacs.d with a fresh Spacemacs clone on the develop branch.
install() {
    rm -rf ~/.emacs.d
    git clone https://github.com/syl20bnr/spacemacs ~/.emacs.d
    cd ~/.emacs.d
    git checkout -b develop origin/develop
}
# True when ~/.spacemacs is a symlink (prints its target).
is_configured() {
    readlink ~/.spacemacs
}
# Link this repo's spacemacs config into place.
configure() {
    create_symlink $DIR/spacemacs ~/.spacemacs
}
# Pull the latest Spacemacs changes.
update() {
    cd ~/.emacs.d
    git pull
}
| true
|
216893531d159a555ad9c07d6bfa2f108de518a1
|
Shell
|
rosanasoriano/jaks-post-config
|
/scripts/config-pam-wheel
|
UTF-8
| 4,887
| 4.25
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Default
file=/etc/pam.d/su
# Specify stack to create haystack from (account|auth|password|session)
type="auth"
# Specify a mode (control) for the module [requisite|required|sufficient|optional]
mode="required"
# Array of key / value options for pam_wheel
# For a complete list please consult man(5) pam_wheel
declare -A flags
flags[use_uid]=""
# Get our current working directory
cwd=$(dirname ${BASH_SOURCE[0]})
# Setup a path for binary tools (just in case)
PATH=$PATH:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin
# Make sure our common libraries are available
if [ ! -f "${cwd}/libs/common" ] ; then
echo "${prog}: Could not load libraries"
exit 1
fi
# Load our libs
source ${cwd}/libs/common
# Displays available arg list
usage()
{
cat <<EOF
Usage $0 [options]
Manage pam_wheel configuration
Help:
-h Show this message
-v Enable verbosity mode
Required:
-a Author name (required)
Options:
-f Array of files to use
-o Specify type [account|auth|password|session]
-m Specify mode [required|requisite|sufficient|optional]
Validation:
-c Perform validation of change
Restoration options:
-r Perform rollback of changes
-i Interactive mode, to be used with -r
Development mode:
-t Run in test mode (assumes test env)
EOF
}
#######################################
# Apply, validate or roll back the pam_wheel configuration in ${file}.
# Globals (read): file, test, cwd, check, restore, author, flags, type, mode
# Globals (written): errors, info, pattern, find, haystack, chk, flag
# Outputs: flattened INFO:/ERROR: messages on stdout
# Returns: 0 on success, 1 on any error
# NOTE(review): restore(), backup(), create_test() and
# flatten_to_replace() come from the sourced libs/common.
#######################################
config_pam_wheel()
{
  # Holders so a command substitution's exit status can be captured
  # before its output is appended to the info array.
  local out rv
  # Error array
  errors=()
  # Global info array
  info=()
  # Use test suite if enabled
  if [ "${test}" == 1 ] ; then
    file=$(create_test "${cwd}/test/originals/$(basename ${file})")
    if [ $? != 0 ] ; then
      errors+=("ERROR: Error working with test env")
      echo "${errors[@]}" && return 1
    fi
  fi
  # Test for presence of file
  if [ ! -f ${file} ]; then
    errors+=("ERROR: File specified was not found,")
    echo "${errors[@]}" && return 1
  fi
  # Perform restoration if specified
  if [ "${restore}" == 1 ] ; then
    # BUG FIX: previously `$?` was checked after the array append
    # (info+=(...)), which always succeeds, so a failing restore() was
    # never detected. Capture restore()'s status explicitly.
    out=$(restore "${file}")
    rv=$?
    info+=("${out}")
    # Check results & return if an error occured
    if [ ${rv} != 0 ]; then
      echo "${info[@]}" && return 1
    fi
    # Everything passed, return 0
    echo "${info[@]}" && return 0
  fi
  # Backup file if we are making changes
  if [[ "${check}" == 0 ]] && [[ "${restore}" == 0 ]] ; then
    # BUG FIX: same as above — capture backup()'s status rather than the
    # status of the array append.
    out=$(backup "${file}" "${author}")
    rv=$?
    info+=("${out}")
    # Check results & return if an error occured
    if [ ${rv} != 0 ]; then
      echo "${info[@]}" && return 1
    fi
  fi
  # Build a pattern based on ${flags[@]} for searching, validation and creation
  for flag in "${!flags[@]}"; do
    if [ "${flags[${flag}]}" == "" ]; then
      pattern="${pattern}${flag} "
    else
      pattern="${pattern}${flag}=${flags[${flag}]} "
    fi
  done
  # If we are not validating enable pam_wheel changes
  if [ "${check}" == 0 ] ; then
    info+=("INFO: Applying parameters for pam_wheel,")
    # Create a haystack out of ${type} with subpattern pam_wheel.so
    # (the trailing sed escapes '#' so the match can be reused in sed below).
    find=$(grep -i "${type}" ${file}|grep "${mode}"|grep "pam_wheel.so"|sed -e "s|#|\\\#|g")
    if [[ "${find}" =~ pam_wheel ]] && [[ "${find}" != "" ]]; then
      # An existing pam_wheel line: rewrite it in place.
      sed --follow-symlinks -i "s|${find}|${type} ${mode} pam_wheel.so ${pattern}|g" ${file}
    else
      # No pam_wheel line yet: delegate insertion to flatten_to_replace
      # (libs/common).
      flatten_to_replace "${file}" "${type}.*$" "${type} ${mode} pam_wheel.so ${pattern}"
    fi
  fi
  info+=("INFO: Validating parameters for pam_wheel,")
  # Create a ${haystack}
  #haystack=$(grep "^${type}.*${mode}" ${file} | grep "pam_wheel.so" | grep -v "^\#")
  haystack=$(grep "^${type}.*${mode}" ${file} | grep "pam_wheel.so")
  # Loop over ${flags[@]} and compare to ${haystack}, provide errors or verbose
  for flag in "${!flags[@]}"; do
    [ "${flags[${flag}]}" == "" ] && chk="${flag}" || chk="${flag}=${flags[${flag}]}"
    if [ "$(echo "${haystack}"|grep "${chk}")" == "" ]; then
      errors+=("ERROR: Missing option ${chk},")
    else
      info+=("INFO: Found ${chk} option specified,")
    fi
  done
  # Return errors
  if [ "${#errors[@]}" != 0 ] ; then
    errors+=("${info[@]}")
    echo "${errors[@]}" && return 1
  fi
  # Expose information array
  echo "${info[@]}"
  # Return code
  return 0
}
# Global options
verbose=0      # -v: verbose output
check=0        # -c: validate only, apply no changes
restore=0      # -r: roll back previous changes
test=0         # -t: operate on the test environment
interactive=0  # -i: interactive restore (used with -r)
prog=$(basename $0)
# Ensure we have permissions
if [ $UID != 0 ] ; then
  echo "${prog}: Requires root privileges"
  exit 1
fi
# Set variables
while getopts "ha:vcrtif:o:m:" OPTION ; do
  case $OPTION in
    h) usage && exit 1 ;;
    a) author=$OPTARG ;;   # author initials, recorded by backup()
    f) file=$OPTARG ;;     # PAM file to operate on
    o) type=$OPTARG ;;     # PAM "type" field — TODO confirm allowed values
    m) mode=$OPTARG ;;     # control mode: required|requisite|sufficient|optional
    v) verbose=1 ;;
    c) check=1 ;;
    r) restore=1 ;;
    t) test=1 ;;
    i) interactive=1 ;;
    ?) usage && exit 1 ;;
  esac
done
# Make sure we have an author if we are not restoring or validating
if [[ "${author}" == "" ]] && [[ "${restore}" == 0 ]] && [[ "${check}" == 0 ]]; then
  echo "${prog}: Must specify an author name (use -a <initials>)"
  usage
  exit 1
fi
# Robot, do work
main "config_pam_wheel"
| true
|
412908d9caf3e4d143e4ec08b3cc730ebce8dc18
|
Shell
|
tracyone/shell_script
|
/clean_ubuntu.sh
|
UTF-8
| 460
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Free disk space on Ubuntu: clean the apt caches, then remove every
# installed linux-headers/linux-image package that does not belong to the
# running kernel.
sudo apt-get autoclean
sudo apt-get clean
sudo apt-get autoremove
echo -e "Clean usless kernel ...\n"
# Version string of the running kernel, e.g. "3.13.0-24" (taken from uname -a).
current_kernel=$(uname -a | grep -E -o '[0-9]+\.[0-9]+\.[0-9]+-[0-9]+')
# All installed kernel header/image packages whose version differs from the
# running kernel.  NOTE(review): the for-loop below relies on word-splitting
# of the unquoted $remove_list, so package names must stay whitespace-free.
remove_list=$(sudo dpkg --get-selections |grep -E -o '^linux-(headers|image)[^ ]*-[0-9]+\.[0-9]+\.[0-9]+-[0-9]+[^ ]*\s' | grep -v ${current_kernel})
echo ${remove_list}
for i in ${remove_list}; do
	echo -e "remove $i ... \n"
	sudo apt-get remove $i -y
	# brief pause between removals
	sleep 3
done
| true
|
71444e6b3749920e0f1af9b7df646eb55362f3ff
|
Shell
|
LSDtopotools/lsdtt_pcl_docker
|
/Start_LSDTT.sh
|
UTF-8
| 1,134
| 3.578125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# This is a startup script for LSDTopoTools
# It clones the LSDTopoTools2 repository into the LSDTopoTools directory
# it then builds the code from there.
# Author: SMM
# Date: 08/10/2018
# clone or pull the repo, depending on what is in there
# check if the files have been cloned
if  [ -f /LSDTopoTools/LSDTopoTools2/src/LSDRaster.cpp ]
  then
    echo "The LSDTopoTools2 repository exists, updating to the latest version."
    # update in place without cd'ing into the checkout
    git --work-tree=/LSDTopoTools/LSDTopoTools2 --git-dir=/LSDTopoTools/LSDTopoTools2/.git pull origin master
  else
    echo "Cloning the LSDTopoTools2 repository"
    git clone https://github.com/LSDtopotools/LSDTopoTools2.git /LSDTopoTools/LSDTopoTools2
fi
# Change the working directory to that of LSDTopoTools2/src
echo "I am going to try to build LSDTopoTools2."
cd /LSDTopoTools/LSDTopoTools2/src
#echo "The current directory is:"
#echo $PWD
echo "Calling the build script."
sh build.sh
# Now update the path
# NOTE(review): this export only affects the exec'd shell below, not the
# caller's environment.
echo "Now I'll add the LSDTopoTools command line programs to your path."
export PATH=/LSDTopoTools/LSDTopoTools2/bin:$PATH
echo "Your path is now:"
echo $PATH
# Replace this script with an interactive shell (keeps the container alive).
exec /bin/bash
| true
|
f19744774eb512b629aaffe4406ae77085afc8b2
|
Shell
|
bseeger/fedora4-tests
|
/config.tmpl
|
UTF-8
| 1,323
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Local-settings template for the Fedora 4 test suite: copy this file to
# "config" and adjust the values for your environment.
CWD=$(pwd)
# Set USE_JETTY=1 when testing against `mvn jetty:run -Pwebac`, which ships
# its own users and passwords.
USE_JETTY=0
if [ "$USE_JETTY" -ne 1 ]; then
  # Stand-alone Fedora credentials.
  AUTH_USER="fedoraAdmin"   # admin username
  AUTH_PASS="secret3"       # admin password
  AUTH2_USER="adminuser"    # test admin user
  AUTH2_PASS="password2"
  AUTH3_USER="testuser"     # test user
  AUTH3_PASS="password1"
else
  # Jetty (-Pwebac) credentials.
  AUTH_USER="admin1"        # admin user
  AUTH_PASS="password3"
  AUTH2_USER="user2"        # user 2
  AUTH2_PASS="password2"
  AUTH3_USER="user1"        # user 3
  AUTH3_PASS="password1"
fi
# Location of the Fedora instance.
FEDORA_HOST="http://localhost"
FEDORA_PORT="8080"
# Depending on how you start the fedora server, you may need to edit this
# path — the one-click webapp doesn't have /fcrepo in it.
FEDORA_PATH="/fcrepo/rest"
# Full Fedora URL assembled from the pieces above.
FEDORA_URL="${FEDORA_HOST}:${FEDORA_PORT}${FEDORA_PATH}"
# Directory holding the turtle, sparql and binary test resources.
RSCDIR="${PWD}/resources"
# Default cURL options: status-code-only output ...
CURL_OPTS="-s -o /dev/null --write-out %{http_code} --no-keepalive -i"
# ... and a variant that leaves the response untouched.
CUSTOM_CURL_OPTS="-s --no-keepalive -i"
# Pull in the shared helper functions.
. ./functions
| true
|
b46eea1f5f3cff5a9720abb7d621918e781fd8ac
|
Shell
|
GitWyd/Titan
|
/setup.sh
|
UTF-8
| 424
| 3.1875
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Fetch the googletest dependency (pinned to release-1.10.0) and install the
# system packages needed to build Titan.
gtestpath="lib/googletest"
# NOTE(review): vcpkgpath is assigned but never used below — confirm intent.
vcpkgpath="lib/vcpkg"
# setup googletest
if [ ! -d "$gtestpath" ]; then
    git clone https://github.com/google/googletest.git $gtestpath
    cd $gtestpath
    # pin to a known-good release
    git checkout -q release-1.10.0
    cd ~- # change back to previous dir and no output to terminal
fi
sudo apt-get install -y --no-install-recommends \
    libglew-dev libglfw3-dev libglm-dev libgl-dev libgl1-mesa-dev cmake
| true
|
2a41963592b9bf1178f2a62002204df67fa29fc4
|
Shell
|
sakhiAnand/s4fosslab
|
/untilcount.sh
|
UTF-8
| 82
| 2.796875
| 3
|
[] |
no_license
|
#! /bin/sh
# Print the numbers 1..N (N = first argument), each followed by a blank line.
# Fixes from review:
#  - `echo $i "\n"` is non-portable: bash prints a literal "\n" while dash
#    expands it.  printf produces the same output under any POSIX sh.
#  - With no argument the original's `[ $i -gt $1 ]` was a test *error*,
#    which `until` treats as false, looping forever.  The limit now
#    defaults to 0, so a bare invocation prints nothing and exits.
#  - The loop lives in a function so it is reusable; invoking the script
#    with an argument behaves as before.
print_sequence()
{
    limit=${1:-0}
    i=1
    until [ "$i" -gt "$limit" ]
    do
        printf '%s\n\n' "$i"
        i=$(( i + 1 ))
    done
}
print_sequence "$@"
| true
|
ee9355777cf462197c1abf6a13bd532fe83242b7
|
Shell
|
Tier3SW/ATLASLocalRootBase
|
/testing/scripts/git/functions-Linux.sh
|
UTF-8
| 2,715
| 3.46875
| 3
|
[] |
no_license
|
#!----------------------------------------------------------------------------
#!
#! functions-Linux.sh
#!
#! functions for testing the tools
#!
#! Usage:
#! not directly
#!
#! History:
#! 08Mar18: A. De Silva, First version
#!
#!----------------------------------------------------------------------------
#!----------------------------------------------------------------------------
alrb_fn_gitInit()
#!----------------------------------------------------------------------------
# Generate and run a per-shell test script that performs `git init` followed
# by `git status` (the second only if the first succeeded), logging each
# command's output under $alrb_relTestDir.  Returns the generated script's
# exit status.
# Globals read: alrb_relTestDir, alrb_thisShell, alrb_Verbose,
#               ATLAS_LOCAL_ROOT_BASE.
{
  local alrb_retCode=0
  local alrb_runName="init"
  local alrb_cmdGitInit="git init"
  local alrb_cmdGitInitName="$alrb_relTestDir/git-init.out"
  local alrb_cmdGitStatus="git status"
  local alrb_cmdGitStatusName="$alrb_relTestDir/git-status.out"
  local alrb_runScript="$alrb_relTestDir/git-script-$alrb_runName"
  # Rebuild the throwaway driver script.  The unescaped $vars below are
  # expanded now; the \$-escaped ones are evaluated when the script runs.
  \rm -f $alrb_runScript
  \cat << EOF >> $alrb_runScript
source $alrb_relTestDir/git-script-setup.sh
\cd $alrb_relTestDir
source $ATLAS_LOCAL_ROOT_BASE/utilities/evaluator.sh "$alrb_cmdGitInit" $alrb_cmdGitInitName $alrb_Verbose
alrb_retCode=\$?
if [ \$alrb_retCode -eq 0 ]; then
  source $ATLAS_LOCAL_ROOT_BASE/utilities/evaluator.sh "$alrb_cmdGitStatus" $alrb_cmdGitStatusName $alrb_Verbose
  alrb_retCode=\$?
fi
exit \$alrb_retCode
EOF
  # Execute under the shell currently being tested.
  alrb_fn_runShellScript $alrb_thisShell $alrb_runScript
  alrb_retCode=$?
  return $alrb_retCode
}
#!----------------------------------------------------------------------------
alrb_fn_gitTestSetupEnv()
#!----------------------------------------------------------------------------
# Write git-script-setup.sh, the common preamble sourced by every generated
# test script: it loads the saved environment, sets up ATLASLocalRootBase and
# runs `lsetup git`, aborting with status 64 if that fails.
# Globals read: alrb_relTestDir, alrb_envFile, ATLAS_LOCAL_ROOT_BASE,
#               alrb_VerboseOpt.
{
  \rm -f $alrb_relTestDir/git-script-setup.sh
  \cat << EOF >> $alrb_relTestDir/git-script-setup.sh
source $alrb_envFile.sh
export ATLAS_LOCAL_ROOT_BASE=$ATLAS_LOCAL_ROOT_BASE
source $ATLAS_LOCAL_ROOT_BASE/user/atlasLocalSetup.sh -q
lsetup "git" $alrb_VerboseOpt
if [ \$? -ne 0 ]; then
  exit 64
fi
EOF
  return 0
}
#!----------------------------------------------------------------------------
alrb_fn_gitTestRun()
#!----------------------------------------------------------------------------
# Entry point for the git tool test: print the git version (in a subshell so
# the setup does not leak), then run the init test once per shell listed in
# alrb_testShellAr, recording results via the summary helpers.
# Globals read: alrb_testShellAr, alrb_toolWorkdir, alrb_tool,
#               ATLAS_LOCAL_ROOT_BASE.
{
  local alrb_retCode=0
  local alrb_thisEnv
  local alrb_thisShell
  \echo -e "
 \e[1mgit test\e[0m"
  # Subshell: report the git version without polluting this environment.
  (
    export ATLAS_LOCAL_ROOT_BASE=$ATLAS_LOCAL_ROOT_BASE
    source $ATLAS_LOCAL_ROOT_BASE/user/atlasLocalSetup.sh -q
    lsetup git -q
    git --version
  )
  for alrb_thisShell in ${alrb_testShellAr[@]}; do
    local alrb_addStatus=""
    local alrb_relTestDir="$alrb_toolWorkdir/$alrb_thisShell"
    \mkdir -p $alrb_relTestDir
    alrb_fn_gitTestSetupEnv
    if [ $? -ne 0 ]; then
      return 64
    fi
    alrb_fn_initSummary $alrb_tool $alrb_thisShell "git init"
    alrb_fn_gitInit
    alrb_fn_addSummary $? exit
  done
  return 0
}
| true
|
5870544079d92ab111d7a9504a25d2ab2aaff08c
|
Shell
|
lsy521/ClusterBuildScripts
|
/install/config-yarn-CPU-RAM.sh
|
UTF-8
| 7,403
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
################################################################################
## Copyright:   HZGOSUN Tech. Co, BigData
## Filename:    config-yarn-CPU-RAM.sh
## Description: Configure YARN CPU and memory settings
## Version:     1.0
## Author:      liusiyang
## Created:     2017-12-11
################################################################################
#set -x ## for debugging; comment out when not needed
#---------------------------------------------------------------------#
#                        variable definitions                         #
#---------------------------------------------------------------------#
cd `dirname $0`
## directory containing this script
BIN_DIR=`pwd`
cd ..
## ClusterBuildScripts directory
CLUSTER_BUILD_SCRIPTS_DIR=`pwd`
## log directory
LOG_DIR=${CLUSTER_BUILD_SCRIPTS_DIR}/logs
## log file
LOG_FILE=${LOG_DIR}/config-yarn.log
cd tool/
## directory holding the yarn-utils.py script
YARN_UTIL_DIR=`pwd`
## CPU count of this machine
## NOTE(review): "physical id" counts physical sockets, not cores — confirm.
CORES=$(cat /proc/cpuinfo| grep "physical id"| sort| uniq| wc -l)
## total memory of this machine (numeric part of `free -h`)
MEMORY=$(echo "$(free -h | grep "Mem" | awk '{print $2}')" | sed -r 's/[^0-9.]+//g')
## number of disks mounted on this machine
DISKS=1
## whether HBase is used
HBASE=True ## True = in use, False = not in use
## directory of cluster_conf.properties
CONF_DIR=${CLUSTER_BUILD_SCRIPTS_DIR}/conf
## final install root for everything bigdata-related
INSTALL_HOME=$(grep Install_HomeDir ${CONF_DIR}/cluster_conf.properties|cut -d '=' -f2)
## yarn-site.xml directory
YARN_SITE_XML_DIR=${INSTALL_HOME}/Hadoop/hadoop/etc/hadoop
## yarn-site.xml file path
YARN_SITE_XML=${YARN_SITE_XML_DIR}/yarn-site.xml
## <value>
VALUE="<value>"
## </value>
VALUE_END="</value>"
#####################################################################
# Function:    config_yarn
# Description: run yarn-utils.py with this machine's CPU/RAM/disk
#              figures and save the recommended settings (key=value
#              lines) to ${BIN_DIR}/chenke.sb, echoing them to the log.
# Arguments:   N/A
# Returns:     N/A
# Other:       N/A
#####################################################################
function config_yarn ()
{
    echo "" | tee -a $LOG_FILE
    echo "****************************************************" | tee -a $LOG_FILE
    echo “正在配置yarn的内存与CPU,请稍候.......” | tee -a $LOG_FILE
    echo "获取当前机器配置信息:cores=${CORES},MEMORY=${MEMORY},DISKS=${DISKS},HBASE=${HBASE}"
    # NOTE(review): stdout is already redirected into chenke.sb, so the
    # trailing `| tee` receives nothing — the log line comes from the cat
    # below instead.
    python yarn-utils.py -c ${CORES} -m ${MEMORY} -d ${DISKS} -k ${HBASE} > ${BIN_DIR}/chenke.sb | tee -a $LOG_FILE
    echo "${BIN_DIR}/chenke.sb文件内容:" | tee -a $LOG_FILE
    echo "----------------------------------------------------" | tee -a $LOG_FILE
    cat ${BIN_DIR}/chenke.sb | tee -a $LOG_FILE
    echo "----------------------------------------------------" | tee -a $LOG_FILE
    echo “配置yarn完成!!!!!!” | tee -a $LOG_FILE
}
#####################################################################
# Function:    config_yarn_site_xml
# Description: apply the values computed by yarn-utils.py (saved in
#              ${BIN_DIR}/chenke.sb as key=value lines) to yarn-site.xml.
#              For each known key it rewrites the line FOLLOWING the
#              key's occurrence — i.e. the <value>...</value> line.
# Globals:     YARN_SITE_XML_DIR, YARN_SITE_XML, BIN_DIR, LOG_FILE,
#              VALUE, VALUE_END (read); changes the current directory.
# Fixes from review:
#  - the five copy-pasted update stanzas are folded into one helper
#    (_set_yarn_param); behaviour and log messages are unchanged.
#  - the first stanza's failure message wrongly named
#    "yarn.nodemanager.resource.memory-mb"; each message now reports
#    the key that was actually missing.
#  - deprecated $[ ... ] arithmetic replaced by $(( ... )).
#####################################################################
function config_yarn_site_xml ()
{
    echo "" | tee -a $LOG_FILE
    echo "****************************************************" | tee -a $LOG_FILE
    cd ${YARN_SITE_XML_DIR}
    echo “进入${YARN_SITE_XML_DIR}目录,准备配置yarn-site.xml” | tee -a $LOG_FILE
    if [ -f "${YARN_SITE_XML}" ] && [ -f "${BIN_DIR}/chenke.sb" ]; then
        echo “正在配置yarn-site.xml,请稍候.......” | tee -a $LOG_FILE
        local key
        # Same keys, same order, as the original five stanzas.
        for key in \
            yarn.scheduler.minimum-allocation-mb \
            yarn.scheduler.maximum-allocation-mb \
            yarn.nodemanager.resource.memory-mb \
            yarn.app.mapreduce.am.resource.mb \
            yarn.app.mapreduce.am.command-opts; do
            _set_yarn_param "${key}"
        done
    else
        echo "Not Found \"${YARN_SITE_XML_DIR}\" or \"${BIN_DIR}/chenke.sb\" file!" | tee -a $LOG_FILE
    fi
    rm -f ${BIN_DIR}/chenke.sb
    echo "已删除${BIN_DIR}/chenke.sb文件!" | tee -a $LOG_FILE
    echo “配置yarn-site.xml完成!!!!!!” | tee -a $LOG_FILE
}

# Helper: replace the <value> line that follows ${1}'s occurrence in
# yarn-site.xml with the value recorded for ${1} in chenke.sb.
function _set_yarn_param ()
{
    local key="$1"
    if grep -q "${key}" ${YARN_SITE_XML}; then
        # the line after the key's <name> line holds the <value> element
        local num=$(( $(cat -n ${YARN_SITE_XML} | grep "${key}" | awk '{print $1}') + 1 ))
        local val=$(grep "${key}" ${BIN_DIR}/chenke.sb | cut -d '=' -f2)
        sed -i "${num}c ${VALUE}${val}${VALUE_END}" ${YARN_SITE_XML}
        echo "config ${key}=${val}" | tee -a $LOG_FILE
    else
        echo "Not fount \"${key}\"!" | tee -a $LOG_FILE
    fi
}
#####################################################################
# Function:    main
# Description: script entry point — compute the recommended YARN
#              settings, then write them into yarn-site.xml.
# Arguments:   N/A
# Returns:     N/A
# Other:       N/A
#####################################################################
function main()
{
    config_yarn
    config_yarn_site_xml
}
#---------------------------------------------------------------------#
#                            execution flow                           #
#---------------------------------------------------------------------#
## log a timestamp, then run
echo "" | tee -a $LOG_FILE
echo "$(date "+%Y-%m-%d %H:%M:%S")" | tee -a $LOG_FILE
main
set +x
| true
|
966c5218b8519ce33a08fcde057876d8173f5d46
|
Shell
|
AhiGan/learnable-deep-priors
|
/experiments/run.sh
|
UTF-8
| 2,939
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Experiment driver: runs ../src/main.py over several dataset / model-size
# combinations.  When use_pretrained is 0, each configuration is first
# trained (--train 1) and then evaluated (--train 0); otherwise only the
# evaluation pass runs against pretrained checkpoints.
run_file='../src/main.py'
folder_data='../data'
gpu='0'
use_pretrained=1
if (( $use_pretrained == 0 )); then
    train_list='1 0'
else
    train_list='0'
fi
# Multi-Shapes 20x20 — a single fixed configuration.
dataset='shapes_20x20'
folder_base=$dataset
if [ ! -d $folder_base ]; then
    mkdir $folder_base
fi
path_data=$folder_data'/'$dataset'_data.h5'
state_size=16
updater_size=32
folder=$folder_base'/'$state_size'_'$updater_size
for train in $train_list; do
    python $run_file \
        --gpu $gpu \
        --dataset $dataset \
        --path_data $path_data \
        --folder $folder \
        --train $train \
        --state_size $state_size \
        --updater_size $updater_size
done
# Multi-Shapes 28x28 — sweep binary/non-binary images and model sizes.
dataset='shapes_28x28'
path_data=$folder_data'/'$dataset'_3_data.h5'
for binary_image in 1 0; do
    folder_base=$dataset'_'$binary_image
    if [ ! -d $folder_base ]; then
        mkdir $folder_base
    fi
    for params in '16 16' '16 32' '32 32' '32 64' '64 64'; do
        # split the "state updater" pair into an array
        IFS=' ' params=( $params )
        state_size=${params[0]}
        updater_size=${params[1]}
        folder=$folder_base'/'$state_size'_'$updater_size
        for train in $train_list; do
            python $run_file \
                --gpu $gpu \
                --dataset $dataset \
                --path_data $path_data \
                --folder $folder \
                --train $train \
                --state_size $state_size \
                --updater_size $updater_size \
                --binary_image $binary_image
        done
    done
done
# Multi-MNIST — three training-set sizes, four model sizes each.
dataset='mnist'
for folder_base in 'mnist_20' 'mnist_500' 'mnist_all'; do
    if [ ! -d $folder_base ]; then
        mkdir $folder_base
    fi
    path_data=$folder_data'/'$folder_base'_data.h5'
    for params in '16 32' '32 32' '32 64' '64 64'; do
        IFS=' ' params=( $params )
        state_size=${params[0]}
        updater_size=${params[1]}
        folder=$folder_base'/'$state_size'_'$updater_size
        for train in $train_list; do
            python $run_file \
                --gpu $gpu \
                --dataset $dataset \
                --path_data $path_data \
                --folder $folder \
                --train $train \
                --state_size $state_size \
                --updater_size $updater_size
        done
    done
done
# Generalization — evaluate the largest 28x28 model on scenes with 2 and 4
# objects (no training; reuses the 64/64 checkpoint).
dataset='shapes_28x28'
folder_base=$dataset'_1'
train=0
state_size=64
updater_size=64
folder=$folder_base'/'$state_size'_'$updater_size
for num_objects in 2 4; do
    path_data=$folder_data'/'$dataset'_'$num_objects'_data.h5'
    file_result_base='general_'$num_objects'_result_{}.h5'
    python $run_file \
        --gpu $gpu \
        --dataset $dataset \
        --path_data $path_data \
        --folder $folder \
        --train $train \
        --state_size $state_size \
        --updater_size $updater_size \
        --file_result_base $file_result_base \
        --num_objects $num_objects
done
| true
|
e6404ce338b12509509637567be81cc1db812301
|
Shell
|
starshipcoin/starshipsha
|
/contrib/init/starshipshad.init
|
UTF-8
| 1,392
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
#  starshipshad The starshipsha core server.
#
#
# chkconfig: 345 80 20
# description: starshipshad
# processname: starshipshad
#
# Source function library.
. /etc/init.d/functions
# you can override defaults in /etc/sysconfig/starshipshad, see below
if [ -f /etc/sysconfig/starshipshad ]; then
        . /etc/sysconfig/starshipshad
fi
RETVAL=0
prog=starshipshad
# you can override the lockfile via BITCOIND_LOCKFILE in /etc/sysconfig/starshipshad
# (${VAR-default} keeps an override even when it is set to the empty string)
lockfile=${BITCOIND_LOCKFILE-/var/lock/subsys/starshipshad}
# starshipshad defaults to /usr/bin/starshipshad, override with BITCOIND_BIN
starshipshad=${BITCOIND_BIN-/usr/bin/starshipshad}
# starshipshad opts default to -disablewallet, override with BITCOIND_OPTS
starshipshad_opts=${BITCOIND_OPTS--disablewallet}
# Launch the daemon; on success drop a subsys lockfile so the rc system
# knows the service is running.
start() {
        echo -n $"Starting $prog: "
        # daemon() comes from /etc/init.d/functions; $DAEMONOPTS may be set
        # in /etc/sysconfig/starshipshad (unset otherwise).
        daemon $DAEMONOPTS $starshipshad $starshipshad_opts
        RETVAL=$?
        echo
        [ $RETVAL -eq 0 ] && touch $lockfile
        return $RETVAL
}
# Stop the daemon and remove the subsys lockfile on success.
stop() {
        echo -n $"Stopping $prog: "
        # killproc() comes from /etc/init.d/functions.
        killproc $prog
        RETVAL=$?
        echo
        [ $RETVAL -eq 0 ] && rm -f $lockfile
        return $RETVAL
}
# Dispatch on the requested service action.
case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    status)
        # status() comes from /etc/init.d/functions.
        status $prog
        ;;
    restart)
        stop
        start
        ;;
    *)
        echo "Usage:  service $prog {start|stop|status|restart}"
        exit 1
        ;;
esac
| true
|
a5ed9a5bd0616baafb59e70882a51b71806222b6
|
Shell
|
miguelverissimo/dotfiles
|
/modules/development.module
|
UTF-8
| 4,973
| 2.5625
| 3
|
[] |
no_license
|
# vi: ft=bash
# load the creds
# source ~/.pn_anonymize_creds
# Open the full Rails workspace: the edit window, then a support window in a
# new tmux window, finishing with focus back on the edit window.
rall() {
  redit
  tmux new-window
  rsupport
  tmux select-window -t !   # '!' = previously selected window
}
# Rails edit window: three panes (nvim on top, two shells), each dropped
# into nix-shell first.
redit() {
  tmux rename-window 'rb edit'
  tmux split-window -vp 15
  tmux split-window -hp 45
  tmux send-keys -t 1 'nix-shell' Enter
  tmux send-keys -t 1 'nvim' Enter
  tmux send-keys -t 2 'nix-shell' Enter
  tmux send-keys -t 3 'nix-shell' Enter
}
# Rails support window: docker-compose, css watcher and rails server panes,
# plus a free shell, all inside nix-shell.
rsupport() {
  tmux rename-window 'rb support'
  tmux split-window -hp 50
  tmux select-pane -t 1
  tmux split-window -vp 80
  tmux split-window -vp 70
  tmux send-keys -t 1 'nix-shell' Enter
  tmux send-keys -t 1 'docker-compose up' Enter
  tmux send-keys -t 2 'nix-shell' Enter
  tmux send-keys -t 2 'rails_css' Enter
  tmux send-keys -t 4 'nix-shell' Enter
  tmux send-keys -t 4 'rails_s' Enter
  tmux send-keys -t 3 'nix-shell' Enter
  tmux select-pane -t 3
}
# for Rails projects
# Full Rails workspace in one call (edit window + support window with
# lazygit); unlike rall, the layout is built inline rather than by calling
# oredit/orsupport.
orall () {
  tmux rename-window 'rb edit'
  tmux split-window -vp 15
  tmux split-window -hp 45
  tmux send-keys -t 1 'nix-shell' Enter
  tmux send-keys -t 1 'nvim' Enter
  tmux send-keys -t 2 'nix-shell' Enter
  tmux send-keys -t 3 'nix-shell' Enter
  tmux new-window
  tmux rename-window 'rb support'
  tmux split-window -hp 50
  tmux split-window -vp 80
  tmux split-window -vp 70
  tmux select-pane -t 1
  tmux split-window -vp 15
  tmux send-keys -t 3 'nix-shell' Enter
  tmux send-keys -t 3 'docker-compose up' Enter
  tmux send-keys -t 4 'nix-shell' Enter
  tmux send-keys -t 4 'rails_css' Enter
  tmux send-keys -t 5 'nix-shell' Enter
  tmux send-keys -t 5 'rails_s' Enter
  tmux send-keys -t 1 'nix-shell' Enter
  tmux send-keys -t 1 'lazygit' Enter
  tmux send-keys -t 2 'nix-shell' Enter
  tmux select-pane -t 1
  tmux select-window -t !   # back to the edit window
}
# Rails edit window only (same layout as redit).
oredit () {
  tmux rename-window 'rb edit'
  tmux split-window -vp 15
  tmux split-window -hp 45
  tmux send-keys -t 1 'nix-shell' Enter
  tmux send-keys -t 1 'nvim' Enter
  tmux send-keys -t 2 'nix-shell' Enter
  tmux send-keys -t 3 'nix-shell' Enter
}
# Rails support window with lazygit: docker-compose, css watcher, rails
# server, lazygit and a free shell, all inside nix-shell.
orsupport() {
  tmux rename-window 'rb support'
  tmux split-window -hp 50
  tmux split-window -vp 80
  tmux split-window -vp 70
  tmux select-pane -t 1
  tmux split-window -vp 15
  tmux send-keys -t 3 'nix-shell' Enter
  tmux send-keys -t 3 'docker-compose up' Enter
  tmux send-keys -t 4 'nix-shell' Enter
  tmux send-keys -t 4 'rails_css' Enter
  tmux send-keys -t 5 'nix-shell' Enter
  tmux send-keys -t 5 'rails_s' Enter
  tmux send-keys -t 1 'nix-shell' Enter
  tmux send-keys -t 1 'lazygit' Enter
  tmux send-keys -t 2 'nix-shell' Enter
  tmux select-pane -t 1
}
# for Ember projects
# Full Ember workspace in one call: edit window (nvim + `yarn start`) plus a
# support window with lazygit and spare shells.
eall () {
  tmux rename-window 'ember edit'
  tmux split-window -vp 15
  tmux split-window -hp 45
  tmux send-keys -t 1 'nix-shell' Enter
  tmux send-keys -t 1 'nvim' Enter
  tmux send-keys -t 2 'nix-shell' Enter
  tmux send-keys -t 3 'nix-shell' Enter
  tmux send-keys -t 3 'yarn start' Enter
  tmux new-window
  tmux rename-window 'ember support'
  tmux split-window -hp 50
  tmux split-window -vp 80
  tmux split-window -vp 70
  tmux select-pane -t 1
  tmux split-window -vp 15
  tmux send-keys -t 3 'nix-shell' Enter
  tmux send-keys -t 4 'nix-shell' Enter
  tmux send-keys -t 5 'nix-shell' Enter
  tmux send-keys -t 1 'nix-shell' Enter
  tmux send-keys -t 1 'lazygit' Enter
  tmux send-keys -t 2 'nix-shell' Enter
  tmux select-pane -t 1
  tmux select-window -t !   # back to the edit window
}
# Ember edit window: nvim, a shell, and the `yarn start` dev server.
eedit () {
  tmux rename-window 'ember edit'
  tmux split-window -vp 15
  tmux split-window -hp 45
  tmux send-keys -t 1 'nix-shell' Enter
  tmux send-keys -t 1 'nvim' Enter
  tmux send-keys -t 2 'nix-shell' Enter
  tmux send-keys -t 3 'nix-shell' Enter
  tmux send-keys -t 3 'yarn start' Enter
}
# Ember support window: lazygit plus spare nix-shell panes.
esupport() {
  tmux rename-window 'ember support'
  tmux split-window -hp 50
  tmux split-window -vp 80
  tmux split-window -vp 70
  tmux select-pane -t 1
  tmux split-window -vp 15
  tmux send-keys -t 3 'nix-shell' Enter
  tmux send-keys -t 4 'nix-shell' Enter
  tmux send-keys -t 5 'nix-shell' Enter
  tmux send-keys -t 1 'nix-shell' Enter
  tmux send-keys -t 1 'lazygit' Enter
  tmux send-keys -t 2 'nix-shell' Enter
  tmux select-pane -t 1
}
# Create a repository of the given name on both GitHub and GitLab, then wire
# up "hub"/"lab" remotes and push URLs in the current checkout.
# $1 = repository name; extra args are forwarded to gh/glab.
# Fixes from review:
#  - the "lab" remote and its push URL pointed at github.com instead of
#    gitlab.com, so the GitLab side was never actually used.
#  - the remote URLs interpolated "$@" (all args); with extra flags that
#    produced a broken URL — they now use only the repo name ($1).
create_repo() {
  local name=$1
  gh repo create "$@"
  glab repo create "$@"
  git remote add hub "git@github.com:miguelverissimo/${name}.git"
  git remote add lab "git@gitlab.com:miguelverissimo/${name}.git"
  git remote set-url --push all "git@github.com:miguelverissimo/${name}.git"
  git remote set-url --push all "git@gitlab.com:miguelverissimo/${name}.git"
}
# Launch the Rails dev server using the exact command recorded in
# Procfile.dev (e.g. "bin/rails server -p 3000").
# Fix from review: this module is sourced into interactive shells, so the
# original `exit 1` killed the whole terminal session when Procfile.dev was
# missing; `return 1` reports failure without terminating the shell.
# NOTE(review): grep -oP needs GNU grep (PCRE) — confirm availability.
rails_s() {
  if [ -e Procfile.dev ]; then
    $(grep -oP 'bin/rails\sserver.*' Procfile.dev)
  else
    echo "Can't find Procfile.dev"
    return 1
  fi
}
# Launch the Rails CSS watcher using the exact command recorded in
# Procfile.dev (the "bin/rails ... css ..." entry).
# Fix from review: `exit 1` in a sourced function killed the interactive
# shell when Procfile.dev was missing; `return 1` just reports the failure.
# NOTE(review): grep -oP needs GNU grep (PCRE) — confirm availability.
rails_css() {
  if [ -e Procfile.dev ]; then
    $(grep -oP 'bin/rails.*css.*' Procfile.dev)
  else
    echo "Can't find Procfile.dev"
    return 1
  fi
}
# Short helpers for the nix / rails / docker workflow.
alias ns='nix-shell'
alias rsh='nix-shell'   # "rails shell"
alias esh='nix-shell'   # "ember shell"
alias rdbm='rails db:migrate'
alias dbmig='rails db:migrate && rails erd'   # migrate and regenerate ERD
alias be='bundle exec'
alias dcu='docker-compose up'
alias dcud='docker-compose up -d'
alias dcd='docker-compose down'
| true
|
88521408748df77d05fa7aca0363739036fb552a
|
Shell
|
petronny/aur3-mirror
|
/dvb-usb-rtl2838u-arch/PKGBUILD
|
UTF-8
| 1,136
| 2.890625
| 3
|
[] |
no_license
|
# Maintainer: Peter Ivanov <ivanovp@gmail.com>
# Based on dvb-usb-rtl2832u-arch, author DonVla <donvla@users.sourceforge.net>
pkgname=dvb-usb-rtl2838u-arch
# Kernel flavour suffix; switch to "-ck" when building for the -ck kernel.
_kernelname="-ARCH"
#_kernelname="-ck"
# Kernel version the module is built against (must match the installed
# headers under /usr/src).
_kversion="3.1.5"
pkgver=1.1_${_kversion}
pkgrel=1
pkgdesc="Kernel module for the RTL2832U, RTL2836U and RTL2838U DVB-T USB2.0 devices"
arch=('i686' 'x86_64')
url="http://dev.ivanov.eu/projects/rtl2838/"
license=('GPL')
depends=("kernel-headers")
install="${pkgname}.install"
source=("dvb-usb-rtl2838u.tar.gz::http://dev.ivanov.eu/projects/rtl2838/dvb-usb-rtl2838u.tar.gz")
md5sums=('f767d22de33e4eb3de0effff7bba7bd7')
sha256sums=('2bd8bea9cda586ee5dcb2d48c4a1b3eeae7f6ce71166bcff5a74af835951e17e')
build() {
  # Full kernel release string, e.g. "3.1.5-1-ARCH".
  local _KERNEL="${_kversion}-${pkgrel}${_kernelname}"
  # set KERNEL_VERSION
  # NOTE(review): rewrites the .install scriptlet in $startdir so its
  # post-install hook targets the kernel built against here.
  sed -e "s#KERNEL_VERSION=.*#KERNEL_VERSION=${_KERNEL}#g" -i "${startdir}/${install}"
  # The tarball extracts to dvb-usb-rtl2832u even though the package is
  # named ...rtl2838u.
  cd "${srcdir}/dvb-usb-rtl2832u"
  export KBUILD_SRC="/usr/src/linux-${_KERNEL}"
  export INSTALL_MOD_PATH="${pkgdir}"
  export INSTALL_MOD_DIR=kernel/drivers/media/dvb/dvb-usb
  make -C "${KBUILD_SRC}" M="$PWD" modules
  # NOTE(review): installing from build() rather than package() is old
  # PKGBUILD style; works because INSTALL_MOD_PATH points at $pkgdir.
  make -C "${KBUILD_SRC}" M="$PWD" modules_install
}
# vim:set ts=2 sw=2 et:
| true
|
d91cdb1cafbfb454915674662032c3e84ef13c16
|
Shell
|
dhalperi/continuous-integration
|
/buildkite/setup-ubuntu.sh
|
UTF-8
| 15,152
| 3.234375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Setup script for Ubuntu 14.04 LTS, 16.04 LTS and 18.04 LTS.
# Fail on errors.
# Fail when using undefined variables.
# Print all executed commands.
# Fail when any command in a pipe fails.
set -euxo pipefail
# Prevent dpkg / apt-get / debconf from trying to access stdin.
export DEBIAN_FRONTEND="noninteractive"
### Deduce image configuration from the hostname.
case $(hostname) in
*pipeline*)
config_kind="pipeline"
;;
*trusted*)
config_kind="trusted"
;;
*worker*)
config_kind="worker"
;;
*)
echo "Could not deduce image kind from hostname: $(hostname)!"
exit 1
;;
esac
case $(hostname) in
*ubuntu1404*)
config_os="ubuntu1404"
;;
*ubuntu1604*)
config_os="ubuntu1604"
;;
*ubuntu1804*)
config_os="ubuntu1804"
;;
*)
echo "Could not deduce operating system from hostname: $(hostname)!"
exit 1
esac
case $(hostname) in
*nojava*)
config_java="no"
;;
*java8*)
config_java="8"
;;
*java9*)
config_java="9"
;;
*java10*)
config_java="10"
;;
*)
echo "Could not deduce Java version from hostname: $(hostname)!"
exit 1
esac
### Increase file descriptor limits
{
cat >> /etc/security/limits.conf <<EOF
* soft nofile 100000
* hard nofile 100000
EOF
}
### Install base packages.
{
# Android SDK requires 32-bits libraries.
dpkg --add-architecture i386
apt-get -qqy update
apt-get -qqy dist-upgrade > /dev/null
packages=(
# Bazel dependencies.
build-essential
clang
curl
git
python
python-dev
python3
python3-dev
unzip
wget
xvfb
zip
zlib1g-dev
# Dependencies for Android SDK.
# https://developer.android.com/studio/troubleshoot.html#linux-libraries
# https://code.google.com/p/android/issues/detail?id=207212
expect
libbz2-1.0:i386
libncurses5:i386
libstdc++6:i386
libz1:i386
# Dependencies for TensorFlow.
libcurl3-dev
swig
python-enum34
python-mock
python-numpy
python-pip
python-wheel
python3-mock
python3-numpy
python3-pip
python3-wheel
# Required by Envoy: https://github.com/bazelbuild/continuous-integration/issues/218
automake
autotools-dev
cmake
libtool
m4
# Required by our infrastructure.
lvm2
# Required by Android projects that launch the Android emulator headlessly
# (see https://github.com/bazelbuild/continuous-integration/pull/246)
cpu-checker
qemu-system-x86
unzip
xvfb
# Required by our release process.
devscripts
gnupg
pandoc
reprepro
ssmtp
# Required by our C++ coverage tests.
lcov
llvm
# Required for the Swift toolchain.
clang
libicu-dev
)
# Bazel dependencies.
if [[ "${config_os}" == "ubuntu1804" ]]; then
packages+=("coreutils")
else
packages+=("realpath")
fi
apt-get -qqy install "${packages[@]}" > /dev/null
# Remove apport, as it's unneeded and uses significant CPU and I/O.
apt-get -qqy purge apport
}
### Install required Python packages from pip for Tensorflow.
{
pip install keras_applications keras_preprocessing
pip3 install keras_applications keras_preprocessing
}
### Install Azul Zulu (OpenJDK).
if [[ "${config_java}" != "no" ]]; then
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 0x219BD9C9
apt-add-repository 'deb http://repos.azulsystems.com/ubuntu stable main'
apt-get -qqy update
apt-get -qqy install zulu-${config_java} > /dev/null
else
apt-get -qqy purge *openjdk* *zulu*
apt-get -qqy autoremove --purge
fi
### Install Bazel.
{
bazel_version=$(curl -sSI https://github.com/bazelbuild/bazel/releases/latest | grep '^Location: ' | sed 's|.*/||' | sed $'s/\r//')
curl -sSLo install.sh "https://releases.bazel.build/${bazel_version}/release/bazel-${bazel_version}-installer-linux-x86_64.sh"
bash install.sh > /dev/null
rm -f install.sh
}
### Install the Buildkite Agent on production images.
{
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 \
--recv-keys 32A37959C2FA5C3C99EFBC32A79206696452D198 &> /dev/null
add-apt-repository -y "deb https://apt.buildkite.com/buildkite-agent stable main"
apt-get -qqy update
apt-get -qqy install buildkite-agent > /dev/null
# Write the Buildkite agent configuration.
cat > /etc/buildkite-agent/buildkite-agent.cfg <<EOF
token="xxx"
name="%hostname-%n"
tags="kind=${config_kind},os=${config_os},java=${config_java}"
build-path="/var/lib/buildkite-agent/builds"
hooks-path="/etc/buildkite-agent/hooks"
plugins-path="/etc/buildkite-agent/plugins"
git-clone-flags="-v --reference /var/lib/bazelbuild"
EOF
# Stop the agent after each job on stateless worker machines.
if [[ "${config_kind}" != "pipeline" ]]; then
cat >> /etc/buildkite-agent/buildkite-agent.cfg <<EOF
disconnect-after-job=true
disconnect-after-job-timeout=86400
EOF
fi
# Add the Buildkite agent hooks.
cat > /etc/buildkite-agent/hooks/environment <<'EOF'
#!/bin/bash
set -euo pipefail
export PATH=$PATH:/usr/lib/google-cloud-sdk/bin:/snap/bin:/snap/google-cloud-sdk/current/bin
export BUILDKITE_ARTIFACT_UPLOAD_DESTINATION="gs://bazel-buildkite-artifacts/$BUILDKITE_JOB_ID"
export BUILDKITE_GS_ACL="publicRead"
gcloud auth configure-docker --quiet
EOF
# The trusted worker machine may only execute certain whitelisted builds.
if [[ "${config_kind}" == "trusted" ]]; then
cat >> /etc/buildkite-agent/hooks/environment <<'EOF'
case ${BUILDKITE_BUILD_CREATOR_EMAIL} in
*@google.com)
;;
*)
echo "Build creator not allowed: ${BUILDKITE_BUILD_CREATOR_EMAIL}"
exit 1
esac
case ${BUILDKITE_REPO} in
https://github.com/bazelbuild/bazel.git|\
https://github.com/bazelbuild/continuous-integration.git)
;;
*)
echo "Repository not allowed: ${BUILDKITE_REPO}"
exit 1
esac
case ${BUILDKITE_ORGANIZATION_SLUG} in
bazel)
;;
*)
echo "Organization not allowed: ${BUILDKITE_PIPELINE_SLUG}"
exit 1
esac
case ${BUILDKITE_PIPELINE_SLUG} in
google-bazel-presubmit-metrics|\
release)
;;
*)
echo "Pipeline not allowed: ${BUILDKITE_PIPELINE_SLUG}"
exit 1
esac
export BUILDKITE_API_TOKEN=$(gsutil cat "gs://bazel-encrypted-secrets/buildkite-api-token.enc" | \
gcloud kms decrypt --location "global" --keyring "buildkite" --key "buildkite-api-token" \
--plaintext-file "-" --ciphertext-file "-")
EOF
fi
# Some notes about our service config:
#
# - All Buildkite agents except the pipeline agent are stateless and need a special service config
# that kills remaining processes and deletes temporary files.
#
# - We set the service to not launch automatically, as the startup script will start it once it is
# done with setting up the local SSD and writing the agent configuration.
if [[ "${config_kind}" == "pipeline" ]]; then
# This is a pipeline worker machine.
systemctl disable buildkite-agent
elif [[ $(systemctl --version 2>/dev/null) ]]; then
# This is a normal worker machine with systemd (e.g. Ubuntu 16.04 LTS).
systemctl disable buildkite-agent
mkdir /etc/systemd/system/buildkite-agent.service.d
cat > /etc/systemd/system/buildkite-agent.service.d/override.conf <<'EOF'
[Service]
Restart=always
PermissionsStartOnly=true
ExecStopPost=/bin/echo "Cleaning up after Buildkite Agent exited ..."
ExecStopPost=/usr/bin/find /tmp -user buildkite-agent -delete
ExecStopPost=/usr/bin/find /var/lib/buildkite-agent -mindepth 1 -maxdepth 1 -execdir rm -rf '{}' +
ExecStopPost=/bin/sh -c 'docker ps -q | xargs -r docker kill'
ExecStopPost=/usr/bin/docker system prune -f --volumes
# Disable tasks accounting, because Bazel is prone to run into resource limits there.
# This fixes the "cgroup: fork rejected by pids controller" error that some CI jobs triggered.
TasksAccounting=no
EOF
elif [[ $(init --version 2>/dev/null | grep upstart) ]]; then
# This is a normal worker machine with upstart (e.g. Ubuntu 14.04 LTS).
cat > /etc/init/buildkite-agent.conf <<'EOF'
description "buildkite-agent"
respawn
respawn limit unlimited
exec sudo -H -u buildkite-agent /usr/bin/buildkite-agent start
# Kill all possibly remaining processes after each build.
post-stop script
set +e
set -x
# Kill all remaining processes.
killall -q -9 -u buildkite-agent
# Clean up left-over files.
find /tmp -user buildkite-agent -delete
find /var/lib/buildkite-agent -mindepth 1 -maxdepth 1 -execdir rm -rf '{}' +
docker ps -q | xargs -r docker kill
docker system prune -f --volumes
end script
EOF
else
echo "Unknown operating system - has neither systemd nor upstart?"
exit 1
fi
}
### Install Docker.
{
# Prerequisites for fetching the Docker APT repository over HTTPS.
apt-get -qqy install apt-transport-https ca-certificates > /dev/null
# Trust Docker's package signing key and register the upstream repo for
# this Ubuntu release ($(lsb_release -cs) resolves to the codename).
curl -sSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
apt-get -qqy update
apt-get -qqy install docker-ce > /dev/null
# Allow the buildkite-agent user access to Docker.
usermod -aG docker buildkite-agent
# Disable the Docker service, as the startup script has to mount
# /var/lib/docker first.
if [[ -e /bin/systemctl ]]; then
systemctl disable docker
else
# Upstart systems: mark the job as manual so it does not auto-start.
echo manual > /etc/init/docker.override
fi
}
### Install Mono.
{
apt-get -qqy install apt-transport-https ca-certificates > /dev/null
# Trust the Mono project signing key and add its repo for this release.
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF
add-apt-repository "deb https://download.mono-project.com/repo/ubuntu stable-$(lsb_release -cs) main"
apt-get -qqy update
apt-get -qqy install mono-devel mono-complete
}
### Install Node.js.
{
# NodeSource setup script registers the Node.js 8.x APT repository.
curl -sSL https://deb.nodesource.com/setup_8.x | bash - > /dev/null
apt-get -qqy install nodejs > /dev/null
# Required by Gerrit:
# https://gerrit.googlesource.com/gerrit/+show/master/polygerrit-ui/README.md
npm install -g \
typescript \
fried-twinkie@0.0.15
}
### Install Python 3.6.
{
if [[ "${config_os}" == "ubuntu1804" ]]; then
# Ubuntu 18.04: pip3 is assumed available; just install the CI deps.
pip3 install requests uritemplate pyyaml github3.py
else
# Older releases: build CPython 3.6 from source.
# NOTE(review): "packages" is an array presumably declared earlier in the
# full script (not visible in this chunk) — confirm before reusing.
packages+=("realpath")
# Headers needed to compile the zlib and ssl modules.
apt-get -qqy install zlib1g-dev libssl-dev
PYTHON_VERSION="3.6.8"
mkdir -p /usr/local/src
pushd /usr/local/src
curl -O "https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tar.xz"
tar xfJ "Python-${PYTHON_VERSION}.tar.xz"
rm -f "Python-${PYTHON_VERSION}.tar.xz"
cd "Python-${PYTHON_VERSION}"
# Enable the 'ssl' module.
cat >> Modules/Setup.dist <<'EOF'
_ssl _ssl.c \
-DUSE_SSL -I/usr/include -I/usr/include/openssl \
-L/usr/lib -lssl -lcrypto
EOF
echo "Compiling Python ${PYTHON_VERSION} ..."
./configure --quiet --enable-ipv6
make -s -j8 all > /dev/null
echo "Installing Python ${PYTHON_VERSION} ..."
# altinstall avoids clobbering the system "python3" binary.
make -s altinstall > /dev/null
pip3.6 install requests uritemplate pyyaml github3.py
popd
rm -rf "/usr/local/src/Python-${PYTHON_VERSION}"
fi
}
### Enable KVM support.
# Needed so the agent can run hardware-accelerated Android emulators.
usermod -a -G kvm buildkite-agent
### Install Android SDK and NDK (only if we have a JVM).
if [[ "${config_java}" != "no" ]]; then
if [[ "${config_java}" == "9" || "${config_java}" == "10" ]]; then
# sdkmanager needs the java.se.ee module that JDK 9/10 no longer load by default.
export SDKMANAGER_OPTS="--add-modules java.se.ee"
fi
# Android NDK
cd /opt
curl -sSLo android-ndk.zip https://dl.google.com/android/repository/android-ndk-r15c-linux-x86_64.zip
unzip android-ndk.zip > /dev/null
rm android-ndk.zip
# Android SDK
mkdir -p /opt/android-sdk-linux
cd /opt/android-sdk-linux
curl -sSLo android-sdk.zip https://dl.google.com/android/repository/sdk-tools-linux-4333796.zip
unzip android-sdk.zip > /dev/null
rm android-sdk.zip
tools/bin/sdkmanager --update
# Re-run the update under expect to auto-accept license prompts.
expect -c '
set timeout -1
log_user 0
spawn tools/bin/sdkmanager --update
expect {
    "Accept? (y/N)" { exp_send "y\r" ; exp_continue }
    eof
}
'
# This should be kept in sync with mac/mac-android.sh.
# - build-tools 28.0.1 introduces the new dexer, d8.jar
# - android-24 is required for desugar tests.
expect -c '
set timeout -1
log_user 0
spawn tools/bin/sdkmanager \
    "build-tools;27.0.3" \
    "build-tools;28.0.2" \
    "emulator" \
    "extras;android;m2repository" \
    "platform-tools" \
    "platforms;android-24" \
    "platforms;android-28" \
    "system-images;android-19;default;x86" \
    "system-images;android-21;default;x86" \
    "system-images;android-22;default;x86" \
    "system-images;android-23;default;x86"
expect {
    "Accept? (y/N)" { exp_send "y\r" ; exp_continue }
    eof
}
'
chown -R root:root /opt/android*
# Point every Buildkite job at the installed SDK/NDK.
cat >> /etc/buildkite-agent/hooks/environment <<'EOF'
export ANDROID_HOME="/opt/android-sdk-linux"
echo "Android SDK is at ${ANDROID_HOME}"
export ANDROID_NDK_HOME="/opt/android-ndk-r15c"
echo "Android NDK is at ${ANDROID_NDK_HOME}"
EOF
fi
### Install Swift Toolchain (for rules_swift).
{
mkdir -p /opt/swift
# Pick the toolchain build matching the image's Ubuntu release.
case ${config_os} in
ubuntu1404)
curl -sSL https://swift.org/builds/swift-4.2.1-release/ubuntu1404/swift-4.2.1-RELEASE/swift-4.2.1-RELEASE-ubuntu14.04.tar.gz | \
tar xvz -C /opt/swift --strip 1
;;
ubuntu1604)
curl -sSL https://swift.org/builds/swift-4.2.1-release/ubuntu1604/swift-4.2.1-RELEASE/swift-4.2.1-RELEASE-ubuntu16.04.tar.gz | \
tar xvz -C /opt/swift --strip 1
;;
ubuntu1804)
curl -sSL https://swift.org/builds/swift-4.2.1-release/ubuntu1804/swift-4.2.1-RELEASE/swift-4.2.1-RELEASE-ubuntu18.04.tar.gz | \
tar xvz -C /opt/swift --strip 1
;;
*)
echo "Unsupported operating system: $config_os"
exit 1
esac
# Expose the toolchain to every Buildkite job.
cat >> /etc/buildkite-agent/hooks/environment <<'EOF'
export SWIFT_HOME="/opt/swift"
export PATH="$PATH:${SWIFT_HOME}/usr/bin"
echo "Swift toolchain is at ${SWIFT_HOME}"
EOF
}
### Install tools required by the release process.
{
curl -sSL https://github.com/c4milo/github-release/releases/download/v1.1.0/github-release_v1.1.0_linux_amd64.tar.gz | \
tar xvz -C /usr/local/bin
chown root:root /usr/local/bin/github-release
chmod 0755 /usr/local/bin/github-release
}
### Install Sauce Connect (for rules_webtesting).
{
curl -sSL https://saucelabs.com/downloads/sc-4.5.1-linux.tar.gz | \
tar xvz -C /opt
chown -R root:root /opt/sc-4.5.1-linux
ln -s /opt/sc-4.5.1-linux/bin/sc /usr/local/bin/sc
}
### Clean up and trim the filesystem (potentially reduces the final image size).
{
rm -rf /var/lib/apt/lists/*
fstrim -v /
sleep 3
}
# Image build is done; shut the VM down so it can be snapshotted.
poweroff
| true
|
546256033bc3fbd71a06ad33148bfe36e617d2c7
|
Shell
|
nawalgupta/openkit-server
|
/cloud_images/scripts/create_start_gameeso.sh
|
UTF-8
| 1,572
| 3.3125
| 3
|
[] |
no_license
|
echo "Creating Gameeso Boot configuration..."
cat >/etc/init/gameeso.conf <<EOL
description "Gameeso Game Backend"
start on (local-filesystems and net-device-up IFACE!=lo and started mysql)
stop on shutdown
script
exec /usr/bin/start_gameeso
end script
EOL
# Upstart doesnt work on Docker for obvious reasons, so we run the dependency stuff on our own
if [ "$PACKER_BUILDER_TYPE" = "docker" ]; then
cat >>/usr/bin/start_gameeso <<EOL
redis-server &
EOL
# only start MySQL if in standalone mode
if [ "$GAMEESO_MODE" = "standalone" ]; then
cat >>/usr/bin/start_gameeso <<EOL
/etc/init.d/mysql start
EOL
fi
fi
cat >>/usr/bin/start_gameeso <<EOL
mkdir -p /var/gameeso
chmod 7777 -R /var/gameeso
cd /var/gameeso
if [ ! -d "openkit-server" ]; then
echo "Cloning & installing latest Gameeso server development branch"
git clone -b development https://github.com/Gameeso/openkit-server.git
echo "Configuring the NodeJS-side"
cd openkit-server/openkit_importer
npm install
echo "Creating Push-cert folders"
mkdir -p /var/gameeso/apple_certs/production
mkdir -p /var/gameeso/apple_certs/sandbox
cd ../dashboard
echo "Copying config files..."
cp config/database.sample.yml config/database.yml
cp /home/gameeso/ok_config.rb config/ok_config.rb
bundle install --path vendor/bundle
bundle exec bin/rake db:setup
if [ "$GAMEESO_MODE" = "production" ]; then
bundle exec bin/rake assets:precompile
fi
fi
cd /var/gameeso/openkit-server/dashboard
# Delete pid file if exists
rm tmp/pids/server.pid
bin/rails server
EOL
chmod a+x /usr/bin/start_gameeso
| true
|
ff5bb9419af9fa99e3ec505287240d04298bf4b3
|
Shell
|
ambarish510/maven-multimodule-jacoco
|
/mvn-junit-jacoco/deb/usr/share/fk-build-test/start.sh
|
UTF-8
| 481
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Start the build-metrics-collector service as $USER and record its PID.
#
# Expects prepare_runtime.sh to define: PACKAGE, USER, JAVA_HOME, JAVA_OPTS,
# CLASSPATH, ERROR_FILE, OUT_FILE, PID_FILE.
. "/usr/share/$PACKAGE/prepare_runtime.sh"
echo "Starting service: $PACKAGE"
# $JAVA_OPTS is intentionally left unquoted so it word-splits into
# individual JVM flags.
cmd=("$JAVA_HOME/jre/bin/java" $JAVA_OPTS -jar "$CLASSPATH" "--spring.config.location=file:/etc/$PACKAGE/build-metrics-collector.properties")
# Log the exact command line before launching it.
echo "sudo -u $USER ${cmd[*]}"
sudo -u "$USER" "${cmd[@]}" 2>"$ERROR_FILE" 1>"$OUT_FILE" &
# Capture the background PID directly from $! instead of re-reading the pid
# file, so the recorded and reported values can never disagree.
pid=$!
echo "$pid" > "$PID_FILE"
echo "Service started ($pid)"
| true
|
661fb1e6f8166c9fb3668fca8bf9588bbbe5d949
|
Shell
|
josedejesusAmaya/Terraform-AWS
|
/lesson01/templates/user_data.sh.tpl
|
UTF-8
| 506
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Installs Docker, and runs a container
# NOTE(review): this is a template (.tpl); docker_image and docker_tag below
# are presumably Terraform template variables substituted at render time —
# do not treat them as shell variables.
# Updates packages
yum update -y
# Installs Docker
yum install -y docker
# Starts Docker service
service docker start
# Allows ec2-user to execute Docker
usermod -a -G docker ec2-user
# Runs a Docker container with image ${docker_image}:${docker_tag}
# - runs container in the background
# - exposes container's port 80 in the host's port 80
# - restarts container if it stops
docker run -d -p 80:80 --restart=always ${docker_image}:${docker_tag}
| true
|
10dcc37795eb9d70e604097b7cac0cdb5fd0bda5
|
Shell
|
ekuric/noobaa-core
|
/src/deploy/rpm/create_rpm.sh
|
UTF-8
| 2,994
| 4.15625
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Trace prefix: show function name, source file and line for every traced command.
export PS4='\e[36m+ ${FUNCNAME:-main}@${BASH_SOURCE}:${LINENO} \e[0m'
set -x
# Defaults; files_location can be overridden with -l/--location.
spec_name="noobaa.spec"
files_location="/tmp/"
# Normalize the command line with getopt, then reset the positional params.
OPTIONS=$( getopt -o 'h,l:' --long "help,location:" -- "$@" )
eval set -- "${OPTIONS}"
# Print usage and exit. Disables tracing first so the help text stays readable.
function usage(){
set +x
echo "$0 [options]"
echo -e "\nPlace the noobaa tarball, deploy_base.sh and the noobaa.spec in a folder"
echo -e "you can change the default location (\e[32m${files_location}\e[0m) by using the -l/--location flag\n"
echo "-l --location - The files location (default: ${files_location})"
echo "-h --help - Will show this help"
exit 0
}
# Consume the normalized options until the "--" separator.
while true
do
case ${1} in
        -l|--location) files_location=${2};
        shift 2;;
        -h|--help) usage;;
        --) shift 1;
        break ;;
esac
done
# Patch the RPM spec in place so it carries the version/revision parsed from
# the tarball name.
# Globals:   files_location (read), spec_name (read)
# Arguments: $1 - version string, $2 - revision string
function defining_the_spec(){
local version=${1}
local revision=${2}
# Quote the target path so a files_location containing spaces still works,
# and apply both substitutions in a single sed pass instead of two.
sed -i \
-e "s/%define revision.*/%define revision ${revision}/g" \
-e "s/%define noobaaver.*/%define noobaaver ${version}/g" \
"${files_location}/${spec_name}"
}
# Build the source RPM from the spec and rebuild the binary RPM from it.
# Globals:   files_location, spec_name (read)
# Arguments: $1 - version, $2 - revision (used to locate the tarball)
function create_rpm(){
local version=${1}
local revision=${2}
#We need to install rpm tools once, if we dont have them we can install
#with the 2 line below:
#yum install -y tree dnf
#dnf install rpm-build rpm-devel rpmlint rpmdevtools
# Create the ~/rpmbuild tree (SPECS/, SOURCES/, ...) if it does not exist.
rpmdev-setuptree
echo "time for ${0} in sec: ${SECONDS}"
# Stage the spec and sources where rpmbuild expects them.
cp ${files_location}/${spec_name} ~/rpmbuild/SPECS/
cp ${files_location}/noobaa-NVA-${version}-${revision}.tar.gz ~/rpmbuild/SOURCES/
cp ${files_location}/deploy_base.sh ~/rpmbuild/SOURCES/
current_directory=$(pwd)
cd ~/rpmbuild/SPECS/
# rpmbuild -bs prints "Wrote: <path>"; the ${srpm//Wrote: /} expansions
# below strip that prefix to recover the SRPM path.
local srpm=$(rpmbuild -bs noobaa.spec)
cd ${current_directory}
# Publish the SRPM, then rebuild the binary RPM from it.
cp ${srpm//Wrote: /} build/public/
cd ~/rpmbuild/SPECS/
echo "+++ ${srpm//Wrote: /} +++"
rpmbuild --rebuild ${srpm//Wrote: /}
}
# Ensure exactly one noobaa tarball plus deploy_base.sh and noobaa.spec are
# present in ${files_location}; exits the script otherwise.
# Globals:   files_location (read)
function verify_pre_requirements(){
# Expand the glob into an array instead of parsing `ls -l | wc -l`, which
# is fragile and errors when nothing matches.
local tarballs=( "${files_location}"/noobaa*tar.gz )
# Without nullglob an unmatched glob stays literal, so -e detects "no tarball".
if [ ! -e "${tarballs[0]}" ]
then
echo "there is no tarball in ${files_location}, Exiting"
exit 1
fi
if [ "${#tarballs[@]}" -ne 1 ]
then
echo "The number of noobaa tars in ${files_location} is ${#tarballs[@]}"
echo "the number of noobaa files should be 1, Exiting."
exit 1
fi
if [ ! -f "${files_location}/deploy_base.sh" ]
then
echo "there is no deploy_base.sh in ${files_location}, Exiting"
exit 1
fi
if [ ! -f "${files_location}/noobaa.spec" ]
then
echo "there is no noobaa.spec in ${files_location}, Exiting"
exit 1
fi
}
# Entry point: derive version/revision from the tarball name, validate the
# inputs, patch the spec and build the RPM.
function main() {
# Tarball is named noobaa-NVA-<version>-<revision>.tar.gz; strip prefix and
# suffix, then split version/revision on "-".
# NOTE(review): this parses the tarball name BEFORE verify_pre_requirements
# runs, so a missing tarball surfaces here first as an ls error.
local details=($(ls ${files_location}/noobaa*gz | sed -e 's/.*noobaa-NVA-//g' -e 's/.tar.*//g' -e 's/-/ /g'))
local version=${details[0]}
local revision=${details[1]}
verify_pre_requirements
defining_the_spec ${version} ${revision}
create_rpm ${version} ${revision}
}
main
| true
|
daae4872ad959351c85b7d6af52cff613b20177b
|
Shell
|
michaelsalisbury/builder
|
/defaults/defaults.cifs-ucf.edu.mount.sh
|
UTF-8
| 1,763
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# Depends on cifs-utils and winbind
# Ubuntu 12.10 does not support options; forceuid, forcegid
#
# Mounts every CIFS share listed in the current user's ~/.cifs-<name>-shares
# files, using credentials from the matching ~/.cifs-<name>-cred file.
#
# Build an awk program (into $awk) that prints "user uid gid home" for the
# current user's /etc/passwd entry. $(whoami) is expanded here; the \$N
# escapes survive as awk field references.
read -d $'' awk << END-OF-AWK
{if ((\$1 == "$(whoami)")||(\$3 == "$(whoami)"))
{print \$1" "\$3" "\$4" "\$6;}
}
END-OF-AWK
read username uid gid home < <(awk -F: "${awk}" /etc/passwd)
# These options worked with Ubuntu 11.10 and 12.04
opt=(
iocharset=utf8
file_mode=0750
dir_mode=0750
uid=${uid}
forceuid=${uid}
gid=${gid}
forcegid=${gid}
sec=ntlmssp
)
# changes made for compatability with Ubuntu 12.10
# (this second assignment replaces the array above)
opt=(
iocharset=utf8
file_mode=0750
dir_mode=0750
uid=${uid}
gid=${gid}
sec=ntlmssp
noperm
nounix
)
# Join the array elements with spaces, then turn them into the
# comma-separated list mount.cifs expects.
opt=${opt[*]}
opt=${opt// /,}
# One iteration per credentials file ~/.cifs-<fldr>-cred.
while read cred; do
echo '######################################################################'
echo
# Reduce the path to the file name, extract the <fldr> middle component,
# then build the mount.cifs credentials= option.
cred=${cred##*/}
fldr=${cred%-*}
fldr=${fldr#*-}
cred="credentials=${home}/${cred}"
# One iteration per share listed in ~/.cifs-<fldr>-shares.
while read share; do
# Skip comment lines (optionally preceded by one space).
[[ "${share}" =~ ^\ ?#+ ]] && continue
name=${share##*/}
mnt="${home}/${fldr}/${name}"
# Retry the unmount until mount.cifs reports the target is gone.
match="(has been unmounted|not mounted)"
for t in {0..10}; do
sudo umount -t cifs -v "${mnt}" 2>&1 | egrep "${match}" &> /dev/null && break
echo ERROR :: umount loop \#$t
done
mkdir -pv "${mnt}"
sudo mount.cifs "${share}" "${mnt}" -o ${opt},${cred} 2>&1
# Record success/failure of the mount for the report below.
! (( $? )) && task="SUCCESSFUL" || task="FAILED"
echo " SHARE = ${share}"
echo " MOUNT = ${mnt}"
echo " OPTIONS = ${opt}"
echo "CREDENTIALS = ${cred}"
echo " TASK = ${task}"
echo
done < <(cat ${home}/.cifs-${fldr}-shares)
done < <(ls ${home}/.cifs-*-cred)
| true
|
7612bdc687f07da9eed1add19ac61f8e1993eb5e
|
Shell
|
shambalulu/enterprise-installation
|
/setup-worker.sh
|
UTF-8
| 1,285
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision a Travis Enterprise worker host: install Docker (LXC driver),
# pull the worker and language images, and start the worker.
set -e
export DEBIAN_FRONTEND=noninteractive
# Register the (legacy) Docker APT repo and install a pinned Docker 1.0.0.
wget -qO- https://get.docker.io/gpg | apt-key add -
echo deb http://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list
apt-get update
apt-get install -y linux-image-extra-`uname -r` lxc lxc-docker-1.0.0
# On AWS, keep Docker's data on the ephemeral /mnt volume.
if [[ $AWS ]]; then
DOCKER_MOUNT_POINT="--graph=/mnt/docker"
fi
# use LXC, and disable inter-container communication
# ($DOCKER_MOUNT_POINT is empty when not on AWS)
echo 'DOCKER_OPTS="--icc=false --exec-driver=lxc '$DOCKER_MOUNT_POINT'"' >> /etc/default/docker
service docker restart
sleep 2
docker pull quay.io/travisci/te-worker:latest
docker tag quay.io/travisci/te-worker te-worker
# pick the languages you are interested in
langs='android erlang go haskell jvm node-js perl php python ruby'
tag=latest
for lang in $langs; do
docker pull quay.io/travisci/travis-$lang:$tag
docker tag quay.io/travisci/travis-$lang:$tag travis:$lang
done
# run the install script
docker run --rm te-worker cat /usr/local/travis/src/host.sh | bash
te start
# enable memory and swap accounting (optional, but recommended)
# (takes effect only after update-grub and a reboot)
sed -i 's/GRUB_CMDLINE_LINUX=""/GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"/' /etc/default/grub
echo 'Installation complete.'
echo 'It is recommended that this host is restarted before running jobs through it'
| true
|
c7df564289a60d0327451535cfd982449fda90a2
|
Shell
|
nobu666/dotfiles
|
/setup.sh
|
UTF-8
| 757
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Symlink the repository's dotfiles into $HOME, install diff-highlight and
# p4merge helpers, and bootstrap NeoBundle for vim.
set -e
# Link every top-level dotfile in this repo into $HOME.
# The original `find . -name '.*' | grep -v "^.$" | xargs basename` is broken:
# with two results basename treats the second as a suffix, and with three or
# more it errors out. Glob the dotfiles directly instead.
for file in .[!.]* ..?*
do
  # Unmatched patterns stay literal without nullglob; skip them.
  [[ -e "$file" ]] || continue
  # Do not link the repo's own .git directory into $HOME.
  [[ "$file" == .git ]] && continue
  ln -fs "$(pwd)/${file}" "${HOME}/${file}"
done
# Install diff-highlight into ~/bin if git's contrib copy exists and it is
# not already on PATH.
if ! PATH=~/bin:$PATH type -P diff-highlight >/dev/null 2>&1; then
(
for d in /usr/local/share /usr/share; do
if [[ -x $d/git-core/contrib/diff-highlight/diff-highlight ]]; then
[[ -d ~/bin ]] || mkdir ~/bin
ln -sfn "$d/git-core/contrib/diff-highlight/diff-highlight" ~/bin/
exit
fi
done
)
fi
# Install the bundled p4merge wrapper system-wide.
sudo cp p4merge /usr/local/bin/p4merge
sudo chmod +x /usr/local/bin/p4merge
# Bootstrap NeoBundle on first run.
if [ ! -d ~/.vim/bundle ]
then
mkdir -p ~/.vim/bundle
git clone git://github.com/Shougo/neobundle.vim ~/.vim/bundle/neobundle.vim
echo "run: vim -c ':NeoBundleInstall'"
fi
| true
|
7dd6bf134e6c4ad8e17af11f0b2b7a0debb6eac5
|
Shell
|
PengWEI9/Vix
|
/myki-utils/.svn/pristine/7d/7dd6bf134e6c4ad8e17af11f0b2b7a0debb6eac5.svn-base
|
UTF-8
| 1,046
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
#################################################################################
#
# Vix Technology Licensed software
# (C) 2015 All rights reserved
#
#==============================================================================
#
# This is a wrapper around unlocked_transferud.sh; it prevents more than one
# instance of unlocked_transferud.sh from executing at any one time.
#
# It also prevents unlocked_transferud.sh and unlocked_transfer_alarms.sh
# from executing at the same time by coordinating with transfer_alarms.sh.
#
#################################################################################
# NOTE: we must always take the transferud lock first to prevent possible
# deadlock. We will wait for up to 1 hours for transfer alarms to finish
# as by observation no run of transfer alarms should take this long.
# lockrun.sh takes the two lock files (-f/-F), waits up to -W seconds, and
# runs the command after "--" with our original arguments forwarded.
/afc/bin/lockrun.sh -f /tmp/.lock-transferud -F /tmp/.lock-transfer_alarms -W 3600 -- /afc/bin/unlocked_transferud.sh "$@"
# Propagate the wrapped script's exit status to our caller.
exit $?
| true
|
91f976b8c5b99b217ea389332052547843c1a729
|
Shell
|
amhuber/logsearch-boshrelease
|
/jobs/curator/templates/bin/pre-start
|
UTF-8
| 1,054
| 2.90625
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
# BOSH pre-start for the curator job: wait for os-config, install the bundled
# elasticsearch-curator wheel, then (de)register the cron entries selected by
# the job properties (the <% %> tags are ERB, evaluated at template render time).
set -eu
# Wait for os-config script to complete
sleep 15
while true; do
if ! ps aux | grep os-config/bin/pre-start | grep -q -v grep; then
break
fi
echo "Waiting for os-config to complete..."
sleep 15
done
apt-get install -y python3-pip
# Offline install: resolve dependencies only from the vendored package dir.
pip install --no-index --find-links /var/vcap/packages/curator/vendor/ /var/vcap/packages/curator/elasticsearch_curator-7.0.0-py3-none-any.whl
<% if p('curator.execute.hourly') %>
# Execute Curator hourly
cp /var/vcap/jobs/curator/bin/run-curator /etc/cron.hourly
<% else %>
# Remove pre-existing hourly script
rm -f /etc/cron.hourly/run-curator
<% end %>
<% if p('curator.execute.daily') %>
# Execute Curator daily
cp /var/vcap/jobs/curator/bin/run-curator /etc/cron.daily
<% else %>
# Remove pre-existing daily script
rm -f /etc/cron.daily/run-curator
<% end %>
<% if p('curator.execute.weekly') %>
# Execute curator weekly
cp /var/vcap/jobs/curator/bin/run-curator /etc/cron.weekly
<% else %>
# Remove pre-existing weekly script
rm -f /etc/cron.weekly/run-curator
<% end %>
| true
|
480f74cc37826e8df464f7045d8eb9acfaf482a4
|
Shell
|
robier/dokman
|
/src/helpers/styleText.sh
|
UTF-8
| 1,257
| 3.84375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash

###
# Style given text for terminal output.
#
# When rich text is supported (as reported by the external helper
# `isRichText`), emits the message wrapped in ANSI escape codes for the
# requested styles; otherwise emits the plain message. No trailing newline.
#
# @param1 string Message
# @param2... string Style name(s): bold, dim, underlined, blink, inverted, hidden
###
function styleText
{
    local message="${1}"

    if ! isRichText ; then
        echo -en "${message}"
        return
    fi

    # shellcheck disable=SC2034
    local STYLE_bold='\033[97;1m'
    # shellcheck disable=SC2034
    local STYLE_dim='\033[97;2m'
    # shellcheck disable=SC2034
    local STYLE_underlined='\033[97;4m'
    # shellcheck disable=SC2034
    local STYLE_blink='\033[97;5m'
    # shellcheck disable=SC2034
    local STYLE_inverted='\033[97;7m'
    # shellcheck disable=SC2034
    local STYLE_hidden='\033[97;8m'

    # shellcheck disable=SC2034
    local RESET_bold='\033[97;21m'
    # shellcheck disable=SC2034
    local RESET_dim='\033[97;22m'
    # shellcheck disable=SC2034
    local RESET_underlined='\033[97;24m'
    # shellcheck disable=SC2034
    local RESET_blink='\033[97;25m'
    # shellcheck disable=SC2034
    local RESET_reverse='\033[97;27m'
    # shellcheck disable=SC2034
    local RESET_hidden='\033[97;28m'

    local start=""
    local reset=""
    local style s r
    # Bug fixes vs the original: iterate from $2 (the original's "${@:1}"
    # also treated the message itself as a style name), and use indirect
    # expansion instead of eval (the original eval string was missing its
    # closing brace, causing a bad-substitution error).
    for style in "${@:2}"
    do
        s="STYLE_${style}"
        r="RESET_${style}"
        # NOTE(review): STYLE_inverted has no matching RESET_inverted (the
        # table defines RESET_reverse) — unknown names expand to empty here;
        # confirm whether that pairing is intended.
        start+="${!s}"
        reset+="${!r}"
    done

    echo -en "${start}${message}${reset}"
}
| true
|
a66b172d5eb3676a1eb0a013963819f0b1e2f213
|
Shell
|
npmitchell/morphsnakes_wrapper
|
/run_morphsnakes_single_growDemo.sh
|
UTF-8
| 2,765
| 2.71875
| 3
|
[] |
no_license
|
# Example with parameters set for CAAX excellent
#
# Construct mesh for each timepoint
datDir="/mnt/crunch/48Ygal4UASCAAXmCherry/201902072000_excellent/Time6views_60sec_1p4um_25x_obis1p5_2/data/deconvolved_16bit/";
cd $datDir
tp=85;
for (( num=51; num<=100; num++ )); do
tpx=$(printf "%03d" $(( tp )));
idx=$(printf "%03d" $(( num )));
prev=$(printf "%03d" $(( num-1 )))
mslsDir="${datDir}msls_output_growEvolveDemo_tp${tpx}/";
# for all iterations, use the selected timepoint's h5 output
initls=${mslsDir}msls_grow000${prev}.h5
python /mnt/data/code/morphsnakes_wrapper/morphsnakes_wrapper/run_morphsnakes.py -i Time_000${tpx}_c1_stab_Probabilities.h5 -init_ls $initls -o $mslsDir -prenu 0 -presmooth 0 -ofn_ply mesh_grow000${idx}.ply -ofn_ls msls_grow000${idx}.h5 -l1 1 -l2 1 -nu 0.0 -postnu 0 -smooth 0.2 -postsmooth 5 -exit 0.00010 -channel 1 -dtype h5 -permute zyxc -ss 4 -include_boundary_faces -center_guess 175,75,100 -rad0 10 -n 2 ;
done
# -postsmooth 1
# -save
# Adjustment: connect to other series by postnu=3 and prenu=-3
for (( num=0; num<=100; num++ )); do tpx=$(printf "%03d" $(( tp ))); idx=$(printf "%03d" $(( num ))); prev=$(printf "%03d" $(( num-1 ))) ; mslsDir="${datDir}msls_output_growEvolveDemo_tp${tpx}/"; initls=${mslsDir}msls_grow000${prev}.h5; python /mnt/data/code/morphsnakes_wrapper/morphsnakes_wrapper/run_morphsnakes.py -i Time_000${tpx}_c1_stab_Probabilities.h5 -init_ls $initls -o $mslsDir -prenu -3 -presmooth 0 -ofn_ply mesh_grow000${idx}.ply -ofn_ls msls_grow000${idx}.h5 -l1 1 -l2 1 -nu 0.0 -postnu 3 -smooth 0.2 -postsmooth 5 -exit 0.00010 -channel 1 -dtype h5 -permute zyxc -ss 4 -include_boundary_faces -center_guess 175,75,100 -rad0 10 -n 2 ; done
# Note: for first timepoint, may need to run the above a few times with for (( num=0; num<=0; num++ )); do...
# Note: to ignore boundary faces, drop the -include_boundary_faces flag in the above command
############################
# Run mlx script to smooth
############################
# mlxprogram for CAAX, LifeAct:
# mlxprogram='surface_rm_resample10k_reconstruct_LS3_25wu.mlx'
mlxprogram='/mnt/data/code/meshlab_codes/laplace_surface_rm_resample30k_reconstruct_LS3_1p2pc_ssfactor4.mlx'
# mlxprogram for Histone data:
# mlxprogram=''
fns=$mslsDir$ofn_ply*'.ply'
for pcfile in $fns; do
# Clean up mesh file for this timepoint using MeshLab -----------------
outputmesh=${pcfile/$ofn_ply/'mesh_apical_'}
if [ ! -f $outputmesh ]; then
echo $outputmesh
command="meshlabserver -i $pcfile -o $outputmesh -s $mlxprogram -om vn"
$command
else
echo "File already exists: "$outputmesh
fi
done
| true
|
8e30089e73b56c3f59858333cfc3c04bcf041169
|
Shell
|
SixTrack/SixDesk
|
/old/sixjobs/compare_scripts
|
UTF-8
| 422
| 2.75
| 3
|
[] |
no_license
|
#!/bin/sh
# Diff every script in the current directory against its reference copy
# under $SIXDESK/sixjobs/scripts, skipping the "backup" entry.
# Iterate with a glob instead of parsing `ls` output, which breaks on
# names containing whitespace.
for i in *
do
if test "$i" != "backup"
then
echo "======================="
echo "$i"
echo "======================="
diff "$i" "$SIXDESK/sixjobs/scripts/$i"
fi
done
| true
|
f491964a14c7c8a2c5167faff93be2998bf8c79c
|
Shell
|
mr6r4y/set-ups
|
/install-base-dev.sh
|
UTF-8
| 505
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
#: Title: install-base-dev.sh
#: Description: Installation of Python, Ruby, build-essential, etc
# Resolve the script's own directory so the sourced files are found
# regardless of the caller's working directory.
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
source "$SCRIPT_DIR/lib/common.sh"
source "$SCRIPT_DIR/config.sh"
# NOTE(review): set -e is enabled only after the two `source` lines above,
# so failures inside them do not abort the script — confirm intended.
set -e
sudo apt-get update
sudo apt -y install git git-lfs \
vim \
fonts-dejavu \
curl \
global \
libgconf2-4 # Turtl needs it
# Compiler toolchain and Python dev headers.
sudo apt-get -y install \
build-essential \
cmake \
python-dev \
python-pip \
python3-dev \
python3-pip
| true
|
d90b8f176e10aa3325f03fad76af11316e0bdea8
|
Shell
|
legacy-codedigger/aix-4.1.3
|
/aix-4.1.3/bldenv/bldtools/bldhistorypath.sh
|
UTF-8
| 906
| 2.859375
| 3
|
[] |
no_license
|
#! /bin/ksh
# @(#)47 1.3 src/bldenv/bldtools/bldhistorypath.sh, bldtools, bos412, GOLDA411a 1/21/92 17:57:09
#
# COMPONENT_NAME: (BLDTOOLS) BAI Build Tools
#
# FUNCTIONS: bldhistorypath
#
# ORIGINS: 27
#
# (C) COPYRIGHT International Business Machines Corp. 1991, 1992
# All Rights Reserved
# Licensed Materials - Property of IBM
#
# US Government Users Restricted Rights - Use, duplication or
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
#
#
# NAME: bldhistorypath
#
# FUNCTION: Return the path of the build history directory.
#
# INPUT:
#
# OUTPUT: build history path is written to stdout
#
# SIDE EFFECTS: none
#
# EXECUTION ENVIRONMENT: the build process environment
#
# RETURNS: 0 (successful) or 1 (failure)
#
# Pull in logging helpers (log) and environment constants (SUCCESS).
. bldloginit
rc=$SUCCESS
# Reject any arguments and require TOP; `log -x` reports the error
# (and, per the usage elsewhere in this toolset, does not return normally).
[[ $# = 0 ]] || log -x +l -c$0 "illegal syntax"
[[ -n "$TOP" ]] || log -x +l -c$0 "TOP undefined"
# ksh builtin `print` writes the history path to stdout.
print $TOP/HISTORY
exit $rc
| true
|
76f288999a2f7445d69a46f61e2c18896b4d1c15
|
Shell
|
AdevintaSpain/Leku
|
/install_codestyle.sh
|
UTF-8
| 819
| 3.4375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Installs Schibsted's IntelliJ configs into your user configs.
echo "Installing Schibsted IntelliJ configs..."
CONFIGS="./config"
# Probe both macOS (~/Library/Preferences/...) and Linux (~/.IntelliJIdea*/config
# style) locations for every IntelliJ/Android Studio version found.
for i in $HOME/Library/Preferences/IntelliJIdea* \
         $HOME/Library/Preferences/IdeaIC* \
         $HOME/Library/Preferences/AndroidStudio* \
         $HOME/.IntelliJIdea*/config \
         $HOME/.IdeaIC*/config \
         $HOME/.AndroidStudio*/config
do
  # Unmatched globs stay literal and fail this -d test, so they are skipped.
  if [[ -d $i ]]; then
    # Install codestyles
    mkdir -p $i/codestyles
    cp -frv "$CONFIGS/checkstyle/Android.xml" $i/codestyles
    cp -frv "$CONFIGS/checkstyle/Java.xml" $i/codestyles
  fi
done
echo "Done."
echo ""
echo "Restart IntelliJ and/or AndroidStudio, go to preferences, and apply 'SchibstedAndroid' or 'SchibstedJava' scheme as check style preference."
|
bb7abc37fd965879df2b8b64df36da54dd8866e5
|
Shell
|
chischaschos/code-examples
|
/bash-examples/variables.sh
|
UTF-8
| 509
| 3.328125
| 3
|
[] |
no_license
|
#!/usr/bin/bash
# Tutorial script demonstrating bash variable assignment, continuation
# lines, here-documents and reading multi-line values into variables.
BLA=asdfjasdf
BLA2="$BLA . asdf"
# Braces delimit the name so the literal suffix can follow directly.
BLA3=${BLA}asdf
echo $BLA
echo $BLA2
echo $BLA3
# no spaces between LH and RH
v1="123"
# the spaces on the left will be included
v2="asdf\
123\
456\
asdf"
echo $v1
echo $v2
# or heredocs http://tldp.org/LDP/abs/html/here-docs.html
# (the <<- form strips leading tabs, not spaces, from the body)
cat <<-mytabssupressednotspaces
sdf
tabignored
mytabssupressednotspaces
# or storing to a variable, https://ss64.com/bash/read.html
read -r -d '' VAR <<-EOM
This is line 1.
This is line 2.
Line 3.
EOM
# NOTE: $VAR is unquoted, so the embedded newlines collapse to spaces here.
echo $VAR
| true
|
144eb14a07bcfd0361c1e8aa88ce2ad5ad08d7db
|
Shell
|
padsof-uam/Inteligencia-Artificial
|
/P1/a.sh
|
UTF-8
| 2,272
| 3.515625
| 4
|
[] |
no_license
|
#Clean
# Remove previously generated sections and the top-level document.
rm Memoria/*
rm Memoria.*
# For every lisp source: split it at ';;%%' markers into per-function chunks
# (files code1, code2, ...), then emit one LaTeX section per chunk.
files_lsp=$(ls *.lisp)
echo $files_lsp
for file in $files_lsp ; do
# awk writes each marker-delimited chunk to file "code<n>".
codes=$(awk '/;;%%/{n++}{print > f n}' f=code $file)
functions=$(ls code*)
for fun in $functions; do
# Extract the function name and full argument list from the defun line.
name_arg=$(cat $fun | grep defun | awk -v FS="defun " '{print $2}')
name=$(echo $name_arg | cut -d '(' -f 1 | cut -d ' ' -f 1)
# Emit the aibox skeleton for this function. The escaping below produces a
# literal backslash in the .tex output (\\\b -> \b after echo processing).
echo "\\\begin{aibox}{\\\function}" > Memoria/$file$name.tex
echo ";; $name" >> Memoria/$file$name.tex
echo >> Memoria/$file$name.tex
echo "SYNTAX: $name_arg" >> Memoria/$file$name.tex
echo "\end{aibox}" >> Memoria/$file$name.tex
echo >> Memoria/$file$name.tex
echo "\\\begin{aibox}{\examples}" >> Memoria/$file$name.tex
echo "\\\begin{alltt}" >> Memoria/$file$name.tex
echo "\end{alltt}" >> Memoria/$file$name.tex
echo >> Memoria/$file$name.tex
echo "\end{aibox}" >> Memoria/$file$name.tex
echo >> Memoria/$file$name.tex
echo "\\\begin{aibox}{\\\comments}" >> Memoria/$file$name.tex
echo >> Memoria/$file$name.tex
echo "\end{aibox}" >> Memoria/$file$name.tex
echo "\\\begin{aibox}{\\\answers}" >> Memoria/$file$name.tex
echo >> Memoria/$file$name.tex
echo "\end{aibox}" >> Memoria/$file$name.tex
echo "\\\begin{aibox}{\othercomments}" >> Memoria/$file$name.tex
echo >> Memoria/$file$name.tex
echo "\end{aibox}" >> Memoria/$file$name.tex
echo "\\\begin{aibox}{\pseudocode}" >> Memoria/$file$name.tex
echo >> Memoria/$file$name.tex
echo "\end{aibox}" >> Memoria/$file$name.tex
echo "\\\begin{aibox}{\\\code}" >> Memoria/$file$name.tex
echo >> Memoria/$file$name.tex
#With verbatim this is not needed.
#sed -i 's/#/\\#/g' $fun
# Join the whole chunk into one pattern space and replace tabs with spaces.
sed -i ':a;N;$!ba;s/\t/ /g' $fun
echo "\\\begin{alltt}" >> Memoria/$file$name.tex
cat $fun >> Memoria/$file$name.tex
echo "\end{alltt}" >> Memoria/$file$name.tex
echo "\end{aibox}" >> Memoria/$file$name.tex
done
done
# Write the document preamble, then \input every generated section.
cat << EOF > Memoria.tex
\\documentclass{aitemplate}
\\usepackage{ai}
\\usepackage{alltt}
\\begin{document}
\\printtitle{1}
EOF
texs=$(ls Memoria/*.tex)
echo $texs
for tex in $texs; do
echo "\input{$tex}" >> Memoria.tex
echo "\\\newpage">>Memoria.tex
done
echo "\end{document}" >> Memoria.tex
echo Source generated.
echo Cleaning auxiliary files
rm code*
echo Generating pdf...
latexmk -pdf -f -silent Memoria.tex
|
9260f7454128dfb37cc6f68119d802a21f751c99
|
Shell
|
nnstreamer/TAOS-CI
|
/ci/taos/plugins-good/pr-prebuild-indent.sh
|
UTF-8
| 3,377
| 3.21875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
##
# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
# @file pr-prebuild-indent.sh
# @brief Check the code formatting style with GNU indent
# @see https://www.gnu.org/software/indent/
# @see https://github.com/nnstreamer/TAOS-CI
# @author Geunsik Lim <geunsik.lim@samsung.com>
#
# @brief [MODULE] ${BOT_NAME}/pr-prebuild-indent
function pr-prebuild-indent(){
  echo "########################################################################################"
  echo "[MODULE] ${BOT_NAME}/pr-prebuild-indent: Check the code formatting style with GNU indent"
  check_cmd_dep indent

  # Note that you have to install an up-to-date GNU indent package.
  INDENT_COMMAND="indent"
  # 'command -v' is the portable replacement for 'which' (which is not
  # guaranteed to exist and prints noise on success).
  if ! command -v "${INDENT_COMMAND}" >/dev/null 2>&1; then
    echo "Error: ${INDENT_COMMAND} is not available."
    echo "       Please install ${INDENT_COMMAND}."
    exit 1
  fi

  # One way to make sure you are following our coding style is to run your code
  # (remember, only the *.c files, not the headers) through GNU indent.
  # Both variables hold whitespace-separated file lists and are expanded
  # unquoted on purpose; file names containing spaces are not supported here.
  FILES_IN_COMPILER=$(find $SRC_PATH/ -iname '*.cpp' -o -iname '*.c')
  FILES_TO_BE_TESTED=$(git ls-files $FILES_IN_COMPILER)

  echo "[DEBUG] ${BOT_NAME}/pr-prebuild-indent: run"
  # Reformat the tracked sources in place; any resulting git diff means the
  # submitted code did not follow the project's indent style.
  indent \
    --braces-on-if-line \
    --case-brace-indentation0 \
    --case-indentation2 \
    --braces-after-struct-decl-line \
    --line-length80 \
    --no-tabs \
    --cuddle-else \
    --dont-line-up-parentheses \
    --continuation-indentation4 \
    --honour-newlines \
    --tab-size8 \
    --indent-level2 \
    $FILES_TO_BE_TESTED

  # A non-empty patch file means indent changed something -> style violation.
  indent_format_file="indent-format.patch"
  git diff > ../report/${indent_format_file}
  PATCHFILE_SIZE=$(stat -c%s ../report/${indent_format_file})
  if [[ $PATCHFILE_SIZE -ne 0 ]]; then
    echo "[DEBUG] GNU indent is failed. Update your code to follow convention after reading ${indent_format_file}."
    check_result="failure"
    global_check_result="failure"
  else
    check_result="success"
  fi

  # Report the verdict back to GitHub through the CI bot helper.
  if [[ $check_result == "success" ]]; then
    echo "[DEBUG] Passed. A indent formatting style."
    message="Successfully, The commits are passed."
    cibot_report $TOKEN "success" "${BOT_NAME}/pr-prebuild-indent" "$message" "${CISERVER}${PRJ_REPO_UPSTREAM_LOCAL}/ci/${dir_commit}/" "$GITHUB_WEBHOOK_API/statuses/$input_commit"
  else
    echo "[DEBUG] Failed. A indent formatting style."
    message="Oooops. The component you are submitting with incorrect indent-format style."
    cibot_report $TOKEN "failure" "${BOT_NAME}/pr-prebuild-indent" "$message" "${CISERVER}${PRJ_REPO_UPSTREAM_LOCAL}/ci/${dir_commit}/" "$GITHUB_WEBHOOK_API/statuses/$input_commit"
  fi
}
| true
|
674604e0ce5fcdec80e9fa99f77f138848ae7e7c
|
Shell
|
outeredge/dredger
|
/dredger
|
UTF-8
| 2,266
| 3.734375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Check if Dredger is running on Eclipse Che
if [ -n "$CHE_PROJECTS_ROOT" ]
then
# Only emit colour escape codes when attached to a real terminal.
if tty -s
then
fgRed=$(tput setaf 1)
fgGreen=$(tput setaf 2)
fgNormal=$(tput sgr0)
fgBold=$(tput bold)
fi
# jq is required below to parse the Che workspace JSON.
hash jq 2>/dev/null || { echo -e >&2 "${fgRed}I require jq when running in Eclipse Che but it's not installed. Aborting!${fgNormal}"; exit 1; }
# Ask Docker where the Che projects directory is mounted on the host.
DREDGER_CHE_MOUNT=$(docker inspect --format '{{ range .Mounts }}{{ if eq .Destination "'"$CHE_PROJECTS_ROOT"'" }}{{ .Source }}{{ end }}{{ end }}' $HOSTNAME 2>/dev/null) || {
echo -e >&2 "${fgRed}Unable to determine Eclipse Che host mount point. Aborting!${fgNormal}";
exit 1;
}
# Fetch the workspace description from the Che REST API.
DREDGER_CHE_INFO=$(curl -s $CHE_API/workspace/$CHE_WORKSPACE_ID?token=$CHE_MACHINE_TOKEN) || {
echo -e >&2 "${fgRed}Unable to connect to Eclipse Che API. Aborting!${fgNormal}";
exit 1;
}
# Make sure we are somewhere below the projects root before detecting the project.
if [[ ${PWD} != *"$CHE_PROJECTS_ROOT"* ]]
then
cd $CHE_PROJECTS_ROOT/*/;
fi
# Project name = first path component below the projects root.
DREDGER_CHE_PROJECT=$(echo ${PWD#$CHE_PROJECTS_ROOT} | cut -d '/' -f 2)
if [ -z "$DREDGER_CHE_PROJECT" ]
then
# Fall back to the first project listed in the workspace configuration.
DREDGER_CHE_PROJECT=$(echo $DREDGER_CHE_INFO | jq -re '.config .projects | first | .name | select (.!=null)') || {
echo -e >&2 "${fgRed}Could not detect Eclipse Che project name. Aborting!${fgNormal}";
exit 1;
}
fi
DREDGER_CHE_WORKSPACE=$(echo $DREDGER_CHE_INFO | jq -re '.config .name | select (.!=null)' || basename $DREDGER_MOUNT)
# Optional per-workspace host override exposed via a 'dredgerHost' attribute.
DREDGER_CHE_HOST=$(echo $DREDGER_CHE_INFO | jq -re 'first(..|.dredgerHost? | select(.!=null))')
host_ips=$(hostname -I)
# Set Dredger defaults
export DREDGER_NAME=$DREDGER_CHE_PROJECT.$DREDGER_CHE_WORKSPACE${DREDGER_CHE_HOST:+.$DREDGER_CHE_HOST}
export DREDGER_HOST=$DREDGER_CHE_PROJECT.$DREDGER_CHE_WORKSPACE.${DREDGER_CHE_HOST:-localhost}
# First address reported by 'hostname -I'.
export DREDGER_HOST_IP=${host_ips%% *}
export DREDGER_MOUNT=$DREDGER_CHE_MOUNT/$DREDGER_CHE_PROJECT
export DREDGER_PWD=$CHE_PROJECTS_ROOT/$DREDGER_CHE_PROJECT
echo -e "Detected Eclipse Che project ${fgBold}$DREDGER_CHE_WORKSPACE/$DREDGER_CHE_PROJECT${fgNormal} with host ${fgGreen}http://$DREDGER_HOST${fgNormal}"
fi
# Delegate everything to the Dredger Makefile; "$1" is the make target, the
# remaining arguments are forwarded through ARGS.
exec make --silent ${CHE_PROJECTS_ROOT:+"--include-dir=$CHE_PROJECTS_ROOT/$DREDGER_CHE_PROJECT"} -f /usr/local/dredger/Makefile ARGS="${*:2}" "$1"
| true
|
8595e2950c7941ab55ec70924994352950c94ff5
|
Shell
|
0x000NULL/S-GUI
|
/Linux/IGNOREMEEEEE/include/profiles
|
UTF-8
| 5,577
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/sh
#################################################################################
#
# hardeningone
# ------------------
#
# Copyleft GPL3 2010-2011, Mauro Risonho de Paula Assumpcao (mauro.risonho@gmail.com),
# Web site:
#
# hardeningone comes with ABSOLUTELY NO WARRANTY. This is free software, and you are
# welcome to redistribute it under the terms of the GNU General Public License.
# See LICENSE file for usage of this software.
#
#################################################################################
#
# Read profile/template
#
#################################################################################
#
# NOTE(review): Display, logtext, logtextbreak, wait_for_keypress and
# RemovePIDFile are expected to be defined by the script that sources this
# fragment; PROFILE and PLUGINDIR must also be set by the caller — confirm.
#
#YYY Enable check when profile files are complete and completely documented
# Check if default profile is used
if [ "${PROFILE}" = "defaultXXX.prf" ]; then
# echo ""
# echo " ==============================================================================="
# echo " ${WARNING}Warning${NORMAL}: ${WHITE}Default profile wstá sendo usado.${NORMAL}"
# echo " O profile padrao contem uma pequena quantidade de opcões e configuracões."
# echo " Consulte a documentacao para criar um profile customizado."
# echo ""
# echo " [ ${WHITE}Press [ENTER] para continuar com o profile padrao ou [CTRL] + C para parar${NORMAL} ]"
# echo " ==============================================================================="
wait_for_keypress
fi
#
#################################################################################
#
Display --indent 2 --text "" #"- Verificar arquivo de perfil (${PROFILE})..."
logtext "Lendo Perfil/Configuracao ${PROFILE}"
# Parse every "config:<option>:<value>" line of the profile. Spaces inside
# the value are protected from word-splitting by temporarily replacing them
# with the !space! placeholder and restoring them after the split.
FIND=`cat ${PROFILE} | grep '^config:' | sed 's/ /!space!/g'`
for I in ${FIND}; do
OPTION=`echo ${I} | cut -d ':' -f2`
VALUE=`echo ${I} | cut -d ':' -f3 | sed 's/!space!/ /g'`
logtext "Opcao profile setada: ${OPTION} (com o valor ${VALUE})"
# Map each known option name onto its global variable; unknown options abort.
case ${OPTION} in
# Maximum number of WAITing connections
connections_max_wait_state)
OPTIONS_CONN_MAX_WAIT_STATE="${VALUE}"
;;
# Do not check security repository in sources.list (Debian/Ubuntu)
debian_skip_security_repository)
OPTION_DEBIAN_SKIP_SECURITY_REPOSITORY="${VALUE}"
;;
# Skip FreeBSD port audit
freebsd_skip_portaudit)
logtext "Opcao setada: Saindo FreeBSD portaudit"
OPTION_FREEBSD_SKIP_PORTAUDIT="${VALUE}"
;;
# Do (not) log tests if they have an different operating system
log_tests_incorrect_os)
logtext "Opcao setada: No logging for incorrect OS"
if [ "${VALUE}" = "no" ]; then LOG_INCORRECT_OS=0; else LOG_INCORRECT_OS=1; fi
;;
# What type of machine we are scanning (eg. desktop, server, server with storage)
machine_role)
MACHINE_ROLE="${VALUE}"
;;
# Define if any found NTP daemon instance is configured as a server or client
ntpd_role)
NTPD_ROLE="${VALUE}"
;;
# How much seconds to wait between tests
pause_between_tests)
TEST_PAUSE_TIME="${VALUE}"
;;
# Profile name
profile_name)
# YYY dummy
;;
# Tests to always skip (useful for false positives or problematic tests)
test_skip_always)
TEST_SKIP_ALWAYS="${VALUE}"
logtext "Testes que nao foram realizados: ${VALUE}"
;;
# Do not check the latest version on the internet
skip_upgrade_test)
if [ "${VALUE}" = "yes" -o "${VALUE}" = "YES" ]; then SKIP_UPGRADE_TEST=1; else SKIP_UPGRADE_TEST=0; fi
;;
# Define what kind of scan we are performing
test_scan_mode)
if [ "${VALUE}" = "light" ]; then SCAN_TEST_LIGHT="YES"; SCAN_TEST_MEDIUM="NO"; SCAN_TEST_HEAVY="NO"; fi
if [ "${VALUE}" = "normal" ]; then SCAN_TEST_LIGHT="YES"; SCAN_TEST_MEDIUM="YES"; SCAN_TEST_HEAVY="NO"; fi
if [ "${VALUE}" = "full" ]; then SCAN_TEST_LIGHT="YES"; SCAN_TEST_MEDIUM="YES"; SCAN_TEST_HEAVY="YES"; fi
;;
# Catch all bad options and bail out
*)
logtext "Unknown option ${OPTION} (with value: ${VALUE})"
echo "Fatal error: found errors in profile"
echo "Unknown option ${OPTION} found (with value: ${VALUE})"
RemovePIDFile
exit 1
;;
esac
done
#
#################################################################################
#
# Plugins
#
#################################################################################
#
# Look up each plugin listed as "plugin_enable=<name>" under PLUGINDIR.
# Activation itself is still a TODO (see the XXX marker below).
FIND=`cat ${PROFILE} | grep '^plugin_enable=' | sed 's/ /!space!/g'`
for I in ${FIND}; do
PLUGIN=`echo ${I} | cut -d '=' -f2`
if [ -f "${PLUGINDIR}/${PLUGIN}" ]; then
logtext "Plugin Encontrado: ${PLUGIN}"
# XXX - enable plugin
else
logtext "Plugin Nao encontrado: ${PLUGIN} (${PLUGINDIR}/${PLUGIN})"
fi
done
#
#################################################################################
#
# Set default values (only if not configured in profile)
if [ "${MACHINE_ROLE}" = "" ]; then
MACHINE_ROLE="server"
logtext "Set option to default value: MACHINE_ROLE --> ${MACHINE_ROLE}"
fi
if [ "${NTPD_ROLE}" = "" ]; then
NTPD_ROLE="client"
logtext "Set option to default value: NTPD_ROLE --> ${NTPD_ROLE}"
fi
#
#################################################################################
#
logtextbreak
#================================================================================
# hardeningone - Copyleft GPL3 2010-2011, Mauro Risonho de Paula Assumpcao - -
| true
|
afeb8078318b5d916ae0477424b7c2d97551fd9d
|
Shell
|
c-hydro/fp-hyde
|
/bin/utils/hyde_tools_cleaner_datasets_deprecated.sh
|
UTF-8
| 5,838
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#-----------------------------------------------------------------------------------------
# Script information
script_name="HYDE UTILS - CLEANER DATASETS DEPRECATED - REALTIME"
script_version="1.0.0"
script_date="2021/02/22"
#-----------------------------------------------------------------------------------------

# Get script information
script_file="hyde_tools_cleaner_datasets_deprecated.sh"

# Get time information (-u to get gmt time)
time_script_now=$(date -u +"%Y-%m-%d 00:00")
#-----------------------------------------------------------------------------------------

# Datasets registry: the four arrays below are parallel — index i of each one
# describes the same dataset: display name, folder to scan, whether cleaning
# is enabled, and the age (days) after which files are considered deprecated.
group_datasets_name=(
"ARCHIVE - STATE"
"ARCHIVE - NWP ECMWF0100"
"ARCHIVE - NWP LAMI-2I"
"ARCHIVE - RADAR MCM"
"ARCHIVE - RFARM ECMWF0100"
"ARCHIVE - RFARM EXPERT FORECAST"
"ARCHIVE - RFARM LAMI-2I"
"ARCHIVE - WEATHER STATIONS"
"DATA - TMP"
"DATA - DATA DYNAMIC - ANCILLARY"
"DATA - DATA DYNAMIC - OUTCOME - EXPERT FORECAST"
"DATA - DATA DYNAMIC - OUTCOME - NWP"
"DATA - DATA DYNAMIC - OUTCOME - OBS"
"DATA - DATA DYNAMIC - OUTCOME - RFARM"
"LOG"
"LOCK"
)

group_folder_datasets=(
"/hydro/archive/model_dset_restart/"
"/hydro/archive/nwp_ecmwf0100_realtime/"
"/hydro/archive/nwp_lami-2i_realtime/"
"/hydro/archive/radar_mcm_realtime/"
"/hydro/archive/rfarm_ecmwf0100_realtime/"
"/hydro/archive/rfarm_expert_forecast_realtime/"
"/hydro/archive/rfarm_lami-2i_realtime/"
"/hydro/archive/weather_stations_realtime/"
"/hydro/data/tmp/"
"/hydro/data/data_dynamic/ancillary/"
"/hydro/data/data_dynamic/outcome/expert_forecast/"
"/hydro/data/data_dynamic/outcome/nwp/"
"/hydro/data/data_dynamic/outcome/obs/"
"/hydro/data/data_dynamic/outcome/rfarm/"
"/hydro/log/"
"/hydro/lock/"
)

group_file_datasets_clean=(
true
true
true
true
true
true
true
true
true
true
true
true
true
true
true
true
)

group_file_datasets_elapsed_days=(
30
15
15
15
15
15
15
30
2
5
15
15
30
15
5
5
)
#-----------------------------------------------------------------------------------------

# ----------------------------------------------------------------------------------------
# Info script start
echo " ==================================================================================="
echo " ==> "$script_name" (Version: "$script_version" Release_Date: "$script_date")"
echo " ==> START ..."
echo " ===> EXECUTION ..."

time_script_now=$(date -d "$time_script_now" +'%Y-%m-%d 00:00')
# ----------------------------------------------------------------------------------------

# ----------------------------------------------------------------------------------------
# Iterate over the datasets registry
for datasets_id in "${!group_datasets_name[@]}"; do

    # Get name/folder/flag/age of the current dataset
    datasets_name=${group_datasets_name[datasets_id]}
    folder_datasets=${group_folder_datasets[datasets_id]}
    file_datasets_clean=${group_file_datasets_clean[datasets_id]}
    file_datasets_elapsed_days=${group_file_datasets_elapsed_days[datasets_id]}

    # Info datasets type start
    echo " ====> DATASETS TYPE ${datasets_name} ... "

    # Check cleaning activation for this dataset
    if ${file_datasets_clean} ; then

        # Delete every regular file older than the configured number of days.
        # (Paths are machine-generated and contain no whitespace, so the
        # word-split over find's output is safe here.)
        for file_datasets_name in $(find "${folder_datasets}" -type f -mtime +"${file_datasets_elapsed_days}"); do
            echo " ====> DELETE FILENAME ${file_datasets_name} ... "
            if [ -f "$file_datasets_name" ] ; then
                rm -- "$file_datasets_name"
                echo " ====> DELETE FILENAME ${file_datasets_name} ... DONE"
            else
                echo " ====> DELETE FILENAME ${file_datasets_name} ... FAILED. FILE NOT FOUND"
            fi
        done

        # Remove folders left empty by the deletion above (and their now-empty
        # parents). BUGFIX: the original passed a stray '{}' placeholder (a
        # 'find -exec' leftover) to rmdir and logged the wrong variable
        # (file_datasets_name) in the DONE/FAILED messages.
        for folder_empty_name in $(find "${folder_datasets}" -type d -empty); do
            echo " ====> DELETE EMPTY FOLDER ${folder_empty_name} ... "
            if [ -d "$folder_empty_name" ] ; then
                rmdir -vp --ignore-fail-on-non-empty -- "${folder_empty_name}"
                echo " ====> DELETE EMPTY FOLDER ${folder_empty_name} ... DONE"
            else
                echo " ====> DELETE EMPTY FOLDER ${folder_empty_name} ... FAILED. FOLDER NOT FOUND"
            fi
        done

        # Info datasets type end
        echo " ====> DATASETS TYPE ${datasets_name} ... DONE"

    else
        # Info datasets type end (not activated)
        echo " ====> DATASETS TYPE ${datasets_name} ... SKIPPED. SYNC NOT ACTIVATED"
    fi

done

# Info script end
echo " ==> ... END"
echo " ==> Bye, Bye"
echo " ==================================================================================="
# ----------------------------------------------------------------------------------------
| true
|
e73a15404e6bd4b0fde5a38e722cda58eae5da89
|
Shell
|
jjhazard/projects
|
/Operating Systems and Subprocesses/Prog03_jh/modder.sh
|
UTF-8
| 1,182
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# Crop, split and rotate every .tga image found (non-recursively) in a
# directory, writing the results into an output folder.
#   $1 - directory containing .tga images
#   $2 - output directory
if ! [ "$#" -eq "2" ]
then
  echo "$#"
  echo "Incorrect number of args."
  echo "Correct usage: script1.sh (image directory) (output folder)"
  exit 1
fi
if ! [ -d "$1" ]
then
  echo "First argument is not a directory."
  exit 1
fi
if ! [ -d "$2" ]
then
  echo "Second argument is not a directory."
  exit 1
fi
# Collect the image list one path per line (replaces the IFS-splitting hack;
# file names containing newlines are still unsupported).
mapfile -t images < <(find "$1" -maxdepth 1 -name '*.tga')
if [ "${#images[@]}" -eq 0 ]; then
  echo "Directory has no .tga image files."
  exit 1
fi
# Build the helper tools from the local C sources.
cd Code || exit 1
gcc dimensions.c image_IO_TGA.c -o dimensions
gcc crop.c image_IO_TGA.c -o crop
gcc split.c image_IO_TGA.c -o split
gcc rotate.c image_IO_TGA.c -o rotate
cd ..
for file in "${images[@]}"; do
  height=$(./Code/dimensions -h "$file")
  width=$(./Code/dimensions -w "$file")
  re='^[0-9]+$'
  # Only process images whose reported dimensions are plain integers; the
  # dimensions tool prints an error message otherwise, which we echo below.
  if [[ $width =~ $re ]] && [[ $height =~ $re ]]
  then
    hheight=$((height/2))
    hwidth=$((width/2))
    echo "$height" "$hheight" "$width" "$hwidth"
    # Crop the four quadrants, then split channels and rotate left.
    ./Code/crop "$file" "$2" 0 0 "$hheight" "$hwidth"
    ./Code/crop "$file" "$2" "$hheight" 0 "$hheight" "$hwidth"
    ./Code/crop "$file" "$2" 0 "$hwidth" "$hheight" "$hwidth"
    ./Code/crop "$file" "$2" "$hheight" "$hwidth" "$hheight" "$hwidth"
    ./Code/split "$file" "$2"
    ./Code/rotate l "$file" "$2"
  else
    echo "$height"
  fi
done
| true
|
ec1f352aa1a9075f35f14b7a1ad1d1e3466cffc6
|
Shell
|
dmlc/dgl
|
/script/build_doc.sh
|
UTF-8
| 1,541
| 3.9375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build the DGL documentation for a chosen deep-learning backend.
set -e

# Print the help text describing the supported flags.
usage() {
cat << EOF
usage: bash $0 OPTIONS
examples:
  Build doc with PyTorch-backend: bash $0 -p
  Build doc with MXNet-backend: bash $0 -m
  Build doc with TensorFlow-backend: bash $0 -t
  Build incrementally with PyTorch-backend: bash $0
  Remove all outputs and restart a PyTorch build: bash $0 -p -r
Build DGL documentation. By default, build incrementally on top of the current state.
OPTIONS:
  -h           Show this message.
  -p           Build doc with PyTorch backend.
  -m           Build doc with MXNet backend.
  -t           Build doc with TensorFlow backend.
  -r           Remove all outputs.
EOF
}

backend="pytorch"

# Parse flags: the last backend flag given wins; -r requests a clean build.
while getopts "hpmtr" flag; do
  case "${flag}" in
    p) backend="pytorch" ;;
    m) backend="mxnet" ;;
    t) backend="tensorflow" ;;
    r) remove="YES" ;;
    h)
      usage
      exit 0
      ;;
    *)
      usage
      exit 1
      ;;
  esac
done

# The build relies on DGL_HOME and must run from the repository root.
if [[ -z ${DGL_HOME} ]]; then
  echo "ERROR: Please make sure environment variable DGL_HOME is set correctly."
  exit 1
fi

if [[ ! ${PWD} == ${DGL_HOME} ]]; then
  echo "ERROR: This script only works properly from DGL root directory."
  echo " Current: ${PWD}"
  echo "DGL_HOME: ${DGL_HOME}"
  exit 1
fi

cd ${DGL_HOME}/docs

# Wipe previous outputs first when -r was given.
if [[ ${remove} == "YES" ]]; then
  bash clean.sh
fi

# Expose the selected backend and local build artifacts to the doc build.
export DGLBACKEND=$backend
export DGL_LIBRARY_PATH=${DGL_HOME}/build
export PYTHONPATH=${DGL_HOME}/python:$PYTHONPATH
make $backend
exit 0
| true
|
86e188a3b5da2dd26c72e2591d659da60d6bebe6
|
Shell
|
luyGithub/hello-world-1
|
/icepack_luy/cesm_meltpond_pe/run_real/automatic_2d_parameter_estimate_1p_faster
|
UTF-8
| 1,966
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/sh -f
# Parameter-estimation driver: for each (lon, lat) grid point, build the MODIS
# observation file via MATLAB, prepare the JRA55 forcing, patch the Fortran
# driver to read that observation file, rebuild, and run the estimator.
# NOTE(review): the "NNNc" sed edits below replace fixed line numbers and are
# tied to the exact revisions of MODIS_squeeze_operational.m, icepack_in and
# icedrv_MAIN.F90 — confirm those line numbers before reusing this script.
clear
echo '!_____________start_______________!'
echo "the_dir_is:____ $PWD"
echo "run started : `date`"
for lon in $(seq 220 220)
do
for lat in $(seq 85 85)
do
echo "-------------- lon= $lon, lat= $lat -------------- "
#preparing modis observation
echo 'modifling matlab script ...'
sed -i "193c lon_goal = $lon; lat_goal = $lat ;" ./MODIS_squeeze_operational.m
dos2unix ./MODIS_squeeze_operational.m
echo 'running matlab ...'
nohup matlab -nojvm -nodisplay -nosplash -logfile log.matlab <MODIS_squeeze_operational.m
modis_file="modis_2005_"$lon"_"$lat"_08day.txt"
echo "modis_file= $modis_file"
# preparing jra forcing
jra_file="jra55_2005_"$lon"_"$lat"_01hr.txt"
# Skip grid points for which no JRA55 forcing file is available.
if [ -e "../../../Icepack_data/forcing/CFS/$jra_file" ] ; then
echo "exist"
# run producing intermediate forcing
cd ../../producing_intermediate_forcing/run_jra
# change the so-called 'in file'
sed -i "96c atm_data_file = '$jra_file' " ./icepack_in
./automatic_formalrun
# run parameter estimation
# change fortran code
cd ../../cesm_meltpond_pe/run_real
echo 'modifling fortran code ...'
sed -i "192c print *, 'Reading ','$modis_file' " ../build_real/driver/icedrv_MAIN.F90
sed -i "194c open (12, file='$modis_file')" ../build_real/driver/icedrv_MAIN.F90
dos2unix ../build_real/driver/icedrv_MAIN.F90
# rebuild
cd ../build_real ; ./build_driver2_fast.sh ;
# change run congfiguration
cd ../run_real ;
sed -i "96c atm_data_file = '$jra_file' " ./icepack_in
# run
./automatic_formalrun
# Collect and display the estimation results (state, cost, gradient norm).
bash automatic_query_runlog_data_send ; echo "X" ; cat X.txt; cat J.txt ; cat GNORM.txt
else
echo "not exist"
fi
done
done
echo "run finished : `date`"
echo '!_____________finish_______________!'
| true
|
c44f241fb8c7b6c94d69f48b211b06e5ae3e849d
|
Shell
|
apfadler/quil-src
|
/dist/bin/quil-client.sh
|
UTF-8
| 1,783
| 2.921875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Thin curl wrapper around the Quil REST API.
#   $1 - service: documentcache | simplecache | compute
# Cache services ($1 = documentcache/simplecache):
#   $2 = cache name, $3 = verb:
#     get        -> GET  /api/$1/$2/get/$4
#     put        -> POST /api/$1/$2/put/$4 with file $5 as the body
#     other verb -> POST /api/$1/$2/$3 with file $4 as the body
# Compute service ($1 = compute):
#   "tasks" lists/inspects tasks; any other $2 POSTs file $4 to /api/compute/$2/$3
# NOTE(review): positional parameters are expanded unquoted throughout, so
# arguments containing spaces or globs will break — confirm before hardening.
if [ -z "$QUIL_HOME" ]; then
echo "Defaulting QUIL_HOME to" `pwd`
export QUIL_HOME=`pwd`
fi
if [ -z "$QUIL_SERVER" ]; then
echo "Defaulting QUIL_SERVER to localhost"
export QUIL_SERVER=localhost
fi
if [ -z "$QUIL_PORT" ]; then
# NOTE(review): the message below says QUIL_SERVER but this sets QUIL_PORT.
echo "Defaulting QUIL_SERVER to 8081"
export QUIL_PORT=8081
fi
if [ $1 == "documentcache" ]; then
if [ $3 != "get" ]; then
# Not 'get': either a plain verb POST ($4 = payload file) or a 'put'
# with an explicit key ($4 = key, $5 = payload file).
if [ $3 != "put" ]; then
curl -s -X POST --data-binary @$4 -H "Content-Type: text/plain" http://$QUIL_SERVER:$QUIL_PORT/api/$1/$2/$3
else
curl -s -X POST --data-binary @$5 -H "Content-Type: text/plain" http://$QUIL_SERVER:$QUIL_PORT/api/$1/$2/$3/$4
fi
else
curl -s -X GET -H "Content-Type: text/plain" http://$QUIL_SERVER:$QUIL_PORT/api/$1/$2/$3/$4
fi
fi
if [ $1 == "simplecache" ]; then
# Same verb dispatch as documentcache, against the simplecache endpoints.
if [ $3 != "get" ]; then
if [ $3 != "put" ]; then
curl -s -X POST --data-binary @$4 -H "Content-Type: text/plain" http://$QUIL_SERVER:$QUIL_PORT/api/$1/$2/$3
else
curl -s -X POST --data-binary @$5 -H "Content-Type: text/plain" http://$QUIL_SERVER:$QUIL_PORT/api/$1/$2/$3/$4
fi
else
curl -s -X GET -H "Content-Type: text/plain" http://$QUIL_SERVER:$QUIL_PORT/api/$1/$2/$3/$4
fi
fi
if [ $1 == "compute" ]; then
if [ $2 == "tasks" ]; then
# tasks: no args lists all tasks, $3 selects one, $4 selects a sub-resource.
if [ -z "$3" ]; then
curl -s -X GET -H "Content-Type: application/json" http://$QUIL_SERVER:$QUIL_PORT/api/compute/tasks
else
if [ -z "$4" ]; then
curl -s -X GET -H "Content-Type: application/json" http://$QUIL_SERVER:$QUIL_PORT/api/compute/tasks/$3
else
curl -s -X GET -H "Content-Type: application/json" http://$QUIL_SERVER:$QUIL_PORT/api/compute/tasks/$3/$4
fi
fi
else
curl -s -X POST --data-binary @$4 -H "Content-Type: application/json" http://$QUIL_SERVER:$QUIL_PORT/api/$1/$2/$3
fi
fi
| true
|
b4861d151b62c852962986a9efb2324582e89fa7
|
Shell
|
davidnussio/meraki-root-cause
|
/deploy/docker-entrypoint.sh
|
UTF-8
| 159
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Container entrypoint: 'frontend'/'backend' commands run yarn inside the
# matching directory; any other command is exec'd verbatim.
set -e

echo "$@"

case "$1" in
  frontend|backend)
    cd "$1"
    shift
    echo "$@"
    exec /usr/local/bin/yarn "$@"
    ;;
esac

exec "$@"
| true
|
062fd1ad36c3e0231095a25115ce01094525ee98
|
Shell
|
Guirec/dotfiles
|
/shell/bash_aliases
|
UTF-8
| 3,085
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Shell alias definitions, intended to be sourced from ~/.bashrc / ~/.zshrc.

# Easier navigation
# ====================================================
alias ..="cd ../" # Go back 1 directory level
alias ...="cd ../../" # Go back 2 directory levels
alias .3="cd ../../../" # Go back 3 directory levels
alias .4="cd ../../../../" # Go back 4 directory levels
alias .5="cd ../../../../../" # Go back 5 directory levels
alias .6="cd ../../../../../../" # Go back 6 directory levels
alias ~="cd ~" # ~: Go Home
alias o="open ." # o: Open in Finder
alias c="clear" # c: Clear terminal display
alias list="ls -lAh" # list: detailed files and folders list

# Easier manipulations
# ====================================================
alias mkdir="mkdir -p"

# Directories
# ====================================================
alias dl="cd ~/Downloads/"
alias desk="cd ~/Desktop/"
alias docs="cd ~/Documents/"
alias proj="cd ~/Projects/"

# Development
# ====================================================
# Make
alias m="make"
# Docker
alias d="docker"
# Git
alias lg="lazygit"
alias gitclean="git remote prune origin"
# Delete local branches already merged (keeps master/develop).
alias gitcleanlocal="git branch --merged | grep -v '\*' | grep -v 'master' | grep -v 'develop' | xargs -n 1 git branch -d"
# npm
alias npmlistg="npm list -g --depth=0"
alias npmr="npm run"
# Nuke node_modules and the lockfile, then reinstall from scratch.
alias npmplease="rm -rf node_modules/ && rm -f package-lock.json && npm install"

# Finder
# ====================================================
# Show hidden files
alias showfiles="defaults write com.apple.finder AppleShowAllFiles -bool true && killall Finder"
# Hide hidden files
alias hidefiles="defaults write com.apple.finder AppleShowAllFiles -bool false && killall Finder"
# Recursively delete `.DS_Store` files from the current dir
alias dscleanup="find . -type f -name '*.DS_Store' -ls -delete"
# Clean up LaunchServices to remove duplicates in the “Open With” menu
alias lscleanup="/System/Library/Frameworks/CoreServices.framework/Frameworks/LaunchServices.framework/Support/lsregister -kill -r -domain local -domain system -domain user;killall Finder;echo 'Open With has been rebuilt, Finder will relaunch'"
# Clean Open With context menu
# NOTE(review): 'fixow' duplicates 'lscleanup' via a versioned framework path.
alias fixow="/System/Library/Frameworks/CoreServices.framework/Versions/A/Frameworks/LaunchServices.framework/Versions/A/Support/lsregister -kill -r -domain local -domain user;killall Finder;echo 'Open With has been rebuilt, Finder will relaunch'"

# System
# ====================================================
# Get macOS Software Updates, update Homebrew and their installed packages
alias update="sudo softwareupdate -i -a; brew update; brew upgrade; brew cleanup"
# Empty the Trash on all mounted volumes and the main HDD.
# Also, clear Apple’s System Logs to improve shell startup speed.
# Finally, clear download history from quarantine.
alias emptytrash="sudo rm -rfv /Volumes/*/.Trashes; sudo rm -rfv ~/.Trash; sudo rm -rfv /private/var/log/asl/*.asl; sqlite3 ~/Library/Preferences/com.apple.LaunchServices.QuarantineEventsV* 'delete from LSQuarantineEvent'"
| true
|
04351694fda2ed424faee3f60fb84925913ad950
|
Shell
|
freehuoshan/script_store
|
/gitbook_script/auto_push_ghpages.sh
|
UTF-8
| 261
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# Automatically commit and push the generated static site (_book) to the
# gh-pages checkout directory / branch.
echo "复制静态网页到gh-pages路径下"
# BUGFIX: abort when the copy fails instead of claiming success.
cp -rf _book/* gh-pages/ || { echo "cp _book -> gh-pages failed" >&2; exit 1; }
echo "复制成功"
# BUGFIX: the cd was unchecked; on failure the git commands below would run
# against the wrong repository/branch.
cd gh-pages || exit 1
git add ./*
git commit -m "提交静态网页"
git push origin gh-pages
echo "推送成功........."
| true
|
aa2b9c41435a118c036eaaf6a03a2c31dd01cd3d
|
Shell
|
Galunid/dotfiles-1
|
/scripts/entertainment-timer/entertainment-timer
|
UTF-8
| 919
| 4
| 4
|
[] |
no_license
|
#! /bin/bash
# CLI for the entertainment-timer-daemon: writes a command into the daemon's
# command fifo and prints the daemon's reply from the response fifo.
# Absolute directory of this script (fifos live next to it).
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
if [ "$1" = "--help" ] || [ "$1" = "help" ] || [ -z "$1" ]; then
echo "$ entertainment-timer"
echo "CLI for the entertainment-timer-daemon"
echo ""
echo "Parameters:"
echo " \$1: command"
echo ""
echo "Commands:"
echo " kill: kill the daemon and all subprocesses"
echo " show: show current timer"
echo ""
echo ""
echo ""
echo "Example:"
echo "$ entertainment-timer kill"
exit
fi
# NOTE(review): CMD is assigned but never used below; "$1" is written to the
# fifo directly.
CMD="$1"
# Read one reply from the response fifo; 404 means the daemon rejected the
# command, anything else is echoed verbatim.
parseResponse() {
RES=$(cat $DIR/command-response-fifo) && {
case $RES in
404)
echo "Command not found."
echo "Try --help for available commands."
;;
*)
echo $RES
esac
}
}
# Start listening for the reply before sending the command, so the daemon's
# write to the fifo does not block.
parseResponse &
killCat() {
# kill cat waiting for response
# NOTE(review): this kills any process whose 'ps' line contains "cat" —
# consider pkill -f on the exact fifo path to narrow the scope; confirm.
kill $(ps | grep cat | cut -d\ -f1) 2> /dev/null
exit
}
# Clean up the background reader on Ctrl-C as well.
trap "killCat" 2
echo "$1" > $DIR/command-fifo
killCat
| true
|
ffe043f5a5b5d29ac287aeae8a1bde5c0a7f58ac
|
Shell
|
milksteak-project/steaks
|
/packages/tiv/tiv.sh
|
UTF-8
| 1,018
| 4.0625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Fetch, build and install stefanhaustein/TerminalImageViewer (tiv)
# into ~/usr/bin, using ~/usr/tmp as a download/build scratch area.
TMPDIR="$HOME/usr/tmp"
PACKAGE="TerminalImageViewer"
VERSION="master"
SOURCE="${PACKAGE}-${VERSION}"
ZIP="${SOURCE}.zip"
LINK="https://github.com/stefanhaustein/${PACKAGE}/archive/${VERSION}.zip"
DEPS="imagemagick"

# -- Install dependencies
install_dependencies() {
  brew install $DEPS  # DEPS is a word-split package list on purpose
}
echo -e ">>>>> Installing dependencies..."
install_dependencies &> /dev/null

# -- Fetch source
fetch_package() {
  # BUGFIX: the cache check used the bare "$ZIP" name, which resolves
  # relative to the caller's working directory, while the download lands in
  # $TMPDIR — so the archive was re-downloaded on every run. Check the real
  # location instead.
  test -e "$TMPDIR/$ZIP" || wget -O "$TMPDIR/$ZIP" "$LINK" -q --show-progress
  # -- Unpack ZIP
  cd "$TMPDIR" || exit 1
  unzip -o "$ZIP" | pv -l >/dev/null
}
echo -e ">>>>> Fetching sources..."
fetch_package &> /dev/null

# -- Install package
install_package() {
  cd "$TMPDIR/$SOURCE/src/main/cpp" || exit 1
  make
  mv tiv "$HOME/usr/bin/tiv"
}
echo -e ">>>>> Installing package..."
install_package &> /dev/null

# -- Cleanup: remove the downloaded archive and the unpacked tree.
cleanup() {
  cd "$TMPDIR" || exit 1
  rm -- "$ZIP"
  rm -rf -- "$SOURCE"
}
echo -e ">>>>> Cleaning up..."
cleanup &> /dev/null

echo -e "tiv has been successfully installed!"
| true
|
314ecdcd023dbd1c10c8f02850f231f099d8d928
|
Shell
|
portsbuild/portsbuild
|
/directadmin/scripts/custom/unblock_ip.sh
|
UTF-8
| 680
| 3.765625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#VERSION=2.2
#
# Source: http://help.directadmin.com/item.php?id=380
#
# Unblock an IP that was previously added to the brute-force block list.
# The address arrives in the $ip environment variable (set by DirectAdmin).
# NOTE(review): the usage text suggests a positional argument, but the code
# reads $ip from the environment — confirm which callers rely on which.
BF=/root/blocked_ips.txt
OS=$(uname)
BLOCK_CHAIN=blocked_ips

if [ "$ip" = "" ]; then
	printf "Usage:\n"
	echo " $0 1.2.3.4"
	exit 1
fi

if [ ! -e "$BF" ]; then
	printf "Cannot find %s to unblock the IP.\n" "$BF"
	exit 2
fi

COUNT=$(grep -c "^$ip=" "$BF")
if [ "$COUNT" -eq 0 ]; then
	# BUGFIX: the message previously printed "$1", which is empty when the
	# address arrives via the environment; report the value actually checked.
	printf "%s was not in %s. Not unblocking.\n" "$ip" "$BF"
	exit 2
fi

## Unblock: drop the entry from the block list, then remove the firewall rule.
# BUGFIX: the original referenced the undefined upper-case $IP here.
printf "Unblocking %s\n" "$ip"
grep -v "^$ip=" "$BF" > "$BF.temp"
mv "$BF.temp" "$BF"
chmod 600 "$BF"

# ipfw table on FreeBSD, iptables chain everywhere else.
if [ "$OS" = "FreeBSD" ]; then
	/sbin/ipfw -q table 10 delete "$ip"
else
	/sbin/iptables -D ${BLOCK_CHAIN} -s "$ip" -j DROP
fi
exit 0
| true
|
75f660472a57109c15f5dadc0a7659911d3468fc
|
Shell
|
WilliamGallifrey/PRGrepo
|
/bloque6/6b/2sis.sh
|
UTF-8
| 244
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
#Ejercicio 2
total=$( cat precipitaciones.txt | wc -l )
for i in $(awk -F " " '{print $2}' precipitaciones.txt); do
suma=$(expr $suma + $i)
done
res=$(expr "$suma / $total" | bc -l)
echo "La media de precipitaciones es $res"
| true
|
de7e71dce1dbc36393a1d1fb76ceadc900a698d5
|
Shell
|
ipwnosx/iDrive-Linux-Backup
|
/Idrivelib/cronsetup/debian/btw-6.00_8.50/idrivecron
|
UTF-8
| 1,315
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/sh
# Start/stop the __APP__cron daemon.
#
# @IDrive Inc
# Created By : Sabin Cheruvattil
# Note: __APP__ and __LAUNCHPATH__ are placeholder tokens substituted by the
# installer when this init script is deployed.
### BEGIN INIT INFO
# Provides: __APP__cron
# Required-Start: $remote_fs $syslog $time
# Required-Stop: $remote_fs $syslog $time
# Should-Start: $network $named slapd autofs ypbind nscd nslcd winbind
# Should-Stop: $network $named slapd autofs ypbind nscd nslcd winbind
# Default-Start: 2 3 4 5
# Default-Stop:
# Short-Description: Regular background program processing daemon
# Description: __APP__cron is a perl script which runs sheduled tasks in __APP__ scripts.
### END INIT INFO
PIDFILE=/var/run/__APP__crond.pid
DAEMON="perl __LAUNCHPATH__"
. /lib/lsb/init-functions
# Print the PID recorded in the pidfile.
get_pid() {
cat "$PIDFILE"
}
# True when the pidfile exists and the recorded PID is alive.
is_running() {
[ -f "$PIDFILE" ] && ps -p `get_pid` > /dev/null 2>&1
}
case "$1" in
# Launch the daemon in the background and record its PID.
start) log_daemon_msg "Starting" "__APP__cron"
echo ""
$DAEMON 1>/dev/null 2>/dev/null &
echo $! > "$PIDFILE"
;;
# NOTE(review): 'stop' assumes the pidfile exists and does not remove it
# afterwards — confirm whether a stale pidfile is acceptable here.
stop) log_daemon_msg "Stopping" "__APP__cron"
echo ""
kill `get_pid`
;;
restart) log_daemon_msg "Restarting" "__APP__cron"
echo ""
$0 stop
$0 start
;;
# LSB convention: exit 0 when running, non-zero otherwise.
status)
if is_running; then
echo "Running"
echo "PID: "`get_pid`
else
echo "Stopped"
exit 1
fi
;;
*) log_action_msg "Usage: /etc/init.d/__APP__cron {start|stop|restart|status}"
echo ""
exit 2
;;
esac
exit 0
|
0a57595763f60ee3b58d557d41b26cc80b9c3406
|
Shell
|
vtostaky/mssis-tp
|
/grands_reseaux/tp1/test4.sh
|
UTF-8
| 191
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/sh
# For every 'ip address 192.x' line in conf1.txt, print the most recently
# seen 'interface' line followed by the matching address line itself.
awk '
$1 == "interface" { interface = $0 }
$1 == "ip" && $2 == "address" && $3 ~ /192[.]/ { print interface, $0 }
' conf1.txt
|
ecde5739e5f846bc85174393764366856bfe7075
|
Shell
|
Claude-Ray/dotfiles
|
/.config/yadm/bootstrap
|
UTF-8
| 4,607
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# yadm bootstrap: provision a fresh machine (Arch Linux or macOS) with the
# packages and shell tooling this dotfiles repository expects.

# Packages installed with pacman on Arch Linux.
pacman_packages=(
git
clash
zsh
tmux
lsd
ripgrep
skim
bat
diskus
htop
bashtop
cmake
libvterm
qutebrowser
# nvidia
# nvidia-utils
xorg
xorg-server
xorg-xinit
xclip
wqy-microhei
ttf-dejavu
fcitx5-rime
fcitx5-configtool
fcitx5-qt
rime-double-pinyin
tree
zip
unzip
mu
wget
rsync
dnsutils
gnutls
inetutils
net-tools
openssl
)
# Install the package list above, one package at a time (Arch Linux only).
install_linux_packages() {
if [[ -f /etc/arch-release ]]; then
sudo pacman -Sy
sudo pacman-key --populate
for apps in "${pacman_packages[@]}"; do
sudo pacman -S --noconfirm --needed "$apps"
done
fi
}
# Install oh-my-zsh plus the zsh-autosuggestions plugin and the
# powerlevel10k theme, and switch the login shell to zsh.
# Skipped entirely when ~/.oh-my-zsh already exists.
install_oh_my_zsh() {
if ! [ -d "$HOME/.oh-my-zsh" ]; then
echo "Installing oh-my-zsh"
sh -c "$(curl -fsSL https://raw.github.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
cd "$HOME/.oh-my-zsh"
git config --add oh-my-zsh.hide-status 1
git config --add oh-my-zsh.hide-dirty 1
chsh -s $(which zsh)
echo "Installing zsh plugin zsh-autosuggestions"
git clone https://github.com/zsh-users/zsh-autosuggestions "$ZSH_CUSTOM/plugins/zsh-autosuggestions"
echo "Installing zsh theme spaceship"
git clone --depth=1 https://github.com/romkatv/powerlevel10k.git "$ZSH_CUSTOM/themes/powerlevel10k"
# git clone https://github.com/denysdovhan/spaceship-prompt.git "$ZSH_CUSTOM/themes/spaceship-prompt"
# ln -s "$ZSH_CUSTOM/themes/spaceship-prompt/spaceship.zsh-theme" "$ZSH_CUSTOM/themes/spaceship.zsh-theme"
fi
}
# Clone gpakosz's tmux configuration and symlink it as ~/.tmux.conf.
# Idempotent: skipped when ~/.tmux already exists.
install_oh_my_tmux() {
if ! [ -d "$HOME/.tmux" ]; then
echo "Installing oh-my-tmux"
git clone https://github.com/gpakosz/.tmux.git "$HOME/.tmux"
ln -s "$HOME/.tmux/.tmux.conf" "$HOME/.tmux.conf"
fi
# NOTE clipboard: xclip for Linux, pbcopy for OSX
# https://medium.freecodecamp.org/tmux-in-practice-integration-with-system-clipboard-bcd72c62ff7b
}
# Install Homebrew when missing, then apply the global Brewfile bundle.
install_homebrew() {
# install homebrew if it's missing
if ! command -v brew >/dev/null 2>&1; then
echo "Installing homebrew"
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
fi
if [ -f "$HOME/.config/homebrew/Brewfile" ]; then
echo "Updating homebrew bundle"
brew bundle --global
fi
}
# Build and install fcitx-remote for macOS (squirrel-rime flavour),
# linking it into /usr/local/bin as 'fcitx-remote'.
install_fcitx_remote_osx() {
if ! command -v fcitx-remote >/dev/null 2>&1; then
mkdir -p "$HOME/soft"
cd "$HOME/soft"
git clone https://github.com/dangxuandev/fcitx-remote-for-osx
cd fcitx-remote-for-osx
./build.py build squirrel-rime-upstream
cp fcitx-remote-squirrel-rime-upstream /usr/local/bin/fcitx-remote-squirrel-rime
ln -snf /usr/local/bin/fcitx-remote-squirrel-rime /usr/local/bin/fcitx-remote
fi
}
# Raise macOS maxfiles/maxproc limits persistently via launchd plists.
# Each plist is written only once; both are loaded at the end.
setup_system_limit_osx() {
# https://unix.stackexchange.com/questions/108174/how-to-persistently-control-maximum-system-resource-consumption-on-mac/221988#293062
if ! [ -f "/Library/LaunchAgents/com.launchd.maxfiles.plist" ]; then
sudo /usr/libexec/PlistBuddy /Library/LaunchAgents/com.launchd.maxfiles.plist \
-c "add Label string com.launchd.maxfiles" \
-c "add ProgramArguments array" \
-c "add ProgramArguments: string launchctl" \
-c "add ProgramArguments: string limit" \
-c "add ProgramArguments: string maxfiles" \
-c "add ProgramArguments: string 10240" \
-c "add ProgramArguments: string unlimited" \
-c "add RunAtLoad bool true"
fi
if ! [ -f "/Library/LaunchAgents/com.launchd.maxproc.plist" ]; then
sudo /usr/libexec/PlistBuddy /Library/LaunchAgents/com.launchd.maxproc.plist \
-c "add Label string com.launchd.maxproc" \
-c "add ProgramArguments array" \
-c "add ProgramArguments: string launchctl" \
-c "add ProgramArguments: string limit" \
-c "add ProgramArguments: string maxproc" \
-c "add ProgramArguments: string 2000" \
-c "add ProgramArguments: string unlimited" \
-c "add RunAtLoad bool true"
fi
sudo launchctl load /Library/LaunchAgents/com.launchd.*
}
# Entry point: run the platform-specific installers, then the shared ones.
main() {
if [[ "$OSTYPE" == "darwin"* ]]; then
install_homebrew
install_fcitx_remote_osx
setup_system_limit_osx
elif [[ "$OSTYPE" == "linux-gnu" ]]; then
install_linux_packages
fi
install_oh_my_zsh
install_oh_my_tmux
}
main
| true
|
35daf9293995cd998dfc21adb0ffcaa3c7d8aa13
|
Shell
|
toransahu/post-linux-install
|
/src/install_barrier.sh
|
UTF-8
| 655
| 2.75
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#
# install_barrier.sh
# Copyright (C) 2021 Toran Sahu <toran.sahu@yahoo.com>
#
# Distributed under terms of the MIT license.
#
# Build and install Barrier (software KVM) from source on Debian/Ubuntu,
# then open its default TCP port in ufw.

# Work inside a scratch directory.
mkdir -p ~/.tmp && cd ~/.tmp
# Build dependencies (Debian/Ubuntu package names).
sudo apt install git cmake make xorg-dev g++ libcurl4-openssl-dev \
libavahi-compat-libdnssd-dev libssl-dev libx11-dev \
libqt4-dev qtbase5-dev
git clone https://github.com/debauchee/barrier.git
# this builds from master,
# you can get release tarballs instead
# if you want to build from a specific tag/release
cd barrier
git submodule update --init --recursive
./clean_build.sh
cd build
sudo make install # install to /usr/local/
# Barrier listens on TCP 24800 by default.
sudo ufw allow 24800/tcp
|
cae5ebae1e8152e915bee8b779a5ec360d484edf
|
Shell
|
Noughmad/Sola
|
/Modelska Analiza/Zakljucna/Data/delta.sh
|
UTF-8
| 266
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/sh
# Run the 'planetki' simulation for a fixed mu ($1) and three delta values,
# sweeping T from 1 to 50; all output is appended to one .dat file per delta.
MU=$1

for DELTA in -2.094 0 2.094;
do
    OUTPUT="g_lambert_${MU}_${DELTA}.dat"
    rm -f "${OUTPUT}"
    # FIX: the original used the bash-only '{1..50}' brace expansion and the
    # bash-only '2&>>' redirection operator under a /bin/sh shebang; use seq
    # and the portable '>> file 2>&1' form instead.
    for T in $(seq 1 50);
    do
        echo "Starting ${MU}, ${DELTA}, ${T}"
        # NOTE(review): bash parsed the trailing '2' as a sixth program
        # argument (followed by '&>>'); it is kept as an argument here --
        # confirm against planetki's CLI.
        ../Planetki/build/planetki "$MU" "$DELTA" "$T" 500 0 2 >> "${OUTPUT}" 2>&1
    done
done
| true
|
e257ae76d77d8af23ce5ff8b2c4ba9ecaf861fbc
|
Shell
|
void-linux/void-packages
|
/common/build-style/slashpackage.sh
|
UTF-8
| 766
| 2.921875
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#
# This helper is for templates building slashpackage software.
# http://cr.yp.to/slashpackage.html
#
# required variables
#
# build_style=slashpackage
# build_wrksrc=${pkgname}-${version}
# distfiles=<download link>
#
# example (daemontools)
#
# Template file for 'daemontools'
# pkgname=daemontools
# version=0.76
# revision=1
# build_wrksrc=${pkgname}-${version}
# build_style=slashpackage
# short_desc="A collection of tools for managing UNIX services"
# maintainer="bougyman <tj@geoforce.com>"
# license="Public Domain"
# homepage="http://cr.yp.to/daemontools.html"
# distfiles="http://cr.yp.to/daemontools/${pkgname}-${version}.tar.gz"
do_build() {
# slashpackage layout: the source tree ships its own compile driver.
package/compile
}
do_install() {
	# Install every program shipped under command/ via the vbin helper.
	for command in command/*; do
		# FIX: quote the expansion so paths containing whitespace are not
		# word-split or glob-expanded before reaching vbin.
		vbin "$command"
	done
}
| true
|
2eb0d3cc4b4e960ada7857ec6752cfcbb7765d48
|
Shell
|
datawire/jenkins-image
|
/bin/functions.sh
|
UTF-8
| 1,041
| 3.046875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Copyright 2016 Datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Abort on the first failing command.
set -e

arw_msg() {
	# Emit an arrow-prefixed status line ("--> message") on stdout.
	printf -- '--> %s\n' "$1"
}
is_baked() {
	# Look up an AMI owned by this account whose "Commit" tag equals the given
	# git commit hash. Prints the matching image id(s) to stdout; prints
	# nothing when the hash is "unknown" or no matching AMI exists.
	local commit_hash
	commit_hash=${1:?Commit hash not specified}
	local result=""

	if [ "$commit_hash" != "unknown" ]; then
		result=$(aws ec2 describe-images \
			--owner self \
			--filters "Name=tag:Commit,Values=${commit_hash}" \
			--region us-east-1 \
			--query 'Images[*].ImageId' \
			--output text)
	fi

	# FIX: never use data as the printf *format* string (a value containing
	# '%' or '\' would be misinterpreted); pass it as an argument instead.
	printf '%s' "$result"
}
| true
|
dda8c423524c89884c730d9b67a7b218b8224b36
|
Shell
|
ChPfisterer/k8s-demos
|
/scripts/k8s-setup/01-setup-all.sh
|
UTF-8
| 1,615
| 3.765625
| 4
|
[] |
no_license
|
#! /bin/bash
# Prepare an Ubuntu (Raspberry Pi) node for Kubernetes: set hostname, create
# an admin user, enable cgroups, and write a static-IP netplan config.
# Must run as root; applies netplan and reboots at the end (SSH will drop).
echo "Set variables"
read -r -p "Enter Hostname: " HOSTNAME
read -r -p "Enter static IP address (e. g. 192.168.1.10): " STATIC_IP
read -r -p "Enter Gateway address (e. g. 192.168.1.1): " GATEWAY4
read -r -p "Enter Admin Username: " ADMIN_USR
read -s -r -p "Enter Admin users password: " ADMIN_USR_PASSWD
echo "Set hostname"
hostnamectl set-hostname "${HOSTNAME}"
echo "Create user and add user to sudoers group"
useradd "${ADMIN_USR}" -m -s /bin/bash
usermod -aG sudo "${ADMIN_USR}"
echo "Setting the password for the new user"
echo "${ADMIN_USR}:${ADMIN_USR_PASSWD}" | chpasswd
echo "Setting static IP address"
# Remove any existing netplan configs before writing the static one below.
rm /etc/netplan/*
echo "Enable cgroups"
# Kubernetes needs memory/cpuset cgroups enabled in the Pi kernel cmdline.
sed -i '$ s/$/ cgroup_enable=cpuset cgroup_enable=memory cgroup_memory=1 swapaccount=1/' /boot/firmware/cmdline.txt
echo "# setting static IP address
# This file is generated from information provided by the datasource.  Changes
# to it will not persist across an instance reboot.  To disable cloud-init's
# network configuration capabilities, write a file
# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following:
# network: {config: disabled}
network:
    ethernets:
        eth0:
            addresses: [${STATIC_IP}/24]
            gateway4: ${GATEWAY4}
            nameservers:
                    addresses: [${GATEWAY4}, 8.8.8.8]
    version: 2" | tee /etc/netplan/00-static.yaml
echo "The new network settings will be applied.
Your SSH connection will break.
You need to reconnect with the following information:
ssh ${ADMIN_USR}@${STATIC_IP}
PWD: ${ADMIN_USR_PASSWD}
Hostname is: ${HOSTNAME}"
netplan apply
reboot
| true
|
9ff9210696bea95488f286720d5eaf896c43b851
|
Shell
|
aeroberts/dotfiles
|
/welcome.sh
|
UTF-8
| 1,180
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Print a boxed greeting banner: a time-of-day message followed by the
# currently active node version, centred in a 44-character-wide frame.

h=$(date +%H)
TOTAL_SPACES=44

echo //--------------------------------------------//
echo '// //'
if [ "$h" -lt 12 ]; then
    echo '// ☕️ Good morning ☕️ //'
elif [ "$h" -lt 18 ]; then
    echo '// Good afternoon //'
else
    echo '// 🌙 Good evening 🌙 //'
fi

# Node Version Block: centre "Currently running node version vX.Y.Z".
# FIX: the five 'expr' subprocesses are replaced with built-in shell
# arithmetic (same results, no forks); backticks replaced with $( ).
NODE_VERSION=$(node -v)
NODE_VERSION_LENGTH=${#NODE_VERSION}
# 31 = length of the fixed text "Currently running node version ".
NODE_OUTPUT_LENGTH=$((31 + NODE_VERSION_LENGTH))
NODE_OUTPUT_SPACES=$((TOTAL_SPACES - NODE_OUTPUT_LENGTH))
NODE_OUTPUT_FIRST_HALF_SPACES=$((NODE_OUTPUT_SPACES / 2))
NODE_OUTPUT_SECOND_HALF_SPACES=$((NODE_OUTPUT_SPACES - NODE_OUTPUT_FIRST_HALF_SPACES))
NODE_OUTPUT_STRING="//"
for i in $(seq 1 "$NODE_OUTPUT_FIRST_HALF_SPACES"); do
    NODE_OUTPUT_STRING+=' '
done
NODE_OUTPUT_STRING+="Currently running node version $NODE_VERSION"
for i in $(seq 1 "$NODE_OUTPUT_SECOND_HALF_SPACES"); do
    NODE_OUTPUT_STRING+=' '
done
NODE_OUTPUT_STRING+="//"
echo "${NODE_OUTPUT_STRING}"
echo '// //'
echo //--------------------------------------------//
| true
|
90f2359f1c72c32d2f1e1757d3cd82ab7f7130fb
|
Shell
|
bornej/M1-collected-works
|
/OpenMP-Benchmark/src/run_bubble.sh
|
UTF-8
| 436
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# M1 Informatique IM2AG-UGA
# Parallel Algorithms and Programming - TP2:
# Borne Jonathan Isnel Maxime
# Benchmark the OpenMP bubble sort: 10 runs over block sizes 8..2048 and
# 1..9 threads; ./bubble appends CSV measurement rows on its stderr.
echo "Algo_type;Nb_threads;Chunk_size;Nb_blocs;Bloc_size;Run_id;Time" > bubble_1_16.csv
for runID in {0..9}; do
for blocksize in 8 16 32 64 128 256 512 1024 2048; do
echo BLOCKSIZE $blocksize
for nb_threads in {1..9} ; do
echo NB_THREAD $nb_threads
# Measurements come out on stderr and are appended to the results file.
./bubble $nb_threads $blocksize $runID 2>> bubble_1_16.csv
done
done
done
| true
|
cc29ff44b1e37a8100a4eb48ee2a0e961133645a
|
Shell
|
KumareshBabuNS/data-lifecycle-service-broker
|
/deploy.sh
|
UTF-8
| 785
| 2.96875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Deploy the data-lifecycle service broker to Cloud Foundry: create its
# backing MySQL service if missing, push the app, configure its environment,
# then register and enable the broker.
cf service lifecycle-sb-db 2>&1 > /dev/null
# The exit status above tells us whether the service instance exists yet.
if [ $? -ne 0 ] ; then
echo "INFO: Creating database for broker"
cf create-service p-mysql 100mb-dev lifecycle-sb-db
else
echo "INFO: Database for broker already exists.... skipping"
fi
echo "INFO: Pushing broker"
cf push --no-start
#This assumes your credentials are in the same file as mine.
#see set-cf-env.sh for what the expectations are on the content
#of this file, or README.md
# Sources SECURITY_USER_NAME / SECURITY_USER_PASSWORD used further below.
. ../creds/aws-cdm-creds
echo "INFO: Setting up broker environment variables"
./set-cf-env.sh
echo "INFO: Restaging app"
cf start lifecycle-sb
echo "INFO: Creating broker"
cf create-service-broker lifecycle-sb $SECURITY_USER_NAME $SECURITY_USER_PASSWORD http://lifecycle-sb.apps.pcf.jkruckcloud.com
cf enable-service-access lifecycle-sb
|
1f1d8222552570238ee4fa804e70913222c5129f
|
Shell
|
ryuichiueda/kaigan_shellscript
|
/201304/edge.sh
|
UTF-8
| 663
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash -xv
# Read an ASCII PPM (P3) image given as $1 and emit an edge-detected P3
# image on stdout, computed from the green channel with a fixed 3x3 kernel.
tmp=/tmp/$$
# Strip PPM comments and put every numeric token on its own line.
sed 's/#.*$//' $1 |
tr ' ' '\n' |
awk 'NF==1' > $tmp-ppm
# Header fields: width, height, maximum colour value.
W=$(head -n 2 $tmp-ppm | tail -n 1)
H=$(head -n 3 $tmp-ppm | tail -n 1)
D=$(head -n 4 $tmp-ppm | tail -n 1)
# Body: r,g,b triples. Only the green channel feeds the kernel; the same
# thresholded value is printed three times (grayscale edge output).
# NOTE(review): r[] and b[] are filled but never read -- presumably
# intentional for a grayscale result; confirm before removing.
tail -n +5 $tmp-ppm |
awk -v w=$W -v h=$H -v d=$D \
'BEGIN{print "P3",w-2,h-2,d}
NR%3==1{n=(NR-1)/3;r[n%w,int(n/w)] = $1}
NR%3==2{n=(NR-2)/3;g[n%w,int(n/w)] = $1}
NR%3==0{n=(NR-3)/3;b[n%w,int(n/w)] = $1}
END{for(y=1;y<h-1;y++){
for(x=1;x<w-1;x++){
a = 2*g[x-1,y-1] + g[x-1,y] + g[x,y-1] - g[x,y+1] - g[x+1,y] - 2*g[x+1,y+1];
a += 128;
th(a);
th(a);
th(a);
}
}}
function th(v){print (v < 0) ? 0 : (v > 255 ? 255 : v)}'
rm -f $tmp-*
exit 0
| true
|
f649ab6994d91843641f1387ad6158a505e5c58e
|
Shell
|
hisashiyashiro/scale
|
/scale-rm/test/case/boxaero/no_banana_na1e4/visualize/visualize.sh
|
UTF-8
| 475
| 3.078125
| 3
|
[
"BSD-2-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#! /bin/bash -x
### Visalization ###
# Plot a time series of each listed variable from the SCALE history files
# with gpview, converting each resulting dcl.pdf into slice_<var>.png.
echo "+visualize by gpview"
rm -f dcl.pdf
# Variables to plot and, index-parallel, their value ranges ("auto" = let
# gpview pick the range).
var__set=(CN CCN)
rangeset=(auto auto)
time_set=
i=0
for var in ${var__set[@]}
do
if [ ${rangeset[$i]} == "auto" ]; then
range=""
else
range="--range="${rangeset[$i]}
fi
# time series
# NOTE(review): ${range} is computed above but never passed to gpview --
# confirm whether the option was meant to be appended here.
gpview history.pe\*.nc@${var},x=0,y=0,z=0 --nocont --wsn 2 || exit
convert -density 150 -rotate 90 +antialias dcl.pdf slice_${var}.png
rm -f dcl.pdf
let i="${i} + 1"
done
| true
|
ae0264c9a3dcd452dc5efd33c51de4f1a9fdee12
|
Shell
|
LeeoSilva/dotfiles
|
/zsh/zshrc
|
UTF-8
| 3,680
| 2.609375
| 3
|
[] |
no_license
|
# zsh interactive-shell configuration: oh-my-zsh with the powerlevel10k
# prompt (tuned via legacy POWERLEVEL9K_* settings), plugins, and aliases.
# Path to your oh-my-zsh installation.
export ZSH=~/.oh-my-zsh
# Exporting Neo-Vim as editor
export EDITOR="nvim"
export VISUAL="nvim"
export TERM=xterm-256color
## User configuration
export LANG=en_US.UTF-8
# C-Family env
export LD_LIBRARY_PATH=/usr/lib/
## Exporting .local to PATH
export PATH=~/.local/bin/:$PATH
# Exporting android to path
export ANDROID_HOME=$HOME/Android/Sdk
export PATH=$PATH:$ANDROID_HOME/tools
# ZSH Theme
ZSH_THEME=powerlevel10k/powerlevel10k
# POWERLEVEL9K CONFIG
# (powerlevel10k reads these legacy powerlevel9k-style variables)
POWERLEVEL9K_MODE="nerdfont-complete"
POWERLEVEL9K_COLOR_SCHEME='dark'
POWERLEVEL9K_DISABLE_PROMPT=true
# Left prompt: directory segment only, with flat separators.
POWERLEVEL9K_LEFT_PROMPT_ELEMENTS=(dir)
POWERLEVEL9K_LEFT_SEGMENT_SEPARATOR=''
POWERLEVEL9K_LEFT_SUBSEGMENT_SEPARATOR=' '
POWERLEVEL9K_WHITESPACE_BETWEEN_LEFT_SEGMENTS=''
POWERLEVEL9K_ALWAYS_SHOW_USER=false
POWERLEVEL9K_DIR_BACKGROUND='none'
POWERLEVEL9K_DIR_FOREGROUND='none'
POWERLEVEL9K_ETC_ICON=''
POWERLEVEL9K_FOLDER_ICON=''
POWERLEVEL9K_HOME_ICON=''
POWERLEVEL9K_HOME_SUB_ICON=''
POWERLEVEL9K_DIR_ETC_BACKGROUND='none'
POWERLEVEL9K_DIR_ETC_FOREGROUND='005'
POWERLEVEL9K_DIR_HOME_BACKGROUND='none'
POWERLEVEL9K_DIR_HOME_FOREGROUND='004'
POWERLEVEL9K_DIR_DEFAULT_BACKGROUND='none'
POWERLEVEL9K_DIR_DEFAULT_FOREGROUND='005'
POWERLEVEL9K_DIR_HOME_SUBFOLDER_BACKGROUND='none'
POWERLEVEL9K_DIR_HOME_SUBFOLDER_FOREGROUND='004'
# PoweLevel9K right prompt
POWERLEVEL9K_RIGHT_PROMPT_ELEMENTS=(status vcs virtualenv)
POWERLEVEL9K_STATUS_VERBOSE=false
POWERLEVEL9K_RIGHT_SEGMENT_SEPARATOR=' '
POWERLEVEL9K_RIGHT_SUBSEGMENT_SEPARATOR=' '
POWERLEVEL9K_WHITESPACE_BETWEEN_RIGHT_SEGMENTS=' '
POWERLEVEL9K_VCS_CLEAN_BACKGROUND='none'
POWERLEVEL9K_VCS_CLEAN_FOREGROUND='076'
POWERLEVEL9K_VCS_UNTRACKED_BACKGROUND='none'
POWERLEVEL9K_VCS_UNTRACKED_FOREGROUND='005'
POWERLEVEL9K_VCS_MODIFIED_BACKGROUND='none'
POWERLEVEL9K_VCS_MODIFIED_FOREGROUND='003'
POWERLEVEL9K_VCS_GIT_HOOKS=(vcs-detect-changes git-untracked git-aheadbehind git-remotebranch git-tagname)
# Enable bi-weekly auto-update checks.
DISABLE_AUTO_UPDATE="false"
# Uncomment the following line to change how often to auto-update (in days).
export UPDATE_ZSH_DAYS=7
# Enable colors in ls.
DISABLE_LS_COLORS="false"
# Disable auto-setting terminal title.
DISABLE_AUTO_TITLE="true"
# Enable command auto-correction.
ENABLE_CORRECTION="false"
DISABLE_CORRECTION="true"
# Display red dots whilst waiting for completion.
COMPLETION_WAITING_DOTS="true"
# Assume "cd" when a command is a directory
setopt autocd
# Share the same history between all shells
setopt sharehistory
# Disables auto-correction
unsetopt correct
unsetopt correct_all
# oh-my-zsh plugins to load.
plugins=(
git
tmux
tmuxinator
colored-man-pages
git-flow
pyenv
)
# Terminal Aliases
alias ls='ls --color=auto'
alias pwd="pwd; pwd|xclip -selection clipboard"
alias grep='grep --color=auto'
alias xclip="xclip -selection clipboard"
# Aliases->Edit Configs
alias zshconfig="nvim '$HOME'/workspace/dotfiles/zsh/zshrc"
alias aliases="nvim '$HOME'/workspace/dotfiles/zsh/aliases.sh"
alias vimconfig="nvim '$HOME'/workspace/dotfiles/nvim/init.vim"
alias tmuxconfig="nvim '$HOME'/workspace/dotfiles/tmux/tmux.conf"
alias kittyconfig="nvim '$HOME'/workspace/dotfiles/kitty/kitty.conf"
# Load oh-my-zsh, prezto, and the syntax/autosuggestion plugins.
source $ZSH/oh-my-zsh.sh
source "${ZDOTDIR:-$HOME}/.zprezto/init.zsh"
# source "${ZDOTDIR:-$HOME}/.zsh/prompt.sh"
source "${ZDOTDIR:-$HOME}/.zsh/zsh-autosuggestions/zsh-autosuggestions.zsh"
source "${ZDOTDIR:-$HOME}/.zsh/zsh-syntax-highlighting/zsh-syntax-highlighting.zsh"
# Disables CTRL+S and CTRL+Q on the terminal
stty -ixon
# Sets the keyboard layout to BR-ABNT2
setxkbmap -model abnt2 -layout br -variant abnt2
# Enter key is mapped to be used.
bindkey -s "^[OM" "^M"
| true
|
3574ade8d0bbdb843c504fce5daccf0619778912
|
Shell
|
ilin-in/OP
|
/hookflash-libs/curl/projects/gnu-make/build_old
|
UTF-8
| 6,845
| 3.9375
| 4
|
[
"BSD-2-Clause-Views"
] |
permissive
|
#!/bin/bash
# -*- coding: UTF-8 -*-
# Script for building curl library for multiple platforms.
### Define text styles ###########
TXT_C="tput setaf" # set text color in BGR format
TXT_R="tput sgr0" # reset text format
TXT_B="tput bold" # set bold
TXT_U="tput sgr 0 1" # set underlined
# Detect directory in which the script resides
BASEDIR=$(dirname "`[[ $0 == /* ]] && echo "$0" || echo "${PWD}/${0#./}"`")
# Define output directory if not defined yet
# (caller may pre-set STAGING_PREFIX to override the default staging dir)
[ -z "$STAGING_PREFIX" ] && STAGING_PREFIX=$BASEDIR/../../../../hookflash-staging
# Detect OS
UNAME=$(uname)
validate_before(){
	# Abort the whole script when the previous command returned non-zero.
	if [ "$?" != "0" ]; then
		exit 1
	fi
}
# Print the script banner and maintainer contact info (styled with tput).
caption(){
echo -e "\n`$TXT_B`Build script`$TXT_R` for building `$TXT_B`curl`$TXT_R` library for multiple platforms."
echo -e "Maintainer: `$TXT_B`Predrag Mandic`$TXT_R` [`$TXT_U``$TXT_C 4`predrag@hookflash.com`$TXT_R`]`$TXT_R`"
}
# Print the detailed help text for OS X hosts (banner + per-option docs).
help_darwin(){
caption
usage_darwin
echo -e "`$TXT_B`\nOptions`$TXT_R`"
echo -e "\t`$TXT_B`x86`$TXT_R` | x86_64 | i386"
echo -e "\t\tAll of these options build the same libraries that work on OS X on a current processor architecture (32 or 64 bit). They exist separately to solve legacy issues."
echo -e "\t`$TXT_B`arm`$TXT_R` | ios"
echo -e "\t\tBoth of these options build the same libraries that work on iOS. They exist separately to solve legacy issues."
echo -e "\t`$TXT_B`all`$TXT_R`"
echo -e "\t\tBuilds both x86 and ios libraries."
echo -e "\t`$TXT_B`clean`$TXT_R`"
echo -e "\t\tDeletes staged libraries for all architectures."
echo ""
}
usage_darwin(){
	# One-line usage summary shown on OS X hosts.
	printf '%s\n' "Usage: build {arm|x86|all|clean|help}"
}
# Print the detailed help text for Linux hosts (banner + per-option docs).
help_linux(){
caption
usage_linux
echo -e "`$TXT_B`\nOptions`$TXT_R`"
echo -e "\t`$TXT_B`x86`$TXT_R` | x86_64 | i386"
echo -e "\t\tAll of these options build the same libraries that work on Linux on a current processor architecture (32 or 64 bit). They exist separately to solve legacy issues."
echo -e "\t`$TXT_B`arm`$TXT_R` | android"
echo -e "\t\tBoth of these options build the same libraries that work on Android. They exist separately to solve legacy issues."
echo -e "\t`$TXT_B`all`$TXT_R`"
echo -e "\t\tBuilds both x86 and android libraries."
echo -e "\t`$TXT_B`clean`$TXT_R`"
echo -e "\t\tDeletes staged libraries for all architectures."
echo ""
}
usage_linux(){
	# One-line usage summary shown on Linux hosts.
	printf '%s\n' "Usage: build {arm|x86|all|clean|help}"
}
# Build curl natively for OS X (32-bit, static-only, HTTP only) and
# install it into STAGING_PREFIX.
build_darwin(){
echo "`$TXT_B``$TXT_C 2`curl build for OS X started...`$TXT_R`"
mkdir -p $STAGING_PREFIX; validate_before
cd $BASEDIR/../../curl
cp include/curl/curlbuild.h.template include/curl/curlbuild.h
cat Makefile.template > Makefile; validate_before
# Every protocol except HTTP and every optional library is disabled.
CFLAGS="-m32" ./configure --disable-ftp --disable-file --disable-ldap --disable-dict --disable-telnet --disable-tftp --disable-rtsp --disable-pop3 --disable-imap --disable-smtp --disable-gopher --disable-debug --without-ssl --without-zlib --without-libidn --enable-static=yes --enable-shared=no ; validate_before
make; validate_before
make install prefix=$STAGING_PREFIX; validate_before
rm Makefile
cd -
}
# Cross-compile curl as a static library for iOS (armv7) and install it
# into STAGING_PREFIX/iPhoneOS<SDK>. Sets up the iOS cross toolchain
# environment, then runs configure/make from a per-arch scratch dir.
build_ios(){
echo "`$TXT_B``$TXT_C 2`curl build for iOS started...`$TXT_R`"
cd $BASEDIR/../../curl
cp include/curl/curlbuild.h.template include/curl/curlbuild.h
cd -
set -e
#export CURL_VERSION="7.22.0"
export PLATFORM="iPhoneOS"
# Xcode >= 4.3 lives in /Applications/Xcode.app; older installs use /Developer.
# FIX: the fallback branches assigned the misspelled variables 'SKD' and
# 'Developer', which left SDK and DEVELOPER empty on older Xcode installs.
[[ -d "/Applications/Xcode.app/Contents/Developer" ]] && SDK="5.1" || SDK="5.0"
export SDK
export ARCH="armv7"
[[ -d "/Applications/Xcode.app/Contents/Developer" ]] && DEVELOPER="/Applications/Xcode.app/Contents/Developer" || DEVELOPER="/Developer"
export DEVELOPER
export DEVROOT="${DEVELOPER}/Platforms/${PLATFORM}.platform/Developer"
export SDKROOT="${DEVROOT}/SDKs/${PLATFORM}${SDK}.sdk"
# Cross toolchain binaries and compiler/linker flags.
export CC=${DEVROOT}/usr/bin/gcc
export LD=${DEVROOT}/usr/bin/ld
export CPP=${DEVELOPER}/usr/bin/cpp
export CXX=${DEVROOT}/usr/bin/g++
unset AR
unset AS
export NM=${DEVROOT}/usr/bin/nm
export CXXCPP=${DEVELOPER}/usr/bin/cpp
export RANLIB=${DEVROOT}/usr/bin/ranlib
export LDFLAGS="-arch ${ARCH} -pipe -no-cpp-precomp -isysroot ${SDKROOT} -L${ROOTDIR}/lib"
export CFLAGS="-arch ${ARCH} -pipe -no-cpp-precomp -isysroot ${SDKROOT} -I${ROOTDIR}/include"
export CXXFLAGS="-arch ${ARCH} -pipe -no-cpp-precomp -isysroot ${SDKROOT} -I${ROOTDIR}/include"
#pushd "../../curl-${CURL_VERSION}"
pushd "../../curl"
# Per-architecture scratch build directory.
rm -rf "${ARCH}"
mkdir -p "${ARCH}"
pushd "${ARCH}"
export ROOTDIR=`pwd`
pushd "../"
export SDK_VERSION=$SDK
mkdir -p $STAGING_PREFIX/iPhoneOS${SDK_VERSION}
cat $BASEDIR/../../curl/Makefile.template > $BASEDIR/../../curl/Makefile
./configure --host=${ARCH}-apple-darwin --prefix=$STAGING_PREFIX/iPhoneOS${SDK_VERSION} --without-ssl --without-libssh2 --with-random=/dev/urandom --disable-shared --enable-static --disable-ipv6 --disable-manual --disable-verbose ; validate_before
pushd "lib"
make; validate_before
make install; validate_before
popd
pushd "include"
make ; validate_before
make install ; validate_before
popd
popd
rm $BASEDIR/../../curl/Makefile
}
# Remove staged OS X libraries (placeholder -- not implemented yet).
clean_darwin(){
echo -e "`$TXT_C 1`This option is not implemented. Use help or contact maintainer for info.`$TXT_R`"
exit 1
#TODO
}
# Build curl natively for Linux and install it into STAGING_PREFIX.
build_linux(){
echo "`$TXT_B``$TXT_C 2`curl build for Linux started...`$TXT_R`"
mkdir -p $STAGING_PREFIX; validate_before
cd $BASEDIR/../../curl
cp include/curl/curlbuild.h.template include/curl/curlbuild.h
cat Makefile.template > Makefile; validate_before
# Every protocol except HTTP and every optional library is disabled.
./configure --disable-ftp --disable-file --disable-ldap --disable-dict --disable-telnet --disable-tftp --disable-rtsp --disable-pop3 --disable-imap --disable-smtp --disable-gopher --disable-debug --without-ssl --without-zlib --without-libidn ; validate_before
make ; validate_before
make install prefix=$STAGING_PREFIX; validate_before
rm Makefile
cd -
}
# Build curl for Android (placeholder -- not implemented yet).
build_android(){
echo -e "`$TXT_C 1`This option is not implemented. Use help or contact maintainer for info.`$TXT_R`"
#cp include/curl/curlbuild.h.template include/curl/curlbuild.h
#TODO: When implementing, don't forget the line above! ^^^
exit 1
#mkdir -p $STAGING_PREFIX
#TODO
}
# Remove staged Linux libraries (placeholder -- not implemented yet).
clean_linux(){
echo -e "`$TXT_C 1`This option is not implemented. Use help or contact maintainer for info.`$TXT_R`"
exit 1
#TODO
}
# Dispatch: pick the build/clean/help action from the host OS and $1.
case "$UNAME" in
Linux)
case "$1" in
arm|android)
build_android
;;
x86|x86_64|i386)
build_linux
;;
all)
build_linux
build_android
;;
clean)
clean_linux
;;
help)
help_linux
;;
*)
usage_linux
exit 1
;;
esac
;;
Darwin)
case "$1" in
arm|ios)
build_ios
;;
x86|x86_64|i386|osx)
build_darwin
;;
all)
build_darwin
build_ios
;;
clean)
clean_darwin
;;
help)
help_darwin
;;
*)
usage_darwin
exit 1
;;
esac
;;
# Any other OS is unsupported.
*)
echo "`$TXT_B`$UNAME`$TXT_R`: `$TXT_C 1`operating system not supported`$TXT_R`"
exit 1
;;
esac
| true
|
72fc085664e319e5f81ac25b69e67df1d598d04d
|
Shell
|
braven112/WC-Generator
|
/util/auroUpgrade.sh
|
UTF-8
| 1,198
| 3.640625
| 4
|
[
"Apache-2.0"
] |
permissive
|
## Generate a new baseline install of an auro component
# Argument expected: name of new repo
function generaterepo {
# Scaffold a new component repo (with tests) via the wc-generator CLI.
command wc-generate --test --name "$1"
}
## Migrade files from legacy repo to new build
# Arguments expected: name of old repo, name of new repo
# Optional third argument "no-demo" skips the demo directory.
# NOTE(review): every 'cp -r' below is commented out, so this currently only
# *reports* what would be copied -- confirm whether this dry-run state is
# intentional before relying on it.
function auroupgrade {
GREEN='\033[0;32m'
RED='\033[0;31m'
NC='\033[0m' # No Color
command clear
echo -e "The following steps have been address to migrade necessary code from '$1' to '$2'.\n"
echo -e "Please be sure to review a diff within the new repo as this upgrade does not account\nfor legacy repositoy customizations such as additional dependencies or multiple CDN packaged bundles."
# command cp -r "$1"/.git/ "$2"/.git
echo -e "\n${GREEN}./.git directory copied from '$1' to '$2'\n"
# command cp -r "$1"/src/ "$2"/src
echo -e "${GREEN}./src directory copied from '$1' to '$2'\n"
# command cp -r "$1"/test/ "$2"/test
echo -e "${GREEN}./test directory copied from '$1' to '$2'\n"
if [[ $3 = "no-demo" ]]; then
echo -e "${RED}./demo directory was NOT copied${NC}\n"
else
# command cp -r "$1"/demo/ "$2"/demo
echo -e "${GREEN}./demo directory copied from '$1' to '$2'\n${NC}"
fi
}
| true
|
4c0ccfcc87c7861fa2b102291059133e9ef356f0
|
Shell
|
makerspace/makeradmin
|
/db_restore.sh
|
UTF-8
| 669
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# Restore the makeradmin MySQL database (inside the db2 compose service)
# from a SQL dump file. Destructive: prompts for confirmation first.

file_path="$1"

if [ -z "$file_path" ]; then
    echo "Usage: db_restore.sh <path to sql file>"
    exit 1
fi

if [ ! -f "$file_path" ]; then
    echo "File not found: $file_path"
    exit 1
fi

# Prompt for confirmation before restoring
while true; do
    # FIX: -r keeps backslashes in the answer literal.
    read -r -p "This will delete your current database!! Are you sure you want to continue? " yn
    case $yn in
        [Yy]* ) break;;
        [Nn]* ) exit;;
        * ) echo "Please answer yes or no.";;
    esac
done

echo ""
echo "Restoring DB..."
# FIX: quote the path and feed the dump via stdin redirection instead of
# 'cat file |' (handles paths with spaces; avoids a useless cat).
docker compose exec -T db2 bash -c "exec mysql --default-character-set=utf8mb4 -uroot -p\${MYSQL_ROOT_PASSWORD} makeradmin" < "$file_path"
echo "Done"
| true
|
3aff9dc7aa6baa90325e9b9b1e600cb0288351dd
|
Shell
|
nanoant/archmac-packages
|
/extra/python3-recommonmark/PKGBUILD
|
UTF-8
| 1,643
| 2.546875
| 3
|
[] |
no_license
|
# Ported from Arch Linux:
# https://git.archlinux.org/svntogit/community.git/plain/trunk/PKGBUILD?h=packages/python-recommonmark
# Maintainer: Levente Polyak <anthraxx[at]archlinux[dot]org>
# PKGBUILD for recommonmark (Markdown parser for docutils), adapted to the
# archmac /opt/arch prefix. Doc generation and two failing tests are
# temporarily disabled -- see the FIXME comments below.
pkgname=python3-recommonmark
pkgver=0.6.0
pkgrel=2
_gitcommit=48f9d1a6848c77a104854f556c053f3ed3c2c354
pkgdesc='Markdown parser for docutils'
url='https://github.com/readthedocs/recommonmark'
arch=('any')
license=('MIT')
depends=('python3-docutils' 'python3-commonmark' 'python3-setuptools' 'python3-sphinx')
makedepends=('git')
checkdepends=('python3-pytest')
source=(${pkgname}-${pkgver}.tar.gz::https://github.com/readthedocs/recommonmark/archive/${pkgver}.tar.gz)
sha256sums=('d9aaeef3937e397794d8cd563c458f13d965ee22eb84732c9e214fa7ab8c9999')
# Build the Python package (docs build disabled, see FIXME).
build() {
cd recommonmark-${pkgver}
python3 setup.py build
# FIXME: Doc generation crashes sphinx
# make -j1 -C docs text man SPHINXBUILD=sphinx-build
}
# Test suite entry point (currently a no-op, see FIXME).
check() {
cd recommonmark-${pkgver}
# FIXME: Two tests fail
# py.test
}
# Install into the /opt/arch staging root along with the MIT license text.
package() {
cd recommonmark-${pkgver}
python3 setup.py install --root="${pkgdir}" --skip-build -O1
install -dm755 "${pkgdir}/opt/arch/share/licenses/${pkgname}"
install -m644 license.md "${pkgdir}/opt/arch/share/licenses/${pkgname}/license.md"
# FIXME:
# install -Dm 644 README.md CHANGELOG.md -t "${pkgdir}/opt/arch/share/doc/${pkgname}"
# install -Dm 644 docs/_build/text/*.txt -t "${pkgdir}/opt/arch/share/doc/${pkgname}"
# install -dm755 "${pkgdir}/opt/arch/share/man/man1/${pkgname}.1"
# install -m644 docs/_build/man/recommonmark.1 "${pkgdir}/opt/arch/share/man/man1/${pkgname}.1"
}
# vim: ts=2 sw=2 et:
| true
|
9840879c0b3956594462f9d3c04561834dca2f9f
|
Shell
|
keyko-io/filecoin-verifier-service
|
/e2e-test/setup.sh
|
UTF-8
| 2,163
| 2.765625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# End-to-end test setup for the filecoin verifier service against a lotus
# localnet: fund accounts, create the notary multisig, start the app
# service, grant datacap via its API, and drive a verified storage deal
# through to a Proving sector.
export JSDIR=/filecoin-verifier-tools/
sleep 20
# Block until the lotus daemon API is reachable.
lotus wait-api
lotus chain head
# Genesis account that funds everything else.
export MAIN=$(cat localnet.json | jq -r '.Accounts | .[0] | .Meta .Owner')
export ROOT1=t1vzw5hg23fn7ob4gfpzmzej7h76h6gjr3572elvi # t0101
export ROOT2=t1cncuf2kvfzsmsij3opaypup527ounnpwhiicdci # t0102
# Send funds to root key
lotus send --from $MAIN $ROOT1 5000000
lotus send --from $MAIN $ROOT2 5000000
lotus send --from $MAIN $(lotus wallet new) 12
lotus send --from $MAIN $(lotus wallet new) 12
export VERIFIER=t1us742aljq3rregf6eldkdbi2ymsnfifhq7meyly
export VERIFIER2=$(lotus wallet new)
export CLIENT=$(lotus wallet new)
# Send funds to verifier
lotus send --from $MAIN $VERIFIER 5000000
lotus send --from $MAIN $VERIFIER2 5000000
# Send funds to client
lotus send --from $MAIN $CLIENT 5000000
lotus send --from $MAIN t1o47ee4dqp6fn7hacdalcai5seoxtms2327bpccq 5000000 # slate
lotus send --from $MAIN t1gechnbsldgbqan4q2dwjsicbh25n5xvvdzhqd3y 5000000 # textile
# Wait until the ROOT2 transfer has landed on chain.
while [ "5000000 FIL" != "$(lotus wallet balance $ROOT2)" ]
do
sleep 1
lotus wallet balance $ROOT2
done
# Create the notary multisig and propose a verifier on it.
node $JSDIR/samples/api/new-msig.js
sleep 15
node $JSDIR/samples/api/propose-verifier.js t01009
lotus msig inspect t080
sleep 15
lotus msig inspect t080
# Launch the verifier app service in its own tmux window.
tmux new-window -t lotus:4 -n appservice -d bash run-app-service.sh
sleep 30
lotus msig inspect t01009
lotus msig inspect t01010
# Grant datacap to the client through the service's REST API.
curl -H "Content-Type: application/json" -H "Authorization: Bearer $(cat /filecoin-verifier-service/token)" -d "{\"clientAddress\": \"t01006\", \"datetimeRequested\": 1}" localhost:4001/verifier/client/datacap
lotus-shed verifreg list-verifiers
sleep 30
lotus-shed verifreg list-clients
# Import data and make a verified storage deal with the local miner.
export DATA=$(lotus client import dddd | awk '{print $NF}')
lotus client local
lotus client deal --verified-deal --from $CLIENT $DATA t01000 0.005 1000000
# Wait for the deal's sector to appear, seal it, then wait for Proving.
while [ "3" != "$(lotus-miner sectors list | wc -l)" ]
do
sleep 10
lotus-miner sectors list
done
lotus-miner sectors seal 2
lotus-miner info
lotus-miner sectors list
while [ "3" != "$(lotus-miner sectors list | grep Proving | wc -l)" ]
do
sleep 5
lotus-miner sectors list | tail -n 1
lotus-miner info | grep "Actual Power"
done
sleep 300000
| true
|
ca6890e706914048a7c9323ad96eb22f0807bb28
|
Shell
|
flarouche3129/seg3103_playground
|
/Lab4/fizzbuzz_java/bin/test
|
UTF-8
| 694
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Build and test the FizzBuzz Java app. The file mixes Unix and Windows
# command variants; every line executes as written on whichever shell runs it.
# Remove any class files from dist
rm -f ./dist/*.class
#Remove for Windows
# NOTE(review): duplicates the rm above and, lacking -f, errors when no
# .class files exist -- confirm whether this line can be dropped.
rm ./dist/*.class
# Compile the application
javac -encoding UTF-8 --source-path src -d dist src/*.java
# Run the app
java -cp ./dist FizzBuzz
# Compile the tests
javac -encoding UTF-8 --source-path test -d dist -cp dist:lib/junit-platform-console-standalone-1.7.1.jar test/*.java
#Compile on Windows? USE THIS ONE TO COMPILE THE CODE
javac -encoding UTF-8 --source-path src -d dist -cp lib/junit-platform-console-standalone-1.7.1.jar test/*.java
#Run the tests on Windows and Mac USE THIS TO RUN THE TESTS
java -jar lib/junit-platform-console-standalone-1.7.1.jar --class-path dist --scan-class-path
| true
|
14e4c608ad631687131fe47d45a16f1599559e90
|
Shell
|
flexbuild/flexbuild
|
/src/misc/tsize.sh
|
UTF-8
| 684
| 3.78125
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
#
# Copyright 2019 NXP
#
# SPDX-License-Identifier: BSD-3-Clause
#
# This script queries terminal for current window size and does a stty
# to set rows and columns to that size. It is useful for terminals that
# are confused about size. This often happens when accessing consoles
# via a serial port.
# Skip under SSH: the terminal size is already propagated there.
if [ -n "$SSH_CLIENT" ] || [ -n "$SSH_TTY" ]; then
exit
fi
# Save terminal settings so they can be restored after the raw-mode query.
orig=$(stty -g)
stty cbreak -echo min 0 time 8
# CSI 18t asks the terminal to report its text-area size as ESC[8;rows;colst;
# the IFS below splits that reply into its fields.
printf '\033[18t' > /dev/tty
IFS='[;t'
read _ char2 rows cols < /dev/tty
# No (or malformed) response: restore settings and give up.
[[ "$char2" == "8" ]] || { stty "$orig" ; exit ; }
stty "$orig"
# Ignore implausible replies (more than 3 digits of columns).
if [ `awk -v string="$cols" 'BEGIN { print length(string)'}` -gt 3 ]; then
exit
fi
stty rows "$rows" columns "$cols"
| true
|
43f2a8e571054b78a186a133a00cb25d743b5025
|
Shell
|
silverstripe/silverstripe-behat-extension
|
/bin/behat-ss
|
UTF-8
| 482
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Run the behat test suite against a freshly started chromedriver and a
# local webserver; both are torn down when behat finishes. Logs land in
# ./artifacts.
echo "setting up /artifacts"
mkdir -p artifacts

echo "starting chromedriver"
# FIX: '&>' is a bash-only operator; under /bin/sh it backgrounded the
# command early, so $! captured the wrong PID. Redirect each stream
# explicitly (net effect identical: stdout -> .log, stderr -> -error.log).
chromedriver > artifacts/chromedriver.log 2> artifacts/chromedriver-error.log &
cd_pid=$!

echo "starting webserver"
vendor/bin/serve > artifacts/serve.log 2> artifacts/serve-error.log &
ws_pid=$!

echo "starting behat"
vendor/bin/behat "$@"

echo "killing webserver (PID: $ws_pid)"
pkill -TERM -P "$ws_pid" > /dev/null 2>&1

echo "killing chromedriver (PID: $cd_pid)"
kill -9 "$cd_pid" > /dev/null 2>&1
| true
|
985bc91f0f07cfadb29b8c1818ba0667bb79f5d8
|
Shell
|
rifqi96/stakewithus-frontend
|
/docker-etc/node/start.sh
|
UTF-8
| 273
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/sh
# Container entrypoint: install npm dependencies (unless already cached),
# then either build + serve the static bundle (production) or run the dev
# server.

# If node_modules folder exists, don't npm ci unless FORCE_NPM_I is set.
# FIX: '==' inside [ ] is a bashism that fails on POSIX sh (dash); the
# portable equality operator is '='.
if [ ! -d "node_modules" ] || [ "$FORCE_NPM_I" = "true" ] ; then
    npm ci
fi

if [ "$NODE_ENV" = "production" ] ; then
    npm run build
    http-server dist -p 8080
else
    npm run serve
fi
| true
|
2d8c0510148b92322a8ca598be51d81bc347caa6
|
Shell
|
colegatron/nginx-lua-pagespeed
|
/set2.sh
|
UTF-8
| 5,756
| 3.09375
| 3
|
[] |
no_license
|
# This will provide nginx version 1.12.0
# NOTE(review): no shebang — run this with bash ('read -p' and 'echo -e' are not portable /bin/sh).
echo -e "\nUsage: $0"
echo "Run it from any folder, will download and build everything under /tmp/build"
echo -e "\nYou will also need the pre-modified rules-1.12.0 file to build the nginx-extras with lua+googlespeed modules among others"
echo -e "\nAt the end of the process you'll find some debs on /tmp/build. "
# Wait for operator confirmation before touching the system.
read -p "Press [enter] to start..."
# Remember the starting directory; the pre-modified rules-1.12.0 file is expected here.
CURDIR=${PWD}
# Add repository keys and ensure deb-src is not commented out
sudo apt-get update
sudo apt-get install -y software-properties-common
sudo add-apt-repository -y ppa:nginx/stable
# NOTE(review): sudo does not cover shell redirections — the two writes below
# only succeed if the whole script already runs as root.
echo "deb http://ppa.launchpad.net/nginx/stable/ubuntu trusty main" > /etc/apt/sources.list.d/nginx-stable-trusty.list
echo "deb-src http://ppa.launchpad.net/nginx/stable/ubuntu trusty main" >> /etc/apt/sources.list.d/nginx-stable-trusty.list
sudo apt-get update #yes, again
# Build dependencies for nginx plus the lua / pagespeed / geoip / perftools modules.
sudo apt-get install -y git-core build-essential zlib1g-dev libpcre3 libpcre3-dev redis-server libssl-dev libgeoip-dev libgoogle-perftools-dev geoip-database lua5.1 liblua5.1-0 liblua5.1-0-dev lua-iconv-dev libghc-iconv-dev luarocks libpcre3-dev libghc-zlib-bindings-dev libgd-dev libgd3 dpkg-dev
#rm -rf /tmp/build
mkdir -p /tmp/build
cd /tmp/build
# Now, get the modules that we want to use so that nginx can handle CSRF, geoIP, etc
# First, we will need the nginx development kit - find the latest version at https://github.com/simpl/ngx_devel_kit/tags
#wget -O ngx_devel_kit.tar.gz https://github.com/simpl/ngx_devel_kit/archive/v0.2.18.tar.gz # this is deprecated
wget -O ngx_devel_kit.tar.gz https://github.com/simpl/ngx_devel_kit/archive/v0.3.0.tar.gz
tar xvzf ngx_devel_kit.tar.gz
## Get the headers more module (latest available from https://github.com/agentzh/headers-more-nginx-module/tags)
#wget -O ngx_headers_more.tar.gz https://github.com/agentzh/headers-more-nginx-module/archive/v0.19rc1.tar.gz
#tar xvzf ngx_headers_more.tar.gz
## Get the user agent module:
##git clone git://github.com/taobao/nginx-http-user-agent.git # deprecated
#git clone git://github.com/alibaba/nginx-http-user-agent.git
## Get the iconv module # needs libgd3 and libgd-dev
#git clone git://github.com/calio/iconv-nginx-module.git
## Get the form input module
#git clone git://github.com/calio/form-input-nginx-module.git
# Nginx pagespeed
# NOTE(review): NPS_VERSION is defined but the version is still hard-coded in
# the wget/unzip/cd lines below — keep them in sync when bumping.
NPS_VERSION=1.12.34.2
wget https://github.com/pagespeed/ngx_pagespeed/archive/v1.12.34.2-beta.zip
unzip -o v1.12.34.2-beta.zip
cd ngx_pagespeed-1.12.34.2-beta/
# Download the matching PSOL binary; newer releases ship a helper script that
# prints the correct URL for this platform.
psol_url=https://dl.google.com/dl/page-speed/psol/1.12.34.2.tar.gz
# NOTE(review): 'PSOL_BINARY_URL' is passed as a literal argument — presumably
# the helper expects this token; confirm against format_binary_url.sh.
[ -e scripts/format_binary_url.sh ] && psol_url=$(scripts/format_binary_url.sh PSOL_BINARY_URL)
wget ${psol_url}
tar -xzvf $(basename ${psol_url}) # extracts to psol/
cd ..
## Lua install
#git clone http://luajit.org/git/luajit-2.0.git
#cd luajit-2.0
#sudo make install
#export LUAJIT_LIB=/usr/local/bin/luajit
#export LUAJIT_INC=/usr/local/include/luajit-2.0
#cd ..
## ngx_lua_module
#wget -O ngx-lua.tgz https://github.com/openresty/lua-nginx-module/archive/v0.10.9rc5.tar.gz
#tar zxvf ngx-lua.tgz
#NGX_LUA_PATH=lua-nginx-module-0.10.9rc5
## Get Redis2
##wget -O ngx_redis2.tar.gz https://github.com/agentzh/redis2-nginx-module/archive/v0.09.tar.gz
#wget -O ngx_redis2.tar.gz https://github.com/openresty/redis2-nginx-module/archive/v0.14.tar.gz
#tar xzvf ngx_redis2.tar.gz
# Dont' use the tgz. After check the build worked, let's build the .deb from the sources and repackage it to reinstall on other servers
#wget http://nginx.org/download/nginx-1.13.1.tar.gz
#tar zxvf nginx-1.13.1.tar.gz
#cd nginx-1.13.1
# Install source and get folder name (could break if apt-get messages changes)
apt-get source -y nginx
apt-get build-dep -y nginx
NGINX_VER="1.12.0" # This is the version provided by the nginx repository for Trustry
cd nginx-${NGINX_VER}
# Drop in the pre-modified rules file that wires the extra modules into the build.
cp $CURDIR/rules-1.12.0 debian/rules
#./configure \
#	--prefix=/usr/share/nginx \
#	--with-cc-opt='-g -O2 -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security -D_FORTIFY_SOURCE=2' \
#	--with-ld-opt="-Wl,-Bsymbolic-functions -Wl,-z,relro,-rpath,${LUAJIT_LIB}" \
#	--add-module=../ngx_devel_kit-0.3.0 \
#	--add-module=../${NGX_LUA_PATH} \
#	--add-module=../redis2-nginx-module-0.14 \
#	--conf-path=/etc/nginx/nginx.conf \
#	--http-log-path=/var/log/nginx/access.log \
#	--error-log-path=/var/log/nginx/error.log \
#	--lock-path=/var/lock/nginx.lock \
#	--pid-path=/run/nginx.pid \
#	--http-client-body-temp-path=/var/lib/nginx/body \
#	--http-fastcgi-temp-path=/var/lib/nginx/fastcgi \
#	--http-proxy-temp-path=/var/lib/nginx/proxy \
#	--http-scgi-temp-path=/var/lib/nginx/scgi \
#	--http-uwsgi-temp-path=/var/lib/nginx/uwsgi \
#	--with-debug \
#	--with-pcre-jit \
#	--with-ipv6 \
#	--with-http_ssl_module \
#	--with-http_stub_status_module \
#	--with-http_realip_module \
#	--with-http_addition_module \
#	--with-http_dav_module \
#	--with-http_flv_module \
#	--with-http_geoip_module \
#	--with-http_gzip_static_module \
#	--with-http_image_filter_module \
#	--with-http_random_index_module \
#	--with-http_secure_link_module \
#	--with-http_sub_module \
#	--with-mail \
#	--with-mail_ssl_module \
#	--with-http_geoip_module \
#	--with-http_ssl_module \
#	--with-http_gzip_static_module \
#	--with-google_perftools_module \
#	--add-module=../headers-more-nginx-module-0.19rc1 \
#	--add-module=../nginx-http-user-agent \
#	--add-module=../ngx_pagespeed-${NPS_VERSION}-beta \
#	--add-module=../form-input-nginx-module \
#	--add-module=../iconv-nginx-module
# Build unsigned binary packages from the patched debian/ tree.
sudo dpkg-buildpackage -b
cd ..
# NOTE(review): /tmp/output is assumed to already exist (e.g. a docker volume
# mount) — TODO confirm.
cp *deb /tmp/output
# NOTE(review): "\m" below is likely a typo for "\n".
echo -e "\mAfter copying deb packages from /tmp/output to your localhost outside docker, just remove the container to free space"
| true
|
0f968beebf2f1405b6fe2925f599e7834b5fc0e5
|
Shell
|
9001/usr-local-bin
|
/vmdelta
|
UTF-8
| 602
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
set -e
# xzhash FILE — cache the first 128 hex chars of sha512 over FILE's
# xz-decompressed content under hash/FILE. Skips the (expensive) hash when
# hash/FILE already carries the same mtime as FILE (compared via stat -c%Y);
# touch -r then syncs the cache file's mtime to FILE so the next run can
# short-circuit.
xzhash() { mkdir -p hash; [ $(stat -c%Y -- "$1") -eq $(stat -c%Y -- "hash/$1" 2>/dev/null || echo 0) ] && return; printf 'hashing %s\n' "$1"; pixz -dtk < "$1" | sha512sum | cut -c-128 > "hash/$1"; touch -r "$1" -- "hash/$1"; }
# mkpatch SUFFIX A B — write A.XD3<SUFFIX>: an xz-compressed xdelta3 patch that
# rebuilds B from A (xdelta3 -s uses A as the source). The patch inherits B's
# mtime, and both inputs get (re)hashed via xzhash.
mkpatch() { f1="$2"; f2="$3"; fp="$f1.XD3$1"; printf '%s » %s = %s\n' "$f1" "$f2" "$fp"; xdelta3 -eRs "$f1" < "$f2" | pixz -7tk > "$fp"; touch -r "$f2" "$fp"; xzhash "$f1"; xzhash "$f2"; }
# Require exactly two arguments: the most recent version and the original file.
[ "$#" -eq 2 ] || {
echo need arg1: most recent version
echo need arg2: original file
echo example: 'centos.ext4.trim.xfce4{.cfg,}'
exit 1
}
# Produce $1.XD3-1: a patch rebuilding the original ($2) from the recent ($1).
mkpatch -1 "$@"
| true
|
f87da02b3d32b315cbb5cc11eaa7f29b74fdba14
|
Shell
|
finwo/cookie.jar
|
/one.sh
|
UTF-8
| 305
| 3.203125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Print one random cookie from the jar, rendered by a JavaScript engine.

# Resolve the script's directory (quoted: paths may contain spaces) and pick
# the first available JS interpreter.
WD="$(dirname "$0")"
JSE=$(command -v node nodejs mujs | head -n 1)

# Make sure the jar submodule is present (quietly; best-effort).
git submodule update --init --recursive &>/dev/null

# Pick a random cookie file; if one exists, feed it to the renderer on stdin
# (redirect instead of the original useless use of cat).
FILE=$(find "${WD}/jar" -type f -name \*.json | shuf -n 1)
[ -z "${FILE}" ] || "$JSE" "$WD/one.js" < "${FILE}"
| true
|
6dbdb6b855f6ee4b3bd864e3214a94e95aa728b3
|
Shell
|
wlsherica/FunTime
|
/ehc2015/log_parser.sh
|
UTF-8
| 311
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Objective: count lines containing "group=ec" in a log file.
# Usage: log_parser.sh [logfile]
#   Defaults to /Users/etu/data/log.txt, preserving the original behavior.

LOG_FILE=${1:-/Users/etu/data/log.txt}

actionCount=0
# IFS= and -r keep each line intact (no whitespace trimming, no backslash
# interpretation). The unused per-line flag from the original was dropped.
while IFS= read -r line; do
    if [[ "$line" = *group=ec* ]]; then
        (( actionCount++ ))
    fi
done < "$LOG_FILE"
echo "Ans. There are $actionCount action keywords from log."
| true
|
540ac696be03835502f42fe9938aff6fa198a11f
|
Shell
|
SixArm/maildir-mkdir
|
/maildir-mkdir
|
UTF-8
| 1,315
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Maildir shell script:
# make a Maildir directory and subdirectories.
#
# Syntax:
#
#     maildir-mkdir [path]
#
# This script makes the required directories:
#
#   * `cur` for current mail
#   * `new` for new mail
#   * `tmp` for temporary files
#
# This script also makes the popular directories for IMAP and POP:
#
#   * `.Drafts` for work in progress mail
#   * `.Sent` for sent mail
#   * `.Templates` for mail templates
#   * `.Trash` for deleted mail
#
# The result:
#
#     Maildir/cur
#     Maildir/new
#     Maildir/tmp
#     Maildir/.Drafts
#     Maildir/.Sent
#     Maildir/.Templates
#     Maildir/.Trash
#
# ## Path details
#
# The path default is the current user's home directory.
#
# The path must exist.
#
# ## Examples
#
# To make directories in the current user's home directory:
#
#     maildir-mkdir
#
# To make directories in a specific directory:
#
#     maildir-mkdir /home/alice
#
# To make directories that get automatically copied when a
# typical Linux sysadmin creates a new user on the system:
#
#     maildir-mkdir /etc/skel
#
# Program: maildir-mkdir
# Version: 1.2.1
# Created: 2010-10-16
# Updated: 2017-03-31
# License: GPL
# Contact: Joel Parker Henderson (joel@joelparkerhenderson.com)
##
set -euf
path=${1:-$HOME}
cd "$path"
# POSIX sh has no brace expansion, so list every directory explicitly.
# (Under dash/ash the original `Maildir/{cur,new,...}` form created a single
# literal directory named `{cur,new,...}` instead of the seven directories.)
mkdir -p Maildir/cur Maildir/new Maildir/tmp \
  Maildir/.Drafts Maildir/.Sent Maildir/.Templates Maildir/.Trash
| true
|
494062fe503a34eb14111e55842cd745846c4f25
|
Shell
|
apples723/terraformify
|
/terraformify.sh
|
UTF-8
| 2,344
| 4.46875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#generates a EC2 module from template files
#to use this script effectivly:
# 1. copy this hole folder to a directory (i.e ~/.tf_generator)
# 2. add that directory to your path (i.e export PATH=$PATH:~/.tf_generator)
# 3. update the template paths accordingly
# Help text printed on -h and on unrecognized options.
__usage="
Usage: new_ec2 [OPTIONS]
Options:
-c <config file> Specify the config file to use for the template. Can be either a file name or file path. If not specified will use ~/.terraformify/tf_default.conf
-i Will initalize the terraform module that is created from the template.
-h Display this message
-u Update the default config. Useful to edit default config before each use, rather then writing a whole config for each use
"
# All generator state (default config and templates) lives under this directory.
script_path="$HOME/.terraformify"
#exit if not configured
if ! [ -d ${script_path} ]
then
echo "ERROR: you have not ran the install script, please do so before continuing"
exit 1;
fi
#update config file function
# Opens the default config in vi for interactive editing, then exits the script.
function update_config () {
vi ${script_path}/tf_default.conf
exit 0
}
# Hand-rolled option loop: consumes flags until the first non-option argument.
# NOTE(review): '-c' given as the last argument leaves $2 empty and makes
# 'shift 2' fail (params unchanged), so the loop spins forever — consider
# validating $2 before shifting.
while true;do
case "$1" in
-c | --config)
config_file=$2
shift 2;;
-u | --update)
update_config
;;
-i | --init )
tf_init=true
shift ;;
-h | --help)
echo "$__usage"
exit 0
;;
-* )
echo "invalid option: $1"
echo "$__usage"
exit 1
;;
* )
break;;
esac
done
# Fall back to the default config when -c was not given.
if [ -z "$config_file" ]
then
echo "no config file was specified using default..."
config_file=${script_path}/tf_default.conf
fi
#template paths
main_template=${script_path}/templates/main.tpl
variables_template=${script_path}/templates/variables.tpl
#tf files that don't need to be editied
fixed_templates_directory=${script_path}/templates/fixed/*
#source the config file
# NOTE(review): sourcing executes the config as shell code — only use trusted
# config files. It is expected to define at least ${instance_name}.
. ${config_file}
#create directory/directory name from instance name and strip quotes
directory_name=$(echo ${instance_name} | sed 's/\"//g')
mkdir ${directory_name}
#copy fixed tf files
cp -r ${fixed_templates_directory} ${directory_name}
#generate dynamic TF files
# The eval/echo trick expands ${var} placeholders in the templates using the
# variables defined by the sourced config.
# NOTE(review): eval will also execute embedded $(...) from the template or
# config — injection risk with untrusted input.
eval "echo \"$(cat "${main_template}")\"" > ${directory_name}/main.tf
eval "echo \"$(cat "${variables_template}")\"" > ${directory_name}/variables.tf
# Optionally run 'terraform init' inside the freshly generated module.
if [ "$tf_init" = true ]; then
cd ${directory_name}
terraform init
fi
| true
|
e1936129f26be1a721a9ff6d97c36264d93dc22d
|
Shell
|
bichpham/GPIO-Midi
|
/DependeciesInstall.sh
|
UTF-8
| 1,990
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/sh
# Install build dependencies for GPIO-Midi, then download, build, install and
# start the pigpio daemon. The script stops with exit status 1 on the first
# failed step, echoing "<name> failed"; on success it echoes "<name> success".

# run_step NAME CMD [ARGS...]
# Run CMD in the current shell (so steps like 'cd' still take effect), echo
# "NAME success" on success, or echo "NAME failed" and exit 1 on failure.
# This replaces the fifteen copy-pasted `if [ $? -eq 0 ]` blocks of the
# original; every message is preserved byte-for-byte.
run_step() {
    step_name=$1
    shift
    if "$@"; then
        echo "$step_name success"
    else
        echo "$step_name failed"
        exit 1
    fi
}

echo "Begin script"

# Development headers: ALSA, GLib, udev, readline, libtool, intltool, D-Bus,
# libical. (Flag usage mirrors the original: some steps used -y, some did not.)
run_step "libasound2-dev" sudo apt-get install libasound2-dev
run_step "libglib2.0-dev" sudo apt-get install libglib2.0-dev -y
run_step "udev" sudo apt-get install udev
run_step "libreadline" sudo apt-get install libreadline-dev -y
run_step "libtool" sudo apt-get install libtool -y
run_step "intltool" sudo apt-get install intltool -y
run_step "libdbus" sudo apt-get install libdbus-1-dev
run_step "libical-dev" sudo apt-get install libical-dev -y

# Fetch a fresh copy of pigpio, removing any previous download first
# (removals are unchecked, as in the original).
rm pigpio.zip
sudo rm -rf PIGPIO
run_step "download pigpio" wget abyz.me.uk/rpi/pigpio/pigpio.zip
run_step "unzip pigpio" unzip pigpio.zip
run_step "cd pigpio" cd PIGPIO
run_step "make pigpio" make
run_step "make install pigpio" sudo make install

# Start the pigpio daemon.
run_step "pigpiod" sudo pigpiod

# NOTE(review): this URL does not look like a valid GitHub archive path and its
# result is unchecked — presumably a leftover; confirm before relying on it.
wget https://github.com/oxesoft/bluez.zip
echo 'Completed Dependencies Installation'
| true
|
cbe1f92df435f77520251fbe0b5a489a87bec04e
|
Shell
|
pingcai/java-staging
|
/java-test/deploy/build.sh
|
UTF-8
| 373
| 2.859375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build the java-test jar using the maven profile from $profile (default: dev).
#
# Normally sources and the runtime jar should not live in the same place;
# consider removing the sources and .git after the build completes.

prof=$profile
# Bug fix: the original tested `[ ! $a ]` with a never-set variable $a, so
# prof was unconditionally reset to dev even when $profile was provided.
# Default only when no profile was given.
[ -z "$prof" ] && export prof=dev
echo "current environment: $prof"

# Remember the starting directory. Do NOT use the special PWD variable for
# this: bash rewrites PWD on every cd, so the original `PWD=$(pwd); cd ..;
# cd $PWD` never returned to the starting directory.
START_DIR=$(pwd)
cd ..
mvn clean -U
mvn package -Dmaven.test.skip=true -P "$prof"
mv target/java-test*.jar target/java-test.jar
cd "$START_DIR"
| true
|
b11883ad07dbc01b2e001968201fed9eafb621f2
|
Shell
|
vikjam/docker
|
/r-bionic/userconf.sh
|
UTF-8
| 337
| 2.8125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/with-contenv bash
# Fall back to sane defaults when the environment does not provide values.
NB_USER="${NB_USER:-jovyan}"
RPASSWORD="${RPASSWORD:-rstudio}"

# works for jupyterlab but conflicts with rstudio on alternate port
# export JPASSWORD=${JPASSWORD:=jupyter}

# Set the account password for the notebook user.
printf '%s:%s\n' "$NB_USER" "$RPASSWORD" | chpasswd
| true
|
6de36939c4c81dac1d9d370d369ff2d64749eb5c
|
Shell
|
cbrdy/ocpstuff
|
/scripts/deploy_lab.sh
|
UTF-8
| 2,364
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Deploy an OpenShift lab: create VMs across the hypervisors for either an
# "oso" topology (centos7 base image) or an "ocp" topology (rhel7 base image).
#
# Usage: deploy_lab.sh ocp|oso
# Each `ssh ... new.sh NAME TEMPLATE RAM_MB CPUS MAC` call is backgrounded so
# the hypervisors provision in parallel; a final `wait` reaps them all.

TYPE=$1
TEMPLATE=rhel7

#echo "Cleaning up data dirs..."
#ssh root@storage /cloud/scripts/osev3/cleanup.sh

if [ "$TYPE" = "oso" ]; then
  TEMPLATE=centos7
  echo "- using $TEMPLATE base image"
  ssh root@hv2.home.nicknach.net /cloud/scripts/new.sh omaster01 "$TEMPLATE" 8192 2 52:54:00:18:55:49 &
  ssh root@hv3.home.nicknach.net /cloud/scripts/new.sh omaster02 "$TEMPLATE" 8192 2 52:54:00:19:57:49 &
  ssh root@hv5.home.nicknach.net /cloud/scripts/new.sh omaster03 "$TEMPLATE" 8192 2 52:54:00:18:58:49 &
  ssh root@hv2.home.nicknach.net /cloud/scripts/new.sh onode01 "$TEMPLATE" 16384 2 52:54:00:18:56:49 &
  # NOTE(review): 16385 looks like a typo for 16384 — confirm before changing.
  ssh root@hv3.home.nicknach.net /cloud/scripts/new.sh onode02 "$TEMPLATE" 16385 2 52:54:00:18:57:49 &
  ssh root@hv5.home.nicknach.net /cloud/scripts/new.sh onode03 "$TEMPLATE" 16384 2 52:54:00:12:58:49 &
  # NOTE(review): 4196 looks like a typo for 4096 — confirm before changing.
  ssh root@hv2.home.nicknach.net /cloud/scripts/new.sh olb "$TEMPLATE" 4196 1 52:54:00:18:58:17 &
  ssh root@hv2.home.nicknach.net /cloud/scripts/new.sh oinfra01 "$TEMPLATE" 8192 2 52:54:00:01:58:49 &
  ssh root@hv3.home.nicknach.net /cloud/scripts/new.sh oinfra02 "$TEMPLATE" 8192 2 52:54:00:02:58:49 &
  ssh root@hv5.home.nicknach.net /cloud/scripts/new.sh oinfra03 "$TEMPLATE" 8192 2 52:54:00:03:58:49 &
elif [ "$TYPE" = "ocp" ]; then
  echo "- using $TEMPLATE base image"
  # The load balancer is created synchronously; everything else in parallel.
  ssh root@storage.home.nicknach.net /cloud/scripts/new.sh lb "$TEMPLATE" 4196 2 52:54:00:18:58:16
  ssh root@hv4.home.nicknach.net /cloud/scripts/new.sh master01 "$TEMPLATE" 16384 2 52:54:00:fb:09:ec &
  ssh root@hv3.home.nicknach.net /cloud/scripts/new.sh master02 "$TEMPLATE" 16384 2 52:54:00:18:58:01 &
  ssh root@hv5.home.nicknach.net /cloud/scripts/new.sh master03 "$TEMPLATE" 16384 2 52:54:00:18:58:02 &
  ssh root@hv4.home.nicknach.net /cloud/scripts/new.sh node01 "$TEMPLATE" 16384 2 52:54:00:db:14:7d &
  ssh root@hv3.home.nicknach.net /cloud/scripts/new.sh node02 "$TEMPLATE" 16384 2 52:54:00:68:4a:e3 &
  ssh root@hv5.home.nicknach.net /cloud/scripts/new.sh node03 "$TEMPLATE" 16384 2 52:54:00:68:54:49 &
  ssh root@hv4.home.nicknach.net /cloud/scripts/new.sh infra01 "$TEMPLATE" 16384 2 52:54:00:18:58:03 &
  ssh root@hv3.home.nicknach.net /cloud/scripts/new.sh infra02 "$TEMPLATE" 16384 2 52:54:00:18:59:04 &
  ssh root@hv5.home.nicknach.net /cloud/scripts/new.sh infra03 "$TEMPLATE" 16384 2 52:54:00:18:73:04 &
else
  echo "Unrecognized deployment type, idiot. Use 'ocp|oso'" >&2
  exit 1
fi

# Bug fix: without this, "Done!" printed while the backgrounded ssh jobs were
# still provisioning VMs.
wait
echo "Done!"
| true
|
1226cb91442d40ae2816de4b4e56a191c2917a81
|
Shell
|
alonappleboim/nflab_scripts
|
/shell/parallel_exec_bash/examples_parallel/simplest/merge_all.bash
|
UTF-8
| 174
| 2.65625
| 3
|
[] |
no_license
|
#! /bin/bash
# Consume task lines from stdin (logging each one), then merge all per-task
# fragment files f*.tmp column-wise into all.tmp and remove the fragments.

# IFS= and -r keep each line intact: no whitespace trimming and no backslash
# interpretation (the original bare `read line` mangled both).
while IFS= read -r line
do
	echo "merging $line"
done < "/dev/stdin"
# ^not really using the input, just an example of how to do it

# Column-merge every fragment into all.tmp, then clean up.
paste f*.tmp > all.tmp
rm f*.tmp
| true
|
fce02f387689e17dea2caf42ef414087a561a75b
|
Shell
|
maxird/docker-images
|
/templates/wait-svc/generate.sh
|
UTF-8
| 389
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
# Generate per-base Docker build contexts for this template directory under
# images/<template>/<image>/<base>.

# Absolute directory of this script; its basename is the template name.
# Parameter expansion replaces the original unquoted `basename $SRCDIR`
# backticks, which broke on paths containing spaces.
SRCDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
HEREDIR="${SRCDIR##*/}"
OUTDIR="$SRCDIR/../../images/$HEREDIR"

# process IMAGE BASE — create the output directory for IMAGE:BASE and copy
# the Dockerfile and wait script (from the current directory) into it.
process()
{
  image=$1
  base=$2
  outpath="$OUTDIR/$image/$base"
  mkdir -p "$outpath"
  cp Dockerfile "$outpath/Dockerfile"
  cp wait.sh "$outpath/wait.sh"
}

BASE_IMAGE="centos"
BASES="7"
# BASES is a space-separated list, so it is intentionally unquoted here.
for b in $BASES; do
  process "$BASE_IMAGE" "$b"
done
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.