blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
0599690990fe991fc85eeecd30c1e43c21dde3ae
|
Shell
|
HonghaoLYU/yumi_ethz_ws
|
/src/yumi/setup_ws/setupWSVariables.bash
|
UTF-8
| 2,090
| 3.59375
| 4
|
[
"MIT",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# PROGRAMMER: Frederick Wachter
# DATE CREATED: 2016-05-20
# LAST MODIFIED: 2016-05-26
# PURPOSE: Create workspace variables for YuMi during initial setup
# Usage: setupWSVariables.bash <path-to-yumi-directory>
# Appends a set of "yumi_*" aliases to ~/.bashrc pointing at scripts under $1.
# Get the directory location of the YuMi folder
if [ -z "$1" ]; then
echo "Please input the path of where the yumi directory is located"
exit
fi
echo "Adding command line workspace variables... " # notify user the process has started
# Add YuMi aliases for running scripts
echo "" >> ~/.bashrc # add in a blank line before addition
echo "# From: YuMi Github Repo" >> ~/.bashrc # add header for added section
echo "# Purpose: Alias for YuMi commands" >> ~/.bashrc # describe purpose for added section
echo "alias yumi='bash ${1}/setup_ws/yumi.bash'" >> ~/.bashrc # allow for YuMi to be run from command line for the real controller
echo "alias yumi_demo='bash ${1}/setup_ws/yumi_demo.bash'" >> ~/.bashrc # allow for YuMi to be run from command line for the fake controller
echo "alias yumi_server='bash ${1}/setup_ws/yumi_server.bash'" >> ~/.bashrc # run the YuMi server from command line to send path commands to the real controller
echo "alias yumi_lead='rosrun yumi_scripts lead_through'" >> ~/.bashrc # run the lead through script from command line for generating RAPID modules
echo "alias yumi_moveit='rosrun yumi_scripts moveit_interface'" >> ~/.bashrc # run the MoveIt! interface script from command line for interfacing MoveIt! with YuMi through command line
echo "alias yumi_node='roslaunch yumi_scripts yumi_node.launch'" >> ~/.bashrc # execute the node used to manipulate YuMi
echo "alias yumi_interface='rosrun yumi_scripts yumi_interface'" >> ~/.bashrc # run the script that interfaces with the YuMi node
echo "alias yumi_leap='roslaunch yumi_scripts leap_interface.launch'" >> ~/.bashrc # execute the node that interfaces the Leap Motion sensor with YuMi
echo "" >> ~/.bashrc # add in blank line underneath addition
source ~/.bashrc # source bashrc to finalize changes for current terminal window
echo "Finished." # notify user that the process is finished
| true
|
2f7645bb36a8cfb0bdc321ec14693a24748a6806
|
Shell
|
roger-mahler/sparv-pipeline
|
/bin/analyze_xml
|
UTF-8
| 467
| 3.515625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Analyze SGML/XML input files with the sparv.xmlanalyzer module.
# Usage: analyze_xml [-h header-element] [-e encoding] [-m maxcount] sgml-files...
usage() {
echo
echo "Usage: $0 [-h header-element] [-e encoding] [-m maxcount] sgml-files..."
echo
exit 1
}
# Defaults: TEI header element, UTF-8 encoding, no maximum count.
HDR="teiheader"
ENC="UTF-8"
MAX="0"
while getopts "e:m:h:" opt
do
case $opt in
e) ENC="$OPTARG" ;;
m) MAX="$OPTARG" ;;
h) HDR="$OPTARG" ;;
esac
done
shift $((OPTIND-1))
# Require at least one input file; use -eq for the numeric comparison
# (the original '==' relied on string equality).
if [ $# -eq 0 ] ; then usage ; fi
# Join the remaining arguments into one space-separated string, since the
# analyzer expects a single --sources value ("$*" makes the join explicit).
SOURCES="$*"
python -m sparv.xmlanalyzer --header "$HDR" --encoding "$ENC" --maxcount "$MAX" --sources "$SOURCES"
| true
|
c26e690260bb1323c31249bb33a16f8cc21561d7
|
Shell
|
adrianroth10/setup
|
/scripts/mitsuba.sh
|
UTF-8
| 1,230
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the Mitsuba renderer from source on Ubuntu 16.04.
# Currently not working
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
. $DIR/../extras/functions.sh
# NOT WORKING
# Tips for the installation on Ubuntu 16.04 LTS
# https://cgcvtutorials.wordpress.com/2017/05/31/install-mitsuba-on-linux-16-04/
$PACKAGE_INSTALL build-essential scons qt4-dev-tools libpng-dev libjpeg-dev libilmbase-dev libxerces-c-dev libboost-all-dev libopenexr-dev libglewmx-dev libxxf86vm-dev libeigen3-dev libfftw3-dev # libpcrecpp0
# might have to download and install the last package from https://packages.ubuntu.com/trusty-updates/libpcrecpp0
wget https://www.mitsuba-renderer.org/repos/mitsuba/archive/tip.zip
unzip tip.zip
mv mitsuba-af602c6fd98a/ ~/.mitsuba
cd ~/.mitsuba
cp build/config-linux-gcc.py config.py
# substitute for the src/bsdfs/irawan.h
# FIX: the s/// expression was missing its closing delimiter, which made sed
# abort with "unterminated `s' command".
sed -i 's/BOOST_VERSION >= 106000/BOOST_VERSION >= 105800/' src/bsdfs/irawan.h # not tested
# substitute for the include/mitsuba/core/autodiff.h
# from
# inline DScalar1& operator*=(const Scalar &v) {
# value *= v;
# grad *= v;
# return *this;
# }
# to
# inline DScalar1& operator*=(const DScalar1 &v) {
# grad = v.grad * value + grad * v.value;
# value *= v.value;
# return *this;
# }
# FIX: the original used a Unicode en dash ("–j"), which scons would have
# treated as a target name rather than the -j (parallel jobs) option.
scons -j 8
| true
|
aab6364ca97f7d48d8e8dd302377bf7f7abfdb9b
|
Shell
|
pcdshub/pcds-envs
|
/scripts/update_env.sh
|
UTF-8
| 931
| 3.953125
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Updates the previous environment to a new latest, rather than starting from scratch.
# Usage: update_env.sh [envname] [base] [py_ver]
#   envname - name of the conda environment to create/update (required)
#   base    - base environment directory under ../envs (default: pcds)
#   py_ver  - python version to install (default: 3.6)
if [ -z "${1}" ]; then
echo "Usage: update_env.sh [envname] [base] [py_ver]"
exit
else
ENVNAME="${1}"
fi
if [ -z "${2}" ]; then
BASE="pcds"
else
BASE="${2}"
fi
if [ -z "${3}" ]; then
VER="3.6"
else
VER="${3}"
fi
set -e
# Load conda's shell functions so 'conda activate' works in this script.
source "$(dirname `which conda`)/../etc/profile.d/conda.sh"
ENV_DIR="../envs/${BASE}"
# FIX: the original grepped for ${NAME}, which is never set; an empty grep
# pattern matches every line, so the env was never (re)created. Use ENVNAME.
HASREL=`mamba env list | grep "${ENVNAME}"`
if [ -z "${HASREL}" ]; then
mamba env create -y --name "${ENVNAME}" --file "${ENV_DIR}/env.yaml"
fi
conda activate "${ENVNAME}"
conda info -a
echo "Installing python version ${VER}"
conda activate base
conda info -a
mamba install -y -n "${ENVNAME}" python="${VER}"
echo "Updating tagged packages"
mamba install -y -n "${ENVNAME}" --file "${ENV_DIR}/conda-packages.txt"
conda activate "${ENVNAME}"
pip install -r "${ENV_DIR}/pip-packages.txt"
conda list
conda deactivate
| true
|
019993099e61aea585b09df12e04a3547ff6867c
|
Shell
|
sugtao4423/docker-nginx-php
|
/run.sh
|
UTF-8
| 1,483
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entry point: rewrite php-fpm pool and php.ini settings from
# PHP_* environment variables, start php-fpm and nginx, then idle so the
# container stays alive until it receives SIGTERM.
# Pool config: socket ownership/permissions, run-as user/group, socket path,
# and process-manager limits in www.conf (the trailing 'i' makes the match
# case-insensitive in GNU sed).
sed -e "s|;listen.owner\s*=.\+|listen.owner = ${PHP_FPM_USER}|g" \
-e "s|;listen.group\s*=.\+|listen.group = ${PHP_FPM_GROUP}|g" \
-e "s|;listen.mode\s*=.\+|listen.mode = ${PHP_FPM_LISTEN_MODE}|g" \
-e "s|user\s*=.\+|user = ${PHP_FPM_USER}|g" \
-e "s|group\s*=.\+|group = ${PHP_FPM_GROUP}|g" \
-e "s|listen\s*=.\+|listen = /var/run/php-fpm/php-fpm.sock|i" \
-e "s|pm.max_children\s*=.\+|pm.max_children = ${PHP_FPM_PM_MAX_CHILDREN}|i" \
-e "s|pm.start_servers\s*=.\+|pm.start_servers = ${PHP_FPM_PM_START_SERVERS}|i" \
-e "s|pm.min_spare_servers\s*=.\+|pm.min_spare_servers = ${PHP_FPM_PM_MIN_SPARE_SERVERS}|i" \
-e "s|pm.max_spare_servers\s*=.\+|pm.max_spare_servers = ${PHP_FPM_PM_MAX_SPARE_SERVERS}|i" \
-i /etc/php81/php-fpm.d/www.conf && \
sed -e "s|;*memory_limit\s*=.*|memory_limit = ${PHP_MEMORY_LIMIT}|i" \
-e "s|;*upload_max_filesize\s*=.*|upload_max_filesize = ${PHP_MAX_UPLOAD}|i" \
-e "s|;*max_file_uploads\s*=.*|max_file_uploads = ${PHP_MAX_FILE_UPLOAD}|i" \
-e "s|;*post_max_size\s*=.*|post_max_size = ${PHP_MAX_POST}|i" \
-e "s|expose_php\s*=.*|expose_php = ${PHP_EXPOSE_PHP}|i" \
-e "s|;date.timezone\s*=.*|date.timezone = \"${PHP_TIMEZONE}\"|i" \
-i /etc/php81/php.ini
echo 'start php-fpm81'
# -D daemonizes php-fpm so the script can go on to start nginx.
php-fpm81 -D
echo 'started php-fpm81'
echo 'start nginx'
nginx -g 'daemon on;'
echo 'started nginx'
# Exit cleanly when the container is stopped (SIGTERM).
trap_term(){
echo 'exit'
exit 0
}
trap 'trap_term' TERM
# Keep the foreground process alive; short sleeps let the TERM trap fire promptly.
while :
do
sleep 1
done
| true
|
1994a84ddb182f2b30dca8cf62438ba5c83a751e
|
Shell
|
admalledd/dotfiles
|
/i3bin/touchpad_toggle.sh
|
UTF-8
| 977
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# Toggle the laptop touchpad via xinput, or force a state from the command
# line: $1 in {disable,off,0} forces off; {enable,on,1} forces on.
# NOTE(review): the inline helpers use Python 2 syntax ("print expr") —
# confirm a python2 interpreter is available as `python`.
TOUCHPAD_ID=$(xinput |grep "TouchPad"|python -c "import sys;sys.stdout.write('%s'%sys.stdin.read().split('\t')[1].split('=')[1])")
# Current state: last field of the "Device Enabled" property (0 or 1).
TOUCH_STATE=$(xinput list-props "$TOUCHPAD_ID"|grep "Device Enabled"|python -c "import sys;print sys.stdin.read().split()[-1]")
#if we got a command line, force that state rather than toggling (eg force disable on i3 start)
echo "$1"
if [[ "$1" == "disable" || "$1" == "off" || "$1" == "0" ]]; then
echo "disabling touchpad (via cmdline opt)"
# FIX: quote $TOUCHPAD_ID — an empty/multi-word value would otherwise
# word-split and garble the xinput argument list.
xinput set-prop "$TOUCHPAD_ID" "Device Enabled" 0
exit
elif [[ "$1" == "enable" || "$1" == "on" || "$1" == "1" ]]; then
echo "enabling touchpad (via cmdline opt)"
xinput set-prop "$TOUCHPAD_ID" "Device Enabled" 1
exit
fi
# No argument given: flip the current state.
if [[ "$TOUCH_STATE" -eq "0" ]]; then
echo "enabling touchpad"
xinput set-prop "$TOUCHPAD_ID" "Device Enabled" 1
elif [[ "$TOUCH_STATE" -eq "1" ]]; then
echo "disabling touchpad"
xinput set-prop "$TOUCHPAD_ID" "Device Enabled" 0
fi
| true
|
010b7e48e10bc78f4978c0dba7729134d1bd5962
|
Shell
|
cgiserban/Maya_ML_Texture_Stylizer
|
/StyleTransfer/runStyleTransfer.sh
|
UTF-8
| 1,839
| 3.5
| 4
|
[] |
no_license
|
clear
# Maya ML texture style-transfer driver.
# $1: project path; $2: style checkpoint filename under StyleTransfer/style;
# $3: space-separated list of texture paths to convert; $4..: textures to copy.
PROJECT_PATH=$1
STYLE_PATH=$PROJECT_PATH/StyleTransfer/style/$2
# Split $3 on spaces into an array of input texture paths.
array=(${3// / })
echo "- Maya Style Transfer - "
echo "#######################################"
echo "- Creating the environment -"
echo "#######################################"
#Setup the working environment
# NOTE(review): aliases are not expanded in non-interactive scripts, so this
# 'mayapy' alias has no effect here — confirm whether it is still needed.
alias mayapy='/opt/autodesk/maya/bin/mayapy'
export PATH=$PATH:/public/bin/2018
export PATH=/opt/qt/5.11.1/gcc_64/bin:/opt/qt/Tools/QtCreator/bin:$PATH
export PYTHONPATH=$PYTHONPATH:$HOME/NGL/lib
export PYTHONPATH=$PYTHONPATH:$RMANTREE/bin
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/$RMANTREE/lib:
export PATH=$PATH:/$RMANTREE/bin
#Set-up the virtualenv
source $PROJECT_PATH/StyleTransfer/virtualEnvironment/bin/activate
cd $PROJECT_PATH/StyleTransfer
echo "#######################################"
echo "- Collecting textures -"
echo "#######################################"
#Create directories (-p: do not fail when they already exist from a prior run)
mkdir -p $PROJECT_PATH/collectedTX
mkdir -p $PROJECT_PATH/convertedTX
#Add textures into the directory
for (( i=3;$i<=$#;i=$i+1 ))
do
cp ${!i} $PROJECT_PATH/collectedTX
done
echo "#######################################"
echo "- Installing Dependencies -"
echo "- This may take a while -"
echo "#######################################"
pip install numpy pillow tensorflow scipy moviepy
echo "#######################################"
echo "- Running the NN -"
echo "#######################################"
for i in "${!array[@]}"
do
#Get File Name
for fullpath in "${array[i]}"
do
filename="${fullpath##*/}"
done
#Set Export Name
exportname="Converted_"$filename
#Run network and export results
python run.py --model $STYLE_PATH --input ${array[i]} --output $PROJECT_PATH/convertedTX/$exportname
# python run.py --model $PROJECT_PATH/StyleTransfer/style/la_muse.ckpt --input ${array[i]} --output $PROJECT_PATH/convertedTX/$exportname
done
| true
|
f2382f860b63c6ca6b60abc79ba4df672c676663
|
Shell
|
ttranatping/pf-sp-connection-mgt
|
/scripts/export-sp-connection.sh
|
UTF-8
| 2,768
| 2.75
| 3
|
[] |
no_license
|
# Export an SP connection from PingFederate via the ConnectionMigrationMgr
# SOAP endpoint, then rewrite certificate fingerprints and embedded certs in
# the exported XML to match the local example certificates.
# $1: connection entityId; $2: output file path.
# prop KEY: read KEY's value from the docker-compose pf.env file.
function prop {
grep "${1}" ../docker-compose/pf.env|cut -d'=' -f2
}
entityId=$1
# SOAP request asking for the SP-role connection with this entityId.
bodyContent="<soapenv:Envelope xmlns:soapenv=\"http://schemas.xmlsoap.org/soap/envelope/\">
<soapenv:Header/>
<soapenv:Body>
<getConnection>
<entityId>${entityId}</entityId>
<role>SP</role>
</getConnection>
</soapenv:Body>
</soapenv:Envelope>"
echo 'exporting connection = ' $1
echo 'exporting to = ' $2
# MD5 fingerprints of the local example keypairs (PKCS#12 password and alias
# are passed as the last two arguments to the extractor jar).
SigningKeyPairMD5Reference_fingerprint=$(java -jar ./pf-cert-extract-md5/target/pf-cert-extract-md5-0.0.1-SNAPSHOT-jar-with-dependencies.jar ./example-certs/SigningKeyPairReference.p12 2FederateM0re ping)
DecryptionKeyPairMD5Reference_fingerprint=$(java -jar ./pf-cert-extract-md5/target/pf-cert-extract-md5-0.0.1-SNAPSHOT-jar-with-dependencies.jar ./example-certs/DecryptionKeyPairReference.p12 2FederateM0re ping)
SecondaryDecryptionKeyPairMD5Reference_fingerprint=$(java -jar ./pf-cert-extract-md5/target/pf-cert-extract-md5-0.0.1-SNAPSHOT-jar-with-dependencies.jar ./example-certs/SecondaryDecryptionKeyPairReference.p12 2FederateM0re ping)
# Base64-encoded certificates to splice into the exported connection XML.
DsigVerificationCert_Base64Encoded=$(cat ./example-certs/DsigVerificationCert.cer | base64)
SecondaryDsigVerificationCert_Base64EncodedCert=$(cat ./example-certs/SecondaryDsigVerificationCert.cer | base64)
EncryptionCert_Base64EncodedCert=$(cat ./example-certs/EncryptionCert.cer | base64)
RoleDescriptor_Base64EncodedCert=$(cat ./example-certs/RoleDescriptor.cer | base64)
# Fetch the connection XML, extract the SOAP return payload with xpath, then
# update fingerprints and embedded certs (xmlstarlet) before writing to $2.
# NOTE(review): the sed expression below looks garbled (each pattern replaces
# itself); it presumably once unescaped XML entities (&amp; &lt; &gt; &apos;)
# — confirm against the original repository before relying on it.
curl -X POST \
--header 'SOAPAction: getConnection' \
--header 'Content-Type: text/plain' \
--data "$bodyContent" \
--user connectionmgt:$(prop 'serviceAuthentication_items_connectionManagement_connectionmgt_connectionmgt_sharedSecret') \
https://localhost:9999/pf-mgmt-ws/ws/ConnectionMigrationMgr --insecure | \
xpath '/soapenv:Envelope/soapenv:Body/getConnectionResponse/getConnectionReturn/text()' | \
sed "s/\&/\&/;s/</\</;s/</\</;s/>/\>/;s/'/\'/" | \
xml ed -u "//urn:SigningKeyPairReference/@MD5Fingerprint" -v "${SigningKeyPairMD5Reference_fingerprint}" | \
xml ed -u "//urn:DsigVerificationCert/urn:Base64EncodedCert" -v "${DsigVerificationCert_Base64Encoded}" | \
xml ed -u "//urn:SecondaryDsigVerificationCert/urn:Base64EncodedCert" -v "${SecondaryDsigVerificationCert_Base64EncodedCert}" | \
xml ed -u "//urn:DecryptionKeyPairReference/@MD5Fingerprint" -v "${DecryptionKeyPairMD5Reference_fingerprint}" | \
xml ed -u "//urn:SecondaryDecryptionKeyPairReference/@MD5Fingerprint" -v "${SecondaryDecryptionKeyPairMD5Reference_fingerprint}" | \
xml ed -u "//urn:EncryptionCert/urn:Base64EncodedCert" -v "${EncryptionCert_Base64EncodedCert}" | \
xml ed -u "//md:RoleDescriptor/urn:availableCert/urn:Base64EncodedCert" -v "${RoleDescriptor_Base64EncodedCert}" > $2
| true
|
9f5ac7b5e05f0038a4b27c533772b336afbdaebb
|
Shell
|
mengyou658/spark-job-rest
|
/spark-job-rest/src/main/scripts/start_server.sh
|
UTF-8
| 2,698
| 3.578125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Script to start the job server
# Extra arguments will be spark-submit options, for example
# ./server_start.sh --jars cassandra-spark-connector.jar
set -e
# Resolve the absolute directory containing this script into SCRIPTS_DIR.
get_abs_script_path() {
pushd . >/dev/null
cd $(dirname $0)
SCRIPTS_DIR=$(pwd)
popd >/dev/null
}
get_abs_script_path
APP_DIR="$(dirname "${SCRIPTS_DIR}")"
PIDFILE="${APP_DIR}/server.pid"
# From this variable depends whether server will be started in detached or in-process mode
SJR_RUN_DETACHED="${SJR_RUN_DETACHED-true}"
DRIVER_MEMORY=1g
GC_OPTS="-XX:+UseConcMarkSweepGC
-verbose:gc -XX:+PrintGCTimeStamps -Xloggc:${APP_DIR}/gc.out
-XX:MaxPermSize=512m
-XX:+CMSClassUnloadingEnabled "
JAVA_OPTS="-Xmx1g -XX:MaxDirectMemorySize=512M
-XX:+HeapDumpOnOutOfMemoryError -Djava.net.preferIPv4Stack=true
-Dcom.sun.management.jmxremote.authenticate=false
-Dcom.sun.management.jmxremote.ssl=false"
MAIN="spark.job.rest.server.Main"
if [ -f "${SCRIPTS_DIR}/settings.sh" ]; then
. "${SCRIPTS_DIR}/settings.sh"
else
echo "Missing ${SCRIPTS_DIR}/settings.sh, exiting"
exit 1
fi
# Create directories if not exist
mkdir -p "${LOG_DIR}"
mkdir -p "${JAR_PATH}"
mkdir -p "${DATABASE_ROOT_DIR}"
LOG_FILE="spark-job-rest.log"
LOGGING_OPTS="-Dlog4j.configuration=log4j.properties
-DLOG_DIR=${LOG_DIR}
-DLOG_FILE=${LOG_FILE}"
# Need to explicitly include app dir in classpath so logging configs can be found
CLASSPATH="${APP_DIR}/${SJR_SERVER_JAR_NAME}:${APP_DIR}:${APP_DIR}/resources"
# Log classpath
echo "CLASSPATH = ${CLASSPATH}" >> "${LOG_DIR}/${LOG_FILE}"
# The following should be exported in order to be accessible in Config substitutions
export SPARK_HOME
export APP_DIR
export JAR_PATH
export CONTEXTS_BASE_DIR
export DATABASE_ROOT_DIR
export CONTEXT_START_SCRIPT="${SCRIPTS_DIR}/context_start.sh"
# start_server [spark-submit options...]: launch the server via spark-submit,
# appending stdout/stderr to the shared log file.
function start_server() {
# Start application using `spark-submit` which takes care of computing classpaths
# FIX: quote "$@" so option values containing spaces survive word-splitting.
"${SPARK_HOME}/bin/spark-submit" \
--class $MAIN \
--driver-memory $DRIVER_MEMORY \
--conf "spark.executor.extraJavaOptions=${LOGGING_OPTS}" \
--conf "spark.driver.extraClassPath=${CLASSPATH}" \
--driver-java-options "${GC_OPTS} ${JAVA_OPTS} ${LOGGING_OPTS} ${CONFIG_OVERRIDES}" \
"$@" "${APP_DIR}/${SJR_SERVER_JAR_NAME}" \
>> "${LOG_DIR}/${LOG_FILE}" 2>&1
}
if [ "${SJR_RUN_DETACHED}" = "true" ]; then
# FIX: forward the script's extra arguments to spark-submit as the header
# promises; the original never passed "$@" to start_server.
start_server "$@" &
echo $! > "${PIDFILE}"
echo "Server started in detached mode. PID = `cat "${PIDFILE}"`"
elif [ "${SJR_RUN_DETACHED}" = "false" ]; then
start_server "$@"
else
echo "Wrong value for SJR_RUN_DETACHED = ${SJR_RUN_DETACHED}."
# FIX: 'exit -1' is not a valid POSIX exit status; use 1.
exit 1
fi
| true
|
9087301827546eb30c3950fdd51387b0ab189db6
|
Shell
|
ml4ai/delphi
|
/scripts/install_range-v3_from_source
|
UTF-8
| 261
| 2.640625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Download the range-v3 release tarball, build and install it with CMake,
# then remove the unpacked source tree.
version=0.10.0
archive="${version}.tar.gz"
srcdir="range-v3-${version}"
wget "https://github.com/ericniebler/range-v3/archive/${archive}"
tar -xf "${archive}"
pushd "${srcdir}" > /dev/null
mkdir build
cd build
cmake ..
make -j install
popd > /dev/null
# Clean up the extracted sources (the tarball itself is kept).
rm -rf "${srcdir}"
| true
|
19643b42c1bc26cce309bd6fb73e7b9d5cad7c84
|
Shell
|
moul/cattle
|
/scripts/ci
|
UTF-8
| 1,905
| 3
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/bin/bash
# CI driver: builds Cattle, runs its integration test suite against MySQL,
# then re-runs a subset against H2.
set -e
echo $(hostname)
cd $(dirname $0)
export RUNTIME_DIR=../../../runtime/ci/
export RUNTIME_DIR_CLEAN=true
export CATTLE_LOGBACK_ROOT_LEVEL=error
export CATTLE_DB_CATTLE_DATABASE=mysql
: ${CATTLE_DB_CATTLE_MYSQL_PORT:=13306}
export CATTLE_DB_CATTLE_MYSQL_PORT
# Uncomment this if you're impatient
#export CATTLE_IDEMPOTENT_CHECKS=false
# On docker-in-docker CI workers, start the inner Docker daemon.
if [ -x "$(which wrapdocker)" ]; then
echo Launching Docker
wrapdocker >/tmp/docker.log 2>&1
docker info
fi
# echo_dot: print a dot every second as a progress heartbeat; exits cleanly
# when the parent kills it with TERM.
echo_dot()
{
trap "exit 0" TERM
echo -n " "
while true; do
echo -n .
sleep 1
done
}
# run CMD...: run CMD with a heartbeat, logging to /tmp/CMD.log; on failure
# dump the log (failure message printed before and after) and abort the build.
run()
{
echo -n Running "$@"
echo_dot 2>/dev/null &
DOT_PID=$!
"$@" > /tmp/${1}.log || {
echo "$@" failed
cat /tmp/${1}.log
echo "$@" failed
exit 1
}
kill $DOT_PID
echo
}
# Background to start the Docker pulls
./test-warm >/dev/null &
run ./bootstrap
run ./clean
CI=true MAVEN_ARGS='-B -q' run ./build
run ./run --background
# Wait for completion now to ensure that images are pulled
./test-warm
export AGENT_IMAGE=$(grep bootstrap.required.image ../resources/content/cattle-global.properties | cut -f2 -d=)
DEV_HOST=localhost:8081 ../tools/development/register-boot2docker.sh >/tmp/register.log &
./wait-for-hosts
# Run the serialized tests first, then the parallel remainder (nproc+1 workers).
./test -e py27 -- -m nonparallel
./test -- -m "'not nonparallel'" -n $(($(nproc) + 1)) -v || {
echo Test failed
cat /tmp/run.log
echo Test failed
exit 1
}
# Now test H2
echo "Testing H2"
export RUNTIME_DIR=../../../runtime/ci-h2/
export RUNTIME_DIR_CLEAN=true
export CATTLE_DB_CATTLE_DATABASE=h2
export CATTLE_TEST_HTTP_PORT=8083
export CATTLE_TEST_HTTP_PROXIED_PORT=8084
export CATTLE_TEST_URL=http://localhost:8083/v1/schemas
run ./run --background
./test -e py27 -- core/test_virtual_machine.py core/test_container.py || {
echo Test failed
cat /tmp/run.log
echo Test failed
exit 1
}
| true
|
c3813e2210ceacd68f057eaca3deb7bc1aa6aed4
|
Shell
|
XavierBerger/oe-meta-container
|
/scripts/create_docker_build_appliance_image.sh
|
UTF-8
| 333
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash -x
# Copy the Yocto "build appliance" container tarball into the Docker build
# context and build the yocto-build-appliance-x86_64 image from it.
# $1 (optional): poky build directory; defaults to /home/vagrant/poky/build.
image_tarball="${1:-/home/vagrant/poky/build}/tmp/deploy/images/qemux86-64/container-build-appliance-qemux86-64.tar.bz2"
# FIX: quote all expansions — the original unquoted $(dirname $0) and cp
# arguments break when the checkout path contains spaces.
docker_build_dir="$(realpath "$(dirname "$0")/../misc/docker/build_appliance/")"
cp "${image_tarball}" "${docker_build_dir}" \
&& sudo docker build -t yocto-build-appliance-x86_64 "${docker_build_dir}"
| true
|
091dd5d786c6f6e19500fc064673bee931663c8e
|
Shell
|
KomplexKapharnaum/RPi-ShowPlayer
|
/bash/create_fake_media_dir.sh
|
UTF-8
| 166
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Mirror the file tree under $1 into $2 as empty placeholder ("fake") files.
# $1: source media directory; $2: destination directory.
cd "$2"
mkdir "$2/video"
mkdir "$2/audio"
cd "$1"
# FIX: iterate with a read loop instead of `for f in $(find ...)` so paths
# containing whitespace are not split into multiple words.
find "$1" | while IFS= read -r f; do
f=${f#"$1"}
echo "$2/$f"
touch "$2/$f"
done
| true
|
9de4d5e0ff68d923dc1e7e100aeb0a350fba187f
|
Shell
|
PennockTech/keybase-docker
|
/keybase.is-up-to-date
|
UTF-8
| 890
| 3.71875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -euo pipefail
# Check whether the locally installed keybase matches the newest Linux
# release advertised by prerelease.keybase.io. Exits 0 when up to date;
# prints both versions and exits 2 when they differ.
# The page at <https://prerelease.keybase.io/> links to this:
#readonly CURRENT_LINUX_RELEASE_JSON_URL='http://prerelease.keybase.io.s3.amazonaws.com/update-linux-prod.json'
# but that hostname doesn't have a valid cert if we switch to https.
# Meanwhile, this works for https:
readonly CURRENT_LINUX_RELEASE_JSON_URL='https://prerelease.keybase.io/update-linux-prod.json'
# There is no documented guarantee for that URL, but it works in practice.
progname="$(basename "$0" .sh)"
# Newest published version, pulled from the release JSON.
latest="$(curl -LSs "$CURRENT_LINUX_RELEASE_JSON_URL" | jq -er .version)"
# Installed version: the last whitespace-separated word of `keybase --version`.
current="$(keybase --version)"
current="${current##* }"
if [[ "$latest" == "$current" ]]; then
  exit 0
fi
printf '%s: keybase installed is: %s\n' "$progname" "$current"
printf '%s: keybase available is: %s\n' "$progname" "$latest"
exit 2
| true
|
28964e51faa74d89beab9e29303cb72001ebb705
|
Shell
|
miguelAlcantara/odmInstall
|
/errorLib.sh
|
UTF-8
| 282
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Shared error-handling helpers for the ODM install scripts.
CURRENT_DIR=`pwd`
# validateExecution ERRORCODE ERRORTEXT
# If ERRORCODE is non-zero, print ERRORTEXT and wait for user input; if the
# user enters "n", abort the whole script with ERRORCODE. Returns 0 otherwise.
function validateExecution(){
errorCode=$1
errorText=$2
# FIX: compare numerically (-ne) with quoting; the original unquoted string
# test (!=) misbehaves for values like "00" and breaks if $1 is empty.
if [ "${errorCode}" -ne 0 ]; then
echo "---------------"
echo "${errorText}"
read input
if [ "$input" == "n" ]; then
exit ${errorCode};
fi
fi
return 0;
}
| true
|
a8582a9af7b81426c5ba6586d664ce2e5d947e16
|
Shell
|
josephholsten/bin
|
/epub-me
|
UTF-8
| 382
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# ===========
# = EPUB ME =
# ===========
# folder to epub + epubcheck
# $1 = epub folder path
# $2 = epub name (optional; defaults to the folder's own name)
if [ -n "$1" ]; then
FOLDER=$1
else
echo "The epub folder path is missing."
# FIX: exit non-zero so callers can detect the failure (was 'exit 0')
exit 1
fi
cd "$FOLDER" || exit 1
if [ -n "$2" ]; then
NAME=$2
else
NAME=${PWD##*/}
fi
# mimetype must be the first entry and stored uncompressed (-0) per the EPUB spec
zip -0Xq "../$NAME.epub" mimetype
zip -Xr9Dq "../$NAME.epub" *
cd -
# NOTE(review): the epub was written to the *parent* of $FOLDER; this path
# only resolves when the script is run from that parent directory — confirm.
exec epubcheck "$NAME.epub"
| true
|
4d84ac6a6ad4361507e7745f7ac0c57defd25e0d
|
Shell
|
yayankov/Operating-systems-FMI
|
/shell-script/forExam/13.sh
|
UTF-8
| 238
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# Print the names of broken (dangling or looping) symlinks beneath the
# directory given as $1.
if [ "$#" -ne 1 ]; then
echo "Wrong number of arguments"
exit 1
fi
# FIX: quote ${1}/${DIR} so directory names with whitespace do not word-split.
if [ ! -d "${1}" ]; then
echo "Argument 1 must be a directory"
exit 2
fi
DIR=${1}
# find's %Y prints the link target's type: N = nonexistent, L = loop.
find "${DIR}" -type l -printf "%Y %f\n" | grep '^[NL]' | awk '{print $2}'
| true
|
67592b6f3e09e12a44d1b06a3b4050eede4fe126
|
Shell
|
abeiro/ascore-v1
|
/Framework/Locale/generate.sh
|
UTF-8
| 140
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Compile every gettext .po file under the current tree into a .mo file
# alongside it.
# FIX: read find's output line by line instead of word-splitting it, so
# paths with spaces survive; run each compile in a subshell instead of
# relying on `cd -`.
find . -name "*.po" | while IFS= read -r po; do
(
cd "$(dirname "$po")"
base=$(basename "$po")
# ${base%.po} strips only the suffix; the original sed replaced the first
# ".po" occurrence anywhere in the name.
msgfmt "$base" -o "${base%.po}.mo"
)
done
| true
|
b9ffc43663dd242dbc692e5a1d07f2d7f4c6558e
|
Shell
|
esteba61/Hadoop-provision
|
/scripts/setup-node.sh
|
UTF-8
| 433
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/bash
# Provision a Hadoop cluster node: create the hadoop user, install Java,
# and write static host entries for the cluster.
# NOTE(review): 'sudo -i' launches an interactive root shell and blocks until
# it exits; it does not elevate the rest of this script — confirm intent.
function sudoAdmin {
sudo -i
}
# Create the 'hadoop' user with password "hadoop".
# NOTE(review): 'passwd --stdin' is RHEL/CentOS-specific — confirm target OS.
function hadoopUser {
useradd hadoop
echo -e "hadoop" | (passwd --stdin hadoop)
}
# Install OpenJDK 8 and overwrite /etc/hosts with the cluster node addresses.
function installTools {
yum install -y java-1.8.0-openjdk-devel
cat > /etc/hosts <<EOF
192.168.92.10 nodemasterx
192.168.92.11 nodea
192.168.92.12 nodeb
EOF
}
echo -e "------sudoAdmin------"
sudoAdmin
echo -e "------hadoopUser------"
hadoopUser
echo -e "------installTools------"
installTools
| true
|
d457cc4031c732a9083ba5e37a6bf50221fb852b
|
Shell
|
SRadatz/bookStack
|
/mariaDB_upgrade
|
UTF-8
| 858
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/sh
#upgrade mariadb from 10.0 to 10.1
#backup DB first
#mysqldump db_name > backup-file.sql
#mysqldump --all-databases > all_databases.sql
#stop services
sudo service nginx stop
sudo service mysql stop
#remove current version
# FIX: the package is named 'mariadb-server'; the original "maria-db server"
# asked apt to remove two non-existent packages and failed.
sudo apt remove mariadb-server -y
#setup mariadb repositories
sudo apt-get install software-properties-common -y
sudo apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74CD1D8 -y
sudo add-apt-repository 'deb [arch=amd64,arm64,i386,ppc64el] http://nyc2.mirrors.digitalocean.com/mariadb/repo/10.1/ubuntu xenial main' -y
sudo apt update
sudo apt upgrade
#reboot
#incase of Unix_socket Error
#sudo vim /etc/mysql.mariadb.conf.d/50-server.cnf
# Under [mysqld] section
# Plugin-load-add = auth_socket.so
#sudo systemctl restart mariadb.service
# Now 'sudo mysql -u root' should work
| true
|
a9efe4835ba8f5f7007eb9832b451094c111a902
|
Shell
|
jomof/jacy
|
/utils/make-qc.bash
|
UTF-8
| 1,455
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Regenerate the quick-check files for the Jacy grammar: a PET qc.tdl (via
# cheap) and an LKB checkpaths.lsp (via a generated Lisp session).
# from the main grammar directory
unset DISPLAY;
unset LUI;
lkbdir=${LOGONROOT}/lingo/lkb;
grammardir=${LOGONROOT}/dfki/jacy;
### make input file
cut -d@ -f7 $grammardir/tsdb/skeletons/mrs/item |tail -50 > $grammardir/utils/mrs.50.txt
#
# CHEAP
#
# back up quick check file
mv $grammardir/pet/qc.tdl $grammardir/pet/qc.tdl.old
#flop the grammar once
cd $grammardir
flop japanese
# calculate the quickcheck file
# NOTE(review): the input built above is utils/mrs.50.txt, but this reads
# utils/mrs.txt — confirm which file is intended.
cat $grammardir/utils/mrs.txt | \
cheap -limit=50000 -packing -compute-qc=pet/qc.tdl $grammardir/japanese
# flop the grammar again
cd $grammardir
flop japanese
echo "PET done"
###
### LKB
###
# back up
mv $grammardir/lkb/checkpaths.lsp $grammardir/lkb/checkpaths.lsp.old
### FIXME should redo the input file at somestage
# Feed a generated Lisp script to the LOGON runtime: load LKB, parse the
# test sentences, and collect check paths into lkb/checkpaths.lsp.
{
cat 2>&1 <<- LISP
(load "$lkbdir/src/general/loadup")
(compile-system "lkb" :force t)
(lkb::read-script-file-aux "$grammardir/lkb/script")
;;; set an edge limit
(setf lkb::*maximum-number-of-edges* '5000)
;; make the checkpaths
(lkb::with-check-path-list-collection
"$grammardir/lkb/checkpaths.lsp"
(lkb::parse-sentences
"$grammardir/utils/mrs.txt"
"/tmp/mrs.txt.out"))
(format t "~%All Done!~%")
#+allegro (excl:exit)
#+sbcl (sb-ext:quit)
LISP
} | ${LOGONROOT}/bin/logon --source -I base -locale ja_JP.UTF-8
echo "LKB done"
echo "please commit the new files"
echo "svn commit -m 'new quick check paths' pet/qc.tdl lkb/checkpaths.lsp"
| true
|
c01673d153d077b3ee13196666777c2305e4326f
|
Shell
|
adamluzsi/testcase
|
/bin/test-go
|
UTF-8
| 310
| 3.390625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Run the Go test suite (race detector + BenchmarkTest benchmarks) in every
# Go module found beneath the current directory.
set -e
(
type go
) 1>/dev/null
main() (
set -e
shopt -s nullglob globstar
local gmpath path
for gmpath in **/go.mod; do
path=${gmpath%"go.mod"}
# FIX: run each module in a subshell. The original cd'd in place, so the
# next iteration's relative module path no longer resolved from the
# starting directory when more than one go.mod exists.
(
cd "${path}"
testCurrent "${@}"
)
done
)
# Run the tests for the module in the current directory.
testCurrent() {
go test ./... -race -count 1 -bench '^BenchmarkTest' "${@}"
}
main "${@}"
| true
|
7233fbadda0cd8671f0b3128119beb71aac2d741
|
Shell
|
pfenninglab/TACIT
|
/evaluationScriptsRetinaModels/step6a_generate_neg_seqs_mm10.sb
|
UTF-8
| 1,086
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# SLURM job: generate GC-matched negative (background) sequences for mouse
# retina ATAC-seq peaks using biasaway.
#SBATCH -n 1
#SBATCH --partition=pfen1
#SBATCH --job-name=genNullSeq
#SBATCH --mem=10G
#SBATCH --error=logs/genNullSeq_%A_%a_out.txt
#SBATCH --output=logs/genNullSeq_%A_%a_out.txt
# generate 500 bp background repository of mm10 sequences
BGDIR="/home/csriniv1/resources/biasaway/mm10/500bp"
GENOME="/home/csriniv1/resources/mm10.fa"
# One-time background repository generation, kept for reference:
#echo "generating background repo..."
#bash /home/csriniv1/resources/biasaway/create_background_repository.sh -f $GENOME -s 500 -r $BGDIR
#echo "Done!"
# Foreground peaks (input) and negative-sequence output FASTA paths.
FGFASTA="/projects/pfenninggroup/machineLearningForComputationalBiology/retina/data/mouse/GSE146897_WTMouse_ret_ATAC/mm10_ret_noTSS_filtered_500bp.fa"
BGFASTA="/projects/pfenninggroup/machineLearningForComputationalBiology/retina/data/mouse/GSE146897_WTMouse_ret_ATAC/mm10_ret_noTSS_filtered_500bp_neg.fa"
echo "generating negative sequences..."
# generate 500bp background sequence for mouse retina ATAC-seq filtered peaks
# (10 negatives per peak, matched within 2.6 SD of GC composition).
biasaway c --foreground $FGFASTA --nfold 10 \
--deviation 2.6 --step 50 --winlen 100 \
--bgdirectory $BGDIR --seed 12345 > $BGFASTA
echo "Done!"
echo $BGFASTA
| true
|
4d81b10722d8ff55c40b73330e8505cfbac0b018
|
Shell
|
abstrakct/terrapinstationdotfiles
|
/bin/udsks
|
UTF-8
| 5,026
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
# udsks: Un/mounts removable media via udisks
# Adapted by DMcFadzean from some-guy94's script at: https://bbs.archlinux.org/viewtopic.php?pid=877272#p877272
# udsks is free software: you can redistribute it and/or modify it under the terms of the
# GNU General Public License Version 3 as published by the Free Software Foundation.
# Arch Linux dependencies: dmenu, udisks[, notification program eg. zenity]
# Removable media criteria: not system internal, has media, not partition table[, usage filesystem]
# Note that 'label:' tag is not unique; we require the first as second is subsection of 'partition:'
# Get program name and set default notification flag
pgm=${0##*/}; flg="Removable media"
##Customize notification programs
# Fnotify {msg|err} TITLE TEXT...: show an info or error notification.
Fnotify()
{
f1=$1; f2=$2; shift 2; if [ "$f1" = "msg" ]; then m="info"; else m="error"; fi
zenity --title="$f2" --$m --text="$@"
}
##Fnotify()
##{
## f1=$1; f2=$2; shift 2; if [ "$f1" = "msg" ]; then m="normal"; else m="critical"; fi
## notify-send -u $m "$f2" "$@"
##}
# Parse command line
while getopts 'hfF:u:O:' OPTION
do
case $OPTION in
f) if [ -n "$FILEMANAGER" ]; then flm="$FILEMANAGER"; else flm="thunar"; fi ;; ## Default file manager
F) flm="$OPTARG" ;;
u) dir="$OPTARG" ;;
O) opt="$OPTARG" ;;
? | h) cat <<EOF
udsks 2.11: Mounts and unmounts removable media via udisks
Usage: udsks [-f] [-F filemanager] [-u directory] [-O options]
Options:
-f opens the mounted media directory in the default file manager
-F opens the mounted media directory in the specified file manager
-u unmounts the media mounted at the specified directory
-O specifies a string of additional options for Dmenu
If -u is not specified, a menu of all possible un/mount actions is shown
EOF
exit 1 ;;
esac
done
shift $(($OPTIND - 1))
if [ -n "$dir" ]; then
# Unmount media from specified directory
dev=$(mount | grep -m 1 " on $dir type " | awk '{print $1}')
if [ -n "$dev" ]; then drv="unmount: $dev"; else Fnotify err "$flg" "No media mounted at: $dir"; exit 2; fi
else
# Provide a menu of possible un/mount actions and media
if [ -n "$DMENU" ]; then dmenu="$DMENU -p Media -b -fn -*-terminus-*-*-*-*-12-*-*-*-*-*-*-u -nf #D37E2C -nb #000000 -sf #FF0000 -sb #000000"; else dmenu="dmenu -i -p Media -b -fn -*-terminus-*-*-*-*-12-*-*-*-*-*-*-u -nf #D37E2C -nb #000000 -sf #FF0000 -sb #000000"; fi # Dmenu command
# Tags grepped out of `udisks --show-info` output.
zi="system internal:"; za="has media:"; zt="partition table:"; zu="usage:"; zm="is mounted:"; zp="mount paths:"; zl="label:"
fst="" # Store string of devices handled by fstab, converting any UUIDs to devices
while read dev x; do
case "$dev" in /dev/*) fst="$fst$dev ";; UUID=*) fst="$fst$(findfs $dev) ";; *) continue;; esac
done < /etc/fstab #; echo $fst >&2 # debug
# Select removable media device
drv=$(udisks --enumerate-device-files | grep -e "/dev/sd" -e "/dev/sr" | \
(while read dev; do
[[ $fst == *"$dev "* ]] && continue # Reject any device handled by fstab
# Test against removable media criteria and get whether mounted, mountpoint and label
inf=$(udisks --show-info $dev | grep -e "$zi" -e "$za" -e "$zt" -e "$zu" -e "$zm" -e "$zp" -e "$zl")
[[ $(awk "/$zi/ {print \$3}"<<<"$inf") != "0" ]] && continue #; echo $dev: "$inf" >&2 # debug
[[ $(awk "/$za/ {print \$3}"<<<"$inf") != "1" ]] && continue
[[ $(awk "/$zt/ {print \$2}"<<<"$inf") == "table:" ]] && continue
#[[ $(awk "/$zu/ {print \$2}"<<<"$inf") != "filesystem" ]] && continue
# If unmounted then show device with label else show device with mountpoint
if [[ $(awk "/$zm/ {print \$3}"<<<"$inf") == "0" ]]; then echo "mount: $dev," $(grep -m 1 "$zl"<<<"$inf")
else echo "unmount: $dev," $(grep "$zp"<<<"$inf"); fi
done) | sort | $dmenu $opt)
if [[ $drv ]]; then drv=${drv%,*}; else exit; fi # Strip ", etc" from selection
fi
# Call udisks and notify according to output
if [[ $drv == mount:* ]]; then
# Mount media and optionally open with file manager
x=$(udisks --mount ${drv#mount: })
Fnotify msg "$flg" "$x" &
if [ -n "$flm" ] && [[ "$x" == Mounted*at* ]]; then exec $flm "${x##* at }"; fi
else
# Flush buffers and unmount media
sync; x=$(udisks --unmount ${drv#unmount: })
if [ -z "$x" ]; then Fnotify msg "$flg" "Successful $drv"; else Fnotify err "$flg" "$x"; exit 2; fi
fi
exit
# Changelog:
# 12/06/11 v2.11: Add new options -f and -F to open mounted media directory in default or specified file manager
# 02/06/11 v2.01: More user-friendly notification flag $flg
# 22/05/11 v2.00: Add new options -u to unmount from specific directory and -O for additional Dmenu options
# 03/05/11 v1.40: Display unmount error message from udisks; dynamically determine script name
# 02/05/11 v1.30: Source Func and replace notify-send with Fnotify() calling zenity or other notification program
# 30/03/11 v1.20: Default dmenu options to $DMENU environment variable
# 25/02/11 v1.11: To speed up, reject devices handled by fstab
| true
|
92cb861e609f1be6d87aeaba0e45601ee219d429
|
Shell
|
JonShelley/azure
|
/image/ubuntu_18.04_ai/setup_00.sh
|
UTF-8
| 3,839
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision an Ubuntu 18.04 AI VM image: pin cloud-init networking, install
# an NCCL topology file, and add stable RDMA device-naming udev rules.
# Required OS: Ubuntu 18.04 LTS
# Requires the following packages to be downloaded from Nvidia's website and placed in /mnt/nvidia
# - nccl-repo-ubuntu1804-2.7.8-ga-cuda11.0_1-1_amd64.deb
# - https://developer.nvidia.com/compute/machine-learning/nccl/secure/v2.7/prod/nccl-repo-ubuntu1804-2.7.8-ga-cuda11.0_1-1_amd64.deb
sudo apt-get update
sudo apt install build-essential -y
### Disable network for cloud init
echo network: {config: disabled} | sudo tee /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg
sudo bash -c "cat > /etc/netplan/50-cloud-init.yaml" <<'EOF'
network:
    ethernets:
        eth0:
            dhcp4: true
    version: 2
    renderer: networkd
EOF
### Place the topology file in /opt/msft
sudo mkdir -p /opt/msft
sudo bash -c "cat > /opt/msft/topo.xml" <<'EOF'
<system version="1">
  <cpu numaid="0" affinity="0000ffff,0000ffff" arch="x86_64" vendor="AuthenticAMD" familyid="143" modelid="49">
    <pci busid="ffff:ff:01.0" class="0x060400" link_speed="16 GT/s" link_width="16">
      <pci busid="0001:00:00.0" class="0x030200" link_speed="16 GT/s" link_width="16"/>
      <pci busid="0101:00:00.0" class="0x020700" link_speed="16 GT/s" link_width="16"/>
      <pci busid="0002:00:00.0" class="0x030200" link_speed="16 GT/s" link_width="16"/>
      <pci busid="0102:00:00.0" class="0x020700" link_speed="16 GT/s" link_width="16"/>
    </pci>
    <pci busid="ffff:ff:02.0" class="0x060400" link_speed="16 GT/s" link_width="16">
      <pci busid="0003:00:00.0" class="0x030200" link_speed="16 GT/s" link_width="16"/>
      <pci busid="0103:00:00.0" class="0x020700" link_speed="16 GT/s" link_width="16"/>
      <pci busid="0004:00:00.0" class="0x030200" link_speed="16 GT/s" link_width="16"/>
      <pci busid="0104:00:00.0" class="0x020700" link_speed="16 GT/s" link_width="16"/>
    </pci>
    <pci busid="ffff:ff:03.0" class="0x060400" link_speed="16 GT/s" link_width="16">
      <pci busid="000b:00:00.0" class="0x030200" link_speed="16 GT/s" link_width="16"/>
      <pci busid="0105:00:00.0" class="0x020700" link_speed="16 GT/s" link_width="16"/>
      <pci busid="000c:00:00.0" class="0x030200" link_speed="16 GT/s" link_width="16"/>
      <pci busid="0106:00:00.0" class="0x020700" link_speed="16 GT/s" link_width="16"/>
    </pci>
    <pci busid="ffff:ff:04.0" class="0x060400" link_speed="16 GT/s" link_width="16">
      <pci busid="000d:00:00.0" class="0x030200" link_speed="16 GT/s" link_width="16"/>
      <pci busid="0107:00:00.0" class="0x020700" link_speed="16 GT/s" link_width="16"/>
      <pci busid="000e:00:00.0" class="0x030200" link_speed="16 GT/s" link_width="16"/>
      <pci busid="0108:00:00.0" class="0x020700" link_speed="16 GT/s" link_width="16"/>
    </pci>
  </cpu>
</system>
EOF
### Install udev rule for stable RDMA device naming (by PCI location)
sudo bash -c "cat > /etc/udev/rules.d/60-rdma-persistent-naming.rules" <<'EOF'
# SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB)
# Copyright (c) 2019, Mellanox Technologies. All rights reserved. See COPYING file
#
# Rename modes:
# NAME_FALLBACK - Try to name devices in the following order:
#                 by-pci -> by-guid -> kernel
# NAME_KERNEL - leave name as kernel provided
# NAME_PCI - based on PCI/slot/function location
# NAME_GUID - based on system image GUID
#
# The stable names are combination of device type technology and rename mode.
# Infiniband - ib*
# RoCE - roce*
# iWARP - iw*
# OPA - opa*
# Default (unknown protocol) - rdma*
#
# * NAME_PCI
#   pci = 0000:00:0c.4
#   Device type = IB
#   mlx5_0 -> ibp0s12f4
# * NAME_GUID
#   GUID = 5254:00c0:fe12:3455
#   Device type = RoCE
#   mlx5_0 -> rocex525400c0fe123455
#
ACTION=="add", SUBSYSTEM=="infiniband", PROGRAM="rdma_rename %k NAME_PCI"
EOF
# Change local disk permissions
sudo chmod 1777 /mnt
# Get the kernel patch
#apt install -y linux-image-unsigned-5.4.0-1040-azure/bionic-updates
#sudo reboot
| true
|
e297604266a46b6388b8effe89e34e47fb5124c6
|
Shell
|
tm-dd/MunkiManageScripts
|
/MunkiData/scripts_and_configs/create_new_munki_repository.sh
|
UTF-8
| 7,321
| 3.71875
| 4
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# this script setup Munki on a lokal Mac
#
# Copyright (c) 2020 tm-dd (Thomas Mueller)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
####### DEFINITIONS #######
# read settings
source "`dirname $0`/config.sh"
# directory of the software for the importing
# ($1 overrides $munkiTestingPath from config.sh unless it is one of the
# recognised mode keywords)
if [ "$1" != "" -a "$1" != "stable" -a "$1" != "testing" -a "$1" != "debug" -a "$1" != "useRepo" -a "$1" != "doNotImport" ]
then
	munkiTestingPath="$1"
fi
####### HELP AND DEBUG ######
if [ "$1" == "help" -o "$1" == "--help" ]
then
	echo
	echo "Parameters are: [ ThePathOfTheSoftwareDirectory | stable | testing | useRepo | doNotImport | debug | help | --help ]"
	echo "Example 1: $0 '$munkiTestingPath' stable"
	echo "Example 2: $0 '$munkiTestingPath' debug"
	echo "Example 3: $0 '$munkiTestingPath'"
	echo "Example 4: $0"
	echo
	exit 0
fi
# "debug" (as first or second parameter) turns on shell tracing for the run
if [ "$1" == "debug" -o "$2" == "debug" ]
then
	set -x
fi
####### CREATE THE NEW MUNKI REPOSITORY #######
date
# "useRepo" keeps the existing repository directory and munkiimport
# configuration instead of wiping and recreating them
if [ "$1" != "useRepo" -a "$2" != "useRepo" ]
then
	# remove the old Munki repository directory and create new directorys
	rm -rf "$munkiTestingPath" || (echo "ERROR. Press CTRL + C !!!"; read)
	mkdir -p "$munkiTestingPath"
	cd "$munkiTestingPath" || (echo "ERROR. Press CTRL + C !!!"; read)
	mkdir -p catalogs manifests pkgs pkgsinfo icons
	chmod -R a+rx "$munkiTestingPath"
	# configure Munki
	echo
	echo 'Please setup a new Munki installation.'
	echo 'You can use a path like "'$munkiTestingPath'" for the repository on a webserver and as "default catalog" e.g. the name "standard_mac_en".'
	echo 'The other fields can be empty to use the default settings, as your option.'
	echo
	# create a new Munki repository configuration (also possible with: "/usr/local/munki/munkiimport --configure")
	defaults write ~/Library/Preferences/com.googlecode.munki.munkiimport.plist 'default_catalog' ''
	defaults write ~/Library/Preferences/com.googlecode.munki.munkiimport.plist 'editor' ''
	defaults write ~/Library/Preferences/com.googlecode.munki.munkiimport.plist 'pkginfo_extension' '.plist'
	defaults write ~/Library/Preferences/com.googlecode.munki.munkiimport.plist 'repo_path' "$munkiTestingPath"
	defaults write ~/Library/Preferences/com.googlecode.munki.munkiimport.plist 'repo_url' ''
fi
# copy munki manifests and icons
echo; echo "COPPING manifests and icons ..."; echo
set -x
mkdir -p "${munkiTestingPath}/manifests/" "${munkiTestingPath}/icons"
cp ${munkiManifestOffsets}/* "${munkiTestingPath}/manifests/"
cp ${munkiIconOffsets}/* "${munkiTestingPath}/icons/"
chmod 644 ${munkiTestingPath}/manifests/* ${munkiTestingPath}/icons/*
ls -l "${munkiTestingPath}/manifests/"
ls -l "${munkiTestingPath}/icons/"
set +x
echo
####### IMPORTING SOFTWARE TO MUNKI #######
if [ "$1" != "doNotImport" -a "$2" != "doNotImport" ]
then
	echo; echo "IMPORT software ..."; echo
	# set the field separator to newline only, so paths containing spaces
	# survive the unquoted `find` expansion in the for-loop below
	OIFS=$IFS
	IFS=$'\n'
	NUMBEROFIMPORTS=`find $pathOfSoftware -name import_*_to_munki.sh | wc -l | awk '{ print $1 }'`
	NUMBERIMPORT=1
	for importfile in `find $pathOfSoftware -name import_*_to_munki.sh`
	do
		# if running in debug mode, ask before executing any import file
		if [ "$1" == "debug" -o "$2" == "debug" ]
		then
			IMPORT="ask"
			echo; echo -n "Should the file '$importfile' execute now ? (y/*) : "
			read IMPORT
		else
			IMPORT='y'
		fi
		if [ "$IMPORT" == "y" ]
		then
			# goto to the directory of the script
			cd `dirname "$importfile"`
			# a progress message for the user
			echo; echo "Start import script ${NUMBERIMPORT} of ${NUMBEROFIMPORTS}:"; NUMBERIMPORT=$((${NUMBERIMPORT}+1))
			# import the Munki files
			bash "$importfile" "$munkiTestingPath"
		else
			echo -e "SKIPPING file '$importfile' by importing new software.\n"
		fi
	done
	# restore the original field separator
	IFS=$OIFS
else
	echo -e "\nTHE SOFTWARE IMPORT WAS SKIPPED, BY USING THE OPTION 'doNotImport'.\n\n"
fi
# setup the access rights for the webserver
chmod -R 755 "$munkiTestingPath"
date
####### TESTS AND CHANGE TO STABLE #######
# Tell the user the testing repository is ready for manual verification.
(
set +x
echo -e "\n\n*** THE TESTING MUNKI REPO IS FINISH NOW. ***\n\n"
echo "PLEASE TEST the new Munki TESTING repository NOW and continue."
echo
)
ANSWER='';
# if the first or second parameter was "stable", do NOT ASK in the next while loop
if [ "$1" == "stable" -o "$2" == "stable" ]
then
	ANSWER='stable';
fi
# if the first or second parameter is NOT "testing", allow the user to change the Munki repository to the stable URL
if [ "$1" != "testing" -a "$2" != "testing" ]
then
	# wait up to typing "stable" to change the testing to the stable repository
	set +x
	while [ "$ANSWER" != "stable" ]
	do
		echo -n "Write 'stable' to use the new TESTING repository as STABLE now or break with [Ctrl] + [C]. "
		read ANSWER
	done
	echo
	echo "OK, waiting 5 seconds and change the repositorys ..."
	echo
	# actually wait the announced 5 seconds (the original printed the
	# message but never slept)
	sleep 5
	# BUGFIX: the original line was
	#   (set -x /bin/bash `dirname $0`"make_munki_testing_to_stable.sh move")
	# which only assigned positional parameters inside the subshell (set -x ARGS)
	# and never executed anything; it was also missing the "/" after `dirname`.
	( set -x; /bin/bash "`dirname $0`/make_munki_testing_to_stable.sh" move )
	echo
	echo "The new Munki repository is now found on the STABLE and testing URL."
	echo
	echo 'On the mac clients do: '
	echo
	echo ' 1. install the Munki tools from: https://github.com/munki/munki/releases'
	echo
	echo ' 2. use commands like this: (as an example):'
	echo
	echo ' sudo defaults write /Library/Preferences/ManagedInstalls ClientIdentifier "standard_mac_en" # or use an other repository like: "standard_mac_de", "full_mac_en" or "full_mac_de"'
	echo ' sudo defaults write /Library/Preferences/ManagedInstalls SoftwareRepoURL "https://'`hostname`'/repo/" # to setup the URL to your repository'
	echo ' sudo defaults read /Library/Preferences/ManagedInstalls # print the current settings of the munki client'
	echo ' sudo /usr/local/munki/managedsoftwareupdate'
	echo ' open /Applications/Managed\ Software\ Center.app'
	echo
	echo ' 3. additionally install the "munki reports", at your choice'
	echo
fi
date
exit 0
| true
|
2851a5339f6750003f2cd3b250860244bbb57d4e
|
Shell
|
jwiszowata/code_reaper
|
/code/my-app/split_on_functions.sh
|
UTF-8
| 665
| 3.015625
| 3
|
[] |
no_license
|
# split files on 5 groups, and run my function (mvn exec:java) once per group
a=0
for file in ../../FreeCol_files/*;
do
	# round-robin assignment of the file to one of the 5 group strings
	case $((a % 5)) in
		0) files1=$files1" "$file ;;
		1) files2=$files2" "$file ;;
		2) files3=$files3" "$file ;;
		3) files4=$files4" "$file ;;
		4) files5=$files5" "$file ;;
	esac
	# BUGFIX: the original `a=$a+1` performed string concatenation, building
	# the literal "0+1+1+..." which only worked by accident when re-evaluated
	# inside $((...)); use real arithmetic instead.
	a=$((a + 1))
done
# each group is passed as one space-separated argument string
mvn exec:java -Dexec.args="$files1"
mvn exec:java -Dexec.args="$files2"
mvn exec:java -Dexec.args="$files3"
mvn exec:java -Dexec.args="$files4"
mvn exec:java -Dexec.args="$files5"
| true
|
7c87c6d93dbc91db83ae0e1f89e0ff6a41ded86e
|
Shell
|
statonlab/aurora-galaxy-tools
|
/single-html-tools/aurora_fasta_importer/build-and-run-job-scripts.sh
|
UTF-8
| 534
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
# run job scripts within the tool outputs directory
cd ${REPORT_FILES_PATH}
#========== build and run job 1 script ============
# NOTE(review): the heredoc delimiter is deliberately UNQUOTED, so every
# $variable and $(command) below is expanded while *writing*
# curl-download.sh, not when running it. $X_f, $X_e, $X_O and
# $TOOL_INSTALL_DIR are presumably substituted earlier by the Galaxy tool
# wrapper -- confirm before changing, since quoting the delimiter would
# change behaviour.
cat >curl-download.sh <<EOF
if [ $(wc -l <$X_f) -gt 2 ]; then
    cp $X_f $X_O
else
  if [ "$X_e" = "twobit" ]; then
    cp $TOOL_INSTALL_DIR/twoBitToFa twoBitToFa
    chmod +x twoBitToFa
    ./twoBitToFa $(head -1 $X_f) $X_O
  elif [ "$X_e" = "gz" ]; then
    curl $(head -1 $X_f) > output.fa.gz
    gunzip -c output.fa.gz > $X_O
  else
    curl $(head -1 $X_f) > $X_O
  fi
fi
EOF
sh curl-download.sh
| true
|
ff6ecff4cb2e8347b7ce5616d5434fdb97f40586
|
Shell
|
richjoslin/bubble-shooter-vr
|
/release-docs.sh
|
UTF-8
| 738
| 3.140625
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
#!/bin/sh
set -e
# Build the docfx site and publish the generated HTML to the gh-pages
# branch via a throw-away clone next to the working copy.
# export VSINSTALLDIR="C:\Program Files (x86)\Microsoft Visual Studio\2017\Community"
# export VisualStudioVersion="15.0"
docfx ./docs/docfx.json
src_dir=$PWD
cd ..
pages_dir=$PWD/bubble-shooter-vr-docfx-temp
echo "remove existing temp dir $pages_dir"
rm -rf "$pages_dir"
echo "create temp dir $pages_dir"
mkdir "$pages_dir"
echo "clone repo, gh-pages branch"
git clone git@github.com:richjoslin/bubble-shooter-vr.git --branch gh-pages "$pages_dir"
echo "clear repo dir"
cd "$pages_dir"
git rm -r *
echo "copy docs into repo"
cp -r "$src_dir"/docs/_site/* .
echo "push new docs to remote branch"
git add . -A
git commit -m "auto-update generated documentation"
git push origin gh-pages
| true
|
13d5e7b6b487c772dbe96cdfa0c3ed19ce3c3337
|
Shell
|
yroboros/pycryptoprosdk
|
/renew_test_signature.sh
|
UTF-8
| 608
| 2.828125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -e
# Issue a fresh test certificate from the CryptoPro test CA and re-sign a
# test document with it.
# $1 - certificate subject (RDN string), $2 - file name under tests/files/signatures
renew_signature(){
    rdn=$1
    file_name=$2
    dir_name=./tests/files/signatures
    # create a new certificate/key container on the HDIMAGE reader
    /opt/cprocsp/bin/amd64/cryptcp -creatcert \
        -rdn "$rdn" \
        -cont '\\.\HDIMAGE\cont1' \
        -sg -ku -du -ca http://cryptopro.ru/certsrv
    # sign the target file with the certificate matching the -dn subject
    /opt/cprocsp/bin/amd64/cryptcp -signf \
        -dir ${dir_name} \
        -dn "Иванов Иван Иванович" \
        -cert \
        ${dir_name}/${file_name}
}
# renew the test signature
renew_signature 'CN=Иванов Иван Иванович,INN=123456789047,OGRN=1123300000053,SNILS=12345678901,STREET="Улица, дом",L=Город' 'doc.txt'
| true
|
0f9645aefbe7db2da4a332ab72152969260f2fe2
|
Shell
|
stahta01/m6809-dev
|
/mingw-w64-cmoc-os9/PKGBUILD
|
UTF-8
| 1,859
| 2.875
| 3
|
[] |
no_license
|
# Maintainer: Tim S <stahta01@gmail.com>
# Contributor: Paul Hentschel <aur at hpminc.com>
# Based on work from https://github.com/hpmachining/os9-dev

# MSYS2/mingw-w64 PKGBUILD for the CMOC OS-9/NitrOS-9 support library,
# built from the git HEAD of tlindner/cmoc_os9.
_realname=cmoc_os9
_machine=m6809
_target=${_machine}-unknown
pkgbase=mingw-w64-${_realname}
pkgname="${MINGW_PACKAGE_PREFIX}-${_realname}-git"
pkgver=r85.9f9dfda
pkgrel=4
pkgdesc="CMOC C Library support for OS-9/NitrOS-9 (mingw-w64)"
arch=('any')
url='https://github.com/tlindner/cmoc_os9'
license=('custom')
groups=("${MINGW_PACKAGE_PREFIX}-${_target}-toolchain")
#depends=('cmoc')
makedepends=('git' "${MINGW_PACKAGE_PREFIX}-cmoc")
provides=("${MINGW_PACKAGE_PREFIX}-${_realname}")
conflicts=("${MINGW_PACKAGE_PREFIX}-${_realname}")
options=('!strip')
install=
source=('git+https://github.com/tlindner/cmoc_os9.git')
sha256sums=('SKIP')
# VCS version: r<commit count>.<short hash>, e.g. r85.9f9dfda
pkgver() {
  cd "$srcdir/${_realname}"
  printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}
# build the base C library (lib/) and the graphics library (cgfx/)
build() {
  cd "$srcdir/${_realname}"
  make -C lib clean
  make -C lib
  make -C cgfx clean
  make -C cgfx
}
#check() {
# cd "$srcdir/${pkgname%-git}"
# make -k check
#}
package() {
  cd "$srcdir/${_realname}"
  # assemble a LICENSE file from the notice embedded in cgfx/cgfx.docs
  echo "CGFX Library" > LICENSE
  echo >> LICENSE
  sed -n '/This library/,/ DODGECOLT (Delphi)/p' cgfx/cgfx.docs >> LICENSE
# sed -n '/This library/,/ DODGECOLT (Delphi)/p' 'CGFX Reference.md' \
# | sed 's/ \+/ /g' \
# | fold -s -w80 >> LICENSE
  install -Dm644 -t "$pkgdir${MINGW_PREFIX}/share/licenses/${_realname}" LICENSE
  install -Dm644 -t "$pkgdir${MINGW_PREFIX}/share/cmoc/include/os9" include/*.h
  install -Dm644 -t "$pkgdir${MINGW_PREFIX}/share/cmoc/include/os9/cgfx" cgfx/include/*.h
  install -Dm644 -t "$pkgdir${MINGW_PREFIX}/share/cmoc/include/os9/sys" include/sys/*.h
  install -Dm644 -t "$pkgdir${MINGW_PREFIX}/share/cmoc/lib" lib/libc.a
  install -Dm644 -t "$pkgdir${MINGW_PREFIX}/share/cmoc/lib" cgfx/libcgfx.a
}
| true
|
83b99ed09e462fb7350dad9fb483e7f7eafb1bf2
|
Shell
|
polentino/awesome_terminal
|
/utils/helpers.sh
|
UTF-8
| 6,866
| 4.25
| 4
|
[
"WTFPL"
] |
permissive
|
#!/usr/bin/env bash
# Function that's responsible to detect if the current working directory is under
# (distributed) version control system and, if so, retrieves basic infos to the
# variable passed as parameter.
# Supported DVCS: Git, Mercurial (when installed)
#
# Requires:
# - `git` command installed and available in the current $PATH
# - `hg` command installed and available in the current $PATH
#
# Expected variables:
# - DEFAULT_TEXT_FG the default font color
# - DIRTY_BRANCH_TEXT_FG the font color when the branch is dirty
# - DIRTY_BRANCH_ICON the icon when the branch is dirty
# - CLEAN_BRANCH_TEXT_FG the font color when the branch is clean
# - CLEAN_BRANCH_ICON the icon when the branch is clean
# - GITHUB_ICON the icon/text to be used if the directory managed by git, and hosted in github
# - GITLAB_ICON the icon/text to be used if the directory managed by git, and hosted in gitlab
# - DEFAULT_GIT_ICON the icon/text to be used by default, if the directory is managed by git
# - HG_ICON the icon/text to be used if the project managed by mercurial
# - TEXT_SEPARATOR something used to be placed between the path string, and the dvcs info string
#
# Sets:
# - $1 the reference to the variable passed as parameter
function dvcs_detect {
  local BRANCH=""
  local ICON=""
  # is it a GIT repository?
  if [[ "$(git rev-parse --is-inside-work-tree 2> /dev/null)" == true ]] ; then
    BRANCH=$(git symbolic-ref --short HEAD)
    if [ "$CONDENSED_BRANCH" = true ] ; then
      # shorten_branch_name rewrites the BRANCH variable in place
      shorten_branch_name $BRANCH
    fi
    # -uno ignores untracked files: only staged/unstaged changes mark it dirty
    if [[ $(git status -s -uno | wc -l) != 0 ]] ; then
      BRANCH=${DIRTY_BRANCH_TEXT_FG}${BRANCH}${DEFAULT_TEXT_FG}
      if [ ! -z "${DIRTY_BRANCH_ICON}" ] ; then
        ICON=" ${DIRTY_BRANCH_ICON} "
      fi
    else
      BRANCH=${CLEAN_BRANCH_TEXT_FG}${BRANCH}${DEFAULT_TEXT_FG}
      if [ ! -z "${CLEAN_BRANCH_ICON}" ] ; then
        ICON=" ${CLEAN_BRANCH_ICON} "
      fi
    fi
    # choose the hoster logo from the origin remote URL
    local remote_url=$(git config --get remote.origin.url)
    if [[ $remote_url = *"github.com"* ]] ; then
      GIT_LOGO=$GITHUB_ICON
    elif [[ $remote_url = *"gitlab.com"* ]] ; then
      GIT_LOGO=$GITLAB_ICON
    else
      GIT_LOGO=${DEFAULT_GIT_ICON}
    fi
    # TODO find good branch icon
    eval "$1='${TEXT_SEPARATOR}${GIT_LOGO} ${BRANCH}${ICON}'"
  # is this a Mercurial repository?
  elif [[ "$(hg branch 2> /dev/null)" != "" ]] ; then
    BRANCH=$(hg branch)
    if [ "$CONDENSED_BRANCH" = true ] ; then
      shorten_branch_name $BRANCH
    fi
    # any modified/added/removed/deleted/unknown file marks the branch dirty
    if [[ "$(hg status -m -a -r -d -u)" != "" ]] ; then
      BRANCH=${DIRTY_BRANCH_TEXT_FG}${BRANCH}${DEFAULT_TEXT_FG}
      if [ ! -z "${DIRTY_BRANCH_ICON}" ] ; then
        ICON=" ${DIRTY_BRANCH_ICON}"
      fi
    else
      BRANCH=${CLEAN_BRANCH_TEXT_FG}${BRANCH}${DEFAULT_TEXT_FG}
      if [ ! -z "${CLEAN_BRANCH_ICON}" ] ; then
        ICON=" ${CLEAN_BRANCH_ICON}"
      fi
    fi
    # TODO find good branch icon
    eval "$1='${TEXT_SEPARATOR}${HG_ICON} ${BRANCH}${ICON}'"
  else
    # not under version control: emit a single space so the prompt layout holds
    eval "$1=' '"
  fi
}
# Function that's responsible to make the branch name readable, made especially when
# the repo is managed by Atlassian's tools (jira,stash etc..). It condenses the part
# of the branch name before the ticket number, and trims the part after it to a given
# number of characteds, user defined.
#
# Expected variables:
# - $1 the branch name
#
# Sets:
# - BRANCH the updated branch name
function shorten_branch_name {
  # remove everything after the last '/' character (included)
  prefix=${1%[\/]*}
  # now the prefix can be further tokenized
  # NOTE(review): the replacement pattern below looks intended to split on
  # "/<digits>/" sequences, but as a glob it rarely matches -- confirm the
  # intended behaviour before touching it
  splitted_branch=(${prefix//\/\.+[0-9]\.+\// })
  shortened_branch=($(echo $1 | sed -e 's/'${splitted_branch[1]}'*/ /g'))
  IFS='\/' read -r -a array <<< "${shortened_branch[0]}"
  condensed_branch=""
  # keep numeric path segments (e.g. ticket numbers) whole, abbreviate the
  # rest to their first character, joined by $BRANCH_SEPARATOR
  for ((i = 0 ; i < ${#array[@]} -1 ; i++ )); do
    if [[ ${array[i]} =~ [0-9] ]] ; then
      condensed_branch=${condensed_branch}${array[i]}$BRANCH_SEPARATOR
    else
      condensed_branch=${condensed_branch}${array[i]:0:1}$BRANCH_SEPARATOR
    fi
  done
  condensed_branch="${condensed_branch}${array[-1]}${1#"$shortened_branch"}"
  # empirical way to compute the length, considering special characters
  IFS='\\u' inarr=(${condensed_branch})
  correction=$(((${#inarr[@]} - 2) * 3))
  unset IFS
  # trim the tail to BRANCH_DESCRIPTION_LENGTH visible characters
  if ((${#condensed_branch} - correction > $BRANCH_DESCRIPTION_LENGTH)) ; then
    condensed_branch="${condensed_branch:0:$BRANCH_DESCRIPTION_LENGTH + $correction}..."
  fi
  eval "BRANCH=' ${condensed_branch}'"
}
# Function that's responsible to detect the current working directory, and manipulate it
# to make it a bit good looking :)
#
# Expected variables:
# - HOME_ICON the icon/text to be used instead of /home/$USER/
# - PATH_SEPARATOR the icon/text to be used instead of the path separator
#
# Sets:
# - $1 the reference to the variable passed as parameter
function cwd_detect {
  # Build a prompt-friendly version of $PWD: replace the home prefix with
  # $HOME_ICON, drop the leading "/", and render every remaining "/" as
  # " $PATH_SEPARATOR ". Result is assigned to the variable named by $1.
  local display=''
  case "$PWD" in
    /home/$USER*) display=${PWD/"/home/$USER"/$HOME_ICON} ;;
    *)            display=${PWD#?} ;;
  esac
  eval "$1='${display//\// $PATH_SEPARATOR }'"
}
# Function that's responsible to detect battery charge level and adapter status (either
# connected/disconnected).
#
# Requires:
# - `upower` command installed and available in the current $PATH
#
# Expected variables:
# - BATTERY_CHARGING_ICON the icon/text to be used when the AC adapter is plugged in
# - BATTTERY_LEVEL_ICONS array of 8 icons/texts to be used instead for a specific charge range
#
# Sets:
# - $1 the reference to the variable passed as parameter
function battery_detect {
  # detect battery charge and AC adapter
  # "online" is 1 when the AC adapter is plugged in (adapter name ADP1 is
  # hardware-specific; missing path is silently treated as "on battery")
  local charger_status_icon=''
  if [[ $(cat /sys/class/power_supply/ADP1/online 2> /dev/null) == 1 ]] ; then
    charger_status_icon=${BATTERY_CHARGING_ICON}
  fi
  # parse the "percentage: NN%" line from upower into a bare number
  local battery_charge=$(upower -i $(upower -e | grep '/battery') | grep --color=never -E percentage|xargs|cut -d' ' -f2|sed s/%// 2> /dev/null)
  # map the charge to one of 8 icons (index 0 = lowest, 7 = fullest)
  # NOTE(review): "BATTTERY" (triple T) matches the variable name expected
  # from the configuration -- do not "fix" the spelling here alone
  if [[ $battery_charge -gt 92 ]] ; then
    battery_charge_icon=${BATTTERY_LEVEL_ICONS[7]}
  elif [[ $battery_charge -gt 78 ]] ; then
    battery_charge_icon=${BATTTERY_LEVEL_ICONS[6]}
  elif [[ $battery_charge -gt 64 ]] ; then
    battery_charge_icon=${BATTTERY_LEVEL_ICONS[5]}
  elif [[ $battery_charge -gt 50 ]] ; then
    battery_charge_icon=${BATTTERY_LEVEL_ICONS[4]}
  elif [[ $battery_charge -gt 36 ]] ; then
    battery_charge_icon=${BATTTERY_LEVEL_ICONS[3]}
  elif [[ $battery_charge -gt 22 ]] ; then
    battery_charge_icon=${BATTTERY_LEVEL_ICONS[2]}
  elif [[ $battery_charge -gt 10 ]] ; then
    battery_charge_icon=${BATTTERY_LEVEL_ICONS[1]}
  else
    battery_charge_icon=${BATTTERY_LEVEL_ICONS[0]}
  fi
  eval "$1='${charger_status_icon} ${battery_charge_icon}'"
}
| true
|
523f0eaa19a82e0a163cb2a33933ecdadf46a0b7
|
Shell
|
denis-beurive/docker
|
/lib/image.sh
|
UTF-8
| 473
| 3.6875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Please, make sure to source the file "container.sh" first.
# `type` exits non-zero when __functions_container is undefined, i.e. when
# container.sh has not been sourced yet; warn loudly but keep going.
type __functions_container &>/dev/null
if [ $? -eq 1 ]; then
    echo
    echo "WARNING !!! The functions defined in this file need the ones defined in the file \"container.sh\"."
    echo
fi
function delete_all_images() {
    # Stop every running container and delete all stopped containers
    # (helpers sourced from container.sh), then remove all Docker images.
    kill_containers
    delete_all_stopped_containers
    local ids
    ids=$(docker images -a -q)
    if [ ! -z "${ids}" ]; then
        # BUGFIX: the original ran `docker rmi "${ids}"`, handing docker the
        # whole newline-separated list as ONE bogus image name. Feed the IDs
        # through xargs so each becomes its own argument.
        echo "${ids}" | xargs docker rmi
    fi
}
| true
|
5f0887b82ca4d674b90812cbdc061afbd06435b5
|
Shell
|
yunionio/dashboard
|
/scripts/sync-modules.sh
|
UTF-8
| 1,015
| 3.328125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# set -x
# run the checkout for the requested branch first
yarn checkout $@
branch=$1
currentPath=$(dirname $0)
# Fetch and pull one sub-module repository onto $branch.
# $1 - module name (for messages only), $2 - path relative to this script
sync(){
  local moduleName=$1; shift
  local path=$1; shift
  { # try
    echo
    cd $currentPath/$path
    git fetch origin && \
    git pull origin $branch && \
    # suppress the output of `cd -`
    cd - 2>&1 >/dev/null && \
    echo "\033[32m [$moduleName] rebase 完成 \033[0m"
  } || { # catch
    echo "\033[31m [$moduleName] rebase 发生错误,请手动执行rebase \033[0m"
  }
}
sync 'dashboard' ../
sync 'dashboard-module-common' ../src
sync 'dashboard-module-cloudenv' ../containers/Cloudenv
sync 'dashboard-module-compute' ../containers/Compute
sync 'dashboard-module-dashboard' ../containers/Dashboard
sync 'dashboard-module-network' ../containers/Network
sync 'dashboard-module-storage' ../containers/Storage
sync 'dashboard-module-k8s' ../containers/K8S
sync 'dashboard-module-helm' ../containers/Helm
sync 'dashboard-module-db' ../containers/DB
sync 'dashboard-module-monitor' ../containers/Monitor
exit 0
| true
|
1aa27a4e9edcba4cf829fce6ba7cddf7eaa7bef9
|
Shell
|
mattrobenolt/prompt
|
/bench/prompt-for-perf.sh
|
UTF-8
| 370
| 2.640625
| 3
|
[
"ISC"
] |
permissive
|
#!/bin/bash
# Micro-benchmark driver: run the prompt binary 100 times inside the
# directory given as $1 (default: current directory), discarding output.
export PROMPT_STYLE_HOSTNAME=''
export PROMPT_STYLE_BRANCH=''
export PROMPT_STYLE_WD=''
export PROMPT_STYLE_RESET=''
(
    cd "${1:-.}"
    run=1
    while [ "$run" -le 100 ]; do
        /home/josh/dev/projects/prompt/target/debug/prompt >/dev/null
        # need to build with debug = true
        # /home/josh/dev/projects/prompt/target/x86_64-unknown-linux-musl/release/prompt >/dev/null
        run=$((run + 1))
    done
)
| true
|
49d4323aaa72d3b5e32e9d8c774e02f3991449c4
|
Shell
|
belovehq/aws-repl-and-beakerx
|
/packer/provision
|
UTF-8
| 667
| 2.65625
| 3
|
[
"CC-BY-4.0"
] |
permissive
|
#!/usr/bin/env bash
# Packer provisioning script for an EC2 instance: installs a Java/Leiningen
# Clojure toolchain, then Python 3 with Jupyter/JupyterLab and BeakerX.
# Install Java, Leiningen and download Clojure libraries
sudo yum install -y java-1.8.0-openjdk-devel.x86_64
mkdir -p /home/ec2-user/bin
curl https://raw.githubusercontent.com/technomancy/leiningen/stable/bin/lein -o ~/bin/lein
chmod 755 ~/bin/lein
# ~/.packer presumably holds a project.clj uploaded beforehand; `lein deps`
# there pre-downloads the Clojure libraries, then the directory is removed
cd ~/.packer
~/bin/lein deps
cd ~
rmdir .packer
# Install Python3, Jupyter and BeakerX
sudo yum install -y python3
pip3 install --upgrade --user pip
PATH=$PATH:/home/ec2-user/.local/bin
pip install --user jupyter jupyterlab requests beakerx
sudo mkdir -p /usr/share/jupyter /usr/etc/jupyter /usr/etc/ipython
sudo chown ec2-user /usr/share/jupyter /usr/etc/jupyter /usr/etc/ipython
# register the BeakerX kernels/extensions with Jupyter
beakerx install
| true
|
0063184eb53935fc853fd4e8fef1e1f21fa1bdb5
|
Shell
|
CuiMingFu/zumastor
|
/ddsnap/scripts/genallpatches.sh
|
UTF-8
| 172
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Regenerate the auto-generated ddsnap files patch for every version
# directory found under patches/.
#
# Replaces the fragile `pushd patches; vers=$(echo *)` word-splitting idiom
# with a direct quoted glob loop over patches/*.
for ver in patches/*
do
	ver=${ver#patches/}
	./scripts/genpatch.sh "$ver" drivers/md > "patches/$ver/AUTO.ddsnap.files.patch"
done
| true
|
3d2c254d74dc6704fb18773c9b9e60a992b8b80e
|
Shell
|
wr-fenglei/guessinggame
|
/guessinggame.sh
|
UTF-8
| 1,133
| 4
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
function is_natural_number() {
  # True iff $1 is a canonical base-10 natural number: "0", or a digit
  # string with no leading zero (no sign, no blanks, no empty string).
  [[ $1 =~ ^(0|[1-9][0-9]*)$ ]]
}
function is_overflow() {
  # Bash integers are signed 64-bit (max 9223372036854775807, 19 digits).
  # Anything longer than 19 digits is out of range outright; a 19-digit
  # value above the max wraps to a negative number in arithmetic context,
  # which the second test detects.
  # Thanks Mateusz Kita
  if (( ${#1} > 19 )); then
    return 0
  fi
  (( $1 < 0 ))
}
echo "Can you guess how many files are in the current directory?"
count=$(ls -A1 | wc -l)
while :; do
read answer
if ! $(is_natural_number "$answer"); then
echo "Your guess was not natural number, please try to guess again."
elif $(is_overflow $answer); then
echo "Your guess was overflow, please try to guess again."
elif [[ $answer -gt $count ]]; then
echo "Your guess was too high, please try to guess again."
elif [[ $answer -lt $count ]]; then
echo "Your guess was too low, please try to guess again."
else
echo "Congratulation! Your guess was correct."
exit 0
fi
done
| true
|
8461a342efb2c413ac28ca1e93958c28e39f20af
|
Shell
|
ClassicPress/ClassicPress
|
/bin/update-importer-plugin.sh
|
UTF-8
| 1,400
| 4.0625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
set -e
IMPORTER_PATH=tests/phpunit/data/plugins/wordpress-importer
IMPORTER_GITHUB_URL=https://github.com/WordPress/wordpress-importer
# This script updates the WordPress importer plugin from its latest version on
# GitHub, which is required for some of the automated tests.
# Sanity check: make sure we have a .git directory, and at least one remote
# pointing to a repository named ClassicPress
if [ ! -d .git ] || ! git remote -v | grep -qi '\b/ClassicPress\b'; then
	echo "ERROR: Call this script from within your ClassicPress repository"
	exit 1
fi
# Make sure there are no modified files in the local repository
change_type=""
if ! git diff --exit-code --quiet; then
	change_type="Modified file(s)"
elif ! git diff --cached --exit-code --quiet; then
	change_type="Staged file(s)"
fi
if [ ! -z "$change_type" ]; then
	git status
	echo
	echo "ERROR: $change_type detected"
	echo "ERROR: You must start this script from a clean working tree!"
	exit 1
fi
# Replace the vendored copy with a fresh clone, strip repo metadata and CI
# files, and commit the result with the upstream revision in the message.
set -x
rm -rf "$IMPORTER_PATH"
git clone "$IMPORTER_GITHUB_URL" "$IMPORTER_PATH"
revision="$IMPORTER_GITHUB_URL/commit/$(cd "$IMPORTER_PATH"; git rev-parse HEAD | cut -c1-9)"
rm -rf "$IMPORTER_PATH"/{.git,.travis.yml,phpunit*}
git add "$IMPORTER_PATH"
git commit -m "Update importer plugin for automated tests

Revision: $revision"
set +x
echo
echo 'Success! 1 commit was added to your branch:'
echo
git log -n 1
| true
|
bfce841cb41d1f614457f05978e8a8011a24e4bd
|
Shell
|
swarmstack/teampass
|
/teampass-docker-start.sh
|
UTF-8
| 566
| 3.484375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Container entrypoint: clone Teampass into $VOL on first run, report the
# configuration state, then hand off to the image's own /start.sh.
#
# Abort immediately if VOL is unset or empty: with an empty VOL the cleanup
# below would expand to `rm -Rf /*`.
: "${VOL:?VOL must be set to the Teampass install directory}"
if [ ! -d "${VOL}/.git" ];
then
    echo "Initial setup..."
    rm -Rf -- "${VOL:?}"/*
    git clone "$REPO_URL" "${VOL}"
    mkdir "${VOL}/sk"
    chown -Rf nginx:nginx "${VOL}"
fi
if [ -f "${VOL}/includes/config/settings.php" ] ;
then
    # settings.php exists once the web installer has completed
    echo "Teampass is ready."
    rm -rf "${VOL}/install"
else
    echo "Teampass is not configured yet. Open it in a web browser to run the install process."
    echo "Use ${VOL}/sk for the absolute path of your saltkey."
    echo "When setup is complete, restart this image to remove the install directory."
fi
# Pass off to the image's script
exec /start.sh
| true
|
0e9c127ac6c930ea76dd7357c119d29acf8451fe
|
Shell
|
mongodb/libmongocrypt
|
/bindings/node/.evergreen/test.sh
|
UTF-8
| 532
| 3.015625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Evergreen CI task: set up a Node toolchain, build libmongocrypt
# statically, and run the Node bindings test suite.
# set -o xtrace # Write all commands first to stderr
set -o errexit # Exit the script with error if any of the commands fail

echo "Setting up environment"
# prefer the MongoDB toolchain binaries over the system ones
export PATH="/opt/mongodbtoolchain/v2/bin:$PATH"
hash -r

# default to Node 16 unless the caller pinned a version
NODE_LTS_VERSION=${NODE_LTS_VERSION:-16}
export NODE_LTS_VERSION=${NODE_LTS_VERSION}

source ./.evergreen/install-dependencies.sh

# install node dependencies
echo "Installing package dependencies (includes a static build)"
bash ./etc/build-static.sh

# Run tests
echo "Running tests"
npm test
| true
|
39c8ef44df10c0863cbca4bd3b077847395b3486
|
Shell
|
habitat-sh/core-plans
|
/packer/plan.sh
|
UTF-8
| 734
| 3.15625
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Habitat plan for the prebuilt HashiCorp Packer linux/amd64 release zip.
pkg_name=packer
pkg_origin=core
pkg_version=1.7.8
pkg_maintainer="The Habitat Maintainers <humans@habitat.sh>"
pkg_license=('MPL2')
pkg_source="https://releases.hashicorp.com/${pkg_name}/${pkg_version}/${pkg_name}_${pkg_version}_linux_amd64.zip"
pkg_shasum=8a94b84542d21b8785847f4cccc8a6da4c7be5e16d4b1a2d0a5f7ec5532faec0
pkg_description="Packer is a tool for creating machine and container images for multiple platforms from a single source configuration."
pkg_upstream_url=https://packer.io
pkg_build_deps=(core/unzip)
pkg_bin_dirs=(bin)
# the upstream zip has no top-level directory, so extract into a named one
do_unpack() {
  cd "${HAB_CACHE_SRC_PATH}" || exit
  unzip "${pkg_filename}" -d "${pkg_name}-${pkg_version}"
}
# nothing to compile: upstream ships a prebuilt binary
do_build() {
  return 0
}
do_install() {
  install -D packer "${pkg_prefix}/bin/packer"
}
| true
|
026fa740bf588107dd0ed336d8d5ce75a3db0089
|
Shell
|
Mokon/mcommon
|
/tools/hooks/pre-commit
|
UTF-8
| 5,232
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# Copyright (C) 2013-2016 David 'Mokon' Bond, All Rights Reserved
function main {
rc=0
exec 1>&2 # stdout to stderr
set_root
pushd $root &> /dev/null
set_against
process_files
if ! print_results ; then
rc=1
fi
git_checks
popd &> /dev/null
return $rc
}
function process_files {
failures=()
for file in $(get_files) ; do
echo "pre-commit check on $file:"
if [ ! -f $file ]; then
echo "file no longer exists or not a regular file"
elif [[ "$file" =~ \.[ch]p?p?$ ]] ; then
get_commit_file
testcase "const rvalue references" "const\ ([a-zA-Z_][a-zA-Z0-9_]+)&&" 0
testcase "space around paren" "(\(\ )|(\ \))" 0
testcase "space before semi-colon" "\ ;" 0
testcase "logical or" "\|\|" 0
#testcase "logical and" "&&[^)]" 0
testcase "copyright header" "/\*\ Copyright\ \(C\)\ 2013-2016\ David\ 'Mokon'\ Bond,\ All\ Rights\ Reserved\ \*/" 1
testcase "includes using quotes" '#include\ "' 0
cppcheck_static_analyzer
/bin/rm $commit_file
fi
done
}
function testcase {
local message=$1
local check="$2"
local expected=$3
echo -ne "\tchecking for $message...\n"
output=$(grep -Pn "$check" $commit_file ; exit $?)
result=$?
if [ $result -eq $expected ]; then
while read -r line; do
fail "$message" "$line"
done <<< "$output"
fi
}
function cppcheck_static_analyzer {
local cppcheck_new_supp=$root/tools/suppressions/cppcheck_new.supp
local cpperrors=`mktemp`
echo -ne "\tchecking for cppcheck errors...\n"
touch $root/tools/suppressions/cppcheck.supp
local includes="-I src"
if [ -d mcommon/src ] ; then
includes="$includes -I mcommon/src"
fi
cppcheck -q $includes \
--inconclusive --std=c++11 --language=c++ \
--suppressions-list=$root/tools/suppressions/cppcheck.supp \
--suppress=missingIncludeSystem --enable=all \
--template='{line}: {message} Suppression: {id}:{file}:{line}' \
--error-exitcode=1 \
$commit_file 2> $cpperrors
if [ -s $cpperrors ]; then
while read line; do
if [[ ! $line =~ "unmatchedSupp" && ! $line =~ ^\ +$ ]] ; then
if [[ $line =~ "Suppression: " ]] ; then
local new_supp=$(echo "$line" | sed 's/^.*Suppression: //p')
new_supp="${new_supp//"$commit_file"/"$file_pattern"}"
echo "$new_supp" >> $cppcheck_new_supp
line=`echo -n "$line" | sed 's/Suppression: .*$//'`
fi
fail "cppcheck" "$line"
fi
done < $cpperrors
fi
rm $cpperrors
local tmp=`mktemp`
sort $cppcheck_new_supp | uniq > $tmp
/bin/mv $tmp $cppcheck_new_supp
}
function fail {
local message=$1
local output=$2
failures+=("$message: $file:$output")
echo -en "\t\tfailed $output\n"
}
function print_results {
if [ ${#failures[@]} -ne 0 ]; then
echo -ne "\nTest Failures\n\n"
for failure in "${failures[@]}" ; do
echo -e "\t$failure"
done
return 1
fi
}
function get_files {
if is_git_controlled ; then
git diff-index --cached --name-only $against
else
find ./src -name "*.[hc]pp" -printf 'src/%P\n'
fi
}
function git_checks {
if is_git_controlled ; then
if test $(git diff --cached --name-only --diff-filter=A -z $against |
LC_ALL=C tr -d '[ -~]\0' | wc -c) != 0 ; then
rc=-1
fi
exec git diff-index --check --cached HEAD
fi
}
function set_root {
root=`git rev-parse --show-toplevel 2> /dev/null`
if [ $? -ne 0 ] ; then
root=.
fi
export root
}
function is_git_controlled {
git rev-parse --show-toplevel &> /dev/null
}
function is_commit_ammend {
local amend_re='.*commit.*--amend'
local cmd=`ps -ocommand= -p $PPID`
if [[ "$cmd" =~ $amend_re ]] ; then
return 0
else
local alsout=`git config --get-regexp '^alias\..*' "$amend_re"`
if [ $? == 0 ] ; then
local als=`echo $alsout | sed -n 's/alias\.\(.*\)/\1/p' \
| awk '{print $1}'`
local als_re="git\s$als"
if [[ "$cmd" =~ $als_re ]] ; then
return 0
fi
fi
fi
return 1
}
function set_against {
if is_git_controlled ; then
if git rev-parse --verify HEAD >/dev/null 2>&1 ; then
if is_commit_ammend ; then
export against=HEAD~1
else
export against=HEAD
fi
else
export against=4b825dc642cb6eb9a060e54bf8d69288fbee4904
fi
fi
}
# Snapshot the to-be-committed content of $file into a temp file created
# next to it.  Sets two globals:
#   commit_file  - path of the temp copy
#   file_pattern - glob matching both the temp copy and the real file
# In a git repo the staged (index) version is used, not the worktree.
function get_commit_file {
    local dir=$(dirname $file)
    local base=$(basename $file)
    commit_file=$(mktemp -p $dir --suffix=$base)
    file_pattern="$dir/*$base"
    if is_git_controlled ; then
        git show :$file > $commit_file
    else
        cat $file > $commit_file
    fi
}
# Entry point: run all pre-commit checks; report success, or abort the
# commit with a non-zero status.
if ! main ; then
    exit -1
fi
echo "pre-commit pass"
| true
|
6c4a64f24676fd2d8732841cc7609d7ef9d07a9b
|
Shell
|
apana/MyChi
|
/ChiAnalysis/bin/unfolding/toBatch.sh
|
UTF-8
| 811
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Batch wrapper for CreateResponseMatrix.py: runs the unfolding job from
# the LSF submission directory, optionally on a train/test split.
#
# Usage: toBatch.sh [outputfile] [inputfile] [split]
#   split: 1 -> --train sample, 2 -> --test sample, anything else -> full sample
#
# echo "Number of arguments: " $#
if [ $# -lt 2 ]
then
    echo "Usage: toBatch.sh [outputfile] [inputfile] "
    echo ""
    # exit non-zero so the batch system sees the misuse as a failure
    exit 1
fi
EXEC=CreateResponseMatrix.py
outfile=$1
infile=$2
Split=$3
echo
echo ">>> Beginning ${EXEC} execution on `date` <<<"
echo
# LS_SUBCWD is set by LSF to the directory the job was submitted from
cmsswDir=$LS_SUBCWD
cd "$cmsswDir" || exit 1
echo "Current directory: $cmsswDir"
echo $PWD
echo ""
## setup root ###########################
. setup_root.sh
## run the job ###################################
# Fix: $Split must be quoted — with the original unquoted test an omitted
# third argument turned the comparison into a [ syntax error (which then
# silently fell through to the else branch).
if [ "$Split" = "1" ]; then
    python ${EXEC} -n 0 --train "${outfile}" "${infile}"
elif [ "$Split" = "2" ]; then
    python ${EXEC} -n 0 --test "${outfile}" "${infile}"
else
    python ${EXEC} -n 0 "${outfile}" "${infile}"
fi
##################################################
echo
echo ">>> Ending ${EXEC} execution on `date` <<<"
echo
exit
| true
|
4a66c78ae8c6cc9008c55081de75a2eb2ba72a36
|
Shell
|
vikekh/fantasio-torrent
|
/scripts/backup.sh
|
UTF-8
| 740
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Back up the transmission docker volumes into tar archives.
# https://docs.docker.com/storage/volumes/#backup-restore-or-migrate-data-volumes
#
# Usage: backup.sh <backup-dir> <env-file>
#   <env-file> must define TRANSMISSION_CONTAINER_NAME,
#   TRANSMISSION_RSS_CONTAINER_NAME, TRANSMISSION_CONFIG_VOLUME_NAME and
#   TRANSMISSION_DATA_VOLUME_NAME.
if [ -z "$1" ]; then
    echo "No argument"
    exit 1
fi
# Fail early when the environment file is missing instead of sourcing
# nothing and stopping/archiving with empty names.
if [ ! -f "$2" ]; then
    echo "Missing env file"
    exit 1
fi
# DATE=$(date '+%Y-%m-%d')
# BACKUP_DIR="${1}/${DATE}"
BACKUP_DIR=$1
mkdir -p "$BACKUP_DIR"
# rm -r "${BACKUP_DIR}"/*
. "$2"
# Stop the containers so the tarballs are consistent snapshots.
docker stop "$TRANSMISSION_CONTAINER_NAME"
docker stop "$TRANSMISSION_RSS_CONTAINER_NAME"
docker run --rm -v "$TRANSMISSION_CONFIG_VOLUME_NAME":/data -v "$BACKUP_DIR":/backup ubuntu tar cvf /backup/"$TRANSMISSION_CONFIG_VOLUME_NAME".tar /data
docker run --rm -v "$TRANSMISSION_DATA_VOLUME_NAME":/data -v "$BACKUP_DIR":/backup ubuntu tar cvf /backup/"$TRANSMISSION_DATA_VOLUME_NAME".tar /data
docker start "$TRANSMISSION_CONTAINER_NAME"
docker start "$TRANSMISSION_RSS_CONTAINER_NAME"
| true
|
d07ab449fd8c2b4d4e2a362cd119d239fe8c4d24
|
Shell
|
boogeygan/sudo-dfs
|
/FileServer/run.sh
|
UTF-8
| 471
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# Compile and start the RMI FileServer, then remove the class files.
#
# Usage: ./run.sh <IP of RMI Registry> <Port of rmiregistry>
if [ $# -ne 2 ]
then
    echo -n "Invalid Number of arguments."
    echo "Usage: ./run.sh <IP of RMI Registry> <Port of rmiregistry>"
    exit 1;
fi
echo
# Valid TCP ports are 1..65535; the original upper bound (-ge 65537)
# wrongly accepted 65536.
if [ "$2" -le 0 ] || [ "$2" -gt 65535 ]
then
    echo "Invalid Port Number"
    exit 1
fi
echo
echo "***Compiling***"
javac *.java
echo
echo "***Compilation Completed***"
echo
echo "Generating stub"
rmic FileServer
echo
echo "***Starting Server*** with IP:'$1' and Port:'$2' "
echo
java FileServer "$1" "$2"
rm *.class
| true
|
4b0e671d634f5942ffab022f8a1ea39e7752e835
|
Shell
|
Boolector/boolector
|
/test/log/translateaxioms.sh
|
UTF-8
| 208
| 3.140625
| 3
|
[
"MIT",
"LicenseRef-scancode-dco-1.1"
] |
permissive
|
#!/bin/sh
# For every bit-width and every *.axiom file in the current directory,
# regenerate the corresponding SMT benchmark smtaxiom<name><width>.smt
# via translateaxiom.sh.
for width in 1 2 3 4 5 6 7 8 16 32 64; do
    for axiom in *.axiom; do
        base=$(basename $axiom .axiom)
        name=smtaxiom$base$width.smt
        echo $name
        rm -f $name
        ./translateaxiom.sh $axiom $width > $name
    done
done
| true
|
bb3c09a132e9d0e38d3e9f5c06c982c92a0b5e5d
|
Shell
|
gpuigros/jenkinstest
|
/scripts/src/main/groovy/com/hotelbeds/jenkins/scripts/ace-package.sh
|
UTF-8
| 347
| 2.984375
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Build the ace-hotel-daemon RPM and copy it into the Jenkins workspace.
# Expects $WORKSPACE, $RPM_RELEASE and $BUILD_VERSION in the environment
# (set by the Jenkins job); $1 is the destination directory, relative to
# $WORKSPACE, for the renamed RPM.
RPM_DESTINATION=$1
# Drop any RPM left from a previous build so the find below is unambiguous.
rm -f /opt/ace/packages/RPMS/x86_64/ace-hotel-daemon*.rpm
cd /opt/ace/project/build
make dist-daemon BUILD_NUMBER=$RPM_RELEASE;
cd /opt/ace/packages/RPMS/x86_64
RPM_FILE=$(find ace-hotel-daemon*.rpm)
cp /opt/ace/packages/RPMS/x86_64/$RPM_FILE ${WORKSPACE}/$RPM_DESTINATION/ace-hotel-daemon-$BUILD_VERSION.x86_64.rpm
# NOTE(review): exit 0 masks any failure of the steps above — confirm
# the Jenkins job really wants this stage to always report success.
exit 0;
| true
|
42eab694fc57d775f5577bde789f4f212cd8b957
|
Shell
|
xcat2/confluent
|
/confluent_osdeploy/suse15/profiles/server/scripts/setupssh.sh
|
UTF-8
| 1,647
| 3.09375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Carry over install-time ssh material into the installed system, which
# is mounted under /mnt, and enable host-based authentication.
mkdir -p /mnt/root/.ssh/
chmod 700 /mnt/root/.ssh/
cp /root/.ssh/authorized_keys /mnt/root/.ssh/
# Fix: the original read "chmd 600", a typo that silently failed and
# left authorized_keys with default permissions.
chmod 600 /mnt/root/.ssh/authorized_keys
cp /etc/ssh/*key* /mnt/etc/ssh/
for i in /etc/ssh/*-cert.pub; do
    echo HostCertificate $i >> /mnt/etc/ssh/sshd_config
done
for i in /ssh/*.ca; do
    echo '@cert-authority *' $(cat $i) >> /mnt/etc/ssh/ssh_known_hosts
done
# Enable ~/.shosts, for the sake of root user, who is forbidden from using shosts.equiv
echo IgnoreRhosts no >> /mnt/etc/ssh/sshd_config
echo HostbasedAuthentication yes >> /mnt/etc/ssh/sshd_config
echo HostbasedUsesNameFromPacketOnly yes >> /mnt/etc/ssh/sshd_config
echo Host '*' >> /mnt/etc/ssh/ssh_config
echo "    HostbasedAuthentication yes" >> /mnt/etc/ssh/ssh_config
echo "    EnableSSHKeysign yes" >> /mnt/etc/ssh/ssh_config
# Limit the attempts of using host key. This prevents client from using 3 or 4
# authentication attempts through host based attempts
echo "    HostbasedKeyTypes *ed25519*" >> /mnt/etc/ssh/ssh_config
# In SUSE platform, setuid for ssh-keysign is required for host based,
# and also must be opted into.
echo /usr/lib/ssh/ssh-keysign root:root 4711 >> /mnt/etc/permissions.local
chmod 4711 /mnt/usr/lib/ssh/ssh-keysign
# Download list of nodes from confluent, and put it into shosts.equiv (for most users) and .shosts (for root)
curl -f -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $(cat /etc/confluent/confluent.apikey)" https://$confluent_mgr/confluent-api/self/nodelist > /tmp/allnodes
cp /tmp/allnodes /mnt/root/.shosts
cp /tmp/allnodes /mnt/etc/ssh/shosts.equiv
| true
|
4c720127018d4eb4613c77d7de9ad36093ca4265
|
Shell
|
Ponce/slackbuilds
|
/development/azuredatastudio/azuredatastudio.SlackBuild
|
UTF-8
| 3,298
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# Slackware build script for azuredatastudio
# Copyright 2023 Martin Bångens Sweden
# All rights reserved.
#
# Redistribution and use of this script, with or without modification, is
# permitted provided that the following conditions are met:
#
# 1. Redistributions of this script must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Run from the script's own directory; $CWD is where the sources live.
cd $(dirname $0) ; CWD=$(pwd)
PRGNAM=azuredatastudio
VERSION=${VERSION:-1.44.1}
BUILD=${BUILD:-1}
TAG=${TAG:-_SBo}
PKGTYPE=${PKGTYPE:-tgz}
if [ -z "$ARCH" ]; then
  case "$( uname -m )" in
    i?86) ARCH=i586 ;;
    arm*) ARCH=arm ;;
    *) ARCH=$( uname -m ) ;;
  esac
fi
# SBo convention: print the would-be package file name and exit.
if [ ! -z "${PRINT_PACKAGE_NAME}" ]; then
  echo "$PRGNAM-$VERSION-$ARCH-$BUILD$TAG.$PKGTYPE"
  exit 0
fi
TMP=${TMP:-/tmp/SBo}
PKG=$TMP/package-$PRGNAM
OUTPUT=${OUTPUT:-/tmp}
# Upstream ships x86_64 binaries only.
if [ "$ARCH" != "x86_64" ]; then
  echo this is a x86_64 program only, you can set ARCH=x86_64
  exit 1
fi
set -e
rm -rf $PKG
mkdir -p $TMP $PKG $OUTPUT
cd $TMP
rm -rf $PRGNAM-linux-x64
# The download may be named either "stable" or a versioned tarball.
tar xvf $CWD/stable || tar xvf $CWD/$PRGNAM-linux-$VERSION.tar.gz
cd $PRGNAM-linux-x64
chown -R root:root .
install -d $PKG/usr/share/$PRGNAM
cp -a ./* $PKG/usr/share/$PRGNAM
# Symlink the startup script in /usr/bin
install -d $PKG/usr/bin
ln -s /usr/share/$PRGNAM/bin/azuredatastudio $PKG/usr/bin/$PRGNAM
# Add the icon and desktop file
cp ./resources/app/resources/linux/code.png $PRGNAM.png
install -D -m644 $CWD/$PRGNAM.desktop $PKG/usr/share/applications/$PRGNAM.desktop
install -D -m644 ./$PRGNAM.png \
  $PKG/usr/share/icons/hicolor/1024x1024/apps/$PRGNAM.png
# hicolor icon sizes to generate from the 1024x1024 original
res=(
  512x512
  256x256
  192x192
  128x128
  96x96
  72x72
  64x64
  48x48
  40x40
  36x36
  32x32
  24x24
  22x22
  20x20
  16x16
  )
for _res in "${res[@]}"; do
  convert -resize $_res $PRGNAM.png $PRGNAM-scaled.png
  install -D -m644 ./$PRGNAM-scaled.png \
  $PKG/usr/share/icons/hicolor/$_res/apps/$PRGNAM.png
done
# Strip all ELF binaries and shared objects in the package tree.
find $PKG -print0 | xargs -0 file | grep -e "executable" -e "shared object" | grep ELF \
  | cut -f 1 -d : | xargs strip --strip-unneeded 2> /dev/null || true
mkdir -p $PKG/usr/doc/$PRGNAM-$VERSION
chmod 644 ./resources/app/LICENSE.txt
chown root:root ./resources/app/LICENSE.txt
cp -a \
  ./resources/app/LICENSE.txt \
  $PKG/usr/doc/$PRGNAM-$VERSION
cat $CWD/$PRGNAM.SlackBuild > $PKG/usr/doc/$PRGNAM-$VERSION/$PRGNAM.SlackBuild
mkdir -p $PKG/install
cat $CWD/slack-desc > $PKG/install/slack-desc
cat $CWD/doinst.sh > $PKG/install/doinst.sh
cd $PKG
/sbin/makepkg -l y -c n $OUTPUT/$PRGNAM-$VERSION-$ARCH-$BUILD$TAG.$PKGTYPE
| true
|
a4229ff5b74f8fb7d04637ae401e511da43c54bf
|
Shell
|
noahmorrison/bin
|
/update_colors
|
UTF-8
| 604
| 2.875
| 3
|
[] |
no_license
|
#!/bin/sh
# Reload ~/.Xresources and push the new palette to every open terminal
# via OSC escape sequences, restart the ibar status bar, and reapply the
# wallpaper (or a solid background when none is configured).
xrdb ~/.Xresources
# foreground
cmd="\033]10;#$(qxr foreground)\007"
# background
cmd="${cmd}\033]11;#$(qxr background)\007"
# cursor
cmd="${cmd}\033]12;#$(qxr color15)\007"
# highlight background
cmd="${cmd}\033]17;#$(qxr color15)\007"
# highlight foreground
cmd="${cmd}\033]19;#$(qxr color0)\007"
# the 16 ANSI palette entries
for i in $(seq 0 15);
do
    cmd="${cmd}\033]4;${i};#$(qxr color${i})\007"
done
# Write the sequences to every pty; glob instead of parsing `ls` output.
for term in /dev/pts/*
do
    echo -n $cmd > $term
done
killall ibar
ibar &
# Fix: the original `test -n` applied to an *unquoted* empty command
# substitution collapsed to `test -n` with no operand, which is always
# true — so a missing wallpaper ran `feh ""` instead of xsetroot.
if test -n "$(qxr wallpaper)"
then
    feh --bg-scale "$(qxr wallpaper)"
else
    xsetroot -solid "#$(qxr background)"
fi
| true
|
d7602f1a749b1c05c709f1be818782a21c5beb2b
|
Shell
|
gzlock/nest_nuxt_template
|
/build.sh
|
UTF-8
| 421
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Fix: the original first line read "#bin /bin/bash", which is not a
# valid shebang — the script ran under whatever shell invoked it.
#
# Rebuild and restart the nest/nuxt docker-compose stack from the
# directory this script lives in, then prune dangling images.
DIRNAME=$0
# Resolve the script's directory whether $0 is absolute or relative.
if [ "${DIRNAME:0:1}" = "/" ];then
    CURDIR=`dirname $DIRNAME`
else
    CURDIR="`pwd`"/"`dirname $DIRNAME`"
fi
echo $CURDIR
# cd to the project directory
cd $CURDIR
# build the Docker images
docker-compose build nest nuxt
# stop the Docker containers
docker-compose stop nuxt nest
# start the Docker containers
docker-compose up -d
# remove old (dangling) Docker images
docker rmi $(docker images | grep "none" | awk '{print $3}')
| true
|
71036f7fd0b381a465e51be0d66bc41ed0ee9ff5
|
Shell
|
psgivens/MiscellaneousLinux
|
/Desktop/.setup/bootstrap.sh
|
UTF-8
| 1,469
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstrapping the environment: install base tools, clone this repo,
# symlink dotfiles, set up vim/pathogen plugins and the Microsoft apt
# feed, then drop into tmux to continue the setup interactively.
### Install tools
sudo apt update
sudo apt install -y git
sudo apt install -y curl
sudo apt install -y vim
sudo apt install -y tmux
### Git this repo
mkdir -p ~/Repos/psgivens
cd ~/Repos/psgivens
git clone https://github.com/psgivens/MiscellaneousLinux.git misc.git
cd misc.git/Desktop
### Symbolic link config files
ln -s "$(pwd)/.vimrc" ~/.vimrc
ln -s "$(pwd)/.tmux.conf" ~/.tmux.conf
mkdir -p ~/.config/powershell
ln -s "$(pwd)/.config/powershell/profile.ps1" ~/.config/powershell/profile.ps1
### Configure vim for pathogen
sudo apt install -y vim-pathogen
mkdir -p ~/.vim/autoload ~/.vim/bundle && \
curl -LSso ~/.vim/autoload/pathogen.vim https://tpo.pe/pathogen.vim
### Install vim environment
git clone https://github.com/ctrlpvim/ctrlp.vim.git ~/.vim/bundle/ctrlp.vim
git clone https://github.com/jpalardy/vim-slime.git ~/.vim/bundle/vim-slime
git clone https://github.com/mattn/emmet-vim.git ~/.vim/bundle/emmet-vim
# Import the public repository GPG keys
curl https://packages.microsoft.com/keys/microsoft.asc | sudo apt-key add -
curl http://packages.microsoft.com/config/ubuntu/18.04/prod.list | sudo tee /etc/apt/sources.list.d/microsoft.list
sudo apt-get update
# Install PowerShell
#sudo apt-get install -y powershell
### Use slime to install rest
cd .setup
tmux
|
e8f0800c3a6cd878da9818c2196ef3b9789b9127
|
Shell
|
stevenharradine/checksite
|
/checksite.sh
|
UTF-8
| 582
| 3.453125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# (c) 2018 Steven Harradine
# Poll a web page, compare its MD5 hash to the cached one in
# /tmp/sitehash, and send a notification e-mail (sendEmail via gmail
# SMTP) when the page has changed.
#
# Usage: checksite.sh <url> <to-addr> <from-addr> <smtp-user> <smtp-password>
site=$1
cachedHash=`cat /tmp/sitehash`
newHash=`curl $site | md5sum | cut -d' ' -f1`
email_to=$2
email_from=$3
email_config_user=$4
email_config_password=$5
email_subject="New update to the page $site"
email_body="New update to the page $site"
if [[ "$cachedHash" == "$newHash" ]]; then
    echo "no change"
else
    echo "Missmatch"
    echo "$newHash" > /tmp/sitehash
    # Fix: the original passed the literal string "email_config_password"
    # (missing $) as the SMTP password, so authentication always failed.
    sendEmail -f "$email_from" -t "$email_to" -u "$email_subject" -s smtp.gmail.com:587 -o tls=yes -xu "$email_config_user" -xp "$email_config_password" -m "$email_body"
fi
| true
|
7f3a01299d29edde7d1bba1158b1faf8c776a2ce
|
Shell
|
spark-2020/utils
|
/installation/golang-dev-machine.sh
|
UTF-8
| 3,143
| 3.140625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Provision a Debian/Ubuntu developer VM: Chrome Remote Desktop + Xfce,
# browsers, Google Cloud SDK, AWS CLI, VS Code and Go 1.14.6.
# Intended to be run once, with sudo rights, on a fresh machine.
# Get the latest package list
sudo apt update
# Do the updates
sudo apt-get update
# install wget
sudo apt install -y software-properties-common apt-transport-https wget
# Download the Debian Linux Chrome Remote Desktop installation package:
wget https://dl.google.com/linux/direct/chrome-remote-desktop_current_amd64.deb
# Install the package and its dependencies:
sudo dpkg --install chrome-remote-desktop_current_amd64.deb
sudo apt install -y --fix-broken
# Cleanup remove the unnecessary file after the installation is done:
rm chrome-remote-desktop_current_amd64.deb
# install xcfe
sudo DEBIAN_FRONTEND=noninteractive \
apt install -y xfce4 xfce4-goodies desktop-base
# Configure Chrome Remote Desktop to use Xfce by default:
sudo bash -c 'echo "exec /etc/X11/Xsession /usr/bin/xfce4-session" > /etc/chrome-remote-desktop-session'
# Xfce's default screen locker is Light Locker, which doesn't work with Chrome Remote Desktop.
# install XScreenSaver as an alternative:
sudo apt install -y xscreensaver
# Install Firefox browser
sudo apt -y install firefox
# Install Chrome browser
wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb
sudo dpkg --install google-chrome-stable_current_amd64.deb
sudo apt install -y --fix-broken
# Cleanup remove the unnecessary file after the installation is done:
rm google-chrome-stable_current_amd64.deb
# Disable the display manager service:
# There is no display connected to the VM --> the display manager service won't start.
sudo systemctl disable lightdm.service
# Install the Google Cloud SDK
echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" | sudo tee -a /etc/apt/sources.list.d/google-cloud-sdk.list
sudo apt-get install apt-transport-https ca-certificates gnupg
curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key --keyring /usr/share/keyrings/cloud.google.gpg add -
sudo apt-get update
sudo apt-get install -y google-cloud-sdk
# END Install the Google Cloud SDK
# Install AWS CLI
# This is needed to interact with AWS resources
# Download the installation file
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
# Unzip the installer
# NOTE(review): relies on the `unzip` binary, which this script never
# installs — confirm it is present on the base image.
unzip awscliv2.zip
# Run the install program
sudo ./aws/install
# Cleanup: remove the zip file for the aws installer
rm awscliv2.zip
# END Install AWS CLI
# Install Visual Studio Code
sudo snap install --classic code
# install Golang
# Download the code
# This will install Go v1.14.6
wget https://golang.org/dl/go1.14.6.linux-amd64.tar.gz
# Install Golang in the folder /usr/local
sudo tar -C /usr/local -xvf go1.14.6.linux-amd64.tar.gz
# Cleanup remove the installation file
rm go1.14.6.linux-amd64.tar.gz
# create a copy of the orginal /etc/profile file
sudo cp /etc/profile /etc/profile.vanila
# Configure the Go PATH (for all users)
echo '' | sudo tee -a /etc/profile > /dev/null
echo "# Configure the GOPATH for Golang " | sudo tee -a /etc/profile > /dev/null
echo 'export PATH=$PATH:/usr/local/go/bin' | sudo tee -a /etc/profile > /dev/null
# END install Golang
| true
|
e87ea5fbef7a48518c3fcf26b5447968252ca3da
|
Shell
|
cloux/sin
|
/modules/ec2-tools/inst/ec2-benchmark-osboot.sh
|
UTF-8
| 3,134
| 4.15625
| 4
|
[
"WTFPL",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh
#
# Benchmark AWS EC2 - Operating System's startup time in seconds.
# Measured as time between instance state reported as "running"
# and SSH availability.
# For higher precision, several cycles should be run and averaged.
#
# (cloux@rote.ch)
# your aws-cli profile
profile="default"
# how many boot cycles
cycles=5
###############################################################
instance_id="$1"
if [ -z "$instance_id" ]; then
    printf 'Usage: ec2-benchmark-osboot.sh INSTANCE-ID\n'
    exit
fi
# Fill the instance_* global info variables from `aws ec2
# describe-instances` (text output, parsed by line tag and column).
get_instance_info () {
    INSTANCE_INFO=$(aws ec2 describe-instances --profile $profile --instance-ids="$instance_id" 2>/dev/null)
    instance_type=$(printf '%s' "$INSTANCE_INFO" | grep 'INSTANCES\s' | cut -f 10)
    instance_subnet=$(printf '%s' "$INSTANCE_INFO" | grep 'PLACEMENT\s' | cut -f 2)
    instance_state=$(printf '%s' "$INSTANCE_INFO" | grep 'STATE\s' | cut -f 3)
    instance_uri=$(printf '%s' "$INSTANCE_INFO" | grep 'INSTANCES\s' | cut -f 15)
}
# Start the instance and poll until its state is "running".
start_instance () {
    printf 'Start instance ... '
    aws ec2 start-instances --profile $profile --instance-ids="$instance_id" 2>/dev/null >/dev/null
    while true; do
        get_instance_info
        [ "$instance_state" = "running" ] && break
        sleep 0.1
    done
    printf 'OK\n'
}
# Wait for the instance's public DNS name and sanity-check it.
get_instance_URI () {
    if [ -z "$instance_uri" ]; then
        printf 'Wait for URI ... '
        while [ -z "$instance_uri" ]; do
            get_instance_info
            sleep 0.1
        done
        # Fix: was `echo 'OK\n'`, which prints a literal backslash-n in
        # bash; use printf like every other status message here.
        printf 'OK\n'
    fi
    if [ "$(printf '%s' "$instance_uri" | grep 'compute.*\.amazonaws\.com')" ]; then
        printf ' Instance URI: %s\n' "$instance_uri"
    else
        printf 'Error: invalid instance URI: %s\n' "$instance_uri"
        exit
    fi
}
# Poll until the SSH port answers with a host key.
wait_for_ssh () {
    printf ' Wait for SSH '
    while true; do
        #nc -w 1 -4z "$instance_uri" 22 2>/dev/null >/dev/null; [ $? -eq 0 ] && break
        [ "$(ssh-keyscan -4 -T 1 "$instance_uri" 2>/dev/null)" ] && break
        printf '.'
    done
    printf ' OK\n'
}
# Stop the instance and poll until its state is "stopped".
stop_instance () {
    printf ' Stop instance ... '
    aws ec2 stop-instances --profile $profile --instance-ids="$instance_id" 2>/dev/null >/dev/null
    while [ "$instance_state" != "stopped" ]; do
        get_instance_info
        sleep 0.2
    done
    printf 'OK\n'
}
##
## Benchmark
##
# check instance state: the benchmark only starts from "stopped"
get_instance_info
if [ "$instance_state" != "stopped" ]; then
    if [ -z "$instance_state" ]; then
        printf 'Instance %s not found in AWS "%s" profile.\n' "$instance_id" "$profile"
    else
        printf 'Instance %s is %s.\n' "$instance_id" "$instance_state"
        printf 'Stop the instance and then start the benchmark again.\n'
    fi
    exit
fi
printf '==========================================\n'
printf 'Benchmarking instance: %s\n' "$instance_id"
printf '         Type: %s\n' "$instance_type"
printf '       Subnet: %s\n' "$instance_subnet"
printf '==========================================\n'
for i in $(seq 1 $cycles); do
    printf ' Boot cycle: %s of %s\n' "$i" "$cycles"
    start_instance
    START=$(date +%s.%N)
    get_instance_URI
    wait_for_ssh
    END=$(date +%s.%N)
    stop_instance
    # elapsed seconds with one decimal place, computed by bc
    printf ' Bootup Time: \033[1;95m%s\033[0m sec\n' "$(printf 'scale=1; (%s - %s)/1\n' "$END" "$START" | bc)"
    printf '==========================================\n'
done
| true
|
55db5dcc97943c353fef0c6a24331d3869f88ed8
|
Shell
|
periket2000/api_gw
|
/scripts/git_clone.sh
|
UTF-8
| 347
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/sh
# Clone $GIT_REPO into $PROJECT_DIR, install its Python requirements in
# the project virtualenv, and launch the app in the background.
# Expects in the environment: GIT_REPO, PROJECT_DIR, SRC_DIR, APP_FILE.
echo "... cloning ${GIT_REPO} ..."
cd ${PROJECT_DIR}
git -c http.sslVerify=false clone ${GIT_REPO}
# Derive the checkout directory name from the repo URL (strip ".git").
GIT_DIR=$(echo ${GIT_REPO##*/} | cut -d. -f1)
cd ${GIT_DIR}
# Activate the virtualenv before installing/running anything.
. ${PROJECT_DIR}/load_venv.sh
echo "... installing requirements ..."
pip install -r requirements.txt
export PYTHONPATH=${PROJECT_DIR}/${GIT_DIR}
# Start the application detached; the script returns immediately.
python ${SRC_DIR}/${APP_FILE} &
| true
|
279654b0c9c3cb3ef8ef19f86f41701937a052f6
|
Shell
|
szepeviktor/box
|
/.travis/install-ev.sh
|
UTF-8
| 275
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# Download, build and install the PECL "ev" PHP extension, then enable
# it in the currently loaded php.ini.
#
# Credits: https://github.com/amphp/amp/blob/8283532/travis/install-ev.sh
#
curl -LS https://pecl.php.net/get/ev | tar -xz
pushd ev-*
phpize
./configure
make
make install
popd
echo "extension=ev.so" >> "$(php -r 'echo php_ini_loaded_file();')"
|
097ee3fbc51f61aae3b8c4fa0ccc85ccad467a61
|
Shell
|
glemaitre/imbalanced-learn
|
/build_tools/circle/push_doc.sh
|
UTF-8
| 1,033
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# This script is meant to be called in the "deploy" step defined in
# circle.yml. See https://circleci.com/docs/ for more details.
# The behavior of the script is controlled by environment variable defined
# in the circle.yml in the top level folder of the project.
# Docs from master go to dev/; docs from a release branch X.Y.X go to X.Y/.
if [ "$CIRCLE_BRANCH" = "master" ]
then
    dir=dev
else
    # Strip off .X (drop the last two characters of the branch name)
    dir="${CIRCLE_BRANCH::-2}"
fi
MSG="Pushing the docs to $dir/ for branch: $CIRCLE_BRANCH, commit $CIRCLE_SHA1"
cd $HOME
# Shallow, checkout-less clone: only the target dir is materialized below.
if [ ! -d $DOC_REPO ];
    then git clone --depth 1 --no-checkout "git@github.com:"$ORGANIZATION"/"$DOC_REPO".git";
fi
cd $DOC_REPO
# Sparse checkout restricted to the $dir subtree of gh-pages.
git config core.sparseCheckout true
echo $dir > .git/info/sparse-checkout
git checkout gh-pages
git reset --hard origin/gh-pages
# Replace the old generated docs with the freshly built HTML.
git rm -rf $dir/ && rm -rf $dir/
cp -R $HOME/imbalanced-learn/doc/_build/html $dir
# .nojekyll stops GitHub Pages from running the tree through Jekyll.
touch $dir/.nojekyll
git config --global user.email $EMAIL
git config --global user.name $USERNAME
git config --global push.default matching
git add -f $dir/
git commit -m "$MSG" $dir
git push origin gh-pages
echo $MSG
| true
|
f1f0617304bcd090a19765d3962980d24369b8c3
|
Shell
|
livingbio/log-server
|
/process.sh
|
UTF-8
| 740
| 3.609375
| 4
|
[] |
no_license
|
#! /bin/bash
#
# process.sh
# Copyright (C) 2014 vagrant <vagrant@vagrant-ubuntu-trusty-64>
#
# Distributed under terms of the MIT license.
#
# Convert each rotated request log to gzipped JSON, upload it to Google
# Cloud Storage, and remove the local copies; stop on the first failure.
bucket=tagtoo_rtb_log
# Iterate with a glob instead of parsing `ls` output; skip the files
# that are already compressed.
for file in request.log.*
do
    case $file in
        *.gz) continue ;;
    esac
    i_file=$file
    o_file=`echo ${file}.gz|sed 's/request.log/request.json/'`
    echo "process $i_file"
    python rtb_upload.py $i_file $o_file
    if [ $? -ne 0 ]
    then
        echo 'process error'
        break
    fi
    # Fix: the original backgrounded this echo with a stray trailing '&'.
    echo "processed $i_file"
    echo "start upload $o_file to gs://$bucket/$o_file"
    gsutil cp $o_file gs://$bucket/
    if [ $? -ne 0 ]
    then
        echo "upload error"
        break
    fi
    echo "upload success"
    echo "remove $i_file $o_file"
    rm $i_file $o_file
done
| true
|
4b97976799f748a02e21f81d32f0e9f18dc3fbb7
|
Shell
|
lescpsn/lescpsn
|
/study-notes/qxbuild/purus-install/purus-install.sh
|
UTF-8
| 1,445
| 3.296875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Install the "purus" project: clone the repo into $PURUS_DIR, copy the
# local config/assets into it, install the apt build dependencies, and
# install the Python requirements.
PURUS_DIR=~/qxdevelop/purus
PURUS_URL=https://git.quxun.io/core/purus.git
################################################################################
# Fresh-clone the repo (removing any previous checkout) and seed it with
# the local yaml configs, area database and fonts.  Returns non-zero if
# the clone fails.
function get_purus()
{
    if [ -e ${PURUS_DIR} ]; then
        rm -rf ${PURUS_DIR}
    fi
    git clone ${PURUS_URL} ${PURUS_DIR}
    if [ $? -ne 0 ]; then
        return 1
    fi
    cp -rf *.yaml ${PURUS_DIR}/
    cp -rf area_v3.bin ${PURUS_DIR}/
    cp -rf fonts ${PURUS_DIR}/
    mkdir -p ${PURUS_DIR}/logs/
    return 0
}
################################################################################
# Install the image-library headers needed to build the Python deps,
# using sudo only when not already root.
function pkg_install()
{
    username=`id -un`
    if [ "X${username}" == "Xroot" ]; then
        apt-get install libjpeg-dev libpng-dev libfreetype6-dev
    else
        sudo apt-get install libjpeg-dev libpng-dev libfreetype6-dev
    fi
    if [ $? -ne 0 ]; then
        return 1
    fi
    return 0
}
################################################################################
function main()
{
    get_purus
    if [ $? -ne 0 ]; then
        echo "Install python depend pkg error."
        return 1
    fi
    pkg_install
    if [ $? -ne 0 ]; then
        echo "Install python depend pkg error."
        return 1
    fi
    # NOTE(review): "reqirements.txt" looks like a typo for
    # requirements.txt, and it is resolved relative to the directory the
    # script is run from, not ${PURUS_DIR} — confirm both are intended.
    pip3 install -r reqirements.txt
    if [ $? -ne 0 ]; then
        echo "Install python module error."
        return 1
    fi
}
################################################################################
main "$@"
| true
|
661ee5fcd5f1ea559ea273f11ec3daec38351308
|
Shell
|
CitronTech/metaspot
|
/dev/deployLambda.sh
|
UTF-8
| 446
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Package and deploy a lambda: refresh the shared local modules inside
# the lambda's node_modules, zip its src/ tree, and hand off to deploy.js.
#   $1 - lambda directory to package
#   $2 - extra argument forwarded to deploy.js
#if [ "$#" = 2 ] && [ "$2" = "--create" ]; then
#fi
cd $1
rm lambda.zip
# Replace the vendored copies of the shared modules with fresh ones.
rm -r src/node_modules/dynamo
cp -r ../../modules/dynamo src/node_modules
rm -r src/node_modules/metaspot
cp -r ../../modules/metaspot src/node_modules
rm -r src/node_modules/utils
cp -r ../../modules/utils src/node_modules
rm -r src/node_modules/crawler
cp -r ../../modules/crawler src/node_modules
# Zip the src/ contents (not the src/ folder itself) into lambda.zip.
cd src
zip -r ../lambda.zip ./*
cd ..
node ./deploy.js $2
| true
|
91f72acd61e7af36781713f870160c07ffcac7b2
|
Shell
|
jlongstreet/glint-snes
|
/install.sh
|
UTF-8
| 1,463
| 3.453125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# glint-snes installer: fetch the component scripts from GitHub (branch
# selectable via GLINTSNESBASE/GLINTSNESBRANCH), run them, then clean up
# packages, history and logs.  Intended for a Raspberry Pi image build.
# Force Google DNS for install
echo "nameserver 8.8.8.8" | sudo tee /etc/resolv.conf
echo "nameserver 8.8.4.4" | sudo tee -a /etc/resolv.conf
# Set the glint-snes branch to pull from on github if it isn't already set
if [ -z "$GLINTSNESBASE" ]
then
    export GLINTSNESBASE=jlongstreet/glint-snes
fi
if [ -z "$GLINTSNESBRANCH" ]
then
    export GLINTSNESBRANCH=master
fi
# Pull down component scripts
curl -L https://raw.github.com/$GLINTSNESBASE/$GLINTSNESBRANCH/scripts/clean-pi.sh > $HOME/clean-pi.sh
curl -L https://raw.github.com/$GLINTSNESBASE/$GLINTSNESBRANCH/scripts/glint-snes.sh > $HOME/glint-snes.sh
curl -L https://raw.github.com/$GLINTSNESBASE/$GLINTSNESBRANCH/scripts/usb-mounting.sh > $HOME/usb-mounting.sh
curl -L https://raw.github.com/$GLINTSNESBASE/$GLINTSNESBRANCH/scripts/copy-roms.sh > $HOME/copy-roms.sh
# copy-roms.sh is kept around as a system command, the rest run once below.
sudo mv $HOME/copy-roms.sh /usr/local/bin/copy-roms.sh
sudo chmod +x /usr/local/bin/copy-roms.sh
# Run scripts
cd $HOME
bash clean-pi.sh
bash glint-snes.sh
bash usb-mounting.sh
# Remove scripts
rm $HOME/clean-pi.sh
rm $HOME/glint-snes.sh
rm $HOME/usb-mounting.sh
# Clear history
history -c
rm $HOME/.bash_history
# Clean up packages
sudo apt-get -y autoremove
sudo apt-get -y autoclean
sudo apt-get -y clean
# Purge log files
sudo rm -rf `find /var/log/ . -type f`
# Reboot
echo "==============================================================="
echo "glint-snes install scripts done. You should probably reboot now."
|
ed049cbdef12ae307f91eaadd7c391e3ff8a1b07
|
Shell
|
johnnych7027/active_liveness
|
/pre-commit-hook.sh
|
UTF-8
| 325
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Pre-commit hook: reformat the staged Python files with black and
# re-stage them so the formatted version is what gets committed.
# Fix: the original probed for black by *running* `black` with no
# arguments (which invokes the formatter itself and always errors);
# probe with `command -v` instead.
command -v black >/dev/null 2>&1 || echo "you do not have black. should install: pip3 install black"
# run formater
git diff --diff-filter=d --cached --name-only | egrep '\.py$' | xargs black 2>/dev/null
# apply changes after formater
git diff --diff-filter=d --cached --name-only | egrep '\.py$' | xargs git add 2>/dev/null
| true
|
200094a0e3336ac3cff6092cc30367ce86e6d11c
|
Shell
|
DavidChatak/Assignment
|
/ASSIGNMENTS_CLARUSWAY/Assignment-case-study.sh
|
UTF-8
| 942
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Course assignment script: record an access-log entry, ensure python3
# is installed, then check (via embedded Python) whether the current
# directory is on $PATH.
clear
su
if [ $? == 0 ] ;
then
# NOTE(review): this `exit` makes the echo below unreachable, and
# `$whoami` is an (unset) variable, not the whoami command — confirm
# `echo "welcome $(whoami)"` before the exit was intended.
exit
echo "welcome $whoami"
else
echo "NO"
fi
# Create AccessLog.txt on first run, world-writable so later runs append.
control=$(find . -name AccessLog.txt | wc -l)
echo $control
if [ $control = 0 ]; then
    sudo touch AccessLog.txt
    sudo chmod 777 AccessLog.txt
fi
# Append "<seq> <user> <date>" as the next log line.
num=$(cat AccessLog.txt | wc -l )
date=$( date )
sudo echo "$(($num+1)) $USER $date" >> AccessLog.txt
# Install python3 when it is not already present (install is commented out).
control=$( sudo yum list installed | grep python3 | wc -l )
if [ $control = 0 ]; then
    echo "There is NO Python3....."
    echo "installing Python3...."
    sleep 1
    #sudo yum install -y python3
fi
# Build a small Python program that splits $PATH and reports whether the
# current directory appears in it.
currentdir=$( pwd )
p=$( echo $PATH )
code=$(cat<<END
r=False
print(r)
var = str('$p').split(":")
def odev(var):
    for i in var:
        print(i)
        if i=="$currentdir":
            print("------------------------")
            print("'$currentdir' in PATH")
            print("------------------------")
            r=True
            return r
    print(r)
odev(var)
END
)
python -c "$code"
# NOTE(review): pyt.py is never created by this script, and $r is a
# Python-side variable that the shell never sees — both lines below look
# like leftovers; confirm before relying on them.
python pyt.py
echo $r
| true
|
3e5c3183379181b8a60a661dc0571a3b7c47af74
|
Shell
|
jactor-rises/actions-gh-pages
|
/move/move.sh
|
UTF-8
| 1,730
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e
############################################
#
# Assumptions made by this script:
# - the github pages live under the docs folder
# - the most recently generated report goes under the docs/latest folder
# - older generated reports live under docs/generated/<date or timestamp>
#
# What this script does:
# 1) reads input from the caller (folder whose content is copied and moved)
# 2) creates a generated folder under docs/generated
# 3) copies the generated html into that generated folder
# 4) deletes the old generated html directly under the docs/latest folder
# 5) moves (overwriting the old) generated html to github pages (docs/latest)
#
############################################
if [[ $# -ne 1 ]]; then
    echo "Usage: report.sh [relative/path/to/html/folder/to/move]"
    exit 1;
fi
INPUT_FOLDER_MOVE_FROM=$1
PROJECT_ROOT="$PWD"
GH_PAGES_GENERATED="$PROJECT_ROOT/docs/generated"
# NOTE(review): GH_PAGES_RECENT points at docs/recent but is never used
# below (the script works on docs/latest) — confirm which is intended.
GH_PAGES_RECENT="$PROJECT_ROOT/docs/recent"
if [[ ! -d "$PROJECT_ROOT/$INPUT_FOLDER_MOVE_FROM" ]]; then
    echo ::error:: "unable to locate folder to move from $PROJECT_ROOT/$INPUT_FOLDER_MOVE_FROM"
    exit 1;
fi
# Archive folder named by date; fall back to date+time when it already exists.
GENERATED_FOLDER=$(date +"%Y-%m-%d")
if [[ -d "$GH_PAGES_GENERATED/$GENERATED_FOLDER" ]]; then
    GENERATED_FOLDER=$(date +"%Y-%m-%d.%T")
fi
echo "Flytter html fra mappe $PROJECT_ROOT/$INPUT_FOLDER_MOVE_FROM til mappe $PROJECT_ROOT/docs/latest"
echo "Oppretter også en kopi i $PROJECT_ROOT/docs/generated/$GENERATED_FOLDER"
mkdir ${PROJECT_ROOT}/docs/generated/${GENERATED_FOLDER}
cp -R ${PROJECT_ROOT}/${INPUT_FOLDER_MOVE_FROM}/* ${PROJECT_ROOT}/docs/generated/${GENERATED_FOLDER}/.
# Empty docs/latest, then move the new report in.
cd ${PROJECT_ROOT}/docs/latest && ls | xargs rm -rf
sudo mv ${PROJECT_ROOT}/${INPUT_FOLDER_MOVE_FROM}/* .
| true
|
522c030b4b200927545e306a719dd9d9e7054143
|
Shell
|
kfirlavi/bashlibs
|
/src/bashlibs-os-detection/test/test_os_detection.sh
|
UTF-8
| 3,132
| 3.328125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# shunit2 test suite for bashlibs os_detection.sh.  The tests fake a
# distro by writing release files under a /tmp-rooted /etc (via
# set_root_path) and then assert on the is_*/version helpers.
$(bashlibs --load-base)
include shunit2_enhancements.sh
include directories.sh
include os_detection.sh
# Point the library's release-file lookup at /tmp/etc.
create_root_path() {
    set_root_path /tmp
    mkdir -p $(release_file_dir)
}
clean_root_path() {
    safe_delete_directory_from_tmp $(release_file_dir)
    unset_root_path
}
# Write a fake Ubuntu 12.10 lsb-release file under the test root.
create_ubuntu_lsb_release_file() {
    clean_root_path
    create_root_path
    cat <<- EOF > $(ubuntu_release_file)
	DISTRIB_ID=Ubuntu
	DISTRIB_RELEASE=12.10
	DISTRIB_CODENAME=quantal
	DISTRIB_DESCRIPTION="Ubuntu 12.10"
	EOF
}
# Write a fake Gentoo release file under the test root.
create_gentoo_release_file() {
    cat <<- EOF > $(gentoo_release_file)
	Gentoo Base System release 2.2
	EOF
}
switch_distro_to_ubuntu() {
    clean_root_path
    create_root_path
    create_ubuntu_lsb_release_file
}
switch_distro_to_gentoo() {
    clean_root_path
    create_root_path
    create_gentoo_release_file
}
test_release_file_dir() {
    unset_root_path
    returns "/etc" "release_file_dir"
    set_root_path /tmp
    returns "/tmp/etc" "release_file_dir"
}
test_is_ubuntu() {
    switch_distro_to_ubuntu
    return_true "is_ubuntu"
    switch_distro_to_gentoo
    return_false "is_ubuntu"
}
test_ubuntu_version() {
    switch_distro_to_ubuntu
    returns '12.10' "ubuntu_version"
    switch_distro_to_gentoo
    returns_empty "ubuntu_version"
}
test_ubuntu_version_msb() {
    switch_distro_to_ubuntu
    returns 12 "ubuntu_version_msb 12.04"
}
test_ubuntu_version_lsb() {
    switch_distro_to_ubuntu
    returns 04 "ubuntu_version_lsb 12.04"
}
test_is_ubuntu_version_equal_to() {
    switch_distro_to_ubuntu
    return_true "is_ubuntu_version_equal_to 12.10"
    return_false "is_ubuntu_version_equal_to 9.04"
}
# NOTE(review): $UBUNTU_DISTRO_FILE is not defined anywhere in this file
# — presumably exported by one of the included libs; verify.
test_is_ubuntu_newer_then() {
    switch_distro_to_ubuntu
    return_true "is_ubuntu_newer_then 9.04 $UBUNTU_DISTRO_FILE"
    return_true "is_ubuntu_newer_then 12.04 $UBUNTU_DISTRO_FILE"
    return_false "is_ubuntu_newer_then 12.10 $UBUNTU_DISTRO_FILE"
}
test_is_ubuntu_newer_or_equal_to() {
    switch_distro_to_ubuntu
    return_true "is_ubuntu_newer_or_equal_to 9.04 $UBUNTU_DISTRO_FILE"
    return_true "is_ubuntu_newer_or_equal_to 12.04 $UBUNTU_DISTRO_FILE"
    return_true "is_ubuntu_newer_or_equal_to 12.10 $UBUNTU_DISTRO_FILE"
    return_false "is_ubuntu_newer_or_equal_to 13.10 $UBUNTU_DISTRO_FILE"
}
test_is_gentoo() {
    switch_distro_to_gentoo
    return_true "is_gentoo"
    switch_distro_to_ubuntu
    return_false "is_gentoo"
}
test_distro_name() {
    switch_distro_to_ubuntu
    returns ubuntu "distro_name"
    switch_distro_to_gentoo
    returns gentoo "distro_name"
}
test_ubuntu_distro_number() {
    returns 19.04 "ubuntu_distro_number disco"
    returns 18.04 "ubuntu_distro_number bionic"
    returns 16.04 "ubuntu_distro_number xenial"
    returns 15.04 "ubuntu_distro_number vivid"
    returns 4.10 "ubuntu_distro_number warty"
}
test_ubuntu_distro_name() {
    returns disco "ubuntu_distro_name 19.04"
    returns bionic "ubuntu_distro_name 18.04"
    returns xenial "ubuntu_distro_name 16.04"
    returns vivid "ubuntu_distro_name 15.04"
    returns warty "ubuntu_distro_name 4.10"
}
# load shunit2
source /usr/share/shunit2/shunit2
| true
|
0ef4b01e06419508be733c5b1e79159156f302a8
|
Shell
|
umm-csci-3412-fall-2021/lab-4-erik-natasha
|
/summarize_tree/summarize_tree.sh
|
UTF-8
| 197
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Count the directories and regular files beneath the given path and
# report both totals (directory count first).
target=$1
dir_count=$(find "$target" -type d | wc -l)
file_count=$(find "$target" -type f | wc -l)
echo "There were" $dir_count "directories."
echo "There were" $file_count "regular files."
| true
|
aa2ffeb38c93cb0531466127b67ffcbee905b39b
|
Shell
|
kztool/dev-tools
|
/electron/build/osx-debug.sh
|
UTF-8
| 1,184
| 3.1875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Set up an Electron debug build checkout: install depot_tools next to
# this script, configure sccache/git caches, and gclient-sync the
# Electron source.  The actual gn/ninja build steps are kept commented.
ELECTRON_REPO=https://github.com/electron/electron
# cd the root path
# portable realpath replacement (macOS has no realpath by default)
realpath() { [[ $1 = /* ]] && echo "$1" || echo "$PWD/${1#./}"; }
ROOT=$(dirname "$(realpath "$0")")
cd ${ROOT}
# clone the depot_tools repository
rm -rf ${ROOT}/depot_tools
git clone https://chromium.googlesource.com/chromium/tools/depot_tools.git
# add depot_tools to the end of your PATH
export PATH=${PATH}:${ROOT}/depot_tools
export GIT_CACHE_PATH="${ROOT}/.git_cache"
mkdir -p "${GIT_CACHE_PATH}"
# sccache
export SCCACHE_BUCKET="electronjs-sccache"
export SCCACHE_TWO_TIER=true
# Fetch the Electron sources (this is the long step).
mkdir electron-gn && cd electron-gn
gclient config --name "src/electron" --unmanaged ${ELECTRON_REPO}
gclient sync --with_branch_heads --with_tags
#cd src/electron
#git remote remove origin
#git remote add origin ${ELECTRON_REPO}
#git branch --set-upstream-to=origin/master
#
## configuare
#cd ${ROOT}/electron-gn/src
#PWD=`pwd`
#export CHROMIUM_BUILDTOOLS_PATH=${PWD}/buildtools
#export GN_EXTRA_ARGS="${GN_EXTRA_ARGS} cc_wrapper=\"${PWD}/electron/external_binaries/sccache\""
#gn gen out/Debug --args="import(\"//electron/build/args/debug.gn\") $GN_EXTRA_ARGS"
#
## build
#ninja -C out/Debug electron
| true
|
13e18ddcfa3ef56a6f67c5b9a1e94f879e423a6e
|
Shell
|
Grenadingue/baluchon
|
/.baluchon.d/prompt.bash
|
UTF-8
| 664
| 3.171875
| 3
|
[] |
no_license
|
# Bash prompt fragment (sourced from bashrc): builds PS1 from the
# 'style' and 'colors' maps declared by the surrounding dotfiles.
prompt_color="\[${style[reset]}${style[bold]}\]"
session_color="\[${style[reset]}${colors[light-gray]}\]"
user_color="\[${style[bold]}${colors[dark-gray]}\]"
host_color="\[${style[bold]}${colors[dark-gray]}\]"
path_color="\[${style[reset]}${colors[blue]}\]"
end_of_prompt="\[${style[reset]}\]"

# highlight the hostname when connected over SSH
[[ "$SESSION_TYPE" == "remote/ssh" ]] && host_color="\[${colors[green]}\]"
# highlight the user when running as root
[[ "$USER" == "root" ]] && user_color="\[${colors[red]}\]"

if [[ "$color_support" == yes ]]; then
  PS1="${session_color}[${user_color}\u${session_color}@${host_color}\h${session_color}]:${path_color}\w${prompt_color} \$ ${end_of_prompt}"
else
  # plain fallback for terminals without colour support
  PS1='[\u@\h]:\w \$ '
fi
| true
|
54018674e66536c55ebf2ce488b86a6d7e1426f3
|
Shell
|
obongo-dokwub/Bash
|
/scpCopyFileToRemoteDir
|
UTF-8
| 203
| 3.125
| 3
|
[] |
no_license
|
#!/bin/sh
# Copy a local file into a directory on a remote host via scp.
#
# Usage: scpCopyFileToRemoteDir -f <local-file> -d <remote-dir>
#
# BUGFIX: getopts only supports single-character options, so the original
# case pattern "rd)" could never match and REMOTEFOLDER was never set;
# the remote directory is taken from -d (the optstring already declared d:).
while getopts f:rd: option
do
case "${option}"
in
f) LOCALFILE=${OPTARG};;
d) REMOTEFOLDER=${OPTARG};;
esac
done
# quote both expansions so paths with spaces survive
scp -P portnumber -i id_rsa "$LOCALFILE" username@remoteAddress:"$REMOTEFOLDER"
| true
|
454c2822355152792cd94de0a757b9f1ac9d8af2
|
Shell
|
mfdorst/scripts
|
/install-tailwind-cli
|
UTF-8
| 277
| 3.0625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Download the latest standalone tailwindcss CLI into ~/.local/bin.
#
# BUGFIX: quote every expansion (a $HOME containing spaces broke the
# original) and stop on failure so a failed download is not reported
# as a successful install.
set -euo pipefail

INSTALL_DIR="$HOME/.local/bin"
mkdir -p "$INSTALL_DIR"
pushd "$INSTALL_DIR" >/dev/null
# -O names the binary 'tailwindcss' regardless of the release asset name
wget -O tailwindcss https://github.com/tailwindlabs/tailwindcss/releases/latest/download/tailwindcss-linux-x64
chmod +x tailwindcss
popd >/dev/null
echo "tailwindcss installed to $INSTALL_DIR"
| true
|
5accebf54a3d1b558a1fa5f1d88d98f2b9c9e941
|
Shell
|
henrikstengaard/hstwb-installer
|
/launcher/amibian/amibian.sh
|
UTF-8
| 987
| 3.9375
| 4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Amibian Launcher
# ----------------
# Author: Henrik Noerfjand Stengaard
# Date: 2020-04-05
#
# bash script to show amibian launcher.

# main menu loop: redrawn after each action until the user exits
while true; do
  # show main menu; --stdout prints the chosen tag on stdout
  choices=$(dialog --clear --stdout \
    --title "HstWB Installer for Amibian v$AMIBIAN_VERSION" \
    --menu "Select option:" 0 0 0 \
    1 "Run Amiga emulator" \
    2 "Midnight Commander" \
    3 "Setup" \
    4 "System" \
    5 "Update" \
    6 "About" \
    7 "Exit")

  # BUGFIX: capture dialog's exit status BEFORE 'clear' runs — the
  # original tested $? after 'clear', so Cancel/ESC was never detected.
  dialog_status=$?
  clear

  # exit, if cancelled
  if [ $dialog_status -ne 0 ]; then
    exit
  fi

  for choice in $choices; do
    case $choice in
      1)
        # emulator entry point differs per Amibian version
        case $AMIBIAN_VERSION in
          1.5)
            ./run-amiga-emulator.sh
            ;;
          1.4.1001)
            # NOTE(review): bare '3' looks like a leftover/typo in the
            # original; kept to preserve behavior — TODO confirm intent.
            3
            ;;
        esac
        ;;
      2)
        mc
        ;;
      3)
        pushd setup >/dev/null
        ./setup.sh
        popd >/dev/null
        ;;
      4)
        pushd system >/dev/null
        ./system.sh
        popd >/dev/null
        ;;
      5)
        ./update.sh
        # restart script, if updated
        if [ $? -eq 0 ]; then
          exec "$0"
        fi
        ;;
      6)
        ./about.sh
        ;;
      7)
        exit
        ;;
    esac
  done
done
| true
|
c2f61ddee3c14760a5c9c8e7fd9d1414f07c5c94
|
Shell
|
tiancheng91/dotfiles
|
/bin/justintv
|
UTF-8
| 833
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/zsh
# Resolve a justin.tv channel's RTMP streams and print an
# "rtmpdump ... | mplayer -" command line for the requested resolution.
#
# Usage: justintv <channel> [password] <resolution>
channel=$1
# password is optional: 2 args = (channel, resolution),
# 3 args = (channel, password, resolution)
if [[ -z "$3" ]]; then
password=""
resolution=$2
else
password=$2
resolution=$3
fi
# follow the embed player redirect to discover the real swf URL
swfaddr=$(curl 2>/dev/null -I "http://www.justin.tv/widgets/live_embed_player.swf?channel=$channel&publisherGuard=$password" | grep Location | awk -F': ' '{print $2}')
embed=$(echo $swfaddr | sed 's,?.*$,,')
r_swfaddr="$embed?channel=$channel"
arguments=""
# ask the usher API for stream candidates; ruby prints one
# "<type>: -r <rtmp-url> -j '<token>'" line per playable stream
ruby -rjson -ropen-uri -e "JSON.parse(URI.parse(\"http://usher.justin.tv/find/$channel.json?type=any&channel_subscription=foo&private_code=$password\").read).each {|a| puts \"#{a['type']}: -r #{a['connect']}/#{a['play']} -j '#{a['token']}'\" if a['connect']}" | \
while read line; do
# keep only the line matching the requested resolution and rewrite the
# leading "<type>:" prefix into an rtmpdump invocation.
# NOTE(review): this while runs in a pipeline subshell, so 'arguments'
# is effectively loop-local; the outer arguments="" is never updated.
arguments=$(echo "$line --swfVf $r_swfaddr -v -o - | mplayer -" | grep "$resolution")
[ ! -z $arguments ] && echo $arguments | sed "s,^\w:,rtmpdump,"
done
| true
|
b8d4d92b0b2a9dcc1e6ecdaf43cca9bd4075b848
|
Shell
|
rpelisse/bug-clerk-report-job
|
/run.sh
|
UTF-8
| 853
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the BugClerk report generator against a JIRA filter via Maven.
# Required env: TRACKER_PASSWORD (and TRACKER_USERNAME for login).
# Optional env: BUGCLERK_VERSION, TRACKER_TYPE, FILTER_URL, REPORT_FILENAME.
readonly BUGCLERK_VERSION=${BUGCLERK_VERSION:-'0.8.0-SNAPSHOT'}
readonly TRACKER_PASSWORD=${TRACKER_PASSWORD}
readonly TRACKER_USERNAME=${TRACKER_USERNAME}
readonly TRACKER_TYPE=${TRACKER_TYPE:-'jira'}
# fail fast when no credentials were provided
# NOTE(review): TRACKER_USERNAME is not validated the same way — TODO confirm
if [ -z "${TRACKER_PASSWORD}" ]; then
echo "Missing Bugzilla Password for user."
exit 1
fi
# 12326686 = saved JIRA filter id "EAP7 unresolved"
readonly EAP7_UNRESOLVED='12326686'
readonly FILTER_URL_ENDPOINT='https://issues.jboss.org/rest/api/latest/filter'
readonly FILTER_URL=${FILTER_URL:-"${FILTER_URL_ENDPOINT}/${EAP7_UNRESOLVED}"}
readonly REPORT_FILENAME=${REPORT_FILENAME:-'bugclerk-report.html'}
# hand everything to the Maven exec plugin as system properties
mvn exec:java "-Dbugclerk.version=${BUGCLERK_VERSION}" "-Dbugclerk.filter.url=${FILTER_URL}" \
"-Dbugclerk.report.filename=${REPORT_FILENAME}" "-Djboss.set.user.password=${TRACKER_PASSWORD}" \
"-Djboss.set.user.login=${TRACKER_USERNAME}" "-Dbugclerk.tracker.type=${TRACKER_TYPE}"
| true
|
dcff8d50d32025ebb8386be7b4b815fd26300fbc
|
Shell
|
drisss/2016
|
/code/extraction_tweets/tweets_per_event.sh
|
UTF-8
| 834
| 4.28125
| 4
|
[] |
no_license
|
#!/bin/bash
# For every .json.tsv file under the given path, create a *_per_event
# directory of per-event tweet files via tweets_per_event.lua.
HELP="Syntax: $0 N path/to/train_euro2016/
For every .json.tsv file in the provided path, creates an associated
folder containing files for tweets following events after N minutes.
Syntax of the files: the first line reports the event, the following
are the tweets retrieved."

if (( $# != 2 ))
then
  printf "%s\n" "$HELP" >&2
  exit
fi

path=$2
time_shift=$1

# BUGFIX: quote all path expansions and read with -r so file names with
# spaces or backslashes survive; strip the suffix with parameter
# expansion instead of the fragile 'rev | cut -b 10- | rev' trick.
find "$path" -name "*.json.tsv" | while IFS= read -r file
do
  base_filename=${file%.json.tsv}
  annotation_file=${base_filename}.tsv
  if ! test -f "$annotation_file"
  then
    printf "No file named %s\n" "$annotation_file" >&2
  else
    printf "Directory %s\n" "$base_filename"
    mkdir -p "${base_filename}_per_event"
    ./tweets_per_event.lua "$time_shift" "$annotation_file" < "$file"
    if (( $? != 0 ))
    then
      exit 1
    fi
  fi
done
| true
|
85879446b71268775bb69f35b542a2ec61145009
|
Shell
|
life1347/havana-auto-deploy
|
/all-in-one/control/diff-configs.sh
|
UTF-8
| 402
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Collect a unified diff of every /etc file against its "<file>.<ext>"
# backup into one report file.
#
# $1 - backup extension to look for (default: orig)
# $2 - output report file (default: ./config.diff)
ext=${1:-orig}
out_file=${2:-./config.diff}

# truncate the report
> "$out_file"

# BUGFIX: the original 'for item in $(find ...)' word-split paths
# containing whitespace; iterate NUL-delimited instead.
find /etc -name "*.${ext}" -print0 | while IFS= read -r -d '' item ; do
  path=${item%.$ext}
  echo "#================================================================================" >> "$out_file"
  # non-zero diff status just means "differences found" — not an error
  diff -u "$path.$ext" "$path" >> "$out_file"
done
echo "#================================================================================" >> "$out_file"
| true
|
c0cfe52c555b801b57a0a3918abbc26cee4831b4
|
Shell
|
zpervan/QtVideoEditor
|
/Scripts/install.sh
|
UTF-8
| 1,243
| 3.734375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Install all build/runtime dependencies of the QtVideoEditor app on
# Ubuntu 20.04 (Qt5, OpenCV, clang). Must be run as root.
# NOTE(review): "QyVideoEditor" in the banner looks like a typo — confirm.
echo "This script installs all needed dependencies to successfully run the QyVideoEditor application on Ubuntu 20.04!"
echo "Run this script with sudo privileges in order to install the libraries!"
if [[ $(whoami) != root ]]; then
echo -e " -- \e[31mPlease run this script as root or using sudo\e[0m"
exit
fi
# While installing tzlib, it demands user interaction which stops the installation pipeline. This is a workaround
# to skip the user input section.
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends tzdata
echo -e " -- \e[33mFetching updates\e[0m"
apt update -qq
echo -e " -- \e[33mInstalling dependencies\e[0m"
apt install -y build-essential \
clang-11 \
libfmt-dev \
qtbase5-dev \
qtdeclarative5-dev \
qtmultimedia5-dev \
qtquickcontrols2-5-dev \
qml-module-qtquick-controls2 \
qml-module-qtmultimedia \
libqt5multimedia5-plugins \
libqt5svg5-dev \
libopencv-dev
# NOTE(review): the trailing "\e" below is an unterminated escape (no
# colour code / reset) — probably meant "\e[33m...\e[0m".
echo -e " -- \e[33mNew symlink to OpenCV4 library\e"
# There is an issue where the symlink is not correct and creates errors while compiling the OpenCV4 code
rm /usr/local/include/opencv2
ln -s /usr/include/opencv4/opencv2 /usr/local/include/opencv2
echo -e " -- \e[32mSetup completed!\e[0m"
| true
|
4763aced4832673e48d10abb5386a7ea732aa296
|
Shell
|
Team-CodeVid/Final-Project
|
/Jenkins/Scripts/Docker/build_images.sh
|
UTF-8
| 851
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# CI helper: rebuild the petclinic REST + Angular images from fresh
# clones and push them to Docker Hub.
# Required env: DOCKER_USERNAME, DOCKER_PASSWORD.
#Install dependencies required to build
sudo apt install openjdk-8-jre maven default-jre -y
#Go into our Docker directory
cd Docker
#Prune our system by force
docker system prune -f
#Remove any previous repos
sudo rm -r spring-petclinic-angular
sudo rm -r spring-petclinic-rest
#Clone our REST repo (contains our Dockerfile already)
git clone https://github.com/Team-CodeVid/spring-petclinic-rest
#Clone our Angular repo
git clone https://github.com/Team-CodeVid/spring-petclinic-angular
#Run docker-compose to build both our images
docker-compose build --no-cache
#Log in to our dockerhub account
# NOTE(review): passing -p on the command line leaks the password into
# 'ps' and shell history; prefer 'docker login --password-stdin'.
sudo docker login docker.io -u $DOCKER_USERNAME -p $DOCKER_PASSWORD
# Push the images to docker
sudo docker-compose push
# Remove the app git repos to save space.
sudo rm -r spring-petclinic-rest
sudo rm -r spring-petclinic-angular
| true
|
de911b4491519d9703c988805af252ee4bfee696
|
Shell
|
chris-barry/bin
|
/focus.sh
|
UTF-8
| 1,036
| 4.125
| 4
|
[
"Unlicense"
] |
permissive
|
#!/bin/sh
# Helps you keep track of time.
# Depends on libnotify
# TODO play a sound when time changes
# TODO argument to send signal that will close program
# Just incase you send it to bg
#trap "notify-send $APP "Timer ended"; exit 0;" SIGINT
# TODO some sort of constant notification of what mode you're in?
# Save to $TIMER ?
APP="Focus"

# Print CLI help and exit successfully.
usage() {
  echo "Options"
  echo " -h Show this message"
  echo " -f [time] Fun time"
  echo " -d [time] Distract time"
  exit 0
}

# Parse -f/-d minute values, then alternate focus/distraction
# notifications forever.
main() {
  DISTRACT=15
  FOCUS=45
  while getopts ":f:d:h" OPTION
  do
    case $OPTION in
      h) usage ;;
      f) FOCUS=$OPTARG ;;
      d) DISTRACT=$OPTARG ;;
      ?) echo "Invalid argument."; usage ;;
    esac
  done
  # convert minutes to seconds for sleep
  FOCUS=$(($FOCUS*60))
  DISTRACT=$(($DISTRACT*60))
  # let "FOCUS *= 60"
  # let "DISTRACT *= 60"
  while true; do
    notify-send $APP "It's time to focus for $(($FOCUS / 60)) minutes!"
    echo "Focus"
    sleep $FOCUS
    notify-send $APP "You can be distracted for $(($DISTRACT / 60)) minutes."
    echo "Distraction"
    sleep $DISTRACT
  done
  exit 0
}

# BUGFIX: use "$@" instead of $* so each argument is passed as one word;
# bare $* re-splits arguments on whitespace.
main "$@"
| true
|
5517620c5ad0a961c0d309f3c6614afde07977b6
|
Shell
|
l0th3r/road66
|
/scripts/build.sh
|
UTF-8
| 530
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# BUILD SCRIPT FILE
# Script to build a version of the game /
# Produces a dated road66_build_MM-DD-YYYY/ folder containing the
# executable, event data, the Windows launcher and the config file.
#
make build
echo
echo
echo
# date-stamped output folder name
f_date=$(date +'%m-%d-%Y')
f_name="road66_build_${f_date}"
file_name="road66"
echo "== creating folder"
mkdir ${f_name}
echo "== importing executable"
# make writes the binary to 'make_out'; rename it into the bundle
mv make_out ./${f_name}/${file_name}
echo "== import events"
cp -avr ./event/ ./${f_name}/
echo "== import window script"
cp -avr ./scripts/window_launcher.bat ./${f_name}/
cd ${f_name}
mkdir config
echo "== importing config"
cp -avr ../config/config.cfg ./config/
| true
|
615ebc680e41f4c2829d26030e48991ddaa34d21
|
Shell
|
kaushalnavneet/cd-pipeline-kubernetes
|
/scripts/managedworkers/logdna_excludes.sh
|
UTF-8
| 505
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
#checks if LOGDNA_EXCLUDE has already been added and if not patches and restarts the daemon-set
# jq -e exits non-zero when no LOGDNA_EXCLUDE env var is found, which
# triggers the patch branch via ||; deleting the pods makes the
# DaemonSet recreate them with the new env var.
kubectl get ds logdna-agent -n ibm-observe -ojson | jq -e '.spec.template.spec.containers[0].env[] | select(.name=="LOGDNA_EXCLUDE")' ||
(
kubectl patch ds logdna-agent -n ibm-observe --type "json" -p '[{"op":"add","path":"/spec/template/spec/containers/0/env/-","value":{"name":"LOGDNA_EXCLUDE","value":"/var/log/containers/pw-*/**"}}]' &&
kubectl delete pod -l app=logdna-agent -n ibm-observe
)
| true
|
f616a37d4cd0dae4f1ae457d86b50e736748c2cd
|
Shell
|
Acquati/shellscript-cheat-sheet
|
/install-homestead
|
UTF-8
| 1,077
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Install Laravel Homestead: VirtualBox + Vagrant box + Homestead repo.
# NOTE(review): several lines below are Homestead.yaml fragments pasted
# into the script; they are not valid shell and will fail as commands.
echo "Installing Homestead."
echo 'deb http://download.virtualbox.org/virtualbox/debian zesty contrib' >> /etc/apt/sources.list
wget -q https://www.virtualbox.org/download/oracle_vbox_2016.asc -O- | sudo apt-key add -
sudo apt-get -y update
sudo apt-get install -y virtualbox-5.1
sudo apt-get install -y dkms
# Install Vagrant
# https://www.vagrantup.com/downloads.html
vagrant box add laravel/homestead
git clone https://github.com/laravel/homestead.git $HOME/Homestead
cd $HOME/Homestead
git checkout v6.3.0
bash init.sh
subl Homestead.yaml
mkdir $HOME/code
echo '
192.168.10.10 teste.dev' >> /etc/hosts
# NOTE(review): the next 3 lines belong in Homestead.yaml, not here
sites:
- map: teste.dev
to: /home/vagrant/code/teste/public
cd $HOME/code
laravel new teste
# NOTE(review): YAML fragment again — not executable shell
sites:
- map: homestead.localhost
to: /home/vagrant/code/teste/public
ssh-keygen -t rsa -b 4096 -C "your_email@example.com"
eval "$(ssh-agent -s)"
ssh-add -k ~/.ssh/id_rsa
# Create the virtual machine
vagrant up
# Reload the hots
vagrant reload --provision
# Enter in the virtual machine
vagrant ssh
# Destroy the virtual machine
vagrant destroy --force
| true
|
1795a869351475d861a932639f0e6f17133153c4
|
Shell
|
aoleary/device_lge_g4-common
|
/extract-files.sh
|
UTF-8
| 1,166
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Copyright (C) 2016 The CyanogenMod Project
# Copyright (C) 2017-2020 The LineageOS Project
#
# SPDX-License-Identifier: Apache-2.0
#

set -e

DEVICE_COMMON=g4-common
VENDOR=lge

# Load extractutils and do some sanity checks
MY_DIR="${BASH_SOURCE%/*}"
if [[ ! -d "$MY_DIR" ]]; then MY_DIR="$PWD"; fi

ANDROID_ROOT="$MY_DIR/../../.."

HELPER="$ANDROID_ROOT/tools/extract-utils/extract_utils.sh"
if [ ! -f "$HELPER" ]; then
    echo "Unable to find helper script at $HELPER"
    exit 1
fi
. "$HELPER"

# Pick the blob source: a dumped ROM path, or the device over adb.
case $# in
    0)
        SRC=adb
        ;;
    1)
        SRC=$1
        ;;
    *)
        echo "$0: bad number of arguments"
        echo ""
        echo "usage: $0 [PATH_TO_EXPANDED_ROM]"
        echo ""
        echo "If PATH_TO_EXPANDED_ROM is not specified, blobs will be extracted from"
        echo "the device using adb pull."
        exit 1
        ;;
esac

# Initialize the helper for common device
setup_vendor "$DEVICE_COMMON" "$VENDOR" "$ANDROID_ROOT" true

extract "$MY_DIR"/proprietary-files.txt "$SRC"

# Initialize the helper for device
setup_vendor "$DEVICE" "$VENDOR" "$ANDROID_ROOT"

extract "$MY_DIR"/../$DEVICE/proprietary-files.txt "$SRC"

"$MY_DIR"/setup-makefiles.sh
| true
|
77c45c65abf858bf4cbc5b669e37d4c6feb9b00b
|
Shell
|
davidvic99/6aDavid
|
/script2.sh
|
UTF-8
| 261
| 3.40625
| 3
|
[] |
no_license
|
# Ask for a number greater than 0 and report whether it is even or odd.
echo "Escribe un valor mayor que 0:"
read num
# keep prompting until the value is positive
while [ $num -le 0 ];do
echo "Vuelve a añadir un numero, debe ser mayor de 0:"
read num
done
# remainder of the division by two decides even/odd
resto=$((num % 2))
if [ $resto -eq 0 ];then
echo "El $num es par"
else
echo "El $num es impar"
fi
| true
|
b4eb088fb764f8cc248c78c7465e052be9512f09
|
Shell
|
TheMengLab/Si-C
|
/analysis/compartment/compartment/1/do.sh
|
UTF-8
| 789
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the matrixdiff coefficient tool on the Hi-C matrix for chromosome $1.
# Copies matrix.dat from ../../<id>/ and feeds it, its state count and
# the output name to matrixdiff_nodark_coef.o on stdin.
id=${1}
#grep ^chr19 ../GSM2123564_Haploid_mESC_population_hic.txt | grep -P "\t"chr19 | awk '{print $2/50000,$5/50000,$7}' > contact.dat
#grep -P ^chr${id}"\t" ../../../GSM2123564_Haploid_mESC_population_hic.txt | grep -P "\t"chr${id}"\t" | awk '{print $2,$5,$7}' > contact.dat
cp ../../${id}/matrix.dat .
# count of states = number of the last line as reported by nl
# NOTE(review): nl skips empty lines by default, so this can differ from
# 'wc -l' if matrix.dat contains blank lines — presumably intentional.
statenum=`nl matrix.dat | tail -n 1 | awk '{print $1}'`
# the three stdin lines answer the tool's three prompts (see below)
echo -e matrix.dat '\n' $statenum '\n' contactprob.dat | ./matrixdiff_nodark_coef.o
#Input the filename for original matrix:
#Input the number of states in the system:
#Input filename for output:
#echo -e contactprob.dat '\n' $statenum '\n' rowsum.dat | ./getrowsum.o
#
#
#echo -e contactprob.dat '\n' rowsum.dat '\n' corrmatrix.dat '\n' assign.dat | ./getcorrmatrix.o
#
#rm contact.dat
#rm blockmatrix.dat
| true
|
69241ccd0d6ee9f68543afcf92e34ac1c16f35be
|
Shell
|
Bato/microservices-experiments
|
/redis/spring-cloud-demo/docker-zipkin/cassandra/install.sh
|
UTF-8
| 1,080
| 2.96875
| 3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Image-build step: install Cassandra on Alpine, load the Zipkin schema,
# then stop Cassandra and strip build-only files.
set -eu
echo "*** Installing Cassandra"
curl -SL http://downloads.datastax.com/community/dsc-cassandra-$CASSANDRA_VERSION-bin.tar.gz | tar xz
mv dsc-cassandra-$CASSANDRA_VERSION/* /cassandra/
echo "*** Installing Python"
apk add python
# TODO: Add native snappy lib. Native loader stacktraces in the cassandra log as a results, which is distracting.
echo "*** Starting Cassandra"
/cassandra/bin/cassandra
# Wait up to 300s for cqlsh to be able to connect.
# BUGFIX: '[[ ]]' is a bashism; this script runs under /bin/sh (ash on
# Alpine) where it is not guaranteed — use POSIX '[ ]'.
timeout=300
while [ "$timeout" -gt 0 ] && ! /cassandra/bin/cqlsh -e 'SHOW VERSION' localhost >/dev/null 2>/dev/null; do
    echo "Waiting ${timeout} seconds for cassandra to come up"
    sleep 10
    timeout=$(($timeout - 10))
done
echo "*** Importing Scheme"
curl https://raw.githubusercontent.com/openzipkin/zipkin/$ZIPKIN_VERSION/zipkin-cassandra-core/src/main/resources/cassandra-schema-cql3.txt \
    | /cassandra/bin/cqlsh --debug localhost
echo "*** Stopping Cassandra"
pkill -f java
echo "*** Cleaning Up"
apk del python --purge
rm -rf /cassandra/javadoc/ /cassandra/pylib/ /cassandra/tools/ /cassandra/lib/*.zip
echo "*** Image build complete"
| true
|
fe1b4ddc0f55f41851cb1b9c26484491bdd1f323
|
Shell
|
fhill2/dotfiles
|
/deprecated/dotbot/setup_mysql.sh
|
UTF-8
| 587
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/sh
# Install and bootstrap MariaDB (plus the mycli client) on Arch Linux.
sudo pacman -S --needed mariadb
yay -S --needed mycli # autocompletion and syntax highlighting
# initialise the data directory before the first start
mariadb-install-db --user=mysql --basedir=/usr --datadir=/var/lib/mysql
# BUGFIX: the original ran 'systemctl mariadb.service' without a verb,
# which is an error; the unit must be started explicitly.
sudo systemctl start mariadb.service
mysql -u root -p
# atm I am not running mysql as a regular user
# https://dev.mysql.com/doc/mysql-security-excerpt/8.0/en/changing-mysql-user.html
# sudo chown -R f1 /var/lib/mysql
# create a new user
# https://wiki.archlinux.org/title/MariaDB
# CREATE USER 'f1'@'localhost' IDENTIFIED BY '.';
# GRANT ALL PRIVILEGES ON mydb.* TO 'f1'@'localhost';
# FLUSH PRIVILEGES;
# quit
| true
|
d5fd9b2f12dea4a97ee97193e01f3e69b381f81d
|
Shell
|
mcgarrigle/bin
|
/bashrc-os-linux
|
UTF-8
| 540
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# vim: set ft=bash syntax=on
# Linux-specific bashrc fragment: rpm key listing alias plus helpers for
# killing the shell timeout and toggling an OpenVPN tunnel.
alias rpm-keys="rpm -q gpg-pubkey --qf '%{NAME}-%{VERSION}-%{RELEASE}\t%{SUMMARY}\n'"
# kto: "kill timeout" — use gdb to unbind the read-only TMOUT variable
# in the current shell so the session never auto-logs-out.
function kto {
gdb --batch-silent --pid=$$ --eval-command='call unbind_variable("TMOUT")'
}
# td: tunnel down — kill the daemonised openvpn and remove its pidfile
function td {
sudo kill $(cat /var/run/openvpn.pid)
sudo rm -f /var/run/openvpn.pid
}
# tu: tunnel up — start openvpn as a daemon unless a tun device exists
function tu {
if ip link | grep -qP '^\d: tun\d:'; then
echo 'the tunnel is already up'
else
sudo openvpn --daemon vpn \
--config $HOME/.openvpn/openvpn.config \
--writepid /var/run/openvpn.pid
fi
}
| true
|
9ee65a56ea542b62a5fe52a2608273fb8ed8d604
|
Shell
|
arunvel1988/k8s-operator-ansible-demo
|
/demo/build-push-image.sh
|
UTF-8
| 895
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
#Shell script to build and push image for operator
# Prompts for repo/image name, builds & pushes the operator image,
# rewrites operator.yaml to reference it, deploys, then creates the CR
# once the operator pod is Running.

#Command to build operator image
echo -n "Enter repository name "
read repo_name
echo -n "Enter Operator image name "
read image_name

#command to build image
sudo operator-sdk build $repo_name/$image_name

#Command to push operator image
sudo docker push $repo_name/$image_name

#Command of replace image name in operator yaml
# BUGFIX: the original used single quotes (so $repo_name/$image_name
# were never expanded) and mixed '/' and '|' delimiters in one broken
# expression. Use double quotes and '|' throughout so the '/' in the
# image path does not clash with the delimiter.
sed -i "s|image-name|$repo_name/$image_name|g" ./deploy/operator.yaml

#command to create role
kubectl apply -f ./deploy/rbac.yaml
#command to create crd
kubectl apply -f ./deploy/crd.yaml
#command to create operator deployment
kubectl apply -f ./deploy/operator.yaml

# give the deployment time to schedule the pod
sleep 50

podstate=$(kubectl get pods | grep ansible-operator | awk '{print $3}')
# quote $podstate so an empty value does not break the test expression
if [ "$podstate" = "Running" ]
then
  kubectl create -f ./deploy/cr.yaml
else
  echo "Wait for Operator Pod to be in Running state"
fi
| true
|
da96af5fb9db17d7440a2e103442091d2ab29dae
|
Shell
|
subbus-g/shell-scripting-programs
|
/day-5-sequences-selections/1-sequence/5_1_unit_conversion.sh
|
UTF-8
| 593
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
#a)
no_of_inches=42
#scale=4 fixes no of digits after . to 4
no_of_feets=$(echo "scale=4; $no_of_inches / 12" | bc -l)
echo "42 inches = $no_of_feets feets"
#b)
length_in_ft=60
breadth_in_ft=40
area_in_ft=$(( length_in_ft * breadth_in_ft ))
#scale=4 fixes no of digits after . to 4
area_in_m=$(echo " scale=4 ; $area_in_ft * 0.3048 * 0.3048" | bc -l)
echo "Area of the rectangular plot is $area_in_m meters"
#c)
no_of_plots=25
#1acr=43560 sqft
area_in_acr=$(echo " scale=4 ; $area_in_ft * $no_of_plots / 43560" | bc -l)
echo "The Area of $no_of_plots plots is $area_in_acr acres"
| true
|
963387dc41b38b13d771008b9997edfbd92fbedf
|
Shell
|
ricardomfmsousa/shell-scripts
|
/dev-setup-pop!_os/setup/browsers.sh
|
UTF-8
| 975
| 3.0625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Setup web browsers
# Installs Firefox, Chromium, Brave (via its apt repo) and Google Chrome.
# Relies on apt-install / download-install-deb helpers from utils.sh
# and must NOT run as root (no-root guard below).
source ./functions/utils.sh && no-root

# Add Brave web browser repository
# Free and open-source privacy focused web browser by Brave Software,
# founded by the creator of Javascript and former CEO of Mozilla Corporation
apt-install apt-transport-https curl
# import Brave's signing key into a dedicated keyring
curl -s https://brave-browser-apt-release.s3.brave.com/brave-core.asc |
sudo apt-key --keyring /etc/apt/trusted.gpg.d/brave-browser-release.gpg add -
echo "deb [arch=amd64] https://brave-browser-apt-release.s3.brave.com/ stable main" |
sudo tee /etc/apt/sources.list.d/brave-browser-release.list
sudo apt update
PKGS=(
firefox # Free and open-source web browser developed by Mozilla
chromium # Open-source version of Google Chrome
brave-browser # Chromium-based open-source privacy focused web browser
)
apt-install ${PKGS[@]}

# Install latest Google Chrome
download-install-deb "https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb"
| true
|
f778041245fe1fc5a9569a8f1fd655894205adad
|
Shell
|
execdf/ambari-redis-cluster
|
/package/scripts/upgrade_ruby.sh
|
UTF-8
| 332
| 2.796875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# Install ruby + the redis gem: via apt on Ubuntu, via RVM elsewhere.
# Distro detection: first word of /etc/issue.
# NOTE(review): this is fragile — /etc/issue wording varies by release.
dist=$(tr -s ' \011' '\012' < /etc/issue | head -n 1)
if [ "$dist" = "Ubuntu" ]
then
apt-get -y install ruby
gem install redis
else
# non-Ubuntu path: install ruby 2.3.1 through RVM
gpg2 --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
# NOTE(review): piping a remote script to bash is a supply-chain risk
curl -L get.rvm.io | bash -s stable
source /etc/profile.d/rvm.sh
rvm install 2.3.1
rvm use 2.3.1 --default
gem install redis
fi
| true
|
e3347bd1807ed8ba667eff9c43671ac418e51e77
|
Shell
|
vidsy/assume-role
|
/terraform-wrapper.sh
|
UTF-8
| 771
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# Wrapper around the real terraform binary: refuses to run with an
# expired assumed role and injects "-var-file=$AWS_ENV.tfvars" for the
# sub-commands that accept it.

# Minutes left before the assumed role expires (negative = expired).
TIME_LEFT=$(ruby -e "require 'time'; puts ((Time.parse('$AWS_EXPIRATION') - Time.now) / 60).floor")

if [ "$TIME_LEFT" -lt "0" ]; then
  echo "Role has expired ($AWS_EXPIRATION), please exit this shell and start another"
  exit 1   # BUGFIX: 'exit -1' is not portable; any non-zero code works
fi

# Print the -var-file flag for the current environment, or nothing.
# BUGFIX: the original printed the flag unconditionally and only then
# checked for the file, appending an empty echo that did NOT retract it —
# terraform was always handed a flag pointing at a possibly missing file.
function var_file_path() {
  if [ -z "$VAR_FILE" ] && [ -f "$AWS_ENV.tfvars" ]; then
    echo "-var-file=$AWS_ENV.tfvars"
  fi
}

# intentionally unquoted below so an empty value expands to no argument
VAR_FILE="$(var_file_path)"

case $1 in
  plan|apply|destroy|refresh)
    terraform.real $@ $VAR_FILE
    ;;
  import)
    # import expects the var-file before the resource address
    terraform.real import $VAR_FILE ${@:2}
    ;;
  init)
    # force a clean re-init against the configured S3 backend
    rm -rf .terraform/terraform.tfstate*
    terraform.real init -backend=true -backend-config="bucket=$TERRAFORM_STATE_BUCKET" -backend-config="region=$AWS_REGION"
    ;;
  *)
    terraform.real $@
    ;;
esac
| true
|
d3cb25c6619748f787780ee28f96ec7df316a174
|
Shell
|
jdaeira/Arch-ISO-Plasma
|
/Scripts/145-setup-zsh.sh
|
UTF-8
| 793
| 3.09375
| 3
|
[] |
no_license
|
#! /bin/bash
set -e
########################################################################
# Author : John da Eira
# Email : jdaeira@gmail.com
# Installs zsh + oh-my-zsh and the autosuggestions / syntax-highlighting
# plugins into $ZSH_CUSTOM.
########################################################################
## Install Necessary Files
sudo pacman -S zsh --noconfirm --needed
sudo pacman -S curl --noconfirm --needed
## Install OH MY ZSH
# NOTE(review): this installer may exec a new zsh and stop the script;
# also $ZSH_CUSTOM below is only set inside zsh sessions — TODO confirm
# it is exported when this script runs.
sh -c "$(curl -fsSL https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)"
## Install Auto Suggestions Plugin
git clone https://github.com/zsh-users/zsh-autosuggestions.git $ZSH_CUSTOM/plugins/zsh-autosuggestions
## Install Syntax Highlighting Plugin
git clone https://github.com/zsh-users/zsh-syntax-highlighting.git $ZSH_CUSTOM/plugins/zsh-syntax-highlighting
echo "########### OH MY ZSH and Plugins Installed #############"
| true
|
0aca18121b86bd0d2fc37773884e35c003a5f5dc
|
Shell
|
chynten/home-setup
|
/apps/setup.sh
|
UTF-8
| 1,917
| 3.140625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Interactive home-cluster installer: asks which apps to deploy and runs
# each app's setup.sh from inside its own directory.
#
# Refactor: the original repeated the same read/case/cd/setup/cd block
# a dozen times; the prompts and behavior are unchanged.

# Run ./setup.sh inside directory $1, then return to the parent dir
# (same cd/<setup>/cd .. sequence the original used for every app).
run_setup() {
  cd "$1"
  sh ./setup.sh
  cd ..
}

# Install MySQL plus its client helper — a shared dependency.
install_mysql_stack() {
  run_setup mysql
  run_setup db-client
}

# Prompt "Do you wish to install <label> (y/n)?" and install from <dir>
# when the answer is exactly "y".
ask_and_install() {
  local label=$1 dir=$2 yn
  read -p "Do you wish to install $label (y/n)?" yn
  case $yn in
    y ) run_setup "$dir" ;;
  esac
}

read -p "Node Name (If multiple, then give node name which has local storage):" NODE_NAME
export NODE_NAME=$NODE_NAME

ask_and_install plex plex
ask_and_install tautulli tautulli

# MySQL stack is always installed
echo "Installing MySQL..."
install_mysql_stack

# nextcloud has extra logic: re-install MySQL when its namespace is gone
read -p "Do you wish to install nextcloud (y/n)?" yn
case $yn in
  y )
    if ! [ "$(kubectl get ns mysql -o jsonpath --template={.status.phase})" = 'Active' ];
    then
      install_mysql_stack
    fi
    run_setup files
    ;;
esac

ask_and_install transmission transmission
ask_and_install keycloak sso
ask_and_install timemachine timemachine
ask_and_install wordpress blog
ask_and_install Huginn agent
ask_and_install homebridge homebridge
ask_and_install "AdGuard Home" dns
| true
|
ffc62cedadbbaecccfdc97b918c103424f0d0568
|
Shell
|
sauliusg/grammatiker
|
/EBNF/tests/cases/lark-tree_001.sh
|
UTF-8
| 217
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/sh
# Test case driver: derive the script under test from this file's name
# (strip the ".sh" suffix and the trailing "_NNN" case number), run it
# on the ternary grammar fixtures and report OK / Parse error.
SCRIPT=$(basename $0 .sh | sed 's/_[0-9][0-9]*$//')
if ./scripts/${SCRIPT} \
tests/inputs/ternary.lark \
tests/inputs/ternary_1.txt
then
echo OK
else
echo Parse error
fi
| true
|
4719cc1913958ddb51b930208ad09e9b10152800
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/gtkplatform-git/PKGBUILD
|
UTF-8
| 681
| 2.75
| 3
|
[] |
no_license
|
# PKGBUILD for the git version of gtkplatform (Qt-on-GTK+ platform plugin).
# makepkg parses the metadata variables and calls pkgver/build/package.
pkgname=gtkplatform-git
pkgver=0.1.0.r8.1e6222f
pkgrel=1
pkgdesc="Run Qt applications using gtk+ as a windowing system"
arch=('i686' 'x86_64')
url="https://github.com/CrimsonAS/gtkplatform"
license=('LGPL3' 'GPL2')
depends=('gtk3' 'libnotify' 'qt5-base')
makedepends=('git')
provides=("${pkgname%-VCS}")
conflicts=("${pkgname%-VCS}")
source=('git+https://github.com/CrimsonAS/gtkplatform.git')
md5sums=('SKIP')
# derive "<tag>.r<commits>.<hash>" from git describe
pkgver() {
cd "$srcdir/${pkgname%-git}"
printf "%s" "$(git describe --tags --long | sed 's/\([^-]*-\)g/r\1/;s/-/./g')"
}
# standard qmake build
build() {
cd "$srcdir/${pkgname%-git}"
qmake
make
}
# install into the fakeroot packaging directory
package() {
cd "$srcdir/${pkgname%-git}"
make INSTALL_ROOT="$pkgdir/" install
}
| true
|
6ec23392899cff4ab77cc871c0913265e733f840
|
Shell
|
CartoDB/mobile-sdk
|
/scripts/travis/prerequisites.sh
|
UTF-8
| 1,478
| 2.921875
| 3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
# Travis CI prerequisites for the CARTO mobile SDK build:
# boost headers, CMake 3.10, a patched SWIG and (on Linux) the Android NDK.
set -e
rvm get head
echo '---- Updating submodules ----'
git submodule update --init --remote --recursive
echo '---- Downloading and setting up boost ----'
curl -o boost_1_77_0.zip -L https://sourceforge.net/projects/boost/files/boost/1.77.0/boost_1_77_0.zip/download
rm -rf boost_1_77_0
unzip boost_1_77_0.zip
cd libs-external
ln -s ../boost_1_77_0 boost
cd ../boost_1_77_0
./bootstrap.sh
./b2 headers
cd ..
echo '---- Downloading and installing CMake ----'
curl -o cmake-3.10.2.tar.gz -L https://cmake.org/files/v3.10/cmake-3.10.2.tar.gz
rm -rf cmake-3.10.2
tar xpfz cmake-3.10.2.tar.gz
cd cmake-3.10.2
./configure --prefix=`pwd`/dist
make
make install
export PATH=$PWD/dist/bin:$PATH
cd ..
echo '---- Downloading and installing SWIG ----'
rm -rf mobile-swig
git clone https://github.com/CartoDB/mobile-swig.git
cd mobile-swig
cd pcre
aclocal
automake
./configure --prefix=`pwd`/pcre-swig-install --disable-shared
make
make install
cd ..
./autogen.sh
./configure --disable-ccache --prefix=`pwd`/dist
make
# '|| true' is intentional: install is allowed to partially fail here
make install || true
export PATH=$PWD/dist/bin:$PATH
cd ..
if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
echo '---- Downloading and installing Android NDK r22b ----'
curl -L https://dl.google.com/android/repository/android-ndk-r22b-linux-x86_64.zip -O
rm -r -f android-ndk-r22b
unzip -q android-ndk-r22b-linux-x86_64.zip
rm android-ndk-r22b-linux-x86_64.zip
export ANDROID_NDK_HOME=`pwd`/android-ndk-r22b;
# export ANDROID_HOME=/usr/local/android-sdk
fi
| true
|
d4e16d96476c8348bbf4e5227906bf39bafd7209
|
Shell
|
rstyczynski/umc
|
/tools/linux/oradb/1.0/oradb
|
UTF-8
| 1,998
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
# Tomas Vitvar, tomas@vitvar.com
# umc "oradb" probe: validates the requested metric SQL file and runs
# sql-collector against the configured Oracle DB connection.
# this script directory
getSensorData_bin=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
# check umc was configured
if [ ! -d $umcRoot ]; then
echo >&2 "umc was not initialized!"
exit 1
fi
# configuration directory
configDir="$umcRoot/etc/tools/oradb"
args=("$@")
# Echo the argument matching $1 (or empty). Runs in $() so the inner
# 'exit 0' only leaves the subshell.
# NOTE(review): the loop bound 'i <= ${#args[@]}' reads one slot past the
# end of the array; harmless (empty) but looks like an off-by-one.
function getArgument {
for ((i=0; i <= ${#args[@]}; i++)); do
if [ "${args[i]}" == "$1" ]; then
echo "${args[i]}"
exit 0
fi
done
echo ""
}
# the first parameter is timestamp directive
# since we do not use it, we skip it (the value is None)
shift
# the next arguments are delay, count and metric
# metric argument is oradb probe specific
delay=$1 && shift
count=$1 && shift
metric=$1 && shift
config=$1 && shift
# load configuration
if [ -f $configDir/$config ]; then
source $configDir/$config
fi
# check that the metric file exists and print help if it does not
# this needs to go to err as otherwise would appear in csv when output is logged to a file
if [ ! -f "$configDir/$metric" ]; then
echo >&2 "Invalid metric file. There is no file with name $configDir/$metric."
echo >&2 "Usage: umc oradb collect <delay> <count> <metric-file> [--connect <constr>] [<sql-collector arguments>]"
echo >&2 "Available values for <metric-file> are:"
pwd=$(pwd)
cd $configDir
# list every *.sql metric with the description from its first line
ls | grep ".sql$" | \
while read line; do
printf " ${line%.*}%-$((20-$(expr length $line)))s $(cat $line | head -n 1 | grep "\-\-")\n"
done
cd $pwd
exit 1
fi
# db connection string, use the default if none cannot be found as per sql metric or is not specified as argument
connstr="";
if [ "$(getArgument "--connect")" == "" ] && [ "$ORADB_CONNSTR" != "" ]; then
connstr="--connect $ORADB_CONNSTR";
fi
# run sql collector
# global-args supplies extra flags, comment lines (#) filtered out
sql-collector $connstr --query $configDir/$metric \
--count $count \
--interval $delay \
--delimiter "$CSVdelimiter" \
$(cat $configDir/global-args | grep -v ^# ) \
"$*"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.