blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
e57636b0317243a3ceed6ae59c21422fdec40298
|
Shell
|
cmichi/visualizing-university-submissions
|
/process.sh
|
UTF-8
| 543
| 2.8125
| 3
|
[
"MIT",
"CC-BY-3.0"
] |
permissive
|
#!/bin/sh
# Extract "Datum der letzten Abgabe" (date of last submission) timestamps from
# the HTML file given as $1 and emit them as "YYYY/MM/D;H:MM:00" lines, with
# the month abbreviation replaced by its two-digit number.
# NOTE(review): the month list includes "Mar" rather than German "Mär"/"Mrz" —
# confirm against the actual HTML the upstream system produces.
main() {
	# Fixes: drop the useless `cat $1 |` (grep reads the file directly, and the
	# unquoted $1 broke on paths with spaces); fold the twelve month seds into
	# a single sed invocation (sequential -e expressions behave identically).
	grep "Datum der letzten" "$1" \
		| sed "s/<span class=\"small\">Datum der letzten Abgabe: //" \
		| sed "s/<br>//" \
		| sed "s/---//" \
		| awk 'NF > 0' \
		| sed 's/^\s*//g' \
		| sed 's/\.//g' \
		| sed 's/\,//g' \
		| awk '{ print $3 "/" $2 "/" $1";"$4":00" }' \
		| sed -e 's/Nov/11/g' -e 's/Dez/12/g' -e 's/Okt/10/g' -e 's/Sep/09/g' \
		      -e 's/Jan/01/g' -e 's/Feb/02/g' -e 's/Mar/03/g' -e 's/Apr/04/g' \
		      -e 's/Mai/05/g' -e 's/Jun/06/g' -e 's/Jul/07/g' -e 's/Aug/08/g'
}
main "$@"
| true
|
2171a185d81c07483c7306f75b3b3eefeba66e00
|
Shell
|
spacetiller/experiment
|
/shell/test-meta.sh
|
UTF-8
| 160
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# metaprogram: generate test-meta-gen.sh, a script that echoes the numbers 1..92.
target=test-meta-gen.sh
echo '#!/bin/bash' > "$target"
n=1
while (( n <= 92 )); do
  echo "echo $n" >> "$target"
  n=$((n+1))
done
chmod +x "$target"
| true
|
cc2b17fe8912db8e4cabad09438b5524d8664675
|
Shell
|
theirix/TorrentMultiplexer
|
/deps/build_libtorrent.sh
|
UTF-8
| 1,462
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/sh
# Build libtorrent 0.12.9 and assemble it into an OS X-style libtorrent.framework
# bundle (Versions/14 layout with Headers/Resources symlinks and a versioned
# dylib), linking against a libsigc.framework expected next to this script.
set -e
BUILDROOT=`pwd`
NAME=libtorrent
TARNAME=$NAME-0.12.9
INSTALL_DIR=$BUILDROOT/$NAME-prefix
FRAMEWORK=$NAME.framework
MAJOR=14
# Always return to the build root on exit, including failures under set -e.
trap "cd $BUILDROOT" EXIT
# Start clean, then unpack and build the tarball into the local prefix.
rm -rf $TARNAME $FRAMEWORK $INSTALL_DIR
tar xf $TARNAME.tar.gz
cd $TARNAME
./configure --prefix=$INSTALL_DIR --disable-openssl STUFF_CFLAGS="-I$BUILDROOT/libsigc.framework/Headers" STUFF_LIBS="-F$BUILDROOT -framework libsigc"
#./configure --prefix=$INSTALL_DIR STUFF_CFLAGS="-I$BUILDROOT/libsigc.framework/Headers" STUFF_LIBS="-L$BUILDROOT -framework libsigc"
#./configure --prefix=$INSTALL_DIR STUFF_LIBS="-L/projects/TorrentMultiplexer/deps/libsigcxx-prefix/lib" STUFF_CFLAGS="-I/projects/TorrentMultiplexer/deps/libsigcxx-prefix/include -I/projects/TorrentMultiplexer/deps/libsigcxx-prefix/include/sigc++-2.0 -I/projects/TorrentMultiplexer/deps/libsigcxx-prefix/lib/sigc++-2.0/include/"
make
make install
cd $BUILDROOT
# Framework skeleton: Versions/14/{Headers,Resources} plus top-level symlinks.
mkdir -p $BUILDROOT/$FRAMEWORK/Versions/$MAJOR/Headers $BUILDROOT/$FRAMEWORK/Versions/$MAJOR/Resources
cd $BUILDROOT/$FRAMEWORK
ln -s Versions/$MAJOR/Headers Headers
ln -s Versions/$MAJOR/Resources Resources
cp -R $INSTALL_DIR/include/* $BUILDROOT/$FRAMEWORK/Headers/
# -H follows the libtorrent.14.dylib symlink so the real library is copied.
cp -RH $INSTALL_DIR/lib/libtorrent.14.dylib $BUILDROOT/$FRAMEWORK/Versions/$MAJOR/libtorrent
ln -s Versions/$MAJOR/$NAME $NAME
cp $BUILDROOT/Info.$NAME.plist Resources/Info.plist
# Rewrite the install name so apps can embed the framework under ../Frameworks.
install_name_tool -id @executable_path/../Frameworks/$FRAMEWORK/$NAME $BUILDROOT/$FRAMEWORK/$NAME
rm -rf $INSTALL_DIR
| true
|
133300834e2b7e43002167dd307dc9fb97298129
|
Shell
|
elvout/OS
|
/runall.sh
|
UTF-8
| 226
| 3.28125
| 3
|
[] |
no_license
|
#! /usr/bin/env zsh
# Run ../breakonfail.sh for every *.ok test under $TESTS_DIR (default: cwd).
if [[ $TESTS_DIR == "" ]]; then
TESTS_DIR="."
fi
for test in $TESTS_DIR/*.ok; do
# zsh modifiers: :t = basename, :r = strip the extension.
name=${test:t:r}
# \e[2K clears the line, \e[1G moves the cursor to column 1.
printf "\e[2K\e[1G"
echo $name
../breakonfail.sh $name
# zsh's echo interprets escape sequences by default (unlike bash).
echo "\e[2K\e[1G---"
done
| true
|
cd330b48d8fb60d187ee1d663dfb2031eb9b45de
|
Shell
|
mads-hartmann/dotfilesv2
|
/bin/serve
|
UTF-8
| 195
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Serve the current directory over HTTP on the port given as $1 (default 8080).
# NOTE(review): `python -m SimpleHTTPServer` is Python 2 only; on Python-3-only
# hosts this fails — `python3 -m http.server` is the modern equivalent. Confirm
# which interpreter the target machines provide.
set -euo pipefail
function serve {
local port=$1
echo "Listening on http://localhost:${port} 🚀"
python -m SimpleHTTPServer ${port} .
}
port=${1:-8080}
serve ${port}
| true
|
0e676384423a285788637af3bc60b3be6059cfad
|
Shell
|
bOmBeLq/mongo-backup
|
/backup.sh
|
UTF-8
| 1,477
| 3.828125
| 4
|
[] |
no_license
|
# Dump a local MongoDB instance, zip the dump, store it under
# $root_backup_dir/<type>/, refresh $root_backup_dir/last.zip, and prune old
# backups so only the newest <versions> (plus one) remain.
# Usage: backup.sh <type> <versions>
root_backup_dir='/var/data/backup'
# params from call
type=$1        # backup sub-directory name under $root_backup_dir
versions=$2    # number of backup versions to keep
# check parameters
if [ ! "$1" ]; then
	echo "backup dir has to be defined" >&2;
	exit 1;
fi
if [ ! "$2" ]; then
	echo "backups count has to be defined" >&2;
	exit 1;
fi
backup_dir="$root_backup_dir/$1"
# Fix: the original tested the still-unset $dir variable here ("[ ! -d ]" is a
# one-argument string test that is always false), so a missing backup
# directory was never detected. Validate $backup_dir instead.
if [ ! -d "${backup_dir}" ]; then
	echo "backup_dir $backup_dir does not exist" >&2;
	exit 1;
fi
if ! echo "${versions}" | egrep -q '^[0-9]+$'; then
	echo "backups count should be number" >&2;
	exit 1
fi
if [ "${versions}" -lt 1 ]; then
	echo "versions count (second parameter) should be greater than 0" >&2;
	exit 1;
fi
#########
# create tmp dir and dump
now=`date +%Y-%m-%d_%H:%M:%S`
tmp_dir="/tmp/${now}"
mkdir "${tmp_dir}"
mongodump -o "${tmp_dir}"
######
# zip
echo "zipping backup"
cd /tmp
zip_file="${now}.zip"
zip -r "${zip_file}" "${now}"
echo "removing tmp dir"
rm -r "${now}"
#######
# copy backup
dir="$backup_dir/$zip_file"
echo "coping backup to ${dir}"
cp "${zip_file}" "${dir}"
last_path="$root_backup_dir/last.zip"
echo "moving backup to $last_path"
mv "${zip_file}" "${last_path}"
#########
# remove old backups — timestamped names make glob order chronological, so the
# first $to_delete entries are the oldest ones.
backup_count=`ls ${backup_dir}/* | wc -l`
to_delete=$(($backup_count-$versions-1))
if [ ${to_delete} -lt 1 ]; then
	exit;
fi;
echo "Deleting $to_delete of $backup_count existing backups";
i=0
for f in ${backup_dir}/*
do
	if [ ${i} -eq ${to_delete} ]; then
		break
	fi
	echo "Deleting ${f}"
	rm "${f}"
	i=$((i+1))
done
| true
|
51d8ca95cdad56fee059281f9f6c11eb097f1c58
|
Shell
|
Sen5or/Android
|
/serviceOn.sh
|
UTF-8
| 153
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Force the display back on when `xset q` reports the DPMS state as "Off".
dpms_line=$(xset q | tail -n1)
set -- $dpms_line
user=$(whoami)
case "$3" in
  Off)
    xset dpms force on
    echo "Powered ON display."
    ;;
esac
| true
|
25fd5b31d3fddcedf6c6489655e622888ab33e52
|
Shell
|
enterstudio/lycheejs-runtime
|
/html-webview/package.sh
|
UTF-8
| 7,813
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Map the ASCII letters A-Z in $1 to a-z and print the result.
lowercase() {
	printf '%s\n' "$1" | sed "y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/";
}
OS=`lowercase \`uname\``;
ARCH=`lowercase \`uname -m\``;
# This script lives three directory levels below the lychee.js root.
LYCHEEJS_ROOT=$(cd "$(dirname "$0")/../../../"; pwd);
RUNTIME_ROOT=$(cd "$(dirname "$0")/"; pwd);
# $1 = project path relative to $LYCHEEJS_ROOT, $2 = human-readable name.
PROJECT_NAME="$2";
PROJECT_ROOT="$LYCHEEJS_ROOT$1";
# NOTE(review): `du -b` is a GNU extension; the darwin branch below suggests
# this also runs on macOS, where BSD du has no -b — confirm.
PROJECT_SIZE=`du -b -s $PROJECT_ROOT | cut -f 1`;
BUILD_ID=`basename $PROJECT_ROOT`;
SDK_DIR="";
# Per-target flags: *_AVAILABLE marks the toolchain as present; *_STATUS stays
# 1 ("failed") until the packaging step succeeds or the target is unavailable.
ANDROID_AVAILABLE=0;
ANDROID_STATUS=1;
FIREFOXOS_AVAILABLE=0;
FIREFOXOS_STATUS=1;
UBUNTU_AVAILABLE=0;
UBUNTU_STATUS=1;
# Normalize uname -m output to x86_64 / x86 / arm.
if [ "$ARCH" == "x86_64" -o "$ARCH" == "amd64" ]; then
ARCH="x86_64";
fi;
if [ "$ARCH" == "i386" -o "$ARCH" == "i686" -o "$ARCH" == "i686-64" ]; then
ARCH="x86";
fi;
if [ "$ARCH" == "armv7l" -o "$ARCH" == "armv8" ]; then
ARCH="arm";
fi;
# Pick the Android SDK bundled for this host OS/arch, when one exists.
if [ "$OS" == "darwin" ]; then
OS="osx";
if [ "$ARCH" == "x86_64" ]; then
SDK_DIR="$RUNTIME_ROOT/android-toolchain/sdk-osx/$ARCH";
fi;
elif [ "$OS" == "linux" ]; then
OS="linux";
if [ "$ARCH" == "arm" ] || [ "$ARCH" == "x86_64" ]; then
SDK_DIR="$RUNTIME_ROOT/android-toolchain/sdk-linux/$ARCH";
fi;
fi;
# Package the project as an Android app: copy the gradle project shell from
# $RUNTIME_ROOT/android, inject the project's core.js/icon.png/index.html,
# build APKs with the bundled toolchain, then prune intermediate files.
_package_android () {
if [ -d "./$BUILD_ID-android" ]; then
rm -rf "./$BUILD_ID-android";
fi;
mkdir "$BUILD_ID-android";
if [ -d "$SDK_DIR" ] && [ -d "$RUNTIME_ROOT/android" ] && [ -d "$RUNTIME_ROOT/android-toolchain" ]; then
ANDROID_AVAILABLE=1;
cp -R "$RUNTIME_ROOT/android/app" "$BUILD_ID-android/app";
cp "$RUNTIME_ROOT/android/gradle.properties" "$BUILD_ID-android/gradle.properties";
cp "$RUNTIME_ROOT/android/build.gradle" "$BUILD_ID-android/build.gradle";
cp "$RUNTIME_ROOT/android/settings.gradle" "$BUILD_ID-android/settings.gradle";
# TODO: Resize icon.png to mipmap-...dpi/ic_launcher.png variants
cp "$PROJECT_ROOT/core.js" "$BUILD_ID-android/app/src/main/assets/core.js";
cp "$PROJECT_ROOT/icon.png" "$BUILD_ID-android/app/src/main/assets/icon.png";
cp "$PROJECT_ROOT/index.html" "$BUILD_ID-android/app/src/main/assets/index.html";
echo -e "sdk.dir=$SDK_DIR" > "$BUILD_ID-android/local.properties";
# BSD sed on macOS requires an explicit empty backup suffix after -i.
if [ "$OS" == "osx" ]; then
sed -i '' "s/__NAME__/$PROJECT_NAME/g" "$BUILD_ID-android/app/app.iml";
sed -i '' "s/__NAME__/$PROJECT_NAME/g" "$BUILD_ID-android/app/src/main/res/values/strings.xml";
else
sed -i "s/__NAME__/$PROJECT_NAME/g" "$BUILD_ID-android/app/app.iml";
sed -i "s/__NAME__/$PROJECT_NAME/g" "$BUILD_ID-android/app/src/main/res/values/strings.xml";
fi;
# NOTE(review): the build directory name is passed to gradle as if it were a
# task name — confirm this is the intended invocation of the bundled gradle.
"$RUNTIME_ROOT/android-toolchain/gradle/bin/gradle" "$BUILD_ID-android";
ANDROID_STATUS=$?;
if [ -d "$BUILD_ID-android/app/build/outputs/apk" ]; then
cp "$BUILD_ID-android/app/build/outputs/apk/app-debug.apk" "$BUILD_ID-android/app-debug.apk";
cp "$BUILD_ID-android/app/build/outputs/apk/app-debug-unaligned.apk" "$BUILD_ID-android/app-debug-unaligned.apk";
cp "$BUILD_ID-android/app/build/outputs/apk/app-release-unsigned.apk" "$BUILD_ID-android/app-release-unsigned.apk";
fi;
rm -rf "$BUILD_ID-android/app";
rm -rf "$BUILD_ID-android/build";
rm "$BUILD_ID-android/build.gradle";
rm "$BUILD_ID-android/settings.gradle";
rm "$BUILD_ID-android/gradle.properties";
rm "$BUILD_ID-android/local.properties";
fi;
# A missing toolchain counts as success so the other targets still run.
if [ "$ANDROID_AVAILABLE" == "0" ]; then
ANDROID_STATUS=0;
fi;
}
# Package the project as a Firefox OS app: copy the app shell, inject the
# project files, patch manifest.webapp, and zip the result into app.zip.
_package_firefoxos () {
if [ -d "./$BUILD_ID-firefoxos" ]; then
rm -rf "./$BUILD_ID-firefoxos";
fi;
mkdir "$BUILD_ID-firefoxos";
if [ -d "$RUNTIME_ROOT/firefoxos" ]; then
FIREFOXOS_AVAILABLE=1;
cp -R "$RUNTIME_ROOT/firefoxos/app" "$BUILD_ID-firefoxos/app";
cp "$PROJECT_ROOT/core.js" "$BUILD_ID-firefoxos/app/core.js";
cp "$PROJECT_ROOT/icon.png" "$BUILD_ID-firefoxos/app/icon.png";
cp "$PROJECT_ROOT/index.html" "$BUILD_ID-firefoxos/app/index.html";
# BSD sed on macOS requires an explicit empty backup suffix after -i.
if [ "$OS" == "osx" ]; then
sed -i '' "s/__NAME__/$PROJECT_NAME/g" "$BUILD_ID-firefoxos/app/manifest.webapp";
sed -i '' "s/__SIZE__/$PROJECT_SIZE/g" "$BUILD_ID-firefoxos/app/manifest.webapp";
else
sed -i "s/__NAME__/$PROJECT_NAME/g" "$BUILD_ID-firefoxos/app/manifest.webapp";
sed -i "s/__SIZE__/$PROJECT_SIZE/g" "$BUILD_ID-firefoxos/app/manifest.webapp";
fi;
# NOTE(review): this cd is never undone inside the function, and the rm -rf
# below uses a path relative to the pre-cd directory — the caller re-cds to
# $PROJECT_ROOT/.. before each packager; confirm the cleanup actually runs.
cd "$BUILD_ID-firefoxos/app";
zip -r -q "../app.zip" ./*;
FIREFOXOS_STATUS=$?;
rm -rf "$BUILD_ID-firefoxos/app";
fi;
# A missing app shell counts as success so the other targets still run.
if [ "$FIREFOXOS_AVAILABLE" == "0" ]; then
FIREFOXOS_STATUS=0;
fi;
}
# Package the project as a Debian/Ubuntu .deb: copy the DEBIAN/root skeleton,
# inject the project files, patch __NAME__/__SIZE__ placeholders, then build
# the archive by hand (data.tar.gz + control.tar.gz + debian-binary via ar).
_package_ubuntu () {
if [ -d "./$BUILD_ID-ubuntu" ]; then
rm -rf "./$BUILD_ID-ubuntu";
fi;
mkdir "$BUILD_ID-ubuntu";
if [ -d "$RUNTIME_ROOT/ubuntu" ]; then
UBUNTU_AVAILABLE=1;
cp -R "$RUNTIME_ROOT/ubuntu/DEBIAN" "$BUILD_ID-ubuntu/DEBIAN";
cp -R "$RUNTIME_ROOT/ubuntu/root" "$BUILD_ID-ubuntu/root";
cp "$PROJECT_ROOT/core.js" "$BUILD_ID-ubuntu/root/usr/share/__NAME__/core.js";
cp "$PROJECT_ROOT/icon.png" "$BUILD_ID-ubuntu/root/usr/share/__NAME__/icon.png";
cp "$PROJECT_ROOT/index.html" "$BUILD_ID-ubuntu/root/usr/share/__NAME__/index.html";
# Rename the placeholder paths to the real project name.
mv "$BUILD_ID-ubuntu/root/usr/bin/__NAME__" "$BUILD_ID-ubuntu/root/usr/bin/$PROJECT_NAME";
mv "$BUILD_ID-ubuntu/root/usr/share/__NAME__" "$BUILD_ID-ubuntu/root/usr/share/$PROJECT_NAME";
mv "$BUILD_ID-ubuntu/root/usr/share/applications/__NAME__.desktop" "$BUILD_ID-ubuntu/root/usr/share/applications/$PROJECT_NAME.desktop";
# BSD sed on macOS requires an explicit empty backup suffix after -i.
if [ "$OS" == "osx" ]; then
sed -i '' "s/__NAME__/$PROJECT_NAME/g" "$BUILD_ID-ubuntu/root/usr/bin/$PROJECT_NAME";
sed -i '' "s/__NAME__/$PROJECT_NAME/g" "$BUILD_ID-ubuntu/root/usr/share/applications/$PROJECT_NAME.desktop";
sed -i '' "s/__NAME__/$PROJECT_NAME/g" "$BUILD_ID-ubuntu/root/usr/share/$PROJECT_NAME/apparmor.json";
else
sed -i "s/__NAME__/$PROJECT_NAME/g" "$BUILD_ID-ubuntu/root/usr/bin/$PROJECT_NAME";
sed -i "s/__NAME__/$PROJECT_NAME/g" "$BUILD_ID-ubuntu/root/usr/share/applications/$PROJECT_NAME.desktop";
sed -i "s/__NAME__/$PROJECT_NAME/g" "$BUILD_ID-ubuntu/root/usr/share/$PROJECT_NAME/apparmor.json";
fi;
cd "$PROJECT_ROOT/../$BUILD_ID-ubuntu/root";
tar czf $PROJECT_ROOT/../$BUILD_ID-ubuntu/data.tar.gz *;
# NOTE(review): presumably computes the control file's Installed-Size from the
# du total plus 8 blocks of overhead — verify the intended semantics.
let SIZE=`du -s $PROJECT_ROOT/../$BUILD_ID-ubuntu/root | sed s'/\s\+.*//'`+8
# BSD sed on macOS requires an explicit empty backup suffix after -i.
if [ "$OS" == "osx" ]; then
sed -i '' "s/__SIZE__/${SIZE}/g" "$PROJECT_ROOT/../$BUILD_ID-ubuntu/DEBIAN/control";
sed -i '' "s/__NAME__/${PROJECT_NAME}/g" "$PROJECT_ROOT/../$BUILD_ID-ubuntu/DEBIAN/control";
else
sed -i "s/__SIZE__/${SIZE}/g" "$PROJECT_ROOT/../$BUILD_ID-ubuntu/DEBIAN/control";
sed -i "s/__NAME__/${PROJECT_NAME}/g" "$PROJECT_ROOT/../$BUILD_ID-ubuntu/DEBIAN/control";
fi;
cd "$PROJECT_ROOT/../$BUILD_ID-ubuntu/DEBIAN";
tar czf $PROJECT_ROOT/../$BUILD_ID-ubuntu/control.tar.gz *;
cd "$PROJECT_ROOT/../$BUILD_ID-ubuntu";
echo 2.0 > ./debian-binary;
ar r "$PROJECT_ROOT/../$BUILD_ID-ubuntu/$PROJECT_NAME-1.0.0-all.deb" debian-binary control.tar.gz data.tar.gz &>/dev/null;
UBUNTU_STATUS=$?;
cd "$PROJECT_ROOT/../";
rm -rf "$BUILD_ID-ubuntu/DEBIAN";
rm -rf "$BUILD_ID-ubuntu/root";
rm "$BUILD_ID-ubuntu/data.tar.gz";
rm "$BUILD_ID-ubuntu/control.tar.gz";
rm "$BUILD_ID-ubuntu/debian-binary";
fi;
# A missing skeleton counts as success so the other targets still run.
if [ "$UBUNTU_AVAILABLE" == "0" ]; then
UBUNTU_STATUS=0;
fi;
}
# Only proceed when the build actually produced a deployable index.html.
if [ -f "$PROJECT_ROOT/index.html" ]; then
# Package process
# Each packager is run from the build's parent directory and reports through
# its *_STATUS variable (0 = success or target unavailable).
cd "$PROJECT_ROOT/../";
_package_android;
if [ "$ANDROID_STATUS" != "0" ]; then
echo "FAILURE (Android build)";
fi;
cd "$PROJECT_ROOT/../";
_package_firefoxos;
if [ "$FIREFOXOS_STATUS" != "0" ]; then
echo "FAILURE (FirefoxOS build)";
fi;
cd "$PROJECT_ROOT/../";
_package_ubuntu;
if [ "$UBUNTU_STATUS" != "0" ]; then
echo "FAILURE (Ubuntu build)";
fi;
# Overall exit status: fail if any target that was available failed.
if [ "$ANDROID_STATUS" != "0" ] || [ "$FIREFOXOS_STATUS" != "0" ] || [ "$UBUNTU_STATUS" != "0" ]; then
exit 1;
fi;
echo "SUCCESS";
exit 0;
else
echo "FAILURE";
exit 1;
fi;
| true
|
b80e251a2e7cebd2a4adfa658210da6f32bda745
|
Shell
|
shenyunhang/linux_configure_scripts
|
/tmux.sh
|
UTF-8
| 568
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Install tmux (and build prerequisites) via apt, then set up gpakosz's .tmux
# configuration in $HOME. Expects to run with root privileges for apt-get.
set -x
set -e
# apt-get
apt-get install -y libevent-dev
apt-get install -y bison flex
# tmux
apt-get install -y tmux
#apt-get remove -y tmux
#cd ~/Documents/
#git clone https://github.com/tmux/tmux.git
#cd tmux
#git checkout 3.1
#sh autogen.sh
#./configure && make
#make install
#echo "source "/usr/local/lib/python2.7/dist-packages/powerline/bindings/tmux/powerline.conf""> .tmux.conf
cd ~
# Replace any existing checkout, then symlink the config into $HOME and copy
# the local-overrides file for editing.
rm -rf .tmux
git clone https://github.com/gpakosz/.tmux.git
ln -s -f .tmux/.tmux.conf
cp .tmux/.tmux.conf.local .
#echo "set -g mouse on" >> .tmux.conf.local
| true
|
5ee580958b851991b27c9232d6dd0cb4763ec6a9
|
Shell
|
nadalpablo/graphql-poc
|
/docker-entrypoint.sh
|
UTF-8
| 417
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/sh
# Container entrypoint: assemble JAVA_OPTS / JAVA_PARAM defaults, optionally
# enable remote debugging, then exec the JVM as PID 1.
# NOTE(review): `set -o pipefail` is not POSIX — this relies on the image's
# /bin/sh supporting it (busybox ash does; older dash does not). Confirm.
set -eo pipefail
# Seed the JVM RNG from /dev/urandom to avoid entropy-starved startups.
if [ -z "$JAVA_OPTS" ]; then
JAVA_OPTS="-Djava.security.egd=file:/dev/./urandom"
else
JAVA_OPTS="$JAVA_OPTS -Djava.security.egd=file:/dev/./urandom"
fi
# Fix: the original `[ $DEBUG == "true" ]` left $DEBUG unquoted and used the
# non-POSIX `==`, producing an "unexpected operator" error whenever DEBUG was
# unset or empty. Quote it and compare with `=`.
if [ "$DEBUG" = "true" ]; then
JAVA_OPTS="$JAVA_OPTS -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005"
fi
if [ -z "$JAVA_PARAM" ]; then
JAVA_PARAM="-jar /app/app.jar"
fi
# Word-splitting of $JAVA_OPTS / $JAVA_PARAM into separate arguments is
# intentional here.
exec /usr/bin/java $JAVA_OPTS $JAVA_PARAM
| true
|
5a1f7ac7d86bde9c22d7ee70e06429a40c2d6fb5
|
Shell
|
tahti/dotfiles
|
/bin/turnMonitor.sh
|
UTF-8
| 733
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Configure one or two connected xrandr outputs: a single output is set to the
# first parsed mode; with two outputs the second is placed to the given side
# of the first. Usage: turnMonitor.sh [left|right]   (default: left)
#if [ $1=="off" ]
#then
#xset dpms force off; exit 0
#fi
if [ $# -eq 0 ]
then
SIDE="left"
else
SIDE="$1"
fi
# Names of connected outputs (e.g. HDMI-1, eDP-1) parsed from xrandr.
OUTPUTS=( $(xrandr | grep \ connected | egrep -o "^[a-zA-Z0-9\-]+[0-9]") )
# First WxH resolution on the line following each "connected" line —
# NOTE(review): assumes that line lists the preferred mode first; confirm.
MODES=( $(xrandr | grep -A1 \ connected | egrep -o "[^a-z] [0-9][0-9][0-9][0-9]?x[0-9][0-9][0-9][0-9]?") )
max=${#OUTPUTS[@]}
if test $max -eq 1
then
echo "Putting ${OUTPUTS[0]} in mode ${MODES[0]}."
xrandr --output ${OUTPUTS[0]} --auto --mode ${MODES[0]}
elif test $max -eq 2
then
echo "Putting ${OUTPUTS[1]} $SIDE of ${OUTPUTS[0]}."
xrandr --output ${OUTPUTS[0]} --auto --mode ${MODES[0]} --output ${OUTPUTS[1]} --${SIDE}-of ${OUTPUTS[0]} --mode ${MODES[1]}
else
echo "More than 2 output not implemented."
fi
# Restore the feh-managed wallpaper after the mode change.
~/.fehbg
| true
|
b131b45c1437cb4295e2f86e4d828b091703f570
|
Shell
|
sne11ius/oelint-adv
|
/tests/rule_var_pathhardcode_good
|
UTF-8
| 1,491
| 3.171875
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# "Good" fixture for the oelint.vars.pathhardcode rule: generates a recipe
# that must yield exactly TESTOCC (zero) findings, and fails loudly otherwise.
# NOTE(review): TESTFILE concatenates ${TMP} with no trailing slash — if TMP
# is unset the file is created in the current directory; confirm TMP's
# contract in the test harness.
TESTFILE="${TMP}oelint_adv_test.bb"
trap "rm -f ${TESTFILE}" EXIT
TESTID="oelint.vars.pathhardcode.*"
TESTOCC="0"
# NOTE(review): the heredoc delimiter is UNQUOTED, so ${systemd_user_unitdir},
# ${docdir}, FILES_${PN}, ${D}${bindir} etc. are expanded by THIS shell
# (mostly to empty strings) before the recipe is written; only the \$-escaped
# occurrences survive literally. Verify this is the intended fixture content.
cat > ${TESTFILE} << EOF
VAR = "${systemd_user_unitdir}"
VAR = "${systemd_system_unitdir}"
VAR = "${docdir}"
VAR = "${infodir}"
VAR = "${mandir}"
VAR = "${libexecdir}"
VAR = "${systemd_unitdir}"
VAR = "${libdir}"
VAR = "${bindir}"
VAR = "${datadir}"
VAR = "${includedir}"
VAR = "${localstatedir}"
VAR = "${nonarch_base_libdir}"
VAR = "${nonarch_libdir}"
VAR = "${oldincludedir}"
VAR = "${sbindir}"
VAR = "${servicedir}"
VAR = "${sharedstatedir}"
VAR = "${sysconfdir}"
VAR = "/some/other/dir/${sysconfdir}"
SUMMARY = "/usr/datadir"
DESCRIPTION = "/usr/datadir"
HOMEPAGE = "/usr/datadir"
AUTHOR = "/usr/datadir"
BUGTRACKER = "/usr/datadir"
# Here we can talk freely about /usr/lib and /usr/bin
FILES_${PN} += "/usr/lib/totally.valid.file"
do_install_append() {
#To remove the default files from /etc/somefolder
rm -f \${D}\${sysconfdir}/somefolder/*
install -m 0644 \${S}/usr/lib/* \${D}${libdir}
install -m 0644 \${WORKDIR}/usr/bin \${D}${bindir}
echo "foo" | sed "s#/usr/bin/python#/usr/bin/env python#g" > ${D}${bindir}/foo
}
EOF
# Pass when the linter reports exactly TESTOCC matches for TESTID.
[ $(python3 -m oelint_adv ${TESTFILE} 2>&1 | grep ":${TESTID}:" | wc -l) = "${TESTOCC}" ] && exit 0;
# Otherwise dump the full linter output and report the mismatch.
python3 -m oelint_adv --addrules=jetm ${TESTFILE} 2>&1
_got=$(python3 -m oelint_adv --addrules=jetm ${TESTFILE} 2>&1 | grep ":${TESTID}:" | wc -l)
echo "Test for ${0} failed - got ${_got} expected ${TESTOCC}"
exit 1
| true
|
a8de9f15a8397527461529b074a1da659d48f5db
|
Shell
|
joshuakarjala/dotfiles
|
/osx/set-defaults.sh
|
UTF-8
| 3,485
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
# Sets personal OS X defaults; exits immediately on non-Darwin systems.
# only if running osx
if test "$(uname -s)" != "Darwin"; then
exit
fi
# Sets reasonable OS X defaults.
#
# Or, in other words, set shit how I like in OS X.
#
# The original idea (and a couple settings) were grabbed from:
# https://github.com/mathiasbynens/dotfiles/blob/master/.osx
#
# Run ./set-defaults.sh and you'll be good to go.
# Disable press-and-hold for keys in favor of key repeat.
defaults write -g ApplePressAndHoldEnabled -bool false
# Use AirDrop over every interface. srsly this should be a default.
defaults write com.apple.NetworkBrowser BrowseAllInterfaces 1
# Always open everything in Finder's list view. This is important.
# NOTE(review): FXPreferredViewStyle is written again further down with "Clmv"
# (column view); the later write wins — decide which one is intended.
defaults write com.apple.Finder FXPreferredViewStyle Nlsv
# Show the ~/Library folder.
chflags nohidden ~/Library
# Set the Finder prefs for showing a few different volumes on the Desktop.
defaults write com.apple.finder ShowExternalHardDrivesOnDesktop -bool true
defaults write com.apple.finder ShowRemovableMediaOnDesktop -bool true
# Hide Safari's bookmark bar.
defaults write com.apple.Safari ShowFavoritesBar -bool false
# Set up Safari for development.
defaults write com.apple.Safari IncludeInternalDebugMenu -bool true
defaults write com.apple.Safari IncludeDevelopMenu -bool true
defaults write com.apple.Safari WebKitDeveloperExtrasEnabledPreferenceKey -bool true
defaults write com.apple.Safari "com.apple.Safari.ContentPageGroupIdentifier.WebKit2DeveloperExtrasEnabled" -bool true
defaults write NSGlobalDomain WebKitDeveloperExtras -bool true
echo 'Allow Selection in QuickLook'
defaults write com.apple.finder QLEnableTextSelection -bool true
echo 'Completely turn off Dashboard'
defaults write com.apple.dashboard mcx-disabled -bool true
echo "Check for software updates daily, not just once per week"
defaults write com.apple.SoftwareUpdate ScheduleFrequency -int 1
echo 'Disable the “Are you sure you want to open this application?” dialog'
defaults write com.apple.LaunchServices LSQuarantine -bool false
echo 'Enable full keyboard access for all controls'
echo '(e.g. enable Tab in modal dialogs)'
defaults write NSGlobalDomain AppleKeyboardUIMode -int 3
echo 'Disable auto-correct'
defaults write NSGlobalDomain NSAutomaticSpellingCorrectionEnabled -bool false
echo 'Finder: disable window animations and Get Info animations'
defaults write com.apple.finder DisableAllAnimations -bool true
echo 'Finder: show all filename extensions'
defaults write NSGlobalDomain AppleShowAllExtensions -bool true
echo 'Disable the warning when changing a file extension'
defaults write com.apple.finder FXEnableExtensionChangeWarning -bool false
echo "Use column view in all Finder windows by default"
# NOTE(review): overrides the earlier "Nlsv" (list view) write above.
defaults write com.apple.finder FXPreferredViewStyle Clmv
echo 'Avoid creating .DS_Store files on network volumes'
defaults write com.apple.desktopservices DSDontWriteNetworkStores -bool true
echo "Setting Dock to auto-hide and removing the auto-hiding delay"
defaults write com.apple.dock autohide -bool true
defaults write com.apple.dock autohide-delay -float 0
defaults write com.apple.dock autohide-time-modifier -float 0
echo "Disable smart quotes as it's annoying for messages that contain code"
defaults write com.apple.messageshelper.MessageController SOInputLineSettings -dict-add "automaticQuoteSubstitutionEnabled" -bool false
echo "Preventing Time Machine from prompting to use new hard drives as backup volume"
defaults write com.apple.TimeMachine DoNotOfferNewDisksForBackup -bool true
| true
|
1dafff4d6c3b96be51a2eb89eb99a3ab70f47bef
|
Shell
|
davidkhala/mac-utils
|
/ssh.sh
|
UTF-8
| 677
| 3.421875
| 3
|
[
"Apache-2.0"
] |
permissive
|
set -e
# macOS SSH key helpers. Invoked with the function name as the argument:
#   ./ssh.sh genRSA      — interactively generate an RSA key and register it
#   ./ssh.sh copyRSAPub  — copy the public key to the clipboard
genRSA() {
	local keySize
	local email
	local rsaKeyPrv
	read -p "enter email (default:david-khala@hotmail.com):" email
	read -p "enter keySize (default:4096) :" keySize
	keySize=${keySize:-4096}
	email=${email:-"david-khala@hotmail.com"}
	ssh-keygen -t rsa -b "$keySize" -C "$email"
	# Start an agent in this shell so ssh-add below can talk to it.
	eval "$(ssh-agent -s)"
	# NOTE(review): the snippet is appended only when ~/.ssh/config already
	# exists — confirm it shouldn't be created when missing instead.
	if [[ -f ~/.ssh/config ]]; then
		local content="
Host *
  AddKeysToAgent yes
  UseKeychain yes
  IdentityFile ~/.ssh/id_rsa
"
		# Fix: quote the expansion — the unquoted ${content} collapsed the whole
		# multi-line config snippet onto a single (invalid) line.
		echo "${content}" >>~/.ssh/config
	fi
	read -p "Enter key file path again ($HOME/.ssh/id_rsa):" rsaKeyPrv
	rsaKeyPrv=${rsaKeyPrv:-"$HOME/.ssh/id_rsa"}
	# -K stores the passphrase in the macOS keychain (legacy spelling).
	ssh-add -K "$rsaKeyPrv"
}
copyRSAPub() {
	pbcopy <~/.ssh/id_rsa.pub
}
# Dispatch: run the function named by the arguments (fix: quoted "$@" so
# arguments with spaces are preserved; no-op when called without arguments).
"$@"
| true
|
587b904c552d563de18fa08e99fc0c2dfbd0955c
|
Shell
|
kurniawan77/Domoticz
|
/scripts/milights/living_fade_off.sh
|
UTF-8
| 239
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Slowly fade the living-room MiLight group down by stepping brightness 12 -> 1.
cd /home/pi/domoticz/scripts/milights
for level in $(seq 12 -1 1)
do
./milight all B $level
for tick in $(seq 100); do # do it slow
echo $level $tick # show value and slow down
done
done
#./milight all off
| true
|
c836cea832b4f8e3840304cd714292b6bf0e99a1
|
Shell
|
KennyIT3/LinuxScripts
|
/Scripts/patch.sh
|
UTF-8
| 466
| 3.015625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run "yum update -y" and reboot every host listed in ./patch_list.txt,
# authenticating as root with a single interactively-entered password.
echo -n "Please enter the password: "
read -r Pass
# NOTE(review): echoing the password back (and passing it via `sshpass -p`,
# which is visible in `ps`) is a security liability — consider `sshpass -f`
# or key-based auth.
echo "${Pass}"
# Fixes: iterate lines with a read loop instead of word-splitting `$(cat …)`;
# drop the no-op `while :`/`continue`/`break` wrapper around the loop; use the
# plain scalar "$host" instead of the odd `${i[*]}` array expansion.
while IFS= read -r host
do
	echo "$host"
	sshpass -p "${Pass}" ssh -o PreferredAuthentications=password -o StrictHostKeychecking=no root@"${host}" "yum update -y"
	sshpass -p "${Pass}" ssh -o PreferredAuthentications=password -o StrictHostKeychecking=no root@"${host}" "reboot"
done < ./patch_list.txt
| true
|
371585e1849a62295abf95a51d35eec46073a7d6
|
Shell
|
kids300/dotfiles-1
|
/dev/java/_init
|
UTF-8
| 290
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env zsh
# dotfiles module for Java development: sources the shared deploy framework
# and declares the install() hook that `init` dispatches to.
# ${0:A:h} is zsh for "absolute directory of this script".
source ${0:A:h}/../../deploy
source env.zsh
install() {
# TODO Install android SDK
# _os is provided by the sourced deploy framework — returns the OS token.
case $(_os) in
macos) brew cask install java gradle ;;
arch) yay --needed --noconfirm -S jdk8-openjdk gradle ;;
esac
}
# update() {}
# link() {}
# clean() {}
init "$@"
| true
|
7e633df8702ad5e1945fad699a7f8f6559ca6063
|
Shell
|
jamisbrill/McServerAutomatedInstall
|
/mcinstallv2.sh
|
UTF-8
| 1,226
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision a Minecraft (Spigot) server on Ubuntu/Debian: install Java, create
# a dedicated "mc" user, download spigot.jar, open the port and launch once.
echo "please run with sudo (sudo ./mcinstallv2.sh)"
sudo apt-get update #make sure you have all up to date packadges
sudo apt-get upgrade
sudo apt-get install default-jdk #install java
useradd -m -d /home/mc -s /bin/bash -c "mc server user" -U mc #create a seggregated user
echo "please enter a pass for your new user"
passwd mc # User sets password
mkdir /home/mc/serverfiles #create directory to store server files
cd /home/mc/serverfiles # navigate to dir
#curl --output spigot.jar https://cdn.getbukkit.org/spigot/spigot-1.16.5.jar # Download spigot // Update this path
#curl --output spigot.jar https://download.getbukkit.org/craftbukkit/craftbukkit-1.17.1.jar #craftbukkit instead of spigot.jar
curl --output spigot.jar https://download.getbukkit.org/spigot/spigot-1.17.1.jar #download latest spigot
echo "java -Xms300M -Xmx1000M -jar spigot.jar -o false -nogui" > launch.sh # Append To file #|reduced ram
chmod +x launch.sh # Make file executeable
sudo ufw allow 25565 #Open mc port
echo "eula=true" > eula.txt # agree to mc's eula
cd /home/mc/serverfiles #navigate to the directory
sudo ./launch.sh #run the script to launch the server
# NOTE(review): assumes the server run above has exited and created plugins/ —
# this line only executes after launch.sh returns; confirm.
cd plugins
#curl //download plugins wanted .....
| true
|
a4d98ab39ec5a615a055cac994075f36393cad75
|
Shell
|
SojournLabs/netboot
|
/ipxe_builder.docker/build_ipxe
|
UTF-8
| 1,800
| 4.03125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env sh
# Usage:
# $0 imgserver1 templateserver1 imgserver2 templateserver2 ...
keyname() {
# Output the base name of the key-certificate files pair.
# Assume keyfile names never end in newline characters.
# NOTE(review): `ls /vapr/keys` is expected to list exactly one file; with
# several keys the result is multi-line — confirm the upstream contract.
keyname_KEYFILE_NAME=$(ls /vapr/keys)
# ${var%.*} strips the final extension (".key"/".crt").
printf %s "${keyname_KEYFILE_NAME%.*}"
}
gen_ipxe_script() {
	# Emit an iPXE boot script for the (imgserver, templateserver) pairs given
	# as arguments: each pair becomes a :srv_N section that chains to the next
	# as fallback, followed by an error stanza and the shared :try_boot code.
	# The \$-escaped variables in the final heredoc are iPXE variables, not
	# shell variables, and must survive literally.
	cat <<EOF
#!ipxe
dhcp
imgtrust
EOF
	IDX=0
	while [ $# -gt 1 ]; do
		cat <<EOF
:srv_${IDX}
set imgserver ${1}
set templateserver ${2}
set next_src srv_$((${IDX}+1))
goto try_boot
EOF
		# Fix: plain arithmetic assignment — the original wrapped this in a
		# pointless `eval "IDX=$((...))"` (the arithmetic was already expanded
		# before eval ran, so eval added nothing but risk).
		IDX=$((IDX+1))
		shift 2
	done
	cat <<EOF
:srv_${IDX}
echo Failed to find valid boot source.
shell
:try_boot
imgfetch -t 500 \${imgserver}/entry.ipxe || goto \${next_src}
imgverify entry.ipxe \${imgserver}/entry.ipxe.sig || goto \${next_src}
chain entry.ipxe || goto \${next_src}
EOF
}
# Validate arguments
if [ $(($# % 2)) -eq 1 ]; then
echo "ERROR: Odd number of image/template server sources provided."
exit 1
fi
if [ ! -f /vapr/certstore/ca.crt ]; then
echo "Unable to locate root certificate authority certificate file."
exit 1
fi
# Set up client authentication.
if [ -f /vapr/certstore/"$(keyname)".crt ] && [ -f /vapr/keys/"$(keyname)".key ]; then
# We don't support keynames with whitespace.
CLIENT_AUTH="CERT=/vapr/certstore/$(keyname).crt PRIVKEY=/vapr/keys/$(keyname).key"
else
# Render the warning prefix in bold yellow, then reset terminal attributes.
tput bold; tput setaf 3
printf "WARNING: "
tput sgr0
echo No client authentication enabled.
fi
if [ $# -eq 0 ]; then
echo "No image or template servers provided."
exit 1
fi
# Embed the generated boot script and the CA trust root into the iPXE build.
# ${CLIENT_AUTH} is intentionally unquoted so it splits into the two make
# variables CERT=... and PRIVKEY=... (empty when client auth is disabled).
gen_ipxe_script "$@" > /tmp/bootscript.ipxe
make -C ipxe/src EMBED=/tmp/bootscript.ipxe TRUST=/vapr/certstore/ca.crt ${CLIENT_AUTH}
for FILENAME in ipxe.iso ipxe.usb; do
cp ipxe/src/bin/$FILENAME build;
done
| true
|
cb4b92c38ae309d5cabaf63a9c649d91e93e605f
|
Shell
|
KaiL4eK/LEGO_ev3_compilation
|
/install_pack/ev3cleti_library/binary/newProject.sh
|
UTF-8
| 2,513
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Scaffold a new EV3 C project: read templates for main.c and the Makefile,
# then (below) ask for directory/name/robot IP and write the patched files.
# NOTE(review): `read -d ''` returns non-zero at EOF even on success; that is
# harmless here only because the script does not run under `set -e`.
read -d '' main_text <<"EOF"
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <ev3_core.h>
int main ( void )
{
// Here lay code =)
return( 0 );
}
EOF
# Makefile template; the __ip_address and __project_name placeholders are
# substituted below before the file is written. The quoted "EOF" delimiter
# keeps all $(...) make syntax literal.
# NOTE(review): make requires hard tabs before recipe commands — verify the
# recipe lines in this template are tab-indented in the original file.
read -d '' makefile_text <<"EOF"
#
# Configurable area
#
# This must be set to robots ip
ip_robot_address = __ip_address
execute_name = __project_name
objects = $(patsubst %.c,%.o,$(wildcard *.c))
cross_compiler_directory = /usr/arm-linux-gnueabi
ev3cleti_lib_directory = $(HOME)/ev3cleti
build_directory = build
library_directory = $(cross_compiler_directory)/lib $(ev3cleti_lib_directory)/lib
include_directory = $(cross_compiler_directory)/include $(ev3cleti_lib_directory)/include
libraries_static = ev3cleti stdc++
libraries_dynamic = pthread m
cflags = $(addprefix -I, $(include_directory)) -Wall -O3 -std=gnu99
ldflags = $(addprefix -L, $(library_directory)) -Wl,-Bstatic $(addprefix -l, $(libraries_static)) -Wl,-Bdynamic $(addprefix -l, $(libraries_dynamic))
CC = arm-linux-gnueabi-gcc
LD = $(CC)
execute_file = $(build_directory)/$(execute_name)
all: $(build_directory) $(execute_file)
$(execute_file): $(build_directory)/$(objects)
$(LD) -o $@ $< $(ldflags)
$(build_directory)/%.o : %.c
$(CC) $(cflags) -c $< -o $@
$(build_directory):
mkdir -p $@
clean:
rm -rf $(build_directory)
install: all
scp $(execute_file) robot@$(ip_robot_address):~/$(execute_name)
EOF
# ------------------------------------------------------------------------------------------------
# Interactive part: ask for project directory, project name and robot IP
# (each with a default), create the directory, then write main.c and the
# Makefile with the template placeholders substituted.
echo -n "Enter project directory [default: $HOME]:"
read project_directory
if [[ -z $project_directory ]]; then
# Empty input
project_directory=$HOME
else
if [[ ! $project_directory == /* ]]; then
# Relative path
project_directory="$HOME/$project_directory"
fi
fi
echo "Project directory: $project_directory"
echo -n "Enter project name [default: ev3_application]:"
read project_name
if [[ -z $project_name ]]; then
project_name=ev3_application
fi
echo "Project name: $project_name"
mkdir -p $project_directory/$project_name
if [[ ! -d $project_directory/$project_name ]]; then
echo 'Unable to create project directory'
exit 1
fi
echo -n "Enter robot ip [default: 0.0.0.0]:"
read robot_ip
if [[ -z $robot_ip ]]; then
robot_ip="0.0.0.0"
fi
echo "Robot ip: $robot_ip"
echo "$main_text" > $project_directory/$project_name/main.c
# ${var/pat/repl} replaces the FIRST occurrence of each placeholder; the
# templates use each placeholder exactly once.
makefile_text=${makefile_text/__ip_address/$robot_ip}
makefile_text=${makefile_text/__project_name/$project_name}
echo "$makefile_text" > $project_directory/$project_name/Makefile
| true
|
f0ee0e4e9e15953f5cd23b385980e44d99221c93
|
Shell
|
hubrigant/dotfiles
|
/config_masters/theming.zsh
|
UTF-8
| 427
| 3.796875
| 4
|
[] |
no_license
|
# Select a zsh prompt theme: source ~/.<TRY_THEME>.zsh when present, otherwise
# complain; the fallback branch sources the p9k theme.
TRY_THEME="p10k"
# ${TRY_THEME+1} expands to 1 whenever TRY_THEME is set — and it is always set
# just above, so NOTE(review): the final else branch below is unreachable.
if [ -n "${TRY_THEME+1}" ]; then
if [ -f ${HOME}/.${TRY_THEME}.zsh ]; then
# ${DEBUG} is executed as a command (expected to be true/false).
if ${DEBUG}; then
echo "Using the ${TRY_THEME} theme."
fi
source ${HOME}/.${TRY_THEME}.zsh
else;
# NOTE(review): `else;` is rejected by bash/sh — confirm this file is only
# ever sourced by zsh.
echo "TRY_THEME was set, but the RC file ${HOME}/.${TRY_THEME}.zsh doesn't exist"
fi
else;
echo "Couldn't use ${TRY_THEME}, so defaulting to p9k."
source ${HOME}/.p9k.zsh
fi
| true
|
ec681cfd00c9ce4cc342cb47274d30bb39d6c6be
|
Shell
|
jaschenk/Tomcat-Instance-Generator-2.0.0
|
/src/main/resources/tc/bin/setenv.sh
|
UTF-8
| 4,831
| 2.875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# -----------------------------------------------------------
# Tomcat Instance Environment Settings
# -----------------------------------------------------------
#
# *************************************************************************
# Tomcat Instance Generation
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Instance UUID: ${TOMCAT_INSTANCE_UUID}
# Instance Name: ${TOMCAT_INSTANCE_NAME}
# Environment: ${TOMCAT_ENVIRONMENT_NAME}
#
# *************************************************************************
#
#
# set JRE (Java Runtime Environment)
JRE_HOME=${CATALINA_HOME}/jre
CATALINA_PID=${CATALINA_BASE}/pid/tc.pid
# Short hostname (before the first '.') and the environment token taken from
# the second '-'-separated field of the hostname, lower- and upper-cased.
HOSTNAME_SHORT=$(echo -n $HOSTNAME | cut -d'.' -f 1)
ENV_NAME_LC=$(echo -n $HOSTNAME | cut -d'-' -f 2 | awk '{print tolower($0)}')
ENV_NAME_UC=$(echo -n $HOSTNAME | cut -d'-' -f 2 | awk '{print toupper($0)}')
export JRE_HOME CATALINA_PID HOSTNAME_SHORT ENV_NAME_LC ENV_NAME_UC
#
# Set Logging
CATALINA_OUT=${CATALINA_BASE}/logs/${TOMCAT_INSTANCE_NAME}_catalina.out
export CATALINA_OUT
#
# Set JAVA JVM Options
# NOTE(review): ${JVM_OPTS} is expanded and executed as a command line — this
# file is a generation template (see header), so the placeholder is presumably
# replaced with real statements by the instance generator; verify.
${JVM_OPTS}
#
# Set Global Instance Properties
CATALINA_OPTS="${CATALINA_OPTS} -Dinfra.tomcat.env=\"BASE ${TOMCAT_INSTANCE_NAME}\""
CATALINA_OPTS="${CATALINA_OPTS} -Dinfra.tomcat.instance.name=${TOMCAT_INSTANCE_NAME}"
${INSTANCE_PROPERTIES}
#
# Set Runtime Management Properties
#if [ ${ENV_NAME_UC} = "PRD" ]; then
# CATALINA_OPTS="${CATALINA_OPTS} -Dcom.managecat.controller.url=http://spitfire:8085/controller/"
# CATALINA_OPTS="${CATALINA_OPTS} -Dcom.managecat.collector.url=http://spitfire:8085/collector/"
# CATALINA_OPTS="${CATALINA_OPTS} -Dcom.managecat.console.agent.accountkey=5fc4aef328866ade38ec33c7879b96e4"
#
#else
# CATALINA_OPTS="${CATALINA_OPTS} -Dcom.managecat.controller.url=http://rh-tst-01:8080/controller/"
# CATALINA_OPTS="${CATALINA_OPTS} -Dcom.managecat.collector.url=http://rh-tst-01:8080/collector/"
# CATALINA_OPTS="${CATALINA_OPTS} -Dcom.managecat.console.agent.accountkey=00000000000000000000000000000000"
#fi
#CATALINA_OPTS="${CATALINA_OPTS} -Dcom.managecat.console.agent.groupname=${ENV_NAME_UC}"
#CATALINA_OPTS="${CATALINA_OPTS} -Dcom.managecat.console.agent.servername=${HOSTNAME_SHORT}_${TOMCAT_INSTANCE_NAME}"
#CATALINA_OPTS="${CATALINA_OPTS} -Dcom.managecat.console.agent.hostname=${HOSTNAME_SHORT}"
#CATALINA_OPTS="${CATALINA_OPTS} -Dcom.managecat.console.agent.port=8081"
#CATALINA_OPTS="${CATALINA_OPTS} -Dcom.managecat.console.agent.contextname=console"
#CATALINA_OPTS="${CATALINA_OPTS} -Dcom.managecat.console.agent.secure=false"
#CATALINA_OPTS="${CATALINA_OPTS} -Djava.library.path=/opt/tomcat-home/mcatlib/sigar-lib/lib"
# NOTE(review): expanded and executed as a command line — placeholder
# presumably substituted with real settings by the instance generator; verify.
${INSTANCE_MANAGEMENT_PROPERTIES}
#
#
# Set Garbage Collection Properties
#CATALINA_OPTS="${CATALINA_OPTS} -XX:SurvivorRatio=8"
#CATALINA_OPTS="${CATALINA_OPTS} -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled"
#CATALINA_OPTS="${CATALINA_OPTS} -XX:+UseCMSInitiatingOccupancyOnly"
#CATALINA_OPTS="${CATALINA_OPTS} -XX:CMSInitiatingOccupancyFraction=70"
#CATALINA_OPTS="${CATALINA_OPTS} -XX:+ScavengeBeforeFullGC -XX:+CMSScavengeBeforeRemark"
#CATALINA_OPTS="${CATALINA_OPTS} -XX:+PrintGCDateStamps -verbose:gc -XX:+PrintGCDetails"
#CATALINA_OPTS="${CATALINA_OPTS} -Xloggc:\"${CATALINA_BASE}/logs/${TOMCAT_INSTANCE_NAME}_gclog\""
#CATALINA_OPTS="${CATALINA_OPTS} -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M"
#CATALINA_OPTS="${CATALINA_OPTS} -XX:+HeapDumpOnOutOfMemoryError"
#CATALINA_OPTS="${CATALINA_OPTS} -XX:+PrintConcurrentLocks"
#CATALINA_OPTS="${CATALINA_OPTS} -XX:HeapDumpPath=${CATALINA_BASE}/logs/${TOMCAT_INSTANCE_NAME}_dump.hprof"
# Specific Properties for ${TOMCAT_INSTANCE_NAME} JVM
#
# New Relic
#CATALINA_OPTS="${CATALINA_OPTS} -Dnewrelic.environment=${ENV_NAME_LC}"
#CATALINA_OPTS="${CATALINA_OPTS} -Dnewrelic.config.file=/opt/newrelic/newrelic-${TOMCAT_INSTANCE_NAME}.yml"
#CATALINA_OPTS="${CATALINA_OPTS} -javaagent:/opt/newrelic/newrelic.jar"
#
# Export Property
export CATALINA_OPTS
#
#
# Set Load Library Path
LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$CATALINA_HOME/lib
export LD_LIBRARY_PATH
#
#
# ########################################################################################
# Additional Settings for Garbage Collection Processing ...
#
# -XX:SurvivorRatio=<ratio>
# -XX:+UseConcMarkSweepGC -XX:+CMSParallel#arkEnabled
# -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=<percent>
# -XX:+ScavengeBeforeFullGC -XX:+CMSScavengeBefore#ark
# -XX:+PrintGCDateStamps -verbose:gc -XX:+PrintGCDetails -Xloggc:"<path to log>"
# -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M
# -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=<path to dump>`date`.hprof
#
# #######################################################################################
| true
|
7e65be4c59f0862d75d6a8a0dbc321b7c6344c95
|
Shell
|
XBrOtk/TCSS562-TLQ
|
/python_template/deploy/build_function.sh
|
UTF-8
| 274
| 2.765625
| 3
|
[] |
no_license
|
#! /bin/bash
# Build and package the function for AWS Lambda deployment:
# stages ../src plus the AWS platform shims into ./build_function and
# zips everything into ./build_function/index.zip.
#
# Abort on the first failed step so a partial build is never zipped
# (previously a failed `cp` or `cd` would still produce a zip, possibly
# of the wrong directory).
set -e

# Destroy and prepare build folder
rm -rf build_function
mkdir build_function

# Copy files to build folder.
cp -R ../src/* ./build_function
cp -R ../platforms/aws/* ./build_function

# Zip and submit to AWS Lambda.
cd ./build_function
zip -X -r ./index.zip *
| true
|
3f13ad90ee6ee9395bb7209263306072c1a05ac6
|
Shell
|
grawity/code
|
/bin/find-computer-name
|
UTF-8
| 704
| 3.96875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# get-computer-name -- Extract the computer name out of a mounted Windows partition
#
# Depends on `reged` from the 'chntpw' package.

# lib.bash supplies the debug/die helpers used below — TODO confirm.
. lib.bash || exit

# Each positional argument is treated as the mount point of a Windows partition.
for root; do
	root="${root%/}"
	# Case-insensitive search
	# Descend Windows/System32/config/SYSTEM one component at a time,
	# resolving each name case-insensitively at the current level.
	for name in 'Windows' 'System32' 'config' 'SYSTEM'; do
		debug "lookup '$name' in '$root'"
		# find either yields the real path (non-empty) or we bail out.
		next=$(find "$root" -mindepth 1 -maxdepth 1 -iname "$name") &&
		[[ $next ]] ||
			die "path '$root/$name' not found"
		root=$next
	done
	[[ -f $root ]] || die "hive '$root' not a regular file"
	echo "reading '$root'"
	# Export the ComputerName registry key from the SYSTEM hive and
	# grep the value line out of the dump (|& also captures stderr).
	reged -x "$root" \
		'HKEY_LOCAL_MACHINE\SYSTEM' \
		'\ControlSet001\Control\ComputerName\ComputerName' \
		/dev/stdout |&
		grep -i '"ComputerName"'
done
| true
|
aba23e71f41c1aa3869f9c1b2903b32726aeb9d5
|
Shell
|
ayonbakshi/RBD-Physics-Sim
|
/scripts/movie.sh
|
UTF-8
| 295
| 2.890625
| 3
|
[] |
no_license
|
# Script to generate an mp4 from png files of the form $1%d.png (as created by recording, for example)
# $1 is the common prefix of all png's and $2 is the output name
# Must be run from inside the "out" directory (enforced below).
set -e
# POSIX `test` has no `==` operator; use `=` so the guard is portable.
test "${PWD##*/}" = "out"
# Quote the user-supplied prefix and output name so paths containing
# spaces or glob characters survive (they were unquoted before).
ffmpeg -r 60 -f image2 -s 1920x1080 -i "$1%d.png" -vcodec libx264 -crf 25 -pix_fmt yuv420p "$2"
| true
|
61a63f7fbd7af43dacfd8629b648a94ed6745ab0
|
Shell
|
Hikaru-Morita/allPNGtoEPS
|
/getPNGfilename.sh
|
UTF-8
| 223
| 3.03125
| 3
|
[] |
no_license
|
# Convert every PNG under ./PNG_images to a level-2 EPS in ./EPS_images,
# keeping the base filename.
# Fixes: removed `PNGfilename_array=[]`, which assigned the literal string
# "[]" and was never used; quoted all expansions so filenames with spaces
# work; backticks replaced by $( ).
PNGdir_path="./PNG_images/*"
EPSdir_path="./EPS_images"

# $PNGdir_path is deliberately left unquoted so the shell expands the glob.
for filepath in $PNGdir_path; do
    filename=$(basename "$filepath" .png)
    convert "$filepath" eps2:"$EPSdir_path/$filename.eps"
done
| true
|
6a6348196f350e15780ceb21784b7d447c2d7147
|
Shell
|
enjoysoftware/atom-unofficial
|
/bin/atom-launch
|
UTF-8
| 286
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Launcher wrapper: runs apm in the foreground, the Atom editor detached.
# Create $XDG_RUNTIME_DIR if it doesn't exist
[ -n "$XDG_RUNTIME_DIR" ] && mkdir -p $XDG_RUNTIME_DIR -m 700
case "$@" in
    *bin/apm*)
        # apm is a command-line tool: keep it attached so its output is visible.
        exec "$@" --executed-from="$(pwd)" --pid=$$
        ;;
    *)
        # The editor itself: replace this shell, detach, and silence output.
        exec "$@" --executed-from="$(pwd)" --pid=$$ > /dev/null 2>&1 &
        ;;
esac
| true
|
802c38e97ecf924f738a5a11c08f53f13ace6098
|
Shell
|
knine79/notifyBuildResult
|
/completed.sh
|
UTF-8
| 431
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Notify about a finished build: computes the elapsed time since the
# recorded start timestamp and, unless notifications are off, sends the
# message plus duration via the sibling sendMessage script.
#   $1 - message text to send
BASEDIR=$(dirname "$0")
MSG=$1
NOTIFY=$(cat /usr/local/etc/NotifyBuildResult/notify)
START=$(cat /usr/local/etc/NotifyBuildResult/start-time)
END=$(date +%s)
# Elapsed seconds formatted as MM:SS. Arithmetic expansion replaces the
# old `echo ... | bc` subshell. NOTE(review): `date -r <epoch>` is the
# BSD/macOS form; on GNU date -r takes a file — confirm target platform.
TIME=$(date -r $(( END - START )) "+%M:%S")
rm /usr/local/etc/NotifyBuildResult/start-time

if [ "$NOTIFY" != "off" ]; then
    # Quoted so a BASEDIR or TIME containing spaces cannot split.
    "$BASEDIR"/sendMessage "$MSG" "$TIME"
    if [ "$NOTIFY" == "once" ]; then
        # One-shot mode: disarm after the first notification.
        echo off > /usr/local/etc/NotifyBuildResult/notify
    fi
fi
| true
|
8e6df3756305a3fc6a5526e7c478cbb8770feb86
|
Shell
|
tlaxson14/BashScripts
|
/readtest
|
UTF-8
| 547
| 3.578125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# This shell script demonstrates using the bash read command without a loop
echo "Enter in a string, then hit enter:"
read myvar
echo "You entered: $myvar"
# Write the string three times into a temp file named after this shell's PID ($$).
echo $myvar > "tempfilename$$"
echo $myvar >> "tempfilename$$"
echo $myvar >> "tempfilename$$"
echo "The temp file contents are:"
cat "tempfilename$$"
echo "THe first line of the file is:"
# Redirecting the file into `read` consumes only its first line.
read entireline < "tempfilename$$"
echo "$entireline"
# With two target variables, `read` splits on whitespace:
# first word into $firstword, the remainder into $restofline.
read firstword restofline < "tempfilename$$"
echo "First word of the first line: \"$firstword\""
echo "Rest of line: \"$restofline\""
| true
|
8083ea934bcc99c45882b175cc9a5edbb25e69c3
|
Shell
|
stephanfriedrich/dockerbunker
|
/data/services/padlockcloud/service.sh
|
UTF-8
| 1,876
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
######
# service specific configuration
# you should setup your service here
######
# Dockerbunker service descriptor for Padlock Cloud.
# NOTE(review): pre_configure_routine, post_configure_routine, set_domain,
# configure_mx, initial_setup_routine, basic_nginx, docker_run_all and
# post_setup_routine are provided by the dockerbunker framework that
# sources this file — confirm against the framework sources.

# overrides service specific docker-variables
safe_to_keep_volumes_when_reconfiguring=1
declare -A WEB_SERVICES
declare -a containers=( "${SERVICE_NAME}-service-dockerbunker" )
declare -a add_to_network=( "${SERVICE_NAME}-service-dockerbunker" )
declare -a networks=( )
declare -A volumes=( [${SERVICE_NAME}-data-vol-1]="/padlock/db" )
declare -A IMAGES=( [service]="chaosbunker/padlock-cloud" )

# service specific functions
# to setup save service specific docker-variables to environment file
# Interactively collects domain/MX/whitelist settings and appends them
# to the service environment file.
configure() {
	pre_configure_routine
	echo -e "# \e[4mPadlock Cloud Settings\e[0m"
	set_domain
	echo ""
	configure_mx
	# Rebuild the e-mail whitelist from scratch on every reconfigure.
	[[ -f ${CONF_DIR}/padlockcloud/whitelist ]] && rm ${CONF_DIR}/padlockcloud/whitelist
	! [[ -d ${CONF_DIR}/padlockcloud ]] && mkdir ${CONF_DIR}/padlockcloud
	read -p "Enter E-Mail addresses to whitelist (separated by spaces): " whitelist
	whitelist=( ${whitelist} )
	for email in ${whitelist[@]};do
		echo $email >> ${CONF_DIR}/padlockcloud/whitelist
	done
	# avoid tr illegal byte sequence in macOS when generating random strings
	if [[ $OSTYPE =~ "darwin" ]];then
		if [[ $LC_ALL ]];then
			oldLC_ALL=$LC_ALL
			export LC_ALL=C
		else
			export LC_ALL=C
		fi
	fi
	# Persist the collected settings to the per-service env file.
	cat <<-EOF >> "${SERVICE_ENV}"
	PROPER_NAME="${PROPER_NAME}"
	SERVICE_NAME=${SERVICE_NAME}
	SSL_CHOICE=${SSL_CHOICE}
	LE_EMAIL=${LE_EMAIL}
	# ------------------------------
	# General Settings
	# ------------------------------
	SERVICE_DOMAIN=${SERVICE_DOMAIN}
	## ------------------------------
	SERVICE_SPECIFIC_MX=${SERVICE_SPECIFIC_MX}
	EOF
	# Restore the caller's locale after the macOS workaround above.
	if [[ $OSTYPE =~ "darwin" ]];then
		[[ $oldLC_ALL ]] && export LC_ALL=$oldLC_ALL || unset LC_ALL
	fi
	post_configure_routine
}

# First-time setup: render nginx config and start the containers.
setup() {
	initial_setup_routine
	SUBSTITUTE=( "\${SERVICE_DOMAIN}" )
	basic_nginx
	docker_run_all
	post_setup_routine
}
| true
|
86fbc1d128cf41bb4090a8baa78ed13398d441a6
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/slingshot/PKGBUILD
|
UTF-8
| 1,299
| 2.84375
| 3
|
[] |
no_license
|
# Maintainer: twa022 <twa022 at gmail dot com>
# Contributor: jsteel <mail at jsteel dot org>
# Contributor: Erdbeerkaese
# Contributor: Arkham <arkham at archlinux dot us>
# Arch Linux PKGBUILD for the Slingshot gravity game; built by makepkg,
# which supplies $srcdir/$pkgdir and the variables below.

pkgname=slingshot
pkgver=0.8.1p
pkgrel=7
pkgdesc="A 2D strategy game in the gravity fields of several planets"
arch=('any')
url="http://slingshot.wikispot.org"
license=('GPL')
depends=('python2-pygame')
source=(http://downloads.sourceforge.net/$pkgname-game/Slingshot-$pkgver-linux.tar.gz
        $pkgname.sh
        $pkgname.patch)
md5sums=('cdfcf0c85dc7d1b203344c24f591e7b5'
         '38bb1952740a5780dda0b1ea32b933aa'
         'acf36309734e5357a5a641ea6bc415cf')

# Unpack the game files bundled inside the release tarball and apply
# the packaging patch.
build() {
  cd "$srcdir"/Slingshot-$pkgver-linux
  tar -xzf "$pkgname"_files.tar.gz
  patch -Np1 -i "$srcdir"/$pkgname.patch
}

# Install the python sources, game data, docs, desktop entry and the
# launcher wrapper into the package root.
package() {
  cd "$srcdir"/Slingshot-$pkgver-linux
  install -dm755 "$pkgdir"/usr/share/$pkgname/data/
  for file in "$pkgname"/*.py ; do
    install -Dm644 "$file" "$pkgdir"/usr/share/$pkgname/
  done
  for file in "$pkgname"/data/* ; do
    install -Dm644 "$file" "$pkgdir"/usr/share/$pkgname/data/
  done
  install -Dm644 readme "$pkgdir"/usr/share/doc/$pkgname/readme
  install -Dm644 $pkgname.desktop "$pkgdir"/usr/share/applications/$pkgname.desktop
  install -Dm755 "$srcdir"/$pkgname.sh "$pkgdir"/usr/bin/$pkgname
}
| true
|
02ec27ccb6844da5947e41e506022f2b75c058be
|
Shell
|
mhcerri/configs
|
/home/bin/git-fetch-from-origin
|
UTF-8
| 293
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Fetch the remote for the current branch. Branches following the
# "<branch>@<remote>" naming convention fetch from <remote>; every other
# branch fetches from origin. Extra arguments are forwarded to git fetch.
set -e

branch=$(git rev-parse --abbrev-ref HEAD)

if [[ $branch == *@* ]]; then
    # Handling special kernel branch...
    remote=${branch#*@}
    remote_branch=${branch%%@*}
else
    remote="origin"
    remote_branch=$branch
fi

echo git fetch "$@" "$remote"
git fetch "$@" "$remote"
| true
|
b700e871bbe2c12572a8461c9ac87339b6d1fa40
|
Shell
|
RafaelGPaz/dotfiles
|
/bin/tours/swfpath_to_currentxml.sh
|
UTF-8
| 492
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
# Description: replace '%SWFPATH%' for '%CURRENTXML%' in any xml file inside any directory named 'scenes'
# Usage: Run inside the directory containing all the tours' folders
#
# Fix: the original `for x in $(find ...)` loops word-split on whitespace,
# silently mangling any path containing spaces. NUL-delimited find output
# read with `read -d ''` handles every legal filename.
find . -maxdepth 3 -type d -name 'scenes' -print0 |
while IFS= read -r -d '' tour; do
    find "$tour" -name '*.xml' -print0 |
    while IFS= read -r -d '' xmlfile; do
        echo "Editing $xmlfile ..."
        xmlfile_bck="${xmlfile}_bck"
        # Rewrite into a backup file, then move it over the original.
        sed -e 's/\%SWFPATH\%/\%CURRENTXML\%/g' "$xmlfile" > "$xmlfile_bck"
        mv "$xmlfile_bck" "$xmlfile"
    done
done
| true
|
87779fca860b518e64105c3460dc3c834c24e409
|
Shell
|
cedricwalter/Joomla-on-Synology-NAS
|
/scripts/postuninst
|
UTF-8
| 462
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/sh
# Synology DSM post-uninstall hook for the Joomla package.
# Skip all cleanup when the uninstall is actually part of an upgrade.
if [ "$SYNOPKG_PKG_STATUS" == "UPGRADE" -o -f "/tmp/Joomla.upgrade" ]; then
	exit 0
fi
# Remove the DSM web-UI shortcut for Joomla.
rm -rf /usr/syno/synoman/webman/3rdparty/Joomla

ServiceTool="/usr/syno/bin/servicetool"
ApacheUserScript="/usr/syno/etc/rc.d/S97apache-user.sh"
# Restart the web service: prefer servicetool (newer DSM); fall back to
# the apache init script if servicetool is missing or fails.
if [ -e "$ServiceTool" ]; then
	$ServiceTool --restart webservice > /dev/null 2>&1
	if [ "0" != "$?" ]; then
		$ApacheUserScript restart > /dev/null 2>&1
	fi
else
	$ApacheUserScript restart > /dev/null 2>&1
fi
exit 0
| true
|
790c9d6bb6eb5f139f9182fc6ef300fed2b25e9b
|
Shell
|
caguerra/Burkardt-Fortran-90
|
/f_src/lagrange_interp_nd_test/lagrange_interp_nd_test.sh
|
UTF-8
| 489
| 2.765625
| 3
|
[] |
no_license
|
#! /bin/bash
#
# Compile, link, run and clean up the lagrange_interp_nd test program.
# Each stage prints a diagnostic and stops (with status 0, matching the
# original) if its command fails.
#
if ! gfortran -c -Wall lagrange_interp_nd_test.f90; then
  echo "Compile error."
  exit
fi
#
if ! gfortran lagrange_interp_nd_test.o $HOME/lib/lagrange_interp_nd.o $HOME/lib/r8lib.o; then
  echo "Load error."
  exit
fi
rm lagrange_interp_nd_test.o
#
mv a.out lagrange_interp_nd_test
if ! ./lagrange_interp_nd_test > lagrange_interp_nd_test.txt; then
  echo "Run error."
  exit
fi
rm lagrange_interp_nd_test
#
echo "Normal end of execution."
| true
|
38baa2f5efa772cbe1c7d7eb5125d556df2c6476
|
Shell
|
wjqgit/web
|
/test/shell/08-arithmatic_operations_integer_expr.sh
|
UTF-8
| 253
| 2.796875
| 3
|
[] |
no_license
|
#This is script#8
#Arithmetic operations (Integer)
# Rewritten to use shell arithmetic expansion $(( )) instead of spawning
# the external, deprecated `expr` utility for every operation; the printed
# values are identical (integer division truncates in both).
a=13 b=14
echo $(( a + b ))
echo $(( a - b ))
echo $(( a * b ))
echo $(( a / b ))
echo $(( a % b ))
#Precedence
c=7 d=7
echo $(( a * b + c / d ))
echo $(( a * (b + c) / d ))
| true
|
ba908fe86669c50728eea99a180f4b06aafb9b16
|
Shell
|
ryankumar/ryan_git
|
/shell-programming/1.sh
|
UTF-8
| 314
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/sh
# Demo: comment out a block of code with a here-document.
# Fix: use `:` as the consuming command and quote the delimiter — the
# original bare, unquoted `<< COMMENT` still performed $file/$1 parameter
# expansion inside the "comment" text.
: << 'COMMENT'
for file in *
do
if [ $file == $1 ]
then
echo $file
fi
done
COMMENT

# file search from current directory
# if [ -f add_number.sh ]
if test -f add_number.sh
# if test -d array_file
then
echo "file is exist"
else
echo "file doesn't exist"
fi
| true
|
8e51c2b8b48bacd04ec20e2be405c9e81da9f4ee
|
Shell
|
Code-Hex/Laputa
|
/migrate
|
UTF-8
| 303
| 2.6875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# migrate for ssl
# Generate a self-signed TLS certificate (key, CSR, CRT) under ./ssl on
# first run; subsequent runs are a no-op because ./ssl already exists.
# NOTE: `openssl req -new` prompts interactively for the subject fields.
if [ ! -d "./ssl" ]; then
    mkdir -p ./ssl
    cd ./ssl
    openssl genrsa -out laputa.key 2048
    openssl req -new -key laputa.key -sha256 -out laputa.csr
    # Self-sign for 36500 days (~100 years).
    openssl x509 -in laputa.csr -days 36500 -req -signkey laputa.key -sha256 -out laputa.crt
    cd ..
fi
| true
|
a954e103e6ae1fbf4c47da9fe6aed957e882a2ee
|
Shell
|
padde/dotfiles
|
/install.sh
|
UTF-8
| 1,037
| 3.21875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Bootstrap a new machine: dotfiles, Homebrew + packages, vim/tmux
# plugins, asdf and chrome dotfiles. Aborts if ~/.dotfiles already exists.
set -euo pipefail

DOTFILES=~/.dotfiles

# Refuse to clobber an existing installation.
if [ -d $DOTFILES ]; then
  echo "$DOTFILES already exists! Aborting."
  exit 1
fi

# Fetch dotfiles
git clone https://github.com/padde/dotfiles.git $DOTFILES
cd $DOTFILES

# Install homebrew
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"

# Install homebrew packages
brew update
brew tap Homebrew/bundle
brew bundle

# Symlink dotfiles
rcup -v rcrc
rcup -v

# Install Vim plugins
curl -fLo ~/.vim/autoload/plug.vim --create-dirs \
    https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
# </dev/tty keeps vim interactive even when the script's stdin is a pipe.
vim +PlugInstall +qall < /dev/tty "$@"

# Install TMUX plugins
git clone https://github.com/tmux-plugins/tpm ~/.tmux/plugins/tpm
~/.tmux/plugins/tpm/bindings/install_plugins

# Install ASDF version manager
cd "$HOME"
git clone https://github.com/asdf-vm/asdf.git ~/.asdf

# Install Chrome dotfiles
git clone https://github.com/matthewhadley/chromedotfiles.git ~/.chromedotfiles
ln -vs ~/.js ~/.chromedotfiles/chromedotfiles
|
a1f3ec399b7a9ed52fccece71316a341d8ebb1f5
|
Shell
|
kumarsaurabh20/NGShelper
|
/SGE/bact_01_create_and_submit_rapsearch_jobs.sh
|
UTF-8
| 2,746
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate and submit one SGE RAPSearch2 job per *R12_clean.fa sample:
# copies the bacterial nr database into place, writes a RS2job_<sample>.sh
# batch file per sample via a here-document, then qsubs them all.
clear
work_dir=/home/ISAD/mds207/adela_data
clean_dir=$work_dir/merged/deconseq-merged/clean
rap_dir=$work_dir/rap_clean_02_bact
megan_out=$rap_dir/megan_rma_files
nr_db=$work_dir/2.19_bact_nr_db
NSLOTS=30
#
#
# Make the rapdir if it does not already exist:
mkdir -p $rap_dir
# change into it and start creating the rapsearch job list etc.
cd $work_dir
echo "--------------------------------------------------------------------------"
echo "Copying the nr_db and nr_db.info files if they do not exist in $work_dir"
echo "Note that the nr_db file is very big and can take a while to copy over."
echo "--------------------------------------------------------------------------"
# -u: only copy when the source is newer / destination missing.
time cp -u /home/bio_app_support/rapsearch2_db/2.19_bact_nr_db .
time cp -u /home/bio_app_support/rapsearch2_db/2.19_bact_nr_db.info .
echo "--------------------------------------------------------------------------"
read -p "Press [Enter] key to continue..."
#
cd $rap_dir
mkdir -p $rap_dir/logs
echo "Changed to $PWD"
echo "Removing the old rapsearc_jobs.txt file"
rm rapsearch_jobs.txt
echo "done"
echo "now creating a loop which will take a stock of all *R12.fasta files"
echo "in the $clean_dir directory and then dump a new rapsearch_jobs.txt"
echo "file with the commands we need to run in the $rap_dir path"
echo "--------------------------------------------------------------------------"
cd $clean_dir
for file in $( ls -1 *R12_clean.fa )
do
# Sample ID = first 7 characters of the file name.
sample=$(echo $file | cut -c1-7)
echo "--------------------------------------------------------------------------"
echo "mkdir -p $rap_dir/rap_results/$sample"
echo "--------------------------------------------------------------------------"
mkdir -p $rap_dir/rap_results/$sample
LINE="rapsearch -q $clean_dir/$file -d $nr_db -z 30 -o $rap_dir/rap_results/$sample/$sample.rap -b 50 -v 50 -a t"
# echo "Currently processing command for $file"
#
echo "--------------------------------------------------------------------------"
echo "Creating batch file: $rap_dir/RS2_job_$sample.sh"
echo "--------------------------------------------------------------------------"
# NOTE(review): the message above says RS2_job_ but the file written below
# is RS2job_ (no underscore) — cosmetic mismatch, the submission loop uses
# the RS2job_ name.
# Unquoted EOF: $rap_dir/$sample/$NSLOTS/$LINE expand now; the literal
# "#$" lines are SGE directives in the generated job script.
cat << EOF > $rap_dir/RS2job_$sample.sh
#!/bin/bash
#$ -N RS2$sample
#$ -S /bin/bash
#$ -o $rap_dir/logs
#$ -e $rap_dir/logs
#$ -q all.q
#$ -pe smp 30
# Send mail at submission and completion of script
#$ -m abes
#$ -M mds207@exeter.ac.uk
. /etc/profile.d/modules.sh
module add shared rapsearch2
cd $rap_dir
export OMP_NUM_THREADS=$NSLOTS
$LINE
EOF
#
done
echo "----------------------------------"
read -p "Press [Enter] to continue submitting or Ctrl +C to quit..."
echo "----------------------------------"
#
for job in $(ls -1 $rap_dir/RS2job_*.sh); do qsub $job; done
| true
|
134bcc0bc2771268d78bac7d4ec2980f2b2ca12d
|
Shell
|
micado-scale/ansible-micado
|
/demos/wordpress/3-generate-traffic.sh
|
UTF-8
| 785
| 3.484375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Run a small wrk load test (1 thread, 1 connection, 10 minutes) against
# the service exposed on port 30010 of the MiCADO worker node.
# Connection settings come from the ./_settings file.

settings_file="./_settings"
. $settings_file

# The master address is mandatory.
if test -z "$MICADO_MASTER"; then
    echo "Please, set MICADO_MASTER in file named \"$settings_file\"!"
    exit
fi

# Fall back to the master node when no dedicated worker was configured.
if test -z "$MICADO_WORKER"; then
    MICADO_WORKER=$MICADO_MASTER
    echo "No MICADO_WORKER specified, trying MICADO MASTER - ensure port 30010 is open..."
fi

# Both SSL credentials must be present in the settings file.
if test -z "$SSL_USER"; then
    echo " Please, set SSL_USER in file named \"$settings_file\"!"
    exit
fi
if test -z "$SSL_PASS"; then
    echo " Please, set SSL_PASS in file named \"$settings_file\"!"
    exit
fi

echo "Small HTTP load test for 10 minutes at $MICADO_WORKER:30010... CTRL-C to stop"
wrk -t1 -c1 -d10m http://$MICADO_WORKER:30010
# if necessary, adjust the line above to increase the load test
# e.g. wrk -t4 -c40 -d10m http://$MICADO_WORKER:30010
| true
|
28d6da8e0a5b38c4dea05ce81cede8a04c20b803
|
Shell
|
liaoyunkun/tinynf
|
/experiments/perf-lowlevel/measure-stats.sh
|
UTF-8
| 3,784
| 3.734375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Collect low-level performance counters (via PAPI) for TinyNF and for
# DPDK (batch sizes 1 and 32), under one of four workloads selected by $1:
# (none)=nop, write, lookup, pol (policer). Requires the benchmarking
# config produced by a prior benchmark run, plus SSH access to the tester.

TINYNF_DIR='../../code'
DPDK_DIR='../baselines/dpdk/measurable-nop'
EXTRACFLAGS=''
LAYER='2'
RESULTS_SUFFIX='/nop'
# Select workload-specific flags, directories and env from $1.
if [ "$1" = 'write' ]; then
  EXTRACFLAGS='-DTN_DEBUG_PERF_DOWRITE'
  RESULTS_SUFFIX='/write'
elif [ "$1" = 'lookup' ]; then
  EXTRACFLAGS='-DTN_DEBUG_PERF_DOLOOKUP'
  RESULTS_SUFFIX='/lookup'
elif [ "$1" = 'pol' ]; then
  TINYNF_DIR='../baselines/policer/tinynf'
  DPDK_DIR='../baselines/policer/dpdk'
  LAYER='3'
  RESULTS_SUFFIX='/pol'
  export EXPIRATION_TIME='4000000'
  export POLICER_BURST='1000000000000'
  export POLICER_RATE='1000000000000'
  export RTE_SDK="$(pwd)/../baselines/vigor/dpdk"
  export RTE_TARGET=x86_64-native-linuxapp-gcc
elif [ ! -z "$1" ]; then
  echo 'Unknown parameter.'
  exit 1
fi

echo 'Measuring low-level stats; this will take less than an hour...'

# Ensure the papi submodule is cloned
git submodule update --init --recursive
# Get the absolute path to papi, we'll use it when building and running things from other dirs
PAPI_DIR="$(readlink -e papi)"
# Build papi if needed, but don't install it, we just want a local version
cd "$PAPI_DIR/src"
if [ ! -e "libpapi.so" ]; then
  # Allow external code to link with internal PAPI functions, see TinyNF's util/perf.h
  sed -i 's/LIBCFLAGS += -fvisibility=hidden//' Rules.pfm4_pe
  ./configure
  make
fi
cd - >/dev/null

# Ensure papi can read events
echo 0 | sudo dd status=none of=/proc/sys/kernel/perf_event_paranoid

# Ensure the results folder is deleted so we don't accidentally end up with stale results
mkdir -p results
RESULTS_DIR="$(readlink -f results$RESULTS_SUFFIX)"
rm -rf "$RESULTS_DIR"

# Ensure there are no leftover hugepages
sudo rm -rf /dev/hugepages/*

# Load the benchmark config
if [ -f ../../benchmarking/config ]; then
  . ../../benchmarking/config
else
  echo 'Please successfully run a benchmark at least once before running this'
  exit 1
fi

# Start a packet flood, waiting for it to have really started
ssh "$TESTER_HOST" "cd $REMOTE_FOLDER_NAME; ./bench-tester.sh flood $LAYER" >/dev/null 2>&1 &
sleep 30

# assumes pwd is right
# $1: result folder name, e.g. TinyNF or DPDK-1
# Runs the NF under measurement 10 times, writing counter logs to
# $RESULTS_DIR/$1/logN; a run that exceeds ~5 minutes is killed and retried.
run_nf()
{
  i=0
  while [ $i -lt 10 ]; do
    # Remove output before the values themselves
    LD_LIBRARY_PATH="$PAPI_DIR/src" TN_ARGS="$DUT_DEVS" taskset -c "$DUT_CPUS" make -f "$BENCH_MAKEFILE_NAME" run 2>&1 | sed '0,/Counters:/d' >"$RESULTS_DIR/$1/log$i" &
    nf_name="$(make -f "$BENCH_MAKEFILE_NAME" print-nf-name)"
    # Wait 5 minutes max before retrying, but don't always wait 5min since that would take too long
    for t in $(seq 1 60); do
      sleep 5
      if ! pgrep -x "$nf_name" >/dev/null ; then
        break
      fi
    done
    if pgrep -x "$nf_name" >/dev/null ; then
      # Still alive after the timeout: kill and retry (i is not advanced).
      sudo pkill -x -9 "$nf_name"
    else
      i=$(echo "$i + 1" | bc)
    fi
  done
}

# Collect data on TinyNF
cd "$TINYNF_DIR"
mkdir -p "$RESULTS_DIR/TinyNF"
TN_DEBUG=0 TN_CFLAGS="$EXTRACFLAGS -DTN_DEBUG_PERF=10000000 -flto -s -I'$PAPI_DIR/src' -L'$PAPI_DIR/src' -lpapi" make -f "$BENCH_MAKEFILE_NAME" build
run_nf 'TinyNF'
cd - >/dev/null

# Collect data on DPDK, without and with batching
cd "$DPDK_DIR"
for batch in 1 32; do
  mkdir -p "$RESULTS_DIR/DPDK-$batch"
  TN_BATCH_SIZE=$batch EXTRA_CFLAGS="$EXTRACFLAGS -DTN_DEBUG_PERF=10000000 -I'$PAPI_DIR/src'" EXTRA_LDFLAGS="-L'$PAPI_DIR/src' -lpapi" make -f "$BENCH_MAKEFILE_NAME" build >/dev/null 2>&1
  ../../../../benchmarking/bind-devices-to-uio.sh $DUT_DEVS
  run_nf "DPDK-$batch"
done
cd - >/dev/null

# Stop the flood
ssh "$TESTER_HOST" "sudo pkill -9 MoonGen"

# Since a few of the samples might have negative numbers of cycles or other such oddities...
echo "Done! If you want to look at the raw data, please read the disclaimer in the util/perf.h file of TinyNF's codebase."
| true
|
e34e0f7d2bbd03c7a3c5aeb681e116ea72eea17c
|
Shell
|
jpritt/boiler-experiments
|
/scripts/qsub/unpaired/simulate.sh
|
UTF-8
| 477
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Submit one unpaired-read simulation PBS job per read count in `sizes`:
# stages the per-size parameter file into the job directory, then qsubs
# simulate_unpaired.pbs with DIR pointing at that directory.
sizes=('5' '10' '25' '50' '100')
for i in "${sizes[@]}";
do
	mkdir -p /scratch0/langmead-fs1/user/jacob/compress-alignments-test/drosophila/tech_reps/single$i/all_reps
	cp simulation$i.par /scratch0/langmead-fs1/user/jacob/compress-alignments-test/drosophila/tech_reps/single$i/all_reps/simulation.par
	qsub -vDIR=/scratch0/langmead-fs1/user/jacob/compress-alignments-test/drosophila/tech_reps/single$i/all_reps ../simulate_unpaired.pbs
	# Throttle submissions so the scheduler is not hammered.
	sleep 2
done
| true
|
4b18a7c76f814816e2908bc7741252498f98f11e
|
Shell
|
ruebroad/scripts_and_stuff
|
/scripts/ubuntu_setup.sh
|
UTF-8
| 3,276
| 2.640625
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Ubuntu workstation bootstrap: installs common tooling (git, curl, brave,
# vscode, gitkraken), switches the shell to zsh with oh-my-zsh/powerlevel9k,
# installs fonts and zsh plugins, then python/awscli and polarr.
# NOTE(review): run interactively — several steps prompt or require logout.
sudo apt update
# install git
sudo apt install git -y
# Install curl
sudo apt install curl -y
# Install gnome tweaks
sudo apt install gnome-tweaks
# install brave browser
curl -s https://brave-browser-apt-release.s3.brave.com/brave-core.asc | sudo apt-key --keyring /etc/apt/trusted.gpg.d/brave-browser-release.gpg add -
source /etc/os-release
echo "deb [arch=amd64] https://brave-browser-apt-release.s3.brave.com/ $UBUNTU_CODENAME main" | sudo tee /etc/apt/sources.list.d/brave-browser-release-${UBUNTU_CODENAME}.list
sudo apt update
sudo apt install brave-keyring brave-browser
# install vscode
sudo snap install code --classic
# install gitkraken
sudo snap install gitkraken
# install zsh
sudo apt install zsh -y
# make zsh default - requires logout to work
chsh -s $(which zsh)
# install oh-my-zsh
sh -c "$(curl -fsSL https://raw.github.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
# install zsh-completions
git clone https://github.com/zsh-users/zsh-completions ~/.oh-my-zsh/custom/plugins/zsh-completions
## add zsh-conmpletions to plugins in .zshrc
autoload -U compinit && compinit
# install powerline
git clone https://github.com/bhilburn/powerlevel9k.git ~/.oh-my-zsh/custom/themes/powerlevel9k
echo "source ~/.oh-my-zsh/custom/themes/powerlevel9k/powerlevel9k.zsh-theme \n" >> ~/.zshrc
# install awesome fonts
mkdir -p ~/code/new_fonts/
git clone https://github.com/gabrielelana/awesome-terminal-fonts.git ~/code/useful-repos/
mkdir -p ~/.fonts
cp ~/code/useful-repos/awesome-terminal-fonts/build/* ~/.fonts
fc-cache -fv ~/.fonts
mkdir ~/.config/fontconfig/conf.d/
cp ~/code/useful-repos/awesome-terminal-fonts/config/10-symbols.conf ~/.config/fontconfig/conf.d/10-symbols.conf
echo "\n"
echo "source ~/.fonts/*.sh \n" >> ~/.zshrc
# Syntax highlighting
# NOTE(review): the git:// protocol is disabled on GitHub — this clone may
# fail; confirm and switch to https:// if so.
cd ~/.oh-my-zsh && git clone git://github.com/zsh-users/zsh-syntax-highlighting.git
echo "source ~/.oh-my-zsh/zsh-syntax-highlighting/zsh-syntax-highlighting.zsh \n" >> ~/.zshrc
# Auto suggestions
git clone https://github.com/zsh-users/zsh-autosuggestions ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-autosuggestions
echo "source ~/.zsh/zsh-autosuggestions/zsh-autosuggestions.zsh \n" >> ~/.zshrc
# plugins=(zsh-autosuggestions)
# Colour ls output
echo "alias ls='ls -G'" >> ~/.zshrc
# Add alias for terraform and chef test kitchen
echo "alias tfm='terraform'" >> ~/.zshrc
echo "alias ctk='kitchen'" >> ~/.zshrc
# zsh customizations
echo "plugins=(git aws brew docker github kubectl)" >> ~/.zshrc
echo "POWERLEVEL9K_MODE='nerdfont-complete'" >> ~/.zshrc
echo "ZSH_THEME=\"powerlevel9k/powerlevel9k\"" >> ~/.zshrc
echo "POWERLEVEL9K_SHORTEN_STRATEGY=\"truncate_middle\"" >> ~/.zshrc
echo "POWERLEVEL9K_STATUS_VERBOSE=false" >> ~/.zshrc
echo "POWERLEVEL9K_LEFT_PROMPT_ELEMENTS=(dir vcs)" >> ~/.zshrc
echo "POWERLEVEL9K_RIGHT_PROMPT_ELEMENTS=(aws)" >> ~/.zshrc
echo "POWERLEVEL9K_DIR_HOME_SUBFOLDER_FOREGROUND=\"black\"" >> ~/.zshrc
echo "POWERLEVEL9K_DIR_HOME_SUBFOLDER_BACKGROUND=\"135\"" >> ~/.zshrc
echo "POWERLEVEL9K_AWS_FOREGROUND=\"black\"" >> ~/.zshrc
echo "POWERLEVEL9K_AWS_BACKGROUND=\"166\" # darkorange3a" >> ~/.zshrc
# install python, pip, awscli
sudo apt install python3 python3-pip -y
pip3 install awscli --upgrade
# install polarr
sudo snap install polarr
| true
|
4aa0f107ab442800e072438dbca94e65add672b8
|
Shell
|
aaivu/aaivu-machine-trans-eng-sin
|
/src/Subword-segmentation/Transformer-subword-regularization/scripts/preprocess-joined-ensi.sh
|
UTF-8
| 1,828
| 2.96875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Sentencepiece unigram subword preprocessing for the en-si parallel corpus:
# trains a joint (source+target) model, encodes train/valid/test with
# subword regularization (nbest sampling, alpha 0.1), then binarizes the
# result for fairseq with a joined dictionary.
SRC=en
TGT=si
VOCABSIZE=5000 # vocabulary size
TRAIN_MINLEN=0

ROOT=$(dirname "$0")
SCRIPTS=$ROOT/scripts
DATA=$ROOT/data
TMP=$DATA/${SRC}_${TGT}_unigram${VOCABSIZE}
DATABIN=$ROOT/data-bin/${SRC}_${TGT}_unigram${VOCABSIZE}
mkdir -p $TMP $DATABIN

SPM_TRAIN=$SCRIPTS/spm_train.py
SPM_ENCODE=$SCRIPTS/spm_encode.py

TRAIN_SET="train-dataset/train"
VALID_SET="valid-dataset/valid"
TEST_SET="test-dataset/test"

# joined model for unigram based subword segementation with sentencepiece over the tokenized source and target
python $SPM_TRAIN \
    --input=$DATA/${TRAIN_SET}.$SRC,$DATA/${TRAIN_SET}.$TGT \
    --model_prefix=$DATABIN/sentencepiece.unigram \
    --vocab_size=$VOCABSIZE \
    --character_coverage=1.0 \
    --model_type=unigram

# encode train
python $SPM_ENCODE \
    --model $DATABIN/sentencepiece.unigram.model \
    --output_format=piece \
    --inputs $DATA/${TRAIN_SET}.$SRC $DATA/${TRAIN_SET}.$TGT \
    --outputs $TMP/train.unigram.$SRC $TMP/train.unigram.$TGT \
    --min-len $TRAIN_MINLEN \
    --nbest_size 64 \
    --alpha 0.1

# encode valid
python $SPM_ENCODE \
    --model $DATABIN/sentencepiece.unigram.model \
    --output_format=piece \
    --inputs $DATA/${VALID_SET}.$SRC $DATA/${VALID_SET}.$TGT \
    --outputs $TMP/valid.unigram.$SRC $TMP/valid.unigram.$TGT \
    --nbest_size 64 \
    --alpha 0.1

# encode test
python $SPM_ENCODE \
    --model $DATABIN/sentencepiece.unigram.model \
    --output_format=piece \
    --inputs $DATA/${TEST_SET}.$SRC $DATA/${TEST_SET}.$TGT \
    --outputs $TMP/test.unigram.$SRC $TMP/test.unigram.$TGT \
    --nbest_size 64 \
    --alpha 0.1

# binarize data
fairseq-preprocess \
    --source-lang $SRC --target-lang $TGT \
    --trainpref $TMP/train.unigram --validpref $TMP/valid.unigram --testpref $TMP/test.unigram \
    --destdir $DATABIN \
    --joined-dictionary \
    --workers 4
| true
|
a8172144ee7de651de0e9f9520e0cdbc8e42ec83
|
Shell
|
hopecaogit/sinatra
|
/check_ec2_public_dns.sh
|
UTF-8
| 458
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Print the public DNS name, public IP and state of the EC2 instance
# recorded in a resource file. The file (first argument) must define
# the ${inst_id} variable when sourced.

#first parameter of this script is resource filename
if (( $# == 0 )); then
    echo "Please provide resource filename as first parameter"
    exit 1
fi

deploy_region="ap-southeast-2"
resr_file=${1}
source ${resr_file}

aws ec2 describe-instances --instance-ids ${inst_id} --region ${deploy_region} \
    --query 'Reservations[*].Instances[0].{PublicDnsName:PublicDnsName,Status:State.Name,PublicIpAddress:PublicIpAddress}' \
    --output table
| true
|
415b1b47922b79a1dff1d404e46785b760732638
|
Shell
|
agonzalezcollins/Instalaciones
|
/apps/Robomongo/robomongo.sh
|
UTF-8
| 875
| 2.765625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Download Robo 3T 1.2.1, install it under /usr/local/bin/robomongo,
# register a shell alias and set up a desktop icon.
# Fixes: dropped the useless `sudo` on the alias echo (the >> redirection
# runs as the invoking user regardless); removed the fish/csh-style
# `set PATH ...` line, which in bash silently clobbers the positional
# parameters instead of setting PATH; the exported PATH now matches the
# actual install location (/usr/local/bin, not /usr/bin).
echo "Ver ultima version en: https://robomongo.org/download"
wget https://download.robomongo.org/1.2.1/linux/robo3t-1.2.1-linux-x86_64-3e50a65.tar.gz
tar -xvzf robo3t-1.2.1-linux-x86_64-3e50a65.tar.gz
rm robo3t*.tar.gz
sudo mkdir -p /usr/local/bin/robomongo
sudo mv robo3t-1.2.1-linux-x86_64-3e50a65/ /usr/local/bin/robomongo
cd /usr/local/bin/robomongo/bin
# The alias goes into the invoking user's bashrc.
echo "alias robomongo='/usr/local/bin/robomongo/bin/robo3t'" >> ~/.bashrc
source ~/.bashrc
robomongo
echo "Create ICONS:"
sudo mkdir -p /usr/share/applications/myicons/
cd /usr/share/applications/myicons/
sudo wget https://blog.robomongo.org/content/images/2015/12/robomongo-128x128.png -O robomongo.png
# NOTE(review): Robomongo.desktop is expected in the current directory at
# this point — verify where that file is supposed to come from.
sudo cp Robomongo.desktop /usr/share/applications/Robomongo.desktop
# If you want to add it to the PATH
export PATH=/usr/local/bin/robomongo/bin:$PATH
| true
|
4c29b0614f4d488187c519608f609e28b1ba319b
|
Shell
|
ghxwm/tflow
|
/jalangi/config/likely-types-on-trace1.sh
|
UTF-8
| 515
| 2.6875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Replay a previously recorded Jalangi trace (jalangi_trace1) with the
# likely-type inference analysis, then render the resulting type graph
# to jalangi/out/jalangi_types.png and embed it in out.html.
export PATH=$PATH:/opt/local/bin:/usr/local/bin
# Resolve the repository root: two levels above this script's directory.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../.." && pwd )"
cd $DIR
# Reset the report page before the run.
echo "" > jalangi/out/out.html
echo "Inferring likely types ..."
export JALANGI_MODE=replay
export JALANGI_ANALYSIS=analyses/likelytype/LikelyTypeInferEngine
node src/js/commands/replay.js jalangi_trace1
sleep 1
#export DYLD_LIBRARY_PATH=/opt/local/lib
# Render the dot graph produced by the analysis (jalangi_types.dot).
dot -Tpng jalangi_types.dot -o jalangi/out/jalangi_types.png
echo "<img src=\"jalangi_types.png\"></img>" > jalangi/out/out.html
| true
|
5c46ea3e91718089e241368ead5a53e430a5a435
|
Shell
|
bmwcarit/joynr
|
/tests/graceful-shutdown-test/build_all.sh
|
UTF-8
| 324
| 2.78125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build every docker image the graceful-shutdown test needs: first the
# shared joynr backend images, then the three test-specific images.
set -Eeuxo pipefail

# build joynr backend images
cd ../../docker/ && ./build_backend.sh
cd -

# build the other images
for image_dir in \
    graceful-shutdown-test-consumer \
    graceful-shutdown-test-provider \
    graceful-shutdown-test-second-level-provider
do
    pushd "$image_dir"
    ./build_docker_image.sh
    popd
done
| true
|
6ab1e535e42fd292df5eb10a8a31c9a1c6308090
|
Shell
|
m-walker/hello-world
|
/.bash_profile
|
UTF-8
| 638
| 2.71875
| 3
|
[] |
no_license
|
# Interactive shell profile: jump into ./code if present, enable
# case-insensitive completion, and build a git-aware coloured prompt.
if [ -e code ]; then
  cd code
fi
rvm_silence_path_mismatch_check_flag=1
# Case-insensitive tab completion.
bind "set completion-ignore-case on"

# Emit " (branch)" for the current git branch, or nothing outside a repo.
function parse_git_branch {
  git branch --no-color 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/ (\1)/'
}

# Assemble PS1: window title, timestamp, git branch, working dir, glyph.
# The \[...\] pairs mark non-printing escape sequences for bash.
function prompt {
  local RED="\[\033[0;31m\]"
  local CHAR="♥"
  local BLUE="\[\e[0;49;34m\]"
  export PS1="\[\e]2;\u@\h\a[\e[37;44;1m\]\t\[\e[0m\]]$RED\$(parse_git_branch) \[\e[32m\]\W\[\e[0m\]\n\[\e[0;31m\]$BLUE//$RED $CHAR \[\e[0m\]"
  export PS2='> '
  export PS4='+ '
}
prompt
export PATH=/var/opt/rails/bin:/var/opt/httpserver/bin:/var/opt/atom/bin:/var/opt/shotgun/bin:/var/opt/rackup/bin:$PATH
| true
|
cf1289adb1012147cddde93f7a68757a200f8141
|
Shell
|
me1kd/urlwatch_conf
|
/loop.sh
|
UTF-8
| 125
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the given python script forever, appending its output to ./logs.txt
# and sleeping between iterations.
#   $1 - python script to execute
#   $2 - seconds to sleep between runs
while :
do
  now=$(date +"%T")
  echo "Current time : $now"
  # Arguments are quoted so a script path containing spaces works
  # (they were unquoted and subject to word splitting before).
  python3 "$1" >> ./logs.txt
  sleep "$2"
done
| true
|
49c2a36a70ba29ab74e4ce6404b0986210a04a61
|
Shell
|
rhodey/usbreset
|
/reset-rtl.sh
|
UTF-8
| 398
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Reset every attached RTL2838 (RTL-SDR) USB device by handing its device
# node to the `usbreset` docker image.
device_count=`lsusb -d 0x0bda:2838 | wc -l`
if [ "$device_count" -eq "0" ]; then
  echo "no RTL devices found."
  exit 1
fi
# Extract "BUS DEV" pairs (columns 5-8 and 16-18 of the lsusb listing),
# e.g. "001 004", one per matching device.
device_ids=`lsusb -d 0x0bda:2838 | cut -c5-8,16-18`
while read -r device_id; do
  # Word-split the pair into bus number and device number.
  parts=($device_id)
  device="/dev/bus/usb/${parts[0]}/${parts[1]}"
  # Map the device node into the container and reset it there.
  docker run \
    --device $device:$device \
    --rm usbreset $device
done <<< "$device_ids"
| true
|
d2488a163ab0802a671211b2f2c93133e5c95ccd
|
Shell
|
tap52384/getgrav-php
|
/install-php.sh
|
UTF-8
| 4,496
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
APACHE_CONF=/usr/local/etc/httpd/httpd.conf
# Probe for Homebrew via the installed() helper and record the result in
# the global BREW_INSTALLED (0 = found on PATH, non-zero = missing).
checkBrew() {
    installed brew
    BREW_INSTALLED="$?"
}
# Checks whether the specified application is available in the $PATH.
#   $1 - command name to look up
# Returns 0 when found, 1 when missing or when no name was given.
function installed() {
    # empty case: empty string
    if [ -z "$1" ]; then
        return 1;
    fi

    # 'command -v' is the portable builtin lookup; the original used the
    # external, non-portable 'which' with an unquoted $1.
    command -v "$1" > /dev/null
    return $?
}
# Checks if the given line exists in the specified file and replaces it.
#   $1 - filename: the file to search in
#   $2 - needle:   regex locating the text to be replaced
#   $3 - haystack: sed replacement text (callers include the closing '/')
#   $4 - explain:  text printed to stdout describing what happened
# Returns 0 when a replacement was made, 1 otherwise.
function replaceline () {
    # empty case: file does not exist
    # https://stackoverflow.com/a/638980/1620794
    if [ ! -f "$1" ]; then
        echo "replaceline() failed; file not found: $1"
        # BUG FIX: the original wrote 'exit() { return 1; }', which defines
        # a function named 'exit' instead of leaving this function.
        return 1
    fi

    # grep for the needle in the file
    echo "grep needle \"$2\" to find in file \"$1\""
    grep -q "$2" "$1"
    LINE_EXISTS=$?

    if [ "$LINE_EXISTS" -eq 0 ]; then
        # sed requires an empty parameter after the -i option on macOS
        # https://stackoverflow.com/a/16746032/1620794
        echo "$4"
        # NOTE: the s/// delimiter is intentionally left open here —
        # callers pass the trailing '/' as part of $3.
        sed -i "" "s/$2/$3" "$1"
        return $?
    fi

    echo "needle \"$2\" not found. moving on..."
    return 1
}
# 0. Detect if certain requirements are already installed
checkBrew

# The current date and time, used for filenames
# https://unix.stackexchange.com/a/57592/260936
TODAY=`date +%Y-%m-%d.%H:%M:%S`
TODAY_SUFFIX=`date +%Y%m%d.%H%M%S`

# The current username
CURRENT_USERNAME=$(id -un)

# Check whether Ruby is installed (should be by default on macOS)
which ruby > /dev/null
RUBY_INSTALLED=$?
APACHE_LOCATION=$(which apachectl)

# 1. If Homebrew is not installed, go ahead and install it
if [ "$BREW_INSTALLED" -eq 1 ]; then
    echo "Homebrew not installed; installing now..."
    ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
else
    echo "Homebrew is already installed."
fi

# Checks again to make sure Homebrew is installed
checkBrew
if [ "$BREW_INSTALLED" -eq 1 ]; then
    echo "Homebrew unavailable and installation failed; stopping here..."
    # BUG FIX: the original 'exit() { return 1; }' defined a function named
    # 'exit' and fell through; the script must actually terminate here.
    exit 1
fi

# 2. Install Apache 2.4 via Homebrew
echo "Shutting down Apache..."
sudo apachectl stop
echo "Unloading native Apache service..."
sudo launchctl unload -w /System/Library/LaunchDaemons/org.apache.httpd.plist 2>/dev/null

# Detect if Homebrew package is installed
# https://stackoverflow.com/a/20802425/1620794
brew ls --versions httpd
BREW_HTTPD_INSTALLED=$?
if [ "$BREW_HTTPD_INSTALLED" -eq 1 ]; then
    echo "Installing httpd formula (Apache)..."
    brew install httpd
else
    echo "The httpd formula (Apache) is already installed via Homebrew"
fi

echo "Setting Apache to auto-start upon system boot..."
sudo brew services start httpd

LOCALHOST_8080_RESPONSE=$(curl --write-out %{http_code} --silent --insecure --output /dev/null http://localhost:8080)
LOCALHOST_80_RESPONSE=$(curl --write-out %{http_code} --silent --insecure --output /dev/null http://localhost:80)

# BUG FIX: the original required each response to be simultaneously < 200
# AND > 204, which is impossible; the intent is "outside 200-204" on both
# ports, i.e. neither port answered with a success status.
if { [ "$LOCALHOST_8080_RESPONSE" -lt 200 ] || [ "$LOCALHOST_8080_RESPONSE" -gt 204 ]; } && \
   { [ "$LOCALHOST_80_RESPONSE" -lt 200 ] || [ "$LOCALHOST_80_RESPONSE" -gt 204 ]; }; then
    echo "Localhost unavailable for both port 80 and 8080; stopping..."
    exit 1
fi

echo "Localhost is available; Apache is currently running..."

# 3. Apache Configuration
# Backup current Apache conf file
echo "Creating a backup of the Apache configuration file before continuing..."
cp -v "$APACHE_CONF" "$APACHE_CONF.original.$TODAY_SUFFIX"
if [ ! $? -eq 0 ]; then
    echo "Could not successfully create a backup of the Apache conf file; stopping to prevent errors..."
    exit 1
fi

# Change the default port of 8080 to 80
replaceline "$APACHE_CONF" '^Listen 8080$' 'Listen 80/' 'Change Apache port to 80...'

# Change the DocumentRoot to /Users/CURRENT_USERNAME/Sites
# BUG FIX: the original ran mkdir on the sed-escaped string
# "\/Users\/$CURRENT_USERNAME\/Sites/", creating a directory with literal
# backslashes; mkdir needs the plain path (escaping belongs to sed only).
mkdir -p "/Users/$CURRENT_USERNAME/Sites"
replaceline "$APACHE_CONF" '^DocumentRoot.*' "DocumentRoot \/Users\/$CURRENT_USERNAME\/Sites/" "Set DocumentRoot to /Users/$CURRENT_USERNAME/Sites..."

# Change the directory for the DocumentRoot to /Users/CURRENT_USERNAME/Sites
DOCUMENTROOT_LINE_NUM=$(grep -n '^DocumentRoot.*' /usr/local/etc/httpd/httpd.conf | cut -f1 -d:)
| true
|
80a630e859a736639bd79f358623c9fb0e13d7ba
|
Shell
|
nyclei/eyNotes
|
/rcl-platform/tariq-scripts/create-marathon-lb-accounts.sh
|
UTF-8
| 3,703
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# SCRIPT: install-marathon-lb.sh

# Check if the DC/OS CLI is in the PATH — abort early, since every
# subsequent step shells out to 'dcos'.
CMD_FILE=$(which dcos)
if [ "$CMD_FILE" == "" ]
then
    echo
    echo " The DC/OS Command Line Interface binary is not installed or not in your path. Please install it."
    echo " Exiting."
    echo
    exit 1
fi
# Check if the JQ program is in the PATH; it is needed at the end of the
# script to pretty-print the stored secret.
CMD_FILE=$(which jq)
if [ "$CMD_FILE" == "" ]
then
    echo
    echo " The JSON Query (jq) binary is not installed or not in your path. Please install it."
    # BUG FIX: this line was 'echoZZ', a typo that aborts the error
    # message with "command not found".
    echo
    echo " brew install jq -- on a Mac"
    echo
    echo " Exiting......"
    echo
    exit 1
fi
# Check if the DC/OS CLI is at least 0.4.13
CLI_VER=$(dcos --version | grep dcoscli.version | cut -d '=' -f 2)
CLI_VER_MINOR=$(echo "$CLI_VER" | cut -d '.' -f 2)
CLI_VER_PATCH=$(echo "$CLI_VER" | cut -d '.' -f 3)
#echo "CLI Minor and Patch == $CLI_VER_MINOR $CLI_VER_PATCH"
# BUG FIX: in the original, '!' bound only to the first test, so the gate
# evaluated (minor < 4) AND (patch >= 13) — it let e.g. 0.4.12 through and
# never triggered for a supported 0.4.x. Reject when minor < 4, or when
# minor == 4 with patch < 13.
if [ "$CLI_VER_MINOR" -lt 4 ] || { [ "$CLI_VER_MINOR" -eq 4 ] && [ "$CLI_VER_PATCH" -lt 13 ]; }
then
    echo
    echo " Your DC/OS CLI version is not correct. Please upgrade your CLI version."
    echo " Exiting. "
    exit 1
fi
# Check if user is logged into the CLI: keep prompting for login until a
# core.dcos_acs_token exists in the CLI config.
while true
do
    AUTH_TOKEN=$(dcos config show core.dcos_acs_token 2>&1)
    if [[ "$AUTH_TOKEN" = *"doesn't exist"* ]]
    then
        echo
        echo " Not logged into the DC/OS CLI. Running login command now. Or press CTL-C "
        echo
        dcos auth login
    else
        break
    fi
done

# Check if the dcos acs token is valid by probing an authenticated
# command ('dcos node') and re-prompting for login on rejection.
while true
do
    RESULT=$(dcos node 2>&1)
    if [[ "$RESULT" = *"Your core.dcos_acs_token is invalid"* ]]
    then
        echo
        echo " Your DC/OS dcos_acs_token is invalid. Running login command now. Or press CTRL-C "
        echo
        dcos auth login
    else
        break
    fi
done
# Install the Security Subcommand into the CLI
echo
#dcos package install --yes --cli dcos-enterprise-cli

# Create/Install the Certificate for Marthon-LB
echo
echo " Creating/installing the certificate for Marthon-LB"
echo
echo " Please ignore the message: Failed to execute script dcos-security"
echo
# Generate a 4096-bit keypair and register a service account
# (dcos_marathon_lb-external) with the public half.
dcos security org service-accounts keypair -l 4096 marathon-lb-external-private-key.pem marathon-lb-external-public-key.pem
dcos security org service-accounts create -p marathon-lb-external-public-key.pem -d "dcos_marathon_lb-external service account" dcos_marathon_lb-external
dcos security org service-accounts show dcos_marathon_lb-external
# Create the two ACLs ('%252F' is a double-encoded '/'), then grant the
# service account read access on each.
curl -skSL -X PUT -H 'Content-Type: application/json' -d '{"description": "Marathon Services"}' -H "Authorization: token=$(dcos config show core.dcos_acs_token)" $(dcos config show core.dcos_url)/acs/api/v1/acls/dcos:service:marathon:marathon:services:%252F
curl -skSL -X PUT -H 'Content-Type: application/json' -d '{"description": "Marathon Events"}' -H "Authorization: token=$(dcos config show core.dcos_acs_token)" $(dcos config show core.dcos_url)/acs/api/v1/acls/dcos:service:marathon:marathon:admin:events
curl -skSL -X PUT -H "Authorization: token=$(dcos config show core.dcos_acs_token)" $(dcos config show core.dcos_url)/acs/api/v1/acls/dcos:service:marathon:marathon:services:%252F/users/dcos_marathon_lb-external/read
curl -skSL -X PUT -H "Authorization: token=$(dcos config show core.dcos_acs_token)" $(dcos config show core.dcos_url)/acs/api/v1/acls/dcos:service:marathon:marathon:admin:events/users/dcos_marathon_lb-external/read
# Store the private key as a DC/OS secret for the service account and
# display it for verification (requires jq, checked above).
dcos security secrets create-sa-secret marathon-lb-external-private-key.pem dcos_marathon_lb-external marathon-lb-external
dcos security secrets list /
dcos security secrets get /marathon-lb-external --json | jq -r .value | jq
# end of script
| true
|
8af0e58508d0ff0932b61f254bb260032fd5aa99
|
Shell
|
wiedehopf/adsb-scripts
|
/throttled.sh
|
UTF-8
| 1,421
| 3.90625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
if ! command -v vcgencmd &>/dev/null
then
echo -------
echo "Command vcgencmd not found, can't check for throttling!"
echo "Exiting."
echo -------
exit 1
fi
#Flag Bits
UNDERVOLTED=0x1
CAPPED=0x2
THROTTLED=0x4
SOFT_TEMPLIMIT=0x8
HAS_UNDERVOLTED=0x10000
HAS_CAPPED=0x20000
HAS_THROTTLED=0x40000
HAS_SOFT_TEMPLIMIT=0x80000
#Text Colors
GREEN=`tput setaf 2`
RED=`tput setaf 1`
NC=`tput sgr0`
#Output Strings
GOOD="${GREEN}NO${NC}"
BAD="${RED}YES${NC}"
#Get Status, extract hex
STATUS=$(vcgencmd get_throttled)
STATUS=${STATUS#*=}
echo -n "Status: "
(($STATUS!=0)) && echo "${RED}${STATUS}${NC}" || echo "${GREEN}${STATUS}${NC}"
echo "Undervolted:"
echo -n " Now: "
((($STATUS&UNDERVOLTED)!=0)) && echo "${BAD}" || echo "${GOOD}"
echo -n " Run: "
((($STATUS&HAS_UNDERVOLTED)!=0)) && echo "${BAD}" || echo "${GOOD}"
echo "Throttled:"
echo -n " Now: "
((($STATUS&THROTTLED)!=0)) && echo "${BAD}" || echo "${GOOD}"
echo -n " Run: "
((($STATUS&HAS_THROTTLED)!=0)) && echo "${BAD}" || echo "${GOOD}"
echo "Frequency Capped:"
echo -n " Now: "
((($STATUS&CAPPED)!=0)) && echo "${BAD}" || echo "${GOOD}"
echo -n " Run: "
((($STATUS&HAS_CAPPED)!=0)) && echo "${BAD}" || echo "${GOOD}"
echo "Softlimit:"
echo -n " Now: "
((($STATUS&SOFT_TEMPLIMIT)!=0)) && echo "${BAD}" || echo "${GOOD}"
echo -n " Run: "
((($STATUS&HAS_SOFT_TEMPLIMIT)!=0)) && echo "${BAD}" || echo "${GOOD}"
| true
|
2abd000fbb4b3ed90ff6c4b34c2cb6228d5f70a5
|
Shell
|
optimistiCli/util
|
/lastfile
|
UTF-8
| 1,326
| 4.34375
| 4
|
[] |
no_license
|
#!/bin/bash
# lastfile — print the path of the newest entry in a directory.

read -r -d '' USAGE << "EOU"
Usage:
    lastfile [-h] [-s <number>] [<dir>]

Return the path to the latest file in the given directory, by default the
current directory is used.

Options:
    -h
        Print this help message and exit
    -s <N>
        Skip N latest files and return the path to the N+1st latest file
EOU

# Print an error message followed by the usage text, then exit non-zero.
function brag_and_exit {
    if [ -n "$1" ] ; then
        ERR_MESSAGE="$1"
    else
        ERR_MESSAGE='Something went wrong'
    fi
    echo 'Error: '"${ERR_MESSAGE}"$'\n\n'"${USAGE}"
    exit 1
}

BASEDIR='./'
SKIP='0'
LASTPARAM=''

# Argument parsing.
# BUG FIX: the parser matched '-e' although the documented option is '-s'.
while [[ -n "$1" ]] ; do
    if [[ $1 =~ ^-s$ ]] ; then
        shift
        SKIP="$1"
    elif [[ $1 =~ ^-h$ ]] ; then
        # BUG FIX: quote $USAGE so its embedded newlines survive echo.
        echo "$USAGE"
        exit
    elif [[ $1 =~ ^-[[:alnum:]]$ ]] ; then
        USELESS="$1" # Skip unknown options
    else
        LASTPARAM="$1"
    fi
    shift
done

# (removed leftover debug statements — 'echo "L $LASTPARAM"; exit' and a
# later 'echo -n "$LASTFILES"; exit' — which made the rest unreachable)

if [[ ! $SKIP =~ ^[[:digit:]]+$ ]] ; then
    brag_and_exit "Skip lines \"$SKIP\" is not a number"
fi

# BUG FIX: after the while-loop all positional parameters are consumed,
# so the original's test of "$1" never fired; use the captured LASTPARAM.
if [ -n "$LASTPARAM" ] ; then
    BASEDIR="$LASTPARAM"
fi

REALBASEDIR=$(realpath -m "$BASEDIR")

if [ ! -d "$REALBASEDIR" ] ; then
    brag_and_exit "\"$BASEDIR\" is not a directory"
fi

# Pick the (SKIP+1)-th newest entry by modification time.
LASTFILE=$(ls -1t "$REALBASEDIR" | head -n $((SKIP+1)) | tail -n 1)

# // TODO: Gracefully report empty dirs
# BUG FIX: join directory and filename with a '/' separator.
echo "${REALBASEDIR}/${LASTFILE}"
| true
|
d28f1284dd79c559850ff68e9820f5603ae18bf6
|
Shell
|
Mantha32/Teaching-HEIGVD-AMT-2019-Project-CourseApp
|
/localGlassFishConfig.sh
|
UTF-8
| 2,179
| 3.34375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Source: https://github.com/payara/Payara-Examples/blob/master/administration-examples/setup-sample-domain.sh
# Payara Variable Initialisation
# DOMAIN_NAME will be the name that you use to reference your domain when
# working with some Payara/Glassfish commands.
DOMAIN_NAME="domain1"
# The PAYARA_HOME variable points to your Payara install location. The below
# path would be appropriate for Payara versions 4.1.x
#PAYARA_HOME="/opt/payara5/glassfish"
#Doriane PAYARA home
PAYARA_HOME="/Users/dorianekaffo/Desktop/semestre5/AMT/Serveurs/payara5/glassfish"
# The ASADMIN variable points to the location of the asadmin script used
# to run the Payara asadmin commands
ASADMIN=${PAYARA_HOME}/bin/asadmin
echo "AS_ADMIN_PASSWORD=admin" > /tmp/gfpw.txt;
#${ASADMIN} --interactive=false --user admin --passwordfile=/tmp/gfpw.txt change-admin-password
# The use of the DOMAIN_NAME variable at the end of the asadmin command is used
# to determine the directory with which the domain's data is stored in within
# your Payara install. The name must contain only ASCII characters and be valid
# for a directory name on your operating system.
#
# More information regarding the use and option of the asadmin create-domain
# command can be found at: https://docs.oracle.com/html/E24938_01/create-domain.htm
#${ASADMIN} delete-domain ${DOMAIN_NAME}
#${ASADMIN} --user admin --passwordfile=/tmp/gfpw.txt create-domain --portbase ${PORT_BASE} ${DOMAIN_NAME}
## Check out the Ip address for de mysql database running in docker
#DB_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' mysqlcourse)
# Start the newly created sampleDomain domain using the asadmin start-domain
# command.
${ASADMIN} start-domain ${DOMAIN_NAME} && \
${ASADMIN} --interactive=false --user admin create-jdbc-connection-pool --restype javax.sql.DataSource --datasourceclassname com.mysql.jdbc.jdbc2.optional.MysqlDataSource --property user=root:password=adminpw:DatabaseName=schedule:ServerName=172.19.0.2:port=3306 pool_course && \
${ASADMIN} --interactive=false --user admin create-jdbc-resource --connectionpoolid pool_course jdbc/schedule
| true
|
ff773f237fb74288a25144f635bcb60385748608
|
Shell
|
TheBrokenRail/android-bin
|
/build.sh
|
UTF-8
| 372
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
set +e
# Run the given command, announcing it and reporting a colored
# success/failure line based on its exit status.
#   $1 - the command to execute (a single word in all current callers)
run() {
    local RESET='\033[0m'
    local START='\e[33m\e[1m'
    local FAIL='\e[31m\e[1m'
    local SUCCESS='\e[32m\e[1m'
    echo -e "${START}Running $1${RESET}"
    # Quote the command word and capture its status into a named local
    # immediately, instead of reading bare $? on a later line.
    "$1"
    local rc=$?
    if [[ $rc != 0 ]]; then
        echo -e "${FAIL}$1 Failed${RESET}"
    else
        echo -e "${SUCCESS}$1 Succeeded${RESET}"
    fi
}
run ./build-jerryscript.sh
run ./build-lua.sh
run ./build-ruby.sh
| true
|
fae225769c9b6abff635e045a54e29effb780629
|
Shell
|
achmadnabawi/sparkdata
|
/airflow/wrt/ec/item_and_shop_info/ec_iteminfo_parse.sh
|
UTF-8
| 465
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Rebuild the item-info temp table: clear any previous HDFS output,
# run the Spark parsing job, then open up permissions on the result.
source ~/.bashrc

pre_path='/home/wrt/sparkdata'

# 'hadoop fs -test -e' exits 0 when the path exists.
hadoop fs -test -e /user/wrt/temp/iteminfo_tmp
if [ $? -eq 0 ] ;then
    hadoop fs -rmr /user/wrt/temp/iteminfo_tmp
else
    echo 'Directory is not exist,you can run you spark job as you want!!!'
fi

# Submit the parser job; '-spark' is presumably an application-level
# flag consumed by the python script — confirm against its argparser.
spark2-submit --driver-memory 6G --num-executors 20 --executor-memory 20G --executor-cores 5 \
$pre_path/wrt/data_base_process/t_base_item_info.py -spark

hadoop fs -chmod -R 777 /user/wrt/temp/iteminfo_tmp
| true
|
9edb1c362716f264c8612663a8630c36e7cd85fc
|
Shell
|
georgewitteman/dotfiles
|
/home/exact_dot_scripts/executable_jqq
|
UTF-8
| 312
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/sh
# jqq: pretty-print piped input with jq, tolerating non-JSON lines.
set -o errexit
# stdin is not a tty → data is being piped in: pretty-print it, passing
# any line that fails to parse as JSON through as raw text.
if test ! -t 0; then
    jq --color-output --unbuffered --raw-input --raw-output '. as $raw | try fromjson catch $raw'
else
    # Interactive invocation: instead of blocking on a tty read, copy the
    # equivalent jq command line to the clipboard (pbcopy → macOS).
    printf "jq --color-output --unbuffered --raw-input --raw-output '. as \$raw | try fromjson catch \$raw'" | pbcopy
    echo "ok: copied jq command to clipboard"
fi
| true
|
fb81c45551ff15996503ba6cf0ffc34b06ceef0a
|
Shell
|
dellelce/mkit
|
/modules/postgresql10/hooks/custom_download.sh
|
UTF-8
| 333
| 3.390625
| 3
|
[] |
no_license
|
# add support for downloading postgresql from custom commit
# When $pgcommit is set, fetch that exact commit's tarball from the
# postgres GitHub mirror, print the local filename on success, and exit
# with wget's status; otherwise fall through and exit 0 (no-op).
[ ! -z "$pgcommit" ] &&
{
  commit="${pgcommit}"
  fname="$PWD/pg10-${commit}.tar.gz"
  ghpath="postgres/postgres"
  fullurl="https://github.com/${ghpath}/archive/${commit}.tar.gz"
  wget -q -O "$fname" "$fullurl"
  rc=$?                            # preserve wget's status across the next test
  [ -f "$fname" ] && echo "$fname" # emit the path only if the file landed
  exit $rc
}
exit 0
| true
|
663b539e7c5b2c5535d2365220e2abf1a8720dda
|
Shell
|
Ichbinjoe/dotfiles
|
/i3lock-multimonitor
|
UTF-8
| 2,123
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
#Constants
DISPLAY_RE="([0-9]+)x([0-9]+)\\+([0-9]+)\\+([0-9]+)" # Regex to find display dimensions
IMAGE_RE="([0-9]+)x([0-9]+)" # Regex to find image dimensions
FOLDER=`dirname "$BASH_SOURCE"` # Current folder
CACHE_FOLDER="$FOLDER"/img/cache/ # Cache folder
#Image paths
BKG_IMG="$FOLDER/img/background.png" # Path of background image
MD5_BKG_IMG=$(md5sum $BKG_IMG | cut -c 1-10)
MD5_SCREEN_CONFIG=$(xrandr | md5sum - | cut -c 1-32) # Hash of xrandr output
OUTPUT_IMG="$CACHE_FOLDER""$MD5_SCREEN_CONFIG"."$MD5_BKG_IMG".png # Path of final image
OUTPUT_IMG_WIDTH=0 # Decide size to cover all screens
OUTPUT_IMG_HEIGHT=0 # Decide size to cover all screens
if [ -e $OUTPUT_IMG ]
then
# Lock screen since image already exists
i3lock --image=$OUTPUT_IMG --tiling --show-failed-attempts
exit 0
fi
#Execute xrandr to get information about the monitors:
while read LINE
do
#If we are reading the line that contains the position information:
if [[ $LINE =~ $DISPLAY_RE ]]; then
#Extract information and append some parameters to the ones that will be given to ImageMagick:
SCREEN_WIDTH=${BASH_REMATCH[1]}
SCREEN_HEIGHT=${BASH_REMATCH[2]}
SCREEN_X=${BASH_REMATCH[3]}
SCREEN_Y=${BASH_REMATCH[4]}
CACHE_IMG="$CACHE_FOLDER""$SCREEN_WIDTH"x"$SCREEN_HEIGHT"."$MD5_BKG_IMG".png
## if cache for that screensize doesnt exist
if ! [ -e $CACHE_IMG ]
then
eval convert '$BKG_IMG' '-resize' '${SCREEN_WIDTH}X${SCREEN_HEIGHT}!' \
'$CACHE_IMG'
fi
# Decide size of output image
if (( $OUTPUT_IMG_WIDTH < $SCREEN_WIDTH+$SCREEN_X )); then OUTPUT_IMG_WIDTH=$(($SCREEN_WIDTH+$SCREEN_X)); fi;
if (( $OUTPUT_IMG_HEIGHT < $SCREEN_HEIGHT+$SCREEN_Y )); then OUTPUT_IMG_HEIGHT=$(( $SCREEN_HEIGHT+$SCREEN_Y )); fi;
PARAMS="$PARAMS $CACHE_IMG -geometry +$SCREEN_X+$SCREEN_Y -composite "
fi
done <<<"`xrandr`"
#Execute ImageMagick:
eval convert -size ${OUTPUT_IMG_WIDTH}x${OUTPUT_IMG_HEIGHT} 'xc:black' $OUTPUT_IMG
eval convert $OUTPUT_IMG $PARAMS $OUTPUT_IMG
#Lock the screen:
i3lock --image=$OUTPUT_IMG --tiling --show-failed-attempts
| true
|
c935ba7a9a7a4d583ff46a18db651997c6d4541c
|
Shell
|
Argonne-National-Laboratory/Arcond
|
/scripts/arc_update
|
UTF-8
| 291
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
#>> ArCond update script
# Get newest version of arcond from ASC
# ARC_HOME='/users/condor/Arcond'
# PWD=`pwd`
# cp -f $ARC_HOME/arcond $PWD/
# cp -f $ARC_HOME/basic/* $PWD/basic/
# cp -f $ARC_HOME/admin/* $PWD/admin/
# echo "ArCond was updated in your installation area"
| true
|
795a1fca1738c8f3720d76a96720aafdbb72def5
|
Shell
|
plitc/r-vmadm
|
/rel/pkgng/deploy/usr/local/lib/brand/jail/halted
|
UTF-8
| 233
| 2.5625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# Brand hook run when a jail is halted: tear down the devfs mounts of
# the outer jail and the nested (inner) jail.

# include shared utility functions
brand_root="$1"
jail_root="$2"
. ${brand_root}/../shared/utils.sh

# Sanity-check the jail root before unmounting anything — presumably
# defined in the sourced utils.sh; confirm its failure behavior.
validate_root "${jail_root}"

# outer jail
umount ${jail_root}/root/dev
# inner jail
umount ${jail_root}/root/jail/dev
| true
|
01349c7b086bfa87b0222e58ed1c273b8e5eedf1
|
Shell
|
mba811/cloudrouter-dockerfiles
|
/contrib/hooks/commit-msg.signoff-verify
|
UTF-8
| 192
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/sh
#
# A commit-msg hook that rejects commits whose message file ($1)
# lacks a "Signed-off-by:" trailer.
if ! grep '^Signed-off-by: ' "$1" > /dev/null; then
    echo >&2 Aborting commit ...
    echo >&2 [POLICY] Commits require sign-off.
    exit 1
fi
| true
|
066541616e9e01a2dfa691da9ebfe74b843aaceb
|
Shell
|
tangdavid/PRS-sim
|
/scripts/sim/filter.sh
|
UTF-8
| 482
| 2.96875
| 3
|
[] |
no_license
|
#$ -N job-filter
#$ -cwd
#$ -l h_rt=05:00:00,h_data=16G
#$ -j y
#!/bin/bash
. /u/local/Modules/default/init/modules.sh
module load vcftools
module load bcftools
if [ $# -ne 1 ]
then
echo "Usage: ./filter.sh [population 1]"
exit 1
fi
# input files
hapmatrix='../../data/vcf/full.recode.vcf'
pop=$1
out="../../data/vcf/$pop"
# filter by population
vcftools \
--vcf $hapmatrix \
--recode \
--keep "${out}.txt" \
--out ${out}
# for hoffman time out
echo "sleeping"
sleep 5m
| true
|
2793fab8e304fb971f4b7c1c0430e6fe3f1b612c
|
Shell
|
Silverpeas/silverpeas-native-packages
|
/rpm/silverpeas.init
|
UTF-8
| 2,243
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
### BEGIN INIT INFO
# Provides: silverpeas
# Required-Start: $local_fs $remote_fs $network $time $named openoffice
# Required-Stop: $local_fs $remote_fs $network $time $named openoffice
# Default-Start: 3 5
# Default-Stop: 0 1 2 6
# Short-Description: Silverpeas
# Description: Open platform to create a collaborative web site
### END INIT INFO
NAME=silverpeas
SCRIPTNAME=/etc/init.d/$NAME
if [ `id -u` -ne 0 ]; then
echo "The $NAME init script can only be run as root"
exit 1
fi
[ "Z$SILVERPEAS_HOME" = "Z" ] && export SILVERPEAS_HOME=/opt/silverpeas
[ "Z$JBOSS_HOME" = "Z" ] && export JBOSS_HOME=/opt/silverpeas/jboss-6.1.0.Final
PRG_START=silverpeas_start_jboss.sh
PRG_STOP=silverpeas_stop_jboss.sh
PIDFILE=/var/run/silverpeas.pid
RUN_AS_USER=silverpeas
# source function library
. /etc/rc.d/init.d/functions
if [ ! -f /var/log/silverpeas.log ]; then
touch /var/log/silverpeas.log
chown silverpeas:adm /var/log/silverpeas.log
fi
# Find the PID of the running Silverpeas JBoss java process: scan the
# PIDs reported by 'pidof java' for one whose 'ps ax' line mentions
# "silverpeas". Sets the global PID (empty when not found); returns 1
# when no java process exists at all.
get_pid()
{
    PID=
    pj=`pidof java`
    test $? -eq 0 || return 1
    for pid in $pj; do
        # Field 1 of a squeezed 'ps ax' line is normally the PID; the second
        # attempt with field 2 handles lines where a leading space shifts
        # the fields after 'tr -s'.
        PID=`ps ax | grep $pid | grep silverpeas | tr -s ' ' | cut -d ' ' -f 1`
        test "Z$PID" = "Z" && PID=`ps ax | grep $pid | grep silverpeas | tr -s ' ' | cut -d ' ' -f 2`
        test "Z$PID" != "Z" && break
    done
}
rh_status()
{
do_status > /dev/null 2>&1
}
do_start()
{
echo -n $"Starting $NAME: "
su - $RUN_AS_USER -c "$SILVERPEAS_HOME/bin/$PRG_START &> /var/log/silverpeas.log"
sleep 10s
get_pid
if [ "Z$PID" != "Z" ]; then
echo $PID > $PIDFILE
success
else
failure
fi
echo
}
do_stop()
{
echo -n $"Stopping $NAME"
su - $RUN_AS_USER -c "$SILVERPEAS_HOME/bin/$PRG_STOP &> /dev/null"
get_pid
until [ "Z$PID" = "Z" ]; do
echo -n "."
sleep 5s
get_pid
done
success
echo
rm -f $PIDFILE || true
}
do_status()
{
status -p $PIDFILE $NAME
}
do_restart()
{
do_stop
do_start
}
case "$1" in
start)
rh_status && exit 0
do_start
;;
stop)
rh_status || exit 0
do_stop
;;
status)
do_status
;;
restart|force-reload)
rh_status || exit 7
do_restart
;;
*)
echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
exit 3
;;
esac
exit 0
| true
|
697d863ba27a0e79541cd7f2a9cc267cf23fc5a3
|
Shell
|
ReneFroger/dotfiles
|
/bashrc
|
UTF-8
| 5,269
| 2.578125
| 3
|
[] |
no_license
|
DOTFILES=$HOME/dotfiles
# -----------------------------------------------------------------------------
# Environment
# -----------------------------------------------------------------------------
export EDITOR='vim'
export PIP_DOWNLOAD_CACHE="$HOME/.pip/cache"
alias vim='vim' # Override
# -----------------------------------------------------------------------------
# virtualenv
# -----------------------------------------------------------------------------
export WORKON_HOME=$HOME/Code/envs
source /usr/local/bin/virtualenvwrapper.sh
# -----------------------------------------------------------------------------
# Shortcuts
# -----------------------------------------------------------------------------
alias ls='ls -h'
alias l='ls'
alias du='du -sh'
alias fn='find . -name'
alias sb='source $HOME/.bashrc'
alias f='fab'
alias fv='fab -R vagrant'
alias envs='cd $WORKON_HOME'
alias ghcm='ghc --make -optl"-Wl,-read_only_relocs,suppress"'
# $ proxyssh user@host
alias proxyssh='ssh -D 8888 -f -C -q -N'
# Recursive sed
# alias s="find . -path './.git' -prune -o -type f -print0 | xargs -0 sed -i ''"
# alias s="find . -name '.git' -prune -o -type f -print0 | xargs -0 sed -i ''"
alias e='tar xzvf'
alias fv='fab -R vagrant'
alias gist='curl -F "sprunge=<-" http://gist.honza.ca'
alias copy='pbcopy'
alias fin="open $HOME/Dropbox/Documents/finances.ods"
# ack
alias cack='ack --type=coffee'
alias pack='ack --type=python'
alias offline='offlineimap -qf INBOX; notmuch new'
alias ios='open -a "iPhone Simulator.app"'
# tmux
alias new='tmux new -s'
alias att='tmux attach -t'
function prettyjson() {
python -mjson.tool
}
alias goawayswapfilesyouareswapfilesidontevenneedyou='rm ~/.vim/tmp/swap/*'
function psg {
ps auxww | grep --color=always $* | grep -v grep | sed -e 's/ */ /g' | cut -d' ' -f 2,11-
}
source $HOME/dotfiles/z/z.sh
# -----------------------------------------------------------------------------
# Git
# -----------------------------------------------------------------------------
alias gitl="$(which git) log --pretty=format:'%h - %an, %ar : %s'"
alias gits='clear; git status'
alias gitchart="git shortlog --numbered --summary"
alias gitg='git log --oneline --graph'
alias gall='git submodule foreach git pull origin master'
alias gplo='git pull --rebase origin'
alias gpso='git push origin'
# -----------------------------------------------------------------------------
# Python & Django
# -----------------------------------------------------------------------------
alias rmpyc='find . -name "*.pyc" -exec rm {} \;'
alias m='python manage.py'
alias run='python manage.py runserver'
alias sync='python manage.py syncdb'
alias rrun="rm dev.db ; sync --noinput ; m migrate; m createsuperuser --user=honza --email=me@honza.ca; m runserver"
# -----------------------------------------------------------------------------
# todo.txt
# -----------------------------------------------------------------------------
alias t='~/dotfiles/todo.txt/todo.sh'
export PATH=/usr/local/bin:$PATH
export PATH=$PATH:/usr/local/sbin
export PATH=$PATH:/usr/texbin
export PATH=$PATH:$HOME/.cabal/bin
export PATH=$PATH:$HOME/bin
export PATH=/usr/local/share/python:$PATH
export PATH=/usr/local/Cellar/ruby/1.9.3-p194/bin:$PATH
# -----------------------------------------------------------------------------
# Prompt
# -----------------------------------------------------------------------------
# Prompt stuff
export VIRTUAL_ENV_DISABLE_PROMPT=1
# Print the active virtualenv's name for the prompt, or an empty line
# when no virtualenv is active ($VIRTUAL_ENV unset or empty).
function ve () {
    if [ -z "$VIRTUAL_ENV" ]; then
        echo ''
        return
    fi
    # Quote the expansion: an env path containing spaces previously split
    # into multiple basename arguments and broke the prompt.
    local x
    x=$(basename "$VIRTUAL_ENV")
    echo "$x"
}
function parse_git_branch {
git branch --no-color 2> /dev/null | sed -e '/^[^*]/d' -e "s/* \(.*\)/[\1]/"
}
c_yellow="\[\033[33m\]"
c_blue="\[\033[34m\]"
c_magenta="\[\033[35m\]"
c_reset="\[\033[m\]"
export PS1="$c_yellow\$(ve)$c_reset \w $c_blue\$(parse_git_branch)$c_reset $ "
# -----------------------------------------------------------------------------
# Notifiers
# -----------------------------------------------------------------------------
function grr() {
$*
terminal-notifier -title "Finished" -message "'$*' finished"
}
function spp() {
$*
say "'$*' finished"
}
# -----------------------------------------------------------------------------
# PostgreSQL
# -----------------------------------------------------------------------------
alias startpostgres='pg_ctl -D /usr/local/var/postgres -l /usr/local/var/postgres/server.log start'
alias stoppostgres='pg_ctl -D /usr/local/var/postgres stop -s -m fast'
# -----------------------------------------------------------------------------
# Completion
# -----------------------------------------------------------------------------
if [ -f `brew --prefix`/etc/bash_completion ]; then
. `brew --prefix`/etc/bash_completion
fi
# -----------------------------------------------------------------------------
# Other
# -----------------------------------------------------------------------------
# Stop telling me I have new mail
unset MAILCHECK
# export SHELL=/usr/local/bin/bash
export GREP_OPTIONS='--color=auto'
if [[ -s $HOME/.bashrc_local ]] ; then source $HOME/.bashrc_local ; fi
if [[ -s $DOTFILES/bash_functions ]] ; then source $DOTFILES/bash_functions ; fi
| true
|
6593df1a559dfa3e7db07c3653209fe872e645db
|
Shell
|
achreto/barrelfish
|
/lib/acpica/generate/linux/make-patches.sh
|
UTF-8
| 3,437
| 4.3125
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-dco-1.1",
"MIT"
] |
permissive
|
#!/bin/bash
#
# NAME:
# make-patches.sh - extract a set of linuxized patches from the
# ACPICA git repository
#
# SYNOPSIS:
# make-patches.sh [-r release] [-u] <old_commit> [new_commit]
#
# DESCRIPTION:
# Creates the linuxized patch set from old_commit to new_commit in
# ACPICA git repository.
# Parameters:
# old_commit The old commit ID.
# new_commit The new commit ID. If this parameter is not specified,
# the new commit ID is default to HEAD.
# Options:
# -r release Specify a release ID, it will turn out to be the name of
# the patch files. If this option is not specified, the
# default name of the patch files will be the current month
# in YYYYmm date format.
# -u Generate upstream commit IDs in the linuxized patches.
#
RELEASE=`date +%Y%m`
# Print the synopsis and option help, then exit successfully (the script
# treats a usage request as a normal termination).
usage()
{
    echo "Usage: $(basename "$0") [-r release] [-u] <old_commit> [new_commit]"
    echo "Where:"
    echo " -r: set release ID, default is $RELEASE in YYYYmm- date format"
    echo " -u: generate upstream commit IDs"
    # BUG FIX: bash echo does not interpret '\n' by default, so the
    # original printed a literal backslash-n; the escapes were dropped.
    echo " old_commit: the old commit id"
    echo " new_commit: optional, the new commit id, default to HEAD"
    exit 0
}
SCRIPT=`(cd \`dirname $0\`; pwd)`
. $SCRIPT/libacpica.sh
ACPICA_DIR=$CURDIR/patches.acpica.$RELEASE
LINUX_DIR=$CURDIR/patches.linux.$RELEASE
NEW_RELEASE="HEAD"
OLD_RELEASE="HEAD"
ACPICA_CNT=0
LINUX_CNT=0
MAINTAINER="Bob Moore <robert.moore@intel.com>"
GIT_EXTRACT="$SCRIPT/gen-patch.sh"
RELEASE="${RELEASE}-"
while getopts "dr:u" opt
do
case $opt in
d) DRYRUN="yes";;
r) RELEASE=$OPTARG;;
u) GIT_EXTRACT="${GIT_EXTRACT} -u -m '${MAINTAINER}'";;
?) echo "Invalid argument $opt"
usage;;
esac
done
shift $(($OPTIND - 1))
if [ -z $1 ]; then
echo "old_commit is not specified"
usage
fi
OLD_RELEASE=$1
if [ ! -z $2 ]; then
NEW_RELEASE=$2
fi
COMMITS=`git rev-list --reverse $OLD_RELEASE..$NEW_RELEASE`
for c in $COMMITS; do
ACPICA_CNT=`expr $ACPICA_CNT + 1`
done
generate_patch()
{
local cid aid lid
local COMMIT SUBJECT
cid=$1
aid=$2
lid=$3
COMMIT=`git log -1 -c $cid --format=%H | cut -c 1-8`
SUBJECT=`git log -1 -c $cid --format=%s`
echo "[make-patches.sh] Generating patch ($aid:$lid:$COMMIT: $SUBJECT)..."
(
cd $SCRIPT
if [ "x$DRYRUN" = "xyes" ]; then
echo $GIT_EXTRACT -i $lid $COMMIT
else
eval $GIT_EXTRACT -i $lid $COMMIT
echo "[make-patches.sh] Copying ACPICA patch ($RELEASE$aid.patch)..."
mv acpica-$COMMIT.patch $ACPICA_DIR/$RELEASE$aid.patch
echo $RELEASE$aid.patch >> $ACPICA_DIR/series
fi
if [ -f linux-$COMMIT.patch ]; then
if [ "x$DRYRUN" != "xyes" ]; then
echo "[make-patches.sh] Copying Linux patch ($RELEASE$lid.patch)..."
mv linux-$COMMIT.patch $LINUX_DIR/$RELEASE$lid.patch
echo $RELEASE$lid.patch >> $LINUX_DIR/series
fi
fi
)
}
rm -rf $ACPICA_DIR
rm -rf $LINUX_DIR
mkdir -p $ACPICA_DIR
mkdir -p $LINUX_DIR
ACPICA_IDX=1
LINUX_IDX=1
make_acpisrc $SRCDIR force > /dev/null
for c in $COMMITS; do
generate_patch $c $ACPICA_IDX $LINUX_IDX
LINUX_TO=$LINUX_DIR/$RELEASE$LINUX_IDX.patch
if [ -f $LINUX_TO ]; then
echo "[make-patches.sh] Generated $LINUX_TO."
LINUX_IDX=`expr $LINUX_IDX + 1`
fi
ACPICA_IDX=`expr $ACPICA_IDX + 1`
done
LINUX_CNT=`expr $LINUX_IDX - 1`
echo "[make-patches.sh] Generated $ACPICA_CNT raw patches and $LINUX_CNT linuxized patches."
| true
|
e0d195eb13781341d8daad45eda79bec44ed546e
|
Shell
|
nikolavp/configs
|
/bin/make-l
|
UTF-8
| 468
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# vim: set sw=4 sts=4 et foldmethod=indent :
# Same as fab -l but without the documentation; see it to make your
# makefiles self documenting.
# If this is a self documenting makefile as described in
# https://marmelab.com/blog/2016/02/29/auto-documented-makefile.html
# use its own 'help' target.
# Note that both of these don't work for Makefiles which source other
# makefiles.

# Use grep -q instead of testing a command substitution: no subshell,
# and a missing Makefile no longer prints a stray grep error here.
if grep -q 'help:' Makefile 2>/dev/null; then
    make help
    exit
fi
# Fallback: list the bare target names.
grep -o '^\w\+:' Makefile
| true
|
68864aa5773bd0505801576da9bfc04fa41ee612
|
Shell
|
DenisHsieh/opencv_installation_scripts
|
/opencv_install_ubuntu_1604.sh
|
UTF-8
| 2,658
| 2.828125
| 3
|
[] |
no_license
|
function run () {
# Build and install OpenCV 3.2 (with contrib modules) against a pyenv
# Python 3.6.0 on Ubuntu 16.04, then symlink the resulting cv2 module
# into a "main" pyenv virtualenv.
# step 1. install build and runtime packages via apt
sudo apt install -y libjpeg8-dev libtiff5-dev libjasper-dev libpng12-dev libhdf5-dev \
libavcodec-dev libavformat-dev libswscale-dev libv4l-dev libxvidcore-dev \
libx264-dev libgtk-3-dev libatlas-base-dev gfortran \
build-essential cmake pkg-config libeigen3-dev libtbb-dev libtbb2 \
make build-essential libssl-dev zlib1g-dev libbz2-dev \
libreadline-dev libsqlite3-dev wget curl llvm libncurses5-dev libncursesw5-dev xz-utils
# step 2. install pyenv
# --enable-shared builds libpython3.6m.so, which cmake links against below.
env PYTHON_CONFIGURE_OPTS="--enable-shared" CFLAGS="-O2" pyenv install 3.6.0
pyenv global 3.6.0
# step 3. install numpy
pip install -U pip setuptools wheel cython numpy
# step 4. setup /opt
sudo mkdir -p /opt/src
sudo chown $(whoami) /opt
sudo chown $(whoami) /opt/src
cd /opt/src
# step 6. build opencv
cd /opt/src
curl -L https://github.com/opencv/opencv/archive/3.2.0.zip -o opencv32.zip
curl -L https://github.com/opencv/opencv_contrib/archive/3.2.0.zip -o opencv32contrib.zip
unzip opencv32.zip
unzip opencv32contrib.zip
mv -v opencv-3.2.0 /opt/src/opencv32_py36
mv -v opencv_contrib-3.2.0 /opt/src/opencv32_py36_contrib
cd /opt/src/opencv32_py36
mkdir /opt/src/opencv32_py36/release
cd /opt/src/opencv32_py36/release
# Configure: Python 3 bindings only; TBB/Eigen/OpenGL/OpenCL on;
# CUDA, IPP, tests and Java bindings off. The PYTHON3_* paths are
# derived from the active pyenv interpreter.
cmake \
-D CMAKE_INSTALL_PREFIX=/opt/opencv32_py36 \
-D OPENCV_EXTRA_MODULES_PATH=/opt/src/opencv32_py36_contrib/modules \
-D BUILD_opencv_python2=OFF \
-D BUILD_opencv_python3=ON \
-D BUILD_TIFF=ON \
-D BUILD_opencv_java=OFF \
-D WITH_CUDA=OFF \
-D ENABLE_AVX=ON \
-D WITH_OPENGL=ON \
-D WITH_OPENCL=ON \
-D WITH_IPP=OFF \
-D WITH_TBB=ON \
-D WITH_EIGEN=ON \
-D WITH_V4L=ON \
-D WITH_VTK=OFF \
-D BUILD_TESTS=OFF \
-D BUILD_PERF_TESTS=OFF \
-D CMAKE_BUILD_TYPE=RELEASE \
-D PYTHON3_LIBRARY=$(python -c "import re, os.path; print(os.path.normpath(os.path.join(os.path.dirname(re.__file__), '..', 'libpython3.6m.so')))") \
-D PYTHON3_EXECUTABLE=$(which python) \
-D PYTHON3_INCLUDE_DIRS=$(python -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())") \
-D PYTHON3_PACKAGES_PATH=$(python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") ..
make -j8
cd /opt/src/opencv32_py36/release
make install
pyenv virtualenv 3.6.0 main
pyenv global main
pip install -U pip setuptools wheel numpy # important to install in every new virtual environment where we symlink opencv
# Make the freshly built cv2 extension importable from the virtualenv.
ln -s "$HOME/.pyenv/versions/3.6.0/lib/python3.6/site-packages/cv2.cpython-36m-x86_64-linux-gnu.so" \
"$HOME/.pyenv/versions/main/lib/python3.6/site-packages/cv2.cpython-36m-x86_64-linux-gnu.so"
}
run
| true
|
eb2eff6d0b411ac22ae41061d176410009f48dd8
|
Shell
|
epam/fonda
|
/src/integrationTest/resources/templates/dnaAmpliconVarFastq/testControlSampleNotNAAllTasksXenomeNo/DnaAmpliconVar_Fastq_exomecnv_for_GA5_analysis.txt
|
UTF-8
| 1,516
| 2.78125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -x
# NOTE(review): this file lives under integrationTest/resources/templates
# and appears to be an expected-output (golden) fixture for the pipeline's
# script generator — confirm before modifying its content.
# --- SGE options --- #
#$ -V
#$ -wd build/resources/integrationTest/output
#$ -N DnaAmpliconVar_Fastq_exomecnv_for_GA5_analysis
#$ -o build/resources/integrationTest/output/log_files/DnaAmpliconVar_Fastq_exomecnv_for_GA5_analysis.log
#$ -e build/resources/integrationTest/output/err_files/DnaAmpliconVar_Fastq_exomecnv_for_GA5_analysis.err
#$ -q main.q
#$ -R y
#$ -pe threaded 4
#$ -m a
# --- The commands to be executed --- #
cd build/resources/integrationTest/output
echo `date` Begin the job execution...
echo `date` Begin Step: ExomeCNV detection...
# GATK DepthOfCoverage for the control (N) and tumor (GA5) samples.
/usr/lib/jvm/java-8-openjdk-amd64/bin/java -Xmx10g -jar /usr/bin/gatk -T DepthOfCoverage -R /ngs/data/reference_genome/hg19/hg19_decoy/hg19.decoy.fa -I build/resources/integrationTest/output/N/bam/N.merged.sorted.realign.realign.recal.bam -L /ngs/data/test_data_Padded.bed -o null/N
/usr/lib/jvm/java-8-openjdk-amd64/bin/java -Xmx10g -jar /usr/bin/gatk -T DepthOfCoverage -R /ngs/data/reference_genome/hg19/hg19_decoy/hg19.decoy.fa -I build/resources/integrationTest/output/GA5/bam/GA5.merged.sorted.realign.realign.recal.bam -L /ngs/data/test_data_Padded.bed -o null/GA5
/usr/bin/Rscript /usr/bin/exomecnv/exome_cnv.R -t null/GA5.sample_interval_summary -n null/N.sample_interval_summary -o null -s GA5
# $? reflects only the Rscript call above; the java steps are traced by -x.
if [ $? -eq 0 ]
then
echo `date` Successful Step: ExomeCNV detection.
sleep 8
else
echo `date` Error Step: ExomeCNV detection.
echo `date` The job was aborted due to ERRORS found.
exit 1;
fi
echo `date` Finish the job execution!
| true
|
b2c27a5bbbb85a0d1398fe10ecc210e856660ce7
|
Shell
|
bollu/coremlir
|
/nofib/spectral/life/life.test
|
UTF-8
| 356
| 3.171875
| 3
|
[
"Apache-2.0",
"LLVM-exception"
] |
permissive
|
#! /bin/sh
#
# Run the program given as $1 and compare the checksum of its output
# against the known-good value recorded below. $1 is intentionally left
# unquoted so a command with arguments may be passed as one word.

# mktemp instead of the original predictable /tmp/sum-*-$$ names.
real=$(mktemp) || exit 1
expected=$(mktemp) || exit 1

$1 | sum > "$real"
cat > "$expected" << EOTHING
02845 1350
EOTHING

if cmp -s "$real" "$expected" ; then
    rm -f "$real" "$expected"
    exit 0
else
    # printf instead of `echo -n`: -n is not portable under /bin/sh.
    printf '%s' '*** sum I got: ' ; cat "$real"
    printf '%s' '*** sum I expected: ' ; cat "$expected"
    rm -f "$real" "$expected"
    exit 1
fi
| true
|
b63f7cf6792a10ec0ef6f9298f1b8038c1dd71ac
|
Shell
|
paulolobt/env-examples
|
/Scripts/Shell/example-cli.sh
|
UTF-8
| 7,113
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/sh
#===============================================================================
#
# FILE: app-cli.sh
#
# USAGE: ./app-cli.sh [options] [services]
#
# DESCRIPTION: App environment client to manage services and data containers.
#
# OPTIONS: -h, --help show help
# -d, --delete delete all containers including data container
# -u, --update update all containers except data container
# REQUIREMENTS: Docker Engine and Docker Compose installed.
# BUGS: ---
# NOTES: ---
# AUTHOR: Paulo Lobato <paulo.lobatojr@hotmail.com>
# COMPANY: ---
# VERSION: 1.1
# REVISION: ---
#===============================================================================
usage () {
    # Print the CLI help text on stdout (same lines the echo chain produced).
    cat <<'EOF'
./app-cli.sh - App environment client
 
./app-cli.sh [options] [services]
 
options:
-h, --help show help
-d, --delete delete all containers including data container
-u, --update update all containers except data container
 
services:
<empty> App services + App Analytics services
app App services
analytics App Analytics services
EOF
}
stopContainer () {
    # Stop every container whose name carries the `appenv_` prefix.
    # With --no-data the data containers (db / elasticsearch) keep running.
    case "${1:-}" in
        --no-data)
            docker ps | grep appenv | grep -v 'db\|elasticsearch' | cut -d ' ' -f 1 | xargs docker stop
            ;;
        *)
            docker stop $(docker ps -q -f name=appenv_*)
            ;;
    esac
}
deleteContainer () {
    # Delete every container whose name carries the `appenv_` prefix.
    # With --no-data the data containers (db / elasticsearch) are kept.
    case "${1:-}" in
        --no-data)
            docker ps -a | grep appenv | grep -v 'db\|elasticsearch' | cut -d ' ' -f 1 | xargs docker rm
            ;;
        *)
            docker rm $(docker ps -aq -f name=appenv_*)
            ;;
    esac
}
deleteContainerImage () {
# Delete all images that have a `appenv_` prefix
# NOTE(review): when no image matches, docker rmi is invoked with no
# arguments and prints a usage error; presumably harmless — confirm.
docker rmi $(docker images -q appenv_*)
}
deployAppServices () {
# Bring up the App stack in order: Magento setup + app, shared proxy,
# admin UI, optional Totem, then the web store.
# Install Magento container
docker-compose -f docker-compose.magento.yml run --rm magentosetup
docker-compose -f docker-compose.magento.yml up -d magentoapp
# Deploy App proxy container
docker-compose -f docker-compose.common.yml up -d
# Deploy App Admin container
docker-compose -f docker-compose.admin.yml up -d webadmin
# Deploy Totem container (opt-in via the SERVICE_TOTEM_UP flag)
if [ "$SERVICE_TOTEM_UP" = true ]; then
docker-compose -f docker-compose.totem.yml up -d
fi
# Deploy App Web Store and Service Store container
docker-compose -f docker-compose.store.yml up -d webstore
}
deployAppAnalyticsServices () {
# Bring up the analytics stack: the shared proxy plus the ELK services.
# Deploy App proxy container
docker-compose -f docker-compose.common.yml up -d
# Deploy ELK containers
# Kibana is opt-in; without it only the data-pipeline services start.
if [ "$SERVICE_ELK_KIBANA_UP" = true ]; then
docker-compose -f docker-compose.elk.yml up -d
else
docker-compose -f docker-compose.elk.yml up -d kafka-rest elasticsearch logstash pentaho
fi
}
validateAppEnv () {
    # Succeed (0) only when every App-service hostname variable is set and
    # non-empty. `[ -n "${VAR:-}" ]` replaces the original unquoted
    # `[ $VAR ]`, which broke on values containing whitespace and under
    # `set -u`.
    if [ -n "${SERVICE_STORE_HOSTNAME:-}" ] && [ -n "${SERVICE_MAGENTO_HOSTNAME:-}" ] &&
        [ -n "${SERVICE_ADMIN_HOSTNAME:-}" ] && [ -n "${SERVICE_STORE_WEB_HOSTNAME:-}" ]; then
        return 0
    else
        return 1
    fi
}
validateAppAnalyticsEnv () {
    # Succeed (0) only when every analytics hostname variable is set and
    # non-empty. Quoted `-n` tests replace the original unquoted `[ $VAR ]`,
    # which broke on whitespace-containing values and under `set -u`.
    if [ -n "${SERVICE_ELK_ELASTICSEARCH_HOSTNAME:-}" ] && [ -n "${SERVICE_ELK_KIBANA_HOSTNAME:-}" ] &&
        [ -n "${SERVICE_ELK_LOGSTASH_HOSTNAME:-}" ] && [ -n "${SERVICE_ELK_PENTAHO_DB_HOSTNAME:-}" ]; then
        return 0
    else
        return 1
    fi
}
main () {
# Entry point. Global options (-h/--delete/--update) are handled first
# and terminate the loop; otherwise each remaining argument names a
# service group (app / analytics) to deploy. With no arguments at all,
# both stacks are deployed.
while test $# -gt 0; do
case "$1" in
-h|--help)
usage
exit 0
;;
--delete)
shift
# Tear everything down, data containers included.
if validateAppEnv || validateAppAnalyticsEnv; then
stopContainer
deleteContainer
deleteContainerImage
else
echo 'Error: If App services set SERVICE_STORE_HOSTNAME, SERVICE_ADMIN_HOSTNAME, SERVICE_STORE_WEB_HOSTNAME, SERVICE_MAGENTO_HOSTNAME. Also Analytics services SERVICE_ELK_ELASTICSEARCH_HOSTNAME, SERVICE_ELK_KIBANA_HOSTNAME, SERVICE_ELK_LOGSTASH_HOSTNAME and SERVICE_ELK_PENTAHO_DB_HOSTNAME environment variables.'
exit 1
fi
break
;;
--update)
shift
# Rebuild the app containers but keep the data containers intact.
if validateAppEnv || validateAppAnalyticsEnv; then
stopContainer --no-data
deleteContainer --no-data
deleteContainerImage
else
echo 'Error: If App services set SERVICE_STORE_HOSTNAME, SERVICE_ADMIN_HOSTNAME, SERVICE_STORE_WEB_HOSTNAME, SERVICE_MAGENTO_HOSTNAME. Also Analytics services SERVICE_ELK_ELASTICSEARCH_HOSTNAME, SERVICE_ELK_KIBANA_HOSTNAME, SERVICE_ELK_LOGSTASH_HOSTNAME and SERVICE_ELK_PENTAHO_DB_HOSTNAME environment variables.'
exit 1
fi
break
;;
*)
echo "./app-cli: '$1' is not a valid option."
echo "See './app-cli --help'."
exit 1
break
;;
esac
done
# No arguments: deploy analytics first, then the app stack.
if [ $# -eq 0 ] ; then
if validateAppEnv && validateAppAnalyticsEnv; then
deployAppAnalyticsServices
deployAppServices
else
echo 'Error: Set SERVICE_STORE_HOSTNAME, SERVICE_ADMIN_HOSTNAME, SERVICE_STORE_WEB_HOSTNAME, SERVICE_MAGENTO_HOSTNAME, SERVICE_ELK_ELASTICSEARCH_HOSTNAME, SERVICE_ELK_KIBANA_HOSTNAME, SERVICE_ELK_LOGSTASH_HOSTNAME and SERVICE_ELK_PENTAHO_DB_HOSTNAME environment variables.'
exit 1
fi
else
# Named services: deploy each requested group in argument order.
while test $# -gt 0; do
case "$1" in
app)
shift
if validateAppEnv; then
deployAppServices
echo "App"
else
echo 'Error: Set SERVICE_STORE_HOSTNAME, SERVICE_ADMIN_HOSTNAME, SERVICE_STORE_WEB_HOSTNAME and SERVICE_MAGENTO_HOSTNAME environment variables.'
exit 1
fi
;;
analytics)
shift
if validateAppAnalyticsEnv; then
deployAppAnalyticsServices
else
echo 'Error: Set SERVICE_ELK_ELASTICSEARCH_HOSTNAME, SERVICE_ELK_KIBANA_HOSTNAME and SERVICE_ELK_LOGSTASH_HOSTNAME environment variables.'
exit 1
fi
;;
*)
echo "'$1' is not a valid service."
shift
;;
esac
done
fi
}
# With no arguments at all, show the help text and stop.
# NOTE(review): the usage text says "<empty>" deploys everything, but this
# guard makes main's own zero-argument branch unreachable — confirm intent.
if [ $# -eq 0 ] ; then
    main --help
    exit 0
fi
# Quote "$@": the original unquoted $@ word-split arguments containing
# whitespace before they reached main.
main "$@"
| true
|
fafb87c7047f23faf93db38d3780a57931e5375d
|
Shell
|
vicentecg/bash
|
/bash.sh
|
UTF-8
| 418
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Simple interactive admin menu (Spanish UI). Loops forever, reading one
# letter per iteration; options b/c/e terminate via shutdown/exit.
clear
while :
do
echo " Opciones disponibles "
echo "a. Calculadora"
echo "b. Apagar"
echo "c. Reiniciar"
echo "d. Sistema"
echo "e. Salir"
echo -n "Seleccione una opcion [a - e]"
read opcion
# b and c require root privileges (shutdown).
case $opcion in
a) echo "Abriendo calculadora:";python calculadora.py;;
b) echo "Apagando:";shutdown -h now;;
c) echo "Reiniciando";shutdown -r now;;
d) echo "Kernel";cat /etc/*release;;
e) echo "Adios";exit 1;;
| true
|
d35da5b0fea3dababaaf47ada2100aa1d78abd49
|
Shell
|
AbhilashTUofficial/Shell-scripting
|
/lab/EX2/A/script.sh
|
UTF-8
| 306
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Lab exercise: print information about the current user, shell and system.

# Three ways to report the login name.
logname
id -un
whoami
# NOTE(review): the original ran a bare `sh` here, which starts an
# interactive subshell and blocks the rest of the script; removed.
echo "current shell: $SHELL"
echo "$HOME"
echo "your os type:$OSTYPE"
echo "path: $PATH"
# Fix: expand pwd as a command substitution (the original printed the
# literal word "pwd").
echo "current directory: $(pwd)"
# Fix: $nouser was never assigned; count logged-in sessions with who.
nouser=$(who | wc -l)
echo "currently logged $nouser user(s)"
echo "computer cpu information:"
# Fix: the original was missing the space between `cat` and the path.
cat /proc/cpuinfo
echo "computer memory information:"
cat /proc/meminfo
| true
|
4fe2e381a2198cfcc172dba1f86e0a16dd04a606
|
Shell
|
muad-dweeb/utilites
|
/scripts/setup_virtualenv.sh
|
UTF-8
| 411
| 2.59375
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/bash
# One-shot setup of pip + virtualenv + virtualenvwrapper on a
# Debian/Ubuntu host; prints the shell-init lines to add manually.
# Assumes no pre-existing virtualenv setup
# Install packages
sudo apt-get install python-pip
sudo pip install --upgrade pip
sudo pip install virtualenv virtualenvwrapper
# NOTE(review): virtualenv is installed twice (pip above, apt here) —
# presumably belt-and-braces; confirm whether both are needed.
sudo apt install virtualenv
# virtualenvwrapper needs these in every shell; emit them for .bashrc.
echo "Add the following lines to .bashrc:"
echo 'export WORKON_HOME=$HOME/.virtualenvs'
echo 'export PIP_DOWNLOAD_CACHE=$HOME/.pip_download_cache'
echo 'source /usr/local/bin/virtualenvwrapper.sh'
| true
|
3e858a177f775e2e4016beec3310d7da740e9392
|
Shell
|
pfafman/meteor-bootstrap-material-design-less
|
/update.sh
|
UTF-8
| 1,233
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Sync the vendored bootstrap-material-design files under lib/ from a
# local git checkout whose path is passed as $1.

BOOTSTRAP_GIT_ROOT=${1:-}
BOOTSTRAP_ROOT='lib'
BOOTSTRAP_DIRS="less js fonts"
BOOTSTRAP_JS="transition alert button carousel collapse dropdown modal tooltip popover scrollspy tab affix"
METEOR_PACKAGE_FILE=package.js

# check if the path is given and exists
# Fix: the original unquoted `[ ! -d $BOOTSTRAP_GIT_ROOT ]` silently
# passed when no argument was supplied.
if [ ! -d "$BOOTSTRAP_GIT_ROOT" ]
then
    echo "You must have a copy of bootstrap-material-design git repository and give a valid path as parameter to this script"
    exit 1
fi

# check if all required directories exist
# BOOTSTRAP_DIRS is intentionally unquoted: a whitespace-separated list.
for DIR in $BOOTSTRAP_DIRS
do
    if [ ! -d "$BOOTSTRAP_ROOT/$DIR" ]
    then
        echo "The required directory '$DIR' wasn't found in your bootstrap-material-design copy"
        exit 2
    fi
done

echo "bootstrap installation found, delete old files..."
# Only the directory prefixes are quoted; the trailing globs must expand.
rm -r "$BOOTSTRAP_ROOT"/less/* "$BOOTSTRAP_ROOT"/js/* "$BOOTSTRAP_ROOT"/fonts/*

echo "copy files from bootstrap installation..."
cp -rv "$BOOTSTRAP_GIT_ROOT"/scripts/*.js "$BOOTSTRAP_ROOT/js"
cp -rv "$BOOTSTRAP_GIT_ROOT"/fonts/* "$BOOTSTRAP_ROOT/fonts"
cp -rv "$BOOTSTRAP_GIT_ROOT"/less/* "$BOOTSTRAP_ROOT/less"

# versions of meteor after v0.7.0.1 (excluding) want .import.less instead of .lessimport
rename -v "s/\\.less/\\.import.less/" lib/less/*.less
# NOTE(review): `sed -i ''` is the BSD/macOS form; GNU sed would treat ''
# as an input file. Confirm the intended platform before changing.
sed -i '' "s/\\.less/\\.import.less/g" lib/less/*.less
echo "done!"
| true
|
b4e73f2ebb4027e6e632349f42e4924bb460a566
|
Shell
|
asanza/asanza.github.io
|
/jekyll-post.sh
|
UTF-8
| 402
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Create a skeleton Jekyll post under _posts/ named YYYY-MM-DD-<title>.md.

if [[ -z $1 ]]; then
    echo "A post title is required. Bye.."
    exit 1
fi

# Replace spaces in the title with dashes; "$1" is quoted so multi-word
# titles survive word-splitting. (The original's unused _datetime was
# dropped.)
_post=$(echo "$1" | tr ' ' '-')
_date=$(date +'%Y-%m-%d')
_title="${_date}-${_post}.md"
_cwd=$(pwd)
_post_file="${_cwd}/_posts/${_title}"

if [[ -f ${_post_file} ]]; then
    echo "File already exists. Bye.."
    exit 1
fi

# Make sure the Jekyll posts directory exists before writing into it.
mkdir -p "${_cwd}/_posts"

# >| overrides a noclobber shell option if the user has one set.
cat << EOF >| "${_post_file}"
---
layout: post
title: $1
---
EOF
| true
|
b9b9fb5bf12f797c61afb094ba86bebc008febd7
|
Shell
|
timplab/ncov
|
/pipeline_scripts/run_nextstrain_clades.sh
|
UTF-8
| 1,434
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# Assign Nextstrain clades to a run's consensus sequences.
# Usage: run_nextstrain_clades.sh <run-name | test_data>

# Resolve the directory holding this script (readlink -f is GNU/Linux only).
if [ "$(uname -s)" = 'Linux' ]; then
    BINDIR=$(dirname "$(readlink -f "$0" || echo "$(echo "$0" | sed -e 's,\\,/,g')")")
else
    BINDIR=$(dirname "$(readlink "$0" || echo "$(echo "$0" | sed -e 's,\\,/,g')")")
fi

#Run directory
# Default to empty so the quoted comparison below cannot crash; the
# original `[ $RUN == "test_data" ]` raised "[: unary operator expected"
# whenever $1 was omitted.
RUN=${1:-}
if [ "$RUN" = "test_data" ]; then
    DIR="/home/idies/workspace/covid19/sequencing_runs/test_data/artic-pipeline/5-post-filter/"
    echo "Running SnpEff on test data "
else
    DIR="/home/idies/workspace/covid19/sequencing_runs/$RUN/artic-pipeline/5-post-filter"
fi
OUTDIR=$DIR
SCRIPT_DIR="/home/idies/workspace/covid19/code/ncov/pipeline_scripts"
REF_GB="/home/idies/workspace/covid19/ncov_reference/reference_seq.gb"
NEXTSTRAIN_CLADES="/home/idies/workspace/covid19/ncov_reference/clades.tsv"

# Pull in the project environment, then the nextstrain conda env.
source /home/idies/workspace/covid19/bashrc
conda activate nextstrain

echo "Assigning nextstrain clades for consensus sequences in ${DIR}"
CONS_FASTA=$OUTDIR/postfilt_consensus_all.fasta
if [ ! -f "$CONS_FASTA" ]; then
    echo " File : $CONS_FASTA does not exist... Making "
    # The MDHP* glob must stay unquoted so it expands per sample.
    cat "${DIR}"/MDHP*.complete.fasta > "$CONS_FASTA"
fi

#usage: assign_clades.py [-h] --sequences SEQUENCES --clade CLADE --gbk GBK [--output OUTPUT] [--keep-temporary-files] [--chunk-size CHUNK_SIZE]
# [--nthreads NTHREADS]
"${SCRIPT_DIR}"/assign_clades.py --sequences "${CONS_FASTA}" --output "${OUTDIR}"/nextstrain_clades.tsv --gbk "${REF_GB}" --clade "${NEXTSTRAIN_CLADES}"
echo "DONE"
| true
|
76da99f26883e60548572e92d833fbb17c286529
|
Shell
|
shivakarna2991/kkphpcode_with_old_features
|
/core/CRON Jobs/FindTaskToExecute.sh
|
UTF-8
| 839
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Bash script for slave servers, checks to see if it should run based on server Instance-Type.
# By definition, slave servers will be of AWS instance type c4.4xlarge - a fast server for doing the vid processing.
# While the master server will be a lower-end, always-on (less-expensive) server like an m4.xlarge or something.
# The below checks to make sure this script is appropriate based on the server instance type.
#
# Query the EC2 instance metadata service for this machine's type.
instancetype=$(GET http://169.254.169.254/latest/meta-data/instance-type)
#
# Quoted: a failed/empty metadata lookup would otherwise break `[`.
if [ "$instancetype" != 'c4.4xlarge' ]
then
    # Fix: `return` is only valid inside a function or a sourced script;
    # executed by cron, the original raised an error here. `exit 0` is
    # the intended behavior.
    exit 0
else
    # retrieve the instance-id of this server and pass it to the FindTaskToExecute.php script
    instanceid=$(GET http://169.254.169.254/latest/meta-data/instance-id)
    cd /home || exit 1
    php core/"CRON Jobs"/FindTaskToExecute.php -i "$instanceid"
fi
| true
|
6edb10cb546da12c4ca1c6dbf45f29f34185e422
|
Shell
|
Arjunkhera/Notes
|
/tools/commandline/solutions_UnixWorkbench/varthird.sh
|
UTF-8
| 95
| 2.890625
| 3
|
[] |
no_license
|
#! /usr/bin/env bash
# Multiply the number of arguments by the first argument and print it.

num=$#
# Default to 0 so a missing first argument does not abort the arithmetic.
first=${1:-0}
# $(( )) arithmetic instead of spawning an external `expr` process.
res=$(( num * first ))
echo "The product is : $res"
| true
|
38f4fba75952778792ba84517b1ebc90ca695cf5
|
Shell
|
njsmith/download
|
/installers/rpm-code/codes.sh
|
UTF-8
| 6,404
| 3.78125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
codes_assert_easy_install() {
# Abort the install (via install_err) if any package bypassed pip and
# used `python setup.py install`, which leaves an easy-install.pth
# behind inside the pyenv prefix.
local easy=$(find $(pyenv prefix)/lib -name easy-install.pth)
if [[ $easy ]]; then
install_err "$easy: packages used python setup.py install instead of pip:
$(cat "$easy")"
fi
}
codes_bin_dir() {
    # Echo the directory that holds the active pyenv python executable.
    local python_path
    python_path=$(pyenv which python)
    dirname "$python_path"
}
codes_cmake() {
    # Configure an out-of-tree CMake build in ./build; the caller is left
    # cd'ed into the build directory (existing contract).
    mkdir build
    # Fix: guard the cd — without it a failed mkdir/cd ran cmake in the
    # wrong directory.
    cd build || return
    cmake "$@" ..
}
codes_curl() {
    # curl wrapper: silent (-s) but still report errors (-S), follow
    # redirects (-L); remaining arguments are passed straight through.
    curl -sSL "$@"
}
codes_dependencies() {
# Declare dependencies on other RadiaSoft codes: each becomes an
# rscode-<name> RPM dependency, the codes are built via the installer,
# and the install sentinel is refreshed afterwards.
local i
for i in "$@"; do
rpm_code_build_depends+=( "rscode-$i" )
done
install_repo_eval code "$@"
codes_touch_sentinel
}
codes_download() {
# Download a code from a git repo, tarball, or rpm and record it in the
# build manifest. Side effect: for git/tarball sources the caller is
# left cd'ed into the unpacked source directory.
# $1 repo (bare name => radiasoft/<name> on GitHub), $2 git ref or
# tarball dir name, $3/$4 optional manifest package/version overrides.
# If download is an rpm, also installs
local repo=$1
local qualifier=${2:-}
local package=${3:-}
local version=${4:-}
if [[ ! $repo =~ / ]]; then
repo=radiasoft/$repo
fi
if [[ ! $repo =~ ^(ftp|https?): ]]; then
repo=https://github.com/$repo.git
fi
codes_msg "Download: $repo"
case $repo in
*.git)
local d=$(basename "$repo" .git)
# Reuse an existing clone (cleaned) when the caller opted in via
# codes_download_reuse_git, e.g. per-python-version rebuilds.
if [[ -d "$d" && ${codes_download_reuse_git:-} ]]; then
cd "$d"
codes_msg "Cleaning: $PWD"
git clean -dfx
elif [[ $qualifier ]]; then
# Don't pass --depth in this case for a couple of reasons:
# 1) we don't know where the commit is; 2) It might be a simple http
# transport (synergia.sh) which doesn't support git
git clone --recursive -q "$repo"
cd "$d"
git checkout "$qualifier"
else
git clone --recursive --depth 1 "$repo"
cd "$d"
fi
local manifest=('' '')
repo=
;;
*.tar\.gz)
local b=$(basename "$repo" .tar.gz)
local d=${qualifier:-$b}
local t=tarball-$RANDOM
codes_curl -o "$t" "$repo"
tar xzf "$t"
rm -f "$t"
cd "$d"
# Split <name>-<version> from the tarball basename for the manifest.
if [[ ! $b =~ ^(.+)-([[:digit:]].+)$ ]]; then
codes_err "$repo: basename does not match version regex"
fi
local manifest=(
"${BASH_REMATCH[1]}"
"${BASH_REMATCH[2]}"
)
;;
*.rpm)
local b=$(basename "$repo")
local n="${b//-*/}"
if rpm --quiet -q "$n"; then
echo "$b already installed"
else
# not a yum dependency (codes script copies the files)
install_yum_install "$repo"
fi
local manifest=(
"$(rpm -q --queryformat '%{NAME}' "$n")"
"$(rpm -q --queryformat '%{VERSION}-%{RELEASE}' "$n")"
)
;;
*)
codes_err "$repo: unknown repository format; must end in .git, .rpm, .tar.gz"
;;
esac
# Skip manifest duplication when re-using a previously recorded clone.
if [[ ! ${codes_download_reuse_git:-} ]]; then
codes_manifest_add_code "${package:-${manifest[0]}}" "${version:-${manifest[1]}}" "$repo"
fi
return 0
}
codes_download_foss() {
    # Fetch "$1" from the RadiaSoft FOSS depot, forwarding the remaining
    # arguments to codes_download unchanged.
    local rel_path=$1
    shift
    codes_download "https://depot.radiasoft.org/foss/$rel_path" "$@"
}
codes_download_module_file() {
    # Download codes/<current module>/<file> from the installer and write
    # it to <file> in the current directory.
    local target=$1
    install_download "codes/$codes_module/$target" > "$target"
}
codes_err() {
# Log the message via codes_msg and return failure to the caller.
codes_msg "$@"
return 1
}
codes_install() {
# Build and install one code module in a fresh timestamped directory
# under ~/src/radiasoft/codes, sourcing codes/<module>.sh for the
# actual build steps. Globals written: rpm_code_build_src_dir,
# codes_install_sentinel, codes_module.
local module=$1
local prev=$(pwd)
local build_d=$HOME/src/radiasoft/codes/$module-$(date -u +%Y%m%d.%H%M%S)
rm -rf "$build_d"
mkdir -p "$build_d"
codes_msg "Build: $module"
codes_msg "Directory: $build_d"
cd "$build_d"
rpm_code_build_src_dir=( "$build_d" )
codes_install_sentinel=$build_d/.codes_install
codes_touch_sentinel
local codes_module=$module
install_script_eval "codes/$module.sh"
cd "$prev"
# "common" only provides shared helpers; no python artifacts to collect.
if [[ $module == common ]]; then
return
fi
# If the module defines <module>_python_install, run it once per python
# version listed in <module>_python_versions (indirect expansion below);
# otherwise just collect whatever the build script installed.
local p=${module}_python_install
if compgen -A function "$p"; then
# Needed for pyenv
install_source_bashrc
local v
local codes_download_reuse_git=
local vs=${module}_python_versions
# No quotes so splits
for v in ${!vs}; do
cd "$build_d"
install_not_strict_cmd pyenv activate py"$v"
"$p" "$v"
codes_install_add_python
# Re-use the first clone for subsequent python versions.
codes_download_reuse_git=1
done
else
codes_install_add_python
fi
cd "$prev"
}
codes_install_add_python() {
# Add to the RPM file list every file/symlink created under the active
# pyenv prefix since the sentinel was last touched (ctime comparison).
local pp=$(pyenv prefix)
# This excludes all the top level directories and python2.7/site-packages
rpm_code_build_exclude_add "$pp"/* "$(codes_pylib_dir)"
codes_assert_easy_install
# note: --newer doesn't work, because some installers preserve mtime
find "$pp/" ! -name pip-selfcheck.json ! -name '*.pyc' ! -name '*.pyo' \
\( -type f -o -type l \) -cnewer "$codes_install_sentinel" \
| rpm_code_build_include_add
}
codes_main() {
# Installer entry point; delegates straight to codes_install.
codes_install "$@"
}
codes_make_install() {
    # Run make with one job per available core. Extra arguments replace
    # the default "install" target.
    local -a cmd
    cmd=( make "-j$(codes_num_cores)" )
    # Fix: test the argument count directly; the original's `[[ $@ ]]`
    # only checked whether the joined argument string was non-empty.
    if (( $# > 0 )); then
        cmd+=( "$@" )
    else
        cmd+=( install )
    fi
    "${cmd[@]}"
}
codes_manifest_add_code() {
    # Append a version/source/build record for one code to the global
    # rpm_code_build_desc. All three parameters must be supplied unless
    # the current directory is a git checkout, in which case the missing
    # values are derived from it.
    local pkg_name=${1:-}
    local pkg_version=${2:-}
    local pkg_repo=${3:-}
    local build_dir
    build_dir=$(pwd)
    [[ $pkg_name ]] || pkg_name=$(basename "$build_dir")
    [[ $pkg_version ]] || pkg_version=$(git rev-parse HEAD)
    [[ $pkg_repo ]] || pkg_repo=$(git config --get remote.origin.url)
    rpm_code_build_desc+="version: $pkg_version
source: $pkg_repo
build: $build_dir
"
}
codes_msg() {
    # Log to stderr, prefixed with the current UTC time (HH:MM:SSZ).
    local stamp
    stamp=$(date -u +%H:%M:%SZ)
    echo "$stamp" "$@" >&2
}
codes_num_cores() {
    # Print the number of make jobs to use: "core id" entries from
    # /proc/cpuinfo, halved (hyperthread heuristic) unless running on
    # Travis CI, and never less than 1.
    local count
    count=$(grep -c '^core id[[:space:]]*:' /proc/cpuinfo)
    [[ ${TRAVIS:-} == true ]] || count=$(( $count / 2 ))
    (( $count >= 1 )) || count=1
    echo "$count"
}
codes_pylib_dir() {
# Print the active python's site-packages directory, without a trailing
# newline (sys.stdout.write, not print).
python -c 'import sys; from distutils.sysconfig import get_python_lib as x; sys.stdout.write(x())'
}
codes_python_install() {
# pip-install the current directory, then verify nothing fell back to
# `setup.py install` (which would leave an easy-install.pth behind).
pip install .
codes_assert_easy_install
}
codes_touch_sentinel() {
    # Delete and recreate the sentinel so it carries a brand-new ctime;
    # the installer's find compares against it with -cnewer.
    rm -f "$codes_install_sentinel"
    : > "$codes_install_sentinel"
}
codes_yum_dependencies() {
# Record the packages as RPM build dependencies and install them via yum.
rpm_code_build_depends+=( "$@" )
install_yum_install "$@"
}
| true
|
4bdce50c57ba19221f48aadf5428c6ef0c992f8a
|
Shell
|
Cloudxtreme/archC7
|
/installarch.sh
|
UTF-8
| 7,562
| 3.59375
| 4
|
[] |
no_license
|
## Code modified from
## http://chromeos-cr48.blogspot.com/2013/05/chrubuntu-one-script-to-rule-them-all_31.html
#
#
# Check we're in dev mode.
# crossystem reports "developer" for mainfw_type only with a developer BIOS.
fw_type="`crossystem mainfw_type`"
if [ ! "$fw_type" = "developer" ]
then
echo -e "\nYou're Chromebook is not running a developer BIOS!"
echo -e "You need to run:"
echo -e ""
echo -e "sudo chromeos-firmwareupdate --mode=todev"
echo -e ""
echo -e "and then re-run this script."
exit
fi
# Keep display on.
# Stop powerd only if it is currently running, then disable console blanking.
powerd_status="`initctl status powerd`"
if [ ! "$powerd_status" = "powerd stop/waiting" ]
then
echo -e "Stopping powerd to keep display from timing out..."
initctl stop powerd
fi
setterm -blank 0
# Write changes to disk
# Two modes: with a third argument, wipe that whole device and lay down a
# fresh GPT; otherwise carve KERN-C/ROOT-C out of the internal disk's
# stateful partition (requires a reboot before re-running).
if [ "$3" != "" ]; then
target_disk=$3
echo "Got ${target_disk} as target drive"
echo ""
echo "WARNING! All data on this device will be wiped out! Continue at your own risk!"
echo ""
read -p "Press [Enter] to install ChromeArch on ${target_disk} or CTRL+C to quit"
# blockdev --getsz reports the size in 512-byte sectors.
ext_size="`blockdev --getsz ${target_disk}`"
aroot_size=$((ext_size - 65600 - 33))
parted --script ${target_disk} "mktable gpt"
cgpt create ${target_disk}
cgpt add -i 6 -b 64 -s 32768 -S 1 -P 5 -l KERN-A -t "kernel" ${target_disk}
cgpt add -i 7 -b 65600 -s $aroot_size -l ROOT-A -t "rootfs" ${target_disk}
sync
blockdev --rereadpt ${target_disk}
partprobe ${target_disk}
crossystem dev_boot_usb=1
else
#Prompt user for disk sizes
target_disk="`rootdev -d -s`"
# Do partitioning (if we haven't already)
ckern_size="`cgpt show -i 6 -n -s -q ${target_disk}`"
croot_size="`cgpt show -i 7 -n -s -q ${target_disk}`"
state_size="`cgpt show -i 1 -n -s -q ${target_disk}`"
# Sector counts -> gigabytes: /1024/1024/2 (512-byte sectors).
max_arch_size=$(($state_size/1024/1024/2))
rec_arch_size=$(($max_arch_size - 1))
# If KERN-C and ROOT-C are one, we partition, otherwise assume they're what they need to be...
if [ "$ckern_size" = "1" -o "$croot_size" = "1" ]
then
while :
do
read -p "Enter the size in gigabytes you want to reserve for arch. Acceptable range is 5 to $max_arch_size but $rec_arch_size is the recommended maximum: " arch_size
# Non-numeric input makes -ne fail; the 2>/dev/null hides that error.
if [ ! $arch_size -ne 0 2>/dev/null ]
then
echo -e "\n\nNumbers only please...\n\n"
continue
fi
if [ $arch_size -lt 5 -o $arch_size -gt $max_arch_size ]
then
echo -e "\n\nThat number is out of range. Enter a number 5 through $max_arch_size\n\n"
continue
fi
break
done
# We've got our size in GB for ROOT-C so do the math...
#calculate sector size for rootc
rootc_size=$(($arch_size*1024*1024*2))
#kernc is always 16mb
kernc_size=32768
#new stateful size with rootc and kernc subtracted from original
stateful_size=$(($state_size - $rootc_size - $kernc_size))
#start stateful at the same spot it currently starts at
stateful_start="`cgpt show -i 1 -n -b -q ${target_disk}`"
#start kernc at stateful start plus stateful size
kernc_start=$(($stateful_start + $stateful_size))
#start rootc at kernc start plus kernc size
rootc_start=$(($kernc_start + $kernc_size))
#Do the real work
echo -e "\n\nModifying partition table to make room for arch."
echo -e "Your Chromebook will reboot, wipe your data and then"
echo -e "you should re-run this script..."
umount -f /mnt/stateful_partition
# stateful first
cgpt add -i 1 -b $stateful_start -s $stateful_size -l STATE ${target_disk}
# now kernc
cgpt add -i 6 -b $kernc_start -s $kernc_size -l KERN-C ${target_disk}
# finally rootc
cgpt add -i 7 -b $rootc_start -s $rootc_size -l ROOT-C ${target_disk}
read -p "Press [Enter] to reboot..."
reboot
exit
fi
fi
# hwid lets us know if this is a Mario (Cr-48), Alex (Samsung Series 5), ZGB (Acer), etc
hwid="`crossystem hwid`"
chromebook_arch="`uname -m`"
arch_version="default"
echo -e "\nChrome device model is: $hwid\n"
echo -e "Attempting to pacstrap Arch Linux\n"
echo -e "Kernel Arch is: $chromebook_arch  Installing Arch Linux...\n"
read -p "Press [Enter] to continue..."
if [ ! -d /mnt/stateful_partition/arch ]
then
mkdir /mnt/stateful_partition/arch
fi
cd /mnt/stateful_partition/arch
# # This wont work on arm yet! Sorry!
# Partitions 6/7 were created (or assumed present) by the block above.
target_rootfs="${target_disk}7"
target_kern="${target_disk}6"
echo "Target Kernel Partition: $target_kern  Target Root FS: ${target_rootfs}"
if mount|grep ${target_rootfs}
then
echo "Refusing to continue since ${target_rootfs} is formatted and mounted. Try rebooting"
exit
fi
# Format rootfs to ext4
mkfs.ext4 ${target_rootfs}
# Mount new root
if [ ! -d /tmp/archfs ]
then
mkdir /tmp/archfs
fi
mount -t ext4 ${target_rootfs} /tmp/archfs
# pacstrap arch Get OS Image and extract to root.
## TODO
wget https://raw.github.com/GrayHatter/archC7/develop/arch-bootstrap.sh
chmod +x arch-bootstrap.sh
. ./arch-bootstrap.sh /tmp/archfs
# Grab a copy of cgpt for our new install.
if [ -f /usr/bin/old_bins/cgpt ]
then
cp /usr/bin/old_bins/cgpt /tmp/archfs/usr/bin/
else
cp /usr/bin/cgpt /tmp/archfs/usr/bin/
fi
chmod a+rx /tmp/archfs/usr/bin/cgpt
# Set hostname vars.
cp /etc/resolv.conf /tmp/archfs/etc/
echo ChromeArch > /tmp/archfs/etc/hostname
#echo -e "127.0.0.1       localhost
echo -e "\n127.0.1.1       ChromeArch" >> /tmp/archfs/etc/hosts
# We're about to chroot: remount.
mount -o bind /proc /tmp/archfs/proc
mount -o bind /dev /tmp/archfs/dev
mount -o bind /dev/pts /tmp/archfs/dev/pts
mount -o bind /sys /tmp/archfs/sys
# Finish arch setup
# The echo body below becomes /install-arch.sh inside the chroot; the
# embedded \-continuations are preserved inside the double quotes.
echo -e "echo 'chromearch' > /etc/hostname
echo 'This will take some time, just wait, it will probably work...'
/usr/bin/pacman-key --init
/usr/bin/pacman-key --populate archlinux
/usr/bin/pacman \
--noconfirm -Syu --force
/usr/bin/pacman \
--noconfirm -Sy --force base
" > /tmp/archfs/install-arch.sh
# chroot and run install/update script.
chmod a+x /tmp/archfs/install-arch.sh
chroot /tmp/archfs /bin/bash -c /install-arch.sh
rm /tmp/archfs/install-arch.sh
#write a script to make some changes on root login
#cgpt set successful
# Prepare our kernel
# Copy modules.
KERN_VER=`uname -r`
mkdir -p /tmp/archfs/lib/modules/$KERN_VER/
cp -ar /lib/modules/$KERN_VER/* /tmp/archfs/lib/modules/$KERN_VER/
if [ ! -d /tmp/archfs/lib/firmware/ ]
then
mkdir /tmp/archfs/lib/firmware/
fi
# Copy over lib/firmware
cp -ar /lib/firmware/* /tmp/archfs/lib/firmware/
echo "console=tty1 quiet root=${target_rootfs} rootwait rw lsm.module_locking=0" > kernel-config
vbutil_arch="x86"
current_rootfs="`rootdev -s`"
# On ChromeOS the kernel partition number is the rootfs number minus one.
current_kernfs_num=$((${current_rootfs: -1:1}-1))
current_kernfs=${current_rootfs: 0:-1}$current_kernfs_num
# Sign kernel so it will boot
vbutil_kernel --repack ${target_kern} \
--oldblob $current_kernfs \
--keyblock /usr/share/vboot/devkeys/kernel.keyblock \
--version 1 \
--signprivate /usr/share/vboot/devkeys/kernel_data_key.vbprivk \
--config kernel-config \
--arch $vbutil_arch
#Set arch kernel partition as top priority for next boot (and next boot only)
cgpt add -i 6 -P 5 -T 1 ${target_disk}
# We're done, prompt user.
echo -e "
Installation seems to be complete. If ChromeArch fails to boot when you reboot,
power off your Chrome OS device and then turn it back on. You'll be back
in Chrome OS. If you're happy with ChromeArch when you reboot be sure to run:
sudo cgpt add -i 6 -P 5 -S 1 ${target_disk}
To make it the default boot option. The ChromeArch login is:
Username:  root
Password:  [blank]
We're now ready to start ChromeArch!
"
read -p "Press [Enter] to reboot..."
reboot
| true
|
2220659f175bf8619ff66fb090d3150b49015d14
|
Shell
|
cf-gemfire-org/routing-release
|
/ci/tasks/create-final-routing-api-cli-release/task
|
UTF-8
| 768
| 3.421875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# CI task: build the routing-api CLI for darwin/linux and copy the
# resulting tarballs into OUTPUT_DIR.
set -exu
export GOROOT=/usr/local/go
export PATH=$GOROOT/bin:$PATH
root_dir="${PWD}"
# :? aborts with the message if OUTPUT_DIR is unset or empty.
OUTPUT_DIR="${root_dir}/${OUTPUT_DIR:?"OUTPUT_DIR must be provided"}"
export RTR_VERSION=$(cat version/version)
pushd "${CF_ROUTING_RELEASE_DIR}/"
# The release checkout doubles as the GOPATH.
export GOPATH=$PWD
export PATH=$GOPATH/bin:$PATH
cli_src_dir="src/code.cloudfoundry.org/routing-api-cli"
# Replace any stale CLI source with the freshly fetched copy.
rm -rf "${cli_src_dir}"
cp -r "${root_dir}/routing-api-cli-src" "${cli_src_dir}"
pushd "${cli_src_dir}"
./bin/build-all.sh
pushd out
tar -vczf rtr-darwin-amd64.tgz rtr-darwin-amd64
tar -vczf rtr-linux-amd64.tgz rtr-linux-amd64
echo "===GENERATED BINARIES==="
# Remember the out/ directory so it can be copied after the popds.
CLI_BINARY=$PWD
popd
ls out/
popd
popd
mkdir -p "${OUTPUT_DIR}"
cp -r "${CLI_BINARY}" "${OUTPUT_DIR}"
ls "${OUTPUT_DIR}"
| true
|
11f1abb795261cdc1bda2d388be88845111e823f
|
Shell
|
chakralinux/desktop
|
/groovy/PKGBUILD
|
UTF-8
| 2,158
| 2.953125
| 3
|
[] |
no_license
|
# PKGBUILD metadata for the Groovy language: one source base producing the
# runtime package and a separate documentation package.
pkgbase=groovy
pkgname=('groovy' 'groovy-docs')
pkgver=2.4.11
pkgrel=1
pkgdesc='Programming language based on Java, inspired by Python, Ruby and Smalltalk'
arch=('any')
url='http://groovy-lang.org/'
license=('APACHE')
makedepends=('gendesk')
# Upstream binary distribution, the docs bundle, and the local menu icon.
source=("http://dl.bintray.com/groovy/maven/apache-$pkgbase-binary-$pkgver.zip"
"http://dl.bintray.com/groovy/maven/apache-$pkgbase-docs-$pkgver.zip"
"$pkgbase.png")
# Checksums for the three sources above, in the same order.
sha256sums=('4479a3653c565b173fc0d0e5e514a06f1c7d6f93926cbe0c5d8d29e5e4a0347c'
'6cc0221b94fb15378e399a69d36c32e5102942f9a8b18cd3b34258fbaee21d96'
'24e539235b5762b1220f39d7953a52e9b4a458c2be4f66a60219755363ba6dba')
prepare() {
# Build a desktop entry for the Groovy console.
gendesk -f -n --pkgname "$pkgbase" --pkgdesc 'Groovy programming language' \
	--exec 'groovyConsole' --name 'Groovy Console'
# Hard-wire GROOVY_HOME near the top of every launcher script so the
# scripts keep working once installed under /usr/share/groovy.
local launcher
for launcher in "$pkgbase-$pkgver/bin/"*; do
	sed 's:bin/sh:bin/sh\nGROOVY_HOME=/usr/share/groovy:' -i "$launcher"
done
# Provide an unversioned symlink to the all-in-one jar.
cd "$pkgbase-$pkgver/embeddable"
ln -sf "$pkgbase-all-$pkgver.jar" "$pkgbase-all.jar"
}
package_groovy() {
depends=('bash' 'java-environment')
optdepends=('groovy-docs: html and pdf documentation for Groovy')
cd "$pkgbase-$pkgver"
# Create the directories and package the files
install -d "$pkgdir/usr/share/groovy" "$pkgdir/usr/bin"
cp -r lib conf embeddable "$pkgdir/usr/share/groovy"
cp bin/* "$pkgdir/usr/bin"
# Remove all DOS/Windows batch files.
# $pkgdir is quoted (build paths may contain spaces) and `-exec ... +`
# batches the removals instead of forking rm once per file.
find "$pkgdir" -name '*.bat' -exec rm -f -- {} +
# Package the license file
install -Dm644 LICENSE \
"$pkgdir/usr/share/licenses/$pkgbase/LICENSE"
# Package the desktop shortcut for Groovy Console
install -Dm644 "$srcdir/$pkgbase.desktop" \
"$pkgdir/usr/share/applications/$pkgbase.desktop"
install -Dm644 "$srcdir/$pkgbase.png" \
"$pkgdir/usr/share/pixmaps/$pkgbase.png"
}
package_groovy-docs() {
cd "$pkgbase-$pkgver"
# Install the HTML docs and bundled licenses under the versioned doc dir.
install -d "$pkgdir/usr/share/doc/$pkgbase-$pkgver"
cp -r html/* licenses "$pkgdir/usr/share/doc/$pkgbase-$pkgver"
# Documentation files must not carry executable bits.
find "$pkgdir" -type f -exec chmod -x {} \;
}
| true
|
d1f03c74d2021582eeca735aba3722d717cef9c2
|
Shell
|
strymj/ubuntu_settings
|
/scripts/display_settings.sh
|
UTF-8
| 2,789
| 4.3125
| 4
|
[] |
no_license
|
#!/bin/bash
echo "##### display_settings.sh #####"
# Current brightness, scraped from xrandr's verbose output (first match).
brightness=$(xrandr --verbose | grep -m 1 rightness | awk '{ print $2 }')
# Brightness limits, step size, and the author's preferred level.
brightness_max=1.0
brightness_min=0.1
brightness_step=0.1
brightness_mine=0.5
# Colour temperatures used when redshift is on / off.
redshift_on=5000
redshift_off=6500
gamma=$(xrandr --verbose | grep -m 1 amma | awk '{ print $2}')
gamma_default="1.0:1.0:1.0"
redshift=${redshift_off}
# Infer whether redshift is currently active: any non-default gamma means on.
# Both sides are quoted so an empty $gamma (e.g. xrandr unavailable) cannot
# turn the comparison into a [ ] syntax error.
if [ "${gamma}" != "${gamma_default}" ]; then
redshift=${redshift_on}
fi
_help ()
{
# Print usage for this script; ${brightness_step} expands in the here-doc
# just as it did in the original double-quoted echo lines.
cat <<EOF
Usage: sh redshift_brightness.sh [options] [args]
-v Show current value.
-d Default redshift and brightness value.
-m My favorite redshift and brightness value.
-r <redshift arg>
 on --- Turn redshift on.
 off --- Turn redshift off.
 toggle --- Switch redshift on and off.
-b <brightness arg>
 [value] --- Set brightness(0.1 ~ 1.0).
 inc, + --- Increment brightness by ${brightness_step}.
 dec, - --- Decrement brightness by ${brightness_step}.
 default --- Default brightness(1.0).
-h Show this help message.
EOF
}
show_current_value ()
{
# Report the currently selected colour temperature and brightness
# (reads the globals $redshift and $brightness).
printf 'Redshift temp : %s K\n' "${redshift}"
printf 'Brightness : %s\n' "${brightness}"
}
redshift_fcn ()
{
# Update the global $redshift according to the requested action:
# on / off / toggle; anything else prints usage and exits.
local action="$1"
case "${action}" in
"on")
echo "[Redshift on]"
redshift=${redshift_on}
;;
"off")
echo "[Redshift off]"
redshift=${redshift_off}
;;
"toggle")
echo "[Redshift toggle]"
if [ ${redshift} -eq ${redshift_on} ]; then
redshift=${redshift_off}
else
redshift=${redshift_on}
fi
;;
?*)
_help && exit
;;
esac
}
brightness_range_check ()
{
# Clamp the global $brightness into [brightness_min, brightness_max].
# (Any argument passed by callers is ignored; only globals are used.)
# bc performs the floating-point comparisons; the substitutions are quoted
# and compared as strings so an empty result (bc missing/failing) cannot
# produce a [ ] syntax error as the original unquoted `-eq` form could.
if [ "$(echo "${brightness} < ${brightness_min}" | bc)" = "1" ]; then
brightness=${brightness_min}
elif [ "$(echo "${brightness_max} < ${brightness}" | bc)" = "1" ]; then
brightness=${brightness_max}
fi
}
brightness_fcn ()
{
# Adjust the global $brightness: step it up or down, reset to maximum,
# or set an explicit value; always clamp into range afterwards.
local request="$1"
case "${request}" in
"inc" | "+")
echo "[Brightness increment]"
brightness=$(echo "scale=2; ${brightness} + ${brightness_step}" | bc)
;;
"dec" | "-")
echo "[Brightness decrement]"
brightness=$(echo "scale=2; ${brightness} - ${brightness_step}" | bc)
;;
"default")
echo "[Default brightness]"
brightness=${brightness_max}
;;
?*)
brightness=$1
;;
esac
brightness_range_check "${brightness}"
}
set_my_value()
{
# Apply the author's preferred values: redshift on, dimmed brightness.
# (Message fixed: it previously said "default", describing the other
# function — the two echo strings were swapped.)
echo "Set my brightness and redshift value."
redshift=${redshift_on}
brightness=${brightness_mine}
}
set_default_value()
{
# Restore defaults: redshift off, full brightness.
# (Message fixed: it previously said "my", describing the other
# function — the two echo strings were swapped.)
echo "Set default brightness and redshift value."
redshift=${redshift_off}
brightness=${brightness_max}
}
# With no arguments at all, print usage and leave.
if [ $# = 0 ]; then
_help && exit
fi
# Parse options; -r and -b take an argument (redshift action / brightness
# action), the rest are simple flags handled by the functions above.
while getopts "b:r:dmhv" OPT; do
case ${OPT} in
r) redshift_fcn ${OPTARG};;
b) brightness_fcn ${OPTARG};;
d) set_default_value;;
m) set_my_value;;
v) show_current_value;;
h) _help && exit;;
esac
done
# Apply the selected colour temperature and brightness via redshift(1).
redshift -O ${redshift} -b ${brightness}
exit
| true
|
8c3c4d5ccdae7a99bf3f0bb09c59e80a65ae9260
|
Shell
|
tupyy/dotfiles
|
/utils/.utils/utils.sh
|
UTF-8
| 381
| 2.515625
| 3
|
[] |
no_license
|
# Source optional helper scripts from ~/.utils when they actually exist.
# (Fix: the original tested [[ -n "<path>" ]], which only checks that the
# literal path string is non-empty — always true — so missing files were
# sourced anyway and produced errors; -f tests real file existence.)

# source fzf functions
[[ -f "$HOME/.utils/fzf_functions.sh" ]] && source "$HOME/.utils/fzf_functions.sh"

# source aws alias
[[ -f "$HOME/.utils/aws_aliases.sh" ]] && source "$HOME/.utils/aws_aliases.sh"

# source kube utils
[[ -f "$HOME/.utils/kube.sh" ]] && source "$HOME/.utils/kube.sh"

[[ -f "$HOME/.utils/change_font_size.sh" ]] && source "$HOME/.utils/change_font_size.sh"
|
7c9c3adf66a690a5d0aec4f11dca9e196238d506
|
Shell
|
kalettuce/cse461-22
|
/project3/run.sh
|
UTF-8
| 770
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash

# Note: Mininet must be run as root. So invoke this shell script
# using sudo.

# Experiment parameters: duration (s), bottleneck bandwidth (Mb/s) and
# per-link delay (ms).
time=200
bwnet=1.5
delay=5

# Repeat the experiment for each router queue size, plotting queue
# occupancy and RTT into files named after the queue size.
for qsize in 20 100; do
	suffix=-q$qsize

	python3 bufferbloat.py -b $bwnet --delay $delay -d output -t $time --maxq $qsize

	python3 plot_queue.py -f output/q.txt -o output/reno-buffer$suffix.png
	python3 plot_ping.py -f output/ping.txt -o output/reno-rtt$suffix.png

	# Raw measurements are no longer needed once the plots exist.
	rm -f output/*.txt
done
| true
|
0b4ec4dffdded631d0e2581d5712bf27b2254596
|
Shell
|
os1315/DVS-ESA
|
/open-loop-dataset-tools/shell/view_compound.sh
|
UTF-8
| 2,446
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/sh

# Render the Itokawa open-loop dataset twice with PANGU's viewer: once with a
# bright, noise-free camera model into raw_bright/, then with a dim, noisy
# camera model into raw_dim/.

NEW_FILE="noisetest_2"
NEW_PATH="frames/${NEW_FILE}"

if [ -d "$NEW_PATH" ]
then
	echo "A folder for $NEW_FILE exists"
else
	echo "Creating new directory for $NEW_FILE"
fi

# mkdir -p creates the whole output tree in every case. (Fixes two problems:
# the original only created raw_bright when the top-level folder was missing,
# and a stray trailing "\" after the first viewer command swallowed the
# raw_dim NEW_PATH assignment as a command argument, so raw_dim was never
# created. The unused NEW_CMD variable has been dropped.)
mkdir -p "frames/${NEW_FILE}/raw_bright"

# Bright pass: high irradiance, noise-free detector.
../../bin/viewer \
-noini \
-ini ../pangu.ini \
-ini itokawa.ini \
-err - \
\
-colour 1 1 1 \
-sky black \
-dynamic_shadows pssm \
-nocull_face \
\
-reflect hapke \
-hapke_w 0.33 \
-hapke_B0 0.95 \
-hapke_h 0.05 \
-hapke_L 0.000 \
-hapke_scale 4.667314 \
\
-use_camera_model \
\
-detector 0 1 128 128 0 \
-irradiance 10000000.0 10000000.0 10000000.0 \
-inverse_square_law \
-aperture 0 ignored 1 0 \
-pixel_angle 0 unit \
-distortion 0 0.0 0.0 0.0 0.0 0.0 \
-tangential 0 0.0 0.0 0.0 0.0 \
-scattering 0 ignored 0.0 0.0 0.0 0 \
-psf_gauss 0 0 0.0 0 0.0 0 0.0 \
-exposure 0 1 300 \
-readout_basic 0 0 0.09 0 1000000 1e12 0.0 0 \
-smear 0 none down 1.0 0.0 0.0 0.0 \
-photon_noise 0 0 \
-dark_current 0 none 0 0 \
-detector_radiation 0 0 \
\
-flight test_traj.fli \
-movie \
-image_format float \
-savefmt "frames/${NEW_FILE}/raw_bright/${NEW_FILE}_%03d" \
-quit \
itokawa_q512.pan

mkdir -p "frames/${NEW_FILE}/raw_dim"

# Dim pass: low irradiance with photon noise, dark current and readout noise.
../../bin/viewer \
-noini \
-ini ../pangu.ini \
-ini itokawa.ini \
-err - \
\
-colour 1 1 1 \
-sky black \
-dynamic_shadows pssm \
-nocull_face \
\
-reflect hapke \
-hapke_w 0.33 \
-hapke_B0 0.95 \
-hapke_h 0.05 \
-hapke_L 0.000 \
-hapke_scale 4.667314 \
\
-use_camera_model \
\
-detector 0 1 128 128 0 \
-irradiance 10000.0 10000.0 10000.0 \
-inverse_square_law \
-aperture 0 ignored 1 0 \
-pixel_angle 0 unit \
-distortion 0 0.0 0.0 0.0 0.0 0.0 \
-tangential 0 0.0 0.0 0.0 0.0 \
-scattering 0 ignored 0.0 0.0 0.0 0 \
-psf_gauss 0 0 0.0 0 0.0 0 0.0 \
-exposure 0 0.015 300 \
-readout_basic 0 0 1 0 10 1e6 0.055 24.966 \
-smear 0 none down 1.0 0.0 0.0 0.0 \
-photon_noise 0 0 \
-dark_current 0 constant 0 0 \
-detector_radiation 0 0 \
\
-flight test_traj.fli \
-movie \
-image_format float \
-savefmt "frames/${NEW_FILE}/raw_dim/${NEW_FILE}_%03d" \
-quit \
itokawa_q512.pan
| true
|
15fb193324f84c99da8a0eb49e18e39396731c1c
|
Shell
|
zgtman/fusion_genes
|
/parsing_arriba_result.sh
|
UTF-8
| 340
| 3.015625
| 3
|
[] |
no_license
|
#! /bin/bash

# Merge all per-sample "*_fusions.tsv" arriba outputs into final_arriba.xls:
# one header row (taken from the first file) plus every data row prefixed
# with the sample name (the filename part before "_out").

rm -f tmp_result.xls
for i in *_fusions.tsv;
	do name=${i%_out*}
	awk -v name="$name" '{OFS="\t"}NR>1{print name,$0}' "$i" >> tmp_result.xls
done

# Take the header from the first matching file via the shell's own glob
# instead of parsing `ls` output; "#" is stripped from the header line.
set -- *_fusions.tsv
file=$1
head -1 "$file" | awk '{OFS="\t"}{print "NAME",$0}' | tr -d "#" | cat - tmp_result.xls > final_arriba.xls
rm -f tmp_result.xls
| true
|
29bd48174f0d0f8ca81591a29f1288e0d1337485
|
Shell
|
Frojd/VersionWatcher
|
/tools/track.sh
|
UTF-8
| 3,238
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash

# Usage example: ./track.sh python django

# Return 0 if the first argument equals any of the remaining arguments.
containsElement () {
  local needle="$1" candidate
  shift
  for candidate in "$@"; do
    if [[ "$candidate" == "$needle" ]]; then
      return 0
    fi
  done
  return 1
}
# First positional argument selects which tracker flow to run (case below).
CMD=$1
CIRCLE_BRANCH=${CIRCLE_BRANCH:-master}
# ":=" also assigns the default endpoint to TRACKER_API when unset/empty.
SERVICE_DOMAIN=${TRACKER_API:=https://n2t2izj4a0.execute-api.eu-west-1.amazonaws.com}
PROJECT="$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME"
# Prefer the git tag as version; fall back to the commit SHA.
VERSION=${CIRCLE_TAG:-$CIRCLE_SHA1}
IGNORE_BRANCHES=$TRACKER_IGNORE_BRANCHES
# Split the comma/space separated branch list into an array.
IFS=', ' read -r -a IGNORE_BRANCHES <<< "$IGNORE_BRANCHES"
if [ -z "$TRACKER_API_KEY" ]; then
echo "Error: Missing TRACKER_API_KEY value";
exit;
fi
# Skip tracking entirely for ignored branches.
if containsElement $CIRCLE_BRANCH "${IGNORE_BRANCHES[@]}"; then
echo "Ignoring branch $CIRCLE_BRANCH";
exit;
fi
case "$CMD" in
# Bootstrap a Bedrock-based WordPress checkout on CircleCI: write a test
# env file, install composer deps, fetch wp-cli and run core install.
"wp-bedrock-circle-setup" )
if [ ! -f "test.env" ]; then
cat > test.env <<EOL
DB_USER=ubuntu
DB_NAME=circle_test
DB_PASSWORD=
DB_HOST=127.0.0.1
WP_HOME=
WP_SITEURL=
EOL
fi
mv test.env .env
composer install
curl -O https://raw.githubusercontent.com/wp-cli/builds/gh-pages/phar/wp-cli.phar && chmod +x ./wp-cli.phar
./wp-cli.phar core install --allow-root --admin_name=admin --admin_password=admin --admin_email=admin@example.com --url=http://example.com.dev --title=WordPress
;;
# Plain WordPress setup on CircleCI 2.x (root MySQL user).
"wp-circle2-setup" )
curl -O https://raw.githubusercontent.com/wp-cli/builds/gh-pages/phar/wp-cli.phar && chmod +x ./wp-cli.phar
./wp-cli.phar core config --allow-root --dbname=circle_test --dbuser=root --dbhost=127.0.0.1
./wp-cli.phar core install --allow-root --admin_name=admin --admin_password=admin --admin_email=admin@example.com --url=http://example.com.dev --title=WordPress
;;
# Plain WordPress setup on CircleCI 1.x (ubuntu MySQL user).
"wp-circle-setup" )
curl -O https://raw.githubusercontent.com/wp-cli/builds/gh-pages/phar/wp-cli.phar && chmod +x ./wp-cli.phar
./wp-cli.phar core config --allow-root --dbname=circle_test --dbuser=ubuntu --dbhost=127.0.0.1
./wp-cli.phar core install --allow-root --admin_name=admin --admin_password=admin --admin_email=admin@example.com --url=http://example.com.dev --title=WordPress
;;
# Report WordPress core version plus installed plugin list to the tracker.
# NOTE(review): `-d $(...)` is unquoted, so JSON containing spaces would be
# split into multiple curl arguments — confirm against real plugin output.
"wordpress" )
WP_VERSION=$(./wp-cli.phar core version)
URL="$SERVICE_DOMAIN/prod/tracker/wp?project=$PROJECT&version=$VERSION&wpversion=$WP_VERSION&branch=$CIRCLE_BRANCH&commit=$CIRCLE_SHA1"
curl -X POST $URL -H "Content-Type: application/json; charset=utf-8" -H "x-api-key: $TRACKER_API_KEY" -d $(./wp-cli.phar plugin list --format=json)
;;
# Report the frozen pip requirements; optional $2 overrides the label.
"python" )
LABEL=${2:-django}
pip freeze > post-requirements.txt
URL="$SERVICE_DOMAIN/prod/tracker/python?project=$PROJECT&version=$VERSION&label=$LABEL&branch=$CIRCLE_BRANCH&commit=$CIRCLE_SHA1"
curl -X POST $URL -H "Content-Type: text/plain; charset=utf-8" -H "x-api-key: $TRACKER_API_KEY" --data-binary @post-requirements.txt
rm post-requirements.txt
;;
# Report package.json; optional $2 overrides the label.
"node" )
LABEL=${2:-node}
URL="$SERVICE_DOMAIN/prod/tracker/node?project=$PROJECT&version=$VERSION&label=$LABEL&branch=$CIRCLE_BRANCH&commit=$CIRCLE_SHA1"
curl -X POST $URL -H "Content-Type: application/json; charset=utf-8" -H "x-api-key: $TRACKER_API_KEY" -d @package.json
;;
esac
| true
|
2bf5751a09f86e2a40e0f3d677628ee09a5bfb1c
|
Shell
|
FauxFaux/debian-control
|
/g/gtkballs/gtkballs_3.1.5-11+b1_amd64/postinst
|
UTF-8
| 604
| 3.484375
| 3
|
[] |
no_license
|
#! /bin/sh
# postinst script for Gtkballs
set -e
case "$1" in
configure)
# create a high score file
# Group-writable by "games" so the game can record scores at runtime.
SCOREFILE=/var/games/gtkballs-scores;
if [ ! -f $SCOREFILE ]; then
touch $SCOREFILE;
chgrp games $SCOREFILE;
chmod 0664 $SCOREFILE;
fi
;;
abort-upgrade|abort-remove|abort-deconfigure)
# Nothing to clean up for aborted maintainer-script phases.
;;
*)
echo "postinst called with unknown argument \`$1'" >&2
exit 1
;;
esac
# Automatically added by dh_installmenu
if [ "$1" = "configure" ] && [ -x "`which update-menus 2>/dev/null`" ]; then
update-menus
fi
# End automatically added section
exit 0
| true
|
944d7f5847dfe4df39a5741360cfce8dac55ba3e
|
Shell
|
rnoth/kludgewm
|
/recall
|
UTF-8
| 237
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/sh

# recall: with no argument, list every saved mark with its value; with a mark
# name, print that mark's value. Any remaining arguments are forwarded to
# kludge either way.
TMPDIR=${TMPDIR:-/tmp}

# No mark given: print "<mark>\t<value>" for each file in the marks dir.
# (Backticks replaced with $(), expansions quoted, read -r so backslashes
# in mark names are not mangled.)
test -z "$1" && { ls -b "${TMPDIR}/marks" | \
	while read -r mark; do
		printf "%s\t%s\n" "${mark}" "$(cat "${TMPDIR}/marks/${mark}")"
	done
}
test -n "$1" && { cat "${TMPDIR}/marks/${1}"
	shift
}
kludge "$@"
| true
|
a8fbd0b622593578f5ef43c5e0fa0f8a1a26edb2
|
Shell
|
bborsari/Scripts
|
/utils/add_header.sh
|
UTF-8
| 298
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash

# Prepend an R-style header (V1..V<NF-1>, i.e. one fewer name than there are
# tab-separated fields) to the file given as $1, or to stdin when no readable
# file argument is supplied.

# test's -a/-o operands are obsolescent and ambiguous; use two tests
# joined with && instead.
[ $# -ge 1 ] && [ -f "$1" ] && input="$1" || input="-"

awk 'BEGIN{FS=OFS="\t"}{if(NR==1){
for(i=1;i<=(NF-2);i++)
{printf "V"i"\t"};
printf "V"NF-1"\n";
print $0}
else
{print $0}}' "$input"
| true
|
13cd2905890af21340b9e04a4991244b0230389e
|
Shell
|
jim-meyer/PrimeChallenge
|
/ColdVsWarmCacheTest.sh
|
UTF-8
| 1,319
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash

set -euxo pipefail

# Measure cold- vs warm-cache behaviour of the prime-challenge server: the
# same /Start range is requested twice and timed; the second (warm) run
# should complete much faster than the first (cold) run.

if [[ $# -lt 1 ]]; then
    echo "Usage: ${0} <<IP address of prime challenge server>>"
    exit 1
fi

PRIME_SERVER="${1}"

# Submit /Start/<range>, poll /Query until it stops answering 204 (still
# computing), print the final result, and store the elapsed whole seconds
# in the global $duration. (Factored out of two previously duplicated
# copies of the same cold/warm measurement block.)
run_and_time() {
    local range="$1" starttime endtime jobid
    starttime=$(date +%s)
    jobid=$(curl --request POST "http://${PRIME_SERVER}:8080/Start/${range}")
    echo ${jobid}
    until ! curl --request GET "http://${PRIME_SERVER}:8080/Query/${jobid}" -v 2>&1 | grep "HTTP/1.0 204"; do
        printf '.'
        sleep 1
    done
    curl --request GET "http://${PRIME_SERVER}:8080/Query/${jobid}"
    endtime=$(date +%s)
    duration=$((endtime - starttime))
}

# Warm-up: a trivial request confirms the server is reachable.
jobid=$(curl --request POST "http://${PRIME_SERVER}:8080/Start/1,3")
echo ${jobid}
curl --request GET "http://${PRIME_SERVER}:8080/Query/${jobid}"

run_and_time "1,1234567"
cold_cache_duration=$duration
echo "It took $cold_cache_duration seconds get the prime numbers from a cold cache state"

run_and_time "1,1234567"
warm_cache_duration=$duration
echo "It took $warm_cache_duration seconds get the prime numbers from a warm cache state"

echo "$warm_cache_duration should be much less than $cold_cache_duration"
| true
|
983fb62a4b193ff0cb07d6b73f4dd943eb2751bb
|
Shell
|
DHTC-Tools/ATLAS
|
/dCache Utilities/pathfinder.sh
|
UTF-8
| 375
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/local/bin/bash
#$Id: pnfsidof.sh,v 1.2 2009/12/17 21:30:22 sarah Exp $
# Resolve a dCache PNFS id ($1) to its path by scripting the admin shell.
# The egrep strips the admin interface's banner/prompt noise from the output.
get_pnfsidof() {
/usr/local/bin/dcache-admin << EOF 2>/dev/null | strings | egrep -v '^\[|dCache Admin|dmg.util.CommandExitException|Connection reset by peer'
cd PnfsManager
pathfinder $1
..
logoff
EOF
}
# The admin connection can be flaky: retry once before giving up, and
# propagate the second attempt's exit status.
get_pnfsidof $1
if [ $? != 0 ] ; then
get_pnfsidof $1
exit $?
fi
exit 0
| true
|
df42d328c370f48a67733fba8c5b8d56d9c0714b
|
Shell
|
sanyuli/cygwin-toy-scripts
|
/build_ChinaDNS.sh
|
UTF-8
| 1,325
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/sh

# Build the ChinaDNS cygwin package with cygport and copy the resulting
# chinadns.exe into the dist tree. "clean" removes the previous build dir.

Work_Root=$(pwd)

PKG_NAME="ChinaDNS"
DOWNLOAD_URL="https://github.com/shadowsocks/ChinaDNS/releases/download/1.3.2/chinadns-1.3.2.tar.gz"
SRC_FILE="1.3.2.tar.gz"
SRC_FILE_MD5="285957df58a3522ee9d06f09838e2bb8"
DIST_FILES="${Work_Root}/srcs/ChinaDNS_cygport/ChinaDNS-1.3.2-1bl1.i686/inst/usr/bin/chinadns.exe"
SRC_DIR_NAME="ChinaDNS-1.3.2"

# Abort silently if any of the required settings above is empty.
([ -z "$PKG_NAME" ] || [ -z "$DOWNLOAD_URL" ] || [ -z "$SRC_FILE" ] || [ -z "$SRC_DIR_NAME" ] || [ -z "$SRC_FILE_MD5" ] || [ -z "$DIST_FILES" ]) && exit

[ "$1" = "clean" ] && rm -rf "$Work_Root/srcs/ChinaDNS_cygport/ChinaDNS-1.3.2-1bl1.i686" && exit

# Report whether the expected build artifact already exists.
check_builded() {
	if [ ! -f "$DIST_FILES" ]; then
		cat <<EOF
Build fails with ${DIST_FILES} no esixt!
EOF
		return 1
	else
		cat <<EOF
${DIST_FILES} esixt
EOF
		return 0
	fi
}

if check_builded; then
	cat <<EOF
run: $0 clean
EOF
	exit
fi

. "$Work_Root/http_proxy.conf"

# Download the tarball when it is missing OR its checksum does not match.
# (Fix: the original required both conditions with &&, so a present but
# corrupted file was never re-fetched.)
if [ ! -f "$Work_Root/srcs/ChinaDNS_cygport/$SRC_FILE" ] || [ "$(md5sum "$Work_Root/srcs/ChinaDNS_cygport/$SRC_FILE" | cut -d ' ' -f1)" != "${SRC_FILE_MD5}" ]; then
	wget "${DOWNLOAD_URL}" -O "$Work_Root/srcs/ChinaDNS_cygport/$SRC_FILE"
fi

cd "$Work_Root/srcs/ChinaDNS_cygport"
cygport ChinaDNS-1.3.2-1bl1.cygport all

if check_builded; then
	cp -vr "${Work_Root}/srcs/ChinaDNS_cygport/ChinaDNS-1.3.2-1bl1.i686/inst/usr/bin/chinadns.exe" "$Work_Root/dist/ChinaDNS"
fi
| true
|
81057d744ee7bb7d7e2cd68511467757a81523fc
|
Shell
|
cleverom/bash_scripting_intro_course
|
/create_script2
|
UTF-8
| 2,066
| 4.59375
| 5
|
[] |
no_license
|
#!/bin/bash

# Create script
# This script creates new bash scripts in a fixed directory, marks them
# executable, and (for a single new script) opens it in $EDITOR.
# Author: Tega

# Test if there is an argument
if [[ ! $1 ]]; then
	echo "Missing argument"
	exit 1
fi

# Open the editor afterwards only when exactly one script was requested.
declare open_editor=""
if [[ $# -eq 1 ]]; then
	open_editor="true"
fi

declare -r scriptsdir="${HOME}/Documents/bash_scripting_prac"

# Check that the directory to save to exists; try to create it otherwise.
if [[ ! -d $scriptsdir ]]; then
	if mkdir "$scriptsdir"; then
		echo "Created ${scriptsdir}" >&2
	else
		echo "Could not create ${scriptsdir}" >&2
		exit 1
	fi
fi

# Create one script per argument; `shift` consumes them one at a time.
while [[ $1 ]]; do
	scriptname="$1"
	filename="${scriptsdir}/${scriptname}"

	# Skip names that already exist in the scripts directory.
	# (Bug fix: this message previously ran a nonexistent command via
	# "$(unknown)" instead of printing the conflicting file name.)
	if [[ -e $filename ]]; then
		echo "File ${filename} already exists" >&2
		shift
		continue
	fi

	# Refuse names that shadow an existing command on PATH; output is
	# discarded (stdout and stderr) since only the status matters.
	if type "$scriptname" > /dev/null 2>&1; then
		echo "There is already a command with name ${scriptname}" >&2
		shift
		continue
	fi

	# Seed the new script with a shebang line and make it executable.
	echo "#!/bin/bash" > "$filename"
	chmod u+x "$filename"

	shift
done

# For a single created script, open it in the configured editor.
if [[ $open_editor ]]; then
	echo opening
	if [[ $EDITOR ]]; then
		$EDITOR "$filename"
	fi
fi
|
f46c45e58de73808659b3de575e747a8f203f503
|
Shell
|
kvnlpz/x86-Assembly-files
|
/assignment3submission/run.sh
|
UTF-8
| 1,995
| 3.3125
| 3
|
[] |
no_license
|
#Copyright (C) 2020 Kevin Lopez
#This program is free software# you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation# either version 2
#of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY# without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
# Author: Kevin Lopez
# Email: Kevinlopez8554@csu.fullerton.edu
# Date of last update: 3/5/20
# System Requirements: A modern computer with linux, and an x86 processor
# Program title: Biggest Integer Finder
# Program Description: This program receives a sequence of integers, then puts them into an array
# Then the program prints out the array, then prints out the array. Afterwards, the program finds
# the biggest number in the array and its index, then tells us what they are.
#
# FILES: main.c
# manager.asm
# input_array.cpp
# display_array.asm
# find_biggest.asm
# run.sh <---- You are here.
# File Purpose: Assemble and Compile every file we need and link them together so our program can run properly.
# Remove other helper files.
# (Cleans artifacts from previous builds; rm prints an error when none exist.)
rm *.out
rm *.o
rm *.lis
# Assemble x86 module manager.asm.
nasm -f elf64 -l manager.lis -o manager.o manager.asm
# Assemble x86 module display_array.asm.
nasm -f elf64 -l display_array.lis -o display_array.o display_array.asm
# Assemble x86 module find_biggest.asm.
nasm -f elf64 -l find_biggest.lis -o find_biggest.o find_biggest.asm
# Compile C module main.c
# NOTE(review): unlike nasm, g++ has no "-l <listing>" option; "-l main.lis"
# is parsed as a link-library request (ignored with -c) — confirm whether a
# listing file was actually intended here and in the next command.
g++ -c -m64 -Wall -l main.lis -o main.o main.c -fno-pie -no-pie
# Compile C++ module input_array.cpp
g++ -c -m64 -Wall -l input_array.lis -o input_array.o input_array.cpp -fno-pie -no-pie
# Link object files.
g++ -m64 -o program.out manager.o display_array.o main.o input_array.o find_biggest.o -fno-pie -no-pie
# .out file execution.
./program.out
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.