blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
64d4702a6793af3050ab8fb928f0673867a8b590
|
Shell
|
xgarcias/jokerng
|
/packer/files/scripts/jail2pkg
|
UTF-8
| 3,954
| 3.484375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
ZPOOL="zroot"
# Print an error message and terminate the script with status 1.
# $1 - message text. BUG FIX: the original 'echo $1' left the argument
# unquoted, so multi-word messages were word-split and glob characters
# (e.g. '*') were expanded before printing.
error_msg() {
echo "$1"
exit 1
}
# Emit the fstab(5) line that nullfs-mounts the shared basejail
# (read-only) into the named jail's directory tree.
# $1 - jail hostname (directory name under /usr/jails)
fstab_template() {
local jail_host=$1
printf '%s %s %s %s %s %s\n' \
/usr/jails/basejail "/usr/jails/${jail_host}/basejail" nullfs ro 0 0
}
# Emit an ezjail rc configuration file for one jail on stdout.
#   $1 - jail hostname (also names the rootdir under /usr/jails)
#   $2 - "safe" name (alnum/underscore; embedded in the rc variable names)
#   $3 - IP address assigned to the jail
# ${ZPOOL} is the global pool name set at the top of the script.
# The heredoc below is the literal file content, so no comments inside it.
ezjail_template () {
local hostname=$1
local safename=$2
local ipaddress=$3
cat <<EOF
export jail_${safename}_hostname="${hostname}"
export jail_${safename}_ip="${ipaddress}"
export jail_${safename}_rootdir="/usr/jails/${hostname}"
export jail_${safename}_exec_start="/bin/sh /etc/rc"
export jail_${safename}_exec_stop=""
export jail_${safename}_mount_enable="YES"
export jail_${safename}_devfs_enable="YES"
export jail_${safename}_devfs_ruleset="devfsrules_jail"
export jail_${safename}_procfs_enable="NO"
export jail_${safename}_fdescfs_enable="YES"
export jail_${safename}_image=""
export jail_${safename}_imagetype="zfs"
export jail_${safename}_attachparams=""
export jail_${safename}_attachblocking=""
export jail_${safename}_forceblocking="YES"
export jail_${safename}_zfs_datasets=""
export jail_${safename}_cpuset=""
export jail_${safename}_fib=""
export jail_${safename}_parentzfs="${ZPOOL}/usr/jails"
export jail_${safename}_parameters=""
export jail_${safename}_post_start_script=""
export jail_${safename}_retention_policy=""
EOF
}
# Defaults (overwritten by the positional parameters below).
# USER INPUT
RELEASE="myapp-1.0"
SRCJAIL="template-42"
IPADDR="172.31.0.201"
# USER INPUT
[ $# -eq 3 ] || error_msg "expecting three parameters: source_jail destination_jail ip_address. i.e $ ./jail2pkg template-42 myapp-1.0 172.31.0.201"
SRCJAIL=$1
RELEASE=$2
IPADDR=$3
# Sanity-check the IP. NOTE(review): the dots are unescaped, so this
# regex also accepts strings like "1a2b3c4" — confirm whether stricter
# validation is wanted.
echo ${IPADDR} |egrep -q "^[[:digit:]]{1,3}.[[:digit:]]{1,3}.[[:digit:]]{1,3}.[[:digit:]]{1,3}$" || error_msg "failed to parse ipaddress"
# The source jail must appear in ezjail-admin's listing (column 4,
# skipping the two header lines).
ezjail-admin list | awk '{print $4}' | tail -n +3 | grep -q "^${SRCJAIL}$"
[ $? -eq 0 ] || error_msg "The jail $SRCJAIL doesn't exist"
JAILPATH=/usr/local/var/jockerng/jail
EZJAILPATH=/usr/local/etc/ezjail
STAGEDIR=/tmp/stage
# HOSTNAME: RELEASE with '/' and '~' mapped to '_' (filesystem-safe).
# SAFENAME: RELEASE with every non-alphanumeric mapped to '_' (shell
# variable-name safe; consumed by ezjail_template).
HOSTNAME=`echo -n "${RELEASE}" | tr '/~' '__'`
SAFENAME=`echo -n "${RELEASE}" | tr -c '[:alnum:]' _`
PARENTVOL=${ZPOOL}/usr/jails
SRCVOLNAME=${PARENTVOL}/${SRCJAIL}
# Rebuild the package staging tree from scratch.
rm -rf ${STAGEDIR}
mkdir -p ${STAGEDIR}${JAILPATH}
mkdir -p ${STAGEDIR}${EZJAILPATH}
mkdir -p ${STAGEDIR}/etc
# pkg pre-install hook: refuse to install over an existing ZFS volume.
cat >> ${STAGEDIR}/+PRE_INSTALL <<EOF
zfs list | awk '{print \$1}'| grep -q "${PARENTVOL}/${HOSTNAME}"
if [ \$? -eq 0 ]; then
echo "Volume ${PARENTVOL}/${HOSTNAME} already exists, aborting"
exit 1
fi
EOF
# pkg post-install hook: restore the jail from the bundled zfs stream.
cat >> ${STAGEDIR}/+POST_INSTALL <<EOF
echo "Loading jail ${HOSTNAME}"
bzcat ${JAILPATH}/${HOSTNAME}.bz2 | zfs receive "${PARENTVOL}/${HOSTNAME}"
zfs destroy ${PARENTVOL}/${HOSTNAME}@export
EOF
# pkg pre-deinstall hook: drop the jail's volume if present.
cat >> ${STAGEDIR}/+PRE_DEINSTALL <<EOF
echo "Deleting jail ${HOSTNAME}"
zfs list | awk '{print \$1}'| grep -q "${PARENTVOL}/${HOSTNAME}"
if [ \$? -eq 0 ]; then
zfs destroy ${PARENTVOL}/${HOSTNAME}
fi
EOF
# pkg manifest; depends on sysutils/ezjail at install time.
cat >> ${STAGEDIR}/+MANIFEST <<EOF
name: jokerng-jail-${HOSTNAME}
version: "0.1_1"
origin: misc/jokerng-jail-${HOSTNAME}
comment: "application jail ${HOSTNAME}"
desc: "application jail ${HOSTNAME}"
maintainer: @doe.it
www: https://doe.it
prefix: /
deps: {
ezjail: { origin: sysutils/ezjail }
}
EOF
# Snapshot the source jail and dump it (bzip2-compressed) into the
# staging tree; the snapshot is transient and removed afterwards.
echo "dumping the jail ${HOSTNAME}"
zfs snapshot ${SRCVOLNAME}@export
zfs send ${SRCVOLNAME}@export |bzip2 > ${STAGEDIR}${JAILPATH}/${HOSTNAME}.bz2
zfs destroy ${SRCVOLNAME}@export
# Stage the ezjail config and fstab for the new jail, list the payload
# files in the plist, and build the package in the current directory.
ezjail_template $HOSTNAME $SAFENAME "$IPADDR" > ${STAGEDIR}/$EZJAILPATH/${SAFENAME}
fstab_template $HOSTNAME > ${STAGEDIR}/etc/fstab.${SAFENAME}
echo "${JAILPATH}/${HOSTNAME}.bz2" >> ${STAGEDIR}/plist
echo "$EZJAILPATH/${SAFENAME}" >> ${STAGEDIR}/plist
echo "/etc/fstab.${SAFENAME}" >> ${STAGEDIR}/plist
echo "creating jail package ${HOSTNAME}"
pkg create -m ${STAGEDIR}/ -r ${STAGEDIR}/ -p ${STAGEDIR}/plist -o .
#ezjail-admin create -b template-42 '172.31.0.201' > /dev/null || error_msg "unable to create template jail"
#echo "nameserver 8.8.8.8" > /usr/jails/template-42/etc/resolv.conf
#ezjail-admin delete -wf template-42 > /dev/null || error_msg "unable to delete the template jail"
| true
|
c798dec83c11b6318418f6958b8a1c0be4357fde
|
Shell
|
rostskadat/aws-cloudformation
|
/playbooks/roles/common/files/root/tag_root_volume.sh
|
UTF-8
| 1,259
| 3.9375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# FILE: tag_root_volume.sh
#
# DESCRIPTION: This script will tag the EC2 instance root EBS Volume. This is taken from:
# https://stackoverflow.com/questions/24026425/is-there-a-way-to-tag-a-root-volume-when-initializing-from-the-cloudformation-te
# It will determine the volume_id attached to the current instance, extract the PLATFORM tag of that instance and
# propagate the tag to the volume.
#
# Usage: tag_root_volume.sh <aws-region> <stack-name>
# Must run on the EC2 instance itself (uses the instance metadata service).
Region="$1"
StackName="$2"
[ -z "$Region" ] && echo "Invalid Region" && exit 1
[ -z "$StackName" ] && echo "Invalid StackName" && exit 1
echo "Tagging Root volume..."
# Instance id from the metadata service (only reachable from within EC2).
instance_id=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
# Root EBS volume: the volume attached to this instance as /dev/xvda.
volume_id=$(aws --region $Region ec2 describe-volumes --filters "Name=attachment.instance-id,Values=$instance_id" "Name=attachment.device,Values=/dev/xvda" --query "Volumes[0].VolumeId" --output text)
# PLATFORM tag value, scraped from the table output (grep/cut/sed strip
# the table decoration and whitespace).
platform=$(aws --region $Region ec2 describe-instances --instance-ids $instance_id --query "Reservations[0].Instances[0].Tags" --output table | grep PLATFORM | cut -d '|' -f 3 | sed -e 's/ //g')
# Propagate Name and PLATFORM tags onto the root volume.
aws --region $Region ec2 create-tags --resources $volume_id --tag "Key=Name,Value=${StackName}-VolumeRoot"
aws --region $Region ec2 create-tags --resources $volume_id --tag "Key=PLATFORM,Value=$platform"
| true
|
85d9de28f17de8a8857cfc960420db8616637f3a
|
Shell
|
hoijui/CG1
|
/ex04/run.sh
|
UTF-8
| 596
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/sh
# Launch the cg1_ex04 executable, building it first if necessary and
# symlinking the data/mesh resources into the target directory.
#
# Usage: run.sh [debug] [arg2 arg3 arg4]
#   $1      - "debug" to launch under the ddd debugger
#   $2..$4  - arguments forwarded to the executable
SCRIPT_DIR=$(cd "$(dirname "$0")"; pwd)
TARGET_DIR="${SCRIPT_DIR}/target"
RESOURCES_DIR="${SCRIPT_DIR}/src/main/resources"
EXECUTABLE="${TARGET_DIR}/cg1_ex04"
cd "${SCRIPT_DIR}"
# Build on demand.
if [ ! -f "${EXECUTABLE}" ]; then
./build.sh
fi
cd "${TARGET_DIR}"
# Expose the data directory next to the binary.
if [ ! -e data ]; then
ln -s ../src/data data
fi
# Meshes may only exist in ex03; link them through if missing.
if [ ! -e meshes ]; then
if [ ! -e ../src/meshes ]; then
ln -s ../../ex03/src/meshes ../src/meshes
fi
ln -s ../src/meshes meshes
fi
# BUG FIX: the original 'ARGUMENTS=${2} ${3} ${4}' executed $3 as a
# command (with ARGUMENTS=$2 only in its environment); quoting builds
# the intended single string.
ARGUMENTS="${2} ${3} ${4}"
if [ "${1}" = "debug" ]; then
echo "ARGUMENTS: ${ARGUMENTS}"
ddd "${EXECUTABLE}" &
else
# Intentionally unquoted so the collected arguments word-split back
# into separate argv entries.
"${EXECUTABLE}" ${ARGUMENTS}
fi
| true
|
55e5204b2adcb44d368161e2c314dedddc833742
|
Shell
|
lakebug/spc_post
|
/scripts/exforecast_href_cal_thunder.sh
|
UTF-8
| 1,395
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# HREF Calibrated Thunderstorm forecast driver for one forecast hour.
# Expects these variables from the calling job environment (not
# validated here): fhour, cyc, COLDSTART, COMOUTspc_post, USHspc_post,
# SENDDBN, DBNROOT, job; plus the err_chk utility on PATH.
# BUG FIX: this script uses bash arrays, so the shebang must be bash —
# the original '#!/bin/sh' breaks on POSIX shells such as dash.
echo "============================================================="
echo "= ="
echo "= Start the HREF Calibrated Thunderstorm forecast for f`printf %03d ${fhour}` ="
echo "= ="
echo "============================================================="
set -x
# get output file names (hour is zero-padded to 3 digits)
hour=`printf %03d ${fhour}`
if [ $hour -lt 4 ]
then
grib_files=("hrefct.t${cyc}z.thunder_1hr.f${hour}.grib2")
else
# From f004 onward a 4-hour product is produced as well.
grib_files=("hrefct.t${cyc}z.thunder_1hr.f${hour}.grib2" "hrefct.t${cyc}z.thunder_4hr.f${hour}.grib2")
fi
# COLDSTART check
# Remove all existing grib2 files for current cycle if YES
# (quoted so an unset COLDSTART does not make the test malformed)
if [ "${COLDSTART}" == "YES" ]; then
for grib_file in "${grib_files[@]}"
do
if [ -f $COMOUTspc_post/thunder/${grib_file} ]; then
echo "COLDSTART - removing $COMOUTspc_post/thunder/${grib_file}"
rm $COMOUTspc_post/thunder/${grib_file}
fi
done
fi
# start forecast
python ${USHspc_post}/href_calib_thunder/forecast_href_cal_thunder.py ${fhour}
export err=$?; err_chk
# dbnet alerts: notify DBNET of each product file when enabled
if [ "$SENDDBN" = 'YES' ]
then
for grib_file in "${grib_files[@]}"
do
echo " Sending ${grib_file} to DBNET."
$DBNROOT/bin/dbn_alert MODEL SPCPOST_THUNDER_GRIB $job $COMOUTspc_post/thunder/${grib_file}
done
fi
exit 0
| true
|
36c422846e403e73f30ed969c547a5f8ce48ca07
|
Shell
|
nucleoosystem/PyGreSQL
|
/mktar
|
UTF-8
| 1,843
| 3.625
| 4
|
[
"PostgreSQL"
] |
permissive
|
#!/bin/sh
# Build the PyGreSQL source tarball and publish it into the web
# distribution directory, updating the PyGreSQL[-beta].tar.gz symlink.
VERSION=5.1
DISTDIR=/u/WEB/pyg/files
# some safety tests
if [ ! -d $DISTDIR ]
then
echo "Hmmm. Are you sure you are on the right server?"
exit 1
fi
if [ ! -f setup.py -o ! -f pgmodule.c -o ! -d tests -o ! -d docs ]
then
echo "Hmmm. Are you sure you are in the right directory?"
exit 1
fi
# A clean checkout has exactly 15 top-level files matching these
# patterns; anything else suggests stray or missing files.
FILES="*.c *.h *.py *.cfg *.rst *.txt"
NUMFILES=`ls $FILES | wc -l`
if [ $NUMFILES != 15 ]
then
echo "Hmmm. The number of top-level files seems to be wrong:"
ls $FILES
echo "Maybe you should do a clean checkout first."
echo "If something has changed, edit MANIFEST.in and mktar."
exit 1
fi
# Expected modes concatenated in order: 755 for the first four entries,
# 644 for the last four.
FILES="mktar mkdocs docs tests pg.py pgdb.py pgmodule.c setup.cfg"
PERMS=`stat --printf="%a" $FILES`
if [ $? -eq 0 -a "$PERMS" != '755755755755644644644644' ]
then
echo "Hmmm. File permissions are not set properly."
echo "Use a filesystem with permissions and do a clean checkout first."
exit 1
fi
# A BETA marker file switches to pre-release naming.
# NOTE(review): VERSION and PACKAGE are computed here but not used
# further below — confirm whether they are consumed elsewhere.
if [ -f BETA ]
then
VERSION=$VERSION-pre`date +"%y%m%d"`
PACKAGE=pygresql.pkg-beta
SYMLINK=PyGreSQL-beta.tar.gz
else
PACKAGE=pygresql.pkg
SYMLINK=PyGreSQL.tar.gz
fi
# Package up as a source tarball in the distribution directory.
echo "Making source tarball..."
echo
umask 0022
# Make sure that the documentation has been built.
if ! ./mkdocs
then
echo "Hmmm. The documentation could not be built."
exit 1
fi
# Package as source distribution.
rm -rf build dist
if ! python3 setup.py sdist
then
echo "Hmmm. The source distribution could not be created."
exit 1
fi
# sdist leaves exactly one tarball in dist/.
DF=`ls dist`
if [ $? -ne 0 -o -z "$DF" ]
then
echo "Hmmm. The source distribution could not be found."
exit 1
fi
TF=$DISTDIR/$DF
if ! cp dist/$DF $TF
then
echo "Hmmm. The source distribution could not be copied."
exit 1
fi
chmod 644 $TF
# Repoint the stable (or beta) symlink at the fresh tarball.
rm -f $DISTDIR/$SYMLINK
ln -s $DF $DISTDIR/$SYMLINK
echo
echo "$TF has been built."
| true
|
dafe225f16c88b35de55f87b91a54392222a308e
|
Shell
|
Vald/docgreSQL
|
/lib/initDocDefDB.sh
|
UTF-8
| 1,860
| 2.546875
| 3
|
[] |
no_license
|
#! /bin/bash
# encoding: utf-8
# (Re)build the SQLite documentation-definition database docDefDB.db.

DB=docDefDB.db

# Run one SQL statement against the definition database.
run_sql() {
sqlite3 "$DB" "$1"
}

# doc_fields TABLE: the fields available in the documentation; 'several'
# records whether a field may appear more than once.
run_sql "DROP TABLE IF EXISTS doc_fields"
run_sql "CREATE TABLE doc_fields (field TEXT, several TEXT NOT NULL, PRIMARY KEY (field))"
for row in "title False" "description False" "name False" \
"depends True" "inheritsFields True" "field True"; do
set -- $row
run_sql "INSERT INTO doc_fields VALUES ('$1', '$2')"
done

# doc_field_args TABLE: the args available for each field in the
# documentation.
run_sql "DROP TABLE IF EXISTS doc_field_args"
run_sql "CREATE TABLE doc_field_args (field TEXT REFERENCES doc_fields (field), arg TEXT, PRIMARY KEY (field, arg))"
for row in "title value" "description value" "name schema" "name table" \
"depends schema" "depends table" "inheritsFields schema" \
"inheritsFields table" "field name" "field type" "field description"; do
set -- $row
run_sql "INSERT INTO doc_field_args VALUES ('$1', '$2')"
done
| true
|
7699363fc1d126a174f87dbf5c8f4cec3296a117
|
Shell
|
Pavche/bash-scripts
|
/find-ipv6-dns-records.sh
|
UTF-8
| 490
| 2.578125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# For each server in the fixed list below: print its AAAA (IPv6) DNS
# records via dig, then log in over SSH and show its configured IPv6
# addresses. Press Enter after each host to clear the screen and move on.
SERVER_LIST="eduroam.egi.eu
mailman.egi.eu
www.egi.eu
wiki.egi.eu
aldor.ics.muni.cz
documents.egi.eu
www.metacentrum.cz
rt.egi.eu
indico.egi.eu
portal.egi.eu
mailman.cerit-sc.cz
rt4.egi.eu
wiki.metacentrum.cz
deb8.egi.eu
www.opensciencecommons.org
sso.egi.eu
documents.metacentrum.cz
confluence.egi.eu"
echo "Check IPv6 addresses"
echo
# BUG FIX: the loop originally iterated over the undefined variable
# $SERVER_LIST2, so the body never executed. $SERVER_LIST is left
# unquoted deliberately so the newline-separated list word-splits into
# individual hostnames.
for S in $SERVER_LIST
do
echo "Hostname: $S"
dig -t AAAA +short "$S"
echo
ssh "root@$S" "ip -6 a s"
# Wait for operator acknowledgement before clearing the screen.
read -r key
clear
done
| true
|
76f1125ea6f469b3ac3ab4ec339f96bf00ec7f91
|
Shell
|
AppScale/ats-deploy
|
/roles/cloud/files/eucalyptus-certbot-hook
|
UTF-8
| 1,200
| 3.703125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Import HTTPS key/certificate from PEM files and configure all hosts
# to use.
#
# This hook a/b switches to coordinate moving between certificates on
# all hosts and allow manual rollback: whichever alias is currently
# active, the import targets the other one, and the property flip at
# the end makes it live.
#
# BUG FIX: 'set -o pipefail' is not POSIX sh — under shells like dash
# the original '#!/bin/sh' shebang made this 'set' line fail; run under
# bash, which supports pipefail.
set -eo pipefail
KEY_ALIAS_A="eucalyptus-cloud-a"
KEY_ALIAS_B="eucalyptus-cloud-b"
KEY_ALIAS_PROP="bootstrap.webservices.ssl.server_alias"
SERVICE_PORT_PROP="bootstrap.webservices.port"
# Pick the alias that is NOT currently active.
KEY_ALIAS_CURRENT=$(euctl -n "${KEY_ALIAS_PROP}")
KEY_ALIAS_NEXT="${KEY_ALIAS_A}"
if [ "${KEY_ALIAS_CURRENT}" = "${KEY_ALIAS_A}" ] ; then
KEY_ALIAS_NEXT="${KEY_ALIAS_B}"
fi
TARGET_PORT=$(euctl -n "${SERVICE_PORT_PROP}")
# All unique hosts running any of the listed service types.
TARGET_HOSTS=$(euserv-describe-services \
--filter service-type=cluster \
--filter service-type=eucalyptus \
--filter service-type=storage \
--filter service-type=user-api \
--group-by-host | awk '{print $2}' | sort -u)
[ -n "${TARGET_HOSTS}" ] || { echo "No hosts found" >&2; exit 3; }
# Import the new key/cert under the inactive alias on every host; any
# extra certbot arguments are forwarded via "$@".
for TARGET_HOST in ${TARGET_HOSTS} ; do
export EUCA_PROPERTIES_URL="http://${TARGET_HOST}:${TARGET_PORT}/services/Properties"
"/usr/local/bin/eucalyptus-cloud-https-import" --alias "${KEY_ALIAS_NEXT}" "$@"
done
unset EUCA_PROPERTIES_URL
# Flip the active alias — this is the atomic switch-over.
euctl "${KEY_ALIAS_PROP}"="${KEY_ALIAS_NEXT}"
| true
|
4eb0188e4c4c233bd2eab31a4ada4b30c8d70489
|
Shell
|
jianling/tangram.baidu.com
|
/source/github_update.sh
|
UTF-8
| 3,199
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# Variables related to github
rootPath=$(pwd)
basePath=$rootPath"/github" # root directory for the checkouts
# Variables related to copying files
copyFilePath=$rootPath"/../src/js" # directory the files are copied into
fragmentFolder="fragment" # name of the folder holding the fragment files
downloadFolder="download" # directory for the large user-downloadable files
# Variables related to generating the md5 file
md5Path=$rootPath
# Decide whether git should 'clone' or 'pull' based on whether the given
# directory is already a git checkout; result goes into $gitFolder.
function getGitType(){
gitFolder="clone"
[ -d $1 ] && [ -d $1"/.git" ] && gitFolder="pull"
}
# Clone or update the three Tangram repositories under $basePath.
function githubUpdate(){
[ ! -d $basePath ] && mkdir $basePath
cd $basePath
getGitType "Tangram-base"
if [ "$gitFolder" == "clone" ]; then
git clone http://github.com/BaiduFE/Tangram-base.git
else
cd "Tangram-base"
git pull http://github.com/BaiduFE/Tangram-base.git master master
fi
echo "----------tangram base complete----------"
cd $basePath
getGitType "Tangram-component"
if [ "$gitFolder" == "clone" ]; then
git clone http://github.com/BaiduFE/Tangram-component.git
else
cd "Tangram-component"
git pull http://github.com/BaiduFE/Tangram-component.git master master
fi
echo "----------tangram component complete----------"
cd $basePath
getGitType "Tangram-component-dev0.2"
if [ "$gitFolder" == "clone" ]; then
git clone -b 0.2 http://github.com/BaiduFE/Tangram-component.git Tangram-component-dev0.2
else
cd "Tangram-component-dev0.2"
git pull http://github.com/BaiduFE/Tangram-component.git 0.2 0.2
fi
echo "----------tangram component dev0.2 complete----------"
#mobile start
}
# Extract the Tangram version number embedded in the released
# all_release.js into $tangramVersion.
function getVersion(){
cd $basePath"/Tangram-base/release"
tangramVersion=$(grep "\{version:\"[0-9\.]*\"\}" all_release.js -o|grep -o "[0-9\.]*")
cd -
}
# Write md5.properties: for each release file record "name=md5,size,gzsize;"
function createMD5(){
cd $basePath"/Tangram-base/release"
fileName=("all_release.js" "all_release_src.js" "core_release.js" "core_release_src.js")
echo "version="$tangramVersion";">$md5Path"/md5.properties"
for item in ${fileName[*]}; do
gzip -c $item>$item".gz"
echo ${item}"="$(md5sum $item|awk '{print $1}')","$(du -b $item|awk '{print $1}')","$(du -b $item".gz"|awk '{print $1}')";">>$md5Path"/md5.properties"
done
}
function copyFiles(){
# Copy the fragment files
cd $copyFilePath
[ -d "${fragmentFolder}" ] && rm -rf $fragmentFolder
cp -rf $basePath $fragmentFolder
# Copy the large (versioned) release files
getVersion
[ -d "${downloadFolder}" ] && rm -rf $downloadFolder
mkdir $downloadFolder
cp $basePath"/Tangram-base/release/all_release.js" $downloadFolder"/tangram-"$tangramVersion".js"
cp $basePath"/Tangram-base/release/all_release_src.js" $downloadFolder"/tangram-"$tangramVersion".source.js"
cp $basePath"/Tangram-base/release/core_release_src.js" $downloadFolder"/tangram-"$tangramVersion".core.source.js"
cp $basePath"/Tangram-base/release/core_release.js" $downloadFolder"/tangram-"$tangramVersion".core.js"
cp $basePath"/Tangram-base/release/tangram_all.js" $downloadFolder"/tangram-all.js"
cp $basePath"/Tangram-base/release/all_release.js" $downloadFolder"/tangram.js"
cd -
}
# Main flow: refresh checkouts (unless an argument suppresses it), run
# the ant release build, then copy files and generate checksums.
[ ! -n "$1" ] && githubUpdate
cd $basePath"/Tangram-base/release"
ant -f build_release.xml release-all > /dev/null
cd -
copyFiles
createMD5
echo "----------All done----------"
exit
| true
|
4e0727e47519a5a3f79de52dd2a2ef712462d125
|
Shell
|
xlii-chl/scripts
|
/project/extend/v2/bash/wexample/site/config.sh
|
UTF-8
| 284
| 3.390625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# wex command "site/config": print the value of one site config parameter.
# Declare the arguments this command accepts. NOTE(review): consumed by
# the wex framework, which presumably populates $KEY / $DIR_SITE from
# them — confirm against the framework.
siteConfigArgs() {
_ARGUMENTS=(
'key k "Key of config param to get" true'
'dir_site d "Root site directory" false'
)
}
siteConfig() {
# Default to the current directory when no site dir was passed
# (${DIR_SITE+x} is empty only when DIR_SITE is unset).
if [ -z "${DIR_SITE+x}" ]; then
DIR_SITE=./
fi;
# Source the site's .wex file, which defines the config variables.
. .wex
# Uppercase key, then print the variable of that name via eval.
eval 'echo ${'${KEY^^}'}'
}
| true
|
fd473adf7255f11d8802f96fbb8c9663900e7f22
|
Shell
|
fabloch/linux-makerspace-installer
|
/install.sh
|
UTF-8
| 3,318
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/sh
# Provision an Ubuntu makerspace workstation: create a user, configure
# samba credentials/hosts, and install office, CAD, design, media,
# lasercut, electronics and coding software from PPAs.
######### Main setup #########
sudo add-apt-repository universe
sudo apt-get update
sudo apt-get upgrade
# ubuntu make
sudo add-apt-repository ppa:ubuntu-desktop/ubuntu-make
sudo apt-get update
sudo apt-get install -y ubuntu-make
# create user
# BUG FIX: the original 'USR = fabloch' (spaces around '=') is not an
# assignment — it tries to run a command named 'USR'. Also renamed the
# password variable: assigning to PWD clobbers the shell's working-
# directory variable, which relative paths below rely on.
USR="fabloch"
PASSWORD="fabloch"
# echo "Username: [fabloch]";
# read USR;
# echo "Password: [fabloch]";
# read PASSWORD;
HASHEDPWD="$(python3 -c 'import crypt; print(crypt.crypt("'"$PASSWORD"'"))')"
# useradd requires root, like every other privileged step in this script.
sudo useradd -m -p "$HASHEDPWD" "$USR" && echo "User created with success"
# create samba credential files #
sudo touch /etc/samba/cred
sudo sh -c "echo 'username=yourusername' >> /etc/samba/cred"
sudo sh -c "echo 'password=yourpassword' >> /etc/samba/cred"
sudo chmod 0600 /etc/samba/cred
# add new hosts #
sudo sh -c "echo '192.168.0.105 remotemachinename' >> /etc/hosts"
######### Softwares #########
# Make sure to apt-get update
# install software with -y in the same block
#### Office
# firefox and bookmarks
sudo add-apt-repository ppa:ubuntu-mozilla-security/ppa
sudo apt-get update
sudo apt-get install -y firefox
# -r added: bookmarkbackups is a directory, plain cp would fail
cp -r firefox/bookmarkbackups /home/fabloch/.mozilla/firefox/*.default/bookmarkbackups
# libreofffice
sudo add-apt-repository ppa:libreoffice/ppa
sudo apt-get update
sudo apt-get install -y libreoffice
# fileZilla
sudo apt-get install -y filezilla
# font-manager (sudo added for consistency with the other PPA steps)
sudo add-apt-repository ppa:font-manager/staging
sudo apt-get update
sudo apt-get install -y font-manager
# simple-scan
sudo apt-get install -y simple-scan
# vlc
sudo add-apt-repository ppa:c-korn/vlc
sudo apt-get update
sudo apt-get install -y vlc
#### 3D modelling and CAD
# freecad
sudo add-apt-repository ppa:freecad-maintainers/freecad-stable
sudo apt-get update
sudo apt-get install -y freecad
# blender
sudo add-apt-repository ppa:thomas-schiex/blender
sudo apt-get update
sudo apt-get install -y blender
#### 3D printing and CAD
# Prusa Slicer — the AppImage must already be in Downloads.
# BUG FIX: 'sudo cd' is a no-op ('cd' is a shell builtin, not a command).
cd Downloads
ls
sudo chmod a+x PrusaSlicer-exact-file-name.AppImage
sudo ./PrusaSlicer-exact-file-name.AppImage
# repetier-host
sudo apt-get install -y repetier-host
#### Design
# inkscape (-y added so the script stays non-interactive)
sudo add-apt-repository ppa:inkscape.dev/stable
sudo apt-get update
sudo apt-get install -y inkscape
# scribus
sudo add-apt-repository ppa:scribus/ppa
sudo apt-get update
sudo apt-get install -y scribus
# gimp
sudo add-apt-repository ppa:otto-kesselgulasch/gimp
sudo apt-get update
sudo apt-get install -y gimp
#### photography & video
# darktable
sudo add-apt-repository ppa:pmjdebruijn/darktable-release
sudo apt-get update
sudo apt-get install -y darktable
# handbrake
sudo add-apt-repository ppa:stebbins/handbrake-releases
sudo apt-get update
sudo apt-get install -y handbrake-cli handbrake-gtk
# obs-studio
sudo add-apt-repository ppa:obsproject/obs-studio
sudo apt-get update
sudo apt-get install -y obs-studio
#### Lasercut
# visicut (sudo added: dpkg -i needs root)
wget http://download.visicut.org/files/master/Debian-Ubuntu-Mint/visicut_1.8-94-g0188ab30-1_all.deb
sudo dpkg -i visicut_1.8-94-g0188ab30-1_all.deb
rm visicut_1.8-94-g0188ab30-1_all.deb
#### Electronics & IOT
# arduino-ide
sudo apt-get install -y arduino
#### Coding
# atom
sudo add-apt-repository ppa:webupd8team/atom
sudo apt-get update
sudo apt-get install -y atom
# code::blocks
sudo apt-get install -y gcc
sudo apt-get install -y clang
sudo apt-get update
sudo apt-get install -y codeblocks
| true
|
ac14a38eccbe0cc8286f41de36466cde0f7a2d4d
|
Shell
|
Fid04/core-ddos
|
/configs/iptables.sh
|
UTF-8
| 2,160
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/sh
# Rewrites the source IP of packets coming from internal host 192.168.1.2
# that leave through interface eth1 to 200.200.217.40 (our Internet-facing
# IP address).
# iptables -t nat -A POSTROUTING -s 192.168.1.2 -o eth1 -j SNAT --to 200.200.217.40
# Rewrites the source IP of every host on 192.168.1.0/24 going out through
# eth0 to the range 200.241.200.40-200.241.200.50; the address used is
# chosen according to the last IP allocated.
#iptables -t nat -A POSTROUTING -s 192.168.1.0/24 -o eth0 -j SNAT --to 200.200.217.40-200.200.217.50
# The difference here is the -j MASQUERADE target. The rules below
# masquerade all traffic from the internal networks leaving through eth2:
# the source address is replaced by the IP the provider assigns at
# connection time, and the reverse translation is applied to replies so
# they reach their destination. No machine on the Internet can reach a
# masqueraded host directly.
iptables -t nat -A POSTROUTING -s 10.0.0.0/24 -o eth2 -j MASQUERADE
iptables -t nat -A POSTROUTING -s 10.0.1.0/24 -o eth2 -j MASQUERADE
iptables -t nat -A POSTROUTING -s 10.0.2.0/24 -o eth2 -j MASQUERADE
iptables -t nat -A POSTROUTING -s 10.0.3.0/24 -o eth2 -j MASQUERADE
iptables -t nat -A POSTROUTING -s 10.0.4.0/24 -o eth2 -j MASQUERADE
iptables -t nat -A POSTROUTING -s 10.0.5.0/24 -o eth2 -j MASQUERADE
iptables -t nat -A POSTROUTING -s 10.0.6.0/24 -o eth2 -j MASQUERADE
iptables -t nat -A POSTROUTING -s 10.0.7.0/24 -o eth2 -j MASQUERADE
iptables -t nat -A POSTROUTING -s 192.168.2.0/24 -o eth2 -j MASQUERADE
# Rewrites the destination IP of packets from 192.168.42.129/24 arriving
# on interface eth2 (DNAT examples kept disabled below).
#iptables -A FORWARD -i eth2 -d 10.0.0.10 -j ACCEPT
#iptables -t nat -A PREROUTING -i eth2 -s 192.168.42.129 -j DNAT --to-destination 10.0.0.0/24 # -> server IP
# Active rule: redirect everything arriving on eth2 to 192.168.2.99.
iptables -t nat -A PREROUTING -i eth2 -j DNAT --to 192.168.2.99
| true
|
28625cb243eebcbb21d22cfe0c431d386b92e686
|
Shell
|
mwatson128/Perl_scripts
|
/offln/rbin/rj
|
UTF-8
| 149
| 2.65625
| 3
|
[] |
no_license
|
#! /bin/sh
# Scan a reject log with ulgscan using the rj_ALIAS alias file.
# $1 - optional date stamp (mmddyy); defaults to today's date in GMT.
if [ -z "$1" ]; then
TODAY=$(env TZ=GMT0 date '+%m%d%y')
else
TODAY=$1
fi
echo "$TODAY"
ulgscan -f "${HOME}/bin/rj_ALIAS" "./rej${TODAY}.lg"
| true
|
0cfbaed6da04f8e98a552a361671dd29b12e0329
|
Shell
|
ideasonpurpose/basic-wordpress-box
|
/scripts/setup.sh
|
UTF-8
| 414
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash -eux
# Grant the vagrant user passwordless sudo and disable requiretty so
# provisioning tools can run sudo over non-tty SSH sessions.
printf '%s\n' "vagrant ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
sed -i "s/^.*requiretty/#Defaults requiretty/" /etc/sudoers
# Ansible installation kept disabled, as in the original:
# sudo apt-get -y update
# sudo apt-get -y install software-properties-common
# sudo apt-add-repository -y ppa:ansible/ansible
# sudo apt-get -y update
# sudo apt-get -y install ansible
| true
|
ec9809160df6d9381bbae13906735508c5872e30
|
Shell
|
mosscylium/forestfloor
|
/build.sh
|
UTF-8
| 127
| 2.765625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build the forestfloor base Docker image from the directory containing
# this script. Aborts on the first failing command.
set -e
build_dir=$(dirname "$0")
sudo docker build -t="forestfloor/base" "$build_dir"
| true
|
1f55d8bbe896b34d2c179d750e5fe79e2886b520
|
Shell
|
Kazhuu/bash-examples
|
/return-value.sh
|
UTF-8
| 1,032
| 4.21875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Demonstrates how bash functions "return" values: a function's exit
# status is that of its last command, or an explicit 'return N' with
# N in 0-255; the caller reads it from '$?'. Real data must instead
# travel through stdout (command substitution) or a global variable.

# Exit status comes from the last command (echo -> 0).
without_return() {
printf '%s\n' "inside of function without return keyword"
}

# Explicit numeric status via 'return'.
with_return() {
printf '%s\n' "inside of return keyword function"
return 10
}

without_return
echo $? # 0
with_return
echo $? # 10

# Returning data: mutate a global, or write to stdout and capture it.
return_global() {
return_value="hoot"
}

return_substitution() {
printf '%s\n' "returned value using stdout"
}

return_global
echo $return_value # hoot
return_value=$(return_substitution)
echo $return_value # returned value using stdout
| true
|
d05bf4d9c8da4ae91ca9d8e6a582e73bd88637e8
|
Shell
|
jiazhizhong/bk-log
|
/apps/log_extract/scripts/cos.sh
|
UTF-8
| 1,431
| 3.515625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Package per-IP log extracts into one tarball for COS upload.
# Args: $1 dst_path, $2 cos_pack_file_name, $3 target_dir, $4 run_ver.
export LC_ALL=C
export LANG=C
# $NOW is a command string eval'd at log time, so every log line gets a
# fresh "[timestamp][PID:...]" prefix.
anynowtime="date +'%Y-%m-%d %H:%M:%S'"
NOW="echo [\`$anynowtime\`][PID:$$]"
TARGET_DIR_PREFIX="/bk_log_extract/distribution/"
function job_start {
echo "$(eval "$NOW") job_start"
}
function job_log {
MSG="$*"
echo "$(eval "$NOW") $MSG"
}
# Log and exit 0.
function job_success {
MSG="$*"
echo "$(eval "$NOW") job_success:[$MSG]"
exit 0
}
# Log and exit 1.
function job_fail {
MSG="$*"
echo "$(eval "$NOW") job_fail:[$MSG]"
exit 1
}
dst_path=$1;shift
cos_pack_file_name=$1;shift
target_dir=$1;shift
run_ver=$1
# check target_dir must is BKLOG dir — refuse to operate (and later
# delete) anything outside the distribution prefix
[[ $target_dir == *$TARGET_DIR_PREFIX* ]] || job_fail "$target_dir illegal"
# Flatten the layout: each per-IP subdirectory holds one pack file,
# which is renamed to "<ip>_<file>" in target_dir and the subdirectory
# removed. NOTE(review): a literal "[]" subdirectory is treated as an
# extra nesting level — presumably an artifact of the upstream
# collector; confirm against the producing job.
add_ip_to_pack_file() {
cd "$target_dir" || job_fail "$target_dir not exists"
not_dir="${target_dir}/[]/"
if [ -d "$not_dir" ]; then
cd "[]"
fi
for ip in ./* ; do
cd $ip
pack_file=$(ls)
if [ -d "$not_dir" ]; then
mv $pack_file "../../${ip}_${pack_file}"
cd "${target_dir}/[]/"
rm -rf "${target_dir}/[]/${ip}"
continue
fi
mv $pack_file "../${ip}_${pack_file}"
cd "${target_dir}"
rm -rf "${target_dir}/${ip}"
done
if [ -d "$not_dir" ]; then
rm -rf "${target_dir}/[]"
fi
}
add_ip_to_pack_file
# Tar everything up (preserving absolute paths via -P), then remove the
# now-packaged source directory.
job_log "tar -cPf ${dst_path}/${cos_pack_file_name} -C $target_dir ./"
tar -cPf "${dst_path}/${cos_pack_file_name}" -C "$target_dir" ./
rm -rf "$target_dir"
job_success "Cos upload success"
| true
|
52d34bbd59af5ef521b80f05a6fec6cfc580b3c2
|
Shell
|
SusovanGithub/college-time-native-coding
|
/Shell Programs/Years Programs/2017/D2_8.sh
|
UTF-8
| 348
| 3.71875
| 4
|
[] |
no_license
|
# Input 3 numbers through the keyboard. Write a shell script to find their LCM.
#
# BUG FIX: the original multiplied the *distinct* prime factors of all
# three numbers (factor | sort -u), which is not the LCM — e.g. for
# 4 and 8 it produced 2 instead of 8. Compute the LCM properly with
# lcm(a,b) = a / gcd(a,b) * b, folded over the inputs. This also
# removes the temporary file 'ff' the original left behind on errors.

# Greatest common divisor of $1 and $2 (Euclid's algorithm).
gcd() {
	local a=$1 b=$2 t
	while [ "$b" -ne 0 ]; do
		t=$((a % b))
		a=$b
		b=$t
	done
	echo "$a"
}

lcm=1
for ((i=0;i<3;i++))
do
	echo -n "Enter a Number ="
	read -r n
	# Robustness: fall back to 1 on empty or non-numeric input so the
	# arithmetic below cannot blow up.
	case $n in (*[!0-9]*|"") n=1 ;; esac
	g=$(gcd "$lcm" "$n")
	lcm=$(( lcm / g * n ))
done
echo "LCM of that numbers =$lcm"
| true
|
73015a28a7b80a510b01502475e65e1dd79b7e67
|
Shell
|
MEGA65/mega65-keyboard-cpld
|
/version.sh
|
UTF-8
| 670
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate src/version.vhdl (and version.txt) embedding the current git
# commit id and a compact date stamp for synthesis.
# status of 'B'ranch in 'S'hort format
branch=`git status -b -s | head -n 1`
# get from charpos3, for 6 chars
# NOTE(review): branch/branch2 are computed but not used below.
branch2=${branch:3:6}
# Abbreviated commit hash, 8 hex digits — fits the 32-bit VHDL constant.
version=`git describe --always --abbrev=8`
# Days since 2020: (year - 2020) * 366 + day-of-year; fits in 14 bits.
datestamp=$(expr $(expr $(date +%Y) - 2020) \* 366 + `date +%j`)
echo ${datestamp}-${version} > version.txt
echo $datestamp $version
# Emit the VHDL package (heredoc content is literal output).
cat > src/version.vhdl <<ENDTEMPLATE
library ieee;
use Std.TextIO.all;
use ieee.STD_LOGIC_1164.all;
use ieee.numeric_std.all;
package version is
constant git_version : unsigned(31 downto 0) := x"${version}";
constant git_date : unsigned(13 downto 0) := to_unsigned(${datestamp},14);
end version;
ENDTEMPLATE
echo "wrote: src/version.vhdl"
| true
|
7d7a7a1de520f4a991108f918096c1d80fa87363
|
Shell
|
osabuoun/repast
|
/Repast2/repast.sh
|
UTF-8
| 4,281
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/sh
# Unpack srcmodel.tar into ~/Model, compile any .java sources whose
# .class files are missing against the Repast Simphony plugin
# classpath, and repack the result as $SRCDIR/model.tar. Exits the
# script directly on any failure (and with 0 on success).
compile() {
WRK=~/Model
rm -rf $WRK
mkdir -p $WRK
cd $WRK
if [ ! -e $SRCDIR/srcmodel.tar ]; then
pwd
ls -al
echo "srcmodel.tar not found"
exit 1
fi
tar -xf $SRCDIR/srcmodel.tar
# Model name = first top-level directory inside the tarball.
MODEL_NAME=`tar -tf $SRCDIR/srcmodel.tar | head -1 | awk -F'/' '{ print $1 }'`
cd $MODEL_NAME
if [ $? -ne 0 ]; then
echo "Unable to change to $WRK/$MODEL_NAME"
exit 1
fi
# The single package directory under src/ names the model package.
INTERNAL_NAME=`ls src`
SRC=$WRK/$MODEL_NAME/src/$INTERNAL_NAME
BIN=$WRK/$MODEL_NAME/bin/$INTERNAL_NAME
if [ ! -d "$SRC" ]; then
echo "no source file directory $SRC, cannot proceed"
exit 1
fi
if [ ! -d "$BIN" ]; then
mkdir -p "$BIN"
fi
# Repast Simphony plugin jars and bin directories for javac.
CP=$CP:$PLUGINS/libs.bsf_$VERSION/lib/*:$PLUGINS/libs.ext_$VERSION/lib/*:$PLUGINS/$ROOT.batch_$VERSION/lib/*
CP=$CP:$PLUGINS/$ROOT.batch_$VERSION/bin
CP=$CP:$PLUGINS/$ROOT.distributed.batch_$VERSION/lib/*
CP=$CP:$PLUGINS/$ROOT.distributed.batch_$VERSION/bin
CP=$CP:$PLUGINS/$ROOT.core_$VERSION/lib/*
CP=$CP:$PLUGINS/$ROOT.core_$VERSION/bin
CP=$CP:$PLUGINS/$ROOT.runtime_$VERSION/lib/*
CP=$CP:$PLUGINS/$ROOT.runtime_$VERSION/bin
CP=$CP:$PLUGINS/$ROOT.data_$VERSION/bin
CP=$CP:$PLUGINS/$ROOT.dataLoader_$VERSION/bin
CP=$CP:$PLUGINS/$ROOT.scenario_$VERSION/bin
CP=$CP:$PLUGINS/$ROOT.essentials_$VERSION/bin
CP=$CP:$PLUGINS/$ROOT.groovy_$VERSION/bin
CP=$CP:$PLUGINS/$ROOT.intergation_$VERSION/lib/*
CP=$CP:$PLUGINS/$ROOT.intergation_$VERSION/bin
CP=$CP:$PLUGINS/saf.core.ui_$VERSION/saf.core.v3d.jar
CP=$CP:$PLUGINS/saf.core.ui_$VERSION/lib/*
CP=$CP:$SRC
CP=$CP:$BIN
CP=$CP:$PLUGINS/JoSQL-2.2.jar
cd $SRC
# Collect base names of sources whose .class is missing; if any are
# found, recompile all sources in one javac run.
CLIST=""
for f in *.java; do
BASE=`echo $f | awk -F'.' '{ print $1 }'`
BINCAND=$BIN/$BASE.class
if [ ! -e $BINCAND ]; then
CLIST=$CLIST" "$BASE
fi
done
if [ "$CLIST" != "" ]; then
/usr/bin/javac -cp $CP *.java
if [ $? -ne 0 ]; then
echo "unable to compile $s.java"
exit 1
fi
cp *.class $BIN
fi
cd $WRK
# Repack the compiled model for the run phase.
tar -cf $SRCDIR/model.tar $MODEL_NAME
exit 0
}
# Main flow: either compile (first argument "compile") or run a batch
# simulation from model.tar + batch_params.xml and collect output.tar.
SRCDIR=`pwd`
TEMP=`pwd`
VERSION=2.1.0
ROOT=repast.simphony
INSTALLATION=/opt/repast
ROOTA=$INSTALLATION/RepastTest2/MyModels
PLUGINS=$ROOTA/plugins
# Model directories
#if [ $# -ne 1 ]; then
# echo $@
# echo "usage: repast.sh ModelName"
# exit 1
#fi
if [ "$1" = "compile" ]; then
compile
exit 0
fi
# Run phase: both inputs must be present in the working directory.
if [ ! -e $SRCDIR/batch_params.xml ]; then
pwd
ls -al
echo "batch_params.xml not found"
exit 1
fi
if [ ! -e $SRCDIR/model.tar ]; then
pwd
ls -al
echo "model.tar not found"
exit 1
fi
# Model name = first top-level directory inside the tarball.
MODEL_NAME=`tar -tf $SRCDIR/model.tar | head -1 | awk -F'/' '{ print $1 }'`
MODEL_FOLDER=$ROOTA/$MODEL_NAME
echo $MODEL_NAME
echo $MODEL_FOLDER
cp $SRCDIR/batch_params.xml $INSTALLATION/RepastTest2
(cd $ROOTA;tar -xf $SRCDIR/model.tar)
# Add to classpath (same plugin set as compile(), plus runtime jars)
CP=$CP:$PLUGINS/libs.bsf_$VERSION/lib/*:$PLUGINS/libs.ext_$VERSION/lib/*:$PLUGINS/$ROOT.batch_$VERSION/lib/*
CP=$CP:$PLUGINS/$ROOT.batch_$VERSION/bin
CP=$CP:$PLUGINS/$ROOT.distributed.batch_$VERSION/lib/*
CP=$CP:$PLUGINS/$ROOT.distributed.batch_$VERSION/bin
CP=$CP:$PLUGINS/$ROOT.core_$VERSION/lib/*
CP=$CP:$PLUGINS/$ROOT.core_$VERSION/bin
CP=$CP:$PLUGINS/$ROOT.runtime_$VERSION/lib/*
CP=$CP:$PLUGINS/$ROOT.runtime_$VERSION/bin
CP=$CP:$PLUGINS/$ROOT.data_$VERSION/bin
CP=$CP:$PLUGINS/$ROOT.dataLoader_$VERSION/bin
CP=$CP:$PLUGINS/$ROOT.scenario_$VERSION/bin
CP=$CP:$PLUGINS/$ROOT.essentials_$VERSION/bin
CP=$CP:$PLUGINS/$ROOT.groovy_$VERSION/bin
CP=$CP:$PLUGINS/$ROOT.intergation_$VERSION/lib/*
CP=$CP:$PLUGINS/$ROOT.intergation_$VERSION/bin
CP=$CP:$PLUGINS/saf.core.ui_$VERSION/saf.core.v3d.jar
CP=$CP:$PLUGINS/saf.core.ui_$VERSION/lib/*
CP=$CP:$MODEL_FOLDER/bin
CP=$CP:$MODEL_FOLDER/src
CP=$CP:$PLUGINS/JoSQL-2.2.jar
CP=$CP:$PLUGINS/jackson-all-1.9.0.jar
CP=$CP:$PLUGINS/log4j-api-2.1.jar
CP=$CP:$PLUGINS/portico.jar
CP=$CP:$PLUGINS/log4j-core-2.1.jar
# Change directory to the model default
cd $MODEL_FOLDER
# Run
java -cp $CP repast.simphony.runtime.RepastBatchMain -params "$TEMP/batch_params.xml" "$MODEL_FOLDER/$MODEL_NAME.rs"
if [ $? -ne 0 ]; then
echo "Exiting with error status"
exit 1
fi
# NOTE(review): if 'cd output' fails, 'mkdir output; cd output' creates
# it under $MODEL_FOLDER — confirm that is the intended fallback.
cd output
if [ $? -ne 0 ]; then
echo "No output directory, creating"
mkdir output
cd output
fi
# Fail if the run produced no output files at all.
if find . -mindepth 1 -print -quit | grep -q .; then
:
else
echo "No output files"
exit 1
fi
# Bundle results for the caller and clean up the unpacked model.
tar -cf $TEMP/output.tar *
cd $TEMP
rm -rf $MODEL_FOLDER
#rm *.txt
exit 0
| true
|
7439fb9c25535e471467cf95c815bfbe8060655a
|
Shell
|
williamhogman/fsbbs
|
/scripts/build-doc.sh
|
UTF-8
| 594
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/sh
# Build a single PDF (all.pdf) from the Markdown sources in the current
# directory: concat *.md -> HTML via markdown(1) -> wrap with head/foot ->
# wkhtmltopdf. Requires: dia, GNU parallel, markdown, wkhtmltopdf.
# FIX: abort on the first failing step instead of silently producing a
# broken or stale PDF (previously every error was ignored).
set -e
rm -rf .bld
mkdir .bld
cat *.md > .bld/markdown_all.md
echo "Copying html and css"
cp head.html .bld/head.html
cp foot.html .bld/foot.html
cp style.css .bld/style.css
echo "Converting dia to svg"
# {.} is GNU parallel's "input path without extension" placeholder.
find ./img/ -name "*.dia" | parallel dia -e {.}.svg {}
mkdir .bld/img
cp -v img/*.png .bld/img
cp -v img/*.svg .bld/img
# Under set -e a failed cd aborts instead of building in the wrong directory.
cd .bld
echo "Generating markdown"
markdown markdown_all.md > markdown_all.html
echo "Generating HTML"
cat head.html markdown_all.html foot.html > all.html
echo "Printing HTML"
wkhtmltopdf all.html all.pdf
echo "Copying result"
cp all.pdf ../
echo "DONE"
| true
|
3d35b1a222059bb130c779d9a875f96abcdd321d
|
Shell
|
RandhirGupta/CurrencyConverter-Multiplatform
|
/script/android_script.sh
|
UTF-8
| 1,058
| 3.65625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
set -e
# Install the Android SDK command-line tools and the packages a CI build
# needs. Entry points: installSdk <packages...>, installAndroidSDK.

function installSdk(){
  #Use existing SDK with existing 'sdkmanager', otherwise install it
  which sdkmanager &> /dev/null || getAndroidSdk
  # Forward an HTTPS proxy (host:port) to sdkmanager if one is configured.
  PROXY_ARGS=""
  if [[ ! -z "$HTTPS_PROXY" ]]; then
    PROXY_HOST="$(echo "$HTTPS_PROXY" | cut -d : -f 1,1)"
    PROXY_PORT="$(echo "$HTTPS_PROXY" | cut -d : -f 2,2)"
    PROXY_ARGS="--proxy=http --proxy_host=$PROXY_HOST --proxy_port=$PROXY_PORT"
  fi
  # 'echo y' auto-accepts the license prompt.
  echo y | "$ANDROID_HOME/tools/bin/sdkmanager" $PROXY_ARGS "$@"
}

function getAndroidSdk {
  TMP=/tmp/sdk$$.zip
  # NOTE(review): 'download' is not defined in this script — presumably
  # provided by a sourced helper; confirm.
  download 'https://dl.google.com/android/repository/tools_r25.2.3-linux.zip' $TMP
  unzip -qod "$ANDROID_SDK" $TMP
  rm $TMP
}

function installAndroidSDK {
  export PATH="$ANDROID_HOME/platform-tools:$ANDROID_HOME/tools:$ANDROID_HOME/tools/bin:$PATH"
  mkdir -p "$ANDROID_HOME/licenses/"
  echo > "$ANDROID_HOME/licenses/android-sdk-license"
  echo -n 24333f8a63b6825ea9c5514f83c2829b004d1fee > "$ANDROID_HOME/licenses/android-sdk-license"
  # BUG FIX: was 'installsdk' (all lowercase), which is undefined — shell
  # function names are case-sensitive, so this call always failed.
  installSdk 'platforms;android-29' 'cmake;3.6.4111459' 'build-tools;29.0.3'
}
| true
|
05e9277871803ed9425ba1863f0b6f62dedf9901
|
Shell
|
jgajula19/repo-1
|
/jsscript3.sh
|
UTF-8
| 140
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# a script to add two numbers read from standard input
# BUG FIX: shebang was '#!/bin/bash/' (trailing slash) — an invalid
# interpreter path that makes direct execution fail on most systems.
echo "enter first number"
read -r a
echo "enter second number"
read -r b
echo "add = $(( a + b ))"
| true
|
b7480161f491723594d9a8e3853e93cb393edaba
|
Shell
|
Mossop/symmetrical-telegram
|
/build_bsdiff_patch
|
UTF-8
| 2,186
| 3.953125
| 4
|
[] |
no_license
|
#! /bin/bash
# Build per-file update payloads between two extracted releases. For each new
# or changed file we keep whichever encoding is smallest: raw copy, xz'd copy,
# raw bsdiff, or xz'd bsdiff.
# Usage: build_bsdiff_patch [-q|-s] <platform> <oldversion> <newversion>

XZ="xz --compress --lzma2 --format=xz --check=crc64"
XZ86="xz --compress --x86 --lzma2 --format=xz --check=crc64"
MBSDIFF="./mbsdiff/mbsdiff"
# NOTE(review): ZUCCHINI is defined but never used below.
ZUCCHINI="./zucchini/zucchini -gen"

# Echo the extraction dir for a platform/version (captured by the caller) and
# extract it if it is not already on disk.
# NOTE(review): anything ./extract writes to stdout is captured into
# OLDDIR/NEWDIR too — verify ./extract is silent on stdout.
function do_extract() {
  PLATFORM=$1
  VERSION=$2
  DIR=extracts/$PLATFORM/$VERSION
  echo $DIR
  if [ -d $DIR ]; then
    return
  fi
  ./extract $PLATFORM $VERSION
}

INFO="/dev/stdout"
NOISY="/dev/stdout"

# Parse -q (quiet) / -s (silent) flags; first non-flag argument is the platform.
PLATFORM=""
while [ "$PLATFORM" == "" ]; do
  case $1 in
    -q ) NOISY="/dev/null"
         ;;
    -s ) NOISY="/dev/null"
         INFO="/dev/null"
         ;;
    * )  PLATFORM=$1
         ;;
  esac
  shift
done

OLDVERSION=$1
NEWVERSION=$2

TARGET="patches/bsdiff/$PLATFORM/$OLDVERSION-$NEWVERSION"
# Already built for this version pair — nothing to do.
if [ -d "$TARGET" ]; then
  exit
fi

set -e

OLDDIR=`do_extract $PLATFORM $OLDVERSION`
NEWDIR=`do_extract $PLATFORM $NEWVERSION`

for new_file in `cd $NEWDIR && find -type f`
do
  path=`echo "$new_file" | cut -c3-`   # strip the leading "./"
  base=`dirname $path`
  mkdir -p $TARGET/$base

  # Only new or changed files get a payload.
  if ! test -f "$OLDDIR/$path" || ! diff -q $OLDDIR/$path $NEWDIR/$path >/dev/null; then
    echo $path > $INFO

    # Candidate 1: plain copy of the new file.
    cp "$NEWDIR/$path" "$TARGET/$path"
    BESTFILE="$TARGET/$path"
    BESTSIZE=`cat $BESTFILE | wc -c`

    # Candidate 2: xz-compressed new file.
    SIZE=`$XZ86 --stdout $NEWDIR/$path | tee "$TARGET/${path}.xz" | wc -c`
    if (( SIZE < BESTSIZE )); then
      rm $BESTFILE
      BESTFILE=$TARGET/${path}.xz
      BESTSIZE=$SIZE
    else
      rm "$TARGET/${path}.xz"
    fi

    if [ -f "$OLDDIR/$path" ]; then
      # BUG FIX: a temp file was created here (PATCH=`mktemp`) but never used
      # or deleted, leaking one temp file per changed file; removed.
      echo "  bsdiff..." > $NOISY
      # Candidates 3 & 4: binary diff against the old file, raw and xz'd.
      $MBSDIFF "$OLDDIR/$path" "$NEWDIR/$path" "$TARGET/${path}.bsdiff" > /dev/null
      ZSIZE=`cat "$TARGET/${path}.bsdiff" | wc -c`
      XSIZE=`$XZ86 --stdout "$TARGET/${path}.bsdiff" | tee "$TARGET/${path}.bsdiff.xz" | wc -c`
      if (( XSIZE < BESTSIZE )); then
        rm $BESTFILE
        BESTFILE="$TARGET/${path}.bsdiff.xz"
        BESTSIZE=$XSIZE
      else
        rm "$TARGET/${path}.bsdiff.xz"
      fi
      if (( ZSIZE < BESTSIZE )); then
        rm $BESTFILE
        BESTFILE="$TARGET/${path}.bsdiff"
        BESTSIZE=$ZSIZE
      else
        rm "$TARGET/${path}.bsdiff"
      fi
      echo "Best choice is $BESTFILE" > $NOISY
    fi
  fi
done
| true
|
4ff8094d406e02714b6902c4006cc3f6f11c2b1d
|
Shell
|
MW-autocat-script/MW-autocat-script
|
/catscripts/Education/GED/GED.sh
|
UTF-8
| 395
| 3.140625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Auto-categorization rules for GED (General Educational Development) pages.
# Relies on debug_start/categorize/debug_end and $NEWPAGES from the caller.
readonly KEYWORDS_GED="\bGED(|s)\b|General(| )Educational(| )Development"
readonly KEYWORDS_GED_ALL="$KEYWORDS_GED"

# Only run when invoked without an argument.
if [ -z "$1" ]; then
  debug_start "General Educational Development tests (GEDs)"
  GED=$(egrep -i "$KEYWORDS_GED" "$NEWPAGES")
  categorize "GED" "General Educational Development tests (GEDs)"
  debug_end "General Educational Development tests (GEDs)"
fi
| true
|
209aa8f8f86737af826f5ec251898dd237cb459c
|
Shell
|
KingdomTaoF/Python-exercise
|
/shell/tx_tencent_name.sh
|
UTF-8
| 6,137
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Purpose: 1) Generate next week's Tencent server-opening plan from last
#             week's plan mail, including each server's id and display name.
#    Usage: tencent_name.sh <opening-mail-file> <servers-per-day-count>
#          2) Print a short summary of the current week's plan from the mail.
#    Usage: tencent_name.sh <opening-plan-mail-file>
#animallist=['子鼠', '丑牛', '寅虎', '卯兔', '辰龙', '巳蛇', '午马', '未羊', '申猴', '酉鸡', '戌狗', '亥猪']
# Chinese zodiac names used for the 生肖 (zodiac) server series.
animallist=(子鼠 丑牛 寅虎 卯兔 辰龙 巳蛇 午马 未羊 申猴 酉鸡 戌狗 亥猪)
#constellationlist=['金牛座', '双子座', '巨蟹座', '狮子座', '处女座', '天秤座', '天蝎座', '射手座', '摩羯座', '水瓶座', '双鱼座', '白羊座']
# Constellation names used for the 星座 (constellation) server series.
constellationlist=(金牛座 双子座 巨蟹座 狮子座 处女座 天秤座 天蝎座 射手座 摩羯座 水瓶座 双鱼座 白羊座)
# Print a summary of this week's opening plan from the mail file ($1).
function test1(){
  # Strip weekday header lines and blank lines in place.
  sed -i '/星期/d' $1
  sed -i '/^\s*$/d' $1
  echo -e "\033[32m更新列表:\033[0m"
  local server_type
  for server_type in "星座服" "生肖服" "联盟服" "热血服"
  do
    UpdateList $1 "$server_type"
  done
  echo -e "\033[32m完整开服计划:\033[0m"
  awk '{print "tencent",$4,$5}' $1
}
function UpdateList(){
  # Print "tencent|<first-id>-<last-id>" for all rows of file $1 whose third
  # field equals the server type $2 (ids are taken from field 4).
  local first_id last_id
  first_id=$(awk -v name=$2 '{if($3==name)print $4}' $1 | head -1)
  last_id=$(awk -v name=$2 '{if($3==name)print $4}' $1 | tail -1)
  echo "tencent|${first_id}-${last_id}"
}
#UpdateList $1 "星座服"
# args: $1 opening-plan mail file, $2 number of servers to generate
function RexueAndLianmeng(){
#last_rexue_qufuname=`grep "热血服" $1 | tail -1 | awk '{print $5}'`
last_rexue_id=`grep "热血服" $1 | tail -1 | awk '{print $4}'`
echo -e "\033[32m热血服区服名称:\033[0m"
# Day 7 means last week ended cleanly; otherwise back up one id so the
# unfinished entry is regenerated.
# NOTE(review): this branch tests '== 7' while the 联盟服 branch below tests
# '-ne 1' — confirm the asymmetry is intentional.
if [ `grep "热血服" $1 | tail -1 | awk '{print $1}'` == 7 ];then
other_name_list $last_rexue_id $2 '热血服'
else
other_name_list $[$last_rexue_id-1] $2 '热血服'
fi
last_lianmeng_id=`grep "联盟服" $1 | tail -1 | awk '{print $4}'`
echo -e "\033[32m联盟服区服名称:\033[0m"
if [ `grep "联盟服" $1 | tail -1 | awk '{print $1}'` -ne 1 ];then
other_name_list $last_lianmeng_id $2 '联盟服'
else
other_name_list $[$last_lianmeng_id-1] $2 '联盟服'
fi
}
# args: $1 last used id, $2 number of servers, $3 server type.
# Emits one "tencent <id> <name>" line per new server.
function other_name_list(){
  local prefix=${3:0:2}
  local first=$(($1 + 1))
  local last=$(($1 + $2))
  local n
  if [ "$3" == '热血服' ];then
    # 热血 servers: id and display number are identical.
    for ((n = first; n <= last; n++)); do
      echo "tencent $n ${prefix}$n服"
    done
  else
    # Other types: ids live in a 6000-offset range while names use the
    # un-offset number (and vice versa once the raw number crosses 6000).
    for ((n = first; n <= last; n++)); do
      if ((n > 6000)); then
        echo "tencent $n ${prefix}$((n - 6000))区"
      else
        echo "tencent $((n + 6000)) ${prefix}$n区"
      fi
    done
  fi
}
# args: $1 member name, $2 which array to search ('animallist' or
# 'constellationlist'). Prints the 0-based index WITHOUT a trailing newline;
# exits the whole script with status 1 if the member is missing.
function find_index() {
  local i
  case $2 in
  'animallist')
    for ((i = 0; i <= 11; i++)); do
      if [ $1 == ${animallist[$i]} ];then
        echo -n $i
        return
      fi
    done
    echo "找不到该成员"
    exit 1
    ;;
  'constellationlist')
    for ((i = 0; i <= 11; i++)); do
      if [ $1 == ${constellationlist[$i]} ];then
        echo -n $i
        return
      fi
    done
    echo "找不到该成员"
    exit 1
    ;;
  *)
    echo "参数错误"
    ;;
  esac
}
#find_index '午马' animallist
#echo ''
#find_index '射手座' constellationlist
#echo ${animallist[2]}
# args: $1 start index into the array, $2 count, $3 starting name number,
# $4 starting server id, $5 array name ('constellationlist' or 'animallist').
# Emits one "tencent <id> <member><num>服" line per server, wrapping around
# the 12-entry list and bumping the trailing number on wrap.
function name_list(){
  local n pos
  for ((n = 1; n <= $2; n++)); do
    pos=$(($1 + n))
    case $5 in
    'constellationlist')
      if [ $pos -gt 11 ];then
        echo "tencent $(($4 + n)) ${constellationlist[$((pos - 12))]}$(($3 + 1))服"
      else
        echo "tencent $(($4 + n)) ${constellationlist[$pos]}$3服"
      fi
      ;;
    'animallist')
      if [ $pos -gt 11 ];then
        echo "tencent $(($4 + n)) ${animallist[$((pos - 12))]}$(($3 + 1))服"
      else
        echo "tencent $(($4 + n)) ${animallist[$pos]}$3服"
      fi
      ;;
    *)
      echo "错误的参数"
      ;;
    esac
  done
}
#name_list 10 4 27 7315 constellationlist
# Generate next week's opening plan from last week's mail file.
# args: $1 mail file, $2 number of servers to generate per series.
function test2(){
# Strip weekday headers and blank lines in place.
sed -i '/星期/d' $1
sed -i '/^\s*$/d' $1
# Constellation series: parse the last listed entry's name/id and continue.
# Name layout: 3-char constellation prefix + number + "服" suffix.
last_xingzuo_qufuname=`grep "星座服" $1 | tail -1 | awk '{print $5}'`
last_xingzuo_id=`grep "星座服" $1 | tail -1 | awk '{print $4}'`
last_xingzuo_name=${last_xingzuo_qufuname:0:3}
#echo $last_xingzuo_name
last_xingzuo_index=`find_index $last_xingzuo_name constellationlist`
last_xingzuo_namenum=${last_xingzuo_qufuname:3:2}
#echo $last_xingzuo_index
echo -e "\033[32m星座服区服名称:\033[0m"
# Day 7 means last week is complete; otherwise repeat the last entry and
# generate one fewer new server.
if [ `grep "星座服" $1 | tail -1 | awk '{print $1}'` == 7 ];then
name_list $last_xingzuo_index $2 $last_xingzuo_namenum $last_xingzuo_id constellationlist
else
echo "tencent `grep "星座服" $1 | tail -1 | awk '{print $4,$5}'`"
name_list $last_xingzuo_index $[$2-1] $last_xingzuo_namenum $last_xingzuo_id constellationlist
fi
# Zodiac series: same scheme with a 2-char prefix; the trailing number may be
# one or two digits (wc -m counts characters, not bytes).
last_shengxiao_qufuname=`grep "生肖服" $1 | tail -1 | awk '{print $5}'`
last_shengxiao_id=`grep "生肖服" $1 | tail -1 | awk '{print $4}'`
last_shengxiao_name=${last_shengxiao_qufuname:0:2}
#echo $last_shengxiao_id
last_shengxiao_index=`find_index $last_shengxiao_name animallist`
if [ `echo -n $last_shengxiao_qufuname | wc -m` -gt 4 ];then
last_shengxiao_namenum=${last_shengxiao_qufuname:2:2}
else
last_shengxiao_namenum=${last_shengxiao_qufuname:2:1}
fi
echo -e "\033[32m生肖服区服名称:\033[0m"
#name_list $last_shengxiao_index $2 $last_shengxiao_namenum $last_shengxiao_id animallist
if [ `grep "生肖服" $1 | tail -1 | awk '{print $1}'` == 7 ];then
name_list $last_shengxiao_index $2 $last_shengxiao_namenum $last_shengxiao_id animallist
else
echo "tencent `grep "生肖服" $1 | tail -1 | awk '{print $4,$5}'`"
name_list $last_shengxiao_index $[$2-1] $last_shengxiao_namenum $last_shengxiao_id animallist
fi
}
#test2 'tencent.txt' 5
# Dispatch: one existing-file argument = summarize this week's plan;
# two arguments = generate next week's plan from last week's mail.
if [[ "$#" -eq 1 && -f $1 ]]; then
test1 $1
elif [ "$#" -eq 2 ];then
test2 $1 $2
RexueAndLianmeng $1 $2
else
# BUG FIX: the usage string was single-quoted, so `basename $0` was printed
# literally instead of substituting the script name.
echo "用法:$(basename $0) star_areaFile(上周or本周件列表文件) [开服个数]"
fi
#echo ${animallist[0]}
| true
|
d94a7effe4a26bc0c7f900596a312461c8b7a7e0
|
Shell
|
zhaofeng-shu33/compress-files-to-rar
|
/install_dep.sh
|
UTF-8
| 188
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the rarlab 'rar' binary if it is not already present.
# FIX: abort on any failed step; previously a failed download/extract/cd
# still let 'sudo make install' run in whatever directory we were in.
set -e
if [ ! -f /usr/local/bin/rar ]; then
  wget https://www.rarlab.com/rar/rarlinux-x64-5.8.b1.tar.gz
  tar -xzf rarlinux-x64-5.8.b1.tar.gz
  cd rar
  sudo make install
fi
| true
|
89e706bf74afc98a33c0a578fbd7f9b9f76bfa13
|
Shell
|
socratesone/vespa
|
/travis/travis.sh
|
UTF-8
| 614
| 3.109375
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
set -e

# Workaround for Travis log output timeout (jobs without output over 10 minutes are killed)
function bell() {
  while true; do
    echo "."
    sleep 300
  done
}

DOCKER_IMAGE=vespaengine/vespa-build-centos7:latest

bell &
BELL_PID=$!
# BUG FIX: the keep-alive loop was never terminated and outlived the script;
# reap it on every exit path.
trap 'kill "$BELL_PID" 2>/dev/null || true' EXIT

docker run --rm -v ${HOME}/.m2:/root/.m2 -v ${HOME}/.ccache:/root/.ccache -v $(pwd):/source \
  -e TRAVIS_REPO_SLUG=$TRAVIS_REPO_SLUG -e TRAVIS_PULL_REQUEST=$TRAVIS_PULL_REQUEST \
  --entrypoint /source/travis/travis-build.sh ${DOCKER_IMAGE}
exit $?
| true
|
3e182fd6f426d9cd0515d84a5a38815c94a41ed0
|
Shell
|
alejoceballos/vagrant-provisioning
|
/bootstrap-jenkins.sh
|
UTF-8
| 1,974
| 2.953125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Vagrant provisioning script: install Jenkins from the jenkins-ci.org APT
# repository and pre-install the git plugin plus its dependencies.
# Check:
# http://docs.vagrantup.com/v2/getting-started/provisioning.html
## #############################################################################
## Jenkins
## References:
## + https://wiki.jenkins-ci.org/display/JENKINS/Installing+Jenkins+on+Ubuntu
## #############################################################################
echo "## ######################################################################"
echo "## Jenkins"
echo "## ######################################################################"
# Add PPA (didn't know this way of adding a PPA)
# NOTE(review): only 'apt-key add' uses sudo here; the rest assumes the
# provisioner already runs as root (typical for Vagrant) — confirm.
wget -q -O - https://jenkins-ci.org/debian/jenkins-ci.org.key | sudo apt-key add -
sh -c 'echo deb http://pkg.jenkins-ci.org/debian binary/ > /etc/apt/sources.list.d/jenkins.list'
apt-get update
# Install Jenkins
apt-get -y install jenkins
# ##############################################################################
# Download and install git plugin & dependencies
# ##############################################################################
# Each plugin is fetched straight into Jenkins' plugin dir and chown'd so the
# jenkins service can load it on restart.
wget -O /var/lib/jenkins/plugins/mailer.hpi "http://updates.jenkins-ci.org/latest/mailer.hpi"
chown jenkins:jenkins /var/lib/jenkins/plugins/mailer.hpi
# Installed by default
# "http://updates.jenkins-ci.org/latest/scm-api.hpi"
wget -O /var/lib/jenkins/plugins/matrix-project.hpi "http://updates.jenkins-ci.org/latest/matrix-project.hpi"
chown jenkins:jenkins /var/lib/jenkins/plugins/matrix-project.hpi
wget -O /var/lib/jenkins/plugins/git-client.hpi "http://updates.jenkins-ci.org/latest/git-client.hpi"
chown jenkins:jenkins /var/lib/jenkins/plugins/git-client.hpi
# Installed by default
# http://updates.jenkins-ci.org/latest/ssh-credentials.hpi
# Installed by default
# http://updates.jenkins-ci.org/latest/credentials.hpi
wget -O /var/lib/jenkins/plugins/git.hpi "http://updates.jenkins-ci.org/latest/git.hpi"
chown jenkins:jenkins /var/lib/jenkins/plugins/git.hpi
# Restart jenkins
/etc/init.d/jenkins restart
| true
|
ffcef031b40ddfdf7d5177683919f245933e0985
|
Shell
|
uladkasach/clientside-view-loader
|
/active_modules/_init.sh
|
UTF-8
| 714
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
# Keep active_modules/ in sync with the declared module list: symlink every
# listed module from node_modules/, remove any stray symlinks.
modules=( "clientside-require" )
BASE="$PWD/.."

## add modules in modules list
for MODULE_NAME in ${modules[@]}; do
  echo "ensuring $MODULE_NAME is linked"
  if [ ! -d "$BASE/active_modules/$MODULE_NAME" ]; then
    echo " -> creating sym link for : $BASE/node_modules/$MODULE_NAME"
    ln -s "$BASE/node_modules/$MODULE_NAME" "$BASE/active_modules" ## create sym link if not already exists
  fi
done

## remove symlinks not in modules list
## FIX: previously parsed 'ls -l | grep "\->" | awk ...', which is fragile
## across ls output formats and filenames; test each entry directly instead.
for DIR_PATH in "$BASE/active_modules"/*; do
  [ -L "$DIR_PATH" ] || continue    # only symlinks are managed here
  DIR=${DIR_PATH##*/}
  if [[ ! " ${modules[@]} " =~ " ${DIR} " ]]; then
    echo "the sym link '$DIR' is not valid, removing it";
    rm -r "$BASE/active_modules/$DIR"
  fi;
done
| true
|
a5d49121225599c1174f4e686eb7e6f0ac5dd0af
|
Shell
|
joeribekker/dotfiles
|
/bash/completions.sh
|
UTF-8
| 306
| 2.65625
| 3
|
[] |
no_license
|
# Shell tab-completion setup: project-local completions first, then whichever
# system bash-completion package the host provides.
source ~/.dotfiles/bash/completions/virtualenv.sh
source ~/.dotfiles/bash/completions/django.sh
# Different local bash completions
# MacPorts location.
if [ -f /opt/local/etc/bash_completion ]; then
. /opt/local/etc/bash_completion
fi
# Debian/Ubuntu location; skipped when the shell runs in POSIX mode.
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
| true
|
599a80ce148028056e4f14ed3e05395c4079178d
|
Shell
|
comit-network/create-comit-app
|
/create/tests/run_demo.sh
|
UTF-8
| 2,380
| 3.9375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Integration test driver: boot the demo's docker environment, run one swap,
# verify it succeeded, then verify the environment tears down cleanly.
set -e
if [ "$#" -ne 1 ]; then
echo "Usage: $0 <demo name>"
exit 2;
fi
DEMO_NAME=$1;
PROJECT_DIR=$(git rev-parse --show-toplevel)
DEMO_DIR="${PROJECT_DIR}/create/new_project/demos/${DEMO_NAME}"
# NOTE(review): message typo — "exit" should read "exist" (string left as-is).
if ! [ -d "$DEMO_DIR" ]; then
echo "Demo dir does not exit: $DEMO_DIR";
exit 2;
fi
LOG_FILE=$(mktemp)
## Start tests
cd "${DEMO_DIR}"
yarn install > /dev/null
## Start-up environment
# Runs in the background; STARTENV_PID is signalled/reaped later.
yarn run start-env > /dev/null &
STARTENV_PID=$!
ENV_READY=false
CCA_TIMEOUT=60
# Echo 0 when every expected container (ethereum, bitcoin, cnd_0, cnd_1) has
# exactly one running instance, 1 otherwise. Always invoked via $(...), so the
# variables it sets live only in that subshell.
function check_containers() {
  for CONTAINER in ethereum bitcoin cnd_0 cnd_1; do
    NUM=$(docker ps -qf name=${CONTAINER} | wc -l)
    if [ "$NUM" -ne 1 ]; then
      echo 1
      return
    fi
  done
  echo 0
}
# Poll until all expected containers are up or the timeout expires.
while [ $CCA_TIMEOUT -gt 0 ]; do
if [ "$(check_containers)" -eq 0 ]; then
CCA_TIMEOUT=0
ENV_READY=true
else
sleep 1;
CCA_TIMEOUT=$((CCA_TIMEOUT-1));
fi
done
if ! $ENV_READY; then
# BUG FIX: the old message interpolated ${CONTAINER}, which is only ever set
# inside the $(check_containers) subshell — it always printed an empty name.
echo "FAIL: docker containers were not started."
kill $STARTENV_PID;
wait $STARTENV_PID;
exit 1;
fi
# Run the example
RUN_TIMEOUT=60
TEST_PASSED=false
# Run the swap in the background; its combined output feeds the check below.
NON_INTERACTIVE=true yarn run swap > "${LOG_FILE}" 2>&1 &
RUN_PID=$!
# Echo 0 once the success marker appears in the log, 1 otherwise.
function check_swap() {
local LOG_FILE=$1;
grep -q "Swapped!" "$LOG_FILE";
echo $?;
}
# Poll the log until "Swapped!" shows up or the timeout expires.
while [ $RUN_TIMEOUT -gt 0 ]; do
if [ "$(check_swap "$LOG_FILE")" -eq 0 ]; then
RUN_TIMEOUT=0;
TEST_PASSED=true;
else
sleep 1;
RUN_TIMEOUT=$((RUN_TIMEOUT-1));
fi
done
if $TEST_PASSED; then
echo "SUCCESS: It swapped.";
EXIT_CODE=0;
else
echo "FAIL: It did not swap.";
cat "$LOG_FILE";
EXIT_CODE=1;
fi
# The swap may still be running on timeout; don't let its status abort us.
wait $RUN_PID || true;
# SIGINT asks start-env to shut down its containers gracefully.
kill -s SIGINT $STARTENV_PID;
wait $STARTENV_PID || true;
# Ensure clean up
yarn run clean-env > /dev/null &
# Count the number of containers still running
# NOTE: this intentionally REDEFINES check_containers from above with the
# opposite test (-eq 1): it now reports 1 while ANY container is still alive.
function check_containers() {
ERROR=false
for CONTAINER in ethereum bitcoin cnd_0 cnd_1; do
NUM=$(docker ps -qf name=${CONTAINER} |wc -l)
if test "$NUM" -eq 1; then
ERROR=true;
break;
fi
done
$ERROR && echo 1 || echo 0
}
# Wait for cleaning up environment
TIMEOUT=10
while [ $TIMEOUT -gt 0 ]; do
if [ "$(check_containers)" -eq 0 ]; then
# NOTE(review): TEST_PASSED is set here but the script exits with
# EXIT_CODE decided earlier — this flag has no further effect.
TEST_PASSED=true;
TIMEOUT=0
else
echo "Waiting for containers to die";
sleep 1;
TIMEOUT=$((TIMEOUT-1));
fi
done
rm -f "${LOG_FILE}"
exit $EXIT_CODE;
| true
|
8e90e98dae0e79db497b0ca00910171ab7550f90
|
Shell
|
cha63506/packages-1
|
/docutils/trunk/PKGBUILD
|
UTF-8
| 1,584
| 2.59375
| 3
|
[] |
no_license
|
# $Id$
# Maintainer : Ionut Biru <ibiru@archlinux.org>
# Contributor: Sergej Pupykin <pupykin.s+arch@gmail.com>
# Arch Linux PKGBUILD for the python2-era docutils package.
pkgname=docutils
pkgver=0.8.1
pkgrel=1
pkgdesc="Set of tools for processing plaintext docs into formats such as HTML, XML, or LaTeX"
arch=('any')
url="http://docutils.sourceforge.net"
license=('custom')
depends=('python2')
source=(http://downloads.sourceforge.net/docutils/docutils-$pkgver.tar.gz)
md5sums=('2ecf8ba3ece1be1ed666150a80c838c8')
# makepkg build step: compile docutils inside the extracted source tree.
build() {
  cd "${srcdir}/${pkgname}-${pkgver}"
  python2 setup.py build
}
package() {
cd ${srcdir}/${pkgname}-${pkgver}
python2 setup.py install --root=${pkgdir} --optimize=1
# Provide extension-less names for the installed *.py entry points.
for f in ${pkgdir}/usr/bin/*.py; do
ln -s $(basename $f) $pkgdir/usr/bin/$(basename $f .py)
done
# Force the python2 interpreter in shebangs ('python' may be python3 on Arch).
sed -e 's|#!/usr/bin/env python|#!/usr/bin/env python2|' \
-i ${pkgdir}/usr/lib/python2.7/site-packages/docutils/_string_template_compat.py
sed -e 's|#!/usr/bin/env python|#!/usr/bin/env python2|' \
-i ${pkgdir}/usr/lib/python2.7/site-packages/docutils/writers/xetex/__init__.py
sed -e 's|#! /usr/bin/env python|#!/usr/bin/env python2|' \
-i ${pkgdir}/usr/lib/python2.7/site-packages/docutils/math/math2html.py
sed -e 's|#!/usr/bin/env python|#!/usr/bin/env python2|' \
-i ${pkgdir}/usr/lib/python2.7/site-packages/docutils/math/latex2mathml.py
sed -e 's|#!/usr/bin/env python|#!/usr/bin/env python2|' \
-i ${pkgdir}/usr/lib/python2.7/site-packages/docutils/error_reporting.py
install -D -m644 COPYING.txt ${pkgdir}/usr/share/licenses/${pkgname}/COPYING.txt
# NOTE(review): 'install -D' with multiple glob sources and a directory
# target only works on newer coreutils — confirm licenses/python* installs.
install -D -m644 licenses/python* ${pkgdir}/usr/share/licenses/${pkgname}/
}
| true
|
082ff02982c596352f9282056a5b11ca9b2d3ad7
|
Shell
|
LeSpocky/eis
|
/cuimenu/menus/var/install/bin/lvmman.cui.pvs.module.sh
|
UTF-8
| 20,936
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
#------------------------------------------------------------------------------
# /var/install/bin/lvmman.cui.pvs.module.sh - module for eisfair lvm mananger
#
# Creation: 2014-10-01 Jens Vehlhaber jens@eisfair.org
# Copyright (c) 2001-2014 the eisfair team, team(at)eisfair(dot)org
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#----------------------------------------------------------------------------
#============================================================================
# control constants
#============================================================================
# Window/control IDs used by this module's list view and edit dialog.
IDC_PVS_LIST='12'
IDC_PVSDLG_BUTOK='10'
IDC_PVSDLG_BUTCANCEL='11'
# NOTE(review): LABEL1 shares id '12' with IDC_PVS_LIST — they live in
# different windows (main vs. dialog), so ids presumably only need to be
# unique per window; confirm.
IDC_PVSDLG_LABEL1='12'
IDC_PVSDLG_EDNAME='20'
#----------------------------------------------------------------------------
# pvs_create_gid
# expects: nothing
# returns: next free gid in ${pvsdlg_pvsgid}
#----------------------------------------------------------------------------
function pvs_create_gid
{
oldifs="$IFS"
IFS=':'
# NOTE(review): scans /etc/pvs (colon-separated, gid in field 3). The naming
# mirrors a group-manager module; confirm /etc/pvs actually exists in this
# product — otherwise the loop reads nothing and the result is always 201.
pvsdlg_pvsgid=200
while read line
do
set -- $line
# Track the highest gid found in the 200..299 range.
if [ $3 -gt ${pvsdlg_pvsgid} -a $3 -lt 300 ]
then
pvsdlg_pvsgid=$3
fi
done </etc/pvs
IFS="$oldifs"
# Result: highest found gid + 1.
pvsdlg_pvsgid=$[${pvsdlg_pvsgid} + 1]
}
#============================================================================
# pvsdlg - dialog to create and edit pvs
#============================================================================
#----------------------------------------------------------------------------
# pvsdlg_ok_clicked
# Ok button clicked hook
# expects: $1 : window handle of dialog window
# $2 : button control id
# returns: 1 : event handled
#----------------------------------------------------------------------------
function pvsdlg_ok_clicked()
{
local win="$p2"
local ctrl
# Copy the entered name from the edit control into the module-global
# pvsdlg_pvsname, which the dialog's callers read after the modal loop.
cui_window_getctrl "$win" "$IDC_PVSDLG_EDNAME"
if [ "$p2" != "0" ]
then
ctrl="$p2"
cui_edit_gettext "$ctrl"
pvsdlg_pvsname="$p2"
fi
# Reject an empty name and keep the dialog open.
if [ -z "${pvsdlg_pvsname}" ]
then
cui_message "$win" "No pvs name entered! Please enter a valid name" \
"Missing data" "$MB_ERROR"
cui_return 1
return
fi
cui_window_close "$win" "$IDOK"
cui_return 1
}
#----------------------------------------------------------------------------
# pvsdlg_cancel_clicked
# Cancel button clicked hook
# expects: $1 : window handle of dialog window
# $2 : button control id
# returns: 1 : event handled
#----------------------------------------------------------------------------
function pvsdlg_cancel_clicked()
{
# Close the dialog ($p2), signalling cancellation to the modal loop.
cui_window_close "$p2" "$IDCANCEL"
cui_return 1
}
#----------------------------------------------------------------------------
# pvsdlg_create_hook
# Dialog create hook - create dialog controls
# expects: $1 : window handle of dialog window
# returns: 1 : event handled
#----------------------------------------------------------------------------
function pvsdlg_create_hook()
{
local dlg="$p2"
local ctrl
if cui_label_new "$dlg" "Group name:" 2 1 14 1 $IDC_PVSDLG_LABEL1 $CWS_NONE $CWS_NONE
then
cui_window_create "$p2"
fi
# Edit field, pre-filled with pvsdlg_pvsname (set by the caller: the current
# name for "edit", empty for "create").
if cui_edit_new "$dlg" "" 17 1 11 1 8 $IDC_PVSDLG_EDNAME $CWS_NONE $CWS_NONE
then
ctrl="$p2"
cui_window_create "$ctrl"
cui_edit_settext "$ctrl" "${pvsdlg_pvsname}"
fi
if cui_button_new "$dlg" "&OK" 5 3 10 1 $IDC_PVSDLG_BUTOK $CWS_DEFOK $CWS_NONE
then
ctrl="$p2"
cui_button_callback "$ctrl" "$BUTTON_CLICKED" "$dlg" pvsdlg_ok_clicked
cui_window_create "$ctrl"
fi
if cui_button_new "$dlg" "&Cancel" 16 3 10 1 $IDC_PVSDLG_BUTCANCEL $CWS_DEFCANCEL $CWS_NONE
then
ctrl="$p2"
cui_button_callback "$ctrl" "$BUTTON_CLICKED" "$dlg" pvsdlg_cancel_clicked
cui_window_create "$ctrl"
fi
cui_return 1
}
#============================================================================
# functions to create modify or delete pvs (using pvsdlg)
#============================================================================
#----------------------------------------------------------------------------
# pvs_editpvs_dialog
# Modify the pvs entry that has been selected in the list view
# returns: 0 : modified (reload data)
# 1 : not modified (don't reload data)
#----------------------------------------------------------------------------
function pvs_editpvs_dialog()
{
local win="$1"
local result="$IDCANCEL"
local ctrl
local index
cui_window_getctrl "$win" "$IDC_PVS_LIST"
if [ "$p2" != "0" ]
then
ctrl="$p2"
cui_listview_getsel "$ctrl"
if [ "$p2" != "-1" ]
then
index="$p2"
# Column 0 of the selected row holds the current name.
cui_listview_gettext "$ctrl" "$index" "0" && pvsdlg_pvsname="$p2"
local orig_pvsname="${pvsdlg_pvsname}"
if cui_window_new "$win" 0 0 32 7 $[$CWS_POPUP + $CWS_BORDER + $CWS_CENTERED]
then
local dlg="$p2"
cui_window_setcolors "$dlg" "DIALOG"
cui_window_settext "$dlg" "Edit Group"
cui_window_sethook "$dlg" "$HOOK_CREATE" pvsdlg_create_hook
cui_window_create "$dlg"
cui_window_modal "$dlg" && result="$p2"
if [ "$result" == "$IDOK" ]
then
# Only act when the name actually changed.
if [ "${orig_pvsname}" != "${pvsdlg_pvsname}" ]
then
# NOTE(review): duplicate check and rename go through /etc/pvs and
# /sbin/pvsmod — group-manager style tooling; confirm these paths exist.
grep "^${pvsdlg_pvsname}:" /etc/pvs >/dev/null
if [ $? == 0 ]
then
cui_message "$win" \
"Group \"${pvsdlg_pvsname}\" already exists!" \
"Error" "$MB_ERROR"
result="$IDCANCEL"
else
errmsg=$(/sbin/pvsmod -n "${pvsdlg_pvsname}" ${orig_pvsname} 2>&1)
if [ "$?" != "0" ]
then
cui_message "$win" \
"Error! $errmsg" "Error" "$MB_ERROR"
result="$IDCANCEL"
fi
fi
fi
fi
cui_window_destroy "$dlg"
fi
fi
fi
# Translate the dialog result into the documented 0/1 return code.
[ "$result" == "$IDOK" ]
return "$?"
}
#----------------------------------------------------------------------------
# pvs_createpvs_dialog
# Create a new pvs entry
# returns: 0 : created (reload data)
# 1 : not modified (don't reload data)
#----------------------------------------------------------------------------
function pvs_createpvs_dialog()
{
local win="$1"
local result="$IDCANCEL"
# Start the dialog with an empty name field.
pvsdlg_pvsname=""
if cui_window_new "$win" 0 0 32 7 $[$CWS_POPUP + $CWS_BORDER + $CWS_CENTERED]
then
local dlg="$p2"
cui_window_setcolors "$dlg" "DIALOG"
cui_window_settext "$dlg" "Create Group"
cui_window_sethook "$dlg" "$HOOK_CREATE" pvsdlg_create_hook
cui_window_create "$dlg"
cui_window_modal "$dlg" && result="$p2"
if [ "$result" == "$IDOK" ]
then
grep "^${pvsdlg_pvsname}:" /etc/pvs >/dev/null
if [ $? != 0 ]
then
pvs_create_gid
# NOTE(review): the entered name and the freshly computed gid are
# ignored — pvcreate runs on the hard-coded device /dev/sdb1. This
# looks like an unfinished port from a group manager; verify intent.
errmsg=$(/sbin/pvcreate /dev/sdb1 2>&1)
if [ "$?" != "0" ]
then
cui_message "$win" \
"Error! $errmsg" "Error" "$MB_ERROR"
result="$IDCANCEL"
fi
else
cui_message "$win" \
"Group \"${pvsdlg_pvsname}\" already exists!" \
"Error" "$MB_ERROR"
result="$IDCANCEL"
fi
fi
cui_window_destroy "$dlg"
fi
[ "$result" == "$IDOK" ]
return "$?"
}
#----------------------------------------------------------------------------
# pvs_deletepvs_dialog
# Remove the pvs entry that has been selected in the list view
# returns: 0 : modified (reload data)
# 1 : not modified (don't reload data)
#----------------------------------------------------------------------------
function pvs_deletepvs_dialog()
{
local win="$1"
local result="$IDCANCEL"
local ctrl
local index
cui_window_getctrl "$win" "$IDC_PVS_LIST"
if [ "$p2" != "0" ]
then
ctrl="$p2"
cui_listview_getsel "$ctrl"
if [ "$p2" != "-1" ]
then
index="$p2"
cui_listview_gettext "$ctrl" "$index" "0" && pvsdlg_pvsname="$p2"
# NOTE(review): column 1 holds "VG" in this module's list view (see
# pvs_init), not a numeric gid — the range check below looks inherited
# from the group manager and may never behave as intended here; verify.
cui_listview_gettext "$ctrl" "$index" "1" && pvsdlg_pvsgid="$p2"
# Protect the system ranges (< 200 and >= 65534) from deletion.
if [ "${pvsdlg_pvsgid}" -lt 200 -o "${pvsdlg_pvsgid}" -ge 65534 ]
then
cui_message "$win" "It is not allowed to remove pvs \"${pvsdlg_pvsname}\", sorry!" "Error" "${MB_ERROR}"
else
cui_message "$win" "Really Delete pvs \"${pvsdlg_pvsname}\"?" "Question" "${MB_YESNO}"
if [ "$p2" == "$IDYES" ]
then
local errmsg=$(/usr/sbin/pvsdel "${pvsdlg_pvsname}" 2>&1)
if [ "$?" == "0" ]
then
result="$IDOK"
else
cui_message "$win" \
"Error! $errmsg" "Error" "$MB_ERROR"
result="$IDCANCEL"
fi
fi
fi
fi
fi
[ "$result" == "$IDOK" ]
return "$?"
}
#============================================================================
# functions to sort the list view control and to select the sort column
#============================================================================
#----------------------------------------------------------------------------
# pvs_sort_list
# Sort the list view by the column held in pvs_sortcolumn / pvs_sortmode.
# expects: $1 : listview window handle
# returns: nothing
# NOTE(review): column 1 is sorted numerically although pvs_init labels it
# "VG" — likely inherited from the group manager's gid column; verify.
#----------------------------------------------------------------------------
function pvs_sort_list()
{
    local listview=$1
    local ascending="0"

    # A column of -1 means "don't sort".
    if [ "${pvs_sortcolumn}" != "-1" ]
    then
        case "${pvs_sortmode}" in
            up) ascending="1" ;;
        esac
        if [ "${pvs_sortcolumn}" == "1" ]
        then
            cui_listview_numericsort "$listview" "${pvs_sortcolumn}" "$ascending"
        else
            cui_listview_alphasort "$listview" "${pvs_sortcolumn}" "$ascending"
        fi
    fi
}
#----------------------------------------------------------------------------
# pvs_sortmenu_clicked_hook
# expects: $p2 : window handle
# $p3 : control window handle
# returns: nothing
#----------------------------------------------------------------------------
function pvs_sortmenu_clicked_hook()
{
# Selecting an item confirms the menu (modal loop returns IDOK).
cui_window_close "$p3" "$IDOK"
cui_return 1
}
#----------------------------------------------------------------------------
# pvs_sortmenu_escape_hook
# expects: $p2 : window handle
# $p3 : control window handle
# returns: nothing
#----------------------------------------------------------------------------
function pvs_sortmenu_escape_hook()
{
# ESC dismisses the menu without changing the sort settings.
cui_window_close "$p3" "$IDCANCEL"
cui_return 1
}
#----------------------------------------------------------------------------
# pvs_sortmenu_postkey_hook
# expects: $p2 : window handle
# $p3 : control window handle
# $p4 : key code
# returns: 1 : Key handled, 2 : Key ignored
#----------------------------------------------------------------------------
function pvs_sortmenu_postkey_hook()
{
local ctrl="$p3"
# F10 closes the menu AND quits the whole application loop.
if [ "$p4" == "$KEY_F10" ]
then
cui_window_close "$ctrl" "$IDCANCEL"
cui_window_quit 0
cui_return 1
else
cui_return 0
fi
}
#----------------------------------------------------------------------------
# pvs_select_sort_column
# Show menu to select the sort column
# expects: $1 : base window handle
# returns: nothing
#----------------------------------------------------------------------------
function pvs_select_sort_column()
{
local win="$1"
local menu
local result
local item
# Remember the current settings so we only reload when they change.
local oldcolumn="${pvs_sortcolumn}"
local oldmode="${pvs_sortmode}"
if cui_menu_new "$win" "Sort column" 0 0 36 10 1 "$[$CWS_CENTERED + $CWS_POPUP]" "$CWS_NONE"
then
menu="$p2"
cui_menu_additem "$menu" "Don't sort" 1
cui_menu_additem "$menu" "Sort by Group (ascending)" 2
cui_menu_additem "$menu" "Sort by Group (descending)" 3
cui_menu_additem "$menu" "Sort by Gid (ascending)" 4
cui_menu_additem "$menu" "Sort by Gid (descending)" 5
cui_menu_addseparator "$menu"
cui_menu_additem "$menu" "Close menu" 0
cui_menu_selitem "$menu" 1
cui_menu_callback "$menu" "$MENU_CLICKED" "$win" pvs_sortmenu_clicked_hook
cui_menu_callback "$menu" "$MENU_ESCAPE" "$win" pvs_sortmenu_escape_hook
cui_menu_callback "$menu" "$MENU_POSTKEY" "$win" pvs_sortmenu_postkey_hook
cui_window_create "$menu"
cui_window_modal "$menu" && result="$p2"
if [ "$result" == "$IDOK" ]
then
cui_menu_getselitem "$menu"
item="$p2"
# Map the chosen menu item to (column, direction).
case $item in
1)
pvs_sortcolumn="-1"
;;
2)
pvs_sortcolumn="0"
pvs_sortmode="up"
;;
3)
pvs_sortcolumn="0"
pvs_sortmode="down"
;;
4)
pvs_sortcolumn="1"
pvs_sortmode="up"
;;
5)
pvs_sortcolumn="1"
pvs_sortmode="down"
;;
esac
fi
cui_window_destroy "$menu"
# Re-read and re-sort only when the sort settings actually changed.
if [ "$oldcolumn" != "${pvs_sortcolumn}" -o "$oldmode" != "${pvs_sortmode}" ]
then
pvs_readdata "$win"
fi
fi
}
#============================================================================
# pvs module (module functions called from userman.cui.sh)
#============================================================================
#----------------------------------------------------------------------------
# pvs module
#----------------------------------------------------------------------------
# Module state: menu label plus the current sort settings (-1 = unsorted).
pvs_menu="Physical Volume"
pvs_sortcolumn="-1"
pvs_sortmode="up"
#----------------------------------------------------------------------------
# pvs_list_postkey_hook (translate ENTER into the F4 "edit" action)
# $p2 --> window handle of main window
# $p3 --> window handle of list control
# $p4 --> key
#----------------------------------------------------------------------------
function pvs_list_postkey_hook()
{
    local mainwin="$p2"
    local pressed="$p4"

    if [ "$pressed" != "${KEY_ENTER}" ]
    then
        cui_return 0        # not our key — let the default handler run
        return
    fi

    pvs_key "$mainwin" "$KEY_F4"
    cui_return 1
}
#----------------------------------------------------------------------------
# pvs_init (init the pvs module)
# $1 --> window handle of main window
#----------------------------------------------------------------------------
function pvs_init()
{
local win="$1"
local ctrl
# 6-column list view mirroring the fields of the 'pvs' report.
cui_listview_new "$win" "" 0 0 30 10 6 "${IDC_PVS_LIST}" "$CWS_NONE" "$CWS_NONE" && ctrl="$p2"
if cui_valid_handle "$ctrl"
then
cui_listview_setcoltext "$ctrl" 0 "PV"
cui_listview_setcoltext "$ctrl" 1 "VG"
cui_listview_setcoltext "$ctrl" 2 "Format"
cui_listview_setcoltext "$ctrl" 3 "Attr"
cui_listview_setcoltext "$ctrl" 4 "Size"
cui_listview_setcoltext "$ctrl" 5 "Free"
# ENTER on the list opens the edit dialog (see pvs_list_postkey_hook).
cui_listview_callback "$ctrl" "$LISTVIEW_POSTKEY" "$win" pvs_list_postkey_hook
cui_window_create "$ctrl"
fi
cui_window_getctrl "$win" "${IDC_HELPTEXT}" && ctrl="$p2"
if cui_valid_handle "$ctrl"
then
cui_textview_add "$ctrl" "Add, modify or delete pvs" 1
cui_window_totop "$ctrl"
fi
cui_window_setlstatustext "$win" "Commands: F4=Edit F7=Create F8=Delete F9=Sort F10=Exit"
}
#----------------------------------------------------------------------------
# pvs_close (close the pvs module)
# $1 --> window handle of main window
#----------------------------------------------------------------------------
function pvs_close()
{
local win="$1"
local ctrl
# Tear down the list view created in pvs_init.
cui_window_getctrl "$win" "${IDC_PVS_LIST}" && ctrl="$p2"
if cui_valid_handle "$ctrl"
then
cui_window_destroy "$ctrl"
fi
# Clear the shared help text pane for the next module.
cui_window_getctrl "$win" "${IDC_HELPTEXT}" && ctrl="$p2"
if cui_valid_handle "$ctrl"
then
cui_textview_clear "$ctrl"
fi
}
#----------------------------------------------------------------------------
# pvs_size (resize the pvs module windows)
# Moves/resizes the PV list to the given geometry.
# $1 --> window handle of main window
# $2 --> x
# $3 --> y
# $4 --> w
# $5 --> h
#----------------------------------------------------------------------------
function pvs_size()
{
    local win="$1" x="$2" y="$3" w="$4" h="$5"
    local list

    cui_window_getctrl "$win" "${IDC_PVS_LIST}" && list="$p2"
    if cui_valid_handle "$list"
    then
        cui_window_move "$list" "$x" "$y" "$w" "$h"
    fi
}
#----------------------------------------------------------------------------
# pvs_readdata (read data of the pvs module)
# Reloads the list view from 'pvs' output and restores the previous
# selection, clamped to the new row count.
# $1 --> window handle of main window
#----------------------------------------------------------------------------
function pvs_readdata()
{
    local ctrl
    local win="$1"
    local sel
    local count
    local index
    local _ifs    # was an accidental global in the original

    # read physical volume information
    cui_window_getctrl "$win" "$IDC_PVS_LIST" && ctrl="$p2"
    if cui_valid_handle "$ctrl"
    then
        cui_listview_getsel "$ctrl" && sel="$p2"
        cui_listview_clear "$ctrl"
        _ifs="$IFS"
        # One row per PV; fields are '!'-separated (see --separator).
        # NOTE: the loop body runs in a pipeline subshell, so variable
        # changes do not escape it; the cui_* calls carry all the state.
        # 'read -r' keeps any backslashes in device paths literal.
        pvs --separator=! --noheadings | while read -r line
        do
            IFS="!"
            set -- $line
            cui_listview_add "$ctrl" && index="$p2"
            cui_listview_settext "$ctrl" "$index" 0 "$1" # PV
            cui_listview_settext "$ctrl" "$index" 1 "$2" # VG
            cui_listview_settext "$ctrl" "$index" 2 "$3" # Format
            cui_listview_settext "$ctrl" "$index" 3 "$4" # Attr
            cui_listview_settext "$ctrl" "$index" 4 "$5" # Size
            cui_listview_settext "$ctrl" "$index" 5 "$6" # Free
            IFS="$_ifs"
        done
        cui_listview_update "$ctrl"
        cui_listview_getcount "$ctrl" && count="$p2"
        # Restore the old selection; [[ && ]] replaces the deprecated
        # and ambiguous '[ ... -a ... ]' form.
        if [[ "$sel" -ge 0 && "$count" -gt 0 ]]
        then
            if [[ "$sel" -ge "$count" ]]
            then
                sel=$(( count - 1 ))   # was deprecated '$[ ... ]' syntax
            fi
            pvs_sort_list "$ctrl"
            cui_listview_setsel "$ctrl" "$sel"
        else
            pvs_sort_list "$ctrl"
            cui_listview_setsel "$ctrl" "0"
        fi
    fi
}
#----------------------------------------------------------------------------
# pvs_activate (activate the pvs module)
# Hands keyboard focus to the PV list control.
# $1 --> window handle of main window
#----------------------------------------------------------------------------
function pvs_activate()
{
    local win="$1"
    local list

    cui_window_getctrl "$win" "$IDC_PVS_LIST"
    list="$p2"
    # a handle of "0" means the control was not found
    if [ "$list" != "0" ]
    then
        cui_window_setfocus "$list"
    fi
}
#----------------------------------------------------------------------------
# pvs_key (handle keyboard input)
# Dispatches the function keys of the pvs module.
# $1 --> window handle of main window
# $2 --> keyboard input
# Returns 0 when the key was handled, 1 otherwise.
#----------------------------------------------------------------------------
function pvs_key()
{
    local win="$1"
    local key="$2"

    case "$key" in
        "$KEY_F4")
            # edit the selected PV; refresh the list on success
            pvs_editpvs_dialog "$win" && pvs_readdata "$win"
            return 0
            ;;
        "$KEY_F7")
            # create a new PV; refresh the list on success
            pvs_createpvs_dialog "$win" && pvs_readdata "$win"
            return 0
            ;;
        "$KEY_F8")
            # delete the selected PV; refresh the list on success
            pvs_deletepvs_dialog "$win" && pvs_readdata "$win"
            return 0
            ;;
        "$KEY_F9")
            # let the user pick the sort column
            pvs_select_sort_column "$win"
            return 0
            ;;
    esac

    # key not handled by this module
    return 1
}
#============================================================================
# end of pvs module
#============================================================================
| true
|
c02450aa655b181992b6e0e9144f72daa2e92c75
|
Shell
|
zamlz-wsl/dotfiles
|
/etc/lemonbar/modules/memory
|
UTF-8
| 967
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/sh
# lemonbar module: prints a memory + swap usage segment (lemonbar
# formatting syntax) every $REFRESH_MEMORY seconds, forever.
. $HOME/etc/lemonbar/config
LOGGER=$(get_logger lemonbar.memory)
$LOGGER "Initializing memory module"
while true; do
# 'Mem:' row of free(1): field 2 = total, field 3 = used
memory=$(free -h | grep 'Mem:')
memory_total=$(echo $memory | awk {'print $2'} | tr -d ' ')
memory_used=$(echo $memory | awk {'print $3'} | tr -d ' ')
mem_msg="$memory_used/$memory_total"
# TODO: Set this dynamically based on used mem
mem_color=$MAGENTA
# same fields from the 'Swap:' row
swap=$(free -h | grep 'Swap:')
swap_total=$(echo $swap | awk {'print $2'} | tr -d ' ')
swap_used=$(echo $swap | awk {'print $3'} | tr -d ' ')
swap_msg="$swap_used/$swap_total"
# TODO: Set this dynamically based on used mem
swap_color=$MAGENTA
# %{B..}/%{F..} set background/foreground; %{B-}/%{F-} reset them.
# PL_Lb / ICON_* / ANCHOR_MEMORY come from the sourced config.
msg="%{B$BLACK}%{F$MAGENTA}${PL_Lb}%{F-}"
msg="$msg%{F$mem_color} ${ICON_MEMORY}$mem_msg %{F-}"
msg="$msg%{F$MAGENTA}${PL_Lb}%{F-}"
msg="$msg%{F$swap_color} ${ICON_SWAP}$swap_msg %{F-}%{B-}"
echo "$ANCHOR_MEMORY$msg"
sleep $REFRESH_MEMORY
done
# vim: set ft=sh:
| true
|
54413f18f1ef06ff8115e0298de3ce62e23ed7e8
|
Shell
|
dowjones/reapsaw
|
/entrypoint.sh
|
UTF-8
| 2,076
| 3.828125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Generate the consolidated scan report, optionally pushing results to
# Report Portal, then terminate the container with status 0.
create_reports()
{
# ----------------------------------------------------------------
# Generate consolidated report and send results to Report Portal
# ----------------------------------------------------------------
# Push to Report Portal only when both URL and token are configured.
RP=false
if [[ "${REPORT_PORTAL_URL}" && "${RP_TOKEN}" ]]; then
if [[ "${RP_PROJECT}" || "${PROJECT}" ]]; then
RP=true
else
echo "Please specify RP_PROJECT as environment variable for sending in Report Portal"
exit 0
fi
fi
generate_reports -r $RP
# NOTE(review): exits 0 even on the missing-RP_PROJECT path above —
# confirm CI intentionally treats this as success.
exit 0
}
{
# Dispatch on TASKS: "snyk", "cx", or a combination such as "cx,snyk".
if [[ "${TASKS}" ]]; then
mkdir -p /code/reports
if [[ "${TASKS}" == "snyk" ]]; then
echo "Snyk selected..."
if [[ "${SNYK_TOKEN}" ]]; then
scan
create_reports
else
echo "Please specify SNYK_TOKEN as environment variable."
fi
# NOTE(review): the three patterns below overlap (*"cx", *"cx_commit",
# "cx"*) — confirm which TASKS spellings are actually expected.
elif [[ "${TASKS}" == *"cx" ]] || [[ "${TASKS}" == *"cx_commit" ]] || [[ "${TASKS}" == "cx"* ]]; then
if [[ "${CX_USER}" ]] && [[ "${CX_PASSWORD}" ]] && [[ "${CX_URL}" ]]; then
if [[ "${TASKS}" == *"snyk"* ]]; then
if [[ "${SNYK_TOKEN}" ]]; then
echo "Checkmarx and Snyk tools selected..."
scan
create_reports
else
echo "Please specify SNYK_TOKEN as environment variable."
fi
else
echo "Checkmarx selected..."
scan
create_reports
fi
else
echo "Please specify next environment variables to run Checkmarx: 'CX_USER', 'CX_PASSWORD' and 'CX_URL'."
fi
else
echo "Unsupported TASKS value: ${TASKS}"
echo "Possible options: \"cx,snyk\", \"cx\",\"snyk\""
fi
else
echo "Please set TASKS environment variable"
fi
} || {
# Catch-all error handler for the group above.
# NOTE(review): exits 0 on failure, so the container never reports an
# error status — confirm this is intentional.
echo "Something went wrong. Please verify docker run command."
exit 0
}
| true
|
07cf9751509bad0c5abe099a8d9648dab82ff8ca
|
Shell
|
arkh91/packages
|
/Ubuntu/mac_address_changer.sh
|
UTF-8
| 616
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Assign a random MAC address (prefix 00) to interface enp0s3.

hex="0123456789ABCDEF"

# Build 10 random hex digits, then prefix every pair with ':' so the
# result looks like ":XX:XX:XX:XX:XX".
suffix=$(
    for _ in {1..10}; do
        printf '%s' "${hex:RANDOM % 16:1}"
    done | sed -e 's/\(..\)/:\1/g'
)
MAC="00${suffix}"

service network-manager stop
#ifconfig wlan0 down
#ifconfig wlan0 hw ether $MAC
#ifconfig wlan0 up
ifconfig enp0s3 down
ifconfig enp0s3 hw ether "$MAC"
ifconfig enp0s3 up
service network-manager start
echo "$MAC"
# run this script as root.
#Note: Use name of your network interface in place of “wlan0”
#You will have to change its permissions to get executed from terminal
# sudo chmod u+x <script_name>.py
# sudo ./<script_name>.py
| true
|
5abe94f937f7b0003b2d23a9c52b51c1bd17d8b6
|
Shell
|
shivamsharma00/hackerrank-problems
|
/Artificial Intelligence/backup.txt
|
UTF-8
| 577
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# creating a new user can also given separately on terminal
sudo adduser username

# shell script to backup folders
# FIX: shell assignments must not have spaces around '=' — the original
# "current_time = `date ...`" tried to run a command named "current_time".
current_time=$(date +%b-%d-%y)                      # timestamp used in the archive name
dest="/home/usr/shivam/backup-$current_time.tar.gz" # backup file to create
src="/home/usr/shivam/data_folder"                  # folder which is going to be backed up

# creating the backup: c-create, p-preserve permission of files, z-compress the files
tar -cpzf "$dest" "$src"
# show the size of the folder
du -sh
# report file system disk space usage
df
| true
|
acb75ed7e3f94aa6052ec9b0bb853f0fc70f1659
|
Shell
|
Martians/deploy
|
/shell/ambari/vagrant/script/repo/local_network.sh
|
UTF-8
| 746
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/sh
# Point yum at a LAN mirror for the 'common' and Ambari repositories.
source /vagrant/config/config.sh
REPO_LOCAL=repo.$DOMAIN
#REPO_LOCAL=$REPO_HOST
# Recreate the repo files from scratch (tee -a appends, so remove first).
sudo rm /etc/yum.repos.d/local_network.repo -rf
cat << EOF | sudo tee -a /etc/yum.repos.d/local_network.repo
[common]
name=local network common repo
baseurl=http://$REPO_LOCAL/common/centos7/
gpgcheck=0
enabled=1
priority=1
proxy=_none_
EOF
sudo rm /etc/yum.repos.d/ambari.repo -rf
cat << EOF | sudo tee -a /etc/yum.repos.d/ambari.repo
[ambari-2.6.0.0]
name=local ambari repo
baseurl=http://$REPO_LOCAL/ambari/centos7/2.x/updates/2.6.0.0/
gpgcheck=0
enabled=1
priority=1
proxy=_none_
EOF
# Disabled HDP repo block kept for reference; everything up to the
# 'COMMENT' delimiter (including the stray 'EOF' line) is inert text.
<<'COMMENT'
[hdp]
name=local hdp repo
baseurl=http://$REPO_LOCAL/component/HDP/centos7/2.x/updates/2.6.3.0/
gpgcheck=0
enabled=1
priority=1
proxy=_none_
EOF
COMMENT
| true
|
cb07e7a6eed88c300f9a7ec246ce20496ba5a3ee
|
Shell
|
dasaed/cmsc6950
|
/IntroductionToShellScripts/quotes-ansi-c.sh
|
UTF-8
| 492
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Demo of ANSI-C quoting ($'...') versus double quotes.
# We have used \n as a new line, \x40 is hex value for @ # and \56 is octal value for .
echo
# NOTE(review): $hex is undefined and expands to nothing in this string —
# the text probably meant the literal word "hex"; confirm intent.
echo "We have used \x40 is $hex value for @ echo and \56 is octal value for . and \n as a new line"
echo
echo $'email: web\x40linuxconfig\56org\n'
echo $'web: www\56linuxconfig\56org\n'
echo "alert (bell twice)" $'\a\a' "horizontal tab three times" $'\t\t\t' "vertical tab once" $'\v'
var=VARIABLE
# Inside double quotes \" stays escaped, while $var expands and `date` runs.
echo "Note that double quote does not affect \" and $var and `date`"
echo
echo End
| true
|
f3b9247d1da67b6f5e240bd23654cba250a424a6
|
Shell
|
michaelstein/TypeHelper
|
/build-linux-debug-qtcreator.sh
|
UTF-8
| 337
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Out-of-source Debug build of TypeHelper via conan + CMake, using the
# CodeBlocks generator so Qt Creator can open the build directory.
SOURCE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
mkdir -p "$SOURCE_DIR/../build-TypeHelper-conan"
cd "$SOURCE_DIR/../build-TypeHelper-conan"
# -pr selects the gcc 8.2 debug conan profile; 'conan install' also
# generates the activate.sh/deactivate.sh sourced below.
conan install $SOURCE_DIR -pr=gcc820d
source activate.sh && cmake -DCMAKE_BUILD_TYPE=Debug -G "CodeBlocks - Unix Makefiles" $SOURCE_DIR
source deactivate.sh
| true
|
00011db6228c8345b76811799b54f8625acfc36b
|
Shell
|
hashd/dotfiles
|
/confs/zsh/zsh.aliases
|
UTF-8
| 1,664
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/zsh
alias tmux='tmux -u -2'
alias home='cd ~'
# Folder aliases
alias reset="tput reset;clear;"
alias serve="python -m SimpleHTTPServer"
# apt-get aliases
alias install="sudo apt-get install"
alias upgrade="sudo apt-get upgrade"
alias remove="sudo apt-get remove"
alias update="sudo apt-get update"
alias dist-upgrade="sudo apt-get dist-upgrade"
alias uptodate="update && upgrade"
alias pfind="ps -ef | grep"
alias fs="fortune -s"
alias fl="fortune -l"
alias tf="typefortune -s -n "
# Java version switching helpers
alias javahome6="export JAVA_HOME=/usr/lib/jvm/java-6-oracle"
alias javahome7="export JAVA_HOME=/usr/lib/jvm/java-7-oracle"
alias javahome8="export JAVA_HOME=/usr/lib/jvm/java-8-oracle"
alias setjava6="echo 1 | sudo update-alternatives --config java >/dev/null 2>&1"
alias setjava7="echo 2 | sudo update-alternatives --config java >/dev/null 2>&1"
alias setjava8="echo 3 | sudo update-alternatives --config java >/dev/null 2>&1"
alias setjavac6="echo 1 | sudo update-alternatives --config javac >/dev/null 2>&1"
alias setjavac7="echo 2 | sudo update-alternatives --config javac >/dev/null 2>&1"
alias setjavac8="echo 3 | sudo update-alternatives --config javac >/dev/null 2>&1"
alias setjavaws6="echo 1 | sudo update-alternatives --config javaws >/dev/null 2>&1"
alias setjavaws7="echo 2 | sudo update-alternatives --config javaws >/dev/null 2>&1"
alias setjavaws8="echo 3 | sudo update-alternatives --config javaws >/dev/null 2>&1"
# FIX: single quotes so $JAVA_HOME is expanded when the alias RUNS, not
# when this file is sourced (the old double-quoted form froze the value
# of JAVA_HOME at shell startup, defeating the javahome* aliases).
alias checkjava='java -version && javac -version && echo $JAVA_HOME'
alias java6="setjava6;setjavac6;setjavaws6;javahome6"
alias java7="setjava7;setjavac7;setjavaws7;javahome7"
alias java8="setjava8;setjavac8;setjavaws8;javahome8"
| true
|
eabcb889ec0dde23cdf4eb84e789683102e3b09a
|
Shell
|
arendina/linuxfromscratch
|
/scripts/5.29.Perl.sh
|
UTF-8
| 331
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# 5.30. Perl-$ver
# LFS chapter 5: build a minimal temporary Perl into /tools.
ver='5.28.1'
cd $LFS/sources
tar -xvf perl-$ver.tar.xz
cd perl-$ver
# -des accepts all defaults non-interactively; -Uloclibpth/-Ulocincpth
# keep host library/include paths out of the build
sh Configure -des -Dprefix=/tools -Dlibs=-lm -Uloclibpth -Ulocincpth
make
# only perl itself and pod2man are needed at this stage
cp -v perl cpan/podlators/scripts/pod2man /tools/bin
mkdir -pv /tools/lib/perl5/$ver
cp -Rv lib/* /tools/lib/perl5/$ver
# clean up the source tree
cd $LFS/sources
rm -rf perl-$ver
| true
|
e0e186e9626bab3bce617049afab1f82c4d5977b
|
Shell
|
sordina/Profilerrific
|
/profilerrific
|
UTF-8
| 2,017
| 3.875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash

# Print the command-line help text.
function usage {
    cat <<'EOF'
Usage: profilerrific
 -n --name name
 [-h --help ]
 [-s --scripts script ... ]
 [-r --revisions revision ... ]
 [-d --diff prefix ]
 [-g --grapher script ]
EOF
}
# Keep everything up to the first option-looking token: strip from the
# first '-' (plus any whitespace before it) to the end of the line.
function tidy {
    printf '%s\n' "$*" | sed -e 's/[[:space:]]*-.*//'
}
# No arguments at all: show help and stop.
if [[ ! "$*" ]]; then
usage
exit
fi
# Poor-man's option parsing: each flag grabs the rest of the argument
# string and 'tidy' cuts it off at the next '-'-prefixed token.
while [[ "$@" ]]; do
ARG="$1"
shift
case "$ARG" in
-h|--help) usage; break;;
-n|--name) name=` tidy "$@"`;;
-r|--revisions) revisions=`tidy "$@"`;;
-s|--scripts) scripts=` tidy "$@"`;;
-g|--grapher) grapher=` tidy "$@"`;;
-d|--diff) diff="true"; diff_prefix=`tidy "$@"`;;
esac
done
if [[ ! "$name" ]]; then
echo "Missing profile name"
usage
exit 1
fi
current_revision=`git rev-parse --short HEAD`
echo "Current revision: $current_revision"
if [[ "$revisions" ]]; then echo "Revisions: $revisions"; fi
if [[ "$scripts" ]]; then echo "Scripts: $scripts"; fi
# Run the scripts for each revision
# WARNING: 'git reset --hard' discards uncommitted changes in the WC.
for revision in $revisions; do
git reset --hard $revision --
for script in $scripts; do
$script "$name" "$revision"
done
done
# Run the scripts even if no revisions are supplied
if [[ ! "$revisions" ]]; then
for script in $scripts; do
$script "$name" "$current_revision"
done
fi
# Bring the WC back to where we left off
git reset --hard $current_revision --
# NOTE(review): '<' inside [[ ]] compares revision hashes LEXICALLY —
# it only orders pairs consistently, not chronologically; confirm this
# is the intended pairing rule.
for revisionl in $revisions; do
for revisionr in $revisions; do
if [[ $revisionl < $revisionr ]]; then
# Output a diff of each pair
if [[ "$diff" ]]; then
echo "Dumping source diff for revisions-range $revisionl..$revisionr"
git diff --no-color "$revisionl..$revisionr" > ${diff_prefix}diff-$revisionl..$revisionr.diff
fi
# Graph each revision pair
if [[ "$grapher" ]]; then
for script in $scripts; do
echo "Graphing script $script results for revisions $revisionl vs. $revisionr"
$grapher "$name" "$script" "$revisionl" "$revisionr"
done
fi
fi
done
done
| true
|
551bcbf27fcd8bf2dede116df07832bda89c25d6
|
Shell
|
hanscj1/cepheus
|
/cookbooks/cepheus/files/default/scripts/ceph_replace_failed_drive.sh
|
UTF-8
| 2,564
| 3.46875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Author: Chris Jones <chris.jones@lambdastack.io>
# Copyright 2017, LambdaStack
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# IMPORTANT: This script needs to run on the Ceph node where the OSD lives!
# This file is primarily a documented runbook: the actual replacement
# steps below are commented out and meant to be run manually in order.
set -e
# $1: number of the failed OSD (required)
osd=$1
if [[ -z $osd ]]; then
echo 'Must pass in a valid OSD number.'
exit 1
fi
# Step -1:
# NOTE: On some hardware platforms, udev rules have `hot replaceable` drives showing up as a different device name.
# For example, /dev/sdk may have failed but when replaced it could show up as /dev/sdo or something. You can modify
# udev rules or ignore it for now. Meaning, run through the below using the new device name (i.e., /dev/sdo) and
# re-use the journal device as normal. If the node is 're-cheffed' then nothing will happen because the 'deployed'
# check is enabled. If the node happens to reboot then the default udev rules will enumerate the devices as expected
# with the device going back to the name of /dev/sdk. Again, this may not be the case for all hardware and controllers.
# Step 0:
# NOTE: Make sure data device is zapped if not already. If you run `sudo ceph-disk list` and it reports a failure then it may
# have old Ceph data partition. In this case (be careful), zap the data drive first.
# sudo dd if=/dev/zero of=/dev/<whatever data device> bs=512 count=1 conv=notrunc
# sduo sgdisk --zap-all /dev/<whatever data device>
# Step 1: IMPORTANT - Start with the lowest OSD number if multiple drive fails. Ceph reuses empty OSD sequence. For example,
# if OSD.100 and OSD.150 fails then replace OSD.100 first and then OSD.150. This makes things easier to manage.
# ceph_remove_osd.sh $osd
# Step 2:
# Wait for Ceph to rebalance
# Step 3: NOTE: This will format the disk and create the OSD. Nothing really happens to data until the crushmap is set.
# Do `sudo ceph-disk list` to find the journal of the device
# ceph_add_osd.sh $data_device $journal_device
# Step 4: NOTE: Get the weight from `ceph osd tree` assuming the same size disk
# ceph_osd_crush_weight_create_move.sh $osd $rack $weight
# Step 5:
# sudo service ceph start
| true
|
7a8171887893e05e2abab1905efc921cf51630e6
|
Shell
|
tomasebm/TwitterStreamerTemplate
|
/scripts/diskusagewarning.sh
|
UTF-8
| 386
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# www.fduran.com
# script that will send an email to EMAIL when disk use in partition PART is bigger than %MAX
# adapt these 3 parameters to your case
MAX=90
EMAIL=ejemplo@gmail.com
PART=xvda1
# Fifth df column is "Use%"; strip the '%' to get a bare integer.
# $( ) replaces the old backticks; variables are quoted defensively.
USE=$(df -h | grep "$PART" | awk '{ print $5 }' | cut -d'%' -f1)
# Quoting $USE avoids a syntax error in [ ] when the partition is not
# found and USE is empty (the comparison then fails instead of crashing).
if [ "$USE" -gt "$MAX" ]; then
    echo "Porcentaje en uso: $USE" | mail -s "Scraper de Twitter con poco espacio restante." "$EMAIL"
fi
| true
|
8d19eaeb6af6f364e2f8a8bb76eb237be3b41ab6
|
Shell
|
foss-for-synopsys-dwc-arc-processors/synopsys-caffe-models
|
/caffe_models/evgen_util.bash
|
UTF-8
| 2,317
| 3.609375
| 4
|
[] |
no_license
|
# Utilities to be sourced in another script
# Default test/network name if the sourcing script did not set one.
if [ -z "$name" ] ; then
name="test"
fi
# The CNN SDK root must point at an existing checkout.
if [[ -z "$EV_CNNSDK_HOME" || ! -d $EV_CNNSDK_HOME ]] ; then
echo "EV_CNNSDK_HOME not correctly set in the environment"
exit 1
fi
evgen=${EV_CNNSDK_HOME}/evgencnn/scripts/evgencnn
blobr=${EV_CNNSDK_HOME}/install/bin/blobreader
if [ ! -f $evgen ] ; then
echo "evgencnn executable not found : $evgen"
exit 1
fi
# Secret evgencnn options : wof & vof
export __SAVER=1
# More verbose evgencnn output
export show_classify=1
# Optional per-network extra options file ("<name>.opts"); the second
# assignment collapses its whitespace into single spaces.
if [ -f "${name}.opts" ] ; then
base_opts=$(<${name}.opts)
base_opts=$(echo $base_opts)
echo "Using ${name}.opts"
fi
# Echo a command line, run it, and abort the script if it fails.
function runcmd()
{
    printf '%s\n' "$*"
    eval "$@"
    local rc=$?
    if [ $rc -ne 0 ] ; then
        echo "Failed to run command"
        exit 1
    fi
}
# Verification info loaded from bin files
# Writes a build script to $1 that compiles <name>.exe, then runs it
# from the script's directory via runcmd.
function wr_bld_vof()
{
local out=$1
local dir=$(dirname ${out})
if [ ! -d $dir ] ; then
echo "Dir nto found : $dir"
exit 2
fi
# <<- strips leading TABS from the here-doc body; presumably the
# original body is tab-indented — TODO confirm in the real file.
cat <<- EOF > $out
#!/bin/bash -ex
g++ -O2 -DWEIGHTS_IN_FILE -DVERIFY_IN_FILE \\
-I ./include \\
code/${name}_impl.cc \\
verify/${name}_verify.cc \\
-o ${name}.exe
EOF
chmod u+x $out
echo "Created $out"
# build immediately, from inside the target directory
pushd $dir
runcmd ./$(basename ${out})
popd
}
# Verification info loaded from bin files + dump intermediate values
# Writes (but does not run) a build script for <name>_dump.exe with
# CNN_NATIVE_VERIFY enabled.
function wr_bld_vof_dump()
{
local out=$1
cat <<- EOF > $out
#!/bin/bash -ex
g++ -O2 -DWEIGHTS_IN_FILE -DVERIFY_IN_FILE -DCNN_NATIVE_VERIFY \\
-I ./include -I \${EV_CNNSDK_HOME}/include \\
code/${name}_impl.cc \\
verify/${name}_verify.c \\
-o ${name}_dump.exe
EOF
chmod u+x $out
echo "Created $out"
}
# Statically link all the input data (found in .c files).
# This leads to slow compilation and slow startup. Unfortunatelly it is
# the default mode of operation for the CNN tools.
# Writes (but does not run) a build script for <name>_static.exe; the
# generated script takes the input-image .c file as its first argument.
function wr_bld_static()
{
local out=$1
cat <<- EOF > $out
#!/bin/bash -ex
# Argv 1 : the name of the C file containing the input image
input_img=\${1:-no_input_specified}
g++ -O2 -DCNN_NATIVE_VERIFY \\
-I ./include -I \${EV_CNNSDK_HOME}/include \\
code/${name}_impl.cc \\
weights/${name}_filters.c \\
verify/${name}_verify.c \\
\${input_img} \\
-o ${name}_static.exe
EOF
chmod u+x $out
echo "Created $out"
}
| true
|
1381d7caae00cf94108b2e93d9dab4aa5f319c12
|
Shell
|
ken3/LFS
|
/8.2/scripts/320_acl.sh
|
EUC-JP
| 3,335
| 3.703125
| 4
|
[] |
no_license
|
# Linux From Scratch - Version 8.2
# Chapter 6. Installing Basic System Software
# 6.25. Acl-2.2.52
#######################################################################
# Load the shared build framework (banner was mojibake; translated)   #
#######################################################################
. buildlfs || exit 1
#######################################################################
# Package name / version and build directory settings                 #
#######################################################################
PKGVERSION=2.2.52
SOURCEROOT=acl-${PKGVERSION}
BUILDROOT=${SOURCEROOT}
BUILDDIR=${BUILDTOP}/${BUILDROOT}
ARCHIVE=`tarballpath $SOURCEROOT.src`
#######################################################################
# Sanity check that building is allowed (helper from buildlfs)        #
#######################################################################
lfs_selfcheck || exit 2
#######################################################################
# Unpack / patch / configure                                          #
#######################################################################
do_setup()
{
cd $BUILDTOP
/bin/rm -rf $SOURCEROOT $BUILDROOT
tar xvf $ARCHIVE
[ "$SOURCEROOT" == "$BUILDROOT" ] || mv $SOURCEROOT $BUILDROOT
cd $BUILDDIR || exit 1
# upstream LFS sed fixes for acl's build system, tests and a
# buffer-overrun in __acl_to_any_text
sed -i -e 's|/@pkg_name@|&-@pkg_version@|' include/builddefs.in
sed -i "s:| sed.*::g" test/{sbits-restore,cp,misc}.test
sed -i -e "/TABS-1;/a if (x > (TABS-1)) x = (TABS-1);" libacl/__acl_to_any_text.c
./configure --prefix=/usr \
--bindir=/bin \
--disable-static \
--libexecdir=/usr/lib
}
#######################################################################
# Compile (runs do_setup first if needed)                             #
#######################################################################
do_build()
{
[ -d $BUILDDIR ] || do_setup
cd $BUILDDIR || exit 1
make
}
#######################################################################
# Test (intentionally a no-op for this package)                       #
#######################################################################
do_test()
{
cd $BUILDDIR || exit 1
:
}
#######################################################################
# Install, then move the shared library onto the root filesystem      #
#######################################################################
do_install()
{
cd $BUILDDIR || exit 1
make install install-dev install-lib
chmod -v 755 /usr/lib/libacl.so
mv -v /usr/lib/libacl.so.* /lib
ln -sfv ../../lib/$(readlink /usr/lib/libacl.so) /usr/lib/libacl.so
}
#######################################################################
# Remove the build tree                                               #
#######################################################################
do_clean()
{
cd $BUILDTOP
/bin/rm -rf $SOURCEROOT $BUILDROOT
}
#######################################################################
# Dispatch on the requested action                                    #
#######################################################################
# Quote "$@" so arguments survive word-splitting/globbing intact.
do_action "$@"
| true
|
28574c2a8d6e71022341647905a1bd02e611ea0c
|
Shell
|
sghsri/dotfiles
|
/.bashrc
|
UTF-8
| 551
| 2.703125
| 3
|
[] |
no_license
|
# Show only the last 3 components of the cwd in the prompt.
PROMPT_DIRTRIM=3
# Tab cycles through completions instead of just listing them.
bind '"\t":menu-complete'
bind "set show-all-if-ambiguous on"
bind "set completion-ignore-case on"
bind "set menu-complete-display-prefix on"
###-begin-npm-completion-###
#
# npm command completion script
#
# Installation: npm completion >> ~/.bashrc (or ~/.zshrc)
# Or, maybe: npm completion > /usr/local/etc/bash_completion.d/npm
#
# NOTE(review): the npm completion body itself is missing below this
# header — confirm whether it was removed intentionally.
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
| true
|
c258d9423f18edb23403ed211273e95a31cd8774
|
Shell
|
Xitog/tallentaa
|
/tools/pull_all.sh
|
UTF-8
| 2,655
| 3.28125
| 3
|
[] |
no_license
|
# status: lx w8 w10 https ssh
# ash yes yes https://github.com/Xitog/ash.git git@github.com:Xitog/ash.git
# dgx yes yes https://github.com/Xitog/dgx.git git@github.com:Xitog/dgx.git
# hamill yes yes https://github.com/Xitog/hamill.git git@github.com:Xitog/hamill.git
# jyx yes yes https://github.com/Xitog/jyx.git git@github.com:Xitog/jyx.git
# tal yes https://github.com/Xitog/tal.git git@github.com:Xitog/tal.git
# tallentaa yes yes https://github.com/Xitog/tallentaa.git git@github.com:Xitog/tallentaa.git
# teddy yes yes https://github.com/Xitog/teddy.git git@github.com:Xitog/teddy.git
# raycasting yes yes https://github.com/Xitog/raycasting.git git@github.com:Xitog/raycasting.git
# weyland yes yes https://github.com/Xitog/weyland.git git@github.com:Xitog/weyland.git
# Pull every git repository found in the immediate subdirectories and
# report how many were already up to date (green) vs updated/failed (red).
GRN='\033[1;32m'
RED='\033[1;31m'
NC='\033[0m' # No Color
nb=$(find . -mindepth 1 -maxdepth 1 -type d | wc -l)
echo Total of local repository: $nb
printf "\n"
count=1
nb_ok=0
for file in */
do
name=${file%*/}
echo ---------------------------------------------------------------
echo Updating "$name" $count/$nb
echo ---------------------------------------------------------------
cd "$file"
output="$(git pull)"
# BUG FIX: 'ok' was initialised once before the loop, so after the first
# up-to-date repository the red (not-up-to-date) message was never shown
# again. It must be reset for every repository.
ok=0
# git localises this message; accept both French and English.
if [ "$output" = "Déjà à jour." ] || [ "$output" = "Already up to date." ]
then
echo -e "${GRN}$output${NC}"
ok=1
((nb_ok=nb_ok+1))
fi
if [ "$ok" -eq 0 ]
then
echo -e "${RED}$output${NC}"
fi
git status
git remote -v
cd ..
printf "\n"
let "count+=1"
done
((sum=count-1))
if [ "$nb_ok" -eq "$sum" ]
then
echo -e "${GRN}---------------------------------------------------------------${NC}"
echo -e "${GRN}All repositories ($sum) are up to date.${NC}"
echo -e "${GRN}(but there could be some local changes)${NC}"
echo -e "${GRN}---------------------------------------------------------------${NC}\n"
else
((updated=$sum-$nb_ok))
echo -e "${RED}---------------------------------------------------------------${NC}"
echo -e "${RED}$updated / $sum repository(ies) updated${NC}"
echo -e "${GRN}$nb_ok / $sum repository(ies) already up to date${NC}"
echo -e "${RED}---------------------------------------------------------------${NC}\n"
fi
printf "** Fin **\n"
|
54a7453d5ebda0de6bedbe14c5bfc0ae2b05b1f3
|
Shell
|
drjosephliu/dotfiles
|
/.bash_profile
|
UTF-8
| 721
| 2.859375
| 3
|
[] |
no_license
|
# Shell aliases
alias ll='ls -hartl'
alias g='grep -i'
alias get='curl -OL'
alias tmux="TERM=screen-256color-bce tmux"
alias vim='nvim'
alias python='python3'
alias pip='pip3'
# Homebrew (Apple Silicon prefix) environment
eval "$(/opt/homebrew/bin/brew shellenv)"
# git branch in the prompt via __git_ps1
source ~/.git-prompt.sh
PS1='[\W$(__git_ps1 " (%s)")]\$ '
# keep the terminal title in sync with the cwd
export PROMPT_COMMAND='echo -ne "\033]0;${PWD/#$HOME/~}\007"'
export PATH=~/.npm-global/bin:$PATH
# make fzf use ripgrep, including hidden files
export FZF_DEFAULT_COMMAND='rg --files --no-ignore-vcs --hidden'
[ -f /usr/local/etc/bash_completion ] && . /usr/local/etc/bash_completion || {
# if not found in /usr/local/etc, try the brew --prefix location
[ -f "$(brew --prefix)/etc/bash_completion.d/git-completion.bash" ] && \
. $(brew --prefix)/etc/bash_completion.d/git-completion.bash
}
| true
|
dee5c241f022c74497fc8e8620c0fa30c45d0e96
|
Shell
|
NaomiGaynor/dotfiles
|
/brew.sh
|
UTF-8
| 2,203
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Install (or with --remove/-r uninstall) Homebrew plus a curated list
# of formula and cask packages.
source ./utils/confirm.sh
brew_packages=(
vim
htop
yarn
tree
exa # ls replacement https://github.com/ogham/exa
watch
jq # parse json response https://stedolan.github.io/jq/
bat # cat replacement https://github.com/sharkdp/bat
prettyping # ping replacement https://github.com/denilsonsa/prettyping
fd # user-friendly find alternative https://github.com/sharkdp/fd/
ncdu # better du
tldr # better man https://github.com/tldr-pages/tldr/
noti # notification from terminal https://github.com/variadico/noti
terraform
nmap
)
brew_cask_packages=(
# Browsers
google-chrome
google-chrome-canary
firefox
firefox-developer-edition
firefox-nightly
opera
# More
drawio # Macosx desktop client
macdown # Markdown client
visual-studio-code
slack
iterm2
postman # Proxy
virtualbox
docker
dropbox
# Personal
whatsapp
spotify
# Quick look plugins (https://github.com/sindresorhus/quick-look-plugins)
qlcolorcode
qlstephen
qlmarkdown
quicklook-json
)
# --remove / -r: uninstall everything, then Homebrew itself, and exit.
if [ "$1" == "--remove" ] || [ "$1" == "-r" ]; then
confirm "Are you sure you want to disinstall Brew and all the packages?" || exit
for i in "${brew_packages[@]}"
do
:
brew uninstall "$i"
done
echo "Uninstalling Brew Cask packages"
for i in "${brew_cask_packages[@]}"
do
:
brew cask uninstall "$i"
done
# If Brew exists, uninstalling it
if [[ "$(type -P brew)" ]]; then
echo "Uninstalling Brew"
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/uninstall)"
fi
exit
fi
# Make sure homebrew is installed first
if [[ ! "$(type -P brew)" ]]; then
echo "Installing Homebrew"
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
fi
brew tap caskroom/cask
brew tap homebrew/cask-versions
brew doctor
brew update
echo "Installing Brew packages"
for i in "${brew_packages[@]}"
do
:
brew install "$i"
done
echo "Installing Cask packages"
for i in "${brew_cask_packages[@]}"
do
:
brew cask install "$i"
done
|
6b0bf8d0b15f9a959cc8a384ea64d0d2aea0b847
|
Shell
|
auroraeosrose/scripts
|
/php/valgrind.sh
|
UTF-8
| 767
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# This will run php tests with leak detection and valgrind on
# for already compiled debug and zts-debug
# this can still take forever

# this is where your PHP binaries are (quoted: the old unquoted
# '[ -z $PHP_BIN_ROOT ]' broke for values containing spaces)
[ -z "$PHP_BIN_ROOT" ] && PHP_BIN_ROOT=/usr/local
# this is the parent dir for your git checkouts
[ -z "$PHP_SOURCE" ] && PHP_SOURCE=./
# this makes it automated instead of waiting for you after a test run
export NO_INTERACTION=1

# Remember where we started: PHP_SOURCE may be relative, and the old
# version never cd'd back, so the second iteration resolved the path
# from inside the first checkout.
origdir=$(pwd)

# Reduced down to just 7.1 plus - at this point I don't care about older stuff
for version in "php-src" "php-7.1"
do
    cd "$origdir" || exit 1
    cd "$PHP_SOURCE/$version" || exit 1
    for type in -zts-debug -debug
    do
        export TEST_PHP_EXECUTABLE="$PHP_BIN_ROOT/$version$type/bin/php"
        echo "$TEST_PHP_EXECUTABLE"
        "$TEST_PHP_EXECUTABLE" run-tests.php -m
    done
done
|
9948d0a01df80bfac9cbc19d94af12e5df1cb76a
|
Shell
|
dfki-ric/robot_remote_control
|
/compile_protobuf_from_source.bash
|
UTF-8
| 870
| 4.09375
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
set -e # abort with error if any command returns with something other than zero
############
# Install script for compiling proto3 from source
#
# In case your OS does not provide protobuf3, install the build dependencies of protobuf:
# $> sudo apt-get install autoconf automake libtool curl make g++ unzip
############
# Download, build and install protobuf 3.17.3.
# $1: configure --prefix (empty string = configure's default)
function build_protobuf {
wget https://github.com/protocolbuffers/protobuf/archive/v3.17.3.tar.gz
tar xzf v3.17.3.tar.gz
cd protobuf-3.17.3
./autogen.sh
./configure --prefix=$1
make
make install
cd ../
}
# Optional first argument: install prefix; default warns and waits
# briefly so the user can abort before installing to /usr/local.
INSTALL_PATH=""
if [ -z $1 ]; then
echo "[INFO] You have not specified an install path. Packages are installed to /usr/local."
echo -n " Press Ctrl+C to stop"
sleep 1 && echo -n "."
sleep 1 && echo -n "."
sleep 1 && echo -n "."
else
INSTALL_PATH=$1
fi
build_protobuf "$INSTALL_PATH"
|
a1e03869266f3815d483d930779709d96b6abc58
|
Shell
|
dinner3000/scripts
|
/ubuntu_18_init_env.sh
|
UTF-8
| 2,378
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/sh -ex
# One-shot provisioning of an Ubuntu 18 dev box: zsh/oh-my-zsh, docker,
# nodejs, java (via jenv) and assorted tooling. Run as root.
#Set proxy if needed
export all_proxy=http://172.16.52.1:33888
#Oh my zsh - install
apt install -y zsh
curl -Lo omz_install.sh https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh
chmod +x omz_install.sh
./omz_install.sh
# NOTE(review): clone target dir ends in "zsh-autosugg" while the plugin
# name below is zsh-autosuggestions — confirm the path is intentional.
git clone https://github.com/zsh-users/zsh-autosuggestions ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-autosugg
git clone git://github.com/wting/autojump.git
cd autojump && ./install.py
#Oh my zsh - setup
#~/.zshrc -- custom omz
#ZSH_THEME="ys"
#DISABLE_UPDATE_PROMPT="true"
#plugins=( git node npm mvn autojump zsh-autosuggestions )
#[[ -s /root/.autojump/etc/profile.d/autojump.sh ]] && source /root/.autojump/etc/profile.d/autojump.sh
#autoload -U compinit && compinit -u
#Docker - install
apt-get install apt-transport-https ca-certificates curl gnupg-agent software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
apt-get update
apt install docker-ce docker-ce-cli containerd.io
apt install -y docker-compose
#Docker setup
#/lib/systemd/system/docker.service - add below lines if needed
#Environment="HTTP_PROXY=http://172.16.52.1:33888" "HTTPS_PROXY=http://172.16.52.1:33888" "NO_PROXY=172.16.52.134"
#/etc/docker/daemon.json - add below lines if needed
# "insecure-registries": [
# "hub.devops.local",
# "hub.devops.local:5000",
# "172.16.52.129",
# "172.16.52.129:5000"
# ],
# "registry-mirrors": [
# "https://dockerhub.azk8s.cn",
# "https://docker.mirrors.ustc.edu.cn",
# "http://hub-mirror.c.163.com"
# ],
systemctl daemon-reload; systemctl restart docker
# NOTE(review): likely a typo for the "hello-world" image — confirm.
docker run --rm hello-word
#Nodejs
curl -sL https://deb.nodesource.com/setup_12.x | sudo -E bash -
apt install -y nodejs
apt install -y git maven
apt-get install gcc g++ make
apt install -y openjdk-8-jdk
#Java version management via jenv
git clone https://github.com/gcuisinier/jenv.git ~/.jenv
echo 'export PATH="$HOME/.jenv/bin:$PATH"' >> ~/.bash_profile
echo 'eval "$(jenv init -)"' >> ~/.bash_profile
echo 'export PATH="$HOME/.jenv/bin:$PATH"' >> ~/.zshrc
echo 'eval "$(jenv init -)"' >> ~/.zshrc
jenv add /usr/lib/jvm/java-11-openjdk-amd64
jenv add /usr/lib/jvm/java-8-openjdk-amd64
jenv versions
export JENV_ROOT=/usr/local/opt/jenv
eval "$(jenv init -)"
jenv doctor
jenv global 1.8
|
a5db2b1c5e4e458af6fc72363db78749d36fdc80
|
Shell
|
aarthiKarthik/hyperledger_fabric
|
/Upgrade_CC/marbles_script.sh
|
UTF-8
| 2,611
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
echo
echo " "
echo "Starting marbles private data collection demo"
echo " "
# Positional args: channel, chaincode name, chaincode version.
CHANNEL_NAME="$1"
CC_NAME="$2"
VERSION_NUM="$3"
CC_SRC_PATH="github.com/chaincode/marbles02_private/go/"
# Private-data collection definition shipped with the marbles sample.
COLLECTIONS_PATH="$GOPATH/src/github.com/chaincode/marbles02_private/collections_config.json"
echo "Channel name : "$CHANNEL_NAME
# Provides setGlobals and verifyResult used by the functions below.
. scripts/utils.sh
# Install the marbles chaincode package on peer $1 of org $2.
# Relies on setGlobals/verifyResult from scripts/utils.sh.
installMarblesChaincode() {
PEER=$1
ORG=$2
setGlobals $PEER $ORG
set -x
# >&log.txt captures both stdout and stderr for the verification below.
peer chaincode install -n ${CC_NAME} -v ${VERSION_NUM} -p ${CC_SRC_PATH} >&log.txt
res=$?
set +x
cat log.txt
verifyResult $res "Chaincode installation on peer${PEER}.org${ORG} has failed"
echo "===================== Chaincode is installed on peer${PEER}.org${ORG} ===================== "
echo
}
# Instantiate the marbles chaincode on peer $1 of org $2.
instantiateMarblesChaincode() {
    PEER=$1
    ORG=$2
    setGlobals $PEER $ORG
    # while 'peer chaincode' command can get the orderer endpoint from the peer
    # (if join was successful), let's supply it directly as we know it using
    # the "-o" option
    set -x
    # BUG FIX: redirect the command output into log.txt (as
    # installMarblesChaincode does) — the `cat log.txt` below otherwise
    # shows a stale file from the previous step.
    peer chaincode instantiate -o orderer.example.com:7050 --tls --cafile $ORDERER_CA -C ${CHANNEL_NAME} -n ${CC_NAME} -v ${VERSION_NUM} -c '{"Args":["init"]}' -P "OR('Org1MSP.member','Org2MSP.member')" --collections-config ${COLLECTIONS_PATH} >&log.txt
    res=$?
    set +x
    cat log.txt
    verifyResult $res "Chaincode instantiation on peer${PEER}.org${ORG} on channel '$CHANNEL_NAME' failed"
    echo "===================== Chaincode is instantiated on peer${PEER}.org${ORG} on channel '$CHANNEL_NAME' ===================== "
    echo
}
# Invoke initMarble on peer $1 of org $2; $3 is the marble name.
marbleschaincodeInvokeInit() {
    PEER=$1
    ORG=$2
    ARG=$3
    setGlobals $PEER $ORG
    PEER0_ORG1_CA=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/tls/ca.crt
    set -x
    # BUG FIX: capture the invoke output in log.txt — the `cat log.txt`
    # below previously displayed a stale file from an earlier step.
    peer chaincode invoke -o orderer.example.com:7050 --tls --cafile $ORDERER_CA -C ${CHANNEL_NAME} -n ${CC_NAME} -c '{"Args":["initMarble","'${ARG}'","blue","35","tom","99"]}' >&log.txt
    res=$?
    set +x
    cat log.txt
    # BUG FIX: report the actual peer — $PEERS was never defined.
    verifyResult $res "Invoke execution on peer${PEER}.org${ORG} failed"
    echo "===================== Invoke transaction successful on peer${PEER}.org${ORG} on channel '$CHANNEL_NAME' ===================== "
    echo
}
## Install chaincode on peer0.org1 and peer0.org2
echo "Installing marbles chaincode on peer0.org1..."
installMarblesChaincode 0 1
echo "Install marbles chaincode on peer0.org2..."
installMarblesChaincode 0 2
# Instantiate chaincode on peer0.org1
echo "Instantiating chaincode on peer0.org1..."
instantiateMarblesChaincode 0 1
# Invoke chaincode on peer0.org1 and peer0.org2
echo "Sending invoke transaction on peer0.org1"
# Third arg (marble name) is omitted here, so initMarble receives "".
marbleschaincodeInvokeInit 0 1
| true
|
373f1ede57854f7d1a6a75fd0b23727e2406d2e1
|
Shell
|
ekmixon/conjur-api-go
|
/bin/utils.sh
|
UTF-8
| 498
| 3.484375
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
export compose_file="../docker-compose.yml"

# Print the first argument inside a blue "banner" on stdout.
function announce() {
    BLUE='\033[0;34m'
    NC='\033[0m' # No Color
    # %b expands the escape sequences, same as `echo -e`.
    printf '%b\n' "$BLUE
================================
${1}
================================
$NC"
}
exec_on() {
    # Run a command inside the container backing the given compose service.
    local container="$1"; shift
    local cid
    cid=$(docker-compose -p $COMPOSE_PROJECT_NAME ps -q $container)
    docker exec "$cid" "$@"
}
oss_only(){
    # Succeed only when the suite runs against the OSS build.
    case "$TEST_VERSION" in
        oss) return 0 ;;
        *)   return 1 ;;
    esac
}
# Stop the compose project and remove its volumes.
teardown() {
    docker-compose -p $COMPOSE_PROJECT_NAME down -v
}
| true
|
e031f1659c1ab6dde8850bc2d5a1f79b8236295b
|
Shell
|
bioconda/bioconda-recipes
|
/recipes/ngscheckmate/build.sh
|
UTF-8
| 599
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Conda build script: relocate NGSCheckMate under $PREFIX and generate
# bash wrapper "scripts" in $PREFIX/bin (named .py for compatibility).
cd $PREFIX
mv $SRC_DIR $PREFIX/NGSCheckMate
mkdir -p $PREFIX/bin
# Wrapper for ncm.py; \$@ is escaped so it expands at run time, while
# $PREFIX is baked in at build time.
cat << EOF > $PREFIX/bin/ncm.py
#!/usr/bin/env bash
export NCM_HOME=$PREFIX/NGSCheckMate
echo "Set the path to your reference file with the NCM_REF environment variable"
echo "eg. export NCM_REF=/<path>/<to>/<reference>"
echo
python $PREFIX/NGSCheckMate/ncm.py "\$@"
EOF
# Wrapper for ncm_fastq.py.
cat << EOF > $PREFIX/bin/ncm_fastq.py
#!/usr/bin/env bash
export NCM_HOME=$PREFIX/NGSCheckMate
python $PREFIX/NGSCheckMate/ncm_fastq.py "\$@"
EOF
# Default config; REF is resolved from \$NCM_REF at run time.
cat << EOF > $PREFIX/NGSCheckMate/ncm.conf
SAMTOOLS=samtools
BCFTOOLS=bcftools
REF=\$NCM_REF
EOF
| true
|
93dfaf999bcb51ab2657a1c39c867a3b1f7f21de
|
Shell
|
spacefrogg/afs-scripts
|
/afs-user-distribution.sh
|
UTF-8
| 1,784
| 4.40625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Version: 0.4
# Print the help text (with $0 expanded) and exit the whole script.
function usage() {
cat<<ENDUSAGE
Usage: $(basename $0) [-a|-g <num>|-h]
Options:
-a Simply count all the users for each letter of the alphabet
and display the distribution.
-g <num> Determine adjacent letter groupings that produces
groups with about <num> users. Useful for determining
appropriate sized group of users to batch process all at once.
-h|--help display this help message
Example usage:
The 'afs-list-mountpoints.sh' script updates the cached list of volume
mount points for all users, but it is unmanagable to search through
all users at once. Instead, you can use this program to create a
list of letter ranges of about 65 users:
$(basename $0) -g 65
If you just want to see how many users there are in each letter directory,
sorted from fewest users to most users:
$(basename $0) -a | sort -n -k 2
ENDUSAGE
exit
}
# Emit one "<letter> <user count>" line per a-z user directory.
function get_user_distribution() {
    local letter
    for letter in {a..z}; do
        printf '%s ' "$letter"
        /bin/ls -1 /afs/ss2k/users/$letter/ | wc -l
    done
}
# Partition the a-z letters into adjacent ranges whose user counts sum
# to roughly $1 each, printing "<start>-<end> <count>" per group.
function get_groups_of_size() {
max=$1
sum=0
newline=1
# sum/newline mutate inside the pipeline's subshell, which is fine:
# they are only needed within this while loop.
get_user_distribution | \
while read line; do
# First and second whitespace-separated fields of "letter count".
l=${line/ */}
n=${line/* /}
[ $newline -eq 1 ] && { echo -n "$l-"; newline=0; }
((sum+=n))
[ $sum -gt $max ] && { echo "$l $sum"; sum=0; newline=1; }
# Flush the final (possibly undersized) group when we reach 'z'.
[ "$l" = "z" -a $sum -ne 0 ] && echo "z $sum"
done
}
# No arguments: show help and exit.
[ $# -eq 0 ] && usage
case $1 in
-a) get_user_distribution; ;;
# -g requires a purely numeric <num> argument.
-g) if [ -z "$2" ] || echo $2| grep -q "[^0-9]"; then
echo "Error: missing <num> argument."; usage
fi
get_groups_of_size $2
;;
*) usage; ;;
esac
| true
|
08ade350ebb5c2e59b633a7048d4c5a59bb2d467
|
Shell
|
mkoskar/dotfiles
|
/bin/sshgen-cert
|
UTF-8
| 1,086
| 4.15625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
set -eu
prgname=${0##*/}
_usage() {
cat <<-EOF
usage: $prgname [-H] [-P] ca_key pub_key identity principals [ssh-keygen opts]
Generate SSH client or host certificate.
Note: Multiple principals may be specified separated by commas.
-H - generate host certificate
-P - generate port-forwarding only certificate
EOF
}
usage() { _usage; exit 0; }
usage_err() { _usage; exit 2; } >&2
declare -i host=0 portfwd=0
while getopts HPh opt; do
case $opt in
H) host=1 ;;
P) portfwd=1 ;;
h) usage ;;
*) usage_err ;;
esac
done
shift $((OPTIND-1))
# Four positional arguments are mandatory.
(( $# < 4 )) && usage_err
# ----------------------------------------
ca_key=$1
pub_key=$2
identity=$3
principals=$4
shift 4
# Base ssh-keygen options; certificates are valid for 52 weeks.
opts=(
-s "$ca_key"
-I "$identity"
-n "$principals"
-V +52w
)
if (( host )); then
opts+=(-h)
else
# Port-forwarding-only client certs: strip every other capability.
if (( portfwd )); then
opts+=(
-O no-agent-forwarding
-O no-pty
-O no-user-rc
-O no-x11-forwarding
)
fi
fi
# Any extra arguments are passed straight through to ssh-keygen.
ssh-keygen "${opts[@]}" "$@" "$pub_key"
| true
|
606a0de4b6f8db33636028c27256074a57a84e96
|
Shell
|
cdeck3r/BilderSkript
|
/Dockerfiles/install_builder_supplementals.sh
|
UTF-8
| 3,700
| 4.0625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Install supplemental software
# * slackr: bash script for Slack messaging
# * hugo: static webpage generator
# * java: detect and version check
#
# this directory is the script directory
SCRIPT_DIR="$( cd "$(dirname "$0")" ; pwd -P )"
# Quoted (the original `cd $SCRIPT_DIR` would break on spaces); bail out
# if the cd somehow fails so later relative paths stay correct.
cd "$SCRIPT_DIR" || exit 1
# important variables
SLACKR_URL='https://github.com/a-sync/slackr.git'
HUGO_DIR="$SCRIPT_DIR"/hugo
# Pinned hugo release.  (A dead duplicate assignment for v0.56.3 that was
# immediately overwritten has been removed.)
HUGO_URL='https://github.com/gohugoio/hugo/releases/download/v0.62.0/hugo_0.62.0_Linux-64bit.tar.gz'
PLANTUML_JAR=$SCRIPT_DIR/plantuml/plantuml.jar
PLANTUML_URL='https://sourceforge.net/projects/plantuml/files/plantuml.jar/download'
JAVA_MIN_VERSION="1.5"
#####################################################
# Helper functions
#####################################################
#
# logging on stdout
# Param #1: log level, e.g. INFO, WARN, ERROR
# Param #2: log message
log_echo () {
    LOG_LEVEL=$1
    LOG_MSG=$2
    TS=$(date '+%Y-%m-%d %H:%M:%S,%s')
    # BUG FIX: SCRIPT_NAME was referenced but never defined anywhere in
    # this script; fall back to the script's basename.
    echo "$TS - ${SCRIPT_NAME:-$(basename "$0")} - $LOG_LEVEL - $LOG_MSG"
}
#####################################################
# Main program
#####################################################
# Source: https://stackoverflow.com/a/677212
command -v git >/dev/null 2>&1 || { echo >&2 "I require git but it's not installed. Abort."; exit 1; }
#
# slackr
# Simple shell command to send or pipe content to slack via webhooks.
#
if [[ -x "$SCRIPT_DIR/slackr/slackr" ]]; then
log_echo "INFO" "Slackr found: $SCRIPT_DIR/slackr"
else
log_echo "INFO" "Start installing: slackr"
git clone "$SLACKR_URL"
if [[ $? -ne 0 ]]
then
log_echo "ERROR" "Install FAILED: slackr"
#exit 1
fi
cd slackr && chmod +x slackr
# back
cd ..
log_echo "INFO" "Install done: slackr"
fi
#
# hugo
#
log_echo "INFO" "Start installing: hugo"
mkdir -p "$HUGO_DIR"
# NOTE(review): without pipefail, the $? below reflects only tar's exit
# status — a failed wget with empty output is still reported as success.
wget --no-check-certificate -qO- ${HUGO_URL} | tar -C "$HUGO_DIR" -xvz
if [[ $? -ne 0 ]]
then
log_echo "ERROR" "Install FAILED: hugo"
exit 1
else
log_echo "INFO" "Install done: hugo"
fi
#
# plantuml
#
if [ -f "$PLANTUML_JAR" ]; then
# log string
log_echo "INFO" "plantuml jar found: $PLANTUML_JAR"
else
# we need to download the plantuml jar
# log string
log_echo "WARN" "plantuml jar not found: $PLANTUML_JAR"
# create directory and download using wget
PLANTUML_DIR=$(dirname $PLANTUML_JAR)
mkdir -p $PLANTUML_DIR
log_echo "INFO" "Download plantuml jar into directory: $PLANTUML_DIR"
wget --no-check-certificate \
--retry-connrefused --waitretry=5 --read-timeout=20 --timeout=15 -t 10 \
-q -O $PLANTUML_JAR $PLANTUML_URL
# error check
if [[ $? -ne 0 ]]; then
log_echo "ERROR" "Error downloading plantuml jar: "$PLANTUML_URL""
exit 1
fi
log_echo "INFO" "Download of plantuml jar successful."
fi
#
# Check java version
# Source: https://stackoverflow.com/a/7335524
#
log_echo "INFO" "Check for java and java version"
if type -p java; then
    log_echo "INFO" "Found java executable in PATH"
    _JAVA=java
elif [[ -n "$JAVA_HOME" ]] && [[ -x "$JAVA_HOME/bin/java" ]]; then
    log_echo "INFO" "Found java executable in JAVA_HOME"
    _JAVA="$JAVA_HOME/bin/java"
else
    log_echo "WARN" "Java not found. Please install."
fi
if [[ "$_JAVA" ]]; then
    JAVA_VERSION=$("$_JAVA" -version 2>&1 | awk -F '"' '/version/ {print $2}')
    log_echo "INFO" "Java version found: $JAVA_VERSION"
    # BUG FIX: the original used the lexicographic string test
    # [[ "$JAVA_VERSION" > "$JAVA_MIN_VERSION" ]], which misorders
    # versions (e.g. "1.10" < "1.5").  Use a numeric version sort.
    if [ "$(printf '%s\n' "$JAVA_VERSION" "$JAVA_MIN_VERSION" | sort -V | tail -n1)" = "$JAVA_VERSION" ] \
        && [ "$JAVA_VERSION" != "$JAVA_MIN_VERSION" ]; then
        log_echo "INFO" "Java version $JAVA_VERSION is greater than $JAVA_MIN_VERSION."
    else
        log_echo "WARN" "Java version $JAVA_VERSION is less or equal than $JAVA_MIN_VERSION."
    fi
fi
| true
|
fe0346459bf57ccca55a9cfa0b67bc3513a8b0d9
|
Shell
|
rburchell/dotfiles
|
/.local/bin/linux/diotop
|
UTF-8
| 719
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Poor man's iotop.
# Probably never useful, but it was, once, when
# I had no capability to get a working iotop running.
if [ "$(id -u)" -ne 0 ] ; then
    # BUG FIX: the original wrote '2>&1', which redirects stderr to
    # stdout; the intent was to print the error ON stderr.
    echo "Must be root" >&2
    exit 1
fi
delay=2
# Baseline snapshot: "<pid> rchar: <bytes>" per process.
lista=$(for p in $(pgrep "."); do echo -n "$p "; grep ^rchar /proc/$p/io 2>/dev/null; done)
while :; do
    echo "-----"
    listb=$(for p in $(pgrep "."); do echo -n "$p "; grep ^rchar /proc/$p/io 2>/dev/null; done)
    echo "$lista" | while read -r pida xa bytesa; do
        [ -e "/proc/$pida" ] || continue
        echo -en "$pida:\t"
        bytesb=$(echo "$listb" | awk -v pid=$pida '$1==pid{print $3}')
        echo "$((($bytesb - $bytesa) / $delay)) b/s"
    done | sort -nk2 | tail
    sleep $delay
    # BUG FIX: advance the baseline to the newest snapshot so each pass
    # reports the rate for the last interval.  The original assigned in
    # the wrong direction (listb=$lista), so rates were cumulative since
    # startup but still divided by one interval.
    lista=$listb
done
| true
|
0f7f0eee136bff580f03528598addaaec1476edb
|
Shell
|
isabella232/unscramble
|
/languages/zsh/unscramble
|
UTF-8
| 283
| 3.265625
| 3
|
[] |
no_license
|
#!/usr/bin/env zsh
# A zsh script to unscramble a word.
input_length=${#1}
# (s::) splits into characters, (L) lowercases, (o) sorts — so this is
# the word's letters as a sorted, lowercased multiset.
input_letters=${${(Lo)${(s::)1}}}
# A dictionary word matches when it has the same length and the same
# sorted letter multiset; print the first match and stop.
while read word; do
if [[ ${#word} == $input_length ]] && [[ ${${(Lo)${(s::)word}}} == $input_letters ]]; then
echo $word
exit
fi
done < /usr/share/dict/words
| true
|
2224addd5cdea3ed6b4d6a6c0f899696febb201b
|
Shell
|
Charangit-new/ArrayProblems
|
/arrayrandom.sh
|
UTF-8
| 662
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Fill an array with n random 3-digit numbers and print the 2nd largest
# and 2nd smallest distinct values.
read -p "Enter no of inputs " n
for (( i=1 ; i<=n ; i++))
do
    # random value in [100, 999]
    random=$(($RANDOM%900+100))
    arr[i]=$random
done
echo "The array of $n elements : "
echo "${arr[@]} "
# BUG FIX: max2/min2 were used uninitialised (their init lines were
# commented out); they only "worked" because an empty string compares
# as 0.  Start them empty and treat empty as "no candidate yet".
max1=${arr[1]}
max2=
for (( i=1;i<=n;i++))
do
    if [ "${arr[i]}" -gt "$max1" ]
    then
        max2=$max1
        max1=${arr[i]}
    elif [ "${arr[i]}" -ne "$max1" ] && { [ -z "$max2" ] || [ "${arr[i]}" -gt "$max2" ]; }
    then
        max2=${arr[i]}
    fi
done
echo "2nd Largest number=$max2"
min1=${arr[1]}
min2=
for ((i=1;i<=n;i++))
do
    if [ "${arr[i]}" -lt "$min1" ]
    then
        min2=$min1
        min1=${arr[i]}
    elif [ "${arr[i]}" -ne "$min1" ] && { [ -z "$min2" ] || [ "${arr[i]}" -lt "$min2" ]; }
    then
        min2=${arr[i]}
    fi
done
echo "2nd smallest=$min2"
| true
|
753fbb38d9026db2ade6c28dbd40968e06915799
|
Shell
|
JoshMKing/Scripts
|
/syshud
|
UTF-8
| 3,070
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Display basic system info when logging in to a cpanel server
#
# Adapted by Josh King <j.king@kualo.com>
REVISION=beta5;
# Refuse to run on non-cPanel machines.
if [ ! -d "/root/.cpanel" ]; then
echo "Exiting. This is not a cPanel Machine."
exit 1
fi
#set colors
# NOTE(review): these color variables are never used below; also
# 'white' is missing its '\E[' escape prefix and 'normal' stores the
# literal string "tput sgr0" rather than the reset sequence.
red='\E[1;31m';
yellow='\E[1;33m';
green='\E[1;32m';
white='e[1;37m';
normal='tput sgr0';
#get primary ip
# NOTE(review): assumes the primary interface is eth0 and the legacy
# "inet addr:" ifconfig output format.
IPADDR=`/sbin/ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'`
WHMVER=`/usr/local/cpanel/cpanel -V`
#Get Basic OS Info
KERNFO=`uname -or`
OSVER=`cat /etc/redhat-release | awk '{ print $1,$3}'`
#Get in depth info on IO
IOCT=`iostat -txN | sed -n '6,21p'`
#Memory use
MEMCT=`vmstat -s -S M | head`
# TOP CPU
TCU=`ps aux | awk '{print $2, "\t"$3,"\t" $11}' | sort -k2r | head -n 10`
#TOP RAM
TRAM=` ps aux | awk '{print $2, "\t"$4,"\t" $11}' | sort -k2r | head -n 10`
###
### BEGIN APACHE SECTION
###
#tests if apache is active or not, if port 80 is open on server, then assumes apache is online, if 0 apache offline
APACHEACTIVE=`netstat -na |grep :80 |grep LISTEN |wc -l`
if [ "$APACHEACTIVE" == "0" ]; then
APACHEONLINE="NO"
else
APACHEONLINE="YES"
fi
if [ "$APACHEONLINE" == "YES" ]; then
APACHEVERSION=`httpd -v |grep version |awk '{ print $3}' |cut -c 8-13`
APACHECONNECTS=`netstat -n | grep :80 |wc -l`
APACHEOUTPUT="$APACHEVERSION Online / Current Connections $APACHECONNECTS"
else
APACHEOUTPUT="OFFLINE"
fi
###
### BEGIN MYSQL SECTION
###
# NOTE(review): parsing the 3rd word of `service mysql status` is
# fragile; output wording differs across distros/versions.
MYSQLACTIVE=`service mysql status |awk '{ print $3}'`
if [ "$MYSQLACTIVE" = "running" ]; then
MYSQLVERSION=`mysql -V | awk '{ print $5}' | cut -d ',' -f 1`
MYSQLPROCESSES=`mysqladmin status |awk '{ print $4}'`
MYSQLOUTPUT="$MYSQLVERSION Online / Active Threads: $MYSQLPROCESSES"
else
MYSQLOUTPUT="OFFLINE"
fi
###
### BEGIN EXIM SECTION
###
EXIMACTIVE=`service exim status |awk '{ print $NF}'`
if [ "$EXIMACTIVE" = "running..." ]; then
MAILMSGS=`exim -bpc`
EXIMOUTPUT="Online / Messages in Queue: $MAILMSGS"
else
EXIMOUTPUT="OFFLINE"
fi
PHPVERSION=`php -v |grep cli |awk '{ print $2}'`
###
### DISPLAY SUMMARY INFORMATION
###
echo " ************************* WELCOME! ***********************"
echo " ######################## HERE IS THE CURRENT #######################"
echo " ########################## SYSTEM SUMMARY ##########################"
uptime
echo ""
echo " Hostname: $HOSTNAME"
echo " Primary IP: $IPADDR"
echo ""
echo " System Version Information"
echo " ------------------------------"
echo " WHM Version: $WHMVER"
echo " PHP Version: $PHPVERSION"
echo " Kernel base: $KERNFO"
echo " OS Version : $OSVER"
echo ""
echo " Service Summary"
echo " ------------------------------"
echo " Apache $APACHEOUTPUT"
echo " MySQL $MYSQLOUTPUT"
echo " Exim $EXIMOUTPUT"
echo ""
echo ""
echo " ######################### EXTENDED INFORMATION #####################"
echo ""
echo " ---Top PID CPU USEAGE---"
echo " $TCU"
echo " ---Top PID RAM USAGE---"
echo " $TRAM"
echo " ---CURRENT SYSTEM MEMORY USAGE ---"
echo " $MEMCT"
echo "-- IOstat RECORD (RUN INTERACTIVELY FOR REALTIME COUNTS)---"
echo " $IOCT"
| true
|
e9e18ea92a5f9fb058f18127749299ec659fc01a
|
Shell
|
XingyuXu-cuhk/Landuse_DL
|
/thawslumpScripts/exe_mrcnn.sh
|
UTF-8
| 2,798
| 3.125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#introduction: Run the whole process of mapping thaw slumps base on Mask_RCNN
#
#authors: Huang Lingcao
#email:huanglingcao@gmail.com
#add time: 11 November, 2018
#MAKE SURE the /usr/bin/python, which is python2 on Cryo06
export PATH=/usr/bin:$PATH
# python2 on Cryo03, tensorflow 1.6
# NOTE(review): this prepend wins over the /usr/bin prepend above, so
# the anaconda2 python is what actually runs.
export PATH=/home/hlc/programs/anaconda2/bin:$PATH
## cudnn 7.1.4 for tensorflow 1.12
export LD_LIBRARY_PATH=~/programs/cudnn-9.0-linux-x64-v7.1/cuda/lib64:$LD_LIBRARY_PATH
# Exit immediately if a command exits with a non-zero status. E: error trace
set -eE -o functrace
# Keep both code checkouts up to date before running.
eo_dir=~/codes/PycharmProjects/Landuse_DL
cd ${eo_dir}
git pull
cd -
cd ~/codes/PycharmProjects/object_detection/yghlc_Mask_RCNN
git pull
cd -
## modify according to test requirement or environment
#set GPU on Cryo06
export CUDA_VISIBLE_DEVICES=1
#set GPU on Cryo03
# NOTE(review): this second assignment overrides the Cryo06 setting —
# only one of the two lines should be active per machine.
export CUDA_VISIBLE_DEVICES=0,1
gpu_num=2
para_file=para_mrcnn.ini
if [ ! -f $para_file ]; then
echo "File ${para_file} not exists in current folder: ${PWD}"
exit 1
fi
################################################
SECONDS=0
## remove previous data or results if necessary
#${eo_dir}/thawslumpScripts/remove_previous_data.sh ${para_file}
#
## get a ground truth raster if it did not exists or the corresponding shape file gets update
#${eo_dir}/thawslumpScripts/get_ground_truth_raster.sh ${para_file}
#
##extract sub_images based on the training polgyons
#${eo_dir}/thawslumpScripts/get_sub_images.sh ${para_file}
################################################
## preparing training images.
# there is another script ("build_RS_data.py"), but seem have not finished. 26 Oct 2018 hlc
#${eo_dir}/thawslumpScripts/split_sub_images_outTIF.sh ${para_file}
##${eo_dir}/thawslumpScripts/training_img_augment.sh ${para_file}
##separate the images to training and validation portion, add "-s" to shuffle the list before splitting
#${eo_dir}/datasets/train_test_split.py list/trainval.txt -s
#exit
duration=$SECONDS
echo "$(date): time cost of preparing training: ${duration} seconds">>"time_cost.txt"
SECONDS=0
################################################
## training
~/programs/anaconda3/bin/python3 ${eo_dir}/thawslumpScripts/thawS_rs_maskrcnn.py train --para_file=${para_file} --model='coco'
duration=$SECONDS
echo "$(date): time cost of training: ${duration} seconds">>"time_cost.txt"
SECONDS=0
################################################
################################################
## inference and post processing, including output "time_cost.txt"
${eo_dir}/thawslumpScripts/inf_mrcnn.sh ${para_file}
${eo_dir}/thawslumpScripts/postProc.sh ${para_file}
${eo_dir}/thawslumpScripts/accuracies_assess.sh ${para_file}
################################################
## backup results
${eo_dir}/thawslumpScripts/backup_results.sh ${para_file} 1
| true
|
a679f721b40adad46d2a32b62fe17bb2f932cccb
|
Shell
|
ohmyzsh/ohmyzsh
|
/plugins/qrcode/qrcode.plugin.zsh
|
UTF-8
| 486
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
# Imported and improved from https://qrcode.show/, section SHELL FUNCTIONS
_qrcode_show_message() {
    # Tell the user how to terminate interactive stdin input.
    printf '%s\n' "Type or paste your text, add a new blank line, and press ^d"
}
qrcode () {
    # Render a QR code for the arguments; with no args, read stdin.
    local input="$*"
    if [ -z "$input" ]; then
        _qrcode_show_message
        input="@/dev/stdin"
    fi
    curl -d "$input" https://qrcode.show
}
qrsvg () {
    # Same as qrcode, but request SVG output.
    local input="$*"
    if [ -z "$input" ]; then
        _qrcode_show_message
        input="@/dev/stdin"
    fi
    curl -d "$input" https://qrcode.show -H "Accept: image/svg+xml"
}
| true
|
e195cb68278c69082857a8bc84590ed2278d41cf
|
Shell
|
emiljas/awsProject
|
/linuxScripts/startup
|
UTF-8
| 829
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision an Ubuntu instance: node toolchain, s3cmd, image tools,
# then substitute config values into *-template files.
apt-get -y install npm
npm install
npm install -g grunt-cli
apt-get -y install s3cmd
apt-get -y install nodejs-legacy
apt-get -y install graphicsmagick
apt-get -y install imagemagick
grunt browserify
# AWS credentials are passed as the first two script arguments.
AWS_ACCESS_KEY=$1
AWS_SECRET_KEY=$2
touch .s3cfg
# NOTE(review): sed on a freshly touched (empty) .s3cfg substitutes
# nothing — the file presumably needs a template first; confirm.
sed --in-place "s/access_key/access_key = $AWS_ACCESS_KEY/" .s3cfg
sed --in-place "s|secret_key|secret_key = $AWS_SECRET_KEY|" .s3cfg
s3cmd -c .s3cfg get s3://emil-project/config/config /tmp/config
while read line; do
IFS=':' read -ra config <<< "$line"
echo ${config[0]}
echo ${config[1]}
# NOTE(review): the sed program is single-quoted, so ${config[0]} and
# ${config[1]} are passed literally and never expanded — this loop
# cannot substitute the config values as apparently intended.
find -name '*.js-template' -exec sed -i 's/${config[0]}/${config[1]}/g' {} +
done </tmp/config
for file in $(find -name "*-template"); do
# NOTE(review): $basename is never set, so dirname/filename are both
# derived from an empty string; and $(unknown) executes a command
# named "unknown".  This rename loop looks unfinished.
dirname="${basename%/[^/]*}"
filename="${basename:${#dirname} + 1}"
mv "$file" "${dirname}/$(unknown).js"
done
rm /tmp/config
| true
|
4a99d31bdd6ff1a9a6403fff4e66ae7475d56470
|
Shell
|
felixduvallet/felixd-admin
|
/dot-files/bash/bashrc.d/bashrc.armarx
|
UTF-8
| 1,059
| 3.453125
| 3
|
[] |
no_license
|
# -*- shell-script -*-
# ------------------------------------------------------------ #
#
# This file is located in the .bashrc.d directory. It will be
# run when .bashrc is loaded.
#
# ------------------------------------------------------------
# ArmarX is located here: https://gitlab.com/ArmarX/
# http://h2t-projects.webarchiv.kit.edu/Projects/ArmarX/
# ------------------------------------------------------------ #
# Enable armarx tab completion if it is installed (use `hash` to check).
if hash armarx 2>/dev/null; then
eval "$(register-python-argcomplete armarx)"
eval "$(register-python-argcomplete armarx-dev)"
fi
# Helper scripts to start & stop scenarios more quickly.
# Source ./startScenario.sh from the current directory, if present.
axgo() {
    SCENARIO_FILE='./startScenario.sh'
    # BUG FIX: the original tested $SETUP_FPATH, which is never defined,
    # so the "No such file" branch always ran.
    if [ -f "$SCENARIO_FILE" ]
    then
        . "$SCENARIO_FILE"
    else
        echo "No such file: $SCENARIO_FILE"
    fi
}
# Source ./stopScenario.sh twice — once normally, once with arg 9
# (force-stop) — if the script exists in the current directory.
axstop() {
    SCENARIO_FILE='./stopScenario.sh'
    # BUG FIX: the original tested $SETUP_FPATH, which is never defined,
    # so the "No such file" branch always ran.
    if [ -f "$SCENARIO_FILE" ]
    then
        . "$SCENARIO_FILE" && . "$SCENARIO_FILE" 9
    else
        echo "No such file: $SCENARIO_FILE"
    fi
}
| true
|
0d4a1cee9c54b911871068a16a7cb1c9ca316d28
|
Shell
|
DivvyPayHQ/web-homework
|
/elixir/test-entrypoint.sh
|
UTF-8
| 646
| 3.453125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Docker entrypoint script.

# Block until Postgres accepts connections, printing progress.
until pg_isready -q -h $POSTGRES_HOST -p 5432 -U $POSTGRES_USER; do
    echo pg_isready -h $POSTGRES_HOST -p 5432 -U $POSTGRES_USER
    echo $(pg_isready -h $POSTGRES_HOST -p 5432 -U $POSTGRES_USER)
    echo "$(date) - waiting for database to start"
    sleep 2
done

# Create, migrate, and seed the database if it doesn't exist yet.
if [[ -z $(psql -Atqc "\\list $POSTGRES_DATABASE") ]]; then
    echo "Database $POSTGRES_DATABASE does not exist. Creating..."
    mix ecto.create
    mix ecto.migrate
    mix run priv/repo/seeds.exs
    echo "Database $POSTGRES_DATABASE created."
fi

# Replace this shell with the test run so signals reach mix directly.
exec mix test
| true
|
c5a1f848bd810f0347b3c89bbaca35be9519fb70
|
Shell
|
koenverburg/dotfiles
|
/macos/.zshrc
|
UTF-8
| 2,053
| 2.5625
| 3
|
[] |
no_license
|
# oh-my-zsh location and default editors/shell.
export ZSH="$HOME/.oh-my-zsh"
export VISUAL="nvim"
export EDITOR="nvim"
export SHELL="zsh"
plugins=(
git
)
source $ZSH/oh-my-zsh.sh
# source ~/.bin/tmuxinator.zsh
## git
export REVIEW_BASE="master"
# Ruby
eval "$(rbenv init -)" #init rbnenv for ruby
# Vim
alias vim=/usr/local/bin/nvim
# tmux
alias mux='tmuxinator'
# Make aliases
alias mt='make test'
# Yarn Aliases
alias ys='yarn serve'
alias yd='yarn dist'
alias ye='yarn export'
# Nuke and reinstall node_modules.
alias killbill='rm -f ./yarn.lock;rm -rf ./node_modules;yarn install'
# project jumping
alias dot='cd ~/code/github/dotfiles'
# git aliases
alias gf='git fetch --prune'
alias gs='git status -sb'
alias glo='git log --oneline'
alias gl='git log'
alias gpd='git pull'
alias gpo='git push origin'
alias gpuo='git push -u origin'
# docker
alias dlsc='docker container ls -a' # -a because I want to see ALL
alias dlsi='docker images -a' # -a because I want to see ALL
alias dps='docker ps'
alias dcud='docker-compose up -d'
alias dcd='docker-compose down'
# Remove dangling (untagged) images.
alias dockerclean='docker rmi $(docker images --filter "dangling=true" -q --no-trunc)'
# Aliases for ls
alias ll='ls -lh'
alias l='ls -lhA'
alias lsa='ls -a'
alias rm='rm -i'
alias weather='curl http://wttr.in'
alias t=/usr/local/Cellar/tilt/0.14.1/bin/tilt
# Mark the current repository as a personal project (local git identity).
# BUG FIX: `alias own=private_repo()` is a parse error; alias to the
# function name instead.
alias own='private_repo'
function private_repo() {
    # BUG FIX: the original passed a literal "=" argument
    # (git config user.name = "..."), which git rejects.
    git config user.name "Koen Verburg"
    git config user.email "creativekoen@gmail.com"
}
## NVM
# export NVM_DIR=~/.nvm
#source $(brew --prefix nvm)/nvm.sh
## python
#alias python="/usr/local/bin/python3"
## Fixes fzf searching in git ignored files and folders
# export FZF_DEFAULT_COMMAND='ag --hidden --ignore .git -g ""'
# export TYPEWRITTEN_MULTILINE=false
# export TYPEWRITTEN_CURSOR="underscore"
# Use the "pure" zsh prompt.
autoload -U promptinit; promptinit
prompt pure
export RUBY_CONFIGURE_OPTS="--with-openssl-dir=$(brew --prefix openssl@1.1)"
# Elixir/Erlang version managers (kiex/kerl), if installed.
[[ -s "$HOME/.kiex/scripts/kiex" ]] && source "$HOME/.kiex/scripts/kiex"
export PATH=$PATH:"$HOME/.kiex/elixirs/elixir-1.10.2/bin":$HOME/.kerl/21.3/bin:/usr/local/Cellar/tilt/0.14.1/bin/
#"$HOME/usr/local/lib/ruby/gems/2.7.0/bin"
| true
|
27b9890f24ac7aec4bebbb90b1d9882108a6146a
|
Shell
|
akifuji/blockchain-simulator
|
/server/test_3nodes.sh
|
UTF-8
| 5,006
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/sh
# Integration test for a 3-node blockchain simulator; nodes listen on
# ports 50082 (node1), 50083 (node2), 50084 (node3).
# clock 0
# node1: idle
# node2: idle
# node3: idle
curl -X GET http://localhost:50082/start
curl -X GET http://localhost:50083/start
curl -X GET http://localhost:50084/start
# Submit one transaction to node1.
curl -H "Content-type: application/json" -X POST -d '{"sender": "aki", "recipient": "so", "value": 3}' localhost:50082/tx/add
# NOTE(review): this is named node1_status but queries port 50084
# (node3) — confirm which node the clock-0 check is meant to target.
node1_status=$(curl -X GET http://localhost:50084/status)
if [ "$node1_status" != '{"clock": 0, "status": 1}' ]; then
echo {"clock": 0, "status": 1} expected, but got $node1_status
exit 1
fi
# The tx pool is still empty before the clock advances.
node1_txs=$(curl -X GET http://localhost:50082/txPool/all)
if [ "$node1_txs" != '[]' ]; then
echo [] expected, but got $node1_txs
exit 1
fi
# clock 1
# node1: received_tx
# node2: idle
# node3: idle
curl -X GET http://localhost:50082/clock/1
curl -X GET http://localhost:50083/clock/1
curl -X GET http://localhost:50084/clock/1
node1_status=$(curl -X GET http://localhost:50082/status)
if [ "$node1_status" != '{"clock": 1, "status": 6}' ]; then
echo {"clock": 1, "status": 6} expected, but got $node1_status
exit 1
fi
node2_status=$(curl -X GET http://localhost:50083/status)
if [ "$node2_status" != '{"clock": 1, "status": 1}' ]; then
echo {"clock": 1, "status": 1} expected, but got $node2_status
exit 1
fi
# BUG FIX: node3 listens on 50084 (not 50083), and this check must use
# node3_status — the original fetched node2 again and re-tested
# $node2_status, so node3 was never actually verified.
node3_status=$(curl -X GET http://localhost:50084/status)
if [ "$node3_status" != '{"clock": 1, "status": 1}' ]; then
echo {"clock": 1, "status": 1} expected, but got $node3_status
exit 1
fi
node1_txs=$(curl -X GET http://localhost:50082/txPool/all)
if [ "$node1_txs" != '[{"sender": "aki", "recipient": "so", "value": 3}]' ]; then
echo [{"sender": "aki", "recipient": "so", "value": 3}] expected, but got $node1_txs
exit 1
fi
# clock 2
# node1: broadcasted_tx
# node2: received_tx
# node3: received_tx
curl -X GET http://localhost:50082/clock/2
curl -X GET http://localhost:50083/clock/2
curl -X GET http://localhost:50084/clock/2
node1_status=$(curl -X GET http://localhost:50082/status)
if [ "$node1_status" != '{"clock": 2, "status": 4}' ]; then
echo {"clock": 2, "status": 4} expected, but got $node1_status
exit 1
fi
node2_status=$(curl -X GET http://localhost:50083/status)
if [ "$node2_status" != '{"clock": 2, "status": 6}' ]; then
echo {"clock": 2, "status": 6} expected, but got $node2_status
exit 1
fi
node3_status=$(curl -X GET http://localhost:50084/status)
if [ "$node3_status" != '{"clock": 2, "status": 6}' ]; then
echo {"clock": 2, "status": 6} expected, but got $node3_status
exit 1
fi
node2_txs=$(curl -X GET http://localhost:50083/txPool/all)
if [ "$node2_txs" != '[{"sender": "aki", "recipient": "so", "value": 3}]' ]; then
echo [{"sender": "aki", "recipient": "so", "value": 3}] expected, but got $node2_txs
exit 1
fi
# BUG FIX: this check must test $node3_txs — the original compared
# $node2_txs a second time, so node3's tx pool was never verified.
node3_txs=$(curl -X GET http://localhost:50084/txPool/all)
if [ "$node3_txs" != '[{"sender": "aki", "recipient": "so", "value": 3}]' ]; then
echo [{"sender": "aki", "recipient": "so", "value": 3}] expected, but got $node3_txs
exit 1
fi
# clock 3
# node1: idle
# node2: broadcasted_tx
# node3: broadcasted_tx
curl -X GET http://localhost:50082/clock/3
curl -X GET http://localhost:50083/clock/3
curl -X GET http://localhost:50084/clock/3
node1_status=$(curl -X GET http://localhost:50082/status)
if [ "$node1_status" != '{"clock": 3, "status": 1}' ]; then
echo {"clock": 3, "status": 1} expected, but got $node1_status
exit 1
fi
node2_status=$(curl -X GET http://localhost:50083/status)
if [ "$node2_status" != '{"clock": 3, "status": 4}' ]; then
echo {"clock": 3, "status": 4} expected, but got $node2_status
exit 1
fi
node3_status=$(curl -X GET http://localhost:50084/status)
if [ "$node3_status" != '{"clock": 3, "status": 4}' ]; then
echo {"clock": 3, "status": 4} expected, but got $node3_status
exit 1
fi
# clock 4
# node1: idle
# node2: idle
# NOTE(review): node3 (50084) no longer receives clock ticks from here
# on — confirm that dropping it for the mining phase is intentional.
curl -X GET http://localhost:50082/clock/4
curl -X GET http://localhost:50083/clock/4
curl -X GET http://localhost:50082/mine
node1_status=$(curl -X GET http://localhost:50082/status)
if [ "$node1_status" != '{"clock": 4, "status": 1}' ]; then
echo {"clock": 4, "status": 1} expected, but got $node1_status
exit 1
fi
node2_status=$(curl -X GET http://localhost:50083/status)
if [ "$node2_status" != '{"clock": 4, "status": 1}' ]; then
echo {"clock": 4, "status": 1} expected, but got $node2_status
exit 1
fi
# clock5
# node1: mining
# node2: idle
curl -X GET http://localhost:50082/clock/5
curl -X GET http://localhost:50083/clock/5
node1_status=$(curl -X GET http://localhost:50082/status)
if [ "$node1_status" != '{"clock": 5, "status": 2}' ]; then
echo {"clock": 5, "status": 2} expected, but got $node1_status
exit 1
fi
node2_status=$(curl -X GET http://localhost:50083/status)
if [ "$node2_status" != '{"clock": 5, "status": 1}' ]; then
echo {"clock": 5, "status": 1} expected, but got $node2_status
exit 1
fi
# curl -H "Content-type: application/json" -X POST -d '{"addr": "192.168.1.8", "port": 65003}' localhost:50082/peer/clear
| true
|
97cf1e5a0ef3e1aded8253c9ddf90b633c361896
|
Shell
|
rushiagr/jenkins-job-config
|
/cinder-jiocloud.sh
|
UTF-8
| 1,274
| 2.953125
| 3
|
[] |
no_license
|
# Jenkins job: sync upstream cinder branches into the jiocloud fork,
# gated on a py27 tox run.  $GIT_BRANCH is set by the Jenkins Git plugin.
if [ "$GIT_BRANCH" = "origin/master" ]
then
# Push rebased changes to jiocloud/master, and merged
# changes to jiocloud/devmaster
git checkout -b jiocloud_master jiocloud/master;
git rebase origin/master;
tox -e py27;
git push jiocloud jiocloud_master:master --force;
# If rebase succeeds, then merge too will!
# NOTE!: merge may not, as jiocloud/devmaster might have
# changed, but that is user's fault!
git checkout -b jiocloud_devmaster jiocloud/devmaster;
git branch -D jiocloud_master;
git merge origin/master;
git push jiocloud jiocloud_devmaster:devmaster;
git checkout master;
git branch -D jiocloud_devmaster;
elif [ "$GIT_BRANCH" = "jiocloud/manualmaster" ]
then
git checkout -b jiocloud_manualmaster;
tox -e py27;
git push jiocloud jiocloud_manualmaster:master --force;
git checkout master;
git branch -D jiocloud_manualmaster;
elif [ "$GIT_BRANCH" = "jiocloud/devmaster" ]
then
git checkout -b jiocloud_devmaster jiocloud/devmaster;
git rebase jiocloud/master;
tox -e py27;
git push jiocloud jiocloud_devmaster:master --force;
git checkout master;
git branch -D jiocloud_devmaster;
elif [ "$GIT_BRANCH" = "jiocloud/master" ]
then
echo "Not building jiocloud/master branch. Skipping";
else
# Any other branch fails the build.
exit 1;
fi
| true
|
73b79d279370d581db480fb25a75b4ef73d891e5
|
Shell
|
nortonmd/dx-bin
|
/mkpkg
|
UTF-8
| 230
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Create a Salesforce DX unlocked package.
# Usage: mkpkg [package_name] — prompts when no argument is given.
if [[ $# -eq 1 ]]; then
    package_name=$1
else
    read -p "Package Name? " package_name
fi
# Show the exact command before running it.
echo "sfdx force:package2:create -n ${package_name} -o Unlocked -e"
sfdx force:package2:create -n ${package_name} -o Unlocked -e
| true
|
88cf112188469af07725bf5c93cc3f2ec59835eb
|
Shell
|
nk-gears/terraform
|
/cfn-templates/aip-bi/lambda/archive-s3/s3-archive-lambda.sh
|
UTF-8
| 468
| 2.9375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Create the Lambda that copies S3 PUT objects to the Glacier archive
# bucket, and fail loudly if creation does not succeed.
set -x
aws lambda create-function --function-name s3-copy2archive-lambda \
    --runtime python2.7 \
    --role "arn:aws:iam::661072482170:role/s3_lambda_copy" \
    --handler lambda_handler.lambda_handler \
    --zip-file fileb://lambda_handler.zip \
    --description "Copy a S3 Put object to the Archive Bucket, Glacier" \
    --timeout 300 \
    --memory-size 1024
# BUG FIX: capture the aws exit status immediately.  The original ran a
# second `set -x` here (which resets $?), so the failure check below
# could never trigger.
rv=$?
set +x
if [ "$rv" -ne "0" ]; then
    echo "failed to install lambda function"
    exit 1
fi
| true
|
e97fec891eff8fd7d152b6c2dddce2144f5eecc6
|
Shell
|
aryelgois/vcsh-php
|
/bin/,setup-composer-global
|
UTF-8
| 198
| 2.9375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Run the shared composer bootstrap, then install the phar for this user.
if ,setup-composer; then
    mkdir -p ~/.local/bin
    mv composer.phar ~/.local/bin/composer
else
    >&2 echo 'ERROR: Could not install Composer'
    exit 1
fi
| true
|
97c55b68ae092aa0a07b719088841090a3b1c4e7
|
Shell
|
StijnRuts/Dot-scripts
|
/create-new-user.sh
|
UTF-8
| 1,456
| 3.984375
| 4
|
[
"LicenseRef-scancode-philippe-de-muyter"
] |
permissive
|
#!/usr/bin/env bash
# Provides package_install used below.
source ./shared-functions.sh
# Loop while user name is blank, has spaces, or has capital letters in it.
USER=""
while [[ ${#USER} -eq 0 ]] || [[ $USER =~ \ |\' ]] || [[ $USER =~ [^a-z0-9\ ] ]]; do
read -p "username:" USER
done
# Add the user
# -m creates the user's home directory
# -g the user's initial login group
# -G a list of supplementary groups
# -s defines the user's default login shell
useradd ${USER} -m -g users -G storage,power,network,video,audio,lp -s /bin/bash
# Prompt for password
echo "password:"
passwd $USER
# Setup sudo access
read -r -p "Setup sudo for $USER? [y/N] " response
if [[ $response =~ ^([yY][eE][sS]|[yY])$ ]]; then
package_install "sudo"
# set sudo access for the 'wheel' group in /etc/sudoers.d/
# instead of editing /etc/sudoers directly
echo "%wheel ALL=(ALL) ALL" > /etc/sudoers.d/wheel
# Add user to the 'wheel' group
# NOTE(review): groupadd errors if 'wheel' already exists — harmless
# here but noisy; gpasswd then adds the user regardless.
groupadd wheel
gpasswd -a ${USER} wheel
fi
# Set up basic configuration files and permissions for user
cp /etc/skel/.bashrc /home/${USER}
chown -R ${USER}:users /home/${USER}
# Create user dirs
package_install "xdg-user-dirs"
su - ${USER} -c "xdg-user-dirs-update"
# Passing aliases
# If you use a lot of aliases, you might have noticed that they do not carry over to the root account when using sudo.
# However, there is an easy way to make them work. Simply add the following to your ~/.bashrc or /etc/bash.bashrc:
# alias sudo='sudo '
| true
|
753843d858ad658dd48288aca686c70243b47a02
|
Shell
|
kgshukla/jboss-datagrid-ocp-upgrade
|
/populatejdg.sh
|
UTF-8
| 279
| 2.859375
| 3
|
[] |
no_license
|
# Populate the JBoss Data Grid REST cache with 2000 entries via the
# OpenShift route of the datagrid-app service.
JDG_ROUTE=http://$(oc get route datagrid-app -o template --template='{{.spec.host}}')
i=1;
while [ $i -le 2000 ]
do
# NOTE(review): the body {"Value N"} is not valid JSON despite the
# Content-type header — the REST cache stores the raw bytes, but
# confirm this payload shape is intended.
curl -X POST -i -H "Content-type:application/json" -d "{\"Value $i\"}" $JDG_ROUTE/rest/mycache/$i;
(( i++ ))
echo "value of i is = $i";
# Throttle to ~10 requests/second.
sleep 0.1;
done
| true
|
2cbf30918546d77ebb87431c723e004da10683f9
|
Shell
|
frap/atea-dotfiles
|
/source/60_net.sh
|
UTF-8
| 1,769
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
# source secrets such as VCENTER_PASS
if [ -f ~/.secrets ]; then
. ~/.secrets
fi
# IP addresses
alias wanip="dig +short myip.opendns.com @resolver1.opendns.com"
alias myip="curl icanhazip.com" # Your public IP address
alias whois="whois -h whois-servers.net"
# Flush Directory Service cache
alias flush="dscacheutil -flushcache"
# View HTTP traffic
# Dumps en1 traffic and extracts Host:/GET lines (macOS interface name).
alias httpdump="sudo tcpdump -i en1 -n -s 0 -w - | grep -a -o -E \"Host\: .*|GET \/.*\""
# Ping the specified server (or the default 8.8.8.8) and say "ping"
# through the speaker every time a ping is successful. Based on an
# idea from @gnarf.
# Ping a host (default 8.8.8.8) and speak "ping" on every successful reply.
# $1 - host or IP to ping (optional).
# Picks the first available text-to-speech command: macOS `say`, Linux `spd-say`.
function pingtest() {
  local c
  # `command -v` is the builtin, portable probe for a tool (vs. external `which`).
  for c in say spd-say; do [[ "$(command -v "$c")" ]] && break; done
  # NOTE(review): if neither tool exists, $c stays "spd-say" and the backtick
  # call inside perl fails silently — same as the original behavior.
  ping "${1:-8.8.8.8}" | perl -pe '/bytes from/ && `'"$c"' ping`'
}
# vSphere helpers — only active when govc is available.
# NOTE(review): has_govc is assumed to be defined in another sourced
# dotfile — confirm.
if has_govc
then
# NOTE(review): the vCenter password is embedded in GOVC_URL and will be
# visible in the environment of child processes.
export GOVC_URL=https://administrator@vsphere.local:${VCENTER_PASS}@vcenter.atea.dev
export GOVC_TLS_KNOWN_HOSTS=~/.govc_known_hosts
export GOVC_INSECURE=true
# Print the first IP address of the named VM's first guest NIC.
function vm.ip() {
govc vm.info -json $1 | jq -r .VirtualMachines[].Guest.Net[].IpAddress[0]
}
alias vm.power.on='govc vm.power -on=true ';
alias vm.power.off='govc vm.power -off=true';
alias vm.dev.ls='govc device.ls -vm ';
fi
# Consul shortcut, only when the binary is installed.
if hash consul 2>/dev/null
then
alias cons.mem='consul members';
fi
# Timing breakdown for HTTP requests; expects a ~/.curl-format template.
if hash curl 2>/dev/null
then
alias curl-trace='curl -w "@$HOME/.curl-format" -o /dev/null -s'
fi
# Print "<interface>=<IPv4>" (via e_arrow) for the given interface.
# $1 - interface name (optional, default eth0).
# Uses perl's Regexp::Common to parse ifconfig output when the module is
# installed, otherwise falls back to iproute2.
getIP() {
  # NOTE(review): curr_hostname is assigned but never used in this function;
  # kept as a global in case other dotfiles read it — TODO confirm and remove.
  curr_hostname=$(hostname -s)
  interface=${1:-eth0}
  local ip
  # Test the command directly instead of inspecting $? afterwards.
  if perldoc -l Regexp::Common >/dev/null 2>&1; then
    ip=$(ifconfig "$interface" | perl -MRegexp::Common -lne 'print $1 if /($RE{net}{IPv4})/' | grep -v "127.0.0.1")
  else
    ip=$(ip -o -4 add list "$interface" | awk '{print $4}' | cut -d/ -f1)
  fi
  e_arrow "$interface=$ip"
}
| true
|
eb894b6352b831cc0be4d6c28b6e45f25a883364
|
Shell
|
gaffonso/xen_management_scripts
|
/install_and_configure_redis2611.sh
|
UTF-8
| 1,893
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/sh
#
# Install Redis 2.6.11 from source, enable password auth, register it as
# a chkconfig service and start it.
#
# run with:
# curl -s https://raw.github.com/gaffonso/xen_management_scripts/master/install_and_configure_redis2611.sh | bash -s password
#
# Note: Script assumes
# - vm was created using the xen_create_vm.sh script (or equivalent)
# - logged-in as root
#set -o xtrace

# check params: exactly one argument (the redis password) is required
if [ $# -ne 1 ]; then
  echo "Missing redis password."
  exit 1
fi
REDIS_PASSWORD=$1

echo
echo "---------------------------------------"
echo "Installing and Configuring Redis 2.6.11"
echo "---------------------------------------"
echo

# Must be root to run this script
if [ "$(id -u)" != "0" ]; then
  echo "This script must be run as root" 1>&2
  exit 1
fi

## download, build, install and configure redis
# download
# NOTE(review): redis.googlecode.com is long gone; this URL likely needs
# updating (e.g. https://download.redis.io/releases/) — kept as-is.
wget http://redis.googlecode.com/files/redis-2.6.11.tar.gz -P /stor/downloads/
tar zxf /stor/downloads/redis-2.6.11.tar.gz -C /usr/local

# build (and install as command-line program)
# FIX: guard the cd — without it a failed download/extract would run the
# following yum/make/sed commands against whatever the current directory is.
cd /usr/local/redis-2.6.11/ || { echo "redis source directory missing" 1>&2; exit 1; }
yum -y install make gcc tcl.x86_64 # required to build redis
make distclean # need the distclean otherwise we get complaints about jemalloc being missing/out-of-date
make test
make install

# configure: enable password authentication in the shipped config
sed -i.bak "s|# requirepass foobared|requirepass $REDIS_PASSWORD|" /usr/local/redis-2.6.11/redis.conf

# install as system service
wget https://raw.github.com/gaffonso/xen_management_scripts/master/install_and_configure_redis2611-redis-server.chkconfig -P /stor/downloads/
mv /stor/downloads/install_and_configure_redis2611-redis-server.chkconfig /etc/init.d/redis-server
chmod 755 /etc/init.d/redis-server
chkconfig --add redis-server
chkconfig --level 345 redis-server on
service redis-server start & # todo: ampersand should not be necessary to put into background

# Summary: show versions and service status for verification.
echo
echo "--------------"
echo "Setup Complete"
echo "--------------"
echo
hostname
echo
redis-cli --version
echo
redis-server --version
echo
service redis-server status
| true
|
45010737618893f45272d975c402cb60d231ad7d
|
Shell
|
opengauss-mirror/openGauss-server
|
/docker/dockerfiles/buildDockerImage.sh
|
UTF-8
| 5,246
| 4
| 4
|
[
"LicenseRef-scancode-mulanpsl-2.0-en",
"LicenseRef-scancode-unknown-license-reference",
"PostgreSQL",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"LicenseRef-scancode-unicode",
"LicenseRef-scancode-warranty-disclaimer",
"curl",
"GPL-1.0-or-later",
"LGPL-2.1-or-later",
"LGPL-2.1-only",
"CC-BY-4.0",
"LicenseRef-scancode-protobuf",
"OpenSSL",
"LicenseRef-scancode-generic-export-compliance",
"X11-distribute-modifications-variant",
"LicenseRef-scancode-other-permissive",
"MIT",
"NCSA",
"Python-2.0",
"LicenseRef-scancode-openssl",
"LicenseRef-scancode-ssleay-windows",
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"GPL-2.0-only",
"BSL-1.0",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"Zlib"
] |
permissive
|
#!/bin/bash
# Build docker image
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2028. All rights reserved.
#
#openGauss is licensed under Mulan PSL v2.
#You can use this software according to the terms and conditions of the Mulan PSL v2.
#You may obtain a copy of Mulan PSL v2 at:
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
#-------------------------------------------------------------------------
#
# buildDockerImage.sh
# Build docker image
#
# IDENTIFICATION
# GaussDBKernel/server/docker/dockerfiles/buildDockerImage.sh
#
#-------------------------------------------------------------------------
# Print CLI usage. The version list is generated from the sub-directories
# of the current working directory (one directory per buildable version).
usage() {
cat << EOF
Usage: buildDockerImage.sh -v [version] [-i] [Docker build option]
Builds a Docker Image for openGauss
Parameters:
-v: version to build
Choose one of: $(for i in $(ls -d */); do echo -n "${i%%/} "; done)
-i: ignores the SHA256 checksums
LICENSE UPL 1.0
EOF
}
# Validate packages
# Verify the SHA256 checksums of the downloaded packages.
# Globals read: arch ("amd64"/"arm64", set in main), VERSION.
# Expects to run inside the version directory, which contains
# sha256_file_amd64 / sha256_file_arm64. Exits 1 on mismatch; silently
# skips the check when sha256sum is unavailable.
checksum_packages() {
if [ "${arch}" = "amd64" ]; then
sha256_file="sha256_file_amd64"
else
sha256_file="sha256_file_arm64"
fi
if hash sha256sum 2>/dev/null; then
echo "Checking if required packages are present and valid..."
if ! sha256sum -c "$sha256_file"; then
echo "SHA256 for required packages to build this image did not match!"
echo "Make sure to download missing files in folder $VERSION."
exit 1;
fi
else
echo "Ignored SHA256 sum, 'sha256sum' command not available.";
fi
}
# Check Docker version
# Exit 1 when the Docker server is older than $MIN_DOCKER_VERSION.
check_docker_version() {
# Get Docker Server version
echo "Checking Docker version."
DOCKER_VERSION=$(docker version --format '{{.Server.Version | printf "%.5s" }}'|| exit 0)
# Remove dot in Docker version
DOCKER_VERSION=${DOCKER_VERSION//./}
if [ -z "$DOCKER_VERSION" ]; then
# docker could be aliased to podman and errored out (https://github.com/containers/libpod/pull/4608)
# NOTE(review): checkPodmanVersion is NOT defined anywhere in this file;
# unless it is provided externally this call fails with "command not
# found" (and, with no `set -e`, the script keeps going) — confirm.
checkPodmanVersion
elif [ "$DOCKER_VERSION" -lt "${MIN_DOCKER_VERSION//./}" ]; then
echo "Docker version is below the minimum required version $MIN_DOCKER_VERSION"
echo "Please upgrade your Docker installation to proceed."
exit 1;
fi;
}
##############
#### MAIN ####
##############
# Parameters
VERSION="5.0.0"
SKIPCHECKSUM=0
DOCKEROPS=""
MIN_DOCKER_VERSION="17.09"
# Map uname machine names onto Docker architecture labels.
arch=$(case $(uname -m) in i386) echo "386" ;; i686) echo "386" ;; x86_64) echo "amd64";; aarch64)echo "arm64";; esac)
if [ "${arch}" = "amd64" ]; then
DOCKERFILE="dockerfile_amd"
else
DOCKERFILE="dockerfile_arm"
fi
if [ "$#" -eq 0 ]; then
usage;
exit 1;
fi
# NOTE(review): the optstring also declares e, s and x but no case arm
# handles them — presumably leftovers from the upstream Oracle script.
while getopts "hesxiv:o:" optname; do
case "$optname" in
"h")
usage
exit 0;
;;
"i")
SKIPCHECKSUM=1
;;
"v")
VERSION="$OPTARG"
;;
"o")
DOCKEROPS="$OPTARG"
;;
"?")
usage;
exit 1;
;;
*)
# Should not occur
echo "Unknown error while processing options inside buildDockerImage.sh"
;;
esac
done
check_docker_version
# Which Dockerfile should be used?
# NOTE(review): this assignment is a no-op (DOCKERFILE to itself) — another
# leftover from the upstream Oracle script's version switch.
if [ "$VERSION" == "12.1.0.2" ] || [ "$VERSION" == "11.2.0.2" ] || [ "$VERSION" == "18.4.0" ]; then
DOCKERFILE="$DOCKERFILE"
fi;
# openGauss Database Image Name
IMAGE_NAME="opengauss:$VERSION"
# Go into version folder
cd "$VERSION" || {
echo "Could not find version directory '$VERSION'";
exit 1;
}
if [ ! "$SKIPCHECKSUM" -eq 1 ]; then
checksum_packages
else
echo "Ignored SHA256 checksum."
fi
echo "=========================="
echo "DOCKER info:"
docker info
echo "=========================="
# Proxy settings
# Forward any proxy environment variables into the image build.
PROXY_SETTINGS=""
if [ "${http_proxy}" != "" ]; then
PROXY_SETTINGS="$PROXY_SETTINGS --build-arg http_proxy=${http_proxy}"
fi
if [ "${https_proxy}" != "" ]; then
PROXY_SETTINGS="$PROXY_SETTINGS --build-arg https_proxy=${https_proxy}"
fi
if [ "${ftp_proxy}" != "" ]; then
PROXY_SETTINGS="$PROXY_SETTINGS --build-arg ftp_proxy=${ftp_proxy}"
fi
if [ "${no_proxy}" != "" ]; then
PROXY_SETTINGS="$PROXY_SETTINGS --build-arg no_proxy=${no_proxy}"
fi
if [ "$PROXY_SETTINGS" != "" ]; then
echo "Proxy settings were found and will be used during the build."
fi
fi
# ################## #
# BUILDING THE IMAGE #
# ################## #
echo "Building image '$IMAGE_NAME' ..."
# BUILD THE IMAGE (replace all environment variables)
BUILD_START=$(date '+%s')
docker build --force-rm=true --no-cache=true \
$DOCKEROPS $PROXY_SETTINGS \
-t $IMAGE_NAME -f $DOCKERFILE . || {
echo ""
echo "ERROR: openGauss Database Docker Image was NOT successfully created."
echo "ERROR: Check the output and correct any reported problems with the docker build operation."
exit 1
}
# Remove dangling images (intermitten images with tag <none>)
yes | docker image prune > /dev/null
BUILD_END=$(date '+%s')
BUILD_ELAPSED=$(expr $BUILD_END - $BUILD_START)
echo ""
echo ""
cat << EOF
openGauss Docker Image $VERSION is ready to be extended:
--> $IMAGE_NAME
Build completed in $BUILD_ELAPSED seconds.
EOF
| true
|
2ec2af4f406adf5dc859326c9149dbfc549c327d
|
Shell
|
magus/dotfiles
|
/bin/check_bin
|
UTF-8
| 114
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Exit 0 if the first argument resolves to an executable found on PATH,
# exit 1 otherwise.
name="$1"
# FIX: use the bash builtin `type -P` instead of the external `which`:
# it searches PATH only (same semantics) and prints nothing when the
# command is not found.
if [ -z "$(type -P "$name")" ]; then
  exit 1
fi
| true
|
042d436d1b3376cd62038174af02cc86aca0f1e4
|
Shell
|
terence-bigtt/airBerry
|
/backend/scripts/install.sh
|
UTF-8
| 362
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the airberry stack: export the deployment environment
# variables, refuse to run without root, then run each installer in order.

export airberry_user="airberry"
export airberry_home="/home/${airberry_user}"
export service_name="airberry.service"
export service_path="/etc/systemd/system/${service_name}"

# The sub-installers require root privileges.
if (( EUID != 0 )); then
  echo "Please run as root"
  exit
fi

# Run the individual installers in order.
for part in interfaces setupenv airberry raspap; do
  bash "./install-${part}.sh"
done
| true
|
0f8cca0effaeadf6736264e534dbe044b42a9fbe
|
Shell
|
emilsergiev/dotfiles
|
/.bashrc
|
UTF-8
| 1,717
| 2.953125
| 3
|
[] |
no_license
|
#
# ~/.bashrc
#
# If not running interactively, don't do anything
[[ $- != *i* ]] && return
## Useful aliases
alias ls='exa -al --color=always --group-directories-first' # preferred listing
alias la='exa -a --color=always --group-directories-first'  # all files and dirs
alias ll='exa -l --color=always --group-directories-first'  # long format
alias lt='exa -aT --color=always --group-directories-first' # tree listing
alias ..='cd ..'
alias ...='cd ../..'
alias ....='cd ../../..'
alias .....='cd ../../../..'
alias ......='cd ../../../../..'
alias dir='dir --color=auto'
alias vdir='vdir --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
alias aup="pamac upgrade --aur"
alias grubup="sudo update-grub"
alias fixpacman="sudo rm /var/lib/pacman/db.lck"
alias tarnow='tar -acf '
alias untar='tar -zxvf '
alias wget='wget -c '
alias psmem='ps auxf | sort -nr -k 4'
alias psmem10='ps auxf | sort -nr -k 4 | head -10'
alias upd='sudo reflector --latest 5 --age 2 --fastest 5 --protocol https --sort rate --save /etc/pacman.d/mirrorlist && cat /etc/pacman.d/mirrorlist && sudo pacman -Syu && fish_update_completions'
# Hardware Info
alias hw='hwinfo --short'
# Sort installed packages according to size in MB
alias big="expac -H M '%m\t%n' | sort -h | nl"
# List amount of -git packages
alias gitpkg='pacman -Q | grep -i "\-git" | wc -l'
#Cleanup orphaned packages
# FIX: the original used fish-style command substitution `(pacman -Qtdq)`,
# which is a syntax error when the alias is expanded by bash; bash needs
# `$(...)`.
alias cleanup='sudo pacman -Rns $(pacman -Qtdq)'
#get the error messages from journalctl
alias jctl="journalctl -p 3 -xb"
#Recent Installed Packages
alias rip="expac --timefmt='%Y-%m-%d %T' '%l\t%n %v' | sort | tail -200 | nl"
# PS1='[\u@\h \W]\$ '
# Starship
eval "$(starship init bash)"
| true
|
f920e7231bdd933242db161b445ba94d59e10f5b
|
Shell
|
vikassamadhiya1/core-kubernetes-infra---Copy
|
/deploy-server.sh
|
UTF-8
| 2,886
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Deploy the server Terraform stack: ensure the S3 state bucket exists,
# then run terraform plan or plan+apply depending on the first argument.
[ -n "${AWS_ACCESS_KEY_ID}" ] || { echo "AWS_ACCESS_KEY_ID environment variable not defined"; exit 1; }
[ -n "${AWS_SECRET_ACCESS_KEY}" ] || { echo "AWS_SECRET_ACCESS_KEY environment variable not defined"; exit 1; }
[ -n "${PDXC_ENV}" ] || { echo "PDXC_ENV environment variable not defined"; exit 1; }
echo "############# deploy.sh starting ###############"
# Command line parsing
# NOTE(review): an unrecognized first argument leaves terraformAction
# unset, which falls through to the "Invalid terraform action" branch.
if [ "$1" == "--plan-only" ]; then
terraformAction="plan"
elif [ "$1" == "--auto-apply" ]; then
terraformAction="apply"
elif [ "$#" -eq 0 ]; then
terraformAction="apply"
fi
echo "AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION}"
# Setup bucket name
pip show docutils
aws_account=$(aws sts get-caller-identity --output text --query Account)
export TF_VAR_region="${AWS_DEFAULT_REGION}"
export TF_VAR_PDXC_ENV="$PDXC_ENV"
export PDXC_ENV_LOWERCASE=$(echo "$PDXC_ENV" | tr '[:upper:]' '[:lower:]' | sed "s/_/-/g")
export app_three_letter_prefix="ctk"
export TF_VAR_tf_state_bucket="${app_three_letter_prefix}-tfstate-${aws_account}${AWS_DEFAULT_REGION}"
echo "====>TF_VAR_tf_state_bucket=$TF_VAR_tf_state_bucket"
# Create the Terraform state bucket if it does not exist yet.
# us-east-1 must be created WITHOUT a LocationConstraint (S3 API quirk).
if aws s3 ls $TF_VAR_tf_state_bucket 2>&1 | grep -q 'NoSuchBucket';
then
if [ $AWS_DEFAULT_REGION = 'us-east-1' ]
then
echo "Creating state Terraform backend bucket at ${AWS_DEFAULT_REGION}"
aws s3api create-bucket --bucket $TF_VAR_tf_state_bucket --region $TF_VAR_region
else
echo "Creating state Terraform backend bucket at ${AWS_DEFAULT_REGION}"
aws s3api create-bucket --bucket $TF_VAR_tf_state_bucket --region $TF_VAR_region --create-bucket-configuration LocationConstraint="${TF_VAR_region}"
fi
else
echo "Terraform backend bucket already exists."
fi
# NOTE(review): this cd is unchecked; if ./server is missing the terraform
# commands below run in the wrong directory.
cd server
rm -rf .terraform
echo "==============TF Start ================="
terraform --version
terraform init -backend-config "bucket=$TF_VAR_tf_state_bucket" -backend-config "region=$TF_VAR_region" -backend-config "key=$app_three_letter_prefix" -backend-config "encrypt=true"
# Deploy
if [[ "${terraformAction}" == "plan" ]]; then
echo "Plan Section=$AWS_DEFAULT_REGION & $app_three_letter_prefix"
terraform plan -var aws_region=$AWS_DEFAULT_REGION -var app_three_letter_prefix=$app_three_letter_prefix
elif [[ "${terraformAction}" == "apply" ]]; then
# Regular
echo "Apply=$AWS_DEFAULT_REGION & $app_three_letter_prefix"
terraform plan -out tfplan -input=false -var aws_region=$AWS_DEFAULT_REGION -var app_three_letter_prefix=$app_three_letter_prefix
terraform apply -input=false tfplan
else
echo "Invalid terraform action: ${terraformAction}"
# Always return an exit code when it is not handle by the command you use.
exit 2
fi
# NOTE(review): $? here is the status of the last command of whichever
# branch ran above (terraform plan/apply); only status 1 is treated as a
# failure — other non-zero statuses pass through silently.
if [ $? -eq 1 ]; then
echo "====>Terraform failed applying plan "
exit 2
fi
| true
|
5617bf19a05f502227bb156f347898b4217c0c62
|
Shell
|
twosigma/waiter
|
/containers/raven/raven-start
|
UTF-8
| 3,581
| 4.03125
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
#!/bin/sh
# This script takes no argument and uses SERVICE_PORT and PORT0 environment variables
# to setup /etc/envoy/envoy.yaml file.
# SERVICE_PORT and PORT0 must be set
# FORCE_TLS_TERMINATION is optional (true|false, default false)
# HEALTH_CHECK_PROTOCOL is optional (https|http|h2|h2c, default to SERVICE_PROTOCOL)
# HEALTH_CHECK_PORT_INDEX is optional ([0-9], default 0)
# SERVICE_PROTOCOL is optional (https|http|h2|h2c, default http)
# Default the optional variables (":=" assigns when unset or empty).
: ${CONFIG_DEBUG:=0}
: ${FORCE_TLS_TERMINATION:=false}
: ${SERVICE_PROTOCOL:=http}
: ${HEALTH_CHECK_PORT_INDEX:=0}
: ${HEALTH_CHECK_PROTOCOL:=${SERVICE_PROTOCOL}}
# log MESSAGE... — print "[UTC-timestamp] MESSAGE" to stdout.
log() {
  # FIX: quote the date substitution; the format has no spaces today, but
  # an unquoted substitution would silently word-split if that changed.
  printf '[%s] %s\n' "$(date -u +%FT%TZ)" "$*"
}
# die MESSAGE... — log the message and abort the script with status 1.
die() {
  log "$@"
  exit 1
}
# drop_config_section NAME
# Delete the lines between "# BEGIN NAME" and "# END NAME" (inclusive)
# from $CONFIG_YAML, rewriting it in place via $CONFIG_YAML_TMP.
# Relies on the CONFIG_YAML / CONFIG_YAML_TMP globals defined below.
drop_config_section() {
awk "
/# BEGIN ${1}/ { drop=1 }
{ if (!drop) { print \$0 } }
/# END ${1}/ { drop=0 }
" ${CONFIG_YAML} >${CONFIG_YAML_TMP}
mv ${CONFIG_YAML_TMP} ${CONFIG_YAML}
}
# Succeed when the protocol name contains "h2" (case-insensitive),
# i.e. it is an HTTP/2 variant such as h2 or h2c.
is_http2() {
  printf '%s\n' "$1" | grep -iq h2
}
# is_tls FORCE PROTOCOL
# Succeed when FORCE is "true" or PROTOCOL is a TLS protocol (https, h2).
# FIX: use the portable "=" (this file is #!/bin/sh and "==" is a bashism)
# and quote the operands so empty values cannot break the test syntax.
is_tls() {
  [ "$1" = true ] || ( printf '%s\n' "$2" | grep -iqE '^(https|h2)$' )
}
# check if SERVICE_PORT is present
if [ "${SERVICE_PORT}" ]; then
log "SERVICE_PORT=${SERVICE_PORT}"
else
die 'SERVICE_PORT is not set. Exiting raven-start'
fi
# check if PORT0 is present
if [ "${PORT0}" ]; then
log "PORT0=${PORT0}"
else
die 'PORT0 is not set. Exiting raven-start'
fi
CONFIG_YAML=/etc/envoy/envoy.yaml
CONFIG_YAML_TMP=/etc/envoy/envoy.temporary.yaml
CONFIG_YAML_TEMPLATE=/etc/envoy/envoy.template.yaml
SSL_KEY=/var/tmp/certs/sidecar.key
SSL_CERT=/var/tmp/certs/sidecar.cert
# Generate a short-lived self-signed certificate for TLS termination.
mkdir -p $(dirname ${SSL_KEY})
openssl req -x509 -nodes -days 30 -newkey rsa:2048 \
-keyout ${SSL_KEY} -out ${SSL_CERT} \
-subj "/CN=$(hostname -f)"
log 'Substituting variables in configuration template'
# Fill the {{...}} placeholders in the envoy template; '%' is used as the
# sed delimiter for the paths since they contain '/'.
sed -e "s/{{SERVICE_PORT}}/${PORT0}/g" \
-e "s/{{HEALTH_CHECK_PORT}}/$(( PORT0 + HEALTH_CHECK_PORT_INDEX ))/g" \
-e "s/{{PROXY_SERVICE_PORT}}/${SERVICE_PORT}/g" \
-e "s/{{PROXY_HEALTH_CHECK_PORT}}/$(( SERVICE_PORT + HEALTH_CHECK_PORT_INDEX ))/g" \
-e "s/{{HOST}}/$(hostname -f)/g" \
-e "s%{{SSL_KEY}}%${SSL_KEY}%g" \
-e "s%{{SSL_CERT}}%${SSL_CERT}%g" \
${CONFIG_YAML_TEMPLATE} >${CONFIG_YAML}
if [ ${HEALTH_CHECK_PORT_INDEX} -gt 0 ]; then
log "HEALTH_CHECK_PORT_INDEX is positive: ${HEALTH_CHECK_PORT_INDEX}"
else
log 'Removing separate health-check port'
drop_config_section 'health-check backend'
fi
# use either http1 or http2 to talk to back-end cluster
if is_http2 ${SERVICE_PROTOCOL}; then
log 'Removing http1 options for http2 cluster'
sed -i -E '/# WHEN (up|down)stream-http1/d' ${CONFIG_YAML}
else
log 'Removing http2 options for http1 cluster'
sed -i -E '/# WHEN (up|down)stream-http2/d' ${CONFIG_YAML}
fi
# Strip the TLS config sections that do not apply to this deployment.
if ! is_tls false ${SERVICE_PROTOCOL}; then
log 'Removing app upstream TLS options'
drop_config_section 'app upstream-tls'
fi
if ! is_tls false ${HEALTH_CHECK_PROTOCOL}; then
log 'Removing health-check upstream TLS options'
drop_config_section 'health-check upstream-tls'
fi
if ! is_tls ${FORCE_TLS_TERMINATION} ${SERVICE_PROTOCOL}; then
log 'Removing app downstream TLS options'
drop_config_section 'app downstream-tls'
fi
if ! is_tls ${FORCE_TLS_TERMINATION} ${HEALTH_CHECK_PROTOCOL}; then
log 'Removing health-check downstream TLS options'
drop_config_section 'health-check downstream-tls'
fi
# NOTE(review): [[ ]] is a bashism under the #!/bin/sh shebang; this only
# works where sh is bash — confirm the container's /bin/sh.
if [[ ${CONFIG_DEBUG} != 0 ]]; then
log 'Dumping final envoy config yaml'
echo '>>>>>'
cat ${CONFIG_YAML}
echo '<<<<<'
fi
# start envoy with config file (exec replaces this shell, so envoy
# receives signals directly — correct for a container entrypoint)
log 'Starting envoy'
exec envoy -c ${CONFIG_YAML} --base-id 1
| true
|
157ee6681a82fefcd6579b024d4db990f23e704f
|
Shell
|
bs111/FFmpegBuildTool
|
/macos/tools/do-compile-ffmpeg.sh
|
UTF-8
| 4,318
| 3.484375
| 3
|
[
"MIT"
] |
permissive
|
#! /usr/bin/env bash
# Build FFmpeg for macOS. $1 = target architecture, $2 = build type.
# ANSI color codes for banner output
RED='\033[0;31m'
Green='\033[0;33m'
NC='\033[0m' # No Color
echo "--------------------"
echo "${RED}[*] check input params [检查输入参数] $1 ${NC}"
echo "--------------------"
ARCH=$1
BUILD_OPT=$2
echo "ARCH[架构] = $ARCH"
echo "BUILD_OPT[构建参数] = $BUILD_OPT"
if [ -z "$ARCH" ]; then
echo "You must specific an architecture 'x86_64, ...'."
exit 1
fi
# Build paths and (initially empty) compiler/linker flag accumulators.
# FIX: $(...) instead of deprecated backticks for command substitution.
BUILD_ROOT="$(pwd)/tools"
BUILD_NAME=
FFMPEG_SOURCE_PATH=
# compiler options
CFG_FLAGS=
# --extra-cflags would provide extra command-line switches for the C compiler,
DEP_INCLUDES=
CFLAGS=
# --extra-ldflags would provide extra flags for the linker.
DEP_LIBS=
LDFLAGS=
PRODUCT=product
# Xcode toolchain locations (macOS 10.14 SDK).
TOOLCHAIN_SYSROOT="/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.14.sdk"
TOOLCHAIN_AS="/Applications/Xcode.app/Contents/Developer/usr/bin/gcc"
TOOLCHAIN_LD="/Applications/Xcode.app/Contents/Developer/usr/bin/ld"
echo ""
echo "--------------------"
echo "${RED}[*] make pre params [确定预参数] ${NC}"
echo "--------------------"
# Per-architecture source path and flag seeds; only x86_64 is supported.
if [ "$ARCH" = "x86_64" ]; then
BUILD_NAME=ffmpeg-x86_64
FFMPEG_SOURCE_PATH=${BUILD_ROOT}/${BUILD_NAME}
CFG_FLAGS="$CFG_FLAGS --arch=x86_64 --cpu=x86_64"
DEP_INCLUDES="$DEP_INCLUDES -I/usr/local/include"
CFLAGS="$CFLAGS"
DEP_LIBS="$DEP_LIBS -L/usr/local/lib"
LDFLAGS="$LDFLAGS -Wl,-no_compact_unwind"
else
echo "unknown architecture $ARCH";
exit 1
fi
if [ ! -d ${FFMPEG_SOURCE_PATH} ]; then
echo ""
echo "!! ERROR"
echo "!! Can not find FFmpeg directory for $BUILD_NAME"
echo ""
exit 1
fi
FFMPEG_OUTPUT_PATH=${BUILD_ROOT}/build/${BUILD_NAME}/output
SHARED_OUTPUT_PATH=${BUILD_ROOT}/../${PRODUCT}/${BUILD_NAME}
mkdir -p ${FFMPEG_OUTPUT_PATH}
mkdir -p ${SHARED_OUTPUT_PATH}
echo "BUILD_NAME[构建名称] = $BUILD_NAME"
echo ""
echo "CFG_FLAGS[编译参数] = $CFG_FLAGS"
echo ""
echo "DEP_INCLUDES[编译器依赖头文件] = $DEP_INCLUDES"
echo ""
echo "CFLAGS[编译器参数] = $CFLAGS"
echo ""
echo "DEP_LIBS[链接器依赖库] = $DEP_LIBS"
echo ""
echo "LDFLAGS[链接器参数] = $LDFLAGS"
echo ""
echo "TOOLCHAIN_SYSROOT[编译链Root] = $TOOLCHAIN_SYSROOT"
echo ""
echo "TOOLCHAIN_AS[] = $TOOLCHAIN_AS"
echo ""
echo "FFMPEG_SOURCE_PATH[源码目录] = $FFMPEG_SOURCE_PATH"
echo ""
echo "FFMPEG_OUTPUT_PATH[编译输出目录] = $FFMPEG_OUTPUT_PATH"
echo ""
echo "--------------------"
echo "${RED}[*] make ffmpeg params [确定FFmpeg编译参数] ${NC}"
echo "--------------------"
# Assemble the ./configure flag list for a darwin cross build.
CFG_FLAGS="$CFG_FLAGS --prefix=$FFMPEG_OUTPUT_PATH"
CFG_FLAGS="$CFG_FLAGS --sysroot=$TOOLCHAIN_SYSROOT"
CFG_FLAGS="$CFG_FLAGS --cc=clang"
CFG_FLAGS="$CFG_FLAGS --as=${TOOLCHAIN_AS}"
CFG_FLAGS="$CFG_FLAGS --strip="
CFG_FLAGS="$CFG_FLAGS --host-cflags= --host-ldflags="
CFG_FLAGS="$CFG_FLAGS --enable-cross-compile"
CFG_FLAGS="$CFG_FLAGS --target-os=darwin"
CFG_FLAGS="$CFG_FLAGS --disable-stripping"
# Debug builds keep symbols and disable optimizations; anything else is
# an optimized, small release build.
case "$BUILD_OPT" in
debug)
CFG_FLAGS="$CFG_FLAGS --disable-optimizations"
CFG_FLAGS="$CFG_FLAGS --enable-debug"
CFG_FLAGS="$CFG_FLAGS --disable-small"
;;
*)
CFG_FLAGS="$CFG_FLAGS --enable-optimizations"
CFG_FLAGS="$CFG_FLAGS --disable-debug"
CFG_FLAGS="$CFG_FLAGS --enable-small"
;;
esac
# module.sh contributes the shared module enable/disable flags.
export COMMON_CFG_FLAGS=
. ${BUILD_ROOT}/../config/module.sh
CFG_FLAGS="$CFG_FLAGS $COMMON_CFG_FLAGS"
echo "PATH[环境变量] = $PATH"
echo ""
echo "CFG_FLAGS[编译参数] = $CFG_FLAGS"
echo ""
echo "DEP_INCLUDES[编译器依赖头文件] = $DEP_INCLUDES"
echo ""
echo "DEP_LIBS[链接器依赖库] = $DEP_LIBS"
echo ""
echo "CFLAGS[编译器参数] = $CFLAGS"
echo ""
echo "LDFLAGS[链接器参数] = $LDFLAGS"
echo "--------------------"
echo "${RED}[*] configurate ffmpeg [配置FFmpeg] ${NC}"
echo "--------------------"
# FIX: guard the cd so configure/make cannot run in the wrong directory.
cd "${FFMPEG_SOURCE_PATH}" || exit 1
# CFG_FLAGS is intentionally unquoted: it carries many separate options.
./configure ${CFG_FLAGS} --extra-cflags="$CFLAGS" --extra-ldflags="$DEP_LIBS $LDFLAGS"
make clean
echo ""
echo "--------------------"
echo "${RED}[*] compile ffmpeg [编译FFmpeg] ${NC}"
echo "--------------------"
echo "FFMPEG_OUTPUT_PATH = $FFMPEG_OUTPUT_PATH"
make install -j8 > /dev/null
# Publish the headers and libraries into the shared product directory.
cp -r ${FFMPEG_OUTPUT_PATH}/include ${SHARED_OUTPUT_PATH}/include
cp -r ${FFMPEG_OUTPUT_PATH}/lib ${SHARED_OUTPUT_PATH}/lib
echo "FFmpeg install success"
| true
|
9edff5eaa809f8dae1f97ca1b2e48f1d515c3594
|
Shell
|
jbranchaud/deval
|
/VoldemortS1.sh
|
UTF-8
| 4,095
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/sh
# VoldemortS1
#
# This shell script will setup scenario1 for the Voldemort project:
# clone the voldemortE repository, build the base/source/target branches,
# copy their artifacts into the DEVALSUB tree, install the ROI files and
# generate the AST diffs.

# Both environment variables are prepared by setup.sh.
if [ -z "$DEVAL" ]
then
    echo "Your DEVAL variable is empty, please run setup.sh."
    exit 1
fi
if [ -z "$DEVALSUB" ]
then
    echo "Your DEVALSUB variable is empty, please run setup.sh."
    exit 1
fi

# setup the directory structure for this project scenario
configfile=$DEVAL/subjects/voldemort/voldemortE.yaml
python $DEVAL/dirify.py $configfile

# name the repository clone variables
tmpdir=$DEVAL/tmp
reponame=voldemortE
repodir=$tmpdir/$reponame

# checkout the Voldemort project, overwrite if it is already there.
voldemortURL=https://github.com/jbranchaud/voldemortE.git
git clone $voldemortURL $repodir

# go to the repository directory so that we can start jumping to different
# branches. FIX: guard the cd — everything below (including rm -rf later)
# assumes we really are inside the clone.
cd $repodir || exit 1

# name the branches here:
basebranch=scenario1
sourcebranch=scenario1-s1
targetbranch=scenario1-t1

# name the directories of interest
projectsrc=src/java/voldemort
projectbin=bin/voldemort
projectlib=lib

# setup_branch <branch> <dest-subdir> <label>
# Check out <branch>, build it with ant, and copy the src/bin/lib
# artifacts into $DEVALSUB/voldemortE/scenario1/<dest-subdir>/.
# (Replaces three duplicated copies of the same section.)
setup_branch()
{
    echo "*************************************"
    echo "Setting up the $3 for scenario1"
    git checkout -b "$1" "origin/$1"
    ant build
    cp -R $projectsrc "$DEVALSUB/voldemortE/scenario1/$2/src"
    cp -R $projectbin "$DEVALSUB/voldemortE/scenario1/$2/bin"
    cp -R $projectlib/* "$DEVALSUB/voldemortE/scenario1/$2/lib"
}

setup_branch "$basebranch"   base    base
setup_branch "$sourcebranch" source1 source
setup_branch "$targetbranch" target1 target

# add the ROI.txt files (roi1..roi4) to the project
for n in 1 2 3 4
do
    cp "$DEVAL/subjects/voldemort/scenario1-roi$n.txt" \
       "${DEVALSUB}/voldemortE/scenario1/config1/target1/roi$n.txt"
done

# get out of the repository and delete it
cd $DEVAL || exit 1
rm -rf $repodir

# create the AST Diff XML files for the base->source and base->target
baseSrc=$DEVALSUB/voldemortE/scenario1/base/src
sourceSrc=$DEVALSUB/voldemortE/scenario1/source1/src
targetSrc=$DEVALSUB/voldemortE/scenario1/target1/src
baseToSource=$DEVALSUB/voldemortE/scenario1/config1/source1/astdiff
baseToTarget=$DEVALSUB/voldemortE/scenario1/config1/target1/astdiff

# ASTDiff for base->source
echo "*************************************"
echo "Generating ASTDiffs for Base -> Source1"
$DEVAL/diffASTs.sh $baseSrc $sourceSrc $baseToSource

# ASTDiff for base->target
echo "*************************************"
echo "Generating ASTDiffs for Base -> Target1"
$DEVAL/diffASTs.sh $baseSrc $targetSrc $baseToTarget

echo "The project has been initialized for evaluation."
| true
|
4fe3951b93387ebde4579f8221cb14931368e7dd
|
Shell
|
pld-linux/libcgroup
|
/cgred.init
|
UTF-8
| 2,085
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/sh
#
# cgred CGroups Rules Engine Daemon
# chkconfig: 2345 02 98
# description: This is a daemon for automatically classifying processes \
# into cgroups based on UID/GID.
#
# processname: cgrulesengd
# pidfile: /var/run/cgrulesengd.pid
#
# Source function library
# (provides msg_*/daemon/killproc/status/show/ok/fail used below)
. /etc/rc.d/init.d/functions
# Read in configuration options.
[ -f /etc/sysconfig/cgred ] && . /etc/sysconfig/cgred
# Start cgrulesengd after verifying it is not already running, that
# /etc/cgrules.conf is non-empty, and that cgroups are mounted.
# Sets RETVAL (6 = not configured, 1 = no cgroups, 7 = daemon failed).
start() {
if [ -f /var/lock/subsys/cgred ]; then
msg_already_running "CGroup Rules Engine Daemon"
return
fi
if [ ! -s /etc/cgrules.conf ]; then
nls "CGroup Rules Engine Daemon not configured"
RETVAL=6
return
fi
if ! grep -qs "^cgroup" /proc/mounts; then
nls "Cannot find cgroups, is cgconfig service running?"
RETVAL=1
return
fi
msg_starting "CGroup Rules Engine Daemon"
daemon --pidfile /var/run/cgrulesengd.pid /sbin/cgrulesengd $OPTIONS
RETVAL=$?
if [ $RETVAL -ne 0 ]; then
return 7
fi
# Record the subsystem lock and the daemon pid for later actions.
touch /var/lock/subsys/cgred
pidof cgrulesengd > /var/run/cgrulesengd.pid
}
# Stop the daemon (SIGTERM via killproc) and clean up lock/pid files.
stop() {
if [ ! -f /var/lock/subsys/cgred ]; then
msg_not_running "CGroup Rules Engine Daemon"
return
fi
msg_stopping "CGroup Rules Engine Daemon"
killproc --pidfile /var/run/cgrulesengd.pid cgrulesengd -TERM
RETVAL=$?
rm -f /var/lock/subsys/cgred /var/run/cgrulesengd.pid
}
# Ask the running daemon to reload /etc/cgrules.conf by sending SIGUSR2.
reload() {
if [ ! -f /var/lock/subsys/cgred ] ; then
msg_not_running "CGroup Rules Engine Daemon"
return
fi
show "Reloading rules configuration..."
# SIGUSR2
# FIX: ${pidfile} was never defined in this script; use the same pid file
# every other action uses.
kill -s 12 $(cat /var/run/cgrulesengd.pid)
RETVAL=$?
# FIX: the success/failure branches were inverted — report "ok" when the
# signal was delivered (exit status 0) and "fail" otherwise.
if [ $RETVAL -eq 0 ]; then
ok
else
fail
fi
}
# Restart only when the service is running; otherwise report not-running
# and set RETVAL to the caller-supplied status ($1).
condrestart() {
if [ -f /var/lock/subsys/cgred ]; then
stop
start
return
fi
msg_not_running "CGroup Rules Engine Daemon"
RETVAL=$1
}
RETVAL=0
# See how we are called
# Dispatch on the init action; try-restart/force-reload use the LSB
# statuses 0 ("not running" is success) and 7 respectively.
case "$1" in
start)
start
;;
stop)
stop
;;
restart)
stop
start
;;
reload)
reload
;;
try-restart)
condrestart 0
;;
force-reload)
condrestart 7
;;
status)
status --pidfile /var/run/cgrulesengd.pid cgrulesengd
RETVAL=$?
;;
*)
msg_usage "$0 {start|stop|restart|try-restart|reload|force-reload|status}"
exit 3
;;
esac
exit $RETVAL
| true
|
208cc715c1161377fdbedd019f20a41066612b1d
|
Shell
|
jasonltorchinsky/vert_remap
|
/run/debug_local.sh
|
UTF-8
| 862
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Runs the vertical remapping code on a local machine:
# wipes and rebuilds ../build with CMake, then runs the executable under
# gdb for every combination of the configured run parameters.

# Set directory path variables.
run_dir=$(pwd)      # FIX: $(...) instead of deprecated backticks
build_dir=../build

# Re-build the application
echo '-- Re-building executable...'
# FIX: guard the cd — without it a failed cd would make the following
# `rm -rf *` wipe the CURRENT directory instead of the build directory.
cd "$build_dir" || exit 1
rm -rf *
cmake ..
cmake --build .
exec=vert_remap
if test -f "$exec"; then
  # Set run variables
  echo '-- Setting run variables...'
  cd "$run_dir" || exit 1
  cell_counts=(4)   # exponents: ncell = 2**cells
  ogrid_opts=(cub)  # output-grid options
  tfunc_opts=(asr)  # test-function options
  alg_opts=(lmb)    # remapping-algorithm options
  rngseed=42        # fixed seed for reproducible runs
  # Run every combination of the option lists under gdb.
  for cells in "${cell_counts[@]}"
  do
    for ogrid in "${ogrid_opts[@]}"
    do
      for tfunc in "${tfunc_opts[@]}"
      do
        for alg in "${alg_opts[@]}"
        do
          gdb -tui --args "$build_dir"/vert_remap --verbose ncell $((2**cells)) ogrid "$ogrid" tfunc "$tfunc" alg "$alg" seed "$rngseed"
        done
      done
    done
  done
  echo '-- Runs complete!'
else
  echo '-- Executable build failed!'
fi
| true
|
f69ed2a1f547b736679b21673d09fadd1e3c55ee
|
Shell
|
hringriin/chordbook
|
/addToSetlist.sh
|
UTF-8
| 9,868
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
#################################################################################
# Title : Add Leadsheets/Chords/Texts/... to Setlist #
# Author : Joschka Koester #
# E-Mail (work): jkoester@tzi.de #
# E-Mail (priv): koester.joschka@gmail.com #
# Date : 2014-03-13 #
# Version : v1.0-5 #
# Requires : dialog #
# Category : Shell menu tools #
#################################################################################
# Description: #
# This Script creates a LaTeX-File with the specified name. Additionally #
# it fills the file with the following content: #
# - \section{@title} #
# - \subsection{Text} #
# - [\includepdf | \include | \input] -link to the text #
# - \subsection{Noten, Leadsheets, \ldots} #
# - \includepdf to the notes, leadsheets, ... #
# Note: #
# - ... #
#################################################################################
# Variables for title, backtitle and version
version="v2.0-0"
dialogtitle="Add to Setlist $version"
backtitle="Linux Shell Script - by hringriin"
PN=`basename "$0"`
# The variable for the outputfile $file
file="./addToSetlist.txt"
# Getting stdrr working with this script
# NOTE(review): predictable /tmp names keyed on $$ are race/symlink-prone;
# mktemp would be safer — left as-is.
_tmp1="/tmp/tmp1.$$"
_tmp2="/tmp/tmp2.$$"
_tmp3="/tmp/tmp3.$$"
_tmp4="/tmp/tmp4.$$"
_tmp5="/tmp/tmp5.$$"
# Invoke dialog once so its version banner lands in _tmp1 for DVER below.
dialog 2>$_tmp1
#dialog 2>$_tmp2
DVER=`cat $_tmp1 | head -1`
# The shitty esc key
esc="Do not press the shitty [ESC] key!"
# Legacy workflow: interactively collect song metadata via dialog and
# emit a LaTeX include file ($index.tex) referencing the chosen
# text/leadsheet/chord entries. Uses the _tmp1/_tmp2 temp files and the
# dialog title globals defined above.
function legacy()
{
# Yes or No?
dialog --title "$dialogtitle" --backtitle "$backtitle" --yesno "Do you want to add a song to Setlist?" 0 0
# get response
response=$?
case $response in
0)
# Checklist: tags 10/20/30 select which entry types to generate.
dialog --title "$dialogtitle" \
--backtitle "$backtitle" \
--checklist "Tag item(s) to choose. \n\nMove using [UP], [DOWN], [SPACE] to select and [ENTER] to confirm your choice!" 0 0 0 \
10 "Create Lyrics entry in LaTeX file" off \
20 "Create Leadsheet entry in LaTeX file" off \
30 "Create Chored Lyrics entry in LaTeX file" on 2>$_tmp1
sh -c "cp $_tmp1 $_tmp2"
# Form: one metadata field per line, written to _tmp1 by dialog.
dialog --backtitle "$backtitle" \
--form "Fill in the form for the name of the Artist, the Title and the Prefix of the song.\n
The Prefix is meant to be the filename without any ending, 'mad_world' for example." 0 0 0 \
"Artist" 2 4 "" 2 20 80 0 \
"Title" 4 4 "" 4 20 80 0 \
"Index" 6 4 "" 6 20 80 0 \
"Copyright" 8 4 "" 8 20 80 0 \
"License" 10 4 "" 10 20 80 0 \
2>$_tmp1
# Iterator
# Read the form answers into area[0..4].
# NOTE(review): `expr $i+1` (no spaces) makes expr echo the literal
# string "0+1"; it still works only because bash evaluates array
# subscripts arithmetically — fragile, should be i=$((i+1)).
i=0
while read line
do
area[i]="$line"
i=`expr $i+1`
done <$_tmp1
artist="${area[0]}"
title="${area[1]}"
index="${area[2]}"
copyright="${area[3]}"
license="${area[4]}"
tex="$index.tex"
file="$index.pdf"
sh -c "touch $tex"
# echo -e "\section{$artist -- $title}">>$tex
# echo -e " \subsection{Text}">>$tex
# Tag 10: lyrics PDF include.
e=$(grep -c "10" "$_tmp2")
if [ $e -gt 0 ]; then
echo -e " \includepdf[pages=-,scale=0.8]{input/Texte/$file}">>$tex
# else
# echo -e " %\includepdf[pages=-,scale=0.8]{input/Texte/$file}">>$tex
fi
# echo -e " \subsection{Noten, Leadsheets, \ldots}">>$tex
# Tag 20: leadsheet/score PDF include.
e=$(grep -c "20" "$_tmp2")
if [ $e -gt 0 ]; then
echo -e " \includepdf[pages=-,scale=0.8]{input/Noten/$file}">>$tex
# else
# echo -e " %\includepdf[pages=-,scale=0.8]{input/Noten/$file}">>$tex
fi
# Tag 30: chorded-lyrics songs-package skeleton.
e=$(grep -c "30" "$_tmp2")
if [ $e -gt 0 ]; then
echo -e "\\\stepcounter{subsection}\n">>$tex
echo -e "\\\beginsong{$title}[%">>$tex
echo -e " by={$artist},">>$tex
echo -e " cr={$copyright},">>$tex
echo -e " li={$license},">>$tex
echo -e " index={$index}]\n">>$tex
echo -e " \\\phantomsection">>$tex
echo -e " \\\addcontentsline{toc}{subsection}{\\\thesubsection \\\quad \\\textbf{$title} --- $artist}\n">>$tex
echo -e " \\\label{$index}\n">>$tex
echo -e " \\\beginverse">>$tex
echo -e " Example verse ...">>$tex
echo -e " \\\endverse">>$tex
echo -e "\\\endsong">>$tex
fi
;;
1) echo "No file has been touched!";;
255) echo "$esc";;
esac
rm $_tmp1
rm $_tmp2
}
# Offer the deprecated legacy workflow; fall through to the regular
# addSong flow when the user declines. ESC prints $esc and does nothing.
function checkLegacy()
{
	# Yes/No prompt; --defaultno preselects "No".
	dialog --title "$dialogtitle" --backtitle "$backtitle" --defaultno --yesno "Do you want the legacy version of this script?" 0 0
	# Branch directly on dialog's exit status: 0=Yes, 1=No, 255=ESC.
	case $? in
		0)
			dialog --msgbox "Alright, legacy version. Proceed at your own risk!" 0 0 ;
			legacy
			;;
		1)
			addSong
			;;
		255) echo "$esc";;
	esac
}
# Entry point: run the interactive flow (legacy or addSong), then remove
# the temp files and unset every global the script created.
function main()
{
checkLegacy
cleanup
}
# Interactively collect song metadata via a dialog form and scaffold the
# LaTeX and lilypond sources for a new song under src/<filename>/.
# Globals read:  backtitle, dialogtitle, _tmp3, _tmp4, _tmp5, esc
# Globals set:   area, artist, title, filename, copyright, license,
#                arranger, *Path, *Template, texFile, lilypondFile
#                (all released later by cleanup)
# Fixes vs. previous version:
#  - the broken `$(unknown)` command substitutions (which executed a
#    nonexistent command) are replaced by ${filename}, the form field
#    they clearly stood for;
#  - `expr $i+1` (no spaces, only worked by accident through bash's
#    arithmetic subscript coercion) is replaced by $((i + 1));
#  - expansions and redirection targets are quoted.
function addSong()
{
	# Yes or No?
	dialog --title "$dialogtitle" --backtitle "$backtitle" --yesno "Do you want to add a song to Setlist?" 0 0
	response=$?
	case $response in
	0)
		# dialog --form writes one field value per line to stderr.
		dialog --backtitle "$backtitle" \
		--form "Fill in the form according to the labels. The filename has to be conform to the follow regular expression: [A-Za-z0-9_-]*[A-Za-z0-9_-]. The filename will also specify the folder structure for this song." 0 0 0 \
		"Artist" 2 4 "" 2 20 80 0 \
		"Songname" 4 4 "" 4 20 80 0 \
		"Filename" 6 4 "" 6 20 80 0 \
		"Copyright" 8 4 "" 8 20 80 0 \
		"License" 10 4 "" 10 20 80 0 \
		"Arranger" 12 4 "" 12 20 80 0 \
		2>"$_tmp3"
		if [[ $? -ne 0 ]] ; then
			dialog --msgbox "Aborting and exiting script." 0 0 ;
			exit 1
		fi
		# Read one form field per line into area[].
		i=0
		while IFS= read -r line
		do
			area[i]="$line"
			i=$((i + 1))
		done <"$_tmp3"
		artist="${area[0]}"
		title="${area[1]}"
		filename="${area[2]}"
		copyright="${area[3]}"
		license="${area[4]}"
		arranger="${area[5]}"
		srcPath="src"
		templatePath="Templates"
		latexTemplate="template.tex"
		lilypondTemplate="template.ly"
		latexPath="${srcPath}/${filename}/LaTeX"
		lilypondPath="${srcPath}/${filename}/lilypond"
		texFile="${latexPath}/${filename}.tex"
		lilypondFile="${lilypondPath}/${filename}.ly"
		mkdir -p "${latexPath}" "${lilypondPath}"
		cp "${templatePath}/${latexTemplate}" "${texFile}"
		cp "${templatePath}/${lilypondTemplate}" "${lilypondFile}"
		# Fill the LaTeX template placeholders in place via a temp file.
		# NOTE(review): values containing sed metacharacters (/ or &)
		# will still break the substitution, as before.
		for subst in \
			"s/<SONGTITLE>/${title}/g" \
			"s/<ARTIST>/${artist}/g" \
			"s/<COPYRIGHT>/${copyright}/g" \
			"s/<LICENSE>/${license}/g" \
			"s/<INDEX>/${filename}/g"
		do
			sed "$subst" "${texFile}" > "$_tmp4"
			cp "$_tmp4" "${texFile}"
		done
		# Same treatment for the lilypond template.
		for subst in \
			"s/<SONGTITLE>/${title}/g" \
			"s/<ARTIST>/${artist}/g" \
			"s/<ARRANGER>/${arranger}/g"
		do
			sed "$subst" "${lilypondFile}" > "$_tmp4"
			cp "$_tmp4" "${lilypondFile}"
		done
		# Drop a per-song Makefile next to the lilypond source.
		cd "${lilypondPath}"
		cp ../../../Templates/Makefile Makefile
		sed "s/<FILENAME>/${filename}/g" Makefile > "$_tmp5"
		cp "$_tmp5" Makefile
		cd -
		cp "${templatePath}/gitignore-Template" "${lilypondPath}/.gitignore"
		dialog --msgbox "Script has finished. There should be a new song in 'src/' named '${title}' in 'src/${filename}', have fun!" 0 0 ;
		;;
	1)
		# Declined: go back to the main menu.
		main
		;;
	255) echo "$esc";;
	esac
}
# Remove the temporary files and drop every global the script created,
# so nothing leaks into the environment if the script was sourced.
function cleanup()
{
	rm $_tmp1
	rm $_tmp2
	rm $_tmp3
	rm $_tmp4
	rm $_tmp5
	# One unset call covers every scratch global at once.
	unset version dialogtitle backtitle PN file \
	      _tmp1 _tmp2 _tmp3 _tmp4 _tmp5 \
	      DVER esc response i area \
	      artist title index copyright license arranger \
	      tex line e \
	      srcPath templatePath latexTemplate lilypondTemplate \
	      latexPath lilypondPath texFile lilypondFile
}
# Run the interactive flow; cleanup happens inside main before we exit.
main
exit 0
| true
|
3d643e9be9c6a6829c7c46dc68992581e3c159f3
|
Shell
|
darthbrian/vagrinit5
|
/pyinstall.sh
|
UTF-8
| 1,920
| 3.75
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Install Python, pip and the pexpect module, choosing the package
# manager from the running distribution: apt-get on Ubuntu/Debian,
# yum (plus the IUS repo for Python 3.6) on CentOS/Red Hat.
# All installer output is silenced; only the progress messages print.
echo "Installing Python and setting it up..."
# Count case-insensitive distro markers in the kernel banner.
isUbuntu=$(uname -a | grep -ic 'ubuntu')
isDebian=$(uname -a | grep -ic 'debian')
if [[ $isUbuntu -gt 0 || $isDebian -gt 0 ]];
then
	echo "Detected Ubuntu or Debian Installation. Using apt-get to install Python..."
	if ! [ -x "$(command -v python)" ];
	then
		sudo apt-get update >/dev/null 2>&1
		echo "Installing Python2 for custom mod support"
		sudo apt-get install -y python >/dev/null 2>&1
		echo "Installing python-apt and setting it up..."
		sudo apt-get install -y python-apt >/dev/null 2>&1
		echo "Installing Pip and setting it up..."
		sudo apt-get install -y python3-pip >/dev/null 2>&1
		echo "Installing the Python3 module Pexpect and setting it up..."
		sudo pip3 install pexpect >/dev/null 2>&1
	else
		echo "Detected python is already installed. Skipping installation..."
	fi
else
	echo "Detected Centos or Red Hat Installation. Using yum to install Python..."
	if ! [ -x "$(command -v python3.6)" ];
	then
		echo "Need to install python..."
		echo "Updating distribution and utils..."
		sudo yum -y update >/dev/null 2>&1
		sudo yum -y install yum-utils >/dev/null 2>&1
		sudo yum -y groupinstall development >/dev/null 2>&1
		# Fixed: this line previously used `2<&1` (an input redirect),
		# which left stderr attached to the terminal.
		sudo yum -y install https://centos7.iuscommunity.org/ius-release.rpm >/dev/null 2>&1
		echo "Installing Python 3 and setting it up..."
		sudo yum -y install python36u >/dev/null 2>&1
		echo "Installing Pip and setting it up..."
		sudo yum -y install python36u-pip >/dev/null 2>&1
		echo "Installing the Python3 module Pexpect and setting it up..."
		sudo pip3.6 install pexpect >/dev/null 2>&1
		sudo yum -y install python36u-devel >/dev/null 2>&1
	else
		echo "Detected python is already installed. Skipping installation..."
	fi
fi
| true
|
63d4bca27e52ccf9d133449c06a0ffb6e29c4439
|
Shell
|
rperea14/drigoscripts
|
/drigo_Tractography/older/drigo_Tracts/drigo_TractThalamus.sh
|
UTF-8
| 3,821
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
#Rodrigo Perea
#rperea@kumc.edu (alternative email grandrigo@gmail.com)
# For every subject directory whose name starts with the given prefix:
# (historically) convert/reorient the FreeSurfer volumes, then back up
# the left-hemisphere cortical target masks and binarise each one
# (image divided by itself -> 1 inside the ROI) for tractography targets.
#
#THIS SCRIPT WAS CREATED UNDER bash version 3.2 unders OSX 10.7. To check your bash version,
#enter bash --version
#Any questions please feel free to contact me at the email provided above.
SOURCE=$( pwd )
#Checking what you are doing.....
# "${1:-}" keeps the test well-formed when no argument is supplied
# (the unquoted form was a syntax error with zero args).
if [ "${1:-}" = "--help" ] ; then
echo "Instructions to be entered here....
\$1 should be the start of the HSC number...
"
exit
fi
# Iterate over matching subject dirs with a glob (never parse `ls`).
for DIR in "$1"* ;
do
DIRtemp=$(basename "$DIR")
# DIRA=${DIRtemp/.bedpostX}
#This will create a directory called ... into each folder
TT="THAL_TRAC"
###ALL THIS IS PRE-PROCESSING BEORE TRACTROGRAPHY
###I JUST COPIED THE SEGMENTATIONS FROM FS AND NODIF BRAIN AND PUT THEM INTO
###STANDARD SPACE
echo "In directory $DIR"
#echo "Copying data.nii.gz from bedpost_in to bedpostX..."
#Copying the nodif_brain form previous bedpostx directory
#cp $SOURCE/../BEDPOSTx_in/$DIRA/data.nii.gz $DIR/nodif_brain.nii.gz
#echo "Converting FS brain and aseg_aparc to NIFTII.."
#converting th *.mgz images to niftii gzipped format (nii.gz)
#mri_convert ../FSs/${DIRA}_FS/mri/brain.mgz $DIR/FS_btemp.nii.gz
#mri_convert ../FSs/${DIRA}_FS/mri/aparc+aseg.mgz $DIR/FS_aatemp.nii.gz
#echo "Reorienting $DIRA brain and apar_aseg"
#reorienting the copied images from FS
#fslreorient2std $DIR/FS_btemp.nii.gz $DIR/FS_brain.nii.gz
#fslreorient2std $DIR/FS_aatemp.nii.gz $DIR/FS_aparc_aseg.nii.gz
###############################
#NOW TIME FOR MASKING OUR REGIONS THAT WILL GO INTO OUR TRACTOGRAPHY
mkdir -p "$DIR/$TT"
#Values are:
#49 --> R-Thalamus, 10 --> L-Thalamus
#1006 --> L-Enthorinal 2006 --> R-Enthorinnal
#1/2.008 -->InfParietal, 1/2.009-->Inf.Temp
#1/2.011 -->LatOcci, 1/2.12-->LatOrbFrontal
#1/2.028 -->SupFron, 1/2.029 -->SupParietal
#MAKING ALL THE CORTICAL REGIONS....
# Region names and their matching aparc+aseg label numbers (parallel arrays).
declare -a CORTICAL=( L-RosMidFr L-FrPole L-LatOrbFr L-MedOrbFr L-SupFr L-ParsTriang L-CauMiddFr L-ParPerCu L-Insula L-PreCentral \
L-PostCentral L-SupraMarg L-SupTemp L-TempPole L-MidTemp L-InfTemp L-InfPar L-SupPar L-LatOcc L-Fusi \
L-Entho L-ParaHipp L-Lingual L-ParaCentral L-Cuneus L-PreCuneus L-IsthCing L-PostCing L-CauAntCing L-RosAntCing)
declare -a CORNUM=( 1027 1032 1012 1014 1028 1020 1003 1018 1035 1024 \
1022 1031 1030 1033 1015 1009 1008 1029 1011 1007 \
1006 1016 1013 1017 1005 1025 1010 1023 1002 1026)
tractcounter=0
#while [ $tractcounter -le 29 ]
#do
#	echo "The tract ${CORTICAL[(($tractcounter))]} is ${CORNUM[(($tractcounter))]} "
#	fslmaths $DIR/FS_aparc_aseg.nii.gz -thr ${CORNUM[(($tractcounter))]} -uthr ${CORNUM[(($tractcounter))]} $DIR/$TT/${CORTICAL[(($tractcounter))]}
#	tractcounter=$((tractcounter+1))
#done
#	echo "Thalaming...."
#	fslmaths $DIR/FS_aparc_aseg.nii.gz -thr 10 -uthr 10 $DIR/$TT/Thalamus-L
#########Creating the registration models on each directory
#flirt -in $DIR/nodif_brain -ref $DIR/FS_brain -omat $DIR/xfms/diff2str.mat -searchrx -90 90 -searchry -90 90 -searchrz -90 90 -dof 6 -cost corratio
#convert_xfm -omat $DIR/xfms/str2diff.mat -inverse $DIR/xfms/diff2str.mat
#	mkdir -p $DIR/R-Thalamus-Tract
#	mkdir -p $DIR/L-Thalamus-Tract
#############Create target masks necessary for tracking
TTR="Tracts_Intensity_BAK"
# -p so re-runs do not fail on an existing backup directory.
mkdir -p "$DIR/$TT/$TTR"
echo "Making a backup Tract with their intensity..."
cp "$DIR/$TT/"* "$DIR/$TT/$TTR"
# Binarise every left-hemisphere target mask: X / X == 1 inside the ROI.
for TARGET in "$DIR/$TT"/L-* ;
do
echo "Doing... fslmaths $TARGET -div $TARGET $TARGET"
fslmaths "$TARGET" -div "$TARGET" "$TARGET"
#	echo "$SOURCE/$target" >> $DIR/L-Thalamus-Tract/targets.txt"
done
done
| true
|
24f216cc6a703590b1786c70cbb6ee1bfe8cddd0
|
Shell
|
Fantombear/ORIE5270
|
/project/process_data/code/quotebbo.sh
|
UTF-8
| 473
| 3.140625
| 3
|
[] |
no_license
|
# For every TAQ archive named in 'filelist': download it from S3, unzip,
# push the extracted file into HDFS and run the Spark quote job on it.
filename='filelist'
directory=taq.12.2014/
# Read one archive name per line; replaces the word-splitting
# `for line in $(cat ...)` loop and the odd $'s3://' ANSI-C quoting.
while IFS= read -r line ; do
	echo "$line"
	locate_dir="s3://${directory}${line}"
	sudo aws s3 cp "$locate_dir" ./temp.zip;
	# Pull the member file name out of the `unzip -l` listing (the text
	# between the two ----- separator rows, last column).
	name_file="$(unzip -l temp.zip | awk '/-----/ {p = ++p % 2; next} p {print $NF}')";
	unzip -o temp.zip;
	rm -f temp.zip;
	hadoop fs -put -f "$name_file" input;
	spark-submit quote_spark.py "input/$name_file";
	hadoop fs -rm -f "input/$name_file";
	rm -f "$name_file"
done < "$filename"
| true
|
2c92c795b2de65de5b9f27e633179667a21a767a
|
Shell
|
hileamlakB/tuxcut
|
/client/images/icons2py.sh
|
UTF-8
| 289
| 3.203125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Regenerate ../icons.py by embedding every PNG in this directory with
# img2py: the first icon creates the file, the rest are appended (-a).
# clean the old icons.py
rm -rf ../icons.py
# Glob directly instead of parsing `ls` output; quoting keeps names
# with spaces intact.
first=1
for icon in *.png
do
	# Skip the literal pattern when no PNG matches.
	[ -e "$icon" ] || continue
	if [ $first -eq 1 ]; then
		img2py "$icon" ../icons.py
		first=0
	else
		img2py -a "$icon" ../icons.py
	fi
done
| true
|
eb173c5c2f5fa8987791ef29710bcb8b2292116b
|
Shell
|
cathay4t/bin_folder
|
/tk
|
UTF-8
| 121
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Fetch a token via pytoken, copy it to the X clipboard when a display
# is available, and echo it to stdout.
TOKEN=$(pytoken)
# Only touch the clipboard under X ($DISPLAY set and non-empty);
# replaces the roundabout "CHK${DISPLAY}" != "CHK" comparison.
if [ -n "${DISPLAY:-}" ]; then
	# printf keeps the token byte-exact (no word-splitting/globbing,
	# and safe even if the token starts with '-').
	printf '%s' "$TOKEN" | xclip -selection c
fi
echo "$TOKEN"
|
f2cbbb6d0038f45e95a0f79aa68d349d42952f83
|
Shell
|
ldwardx/InjectionIIISupportDevice
|
/InjectionIII/InjectionSetup
|
UTF-8
| 1,424
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#! /bin/bash
#InjectionSetup
# Xcode build-phase script: copy the InjectionIII device bundle into the
# freshly built app, re-sign it (and every dylib inside) with the app's
# signing identity, then record the identity and this Mac's LAN IP in
# the bundle so the device can connect back.
# Silently no-ops unless Xcode is installed and this is a Debug/arm64 build.
if [[ ! -d "/Applications/Xcode.app" ]]; then
	exit 0
fi
export CODESIGN_ALLOCATE=/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/codesign_allocate
app="$BUILT_PRODUCTS_DIR/$FULL_PRODUCT_NAME"
bundle_src="/Applications/InjectionIII.app/Contents/Resources/iOSInjection_Device.bundle"
bundle_dst="$app/iOSInjection_Device.bundle"
# Proceed only for Debug builds targeting arm64 (physical device).
function check_arch {
	if [[ "$CONFIGURATION" != "Debug" || "$ARCHS" != "arm64" ]]; then
		exit 0
	fi
}
function check_src() {
	if [[ ! -d "$bundle_src" ]]; then
		# printf: the previous echo printed the "\n" literally.
		printf 'Not found iOSInjection_Device.bundle, expect location:\n%s\n' "$bundle_src"
		exit 0
	fi
}
# Sign the bundle itself, then every dylib inside it.
function sign_bundle() {
	sign "$bundle_src"
	# IFS= and -r keep paths with leading spaces/backslashes intact.
	find "$bundle_src" -name '*.dylib' | while IFS= read -r file; do sign "$file"; done
}
function sign() {
	/usr/bin/codesign --deep --force -s "$EXPANDED_CODE_SIGN_IDENTITY" "$1"
}
# Replace any previous copy of the bundle inside the app.
function copy_bundle() {
	if [[ -d "$bundle_dst" ]]; then
		rm -rf "$bundle_dst"
	fi
	cp -rf "$bundle_src" "$bundle_dst"
}
function recordSign() {
	echo "$EXPANDED_CODE_SIGN_IDENTITY" > "$bundle_dst/sign"
}
# Find the first interface en0..en1000 with an IPv4 address and store it
# for the device to dial back to.
function recordIP() {
	for NUM in $(seq 0 1000); do
		# 2>/dev/null: ipconfig is noisy for nonexistent interfaces.
		ip=$(ipconfig getifaddr "en$NUM" 2>/dev/null)
		if [ -n "$ip" ]; then
			break
		fi
	done
	echo "$ip" > "$bundle_dst/ip"
}
check_arch && check_src && sign_bundle && copy_bundle && recordSign && recordIP
| true
|
079e21c0effbe04505a8b0e86a1522e880edcb75
|
Shell
|
jaeee/jaee-Demo
|
/procbar.sh
|
UTF-8
| 535
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
#########################################################################
# File Name: procbar.sh
# Author: liumin
# mail: 1106863227@qq.com
# Created Time: Sun 02 Jul 2017 09:21:46 AM CST
#########################################################################
# Animated progress bar: grows a '#' bar to 100% with a |/-\ spinner,
# redrawn in place via \r, cycling the ANSI foreground colour each step.
# (The shebang now comes first; it was previously below the header,
# where the kernel ignores it.)
num=0
str='#'
max=100
pro=('|' '/' '-' '\')
while [ $num -le $max ]
do
	# Cycle the foreground colour through ANSI codes 30-37.
	((colour=30+num%8))
	echo -en "\e[1;"$colour"m"
	# Spinner frame: one of | / - \.
	let index=num%4
	printf "[%-100s %d%% %c]\r" "$str" "$num" "${pro[$index]}"
	let num++
	sleep 0.1
	str+='#'
done
printf "\n"
# Reset terminal attributes; the previous "\e[1;30;m" was a malformed
# SGR sequence (stray ';' before 'm') and left the colour set.
echo -e "\e[0m"
| true
|
6263176cef2cd5370400df230b191fe456a1f687
|
Shell
|
jairsjunior/kafka-kerberos
|
/tests/test-using-rest-proxy.sh
|
UTF-8
| 1,763
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Smoke-test a Kafka REST Proxy on localhost:8082:
#   1. produce two JSON records to 'test-topic'
#   2. create a JSON consumer instance, subscribe it, and fetch records.
# printf replaces `echo "\n..."`, which prints a literal \n in bash.

# Produce a message using JSON with the value '{ "foo": "bar" }' to the topic test-topic
echo "Producing message 1"
curl -X POST -H "Content-Type: application/vnd.kafka.json.v2+json" \
-H "Accept: application/vnd.kafka.v2+json" \
--data '{"records":[{"value":{"foo":"bar 1"}}]}' "http://localhost:8082/topics/test-topic"
printf '\nProducing message 2\n'
curl -X POST -H "Content-Type: application/vnd.kafka.json.v2+json" \
-H "Accept: application/vnd.kafka.v2+json" \
--data '{"records":[{"value":{"foo":"bar 2"}}]}' "http://localhost:8082/topics/test-topic"
printf '\nWait to start Consuming\n'
sleep 5
# Create a consumer for JSON data, starting at the beginning of the topic's
# log and subscribe to a topic. Then consume some data using the base URL in the first response.
# Finally, close the consumer with a DELETE to make it leave the group and clean up
# its resources.
printf '\nCreate Consumer\n'
curl -X POST -H "Content-Type: application/vnd.kafka.v2+json" \
--data '{"name": "my_consumer_instance", "format": "json", "auto.offset.reset": "earliest"}' \
http://localhost:8082/consumers/my_json_consumer
# Expected output from preceding command
# {
#  "instance_id":"my_consumer_instance",
#  "base_uri":"http://localhost:8082/consumers/my_json_consumer/instances/my_consumer_instance"
# }
printf '\nSubscribe topics\n'
curl -X POST -H "Content-Type: application/vnd.kafka.v2+json" --data '{"topics":["test-topic"]}' \
http://localhost:8082/consumers/my_json_consumer/instances/my_consumer_instance/subscription
# No content in response
printf '\nConsuming Data\n'
curl -X GET -H "Accept: application/vnd.kafka.json.v2+json" \
http://localhost:8082/consumers/my_json_consumer/instances/my_consumer_instance/records
| true
|
d5e1ca5ce8444f436fa696b298b5e11ba3461ce9
|
Shell
|
tjmacke/dd_maps
|
/scripts/get_addrs_from_runs.sh
|
UTF-8
| 2,775
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash
#
# Extract the source or destination address of every "Job" row in a
# tab-separated runs file and emit a TSV of parsed/queryable addresses.
# Requires $DM_HOME and the awk libraries cfg_utils.awk / addr_utils.awk.
. ~/etc/funcs.sh
export LC_ALL=C
U_MSG="usage: $0 [ -help ] [ -c addr-info-file ] -at { src | dst } [ runs-file ]"
if [ -z "$DM_HOME" ] ; then
LOG ERROR "DM_HOME is not defined"
exit 1
fi
DM_ADDRS=$DM_HOME/addrs
DM_ETC=$DM_HOME/etc
DM_LIB=$DM_HOME/lib
DM_SCRIPTS=$DM_HOME/scripts
# awk v3 does not support include
# v3 needs igawk with the library paths passed bare; v4/v5 use gawk's
# native @include, so the paths are pre-quoted for splicing into the
# single-quoted awk program below.
AWK_VERSION="$(awk --version | awk '{ nf = split($3, ary, /[,.]/) ; print ary[1] ; exit 0 }')"
if [ "$AWK_VERSION" == "3" ] ; then
AWK="igawk --re-interval"
CFG_UTILS="$DM_LIB/cfg_utils.awk"
ADDR_UTILS="$DM_LIB/addr_utils.awk"
elif [ "$AWK_VERSION" == "4" ] || [ "$AWK_VERSION" == "5" ] ; then
AWK=awk
CFG_UTILS="\"$DM_LIB/cfg_utils.awk\""
ADDR_UTILS="\"$DM_LIB/addr_utils.awk\""
else
LOG ERROR "unsupported awk version: \"$AWK_VERSION\": must be 3, 4 or 5"
exit 1
fi
AI_FILE=$DM_ETC/address.info
ATYPE=
FILE=
# Hand-rolled option parsing; first non-option argument is the runs file.
while [ $# -gt 0 ] ; do
case $1 in
-help)
echo "$U_MSG"
exit 0
;;
-c)
shift
if [ $# -eq 0 ] ; then
LOG ERROR "-c requires addr-info-file argument"
echo "$U_MSG" 1>&2
exit 1
fi
AI_FILE=$1
shift
;;
-at)
shift
if [ $# -eq 0 ] ; then
LOG ERROR "-at requires address-type argument"
echo "$U_MSG" 1>&2
exit 1
fi
ATYPE=$1
shift
;;
-*)
LOG ERROR "unknown option $1"
echo "$U_MSG" 1>&2
exit 1
;;
*)
FILE=$1
shift
break
;;
esac
done
if [ $# -ne 0 ] ; then
LOG ERROR "extra arguments $*"
echo "$U_MSG" 1>&2
exit 1
fi
# -at is mandatory and must be exactly "src" or "dst".
if [ -z "$ATYPE" ] ; then
LOG ERROR "missing -at address-type argument"
echo "$U_MSG" 1>&2
exit 1
elif [ "$ATYPE" != "src" ] && [ "$ATYPE" != "dst" ] ; then
LOG ERROR "unkonwn address type $ATYPE, must be src or dst"
echo "$U_MSG" 1>&2
exit 1
fi
# $AWK is deliberately unquoted (it may carry options, e.g. igawk
# --re-interval), as is the trailing $FILE (empty means read stdin).
# Shell values are spliced into the awk program via '"$VAR"' breaks.
$AWK -F'\t' '
@include '"$CFG_UTILS"'
@include '"$ADDR_UTILS"'
BEGIN {
atype = "'"$ATYPE"'"
ai_file = "'"$AI_FILE"'"
if(CFG_read(ai_file, addr_info)){
err = 1
exit err
}
if(AU_init(addr_info, us_states, us_states_long, towns_a2q, towns_r2q, st_types_2qry, dirs_2qry, ords_2qry)){
err = 1
exit err
}
pq_options["rply"] = 0
pq_options["do_subs"] = 1
pq_options["no_name"] = "Residence"
pr_hdr = 1
}
$5 == "Job" {
date = $1
src = $6
dst = $7
err = AU_parse(pq_options, atype == "src" ? src : dst, addr_ary, us_states, us_states_long, towns_a2q, st_types_2qry, dirs_2qry, ords_2qry)
if(pr_hdr){
pr_hdr = 0
if(atype == "src")
printf("status\tdate\tsrc\tdst\tqSrc\tsName\n")
else
printf("status\tdate\tsrc\tdst\tqDst\tdName\n")
}
printf("%s", addr_ary["status"])
if(addr_ary["status"] == "B")
printf(", %s", addr_ary["emsg"])
printf("\t%s\t%s\t%s", date, src, dst)
if(addr_ary["status"] == "B")
printf("\t\t")
else
printf("\t%s, %s, %s\t%s", addr_ary["street"], addr_ary["town"], addr_ary["state"], addr_ary["name"])
printf("\n")
}' $FILE
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.