blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
b58632a8961857bb45f92999a5d851a3e572065d
|
Shell
|
Neyzoter/TestScript
|
/linux/dos2unixUtil.sh
|
UTF-8
| 263
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Interactive wrapper around dos2unix: convert a file's line endings,
# either in place or into a new file, then show the first line with
# visible end-of-line markers so the result can be verified.
echo "Please input file name ..."
read -r file
echo "Overwrite?[y/n]"
read -r overwrite
# Quote $overwrite: the unquoted test was a syntax error on empty input.
if [ "$overwrite" = "y" ]; then
  # In-place conversion.
  dos2unix "$file"
else
  # Convert into a new file: -k keeps the original timestamp,
  # -n writes to a new file instead of overwriting.
  echo "Please input file name ..."
  read -r newFile
  dos2unix -k -n "$file" "$newFile"
fi
# Show the first line with end-of-line markers ($ marks LF).
cat -e "$file" | head -n 1
| true
|
a7debe18b4c4d30400d2852fe23fc72b101a7be4
|
Shell
|
romarcablao/kubernetes-vm-setup
|
/kubernetes-docker/scripts/common.sh
|
UTF-8
| 2,275
| 3.375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Provision a host with Docker CE and pinned Kubernetes components
# (kubeadm/kubelet/kubectl 1.18.5-00). Must run as root: it writes to
# /etc/apt and /etc/docker and manages system services.
# $1 is a label echoed in every progress banner.
FLAG=$1

# banner MSG - print the framed progress banner tagged with $FLAG.
banner() {
  echo "------------------------------------------------------------------------------"
  echo " $FLAG"
  echo " $FLAG ->> $1"
  echo " $FLAG"
  echo "------------------------------------------------------------------------------"
}

banner "Adding Kubernetes and Docker-CE Repo"
### Install packages to allow apt to use a repository over HTTPS
apt-get update && apt-get install -y apt-transport-https ca-certificates curl gnupg-agent software-properties-common
### Add Kubernetes GPG key
# -fsSL (was -s) so an HTTP error fails loudly instead of piping an
# error page into apt-key; matches the Docker key fetch below.
curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
### Kubernetes Repo
echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list
### Add Docker's official GPG key
# (dropped the lone 'sudo': every other command here already runs as root)
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
### Add Docker apt repository.
add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"

banner "Updating Repositories"
apt-get update

banner "Installing Misc/Recommended Packages"
apt-get install -y avahi-daemon libnss-mdns traceroute htop httpie bash-completion

banner "Installing Docker and Kubernetes"
apt-get install -y docker-ce docker-ce-cli containerd.io
apt-get install -y kubeadm=1.18.5-00 kubelet=1.18.5-00 kubectl=1.18.5-00

# Setup Docker daemon: systemd cgroup driver (what kubeadm expects),
# bounded json-file logs, overlay2 storage. mkdir -p guards the case
# where the docker-ce install did not create /etc/docker.
mkdir -p /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"storage-driver": "overlay2"
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
# Restart docker so the new daemon.json takes effect.
systemctl daemon-reload
systemctl restart docker
| true
|
d0c9911e8b1a26edabe4fecbd8f6588001e6c66a
|
Shell
|
Azure-Samples/azure-cli-samples
|
/sql-database/failover-groups/add-managed-instance-to-failover-group-az-cli.sh
|
UTF-8
| 11,719
| 3.171875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Failed validation in Bash 12/01/2021 - not yet supported in Managed Instance using Azure CLI.
# In order to establish failover group between two SQL MIs, both of them have to be part of the same DNS zone.
# To achieve this, you need to provide instance partner to the secondary instance during creation.
# However, this property is not yet available in CLI
# So, not surfaced in md file or in TOC
# Due to deployment times, you should plan for a full day to complete the entire script. You can monitor deployment progress in the activity log within the Azure portal. For more information on deployment times, see https://docs.microsoft.com/azure/sql-database/sql-database-managed-instance#managed-instance-management-operations.

# Random suffix so resource names are unique per run.
randomIdentifier=$((RANDOM * RANDOM))
location="East US"
resourceGroup="msdocs-azuresql-rg-$randomIdentifier"
tag="add-managed-instance-to-failover-group-az-cli"
vnet="msdocs-azuresql-vnet-$randomIdentifier"
subnet="msdocs-azuresql-subnet-$randomIdentifier"
nsg="msdocs-azuresql-nsg-$randomIdentifier"
route="msdocs-azuresql-route-$randomIdentifier"
instance="msdocs-azuresql-instance-$randomIdentifier"
login="azureuser"
# NB: the dollar signs must be escaped — unescaped "$$" expanded to the
# shell's PID, silently changing the password.
password="Pa\$\$w0rD-$randomIdentifier"
vpnSharedKey="abc123"
gateway="msdocs-azuresql-gateway-$randomIdentifier"
gatewayIp="$gateway-ip"
gatewayConnection="$gateway-connection"
failoverResourceGroup="msdocs-azuresql-failover-rg-$randomIdentifier"
failoverLocation="Central US"
failoverGroup="msdocs-azuresql-failover-group-$randomIdentifier"
failoverVnet="msdocs-azuresql-failover-vnet-$randomIdentifier"
failoverSubnet="msdocs-azuresql-failover-subnet-$randomIdentifier"
failoverNsg="msdocs-azuresql-failover-nsg-$randomIdentifier"
failoverRoute="msdocs-azuresql-failover-route-$randomIdentifier"
failoverInstance="msdocs-azuresql-failover-instance-$randomIdentifier"
failoverGateway="msdocs-azuresql-failover-gateway-$randomIdentifier"
failoverGatewayIP="$failoverGateway-ip"
failoverGatewayConnection="$failoverGateway-connection"
echo "Using resource groups $resourceGroup and $failoverResourceGroup with login: $login, password: $password..."
echo "Creating $resourceGroup in $location and $failoverResourceGroup in $failoverLocation..."
az group create --name $resourceGroup --location "$location" --tags $tag
az group create --name $failoverResourceGroup --location "$failoverLocation"
echo "Creating $vnet with $subnet..."
az network vnet create --name $vnet --resource-group $resourceGroup --location "$location" --address-prefixes 10.0.0.0/16
az network vnet subnet create --name $subnet --resource-group $resourceGroup --vnet-name $vnet --address-prefixes 10.0.0.0/24 --delegations Microsoft.Sql/managedInstances
echo "Creating $nsg..."
az network nsg create --name $nsg --resource-group $resourceGroup --location "$location"
az network nsg rule create --name "allow_management_inbound" --nsg-name $nsg --priority 100 --resource-group $resourceGroup --access Allow --destination-address-prefixes 10.0.0.0/24 --destination-port-ranges 9000 9003 1438 1440 1452 --direction Inbound --protocol Tcp --source-address-prefixes "*" --source-port-ranges "*"
az network nsg rule create --name "allow_misubnet_inbound" --nsg-name $nsg --priority 200 --resource-group $resourceGroup --access Allow --destination-address-prefixes 10.0.0.0/24 --destination-port-ranges "*" --direction Inbound --protocol "*" --source-address-prefixes 10.0.0.0/24 --source-port-ranges "*"
az network nsg rule create --name "allow_health_probe_inbound" --nsg-name $nsg --priority 300 --resource-group $resourceGroup --access Allow --destination-address-prefixes 10.0.0.0/24 --destination-port-ranges "*" --direction Inbound --protocol "*" --source-address-prefixes AzureLoadBalancer --source-port-ranges "*"
az network nsg rule create --name "allow_management_outbound" --nsg-name $nsg --priority 1100 --resource-group $resourceGroup --access Allow --destination-address-prefixes AzureCloud --destination-port-ranges 443 12000 --direction Outbound --protocol Tcp --source-address-prefixes 10.0.0.0/24 --source-port-ranges "*"
az network nsg rule create --name "allow_misubnet_outbound" --nsg-name $nsg --priority 200 --resource-group $resourceGroup --access Allow --destination-address-prefixes 10.0.0.0/24 --destination-port-ranges "*" --direction Outbound --protocol "*" --source-address-prefixes 10.0.0.0/24 --source-port-ranges "*"
echo "Creating $route..."
az network route-table create --name $route --resource-group $resourceGroup --location "$location"
az network route-table route create --address-prefix 0.0.0.0/0 --name "primaryToMIManagementService" --next-hop-type Internet --resource-group $resourceGroup --route-table-name $route
az network route-table route create --address-prefix 10.0.0.0/24 --name "ToLocalClusterNode" --next-hop-type VnetLocal --resource-group $resourceGroup --route-table-name $route
echo "Configuring $subnet with $nsg and $route..."
az network vnet subnet update --name $subnet --network-security-group $nsg --route-table $route --vnet-name $vnet --resource-group $resourceGroup
# This step will take awhile to complete. You can monitor deployment progress in the activity log within the Azure portal.
echo "Creating $instance with $vnet and $subnet..."
az sql mi create --admin-password $password --admin-user $login --name $instance --resource-group $resourceGroup --subnet $subnet --vnet-name $vnet --location "$location" --assign-identity
echo "Creating $failoverVnet with $failoverSubnet..."
az network vnet create --name $failoverVnet --resource-group $failoverResourceGroup --location "$failoverLocation" --address-prefixes 10.128.0.0/16
az network vnet subnet create --name $failoverSubnet --resource-group $failoverResourceGroup --vnet-name $failoverVnet --address-prefixes 10.128.0.0/24 --delegations Microsoft.Sql/managedInstances
echo "Creating $failoverNsg..."
az network nsg create --name $failoverNsg --resource-group $failoverResourceGroup --location "$failoverLocation"
az network nsg rule create --name "allow_management_inbound" --nsg-name $failoverNsg --priority 100 --resource-group $failoverResourceGroup --access Allow --destination-address-prefixes 10.128.0.0/24 --destination-port-ranges 9000 9003 1438 1440 1452 --direction Inbound --protocol Tcp --source-address-prefixes "*" --source-port-ranges "*"
az network nsg rule create --name "allow_misubnet_inbound" --nsg-name $failoverNsg --priority 200 --resource-group $failoverResourceGroup --access Allow --destination-address-prefixes 10.128.0.0/24 --destination-port-ranges "*" --direction Inbound --protocol "*" --source-address-prefixes 10.128.0.0/24 --source-port-ranges "*"
az network nsg rule create --name "allow_health_probe_inbound" --nsg-name $failoverNsg --priority 300 --resource-group $failoverResourceGroup --access Allow --destination-address-prefixes 10.128.0.0/24 --destination-port-ranges "*" --direction Inbound --protocol "*" --source-address-prefixes AzureLoadBalancer --source-port-ranges "*"
az network nsg rule create --name "allow_management_outbound" --nsg-name $failoverNsg --priority 1100 --resource-group $failoverResourceGroup --access Allow --destination-address-prefixes AzureCloud --destination-port-ranges 443 12000 --direction Outbound --protocol Tcp --source-address-prefixes 10.128.0.0/24 --source-port-ranges "*"
az network nsg rule create --name "allow_misubnet_outbound" --nsg-name $failoverNsg --priority 200 --resource-group $failoverResourceGroup --access Allow --destination-address-prefixes 10.128.0.0/24 --destination-port-ranges "*" --direction Outbound --protocol "*" --source-address-prefixes 10.128.0.0/24 --source-port-ranges "*"
echo "Creating $failoverRoute..."
az network route-table create --name $failoverRoute --resource-group $failoverResourceGroup --location "$failoverLocation"
az network route-table route create --address-prefix 0.0.0.0/0 --name "primaryToMIManagementService" --next-hop-type Internet --resource-group $failoverResourceGroup --route-table-name $failoverRoute
az network route-table route create --address-prefix 10.128.0.0/24 --name "ToLocalClusterNode" --next-hop-type VnetLocal --resource-group $failoverResourceGroup --route-table-name $failoverRoute
echo "Configuring $failoverSubnet with $failoverNsg and $failoverRoute..."
az network vnet subnet update --name $failoverSubnet --network-security-group $failoverNsg --route-table $failoverRoute --vnet-name $failoverVnet --resource-group $failoverResourceGroup
# This step will take awhile to complete. You can monitor deployment progress in the activity log within the Azure portal.
echo "Creating $failoverInstance with $failoverVnet and $failoverSubnet..."
az sql mi create --admin-password $password --admin-user $login --name $failoverInstance --resource-group $failoverResourceGroup --subnet $failoverSubnet --vnet-name $failoverVnet --location "$failoverLocation" --assign-identity
echo "Creating gateway..."
az network vnet subnet create --name "GatewaySubnet" --resource-group $resourceGroup --vnet-name $vnet --address-prefixes 10.0.255.0/27
az network public-ip create --name $gatewayIp --resource-group $resourceGroup --allocation-method Dynamic --location "$location"
az network vnet-gateway create --name $gateway --public-ip-addresses $gatewayIp --resource-group $resourceGroup --vnet $vnet --asn 61000 --gateway-type Vpn --location "$location" --sku VpnGw1 --vpn-type RouteBased #-EnableBgp $true
echo "Creating failover gateway..."
az network vnet subnet create --name "GatewaySubnet" --resource-group $failoverResourceGroup --vnet-name $failoverVnet --address-prefixes 10.128.255.0/27
az network public-ip create --name $failoverGatewayIP --resource-group $failoverResourceGroup --allocation-method Dynamic --location "$failoverLocation"
az network vnet-gateway create --name $failoverGateway --public-ip-addresses $failoverGatewayIP --resource-group $failoverResourceGroup --vnet $failoverVnet --asn 62000 --gateway-type Vpn --location "$failoverLocation" --sku VpnGw1 --vpn-type RouteBased
echo "Connecting gateway and failover gateway..."
az network vpn-connection create --name $gatewayConnection --resource-group $resourceGroup --vnet-gateway1 $gateway --enable-bgp --location "$location" --vnet-gateway2 $failoverGateway --shared-key $vpnSharedKey
az network vpn-connection create --name $failoverGatewayConnection --resource-group $failoverResourceGroup --vnet-gateway1 $failoverGateway --enable-bgp --location "$failoverLocation" --shared-key $vpnSharedKey --vnet-gateway2 $gateway
echo "Creating the failover group..."
# Fixed: missing space between $failoverGroup and the next option fused
# "--partner-mi"/"--resource-group" into the group name, and the two
# set-primary calls referenced an undefined $resource variable.
az sql instance-failover-group create --mi $instance --name $failoverGroup --partner-mi $failoverInstance --resource-group $resourceGroup --partner-resource-group $failoverResourceGroup --failover-policy Automatic --grace-period 1
az sql instance-failover-group show --location "$location" --name $failoverGroup --resource-group $resourceGroup # verify the primary role
echo "Failing managed instance over to secondary location..."
az sql instance-failover-group set-primary --location "$failoverLocation" --name $failoverGroup --resource-group $resourceGroup
az sql instance-failover-group show --location "$failoverLocation" --name $failoverGroup --resource-group $resourceGroup # verify the primary role
echo "Failing managed instance back to primary location..."
az sql instance-failover-group set-primary --location "$location" --name $failoverGroup --resource-group $resourceGroup
az sql instance-failover-group show --location "$location" --name $failoverGroup --resource-group $resourceGroup # verify the primary role
# echo "Deleting all resources"
# az group delete --name $failoverResourceGroup -y
# az group delete --name $resourceGroup -y
| true
|
d2d522b8b1de2ff71071d6aaabe6336236573a60
|
Shell
|
LesyaMazurevich/slackbuilds
|
/qt5/qt5-qtbase/qt5-qtbase.SlackBuild
|
UTF-8
| 16,619
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/sh
#-- qt5-qtbase for Slackware --
# Build script by Phantom X <megaphantomx at bol.com.br>
# Suggested usage: $ qt5-qtbase.SlackBuild 2>&1 | tee build.log
#--
# Copyright 2008-2015 Phantom X, Goiania, Brazil.
# Copyright 2006 Martijn Dekker, Groningen, Netherlands.
# All rights reserved.
#
# Redistribution and use of this script, with or without modification, is
# permitted provided that the following conditions are met:
#
# 1. Redistributions of this script must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR `AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# http://qt-project.org/
# Packager identity: embedded in the package filename and slack-desc.
PACKAGER_ID=${PACKAGER_ID:-$USER}
PACKAGER=${PACKAGER:-$USER@$HOSTNAME}
# Set YES for native build with gcc >= 4.2
SB_NATIVE=${SB_NATIVE:-NO}
# Set to YES to replicate slackbuild and patches
SB_REP=${SB_REP:-YES}
# Build locations: CWD holds sources/patches, TMP is the scratch area.
CWD=$(pwd)
TMP=${TMP:-/tmp}
if [ ! -d ${TMP} ]; then
mkdir -p ${TMP}
fi
PNAME=qtbase
NAME=qt5-qtbase
PKG=${PKG:-${TMP}/package-${NAME}}
VERSION=${VERSION:-5.5.0}
# RVER: version without any -suffix (e.g. -rc); SVER: major.minor only.
RVER=$(echo ${VERSION} | cut -d- -f1)
SVER=$(echo ${RVER} | cut -d. -f1-2)
if [ "${SB_NATIVE}" = "YES" ] ;then
ARCH=${ARCH:-$(uname -m)}
else
ARCH=${ARCH:-x86_64}
fi
# Map the build arch onto the Slackware package target.
if [ "${ARCH}" = "x86_64" ] ;then
SLKTARGET=${SLKTARGET:-x86_64}
else
SLKTARGET=${SLKTARGET:-i586}
fi
SLKDTARGET=${SLKDTARGET:-slackware}
BUILD=${BUILD:-1}
# Parallel make jobs: number of online CPUs plus one.
NJOBS=${NJOBS:-$(( $(getconf _NPROCESSORS_ONLN) + 1 ))}
DOCDIR=${PKG}/usr/doc/${NAME}-${VERSION}
SBDIR=${PKG}/usr/src/slackbuilds/qt5/${NAME}
PKGDEST=${PKGDEST:-${CWD}}
PKGFORMAT=${PKGFORMAT:-txz}
PKGNAME=${NAME}-$(echo ${VERSION} | tr - . )-${ARCH}-${BUILD}${PACKAGER_ID}
# Set to YES to enable precompiled headers
SB_PCH=${SB_PCH:-NO}
# Set to YES to build demo and examples
SB_DEMOS=${SB_DEMOS:-NO}
# Set to YES to enable postgresql plugin
SB_PGSQL=${SB_PGSQL:-YES}
# Set to YES to enable systemd journald support
SB_SYSTEMD=${SB_SYSTEMD:-YES}
DATE=$(LC_ALL=C date +%d-%b-%Y)
# Upstream source archive name and download URLs (release + snapshot).
SRCDIR=${PNAME}-opensource-src-${RVER}
SRCARCHIVE=${PNAME}-opensource-src-${VERSION}.tar.xz
DL_PROG=${DL_PROG:-wget}
DL_TO=${DL_TO:-5}
DL_OPTS=${DL_OPTS:-"--timeout=${DL_TO}"}
DL_URL="http://download.qt-project.org/official_releases/qt/${SVER}/${VERSION}/submodules/${SRCARCHIVE}"
DL_URLB="http://download.qt-project.org/snapshots/qt/${SVER}/${VERSION//RC/rc}/submodules/${SRCARCHIVE}"
# if source is not present, download in source rootdir if possible
test -r ${CWD}/${SRCARCHIVE} || ${DL_PROG} ${DL_OPTS} ${DL_URL} || ${DL_PROG} ${DL_OPTS} ${DL_URLB} || exit 1
# Pick optimization flags for the target architecture.
if [ "${SB_NATIVE}" = "YES" ] ;then
SLKCFLAGS="-O2 -march=native -mtune=native -pipe"
[ "${SB_ECFLAGS}" ] && SLKCFLAGS="${SLKCFLAGS} ${SB_ECFLAGS}"
else
case "${ARCH}" in
i[3-6]86) SLKCFLAGS="-O2 -march=${ARCH} -mtune=i686"
;;
x86_64) SLKCFLAGS="-O2 -fPIC"
;;
s390|*) SLKCFLAGS="-O2"
;;
esac
fi
if [ "${ARCH}" = "x86_64" ] ;then
LIBDIRSUFFIX="64"
SLKCFLAGS="${SLKCFLAGS} -fPIC"
else
LIBDIRSUFFIX=""
fi
# Build flags are injected via qmake configs below, not the environment.
unset CFLAGS CXXFLAGS CPPFLAGS
if [ -d ${PKG} ]; then
# Clean up a previous build
rm -rf ${PKG}
fi
mkdir -p ${PKG}
# Unpack the source into a clean tree.
cd ${TMP}
rm -rf ${SRCDIR}
tar -xvf ${CWD}/${SRCARCHIVE} || exit 1
cd ${SRCDIR} || exit 1
SB_SROOT="$(pwd)"
# Set the config option variables if they are not already set:
[ -r ../qt5.options ] && source ../qt5.options
# these should match contents of qt5.macros:
_qt5_prefix=${_qt5_prefix:-/usr/lib${LIBDIRSUFFIX}/qt5}
_qt5_archdatadir=${_qt5_archdatadir:-/usr/lib${LIBDIRSUFFIX}/qt5}
_qt5_bindir=${_qt5_bindir:-${_qt5_prefix}/bin}
_qt5_datadir=${_qt5_datadir:-/usr/share/qt5}
_qt5_docdir=${_qt5_docdir:-/usr/doc/qt5}
_qt5_examplesdir=${_qt5_examplesdir:-${_qt5_prefix}/examples}
_qt5_headerdir=${_qt5_headerdir:-/usr/include/qt5}
_qt5_importdir=${_qt5_importdir:-${_qt5_archdatadir}/imports}
_qt5_libdir=${_qt5_libdir:-/usr/lib${LIBDIRSUFFIX}}
_qt5_libexecdir=${_qt5_libexecdir:-${_qt5_archdatadir}/libexec}
_qt5_plugindir=${_qt5_plugindir:-${_qt5_archdatadir}/plugins}
_qt5_settingsdir=${_qt5_settingsdir:-/etc/xdg}
_qt5_sysconfdir=${_qt5_sysconfdir:-${_qt5_settingsdir}}
_qt5_translationdir=${_qt5_translationdir:-/usr/share/qt5/translations}
# Normalize source-tree permissions, then apply local patches if present.
chmod -R u+w,go+r-w,a-s .
if [ -r ${CWD}/apply-patches.sh ]; then
. ${CWD}/apply-patches.sh
fi
# drop -fexceptions from $SLKCFLAGS
SLKCFLAGS="$(echo ${SLKCFLAGS} | sed 's|-fexceptions||g')"
## customize our platform
if [ "lib${LIBDIRSUFFIX}" == "lib64" ] ;then
platform=linux-g++-64
else
platform=linux-g++
fi
# Inject our CFLAGS into the qmake specs in place of the stock -O2/-O3.
sed -i \
-e "s|-O2|${SLKCFLAGS}|g" \
-e '/^QMAKE_LIBDIR_X11/d' \
-e '/^QMAKE_LIBDIR_OPENGL/d' \
mkspecs/linux-g++{,-32,-64}/qmake.conf || exit 1
sed -i \
-e "s|-O2|${SLKCFLAGS}|g" \
-e "s|-O3|${SLKCFLAGS}|g" \
mkspecs/common/qcc-base.conf || exit 1
# NOTE(review): this tests _qt4_libdir, not _qt5_libdir — presumably a
# leftover from the qt4 version of this script; confirm whether the
# linux.conf cleanup is ever meant to run here.
if [ "${_qt4_libdir}" == "/usr/lib${LIBDIRSUFFIX}" ] ;then
sed -i \
-e "s,QMAKE_LIBDIR_X11.*,QMAKE_LIBDIR_X11\t=," \
-e "s,QMAKE_INCDIR_X11.*,QMAKE_INCDIR_X11\t=," \
-e "s,QMAKE_INCDIR_OPENGL.*,QMAKE_INCDIR_OPENGL\t=," \
-e "s,QMAKE_LIBDIR_OPENGL.*,QMAKE_LIBDIR_OPENGL\t=," \
mkspecs/common/linux.conf
fi
# Build against the merged libsystemd instead of libsystemd-journal.
sed -i -e 's|libsystemd-journal|libsystemd|g' \
config.tests/unix/journald/journald.pro src/corelib/global/global.pri || exit 1
# Propagate any external LDFLAGS through qmake's release link flags.
unset SLKLDFLAGS
if [ "${LDFLAGS}" ] ;then
SLKLDFLAGS="${LDFLAGS}"
sed -i -e "s|^\(QMAKE_LFLAGS_RELEASE.*\)|\1 ${SLKLDFLAGS}|" \
mkspecs/common/g++-unix.conf || exit 1
fi
unset LDFLAGS
# move some bundled libs to ensure they're not accidentally used
( cd src/3rdparty
mkdir UNUSED
mv freetype libjpeg libpng sqlite xcb xkbcommon zlib UNUSED/
) || exit $?
# Translate the SB_* feature toggles into configure options.
unset SB_DEMOSOPTS SB_PCHOPTS SB_PGSQLOPTS SB_SYSTEMDOPTS
if [ "${SB_DEMOS}" != "YES" ] ; then
sed -i '/^CFG_NOBUILD_PARTS=/s|"$| demos examples"|g' \
configure || exit 1
SB_DEMOSOPTS="-nomake demos -nomake examples"
fi
[ "${SB_PCH}" == "YES" ] || SB_PCHOPTS="-no-pch"
[ "${SB_PGSQL}" = "YES" ] && SB_PGSQLOPTS="-plugin-sql-psql"
[ "${SB_SYSTEMD}" = "YES" ] && SB_SYSTEMDOPTS='-journald'
# Configure, build, and stage the install into ${PKG}.
./configure -v \
-confirm-license \
-opensource \
-prefix ${_qt5_prefix} \
-archdatadir ${_qt5_archdatadir} \
-bindir ${_qt5_bindir} \
-datadir ${_qt5_datadir} \
-docdir ${_qt5_docdir} \
-examplesdir ${_qt5_examplesdir} \
-headerdir ${_qt5_headerdir} \
-importdir ${_qt5_importdir} \
-libdir ${_qt5_libdir} \
-libexecdir ${_qt5_libexecdir} \
-plugindir ${_qt5_plugindir} \
-sysconfdir ${_qt5_sysconfdir} \
-translationdir ${_qt5_translationdir} \
-platform ${platform} \
-release \
-shared \
-accessibility \
-dbus-linked \
-fontconfig \
-glib \
-gtkstyle \
-iconv \
-icu \
-openssl-linked \
-optimized-qmake \
-nomake tests \
${SB_PCHOPTS} \
-no-rpath \
-no-separate-debug-info \
-no-strip \
${SB_PGSQLOPTS} \
-system-harfbuzz \
-system-libjpeg \
-system-libpng \
-system-pcre \
-system-sqlite \
-system-xkbcommon \
-system-zlib \
${SB_SYSTEMDOPTS} \
|| exit $?
# Fall back to a serial make if the parallel build fails.
make -j${NJOBS} || make || exit $?
make install INSTALL_ROOT=${PKG} || exit $?
# create/own dirs
mkdir -p ${PKG}/${_qt5_archdatadir}/mkspecs/modules
mkdir -p ${PKG}/${_qt5_importdir}
mkdir -p ${PKG}/${_qt5_libexecdir}
mkdir -p ${PKG}/${_qt5_plugindir}/iconengines
mkdir -p ${PKG}/${_qt5_translationdir}
# put non-conflicting binaries with -qt5 postfix in ${_bindir}
if [ "/usr/bin" != "${_qt5_bindir}" ] ;then
# Relative paths between /usr/bin and the Qt bindir for the symlinks.
reldir=$(perl -e 'use File::Spec; print File::Spec->abs2rel($ARGV[0], $ARGV[1])' /usr/bin ${_qt5_bindir})
reldir2=$(perl -e 'use File::Spec; print File::Spec->abs2rel($ARGV[0], $ARGV[1])' ${_qt5_bindir} /usr/bin)
mkdir -p ${PKG}/usr/bin
( cd ${PKG}/${_qt5_bindir}
for i in * ; do
case "${i}" in
moc|qdbuscpp2xml|qdbusxml2cpp|qmake|rcc|syncqt|uic)
ln -sv ${reldir2}/${i} ${reldir}/${i}-qt5
ln -sv ${i} ${i}-qt5
;;
*)
ln -sv ${reldir2}/${i} ${reldir}/${i}
;;
esac
done
) || exit 1
fi
## .prl/.la file love
# nuke .prl reference(s) to %%buildroot, excessive (.la-like) libs
( cd ${PKG}/${_qt5_libdir}
for prl_file in libQt5*.prl ; do
sed -i -e "/^QMAKE_PRL_BUILD_DIR/d" ${prl_file}
if [ -f "$(basename ${prl_file} .prl).so" ]; then
rm -fv "$(basename ${prl_file} .prl).la"
sed -i -e "/^QMAKE_PRL_LIBS/d" ${prl_file}
fi
done
)
# Remove build-tree and redundant -L paths from pkgconfig/.prl files.
sed -i \
-e "s|-L${SB_SROOT}/lib||g" \
-e "/^Libs.private/s|-L/usr/lib${LIBDIRSUFFIX} ||g" \
-e "/^QMAKE_PRL_LIBS/s|-L/usr/lib${LIBDIRSUFFIX} ||g" \
${PKG}/${_qt5_libdir}/pkgconfig/*.pc \
${PKG}/${_qt5_libdir}/*.prl
# Strip unneeded symbols from all ELF executables and shared objects.
find ${PKG} | xargs file | grep -e "executable" -e "shared object" | grep ELF \
| cut -f 1 -d : | xargs strip --strip-unneeded 2> /dev/null
unset MARCH
# multilib: qconfig.h
case "${ARCH}" in
i[3-6]86) MARCH=32
;;
s390) MARCH=32
;;
x86_64) MARCH=64
;;
esac
# Replace qconfig.h with a wordsize dispatcher so 32/64-bit packages
# can coexist on multilib systems.
if [ -n "${MARCH}" ] ;then
mv ${PKG}/${_qt5_headerdir}/QtCore/qconfig.h \
${PKG}/${_qt5_headerdir}/QtCore/qconfig-${MARCH}.h || exit 1
cat > ${PKG}/${_qt5_headerdir}/QtCore/qconfig.h <<'EOF'
/* qconfig.h */
/* This file is here to prevent a file conflict on multiarch systems. A
 * conflict will occur because qconfig.h has arch-specific definitions.
 *
 * DO NOT INCLUDE THE NEW FILE DIRECTLY -- ALWAYS INCLUDE THIS ONE INSTEAD. */
#ifndef QCONFIG_MULTILIB_H
#define QCONFIG_MULTILIB_H
#include <bits/wordsize.h>
#if __WORDSIZE == 32
#include "QtCore/qconfig-32.h"
#elif __WORDSIZE == 64
#include "QtCore/qconfig-64.h"
#else
#error "unexpected value for __WORDSIZE macro"
#endif
#endif
EOF
chmod 0644 ${PKG}/${_qt5_headerdir}/QtCore/qconfig.h || exit 1
fi
# qtchooser configuration pointing at this Qt 5 install.
mkdir -p ${PKG}/etc/xdg/qtchooser
( cd ${PKG}/etc/xdg/qtchooser
echo "${_qt5_bindir}" > 5.conf
echo "${_qt5_prefix}" >> 5.conf
if [ -n "${MARCH}" ] ;then
mv 5.conf 5-${MARCH}.conf
ln -sf 5-${MARCH}.conf 5.conf
fi
)
# Put this back as shipped:
sed -i -e "s|${SLKCFLAGS}|-O2|g" \
${PKG}/${_qt5_archdatadir}/mkspecs/*/qmake.conf || exit 1
sed -i \
-e "s|${SLKCFLAGS}|-O2|g" \
-e "/QMAKE_CFLAGS_OPTIMIZE_FULL/s|${SLKCFLAGS}|-O3|g" \
mkspecs/common/qcc-base.conf || exit 1
if [ "${SLKLDFLAGS}" ] ;then
sed -i -e "/^QMAKE_LFLAGS_RELEASE/s| ${SLKLDFLAGS}||g" \
${PKG}/${_qt5_archdatadir}/mkspecs/common/g++-unix.conf || exit 1
fi
if [ "${_qt5_docdir}" != "${_qt5_prefix}/doc" ] ;then
# -doc make symbolic link to _qt5_docdir
rm -rf ${PKG}/${_qt5_prefix}/doc
ln -s ../../doc/qt5 ${PKG}/${_qt5_prefix}/doc
fi
# Qt5.pc
cat > ${PKG}/usr/lib${LIBDIRSUFFIX}/pkgconfig/Qt5.pc <<EOFP
prefix=${_qt5_prefix}
archdatadir=${_qt5_archdatadir}
bindir=${_qt5_bindir}
datadir=${_qt5_datadir}
docdir=${_qt5_docdir}
examplesdir=${_qt5_examplesdir}
headerdir=${_qt5_headerdir}
importdir=${_qt5_importdir}
libdir=${_qt5_libdir}
libexecdir=${_qt5_libexecdir}
moc=${_qt5_bindir}/moc
plugindir=${_qt5_plugindir}
qmake=${_qt5_bindir}/qmake
settingsdir=${_qt5_settingsdir}
sysconfdir=${_qt5_sysconfdir}
translationdir=${_qt5_translationdir}
Name: Qt5
Description: Qt5 Configuration
Version: ${RVER}
EOFP
# rpm macros
# NOTE(review): the heredoc below expands ${version} (lowercase), but
# this script only defines VERSION — so %_qt5_evr gets an empty version
# field. Confirm whether ${VERSION} was intended.
mkdir -p ${PKG}/usr/lib/rpm/macros.d
cat > ${PKG}/usr/lib/rpm/macros.d/macros.qt5 <<EOFM
%_qt5 ${NAME}
%_qt5_version ${RVER}
%_qt5_evr ${version}-${BUILD}
%_qt5_prefix %{_libdir}/qt5
%_qt5_archdatadir %{_qt5_prefix}
%_qt5_bindir %{_qt5_prefix}/bin
%_qt5_datadir %{_datadir}/qt5
%_qt5_docdir %{_docdir}/qt5
%_qt5_examples %{_qt5_prefix}/examples
%_qt5_headerdir %{_includedir}/qt5
%_qt5_importdir %{_qt5_archdatadir}/imports
%_qt5_libdir %{_libdir}
%_qt5_libexecdir %{_qt5_archdatadir}/libexec
%_qt5_plugindir %{_qt5_archdatadir}/plugins
%_qt5_qmake %{_qt5_bindir}/qmake
%_qt5_settingsdir %{_sysconfdir}/xdg
%_qt5_sysconfdir %{_qt5_settingsdir}
%_qt5_translationdir %{_datadir}/qt5/translations
%qmake_qt5 \
%{_qt5_qmake} \\\
QMAKE_CFLAGS_DEBUG="${CFLAGS:-%optflags}" \\\
QMAKE_CFLAGS_RELEASE="${CFLAGS:-%optflags}" \\\
QMAKE_CXXFLAGS_DEBUG="${CXXFLAGS:-%optflags}" \\\
QMAKE_CXXFLAGS_RELEASE="${CXXFLAGS:-%optflags}" \\\
QMAKE_LFLAGS_DEBUG="${LDFLAGS:-%{?__global_ldflags}}" \\\
QMAKE_LFLAGS_RELEASE="${LDFLAGS:-%{?__global_ldflags}}" \\\
QMAKE_STRIP=
EOFM
# X session snippet: force software OpenGL when the GL version is < 2.
mkdir -p ${PKG}/etc/X11/xinit/xinitrc.d
cat > ${PKG}/etc/X11/xinit/xinitrc.d/10-qt5-check-opengl2.sh <<'EOF'
#!/bin/sh
OPENGL_VERSION=$(LANG=C glxinfo | grep '^OpenGL version string: ' | sed -e 's/^OpenGL version string: \([0-9]\).*$/\1/g')
if [ "${OPENGL_VERSION}" -lt 2 ]; then
QT_XCB_FORCE_SOFTWARE_OPENGL=1
export QT_XCB_FORCE_SOFTWARE_OPENGL
fi
EOF
chmod 0755 ${PKG}/etc/X11/xinit/xinitrc.d/10-qt5-check-opengl2.sh || exit 1
# Default logging rules (installed as .new, handled by doinst.sh below).
mkdir -p ${PKG}/etc/xdg/QtProject
cat > ${PKG}/etc/xdg/QtProject/qtlogging.ini.new <<'EOF'
[Rules]
*.debug=false
EOF
# Add a documentation directory:
( cd ${PKG}/usr/doc && ln -s ${NAME} ${NAME}-${VERSION} )
#mkdir -p ${DOCDIR}
cp -a \
LICENSE.GPL LICENSE.LGPL LGPL_EXCEPTION.txt ${CWD}/ChangeLog.SB \
${DOCDIR}/
[ -r dist/changes-${VERSION} ] && head -n 1000 dist/changes-${VERSION} > ${DOCDIR}/changes-${VERSION}
find ${DOCDIR}/ -type d -print0 | xargs -0 chmod 0755
find ${DOCDIR}/ -type f -print0 | xargs -0 chmod 0644
find ${DOCDIR}/ -type f -size 0 -print0 | xargs -0 rm -f
# Compress and link manpages, if any:
if [ -d ${PKG}/usr/share/man ]; then
mv ${PKG}/usr/share/man ${PKG}/usr/man
rmdir ${PKG}/usr/share
fi
if [ -d ${PKG}/usr/man ]; then
( cd ${PKG}/usr/man
for manpagedir in $(find . -type d -name "man*") ; do
( cd ${manpagedir}
for eachpage in $( find . -type l -maxdepth 1) ; do
ln -s $( readlink ${eachpage} ).gz ${eachpage}.gz
rm -f ${eachpage}
done
gzip -9 *.?
# Prevent errors
rm -f *.gz.gz
)
done
)
fi
# Package metadata plus the post-install config() handler for .new files.
mkdir -p ${PKG}/install
cat ${CWD}/slack-desc > ${PKG}/install/slack-desc
cat ${CWD}/slack-required > ${PKG}/install/slack-required
cat > ${PKG}/install/doinst.sh <<EOF
#!/bin/sh
config() {
NEW="\$1"
OLD="\$(dirname \$NEW)/\$(basename \$NEW .new)"
# If there's no config file by that name, mv it over:
if [ ! -r \$OLD ]; then
mv \$NEW \$OLD
elif [ "\$(cat \$OLD | md5sum)" = "\$(cat \$NEW | md5sum)" ]; then
# toss the redundant copy
rm \$NEW
fi
# Otherwise, we leave the .new copy for the admin to consider...
}
## List of conf files to check. The conf files in your package should end in .new
EOF
( cd ${PKG}
find etc/ -name '*.new' -exec echo config {} ';' | sort >> ${PKG}/install/doinst.sh
find etc/ -name '*.new' -a -size 0 -exec echo rm -f {} ';' | sort >> ${PKG}/install/doinst.sh
echo >> ${PKG}/install/doinst.sh
)
sed -i "s|_PACKAGER|${PACKAGER}|g; s|_BUILD_DATE|${DATE}|g" \
${PKG}/install/slack-desc
if [ "${SB_REP}" = "YES" ] ;then
# Replicate slackbuild and patches
mkdir -p ${SBDIR}/patches
install -pm0644 ${CWD}/slack-desc ${CWD}/slack-required ${CWD}/ChangeLog.SB \
${CWD}/apply-patches.sh ${SBDIR}/
install -pm0755 ${CWD}/${NAME}.SlackBuild \
${SBDIR}/${NAME}.SlackBuild
install -pm0644 ${CWD}/patches/*.* \
${SBDIR}/patches/
rmdir ${SBDIR}/patches
fi
# Build package:
set +o xtrace # no longer print commands upon execution
set -e
# makepkg must run as root: directly, via fakeroot, or via su.
ROOTCOMMANDS="set -o errexit -o xtrace ; cd ${PKG} ;
/bin/chown --recursive root:root . ;"
ROOTCOMMANDS="${ROOTCOMMANDS}
/sbin/makepkg --linkadd y --chown n ${PKGDEST}/${PKGNAME}.${PKGFORMAT} "
if test ${UID} = 0; then
eval ${ROOTCOMMANDS}
set +o xtrace
elif test "$(type -t fakeroot)" = 'file'; then
echo -e "\e[1mEntering fakeroot environment.\e[0m"
echo ${ROOTCOMMANDS} | fakeroot
else
echo -e "\e[1mPlease enter your root password.\e[0m (Consider installing fakeroot.)"
/bin/su -c "${ROOTCOMMANDS}"
fi
# Clean up the extra stuff:
if [ "$1" = "--cleanup" ]; then
echo "Cleaning..."
if [ -d ${TMP}/${SRCDIR} ]; then
rm -rf ${TMP}/${SRCDIR} && echo "${TMP}/${SRCDIR} cleanup completed"
fi
if [ -d ${PKG} ]; then
rm -rf ${PKG} && echo "${PKG} cleanup completed"
fi
rmdir ${TMP} && echo "${TMP} cleanup completed"
fi
exit 0
| true
|
8eece5be18ff29df35f1cd99164c51059c65690e
|
Shell
|
marcuskais/skripti_alused
|
/praks7/yl3.sh
|
UTF-8
| 214
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Print a numbered triangle: row i shows "i." followed by i stars.
# The row count is read from stdin.
# (The shebang was previously on line 3, after two comment lines,
# where the kernel ignores it — it must be the very first line.)
#
echo "Sisesta ridade arv"
# Renamed the count variable: it was literally called 'read',
# shadowing the builtin's name; -r keeps backslashes literal.
read -r rows
for (( i = 1; i <= rows; i++ )); do
  echo -n "$i."
  for (( j = 1; j <= i; j++ )); do
    echo -n "*"
  done
  echo ""
done
| true
|
ce4b15f2df91eca1dcd0c4fbc9851c9b8fd3d43e
|
Shell
|
Stonnos/eca
|
/automation/db/mssql/init-script.sh
|
UTF-8
| 379
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Wait (up to ~90 attempts, 1 s apart) for the local SQL Server to
# accept connections, then run the schema initialization script.
# Requires SA_PASSWORD to be set in the environment.
for i in {1..90}; do
  # Probe with a trivial connection to master; test the command
  # directly instead of inspecting $? afterwards. Quote the password
  # so spaces/globs in it don't break the command line.
  if /opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P "$SA_PASSWORD" -d master; then
    echo "SQL server started"
    break
  else
    echo "SQL server not ready yet..."
    sleep 1
  fi
done
/opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P "$SA_PASSWORD" -d master -i init-schema.sql
echo "init-schema.sql completed"
| true
|
b9b462984d1bcd0475db54faabfa5c185ff9be45
|
Shell
|
laife/life2.0
|
/life.sh
|
UTF-8
| 1,294
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive provisioning menu (labels in Portuguese):
#   1) interface - rewrite /etc/network/interfaces (DHCP WAN on enp0s3,
#      static 192.168.0.1/24 LAN on enp0s8) and install isc-dhcp-server
#      bound to the LAN interface
#   2) dhcpd     - write a dhcpd.conf serving 192.168.0.50-100 on the
#      192.168.0.0/24 subnet, then install apache2 and ssh
#   3) sair      - exit
# NOTE(review): must run as root — it writes under /etc and runs
# apt-get; there is no privilege check.
menu=('interface' 'dhcpd' 'sair')
select option in "${menu[@]}"; do
case "$option" in
# Option 1: regenerate the network config and install the DHCP server.
"${menu[0]}")
cd /etc/network
# Keep a backup of the original config before writing the new one.
mv interfaces interfaces.original
touch interfaces
echo "source /etc/network/interfaces.d/*
auto lo
iface lo inet loopback
allow-hotplug enp0s3
auto enp0s3
iface enp0s3 inet dhcp
allow-hotplug enp0s8
auto enp0s8
iface enp0s8 inet static
address 192.168.0.1
netmask 255.255.255.0
network 192.168.0.0
broadcast 192.168.0.255" >> interfaces
# Install the DHCP server, then bind it to the LAN interface only.
apt-get install isc-dhcp-server -y &&
cd ../default/
mv isc-dhcp-server isc-dhcp-server.original
touch isc-dhcp-server
echo 'INTERFACESv4="enp0s8"
INTERFACESv6=""' >> isc-dhcp-server
;;
# Option 2: write dhcpd.conf (backing up the original) and install
# apache2 + ssh.
"${menu[1]}")
cd /etc/dhcp/
mv dhcpd.conf dhcpd.conf.original
touch dhcpd.conf
echo "ddns-update-style none;
option domain-name-servers 192.168.0.1;
default-lease-time 600;
max-lease-time 7200;
authoritative;
log-facility local7;
subnet 192.168.0.0 netmask 255.255.255.0{
range 192.168.0.50 192.168.0.100;
option routers 192.168.0.1;
}" >> dhcpd.conf
apt-get install apache2 -y
apt-get install ssh -y
;;
# Option 3: leave the menu.
"${menu[2]}") exit ;;
# Any other input: complain and let select re-prompt.
*) echo 'Erro opção invalida!';;
esac
done
| true
|
421224c696aaae44196cfbd0214903b4f2694ef3
|
Shell
|
tmax-cloud/hypercloud-operator-go
|
/test/cmd/example.sh
|
UTF-8
| 1,029
| 2.890625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Smoke test for a Kubernetes cluster: create a namespace, run an nginx
# pod in it, then tear both down, asserting cluster state after each
# step via the hypercloud test helpers.
set -o errexit
set -o nounset
set -o pipefail
# NOTE(review): hypercloud::log::status, hypercloud::test::get_object_assert
# and $id_field are provided by the surrounding test framework, not by
# this file — verify they are sourced before this runs.
run_simple_tests() {
set -o nounset
set -o errexit
hypercloud::log::status "Testing kubernetes cluster(v1:pods/v1:namespaces)"
hypercloud::log::status "Create namespace test"
kubectl create ns test
hypercloud::test::get_object_assert namespace/test "{{$id_field}}" 'test'
hypercloud::log::status "Create nginx pod"
kubectl create -f - << __EOF__
{
  "kind": "Pod",
  "apiVersion": "v1",
  "metadata": {
    "name": "test",
    "namespace": "test"
  },
  "spec": {
    "containers": [
      {
        "name": "nginx",
        "image": "nginx"
      }
    ]
  }
}
__EOF__
hypercloud::test::get_object_assert pod/test "{{$id_field}}" 'test' "-n test"
hypercloud::log::status "Delete nginx pod"
kubectl delete pod test -n test
hypercloud::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' "-n test"
hypercloud::log::status "Delete namespace test"
kubectl delete ns test
set +o nounset
set +o errexit
}
| true
|
9daa00f08d9f6eed84a7743bf0aaf77fcf449231
|
Shell
|
xamexd/vmware
|
/application.sh
|
UTF-8
| 311
| 3.296875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Minimal process controller: start the app, report whether a PID is
# alive, or kill it outright.
case "$1" in
  start)
    ./application.vi
    ;;
  status)
    # ps -p succeeds only when the given PID exists.
    if ps -p "$2" > /dev/null; then
      echo "running"
    else
      echo "not running"
    fi
    ;;
  stop)
    kill -9 "$2"
    ;;
  *)
    echo "usage: $0 [start, status, or stop], if status then [ppid], if stop[ppid]"
    ;;
esac
| true
|
5237bdd498f4bc0b656b29b61e8db1ca1b6ecff3
|
Shell
|
ifzz/sona
|
/keepalived/restart_service.sh
|
UTF-8
| 300
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# keepalived watchdog: start keepalived only when it is currently down
# while a healthy sona_broker is running on this host.
# Already running? Nothing to do. (Matched lines are intentionally
# printed so cron/log output shows what was found.)
ps -fe |grep keepalived |grep -v grep
if [ $? -eq 0 ]; then
echo "do nothing"
exit
fi
# No broker process at all -> do not start keepalived either.
ps -fe |grep sona_broker |grep -v grep
if [ $? -ne 0 ]; then
exit
fi
# Probe the broker port; proceed only if it answers.
# NOTE(review): $DETECT is captured but never used — only the probe's
# exit status matters.
DETECT=`/etc/keepalived/broker_detect 127.0.0.1 9902`
if [ $? -ne 0 ]; then
exit
fi
/etc/init.d/keepalived start
| true
|
5c48ce109f300fa09df418be08e1f4c0d36f26e9
|
Shell
|
wontfix-org/bash-bundles-prompt
|
/bundle
|
UTF-8
| 2,300
| 3.484375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
BBPROMPT_HOSTCOLOR=${BBPROMPT_HOSTCOLOR:-"\$(bbprompt_hostcolor)"}
BBPROMPT_USER=${BBPROMPT_USER:-"\[\033[38;5;230m\]\u"}
BBPROMPT_HOST=${BBPROMPT_HOST:-"\[$BBPROMPT_HOSTCOLOR\]\\h"}
BBPROMPT_DIRECTORY=${BBPROMPT_DIRECTORY:-'\[\033[38;5;104m\]\W$(__git_ps1 " (%s)")'}
BBPROMPT_RETURN_CODE=${BBPROMPT_RETURN_CODE:-'\[$(bbprompt_return_code $return_code)\]'}
BBPROMPT_JOBS=${BBPROMPT_JOBS:-'$(jobs=$(jobs | wc -l ); [ $jobs -gt 0 ] && echo -n "[$jobs] ")'}
BBPROMPT_PRE_COMMAND=${BBPROMPT_PRE_COMMAND:-"export return_code=\$?"}
BBPROMPT_POST_COMMAND=${BBPROMPT_POST_COMMAND:-"history -a"}
BBPROMPT_RETURN_CODE_COLOR=${BBPROMPT_RETURN_CODE_COLOR:-'\033[0;0m\033[38;5;124m'}
BBPROMPT_COLOR_L_BLUE=${BBPROMPT_COLOR_L_BLUE:-'\033[38;5;33m'}
BBPROMPT_COLOR_D_BLUE=${BBPROMPT_COLOR_D_BLUE:-'\033[38;5;21m'}
BBPROMPT_COLOR_M_PURP=${BBPROMPT_COLOR_M_PURP:-'\033[38;5;69m'}
BBPROMPT_COLOR_L_YELL=${BBPROMPT_COLOR_L_YELL:-'\033[38;5;229m'}
BBPROMPT_COLOR_M_YELL=${BBPROMPT_COLOR_M_YELL:-'\033[38;5;227m'}
BBPROMPT_COLOR_M_YELL=${BBPROMPT_COLOR_M_YELL:-'\033[38;5;227m'}
BBPROMPT_COLOR_M_GREN=${BBPROMPT_COLOR_M_GREN:-'\033[38;5;35m'}
BBPROMPT_COLOR_M_GREY=${BBPROMPT_COLOR_M_GREY:-'\033[38;5;245m'}
BBPROMPT_COLOR_M_ORNG=${BBPROMPT_COLOR_M_ORNG:-'\033[38;5;208m'}
BBPROMPT_COLOR_L_PINK=${BBPROMPT_COLOR_L_PINK:-'\033[38;5;206m'}
BBPROMPT_COLOR_M_TEAL=${BBPROMPT_COLOR_M_TEAL:-'\033[38;5;38m'}
BBPROMPT_COLOR_M_BRWN=${BBPROMPT_COLOR_M_BRWN:-'\033[38;5;130m'}
BBPROMPT_COLOR_L_WHTE=${BBPROMPT_COLOR_L_WHTE:-'\033[38;5;230m'}
BBPROMPT_COLOR_END=${BBPROMPT_COLOR_END:-'\033[0;0m'}
NC='\[\e[0m\]'
# Emit the colorized exit code (no trailing newline) when it is
# non-zero, and propagate that code as this function's return status so
# prompt machinery can chain on it.
bbprompt_return_code() {
  case "$1" in
    0) ;;  # success: print nothing
    *) printf '%b' "${BBPROMPT_RETURN_CODE_COLOR}${1} " ;;
  esac
  return "$1"
}
export PS1=${BBPROMPT_PS1:-"${BBPROMPT_USER}@${BBPROMPT_HOST} ${BBPROMPT_DIRECTORY} ${BBPROMPT_JOBS}$NC$ "}
# display user@host:/path in title, append to bash_history on every command
case $TERM in
xterm*|rxvt|Eterm|eterm|rxvt-unicode)
PROMPT_COMMAND='echo -ne "\033]0;${USER}@${HOSTNAME%%.*}:${PWD/$HOME/~}\007"'
;;
screen)
PROMPT_COMMAND='echo -ne "\033_${USER}@${HOSTNAME%%.*}:${PWD/$HOME/~}\033\\" ; echo -ne "\033k${USER}@${HOSTNAME%%.*}:${PWD/$HOME/~}\033\\"'
;;
esac
PROMPT_COMMAND="$BBPROMPT_PRE_COMMAND ; $PROMPT_COMMAND ; $BBPROMPT_POST_COMMAND"
| true
|
d7f347beca51c4ffe3c63c66a7050a44a0b75776
|
Shell
|
virtualtam/developus-apparatus
|
/scripts/spider_wget.sh
|
UTF-8
| 560
| 4.21875
| 4
|
[
"WTFPL"
] |
permissive
|
#!/bin/bash
#
# Check if a file needs to be (re-)downloaded by comparing local and
# remote file sizes (the Length reported by `wget --spider`).
#
# Usage: spider_wget.sh <url> [local_file]
url=$1
# BUG FIX: $file_path was read throughout but never assigned, so every
# run took the "Downloading file..." path with an empty target name.
# Default to the URL's basename; allow an explicit override as $2.
file_path=${2:-${url##*/}}
dl=1
if [[ -f $file_path ]]; then
    # Size on disk vs. Content-Length announced by the server.
    local_size=$(wc -c < "$file_path")
    remote_size=$(wget --spider "$url" 2>&1 | awk '/Length/ {print $2}')
    if [[ $local_size -eq $remote_size ]]; then
        echo "The file was previously downloaded"
        dl=0
    else
        echo "Corrupted file found, re-downloading..."
        rm -f -- "$file_path"
    fi
else
    echo "Downloading file..."
fi
[[ $dl -eq 1 ]] && wget "$url" -O "$file_path"
| true
|
8d96c2d30a581832e369cade45ac7105c36bd4b1
|
Shell
|
gabrielecanepa/dotfiles
|
/.bashrc
|
UTF-8
| 116
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Prefer zsh for interactive sessions when it is installed; otherwise
# stay in bash.
zsh_bin=$(command -v zsh)
if [ -n "$zsh_bin" ]; then
  export SHELL=/bin/zsh
  exec zsh
fi
| true
|
38aa70fbefc24a77062bbf78d40536efb8779b7b
|
Shell
|
ramoh/hobnob
|
/scripts/e2e.test.sh
|
UTF-8
| 680
| 3.0625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# End-to-end test driver: ensure Elasticsearch is up, start the API
# server on :8080, run the Cucumber suite, then tear everything down.
# NOTE(review): launchctl/brew and this netstat syntax make the script
# macOS-specific.
RETRY_INTERVAL=${RETRY_INTERVAL:-0.2}
#Make sure the port is not already bound
if netstat -vnap tcp | grep -q 8080; then
echo "Another process is using port 8080"
exit 1
fi
# Start Elasticsearch via Homebrew if it is not already loaded, then
# poll until it answers on :9200.
if ! launchctl list | grep -q 'elasticsearch-full'; then
echo "Starting elastic search service"
brew services run elasticsearch-full
until curl --silent localhost:9200 -w " " -o /dev/null; do
sleep "$RETRY_INTERVAL"
done
fi
# Launch the API in the background and wait for it to bind :8080.
yarn run serve &
echo "API service has been started"
until netstat -vanp tcp | grep -q 8080; do
sleep "$RETRY_INTERVAL"
done
npx cucumber-js spec/cucumber/features --require-module @babel/register --require spec/cucumber/steps
# SIGTERM the entire process group (PID 0): this stops the backgrounded
# `yarn run serve` and the script itself in one shot.
kill -15 0
|
13fa0d4114dd4555ab17acee4ea13288cdb0ac5e
|
Shell
|
koundinyabs/xfmr
|
/ci/latest/update-maven-app.sh
|
UTF-8
| 571
| 2.921875
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# Read the Maven <revision> property from the checked-out pom.xml and
# (once enabled) push a stream-version update to the SCDF server in CF.
set -e -u
cd git-repo
# grep -o grabs from "<revision" up to the first double quote (or end
# of line); sed then strips the surrounding tags. Assumes the whole
# element sits on one line — TODO confirm against the real pom.xml.
THE_VERSION=$(grep -o '<revision[^"]*' pom.xml | sed -e 's/<revision>\(.*\)<\/revision>/\1/')
echo "Updating xfmr in CF with ${THE_VERSION} of Maven artifact"
# Replace `<ROUTE>` with the route of SCDF-server running in CF.
# (Deployment call intentionally left disabled below.)
# curl \
# -X \
# POST "http://sabby-test-dataflow-server.cfapps.io/streams/deployments/update/fooxfmr" \
# -d '{"updateProperties":{"version.xfmr":"'"${THE_VERSION}"'"},"releaseName":"fooxfmr","packageIdentifier":{"packageName":"fooxfmr"}}' \
# -H "Content-Type: application/json" \
# -v
| true
|
6596b1d569381402e4a6e4e3bf8a6483ead5afd8
|
Shell
|
mhristof/dotfiles
|
/gitlab-work.sh
|
UTF-8
| 2,131
| 3.703125
| 4
|
[] |
no_license
|
#! /usr/bin/env bash
# http://redsymbol.net/articles/unofficial-bash-strict-mode/
set -euo pipefail
IFS=$'\n\t'
# Print a message to stderr and abort the script with status 1.
die() {
  printf '%s\n' "$*" >&2
  exit 1
}
repo() {
local url
url="$1"
echo "cd $(DRY=true ~/bin/clone "$url" | awk '{print $NF}')"
exit 0
}
URL=${1:-}
if [[ -z $URL ]]; then
die "Error, please provide a url"
fi
# shellcheck disable=SC2001
project="$(sed 's!/-/.*!!' <<<"${URL/https:\/\/gitlab.com\//}")"
projectURL="$(echo -ne "$project" | python3 -c 'import sys; import urllib.parse; print(urllib.parse.quote(sys.stdin.read(), safe=""))')"
postCD=""
if [[ -z ${GITLAB_TOKEN:-} ]]; then
GITLAB_TOKEN=$(/usr/bin/security find-generic-password -s germ -w -a GITLAB_READONLY_TOKEN | cut -d"'" -f2)
export GITLAB_TOKEN
fi
case $URL in
*/jobs/*)
id="$(basename "$URL")"
mrID="$(glab api "/projects/$projectURL/jobs/$id" | jq -r .ref | cut -d/ -f3)"
;;
*/merge_requests/*)
#shellcheck disable=SC2001
mrID="$(sed 's/.*\(merge_requests.*\)/\1/g' <<<"$URL" | cut -d/ -f2)"
;;
*/-/tree/*)
#shellcheck disable=SC2001
project=$(sed 's!https://gitlab.com/!!g' <<<"$URL" | sed 's!/-/.*!!g')
#shellcheck disable=SC2001
branch=$(sed 's!.*/tree/!!g' <<<"$URL" | cut -d/ -f1)
postCD="&& cd $(sed "s!.*$branch/!!g" <<<"$URL")"
;;
*/-/blob/*)
#shellcheck disable=SC2001
project=$(sed 's!https://gitlab.com/!!g' <<<"$URL" | sed 's!/-/.*!!g')
#shellcheck disable=SC2001
branch=$(sed 's!.*/(blob|tree)/!!g' <<<"$URL" | cut -d/ -f1)
;;
*) repo "$URL" ;;
esac
if [[ -n ${mrID:-} ]]; then
branch=$(glab api "/projects/$projectURL/merge_requests/$mrID" | jq -r .source_branch)
fi
if [[ -z ${branch:-} ]]; then
die "Error, branch is not set"
fi
dest="$HOME/code/gitlab.com/$project"
if [[ ! -d $dest ]]; then
clone "$(glab api "/projects/$projectURL" | jq -r .ssh_url_to_repo)"
fi
cat <<EOF
cd $dest && git checkout \$(git-main.sh) && git pull && git fetch --prune && git checkout $branch && (git pull || git-force-fetch.sh) $postCD
EOF
| true
|
9226473b839f9e1db0af2d322411ffb97836042d
|
Shell
|
lremes/vault-rpm
|
/SOURCES/vault-bootstrap.sh
|
UTF-8
| 1,908
| 3.875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Bootstrap (initialize) a local Vault instance exactly once across a
# cluster, serializing concurrent attempts with a Consul lock. The
# unseal keys and root token are persisted to Consul; if that fails
# they are dumped to local files so the operator can recover them.
# Constants
#
LOCK_PATH="locks/vault/bootstrap"
SECURE_TOKEN_PATH="/etc/default/consul_secure.env"
VAULT_URL="https://localhost:8200"
# Variables
#
token="$1"

# Release the Consul lock and exit with the given status.
function do_exit {
  rval=$1
  consul-cli kv unlock --session=${sessionid} ${LOCK_PATH}
  exit ${rval}
}

# When persisting keys/root token to Consul fails, dump them to local
# files for manual recovery and bail out. (This block was previously
# duplicated verbatim at both call sites.)
function save_local_and_fail {
  echo "Error initializing vault!"
  echo "Keys written to: /etc/vault/keys"
  echo "Root token written to: /etc/vault/root_token"
  echo ${output} > /etc/vault/output
  echo ${keys} > /etc/vault/keys
  echo ${root_token} > /etc/vault/root_token
  do_exit 1
}

# Fall back to the stored read-only token when none was given.
if [ -z "${token}" -a -f ${SECURE_TOKEN_PATH} ]; then
  source ${SECURE_TOKEN_PATH}
  token=${SECURE_TOKEN}
fi
sessionid=$(consul-cli kv lock --lock-delay=5s ${LOCK_PATH})
# Initialize vault iff it's not already initialized.
is_init=$(curl -s -1 $VAULT_URL/v1/sys/init | jq .initialized)
if [ "${is_init}" == "true" ]; then
  do_exit 0
fi
# Check the value of ${LOCK_PATH}. If it is "init_done" then the
# initialization of vault is complete and the process needs to be
# restarted.
#
is_init=$(consul-cli kv read ${LOCK_PATH})
if [ "${is_init}" == "init_done" ]; then
  systemctl restart vault
  do_exit 0
fi
# Perform the one-time initialization: 5 key shares, threshold 3.
output=$(curl -X PUT -s -1 $VAULT_URL/v1/sys/init \
  -d '{ "secret_shares": 5, "secret_threshold": 3}')
keys=$(echo ${output} | jq -r .keys[])
root_token=$(echo ${output} | jq -r .root_token)
consul-cli kv write --token=${token} secure/vault/keys ${keys}
if [ $? -ne 0 ]; then
  save_local_and_fail
fi
consul-cli kv write --token=${token} secure/vault/root_token ${root_token}
if [ $? -ne 0 ]; then
  save_local_and_fail
fi
# Write "init_done" to LOCK_PATH so future runs don't try to re-init
consul-cli kv write ${LOCK_PATH} init_done
do_exit 0
| true
|
9c5e2d698c5421b78328a8a8283b4dd1b55dd735
|
Shell
|
sweety-apps/money_pig
|
/wangcai_svr/sbin/show_user_info.sh
|
UTF-8
| 465
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Dump every record tied to one user across the wangcai and
# wangcai_billing databases (profile, devices, billing account, log).
#
# Usage: show_user_info.sh <userid>
if [ $# -ne 1 ]; then
    echo "usage $0 <userid>" >&2
    # BUG FIX: a usage error must not exit 0 (the bare `exit` did).
    exit 1
fi
userid=$1
# Safety: $userid is interpolated straight into SQL below, so accept
# digits only (also guards against accidental shell-injected input).
case $userid in
    ''|*[!0-9]*)
        echo "userid must be numeric" >&2
        exit 1
        ;;
esac
echo "SELECT * FROM user_info WHERE id = $userid \G" | mysql wangcai
echo "SELECT * FROM user_device WHERE userid = $userid \G" | mysql wangcai
echo "SELECT * FROM billing_account WHERE userid = $userid \G" | mysql wangcai_billing
echo "SELECT userid, device_id, serial_num, money, remark, insert_time, err, msg FROM billing_log WHERE userid = $userid" | mysql wangcai_billing
| true
|
062bfc906c5cf5644532a53e8bfbc8632119fe61
|
Shell
|
cheng6563/fdboot
|
/extracted/boot.sh
|
UTF-8
| 4,950
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
echo 'Welcome fdboot.'
if [[ -z $START_CLASS ]]; then
START_CLASS=$(sed -n 's/^Start-Class: //p' ./META-INF/MANIFEST.MF)
echo "Use start class in ./META-INF/MANIFEST.MF"
fi
if [[ -z $START_CLASS ]]; then
START_CLASS=$(sed -n 's/^Main-Class: //p' ./META-INF/MANIFEST.MF)
echo "Use main class in ./META-INF/MANIFEST.MF"
fi
if [[ -z $START_CLASS ]]; then
echo "Not found start class. please edit ./META-INF/MANIFEST.MF file or set to environment."
exit 1
fi
START_CLASS=`echo $START_CLASS | tr -d '\r'`
echo "Found start class: ${START_CLASS}."
if [[ -f "application.properties" ]]; then mv application.properties application-d.properties; fi
if [[ -f "application.yml" ]]; then mv application.yml application-d.yml; fi
if [[ -f "application.yaml" ]]; then mv application.yaml application-d.yaml; fi
if [[ -f "application.json" ]]; then mv application.json application-d.json; fi
if [[ -f "application-d.properties" ]]; then PROFILE_D=true; fi
if [[ -f "application-d.yml" ]]; then PROFILE_D=true; fi
if [[ -f "application-d.yaml" ]]; then PROFILE_D=true; fi
if [[ -f "application-d.json" ]]; then PROFILE_D=true; fi
if [[ ! -n "$SPRING_CLOUD_CONFIG_URL" ]]; then
SPRING_CLOUD_CONFIG_URL=http://localhost:8888
echo >bootstrap.properties
echo "No SPRING_CLOUD_CONFIG_URL env, disable spring cloud config."
echo "# No SPRING_CLOUD_CONFIG_URL env, disable spring cloud config.">>bootstrap.properties
echo "spring.cloud.config.enabled=false">>bootstrap.properties
fi
# 读取计算机名
HOSTNAME=$(hostname)
# 获取主要IP
HOST_PRIMARY_IP=$(ip route get 1 | sed -n 's/^.*src \([0-9.]*\) .*$/\1/p')
# 将计算机名写入hosts
echo "127.0.0.1 $HOSTNAME" >>/etc/hosts
# 生成随机端口号
RANDOM_SEED="${START_CLASS}#${APP_NAME}#${HOST_PRIMARY_IP}"
PORT=0
RANDOM_SEED_HEX=`echo -n $RANDOM_SEED | md5sum | awk '{print $1}'`
RANDOM_SEED_SHORT=${RANDOM_SEED_HEX:0:8}
RANDOM_SEED=`printf "%d\n" 0x${RANDOM_SEED_SHORT}`
read LOWERPORT UPPERPORT </proc/sys/net/ipv4/ip_local_port_range
let RANDOM_DIFF=UPPERPORT-LOWERPORT
RANDOM=$RANDOM_SEED
while :; do
r=$RANDOM
let PORT=RANDOM_DIFF%r+LOWERPORT
# PORT="$(shuf -i $LOWERPORT-$UPPERPORT -n 1)"
ss -an | awk '{print $5}' | grep -q ":$PORT" || break
done
#echo "Random port: $PORT"
# 使用环境变量SERVER_PORT中的端口号,如果没有就使用随机的
if [[ -n $SERVER_PORT ]]; then
APP_PORT=$SERVER_PORT
echo "Use env port: $APP_PORT"
else
APP_PORT=$PORT
echo "Use random port: $APP_PORT"
fi
# 将服务端口号写入文件,用于健康检查
echo $APP_PORT>/app/APP_PORT
if [[ -z "$JAVA_TOOL_OPTIONS" ]]; then
# ribbon调用重试
export JAVA_TOOL_OPTIONS="$JAVA_TOOL_OPTIONS -Dribbon.MaxAutoRetries=1"
export JAVA_TOOL_OPTIONS="$JAVA_TOOL_OPTIONS -Dribbon.MaxAutoRetriesNextServer=3"
# eureka刷新
export JAVA_TOOL_OPTIONS="$JAVA_TOOL_OPTIONS -Deureka.client.registry-fetch-interval-seconds=3"
export JAVA_TOOL_OPTIONS="$JAVA_TOOL_OPTIONS -Deureka.instance.lease-renewal-interval-in-seconds=5"
export JAVA_TOOL_OPTIONS="$JAVA_TOOL_OPTIONS -Deureka.instance.lease-expiration-duration-in-seconds=15"
export JAVA_TOOL_OPTIONS="$JAVA_TOOL_OPTIONS -Dribbon.ServerListRefreshInterval=1000"
# eureka主动健康检查
export JAVA_TOOL_OPTIONS="$JAVA_TOOL_OPTIONS -Deureka.client.healthcheck.enabled=true"
export JAVA_TOOL_OPTIONS="$JAVA_TOOL_OPTIONS"' -Deureka.instance.instance-id=${spring.application.name}#'"${HOST_PRIMARY_IP}#${APP_PORT}"
fi
# 服务端口号
APP_PARAM_BASE="$APP_PARAM_BASE --server.port=$APP_PORT"
export SERVER_PORT=$APP_PORT
# 如果没有$PROFILE变量,就设为default,使用默认profile
if [[ -n "$PROFILE" ]]; then
export SPRING_PROFILES_ACTIVE=$PROFILE
elif [[ $PROFILE_D ]]; then
export SPRING_PROFILES_ACTIVE=d
fi
# 生成java opts ,拼接运行命令
# -Djava.awt.headless=true 参数设置用软件处理图像,因为虚拟机里没显卡
# -Djava.net.preferIPv4Stack 使用ipv4通信
# -Djava.security.egd=file:/dev/./urandom 使用伪随机数,避免linux熵池不够导致系统阻塞
# -Dspring.cloud.config.uri=$SPRING_CLOUD_CONFIG_URL 应用Spring Cloud Config地址
# -XX:+CrashOnOutOfMemoryError -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/log/fdserver/${HOSTNAME}_${APP_PORT}.hprof 使内存溢出时立即停止应用并保存dump
export JAVA_TOOL_OPTIONS="$JAVA_TOOL_OPTIONS $JAVA_MEM_OPTS $JAVA_GC_OPTS -Djava.awt.headless=true -Djava.net.preferIPv4Stack=true -Djava.security.egd=file:/dev/./urandom -Dspring.cloud.config.uri=$SPRING_CLOUD_CONFIG_URL -XX:+ExitOnOutOfMemoryError -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/log/fdserver/${HOSTNAME}_${APP_PORT}.hprof "
JAVA_CP_OPTS="-cp .:./BOOT-INF/classes:./BOOT-INF/lib/*"
JAVA_CMD="java $JAVA_OPTS $JAVA_CP_OPTS $START_CLASS $APP_PARAM_BASE $APP_PARAM"
echo "Java cmd: $JAVA_CMD"
exec $JAVA_CMD
| true
|
a10e9d7de097a7104202db08acb011d1fb5dedfd
|
Shell
|
sebgod/userscripts
|
/homebin/src/mercury_build_scripts/build_only_compiler.sh
|
UTF-8
| 584
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# Rebuild only the Mercury compiler from $MERCURY_GIT and install the
# resulting binary into the target prefix's bin directory.
# BUG FIX: pushd/popd are bash builtins, so the original '#!/bin/sh'
# shebang fails on POSIX shells such as dash.
pushd "$MERCURY_GIT"
PARALLEL=-j2
TARGET_DRIVE=c
TARGET_DIR=/mercury/dev-$MERCURY_CC
PREFIX=${TARGET_DRIVE}:${TARGET_DIR}
# Preview the configure invocation before actually running it.
echo ./configure --with-cc=$MERCURY_CC --prefix=$PREFIX \
    --enable-libgrades=$MERCURY_LIBGRADES \
    --enable-new-mercuryfile-struct
git checkout build &&
git rebase master &&
aclocal -I m4 &&
autoconf &&
./configure --with-cc=$MERCURY_CC --prefix=$PREFIX \
    --enable-libgrades=$MERCURY_LIBGRADES \
    --enable-new-mercuryfile-struct &&
make &&
cp compiler/mercury_compile.exe /$TARGET_DRIVE$TARGET_DIR/bin &&
true
# NOTE(review): PARALLEL is set but never passed to make — confirm
# whether `make $PARALLEL` was intended.
popd # popd $MERCURY_GIT
| true
|
4ac64df70af320c045d9359111199d8cb560e92c
|
Shell
|
marcorvazquezs/Docker
|
/Counter_Web_App/random_number.sh
|
UTF-8
| 153
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint for the counter web app: overwrite the nginx
# index page once per second with a fresh $RANDOM value so every page
# load shows a new number. Runs forever by design.
while true
do
echo "<br /><center><h1>Your random number is: $RANDOM</h1></center>" > /usr/share/nginx/html/index.html
sleep 1
done
| true
|
ded0218790260fae37c52ea861d2e3df9eaf53e4
|
Shell
|
hjyou07/personalProjects
|
/Systems-primitiveSearchEngine/lab1_bash/myScript.sh
|
UTF-8
| 2,582
| 4.25
| 4
|
[] |
no_license
|
# Problem Statement:
# Often I have to copy some files from the remote repository
# into my local repository, most often for starter codes.
# In order to do that I need to "git pull" from "resources" folder,
# then see what directory I want to copy into my local student repository,
# then copy a specific directory into my repository.
# It is not a hard job, but it is something I found can be "automated",
# and can be very efficient if implemented and used.
# Name of command: cp -R [source] [destination]
# Example of its usage:
# cp -R ~/cs5007/resources/lab1_bash ~/cs5007/hjyou_CS5006
# it copies lab1_bash from resources and saves it in hjyou_CS5006, my local repo.
# Here is the myScript.sh script which interacts with the user,
# and copies a specific directory user(usually me) chooses into local repository.
# usage: sh myScript.sh
# A helper function that checks the validity of the user input
# if the user input matches one of the existing directories,
# returns 0. otherwise returns 1.
# Validate $dirname against the whitespace-separated directory listing
# held in $var. Sets the global `result` to 0 (found) or 1 (not found),
# mirroring shell exit-status convention so callers can branch on it.
checkInput() {
  result=1
  # Deliberately unquoted: rely on word-splitting to walk each name
  # produced by the earlier `ls` capture.
  for candidate in $var; do
    if [ "$dirname" = "$candidate" ]; then
      result=0
    fi
  done
}
# Changes the directory to the resources repo, and pull any updated changes.
# Then it lists all the directories available for copy,
# and prompts the user to enter the name of the directory to copy.
cd ~/cs5007/resources
git pull
echo "\nfolders under resources repo:"
ls
echo "\nPick one you want to copy"
read dirname
# this saves the output of ls command (currently in ~/resources)
# into a variable named "var"
var=$(ls)
# executes the funciton checkInput() defined above,
# and saves the result into a variable named isValidInput(either 0 or 1).
checkInput
isValidInput=$result
# while isValidInput = 1, prompt the user again for an input,
# because the input didn't match any of the directories available for copy.
while [ $isValidInput -eq 1 ]
do
echo "that was an invalid directory name, enter the name as it's shown"
read dirname
checkInput
isValidInput=$result
done
# if the user input is valid, copy that requested directory
# from resources to local student repo.
# Then it lists all the directories in the student repo to show the result
cp -R ~/cs5007/resources/$dirname ~/cs5007/hjyou_CS5006
cd ~/cs5007/hjyou_CS5006
echo "\nYou have copied \"$dirname\", and now your repo has the following:"
ls
| true
|
3088a52f8fd81ba53b71d4286a20452edc8a37e8
|
Shell
|
twmccart/SRAssemblerold
|
/data/xtestA
|
UTF-8
| 2,295
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
echo "============================================================================================================"
echo " Test1: Use the -1, -2 options to assign reads files."
echo "============================================================================================================"
rm -rf Atestout1
rm -rf reads_data
mkdir -p Atestout1
../bin/SRAssembler -q input/LOC_Os06g04560.pep -t protein -p SRAssembler.conf -1 input/reads1_200.fq -2 input/reads2_200.fq -z 200 -r ./reads_data -x 15000 -o Atestout1 -A 1 -S 0 -s arabidopsis -n 10
echo "============================================================================================================"
echo " Test2: Use the library definition file (-l option) and the previously pre-processed reads data (-r option)."
echo "============================================================================================================"
rm -rf Atestout2
mkdir -p Atestout2
../bin/SRAssembler -q input/LOC_Os06g04560.pep -t protein -p SRAssembler.conf -l libraries_200bp.conf -r ./reads_data -x 15000 -o Atestout2 -A 1 -S 0 -s arabidopsis -n 10
echo "============================================================================================================"
echo " Test3: Use two libraries. Only the second library will be pre-processed."
echo "============================================================================================================"
rm -rf Atestout3
mkdir -p Atestout3
../bin/SRAssembler -q input/LOC_Os06g04560.pep -t protein -p SRAssembler.conf -l libraries_200bp_1kb.conf -r ./reads_data -x 15000 -o Atestout3 -A 1 -S 0 -s arabidopsis -n 3
echo "============================================================================================================"
echo " Test4: Previous example, but using the MPI implementation."
echo "============================================================================================================"
rm -rf Atestout4
mkdir -p Atestout4
mpirun -n 4 ../bin/SRAssembler_MPI -q input/LOC_Os06g04560.pep -t protein -p SRAssembler.conf -l libraries_200bp_1kb.conf -r ./reads_data -x 15000 -o Atestout4 -A 1 -S 0 -s arabidopsis -n 3
| true
|
589ed935b8a17c21742fe353e2368fcde31253eb
|
Shell
|
xibo-sun/VTR_with_Yosys
|
/vtr@2780988/.github/travis/install.sh
|
UTF-8
| 315
| 2.703125
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"MIT-Modern-Variant"
] |
permissive
|
#!/bin/bash
# Travis CI setup step: normalize the git checkout (fetch tags, init
# all nested submodules) before the build runs.
# NOTE(review): start_section/end_section, ${YELLOW}/${NC} and $SPACER
# are presumably defined in common.sh (log folding + colors) — verify.
source .github/travis/common.sh
set -e
# Git repo fixup
start_section "environment.git" "Setting up ${YELLOW}git checkout${NC}"
set -x
git fetch --tags
git submodule update --recursive --init
git submodule foreach git submodule update --recursive --init
set +x
end_section "environment.git"
$SPACER
| true
|
2f2625b54f949e99113fa3e2764b8aded0ce783d
|
Shell
|
avm99963/foam
|
/apps/builder/build.sh
|
UTF-8
| 666
| 3.203125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build the FOAM "builder" Chrome app into ./build and minify foam.js.
# BUG FIX: the script uses a bash array and pushd/popd, so the original
# '#!/bin/sh' shebang breaks under POSIX shells such as dash.
BASEDIR=$(readlink -f "$(dirname "$0")")
BUILD_DIR="$BASEDIR"/build
FOAM_DIR=../..
APP_DEF=foam.apps.builder.App
# Static assets copied verbatim into the build directory.
BASEDIR_FILES=( designer_view.html bg.js config.js manifest.json _locales 128.png builder.css )
pushd "$BASEDIR"
mkdir -p "$BUILD_DIR"
node --harmony "$FOAM_DIR/tools/foam.js" foam.build.BuildApp appDefinition=$APP_DEF "targetPath=$BUILD_DIR"
cp "$FOAM_DIR/core/foam.css" "$BUILD_DIR/foam.css"
for FILE in "${BASEDIR_FILES[@]}"; do
  rm -rf "$BUILD_DIR/$FILE"
  cp -r "$BASEDIR/$FILE" "$BUILD_DIR/$FILE"
done
cd "$BUILD_DIR"
uglifyjs -b semicolons=false,beautify=false foam.js -c unused=false > foam-min.js
mv foam-min.js foam.js
rm unused.html
popd
|
4893753ea435b4dfa3343cb654d7805b8be5b193
|
Shell
|
jmetzz/sandbox-scripts
|
/learn-bash/echoFile2.sh
|
UTF-8
| 1,237
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
usage() {
echo "Usage:"
echo " prepareSql [-f] [-t]"
echo
echo "Arguments:"
echo " -f file"
echo " -t type"
}
# Read the CSV named by $INPUT (skipping the header row), emit one
# DELETE statement and one VALUES tuple per data row, then stitch them
# into output.sql (deletes followed by a single bulk INSERT).
# Globals read: INPUT (csv path), TYPE (address type id) — both set by
# the option parser below.
main() {
  echo "Start sql generation"
  for line in $(tail -n +2 $INPUT | tr '\n\r' ' ');
  do
    export C_ID=$(echo $line | cut -d ',' -f 1);
    export A_ID=$(echo $line | cut -d ',' -f 2);
    echo "delete from DB2.CONTACT_ADDRESSES where ADDRESS_TYPE = $TYPE and ADDRESS_ID = $A_ID and COMPANY_ID = $C_ID;" >> delete.sql;
    echo "($TYPE, $A_ID, $C_ID, 'N', 'TEST for OHM-31701', CURRENT TIMESTAMP, 'OHM-31701', CURRENT TIMESTAMP )" >> values.sql;
    printf "."
  done
  # BUG FIX: `print` is not a bash builtin (ksh-ism) and failed with
  # "command not found"; use printf.
  printf "\n"
  echo "insert into DB2.CONTACT_ADDRESSES (ADDRESS_TYPE, ADDRESS_ID, COMPANY_ID, DEFAULT_ADDRESS, CREATED_BY, CREATE_DATE, CHANGED_BY, CHANGE_DATE) values " >> insert.sql
  # BUG FIX: `tail values.sql` kept only the last 10 tuples, silently
  # dropping rows on larger inputs; read the whole file instead.
  echo $(cat values.sql | paste -sd "," - | sed -e 's,)\,,)\,\n\r,gm') >> insert.sql
  cat delete.sql insert.sql > output.sql
  rm delete.sql
  rm insert.sql
  rm values.sql
  echo "Sql statements saved in file output.sql"
}
if [[ $1 == '--help' ]]
then
usage
exit 0
fi
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
-f|--file)
INPUT="$2"
shift
;;
-t|--type)
TYPE="$2"
shift
;;
esac
shift
done
main
| true
|
8c6817143fa25b7c58fa8ed549db17bc0fb915f2
|
Shell
|
alessio/dokku
|
/tests/unit/20_core_ports_1.bats
|
UTF-8
| 2,787
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bats
load test_helper
setup() {
[[ -f "$DOKKU_ROOT/VHOST" ]] && cp -f "$DOKKU_ROOT/VHOST" "$DOKKU_ROOT/VHOST.bak"
[[ -f "$DOKKU_ROOT/HOSTNAME" ]] && cp -f "$DOKKU_ROOT/HOSTNAME" "$DOKKU_ROOT/HOSTNAME.bak"
DOCKERFILE="$BATS_TMPDIR/Dockerfile"
}
teardown() {
destroy_app
[[ -f "$DOKKU_ROOT/VHOST.bak" ]] && mv "$DOKKU_ROOT/VHOST.bak" "$DOKKU_ROOT/VHOST"
[[ -f "$DOKKU_ROOT/HOSTNAME.bak" ]] && mv "$DOKKU_ROOT/HOSTNAME.bak" "$DOKKU_ROOT/HOSTNAME"
}
check_urls() {
local PATTERN="$1"
run bash -c "dokku --quiet urls $TEST_APP | egrep \"${1}\""
echo "output: "$output
echo "status: "$status
assert_success
}
@test "(core) port exposure (with global VHOST)" {
echo "dokku.me" > "$DOKKU_ROOT/VHOST"
deploy_app
CONTAINER_ID=$(< $DOKKU_ROOT/$TEST_APP/CONTAINER.web.1)
run bash -c "docker port $CONTAINER_ID | sed 's/[0-9.]*://' | egrep -q '[0-9]*'"
echo "output: "$output
echo "status: "$status
assert_failure
check_urls http://${TEST_APP}.dokku.me
}
@test "(core) port exposure (without global VHOST and real HOSTNAME)" {
rm "$DOKKU_ROOT/VHOST"
echo "${TEST_APP}.dokku.me" > "$DOKKU_ROOT/HOSTNAME"
deploy_app
CONTAINER_ID=$(< $DOKKU_ROOT/$TEST_APP/CONTAINER.web.1)
run bash -c "docker port $CONTAINER_ID | sed 's/[0-9.]*://' | egrep -q '[0-9]*'"
echo "output: "$output
echo "status: "$status
assert_success
HOSTNAME=$(< "$DOKKU_ROOT/HOSTNAME")
check_urls http://${HOSTNAME}:[0-9]+
}
@test "(core) port exposure (with NO_VHOST set)" {
deploy_app
dokku config:set $TEST_APP NO_VHOST=1
CONTAINER_ID=$(< $DOKKU_ROOT/$TEST_APP/CONTAINER.web.1)
run bash -c "docker port $CONTAINER_ID | sed 's/[0-9.]*://' | egrep -q '[0-9]*'"
echo "output: "$output
echo "status: "$status
assert_success
HOSTNAME=$(< "$DOKKU_ROOT/HOSTNAME")
check_urls http://${HOSTNAME}:[0-9]+
}
@test "(core) port exposure (without global VHOST and IPv4 address as HOSTNAME)" {
rm "$DOKKU_ROOT/VHOST"
echo "127.0.0.1" > "$DOKKU_ROOT/HOSTNAME"
deploy_app
CONTAINER_ID=$(< $DOKKU_ROOT/$TEST_APP/CONTAINER.web.1)
run bash -c "docker port $CONTAINER_ID | sed 's/[0-9.]*://' | egrep -q '[0-9]*'"
echo "output: "$output
echo "status: "$status
assert_success
HOSTNAME=$(< "$DOKKU_ROOT/HOSTNAME")
check_urls http://${HOSTNAME}:[0-9]+
}
@test "(core) port exposure (without global VHOST and IPv6 address as HOSTNAME)" {
rm "$DOKKU_ROOT/VHOST"
echo "fda5:c7db:a520:bb6d::aabb:ccdd:eeff" > "$DOKKU_ROOT/HOSTNAME"
deploy_app
CONTAINER_ID=$(< $DOKKU_ROOT/$TEST_APP/CONTAINER.web.1)
run bash -c "docker port $CONTAINER_ID | sed 's/[0-9.]*://' | egrep -q '[0-9]*'"
echo "output: "$output
echo "status: "$status
assert_success
HOSTNAME=$(< "$DOKKU_ROOT/HOSTNAME")
check_urls http://${HOSTNAME}:[0-9]+
}
| true
|
24c3902c84d800d25d72884ddae487c2675c5f41
|
Shell
|
AImissq/puppy-no-sudo-bash
|
/no-sudo.sh
|
UTF-8
| 863
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
#developer - philip ackroyd
#Copyright (C) 2019 ackroydAI
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses
#check dependency requirement
# BUG FIX: `[[ /bin/sed ]]` only tests that the literal string is
# non-empty, which is always true, so this check could never fail.
# Use command -v to actually verify sed is available.
if ! command -v sed >/dev/null 2>&1; then
echo " you need to install sed " >&2
exit 1
fi
#then execute script
# Strips every occurrence of "sudo" in place; point this at the real
# target file before use.
sed -i -e 's/sudo//g' path_to_yourfile.sh
| true
|
1a0124c818d12e560dea21883d04f74e3bfac3fb
|
Shell
|
matheuscscp/TG
|
/benchmark/scripts/check_batch_proofs
|
UTF-8
| 1,984
| 3.875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Classify the E-prover output captured in prover_$$.out:
#   0 = proof found (Theorem)
#   1 = disproof (CounterSatisfiable)
#   2 = resource limit hit (ResourceOut)
#   3 = anything else / unrecognized output
prover_res() {
  if grep Theorem prover_$$.out > /dev/null 2> /dev/null; then
    return 0
  elif grep CounterSatisfiable prover_$$.out > /dev/null 2> /dev/null; then
    return 1
  elif grep ResourceOut prover_$$.out > /dev/null 2> /dev/null; then
    return 2
  else
    return 3
  fi
}
if [ "$#" != "2" ]; then
echo "Usage mode: $0 <batch name> <timeout>"
exit
fi
for file in `ls -Sr proofs/$1/*`;
do
fn=$(basename $file)
# check if $file was already checked
grep ",$fn" infos/$1.check > /dev/null 2> /dev/null
if [ "$?" = "0" ]; then
continue
fi
# check if batch exited normally
grep ",$fn" infos/$1.info > check_proofs_$$.tmp 2> /dev/null
if [ "$?" != "0" ]; then
echo "$0: $fn do not occurs inside infos/$1.info"
exit
fi
# check if batch exceeded time limit
grep ",timeout," check_proofs_$$.tmp > /dev/null 2> /dev/null
if [ "$?" = "0" ]; then
echo ",$fn,timeout," >> infos/$1.check
continue
fi
# check if batch exceeded memory limit
grep ",memout," check_proofs_$$.tmp > /dev/null 2> /dev/null
if [ "$?" = "0" ]; then
echo ",$fn,memout," >> infos/$1.check
continue
fi
# check file size
fsize=$(stat -c%s "$file")
if [ "$fsize" = "23" ]; then
echo ",$fn,empty," >> infos/$1.check
continue
fi
# run prover for supposed theorem $file
timeout $2 eprover -s --auto --tptp3-format $file > prover_$$.out 2> prover_$$.out
if [ "$?" = "124" ]; then
echo ",$fn,timeout," >> infos/$1.check
continue
fi
# check result
prover_res
case "$?" in
"0") echo ",$fn,ok," >> infos/$1.check
;;
"1") echo ",$fn,fail," >> infos/$1.check
;;
"2") echo ",$fn,memout," >> infos/$1.check
;;
*) echo "$0: error checking $file"
exit
;;
esac
done
rm -rf prover_$$.out check_proofs_$$.tmp
| true
|
51251495378b7ba4059e232bb2cf106ce305346a
|
Shell
|
FreifunkHochstift/ffho-salt-public
|
/icinga2/plugins/check_salt
|
UTF-8
| 613
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/sh
# Icinga2 check plugin: report pending Salt state changes recorded in
# the cached `state_apply` output. Exit codes follow Nagios convention
# (0 = OK, 1 = WARNING, 3 = UNKNOWN).
# Check if state-file exists, otherwise exit with unknown
if [ ! -f /var/cache/salt/state_apply ] ; then echo "Statefile does not exist" ; exit 3 ; fi
# Check age of statefile. If it's older than 7 hours (25200 s), exit with unknown
if [ $(($(date +%s) - $(date -r /var/cache/salt/state_apply +%s))) -gt 25200 ] ; then echo "Statefile too old" ; exit 3 ; fi
# List all IDs and exclude ffho-repo
CHANGES_IDS=$(grep "ID:" /var/cache/salt/state_apply | grep -v "ID: .*-repo$")
if [ -n "$CHANGES_IDS" ] ; then
echo "IDs with changes:"
echo "$CHANGES_IDS"
exit 1 # warning
fi
echo "Nothing to do"
exit 0 # ok
| true
|
51ff1d72f06cf8b9149a081e94aca1c4ba467884
|
Shell
|
gawara/ruby-sample
|
/detach.sh
|
UTF-8
| 3,708
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# @(#) detach.sh ver.1.0.0 2015.08.01
#
# Usage:
#       detach.sh param1
#           param1 - instance ID
#
# Description:
#   Detaches the instance with the given instance ID from its
#   AutoScalingGroup, then stops the instance.
#   Running this script requires the following tools:
#     - jq
#         install command:
#           - sudo curl -o /usr/bin/jq http://stedolan.github.io/jq/download/linux64/jq && sudo chmod +x /usr/bin/jq
#     - aws cli
#         NOTE: the aws cli must already be configured with credentials
#         (or an IAM role) and a region.
#
###########################################################################
# Validate arguments
if [ $# -ne 1 ]; then
echo "Invalid Parameters." 1>&2
exit 1
fi
# Confirm the instance exists
INSTANCE_ID=$1
#export AWS_DEFAULT_REGION="ap-northeast-1"
echo "Instance Id: $INSTANCE_ID"
INSTANCE_NUM=`aws ec2 describe-instances --instance-ids $INSTANCE_ID | jq -r '.Reservations[0].Instances | length'`
if ! expr "$INSTANCE_NUM" : '[0-9]*' > /dev/null; then
echo " AWS Command Error." 1>&2
exit 1
fi
if [ $INSTANCE_NUM -ne 1 ]; then
echo " $INSTANCE_ID is not found." 1>&2
exit 1
fi
# Look up the name of the AutoScalingGroup the instance belongs to
echo "Getting AutoScaling group name..." 1>&2
AUTO_SCALING_GROUP_NAME=`aws ec2 describe-instances --instance-ids $INSTANCE_ID | jq -r '.Reservations[0].Instances[0].Tags[] | select(.Key=="aws:autoscaling:groupName").Value'`
if [ ${#AUTO_SCALING_GROUP_NAME} -eq 0 ]; then
echo " AUTO_SCALING_GROUP_NAME is not found in tags." 1>&2
exit 1
fi
echo " Name: $AUTO_SCALING_GROUP_NAME" 1>&2
# Detach the instance from the AutoScalingGroup
echo "Detach command executing..." 1>&2
ACTIVITY_ID=`aws autoscaling detach-instances --instance-ids $INSTANCE_ID --auto-scaling-group-name $AUTO_SCALING_GROUP_NAME --no-should-decrement-desired-capacity | jq -r '.Activities[0].ActivityId'`
if [ "$ACTIVITY_ID" = "" ]; then
echo " Detach command failed." 1>&2
exit 1
fi
# Wait until the detach activity reports success (up to 10 x 60 s)
function waitActivitySuccessfull() {
local ACTIVITY_ID=$1
local LOOP_MAX=10
for ((i=0; i < $LOOP_MAX; i++)); do
echo " Describing AutoScalingGroup activity status..." 1>&2
local ACTIVITY_STATUS=`aws autoscaling describe-scaling-activities --activity-ids $ACTIVITY_ID | jq -r '.Activities[0].StatusCode'`
echo " Activity status: $ACTIVITY_STATUS" 1>&2
if [ "$ACTIVITY_STATUS" = "Successful" ]; then
return 0
fi
echo " waiting 60 seconds..."
sleep 60s
done
echo " Retry max error. Detach activity failed." 1>&2
exit 1
}
waitActivitySuccessfull $ACTIVITY_ID
# After detaching, stop the instance
echo "Stopping instance..." 1>&2
INSTANCE_STATUS=`aws ec2 stop-instances --instance-ids $INSTANCE_ID | jq -r '.StoppingInstances[0].CurrentState.Name'`
if [ "$INSTANCE_STATUS" != "stopping" ]; then
echo " Stop command failed." 1>&2
exit 1
fi
# Wait until the instance reaches the "stopped" state (up to 10 x 60 s)
function waitStoppedInstance() {
local INSTANCE_ID=$1
local LOOP_MAX=10
for ((i=0; i < $LOOP_MAX; i++)); do
local INSTANCE_STATUS=`aws ec2 describe-instances --instance-ids $INSTANCE_ID | jq -r '.Reservations[0].Instances[0].State.Name'`
echo " Instance status: $INSTANCE_STATUS" 1>&2
if [ "$INSTANCE_STATUS" = "stopped" ]; then
return 0
fi
echo " waiting 60 seconds..."
sleep 60s
done
echo " Retry max error. Stop instance failed." 1>&2
exit 1
}
waitStoppedInstance $INSTANCE_ID
# Done
echo "All process finished." 1>&2
exit 0
| true
|
8ac5da148bbb9389996f560f6879c39e84c3f2e0
|
Shell
|
yueyz818/webrtc-bin
|
/buildTarget.sh
|
UTF-8
| 8,070
| 3.609375
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
set -e
target=webrtc
platform=all
configuration=debug
architecture=all
merge=1
logLevel=2
################
target_webrtc=0
target_ortc=0
platform_iOS=0
platform_macOS=0
platform_linux=0
platform_android=0
configuration_Release=0
configuration_Debug=0
architecture_arm=0
architecture_armv7=0
architecture_arm64=0
architecture_x86=0
architecture_x64=0
#log levels
error=0
info=1
warning=2
debug=3
trace=4
################
HOST_SYSTEM=mac
HOST_OS=osx
basePath="webrtc/xplatform/webrtc/out"
ninjaExe="webrtc/xplatform/depot_tools/ninja"
webrtcLibPath=obj/webrtc/libwebrtc.a
ortcLibPath=libOrtc.dylib
systemcheck()
{
if [ "$OSTYPE" == "linux-gnu" ];
then
HOST_SYSTEM=linux
HOST_OS=$(lsb_release -si | awk '{print tolower($0)}')
HOST_ARCH=$(uname -m | sed 's/x86_//;s/i[3-6]86/32/')
HOST_VER=$(lsb_release -sr)
fi
}
identifyPlatform()
{
print $trace "Identifying target platforms ..."
if [ "$platform" == "all" ]; then
if [ "$HOST_SYSTEM" == "linux" ]; then
platform_linux=1
platform_android=1
platform_iOS=0
platform_macOS=0
messageText="WebRtc will be built for linux and android platforms ..."
else
platform_iOS=1
platform_macOS=1
platform_linux=0
platform_android=0
messageText="WebRtc will be built for iOS and macOS platforms ..."
fi
elif [ "$platform" == "ios" ]; then
platform_iOS=1
platform_macOS=0
platform_linux=0
platform_android=0
messageText="WebRtc will be built for $platform platform..."
elif [ "$platform" == "mac" ]; then
platform_macOS=1
platform_iOS=0
platform_linux=0
platform_android=0
messageText="WebRtc will be built for $platform platform..."
elif [ "$platform" == "linux" ]; then
platform_linux=1
platform_macOS=0
platform_iOS=0
platform_android=0
messageText="WebRtc will be built for $platform platform..."
elif [ "$platform" == "android" ]; then
platform_android=1
platform_linux=0
platform_macOS=0
platform_iOS=0
messageText="WebRtc will be built for $platform platform..."
else
error 1 "Invalid platform"
fi
print $warning "$messageText"
}
identifyConfiguration()
{
print $trace "Identifying target configuration ..."
if [ "$configuration" == "all" ]; then
configuration_Release=1
configuration_Debug=1
elif [ "$configuration" == "release" ]; then
configuration_Release=1
configuration_Debug=0
else
configuration_Release=0
configuration_Debug=1
fi
}
identifyArchitecture()
{
print $trace "Identifying target architecture ..."
if [ $platform_iOS -eq 1 ] || [ $platform_android -eq 1 ]; then
if [ "$architecture" == "all" ]; then
architecture_arm=1
architecture_arm64=1
elif [ "$architecture" == "arm" ]; then
architecture_arm=1
architecture_arm64=0
elif [ "$architecture" == "arm64" ]; then
architecture_arm=0
architecture_arm64=1
fi
fi
if [ $platform_macOS -eq 1 ] || [ $platform_linux -eq 1 ]; then
if [ "$architecture" == "all" ]; then
architecture_x86=1
architecture_x64=1
elif [ "$architecture" == "x86" ]; then
architecture_x86=1
architecture_x64=0
elif [ "$architecture" == "x64" ]; then
architecture_x86=0
architecture_x64=1
fi
fi
}
identifyTarget()
{
print $trace "Identifying target ..."
if [ "$target" == "all" ]; then
target_webrtc=1
target_ortc=1
elif [ "$target" == "ortc" ]; then
target_webrtc=0
target_ortc=1
else
target_webrtc=1
target_ortc=0
fi
}
buildTarget()
{
targetPath=$basePath/$1_$2_$3
print $debug "Buidling $4 in $targetPath ..."
$ninjaExe -C $targetPath $4
}
buildConfiguration()
{
if [ $target_webrtc -eq 1 ]; then
buildTarget $1 $2 $3 webrtc
fi
if [ $target_ortc -eq 1 ]; then
buildTarget $1 $2 $3 ortc
fi
}
buildArchitecture()
{
if [ $configuration_Release -eq 1 ]; then
buildConfiguration $1 $2 release
fi
if [ $configuration_Debug -eq 1 ]; then
buildConfiguration $1 $2 debug
fi
}
buildPlatform()
{
print $debug "Building for platform $1"
if [ "$1" == "ios" ] || [ "$1" == "android" ]; then
if [ $architecture_arm -eq 1 ]; then
buildArchitecture $1 arm
fi
if [ $architecture_arm64 -eq 1 ]; then
buildArchitecture $1 arm64
fi
fi
if [ "$1" == "mac" ] || [ "$1" == "linux" ]; then
if [ $architecture_x86 -eq 1 ]; then
buildArchitecture $1 x86
fi
if [ $architecture_x64 -eq 1 ]; then
buildArchitecture $1 x64
fi
fi
}
build()
{
if [ $platform_iOS -eq 1 ]; then
buildPlatform ios
fi
if [ $platform_macOS -eq 1 ]; then
buildPlatform mac
fi
if [ $platform_linux -eq 1 ]; then
buildPlatform linux
fi
if [ $platform_android -eq 1 ]; then
buildPlatform android
fi
}
runLipo()
{
print $debug "Merging $1 and $2 to $3 ..."
lipo -create $1 $2 -output $3
if [ $? -ne 0 ]; then
error 1 "Could not merge in #3 lib"
fi
}
mergeConfiguration()
{
print $trace "Running merge for $1 $2 $3"
if [ $platform_iOS -eq 1 ]; then
if [ -f $basePath/ios_arm_$1/$2 ] && [ -f $basePath/ios_arm64_$1/$2 ]; then
make_directory webrtc/xplatform/webrtc/out_ios_$1
if [ "$3" == "webrtc" ]; then
runLipo $basePath/ios_arm_$1/$2 $basePath/ios_arm64_$1/$2 webrtc/xplatform/webrtc/out_ios_$1/$3.a
fi
if [ "$3" == "ortc" ]; then
runLipo $basePath/ios_arm_$1/$2 $basePath/ios_arm64_$1/$2 webrtc/xplatform/webrtc/out_ios_$1/libOrtc.dylib
fi
fi
fi
}
mergeTarget()
{
if [ $configuration_Release -eq 1 ]; then
mergeConfiguration release $1 $2
fi
if [ $configuration_Debug -eq 1 ]; then
mergeConfiguration debug $1 $2
fi
}
mergeLibs()
{
print $debug "Merging libs ..."
if [ $target_webrtc -eq 1 ]; then
mergeTarget $webrtcLibPath webrtc
fi
if [ $target_ortc -eq 1 ]; then
mergeTarget $ortcLibPath ortc
fi
}
make_directory()
{
if [ ! -d "$1" ]; then
print $trace "Creating directory \"$1\"..."
mkdir -p $1
if [ $? -ne 0 ]; then
error 1 "Failed creating $1 directory"
fi
fi
}
# Log a message, coloured by its level, honouring the global $logLevel.
# $1 - message level (0=error red, 1=info green, 2=warning yellow, >2 plain)
# $2 - message text
# Prints only when $logLevel >= $1.
print()
{
logType=$1
logMessage=$2
if [ "$logLevel" -ge "$logType" ]
then
if [ "$logType" -eq 0 ]; then
printf "\e[0;31m $logMessage \e[m\n"
elif [ "$logType" -eq 1 ]; then
printf "\e[0;32m $logMessage \e[m\n"
elif [ "$logType" -eq 2 ]; then
printf "\e[0;33m $logMessage \e[m\n"
elif [ "$logType" -gt 2 ]; then
echo $logMessage
fi
fi
}
# Report a warning or a fatal error.
# $1 - 0 for a non-fatal warning, non-zero for a critical error
# $2 - message text
# On a critical error, prints a failure banner and terminates the script.
# NOTE(review): the fatal path runs 'popd' although no matching 'pushd'
# is visible in this script - confirm a caller pushes a directory first.
error()
{
criticalError=$1
errorMessage=$2
if [ $criticalError -eq 0 ]
then
echo
print $warning "WARNING: $errorMessage"
echo
else
echo
print $error "CRITICAL ERROR: $errorMessage"
echo
echo
print $error "FAILURE:Preparing WebRtc environment has failed!"
echo
popd > /dev/null
exit 1
fi
}
while true;
do
tempParam=$(echo $1 | awk '{print tolower($0)}')
case "$tempParam" in
"")
break;;
-platform|-p)
platform=$2
shift 2
;;
-target|-t)
target=$2
shift 2
;;
-configuration|-c)
configuration=$2
shift 2
;;
-architecture|-a)
architecture=$2
shift 2
;;
-merge|-m)
merge=1
shift 1
;;
-help|-h)
help
exit 1
;;
-loglevel|-l)
logLevel=$2
if [ "$2" == "error" ]; then
logLevel=0
elif [ "$2" == "info" ]; then
logLevel=1
elif [ "$2" == "warning" ]; then
logLevel=2
elif [ "$2" == "debug" ]; then
logLevel=3
elif [ "$2" == "trace" ]; then
logLevel=4
fi
shift 2
;;
*)
error 1 "Command line argument was not understood"
esac
done
systemcheck
identifyPlatform
identifyConfiguration
identifyArchitecture
identifyTarget
build
if [ $merge -eq 1 ]; then
mergeLibs
fi
| true
|
bfa57e72211411fdf453299a9d01f5d2bd3985c0
|
Shell
|
dkirrane/gf-test
|
/my-proj/setenv.sh
|
UTF-8
| 2,837
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
export GITFLOW_VERSION="3.0"
export M2_HOME=C:/apache-maven-3.0.5
# export M2_HOME=C:/apache-maven-3.3.9
# Maven attach Debugger
# export MAVEN_OPTS="-Xdebug -Xnoagent -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000"
# Maven attach Netbeans Profiler
# https://blogs.oracle.com/nbprofiler/entry/space_in_path_on_windows
# Need to install NetBeans to a path wihtout spaces
# export MAVEN_OPTS="-agentpath:C:\\NetBeans8.1\\profiler\\lib\\deployed\\jdk16\\windows-amd64\\profilerinterface.dll=C:\\NetBeans8.1\\profiler\\lib,5140"
echo "GITFLOW_VERSION=${GITFLOW_VERSION}"
echo "M2_HOME=${M2_HOME}"
echo "MAVEN_OPTS=${MAVEN_OPTS}"
# Delete old Gitflow plugin
MAVEN_REPO=`${M2_HOME}/bin/mvn help:evaluate -Dexpression=settings.localRepository | grep -v '\[INFO\]' | grep -v 'Download'`
echo "MAVEN_REPO=${MAVEN_REPO}"
PLUGIN_DIR="${MAVEN_REPO}/com/dkirrane"
if [ -d "$PLUGIN_DIR" ]; then
echo -e ""
read -r -p "Do you want to delete ggitflow-maven-plugin from local Maven repo '${PLUGIN_DIR}' ? [y/N]" choice
if [[ $choice =~ ^([yY][eE][sS]|[yY])$ ]]
then
rm -Rf ${PLUGIN_DIR}
else
echo -e ""
fi
fi
# Echo a command, run it, and abort the whole script if it fails.
# Args: the command and its arguments.
# Bug fixed: a bare 'exit' exited with the status of the preceding
# echo (always 0), silently masking the failure; now the command's
# real status is propagated.
function runCmd {
  echo "\$ $@"
  "$@"
  local status=$?
  if [ $status -ne 0 ]; then
    echo "Failed to run with $1" >&2
    exit $status
  fi
  return $status
}
function changeParentPom {
echo -e "\n\n"
local timestamp=`date --rfc-3339=seconds`
runCmd git checkout $1
runCmd sed -i -e "s|<description>my-proj.*</description>|<description>my-proj ${timestamp}</description>|g" $DIR/pom.xml
runCmd git commit -am "Parent pom change on $1 to cause merge-conflict"
}
function changeModule1Pom {
echo -e "\n\n"
local timestamp=`date --rfc-3339=seconds`
runCmd git checkout $1
runCmd sed -i -e "s|<description>my-proj-module1.*</description>|<description>my-proj-module1 ${timestamp}</description>|g" $DIR/my-proj-module1/pom.xml
runCmd git commit -am "my-proj-module1 pom change on $1 to cause merge-conflict"
}
function changeModule2Pom {
echo -e "\n\n"
local timestamp=`date --rfc-3339=seconds`
runCmd git checkout $1
runCmd sed -i -e "s|<description>my-proj-module2.*</description>|<description>my-proj-module2 ${timestamp}</description>|g" $DIR/my-proj-module2/pom.xml
runCmd git commit -am "my-proj-module2 pom change on $1 to cause merge-conflict"
}
function changeModule2App {
echo -e "\n\n"
local timestamp=`date --rfc-3339=seconds`
runCmd git checkout $1
runCmd sed -i -e "s/\"Hello Module2.*\"/\"Hello Module2 ${timestamp}\"/g" $DIR/my-proj-module2/src/main/java/com/mycompany/module2/App.java
runCmd git commit -am "Commit change on $1 to cause merge-conflict"
}
function pushAll {
echo -e "\n\n"
echo -e "Pushing all branches"
runCmd git push --all
}
| true
|
42e90a4e244e9772dd9cbdcdfd622da52f30a086
|
Shell
|
vgerak/pazcal
|
/bin/pzcheck
|
UTF-8
| 400
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Print the (mangled) PROGRAM name of each Pazcal source file named on
# the command line: "main" is rewritten to "__pazcal_main".  Compiler
# style flags are skipped ("-o" also consumes its argument).
while [ "$1" != "" ]; do
  case "$1" in
    -o)
      # "-o <outfile>": drop the flag and its argument
      shift
      shift
      ;;
    -*)
      # any other option: ignore it
      shift
      ;;
    *)
      # a source file: extract and mangle its program name
      grep PROGRAM "$1" \
        | sed -e 's/.*PROGRAM[ \t]*\([A-Za-z0-9_]*\).*/\1/' \
              -e 's/^main$/__pazcal_main/'
      shift
      ;;
  esac
done
exit 0
| true
|
215cfe042b2bba2e9015b37ad5c2d82bac1e5e81
|
Shell
|
jamesoff/zsh
|
/local-plugins/starship/init.zsh
|
UTF-8
| 945
| 3.03125
| 3
|
[] |
no_license
|
# Initialise the starship prompt, preferring a locally built binary over
# one installed on $PATH.  Of the release/debug builds under
# ~/src/starship/target, whichever has the newer modification time wins.
# NOTE(review): 'stat -f %m' is BSD/macOS syntax - assumes a Mac; GNU
# stat would need 'stat -c %Y'.  Relies on helpers defined elsewhere:
# _load_debug (debug logging) and has (command lookup).
launch_starship() {
local _latest_starship
local _starship_time
if [[ -x $HOME/src/starship/target/release/starship ]]; then
_latest_starship=$HOME/src/starship/target/release/starship
_starship_time=$( stat -f %m "$_latest_starship" )
fi
if [[ -x $HOME/src/starship/target/debug/starship ]]; then
if [[ -n $_latest_starship ]]; then
# Both builds exist: keep whichever binary is newer.
if [[ $( stat -f %m $HOME/src/starship/target/debug/starship ) -gt $_starship_time ]]; then
_load_debug "debug starship is the newer binary"
_latest_starship=$HOME/src/starship/target/debug/starship
else
_load_debug "release starship is the newer binary"
fi
else
_latest_starship=$HOME/src/starship/target/debug/starship
fi
fi
if [[ -z $_latest_starship ]]; then
# No local build found: fall back to an installed starship, if any.
if has starship; then
_load_debug "using installed starship"
eval $( starship init zsh )
fi
else
_load_debug "using starship at $_latest_starship"
eval $( $_latest_starship init zsh)
fi
}
| true
|
a54d82c389a575689860d22584a531f0c88c2be8
|
Shell
|
lasizoillo/docker-volume-nest
|
/resources/share/examples/zfs_local/path.sh
|
UTF-8
| 325
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash -eu
# Return path where volume should be mounted. Use stderr for errors and
# stdout to show path where file is mounted.
# $1 - volume name; ZFS_BASE_MNT_DATASET overrides the default base path.
# Exits non-zero (printing nothing) when the path is not a mountpoint.
declare base_mnt_dataset="${ZFS_BASE_MNT_DATASET:-/tank/docker-volumes}"
declare volname="$1"
declare mount_point="${base_mnt_dataset}/${volname}"
# Bug fixed: ${mount_point} was unquoted, which broke (word splitting /
# globbing) for volume names containing whitespace or glob characters.
mountpoint -q "${mount_point}" && echo "${mount_point}"
| true
|
7bc70204add325b2f8c4d3d96047ff774060b4c9
|
Shell
|
rohitsengar001/OS-Lab
|
/table.sh
|
UTF-8
| 134
| 3.328125
| 3
|
[] |
no_license
|
# Read a number from stdin and print its multiplication table (1x..10x),
# one product per line (each followed by a trailing space, as before).
echo "enter the number "
read num
echo " "
for i in 1 2 3 4 5 6 7 8 9 10
do
echo "$(( num * i )) "
done
| true
|
0a5096270e2d7c04db64cf17ea6f57d0e411b6a2
|
Shell
|
gsakkas/edsger-compiler
|
/edsger_compiler/check_installation.sh
|
UTF-8
| 677
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Smoke-test the edsger compiler installation: compile and run the four
# bundled testcases and diff their output against the expected results.
# Exits 1 on the first failing testcase, 0 if all pass.
TEST_PATH=./edsger_compiler/tests
echo "Checking installation..."
echo
for i in {1..4}
do
echo "Testcase $i out of 4:"
# Compile testcase i with optimisations, then run the produced binary.
./edsc -O $TEST_PATH/test_$i.eds
./$TEST_PATH/test_$i > $TEST_PATH/test_temp_$i
# Compare actual output with the expected output.
if ( diff $TEST_PATH/test_temp_$i $TEST_PATH/test_output_$i > .__testing__) then
echo "OK!"
else
echo "Something went wrong with this testcase only!"
echo
echo "Exiting..."
# Clean up the failing testcase's artefacts before bailing out.
rm .__testing__ $TEST_PATH/test_$i $TEST_PATH/test_$i.imm $TEST_PATH/test_$i.asm $TEST_PATH/test_temp_$i
exit 1
fi
rm .__testing__ $TEST_PATH/test_$i ./$TEST_PATH/test_$i.imm $TEST_PATH/test_$i.asm $TEST_PATH/test_temp_$i
done
echo
echo "Installation is complete!"
exit 0
| true
|
709547cf24b47ee3b612aa5a3b0a242cd93d6e74
|
Shell
|
Vectra130/vdr_eeebox_scripts
|
/.suspend2s3.sh
|
UTF-8
| 1,625
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# v1.1 eeebox
# Suspend-to-RAM (S3) helper for a VDR box: cleans up, stops services,
# suspends via s2ram, then restores services after wake-up.
. /etc/vectra130/configs/sysconfig/.sysconfig
# Prepare suspend-to-RAM
rm /tmp/.startsuspend-s3
touch /tmp/.s2ram_active
logger -t SUSPEND "System geht in Suspend"
# Stop the frontend
killall -q -9 .frontend.sh
# Clean up: logs, apt caches and temp files
echo "" > /var/log/debug
echo "" > /var/log/error
rm -r /var/lib/apt/lists/*
rm -r /var/cache/apt/archives/*
rm /tmp/.[a-zA-Z]*
rm /tmp/[a-zA-Z]*
sync && echo 3 > /proc/sys/vm/drop_caches
# Important actions before entering S3
killall -q .watchdog.sh
$SCRIPTDIR/.watchdog.sh kill
expect -c "set echo \"-noecho\"; set timeout 10; spawn -noecho "$SCRIPTDIR"/.get_avr_infos.sh; expect timeout { exit 1 } eof { exit 0 }"
stop avahi-daemon
rmmod lirc_igorplugusb
# Shutdown actions (enable only ONE of the following!!!)
#poweroff
# killall -9 -q oscam
# killall -9 -q vdr-$VDRVERS
# poweroff
# Suspend to swapfile
# s2disk
# Suspend to RAM:
# start the s2ram watchdog first
$SCRIPTDIR/.s2ram_watchdog.sh &
s2ram --force --acpi_sleep 1
# --- Waking up ---
# Show the wake-up splash image
$SCRIPTDIR/.showscreenimage.sh wakeup &
# Actions after wake-up
modprobe lirc_igorplugusb
if [ "$USEWLAN" == "1" ]; then
. $SCRIPTDIR/.startwlan
fi
start avahi-daemon
[ $(pidof -xs .watchdog.sh | wc -w) == 0 ] && nice -$_watchdog_sh_nice $SCRIPTDIR/.watchdog.sh &
rm /tmp/.s2ram_active
test -e /tmp/.powersave && rm /tmp/.powersave
$SCRIPTDIR/.frontend.sh &
logger -t SUSPEND "System ist wieder aufgewacht"
exit 0
| true
|
d68a3b9dc5ed2cf8dbf27c0fbbd286e5e731ab9b
|
Shell
|
eyeskiller/fuck
|
/macos/.bash_aliases
|
UTF-8
| 77
| 2.59375
| 3
|
[] |
no_license
|
# Kill every process named by the second argument, then print a message.
# $1 - expletive target (only used in the message), $2 - process name.
# Bug fixed: $2 was passed to killall unquoted, splitting on whitespace.
fuck(){
killall "$2"
echo "Fuck $1 $2 I am MAC!"
}
| true
|
42289ce24dbe1b2af87d253bfc425bceb0ab717c
|
Shell
|
lynzh/scripts
|
/addr
|
UTF-8
| 167
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/sh
# Prompt for the four octets of an IPv4 address (whitespace-separated)
# and echo them back in dotted form.
first=''
second=''
third=''
fourth=''
echo "State the range of IP addr :"
# Bug fixed: 'read first, second, third, fourth' made the shell reject
# "first," as an invalid identifier, so nothing was ever read.
read first second third fourth
echo "ip addr: $first.$second.$third.$fourth"
| true
|
6e693b993745f8a66cb3c340dc11ad5a20bfef9a
|
Shell
|
danX-4q/deploy-gerrit
|
/gerritAlp/setup-once.sh
|
UTF-8
| 942
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# One-time Gerrit setup: bring up postgres, run Gerrit's init entrypoint
# (twice), then start the full stack with docker-compose.
function step_2 {
# Uncomment the '#init' entrypoint line to get a setup-only compose file.
sed docker-compose.yaml -e 's/^#init\( entrypoint.*\)$/\1/g' > docker-compose--setup-only.yaml
docker-compose -f docker-compose--setup-only.yaml up gerrit && rm -rf docker-compose--setup-only.yaml
./modify-gerrit-conf.sh
# Finally figured out (after much trial and error) that initialising
# twice migrates the database from h2 to postgres.
./delete-gerrit-data.sh
sed docker-compose.yaml -e 's/^#init\( entrypoint.*\)$/\1/g' > docker-compose--setup-only.yaml
docker-compose -f docker-compose--setup-only.yaml up gerrit && rm -rf docker-compose--setup-only.yaml
}
function step_1 {
# Start postgres and poll its logs until it accepts connections.
docker-compose up -d postgres
while true
do
docker-compose logs --tail=5 postgres | grep "database system is ready to accept connections" -q && break
sleep 1
done
echo "postgres ready for connections."
}
function step_3 {
# Bring up the whole stack.
docker-compose up -d
}
step_1
step_2
step_3
echo "$0 said: setup over, and already docker-compose up"
| true
|
9cbdf80f264a3f36aca40fc165dd083a209857f5
|
Shell
|
kenjirofukuda/sandbox
|
/Tools/enable_arc.sh
|
UTF-8
| 309
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# Run _enable_arc.sh (located next to this script) on every GNUmakefile
# found under the target directory (default: current directory).
target_dir="."
if [ $# -gt 0 ]; then
target_dir="$1"
fi
# echo "target_dir=${target_dir}"
if [ ! -d "$target_dir" ]; then
echo "not found: $target_dir"
exit 1
fi
# Bug fixed: $0 was unquoted inside dirname, breaking when this script's
# path contains spaces.
find "$target_dir" -type f \( -name "GNUmakefile" -or -name "GNUmakefile.*" \) \
-exec "$(dirname "$0")/_enable_arc.sh" {} \;
| true
|
1c96173eae0282689dc867bddd59b156dbef463c
|
Shell
|
AEW2015/Vitis-AI
|
/tools/Vitis-AI-Quantizer/vai_q_pytorch/tools/format_code.sh
|
UTF-8
| 421
| 3.734375
| 4
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Reformat every Python file in the parent tree: yapf re-indent, strip
# trailing ^M (carriage return) characters, then dos2unix line endings.
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Re-indent one file in place with the project's yapf style.
yapf_format() {
  yapf -i --style='{based_on_style: chromium, indent_width: 2, blank_lines_around_top_level_definition: 1}' "$1"
}
# Remove carriage-return characters.
# NOTE(review): the ^M in the sed script is assumed to be a literal
# control character in the original file - preserved as-is.
strip_trailing_ctrlm() {
  sed -i "s/^M//" "$1"
}
# Bugs fixed: '*.py' was unquoted (the shell could expand it before find
# saw it) and the $(find ...) word-splitting loop broke on paths that
# contain whitespace.
find "${SCRIPT_DIR}/.." -name '*.py' | while IFS= read -r file; do
  echo "${file}"
  yapf_format "${file}"
  strip_trailing_ctrlm "${file}"
  dos2unix "${file}"
done
| true
|
ebed5ee6f6de4c326e86b03eb7a2f01b7dd03bbe
|
Shell
|
azinchen/duplicacy
|
/root/usr/bin/container-init-up-99-backupimmediately.sh
|
UTF-8
| 331
| 3.046875
| 3
|
[
"LicenseRef-scancode-proprietary-license",
"MIT"
] |
permissive
|
#!/command/with-contenv bash
# Optionally run a backup (and then a prune) immediately at container
# start.  Triggered only when RUN_JOB_IMMEDIATELY is exactly "yes" or
# "YES"; prune runs only if the backup succeeded (or none was configured).
exitcode=0
case "$RUN_JOB_IMMEDIATELY" in
yes|YES)
if [[ -n "${BACKUP_CRON}" ]]; then
backup.sh
exitcode=$?
fi
if [[ -n "${PRUNE_CRON}" && $exitcode -eq 0 ]]; then
prune.sh
exitcode=$?
fi
;;
esac
exit $exitcode
| true
|
b61203a7b1746eeeab13ec319f1922ec753a5233
|
Shell
|
hmrc/pension-administrator-frontend
|
/migrations/applied_migrations/BusinessType.sh
|
UTF-8
| 2,083
| 2.859375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
echo "Applying migration BusinessType"
echo "Adding routes to conf/register.routes"
echo "" >> ../conf/app.routes
echo "GET /businessType controllers.register.company.BusinessTypeController.onPageLoad(mode: Mode = NormalMode)" >> ../conf/register.routes
echo "POST /businessType controllers.register.company.BusinessTypeController.onSubmit(mode: Mode = NormalMode)" >> ../conf/register.routes
echo "GET /changeBusinessType controllers.register.company.BusinessTypeController.onPageLoad(mode: Mode = CheckMode)" >> ../conf/register.routes
echo "POST /changeBusinessType controllers.register.company.BusinessTypeController.onSubmit(mode: Mode = CheckMode)" >> ../conf/register.routes
echo "Adding messages to conf.messages"
echo "" >> ../conf/messages.en
echo "businessType.title = businessType" >> ../conf/messages.en
echo "businessType.heading = businessType" >> ../conf/messages.en
echo "businessType.option1 = businessType" Option 1 >> ../conf/messages.en
echo "businessType.option2 = businessType" Option 2 >> ../conf/messages.en
echo "businessType.checkYourAnswersLabel = businessType" >> ../conf/messages.en
echo "businessType.error.required = Please give an answer for businessType" >> ../conf/messages.en
echo "Adding helper method to CheckYourAnswersHelper"
awk '/class/ {\
print;\
print "";\
print " def businessType: Seq[AnswerRow] = userAnswers.get(identifiers.register.BusinessTypeId) match {";\
print " case Some(x) => Seq(AnswerRow(\"businessType.checkYourAnswersLabel\", s\"businessType.$x\", true, controllers.register.routes.BusinessTypeController.onPageLoad(CheckMode).url))";\
print " case _ => Nil";\
print " }";\
next }1' ../app/utils/CheckYourAnswersHelper.scala > tmp && mv tmp ../app/utils/CheckYourAnswersHelper.scala
echo "Moving test files from generated-test/ to test/"
rsync -avm --include='*.scala' -f 'hide,! */' ../generated-test/ ../test/
rm -rf ../generated-test/
echo "Migration BusinessType completed"
| true
|
458ca96b3b215b53c97cd7777589cd8f9f347d4d
|
Shell
|
dirten/git-toolbelt
|
/git-assume
|
UTF-8
| 852
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/sh
# git-assume: mark files "assume unchanged" so local edits are hidden
# from 'git status' and won't be picked up when staging.
set -e

usage () {
	echo "usage: git assume [-ah] <file> [<file> ...]" >&2
	echo >&2
	echo "Assume files to be unchanged, so they won't show up in status " >&2
	echo "reports, and won't be included when staging files." >&2
	echo >&2
	echo "Options:" >&2
	echo "-a    Assume all locally modified files" >&2
	echo "-h    Show this help" >&2
}

all=0
while getopts ah flag; do
	case "$flag" in
		a) all=1;;
		h) usage; exit 2;;
	esac
done
shift $(($OPTIND - 1))

# List worktree-modified or deleted tracked files, one per line.
# NOTE(review): 'git status --porcelain' wraps paths containing special
# characters in double quotes; such paths would come out quoted here.
locally_modified_files () {
	git status --porcelain --untracked-files=no | cut -c 2- | grep -Ee '^[MD]' | cut -c 3-
}

if [ $all -eq 1 ]; then
	# Bug fixed: the file list used to be expanded unquoted on a single
	# command line, so any path containing whitespace was split into
	# bogus arguments.  Process the list one line at a time instead.
	locally_modified_files | while IFS= read -r f; do
		git update-index --assume-unchanged "$f"
	done
else
	if [ $# -gt 0 ]; then
		git update-index --assume-unchanged "$@"
	else
		usage
		exit 2
	fi
fi
| true
|
40a6b50012f6c9066058234b2399fff0b7d53761
|
Shell
|
yanatan16/denvr
|
/dev-resources/test-app/start.sh
|
UTF-8
| 273
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Start the test app.  Removes any stale node_modules first.  With SYNC
# set, runs under nodemon (reinstalling deps whenever package.json
# changes); otherwise runs the entry file ($1) once with plain node.
[[ -e node_modules ]] && rm -rf node_modules
if [[ -n $SYNC ]]; then
nodemon --exec 'cp package.json /data && cd /data && npm install' \
--watch package.json
nodemon $1 --watch . --watch /data/node_modules
else
node $1
fi
| true
|
5e5d3246dfc732b6568eb8098205cf12539e8c50
|
Shell
|
netj/selectmail
|
/src/keepmail.sh
|
UTF-8
| 5,937
| 3.96875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
Id="SelectMail $VERSION" # (http://netj.org/selectmail)
# KeepMail -- remove and extract messages from mailboxes
# Author: Jaeho Shin <netj@sparcs.org>
# Created: 2007-02-24
set -e
. "`dirname "$0"`/$EXEDIR"/common
keepmail() {
local mode=keep output= spread=
# process options
local o=
while getopts "ho:ym" o; do
case "$o" in
h) mode=help ;;
o) output=$OPTARG ;;
y) spread=yearly ;;
m) spread=monthly ;;
*) see_usage ;;
esac
done
shift $(($OPTIND - 1))
case "$mode" in
help) # show usage
cat <<-EOF
$Id
$Name -- remove and extract messages from mailboxes
Usage: $Name [-ym] [-o <archive>] <pattern> <mailbox>...
pattern:
all pattern that Mutt recognizes is allowed, see muttrc(5)
mailbox:
one or more mailboxes you want to work on
archive:
mailbox you want to store unmatched messages if specified
Examples:
$Name -o ~/Mail/INBOX.gz "~r <3m" =.Inbox
to keep mails received within 3 months in =.Inbox and archive others
$Name "~r <2w" =.Trash
to delete mails in =Trash older than 2 weeks
$Name "~r \`date -r timestamp +%d/%m/%Y\`-" =.News
to keep mails received after the timestamp file was touched in =.News
$Name -y -o ~/Mail/lists-%Y.gz "~r <30d" =.lists
to keep mails received within 30 days in =.lists and archive others
spread over years, e.g. ~/Mail/lists-2007.gz.
EOF
;;
keep) # keep messages
# validate arguments
[ $# -gt 0 ] || see_usage
local patt=$1; shift
[ -n "$patt" ] || { see_usage "no pattern specified"; }
[ $# -gt 0 ] || { see_usage "no mailbox specified"; }
# decide whether to extract or delete
if [ -n "$output" ]; then
if [ -n "$spread" ]; then
# spread
case "$spread" in
yearly) spread() {
local from=$1 y= o=
for y in $(seq `date +%Y` -1 1970); do
o=`sed <<<"$output" -e "s/%Y/$y/g"`
echo -n " storing '~d 01/01/$y-' to $o"
mvmsgs "~d 01/01/$y-" "$o" "$from"
echo
[ -s "$from" ] || break
done
} ;;
monthly) spread() {
local from=$1 y= m= m0=`date +%m` o=
for y in $(seq `date +%Y` -1 1970); do
for m in $(seq $m0 -1 1); do
m=`printf %02d $m`
o=`sed <<<"$output" -e "s/%Y/$y/g" -e "s/%m/$m/g"`
echo -n " storing '~d 01/$m/$y-' to $o"
mvmsgs "~d 01/$m/$y-" "$o" "$from"
echo
[ -s "$from" ] || break
done
[ -s "$from" ] || break
m0=12
done
} ;;
*)
err "spread-$spread not implemented :("
return 4
;;
esac
keep() {
local pfx=$1; pfx=${pfx#=}; pfx=${pfx#+}; pfx=${pfx#!}
local tmp=`mktemp "$pfx.keepmail.XXXXXX"`
[ -n "$tmp" ] || return 8
# extract to $tmp first
echo -n "extracting '!($patt)' from $@"
mvmsgs "!($patt)" "$tmp" "$@"
echo
# spread
echo "spreading messages to $output"
spread "$tmp"
# clean up
[ -s "$tmp" ] || rm -f "$tmp"
}
else
# move
keep() {
local from=
for from in "$@"; do
echo -n "moving '!($patt)' from $from to $output"
mvmsgs "!($patt)" "$output" "$from"
echo
done
}
fi
else
# delete
keep() {
local from=
for from in "$@"; do
echo -n "deleting '!($patt)' from $from"
rmmsgs "!($patt)" "$from"
echo
done
}
fi
# do the work
keep "$@"
;;
esac
}
Mutt() {
local from=$1 cmd=$2
screen -D -m \
mutt -z -f "$from" \
-e 'unset confirmcreate confirmappend mark_old' \
-e 'set delete quit' \
-e 'push "'"$cmd"'"'
}
rmmsgs() {
local patt=$1 from=; shift
for from in "$@"; do
Mutt "$from" "<delete-pattern>$patt<Enter><quit>"
done
}
mvmsgs() {
local patt=$1 to=$2 from=; shift 2
# sanitize parameters
local opt=
case "$to" in
*.gz|*.bz2) # compressed folders
env_provides compressed_folders
# XXX: compressed folders need mbox_type=mbox (2007-03)
opt="set mbox_type=mbox"
;;
*/) # Maildir
opt="set mbox_type=Maildir"
to="`dirname "$to"`/`basename "$to"`"
;;
esac
opt=${opt:+<enter-command>$opt<Enter>}
mkdir -p "`dirname "$to"`" || err "cannot create \`$to'"
# let Mutt handle the rest
local tmp=`mktemp "$to.XXXXXX"`
for from in "$@"; do
Mutt "$from" "<tag-pattern>$patt<Enter><tag-prefix><copy-message>${tmp// / }<Enter><delete-pattern>$patt<Enter><quit>"
done
Mutt "$tmp" "<delete-pattern>!($patt)<Enter><quit>"
Mutt "$tmp" "<tag-pattern>.<Enter>$opt<tag-prefix><save-message>${to// / }<Enter><quit>"
rm -f "$tmp"
}
env_provides mutt
"$Name" "$@"
| true
|
c2c3c5276c2b3df733ca596498d70371c6c3aebe
|
Shell
|
haiyaoxliu/btd5
|
/tools/_log.sh
|
UTF-8
| 208
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Append each argument as a timestamped line to the project log file.
# Exits 1 if the log file does not exist, 0 after logging successfully.
LOG=~/Github/btd5/tools/log
if [ ! -f "$LOG" ]; then
# Bug fixed: $now was used here before it had ever been assigned.
now=$(date +"%T")
echo "[$now] ABORT: no log"
exit 1
fi
for MSG in "$@"
do
now=$(date +"%T")
echo "[$now] $MSG" >> "$LOG"
done
# Bug fixed: the script previously ended with 'exit 1' even on success.
exit 0
| true
|
5fdd1ce213bdd81472cafd4f2df43ac32a978923
|
Shell
|
cul-it/drupal-scripts
|
/aegir_site_migrate.sh
|
UTF-8
| 7,558
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# aegir_site_migrate.sh - move a production site to an existing site in aegir
# based on https://omega8.cc/import-your-sites-to-aegir-in-8-easy-steps-109
# run this on test server
### svn:keyword $Date: 2014-03-31 16:51:59 -0400 (Mon, 31 Mar 2014) $
### svn:keyword $Author: jgr25 $
### svn:keyword $Rev: 2264 $
### svn:keyword $URL: https://svn.library.cornell.edu/cul-drupal/scripts/update_test_from_production.sh $
testdomain="xxx.test2.library.cornell.edu"
productiondomain="xxx.library.cornell.edu"
SOURCE_MACHINE="victoria01"
HOST_MACHINE="sf-lib-web-007.serverfarm.cornell.edu"
AEGIR_HOST="web-stg.library.cornell.edu"
DOMAIN_SUFFIX="stg.library.cornell.edu"
USER_GROUP="aegir:apachegir"
# An error exit function
function error_exit
{
echo "**************************************"
echo "$1" 1>&2
echo "**************************************"
exit 1
}
function Confirm() {
while true
do
echo -n "Please confirm (y or n) :"
read CONFIRM
case $CONFIRM in
y|Y|YES|yes|Yes) return 1 ;;
n|N|no|NO|No) return 0 ;;
*) echo Please enter only y or n
esac
done
}
# First we define the function
function ConfirmOrExit() {
while true
do
echo -n "Please confirm (y or n) :"
read CONFIRM
case $CONFIRM in
y|Y|YES|yes|Yes) break ;;
n|N|no|NO|No)
echo Aborting - you entered $CONFIRM
exit
;;
*) echo Please enter only y or n
esac
done
echo You entered $CONFIRM. Continuing ...
}
# Make sure we're on the test machine
if [ "$HOSTNAME" != "$HOST_MACHINE" ]; then
error_exit "Only run $0 on $AEGIR_HOST"
fi
# Make sure only root can run our script
if [[ $EUID -ne 0 ]]; then
echo "Usage: sudo $0 <production domain> <platform_name> <site name>"
error_exit "This script has to be run with sudo powers."
fi
# check argument count
if [ $# -ne 3 ]; then
error_exit "Usage: sudo $0 <production domain> <platform_name> <site name>"
fi
productiondomain="$1"
platform_name="$2"
site_name="$3"
testsite="/var/aegir/platforms/$platform_name/sites/$site_name"
productionsite="$SUDO_USER@$SOURCE_MACHINE:/libweb/sites/$productiondomain/htdocs/"
productionsite_files="${productionsite}sites/default/files"
if [ ! -d "$testsite" ]; then
error_exit "First create $site_name in platform $platform_name!"
fi
echo "This will move $productiondomain from $SOURCE_MACHINE into an Aegir platform called $platform_name and site named $site_name"
ConfirmOrExit
let STEP=1
echo "First do this: "
echo " $STEP. Configure the 'Site under maintenance' block on the PRODUCTION server"
echo " http://$productiondomain/admin/build/block"
echo " http://$productiondomain/admin/structure/block"
echo " click 'configure' next to 'Site under maintenance'"
echo " Under 'Page specific visibility settings' select"
echo " 'Show on every page except the listed pages.'"
echo "You did this, right?"
ConfirmOrExit
let STEP=STEP+1
echo " $STEP. make a backup of the PRODUCTION site to the Manual Backups Directory"
echo " http://$productiondomain/admin/content/backup_migrate/export"
echo " http://$productiondomain/admin/config/system/backup_migrate"
echo "You did this, right?"
ConfirmOrExit
let STEP=STEP+1
echo " $STEP. Copy site files from $SOURCE_MACHINE"
echo " rsync needs your password for the PRODUCTION server:"
sudo rsync -av "$productionsite_files/*" "$testsite/files"
# copy in a .htaccess file so clean urls will work
cd "$testsite"
wget -q https://drupalgooglecode.googlecode.com/svn/trunk/.htaccess
# set up permissions for aegir
sudo chmod -R 755 "$testsite"
sudo chmod -R 777 "$testsite/files/"
sudo chown -R "$USER_GROUP" "$testsite"
# see if there are drupal private file system files to move
testsiteprivate="${testsite}/private/files/"
sudo mkdir -p "$testsiteprivate"
if [ -d "$testsiteprivate" ]; then
productionsiteprivate="$SUDO_USER@$SOURCE_MACHINE:/libweb/sites/$productiondomain/drupal_files/"
# rsync needs sudo
echo " rsync needs your password again for the private data on the PRODUCTION server:"
rsync -av --exclude=.svn "$productionsiteprivate" "$testsiteprivate"
# set up permissions for aegir
sudo chmod -R 775 "${testsite}/private"
sudo chown -R "$USER_GROUP" "$testsiteprivate"
fi
let STEP=STEP+1
echo "Now do this: "
echo " $STEP. Go to the new site and set up user #1 (Administrative User)"
echo " http://$AEGIR_HOST/hosting/sites"
echo " click on your site: $platform_name.${DOMAIN_SUFFIX}"
echo " click on the Go to $platform_name.${DOMAIN_SUFFIX} link"
echo " set up admin user email and password"
echo " Hit Save at the bottom of the page"
echo "You did this, right?"
ConfirmOrExit
let STEP=STEP+1
echo "Now do this:"
echo " $STEP. Enable the Backup Migrate module"
echo " http://$platform_name.${DOMAIN_SUFFIX}/admin/build/modules"
echo " http://$platform_name.${DOMAIN_SUFFIX}/admin/modules"
echo " Check off Backup Migrate"
echo " Hit Save configuration at the bottom of the page"
echo "You did this, right?"
ConfirmOrExit
let STEP=STEP+1
echo "Now do this:"
echo " $STEP. Check for the backup file you just created among the backups"
echo " http://$platform_name.${DOMAIN_SUFFIX}/admin/content/backup_migrate/destination/list/files/manual"
echo " http://$platform_name.${DOMAIN_SUFFIX}/admin/config/system/backup_migrate/destination/list/files/manual"
echo "Do you see the backup file there?"
Confirm
retval=$?
if [ "$retval" == 0 ]; then
let STEP=STEP+1
echo "Now do this:"
echo " $STEP. Set path of Backup Migrate manual backups so we can find the database backup"
echo " http://$platform_name.${DOMAIN_SUFFIX}/admin/content/backup_migrate/destination"
echo " http://$platform_name.${DOMAIN_SUFFIX}/admin/config/system/backup_migrate/destination"
echo " Click override or edit and set the manual backups path to "
echo " sites/$platform_name.${DOMAIN_SUFFIX}/private/files/backup_migrate/manual"
echo "You did this, right?"
ConfirmOrExit
fi
let STEP=STEP+1
echo "Now do this: "
echo " $STEP. restore the new backup copy to the TEST server"
echo " http://$platform_name.${DOMAIN_SUFFIX}/admin/content/backup_migrate/destination/list/files/manual"
echo " http://$platform_name.${DOMAIN_SUFFIX}/admin/config/system/backup_migrate/destination/list/files/manual"
echo " click 'restore' next to the latest version of the file"
echo " on the next page hit the Restore button"
echo "You did this, right?"
ConfirmOrExit
let STEP=STEP+1
echo " $STEP. Rename the site (using Migrate task) once"
echo " http://$AEGIR_HOST/hosting/sites"
echo " click on your site $platform_name.${DOMAIN_SUFFIX}"
echo " Click on Migrate > Run"
echo " Domain name: temp.$platform_name.${DOMAIN_SUFFIX}"
echo " Database server: localhost"
echo " Platform: (use Current platform)"
echo " click on Migrate and wait for the Migrate task to finish"
echo " (this process fixes up the paths to images and files within the site)"
echo "You did this, right?"
ConfirmOrExit
let STEP=STEP+1
echo " $STEP. Rename the site (using Migrate task) again a second time"
echo " http://$AEGIR_HOST/hosting/sites"
echo " click on your site temp.$platform_name.${DOMAIN_SUFFIX}"
echo " Click on Migrate > Run"
echo " Domain name: $platform_name.${DOMAIN_SUFFIX}"
echo " Database server: localhost"
echo " Platform: (use Current platform)"
echo " click on Migrate and wait for the Migrate task to finish"
echo "You did this, right?"
ConfirmOrExit
let STEP=STEP+1
echo " $STEP. Re-verify the site"
echo " http://$AEGIR_HOST/hosting/sites"
echo " click on your site $platform_name.${DOMAIN_SUFFIX}"
echo " click on Verify > Run and wait"
echo "You did this, right?"
ConfirmOrExit
echo "have a nice day"
| true
|
dbc7e41b011618cfefe570d544893b907a02e124
|
Shell
|
dselig11235/pentest-tools
|
/ssh_enum_algos.sh
|
UTF-8
| 1,291
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Enumerate SSH algorithm support (key exchange, host keys, encryption, MACs)
# for every target listed in targets.ssh, and append the colorized findings
# to one report file (.spl) per finding category.
#
# Depends on helper functions sourced below: spool, gethost, getport,
# plus the external filters sshcolor.pl and between.pl.

script=$(readlink -f "$0")
dir=$(dirname "$script")
. "$dir/target-functions.sh"
. "$dir/screenshot-functions.sh"

# BUG FIX: the original `for t in $(cat targets.ssh)` word-split on any
# whitespace; read the file one target per line instead, and quote every
# expansion of $t.
while IFS= read -r t; do
    temp=$(mktemp)
    spool "$temp" nmap -Pn --script ssh2-enum-algos -p "$(getport "$t")" "$(gethost "$t")"
    temp2=$(mktemp)
    # Strip ANSI noise and blank lines from the spooled nmap output.
    "$dir/sshcolor.pl" "$temp.spl" | grep -v '^$' > "$temp2"
    rm -f "$temp.spl"

    # Slice the per-host output into the four category report files.
    kex='Key Exchange Algorithms Used on Multiple SSH Servers.spl'
    cat "$temp2" | between.pl '' 'server_host_key_algorithms:' \
        'Nmap done' 'Nmap done' >> "$kex"
    echo >> "$kex"

    serverkey='SSH Host Keys Used on Multiple Servers.spl'
    cat "$temp2" | between.pl '' 'kex_algorithms:' 'server_host_key_algorithms:' 'encryption_algorithms:' \
        'Nmap done' 'Nmap done' >> "$serverkey"
    echo >> "$serverkey"

    encryption='Insecure Encryption Algorithms Used on Multiple SSH Servers.spl'
    cat "$temp2" | between.pl '' 'kex_algorithms:' 'encryption_algorithms:' 'mac_algorithms:' \
        'Nmap done' 'Nmap done' >> "$encryption"
    echo >> "$encryption"

    macs='Message Authentication Code Algorithms Used on Multiple SSH Servers.spl'
    cat "$temp2" | between.pl '' 'kex_algorithms:' 'mac_algorithms:' 'compression_algorithms:' \
        'Nmap done' 'Nmap done' >> "$macs"
    echo >> "$macs"

    # BUG FIX: the original leaked both mktemp files on every iteration.
    rm -f "$temp" "$temp2"
done < targets.ssh
| true
|
dc25dcc17acbacb0f4936183aa307c2838798701
|
Shell
|
jankenshow/lfind
|
/install.sh
|
UTF-8
| 287
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the `lfind` command into ~/.local/bin, add that directory to PATH
# via ~/.bashrc, and remove this installer directory afterwards.

SCRIPT_DIR=$(cd "$(dirname "$0")"; pwd)

# BUG FIX: the original used the relative path ./lfind, which only worked
# when the script was run from its own directory; anchor on SCRIPT_DIR.
chmod -R 700 "${SCRIPT_DIR}/lfind"

# BUG FIX: ~/.local/bin may not exist yet; `mv` into a missing directory
# would rename lfind to a file called "bin".
mkdir -p "$HOME/.local/bin"
mv "${SCRIPT_DIR}/lfind" "$HOME/.local/bin"

echo -n -e "\n" >> ~/.bashrc
echo "# add local commands to path" >> ~/.bashrc
echo 'export PATH="$PATH:$HOME/.local/bin"' >> ~/.bashrc

source "$HOME/.bashrc"

# ${VAR:?} aborts if SCRIPT_DIR is somehow empty, so this can never
# expand to `rm -rf ""` / `rm -rf /`.
rm -rf "${SCRIPT_DIR:?}"
| true
|
be34d1a0fce8a4bd20a3dea389977124c7894d7e
|
Shell
|
codefinity/micro-continuum
|
/zipkin/wait-for.sh
|
UTF-8
| 636
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# wait-for.sh
# Block until Elasticsearch accepts TCP connections on port 9200, then
# exec the Zipkin server (replacing this shell, so signals reach the JVM).
set -e

shift
cmd="$@"   # NOTE(review): $cmd is never used below — kept only for interface compatibility

# Poll the port directly; the original `$(nc -z …; echo $?) -eq 0` dance and
# string-valued boolean flag are unnecessary, and the leading sleep delayed
# startup by 20s even when Elasticsearch was already up.
until nc -z elasticsearch 9200; do
    echo "$(date) - Waiting for Elasticsearch"
    sleep 20
done
echo "$(date) - Elasticsearch Connected"

>&2 echo "Dependencies are up - starting zipkin Container"
exec java -jar ./app/zipkin-server-2.7.1-exec.jar STORAGE_TYPE=elasticsearch ES_HOSTS=elasticsearch
#Zipkin jar downloaded from
#https://jcenter.bintray.com/io/zipkin/java/zipkin-server/2.7.1/zipkin-server-2.7.1-exec.jar
| true
|
242daabcdb7fd87dfd49db8e13ed2afb4c47cb44
|
Shell
|
gazingatnavel/ucf-spring2017-cis3360-program2-crcheck
|
/run-test-cases.sh
|
UTF-8
| 2,785
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
# Regression harness for the crcheck assignment: compile crcheck.c (or
# crcheck.java), run it in compute ('c') and verify ('v') mode over the
# bundled inputs, and diff the output against the expected *-output.txt
# files, ignoring blank lines.

# run_crcheck MODE INPUT OUTFILE
# Dispatch to the C binary or the Java class based on $isC, capturing
# stdout in OUTFILE; aborts the whole harness if the program crashes.
run_crcheck() {
  if [ "$isC" -eq 1 ]; then
    ./crcheck "$1" "$2" > "$3" 2> /dev/null
  else
    java crcheck "$1" "$2" > "$3" 2> /dev/null
  fi
  if [ $? -ne 0 ]; then
    echo "fail (program failed or crashed)"
    exit 1
  fi
}

# compare_output ACTUAL EXPECTED
# Diff the two files ignoring blank lines (-B) and print the verdict.
compare_output() {
  if diff -B "$1" "$2" > /dev/null; then
    echo "PASS!"
  else
    echo "fail (output does not match)"
  fi
}

# Detect C or Java submission ([ -f ] replaces the deprecated [ -a ]).
if [ -f "crcheck.c" ]; then
  isC=1
elif [ -f "crcheck.java" ]; then
  isC=0
else
  echo "Unable to find source file crcheck.c or crcheck.java"
  exit 1
fi

# Compile the submission.
if [ "$isC" -eq 1 ]; then
  echo "Compiling crcheck.c..."
  gcc crcheck.c -o crcheck 2> /dev/null
else
  echo "Compiling crcheck.java"
  javac crcheck.java 2> /dev/null
fi
if [ $? -ne 0 ]; then
  echo "fail (failed to compile)"
  exit 1
fi

FILES="WC-ngi
input2A
WS
WC-16"

# For each test case: compute a CRC file from the .plain input, then
# verify the provided .crc file; check both against expected output.
for f in $FILES; do
  echo -n "Checking crcheck c $f.plain... "
  run_crcheck c "$f.plain" "$f-plain.txt"
  compare_output "$f-plain.txt" "$f-plain-output.txt"
  rm "$f-plain.txt"

  echo -n "Checking crcheck v $f.crc... "
  run_crcheck v "$f.crc" "$f-crc.txt"
  compare_output "$f-crc.txt" "$f-crc-output.txt"
  rm "$f-crc.txt"
done

# WS-BOGUS.crc intentionally fails CRC validation; the expected output
# file reflects that failure.
# BUG FIX: this case previously invoked ./crcheck unconditionally, which
# broke for Java submissions — run_crcheck dispatches on $isC like the
# loop above.
echo -n "Checking crcheck v WS-BOGUS.crc... "
run_crcheck v WS-BOGUS.crc WS-BOGUS-crc.txt
compare_output WS-BOGUS-crc.txt WS-BOGUS-crc-output.txt
rm WS-BOGUS-crc.txt

exit 0
| true
|
6a882b3b444cf88886c1c2b0647460d51ca08abb
|
Shell
|
cwru-robotics/ariac-docker
|
/prepare_ariac_system.bash
|
UTF-8
| 453
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash -x
# Build the docker images required for the ARIAC competition setup.
# Optional first argument: ROS distribution to build (defaults to melodic).
set -e

DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

echo "Preparing the ARIAC competition setup"

ROS_DISTRO_TO_BUILD=${1:-}
if [[ -z "${ROS_DISTRO_TO_BUILD}" ]]; then
    echo " - No ROS distributions specified as first arg, assumming melodic"
    sleep 3
    ROS_DISTRO_TO_BUILD="melodic"
fi

"${DIR}/ariac-server/build-images.sh" "${ROS_DISTRO_TO_BUILD}"
"${DIR}/ariac-competitor/build_competitor_base_image.bash" "${ROS_DISTRO_TO_BUILD}"
| true
|
6bd1b1f9dcaf41cf23e0ecb41728429fbb375b24
|
Shell
|
rsnemmen/raspberry-pi-overclocking
|
/normal.sh
|
UTF-8
| 301
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Run this script to cancel the overclocking.  This will return the
# raspberry pi to the standard settings and reboot the system.
#

CONFIG_SRC=./config_std.txt

# BUG FIX: the original copied blindly and rebooted even if the factory
# config file was missing, silently leaving the overclock in place.
if [ ! -f "$CONFIG_SRC" ]; then
    echo "Error: $CONFIG_SRC not found; aborting without reboot" >&2
    exit 1
fi

# applying standard, factory settings
sudo cp "$CONFIG_SRC" /boot/config.txt

echo "Applied factory settings, now rebooting"
echo

# rebooting
sudo reboot
| true
|
a0ce5ddf6ee3ae5782386cafb36067b5447ef4bb
|
Shell
|
myurasov/NodeAppStrap
|
/dev/scripts/deploy-production.sh
|
UTF-8
| 959
| 3.40625
| 3
|
[
"WTFPL"
] |
permissive
|
#!/bin/bash
# Deploy this project to the production server: rsync the tree, then
# restart the Node app under `forever`.
# NOTE(review): <server-name>/<remote-root> below are placeholders that
# must be filled in before this script is usable.
localRoot="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/../.."
# config
serverUser=root
server=<server-name>
remoteRoot=<remote-root>
remoteUser=root
# Option strings are deliberately expanded unquoted below so they
# word-split into individual flags.
rsyncOptions="-avz --delete --exclude=.DS_Store --exclude=.git --exclude=data/ --exclude=_private/"
# StrictHostKeyChecking=no + a throwaway known_hosts file: convenient for
# automation but disables MITM protection — assumed intentional here.
sshOptions="-i ${localRoot}/dev/scripts/production-remote.id_rsa -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
pushd ${localRoot}
# set key file permissions (ssh refuses world-readable private keys)
chmod 0600 ${localRoot}/dev/scripts/*.id_rsa
# create dirs, set access rights, setup environment
ssh ${sshOptions} -t ${serverUser}@${server} "\
sudo mkdir -pv ${remoteRoot}; \
sudo chown $remoteUser:$remoteUser $remoteRoot;"
# sync files
rsync ${rsyncOptions} -e "ssh ${sshOptions}" ${localRoot}/* ${serverUser}@${server}:${remoteRoot}/
# restart server
ssh ${sshOptions} -t ${serverUser}@${server} "\
forever stop ${remoteRoot}/src/server/app.js; \
forever start ${remoteRoot}/src/server/app.js;"
popd
| true
|
e91a286c419eceb4c6c29fab6263329411521a95
|
Shell
|
aurelienmaury/prom-tmp
|
/templates/host.sh
|
UTF-8
| 1,068
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Emit a single `host{...} 1` metric for the Prometheus node_exporter
# textfile collector, with labels extracted from `hostnamectl` output.

HOSTNAMECTL=$(hostnamectl)
TEXTFILE_COLLECTOR_DIR=/var/lib/node_exporter/textfile_collector
PROM_FILE=$TEXTFILE_COLLECTOR_DIR/host.prom
# Write to a PID-suffixed temp file first so scrapers never read a
# half-written metrics file; the final mv below is atomic on the same fs.
TEMP_FILE=${PROM_FILE}.$$

# append_label PATTERN LABEL PREFIX
# Extract the value of the "PATTERN: value" line from the captured
# hostnamectl output and append `PREFIX LABEL="value"` to $TEMP_FILE.
# BUG FIX: the original concatenated $2 into the printf *format* string,
# so a value containing '%' would corrupt the output; pass it as an
# argument instead. Also replaces seven copy-pasted pipelines.
append_label() {
    echo "$HOSTNAMECTL" | grep "$1" | \
        awk -v label="$2" -v prefix="$3" 'BEGIN {FS=": "};{printf "%s%s=\"%s\"", prefix, label, $2}' >> "$TEMP_FILE"
}

mkdir -p "$TEXTFILE_COLLECTOR_DIR"

echo -n 'host{' > "$TEMP_FILE"
append_label "Static hostname" host ""
append_label "Chassis" chassis ","
append_label "Machine ID" machineid ","
append_label "Boot ID" bootid ","
append_label "Operating System" os ","
append_label "Kernel" kernel ","
append_label "Architecture" arch ","
echo '} 1' >> "$TEMP_FILE"

mv "$TEMP_FILE" "$PROM_FILE"
| true
|
3a9cd8998ac66ea393d5f66fb8c72ea85c066591
|
Shell
|
NateWeiler/Resources
|
/Git/Clone/clone.bash
|
UTF-8
| 210
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Prompt for a GitHub username and clone all of that user's public
# repositories into the current directory.

echo What is the 'GitHub User '
# BUG FIX: this was `read $GitHubUser` — with the variable unset that
# expands to a bare `read`, which stores the answer in $REPLY and leaves
# $GitHubUser empty, so the API URL below always queried /users//repos.
# -r prevents backslash mangling.
read -r GitHubUser

# per_page=100 lifts the API default of 30 repositories per response.
curl -s "https://api.github.com/users/$GitHubUser/repos?per_page=100" | grep \"clone_url\" | awk '{print $2}' | sed -e 's/"//g' -e 's/,//g' | xargs -n1 git clone
| true
|
6331399556f79d442e697cfdbaa79a56ac25d51b
|
Shell
|
acupt/amazing
|
/deploy.sh
|
UTF-8
| 254
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Pull the latest sources, build the jar (tests skipped), and relaunch
# the application detached in the background.

if ! git pull; then
    echo "pull failed"
    exit 2
fi

if ! mvn clean package -Dmaven.test.skip=true; then
    echo "package failed"
    exit 2
fi

nohup java -jar target/amazing-1.0-SNAPSHOT.jar >/dev/null 2>&1 &
echo "finish"
| true
|
94e31f170e822d0cfa36d13146200680bb51856d
|
Shell
|
vKnmnn/dotfiles
|
/.bashrc
|
UTF-8
| 6,057
| 3.3125
| 3
|
[] |
no_license
|
#
# ~/.bashrc
#
# Bail out for non-interactive shells — nothing below is needed there.
[[ $- != *i* ]] && return
export PATH=/usr/local/bin:/usr/local/sbin:/usr/bin:/bin:/opt/android-sdk/platform-tools:/usr/lib/jvm/default/bin:/usr/bin/site_perl:/usr/bin/vendor_perl:/usr/bin/core_perl:/opt/VSCode-linux-x64/bin
export GIT_PROMPT_ONLY_IN_REPO=1
# Shared history across sessions: drop duplicates, unlimited in-memory
# history, and merge every prompt (-a append, -c clear, -r re-read).
export HISTCONTROL="ignoredups:erasedups"
export HISTSIZE=-1
export HISTFILESIZE=10000
shopt -s histappend
PROMPT_COMMAND="${PROMPT_COMMAND:+$PROMPT_COMMAND$'\n'}history -a; history -c; history -r"
#export TERM="xterm-256color"
export EDITOR="vim"
export VISUAL="vim"
# Optional add-ons: load them only when present.
if [[ -f $HOME/.bash_alias ]] ; then
source $HOME/.bash_alias
fi
if [[ -f $HOME/build/bash-git-prompt/gitprompt.sh ]]; then
source $HOME/build/bash-git-prompt/gitprompt.sh || echo "bash-git-prompt nicht in .config gefunden"
fi
# pywal color scheme: replay terminal color sequences in the background.
if [[ -f $HOME/.cache/wal/sequences ]];then
(cat $HOME/.cache/wal/sequences &) || echo "wal not installed?"
fi
if [[ -f $HOME/.cache/wal/colors-tty.sh ]]; then
source $HOME/.cache/wal/colors-tty.sh
fi
# Per-machine overrides, keyed by hostname.
if [[ -f ${HOME}/.bash_${HOSTNAME} ]]; then
source $HOME/.bash_${HOSTNAME}
fi
# Continuously report CPU package temperature plus fan speed (from
# lm-sensors), refreshing every two seconds until interrupted.
fanspeed() {
    while true; do
        sensors | awk '{if ($0 ~ /Package/) temp = $4; else if ($0 ~ /fan/) {fan = $2; unit = $3}} END{print temp" "fan" "unit}'
        sleep 2
    done
}
# Colorized man pages: override the termcap capabilities that `less`
# uses so bold (md), standout (so) and underline (us) text is rendered
# in color; the *e variants reset each attribute.
man() {
LESS_TERMCAP_md=$'\e[01;31m' \
LESS_TERMCAP_me=$'\e[0m' \
LESS_TERMCAP_se=$'\e[0m' \
LESS_TERMCAP_so=$'\e[01;44;33m' \
LESS_TERMCAP_ue=$'\e[0m' \
LESS_TERMCAP_us=$'\e[01;32m' \
command man "$@"
}
# cs: change into a directory and immediately list its contents.
# Arguments are passed straight through to cd.
cs() {
    # BUG FIX: the original used `cd "${@}";` — on a failed cd it still
    # listed the *old* directory; propagate the cd failure instead.
    cd "${@}" || return
    ls -G --color=auto
}
# mkcd: create a directory (including parents) and cd into it.
# If the directory already exists, just cd into it.
mkcd() {
    if [ -z "${1}" ]; then
        echo "No directory name given"
    elif [ -d "${1}" ]; then
        # BUG FIX: the original message had broken quoting/punctuation
        # (`\`"${1}" already exists.!`).
        echo "\`${1}' already exists! I'm going in."
        cd "${1}"
        pwd
    else
        # BUG FIX: mkdir and cd were joined with ';', so "Success!" was
        # printed even when mkdir failed; chain them and only report
        # success when both worked.
        if command mkdir -p "${1}" && cd "${1}"; then
            echo "Success!"
            pwd
        fi
    fi
}
## MANJARO DEFAULTS HEREAFTER
# Print a reference table of the 16 ANSI foreground/background color
# combinations, in normal and bold weight, with the escape sequence
# used for each cell.
colors() {
local fgc bgc vals seq0
printf "Color escapes are %s\n" '\e[${value};...;${value}m'
printf "Values 30..37 are \e[33mforeground colors\e[m\n"
printf "Values 40..47 are \e[43mbackground colors\e[m\n"
printf "Value 1 gives a  \e[1mbold-faced look\e[m\n\n"
# foreground colors
for fgc in {30..37}; do
# background colors
for bgc in {40..47}; do
# Strip the default codes so the white-on-black cell shows
# what "(default)" looks like with no explicit codes.
fgc=${fgc#37} # white
bgc=${bgc#40} # black
vals="${fgc:+$fgc;}${bgc}"
vals=${vals%%;}
seq0="${vals:+\e[${vals}m}"
printf "  %-9s" "${seq0:-(default)}"
printf " ${seq0}TEXT\e[m"
printf " \e[${vals:+${vals+$vals;}}1mBOLD\e[m"
done
echo; echo
done
}
[ -r /usr/share/bash-completion/bash_completion ] && . /usr/share/bash-completion/bash_completion

# NOTE(review): use_color starts as true, so the TERM-matching logic
# below can only confirm it, never disable it — confirm that is intended.
use_color=true

# Set colorful PS1 only on colorful terminals.
# dircolors --print-database uses its own built-in database
# instead of using /etc/DIR_COLORS.  Try to use the external file
# first to take advantage of user additions.  Use internal bash
# globbing instead of external grep binary.
safe_term=${TERM//[^[:alnum:]]/?}   # sanitize TERM
match_lhs=""
[[ -f ~/.dir_colors   ]] && match_lhs="${match_lhs}$(<~/.dir_colors)"
[[ -f /etc/DIR_COLORS ]] && match_lhs="${match_lhs}$(</etc/DIR_COLORS)"
[[ -z ${match_lhs}    ]] \
&& type -P dircolors >/dev/null \
&& match_lhs=$(dircolors --print-database)
[[ $'\n'${match_lhs} == *$'\n'"TERM "${safe_term}* ]] && use_color=true

if ${use_color} ; then
# Enable colors for ls, etc.  Prefer ~/.dir_colors #64489
if type -P dircolors >/dev/null ; then
if [[ -f ~/.dir_colors ]] ; then
eval $(dircolors -b ~/.dir_colors)
elif [[ -f /etc/DIR_COLORS ]] ; then
eval $(dircolors -b /etc/DIR_COLORS)
fi
fi

if [[ ${EUID} == 0 ]] ; then
PS1='\[\033[01;31m\][\h\[\033[01;36m\] \W\[\033[01;31m\]]\$\[\033[00m\] '
else
# Non-root colored PS1 is disabled here; the export at the bottom of
# this file sets the prompt instead.
true
#PS1='\[\033[01;32m\][\u@\h\[\033[01;37m\] \W\[\033[01;32m\]]\$\[\033[00m\] '
fi

alias ls='ls --color=auto'
alias grep='grep --colour=auto'
alias egrep='egrep --colour=auto'
alias fgrep='fgrep --colour=auto'
else
if [[ ${EUID} == 0 ]] ; then
# show root@ when we don't have colors
PS1='\u@\h \W \$ '
else
PS1='\u@\h \w \$ '
fi
fi

unset use_color safe_term match_lhs sh
alias cp="cp -i"                          # confirm before overwriting something
alias df='df -h'                          # human-readable sizes
alias free='free -m'                      # show sizes in MB
alias np='nano -w PKGBUILD'
alias more=less

# Allow root-owned GUI apps to talk to the local X server.
xhost +local:root > /dev/null 2>&1

#auto completion for sudo, man (interactive shells only)
if [[ "$PS1" ]]; then
complete -cf sudo
complete -cf man
fi

# Bash won't get SIGWINCH if another process is in the foreground.
# Enable checkwinsize so that bash will check the terminal size when
# it regains control.  #65623
# http://cnswww.cns.cwru.edu/~chet/bash/FAQ (E11)
shopt -s checkwinsize

shopt -s expand_aliases

# Prefer Qt5 when both qt4 and qt5 versions of a tool exist.
export QT_SELECT=5
# ex - archive extractor: dispatch on the file extension to the right
# unarchiver. Prints a message for unknown extensions or missing files.
# usage: ex <file>
# BUG FIX: every use of $1 was unquoted, which broke on archive names
# containing spaces or glob characters.
ex ()
{
    if [ -f "$1" ] ; then
        case "$1" in
            *.tar.bz2)   tar xjf "$1"   ;;
            *.tar.gz)    tar xzf "$1"   ;;
            *.bz2)       bunzip2 "$1"   ;;
            *.rar)       unrar x "$1"   ;;
            *.gz)        gunzip "$1"    ;;
            *.tar)       tar xf "$1"    ;;
            *.tbz2)      tar xjf "$1"   ;;
            *.tgz)       tar xzf "$1"   ;;
            *.zip)       unzip "$1"     ;;
            *.Z)         uncompress "$1";;
            *.7z)        7z x "$1"      ;;
            *)           echo "'$1' cannot be extracted via ex()" ;;
        esac
    else
        echo "'$1' is not a valid file"
    fi
}
# better yaourt colors
export YAOURT_COLORS="nb=1:pkg=1:ver=1;32:lver=1;45:installed=1;42:grp=1;34:od=1;41;5:votes=1;44:dsc=0:other=1;35"
# Two-line prompt: [user @ host]: [cwd] on line one, time + $ on line two.
export PS1="\[\033[38;5;7m\][\[$(tput sgr0)\]\[\033[38;5;13m\]\u\[$(tput sgr0)\]\[\033[38;5;15m\] @ \[$(tput sgr0)\]\[\033[38;5;4m\]\h\[$(tput sgr0)\]\[\033[38;5;7m\]]\[$(tput sgr0)\]\[\033[38;5;15m\]: \[$(tput sgr0)\]\[\033[38;5;3m\][\w]\[$(tput sgr0)\]\[\033[38;5;15m\] \n\[$(tput sgr0)\]\[\033[38;5;7m\]\A\[$(tput sgr0)\]\[\033[38;5;15m\] \[$(tput sgr0)\]\[\033[38;5;7m\]\\$\[$(tput sgr0)\]\[\033[38;5;15m\] \[$(tput sgr0)\]"
# NOTE(review): forcing TERM=xterm overrides the terminal's own value
# (e.g. xterm-256color) — confirm this downgrade is intentional.
export TERM=xterm
# broot shell integration (hard-coded user path).
source /home/omicron/.config/broot/launcher/bash/br
| true
|
356af4a5c9fa8fbf60c0995f0f516cf0b6f26862
|
Shell
|
jperocho/a-vagrant-scripts
|
/initsite
|
UTF-8
| 1,041
| 2.859375
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# Provision a new nginx + php-fpm development site named $1:
# writes an nginx vhost, creates the docroot with a stub index.php,
# maps <name>.dev.box in /etc/hosts, and restarts the services.

SITENAME=$1

# BUG FIX: with no argument the original still wrote a nameless vhost,
# created /vagrant/www/ clutter and appended "127.0.0.1 .dev.box" to
# /etc/hosts; fail fast instead.
if [ -z "$SITENAME" ]; then
    echo "usage: $0 <sitename>" >&2
    exit 1
fi

cat > "/etc/nginx/conf.d/${SITENAME}.dev.box.conf" <<EOF
server {
listen 80;
listen [::]:80;
server_name ${SITENAME}.dev.box;
root /vagrant/www/${SITENAME};
index index.php index.html index.htm;
location / {
try_files \$uri \$uri/ /index.php?\$args;
}
error_page 404 /404.html;
location = /40x.html {
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
}
location ~ \.php$ {
try_files \$uri =404;
fastcgi_pass unix:/var/run/php-fpm/php-fpm.sock;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME \$document_root\$fastcgi_script_name;
include fastcgi_params;
}
location ~* ^.+\.(js|css)$ {
expires -1;
sendfile off;
}
}
EOF

# -p: don't fail if the docroot already exists (idempotent re-runs).
mkdir -p "/vagrant/www/${SITENAME}"
echo "${SITENAME} is now Working" >> "/vagrant/www/${SITENAME}/index.php"
echo "127.0.0.1 ${SITENAME}.dev.box" >> /etc/hosts

systemctl restart nginx
systemctl restart php-fpm
| true
|
9e3d0c52026e8bb2928b851706c45f49ae722e62
|
Shell
|
maximbaz/connman_dmenu
|
/connman_dmenu
|
UTF-8
| 1,814
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# dmenu front-end for connman: list known services, let the user pick
# one, then connect to it (or disconnect if it is already connected).
# Usage: connman_dmenu [scan]   — "scan" triggers a fresh wifi scan first.
# NOTE(review): STORAGE_PATH is defined but never used below — confirm
# before removing.
readonly STORAGE_PATH=/var/lib/connman
readonly SCAN_RESULT=$(mktemp) || exit
trap 'rm -f $SCAN_RESULT' EXIT
[[ "$1" == "scan" ]] && scan=1 || scan=0
# Fill $SCAN_RESULT with "name<TAB>service-id" lines from connmanctl.
# The rev|sed|rev pipeline splits off the *last* whitespace-separated
# field (the service id) from the free-form, possibly space-containing
# service name.
get_services() {
connmanctl enable wifi &>/dev/null
(( scan )) && timeout 5 connmanctl scan wifi &>/dev/null
connmanctl services | cut -c 5- | rev | sed -r 's/([^\s+])\s+(.*)/\1\t\2/' | rev >$SCAN_RESULT
}
# Menu line number -> display name (column 1 of $SCAN_RESULT).
index_to_name() {
awk -v line="$1" 'NR == line { print $1 }' $SCAN_RESULT
}
# Menu line number -> connman service id (column 2 of $SCAN_RESULT).
index_to_service() {
awk -v line="$1" 'NR == line { print $2 }' $SCAN_RESULT
}
# Service ids look like wifi_<mac>_<ssid>_<mode>_<security>; field 5 is
# the security type.
id_to_security() {
cut -d _ -f 5 <<<"$1"
}
# A service counts as connected unless its State is idle or failure.
is_connected() {
state=$(connmanctl services "$1" | awk '$1 == "State" { print $3 }')
[[ ! "$state" =~ ^(idle|failure)$ ]]
}
# Render one numbered menu line per service: name, security tag, and a
# "(disconnect)" marker for the currently connected one.
create_dmenu() {
local order=1
local name
local id
local security
local disconnect
while IFS=$'\t' read -r name id; do
security=''
disconnect=''
is_connected "$id" && disconnect='(disconnect)'
case "$id" in
wifi_*) security="$(id_to_security "$id")" ;;
vpn_*)  security=vpn ;;
esac
printf '%2s %-40s%9s %s\n' "$order" "$name" "$security" "$disconnect"
(( order++ ))
done <$SCAN_RESULT
}
get_services
# Show the menu and take the leading number of the chosen line.
index="$(create_dmenu | dmenu -l 10 -i -p 'Select network' | sed 's/^ *//g' | cut -d ' ' -f 1)"
name="$(index_to_name "$index")"
id="$(index_to_service "$index")"
[ -z "$id" ] && exit 1
# Selecting a connected service toggles it off.
if is_connected "$id"; then
connmanctl disconnect "$id"
exit 0
fi
# Attempt the connection, then surface any failure via notify-send.
connman_msg="$(timeout 10 connmanctl connect "$id" 2>&1 | head -n 1)"
if [[ "$connman_msg" != Connected* ]]; then
error_msg='Timeout connecting to network'
[[ "$connman_msg" ]] && error_msg="$(cut -d ' ' -f 3- <<<"$connman_msg")"
notify-send -u critical "Unable to connect to \"$name\"" "$error_msg"
fi
| true
|
4e542671e6dfe20ccdf6610284083fbac3710455
|
Shell
|
particleman314/ShellLibrary
|
/lib/newscriptgen.sh
|
UTF-8
| 7,858
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
###############################################################################
# Copyright (c) 2016. All rights reserved.
# Mike Klusman IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS" AS A
# COURTESY TO YOU. BY PROVIDING THIS DESIGN, CODE, OR INFORMATION AS
# ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE, APPLICATION OR
# STANDARD, Mike Klusman IS MAKING NO REPRESENTATION THAT THIS IMPLEMENTATION
# IS FREE FROM ANY CLAIMS OF INFRINGEMENT, AND YOU ARE RESPONSIBLE
# FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE FOR YOUR IMPLEMENTATION.
# Mike Klusman EXPRESSLY DISCLAIMS ANY WARRANTY WHATSOEVER WITH RESPECT TO
# THE ADEQUACY OF THE IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO
# ANY WARRANTIES OR REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE
# FROM CLAIMS OF INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE.
###############################################################################
###############################################################################
#
## @Author : Mike Klusman
## @Software Package : Shell Automated Testing -- New Script Generation
## @Application : Support Functionality
## @Language : Bourne Shell
## @Version : 1.00
#
###############################################################################
###############################################################################
#
# Functions Supplied:
#
# __add_content_file
# __has_header
# __has_disclaimer
# add_content_type
# add_disclaimer
# add_header
#
###############################################################################
# shellcheck disable=SC2016,SC2068,SC2039,SC2181
[ -z "${__BACKUP_EXTENSION}" ] && __BACKUP_EXTENSION='bck'

# Prepend the contents of --contentfile to --file, keeping a backup copy
# (<file>.<ext>) that is removed afterwards unless --keep-backup is given.
# Returns ${PASS} on success, ${FAIL} on bad input or missing files.
# NOTE: every `$(unknown)` placeholder in the published source clearly
# stood for "${filename}" (the variable parsed from -f/--file); restored.
__add_content_file()
{
  __debug $@

  typeset backup_ext="${__BACKUP_EXTENSION}"
  typeset filename=
  typeset contentfile=
  typeset rmbackup="${YES}"

  OPTIND=1
  while getoptex "f: file: c: contentfile: k. keep-backup. b. backup-extension." "$@"
  do
    case "${OPTOPT}" in
    'f'|'file'             ) filename="${OPTARG}";;
    'c'|'contentfile'      ) contentfile="${OPTARG}";;
    'k'|'keep-backup'      ) rmbackup="${NO}";;
    'b'|'backup-extension' ) backup_ext="${OPTARG}";;
    esac
  done
  shift $(( OPTIND-1 ))

  if [ "$( is_empty --str "${filename}" )" -eq "${YES}" ]
  then
    return "${FAIL}"
  else
    [ ! -f "${contentfile}" ] && return "${FAIL}"

    typeset tmpfile="$( make_temp_file )"
    [ "$( is_empty --str "${tmpfile}" )" -eq "${YES}" ] && return "${FAIL}"

    # Drop any stale backup before creating a fresh one.
    [ -f "${filename}.${backup_ext}" ] && \rm -f "${filename}.${backup_ext}"

    # New content first, then the original file appended after it.
    \cat "${contentfile}" > "${tmpfile}"
    \cat "${filename}" >> "${tmpfile}"

    \cp -pr "${filename}" "${filename}.${backup_ext}"
    [ ! -f "${filename}.${backup_ext}" ] && print_plain --message "[ ERROR ] Unable to copy original file to make backup..."

    \mv -f "${tmpfile}" "${filename}"
    if [ -f "${tmpfile}" ]
    then
      print_plain --message "[ WARN ] Unable to remove temporary file --> ${tmpfile}. Please do so manually!"
    else
      \chmod 0644 "${filename}"
    fi
    [ "${rmbackup}" -eq "${YES}" ] && [ -f "${filename}.${backup_ext}" ] && \rm -f "${filename}.${backup_ext}"
  fi
  return "${PASS}"
}
# Return ${YES} if --file already contains the package header marker
# ("Software Package : "), ${NO} otherwise or when the file is missing.
# Restores "${filename}" where the published source had `$(unknown)`.
__has_header()
{
  __debug $@

  typeset filename=

  OPTIND=1
  while getoptex "f: file:" "$@"
  do
    case "${OPTOPT}" in
    'f'|'file' ) filename="${OPTARG}";;
    esac
  done
  shift $(( OPTIND-1 ))

  [ -z "${filename}" ] || [ ! -f "${filename}" ] && return "${NO}"

  # BUG FIX: `typeset match="$(...)"` made the following $? check test
  # typeset's status, not find_match_in_file's; declare and assign
  # separately so the exit code is preserved.
  typeset match
  match="$( find_match_in_file --file "${filename}" --pattern "Software Package : " )"
  [ $? -eq "${PASS}" ] && [ "$( is_empty --str "${match}" )" -eq "${NO}" ] && return "${YES}"
  return "${NO}"
}
# Return ${YES} if --file already contains the disclaimer marker line,
# ${NO} otherwise or when the file is missing.
# Restores "${filename}" where the published source had `$(unknown)`.
__has_disclaimer()
{
  __debug $@

  typeset filename=

  OPTIND=1
  while getoptex "f: file:" "$@"
  do
    case "${OPTOPT}" in
    'f'|'file' ) filename="${OPTARG}";;
    esac
  done
  shift $(( OPTIND-1 ))

  [ -z "${filename}" ] || [ ! -f "${filename}" ] && return "${NO}"

  # BUG FIX: split declaration from assignment so $? reflects
  # find_match_in_file rather than typeset.
  typeset match
  match="$( find_match_in_file --file "${filename}" --pattern "IS FREE FROM ANY CLAIMS OF INFRINGEMENT, AND YOU ARE RESPONSIBLE" )"
  [ $? -eq "${PASS}" ] && [ "$( is_empty --str "${match}" )" -eq "${NO}" ] && return "${YES}"
  return "${NO}"
}
# One-time module initialization: locate the library tree and load the
# file-management helpers this module depends on.
__initialize_newscriptgen()
{
  # BUG FIX: '$0' was single-quoted, so dirname received the literal
  # two-character string $0 instead of the script path.
  [ -z "${SLCF_SHELL_TOP}" ] && SLCF_SHELL_TOP=$( \readlink "$( \dirname "$0" )" )

  __load __initialize_filemgt "${SLCF_SHELL_TOP}/lib/filemgt.sh"

  __initialize "__initialize_newscriptgen"
}
# Mark this module as fully loaded in the framework's registry.
__prepared_newscriptgen()
{
__prepared "__prepared_newscriptgen"
}
# Generic driver: check whether --file already has the requested content
# type (default: disclaimer) via __has_<type>, and call add_<type> to
# insert it when absent. Returns the delegate's status.
# Restores "${filename}" where the published source had `$(unknown)`.
add_content_type()
{
  __debug $@

  typeset filename=
  typeset content_type=

  OPTIND=1
  while getoptex "c: content: f: file:" "$@"
  do
    case "${OPTOPT}" in
    'f'|'file'    ) filename="${OPTARG}";;
    'c'|'content' ) content_type="${OPTARG}";;
    esac
  done
  shift $(( OPTIND-1 ))

  [ "$( is_empty --str "${content_type}" )" -eq "${YES}" ] && content_type='disclaimer'
  [ "$( is_empty --str "${filename}" )" -eq "${YES}" ] && return "${FAIL}"

  # Dispatch by name: __has_disclaimer / __has_header, etc.
  typeset funccall="__has_${content_type}"
  #typeset RC=$( fn_exists ${funccall} )
  #[ "${RC}" -ne "${PASS}" ] && return "${RC}"

  eval "${funccall} --file \"${filename}\""
  RC=$?
  if [ "${RC}" -eq "${NO}" ]
  then
    eval "add_${content_type} --file \"${filename}\""
    RC=$?
  else
    print_plain --message "Already detected ${content_type} within file..."
  fi
  return "${RC}"
}
# Prepend the license disclaimer (default: resources/common/disclaimer.txt)
# to --file via __add_content_file; --keep-backup preserves the backup copy.
# Restores "${filename}" where the published source had `$(unknown)`.
add_disclaimer()
{
  __debug $@

  typeset filename=
  typeset disclaimer=
  typeset rmbackup="${YES}"

  OPTIND=1
  while getoptex "f: file: c: contentfile: k. keep-backup." "$@"
  do
    case "${OPTOPT}" in
    'f'|'file'        ) filename="${OPTARG}";;
    'c'|'contentfile' ) disclaimer="${OPTARG}";;
    'k'|'keep-backup' ) rmbackup="${NO}";;
    esac
  done
  shift $(( OPTIND-1 ))

  [ -z "${filename}" ] || [ ! -f "${filename}" ] && return "${FAIL}"

  # Need to add capability of substitution where necessary (base list and user augmented list)
  # NOTE(review): current_year is computed but not yet used — presumably
  # reserved for the substitution feature above.
  typeset current_year="$( \date "+%Y" )"

  [ "$( is_empty --str "${disclaimer}" )" -eq "${YES}" ] && disclaimer="${SLCF_SHELL_TOP}/resources/common/disclaimer.txt"

  typeset RC="${PASS}"
  __add_content_file --file "${filename}" --contentfile "${disclaimer}" --keep-backup "$( invert "${rmbackup}" )"
  RC=$?
  if [ "${RC}" -ne "${PASS}" ]
  then
    print_plain --message "Unable to properly complete request to add disclaimer information!"
    print_plain --message "File queried --> <<${disclaimer}>> for inclusion into <<${filename}>>"
  fi
  return "${RC}"
}
# Prepend the package header template (default: resources/common/pkgdetail.txt)
# to --file via __add_content_file; --keep-backup preserves the backup copy.
# Restores "${filename}" where the published source had `$(unknown)`.
add_header()
{
  __debug $@

  typeset filename=
  typeset simple_header=
  typeset rmbackup="${YES}"

  OPTIND=1
  while getoptex "f: file: c: contentfile: k. keep-backup." "$@"
  do
    case "${OPTOPT}" in
    'f'|'file'        ) filename="${OPTARG}";;
    'c'|'contentfile' ) simple_header="${OPTARG}";;
    'k'|'keep-backup' ) rmbackup="${NO}";;
    esac
  done
  shift $(( OPTIND-1 ))

  [ -z "${filename}" ] || [ ! -f "${filename}" ] && return "${FAIL}"

  [ "$( is_empty --str "${simple_header}" )" -eq "${YES}" ] && simple_header="${SLCF_SHELL_TOP}/resources/common/pkgdetail.txt"

  typeset RC="${PASS}"
  __add_content_file --file "${filename}" --contentfile "${simple_header}" --keep-backup "$( invert "${rmbackup}" )"
  RC=$?
  if [ "${RC}" -ne "${PASS}" ]
  then
    print_plain --message "Unable to properly complete request to add header information!"
    print_plain --message "File queried --> << ${simple_header} >> for inclusion into << ${filename} >>"
  fi
  return "${RC}"
}
# ---------------------------------------------------------------------------
# Bootstrap: if the framework's __initialize function is not yet defined
# in this shell, source the file-management library that provides it,
# then run this module's init/ready hooks.
type "__initialize" 2>/dev/null | \grep -q 'is a function'
if [ $? -ne 0 ]
then
# shellcheck source=/dev/null
. "${SLCF_SHELL_FUNCTIONDIR}/filemgt.sh"
fi

__initialize_newscriptgen
__prepared_newscriptgen
| true
|
95c3cd143d706096fa3f55ceeab69f65b93c54d6
|
Shell
|
IBMStreams/streamsx.sttgateway
|
/tests/frameworktests/tests/StreamsxSttgateway/Translation/TestSuite.sh
|
UTF-8
| 413
| 2.953125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#--variantList='de_DE fr_FR it_IT es_ES pt_BR ja_JP zh_CN zh_TW en_US'
##--variantList='en_US'

# must set cat default for all suites to be ignored in quick test run
# Only the English and German locale variants belong to the "quick"
# category; all other locales run in the full ("default") sweep only.
if [[ $TTRO_variantSuite == 'en_US' || $TTRO_variantSuite == 'de_DE' ]]; then
setCategory 'quick'
else
setCategory 'default'
fi

# Framework hook: switch the process locale to this suite's variant
# before the translation tests run.
function testPreparation {
local tmp="${TTRO_variantSuite}.UTF-8"
echo "Set language $tmp"
export LC_ALL="$tmp"
}
| true
|
24b71973eb910d94cf992e9da5fe0a17ae886996
|
Shell
|
acoret/zdcclient
|
/PKGBUILD
|
UTF-8
| 709
| 2.53125
| 3
|
[] |
no_license
|
# Arch Linux PKGBUILD for zdcclient, a campus-network 802.1x client.
pkgname='zdcclient'
pkgver='1.6'
pkgrel=1
pkgdesc="Nettool for school in china,Connect x802.1fixed"
url="https://github.com/acoret/zdcclient"
# BUG FIX: 'i386' is not a valid Arch architecture name; 32-bit x86 is 'i686'.
arch=('x86_64' 'i686')
license=('GPL')
# BUG FIX: the unauthenticated git:// protocol has been disabled by GitHub;
# use the git+https VCS source form instead.
source=($pkgname::git+https://github.com/acoret/zdcclient.git)
# NOTE(review): gitsource is not consumed by makepkg — kept for reference.
gitsource=($pkgname::https://github.com/acoret/zdcclient.git)
makedepends=('libcap' 'make' 'git')
depends=('libcap')
md5sums=('SKIP')

build()
{
	cd $pkgname
	# BUG FIX: bare `make -j` spawns unlimited parallel jobs; bound it
	# by the number of available CPUs.
	make -j"$(nproc)"
}

package()
{
	echo 'please edit runzdclient...'
	cd $pkgname
	install -Dm4755 zdclient ${pkgdir}/usr/local/bin/zdclient
	install -Dm0755 runzdclient ${pkgdir}/usr/local/bin/runzdclient
}

post_install()
{
	echo 'please check if /usr/local/bin/runzdclient contain right user and passwd'
}
| true
|
efd2704dd09f1bd138aa659779ffc05cf372ff7e
|
Shell
|
TheStrix/dotfiles
|
/aliases
|
UTF-8
| 5,785
| 3.1875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Custom exports
if [[ "$(mount | grep work)" ]]; then
export C_WORKDIR="/work";
export C_ANDROIDWORKDIR="/work/Android";
else
export C_ANDROIDWORKDIR="${HOME}";
fi
export C_ANDROIDTOOLSDIR="${C_ANDROIDWORKDIR}/tools";
export C_LINEAGEDIR="${C_ANDROIDWORKDIR}/lineage";
export C_PADIR="${C_ANDROIDWORKDIR}/pa";
export C_OTHERSTUFFDIR="${C_ANDROIDWORKDIR}/otherstuff";
export C_DUMPSDIR="${C_ANDROIDWORKDIR}/dumps";
export EDITOR=nano
export TZ=Asia/Kolkata
export KBUILD_BUILD_USER=ParthB
export USE_CCACHE=1
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk
export PATH=~/bin:$HOME/.dotfiles/bin:${C_WORKDIR}/bin:${C_ANDROIDTOOLSDIR}/android-studio/Sdk/platform-tools:$PATH
if [[ $HOSTNAME = "build3.de.xda-developers.com" && ! $(command -v zsh) ]]; then
if [[ $(which zsh) != $SHELL ]]; then
echo -e "${lightred}ZSH installed but not set as default shell...\nSet \$PATH, \$SHELL in ~/.profile.${nc}"
fi
fi
# Golang
if [[ -d $C_ANDROIDTOOLSDIR/go ]]; then
export GOPATH=${C_ANDROIDTOOLSDIR}/go
export GOBIN=$GOPATH/bin
export PATH=$GOBIN:$PATH
fi
# Custom aliases
alias shutdown='sudo shutdown -P now'
alias yufastboot='fastboot -i 0x2A96'
alias grepandro='grep -i "\.so\|\.xml\|\.conf\|\.txt\|\.cfg\|\.dat"'
alias setperf='echo "performance" | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor'
alias setsave='echo "powersave" | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor'
alias path='echo ${PATH}'
alias jadx='bash ${C_ANDROIDTOOLSDIR}/jadx/build/jadx/bin/jadx-gui'
alias jadx_cli='bash ${C_ANDROIDTOOLSDIR}/jadx/build/jadx/bin/jadx'
alias venv=". $HOME/android/venv/bin/activate"
if [[ -d $C_PADIR ]]; then
# objdump_aarch64
if [[ -f ${C_PADIR}/prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/aarch64-linux-android/bin/objdump ]]; then
alias objdump_aarch64='${C_PADIR}/prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/aarch64-linux-android/bin/objdump'
else
alias objdump_aarch64='echo -e "${lightred}objdump file path not found!\nMake sure you have PA source code syned.\nCheck custombashrc for more.${nc}"'
fi
# aarch64 toolchain
if [[ -d "$C_PADIR/prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin" ]]; then
aarch64_tc_bin="$C_PADIR/prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin"
fi
elif [[ -d $C_LINEAGEDIR ]]; then
# objdump_aarch64
if [[ -f ${C_LINEAGEDIR}/prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/aarch64-linux-android/bin/objdump ]]; then
alias objdump_aarch64='${C_LINEAGEDIR}/prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/aarch64-linux-android/bin/objdump'
else
alias objdump_aarch64='echo -e "${lightred}objdump file path not found!\nMake sure you have LAOS/PA source code syned.\nCheck custombashrc for more.${nc}"'
fi
# aarch64 toolchain
if [[ -d "$C_LINEAGEDIR/prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin" ]]; then
aarch64_tc_bin="$C_LINEAGEDIR/prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin"
fi
else
ANDROID_SOURCE_PRESENT="false"
fi
alias makeoutkernel='make O=../out ARCH=arm64 CROSS_COMPILE="${aarch64_tc_bin}/aarch64-linux-android-"'
# ADB
function adbrem() { adb root && adb wait-for-$(adb devices | tail -2 | head -1 | cut -f 2 | sed 's/ *$//g') remount; }
alias adbs='adbrem && adb shell'
alias adbgp='adb shell getprop'
alias adbr='adb reboot'
alias adbpo='adb shell reboot -p'
alias adbrb='adb reboot bootloader'
alias adbrr='adb reboot recovery'
alias adbservice='adb shell service list'
alias adbdumpsys='adb shell dumpsys'
alias adbstrace='adb shell strace'
# Fastboot
alias fbdev='fastboot devices'
alias fbr='fastboot reboot'
alias fbboot='fastboot boot'
alias fbflboot='fastboot flash boot'
alias fbflrec='fastboot flash recovery'
# git
if [[ "$(command -v hub)" ]]; then
alias git='hub'
fi
alias gtcp='git cherry-pick'
alias grev='git revert'
alias glog='git log --pretty=format:"%h - %an : %s"'
alias gcomgmail='git commit --author="TheStrix <parthbhatia98@gmail.com>"'
alias gcomlaos='git commit --author="TheStrix <parthbhatia@lineageos.org>"'
#------------------------------------------////
# cd-into aliases
#------------------------------------------////
alias dotfiles='cd ~/.dotfiles'
alias laos='cd ${C_LINEAGEDIR}'
alias pa='cd ${C_PADIR}'
alias omni='cd ${C_ANDROIDWORKDIR}/omni'
alias aosp='cd ${C_ANDROIDWORKDIR}/aosp'
# LAOS
alias sagitlaos_d='cd ${C_LINEAGEDIR}/device/xiaomi/sagit'
alias sagitlaod_k='cd ${C_LINEAGEDIR}/kernel/xiaomi/msm8998'
alias sagitloas_v='cd ${C_LINEAGEDIR}/vendor/xiaomi/sagit'
alias kenzolaos_d='cd ${C_LINEAGEDIR}/device/xiaomi/kenzo'
alias kenzolaos_k='cd ${C_LINEAGEDIR}/kernel/xiaomi/msm8956'
alias kenzolaos_v='cd ${C_LINEAGEDIR}/vendor/xiaomi/kenzo'
alias hydrogenlaos_d='cd ${C_LINEAGEDIR}/device/xiaomi/hydrogen'
alias hydrogenlaos_k='cd ${C_LINEAGEDIR}/kernel/xiaomi/msm8956'
alias hydrogenlaos_v='cd ${C_LINEAGEDIR}/vendor/xiaomi/hydrogen'
alias 8956laos_d='cd ${C_LINEAGEDIR}/device/xiaomi/msm8956-common'
alias 8956laos_v='cd ${C_LINEAGEDIR}/vendor/xiaomi/msm8956-common'
alias xiaomilaos_v='cd ${C_LINEAGEDIR}/vendor/xiaomi'
# AOSPA
alias sagitpa_d='cd ${C_PADIR}/device/xiaomi/sagit'
alias sagitpa_k='cd ${C_PADIR}/kernel/xiaomi/msm8998'
alias sagitpa_v='cd ${C_PADIR}/vendor/xiaomi/sagit'
alias xiaomipa_v='cd ${C_PADIR}/vendor/xiaomi'
alias pa_v='cd ${C_PADIR}/vendor/pa'
# Colors
black='\e[0;30m'
blue='\e[0;34m'
green='\e[0;32m'
cyan='\e[0;36m'
red='\e[0;31m'
purple='\e[0;35m'
brown='\e[0;33m'
lightgray='\e[0;37m'
darkgray='\e[1;30m'
lightblue='\e[1;34m'
lightgreen='\e[1;32m'
lightcyan='\e[1;36m'
lightred='\e[1;31m'
lightpurple='\e[1;35m'
yellow='\e[1;33m'
white='\e[1;37m'
nc='\e[0m'
| true
|
76ce0e6566c85104e4100f09d825299851502a35
|
Shell
|
davehenton/dg
|
/helper.sh
|
UTF-8
| 3,791
| 3.984375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/bash
# shellcheck disable=SC2034
set -e
# Echo an error message before exiting
err_report() {
echo "$(tput setaf 1)optimize: error on line $1$(tput sgr0)"
}
trap 'err_report $LINENO' ERR
# --------------------------
# CONSTANTS
# --------------------------
BEGINNING_OF_LINE="\\r"
ERASE="$BEGINNING_OF_LINE\\033[K"
TEMP=".tmp"
SETUP_OUTPUT_FILE="$TEMP/setup_output"
OPTIM_OUTPUT_FILE="$TEMP/optim_output"
TESTING_COMMAND_OUTPUT_FILE="$TEMP/testing_command_output"
IMAGE_OPTIM_PATH=/Applications/ImageOptim.app/Contents/MacOS/ImageOptim
# --------------------------
# PRINT FUNCTIONS
# --------------------------
erase_line() {
echo -en "$ERASE"
}
reset_cursor() {
echo -en "$BEGINNING_OF_LINE"
}
# Echos a simple status message
print_status_message() {
local message=$1
echo -e "$(tput setaf 6)$message$(tput sgr0)"
}
# Prints a success message with a green checkmark
print_success_message() {
local message=$1
erase_line
echo -en "$message"
echo -e "$(tput setaf 2)√$(tput sgr0)"
}
# Prints a "working" message with iteration count
print_working_message() {
local spin='-\|/'
local overall_spin_iteration=$1
local message=$2
local verification_message=$3
local spin_iteration=$((overall_spin_iteration%4))
# Special case the first few iterations to show a verification message as needed
if [ "$overall_spin_iteration" -lt 6 ] && [ "$verification_message" != "" ]; then
message="$verification_message"
fi
erase_line
print_information_message "$message"
printf "%s" "${spin:$spin_iteration:1}"
reset_cursor
}
# Prints a "(pass 3)" type of string in yellow with given message content
print_information_message() {
local message=$1
echo -en "$(tput setaf 3)$message$(tput sgr0)"
}
# Prints an error message with a skull and crossbones to show that
# something was impossible, with an optional progress message
print_error_message() {
local message=$1
local progress_message=$2
erase_line
echo -en "$(tput setaf 1)"
echo -en "$message "
echo -en $'\xE2\x98\xA0' # Skull and crossbones
print_information_message "$progress_message"
echo ""
}
# Prints a spinning progress indicator until the last command before this
# is finished. It uses the message passed in to print a status message
print_progress_indicator() {
local message=$1
local verification_message=$2
local pid=$!
# Prints a message that moves so we show progress
local spin_iteration=0
while kill -0 $pid 2>/dev/null
do
spin_iteration=$((spin_iteration+1))
print_working_message "$spin_iteration" "$message" "$verification_message"
sleep .15
done
}
# --------------------------
# OTHER FUNCTIONS
# --------------------------
# Installs a given named dependency if needed
install_if_needed() {
local name="$1"
local check_command=$2
local is_installed_check_result="$3"
local install_command=$4
local skip_printing="$5"
eval "$check_command >$SETUP_OUTPUT_FILE 2>$SETUP_OUTPUT_FILE"
# Check to see if item is installed
if [ "$(grep "$is_installed_check_result" "$SETUP_OUTPUT_FILE" | wc -c)" -ne 0 ]; then
print_success_message "$name already installed "
return
fi
# We can assume we need to install it - let's show progress
if [[ "$skip_printing" == "" ]]; then
erase_line
print_information_message "$name: installing... "
echo ""
fi
$install_command
if [[ "$skip_printing" == "" ]]; then
print_success_message "$name now installed "
fi
}
# --------------------------
# MAIN
# --------------------------
# Simply creates the temp directory
mkdir -p $TEMP
touch $SETUP_OUTPUT_FILE
touch $OPTIM_OUTPUT_FILE
| true
|
fb4f9ce0797768bb17f8d41521101e04b7d08793
|
Shell
|
ReconCell/smacha
|
/smacha_ros/doc/build_locally_docker.sh
|
UTF-8
| 1,606
| 3.984375
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#===============================================================================
# RECONCELL DOCUMENTATION LOCAL DOCKER BUILD SCRIPT
#
# This script will build the current package documentation locally by running
# the ReconCell documentation build script in a local ROS Kinetic docker image.
# This method assumes no dependencies other than docker.
#
# NOTE: The script must be run from the root directory of the current package.
#
# After the build, the documentation will be available in the 'public' folder
# in the root directory of the current package.
#
# Author: Barry Ridge, JSI
# Date: 16th April 2018
#===============================================================================
# Check if we're in a package root that is also a repo root,
# or we're in a package root that is not a repo root (i.e. we're
# actually in a repo stack of packages).
if [ -d $PWD/.git ] && [ -f $PWD/package.xml ]; then
export MOUNT_POINT=`realpath $PWD`
export PACKAGE_NAME=
elif [ -d $PWD/../.git ] && [ -f $PWD/package.xml ]; then
export MOUNT_POINT=`realpath $PWD/..`
export PACKAGE_NAME=${PWD/*\//}
else
echo "ERROR: this tool must be run from a git-managed ROS package root directory!"
exit 1
fi
# Spoof environment variables so it looks like a regular GitLab CI build
export CI_PROJECT_NAME=${MOUNT_POINT/*\//}
# Run the documentation build script in a ROS Kinetic docker image
docker run --rm -v "$MOUNT_POINT:/$CI_PROJECT_NAME" -e CI_PROJECT_NAME=$CI_PROJECT_NAME -e CI_REPOSITORY_URL=/$CI_PROJECT_NAME ros:kinetic-ros-base /$CI_PROJECT_NAME/$PACKAGE_NAME/doc/build.sh
| true
|
89cfd4ea279c18d5ae2efae36253efa71f0692cf
|
Shell
|
phoenix110/macchinetta-web-multi-blank-thymeleaf
|
/change-infra.sh
|
UTF-8
| 432
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/sh
MODE=$1
KEYWORD="REMOVE THIS LINE IF YOU USE $1"
TARGET="projectName-*"
DIRNAME=`echo $MODE | tr "[:upper:]" "[:lower:]"`
echo "change to $MODE"
rm -rf tmp
mkdir tmp
cp -r infra/$DIRNAME/* tmp/
rm -rf `/usr/bin/find tmp -name '.svn' -type d `
echo "copy infra/$DIRNAME"
#cp -rf tmp/* src/main/resources
#rm -rf tmp
cp -rf tmp/* ./
sed -i -e "/$KEYWORD/d" `grep -rIl "$1" $TARGET | grep -v '.svn'`
| true
|
2b28b35d5a6f5e05f035ea224b4f5e225238d14e
|
Shell
|
katsyoshi/dotfiles
|
/system/gentoo/post.inst.d.efi
|
UTF-8
| 256
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
if [ ! -d /boot/efi ]; then
exit 0
fi
if ! mountpoint /boot/efi > /dev/null; then
mount /boot/efi
fi
if [ ! -d /boot/efi/efi/boot ]; then
mkdir -p /boot/efi/efi/boot
fi
dracut --hostonly --kver $1
grub-mkconfig -o /boot/grub/grub.cfg
| true
|
fa299bd7a07e05479541b7f698bc5e1ecb7ae23c
|
Shell
|
Hsuing/shelll_script
|
/shell/ssl/gen_ssl_certs.sh
|
UTF-8
| 1,509
| 3.390625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -e
ROOT_DOMAIN=$1
SYS_DOMAIN=sys.$ROOT_DOMAIN
APPS_DOMAIN=apps.$ROOT_DOMAIN
DOMAIN_DIR="${ROOT_DOMAIN}_cert"
SSL_FILE=sslconf-${ROOT_DOMAIN}.conf
[ ! -d "${DOMAIN_DIR}" ] && mkdir "${DOMAIN_DIR}"
cd "${DOMAIN_DIR}"
#Generate SSL Config with SANs
if [ ! -f $SSL_FILE ]; then
cat > $SSL_FILE <<EOF
[req]
distinguished_name = req_distinguished_name
req_extensions = v3_req
[req_distinguished_name]
countryName_default = CN
stateOrProvinceName_default = ShangHai
localityName_default = ShangHai
organizationalUnitName_default = Devops
[ v3_req ]
# Extensions to add to a certificate request
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = ${ROOT_DOMAIN}
DNS.2 = *.${ROOT_DOMAIN}
DNS.3 = *.${SYS_DOMAIN}
DNS.4 = *.${APPS_DOMAIN}
EOF
fi
openssl genrsa -out RootCA.key 4096
openssl req -new -x509 -days 3650 -key RootCA.key -out RootCA.pem -subj "/C=CN/O=ShangHai/OU=IT/CN=ROOT-CN"
openssl genrsa -out ${ROOT_DOMAIN}.key 2048
openssl req -new -out ${ROOT_DOMAIN}.csr -subj "/CN=*.${ROOT_DOMAIN}/O=Devops/C=CN" -key ${ROOT_DOMAIN}.key -config ${SSL_FILE}
openssl x509 -req -days 3650 -CA RootCA.pem -CAkey RootCA.key -set_serial 01 -in ${ROOT_DOMAIN}.csr -out ${ROOT_DOMAIN}.crt -extensions v3_req -extfile ${SSL_FILE}
openssl x509 -in ${ROOT_DOMAIN}.crt -text -noout
cat ${ROOT_DOMAIN}.crt RootCA.pem > ${ROOT_DOMAIN}_fullchain.pem
openssl dhparam -out dhparam.pem 2048
rm ${ROOT_DOMAIN}.csr
| true
|
806510b8ddbef83212899646916ff87792616882
|
Shell
|
kstenschke/tictac-track
|
/test.sh
|
UTF-8
| 2,558
| 3.078125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
########################################################################################################################
# Run functional tests written with bats - https://github.com/sstephenson/bats #
# #
# Copyright (c) 2018-2019, Kay Stenschke #
# All rights reserved. #
# #
# Install bats on Linux: sudo apt-get install bats #
# Install bats on Mac: brew install bats #
########################################################################################################################
START_TIME=$SECONDS
printf "\033[4mTest commands recognition and display of help\033[0m\n"
bats ./test/functional/help.bats.sh
printf "\n\033[4mTest initialization\033[0m\n"
bats ./test/functional/initialization.bats.sh
printf "\n\033[4mTest clear command\033[0m\n"
bats ./test/functional/clear.bats.sh
printf "\n\033[4mTest start command\033[0m\n"
bats ./test/functional/start.bats.sh
printf "\n\033[4mTest stop command\033[0m\n"
bats ./test/functional/stop.bats.sh
printf "\n\033[4mTest resume command\033[0m\n"
bats ./test/functional/resume.bats.sh
printf "\n\033[4mTest comment command\033[0m\n"
bats ./test/functional/comment.bats.sh
printf "\n\033[4mTest task command\033[0m\n"
bats ./test/functional/task.bats.sh
printf "\n\033[4mTest merge command\033[0m\n"
bats ./test/functional/merge.bats.sh
printf "\n\033[4mTest split command\033[0m\n"
bats ./test/functional/split.bats.sh
printf "\n\033[4mTest day-tasks (ud) command\033[0m\n"
bats ./test/functional/day-tasks.bats.sh
printf "\n\033[4mTest (all-)day command\033[0m\n"
bats ./test/functional/day.bats.sh
printf "\n\033[4mTest remove command\033[0m\n"
bats ./test/functional/remove.bats.sh
printf "\n\033[4mTest undo command and backup creation\033[0m\n"
bats ./test/functional/undo-backup.bats.sh
printf "\n\033[4mTest view command\033[0m\n"
bats ./test/functional/view.bats.sh
ELAPSED_TIME=$(($SECONDS - $START_TIME))
printf "\nDone. Bats tests ran for $ELAPSED_TIME seconds.\n\n";
| true
|
3f2ac2427f81e9e2a5efc1a4580a1c4d4bffecae
|
Shell
|
chipmanc/AdminScripts
|
/vhostCreator.sh
|
UTF-8
| 1,312
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
if [ $# != 1 ]
then
echo "Command Syntax:"
echo "./vhostCreator.sh domainname"
else
DOMAIN=$1
if [ -e /etc/redhat-release ]; then
export distro="RHEL"
elif [ "$(lsb_release -d | awk '{print $2}')" == "Ubuntu" ]; then
export distro="Ubuntu"
fi
if [ $distro == "RHEL" ]; then
cat > /etc/httpd/vhost.d/${DOMAIN}.conf << EOF
<VirtualHost *:80>
ServerName $DOMAIN
ServerAlias www.${DOMAIN}
DocumentRoot /var/www/vhosts/${DOMAIN}/public_html
<Directory /var/www/vhosts/${DOMAIN}/public_html>
AllowOverride All
</Directory>
ErrorLog /var/log/httpd/${DOMAIN}-error.log
CustomLog /var/log/httpd/${DOMAIN}-access.log combined
</VirtualHost>
EOF
service httpd reload
elif [ $distro == "Ubuntu" ]; then
cat > /etc/apache2/sites-available/${DOMAIN} << EOF
<VirtualHost *:80>
ServerName $DOMAIN
ServerAlias www.${DOMAIN}
DocumentRoot /var/www/vhosts/${DOMAIN}/public_html
<Directory /var/www/vhosts/${DOMAIN}/public_html>
AllowOverride All
</Directory>
ErrorLog /var/log/apache2/${DOMAIN}-error.log
CustomLog /var/log/apache2/${DOMAIN}-access.log combined
</VirtualHost>
EOF
ln -s /etc/apache2/sites-available/${DOMAIN} /etc/apache2/sites-enabled/${DOMAIN}
service apache2 reload
else
echo "Could not determine OS"
fi
mkdir -p /var/www/vhosts/${DOMAIN}/public_html
fi
| true
|
1f273b857ccdee8bb12de845631143cf177be355
|
Shell
|
Brayyy/ecs-spot-watch
|
/spotWatch.sh
|
UTF-8
| 1,800
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# If EC2 is not spot, sleep forever or exit immediately
if [ "$(curl -s http://169.254.169.254/latest/meta-data/instance-life-cycle)" == "normal" ]; then
if [ "$EXIT_IF_NOT_SPOT" == "true" ]; then
echo "$(date +%s) - instance-life-cycle: normal, exiting"
exit 0
fi
echo "$(date +%s) - instance-life-cycle: normal, sleeping forever"
# Lock up the loop
while :; do sleep 3600; done
fi
if [ "$ECS_AGENT" == "" ]; then
ECS_AGENT="172.17.0.1:51678"
fi
# Read ECS data for later
ECS_CLUSTER=$(curl -s http://$ECS_AGENT/v1/metadata | jq -r .Cluster)
CONTAINER_INSTANCE=$(curl -s http://$ECS_AGENT/v1/metadata | jq -r .ContainerInstanceArn)
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
if [ "$ECS_CLUSTER" == "" ]; then
echo "$(date +%s) - Failed to identify the ECS_CLUSTER"
fi
if [ "$CONTAINER_INSTANCE" == "" ]; then
echo "$(date +%s) - Failed to identify the CONTAINER_INSTANCE"
fi
echo "$(date +%s) - ecs-spot-watch armed for $INSTANCE_ID, $CONTAINER_INSTANCE in $ECS_CLUSTER"
# Every 5 seconds, check termination time
while sleep 5; do
if [ -z $(curl -Isf http://169.254.169.254/latest/meta-data/spot/termination-time)]; then
if [ "$SHOW_OK" == "true" ]; then
echo "$(date +%s) - OK"
fi
else
echo "$(date +%s) - Instance $INSTANCE_ID marked for termination"
# Try to remove instance from cluster. Retry until successful
while :; do
/usr/local/bin/aws ecs update-container-instances-state \
--cluster $ECS_CLUSTER \
--container-instances $CONTAINER_INSTANCE \
--status DRAINING &>/tmp/ecs.log && break
# Print the aws log if the last command failed
cat /tmp/ecs.log
sleep 5
done
# Lock up the loop
while :; do sleep 3600; done
fi
done
| true
|
31c56e97be71f825f414e5dc55fba2e80e36eedf
|
Shell
|
hectorhmx/Linux-2020
|
/Intermedio/Miercoles/while.sh
|
UTF-8
| 150
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
function whil
{
contador=0
while [ $contador -lt 10 ]; do
echo "El contador es $contador"
let contador=contador+1
done
}
whil
| true
|
711c0d1af3d64c3b1321aa89989f03d7f98ca8d6
|
Shell
|
sinoe9891/ejercicios_bash
|
/tarea-case/tarea-case.sh
|
UTF-8
| 2,866
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
#Danny Sinoé Velasquez
echo "Escriba su nombre"
read nombre
echo "******** Bienvenido $nombre *********"
echo "Al sistema de operaciones aritméticas"
echo "*************************************"
m=0
num1=$1
num2=$2
num3=$3
DIA=`date +"%d/%m/%Y"`
HORA=`date +"%H:%M"`
echo "Hoy es el $DIA y la hora de ingreso es $HORA!"
while [ $m -eq 0 ]; do
echo $dia
echo "MENÚ PRINCIPAL"
echo "Seleccione una operación"
echo "Instrucciones: Al seleccionar una operación se solicitara que ingrese tres(3) valores númericos:"
echo "1) Sumar"
echo "2) Restar"
echo "3) Multiplicar"
echo "4) Dividir"
echo "5) Salir"
read menu
case $menu in
1)
echo "**********************"
echo "Operación SUMAR"
echo "**********************"
echo "Introduzca el primer valor"
read num1
echo "Introduzca el segundo valor"
read num2
echo "Introduzca el tercer valor"
read num3
echo "**********************"
echo "$DIA El resultado es " $(($num1 + $num2 + $num3))
echo "**********************"
echo "Operación realizada el $DIA a las $HORA | Con el resultado:" $(($num1 + $num2 + $num3)) "|| Efectuada por $nombre" >>suma.txt
;;
2)
echo "**********************"
echo "Operación RESTA"
echo "**********************"
echo "Introduzca el primer valor"
read num1
echo "Introduza el segundo valor"
read num2
echo "Introduzca el tercer valor"
read num3
echo "**********************"
echo "Operación realizada el $DIA a las $HORA || Con el resultado:" $(($num1 - $num2 - $num3))
echo "**********************"
echo "Operación realizada el $DIA a las $HORA || Con el resultado:" $(($num1 - $num2 - $num3)) "|| Efectuada por $nombre" >>resta.txt
;;
3)
echo "**********************"
echo "Operación MULTIPLICAR"
echo "**********************"
echo "Introduzca el primer valor"
read num1
echo "Introduzca el segundo valor"
read num2
echo "Introduzca el tercer valor"
read num3
echo "**********************"
echo "Operación realizada el $DIA a las $HORA || Con el resultado:" $(($num1 * $num2 * $num3))
echo "Operación realizada el $DIA a las $HORA || Con el resultado:" $(($num1 * $num2 * $num3)) "|| Efectuada por $nombre" >>multiplicacion.txt
;;
4)
echo "**********************"
echo "Operación DIVIDIR"
echo "**********************"
echo "Introduzca el primer valor"
read num1
echo "Introduzca el segundo valor"
read num2
echo "Introduzca el tercer valor"
read num3
echo "**********************"
echo "Operación realizada el $DIA a las $HORA || Con el resultado:" $(($num1 / $num2 / $num3))
echo "**********************"
echo "Operación realizada el $DIA a las $HORA || Con el resultado:" $(($num1 / $num2 / $num3)) "|| Efectuada por $nombre" >>division.txt
;;
5)
echo "Ten un buen día $nombre"
m=1
;;
*)
echo "Ingrese un valor del menú"
;;
esac
done
| true
|
a72763ab18d511c6fcc6dd6626fcd2ff3a5187ec
|
Shell
|
zencircle/spark-build
|
/scale-tests/declare_kafka_cluster_count.sh
|
UTF-8
| 362
| 3.34375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -e
if [[ $1 != ${1//[^0-9]/} || $1 == 0 ]]; then
echo "First argument must be a positive integer." >&2
exit 1
fi
value="$1"
shift
exec "$(dirname "$0")"/push_metric.sh \
--metric-name "set_kafka_or_zk_cluster_count" \
--metric-description "Expected number of kafka and zookeeper clusters." \
--metric-value "${value}" \
"$@"
| true
|
1c08b0c0a1c70d03dc00f26c89f4c5e834c4a162
|
Shell
|
lakhanp1/omics_utils
|
/RNAseq_scripts/pipeline_RNAseq.sh
|
UTF-8
| 3,724
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash
## RNAseq pipeline script: hisat2 maping -> samtools indexing -> stringtie
set -e
set -u
set -o pipefail
##----------------------------------------------------------------
## argument parsing
usage="
Usage: bash RNAseq_process.sh -i /path/to/HiSAT2/index -g /path/to/annotation.gtf -c sampleInfo.tab
-c, --conf FILE: Tabular file with three columns: <sampleId> <R1.fastq.gz> <R2.fastq.gz>
-g, --gtf FILE: GTF annotation file
-i, --index CHR: HiSat index prefix
-h, --help This small usage guide
"
## read command line arguments
PARSED_OPT=$(getopt -o hi:g:c: --long "help,index:,gtf:,conf:" -- "$@")
if [ $? != "0" ] ; then echo "Failed parsing options." >&2 ; exit 1 ; fi
if [ $# -eq 0 ] ; then printf "Error: No arguments\n${usage}" >&2 ; exit 1 ; fi
eval set -- "$PARSED_OPT"
while true ; do
case "$1" in
-h|--help)
printf "${usage}" >&2; exit 1;;
-i|--index)
index=$2; shift 2;;
-g|--gtf)
gtf=$2; shift 2;;
-c|--conf)
file_config=$2; shift 2;;
--) shift ; break ;;
*) echo "Internal error!" >&2; exit 1;;
esac
done
if [ -z "$index" ]; then
printf "Error: Missing --index argument\n" 1>&2
exit 1
fi
if [ -z "$gtf" ]; then
printf "Error: Missing --gtf argument\n" 1>&2
exit 1
fi
if [ -z "$file_config" ]; then
printf "Error: Missing --conf argument\n" 1>&2
exit 1
fi
##----------------------------------------------------------------
## check if config file exists
if [ ! -f "${file_config}" ]; then
printf "Error: reference config file does not exist...\n" 1>&2
exit 1
fi
## check if GTF file exists
if [ ! -f "${gtf}" ]; then
printf "Error: GTF file does not exist...\n" 1>&2
exit 1
fi
#the error_exit function will terminate the script if any of the command fails
function error_exit
{
finishTime=`date "+%T %Y/%m/%d"`
if [ $1 != "0" ]; then
printf "Error: Failed at $finishTime\n" 1>&2
exit 1
else
printf "Done... $finishTime\n\n" 1>&2
fi
}
export -f error_exit
function process_start
{
startTime=`date "+%T %Y/%m/%d"`
printf "Started at $startTime: $1\n" 1>&2
}
export -f process_start
## check if tools are installed
for tool in hisat2 stringtie samtools
do
printf "Checking installer for $tool: "
which $tool
error_exit $?
done
##----------------------------------------------------------------
while IFS=$'\t' read -r sampleId read1 read2 ; do
printf "Processing sample: sampleId:%s**read1:%s**read2:%s**\n" $sampleId $read1 $read2
## check for non empty string
if [ -z "$read1" ]; then
printf "Error: Provide valid R1 file name: $read1...\n" 1>&2
exit 1
fi
## check for non empty string
if [ -z "$read2" ]; then
printf "Error: Provide valid R2 file name: *$read2*...\n" 1>&2
exit 1
fi
## check if FASTQ files are present
for fqFile in $(echo ${read1} ${read2} | tr "," "\n")
do
if [ ! -f "${fqFile}" ]; then
printf "Error: File not found: %s ...\n" ${fqFile} 1>&2
exit 1
fi
done
outDir=$sampleId
[ ! -d ${outDir} ] && mkdir ${outDir}
## align using HiSAT2
process_start hisat2
hisat2 -p 4 --summary-file ${outDir}/hisat.summary -x ${index} -1 ${read1} -2 ${read2} | \
samtools view -bS - | \
samtools sort -O bam -o ${outDir}/${sampleId}_hisat2.bam
error_exit $?
## mapping stats
process_start samtools_index
samtools index ${outDir}/${sampleId}_hisat2.bam
error_exit $?
samtools flagstat ${outDir}/${sampleId}_hisat2.bam > ${outDir}/alignment.stats
## run StringTie
process_start stringtie
stringtie ${outDir}/${sampleId}_hisat2.bam -p 4 -e -B -G ${gtf} -o ${outDir}/stringTie_${sampleId}/${sampleId}.gtf
error_exit $?
printf "Sample $sampleId done\n"
done < ${file_config}
##----------------------------------------------------------------
| true
|
8eb04811275e40576fbf840c0931d2931de76b70
|
Shell
|
jidongdeatao/LinuxTest
|
/FirstVersion/vmSecureScan/config/script/Container_sudoers.sh
|
UTF-8
| 333
| 2.859375
| 3
|
[] |
no_license
|
images=(`docker ps |egrep -v "pause|CONTAINER" |awk '{print $2}'`)
docker=(`docker ps |egrep -v "pause|CONTAINER" |awk '{print $1}'`)
i=0
len=${#docker[@]}
while(($i<$len))
do
echo "############### ${docker[$i]} ${images[$i]}"
docker exec -u 0 ${docker[$i]} cat /etc/sudoers |egrep -v "^#|^Defaults|^$"
echo ""
i=$(($i+1))
done
| true
|
c643985c17dd8efbe9d632932c06104705e1e058
|
Shell
|
takahiro-itou/CygwinSettings
|
/Bash/CorrectGitCommiter.sh
|
UTF-8
| 1,432
| 3.71875
| 4
|
[] |
no_license
|
#! /bin/bash -ue
targetHash='FALSE'
targetDate='FALSE'
commitSign='FALSE'
while [ $# -gt 0 ] ; do
case $1 in
-c) targetHash="$2" ; shift 2 ;;
-d) targetDate="$2" ; shift 2 ;;
-s) commitSign='TRUE' ; shift 1 ;;
--) shift 1; break;;
esac
done
if [ $# -gt 0 ] ; then
targetCommits=$1
shift 1
else
targetCommits='HEAD'
fi
userName='Takahiro Itou'
userMail='6907757+takahiro-itou@users.noreply.github.com'
if [[ ! ${targetHash} = "FALSE" ]] ; then
filterArgs+='if [ "${GIT_COMMIT}" = "'
filterArgs+="${targetHash}"
filterArgs+='" ] ; then'
elif [[ ! ${targetDate} = "FALSE" ]] ; then
filterArgs+='if [ "${GIT_AUTHOR_DATE}" = "'
filterArgs+="${targetDate}"
filterArgs+='" ] ; then'
else
filterArgs+='if [ ! "${GIT_COMMITTER_EMAIL}" = "'
filterArgs+="${userMail}"
filterArgs+='" ] ; then'
fi
filterArgs+=" GIT_AUTHOR_NAME=\"${userName}\";"
filterArgs+=" GIT_AUTHOR_EMAIL=\"${userMail}\";"
filterArgs+=" GIT_COMMITTER_NAME=\"${userName}\";"
filterArgs+=" GIT_COMMITTER_EMAIL=\"${userMail}\";"
if [[ "${commitSign}" = "TRUE" ]] ; then
filterArgs+=' git commit-tree -S "$@" ; '
else
filterArgs+=' git commit-tree "$@" ; '
fi
filterArgs+='else'
filterArgs+=' git commit-tree "$@" ; '
filterArgs+='fi'
commandOpts='filter-branch -f --commit-filter'
commandArgs="${commandOpts} '${filterArgs}' ${targetCommits}"
echo "Run: git ${commandArgs}" 1>&2
echo git ${commandArgs}
| true
|
ca90659ad516cba05899c58b5ca9bade56be2d0d
|
Shell
|
sharyanto/scripts
|
/.old/downloads/wget-merck-manual-method2
|
UTF-8
| 374
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# pertama ambil dulu merck.com-nya
wgetr www.merck.com/mrkshared/mmanual/sections.jsp
# lalu ekstrak url akamainya, baru ambil terpisah
perl -MFile::Find -e'$/=undef;
find sub {return unless -f;return unless /\.(jsp|html)/;
open F,$_; $x=<F>;
print "$1\n" while ($x=~m#(http://\S+?akamai\S+?)["\s]#g)}, "."' | sort | uniq >urls.txt
wget -x -i urls.txt
| true
|
5bcc8a2ebdbe73bce8e632bbb73b5c6f95ed51b9
|
Shell
|
fenggaoyao/crontab-ui
|
/hook.sh
|
UTF-8
| 895
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/sh
repo_full_name=$1
tag=$2
Url="registry.cn-hangzhou.aliyuncs.com/${repo_full_name}:${tag}"
ssh root@106.53.76.57 << eeooff
#set -x
# sh hook.sh cloudsu/crontab-ui 2.1.6
function crontab(){
id=\$(docker ps | grep 'dollymi' | awk '{print \$1}')
if [ -z "\$id" ]; then
docker run -d --name dollymi -p 7000:80 ${Url}
else
docker stop \$id && docker rm \$id && docker run -d --name dollymi -p 7000:80 ${Url}
fi
}
if [ $repo_full_name = "cloudsu/crontab-ui" ]; then
echo "调用生成crontab"
crontab
fi
function authserver(){
id=\$(docker ps | grep 'auth' | awk '{print \$1}')
if [ -z "\$id" ]; then
docker run -d -p 443:443 --name auth ${Url}
else
docker stop \$id && docker rm \$id && docker run -d -p 443:443 --name auth ${Url}
fi
}
if [ $repo_full_name = "cloudsu/authserver" ]; then
echo "调用生成crontab"
authserver
fi
exit
eeooff
echo done!
| true
|
7851cf9b6cf7da228676180b31f456e940c2f040
|
Shell
|
typoworx-de/linux-snipplets
|
/bin/toggle-touchpad
|
UTF-8
| 702
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
declare -i ID
declare -i STATE
#
# Search all TouchPad devices if there are more than one
# Attention may be some kind of exclude-list may be required (is on my ToDo)
#
for DEVICE in $(xinput list | grep -Eio 'TouchPad\s*id\=[0-9]{1,2}');
do
ID=$(echo $DEVICE | grep -Eo '[0-9]{1,2}');
if [[ -z $ID || $ID -eq 0 ]];
then
continue;
fi
STATE=$(xinput list-props $ID|grep 'Device Enabled'|awk '{print $4}');
if [ $STATE -eq 1 ]
then
xinput disable $ID
xinput set-prop $ID "Device Enabled" 0
else
xinput enable $ID
xinput set-prop $ID "Device Enabled" 1
fi
done
if [ $STATE -eq 1 ]
then
echo "Touchpad disabled."
else
echo "Touchpad enabled."
fi
| true
|
8e499bd44af9cabc81521c217a50398b2ec03d48
|
Shell
|
MarcelGehrig2/wiki
|
/linux/scripts/inUse/runAtBoot.sh
|
UTF-8
| 606
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
[[ $_ != $0 ]] && script_is_sourced=true || script_is_sourced=false
if [ $script_is_sourced ]; then
SCRIPT_PATH=$BASH_SOURCE
else
SCRIPT_PATH="$(readlink -f $0)"
fi
SCRIPT_DIR="$(dirname $SCRIPT_PATH)"
SCRIPT_NAME=$(basename $SCRIPT_PATH)
touch /tmp/runAsBoot
touch ~/runAsBoot
#/home/mgehrig2/wiki/linux/scripts/inUse/startSynergy.sh
/home/mgehrig2/wiki/linux/scripts/inUse/startVNC.sh
#switch to externel display
intern=eDP1
extern=HDMI2
#xrandr > /tmp/xrandr.log
if xrandr | grep "$extern connected"; then
xrandr --output "$intern" --off --output "$extern" --auto
fi
| true
|
32da92340c3df509ce719ef0f393834f14bd5e91
|
Shell
|
leolanavo/LanArchInstall
|
/arch_install.sh
|
UTF-8
| 1,031
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
# ARGUMENTS:
user=$1 # $1 --> your user
driver=$2 # $2 --> the flag for the system specific drivers
host=$3 # $3 --> your hostname
# Setting up locale
echo "en_US.UTF-8 UTF-8" > /etc/locale.gen
echo "pt_BR.UTF-8 UTF-8" >> /etc/locale.gen
locale-gen
echo LANG=en_US.UTF-8 > /etc/locale.conf
# Setting up NTP and Time
ln -sf /usr/share/zoneinfo/Brazil/East /etc/localtime
hwclock --systohc
# Hostname setting
echo $host > /etc/hostname
# Expanding MirroList
sed -i 's/^#Color/Color/g' /etc/pacman.conf
sed -i 's/^#TotalDownload/TotalDownload\nILoveCandy/g' /etc/pacman.conf
echo "[multilib]" >> /etc/pacman.conf
echo "Include = /etc/pacman.d/mirrorlist" >> /etc/pacman.conf
pacman -Sy
# Packages
./packages_install.sh $driver
# Add user
useradd -m -g users -G wheel,storage,power,docker -s /bin/zsh $user
# Add sudo power to new user
sed -ri 's/^#( %wheel ALL=\(ALL\) ALL$)/\1/g' /etc/sudoers
clear
# Set up passwords
echo "Set up the root password"
passwd
echo "Set up your own password"
passwd $user
| true
|
39fae21afa47dcb64704beab84df8d523fc02503
|
Shell
|
AWGL/SomaticEnrichment
|
/SomaticEnrichmentLib-1.4.0/compileQcReport.sh
|
UTF-8
| 4,072
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
set -euo pipefail

# Christopher Medway AWMGS
# compiles a file of useful QC metrics from the multitude of PICARD metrics
#
# Usage: compileQcReport.sh <seqId> <panel>
# For every sample listed in ../sampleVCFs.txt, gathers metrics from the
# Picard/GATK outputs under /data/results/<seqId>/<panel>/<sampleId>,
# writes a per-sample <seqId>_<sampleId>_QC.txt, then merges all samples
# into a combined report.

seqId=$1
panel=$2

# loop through each sample and make QC file
for sampleId in $(cat ../sampleVCFs.txt); do

  dir=/data/results/$seqId/$panel/$sampleId

  # remove stale qc file from a previous run, if present
  if [ -e $dir/"$seqId"_"$sampleId"_qc.txt ]; then rm $dir/"$seqId"_"$sampleId"_qc.txt; fi

  # Gather QC metrics.  The head -n8 | tail -n1 pattern pulls the single
  # data row out of each Picard metrics file; cut -f selects the column
  # (column numbers depend on the Picard version that produced the files).
  meanInsertSize=$(head -n8 $dir/"$seqId"_"$sampleId"_InsertMetrics.txt | tail -n1 | cut -s -f6) #mean insert size
  sdInsertSize=$(head -n8 $dir/"$seqId"_"$sampleId"_InsertMetrics.txt | tail -n1 | cut -s -f7) #insert size standard deviation
  duplicationRate=$(head -n8 $dir/"$seqId"_"$sampleId"_markDuplicatesMetrics.txt | tail -n1 | cut -s -f9) #The percentage of mapped sequence that is marked as duplicate.
  totalReads=$(head -n8 $dir/"$seqId"_"$sampleId"_HsMetrics.txt | tail -n1 | cut -s -f6) #The total number of reads in the SAM or BAM file examine.
  pctSelectedBases=$(head -n8 $dir/"$seqId"_"$sampleId"_HsMetrics.txt | tail -n1 | cut -s -f19) #On+Near Bait Bases / PF Bases Aligned.
  totalTargetedUsableBases=$(head -n2 $dir/$seqId"_"$sampleId"_DepthOfCoverage".sample_summary | tail -n1 | cut -s -f2) #total number of usable bases. NB BQSR requires >= 100M, ideally >= 1B
  percentUseableBasesOnTarget=$(head -n8 $dir/"$seqId"_"$sampleId"_HsMetrics.txt | tail -n1 | cut -s -f27)
  meanOnTargetCoverage=$(head -n2 $dir/$seqId"_"$sampleId"_DepthOfCoverage".sample_summary | tail -n1 | cut -s -f3) #avg usable coverage
  pctTargetBasesCt=$(head -n2 $dir/$seqId"_"$sampleId"_DepthOfCoverage".sample_summary | tail -n1 | cut -s -f7) #percentage panel covered with good enough data for variant detection
  #freemix=$(tail -n1 $dir/"$seqId"_"$sampleId"_Contamination.selfSM | cut -s -f7) #percentage DNA contamination. Should be <= 0.02
  pctPfReadsAligned=$(grep ^PAIR $dir/"$seqId"_"$sampleId"_AlignmentSummaryMetrics.txt | awk '{print $7*100}') #Percentage mapped reads
  atDropout=$(head -n8 $dir/"$seqId"_"$sampleId"_HsMetrics.txt | tail -n1 | cut -s -f51) #A measure of how undercovered <= 50% GC regions are relative to the mean
  gcDropout=$(head -n8 $dir/"$seqId"_"$sampleId"_HsMetrics.txt | tail -n1 | cut -s -f52) #A measure of how undercovered >= 50% GC regions are relative to the mean

  # check FASTQC output
  countQCFlagFails() {
    # count how many core FASTQC tests failed (anything that is neither
    # PASS nor WARN in the five critical modules)
    grep -E "Basic Statistics|Per base sequence quality|Per tile sequence quality|Per sequence quality scores|Per base N content" "$1" | \
    grep -v ^PASS | \
    grep -v ^WARN | \
    wc -l | \
    sed 's/^[[:space:]]*//g'
  }

  # Overall raw-sequence verdict: FAIL if any FASTQC report for this
  # sample has a failing core module.
  rawSequenceQuality=PASS
  for report in $dir/FASTQC/"$seqId"_"$sampleId"_*_fastqc.txt;
  do
    if [ $(countQCFlagFails $report) -gt 0 ]; then
      rawSequenceQuality=FAIL
    fi
  done

  # Sex check removed as it was unstable.
  # sex check
  # this file will not be avilable for NTC
  #if [ $sampleId == "NTC" ]; then
  #    ObsSex='Null'
  #elif [ ! -e /data/results/$seqId/$panel/$sampleId/CNVKit/*.sex ]; then
  #    ObsSex='Unknown'
  #else
  #    ObsSex=$(cat /data/results/$seqId/$panel/$sampleId/CNVKit/*.sex | grep .cnr | cut -f2)
  #fi

  # keeping placeholder sex variable in report
  ObsSex='Unknown'

  # Print QC metrics: header row then one tab-separated data row.
  echo -e "TotalReads\tRawSequenceQuality\tGender\tTotalTargetUsableBases\tPercentTargetUseableBases\tDuplicationRate\tPctSelectedBases\tPctTargetBasesCt\tMeanOnTargetCoverage\tMeanInsertSize\tSDInsertSize\tPercentMapped\tAtDropout\tGcDropout" > $dir/"$seqId"_"$sampleId"_QC.txt
  echo -e "$totalReads\t$rawSequenceQuality\t$ObsSex\t$totalTargetedUsableBases\t$percentUseableBasesOnTarget\t$duplicationRate\t$pctSelectedBases\t$pctTargetBasesCt\t$meanOnTargetCoverage\t$meanInsertSize\t$sdInsertSize\t$pctPfReadsAligned\t$atDropout\t$gcDropout" >> $dir/"$seqId"_"$sampleId"_QC.txt

done

# generate combinedQC.txt
python /data/diagnostics/scripts/merge_qc_files.py /data/results/$seqId/$panel/
| true
|
cb5ac840080b24afd84324731ceebf25650e34ec
|
Shell
|
skymix/koboh20_nightmode
|
/.adds/nightmode/nightmode.sh
|
UTF-8
| 2,324
| 3.09375
| 3
|
[] |
no_license
|
#! /bin/sh
#by skymix.es@gmail.com
#
#Working on H2O Kobo Ebook Edition 2 Version 2 Mark 7
#Need Kfmon installed and working
#
#Required!
#You need to add to .kobo/Kobo/Kobo eReader.conf the next section:
#[FeatureSettings]
#InvertScreen=true
#This extra section enable the inverse mode (black and white text) on the next reboot.
#The script change the InvertScreen to the other state:
#InvertScreen True = Black Background White Text
#InvertScreen False = White Background Black Text
#We use the nightmode.png icon on the icons directory to launch the script via KFMon
#
# TODO:
#
#Copyright (C) 2019 Jose Angel Diaz Diaz
# skymix.es@gmail.com 05/2019
# This program is free software: you can redistribute
#it and/or modify it under the terms of the GNU General
#Public License as published by the Free Software Foundation,
#either version 3 of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#See the GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public
#License along with this program. If not, see http://www.gnu.org/licenses/.
#
############################################################################
# Directory this script lives in; also the working dir for nickel.sh below.
WORKDIR=$(dirname "$0")
CONFFILE="/mnt/onboard/.kobo/Kobo/Kobo eReader.conf"
cd "$WORKDIR" || exit 1
# Security backup of the Config File
cp "$CONFFILE" "$CONFFILE".bak
# Check that the InvertScreen flag exists in the Config File; if not,
# append the [FeatureSettings] section with a default of false.
if grep -q "InvertScreen" "$CONFFILE" ; then
  echo "Config_ok"
else
  echo "[FeatureSettings]" >> "$CONFFILE"
  echo "InvertScreen=false" >> "$CONFFILE"
fi
# Change the InvertScreen State (toggle true <-> false)
if grep -q InvertScreen=false "$CONFFILE"; then
  sed -i 's/InvertScreen=false/InvertScreen=true/g' "$CONFFILE"
else
  sed -i 's/InvertScreen=true/InvertScreen=false/g' "$CONFFILE"
fi
# Reboot nickel and apply the change.
# First re-export nickel's environment (network interface, DBus session,
# locale, ...) so the relaunched UI sees the same runtime settings.
eval "$(xargs -n 1 -0 < /proc/$(pidof nickel)/environ | grep -E 'INTERFACE|WIFI_MODULE|DBUS_SESSION|NICKEL_HOME|LANG' | sed -e 's/^/export /')"
sync
# Stop the Kobo UI and its helper daemons, then restart via nickel.sh.
killall -TERM nickel hindenburg sickel fickel fmon > /dev/null 2>&1
export MODEL_NUMBER=$(cut -f 6 -d ',' /mnt/onboard/.kobo/version | sed -e 's/^[0-]*//')
export LD_LIBRARY_PATH="libs:${LD_LIBRARY_PATH}"
./nickel.sh &
| true
|
6b777599d87c944e092b6de15c81d4cbd4c9b73f
|
Shell
|
olivierdalang/SPCgeonode
|
/docker-entrypoint.sh
|
UTF-8
| 476
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/sh
# Django container entrypoint: run initialize.py (migrations/bootstrap),
# then exec the container CMD so it replaces the shell as PID 1 and
# receives signals directly.

# Exit script in case of error
set -e

# (fix) the original used `echo $"\n\n\n"` — bash locale-translation
# quoting plus echo backslash handling, neither of which is portable
# under /bin/sh.  printf prints the three blank lines unambiguously.
printf '\n\n\n'
echo "-----------------------------------------------------"
echo "STARTING DJANGO ENTRYPOINT --------------------------"
date

# Run migrations / one-time initialisation (-u: unbuffered output so
# logs stream promptly in `docker logs`).
echo 'Running initialize.py...'
python -u initialize.py

echo "-----------------------------------------------------"
echo "FINISHED DJANGO ENTRYPOINT --------------------------"
echo "-----------------------------------------------------"

# Run the CMD
exec "$@"
| true
|
f802c043be2e79eb4d6f43b6cf2a13d43d47aa12
|
Shell
|
t0mac0/Embedded-Linux-System
|
/MX233/gists/BCB-to-SD-card.sh
|
UTF-8
| 743
| 3.4375
| 3
|
[] |
no_license
|
echo "Remove old files"
rm bcb
rm bcb_512.cfg
rm bcb_sdcard_part.old
rm bcb_sdcard_part.readback
echo "Compiling bcb tool"
gcc bcb.c -o bcb
echo "Running bcb tool"
./bcb
echo "Saving old partition contents to bcb_sdcard_part.old"
dd if=/dev/sdb3 of=bcb_sdcard_part.old
echo "============= Contents of ->OLD<- BCD ============="
hd bcb_sdcard_part.old
echo "Clear the SD card boot block partition first"
dd if=/dev/zero of=/dev/sdb3
sync
echo "Write the BCD to the sd card BCD parition"
dd if=bcb_512.cfg of=/dev/sdb3
sync
echo "Reading back BCD partition for verification"
dd if=/dev/sdb3 of=bcb_sdcard_part.readback
echo "============= Contents of ->NEW<- BCD ============="
hd bcb_sdcard_part.readback
sync
echo "Done, remove card"
| true
|
d087968d62e8d18d87a77a7848aacc3c81ed75ac
|
Shell
|
pertruccio/postfix-tls-policy
|
/convert_list_to_sql.sh
|
UTF-8
| 899
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
# Convert the postfix TLS policy map ./tls_policy-dane into a SQL import
# file: a TRUNCATE followed by one INSERT per policy entry for table
# `tlspolicies` (columns: domain, policy, params).

tempfoo="sqlimport"

# Let's try mktemp for output; abort with exit code 1 if creation fails.
TMPFILE=$(mktemp /tmp/${tempfoo}.XXXXXX) || {
  echo "ERROR: while creation of temp file!" >&2
  exit 1
}

# delete all lines in existing table, they are probably old
echo "TRUNCATE TABLE tlspolicies;" > "$TMPFILE"

# For each non-comment line of the policy file build one INSERT statement.
# read -r keeps backslashes in the data literal.
# NOTE(review): the three fields are interpolated into the SQL verbatim;
# a single quote in the policy file would break the generated statement.
while read -r p; do
  if [[ $p != \#* ]]; then
    # intentional word-splitting: fields are domain / policy / params
    stringarray=($p)
    NEW="insert into tlspolicies (domain, policy, params) values ('${stringarray[0]}', '${stringarray[1]}', '${stringarray[2]}');"
    echo "$NEW" >> "$TMPFILE"
  fi
done <./tls_policy-dane

# now this script echoes to a tempfile generated by mktemp which should be usable as sql import
echo "SQL-Statements ready in file: $TMPFILE
You can import the statements with:
mysql -u USER -p vmail < $TMPFILE"
exit 0
| true
|
0f50017fc69d3202f670ae985ddc8abbbe7e45d3
|
Shell
|
infortrend-openstack/ubuntu-kilo-multinode
|
/install-network-node.sh
|
UTF-8
| 2,562
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Installs and configures the OpenStack Kilo network node on Ubuntu:
# Neutron ML2/OVS agents, kernel forwarding sysctls, network interfaces
# and the external bridge.  Config templates are read from ./network and
# host addressing from ./networkrc.

# Must run as root: installs packages and rewrites /etc configuration.
if [ "$(id -u)" != "0" ]; then
  echo "This script must be run as root" 1>&2
  exit 1
fi

PASSWORD=111111
CONFIG_DIR=network
# networkrc is expected to define EXT_NET_*, MGNT_NET_* and VM_NET_* vars
# used below.
source networkrc

echo "Start to Install Neutron"
sleep 3

# Enable IP forwarding and disable reverse-path filtering (required for
# the Neutron L3 agent to route between networks).  Original sysctl.conf
# is kept as sysctl.conf~.
cp /etc/sysctl.conf /etc/sysctl.conf~
echo "net.ipv4.ip_forward=1" >> /etc/sysctl.conf
echo "net.ipv4.conf.all.rp_filter=0" >> /etc/sysctl.conf
echo "net.ipv4.conf.default.rp_filter=0" >> /etc/sysctl.conf
sysctl -p

apt-get install -y neutron-plugin-ml2 neutron-plugin-openvswitch-agent neutron-l3-agent neutron-dhcp-agent neutron-metadata-agent

# Install each Neutron config from the templates, backing up the originals
# with a trailing "~" and substituting the service password placeholder.
mv /etc/neutron/neutron.conf /etc/neutron/neutron.conf~
cp $CONFIG_DIR/neutron/neutron.conf /etc/neutron
sed -i "s/111111/$PASSWORD/g" /etc/neutron/neutron.conf

mv /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/ml2/ml2_conf.ini~
cp $CONFIG_DIR/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/ml2

mv /etc/neutron/l3_agent.ini /etc/neutron/l3_agent.ini~
cp $CONFIG_DIR/neutron/l3_agent.ini /etc/neutron

mv /etc/neutron/dhcp_agent.ini /etc/neutron/dhcp_agent.ini~
cp $CONFIG_DIR/neutron/dhcp_agent.ini /etc/neutron
cp $CONFIG_DIR/neutron/dnsmasq-neutron.conf /etc/neutron
# Stop any stray dnsmasq so the dhcp agent can claim its config.
pkill dnsmasq

mv /etc/neutron/metadata_agent.ini /etc/neutron/metadata_agent.ini~
cp $CONFIG_DIR/neutron/metadata_agent.ini /etc/neutron/
sed -i "s/111111/$PASSWORD/g" /etc/neutron/metadata_agent.ini

# Rewrite /etc/network/interfaces: br-ex carries the external address, the
# physical external NIC is enslaved (promisc, no address), and management /
# VM-data NICs get static addresses.  The heredoc content is written
# verbatim; \$IFACE escapes keep IFACE literal for ifupdown to expand.
cp /etc/network/interfaces /etc/network/interfaces~~
cat << EOF > /etc/network/interfaces
# The loopback network interface
auto lo
iface lo inet loopback

## External net
auto br-ex
iface br-ex inet static
address $EXT_NET_ADDRESS
netmask $EXT_NET_NETMASK
gateway $EXT_NET_GATEWAY
dns-nameservers 8.8.8.8

## External net
auto $EXT_NET_INTF_NAME
iface $EXT_NET_INTF_NAME inet manual
up ifconfig \$IFACE 0.0.0.0 up
up ip link set \$IFACE promisc on
down ip link set \$IFACE promisc off
down ifconfig \$IFACE down

## Management net
auto $MGNT_NET_INTF_NAME
iface $MGNT_NET_INTF_NAME inet static
address $MGNT_NET_ADDRESS
netmask $MGNT_NET_NETMASK

## VM Data net
auto $VM_NET_INTF_NAME
iface $VM_NET_INTF_NAME inet static
address $VM_NET_ADDRESS
netmask $VM_NET_NETMASK
EOF

service openvswitch-switch restart
sleep 3

# Create the external bridge and attach the physical NIC; GRO is disabled
# on the enslaved NIC per OVS deployment guidance in this setup.
ovs-vsctl add-br br-ex
ovs-vsctl add-port br-ex $EXT_NET_INTF_NAME
ethtool -K $EXT_NET_INTF_NAME gro off
ifdown br-ex && ifup br-ex && ifdown $EXT_NET_INTF_NAME && ifup $EXT_NET_INTF_NAME

# Restart all Neutron agents so they pick up the new configuration.
service neutron-plugin-openvswitch-agent restart
sleep 3
service neutron-l3-agent restart
sleep 3
service neutron-dhcp-agent restart
sleep 3
service neutron-metadata-agent restart
sleep 3
exit 0
| true
|
bbaedfcebefff63f5142f2f50f16ce527c93cea0
|
Shell
|
hyb8892208/wire
|
/my_tools/sh/gpio_ctl/gpio_remove.sh
|
UTF-8
| 304
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/sh
# stop gpio work: release the GPIO lines by writing their numbers back to
# the sysfs "unexport" interface so the kernel reclaims them.
# (fix) the shebang must be the very first line of the file; in the
# original it was preceded by a comment line and therefore ignored.

gpio_reset_port=105
gpio_run_port=111
gpio_path=/sys/class/gpio

# Unexport both GPIO lines.
stop_gpio()
{
  echo ${gpio_reset_port} > ${gpio_path}/unexport
  echo ${gpio_run_port} > ${gpio_path}/unexport
}

# Without the sysfs GPIO interface there is nothing to release.
if [ ! -e /sys/class/gpio/export ]; then
  echo no export file exist!
  exit
fi

stop_gpio
exit 0
| true
|
398693ae516965c83a9702f6a58abf44e1c6087f
|
Shell
|
ted92/3203-assignment1
|
/WordCount/script_wc.sh
|
UTF-8
| 3,639
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
# (fix) the original shebang read "#!bin/bash" — not an absolute path, so
# the kernel could not resolve the interpreter and the script only ran by
# accident under the invoking shell, despite relying on bash arrays.
# Enrico Tedeschi - ete011
# INF - 3203 Advanced Distributed Systems
# Assignment 1
# Word count script.
# 1) Menu to run Word Count with different input
# 2) Possibility to choose the number of reduce tasks
# 3) generate a pdf with a graph showing the performance using gnuplot
#######################################
# Run the Hadoop WordCount job once per requested reducer count, record
# wall-clock timings, and render performance / standard-deviation plots.
#
# (bugfix) the original assigned `numbers=$1`, but a bash array cannot be
# passed through a single positional argument — only its first element
# arrived, so every iteration after the first launched the job with an
# empty "-D mapred.reduce.tasks=".  The reducer counts are now taken from
# the caller's global `numbers` array (never declared local there).
#
# Globals:   numbers (read)  - array of reducer counts
#            durations, variance (written) - per-run timing data
# Arguments: $1 - ignored (kept for caller compatibility)
#            $2 - number of entries in `numbers`
#            $3 - input file name (under Inputdir/)
#            $4 - output path for the performance plot (gif)
#            $5 - output path for the standard-deviation plot (gif)
#######################################
function execution {
  len=$2
  # timing table consumed by gnuplot below
  touch Inputdir/input_tab.dat
  printf "%s %10s %10s\n" "#" "reduce" "time" > Inputdir/input_tab.dat
  i=0
  sum=0
  while [ $i -lt $len ]
  do
    # clear all output folders from the previous run
    hadoop fs -rm -r Word_Count_outputdir
    rm -r Word_Count_outputdir
    # stage the input and execute the WordCount job
    hadoop fs -copyFromLocal Inputdir/$3 $3
    # time elapsed in millisecs
    time_start=$(date +%s%N)
    hadoop jar word_count.jar word_count.WordCount $3 Word_Count_outputdir -D mapred.reduce.tasks=${numbers[$i]}
    time_elapsed=$((($(date +%s%N) - $time_start)/1000000))
    durations[$i]=$time_elapsed
    # record this run in input_tab.dat
    printf "%s %10d %10d\n" " " "${numbers[$i]}" "${durations[$i]}" >> Inputdir/input_tab.dat
    hadoop fs -copyToLocal Word_Count_outputdir
    # accumulate for the average
    sum=$(( sum+durations[$i] ))
    i=$((i+1))
  done
  # average, then per-point variance for the deviation plot
  avg=$(( sum/len ))
  i=0
  # second data block in input_tab.dat holds the average line
  printf "\n" >> Inputdir/input_tab.dat
  printf "%s %10s %10s\n" "#" "reduce" "time" >> Inputdir/input_tab.dat
  touch Inputdir/standard_dev.dat
  printf "%s %20s %20s\n" "#" "standard_deviation" "time" > Inputdir/standard_dev.dat
  while [ $i -lt $len ]
  do
    # variance = squared deviation from the average
    variance[$i]=$(( (durations[$i]-avg)*(durations[$i]-avg) ))
    printf "%s %10d %10d\n" " " "${numbers[$i]}" "$avg" >> Inputdir/input_tab.dat
    printf "%s %20d %20d\n" " " "${numbers[$i]}" "${variance[$i]}" >> Inputdir/standard_dev.dat
    i=$((i+1))
  done
  # (bugfix) the original ran `set term pngcairo` at shell level, which
  # clobbered the positional parameters; the terminal is set inside the
  # gnuplot scripts instead.
  # performance plot ($4) with average line
gnuplot<< EOF
set terminal gif
set style line 1 lc rgb '#0060ad' lt 1 lw 2 pt 7 ps 1.5
set style line 2 lc rgb '#dd181f' lt 1 lw 2 pt 5 ps 1.5
set output '$4'
plot 'Inputdir/input_tab.dat' index 0 with linespoints ls 1
EOF
  # standard-deviation plot ($5)
gnuplot<< EOF
set terminal gif
set style line 1 lc rgb '#dd181f' lt 1 lw 2 pt 7 ps 1.5
set output '$5'
plot 'Inputdir/standard_dev.dat' index 0 with linespoints ls 1
EOF
}
if [ "$1" == "-n" ]; then
shift
numbers=( $@ )
len=${#numbers[@]}
#numbers now contains all the number for the execution of the MapReduce with different number of reducers
OPTIONS="divine_comedy project_Gutenberg input3 quit"
select opt in $OPTIONS; do
if [ "$opt" = "divine_comedy" ]; then
#execution with divine_comedy paramters
input_file="divine_comedy.txt"
plot="Plotdir/plot_divine_comedy.gif"
plot_sd="Plotdir/plot_divine_comedy_standard_dev.gif"
#call the function execution with parameters
execution $numbers $len $input_file $plot $plot_sd
elif [ "$opt" = "project_Gutenberg" ]; then
#execution with project_Gutenberg parameters
input_file="proeject_Gutenberg.txt"
plot="Plotdir/plot_project_Gutenberg.gif"
plot_sd="Plotdir/plot_project_Gutenberg_standard_dev.gif"
execution $numbers $len $input_file $plot $plot_sd
echo project_Gutenberg
elif [ "$opt" = "quit" ]; then
exit
fi
done
else
#TODO execute with other parameters
echo not an option
fi
| true
|
45b1290895f79df821f1d7785ccfdf090e556e2c
|
Shell
|
zhixingfeng/shell
|
/sam2fq
|
UTF-8
| 296
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# sam2fq: convert primary alignments from a SAM/BAM file to FASTQ
# (one 4-line record per read).
#
# Usage: sam2fq samfile fqfile nthread
#   samfile - input SAM/BAM
#   fqfile  - output FASTQ path
#   nthread - thread count passed to `samtools view`
if [ $# -ne 3 ]; then
  echo "USAGE: sam2fq samfile fqfile nthread"
  # (fix) a usage error now exits non-zero so callers and pipelines can
  # detect it; the original exited 0 here.
  exit 1
fi
samfile=$1
fqfile=$2
nthread=$3
# -F 2304 excludes secondary (0x100) and supplementary (0x800) alignments
# so each read is emitted at most once; records without a stored sequence
# ($10 == "*" or a single character) are skipped.
samtools view -F 2304 --threads "$nthread" "$samfile" | awk -F "\t" '{
  if (length($10) > 1 && $10 != "*"){
    print "@"$1; print $10; print "+"$1; print $11
  }
}' > "$fqfile"
| true
|
1eef87290fcf39193045191b25866012721dc74d
|
Shell
|
huggergit/Linuxscripts
|
/tennis/source/tennisGame.sh
|
UTF-8
| 656
| 3.359375
| 3
|
[] |
no_license
|
#! /usr/bin/env bash
### tennisGame.sh ###
# Replays a tennis score file ($1): line 1 names the two players, each
# following line records a point; after every point the running score is
# displayed.  Helper functions (getFirstPlayerFrom, getSecondPlayerFrom,
# getScoreFor, displayScore) come from functions.sh.
source ./functions.sh

playersLine=$(head -n 1 "$1")
echo "$playersLine"
firstPlayer=$(getFirstPlayerFrom "$playersLine")
secondPlayer=$(getSecondPlayerFrom "$playersLine")

wholeScoreFileContent=$(cat "$1")
totalNoOfLines=$(echo "$wholeScoreFileContent" | wc -l)

# Re-evaluate the score after each additional line of the file
# (lines 2..N), so the score is printed point by point.
for currentLine in $(seq 2 "$totalNoOfLines")
do
  firstPlayerScore=$(getScoreFor "$firstPlayer" "$(echo "$wholeScoreFileContent" | head -n "$currentLine")")
  secondPlayerScore=$(getScoreFor "$secondPlayer" "$(echo "$wholeScoreFileContent" | head -n "$currentLine")")
  displayScore "$firstPlayer" "$firstPlayerScore" "$secondPlayer" "$secondPlayerScore"
done
| true
|
cdae264747f54079e5cf0dc1c3ec3f858f7bb253
|
Shell
|
bthakur/benchmarks
|
/mpi/run_symmetric/dapltest/run_dapltest.sh
|
UTF-8
| 1,791
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# run_dapltest.sh: start a dapltest server on $server over ssh, run the
# configured client test from each host in $clients, collect the logs
# under ./log, then shut the server down.

# Base parameters for launching server
servertest0="dapltest -T S -d -D ofa-v2-ib0"
servertest1="dapltest -T S -d -D ofa-v2-mlx4_0-1s"
parameterT="-i 1000 client SR 409600 1 server SR 409600 1"
parameterP="-i 1000 RW 409600 2"

# Configuration block A (enabled).
# (bugfix) the original condition read [ '1'=='1' ] — without spaces the
# bracket test sees one non-empty string and is ALWAYS true regardless of
# the digits; written with spaces (and POSIX '=' rather than bash '==')
# it keeps the intended "toggle by editing the digits" behaviour.
if [ '1' = '1' ]; then
  server="smic005h"
  clients="smic006h"
  clienttest0="dapltest -T P -D ofa-v2-ib0 -s $server $parameterP"
  clienttest1="dapltest -T P -D ofa-v2-mlx4_0-1s -s $server $parameterP"
  servertest="$servertest0"
  clienttest="$clienttest0"
fi

# Configuration block B (disabled: '0' != '1').
if [ '0' = '1' ]; then
  server="smic199"
  clients="smic200"
  clienttest0="dapltest -T T -D ofa-v2-ib0 -s $server $parameterP"
  clienttest1="dapltest -T P -D ofa-v2-mlx4_0-1s -s $server $parameterP"
  servertest="$servertest1"
  clienttest="$clienttest1"
fi

# Log directory next to the current working directory.
top=$(pwd)
log=$top/log
if [ ! -d "$log" ]; then
  mkdir -v "$log"
fi

# Start the server on a smic001
#server="smic003"
if [ -f "$log/server-$server.log" ]; then
  echo "Clear old logs"
  rm -v "$log/server-$server.log"
  touch "$log/server-$server.log"
fi
# Backgrounded on the remote side so ssh returns immediately.
ssh $server "nohup $servertest >& $log/server-$server.log &"

# Run tests on clients
for client in $clients; do
  # Clean up earlier log
  if [ -f "$log/client-$client.log" ]; then
    rm "$log/client-$client.log"
    touch "$log/client-$client.log"
  fi
  # Run the configured test(s) for the client; $? right after ssh is the
  # remote test's exit status.
  for ctest in "$clienttest"; do
    ssh $client "$ctest" >> "$log/client-$client.log" 2>&1
    echo "$(if [ $? -eq 0 ]; then echo Success; else echo Failure; fi) $server -> $client"
  done
done

# Cleanup server
#dapltest -T Q -s $server -D ofa-v2-mlx4_0-1s >> $log/server-$server.log
ssh $server 'pkill dapltest'

echo "Check log directory for,
$log/client-$client.log"
sleep 3
|
11bf59ef66282966bcc15bbd08e0d96b3994fe99
|
Shell
|
knoxknox/dev-labs
|
/docker/infra/nginx/certs.sh
|
UTF-8
| 787
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
path="$(dirname $0)/certs"
##
# Root Certificate Authority
#
echo 'Generate root private key'
openssl genrsa -out "$path/ca.key" 4096
echo 'Generate root certificate'
subject="/C=US/O=Development/CN=local"
openssl req -new -x509 -sha256 -days 3650 -subj $subject -key "$path/ca.key" -out "$path/ca.pem"
##
# Certificate signed with Root CA
#
echo 'Generate server private key'
openssl genrsa -out "$path/dev.key" 4096
echo 'Generate certificate signing request'
subject="/C=US/O=Development/CN=development"
openssl req -new -subj $subject -key "$path/dev.key" -out "$path/dev.csr"
echo 'Generate certificate signed by root (3 years)'
openssl x509 -req -sha256 -days 1095 -CA "$path/ca.pem" -CAkey "$path/ca.key" -CAcreateserial -in "$path/dev.csr" -out "$path/dev.crt"
| true
|
301b37cfcab683ac0699b88fbccd05a23f8101eb
|
Shell
|
cmstas/MT2Analysis
|
/limits/batchsubmit/wrapper.sh
|
UTF-8
| 1,244
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
echo "host: "
hostname
HOME=`pwd`
#This stuff to get it to run
export CMS_PATH=/cvmfs/cms.cern.ch
export SCRAM_ARCH=slc6_amd64_gcc481
source /cvmfs/cms.cern.ch/cmsset_default.sh
source /cvmfs/cms.cern.ch/slc6_amd64_gcc481/lcg/root/5.34.18/bin/thisroot.sh
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:.
export PATH=$PATH:.
pwd
pushd .
cd /cvmfs/cms.cern.ch/slc6_amd64_gcc481/cms/cmssw/CMSSW_7_1_5/src/
pwd
eval `scramv1 runtime -sh`
popd
pwd
tar -xzvf input.tar.gz > /dev/null
ls -lrth
CARD=$1
SEED=$2
NTOYS=$3
NITERS=$4
POINT=$5
COPYDIR=$6
echo "Start Time is `date`"
./combine -M GenerateOnly ${CARD} -t -1 --expectSignal 0 --saveToys -s 12345
./combine -M HybridNew --frequentist ${CARD} --saveToys --fullBToys --saveHybridResult --singlePoint ${POINT} -T ${NTOYS} -i ${NITERS} -s ${SEED} --clsAcc 0 -v -1 --toysFile higgsCombineTest.GenerateOnly.mH120.12345.root -t -1
echo "Finish Time is `date`"
ls -lrth
rm ./rstats*
rm ./T*.root
rm ./combine
rm ./libHiggsAnalysisCombinedLimit.so
rm ./input.tar.gz
ls -lrth
lcg-cp -b -D srmv2 --vo cms -t 2400 --verbose file:`pwd`/higgsCombineTest.HybridNew.mH120.${SEED}.root srm://bsrm-3.t2.ucsd.edu:8443/srm/v2/server?SFN=${COPYDIR}/higgsCombineTest.HybridNew.mH120.${SEED}.root
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.