blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
ef53655fbb55efe156b825c65f80fdbd9d875a62
|
Shell
|
akilleen/gcloudrig
|
/reset-windows-password.sh
|
UTF-8
| 369
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# gcloudrig/reset-windows-password.sh
# Sets/resets the Windows account password on the gcloudrig instance and
# prints the resulting credentials as a boxed table.
# NOTE(review): the original header said "gcloudrig/setup.sh" — looks like a
# copy/paste leftover; the repo path is reset-windows-password.sh.
# exit on error
set -e
# load globals ($INSTANCE, $USER and $ZONE are presumably defined there — confirm)
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
source "$DIR/globals.sh"
# set/reset windows credentials — expansions quoted so an empty or
# space-containing value fails loudly in gcloud instead of silently
# dropping/splitting arguments
gcloud compute reset-windows-password "$INSTANCE" \
	--user "$USER" \
	--zone "$ZONE" \
	--format "table[box,title='Windows Credentials'](ip_address,username,password)"
| true
|
b6125d40fe8ca7193e17aa91ef2b3d845d40c5ff
|
Shell
|
eamt/holbertonschool-higher_level_programming
|
/0x10-python-network_0/100-status_code.sh
|
UTF-8
| 155
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Send a request to a URL passed as an argument, and display only the status code of the response.
# Long options spell out the intent: silent run, body discarded, only the
# HTTP status code written out.
curl --silent --output /dev/null --write-out '%{http_code}' "$1"
| true
|
b18bd9efd7b506cc4e87a706c0c2fa3045d0792e
|
Shell
|
shijir38/OpenNotification
|
/tools/initmysql.sh
|
UTF-8
| 478
| 2.53125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Re-initialize the 'reliable' MySQL database, load the base schema, then run
# the Java database initializer.
# WARNING: drops the existing 'reliable' database (-f forces, no prompt).

# Project root is one level above this script's directory.
DIR=$(dirname -- "$0")
DIR=$DIR/..

# Build CLASSPATH from bin/ plus every entry under lib/.
for x in "$DIR/bin" "$DIR"/lib/*
do
	CLASSPATH=$CLASSPATH:$x
	export CLASSPATH
done

# Drop, recreate and populate the database.
# NOTE(review): credentials on the command line are visible in `ps` output —
# consider a protected option file (--defaults-extra-file).
mysqladmin --user=reliable --host=localhost --password=reliable -f drop reliable \
	&& mysqladmin --user=reliable --host=localhost --password=reliable create reliable \
	&& mysql --user=reliable --host=localhost --password=reliable reliable < "$DIR/sql/mysql.sql"

java -Dorg.quartz.properties="$DIR/conf/quartz.properties" net.reliableresponse.notification.util.InitializeDB
| true
|
434d8824e6953f66afae50e2d0897d0f706ddd95
|
Shell
|
tausiflife/error-prone
|
/util/generate-latest-docs.sh
|
UTF-8
| 1,329
| 3.359375
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Publishes the latest error-prone javadoc and generated wiki docs to the
# gh-pages branch after a successful Travis build.
# Adapted from:
# https://github.com/google/dagger/blob/master/util/generate-latest-docs.sh
set -eu
# Only publish from the canonical repo, the primary JDK build, and real
# pushes (not pull requests) to master — prevents forks/PR builds from
# overwriting the published docs.
if [ "$TRAVIS_REPO_SLUG" == "google/error-prone" ] && \
[ "$TRAVIS_JDK_VERSION" == "oraclejdk8" ] && \
[ "$TRAVIS_PULL_REQUEST" == "false" ] && \
[ "$TRAVIS_BRANCH" == "master" ]; then
echo -e "Publishing docs...\n"
GH_PAGES_DIR=$HOME/gh-pages
# --quiet and the >/dev/null keep ${GH_TOKEN} out of the build log.
git clone --quiet --branch=gh-pages https://${GH_TOKEN}@github.com/google/error-prone $GH_PAGES_DIR > /dev/null
# Subshell: wipe the previous docs without changing our own working dir.
(
cd $GH_PAGES_DIR
rm -rf _data/bugpatterns.yaml api/latest
mkdir -p _data api/latest
)
mvn -P '!examples' javadoc:aggregate
rsync -a target/site/apidocs/ ${GH_PAGES_DIR}/api/latest
# The "mvn clean" is necessary since the wiki docs are generated by an
# annotation processor that also compiles the code. If Maven thinks the code
# does not need to be recompiled, the wiki docs will not be generated either.
mvn clean
mvn -P run-annotation-processor compile site
rsync -a docgen/target/generated-wiki/ ${GH_PAGES_DIR}
cd $GH_PAGES_DIR
git add --all .
git config --global user.name "travis-ci"
git config --global user.email "travis@travis-ci.org"
git commit -m "Latest docs on successful travis build $TRAVIS_BUILD_NUMBER auto-pushed to gh-pages"
# Force-push: gh-pages history is disposable here; -q keeps output quiet.
git push -fq origin gh-pages > /dev/null
echo -e "Published docs to gh-pages.\n"
fi
| true
|
99de82bf52e153e8b3a15dc460a66964d842e75c
|
Shell
|
DavidCorino/Ansible
|
/Monitoramento/Nagios/scripts_mon/nagios_scripts/check_disco.sh
|
UTF-8
| 736
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/ksh
# Nagios check: disk usage of the partition passed as $1.
# Exit codes follow Nagios conventions: 0=OK, 1=WARNING (>=80%),
# 2=CRITICAL (>=90%), 3=UNKNOWN.
# NOTE(review): `df -g`/`df -m` and the column positions assumed below look
# AIX-specific — confirm on the target platform.
particao="$1"
# Percent used: df line for the partition, '%' field stripped.
perc_uso_disco=$(df -g | grep "$particao" | cut -d% -f1 | awk '{print $4}')
# Percent free is the complement (arithmetic expansion replaces `expr`).
perc_liv_disco=$((100 - perc_uso_disco))
# Free space in MB for the same partition (pattern quoted to avoid globbing).
mb_liv_disco=$(df -m | grep "$particao" | cut -d. -f2 | awk '{print $2}')
if [ "$perc_uso_disco" -lt 80 ]; then
	echo "DISK OK - free space: $particao $mb_liv_disco MB ($perc_liv_disco% livre)"
	exit 0
elif [ "$perc_uso_disco" -ge 80 ] && [ "$perc_uso_disco" -lt 90 ]; then
	echo "DISK AVISO - free space: $particao $mb_liv_disco MB ($perc_liv_disco% livre)"
	exit 1
elif [ "$perc_uso_disco" -ge 90 ]; then
	echo "DISK CRITICO - free space: $particao $mb_liv_disco MB ($perc_liv_disco% livre)"
	exit 2
else
	echo "DISK DESCONHECIDO - Falha ao executar o Monitoramento"
	exit 3
fi
| true
|
cc991e4cbb91a0fd078cef9021155f43c095dd14
|
Shell
|
kayhide/exercism
|
/bash/secret-handshake/secret_handshake.sh
|
UTF-8
| 483
| 3.3125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -eu
main() {
  # Secret-handshake: translate the bits of $1 into named actions.
  # Bits 0..3 select an action; bit 4 (1 << count) reverses the order.
  local moves=("wink" "double blink" "close your eyes" "jump")
  local total=${#moves[@]}
  local code=$1
  local out=()
  # Pick how matched actions are accumulated: prepend when the reverse bit
  # is set, append otherwise.
  if (( code & (1 << total) )); then
    add() {
      out=("${moves[$1]}" "${out[@]}")
    }
  else
    add() {
      out+=("${moves[$1]}")
    }
  fi
  local i
  for (( i = 0; i < total; i++ )); do
    if (( code & (1 << i) )); then
      add "$i"
    fi
  done
  # Join with commas for output.
  IFS=","
  echo "${out[*]}"
}
main "$@"
| true
|
d160720891e289da47b5eb3af93568f7c49dde3c
|
Shell
|
lutinghao/hao_test03
|
/scripts/ApplicationStop.sh
|
UTF-8
| 131
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Stop and remove the hao-devops container when exactly one matching line
# appears in `docker ps`.
# NOTE(review): grep matches anywhere in the `docker ps` line, so another
# container whose image/name merely contains "hao-devops" also counts.
# grep -c replaces `grep | wc -l`; -eq replaces the fragile string compare
# (`wc -l` pads its output with spaces on some platforms, breaking `= 1`).
if [ "$(docker ps | grep -c hao-devops)" -eq 1 ]
then
	docker stop hao-devops
	docker rm hao-devops
fi
| true
|
83eff605e6166e62bef5fc735e6c638063a5ab4c
|
Shell
|
Mans-from-the-ends/dotfilez
|
/scripts/linux_symlinks.sh
|
UTF-8
| 1,004
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
############################
# .make.sh
# This script creates symlinks from the home directory to any desired dotfiles in ~/dotfiles
############################
########## Variables
dir=~/dotfilez # dotfiles directory
olddir=~/dotfiles_old # old dotfiles backup directory
files="bashrc zshrc hyper.js scripts zsh vimrc" # list of files/folders to symlink in homedir
##########
# Initialising
echo "STARTING THE CREATING OF SYMLINKS TO DOTFILEZ!"
# create dotfiles_old in homedir
mkdir -p "$olddir"
echo "CLONING..."
# change to the dotfiles directory; abort instead of silently operating from
# the wrong directory if it does not exist
cd "$dir" || { echo "cannot cd to $dir" >&2; exit 1; }
echo "BACKING UP..."
# move any existing dotfiles in homedir to dotfiles_old directory, then create
# symlinks from the homedir to any files in the $dir directory listed in $files
# ($files is deliberately unquoted: word-splitting yields one name per entry)
for file in $files; do
	mv ~/."$file" ~/dotfiles_old/
	ln -s "$dir/$file" ~/."$file"
done
echo "SETUP HAS BEEN COMPLETED, IF UNSATISFIED WITH SYMLINKS CONSIDER USING REMOVE EXECUTABLE"
echo "CYA!"
| true
|
9590f6f6bc299afff6d274e910fa740d2f855632
|
Shell
|
MaxDistructo/ShellScripts
|
/linux-shell/PersonalProgramInstaller.sh
|
UTF-8
| 1,032
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Updates the system, then installs a personal program set: FileZilla (SFTP),
# Remmina (RDC), Cinnamon desktop (removing Unity), JetBrains Toolbox,
# Discord and Google Chrome.
# Update all current programs
sudo apt-get update
sudo apt-get upgrade -y
sudo apt-get dist-upgrade -y
# Start installing new programs. SFTP client, RDC Client, Desktop Environment. Uninstalls Unity and related packages
sudo apt install filezilla -y
sudo apt install remmina -y
sudo apt install cinnamon -y
sudo apt-get purge unity -y
sudo apt-get auto-remove -y
# Work in /tmp for downloads (the original created an unused ./tmp directory).
cd /tmp || exit 1
# Downloads Jetbrains Toolbox and opens it for you to install a program.
wget https://download.jetbrains.com/toolbox/jetbrains-toolbox-1.8.3678.tar.gz
# -xzf, not -xfz: with -xfz, tar takes "z" as the archive name and fails.
tar -xzf jetbrains-toolbox-1.8.3678.tar.gz
cd jetbrains-toolbox-1.8.3678 || exit 1
./jetbrains-toolbox
# Discord Installer
cd /tmp || exit 1
# Quote the URL: an unquoted '&' backgrounds wget and runs "format=deb" as a
# separate command. -O gives the download a fixed name for dpkg.
wget -O discord.deb 'https://discordapp.com/api/download?platform=linux&format=deb'
# "deb" is not a command — install the downloaded package with dpkg.
sudo dpkg -i discord.deb
# Google Chrome Installer (Yes I know firefox is in Ubuntu by default but most schools/workplaces test their websites and guarentee compatability with it.)
cd /tmp || exit 1
wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb
sudo dpkg -i google-chrome-stable_current_amd64.deb
| true
|
2f34fe48eae9bac49313e07fe0a84190244a5e04
|
Shell
|
bioinfx/cvdc_scripts
|
/di_tads.10k.2M/master.sh
|
UTF-8
| 1,419
| 3.03125
| 3
|
[] |
no_license
|
# Link per-replicate DI TAD calls into the analysis tree, derive TAD
# boundaries per sample, overlap them with DI scores, then run the
# downstream R analyses.
# NOTE(review): mkdir targets ../../analysis/di_tads.10k but the hard links
# below go to ../../analysis/di_tads.10k.2M — confirm which path is intended.
mkdir ../../analysis/di_tads.10k/di_tads
for file in ../../data/hic/DI/*.10000.200.DI.bedGraph.TAD; do
name=$(basename $file)
echo $name
ln $file ../../analysis/di_tads.10k.2M/di_tads/$name.bed
done
## merge tad boundaries.
#rm combined_tads.raw.sorted.txt
#for file in $(ls di_tads/D??_HiC_Rep?.*.TAD.bed); do
# awk -v OFS="\t" -v name=${file/di_tads\/} '{ if(NR>1) print $0,name}' $file
# done |sort --parallel=4 -k1,1 -k2,2n -k3,3n - >> combined_tads.raw.sorted.txt
## replicated TAD boundaries
# For each sample, emit the TAD start and end positions as separate boundary
# records (chrom, pos, sample), sorted for the merge below.
mkdir boundaries
for file in di_tads/*.bed; do
file2=$(basename $file)
name=${file2/.10000.200.DI.bedGraph.TAD.bed/}
echo $name
awk -v OFS="\t" -v name=$name '{print $1,$2,name"\n"$1,$3,name}' $file |sort -k1,1 -k2,2n > boundaries/$name.boundary
done
# sort -m merges the already-sorted per-sample files without re-sorting.
sort -k1,1 -k2,2n -m boundaries/D*.boundary > boundaries/boundary.all.txt
# overlap TAD boundary with DI score.
# NOTE(review): boundaries/combined_boundary.uniq.txt and the
# boundary_DI_overlaps/ directory are consumed/written here but never
# created above — presumably produced by an earlier pipeline step; confirm.
# The awk builds a +/-50 kb window around each boundary, clamped at 0.
for sample in $(cat ../../data/hic/meta/names.txt); do
echo $sample
intersectBed \
-a <(awk -v OFS="\t" '{if ($2-50000 <0){print $1,0,$2+50000,$1":"$2 } else {print $1,$2-50000,$2+50000,$1":"$2 }}' boundaries/combined_boundary.uniq.txt ) \
-b DI_quantile/${sample}.10000.200.DI.bedGraph \
-wo > boundary_DI_overlaps/${sample}.DI.overlap.txt
done
Rscript calc_DI_delta.r
Rscript define_boundary_by_DI_delta.r
Rscript "find_dynamic_boundary_DI_delta.r"
# plot the TAD number
Rscript plotTadNumber.r
Rscript plotTadSize.r
| true
|
62d204f7001dcde21b50acc8285dcd8c3661b573
|
Shell
|
ruleizhou/Linux_env
|
/linux_sh/install_xed.sh
|
UTF-8
| 199
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the xed text editor from the embrosyn/xapps PPA unless it is
# already present. `type xed` still prints the resolved path when found.
if type xed; then
	echo "Already install xed"
else
	echo "install xed"
	sudo add-apt-repository ppa:embrosyn/xapps
	# Refresh package lists so apt can see the newly added PPA
	# (older add-apt-repository versions do not run an update themselves).
	sudo apt-get update
	sudo apt-get install xed --fix-missing -y
fi
| true
|
61435ec652487c55bbf7d45e61f2131d6d93bfbb
|
Shell
|
roxor05/Programs
|
/Basic Bash scripting/breakandcontinue.sh
|
UTF-8
| 224
| 3.203125
| 3
|
[] |
no_license
|
#! /bin/bash
# Demonstrates `break`: walk 1..10 but leave the loop entirely once the
# counter passes 5, so only 1-5 are printed.
for n in {1..10}
do
	if [ "$n" -gt 5 ]
	then
		break
	fi
	echo "$n"
done
# Demonstrates `continue`: walk 1..10 but skip the iterations where the
# counter equals 3 or 6.
for n in {1..10}
do
	if [ "$n" -eq 3 ] || [ "$n" -eq 6 ]
	then
		continue
	fi
	echo "$n"
done
| true
|
1db1fc387c03c1d11c2335df657ebe3a4154ea30
|
Shell
|
w0rp/scripts
|
/linux/list-changed
|
UTF-8
| 1,264
| 4.28125
| 4
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/bash -eu
# Lists files changed in the enclosing git repository (or submodule) as
# absolute paths. Usage: list-changed [--cached] [--] [commit]
cached=''
# Consume leading options; anything after -- (or the first non-option) is
# treated as a positional argument for git diff.
while [ $# -ne 0 ]; do
case $1 in
--cached)
cached='--cached'
shift
;;
--)
shift
break
;;
-?*)
echo "Invalid argument: $1" 1>&2
exit 1
;;
*)
break
;;
esac
done
# Remember where we started before walking upward.
dir="$(pwd)"
# Walk up parent paths until we hit the git directory.
while [ ! -d '.git/objects' ] && [ "$PWD" != / ]; do
cd ..
done
if [ ! -d '.git/objects' ]; then
echo 'This is not a git repository directory.' 1>&2
exit 1
fi
if [ -f '.gitmodules' ]; then
# Look through git modules.
# If the directory we were originally in was inside of a git submodule,
# Then we should change directories to the root of the submodule.
# (Prefix test: ${dir:0:${#mod}} compares the leading path components.)
for path in $(grep -Po '(?<=path = ).*' .gitmodules ); do
mod="$(pwd)/$path"
if [ "${dir:0:${#mod}}" == "$mod" ]; then
cd "$mod"
break
fi
done
fi
# Doubly-escapes '/' so the repo root can be used inside a sed replacement.
function escape-slashes() {
echo "$1" | sed -r 's/\\\//\//g' | sed -r 's/\//\\\//g'
}
git_root="$(pwd)"
# Prefix every changed file with the absolute repo root. $cached is
# intentionally unquoted: when empty it must disappear entirely.
if [ $# -ne 0 ]; then
git diff --name-only $cached "$1" | uniq | sed -e "s/^/$(escape-slashes "$git_root")\\//"
else
git diff --name-only $cached | uniq | sed -e "s/^/$(escape-slashes "$git_root")\\//"
fi
| true
|
b7a27d9e151b9b937b19ec24cb3a6ffbe05fdde7
|
Shell
|
skinforhair/.skinforhair_settings
|
/env/global/bashrc
|
UTF-8
| 960
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
## LOCAL BASH SETTINGS ##
# Root of the personal settings checkout; everything below hangs off it.
export profileDir="$HOME/.skinforhair_settings"
#Grab variables
#export pVAR=$profileDir/var
#for f in $pVAR/*;do source $f; done
export pBIN=$profileDir/bin
export pLIB=$profileDir/lib
#active should be a symlink to the local machine
export lBIN=$profileDir/bin/active
export pENV=$profileDir/env
#add Aliases
export pALS=$profileDir/aliases/global
#active should be a symlink to the local machine
export lALS=$profileDir/aliases/active
# Source every regular file (directories skipped); "$f" is quoted so paths
# containing spaces cannot break the tests or split the source argument.
for f in "$pALS"/*;do if [ ! -d "$f" ]; then source "$f";fi;done
for f in "$lALS"/*;do if [ ! -d "$f" ]; then source "$f";fi;done
#Run custom login scripts (per-host directory keyed on hostname)
if [ -d "$pENV/startup_scripts/$(hostname)" ]; then
	for f in "$pENV/startup_scripts/$(hostname)"/*;do if [ ! -d "$f" ]; then "$f";fi;done
fi
#set up color command prompt
source "$pENV/global/color_command_prompt"
#set PATH
PATH=$PATH:$lBIN:$pBIN/global:$pBIN/global/networking:$pBIN/global/password:$pBIN/global/git
| true
|
1032f91cb20bdd56775192900185752ccf840bb9
|
Shell
|
Vinotha16/WIN_ROLLBACK
|
/templates/linux_actualfacts/centos8/bootloadperm_151_actual.fact
|
UTF-8
| 483
| 2.703125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Fact collector: reports the actual mode/owner/group of the GRUB2
# bootloader files as a JSON fragment (CIS-style check 1.5.1).
a=$(stat -c "%a %U %G %n" /boot/grub2/grub.cfg)
b=$(stat -c "%a %U %G %n" /boot/grub2/grubenv)
# Combined "actual" value for both files, comma-separated.
cmd="${a}","${b}"
# Each pipeline counts `stat` output lines showing mode 0600 owned by
# root:root; a count of 0 means that file does NOT match the expected state.
if [ $(stat /boot/grub2/grub.cfg | grep 0600 | grep ".*Uid:.*0/.*root" | grep ".*Gid:.*0/.*root" | wc -l) -eq 0 ] || [ $(stat /boot/grub2/grubenv | grep 0600 | grep ".*Uid:.*0/.*root" | grep ".*Gid:.*0/.*root" | wc -l) -eq 0 ]; then
echo "{ \"bootloadperm_151_actual\" : \"\" }"
else
echo "{ \"bootloadperm_151_actual\" : \"${cmd}\" }"
# NOTE(review): exit 1 fires when BOTH files already match 0600 root:root,
# i.e. the apparently-compliant case — confirm the fact-collection
# framework expects this inversion (empty value = non-compliant).
exit 1
fi
| true
|
259bacf77c66714ab6e8c9162d04c00ff0528d15
|
Shell
|
adgorithmics-inc/cinnamon-python-sdk
|
/pre-push.sh
|
UTF-8
| 587
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Pre-push hook: refuse pushes containing "nocommit" markers, then run
# flake8, black --check and the unit tests over the repo's Python files.
# NOTE(review): $against is never set in this script, so git diff compares
# the index against HEAD by default — confirm that is intended. It must stay
# unquoted so an empty value disappears instead of becoming an empty arg.
files_with_nocommit=$(git diff --cached --name-only --diff-filter=ACM $against | xargs grep -i "nocommit" -l | tr '\n' ' ')
# -n replaces the legacy "x${var}x" non-empty idiom.
if [ -n "$files_with_nocommit" ]; then
	tput setaf 1
	echo "File being committed with 'nocommit' in it:"
	echo $files_with_nocommit | tr ' ' '\n'
	tput sgr0
	exit 1
fi
# `cmd || exit 1` replaces the separate `if [ "$?" -ne "0" ]` checks.
git ls-files -z **\*.py | xargs --null flake8 --config=pep.cfg || exit 1
git ls-files -z **\*.py | xargs --null black --check || exit 1
python -m unittest || exit 1
exit 0
| true
|
1f643e738fe93f9540342e65ad85ff1ad033758a
|
Shell
|
dhanaprakashj/cnv_wgs
|
/rcp/rcp_filter.sh
|
UTF-8
| 268
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# This script reads the files in the CGIASM.seg.gz files and unzips the files in their respective directories.
# Example command
# sh rcp_filter.sh
# $samples.rcp.in
cat $1 | while read line
do
gunzip -c $line > `echo $line | sed s/.gz//`
done
| true
|
c0852674b6341b4e54214fb593add39cc8edd932
|
Shell
|
Open-TEE/project
|
/install_android.sh
|
UTF-8
| 3,918
| 3.484375
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
#
# Copyright (C) 2015 Aalto University.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Open-TEE install targets
OT_PROGS=${OT_PROGS-"opentee-engine conn_test_app"}
OT_LIBS=${OT_LIBS-"libtee libCommonApi libInternalApi libcrypto_test libstorage_test"}
OT_LIBSX=${OT_LIBSX-"libLauncherApi libManagerApi"}
OT_TAS=${OT_TAS-"libta_conn_test_app"}
OT_CONFIG=${OT_CONFIG-"opentee.conf"}
# Destinations
OT_PREFIX=${OT_PREFIX-"/system"}
OT_PROGS_DEST=${OT_PROGS_DEST-"$OT_PREFIX/bin"}
OT_LIBS_DEST=${OT_LIBS_DEST-"$OT_PREFIX/lib"}
OT_LIBSX_DEST=${OT_LIBSX_DEST-"$OT_PREFIX/lib/tee"}
OT_TAS_DEST=${OT_TAS_DEST-"$OT_PREFIX/lib/ta"}
OT_CONFIG_DEST=${OT_CONFIG_DEST-"$OT_PREFIX/etc"}
# Open-TEE project directory
OT_BASEDIR="$(dirname "${BASH_SOURCE[0]}")/project"
# Returns the adb shell user from connected device
adb_whoami()
{
echo $(echo "echo \$USER; exit" | adb shell | tail -1 | tr -d '\r')
}
# Write error message to stdout and exit
fail()
{
echo "`basename $0`: $1" >&2
exit 1
}
# Make sure ANDROID_PRODUCT_OUT is set to the product-specific directory that
# contains the generated binaries
if [ -z "$ANDROID_PRODUCT_OUT" ]; then
fail "ANDROID_PRODUCT_OUT not set, run lunch to set build target, aborting"
fi
# Restart adbd daemon as root if needed
if [ "x$(adb_whoami)" != "xroot" ]; then
printf "Restarting adbd daemon with root permissions: "
adb root
sleep 5s
if [ "x$(adb_whoami)" != "xroot" ]; then
echo "FAILED"
fail "failed restart adbd with root permissions, aborting"
else
echo "OK"
fi
fi
# Remount system partions read-write"
printf "Remounting /system read-write: "
adb remount || fail "failed to remount /system read-write, aborting"
# Create destination directories
adb shell mkdir -p "$OT_TAS_DEST" || fail "failed to create '$TA_DEST', aborting"
adb shell chmod 755 "$OT_TAS_DEST" || fail "failed to set permissions for '$OT_TAS_DEST', aborting"
adb shell mkdir -p "$OT_LIBSX_DEST" || fail "failed to create '$OT_LIBSX_DEST', aborting"
adb shell chmod 755 "$OT_LIBSX_DEST" || fail "failed to set permissions for '$OT_LIBSX_DEST', aborting"
# Push programs
for target in $OT_PROGS
do
infile="$ANDROID_PRODUCT_OUT/system/bin/$target"
echo "Pushing '$target' to '$OT_PROGS_DEST'"
adb push "$infile" "$OT_PROGS_DEST" || fail "failed to push '$target', aborting"
adb shell chmod 755 "$OT_PROGS_DEST/$target" || fail "failed to set permissions for '$target', aborting"
done
# Push libraries
for target in $OT_LIBS
do
infile="$ANDROID_PRODUCT_OUT/system/lib/${target}.so"
echo "Pushing '$target' to '$OT_LIBS_DEST'"
adb push "$infile" "$OT_LIBS_DEST" || fail "failed to push '$target', aborting"
done
# Push additional libraries
for target in $OT_LIBSX
do
infile="$ANDROID_PRODUCT_OUT/system/lib/${target}.so"
echo "Pushing '$target' to '$OT_LIBSX_DEST'"
adb push "$infile" "$OT_LIBSX_DEST" || fail "failed to push '$target', aborting"
done
# Push TAs
for target in $OT_TAS
do
infile="$ANDROID_PRODUCT_OUT/system/lib/${target}.so"
echo "Pushing '$target' to '$OT_TAS_DEST'"
adb push "$infile" "$OT_TAS_DEST" || fail "failed to push '$target', aborting"
done
# Push config
for target in $OT_CONFIG
do
infile="$OT_BASEDIR/${target}.android"
echo "Pushing '$target' to '$OT_CONFIG_DEST'"
adb push "$infile" "$OT_CONFIG_DEST/${target}" || fail "failed to push '$target', aborting"
done
# Done
exit 0
| true
|
ccc769308369095029c5eabad2e352d7ab659c0c
|
Shell
|
cohenjo/dbautils
|
/oracle/dbmon/showparse
|
UTF-8
| 6,552
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/ksh
#######################################################################
# Sam Yeoman
# Script: showparse
# Description: to show sqls in library cache with high parse_calls
#######################################################################
############################################
# Show usage
############################################
# Prints the supported flags to stdout.
# NOTE(review): Main dispatches -hs to hard_parse_sql and -hp to
# hard_parse_prog, which is the reverse of what this text describes —
# confirm which side is wrong before changing either.
Usage()
{
printf "\n"
echo "Usage: { `basename $0` "
echo " -h | Help"
echo " -f | show full sql_text"
echo " -s | show stats"
echo " -hs | show current hard parse programs"
echo " -hp | show current hard parse sqls from v\$session"
echo " }\n"
printf "\n"
}
############################################
# Normal output (with sql_text truncated)
############################################
# Top 25 non-SYS cursors by parse_calls, read from the library-cache fixed
# table x$kglcursor_child_sqlid (sql_text truncated to 50 chars).
# The heredoc is sent verbatim to sqlplus; '$' in view names is escaped so
# the shell does not expand it. Requires "/ as sysdba" access.
normal()
{
sqlplus -s "/ as sysdba" <<EOF
set feed off
set echo off
set verify off
set pagesize 132
set linesize 192
column schema format a16 trunc
column module format a24 trunc
column action format a24
column sql_text format a50 trunc
column executions format 99,999,999
column parse_calls format 99,999,999
alter session set "_hash_join_enabled"=true;
select * from (
SELECT
k.kglobt03 sql_id,
k.kglobts4 schema,
k.kglobts0 module,
k.kglobts1 action,
k.kglnaobj sql_text,
k.kglobt05 executions,
k.kglobt12 parse_calls
FROM
x\$kglcursor_child_sqlid k
where k.kglobts4 != 'SYS'
order by parse_calls desc)
where rownum < 25
/
EOF
}
############################################
# Full output: Show full sql_text
############################################
# Identical to normal() except `column sql_text format a50 wrap` (wrap
# instead of trunc) so the complete SQL text is shown.
show_full()
{
sqlplus -s "/ as sysdba" <<EOF
set feed off
set echo off
set verify off
set pagesize 132
set linesize 192
column schema format a16 trunc
column module format a24 trunc
column action format a24
column sql_text format a50 wrap
column executions format 99,999,999
column parse_calls format 99,999,999
alter session set "_hash_join_enabled"=true;
select * from (
SELECT
k.kglobt03 sql_id,
k.kglobts4 schema,
k.kglobts0 module,
k.kglobts1 action,
k.kglnaobj sql_text,
k.kglobt05 executions,
k.kglobt12 parse_calls
FROM
x\$kglcursor_child_sqlid k
where k.kglobts4 != 'SYS'
order by parse_calls desc)
where rownum < 25
/
EOF
}
############################################
# Parse time cpu
############################################
# Prints library-cache statistics and parse/CPU counters in three
# consecutive sqlplus sessions.
stats()
{
# 1) per-namespace library cache stats and the overall hit ratio
sqlplus -s "/ as sysdba" <<EOF
set feed off
set echo off
set verify off
set pagesize 132
set linesize 192
SELECT NAMESPACE, PINS, PINHITS, RELOADS, INVALIDATIONS
FROM V\$LIBRARYCACHE
ORDER BY NAMESPACE
/
select (sum(pinhits) / sum(pins)) "Library Cache Hit Ratio" from V\$LIBRARYCACHE
/
EOF
# 2) total vs hard parse counts from v$sysstat
sqlplus -s "/ as sysdba" <<EOF
set feed off
set echo off
set verify off
set pagesize 132
set linesize 192
column NAME format a25
SELECT a.VALUE "parse count (total)",
b.VALUE "parse count (hard)"
FROM v\$sysstat a, v\$sysstat b
WHERE a.name = 'parse count (total)'
AND b.name = 'parse count (hard)'
/
EOF
# 3) CPU counters: session CPU, parse-time CPU, recursive CPU
sqlplus -s "/ as sysdba" <<EOF
set feed off
set echo off
set verify off
set pagesize 132
set linesize 192
column NAME format a25
SELECT a.VALUE "cpu_used_by_this_session",
b.VALUE "parse_time_cpu",
c.VALUE "recursive_cpu_usage"
FROM v\$sysstat a, v\$sysstat b, v\$sysstat c
WHERE a.name = 'CPU used by this session'
AND b.name = 'parse time cpu'
AND c.name = 'recursive cpu usage'
/
EOF
}
############################################
# Show current hard parse sqls from v$session
############################################
# Joins internal x$ fixed tables (session state, library-cache cursors) to
# list sessions currently accumulating hard parses, with their SQL text.
# NOTE(review): column meanings of the x$ tables are version-dependent
# Oracle internals — verify against the target database release.
hard_parse_sql()
{
sqlplus -s "/ as sysdba" <<EOF
set feed off
set echo off
set verify off
set pagesize 132
set linesize 192
column username format a16 trunc
column module format a24 trunc
column machine format a10 trunc
column program format a15 trunc
column osuser format a10 trunc
column action format a24
column sql_text format a50 trunc
column executions format 99,999,999
column parse_calls format 99,999,999
SELECT /*+ use_hash (a,b,e,t,s,f) */
s.indx sid,
s.ksuudnam username,
s.ksusepid process,
s.ksuseunm osuser,
s.ksusemnm machine,
s.ksusepnm program,
e.kglnaobj sql_text,
e.kglobt05 executions,
e.kglobt12 parse_calls
FROM x\$ksusesta a, x\$ksusesta b, x\$kglcursor_child_sqlid e, x\$ktsso t, x\$ksuse s, x\$ksled f
WHERE BITAND (s.ksspaflg, 1) != 0
AND BITAND (s.ksuseflg, 1) != 0
AND s.ksuseopc = f.indx
AND b.ksusestv > 0
AND s.indx = a.ksusenum
AND a.ksusenum = b.ksusenum
AND a.ksusestn = (SELECT indx
FROM x\$ksusd
WHERE ksusdnam = 'parse count (hard)')
AND s.ksusesqi = e.kglobt03
AND e.kglobt12 > 0
and t.inst_id=s.inst_id
GROUP BY t.inst_id,
s.indx,
s.ksuudnam,
s.ksusepid,
s.ksuseunm,
s.ksusemnm,
s.ksusepnm,
e.kglnaobj,
e.kglobt05,
e.kglobt12
ORDER BY e.kglobt05 / e.kglobt12
/
EOF
}
############################################
# Show current hard parse programs
############################################
# Ranks connected programs by their average hard-parse-to-total-parse ratio
# (top 35), aggregated per host/instance/program from v$ views.
hard_parse_prog()
{
sqlplus -s "/ as sysdba" <<EOF
set feed off
set echo off
set verify off
set pagesize 132
set linesize 192
column "avg_parse_%" format 99,999.00
column "max_parse_%" format 99,999.00
select * from (
select substr(d.host_name,1,8) machine_name,
substr(d.instance_name,1,8) instance_name,
rpad(nvl(program,machine),50,' ') program_name,
round(avg((a.value/b.value)*100),2) "avg_parse_%",
round(max((a.value/b.value)*100),2) "max_parse_%",
count(*) program_session_count
from v\$session s,v\$sesstat a,v\$sesstat b, v\$instance d
where b.value>0 and s.sid=a.sid and a.sid=b.sid and
a.statistic#=(select statistic# from v\$statname
where name='parse count (hard)') and b
.statistic#=(select statistic# from v\$statname
where name='parse count (total)')
group by substr(d.host_name,1,8),
substr(d.instance_name,1,8),
rpad(nvl(program,machine),50,' ')
order by round(avg((a.value/b.value)*100),2) desc)
where rownum < 35
/
EOF
}
############################################
# Main
############################################
# Optional second argument is upshifted (typeset -u) and defaults to "%".
# NOTE(review): USER_NAME is exported but not referenced by any function in
# this script — possibly a leftover from a related tool; confirm.
typeset -u USER_NAME=`echo $2|cut -d. -f1`
if [ -z "$USER_NAME" ]
then
export USER_NAME="%"
fi
# Dispatch on the first flag; no arguments runs the default listing.
case $1 in
-h ) Usage ;;
-f ) show_full ;;
-s ) stats ;;
-hs ) hard_parse_sql ;;
-hp ) hard_parse_prog ;;
"" ) normal ;;
* ) Usage ;;
esac
| true
|
1515e5447a2bcbba976240b20afb9350d1e31a7d
|
Shell
|
mechatrax/4gpi-networkmanager
|
/4gpi-nm-helper
|
UTF-8
| 5,094
| 3.828125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# 4gpi-nm-helper — manages the default APN/authentication settings of the
# 4G module by sending AT commands (via chat) over its console port.

# Sourced at the bottom of the script; presumably defines CONSOLE_PORT,
# which the chat helpers below require — confirm in /etc/default/4gpi.
CONFIG=/etc/default/4gpi
# chat(8) response timeout in seconds.
TIMEOUT=10
VERSION="3.0"
USAGE="Usage: 4gpi-nm-helper COMMAND [PARAMETERS...]
Commands:
  set default apn=<apn> [user=<user>] [password=<password>] [pdp=...] [auth=...]
  set default bearer
Parameters:
  apn=<apn>            set <apn> to APN
  user=<user>          set <user> to user name (optional default blank)
  password=<password>  set <password> to password (optional default blank)
  pdp={IP|IPV6|IPV4V6} set PDP type (optional default:IPV4V6)
  auth={0|1|2|3}       set authentication type (optional default:3)
                         0: none
                         1: PAP
                         2: CHAP
                         3: PAP or CHAP
  show default {apn|user|password|pdp|auth|all}
  show default bearer
  clear default
  clear default bearer
  version
      display version and exit
  help
      display help and exit
"
#
# Usage: 4gpi_chat <COMMANDS>
#
# Runs a chat(8) expect/send script against the modem console, aborting on
# BUSY or ERROR replies. NOTE(review): $script goes through eval, so the
# arguments are re-parsed by the shell — callers must pre-escape quotes
# (see the \\\" sequences in set_config). CONSOLE_PORT is expected from
# the sourced $CONFIG file.
4gpi_chat ()
{
local script
script="ABORT BUSY ABORT ERROR $*"
eval chat -t $TIMEOUT $script < $CONSOLE_PORT > $CONSOLE_PORT
}
#
# Usage: 4gpi_chat_e <COMMANDS>
#
# Same as 4gpi_chat but with chat -e, which echoes the conversation —
# show_config captures that output via 2>&1 to read the modem's replies.
4gpi_chat_e ()
{
local script
script="ABORT BUSY ABORT ERROR $*"
eval chat -e -t $TIMEOUT $script < $CONSOLE_PORT > $CONSOLE_PORT
}
#
# Usage: show_config <NAME> [ apn | user | password | pdp | auth ]
#
# Queries PDP context 1 (AT+CGDCONT?) and its auth settings (AT+CGAUTH?)
# and prints the requested field(s); with no field (or an unknown one)
# prints: "<apn>" "<user>" "<password>" "<pdp>" <auth>.
# Only NAME "default" is supported.
show_config ()
{
local gdcont
local gauth
local apn
local user
local pass
local pdp
local auth
if [ "$1" != "default" ]
then
return 1
fi
# Wake the modem, then capture the echoed replies for context 1.
4gpi_chat "'' AT OK-AT-OK ''"
gdcont=$(4gpi_chat_e "'' AT+CGDCONT? OK ''" 2>&1 | grep "CGDCONT: 1")
gauth=$(4gpi_chat_e "'' AT+CGAUTH? OK ''" 2>&1 | grep "CGAUTH: 1")
# Split the comma-separated AT responses and strip the quotes.
apn=$(echo "$gdcont" | cut -d, -f3 | sed -e 's/"//g')
auth=$(echo "$gauth" | cut -d, -f2 | sed -e 's/"//g')
user=$(echo "$gauth" | cut -d, -f3 | sed -e 's/"//g')
pass=$(echo "$gauth" | cut -d, -f4 | sed -e 's/"//g')
pdp=$(echo "$gdcont" | cut -d, -f2 | sed -e 's/"//g')
# Remaining args are the fields to print; default to "all".
params=($@)
params=(${params[@]:1})
if [ ${#params[@]} -eq 0 ]
then
params=("all")
fi
# NOTE(review): [ $params ] tests only the first element unquoted — it
# errors if that element contains whitespace; confirm inputs never do.
while [ $params ]
do
case "${params[0]}" in
apn)
echo $apn
;;
auth)
echo $auth
;;
user)
echo $user
;;
password)
echo $pass
;;
pdp|type)
echo $pdp
;;
*)
echo \"$apn\" \"$user\" \"$pass\" \"$pdp\" $auth
;;
esac
params=(${params[@]:1})
done
}
#
# Usage: set_config <NAME> <PARAMS...>
#
# Parses key=value (or "key value") parameters, compares them with the
# modem's current settings, and writes only what changed: AT+CGDCONT for
# apn/pdp, AT+CGAUTH for user/password/auth. The radio is disabled
# (AT+CFUN=4) before writing and re-enabled (AT+CFUN=1) at the end.
# Only NAME "default" is supported.
set_config ()
{
local apn
local user
local pass
local pdp
local auth
local default
local params
# Defaults match the USAGE text: blank credentials, IPV4V6, auth 3.
apn=""
user=""
pass=""
pdp="IPV4V6"
auth="3"
params=($@)
params=(${params[@]:1})
if [ "$1" != "default" ]
then
return 1
fi
# Each case arm accepts either "key=value" (consume 1 arg) or
# "key value" (consume 2 args). Unknown keys abort with status 1.
# NOTE(review): [ $params ] tests only the first element unquoted — it
# errors if that element contains whitespace; confirm inputs never do.
while [ $params ]
do
case "${params[0]}" in
apn=*)
apn="$(echo ${params[0]} | sed -e 's/^apn=//')"
params=(${params[@]:1})
;;
apn)
apn=${params[1]}
params=(${params[@]:2})
;;
user=*)
user="$(echo ${params[0]} | sed -e 's/^user=//')"
params=(${params[@]:1})
;;
user)
user=${params[1]}
params=(${params[@]:2})
;;
password=*)
pass="$(echo ${params[0]} | sed -e 's/^password=//')"
params=(${params[@]:1})
;;
password)
pass=${params[1]}
params=(${params[@]:2})
;;
pdp=*|type=*)
pdp="$(echo ${params[0]} | sed -e 's/^pdp=//' -e 's/^type=//')"
params=(${params[@]:1})
;;
pdp|type)
pdp=${params[1]}
params=(${params[@]:2})
;;
auth=*)
auth="$(echo ${params[0]} | sed -e 's/^auth=//')"
params=(${params[@]:1})
;;
auth)
auth=${params[1]}
params=(${params[@]:2})
;;
*)
return 1
;;
esac
done
# current = ("apn" "user" "pass" "pdp" auth) as printed by show_config;
# the literal \" comparisons mirror its quoted output format.
current=($(show_config $1))
if [ "${current[0]}" != "\"$apn\"" ] \
|| [ "${current[3]}" != "\"$pdp\"" ]
then
4gpi_chat "'' AT OK-AT-OK AT+CFUN=4 OK ''"
4gpi_chat "'' AT+CGDCONT=1,\\\"$pdp\\\",\\\"$apn\\\" OK ''"
fi
if [ "${current[1]}" != "\"$user\"" ] \
|| [ "${current[2]}" != "\"$pass\"" ] \
|| [ "${current[4]}" != "$auth" ]
then
4gpi_chat "'' AT OK-AT-OK AT+CFUN=4 OK ''"
# Full credentials set auth explicitly; otherwise reset CGAUTH to defaults.
if [ "$pass" != "" ] && [ "$user" != "" ] && [ "$apn" != "" ]
then
4gpi_chat "'' AT+CGAUTH=1,$auth,\\\"$pass\\\",\\\"$user\\\" OK ''"
else
4gpi_chat "'' AT+CGAUTH=1 OK ''"
fi
fi
# Re-enable the radio regardless of what was written.
4gpi_chat "'' AT OK-AT-OK AT+CFUN=1 OK ''"
}
#
# Usage: clear_config <NAME>
#
# Resets PDP context 1 and its auth settings to modem defaults: radio off
# (AT+CFUN=4), bare AT+CGDCONT=1 / AT+CGAUTH=1, then radio back on.
# Only NAME "default" is supported.
clear_config ()
{
if [ "$1" != "default" ]
then
return 1
fi
4gpi_chat "'' AT OK-AT-OK AT+CFUN=4 OK ''"
4gpi_chat "'' AT+CGDCONT=1 OK ''"
4gpi_chat "'' AT+CGAUTH=1 OK ''"
4gpi_chat "'' AT OK-AT-OK AT+CFUN=1 OK ''"
}
# Load board configuration (presumably provides CONSOLE_PORT — confirm).
[ -r $CONFIG ] && . $CONFIG
# NOTE(review): args=($@) is unquoted, so arguments containing spaces are
# word-split before being forwarded below.
args=($@)
#
# execute command
#
case $1 in
show)
show_config "$2" "${args[@]:2}"
;;
set)
set_config "$2" "${args[@]:2}"
;;
clear)
clear_config "$2"
;;
version)
echo "4gpi-nm-helper version $VERSION"
;;
help)
echo "$USAGE"
;;
*)
# Unknown command: print usage and fail.
echo "$USAGE"
exit 1
;;
esac
| true
|
8e38b8df1117ec45456a27f75c55633ae3b97df9
|
Shell
|
hmdhszd/Simple-Bash-Scripts--Ping-IP-Ranges
|
/get-ip-as-argument.sh
|
UTF-8
| 255
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Ping every host given as an argument once and report whether it is up.

# Pings $1 once; prints an up/down message with a timestamp.
# (Fixed: the messages previously read the caller's loop variable $i
# instead of the function's own argument $1 — it only worked by accident.)
is_alive_ping()
{
	if ping -c 1 "$1" > /dev/null
	then
		echo node with address "$1" is up at $(date +%Y-%m-%d--%H:%M)
	else
		echo "$1" is down at $(date +%Y-%m-%d--%H:%M)
	fi
}

# "$@" keeps each argument intact even if it contains spaces.
for host in "$@"
do
	is_alive_ping "$host"
done
exit
| true
|
263e2b30c99d3120b15da7b269a0da90ed93051f
|
Shell
|
ZaydTheCoder/pi-ware-1
|
/install
|
UTF-8
| 2,408
| 4.0625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Pi-Ware installer: sets up the store, a desktop entry, an auto-update
# systemd service, and records the preferred package manager.
#Variables
PW_PREFIX="$HOME/.local" # install prefix for pi-ware data and launcher
fwe="" # set to "1" by error() when a non-fatal error occurred — TODO confirm meaning of the name
ers="" # concatenated non-fatal error messages collected by error()
#Functions
# Print $1 in red. When $2 is the literal word "exit", abort the installer
# with status 1; otherwise flag that a non-fatal error happened (fwe) and
# append the message to the accumulated list (ers).
function error {
	echo -e "\e[91m$1\e[39m"
	case "$2" in
		exit)
			exit 1
			;;
		*)
			fwe="1"
			ers+="$1"
			;;
	esac
}
# Show message $2 in red, then pause $1 seconds so the user can read it.
# NOTE: uses the same red (91) colour code as error().
function warning {
	local delay=$1
	local message=$2
	echo -e "\e[91m${message}\e[39m"
	sleep "$delay"
}
#Make required directory for updater
mkdir -p "$PW_PREFIX/share/pi-ware"
mkdir -p "$PW_PREFIX/bin"
# Record the installed commit so the updater can detect new versions.
# NOTE(review): assumes the script runs inside the pi-ware git clone.
echo $(git rev-parse main) > "$PW_PREFIX/share/pi-ware/version"
if [ "$1" == "update" ];then
	rm -rf $HOME/pi-ware
	cp -r "$PW_PREFIX/share/pi-ware/updater_files" $HOME/pi-ware
	cp -r $HOME/pi-ware/store.py "$PW_PREFIX/bin/pi-ware"
	chmod +x "$PW_PREFIX/bin/pi-ware"
else
	echo "Installing pi-ware..."
fi
#Main
echo "Creating a desktop entry for Pi-Ware..."
# No sudo: a root-owned ~/.local/share/applications would make the
# user-level redirect below fail.
mkdir -p $HOME/.local/share/applications
echo "[Desktop Entry]
Name=Pi-Ware
Comment=Raspberry Pi app store
Exec=python3 $HOME/pi-ware/store.py
Icon=$HOME/pi-ware/icons/logo.png
Categories=Utility;
Type=Application
Terminal=false" > $HOME/.local/share/applications/pi-ware.desktop
echo "Creating an auto updater for Pi-Ware..."
# Creating under /etc requires root.
sudo mkdir -p /etc/systemd/system/
# Pipe the unit into sudo tee; discard tee's stdout. (The original
# redirected echo's stdout to /dev/null BEFORE the pipe, so tee received
# nothing and wrote an empty service file.)
echo "[Unit]
Description=Auto update service for pi-ware
After=network.target
[Service]
ExecStart=/usr/bin/bash auto-update
WorkingDirectory=$HOME/pi-ware
StandardOutput=inherit
StandardError=inherit
Restart=always
User=pi
[Install]
WantedBy=multi-user.target" | sudo tee /etc/systemd/system/Auto-update-pi-ware.service > /dev/null
sudo systemctl enable Auto-update-pi-ware.service
#install lxterminal if it is not already installed
if ! command -v lxterminal >/dev/null;then
	echo "Installing LXTerminal..."
	if [ -f /usr/bin/apt ];then
		echo "Package manager is: apt"
		sudo apt update
		sudo apt install lxterminal -y || error "Failed to install lxterminal."
	elif [ -f /usr/bin/pacman ];then
		echo "Package manager is: pacman"
		sudo pacman -S lxterminal || error "Failed to install lxterminal;."
	else
		error "Failed to find any package manager to install lxterminal."
	fi
fi
#Set package manager
if [ -f /usr/bin/apt ];then
	echo "Package manager is: apt"
	echo "apt-get" > "$PW_PREFIX/share/pi-ware/inst-pref"
elif [ -f /usr/bin/pacman ];then
	echo "Package manager is: pacman"
	echo "pacman -S" > "$PW_PREFIX/share/pi-ware/inst-pref"
else
	error "Failed to find any package manager to install lxterminal."
fi
echo "Done!"
echo "You can find Pi-Ware in Menu > Accessories > Pi-Ware."
| true
|
ccb83029234b49e42c7092e70b5f91829afdd94e
|
Shell
|
huawuque18/custler.uninode
|
/scripts/Nodes_Build.sh
|
UTF-8
| 14,952
| 3.390625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -eE
# NOTE(review): the "-eE" options in the shebang are bypassed when the script
# is invoked as "bash Nodes_Build.sh"; a "set -eE" in the body would be safer.
# (C) Sergey Tyurin  2021-09-02 10:00:00
# Disclaimer
##################################################################################################################
# You running this script/function means you will not blame the author(s)
# if this breaks your stuff. This script/function is provided AS IS without warranty of any kind.
# Author(s) disclaim all implied warranties including, without limitation,
# any implied warranties of merchantability or of fitness for a particular purpose.
# The entire risk arising out of the use or performance of the sample scripts and documentation remains with you.
# In no event shall author(s) be held liable for any damages whatsoever
# (including, without limitation, damages for loss of business profits, business interruption,
# loss of business information, or other pecuniary loss) arising out of the use of or inability
# to use the script or documentation. Neither this script/function,
# nor any part of it other than those parts that are explicitly copied from others,
# may be republished without author(s) express written permission.
# Author(s) retain the right to alter this disclaimer at any time.
##################################################################################################################
# All generated executables will be placed in the $ HOME/bin folder.
BUILD_STRT_TIME=$(date +%s)
SCRIPT_DIR=`cd "$(dirname "${BASH_SOURCE[0]}")" && pwd -P`
# env.sh supplies the versions/repos/paths used below (RUST_VERSION,
# BOOST_VERSION, *_GIT_REPO, *_GIT_COMMIT, *_SRC_DIR, Elector_ABI, ...).
source "${SCRIPT_DIR}/env.sh"
echo
echo "################################### FreeTON nodes build script #####################################"
echo "+++INFO: $(basename "$0") BEGIN $(date +%s) / $(date)"
BackUP_Time="$(date +'%F_%T'|tr ':' '-')"
# Select what to build from the CLI argument: "cpp", "rust", or both (default).
# NOTE(review): case "${@}" only behaves as intended with zero or one argument;
# with several arguments "$@" expands to multiple words.
case "${@}" in
    cpp)
        CPP_NODE_BUILD=true
        RUST_NODE_BUILD=false
        ;;
    rust)
        CPP_NODE_BUILD=false
        RUST_NODE_BUILD=true
        ;;
    *)
        CPP_NODE_BUILD=true
        RUST_NODE_BUILD=true
        ;;
esac
[[ ! -d $HOME/bin ]] && mkdir -p $HOME/bin
#=====================================================
# Packages set for different OSes
PKGS_FreeBSD="mc libtool perl5 automake llvm-devel gmake git jq wget gawk base64 gflags ccache cmake curl gperf openssl ninja lzlib vim sysinfo logrotate gsl p7zip zstd pkgconf python"
PKGS_CentOS="curl jq wget bc vim libtool logrotate openssl-devel clang llvm-devel ccache cmake ninja-build gperf gawk gflags snappy snappy-devel zlib zlib-devel bzip2 bzip2-devel lz4-devel libmicrohttpd-devel readline-devel p7zip libzstd-devel"
PKGS_Ubuntu="git mc curl build-essential libssl-dev automake libtool clang llvm-dev jq vim cmake ninja-build ccache gawk gperf texlive-science doxygen-latex libgflags-dev libmicrohttpd-dev libreadline-dev libz-dev pkg-config zlib1g-dev p7zip bc libzstd-dev"
PKG_MNGR_FreeBSD="sudo pkg"
PKG_MNGR_CentOS="sudo dnf"
PKG_MNGR_Ubuntu="sudo apt"
# find(1) test for executable files; GNU find supports -executable, FreeBSD's
# find does not and is switched to "-perm +111" below.
FEXEC_FLG="-executable"
#=====================================================
# Detect OS and set packages
OS_SYSTEM=`uname -s`
if [[ "$OS_SYSTEM" == "Linux" ]];then
    OS_SYSTEM="$(hostnamectl |grep 'Operating System'|awk '{print $3}')"
elif [[ ! "$OS_SYSTEM" == "FreeBSD" ]];then
    echo
    echo "###-ERROR: Unknown or unsupported OS. Can't continue."
    echo
    exit 1
fi
#=====================================================
# Set packages set & manager according to OS
case "$OS_SYSTEM" in
    FreeBSD)
        export ZSTD_LIB_DIR=/usr/local/lib
        PKGs_SET=$PKGS_FreeBSD
        PKG_MNGR=$PKG_MNGR_FreeBSD
        $PKG_MNGR delete -y rust boost-all|cat
        $PKG_MNGR update -f
        $PKG_MNGR upgrade -y
        FEXEC_FLG="-perm +111"
        sudo wget https://github.com/mikefarah/yq/releases/download/v4.4.0/yq_freebsd_amd64 -O /usr/local/bin/yq && sudo chmod +x /usr/local/bin/yq
        # libmicrohttpd \
        # does not build with libmicrohttpd-0.9.71
        # build & install libmicrohttpd-0.9.70
        mkdir -p $HOME/src
        cd $HOME/src
        # sudo pkg remove -y libmicrohttpd | cat
        fetch https://ftp.gnu.org/gnu/libmicrohttpd/libmicrohttpd-0.9.70.tar.gz
        tar xf libmicrohttpd-0.9.70.tar.gz
        cd libmicrohttpd-0.9.70
        ./configure && make && sudo make install
        ;;
    CentOS)
        export ZSTD_LIB_DIR=/usr/lib64
        PKGs_SET=$PKGS_CentOS
        PKG_MNGR=$PKG_MNGR_CentOS
        $PKG_MNGR -y update --allowerasing
        $PKG_MNGR group install -y "Development Tools"
        $PKG_MNGR config-manager --set-enabled powertools
        $PKG_MNGR --enablerepo=extras install -y epel-release
        $PKG_MNGR remove -y boost
        $PKG_MNGR install -y gcc-toolset-10-toolchain
        # Put the gcc-10 toolchain on PATH for the rest of this shell.
        source /opt/rh/gcc-toolset-10/enable
        sudo wget https://github.com/mikefarah/yq/releases/download/v4.4.0/yq_linux_amd64 -O /usr/bin/yq && sudo chmod +x /usr/bin/yq
        ;;
    Ubuntu)
        export ZSTD_LIB_DIR=/usr/lib/x86_64-linux-gnu
        PKGs_SET=$PKGS_Ubuntu
        PKG_MNGR=$PKG_MNGR_Ubuntu
        $PKG_MNGR install -y software-properties-common
        sudo add-apt-repository -y ppa:ubuntu-toolchain-r/ppa
        $PKG_MNGR remove -y libboost-all-dev|cat
        $PKG_MNGR update && $PKG_MNGR upgrade -y
        $PKG_MNGR install -y g++-10
        # Make gcc-10/g++-10/gcov-10 the system default compilers.
        sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 90 --slave /usr/bin/g++ g++ /usr/bin/g++-10 --slave /usr/bin/gcov gcov /usr/bin/gcov-10
        sudo wget https://github.com/mikefarah/yq/releases/download/v4.4.0/yq_linux_amd64 -O /usr/bin/yq && sudo chmod +x /usr/bin/yq
        ;;
    *)
        echo
        echo "###-ERROR: Unknown or unsupported OS. Can't continue."
        echo
        exit 1
        ;;
esac
#=====================================================
# Install packages
echo
echo '################################################'
echo "---INFO: Install packages ... "
$PKG_MNGR install -y $PKGs_SET
#=====================================================
# Install BOOST
echo
echo '################################################'
echo '---INFO: Install BOOST from source'
# Compare installed vs required BOOST as "major.minor" only (patch ignored).
Installed_BOOST_Ver="$(cat /usr/local/include/boost/version.hpp 2>/dev/null | grep "define BOOST_LIB_VERSION"|awk '{print $3}'|tr -d '"'| awk -F'_' '{printf("%d%s%2d\n", $1,".",$2)}')"
Required_BOOST_Ver="$(echo $BOOST_VERSION | awk -F'.' '{printf("%d%s%2d\n", $1,".",$2)}')"
if [[ "$Installed_BOOST_Ver" != "$Required_BOOST_Ver" ]];then
    mkdir -p $HOME/src
    cd $HOME/src
    # Remove any previous source tree and installed headers/libs first.
    sudo rm -rf $HOME/src/boost* |cat
    sudo rm -rf /usr/local/include/boost |cat
    sudo rm -f  /usr/local/lib/libboost* |cat
    Boost_File_Version="$(echo ${BOOST_VERSION}|awk -F. '{printf("%s_%s_%s",$1,$2,$3)}')"
    wget https://boostorg.jfrog.io/artifactory/main/release/${BOOST_VERSION}/source/boost_${Boost_File_Version}.tar.gz
    tar xf boost_${Boost_File_Version}.tar.gz
    cd $HOME/src/boost_${Boost_File_Version}/
    ./bootstrap.sh
    sudo ./b2 install --prefix=/usr/local
else
    echo "---INFO: BOOST Version ${BOOST_VERSION} already installed"
fi
#=====================================================
# Install or upgrade RUST
echo
echo '################################################'
echo "---INFO: Install RUST ${RUST_VERSION}"
cd $HOME
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --default-toolchain ${RUST_VERSION} -y
# curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs -o $HOME/rust_install.sh
# sh $HOME/rust_install.sh -y --default-toolchain ${RUST_VERSION}
# curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain ${RUST_VERSION} -y
source $HOME/.cargo/env
cargo install cargo-binutils
#=====================================================
# Build C++ node
if $CPP_NODE_BUILD;then
    echo
    echo '################################################'
    echo "---INFO: Build C++ node ..."
    cd $SCRIPT_DIR
    [[ -d ${TON_SRC_DIR} ]] && rm -rf "${TON_SRC_DIR}"
    echo "---INFO: clone ${CNODE_GIT_REPO} (${CNODE_GIT_COMMIT})..."
    git clone "${CNODE_GIT_REPO}" "${TON_SRC_DIR}"
    cd "${TON_SRC_DIR}"
    git checkout "${CNODE_GIT_COMMIT}"
    # Submodules may themselves contain submodules; init/update twice to be sure.
    git submodule init && git submodule update --recursive
    git submodule foreach 'git submodule init'
    git submodule foreach 'git submodule update --recursive'
    echo "---INFO: clone ${CNODE_GIT_REPO} (${CNODE_GIT_COMMIT})... DONE"
    echo
    echo "---INFO: build a node..."
    mkdir -p "${TON_BUILD_DIR}" && cd "${TON_BUILD_DIR}"
    cmake .. -G "Ninja" -DCMAKE_BUILD_TYPE=Release -DPORTABLE=ON
    ninja
    echo "---INFO: build a node... DONE"
    echo
    # cp $HOME/bin/lite-client $HOME/bin/lite-client_${BackUP_Time}|cat
    # cp $HOME/bin/validator-engine $HOME/bin/validator-engine_${BackUP_Time}|cat
    # cp $HOME/bin/validator-engine-console $HOME/bin/validator-engine-console_${BackUP_Time}|cat
    cp -f $TON_BUILD_DIR/lite-client/lite-client $HOME/bin
    cp -f $TON_BUILD_DIR/validator-engine/validator-engine $HOME/bin
    cp -f $TON_BUILD_DIR/validator-engine-console/validator-engine-console $HOME/bin
    #=====================================================
    echo "---INFO: build utils (convert_address)..."
    cd "${NODE_SRC_TOP_DIR}/utils/convert_address"
    cargo update
    cargo build --release
    cp "${NODE_SRC_TOP_DIR}/utils/convert_address/target/release/convert_address" "$HOME/bin/"
    echo "---INFO: build utils (convert_address)... DONE"
fi
#=====================================================
# Build rust node
if $RUST_NODE_BUILD;then
    echo
    echo '################################################'
    echo "---INFO: build RUST NODE ..."
    #---------------- crutch for user run
    sudo mkdir -p /node_db
    sudo chmod -R ugo+rw /node_db
    #----------------
    [[ -d ${RNODE_SRC_DIR} ]] && rm -rf "${RNODE_SRC_DIR}"
    # git clone --recurse-submodules "${RNODE_GIT_REPO}" $RNODE_SRC_DIR
    git clone "${RNODE_GIT_REPO}" "${RNODE_SRC_DIR}"
    cd "${RNODE_SRC_DIR}"
    git checkout "${RNODE_GIT_COMMIT}"
    git submodule init && git submodule update --recursive
    git submodule foreach 'git submodule init'
    git submodule foreach 'git submodule update --recursive'
    cd $RNODE_SRC_DIR
    cargo update
    # Drop the "dynamic_linking" feature so the node links statically.
    sed -i.bak 's%features = \[\"cmake_build\", \"dynamic_linking\"\]%features = \[\"cmake_build\"\]%g' Cargo.toml
    #====== Uncomment to disabe node's logs competely
    # sed -i.bak 's%log = "0.4"%log = { version = "0.4", features = ["release_max_level_off"] }%' Cargo.toml
    # Add `sha2-native` feature (adds explicit `ed25519-dalek` dependency because it uses newer sha2 version)
    # BSD/macOS sed requires an actual newline character to follow a\. I use copy+replace for compatibility
    sed -i.bak -e '/^\[dependencies\]/p; s/\[dependencies\]/ed25519-dalek = "1.0"/' Cargo.toml
    sed -i.bak -e '/^\[features\]/p; s/\[features\]/sha2-native = ["sha2\/asm", "ed25519-dalek\/asm"]/' Cargo.toml
    RUSTFLAGS="-C target-cpu=native" cargo build --release --features "compression,sha2-native"
    # --features "metrics"
    # --features "external_db,metrics"
    # cp $HOME/bin/rnode $HOME/bin/rnode_${BackUP_Time}|cat
    cp -f ${RNODE_SRC_DIR}/target/release/ton_node $HOME/bin/rnode
    #=====================================================
    # Build rust node console
    echo '################################################'
    echo "---INFO: Build rust node console ..."
    [[ -d ${RCONS_SRC_DIR} ]] && rm -rf "${RCONS_SRC_DIR}"
    git clone --recurse-submodules "${RCONS_GIT_REPO}" $RCONS_SRC_DIR
    cd $RCONS_SRC_DIR
    git checkout "${RCONS_GIT_COMMIT}"
    git submodule init
    git submodule update
    RUSTFLAGS="-C target-cpu=native" cargo build --release
    # Copy every executable produced by the console build into $HOME/bin.
    find $RCONS_SRC_DIR/target/release/ -maxdepth 1 -type f ${FEXEC_FLG} -exec cp -f {} $HOME/bin/ \;
    echo "---INFO: build RUST NODE ... DONE."
fi
#=====================================================
# Build TON Solidity Compiler (solc)
# echo "---INFO: build TON Solidity Compiler ..."
# [[ ! -z ${SOLC_SRC_DIR} ]] && rm -rf "${SOLC_SRC_DIR}"
# git clone --recurse-submodules "${SOLC_GIT_REPO}" "${SOLC_SRC_DIR}"
# cd "${SOLC_SRC_DIR}"
# git checkout "${SOLC_GIT_COMMIT}"
# mkdir ${SOLC_SRC_DIR}/build
# cd "${SOLC_SRC_DIR}/build"
# cmake ../compiler/ -DCMAKE_BUILD_TYPE=Release
# if [[ "$(uname)" == "Linux" ]];then
#     V_CPU=`nproc`
# else
#     V_CPU=`sysctl -n hw.ncpu`
# fi
# cmake --build . -- -j $V_CPU
# cp -f "${SOLC_SRC_DIR}/build/solc/solc" $HOME/bin/
# cp -f "${SOLC_SRC_DIR}/lib/stdlib_sol.tvm" $HOME/bin/
# echo "---INFO: build TON Solidity Compiler ... DONE."
#=====================================================
# Build TVM-linker
echo
echo '################################################'
echo "---INFO: build TVM-linker ..."
[[ ! -z ${TVM_LINKER_SRC_DIR} ]] && rm -rf "${TVM_LINKER_SRC_DIR}"
git clone --recurse-submodules "${TVM_LINKER_GIT_REPO}" "${TVM_LINKER_SRC_DIR}"
cd "${TVM_LINKER_SRC_DIR}"
git checkout "${TVM_LINKER_GIT_COMMIT}"
cd "${TVM_LINKER_SRC_DIR}/tvm_linker"
RUSTFLAGS="-C target-cpu=native" cargo build --release
cp -f "${TVM_LINKER_SRC_DIR}/tvm_linker/target/release/tvm_linker" $HOME/bin/
echo "---INFO: build TVM-linker ... DONE."
#=====================================================
# Build tonos-cli
echo
echo '################################################'
echo "---INFO: build tonos-cli ... "
[[ -d ${TONOS_CLI_SRC_DIR} ]] && rm -rf "${TONOS_CLI_SRC_DIR}"
git clone --recurse-submodules "${TONOS_CLI_GIT_REPO}" "${TONOS_CLI_SRC_DIR}"
cd "${TONOS_CLI_SRC_DIR}"
git checkout "${TONOS_CLI_GIT_COMMIT}"
cargo update
RUSTFLAGS="-C target-cpu=native" cargo build --release
# cp $HOME/bin/tonos-cli $HOME/bin/tonos-cli_${BackUP_Time}|cat
cp "${TONOS_CLI_SRC_DIR}/target/release/tonos-cli" "$HOME/bin/"
echo "---INFO: build tonos-cli ... DONE"
#=====================================================
# download contracts
echo
echo '################################################'
echo "---INFO: download contracts ... "
rm -rf "${NODE_SRC_TOP_DIR}/ton-labs-contracts"
rm -rf "${NODE_SRC_TOP_DIR}/Surf-contracts"
git clone ${CONTRACTS_GIT_REPO} "${NODE_SRC_TOP_DIR}/ton-labs-contracts"
cd "${NODE_SRC_TOP_DIR}/ton-labs-contracts"
git checkout $CONTRACTS_GIT_COMMIT
cd ${NODE_SRC_TOP_DIR}
git clone --single-branch --branch multisig-surf-v2 https://github.com/tonlabs/ton-labs-contracts.git "${NODE_SRC_TOP_DIR}/Surf-contracts"
RustCup_El_ABI_URL="https://raw.githubusercontent.com/tonlabs/rustnet.ton.dev/main/docker-compose/ton-node/configs/Elector.abi.json"
curl -o ${Elector_ABI} ${RustCup_El_ABI_URL} &>/dev/null
echo
echo '################################################'
# Report total wall-clock build time.
BUILD_END_TIME=$(date +%s)
Build_mins=$(( (BUILD_END_TIME - BUILD_STRT_TIME)/60 ))
Build_secs=$(( (BUILD_END_TIME - BUILD_STRT_TIME)%60 ))
echo
echo "+++INFO: $(basename "$0") on $HOSTNAME FINISHED $(date +%s) / $(date)"
echo "All builds took $Build_mins min $Build_secs secs"
echo "================================================================================================"
exit 0
| true
|
59c9049dc5dc178b62bccfb7fe1c9a6b3bdb0b45
|
Shell
|
simpsonjulian/neophyte
|
/filter.sh
|
UTF-8
| 302
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Rewrite history so that everything at the top level of every commit is moved
# into a subdirectory named after the given component (git filter-branch).
set -x

# die MESSAGE: print MESSAGE to stderr and abort.
die() {
  printf '%s\n' "$1" >&2
  exit 1
}

[ "$#" -eq 1 ] || die "I need a component name"

component=$1

# For every commit: create the component dir, then git-mv every top-level
# entry except ".", ".git" and the component dir itself into it.
# Fixes: read -r (don't mangle backslashes) and quoted ${o} so paths with
# spaces survive inside the filter shell.
git filter-branch -f --tree-filter \
"mkdir -p ${component} && find . -maxdepth 1 | egrep -v '^\.$|^\.git$|^\.\/${component}$' | while read -r o;\
do \
 git mv -f \"\${o}\" ${component};\
done" HEAD
| true
|
c41d0d86195d881f03374dbfab9cb24af7305559
|
Shell
|
HalleyTm/kira
|
/docker/kira/container/start.sh
|
UTF-8
| 976
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: wait while halt/exit flag files are present, then
# dispatch to the start script matching this node's NODE_TYPE.
set +e && source $ETC_PROFILE &>/dev/null && set -e
source $SELF_SCRIPTS/utils.sh
set -x
echoInfo "INFO: Staring $NODE_TYPE container $KIRA_SETUP_VER ..."
# Flag files the host uses to pause (halt) or stop (exit) this container.
HALT_CHECK="${COMMON_DIR}/halt"
EXIT_CHECK="${COMMON_DIR}/exit"
while [ -f "$HALT_CHECK" ] || [ -f "$EXIT_CHECK" ]; do
    if [ -f "$EXIT_CHECK" ]; then
        echoInfo "INFO: Ensuring sekaid process is killed"
        # Set the halt flag first so the loop keeps waiting after the kill.
        touch $HALT_CHECK
        pkill -15 sekaid || echoWarn "WARNING: Failed to kill sekaid"
        rm -fv $EXIT_CHECK
    fi
    echoInfo "INFO: Waiting for container to be unhalted..."
    sleep 30
done
# ${VAR,,} lowercases NODE_TYPE for a case-insensitive comparison (bash 4+).
if [ "${NODE_TYPE,,}" == "sentry" ] || [ "${NODE_TYPE,,}" == "priv_sentry" ] || [ "${NODE_TYPE,,}" == "seed" ]; then
    $SELF_CONTAINER/sentry/start.sh
elif [ "${NODE_TYPE,,}" == "snapshot" ]; then
    $SELF_CONTAINER/snapshot/start.sh
elif [ "${NODE_TYPE,,}" == "validator" ]; then
    $SELF_CONTAINER/validator/start.sh
else
    echoErr "ERROR: Unknown node type '$NODE_TYPE'"
    exit 1
fi
| true
|
bb93262f5003dfd5ef3bb83610af82c265c48411
|
Shell
|
Caesar-github/device-custom
|
/oem/oem_ipc/RkLunch.sh
|
UTF-8
| 3,145
| 3
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Rockchip IPC boot script: link web/media directories, pick the sysconfig
# database matching the attached sensor, start ISP/media services and mount
# the recording partition.

# check_linker TARGET LINK: create LINK -> TARGET unless LINK already exists
# as a symlink.
check_linker()
{
	[ ! -L "$2" ] && ln -sf "$1" "$2"
}
[ -f /etc/profile.d/enable_coredump.sh ] && source /etc/profile.d/enable_coredump.sh
# (the original created the /userdata link twice; once is enough)
check_linker /userdata   /oem/www/userdata
check_linker /media/usb0 /oem/www/usb0
check_linker /mnt/sdcard /oem/www/sdcard
if [ ! -f "/oem/sysconfig.db" ]; then
	# Pick the sysconfig database matching the sensor's native resolution.
	if media-ctl -p -d /dev/media1 | grep -q 3840x2160; then
		ln -s -f /oem/sysconfig-4K.db /oem/sysconfig.db
	fi
	if media-ctl -p -d /dev/media1 | grep -q 2688x1520; then
		ln -s -f /oem/sysconfig-2K.db /oem/sysconfig.db
	fi
	if media-ctl -p -d /dev/media1 | grep -q 1920x1080; then
		ln -s -f /oem/sysconfig-1080P.db /oem/sysconfig.db
	fi
	if media-ctl -p -d /dev/media1 | grep -q 2592x1944; then
		ln -s -f /oem/sysconfig-5M.db /oem/sysconfig.db
	fi
fi
#set max socket buffer size to 1.5MByte
sysctl -w net.core.wmem_max=1572864
export HDR_MODE=1
export enable_encoder_debug=0
#vpu 600M, kernel default 600M
#echo 600000000 >/sys/kernel/debug/mpp_service/rkvenc/clk_core
ipc-daemon --no-mediaserver &
sleep 2
# Non-empty when the quick-display app already launched the ISP pipeline.
QUICKDISPLAY=$(busybox ps | grep -w startup_app_ipc | grep -v grep)
if [ -z "$QUICKDISPLAY" ] ;then
	echo "run ispserver"
	ispserver &
	sleep 1
else
	echo "ispserver is running"
fi
# BUG FIX: default the capability flags. HasHDMI in particular was previously
# only assigned when an lt9611 bridge was found, which made the later
# "[ $HasHDMI -eq 1 ]" test fail with a syntax error on HDMI-less boards.
HasDisplay=0
HasHDMI=0
HasAudio=0
ls /sys/class/drm | grep "card0-"
if [ $? -ne 0 ] ;then
	echo "not found display"
	HasDisplay=0
else
	echo "find display"
	HasDisplay=1
	cat /proc/device-tree/compatible | grep lt9611
	if [ $? -ne 0 ] ;then
		echo "not HDMI"
	else
		echo "find HDMI"
		HasHDMI=1
	fi
fi
arecord -l |grep "card 0"
if [ $? -ne 0 ] ;then
	echo "not found sound card"
	HasAudio=0
else
	echo "find sound card"
	HasAudio=1
fi
# TODO:
# HasAudio=0
# Choose the mediaserver config from display/HDMI/audio capabilities and
# whether the quick-display app already owns the screen.
if [ $HasDisplay -eq 1 ]; then
	if [ $HasHDMI -eq 1 ]; then
		mediaserver -c /oem/usr/share/mediaserver/rv1109/ipc-hdmi-display.conf &
	else
		if [ -z "$QUICKDISPLAY" ]; then
			if [ $HasAudio -eq 1 ]; then
				mediaserver -c /oem/usr/share/mediaserver/rv1109/ipc-display.conf &
			else
				mediaserver -c /oem/usr/share/mediaserver/rv1109/ipc-display-without-audio.conf &
			fi
		else
			if [ $HasAudio -eq 1 ]; then
				mediaserver -c /oem/usr/share/mediaserver/rv1109/ipc.conf &
			else
				mediaserver -c /oem/usr/share/mediaserver/rv1109/ipc-without-audio.conf &
			fi
		fi
	fi
else
	if [ $HasAudio -eq 1 ]; then
		mediaserver -c /oem/usr/share/mediaserver/rv1109/ipc.conf &
	else
		mediaserver -c /oem/usr/share/mediaserver/rv1109/ipc-without-audio.conf &
	fi
fi
# mount media part for video recording
export MEDIA_DEV=/dev/block/by-name/media
export FSTYPE=ext4
if [ ! -L $MEDIA_DEV ]; then
	echo "media part not exit, do nothing";
	exit
fi
# prepare_part: format the partition (label "media") on first boot; recurses
# once after a successful format to re-check the label.
prepare_part()
{
	dumpe2fs -h $MEDIA_DEV 2>/dev/null| grep "media"
	if [ $? -ne 0 ]; then
		echo "Auto formatting $MEDIA_DEV to $FSTYPE"
		mke2fs -F -L media $MEDIA_DEV && resize2fs $MEDIA_DEV && tune2fs -c 0 -i 0 $MEDIA_DEV && prepare_part && return
	fi
}
prepare_part
echo "prepare_part /userdata/media"
mkdir -p /userdata/media && sync
echo "fsck /userdata/media"
fsck.$FSTYPE -y $MEDIA_DEV
mount $MEDIA_DEV /userdata/media
| true
|
82f473c38778585cd4dac6464538545aed65ea31
|
Shell
|
mbprtpmix/world
|
/bootstrap/bootstrap-from-other-linux.sh
|
UTF-8
| 745
| 2.796875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Bootstrap NixOS installation tooling from another Linux distribution:
# install Nix (as root), add the nixos-unstable channel and pull in
# nixos-generate-config / nixos-install / nixos-enter, then fetch install.sh.
set -eu
set -o pipefail
set -x
apt-get install -y sudo
# Allow installing nix as root, see
# https://github.com/NixOS/nix/issues/936#issuecomment-475795730
mkdir -p /etc/nix
echo "build-users-group =" > /etc/nix/nix.conf
# NOTE(review): piping a remote installer straight into sh executes unverified code.
curl -L https://nixos.org/nix/install | sh
# nix.sh trips -u and is noisy under -x; relax both while sourcing it.
set +u +x
. $HOME/.nix-profile/etc/profile.d/nix.sh
set -u -x
nix-channel --add https://nixos.org/channels/nixos-unstable nixpkgs
nix-channel --update
# Getting NixOS installation tools
nix-env -iE "_: with import <nixpkgs/nixos> { configuration = {}; }; with config.system.build; [ nixos-generate-config nixos-install nixos-enter manual.manpages ]"
curl -O https://raw.githubusercontent.com/johnae/world/master/bootstrap/install.sh
chmod +x install.sh
| true
|
2bba541157d7a7e41b0e8829cb93511ce061f72a
|
Shell
|
eriknw/pygraphblas
|
/docker_build.sh
|
UTF-8
| 1,354
| 3.5
| 4
|
[
"Apache-2.0"
] |
permissive
|
# Build (and optionally push) a pygraphblas Docker image.
# The first four arguments are mandatory; the original only rejected zero
# arguments and silently proceeded with empty variables for 1-3 arguments.
if [ $# -lt 4 ]
then
    echo "Usage: ./docker_build.sh SS_RELEASE PY_RELEASE BASE_NAME BRANCH [LOCATION PUSH]"
    echo
    echo "Example: ./docker_build.sh v3.3.3 v3.4.0 notebook main clone push"
    exit 1
fi

SS_RELEASE=$1
PY_RELEASE=$2
BASE_NAME=$3
BRANCH=$4
LOCATION=$5
PUSH=$6

# for BASE_NAME=notebook image
# set env var to 1 for faster SuiteSparse compilation, but the code will be slower
SS_COMPACT=${SS_COMPACT:-0}
SS_BURBLE=${SS_BURBLE:-0}

if [ "$LOCATION" = "clone" ]
then
    # Build from a fresh clone in a temp directory removed on exit.
    TMPDIR=$(mktemp -d)
    if [ ! -e "$TMPDIR" ]; then
        >&2 echo "Failed to create temp directory"
        exit 1
    fi
    trap "exit 1" HUP INT PIPE QUIT TERM
    trap 'rm -rf "$TMPDIR"' EXIT
    cd "$TMPDIR"
    git clone --branch "$BRANCH" https://github.com/Graphegon/pygraphblas.git
    cd pygraphblas
fi

docker build \
     --build-arg SS_RELEASE="${SS_RELEASE}" \
     --build-arg SS_COMPACT="${SS_COMPACT}" \
     --build-arg SS_BURBLE="${SS_BURBLE}" \
     -f "Dockerfile-${BASE_NAME}" \
     -t "graphblas/pygraphblas-${BASE_NAME}:${PY_RELEASE}" \
     .

docker tag "graphblas/pygraphblas-${BASE_NAME}:${PY_RELEASE}" "graphblas/pygraphblas-${BASE_NAME}:latest"

if [ "$PUSH" = "push" ]
then
    docker push "graphblas/pygraphblas-${BASE_NAME}:${PY_RELEASE}"
    docker push "graphblas/pygraphblas-${BASE_NAME}:latest"
fi
| true
|
09be1c41f92e7fb3dcf5988ed71b0902304de28b
|
Shell
|
BroderickHigby/dotfiles
|
/bin/hosts_file
|
UTF-8
| 791
| 4.03125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Manage /etc/hosts entries: add or remove an "IP hostname" line.
# Usage: hosts_file add|remove HOSTNAME [IP]   (IP defaults to 127.0.0.1)

# /etc/hosts is root-owned, so refuse to run unprivileged.
if [ "$(id -u)" != "0" ]; then
   echo "This script must be run as root" 1>&2
   exit 1
fi

DEFAULT_IP=127.0.0.1
IP=${3:-$DEFAULT_IP}
HOSTS_FILE='/etc/hosts'

# Escape dots so the IP/hostname match literally inside the regexes below.
ESCAPED_IP=$(echo "$IP" | sed 's/\./\\./g')
ESCAPED_HOSTNAME=$(echo "$2" | sed 's/\./\\./g')

case "$1" in
    add)
        if grep -q "^$ESCAPED_IP[[:space:]]$ESCAPED_HOSTNAME\$" "$HOSTS_FILE"; then
            echo "Host already exists"
        else
            sed -i -e '$a\' "$HOSTS_FILE"; # Make sure file ends with new line
            echo "$IP $2" >> "$HOSTS_FILE"
        fi
        ;;
    remove)
        # BUG FIX: "sed -ie" was parsed as -i with backup suffix "e",
        # leaving a stray /etc/hostse backup file. Use -i -e explicitly.
        sed -i -e "\|^$ESCAPED_IP $ESCAPED_HOSTNAME\$|d" "$HOSTS_FILE"
        ;;
    *)
        echo "Usage: "
        echo "hosts_file [add|remove] [hostname] [ip]"
        exit 1
        ;;
esac
exit 0
| true
|
130e151868acab038829fd494fc02959206f0e0a
|
Shell
|
Sylius/Vagrant
|
/shell_provisioner/module/debian.sh
|
UTF-8
| 681
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Debian
# Vagrant provisioner module: configure locale, timezone, console keyboard,
# an /etc/hosts entry for the app and the dotdeb apt repository.
# Expects APP_DOMAIN in the environment.
# Locales
sed -i 's/# nl_BE.UTF-8 UTF-8/nl_BE.UTF-8 UTF-8/' /etc/locale.gen
locale-gen
# echo 'LANG=nl_BE.UTF-8' > /etc/default/locale
# Timezone
echo "Europe/Brussels" > /etc/timezone
dpkg-reconfigure -f noninteractive tzdata
# Console keyboard
sed -i 's/XKBLAYOUT=.*/XKBLAYOUT="be"/' /etc/default/keyboard
setupcon --force
# Host file
# NOTE(review): appends a new line on every provision run; not idempotent.
echo 127.0.0.1 $APP_DOMAIN >> /etc/hosts
# Add dotdeb repository
wget https://www.dotdeb.org/dotdeb.gpg
sudo apt-key add dotdeb.gpg
cat << EOF >/etc/apt/sources.list.d/dotdeb.list
deb http://packages.dotdeb.org jessie all
deb-src http://packages.dotdeb.org jessie all
EOF
# Sync package index files
apt-get update
|
64adddd611ec7b6e10ec5f1d5cd768fa1de131d4
|
Shell
|
pacurtin/RecursiveEncodingConvertor
|
/changeEncoding.sh
|
UTF-8
| 360
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Convert every file in the current directory from WINDOWS-1252 to UTF-8
# in place, going through a temporary "<name>.new" file.
for file in * ; do
    # BUG FIX: write with ">" (truncate) instead of ">>": with append, a
    # leftover *.new from an earlier interrupted run would get the newly
    # converted text appended after its stale contents.
    if iconv -f WINDOWS-1252 -t UTF-8 "$file" > "${file%.txt}.new"; then
        echo "Successful. Deleting original ${file%.txt} and renaming new version to ${file%.txt}"
        rm "$file"
        mv "${file%.txt}.new" "$file"
    else
        echo "Failed. deleting ${file%.txt}.new"
        rm "${file%.txt}.new"
    fi
done
|
b991731b2254df8a9eb3db45c5540079d8b0f266
|
Shell
|
ChristopheCluizel/traefik-poc
|
/scripts/deploy.sh
|
UTF-8
| 1,151
| 4.03125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Deploy: rsync this project to a remote EC2 host given via --server-ip.
set -e
function print_usage() {
  echo "Deploy.
Usage: $0 [arguments]
    -h, --help                  Display this help message
    --server-ip <SERVER_IP>     The IP of the server we want to deploy on
"
}
# check that we run this script from project root directory
CURRENT_DIRECTORY=$( basename "$PWD" )
if [ "$CURRENT_DIRECTORY" != "traefik-poc" ]; then
  echo "You must run this script from the project root directory."
  exit 1
fi
# Retrieve project path
DIR="$( pwd )"
# Load helpers
# (provides check_argument and unknown_argument used below)
# shellcheck source=scripts/functions.sh
source "${DIR}/scripts/functions.sh"
# get input arguments
while [[ $# -gt 0 ]]; do
  key="$1"
  case $key in
    --server-ip)
      SERVER_IP="$2"
      shift # past argument
      shift # past value
      ;;
    -h|--help)
      print_usage
      exit 0
      ;;
    *)
      unknown_argument "$@"
      ;;
  esac
done
# Validate mandatory arguments
check_argument "server-ip" "$SERVER_IP"
echo "==== Deploy"
# Sync the whole project directory (minus venv/IDE/git noise) to the server.
cd ..
rsync -avz --exclude "venv3" --exclude ".idea" --exclude ".git" traefik-poc ec2-user@"$SERVER_IP":~
| true
|
be6c900cc556def2a9ed26acc06d0696f5e90f6b
|
Shell
|
suchuanpao/scp_scripts
|
/scp_install_cmake.sh
|
UTF-8
| 496
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash -e
# Build and install the latest CMake from source (github.com/Kitware/CMake).
# "-e" in the shebang is lost when invoked as "bash scp_install_cmake.sh",
# so set it explicitly too.
set -e

if [ "$SUCHP_HOME" = "" ]; then
    SUCHP_HOME=$HOME/suchp_familly
fi

SCP_PREFIX=$SUCHP_HOME
SCP_GITHUB=$SCP_PREFIX/github
PATH_GITHUB_CMAKE=$SCP_GITHUB/cmake

# cmake is a build prerequisite for YouCompleteMe and LLVM; a recent version
# is required, so the distro package is removed first.
SCP_DOWNLOAD_CMAKE_CODE(){
    sudo apt-get autoremove cmake
    mkdir -p "$PATH_GITHUB_CMAKE"
    git clone https://github.com/Kitware/CMake.git "$PATH_GITHUB_CMAKE"
}

SCP_DOWNLOAD_CMAKE_CODE
cd "$PATH_GITHUB_CMAKE"
# "&&" (instead of ";") stops before make if configure fails, even when the
# script is run without -e taking effect.
./configure && make
sudo make install
| true
|
e658cd5ee110418088934ef28ca92ef0a0f798c2
|
Shell
|
mcgrizzler/BashScripts
|
/update-all
|
UTF-8
| 267
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/sh
# Update, upgrade and autoremove apt packages with friendly progress banners.

UPDATESTR='\nFetching Updates...\n'
UPGRADESTR='\nInstalling Updates...\n'
CLEANUPSTR='\nCleaning Up...\n'
DONE='\nAll Done :)\n'

# Use printf %b to expand the \n escapes deterministically: under /bin/sh,
# echo's escape handling differs between shells (dash interprets them,
# bash does not), so the banners rendered differently depending on the shell.
printf '%b\n' "$UPDATESTR"
sudo apt update
printf '%b\n' "$UPGRADESTR"
sudo apt upgrade -y
printf '%b\n' "$CLEANUPSTR"
sudo apt autoremove
printf '%b\n' "$DONE"
| true
|
34553a7d48716bd44380bfbb7069f64ed895844d
|
Shell
|
bricksdont/embrace-noise
|
/scripts/scoring/score_lm_all.sh
|
UTF-8
| 1,884
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# Score filtered data sets with in-domain and out-of-domain language models
# by submitting one SLURM job per (data set, model) pair.
base=/net/cephfs/home/mathmu/scratch/noise-distill
source $base/venvs/fairseq3/bin/activate
# Cluster-specific environment modules (GPU + CUDA toolchain).
module unuse /apps/etc/modules/start/
module use /sapps/etc/modules/start/
module load volta cuda/10.0
trg=en
scripts=$base/scripts
filtered=$base/filtered
models_lm=$base/models_lm
scores_lm=$base/scores_lm
mkdir -p $scores_lm
score_type="neglogprob"
# basic approach: score all filtered data sets
# subset of data sets that should be scored
SCORE_SUBSET=(
    "raw_paracrawl.100"
)
# contains ELEMENT... VALUE
# Echo "y" and return 0 when VALUE (the last argument) equals one of the
# preceding arguments; echo "n" and return 1 otherwise.
function contains() {
    local target="${!#}"   # last positional argument
    local idx
    for (( idx = 1; idx < $#; idx++ )); do
        [ "${!idx}" != "${target}" ] && continue
        echo "y"
        return 0
    done
    echo "n"
    return 1
}
# For every filtered data set not yet scored (and listed in SCORE_SUBSET),
# submit two scoring jobs: one with the in-domain LM, one with the
# out-of-domain LM.
for filtered_sub in $filtered/*; do
    echo "filtered_sub: $filtered_sub"
    name=$(basename $filtered_sub)
    scores_lm_sub=$scores_lm/$name
    # Existing output folder means this data set was already scored.
    if [[ -d $scores_lm_sub ]]; then
        echo "Folder exists: $scores_lm_sub"
        echo "Skipping."
        continue
    fi
    if [ $(contains "${SCORE_SUBSET[@]}" $name) == "n" ]; then
        echo "name: $name not in subset that should be scored"
        echo "Skipping."
        continue
    fi
    mkdir -p $scores_lm_sub
    # LM in-domain scoring
    model_path=$models_lm/baseline
    input=$filtered_sub/train.bpe.$trg
    output=$scores_lm_sub/scores.lm.indomain
    sbatch --qos=vesta --time=12:00:00 --gres gpu:Tesla-V100:1 --cpus-per-task 1 --mem 16g $base/scripts/scoring/score_lm_generic.sh $input $output $model_path $score_type $scripts
    # LM out-of-domain scoring
    model_path=$models_lm/raw_paracrawl.100
    input=$filtered_sub/train.bpe.$trg
    output=$scores_lm_sub/scores.lm.outdomain
    sbatch --qos=vesta --time=12:00:00 --gres gpu:Tesla-V100:1 --cpus-per-task 1 --mem 16g $base/scripts/scoring/score_lm_generic.sh $input $output $model_path $score_type $scripts
done
| true
|
ad8e253c2b18171e8c427dd941e298700b997fe1
|
Shell
|
mpichl87/easy-pachyderm
|
/bin/curl.bash
|
UTF-8
| 189
| 2.875
| 3
|
[] |
no_license
|
# Install curl via apt, silently.
__install_curl() {
    sudo apt install -y curl &> /dev/null
}

# Remove curl again.
__purge_curl() {
    sudo apt purge -y curl
}

# Transparent wrapper: make sure the real curl binary exists (installing it
# on first use), then forward all arguments to the real binary.
curl() {
    if ! which curl &> /dev/null; then
        __install_curl
    fi &&
    command curl "$@"
}
| true
|
d0bb242cd231419ebed52a33bb33a04f773f8d39
|
Shell
|
MandyMF/docker-mooc-2020
|
/part3/exercise3/git_gut.sh
|
UTF-8
| 700
| 3.6875
| 4
|
[
"MIT"
] |
permissive
|
# Interactively clone a GitHub repository, build its Dockerfile and push the
# resulting image to Docker Hub. Works in a throwaway /usr/app checkout.

# fail MESSAGE: print the message to stderr, remove the checkout and abort.
function fail {
    printf '%s\n' "$1" >&2
    rm -rf app
    exit 1
}
mkdir /usr/app
cd /usr/app
echo "Input GitHub repository url to build image from:"
read GIT_URL
git clone $GIT_URL . || fail "Unable to clone $GIT_URL. Exiting."
# The repo must carry a Dockerfile at its root to be buildable.
test -f Dockerfile || fail "Dockerfile not found. Exiting."
echo "Insert Docker username:"
read DOCKER_USER
# Prompts for the password interactively.
docker login -u $DOCKER_USER
echo "Insert Docker repo name:"
read DOCKER_REPO
echo "Insert image name (to be used as tag in this demo):"
read IMGNAME
docker build -t $DOCKER_USER/$DOCKER_REPO:$IMGNAME .
docker push $DOCKER_USER/$DOCKER_REPO:$IMGNAME && echo "Image successfully pushed to $DOCKER_USER/$DOCKER_REPO:$IMGNAME at Docker Hub!"
# Clean up the checkout after a successful run.
cd /usr
rm -rf app
|
03283889e803a9e440197385f3932439e6534ace
|
Shell
|
tshemake/Software-Development
|
/stepik/762/30682/step_11/script.sh
|
UTF-8
| 202
| 2.984375
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# Read two integers a and b from standard input (one line) and print the sum
# a + (a+1) + ... + b.

# sum_range A B: print the sum of the integers from A to B inclusive
# (prints 0 when A > B, matching the original while-loop behavior).
sum_range() {
    local i sum=0
    for (( i = $1; i <= $2; i++ )); do
        sum=$(( sum + i ))
    done
    echo "$sum"
}

read -r a b  # the two bounds, separated by whitespace
sum_range "$a" "$b"
| true
|
9ffe8144f0c576f2415b49fcd39648b6b3228b68
|
Shell
|
yuya-takeyama/blog.yuyat.jp
|
/scripts/push_built_files.sh
|
UTF-8
| 358
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Commit the built ./public directory back to GitHub with ghcp.
# Expects GITHUB_REPOSITORY and GITHUB_BRANCH in the environment.
set -eu
set -o pipefail
# git status --porcelain prints nothing when ./public has no changes.
if [ -z "$(git status --porcelain public)" ]; then
  echo "There are nothing to push"
else
  echo "There are changes to push"
  ghcp commit \
    -r "$GITHUB_REPOSITORY" \
    -b "$GITHUB_BRANCH" \
    -m "Update built files" \
    public
  echo "Pushed built files"
  # Intentional non-zero exit: signals CI to retry against the new commit.
  echo "::notice::Retry in the next job"
  exit 1
fi
| true
|
04dda00e5f88ffbe02497ae59b12501a70f576fa
|
Shell
|
fbriol/otb-feedstock
|
/otb/scripts/activate.sh
|
UTF-8
| 559
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# conda activate hook for OTB: remember any pre-existing PYTHONPATH /
# OTB_APPLICATION_PATH (so the deactivate hook can restore them), then
# prepend this environment's OTB python bindings and applications.

# Store existing env vars and set to this conda env
if [[ -n "${PYTHONPATH}" ]]; then
    export _CONDA_SET_PYTHONPATH=${PYTHONPATH}
fi

if [[ -n "${OTB_APPLICATION_PATH}" ]]; then
    export _CONDA_SET_OTB_APPLICATION_PATH=${OTB_APPLICATION_PATH}
fi

if [ -d "${CONDA_PREFIX}/lib/otb/python/" ]; then
    # BUG FIX: only append the saved path when it is non-empty. The previous
    # unconditional ":${_CONDA_SET_PYTHONPATH}" left a trailing ":" when no
    # prior PYTHONPATH existed, which Python treats as "add the current
    # working directory to sys.path".
    export PYTHONPATH=${CONDA_PREFIX}/lib/otb/python/${_CONDA_SET_PYTHONPATH:+:${_CONDA_SET_PYTHONPATH}}
fi

if [ -d "${CONDA_PREFIX}/lib/otb/applications/" ]; then
    export OTB_APPLICATION_PATH=${CONDA_PREFIX}/lib/otb/applications/${_CONDA_SET_OTB_APPLICATION_PATH:+:${_CONDA_SET_OTB_APPLICATION_PATH}}
fi
| true
|
6812eb0dd2f02390be9e835804fbdafd4980b030
|
Shell
|
input-output-hk/cardano-js-sdk
|
/packages/e2e/local-network/scripts/is-db-synced.sh
|
UTF-8
| 534
| 3.328125
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/usr/bin/env bash
# Healthcheck: exit 0 once the cardano db-sync database has reached epoch 3,
# otherwise exit 9. DB credentials come from docker secrets files.
set -euo pipefail
here="$(cd "$(dirname "$0")" >/dev/null 2>&1 && pwd)"
root="$(cd "$here/.." && pwd)"
cd "$root"
export PATH=$PWD/bin:$PATH
user=$(cat /run/secrets/postgres_user)
pass=$(cat /run/secrets/postgres_password)
db=$(cat /run/secrets/postgres_db)
# Latest synced epoch number, as psql's default tabular output.
queryResult=$(psql -d postgresql://"$user":"$pass"@"$POSTGRES_HOST":"$POSTGRES_PORT"/"$db" -c 'select epoch.no from epoch order by epoch.no DESC limit 1')
# Split the tabular output on whitespace; the epoch value is the third token
# (after the column header and the separator line).
arr=(${queryResult//\\n/ })
epoch="${arr[2]}"
if [[ $epoch -lt 3 ]]; then
  exit 9
fi
exit 0
|
b41d55e7a9602ebe88a96acbf7910397a6bd96ff
|
Shell
|
babuaravind/operating-systems
|
/shell-largest-digit.sh
|
UTF-8
| 175
| 3.390625
| 3
|
[] |
no_license
|
echo "ENTER THE NUMBER"
read -r a

# max_digit N: print the largest decimal digit of N (0 for an empty/invalid N,
# matching the original loop's behavior).
# Uses shell arithmetic instead of forking `expr` per iteration.
max_digit() {
    local n=$1 max=0 r
    while [ "$n" -gt 0 ] 2>/dev/null; do
        r=$(( n % 10 ))
        if [ "$r" -gt "$max" ]; then
            max=$r
        fi
        n=$(( n / 10 ))
    done
    echo "$max"
}

echo "THE LARGEST DIGIT OF THE NUMBER:$(max_digit "$a")"
| true
|
95a320d4194933d43a995c3fcd919a6c8f7e7108
|
Shell
|
blomstertj/iocage-plugin-deluge-pip-openvpn
|
/overlay/usr/local/etc/rc.d/pingwait
|
UTF-8
| 438
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/sh
#
# PROVIDE: pingwait
# REQUIRE: openvpn ipfw
#
# MANDATORY:
#
# pingwait_enable (bool): Set to NO by default.
# Set it to YES to enable pingwait.
# FreeBSD rc.d service that blocks at boot until the network is reachable
# (i.e. until one ping to www.google.com succeeds).
. /etc/rc.subr
name="pingwait"
rcvar="pingwait_enable"
start_cmd="${name}_start"
stop_cmd=":"
# NOTE(review): retries immediately with no delay between failed pings;
# consider a short sleep to avoid a tight loop while the link is down.
pingwait_start()
{
    while true; do ping -c1 www.google.com > /dev/null && break; done
}
: ${pingwait_enable:="NO"}
load_rc_config $name
run_rc_command "$1"
| true
|
a8d491e0feda95608d2c6a5afd013aaac71130ee
|
Shell
|
zwqjsj0404/hadoop-dev
|
/bin/hadoop-fuse-dfs.sh
|
UTF-8
| 685
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Assemble the Hadoop jar classpath and native library paths, then launch
# fuse_dfs with all script arguments forwarded.
# cp fuse_dfs to $HADOOP_HOME/bin
# ./hadoop-fuse-dfs.sh dfs://localhost:8020 /mnt/fuse-dfs -d
HADOOP_HOME=/home/eli/hadoop-3.0.0-SNAPSHOT
HADOOP_LIBS=$HADOOP_HOME/lib/native
JVM_LIB=$JAVA_HOME/jre/lib/amd64/server

# Append every jar from the four Hadoop jar directories, in the same order
# the original enumerated them: common, common/lib, hdfs, hdfs/lib.
for subdir in common common/lib hdfs hdfs/lib; do
  for jar in ${HADOOP_HOME}/share/hadoop/${subdir}/*.jar; do
    CLASSPATH+="$jar:"
  done
done

export LD_LIBRARY_PATH=$HADOOP_LIBS:$JVM_LIB:${LD_LIBRARY_PATH}
env CLASSPATH="$CLASSPATH" $HADOOP_HOME/bin/fuse_dfs $@
| true
|
06ac2089e019a0974aca359fdfb65c9b56d6961c
|
Shell
|
mulepiemmason/alone
|
/build-initramfs
|
UTF-8
| 260
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# For each Go package path given on the command line: build it with the
# 'alone'/'netgo' tags, then pipe the binary through the local packer
# (main.go) to produce "<basename>.alone" in the current directory.
set -e

build () {
	PKG="$1"
	NAME="${PKG##*/}"      # basename of the import path
	OUT="$NAME.alone"
	T="$OUT.tmp"
	# Clean up the in-progress temp binary if the script dies.  $T is
	# deliberately expanded NOW so the trap targets this iteration's file.
	trap "rm -f -- \"$T\"" EXIT
	go build -tags 'alone netgo' -o "$T" -- "$PKG"
	go run main.go <"$T" >"$OUT"
	# Bug fix: remove the temp binary on success as well -- previously every
	# successful iteration leaked its .tmp file, because the EXIT trap only
	# ever removed the most recently built one.
	rm -f -- "$T"
}

for p in "$@"; do
	build "$p"
done
| true
|
8d500a62353540d18e15db62840884380679f2b3
|
Shell
|
m0zgen/create-lemp-site
|
/add-site-to-srv.sh
|
UTF-8
| 5,108
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# This script works in the /srv/www directory
# Reference article: https://sys-adm.in/sections/os-nix/830-fedora-linux-ustanovka-nastrojka-lemp-nginx-php-fpm.html
# Created by Yegeniy Goncharov, https://sys-adm.in
#
# Envs
# ---------------------------------------------------\
PATH=$PATH:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin
SCRIPT_PATH=$(cd `dirname "${BASH_SOURCE[0]}"` && pwd)
# Arg vars
# ---------------------------------------------------\
VHOST=${VHOST}
FLAG=${FLAG}
# Additions
# ---------------------------------------------------\
Info() {
	# Print all arguments in bold green, followed by a newline.
	# Bug fix: the old body interpolated "$@" into the printf FORMAT string,
	# which broke on '%' characters and on multiple arguments.  A fixed
	# format with %b treats the message as data while still honouring the
	# \n escapes callers embed in their strings.
	printf '\033[1;32m%b\033[0m\n' "$*"
}
Error()
{
	# Print all arguments in bold red, followed by a newline.
	# Bug fix: fixed format string + %b instead of interpolating "$@" into
	# the format -- immune to '%' in the message, still interprets \n etc.
	printf '\033[1;31m%b\033[0m\n' "$*"
}
Warn() {
	# Print all arguments in bold magenta with NO trailing newline
	# (callers embed their own \n escapes).
	# Bug fix: fixed format string + %b instead of interpolating "$@" into
	# the printf format, which broke on '%' and on multiple arguments.
	printf '\033[1;35m%b\033[0m' "$*"
}
usage()
{
cat << EOF
Usage: $0 options
OPTIONS:
-c Create VirtualHost
-r Remove VirtualHost
-l List VirtualHost
Example:
$0 -c vhost
$0 -r vhost
$0 -l show
EOF
}
# Checking arguments
# ---------------------------------------------------\
while [[ $# > 1 ]]
do
key="$1"
shift
case $key in
-c|--create)
VHOST="$1"
FLAG="1"
shift
;;
-r|--remove)
VHOST="$1"
FLAG="2"
shift
;;
-l|--list)
FLAG="3"
shift
;;
esac
done
# Checking folders
# ---------------------------------------------------\
if [[ ! -d /srv/www ]]; then
mkdir -p /srv/www
fi
if [[ ! -d /etc/nginx/sites-available ]]; then
mkdir -p /etc/nginx/sites-available
fi
if [[ ! -d /etc/nginx/sites-enabled ]]; then
mkdir -p /etc/nginx/sites-enabled
fi
# Show Help (usage)
# ---------------------------------------------------\
if [[ "$FLAG" == "" ]]; then
usage
fi
# General vars
# ---------------------------------------------------\
domain="local"
DOMAIN_NAME=$VHOST.$domain
public_html="public_html"
webroot="/srv/www"
CHOWNERS="nginx:webadmins"
DIRECTORY=$webroot/$DOMAIN_NAME/$public_html
INDEX_HTML="$DIRECTORY/index.php"
PATH_TO_CONF="/etc/nginx/sites-available"
CONF_FILE="$PATH_TO_CONF/$DOMAIN_NAME.conf"
CONF_FILE_NAME="$DOMAIN_NAME.conf"
LOCAL_IP=$(hostname -I | cut -d' ' -f1)
# Functions
# ---------------------------------------------------\
# genIndex: write a minimal placeholder index.php for the new vhost into
# $INDEX_HTML.  The heredoc body is emitted verbatim (shell variables
# expand), so do not edit it unless you mean to change the generated page.
genIndex(){
cat <<EOF >$INDEX_HTML
<html>
<head>
<title>${DOMAIN_NAME}</title>
</head>
<body>
<h1>${DOMAIN_NAME} working!</h1>
</body>
</html>
EOF
}
# genConf: emit the nginx server block for the new vhost into $CONF_FILE.
# PHP requests are handed to php-fpm over its unix socket; logs and docroot
# live under /srv/www/<vhost>/.  Shell variables expand inside the heredoc;
# the \$ escapes keep nginx's own fastcgi variables literal.
genConf(){
cat <<EOF >$CONF_FILE
server {
server_name ${DOMAIN_NAME};
access_log /srv/www/${DOMAIN_NAME}/logs/access.log;
error_log /srv/www/${DOMAIN_NAME}/logs/error.log;
root /srv/www/${DOMAIN_NAME}/public_html;
location / {
index index.html index.htm index.php;
}
location ~ \.php$ {
include /etc/nginx/fastcgi_params;
fastcgi_pass unix:/var/run/php-fpm/php-fpm.sock;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME \$document_root\$fastcgi_script_name;
}
}
EOF
}
# If argument equal -c (create new site)
# ---------------------------------------------------\
if [[ "$FLAG" == "1" ]]; then
if [ -d "$DIRECTORY" ]; then
# if exist
echo -e "Directory exist"
echo -e "Exit!"
else
# not exist
Info "\nCreate: $DIRECTORY"
/bin/mkdir -p $DIRECTORY
mkdir $webroot/$DOMAIN_NAME/logs
echo "Create $INDEX_HTML"
/bin/touch $INDEX_HTML
genIndex
echo -e "File $INDEX_HTML created"
echo "Change vhost folder permission..."
/bin/chown -R $CHOWNERS $webroot/$DOMAIN_NAME
/bin/chmod -R 775 $webroot/$DOMAIN_NAME
echo "Create conf file $CONF_FILE"
/bin/touch $CONF_FILE
genConf
cd /etc/nginx/sites-enabled/
ln -s /etc/nginx/sites-available/$DOMAIN_NAME.conf
echo -e "Update /etc/hosts file\nAdd $LOCAL_IP $DOMAIN_NAME"
echo "$LOCAL_IP $DOMAIN_NAME" >> /etc/hosts
echo "Restart NGINX..."
systemctl restart nginx.service
Info "Done!"
Warn "\nPlease add include conf folder into nginx.conf parameter:\ninclude /etc/nginx/sites-enabled/*.conf;\n\n"
fi
fi
# If argument equal -r (remove new site)
# ---------------------------------------------------\
if [[ "$FLAG" == "2" ]]
then
if [ -d "$DIRECTORY" ]; then
# if exist
Warn "\nRemoving $VHOST"
echo "Remove directory $webroot/$DOMAIN_NAME"
/bin/rm -rf $webroot/$DOMAIN_NAME
echo "Remove conf file $CONF_FILE"
/bin/rm -f $CONF_FILE
echo "Remove link /etc/nginx/sites-enabled/$CONF_FILE_NAME"
/bin/rm -f /etc/nginx/sites-enabled/$DOMAIN_NAME.conf
echo "Comment /etc/hosts param..."
/bin/sed -i "s/$LOCAL_IP $DOMAIN_NAME/#$LOCAL_IP $DOMAIN_NAME/" /etc/hosts
echo "Restart NGINX..."
systemctl restart nginx.service
Info "Done!\n"
else
Error "\nDirectory not exist!\nPlease use remove command without extention\nExit!\n"
exit 1
fi
fi
# If argument equal -l (remove new site)
# ---------------------------------------------------\
if [[ "$FLAG" == "3" ]]
then
Info "\nSites created"
ls /etc/nginx/sites-available/
Info "\nSites enabled"
ls /etc/nginx/sites-enabled
Info "\n/srv/www folder list"
ls /srv/www
Info "\nTotal:"
ls /etc/nginx/sites-available/ | wc -l
echo -e ""
fi
| true
|
96bdc2bdfad1f718af53ebc96cefbdfd215b6b3b
|
Shell
|
diegovalle/new.crimenmexico
|
/download.sh
|
UTF-8
| 2,942
| 3.34375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# shellcheck disable=SC2001
set -euo pipefail #exit on error, undefined and prevent pipeline errors
IFS=$'\n\t'
SNSP_DIR=clean/snsp-data
ESTADOS_FC_ZIP=$SNSP_DIR/estados_fc_original.zip
MUN_FC_ZIP=$SNSP_DIR/municpios_fc_original.zip
ESTADOS_VIC_ZIP=$SNSP_DIR/estados_vic_original.zip
estatal_download() {
regex="(?<=href=\")[^\"][^\"]*(?=\">$4.*<\/a>)"
INTERMEDIATE_LINK=$(curl -s -L "$1" | \
grep -Po "$regex" | tail -n1 | \
sed 's| |%20|g' | grep -oP '(?<=/)[0-9a-zA-Z_-]{20,}(?=/)')
echo "$INTERMEDIATE_LINK"
gdown "https://drive.google.com/uc?id=$INTERMEDIATE_LINK" -O "$SNSP_DIR"/"$3"
#drive_direct_download "$INTERMEDIATE_LINK"
}
municipal_fc_download() {
#ggID='1FoFXpt4OeEXP8qeDzPKU-qky1f5iL8Gh'
ggID=$(curl -s "$1" | \
grep -Po "(?<=href=\")[^\"][^\"]*(?=\">Cifras de Incidencia Delictiva Municipal, 2015.*)" | \
sed 's| |%20|g' | grep -oP '(?<=/)[0-9a-zA-Z_-]{20,}(?=/)')
echo "ggId: $ggID"
#ggURL='https://drive.google.com/uc?export=download'
#curl -sc "$COOKIE_TMP" "${ggURL}&id=${ggID}" >/dev/null
#getcode="$(awk '/_warning_/ {print $NF}' $COOKIE_TMP)"
#echo "getcode: $getcode"
#curl -Lb "$COOKIE_TMP" "${ggURL}&confirm=${getcode}&id=${ggID}" -o "$MUN_FC_ZIP"
gdown "https://drive.google.com/uc?id=$ggID" -O "$SNSP_DIR"/municipios.csv
}
convert_to_csv() {
  # Extract the single data file from a SNSP zip archive ($1) and leave it in
  # $SNSP_DIR as $2.csv, converting via libreoffice when the entry is an
  # Excel workbook (.xls/.xlsx/.xlsb).  Exits the whole script (set -e style)
  # if the archive contains neither an Excel file nor a CSV.
  local archive=$1 name=$2 data_file
  # Heuristic: the largest entry in the zip listing is the data file.
  data_file=$(unzip -l -qq "$archive" |
    sort -nr |
    sed 's|^[ ]*||g' |
    awk -F" " 'NR==1{print $2}')
  if [[ "$data_file" =~ \.xls ]]; then
    unzip -p "$archive" "$data_file" > "$SNSP_DIR/$name.xlsb"
    # Separate UserInstallation profile -- presumably so a concurrently
    # running LibreOffice instance cannot block the headless conversion.
    # Bug fix: $SNSP_DIR is now quoted in the cd.
    (cd "$SNSP_DIR" && libreoffice --headless --convert-to csv \
      -env:UserInstallation=file:///tmp/foobar7665765 \
      "$name".xlsb)
  elif [[ "$data_file" =~ \.csv$ ]]; then
    unzip -p "$archive" "$data_file" > "$SNSP_DIR"/"$name".csv
  else
    exit 1
  fi
}
if ! [ -x "$(command -v gdown)" ]; then
virtualenv ~/.virtualenvs/gdown
# shellcheck source=/dev/null
source /home/rstudio/.virtualenvs/gdown/bin/activate
pip install gdown
fi
URL_ESTADOS="https://www.gob.mx/sesnsp/acciones-y-programas/incidencia-delictiva-del-fuero-comun-nueva-metodologia?state=published"
URL_MUNS="https://www.gob.mx/sesnsp/acciones-y-programas/datos-abiertos-de-incidencia-delictiva?state=published"
municipal_fc_download "$URL_MUNS"
estatal_download "$URL_MUNS" "Estatal" "estados.csv" "Cifras de Incidencia Delictiva Estatal, 2015"
URL_VIC="https://www.gob.mx/sesnsp/acciones-y-programas/victimas-nueva-metodologia?state=published"
estatal_download "$URL_MUNS" "Víctimas" "estados_victimas.csv" "Cifras de Víctimas del Fuero Común, 2015"
#convert_to_csv "$ESTADOS_FC_ZIP" estados
#convert_to_csv "$MUN_FC_ZIP" municipios
#convert_to_csv "$ESTADOS_VIC_ZIP" estados_victimas
deactivate || true
| true
|
85bdad8ba62a97c79b9c97e16f5a970cca1aa029
|
Shell
|
AAAAAEXQOSyIpN2JZ0ehUQ/SSHPLUS-MANAGER-FREE
|
/Install/Generador/Install/Base_KeyGen/keycreater
|
UTF-8
| 2,738
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
if [ "$1" = "start" ]; then
while true
do
diretorio="/var/www/html/adm"
inst="/etc/admserver/inst"
arquivos="/etc/admserver/adm/*"
a=$((($RANDOM %10) * 121 ))
b=$((($RANDOM %10) * 121 ))
echo "$a" > /tmp/a
echo "$b" > /tmp/b
pasta1=$(cat /tmp/a)
pasta2=$(cat /tmp/b)
echo -e "\033[1;32m--------------------------RODANDO------------------------------"
rm -rf $diretorio > /dev/null 2>&1
mkdir $diretorio > /dev/null 2>&1
touch $diretorio/index.html > /dev/null 2>&1
mkdir $diretorio/adm > /dev/null 2>&1
touch $diretorio/adm/index.html > /dev/null 2>&1
mkdir $diretorio/adm/$pasta1 > /dev/null 2>&1
mkdir $diretorio/adm/$pasta1/$pasta2 > /dev/null 2>&1
cp $inst $diretorio/inst > /dev/null 2>&1
cp $arquivos $diretorio/adm/$pasta1/$pasta2/ > /dev/null 2>&1
if [ -e /etc/admserver/lista/lista2 ]; then
rm -rf /etc/admserver/lista/lista2
touch /etc/admserver/lista/lista2
else
touch /etc/admserver/lista/lista2
fi
if [ -e /etc/admserver/lista/lista1 ]; then
rm -rf /etc/admserver/lista/lista1
touch /etc/admserver/lista/lista1
else
touch /etc/admserver/lista/lista1
fi
for free in `cat /etc/admserver/lista/listafree`
do
echo "$pasta1/$pasta2/$free" >> /etc/admserver/lista/lista1
done
for premy in `cat /etc/admserver/lista/listapremium`
do
echo "$pasta1/$pasta2/$premy" >> /etc/admserver/lista/lista2
done
if [ -e /etc/admserver/key/keyfree ]; then
for free in `cat /etc/admserver/key/keyfree`
do
ke1=$(echo $free | awk -F ":" '{print $1}')
ke2=$(echo $free | awk -F ":" '{print $2}')
if [[ ! -d "$diretorio/$ke1" ]]; then
mkdir $diretorio/$ke1
fi
if [[ ! -d "$diretorio/$ke1/$ke2" ]]; then
mkdir $diretorio/$ke1/$ke2
fi
cp /etc/admserver/lista/lista1 $diretorio/$ke1/$ke2/lista
echo "$diretorio/$ke1/$ke2/FREE"
done
fi
if [ -e /etc/admserver/key/keypremium ]; then
for premium in `cat /etc/admserver/key/keypremium`
do
ke1=$(echo $premium | awk -F ":" '{print $1}')
ke2=$(echo $premium | awk -F ":" '{print $2}')
if [[ ! -d "$diretorio/$ke1" ]]; then
mkdir $diretorio/$ke1
fi
if [[ ! -d "$diretorio/$ke1/$ke2" ]]; then
mkdir $diretorio/$ke1/$ke2
fi
cp /etc/admserver/lista/lista2 $diretorio/$ke1/$ke2/lista
echo "$diretorio/$ke1/$ke2/PREMMY"
done
fi
rm -rf /tmp/a
rm -rf /tmp/b
echo -e "\033[1;31m INSTALAÇÃO ALOCADA EM... \033[1;36m
$diretorio/adm/$pasta1/$pasta2
\033[1;32m"
chmod -R 755 $diretorio
echo "EM ESTADO DE ESPERA, AGUARDANDO TIMER"
echo -e "--------------------------RODANDO------------------------------\033[1;31m"
for prog in $(seq 30); do
echo -n "|"
sleep 15s
echo -n "|"
sleep 15s
done
echo -ne "[100%]"
echo ""
done
fi
if [ "$1" = "stop" ]; then
for pids in `ps x | grep keycreater | grep -v grep | grep bin | awk '{print $1}'`
do
kill -9 $pids > /dev/null 2>&1
done
killall keycreater
fi
| true
|
7e6f2543e6f38d48570856471264c4b987bc9982
|
Shell
|
alexshelto/shell_scripting_reference
|
/conditionals.sh
|
UTF-8
| 498
| 3.828125
| 4
|
[] |
no_license
|
# Alexander Shelton
# March 28 2019
# Bash Scripting Reference
# Program: demonstrates branching with if / elif / else on user input
#! /bin/bash

# Greet as the script's own persona.
NAME="Alex"
echo "hello im $NAME"

# Prompt inline and capture the answer.
read -p "what is your name: " USERNAME

# Branch on the reply; [[ ]] is the bash test keyword (no word splitting).
if [[ "$USERNAME" == "Alex" ]]; then
    echo "WOW! We have the same name"
elif [[ "$USERNAME" == "Bob" ]]; then
    echo "your name is Bob, cool name"
else
    echo "Hello $USERNAME, it is nice to meet you! Your name wasnt Bob or Alex"
fi
| true
|
531bb8ea48ec37a0f8da406e94c4b3a009ed05d7
|
Shell
|
mlworle/Akamai-Academy
|
/project/phase2/scripts/munge_data.sh
|
UTF-8
| 719
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Phase 2 of Capstone Project
# Munge last command
# Munge who command
# Script to be used in cron to generate data for graphs
# This really uses up too much mem and is too clunky!
# Use awk
# Mworle 5/9/16
last > l1.dat
cat l1.dat | tr -s " " > l2.dat
#sed '/23.79.236.14/!d' l2.dat > l3.dat
cp l2.dat l3.dat
sed '/still logged/d' l3.dat > l4.dat
sed '/(/d' l4.dat > l5.dat
sed '/)/d' l5.dat > l6.dat
cat l6.dat | tr ":" "." > data.dat # really bad, must *5/3
#rm l*.dat
# Munge who command
who > who.txt
cat who.txt | tr -s " " > wh1.dat
sed '/root/d' wh1.dat > wh2.dat
cat wh2.dat | tr "-" " " > wh3.dat
cat wh3.dat | cut -d" " -f1,2,4,5,6,7 > wh4.dat
cat wh4.dat | tr ":" "." > who.dat
rm wh*.dat
| true
|
055b163dad8f6d5575ed23d481658e8ccf748dc3
|
Shell
|
jrusinko/PickMe
|
/PickMe-shortcut.sh
|
UTF-8
| 526
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Wrapper that runs one of the PickMe launcher scripts from its install
# directory against an input file given relative to the caller's cwd.
#
# Usage: PickMe-shortcut.sh [script] inputfile [option]
#   script     PickMe.sh (default), PickMe_FR.sh, PickMeQ.sh or PickMeQ_FR.sh
#   inputfile  required; resolved relative to the invocation directory
#   option     only used by the PickMeQ variants (default: A)

# Bug fix: the install dir used to be stored in PATH itself, clobbering
# command lookup for this shell and every child; use a dedicated variable.
APP_DIR="/home/julien/Bureau/JOE/PickMe-main"
SCRIPT=${1-PickMe.sh}
INPUTFILE=${2?Error: no inputfile provided}
OPTION=${3-A}
CALLER_DIR=$(pwd)

cd "$APP_DIR" || exit 1
echo 'start'
# Keep /usr/bin first on PATH, as the original script arranged.
export PATH=/usr/bin/:$PATH

# Dispatch on the requested launcher; the Q variants take an extra option.
case "$SCRIPT" in
PickMe.sh|PickMe_FR.sh)
	./"$SCRIPT" "$CALLER_DIR/$INPUTFILE"
	;;
PickMeQ.sh|PickMeQ_FR.sh)
	./"$SCRIPT" "$CALLER_DIR/$INPUTFILE" "$OPTION"
	;;
esac

cd "$CALLER_DIR"
| true
|
c5bfafad3f5438c15d9c08f86f6feeb37d621b9b
|
Shell
|
lsipii/dotfiles
|
/scripts/install-wtfutil.sh
|
UTF-8
| 398
| 3.234375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -e
VERSION=0.41.0
echo "Installing wtfutils ${VERSION}"
WANTSPATH="$HOME/.local/bin"
mkdir -p ${WANTSPATH}
mkdir -p ~/src/wtfutil && \
cd ~/src/wtfutil
wget https://github.com/wtfutil/wtf/releases/download/v${VERSION}/wtf_${VERSION}_linux_amd64.tar.gz
tar -xvzf wtf_${VERSION}_linux_amd64.tar.gz
mv wtf_${VERSION}_linux_amd64/wtfutil $WANTSPATH/
cd
rm -rf ~/src/wtfutil
| true
|
1e708545421d7c0e43871a37c36228f1891ea432
|
Shell
|
FauxFaux/debian-control
|
/d/dropbear/dropbear_2018.76-4_all/postrm
|
UTF-8
| 1,380
| 3.046875
| 3
|
[] |
no_license
|
#! /bin/sh
set -e
# XXX backward compatibility; remove once Stretch is the current stable
if [ "$1" = 'abort-upgrade' -o "$1" = 'abort-install' ] && [ $# -ge 2 ] && \
dpkg --compare-versions "$2" le '2015.68-1~'; then
for x in dss rsa ecdsa; do
if [ -f "/etc/dropbear-initramfs/dropbear_${x}_host_key" ] &&
[ ! -f "/etc/initramfs-tools/etc/dropbear/dropbear_${x}_host_key" ]; then
[ -d /etc/initramfs-tools/etc/dropbear ] || mkdir -p /etc/initramfs-tools/etc/dropbear
mv -n "/etc/dropbear-initramfs/dropbear_${x}_host_key" /etc/initramfs-tools/etc/dropbear
fi
done
for x in authorized_keys id_dsa.pub id_rsa.pub id_ecdsa.pub; do
if [ -f "/etc/dropbear-initramfs/$x" ] &&
[ ! -f "/etc/initramfs-tools/root/.ssh/$x" ]; then
[ -d /etc/initramfs-tools/etc/dropbear ] || mkdir -pm0700 /etc/initramfs-tools/root/.ssh
mv -n "/etc/dropbear-initramfs/$x" /etc/initramfs-tools/root/.ssh
fi
done
rmdir --ignore-fail-on-non-empty /etc/dropbear-initramfs
fi
# Automatically added by dh_installdeb/11.3.5
dpkg-maintscript-helper mv_conffile /usr/share/initramfs-tools/conf-hooks.d/dropbear /etc/initramfs-tools/conf-hooks.d/dropbear 2015.68-1~ dropbear -- "$@"
# End automatically added section
exit 0
| true
|
acd2c1c2a6a1d5e199838ee4072ad33fe0dd7651
|
Shell
|
rcqls/dynStudio
|
/deploy/platypus/DyndocStudio/DyndocToolsInstall/linux/install_dyndoc_tools.sh
|
UTF-8
| 2,957
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# NB: requirement ruby-build, openssl ssl-dev gmp10 gmp10-dev
# This script is used for Linux
DYNDOCTOOLS=~/dyndoc/tools
DYNDOCTOOLSINSTALL=.
# Ruby Gems
R4RBVERSION=1.0.0
DYNDOCVERSION=1.6.0
R4RBGEM=ruby/R4rb-${R4RBVERSION}.gem
DYNDOCRUBYGEM=ruby/dyndoc-ruby-${DYNDOCVERSION}.gem
DYNDOCCLIENTGEM=ruby/dyndoc-client-${DYNDOCVERSION}.gem
DYNDOCSERVERGEM=ruby/dyndoc-server-${DYNDOCVERSION}.gem
# R Packages
rb4RVERSION=0.1.0
dyndocVERSION=0.1.0
RB4RPACKAGE=R/rb4R_$rb4RVERSION.tar.gz
DYNDOCPACKAGE=R/dyndoc_$dyndocVERSION.tar.gz
echo DYNDOCTOOLS=$DYNDOCTOOLS
RUBYBUILDVERSION=2.0.0-p353;RUBYVERSION=2.0.0
#RUBYBUILDVERSION=2.1.0;RUBYVERSION=2.1.0
RUBYWHERE=$DYNDOCTOOLS/Ruby/ruby-$RUBYVERSION
mkdir -p $DYNDOCTOOLS/chruby
cp $DYNDOCTOOLSINSTALL/chruby/* $DYNDOCTOOLS/chruby/
mkdir -p $DYNDOCTOOLS/Ruby
echo "Do you want to install ruby source? [yes|NO]";read OK
if [[ $OK == "yes" ]]; then
echo "check ruby-build install"
checkRubyBuild=$(ruby-build --version)
if [[ "$checkRubyBuild" != "" ]]; then
echo "Installing ruby $RUBYVERSION"
CONFIGURE_OPTS=--enable-shared ruby-build $RUBYBUILDVERSION $RUBYWHERE
else
echo "Install ruby-build first. Try this:"
echo "git clone https://github.com/sstephenson/ruby-build.git"
echo "cd ruby-build"
echo "./install.sh"
exit
fi
fi
WHERE=$DYNDOCTOOLS/Ruby/ruby-$RUBYVERSION/lib/ruby/gems/$RUBYVERSION
. $DYNDOCTOOLS/chruby/chruby.sh
chruby_use $RUBYWHERE
#### install gem
echo "Do you want to install ruby gems? [yes|NO]";read OK
if [[ $OK == "yes" ]]; then
echo "Installing ruby gems ..."
gem install $R4RBGEM $DYNDOCRUBYGEM $DYNDOCCLIENTGEM $DYNDOCSERVERGEM -i ${WHERE} --no-ri --no-rdoc
fi
echo "Do you want to install R packages? [yes|NO]";read OK
if [[ $OK == "yes" ]]; then
echo "Installing R packages ..."
mkdir -p $DYNDOCTOOLS/R/library/ruby/$RUBYVERSION
R CMD INSTALL --preclean ${RB4RPACKAGE} -l $DYNDOCTOOLS/R/library/ruby/$RUBYVERSION
R CMD INSTALL --preclean ${DYNDOCPACKAGE} -l $DYNDOCTOOLS/R/library/ruby/$RUBYVERSION
fi
echo "Do you want to install other dyndoc stuff? [yes|NO]";read OK
if [[ $OK == "yes" ]]; then
echo "Installing dyndoc stuff ..."
mkdir -p $DYNDOCTOOLS/../{etc,demo,doc,bin}
mkdir -p $DYNDOCTOOLS/../server/{rooms,run}
cp -r $DYNDOCTOOLSINSTALL/etc/* $DYNDOCTOOLS/../etc/
cp -r $DYNDOCTOOLSINSTALL/doc/* $DYNDOCTOOLS/../doc/
cp -r $DYNDOCTOOLSINSTALL/demo/* $DYNDOCTOOLS/../demo/
cp -r $DYNDOCTOOLSINSTALL/bin/* $DYNDOCTOOLS/../bin/
fi
echo "Do you want to install dyndoc studio stuff? [yes|NO]";read OK
if [[ $OK == "yes" ]]; then
echo "Installing dyndoc studio ..."
linux=linux32
archlinux=$(ruby -rrbconfig -e "puts RbConfig::CONFIG['build_cpu']")
echo "archlinux=$archlinux"
if [[ "$archlinux" == "x86_64" ]]; then
linux=linux64
fi
mkdir -p $DYNDOCTOOLS/../studio/app
cp -r $DYNDOCTOOLSINSTALL/studio/* $DYNDOCTOOLS/../studio/
cp -r $DYNDOCTOOLSINSTALL/studio/$linux/* $DYNDOCTOOLS/../studio/app
fi
| true
|
cc46a56e94165c60e9edcc6870086e55f335fee4
|
Shell
|
ftrujillojr/git_aliases
|
/svndiff.sh
|
UTF-8
| 4,928
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
# INSTALLATION:
#
# 1) $ sudo yum install kdiff3
#
# 2) $ sudo yum list installed | grep kdiff3
# kdiff3.x86_64 0.9.95-3.el6 @epel
#
# 3) Place this file in $HOME/bin as svndiff.sh
#
# 4) Add to $HOME/.subversion/config lineS => diff3-cmd=/home/ftrujillo/bin/svndiff.sh
# diff -cmd=/home/ftrujillo/bin/svndiff.sh
# **** make sure to chmod 755
# Then, svn should use this script, which will use kdiff3 to do three way merging.
# Return an errorcode of 0 on successful merge, 1 if unresolved conflicts
# remain in the result. Any other errorcode will be treated as fatal.
#NOTE: all output must be redirected to stderr with "1>&2" as all stdout output is written to the output file
VDIFF3="kdiff3"
DIFF3="diff3"
DIFF="kdiff3"
echo "Running from ~/bin/svndiff.sh" 1>&2
promptUser ()
{
read answer
case "${answer}" in
"M" )
echo "" 1>&2
echo "Attempting to merge ${baseFileName} with ${DIFF}" 1>&2
$VDIFF3 $older $mine $theirs --L1 $labelOlder --L2 $labelMine --L3 $labelTheirs -o $output 1>&2
bLoop=1
if [ -f $output ]; then
if [ -s $output ]; then
#output succesfully written
bLoop=0
fi
fi
if [ $bLoop = 0 ]; then
cat $output
rm -f $output
exit 0
else
echo "Merge failed, try again" 1>&2
fi
;;
"m" )
echo "" 1>&2
echo "Attempting to auto-merge ${baseFileName}" 1>&2
diff3 -L $labelMine -L $labelOlder -L $labelTheirs -Em $mine $older $theirs > $output
if [ $? = 1 ]; then
#Can't auto merge
rm -f $output
$VDIFF3 $older $mine $theirs --L1 $labelOlder --L2 $labelMine --L3 $labelTheirs -o $output --auto 1>&2
bLoop=1
if [ -f $output ]; then
if [ -s $output ]; then
#output succesfully written
bLoop=0
fi
fi
if [ $bLoop = 0 ]; then
cat $output
rm -f $output
exit 0
else
echo "Merge failed, try again" 1>&2
fi
else
#We can automerge, and we already did it
cat $output
rm -f $output
exit 0
fi
;;
"diff3" | "Diff3" | "DIFF3" )
echo "" 1>&2
echo "Diffing..." 1>&2
$VDIFF3 $older $mine $theirs --L1 $labelOlder --L2 $labelMine --L3 $labelTheirs 1>&2
;;
"diff" | "Diff" | "DIFF" )
echo "" 1>&2
echo "Diffing..." 1>&2
$DIFF $mine $theirs -L $labelMine -L $labelTheirs 1>&2
;;
"A" | "a" )
echo "" 1>&2
echo "Accepting remote version of file..." 1>&2
cat ${theirs}
exit 0
;;
"I" | "i" )
echo "" 1>&2
echo "Keeping local modifications..." 1>&2
cat ${mine}
exit 0
;;
"R" | "r" )
echo "" 1>&2
echo "Reverting to base..." 1>&2
cat ${older}
exit 0
;;
"D" | "d" )
echo "" 1>&2
echo "Runnig diff3..." 1>&2
diff3 -L $labelMine -L $labelOlder -L $labelTheirs -Em $mine $older $theirs
#Exit with return vaule of the diff3 (to write out files if necessary)
exit $?
;;
"S" | "s" )
echo "" 1>&2
echo "Saving for later..." 1>&2
cat ${mine}
#Exit with return vaule of 1 to force writting of files
exit 1
;;
"Fail" | "fail" | "FAIL" )
echo "" 1>&2
echo "Failing..." 1>&2
exit 2
;;
"H" | "h" )
echo "" 1>&2
echo "USAGE OPTIONS:" 1>&2
echo " [A]ccept Accept $labelTheirs and throw out local modifications" 1>&2
echo " [D]efault Use diff3 to merge files (same behavior as vanilla SVN)" 1>&2
echo " [Fail] Kills the command (not suggested)" 1>&2
echo " [H]elp Print this message" 1>&2
echo " [I]gnore Keep your locally modified version as is" 1>&2
echo " [M]erge Manually merge using ${VDIFF3}" 1>&2
echo " [m]erge Same as "M" but attempts to automerge if possible" 1>&2
echo " [R]evert Revert to base version (${labelOlder})" 1>&2
echo " [S]ave Same as 'I' but writes out rold, rnew, and rmine files to deal with later" 1>&2
echo " [diff] Type 'diff' to diff versions $labelMine and $labelTheirsthe before making a descision" 1>&2
echo " [diff3] Type 'diff3' to diff all three versions before making a descision" 1>&2
echo "" 1>&2
;;
* )
echo "'${answer}' is not an option, try again." 1>&2
;;
esac
}
if [ -z $2 ]
then
echo ERROR: This script expects to be called by subversion
exit 1
fi
if [ $2 = "-m" ]
then
#Setup vars
labelMine=${4}
labelOlder=${6}
labelTheirs=${8}
mine=${9}
older=${10}
theirs=${11}
output=${9}.svnDiff3TempOutput
baseFileName=`echo $mine | sed -e "s/.tmp$//"`
#HERE#
diff3 -L $labelMine -L $labelOlder -L $labelTheirs -Em $mine $older $theirs > $output
if [ $? = 1 ]; then
#Can't auto merge
#Prompt user for direction
while [ 1 ]
do
echo "" 1>&2
echo "${baseFileName} requires merging." 1>&2
echo "" 1>&2
echo "What would you like to do?" 1>&2
echo "[M]erge [A]ccept [I]gnore [R]evert [D]efault [H]elp" 1>&2
promptUser
done
else
#We can automerge, and we already did it
cat $output
rm -f $output
exit 0
fi
else
L="-L" #Argument option for left label
R="-L" #Argument option for right label
label1=$3 #Left label
label2=$5 #Right label
file1=$6 #Left file
file2=$7 #Right file
echo "Running .... $DIFF $file1 $file2 $L $label1 $R $label2" 1>&2
$DIFF $file1 $file2 $L "$label1" $R "$label2" &
#$DIFF $file1 $file2 &
#wait for the command to finish
wait
fi
exit 0
| true
|
0f584738129cb05d7ddd421f3bd6bb258b93f379
|
Shell
|
7956968/sys-bin
|
/popt/build_hi3531.sh
|
UTF-8
| 283
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Cross-compile popt for the HiSilicon Hi3531 (arm-hisiv200-linux toolchain)
# and stage the install tree under ${OUTDIR}/popt.
# NOTE(review): OUTDIR is expected to come from the env include below --
# confirm against one_time_build_env_include.
. ../one_time_build_env_include
VERSION=1.16
pushd popt-${VERSION} > /dev/null
# --host selects the HiSi cross toolchain; --prefix=/usr is relative to the
# DESTDIR staging root used at install time.
./configure --host=arm-hisiv200-linux --prefix=/usr
make clean
make CC=arm-hisiv200-linux-gcc
make install DESTDIR=${OUTDIR}/popt
popd > /dev/null
# Record the built component in the top-level build log.
echo "popt-${VERSION}" >> ../sysbuild.log
| true
|
dd5815eca9bda527f3660a2d3f9e7c8b6cb1795f
|
Shell
|
Gilbert-Gb-Li/sage-bigdata-azkaban
|
/azkaban-script/live_p2_new/data_import_tbl_ex_live_id_daily_snapshot.sh
|
UTF-8
| 1,356
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/sh
source /etc/profile
source ${AZKABAN_HOME}/conf/env.conf
source ${base_path}/util.sh
source ${base_path}/live_p2_util.sh
date=$1
yesterday=`date -d "-1 day $date" +%Y-%m-%d`
echo "############ 主播id 全量快照表 start #########"
hive_sql="insert into live_p2.tbl_ex_live_id_daily_snapshot_new partition(dt='${date}')
select record_time,app_package_name,data_source,client_time,search_id,user_id,
user_name,live_desc,room_id,current_page,user_image,user_level
from(
select *,row_number() over (partition by data_source,app_package_name,search_id,user_id order by client_time desc) as order_num
from (
select record_time,app_package_name,'${ias_source}' as data_source,client_time,search_id,
user_id,user_name,live_desc,room_id,current_page,user_image,user_level
from ias_p2.tbl_ex_live_id_list_data_origin_orc
where dt='${date}' and search_id is not null and user_id is not null
union all
select record_time,app_package_name,data_source,client_time,search_id,user_id,user_name,
live_desc,room_id,current_page,user_image,user_level
from live_p2.tbl_ex_live_id_daily_snapshot_new
where dt='${yesterday}'
)as p
)as t
where t.order_num =1"
executeHiveCommand "${hive_sql}"
echo "############ 主播id 全量快照表 end #########"
| true
|
ce8d6f51aefe342bf963e907bcd075ad5181d28a
|
Shell
|
cboin/dotfiles
|
/post-installation.sh
|
UTF-8
| 1,793
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Arch Linux post-installation: install packages, set the keyboard layout,
# clone the dotfiles repo, and symlink configuration for bash, i3wm, X11,
# lightdm, git, vim, zsh and termite.
# Improvement: every $HOME / $dotfiles_dir expansion is now quoted so the
# script survives paths containing spaces (globs are left unquoted where
# expansion is intended).

# Install packages listed one per line in pkglist.txt (fed to pacman on stdin).
echo "(i) install packages"
sudo -E pacman -S - < pkglist.txt

# Set the keyboard layout settings
echo "(i) set the keyboard layout"
localectl set-x11-keymap fr

# Pull dotfiles
dotfiles_dir=$HOME/Documents/dotfiles
mkdir -p "$dotfiles_dir"
git clone https://github.com/cboin/dotfiles "$dotfiles_dir"

# Set bash config
echo "(i) set bash config"
ln -sf "$dotfiles_dir"/config_files/bash/bashrc "$HOME"/.bashrc

# Set i3wm config: wm config, status bar, wallpaper/logo assets, lock screen.
echo "(i) set i3wm config"
ln -sf "$dotfiles_dir"/config_files/i3wm/config "$HOME"/.config/i3/config
ln -sf "$dotfiles_dir"/config_files/i3wm/i3status.conf "$HOME"/.config/i3/i3status.conf
sudo -E wget http://static.simpledesktops.com/uploads/desktops/2015/02/20/zentree_1.png -O /usr/share/pixmaps/wallpaper.png
sudo -E wget http://raspberrypi.windowswiki.info/img/archlinuxlogo.png -O /usr/share/pixmaps/arch-logo.png
ln -sf "$dotfiles_dir"/config_files/i3wm/lock.sh "$HOME"/.config/i3/lock.sh
sudo ln -sf "$HOME"/.config/i3/lock.sh /usr/bin/lock
# chmod dereferences the /usr/bin/lock symlink to the user-owned lock.sh.
chmod +x /usr/bin/lock

# Set X11 config
echo "(i) set X11 config"
ln -sf "$dotfiles_dir"/config_files/X11/Xresources "$HOME"/.Xresources
ln -sf "$dotfiles_dir"/config_files/X11/xinitrc "$HOME"/.xinitrc

# Set lightdm config (glob stays unquoted: link every file in lightdm/).
echo "(i) set lightdm config"
sudo ln -sf "$dotfiles_dir"/lightdm/* /etc/lightdm/

# Set git config
echo "(i) set git config"
ln -sf "$dotfiles_dir"/config_files/git/gitconfig "$HOME"/.gitconfig

# Set vim config
echo "(i) set vim config"
ln -sf "$dotfiles_dir"/config_files/vim/vimrc "$HOME"/.vimrc
ln -sf "$dotfiles_dir"/vim "$HOME"/.vim

# Set zsh config
echo "(i) set zsh config"
ln -sf "$dotfiles_dir"/zsh/oh-my-zsh "$HOME"/.oh-my-zsh
ln -sf "$dotfiles_dir"/zsh/zshrc "$HOME"/.zshrc

# Set termite config
echo "(i) set termite config"
ln -sf "$dotfiles_dir"/termite/config "$HOME"/.config/termite/config
| true
|
e62c34a6ef7abf311ed666256bf6e036b48efae4
|
Shell
|
unixsuperhero/better-dotfiles
|
/src/home/bin/oldlist
|
UTF-8
| 996
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
case $1 in
'') clear
count=0
for a in ~/lists/*
do
if [ -f $a ]
then
printf '%-30s' $(basename "$a")
count=$count+1
if [ $count == 4 ]
then
count=0
printf '\n'
fi
fi
done
for a in ~/lists/*
do
if [ -d $a ]
then
abase=$(basename "$a")
echo
echo $abase
count=0
for b in $a/*
do
count=$[ $count + 1 ]
if [ $count -gt 7 ]
then
printf '\n'
count=1
fi
printf '%2s%-18s' " " "$(basename $b)"
done
echo
fi
done
echo
cd ~/lists
echo "Enter command: "
read -e input
if [[ ${input%% *} == "edit" ]]
then
vim ~/lists/${input#edit }
else
clear
cat ~/lists/$input | grep -v "^[ ]*[#]" ; echo
fi
;;
-e) vim ~/lists/$2;; # use cut to decide if a directory needs to be made or not
-g) grep -Ri "$2" ~/lists/*;;
*) clear; cat ~/lists/$1 | grep -v "^[ ]*[#]" ; echo;;
esac
#cd ~/lists
| true
|
0c32640ffe4bb44e0eb33e1dabf47372a747b3bf
|
Shell
|
gbsf/archlinux-packages
|
/privoxy/repos/extra-i686/PKGBUILD
|
UTF-8
| 1,982
| 2.765625
| 3
|
[] |
no_license
|
# $Id$
# Maintainer: Juergen Hoetzel <juergen@hoetzel.info>
# Contributor: basilburn (basilburn), Paul Bredbury (brebs)
pkgname=privoxy
pkgver=3.0.8
pkgrel=3
pkgdesc="a web proxy with advanced filtering capabilities"
arch=(i686 x86_64)
url="http://www.privoxy.org"
license=('GPL')
depends=('pcre')
makedepends=('autoconf')
backup=(etc/privoxy/{config,trust,default.action,user.filter,default.filter} \
etc/privoxy/{standard,user}.action etc/logrotate.d/privoxy)
source=(http://downloads.sourceforge.net/sourceforge/ijbswa/privoxy-$pkgver-stable-src.tar.gz privoxy \
privoxy.logrotated \
privoxy.confd)
md5sums=('252c0e085218fb930c4bc7563e9cedd9' '3c0f0c3ce941d74f40ddf0f858f7ba8d'\
'a364c34c8dc6eb78c9a6b777237932de' 'c27cf5aaf0cf89825fd0f7a5a4ebd7c0')
install=privoxy.install
build() {
GROUP_ID=42
cd $startdir/src/privoxy-$pkgver-stable
autoheader && autoconf
./configure --prefix=/usr --sysconfdir=/etc/privoxy --enable-zlib
make || return 1
make prefix=$startdir/pkg/usr MAN_DEST=$startdir/pkg/usr/share/man/man1 \
GROUP=$GROUP_ID CONF_BASE=$startdir/pkg/etc/privoxy VAR_DEST=$startdir/pkg/var \
install
rm $startdir/pkg/var/log/privoxy/* # don't overwrite existing logfiles!
install -D -m755 ../privoxy $startdir/pkg/etc/rc.d/privoxy
install -D -m644 ../privoxy.logrotated $startdir/pkg/etc/logrotate.d/privoxy
install -D -m644 ../privoxy.confd $startdir/pkg/etc/conf.d/privoxy
# fix the config paths
sed -i \
-e 's|^confdir.*$|confdir /etc/privoxy|' \
-e 's|^logdir.*$|logdir /var/log/privoxy|' \
-e '/^user-manual/s/.*/#user-manual \/usr\/share\/doc\/privoxy\/user-manual\//' \
$startdir/pkg/etc/privoxy/config
# fix permissions
chown -R $GROUP_ID:$GROUP_ID $startdir/pkg/etc/privoxy/
find $startdir/pkg/etc/privoxy/ -type d | xargs chmod 770
find $startdir/pkg/etc/privoxy/ -type f | xargs chmod 660
chmod g+rwx,u+rwx,o-rwx $startdir/pkg/var/log/privoxy
chgrp ${GROUP_ID} $startdir/pkg/var/log/privoxy/
}
| true
|
4815dd6ff1a4ac1da3c799eb0bf49f32cd41d554
|
Shell
|
turesheim/virtual-environments-fork
|
/images/linux/scripts/installers/postgresql.sh
|
UTF-8
| 984
| 3.578125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash -e
################################################################################
## File: postgresql.sh
## Desc: Installs Postgresql
################################################################################
REPO_URL="https://apt.postgresql.org/pub/repos/apt/"
#Preparing repo for PostgreSQL 12.
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
echo "deb $REPO_URL $(lsb_release -cs)-pgdg main" | tee /etc/apt/sources.list.d/pgdg.list
echo "Install PostgreSQL"
apt update
apt install postgresql postgresql-client
echo "Install libpq-dev"
apt-get install libpq-dev
rm /etc/apt/sources.list.d/pgdg.list
echo "postgresql $REPO_URL" >>"$HELPER_SCRIPTS"/apt-sources.txt
if [[ ! -f /run/systemd/container ]]; then
# Disable postgresql.service
systemctl is-active --quiet postgresql.service && systemctl stop postgresql.service
systemctl disable postgresql.service
invoke_tests "Databases" "PostgreSQL"
fi
| true
|
7adfc8da37c13325e563bdc46be8f6eff258d70a
|
Shell
|
dsxyy/tecs
|
/tools/ci/ci.sh
|
GB18030
| 3,777
| 3.625
| 4
|
[] |
no_license
|
#!/bin/sh
# ߣ Ľ
# дڣ2012.09.10
COMPILE_DIR=
IMAGE_FILE=
function usage
{
echo "Usage: `basename $0` -d compile_dir -i image_file"
exit 0
}
function read_bool
{
local prompt=$1
local default=$2
echo -e "$prompt (y/n? default: $default): \c "
read answer
case $answer in
"Y") answer="yes" ;;
"y") answer="yes";;
"n") answer="no";;
"N") answer="no";;
"") answer="$default";;
*) echo "Please input y or n"; read_bool;;
esac
return 0
}
function read_string
{
local prompt=$1
local default=$2
echo -e "$prompt (default: $default): \c "
read answer
[ -z $answer ] && answer="$default"
}
#в
while getopts "d:i:h" option
do
case $option in
d) COMPILE_DIR=$OPTARG ;;
i) IMAGE_FILE=$OPTARG ;;
h) usage ;;
?) echo "invalid argument!"; exit 1 ;;
esac
done
[ -z "$IMAGE_FILE" ] && echo "no image $IMAGE_FILE!"&& exit -1
if [ -z "$COMPILE_DIR" ]; then
SELF_DIR=`dirname $0`
COMPILE_DIR="$SELF_DIR/../.."
fi
[ ! -d $COMPILE_DIR ] && echo "no dir $COMPILE_DIR!" && exit -1
echo "COMPILE_DIR = $COMPILE_DIR"
RPM_DIR=$COMPILE_DIR/rpm
[ ! -d $RPM_DIR ] && echo "no rpm dir $RPM_DIR!" && exit -1
echo "RPM_DIR = $RPM_DIR"
PYTEST_DIR=$COMPILE_DIR/tools/ci/pytest
[ ! -d $PYTEST_DIR ] && echo "no pytest dir $PYTEST_DIR!" && exit -1
echo "PYTEST_DIR = $PYTEST_DIR"
AGENT_TOOLS_DIR=$COMPILE_DIR/tools/ci/agent_tools
[ ! -d $AGENT_TOOLS_DIR ] && echo "no agent tools dir $AGENT_TOOLS_DIR!" && exit -1
echo "AGENT_TOOLS_DIR = $AGENT_TOOLS_DIR"
echo "IMAGE_FILE = $IMAGE_FILE"
# Verify that the build produced every required RPM before proceeding.
# Bug fix: the original wrapped the find *command line* in quotes and tested
# the literal string with -z; a non-empty literal is never empty, so every
# check silently passed.  Run find via command substitution instead, and
# quote the -name glob so the shell cannot expand it prematurely.
[ -z "$(find $RPM_DIR/RPMS/el6/x86_64/ -name 'tecs-tc-*.rpm')" ] && echo "tecs-tc rpm is not ready!" && exit -1
[ -z "$(find $RPM_DIR/RPMS/el6/x86_64/ -name 'tecs-cc-*.rpm')" ] && echo "tecs-cc rpm is not ready!" && exit -1
[ -z "$(find $RPM_DIR/RPMS/el6/x86_64/ -name 'tecs-hc-*.rpm')" ] && echo "tecs-hc rpm is not ready!" && exit -1
[ -z "$(find $RPM_DIR/RPMS/el6/x86_64/ -name 'tecs-common-*.rpm')" ] && echo "tecs-common rpm is not ready!" && exit -1
[ -z "$(find $RPM_DIR/RPMS/el6/x86_64/ -name 'openais-*.rpm')" ] && echo "openais rpm is not ready!" && exit -1
[ -z "$(find $RPM_DIR/RPMS/el6/x86_64/ -name 'qpidc-broker-*.rpm')" ] && echo "qpidc-broker rpm is not ready!" && exit -1
[ -z "$(find $RPM_DIR/RPMS/el6/x86_64/ -name 'qpidc-client-*.rpm')" ] && echo "qpidc-client rpm is not ready!" && exit -1
[ -z "$(find $RPM_DIR/RPMS/el6/x86_64/ -name 'postgresql-*.rpm')" ] && echo "postgresql rpm is not ready!" && exit -1

# Bug fix: the original quoted the whole 'echo ...' text as a single word,
# turning it into a (nonexistent) command name -- the message was never
# printed and, because the && chain broke there, 'exit -1' never ran either.
CLEAR_SH=$AGENT_TOOLS_DIR/clear.sh
[ ! -e $CLEAR_SH ] && echo "no clear script $CLEAR_SH!" && exit -1

INSTALL_SH=$AGENT_TOOLS_DIR/install.sh
[ ! -e $INSTALL_SH ] && echo "no install script $INSTALL_SH!" && exit -1

START_SH=$AGENT_TOOLS_DIR/start.sh
[ ! -e $START_SH ] && echo "no start script $START_SH!" && exit -1
read_bool "clear tecs history?" "yes"
if [ $answer == "yes" ]; then
$CLEAR_SH
fi
read_bool "intall tecs now?" "yes"
if [ $answer == "yes" ]; then
$INSTALL_SH $COMPILE_DIR/../
fi
read_bool "start tecs now?" "yes"
if [ $answer == "yes" ]; then
$START_SH $COMPILE_DIR/../
fi
while true
do
service tecs-tc status
service tecs-cc status
service tecs-hc status
read_bool "ready to test?" "yes"
[ $answer == "yes" ] && break
sleep 1
done
let index=0
for i in `ls $PYTEST_DIR/test_*.py`
do
testcase["$index"]=`basename $i`
echo $index ${testcase[$index]}
let index+=1
done
read_string "Please select a test case to run:" "0"
TEST_CASE="$PYTEST_DIR/${testcase[$answer]}"
echo "test case = $TEST_CASE"
if [ ! -e $TEST_CASE ]; then
echo "test case $TEST_CASE does not exist!"
exit -1
fi
$TEST_CASE -i $IMAGE_FILE
| true
|
5d6e3331fb7699cea85bb68d0cce2704a97d36b1
|
Shell
|
geezacoleman/OpenWeedLocator
|
/update_owl.sh
|
UTF-8
| 704
| 3.40625
| 3
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/bin/bash
# Update the OpenWeedLocator (OWL) software in place: archive the current
# install, clone a fresh copy from GitHub, upgrade the OS packages and
# reinstall the Python requirements inside the 'owl' virtualenv.
cd ~
# Rename the old 'owl' folder to 'owl-DATE'
# (timestamped, so repeated updates never overwrite an earlier backup)
if [ -d "owl" ]; then
  mv owl "owl_$(date +'%Y%m%d_%H%M%S')"
fi
# Download the new software from GitHub
git clone https://github.com/geezacoleman/OpenWeedLocator owl
cd ~/owl
# update the system
# (deliberately interactive: apt prompts for confirmation on large upgrades)
echo "[INFO] Upgrading Raspberry Pi system...this may take some time. You will be asked to confirm at some steps."
sudo apt-get update
sudo apt-get upgrade
# Installing the requirements
# 'workon owl' activates the virtualenvwrapper environment OWL runs in.
echo "[INFO] Upgrading OWL requirements."
source `which workon` owl
pip install -r requirements.txt
# Changing permissions to make files executable
chmod a+x owl.py
chmod a+x owl_boot.sh
echo "[COMPLETE] OWL update has executed successfully."
| true
|
037f369f97ee814716a06c3a41700c965abfcfc3
|
Shell
|
kgrozis/bash
|
/04/04.9__run_cmds_from_a_var.sh
|
UTF-8
| 221
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Problem: Want to run different cmds in script depending on circumstances
# Technique: store a program *name* in a variable; the shell expands the
# variable at the start of the command line and executes that program.
FN=mytmp/blah
# assign a program name to a var
# will substitute cmd in shell and execute (prints the file name)
PROG=echo
$PROG $FN
# same invocation line, different program: now prints the file's contents
PROG=cat
$PROG $FN
| true
|
6f125ed86e7cb2cca1658bde00ff9446433aaf0a
|
Shell
|
nikhilvangeti/5143-OpSys-vangeti
|
/random.sh
|
UTF-8
| 296
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Print a uniformly chosen random word from the system dictionary.
WORD=/usr/share/dict/words
# 'wc -l < file' avoids the useless 'cat' and emits a bare count.
list_of_words=$(wc -l < "$WORD")
echo "list_of_words in dictionary" $list_of_words
# Bug fix: $RANDOM only ranges 0..32767, so on a typical ~100k-word
# dictionary the old code could never select most of the file.
# shuf draws uniformly over the full 1..count range.
num=$(shuf -i 1-"$list_of_words" -n 1)
echo "random line to be printed in dictionary" $num
wo=$(sed -n "${num}p" "$WORD")
echo "random word from dictionary in" $num "is :" $wo
| true
|
3a1e40c947f68d343444d78798360af717255629
|
Shell
|
nageek17/ansible
|
/bin/arr_completion.bash
|
UTF-8
| 641
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# Bash programmable completion for the 'arr' wrapper around ansible.
# Completing the host name will take ~10 seconds b/c we need to get the host
# names from AWS. The results are cached for 5 minutes.

# Completion entry point: bash fills COMP_WORDS/COMP_CWORD before calling us
# and reads the candidates back from COMPREPLY.
_arr() {
  local current_word=${COMP_WORDS[COMP_CWORD]}
  local previous_word=${COMP_WORDS[COMP_CWORD - 1]}

  if [[ "$previous_word" == "arr" ]]; then
    # First argument: complete role names (roles/ entries not starting with _).
    local options=$( ls roles | awk '/^[^_]/ {print}' )
    COMPREPLY=( $( compgen -W "$options" -- "$current_word" ) )
  elif [[ "$previous_word" == "-l" ]] || [[ "$previous_word" == "--limit" ]]; then
    # -l/--limit takes a host name.
    # NOTE(review): _ansible_complete_host / _ansible_complete_host_group are
    # defined elsewhere -- confirm that file is sourced before this one.
    _ansible_complete_host "$current_word"
  else
    _ansible_complete_host_group "$current_word"
  fi
}

complete -o default -F _arr arr
| true
|
8357ff5a80257f75c47ddd3dea01edc5d5e840b6
|
Shell
|
proxmox/dab
|
/scripts/mysql_randompw
|
UTF-8
| 909
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/sh
### BEGIN INIT INFO
# Provides: mysql_randompw
# Required-Start: $local_fs mysql
# Required-Stop:
# X-Start-Before:
# Default-Start: 2
# Default-Stop:
# Short-Description: Generate random MySQL root password
# Description: Generate and set a random MySQL root password
### END INIT INFO

# One-shot firstboot script: sets a random MySQL root password, stores it in
# /root/.my.cnf for the client, then removes itself from the init sequence.

set -e

HNAME=`head -n 1 /etc/hostname|awk '{ print $1; }'`

# Skip on the build host ("localhost"); only run inside a deployed container.
if [ "X${HNAME}" = "Xlocalhost" ] ; then
    exit 0;
fi

echo "Generate random MySQL root password"

# set HOME dir (for .my.cfg)
export HOME=/root
export USER=root

# 9 random bytes -> 12 base64 chars.
UPASSWD=`openssl rand -base64 9`

mysqladmin password "${UPASSWD}"

cat <<EOF > /root/.my.cnf
[client]
user=root
password="${UPASSWD}"
EOF

# Credentials file is root-only.
chmod 0600 /root/.my.cnf

# Self-remove so the password is not regenerated on the next boot
# (insserv-based systems vs. update-rc.d-based systems).
if [ -x /sbin/insserv ] ; then
    /sbin/insserv -r mysql_randompw
    rm -f /etc/init.d/mysql_randompw
else
    rm -f /etc/init.d/mysql_randompw
    update-rc.d -f mysql_randompw remove
fi
| true
|
3cebfa80962de97c2cf5873a71471d7c90dbc0e3
|
Shell
|
juanmb/oe-recipes
|
/bftp/files/bftpd.init
|
UTF-8
| 271
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/sh
# SysV-style control script for the bftpd FTP server.
# Usage: bftpd.init {start|stop|restart}
case "$1" in
'start')
        echo "Starting bftpd Server"
        # -d: run detached as a standalone daemon
        /usr/sbin/bftpd -d
        ;;
'stop')
        echo "Stopping bftpd Server"
        # NOTE(review): killall terminates every bftpd process, which also
        # drops any active client sessions.
        killall bftpd
        ;;
'restart')
        $0 stop
        sleep 1
        $0 start
        ;;
*)
        echo "usage: $0 {start|stop|restart}"
esac
| true
|
2eb15dfe244443e7a24d5b41dccfd14031fbb9f6
|
Shell
|
Rhinomcd/dotfiles
|
/vscode/linkmac.sh
|
UTF-8
| 285
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# macOS ships bash at /bin/bash, not /usr/bin/bash; options also belong in
# 'set' so they survive being run as 'bash script.sh'.
set -eu

# Symlink every JSON settings file in this directory into the VS Code user
# config folder, backing up anything already present there.
vscodePath="/Users/rmcdono/Library/Application Support/Code/User"

for file in *.json; do
  targetFile="${vscodePath}/${file}"
  if [[ -e "${targetFile}" ]]; then
    mv "${targetFile}" "${targetFile}.bak"
  fi
  # Bug fix: the original linked to the bare relative name "${file}", which
  # is resolved relative to the *link's* directory and so produced a dangling
  # self-referential link.  Link to the absolute source path instead.
  ln -s "${PWD}/${file}" "${targetFile}"
done
| true
|
495eaec93b72ce043d328de39892e55b77c24cf8
|
Shell
|
beta-danche/vim-mac
|
/install.sh
|
UTF-8
| 719
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/sh
#git clone git@github.com:laiwei/unix_home.git ~/.laiwei_unix_home
# Symlink a dotfile from the current directory into $HOME, backing up any
# existing file/dir/symlink as "<name>.backup".
# Arguments: $1 - file name relative to the current directory
install_file()
{
    local src="$(pwd)/$1"
    local des="$HOME/$1"
    echo "
    src $src
    des $des"
    # -e catches regular files and directories; -L additionally catches
    # dangling symlinks, which the old '-f -o -d' test missed (leaving the
    # subsequent ln to fail).  Also: -a/-o inside [ ] is deprecated.
    if [ -e "$des" ] || [ -L "$des" ]; then
        mv -f "$des" "$des.backup"
    fi
    ln -s "$src" "$des"
}
install_file .screenrc
install_file .vimrc
install_file .vim
install_file .bashrc
if [ -d "$GOROOT" ]; then
mkdir -p $HOME/.vim/ftdetect
mkdir -p $HOME/.vim/syntax
mkdir -p $HOME/.vim/autoload/go
ln -s $GOROOT/misc/vim/ftdetect/gofiletype.vim $HOME/.vim/ftdetect/
ln -s $GOROOT/misc/vim/syntax/go.vim $HOME/.vim/syntax
ln -s $GOROOT/misc/vim/autoload/go/complete.vim $HOME/.vim/autoload/go
echo "syntax on" >> $HOME/.vimrc
fi
| true
|
fccfefe2c041e1680452a9f0881e6b14c7471ca6
|
Shell
|
liuyingfithub/test
|
/lantest/t5_while.sh
|
UTF-8
| 114
| 2.84375
| 3
|
[] |
no_license
|
#! /bin/sh
# Tutorial: keep prompting until the user types the expected password.
# NOTE(review): input is echoed in clear text; a real script would use a
# silent read (e.g. bash's 'read -s').
echo "Enter password"
read TRY
while [ "$TRY" != "secret" ]; do
        echo "Sorry,try again"
        read TRY
done
| true
|
7468067d121ba99be0ac38cd867927ad624ac3a7
|
Shell
|
tournet/script
|
/ssmgr_ui_cf.sh
|
UTF-8
| 3,803
| 3.53125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
# Usage:
# Used for https://github.com/shadowsocks/shadowsocks-manager
# wget --no-check-certificate -O ssmgr_ui_cf.sh https://raw.githubusercontent.com/mixool/script/master/ssmgr_ui_cf.sh && chmod +x ssmgr_ui_cf.sh && ./ssmgr_ui_cf.sh 2>&1 | tee ssmgr_ui_cf.sh.log
# Make sure only root can run this script
[[ $EUID -ne 0 ]] && echo -e "This script must be run as root!" && exit 1
# Disable selinux
# Permanently disable SELinux in the config file and switch it off for the
# current boot (setenforce).  No-op when SELinux is absent or not enforcing.
disable_selinux(){
    # -q: we only need grep's exit status; the original leaked the matched
    # config line to stdout.
    if [ -s /etc/selinux/config ] && grep -q 'SELINUX=enforcing' /etc/selinux/config; then
        sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
        setenforce 0
    fi
}
preinstall_conf(){
# set admin maigun
read -p "(Please enter your mailgun baseUrl: https://api.mailgun.net/v3/xx.xxx.xxx):" baseUrl
read -p "(Please enter your maigun apiKey: xxxxxxxxxxxxxxx-xxxxxx-xxxxxxx):" apiKey
# set domain and email for caddyfile
echo
read -p "Please input your domain name for vps:" domain
read -p "Please input your email:" email
echo
echo "---------------------------"
echo "domain = ${domain}"
echo "email = ${email}"
echo "---------------------------"
echo
# set CLOUDFLARE_EMAIL and CLOUDFLARE_API_KEY
echo
read -p "Please input your CLOUDFLARE_EMAIL:" CLOUDFLARE_EMAIL
read -p "Please input your CLOUDFLARE_API_KEY:" CLOUDFLARE_API_KEY
echo
echo "---------------------------"
echo "CLOUDFLARE_EMAIL = ${CLOUDFLARE_EMAIL}"
echo "CLOUDFLARE_API_KEY = ${CLOUDFLARE_API_KEY}"
echo "---------------------------"
echo
}
create_ssmgr_conf(){
mkdir /root/.ssmgr/
cat > /root/.ssmgr/webgui.yml<<EOF
type: m
manager:
address: 1.2.3.4:5
password: 1.2.3.4:5
plugins:
flowSaver:
use: true
user:
use: true
account:
use: true
email:
use: true
type: 'mailgun'
baseUrl: '${baseUrl}'
apiKey: '${apiKey}'
webgui:
use: true
host: '127.0.0.1'
port: '8080'
site: 'http://${domain}'
db: 'webgui.sqlite'
redis:
host: '127.0.0.1'
port: 6379
password: ''
db: 0
EOF
}
npm_install_ssmgr_pm2(){
apt-get update && apt-get install curl -y
curl -sL https://deb.nodesource.com/setup_8.x | bash -
apt-get install -y nodejs
npm i -g shadowsocks-manager --unsafe-perm
npm i -g pm2
}
install_caddy(){
curl https://getcaddy.com | bash -s personal tls.dns.cloudflare
mkdir /etc/caddy
cat > /etc/caddy/Caddyfile<<-EOF
${domain} {
proxy / http://127.0.0.1:8080 {
transparent
}
gzip
tls {
dns cloudflare
}
}
EOF
cat <<EOF > /etc/systemd/system/caddy.service
[Unit]
Description=Caddy HTTP/2 web server
Documentation=https://caddyserver.com/docs
After=network-online.target
Wants=network-online.target systemd-networkd-wait-online.service
[Service]
ExecStart=/usr/local/bin/caddy -log stdout -conf=/etc/caddy/Caddyfile -email=${email} -agree=true
Restart=always
User=root
Environment=CLOUDFLARE_EMAIL=${CLOUDFLARE_EMAIL}
Environment=CLOUDFLARE_API_KEY=${CLOUDFLARE_API_KEY}
[Install]
WantedBy=multi-user.target
EOF
systemctl enable caddy && systemctl start caddy
}
install_redis-server(){
apt-get install redis-server -y
systemctl enable redis-server && service redis-server start
}
install_ssmgr_ui_cf(){
echo
echo "+---------------------------------------------------------------+"
echo "One-key for ssmgr_ui_cf"
echo "+---------------------------------------------------------------+"
echo
preinstall_conf
create_ssmgr_conf
npm_install_ssmgr_pm2
install_redis-server
pm2 -f -x -n ssmgr-webgui start ssmgr -- -c /root/.ssmgr/webgui.yml
pm2 startup && pm2 save
install_caddy
}
disable_selinux
install_ssmgr_ui_cf
echo "All done! Enjoy yourself"
pm2 list
| true
|
5500c8cb700de173246255329f041daf97e4be50
|
Shell
|
paalth/kless
|
/deploy/scripts/undeploy-registry.sh
|
UTF-8
| 451
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash

# Render the registry manifest with the target namespace and the optional
# source-registry prefix, then delete it from the cluster.
#
# ${KLESS_SRC_REGISTRY:+...} expands to "<registry>\/" only when the variable
# is set and non-empty, which collapses the original duplicated if/else sed
# branches into a single invocation with identical output in both cases.
REGISTRY_PREFIX="${KLESS_SRC_REGISTRY:+$KLESS_SRC_REGISTRY\/}"

sed -e "s/KLESS_NAMESPACE/$KLESS_NAMESPACE/g" \
    -e "s/KLESS_SRC_REGISTRY/$REGISTRY_PREFIX/g" \
    deploy/prereqs/registry/kless-registry.yaml > /tmp/kless-registry.yaml

kubectl delete -f /tmp/kless-registry.yaml

# -f: don't fail noisily if the temp file was never created.
rm -f /tmp/kless-registry.yaml
| true
|
a6b35bb6c8507d6f109e097b79af67d5071bafad
|
Shell
|
xulongqiu/fat
|
/scripts/fat.sh
|
UTF-8
| 10,378
| 3.0625
| 3
|
[] |
no_license
|
#########################################################################
# File Name: fat.sh
# Author: eric.xu
# mail: eric.xu@libratone.com.cn
# Created Time: 2017-12-06 13:11:44
#########################################################################
#!/system/bin/sh
export BIN_DIR=/system/bin
export DATA_DIR=/data
# Verify that the DUT configuration file and every required test binary
# exist.  Returns 0 when the environment is complete, 1 otherwise.
function __envCheck()
{
    local required
    for required in "${DATA_DIR}/dut_cfg.json" \
                    "${BIN_DIR}/sig_analyzer" \
                    "${BIN_DIR}/sig_generator" \
                    "${BIN_DIR}/audio_test" \
                    "${BIN_DIR}/audio_loop_test"; do
        [ -f "$required" ] || return 1
    done
    return 0
}
# copy configuration file to /data from udisk
function CfgCp()
{
mount -t vfat /dev/block/vold/8\:1 /storage
sync
sleep 1
if [ $# -eq 1 ] && [ -f $1 ]; then
cp $1 ${DATA_DIR}/dut_cfg.json
if [ -f ${DATA_DIR}/dut_cfg.json ]; then
echo "CmdOk"
return 0
fi
fi
echo "CmdFail"
return 1
}
# switch to LineIn and start LineInLoop program
function LineInTestStart()
{
__envCheck
if [ "$?" = "1" ]; then
echo "CmdFail"
return 1
fi
vol=`${BIN_DIR}/dut_paras ${DATA_DIR}/dut_cfg.json line_in iVolume | awk -F : '{print $2}'`
#${BIN_DIR}/tinymix -D 0 2 $vol
${BIN_DIR}/testdsp -f 5#1 -v ${vol}
#${BIN_DIR}/audio_loop_test -C 1 -P 0 -d 0 -p 4096 -r 48000 -f 16 -c 2 -n 4&
${BIN_DIR}/audio_test /data/tmp_loop_test -D 0 -d 0 -P -p 4096 -r 48000 -f 16 -c 2 -n 6 -l 1&
sleep 0.2
${BIN_DIR}/audio_test /data/tmp_loop_test -D 1 -d 0 -C -p 4096 -r 48000 -f 16 -c 2 -n 4 -l 1&
echo "CmdOk"
return 1
}
# kill LineInLoop
function LineInTestEnd()
{
echo "CmdOk"
#ps audio_ |awk '{print $2}'| busybox xargs kill -9
pkill audio_test
return 0
}
# Tx start, get the record parameters, and start record
function TxStart()
{
__envCheck
if [ "$?" = "1" ]; then
echo "CmdFail"
return 1
fi
#get parameters
outfile=`${BIN_DIR}/dut_paras ${DATA_DIR}/dut_cfg.json tx_test tx_record sOutFile | awk -F : '{print $2}'`
#duration=`${BIN_DIR}/dut_paras ${DATA_DIR}/dut_cfg.json tx_test tx_record fDuration | awk -F : '{print $2}' |awk -F . '{print $1}'`
vol=`${BIN_DIR}/dut_paras ${DATA_DIR}/dut_cfg.json tx_test tx_record iVolume | awk -F : '{print $2}' |awk -F . '{print $1}'`
#channels=`${BIN_DIR}/dut_paras ${DATA_DIR}/dut_cfg.json tx_test tx_record iChannels | awk -F : '{print $2}' |awk -F . '{print $1}'`
#samplerate=`${BIN_DIR}/dut_paras ${DATA_DIR}/dut_cfg.json tx_test tx_record iSampeRate | awk -F : '{print $2}' |awk -F . '{print $1}'`
${BIN_DIR}/tinymix -D 2 1 $vol
${BIN_DIR}/tinymix -D 2 2 $vol
${BIN_DIR}/tinymix -D 2 3 $vol
${BIN_DIR}/audio_test $outfile -D 2 -d 0 -C -p 512 -r 16000 -f 16 -c 8 -n 8 &
echo "CmdOk"
return 0
}
# Tx end, end record
function TxEnd()
{
__envCheck
if [ "$?" = "1" ]; then
echo "CmdFail"
return 1
fi
pkill audio_test
echo "CmdOk"
return 0
}
# Tx analyzer
function TxCheck()
{
__envCheck
if [ "$?" = "1" ]; then
echo "CmdFail"
return 1
fi
recordfile=`${BIN_DIR}/dut_paras ${DATA_DIR}/dut_cfg.json tx_test tx_analyzer sInFile | awk -F : '{print $2}'`
retfile=`${BIN_DIR}/dut_paras ${DATA_DIR}/dut_cfg.json tx_test tx_analyzer sRetTxFile | awk -F : '{print $2}'`
if [ -f $recordfile ]; then
${BIN_DIR}/sig_analyzer ${DATA_DIR}/dut_cfg.json 0
if [ -f $retfile ]; then
echo "txRet:"
cat ${retfile}
echo "CmdOk"
return 0
fi
fi
echo "CmdFail"
return 1
}
#TxCp, copy Tx record file to udisk
function TxCp()
{
__envCheck
if [ "$?" = "1" ]; then
echo "CmdFail"
return 1
fi
dstfile=$1
recordfile=`${BIN_DIR}/dut_paras ${DATA_DIR}/dut_cfg.json tx_test tx_record sOutFile | awk -F : '{print $2}'`
if ! [ -d ${dstfile%/*} ]; then
mkdir -p ${dstfile%/*}
fi
if [ -f $recordfile ]; then
cp $recordfile $dstfile
if [ -f $recordfile ]; then
echo "CmdOk"
return 0
fi
fi
echo "CmdFail"
return 1
}
function __Loop()
{
loop_type=$1
#get parameters
c_file=`${BIN_DIR}/dut_paras ${DATA_DIR}/dut_cfg.json closed_loop ${loop_type}_record sOutFile | awk -F : '{print $2}'`
#c_len=`${BIN_DIR}/dut_paras ${DATA_DIR}/dut_cfg.json closed_loop ${loop_type}_record fDuration | awk -F : '{print $2}'`
c_vol=`${BIN_DIR}/dut_paras ${DATA_DIR}/dut_cfg.json closed_loop ${loop_type}_record iVolume | awk -F : '{print $2}' |awk -F . '{print $1}'`
#c_ch=`${BIN_DIR}/dut_paras ${DATA_DIR}/dut_cfg.json closed_loop ${loop_type}_record iChannels | awk -F : '{print $2}' |awk -F . '{print $1}'`
#c_sr=`${BIN_DIR}/dut_paras ${DATA_DIR}/dut_cfg.json closed_loop ${loop_type}_record iSampeRate | awk -F : '{print $2}' |awk -F . '{print $1}'`
p_file=`${BIN_DIR}/dut_paras ${DATA_DIR}/dut_cfg.json closed_loop ${loop_type}_play sInFile | awk -F : '{print $2}'`
p_wait=`${BIN_DIR}/dut_paras ${DATA_DIR}/dut_cfg.json global_paras fLagSecs | awk -F : '{print $2}'`
#p_len=`${BIN_DIR}/dut_paras ${DATA_DIR}/dut_cfg.json closed_loop ${loop_type}_play fPlaySecs | awk -F : '{print $2}'`
p_vol=`${BIN_DIR}/dut_paras ${DATA_DIR}/dut_cfg.json closed_loop ${loop_type}_play iVolume | awk -F : '{print $2}' |awk -F . '{print $1}'`
#p_ch=`${BIN_DIR}/dut_paras ${DATA_DIR}/dut_cfg.json closed_loop ${loop_type}_play iChannels | awk -F : '{print $2}' |awk -F . '{print $1}'`
#p_sr=`${BIN_DIR}/dut_paras ${DATA_DIR}/dut_cfg.json closed_loop ${loop_type}_play iSampeRate | awk -F : '{print $2}' |awk -F . '{print $1}'`
${BIN_DIR}/testdsp -f 5#1 -v ${p_vol}
#${BIN_DIR}/tinymix -D 0 2 $p_vol
${BIN_DIR}/tinymix -D 2 1 $c_vol
${BIN_DIR}/tinymix -D 2 2 $c_vol
${BIN_DIR}/tinymix -D 2 3 $c_vol
if [ -f $p_file ]; then
${BIN_DIR}/audio_test $c_file -D 2 -d 0 -C -p 512 -r 16000 $ -f 16 -c 8 -n 8 &
sleep $p_wait
#${BIN_DIR}/audio_test $p_file -D 0 -d 0 -P -p 4096 -r 16000 -f 16 -c 2 -n 6
${BIN_DIR}/fatplayer ${p_file}
pkill audio_test
if [ -f $c_file ]; then
return 0
fi
fi
return 1
}
# loop test, dut play and record itself
function LoopStart()
{
__envCheck
if [ "$?" = "1" ]; then
echo "CmdFail"
return 1
fi
#generator signals
${BIN_DIR}/sig_generator ${DATA_DIR}/dut_cfg.json
#fr play record
__Loop fr
if [ "$?" = "1" ]; then
echo "CmdFail"
return 1
fi
#ncd play record
__Loop ncd
if [ "$?" = "1" ]; then
echo "CmdFail"
return 1
fi
echo "CmdOk"
return 0
}
#LoopCheck
function LoopCheck()
{
__envCheck
if [ "$?" = "1" ]; then
echo "CmdFail"
return 1
fi
if [ "$1" != "fr" ] && [ "$1" != "ncd" ]; then
echo "CmdFail"
return 1
fi
recordfile=`${BIN_DIR}/dut_paras ${DATA_DIR}/dut_cfg.json closed_loop ${1}_analyzer sInFile | awk -F : '{print $2}'`
if [ -f $recordfile ]; then
if [ "$1" = "fr" ]; then
retfrfile=`${BIN_DIR}/dut_paras ${DATA_DIR}/dut_cfg.json closed_loop ${1}_analyzer sRetFrFile | awk -F : '{print $2}'`
retthdfile=`${BIN_DIR}/dut_paras ${DATA_DIR}/dut_cfg.json closed_loop ${1}_analyzer sRetThdFile | awk -F : '{print $2}'`
${BIN_DIR}/sig_analyzer ${DATA_DIR}/dut_cfg.json 1
if [ -f ${retfrfile} ] && [ -f ${retthdfile} ]; then
echo "frRetFr:"
cat ${retfrfile}
echo "frRetThd:"
cat ${retthdfile}
echo "CmdOk"
return 0
fi
else
retncdfile=`${BIN_DIR}/dut_paras ${DATA_DIR}/dut_cfg.json closed_loop ${1}_analyzer sRetNcdFile | awk -F : '{print $2}'`
${BIN_DIR}/sig_analyzer ${DATA_DIR}/dut_cfg.json 2
if [ -f ${retncdfile} ]; then
echo "ncdRet:"
cat ${retncdfile}
echo "CmdOk"
return 0
fi
fi
fi
echo "CmdFail"
}
#copy rf ncd record file to udisk
function LoopCp()
{
__envCheck
if [ "$?" = "1" ]; then
echo "CmdFail"
return 1
fi
dstfile=$1
fr_c_file=`${BIN_DIR}/dut_paras ${DATA_DIR}/dut_cfg.json closed_loop fr_record sOutFile | awk -F : '{print $2}'`
ncd_c_file=`${BIN_DIR}/dut_paras ${DATA_DIR}/dut_cfg.json closed_loop ncd_record sOutFile | awk -F : '{print $2}'`
if ! [ -d ${dstfile%/*} ]; then
mkdir -p ${dstfile%/*}
fi
dstfile=${dstfile%.*}
if [ -f $fr_c_file ] && [ -f $ncd_c_file ]; then
cp $fr_c_file ${dstfile}_fr.wav
cp $ncd_c_file ${dstfile}_ncd.wav
if [ -f ${dstfile}_fr.wav ] && [ -f ${dstfile}_ncd.wav ]; then
echo "CmdOk"
return 0
fi
fi
echo "CmdFail"
return 1
}
function LedTest()
{
echo "CmdOk"
${BIN_DIR}/led_test all 255
echo "CmdOk"
return 0
}
function LedTestEnd()
{
${BIN_DIR}/led_test all 0
echo "CmdOk"
return 0
}
# Read the touch controller's "sepres" register dump over sysfs and print
# its mask, falling back to "ffff" when the node yields nothing.
# Always reports CmdOk (the mask value itself carries the result).
function TouchTest()
{
    val=`cat /sys/devices/11060000.i2c/i2c-1/1-0008/control/sepres_regs`
    if [ "$val" = "" ]; then
        val="ffff"
    else
        # Concatenate fields 3 and 4 of the dump.
        # NOTE(review): assumed these hold the mask bytes -- confirm against
        # the driver's sysfs output format.
        val=`echo $val|awk '{print $3$4}'`
    fi
    echo MASK: $val
    echo "CmdOk"
    return 0
}
# Probe every device registered on the i2c buses via sysfs and report each
# one's online status.  Prints "CmdOk" when all devices are online,
# otherwise "CmdFail <pass>/<all>".
function ScanI2c()
{
    pass=0
    all=0
    for file in `ls /sys/bus/i2c/devices`; do
        # Skip the bus entries themselves (named "i2c-N"); only probe devices.
        if [ "${file%-*}" == "i2c" ]; then
            continue
        fi
        name=`cat /sys/bus/i2c/devices/$file/name`
        online=`cat /sys/bus/i2c/devices/$file/online`
        echo "$name($file)=$online"
        ((all+=1))
        if [ "$online" = "1" ]; then
            ((pass+=1))
        fi
    done
    if [ "$all" = "$pass" ]; then
        echo "CmdOk"
        return 0
    fi
    echo "CmdFail $pass/$all"
    return 1
}
# Report the battery charge percentage, falling back to 95 when the sysfs
# capacity node is unavailable (e.g. bench setups without a battery).
# Output: "percent:<n>" then "CmdOk"; always returns 0.
function BatteryPercent()
{
    local capacity_node=/sys/class/power_supply/battery/capacity
    levl=95
    [ -f "$capacity_node" ] && levl=$(cat "$capacity_node")
    echo "percent:${levl}"
    echo "CmdOk"
    return 0
}
function PowerAll()
{
echo "CmdOk"
}
function SinglePlayStart()
{
echo "CmdOk"
}
function SinglePlayEnd()
{
echo "CmdOk"
}
| true
|
4295a1559b5ccd2c94d59752dcb0c04608ebbc01
|
Shell
|
jktr/qbar-blocks
|
/block-audio
|
UTF-8
| 892
| 3.4375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# SPDX-License-Identifier: GPL-3.0-or-later
# depends: pulsemixer mpc
readonly pulse_mute="$(pulsemixer --get-mute)"
readonly pulse_volume="$(pulsemixer --get-volume)"
readonly pulse_input_count="$(pulsemixer --list-sinks|grep input|wc -l)"
readonly mpd_state="$(mpc status)"
if [ -n "$pulse_volume" ]; then
[ "$pulse_input_count" -gt 0 ] && echo -n '<active>'
[ "$pulse_mute" = 0 ] && echo -n '🔉︎ ' || echo -n '<warning>🔇︎ '
echo -n "${pulse_volume:0:2}%"
[ "$pulse_mute" = 1 ] && echo -n '</warning>'
[ "$pulse_input_count" -gt 0 ] && echo -n '</active>'
fi
if [ "$(<<< "$mpd_state" tail -1|cut -d' ' -f2)" != 'n/a' ]; then
<<< "$mpd_state" grep -q '\[playing\]' && echo -n ' <active>'
echo -n "♫ $(<<< "$mpd_state" tail -1|awk '{print $2}')"
<<< "$mpd_state" grep -q '\[playing\]' && echo -n '</active>'
fi
echo
exit 0
| true
|
d70478933d2ff6bcc4ab5334afb141ebfb4b4799
|
Shell
|
xtophd/RHEL7-Workshop
|
/playbooks/templates/cheat-ansible-motd-defaults.j2
|
UTF-8
| 212
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Cheat script: writes the default-variables file for the 'motd' ansible
# role, then echoes the result back for verification.
FILENAME="/root/roles/motd/defaults/main.yml"
# NOTE(review): the trailing apostrophe in this message looks like a typo --
# left untouched because it is runtime output text.
echo "# Creating file ${FILENAME}'"
cat > ${FILENAME} << EOF
system_owner: root@workstation.example.com
EOF
echo "# Here are the contents of ${FILENAME}"
cat ${FILENAME}
| true
|
8af3a5d57bf3def93514bfe18f25840d5470a237
|
Shell
|
cyberark/conjur-api-ruby
|
/ci/configure_v5.sh
|
UTF-8
| 432
| 2.75
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash -e
source ./ci/oauth/keycloak/keycloak_functions.sh
cat << "CONFIGURE" | docker exec -i $(docker-compose ps -q conjur_5) bash
set -e
for _ in $(seq 20); do
curl -o /dev/null -fs -X OPTIONS http://localhost > /dev/null && break
echo .
sleep 2
done
# So we fail if the server isn't up yet:
curl -o /dev/null -fs -X OPTIONS http://localhost > /dev/null
CONFIGURE
fetch_keycloak_certificate
create_keycloak_users
| true
|
0ecb4342000b3c991402c3f591393b30ec7f5242
|
Shell
|
jandc3845/packer-kali
|
/scripts/vmtools.sh
|
UTF-8
| 1,651
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash -eux
if test -f linux.iso ; then
echo "Installing VMware Tools"
apt-get install -y linux-headers-$(uname -r) build-essential perl
cd /tmp
mkdir -p /mnt/cdrom
mount -o loop /home/vagrant/linux.iso /mnt/cdrom
tar zxvf /mnt/cdrom/VMwareTools-*.tar.gz -C /tmp/
/tmp/vmware-tools-distrib/vmware-install.pl -d
rm /home/vagrant/linux.iso
umount /mnt/cdrom
rmdir /mnt/cdrom
elif test -f .vbox_version ; then
#!/bin/bash
apt="apt-get -qq -y"
set -x
if [ ! -e /home/vagrant/.vbox_version ] ; then
exit 0
fi
# VirtualBox Additions
# kernel source is needed for vbox additions
$apt install linux-headers-$(uname -r) build-essential dkms
if [ -f /etc/init.d/virtualbox-ose-guest-utils ] ; then
# The netboot installs the VirtualBox support (old) so we have to
# remove it
/etc/init.d/virtualbox-ose-guest-utils stop
rmmod vboxguest
$apt purge virtualbox-ose-guest-x11 virtualbox-ose-guest-dkms \
virtualbox-ose-guest-utils
elif [ -f /etc/init.d/virtualbox-guest-utils ] ; then
/etc/init.d/virtualbox-guest-utils stop
$apt purge virtualbox-guest-utils virtualbox-guest-dkms virtualbox-guest-x11
fi
# Installing the virtualbox guest additions
VBOX_VERSION=$(cat /home/vagrant/.vbox_version)
VBOX_ISO=/home/vagrant/VBoxGuestAdditions_${VBOX_VERSION}.iso
cd /tmp
if [ ! -f $VBOX_ISO ] ; then
wget -q http://download.virtualbox.org/virtualbox/${VBOX_VERSION}/VBoxGuestAdditions_${VBOX_VERSION}.iso \
-O $VBOX_ISO
fi
mount -o loop $VBOX_ISO /mnt
sh /mnt/VBoxLinuxAdditions.run
umount /mnt
rm $VBOX_ISO
fi
| true
|
f338181bcd056234fe9ca8a9ec4b109c9f763e9a
|
Shell
|
o-o-overflow/dc2019f-mirror-universe
|
/local-tester/check_admin_accessible.sh
|
UTF-8
| 609
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash -e
# Overwrite the check-admins function to return true
cat <<EOF > /home/cadr/lispm.init
(advise http:check-admins :around test-admin 0
t)
EOF
# Start the service
/usr/bin/supervisord &
sleep 10
until curl -m 10 http://localhost:5000/debug | grep "user: CADR" ; do
printf '.'
sleep 10
done
sleep 10
PAYLOAD="%22hello%20world%22"
OUTPUT=$(curl -v -m 30 $(python3 -c "print('http://localhost:5000/\x08?cmd=$PAYLOAD')"))
echo "$OUTPUT"
(echo "$OUTPUT" | grep "hello world") || (echo "PUBLIC: The admin cannot access the admin functionality"; exit -1)
kill -9 $(jobs -p) || true
exit 0
| true
|
7d127e271424cf741d74b2ba8de76157ec5d7a7d
|
Shell
|
itaraju/tips
|
/vlist/download-check.sh
|
UTF-8
| 358
| 2.921875
| 3
|
[] |
no_license
|
#! /bin/bash
# Post-processing for downloader output captured in out.txt: list the newly
# downloaded files and append their SHA-256 sums to sha256.txt.
# new files
# NOTE(review): 'cut -f2 -d:' truncates destination paths that themselves
# contain a colon -- confirm paths are colon-free.
echo "List of new files:"
grep -e '^\[download\] Destination\:' out.txt | cut -f2 -d: | sed 's/^ /"/;s/$/"/'
# checksum for new files (same extraction, piped through shasum)
grep -e '^\[download\] Destination\:' out.txt | cut -f2 -d: | sed 's/^ /"/;s/$/"/' | xargs shasum -a 256 >> sha256.txt
echo ""
echo "New files appended to sha256.txt. You may want to: rm out.txt"
| true
|
b5a91e66f0da091ef80ec77b4486b769e98de84f
|
Shell
|
mgavinr/apphome
|
/scripts/makeg
|
UTF-8
| 3,836
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
#--------------------------------------------------------#
# Setup
#--------------------------------------------------------#
#set -e
UNIT=0
CLEAN=0
CONAN=0
GIT=0
MAKE=1
SETUP=1
LOCATION="./"
#--------------------------------------------------------#
# Args
#--------------------------------------------------------#
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
-u|--unit)
UNIT=1
shift # past
;;
-c|--clean)
CLEAN=1
shift # past
;;
-g|--git)
GIT=1
shift # past
;;
-i|--install)
CONAN=1
MAKE=0
SETUP=0
shift # past
;;
-l|--location)
LOCATION="${2}"
shift # past argument=value
shift # past argument=value
;;
*)
echo "Unknown option!"
shift # past argument=value
;;
esac
done
if [ -d $LOCATION ]
then
echo "Find project $LOCATION"
cdir=`pwd`/$LOCATION
cd $cdir
bdir=$cdir/build.gcc
else
echo "Find project $LOCATION not found"
UNIT=0
CLEAN=0
MAKE=0
SETUP=0
ls -trdF ams*
fi
#--------------------------------------------------------#
# Check location
#--------------------------------------------------------#
origloc=`pwd`
while true; do
echo "# Find project `pwd` "
ls -d .git
RESULT=$?
if [ $RESULT -eq 0 ]; then
echo "# Find project `pwd` ** Building `pwd` **"
break
else
cd ..
if [[ `pwd` == "/" ]]; then
cd $origloc
break
fi
fi
done
cdir=`pwd`
cd $cdir
bdir=$cdir/build.gcc
pwd
#--------------------------------------------------------#
# Run clean
#--------------------------------------------------------#
if [ $CLEAN -eq 1 ]
then
rm -rf $bdir
fi
#--------------------------------------------------------#
# Run setup
#--------------------------------------------------------#
if [ $SETUP -eq 1 ]
then
if [ -d "$bdir" ]
then
echo "Directory $bdir exists."
cd $bdir
else
echo "Error: Directory $bdir does not exists."
mkdir $bdir
cd $bdir
conan install .. --generator cmake --build=missing --profile=gcc
#~/cmake -G "Unix Makefiles" ../src
~/cmake -DCMAKE_BUILD_TYPE=Debug -G "Unix Makefiles" ../src
fi
fi
#--------------------------------------------------------#
# Run git
#--------------------------------------------------------#
if [ $GIT -eq 1 ]
then
git pull origin master
fi
#--------------------------------------------------------#
# Run make
#--------------------------------------------------------#
if [ $MAKE -eq 1 ]
then
make VERBOSE=1 |& tee /tmp/b.log
grep '\*\*\*' /tmp/b.log && echo "MAKE FAILED"
grep '\*\*\*' /tmp/b.log && UNIT=0
fi
#--------------------------------------------------------#
# Run test
#--------------------------------------------------------#
if [ $UNIT -eq 1 ]
then
echo "Running tests.."
~/ctest --output-on-failure |& tee /tmp/t.log
echo "RE-Running tests for log.."
~/ctest --full-trace --output-on-failure -VV --output-log /tmp/tt.log > /dev/null 2>&1
echo "Done"
ls -latrd /tmp/tt.log
echo "PASS `cat /tmp/tt.log | grep '\[ OK \]' | grep " ms" |wc -l`"
echo "FAIL `cat /tmp/tt.log | grep '\[ FAILED \]' | grep " ms" |wc -l`"
grep -n '\[ FAILED \]' /tmp/tt.log | grep " ms" | grep ":\["
fi
#--------------------------------------------------------#
# Run conan install
#--------------------------------------------------------#
if [ $CONAN -eq 1 ]
then
echo "Running normal conan build"
rm -rf $bdir
pwd
mycwd=`pwd`
mkdir build.gcc
cd build.gcc
conan install ../src --generator cmake --build=missing --profile ../profiles/gcc
~/cmake -G Ninja ../src
~/ninja
echo "Running conan create gavin/dev"
cd $mycwd
conan create . gavin/dev
fi
| true
|
03e02d34bf5bd09e9e29756fe073c6d2663492a7
|
Shell
|
zwbetz-gh/dotfiles
|
/dotfiles/.common_prompt.sh
|
UTF-8
| 338
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
# See https://www.gnu.org/software/bash/manual/html_node/Controlling-the-Prompt.html

# Print the current git branch name, or nothing when not inside a repo.
# NOTE(review): 'local branch=$(...)' masks git's exit status; harmless here
# because the -n test on the captured output is what gates the echo.
bfl_current_git_branch() {
  local branch=$(git rev-parse --abbrev-ref HEAD 2>/dev/null)
  if [[ -n ${branch} ]] ; then
    echo "${branch}"
  fi
}

# Prompt: user host cwd(green) branch(yellow), then '$' on a new line.
# The \[..\] wrappers mark non-printing escape sequences for bash.
export PS1='\u \h \[\033[0;32m\]\w\[\033[0m\] \[\033[0;33m\]$(bfl_current_git_branch)\[\033[0m\]\n\$ '
| true
|
2e6423598a146e40bb05622238595d2304e66279
|
Shell
|
marcelosenabr/bashtemplate
|
/dependencias.sh
|
UTF-8
| 2,263
| 3.796875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Read the package list (whitespace-separated names) from the
# "dependencias" file in the current directory.
dependencias=$(cat dependencias)

# installed PKG
# Succeeds (exit 0) when dpkg reports PKG as "ok installed".
# The original used `return $(dpkg-query ... | awk ...)`, an
# antipattern that round-trips the status through stdout; grep -q
# yields the same exit code directly and prints nothing, so the
# existing `if ! $(installed pkg)` call sites keep working unchanged.
installed() {
    dpkg-query -W -f '${Status}\n' "${1}" 2>&1 | grep -q 'ok installed'
}
# check_dependencias PKG...
# Partition the given packages into installed/pending lists (via the
# `installed` helper), report both, apt-get install the pending ones,
# then re-check and report the final state.
# Relies on logging helpers defined elsewhere in the project:
# info_log, sucesso, erro, aviso, nota, msg_cor -- TODO confirm they
# are sourced before this script runs.
check_dependencias() {
    depende="$@"
    instalar_pacotes=""
    instalado=""
    # First pass: classify every package.
    for pkg in ${depende[@]}; do
        if ! $(installed $pkg); then
            instalar_pacotes+=" $pkg"
        else
            instalado+=" $pkg"
        fi
    done
    # Report packages that are already installed.
    if [ ! -z "$instalado" ]; then
        info_log "Pacotes Instalados $(msg_cor "[ " white)$(msg_cor "✓" green)$(msg_cor " ]" white)"
        for inst in ${instalado[@]}; do
            sucesso "${inst}" 'ok ✓'
        done
    fi
    # Report packages still missing.
    if [ ! -z "$instalar_pacotes" ]; then
        echo
        info_log "Pacotes Pendente $(msg_cor "[ " white)$(msg_cor "✖" red)$(msg_cor " ]" white)"
        for inst_pac in ${instalar_pacotes[@]}; do
            erro "${inst_pac}" '✖'
        done
    fi
    echo
    if [ ! -z "$instalar_pacotes" ]; then
        info_log "Instalar Dependências"
        # Install each pending package one at a time.
        for instPac in ${instalar_pacotes[@]}; do
            echo
            aviso "$(msg_cor "Instalando" white) $(msg_cor "$instPac" cyan)"
            echo
            sudo apt-get install -qy $instPac
        done
        # Second pass: re-classify to verify the installs succeeded.
        instalar_pacotes=""
        instalado=""
        for pkg in ${depende[@]}; do
            if ! $(installed $pkg); then
                instalar_pacotes+=" $pkg"
            else
                instalado+=" $pkg"
            fi
        done
        clear
        if [ ! -z "$instalado" ]; then
            info_log "Pacotes Instalados $(msg_cor "[ " white)$(msg_cor "✓" green)$(msg_cor " ]" white)"
            for inst in ${instalado[@]}; do
                sucesso "${inst}" 'ok ✓'
            done
        fi
        # Anything still pending after apt-get ran is an error.
        if [ ! -z "$instalar_pacotes" ]; then
            echo
            erro "Erro Na Instalação dos Pacotes" '✖'
            echo
        else
            echo
            nota "Todos os Pacotes Instalando $(msg_cor "[ " white)$(msg_cor "✓" green)$(msg_cor " ]" white)" "─►"
            echo
        fi
    else
        nota "Todos os Pacotes Instalando $(msg_cor "[ " white)$(msg_cor "✓" green)$(msg_cor " ]" white)" "─►"
        echo
    fi
}
# Entry point: check everything listed in the dependencias file.
check_dependencias "${dependencias[@]}"
| true
|
1cf1ca137fec8adc320a6132802e04bfe1effa7e
|
Shell
|
elicorrales/Fort.Laud.Robotics.Meetup.Group
|
/Meetup.4/mjgp-streamer/start.stream.live.feed.sh
|
UTF-8
| 599
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Start an mjpg-streamer live feed.
# Usage: start.stream.live.feed.sh DEVICE PORT RESOLUTION [FPS]
#   DEVICE      video device (e.g. /dev/video0)
#   PORT        HTTP port for output_http.so
#   RESOLUTION  capture resolution (e.g. 640x480)
#   FPS         frames per second (default 30)
if [ -z "$1" ];
then
    echo;echo; echo "Missing device name";echo;echo; exit 1;
fi;
if [ -z "$2" ];
then
    echo;echo; echo "Missing Port ";echo;echo; exit 1;
fi;
if [ -z "$3" ];
then
    echo;echo; echo "Missing Resolution ";echo;echo; exit 1;
fi;

device="$1";
port="$2";
resolution="$3";
# ${4:-30}: use the 4th argument when given and non-empty, otherwise 30
# (same effect as the original `if [ "$4" != "" ]` check).
FPS="${4:-30}";

echo;
echo "$device $port $resolution $FPS";
echo;

mjpg_streamer \
    -i "input_uvc.so -r $resolution -d $device -f $FPS -q 80 -timeout 10" \
    -o "output_http.so -p $port -w /home/devchu/public_html"
| true
|
ec9b132786dc66ca36e63c7e6746bcbdb614bd3d
|
Shell
|
sherpya/mplayer-be
|
/packages/libmodplug/build.sh
|
UTF-8
| 297
| 2.671875
| 3
|
[
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Build script for ModPlug library
# Copyright (c) 2013 Gianluigi Tiesi <sherpya@netfarm.it>
# See LICENSE for licensing informations
# Upstream repository fetched/updated by the shared build helpers.
GIT_REPO=https://github.com/Konstanty/libmodplug.git
# Pull in git_clean/pkg_build and friends from the project-wide helper
# library that lives next to the packages/ tree.
. $(dirname $0)/../functions.sh
# Work-tree subdirectory to build in, and the static libs the helpers
# are expected to produce.
BUILDDIR="libmodplug"
STATICLIBS="libmodplug"
# Clean, build, clean again so no build artifacts are left behind.
git_clean && pkg_build && git_clean
| true
|
0d1d80b929b7c4809ae86c26c48d5493356db5bf
|
Shell
|
BarcaLS/who-at-home
|
/who-at-home.sh
|
UTF-8
| 6,910
| 3.96875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# who-at-home.sh
#
# Script which allows you to check if specified host is alive and send information about it to server.
# For example it can be used to check which householder is at home or outside the home (and how long) just by specifying his or her cellphone's IP.
# It's designed to be run on your LAN's server and to upload information to external server.
# Logging via keys is necessary.
# Script should be run from cron, e.g. every 1 minute. It runs constantly, can't be run in multiple instances.
# Script may have to be run by root (e.g. when only root can use sockets - necessary for "ping" command).

############
# Settings #
############

LOCAL_USER="user" # local user on this host who can keylessly ssh to SSH_HOST
SSH_HOST="somebody@host.com" # host to connect to
SSH_DIR="/home/user/domains/host.com/public_html/householders" # remote directory to upload status information
SCRIPT_DIR="/volume1/homes/user/scripts/who-at-home" # directory with this script
SENDER="Home <home@host.com>" # sender of e-mail
SENDEMAIL="/home/user/scripts/sendEmail/sendEmail" # path on $SSH_HOST to sendEmail (needed to send e-mail)
SMTP_SERVER="mail.host.com" # SMTP server
SMTP_USER="user@host.com" # SMTP user
# NOTE(review): the SMTP password is stored in plain text and later
# passed on a remote command line (visible in `ps`); consider a
# secrets file or a credentials mechanism on the remote host.
SMTP_PASS="password" # SMTP password
CHECK_CMD="ping -c 1 -W 30 \$LOCAL_IP" # command to check host's status, \$LOCAL_IP will be replaced by script with IP to check
# examples:
# arping -I eth0 -c 1 -w 30 \$LOCAL_IP
# ping -c 1 -W 30 \$LOCAL_IP
# traceroute -m 1 \$LOCAL_IP | grep \" ms\"; ( exit \${PIPESTATUS[1]} )
PAUSE="10" # pause in seconds after checking queue of householders

# hosts to check:
# 1) IP address or hostname
# 2) arbitrary name
# 3) e-mail address to send notification about that householder; you can specify more addresses separating them with comas
# 4) IP address or hostname to check if it is at home (notification won't be sent when recipient is at home because he knows best who leaves or enters into that house);
# typically you'll want to enter here phone belonging to person which e-mail address you precised
# Example: 192.168.66.50 Wife husband@home.com,daughter@home.com husband.phone.home.com
# NOTE(review): the main loop consumes exactly 4 array entries per
# record (COUNTER advances by 4), but the sample record below contains
# 5 whitespace-separated words -- the two addresses were presumably
# meant to be one coma-separated word; confirm before relying on it.
PARAMETERS=(
192.168.66.50 Wife husband@home.com daughter@home.com husband.phone.home.com
)

#############
# Main Part #
#############
while true; do
  # check if this script is currently running
  # (-gt 2 because `ps aux | grep` also matches this very process and
  # the grep itself before the -v filters run)
  NUMBER_OF_THIS_SCRIPTS_RUNNING=`ps aux | grep who-at-home.sh | grep -v grep | grep -v sudo | wc -l`
  if [ "$NUMBER_OF_THIS_SCRIPTS_RUNNING" -gt 2 ]; then
    echo "This script is currently running. Exiting."; exit
  fi

  # function executed when householder appeared or disappeared at home
  # Reads the caller's loop variables: CHECK_CMD,
  # WHO_CANT_BE_AT_HOME_TO_NOTIFY, LOCAL_NAME, STATUS, DAY, HOUR, EMAIL.
  # Sends the e-mail only when the notification recipient's own device
  # is NOT reachable at home (no point notifying someone who is there).
  appeared_or_disappeared () {
    CURRENT_CHECK_CMD=`echo $CHECK_CMD | sed -e "s/\\$LOCAL_IP/$WHO_CANT_BE_AT_HOME_TO_NOTIFY/g"` # insert $LOCAL_IP instead of LOCAL_IP string
    eval $CURRENT_CHECK_CMD &>/dev/null; AT_HOME=$?
    if [ "$AT_HOME" != "0" ]; then # recipient of notification is away from home (checking)
      MSG="$LOCAL_NAME $STATUS $DAY $HOUR"
      # The sendEmail binary lives on the REMOTE host; the whole command
      # line (including credentials) is executed there over ssh.
      CMD="$SENDEMAIL -q -f \"$SENDER\" -t $EMAIL -u \"$MSG\" -m \" \" -s $SMTP_SERVER -o tls=no -o message-charset=utf-8 -xu $SMTP_USER -xp $SMTP_PASS"
      echo "$WHO_CANT_BE_AT_HOME_TO_NOTIFY is outside the house so I'm sending e-mail to $EMAIL."
      sudo -u $LOCAL_USER ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -A $SSH_HOST "$CMD" &>/dev/null
      # save to log and rotate logs to last 1000 lines
      echo "$DAY $HOUR $LOCAL_NAME $STATUS" >> $SCRIPT_DIR/logs/emails.log
      TMP=$(tail -n 1000 $SCRIPT_DIR/logs/emails.log)
      echo "${TMP}" > $SCRIPT_DIR/logs/emails.log
    fi
  }

  # Walk PARAMETERS four entries at a time (one householder record:
  # ip, name, email, recipient's-own-device).
  NUMBER=${#PARAMETERS[@]}
  NUMBER=`expr $NUMBER - 1`
  COUNTER=-1
  while [ ! "$COUNTER" = "$NUMBER" ]
  do
    LOCAL_IP="${PARAMETERS[$COUNTER + 1]}"
    LOCAL_NAME="${PARAMETERS[$COUNTER + 2]}"
    EMAIL="${PARAMETERS[$COUNTER + 3]}"
    WHO_CANT_BE_AT_HOME_TO_NOTIFY="${PARAMETERS[$COUNTER + 4]}"
    # checking if alive
    echo -n "Checking $LOCAL_NAME - $LOCAL_IP. Status: "
    CURRENT_CHECK_CMD=`echo $CHECK_CMD | sed -e "s/\\$LOCAL_IP/\$LOCAL_IP/g"` # insert $LOCAL_IP instead of LOCAL_IP string
    eval $CURRENT_CHECK_CMD &>/dev/null; STATE=$?
    if [ "$STATE" != "0" ]; then STATE=1; fi # command results 1 if host is down and 0 if host is up
    echo "$STATE"
    # getting and saving current data
    DAY=`date +"%Y-%m-%d"`
    HOUR=`date +"%H:%M:%S"`
    echo "$DAY $HOUR $STATE" >> $SCRIPT_DIR/logs/$LOCAL_NAME.log
    # rotate logs to last 1000 lines
    # NOTE(review): the comment above says 1000 but this keeps 10000
    # lines -- confirm which retention was intended.
    TMP=$(tail -n 10000 $SCRIPT_DIR/logs/$LOCAL_NAME.log)
    echo "${TMP}" > $SCRIPT_DIR/logs/$LOCAL_NAME.log
    # reading last 12 lines of data into STATE_LAST1..STATE_LAST12
    tail -n 12 $SCRIPT_DIR/logs/$LOCAL_NAME.log > $SCRIPT_DIR/logs/tmp.log
    LINE=1
    while read -r DAY_TMP HOUR_TMP STATE_LAST; do
      case "$LINE" in
        1) STATE_LAST1=$STATE_LAST;
        ;;
        2) STATE_LAST2=$STATE_LAST
        ;;
        3) STATE_LAST3=$STATE_LAST
        ;;
        4) STATE_LAST4=$STATE_LAST
        ;;
        5) STATE_LAST5=$STATE_LAST
        ;;
        6) STATE_LAST6=$STATE_LAST
        ;;
        7) STATE_LAST7=$STATE_LAST
        ;;
        8) STATE_LAST8=$STATE_LAST
        ;;
        9) STATE_LAST9=$STATE_LAST
        ;;
        10) STATE_LAST10=$STATE_LAST
        ;;
        11) STATE_LAST11=$STATE_LAST
        ;;
        12) STATE_LAST12=$STATE_LAST
        ;;
      esac
      (( LINE ++ ))
    done < $SCRIPT_DIR/logs/tmp.log
    # householder has appeared or disappeared
    # Eleven identical readings followed by one different reading mean
    # the state just flipped (0 = reachable/at home, 1 = away); the
    # STATUS strings "w domu"/"poza domem" are user-facing (Polish for
    # "at home"/"away from home") and go into the e-mail subject.
    CHANGE=""
    if [[ "$STATE_LAST1$STATE_LAST2$STATE_LAST3$STATE_LAST4$STATE_LAST5$STATE_LAST6$STATE_LAST7$STATE_LAST8$STATE_LAST9$STATE_LAST10$STATE_LAST11$STATE_LAST12" =~ 111111111110 ]]; then STATUS="w domu"; appeared_or_disappeared; fi
    if [[ "$STATE_LAST1$STATE_LAST2$STATE_LAST3$STATE_LAST4$STATE_LAST5$STATE_LAST6$STATE_LAST7$STATE_LAST8$STATE_LAST9$STATE_LAST10$STATE_LAST11$STATE_LAST12" =~ 011111111111 ]]; then STATUS="poza domem"; appeared_or_disappeared; fi
    # updating current status on remote server (but only once every minute - due to ssh logs capacity on remote server)
    # householder is in the house if his status was 0 at least one time in last probes (variable STATE)
    SECOND=`date +"%S"`
    if [ "$SECOND" = "00" ] || [ "$SECOND" == "01" ] || [ "$SECOND" == "02" ] || [ "$SECOND" == "03" ]; then # it's 00, 01, 02 or 03 seconds of every minute so we can update current status on remote server
      if [[ "$STATE_LAST1$STATE_LAST2$STATE_LAST3$STATE_LAST4$STATE_LAST5$STATE_LAST6$STATE_LAST7$STATE_LAST8$STATE_LAST9$STATE_LAST10$STATE_LAST11$STATE_LAST12" =~ 0 ]]; then STATE="0"; else STATE="1"; fi
      DATA_UNIX=`date +%s`
      # A tiny PHP snippet holding the timestamp + state, written to the
      # remote web directory for the status page to include.
      TEXT='<?php \$data_unix = \"'$DATA_UNIX'\"; \$state = \"'$STATE'\"; ?>'
      echo "Updating $LOCAL_NAME's status on remote server."
      sudo -u $LOCAL_USER ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -A $SSH_HOST "echo \"$TEXT\" > $SSH_DIR/$LOCAL_NAME.log" &>/dev/null
    fi
    # next iteration
    COUNTER=`expr $COUNTER + 4`
  done
  # pause
  echo "Queue done. Sleeping $PAUSE seconds."
  sleep $PAUSE
done
| true
|
de27f6e349bb846e3a4dd10b2e3945ea66ef6fff
|
Shell
|
billhathaway/r53tool
|
/crossCompileReleases
|
UTF-8
| 660
| 3.546875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# build binary releases for OSX, linux, Windows
# see http://dave.cheney.net/2013/07/09/an-introduction-to-cross-compilation-with-go-1-1
# if you need help setting up your environment for cross-compiling

# Extract the release version from the `const version = "..."` line in
# main.go (field 2 when splitting on double quotes).
version=$(grep -i 'const version' main.go | awk -F\" '{ print $2 }')

for os in darwin linux windows
do
    for arch in amd64
    do
        echo "building for v${version} ${os} ${arch}"
        # Test the build's exit status directly. The original checked
        # `[ $? -ne -0 ]`, where "-0" was a typo for "0" (it only
        # compared equal to zero by accident of arithmetic parsing).
        if ! GOOS=${os} GOARCH=${arch} go build -o "r53tool.v${version}.${os}.${arch}"
        then
            echo "cross-compile failed, see http://dave.cheney.net/2013/07/09/an-introduction-to-cross-compilation-with-go-1-1"
            exit 1
        fi
    done
done
exit 0
| true
|
820f67cfce8cf182256a46b42fef788f2338cdc2
|
Shell
|
galoscar07/college2k16-2k19
|
/2nd Semester/Operating Systems/Operating Systems for the exam/Problems/Shell/more shell/05.sh
|
UTF-8
| 600
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/sh
# Given 3 parameters (ext1, ext2, word): rename every file with
# extension ext1 in the current directory and all its subdirectories
# to extension ext2, and count how many times "word" appears in each
# renamed file. (Original header comment was in Romanian.)
#
# The original iterated `for x in $(find ...)`, which word-splits on
# whitespace and mangles any path containing spaces; a read loop over
# find's output handles one path per line safely.
find "$(pwd)" -type f | while IFS= read -r x; do
    # Last dot-separated component of the path = current extension.
    ext=$(echo "$x" | awk -F "." '{ print $NF }')
    if [ "$ext" = "$1" ]; then
        # Swap the trailing .ext1 for .ext2.
        x2=$(echo "$x" | sed "s/\.$1\$/\.$2/")
        mv "$x" "$x2"
        echo "$x2"
        # grep -o prints each match on its own line; wc -l counts them.
        n=$(grep -o "$3" "$x2" | wc -l)
        echo \"$3\" appears $n times \in the above file
    fi
done
| true
|
20624436e4d1c2d8528445901ff14749de7a11f3
|
Shell
|
Ikusa0/IFPB-ShellScript
|
/Semana_8/Atividade_1/script3.sh
|
UTF-8
| 438
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Classify every entry in the current directory into dirs.txt,
# files.txt and links.txt, print the three lists (bold headers), then
# remove the list files.
touch dirs.txt files.txt links.txt
for ARQUIVO in *; do
    # Test for a symlink FIRST: -d and -f follow symlinks, so a link
    # pointing at an existing file/dir would otherwise land in
    # dirs.txt/files.txt (the original only ever recorded BROKEN
    # symlinks in links.txt).
    if [[ -h $ARQUIVO ]]; then
        echo "$ARQUIVO" >> links.txt
    elif [[ -d $ARQUIVO ]]; then
        echo "$ARQUIVO" >> dirs.txt
    elif [[ -f $ARQUIVO ]]; then
        echo "$ARQUIVO" >> files.txt
    fi
done
echo -e "\e[1mDIRS\e[m"
cat dirs.txt
echo -e "\n\e[1mFILES\e[m"
cat files.txt
echo -e "\n\e[1mLINKS\e[m"
cat links.txt
rm dirs.txt files.txt links.txt
| true
|
b4193f4963c5c1d025666105078bf093e70f2bf3
|
Shell
|
xiaodongus/snell
|
/snell.sh
|
UTF-8
| 766
| 2.9375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Install snell-server v1.1.0 under /root/snell and register it as a
# systemd service. Assumes a yum-based distro and must run as root.
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
export PATH
# unzip is needed to unpack the release archive.
yum install unzip -y
cd ~
# Fetch the official v1.1.0 linux-amd64 release from GitHub.
wget --no-check-certificate -O snell.zip https://github.com/surge-networks/snell/releases/download/v1.1.0/snell-server-v1.1.0-linux-amd64.zip
unzip snell.zip -d snell
rm -f snell.zip
chmod +x /root/snell/snell-server
cd /etc/systemd/system
# Write the unit file. <<- strips leading TABs (the body is flush-left
# anyway); the unquoted delimiter would expand $vars, but none appear.
cat > snell.service<<-EOF
[Unit]
Description=Snell Server
After=network.target
[Service]
Type=simple
User=nobody
Group=nogroup
LimitNOFILE=32768
ExecStart=/root/snell/snell-server -c /root/snell/snell-server.conf
Restart=on-failure
RestartSec=1s
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl start snell
# NOTE(review): start followed immediately by restart looks redundant;
# presumably the restart picks up the config the first start generated
# -- confirm before simplifying.
systemctl restart snell
# Show the server config (port/PSK) to the operator.
cat /root/snell/snell-server.conf
| true
|
01d4d9058a1e8177a1ded7ace56cf39f9f09cc97
|
Shell
|
AlphaOS/antergos-packages
|
/firefox-beta/PKGBUILD
|
UTF-8
| 2,712
| 2.796875
| 3
|
[] |
no_license
|
# Maintainer: Det <nimetonmaili g-mail>
# Contributors: Achilleas Pipinellis, speed145a, Schnouki
pkgname=firefox-beta
pkgver=39.0b3
# Split "X.YrcN" into the release (_major) and build number (_build);
# for non-rc versions both expansions leave pkgver unchanged.
_major=${pkgver/rc*}
_build=${pkgver/*rc}
pkgrel=1
pkgdesc="Standalone web browser from mozilla.org - Beta"
arch=('i686' 'x86_64')
url="https://www.mozilla.org/en-US/firefox/channel/#beta"
license=('MPL' 'GPL' 'LGPL')
depends=('alsa-lib' 'dbus-glib' 'desktop-file-utils' 'gtk2' 'hicolor-icon-theme'
'icu' 'libevent' 'libvpx' 'libxt' 'mime-types' 'nss' 'sqlite')
optdepends=('gst-plugins-base: vorbis decoding, ogg demuxing'
'gst-plugins-good: webm and mp4 demuxing'
'gst-plugins-bad: aac, vp8 and opus decoding'
'gst-plugins-ugly: h.264 and mp3 decoding'
'gst-libav: more decoders'
'libpulse: PulseAudio driver'
'networkmanager: Location detection via available WiFi networks')
provides=('firefox-beta' 'firefox-beta-bin')
conflicts=('firefox-beta-bin')
install=$pkgname.install
# Desktop entries ship with the PKGBUILD; the browser tarball is
# per-architecture below.
source=("$pkgname.desktop"
"$pkgname-safe.desktop")
source_i686=("https://ftp.mozilla.org/pub/mozilla.org/firefox/releases/$pkgver/linux-i686/en-US/firefox-$pkgver.tar.bz2")
source_x86_64=("https://ftp.mozilla.org/pub/mozilla.org/firefox/releases/$pkgver/linux-x86_64/en-US/firefox-$pkgver.tar.bz2")
md5sums=('13bd50c37f55a83539a680ce83162468'
'a85c53ccb2b78514f37833d288a20ba2')
md5sums_i686=('8da45ae6e3c13256b5e7d4388f1c5836')
md5sums_x86_64=('4f51d913af63aaff962cbbe0833862bf')
# RC
# When _build is a single character (i.e. pkgver contained "rcN"),
# switch the tarball sources to the candidates/ staging directory.
if [[ $_build = ? ]]; then
source_i686=("https://ftp.mozilla.org/pub/mozilla.org/firefox/candidates/$_major-candidates/build$_build/linux-i686/en-US/firefox-$_major.tar.bz2")
source_x86_64=("https://ftp.mozilla.org/pub/mozilla.org/firefox/candidates/$_major-candidates/build$_build/linux-x86_64/en-US/firefox-$_major.tar.bz2")
fi
# makepkg packaging step: install the prebuilt browser under
# /opt/firefox-beta-$pkgver and wire up the /usr/bin symlink, desktop
# entries and icon symlinks.
package() {
	# Create directories
	msg2 "Creating directory structure..."
	mkdir -p "$pkgdir"/usr/bin
	mkdir -p "$pkgdir"/usr/share/applications
	mkdir -p "$pkgdir"/usr/share/icons/hicolor/128x128/apps
	mkdir -p "$pkgdir"/opt

	msg2 "Moving stuff in place..."
	# Install
	cp -r firefox/ "$pkgdir"/opt/$pkgname-$pkgver

	# /usr/bin link
	ln -s /opt/$pkgname-$pkgver/firefox "$pkgdir"/usr/bin/$pkgname

	# Desktops
	install -m644 *.desktop "$pkgdir"/usr/share/applications/

	# Icons
	# ${i/x*} strips "xNN" from e.g. "48x48" to get the default<size>.png
	# filename that ships inside the browser tarball.
	for i in 16x16 22x22 24x24 32x32 48x48 256x256; do
		install -d "$pkgdir"/usr/share/icons/hicolor/$i/apps/
		ln -s /opt/$pkgname-$pkgver/browser/chrome/icons/default/default${i/x*}.png \
			"$pkgdir"/usr/share/icons/hicolor/$i/apps/$pkgname.png
	done
	# 128x128 uses the separate mozicon128.png shipped by Mozilla.
	ln -s /opt/$pkgname-$pkgver/browser/icons/mozicon128.png \
		"$pkgdir"/usr/share/icons/hicolor/128x128/apps/$pkgname.png
}
| true
|
f7e50bdda4fa091622391432c4e1527f79ed59a9
|
Shell
|
hellozjf/vultr
|
/vultr_init.sh
|
UTF-8
| 622
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Initialize a fresh Vultr server (comments translated from Chinese).
# 1. Configure basic system settings.
wget https://raw.githubusercontent.com/hellozjf/vultr/master/vultr_system.sh && chmod +x vultr_system.sh && bash vultr_system.sh
# 2. Install docker (do NOT install docker on machines with less than
#    1 GB of RAM, or the system will run into trouble).
wget https://raw.githubusercontent.com/hellozjf/vultr/master/vultr_docker.sh && chmod +x vultr_docker.sh && bash vultr_docker.sh
# 3. Install ssr and bbr.
wget https://raw.githubusercontent.com/hellozjf/vultr/master/vultr_ssr_bbr.sh && chmod +x vultr_ssr_bbr.sh && bash vultr_ssr_bbr.sh
# 4. Remove all leftover files. NOTE(review): this wipes everything in
#    the current directory, not just the downloaded scripts.
rm -rf *
# 5. Reboot the system.
reboot
| true
|
80ff2b34210c0728e8867e3c1927b36475e962a5
|
Shell
|
benf/dotfiles
|
/.scripts/toggle_touchpad.sh
|
UTF-8
| 510
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Toggle the touchpad on/off.
# First try: toggle a user systemd unit; if that helper succeeds, done.
~/.scripts/toggle_user_unit.sh disable-touchpad.service && exit
# Second try: synaptics driver. Bail out silently when synclient is absent.
which synclient &> /dev/null || exit
# Flip TouchpadOff between 0 and 1. `exec` replaces this shell, so the
# xinput fallback below is effectively dead code -- it could only run
# if exec itself failed. NOTE(review): presumably kept as a reference
# for non-synaptics setups; confirm before deleting.
exec synclient TouchpadOff=$((1-$(synclient | awk '/TouchpadOff/ { print $3 }')))
# (Unreachable) fallback: toggle the device's "Device Enabled" xinput property.
device_id=$(xinput list | sed -n "s/^.*Synaptics.*id=\([^\t]*\).*$/\\1/p")
enabled=$(xinput list-props $device_id | sed -n "s/^.*Device Enabled.*\t\(.*\)$/\\1/p")
property=$(xinput list-props $device_id | sed -n "s/^.*Device Enabled (\([^)]*\)).*$/\\1/p")
xinput set-prop $device_id $property $((1-enabled))
| true
|
93d7e297113c79fa439cf8b3b9c7c29f0a8d5e5a
|
Shell
|
gfakes/vcloud
|
/harbinger/management/ailogs
|
UTF-8
| 1,143
| 3.25
| 3
|
[
"NTP-0"
] |
permissive
|
#!/bin/bash
# AI log viewer: follow (tail -F) the log selected by name or by its
# one/two-letter shortcut. With no argument, print a menu and read the
# choice from stdin.
if [ $# -lt 1 ]; then
    echo "======================="
    echo "AI Log Viewer"
    echo " "
    echo "  g for glassfish"
    echo "  m for mirth"
    echo "  dm for data-manager"
    echo "  b for bridge log"
    echo "  p for postgres"
    echo "  i for indexer"
    echo "======================="
    echo " "
    echo -n "Enter choice: "
    read log
else
    log="$1"
fi

# Long name and shortcut map to the same log file; postgres and the
# indexer need sudo to read their logs.
case $log in
    glassfish|g)
        tail -F /servers/harbinger/logs/glassfish/server.log;;
    mirth|m)
        tail -F /servers/harbinger/logs/mirth/mirth.log;;
    data-manager|dm)
        tail -F /servers/harbinger/logs/data_manager/sasl-error.log;;
    postgres|p)
        sudo tail -F /data/pgdata/serverlog;;
    indexer|i)
        sudo tail -F /servers/indexer/logs/indexer.log;;
    bridge|b)
        tail -F /servers/harbinger/logs/bridge-service.log;;
    *)
        echo "option not found"
        ;;
esac
| true
|
69815c1abb79768014313f133784249a5714e6fc
|
Shell
|
yiab/candyOS
|
/bin/utils/util_tick.sh
|
UTF-8
| 416
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Wall-clock stopwatch helpers (meant to be sourced).
# NOTE: the original shebang was #!/bin/sh, but `declare` and the
# `function` keyword are bashisms, so the script is bash-only; the
# shebang now says so.
#
# Shared state: nanosecond timestamp of the last tick_start call.
declare __l_a_s_t__s_t_a_r_t__t_i_m_e;

# tick [LABEL]
# If the stopwatch is running, print the elapsed time since the last
# tick/tick_start (prefixed with LABEL), then restart the stopwatch.
function tick()
{
    if [ -n "${__l_a_s_t__s_t_a_r_t__t_i_m_e}" ]; then
        # local keeps the scratch variables out of the caller's scope
        # (the original leaked end_tm/use_tm as globals).
        local end_tm use_tm
        end_tm=$(date +%s%N)
        # awk performs the nanoseconds -> seconds float division.
        use_tm=$(echo $end_tm ${__l_a_s_t__s_t_a_r_t__t_i_m_e} | awk '{ print ($1 - $2) / 1000000000}')
        echo "$1任务结束,用时$use_tm秒"
    fi;
    tick_start
}

# tick_start -- (re)start the stopwatch.
function tick_start()
{
    __l_a_s_t__s_t_a_r_t__t_i_m_e=$(date +%s%N);
}
| true
|
38248796e67a5600b5f4fa2e5bba2e57f08e0b05
|
Shell
|
mpiko/localbin
|
/avonriver
|
UTF-8
| 1,986
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# download the latest Avon River heights at the Channel from BoM
# test the low warning and high warning levels.
# apparently 2.6 is when the bridge becomes impassable.

# Warning thresholds in metres.
LOW=1.34
HIGH=1.4

# Pull DB credentials ($USER/$PASS -- see usage below) from the
# untracked secrets file; abort when it is missing.
if [ -e ~/localbin/.secrets ]
then
. ~/localbin/.secrets
else
echo "cannot find secrets. Exiting"
exit 1
fi
# LAST: previous reading (for the trend); COUNT: cell toggle while parsing.
LAST=0
COUNT=0
# Split only on newlines while iterating the scraped table cells.
IFS="
"
cd /tmp
# Fetch the BoM river-height table for station 085276.
wget http://www.bom.gov.au/fwo/IDV67203/IDV67203.085276.tbl.shtml
# updateDB "DD-MM-YYYY HH:MM:SS VALUE"
# Re-order the date to YYYY-MM-DD and insert the reading into the
# riverdata table, unless a row with that timestamp already exists.
# NOTE(review): $DAY/$TIME/$VAL are interpolated straight into the SQL
# -- fine for BoM-formatted input, but injection-prone if the scraped
# format ever changes; consider validating or escaping.
updateDB() {
    DAY=$(echo $1 | cut -f1 -d" ")
    D=$(echo $DAY | cut -f1 -d-)
    M=$(echo $DAY | cut -f2 -d-)
    Y=$(echo $DAY | cut -f3 -d-)
    DAY=$Y-$M-$D
    TIME=$(echo $1 | cut -f2 -d" ")
    VAL=$(echo $1 | cut -f3 -d" ")
    # Insert only when this timestamp is not already recorded.
    HITS=$(mysql -u$USER -p$PASS -D avonriver -BNe "select count(date) from riverdata where date=\"$DAY $TIME\"; " 2> /dev/null)
    if [ $HITS -eq 0 ]
    then
        mysql -u$USER -p$PASS -D avonriver -BNe "insert into riverdata values(\"$DAY $TIME\",\"$VAL\");" 2> /dev/null
        echo "DB Updated"
    fi
}
# trend NOW PREV
# Print $STRING (built by the caller) followed by RISING/FALLING/STABLE
# depending on how the new reading compares to the previous one.
# Float comparison is delegated to the external `calc` utility, which
# prints 1 when the expression is true -- TODO confirm that behaviour
# on this host.
trend() {
    NOW=$1
    PREV=$2
    if [ $(calc $NOW \> $PREV) -eq 1 ]
    then
        echo "$STRING RISING "
    elif [ $(calc $NOW \< $PREV) -eq 1 ]
    then
        echo "$STRING FALLING "
    else
        echo "$STRING STABLE "
    fi
}
# Walk the <td> cells of the scraped table (markup stripped by the sed
# chain). Cells alternate timestamp / height: odd iterations stash the
# timestamp in STRING, even iterations process the height value.
for i in $(grep '^<td' IDV67203.085276.tbl.shtml | sed 's/<td align=//g' | sed 's/left>//g' | sed 's/right>//g'| sed 's/<\/td>//g')
do
    COUNT=$(expr $COUNT + 1)
    # COUNT/2 contains a "." exactly when COUNT is odd (calc float output).
    if [ $(calc $COUNT \/ 2 | grep "\.") ]
    then
        # its an odd number
        # only even numbered lines have the height value.
        STRING=$(echo $i | sed 's#/#-#g')
        continue
    else
        STRING="$STRING $i "
        updateDB $STRING
        # Append the warning level for this height reading.
        if [ $(calc $i \> $HIGH) -eq 1 ]
        then
            STRING="$STRING WARNING: HIGH "
        elif [ $(calc $i \> $LOW) -eq 1 ]
        then
            STRING="$STRING WARNING: LOW "
        else
            STRING="$STRING OK "
        fi
        # Print the line with the trend vs the previous reading.
        trend $i $LAST;
        LAST=$i
    fi
done
# Clean up the downloaded table (and any numbered wget duplicates).
rm IDV67203.085276.tbl.shtml*
| true
|
00c4349538dca853c11461c020948964f4d78b0c
|
Shell
|
AndrewShultz/Lynx
|
/music.fun
|
UTF-8
| 4,924
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# echoload PERCENT [COLORESCAPE] MESSAGE
# Print MESSAGE (optionally prefixed by a colour escape when called
# with 3 args) and, when stdout is a terminal, draw a two-colour
# progress bar on the bottom screen line using save/restore-cursor
# escapes. Requires tput, awk, seq and bc.
echoload() {
  if [ -t 1 ]; then
    # Terminal geometry: bar width = columns minus the "NN.NN[" prefix.
    nlin=$(tput lines)
    colLen=`awk "BEGIN{ printf $(tput cols)-7 }"`
    # per: filled width as a float; ndots: its integer part.
    per=`awk "BEGIN{ printf (${1}/100.0)*$colLen }" | cut -c 1-4` ; ndots=`echo "$per" | awk -v FS='.' '{printf $1}'`
    # Build the bar: filled part on background 47, remainder on 197.
    per2=`echo "$1" | cut -c 1-5`;mess=`printf "%5s[" "$per2"`;mess+="$(tput setab 47)";for i in `seq 1 1 $ndots` ; do mess+=" " ; done
    mess+="$(tput setab 197)";mndots=`awk "BEGIN{ printf $colLen-1 }"`;for i in `seq $ndots 1 $mndots` ; do mess+=" " ; done
    # Print the message, keeping one spare line below for the bar.
    if [ $# -eq 2 ] ; then printf "$2\n\n\033[1A$(tput il1)" ; fi
    if [ $# -eq 3 ] ; then printf "$2$3\033[0m\n\n\033[1A$(tput il1)" ; fi
    # At >= 100% clear the bar line, otherwise redraw it at the bottom.
    if [ `echo $per2'>='100.0 | bc -l` -eq 1 ] ; then echo -ne "\033[s\033[$nlin;0H\033[K\033[0m\033[u" ; echo ;
    else echo -ne "\033[s\033[$nlin;0H\033[K${mess}\033[0m]\033[u" ; fi
  else
    # Not a terminal: plain message output, no bar.
    if [ $# -eq 2 ] ; then printf "$2\n" ; fi
    if [ $# -eq 3 ] ; then printf "$2$3\033[0m\n" ; fi
  fi
}
# Convert every .m4a in ~/Music/Rename to .mp3 via ffmpeg, title-casing
# each word of the new filename, then delete all the .m4a sources.
MusicMessoremConvert() {
  P_W_D=`pwd`
  cd ~/Music/Rename/
  for i in *.m4a; do
    #NEWFILE=`echo $i | sed 's^(.*)^^g' | sed 's^ .m4a^.m4a^g' | sed "s^ '^ ^g" | sed "s^'.^.^g"`
    NEWFILENAME=`echo $i | sed 's^.m4a^.mp3^g'`
    # GNU sed \u/\L: capitalise the first letter of every word.
    NEWFILENAME=`echo "$NEWFILENAME" | sed 's/\b\([[:alpha:]]\)\([[:alpha:]]*\)\b/\u\1\L\2/g'`
    echo "$i -> $NEWFILENAME"
    ffmpeg -i "$i" "$NEWFILENAME"
  done
  # NOTE(review): sources are removed unconditionally, even when a
  # conversion above failed -- confirm that is acceptable.
  rm *.m4a
  cd $P_W_D
}
# Download queued tracks. The old youtube-dl/while-read implementation
# is kept below as commented-out history; the work is now delegated to
# the compiled YouTubeDLer helper in ~/Lynx.
MusicMessorem()###USE###
{
  # while IFS='' read -r line || [[ -n "$line" ]]; do
  # newline=`echo "$line" | sed -e "s^&list.*^^g"`
  # newline=`echo "$newline" | sed -e "s^&index.*^^g"`
  # echo "$newline"
  # youtube-dl -f 140 $newline -o /home/dulain/Music/Rename/"%(title)s.%(ext)s"
  # done < "/home/dulain/Music/messorem.txt"
  # MusicMessoremConvert
  #python /home/dulain/python/YouTubeDL/dler.py
  ${HOME}/Lynx/YouTubeDLer/YouTubeDLer
}
# Interactively rename the .mp3 files in ~/Music/Rename: dump the
# current names to temp.txt, open it in the editor, then move each
# file into ~/Music/Messorem under the name on the matching line.
Renamerem() {
  P_W_D=`pwd`
  cd ~/Music/Rename/
  temprename=temp.txt
  if [ -f $temprename ] ; then rm $temprename ; fi
  oldnames=(*".mp3")
  cnt=${#oldnames[@]}
  cnt=`awk "BEGIN{ printf ${cnt}-1 }"`
  # Write one old name per line for the user to edit.
  for iname in `seq 0 1 $cnt` ; do echo "${oldnames[$iname]}" >> $temprename ; done
  # "e" is presumably the user's editor wrapper -- TODO confirm.
  e $temprename
  newnames=
  readcnt=0
  # Read the (possibly edited) names back, preserving whitespace.
  while IFS= read -r line
  do
    newnames[$readcnt]="$line"
    ((readcnt++))
  done < "$temprename"
  # Positional match: line N of the edited file renames file N.
  for iname in `seq 0 1 $cnt`
  do
    mv "${oldnames[$iname]}" /home/dulain/Music/Messorem/"${newnames[$iname]}"
  done
  if [ -f "$temprename" ] ; then rm $temprename ; fi
  cd $P_W_D
}
# HandleMusic N
# Copy (or, for option 4, move) everything in ~/Music/Messorem to the
# destination selected by N: 0=mp3 player, 1=usb stick, 2=sd card,
# 3=phone mount, 4=archive directory. With no argument, print the
# option list and return. Shows per-file progress via echoload.
HandleMusic()###USE###
{
  P_W_D=`pwd`
  cd ~/Music/Messorem/
  if [ $# -lt 1 ]
  then
    echo "USAGE OPTIONS (1 ARGUMENT ONLY):"
    echo "0: Move to mp3 player"
    echo "1: Move to usb stick"
    echo "2: Move to computer external sd card"
    echo "3: Move to S8 (must be mounted with S8Mount[give permission],S8Unmount,S8Mount)"
    echo "4: Move to finished directory"
    cd $P_W_D
    return
  else
    option=$1
  fi
  # Number of mp3 files, used for the progress percentage.
  allfilescnt=(`ls | sed -e "s^.*mp3^mp3^g"`)
  allfilescnt=${#allfilescnt[@]}
  ifile=0
  #echo "Current directory: `pwd`"
  #ls
  cmd=cp
  # String comparison is enough here because the option is one char.
  if [[ "$1" < "0" || "$1" > "4" ]] ; then echo "Not a valid option. " ; cd $P_W_D ; return ; fi
  if [[ "$1" == "0" ]] ; then movedir='/media/dulain/Clip Sport/Music/NewMusic/' ; fi
  if [[ "$1" == "1" ]] ; then movedir='/media/dulain/USB20FD/All Music/' ; fi
  if [[ "$1" == "2" ]] ; then movedir='/media/dulain/9016-4EF8/Music/' ; fi
  if [[ "$1" == "3" ]] ; then movedir='/home/dulain/mount/Phone/Music/' ; cmd="cp" ; fi
  if [[ "$1" == "4" ]] ; then movedir='/home/dulain/Music/ToArchive/' ; cmd=mv ; fi
  echo "Command = $cmd"
  echo "End Directory = $movedir"
  if [ ! -d "$movedir" ]
  then
    echo "Directory doesn't exist..."
    cd $P_W_D
    return
  fi
  for file in *.mp3
  do
    if [ -f "${file}" ]
    then
      ((ifile++))
      # $asGreen/$asRed are colour escapes expected from the caller's
      # environment -- TODO confirm where they are defined.
      echoload `awk "BEGIN{ printf $ifile/$allfilescnt*100}"` $asGreen "Copying ${file}"
      $cmd "${file}" "$movedir"
    else
      ((ifile++))
      # NOTE(review): this branch divides by ${#allfiles[@]} (an
      # undefined array) rather than $allfilescnt -- looks like a
      # leftover variable name; confirm before fixing.
      echoload `awk "BEGIN{ printf $ifile/${#allfiles[@]}*100}"` $asRed "File nonexistant: ${file}"
    fi
  done
  cd $P_W_D
}
# PlayMusic DIR
# Play the playlist named after DIR's basename with the
# mplayer-transcribe wrapper, generating the playlist first (via Udpl)
# when it does not exist yet. Older players are kept commented out.
PlayMusic()###USE###
{
  pldir=${HOME}/Music/Playlists
  #/home/dulain/bashscripts/playmusic2.sh && exit
  #/home/dulain/python/PlayMusic/playMusic.py $1
  file=`basename $1`
  if [ ! -f "${pldir}/${file}.txt" ]
  then
    Udpl $1
  fi
  #mplayer -shuffle -playlist ${pldir}/${file}.txt
  /home/dulain/PSoft/mplayer-transcribe-master/mplayer-transcribe ${pldir}/${file}.txt
}
# Udpl [DIR]
# (Re)build the playlist for DIR (default: the current directory):
# every *.mp3 beneath it, one path per line, written to
# ~/Music/Playlists/<dirname>.txt.
Udpl()###USE###
{
  pldir=${HOME}/Music/Playlists
  if [ ! -d ${pldir} ] ; then mkdir -p ${pldir} ; fi
  # With no argument use pwd; otherwise resolve DIR relative to pwd.
  if [ -z $1 ] ; then indir=`pwd` ; indirname=`basename ${indir}` ; else indir=`pwd`/$1 ; indirname=`basename ${indir}` ; fi
  if [ -z "$indirname" ] ; then echo "Trouble getting in directory name (make sure no ending front slash)" ; return 1 ; fi
  find ${indir}/ -name "*.mp3" > ${pldir}/${indirname}.txt
}
| true
|
387b19177f7a850a8b64dfb3400dea85a305cbbf
|
Shell
|
Quandt-Mycology-Lab/Lab_Codes_and_workflows
|
/Phylogenomics_workflow/scripts/Old_scripts/new_create_muscle.sh
|
UTF-8
| 99
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Print a `muscle` alignment command line for every FASTA file in the
# current directory.
# The original collected names by parsing `ls *.fasta` into an unquoted
# variable, which breaks on filenames containing whitespace; a plain
# glob is safe. nullglob makes the loop silently skip when there are
# no matches, like the original's empty-list behaviour.
shopt -s nullglob
for i in *.fasta
do
    echo "muscle -in ${i} -out ${i}.out"
done
| true
|
8d860edf3d9b9e68ff3cabf46f054a4a93be6d9f
|
Shell
|
albanb/scripts
|
/saveconf
|
UTF-8
| 9,878
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/sh
# Script by Ypnose <linuxien[AT]legtux[DOT]org>
# This script saves core config files for Archlinux in the specified folder
# If you want to use it, you may change $DIRS and files accordingly!
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#DIR=/mnt/USB1/CONFSAVE
# Backup destination and the dated folder/archive name derived from it.
DIR="$HOME/backup"
DAT=$(date "+%d-%m-%Y")
NEWFOL="$DIR/CONFS-$DAT"
# TARGZ=1: compress the backup folder; ARCHIVE=1: rsync to external disk
# (both can be toggled by command-line flags parsed elsewhere).
TARGZ=1
ARCHIVE=0
# Terminal colour escapes: default, red, green, yellow, bold cyan.
DF="$(tput sgr0)"
RD="$(tput setaf 1)"
GR="$(tput setaf 2)"
YL="$(tput setaf 3)"
BCY="$(tput bold; tput setaf 6)"
# rsync source and destination used by the -r (archive) mode.
REPERTOIRE_SOURCE="$HOME/"
RACINE_DESTINATION="/media/5211529A04099FF7/sauvegarde/home/alban/"
# Prefer bsdtar when available; otherwise GNU tar in ustar format.
if [ -x "/usr/bin/bsdtar" ]; then
    CMDARC="bsdtar -oczf"
else
    CMDARC="tar --format=ustar -czf"
fi
# Print the command-line help text on stdout.
usage() {
    local line
    for line in \
        "Usage: ${0##*/} [-d directory] [-hflorv]" \
        "	OPTIONS:" \
        "	-d	Save in the specified directory" \
        "	-h	Display help and quit" \
        "	-f	Disable archive compression" \
        "	-l	Save important files for Fedora setup" \
        "	-o	Overwrite existing tar.gz archive or directory" \
        "	-r	Synchronize files with external hard drive" \
        "	-v	Verbose ouput (print saved filenames)"
    do
        printf "%s\n" "$line"
    done
}
# Remove today's existing backup (folder or archive) so it can be
# recreated; invoked by the -o (overwrite) option.
delete_backup() {
    # With -o argument, it overrides an existing folder / archive
    if [ -d "$NEWFOL" ]; then
        printf "$RD%s$DF\n" "Overwriting the existing backup folder!"
        rm -R "$NEWFOL"
    elif [ -e "$DIR/CONFS-$DAT.tar.gz" ]; then
        printf "$RD%s$DF\n" "Overwriting the existing backup archive!"
        rm "$DIR/CONFS-$DAT.tar.gz"
    fi
}
# Keep at most 5 CONFS*.tar.gz archives in $DIR: when compression is
# enabled and the count reaches 5, offer to delete the oldest one.
limit_arch() {
    # It counts existing archives and delete the oldest file if the folder, contains more than (or equal) 5 files
    # >>Ypnose, for FILE in *; do [[ -f "$FILE" && "$FILE" -nt "$FNAME" ]] && typeset FNAME="$FILE"; done; print $FNAME
    if [ -d "$DIR" ]; then
        if [ "$((TARGZ))" -eq 1 ] && [ "$(find $DIR -type f -iname "CONFS*.tar.gz" | wc -l)" -ge 5 ]; then
            printf "$RD%s\n%s$DF\n" "The directory contains more than 5 archives!" "Deleting the oldest archive"
            cd "$DIR"
            # NOTE(review): parsing `ls -1rt` to find the oldest file is
            # fragile with unusual filenames, and interactive `rm -i`
            # will block when the script runs unattended (e.g. cron).
            FILDEL="$(ls -1rt | head -1)"
            rm -i "$FILDEL"
        fi
    else
        printf "%s %s\n" '$DIR does not exist.' "Unable to check it"
    fi
}
#Fonction permettant de s'assurer que le disque dur est en place avant l'archivage
function ready
{
# Détecter la présence du volume de destination et interrompre l'opération si nécessaire
while [ ! -e "$RACINE_DESTINATION" ]
do
#Si le volume de sauvegarde n'est pas présent on affiche un message demandant quoi faire
printf "%s $RD$RACINE_DESTINATION$DF %s\n" "Le dossier de sauvegarde" "n'existe pas. Insère le disque dur de sauvegarde si tu veux sauvegarder. Disque monté? y/n"
read answer
if [ "$answer" != "y" ]
then
#On demande de repousser la sauvegarde
printf "$RD%s$DF\n" "Attention la sauvegarde du dossier est repoussée"
exit 3
fi
done
printf "%s $GR$RACINE_DESTINATION$DF\n" "Sauvegarde dans le dossier"
}
#Fonction d'archivage proprement dite
function archive
{
if [ "$((ARCHIVE))" -eq 1 ]; then
ready
#Synchronisation des dossiers sources et destination
echo $REPERTOIRE_SOURCE
echo $RACINE_DESTINATION
rsync -rtD --exclude=".**" --exclude="/multimedia/videos/" --exclude="/sync/" --exclude="/dev/" --del --stats --progress $REPERTOIRE_SOURCE $RACINE_DESTINATION
rsync -rtD --include="/multimedia/" --exclude="/multimedia/videos/.divers/*" --include="/multimedia/videos/" --include="/multimedia/videos/**" --exclude="*" --stats --progress $REPERTOIRE_SOURCE $RACINE_DESTINATION
printf "%s\n" "La commande de sauvegarde a terminé son travail..."
fi
}
#######################################
# Back up system and user configuration files into $NEWFOL, then optionally
# archive the folder (handled by the TARGZ flag further below).
# Globals (read): DIR, NEWFOL, DAT, HOME, FIREF, CMDARC, SHF, VERBOSE, TARGZ,
#                 color codes GR/YL/RD/BCY/DF
# Exits non-zero when DIR is missing or today's backup already exists.
#######################################
backup() {
  # Abort unless the destination root exists.
  if [ -d "$DIR" ]; then
    printf "$GR%s\n$YL%s$DF\n" "The directory exists. It will be used for your configs." "Starting backup..."
  else
    printf "$RD%s$DF\n" "The directory does not exist. You should change DIR location!"
    exit 1
  fi
  # One backup per day: bail out if today's folder or archive already exists.
  if [ -d "$NEWFOL" ] || [ -e "$DIR/CONFS-$DAT.tar.gz" ]; then
    printf "$RD%s$DF\n" "WARNING: You already synchronised your files today!"
    exit 1
  else
    mkdir -p "$NEWFOL"
  fi
  ###########
  #Filesystem
  ###########
  BOOTF="/boot/syslinux/syslinux.cfg"
  ETCF="/etc/fstab /etc/locale.conf /etc/locale.gen /etc/mkinitcpio.conf"
  SUDOF="/etc/sudo.conf"
  PACF="/etc/pacman.conf /etc/pacman.d/mirrorlist"
  NETF="/etc/netctl/ethernet-static /etc/netctl/ethernet-dhcp /etc/netctl/wireless-open /etc/netctl/wireless-wpa /etc/netctl/wireless-wpa-dhcp-parents /etc/netctl/wireless-wpa-static"
  MODF="/etc/modprobe.d/modprobe.conf /etc/modprobe.d/audio_power_save.conf"
  XORGF="/etc/X11/xorg.conf.d/10-keyboard-layout.conf /etc/X11/xorg.conf.d/10-evdev.conf /etc/X11/xorg.conf.d/10-keyboard-terminate.conf /etc/X11/xorg.conf.d/10-quirks.conf /etc/X11/xorg.conf.d/50-synaptics.conf /etc/X11/xorg.conf.d/60-fonts.conf"
  IPTABF="/etc/iptables/iptables.rules"
  printf " ==> %s\n" "Saving filesystem files..."
  if [ "$((VERBOSE))" -eq 1 ]; then
    echo FOO  # placeholder: verbose output is not implemented yet
  fi
  # The lists are deliberately left unquoted so each path becomes its own
  # loop item (none of the paths contain whitespace). The original quoted
  # the whole list, making the loop run exactly once and relying on the
  # later unquoted cp to split it back apart.
  for FILEFS in $BOOTF $ETCF $SUDOF $PACF $NETF $MODF $XORGF $IPTABF
  do
    cp -- "$FILEFS" "$NEWFOL"
  done
  #Packages list
  printf "%s - %s explicit packages\n\n" "$DAT" "$(pacman -Qe | wc -l)" > "$NEWFOL/pacman_pkgs.out"
  pacman -Qe >> "$NEWFOL/pacman_pkgs.out"
  printf "\n%s - %s external packages\n\n" "$DAT" "$(pacman -Qm | wc -l)" >> "$NEWFOL/pacman_pkgs.out"
  pacman -Qm >> "$NEWFOL/pacman_pkgs.out"
  printf "\n%s - %s packages\n\n" "$DAT" "$(pacman -Q | wc -l)" >> "$NEWFOL/pacman_pkgs.out"
  pacman -Q >> "$NEWFOL/pacman_pkgs.out"
  #####
  #User
  #####
  SHELF="$HOME/.bashrc $HOME/.bash_profile"
  MISCF="$HOME/.gitconfig $HOME/.xinitrc $HOME/.tickr/tickr-url-list $HOME/.urlview $HOME/.vimrc $HOME/.local/share/password.gpg $HOME/.config/tmux.conf $HOME/.config/Xresources $HOME/.config/lftp/rc $HOME/.config/interrobang/config \
$HOME/.local/share/todo/todo.txt $HOME/.config/dunst/dunstrc"
  BUILDF="$HOME/dev/build/loliclip/lolictrl $HOME/dev/build/dwmstatus-git/dwmstatus.h"
  #####
  OPTF="$HOME/.vim/ $HOME/.mutt/ $HOME/.config/ranger/ $HOME/.config/moc/ $HOME/.config/themes/ $HOME/.config/systemd/ $HOME/.config/tmux/ $HOME/.config/dwb/ $HOME/.config/todo $HOME/.config/transmission-daemon/ $HOME/.local/gnupg/"
  ARCF="$HOME/dev/build/"
  printf " ==> %s\n" "Saving user defined files..."
  # Same intentional word-splitting as the filesystem loop above.
  for UFILES in $SHELF $MISCF $BUILDF
  do
    cp -- "$UFILES" "$NEWFOL"
  done
  # FIREF (Firefox profile directory) is expected to be set by the globals
  # section outside this function — TODO confirm it is always defined.
  if [ -n "$FIREF" ] && [ -d "$FIREF/" ]; then
    cp -- "$FIREF/prefs.js" "$NEWFOL"
    printf " ==> $YL%s$DF\n" "Firefox profile found!"
    if [ -d "$FIREF/searchplugins" ]; then
      mkdir -p "$NEWFOL/FFox"
      cp -r -- "$FIREF/searchplugins" "$NEWFOL/FFox/searchplugins"
    fi
  else
    printf " ==> $YL%s$DF\n" "No Firefox profile found!"
  fi
  #Option
  if [ -n "$OPTF" ]; then
    printf " ==> %s\n" "Optional files will be saved!"
    mkdir -p "$NEWFOL/OPT"
    # Word-splitting over the directory list is intentional here too.
    for OPTDIR in $OPTF; do
      cp -r -- "$OPTDIR" "$NEWFOL/OPT"
    done
  else
    printf " ==> %s\n" "No optional files defined!"
  fi
  #SSH
  if [ -n "$SHF" ] && [ -d "$HOME/$SHF" ]; then
    cd "$HOME" || exit 1
    # CMDARC holds a command plus its flags (e.g. "tar -czf");
    # it must stay unquoted so it word-splits into command + arguments.
    $CMDARC "$NEWFOL/SSH.tar.gz" "$SHF"
  fi
  # ARCF is already an absolute path; the original tested "$HOME/$ARCF",
  # which could never exist, so the builds archive was silently skipped.
  if [ -n "$ARCF" ] && [ -d "$ARCF" ]; then
    # GNU du with a negative threshold prints nothing when the tree
    # exceeds 50M, which is how "too big" is detected below.
    if [ -z "$(du -sh "$ARCF" -t -50M)" ]; then
      printf "%s %s\n" "Folder too big." "Builds isn't saved"
    else
      cd "$HOME" || exit 1
      $CMDARC "$NEWFOL/builds.tar.gz" "$ARCF"
    fi
  fi
  printf " ==>$DF The script saved $BCY%d$DF files\n" "$(find "$NEWFOL" -type f | wc -l)"
  if [ "$((TARGZ))" -eq 1 ]; then
    cd "$DIR" || exit 1
    # Check tar's status directly instead of the "$?" anti-pattern.
    if tar -czf "CONFS-$DAT".tar.gz "CONFS-$DAT"; then
      printf "$GR%s$DF\n" "Archive has been successfully created"
    else
      printf "$RD%s$DF\n" "Archive creation has failed!"
    fi
    rm -R "$NEWFOL"
  else
    printf "$YL%s$DF\n" "Archive option turned off!"
  fi
  printf "$GR%s$DF\n" "Done."
}
# WORK IN PROGRESS!
#######################################
# Laptop variant of the backup: currently only lists the files that the
# (still commented-out) copy loop would save.
# Globals (read): HOME, color codes YL/DF
#######################################
backup_laptop() {
  SYSF="/usr/bla/hdparm.service"
  BASHF="$HOME/.bashrc $HOME/.bash_profile"
  BINF="$HOME/.bin/"  # not saved yet — reserved for the future copy loop
  printf "$YL%s$DF\n" "The following files will be saved:"
  # The original wrote ""$SYSF $BASHF"" — two empty strings glued around an
  # unquoted expansion, working only by accidental word-splitting. Make the
  # per-path iteration explicit instead.
  for LFILE in $SYSF $BASHF; do
    printf " %s\n" "$LFILE"
  done
  #for FILES in $SYSF $BASHF
  #do
  #cp "$FILES" "$NEWFOL"
  #done
}
#------------------
# Command-line options
# Note: -o and -l take effect immediately while parsing, so their position
# on the command line matters.
#------------------
while getopts ":d:hforlv" opt; do
  case "$opt" in
    d)  # override the backup destination directory
        unset DIR NEWFOL
        DIR="$OPTARG"
        NEWFOL="$DIR/CONFS-$DAT"
        ;;
    h)  usage
        exit 1
        ;;
    f)  TARGZ=0 ;;        # skip the tar.gz step
    o)  delete_backup ;;  # purge old backups right away
    r)  ARCHIVE=1 ;;
    l)  # laptop mode: run the laptop backup and stop
        limit_arch
        backup_laptop
        exit 0
        ;;
    v)  VERBOSE=1 ;;
    :)  printf "Option %s requires an argument.\n" "-$OPTARG"
        exit 1
        ;;
    ?)  printf "Invalid option: %s\n" "-$OPTARG"
        exit 1
        ;;
  esac
done
# Default run: prune old archives, back up, then archive the result.
limit_arch
backup
archive
exit 0
| true
|
7b371035bc922d3c4e7b687ef9ed347efccef356
|
Shell
|
akwebengineer/ss
|
/1397235/slipstream_sdk/sdk_installer.sh
|
UTF-8
| 1,192
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
###############################################################################
# An installer script for Slipstream SDK. If no commandline argument is given,
# this script will create a directory named SlipstreamSDK in current location.
# Absolute path of the directory is required if the SlipstreamSDK has to be created to a specific
# location. The script will use a dependency installer script to install any dependency
# required by plugin_manager tool.
#
# Author: Sanket Desai <sanketdesai@juniper.net>
#
#
###############################################################################
SCRIPT=$0
# All expansions below are quoted so the installer also works when it is run
# from a path containing spaces (e.g. a downloads folder).
SCRIPTPATH=$( cd "$(dirname "$SCRIPT")" ; pwd )
if [[ -n "${1:-}" ]]; then
    DESTINATION="$1/SlipstreamSDK"
else
    DESTINATION="$SCRIPTPATH/SlipstreamSDK"
fi
mkdir -p "$DESTINATION"
TEMPFILE=$(mktemp)
# The tar payload is appended after the __ARCHIVE__ marker at the bottom of
# this very file; find the first payload line number.
archive=$(awk '/^__ARCHIVE__/ {print NR + 1; exit 0; }' "$0")
if [[ -z "$archive" ]]; then
    # Without this guard an empty $archive becomes "tail -n +0", feeding the
    # whole script (not the payload) to tar.
    echo "Corrupt installer: __ARCHIVE__ marker not found" >&2
    rm -- "$TEMPFILE"
    exit 1
fi
# mktemp creates an empty file, so truncate-write (>) — the original's >>
# only worked by accident.
tail -n +$((archive)) "$0" > "$TEMPFILE"
tar -xvf "$TEMPFILE" -C "$DESTINATION"
rm -- "$TEMPFILE"
"$SCRIPTPATH/dependency_installer.sh"
# The dependency installer signals failure with exit code 2 — TODO confirm
# other non-zero codes really are benign.
if [[ "$?" -eq 2 ]]; then
    echo "Error while installing dependencies"
    exit 1
else
    echo "Installation complete. Please refresh the bash or start working in new one."
    exit 0
fi
# The explicit exits above are load-bearing: execution must never reach the
# binary archive data that follows this marker.
__ARCHIVE__
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.