blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
54341ad8acdf37213f5d9e967ddccb62a8b9a012 | Shell | Dj2099/hello-world | /mk.sh | UTF-8 | 543 | 3 | 3 | [] | no_license | #!/bin/bash
echo "build start"
JAR_PATH=lib
BIN_PATH=bin
SRC_PATH=src
# java list
SRC_FILE_LIST_PATH=src/sources.list
#remove old list and create new list
rm -f $SRC_PATH/sources
find $SRC_PATH/ -name *.java > $SRC_FILE_LIST_PATH
#remove old .class file and compile new class
rm -rf $BIN_PATH/
mkdir $BIN_PATH/
#link jar
for file in ${JAR_PATH}/*.jar;
do
jarfile=${jarfile}:${file}
done
echo "jarfile = "$jarfile
#compile
javac -d $BIN_PATH/ -cp $jarfile @$SRC_FILE_LIST_PATH
#run
java -cp $BIN_PATH$jarfile a4/comp2150/pos/main/test
| true |
8e28e1f1b0ad09ef93439e25e61815524909e831 | Shell | somhm-solutions/baby-terra | /usage/module-usage.sh | UTF-8 | 161 | 2.515625 | 3 | [] | no_license | # Module Loading and use
# First load (download) the module sources for the configuration path given
# as the first argument; quoting protects paths containing spaces.
module_path=$1
terraform get "$module_path"
# If the module has been updated, -update (below) re-downloads it.
terraform get -update $module_path; | true |
8e56872bac0ea209c18824b24da8a7c72d568ced | Shell | comfreeze/hmm | /.hmm/snapshot | UTF-8 | 261 | 2.671875 | 3 | [] | no_license | #!/bin/bash
not_empty "$DOCKER_CMD" "DOCKER_CMD not found!"
not_empty "$DOCKER_NAME" "DOCKER_NAME not found!"
not_empty "$CONTAINER_NAME" "CONTAINER_NAME empty!"
verbose "Committing $CONTAINER_NAME..."
$DOCKER_CMD commit $CONTAINER_NAME $DOCKER_NAME-snapshot
| true |
4a274eb44b97b6ed3831276d9a1bfb9744be53db | Shell | alysmirnova/nx_bootcamp_nix_diary | /src/diary_config.sh | UTF-8 | 743 | 3.46875 | 3 | [] | no_license | #! /bin/bash
# diary_config: update the diary configuration stored in ~/.diaryrc.
#   $1 must be "config"; $2 selects what to change:
#     -d  set DIARY_PATH (the diary directory)
#     -e  set EDITOR (the editor binary)
# The prompts and messages are in Russian, as in the original UI.
if [[ $1 == config ]]
then
    if [[ $2 == -d ]]
    then
        printf "Введите полный путь: "
        read -r way
        # NOTE: the path must already exist for the check below to pass;
        # the mkdir -p afterwards is then a no-op kept for safety.
        if [ -e "$way" ]
        then
            # Rewrite the rc file with the new diary path, keeping the current
            # editor.  (The old pipeline "cat ~/.diaryrc | grep ... > ~/.diaryrc"
            # truncated the file before reading it, so the rc always ended up
            # containing exactly these two lines anyway — write them directly.)
            printf 'DIARY_PATH=%s\nEDITOR=%s\n' "$way" "$EDITOR" > ~/.diaryrc
            mkdir -p "$way"
            cd "$way" || exit 1
        else echo "Каталог не найден"
        fi
    fi
    if [[ $2 == -e ]]
    then
        printf "Введите путь до программы: "
        read -r way
        if [ -e "$way" ]
        then
            # Rewrite the rc file with the new editor, keeping the diary path.
            printf 'EDITOR=%s\nDIARY_PATH=%s\n' "$way" "$DIARY_PATH" > ~/.diaryrc
        else echo "$way не найден"
        fi
    fi
fi
| true |
f2108e20b023c5eb4616832112573f271b200bf6 | Shell | cduongt/mmg-cluster-setup-CESNET | /provision/installation_files/_run.sh | UTF-8 | 371 | 2.71875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Launcher for META-pipe jobs: validate the workflow jar, then start the
# requested pipeline stage in the background.
source ~/provision/_init.sh
# Sanity-check the workflow definition before starting anything.
java -jar $METAPIPE_DIR/workflow-assembly-0.1-SNAPSHOT.jar validate
sleep 2
# $2 tags the job; the stage scripts below read it from the environment.
export JOB_TAG=$2
if [ "$1" != "assembly" ]; then
# Any first argument other than "assembly" runs the functional-analysis
# stage, passing through all original arguments.
nohup /bin/bash ~/provision/installation_files/_run_func_analysis.sh "$@" > /dev/null 2>&1 &
else
# "assembly" runs the assembly stage with the remaining arguments ($2 onward).
nohup /bin/bash ~/provision/installation_files/_run_assembly.sh "${@:2}" > /dev/null 2>&1 &
fi
| true |
cea54fc5690ce670ec44ba388862df2844e4801f | Shell | honghh2018/Linux_Shell_collected | /gff_file_filter.sh | UTF-8 | 946 | 2.546875 | 3 | [] | no_license | gff3 file had some problem with format,then you can try below command to fix it with awk and sed command,the required manipulation derived from your file
,as below command no satified you,you can modify it.
cat Rubus_occidentalis_v1.1.lifted.gff3|sed s/\;Name=.*//g|awk 'BEGIN{OFS="\t";}{if($3~/mRNA/){print $1,$2,"gene",$4,$5,$6,$7,$8,$9;print $1,$2,"mRNA",$4,$5,$6,$7,$8,$9".1"";Parent="$9".1"}
else if($1~/##.*/){print $0}else{print $0".1"}}'|sed s/\;Alias=.*//g|sed s/\=ID//g|awk 'BEGIN{OFS="\t"}
{if($3~/exon/) sub(":exon",".1:exon",$9);print $0}'|awk 'BEGIN{OFS="\t"} {if($3~/CDS/) sub(":cds",".1:cds",$9);print $0}'|less -SN
####gff3 standard format filter command
cat Rubus_occidentalis_v1.1.lifted_3.gff3 |awk 'BEGIN{OFS="\t"}{if($3~/mRNA/){sub(";",".1;",$9)}print $0}' |awk 'BEGIN{OFS="\t"}{if($3~/exon/){print $0".1"}else if($3~/CDS/){print $0".1"}else{print $0}}' >Rubus_occidentalis_v1.1.lifted_4.gff3
good luck.
| true |
f818be19b3c3545fe9c4d44fc2bc6d426ab3f6de | Shell | iretiayo/dynamic_grasping_pybullet_v2 | /run_single_mico.sh | UTF-8 | 4,094 | 2.953125 | 3 | [] | no_license | #!/usr/bin/env bash
# this script iterates through the list of objects in serial
# Some notes about single quote versus double quote:
# https://stackoverflow.com/questions/6697753/difference-between-single-and-double-quotes-in-bash
# Single quotes won't interpolate anything. So we need to use double quote so the python command
# can interpolates the argument values, but we need to escape interpretting object_name because
# it is not defined and it is interpretted only at run time inside screen
node_id=${node_id:-10000}
exp_name=${exp_name:-run}
motion_mode=${motion_mode:-dynamic_linear}
robot_config_name=${robot_config_name:-mico}
num_trials=${num_trials:-100}
grasp_database_path=${grasp_database_path:-assets/grasps/filtered_grasps_noise_100}
baseline_experiment_path=${baseline_experiment_path:-assets/benchmark_tasks/mico/linear_tasks_mico}
grasp_threshold=${grasp_threshold:-0.03}
lazy_threshold=${lazy_threshold:-30.3}
conveyor_speed=${conveyor_speed:-0.03}
close_delay=${close_delay:-0.5}
back_off=${back_off:-0.05}
distance_low=${distance_low:-0.15}
distance_high=${distance_high:-0.4}
pose_freq=${pose_freq:-5}
use_previous_jv=${use_previous_jv:-true}
use_seed_trajectory=${use_seed_trajectory:-true}
max_check=${max_check:-10}
use_box=${use_box:-true}
use_kf=${use_kf:-true}
use_gt=${use_gt:-false}
fix_motion_planning_time=${fix_motion_planning_time:-0.14}
use_reachability=${use_reachability:-true}
use_motion_aware=${use_motion_aware:-true}
motion_aware_model_path=${motion_aware_model_path:-assets/motion_aware_models/mico}
alpha=${alpha:-0.5}
fix_grasp_ranking_time=${fix_grasp_ranking_time:-0.135}
always_try_switching=${always_try_switching:-true}
use_joint_space_dist=${use_joint_space_dist:-true}
rendering=${rendering:-false}
record_video=${record_video:-false}
load_obstacles=${load_obstacles:-false}
add_top_shelf=${add_top_shelf:-false}
# assign the keyword argument values
while [[ $# -gt 0 ]]; do
if [[ $1 == *"--"* ]]; then
param="${1/--/}"
declare $param="$2"
fi
shift
done
timestr=${exp_name}_mico_$(hostname)_$(date '+%Y-%m-%d_%H-%M-%S')
mkdir $timestr
cp run_single_mico.sh $timestr
echo ${timestr}_moveit
screen -dmS ${timestr}_moveit bash -c "source ../../devel/setup.bash;
export ROS_MASTER_URI=http://localhost:${node_id};
roslaunch launch/mico_moveit_ros.launch planner:=ompl;
$SHELL"
sleep 3
echo ${timestr}_pybullet
screen -dmS ${timestr}_pybullet bash -c "source ../../devel/setup.bash;
export ROS_MASTER_URI=http://localhost:$node_id;
for object_name in bleach_cleanser mustard_bottle potted_meat_can sugar_box tomato_soup_can cube power_drill; do
python run_dynamic_with_motion.py \
--motion_mode ${motion_mode} \
--object_name \${object_name} \
--robot_config_name ${robot_config_name} \
--num_trials ${num_trials} \
--result_dir ${timestr} \
--grasp_database_path ${grasp_database_path} \
--baseline_experiment_path ${baseline_experiment_path} \
--grasp_threshold ${grasp_threshold} \
--lazy_threshold ${lazy_threshold} \
--conveyor_speed ${conveyor_speed} \
--close_delay ${close_delay} \
--back_off ${back_off} \
--distance_low ${distance_low} \
--distance_high ${distance_high} \
--pose_freq ${pose_freq} \
--use_previous_jv ${use_previous_jv} \
--use_seed_trajectory ${use_seed_trajectory} \
--max_check ${max_check} \
--use_box ${use_box} \
--use_kf ${use_kf} \
--use_gt ${use_gt} \
--fix_motion_planning_time ${fix_motion_planning_time} \
--use_reachability ${use_reachability} \
--use_motion_aware ${use_motion_aware} \
--motion_aware_model_path ${motion_aware_model_path} \
--alpha ${alpha} \
--fix_grasp_ranking_time ${fix_grasp_ranking_time} \
--use_joint_space_dist ${use_joint_space_dist}\
--always_try_switching ${always_try_switching}\
--record_video ${record_video} \
--rendering ${rendering} \
--load_obstacles ${load_obstacles} \
--add_top_shelf ${add_top_shelf};
sleep 5;
done;
$SHELL"
| true |
ebe141297ad1825509ca4ecd2e03c9ae43e7a33b | Shell | wilsonCernWq/autoGrader | /hw2/solutions/airline/runtests.sh | UTF-8 | 206 | 2.765625 | 3 | [] | no_license | #!/bin/bash
# Build the main binary and the unit-test driver; abort if either build
# fails, otherwise the tests below would run a stale or missing binary.
make > /dev/null || { echo "build failed" >&2; exit 1; }
make -B testing > /dev/null || { echo "test build failed" >&2; exit 1; }

# Diff-based I/O tests: feed test$i.in to the program and compare against
# the expected output; diff prints nothing for a passing test.
for i in {1..6}
do
    ./assignshipments < "test$i.in" > tmp.out
    diff tmp.out "test$i.out" || echo "test $i failed" >&2
done
rm -f tmp.out

# Unit tests: the testing driver selects a case by number.
for i in {1..6}
do
    ./testing "$i"
done
| true |
8a3d36984237607623dbd028d7c8e0f6fb332280 | Shell | AndresHF/Proyecto-Jarvis | /shellScripts/display.sh | UTF-8 | 1,979 | 3.78125 | 4 | [] | no_license | #!/bin/bash
ORANGE='\033[0;33m'
LBLUE='\033[1;34m'
LCYAN='\033[1;36m'
RESTORE='\033[1;32m'
sudo printf "${RESTORE}Looking for ${LCYAN}$1${RESTORE}";echo
if echo $2 | grep "deep" > /dev/null; then
results=$(find / -type d -name *$1* 2> /dev/null)
elif echo $2 | grep "file" > /dev/null; then
results=$(find / -type f -name *$1* 2> /dev/null)
file=true
else
results=$(find ~/ -type d -name $1 2> /dev/null)
fi
lines=0
selectedLine="wrong"
result="undefined"
function openCode() {
result=$(echo $results | tr " " "\n" | head -$selectedLine | tail -1)
printf "Do you want to code ${LCYAN}$result${RESTORE} ? (y/n)";echo
answer="wrong"
while echo $answer | egrep -xv "(y)|(n)" > /dev/null; do
read answer
done
if echo $answer | grep "y" > /dev/null; then
printf "Opening ${LCYAN}$result${RESTORE}... Have a nice coding!!";echo
if echo $file | grep "true" > /dev/null; then
sudo code --user-data-dir="~/.vscode-root" $result
exit 0
fi
cd $result
sudo code . --user-data-dir
exit 0
else
selectedLine="wrong"
checkMatches
fi
}
function checkMatches() {
if $(echo $results | egrep "[a-zA-Z]" > /dev/null); then
lines=$(echo $results | tr " " "\n" | wc -l)
printf "$lines matches!!\n"
if [[ "$lines" > 1 ]]; then
printf "${ORANGE}Select one of the following:${LCYAN}";echo;echo #'(type something to contiue...)'
nl -n ln <(printf "${LCYAN}$results" | tr " " "\n")
echo
printf "${ORANGE}Type the selected line:${RESTORE} "
while echo $selectedLine | egrep "[a-zA-Z - */?.'\[!¡¿#~$%&\(\)=]" > /dev/null; do
read selectedLine
selectedLine=$(awk -v var="$selectedLine" -v top="$lines" 'BEGIN{if((var > 0) && (var <= top)) print var; else print "Wrong number... must be between 1-"top}')
echo $selectedLine | grep "number"
done
result=$(echo $results | tr " " "\n" | head -$selectedLine | tail -1)
openCode
else
selectedLine=1
openCode
fi
else
echo "No matches..."
exit 1
fi
}
checkMatches
| true |
208db4cf51dbdc9920ef3f80823dba872997283f | Shell | Bondzio/AUR | /sslyze-git/PKGBUILD | UTF-8 | 2,162 | 2.9375 | 3 | [] | no_license | # Maintainer: Fabian Zaremba <fabian at youremail dot eu>
pkgname=sslyze-git
pkgver=0.10.412
pkgrel=2
pkgdesc="Fast and full-featured SSL scanner."
arch=('i686' 'x86_64')
url="https://github.com/nabla-c0d3/sslyze"
license=('GPL2')
depends=('python2')
makedepends=('git')
provides=('sslyze')
conflicts=('sslyze')
options=('!makeflags')
source=("git://github.com/nabla-c0d3/nassl.git"
"git://github.com/nabla-c0d3/sslyze.git"
"http://zlib.net/zlib-1.2.8.tar.gz"
"https://www.openssl.org/source/openssl-1.0.1m.tar.gz")
sha256sums=('SKIP'
'SKIP'
'36658cb768a54c1d4dec43c3116c27ed893e88b02ecfcb44f2166f9c0b7f2a0d'
'095f0b7b09116c0c5526422088058dc7e6e000aa14d22acca6a4e2babcdfef74')
pkgver() {
cd "$srcdir/sslyze"
echo $(grep "##" CHANGELOG.md | head -n 1 | sed 's/## v//').$(git rev-list --count HEAD)
}
build() {
cd "$srcdir/"
mv "$srcdir/openssl-1.0.1m" "$srcdir/nassl/"
mv "$srcdir/zlib-1.2.8" "$srcdir/nassl/"
cd "$srcdir/nassl"
#Update to new openssl version
sed -i "s/openssl-1.0.1i/openssl-1.0.1m/" "$srcdir/nassl/buildAll_config.py"
#Change python invocations to use python2
#Force linking with libz seems to be needed, add LDFLAGS for setup_unix.py invocation
sed -i "s/python -m unittest discover --pattern/python2 -m unittest discover --pattern/" "$srcdir/nassl/buildAll_unix.py"
sed -i 's/python setup_unix.py build/LDFLAGS="-Wl,--no-as-needed -lz" python2 setup_unix.py build/' "$srcdir/nassl/buildAll_unix.py"
python2 ./buildAll_unix.py
cd "$srcdir/nassl/test/nassl"
rm *.pyc
sed -i 's#/usr/bin/python#/usr/bin/python2#' *.py
mv "$srcdir/nassl/test/nassl" "$srcdir/sslyze/"
}
package() {
#Packaging routine derived from sslyze PKGBUILD by goll
# Install files in /opt
mkdir -p "$pkgdir/opt/sslyze"
cp -a "$srcdir/sslyze/." "$pkgdir/opt/sslyze"
rm -rf "$pkgdir/opt/sslyze/.git"
find "$pkgdir/opt/sslyze" -type f -name '*.py' -print0 | xargs -0 sed -i 's#/usr/bin/env python#/usr/bin/env python2#'
# Create an indirect launcher in /usr/bin
mkdir -p "$pkgdir/usr/bin"
cat << EOF > "$pkgdir/usr/bin/sslyze"
#!/usr/bin/bash
cd /opt/sslyze && python2 sslyze.py \$@
EOF
chmod 755 "$pkgdir/usr/bin/sslyze"
}
| true |
c72c59f08783b3359513ecddddeb0126f377fe65 | Shell | sasha-lan/arp_project | /arp.sh | UTF-8 | 91 | 2.625 | 3 | [] | no_license | #!/bin/bash
# Append the kernel's eth0 ARP entries to /tmp/arp.log once per minute,
# running until the script is killed.
while :
do
    grep eth0 /proc/net/arp >> /tmp/arp.log
    sleep 60
done
| true |
736b24b581902a7b70374ee872dbb8c123013157 | Shell | 1newstar/DBA_Monitoring_Scripts | /EBS2/scripts/rman_ebs.sh | UTF-8 | 2,242 | 2.578125 | 3 | [
"MIT"
] | permissive | #!/bin/bash. $HOME/. bash_profile
id=`id -un`
. $HOME/.bash_profile
#find /newdump/filesystems/EBSRMAN/ -type d -mtime 1 -exec /bin/rm -f {} +
cd /newdump/filesystems/EBSRMAN/
rm -rf *
cd $HOME
mkdir -p /newdump/filesystems/EBSRMAN/$(date +%d%m%y)
mv /newdump/rmanbackup/EBS/* /newdump/filesystems/EBSRMAN/$(date +%d%m%y)/
BODY=$HOME/scripts/rman_body
LOGFILE=/home/oraep01/scripts/backup_`date +%d-%m-%y-%H-%M-%S`.log
rman target / trace $LOGFILE << EOF
run
{
allocate channel e1 device type DISK format '/newdump/rmanbackup/EBS/Backup%d_DB_%u_%s_%p_%T.bkp';
allocate channel e2 device type DISK format '/newdump/rmanbackup/EBS/Backup%d_DB_%u_%s_%p_%T.bkp';
allocate channel e3 device type DISK format '/newdump/rmanbackup/EBS/Backup%d_DB_%u_%s_%p_%T.bkp';
allocate channel e4 device type DISK format '/newdump/rmanbackup/EBS/Backup%d_DB_%u_%s_%p_%T.bkp';
delete noprompt obsolete;
delete noprompt expired archivelog all;
delete noprompt archivelog all completed before 'sysdate-2';
crosscheck archivelog all;
sql 'alter system archive log current';
backup as compressed backupset tag 'full_EBS_backup' database plus archivelog delete input;
backup current controlfile format '/newdump/rmanbackup/EBS/controlfile_%U_%D.bkp';
backup spfile format '/newdump/rmanbackup/EBS/spfile_%U_%D.bkp';
delete noprompt archivelog all completed before 'sysdate-2';
}
exit;
EOF
cp /etc/hosts /newdump/filesystems/EBSRMAN/$(date +%d%m%y)
cp /etc/sysctl.conf /newdump/filesystems/EBSRMAN/$(date +%d%m%y)
cp /u01/grid/network/admin/listener.ora /newdump/filesystems/EBSRMAN/$(date +%d%m%y)
cp /u01/grid/network/admin/sqlnet.ora /newdump/filesystems/EBSRMAN/$(date +%d%m%y)
cp /u01/rac/product/11.2.0/dbhome_1/network/admin/tnsnames.ora /newdump/filesystems/EBSRMAN/$(date +%d%m%y)
crontab -l > /newdump/filesystems/EBSRMAN/$(date +%d%m%y)/$(date +%Y%m%d).crontab
mutt -s "Backup status of RMAN from EBS" -a $LOGFILE -c l1.dbsupport@srei.com -b caesar.dutta@in.pwc.com < $BODY
cp -rf $LOGFILE /newdump/filesystems/mail/
cd $HOME/
find . -name '*.log' -mtime +7 -exec rm -r {} \;
cd $HOME/scripts/
find . -name '*.log' -mtime +7 -exec rm -r {} \;
cd /newdump/filesystems/EBSRMAN/
find -type d -mtime +2 -exec rm -r {} \;
exit 0
| true |
7423b80fd0d323e34a5f76f45282a9475391101e | Shell | apache/incubator-crail | /bin/crail | UTF-8 | 2,709 | 3.359375 | 3 | [
"Apache-2.0",
"EPL-1.0",
"Classpath-exception-2.0",
"GCC-exception-3.1",
"BSD-3-Clause",
"CC-PDDC",
"LicenseRef-scancode-other-permissive",
"CDDL-1.1",
"EPL-2.0",
"CDDL-1.0",
"MIT",
"GPL-2.0-only",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-public-domain-disclaimer",
"BSD-2-Clause"
] | permissive | #!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
bin=`which $0`
bin=`dirname ${bin}`
bin=`cd "$bin"; pwd`
LIBEXEC_DIR="$bin"/../libexec
JAVA=$JAVA_HOME/bin/java
function print_usage(){
echo "Usage: crail COMMAND"
echo " where COMMAND is one of:"
echo " namenode run the Crail namenode"
echo " datanode run a Crail datanode"
echo " removeDatanode remove a Crail datanode"
echo " fsck run a Crail file check command"
echo " fs run a Crail shell command"
echo " iobench run a Crail benchmark/test"
echo " test run a Crail unit test"
}
if [ $# = 0 ]; then
print_usage
exit
fi
COMMAND=$1
shift
case $COMMAND in
# usage flags
--help|-help|-h)
print_usage
exit
;;
esac
if [ "$COMMAND" = "namenode" ] ; then
CLASS=org.apache.crail.namenode.NameNode
elif [ "$COMMAND" = "datanode" ] ; then
CLASS=org.apache.crail.storage.StorageServer
elif [ "$COMMAND" = "removeDatanode" ] ; then
CLASS=org.apache.crail.tools.RemoveDataNode
elif [ "$COMMAND" = "fsck" ] ; then
CLASS=org.apache.crail.tools.CrailFsck
elif [ "$COMMAND" = "fs" ] ; then
CLASS=org.apache.hadoop.fs.FsShell
elif [ "$COMMAND" = "getconf" ] ; then
CLASS=org.apache.crail.hdfs.GetConf
elif [ "$COMMAND" = "iobench" ] ; then
CLASS=org.apache.crail.tools.CrailBenchmark
elif [ "$COMMAND" = "hdfsbench" ] ; then
CLASS=org.apache.crail.hdfs.tools.HdfsIOBenchmark
elif [ "$COMMAND" = "test" ] ; then
CLASS=org.junit.runner.JUnitCore
fi
CONF_PATH="$bin"/../conf
export CLASSPATH="$bin"/../jars/*:${CONF_PATH}:.
export LD_LIBRARY_PATH="$bin/../lib:$LD_LIBRARY_PATH"
if [ -f "${CONF_PATH}/crail-env.sh" ]; then
# Promote all variable declarations to environment (exported) variables
set -a
. "${CONF_PATH}/crail-env.sh"
set +a
fi
exec "$JAVA" -Dproc_$COMMAND -Dsun.nio.PageAlignDirectMemory=true $CRAIL_EXTRA_JAVA_OPTIONS $CLASS "$@"
| true |
912cd0d9696e474680c50d7957f4578fbf530dfd | Shell | gogs/gogs | /scripts/init/openbsd/gogs | UTF-8 | 300 | 2.6875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
#
# $OpenBSD$
# shellcheck disable=SC2034,SC1091,SC2154,SC2086
daemon="/home/git/gogs/gogs"
daemon_user="git"
daemon_flags="web"
gogs_directory="/home/git/gogs"
rc_bg=YES
. /etc/rc.d/rc.subr
rc_start() {
${rcexec} "cd ${gogs_directory}; ${daemon} ${daemon_flags} ${_bg}"
}
rc_cmd $1
| true |
ab0aec0de5ee4b5c96379e9a6ae827db1ddf4e48 | Shell | jaredhuang/work-dir | /config/mysql/mysqld | UTF-8 | 948 | 3.578125 | 4 | [] | no_license | #!/bin/sh
#created by jared
####################################
# SysV-style init script for a single MySQL instance on port 3306.
# Paths and credentials are hard-coded below.
# SECURITY NOTE(review): the root password is stored here in plain text and
# is visible in `ps` output when passed via -p<pwd>; consider ~/.my.cnf.
basedir=/usr/local/webserver/mysql
datadir=/data/mysql/3306/data
conf=/data/mysql/3306/my.cnf
sock=/tmp/mysql.sock
port=3306
mysql_user="root"
mysql_pwd="saylove"
CmdPath="/usr/local/webserver/mysql/bin"
####################################
#startup function
# Launch mysqld_safe in the background with this instance's config file.
function_start_mysql()
{
printf "Starting MySQL...\n"
/bin/sh ${CmdPath}/mysqld_safe --defaults-file=${conf} 2>&1 > /dev/null &
}
#stop function
# Ask the server (via its socket) to shut down cleanly.
function_stop_mysql()
{
printf "Stoping MySQL...\n"
${CmdPath}/mysqladmin -u ${mysql_user} -p${mysql_pwd} -S ${sock} shutdown
}
#restart function
# Stop, wait for the server to go down, then start again.
function_restart_mysql()
{
printf "Restarting MySQL...\n"
function_stop_mysql
sleep 6
function_start_mysql
}
# Dispatch on the first argument, SysV init style.
case $1 in
start)
function_start_mysql
;;
stop)
function_stop_mysql
;;
restart)
function_restart_mysql
;;
*)
printf "Usage: /etc/init.d/mysqld {start|stop|restart}\n"
esac
| true |
f4794d939625df5dbe202f8b58f4c6c9febf2d2d | Shell | ysamlan/travis-cookbooks | /ci_environment/haskell/templates/default/ghc_find.erb | UTF-8 | 244 | 3.375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
if [[ "$1" == "" ]]; then
echo "<%=@default%>"
else
for v in <%=@versions.join(" ")%>; do
if [[ "$v" == "$1"* ]]; then
echo "$v"
exit 0
fi
done
echo "ghc_find: error, no such version $1" >&2
exit 1
fi
| true |
461c1762de63f949b3a5e37a80d196bc23d09644 | Shell | chiragsakhuja/lc3tools | /test/sanitize.sh | UTF-8 | 1,481 | 4.125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
if [[ $# != 2 ]]; then
echo "usage: $0 lab-dir source-file"
exit 0
fi
# Add BLESSING files to each directory
while IFS="" read -r line || [ -n "$line" ]; do
part=$(echo $line | awk '{ print($(NF-1), $NF); }')
canvasid=$(echo $part | cut -d' ' -f1)
blessing=$(echo $part | cut -d' ' -f2)
seed=$(grep -o 'Seed: [0-9]\+' $1/$canvasid/$2.out.txt)
if [[ -z "$seed" ]]; then
seed='0'
else
seed=$(head -n1 <<< "$seed" | cut -d' ' -f2)
fi
echo $seed > $1/$canvasid/BLESSING
echo $blessing >> $1/$canvasid/BLESSING
done < $1/REPORT.tsv
# Delete all files other than source files, BLESSING files, and REPORT.tsv
find $1 -type f | grep -v "$2\$" | grep -v 'REPORT\.tsv' | grep -v 'BLESSING' | xargs -I{} rm -vf {}
# Remove comments and blank lines from source files
for src in $(ls $1/*/$2); do
nocomment=$(sed 's/;.*//' $src)
noblank=$(sed '/^[[:space:]]*$/d' <<< "$nocomment")
sed 's/\r//' > $src <<< "$noblank"
done
# Sanity check to make sure there isn't any revealing data
search=$(cut -d' ' -f1-3 $1/REPORT.tsv | sed 's/\t/\n/g' | sed 's/,/\n/g')
for query in $search; do
echo "========== $query =========="
grep --color=always -i $query $1/*/$2
done
# Rename directories to further anonymize
numdir=$(ls -l $1 | grep '^d' | wc -l)
nums=$(shuf -i 100-999 -n $numdir)
for canvasid in $1/*/; do
num=$(head -n1 <<< "$nums")
nums=$(sed '1d' <<< "$nums")
mv $canvasid $1/$num
done
| true |
b575c9ce079dd6d0c92ba86f54cb847a271313f1 | Shell | blackout314/boilerplates | /scanner/scanner.sh | UTF-8 | 408 | 3.109375 | 3 | [] | no_license | #!/bin/sh
# Probe each path listed in scanner.txt against the base URL given as $1 and
# report the HTTP status code.  Lines in scanner.txt look like:
#   <HTTP-verb> <path>
# Paths answering 404/503 are shown with "...", everything else with ">>>".
# Note: the verb field is read but not used (the probe is always a GET).
while read -r verb url _; do
    # Ask curl for just the status code; the response body is discarded.
    result=$(curl -A "Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3" \
        -s -o /dev/null -w "%{http_code}" "$1$url")
    if [[ "$result" == "404" || "$result" == "503" ]]; then
        echo " ... $url $result"
    else
        echo " >>> $url $result"
    fi
done < scanner.txt
| true |
458ff8b0ae66c987f8e78527a41513f43d302421 | Shell | ccmelena/nodejsv10-java8-buildpack | /lib/binaries_java.sh | UTF-8 | 1,377 | 3.09375 | 3 | [
"MIT"
] | permissive | install_java() {
local version="$1"
local dir="$2"
local download_url="https://ucabcae183c6f8249d00fb0ade33.dl.dropboxusercontent.com/cd/0/get/Ah98mNXz57hUnilSVb2qFu_fyqyOr_9HNUeev4R_zxRbmoNcufApD19HMDeNezdMeYgoEK2Dpabb4v52sNU-NdJ53GyFgmAb4x2pO0UqxXdzoA/file#"
echo "Downloading JAVA [$download_url]"
curl --silent --fail --retry 5 --retry-max-time 15 -j -k -L -H "Cookie: oraclelicense=accept-securebackup-cookie" "$download_url" -o /tmp/java.tar.gz || (echo "Unable to download java; does it exist?" && false)
echo "Download complete!"
echo "Installing JAVA"
mkdir /tmp/jre
mkdir $dir
tar xzf /tmp/java.tar.gz -C /tmp/jre
rm -rf $dir/*
mv /tmp/jre/jre1.8.0_211/* $dir
chmod +x $dir/bin
echo "Installation complete!"
echo "Export PATH"
export JAVA_HOME=$dir
export PATH=$JAVA_HOME/bin:$PATH
#export LD_LIBRARY_PATH=$dir/jre/lib/amd64/server
#export LIBRARY_PATH=$JAVA_HOME/lib/amd64/server
#export LD_PRELOAD=$JAVA_HOME/lib/amd64/server
echo ">>> $JAVA_HOME"
echo ">>> $PATH"
#echo ">>> $LIBRARY_PATH"
#echo ">>> $LD_PRELOAD"
#echo ">>> $LD_LIBRARY_PATH"
#echo "list3..."
#ls "/usr/lib"/
#ln -sf $dir/jre/lib/amd64/server/libjvm.so /usr/lib/libjvm.so
#cp $dir/jre/lib/amd64/server/libjvm.so /usr/lib/libjvm.so
#echo "list4..."
#ls "/usr/lib"/
echo "Version: "
java -version
#javac -version
}
| true |
757f8913b281d18be3962f2ab30115959a9a52a7 | Shell | VerKnowSys/svdOS | /etc-jail/rc.d/pfsync | UTF-8 | 801 | 3.296875 | 3 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | #!/bin/sh
#
# $FreeBSD: releng/10.1/etc/rc.d/pfsync 253357 2013-07-15 08:48:45Z des $
#
# PROVIDE: pfsync
# REQUIRE: FILESYSTEMS netif
# KEYWORD: nojail
. /etc/rc.subr
name="pfsync"
rcvar="pfsync_enable"
start_precmd="pfsync_prestart"
start_cmd="pfsync_start"
stop_cmd="pfsync_stop"
required_modules="pf"
pfsync_prestart()
{
case "$pfsync_syncdev" in
'')
warn "pfsync_syncdev is not set."
return 1
;;
esac
return 0
}
pfsync_start()
{
local _syncpeer
echo "Enabling pfsync."
if [ -n "${pfsync_syncpeer}" ]; then
_syncpeer="syncpeer ${pfsync_syncpeer}"
fi
load_kld pfsync
ifconfig pfsync0 $_syncpeer syncdev $pfsync_syncdev $pfsync_ifconfig up
}
pfsync_stop()
{
echo "Disabling pfsync."
ifconfig pfsync0 -syncdev -syncpeer down
}
load_rc_config $name
run_rc_command "$1"
| true |
32aeba76e064ceb5e74b888813b58dc88c2790d9 | Shell | iwosurf/OpenVPN.Ansible | /roles/openvpn/templates/create.clientkey.sh | UTF-8 | 406 | 2.96875 | 3 | [] | no_license | #!/bin/bash
echo "Please enter the key name:"
read name
echo "You entered: $name"
mkdir $name
mkdir /etc/$name
. ./vars
./pkitool $name
cp -v keys/$name.crt /etc/$name/client.crt
cp -v keys/$name.key /etc/$name/client.key
cp -a keys/ca.crt /etc/$name/ca.crt
cp -a keys/ta.key /etc/$name/ta.key
cp -v client.ovpn /etc/$name/$name.ovpn
cd /etc/$name
zip -9 $name.zip client.crt client.key ca.crt $name.ovpn
| true |
33a670722d576ba04c0d022cb809313d82c7e093 | Shell | stlcours/main | /ftgl/PKGBUILD | UTF-8 | 1,015 | 2.890625 | 3 | [] | no_license |
pkgname=ftgl
pkgver=2.4.0
pkgrel=1
pkgdesc="OpenGL library to use arbitrary fonts"
arch=('x86_64')
url="https://github.com/frankheckenbach/ftgl"
license=('MIT')
depends=('freetype2' 'glu')
makedepends=('doxygen' 'mesa' 'cmake')
options=('!libtool')
source=("https://github.com/frankheckenbach/ftgl/archive/v${pkgver}.tar.gz"
"https://github.com/frankheckenbach/ftgl/commit/835f2ba7911a6c15a1a314d5e3267fa089b5a319.diff")
md5sums=('fba1e1c548ebe3ab362495e96a7a0670'
'8ee6dada0a2fa4b639c99663b263f183')
prepare() {
cd ${pkgname}-${pkgver}
patch -p1 -i ${srcdir}/835f2ba7911a6c15a1a314d5e3267fa089b5a319.diff
}
build() {
mkdir -p build
cd build
cmake ../${pkgname}-${pkgver} \
-DCMAKE_INSTALL_PREFIX=/usr \
-DCMAKE_BUILD_TYPE=Release
make
}
package() {
cd build
make DESTDIR=${pkgdir} install
#install -m644 src/FT{Face,Library,Size}.h ${pkgdir}/usr/include/FTGL/
install -Dm644 ../${pkgname}-${pkgver}/COPYING ${pkgdir}/usr/share/licenses/${pkgname}/LICENSE
}
| true |
95623dc51bdf45b037b9c871cdd37e3452d049f9 | Shell | alan-turing-institute/CuCalc | /.git_hooks/pre-commit | UTF-8 | 152 | 2.671875 | 3 | [] | no_license | #!/bin/bash
# Git pre-commit hook: regenerate the Dockerfile and stage it so every commit
# contains an up-to-date version.
# Abort on any failure — a non-zero exit from this hook blocks the commit,
# which is exactly what we want if regeneration fails.
set -e
# Silence normal output; errors still reach stderr.
exec 1>/dev/null
GIT_ROOT=$(git rev-parse --show-toplevel)
pushd "$GIT_ROOT"
make clean
make Dockerfile
git add ':(top)Dockerfile'
popd
| true |
fadf9c3649f6cf22cedc576109e72b56499bdb54 | Shell | kashyapnanavati/practice | /query/run_test_hd.sh | UTF-8 | 415 | 3.03125 | 3 | [] | no_license | echo "Start the hard test"
echo "Cleaning..."
make clean
echo "build ..."
make
if [ $? -ne 0 ]; then
echo "Compilation failed"
exit 1
fi
./query hard_input.bin
if [ $? -ne 0 ]; then
echo "Runtime Error"
exit 1
fi
#echo "compare File ..."
#result=$(diff easy_stdout.txt easy_expected_stdout.txt)
#if [ $? -eq 0 ]; then
# echo "Hard Test Passed !"
#else
# echo "Hard Test Failed !"
#fi
| true |
ce162b7ee4c0e6ed5870666ecb831d56032ae775 | Shell | ashishsony/dev | /sh.sh | UTF-8 | 242 | 3.328125 | 3 | [] | no_license | #!/bin/sh
# Demo script: counts down from $1-1 to 0, prints a name chosen by $1, then
# echoes all arguments.
# Default to 0 so the numeric test below does not error when run without
# arguments ("[: -gt: unary operator expected").
count=${1:-0}
while [ "$count" -gt 0 ]
do
    count=$((count - 1))
    echo "$count"
done
case $1 in
    1) echo ashish;;
    2) echo soni;;
    3) echo nupur;;
    4) echo soni;;
    *) echo "ashish luvs nupur :)";;
esac
# Print all arguments, space separated.
echo "$@"
| true |
4bcd3bfb5d12272385bc5e5adf763452f870f045 | Shell | PigPRS/SafeSwiftDemo | /SafeSwiftDemo/confuseOC/confuseClassOC.sh | UTF-8 | 1,906 | 3.953125 | 4 | [] | no_license | #!/bin/bash
# This is a shell script; for shell basics see: http://www.runoob.com/linux/linux-shell.html
# The following uses sqlite3 to insert data; for sqlite3 commands see: http://www.runoob.com/sqlite/sqlite-tutorial.html
# Obfuscation helper for Objective-C class names: reads a list of class names,
# generates a random replacement for each, records the mapping in a SQLite
# database (for later debugging), and writes #define macros that perform the
# renaming at compile time.
# Table name inside the SQLite database
TABLENAME="CodeObClassOC"
# SQLite database file
SYMBOL_DB_FILE="CodeObClassOC.db"
# File listing the symbols (class names) to be replaced
STRING_SYMBOL_FILE="$PROJECT_DIR/$PROJECT_NAME/confuseOC/class.list"
# The generated replacement #define macros go into this header
HEAD_FILE="$PROJECT_DIR/$PROJECT_NAME/confuseOC/CodeObClassOC.h"
# Keep the mapping in a database so bugs can be traced back later
createTable()
{
echo "create table $TABLENAME(src text,des text);" | sqlite3 $SYMBOL_DB_FILE
}
insertValue()
{
echo "insert into $TABLENAME values('$1','$2');" | sqlite3 $SYMBOL_DB_FILE
}
query()
{
echo "select * from $TABLENAME where src='$1';" | sqlite3 $SYMBOL_DB_FILE
}
# Generate a random 16-character alphabetic name
randomString()
{
openssl rand -base64 64 | tr -cd 'a-zA-Z' | head -c 16
}
# Delete the old database file
rm -f $SYMBOL_DB_FILE
# Delete the old macro header file
rm -f $HEAD_FILE
# Create the mapping table
createTable
# touch creates an empty file at the given path
touch $HEAD_FILE
echo "#ifndef CodeObClassOC_h
#define CodeObClassOC_h" >> $HEAD_FILE
echo "" >> $HEAD_FILE
echo "//confuse string at `date`" >> $HEAD_FILE
# Read the whole symbol list with cat and process it line by line, emitting
# one replacement macro per symbol
cat "$STRING_SYMBOL_FILE" | while read -ra line;
do
if [[ ! -z "$line" ]]
then
random=`randomString`
echo $line $random
# Record the generated random name in the database table
insertValue $line $random
# Append the replacement macro to the header file ($HEAD_FILE)
echo "#ifndef $line" >> $HEAD_FILE
echo "#define $line $random" >> $HEAD_FILE
echo "#endif" >> $HEAD_FILE
echo "" >> $HEAD_FILE
fi
done
echo "" >> $HEAD_FILE
echo "#endif" >> $HEAD_FILE
sqlite3 $SYMBOL_DB_FILE .dump
| true |
ed568b0a68b38377d54ca788b597e78e32f648ae | Shell | MW-autocat-script/MW-autocat-script | /catscripts/Government/Countries/China/China.sh | UTF-8 | 1,023 | 3.25 | 3 | [
"MIT"
] | permissive | #!/bin/bash
KEYWORDS_CHINA="People(|')s(| )Republic(| )of(| )China"
KEYWORDS_CHINA_SECONDARY="China"
KEYWORDS_HONGKONG="Hong(| )Kong"
KEYWORDS_BEIJING="Beijing"
KEYWORDS_GREATWALL="Great(| )Wall(| )of(| )China"
KEYWORDS_CHINA_SECONDARY_EXCLUDE="Republic(| )of(| )China|fine(| )china|antique(| )china|China(| )town|$KEYWORDS_HONGKONG|$KEYWORDS_BEIJING|$KEYWORDS_GREATWALL"
KEYWORDS_CHINA_ALL="$KEYWORDS_CHINA|$KEYWORDS_CHINA_SECONDARY|$KEYWORDS_HONGKONG|$KEYWORDS_BEIJING|$KEYWORDS_GREATWALL"
if [ "$1" == "" ];
then
debug_start "China"
CHINA=$(egrep -i "$KEYWORDS_CHINA" "$NEWPAGES"; egrep -i "$KEYWORDS_CHINA_SECONDARY" "$NEWPAGES" | egrep -iv "$KEYWORDS_CHINA_SECONDARY_EXCLUDE")
WALL=$(egrep -i "$KEYWORDS_GREATWALL" "$NEWPAGES")
BEIJING=$(egrep -i "$KEYWORDS_BEIJING" "$NEWPAGES")
HONGKONG=$(egrep -i "$KEYWORDS_HONGKONG" "$NEWPAGES")
categorize "CHINA" "China"
categorize "WALL" "Great Wall of China"
categorize "BEIJING" "Beijing"
categorize "HONGKONG" "Hong Kong"
debug_end "China"
fi
| true |
a752489b5443195de8919501095017030a5cbcea | Shell | batmanwgd/ankiCards | /phantom-progress.bash | UTF-8 | 289 | 3.21875 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# Show a dialog(1) progress gauge for another process's read position on one
# of its open file descriptors.
#   $1 - PID,  $2 - fd number
fd=/proc/$1/fd/$2
fdinfo=/proc/$1/fdinfo/$2
name=$(readlink "$fd")
# Reading through the /proc fd symlink yields the size of the target file.
size=$(wc -c < "$fd")
# Guard against a zero-length target: the gauge maths would divide by zero.
[ "$size" -gt 0 ] || { echo "target file is empty" >&2; exit 1; }
while [ -e "$fd" ]; do
    # "pos:" in fdinfo is the descriptor's current offset.
    progress=$(awk '/^pos/ {print $2}' "$fdinfo")
    echo $((100 * progress / size))
    sleep 1
done | dialog --gauge "Progress reading $name" 7 100
| true |
56e3e4aa482821eec73cc770ac08885fc0c0b864 | Shell | titopluto/sample-django-docker-swarm | /upgrade-services.sh | UTF-8 | 252 | 2.59375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Arguments: $1 = Docker Hub account, $2 = swarm service name.
account=$1
service=$2
# The image tag comes from the VERSION file in the current directory.
version=$(cat VERSION)
# Run the docker cloud client first and point DOCKER_HOST to the suggested domain/ip
docker service update --image ${account}'/swarmtest_'${service}':'${version} 'swarmtest_staging_'${service} | true |
9a2b6d89a0c78264b45d7a485ef4c65e43cc245b | Shell | TheJonny/ansible-pull-verify | /ansible-pull-verify.sh | UTF-8 | 1,237 | 4 | 4 | [] | no_license | #!/bin/bash
set -e -u -x
# Usage: ansible-pull-verify.sh <git remote> <playbook>
# Pull a playbook repository over git, verifying GPG signatures against a
# keyring pinned at first clone, then run the playbook against localhost.
remote="$1"
playbook="$2"
# Per-remote state directory; the remote URL is base64-encoded to get a
# filesystem-safe directory name.
tag="$(printf "%s" "$remote" | base64 --wrap=0)"
dir="$HOME/.ansible-pull-verify/$tag"
mkdir -p "$dir"
cd "$dir"
# gpg wrapper so git verifies signatures ONLY against our pinned keyring,
# ignoring the user's default keyring; --trust-model=always skips the
# web-of-trust check (trust was granted interactively in initialize()).
cat > gpgwrap <<END_GPGWRAP
gpg --no-default-keyring --keyring "$dir/keyring.gpg" --trust-model=always "\$@"
END_GPGWRAP
chmod +x gpgwrap
# pull: signature-verified fast-forward pull into ./working-copy, then
# refresh the pinned keyring from the repository's own keyring.gpg.
function pull
{
	git -c gpg.program="$dir/gpgwrap" -C working-copy pull --ff-only --verify-signature -- "$@"
	ln -sf working-copy/keyring.gpg keyring.gpg
}
# Exported so they stay available inside subshells (see initialize()).
export remote
export -f pull
# yesno PROMPT: interactive Yes/No menu; returns 0 for Yes, 1 for No.
function yesno {
	echo "$1"
	select yn in "Yes" "No"
	do
		case $yn in
			Yes ) return 0;;
			No ) return 1;;
		esac
	done
}
# First-time setup: clone to a temp dir, let the operator review and accept
# the keys shipped in the repo, then do a verified pull into a fresh working
# copy. The subshell's EXIT trap removes the partial state if any step fails
# under set -e; the trap is cleared once the pull verified cleanly.
function initialize
{
	tmp="$(mktemp -d)"
	git clone "$remote" "$tmp"
	cp "$tmp/keyring.gpg" .
	echo "The repository contains these keys to verify commits"
	./gpgwrap --list-keys
	yesno "Do you trust them all?" || return 1
	(
		set -e
		trap "rm -rf working-copy keyring.gpg" EXIT
		git init working-copy
		pull "$tmp"
		trap "" EXIT
	)
	rm -rf "$tmp"
	git -C working-copy remote add origin "$remote"
}
# Replace this shell with ansible-playbook, limited to the local host.
function run_ansible {
	cd working-copy
	exec ansible-playbook -c local "$playbook" -t all -l "localhost,$(hostname),$(hostname -f)"
}
if [ -d working-copy ]
then
	pull "$remote"
else
	initialize
fi
run_ansible
| true |
622c7cd0561afba33361a0a031131d9ffad942f6 | Shell | opensciencegrid/gums | /gums-service/src/main/resources/scripts/gums-add-mysql-admin | UTF-8 | 1,463 | 4.5 | 4 | [] | no_license | #!/bin/bash
# Print usage help and exit non-zero; called for any malformed invocation.
usage () {
    script=$(basename "$0")
    dn='"/DC=org/DC=doegrids/OU=People/CN=Gabriele Carcassi 12345"'
    echo "Add an admin in the GUMS database on localhost"
    echo
    echo "Usage: $script [-y] [DN for administrator] [PASSWORD]"
    echo
    echo "If '-y' is specified, do not do not prompt user for 'yes'."
    echo "If PASSWORD for gums db is not specified, mysql will prompt for one."
    echo
    echo "Example for interactive use:"
    echo "    $script $dn"
    echo
    echo "Example for automated use:"
    echo "    $script -y $dn \"secret\""
    echo
    exit 1
}
# -y suppresses the interactive confirmation below (for automated use).
if [[ $1 = -y ]]; then
    YES=y
    shift
fi
# DN is required and should start with a "/"
[[ $1 = /* ]] || usage
ADMINDN=$1
# Build the mysql password option as an array so the three cases (prompt,
# explicitly empty, given on the command line) stay distinguishable.
if [[ $# -lt 2 ]]; then        # have mysql prompt for password
    PASSWORD=(-p)
elif [[ $2 = "" ]]; then       # explicit empty password
    PASSWORD=()
else                           # non-empty password
    PASSWORD=("-p$2")
fi
echo 'WARNING: You must have created the database before running this script!'
echo
echo "Adding the following DN to the local database:"
echo "Certificate DN for administrator: \"$ADMINDN\""
# Confirm interactively only when stdin/stdout are a terminal and -y was not
# given.
if [[ -t 0 && -t 1 && ! $YES ]]; then
    echo
    echo "Is this correct? (Enter 'yes' to proceed)"
    read response
    if [[ $response != yes ]]; then
        exit 1
    fi
fi
echo
echo Adding the admin...
if [[ $PASSWORD = -p ]]; then
    echo Enter the gums mysql password:
fi
# Substitute the DN into the SQL template and feed it to mysql.
sed "s%@ADMINDN@%$ADMINDN%g" /usr/lib/gums/sql/addAdmin.mysql \
    | mysql -u gums "${PASSWORD[@]}" && echo Done.
| true |
68b7751ada867442cb5793e54879bbbce2818d78 | Shell | cosa65/3aalgo3 | /ej3tests/diamond/run_tests.sh | UTF-8 | 698 | 2.578125 | 3 | [] | no_license | rm -f src/*.o src/ej3_menor_color src/ej3_mayor_vertice
# Rebuild both graph-coloring solvers and measure conflicts for every
# diamond instance.
echo -e '\0033\0143'
g++ src/ej3_menor_color.cpp src/grafo_menor_color.cpp src/vertice_menor_color.cpp -std=c++11 -ggdb -o src/ej3_menor_color
g++ src/ej3_mayor_vertice.cpp src/grafo_mayor_vertice.cpp src/vertice_mayor_vertice.cpp -std=c++11 -ggdb -o src/ej3_mayor_vertice
# -p: do not fail when the output directories already exist (repeat runs).
mkdir -p diamantes_out_menor_color
mkdir -p diamantes_out_mayor_grado
# Iterate over the glob instead of parsing `ls` so the loop cannot be broken
# by word splitting; only the basename is used to rebuild in/out paths.
for entrada in diamantes_in/*
do
	i=$(basename "$entrada")
	echo "Midiendo conflictos para $i"
	src/ej3_menor_color "diamantes_in/$i" basura "diamantes_out_menor_color/$i"
done
for entrada in diamantes_in/*
do
	i=$(basename "$entrada")
	echo "Midiendo conflictos para $i"
	src/ej3_mayor_vertice "diamantes_in/$i" basura "diamantes_out_mayor_grado/$i"
done
rm basura | true |
ca08d7617965d98bf0682ea70c294eb3da7a988e | Shell | drewfrank/homebin | /forget | UTF-8 | 437 | 3.390625 | 3 | [] | no_license | #!/bin/bash
# Remove all entries for each host specified on the command line from known_hosts.
# The acrobatics in the second line of the loop resolve the ip address of an ssh
# host alias and remove entries for that ip address as well.
for host in "$@"; do
    # Drop entries recorded under the host name / ssh alias itself.
    ssh-keygen -R "$host" &>/dev/null
    # Resolve the alias to an IP by parsing ssh's verbose output, then drop
    # entries recorded under that IP as well.
    # NOTE(review): the `cut -f 16` field index depends on the exact wording
    # of OpenSSH's "-v" connection line — verify against the installed ssh
    # version before relying on it.
    ip=$(ssh -o "StrictHostKeyChecking no" -v "$host" 'exit' |& grep "IP address" | cut -d " " -f 16 | sed "s/'//g")
    ssh-keygen -R "$ip" &>/dev/null
done
| true |
5fda6083ea03365e0530da271aac480060d6cfc4 | Shell | CamHenlin/PlexGameLauncher | /Game Launcher.bundle/Contents/Helpers/MacOSX/i386/snes9x.sh | UTF-8 | 571 | 2.921875 | 3 | [] | no_license | #!/bin/bash
# Launch a SNES rom in Snes9x from Plex: hide the Plex UI while the emulator
# runs, wait for the emulator to quit, then bring Plex back to the front.
# Usage: snes9x.sh <rom path> <emulator directory>
args=("$@")
romname=${args[0]}
emupath=${args[1]}
# Hide Plex while the emulator is in the foreground.
/usr/bin/osascript -e "tell application \"System Events\" to set visible of process \"Plex\" to false"
# /usr/bin/osascript -e "tell application \"Plex\" to quit"
open -a "$emupath"Snes9x/Snes9x.app "$romname"
sleep 3
# Poll once per second while exactly one Snes9x process is still running
# (this script and the grep itself are filtered out of the count).
while [ `ps -ef | grep Snes9x | grep -v snes9x.sh | grep -v grep | awk '{print $2}' | wc -l` = 1 ]
do sleep 1
done
# Restore and refocus Plex.
/usr/bin/osascript -e "tell application \"System Events\" to set visible of process \"Plex\" to true"
/usr/bin/osascript -e "tell application \"Plex\" to activate"
| true |
0335e9e886611999e0fed3829c3cf13c2248c847 | Shell | jjjapj/Captcha | /get images/Selenium-WebDriver/script.sh | UTF-8 | 121 | 2.96875 | 3 | [] | no_license | num=1
# Rename 1000 randomly picked "captcha-*" images to sequential
# "captcha+<n>.png" names ($num is initialised to 1 above).
# The glob is expanded directly instead of parsing `ls`, the chosen path is
# quoted so filenames survive word splitting, and $((...)) replaces the
# external `expr`.
while [ $num -lt 1001 ]
do
	elegido=$(printf '%s\n' Images/captcha-* | shuf -n 1)
	mv "$elegido" "Images/captcha+$num.png"
	num=$((num + 1))
done
| true |
7ef17d4ae4f5921a479158acbbbb5f650644a978 | Shell | nicksan2c/slxc | /templates/machine_init.sh.in | UTF-8 | 1,239 | 3.15625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Container boot script (template: @...@ placeholders are substituted at
# install time). Brings up networking, starts munge and the appropriate
# SLURM daemon, then drops into an interactive shell.
. @SLURM_LXC_HOME@/slxc.conf
# Run DHCP client (I'm seing errors but it works anyway)
dhclient eth0
# Prepare var for Munge
su - $SLURM_USER -c "mkdir -p $MUNGE_PATH/var/lib/munge/"
su - $SLURM_USER -c "mkdir -p $MUNGE_PATH/var/log/munge/"
su - $SLURM_USER -c "mkdir -p $MUNGE_PATH/var/run/munge/"
su - $SLURM_USER -c "chmod -R 0755 $MUNGE_PATH/var/*"
# start Munge
su - $SLURM_USER -c "$MUNGE_PATH/etc/init.d/munge start"
# Prepare var for SLURM
su - $SLURM_USER -c "mkdir -p $SLURM_PATH/var/spool/slurmd"
su - $SLURM_USER -c "mkdir -p $SLURM_PATH/var/log/"
su - $SLURM_USER -c "chmod -R g+rwx $SLURM_PATH/var/spool/"
su - $SLURM_USER -c "chmod -R o+rwx $SLURM_PATH/var/spool/"
# Start SLURM. Distinguish between frontend and compute node.
# grep straight from the file (no `cat |`), $( ) instead of backticks, and
# quoted expansions.
cfg_line=$(grep ControlMachine "$SLURM_PATH/etc/slurm.conf")
ctrl_machine=$(echo "$cfg_line" | sed 's/ControlMachine=//')
this_host=$(hostname)
if [ "$ctrl_machine" = "$this_host" ]; then
  su - $SLURM_USER -c "$SLURM_PATH/sbin/slurmctld -f $SLURM_PATH/etc/slurm.conf -L $SLURM_PATH/var/log/slurmctld.log"
else
  su - $SLURM_USER -c "$SLURM_PATH/sbin/slurmd -f $SLURM_PATH/etc/slurm.conf -L $SLURM_PATH/var/log/slurmd.log"
fi
# Run bash to be able to issue commands
/bin/bash
| true |
bd9fce442b8333701f08c36a12c9b76209295c3c | Shell | cy20lin/crouton_backup | /bin/reformat-chroot | UTF-8 | 243 | 3.234375 | 3 | [] | no_license | #!/bin/bash
# DANGER: reformats the partition currently mounted at /media/removable/Linux
# (wiping the chroot) and remounts it. The device node is taken from the
# first field of df's last output line.
DEVICE=$(df /media/removable/Linux/ | tail -n 1 | cut -d ' ' -f 1)
# Proceed only when a device was found AND the unmount succeeded, so mkfs can
# never run on a still-mounted (or unknown) device. `[ -n ]` replaces the
# double-negative `test ! -z`.
if [ -n "${DEVICE}" ] && sudo umount /media/removable/Linux
then
    yes | /sbin/mkfs.ext4 "${DEVICE}" -L Linux
    # NOTE(review): umount runs under sudo but mount does not — confirm the
    # script is expected to run with mount privileges already.
    mount "${DEVICE}" /media/removable/Linux
fi
| true |
c5e7d5d791047283578cb09cec667c9cf421fe61 | Shell | torohangupta/aere361-labs | /lab-8-torohangupta/tests/test_ex2.sh | UTF-8 | 1,558 | 3.4375 | 3 | [] | no_license | # AerE 361 Lab 8
# Spring 2021
# Professor Nelson
# Ex 2 Script
# DO NOT EDIT THIS SCRIPT
# Autograder: the submission must (1) compile with gcc with no errors and no
# warnings, and (2) run cleanly under Valgrind with no leaks or heap overruns.
export TERM=xterm-256color
FILESRC="val_test.c"
tput setaf 6
echo "Testing Exercise 2"
# Make sure the file compiles
test_gcc=$(gcc -Wall -g $FILESRC 2>&1)
status_gcc=$?
# Make sure the file passes Valgrind
test_valgrind=$(valgrind --leak-check=yes ./a.out 2>&1)
status_valgrind=$?
# Compile phase: red on errors; yellow on any "warning:" text in the output.
if [ ${status_gcc} -ne 0 ]; then
    tput setaf 1
    echo "Compiling $FILESRC....FAILED"
    echo "Cleaning files"
    make clean
    exit 1
else
    if grep "warning:" <<<"${test_gcc}" >/dev/null ; then
        tput setaf 3
        echo "Compiling $FILESRC....WARNING"
        echo "Compile must not have warnings, failing autograder"
        echo "Cleaning files"
        make clean
        exit 1
    else
        echo "Compiling $FILESRC....OK"
    fi
fi
# Valgrind phase: fail on non-zero exit, reported leaks, or invalid writes.
if [ ${status_valgrind} -ne 0 ]; then
    tput setaf 1
    echo "Running Valgrind on $FILESRC....FAILED"
    echo "Cleaning files"
    make clean
    exit 1
else
    if grep "definitely lost" <<<"${test_valgrind}" >/dev/null; then
        tput setaf 3
        echo "Valgrind found a memory leak, please fix"
        echo "Need to fix leak to pass autograder"
        echo "Cleaning files"
        make clean
        exit 1
    else
        if grep "Invalid write of size" <<<"${test_valgrind}" >/dev/null; then
            tput setaf 3
            echo "Valgrind found heap block over run, please fix"
            echo "Need to fix the overrun to pass autograder"
            echo "Cleaning files"
            make clean
            exit 1
        else
            echo "Valgrind check on $FILESRC....OK"
        fi
    fi
fi
echo
tput setaf 7
echo "All tests passed, cleaning up files"
make clean
exit 0
| true |
8c7a9938b5d089d0c25cfd2354c4ebebf4344ad5 | Shell | m-lab/etl-gardener | /apply-cluster.sh | UTF-8 | 1,631 | 3.609375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# apply-cluster.sh applies the k8s cluster configuration to the currently
# configured cluster. This script may be safely run multiple times to load the
# most recent configurations.
#
# Example:
#
#   PROJECT_ID=mlab-sandbox CLOUDSDK_CONTAINER_CLUSTER=scraper-cluster ./apply-cluster.sh
# Trace every command; abort on any error and on unset variables (this also
# makes a missing GIT_COMMIT below a hard failure instead of a silent blank).
set -x
set -e
set -u
USAGE="PROJECT_ID=<projectid> CLOUDSDK_CONTAINER_CLUSTER=<cluster> $0"
PROJECT_ID=${PROJECT_ID:?Please provide project id: $USAGE}
CLUSTER=${CLOUDSDK_CONTAINER_CLUSTER:?Please provide cluster name: $USAGE}
DATE_SKIP=${DATE_SKIP:-"0"} # Number of dates to skip between each processed date (for sandbox).
TASK_FILE_SKIP=${TASK_FILE_SKIP:-"0"} # Number of files to skip between each processed file (for sandbox).
# Use sandbox in sandbox, measurement-lab in staging & oti.
SOURCE_PROJECT=${PROJECT_ID/mlab-oti/measurement-lab}
SOURCE_PROJECT=${SOURCE_PROJECT/mlab-staging/measurement-lab}
# Substitute the source-project placeholders into the gardener config.
sed -i \
  -e 's/{{ANNOTATION_SOURCE_PROJECT}}/'${SOURCE_PROJECT}'/g' \
  config/config.yml
sed -i \
  -e 's/{{NDT_SOURCE_PROJECT}}/'${SOURCE_PROJECT}'/g' \
  config/config.yml
# Create the configmap
kubectl create configmap gardener-config --dry-run \
  --from-file config/config.yml \
  -o yaml > k8s/${CLUSTER}/deployments/config.yml
# Apply templates
find k8s/${CLUSTER}/ -type f -exec \
  sed -i \
  -e 's/{{GIT_COMMIT}}/'${GIT_COMMIT}'/g' \
  -e 's/{{GCLOUD_PROJECT}}/'${PROJECT_ID}'/g' \
  -e 's/{{DATE_SKIP}}/'${DATE_SKIP}'/g' \
  -e 's/{{TASK_FILE_SKIP}}/'${TASK_FILE_SKIP}'/g' \
  {} \;
# This triggers deployment of the pod.
kubectl apply --recursive -f k8s/${CLUSTER}
| true |
66696a23f95bc2da5481a92c2477b690a8e8f998 | Shell | EmbeddedSystemClass/electronics | /raspberry_2017_08_03/plot.sh | UTF-8 | 2,369 | 3.09375 | 3 | [] | no_license | #!/bin/bash
# Plot the accumulated sensor log (~/data.dat) with gnuplot, archive the
# data, and emit a static HTML page linking the previous/next plots.
# First and last timestamp in the log delimit the plot's x range.
DA=`cut -f1 -d' ' ~/data.dat | head -1`
TE=`cut -f1 -d' ' ~/data.dat | tail -1`
# Monotonic plot counter kept in ~/index.
INDEX=`cat ~/index`
echo "Graph From... "
echo ${DA}
echo "to.. "
echo ${TE}
echo "in file... "
echo "plot${INDEX}"
gnuplot -p <<EOF
#script
set title "E.O.E data ${DA} - ${TE}"
set terminal jpeg giant size 2400, 1400
set output '~/eoe-plot/plot${INDEX}.jpg'
set multiplot
set key outside Left
set style line 1 linetype 1 linecolor rgb "#4169e1" linewidth 5
set style line 2 lt 1 linecolor rgb "#2e8b57" lw 5
set style line 3 lt 1 linecolor rgb "#ffd700" lw 5
set style line 4 lt 1 linecolor rgb "#ff0000" lw 5
set yrange [-20:100]
set y2range [-200:1000]
set timefmt "%d/%m %H:%M:%S"
set xdata time
set xrange ["${DA}":"${TE}"]
set format x ""
set ytics (100, 80, 60, 40, 20, 0)
set y2tics (1000, 800, 600, 400, 200, 0)
set ylabel "°C / %"
set y2label 'lux'
set grid
set size 1.0, 0.5
set origin 0.0, 0.4
set bmargin 1
plot '~/data.dat' u 1:6 title 'temperature (°C)' w l linestyle 1 axes x1y1,'~/data.dat' u 1:5 title 'moisture (%)' w l linestyle 2 axes x1y1,'~/data.dat' u 1:4 title 'luminosity (lux)' w l linestyle 3 axes x1y2
unset title
unset y2range
unset y2label
set bmargin 0
set timefmt "%d/%m %H:%M:%S"
set xdata time
set xrange ["${DA}":"${TE}"]
set format x "%d/%m\n%H:%M:%S"
set size 1.0, 0.3
set origin 0.0, 0.1
set tmargin 0
set yrange ["0":"100"]
set format "%1.0f"
set ytics (100, 75, 50, 25, 0)
set ylabel '%'
plot '~/data.dat' u 1:7 title 'water level (%)' w i lt 3, '~/data.dat' u 1:3 title 'battery level (%)' w l linestyle 4
unset multiplot
quit
EOF
# Bump the zero-padded counter for the next run.
NEWINDEX=`awk -F, '{printf("%03d\n", $1 + 1)}' ~/index`
echo "${NEWINDEX}" > ~/index
# Archive the plotted data and truncate the live log.
cp ~/data.dat ~/backup/data${INDEX}.dat
>~/data.dat
PREV_INDEX=$((INDEX - 1))
NEXT_INDEX=$((INDEX + 1))
echo "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\" \"http://www.w3.org/TR/html4/loose.dtd\">
<html>
<head>
<link rel=\"stylesheet\" href=\"index.css\" />
<title>e.o.e plot${INDEX}</title>
</head>
<body bgcolor=white>
<img src=\"plot${INDEX}.jpg\" id=\"plot\">
<a href=\"page${PREV_INDEX}.html\">PREVIOUS</a>.
<a href=\"config.php\">CONFIG</a>.
<a href=\"page${NEXT_INDEX}.html\">NEXT</a>.
</body>
</html>" >> ./eoe-plot/page${INDEX}.html
echo "Done !"
| true |
fa1fba1bbdc6fd1badbc98bccb4f853a35c01063 | Shell | tehmoth/jemplate | /tests/bin/test-run | UTF-8 | 450 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# Interactive test harness: build runtime.js with each supported runtime and
# pause for manual browser verification against a local test daemon.
JEMPLATE="perl -I../.. ../../jemplate"
DAEMON_PID=
# run ARGS...: regenerate runtime.js with the given jemplate options and
# wait for the tester to press return before continuing.
function run() {
    $JEMPLATE $* > runtime.js
    echo -e "\n"
    echo -e "*** Testing: $*"
    echo -e "*** Press return when you're ready to continue ***"
    read
}
# quit: stop the background daemon (if one was started) and exit.
function quit() {
    [ -n "$DAEMON_PID" ] && kill -9 $DAEMON_PID
    exit
}
# Make sure the daemon is cleaned up on Ctrl-C / termination.
trap "quit" SIGINT SIGTERM
make all
cd var
../bin/daemon -p 8081 &
DAEMON_PID=$!
run --runtime=standard
run --runtime=yui
run --runtime=jquery
quit
| true |
13c6cedf5b99cd66514af2eb9cc8beb345ab2720 | Shell | BackupTheBerlios/projectdev | /current/base/network/openssh/PKGBUILD | UTF-8 | 1,165 | 2.734375 | 3 | [] | no_license | # $Id: PKGBUILD,v 1.1 2004/12/03 17:13:46 rensel Exp $
# Arch Maintainer: judd <jvinet@zeroflux.org>
# Maintainer: blokkie <t.fernagut@burningfrog.be>
pkgname=openssh
pkgver=3.9p1
pkgrel=1
pkgdesc='A Secure SHell server/client'
url="http://www.openssh.org/portable.html"
# Configuration files preserved across package upgrades.
backup=('etc/ssh/ssh_config' 'etc/ssh/sshd_config' 'etc/pam.d/sshd')
depends=('openssl>=0.9.7d' 'zlib' 'pam')
# Second source entry is the local rc.d init script.
source=(ftp://ftp.openbsd.org/pub/OpenBSD/OpenSSH/portable/$pkgname-$pkgver.tar.gz sshd)
md5sums=('8e1774d0b52aff08f817f3987442a16e' 'c017087203893769efc159f3615e2b57')
# build: configure, compile and stage openssh into $startdir/pkg, installing
# the init script, PAM config, and enabling the default ListenAddress.
build() {
  cd $startdir/src/$pkgname-$pkgver
  ./configure --prefix=/usr --libexecdir=/usr/lib/ssh \
    --sysconfdir=/etc/ssh --with-tcp-wrappers --with-privsep-user=nobody \
    --with-md5-passwords --with-pam
  make || return 1
  make DESTDIR=$startdir/pkg install
  mkdir -p $startdir/pkg/etc/rc.d
  mkdir -p $startdir/pkg/var/empty
  cp ../sshd $startdir/pkg/etc/rc.d
  mkdir -p $startdir/pkg/etc/pam.d
  cp $startdir/sshd.pam $startdir/pkg/etc/pam.d/sshd
  # Uncomment the default ListenAddress. The original used `sed -ie`, where
  # the "e" was parsed as an -i backup suffix and produced the mystery file
  # "sshd_confige" that then had to be deleted; `sed -i -e` edits in place
  # with no backup.
  sed -i -e 's|^#ListenAddress 0.0.0.0|ListenAddress 0.0.0.0|g' \
    $startdir/pkg/etc/ssh/sshd_config
}
| true |
111cdd7ac1dbcc72b911d181f7f33f42035b9454 | Shell | davidbarkhuizen/sunbird | /sungate/deploy.sh | UTF-8 | 203 | 2.75 | 3 | [] | no_license | #!/bin/bash
# Bail out with a usage hint when no serial device argument was supplied.
if (( $# == 0 )); then
    echo 'usage: ./deploy.sh <serial device> e.g. ./deploy.sh /dev/ttyACM0'
    exit 0
fi
set -x # echo on
arduino --upload arduino/sungate/sungate.ino --port "$1" | true |
ee673229f2755be6510d974f08b0be0fea5003c2 | Shell | bespike/Bespike-Installer | /bootstrap.sh | UTF-8 | 1,343 | 3.9375 | 4 | [] | no_license | #!/usr/bin/env bash
# must run as root
if [ "$(id -u)" -ne 0 ]; then
	echo "Must be ran as root or sudo"
	exit
fi
# Debian/Ubuntu only: everything below relies on apt-get.
if [ ! -f /usr/bin/apt-get ]; then
	echo "This os is not supported"
	exit
fi
# Determine the invoking (non-sudo) user so cloned files can be chown'd back.
if [ -n "${SUDO_USER:-}" ]; then
	RealUser=$SUDO_USER
else
	RealUser=$(whoami)
fi
#if [ $EUID -ne 0 ]; then
#	if [ -d /root ]; then
#		RealHome="/root"
#	else
#		RealHome=$HOME
#	fi
#else
#	if [ -d /home/$RealUser ]; then
#		RealHome="/home/$RealUser"
#	else
#		RealHome=$HOME
#	fi
#fi
#if [ ! -d $RealHome/bespikeinstall ]; then
if [ ! -f /usr/bin/sudo ]; then
	echo "Installing sudo . . ."
	apt-get -q -q update
	DEBIAN_FRONTEND=noninteractive apt-get -q -q install -y sudo < /dev/null
	echo
fi
if [ ! -d /bespikeinstall ]; then
	if [ ! -f /usr/bin/git ]; then
		echo "Installing git . . ."
		apt-get -q -q update
		DEBIAN_FRONTEND=noninteractive apt-get -q -q install -y git < /dev/null
		echo
	fi
	echo "Downloading Bespike Installer . . ."
	git clone \
		https://github.com/bespike/bespikeinstall_setup \
		/bespikeinstall/install \
		< /dev/null 2> /dev/null
	echo
else
	# Guard the cd: without it a failed cd would run the destructive
	# `git reset --hard` below in whatever directory we happened to be in.
	cd /bespikeinstall/install || exit 1
	chown -R "$RealUser" /bespikeinstall/install/.git/
	echo "Updating Bespike Installer"
	# Fix: `-all` was a typo for `--all`; git rejects `-all` as an unknown
	# bundle of single-letter options, so the fetch never happened.
	git fetch --all
	git reset --hard origin/master
	echo
fi
# Start setup script.
bash /bespikeinstall/install/start.sh | true |
252ad2130f98334a106f25d75cff1cb49b52ea4d | Shell | csanadm/StrongCoulombLevyBEC | /Glue.sh | UTF-8 | 234 | 3.453125 | 3 | [] | no_license | #!/bin/bash
#args: filelist targetname
# Concatenate every file listed (one per line) in $1 into the target file $2.
# Reading the list into an array replaces the original O(n^2) string-append
# counter loop, keeps filenames safe from word splitting and globbing, and
# yields the file count as the array length.
mapfile -t files < "$1"
echo "Concatenating ${#files[@]} files..."
cat -- "${files[@]}" > "$2"
echo "Done."
| true |
9fe0a56e67b593df1cd8b9e44439ad246f395362 | Shell | s117/Speckle | /clean.sh | UTF-8 | 926 | 3.53125 | 4 | [] | no_license | #!/usr/bin/env bash
# Silent wrappers: suppress the directory-stack chatter from pushd/popd.
pushd () {
    command pushd "$@" > /dev/null
}
popd () {
    command popd "$@" > /dev/null
}
# Exactly one argument selects what to clean.
if [[ $# != 1 ]]; then
    echo "Usage: $0 [2006|2017|all]"
    exit 1
fi
# Remove the per-benchmark build/run/exe directories of SPEC CPU2017.
if [[ "$1" == "2017" || "$1" == "all" ]]; then
    pushd SPEC/cpu2017/benchspec/CPU
    find . -mindepth 2 -maxdepth 2 -type d -name "build" | xargs -d '\n' -- rm -fr
    find . -mindepth 2 -maxdepth 2 -type d -name "run" | xargs -d '\n' -- rm -fr
    find . -mindepth 2 -maxdepth 2 -type d -name "exe" | xargs -d '\n' -- rm -fr
    popd
fi
# Same cleanup for SPEC CPU2006.
if [[ "$1" == "2006" || "$1" == "all" ]]; then
    pushd SPEC/cpu2006/benchspec/CPU2006
    find . -mindepth 2 -maxdepth 2 -type d -name "build" | xargs -d '\n' -- rm -fr
    find . -mindepth 2 -maxdepth 2 -type d -name "run" | xargs -d '\n' -- rm -fr
    find . -mindepth 2 -maxdepth 2 -type d -name "exe" | xargs -d '\n' -- rm -fr
    popd
fi
# "all" additionally removes the top-level build outputs.
if [[ "$1" == "all" ]]; then
    rm -fr build
    rm -fr riscv-m64-spec
fi
| true |
0271e8089d333b9983af1c726c427c550d4d0dca | Shell | caifeng2014/Test | /deploy.sh | UTF-8 | 1,144 | 3.21875 | 3 | [] | no_license | #!/bin/bash
# Build the project, stop the previously running instance, copy the fresh
# jar into the work directory and launch it in the background.
mvn clean install
# Service name used to locate the running process.
SERVER_NAME=demo
# Base name of the jar the mvn build leaves under target/.
JAR_NAME=demo-0.0.1-SNAPSHOT
# Directory containing the freshly built jar ($PROJ_PATH is the Jenkins
# workspace for this job).
JAR_PATH=$PROJ_PATH/order/target
# Directory the jar is copied to and started from.
JAR_WORK_PATH=$PROJ_PATH/order/target
echo "查询进程id-->$SERVER_NAME"
# Exclude the grep process itself from the candidates; the original pattern
# always matched its own `grep demo` and "killed" a bogus PID every run.
PID=$(ps -ef | grep "$SERVER_NAME" | grep -v grep | awk '{print $2}')
echo "得到进程ID:$PID"
echo "结束进程"
for id in $PID
do
	kill -9 "$id"
	echo "killed $id"
done
echo "结束进程完成"
# Copy the jar into the execution directory.
echo "复制jar包到执行目录:cp $JAR_PATH/$JAR_NAME.jar $JAR_WORK_PATH"
cp "$JAR_PATH/$JAR_NAME.jar" "$JAR_WORK_PATH"
echo "复制jar包完成"
# Abort if the work directory is missing instead of chmod'ing / launching
# from the wrong directory.
cd "$JAR_WORK_PATH" || exit 1
chmod 755 "$JAR_NAME.jar"
nohup java -jar "$JAR_NAME.jar" &
echo "启动成功"
| true |
32eadd448c76fa2af1426bb713a8b3a72cadbc23 | Shell | JaanJah/rpi-temperature-monitor | /tempmon.sh | UTF-8 | 254 | 3.25 | 3 | [] | no_license | #!/bin/bash
# Append one "timestamp temperature" sample to today's heat log.
# SoC temperature in °C, extracted from vcgencmd's "temp=42.8'C" output.
TEMP=$(vcgencmd measure_temp | egrep -o '[0-9]*\.[0-9]*')
# ISO-8601 timestamp for this sample.
TIMESTAMP=$(date -Iseconds)
# One log file per day, created in the current directory.
FILE="./$(date -Idate)-heat.log"
# Record the sample.
printf '%s\n' "$TIMESTAMP $TEMP" >> "$FILE"
| true |
b4cd9125c53e122241666f29a38ee5bbc1c831cc | Shell | crea28/sysadmin | /monitoring-scripts/check_galera_cluster.sh | UTF-8 | 4,088 | 3.75 | 4 | [] | no_license | #!/bin/bash
# Script : check_galera_cluster.sh
# Author : crea28.fr
# http://galeracluster.com/documentation-webpages/monitoringthecluster.html
# Nagios-style health checks for a Galera (wsrep) MySQL cluster: each check
# prints one status line and terminates with the matching Nagios exit code.
# Nagios exit codes: OK / WARNING / CRITICAL / UNKNOWN.
ST_OK=0
ST_WR=1
ST_CR=2
ST_UK=3
# default values
# can be replaced in using grep with .my.cnf
port=''
mysqlhost=''
mysqluser=''
mysqlpassword=''
# cluster_size: compare the live node count (wsrep_cluster_size) against the
# expected cluster size and exit with the matching Nagios status code.
function cluster_size() {
node='3'
r1=$(mysql -h$mysqlhost -P$port -u$mysqluser -p$mysqlpassword -B -N -e "show status like 'wsrep_cluster_size'"|cut -f 2)
# An empty result means the mysql query failed; without this guard the
# numeric test below becomes "[ -eq 3 ]", a test syntax error, and the
# UNKNOWN branch of the original if/elif/else could never be reached.
if [ -z "$r1" ]; then
  echo "UNKNOWN: could not read wsrep_cluster_size"
  exit $ST_UK;
elif [ "$r1" -eq "$node" ]; then
  echo "OK: number of NODES = $r1"
  exit $ST_OK;
else
  echo "CRITICAL: number of NODES = $r1";
  echo "Configuration indicates $node nodes"
  exit $ST_CR;
fi
}
# cluster_status: the node must belong to the Primary cluster component.
# (The original's unused `status="Primary"` variable was removed; all checks
# below gain an empty-result guard so a failed mysql query reports UNKNOWN
# instead of a misleading CRITICAL/OK.)
function cluster_status() {
r2=$(mysql -h$mysqlhost -P$port -u$mysqluser -p$mysqlpassword -B -N -e "show status like 'wsrep_cluster_status'"|cut -f 2)
if [ -z "$r2" ]; then
  echo "UNKNOWN: could not read wsrep_cluster_status"
  exit $ST_UK;
elif [ "$r2" != 'Primary' ]; then
  echo "CRITICAL: node is not primary"
  exit $ST_CR;
else
  echo "OK : `hostname` is Primary"
  exit $ST_OK;
fi
}
# cluster_ready: wsrep_ready=ON means the node accepts queries.
function cluster_ready() {
# Value (ON | OFF)
r4=$(mysql -h$mysqlhost -P$port -u$mysqluser -p$mysqlpassword -B -N -e "show status like 'wsrep_ready'"|cut -f 2)
if [ -z "$r4" ]; then
  echo "UNKNOWN: could not read wsrep_ready"
  exit $ST_UK;
elif [ "$r4" != 'ON' ]; then
  echo "CRITICAL: node is not ready"
  exit $ST_CR;
else
  echo "OK: I can accept write-sets from the cluster"
  exit $ST_OK;
fi
}
# cluster_connected: wsrep_connected=ON means network connectivity with at
# least one other cluster member.
function cluster_connected() {
# Value (ON | OFF)
r5=$(mysql -h$mysqlhost -P$port -u$mysqluser -p$mysqlpassword -B -N -e "show status like 'wsrep_connected'"|cut -f 2)
if [ -z "$r5" ]; then
  echo "UNKNOWN: could not read wsrep_connected"
  exit $ST_UK;
elif [ "$r5" != 'ON' ]; then
  echo "CRITICAL: Where is Brian ?"
  exit $ST_CR;
else
  echo "OK: Network connection is good !"
  exit $ST_OK;
fi
}
# cluster_local_state: a healthy node is normally in the "Synced" state.
function cluster_local_state() {
# Synced is the state by default
r6=$(mysql -h$mysqlhost -P$port -u$mysqluser -p$mysqlpassword -B -N -e "show status like 'wsrep_local_state_comment'"|cut -f 2)
if [ -z "$r6" ]; then
  echo "UNKNOWN: could not read wsrep_local_state_comment"
  exit $ST_UK;
elif [ "$r6" != 'Synced' ]; then
  echo "CRITICAL: node is not synced"
  exit $ST_CR;
else
  echo "OK: node is synced (Status : $r6)"
  exit $ST_OK;
fi
}
# cluster_replication_flow: fraction of time (since the variable was last
# read) the node was paused by Flow Control; above $fcp replication is
# considered too slow.
function cluster_replication_flow() {
# wsrep_flow_control_paused shows the fraction of the time, since the status variable was last called, that the node paused due to Flow Control
fcp='0.1';
r3=$(mysql -h$mysqlhost -P$port -u$mysqluser -p$mysqlpassword -B -N -e "show status like 'wsrep_flow_control_paused'"|cut -f 2)
if [ -z "$r3" ]; then
  echo "UNKNOWN: wsrep_flow_control_paused is empty"
  exit $ST_UK ;
fi
# Quote the command substitution: if bc is missing or errors out, the
# original unquoted form collapsed to "[ = 1 ]", a test syntax error.
if [ "$(echo "$r3 > $fcp" | bc)" = 1 ]; then
  echo "CRITICAL: wsrep_flow_control_paused is > $fcp"
  exit $ST_CR;
else
  echo "OK: wsrep_flow_control_paused is < $fcp"
  exit $ST_OK;
fi
}
# help: print the list of supported check names on stdout, then exit with
# the UNKNOWN status code (help is only shown on bad invocations).
function help() {
echo "";
echo "Options:";
echo "";
echo "cluster_size Shows the number of nodes in the cluster";
echo "cluster_status Shows the primary status of the cluster component";
echo "cluster_ready Shows if node can accept queries or not";
# Fixed: the original line ended with a stray ":" outside the closing quote,
# so echo printed a trailing colon; use ";" like every other line.
echo "cluster_connected Shows the network connectivity with any other nodes";
echo "cluster_local_state Shows the node state in a human readable format";
echo "cluster_replication_flow Shows time since the status variable was last called";
exit $ST_UK;
}
# main program: dispatch the single command-line argument to the matching
# check function. Every check function terminates the script itself with a
# Nagios status code, so nothing after a successful dispatch is reachable.
# The original wrapped the case in `while (true)` — spawning a subshell per
# iteration — with dead `exit 1` statements after every call; a plain case
# expresses the same control flow.
if [ $# != "1" ]; then
	echo "argument missing" ;
	help;
else
	case $1 in
		cluster_size)
			cluster_size;
			;;
		cluster_status)
			cluster_status;
			;;
		cluster_ready)
			cluster_ready;
			;;
		cluster_connected)
			cluster_connected;
			;;
		cluster_local_state)
			cluster_local_state;
			;;
		cluster_replication_flow)
			cluster_replication_flow;
			;;
		*)
			help;
			exit 1;
			;;
	esac
fi
| true |
bf25d849460987f329ee0e3b2d45a59ee4c61a4b | Shell | Julian88Tex/ohmyzsh | /plugins/themes/themes.plugin.zsh | UTF-8 | 793 | 3.15625 | 3 | [
"MIT"
] | permissive | function theme {
: ${1:=random} # Use random theme if none provided
if [[ -f "$ZSH_CUSTOM/$1.zsh-theme" ]]; then
source "$ZSH_CUSTOM/$1.zsh-theme"
elif [[ -f "$ZSH_CUSTOM/themes/$1.zsh-theme" ]]; then
source "$ZSH_CUSTOM/themes/$1.zsh-theme"
elif [[ -f "$ZSH/themes/$1.zsh-theme" ]]; then
source "$ZSH/themes/$1.zsh-theme"
else
echo "$0: Theme '$1' not found"
return 1
fi
}
# Completion spec for `theme`: complete the first argument with theme names.
function _theme {
  _arguments "1: :($(lstheme))"
}
# Register _theme as the completion handler for the theme function.
compdef _theme theme
# Print available theme names (basename only, extension stripped) from
# $ZSH_CUSTOM and from the themes/ subdirectories of $ZSH_CUSTOM and $ZSH.
function lstheme {
  # Resources:
  # http://zsh.sourceforge.net/Doc/Release/Expansion.html#Modifiers
  # http://zsh.sourceforge.net/Doc/Release/Expansion.html#Glob-Qualifiers
  # (glob qualifiers: N = nullglob, :t = tail/basename, :r = drop extension)
  print "$ZSH_CUSTOM"/*.zsh-theme(N:t:r) {"$ZSH_CUSTOM","$ZSH"}/themes/*.zsh-theme(N:t:r)
}
| true |
e71494b682461951d3ac582a4b209924ecdeb618 | Shell | zacharyzhou/zzemacs | /bin/hg-tools.sh | UTF-8 | 923 | 3.6875 | 4 | [] | no_license | #!/bin/sh
##Import vars and functions
. sample.sh
echo "hg setup start ..."
###Mercurial Books
##http://mercurial.selenic.com/
##http://hginit.com/ (Hg Init: a Mercurial tutorial)
##http://hgbook.red-bean.com/ (Mercurial: The Definitive Guide)
# Install_package: install Mercurial (plus docutils for building its docs)
# with the distribution's package manager. Relies on $OS_DISTRO, which is
# expected to be set by the sourced sample.sh.
Install_package()
{
    # dectect OS version
    if [ "$OS_DISTRO" = "SuSE" ]; then
        # NOTE(review): no SuSE install commands yet — only a message.
        echo "Install on suse"
    elif [ "$OS_DISTRO" = "Ubuntu" ]; then
        sudo apt-get install -y mercurial
        sudo apt-get install -y python-docutils
    elif [ "$OS_DISTRO" = "CentOS" ]; then
        sudo yum install -y mercurial
        sudo yum install -y python-devel
        sudo yum install -y python-docutils
    else
        echo "You are about to install on a non supported linux distribution."
    fi
}
##setup packages
# Ask before installing; try_command comes from the sourced sample.sh.
echo -n "Do you need install packages? (y/N): "
read answer
case "$answer" in
    "Y" | "y" )
        try_command Install_package
        ;;
esac
| true |
203038600847e8353c07aafe6279a02d3d1b1146 | Shell | petronny/aur3-mirror | /open_watcom-v2-git/PKGBUILD | UTF-8 | 2,769 | 2.65625 | 3 | [] | no_license | # Mantainer Jens Staal <staal1978@gmail.com>
# PKGBUILD metadata for the git-snapshot build of the Open Watcom v2 fork.
pkgname=open_watcom-v2-git
pkgver=0.r2155.g22304e6
pkgrel=1
pkgdesc="The Open Watcom C/C++ compiler, github source fork"
arch=('i686' 'x86_64')
url="http://open-watcom.github.io/open-watcom/"
#url="http://www.openwatcom.org"
license=('custom:OWPL-1')
makedepends=('dosbox' 'open_watcom' 'git')
# there are some annoying makepkg-specific bugs in the build
# that makes it required to run the build with watcom
# dosemu should also work, but I hit lots of bugs on x86_64
optdepend=('open_watcom: skip bootstrapping with gcc')
provides=('open_watcom' 'open_watcom-v2' 'openwatcom-extras-hg')
conflicts=('open_watcom' 'open_watcom-v2' 'openwatcom-extras-hg')
replaces=('open_watcom' 'open_watcom-v2' 'openwatcom-extras-hg')
source=('watcom'::'git://github.com/open-watcom/open-watcom-v2.git' 'setvars.sh' \
'owsetenv.sh')
sha256sums=('SKIP' '4ededdb1c0c6412b720dbf137e0b27449a6d15721bb98fe057e3e55613e2be24' \
'44f0b3bff2b722be9cf97b0246cf16d4526fcf6cc09353949ce82552243ccdb8')
options=('!strip' '!buildflags' 'staticlibs')
# pkgver: derive the package version from git — latest tag plus commit count
# and short hash — falling back to a tagless "0.r<count>.g<hash>" form.
pkgver() {
  cd $srcdir/watcom/
  if GITTAG="$(git describe --abbrev=0 --tags 2>/dev/null)"; then
    echo "$(sed -e "s/^${pkgname%%-git}//" -e 's/^[-_/a-zA-Z]\+//' -e 's/[-_+]/./g' <<< ${GITTAG}).r$(git rev-list --count ${GITTAG}..).g$(git log -1 --format="%h")"
  else
    echo "0.r$(git rev-list --count master).g$(git log -1 --format="%h")"
  fi
}
# build: point the upstream build scripts at this directory, prefer a native
# OpenWatcom toolchain when one is installed (skips the gcc bootstrap), then
# run the upstream build.
build() {
  cd $srcdir/watcom
  msg "set current source directory and other variables"
  cat $srcdir/setvars.sh | sed "s|CurrentPkgbuildDir|${PWD}|g" > setvars.sh
  #64-bit needs to be built with GCC for now...
  #if [ $CARCH = i686 ]; then
  msg "use OpenWatcom if possible..."
  if [ -d /opt/watcom ]; then
    msg2 "watcom detected"
    sed 's/OWUSENATIVETOOLS=1/OWUSENATIVETOOLS=0/g' -i setvars.sh
  fi
  #else
  #work-around for strange bug in makepkg when building with gcc
  #running build.sh outside of PKGBUILD works...
  #cp $srcdir/*.gh bld/wmake/h/
  #fi
  chmod +x build.sh
  source setvars.sh
  msg "OWROOT is $OWROOT" #testing variables from setvars.sh
  msg "OWSRCDIR is $OWSRCDIR" #testing variables from cmnvars.sh
  msg "WATCOM is $WATCOM"
  msg "OWDEFPATH is $OWDEFPATH"
  # ./clean.sh # make sure everything is pristine
  ./build.sh
  cd bld
  builder rel
}
# package: install the license and copy the built release tree into
# /opt/watcom, adding placeholder ar/ranlib so Unix-style builds work.
package() {
  cd $srcdir/watcom/rel
  install -d "$pkgdir/usr/share/licenses/watcom"
  install -Dm644 "$srcdir/watcom/license.txt" "$pkgdir/usr/share/licenses/watcom/license.txt"
  mkdir -p $pkgdir/opt/watcom/{binl,lh,h/{dos,win,nt,os2,os21x},lib286/{dos,win,os2},lib386/{nt,os2}}
  cp -ar * $pkgdir/opt/watcom/
  cp $srcdir/owsetenv.sh $pkgdir/opt/watcom/
  msg "adding some fake binaries to make life easier"
  cd $pkgdir/opt/watcom/binl
  ln -s /usr/bin/true ranlib
  ln -s wlib ar
}
| true |
b95aa24f663eb8de93158553a419e4eb56a96a23 | Shell | linkedin/rest.li | /pre-release-check | UTF-8 | 1,794 | 4.125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# The purpose of this script is to perform some checks before the release process
REMOTE="origin"
BRANCH="master"
if [ $# -eq 2 ]
then
  REMOTE=$1
  BRANCH=$2
fi
# Determine version to be released ("version=X.Y.Z ..." in gradle.properties)
VERSION=$(awk 'BEGIN { FS = "=" }; $1 == "version" { print $2 }' gradle.properties | awk '{ print $1 }')
echo "Running pre-release job for version $VERSION..."
# Check that there are no uncommitted changes
DIRTY=$(git status --porcelain --untracked-files=no 2>&1 || echo FAIL)
if [ -n "$DIRTY" ]
then
  echo "Dirty index or working tree. Use git status to check."
  echo "After resolution, run this command again."
  exit 1
fi
# Ensure that the current branch is consistent with the remote target
INCONSISTENT=$(git diff --quiet "$REMOTE/$BRANCH" >/dev/null 2>&1 ; echo $?)
if [ "$INCONSISTENT" -ne 0 ]
then
  echo "$REMOTE/$BRANCH and current branch are inconsistent."
  echo "Use git diff $REMOTE/$BRANCH to see changes."
  echo "Rebase or push, as appropriate, and run this command again."
  exit 1
fi
# Ensure that a tag exists for this version. Use an exact ref lookup: the
# original `git tag | grep $EXPECTED_TAG` also matched longer tags (v1.0
# matched v1.0.1) and broke the unquoted `[ -z ... ]` test whenever several
# tags matched.
EXPECTED_TAG="v$VERSION"
if ! git rev-parse -q --verify "refs/tags/$EXPECTED_TAG" >/dev/null 2>&1
then
  echo "Could not find tag $EXPECTED_TAG, please create it then run this command again."
  echo "This release process expects release tags to be manually created beforehand."
  echo
  echo "Use './prepare-release' to create and push a release tag."
  echo "Optionally, use './prepare-release [TARGET_COMMIT]' to tag a particular commit."
  exit 1
fi
# We want to release from this tag, so check it out
echo "Found tag $EXPECTED_TAG, checking out..."
git checkout --quiet "$EXPECTED_TAG"
if [ $? -ne 0 ]
then
  echo "Unable to check out tag $EXPECTED_TAG"
  exit 1
fi
echo "All pre-release checks passed, ready to build and release..."
| true |
36c3cfd3c99ede4bd9322c9f66d061f23326b792 | Shell | thapovan-inc/orion-proto | /scripts/error.sh | UTF-8 | 384 | 3.90625 | 4 | [
"Apache-2.0"
] | permissive | PROGNAME=$(basename $0)
# error_exit [message]
# Report a fatal error as "<program name>: <message>" on stderr — the
# message defaults to "Unknown Error" — and terminate with exit status 1.
# (The definition was previously followed by a stray "| true", which made
# the function body part of a pipeline: it executed in a subshell, so
# error_exit was never actually defined for callers. printf replaces echo
# so messages beginning with "-" cannot be eaten as echo options.)
function error_exit
{
	printf '%s: %s\n' "${PROGNAME}" "${1:-Unknown Error}" >&2
	exit 1
}
4e420c238a90aec8b21e02e997427edccc3603cf | Shell | jacano1969/Legacy | /CMC/tools/s/build_language_only | UTF-8 | 5,992 | 2.9375 | 3 | [] | no_license | #!/bin/bash
# Written by lithid (mrlithid@gmail.com)
# Strip Android language/locale product makefiles, keeping a timestamped
# backup of each plus a restore command so the change can be undone.
. $CMC_HOME/tools/f/compile_functions
REM_BACK="$CMC_BACKUP/remove_languages"
if [ ! -d $REM_BACK ]; then
    mkdir -p $REM_BACK
fi
# Record what this script removed for the change log.
echo " Removed languages" >> $CMC_DELETED
# backup_file: copy $F_EDIT into the backup dir under the random name $UNIQ,
# replace the original with an empty file, and append the matching restore
# command to $CMC_RESTORE.
function backup_file() {
    cp $F_EDIT $REM_BACK/$UNIQ
    rm -rf $F_EDIT
    touch $F_EDIT
    echo "cp $REM_BACK/$UNIQ $F_EDIT" >> $CMC_RESTORE
}
# One fresh random backup name per target file.
UNIQ=$(echo `</dev/urandom tr -dc A-Za-z0-9 | head -c20`)
F_EDIT="$REPO_PATH/build/target/product/full_base.mk"
backup_file
UNIQ=$(echo `</dev/urandom tr -dc A-Za-z0-9 | head -c20`)
F_EDIT="$REPO_PATH/build/target/product/languages_full.mk"
backup_file
UNIQ=$(echo `</dev/urandom tr -dc A-Za-z0-9 | head -c20`)
F_EDIT="$REPO_PATH/external/svox/pico/lang/all_pico_languages.mk"
backup_file
UNIQ=$(echo `</dev/urandom tr -dc A-Za-z0-9 | head -c20`)
F_EDIT="$REPO_PATH/build/target/product/locales_full.mk"
backup_file
# languages_small.mk only exists on the ics branch.
if [ "$BRANCH" = "ics" ]; then
    F_NAME="languages_small.mk"
    F_EDIT="$REPO_PATH/build/target/product/languages_small.mk"
    backup_file
fi
if [ "$BRANCH" = "ics" ]; then
(cat << EOF) > $REPO_PATH/build/target/product/full_base.mk
#
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is a build configuration for a full-featured build of the
# Open-Source part of the tree. It's geared toward a US-centric
# build of the emulator, but all those aspects can be overridden
# in inherited configurations.
PRODUCT_PACKAGES := \\
drmserver \\
libdrmframework \\
libdrmframework_jni \\
libfwdlockengine \\
VideoEditor \\
WAPPushManager
# Additional settings used in all AOSP builds
PRODUCT_PROPERTY_OVERRIDES := \\
ro.com.android.dateformat=MM-dd-yyyy \\
ro.config.ringtone=Ring_Synth_04.ogg \\
ro.config.notification_sound=pixiedust.ogg
# Put en_US first in the list, so make it default.
PRODUCT_LOCALES := $LANGS
# Get some sounds
\$(call inherit-product-if-exists, frameworks/base/data/sounds/AllAudio.mk)
# Get the TTS language packs
\$(call inherit-product-if-exists, external/svox/pico/lang/all_pico_languages.mk)
# Get everything else from the parent package
\$(call inherit-product, \$(SRC_TARGET_DIR)/product/generic_no_telephony.mk)
EOF
else
(cat << EOF) > $REPO_PATH/build/target/product/full_base.mk
#
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is a build configuration for a full-featured build of the
# Open-Source part of the tree. This is a base configuration to
# bes used for AOSP builds on various target devices.
PRODUCT_PACKAGES := \\
VoiceDialer
# Additional settings used in all AOSP builds
PRODUCT_PROPERTY_OVERRIDES := \\
keyguard.no_require_sim=true
# Put en_US first in the list, to make it default.
PRODUCT_LOCALES := $LANGS
# Pick up some sounds - stick with the short list to save space
# on smaller devices.
\$(call inherit-product-if-exists, frameworks/base/data/sounds/OriginalAudio.mk)
# Get the TTS language packs
\$(call inherit-product-if-exists, external/svox/pico/lang/all_pico_languages.mk)
\$(call inherit-product, \$(SRC_TARGET_DIR)/product/generic.mk)
EOF
fi
(cat << EOF) > $REPO_PATH/external/svox/pico/lang/all_pico_languages.mk
# Copyright (C) 2010 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file simply inherits from all the know language packs.
\$(call inherit-product, external/svox/pico/lang/PicoLangEnUsInSystem.mk)
EOF
(cat << EOF) > $REPO_PATH/build/target/product/locales_full.mk
# The locales from the ICU "-large.dat" data set.
# See external/icu4c/stubdata.
# This is distinct from "languages_full.mk", which contains those locales for
# which we have translations. If you like, this file is i18n rather than l18n.
PRODUCT_LOCALES := $LANGS
EOF
(cat << EOF) > $REPO_PATH/build/target/product/languages_full.mk
# The locales from the ICU "-large.dat" data set.
# See external/icu4c/stubdata.
# This is distinct from "languages_full.mk", which contains those locales for
# which we have translations. If you like, this file is i18n rather than l18n.
PRODUCT_LOCALES := $LANGS
EOF
if [ "$BRANCH" = "ics" ]; then
(cat << EOF) > $REPO_PATH/build/target/product/languages_small.mk
# The locales from the ICU "-large.dat" data set.
# See external/icu4c/stubdata.
# This is distinct from "languages_full.mk", which contains those locales for
# which we have translations. If you like, this file is i18n rather than l18n.
PRODUCT_LOCALES := $LANGS
EOF
fi | true |
92353f687a646b64f3865f39e3944c9a01673dc1 | Shell | vlnguyen92/tfhybrid | /convnets/runAll2GPU2Machine.sh | UTF-8 | 306 | 2.890625 | 3 | [] | no_license | #!/bin/bash
for f in *.py
do
echo $f
cmd_start="tmux new-session -d -s tf-ps \"CUDA_VISIBLE_DEVICES= python $f --job_name=ps --task_index=0\""
eval $cmd_start
job='python '$f' --job_name=worker --task_index=0'
eval $job
cmd_stop='tmux kill-session -t tf-ps'
eval $cmd_stop
done
| true |
e6d8c72d320080e56cab8903bbac48d5cb87ee1f | Shell | JohnBarrabas/Gutenberg-Narrative-Text-Corpus | /bin/03-MountISO.sh | UTF-8 | 2,553 | 3.234375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#
########################################################################################################################
########################################################################################################################
##
## Copyright (C) 2021 Rajstennaj Barrabas, Milford, NH 03055
## All Rights Reserved under the MIT license as outlined below.
##
## FILE
## 03-MountISO.sh
##
## DESCRIPTION
## Mount the ISO into a local project directory
##
## USAGE
## 03-MountISO.sh
##
## NOTE:
##
## Requires root privilege, will prompt user for su password.
##
## If this bothers you, add a line to /etc/fstab (as root) to mount the system at
## boot time. Something like this:
##
## /home/my_dir/Gutenberg-Narrative-Text-Corpus /home/my_dir/Gutenberg-Narrative-Text-Corpus/ISO auto loop 0 0
##
########################################################################################################################
########################################################################################################################
##
## MIT LICENSE
##
## Permission is hereby granted, free of charge, to any person obtaining a copy of
## this software and associated documentation files (the "Software"), to deal in
## the Software without restriction, including without limitation the rights to
## use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
## of the Software, and to permit persons to whom the Software is furnished to do
## so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in
## all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
## INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
## PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
## HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
## OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
## SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
##
########################################################################################################################
########################################################################################################################
ISO="pgdvd072006.iso"
cd ..
mkdir -p ISO
sudo mount $ISO ISO 2>/dev/null
| true |
600afff387275f54cc7d476cada5464b8ce8bf24 | Shell | gerithebiker/OldKornStorageScirpts | /lib/SnapmirrorReplicationFix | UTF-8 | 8,496 | 3.8125 | 4 | [] | no_license | #!/bin/ksh
#
#
if [ $# -eq 1 ] ; then
filervol=$1
else
echo "`date '+%d%b%Y_%H:%M:%S'`: Bad parameters - $#"
exit 1
fi
#
# General variables
#
PID=$$
LOG=/santeam/Logs/SnapmirrorReplicationFix.logs
MAIL_REC=sanadmin@citco.com
MAIL_SENDER="SnapmirrorFix-noreply"
SNAPRESVTRESHOLD=99
VOLFULLTRESHOLD=90
#
# Collect general information
#
# Get the DR filer and volume name
drfiler=`echo $filervol|awk -F: '{print $1}'`
drvolume=`echo $filervol|awk -F: '{print $2}'`
# Get the extended status of the replicated volume
ssh $drfiler snapmirror status -l $drvolume > /tmp/snapmirror_status_${drvolume}
if [ ! -s /tmp/snapmirror_status_${drvolume} ] ; then
echo "`date '+%d%b%Y_%H:%M:%S'`: Can't get the snapmirror status of $drfiler:$drvolume !!" >> $LOG
exit 4
fi
# Get the status of the replication error
replication_status=`grep "Transfer Error" /tmp/snapmirror_status_${drvolume}`
# Get the source filer name
srcfiler=`grep ^Source /tmp/snapmirror_status_${drvolume}|awk '{print $2}'|awk -F: '{print $1}'`
# Get the source volume name
srcvolume=`grep ^Source /tmp/snapmirror_status_${drvolume}|awk '{print $2}'|awk -F: '{print $2}'`
#################
# Functions #
#################
WriteLogs ()
{
TEXT=$1
DATE=`date '+%d%b%Y_%H:%M:%S'`
echo "$DATE:$PID:$TEXT" >> $LOG
}
GrowDRvolume ()
{
srcvolumesize=`ssh $srcfiler vol size $srcvolume|grep "has size"|awk '{print $NF}'|sed -e 's/.$//'`
WriteLogs "Source volume size $srcvolume: $srcvolumesize" >> $LOG
# Break the replication
BREAK_STATUS=0
ssh $drfiler snapmirror break $drvolume
WriteLogs "ssh $drfiler snapmirror break $drvolume" >> $LOG
while [ $BREAK_STATUS -eq 0 ] ; do
status=`ssh $drfiler snapmirror status $drvolume|tail -1|awk '{print $3}'`
if [ $status = Broken-off ] ; then
BREAK_STATUS=1
else
# wait 3 sec. for completion
sleep 3
fi
done
# Update the volume size on the replicated volume
WriteLogs "ssh $drfiler vol options $drvolume fs_size_fixed off" >> $LOG
ssh $drfiler vol options $drvolume fs_size_fixed off
ssh $drfiler vol size $drvolume $srcvolumesize
WriteLogs "Increase size on DR volume -> ssh $drfiler vol size $drvolume $srcvolumesize" >> $LOG
# Resync snapmirror
ssh $drfiler snapmirror resync -f -S ${srcfiler}:${srcvolume} $drvolume
WriteLogs "ssh $drfiler snapmirror resync -f ${srcfiler}:${srcvolume} $drvolume" >> $LOG
}
ShrinkSourceVolume ()
{
# Determine the theoritical size of the source volume
# get the lun size in kbytes
srclunsize=`ssh $srcfiler df -V $srcvolume|grep -v snap|tail -1|awk '{print $3}'`
# Add 10% more for the free space in the volume
usedspace=`echo "scale=0;$srclunsize*1.1"|bc`
# Remove the decimal part
usedspace=`echo $usedspace|sed -e 's/\.[0-9]//'`
# Change unit from KB to GB
usedspace=`expr $usedspace / 1024 / 1024` # Size in MB,GB
# Get the current snap reserve space percentage
snapresv=`ssh $srcfiler snap reserve $srcvolume|tail -1|awk '{print $7}'|sed -e 's/%//'`
# Calcul of the volume size - unit gb
volumesize=`echo "scale=0;($usedspace*100)/(100-$snapresv)"|bc`
# Current volume size - we use aggr show_space instead vol size to avoid unit effects (the value is not 100% accurate)
currentvolsize=`ssh $srcfiler aggr show_space -g |grep $srcvolume|awk '{print $2}'|sed -e 's/GB//'`
WriteLogs "$srcfiler:$srcvolume - current size: ${currentvolsize}GB - new size: ${volumesize}GB"
# Check if the new size is lower than the current size
if [ $volumesize -lt $currentvolsize ] ; then
# Apply the new size on the source volume
ssh $srcfiler vol size $srcvolume ${volumesize}g
WriteLogs "Apply the new size to $srcfiler:$srcvolume -> ssh $srcfiler vol size $srcvolume ${volumesize}g"
# Update the DR side
BREAK_STATUS=0
ssh $drfiler snapmirror break $drvolume
WriteLogs "ssh $drfiler snapmirror break $drvolume" >> $LOG
while [ $BREAK_STATUS -eq 0 ] ; do
status=`ssh $drfiler snapmirror status $drvolume|tail -1|awk '{print $3}'`
if [ $status = Broken-off ] ; then
BREAK_STATUS=1
else
# wait 3 sec. for completion
sleep 3
fi
done
# Update the volume size on the replicated volume
ssh $drfiler vol options $drvolume fs_size_fixed off
ssh $drfiler vol size $drvolume ${volumesize}g
WriteLogs "Update size on DR volume -> ssh $drfiler vol size $drvolume ${volumesize}g" >> $LOG
# Resync snapmirror
ssh $drfiler snapmirror resync -f -S ${srcfiler}:${srcvolume} $drvolume
WriteLogs "ssh $drfiler snapmirror resync -f ${srcfiler}:${srcvolume} $drvolume" >> $LOG
else
# The new size is bigger than the current size
# because the space used on the volume is >90% or snapshot space consumption is high
# in this case we grow the DR volume
GrowDRvolume
fi
}
#################
# MAIN PROGRAM #
#################
# Check if the source volume still exist and is online
ssh $srcfiler vol status $srcvolume|grep online > /dev/null
if [ $? -ne 0 ] ; then
WriteLogs "$srcfiler:$srcvolume doesn't exist or not online"
exit 2
fi
# Check if the source volume has snap reserve configured
srcvolume_snapres=`ssh $srcfiler snap reserve $srcvolume|tail -1|awk '{print $7}'|sed -e 's/%//'`
if [ $srcvolume_snapres -eq 0 ] ; then
WriteLogs "$srcfiler:$srcvolume doesn't have snap reserve configured"
# Send an email to san team
echo "Please check, no snapshot reserved space for the replication of volume $rcfiler:$srcvolume" \
|mailx -r $MAIL_SENDER -s "Snapmirror replication issue" $MAIL_REC
exit 3
fi
# Check if replication pending because source volume bigger than replicated volume
echo $replication_status|grep "too small" > /dev/null
if [ $? -eq 0 ] ; then # Source volume size has increased
WriteLogs "Source volume $srcvolume on $srcfiler is bigger than destination"
# Check if snap reserved used space on source volume is > 100%
snap_perc=`ssh $srcfiler df -Vh $srcvolume|grep snapshot|awk '{print $5}'|sed -e s'/%//'`
WriteLogs "snapshot resv used ${snap_perc}% on $srcfiler:$srcvolume"
if [ $snap_perc -ge $SNAPRESVTRESHOLD ] ; then # Snapshot usage >= SNAPRESVTRESHOLD
# In this case we break the replication and extend the replicated volume
GrowDRvolume
else
# Check the occupation of the volume
# if the volume is full at >=90%
# we will extend the DR volume
srcvolume_occup=`ssh $srcfiler df -Vh $srcvolume|grep -v snap|tail -1|awk '{print $5}'|sed -e 's/%//'`
if [ $srcvolume_occup -ge $VOLFULLTRESHOLD ] ; then
WriteLogs "Source volume $srcvolume occupation: ${srcvolume_occup}%, the replicated volume will be expanded"
GrowDRvolume
else
# In this case we reduce the size of the source volume
ShrinkSourceVolume
fi
fi
else
# Check if the source filer is still reachable
FAILEDPING=0
ssh $drfiler 'priv set -q advanced;ping $srcfiler'|grep alive > /dev/null
if [ $? -ne 0 ] ; then
# Run the test 3 times to confirm
ind=0
while [ ind -ne 3 ] ; do
ssh $drfiler 'priv set -q advanced;ping $srcfiler'|grep alive > /dev/null
if [ $? -ne 0 ] ; then
FAILEDPING=`expr $FAILEDPING + 1`
fi
ind=`$ind + 1`
sleep 2
done
fi
if [ $FAILEDPING -lt 3 ] ; then # No prob with network or dns
# Is the volume size the same between PROD and DR?
drvolsize=`ssh $drfiler vol size $drvolume|tail -1|grep "has size"|awk '{print $NF}'|sed -e 's/.$//'`
srcvolsize=`ssh $srcfiler vol size $srcvolume|grep "has size"|awk '{print $NF}'|sed -e 's/.$//'`
if [ $drvolsize == $srcvolsize ] ; then
WriteLogs "$srcfiler:$srcvolume and $drfiler:$drvolume have the same size: $srcvolsize"
WriteLogs "We will break snapmirror and resync to force an update"
ssh $drfiler snapmirror break $drvolume
WriteLogs "ssh $drfiler snapmirror break $drvolume" >> $LOG
BREAK_STATUS=0
while [ $BREAK_STATUS -eq 0 ] ; do
status=`ssh $drfiler snapmirror status $drvolume|tail -1|awk '{print $3}'`
if [ $status = Broken-off ] ; then
BREAK_STATUS=1
else
# wait 3 sec. for completion
sleep 3
fi
done
# Resync now
ssh $drfiler snapmirror resync -f -S ${srcfiler}:${srcvolume} $drvolume
WriteLogs "Resync snapmirror relationship, volume size stay unchanged: ssh $drfiler snapmirror resync -f ${srcfiler}:${srcvolume} $drvolume" >> $LOG
fi
else
# Send an alert email to SAN team
WriteLogs "$srcfiler is not reachable from $drfiler"
# Send an email to san team
echo "Please check for possible network issue, $drfiler can't ping $srcfiler" \
|mailx -r $MAIL_SENDER -s "Snapmirror replication issue" $MAIL_REC
exit 5
fi
fi
# Delete temp file
rm /tmp/snapmirror_status_${volume}
| true |
0afc8a5b38e1231e8c9b4633bc45599aabfc1185 | Shell | tnakaicode/jburkardt | /triangle_exactness/triangle_exactness.sh | UTF-8 | 419 | 3 | 3 | [] | no_license | #!/bin/bash
#
g++ -c -I$HOME/include triangle_exactness.cpp
if [ $? -ne 0 ]; then
echo "Errors compiling triangle_exactness.cpp"
exit
fi
#
g++ triangle_exactness.o -lm
if [ $? -ne 0 ]; then
echo "Errors linking and loading triangle_exactness.o."
exit
fi
#
rm triangle_exactness.o
#
chmod ugo+x a.out
mv a.out ~/bincpp/$ARCH/triangle_exactness
#
echo "Executable installed as ~/bincpp/$ARCH/triangle_exactness"
| true |
12f9cf1112751229c201f50cde7278dcfa8a3979 | Shell | mattwthompson/dotfiles | /zshrc | UTF-8 | 1,906 | 3.015625 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | plugins=(
git
bundler
dotenv
osx
rake
rbenv
ruby
)
ZSH_THEME="theunraveler"
ZSH_THEME_GIT_PROMPT_PREFIX="%{$fg[white]%}("
ZSH_THEME_GIT_PROMPT_SUFFIX="%{$fg[white]%})%{$reset_color%}"
ZSH_THEME_GIT_PROMPT_DIRTY="*"
ZSH_THEME_GIT_PROMPT_CLEAN=""
# %~ is the current working directory relative to the home directory
PROMPT='[$FG[228]%~%{$reset_color%}]'
PROMPT+=' $(git_prompt_info)'
PROMPT+=' %(?.$FG[154].$FG[009])€%{$reset_color%} '
# If you come from bash you might have to change your $PATH.
export PATH=$HOME/bin:/usr/local/bin:$PATH
# Path to your oh-my-zsh installation.
export ZSH="/Users/mwt/.oh-my-zsh"
CASE_SENSITIVE="true"
source $ZSH/oh-my-zsh.sh
# aliases
alias xmgrace='xmgrace -free'
alias vi='nvim'
alias vip='vi -p'
alias lspy='ls -l | grep py'
alias vmd='/Applications/VMD\ 1.9.3.app/Contents/MacOS/startup.command'
alias preview='open -a Preview'
alias rscp='rsync -avzhe ssh --progress'
alias rm='rmtrash' # brew install rmtrash
alias diff='colordiff' # brew install colordiff
alias brewup='brew update; brew upgrade; brew cleanup; brew doctor'
# environment variables
export MP_EDITOR='/usr/bin/vim'
export VMD=/Users/mwt/software/scripts/vmd_scripts
export OE_LICENSE=/Users/mwt/.oe_license.txt
# script from http://onethingwell.org/post/586977440/mkcd-improved
# make a directory and immediately change to it
mkcd () {
mkdir -p "$*"
cd "$*"
}
# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/Users/mwt/anaconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
if [ -f "/Users/mwt/anaconda3/etc/profile.d/conda.sh" ]; then
. "/Users/mwt/anaconda3/etc/profile.d/conda.sh"
else
export PATH="/Users/mwt/anaconda3/bin:$PATH"
fi
fi
unset __conda_setup
# <<< conda initialize <<<
export PATH="/usr/local/sbin:$PATH"
| true |
b84ef2a38d7ec2984af8bdf8b60638a9bd8d822a | Shell | izikeros/scripts | /my_scripts/save-boot-time.sh | UTF-8 | 304 | 3.125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
FILE=$HOME/data/boots.log
if [[ -e $FILE ]]; then
journalctl --list-boots | sed 's/^\s\+//' | cut -d ' ' -f2- >> $FILE
echo "Append to $FILE"
else
journalctl --list-boots | sed 's/^\s\+//' | cut -d ' ' -f2- > $FILE
echo "Create $FILE"
fi
# sort -k2 -u -o $FILE $FILE
| true |
d9dc02b2afb5c402394741803481ff847eb447b2 | Shell | jonathanjtan/warframe-relic-data | /update.sh | UTF-8 | 564 | 3.15625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env sh
if [ -n "$(git status --porcelain)" ];then
echo "Changes found => pushing"
WARFRAME_BUILD="$(cat ./data/version)"
git config user.name "Titania CI"
git config user.email "titaniaci@sleepylux.xyz"
git add .
git commit -m "📝 Update(items) for Warframe Version $WARFRAME_BUILD"
git remote set-url origin "https://Soundofdarkness:$ACCESS_TOKEN@github.com/TitaniaProject/warframe-relic-data"
git push --set-upstream origin master
echo "Sucessfully updated Items to Warframe version $WARFRAME_BUILD"
else
echo "No changes found => Finished."
fi | true |
39d26bd0a99012f6ab22011edc46f70ea36437f1 | Shell | matt-welch/GENI_VT | /vm/networking/startbr.sh | UTF-8 | 690 | 3.390625 | 3 | [] | no_license | #!/bin/bash
# NOTE: if this is run when the interface is the ONLY interface,
# you will lose your SSH connection when the interface goes down
# use 10G network connection for data
IF=eth10 # $(ifconfig | grep 192.168 -B 1 | head -n 1 | cut -d ' ' -f 1)
BR=br0
echo "Creating ${BR} in $0"
IP_ADDR=$(/sbin/ifconfig ${IF} | grep "inet addr" | tr -s ' ' | cut -d ' ' -f3 | cut -d ":" -f2)
NETMASK=$(/sbin/ifconfig ${IF} | grep "inet addr" | tr -s ' ' | cut -d ":" -f4)
/sbin/brctl addbr ${BR}
/sbin/brctl addif ${BR} ${IF}
ifconfig ${IF} 0.0.0.0 promisc
ifconfig ${BR} ${IP_ADDR} netmask ${NETMASK} up
# this line may not be necessary in most cases
#route add default gw 192.168.42.1 $BR
| true |
355fddd702f3f3a1bab4642f3959322bd12e24cd | Shell | liluyi/race-detectors | /apps/ldap/mk-tsan2 | UTF-8 | 1,838 | 2.9375 | 3 | [] | no_license | #!/bin/bash
set -e
APP_DIR=`pwd`
VER=2.4.33
if [ ! -f openldap-$VER.tgz ]; then
wget http://www.openldap.org/software/download/OpenLDAP/openldap-release/openldap-$VER.tgz
fi
if [ ! -f db-5.3.15.tar.gz ]; then
wget http://download.oracle.com/berkeley-db/db-5.3.15.tar.gz
fi
rm -rf tsan2
mkdir tsan2
cd tsan2
tar zxvf ../openldap-$VER.tgz
tar xvzf ../db-5.3.15.tar.gz
pushd db-5.3.15
pushd build_unix
CC=gcc-4.9 CFLAGS="-fsanitize=thread -fPIE" LDFLAGS="-fsanitize=thread -pie" ../dist/configure --prefix=$APP_DIR/tsan2/install
make -j25
make install -j25
popd
popd
# Build.
cd openldap-$VER
patch -p1 < ../../add-resp-to-mtread.patch
mkdir obj
cd obj
CC=gcc-4.9 CFLAGS="-g -O0 -fsanitize=thread -fPIE" CPPFLAGS="-I$APP_DIR/tsan2/install/include" LDFLAGS="-L$APP_DIR/tsan2/install/lib -fsanitize=thread -pie -Wl,-rpath -Wl,$APP_DIR/tsan2/install/lib" ../configure --prefix=$APP_DIR/tsan2/install
make -j25
make install -j25
cd $APP_DIR
# We use this one because it has debug symbols.
ln -s $APP_DIR/xtern-test-mt-hot $APP_DIR/tsan2/openldap-$VER/tests/scripts/xtern-test-mt-hot
ln -s $APP_DIR/local.options $APP_DIR/tsan2/openldap-$VER/obj/tests/local.options
ln -s $APP_DIR/tsan2/openldap-$VER/obj/servers/slapd/slapd $APP_DIR/tsan2/install/libexec/slapd.x86
cp $APP_DIR/tsan2/install/etc/openldap/slapd.conf $APP_DIR/tsan2/install/etc/openldap/slapd.conf.bak
cp slapd.conf.template $APP_DIR/tsan2/install/etc/openldap/slapd.conf
# Add benchmark.
cd tsan2/openldap-$VER
patch -p1 < ../../only-xtern-test-mt-hot.patch
patch -p1 < ../../only-run-bdb-tests.patch
cd $APP_DIR/tsan2
ln -s $APP_DIR/tsan2/openldap-$VER/obj/servers/slapd/slapd
ln -s $APP_DIR/tsan1/openldap-$VER/obj/tests/progs/slapd-mtread
cd openldap-$VER/obj/tests
set +e
make test
sed -i -e "s/\.\//tsan1\/openldap-${VER}\/obj\/tests\//g" testrun/slapd.1.conf
| true |
5dd9d78a1f4c77e0e219008bda9449362ba480d2 | Shell | pan-mroku/dotfiles | /bin/git-istage | UTF-8 | 595 | 3.75 | 4 | [] | no_license | #!/bin/bash
AskYN()
{
while [ true ]; do
read -p "$1" yn
case $yn in
[yY]*)
return 0
break
;;
[nN]*)
return 1
break
;;
[pP]*)
return 2
break
;;
*)
echo;;
esac
done
}
OLDIFS=$IFS
IFS=$'\n'
for FILE in `git s|grep "^.M.*\|^??.*"|sed -n 's|^.M.||p;s| |\\ |;s|"||g'`; do
clear
git diffw "`pwd`/$FILE"
AskYN "$FILE: Add to commit? [y]es/[p]atch/[n]o: "
case $? in
0)
git add "$FILE"
;;
2)
git add -p "$FILE"
;;
esac
done
IFS=$OLDIFS
| true |
4c6e38776a693dddbbe4b2582cfb524e95cda7d9 | Shell | eiji03aero/mash | /backend/scripts/docker.sh | UTF-8 | 353 | 3.4375 | 3 | [] | no_license | #!/bin/bash
cmd="${1:-def}"
script_dir=$(cd $(dirname $0); pwd)
root_dir=$(dirname $script_dir)
if [ $cmd = "def" ]; then
echo no command specified
exit 1
elif [ $cmd = "publish:go-dev" ]; then
tag="eiji03aero/mash-go-dev:latest"
cd $root_dir
docker build \
-t $tag \
-f ./docker/Dockerfile.go-dev \
.
docker push $tag
fi
| true |
86fff476aa84aaa0385e8271a2f818bb4cd24203 | Shell | gonzalorodrigo/ScSFWorkload | /bin/workers_ping.sh | UTF-8 | 222 | 3.125 | 3 | [
"BSD-3-Clause-LBNL"
] | permissive | HOSTS_FILE="./hosts.list"
while IFS='' read -r worker || [[ -n "$worker" ]]; do
ssh -t $worker exit &> /dev/null < /dev/null
if [ $? = 0 ]; then
echo "$worker Up"
else
echo "$worker Down"
fi
done < "$HOSTS_FILE"
| true |
ac78170895e73bc547cef9e58a0264d7164f66f0 | Shell | monokrome/dotfiles | /.local/etc/zsh/plugins/dotfiles.zsh | UTF-8 | 230 | 2.546875 | 3 | [] | no_license | # Load completion plugin
autoload -U compinit
# Create a git alias called `dot` for managing dotfiles
alias dot='git --git-dir="$HOME/.dotfiles" --work-tree="$HOME"'
# Use git autocompletion with the dot alias
compdef _git dot
| true |
fc685f6ba36fb785a8d31c291e1506724eb6d8a1 | Shell | petronny/aur3-mirror | /lightdm-webkit-theme-antergos-git/PKGBUILD | UTF-8 | 1,303 | 2.75 | 3 | [] | no_license | # Maintainer: Dustin Falgout <dustin@antergos.info>
pkgname=lightdm-webkit-theme-antergos-git
_gitname=lightdm-webkit-theme-antergos
_pkgname=antergos
pkgver=2.1.0.r9
_pkgver=2.1.0
_bgver=0.6
epoch=1
pkgrel=2
pkgdesc="The official greeter theme of Antergos Linux"
arch=('any')
url="http://www.antergos.com"
license=('GPLv2')
depends=('lightdm' 'lightdm-webkit2-greeter')
makedepends=('git')
provides=('lightdm-webkit-theme-antergos')
conflicts=('lightdm-webkit-theme-antergos')
install=theme.install
source=("$pkgname"::'git+http://github.com/Antergos/lightdm-webkit-theme-antergos.git#branch=master'
"http://antergos.org/antergos-wallpapers-${_bgver}.zip")
sha256sums=('SKIP'
'383484231d5d4c1e785d8b5e7635909e3e3200a9fe18fef5dbc6fc2f23961b28')
pkgver() {
cd "${srcdir}/${pkgname}"
printf "%s.r%s" "${_pkgver}" "$(git rev-list --count HEAD)"
}
build()
{
cd "${srcdir}/${pkgname}"
sed -i 's%/usr/share/%/usr/share/lightdm-webkit/themes/%g' index.html
}
package()
{
cd "${pkgdir}"
mkdir -p usr/share/lightdm-webkit/themes
cd usr/share/lightdm-webkit/themes
cp -dpr --no-preserve=ownership "${srcdir}/${pkgname}" ${_pkgname}
msg "Removing .git files"
cd ${_pkgname}
rm -f .gitignore
rm -rf .git
cp -dpr --no-preserve=ownership "${srcdir}/antergos-wallpapers-${_bgver}" wallpapers
}
| true |
0a39d23b609491e4c13349d344d6b4d7c203fba6 | Shell | walleleung/auto-build-install-script | /lib/lc_git_lib.sh | UTF-8 | 357 | 3.84375 | 4 | [] | no_license | #!/bin/bash
function git_update()
{
if [ "$#" -lt 2 -o -z "$0" -o -z "$1" ] ; then
echo "Usage: git_update gitpath giturl"
return 1
fi
gitpath="$1"
giturl="$2"
unset GIT_DIR
if [ -d "$gitpath" ] ; then
cd $gitpath
git pull origin master
else
mkdir -p "$gitpath"
git clone "$giturl" "$gitpath"
cd "$gitpath"
fi
}
| true |
b663eeb4f523f0bff3343f28bf32184c5fb25329 | Shell | hobama/ideam | /setup/install.sh | UTF-8 | 1,439 | 2.953125 | 3 | [] | no_license | #!/bin/ash
RED='\033[0;31m'
NC='\033[0m'
YELLOW='\033[1;33m'
GREEN='\033[0;32m'
echo -e "${YELLOW}[ INFO ]${NC} Installing admin scripts"
echo -e "${YELLOW}[ INFO ]${NC} Creating cdx.admin user"
touch /usr/local/kong/consumer_error.log
touch /usr/local/kong/consumer_out.log
sh /usr/local/kong/setup/setup_consumer.sh cdx.admin 2>/usr/local/kong/consumer_error.log >/usr/local/kong/consumer_out.log
if [ $? -eq 0 ]; then
echo -e "${GREEN}[ OK ] ${NC} Created cdx.admin user"
else
echo -e "${RED}[ ERROR ] ${NC}Failed to create cdx.admin user"
fi
echo -e "${YELLOW}[ INFO ]${NC} Adding ACL to cdx.admin user"
touch /usr/local/kong/acl_error.log
touch /usr/local/kong/acl_out.log
sh /usr/local/kong/setup/setup_consumer-acl.sh cdx.admin 2>/usr/local/kong/acl_error.log >/usr/local/kong/acl_out.log
if [ $? -eq 0 ]; then
echo -e "${GREEN}[ OK ] ${NC} Added ACL to cdx.admin user"
else
echo -e "${RED}[ ERROR ] ${NC}Failed to add ACL to cdx.admin user"
fi
echo -e "${YELLOW}[ INFO ]${NC} Adding key-auth to cdx.admin user"
touch /usr/local/kong/auth_error.log
touch /usr/local/kong/auth_out.log
sh /usr/local/kong/setup/setup_consumer-key-auth.sh cdx.admin 2>/usr/local/kong/auth_error.log >/usr/local/kong/auth_out.log
if [ $? -eq 0 ]; then
echo -e "${GREEN}[ OK ] ${NC}Added key-auth to cdx.admin user"
else
echo -e "${RED}[ ERROR ] ${NC}Failed to add key-auth to cdx.admin user"
fi
| true |
ed1e2b5bb9dd5667e1252ca4aa8a120d8cf18fed | Shell | Vic-Dev/storefront | /scripts/print-package-versions.sh | UTF-8 | 281 | 2.59375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Change to the root of the repo
cd "${BASH_SOURCE%/*}/.."
# Create a Markdown list of all the package versions
node -p 'JSON.stringify(require("./presets/package-versions"), null, 2)' |
tr -d '",' |
sed '1d; $d; s/^ */- /; s#@storefront/[a-z-]*#`&`#' |
sort
| true |
ddb6891599c3be6c449668527dbd97ef38807cf2 | Shell | paradoxical-io/deployment | /mvn/deploy.sh | UTF-8 | 409 | 2.546875 | 3 | [
"MIT"
] | permissive | MVN_SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
function snapshot() {
mvn clean deploy --settings "${MVN_SCRIPT_DIR}/settings.xml" \
-P snapshot \
-DskipTests $@
}
function release() {
mvn clean deploy --settings "${MVN_SCRIPT_DIR}/settings.xml" \
-P release \
-DskipTests \
-Drevision="$REVISION" \
-Ddeployment.directory="${SCRIPT_DIR}" $@
}
| true |
0fb0e53e78c5e86338ffc1f617076482058800b0 | Shell | eviweb/bash-scripts | /security/curlsh | UTF-8 | 2,255 | 4.3125 | 4 | [] | no_license | #! /bin/bash
# secure the use of `curl <url> | sh`
#
# @see http://blog.classicalcode.com/2012/11/curl-pipe-sh-exploit-proof-of-concept/
# @see http://www.djm.org.uk/protect-yourself-from-non-obvious-dangers-curl-url-pipe-sh/
# @see https://github.com/djm/pipe-to-sh-poc
#
# define a variable content using heredoc
function define()
{
IFS=$''
read -d '' ${1} || true;
}
# Help message
define Usage << "HELP"
Usage: curlsh [OPTIONS] FILE [ARGUMENTS]
Options:
-h display this message
-d temporary working directory (default: /tmp)
-e text editor
-q quiet mode, disable the secure mode (!!! WARNING !!!)
HELP
# print the help message
function usage()
{
echo -e "$Usage"
}
# if no given parameters, abort
if [ $# -eq 0 ]
then
usage
exit 1
fi
# default command line options
OPTIONS=":hd:e:q"
# default temporary working directory
TMPDIR="/tmp"
# default temporary file name
TMPFILE="curlsh.XXXXXXXX"
# quiet mode default value
QUIETMODE=0
# default editor
EDITOR="cat"
# get command line options
while getopts $OPTIONS option
do
case $option in
d) [[ -d "${OPTARG}" ]] && TMPDIR=${OPTARG};;
e) [[ -e "$(which ${OPTARG})" ]] && EDITOR=$(which ${OPTARG});;
q) QUIETMODE=1;;
h | *) usage && exit 0 || exit 1;;
esac
done
shift $(($OPTIND - 1 ))
# yes/no menu choice
function yesno()
{
local def=${2:-n}
while ! [[ "$choice" =~ [yYnN] ]]
do
read -s -p "$1 ? [$def]" choice
choice=${choice:-$def}
echo ''
done
[[ "$choice" =~ [yY] ]] && return 0 || return 1
}
# curlsh function
# curlsh URL [ARGS...]: download URL into a temp file, optionally show it to
# the user with $EDITOR (default: cat) and ask for confirmation, then execute
# it with sh, forwarding ARGS. This guards against blind `curl url | sh`.
function curlsh {
    file=$(mktemp $TMPDIR/$TMPFILE) || { echo "Failed creating file"; return; }
    curl -s "$1" > $file || { echo "Failed to curl file"; return; }
    if [[ $QUIETMODE -eq 0 ]]
    then
        echo ""
        echo "**** BEGIN FILE CONTENT ****"
        # Show the downloaded script for review before anything is executed.
        $EDITOR $file || { echo "Editor quit with error code"; return; }
        echo "**** END FILE CONTENT ****"
        echo ""
        if ! yesno 'Would you want to execute this script' 'N'
        then
            echo 'Abort.'
            rm $file
            exit 1
        fi
    fi
    shift
    # Run the reviewed script with the remaining CLI arguments, then clean up.
    sh $file $@
    rm $file
}
# NOTE(review): $@ is unquoted here and inside curlsh — arguments containing
# spaces will be re-split; confirm before relying on it.
curlsh $@
exit 0
| true |
0780592b065032953915d7184b16b62a4d23e269 | Shell | spseol/rozvrh | /updater/update_remote.sh | UTF-8 | 614 | 3.4375 | 3 | [] | no_license | url="`cat target_url`"
# merge NAME: post-process tmp/NAME.json with merge.py, accumulating the
# result into tmp/final-NAME.json (written via a .tmp file, then moved).
merge() {
    fname="tmp/final-$1.json"
    touch "$fname"
    python3 merge.py "tmp/$1.json" "$fname" > "$fname.tmp"
    mv "$fname.tmp" "$fname"
}
# Upload the timetable ("rozvrh") only if its md5 changed since last upload;
# the last-sent checksum is cached in the SENT_ROZVRH file.
SUM="`md5sum tmp/rozvrh.json | cut -d' ' -f1,1`"
if [ "`cat SENT_ROZVRH`" != "$SUM" ]
then
    echo 'updating rozvrh'
    curl -F "file=@tmp/final-rozvrh.json" "$url/rozvrh"
    echo "$SUM" > SENT_ROZVRH
fi
# Same for the changes feed ("zmeny"), except it is merged before upload.
SUM="`md5sum tmp/zmeny.json | cut -d' ' -f1,1`"
if [ "`cat SENT_ZMENY`" != "$SUM" ]
then
    echo 'updating zmeny'
    merge zmeny
    curl -F "file=@tmp/final-zmeny.json" "$url/zmeny"
    echo "$SUM" > SENT_ZMENY
fi
echo " Done. "
| true |
7100c1c40c80a4526ef4551c3d02b3442597e91d | Shell | segmentio/amazon-vpc-cni-k8s | /scripts/lib/common.sh | UTF-8 | 332 | 3.703125 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# check_is_installed NAME: exit the whole script with status 1 if NAME is not
# an installed command (the lookup is delegated to is_installed).
check_is_installed() {
  local __name="$1"
  if ! is_installed "$__name"; then
    # Fix: diagnostics belong on stderr so they don't pollute captured stdout.
    echo "Please install $__name before running this script." >&2
    exit 1
  fi
}
# is_installed NAME: return 0 if NAME resolves to a command on PATH (or a
# builtin/function), 1 otherwise.
is_installed() {
  local __name="$1"
  # Fix: the old form executed the *output* of `which` via $(...) and relied
  # on bash's empty-command status semantics (ShellCheck SC2091); `command -v`
  # is the portable, builtin way to test for a command (SC2230).
  if command -v "$__name" >/dev/null 2>&1; then
    return 0
  else
    return 1
  fi
}
| true |
dd135f2e9a23b9ece288d9a5718f598c83af4656 | Shell | shirtsgroup/finite-temperature-crystal-scripts | /useful_scripts/createkeyrestraint | UTF-8 | 844 | 3.640625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#BASH SCRIPT TO ADD RESTRAINTS TO A KEY FILE
#param f - The name of the input xyz file to use as restraints
#param k - The name of the input key file to add the restraints to
#=============================================================================================
while getopts "f:k:" opt; do
        case $opt in
        f )
                file=$OPTARG
                ;;
        k )
                keyfile=$OPTARG
                ;;
        esac
done
# Line 1 of an xyz file holds the atom count; atom records start on line 3.
# NOTE(review): assumes the Tinker xyz layout (index, name, x, y, z, ...) —
# confirm for your inputs.
numatoms=$(sed -n '1p' $file | awk '{print $1}')
let "lineend=$numatoms+2"
#Append the restraint header to the key file.
echo "" >> $keyfile
echo '# Position restraints' >> $keyfile
# One RESTRAIN-POSITION line per atom: atom index plus its x/y/z coordinates
# (fields 1,3,4,5 of the xyz record), interpolated with force constant 10.0.
for i in $(seq 3 $lineend); do
        line=$(sed -n "${i}p" $file | awk '{print $1,$3,$4,$5}')
        line="RESTRAIN-POSITION $line INTERPOLATE 1 0.0 10.0"
        #line="RESTRAIN-POSITION $line INTERPOLATE 1 0.0 2.39"
        echo $line >> $keyfile
done
| true |
6e4c8b46079a10a5956a6e1b361f875c70325948 | Shell | jxs/.dotfiles | /.zshrc | UTF-8 | 650 | 2.515625 | 3 | [] | no_license | # -- variables
# Shell/terminal identity and defaults.
export SHELL="/usr/bin/zsh"
export TERM=xterm-kitty
export COLORTERM=truecolor
export ZSH_THEME="arrow"
export EDITOR='nvim'
export LANG=en_US.UTF-8
# Use the systemd-managed ssh-agent socket.
export SSH_AUTH_SOCK="${XDG_RUNTIME_DIR}/ssh-agent.socket"
# -- aliases
alias cat='bat'
alias vim='nvim'
# gb: print the current git branch name.
alias gb='git rev-parse --abbrev-ref HEAD'
# -- load zgen (zsh plugin manager)
source "/usr/share/zsh/share/zgen.zsh"
# -- zgen packages: only (re)generate the init script when no saved one exists
if ! zgen saved; then
    zgen oh-my-zsh
    zgen oh-my-zsh plugins/pass
    zgen load zsh-users/zsh-autosuggestions
    zgen load 'wfxr/forgit'
    # generate the init script from plugins above
    zgen save
fi
# -- fzf keybindings/completion, if installed
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
| true |
db04dcde930d0e09867f0c491715eab662df208f | Shell | linuxmint/ubiquity | /d-i/source/partman-efi/commit.d/format_efi | UTF-8 | 2,651 | 3.78125 | 4 | [] | no_license | #!/bin/sh
. /lib/partman/lib/base.sh
ARCH="$(archdetect)"
case $ARCH in
i386/*|amd64/*)
new_efi_fs=fat32
;;
*)
new_efi_fs=fat16
;;
esac
if search-path mkfs.fat; then
MKFS_FAT=mkfs.fat
else
MKFS_FAT=mkdosfs
fi
enable_swap
for dev in $DEVICES/*; do
[ -d "$dev" ] || continue
cd $dev
partitions=
open_dialog PARTITIONS
while { read_line num id size type fs path name; [ "$id" ]; }; do
[ "$fs" != free ] || continue
partitions="$partitions $id,$num"
done
close_dialog
for part in $partitions; do
id=${part%,*}
num=${part#*,}
[ -f $id/method -a -f $id/format ] || continue
# Formatting an EFI System Partition that already has a
# filesystem on it is dangerous
# (https://bugs.launchpad.net/bugs/769669). The least bad
# option seems to be to skip such partitions entirely.
if [ -f $id/detected_filesystem ]; then
continue
fi
method=$(cat $id/method)
if [ "$method" = efi ]; then
if [ -f $id/formatted ] && \
[ $id/formatted -nt $id/method ]; then
continue
fi
log "Try to format EFI $new_efi_fs fs in $dev/$id"
template=partman-basicfilesystems/progress_formatting
open_dialog PARTITION_INFO $id
read_line x1 x2 x3 x4 x5 device x6
close_dialog
RET=''
db_metaget partman/filesystem_short/efi description || RET=''
[ "$RET" ] || RET=efi
db_subst $template TYPE "$RET"
db_subst $template PARTITION "$num"
db_subst $template DEVICE $(humandev $(cat device))
db_progress START 0 3 partman/text/formatting
db_progress INFO $template
db_progress SET 1
log_sector_size="$(blockdev --getss "$(cat device)")"
if [ "$log_sector_size" = 512 ]; then
mkdosfs_opts=
else
# mkdosfs has trouble handling cluster
# calculations for non-512-byte logical
# sectors. Forcing one sector per cluster
# avoids this as long as the filesystem
# isn't too large, but that shouldn't be a
# problem for EFI System Partitions.
mkdosfs_opts='-s 1'
fi
if log-output -t partman --pass-stdout \
$MKFS_FAT -F "${new_efi_fs#fat}" \
-S "$log_sector_size" \
$mkdosfs_opts \
"$device" >/dev/null; then
sync
status=OK
else
status=failed
fi
db_progress STOP
if [ "$status" != OK ]; then
db_subst partman-basicfilesystems/create_failed TYPE efi
db_subst partman-basicfilesystems/create_failed PARTITION "$num"
db_subst partman-basicfilesystems/create_failed DEVICE $(humandev $(cat device))
db_input critical partman-basicfilesystems/create_failed || true
db_go || true
#disable_swap
exit 1
fi
>$id/formatted
fi
done
done
#disable_swap
| true |
42c3add1e294c5a1b81e9595a56f63701f0ddf3c | Shell | johnelse/.dotfiles | /scripts/new-file-cpp | UTF-8 | 516 | 3.40625 | 3 | [] | no_license | #!/bin/bash
# Create a new C++ source file <path>.cpp with a standard copyright header.
# Usage: new-file-cpp <path-without-extension>
FILEPATH=$1
# Fix: these guards used `return`, which is invalid at the top level of an
# executed script — bash printed an error and carried on, so the checks never
# actually aborted. `exit` does; diagnostics now go to stderr.
[ -z "$FILEPATH" ] && { echo "error: \$FILEPATH is empty" >&2; exit 1; }
[ -e "$FILEPATH.cpp" ] && { echo "error: \$FILEPATH.cpp exists" >&2; exit 1; }
FILENAME=$(basename "$1")
YEAR=$(date +%Y)
# NOTE(review): the #include uses the full FILEPATH rather than the basename —
# verify this matches the project's include layout before changing it.
cat << EOF > "${FILEPATH}.cpp"
//==============================================================================
// ${FILENAME}.cpp
// Copyright ${YEAR} inMusic Brands. All rights reserved.
//==============================================================================

#include "${FILEPATH}.h"
EOF
| true |
9bfda8baf7cd13749976b914c9bab485e6651f9a | Shell | kanhaiya38/se-assignment | /Assignment 3/csvtovcfwrapper.sh | UTF-8 | 255 | 3.3125 | 3 | [] | no_license | #!/bin/bash
# Validate arguments, then convert a CSV contact list to vCard format by
# delegating to csvtovcf.py; output goes to <file_name>.vcf.
if [[ $# -ne 1 ]]; then
    echo "Incorrect usage"
    echo "usage: ./csvtovcf.sh <file_name>.csv"
    exit 1
fi
# Require a .csv extension so the derived .vcf name below is meaningful.
if [[ $1 != *.csv ]]; then
    echo "Please give proper csv file as input"
    exit 1
fi
# ${1%.csv} strips the extension. NOTE(review): $1 is unquoted here, so
# filenames containing spaces would word-split — confirm acceptable.
python3 csvtovcf.py $1 >${1%.csv}.vcf
| true |
0161ca6876a1c5d6f0b1f755d2b04b89ae0da7d5 | Shell | kanghyojun/.conf | /install.sh | UTF-8 | 219 | 2.53125 | 3 | [] | no_license | #!/bin/sh
# Dotfiles bootstrap: fetch git submodules, then run each component's own
# install.sh (vimrc, tmux.conf and bashrc subdirectories).
install_script="install.sh"
vimrc="./vimrc"
tmux="./tmux.conf"
bashrc="./bashrc"
git submodule init
git submodule update
sh "$vimrc/$install_script"
sh "$tmux/$install_script"
sh "$bashrc/$install_script"
| true |
ab937c159516da151abe937c1e7ffeac3a6c5194 | Shell | alopatindev/dotfiles | /common/home/scripts/slowpokecast-toggle.sh | UTF-8 | 352 | 3.234375 | 3 | [] | no_license | #!/bin/bash
# Toggle the slowpokecast recorder: kill it if running, start it otherwise,
# announcing the new state via a dzen2 banner.
REC="slowpokecast-record.sh"
# Detect a running instance (exclude our own grep from the ps listing).
P=$(ps aux | grep -v grep | grep "$REC" | grep '/bin/bash')

# msg TEXT: flash TEXT for one second in a large dzen2 bar.
msg() {
    echo "^fg(#ffffff)^bg(#5D9457)$1" | \
        dzen2 -fn "DejaVu Sans:size=70" -p 1 -ta l -y 20
}

if [[ $P != '' ]]; then
    killall "$REC"
    msg "$REC STOPPED"
else
    # Fix: the original used `2&>>/dev/null`, which passed a literal "2"
    # argument to the recorder and appended both streams via `&>>`; the
    # intended form silences stdout and stderr without the stray argument.
    sleep 1 && ~/scripts/$REC >/dev/null 2>&1 &
    msg "$REC STARTED"
fi
| true |
e741e400d4fe6f329416a0197988664d5115d169 | Shell | GabrielCzar/MyDotfiles | /workstation.sh | UTF-8 | 1,939 | 2.90625 | 3 | [] | no_license | #!/bin/bash
## Removing locks from apt ##
sudo rm /var/lib/dpkg/lock-frontend
sudo rm /var/cache/apt/archives/lock
## Update repository ##
sudo apt update &&
## python-pip nautilus-dropbox not found in ubuntu-20
## Installing packages and softwares from Ubuntu deb repository ##
sudo apt install git maven python3 git build-essential libssl-dev flatpak gnome-software-plugin-flatpak yarn -y &&
sudo apt install ruby-full -y &&
sudo apt-get install --no-install-recommends gnome-panel -y &&
## Adicionando repositório Flathub ##
flatpak remote-add --if-not-exists flathub https://flathub.org/repo/flathub.flatpakrepo &&
## Instalando Apps do Flathub ##
sudo flatpak install flathub com.sublimetext.three -y &&
# Installing tools
sudo apt install docker docker-compose &&
sudo groupadd -f docker &&
sudo usermod -aG docker $USER &&
newgrp docker &&
sudo apt install gnome-tweak-tool -y &&
sudo apt install guake -y &&
sudo apt install curl -y &&
sudo apt install apache2 -y &&
sudo chown -R $USER:$USER /var/www/html &&
sudo apt install zsh -y &&
chsh -s $(which zsh) &&
sh -c "$(wget -O- https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)" -y &&
wget https://nosqlbooster.com/s3/download/releasesv5/nosqlbooster4mongo-5.2.12.AppImage &&
chmod a+x nosqlbooster4mongo-5.2.12.AppImage &&
sudo apt install htop -y &&
sudo apt-get install openvpn -y &&
sudo apt-get install network-manager-openvpn-gnome -y &&
sudo apt install deepin-screenshot -y &&
## Installing Snap packages ##
sudo snap install code --classic &&
sudo snap install --edge node --classic &&
sudo snap install insomnia &&
sudo snap install spotify &&
sudo snap install intellij-idea-ultimate --classic &&
## Atualização do sistema ##
sudo apt update && sudo apt upgrade -y && sudo apt autoclean -y && sudo apt autoremove -y &&
# End of script ##
echo "Finished"
| true |
9ced53501ca1969b9c91c62b91806d80784902a5 | Shell | kericmiles/useful_scripts | /bash/get-vbox-ips | UTF-8 | 365 | 3.03125 | 3 | [] | no_license | #!/bin/bash
#Gets IP addresses for currently running machines in VirtualBox, requires guest tools to be installed
#add to ~/local/bin
# For every running VM name (the quoted field in `VBoxManage list`), query
# the guest properties for its IP and print "name:ip".
# NOTE(review): the for-loop word-splits, so VM names containing spaces will
# be broken into pieces — confirm acceptable for your VM naming scheme.
for f in $(VBoxManage list runningvms | awk -F\" '{print $2}'); do
    ipaddr=$(VBoxManage guestproperty enumerate "$f" | grep IP | awk 'BEGIN { FS=","; } {print $2}'| awk 'BEGIN { FS=":"; } {print $2}')
    echo "$f:$ipaddr"
done
| true |
50dd85bc89fe139d74459e0efb98a5e3ee8e63a1 | Shell | ihomeyuan/MPTCP-1 | /script-tool/configWIFI.sh | UTF-8 | 92 | 2.609375 | 3 | [
"MIT"
# configWIFI: pin a wireless interface to a specific access point.
# Usage: configWIFI <interface> <ap-mac>
# Fix: the original test was `[ -z "$1"]` — the missing space before `]`
# makes `test` fail with a syntax error at runtime, so the usage guard never
# worked. Also require the MAC argument, and exit 1 (portable) instead of -1.
if [ -z "$1" ] || [ -z "$2" ]
then
    echo "iwconfig [interface] [mac]" >&2
    exit 1
fi
sudo iwconfig "$1" ap "$2"
| true |
ed1819bb5c38de6e0599efbb4938115254e914d3 | Shell | joemat/docker-eclipse-for-rcp | /eclipse_install_tools/install_eclipse.sh | UTF-8 | 5,012 | 3.671875 | 4 | [] | no_license | #!/bin/bash
ECLIPSE_DOWNLOAD_URL="http://ftp-stud.fht-esslingen.de/pub/Mirrors/eclipse/technology/epp/downloads/release/mars/R/eclipse-java-mars-R-linux-gtk-x86_64.tar.gz"
ECLIPSE_P2_START_ARGS="-clean -application org.eclipse.equinox.p2.director -noSplash"
ECLIPSE_VMARGS="-vmargs -Declipse.p2.mirrors=true -Djava.net.preferIPv4Stack=true"
#!/bin/bash
# Print usage information and terminate successfully.
# Fix: plain `echo "\t..."` in bash prints a literal backslash-t (escapes are
# only honoured by `echo -e`/printf), so the option list was garbled.
show_help() {
    echo " "
    echo "Usage: $0 -t <installation dir> [-y] [-p <plugin name>] [-d <dropin name>]"
    echo " "
    printf '\t%s\n' "-y -- don't show the confirmation dialog"
    printf '\t%s\n' "-p -- installs the plugin defined in plugin-info/<plugin name>.pi"
    printf '\t%s\n' "-d -- installs the dropin defined in drop-info/<dropin name>.di"
    echo " "
    echo " "
    exit 0
}
show_confirmation() {
ECLIPSE_BASE_DIR="$1"; shift;
echo "Create/Update eclipse installation in $ECLIPSE_BASE_DIR? [y/N]"
read a
case $a in
y|Y)
echo "Ok ... here we go ..."
;;
*)
echo "Aborted"
exit 1;
esac
}
download_eclipse() {
ECLIPSE_BASE_DIR="$1"; shift;
ECLIPSE_DIR="$1"; shift;
# Download Eclipse for RCP and RAP developers
mkdir -p $(dirname $ECLIPSE_DIR)
curl "$ECLIPSE_DOWNLOAD_URL" | tar -C "$ECLIPSE_BASE_DIR" -xvz
}
# Work around the Eclipse startup ClassCastException by forcing a per-user
# OSGi user area (a -vmargs-style property appended to eclipse.ini).
fix_eclipse_classcast_exception() {
    ECLIPSE_DIR="$1"; shift;
    # Fix: quote the redirect target — an installation path containing spaces
    # would otherwise word-split and redirect to the wrong file (SC2086).
    echo "-Dosgi.user.area=@user.home/.eclipse.user.area" >> "$ECLIPSE_DIR/eclipse.ini"
}
# Strip the MaxPermSize JVM flag (and its separate "256m" value line) from
# eclipse.ini; the option is no longer supported as of Java 8.
remove_maxpermsize_from_eclipse_ini() {
    ECLIPSE_DIR="$1"; shift;
    local ini=$ECLIPSE_DIR/eclipse.ini
    # Filter to a sibling file, then move it back into place atomically.
    grep -v -e "MaxPermSize" -e "256m" $ini > $ini.new;
    mv $ini.new $ini
}
install_eclipse() {
ECLIPSE_BASE_DIR="$1"; shift;
ECLIPSE_DIR="$1"; shift;
download_eclipse "$ECLIPSE_BASE_DIR" "$ECLIPSE_DIR"
if [ -f "$ECLIPSE_DIR" ]
then
echo "Found an existing eclipse installation in \"$ECLIPSE_DIR\" aborting!";
exit 1;
fi
fix_eclipse_classcast_exception "$ECLIPSE_DIR"
remove_maxpermsize_from_eclipse_ini "$ECLIPSE_DIR"
}
# run_p2 ECLIPSE_DIR [P2_ARGS...]: invoke the Eclipse p2 director headlessly
# using the eclipse binary inside ECLIPSE_DIR.
run_p2() {
    ECLIPSE_DIR="$1"; shift;
    ECLIPSE_BINARY="$ECLIPSE_DIR/eclipse"
    # Fix: this call read the undefined ECLIPSE_JVMARGS while the script
    # defines ECLIPSE_VMARGS at the top, so the -vmargs tuning was silently
    # dropped. $ECLIPSE_P2_START_ARGS / $ECLIPSE_VMARGS stay unquoted on
    # purpose: they are space-separated option lists that must word-split.
    $ECLIPSE_BINARY $ECLIPSE_P2_START_ARGS "$@" $ECLIPSE_VMARGS
}
# check_for_file FILE DESCRIPTION: abort the whole script (exit 1) with an
# error message if FILE does not exist or is not a regular file.
check_for_file() {
    FILENAME="$1"; shift;
    FILETYPE="$1"; shift;
    if [ ! -f "$FILENAME" ]
    then
        echo "Error: $FILETYPE file \"$FILENAME\" not found! - aborting"
        exit 1;
    fi
}

# check_not_empty NAME VALUE FILE: abort (exit 1) if VALUE is empty; NAME and
# FILE only feed the error message. Used to validate variables sourced from
# the .pi/.di descriptor files.
check_not_empty() {
    PARAMETERNAME="$1"; shift;
    PARAMETERVALUE="$1"; shift;
    FILENAME="$1"; shift;
    if [ -z "$PARAMETERVALUE" ]
    then
        echo "Error: Invalid file format \"$FILENAME\"; \"$PARAMETERNAME\" is missing - aborting!"
        exit 1
    fi
}
# install_plugins ECLIPSE_DIR PLUGINS: PLUGINS is a comma-separated list of
# descriptor names; each plugin-info/<name>.pi is sourced and must define
# REPOSITORY and FEATURES. All repositories/features are accumulated and
# installed in a single p2 director run.
install_plugins() {
    ECLIPSE_DIR="$1"; shift;
    PLUGINS="$1"; shift;
    ALL_REPOSITORIES=""
    ALL_FEATURES=""
    # Split the comma-separated list into words.
    for PLUGIN in $(echo "$PLUGINS" | sed -e 's/,/ /g')
    do
        PI_FILE="$TOOL_INSTALL_PATH/plugin-info/$PLUGIN.pi"
        check_for_file "$PI_FILE" "Plugin info";
        # Reset before sourcing so a descriptor cannot inherit stale values.
        REPOSITORY=""
        FEATURES=""
        . "$PI_FILE"
        check_not_empty "REPOSITORY" "$REPOSITORY" "$PI_FILE"
        check_not_empty "FEATURES" "$FEATURES" "$PI_FILE"
        # p2 accepts trailing commas, so plain concatenation is fine here.
        ALL_REPOSITORIES="${ALL_REPOSITORIES}${REPOSITORY},"
        ALL_FEATURES="${ALL_FEATURES}${FEATURES},"
    done
    run_p2 "$ECLIPSE_DIR" -repository "$ALL_REPOSITORIES" -installIUs "$ALL_FEATURES"
}

# install_dropin ECLIPSE_DIR DROPIN: source dropin-info/<DROPIN>.di (must
# define DROPIN_URL) and download the artifact into the dropins directory.
install_dropin () {
    ECLIPSE_DIR="$1"; shift;
    DROPIN="$1"; shift;
    DI_FILE="$TOOL_INSTALL_PATH/dropin-info/$DROPIN.di"
    check_for_file "$DI_FILE" "Dropin info";
    DROPIN_URL=""
    . "$DI_FILE"
    check_not_empty "DROPIN_URL" "$DROPIN_URL" "$DI_FILE"
    # Subshell keeps the cd local; -O preserves the remote file name.
    (cd $ECLIPSE_DIR/dropins && curl -L -O "$DROPIN_URL")
}

# install_dropins ECLIPSE_DIR DROPINS: install each entry of the
# comma-separated DROPINS list via install_dropin.
install_dropins() {
    ECLIPSE_DIR="$1"; shift;
    DROPINS="$1"; shift;
    for DROPIN in $(echo "$DROPINS" | sed -e 's/,/ /g')
    do
        install_dropin "$ECLIPSE_DIR" "$DROPIN"
    done
}
STARTDIR=$(pwd)
SHOW_CONFIRMATION=1;
PLUGIN="";
DROPIN=""
ECLIPSE_DIR=""
ECLIPSE_BASE_DIR=""
TOOL_INSTALL_PATH=$(dirname $0)
while getopts "h?t:p:d:y" opt; do
case "$opt" in
h|\?)
show_help
;;
y) SHOW_CONFIRMATION=0
;;
t) ECLIPSE_BASE_DIR="$OPTARG";
ECLIPSE_DIR="$ECLIPSE_BASE_DIR/eclipse"
;;
p) PLUGINS="$OPTARG"
;;
d) DROPINS="$OPTARG"
;;
esac
done
if [ -z "$ECLIPSE_DIR" ]
then
echo "Error: no target dir"
show_help
fi
if [ ! -z "$DROPIN" ] && [ ! -z "$PLUGIN" ]
then
echo "Error: -d and -p specified!"
show_help
fi
if [ $SHOW_CONFIRMATION -ne 0 ]
then
show_confirmation "$ECLIPSE_BASE_DIR";
fi
if [ -f "$ECLIPSE_DIR/eclipse" ]
then
echo "Eclipse is already installed => skipping eclipse installation."
else
install_eclipse "$ECLIPSE_BASE_DIR" "$ECLIPSE_DIR"
fi
if [ ! -z "$PLUGINS" ]
then
install_plugins "$ECLIPSE_DIR" "$PLUGINS"
fi
if [ ! -z "$DROPINS" ]
then
install_dropins "$ECLIPSE_DIR" "$DROPINS"
fi
exit 0
| true |
fc2494cb04223eac43aa1d05b0a3ea2c3ec29d03 | Shell | Lytigas/.dotfiles | /bash/components/autojump/compose.sh | UTF-8 | 273 | 3.453125 | 3 | [] | no_license | #!/usr/bin/env bash
set -e
# sources autojump script if it exists
if [ -f "/usr/share/autojump/autojump.sh" ]; then
echo '# Found autojump init'
echo 'source /usr/share/autojump/autojump.sh'
else
echo "# autojump init file not found, skipping"
echo '#'
fi
| true |
b1494f9ea1fb530d11ac6d5f71ceb12634ecc8f2 | Shell | JJS-X/PiOT | /bash/addFichierIP.sh | UTF-8 | 460 | 2.9375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# SUID
files="$(find / -type f -user root -perm -u=s -print 2>/dev/null)"
for obj in $files
do
echo `ls -l $obj` >> /opt/projetmaster-master/ressource/fichierSUID
done
# IP
grep -oP --exclude='lastlog' --exclude='dpkg*' '([01]?\d\d?|2[0-4]\d|25[0-5])\.([01]?\d\d?|2[0-4]\d|25[0-5])\.([01]?\d\d?|2[0-4]\d|25[0-5])\.([01]?\d\d?|2[0-4]\d|25[0-5])' /var/log/*log* 2>/dev/null | cut -d ":" -f 2 | sort -u > /opt/projetmaster-master/ressource/fichierIP | true |
bab7d4160d252ef3c399a63dfde2b3487799d301 | Shell | msys2/MINGW-packages | /mingw-w64-python-trustme/PKGBUILD | UTF-8 | 2,006 | 2.8125 | 3 | [
"BSD-3-Clause"
] | permissive | # Maintainer: Sarah Ottinger <schalaalexiazeal@gmail.com>
_realname=trustme
pkgbase=mingw-w64-python-${_realname}
pkgname=("${MINGW_PACKAGE_PREFIX}-python-${_realname}")
pkgver=1.1.0
pkgrel=1
pkgdesc='Library for fake certificate authority (CA) to generate fake TLS certs (mingw-w64)'
arch=('any')
mingw_arch=('mingw32' 'mingw64' 'ucrt64' 'clang64' 'clang32' 'clangarm64')
url="https://github.com/python-trio/trustme"
license=('spdx:MIT OR spdx:Apache-2.0')
depends=("${MINGW_PACKAGE_PREFIX}-python"
"${MINGW_PACKAGE_PREFIX}-python-cryptography"
"${MINGW_PACKAGE_PREFIX}-python-idna")
checkdepends=("${MINGW_PACKAGE_PREFIX}-python-pytest"
"${MINGW_PACKAGE_PREFIX}-python-pyopenssl"
"${MINGW_PACKAGE_PREFIX}-python-service_identity")
makedepends=("${MINGW_PACKAGE_PREFIX}-python-build"
"${MINGW_PACKAGE_PREFIX}-python-installer"
"${MINGW_PACKAGE_PREFIX}-python-setuptools"
"${MINGW_PACKAGE_PREFIX}-python-wheel")
source=("https://pypi.org/packages/source/${_realname::1}/${_realname}/${_realname}-${pkgver}.tar.gz")
sha256sums=('5375ad7fb427074bec956592e0d4ee2a4cf4da68934e1ba4bcf4217126bc45e6')
prepare() {
rm -rf python-build-${MSYSTEM} | true
cp -r "${_realname}-${pkgver}" "python-build-${MSYSTEM}"
}
build() {
cd "${srcdir}/python-build-${MSYSTEM}"
${MINGW_PREFIX}/bin/python -m build --wheel --skip-dependency-check --no-isolation
}
package() {
cd "${srcdir}/python-build-${MSYSTEM}"
MSYS2_ARG_CONV_EXCL="--prefix=" \
${MINGW_PREFIX}/bin/python -m installer --prefix=${MINGW_PREFIX} \
--destdir="${pkgdir}" dist/*.whl
install -Dm644 LICENSE "${pkgdir}${MINGW_PREFIX}/share/licenses/python-${_realname}/LICENSE"
install -Dm644 LICENSE.APACHE2 "${pkgdir}${MINGW_PREFIX}/share/licenses/python-${_realname}/LICENSE.APACHE2"
install -Dm644 LICENSE.MIT "${pkgdir}${MINGW_PREFIX}/share/licenses/python-${_realname}/LICENSE.MIT"
install -Dm 644 README.rst -t "${pkgdir}${MINGW_PREFIX}/share/doc/python-${_realname}"
}
| true |
f9049196b345198fdc9d1717693d54bcbff1d163 | Shell | Badjeck/BASH | /S3.bash | UTF-8 | 349 | 3.296875 | 3 | [] | no_license | #!/bin/bash
# Download a YouTube URL either as video (format 137) or as audio (format 140).
video() {
    echo "rentrez l'URL"
    read url
    youtube-dl -f137 "$url"
}

musique() {
    echo "rentrez l'URL"
    read url
    youtube-dl -f140 "$url"
}

# Ask whether the user wants the video, then pick the matching format.
echo "veux tu la vidéo [y/n]"
read choix
# Fix: the original used the *arithmetic* operator -eq on strings; inside
# [[ ]] both sides are evaluated arithmetically (unset names become 0), so
# the test was always true and the audio branch was unreachable.
if [[ "$choix" == "y" ]]; then
    video
    exit 1
else
    musique
    exit 1
fi
# NOTE(review): both branches exit with status 1 even on success — looks
# unintentional, but it is preserved in case callers depend on it.
exit 0
| true |
53f4f563c1dc00bcc0fdbab41840a04795d90cfe | Shell | daxer/scripts | /hdclub | UTF-8 | 323 | 2.84375 | 3 | [] | no_license | #!/bin/sh
# Fetch any torrents listed in the hdclub MySQL database that have not yet
# been downloaded into /watch_dir (a torrent client's watch directory).
cd /watch_dir
# NOTE(review): DB and tracker credentials are hard-coded in plain text here
# (and the -p"..." password is visible in `ps` output); they should live in a
# protected ~/.my.cnf or environment file instead.
mysql -h127.0.0.1 -u "root" -p"password" hdclub -e "SELECT tor_num FROM torrents" | grep -v tor_num | \
while read i;
do
    file="$i.torrent"
    # Only download ids we do not already have a .torrent file for.
    if [ ! -e $file ]; then
        wget -qO $file "http://hdclub.org/download.php?id=$i&passkey=??????????????????"
        mv $file /watch_dir/
    fi
done
| true |
a881975f9a88f3d7edee00d40ff235ebbb9eb2cb | Shell | eyedol/tools | /compile-mutt | UTF-8 | 1,308 | 2.65625 | 3 | [] | no_license | #!/bin/bash
echo "Downloading mutt..."
wget -q -O - https://bitbucket.org/mutt/mutt/downloads/mutt-1.5.23.tar.gz | tar xvfz -
pushd mutt-1.5.23
# Apply side bar patch
echo "Applying side bar patch..."
wget -q -O - http://lunar-linux.org/~tchan/mutt/patch-1.5.23.sidebar.20140412.txt | patch -p1
# Apply trash patch
echo "Applying trash patch..."
wget -q -O - http://cedricduval.free.fr/mutt/patches/download/patch-1.5.5.1.cd.trash_folder.3.4 | patch -p1
# Configure Mutt
echo "Enabling needed options..."
./configure --prefix=/usr \
--sysconfdir=/etc \
--with-docdir=/usr/share/doc/mutt-1.5.23 \
--enable-pop \
--enable-imap \
--enable-hcache \
--without-qdbm \
--with-gdbm \
--without-bdb \
--without-tokyocabinet \
--enable-smtp \
--with-ssl \
--with-gnutls \
--with-regex \
--enable-gpgme \
--with-ssl \
--with-sasl
# Build Mutt
echo "Creating needed files for compilation..."
touch configure.ac aclocal.m4 configure Makefile.am Makefile.in
echo "Building..."
make
# Install Mutt
echo "Installing..."
sudo make install
popd
echo "Done!"
| true |
ea265a944b0c6dc778c6abb927b7c0e8440f5238 | Shell | benryandev/wp-dev-bash-setup | /roots | UTF-8 | 667 | 2.53125 | 3 | [] | no_license | #!/bin/bash
cd ~/Local\ Sites/$1/app/
echo Installing Bedrock... Please Wait
composer create-project roots/bedrock
rm -rf public
cd ~/Local\ Sites/$1/app/bedrock/web/wp/wp-content/
rm -rf themes
mkdir themes
cd ~/Local\ Sites/$1/app/bedrock/web/app/themes
composer create-project roots/sage $1
composer require wpackagist-plugin/wordpress-seo
code ~/Local\ Sites/$1/app/bedrock/.env && code ~/Local\ Sites/$1/conf/nginx/site.conf.hbs
echo "****** Change "{{root}}"; To root "/Users/[yourUserNameHere]/Local Sites/projectname/app/bedrock/web"; MAKE SURE TO ADD THE QUOTES TO THE PATH******"
echo "****** Update .env With New DB and Domain Details ******"
| true |
b2f07e0bd750b2331ec6ddea689847be061c6173 | Shell | mikey-/dotfiles | /.function.d/helpers.sh | UTF-8 | 1,886 | 3.546875 | 4 | [] | no_license | #!/usr/bin/env bash
function get_python_version () {
# is_python_project && \
printf " %s " "$($(/usr/bin/command -v python) --version 2>&1 | cut -d ' ' -f 2)";
return 0;
}
function get_ruby_version () {
# is_ruby_project && \ <- this function doesn't actually exist
printf " %s " "$($(/usr/bin/command -v ruby) --version 2>&1 | cut -d ' ' -f 2)";
return 0;
}
# get_go_version: print " <go version> " for the `go` toolchain on PATH.
function get_go_version () {
    # is_go_project && \
    # Fix: the original printed the literal string "<path-to-go> --version)"
    # (missing command substitution plus a stray paren), and go has no
    # --version flag — the version is the third field of `go version`
    # ("go version go1.x.y os/arch").
    printf " %s " "$($(/usr/bin/command -v go) version 2>&1 | cut -d ' ' -f 3)";
    return 0;
}
# get_node_version: version reported by `node -v` (already a bare "vX.Y.Z"
# token, so no field extraction is needed).
function get_node_version() {
    #is_node_project && \
    printf " %s " "$($(/usr/bin/command -v node) -v 2>&1)";
    return 0;
}
# user_info: static prompt segment — the technologist emoji plus a space.
function user_info () {
    local segment
    segment="👨🏽💻 "
    printf '%s' "${segment}"
    return 0
}
# load_aws_env: point the AWS CLI at a per-project (or home) credentials set.
# Chooses AWS_ENV_DIR from the current directory, then — if both .aws/config
# and .aws/credentials exist there — exports AWS_CONFIG_FILE,
# AWS_SHARED_CREDENTIALS_FILE and AWS_DEFAULT_PROFILE.
# Returns 1 when cwd is neither a project dir nor $HOME, or files are missing.
# NOTE(review): depends on externally defined helpers is_pwd_project_dir,
# all_files_exist, the aws-profiles command and PROJECT_BASE_DIR — confirm
# they are sourced before this file is used.
function load_aws_env () {
    if is_pwd_project_dir; then
        # First path component under PROJECT_BASE_DIR is the project name.
        PROJECT_NAME="$(pwd | sed -e "s|${PROJECT_BASE_DIR}/||" | cut -d '/' -f 1)";
        PROJECT_DIR="${PROJECT_BASE_DIR}/${PROJECT_NAME}";
        AWS_ENV_DIR="$PROJECT_DIR";
    elif [ "$(pwd)" == "$HOME" ]; then
        AWS_ENV_DIR="$HOME";
    else
        return 1;
    fi
    local aws_config;
    local aws_credentials;
    aws_config="${AWS_ENV_DIR}/.aws/config";
    aws_credentials="${AWS_ENV_DIR}/.aws/credentials";
    if all_files_exist "${aws_config} ${aws_credentials}"; then
        export AWS_CONFIG_FILE="$aws_config"
        export AWS_SHARED_CREDENTIALS_FILE="$aws_credentials"
        # Default profile = second field of the first "default"-marked line.
        AWS_DEFAULT_PROFILE="$(aws-profiles \
            | grep default \
            | cut -d ' ' -f 2 \
            | head -1)";
        export AWS_DEFAULT_PROFILE;
        return 0;
    fi
    return 1;
}
# get_battery_status: macOS-only — extract the battery percentage from
# `pmset -g batt` and print it after a battery glyph.
# NOTE(review): the regex is pinned to "-InternalBattery-0"; machines that
# report a different battery id will yield empty output — verify.
function get_battery_status () {
    local battery_status;
    # The trailing 's/%/%/g' expression is a no-op kept from the original.
    battery_status=$(pmset -g batt | tail -1 | tr -d '\t' | sed -Ee 's/.*-InternalBattery-0 \(id=[0-9]+\)([0-9]+%);.*/\1/g' -e 's/%/%/g')
    printf "🔋 %s " "$battery_status";
    return 0;
}
# clock: prompt segment — a watch glyph, the current timestamp, a separator.
function clock () {
    local stamp
    stamp=$(date +"%Y-%m-%d %H:%M:%S")
    printf '⌚️ %s | ' "${stamp}"
    return 0
}
| true |
d4c9e1d67fb2e361e56cc5eeee857a032d07a43b | Shell | igor-liferenko/mytex | /printer.evince | UTF-8 | 593 | 2.859375 | 3 | [] | no_license | #!/bin/sh
# Trigger printing on remote host "p" over SSH and report the outcome in a
# zenity popup. The remote `print` command encodes failures in its exit code:
# 10 = printer off, 11 = write() error, 12 = partial write (remote TODO),
# 13 = read() error; any other non-zero status is treated as a network error.
# Popups are backgrounded (&) so the script returns immediately.
ssh p print >/dev/null 2>&1
ret=$?
if [ $ret = 10 ]; then zenity --error --title 'Printing failed' --text 'Printer is not powered on' &
elif [ $ret = 11 ]; then zenity --error --title 'Printing failed' --text 'write() error' &
elif [ $ret = 12 ]; then zenity --error --title 'Printing failed' --text 'TODO: loop until all bytes are written' &
elif [ $ret = 13 ]; then zenity --error --title 'Printing failed' --text 'read() error' &
elif [ $ret != 0 ]; then zenity --error --title 'Printing failed' --text 'Network error' &
else zenity --info --title 'Success' --text 'Printed' & fi
| true |
0ede14b34f2640735a45af6da6c1d1d420b41616 | Shell | minhouse/shell-dev | /datechage.sh | UTF-8 | 757 | 3.390625 | 3 | [] | no_license | #!/bin/bash
# Advance the system date in 5-second steps through the current month,
# starting from day 06. Intended for testing date-rollover behaviour such as
# log-rotation scripts. After use, re-sync the clock against a public NTP
# server, e.g.:
#   ntpdate ntp.nict.go.jp
LANG=C
month=$(date +%b)
day="06"
time="0000"
# Pick the last day of the current month from its abbreviated English name.
# Fix: `date +%b` prints "Jul", never "July" — the original list therefore
# never matched July, which fell through to the 30-day branch.
if [ "$month" = "Jan" -o "$month" = "Mar" -o "$month" = "May" -o "$month" = "Jul" -o "$month" = "Aug" -o "$month" = "Oct" -o "$month" = "Dec" ];then
    end_day=31
elif [ "$month" = "Feb" ];then
    end_day=28
else
    end_day=30
fi
# Reuse $month as the numeric month for building `date MMDDhhmm` arguments.
month=$(date +%m)
while [ "$day" -le "$end_day" ]
do
    day=$(printf %02d "$day")
    DATE_OPTION="${month}${day}${time}"
    echo "date $DATE_OPTION"
    date $DATE_OPTION
    day=$(expr $day + 1)
    sleep 5
done
exit 1 | true |
0febd507d6356ad62e806ee49a9202b2b0b2b7f6 | Shell | Blobfolio/blob-select | /npm_scripts/notify-css.sh | UTF-8 | 408 | 3.015625 | 3 | [
"WTFPL"
] | permissive | #!/bin/bash
#
# NPM: Notify CSS
#
# These are a little too cumbersome to deal with inside NPM.
##

# Check dependencies.
# Abort early with a red "Error:" message if notify-send is unavailable.
command -v notify-send >/dev/null 2>&1 || {
    echo -e "\033[31;1mError:\033[0m notify-send must be in \$PATH."
    exit 1
}

# Desktop notification marking the end of the CSS lint/minify/export task;
# the transient hint plus 3s timeout keep it out of the notification history.
notify-send -i "$( pwd )/npm_scripts/icon.png" --category dev.validate -h int:transient:1 -t 3000 "CSS" "The CSS has been linted, minified, and exported!"

exit 0
| true |
000b2a1c6c6f03857aee59750aa2e379c7a7d648 | Shell | LydiaBer/pheno_study | /analysis/resolved/tools/submit.zsh | UTF-8 | 370 | 2.96875 | 3 | [] | no_license | #!/bin/zsh
data=$1
f=$2
i=0
find -O2 $ADATA/Exotics/HH4b/Ntuples/${data} -type f -name '*.root' -exec du -m {} \; \
| sort -n -r -k 1,1 \
| awk -f fix.awk \
| while read -r files
do
qsub -N HH4b-res-recon-${i} -o logs/stdout-$i.txt -e logs/stderr-$i.txt \
-v data=${data},f=${f},inum=${i},files=${files} run.job
echo Submitted $((i++))
done
| true |
79eb4920ff83abc5979fbb9b0f58195979c9bfc0 | Shell | dannymcc/dotfiles | /scripts/ghgrep | UTF-8 | 333 | 3.078125 | 3 | [] | no_license | #!/bin/bash
# Grep the *contents* of every object stored in the local git repository.
# Usage: ghgrep <grep-args>
{
    # Objects inside packfiles: list their hashes from each pack index.
    find .git/objects/pack/ -name "*.idx" | while read i; do
        git show-index < "$i" | awk '{print $2}';
    done;
    # Loose objects: rebuild the hash from the 2-char directory + file name.
    find .git/objects/ -type f |
        grep -v '/pack/' |
        awk -F'/' '{print $(NF-1)$NF}';
} | while read o; do
    # Dump each object prefixed by its hash. NOTE(review): $o is interpolated
    # into the awk program text — safe for hex hashes, but unusual style.
    git cat-file -p $o | awk "{print \"$o: \" \$0 }";
done | grep $@
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.