blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
490547672f6ae6cf40c06bc4009c598720eb978e
|
Shell
|
adutta/romshare
|
/scripts/validate_zip.sh
|
UTF-8
| 580
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
if [ -z "$1" ]
then
echo please provide a filename
exit 1
fi
UPDATE_BINARY=$(unzip -l $1 | grep META-INF/com/google/android/update-binary)
UPDATE_SCRIPT=$(unzip -l $1 | grep META-INF/com/google/android/update-script)
if [ -z "$UPDATE_BINARY" -a -z "$UPDATE_SCRIPT" ]
then
echo no update-binary or update-script found
exit 1
fi
MODVERSION=$(unzip -p $1 system/build.prop | grep ro.modversion | head -n 1 | cut -d = -f 2)
DEVELOPERID=$(unzip -p $1 system/build.prop | grep ro.rommanager.developerid | head -n 1 | cut -d = -f 2)
echo $MODVERSION
echo $DEVELOPERID
| true
|
9d63b5d370be5daffd88570bd3d92f91906ad4d4
|
Shell
|
hdnes/MavicPro
|
/MavicPro_Scripts/start_dji_system.sh
|
UTF-8
| 9,141
| 3.09375
| 3
|
[] |
no_license
|
#!/system/bin/sh
# Check partitions, try format it and reboot when failed
/system/bin/part_check.sh
#set emmc irq cpu affinity, dw_mci
echo 02 > /proc/irq/66/smp_affinity
echo 02 > /proc/irq/67/smp_affinity
#set ap_dma irq cpu affinity, ap_dma
echo 08 > /proc/irq/34/smp_affinity
/system/bin/wl_link_judge.sh
wl_link_type=$?
if [ $wl_link_type -ge 1 ]; then
setprop wl.link.prefer SDR
else
setprop wl.link.prefer WIFI
fi
#check cp_assert log size, if more then 32KB remove it
#do it before sdrs start, so new log would not be lost
if [ -f /data/dji/log/cp_assert.log ]; then
cp_assert_file_size=`busybox wc -c /data/dji/log/cp_assert.log | busybox awk '{printf $1}'`
if [ $cp_assert_file_size -gt 32768 ]; then
rm -rf /data/dji/log/cp_assert.log
fi
fi
setprop dji.sdrs 1
debug=false
grep production /proc/cmdline >> /dev/null
if [ $? != 0 ];then
debug=true # engineering version, enable adb by default
fi
if $debug; then
/system/bin/adb_en.sh
else
setprop sys.usb.config rndis,mass_storage,bulk,acm
fi
setprop dji.sdrs_log 1
# set ip address one more time to avoid possible lost
ifconfig usb0 192.168.1.10
# rndis
ifconfig rndis0 192.168.42.2
mkdir /var/lib
mkdir /var/lib/misc
echo > /var/lib/misc/udhcpd.lease
busybox udhcpd
# ftp server on all the interface
busybox tcpsvd -vE 0 21 busybox ftpd -w /ftp &
# dump system/upgrade log to a special file
#logcat | grep DUSS\&5a >> /data/dji/log/upgrade.log &
mkdir -p /data/upgrade/backup
mkdir -p /data/upgrade/signimgs
mkdir -p /cache/upgrade/unsignimgs
mkdir -p /data/upgrade/incomptb
# clean up dump files
rm -Rf /data/dji/dump/5
busybox mv /data/dji/dump/4 /data/dji/dump/5
busybox mv /data/dji/dump/3 /data/dji/dump/4
busybox mv /data/dji/dump/2 /data/dji/dump/3
busybox mv /data/dji/dump/1 /data/dji/dump/2
busybox mv /data/dji/dump/0 /data/dji/dump/1
mkdir /data/dji/dump/0
busybox find /data/dji/dump/ -maxdepth 1 -type f | busybox xargs -I '{}' mv {} /data/dji/dump/0/
# CP SDR channel
dji_net.sh uav &
# Start services
export HOME=/data
setprop dji.monitor_service 1
setprop dji.hdvt_service 1
setprop dji.encoding_service 1
setprop dji.system_service 1
###Here we change it to 15s to avoid ssd probe fail issue##
if [ -f /data/dji/cfg/ssd_en ]; then # Disabled by default
i=0
while [ $i -lt 25 ]; do
if [ -b /dev/block/sda1 ]
then
mkdir -p /data/image
mount -t ext4 /dev/block/sda1 /data/image
break
fi
i=`busybox expr $i + 1`
sleep 1
done
fi
setprop dji.vision_service 1
# For debug
debuggerd&
mkdir -p /data/dji/log
mkdir -p /data/dji/cfg/test
# Auto save logcat to flash to help trace issues
if [ -f /data/dji/cfg/field_trail ]; then
# Enable bionic libc memory leak/corruption detection
setprop libc.debug.malloc 10
# Up to 5 files, each file upto 32MB
logcat -f /data/dji/log/logcat.log -r32768 -n4 *:I &
fi
# Capture temperature
#test_thermal.sh >> /data/dji/log/temperature.log &
if [ -f /data/dji/amt/state ]; then
amt_state=`cat /data/dji/amt/state`
fi
# dump system/upgrade log to a special file
rm /data/dji/upgrade_log.tar.gz
upgrade_file_size=`busybox wc -c < /data/dji/log/upgrade00.log`
if [ $upgrade_file_size -gt 2097152 ]; then
mv /data/dji/log/upgrade07.log /data/dji/log/upgrade08.log
mv /data/dji/log/upgrade06.log /data/dji/log/upgrade07.log
mv /data/dji/log/upgrade05.log /data/dji/log/upgrade06.log
mv /data/dji/log/upgrade04.log /data/dji/log/upgrade05.log
mv /data/dji/log/upgrade03.log /data/dji/log/upgrade04.log
mv /data/dji/log/upgrade02.log /data/dji/log/upgrade03.log
mv /data/dji/log/upgrade01.log /data/dji/log/upgrade02.log
mv /data/dji/log/upgrade00.log /data/dji/log/upgrade01.log
else
echo -e "\n\n!!!new file start!!!\n">> /data/dji/log/upgrade00.log
fi
logcat -v threadtime |stdbuf -oL grep DUSS\&63 >> /data/dji/log/upgrade00.log &
env_amt_state=`env amt.state`
if [ "$env_amt_state"x == "factory_out"x ]; then
cpld_dir=/data/dji/amt/factory_out/cpld
mkdir -p $cpld_dir
rm -rf $cpld_dir/log.txt
local r=0
local n=0
while [ $n -lt 3 ]; do
let n+=1
test_fpga /dev/i2c-1 /dev/i2c-1 64 400000 /vendor/firmware/cpld.fw >> $cpld_dir/log.txt
r=$?
if [ $r == 0 ]; then
env -d amt.state
amt_state=factory_out
echo factory > /data/dji/amt/state
break
fi
done
echo $r > $cpld_dir/result
fi
# kill dji_encoding when factory
if [ "$amt_state"x = "factory"x -o "$amt_state"x = "aging_test"x -o "$amt_state"x = "factory_out"x ]; then
# kill encoding service
setprop dji.factory_out 1
fi
if [ "$amt_state"x == "factory"x ]; then
# Need to enable bootarea1 write for enc
echo 0 > /sys/block/mmcblk0boot1/force_ro
fi
# Check whether do auto fs write test
if [ -f /data/dji/cfg/test/fs ]; then
/system/bin/test_fs_write.sh
fi
# WIFI
# Check if usb wifi card is inserted
#RETRY_COUNT=1
#while [ $RETRY_COUNT -ge 0 ]
#do
# busybox lsusb | grep 1022
# if [ $? = 0 ]
# then
# setprop dji.network_service 1
# break
# else
# echo "No wifi usb device" >> /data/dji/log/start_dji_system.log
# busybox lsusb >> /data/dji/log/start_dji_system.log
# sleep 1
# fi
# let RETRY_COUNT-=1
#done
# Check whether do auto sdr test
if [ $wl_link_type -ge 1 ]; then
# enable ip forward for ip stack
echo 1 > /proc/sys/net/ipv4/ip_forward
# only enable forward for 192.168.41.2 RC and 192.168.41.3 GLASS
/system/bin/iptables -A FORWARD -s 192.168.41.2 -d 192.168.41.3 -j ACCEPT
/system/bin/iptables -A FORWARD -s 192.168.41.3 -d 192.168.41.2 -j ACCEPT
# other ip could not be forword
/system/bin/iptables -A FORWARD -i+ -j DROP
fi
if [ -f /data/dji/cfg/amt_sdr_test.cfg ]; then
/system/bin/test_sdr.sh
else
boardid=`cat /proc/cmdline | busybox awk '{for(a=1;a<=NF;a++) print $a}' | busybox grep board_id | busybox awk -F '=' '{print $2}'`
if [ $wl_link_type -eq 0 ]; then
# for baord ap004v2, gpio243 is wifi power control
if [ "$boardid" = "0xe2200042" ]; then
echo 243 > /sys/class/gpio/export
echo 1 > /sys/class/gpio/gpio243/value
sleep 0.2
fi
# hack for ssid=Maverick-xxx
if [ -f /data/misc/wifi/hostapd.conf ]; then
cp /data/misc/wifi/hostapd.conf /data/misc/wifi/hostapd.conf.back
busybox sed -i -e 's|ssid=Maverick|ssid=Mavic|' /data/misc/wifi/hostapd.conf.back
# hack for invalid psk
if [ -f /amt/wifi.config ]; then
cat /amt/wifi.config | grep psk
if [ $? == 0 ]; then
PSK=`cat /amt/wifi.config | grep psk | busybox awk -F '=' '{print $2}'`
busybox sed -i "${line}s:wpa_passphrase=32ee9aa4:wpa_passphrase=$PSK:g" /data/misc/wifi/hostapd.conf.back
fi
fi
mv /data/misc/wifi/hostapd.conf.back /data/misc/wifi/hostapd.conf
fi
# hack for invalid mar addr
if [ -f /amt/WIFI_nvram.txt ]; then
WIFI_NVRAM_SIZE=`busybox wc -c < /amt/WIFI_nvram.txt`
if [ $WIFI_NVRAM_SIZE == 6 ]; then
mount -o remount,rw /system
sleep 1
cp /amt/WIFI_nvram.txt /system/etc/firmware/ath6k/AR6004/hw3.0/softmac.bin
sync
mount -o remount,ro /system
fi
fi
setprop dji.network_service 1
else
# for baord ap004v2, under sdr mode, wifi power shutdown, no load driver
if [ "$boardid" != "0xe2200042" ]; then
/system/bin/load_wifi_modules.sh
fi
fi
fi
# Here we update recovery.img since all the service should be started.
# We could make the recovery.img work before this script exit for some
# service not startup.
/system/bin/recovery_update.sh
env_boot_mode=`env boot.mode`
#no need do next steps in factory mode
if [ "$amt_state"x == "factory"x -o "$amt_state"x == "aging_test"x -o "$env_boot_mode"x == "factory_out"x ]; then
env wipe_counter 0
env crash_counter 0
if [ "$amt_state"x == "aging_test"x ]; then
echo "start aging_test..." > /dev/ttyS1
/system/bin/aging_test.sh
fi
exit 0
fi
# for fatal errors, up to 32MB
logcat -v time -f /data/dji/log/fatal.log -r65536 -n1 *:F &
rm -rf /data/dji/log/fatal01.log
rm -rf /data/dji/log/fatal02.log
rm -rf /data/dji/log/fatal03.log
ps | grep dji_sys
if [ $? != 0 ];then
echo "crash_counter: dji_sys not exist" > /data/dji/log/crash_counter.log
sync
exit -1
fi
ps | grep dji_hdvt_uav
if [ $? != 0 ];then
echo "crash_counter: dji_hdvt_uav not exist" > /data/dji/log/crash_counter.log
sync
exit -1
fi
ps | grep dji_vision
if [ $? != 0 ];then
echo "crash_counter: dji_vision not exist" > /data/dji/log/crash_counter.log
sync
exit -1
fi
ps | grep dji_monitor
if [ $? != 0 ];then
echo "crash_counter: dji_monitor not exist" > /data/dji/log/crash_counter.log
sync
exit -1
fi
ps | grep dji_encoding
if [ $? != 0 ];then
echo "crash_counter: dji_encoding not exist" > /data/dji/log/crash_counter.log
sync
exit -1
fi
env wipe_counter 0
env crash_counter 0
# dump LC1860 state
check_1860_state.sh&
# panic and tombstones check
panic_tombstone_check.sh &
# dump wifi log
# wifi log will be output only when usb inserted
# and there is a wifi.debug file in usb root dir
/system/bin/wifi_debug.sh &
# dump profiled wifi log
/system/bin/wifi_profiled_debug.sh &
# Check whether do auto OTA upgrade test
if [ -f /data/dji/cfg/test/ota ]; then
/system/bin/test_ota.sh
fi
# Check whether do auto reboot test
if [ -f /data/dji/cfg/test/reboot ]; then
sleep 20
reboot
fi
| true
|
65b556b26201498bae34ced4de9fc9628dd18342
|
Shell
|
rgburnett/informixutils
|
/ipc
|
UTF-8
| 2,685
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/sh
: '{{ TOOLDOC 1
/*
* Name : ipc - Informix print columns - print the columns of a table
*
* Usage : ipc database [ -c | -i | -s ] table
*
* Arguements : database - the name of the database containing the table
*
* -c - create table statement
* -s - a select statement
* -i - an insert statement
*
* Description : ipc assists in the composition of preprocess sql files for use
* in the iimport tool.
*
* Notes : Specifics
*
* Perms : 555
*
* Status : SHAREWARE
*
* See Also : iimport(1DB), iexport(1DB)
*
* Sccs Id : %W%
*
* Dated : %D% %T%
*
* Owner : Graeme Burnett
*
* Continuus
*
* Type : %cvtype: %
* Created by : %created_by: %
* Date Created : %date_created: %
* Date Modified: %date_modified: %
* Derived by : %derived_by: %
* File/Version : %filespec: %
*
*/
'
trap '/bin/rm -f $TMP1; exit 0' 0 1 2 3 15
TMP1="/tmp/.imkppfile$$"
Usage()
{
(
cat <<!
ipc database tablename - Prints a list of colum names only
-c tablename - Prints column names and definitions for a create statement
-s tablename - Prints column names and definitions for a select statement
-i tablename - Prints column names and definitions for an insert statement
!
) 1>&2
}
createtable=false
select=false
insert=false
I_DB=$1
shift
if [ "$1" = "-c" ]
then
shift
createtable=true
elif [ "$1" = "-s" ]
then
shift
select=true
elif [ "$1" = "-i" ]
then
shift
insert=true
fi
if [ "$I_DB" = "" ]
then
exec echo "I_DB not set" 1<&2
fi
if $createtable
then
(
echo "CREATE TABLE"
echo " $I_DB:$1"
echo "("
dbaccess $I_DB <<!
info columns for $1
!
echo ");"
) 2</dev/null | sed -e '/^$/d' \
-e 's/ *yes/,/' \
-e 's/ *no/ not null,/' \
-e '/Column name/d' < $TMP1
(
ex $TMP1 <<!
$
-
:s/,$//
w
q
!
) </dev/null 2<&1
elif $insert
then
(
echo "INSERT INTO"
echo " ${I_DB}:${1}"
echo "("
dbaccess ${I_DB} <<!
info columns for $1
!
echo ");"
) 2</dev/null | sed -e '/^$/d' \
-e 's/ .*/,/' \
-e '/Column name/d' < $TMP1
(
ex $TMP1 <<!
$
-
:s/,$//
w
q
!
) </dev/null 2<&1
elif $select
then
(
echo "SELECT"
echo "("
dbaccess $I_DB <<!
info columns for $1
!
echo ")"
) 2</dev/null | sed -e '/^$/d' \
-e 's/ .*/,/' \
-e '/Column name/d' < $TMP1
(
ex $TMP1 <<!
$
-
:s/,$//
w
q
!
) </dev/null 2<&1
else
(
dbaccess $I_DB <<!
info columns for $1
!
) 2</dev/null | sed -e '/^$/d' \
-e 's/ .*/,/' \
-e '/Column name/d' < $TMP1
(
ex $TMP1 <<!
$
:s/,$//
w
q
!
) </dev/null 2<&1
fi
cat $TMP1
exit 0
| true
|
fa57a6cb2509685fa694cdc29d849aa153b41ac0
|
Shell
|
ptomblin/navaid_offsite
|
/load_scripts/check_diffs
|
UTF-8
| 697
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/sh
for i in areaids waypoint comm_freqs runways fix ; do
mv after.$i before.$i
done
dbName=$1
if [ x$1 = x ] ; then
dbName=navaid
fi
psql -U navaid $dbName << EOF > after.areaids
select * from areaids order by areaid;
EOF
psql -U navaid $dbName <<EOF > after.waypoint
select * from waypoint order by id, internalid;
EOF
psql -U navaid $dbName <<EOF > after.comm_freqs
select * from comm_freqs order by internalid, comm_type, frequency, comm_name;
EOF
psql -U navaid $dbName <<EOF > after.runways
select * from runways order by internalid, runway_designation, surface;
EOF
psql -U navaid $dbName <<EOF > after.fix
select * from fix order by internalid,navaid_type, navaid;
EOF
| true
|
f70e7f118fdeb7ffc0596e05a5786e2cb1b67105
|
Shell
|
cvolo4yzhka/ALPS_q0_zte_a476
|
/device/zte/zte_blade_a476/make_rel_key.sh
|
UTF-8
| 564
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# make directory for release key
mkdir security
# jump to root of source
cd ../../..
# add cert info
subject='/C=RU/ST=Tatarstan/L=Almetevsk/O=cvolo4yzhka,Inc./OU=cvolo4yzhka Mobility/CN=cvolo4yzhka/emailAddress=cvolo4yzhka@gmail.com'
# generate all cert and keys,
# on password promt, press enter (empty/none password)
# put key and cert to security folder
for x in releasekey platform shared media networkstack testkey verity verity_key; do \
./development/tools/make_key ./device/zte/zte_blade_a476/security/$x "$subject"; \
done
cd ./device/zte/zte_blade_a476/
| true
|
e27a61988d09b28d559a6c5e1c4f16d4a6f6cc0e
|
Shell
|
willheslam/pdsl
|
/scripts/release
|
UTF-8
| 406
| 3.171875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e
source ./scripts/version-bump.sh
source ./scripts/git-check.sh
source ./scripts/publishing.sh
semver_type=$1
dist_tag=${2:-next}
exit_unless_valid_semver "$semver_type"
exit_unless_valid_branch "master"
exit_unless_clean_git_folder
yarn test
yarn docs && git add -A && git commit -m 'Update docs'
version_bump "$semver_type"
git push --tags && git push
publish_to_dist_tag "$dist_tag"
| true
|
100504a792667b96159ee9a3b20b582ff1965b4d
|
Shell
|
ketan/dot_rc
|
/zsh/zsh.d/lib/aliases.zsh
|
UTF-8
| 618
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/zsh
if [ -z "$PS1" ]; then
return
fi
# some aliases
alias ls='ls -hG'
alias vi='vim'
alias less='less -R'
alias -g ..='..'
alias -g ...='../..'
alias -g ....='../../..'
alias -g .....='../../../..'
alias mysqlstart='sudo mysqld_safe &'
alias mysqlstop='mysqladmin -u root -p shutdown'
alias reload="source ~/.zshrc"
alias grep='grep --color'
alias ls='ls --color=auto -hF'
alias ikill="ps axu | grep Developer | grep -v Xcode.app | grep -v grep | awk '{print \$2}' | xargs kill -9"
path() {
echo $PATH | tr ":" "\n"
}
if [[ $OSTYPE == darwin* ]]; then
pman() { man $1 -t | open -f -a Preview }
fi
| true
|
540fe0a0d1154e139bcf82e4e1c332ac6737ed6f
|
Shell
|
openstack/magnum
|
/magnum/drivers/common/templates/swarm/fragments/network-config-service.sh
|
UTF-8
| 1,967
| 3.59375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
. /etc/sysconfig/heat-params
echo "Configuring ${NETWORK_DRIVER} network ..."
if [ "$NETWORK_DRIVER" != "flannel" ]; then
exit 0
fi
FLANNELD_CONFIG=/etc/sysconfig/flanneld
FLANNEL_CONFIG_BIN=/usr/local/bin/flannel-config
FLANNEL_CONFIG_SERVICE=/etc/systemd/system/flannel-config.service
FLANNEL_JSON=/etc/sysconfig/flannel-network.json
CERT_DIR=/etc/docker
PROTOCOL=https
FLANNEL_OPTIONS="-etcd-cafile $CERT_DIR/ca.crt \
-etcd-certfile $CERT_DIR/server.crt \
-etcd-keyfile $CERT_DIR/server.key"
ETCD_CURL_OPTIONS="--cacert $CERT_DIR/ca.crt \
--cert $CERT_DIR/server.crt --key $CERT_DIR/server.key"
if [ "$TLS_DISABLED" = "True" ]; then
PROTOCOL=http
FLANNEL_OPTIONS=""
ETCD_CURL_OPTIONS=""
fi
sed -i '
/^FLANNEL_ETCD=/ s|=.*|="'"$PROTOCOL"'://'"$ETCD_SERVER_IP"':2379"|
' $FLANNELD_CONFIG
sed -i '/FLANNEL_OPTIONS/'d $FLANNELD_CONFIG
cat >> $FLANNELD_CONFIG <<EOF
FLANNEL_OPTIONS="$FLANNEL_OPTIONS"
EOF
. $FLANNELD_CONFIG
echo "creating $FLANNEL_CONFIG_BIN"
cat > $FLANNEL_CONFIG_BIN <<EOF
#!/bin/sh
if ! [ -f "$FLANNEL_JSON" ]; then
echo "ERROR: missing network configuration file" >&2
exit 1
fi
if ! [ "$FLANNEL_ETCD_ENDPOINTS" ] && [ "$FLANNEL_ETCD_PREFIX" ]; then
echo "ERROR: missing required configuration" >&2
exit 1
fi
echo "creating flanneld config in etcd"
while ! curl -sf -L $ETCD_CURL_OPTIONS \
$FLANNEL_ETCD/v2/keys${FLANNEL_ETCD_PREFIX}/config \
-X PUT --data-urlencode value@${FLANNEL_JSON}; do
echo "waiting for etcd"
sleep 1
done
EOF
cat > $FLANNEL_CONFIG_SERVICE <<EOF
[Unit]
After=etcd.service
Requires=etcd.service
[Service]
Type=oneshot
EnvironmentFile=/etc/sysconfig/flanneld
ExecStart=$FLANNEL_CONFIG_BIN
[Install]
WantedBy=multi-user.target
EOF
chown root:root $FLANNEL_CONFIG_BIN
chmod 0755 $FLANNEL_CONFIG_BIN
chown root:root $FLANNEL_CONFIG_SERVICE
chmod 0644 $FLANNEL_CONFIG_SERVICE
systemctl enable flannel-config
systemctl start --no-block flannel-config
| true
|
3f944c6cfce7c242366d5b69738f98138e5417a0
|
Shell
|
goncalomb/acestream-rpi
|
/setup-lirc.sh
|
UTF-8
| 947
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
set -e
if command -v lircd >/dev/null; then
echo "lirc already installed, run 'sudo apt-get purge -y lirc' to remove"
exit 1
fi
# install lirc
sudo apt-get update
sudo apt-get install -y lirc
# enable lirc-rpi module
sudo sed -i 's/^#dtoverlay=/dtoverlay=/g' /boot/config.txt
# configure lirc
sudo python3 << EOF
import configparser;
parser = configparser.ConfigParser()
with open('/etc/lirc/lirc_options.conf', 'r') as fp:
parser.read_file(fp)
parser.set('lircd', 'driver', 'default')
parser.set('lircd', 'device', '/dev/lirc0')
if not parser.has_section('lircd-uinput'):
parser.add_section('lircd-uinput')
parser.set('lircd-uinput', 'add-release-events', 'True')
parser.set('lircd-uinput', 'release-timeout', '150')
parser.set('lircd-uinput', 'repeat', '700,50')
with open('/etc/lirc/lirc_options.conf', 'w') as fp:
parser.write(fp)
EOF
echo "REBOOT in 5 seconds, CTRL-C to cancel"
sleep 5
sudo reboot
| true
|
e9981d55596bf2d5930ce2896771f54237ccaeeb
|
Shell
|
delfas/bash-home
|
/.profile
|
UTF-8
| 1,515
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# This file runs once at login.
# Add all local binary paths to the system path.
export PATH="$PATH:$HOME/.local/bin:/usr/local/go/bin"
# set PATH so it includes user's private bin if it exists
if [ -d "$HOME/bin" ] ; then
PATH="$HOME/bin:$PATH"
fi
# ~/.profile: executed by the command interpreter for login shells.
# This file is not read by bash(1), if ~/.bash_profile or ~/.bash_login
# exists.
# see /usr/share/doc/bash/examples/startup-files for examples.
# the files are located in the bash-doc package.
# the default umask is set in /etc/profile; for setting the umask
# for ssh logins, install and configure the libpam-umask package.
#umask 022
# Default programs to run.
export EDITOR="vim"
# Add colors to the less and man commands.
export LESS=-R
export LESS_TERMCAP_mb="$(printf '%b' '[1;31m')"; a="${a%_}"
export LESS_TERMCAP_md="$(printf '%b' '[1;36m')"; a="${a%_}"
export LESS_TERMCAP_me="$(printf '%b' '[0m')"; a="${a%_}"
export LESS_TERMCAP_so="$(printf '%b' '[01;44;33m')"; a="${a%_}"
export LESS_TERMCAP_se="$(printf '%b' '[0m')"; a="${a%_}"
export LESS_TERMCAP_us="$(printf '%b' '[1;32m')"; a="${a%_}"
export LESS_TERMCAP_ue="$(printf '%b' '[0m')"; a="${a%_}"
# If bash is the login shell, then source ~/.bashrc if it exists.
echo "$0" | grep "bash$" >/dev/null && [ -f "$HOME/.bashrc" ] && source "$HOME/.bashrc"
# set PATH so it includes user's private bin if it exists
if [ -d "$HOME/.local/bin" ] ; then
PATH="$HOME/.local/bin:$PATH"
fi
eval $(ssh-agent)
| true
|
631d0141b1bdf55451f68fb18294fd4a1ffc1881
|
Shell
|
redmatter/docker-bamboo-backup
|
/s3sync.sh
|
UTF-8
| 655
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
: ${AWS_ACCESS_KEY:?"AWS Access key not specified (AWS_ACCESS_KEY)"}
: ${AWS_SECRET_KEY:?"AWS Secret key not specified (AWS_SECRET_KEY)"}
: ${AWS_S3_BUCKET:?"AWS Bucket name not specified (AWS_S3_BUCKET)"}
: ${AWS_S3_BUCKET_PATH:=}
: ${DEBUG:=0}
[ "$DEBUG" = 1 ] && set -x;
sed -i "s~{{AWS_ACCESS_KEY}}~${AWS_ACCESS_KEY}~g;
s~{{AWS_SECRET_KEY}}~${AWS_SECRET_KEY}~g;" \
${BAMBOO_BACKUP_HOME}/s3cfg.ini
if [ -n "${AWS_S3_BUCKET_PATH}" ]; then
AWS_S3_BUCKET_PATH=$(readlink -m "/${AWS_S3_BUCKET_PATH}/")
fi
exec s3cmd --config=${BAMBOO_BACKUP_HOME}/s3cfg.ini sync /bamboo-backups/ s3://${AWS_S3_BUCKET}${AWS_S3_BUCKET_PATH}/
| true
|
bce870c4c85e675e6f735a11a00f2fa5479d1ebe
|
Shell
|
seppinho/scripts
|
/imputation/bin/vcf2bgen.sh
|
UTF-8
| 139
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
for f in *vcf.gz
do
echo "Processing $f"
base="$(basename $f .vcf.gz)"
plink2 --vcf $f --export bgen-1.3 --out $base &
done
| true
|
ca769e68af03d8c11217112d91b87a92611babcc
|
Shell
|
mencaribug/oroshi
|
/scripts/install/web/ruby
|
UTF-8
| 741
| 2.640625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# shellcheck disable=SC1091
set -ex
sudo apt-get install curl \
libcurl4 \
libcurl4-openssl-dev
# Install rvm
# This should automatically install all needed Ubuntu dependencies
gpg --import "/home/$USER/.oroshi/private/config/pgp/mpapis.asc"
curl -L https://get.rvm.io | bash -s stable --ruby
# Don't forget to add the following line in ~/.zshrc
# [[ -r $HOME/.rvm/scripts/rvm ]] && source $HOME/.rvm/scripts/rvm
source /home/tim/.rvm/scripts/rvm
# Installing Ruby
rvm install 2.5.1
rvm use 2.5.1
gem install bundler
# Automatic prepending of bundle exec
gem install rubygems-bundler
# Linter
gem install rubocop
# Awesome print, for better debug
gem install awesome_print
| true
|
5720175b516c284c81c1fac1e65eda4b059751b4
|
Shell
|
haiwen/seafile-rpi
|
/build-batch.sh
|
UTF-8
| 3,548
| 3.96875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Set the version which needs to be build
VERSION=${1:-'9.0.9'}
echo "Get the current build script"
wget -O build.sh https://raw.githubusercontent.com/haiwen/seafile-rpi/master/build.sh
chmod u+x build.sh
# Set the arch name for the armhf distros
sysArch=$(arch)
[ "$sysArch" == "aarch64" ] && archhfName='armv8l' || archhfName='armv7l'
declare -A lxcDistroMap=(["bullseye"]="debian/11/" ["buster"]="debian/10/" ["stretch"]="debian/9/" ["kinetic"]="ubuntu/22.10/" ["jammy"]="ubuntu/22.04/" ["focal"]="ubuntu/20.04/" ["bionic"]="ubuntu/18.04/")
# Assign the distros which need to be build
configLxcDistros=("jammy" "focal" "bionic" "bullseye" "buster")
configLxcArchs=("armhf")
if [[ "$sysArch" == "aarch64" ]]; then
# Only add arm64 if system supports it
configLxcArchs+=("arm64")
fi
lxcContainers=()
for lxcArch in "${configLxcArchs[@]}"; do
for lxcDistro in "${configLxcDistros[@]}"; do
lxcContainers+=("${lxcDistro}-${lxcArch}")
done
done
echo "Building following distributions and architectures: "
echo "${lxcContainers[@]}"
# Execute the builds
for container in "${lxcContainers[@]}"; do
archShort=${container#*-}
distroName=${container%-*}
[ "$archShort" == "arm64" ] && architecture='aarch64' || architecture=$archhfName
echo -e "\n######################################################\n"
echo "Distribution: $distroName"
echo "Architecture: $architecture"
exists=false
{
lxc info $container &&
exists=true
}
if $exists; then
echo "Starting existing Lxc image $container"
lxc start $container
else
echo "Launching Lxc images:${lxcDistroMap[$distroName]}$archShort $container"
lxc launch images:"${lxcDistroMap[$distroName]}"$archShort $container
fi
if ! lxc exec $container -- /bin/bash -c "sudo -V" &>/dev/null; then
echo "Install 'sudo'"
lxc exec $container -- apt install sudo
fi
if ! lxc exec $container -- id seafile &>/dev/null; then
echo "Add 'seafile' as user"
lxc exec $container -- useradd -m -s /bin/bash seafile
fi
if ! lxc exec $container -- /bin/bash -c "sudo -l -U seafile" &>/dev/null; then
echo "Give 'seafile' super user privileges"
lxc exec $container -- /bin/bash -c "echo 'seafile ALL=(ALL) NOPASSWD: ALL' | sudo EDITOR='tee -a' visudo"
fi
echo "Building for container: $container"
lxc file push build.sh $container/home/seafile/
NETWORK_ATTEMPTS=0
while [ "$(lxc exec ${container} -- bash -c 'hostname -I' 2>/dev/null)" = "" ]; do
((NETWORK_ATTEMPTS++))
echo -e "\e[1A\e[KNo network available in $container (attempt $NETWORK_ATTEMPTS): $(date)"
if [ $NETWORK_ATTEMPTS -gt 120 ]; then
continue 2
fi
sleep .5
done
echo -e "\e[1A\e[KNetwork available in $container"
echo "Upgrade container packages: $container"
lxc exec $container -- apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y upgrade
echo "Execute build.sh for $container"
lxc exec $container -- su - seafile -c "sudo ./build.sh -DTA -v $VERSION \
-h https://raw.githubusercontent.com/haiwen/seafile-rpi/master/requirements/seahub_requirements_v${VERSION}.txt \
-d https://raw.githubusercontent.com/haiwen/seafile-rpi/master/requirements/seafdav_requirements_v${VERSION}.txt"
filename=$(lxc exec $container -- bash -c "ls /home/seafile/built-seafile-server-pkgs/seafile-server-$VERSION-*.tar.gz" 2>/dev/null)
lxc file pull "$container$filename" ./
echo -e "Build finished for container $container\n\n"
lxc stop $container
done
echo "Building distros finished"
| true
|
e228a449626a8d29e8bc44227f7f0d9ce7940e74
|
Shell
|
prathapreddy123/GoogleCloud
|
/helpers/pr-validations.sh
|
UTF-8
| 1,257
| 3.09375
| 3
|
[] |
no_license
|
set -ex
: '
When raised PR 17 from mkv/feature branch to merge on to master of
prathapreddy123/GoogleCloud repo below are values:
PROJECT_ID=prathap-poc
COMMIT_SHA=73434e300319db035d61783f6a2367a015992fee
SHORT_SHA=73434e3
REPO_NAME=GoogleCloud
BRANCH_NAME=feature
HEAD_BRANCH=feature
BASE_BRANCH=master
HEAD_REPO_URL=https://github.com/monikaduv/GoogleCloud
PR_NUMBER=17
'
echo "*** Printing all variables ***"
echo "PROJECT_ID=${PROJECT_ID}"
echo "COMMIT_SHA=${COMMIT_SHA}"
echo "SHORT_SHA=${SHORT_SHA}"
echo "REPO_NAME=${REPO_NAME}"
echo "BRANCH_NAME=${BRANCH_NAME}"
echo "HEAD_BRANCH=${HEAD_BRANCH}"
echo "BASE_BRANCH=${BASE_BRANCH}"
echo "HEAD_REPO_URL=${HEAD_REPO_URL}"
echo "PR_NUMBER=${PR_NUMBER}"
git clone "${BASE_REPO_URL}" --branch ${BASE_BRANCH} --single-branch
echo "Repo cloned successfully"
cd "${REPO_NAME}"
git config user.email "presubmit@example.com"
git config user.name "presubmit"
git fetch origin refs/pull/${PR_NUMBER}/head:validate#${PR_NUMBER}
echo "pull ref created"
git checkout validate#${PR_NUMBER}
#merge --ff-only rebase
if ! git rebase "origin/${BASE_BRANCH}"
then
echo "PR#${PR_NUMBER} cannot be rebased automatically. Resolve conflicts manually"
exit 1
fi
echo "PR#${PR_NUMBER} can be rebased successfully on ${BASE_BRANCH}."
| true
|
407054497ab4d9446147c3bfe55db35d6a213b2c
|
Shell
|
stanojevic/BadParser
|
/src/scripts/embeddings_preprocessing/run_train_w2v.sh
|
UTF-8
| 538
| 2.515625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
vsizes=(32 64 128 256 512)
wsizes=(8 16)
useCBOWs=(0 1)
for vsize in "${vsizes[@]}"; do
for wsize in "${wsizes[@]}"; do
for useCBOW in "${useCBOWs[@]}"; do
X=vsize_${vsize}_wsize_${wsize}_cbow_${useCBOW}
echo "Running: $X";
sbatch -c 16 -J w2v$X -e log.${X}.err -o log.${X}.out --wrap="./word2vec -train ../../../de/full.txt -output ${X}.bin -cbow $useCBOW -size $vsize -window $wsize -negative 25 -hs 0 -sample 1e-4 -threads 15 -binary 1 -iter 15"
#./distance vectors.bin
sleep 1s
done;
done;
done;
| true
|
3d61a9d3da98fc3f7b49502f60cb21802099560a
|
Shell
|
stoneneedle/compcfg
|
/jimbocfg.sh
|
UTF-8
| 1,108
| 3.21875
| 3
|
[] |
no_license
|
#/bin/bash
# jimbocfg.sh :: config for a new OS
# Includes commonly used apps, and assumes Ubuntu 18.04 system.
# Will configure a few date/time features and style features for Xubuntu
# Package string for basic installations
pkg_str="cmatrix tmux vim tree htop unzip monit git curl wget ansible python"
# TBA: sshd, mysql or postgresql, apache2, php
# Update
sudo apt-get update && sudo apt-get upgrade
# Initial app install
if [ "$1" = "srv" ]; then
# Server build
sudo apt-get install $pkg_str
else
# Basic installs for a dev/home environment
sudo apt-get install $pkg_str
# Slightly more finicky/involved installs
# Astrill
sudo dpkg -i astrill.deb
# Chrome
sudo dpkg -i chrome.deb
#Pip & jrnl
sudo apt-get install python-pip
pip install jrnl[encrypted]
# Dropbox
# Sublime 3
wget -qO - https://download.sublimetext.com/sublimehq-pub.gpg | sudo apt-key add -
sudo apt-get install apt-transport-https
echo "deb https://download.sublimetext.com/ apt/stable/" | sudo tee /etc/apt/sources.list.d/sublime-text.list
sudo apt-get update
sudo apt-get install sublime-text
fi
exit 0
| true
|
88971fde3ecbd8dd6d71dd7843f0ba6a706876b4
|
Shell
|
TheBITLINK/dotfiles
|
/.bashrc
|
UTF-8
| 3,386
| 3.46875
| 3
|
[] |
no_license
|
#
# ~/.bashrc
#
# If not running interactively, don't do anything
[[ $- != *i* ]] && return
alias ls='ls --color=auto'
# ye olde prompt
export PS1=" \[\033[38;5;250m\]$(if [[ -z $i ]] ; then i=$(tput cols) ; while (( i-- > 8 )) ; do echo -n '—' ; done ; echo -n " " ; unset i ; fi)\[\033[38;5;15m\]\A\n[\[$(tput sgr0)\]\[\033[38;5;154m\]\u\[$(tput sgr0)\]\[\033[38;5;15m\]@\h \[$(tput sgr0)\]\[\033[38;5;14m\]\w\[$(tput sgr0)\]\[\033[38;5;15m\]]\\$ \[$(tput sgr0)\]"
# git functions
# copy/paste from https://github.com/jimeh/git-aware-prompt/blob/master/prompt.sh
find_git_branch() {
# Based on: http://stackoverflow.com/a/13003854/170413
local branch
if branch=$(git rev-parse --abbrev-ref HEAD 2> /dev/null); then
if [[ "$branch" == "HEAD" ]]; then
branch='detached*'
fi
git_branch="$branch"
else
git_branch=""
fi
}
find_git_dirty() {
local status=$(git status --porcelain 2> /dev/null)
if [[ "$status" != "" ]]; then
git_dirty='y'
else
git_dirty=''
fi
}
# "dynamic" prompt
export PROMPT_COMMAND=__prompt_command
function __prompt_command() {
local EXIT="$?"
find_git_branch;
find_git_dirty;
local RCol='\[\e[0m\]'
local Lime='\[\033[38;5;154m\]'
local Orange='\[\033[38;5;208m\]'
local Cyan='\[\033[38;5;51m\]'
local Mag='\[\033[38;5;200m\]'
local White='\[\033[38;5;231m\]'
local Blue='\[\033[38;5;33m\]'
local Yellow='\[\033[38;5;11m\]'
local DarkGray='\[\033[38;5;247m\]'
# Title
PS1="\[\033]0;\u@\H: \W\007\] ${DarkGray}"
if [[ -z $i ]]; then
i=$(tput cols)
while (( i-- > 8 )); do
PS1+='—'
done;
PS1+="${White} \A\n"
unset i
fi
# Store Line 2 in a separate variable
local LINE2=""
# Username
if [ "$(id -u)" != "0" ]; then
# Green for users
LINE2+="[${Lime}\u${White}@\H ${Cyan}\W${White}"
else
# Orange for root
LINE2+="[${Orange}\u${White}@\H ${Cyan}\W${White}"
fi
# git branch
if [ $git_branch ]; then
if [ $git_dirty ]; then
LINE2+=" ${Yellow}${git_branch}${White}"
else
LINE2+=" ${Blue}${git_branch}${White}"
fi
fi
# exit code if not 0
if [ $EXIT != 0 ]; then
LINE2+="${Mag} ${EXIT}${White}"
fi
LINE2+="]"
# Newline if LINE2 is 2big
local cols=$(tput cols)
if [ ${#LINE2} -gt $(($cols+60)) ]; then
PS1+="${LINE2}\n"
else
PS1+=$LINE2
fi
PS1+="\\$ ${White}\[$(tput sgr0)\]"
}
# Aliases
# Downloads a video as mp3
alias ytmp3="youtube-dl -f bestaudio -x --audio-format mp3 --default-search ytsearch"
# Downloads a video as ogg
alias ytogg="youtube-dl -f bestaudio -x --audio-format vorbis --default-search ytsearch"
alias startx="startx ~/.xinitrc"
alias waifu2x="waifu2x-converter-cpp -j 2"
# Creates a directory and CDs into it
mkd () {
mkdir -p $1 && cd $1
}
# Starts GNOME on Wayland from a tty
startwayland() {
    # Make Qt applications use their Wayland backend instead of X11
    export QT_QPA_PLATFORM=wayland
    # Run gnome-session inside a fresh D-Bus session, advertising a
    # Wayland session type to XDG-aware components
    XDG_SESSION_TYPE=wayland dbus-run-session gnome-session
}
# Random Password
# gen_pass [length]
# Print a random password of the given length (default 100) drawn
# from the set [A-Za-z0-9_-], using /dev/urandom as entropy source.
# Fixes vs. the original: 'length' no longer leaks as a global, the
# useless echo-of-substitution (which word-split the output) is gone,
# and the redundant ${1:-$length} fallback is collapsed.
gen_pass(){
    local length="${1:-100}"
    # tr -dc deletes every byte NOT in the set; head trims to size.
    # LC_ALL=C keeps tr byte-wise regardless of locale.
    LC_ALL=C tr -dc '_A-Z-a-z-0-9' < /dev/urandom | head -c "$length"
    echo
}
# The colorrrrrrssssss
export TERM=xterm-256color
NPM_PACKAGES="${HOME}/.npm-packages"
PATH="$NPM_PACKAGES/bin:$PATH"
| true
|
34d84cbd806cae4210807a3c4d48821591a9ebf2
|
Shell
|
KokoTa/ts-axios
|
/release.sh
|
UTF-8
| 571
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
# Interactive release helper: prompt for a version, confirm with a
# single keystroke, then (once the commented steps are enabled) tag,
# push and publish the package.
set -e
echo "Enter release version: "
read VERSION
# -p prints the prompt text before reading input
# -n 1 accepts only the first character typed
# -r keeps backslashes literal (no escape processing)
read -p "Release $VERSION - are you sure(y/n)" -n 1 -r
echo # jump to a new line
if [[ $REPLY =~ ^[Yy]$ ]]
then
  echo "Releasing $VERSION ..."
  # commit
  # git add .
  # git commit -m "[build] $VERSION"
  # npm version $VERSION --message "[release] $VERSION"
  # git push origin master
  # npm publish ships only the dist folder; see the "files" field of package.json
  # publish
  # npm publish
  echo "Release success!"
fi
| true
|
623cd946c5f415a3515bf8a34a8351c81270e98d
|
Shell
|
RuthPetrie/uor
|
/TRACK-1.4.3/converters/conv_pp_nc_annual.sh
|
UTF-8
| 1,273
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/ksh
# Convert UM .pp output files to NetCDF with TRACK's cat_conv.tcl.
# The four file series (xhuiea.pci/pcj/pck/pcl) previously had four
# copies of the same loop; a single loop over all four glob patterns
# processes them in the same order with no duplication.
infileloc='/export/quince/data-05/wx019276/um_expts/iceonly/output/'
outfileloc='/export/quince/data-05/wx019276/um_expts/iceonly/trackdata/'
cd /export/quince/data-05/wx019276/um_expts/iceonly/output/
#infile=$infileloc'xhuiea.pck[0-1]*.pp '$infileloc'xhuiea.pck[2-3]*.pp '$infileloc'xhuiea.pck[4-5]*.pp '$infileloc'xhuiea.pck[6-7]*.pp '$infileloc'xhuiea.pck[8-9]*.pp '
#infile='xhuiea.pck[0-5]*.pp '
#infile=$infileloc'xhuiea.pck[8-9]*.pp '$infileloc'xhuiea.pcj[6-9]*.pp '
#outfile=$outfileloc'winds_1996_2012.nc'
for file in xhuiea.pci[1-8]*.pp xhuiea.pcj*.pp xhuiea.pck*.pp xhuiea.pcl*.pp
do
   infile=$file
   # Replace the trailing "pp" suffix with "nc" for the output name
   outfile=${file%pp}nc
   print $infile
   print $outfile
   ~/TRACK-1.4.3/converters/cat_conv.tcl -i $infile -o $outfileloc$outfile
done
| true
|
4c6aae356cdb4b38235a3fe729a3bc15a9717f38
|
Shell
|
IbrahimMohammed47/todoist-replica
|
/createJars.sh
|
UTF-8
| 209
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build every module's jar with Maven (tests skipped).
# Abort on the first failure instead of silently continuing.
set -e
dirs=(server controller todoist-tasks todoist-user todoist-chat todoist-reports)
for d in "${dirs[@]}"
do
    # Build in a subshell so the working directory is always restored;
    # '&&' stops mvn from running in the wrong place when cd fails
    # (the original unchecked 'cd $d' would rebuild the parent dir).
    ( cd "$d" && mvn install -DskipTests )
done
| true
|
42b4108d8314a879ba88e5f320da48a8dec1f04a
|
Shell
|
drexljo/folio-projects
|
/folio-buildhelpers/systemd_dep_active.sh
|
UTF-8
| 1,071
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Execute in the parent directory where all module directories reside
# This script is quick, dirty and just useful to rapidly create a bunch of files
#
# For every folio module descriptor found, writes
# folio-buildhelpers/<module>.dep_active containing systemd
# After=/Wants= lines for postgres, the network target, okapi, and
# every module that provides an API this module requires.
#
# File descriptors 6 (module list), 5 (required APIs) and 7
# (providing modules) are used instead of stdin so the three nested
# read loops do not consume each other's input.
while read -u 6 LINE
do
  # Module directory name -> scratch file under folio-buildhelpers/
  SYSTEMD="folio-buildhelpers/$(echo "$LINE" | cut -d "/" -f 1 -s)"
  echo -e "After=postgres\nWants=postgres\nAfter=network-online.target\nRequires=network-online.target\nAfter=folio-okapi\nWants=folio-okapi" > $SYSTEMD.tmp
  while read -u 5 REQUIRES
  do
    # Skip private interfaces (leading '_') and blank lines
    if [ "${REQUIRES:0:1}" = "_" -o "$REQUIRES" = "" ]
    then
      continue
    fi
    while read -u 7 FILE
    do
      if [ "$FILE" = "" ]
      then
        continue
      fi
      echo -e "After=$FILE\nWants=$FILE" >> $SYSTEMD.tmp
    done 7<<<$(grep "$REQUIRES " folio-buildhelpers/apis.list | cut -d " " -f 2 -s | cut -d "@" -f 1 -s)
  done 5<<<$(folio-buildhelpers/get_apis.py -i $LINE -r)
  # De-duplicate the accumulated After=/Wants= lines
  sort -u $SYSTEMD.tmp > $SYSTEMD.dep_active
  rm $SYSTEMD.tmp
done 6<<<$(ls -1 folio-mod-*/orig-packaging/module_descriptor.json)
| true
|
40f9a6220d1512d3bc241a896283610560921996
|
Shell
|
digraj/Python
|
/364/Prelab01/.svn/text-base/sum.bash.svn-base
|
UTF-8
| 202
| 3.015625
| 3
|
[] |
no_license
|
#! /bin/bash
#$Author$
#$Date$
#$Revision$
#$HeadURL$
#$id$
# Sum all integer command-line arguments and print the total.
total=0
for term in "$@"
do
  total=$((total + term))
done
echo "$total"
exit 0
| true
|
dc35458b7874cd98685618807949dbd43204670a
|
Shell
|
MengZhang/dev_instance
|
/mnt/galaxyTools/apsim_ria/7.5/apsim_plus_batch.sh
|
UTF-8
| 3,005
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
JsonInput=$1
CultivarInput=$2
AcmoOutput=$3
apsimInput=$4
apsimOutput=$5
echo JsonInput: $JsonInput
echo CultivarInput: $CultivarInput
echo AcmoOutput: $AcmoOutput
echo apsimInput: $apsimInput
echo apsimOutput: $apsimOutput
echo Running in $PWD
# Setup QuadUI
#INSTALL_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
INSTALL_DIR=/mnt/galaxyTools/quadui/1.3.3
quadui=quadui-1.3.4-beta1.jar
ln -sf $INSTALL_DIR/$quadui
# Setup ACMOUI
INSTALL_DIR=/mnt/galaxyTools/acmoui/1.2
acmoui=acmoui-1.2-SNAPSHOT-beta7.jar
ln -sf $INSTALL_DIR/$acmoui .
# Setup APSIM model
tar xfz /mnt/galaxyTools/apsim_ria/7.5/apsim.tar.gz
APSIMDIR=$PWD
export LD_LIBRARY_PATH=$APSIMDIR:$APSIMDIR/Model:$APSIMDIR/Files:$LD_LIBRARY_PATH
export PATH=$APSIMDIR:$APSIMDIR/Model:$APSIMDIR/Files:$PATH
# Prepare JSON files
cp -f $JsonInput $PWD/json.zip
unzip -o -q json.zip -d json/
mkdir result
cd json
for file in *.json; do
{
filename="${file%.*}"
mkdir ../result/$filename
cp -f $filename.json ../result/$filename/1.json
}
done
cd ..
# Prepare Cultivar files
if [ "$CultivarInput" != "N/A" ]
then
cp -f $CultivarInput $PWD/cul.zip
unzip -o -q cul.zip -d cul/
if [ -d "$PWD/cul/apsim_specific" ]
then
echo "Found APSIM cultivar files correctly"
else
echo "[Warn] Could not find apsim_specific diretory in the cultivar package, will using default cultivar loaded in the system"
fi
fi
# Loop all the input JSON file
cd result
for dir in */; do
{
cd $dir
batchId=${dir%/}
# Run QuadUI
java -jar ../../$quadui -cli -clean -n -A $PWD/1.json $PWD
# Setup Cultivar files
if [ -d "../../cul/apsim_specific" ]
then
cp -f ../../cul/apsim_specific/* $PWD/APSIM/.
fi
# Generate output zip package for APSIM input files
cd APSIM
zip -r -q ../../retIn_$batchId.zip *
cd ..
# Setup APSIM model
cp -f $PWD/APSIM/* .
# Run APSIM model
#mono Model/Apsim.exe AgMip.apsim >> log.out 2>/dev/null
mono ../../Model/ApsimToSim.exe AgMip.apsim 2>/dev/null
tmp_fifofile="./control.fifo"
mkfifo $tmp_fifofile
exec 6<>$tmp_fifofile
rm $tmp_fifofile
thread=`cat /proc/cpuinfo | grep processor | wc -l`
echo "detect $thread cores, will use $thread threads to run APSIM"
for ((i=0;i<$thread;i++));do
echo
done >&6
for file in *.sim; do
{
read -u6
filename="${file%.*}"
../../Model/ApsimModel.exe $file >> $filename.sum 2>/dev/null
echo >&6
} &
done
wait
exec 6>&-
# Generate the output zip package for APSIM output files
mkdir output
mv -f *.out output
mv -f *.sum output
mv -f ACMO_meta.dat output
cd output
zip -r -q ../../retOut_$batchId.zip *
cd ..
# Run ACMOUI
java -Xms256m -Xmx512m -jar ../../$acmoui -cli -apsim "$PWD/output"
cp -f output/*.csv ../$batchId.csv
cd ..
}
done
# Setup outputs
zip -r -q retIn.zip retIn_*
cp retIn.zip $apsimInput
zip -r -q retOut.zip retOut_*
cp retOut.zip $apsimOutput
zip -r -q acmo.zip *.csv
cp acmo.zip $AcmoOutput
cd ..
exit 0
| true
|
8d650a676906cb6623f824dbf7655ccb93f1b70f
|
Shell
|
DSouzaM/mesh-testsuite
|
/time-analysis/timelarson.sh
|
UTF-8
| 268
| 3.015625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Time the larson benchmark either against the system malloc or
# against an LD_PRELOAD-ed allocator.
#   $1 - allocator name ("malloc" selects the libc allocator)
#   $2 - file that receives the `time -p` output (stderr)
malloclib=$1
tmpfile=$2
echo "$malloclib"
# Quote both operands: with the original unquoted $malloclib an
# empty first argument made [ ... ] a syntax error instead of a
# clean string comparison.
if [ "$malloclib" == "malloc" ]
then
	echo "reached malloc"
	{ time -p ./larson 10 7 8 10000 10000 1 16 ; } 2> "$tmpfile"
else
	echo "reached $malloclib"
	{ time -p ./larson-ldpreload.sh "$malloclib" ; } 2> "$tmpfile"
fi
| true
|
bc25d0b794584c996fbb520f251a05ad19fca9f2
|
Shell
|
Jfprado11/holberton-system_engineering-devops
|
/0x04-loops_conditions_and_parsing/10-fizzbuzz
|
UTF-8
| 297
| 3.3125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# fizz buzz in bash
# Print 1..100: multiples of 3 become "Fizz", multiples of 5 become
# "Buzz", multiples of both become "FizzBuzz".
for (( a = 1; a <= 100; a++ ))
do
	# Encode divisibility as a bitmask: 1 => by 3, 2 => by 5, 3 => both
	case $(( (a % 3 == 0 ? 1 : 0) + (a % 5 == 0 ? 2 : 0) )) in
		3) echo "FizzBuzz" ;;
		2) echo "Buzz" ;;
		1) echo "Fizz" ;;
		*) echo $a ;;
	esac
done
| true
|
a44a2361e75c64167e33a8b253a863d98701b0ac
|
Shell
|
ParrotSec/parrot-core
|
/localbin/apt
|
UTF-8
| 740
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Thin wrapper around /usr/bin/apt with Parrot-specific semantics:
# 'upgrade' performs a full upgrade, 'safe-upgrade' maps to Debian's
# plain 'upgrade', plus short aliases for common verbs. Anything not
# matched falls through to the real apt on the last line.
case $1 in
	upgrade)
	echo -e "
APT on Parrot behaves differently than Debian.
apt upgrade is equivalent to apt full-upgrade in Debian,
and performs a complete system update.
Use apt safe-upgrade to perform a partial upgrade.
"
	/usr/bin/apt full-upgrade
	# Fix: without this exit the fall-through at the bottom ran
	# 'apt upgrade' a second time after full-upgrade finished
	exit
	;;
	safe-upgrade)
	/usr/bin/apt upgrade
	# Fix: 'safe-upgrade' is not a real apt verb, so falling through
	# made the bottom line fail with an error after the upgrade
	exit
	;;
	dup)
	/usr/bin/apt dist-upgrade
	exit
	;;
	u|up)
	/usr/bin/apt update
	exit
	;;
	i|in)
	/usr/bin/apt install "${@:2}"
	exit
	;;
	r|rm)
	/usr/bin/apt remove "${@:2}"
	exit
	;;
	list-updates|lu|l)
	/usr/bin/apt list --upgradable
	exit
	;;
	s|se)
	/usr/bin/apt search "${@:2}"
	exit
	;;
	p|po|pol)
	/usr/bin/apt policy "${@:2}"
	exit
	;;
	sh)
	/usr/bin/apt show "${@:2}"
	exit
	;;
esac
/usr/bin/apt "$@"
| true
|
560ef822f5ba1a25c19337fd9bd18b03f15ba2fc
|
Shell
|
guessi/docker-compose-grafana-influxdb
|
/provision.sh
|
UTF-8
| 555
| 2.75
| 3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Render the Grafana datasource config from its template using the
# InfluxDB settings in influx2.env, then start the compose stack.
set -o errexit
set -o nounset
source influx2.env
echo "==> Prepare Configurations"
# Double-quote the sed expressions: the previous form closed the
# single quotes around each ${VAR}, so values containing spaces or
# glob characters were word-split by the shell before sed saw them.
sed -e "s/%%INFLUXDB_INIT_ORG%%/${DOCKER_INFLUXDB_INIT_ORG}/g" \
    -e "s/%%INFLUXDB_INIT_BUCKET%%/${DOCKER_INFLUXDB_INIT_BUCKET}/g" \
    -e "s/%%INFLUXDB_INIT_ADMIN_TOKEN%%/${DOCKER_INFLUXDB_INIT_ADMIN_TOKEN}/g" \
    grafana/etc/provisioning/datasources/datasource.yaml.template \
    > grafana/etc/provisioning/datasources/datasource.yaml
echo "==> Docker Image Pull"
docker compose pull
echo "==> Bring Up Services"
docker compose up -d
| true
|
f9a08835ce349ca7fefee1426470dadf268515d1
|
Shell
|
Gor4i4ka/linux-dev-2020
|
/13_PatchInstall/install.sh
|
UTF-8
| 559
| 3.578125
| 4
|
[] |
no_license
|
# Interactive installer: answer 1 to install to the default prefix,
# 0 to install into a user-chosen directory.
echo "Выберите установить в произвольный каталог(0) или по умолчанию(1)?"
read -r num
echo $num
if [[ "$num" == "1" ]]
then
# Default install: configure in ./build with the system locale path,
# build, install system-wide, then clean the build tree
cd build
cmake -S ../ -B ./ -DLOCALE_PATH='/usr/share/locale'
make
sudo make install
sudo make allclean
elif [[ "$num" == "0" ]]
then
echo "Выберите каталог Для установки"
read -r path_to_dir
# Resolve the chosen directory to an absolute path for cmake
full_path=$(realpath $path_to_dir)
echo $full_path
cmake -S ./ -B $path_to_dir -DLOCALE_PATH=$full_path"/bin"
cd $full_path
echo $full_path
make
fi
| true
|
7dab0cfc845f81266d68442ec9f1f794ec09f645
|
Shell
|
rudijs/netdata
|
/plugins.d/charts.d.plugin
|
UTF-8
| 8,701
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# -----------------------------------------------------------------------------
# insternal defaults
pause_method="sleep" # use either "suspend" or "sleep"
# DO NOT USE SUSPEND - LINUX WILL SUSPEND NETDATA TOO
# THE WHOLE PROCESS GROUP - NOT JUST THE SHELL
pluginsd="plugins.d"
confd="conf.d"
chartsd="charts.d"
myconfig="$confd/charts.d.conf"
minimum_update_frequency=1
update_every=1 # this is overwritten by the command line
# work around for non BASH shells
charts_create="_create"
charts_update="_update"
charts_check="_check"
charts_undescore="_"
# -----------------------------------------------------------------------------
# parse parameters
debug=0
check=0
chart_only=
while [ ! -z "$1" ]
do
if [ "$1" = "check" ]
then
check=1
shift
continue
fi
if [ "$1" = "debug" -o "$1" = "all" ]
then
debug=1
shift
continue
fi
if [ -f "$chartsd/$1.chart.sh" ]
then
debug=1
chart_only="$( echo $1.chart.sh | sed "s/\.chart\.sh$//g" )"
shift
continue
fi
if [ -f "$chartsd/$1" ]
then
debug=1
chart_only="$( echo $1 | sed "s/\.chart\.sh$//g" )"
shift
continue
fi
if [ -f "$1" ]
then
debug=1
chart_only="$( basename "$1" | sed "s/\.chart\.sh$//g" )"
shift
continue
fi
# number check
n="$1"
x=$((n + 1 - 1))
if [ "$x" = "$n" ]
then
update_every=$x
shift
continue
fi
echo >&2 "Cannot understand parameter $1. Aborting."
echo "DISABLE"
exit 1
done
# -----------------------------------------------------------------------------
# load my configuration
if [ -f "$myconfig" ]
then
. "$myconfig"
if [ $? -ne 0 ]
then
echo >&2 "charts.d: cannot load $myconfig"
echo "DISABLE"
exit 1
fi
fi
if [ "$pause_method" = "suspend" ]
then
# enable bash job control
# this is required for suspend to work
set -m
fi
# -----------------------------------------------------------------------------
# internal checks
# netdata passes the requested update frequency as the first argument
update_every=$(( update_every + 1 - 1)) # makes sure it is a number
test $update_every -eq 0 && update_every=1 # if it is zero, make it 1
# check the charts.d directory
if [ ! -d "$chartsd" ]
then
echo >&2 "charts.d: cannot find charts directory '$chartsd'"
echo "DISABLE"
fi
# -----------------------------------------------------------------------------
# loop control
# default sleep function
loopsleepms() {
	# Fallback: plain sleep. Overwritten by the high-resolution timer
	# version when loopsleepms.sh.inc is sourced just below.
	sleep $1
}
# if found and included, this file overwrites loopsleepms()
# with a high resolution timer function for precise looping.
. "$( dirname $0 )/loopsleepms.sh.inc"
# -----------------------------------------------------------------------------
# charts check functions
# Print the base name of every *.chart.sh file in $chartsd, one per
# line. Iterating the glob directly (instead of parsing ls output)
# is safe for any file name and avoids spawning sed.
all_charts() {
	cd "$chartsd"
	local chart
	for chart in *.chart.sh
	do
		# with no matches the literal pattern remains; skip it
		[ -f "$chart" ] && echo "${chart%.chart.sh}"
	done
	return 0
}
all_enabled_charts() {
local charts=
# find all enabled charts
for chart in $( all_charts )
do
eval "enabled=\$$chart"
if [ "$enabled" = "yes" ]
then
local charts="$charts $chart"
else
echo >&2 "charts.d: chart '$chart' is NOT enabled. Add a line with $chart=yes in $myconfig to enable it."
fi
done
local charts2=
for chart in $charts
do
# check the enabled charts
local check="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_check()" )"
if [ -z "$check" ]
then
echo >&2 "charts.d: chart '$chart' does not seem to have a $chart$charts_check() function. Disabling it."
continue
fi
local create="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_create()" )"
if [ -z "$create" ]
then
echo >&2 "charts.d: chart '$chart' does not seem to have a $chart$charts_create() function. Disabling it."
continue
fi
local update="$( cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_update()" )"
if [ -z "$update" ]
then
echo >&2 "charts.d: chart '$chart' does not seem to have a $chart$charts_update() function. Disabling it."
continue
fi
# check its config
if [ -f "$confd/$chart.conf" ]
then
if [ ! -z "$( cat "$confd/$chart.conf" | sed "s/^ \+//g" | grep -v "^$" | grep -v "^#" | grep -v "^$chart$charts_undescore" )" ]
then
echo >&2 "charts.d: chart's $chart config $confd/$chart.conf should only have lines starting with $chart$charts_undescore . Disabling it."
continue
fi
fi
"$pluginsd/charts.d.dryrun-helper.sh" "$chart" "$chartsd/$chart.chart.sh" "$confd/$chart.conf" >/dev/null
if [ $? -ne 0 ]
then
echo >&2 "charts.d: chart's $chart did not pass the dry run check. This means it uses global variables not starting with $chart. Disabling it."
continue
fi
local charts2="$charts2 $chart"
done
echo $charts2
}
# -----------------------------------------------------------------------------
# load the charts
suffix_update_every="_update_every"
active_charts=
for chart in $( all_enabled_charts )
do
. "$chartsd/$chart.chart.sh"
if [ -f "$confd/$chart.conf" ]
then
. "$confd/$chart.conf"
fi
eval "dt=\$$chart$suffix_update_every"
dt=$(( dt + 1 - 1 )) # make sure it is a number
if [ $dt -lt $update_every ]
then
eval "$chart$suffix_update_every=$update_every"
fi
$chart$charts_check
if [ $? -eq 0 ]
then
active_charts="$active_charts $chart"
else
echo >&2 "charts.d: chart '$chart' check() function reports failure."
fi
done
# -----------------------------------------------------------------------------
# check overwrites
# enable work time reporting
debug_time=
test $debug -eq 1 && debug_time=tellwork
# if we only need a specific chart, remove all the others
if [ ! -z "$chart_only" ]
then
check_charts=
for chart in $active_charts
do
if [ "$chart" = "$chart_only" ]
then
check_charts="$chart"
break
fi
done
active_charts="$check_charts"
fi
# stop if we just need a pre-check
if [ $check -eq 1 ]
then
echo "CHECK RESULT"
echo "Will run the charts: $active_charts"
exit 0
fi
# -----------------------------------------------------------------------------
# create temp dir
TMP_DIR=
chartsd_cleanup() {
	# Remove the temporary working directory (if one was created).
	# Registered below via trap for EXIT, SIGHUP and INT, so it runs
	# on every exit path.
	if [ ! -z "$TMP_DIR" -a -d "$TMP_DIR" ]
	then
		echo >&2 "charts.d: cleaning up temporary directory $TMP_DIR ..."
		rm -rf "$TMP_DIR"
	fi
	exit 0
}
trap chartsd_cleanup EXIT
trap chartsd_cleanup SIGHUP
trap chartsd_cleanup INT
if [ $UID = "0" ]
then
TMP_DIR="$( mktemp -d /var/run/netdata-charts.d-XXXXXXXXXX )"
else
TMP_DIR="$( mktemp -d /tmp/.netdata-charts.d-XXXXXXXXXX )"
fi
# -----------------------------------------------------------------------------
# library functions
# Normalise an arbitrary string into a safe chart id: every byte
# outside the tr set becomes '_', runs of '_' are squeezed, leading
# and trailing '_' are stripped, and the result is lower-cased.
fixid() {
	# printf (not echo) so inputs beginning with '-' or containing
	# backslashes pass through verbatim
	printf '%s\n' "$*" \
		| tr -c "[A-Z][a-z][0-9]" "_" \
		| sed -e "s|^_\+||g" -e "s|_\+$||g" -e "s|_\+|_|g" \
		| tr "[A-Z]" "[a-z]"
}
# -----------------------------------------------------------------------------
# create charts
run_charts=
for chart in $active_charts
do
$chart$charts_create
if [ $? -eq 0 ]
then
run_charts="$run_charts $chart"
else
echo >&2 "charts.d: chart '$chart' create() function reports failure."
fi
done
# -----------------------------------------------------------------------------
# update dimensions
global_update() {
local exit_after=$((3600 / update_every))
# return the current time in ms in $now_ms
current_time_ms
local chart=
for chart in $now_charts
do
eval "local last_update_$chart=\$((now_ms - ($chart$suffix_update_every * 1000) ))"
done
# the main loop
local c=0
while [ 1 ]
do
local c=$((c + 1))
local now_charts="$run_charts"
local run_charts=
local chart=
for chart in $now_charts
do
# return the current time in ms in $now_ms
current_time_ms
eval "local chart_min_dt=\$$chart$suffix_update_every"
test -z "$chart_min_dt" && local chart_min_dt=$update_every
local chart_min_dt=$((chart_min_dt * 1000000))
eval "local last=\$last_update_$chart"
test -z "$last" && local last=$((now_ms - (chart_min_dt / 1000) ))
local dt=$(( (now_ms - last) * 1000 ))
if [ $dt -ge $chart_min_dt ]
then
eval "last_update_$chart=$now_ms"
# the first call should not give a duration
# so that netdata calibrates to current time
test $c -eq 1 && local dt=
$chart$charts_update $dt
if [ $? -eq 0 ]
then
run_charts="$run_charts $chart"
else
echo >&2 "charts.d: chart '$chart' update() function reports failure. Disabling it."
fi
else
run_charts="$run_charts $chart"
fi
done
if [ "$pause_method" = "suspend" ]
then
echo "STOPPING_WAKE_ME_UP_PLEASE"
suspend || ( echo >&2 "suspend returned error $?, falling back to sleep."; loopsleepms $debug_time $update_every )
else
# wait the time you are required to
loopsleepms $debug_time $update_every
fi
test $c -gt $exit_after && exit 0
done
}
global_update
| true
|
398c4ada3651a2aab9bb3c8a38f9db6b66f5a3ca
|
Shell
|
hying-caritas/caritas_scripts
|
/bin/kbuild_config
|
UTF-8
| 1,136
| 3.34375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#TAGS: kernel
set -e
source caritas_functions.sh
MAKE="make -j$nr_cpu"
[ $# -ne 1 ] && die "Please specify target!"
target=$1
# Enter (creating + defconfig-ing on first use) a kernel build dir.
#   $1 - build directory
#   $2 - optional command wrapper, e.g. linux32
# NOTE(review): when no wrapper is passed, "$2" expands to an empty
# word used as the command name — confirm this behaves as intended.
xcd() {
	echo ---- "$1" -----
	if [ ! -d "$1" ] ; then
		mkdir "$1"
		cd "$1"
		"$2" "$MAKE" -C ../"$target" O="$(pwd)" defconfig
	else
		cd "$1"
	fi
}
oldconfig() {
	# Answer every new config question with its default (empty line);
	# $1 is an optional command wrapper such as linux32
	yes "" | "$1" "$MAKE" oldconfig
}
build() {
	# Clean, then build at low priority, teeing output to ../LOG.
	# NOTE(review): "$1" is the optional wrapper (linux32); with no
	# argument it expands to an empty command word — confirm.
	$MAKE clean
	"$1" nice -n20 "$MAKE" | tee ../LOG
}
(
xcd obj
oldconfig
build
)
(
xcd obj-up
./source/scripts/config --disable smp
oldconfig
build
)
(
xcd obj32 linux32
sh ./source/scripts/config --disable eeepc_laptop
oldconfig linux32
build linux32
)
(
xcd obj32-up linux32
linux32 ./source/scripts/config \
--disable smp --enable x86_up_apic --enable x86_io_apic --enable X86_UP_IOAPIC
oldconfig linux32
build linux32
)
(
xcd obj-allno
make allnoconfig
build
)
(
xcd obj32-allno linux32
linux32 make allnoconfig
build linux32
)
(
xcd obj-allmod
make allmodconfig
./source/scripts/config --disable debug_info
build
)
(
xcd obj32-allmod
linux32 make allmodconfig
linux32 ./source/scripts/config --disable debug_info
build linux32
)
| true
|
5b2324cccb816bdc96dc6e1ec16c6992373f1c8f
|
Shell
|
openlibraryenvironment/mod-rs
|
/okapi-scripts/applyAllPermissions.sh
|
UTF-8
| 2,278
| 3.328125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
# Grant every permission known to the tenant's perms module to the
# admin user (ADMIN_ID) of TENANT, via the Okapi gateway.
ADMIN_ID=${ADMIN_ID:-'diku_admin'}
TENANT=${TENANT:-'diku'}
# setOkapiUrl sets the variable OKAPI_URL
. ./setOkapiUrl
echo OKAPI Url: $OKAPI_URL
# Get hold of an auth token
AUTH_TOKEN=`./okapi-login`
echo Auth Token: $AUTH_TOKEN
# Check vars not blank
[[ ! -z "$OKAPI_URL" ]] || ( echo "OKAPI_URL is a required environment variable" && exit 1 )
[[ ! -z "$TENANT" ]] || ( echo "TENANT is a required environment variable" && exit 1 )
[[ ! -z "$ADMIN_ID" ]] || ( echo "ADMIN_ID is a required environment variable" && exit 1 )
[[ ! -z "$AUTH_TOKEN" ]] || ( echo "AUTH_TOKEN is a required environment variable" && exit 1 )
# Ensure the admin account has every permission
# Lookup USER UUID
USER_ID=$(curl --http1.1 -sSLf -H "x-okapi-token: $AUTH_TOKEN" -H 'accept: application/json' -H 'Content-type: application/json' \
  -H "X-Okapi-Tenant: $TENANT" --connect-timeout 10 --max-time 30 -XGET "$OKAPI_URL/users?query=username%3D%3D${ADMIN_ID}&length=10000" \
  | jq -rc '.users[0].id')
# Fix: echo the variable that was actually assigned (was $USER_UID,
# which is never set and always printed an empty string)
echo User Id: $USER_ID
# Use the UUID to find the perms user.
PERMS_UID=$(curl --http1.1 -sSLf -H "x-okapi-token: $AUTH_TOKEN" -H 'accept: application/json' -H 'Content-type: application/json' \
  -H "X-Okapi-Tenant: $TENANT" --connect-timeout 10 --max-time 30 \
  -XGET "$OKAPI_URL/perms/users?query=userId%3D%3D${USER_ID}&length=10000" | jq -rc ".permissionUsers[0].id")
# Fix: was $PERMS_ID (unset); the value lives in $PERMS_UID
echo Perms Id: $PERMS_UID
# Grant all to found user.
ALL_PERMS=`curl --http1.1 -sSLf -H "x-okapi-token: $AUTH_TOKEN" -H 'accept: application/json' -H 'Content-type: application/json' \
  -H "X-Okapi-Tenant: $TENANT" --connect-timeout 10 --max-time 30 \
  -XGET "$OKAPI_URL/perms/permissions?length=100000"`
echo All Perms: $ALL_PERMS
TO_ASSIGN=`echo $ALL_PERMS | jq -rc '.permissions[] |= { permissionName } | .permissions[]'`
for perm in $TO_ASSIGN; do
  echo "Add permission ${perm} (ignore error if already granted)"
  curl --http1.1 -w "\nstatus=%{http_code} size=%{size_download} time=%{time_total} content-type=\"%{content_type}\"\n" \
    -sSL -H "x-okapi-token: $AUTH_TOKEN" -H 'accept: application/json' -H 'Content-type: application/json' \
    -H "X-Okapi-Tenant: $TENANT" --connect-timeout 5 --max-time 30 \
    -XPOST "$OKAPI_URL/perms/users/${PERMS_UID}/permissions" -d "$perm"
done
| true
|
9df5005754ed92a5983438f9a3f57b9278283118
|
Shell
|
onikongo/kubscripts
|
/dcknow
|
UTF-8
| 3,217
| 3.859375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
if [ -z "$3" ]
then
echo "${0} [SITEDIR] [BOXDIR] [web:job,live,yellow] [folder1,folder2,folder3,...] (#,..,... define to skip dckbuild for sub images)"
echo "Example: "
echo " dcknow playsite playbox job modules/typ,themes/contrib,sites 0,3"
exit
fi
PATHSET=$(echo $PWD | tr "/" "\n")
for X in $PATHSET
do
CURRENTPATH=$X
done
SITEDIR=$1
BOXDIR=$2
WEB=$3
FOLDERS=$4;
IFS=','
read -ra MODULES <<<"$4"
IFS=','
read -ra SKIPBUILT <<<"$5"
SITEDIR=${PWD}/${SITEDIR}/www/$WEB/web
BOXDIR=${BOXDIR}
G='\033[1;30m'
H='\033[1;33m'
T='\033[0;36m'
M='\033[1;35m'
N='\033[0m' # No Color
PARTNO=0
SRC=${1}/www
DEST=${BOXDIR}/${CURRENTPATH}-${PARTNO}/root
mkdir -p ${DEST}
echo -e "${G}${T}Working with ${H}${FOLDERS}${T}......"
echo " "
echo -e "${G}=== ${T}Syncing ${H}${SRC}${T} to ${H}${DEST}${N}"
mkdir -p $DEST
echo -e "${M}"
rsync --exclude '.*' -a --delete $SRC $DEST
echo -e "${NC}"
for PART in "${MODULES[@]}"
do
((PARTNO=PARTNO+1))
SRC=${BOXDIR}/${CURRENTPATH}-0/root/www/${WEB}/web/${PART}/*
DEST=${BOXDIR}/${CURRENTPATH}-${PARTNO}/root/www/${WEB}/web/${PART}
echo " "
echo -e "${G}=== ${T}Spliting ${H}${SRC}${T} to ${H}${DEST}${N}"
mkdir -p $DEST
echo -e "${M}"
rsync --exclude '.*' --exclude 'hg-*' -av --delete $SRC $DEST
echo -e "${NC}"
rm -rf $SRC
done
echo " "
echo -e "${T}Creating ${H}Dockerfile${T}......${N}"
echo "FROM busybox" > Dockerfile
echo "MAINTAINER vorapoap@teleinfomedia.co.th" >> Dockerfile
echo "ENV TIMEZONE Asia/Bangkok" >> Dockerfile
#echo "COPY --chown=48:48 drupal /root/www/drupal" >> Dockerfile
DCKHEADER=$(cat <<-END
FROM busybox
ENV TIMEZONE Asia/Bangkok
END
)
I=0
while [ $I -le $PARTNO ]
do
DEST=/root/www
SRC=playbox/${CURRENTPATH}-${I}/root/*
DOCKERFILE=playbox/${CURRENTPATH}-${I}/Dockerfile
DOCKERIGNORE=playbox/${CURRENTPATH}-${I}/.dockerignore
echo "${DCKHEADER}" > $DOCKERFILE
echo "COPY --chown=48:48 ${DEST} /root/www/" >> $DOCKERFILE
echo "VOLUME /www" >> $DOCKERFILE
echo "CMD cp -prf /root/www/* /www" >> $DOCKERFILE
echo ".*" > $DOCKERIGNORE
echo "hg-*">> $DOCKERIGNORE
((I=I+1))
done
echo ""
echo -e "${T}Dockerbuild / Dockerpush ${H}dckbuild/dckpush${T}......${NC}"
I=0
while [ $I -le $PARTNO ]
do
SRC=${PWD}/${BOXDIR}/${CURRENTPATH}-${I}
echo -e "${G}===== ${H}${BOXDIR}/${CURRENTPATH}-${I}/Dockerfile${NC}"
if [[ "${SKIPBUILT[*]}" =~ "${I}" ]]
then
echo -e "${G}===== ${T}SKIP Building/Pushing Dockerfile${NC}"
echo ""
else
( cd ${SRC}; cat Dockerfile )
echo ""
( cd ${SRC}; dckbuild; dckpush )
echo ""
echo ""
fi
((I=I+1))
done
#DOCKERFILE=Dockerfile
#echo "FROM ${OWNER}/${CURRENTPATH}-${PARTNO}:latest AS A${PARTNO}" > $DOCKERFILE
#echo "COPY /root/* /root" >> $DOCKERFILE
#
#((I=PARTNO-1))
#while [ $I -ge 0 ]
#do
# ((V=I+1))
# echo "FROM ${OWNER}/${CURRENTPATH}-${I}:latest AS A${I}" >> $DOCKERFILE
# echo "COPY --from A${V} /root/* /root" >> $DOCKERFILE
# ((I=I-1))
#done
#echo "VOLUME /www" >> $DOCKERFILE
#echo "CMD cp -prf /root/www/* /www" >> $DOCKERFILE
#
#echo -e "${G}===== ${H}Dockerfile${N}"
#cat $DOCKERFILE
#echo ""
#dckbuild
echo ""
| true
|
b40f4a9084922fe5d2c4ec4ae80d4d18c68545d2
|
Shell
|
mozilla/aws-lambda-telemetry
|
/ansible/files/deploy.sh
|
UTF-8
| 1,444
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/sh
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
if [ $# -eq 0 ]
then
echo "No arguments supplied"
exit 1
fi
cd ..
FILES="telemetry_index_ping.js telemetry_schema.py telemetry_v2_schema.json telemetry_v4_schema.json telemetry_v4_release_schema.json node_modules/ schema/"
FUNCTION_NAME=telemetry_index_ping
METADATA_BUCKET=net-mozaws-prod-us-west-2-pipeline-metadata
INSTANCE_PROFILE=$1
aws s3 cp s3://$METADATA_BUCKET/sources.json sources.json
for source in telemetry telemetry-release
do
bucket=$(jq -r ".\"$source\" | .bucket" sources.json)
prefix=$(jq -r ".\"$source\" | .prefix" sources.json)
dir=schema/$bucket/$prefix
mkdir -p $dir
aws s3 cp s3://$METADATA_BUCKET/$prefix/schema.json $dir
done
npm install node-uuid aws-sdk-promise promise
wget -N https://raw.githubusercontent.com/mozilla/telemetry-tools/master/telemetry/telemetry_schema.py
zip -r lambda.zip $FILES
aws lambda delete-function --function-name $FUNCTION_NAME || true
aws lambda create-function \
--function-name $FUNCTION_NAME \
--runtime nodejs \
--role $INSTANCE_PROFILE \
--handler telemetry_index_ping.handler \
--description "Index Telemetry files in SimpleDB" \
--timeout 15 \
--zip-file fileb://lambda.zip \
--memory-size 128 \
--region us-west-2
| true
|
2a0f6669fd60979502adfc03059540e9cc2b1a9d
|
Shell
|
liodopolus/scriptcol-current
|
/archiv/archiv-alpha/sed-cut-end.sc
|
UTF-8
| 192
| 3.109375
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/sh
# written by Jeffrey Scherling Sun Sep 18 18:04:07 CEST 2011
# cut the end with sed
# Usage: sed-cut-end.sc STRING FILE
# Truncates every line of FILE after the first occurrence of STRING
# and writes the result to FILE.without-end.
CWD=$(pwd)
# String
STR="$1"
# Filename
FILE=$2
# Read the file directly with sed (no useless cat) and quote the
# sed script and file name so spaces cannot word-split them.
sed "s/$STR*.*/$STR/" "$FILE" > "$FILE.without-end"
# -
| true
|
3115b89f161ca144303782cb4f017087ead2b455
|
Shell
|
shiva0808/ShellScripts
|
/StdinStdout/basicProgram.sh
|
UTF-8
| 1,656
| 3.46875
| 3
|
[] |
no_license
|
FILE="/tmp/data"
head -n1 /etc/passwd > ${FILE}
read line < ${FILE}
echo "Line Contains ${line}"
head -n3 /etc/passwd > ${FILE}
echo "Contents of file ${FILE}"
cat "${FILE}"
echo "${random}${random}" >> ${FILE}
echo "${random}${random}" >> ${FILE}
echo
echo "Contents of file ${FILE} :"
cat "${FILE}"
###################
###Recirect File as input in line
read line 0< ${FILE}
echo
echo "Line Contains ${LINE}"
#################
##Redirect output to a file using file descriptor
head -n3 /etc/passwd 1> ${FILE}
echo
echo "Contents of file is ${FILE}"
cat ${FILE}
##################
####STDERR to a FILE
ERR_FILE="/tmp/error_file"
head -n3 /etc/passwd /etc/dummyfile 2> ${ERR_FILE}
###########
###Redirecting both STDOUT and STDERR in one FILE
#head -n3 /etc/passwd /etc/dummyfile 1> output.txt 2>&1 #this is old method
head -n3 /etc/passwd /etc/dummyfile &> ${FILE}
echo "Content of file are ${FILE}"
##############
##Sending STDERR in another file and STDOUT in another FILE
head -n3 /etc/passwd /etc/dummyfile 1> output.txt 2> error.txt
#############
#Sending STDOUT and STDERR output to file using pipe
head -n3 /etc/passwd /dummyfile |& cat -n
#############
##Sending error to stderr FILE
echo "This is a error!!!" &>2
#now output this script stderr to a file
#ex../test.sh 2> error.out
########No Device /dev/null
#sending all output of script in /dev/null
#this means we dont want to print anything on screen
head -n3 /etc/passwd /etc/dummyfile >& /dev/null
#discarding STDOUT
head -n /etc/passwd > /dev/null
#Discarding stderr
head -n /etc/passwd /dummyfile 2> /dev/null
#Discarding both
head -n /etc/passwd /dummyfile &> /dev/null
| true
|
ea819f17f95b203ff600bff36fce1b2ba3005888
|
Shell
|
RedPandaDev/HPCCoursework
|
/normal.sh
|
UTF-8
| 871
| 3.734375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Benchmark driver: rebuilds the OpenMP blur program and times it
# over several runs.
# Remove all compiled programs before starting
rm blurOMP.out
# How many threads do you want to test from and too?
tmin=2
tmax=10
# How many chunks do you want to test from and too?
cmin=2
cmax=10
# How many results do you want?
runs=10
# Static, dynamic, guided, auto?
# The number is the code that OMP stores in its enum omp_sched_t custom variable type
# static = 1
# dynamic = 2
# guided = 3
# auto = 4
sch=static
# Recompile program
# Fix: the output name must match the binary removed above and run
# below — the original compiled to blur.out but executed blurOMP.out,
# so every run used a stale (or missing) binary.
gcc -fopenmp -o blurOMP.out blur.c
# Run programs
for z in $(seq 1 $runs);
do
  ./blurOMP.out
done
printf "\nfin\n"
# If you want to do only even numbers in a seq
# You can add an interval to the seq command as the second argument
# i.e
# seq 2 2 10
# will output:
# 2 4 6 8 10
# To put the output of the script into a file (as opposed to printing to the screen)
# Run `bash runner.sh >> outputfile.txt`
| true
|
11edcfeef5fb4a718c6e167573fe407508de5524
|
Shell
|
mocha-parallel/mocha-parallel-tests
|
/test/reporter-custom-jenkins/index.sh
|
UTF-8
| 658
| 3.515625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Smoke-test the Jenkins reporter: run the suite through the CLI and
# verify a JUnit XML report with testsuites/testsuite/testcase
# elements was produced; clean up the report and propagate a status.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
RESULT="$DIR/result.xml"
OUTPUT=$(JUNIT_REPORT_PATH=$RESULT dist/bin/cli.js -R mocha-jenkins-reporter test/reporter-custom-jenkins/suite.js 2>&1)
STATUS=$?
if [ $STATUS -eq 0 ]; then
    RESULT_XML=$(cat $RESULT)
    # The report must contain all three JUnit element levels
    if [[ $RESULT_XML == *"<testsuites "* ]] && [[ $RESULT_XML == *"<testsuite "* ]] && [[ $RESULT_XML == *"<testcase "* ]]; then
        EXIT_CODE=0
    else
        echo "Reporter output file is wrong: $RESULT_XML"
        EXIT_CODE=1
    fi
else
    echo "Exit code is unexpected: $STATUS"
    echo "Debug output: $OUTPUT"
    EXIT_CODE=1
fi
rm $RESULT
exit $EXIT_CODE
| true
|
3e8c9807dc6bd79d31ca9089dc93046cee161b42
|
Shell
|
JavierGelatti/TrustingTrust
|
/0/dame_feedback.sh
|
UTF-8
| 444
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Quine feedback helper: compile programa.c with the local compiler,
# run it, show a coloured diff between the source and its output, and
# report whether the program reproduces itself exactly.
set -e
clear
./compiler programa.c -o programa
./programa > salida
printf "\n---- DIFF ----\n"
colordiff -u programa.c salida | diff-highlight
printf "\n--- SALIDA ---\n"
cat salida
printf "\n"
# Silent diff: exit status 0 means source and output are identical
if diff -q programa.c salida > /dev/null
then
    printf "\u001b[32mEl programa y la salida son iguales\u001b[0m\n"
else
    printf "\u001b[31mEl programa y la salida son distintos\u001b[0m\n"
fi
printf "\n"
rm salida
rm programa
| true
|
9387bdc56c95eb5a47d05fdfba8935f480a36aaf
|
Shell
|
teja624/home
|
/.zsh/modules/aws/lib/sh/api/codepipeline/stage_transition_disable.sh
|
UTF-8
| 345
| 2.953125
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Disable a stage transition in an AWS CodePipeline.
#   $1 - pipeline name
#   $2 - stage name
#   $3 - transition type (Inbound | Outbound)
#   $4 - human-readable reason (free text, may contain spaces)
# Remaining arguments are passed through to the AWS CLI.
aws_codepipeline_stage_transition_disable() {
  local pipeline_name="$1"
  local stage_name="$2"
  local transition_type="$3"
  local reason="$4"
  shift 4
  # Quote every expansion: --reason in particular is free text and was
  # word-split into multiple CLI arguments by the unquoted form.
  cond_log_and_run aws codepipeline disable-stage-transition --pipeline-name "$pipeline_name" --stage-name "$stage_name" --transition-type "$transition_type" --reason "$reason" "$@"
}
| true
|
897034b1d7d834c1d733caa3efcabe3a44543c1e
|
Shell
|
cnagel85/vagrant-configs
|
/dev/setup/install-misc.sh
|
UTF-8
| 718
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Just a basic setup for my own dev environment, to quickly get my machine up and running
# Installs vim-plug plus a .vimrc, then compilers and Python tooling.
# NOTE(review): assumes an 'ubuntu' user and apt-based distro.
echo "Updating vim plugins..."
# Create vim directories
mkdir -p .vim/plugged
mkdir -p .vim/autoload
# Download VimPlug
curl --silent -fLo .vim/autoload/plug.vim https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
# Own .vim dir
sudo chown -R ubuntu .vim
# Download .vimrc
wget --quiet "https://raw.githubusercontent.com/rdrsss/vimrc/master/.vimrc"
sudo chown -R ubuntu .vimrc
echo "Installing GCC & G++..."
sudo apt-get -q install -y gcc g++
echo "Installing Python(2.7)..."
sudo apt-get -q install -y python
echo "Installing Python-Pip..."
sudo apt-get -q install -y python-pip
echo "Fin..."
| true
|
eb1baf37d479e3bf8c321076e0289e229e21bc73
|
Shell
|
jrtest5/php-buildpack
|
/support/ext/ds
|
UTF-8
| 343
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Download, build and enable the PHP ds (data structures) extension.
set -e
# Fix: the original ${1:=7.4} tries to ASSIGN the positional
# parameter $1, which bash rejects ("cannot assign in this way") and,
# under set -e, aborted the script whenever no argument was given.
# ${1:-7.4} substitutes the default without assigning.
php_version=${1:-7.4}
ds_version=1.2.9
curl -L "https://pecl.php.net/get/ds-${ds_version}.tgz" \
  | tar xzv
cd ds-${ds_version}
/app/vendor/php/bin/phpize
./configure --with-php-config=/app/vendor/php/bin/php-config
make
cp modules/ds.so "$EXT_DIR/ds.so"
echo "extension=ds.so" > "$PREFIX/etc/conf.d/ds.ini"
| true
|
20ba9ae7baeba1f570cf160f00f8259623dfedd9
|
Shell
|
jasoyode/CTRNN_NM
|
/scripts/single_seed_analysis/create_config.sh
|
UTF-8
| 1,043
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate a per-seed config_<name>.ini for a single-seed CTRNN analysis run
# by substituting the XXX_*_XXX placeholders in config_template.ini via sed.
#60, 68, 99
# Default seed; the first command-line argument overrides it.
XXX_SEED_XXX="99"
if [ "$1" != "" ]; then
  XXX_SEED_XXX="$1"
fi
echo "XXX_SEED_XXX: $XXX_SEED_XXX"
# Experiment condition: modulated vs standard (MOD_STD must match mod_std).
mod_std="mod1-ON"
#mod_std="standard"
MOD_STD="MOD"
#MOD_STD="STD"
TYPE="RPG"
SIZE="3"
##############
XXX_NAME_XXX="${TYPE}${SIZE}_${MOD_STD}_$XXX_SEED_XXX"
XXX_CTRNN_PATH_XXX="/scratch/jasoyode/github_jasoyode/CTRNN_NM/"
INNER_FOLDER="JOB_ctrnn-${TYPE}_size-${SIZE}_sim-100run-500gen_signal-SINE-1p_M-${mod_std}"
##################
# Escape "/" as "\/" so the path values survive being spliced into the sed
# replacement expressions below.
XXX_CTRNN_PATH_XXX=$( echo "$XXX_CTRNN_PATH_XXX" | sed "s/\//\\\\\//g" )
XXX_EXP_FOLDER_XXX="DATA/CPG_RPG_MPG_345/$INNER_FOLDER"
XXX_EXP_FOLDER_XXX=$( echo "$XXX_EXP_FOLDER_XXX" | sed "s/\//\\\\\//g" )
XXX_JOB_INI_XXX="$INNER_FOLDER.ini"
OUTPUT_CONFIG="config_$XXX_NAME_XXX.ini"
# One sed pass per placeholder; the template file itself is left untouched.
cat config_template.ini | sed "s/XXX_SEED_XXX/$XXX_SEED_XXX/" \
 | sed "s/XXX_NAME_XXX/$XXX_NAME_XXX/" \
 | sed "s/XXX_CTRNN_PATH_XXX/$XXX_CTRNN_PATH_XXX/" \
 | sed "s/XXX_EXP_FOLDER_XXX/$XXX_EXP_FOLDER_XXX/" \
 | sed "s/XXX_JOB_INI_XXX/$XXX_JOB_INI_XXX/" > $OUTPUT_CONFIG
| true
|
01a8b13b1c3041c909eee2a48c78333f24044ce6
|
Shell
|
MaxPoon/nus-datathon-2018
|
/preprocessing/pdf2jpg.sh
|
UTF-8
| 122
| 2.671875
| 3
|
[] |
no_license
|
# Convert every PDF in directories 311..351 to a 500-DPI JPEG, mirroring the
# layout under JPEG/<dir>/ (output name keeps the .pdf suffix: <name>.pdf.jpg,
# as the original script did).
for i in {311..351}; do
  mkdir -p "JPEG/$i"
  # BUG FIX: iterate the glob directly instead of parsing `ls` output, which
  # broke on filenames with spaces; skip directories with no PDFs.
  for j in "$i"/*.pdf; do
    [ -e "$j" ] || continue
    convert -density 500 "$j" "JPEG/$j.jpg"
  done
done
| true
|
e621bfdb4307bf4c0cd690a60173a983f777a250
|
Shell
|
zeraimundo/lista_2
|
/1.sh
|
UTF-8
| 2,710
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Tutorial script: prints (in Portuguese) an explanation of the shell I/O
# redirection operators (>, >>, 2>, &>, <, <<, <<<, |). Every echo below is
# user-facing output and is intentionally left untouched.
echo
echo "Redirecionadores"
echo
echo "Redicionadores de entrada e saída são meios de definir a entrada e saída padrão para um comando. Eles possibilitam a habilidade de tratar as entradas e saídas de dados com grande facilidade."
echo
echo
echo "Tipos de Redirecionadores"
echo
echo
echo "> ou 1> - redireciona a entrada padrão para outra saída"
echo "Ex: ao digitar o comando ls > conteudo.txt alteramos a saída padão do comando para que a mesma seja direcionada ao arquivo conteudo.txt ao invés de ser exibida na tela. Nesse tipo de redirecionamento, se o arquivo não existir, o mesmo será criádo, caso ele exista, o mesmo será substituído."
echo
echo
echo ">> - essa forma de redirecionamento se assemelha a anterior, direcionando o conteúdo para um arquivo porém preservando seu conteúdo original e adicionando as saídas após às informações previamente arquivadas."
echo "Ex: ao digitar o comando ls >> conteudo.txt alteramos a saída padão do comando para que a mesma seja direcionada ao arquivo conteudo.txt ao invés de ser exibida na tela. Nesse tipo de redirecionamento, se o arquivo não existir, o mesmo será criádo, caso ele exista, será adicionado o conteúdo no seu final."
echo
echo
echo "2> - essa forma de redirecionamento envia para um arquivo ao invés da saída padrão os erros encontrados durante a execução de um comando"
echo "Ex: ao digitar o comando ls 2> conteudo.txt alteramos a saída de erro do comando para que a mesma seja direcionada ao arquivo conteudo.txt ao invés de ser exibida na tela. Nesse tipo de redirecionamento, se o arquivo não existir, o mesmo será criádo, caso ele exista, o mesmo será substituído."
echo
echo
echo "Posso unir dois redirecionadores num comando para que ele redirecione a saída padrão sem erros para um arquivo e a saída de erros para outro"
echo "Ex: ls > ok.txt 2> erro.txt"
echo
echo
echo "O redirecionador &> pode fazer o trabalho anterior, mas redirecionando tanto a saída padrão quanto a saída de erros para o mesmo arquivo."
echo "Ex: ls &> ambos.txt"
echo
echo
echo "O arquivo /dev/null pode ser usado para descartar erros de comandos"
echo
echo
echo "< - Redirecionador de entrada padrão que permite enviar a entrada a partir de um arquivo para um determinado comando"
echo "cat < doc.txt"
echo
echo
echo "<< ou here file onde podemos digitar um conjunto de informações para um determinado comando"
echo
echo
echo "<<< ou here string redireciona a entrada padrão para uma string"
echo '(bc <<< "scale=7;($lat1+$lat2)/2")'
echo
echo
echo "| ou pipe – redireciona tanto a saída quanto a entrada de dois comandos diferentes"
echo "Ex: cat teste.txt | wc -l > resultado.txt"
| true
|
0559e92b612cbcd5e64aab152263a34c2a531482
|
Shell
|
rickaas/webdsl.debug
|
/example-projects/performance01-di/timing-results/url-fetch-script.sh
|
UTF-8
| 1,758
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# Performance harness for a WebDSL "performance01" app: populates the app
# with generated users, then repeatedly fetches a fixed set of pages with
# wget while the app's built-in profiler records frame durations.
# Args: $1 group, $2 run/log name, $3 repetitions, $4 dataset unit count.
# Results land in archive/<group>/<logname>/ next to this script.
# the script location
#echo $0
# http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in
prefix="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
#echo $prefix
GROUP=$1
# logs are categorized in group
LOGNAME=$2
#LOGNAME=run_1313
REPEAT=$3
#REPEAT=5
# each unit will generate 10*100 users
DATASET_UNIT_SIZE=$4
#DATASET_UNIT_SIZE=0
BASEURL=http://localhost:8080/performance01
URLs=( "listUsers" "filterWhereUsers" "orderByUsers" )
mkdir -p $prefix/archive/$GROUP/$LOGNAME
echo group=$GROUP log=$LOGNAME repeat=$REPEAT unitsize=$DATASET_UNIT_SIZE > $prefix/archive/$GROUP/$LOGNAME/run.config
LOGFILE=$prefix/archive/$GROUP/$LOGNAME/wget.log
WGETOUTPUT=$prefix/archive/$GROUP/$LOGNAME/wget.output.tmp
rm -f $LOGFILE
# Page bodies go to a scratch file; wget diagnostics are appended to LOGFILE.
WGET_OPTIONS="--output-document=$WGETOUTPUT --quiet --append-output=$LOGFILE"
# first call base url to initialize the WebDSL backend
wget $WGET_OPTIONS "$BASEURL"
for j in `seq 1 $DATASET_UNIT_SIZE`
do
  echo populating $j / $DATASET_UNIT_SIZE
  # populate database 10*100 users
  wget $WGET_OPTIONS "$BASEURL/automaticallyMakeTestSet"
done
echo start performance tests
# start logging
wget $WGET_OPTIONS "$BASEURL/stop"
echo calling "$BASEURL/start?$LOGNAME"
wget $WGET_OPTIONS "$BASEURL/start?$LOGNAME"
# only measure frameduration for a subset of debug-events
for i in "${URLs[@]}"
do
  echo Add FrameName filter: $i
  wget $WGET_OPTIONS "$BASEURL/addFrameNameFilter?$i"
done
for LOOP_COUNTER in `seq 1 $REPEAT`
do
  echo loop count $LOOP_COUNTER / $REPEAT
  # call each URL
  for i in "${URLs[@]}"
  do
    echo $i
    wget $WGET_OPTIONS "$BASEURL/$i"
  done
done
# flush and stop logging
wget $WGET_OPTIONS "$BASEURL/flush"
wget $WGET_OPTIONS "$BASEURL/stop"
| true
|
b781c21e843cc63b6794127829bc0e70dbf7d1ec
|
Shell
|
github/VFSForGit
|
/Scripts/Mac/BuildGVFSForMac.sh
|
UTF-8
| 5,217
| 3.703125
| 4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Build VFS for Git (GVFS) for macOS end to end: ProjFS kext + libraries,
# the .NET solution, native hooks, the user-notification app, and the
# published output tree.
#   $1 = build configuration (defaults to Debug; "Profiling(Release)" is
#        mapped to Release for the .NET portions)
#   $2 = version number; when omitted a fixed developer version is used and
#        version stamping is skipped.
. "$(dirname ${BASH_SOURCE[0]})/InitializeEnvironment.sh"
CONFIGURATION=$1
if [ -z $CONFIGURATION ]; then
  CONFIGURATION=Debug
fi
runVersionUpdater="yes"
VERSION=$2
if [ -z $VERSION ]; then
  VERSION="0.2.173.2"
  # don't update version number for developer builds
  runVersionUpdater="no"
fi
if [ ! -d $VFS_OUTPUTDIR ]; then
  mkdir $VFS_OUTPUTDIR
fi
echo 'Building ProjFS kext and libraries...'
$VFS_SRCDIR/ProjFS.Mac/Scripts/Build.sh $CONFIGURATION || exit 1
# Create the directory where we'll do pre build tasks
BUILDDIR=$VFS_OUTPUTDIR/GVFS.Build
if [ ! -d $BUILDDIR ]; then
  mkdir $BUILDDIR || exit 1
fi
echo 'Downloading a VFS-enabled version of Git...'
$VFS_SCRIPTDIR/DownloadGVFSGit.sh || exit 1
GITVERSION="$($VFS_SCRIPTDIR/GetGitVersionNumber.sh)"
GITPATH="$(find $VFS_PACKAGESDIR/gitformac.gvfs.installer/$GITVERSION -type f -name *.dmg)" || exit 1
echo "Downloaded Git $GITVERSION"
# Now that we have a path containing the version number, generate GVFSConstants.GitVersion.cs
$VFS_SCRIPTDIR/GenerateGitVersionConstants.sh "$GITPATH" $BUILDDIR || exit 1
# If we're building the Profiling(Release) configuration, remove Profiling() for building .NET code
if [ "$CONFIGURATION" == "Profiling(Release)" ]; then
  CONFIGURATION=Release
fi
echo "Generating CommonAssemblyVersion.cs as $VERSION..."
$VFS_SCRIPTDIR/GenerateCommonAssemblyVersion.sh $VERSION || exit 1
# /warnasmessage:MSB4011. Reference: https://bugzilla.xamarin.com/show_bug.cgi?id=58564
# Visual Studio Mac does not support explicit import of Sdks. GVFS.Installer.Mac.csproj
# does need this ability to override "Build" and "Publish" targets. As a workaround the
# project implicitly imports "Microsoft.Net.Sdk" in the beginning of its csproj (because
# otherwise Visual Studio Mac IDE will not be able to open the GVFS.Install.Mac project)
# and explicitly imports Project="Sdk.targets" Sdk="Microsoft.NET.Sdk" later, before
# overriding build targets. The duplicate import generates warning MSB4011 that is ignored
# by this switch.
echo 'Restoring packages...'
dotnet restore $VFS_SRCDIR/GVFS.sln /p:Configuration=$CONFIGURATION.Mac --packages $VFS_PACKAGESDIR /warnasmessage:MSB4011 || exit 1
dotnet build $VFS_SRCDIR/GVFS.sln --runtime osx-x64 --framework netcoreapp2.1 --configuration $CONFIGURATION.Mac -p:CopyPrjFS=true /maxcpucount:1 /warnasmessage:MSB4011 || exit 1
# Native (Xcode) components: hooks library and the notification app.
NATIVEDIR=$VFS_SRCDIR/GVFS/GVFS.Native.Mac
xcodebuild -configuration $CONFIGURATION -workspace $NATIVEDIR/GVFS.Native.Mac.xcworkspace build -scheme GVFS.Native.Mac -derivedDataPath $VFS_OUTPUTDIR/GVFS.Native.Mac || exit 1
USERNOTIFICATIONDIR=$VFS_SRCDIR/GVFS/GVFS.Notifications/VFSForGit.Mac
USERNOTIFICATIONPROJECT="$USERNOTIFICATIONDIR/VFSForGit.xcodeproj"
USERNOTIFICATIONSCHEME="VFS For Git"
if [ "$runVersionUpdater" == "yes" ]; then
  updateAppVersionCmd="(cd \"$USERNOTIFICATIONDIR\" && /usr/bin/xcrun agvtool new-marketing-version \"$VERSION\")"
  echo $updateAppVersionCmd
  eval $updateAppVersionCmd || exit 1
fi
# Build user notification app
xcodebuild -configuration $CONFIGURATION -project "$USERNOTIFICATIONPROJECT" build -scheme "$USERNOTIFICATIONSCHEME" -derivedDataPath $VFS_OUTPUTDIR/GVFS.Notifications/VFSForGit.Mac || exit 1
# Build the tests in a separate directory, so the binary for distribution does not contain
# test plugins created and injected by the test build.
xcodebuild -configuration $CONFIGURATION -project "$USERNOTIFICATIONPROJECT" test -scheme "$USERNOTIFICATIONSCHEME" -derivedDataPath $VFS_OUTPUTDIR/GVFS.Notifications/VFSForGit.Mac/Tests || exit 1
if [ ! -d $VFS_PUBLISHDIR ]; then
  mkdir $VFS_PUBLISHDIR || exit 1
fi
echo 'Copying native binaries to Publish directory...'
cp $VFS_OUTPUTDIR/GVFS.Native.Mac/Build/Products/$CONFIGURATION/GVFS.ReadObjectHook $VFS_PUBLISHDIR || exit 1
cp $VFS_OUTPUTDIR/GVFS.Native.Mac/Build/Products/$CONFIGURATION/GVFS.VirtualFileSystemHook $VFS_PUBLISHDIR || exit 1
cp $VFS_OUTPUTDIR/GVFS.Native.Mac/Build/Products/$CONFIGURATION/GVFS.PostIndexChangedHook $VFS_PUBLISHDIR || exit 1
# Publish after native build, so installer package can include the native binaries.
dotnet publish $VFS_SRCDIR/GVFS.sln /p:Configuration=$CONFIGURATION.Mac /p:Platform=x64 -p:CopyPrjFS=true --runtime osx-x64 --framework netcoreapp2.1 --self-contained --output $VFS_PUBLISHDIR /maxcpucount:1 /warnasmessage:MSB4011 || exit 1
echo 'Copying Git installer to the output directory...'
$VFS_SCRIPTDIR/PublishGit.sh $GITPATH || exit 1
echo 'Installing shared data queue stall workaround...'
# We'll generate a temporary project if and only if we don't find the correct dylib already in place.
BUILDDIR=$VFS_OUTPUTDIR/GVFS.Build
if [ ! -e $BUILDDIR/libSharedDataQueue.dylib ]; then
  cp $VFS_SRCDIR/nuget.config $BUILDDIR
  dotnet new classlib -n Restore.SharedDataQueueStallWorkaround -o $BUILDDIR --force
  dotnet add $BUILDDIR/Restore.SharedDataQueueStallWorkaround.csproj package --package-directory $VFS_PACKAGESDIR SharedDataQueueStallWorkaround --version '1.0.0'
  cp $VFS_PACKAGESDIR/shareddataqueuestallworkaround/1.0.0/libSharedDataQueue.dylib $BUILDDIR/libSharedDataQueue.dylib
fi
echo 'Running VFS for Git unit tests...'
$VFS_PUBLISHDIR/GVFS.UnitTests || exit 1
| true
|
d99867ce296e8647df6090c924d267eda2b2ba43
|
Shell
|
opsh-cookbooks/psql
|
/recipes/default.sh
|
UTF-8
| 403
| 2.96875
| 3
|
[] |
no_license
|
# Install PostgreSQL 9.3: on Ubuntu 14.04 use the distro package; on other
# Ubuntu releases add the upstream PGDG apt repository first.
# ${node[os]}/${node[version]} are supplied by the opsh cookbook framework.
case "${node[os]} ${node[version]}" in
  ubuntu*14.04*)
    apt-install postgresql-9.3
    ;;
  ubuntu*)
    # Release codename (e.g. "trusty") for the PGDG repo line.
    code="$(lsb_release -c | awk '{print $2}')"
    echo "deb http://apt.postgresql.org/pub/repos/apt/ ${code}-pgdg main" > /etc/apt/sources.list.d/psql.list
    wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | \
      apt-key add -
    apt-get update
    # NOTE(review): no -y here, so apt-get may prompt — confirm whether this
    # recipe is expected to run non-interactively like the 14.04 branch.
    apt-get install postgresql-9.3
    ;;
esac
| true
|
ebc10b7239357ec0536e258f8a460b3a7eff61a3
|
Shell
|
lynnlangit/sample-data
|
/2_DB_scripts/Redshift_DW/0_redshift.sh
|
UTF-8
| 1,087
| 2.671875
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Runbook for setting up an AWS Redshift demo warehouse and loading sample
# data via the Navicat client.
# NOTE(review): the bare "*.sql" lines below are executed as commands if this
# file is actually run; they appear to be references to SQL files meant to be
# run in Navicat — confirm this file is documentation, not an executable.
# 1. Manually create a Redshift instance
  # Setup Redshift with all defaults and with public access, create a database called 'demodb'
  # Add inbound rule for public access to port 5439 with your pubic IP address to VPC security group (default)
  # Download 14-day trial Navicat as client
  # Donnect to instance endpoint, rather than cluster if using more than one node
  # Connection string includes 1)endpoint 2)port <3306> 3) database use 'demodb', it's <'postgres'> by default 3)username 4)password
# 2. Create a database and tables with a new query in Navicat
create-tables.sql
  # Refresh and switch to that database
# 3. Prepare Data
  # Download and upzip sample data (or use the data in this directory)
  # Create a s3 bucket
  # Change the bucketname in 'customer-fw-manifest' file **BEFORE** you upload it
  # Upload all sample data to your bucket
# 4. Prepare to laod Data
  # EDIT the bucket path in the 'load-tables.sql' file for these tables only: part, customer, dwdate
# 5. Load data with a new query in Navicat
load-tables.sql
# 6. Query data with new query in Navicat
query-data.sql
| true
|
1ea5bbadf3c5c4b48e118ef8d30bd699232756c8
|
Shell
|
HiroNakamura/aprendiendo-cobol
|
/temario/chapter1/compila.sh
|
UTF-8
| 371
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# Compile a free-format COBOL source with GnuCOBOL and run the result.
# Usage: ./compila.sh <name>   — compiles <name>.cbl into <name>.exe
# valida que se envie un parametro (require the base file name)
if [ -z "$1" ] ; then
	echo "[-] debes enviar el nombre del archivo";
	echo "./compile [nombre]";
	exit 1;
fi
# compilamos el archivo (-x executable, -free free source format)
cobc -x -free "$1.cbl" -o "$1.exe"
# BUG FIX: the produced artifact is "$1.exe"; the original tested the bare
# name "$1", which never exists, so the script always bailed out here.
if [ ! -f "$1.exe" ] ; then
	echo "[-]";
	exit 1;
fi
# BUG FIX: chmod/exec the actual executable "$1.exe" (the original chmod'ed
# the non-existent "$1" before running "./$1.exe").
chmod 777 "$1.exe"
./"$1.exe"
| true
|
a59aba9732b8eb22e4271be05764b1dae1df6d42
|
Shell
|
ultreme/m1ch3l
|
/irc/public/vadim.cmd
|
UTF-8
| 156
| 2.640625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Send "BON ANNIV" as FIGlet-style banner art, line by line, to an IRC
# channel through a relay listening on localhost.
#   $1 relay port, $2 sender id, $3 channel, $4 user (currently unused)
po=$1
se=$2
ch=$3
us=$4
# Split banner output on newlines only (banner lines contain spaces).
IFS="
"
for line in $(banner "BON ANNIV")
do
  # -q 1: give the relay a second to read before closing the connection.
  echo "$se 1 PRIVMSG $ch :$line" | nc -q 1 localhost $po
done
| true
|
6b25c945ffbcf3a566c534e427e4eafda24efe9e
|
Shell
|
drewwalters96/airshipctl
|
/tools/airship-in-a-pod/artifact-setup/assets/entrypoint.sh
|
UTF-8
| 1,532
| 3.390625
| 3
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Airship-in-a-pod artifact-setup entrypoint: either restores cached
# airshipctl artifacts or clones the manifest repo(s) and builds airshipctl,
# then signals completion to the pod supervisor.
set -ex

# Clone a git repository at a specific ref into $ARTIFACTS_DIR/<name>.
#   $1 directory name, $2 remote URL, $3 ref to fetch and check out
function cloneRepo(){
    repo_name=$1
    repo_url=$2
    repo_ref=$3

    repo_dir="$ARTIFACTS_DIR/$repo_name"
    mkdir -p "$repo_dir"
    cd "$repo_dir"

    git init
    git fetch "$repo_url" "$repo_ref"
    git checkout FETCH_HEAD
}

if [[ "$USE_CACHED_ARTIFACTS" = "true" ]]
then
    printf "Using cached airshipctl\n"
    # BUG FIX: the glob must sit outside the quotes — quoted, cp looked for
    # a file literally named "*" inside $CACHE_DIR and copied nothing.
    cp -r "$CACHE_DIR"/* "$ARTIFACTS_DIR"
else
    printf "Waiting 30 seconds for the libvirt and docker services to be ready\n"
    sleep 30

    cloneRepo "$MANIFEST_REPO_NAME" "$MANIFEST_REPO_URL" "$MANIFEST_REPO_REF"

    # airshipctl itself is needed even when the manifests live elsewhere.
    if [[ "$MANIFEST_REPO_NAME" != "airshipctl" ]]
    then
      cloneRepo airshipctl https://github.com/airshipit/airshipctl "$AIRSHIPCTL_REF"
    fi

    cd "$ARTIFACTS_DIR/$MANIFEST_REPO_NAME"
    if [[ "$MANIFEST_REPO_NAME" == "airshipctl" ]]
    then
      ./tools/deployment/21_systemwide_executable.sh
    else
      ./tools/deployment/airship-core/21_systemwide_executable.sh
    fi
    # Preserve the built binary alongside the artifacts.
    mkdir -p bin
    cp "$(which airshipctl)" bin
fi

/signal_complete artifact-setup
| true
|
38e70d66d6e30c73606964206c06aeb896f27eaa
|
Shell
|
aaronzirbes/burrow-cli
|
/burrow
|
UTF-8
| 312
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch the burrow CLI jar, writing a default config file on first run.
if [ ! -f ~/.burrow-cli.config ]; then
    echo "Creating default config file ~/.burrow-cli.config"
    echo "# Burrow host aliases
# default alias to use
default: local
# alias definitions
local: http://localhost:8000" > ~/.burrow-cli.config
fi
# BUG FIX: forward ALL arguments to the jar; the original passed only the
# unquoted $1, dropping extra arguments and splitting ones with spaces.
java -jar ~/.burrow-cli/burrow-cli-all.jar "$@"
| true
|
d215aaabcc9c53e96d59976469418ab8aca9936b
|
Shell
|
aming-note/note
|
/ShellScript/log_to_sql.sh
|
UTF-8
| 1,394
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Incrementally push new lines of a comma-separated nginx access log into
# the MySQL table test.bet_log. /home/linshi stores the last processed line
# so repeated runs only insert entries added since the previous run.
#
# NOTE(review): field values are interpolated directly into the SQL text —
# injection-prone and fragile for values containing quotes; consider a
# parameterized loader. Also "for i in $file" splits on whitespace, so this
# assumes log lines contain no spaces — confirm against the log format.
log_file="/usr/local/nginx/logs/bet.log"
err_line=0
# Walk the whole log; once the stored cursor line is seen (kg=1), start
# sending every subsequent line to MySQL and advance the cursor.
function get_new_log(){
kg=0
file=`cat $log_file`
cut=`cat /home/linshi 2>/dev/null`
for i in $file;do
  if [ -f /home/linshi ];then
    # Nothing new if the cursor already equals the log's last line.
    if [ "$cut" = "`tail -n 1 $log_file`" ];then
      break
    fi
    if [ $kg = "1" ];then
      send_mysql $i
      echo $i > /home/linshi
    fi
    if [ "$cut" = "$i" ];then
      kg=1
    fi
  else
    # First run: no cursor file yet, push every line.
    for a in $file;do
      send_mysql $a
      echo $a >/home/linshi
    done
  fi
done
}
# Split one CSV log line into its 6 fields and INSERT it; lines with fewer
# fields are counted in err_line instead.
function send_mysql(){
check=`echo $1 | awk -F ',' '{print $6}' | wc -c`
if [ $check -ge 2 ];then
  remote_addr=`echo $1 | awk -F ',' '{print $1}'`
  time_local=`echo $1 | awk -F ',' '{print $2}'`
  request_time=`echo $1 | awk -F ',' '{print $3}'`
  http_referer=`echo $1 | awk -F ',' '{print $4}'`
  http_cookie=`echo $1 | awk -F ',' '{print $5}'`
  request_body=`echo $1 | awk -F ',' '{print $6}'`
  mysql -ubet_user -p'123456' -h 192.168.1.84 -e "INSERT INTO test.bet_log(
  remote_addr,time_local,request_time,http_referer,http_cookie,request_body) VALUES(
  $remote_addr,$time_local,$request_time,$http_referer,$http_cookie,$request_body)"
else
  err_line=`expr $err_line + 1`
fi
}
get_new_log
# Record how many malformed lines were skipped during this run.
if [ $err_line -gt 0 ];then
  now_date=`date '+%Y-%m-%d_%H:%M:%S'`
  mysql -ubet_user -p'123456' -h 192.168.1.84 -e "INSERT INTO test.bet_log(time_local,info) VALUES('$now_date','Log_Code_Error_$err_line')"
fi
| true
|
1c12144340eab444abc0502b9c9b436e8ab334c4
|
Shell
|
gdelanoy/shells
|
/scripts/add-blacklist-to-hosts.sh
|
UTF-8
| 1,882
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Enable a /etc/hosts-based web blacklist: if the "Noirlistez" marker is not
# present yet, append blocking entries and notify the desktop user (UID 1000,
# user "guillaume"). Must run as root to write /etc/hosts. The notify-send
# strings are user-facing French text and are left untouched.
grep -q Noirlistez /etc/hosts
if [ $? -eq 0 ]
then
  # Marker found: filtering is already active, just tell the user.
  sudo -u guillaume DISPLAY=:0 DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/1000/bus notify-send -t 90000 -i netsurf "Filtrage Web déjà activé 🤪" "\nIl semble que le filtrage par adresses soit déjà en cours sur ton système ..."
  exit 1
else
  # Blackhole each distracting domain by pointing it at 0.0.0.0.
  echo "0.0.0.0 www.twitter.com # Noirlistez-moi !!!" >> /etc/hosts
  echo "0.0.0.0 facebook.com # Noirlistez-moi !!!" >> /etc/hosts
  echo "0.0.0.0 twitter.com # Noirlistez-moi !!!" >> /etc/hosts
  echo "0.0.0.0 abs.twimg.com # Noirlistez-moi !!!" >> /etc/hosts
  echo "0.0.0.0 chat.peterschmitt.fr # Noirlistez-moi !!!" >> /etc/hosts
  echo "0.0.0.0 www.reddit.com # Noirlistez-moi !!!" >> /etc/hosts
  # Usage : echo "0.0.0.0 @domain_to_blacklist # Noirlistez-moi !!!" >> /etc/hosts
  # echo "Access to brainspace-wasting sites is restricted BY NOW so that you can get a life. Use Focuswriter instead ..." | wall
  sudo -u guillaume DISPLAY=:0 DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/1000/bus notify-send -t 90000 -i netsurf "Le filtrage Web est activé 🤓" "\nSouviens-toi que tu ne fais pas ça pour te fliquer\n toi-même, mais pour t inciter à tirer le meilleur parti de ton temps.\nCourage et bonne humeur !"
fi
# Optional (disabled) restart of Firefox so the new hosts rules take effect:
# ps -aux | grep firefox >/dev/null
# if [ $? -eq 0 ]
# 	then
# 	pkill firefox
# 	wait
# 	xhost 127.0.0.1
# 	sudo -u guillaume -H firefox &
# 	sudo -u guillaume DISPLAY=:0 DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/1000/bus notify-send -t 90000 -i terminal "Redémarrage du navigateur 💥" "\nFirefox redémarre pour tenir compte des nouvelles règles de filtrage ..."
# else
# To adapt this script change my name to your username in the above line, as well as UID (1000 here) !
# 	exit
# fi
| true
|
b10350c96d24d052c1098414408f96659095d6c6
|
Shell
|
GunterMueller/ST_STX_Fork
|
/build/stx/rules/hg-clone.sh
|
UTF-8
| 1,565
| 3.9375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# $Header: /cvs/stx/stx/rules/hg-clone.sh,v 1.2 2014-02-28 14:14:05 vrany Exp $
#
# Simple script to clone Mercurial repository. Used from stub entries in CVS
# to automatically fetch code from there.
#
# Usage: hg-clone.sh <repository> <packagedir>
#
#

# Print usage text and exit with 126.
function usage() {
    # BUG FIX: a here-document must be fed to `cat`; `echo <<USAGE` reads the
    # here-doc but prints nothing.
    cat <<USAGE
Usage: hg-clone.sh <repository> <packagedir>
USAGE
    exit 126
}

# Print an error message and exit with the given status (default 1).
function error() {
    echo "ERROR: $1";
    if [ -z "$2" ]; then
        exit 1
    else
        exit "$2"
    fi
}

# BUG FIX: the original compared the literal string "$" against "--help";
# the first argument is "$1".
if [ "$1" == "--help" ]; then
    usage
fi

REPO=$1
PKGDIR=$2
CLONE="$PKGDIR.tmp"

if [ -z "$REPO" ]; then
    error "No repository specified"
fi
if [ -z "$PKGDIR" ]; then
    error "No directory specified"
fi

if ! hg --version &> /dev/null; then
    # BUG FIX: use `cat` (not `echo`) and a quoted delimiter so the embedded
    # backticks are printed literally instead of being command-substituted
    # (which would have tried to run the very `hg` we just failed to find).
    cat <<'END'
Could not find `hg` command, perhaps Mercurial client is not installed.
Please install mercurial and try again. You may find pre-built packages
and installation instruction at:
http://mercurial.selenic.com/wiki/Download
END
    exit 2
fi

if [ -d "$PKGDIR/.hg" ]; then
    error "$PKGDIR is already a Mercurial repository"
fi

# Remove the temporary clone if we are interrupted mid-way.
trap "rm -rf $CLONE; exit" SIGHUP SIGINT SIGTERM
if [ -d "$CLONE" ]; then
    rm -rf "$CLONE"
fi
if ! hg clone "$REPO" "$CLONE"; then
    echo "ERROR: Cannot clone repository"
    echo "Cleaning up..."
    rm -rf "$CLONE"
    exit 5
fi

# Swap the fresh clone into place; keep the old directory until the new one
# is safely moved (the second trap restores it on interruption).
mv "$PKGDIR" "${PKGDIR}.old" || error "Cannot move old directory aside"
trap "rm -rf $CLONE; mv \"$PKGDIR.old\" \"$PKGDIR\"; exit" SIGHUP SIGINT SIGTERM
mv "$CLONE" "$PKGDIR" || error "Cannot move clone to $PKGDIR"
#rm -rf "$PKGDIR.old"
| true
|
a9e7a944c545146dd8d52e1751f83f132851cbe7
|
Shell
|
najjaray/Java-Twitter-light
|
/setup.sh
|
UTF-8
| 1,437
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch the twitter-light data and web servers on remote hosts over ssh,
# then print the web endpoints for clients.
# BUG FIX: the original first line was "# /bin/bash" — a comment, not a
# shebang.
clear
echo "initalizing CS682 project 1.5"
echo "Enter remove server login id:"
RemoteLogin='ayalnajjar'
#read RemoteLogin
# getting data server 1 informaions
echo "Enter Address for data server"
read DatSrvAddr1
echo "Enter Port for data server"
# BUG FIX: this answer was stored in DatSrvAddrPort2 (then overwritten
# below), leaving DatSrvAddrPort1 unset in every ssh command.
read DatSrvAddrPort1
# getting data server 2 informaions
echo "Enter Address for data server"
read DatSrvAddr2
echo "Enter Port for data server"
read DatSrvAddrPort2
# getting web server 1 informaions
echo "Enter Address for Web server #1:"
read WebSrvAddr1
echo "Enter Port for Web server #1:"
read WebSrvAddrPort1
# getting web server 2 informaions
echo "Enter Address for Web server #2:"
read WebSrvAddr2
# BUG FIX: the prompt said "#1" while reading web server #2's port.
echo "Enter Port for Web server #2:"
read WebSrvAddrPort2
ssh "$RemoteLogin@$DatSrvAddr1" "java -jar twitter-light.jar data $DatSrvAddr1 $DatSrvAddrPort1 &"
# BUG FIX: data server 2 must be started on its own host, not on server 1.
ssh "$RemoteLogin@$DatSrvAddr2" "java -jar twitter-light.jar data $DatSrvAddr2 $DatSrvAddrPort2 $DatSrvAddr1 $DatSrvAddrPort1&"
ssh "$RemoteLogin@$WebSrvAddr1" "java -jar twitter-light.jar web $WebSrvAddrPort1 $DatSrvAddr1 $DatSrvAddrPort1 &"
ssh "$RemoteLogin@$WebSrvAddr2" "java -jar twitter-light.jar web $WebSrvAddrPort2 $DatSrvAddr2 $DatSrvAddrPort2 $DatSrvAddr1 $DatSrvAddrPort1 &"
echo "Please use option 1 to setup your client to access one of the folowing web servers:"
echo "Web Server #1 address:$WebSrvAddr1 Port:$WebSrvAddrPort1"
# BUG FIX: the second endpoint was mislabeled "#1".
echo "Web Server #2 address:$WebSrvAddr2 Port:$WebSrvAddrPort2"
| true
|
600b1d04a412fac420995e60067ac9cddc5027be
|
Shell
|
marcopeg/humble-cli
|
/bin/inc/is-absolute-path.sh
|
UTF-8
| 109
| 3.40625
| 3
|
[] |
no_license
|
# Print "true" if the given path begins with a slash, "false" otherwise.
isAbsolutePath() {
    case "$1" in
        /*) echo true ;;
        *)  echo false ;;
    esac
}
| true
|
3f2bd06581a5365d5a9d48ef71158f5c067b6c85
|
Shell
|
marhatha/openshift4x-poc-sriov
|
/sriov.sh
|
UTF-8
| 7,774
| 2.890625
| 3
|
[] |
no_license
|
# Air-gap helper for the OpenShift SR-IOV network operator: rewrites image
# references to a private registry and mirrors the operator images.
echo "clone this github link https://github.com/openshift/sriov-network-operator"
sleep 2
echo "After cloning is done go to sriov-network-operator directory and run this sriov.sh script"
sleep 5
echo "make sure you copy your pull secret file in sriov-network-operator directory"
sleep 5
##############################################################
#       UPDATE TO MATCH YOUR ENVIRONMENT
##############################################################
OCP_RELEASE=4.1.4
#RHCOS_BUILD=4.1.0
#POCDIR=ocp4poc

#############################################################
#	EXPERIMENTAL
##############################################################
#LAST_3_OCP_RELEASES=$(curl -s https://quay.io/api/v1/repository/${UPSTREAM_REPO}/ocp-release/tag/\?limit=3\&page=1\&onlyActiveTags=true | jq -r '.tags[].name')
AIRGAP_REG='test.example.com:5000'
AIRGAP_REPO='ocp4/openshift4'
UPSTREAM_REPO='quay.io'  ## or 'openshift'
AIRGAP_SECRET_JSON='pull-secret-2.json'
#export OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE=${AIRGAP_REG}/${AIRGAP_REPO}:${OCP_RELEASE}

##############################################################
# DO NOT MODIFY AFTER THIS LINE
############################################################################################################################

# Print supported sub-commands.
usage() {
    echo -e "Usage: $0 [ modify_files ] "
    echo -e "\t\t(extras) [  mirror | clone ]"
}

# Download the operator image set into ./images.
# NOTE(review): curl-ing a "registry/image:tag" URL does not download an
# image — this likely needs skopeo/podman pull; confirm intent.
get_sriov_images() {
    mkdir images ; cd images
    curl -J -L -O ${UPSTREAM_REPO}/openshift/origin-sriov-network-device-plugin:4.2.0
    curl -J -L -O ${UPSTREAM_REPO}/openshift/origin-sriov-dp-admission-controller:4.2.0
    curl -J -L -O ${UPSTREAM_REPO}/openshift/origin-sriov-network-operator
    curl -J -L -O ${UPSTREAM_REPO}/openshift/origin-sriov-cni:4.2.0
    cd ..
    tree images
}

# Copy each upstream image into the air-gapped registry with skopeo.
# NOTE(review): every copy targets the SAME destination tag
# ${AIRGAP_REPO}:${OCP_RELEASE}, so each overwrites the previous one —
# confirm the intended per-image destination tags.
mirror() {
    skopeo copy --authfile=${AUTH_JSON_FILE} docker://${UPSTREAM_REPO}/openshift/origin-sriov-network-device-plugin:4.2.0  \
    docker://${AIRGAP_REG}/${AIRGAP_REPO}:${OCP_RELEASE}
    skopeo copy --authfile=${AUTH_JSON_FILE} docker://${UPSTREAM_REPO}/openshift/origin-sriov-dp-admission-controller:4.2.0  \
    docker://${AIRGAP_REG}/${AIRGAP_REPO}:${OCP_RELEASE}
    skopeo copy --authfile=${AUTH_JSON_FILE} docker://${UPSTREAM_REPO}/openshift/origin-sriov-network-operator  \
    docker://${AIRGAP_REG}/${AIRGAP_REPO}:${OCP_RELEASE}
    skopeo copy --authfile=${AUTH_JSON_FILE} docker://${UPSTREAM_REPO}/openshift/origin-sriov-network-operator  \
    docker://${AIRGAP_REG}/${AIRGAP_REPO}:${OCP_RELEASE}
    skopeo copy --authfile=${AUTH_JSON_FILE} docker://${UPSTREAM_REPO}/openshift/origin-sriov-cni:4.2.0  \
    docker://${AIRGAP_REG}/${AIRGAP_REPO}:${OCP_RELEASE}
}
# Rewrite the upstream (quay.io) image references in the operator deploy
# files, hack/env.sh and the manifests so they point at the air-gapped
# registry, then back up the originals and install the rewritten copies.
modify_files() {
    # BUG FIX: the sed scripts were single-quoted, so ${UPSTREAM_REPO} and
    # ${AIRGAP_REG} were passed to sed as literal text and the substitutions
    # never matched anything. Double quotes let the shell expand them.
    sed -e "s|${UPSTREAM_REPO}/openshift/origin-sriov-network-operator|${AIRGAP_REG}/origin-sriov-network-operator|g" < ./deploy/operator.yaml > test.yaml
    sed -e "s|${UPSTREAM_REPO}/openshift/origin-sriov-cni:4.2.0|${AIRGAP_REG}/openshift/origin-sriov-cni:4.2.0|g" < test.yaml > test1.yaml
    sed -e "s|${UPSTREAM_REPO}/openshift/origin-sriov-network-device-plugin:4.2.0|${AIRGAP_REG}/openshift/origin-sriov-network-device-plugin:4.2.0|g" < test1.yaml > test2.yaml
    sed -e "s|${UPSTREAM_REPO}/openshift/origin-sriov-dp-admission-controller:4.2.0|${AIRGAP_REG}/openshift/origin-sriov-dp-admission-controller:4.2.0|g" < test2.yaml > operator.yaml
    rm test.yaml test1.yaml test2.yaml
    echo "operator.yaml file completed successfully"
    sleep 2
    sed -e "s|${UPSTREAM_REPO}/openshift/origin-sriov-cni:4.2.0|${AIRGAP_REG}/openshift/origin-sriov-cni:4.2.0|g" < ./hack/env.sh > test.yaml
    sed -e "s|${UPSTREAM_REPO}/openshift/origin-sriov-network-device-plugin:4.2.0|${AIRGAP_REG}/openshift/origin-sriov-network-device-plugin:4.2.0|g" < test.yaml > test1.yaml
    sed -e "s|${UPSTREAM_REPO}/openshift/origin-sriov-dp-admission-controller:4.2.0|${AIRGAP_REG}/openshift/origin-sriov-dp-admission-controller:4.2.0|g" < test1.yaml > test2.yaml
    sed -e "s|${UPSTREAM_REPO}/pliurh/sriov-network-operator|${AIRGAP_REG}/pliurh/sriov-network-operator|g" < test2.yaml > env.sh
    rm test.yaml test1.yaml test2.yaml
    echo "env.sh is completed succesfuly"
    sleep 2
    sed -e "s|${UPSTREAM_REPO}/openshift/origin-sriov-network-operator:latest|${AIRGAP_REG}/openshift/origin-sriov-network-operator:latest|g" < ./manifests/latest > test.yaml
    sed -e "s|${UPSTREAM_REPO}/openshift/origin-sriov-cni:4.2.0|${AIRGAP_REG}/openshift/origin-sriov-cni:4.2.0|g" < test.yaml > test1.yaml
    sed -e "s|${UPSTREAM_REPO}/openshift/origin-sriov-network-device-plugin:4.2.0|${AIRGAP_REG}/openshift/origin-sriov-network-device-plugin:4.2.0|g" < test1.yaml > test2.yaml
    sed -e "s|${UPSTREAM_REPO}/openshift/origin-sriov-dp-admission-controller:4.2.0|${AIRGAP_REG}/openshift/origin-sriov-dp-admission-controller:4.2.0|g" < test2.yaml > latest
    rm test.yaml test1.yaml test2.yaml
    echo "manifest file is completed succesfuly"
    sleep 2
    sed -e "s|${UPSTREAM_REPO}/openshift/origin-sriov-network-operator:latest|${AIRGAP_REG}/openshift/origin-sriov-network-operator:latest|g" < ./manifests/4.2/sriov-network-operator.v0.0.1.clusterserviceversion.yaml > test.yaml
    sed -e "s|${UPSTREAM_REPO}/openshift/origin-sriov-cni:4.2.0|${AIRGAP_REG}/openshift/origin-sriov-cni:4.2.0|g" < test.yaml > test1.yaml
    sed -e "s|${UPSTREAM_REPO}/openshift/origin-sriov-network-device-plugin:4.2.0|${AIRGAP_REG}/openshift/origin-sriov-network-device-plugin:4.2.0|g" < test1.yaml > test2.yaml
    sed -e "s|${UPSTREAM_REPO}/openshift/origin-sriov-dp-admission-controller:4.2.0|${AIRGAP_REG}/openshift/origin-sriov-dp-admission-controller:4.2.0|g" < test2.yaml > sriov-network-operator.v0.0.1.clusterserviceversion.yaml
    rm test.yaml test1.yaml test2.yaml
    echo "clusterserviceversion file is completed succesfuly"
    sleep 2
    sed -e "s|${UPSTREAM_REPO}/openshift/origin-sriov-network-operator:latest|${AIRGAP_REG}/openshift/origin-sriov-network-operator:latest|g" < ./manifests/4.2/image-references > test.yaml
    sed -e "s|${UPSTREAM_REPO}/openshift/origin-sriov-cni:4.2.0|${AIRGAP_REG}/openshift/origin-sriov-cni:4.2.0|g" < test.yaml > test1.yaml
    sed -e "s|${UPSTREAM_REPO}/openshift/origin-sriov-network-device-plugin:4.2.0|${AIRGAP_REG}/openshift/origin-sriov-network-device-plugin:4.2.0|g" < test1.yaml > test2.yaml
    sed -e "s|${UPSTREAM_REPO}/openshift/origin-sriov-dp-admission-controller:4.2.0|${AIRGAP_REG}/openshift/origin-sriov-dp-admission-controller:4.2.0|g" < test2.yaml > image-references
    rm test.yaml test1.yaml test2.yaml
    echo "image reference file is completed succesfuly"
    sleep 2
    # Keep pristine copies of every file we are about to replace.
    echo "backing up the files stay tuned"
    cp ./deploy/operator.yaml ./deploy/operator_bkp.yaml
    cp ./hack/env.sh ./hack/env_bkp.sh
    cp ./manifests/latest ./manifests/latest_bkp
    cp ./manifests/4.2/sriov-network-operator.v0.0.1.clusterserviceversion.yaml ./manifests/4.2/sriov-network-operator.v0.0.1.clusterserviceversion_bkp.yaml
    cp ./manifests/4.2/image-references ./manifests/4.2/image-references_bkp
    echo "backup done \n"
    echo "Copying files now \n"
    sleep 2
    # Install the rewritten files over the originals.
    cp operator.yaml ./deploy/operator.yaml
    cp env.sh ./hack/env.sh
    cp latest ./manifests/latest
    cp sriov-network-operator.v0.0.1.clusterserviceversion.yaml ./manifests/4.2/sriov-network-operator.v0.0.1.clusterserviceversion.yaml
    cp image-references ./manifests/4.2/image-references
    echo "DONE DONE DONE"
}
# Clone the sriov-network-operator repository and enter it.
clone () {
    # BUG FIX: "git cone" was a typo for "git clone", so this sub-command
    # always failed.
    git clone https://github.com/openshift/sriov-network-operator
    cd sriov-network-operator
}
# Capture First param
# Dispatch to the requested sub-command; anything else prints usage.
key="$1"
case $key in
    modify_files)
        modify_files
        ;;
    get_sriov_images)
        get_sriov_images
        ;;
    mirror)
        mirror
        ;;
    clone)
        clone
        ;;
    *)
        usage
    ;;
esac

##############################################################
# END OF FILE
##############################################################
|
b98f39328994e52acb214ef39a5afdbded3aa23b
|
Shell
|
google/timesketch
|
/end_to_end_tests/tools/run_end_to_end_tests.sh
|
UTF-8
| 758
| 3.515625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Script to run end-to-end tests.
# Boots the Timesketch e2e docker compose stack if it is not already up,
# then runs the test driver inside the web-server container.
# Fail on any error
set -e

# Defaults
DEFAULT_OPENSEARCH_VERSION=1.2.2

# Set OpenSearch version to run
[ -z "$OPENSEARCH_VERSION" ] && export OPENSEARCH_VERSION=$DEFAULT_OPENSEARCH_VERSION

# Container ID for the web server
export CONTAINER_ID="$(sudo -E docker container list -f name=e2e_timesketch -q)"

# Start containers if necessary
if [ -z "$CONTAINER_ID" ]; then
  sudo -E docker compose -f ./docker/e2e/docker-compose.yml up -d
  /bin/sleep 120 # Wait for all containers to be available
  export CONTAINER_ID="$(sudo -E docker container list -f name=e2e_timesketch -q)"
fi

# Run tests.
sudo -E docker exec $CONTAINER_ID python3 /usr/local/src/timesketch/end_to_end_tests/tools/run_in_container.py
| true
|
3176c8c750bf8f03e03e33a5db6d466ea1f03968
|
Shell
|
Tencent/bk-base
|
/scripts/install/install_py_venv_pkgs.sh
|
UTF-8
| 4,747
| 3.890625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Install Python packages into a named virtualenv (BK-BASE deploy helper).
# See usage() below for the supported options.

set -eo pipefail

# Generic script-framework variables
PROGRAM=$(basename "$0")
VERSION=1.0
EXITCODE=0

# Global default variables
#PYTHON_PATH=/opt/py36/bin/python
PIP_OPTIONS=(--no-cache-dir)
ENCRYPT=
# error exit handler
err_trap_handler () {
MYSELF="$0"
LASTLINE="$1"
LASTERR="$2"
echo "${MYSELF}: line ${LASTLINE} with exit code ${LASTERR}" >&2
}
trap 'err_trap_handler ${LINENO} $?' ERR
usage () {
cat <<EOF
用法:
$PROGRAM [ -h --help -? 查看帮助 ]
[ -n, --virtualenv [必选] "指定virtualenv名字" ]
[ -w, --workon-home [可选] "指定WORKON_HOME路径,默认为\$HOME/.virtualenvs" ]
[ -p, --python-path [可选] "指定python的路径,默认为/opt/py27/bin/python" ]
[ -a, --project-home [可选] "指定python工程的家目录" ]
[ -s, --pkg-path [可选] "指定本地包路径" ]
[ -r, --req-file [必选] "指定requirements.txt的路径" ]
[ -e, --encrypt [可选] "指定的python解释器为加密解释器" ]
[ -v, --version [可选] 查看脚本版本号 ]
EOF
}
usage_and_exit () {
usage
exit "$1"
}
log () {
echo "$@"
}
error () {
echo "$@" 1>&2
usage_and_exit 1
}
warning () {
echo "$@" 1>&2
EXITCODE=$((EXITCODE + 1))
}
version () {
echo "$PROGRAM version $VERSION"
}
# 解析命令行参数,长短混合模式
(( $# == 0 )) && usage_and_exit 1
while (( $# > 0 )); do
case "$1" in
-n | --virtualenv )
shift
VENV_NAME=$1
;;
-w | --workon-home )
shift
WORKON_HOME=$1
;;
-p | --python-path )
shift
PYTHON_PATH=$1
;;
-a | --project-home )
shift
PROJECT_HOME=$1
;;
-e | --encrypt )
ENCRYPT=1
;;
-r | --req-file )
shift
REQ_FILE=$1
;;
-s | --pkg-path )
shift
PKG_PATH=$1
;;
--help | -h | '-?' )
usage_and_exit 0
;;
--version | -v | -V )
version
exit 0
;;
-*)
error "不可识别的参数: $1"
;;
*)
break
;;
esac
shift $(( $# == 0 ? 0 : 1 ))
done
# 参数合法性有效性校验,这些可以使用通用函数校验。
if [[ -z $VENV_NAME ]]; then
warning "-n must be specify a valid name"
fi
if ! [[ -r $REQ_FILE ]]; then
warning "requirement file path does'nt exist"
fi
if ! [[ -x $PYTHON_PATH ]]; then
warning "$PYTHON_PATH is not a valid executable python"
fi
if [[ -n $PROJECT_HOME ]] && [[ ! -d $PROJECT_HOME ]]; then
warning "$PROJECT_HOME (-a, --project-home) specify a non-exist directory"
fi
if [[ $EXITCODE -ne 0 ]]; then
exit "$EXITCODE"
fi
[[ -d "$WORKON_HOME" ]] || mkdir -p "$WORKON_HOME"
export WORKON_HOME
# pip固定死20.2.3,因为20.3.x 会存在解析依赖出错后就直接exit的问题
venv_opts=("-p" "$PYTHON_PATH" --no-download --no-periodic-update --pip 20.2.3)
if [[ -n "$PROJECT_HOME" ]]; then
venv_opts+=("-a" "$PROJECT_HOME")
fi
if [[ -d $CTRL_DIR/pip ]]; then
venv_opts+=(--extra-search-dir=$CTRL_DIR/pip)
fi
PYTHON_BIN_HOME=${PYTHON_PATH%/*}
export PATH=${PYTHON_BIN_HOME}:$PATH
if ! [[ -r ${PYTHON_BIN_HOME}/virtualenvwrapper.sh ]]; then
# 写死固定版本号,以免出现高版本不兼容的情况
local_options="--no-cache-dir --no-index --find-links $CTRL_DIR/pip"
if [ -d $CTRL_DIR/pip ]; then
"$PYTHON_BIN_HOME"/pip install ${local_options} pbr==5.5.1
"$PYTHON_BIN_HOME"/pip install ${local_options} virtualenv==20.1.0 virtualenvwrapper==4.8.4
else
log "'$CTRL_DIR/pip' 目录不存在,线上安装virtualenvwrapper"
"$PYTHON_BIN_HOME"/pip install pbr==5.5.1
"$PYTHON_BIN_HOME"/pip install virtualenv==20.1.0 virtualenvwrapper==4.8.4
fi
fi
# shellcheck source=/dev/null
export VIRTUALENVWRAPPER_PYTHON=${PYTHON_PATH}
source "${PYTHON_BIN_HOME}/virtualenvwrapper.sh"
# 加密解释器的特殊参数
if [[ "$ENCRYPT" -eq 1 ]]; then
venv_opts+=(--system-site-packages --always-copy)
fi
set +e
if ! lsvirtualenv | grep -w "$VENV_NAME"; then
if ! mkvirtualenv "${venv_opts[@]}" "$VENV_NAME"; then
echo "create venv $VENV_NAME failed"
exit 1
fi
fi
set -e
if [[ -d "$PKG_PATH" ]];then
PIP_OPTIONS+=("--find-links=$PKG_PATH" --no-index)
fi
"${WORKON_HOME}/$VENV_NAME/bin/pip" install -r "$REQ_FILE" "${PIP_OPTIONS[@]}"
if [[ $EXITCODE -ne 0 ]]; then
warning "pip install error"
exit "$EXITCODE"
fi
| true
|
253483c93e2f3f377f548768da04a8b76525f6d6
|
Shell
|
MDSLab/s4t-iotronic-standalone
|
/utils/net-scripts/add-network
|
UTF-8
| 193
| 3.21875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/sh
if [ "$#" -ne 2 ]; then
echo "add-network VLAN COUNT"
exit 1
fi
VLAN=$1
COUNT=$(($2*2))
PORT=`expr $COUNT + 10000`
IFACE="gre-lr$PORT"
bridge vlan add dev $IFACE vid $VLAN
| true
|
d043958a2d0759af8f5dc01c1453058f6b4b59cc
|
Shell
|
kenmoini/workshop-terminal
|
/terminal/etc/profile
|
UTF-8
| 646
| 3.578125
| 4
|
[
"Apache-2.0"
] |
permissive
|
unset BASH_ENV PROMPT_COMMAND ENV
# Read in additional application profile files.
for i in /opt/workshop/etc/profile.d/*.sh /opt/workshop/etc/profile.d/sh.local; do
if [ -r "$i" ]; then
. "$i" >/dev/null
fi
done
for i in /opt/app-root/etc/profile.d/*.sh /opt/app-root/etc/profile.d/sh.local; do
if [ -r "$i" ]; then
. "$i" >/dev/null
fi
done
case $- in
*i*)
# Interactive session. Try switching to bash.
if [ -z "$ZSH" ]; then # do nothing if running under bash already
zsh=$(command -v zsh)
if [ -x "$zsh" ]; then
export SHELL="$zsh"
exec "$zsh" -l
fi
fi
esac
| true
|
f3dd20a4b0fc2a208bd726222915312d58344234
|
Shell
|
nelson-portilla/pdfspliter
|
/script/splitPDF4serie.sh
|
UTF-8
| 1,045
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# -*- ENCODING: UTF-8 -*-
#Author: Nelson Portilla
if [ "$#" = "0" ]; then
echo $'\nsplitPDF [param1] [param2]\n'
echo "param1: Archivo pdf de entrada"
echo "param2: Archivo plano con la secuencia"$'\n'
exit
fi
filepdf=$1
listaserie=$2
numeropaginas=$(pdftk $filepdf dump_data | grep NumberOfPages | cut -c16-)
folder=${filepdf/.pdf/""}
mkdir $folder
paginas=$((10#$numeropaginas))
cd "$folder/"
paginicial=$((10#1))
while IFS='' read -r line || [[ -n "$line" ]]; do
if (($line > $paginas)); then
echo $'\nEl numero excede al total de paginas\n'
echo 'Corrija la serie en el archivo'
exit
else
echo "Text read from file: $line"
pdftk "$filepdf" cat $paginicial-$line output "$folder$paginicial-_-$line.pdf"
mv "$folder$paginicial-_-$line.pdf" "$folder"
fi
paginicial=$((10#$line))
done < "$2"
if (($paginicial < $paginas)); then
echo "Ultima particion: de $paginicial a $paginas"
pdftk "$filepdf" cat $paginicial-$paginas output "$folder$paginicial-_-$paginas.pdf"
mv "$folder$paginicial-_-$paginas.pdf" "$folder"
fi
| true
|
ac31ae7497b628dc50d6a2e13454df97d9651a3a
|
Shell
|
29prashanto/OpenShift_Multinode_Cluster
|
/workstation.sh
|
UTF-8
| 4,989
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
set -o nounset
set -o errexit
hostnamectl set-hostname workstation.lw.com
cat <<EOF > /etc/selinux/config
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
# enforcing - SELinux security policy is enforced.
# permissive - SELinux prints warnings instead of enforcing.
# disabled - No SELinux policy is loaded.
SELINUX=permissive
# SELINUXTYPE= can take one of three two values:
# targeted - Targeted processes are protected,
# minimum - Modification of targeted policy. Only selected processes are protected.
# mls - Multi Level Security protection.
SELINUXTYPE=targeted
EOF
mkdir /dvd
mount /dev/cdrom /dvd/
cat <<EOF > /etc/yum.repos.d/dvd.repo
[local_yum]
baseurl=file:///dvd
gpgcheck=0
EOF
cat <<EOF >> /etc/fstab
/dev/cdrom /dvd iso9660 defaults 0 0
EOF
yum install vim net-tools bash-completion -y
systemctl restart NetworkManager
exec bash
cat <<EOF > /etc/yum.repos.d/ose.repo
[ose]
baseurl=ftp://192.168.43.12/
gpgcheck=0
EOF
ssh-keygen -f ~/.ssh/id_rsa -N ''
for host in master.lw.com node1.lw.com node2.lw.com registry.lw.com workstation.lw.com ; do ssh-copy-id -i ~/.ssh/id_rsa.pub $host; done
yum install atomic-openshift-utils
mkdir /ws
cat <<EOF > /ws/ansible.cfg
[defaults]
remote_user = root
inventory = ./inventory
log_path = ./ansible.log
host_key_checking = False
EOF
cat <<EOF > /ws/inventory
[workstations]
workstation.lw.com
[nfs]
registry.lw.com
[masters]
master.lw.com
[etcd]
master.lw.com
[nodes]
master.lw.com
node1.lw.com
node2.lw.com
[OSEv3:children]
masters
etcd
nodes
nfs
#Variables needed by the prepare_install.yml playbook.
[nodes:vars]
#registry_local=registry.lw.com
registry_local=registry.access.redhat.com
use_overlay2_driver=true
insecure_registry=false
run_docker_offline=true
docker_storage_device=/dev/sdb
[OSEv3:vars]
#General Cluster Variables
openshift_deployment_type=openshift-enterprise
openshift_release=v3.9
openshift_image_tag=v3.9.14
openshift_disable_check=disk_availability,docker_storage,memory_availability
#Cluster Authentication Variables
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
openshift_master_htpasswd_users={'admin':'$apr1$Vd4/F6nT$xYB.UFGvcZeWPdMoAXSZJ1', 'developer': '$apr1$jhQYVWRa$A6LOPTN0dkSYnsGEhaHr4.'}
#OpenShift Networking Variables
os_firewall_use_firewalld=true
openshift_master_api_port=443
openshift_master_console_port=443
openshift_master_default_subdomain=myapp.lw.com
#NFS is an unsupported configuration
openshift_enable_unsupported_configurations=true
#OCR configuration variables
openshift_hosted_registry_storage_kind=nfs
openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
openshift_hosted_registry_storage_nfs_directory=/exports
openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
openshift_hosted_registry_storage_volume_name=registry
openshift_hosted_registry_storage_volume_size=40Gi
#OAB's etcd configuration variables
openshift_hosted_etcd_storage_kind=nfs
openshift_hosted_etcd_storage_nfs_options="*(rw,root_squash,sync,no_wdelay)"
openshift_hosted_etcd_storage_nfs_directory=/exports
openshift_hosted_etcd_storage_volume_name=etcd-vol2
openshift_hosted_etcd_storage_access_modes=["ReadWriteOnce"]
openshift_hosted_etcd_storage_volume_size=1G
openshift_hosted_etcd_storage_labels={'storage': 'etcd'}
#Modifications Needed for a Disconnected Install
oreg_url=registry.access.redhat.com/openshift3/ose-${component}:${version}
openshift_examples_modify_imagestreams=true
#openshift_docker_additional_registries=registry.lw.com
openshift_docker_additional_registries=registry.access.redhat.com
openshift_docker_blocked_registries=docker.io
#openshift_docker_blocked_registries=registry.access.redhat.com,docker.io
#Image Prefixes
openshift_web_console_prefix=registry.access.redhat.com/openshift3/ose-
openshift_cockpit_deployer_prefix='registry.access.redhat.com/openshift3/'
openshift_service_catalog_image_prefix=registry.access.redhat.com/openshift3/ose-
template_service_broker_prefix=registry.access.redhat.com/openshift3/ose-
ansible_service_broker_image_prefix=registry.access.redhat.com/openshift3/ose-
ansible_service_broker_etcd_image_prefix=registry.access.redhat.com/rhel7/
[nodes]
master.lw.com
node1.lw.com openshift_node_labels="{'region':'infra', 'node-role.kubernetes.io/compute':'true'}"
node2.lw.com openshift_node_labels="{'region':'infra', 'node-role.kubernetes.io/compute':'true'}"
EOF
#ansible nodes --list-hosts ==> to check the nodes list
#ansible node -m command -a id ==> to check connectivity
# Before installation chech eveything is fine.
cd /ws/
ansible-playbook /usr/share/ansible/openshift-ansible/playbook/prerequisites.yml
#OpenShift installation
cd /ws/
ansible-playbook /usr/share/ansible/openshift-ansible/playbook/deploy_cluster.yml
| true
|
c4605d3e9f0c01396d92b78ff1d13e713169dd93
|
Shell
|
csebesta/dotfiles
|
/scripts/.bin/bginfo
|
UTF-8
| 1,480
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Script for imitating bginfo functionality in Ubuntu
# Script for imitating the behavior of bginfo
# Start this script on system boot or login
# Color pallete: https://design.ubuntu.com/brand/colour-palette
# Candidate colors: 2C001E 333333 2D2D2D
# Text colors: FFFFFF F9F9F9 (with 2D2D2D) DEDEDE DCDCDC
# Written by Chad Sebesta
# Get screen resolution
RESOLUTION="$(xdpyinfo | awk '/dimensions/{print $2}')"
echo $RESOLUTION
# Get system information
echo $USER
echo $HOSTNAME
#CPU="$(awk -f /proc/cpuinfo '/'model name'/{print $0}')"
CPU="$(awk -F":" '/model name/{ print $2; exit }' /proc/cpuinfo)"
RAM="$(awk -F":" '/MemTotal/{ print $2; exit }' /proc/meminfo)"
echo $CPU
echo $RAM
# OS Kernel version can be had with : uname -mrs
#/proc/uptime
#/proc/meminfo
#/proc/version
# The information an admin is looking for is kernel version not distro
# Use custom background only if it exists and is selected
# Create background with solid color
# http://askubuntu.com/questions/66914/how-to-change-desktop-background-from-command-line-in-unity
# Use if command described in link above for backwards compatibility
#convert -size $RESOLUTION xc:#2C001E /tmp/bginfo.png
#convert -size $RESOLUTION xc:#2D2D2D /tmp/bginfo.jpg
#convert -size $RESOLUTION xc:#2D2D2D \
# -gravity center -font consola.ttf -pointsize 16 \
# label:TEST
# label:THIS
# /tmp/bginfo.jpg
# Set image as background
#gsettings set org.gnome.desktop.background \
#picture-uri file:///tmp/bginfo.jpg
| true
|
5467485293a7d2d67230ccea3e58e8692a5a344c
|
Shell
|
TryItOnline/tiosetup
|
/run-scripts
|
UTF-8
| 1,123
| 4.1875
| 4
|
[
"MIT",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
#!/bin/bash
LOGS=/var/log/tioupd
mkdir -p "$LOGS"
formattime() {
if [ $1 -lt 60 ]; then
echo "$1s"
else
echo "$((${1}/60))m $((${1}%60))s"
fi
}
if [ $# -lt 1 ]; then
echo "Usage: run-scripts <dir>"
exit 1
fi
if [ ! -d $1 ]; then
echo "Not a directory: $1"
exit 1
fi
for i in $(LC_ALL=C; echo ${1%/}/*) ; do
[ -d $i ] && continue
if [ -x $i ]; then
# run executable files
echo "$(date --rfc-3339=seconds) run-scripts[$$] ($1) starting $(basename $i)" >> "$LOGS/run-scripts.log"
echo "$(date --rfc-3339=seconds) Executing $(basename $i)..."
start=`date +%s`
echo "$(date --rfc-3339=seconds)" >> "$LOGS/$(basename $i).log"
$i >> "$LOGS/$(basename $i).log" 2>&1
status="$?"
end=`date +%s`
if [ "$status" -eq "0" ]; then
status="SUCCESS"
else
status="FAILURE: $status"
fi
echo "$(date --rfc-3339=seconds) Executed $(basename $i)... $(formattime $((end-start))) {$status}"
echo "$(date --rfc-3339=seconds) run-scripts[$$] ($1) finished $(basename $i) $(formattime $((end-start))) {$status}" >> "$LOGS/run-scripts.log"
fi
done
exit 0
| true
|
12ed79763c025b02214a91982298e47c1560c517
|
Shell
|
ejangelico/cepc_caltiming
|
/Time/steer/bak/Overlay.sh
|
UTF-8
| 4,844
| 2.734375
| 3
|
[] |
no_license
|
path="$PWD"
cd $path
num=10
n=10
cal=26.71
while [ "$n" -le "$num" ]
do
o=3
d=$[n-o]
CellSize=5
inum=2
i=2
while [ "$i" -le "$inum" ]
do
case $i in
1) energy=1 ;;
2) energy=5 ;;
3) energy=10 ;;
4) energy=25 ;;
5) energy=50 ;;
6) energy=75 ;;
7) energy=100 ;;
8) energy=125 ;;
9) energy=150 ;;
10) energy=175 ;;
*) echo "i wrong";;
esac
#OUTPUTDATA="H_diphoton"
#OUTPUTDATA="gamma_CellSize"
OUTPUTDATA="gamma"
export SimuWorkDir=$path/simu/
mkdir -p $SimuWorkDir/$OUTPUTDATA/
export tmpDir=$path/tmp_steer/
mkdir -p $tmpDir
echo \
"
<marlin>
<execute>
<processor name=\"MyG2CDHGC\"/>
<processor name=\"MyMarlinArbor\"/>
<processor name=\"MyBushConnect\"/>
<processor name=\"MyOverlay\"/>
<processor name=\"MyAnaOverlay\"/>
<processor name=\"MyAnaClu\"/>
<processor name=\"MyLCIOOutputProcessor\"/>
</execute>
<global>
<parameter name=\"LCIOInputFiles\">
/besfs/groups/higgs/users/zhaoh/cepc/myWorkSpace/SimplifiedGeometry/Generator/data/ScCal/Overlay/gamma_5GeV_CellSize1mm_L30_W28_Sc2/gamma_${energy}GeV_CellSize1mm_L30_W28_Sc2_xchange_${n}.slcio
</parameter>
<parameter name=\"GearXMLFile\" value=\"/besfs/groups/higgs/users/zhaoh/cepc/myWorkSpace/SimplifiedGeometry/Generator/data/ScCal/Overlay/gamma_5GeV_CellSize1mm_L30_W28_Sc2/GearOutput.xml\"/>
<parameter name=\"MaxRecordNumber\" value=\"1001\"/>
<parameter name=\"SkipNEvents\" value=\"0\"/>
<parameter name=\"SupressCheck\" value=\"false\"/>
<parameter name=\"Verbosity\" options=\"DEBUG0-4,MESSAGE0-4,WARNING0-4,ERROR0-4,SILENT\"> MESSAGE </parameter>
<parameter name=\"RandomSeed\" value=\"1234567890\" />
</global>
<processor name=\"MyLCIOOutputProcessor\" type=\"LCIOOutputProcessor\">
<parameter name=\"LCIOOutputFile\" type=\"string\" >
gamma_${energy}GeV_CellSize${CellSize}mm_L30_Sc2_x${d}_reco.slcio
</parameter>
<parameter name=\"LCIOWriteMode\" type=\"string\" value=\"WRITE_NEW\"/>
</processor>
<processor name=\"MyG2CDHGC\" type=\"G2CDHGC\">
<parameter name=\"CalibCalo\" type=\"float\"> ${cal} </parameter>
</processor>
<processor name=\"MyOverlay\" type=\"Overlay\">
<parameter name=\"CollectionMap\" type=\"StringVec\">
MCParticle MCParticle
SiCalCollection SiCalCollection
</parameter>
<parameter name=\"InputFileNames\" type=\"StringVec\">
/besfs/groups/higgs/users/zhaoh/cepc/myWorkSpace/ScECAL/result/tmp/gamma_5GeV_CellSize${CellSize}mm_L30_Sc2_x0_reco.slcio
</parameter>
<parameter name=\"NumberOverlayEvents\" type=\"int\">1 </parameter>
</processor>
<processor name=\"MyMarlinArbor\" type=\"MarlinArbor\">
</processor>
<processor name=\"MyBushConnect\" type=\"BushConnect\">
<parameter name=\"FlagDiagnosis\" type=\"int\">0 </parameter>
<parameter name=\"MCPMIMIC\" type=\"int\">1 </parameter>
</processor>
<processor name=\"MyAnaOverlay\" type=\"AnaOverlay\">
<parameter name=\"TreeOutputFile\" type=\"StringVec\">
AnaOverlay_gamma_${energy}GeV_CellSize${CellSize}mm_x${d}.root
</parameter>
<parameter name=\"OverwriteFile\" type=\"int\"> 1 </parameter>
<parameter name=\"Merge\" type=\"int\"> 1 </parameter>
</processor>
<processor name=\"MyAnaClu\" type=\"AnaClu\">
<parameter name=\"TreeOutputFile\" type=\"StringVec\">
AnaClu_gamma_${energy}GeV_CellSize${CellSize}mm_x${d}.root
</parameter>
<parameter name=\"OverwriteFile\" type=\"int\"> 1 </parameter>
</processor>
</marlin>
" > tmp_steer/gamma_${energy}GeV_CellSize${CellSize}mm_L30_Sc2_x${d}_reco.xml
echo \
"
cd /besfs/groups/higgs/users/zhaoh/cepc/myWorkSpace/ScECAL
source /besfs/groups/higgs/users/zhaoh/cepc/myWorkSpace/ScECAL/env.sh
cd result/tmp
Marlin /besfs/groups/higgs/users/zhaoh/cepc/myWorkSpace/ScECAL/steer/tmp_steer/gamma_${energy}GeV_CellSize${CellSize}mm_L30_Sc2_x${d}_reco.xml
cd /besfs/groups/higgs/users/zhaoh/cepc/myWorkSpace/ScECAL/steer
" > $SimuWorkDir/$OUTPUTDATA/gamma_${energy}GeV_CellSize${CellSize}mm_L30_Sc2_x${d}_reco.sh
chmod +x $SimuWorkDir/$OUTPUTDATA/gamma_${energy}GeV_CellSize${CellSize}mm_L30_Sc2_x${d}_reco.sh
#hep_sub $SimuWorkDir/$OUTPUTDATA/gamma_${energy}GeV_CellSize${CellSize}mm_L30_Sc2_x${d}_reco.sh -g higgs -o ${energy}GeV_CellSize${CellSize}mm_${d}.out -e ${energy}GeV_CellSize${CellSize}mm_${d}.err
. $SimuWorkDir/$OUTPUTDATA/gamma_${energy}GeV_CellSize${CellSize}mm_L30_Sc2_x${d}_reco.sh
let "i+=1"
done
let "n+=1"
done
#<parameter name=\"EcalHitCollections\" type=\"string\"> DigiSiHit </parameter>
#<parameter name=\"ThresholdsforArborBuilding\" type=\"FloatVec\"> 2 90 50 1.2 </parameter>
| true
|
6c047ea9dca2a2e03404659fe4cb793851c8049a
|
Shell
|
NixOS/nixpkgs-channels
|
/pkgs/servers/hylafaxplus/post-patch.sh
|
UTF-8
| 907
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
# `configure` (maybe others) set `POSIXLY_CORRECT`, which
# breaks the gcc wrapper script of nixpkgs (maybe others).
# We simply un-export `POSIXLY_CORRECT` after each export so
# its effects don't apply within nixpkgs wrapper scripts.
grep -rlF POSIXLY_CORRECT | xargs \
sed '/export *POSIXLY_CORRECT/a export -n POSIXLY_CORRECT' -i
# Replace strange default value for the nobody account.
if test -n "@maxuid@"
then
for f in util/faxadduser.c hfaxd/manifest.h
do
substituteInPlace "$f" --replace 60002 "@maxuid@"
done
fi
# Replace hardcoded `PATH` variables with proper paths.
# Note: `findutils` is needed for `faxcron`.
substituteInPlace faxcover/edit-faxcover.sh.in \
--replace 'PATH=/bin' 'PATH="@faxcover_binpath@"'
substituteInPlace etc/faxsetup.sh.in \
--replace 'PATH=/bin' 'PATH="@faxsetup_binpath@"'
# Create `config.site`
substitute "@configSite@" config.site --subst-var out
| true
|
fdab9b00311af6162d586b4c346dbbb91caf754b
|
Shell
|
Lachele/gems-gmml-workspace
|
/bin/clean.sh
|
UTF-8
| 274
| 3.140625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
source ./settings.sh
source ./etc/functions.sh
# Call each Docker Services' clean.sh script.
echo "Removing the GRPC Docker Service setup files."
if ! ( cd ${GW_GRPC_DIR} && bash ./bin/clean.sh ); then
print_error_and_exit
fi
# EXIT_SUCCESS
exit 0
| true
|
5202c813d54e76e3c443408ba3984bcb95683464
|
Shell
|
stefnadev/imagemin
|
/imagemin.sh
|
UTF-8
| 4,289
| 4.0625
| 4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
defaultUrl="http://localhost:8082"
CHECK_NOOP=.noop
concurrency=2
threshold=1
optCheckFileBase=.imagemin.list
OPT_CHECK_DIR="${OPT_CHECK_DIR:-/var/www/vhosts}"
optCheckFile="$OPT_CHECK_DIR/$optCheckFileBase"
BASE_DIR=$(dirname $0)
. ${BASE_DIR}/funcs.sh
usage() {
echo -e "Usage: $0: PATH [MTIME] [URL] [CONCURRENCY] [THRESHOLD]\n" >&2
echo -e "\tPATH: Path to image files" >&2
echo -e "\tMTIME: Optional: Find files modified less than MTIME days ago" >&2
echo -e "\tURL: Optional: Send request to URL (default=$defaultUrl)" >&2
echo -e "\tCONCURRENCY: Optional: How many concurrent processes (default=$concurrency)" >&2
echo -e "\tTHRESHOLD: Optional: Set mininum optimization threshold (default=$threshold)" >&2
echo >&2
if [ "$1" != "" ]; then
exit $1
else
exit 1
fi
}
if [ "$1" == "-h" -o "$1" == "--help" ]; then
usage
fi
if [ $# -lt 1 ]; then
error "Missing arguments"
fi
if [ "$2" != "" ]; then
re='^[0-9]+$'
if ! [[ $2 =~ $re ]]; then
error "MTIME must be a number" "'$2' is not a number " 4
fi
fi
# Read parameters
IMAGEPATH="$1"
MTIME="$2"
if [ "$MTIME" == "0" ]; then
MTIME=""
fi
URL=${defaultUrl}
if [[ "$3" =~ https? ]]; then
URL="$3"
fi
if [ "$4" != "" ]; then
concurrency=$4
fi
if [ "$5" != "" ]; then
threshold="$5"
if [[ ! "$threshold" =~ ^[0-9]{1,2}$ ]]; then
error "Threshold must be a number between 1 and 99"
fi
fi
if [ ! -f "$optCheckFile" ]; then
error "The optimization check file is missing: '$optCheckFile'"
fi
if [ ! -w "$optCheckFile" ]; then
error "Could not write to '$optCheckFile'"
fi
ping "$URL"
findCommand() {
# Process all file extensions
ext=""
if [ $# -gt 1 ]; then
for i in $@; do
t="-iname *.$i"
if [ "$ext" ]; then
ext="$ext -o $t"
else
ext="$t"
fi
done
ext=" ( $ext ) "
else
ext="-iname *.$1"
fi
# Minimum of 128bytes
# And must be a file (no symlinks allowed)
ret="-type f -size +128c $ext"
# Add mtime parameter if in use
if [ "$MTIME" ]; then
ret="$ret -mtime -$MTIME"
fi
echo "$ret"
}
declare -A checkedDirs=()
shouldRunDir() {
local file="${1%/}"
local base="${2%/}"
local dir="$(dirname "$file")"
local ret
[ ${checkedDirs[$dir]+_} ] && {
test ${checkedDirs[$dir]} == 't'
return $?
}
if [ ! -f "$dir/$CHECK_NOOP" ] ; then
ret='t'
else
ret='f'
fi
if [ ${ret} == 'f' -o "$dir" == "$base" ]; then
checkedDirs[$dir]=${ret}
test ${ret} == 't'
return $?
fi
shouldRunDir "$dir" "$base"
if [ $? -eq 0 ]; then
ret='t'
else
ret='f'
fi
checkedDirs[$dir]=${ret}
test ${ret} == 't'
return $?
}
shouldRun() {
local file="${1%/}"
local base="${2%/}"
if ! shouldRunDir "$file" "$base" ; then
# Directory should not be processed
return 1
fi
# Special grep escape for . and [
local fileCheck=${file//\./\\.}
fileCheck=${fileCheck//\[/\\[}
local lastOptTime=$(grep "$fileCheck$" "$optCheckFile"|tail -n 1)
if [ "$lastOptTime" == "" ]; then
# Not yet optimized
return 0
fi
lastOptTime=${lastOptTime/%\ */}
mtime=$(stat -c %Y "$file")
if [ ${mtime} -gt ${lastOptTime} ]; then
# file has changed since last optimization
return 0
fi
return 1
}
optimizeDir() {
local findCmd=$(findCommand $@)
local time=$(date +%s)
local commandsFile=/tmp/_imagemin_cmds_$$
> ${commandsFile}
local script="$BASE_DIR/imagemin_one.sh"
if [ "$threshold" != "" ]; then
script="$script -t $threshold"
fi
IMAGEPATH="$(realpath "$IMAGEPATH")"
# we need to use while read to optimize files with space in filename
find "$IMAGEPATH" ${findCmd} | while read -r FILE; do
FILE="$(realpath "$FILE")"
if shouldRun "$FILE" "$IMAGEPATH"; then
echo "$script '$URL' '$(escape "$FILE")'" >> ${commandsFile}
echo "$time $FILE" >> "$optCheckFile"
else
echo "NOOP: $FILE"
fi
done
local count=$(wc -l ${commandsFile}|awk '{print $1}')
echo "OK All files gathered. Now running $count commands with concurrency of $concurrency (PID=$$):"
cat ${commandsFile} | xargs -I CMD --max-procs=${concurrency} bash -c CMD
local ltime=$(date +%s)
local totalTime="$(( $ltime - $time ))"
echo "Optimization done in $totalTime sec"
rm ${commandsFile}
}
# for debugging
#set -x
# no globbing so we won't accidentally match some files while running find
set -f
optimizeDir png jpg jpeg
| true
|
7463149849f19e45e7bfbeed457cdaa19a402c30
|
Shell
|
willrain/docker-test
|
/magento2/application/core/sbin/webapp-deploy.sh
|
UTF-8
| 2,515
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# 1. 소스 파일 다운로드
# 2. 소스 파일 압축 (불 필요한 파일 제거)
# 3. magento2-core-webapp docker images 생성
DOCKER_PRJ_HOME=/data01/docker/ssg-global-docker/magento2/application/magento2-core
PRJECT_NM=magento2-2.1.8
WEBAPP_ROOT=${DOCKER_PRJ_HOME}/webapp
SRC_DIR=${WEBAPP_ROOT}/src
TMP_DIR=${WEBAPP_ROOT}/tmp
mkdir -p ${WEBAPP_ROOT}
mkdir -p ${SRC_DIR}
mkdir -p ${TMP_DIR}
echo "
#--------------------------------------------------------------------------
# 1. 소스 파일 다운로드
#--------------------------------------------------------------------------"
cd ${TMP_DIR}
rm -rf ${TMP_DIR}/*
git clone git@gitlab.ssgadm.com:global/${PRJECT_NM}.git
echo "
#--------------------------------------------------------------------------
# 1-1. 파일 / 디렉토리 퍼미션 변경
#--------------------------------------------------------------------------"
find . -type f -exec chmod -c 644 {} \; > /dev/null 2>&1
find . -type d -exec chmod -c 755 {} \; > /dev/null 2>&1
echo "
#--------------------------------------------------------------------------
# 2. 소스 파일 압축 (불 필요한 파일 제거)
#--------------------------------------------------------------------------"
cd ${TMP_DIR}/${PRJECT_NM}
rm -rf .git .gitignore .htaccess.sample .php_cs .travis.yml .user.ini
cp ${WEBAPP_ROOT}/exclude.lst ./
tar -zcf ${PRJECT_NM}.tar.gz --exclude-from=exclude.lst --ignore-failed-read .
mv ${PRJECT_NM}.tar.gz ${SRC_DIR}/${PRJECT_NM}.tar.gz
cd ${TMP_DIR} && rm -rf ${TMP_DIR}/*
echo "
#--------------------------------------------------------------------------
# 3. magento2-core-webapp docker images 생성
#--------------------------------------------------------------------------"
cd ${WEBAPP_ROOT}
#docker rmi global-nexus:9003/magento2-core-webapp
docker build --force-rm=true -t global-nexus:9003/magento2-core-webapp .
echo "
#--------------------------------------------------------------------------
# 4. magento2-core-webapp docker images push
#--------------------------------------------------------------------------"
docker push global-nexus:9003/magento2-core-webapp
echo "
#--------------------------------------------------------------------------
# 5. 임시 파일 삭제
#--------------------------------------------------------------------------"
rm -rf ${SRC_DIR}/${PRJECT_NM}.tar.gz
#cd ${DOCKER_PRJ_HOME}
#docker-compose pull
#docker-compose build
#docker-compose up
| true
|
e96157b9d9891ba20cb0144c876ebaa07be0c104
|
Shell
|
yasassri/yasassri.github.io
|
/tools/release.sh
|
UTF-8
| 5,322
| 4.21875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# Release a new version to the GitLab flow production branch.
#
# For a new major/minor version, bump version on the main branch, and then merge into the production branch.
#
# For a patch version, bump the version number on the patch branch, then merge that branch into the main branch
# and production branch.
#
#
# Usage: run on main branch or the patch branch
#
# Requires: Git, NPM and RubyGems
set -eu
opt_pre=false # preview mode option
working_branch="$(git branch --show-current)"
STAGING_BRANCH="$(git symbolic-ref refs/remotes/origin/HEAD | sed 's@^refs/remotes/origin/@@')"
PROD_BRANCH="production"
GEM_SPEC="jekyll-theme-chirpy.gemspec"
NODE_CONFIG="package.json"
FILES=(
"_sass/jekyll-theme-chirpy.scss"
"_javascript/copyright"
"$GEM_SPEC"
"$NODE_CONFIG"
)
TOOLS=(
"git"
"npm"
"standard-version"
"gem"
)
help() {
echo "A tool to release new version Chirpy gem"
echo
echo "Usage:"
echo
echo " bash ./tools/release.sh [options]"
echo
echo "Options:"
echo " -p, --preview Enable preview mode, only package, and will not modify the branches"
echo " -h, --help Print this information."
}
_check_git() {
# ensure nothing is uncommitted
if [[ -n $(git status . -s) ]]; then
echo "Abort: Commit the staged files first, and then run this tool again."
exit 1
fi
# ensure the working branch is the main/patch branch
if [[ $working_branch != "$STAGING_BRANCH" && $working_branch != hotfix/* ]]; then
echo "Abort: Please run on the main branch or patch branches."
exit 1
fi
}
_check_src() {
if [[ ! -f $1 && ! -d $1 ]]; then
echo -e "Error: Missing file \"$1\"!\n"
exit 1
fi
}
_check_command() {
if ! command -v "$1" &>/dev/null; then
echo "Command '$1' not found"
exit 1
fi
}
_check_node_packages() {
if [[ ! -d node_modules || "$(du node_modules | awk '{print $1}')" == "0" ]]; then
npm i
fi
}
check() {
_check_git
for i in "${!FILES[@]}"; do
_check_src "${FILES[$i]}"
done
for i in "${!TOOLS[@]}"; do
_check_command "${TOOLS[$i]}"
done
_check_node_packages
}
_bump_file() {
for i in "${!FILES[@]}"; do
sed -i "s/v[[:digit:]]\+\.[[:digit:]]\+\.[[:digit:]]\+/v$1/" "${FILES[$i]}"
done
npx gulp
}
_bump_gemspec() {
sed -i "s/[[:digit:]]\+\.[[:digit:]]\+\.[[:digit:]]\+/$1/" "$GEM_SPEC"
}
# 1. Bump latest version number to the following files:
#
# - _sass/jekyll-theme-chirpy.scss
# - _javascript/copyright
# - assets/js/dist/*.js (will be built by gulp later)
# - jekyll-theme-chirpy.gemspec
#
# 2. Create a commit to save the changes.
bump() {
_bump_file "$1"
_bump_gemspec "$1"
if [[ $opt_pre = false && -n $(git status . -s) ]]; then
git add .
git commit -m "chore(release): $1"
fi
}
## Remove unnecessary theme settings
cleanup_config() {
cp _config.yml _config.yml.bak
sed -i "s/^img_cdn:.*/img_cdn:/;s/^avatar:.*/avatar:/" _config.yml
}
resume_config() {
mv _config.yml.bak _config.yml
}
# auto-generate a new version number to the file 'package.json'
standard_version() {
if $opt_pre; then
standard-version --prerelease rc
else
standard-version
fi
}
# Prevent changelogs generated on master branch from having duplicate content
# (the another bug of `standard-version`)
standard_version_plus() {
temp_branch="prod-mirror"
temp_dir="$(mktemp -d)"
git checkout -b "$temp_branch" "$PROD_BRANCH"
git merge --no-ff --no-edit "$STAGING_BRANCH"
standard_version
cp package.json CHANGELOG.md "$temp_dir"
git checkout "$STAGING_BRANCH"
git reset --hard HEAD # undo the changes from $temp_branch
mv "$temp_dir"/* . # rewrite the changelog
# clean up the temp stuff
rm -rf "$temp_dir"
git branch -D "$temp_branch"
}
# build a gem package
build_gem() {
echo -e "Build the gem package for v$_version\n"
cleanup_config
rm -f ./*.gem
gem build "$GEM_SPEC"
resume_config
}
# Update the git branch graph, tag, and then build the gem package.
release() {
_version="$1" # X.Y.Z
git checkout "$PROD_BRANCH"
git merge --no-ff --no-edit "$working_branch"
# Create a new tag on production branch
echo -e "Create tag v$_version\n"
git tag "v$_version"
# merge from patch branch to the staging branch
# NOTE: This may break due to merge conflicts, so it may need to be resolved manually.
if [[ $working_branch == hotfix/* ]]; then
git checkout "$STAGING_BRANCH"
git merge --no-ff --no-edit "$working_branch"
git branch -D "$working_branch"
fi
}
main() {
check
if [[ "$working_branch" == "$STAGING_BRANCH" ]]; then
standard_version_plus
else
standard_version
fi
# Change heading of Patch version to level 2 (a bug from `standard-version`)
sed -i "s/^### \[/## \[/g" CHANGELOG.md
_version="$(grep '"version":' package.json | sed 's/.*: "//;s/".*//')"
echo -e "Bump version number to $_version\n"
bump "$_version"
build_gem
if [[ $opt_pre = true ]]; then
# Undo all changes on Git
git reset --hard && git clean -fd
else
release "$_version"
fi
}
while (($#)); do
opt="$1"
case $opt in
-p | --preview)
opt_pre=true
shift
;;
-h | --help)
help
exit 0
;;
*)
# unknown option
help
exit 1
;;
esac
done
main
| true
|
14fd8af8cfad6408770102a9e2e8fe10e889d130
|
Shell
|
bufubaoni/algorithms
|
/ldap.sh
|
UTF-8
| 357
| 3.5
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
LDAPBK=ldap-$( date +%Y%m%d-%H ).ldif
BACKUPDIR=/ldap_backups
BACKUP_EXEC=`which slapcat`
PACKAGE=`which gzip`
checkdir(){
if [ ! -d "$BACKUPDIR" ]; then
mkdir -p ${BACKUPDIR}
fi
}
backuping(){
echo "Backup Ldap Start...."
${BACKUP_EXEC} -v -l ${BACKUPDIR}/${LDAPBK}
${PACKAGE} -9 $BACKUPDIR/$LDAPBK
}
checkdir
backuping
| true
|
c413cac3542799429a763ce5227a2a7b5575e010
|
Shell
|
xiaolei16fan/redis-deploy
|
/bind_hosts.sh
|
UTF-8
| 698
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# author xiaolei <xiaolei@16fan.com>
# date 2016.09.05
# On a redis master/slave failover, rewrite the admin host's /etc/hosts
# entry "redisserver16fan" to point at the new master's IP.
# NOTE(review): presumably invoked as a Sentinel client-reconfig-script,
# where $4 is the old master IP and $6 the new one — confirm against the
# sentinel configuration.
MASTER_IP=${6}
DEFAULT_MASTER=192.168.33.11
# BUGFIX: the original `if [ ! `grep ...` ]` word-split the match line
# ("IP hostname") into multiple test arguments, causing
# "[: too many arguments"; `grep -q` tests presence safely.
if ! grep -q 'redisserver16fan' /etc/hosts; then
    echo "${DEFAULT_MASTER} redisserver16fan" >> /etc/hosts
else
    sed -i '/redisserver16fan/d' /etc/hosts
    echo "${MASTER_IP} redisserver16fan" >> /etc/hosts
fi
# Notify the administrator about the failover.
EMAIL=xiaolei@16fan.com
date=$(date +%Y/%m/%d-%H:%M:%S)
issues=/usr/local/redis/log/redis_issues.log
echo -e "FAILOVER EVENT at ${date} \nSwitch HOST from: ${4} to: ${6}\nCURRENT MASTER: ${6}" > "$issues"
# Redirect instead of `cat | mail` (useless use of cat).
mail -s "Redis Failover Notice!" "$EMAIL" < "$issues"
| true
|
1e5ab1f0959f98d24d20914d2b719a8a08dd1efd
|
Shell
|
hectorsalvador/WNV_model
|
/build_model.sh
|
UTF-8
| 253
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Train the WNV model; the optional first argument sets the retraining
# period passed through to scripts/build_model.py via --p.
PERIODS=$1
# Run model
# Quote $PERIODS: the unquoted form broke the test (and the python call)
# when the argument was empty-with-spaces or contained whitespace.
if [ -z "$PERIODS" ]
then
    echo "Training model using defaults."
    python3 -W ignore scripts/build_model.py
else
    echo "Training model every $PERIODS periods."
    python3 -W ignore scripts/build_model.py --p "$PERIODS"
fi
| true
|
e101787cfd2bee52622309e54e591684c5702d4c
|
Shell
|
acshetty/sncRNA-seq-analysis
|
/scripts/merge_alignment_statistics.sh
|
UTF-8
| 1,741
| 3.484375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Merge per-sample alignment-statistics tables into one file:
# the header row is taken from the first file found, data rows from all.
# Fixes: removed bashisms (`function`, `(( ))`, `[[ ]]`) that were invalid
# under #!/bin/sh, simplified the arity check, and quoted expansions.
Usage() {
    echo "Usage: $1 <alignment_directory> <rnatype> <output_file> <log>"
}
# Exactly four arguments are required.
if [ $# -ne 4 ]; then
    Usage "$0"
    exit 1
fi
ALN=$1
TYP=$2
OUT=$3
LOG=$4
nIDX=0
# NOTE(review): the for-over-find word-splits its output, so matched paths
# are assumed to contain no whitespace — same assumption as the original.
for F in $(find "${ALN}"/*/*"${TYP}"*alignment_statistics.txt); do
    if [ "$nIDX" -eq 0 ]; then
        head -1 "${F}" > "${OUT}"       # write the header exactly once
    fi
    sed -n '2,$p' "${F}" >> "${OUT}"    # append data rows, skipping the header
    nIDX=$((nIDX + 1))
done
echo "See alignment statistics in ${OUT}" > "${LOG}"
echo ""
echo ""
#####################################################################################
### AUTHOR
###
### Amol Carl Shetty
### Lead Bioinformatics Software Engineer
### Institute of Genome Sciences
### University of Maryland
### Baltimore, Maryland 21201
###
### =head1 LICENSE AND COPYRIGHT
###
### Copyright (c) 2019 Amol Carl Shetty (<ashetty@som.umaryland.edu>). All rights
### reserved.
###
### This program is free software; you can distribute it and/or modify it under
### GNU-GPL licenscing terms.
###
### This program is distributed in the hope that it will be useful, but WITHOUT
### ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY or FITNESS
### FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
### BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
### CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
### GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
### HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
### LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
### OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#####################################################################################
| true
|
b852dc8878ffee3ea2180b98dd115f6da766e0db
|
Shell
|
jwilk/deb-toolbox
|
/buildd.d.o
|
UTF-8
| 823
| 3.59375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Copyright © 2017-2023 Jakub Wilk <jwilk@jwilk.net>
# SPDX-License-Identifier: MIT
# Open the buildd.debian.org status page for the source package that
# corresponds to the given package name.
set -e -u
# Percent-encode every character outside [A-Za-z0-9_.-] for safe use in a URL.
urlescape()
{
    perl -E '$_ = $ARGV[0]; s/[^\w.-]/sprintf "%%%02X", ord $&/ge; say $_' "$@"
}
if [ $# -ne 1 ]
then
    printf 'Usage: %s PACKAGE\n' "${0##*/}" >&2
    exit 1
fi
pkg="$1"
# All distinct source-package names that apt-cache resolves for the input.
pkgs=$(apt-cache showsrc -- "$pkg" | grep-dctrl -ns Package '' | sort -u)
if [ -z "$pkgs" ]
then
    printf '%s: cannot resolve package name: %s\n' "${0##*/}" "$1"
    exit 1
fi
# Accept the result only when there is exactly one candidate, or one of
# the candidates matches the requested name exactly (n ends up 1).
n=0
for pkg in $pkgs
do
    if [ "$pkg" = "$1" ]
    then
        n=1
        break
    fi
    n=$((n + 1))
done
if [ $n -ne 1 ]
then
    printf '%s: cannot disambiguate package name: %s\n' "${0##*/}" "$1"
    exit 1
fi
upkg=$(urlescape "$pkg")
url="https://buildd.debian.org/status/package.php?p=$upkg"
exec sensible-browser "$url"
# vim:ts=4 sts=4 sw=4 et
| true
|
07836ea707e641ca97dc85486d1a8715944ecb62
|
Shell
|
nosebrain/stuff
|
/scripts/macos/brew-cask-update.sh
|
UTF-8
| 280
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Force-reinstall the given Homebrew casks and strip the macOS quarantine
# attribute from each freshly installed app bundle.
echo "$@"
# Iterate over "$@" directly: the original `for APP in $(echo $APPS)`
# word-split cask names containing spaces.
for APP in "$@"; do
    echo "updating $APP; please wait"
    RESULT=$(brew cask install --force "$APP" | tee /dev/tty)
    # brew prints "Moving App '...' to '<path>'"; the 4th '-delimited field
    # is the destination path of the installed app.
    APP_PATH=$(echo "$RESULT" | grep "Moving App" | cut -d "'" -f 4)
    xattr -r -d com.apple.quarantine "$APP_PATH"
done
| true
|
08432ea3dcb7da69cbe73452c095251ee4b41a6f
|
Shell
|
kaiyuanshe/open-hackathon
|
/deploy/setup.sh
|
UTF-8
| 10,644
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# This file is covered by the LICENSING file in the root of this project.
# This script should be run as "sudo bash setup.sh"
# Better to deploy open-hackathon on ubuntu 14
# Add the legacy apt.dockerproject.org repository and its signing key for
# the detected distro release (Mint maps to the trusty repo).
# NOTE: `if $( ... grep -q ...)` works only because the substitution expands
# to nothing and bash then uses the substitution's exit status; plain
# `if lsb_release -d | grep -q ...` would be the clearer form.
function pre_setup_docker() {
    #for ubuntu 14
    if $(lsb_release -d | grep -q "14"); then
        echo "deb https://apt.dockerproject.org/repo ubuntu-trusty main" | tee /etc/apt/sources.list.d/docker.list
    fi
    # for ubuntu 15
    if $(lsb_release -d | grep -q "15"); then
        echo "deb https://apt.dockerproject.org/repo ubuntu-wily main" | tee /etc/apt/sources.list.d/docker.list
    fi
    # for linux mint
    if $(lsb_release -d | grep -q "Mint"); then
        echo "deb https://apt.dockerproject.org/repo ubuntu-trusty main" | tee /etc/apt/sources.list.d/docker.list
    fi
    apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
}
# Install the base build/runtime packages with apt-get.
# BUGFIX: apt-get reports missing packages as "Unable to locate package";
# the original pattern "Unable to lacate" could never match, so install
# failures were silently ignored. Error paths now also exit non-zero.
function get_dependency_software() {
    echo "updating apt-get......"
    result=$(apt-get update)
    if grep -q "Could not resolve" <<< "$result"; then
        echo "Could not update apt-get, please solve it"
        exit 1
    fi
    echo "installing git python-setuptools python-dev python-pip"
    result=$(apt-get install -y git python-setuptools python-dev python-pip)
    if grep -q "Unable to locate" <<< "$result"; then
        echo "Could not install dependency software, pls install it manually"
        exit 1
    fi
    echo "installing autoconf libtool tomcat7 "
    result=$(apt-get install -y autoconf automake libtool tomcat7 )
    if grep -q "Unable to locate" <<< "$result"; then
        echo "Could not install dependency software, pls install it manually"
        exit 1
    fi
}
# Clone (or update) the open-hackathon sources, install the python
# requirements for server and client, and create local configs from the
# sample files.
# BUGFIX: the original copied the *server* sample over the server config
# twice, so the client config.py was never created.
function set_envirement() {
    echo "git clone source code from Github, and solve python dependency"
    if [ ! -d "$HOME" ]; then
        mkdir "$HOME"
    fi
    cd "$HOME"
    if [ ! -d "$OHP_HOME" ]; then
        echo "git cloning open-hackathon source code"
        result=$(git clone https://github.com/kaiyuanshe/open-hackathon.git)
        if grep -q "unable to access" <<< "$result"; then
            echo "Could not git clone open-hackathon source code, pls check your network"
            exit 1
        fi
    fi
    cd "$OHP_HOME"
    git reset --hard
    git pull
    echo "pip is installing required python library"
    result=$(pip install -r open-hackathon-server/requirements.txt)
    result=$(pip install -r open-hackathon-client/requirements.txt)
    cp open-hackathon-server/src/hackathon/config_sample.py open-hackathon-server/src/hackathon/config.py
    cp open-hackathon-client/src/hackathon/config_sample.py open-hackathon-client/src/hackathon/config.py
}
# Install MongoDB 3.2 from the official repository, pin the packages at
# $MONGO_VERSION, start mongod and seed the database.
# BUGFIX: the "Unable to lacate" typo meant apt-get failures were never
# detected; the pattern now matches apt-get's real output.
function install_mongodb() {
    # Skip everything when the mongod service already responds.
    result=$(sudo service mongod status)
    if grep -q "mongod" <<< "$result"; then
        echo "mongodb is installed"
        return
    fi
    apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv EA312927
    # Pick the repo line matching the distro release (Mint maps to trusty).
    if $(lsb_release -d | grep -q "12"); then
        echo "deb http://repo.mongodb.org/apt/ubuntu precise/mongodb-org/3.2 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.2.list
    fi
    if $(lsb_release -d | grep -q "14"); then
        echo "deb http://repo.mongodb.org/apt/ubuntu trusty/mongodb-org/3.2 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.2.list
    fi
    if $(lsb_release -d | grep -q "Mint"); then
        echo "deb http://repo.mongodb.org/apt/ubuntu trusty/mongodb-org/3.2 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.2.list
    fi
    result=$(apt-get update)
    echo "installing mongodb-org"
    result=$(apt-get install -y mongodb-org)
    echo "installing mongodb-org=$MONGO_VERSION mongodb-org-server=$MONGO_VERSION mongodb-org-shell=$MONGO_VERSION mongodb-org-mongos=$MONGO_VERSION mongodb-org-tools=$MONGO_VERSION"
    result=$(apt-get install -y mongodb-org=$MONGO_VERSION mongodb-org-server=$MONGO_VERSION mongodb-org-shell=$MONGO_VERSION mongodb-org-mongos=$MONGO_VERSION mongodb-org-tools=$MONGO_VERSION)
    if grep -q "Unable to locate" <<< "$result"; then
        echo "Could not install mongodb, pls run this script again or install mongodb manually"
        exit 1
    fi
    # Hold the packages so apt upgrades cannot move the pinned version.
    echo "mongodb-org hold" | sudo dpkg --set-selections
    echo "mongodb-org-server hold" | sudo dpkg --set-selections
    echo "mongodb-org-shell hold" | sudo dpkg --set-selections
    echo "mongodb-org-mongos hold" | sudo dpkg --set-selections
    echo "mongodb-org-tools hold" | sudo dpkg --set-selections
    service mongod start
    cd "$OHP_HOME" && python open-hackathon-server/src/setup_db.py
}
# Install the libraries guacamole-server builds against.
# BUGFIX: the "Unable to lacate" typo never matched apt-get's real
# "Unable to locate package" output, so missing packages went unnoticed;
# error paths now also exit non-zero.
function get_dependency_for_guacamole() {
    echo "solve dependency software for guacamole"
    # If guacd already restarts cleanly, guacamole is present — nothing to do.
    result=$(service guacd restart)
    if grep -q "SUCCESS" <<< "$result"; then
        echo "guacamole is installed!"
        return
    fi
    echo "installing libcairo2-dev"
    result=$(apt-get install -y libcairo2-dev)
    if grep -q "Unable to locate" <<< "$result"; then
        echo "Could not install dependency software for guacamole, pls install guacamole manually"
        exit 1
    fi
    echo "installing libjpeg62-dev libpng12-dev libossp-uuid-dev"
    result=$(apt-get install -y libjpeg62-dev libpng12-dev libossp-uuid-dev)
    if grep -q "Unable to locate" <<< "$result"; then
        echo "Could not install dependency software for guacamole, pls install guacamole manually"
        exit 1
    fi
    echo "installing libfreerdp-dev libpango1.0-dev libssh2-1-dev libtelnet-dev"
    result=$(apt-get install -y libfreerdp-dev libpango1.0-dev libssh2-1-dev libtelnet-dev)
    if grep -q "Unable to locate" <<< "$result"; then
        echo "Could not install dependency software for guacamole, pls install guacamole manually"
        exit 1
    fi
    echo "installing libvncserver-dev libpulse-dev libwebp-dev libssl-dev libvorbis-dev"
    result=$(apt-get install -y libvncserver-dev libpulse-dev libwebp-dev libssl-dev libvorbis-dev)
    if grep -q "Unable to locate" <<< "$result"; then
        echo "Could not install dependency software for guacamole, pls install guacamole manually"
        exit 1
    fi
}
function install_and_config_guacamole() {
result=$(service guacd restart)
if grep -q "SUCCESS" <<< $result; then
echo "guacamole is installed!"
return
fi
echo "installing guacamole"
# install and Configure guacamole
cd $OHP_HOME
if [ ! -d "guacamole-server-$GUACAMOLE_VERSION" ]; then
wget http://sourceforge.net/projects/guacamole/files/current/source/guacamole-server-$GUACAMOLE_VERSION.tar.gz/download
mv download guacamole-server-$GUACAMOLE_VERSION.tar.gz && tar -xzf guacamole-server-$GUACAMOLE_VERSION.tar.gz
fi
cd guacamole-server-$GUACAMOLE_VERSION
result=$(autoreconf -fi)
result=$(./configure --with-init-dir=/etc/init.d)
result=$(make clean)
result=$(make)
result=$(make install)
ldconfig
# configure guacamole client
if [ ! -f /var/lib/tomcat7/webapps/guacamole.war ] ; then
wget http://sourceforge.net/projects/guacamole/files/current/binary/guacamole-$GUACAMOLE_VERSION.war/download
mv download /var/lib/tomcat7/webapps/guacamole.war
fi
# configure guacamole authentication provider
mkdir /usr/share/tomcat7/.guacamole
mkdir /etc/guacamole
cd /home/opentech/open-hackathon/deploy/guacamole
cp guacamole-sample.properties /etc/guacamole/guacamole.properties
cp *.jar /etc/guacamole
ln -s /etc/guacamole/guacamole.properties /usr/share/tomcat7/.guacamole/guacamole.properties
result=$(sudo service guacd restart)
if ! (grep -q "SUCCESS" <<< $result); then
echo "Fail to install guacamole, please run this script once again!"
exit
fi
result=$(service tomcat7 restart)
echo "guacamole installed successfully"
}
# Install docker-engine from the Docker apt repo, start it, add users to
# the docker group and verify the install with hello-world.
# BUGFIXES: "Unable to lacate" grep patterns never matched apt-get output,
# and the docker-engine failure message wrongly said "apparmor".
function install_and_config_docker() {
    # install docker
    result=$(apt-get update)
    if grep -q "Could not resolve" <<< "$result"; then
        echo "Could not update apt-get, please solve it"
        exit 1
    fi
    result=$(apt-get install apt-transport-https ca-certificates)
    if grep -q "Unable to locate" <<< "$result"; then
        echo "Could not install dependency software for docker, pls install docker manually"
        exit 1
    fi
    result=$(apt-get update)
    if grep -q "Could not resolve" <<< "$result"; then
        echo "Could not update apt-get, please solve it"
        exit 1
    fi
    # Remove the old lxc-docker packaging before installing docker-engine.
    result=$(apt-get purge lxc-docker)
    if grep -q "Unable to locate" <<< "$result"; then
        echo "Could not install docker, pls install docker manually"
        exit 1
    fi
    result=$(apt-cache policy docker-engine)
    # for ubuntu 15
    result=$(apt-get install -y linux-image-extra-$(uname -r))
    if grep -q "Unable to locate" <<< "$result"; then
        echo "Could not install linux-image-extra-$(uname -r)"
        exit 1
    fi
    # for ubuntu 12 & 14
    result=$(apt-get install -y apparmor)
    if grep -q "Unable to locate" <<< "$result"; then
        echo "Could not install apparmor"
        exit 1
    fi
    result=$(apt-get install -y docker-engine)
    if grep -q "Unable to locate" <<< "$result"; then
        echo "Could not install docker-engine"
        exit 1
    fi
    service docker start
    echo "docker installed successfully"
    # Grant docker access to the "ubuntu" account and the invoking user.
    usermod -aG docker ubuntu
    groupadd docker
    gpasswd -a ${USER} docker
    docker pull rastasheep/ubuntu-sshd
    # Smoke-test the daemon end to end.
    result=$(docker run hello-world)
    if ! (grep -q "Hello from Docker" <<< "$result"); then
        echo "Install docker failed, please run this script again or install docker manually."
        exit 1
    fi
    echo "Docker is installed successfully."
}
# Create the application log directory and install uWSGI plus the upstart
# service definitions for the server and client apps.
function deploy() {
    # Logging && Hosts
    result=$(mkdir /var/log/open-hackathon)
    chmod -R 644 /var/log/open-hackathon
    # Installing uWSGI
    result=$(pip install uwsgi)
    cp $OHP_HOME/open-hackathon-server/src/open-hackathon-server.conf /etc/init/
    cp $OHP_HOME/open-hackathon-client/src/open-hackathon-client.conf /etc/init/
}
# Entry point: require root, export the globals used by the other steps,
# then run each installation stage in order.
function main() {
    if [ $(id -u) != "0" ]; then
        echo "Please run this script with sudo"
        echo "like sudo bash setup.sh"
        exit 1
    fi
    export HOME=/home/opentech
    export OHP_HOME=$HOME/open-hackathon
    export MONGO_VERSION=3.2.4
    export GUACAMOLE_VERSION=0.9.9
    echo "It may take a long time to install and configure open-hackathon, please wait a moment^_^, ..."
    echo "安装将花费一定时间,请耐心等待直到安装完成^_^, ..."
    get_dependency_software
    install_mongodb
    set_envirement
    get_dependency_for_guacamole
    install_and_config_guacamole
    #pre_setup_docker
    #install_and_config_docker
    deploy
    # Hand ownership back to the invoking (pre-sudo) user.
    chown -R $(logname) $HOME
}
main
| true
|
bf368394eed7928bbb27d1cee20d29e72f4590b4
|
Shell
|
farrokhi/freebsd-munin-plugins
|
/if_packets_
|
UTF-8
| 1,670
| 3.984375
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# -*- sh -*-
#
# Wildcard-plugin to monitor network interfaces. To monitor an
# interface, link if_packets_<interface> to this file. E.g.
#
# ln -s /usr/local/share/munin/plugins/if_packets_ /usr/local/etc/munin/plugins/if_packets_em0
#
# ...will monitor eth0 interface.
#
# Magic markers (optional - used by munin-config and some installation
# scripts):
#
#%# family=auto
#%# capabilities=autoconf suggest
INTERFACE=${0##*if_packets_}
IFCOUNTERS="/usr/local/bin/ifcounters"
if [ "$1" = "autoconf" ]; then
if [ -x /sbin/ifconfig -o -x ${IFCOUNTERS} ]; then
echo yes
exit 0
else
echo "no (${IFCOUNTERS} not found)"
exit 0
fi
fi
if [ "$1" = "suggest" ]; then
if [ -x /sbin/ifconfig ]
then
ifconfig -l | sed -Ee 's/[[:<:]](pfsync|faith|pf(log|sync)|lo|plip|carp|enc|fwe)[^ ]*//g' | xargs -n 1 echo
exit 0
else
exit 1
fi
fi
if [ "$1" = "config" ]; then
echo "graph_order rpackets opackets"
echo "graph_title $INTERFACE pps"
echo 'graph_args --base 1000'
echo 'graph_vlabel packets per ${graph_period} in (-) / out (+)'
echo 'graph_category network'
echo "graph_info This graph shows the packets counter of the $INTERFACE network interface. Please note that the traffic is shown in packets per second."
echo 'rpackets.label received'
echo 'rpackets.type COUNTER'
echo 'rpackets.graph no'
echo 'rpackets.min 0'
echo 'opackets.label pps'
echo 'opackets.type COUNTER'
echo 'opackets.colour COLOUR19'
echo 'opackets.draw AREA'
echo 'opackets.negative rpackets'
echo 'opackets.min 0'
echo "opackets.info Packets sent (+) and received (-) on the $INTERFACE network interface."
exit 0
fi
${IFCOUNTERS} -p ${INTERFACE}
| true
|
680198fcdb79bc78416db546a943fbadcdf76e76
|
Shell
|
miiya369/analysisHAL_miya
|
/scripts/for_SC/fx100/Find.ErrJobs.sh
|
UTF-8
| 1,321
| 3.5
| 4
|
[] |
no_license
|
#!/bin/sh
if [ $# -eq 0 ]; then
echo "usage: sh `basename $0` [src t] ..."
exit 1
fi
Base=/gwfefs/data/G16030/miyamoto/work.K/work.run-i.traj_mod10_5/run.t_mod24_??/run.t_???.rot_tot/logs.t_???
run_out_lst=/gwfefs/home/tkymiya/scripts/run.out.lst
for srcT in $@; do
SRCT=`printf %03d $srcT`
if [ $? -ne 0 ]; then
exit 1
fi
for ofile in `cat $run_out_lst | sed "s/SRCT/$SRCT/g"`; do
if [ ! -e $Base/$ofile ]; then
echo "$ofile : Not exist"
continue
fi
tmp0=`grep ERROR $Base/$ofile | wc -l`
if [ $tmp0 -ne 0 ]; then
echo "$ofile : Error happened"
continue
fi
tmp4=`ls -l $Base/$ofile | cut -d " " -f 5`
if [ $tmp4 -lt 100000 ]; then
echo "$ofile : Invalid file size : Too small file size"
continue
fi
tmp1=`tail -n 1 $Base/$ofile | cut -d " " -f 4`
tmp2=`echo $tmp1 | cut -d ":" -f 1`
expr $tmp2 + 1 > /dev/null 2>&1
if [ $? -ne 0 ]; then
echo "$ofile : Time limit exceeded"
continue
fi
tmp3=`ls -l $Base/$ofile | cut -d " " -f 5`
if [ $tmp3 -lt 705000 -o $tmp3 -gt 715000 ]; then
echo -n "$ofile : Invalid file size"
if [ `grep "restarts" $Base/$ofile | wc -l` -ne 0 ]; then
echo " : restarted."
else
echo " : Something happened"
fi
continue
fi
done
done
| true
|
c24d2e435bc75560ec0c9552532bb6b392f26600
|
Shell
|
itsmarky10/ECS122B
|
/sanity_check.sh
|
UTF-8
| 1,343
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
numOfSamp=100
numOfEle=1000
# Compile the java files
javac QuickSelect.java
javac DeterministicSelect.java
javac SelectK.java
# Test
#for((i=0; i<20; i++)); do
# randomNum=$(od -N4 -tu4 -An /dev/urandom | tr -d " ")
# randomNum=$((($randomNum % (10**6)) + 1))
# echo $randomNum
#done
#exit
numOfTestFailed=0
# Generate a file with integers
for ((i=1; i <= numOfSamp; i++)); do
# Shuf generates a list of integers
shuf -i 1-$((10**6)) -n $numOfEle > "$i.txt"
kOrder=$(od -N4 -tu4 -An /dev/urandom | tr -d " ")
kOrder=$((($kOrder % $numOfEle) + 1))
java QuickSelect "$i.txt" $kOrder > "QuickSelect_failed_test_$i.txt"
java DeterministicSelect "$i.txt" $kOrder > "DeterministicSelect_failed_test_$i.txt"
java SelectK "$i.txt" $kOrder > "SelectK_failed_test_$i.txt"
diff3 QuickSelect_failed_test_$i.txt DeterministicSelect_failed_test_$i.txt SelectK_failed_test_$i.txt > result.txt
if [[ -s result.txt ]]; then
numOfTestFailed=$((numOfTestFailed+1))
else
rm "QuickSelect_failed_test_$i.txt"
rm "DeterministicSelect_failed_test_$i.txt"
rm "SelectK_failed_test_$i.txt"
fi
rm "$i.txt"
done
rm result.txt
if ((numOfTestFailed > 0)); then
echo "$numOfTestFailed tests failed."
else
echo "All tests passed."
fi
# Clean up class files
rm ./*.class
| true
|
d30cf0f8eb285fb09ae2153abf4313c7ddf5691b
|
Shell
|
megclaypool/pantheon-clone-to-local
|
/clone-from-pantheon.sh
|
UTF-8
| 12,688
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
# You can set your local mysql username and password in clone_script.settings.local.yml
# Otherwise, you'll be setting them interactively with the other variables when you run the script :)
SITE_MACHINE_NAME=''
SITE_ENV=''
# COPY_FILES can be yes or no
COPY_FILES=''
# SITE_TYPE can be WP, D7, or D8 -- leave blank to autodetect
SITE_TYPE=''
# Get the script directory
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
LOCAL_SETTINGS=${SCRIPT_DIR}/assets/clone_script.settings.local.yml
if [ -f ${LOCAL_SETTINGS} ]
then
temp_username=$(grep 'username:' ${LOCAL_SETTINGS} | sed -n -e 's/^.*username: //p');
temp_password=$(grep 'password:' ${LOCAL_SETTINGS} | sed -n -e 's/^.*password: //p');
if [ "${temp_username//\"/}" != "MYSQL_USERNAME" ] && [ "${temp_password//\"/}" != "MYSQL_PASSWORD" ]
then
SQL_USERNAME=${temp_username//\"/}
SQL_PASSWORD=${temp_password//\"/}
NAME_PASS_SET="true"
fi
fi
# Don't mess with the stuff below here! #
#########################################
# Check that mysql is installed
command -v mysql >/dev/null 2>&1 || { echo >&2 "This script uses mysql, which you don't seem to have installed yet... Please install mysql and try running this script again :)"; exit 1; }
# Check that terminus is installed
command -v terminus >/dev/null 2>&1 || { echo >&2 "This script uses terminus, which you don't seem to have installed yet... Please install terminus and try running this script again :)"; exit 1; }
# Check that robo is installed
command -v robo >/dev/null 2>&1 || { echo >&2 "This script uses robo, which you don't seem to have installed yet... Please install robo and try running this script again :)"; exit 1; }
# Set a whole bunch of variables #
##################################
# if $SITE_MACHINE_NAME isn't set above, prompt the user
if [ -z "$SITE_MACHINE_NAME" ]
then
echo "Enter the pantheon machine name for this site"
read SITE_MACHINE_NAME
fi
# use terminus to get the site's id number (needed for the clone address)
SITE_ID=$(terminus site:info --field id -- $SITE_MACHINE_NAME)
# if $SITE_ID isn't set after running the above, there's probably an error in the SITE_MACHINE_NAME
while [ -z "$SITE_ID" ]
do
echo -e "It looks like you may have entered the machine name of your site incorrectly; Terminus is returning an error instead of a site id... \n\nYou can find the machine name of your site by visiting the site's Pantheon dashboard and clicking the tab for the dev environment. Then click the \"Visit Development Site\" button. The site's machine name is the bit between \"dev-\" and \".pantheonsite.io\" in the url. \n\nPlease enter the machine name of your site"
read SITE_MACHINE_NAME
SITE_ID=$(terminus site:info --field id -- $SITE_MACHINE_NAME)
done
# if $SITE_TYPE isn't set above try to figure it out based on the upstream.
SITE_UPSTREAM=$(terminus site:info --field upstream -- $SITE_MACHINE_NAME)
if [ -z "$SITE_TYPE" ]
then
if [[ $SITE_UPSTREAM == *"WordPress"* ]] || [[ $SITE_UPSTREAM == *"wordpress-upstream"* ]]
then
echo "This is a WordPress site"
SITE_TYPE="WP"
fi
if [[ $SITE_UPSTREAM == *"drops-7"* ]]
then
echo "This is a Drupal 7 site"
SITE_TYPE="D7"
fi
if [[ $SITE_UPSTREAM == *"drops-8"* ]]
then
echo "This is a Drupal 8 site"
SITE_TYPE="D8"
fi
fi
# if I couldn't figure out the $SITE_TYPE from the upstream, prompt the user
if [ -z "$SITE_TYPE" ]
then
echo -e "\nIs this a WP site, a D7 site, or a D8 site?"
read SITE_TYPE
SITE_TYPE=$( echo "$SITE_TYPE" | tr '[:lower:]' '[:upper:]')
while [ "${SITE_TYPE}" != "WP" ] && [ "${SITE_TYPE}" != "D7" ] && [ "${SITE_TYPE}" != "D8" ]
do
echo "Please use 'WP', 'D7', or 'D8' to describe this site"
read SITE_TYPE
SITE_TYPE=$( echo "$SITE_TYPE" | tr '[:lower:]' '[:upper:]')
done
fi
# if $SITE_ENV isn't set above, prompt the user
if [ -z "$SITE_ENV" ]
then
echo -e "\nFrom which environment would you like to clone the database and (possibly) files?"
read SITE_ENV
fi
# if $COPY_FILES isn't set above, prompt the user
if [ -z "$COPY_FILES" ]
then
echo -e "\nWould you like to download all the site's files, in addition to the code and database? (Yes / No)"
read COPY_FILES
COPY_FILES=$( echo "$COPY_FILES" | tr '[:upper:]' '[:lower:]' )
if [ "${COPY_FILES}" == "y" ]
then
COPY_FILES='yes'
fi
if [ "${COPY_FILES}" == "n" ]
then
COPY_FILES='no'
fi
while [ "${COPY_FILES}" != "yes" ] && [ "${COPY_FILES}" != "no" ]
do
echo -e "\nerr... That was a yes or no question... Let's try again: Would you like to download all the site's files, in addition to the code and database? (Yes / No)"
read COPY_FILES
COPY_FILES=$( echo "$COPY_FILES" | tr '[:upper:]' '[:lower:]' )
done
if [ "${COPY_FILES}" == "no" ]
then
echo -e "\nOk, I won't download the files right now. You can download them later using the command \"robo pullfiles\"."
fi
fi
# if $SQL_USERNAME isn't set above, prompt the user
if [ "${NAME_PASS_SET}" != "true" ]
then
echo -e "\nEnter your mysql username"
read SQL_USERNAME
fi
# if $SQL_PASSWORD isn't set above, prompt the user
if [ "${NAME_PASS_SET}" != "true" ]
then
echo -e "\nEnter your mysql password (hit enter if you don't have a password set)"
read SQL_PASSWORD
fi
# set the sql database name based on the site machine name
SQL_DATABASE=${SITE_MACHINE_NAME//-/_}
DB_EXISTS=$(mysql -u $SQL_USERNAME -e "use ${SQL_DATABASE}" 2> /dev/null; echo "$?")
# Now to start actually doing stuff! #
######################################
# if the sql database already exists, drop it and create a new one
if [ -z "$SQL_PASSWORD" ]
then
if [ $DB_EXISTS == 0 ]
then
mysql -u$SQL_USERNAME -e "drop database $SQL_DATABASE"
fi
mysql -u $SQL_USERNAME -e "create database $SQL_DATABASE"
else
if [ $DB_EXISTS == 0 ]
then
mysql -u$SQL_USERNAME -p$SQL_PASSWORD -e "drop database $SQL_DATABASE"
fi
mysql -u$SQL_USERNAME -p$SQL_PASSWORD -e "create database $SQL_DATABASE"
fi
# clone the site code from pantheon
git clone ssh://codeserver.dev.${SITE_ID}@codeserver.dev.${SITE_ID}.drush.in:2222/~/repository.git ${SITE_MACHINE_NAME}
# change to the site directory
cd ${SITE_MACHINE_NAME}
# if the Robo files already exist, delete them (we want to have the latest version)
if [ -f 'RoboFile.php' ]
then
rm RoboFile.php
fi
if [ -f 'RoboLocal.php' ]
then
rm RoboLocal.php
fi
if [ -f 'RoboLocal.example.php' ]
then
rm RoboLocal.example.php
fi
# Snag an up-to-date copy of the RoboFile
cp ${SCRIPT_DIR}/assets/RoboFile.php ./RoboFile.php
# Add RoboLocal.php to .gitignore if it's not already there
if [ -f .gitignore ]
then
if ! grep -q "RoboLocal.php" ".gitignore"
then
echo -e "\n\n# Local Robo Settings #\n#######################\nRoboLocal.php" >> .gitignore
fi
else
touch .gitignore
echo -e "\n\n# Local Robo Settings #\n#######################\nRoboLocal.php" >> .gitignore
fi
# Depending on what kind of site this is, copy over the right version of RoboLocal
# Also copy over the local config or settings file and then find and replace variables
if [ "${SITE_TYPE}" == "WP" ]
then
# check for /web directory
if [ -d ./web ]
then
DIRECTORY_PATH='./web'
else
DIRECTORY_PATH='.'
fi
cp ${SCRIPT_DIR}/assets/wordpress.RoboLocal.php ./RoboLocal.php
cp ${SCRIPT_DIR}/assets/wordpress.RoboLocal.php ./RoboLocal.example.php
cp ${SCRIPT_DIR}/assets/wordpress.wp-config-local.php ${DIRECTORY_PATH}/wp-config-local.php
sed -i '' -e "s/LOCAL_DATABASE_NAME_PLACEHOLDER/$SQL_DATABASE/g" ${DIRECTORY_PATH}/wp-config-local.php
sed -i '' -e "s/MYSQL_USERNAME_PLACEHOLDER/$SQL_USERNAME/g" ${DIRECTORY_PATH}/wp-config-local.php
sed -i '' -e "s/MYSQL_PASSWORD_PLACEHOLDER/$SQL_PASSWORD/g" ${DIRECTORY_PATH}/wp-config-local.php
if [ "${DIRECTORY_PATH}" == "./web" ]
then
sed -i '' -e "s/\'wp-content\/uploads\'/\'web\/wp-content\/uploads\'/g" ./RoboLocal.php
sed -i '' -e "s/\'wp-content\/uploads\'/\'web\/wp-content\/uploads\'/g" ./RoboLocal.example.php
fi
# make sure wp-config-local is listed in the .gitignore
if ! grep -q "wp-config-local.php" ".gitignore"
then
echo -e "\n\n# Local Settings #\n##################\nwp-config-local.php" >> .gitignore
fi
fi
if [ "${SITE_TYPE}" == "D7" ]
then
# check for /web directory
if [ -d ./web ]
then
DIRECTORY_PATH='./web'
else
DIRECTORY_PATH='.'
fi
cp ${SCRIPT_DIR}/assets/drupal.RoboLocal.php ./RoboLocal.php
cp ${SCRIPT_DIR}/assets/drupal.RoboLocal.php ./RoboLocal.example.php
cp ${SCRIPT_DIR}/assets/drupal7.settings.local.php ${DIRECTORY_PATH}/sites/default/settings.local.php
sed -i '' -e "s/LOCAL_DATABASE_NAME_PLACEHOLDER/$SQL_DATABASE/g" ${DIRECTORY_PATH}/sites/default/settings.local.php
sed -i '' -e "s/MYSQL_USERNAME_PLACEHOLDER/$SQL_USERNAME/g" ${DIRECTORY_PATH}/sites/default/settings.local.php
sed -i '' -e "s/MYSQL_PASSWORD_PLACEHOLDER/$SQL_PASSWORD/g" ${DIRECTORY_PATH}/sites/default/settings.local.php
if [ "${DIRECTORY_PATH}" == "./web" ]
then
sed -i '' -e "s/\'sites\/default\/files\'/\'web\/sites\/default\/files\'/g" ./RoboLocal.php
sed -i '' -e "s/\'sites\/default\/files\'/\'web\/sites\/default\/files\'/g" ./RoboLocal.example.php
fi
# make sure settings.local is listed in the .gitignore
if ! grep -q "settings.local.php" ".gitignore"
then
echo -e "\n\n# Local Settings #\n##################\nsettings.local.php" >> .gitignore
fi
fi
if [ "${SITE_TYPE}" == "D8" ]
then
if [ -d ./web ]
then
DIRECTORY_PATH='./web'
else
DIRECTORY_PATH='.'
fi
cp ${SCRIPT_DIR}/assets/drupal.RoboLocal.php ./RoboLocal.php
cp ${SCRIPT_DIR}/assets/drupal.RoboLocal.php ./RoboLocal.example.php
cp ${SCRIPT_DIR}/assets/drupal8.settings.local.php ${DIRECTORY_PATH}/sites/default/settings.local.php
cp ${SCRIPT_DIR}/assets/drupal8.services.local.yml ${DIRECTORY_PATH}/sites/default/services.local.yml
sed -i '' -e "s/LOCAL_DATABASE_NAME_PLACEHOLDER/$SQL_DATABASE/g" ${DIRECTORY_PATH}/sites/default/settings.local.php
sed -i '' -e "s/MYSQL_USERNAME_PLACEHOLDER/$SQL_USERNAME/g" ${DIRECTORY_PATH}/sites/default/settings.local.php
sed -i '' -e "s/MYSQL_PASSWORD_PLACEHOLDER/$SQL_PASSWORD/g" ${DIRECTORY_PATH}/sites/default/settings.local.php
if [ "${DIRECTORY_PATH}" == "./web" ]
then
sed -i '' -e "s/\'sites\/default\/files\'/\'web\/sites\/default\/files\'/g" ./RoboLocal.php
sed -i '' -e "s/\'sites\/default\/files\'/\'web\/sites\/default\/files\'/g" ./RoboLocal.example.php
fi
# make sure settings.local is listed in the .gitignore
if ! grep -q "settings.local.php" ".gitignore"
then
echo -e "\n\n# Local Settings #\n##################\nsettings.local.php" >> .gitignore
fi
# make sure services.local is listed in the .gitignore
if ! grep -q "services.local.yml" ".gitignore"
then
echo -e "\n\n# Local Debug Settings #\n########################\nservices.local.yml" >> .gitignore
fi
fi
sed -i '' -e "s/LOCAL_DATABASE_NAME_PLACEHOLDER/$SQL_DATABASE/g" ./RoboLocal.php
sed -i '' -e "s/MYSQL_USERNAME_PLACEHOLDER/$SQL_USERNAME/g" ./RoboLocal.php
sed -i '' -e "s/SITE_MACHINE_NAME_PLACEHOLDER/$SITE_MACHINE_NAME/g" ./RoboLocal.php
sed -i '' -e "s/SITE_MACHINE_NAME_PLACEHOLDER/$SITE_MACHINE_NAME/g" ./RoboLocal.example.php
sed -i '' -e "s/SITE_ENV_PLACEHOLDER/$SITE_ENV/g" ./RoboLocal.php
if [ ! -z "$SQL_PASSWORD" ]
then
  # BUGFIX: the replacement previously dropped the trailing ';', writing an
  # invalid PHP statement ("define(...)" with no terminator) into RoboLocal.php.
  # NOTE(review): a password containing sed metacharacters (/, &, \) would
  # still break this substitution — consider escaping it first.
  sed -i '' -e "s/\/\/ define('ROBO_DB_PASS', 'MYSQL_PASSWORD_PLACEHOLDER');/define('ROBO_DB_PASS', '${SQL_PASSWORD}');/g" ./RoboLocal.php
fi
# In theory everything is set up and ready for robo pull!
robo pull
if [ "${COPY_FILES}" == "yes" ]
then
echo -e "\nThe database is set up, and I'm about to start pulling the files from the $SITE_ENV environment. This can take quite a while, so if you'd like you can start working with the code while the files download :)"
robo pullfiles
fi
# 1. COLLECT ALL THE VARIABLES
# 2. CREATE THE DATABASE
# 3. CLONE THE SITE
# 4. COPY IN THE APPROPRIATE FILES
# 5. REPLACE THE APPROPRIATE VARIABLES IN THE COPIED FILES
# 6. USE ROBO TO COPY THE DATABASE
# 7. IF DESIRED, USE ROBO TO COPY THE FILES
# TODO: maybe check in settings.php / wp-config.php to see if the local files are being included and add the include if it's not there (some older sites, maybe)
| true
|
0d75dc9798cdd9544f80a63ed21a066c3d5bc36a
|
Shell
|
22antonio/laptop
|
/os/linux
|
UTF-8
| 4,654
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
OS=
RELEASE=$(cat /etc/*release | grep ^NAME)
ID_LIKE=$(cat /etc/*release | grep "^ID_LIKE=")
ID=$(cat /etc/*release | grep "^ID=")
if grep -q Fedora <<< $RELEASE; then
OS=Fedora
elif grep -q -i ubuntu <<< $ID || grep -q -i ubuntu <<< $ID_LIKE; then
OS=Debian
DOCKER_APT_REPO="deb [arch=amd64] https://download.docker.com/linux/ubuntu \
bionic stable"
MONGO_APT_REPO="deb [ arch=amd64 ] https://repo.mongodb.org/apt/ubuntu \
bionic/mongodb-org/4.2 multiverse"
elif grep -q -i debian <<< $ID || grep -q -i debian <<< $ID_LIKE; then
OS=Debian
DOCKER_APT_REPO="deb [arch=amd64] https://download.docker.com/linux/debian \
buster stable"
MONGO_APT_REPO="deb http://repo.mongodb.org/apt/debian \
buster/mongodb-org/4.2 main"
fi
SHARED_PACKAGES=$(cat <<EOF
curl git zsh vim tmux httpie vim hub
mongodb-org
EOF
)
DEBIAN_PACKAGES=$(cat <<EOF
$SHARED_PACKAGES
build-essential
postgresql redis-server awscli python3 python3-pip
imagemagick shellcheck gnupg
silversearcher-ag
EOF
)
FEDORA_PACKAGES=$(cat <<EOF
$SHARED_PACKAGES
python3 python3-pip
util-linux-user
git-lfs the_silver_searcher
ShellCheck ImageMagick
postgresql-server postgresql-contrib libpq-devel redis awscli
dnf-plugins-core
openssl-devel
EOF
)
# Initialize PostgreSQL on first run and enable the database services so
# they start at boot on Fedora.
fedora_enable_databases_on_restart() {
  fancy_echo "Ensuring databases run on startup."
  # Only initialize the cluster when the data directory is absent
  # (the check runs as root, same as the original `sudo bash -c` test).
  if ! sudo test -d /var/lib/pgsql/data; then
    sudo /usr/bin/postgresql-setup --initdb
  fi
  local svc
  for svc in postgresql redis mongod; do
    sudo systemctl enable "$svc"
  done
}
debian_enable_databases_on_restart() {
fancy_echo "Ensuring databases run on startup."
sudo systemctl enable postgresql
sudo systemctl enable redis-server
sudo systemctl enable mongod
}
apt_add_mongo_repo() {
echo "$MONGO_APT_REPO" | sudo tee /etc/apt/sources.list.d/mongodb-org-4.2.list
sudo apt install -y -q gnupg wget
wget -qO - https://www.mongodb.org/static/pgp/server-4.2.asc | sudo apt-key add -
sudo apt-get update
}
dnf_add_mongo_repo() {
sudo tee /etc/yum.repos.d/mongodb-org-4.2.repo > /dev/null <<EOF
[mongodb-org-4.2]
name=MongoDB Repository
baseurl=https://repo.mongodb.org/yum/redhat/8/mongodb-org/4.2/x86_64/
gpgcheck=1
enabled=1
gpgkey=https://www.mongodb.org/static/pgp/server-4.2.asc
EOF
}
dnf_add_and_install_docker_ce() {
sudo dnf config-manager \
--add-repo \
https://download.docker.com/linux/fedora/docker-ce.repo
sudo dnf -y install docker-ce docker-ce-cli containerd.io
}
deb_add_and_install_docker_ce() {
sudo apt install -y -q \
apt-transport-https \
ca-certificates \
curl \
gnupg-agent \
software-properties-common
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add -
sudo add-apt-repository "$DOCKER_APT_REPO"
sudo apt-get update
sudo apt-get install -y -q docker-ce docker-ce-cli containerd.io
}
systemd_enable_docker_on_restart() {
fancy_echo "Ensuring Docker runs on startup."
sudo usermod -aG docker $(whoami)
sudo systemctl enable docker
}
install_circleci_cli() {
curl -fLSs https://raw.githubusercontent.com/CircleCI-Public/circleci-cli/master/install.sh | sudo bash
}
install_eb_cli() {
pip3 install --user --upgrade awsebcli
}
deb_add_and_install_terraform() {
curl -fsSL https://apt.releases.hashicorp.com/gpg | sudo apt-key add -
sudo apt-add-repository "deb [arch=$(dpkg --print-architecture)] https://apt.releases.hashicorp.com $(lsb_release -cs) main"
sudo apt-get install -y terraform
}
dnf_add_and_install_terraform() {
sudo dnf install -y dnf-plugins-core
sudo dnf config-manager --add-repo https://rpm.releases.hashicorp.com/$release/hashicorp.repo
sudo dnf -y install terraform
}
case $OS in
Fedora)
fancy_echo "Installing packages using dnf"
sudo dnf groupinstall -y "C Development Tools and Libraries"
dnf_add_mongo_repo
dnf_add_and_install_terraform
sudo dnf -y install $FEDORA_PACKAGES
install_circleci_cli
install_eb_cli
fedora_enable_databases_on_restart
;;
Debian)
fancy_echo "Installing packages using apt"
deb_add_and_install_docker_ce
deb_add_and_install_terraform
apt_add_mongo_repo
sudo apt install -y -q $DEBIAN_PACKAGES
install_circleci_cli
install_eb_cli
debian_enable_databases_on_restart
systemd_enable_docker_on_restart
;;
*)
fancy_echo "You're OS is not detected, cannot install packages."
exit 1
;;
esac
| true
|
c670d1b3820217434b226fe496fd7d239b22997a
|
Shell
|
izabera/codeeval
|
/moderate/justify.bash
|
UTF-8
| 820
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
LANG=C
eighty=" "
set -f
while read -r || [[ $REPLY ]]; do
while (( ${#REPLY} > 80 )); do
words=($REPLY) line=${words[0]} i=1
while (( ${#line} + ${#words[i]} < 80 )); do
line+=" ${words[i++]}"
done
REPLY=${words[*]:i}
words=($line) concat=${line// } sep=0 seps=()
while (( ${#concat} + (sep + 1) * (${#words[@]} - 1) < 80 )); do
(( sep ++ ))
done
for i in "${words[@]:1}"; do
seps+=("${eighty:0:sep}")
done
(( len = ${#concat} + sep * (${#words[@]} - 1) ))
line=${words[0]}
for i in "${!seps[@]}"; do
(( len++ < 80 )) && seps[i]+=" "
line+=${seps[i]}${words[i+1]}
done
printf "%s\n" "$line"
done
printf "%s\n" "$REPLY"
done < "$1"
| true
|
e54bd955da481ae5908d3d6350634f03c2c2e210
|
Shell
|
mikecarr/docker-apps
|
/docker-rabbitmq-cluster/add_user.sh
|
UTF-8
| 1,520
| 3
| 3
|
[] |
no_license
|
#!/bin/sh
while getopts "u:p:v:c:m:P:e:h" ARG
do
case $ARG in
u)
USERNAME=$OPTARG
echo $USERNAME
;;
p)
PASSWORD=$OPTARG
echo $PASSWORD
;;
v)
VHOST="/$OPTARG"
echo $VHOST
;;
m)
CLUSTER_MASTER=$OPTARG
echo $CLUSTER_MASTER
;;
P)
PORT=$OPTARG
echo $PORT
;;
e)
ERLANG_PORT=$OPTARG
echo $ERLANG_PORT
;;
c)
COOKIE=$OPTARG
echo $COOKIE
;;
h)
echo "Args:"
echo "-u <username>"
echo "-p <password>"
echo "-v <vhost>"
echo "-m <cluster master>"
echo "-P <port>"
echo "-e <erlang port>"
echo "-c <cookie>"
echo "-h help"
exit
;;
esac
done
#echo $COOKIE > ~/.erlang.cookie
echo $COOKIE > /var/lib/rabbitmq/.erlang.cookie
export RABBITMQ_SERVER_START_ARGS="-setcookie $COOKIE -kernel inet_dist_listen_min $PORT -kernel inet_dist_listen_max $PORT"
export ERL_EPMD_PORT=$ERLANG_PORT
rabbitmq-server -detached
sleep 2
# ram nodes for greater than 2
rabbitmqctl stop_app
#rabbitmqctl reset
if [ ! -z "$CLUSTER_MASTER" ]; then
echo "******** HERE $CLUSTER_MASTER . "
rabbitmqctl join_cluster rabbit@$CLUSTER_MASTER
fi
rabbitmqctl start_app
sleep 2
rabbitmqctl add_vhost $VHOST
rabbitmqctl add_user $USERNAME $PASSWORD
rabbitmqctl set_permissions -p $VHOST $USERNAME ".*" ".*" ".*"
rabbitmqctl set_user_tags $USERNAME administrator
rabbitmqctl delete_user guest
/usr/bin/supervisord -n
| true
|
4a946c37b2fb4805ee919614a3a092cc2cba1eb4
|
Shell
|
nexus-prime/PullTaskResults
|
/PullTasks.sh
|
UTF-8
| 5,021
| 3.578125
| 4
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# bash PullTasks.sh ProjectName HostID NumPages
# $1=project URL
# $2=host ID
# $3=Pages of BOINC tasks to return (20 tasks per page)
# $4=Output File Name
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
#echo $SCRIPT_DIR
# Load input data
URL="$(cat $SCRIPT_DIR/resources/WhiteList_URL_Lookup.txt | grep "$1" | awk -F[=,=] '{print $2}')"
HostID=$2
NumPages=$3
dir=$(pwd)
Output=$4
echo $URL
delim=";"
echo "Task;Work Unit;Sent [UTC];Time Reported [UTC];Status;Run Time [sec];CPU TIme [sec];Credit;Application" > $Output
# Check for projects that are incompatible with main script
if [ "$URL" == "https://einsteinathome.org" ]
then
bash $SCRIPT_DIR/PullTasks_Einstein_Special.alt $HostID $NumPages $Output
elif [ "$URL" == "http://www.rechenkraft.net/yoyo" ]
then
bash $SCRIPT_DIR/PullTasks_Yoyo_Special.alt $HostID $NumPages $Output
elif [ "$URL" == "https://download.worldcommunitygrid.org/boinc" ]
then
echo "World Community Grid does not report data task data on a per host basis"
else
# Start Download Loop
for jnd in `seq 0 $(($NumPages-1))`;
do
offset=$(($jnd*20))
wget "$URL/results.php?hostid=$HostID&offset=$offset&show_names=0&state=4&appid=0" -q -O $SCRIPT_DIR/resources/"x$jnd.temp" &
done
wait
# Check for invalid table
for jnd in `seq 0 $(($NumPages-1))`;
do
offset=$(($jnd*20))
if grep -q '<title>Invalid tasks for computer' $SCRIPT_DIR/resources/"x$jnd.temp"
then
wget "$URL/results.php?hostid=$HostID&offset=$offset&show_names=0&state=3&appid=0" -q -O $SCRIPT_DIR/resources/"x$jnd.temp" &
fi
done
wait
# Start Main Loop
for jnd in `seq 0 $(($NumPages-1))`;
do
offset=$(($jnd*20))
cat $SCRIPT_DIR/resources/"x$jnd.temp" > $SCRIPT_DIR/resources/x.temp
# Pull data from the website
# wget "$URL/results.php?hostid=$HostID&offset=$offset&show_names=0&state=4&appid=0" -O $SCRIPT_DIR/resources/x.temp
# if grep -q '<title>Invalid tasks for computer' $SCRIPT_DIR/resources/x.temp
# then
# wget "$URL/results.php?hostid=$HostID&offset=$offset&show_names=0&state=3&appid=0" -O $SCRIPT_DIR/resources/x.temp
# fi
# Kill loop if website is out of data
if grep -q "explain_state" $SCRIPT_DIR/resources/x.temp
then
:
else
break
fi
# Reformat to adjust for minor incosistencies in results.php data
headersize="$(cat $SCRIPT_DIR/resources/x.temp| grep -n "result.php?resultid"|cut -f1 -d:|head -1)"
headersize="$(($headersize-1))d"
sed -i "1,$headersize" $SCRIPT_DIR/resources/x.temp
head -n -6 $SCRIPT_DIR/resources/x.temp > $SCRIPT_DIR/resources/x.temp2
mv $SCRIPT_DIR/resources/x.temp2 $SCRIPT_DIR/resources/x.temp
cat $SCRIPT_DIR/resources/x.temp | awk '{gsub("</td><td", "</td>~<td", $0); print}' > $SCRIPT_DIR/resources/x.temp2
mv $SCRIPT_DIR/resources/x.temp2 $SCRIPT_DIR/resources/x.temp
cat $SCRIPT_DIR/resources/x.temp | tr '~' '\n' > $SCRIPT_DIR/resources/x.temp2
mv $SCRIPT_DIR/resources/x.temp2 $SCRIPT_DIR/resources/x.temp
sed -i '/^$/d' $SCRIPT_DIR/resources/x.temp
sed -i 's/ //g' $SCRIPT_DIR/resources/x.temp
cat $SCRIPT_DIR/resources/x.temp | awk '{gsub("</td></tr>", "</td>", $0); print}' > $SCRIPT_DIR/resources/x.temp2
mv $SCRIPT_DIR/resources/x.temp2 $SCRIPT_DIR/resources/x.temp
length="$(wc -l < $SCRIPT_DIR/resources/x.temp)"
if [ "$length" -eq "0" ]
then
break
fi
dataseg="$(cat $SCRIPT_DIR/resources/x.temp| grep -n "result.php"|cut -f1 -d:|head -2|tail -1)"
dataseg="$(( $dataseg - 1 ))"
for ind in `seq 0 19`;
do
readln=$(( $ind*$dataseg+1))
text=""
text="$(awk "NR==$readln {print;exit}" $SCRIPT_DIR/resources/x.temp | sed 's/.*?//' | awk -F[=,\"] '{print $2}')$delim"
readln=$(($readln+1))
text="$text$(awk "NR==$readln {print;exit}" $SCRIPT_DIR/resources/x.temp | sed 's/.*?//' | awk -F[=,\"] '{print $2}')$delim"
readln=$(($readln+1))
text="$text$(awk "NR==$readln {print;exit}" $SCRIPT_DIR/resources/x.temp | sed 's/^.\{4\}//' | sed 's/.\{5\}$//')$delim"
readln=$(($readln+1))
text="$text$(awk "NR==$readln {print;exit}" $SCRIPT_DIR/resources/x.temp | sed 's/^.\{4\}//' | sed 's/.\{5\}$//')$delim"
readln=$(($readln+1))
text="$text$(awk "NR==$readln {print;exit}" $SCRIPT_DIR/resources/x.temp | sed 's/^.\{4\}//' | sed 's/.\{5\}$//')$delim"
readln=$(($readln+1))
text="$text$(awk "NR==$readln {print;exit}" $SCRIPT_DIR/resources/x.temp | tr --delete , | grep -Eo "[0-9]+\.[0-9]+")$delim"
readln=$(($readln+1))
text="$text$(awk "NR==$readln {print;exit}" $SCRIPT_DIR/resources/x.temp | tr --delete , | grep -Eo "[0-9]+\.[0-9]+")$delim"
readln=$(($readln+1))
text="$text$(awk "NR==$readln {print;exit}" $SCRIPT_DIR/resources/x.temp | tr --delete , | grep -Eo "[0-9]+\.[0-9]+")$delim"
readln=$(($readln+1))
text="$text$(awk "NR==$readln {print;exit}" $SCRIPT_DIR/resources/x.temp | sed 's/^.\{4\}//' | sed 's/.\{5\}$//')"
echo $text >> $Output
done
done
grep 'UTC' $Output > temp && mv temp $Output
grep -v '.rogress' $Output > temp && mv temp $Output
rm $SCRIPT_DIR/resources/x*.temp
#cat $Output
fi
| true
|
443e185743d89f971407e219603c74fc30a23965
|
Shell
|
dazzbourgh/alexa-jumoresques-serverless
|
/scripts/remove-production-dependencies.sh
|
UTF-8
| 122
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
for dir in "../src/handlers"/*; do
if test -d "$dir"; then
rm -r "$dir"/dependencies
fi
done
| true
|
13ea1d6c3d85d90c786cc3ec8a2f11c5e5fed3d8
|
Shell
|
marcusandre/dotfiles
|
/zsh/.zshrc
|
UTF-8
| 2,006
| 2.75
| 3
|
[] |
no_license
|
export ZSH=~/.zsh
eval "$(~/Developer/homebrew/bin/brew shellenv)"
# == ENVIRONMENT
if [[ -f /Users/mzui5of/.zsh_private ]]; then
source /Users/mzui5of/.zsh_private
fi
# == PATH
export GOPATH=$HOME/golang
export GOROOT=$(brew --prefix go)/libexec
CHUNKS=(
$HOME/bin
$GOPATH/bin
$GOROOT/bin
/usr/local/bin
/usr/local/sbin
$PATH
)
CHUNKS_STR=${(j.:.)${:-${^CHUNKS}}}
export PATH=$CHUNKS_STR
# == EDITOR
export EDITOR=nvim
# == OPTIONS
setopt HIST_IGNORE_DUPS
setopt HIST_IGNORE_SPACE
setopt INC_APPEND_HISTORY
setopt EXTENDED_HISTORY
setopt AUTO_PUSHD
setopt PUSHD_IGNORE_DUPS
setopt PUSHD_MINUS
SAVEHIST=9000
HISTSIZE=9000
HISTFILE=~/.zsh_history
# == COMPLETION
setopt GLOBDOTS # Complete hidden files
export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8
# Load and run compinit
autoload -U compinit
compinit -i
# == Keyboard
bindkey -e
# == ALIASES
alias ..='cd ..'
alias e='nvim'
alias flushdns='sudo killall -HUP mDNSResponder'
alias gap='git add . -p'
alias gb='git branch -a'
alias gd='git diff'
alias gl='git ll'
alias gp="git symbolic-ref --short HEAD 2> /dev/null | xargs -L1 git push origin"
alias gr='cd $(git root)'
alias gu='git upgrade'
alias la=' ls -laG'
alias ld=' ls -ladG'
alias ll=' ls -laG'
alias nd='cat package.json | jq ".dependencies"'
alias ns='cat package.json | jq ".scripts"'
alias rf='rm -fr'
alias rmds='find . -name ".DS_Store" -type f -delete'
alias s='git s'
alias ss='git status'
alias wl='wget -c -q --show-progress'
# == HELPERS
# Create new folder and cd into
md() { mkdir -p "$1" && cd "$_" }
# Get external ip addr
extip() { curl ifconfig.me/ip }
# Safety copies
dl3() { youtube-dl --output "%(title)s.%(ext)s" -x --audio-format mp3 --prefer-ffmpeg "$1" }
# starship
eval "$(starship init zsh)"
# init fzf
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
# init z
[ -f $(brew --prefix)/etc/profile.d/z.sh ] && . $(brew --prefix)/etc/profile.d/z.sh
# init cargo
[ -f ~/.cargo/env ] && source ~/.cargo/env
# fnm
eval "$(fnm env --use-on-cd)"
| true
|
33ed85a222bc0bb664f0c9967f6f63c2260273cf
|
Shell
|
hassoon1986/curl-for-win
|
/_build.sh
|
UTF-8
| 8,136
| 3.4375
| 3
|
[
"MIT",
"CC-BY-SA-4.0",
"CC-BY-SA-3.0"
] |
permissive
|
#!/bin/sh -ex
# Copyright 2015-present Viktor Szakats. See LICENSE.md
# TODO:
# - Enable Control Flow Guard (once FLOSS toolchains support it)
# LLVM/CLANG: -ehcontguard (requires LLVM 13.0.0)
# - ARM64 builds (once FLOSS toolchains support it)
# - Switch to libssh from libssh2?
# - LLVM -mretpoline
# - GCC -mindirect-branch -mfunction-return -mindirect-branch-register
# - Use Universal CRT?
# - Switch to LibreSSL or rustls?
# Tools:
# compiler build
# -------- -----------
# zlib.sh clang cmake
# zlibng.sh clang cmake
# zstd.sh clang cmake
# brotli.sh clang cmake
# libgsasl.sh clang autotools
# libidn2.sh clang autotools
# nghttp2.sh clang cmake
# nghttp3.sh clang cmake
# openssl.sh clang proprietary
# ngtcp2.sh gcc autotools TODO: move to cmake and clang (couldn't detect openssl, and even configure needs a manual patch)
# libssh2.sh clang make TODO: move to cmake
# curl.sh clang make TODO: move to cmake
cd "$(dirname "$0")" || exit
LC_ALL=C
LC_MESSAGES=C
LANG=C
export GREP_OPTIONS=
readonly _LOG='logurl.txt'
if [ -n "${APPVEYOR_ACCOUNT_NAME}" ]; then
# https://www.appveyor.com/docs/environment-variables/
_LOGURL="${APPVEYOR_URL}/project/${APPVEYOR_ACCOUNT_NAME}/${APPVEYOR_PROJECT_SLUG}/build/${APPVEYOR_BUILD_VERSION}/job/${APPVEYOR_JOB_ID}"
# _LOGURL="${APPVEYOR_URL}/api/buildjobs/${APPVEYOR_JOB_ID}/log"
elif [ -n "${GITHUB_RUN_ID}" ]; then
# https://docs.github.com/actions/reference/environment-variables
_LOGURL="${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}"
else
# https://docs.gitlab.com/ce/ci/variables/index.html
_LOGURL="${CI_SERVER_URL}/${CI_PROJECT_PATH}/-/jobs/${CI_JOB_ID}/raw"
fi
echo "${_LOGURL}" | tee "${_LOG}"
export _BRANCH="${APPVEYOR_REPO_BRANCH}${CI_COMMIT_REF_NAME}${GITHUB_REF}${GIT_BRANCH}"
[ -n "${_BRANCH}" ] || _BRANCH="$(git symbolic-ref --short --quiet HEAD)"
[ -n "${_BRANCH}" ] || _BRANCH='main'
export _URL=''
command -v git >/dev/null 2>&1 && _URL="$(git ls-remote --get-url | sed 's|.git$||')"
[ -n "${_URL}" ] || _URL="https://github.com/${APPVEYOR_REPO_NAME}${GITHUB_REPOSITORY}"
# Detect host OS
export _OS
case "$(uname)" in
*_NT*) _OS='win';;
Linux*) _OS='linux';;
Darwin*) _OS='mac';;
*BSD) _OS='bsd';;
*) _OS='unrecognized';;
esac
# For 'configure'-based builds.
# This is more or less guesswork and this warning remains:
# `configure: WARNING: using cross tools not prefixed with host triplet`
# Even with `_CCPREFIX` provided.
if [ "${_OS}" != 'win' ]; then
# https://clang.llvm.org/docs/CrossCompilation.html
export _CROSS_HOST
case "${_OS}" in
win) _CROSS_HOST='x86_64-pc-mingw32';;
linux) _CROSS_HOST='x86_64-pc-linux';; # x86_64-pc-linux-gnu
mac) _CROSS_HOST='x86_64-apple-darwin';;
bsd) _CROSS_HOST='x86_64-pc-bsd';;
esac
fi
export PUBLISH_PROD_FROM
if [ "${APPVEYOR_REPO_PROVIDER}" = 'gitHub' ] || \
[ -n "${GITHUB_RUN_ID}" ]; then
PUBLISH_PROD_FROM='linux'
fi
export _BLD='build.txt'
rm -f ./*-*-mingw*.*
rm -f hashes.txt
rm -f "${_BLD}"
# Download sources
. ./_dl.sh || exit 1
# Decrypt package signing key
SIGN_PKG_KEY='sign-pkg.gpg.asc'
if [ -f "${SIGN_PKG_KEY}" ] && [ "${SIGN_PKG_KEY_ID}" ]; then
(
set +x
echo "${SIGN_PKG_GPG_PASS}" | gpg \
--batch --yes --no-tty --quiet \
--pinentry-mode loopback --passphrase-fd 0 \
--decrypt "${SIGN_PKG_KEY}" 2>/dev/null | \
gpg --batch --quiet --import
)
fi
# decrypt code signing key
export SIGN_CODE_KEY=
SIGN_CODE_KEY="$(realpath '.')/sign-code.p12"
if [ -f "${SIGN_CODE_KEY}.asc" ]; then
(
set +x
if [ -n "${SIGN_CODE_GPG_PASS}" ]; then
install -m 600 /dev/null "${SIGN_CODE_KEY}"
echo "${SIGN_CODE_GPG_PASS}" | gpg \
--batch --yes --no-tty --quiet \
--pinentry-mode loopback --passphrase-fd 0 \
--decrypt "${SIGN_CODE_KEY}.asc" 2>/dev/null >> "${SIGN_CODE_KEY}"
fi
)
fi
[ -f "${SIGN_CODE_KEY}" ] || unset SIGN_CODE_KEY
if [ "${CC}" = 'mingw-clang' ]; then
echo ".clang$("clang${_CCSUFFIX}" --version | grep -o -a -E ' [0-9]*\.[0-9]*[\.][0-9]*')" >> "${_BLD}"
fi
unset ver
case "${_OS}" in
mac)
ver="$(brew info --json=v2 --formula mingw-w64 | jq --raw-output '.formulae[] | select(.name == "mingw-w64") | .versions.stable')";;
linux)
[ -n "${ver}" ] || ver="$(dpkg --status mingw-w64)"
[ -n "${ver}" ] || ver="$(rpm --query mingw-w64)"
[ -n "${ver}" ] || ver="$(pacman --query --info mingw-w64)"
ver="$(printf '%s' "${ver}" | sed -E 's|^(Version ?:) *(.+)$|\2|g')"
;;
esac
[ -n "${ver}" ] && echo ".mingw-w64 ${ver}" >> "${_BLD}"
_ori_path="${PATH}"
build_single_target() {
export _CPU="$1"
export _TRIPLET=
export _SYSROOT=
export _CCPREFIX=
export _MAKE='make'
export _WINE=''
export _OPTM=
[ "${_CPU}" = 'x86' ] && _OPTM='-m32'
[ "${_CPU}" = 'x64' ] && _OPTM='-m64'
[ "${_CPU}" = 'x86' ] && _machine='i686'
[ "${_CPU}" = 'x64' ] && _machine='x86_64'
[ "${_CPU}" = 'arm64' ] && _machine="${_CPU}"
export _PKGSUFFIX
[ "${_CPU}" = 'x86' ] && _PKGSUFFIX="-win32-mingw"
[ "${_CPU}" = 'x64' ] && _PKGSUFFIX="-win64-mingw"
if [ "${_OS}" = 'win' ]; then
export PATH
[ "${_CPU}" = 'x86' ] && PATH="/mingw32/bin:${_ori_path}"
[ "${_CPU}" = 'x64' ] && PATH="/mingw64/bin:${_ori_path}"
export _MAKE='mingw32-make'
# Install required component
pip3 --version
pip3 --disable-pip-version-check --no-cache-dir install --user pefile
else
if [ "${CC}" = 'mingw-clang' ] && [ "${_OS}" = 'mac' ]; then
export PATH="/usr/local/opt/llvm/bin:${_ori_path}"
fi
_TRIPLET="${_machine}-w64-mingw32"
# Prefixes don't work with MSYS2/mingw-w64, because `ar`, `nm` and
# `runlib` are missing from them. They are accessible either _without_
# one, or as prefix + `gcc-ar`, `gcc-nm`, `gcc-runlib`.
_CCPREFIX="${_TRIPLET}-"
# mingw-w64 sysroots
if [ "${_OS}" = 'mac' ]; then
_SYSROOT="/usr/local/opt/mingw-w64/toolchain-${_machine}"
else
_SYSROOT="/usr/${_TRIPLET}"
fi
if [ "${_OS}" = 'mac' ]; then
if [ "${_CPU}" = 'x64' ] && \
[ "$(uname -m)" = 'x86_64' ] && \
[ "$(sysctl -i -n sysctl.proc_translated)" != '1' ]; then
_WINE='wine64'
else
_WINE='echo'
fi
else
_WINE='wine'
fi
fi
export _CCVER
if [ "${CC}" = 'mingw-clang' ]; then
# We don't use old mingw toolchain versions when building with clang, so this is safe:
_CCVER='99'
else
_CCVER="$(printf '%02d' \
"$("${_CCPREFIX}gcc" -dumpversion | grep -a -o -E '^[0-9]+')")"
fi
echo ".gcc-mingw-w64-${_machine} $("${_CCPREFIX}gcc" -dumpversion)" >> "${_BLD}"
echo ".binutils-mingw-w64-${_machine} $("${_CCPREFIX}ar" V | grep -o -a -E '[0-9]+\.[0-9]+(\.[0-9]+)?')" >> "${_BLD}"
osslsigncode --version
ver="$(osslsigncode --version | grep -a -o -m 1 -E '[0-9]+\.[0-9]+\.[0-9]+')"
maj="$(printf '%s' "${ver}" | grep -a -o -E '[0-9]+' | head -1)"
min="$(printf '%s' "${ver}" | grep -a -o -E '[0-9]+' | head -2 | tail -1)"
rel="$(printf '%s' "${ver}" | grep -a -o -E '[0-9]+' | tail -1)"
ver="$(printf '%02d%02d%02d' "${maj}" "${min}" "${rel}")"
[ "${ver}" -lt 020100 ] || unset SIGN_CODE_KEY
time ./zlib.sh "${ZLIB_VER_}"
time ./zlibng.sh "${ZLIBNG_VER_}"
time ./zstd.sh "${ZSTD_VER_}"
time ./brotli.sh "${BROTLI_VER_}"
time ./libgsasl.sh "${LIBGSASL_VER_}"
time ./libidn2.sh "${LIBIDN2_VER_}"
time ./nghttp2.sh "${NGHTTP2_VER_}"
time ./nghttp3.sh "${NGHTTP3_VER_}"
time ./openssl.sh "${OPENSSL_VER_}"
time ./ngtcp2.sh "${NGTCP2_VER_}"
time ./libssh2.sh "${LIBSSH2_VER_}"
time ./curl.sh "${CURL_VER_}"
}
# Build binaries
# build_single_target arm64
build_single_target x64
build_single_target x86
case "${_OS}" in
mac) rm -f -P "${SIGN_CODE_KEY}";;
linux) [ -w "${SIGN_CODE_KEY}" ] && srm "${SIGN_CODE_KEY}";;
esac
rm -f "${SIGN_CODE_KEY}"
# Upload/deploy binaries
. ./_ul.sh || exit 1
| true
|
774053aa33b8e6808cbc34c0dd1cd4d5abd03885
|
Shell
|
russelltsherman/libsh
|
/lib/validate.sh
|
UTF-8
| 652
| 3.609375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
function is_empty() {
local var="$1"
[ -z "$var" ]
}
function is_not_empty() {
local var="$1"
[ -n "$var" ]
}
function is_file() {
local file="$1"
[ -f "$file" ]
}
function is_not_file() {
local file="$1"
[ ! -f "$file" ]
}
function is_dir() {
local dir="$1"
[ -d "$dir" ]
}
function is_not_dir() {
local dir="$1"
[ ! -d "$dir" ]
}
function is_number() {
local value="$1"
[[ "$value" =~ ^[0-9]+$ ]]
}
function is_not_number() {
local value="$1"
[[ ! "$value" =~ ^[0-9]+$ ]]
}
function contains() {
local list="$1"
local item="$2"
[[ $list =~ (^|[[:space:]])"$item"($|[[:space:]]) ]]
}
| true
|
594356343568e76a43ace48bf4a499480c2fa820
|
Shell
|
Theosakamg/deploy-user
|
/start.sh
|
UTF-8
| 2,347
| 3.625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# -*- coding: utf-8 -*-
# micky:x:1000:1000:Mickael Gaillard,,,:/home/micky:/bin/bash
PKGS_INSTALL=false
PKGS_LIST="exa ccze htop nano git command-not-found"
USER_NAME="micky"
USER_SHELL="bash"
USER_HOME="/home/$USER_NAME"
# Check if script is run with root.
if [ "$EUID" -ne 0 ]; then
echo "Please run as root"
exit
fi
# Make all variables.
echo -e "Check state..."
USER_PATH_SHELL=$(whereis $USER_SHELL |awk '{print $2}')
USER_CURRENT_PASSWD=$(getent passwd $USER_NAME)
if [ ! $? -eq 0 ]; then
useradd -m $USER_NAME -s $USER_PATH_SHELL
fi
USER_CURRENT_PASSWD=$(getent passwd $USER_NAME)
USER_CURRENT_SHELL=$(echo $USER_CURRENT_PASSWD | awk 'BEGIN { FS = ":" } ; {print $7}')
USER_CURRENT_HOME=$(echo $USER_CURRENT_PASSWD | awk 'BEGIN { FS = ":" } ; {print $6}')
# Install packages.
if $PKGS_INSTALL ; then
echo -e "install tools..."
apt-get update >/dev/null
apt-get -qq install $PKGS_LIST >/dev/null
fi
# Define Shell to use.
if [ "$USER_CURRENT_SHELL" != "$USER_PATH_SHELL" ]; then
echo -e "Change Shell..."
echo -e "\tfrom $USER_CURRENT_SHELL to $USER_PATH_SHELL"
chsh -s $USER_PATH_SHELL $USER_NAME
fi
# Define Home to use.
if [ "$USER_CURRENT_HOME" != "$USER_HOME" ]; then
echo -e "Change Home... "
echo -e "\tfrom $USER_CURRENT_HOME to $USER_HOME"
fi
# Deploy config.
echo -e "Get all config/script..."
if [ ! -d $USER_HOME/.deploy-user ]; then
git clone -q --recurse-submodules -j8 https://github.com/Theosakamg/deploy-user.git $USER_HOME/.deploy-user >/dev/null
chown -R $USER_NAME:$USER_NAME $USER_HOME/.deploy-user
else
cd $USER_HOME/.deploy-user
git pull
fi
if [ -e "$USER_HOME/.bash_aliases" ] && [ ! -L "$USER_HOME/.bash_aliases" ]; then
echo "Manual .bash_aliases removing..."
mv "$USER_HOME/.bash_aliases" "$USER_HOME/.bash_aliases.old"
fi
if [ ! -e "$USER_HOME/.bash_aliases" ]; then
echo -e "Deploy alias"
ln -s $USER_HOME/.deploy-user/bash_aliases $USER_HOME/.bash_aliases
chown $USER_NAME:$USER_NAME $USER_HOME/.bash_aliases
fi
if [ -e "$USER_HOME/.bashrc" ] && [ ! -L "$USER_HOME/.bashrc" ]; then
echo "Manual .bashrc removing..."
mv "$USER_HOME/.bashrc" "$USER_HOME/.bashrc.old"
fi
if [ ! -e "$USER_HOME/.bashrc" ]; then
echo -e "Deploy bashrc"
ln -s $USER_HOME/.deploy-user/bashrc $USER_HOME/.bashrc
chown $USER_NAME:$USER_NAME $USER_HOME/.bashrc
fi
| true
|
b14ac675b8a0fec6e516c18ae0a3b45a55a5021e
|
Shell
|
CarlosUziel/OSAdministration
|
/Kdic.ksh
|
UTF-8
| 252
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/ksh
if (( $# < 1 || $# > 2 ))
then
print -u2 "Wrong Invocation"
exit 1
fi
DIC=${2:-"/usr/dict/dictionary"}
exec 0<$DIC
while read WORD
do
if [[ $1 = WORD ]]
then
print "$1 encontrada"
exit 0
fi
done
print "$1 no encontrada"
exit 0
| true
|
44eedca465a27f9e1938d111bae16a3128a6d49f
|
Shell
|
mahendra-shinde/docker-demos
|
/svn_server/docker-entry.sh
|
UTF-8
| 281
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
set -e
if [ ! -d "/opt/svn/conf" ]; then
echo " Could not find configuration directory /opt/svn/conf"
echo " creating svn directory..."
svnadmin create /opt/svn
echo " Successfully created!"
fi
echo " Starting SVN service ..."
exec "$@"
| true
|
ed63467991e4629ef51119a5b043bb222ff65626
|
Shell
|
Youngfellows/ShellStudy
|
/Sample_Shell5/基本脚本/使用方括号执行数学运算.sh
|
UTF-8
| 109
| 2.625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
var1=10
var2=50
var3=45
var4=$[$var1 * ($var2 - $var3)]
echo 'The final result is '$var4
| true
|
07ac5c2ae31e821bb76ac8340c31ff8ecb8f0ecb
|
Shell
|
KaOSx/main
|
/xwayland/PKGBUILD
|
UTF-8
| 1,278
| 2.71875
| 3
|
[] |
no_license
|
pkgname=xwayland
pkgver=23.2.0
pkgrel=1
pkgdesc="X.Org X servers for Wayland"
arch=('x86_64')
url="https://xorg.freedesktop.org"
license=('GPL2')
depends=('xorg-server' 'wayland-protocols')
makedepends=('libx11' 'xtrans' 'libxkbfile' 'libxmu' 'libxtst' 'libxres' 'valgrind' 'meson' 'ninja')
options=('!libtool')
groups=('xorg')
# https://gitlab.freedesktop.org/xorg/xserver/-/tree/xwayland-21.1
source=("https://xorg.freedesktop.org/archive/individual/xserver/${pkgname}-${pkgver}.tar.xz")
sha256sums=('7f33ec2a34de6e66ae1b7e44872c3a2146192872c719b9acf192814edbabd4c5')
build() {
mkdir -p build
cd build
meson setup ../${pkgname}-${pkgver} \
--prefix=/usr \
--buildtype=release \
-Dglamor=true \
-Dxwayland_eglstream=false \
-Dxvfb=false \
-Dipv6=true \
-Dxdmcp=false \
-Dxcsecurity=true \
-Ddri3=true \
-Dxkb_dir=/usr/share/X11/xkb \
-Dxkb_output_dir=/var/lib/xkb
ninja
}
package() {
cd build
DESTDIR=${pkgdir} ninja install
install -m755 -d ${pkgdir}/usr/share/licenses/${pkgname}
install -m644 ../${pkgname}-${pkgver}/COPYING ${pkgdir}/usr/share/licenses/${pkgname}/
# part of xorg-server, keep conflicting files there
rm ${pkgdir}/usr/share/man/man1/Xserver.1
rm ${pkgdir}/usr/lib/xorg/protocol.txt
}
| true
|
37f2b76348fab06de0e828372b2dc30a17c033f2
|
Shell
|
vinhjaxt/docker-web-deploy
|
/php-node/run.sh
|
UTF-8
| 1,568
| 3.65625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" >/dev/null 2>&1 && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
DIR="$( cd -P "$( dirname "$SOURCE" )" >/dev/null 2>&1 && pwd )"
HOST_NAME="php-node"
if [[ "$(docker images -q "${HOST_NAME}:latest" 2> /dev/null)" == "" || "$1" != "" ]]; then
docker build -t "${HOST_NAME}:latest" "${DIR}/build"
if [ $? -eq 0 ]; then
echo 'Build done'
else
echo 'Build failed'
exit 1
fi
fi
docker container inspect "${HOST_NAME}" >/dev/null 2>&1
if [ $? -eq 0 ]; then
docker stop "${HOST_NAME}"
docker rm "${HOST_NAME}"
fi
docker run -d --restart=unless-stopped --name "${HOST_NAME}" --hostname "${HOST_NAME}" \
-v "${DIR}/custom-php.ini:/usr/local/etc/php/conf.d/0-vinhjaxt-custom-php.ini:ro" \
-v "${DIR}/custom-php-fpm.conf:/usr/local/etc/php-fpm.d/www.conf:ro" \
-v "${DIR}/../logs/${HOST_NAME}:/home/logs:rw" \
-v "${DIR}/../public_html/${HOST_NAME}:/home/public_html:rw" \
-v "${DIR}/../data/${HOST_NAME}:/home/www-data:rw" \
-v "${DIR}/../run/nginx/${HOST_NAME}:/home/run:rw" \
-v "${DIR}/entrypoint.sh:/entrypoint.sh:ro" \
--entrypoint="/entrypoint.sh" \
-v "${DIR}/../run/mysql/mysqld:/var/run/mysqld:ro" \
-e HOSTNAME=localhost \
--cap-add=SYS_PTRACE \
"${HOST_NAME}:latest" php-fpm
| true
|
7692c47a369f0a21326b06c7ff7680bf9f674506
|
Shell
|
kjcjohnson/nasT
|
/render.sh
|
UTF-8
| 463
| 3.75
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
INFILE=$1
OUTFILE=$2
data_v=()
data_k=()
i=1;
for arg in "$@"
do
if [ $i -le 2 ]
then
i=$((i+1))
continue;
fi
if [ $(($i % 2)) -eq 0 ]
then
data_v+=("$arg");
else
data_k+=("$arg");
fi
i=$((i+1))
done
`cp $INFILE $OUTFILE`
for i in `seq 0 $((${#data_k[*]} - 1))`;
do
#echo "[[$i]]> Looking for: ${data_k[$i]} to replace with: ${data_v[$i]}"
sed -i "s/<%${data_k[$i]}%>/${data_v[$i]}/g" $OUTFILE
done
| true
|
4b2e6bb3e3001eff6ef731535da2e17113170efa
|
Shell
|
rumeau/rendiciones
|
/bootstrap.sh
|
UTF-8
| 1,872
| 3.140625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
echo "--- Let's get to work. Installing now. ---"
echo "--- Updating packages list ---"
sudo apt-get update
echo "--- MySQL time ---"
sudo debconf-set-selections <<< 'mysql-server mysql-server/root_password password root'
sudo debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password root'
echo "--- Installing base packages ---"
sudo apt-get install -y vim curl python-software-properties
echo "--- Updating packages list ---"
sudo apt-get update
echo "--- We want the bleeding edge of PHP ---"
sudo add-apt-repository -y ppa:ondrej/php5
echo "--- Updating packages list ---"
sudo apt-get update
echo "--- Installing PHP-specific packages ---"
sudo apt-get install -y php5 apache2 php-pear libapache2-mod-php5 php5-curl php5-gd php5-mcrypt php5-intl mysql-server-5.5 php5-mysql git-core
echo "--- Installing and configuring Xdebug ---"
sudo apt-get install -y php5-xdebug
cat << EOF | sudo tee -a /etc/php5/mods-available/xdebug.ini
xdebug.scream=1
xdebug.cli_color=1
xdebug.show_local_vars=1
EOF
echo "--- Enabling mod-rewrite ---"
sudo a2enmod rewrite
echo "--- Setting document root ---"
sudo rm -rf /var/www
sudo ln -fs /vagrant /var/www
echo "--- Turn on errors ---"
sed -i "s/error_reporting = .*/error_reporting = E_ALL/" /etc/php5/apache2/php.ini
sed -i "s/display_errors = .*/display_errors = On/" /etc/php5/apache2/php.ini
sed -i 's/AllowOverride None/AllowOverride All/' /etc/apache2/apache2.conf
echo "--- Restarting Apache ---"
sudo service apache2 restart
echo "--- Install Composer (PHP package manager) ---"
curl -sS https://getcomposer.org/installer | php
sudo mv composer.phar /usr/local/bin/composer
echo "--- Install PHPUnit through Composer---"
sudo composer global require "phpunit/phpunit=4.1.*"
#
# Project specific packages
#
echo "--- All done, enjoy! :) ---"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.