text
stringlengths 1
1.05M
|
|---|
#!/usr/bin/env bash
#
# Change WorkerThreads to 1 and check if only 1 thread is used
#
# NOTE(review): this file uses `return` at top level, so it is meant to be
# sourced by the OpenDNSSEC test harness, which supplies the ods_*,
# syslog_* and log_this_timeout helpers — confirm before running standalone.
if [ -n "$HAVE_MYSQL" ]; then
ods_setup_conf conf.xml conf-mysql.xml
fi &&
ods_reset_env &&
# Start enforcer + signer and wait until both daemons report ready.
log_this_timeout ods-control-start 30 ods-control start &&
syslog_waitfor 60 'ods-enforcerd: .*Sleeping for' &&
syslog_waitfor 60 'ods-signerd: .*\[engine\] signer started' &&
# Stop and wait for a clean shutdown of both daemons.
log_this_timeout ods-control-stop 30 ods-control stop &&
syslog_waitfor 60 'ods-enforcerd: .*all done' &&
syslog_waitfor 60 'ods-signerd: .*\[engine\] signer shutdown' &&
# With WorkerThreads=1 exactly one worker must have reported for duty.
syslog_grep 'ods-signerd: .*\[worker\[1\]\] report for duty' &&
! syslog_grep 'ods-signerd: .*\[worker\[2\]\] report for duty' &&
return 0
# Any failure in the && chain above falls through to cleanup.
ods_kill
return 1
|
-- Return every star whose free-text description contains the substring "sun".
-- NOTE(review): case sensitivity of LIKE depends on the database/collation.
SELECT * FROM stars
WHERE description LIKE '%sun%';
|
#!/bin/sh
# Run a Go program using the vendored module tree, forcing module mode and
# the host OS/arch (GOOS/GOARCH cleared). All arguments are forwarded to
# `go run`.
set -e
# Bug fix: "$@" must be quoted — an unquoted $@ word-splits and glob-expands
# arguments, breaking any path or flag value that contains spaces.
GOFLAGS=-mod=vendor GO111MODULE=on GOOS="" GOARCH="" go run "$@"
|
package core.checker.checker;
import core.checker.util.Perf;
import core.checker.vo.Result;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Spits out graphs of throughput over time. Checker options take precedence
 * over those passed in with this constructor.
 */
public class RateGraph implements Checker {
    /** Options supplied at construction time; overridden by checker options. */
    private final Map opts;
    public RateGraph(Map opts) {
        this.opts = opts;
    }
    /**
     * Renders a rate graph for the given test and history.
     *
     * @param test    test descriptor, passed through to {@code Perf.rateGraph}
     * @param history operation history to graph
     * @param cOpts   checker options; merged last so they override {@code opts}
     * @return a {@link Result} marked valid (graphing has no pass/fail notion)
     */
    @Override
    public Result check(Map test, List<Operation> history, Map cOpts) {
        // Map.putAll replaces the original hand-rolled key-by-key copy loops;
        // cOpts is merged second so its entries win, matching the class doc.
        Map o = new HashMap();
        o.putAll(opts);
        o.putAll(cOpts);
        Perf.rateGraph(test, history, o);
        Result result = new Result();
        result.setValid(true);
        return result;
    }
}
|
<reponame>j-groeneveld/streaming-serverless
module.exports = function(grunt) {
require('load-grunt-tasks')(grunt);
grunt.initConfig({
pkg: grunt.file.readJSON('package.json'),
watch: {
lambda: {
files: [
'infrastructure/lambda/**/fn/index.*',
'infrastructure/lambda/*/lib/*helpers*',
'infrastructure/lambda/js/*/fn/package.json',
'infrastructure/lambda.tf',
'infrastructure/lambda/terraform/*/main.tf',
],
tasks: ['deploy'],
},
terraform: {
files: [
'**/*.tf'
],
tasks: ['shell:formatTf']
},
},
env: {
dev: {
src: '.env',
},
},
shell: {
dockerWatch: {
command: () => {
return 'watch -n 1 docker ps -a';
},
},
formatTf: {
command: () => {
return 'terraform fmt .'
}
},
infraUp: {
command: () => {
return (
'cd ./infrastructure && terraform -v ' +
'&& terraform init -input=false ' +
'&& terraform apply -auto-approve -input=false'
);
},
},
lambdaUp: {
command: () => {
return (
'cd ./infrastructure && terraform -v ' +
'&& terraform init -input=false ' +
'&& terraform apply -auto-approve -input=false ' +
'-target=module.tasks_validator -target=module.email_executer' // Keep up to date
);
},
},
localstackUp: {
command: () => {
return 'TMPDIR=/private$TMPDIR docker-compose up -d localstack';
},
},
stackDown: {
command: () => {
return 'docker-compose down && if [[ $(docker ps -aq) ]]; then docker rm -f $(docker ps -aq); fi';
},
},
stackUp: {
command: () => {
return 'TMPDIR=/private$TMPDIR docker-compose up';
},
},
},
});
/**** Register Tasks ****/
/* TLT (Top Level Tasks) */
grunt.registerTask('start', ['cleanup', 'build', 'shell:stackUp']);
grunt.registerTask('deploy', ['env:dev', 'shell:infraUp']);
grunt.registerTask('cleanup', ['del-tf-state', 'shell:stackDown']);
grunt.registerTask('docker', ['shell:dockerWatch']);
/* STs Secondary Tasks */
grunt.registerTask('build', [
'shell:localstackUp',
'wait:5000', // Not functionally essential but keeps error logs clean
'deploy',
]);
grunt.registerTask(
'del-tf-state',
'Delete terraform state files',
function() {
DATA_FP = 'data';
TF_STATE_FP = 'infrastructure/terraform.tfstate';
TF_STATE_BACKUP_FP = `${TF_STATE_FP}.backup`;
const delFile = function(filePath) {
if (grunt.file.exists(filePath)) {
grunt.log.write(`Deleting file ${filePath}...`);
grunt.file.delete(filePath);
grunt.log.ok();
}
};
delFile(TF_STATE_FP);
delFile(TF_STATE_BACKUP_FP);
delFile(DATA_FP);
}
);
grunt.registerTask('wait', 'Blocking wait before next task is run', function(
ms
) {
var done = this.async();
grunt.log.write('Going to sleep...');
setTimeout(function() {
grunt.log.write('waking up...').ok();
done();
}, ms);
});
/**** Grunt plugins ****/
grunt.loadNpmTasks('grunt-env');
grunt.loadNpmTasks('grunt-contrib-watch');
};
|
#!/usr/bin/env bash
#
# Copyright (c) 2019-2020 The Finalcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# CI environment definition: cross-compile Finalcoin for 64-bit Windows
# (mingw-w64) inside an Ubuntu 20.04 container. Sourced by the CI driver.
export LC_ALL=C.UTF-8
export CONTAINER_NAME=ci_win64
export DOCKER_NAME_TAG=ubuntu:20.04 # Check that Focal can cross-compile to win64
export HOST=x86_64-w64-mingw32
# wine32 requires the i386 architecture to be enabled in dpkg.
export DPKG_ADD_ARCH="i386"
export PACKAGES="python3 nsis g++-mingw-w64-x86-64 wine-binfmt wine64 wine32 file"
export RUN_FUNCTIONAL_TESTS=false
export GOAL="deploy"
export FINALCOIN_CONFIG="--enable-reduce-exports --disable-gui-tests --disable-external-signer"
|
from rest_framework.pagination import PageNumberPagination
class UserListPagination(PageNumberPagination):
    """
    Custom pagination class for paginating a list of users.

    Pages are requested with ``?page=<n>`` and the page size with
    ``?size=<n>``; the client-requested size is capped at 64.
    """
    # Default number of users per page when the client sends no size.
    page_size = 64
    # Query-string parameter selecting the page number.
    page_query_param = "page"
    # Query-string parameter letting clients override the page size.
    page_size_query_param = "size"
    # Hard ceiling on any client-requested page size.
    max_page_size = 64
|
<reponame>lananh265/social-network<filename>node_modules/react-icons-kit/md/ic_event_available_twotone.js
"use strict";

// Auto-generated icon data for react-icons-kit: Material Design
// "event available" icon, two-tone variant. The object mirrors the source
// SVG — a viewBox plus one child entry per <path> element.
Object.defineProperty(exports, "__esModule", {
  value: true
});
exports.ic_event_available_twotone = void 0;
var ic_event_available_twotone = {
  "viewBox": "0 0 24 24",
  "children": [{
    // Invisible bounding rectangle (fill: none).
    "name": "path",
    "attribs": {
      "d": "M0 0h24v24H0V0z",
      "fill": "none"
    },
    "children": []
  }, {
    // Semi-transparent "tone" layer (the calendar header bar).
    "name": "path",
    "attribs": {
      "d": "M5 5h14v2H5z",
      "opacity": ".3"
    },
    "children": []
  }, {
    // Main calendar outline plus the check mark.
    "name": "path",
    "attribs": {
      "d": "M19 3h-1V1h-2v2H8V1H6v2H5c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2zm0 16H5V9h14v10zm0-12H5V5h14v2zm-2.51 4.53l-1.06-1.06-4.87 4.87-2.11-2.11-1.06 1.06 3.17 3.17z"
    },
    "children": []
  }]
};
exports.ic_event_available_twotone = ic_event_available_twotone;
|
#!/bin/bash
# Install Ansible (plus sshpass for password-based SSH), then run the
# playbook against the static inventory. Assumes passwordless sudo or a
# cached credential; uncomment --ask-become-pass below otherwise.
sudo apt-get install -y ansible sshpass
ansible-playbook -i inventory/hosts playbook.yml
# --ask-become-pass
|
#!/bin/bash -eu
# Print the full ungoogled-chromium version string in the form:
#   <chromium_version>-<ungoogled_revision>.<local_revision>
# The repo root is the parent directory of this script's directory.
#
# Bug fix: $0 and the command substitutions were unquoted, which breaks
# when the checkout path contains spaces or glob characters.
_root_dir=$(dirname "$(dirname "$(readlink -f "$0")")")
_ungoogled_repo=$_root_dir/ungoogled-chromium
printf '%s-%s.%s' "$(cat "$_ungoogled_repo/chromium_version.txt")" "$(cat "$_ungoogled_repo/revision.txt")" "$(cat "$_root_dir/revision.txt")"
|
#!/bin/bash -e
#
# Copyright (c) 2009-2020 Robert Nelson <robertcnelson@gmail.com>
# Copyright (c) 2010 Mario Di Francesco <mdf-code@digitalexile.it>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Latest can be found at:
# https://github.com/RobertCNelson/omap-image-builder/blob/master/tools/setup_sdcard.sh
#REQUIREMENTS:
#uEnv.txt bootscript support
# Volume label for the FAT boot partition (later overridden to "EFI" in
# uboot_efi_mode).
BOOT_LABEL="BOOT"
# Clear any inherited bootloader-selection state from the environment.
unset USE_BETA_BOOTLOADER
unset USE_LOCAL_BOOT
unset LOCAL_BOOTLOADER
#Defaults
ROOTFS_TYPE=ext4
ROOTFS_LABEL=rootfs
# DIR: invocation directory (where rootfs tarballs/uEnv.txt are looked up).
DIR="$PWD"
# TEMPDIR: scratch space for downloads and mount points.
TEMPDIR=$(mktemp -d)
keep_net_alive () {
	# Print a heartbeat message every 5 minutes, forever, so remote
	# sessions (ssh/serial consoles) are not dropped during long writes.
	until false ; do
		echo "syncing media... $*"
		sleep 300
	done
}
# Start the heartbeat in the background and remember its PID so it can be
# reaped when the script exits (on any path, via the EXIT trap below).
keep_net_alive & KEEP_NET_ALIVE_PID=$!
cleanup_keep_net_alive () {
	# Only kill the pinger if it is still running.
	[ -e /proc/$KEEP_NET_ALIVE_PID ] && kill $KEEP_NET_ALIVE_PID
}
trap cleanup_keep_net_alive EXIT
is_element_of () {
	# Return 0 if $1 equals one of the whitespace-separated words in $2.
	# Operands are quoted so empty arguments do not break the test.
	testelt=$1
	for validelt in $2 ; do
		[ "$testelt" = "$validelt" ] && return 0
	done
	return 1
}
#########################################################################
#
# Define valid "--rootfs" root filesystem types.
#
#########################################################################
VALID_ROOTFS_TYPES="ext2 ext3 ext4 btrfs"
is_valid_rootfs_type () {
	# Bug fix: the original line had a stray "]" after the is_element_of
	# call (leftover from a "[ ... ]" test) that was silently passed to the
	# function as a bogus extra argument.
	if is_element_of "$1" "${VALID_ROOTFS_TYPES}" ; then
		return 0
	else
		return 1
	fi
}
check_root () {
	# Abort the script unless the effective UID is 0 (root / sudo).
	if [ "$(id -u)" != "0" ] ; then
		echo "$0 must be run as sudo user or root"
		exit 1
	fi
}
find_issue () {
	# Sanity-check the working directory: require root, require a rootfs
	# tarball in ${DIR}, and record whether pre-generated uEnv.txt /
	# post-uEnv.txt files are present (flags used later by populate_boot).
	check_root
	ROOTFS=$(ls "${DIR}/" | grep rootfs)
	if [ "x${ROOTFS}" != "x" ] ; then
		echo "Debug: ARM rootfs: ${ROOTFS}"
	else
		echo "Error: no armel-rootfs-* file"
		echo "Make sure your in the right dir..."
		exit
	fi
	unset has_uenvtxt
	unset check
	# First uEnv.txt that is not the post-flash variant.
	check=$(ls "${DIR}/" | grep uEnv.txt | grep -v post-uEnv.txt | head -n 1)
	if [ "x${check}" != "x" ] ; then
		echo "Debug: image has pre-generated uEnv.txt file"
		has_uenvtxt=1
	fi
	unset has_post_uenvtxt
	unset check
	check=$(ls "${DIR}/" | grep post-uEnv.txt | head -n 1)
	if [ "x${check}" != "x" ] ; then
		echo "Debug: image has post-uEnv.txt file"
		has_post_uenvtxt="enable"
	fi
}
check_for_command () {
	# Verify that external command $1 is installed; if not, warn (naming
	# the suggested package $2 when given) and set NEEDS_COMMAND=1 so the
	# caller can abort after collecting all missing tools.
	# `command -v` replaces the non-portable external `which`.
	if ! command -v "$1" > /dev/null ; then
		echo -n "You're missing command $1"
		NEEDS_COMMAND=1
		if [ -n "$2" ] ; then
			echo -n " (consider installing package $2)"
		fi
		echo
	fi
}
detect_software () {
	# Check for all required external tools, refuse to run with the known
	# broken sfdisk 2.17.2, and pick wget flags compatible with the
	# installed wget version (pre-1.14 lacks SNI support).
	unset NEEDS_COMMAND
	check_for_command mkfs.vfat dosfstools
	check_for_command wget wget
	check_for_command git git
	check_for_command partprobe parted
	if [ "x${build_img_file}" = "xenable" ] ; then
		check_for_command kpartx kpartx
	fi
	if [ "${NEEDS_COMMAND}" ] ; then
		echo ""
		echo "Your system is missing some dependencies"
		echo "Debian/Ubuntu: sudo apt-get install dosfstools git kpartx wget parted"
		echo "Fedora: yum install dosfstools dosfstools git wget"
		echo "Gentoo: emerge dosfstools git wget"
		echo ""
		exit
	fi
	unset test_sfdisk
	test_sfdisk=$(LC_ALL=C sfdisk -v 2>/dev/null | grep 2.17.2 | awk '{print $1}')
	# Bug fix: the original tested the misspelled variable ${test_sdfdisk}
	# (always empty), so the broken-sfdisk bailout below never triggered.
	if [ "x${test_sfdisk}" = "xsfdisk" ] ; then
		echo ""
		echo "Detected known broken sfdisk:"
		echo "See: https://github.com/RobertCNelson/netinstall/issues/20"
		echo ""
		exit
	fi
	unset wget_version
	# Minor version number of wget, e.g. "20" for 1.20.x.
	wget_version=$(LC_ALL=C wget --version | grep "GNU Wget" | awk '{print $3}' | awk -F '.' '{print $2}' || true)
	case "${wget_version}" in
	12|13)
		#wget before 1.14 in debian does not support sni
		echo "wget: [`LC_ALL=C wget --version | grep \"GNU Wget\" | awk '{print $3}' || true`]"
		echo "wget: [this version of wget does not support sni, using --no-check-certificate]"
		echo "wget: [http://en.wikipedia.org/wiki/Server_Name_Indication]"
		dl="wget --no-check-certificate"
		;;
	*)
		dl="wget"
		;;
	esac
	dl_continue="${dl} -c"
	dl_quiet="${dl} --no-verbose"
}
local_bootloader () {
	# Use bootloader blobs already on disk (LOCAL_SPL / LOCAL_BOOTLOADER)
	# instead of downloading them; copies the files into the scratch
	# download dir and records their basenames in SPL / UBOOT.
	echo ""
	echo "Using Locally Stored Device Bootloader"
	echo "-----------------------------"
	mkdir -p ${TEMPDIR}/dl/
	if [ "${spl_name}" ] ; then
		cp ${LOCAL_SPL} ${TEMPDIR}/dl/
		# Strip the directory part to keep just the filename.
		SPL=${LOCAL_SPL##*/}
		echo "SPL Bootloader: ${SPL}"
	fi
	if [ "${boot_name}" ] ; then
		cp ${LOCAL_BOOTLOADER} ${TEMPDIR}/dl/
		UBOOT=${LOCAL_BOOTLOADER##*/}
		echo "UBOOT Bootloader: ${UBOOT}"
	fi
}
dl_bootloader () {
	# Download the board's bootloader blobs (SPL and/or U-Boot) from the
	# rcn-ee.net index file, choosing beta (ABX2) or stable (ABI2) builds.
	# For eMMC-flasher images, additionally fetch "blank EEPROM" variants.
	echo ""
	echo "Downloading Device's Bootloader"
	echo "-----------------------------"
	# Expected format version of the remote bootloader list file.
	minimal_boot="1"
	mkdir -p ${TEMPDIR}/dl/${DIST}
	mkdir -p "${DIR}/dl/${DIST}"
	${dl_quiet} --directory-prefix="${TEMPDIR}/dl/" ${conf_bl_http}/${conf_bl_listfile}
	if [ ! -f ${TEMPDIR}/dl/${conf_bl_listfile} ] ; then
		echo "error: can't connect to rcn-ee.net, retry in a few minutes..."
		exit
	fi
	# The list file advertises its own format version; bail on mismatch.
	boot_version=$(cat ${TEMPDIR}/dl/${conf_bl_listfile} | grep "VERSION:" | awk -F":" '{print $2}')
	if [ "x${boot_version}" != "x${minimal_boot}" ] ; then
		echo "Error: This script is out of date and unsupported..."
		echo "Please Visit: https://github.com/RobertCNelson to find updates..."
		exit
	fi
	if [ "${USE_BETA_BOOTLOADER}" ] ; then
		ABI="ABX2"
	else
		ABI="ABI2"
	fi
	# Look up the SPL URL for this board in the list file, download it and
	# keep only the basename.
	if [ "${spl_name}" ] ; then
		SPL=$(cat ${TEMPDIR}/dl/${conf_bl_listfile} | grep "${ABI}:${conf_board}:SPL" | awk '{print $2}')
		${dl_quiet} --directory-prefix="${TEMPDIR}/dl/" ${SPL}
		SPL=${SPL##*/}
		echo "SPL Bootloader: ${SPL}"
	else
		unset SPL
	fi
	if [ "${boot_name}" ] ; then
		UBOOT=$(cat ${TEMPDIR}/dl/${conf_bl_listfile} | grep "${ABI}:${conf_board}:BOOT" | awk '{print $2}')
		${dl} --directory-prefix="${TEMPDIR}/dl/" ${UBOOT}
		UBOOT=${UBOOT##*/}
		echo "UBOOT Bootloader: ${UBOOT}"
	else
		unset UBOOT
	fi
	# "Blank EEPROM" bootloaders: used by flasher images to program boards
	# whose EEPROM is empty. NOTE: conf_board is intentionally rewritten
	# here for the lookup.
	if [ "x${oem_blank_eeprom}" = "xenable" ] ; then
		if [ "x${conf_board}" = "xam335x_evm" ] ; then
			ABI="ABI2"
			conf_board="am335x_boneblack"
			if [ "${spl_name}" ] ; then
				blank_SPL=$(cat ${TEMPDIR}/dl/${conf_bl_listfile} | grep "${ABI}:${conf_board}:SPL" | awk '{print $2}')
				${dl_quiet} --directory-prefix="${TEMPDIR}/dl/" ${blank_SPL}
				blank_SPL=${blank_SPL##*/}
				echo "blank_SPL Bootloader: ${blank_SPL}"
			else
				unset blank_SPL
			fi
			if [ "${boot_name}" ] ; then
				blank_UBOOT=$(cat ${TEMPDIR}/dl/${conf_bl_listfile} | grep "${ABI}:${conf_board}:BOOT" | awk '{print $2}')
				${dl} --directory-prefix="${TEMPDIR}/dl/" ${blank_UBOOT}
				blank_UBOOT=${blank_UBOOT##*/}
				echo "blank_UBOOT Bootloader: ${blank_UBOOT}"
			else
				unset blank_UBOOT
			fi
		fi
		if [ "x${conf_board}" = "xbeagle_x15" ] ; then
			if [ ! "x${flasher_uboot}" = "x" ] ; then
				ABI="ABI2"
				conf_board="${flasher_uboot}"
				if [ "${spl_name}" ] ; then
					blank_SPL=$(cat ${TEMPDIR}/dl/${conf_bl_listfile} | grep "${ABI}:${conf_board}:SPL" | awk '{print $2}')
					${dl_quiet} --directory-prefix="${TEMPDIR}/dl/" ${blank_SPL}
					blank_SPL=${blank_SPL##*/}
					echo "blank_SPL Bootloader: ${blank_SPL}"
				else
					unset blank_SPL
				fi
				if [ "${boot_name}" ] ; then
					blank_UBOOT=$(cat ${TEMPDIR}/dl/${conf_bl_listfile} | grep "${ABI}:${conf_board}:BOOT" | awk '{print $2}')
					${dl} --directory-prefix="${TEMPDIR}/dl/" ${blank_UBOOT}
					blank_UBOOT=${blank_UBOOT##*/}
					echo "blank_UBOOT Bootloader: ${blank_UBOOT}"
				else
					unset blank_UBOOT
				fi
			else
				# No flasher u-boot configured: disable blank-EEPROM mode.
				unset oem_blank_eeprom
			fi
		fi
	fi
}
generate_soc () {
	# Write the SOC.sh-style board config to ${wfile}: a flat key=value
	# file (format=1.0) describing how a flasher should install the
	# bootloader and lay out the boot/rootfs partitions.
	echo "#!/bin/sh" > ${wfile}
	echo "format=1.0" >> ${wfile}
	echo "" >> ${wfile}
	if [ ! "x${conf_bootloader_in_flash}" = "xenable" ] ; then
		# Bootloader lives on the media: record the raw dd parameters for
		# both the SPL and the main U-Boot image.
		echo "board=${board}" >> ${wfile}
		echo "" >> ${wfile}
		echo "bootloader_location=${bootloader_location}" >> ${wfile}
		echo "bootrom_gpt=${bootrom_gpt}" >> ${wfile}
		echo "" >> ${wfile}
		echo "dd_spl_uboot_count=${dd_spl_uboot_count}" >> ${wfile}
		echo "dd_spl_uboot_seek=${dd_spl_uboot_seek}" >> ${wfile}
		if [ "x${build_img_file}" = "xenable" ] ; then
			# Image files must not be truncated by dd.
			echo "dd_spl_uboot_conf=notrunc" >> ${wfile}
		else
			echo "dd_spl_uboot_conf=${dd_spl_uboot_conf}" >> ${wfile}
		fi
		echo "dd_spl_uboot_bs=${dd_spl_uboot_bs}" >> ${wfile}
		echo "dd_spl_uboot_backup=/opt/backup/uboot/${spl_uboot_name}" >> ${wfile}
		echo "" >> ${wfile}
		echo "dd_uboot_count=${dd_uboot_count}" >> ${wfile}
		echo "dd_uboot_seek=${dd_uboot_seek}" >> ${wfile}
		if [ "x${build_img_file}" = "xenable" ] ; then
			echo "dd_uboot_conf=notrunc" >> ${wfile}
		else
			echo "dd_uboot_conf=${dd_uboot_conf}" >> ${wfile}
		fi
		echo "dd_uboot_bs=${dd_uboot_bs}" >> ${wfile}
		echo "dd_uboot_backup=/opt/backup/uboot/${uboot_name}" >> ${wfile}
	else
		# Bootloader is in on-board flash: record only its capabilities.
		echo "uboot_CONFIG_CMD_BOOTZ=${uboot_CONFIG_CMD_BOOTZ}" >> ${wfile}
		echo "uboot_CONFIG_SUPPORT_RAW_INITRD=${uboot_CONFIG_SUPPORT_RAW_INITRD}" >> ${wfile}
		echo "uboot_CONFIG_CMD_FS_GENERIC=${uboot_CONFIG_CMD_FS_GENERIC}" >> ${wfile}
		echo "zreladdr=${conf_zreladdr}" >> ${wfile}
	fi
	# Partition geometry and filesystem types.
	echo "" >> ${wfile}
	echo "boot_fstype=${conf_boot_fstype}" >> ${wfile}
	echo "conf_boot_startmb=${conf_boot_startmb}" >> ${wfile}
	echo "conf_boot_endmb=${conf_boot_endmb}" >> ${wfile}
	echo "sfdisk_fstype=${sfdisk_fstype}" >> ${wfile}
	echo "" >> ${wfile}
	if [ "x${uboot_efi_mode}" = "xenable" ] ; then
		echo "uboot_efi_mode=${uboot_efi_mode}" >> ${wfile}
		echo "" >> ${wfile}
	fi
	echo "boot_label=${BOOT_LABEL}" >> ${wfile}
	echo "rootfs_label=${ROOTFS_LABEL}" >> ${wfile}
	echo "" >> ${wfile}
	echo "#Kernel" >> ${wfile}
	echo "dtb=${dtb}" >> ${wfile}
	echo "serial_tty=${SERIAL}" >> ${wfile}
	echo "usbnet_mem=${usbnet_mem}" >> ${wfile}
	echo "" >> ${wfile}
	echo "#Advanced options" >> ${wfile}
	echo "#disable_ssh_regeneration=true" >> ${wfile}
	echo "" >> ${wfile}
}
drive_error_ro () {
	# Fatal: the target media appears to be read-only. Print
	# troubleshooting hints and abort the script.
	printf '%s\n' \
		"-----------------------------" \
		"Error: for some reason your SD card is not writable..." \
		"Check: is the write protect lever set the locked position?" \
		"Check: do you have another SD card reader?" \
		"-----------------------------" \
		"Script gave up..."
	exit
}
unmount_all_drive_partitions () {
	# Unmount every mounted partition of ${media}, then DESTRUCTIVELY zero
	# the first 100MB (wipes old partition table/bootloader) and read it
	# back to flush stale kernel caches.
	echo ""
	echo "Unmounting Partitions"
	echo "-----------------------------"
	NUM_MOUNTS=$(mount | grep -v none | grep "${media}" | wc -l)
	## for (i=1;i<=${NUM_MOUNTS};i++)
	for ((i=1;i<=${NUM_MOUNTS};i++))
	do
		# Peel mounts off from the bottom of the list, one per iteration.
		DRIVE=$(mount | grep -v none | grep "${media}" | tail -1 | awk '{print $1}')
		umount ${DRIVE} >/dev/null 2>&1 || true
	done
	echo "Zeroing out Drive"
	echo "-----------------------------"
	# A failed write here usually means a write-protected card.
	dd if=/dev/zero of=${media} bs=1M count=100 || drive_error_ro
	sync
	dd if=${media} of=/dev/null bs=1M count=100
	sync
}
sfdisk_partition_layout () {
	# Create the boot + rootfs partition table on ${media} with sfdisk
	# (plus a separate /var partition when option_ro_root is enabled).
	# Handles both old (<=2.25, --in-order/--unit M) and new (>=2.26)
	# sfdisk input syntax.
	sfdisk_options="--force --in-order --Linux --unit M"
	sfdisk_boot_startmb="${conf_boot_startmb}"
	sfdisk_boot_size_mb="${conf_boot_endmb}"
	sfdisk_var_size_mb="${conf_var_startmb}"
	if [ "x${option_ro_root}" = "xenable" ] ; then
		sfdisk_var_startmb=$(($sfdisk_boot_startmb + $sfdisk_boot_size_mb))
		sfdisk_rootfs_startmb=$(($sfdisk_var_startmb + $sfdisk_var_size_mb))
	else
		sfdisk_rootfs_startmb=$(($sfdisk_boot_startmb + $sfdisk_boot_size_mb))
	fi
	# sfdisk >= 2.26 dropped --in-order; sizes then need explicit M suffix.
	test_sfdisk=$(LC_ALL=C sfdisk --help | grep -m 1 -e "--in-order" || true)
	if [ "x${test_sfdisk}" = "x" ] ; then
		echo "log: sfdisk: 2.26.x or greater detected"
		sfdisk_options="--force ${sfdisk_gpt}"
		sfdisk_boot_startmb="${sfdisk_boot_startmb}M"
		sfdisk_boot_size_mb="${sfdisk_boot_size_mb}M"
		sfdisk_var_startmb="${sfdisk_var_startmb}M"
		sfdisk_var_size_mb="${sfdisk_var_size_mb}M"
		sfdisk_rootfs_startmb="${sfdisk_rootfs_startmb}M"
	fi
	if [ "x${option_ro_root}" = "xenable" ] ; then
		echo "sfdisk: [$(LC_ALL=C sfdisk --version)]"
		echo "sfdisk: [${sfdisk_options} ${media}]"
		echo "sfdisk: [${sfdisk_boot_startmb},${sfdisk_boot_size_mb},${sfdisk_fstype},*]"
		echo "sfdisk: [${sfdisk_var_startmb},${sfdisk_var_size_mb},,-]"
		echo "sfdisk: [${sfdisk_rootfs_startmb},,,-]"
		LC_ALL=C sfdisk ${sfdisk_options} "${media}" <<-__EOF__
			${sfdisk_boot_startmb},${sfdisk_boot_size_mb},${sfdisk_fstype},*
			${sfdisk_var_startmb},${sfdisk_var_size_mb},,-
			${sfdisk_rootfs_startmb},,,-
		__EOF__
		media_rootfs_var_partition=3
	else
		echo "sfdisk: [$(LC_ALL=C sfdisk --version)]"
		echo "sfdisk: [${sfdisk_options} ${media}]"
		echo "sfdisk: [${sfdisk_boot_startmb},${sfdisk_boot_size_mb},${sfdisk_fstype},*]"
		echo "sfdisk: [${sfdisk_rootfs_startmb},,,-]"
		LC_ALL=C sfdisk ${sfdisk_options} "${media}" <<-__EOF__
			${sfdisk_boot_startmb},${sfdisk_boot_size_mb},${sfdisk_fstype},*
			${sfdisk_rootfs_startmb},,,-
		__EOF__
	fi
	sync
}
sfdisk_single_partition_layout () {
	# Create a single-partition layout on ${media} (bootloader written raw
	# via dd, so no separate boot partition). With option_ro_root a small
	# first partition plus a rootfs partition are created instead.
	sfdisk_options="--force --in-order --Linux --unit M"
	sfdisk_boot_startmb="${conf_boot_startmb}"
	sfdisk_var_size_mb="${conf_var_startmb}"
	if [ "x${option_ro_root}" = "xenable" ] ; then
		sfdisk_rootfs_startmb=$(($sfdisk_boot_startmb + $sfdisk_var_size_mb))
	fi
	# sfdisk >= 2.26 dropped --in-order; sizes then need explicit M suffix.
	test_sfdisk=$(LC_ALL=C sfdisk --help | grep -m 1 -e "--in-order" || true)
	if [ "x${test_sfdisk}" = "x" ] ; then
		echo "log: sfdisk: 2.26.x or greater detected"
		sfdisk_options="--force ${sfdisk_gpt}"
		sfdisk_boot_startmb="${sfdisk_boot_startmb}M"
		sfdisk_var_size_mb="${sfdisk_var_size_mb}M"
		if [ "x${option_ro_root}" = "xenable" ] ; then
			sfdisk_rootfs_startmb="${sfdisk_rootfs_startmb}M"
		fi
	fi
	if [ "x${option_ro_root}" = "xenable" ] ; then
		echo "sfdisk: [$(LC_ALL=C sfdisk --version)]"
		echo "sfdisk: [${sfdisk_options} ${media}]"
		echo "sfdisk: [${sfdisk_boot_startmb},${sfdisk_var_size_mb},${sfdisk_fstype},*]"
		echo "sfdisk: [${sfdisk_rootfs_startmb},,,-]"
		LC_ALL=C sfdisk ${sfdisk_options} "${media}" <<-__EOF__
			${sfdisk_boot_startmb},${sfdisk_var_size_mb},${sfdisk_fstype},*
			${sfdisk_rootfs_startmb},,,-
		__EOF__
		media_rootfs_var_partition=2
	else
		echo "sfdisk: [$(LC_ALL=C sfdisk --version)]"
		echo "sfdisk: [${sfdisk_options} ${media}]"
		echo "sfdisk: [${sfdisk_boot_startmb},,${sfdisk_fstype},*]"
		LC_ALL=C sfdisk ${sfdisk_options} "${media}" <<-__EOF__
			${sfdisk_boot_startmb},,${sfdisk_fstype},*
		__EOF__
	fi
	sync
}
dd_uboot_boot () {
	# Raw-write the main U-Boot blob to ${media}, assembling the dd
	# count/seek/conv/bs options from the board configuration. Uses the
	# "blank EEPROM" blob when oem_blank_eeprom is enabled.
	unset dd_uboot
	if [ ! "x${dd_uboot_count}" = "x" ] ; then
		dd_uboot="${dd_uboot}count=${dd_uboot_count} "
	fi
	if [ ! "x${dd_uboot_seek}" = "x" ] ; then
		dd_uboot="${dd_uboot}seek=${dd_uboot_seek} "
	fi
	if [ "x${build_img_file}" = "xenable" ] ; then
		# Writing into an image file: never truncate it.
		dd_uboot="${dd_uboot}conv=notrunc "
	else
		if [ ! "x${dd_uboot_conf}" = "x" ] ; then
			dd_uboot="${dd_uboot}conv=${dd_uboot_conf} "
		fi
	fi
	if [ ! "x${dd_uboot_bs}" = "x" ] ; then
		dd_uboot="${dd_uboot}bs=${dd_uboot_bs}"
	fi
	if [ "x${oem_blank_eeprom}" = "xenable" ] ; then
		uboot_blob="${blank_UBOOT}"
	else
		uboot_blob="${UBOOT}"
	fi
	echo "${uboot_name}: dd if=${uboot_blob} of=${media} ${dd_uboot}"
	echo "-----------------------------"
	dd if=${TEMPDIR}/dl/${uboot_blob} of=${media} ${dd_uboot}
	echo "-----------------------------"
}
dd_spl_uboot_boot () {
	# Raw-write the SPL (first-stage) bootloader to ${media}; mirrors
	# dd_uboot_boot but uses the dd_spl_uboot_* configuration values.
	unset dd_spl_uboot
	if [ ! "x${dd_spl_uboot_count}" = "x" ] ; then
		dd_spl_uboot="${dd_spl_uboot}count=${dd_spl_uboot_count} "
	fi
	if [ ! "x${dd_spl_uboot_seek}" = "x" ] ; then
		dd_spl_uboot="${dd_spl_uboot}seek=${dd_spl_uboot_seek} "
	fi
	if [ "x${build_img_file}" = "xenable" ] ; then
		# Writing into an image file: never truncate it.
		dd_spl_uboot="${dd_spl_uboot}conv=notrunc "
	else
		if [ ! "x${dd_spl_uboot_conf}" = "x" ] ; then
			dd_spl_uboot="${dd_spl_uboot}conv=${dd_spl_uboot_conf} "
		fi
	fi
	if [ ! "x${dd_spl_uboot_bs}" = "x" ] ; then
		dd_spl_uboot="${dd_spl_uboot}bs=${dd_spl_uboot_bs}"
	fi
	if [ "x${oem_blank_eeprom}" = "xenable" ] ; then
		spl_uboot_blob="${blank_SPL}"
	else
		spl_uboot_blob="${SPL}"
	fi
	echo "${spl_uboot_name}: dd if=${spl_uboot_blob} of=${media} ${dd_spl_uboot}"
	echo "-----------------------------"
	dd if=${TEMPDIR}/dl/${spl_uboot_blob} of=${media} ${dd_spl_uboot}
	echo "-----------------------------"
}
format_partition_error () {
	# Report the exact mkfs invocation that failed, then abort the script.
	printf '%s\n' "LC_ALL=C ${mkfs} ${mkfs_partition} ${mkfs_label}"
	printf '%s\n' "Failure: formating partition"
	exit
}
format_partition_try2 () {
	# Retry path for format_partition: re-probe the partition table, give
	# the kernel 5 seconds to create the device node, then run mkfs once
	# more; a second failure is fatal (format_partition_error exits).
	unset mkfs_options
	if [ "x${mkfs}" = "xmkfs.ext4" ] ; then
		mkfs_options="${ext4_options}"
	fi
	echo "-----------------------------"
	echo "BUG: [${mkfs_partition}] was not available so trying [${mkfs}] again in 5 seconds..."
	partprobe ${media}
	sync
	sleep 5
	echo "-----------------------------"
	echo "Formating with: [${mkfs} ${mkfs_options} ${mkfs_partition} ${mkfs_label}]"
	echo "-----------------------------"
	LC_ALL=C ${mkfs} ${mkfs_options} ${mkfs_partition} ${mkfs_label} || format_partition_error
	sync
}
format_partition () {
	# Run ${mkfs} on ${mkfs_partition} with label ${mkfs_label}; ext4 gets
	# the compatibility options computed in create_partitions. On failure,
	# fall back to format_partition_try2 (one retry, then fatal).
	unset mkfs_options
	if [ "x${mkfs}" = "xmkfs.ext4" ] ; then
		mkfs_options="${ext4_options}"
	fi
	echo "Formating with: [${mkfs} ${mkfs_options} ${mkfs_partition} ${mkfs_label}]"
	echo "-----------------------------"
	LC_ALL=C ${mkfs} ${mkfs_options} ${mkfs_partition} ${mkfs_label} || format_partition_try2
	sync
}
format_boot_partition () {
	# Format the boot partition: FAT16 when conf_boot_fstype is "fat",
	# otherwise mkfs.<fstype> with an ext-style -L label. Also records the
	# boot device path used later in boot configs.
	mkfs_partition="${media_prefix}${media_boot_partition}"
	case "${conf_boot_fstype}" in
	fat)
		mount_partition_format="vfat"
		mkfs="mkfs.vfat -F 16"
		mkfs_label="-n ${BOOT_LABEL}"
		;;
	*)
		mount_partition_format="${conf_boot_fstype}"
		mkfs="mkfs.${conf_boot_fstype}"
		mkfs_label="-L ${BOOT_LABEL}"
		;;
	esac
	format_partition
	boot_drive="${conf_root_device}p${media_boot_partition}"
}
format_rootfs_partition () {
	# Format the rootfs partition. In read-only-root mode the rootfs
	# itself is ext2 and an additional writable /var partition (labelled
	# "var") is formatted with the normal ROOTFS_TYPE.
	case "x${option_ro_root}" in
	xenable) mkfs="mkfs.ext2" ;;
	*)       mkfs="mkfs.${ROOTFS_TYPE}" ;;
	esac
	mkfs_partition="${media_prefix}${media_rootfs_partition}"
	mkfs_label="-L ${ROOTFS_LABEL}"
	format_partition
	rootfs_drive="${conf_root_device}p${media_rootfs_partition}"
	if [ "x${option_ro_root}" = "xenable" ] ; then
		mkfs="mkfs.${ROOTFS_TYPE}"
		mkfs_partition="${media_prefix}${media_rootfs_var_partition}"
		mkfs_label="-L var"
		format_partition
		rootfs_var_drive="${conf_root_device}p${media_rootfs_var_partition}"
	fi
}
create_partitions () {
	# Partition ${media} according to ${bootloader_location}, optionally
	# raw-writing the bootloader first, then format the resulting
	# partitions. For image-file builds, map partitions via losetup/kpartx.
	unset bootloader_installed
	unset sfdisk_gpt
	media_boot_partition=1
	media_rootfs_partition=2
	unset ext4_options
	if [ ! "x${uboot_supports_csum}" = "xtrue" ] ; then
		#Debian Stretch, mfks.ext4 default to metadata_csum, 64bit disable till u-boot works again..
		unset ext4_options
		unset test_mke2fs
		LC_ALL=C mkfs.ext4 -V &> /tmp/mkfs
		# Only e2fsprogs 1.43+ enables metadata_csum/64bit by default.
		test_mkfs=$(cat /tmp/mkfs | grep mke2fs | grep 1.43 || true)
		if [ "x${test_mkfs}" = "x" ] ; then
			unset ext4_options
		else
			ext4_options="-O ^metadata_csum,^64bit"
		fi
	fi
	echo ""
	case "${bootloader_location}" in
	fatfs_boot)
		# Bootloader files live on the FAT boot partition itself.
		conf_boot_endmb=${conf_boot_endmb:-"12"}
		#mkfs.fat 4.1 (2017-01-24)
		#WARNING: Not enough clusters for a 16 bit FAT! The filesystem will be
		#misinterpreted as having a 12 bit FAT without mount option "fat=16".
		#mkfs.vfat: Attempting to create a too large filesystem
		#LC_ALL=C mkfs.vfat -F 16 /dev/sdg1 -n BOOT
		#Failure: formating partition
		#When using "E" this fails, however "0xE" works fine...
		echo "Using sfdisk to create partition layout"
		echo "Version: `LC_ALL=C sfdisk --version`"
		echo "-----------------------------"
		sfdisk_partition_layout
		;;
	dd_uboot_boot)
		# U-Boot only, raw-written before the partition table is made.
		echo "Using dd to place bootloader on drive"
		echo "-----------------------------"
		if [ "x${bootrom_gpt}" = "xenable" ] ; then
			sfdisk_gpt="--label gpt"
		fi
		if [ "x${uboot_efi_mode}" = "xenable" ] ; then
			sfdisk_gpt="--label gpt"
		fi
		dd_uboot_boot
		bootloader_installed=1
		if [ "x${enable_fat_partition}" = "xenable" ] ; then
			conf_boot_endmb=${conf_boot_endmb:-"96"}
			conf_boot_fstype=${conf_boot_fstype:-"fat"}
			sfdisk_fstype=${sfdisk_fstype:-"0xE"}
			sfdisk_partition_layout
		else
			sfdisk_single_partition_layout
			media_rootfs_partition=1
		fi
		;;
	dd_spl_uboot_boot)
		# SPL + U-Boot, both raw-written before partitioning.
		echo "Using dd to place bootloader on drive"
		echo "-----------------------------"
		if [ "x${bootrom_gpt}" = "xenable" ] ; then
			sfdisk_gpt="--label gpt"
		fi
		if [ "x${uboot_efi_mode}" = "xenable" ] ; then
			sfdisk_gpt="--label gpt"
		fi
		dd_spl_uboot_boot
		dd_uboot_boot
		bootloader_installed=1
		if [ "x${enable_fat_partition}" = "xenable" ] ; then
			conf_boot_endmb=${conf_boot_endmb:-"96"}
			conf_boot_fstype=${conf_boot_fstype:-"fat"}
			sfdisk_fstype=${sfdisk_fstype:-"0xE"}
			sfdisk_partition_layout
		else
			if [ "x${uboot_efi_mode}" = "xenable" ] ; then
				# EFI boot: small FAT ESP labelled "EFI".
				conf_boot_endmb="16"
				conf_boot_fstype="fat"
				sfdisk_fstype="U"
				BOOT_LABEL="EFI"
				sfdisk_partition_layout
			else
				sfdisk_single_partition_layout
				media_rootfs_partition=1
			fi
		fi
		;;
	*)
		echo "Using sfdisk to create partition layout"
		echo "Version: `LC_ALL=C sfdisk --version`"
		echo "-----------------------------"
		sfdisk_partition_layout
		;;
	esac
	echo "Partition Setup:"
	echo "-----------------------------"
	LC_ALL=C fdisk -l "${media}"
	echo "-----------------------------"
	if [ "x${build_img_file}" = "xenable" ] ; then
		# Image file: attach to a free loop device and map partitions
		# with kpartx so they appear under /dev/mapper/.
		media_loop=$(losetup -f || true)
		if [ ! "${media_loop}" ] ; then
			echo "losetup -f failed"
			echo "Unmount some via: [sudo losetup -a]"
			echo "-----------------------------"
			losetup -a
			echo "sudo kpartx -d /dev/loopX ; sudo losetup -d /dev/loopX"
			echo "-----------------------------"
			exit
		fi
		losetup ${media_loop} "${media}"
		kpartx -av ${media_loop}
		sleep 1
		sync
		test_loop=$(echo ${media_loop} | awk -F'/' '{print $3}')
		if [ -e /dev/mapper/${test_loop}p${media_boot_partition} ] && [ -e /dev/mapper/${test_loop}p${media_rootfs_partition} ] ; then
			media_prefix="/dev/mapper/${test_loop}p"
		else
			ls -lh /dev/mapper/
			echo "Error: not sure what to do (new feature)."
			exit
		fi
	else
		partprobe ${media}
	fi
	# Single-partition layouts share one filesystem for boot + rootfs.
	if [ "x${media_boot_partition}" = "x${media_rootfs_partition}" ] ; then
		mount_partition_format="${ROOTFS_TYPE}"
		format_rootfs_partition
	else
		format_boot_partition
		format_rootfs_partition
	fi
}
populate_boot () {
	# Mount the boot partition and copy in the bootloader files (unless
	# already raw-written), optional boot.scr, board-specific template
	# uEnv.txt files, ID.txt and any pre-generated uEnv.txt.
	echo "Populating Boot Partition"
	echo "-----------------------------"
	if [ ! -d ${TEMPDIR}/disk ] ; then
		mkdir -p ${TEMPDIR}/disk
	fi
	partprobe ${media}
	# Mount with one retry: freshly created partitions sometimes take a
	# moment to show up.
	if ! mount -t ${mount_partition_format} ${media_prefix}${media_boot_partition} ${TEMPDIR}/disk; then
		echo "-----------------------------"
		echo "BUG: [${media_prefix}${media_boot_partition}] was not available so trying to mount again in 5 seconds..."
		partprobe ${media}
		sync
		sleep 5
		echo "-----------------------------"
		if ! mount -t ${mount_partition_format} ${media_prefix}${media_boot_partition} ${TEMPDIR}/disk; then
			echo "-----------------------------"
			echo "Unable to mount ${media_prefix}${media_boot_partition} at ${TEMPDIR}/disk to complete populating Boot Partition"
			echo "Please retry running the script, sometimes rebooting your system helps."
			echo "-----------------------------"
			exit
		fi
	fi
	lsblk | grep -v sr0
	echo "-----------------------------"
	# Copy SPL/U-Boot into the partition only when they were not already
	# dd-written to the raw media (bootloader_installed flag).
	if [ "${spl_name}" ] ; then
		if [ -f ${TEMPDIR}/dl/${SPL} ] ; then
			if [ ! "${bootloader_installed}" ] ; then
				cp -v ${TEMPDIR}/dl/${SPL} ${TEMPDIR}/disk/${spl_name}
				echo "-----------------------------"
			fi
		fi
	fi
	if [ "${boot_name}" ] ; then
		if [ -f ${TEMPDIR}/dl/${UBOOT} ] ; then
			if [ ! "${bootloader_installed}" ] ; then
				cp -v ${TEMPDIR}/dl/${UBOOT} ${TEMPDIR}/disk/${boot_name}
				echo "-----------------------------"
			fi
		fi
	fi
	if [ "x${distro_defaults}" = "xenable" ] ; then
		# Generic distro-boot script from the netinstall project.
		${dl_quiet} --directory-prefix="${TEMPDIR}/dl/" https://raw.githubusercontent.com/RobertCNelson/netinstall/master/lib/distro_defaults.scr
		cp -v ${TEMPDIR}/dl/distro_defaults.scr ${TEMPDIR}/disk/boot.scr
	fi
	# Template uEnv.txt for BeagleBone-family boards; the \${...} escapes
	# below are intentional: they must land literally in the output file
	# for U-Boot to expand at boot time.
	if [ "x${conf_board}" = "xam335x_boneblack" ] || [ "x${conf_board}" = "xam335x_evm" ] ; then
		wfile="${TEMPDIR}/disk/bbb-uEnv.txt"
		echo "##Rename as: uEnv.txt to override old bootloader in eMMC" > ${wfile}
		echo "##These are needed to be compliant with Angstrom's 2013.06.20 u-boot." >> ${wfile}
		echo "" >> ${wfile}
		echo "loadaddr=0x82000000" >> ${wfile}
		echo "fdtaddr=0x88000000" >> ${wfile}
		echo "rdaddr=0x88080000" >> ${wfile}
		echo "" >> ${wfile}
		echo "initrd_high=0xffffffff" >> ${wfile}
		echo "fdt_high=0xffffffff" >> ${wfile}
		echo "" >> ${wfile}
		echo "##These are needed to be compliant with Debian 2014-05-14 u-boot." >> ${wfile}
		echo "" >> ${wfile}
		echo "loadximage=echo debug: [/boot/vmlinuz-\${uname_r}] ... ; load mmc 0:${media_rootfs_partition} \${loadaddr} /boot/vmlinuz-\${uname_r}" >> ${wfile}
		echo "loadxfdt=echo debug: [/boot/dtbs/\${uname_r}/\${fdtfile}] ... ;load mmc 0:${media_rootfs_partition} \${fdtaddr} /boot/dtbs/\${uname_r}/\${fdtfile}" >> ${wfile}
		echo "loadxrd=echo debug: [/boot/initrd.img-\${uname_r}] ... ; load mmc 0:${media_rootfs_partition} \${rdaddr} /boot/initrd.img-\${uname_r}; setenv rdsize \${filesize}" >> ${wfile}
		echo "loaduEnvtxt=load mmc 0:${media_rootfs_partition} \${loadaddr} /boot/uEnv.txt ; env import -t \${loadaddr} \${filesize};" >> ${wfile}
		echo "check_dtb=if test -n \${dtb}; then setenv fdtfile \${dtb};fi;" >> ${wfile}
		echo "check_uboot_overlays=if test -n \${enable_uboot_overlays}; then setenv enable_uboot_overlays ;fi;" >> ${wfile}
		echo "loadall=run loaduEnvtxt; run check_dtb; run check_uboot_overlays; run loadximage; run loadxrd; run loadxfdt;" >> ${wfile}
		echo "" >> ${wfile}
		echo "mmcargs=setenv bootargs console=tty0 console=\${console} \${optargs} \${cape_disable} \${cape_enable} root=/dev/mmcblk0p${media_rootfs_partition} rootfstype=\${mmcrootfstype} \${cmdline}" >> ${wfile}
		echo "" >> ${wfile}
		echo "uenvcmd=run loadall; run mmcargs; echo debug: [\${bootargs}] ... ; echo debug: [bootz \${loadaddr} \${rdaddr}:\${rdsize} \${fdtaddr}] ... ; bootz \${loadaddr} \${rdaddr}:\${rdsize} \${fdtaddr};" >> ${wfile}
		echo "" >> ${wfile}
	fi
	# Template for NFS-root booting on am335x boards.
	if [ "x${conf_board}" = "xam335x_boneblack" ] || [ "x${conf_board}" = "xam335x_evm" ] || [ "x${conf_board}" = "xam335x_blank_bbbw" ] ; then
		wfile="${TEMPDIR}/disk/nfs-uEnv.txt"
		echo "##Rename as: uEnv.txt to boot via nfs" > ${wfile}
		echo "" >> ${wfile}
		echo "##https://www.kernel.org/doc/Documentation/filesystems/nfs/nfsroot.txt" >> ${wfile}
		echo "" >> ${wfile}
		echo "##SERVER: sudo apt-get install tftpd-hpa" >> ${wfile}
		echo "##SERVER:" >> ${wfile}
		echo "##SERVER: zImage boot:" >> ${wfile}
		echo "##SERVER: TFTP_DIRECTORY defined in /etc/default/tftpd-hpa" >> ${wfile}
		echo "##SERVER: zImage/*.dtb need to be located here:" >> ${wfile}
		echo "##SERVER: TFTP_DIRECTORY/zImage" >> ${wfile}
		echo "##SERVER: TFTP_DIRECTORY/dtbs/*.dtb" >> ${wfile}
		echo "##SERVER:" >> ${wfile}
		echo "##SERVER: uname_r boot:" >> ${wfile}
		echo "##SERVER: TFTP_DIRECTORY defined in /etc/default/tftpd-hpa" >> ${wfile}
		echo "##SERVER: Change TFTP_DIRECTORY to /NFSEXPORT/boot" >> ${wfile}
		echo "##SERVER: TFTP_DIRECTORY/vmlinuz-\${uname_r}" >> ${wfile}
		echo "##SERVER: TFTP_DIRECTORY/dtbs/\${uname_r}/*.dtb" >> ${wfile}
		echo "" >> ${wfile}
		echo "##client_ip needs to be set for u-boot to try booting via nfs" >> ${wfile}
		echo "" >> ${wfile}
		echo "client_ip=192.168.1.101" >> ${wfile}
		echo "" >> ${wfile}
		echo "#u-boot defaults: uncomment and override where needed" >> ${wfile}
		echo "" >> ${wfile}
		echo "#server_ip=192.168.1.100" >> ${wfile}
		echo "#gw_ip=192.168.1.1" >> ${wfile}
		echo "#netmask=255.255.255.0" >> ${wfile}
		echo "#hostname=" >> ${wfile}
		echo "#device=eth0" >> ${wfile}
		echo "#autoconf=off" >> ${wfile}
		echo "#root_dir=/home/userid/targetNFS" >> ${wfile}
		echo "#nfs_options=,vers=3" >> ${wfile}
		echo "#nfsrootfstype=ext4 rootwait fixrtc" >> ${wfile}
		echo "" >> ${wfile}
		echo "##use uname_r= only if TFTP SERVER is setup for uname_r boot:" >> ${wfile}
		echo "#uname_r=" >> ${wfile}
		echo "" >> ${wfile}
	fi
	if [ -f "${DIR}/ID.txt" ] ; then
		cp -v "${DIR}/ID.txt" ${TEMPDIR}/disk/ID.txt
	fi
	# NOTE(review): "ximx8mqevk_buildroot" is compared against an un-prefixed
	# ${conf_board} ("x" prefix missing on the right-hand side elsewhere is
	# the usual idiom) — confirm which spelling conf_board actually uses.
	if [ "x${conf_board}" = "ximx8mqevk_buildroot" ] ; then
		touch ${TEMPDIR}/disk/.imx8mq-evk
	fi
	# has_uenvtxt was set by find_issue when a pre-generated uEnv.txt exists.
	if [ ${has_uenvtxt} ] ; then
		cp -v "${DIR}/uEnv.txt" ${TEMPDIR}/disk/uEnv.txt
		echo "-----------------------------"
	fi
	cd ${TEMPDIR}/disk
	sync
	cd "${DIR}"/
	echo "Debug: Contents of Boot Partition"
	echo "-----------------------------"
	ls -lh ${TEMPDIR}/disk/
	du -sh ${TEMPDIR}/disk/
	echo "-----------------------------"
	sync
	sync
	umount ${TEMPDIR}/disk || true
	echo "Finished populating Boot Partition"
	echo "-----------------------------"
}
kernel_detection () {
#Scan ${dir_check} for installed kernel images (vmlinuz-<version>) and
#record which flavours are present: for each flavour found, set the
#matching has_*="enable" marker and capture the version string (the text
#after the "vmlinuz-" prefix of the first match).
unset has_multi_armv7_kernel
unset check
check=$(ls "${dir_check}" | grep vmlinuz- | grep armv7 | grep -v lpae | head -n 1)
if [ ! "x${check}" = "x" ] ; then
#reuse the match already in hand instead of listing the directory again
armv7_kernel=$(echo "${check}" | awk -F'vmlinuz-' '{print $2}')
echo "Debug: image has: v${armv7_kernel}"
has_multi_armv7_kernel="enable"
fi
unset has_multi_armv7_lpae_kernel
unset check
check=$(ls "${dir_check}" | grep vmlinuz- | grep armv7 | grep lpae | head -n 1)
if [ ! "x${check}" = "x" ] ; then
armv7_lpae_kernel=$(echo "${check}" | awk -F'vmlinuz-' '{print $2}')
echo "Debug: image has: v${armv7_lpae_kernel}"
has_multi_armv7_lpae_kernel="enable"
fi
unset has_bone_kernel
unset check
check=$(ls "${dir_check}" | grep vmlinuz- | grep bone | head -n 1)
if [ ! "x${check}" = "x" ] ; then
bone_dt_kernel=$(echo "${check}" | awk -F'vmlinuz-' '{print $2}')
echo "Debug: image has: v${bone_dt_kernel}"
has_bone_kernel="enable"
fi
unset has_ti_kernel
unset check
check=$(ls "${dir_check}" | grep vmlinuz- | grep ti | head -n 1)
if [ ! "x${check}" = "x" ] ; then
ti_dt_kernel=$(echo "${check}" | awk -F'vmlinuz-' '{print $2}')
echo "Debug: image has: v${ti_dt_kernel}"
has_ti_kernel="enable"
fi
unset has_xenomai_kernel
unset check
check=$(ls "${dir_check}" | grep vmlinuz- | grep xenomai | head -n 1)
if [ ! "x${check}" = "x" ] ; then
xenomai_dt_kernel=$(echo "${check}" | awk -F'vmlinuz-' '{print $2}')
echo "Debug: image has: v${xenomai_dt_kernel}"
has_xenomai_kernel="enable"
fi
}
kernel_select () {
#Pick the kernel version to boot (select_kernel) from the flavours that
#kernel_detection found, honouring the board's preferred flavour
#(conf_kernel). Each flavour falls back through related flavours:
#  armv7_lpae -> armv7
#  bone       -> ti -> bone -> armv7 -> xenomai
#  ti         -> ti -> armv7
#Exits non-zero when nothing usable was detected.
unset select_kernel
if [ "x${conf_kernel}" = "xarmv7" ] || [ "x${conf_kernel}" = "x" ] ; then
if [ "x${has_multi_armv7_kernel}" = "xenable" ] ; then
select_kernel="${armv7_kernel}"
fi
fi
if [ "x${conf_kernel}" = "xarmv7_lpae" ] ; then
if [ "x${has_multi_armv7_lpae_kernel}" = "xenable" ] ; then
select_kernel="${armv7_lpae_kernel}"
elif [ "x${has_multi_armv7_kernel}" = "xenable" ] ; then
select_kernel="${armv7_kernel}"
fi
fi
if [ "x${conf_kernel}" = "xbone" ] ; then
if [ "x${has_ti_kernel}" = "xenable" ] ; then
select_kernel="${ti_dt_kernel}"
elif [ "x${has_bone_kernel}" = "xenable" ] ; then
select_kernel="${bone_dt_kernel}"
elif [ "x${has_multi_armv7_kernel}" = "xenable" ] ; then
select_kernel="${armv7_kernel}"
elif [ "x${has_xenomai_kernel}" = "xenable" ] ; then
select_kernel="${xenomai_dt_kernel}"
fi
fi
if [ "x${conf_kernel}" = "xti" ] ; then
if [ "x${has_ti_kernel}" = "xenable" ] ; then
select_kernel="${ti_dt_kernel}"
elif [ "x${has_multi_armv7_kernel}" = "xenable" ] ; then
select_kernel="${armv7_kernel}"
fi
fi
if [ "${select_kernel}" ] ; then
echo "Debug: using: v${select_kernel}"
else
#report on stderr and exit non-zero: a bare "exit" after a successful
#echo used to report success (status 0) to the caller on this failure
echo "Error: [conf_kernel] not defined [armv7_lpae,armv7,bone,ti]..." >&2
exit 1
fi
}
#######################################
# Populate the rootfs partition of ${media}: mount it (with one retry),
# unpack ${ROOTFS}, generate /boot/uEnv.txt (dtb/overlay/flasher options),
# /etc/fstab and /etc/network/interfaces, stash u-boot backups and helper
# scripts, then sync and unmount everything.
# Globals (read): TEMPDIR DIR media media_prefix media_rootfs_partition
#   ROOTFS ROOTFS_TYPE conf_board conf_kernel option_ro_root uboot_efi_mode
#   and the many uboot_*/flasher_* option flags set by the CLI parser.
# Globals (written): wfile dir_check cmdline board select_kernel (via
#   kernel_detection/kernel_select).
#######################################
populate_rootfs () {
echo "Populating rootfs Partition"
echo "Please be patient, this may take a few minutes, as its transfering a lot of data.."
echo "-----------------------------"
if [ ! -d ${TEMPDIR}/disk ] ; then
mkdir -p ${TEMPDIR}/disk
fi
#mount the rootfs partition; if the kernel has not picked up the new
#partition table yet, re-probe, wait 5s and retry once before giving up
partprobe ${media}
if ! mount -t ${ROOTFS_TYPE} ${media_prefix}${media_rootfs_partition} ${TEMPDIR}/disk; then
echo "-----------------------------"
echo "BUG: [${media_prefix}${media_rootfs_partition}] was not available so trying to mount again in 5 seconds..."
partprobe ${media}
sync
sleep 5
echo "-----------------------------"
if ! mount -t ${ROOTFS_TYPE} ${media_prefix}${media_rootfs_partition} ${TEMPDIR}/disk; then
echo "-----------------------------"
echo "Unable to mount ${media_prefix}${media_rootfs_partition} at ${TEMPDIR}/disk to complete populating rootfs Partition"
echo "Please retry running the script, sometimes rebooting your system helps."
echo "-----------------------------"
exit
fi
fi
#read-only root layout keeps a separate writable /var partition
if [ "x${option_ro_root}" = "xenable" ] ; then
if [ ! -d ${TEMPDIR}/disk/var ] ; then
mkdir -p ${TEMPDIR}/disk/var
fi
if ! mount -t ${ROOTFS_TYPE} ${media_prefix}${media_rootfs_var_partition} ${TEMPDIR}/disk/var; then
echo "-----------------------------"
echo "BUG: [${media_prefix}${media_rootfs_var_partition}] was not available so trying to mount again in 5 seconds..."
partprobe ${media}
sync
sleep 5
echo "-----------------------------"
if ! mount -t ${ROOTFS_TYPE} ${media_prefix}${media_rootfs_var_partition} ${TEMPDIR}/disk/var; then
echo "-----------------------------"
echo "Unable to mount ${media_prefix}${media_rootfs_var_partition} at ${TEMPDIR}/disk/var to complete populating rootfs Partition"
echo "Please retry running the script, sometimes rebooting your system helps."
echo "-----------------------------"
exit
fi
fi
fi
#EFI layout mounts the vfat boot partition under /boot/efi
if [ "x${uboot_efi_mode}" = "xenable" ] ; then
if [ ! -d ${TEMPDIR}/disk/boot/efi ] ; then
mkdir -p ${TEMPDIR}/disk/boot/efi
fi
if ! mount -t vfat ${media_prefix}${media_boot_partition} ${TEMPDIR}/disk/boot/efi; then
echo "-----------------------------"
echo "BUG: [${media_prefix}${media_boot_partition}] was not available so trying to mount again in 5 seconds..."
partprobe ${media}
sync
sleep 5
echo "-----------------------------"
if ! mount -t vfat ${media_prefix}${media_boot_partition} ${TEMPDIR}/disk/boot/efi; then
echo "-----------------------------"
echo "Unable to mount ${media_prefix}${media_boot_partition} at ${TEMPDIR}/disk/boot/efi to complete populating rootfs Partition"
echo "Please retry running the script, sometimes rebooting your system helps."
echo "-----------------------------"
exit
fi
fi
fi
lsblk | grep -v sr0
echo "-----------------------------"
#unpack the rootfs tarball; use pv for a progress bar when available
if [ -f "${DIR}/${ROOTFS}" ] ; then
if which pv > /dev/null ; then
pv "${DIR}/${ROOTFS}" | tar --numeric-owner --preserve-permissions -xf - -C ${TEMPDIR}/disk/
else
echo "pv: not installed, using tar verbose to show progress"
tar --numeric-owner --preserve-permissions --verbose -xf "${DIR}/${ROOTFS}" -C ${TEMPDIR}/disk/
fi
echo "Transfer of data is Complete, now syncing data to disk..."
echo "Disk Size"
du -sh ${TEMPDIR}/disk/
sync
sync
echo "-----------------------------"
if [ -f /usr/bin/stat ] ; then
echo "-----------------------------"
echo "Checking [${TEMPDIR}/disk/] permissions"
/usr/bin/stat ${TEMPDIR}/disk/
echo "-----------------------------"
fi
#the tarball may carry odd ownership/mode on its root directory; normalize
echo "Setting [${TEMPDIR}/disk/] chown root:root"
chown root:root ${TEMPDIR}/disk/
echo "Setting [${TEMPDIR}/disk/] chmod 755"
chmod 755 ${TEMPDIR}/disk/
if [ -f /usr/bin/stat ] ; then
echo "-----------------------------"
echo "Verifying [${TEMPDIR}/disk/] permissions"
/usr/bin/stat ${TEMPDIR}/disk/
fi
echo "-----------------------------"
#optional OEM flasher payload (image + eeprom dump) lands in /opt/emmc
if [ ! "x${oem_flasher_img}" = "x" ] ; then
if [ ! -d "${TEMPDIR}/disk/opt/emmc/" ] ; then
mkdir -p "${TEMPDIR}/disk/opt/emmc/"
fi
cp -v "${oem_flasher_img}" "${TEMPDIR}/disk/opt/emmc/"
sync
if [ ! "x${oem_flasher_eeprom}" = "x" ] ; then
cp -v "${oem_flasher_eeprom}" "${TEMPDIR}/disk/opt/emmc/"
sync
fi
echo "Disk Size, with *.img"
du -sh ${TEMPDIR}/disk/
fi
echo "-----------------------------"
fi
#now that /boot is populated, detect the shipped kernels and pick one
dir_check="${TEMPDIR}/disk/boot/"
kernel_detection
kernel_select
if [ ! "x${uboot_eeprom}" = "x" ] ; then
echo "board_eeprom_header=${uboot_eeprom}" > "${TEMPDIR}/disk/boot/.eeprom.txt"
fi
#generate /boot/uEnv.txt (the v2.0 u-boot partitioning layout)
wfile="${TEMPDIR}/disk/boot/uEnv.txt"
echo "#Docs: http://elinux.org/Beagleboard:U-boot_partitioning_layout_2.0" > ${wfile}
echo "" >> ${wfile}
if [ "x${kernel_override}" = "x" ] ; then
echo "uname_r=${select_kernel}" >> ${wfile}
else
echo "uname_r=${kernel_override}" >> ${wfile}
fi
if [ "${BTRFS_FSTAB}" ] ; then
echo "mmcrootfstype=btrfs rootwait" >> ${wfile}
fi
echo "#uuid=" >> ${wfile}
if [ ! "x${dtb}" = "x" ] ; then
echo "dtb=${dtb}" >> ${wfile}
else
if [ ! "x${forced_dtb}" = "x" ] ; then
echo "dtb=${forced_dtb}" >> ${wfile}
else
echo "#dtb=" >> ${wfile}
fi
#am335x boards get the full u-boot cape-overlay configuration section
if [ "x${conf_board}" = "xam335x_boneblack" ] || [ "x${conf_board}" = "xam335x_evm" ] || [ "x${conf_board}" = "xam335x_blank_bbbw" ] ; then
echo "" >> ${wfile}
echo "###U-Boot Overlays###" >> ${wfile}
echo "###Documentation: http://elinux.org/Beagleboard:BeagleBoneBlack_Debian#U-Boot_Overlays" >> ${wfile}
echo "###Master Enable" >> ${wfile}
if [ "x${uboot_cape_overlays}" = "xenable" ] ; then
echo "enable_uboot_overlays=1" >> ${wfile}
else
echo "#enable_uboot_overlays=1" >> ${wfile}
fi
echo "###" >> ${wfile}
echo "###Overide capes with eeprom" >> ${wfile}
echo "#uboot_overlay_addr0=/lib/firmware/<file0>.dtbo" >> ${wfile}
echo "#uboot_overlay_addr1=/lib/firmware/<file1>.dtbo" >> ${wfile}
echo "#uboot_overlay_addr2=/lib/firmware/<file2>.dtbo" >> ${wfile}
echo "#uboot_overlay_addr3=/lib/firmware/<file3>.dtbo" >> ${wfile}
echo "###" >> ${wfile}
echo "###Additional custom capes" >> ${wfile}
echo "#uboot_overlay_addr4=/lib/firmware/<file4>.dtbo" >> ${wfile}
echo "#uboot_overlay_addr5=/lib/firmware/<file5>.dtbo" >> ${wfile}
echo "#uboot_overlay_addr6=/lib/firmware/<file6>.dtbo" >> ${wfile}
echo "#uboot_overlay_addr7=/lib/firmware/<file7>.dtbo" >> ${wfile}
echo "###" >> ${wfile}
echo "###Custom Cape" >> ${wfile}
echo "#dtb_overlay=/lib/firmware/<file8>.dtbo" >> ${wfile}
echo "###" >> ${wfile}
echo "###Disable auto loading of virtual capes (emmc/video/wireless/adc)" >> ${wfile}
echo "#disable_uboot_overlay_emmc=1" >> ${wfile}
if [ "x${uboot_disable_video}" = "xenable" ] ; then
echo "disable_uboot_overlay_video=1" >> ${wfile}
else
echo "#disable_uboot_overlay_video=1" >> ${wfile}
fi
if [ "x${uboot_disable_audio}" = "xenable" ] ; then
echo "disable_uboot_overlay_audio=1" >> ${wfile}
else
echo "#disable_uboot_overlay_audio=1" >> ${wfile}
fi
echo "#disable_uboot_overlay_wireless=1" >> ${wfile}
echo "#disable_uboot_overlay_adc=1" >> ${wfile}
echo "###" >> ${wfile}
echo "###PRUSS OPTIONS" >> ${wfile}
#exactly one PRU firmware interface may be active: rproc (4.14-ti or
#4.19-ti flavour) or the uio fallback when no rproc option was requested
unset use_pru_uio
if [ "x${uboot_pru_rproc_414ti}" = "xenable" ] ; then
echo "###pru_rproc (4.14.x-ti kernel)" >> ${wfile}
echo "uboot_overlay_pru=/lib/firmware/AM335X-PRU-RPROC-4-14-TI-00A0.dtbo" >> ${wfile}
echo "###pru_rproc (4.19.x-ti kernel)" >> ${wfile}
echo "#uboot_overlay_pru=/lib/firmware/AM335X-PRU-RPROC-4-19-TI-00A0.dtbo" >> ${wfile}
echo "###pru_uio (4.14.x-ti, 4.19.x-ti & mainline/bone kernel)" >> ${wfile}
echo "#uboot_overlay_pru=/lib/firmware/AM335X-PRU-UIO-00A0.dtbo" >> ${wfile}
use_pru_uio="blocked"
fi
if [ "x${uboot_pru_rproc_419ti}" = "xenable" ] ; then
echo "###pru_rproc (4.14.x-ti kernel)" >> ${wfile}
echo "#uboot_overlay_pru=/lib/firmware/AM335X-PRU-RPROC-4-14-TI-00A0.dtbo" >> ${wfile}
echo "###pru_rproc (4.19.x-ti kernel)" >> ${wfile}
echo "uboot_overlay_pru=/lib/firmware/AM335X-PRU-RPROC-4-19-TI-00A0.dtbo" >> ${wfile}
echo "###pru_uio (4.14.x-ti, 4.19.x-ti & mainline/bone kernel)" >> ${wfile}
echo "#uboot_overlay_pru=/lib/firmware/AM335X-PRU-UIO-00A0.dtbo" >> ${wfile}
use_pru_uio="blocked"
fi
if [ "x${mainline_pru_rproc}" = "xenable" ] ; then
echo "###pru_rproc (4.14.x-ti kernel)" >> ${wfile}
echo "#uboot_overlay_pru=/lib/firmware/AM335X-PRU-RPROC-4-14-TI-00A0.dtbo" >> ${wfile}
echo "###pru_rproc (4.19.x-ti kernel)" >> ${wfile}
echo "#uboot_overlay_pru=/lib/firmware/AM335X-PRU-RPROC-4-19-TI-00A0.dtbo" >> ${wfile}
echo "###pru_uio (4.14.x-ti, 4.19.x-ti & mainline/bone kernel)" >> ${wfile}
echo "#uboot_overlay_pru=/lib/firmware/AM335X-PRU-UIO-00A0.dtbo" >> ${wfile}
use_pru_uio="blocked"
fi
if [ "x${use_pru_uio}" = "x" ] ; then
echo "###pru_rproc (4.14.x-ti kernel)" >> ${wfile}
echo "#uboot_overlay_pru=/lib/firmware/AM335X-PRU-RPROC-4-14-TI-00A0.dtbo" >> ${wfile}
echo "###pru_rproc (4.19.x-ti kernel)" >> ${wfile}
echo "#uboot_overlay_pru=/lib/firmware/AM335X-PRU-RPROC-4-19-TI-00A0.dtbo" >> ${wfile}
echo "###pru_uio (4.14.x-ti, 4.19.x-ti & mainline/bone kernel)" >> ${wfile}
echo "uboot_overlay_pru=/lib/firmware/AM335X-PRU-UIO-00A0.dtbo" >> ${wfile}
fi
echo "###" >> ${wfile}
echo "###Cape Universal Enable" >> ${wfile}
if [ "x${uboot_cape_overlays}" = "xenable" ] && [ "x${enable_cape_universal}" = "xenable" ] ; then
echo "enable_uboot_cape_universal=1" >> ${wfile}
else
echo "#enable_uboot_cape_universal=1" >> ${wfile}
fi
echo "###" >> ${wfile}
echo "###Debug: disable uboot autoload of Cape" >> ${wfile}
echo "#disable_uboot_overlay_addr0=1" >> ${wfile}
echo "#disable_uboot_overlay_addr1=1" >> ${wfile}
echo "#disable_uboot_overlay_addr2=1" >> ${wfile}
echo "#disable_uboot_overlay_addr3=1" >> ${wfile}
echo "###" >> ${wfile}
echo "###U-Boot fdt tweaks... (60000 = 384KB)" >> ${wfile}
echo "#uboot_fdt_buffer=0x60000" >> ${wfile}
echo "###U-Boot Overlays###" >> ${wfile}
echo "" >> ${wfile}
fi
fi
#build the kernel cmdline written into uEnv.txt
cmdline="coherent_pool=1M net.ifnames=0"
if [ ! "x${loops_per_jiffy}" = "x" ] ; then
cmdline="${cmdline} ${loops_per_jiffy}"
fi
if [ ! "x${rng_core}" = "x" ] ; then
cmdline="${cmdline} ${rng_core}"
fi
cmdline="${cmdline} quiet"
#KMS video: force a mode on the cmdline only for boards with broken EDID
unset kms_video
drm_device_identifier=${drm_device_identifier:-"HDMI-A-1"}
drm_device_timing=${drm_device_timing:-"1024x768@60e"}
if [ "x${drm_read_edid_broken}" = "xenable" ] ; then
cmdline="${cmdline} video=${drm_device_identifier}:${drm_device_timing}"
echo "cmdline=${cmdline}" >> ${wfile}
echo "" >> ${wfile}
else
echo "cmdline=${cmdline}" >> ${wfile}
echo "" >> ${wfile}
echo "#In the event of edid real failures, uncomment this next line:" >> ${wfile}
echo "#cmdline=${cmdline} video=${drm_device_identifier}:${drm_device_timing}" >> ${wfile}
echo "" >> ${wfile}
fi
#flasher selection: write an init= cmdline override for the requested
#eMMC/eeprom flasher variant (am335x boards get the full menu; other
#boards the reduced no-eeprom set)
if [ "x${conf_board}" = "xam335x_boneblack" ] || [ "x${conf_board}" = "xam335x_evm" ] || [ "x${conf_board}" = "xam335x_blank_bbbw" ] ; then
if [ ! "x${has_post_uenvtxt}" = "x" ] ; then
cat "${DIR}/post-uEnv.txt" >> ${wfile}
echo "" >> ${wfile}
fi
if [ "x${usb_flasher}" = "xenable" ] ; then
if [ ! "x${oem_flasher_script}" = "x" ] ; then
echo "cmdline=init=/opt/scripts/tools/eMMC/${oem_flasher_script}" >> ${wfile}
else
echo "cmdline=init=/opt/scripts/tools/eMMC/init-eMMC-flasher-from-usb-media.sh" >> ${wfile}
fi
elif [ "x${emmc_flasher}" = "xenable" ] ; then
echo "##enable Generic eMMC Flasher:" >> ${wfile}
echo "cmdline=init=/opt/scripts/tools/eMMC/init-eMMC-flasher-v3.sh" >> ${wfile}
elif [ "x${bbg_flasher}" = "xenable" ] ; then
echo "##enable BBG: eMMC Flasher:" >> ${wfile}
echo "cmdline=init=/opt/scripts/tools/eMMC/init-eMMC-flasher-v3-bbg.sh" >> ${wfile}
elif [ "x${bbgw_flasher}" = "xenable" ] ; then
echo "##enable BBG: eMMC Flasher:" >> ${wfile}
echo "cmdline=init=/opt/scripts/tools/eMMC/init-eMMC-flasher-v3-bbgw.sh" >> ${wfile}
elif [ "x${m10a_flasher}" = "xenable" ] ; then
echo "##enable m10a: eMMC Flasher:" >> ${wfile}
echo "cmdline=init=/opt/scripts/tools/eMMC/init-eMMC-flasher-v3-m10a.sh" >> ${wfile}
elif [ "x${me06_flasher}" = "xenable" ] ; then
echo "##enable me06: eMMC Flasher:" >> ${wfile}
echo "cmdline=init=/opt/scripts/tools/eMMC/init-eMMC-flasher-v3-me06.sh" >> ${wfile}
elif [ "x${bbbl_flasher}" = "xenable" ] ; then
echo "##enable bbbl: eMMC Flasher:" >> ${wfile}
echo "cmdline=init=/opt/scripts/tools/eMMC/init-eMMC-flasher-v3-bbbl.sh" >> ${wfile}
elif [ "x${bbbw_flasher}" = "xenable" ] ; then
echo "##enable bbbw: eMMC Flasher:" >> ${wfile}
echo "cmdline=init=/opt/scripts/tools/eMMC/init-eMMC-flasher-v3-bbbw.sh" >> ${wfile}
elif [ "x${bp00_flasher}" = "xenable" ] ; then
echo "##enable bp00: eeprom Flasher:" >> ${wfile}
echo "cmdline=init=/opt/scripts/tools/eMMC/init-eMMC-flasher-bp00.sh" >> ${wfile}
elif [ "x${a335_flasher}" = "xenable" ] ; then
echo "##enable a335: eeprom Flasher:" >> ${wfile}
echo "cmdline=init=/opt/scripts/tools/eMMC/init-eMMC-flasher-a335.sh" >> ${wfile}
else
echo "##enable Generic eMMC Flasher:" >> ${wfile}
echo "##make sure, these tools are installed: dosfstools rsync" >> ${wfile}
echo "#cmdline=init=/opt/scripts/tools/eMMC/init-eMMC-flasher-v3.sh" >> ${wfile}
fi
echo "" >> ${wfile}
else
if [ "x${usb_flasher}" = "xenable" ] ; then
if [ ! "x${oem_flasher_script}" = "x" ] ; then
echo "cmdline=init=/opt/scripts/tools/eMMC/${oem_flasher_script}" >> ${wfile}
else
echo "cmdline=init=/opt/scripts/tools/eMMC/init-eMMC-flasher-from-usb-media.sh" >> ${wfile}
fi
elif [ "x${emmc_flasher}" = "xenable" ] ; then
echo "##enable Generic eMMC Flasher:" >> ${wfile}
echo "cmdline=init=/opt/scripts/tools/eMMC/init-eMMC-flasher-v3-no-eeprom.sh" >> ${wfile}
elif [ "x${bp00_flasher}" = "xenable" ] ; then
echo "##enable bp00: eeprom Flasher:" >> ${wfile}
echo "cmdline=init=/opt/scripts/tools/eMMC/init-eMMC-flasher-bp00.sh" >> ${wfile}
elif [ "x${a335_flasher}" = "xenable" ] ; then
echo "##enable a335: eeprom Flasher:" >> ${wfile}
echo "cmdline=init=/opt/scripts/tools/eMMC/init-eMMC-flasher-a335.sh" >> ${wfile}
else
if [ "x${conf_board}" = "xbeagle_x15" ] ; then
echo "##enable x15: eMMC Flasher:" >> ${wfile}
echo "##make sure, these tools are installed: dosfstools rsync" >> ${wfile}
echo "#cmdline=init=/opt/scripts/tools/eMMC/init-eMMC-flasher-v3-no-eeprom.sh" >> ${wfile}
fi
fi
fi
#am335x_boneblack is a custom u-boot to ignore empty factory eeproms...
if [ "x${conf_board}" = "xam335x_boneblack" ] ; then
board="am335x_evm"
else
board=${conf_board}
fi
echo "/boot/uEnv.txt---------------"
cat ${wfile}
#NOTE(review): uEnv.txt ends up owned by uid/gid 1000 — presumably the
#image's default user; confirm this is intentional for all images
sudo chown -R 1000:1000 ${wfile}
echo "-----------------------------"
wfile="${TEMPDIR}/disk/boot/SOC.sh"
generate_soc
#RootStock-NG
if [ -f ${TEMPDIR}/disk/etc/rcn-ee.conf ] ; then
. ${TEMPDIR}/disk/etc/rcn-ee.conf
mkdir -p ${TEMPDIR}/disk/boot/uboot || true
#generate /etc/fstab to match the partition layout chosen above
wfile="${TEMPDIR}/disk/etc/fstab"
echo "# /etc/fstab: static file system information." > ${wfile}
echo "#" >> ${wfile}
echo "# Auto generated by RootStock-NG: setup_sdcard.sh" >> ${wfile}
echo "#" >> ${wfile}
if [ "x${option_ro_root}" = "xenable" ] ; then
echo "#With read only rootfs, we need to boot once as rw..." >> ${wfile}
echo "${rootfs_drive} / ext2 noatime,errors=remount-ro 0 1" >> ${wfile}
echo "#" >> ${wfile}
echo "#Switch to read only rootfs:" >> ${wfile}
echo "#${rootfs_drive} / ext2 noatime,ro,errors=remount-ro 0 1" >> ${wfile}
echo "#" >> ${wfile}
echo "${rootfs_var_drive} /var ${ROOTFS_TYPE} noatime 0 2" >> ${wfile}
else
if [ "${BTRFS_FSTAB}" ] ; then
echo "${rootfs_drive} / btrfs defaults,noatime 0 1" >> ${wfile}
else
echo "${rootfs_drive} / ${ROOTFS_TYPE} noatime,errors=remount-ro 0 1" >> ${wfile}
fi
fi
if [ "x${uboot_efi_mode}" = "xenable" ] ; then
echo "${boot_drive} /boot/efi vfat defaults 0 0" >> ${wfile}
fi
echo "debugfs /sys/kernel/debug debugfs mode=755,uid=root,gid=gpio,defaults 0 0" >> ${wfile}
#generate /etc/network/interfaces (only when the file already exists)
if [ "x${DISABLE_ETH}" != "xskip" ] ; then
wfile="${TEMPDIR}/disk/etc/network/interfaces"
if [ -f ${wfile} ] ; then
echo "# This file describes the network interfaces available on your system" > ${wfile}
echo "# and how to activate them. For more information, see interfaces(5)." >> ${wfile}
echo "" >> ${wfile}
echo "# The loopback network interface" >> ${wfile}
echo "auto lo" >> ${wfile}
echo "iface lo inet loopback" >> ${wfile}
echo "" >> ${wfile}
echo "# The primary network interface" >> ${wfile}
if [ "${DISABLE_ETH}" ] ; then
echo "#auto eth0" >> ${wfile}
echo "#iface eth0 inet dhcp" >> ${wfile}
else
echo "auto eth0" >> ${wfile}
echo "iface eth0 inet dhcp" >> ${wfile}
fi
#if we have systemd & wicd-gtk, disable eth0 in /etc/network/interfaces
if [ -f ${TEMPDIR}/disk/lib/systemd/systemd ] ; then
if [ -f ${TEMPDIR}/disk/usr/bin/wicd-gtk ] ; then
sed -i 's/auto eth0/#auto eth0/g' ${wfile}
sed -i 's/allow-hotplug eth0/#allow-hotplug eth0/g' ${wfile}
sed -i 's/iface eth0 inet dhcp/#iface eth0 inet dhcp/g' ${wfile}
fi
fi
#if we have connman, disable eth0 in /etc/network/interfaces
if [ -f ${TEMPDIR}/disk/etc/init.d/connman ] ; then
sed -i 's/auto eth0/#auto eth0/g' ${wfile}
sed -i 's/allow-hotplug eth0/#allow-hotplug eth0/g' ${wfile}
sed -i 's/iface eth0 inet dhcp/#iface eth0 inet dhcp/g' ${wfile}
fi
echo "# Example to keep MAC address between reboots" >> ${wfile}
echo "#hwaddress ether DE:AD:BE:EF:CA:FE" >> ${wfile}
echo "" >> ${wfile}
echo "##connman: ethX static config" >> ${wfile}
echo "#connmanctl services" >> ${wfile}
echo "#Using the appropriate ethernet service, tell connman to setup a static IP address for that service:" >> ${wfile}
echo "#sudo connmanctl config <service> --ipv4 manual <ip_addr> <netmask> <gateway> --nameservers <dns_server>" >> ${wfile}
echo "" >> ${wfile}
echo "##connman: WiFi" >> ${wfile}
echo "#" >> ${wfile}
echo "#connmanctl" >> ${wfile}
echo "#connmanctl> tether wifi off" >> ${wfile}
echo "#connmanctl> enable wifi" >> ${wfile}
echo "#connmanctl> scan wifi" >> ${wfile}
echo "#connmanctl> services" >> ${wfile}
echo "#connmanctl> agent on" >> ${wfile}
echo "#connmanctl> connect wifi_*_managed_psk" >> ${wfile}
echo "#connmanctl> quit" >> ${wfile}
fi
fi
#drop distro placeholder web pages
if [ -f ${TEMPDIR}/disk/var/www/index.html ] ; then
rm -f ${TEMPDIR}/disk/var/www/index.html || true
fi
if [ -f ${TEMPDIR}/disk/var/www/html/index.html ] ; then
rm -f ${TEMPDIR}/disk/var/www/html/index.html || true
fi
sync
fi #RootStock-NG
#keep a backup copy of the bootloader binaries on the rootfs
if [ ! "x${uboot_name}" = "x" ] ; then
echo "Backup version of u-boot: /opt/backup/uboot/"
mkdir -p ${TEMPDIR}/disk/opt/backup/uboot/
cp -v ${TEMPDIR}/dl/${UBOOT} ${TEMPDIR}/disk/opt/backup/uboot/${uboot_name}
fi
if [ ! "x${spl_uboot_name}" = "x" ] ; then
mkdir -p ${TEMPDIR}/disk/opt/backup/uboot/
cp -v ${TEMPDIR}/dl/${SPL} ${TEMPDIR}/disk/opt/backup/uboot/${spl_uboot_name}
fi
#NOTE(review): this existence test looks at ${TEMPDIR}/etc/... rather than
#the mounted rootfs at ${TEMPDIR}/disk/etc/... — likely a missing "/disk",
#so the rule file is (re)written unconditionally; confirm intent.
if [ ! -f ${TEMPDIR}/etc/udev/rules.d/60-omap-tty.rules ] ; then
file="/etc/udev/rules.d/60-omap-tty.rules"
#NOTE(review): the next three writes all use ">" (truncate), so only the
#last header line survives; the first two were probably meant to be ">>".
echo "#from: http://arago-project.org/git/meta-ti.git?a=commit;h=4ce69eff28103778508d23af766e6204c95595d3" > ${TEMPDIR}/disk${file}
echo "" > ${TEMPDIR}/disk${file}
echo "# Backward compatibility with old OMAP UART-style ttyO0 naming" > ${TEMPDIR}/disk${file}
echo "" >> ${TEMPDIR}/disk${file}
echo "SUBSYSTEM==\"tty\", ATTR{uartclk}!=\"0\", KERNEL==\"ttyS[0-9]\", SYMLINK+=\"ttyO%n\"" >> ${TEMPDIR}/disk${file}
echo "" >> ${TEMPDIR}/disk${file}
fi
#pick a cpufreq governor: x15 runs hot, so it gets powersave
if [ -f ${TEMPDIR}/disk/etc/init.d/cpufrequtils ] ; then
if [ "x${conf_board}" = "xbeagle_x15" ] ; then
sed -i 's/GOVERNOR="ondemand"/GOVERNOR="powersave"/g' ${TEMPDIR}/disk/etc/init.d/cpufrequtils
else
sed -i 's/GOVERNOR="ondemand"/GOVERNOR="performance"/g' ${TEMPDIR}/disk/etc/init.d/cpufrequtils
fi
fi
#install (or refresh) the boot-scripts helper repo under /opt/scripts
if [ ! -f ${TEMPDIR}/disk/opt/scripts/boot/generic-startup.sh ] ; then
git clone https://github.com/RobertCNelson/boot-scripts ${TEMPDIR}/disk/opt/scripts/ --depth 1
sudo chown -R 1000:1000 ${TEMPDIR}/disk/opt/scripts/
if [ ! -f ${TEMPDIR}/disk/etc/default/bb-boot ] ; then
sudo cp -v ${TEMPDIR}/disk/opt/scripts/boot/default/bb-boot ${TEMPDIR}/disk/etc/default/
fi
else
cd ${TEMPDIR}/disk/opt/scripts/
git pull
cd -
sudo chown -R 1000:1000 ${TEMPDIR}/disk/opt/scripts/
fi
#xorg.conf driver tweaks for the omapdrm / etnaviv display stacks
if [ "x${drm}" = "xomapdrm" ] ; then
wfile="/etc/X11/xorg.conf"
if [ -f ${TEMPDIR}/disk${wfile} ] ; then
sudo sed -i -e 's:modesetting:omap:g' ${TEMPDIR}/disk${wfile}
sudo sed -i -e 's:fbdev:omap:g' ${TEMPDIR}/disk${wfile}
if [ "x${conf_board}" = "xomap3_beagle" ] ; then
sudo sed -i -e 's:#HWcursor_false::g' ${TEMPDIR}/disk${wfile}
sudo sed -i -e 's:#DefaultDepth::g' ${TEMPDIR}/disk${wfile}
else
sudo sed -i -e 's:#HWcursor_false::g' ${TEMPDIR}/disk${wfile}
fi
fi
fi
if [ "x${drm}" = "xetnaviv" ] ; then
wfile="/etc/X11/xorg.conf"
if [ -f ${TEMPDIR}/disk${wfile} ] ; then
if [ -f ${TEMPDIR}/disk/usr/lib/xorg/modules/drivers/armada_drv.so ] ; then
sudo sed -i -e 's:modesetting:armada:g' ${TEMPDIR}/disk${wfile}
sudo sed -i -e 's:fbdev:armada:g' ${TEMPDIR}/disk${wfile}
fi
fi
fi
if [ "${usbnet_mem}" ] ; then
echo "vm.min_free_kbytes = ${usbnet_mem}" >> ${TEMPDIR}/disk/etc/sysctl.conf
fi
#wandboard wifi needs Broadcom nvram config blobs
if [ "${need_wandboard_firmware}" ] ; then
http_brcm="https://raw.githubusercontent.com/Freescale/meta-fsl-arm-extra/master/recipes-bsp/broadcom-nvram-config/files/wandboard"
${dl_quiet} --directory-prefix="${TEMPDIR}/disk/lib/firmware/brcm/" ${http_brcm}/brcmfmac4329-sdio.txt
${dl_quiet} --directory-prefix="${TEMPDIR}/disk/lib/firmware/brcm/" ${http_brcm}/brcmfmac4330-sdio.txt
fi
#apply --hostname: rewrite /etc/hosts and /etc/hostname
if [ ! "x${new_hostname}" = "x" ] ; then
echo "Updating Image hostname too: [${new_hostname}]"
wfile="/etc/hosts"
echo "127.0.0.1	localhost" > ${TEMPDIR}/disk${wfile}
echo "127.0.1.1	${new_hostname}.localdomain	${new_hostname}" >> ${TEMPDIR}/disk${wfile}
echo "" >> ${TEMPDIR}/disk${wfile}
echo "# The following lines are desirable for IPv6 capable hosts" >> ${TEMPDIR}/disk${wfile}
echo "::1     localhost ip6-localhost ip6-loopback" >> ${TEMPDIR}/disk${wfile}
echo "ff02::1 ip6-allnodes" >> ${TEMPDIR}/disk${wfile}
echo "ff02::2 ip6-allrouters" >> ${TEMPDIR}/disk${wfile}
wfile="/etc/hostname"
echo "${new_hostname}" > ${TEMPDIR}/disk${wfile}
fi
# setuid root ping+ping6 - capabilities does not survive tar
if [ -x ${TEMPDIR}/disk/bin/ping ] ; then
echo "making ping/ping6 setuid root"
chmod u+s ${TEMPDIR}/disk//bin/ping ${TEMPDIR}/disk//bin/ping6
fi
#flush everything to disk, unmount in reverse order of mounting
cd ${TEMPDIR}/disk/
sync
sync
cd "${DIR}/"
if [ "x${option_ro_root}" = "xenable" ] ; then
umount ${TEMPDIR}/disk/var || true
fi
if [ "x${uboot_efi_mode}" = "xenable" ] ; then
umount ${TEMPDIR}/disk/boot/efi || true
fi
umount ${TEMPDIR}/disk || true
#for --img builds, tear down the loop device mappings
if [ "x${build_img_file}" = "xenable" ] ; then
sync
kpartx -d ${media_loop} || true
losetup -d ${media_loop} || true
fi
echo "Finished populating rootfs Partition"
echo "-----------------------------"
echo "setup_sdcard.sh script complete"
if [ -f "${DIR}/user_password.list" ] ; then
echo "-----------------------------"
echo "The default user:password for this image:"
cat "${DIR}/user_password.list"
echo "-----------------------------"
fi
if [ "x${build_img_file}" = "xenable" ] ; then
echo "Image file: ${imagename}"
echo "-----------------------------"
fi
}
check_mmc () {
#Sanity-check that ${media} is a block device fdisk can actually see,
#and make the user confirm before anything destructive happens to it.
FDISK=$(LC_ALL=C fdisk -l 2>/dev/null | grep "Disk ${media}:" | awk '{print $2}')
if [ ! "x${FDISK}" = "x${media}:" ] ; then
#device not found: show what is available and bail out
echo ""
echo "Are you sure? I Don't see [${media}], here is what I do see..."
echo ""
echo "lsblk:"
lsblk | grep -v sr0
echo ""
exit
fi
#device found: require an explicit "y" before continuing
echo ""
echo "I see..."
echo ""
echo "lsblk:"
lsblk | grep -v sr0
echo ""
unset response
echo -n "Are you 100% sure, on selecting [${media}] (y/n)? "
read response
if [ ! "x${response}" = "xy" ] ; then
exit
fi
echo ""
}
process_dtb_conf () {
#Apply defaults to, and validate, the settings sourced from a hwpack
#*.conf: boot partition offset, root device, boot filesystem type and
#the matching sfdisk partition-type code.
if [ "${conf_warning}" ] ; then
show_board_warning
fi
echo "-----------------------------"
#defaults, if not set...
case "${bootloader_location}" in
fatfs_boot)
conf_boot_startmb=${conf_boot_startmb:-"1"}
;;
*)
#dd_uboot_boot/dd_spl_uboot_boot and everything else share one default
conf_boot_startmb=${conf_boot_startmb:-"4"}
;;
esac
#https://wiki.linaro.org/WorkingGroups/KernelArchived/Projects/FlashCardSurvey
conf_root_device=${conf_root_device:-"/dev/mmcblk0"}
#error checking...
conf_boot_fstype="${conf_boot_fstype:-${ROOTFS_TYPE}}"
case "${conf_boot_fstype}" in
fat)
sfdisk_fstype=${sfdisk_fstype:-"0xE"}
;;
ext2|ext3|ext4|btrfs)
sfdisk_fstype="L"
;;
*)
echo "Error: [conf_boot_fstype] not recognized, stopping..."
exit
;;
esac
if [ "x${uboot_cape_overlays}" = "xenable" ] ; then
echo "U-Boot Overlays Enabled..."
fi
}
check_dtb_board () {
#Validate the --dtb <board> selection: normalize it to a bare board name
#(strip any leading path and a trailing ".conf"), source the matching
#"${DIR}/hwpack/<board>.conf" and post-process it. On an unknown board,
#list the supported boards and exit non-zero.
error_invalid_dtb=1
#--dtb may be given as a path (/hwpack/${dtb_board}.conf); keep only the
#basename. The old "echo | grep || unset" dance ran unset inside a
#command-substitution subshell (a no-op) and spawned two processes.
case "${dtb_board}" in
*/*)
dtb_board="${dtb_board##*/}"
;;
esac
#${dtb_board}.conf -> ${dtb_board}
dtb_board=$(echo ${dtb_board} | awk -F ".conf" '{print $1}')
if [ -f "${DIR}"/hwpack/${dtb_board}.conf ] ; then
. "${DIR}"/hwpack/${dtb_board}.conf
boot=${boot_image}
unset error_invalid_dtb
process_dtb_conf
else
cat <<-__EOF__
-----------------------------
ERROR: This script does not currently recognize the selected: [--dtb ${dtb_board}] option..
Please rerun $(basename $0) with a valid [--dtb <device>] option from the list below:
-----------------------------
__EOF__
cat "${DIR}"/hwpack/*.conf | grep supported
echo "-----------------------------"
#exit non-zero: a bare "exit" here used to propagate status 0
exit 1
fi
}
usage () {
#Print command-line help to stdout and terminate the script.
#single here-doc instead of echo + here-doc; output text is unchanged
cat <<-__EOF__
usage: sudo $(basename $0) --mmc /dev/sdX --dtb <dev board>
-----------------------------
Bugs email: "bugs at rcn-ee.com"
Required Options:
--mmc </dev/sdX> or --img <filename.img>
--dtb <dev board>
Additional Options:
-h --help
--probe-mmc
<list all partitions: sudo ./setup_sdcard.sh --probe-mmc>
__EOF__
exit
}
checkparm () {
#Guard for option parsing: the value following an option must not itself
#start with "-", which would mean the option's argument is missing.
if [ -n "$(echo $1 | grep '^-')" ] ; then
echo "E: Need an argument"
usage
fi
}
#--dtb is mandatory: assume it is invalid until check_dtb_board() clears this
error_invalid_dtb=1
# parse commandline options
while [ ! -z "$1" ] ; do
case $1 in
-h|--help)
usage
media=1
;;
--hostname)
checkparm $2
new_hostname="$2"
;;
--probe-mmc)
media="/dev/idontknow"
check_root
check_mmc
;;
--mmc)
checkparm $2
media="$2"
media_prefix="${media}"
echo ${media} | grep mmcblk >/dev/null && media_prefix="${media}p"
check_root
check_mmc
;;
--img|--img-[12468]gb)
checkparm $2
name=${2:-image}
gsize=$(echo "$1" | sed -ne 's/^--img-\([[:digit:]]\+\)gb$/\1/p')
# --img defaults to --img-2gb
gsize=${gsize:-2}
imagename=${name%.img}-${gsize}gb.img
media="${DIR}/${imagename}"
build_img_file="enable"
check_root
if [ -f "${media}" ] ; then
rm -rf "${media}" || true
fi
#FIXME: (should fit most microSD cards)
#eMMC: (dd if=/dev/mmcblk1 of=/dev/null bs=1M #MB)
#Micron 3744MB (bbb): 3925868544 bytes -> 3925.86 Megabyte
#Kingston 3688MB (bbb): 3867148288 bytes -> 3867.15 Megabyte
#Kingston 3648MB (x15): 3825205248 bytes -> 3825.21 Megabyte (3648)
#
### seek=$((1024 * (700 + (gsize - 1) * 1000)))
## 1000 1GB = 700 #2GB = 1700 #4GB = 3700
## 990 1GB = 700 #2GB = 1690 #4GB = 3670
#
### seek=$((1024 * (gsize * 850)))
## x 850 (85%) #1GB = 850 #2GB = 1700 #4GB = 3400
#
### seek=$((1024 * (gsize * 900)))
## x 900 (90%) #1GB = 900 #2GB = 1800 #4GB = 3600
#
dd if=/dev/zero of="${media}" bs=1024 count=0 seek=$((1024 * (gsize * 900)))
;;
--dtb)
checkparm $2
dtb_board="$2"
dir_check="${DIR}/"
kernel_detection
check_dtb_board
;;
--ro)
conf_var_startmb="2048"
option_ro_root="enable"
;;
--rootfs)
checkparm $2
ROOTFS_TYPE="$2"
;;
--boot_label)
checkparm $2
BOOT_LABEL="$2"
;;
--rootfs_label)
checkparm $2
ROOTFS_LABEL="$2"
;;
--spl)
checkparm $2
LOCAL_SPL="$2"
SPL="${LOCAL_SPL##*/}"
blank_SPL="${SPL}"
USE_LOCAL_BOOT=1
;;
--bootloader)
checkparm $2
LOCAL_BOOTLOADER="$2"
UBOOT="${LOCAL_BOOTLOADER##*/}"
blank_UBOOT="${UBOOT}"
USE_LOCAL_BOOT=1
;;
--use-beta-bootloader)
USE_BETA_BOOTLOADER=1
;;
--a335-flasher)
oem_blank_eeprom="enable"
a335_flasher="enable"
uboot_eeprom="bbb_blank"
;;
--bp00-flasher)
oem_blank_eeprom="enable"
bp00_flasher="enable"
;;
--bbg-flasher)
oem_blank_eeprom="enable"
bbg_flasher="enable"
;;
--bbgw-flasher)
oem_blank_eeprom="enable"
bbgw_flasher="enable"
;;
--bbgg-flasher)
oem_blank_eeprom="enable"
uboot_eeprom="bbgg_blank"
#default:
emmc_flasher="enable"
;;
--m10a-flasher)
oem_blank_eeprom="enable"
m10a_flasher="enable"
;;
--me06-flasher)
oem_blank_eeprom="enable"
me06_flasher="enable"
;;
--bbb-usb-flasher|--usb-flasher|--oem-flasher)
oem_blank_eeprom="enable"
usb_flasher="enable"
;;
--bbb-flasher|--emmc-flasher)
oem_blank_eeprom="enable"
uboot_eeprom="bbb_blank"
#default:
emmc_flasher="enable"
;;
--bbbl-flasher)
oem_blank_eeprom="enable"
bbbl_flasher="enable"
uboot_eeprom="bbbl_blank"
;;
--bbbw-flasher)
oem_blank_eeprom="enable"
bbbw_flasher="enable"
uboot_eeprom="bbbw_blank"
;;
--bbb-old-bootloader-in-emmc)
echo "[--bbb-old-bootloader-in-emmc] is obsolete, and has been removed..."
exit 2
;;
--x15-force-revb-flash)
x15_force_revb_flash="enable"
;;
--am57xx-x15-flasher)
flasher_uboot="beagle_x15_flasher"
;;
--am57xx-x15-revc-flasher)
flasher_uboot="beagle_x15_revc_flasher"
;;
--am571x-sndrblock-flasher)
flasher_uboot="am571x_sndrblock_flasher"
;;
--oem-flasher-script)
checkparm $2
oem_flasher_script="$2"
;;
--oem-flasher-img)
checkparm $2
oem_flasher_img="$2"
;;
--oem-flasher-eeprom)
checkparm $2
oem_flasher_eeprom="$2"
;;
--oem-flasher-job)
checkparm $2
oem_flasher_job="$2"
;;
--enable-systemd)
echo "--enable-systemd: option is depreciated (enabled by default Jessie+)"
;;
--enable-cape-universal)
enable_cape_universal="enable"
;;
--enable-uboot-cape-overlays)
uboot_cape_overlays="enable"
;;
--enable-uboot-disable-video)
uboot_disable_video="enable"
;;
--enable-uboot-disable-audio)
uboot_disable_audio="enable"
;;
--enable-uboot-pru-rproc-44ti)
echo "[--enable-uboot-pru-rproc-44ti] is obsolete, use [--enable-uboot-pru-rproc-414ti]"
exit 2
;;
--enable-uboot-pru-rproc-49ti)
echo "[--enable-uboot-pru-rproc-49ti] is obsolete, use [--enable-uboot-pru-rproc-414ti]"
exit 2
;;
--enable-uboot-pru-rproc-414ti)
uboot_pru_rproc_414ti="enable"
;;
--enable-uboot-pru-rproc-419ti)
uboot_pru_rproc_419ti="enable"
;;
--enable-mainline-pru-rproc)
mainline_pru_rproc="enable"
;;
--enable-uboot-pru-uio-419)
echo "[--enable-uboot-pru-uio-419] is obsolete, and has been removed..."
exit 2
;;
--efi)
uboot_efi_mode="enable"
;;
--offline)
offline=1
;;
--kernel)
checkparm $2
kernel_override="$2"
;;
--enable-cape)
#checkparm $2
#oobe_cape="$2"
echo "[--enable-cape XYZ] is obsolete, and has been removed..."
exit 2
;;
--enable-fat-partition)
enable_fat_partition="enable"
;;
--force-device-tree)
checkparm $2
forced_dtb="$2"
;;
esac
shift
done
# ---- Post-option-parsing checks and top-level build flow ----------------
# A target must have been chosen: --mmc sets ${media} to a block device,
# --img/--img-Ngb to an image file path.
if [ ! "${media}" ] ; then
echo "ERROR: --mmc undefined"
usage
fi
# Presumably set by check_dtb_board when --dtb named an unknown board
# (check_dtb_board is defined elsewhere in this file) -- TODO confirm.
if [ "${error_invalid_dtb}" ] ; then
echo "-----------------------------"
echo "ERROR: --dtb undefined"
echo "-----------------------------"
usage
fi
# Reject unsupported --rootfs values early, listing the valid choices.
if ! is_valid_rootfs_type ${ROOTFS_TYPE} ; then
echo "ERROR: ${ROOTFS_TYPE} is not a valid root filesystem type"
echo "Valid types: ${VALID_ROOTFS_TYPES}"
exit
fi
# btrfs targets need mkfs.btrfs on the build host; bail with install hints.
unset BTRFS_FSTAB
if [ "x${ROOTFS_TYPE}" = "xbtrfs" ] ; then
unset NEEDS_COMMAND
check_for_command mkfs.btrfs btrfs-tools
if [ "${NEEDS_COMMAND}" ] ; then
echo ""
echo "Your system is missing the btrfs dependency needed for this particular target."
echo "Ubuntu/Debian: sudo apt-get install btrfs-tools"
echo "Fedora: as root: yum install btrfs-progs"
echo "Gentoo: emerge btrfs-progs"
echo ""
exit
fi
BTRFS_FSTAB=1
# Unless the boot partition was explicitly forced to FAT, use btrfs there too.
if [ ! "x${conf_boot_fstype}" = "xfat" ] ; then
conf_boot_fstype="btrfs"
fi
fi
find_issue
detect_software
# Fetch (or reuse a local copy of) the bootloader only when the board
# definition named one; spl_name/boot_name come from earlier setup.
if [ "${spl_name}" ] || [ "${boot_name}" ] ; then
if [ "${USE_LOCAL_BOOT}" ] ; then
local_bootloader
else
dl_bootloader
fi
fi
# Writing to a real device (not an --img file): make sure none of its
# partitions are mounted before repartitioning.
if [ ! "x${build_img_file}" = "xenable" ] ; then
unmount_all_drive_partitions
fi
create_partitions
populate_boot
populate_rootfs
exit 0
#
|
$('document').ready(function(){
$("#btn-login").click(function(){
$("#login-form").validate({
rules:
{
password: {
required: true,
},
user_email: {
required: true,
email: true
}
},
messages:
{
password:{
required: "<PASSWORD>"
},
user_email: "Lütfen email adresinizi giriniz",
},
submitHandler: submitLoginForm
});
});
function submitLoginForm()
{
var data = $("#login-form").serialize();
$.ajax({
type : 'POST',
url : 'process.php',
data : data,
beforeSend: function(){
$("#error").fadeOut();
$("#btn-login").html('<span class="glyphicon glyphicon-transfer"></span> gönderiliyor ...');
},
success : function(response){
if(response=="ok"){
$("#btn-login").html('<img src="image/btn-ajax-loader.gif" /> Giriş Yapılıyor ...');
setTimeout(' window.location.href = "home.php"; ',4000);
}
else{
$("#error").fadeIn(1000, function(){
$("#error").html('<div class="alert alert-danger"> <span class="glyphicon glyphicon-info-sign"></span> '+response+' !</div>');
$("#btn-login").html('<span class="glyphicon glyphicon-log-in"></span> Giriş Yap');
});
}
}
});
return false;
}
$("#btn-register").click(function(){
$("#register-form").validate({
rules:
{
name:{
required: true,
},
surname:{
required: true,
},
password: {
required: true,
},
user_email: {
required: true,
email: true
},
},
messages:
{
name:{
required: "Lütfen adınızı giriniz"
},
surname:{
required: "Lütfen soyadınızı giriniz"
},
password:{
required: "Lütfen şifrenizi giriniz"
},
user_email: "Lütfen email adresinizi giriniz",
},
submitHandler: submitRegisterForm
});
});
function submitRegisterForm(){
var data = $("#register-form").serialize();
$.ajax({
type : 'POST',
url : 'process.php',
data : data,
beforeSend: function(){
$("#error").fadeOut();
$("#btn-register").html('<span class="glyphicon glyphicon-transfer"></span> gönderiliyor ...');
},
success : function(response){
if(response=="ok"){
$("#btn-register").html('<img src="image/btn-ajax-loader.gif" /> Kayıt Yapılıyor ...');
setTimeout(' window.location.href = "index.php?kayit=true"; ',4000);
}
else{
$("#error").fadeIn(1000, function(){
$("#error").html('<div class="alert alert-danger"> <span class="glyphicon glyphicon-info-sign"></span> '+response+' !</div>');
$("#btn-register").html('<span class="glyphicon glyphicon-log-in"></span> Kayıt Ol');
});
}
}
});
return false;
}
// ---------------------------------------------------------------------
// CRUD click handlers ("ekle" = create, "duzenle" = edit). Each handler
// serializes its form, POSTs it to process.php with a marker field
// (e.g. "&btn-table-ekle=") so the backend can dispatch, and refreshes
// the #dynamic listing on success. process.php returns "0" on failure.
// The "duzenle" variants read the record id from a data-id attribute.
// NOTE(review): the error branch displays `response` even though it is
// "0" in that branch -- presumably intentional, confirm in process.php.
// ---------------------------------------------------------------------
// Create a table record.
$("#table-ekle").click(function(){
var data = $("#table-form").serialize();
$.ajax({
type : 'POST',
url : 'process.php',
data : data +"&btn-table-ekle=",
beforeSend: function(){
$("#error").fadeOut();
$("#table-ekle").html('<span class="glyphicon glyphicon-transfer"></span> gönderiliyor ...');
},
success : function(response){
if(response != "0"){
$("#table-ekle").html('<img src="image/btn-ajax-loader.gif" /> Kayıt Yapılıyor ...');
dynamic();
$("#table-ekle").html('<span class="glyphicon glyphicon-log-in"></span> Oluştur');
}
else{
$("#error").fadeIn(1000, function(){
$("#error").html('<div class="alert alert-danger"> <span class="glyphicon glyphicon-info-sign"></span> '+response+' !</div>');
$("#table-ekle").html('<span class="glyphicon glyphicon-log-in"></span> Oluştur');
});
}
}
});
});
// Edit a table record; id comes from #tableDuzenle's data-id.
$("#table-duzenle").click(function(){
var data = $("#table-duzenle-form").serialize();
var id = $("#tableDuzenle").data('id');
$.ajax({
type : 'POST',
url : 'process.php',
data : data +"&id="+id+"&btn-table-duzenle=",
beforeSend: function(){
$("#error").fadeOut();
$("#table-duzenle").html('<span class="glyphicon glyphicon-transfer"></span> gönderiliyor ...');
},
success : function(response){
if(response != "0"){
$("#table-duzenle").html('<img src="image/btn-ajax-loader.gif" /> Kayıt Yapılıyor ...');
dynamic();
$("#table-duzenle").html('<span class="glyphicon glyphicon-log-in"></span> Düzenle');
}
else{
$("#error").fadeIn(1000, function(){
$("#error").html('<div class="alert alert-danger"> <span class="glyphicon glyphicon-info-sign"></span> '+response+' !</div>');
$("#table-duzenle").html('<span class="glyphicon glyphicon-log-in"></span> Düzenle');
});
}
}
});
});
// Create an image record.
$("#img-ekle").click(function(){
var data = $("#img-form").serialize();
$.ajax({
type : 'POST',
url : 'process.php',
data : data +"&btn-img-ekle=",
beforeSend: function(){
$("#error").fadeOut();
$("#img-ekle").html('<span class="glyphicon glyphicon-transfer"></span> gönderiliyor ...');
},
success : function(response){
if(response != "0"){
$("#img-ekle").html('<img src="image/btn-ajax-loader.gif" /> Kayıt Yapılıyor ...');
dynamic();
$("#img-ekle").html('<span class="glyphicon glyphicon-log-in"></span> Oluştur');
}
else{
$("#error").fadeIn(1000, function(){
$("#error").html('<div class="alert alert-danger"> <span class="glyphicon glyphicon-info-sign"></span> '+response+' !</div>');
$("#img-ekle").html('<span class="glyphicon glyphicon-log-in"></span> Oluştur');
});
}
}
});
});
// Edit an image record; id comes from #imgDuzenle's data-id.
$("#img-duzenle").click(function(){
var data = $("#img-duzenle-form").serialize();
var id = $("#imgDuzenle").data('id');
$.ajax({
type : 'POST',
url : 'process.php',
data : data +"&id="+id+"&btn-img-duzenle=",
beforeSend: function(){
$("#error").fadeOut();
$("#img-duzenle").html('<span class="glyphicon glyphicon-transfer"></span> gönderiliyor ...');
},
success : function(response){
if(response != "0"){
$("#img-duzenle").html('<img src="image/btn-ajax-loader.gif" /> Kayıt Yapılıyor ...');
dynamic();
$("#img-duzenle").html('<span class="glyphicon glyphicon-log-in"></span> Düzenle');
}
else{
$("#error").fadeIn(1000, function(){
$("#error").html('<div class="alert alert-danger"> <span class="glyphicon glyphicon-info-sign"></span> '+response+' !</div>');
$("#img-duzenle").html('<span class="glyphicon glyphicon-log-in"></span> Düzenle');
});
}
}
});
});
// Create a "p" record (entity name only visible here; semantics in process.php).
$("#p-ekle").click(function(){
var data = $("#p-form").serialize();
$.ajax({
type : 'POST',
url : 'process.php',
data : data +"&btn-p-ekle=",
beforeSend: function(){
$("#error").fadeOut();
$("#p-ekle").html('<span class="glyphicon glyphicon-transfer"></span> gönderiliyor ...');
},
success : function(response){
if(response != "0"){
$("#p-ekle").html('<img src="image/btn-ajax-loader.gif" /> Kayıt Yapılıyor ...');
dynamic();
$("#p-ekle").html('<span class="glyphicon glyphicon-log-in"></span> Oluştur');
}
else{
$("#error").fadeIn(1000, function(){
$("#error").html('<div class="alert alert-danger"> <span class="glyphicon glyphicon-info-sign"></span> '+response+' !</div>');
$("#p-ekle").html('<span class="glyphicon glyphicon-log-in"></span> Oluştur');
});
}
}
});
});
// Edit a "p" record; id comes from #pDuzenle's data-id.
$("#p-duzenle").click(function(){
var data = $("#p-duzenle-form").serialize();
var id = $("#pDuzenle").data('id');
$.ajax({
type : 'POST',
url : 'process.php',
data : data +"&id="+id+"&btn-p-duzenle=",
beforeSend: function(){
$("#error").fadeOut();
$("#p-duzenle").html('<span class="glyphicon glyphicon-transfer"></span> gönderiliyor ...');
},
success : function(response){
if(response != "0"){
$("#p-duzenle").html('<img src="image/btn-ajax-loader.gif" /> Kayıt Yapılıyor ...');
dynamic();
$("#p-duzenle").html('<span class="glyphicon glyphicon-log-in"></span> Düzenle');
}
else{
$("#error").fadeIn(1000, function(){
$("#error").html('<div class="alert alert-danger"> <span class="glyphicon glyphicon-info-sign"></span> '+response+' !</div>');
$("#p-duzenle").html('<span class="glyphicon glyphicon-log-in"></span> Düzenle');
});
}
}
});
});
// Create an "a" record.
$("#a-ekle").click(function(){
var data = $("#a-form").serialize();
$.ajax({
type : 'POST',
url : 'process.php',
data : data +"&btn-a-ekle=",
beforeSend: function(){
$("#error").fadeOut();
$("#a-ekle").html('<span class="glyphicon glyphicon-transfer"></span> gönderiliyor ...');
},
success : function(response){
if(response != "0"){
$("#a-ekle").html('<img src="image/btn-ajax-loader.gif" /> Kayıt Yapılıyor ...');
dynamic();
$("#a-ekle").html('<span class="glyphicon glyphicon-log-in"></span> Oluştur');
}
else{
$("#error").fadeIn(1000, function(){
$("#error").html('<div class="alert alert-danger"> <span class="glyphicon glyphicon-info-sign"></span> '+response+' !</div>');
$("#a-ekle").html('<span class="glyphicon glyphicon-log-in"></span> Oluştur');
});
}
}
});
});
// Edit an "a" record; id comes from #aDuzenle's data-id.
$("#a-duzenle").click(function(){
var data = $("#a-duzenle-form").serialize();
var id = $("#aDuzenle").data('id');
$.ajax({
type : 'POST',
url : 'process.php',
data : data +"&id="+id+"&btn-a-duzenle=",
beforeSend: function(){
$("#error").fadeOut();
$("#a-duzenle").html('<span class="glyphicon glyphicon-transfer"></span> gönderiliyor ...');
},
success : function(response){
if(response != "0"){
$("#a-duzenle").html('<img src="image/btn-ajax-loader.gif" /> Kayıt Yapılıyor ...');
dynamic();
$("#a-duzenle").html('<span class="glyphicon glyphicon-log-in"></span> Düzenle');
}
else{
$("#error").fadeIn(1000, function(){
$("#error").html('<div class="alert alert-danger"> <span class="glyphicon glyphicon-info-sign"></span> '+response+' !</div>');
$("#a-duzenle").html('<span class="glyphicon glyphicon-log-in"></span> Düzenle');
});
}
}
});
});
}); // end of document-ready wrapper
// Refreshes the #dynamic container with markup fetched from process.php.
// The bare payload "dynamic" tells process.php which action to perform;
// "0" signals failure, any other response is injected as HTML.
function dynamic(){
$.ajax({
type : 'POST',
url : 'process.php',
data : 'dynamic',
success : function(response){
if(response!="0"){
$("#dynamic").html(response);
}
}
});
}
// Deletes the record with the given id ("sil" = delete in Turkish) via
// process.php, then refreshes the #dynamic listing on success.
function sil(id){
$.ajax({
type : 'POST',
url : 'process.php',
data : "id="+id+"&sil=",
success : function(response){
if(response!="0"){
dynamic();
}
}
});
}
|
package org.cmayes.hartree.calc;
/**
 * Contract for performing a calculation on a given input.
 *
 * @author cmayes
 */
public interface Calculation {
/**
 * Performs the calculation.
 *
 * @param procResult
 *            The input to process.
 * @return The results of the calculation.
 */
Object calculate(Object procResult);
}
|
/* **** Notes
Restore.
Remarks:
Along with C library
//*/
# define CAR
# include <stdio.h>
# include "../../../incl/config.h"
/*
 * restore_rules - restores/rebuilds the rule buffers of the CLI page
 * referenced by *argp.
 *
 * arg:  non-zero to append to the existing rule buffer; zero to
 *       re-initialise the index-side buffer first (see "append or.."
 *       note on the parameter).
 * argp: page to operate on; NULL is rejected.
 *
 * Returns 1 on success, 0 on failure (NULL page, CLI state not
 * initialised, or a failing init/append step).
 *
 * NOTE(review): CLI_BASE, CLI_INDEX, CLI_INIT and R() are macros from
 * ../../../incl/config.h and are not visible here; the comments below
 * describe apparent intent only -- confirm against config.h.
 */
signed(__cdecl restore_rules(signed(arg/* append or.. */),page_t(*argp))) {
auto signed char *b;
auto rule_t *rule;
auto signed r;
auto signed short flag;
if(!argp) return(0x00);
/* Presumably fetches the page's status flags; require CLI_INIT set. */
flag = (*(CLI_BASE+(R(flag,*argp))));
if(!(CLI_INIT&(flag))) return(0x00);
rule = (CLI_INDEX+(R(rule,*argp)));
if(!arg) {
/* Not appending: reset the index-side rule buffer first. */
r = init_rule_b(0x01,rule);
if(!r) return(0x00);
}
/* Copy the base-side buffer into the base rule slot... */
b = (*(CLI_BASE+(R(b,*rule))));
rule = (CLI_BASE+(R(rule,*argp)));
r = append_rule_b(rule,b);
if(!r) return(0x00);
/* ...then rebuild the index-side slot from the base rule's index buffer. */
rule = (CLI_INDEX+(R(rule,*argp)));
r = init_rule_b(0x01,rule);
if(!r) return(0x00);
rule = (CLI_BASE+(R(rule,*argp)));
b = (*(CLI_INDEX+(R(b,*rule))));
rule = (CLI_INDEX+(R(rule,*argp)));
r = append_rule_b(rule,b);
if(!r) return(0x00);
return(0x01);
}
|
#!/bin/bash
# Run a single dieharder RNG test (see dieharder(1)):
#   -d 209       : run test number 209 (`dieharder -l` lists tests)
#   -g 30        : use generator number 30 (`dieharder -g -1` lists them)
#   -S 246603919 : fixed seed, so the run is reproducible
dieharder -d 209 -g 30 -S 246603919
|
#!/bin/bash
# Grid Engine (qsub) job script: runs the SeqUDAS FastQC step.
#
# Positional arguments (forwarded at submission time):
#   $1, $2 : passed straight through to Cluster/fastqc.py
#            (presumably input/output locations -- confirm in fastqc.py)
#   $3     : base directory that contains Cluster/fastqc.py
#
# SGE directives: export submit environment (-V), job name, run in the
# submission directory (-cwd), 7 slots in the smp PE, 100G vmem limit.
#$ -V
#$ -N sequdas_fastqc
#$ -cwd
#$ -pe smp 7
#$ -l h_vmem=100G
# FIX: quote all expansions so paths containing spaces (or glob
# characters) are not word-split/globbed by the shell.
python "$3/Cluster/fastqc.py" "$1" "$2"
|
<gh_stars>1-10
package de.ids_mannheim.korap.service;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.regex.Pattern;
import javax.ws.rs.core.Response.Status;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import de.ids_mannheim.korap.cache.VirtualCorpusCache;
import de.ids_mannheim.korap.config.FullConfiguration;
import de.ids_mannheim.korap.constant.GroupMemberStatus;
import de.ids_mannheim.korap.constant.QueryAccessStatus;
import de.ids_mannheim.korap.constant.QueryType;
import de.ids_mannheim.korap.constant.ResourceType;
import de.ids_mannheim.korap.dao.AdminDao;
import de.ids_mannheim.korap.dao.QueryAccessDao;
import de.ids_mannheim.korap.dao.QueryDao;
import de.ids_mannheim.korap.dto.QueryAccessDto;
import de.ids_mannheim.korap.dto.QueryDto;
import de.ids_mannheim.korap.dto.converter.QueryAccessConverter;
import de.ids_mannheim.korap.dto.converter.QueryConverter;
import de.ids_mannheim.korap.entity.QueryAccess;
import de.ids_mannheim.korap.entity.QueryDO;
import de.ids_mannheim.korap.entity.UserGroup;
import de.ids_mannheim.korap.entity.UserGroupMember;
import de.ids_mannheim.korap.exceptions.KustvaktException;
import de.ids_mannheim.korap.exceptions.StatusCodes;
import de.ids_mannheim.korap.query.serialize.QuerySerializer;
import de.ids_mannheim.korap.user.User.CorpusAccess;
import de.ids_mannheim.korap.utils.JsonUtils;
import de.ids_mannheim.korap.utils.KoralCollectionQueryBuilder;
import de.ids_mannheim.korap.utils.ParameterChecker;
import de.ids_mannheim.korap.web.SearchKrill;
import de.ids_mannheim.korap.web.controller.QueryReferenceController;
import de.ids_mannheim.korap.web.controller.VirtualCorpusController;
import de.ids_mannheim.korap.web.input.QueryJson;
/**
* QueryService handles the logic behind
* {@link VirtualCorpusController} and
* {@link QueryReferenceController}. Virtual corpora and
* stored-queries are both treated as queries of different types.
* Thus, they are handled logically similarly.
*
* QueryService communicates with {@link QueryDao}, handles
* {@link QueryDO} and
* returns
* {@link QueryDto} to {@link VirtualCorpusController} and
* {@link QueryReferenceController}.
*
* @author margaretha
*
*/
@Service
public class QueryService {
// Shared class logger.
public static Logger jlog =
LogManager.getLogger(QueryService.class);
// Toggles verbose debug logging throughout this service.
public static boolean DEBUG = false;
// Legal query names: one or more of letters, digits, '_', '-', '.'.
public static Pattern queryNamePattern = Pattern.compile("[-\\w.]+");
// Persistence for queries / virtual corpora.
@Autowired
private QueryDao queryDao;
// Persistence for query-access (sharing) records.
@Autowired
private QueryAccessDao accessDao;
// Admin-role lookups.
@Autowired
private AdminDao adminDao;
// Group membership / hidden-group management.
@Autowired
private UserGroupService userGroupService;
// Search backend, used here for corpus statistics.
@Autowired
private SearchKrill krill;
@Autowired
private FullConfiguration config;
// Entity -> DTO converters.
@Autowired
private QueryConverter converter;
@Autowired
private QueryAccessConverter accessConverter;
/**
 * Ensures the authenticated user may act on resources owned by
 * pathUsername: either the names match or the user is an admin.
 *
 * @throws KustvaktException with AUTHORIZATION_FAILED otherwise
 */
private void verifyUsername (String contextUsername, String pathUsername)
        throws KustvaktException {
    // Admins may operate on behalf of any user.
    if (contextUsername.equals(pathUsername)
            || adminDao.isAdmin(contextUsername)) {
        return;
    }
    throw new KustvaktException(StatusCodes.AUTHORIZATION_FAILED,
            "Unauthorized operation for user: " + contextUsername,
            contextUsername);
}
/**
 * Lists the queries created by queryCreator. The caller must be that
 * user or an admin (enforced via verifyUsername).
 */
public List<QueryDto> listOwnerQuery (String username,
        String queryCreator, QueryType queryType) throws KustvaktException {
    verifyUsername(username, queryCreator);
    List<QueryDO> ownerQueries =
            queryDao.retrieveOwnerQuery(username, queryType);
    return createQueryDtos(ownerQueries, queryType);
}
/**
 * Lists all system-owned queries of the given query type.
 */
public List<QueryDto> listSystemQuery (QueryType queryType)
        throws KustvaktException {
    List<QueryDO> systemQueries = queryDao.retrieveQueryByType(
            ResourceType.SYSTEM, null, queryType);
    return createQueryDtos(systemQueries, queryType);
}
/**
 * Lists queries available to the given user. When username is null it
 * defaults to the authenticated user; otherwise it must match the
 * authenticated user unless that user is an admin.
 */
public List<QueryDto> listAvailableQueryForUser (
        String authenticatedUsername, String username, QueryType queryType)
        throws KustvaktException {
    // Admin check is done up front (as in the original call pattern).
    boolean admin = adminDao.isAdmin(authenticatedUsername);
    if (username == null) {
        username = authenticatedUsername;
    }
    else if (!username.equals(authenticatedUsername) && !admin) {
        throw new KustvaktException(StatusCodes.AUTHORIZATION_FAILED,
                "Unauthorized operation for user: "
                        + authenticatedUsername,
                authenticatedUsername);
    }
    return createQueryDtos(
            queryDao.retrieveQueryByUser(username, queryType), queryType);
}
/**
 * Admin-only listing of queries by resource type and query type,
 * optionally filtered by creator, sorted in natural order.
 */
public List<QueryDto> listQueryByType (String username,
        String createdBy, ResourceType type, QueryType queryType)
        throws KustvaktException {
    // Guard clause: only admins may use this listing.
    if (!adminDao.isAdmin(username)) {
        throw new KustvaktException(StatusCodes.AUTHORIZATION_FAILED,
                "Unauthorized operation for user: " + username, username);
    }
    List<QueryDO> queries =
            queryDao.retrieveQueryByType(type, createdBy, queryType);
    Collections.sort(queries);
    return createQueryDtos(queries, queryType);
}
/**
 * Converts query entities into DTOs. Statistics are currently disabled
 * and passed as null (the krill.getStatistics call was commented out).
 */
private ArrayList<QueryDto> createQueryDtos (
        List<QueryDO> queryList, QueryType queryType)
        throws KustvaktException {
    ArrayList<QueryDto> dtos = new ArrayList<>(queryList.size());
    for (QueryDO entity : queryList) {
        // Formerly: statistics = krill.getStatistics(entity.getKoralQuery())
        // for virtual corpora; intentionally left disabled.
        dtos.add(converter.createQueryDto(entity, null));
    }
    return dtos;
}
/**
 * Deletes the query identified by createdBy/queryName.
 *
 * Only the owner or an admin may delete. Published queries first lose
 * their auto-generated hidden access and hidden group; cached virtual
 * corpora are evicted from the VC cache before the entity is removed.
 *
 * @throws KustvaktException NO_RESOURCE_FOUND if the query does not
 *         exist, AUTHORIZATION_FAILED if the caller may not delete it
 */
public void deleteQueryByName (String username, String queryName,
String createdBy, QueryType type) throws KustvaktException {
QueryDO query = queryDao.retrieveQueryByName(queryName, createdBy);
if (query == null) {
String code = createdBy + "/" + queryName;
throw new KustvaktException(StatusCodes.NO_RESOURCE_FOUND,
"Query " + code + " is not found.",
String.valueOf(code));
}
else if (query.getCreatedBy().equals(username)
|| adminDao.isAdmin(username)) {
// Published queries own a hidden access + hidden group; drop both.
if (query.getType().equals(ResourceType.PUBLISHED)) {
QueryAccess access =
accessDao.retrieveHiddenAccess(query.getId());
accessDao.deleteAccess(access, "system");
userGroupService.deleteAutoHiddenGroup(
access.getUserGroup().getId(), "system");
}
// Evict a cached VC of the same name before deleting the entity.
if (type.equals(QueryType.VIRTUAL_CORPUS)
&& VirtualCorpusCache.contains(queryName)) {
VirtualCorpusCache.delete(queryName);
}
queryDao.deleteQuery(query);
}
else {
throw new KustvaktException(StatusCodes.AUTHORIZATION_FAILED,
"Unauthorized operation for user: " + username, username);
}
}
/**
 * PUT semantics: creates the query when it does not exist yet
 * (201 CREATED), otherwise updates it in place (204 NO_CONTENT).
 */
public Status handlePutRequest (String username, String queryCreator,
        String queryName, QueryJson queryJson) throws KustvaktException {
    verifyUsername(username, queryCreator);
    QueryDO existing =
            queryDao.retrieveQueryByName(queryName, queryCreator);
    if (existing != null) {
        editQuery(existing, queryJson, queryName, username);
        return Status.NO_CONTENT;
    }
    storeQuery(queryJson, queryName, queryCreator, username);
    return Status.CREATED;
}
/**
 * Updates an existing query from the given payload. Only the owner or
 * an admin may edit. Re-serializes the KoralQuery when a corpus query
 * or a query+queryLanguage pair is supplied, and handles transitions
 * into and out of the PUBLISHED resource type (creating or removing
 * the hidden access group accordingly).
 */
public void editQuery (QueryDO existingQuery, QueryJson newQuery,
String queryName, String username) throws KustvaktException {
if (!username.equals(existingQuery.getCreatedBy())
&& !adminDao.isAdmin(username)) {
throw new KustvaktException(StatusCodes.AUTHORIZATION_FAILED,
"Unauthorized operation for user: " + username, username);
}
String koralQuery = null;
CorpusAccess requiredAccess = null;
String corpusQuery = newQuery.getCorpusQuery();
String query = newQuery.getQuery();
String queryLanguage = newQuery.getQueryLanguage();
// A corpus query takes precedence; it also re-determines the access level.
if (corpusQuery != null && !corpusQuery.isEmpty()) {
koralQuery = serializeCorpusQuery(corpusQuery);
requiredAccess = determineRequiredAccess(newQuery.isCached(), queryName,
koralQuery);
}
else if (query != null && !query.isEmpty() && queryLanguage != null
&& !queryLanguage.isEmpty()) {
koralQuery = serializeQuery(query, queryLanguage);
}
ResourceType type = newQuery.getType();
if (type != null) {
if (existingQuery.getType().equals(ResourceType.PUBLISHED)) {
// withdraw from publication
if (!type.equals(ResourceType.PUBLISHED)) {
QueryAccess hiddenAccess =
accessDao.retrieveHiddenAccess(existingQuery.getId());
deleteQueryAccess(hiddenAccess.getId(), "system");
int groupId = hiddenAccess.getUserGroup().getId();
userGroupService.deleteAutoHiddenGroup(groupId, "system");
// EM: should the users within the hidden group
// receive
// notifications?
}
// else remains the same
}
else if (type.equals(ResourceType.PUBLISHED)) {
publishQuery(existingQuery.getId());
}
}
// Persist all (possibly null = unchanged) fields in one DAO call.
queryDao.editQuery(existingQuery, queryName, type, requiredAccess,
koralQuery, newQuery.getDefinition(), newQuery.getDescription(),
newQuery.getStatus(), newQuery.isCached(), query, queryLanguage);
}
/**
 * Publishes a query by creating an auto-generated hidden group and a
 * HIDDEN access record for it. Logs an error (and does nothing) if a
 * hidden access already exists for the query.
 */
private void publishQuery (int queryId) throws KustvaktException {
    QueryAccess existingAccess = accessDao.retrieveHiddenAccess(queryId);
    if (existingAccess != null) {
        // should not happen: the query already has a hidden access
        jlog.error("Cannot publish query with id: " + queryId
                + ". Hidden access exists! Access id: "
                + existingAccess.getId());
        return;
    }
    QueryDO query = queryDao.retrieveQueryById(queryId);
    // create and assign a new hidden group
    int groupId = userGroupService.createAutoHiddenGroup();
    UserGroup hiddenGroup = userGroupService.retrieveUserGroupById(groupId);
    accessDao.createAccessToQuery(query, hiddenGroup, "system",
            QueryAccessStatus.HIDDEN);
}
/**
 * Serializes the JSON payload into a KoralQuery according to its query
 * type, then delegates to the full storeQuery variant.
 */
public void storeQuery (QueryJson query, String queryName,
        String queryCreator, String username) throws KustvaktException {
    QueryType queryType = query.getQueryType();
    String koralQuery = null;
    if (queryType.equals(QueryType.VIRTUAL_CORPUS)) {
        // VCs are defined by a corpus query.
        ParameterChecker.checkStringValue(query.getCorpusQuery(),
                "corpusQuery");
        koralQuery = serializeCorpusQuery(query.getCorpusQuery());
    }
    else if (queryType.equals(QueryType.QUERY)) {
        // Stored queries need both the query string and its language.
        ParameterChecker.checkStringValue(query.getQuery(), "query");
        ParameterChecker.checkStringValue(query.getQueryLanguage(),
                "queryLanguage");
        koralQuery = serializeQuery(query.getQuery(),
                query.getQueryLanguage());
    }
    storeQuery(username, queryName, query.getType(), queryType,
            koralQuery, query.getDefinition(), query.getDescription(),
            query.getStatus(), query.isCached(), queryCreator,
            query.getQuery(), query.getQueryLanguage());
}
/**
 * Persists a new query/virtual corpus.
 *
 * Validates the name against queryNamePattern, forces the creator to
 * "system" for SYSTEM resources (admins only), determines the required
 * corpus access level for virtual corpora, creates the entity, and
 * publishes it when the resource type is PUBLISHED.
 *
 * @throws KustvaktException INVALID_ARGUMENT for a bad name,
 *         AUTHORIZATION_FAILED for non-admin SYSTEM creation,
 *         DB_INSERT_FAILED when persisting fails
 */
public void storeQuery (String username, String queryName,
        ResourceType type, QueryType queryType, String koralQuery,
        String definition, String description, String status,
        boolean isCached, String queryCreator, String query,
        String queryLanguage) throws KustvaktException {
    ParameterChecker.checkNameValue(queryName, "queryName");
    ParameterChecker.checkObjectValue(type, "type");
    if (!queryNamePattern.matcher(queryName).matches()) {
        // FIX: message now matches the actual pattern [-\w.]+ (the old
        // text claimed spaces were allowed and omitted dots).
        throw new KustvaktException(StatusCodes.INVALID_ARGUMENT,
                queryType.displayName() + " name must only contain "
                        + "letters, numbers, underscores, hyphens and dots",
                queryName);
    }
    // SYSTEM resources are always owned by "system"; only admins (or the
    // system user itself) may create them.
    if (type.equals(ResourceType.SYSTEM)) {
        if (adminDao.isAdmin(username)) {
            queryCreator = "system";
        }
        else if (!username.equals("system")) {
            throw new KustvaktException(StatusCodes.AUTHORIZATION_FAILED,
                    "Unauthorized operation for user: " + username,
                    username);
        }
    }
    CorpusAccess requiredAccess = CorpusAccess.PUB;
    if (queryType.equals(QueryType.VIRTUAL_CORPUS)) {
        requiredAccess =
                determineRequiredAccess(isCached, queryName, koralQuery);
    }
    if (DEBUG) {
        // FIX: added the missing space before "in the database".
        jlog.debug("Storing query: " + queryName + " in the database");
    }
    int queryId = 0;
    try {
        queryId = queryDao.createQuery(queryName, type, queryType,
                requiredAccess, koralQuery, definition, description, status,
                isCached, queryCreator, query, queryLanguage);
    }
    catch (Exception e) {
        // Surface the most specific cause: walk the chain looking for an
        // SQLException, otherwise stop at the deepest cause. FIX: the old
        // loop could finish with cause == null (when e has no cause) and
        // then NPE on cause.getMessage(); this walk never leaves cause null.
        Throwable cause = e;
        while (cause.getCause() != null && cause.getCause() != cause) {
            cause = cause.getCause();
            if (cause instanceof SQLException) {
                break;
            }
        }
        throw new KustvaktException(StatusCodes.DB_INSERT_FAILED,
                cause.getMessage());
    }
    if (type.equals(ResourceType.PUBLISHED)) {
        publishQuery(queryId);
    }
}
/**
 * Serializes a collection (corpus) query into its KoralQuery JSON form.
 *
 * @throws KustvaktException INVALID_ARGUMENT when the corpus query
 *         cannot be converted to JSON
 */
private String serializeCorpusQuery (String corpusQuery)
throws KustvaktException {
QuerySerializer serializer = new QuerySerializer();
serializer.setCollection(corpusQuery);
String koralQuery;
try {
koralQuery = serializer.convertCollectionToJson();
}
catch (JsonProcessingException e) {
throw new KustvaktException(StatusCodes.INVALID_ARGUMENT,
"Invalid argument: " + corpusQuery, corpusQuery);
}
if (DEBUG) {
jlog.debug(koralQuery);
}
return koralQuery;
}
/**
 * Serializes a query string in the given query language into its
 * KoralQuery JSON form.
 */
private String serializeQuery (String query, String queryLanguage)
        throws KustvaktException {
    String koralQuery =
            new QuerySerializer().setQuery(query, queryLanguage).toJSON();
    if (DEBUG) {
        jlog.debug(koralQuery);
    }
    return koralQuery;
}
/**
 * Determines the corpus access level a query requires: ALL if it can
 * match documents restricted to the all-only licenses, PUB for
 * public-only licenses, FREE otherwise. Cached virtual corpora are
 * checked through a "referTo" query instead of their raw KoralQuery.
 */
public CorpusAccess determineRequiredAccess (boolean isCached, String name,
        String koralQuery) throws KustvaktException {
    if (isCached) {
        KoralCollectionQueryBuilder builder =
                new KoralCollectionQueryBuilder();
        builder.with("referTo " + name);
        koralQuery = builder.toJSON();
        if (DEBUG) {
            jlog.debug("Determine vc access with vc ref: " + koralQuery);
        }
    }
    if (findDocWithLicense(koralQuery, config.getAllOnlyRegex())) {
        return CorpusAccess.ALL;
    }
    if (findDocWithLicense(koralQuery, config.getPublicOnlyRegex())) {
        return CorpusAccess.PUB;
    }
    return CorpusAccess.FREE;
}
/**
 * Checks whether the given KoralQuery, restricted to documents whose
 * availability matches the given license regex, matches at least one
 * document according to the search backend's statistics.
 *
 * @param koralQuery the base KoralQuery (JSON)
 * @param license an availability regex (e.g. config.getAllOnlyRegex())
 * @return true if at least one matching document exists
 * @throws KustvaktException if statistics cannot be parsed
 */
private boolean findDocWithLicense (String koralQuery, String license)
        throws KustvaktException {
    KoralCollectionQueryBuilder koral = new KoralCollectionQueryBuilder();
    koral.setBaseQuery(koralQuery);
    koral.with("availability=/" + license + "/");
    String json = koral.toJSON();
    // Ask the search backend how many documents satisfy the restriction.
    String statistics = krill.getStatistics(json);
    JsonNode node = JsonUtils.readTree(statistics);
    int numberOfDoc = node.at("/documents").asInt();
    if (DEBUG) {
        jlog.debug(
                "License: " + license + ", number of docs: " + numberOfDoc);
    }
    // FIX: replaced the redundant "(x > 0) ? true : false" with the
    // boolean expression itself.
    return numberOfDoc > 0;
}
/**
 * Shares a query with a user group by creating an ACTIVE access record
 * and turning the query into a PROJECT resource.
 *
 * Only the query owner or an admin may share, and the sharer must also
 * be a query-access admin of the target group (or an admin).
 *
 * @throws KustvaktException NO_RESOURCE_FOUND when the query does not
 *         exist, AUTHORIZATION_FAILED on permission failure,
 *         DB_INSERT_FAILED when creating the access record fails
 */
public void shareQuery (String username, String createdBy, String queryName,
        String groupName) throws KustvaktException {
    QueryDO query = queryDao.retrieveQueryByName(queryName, createdBy);
    if (query == null) {
        String code = createdBy + "/" + queryName;
        throw new KustvaktException(StatusCodes.NO_RESOURCE_FOUND,
                "Query " + code + " is not found.",
                String.valueOf(code));
    }
    if (!username.equals(query.getCreatedBy())
            && !adminDao.isAdmin(username)) {
        throw new KustvaktException(StatusCodes.AUTHORIZATION_FAILED,
                "Unauthorized operation for user: " + username, username);
    }
    UserGroup userGroup =
            userGroupService.retrieveUserGroupByName(groupName);
    // The sharer must administrate query access for the target group.
    if (!isQueryAccessAdmin(userGroup, username)
            && !adminDao.isAdmin(username)) {
        throw new KustvaktException(StatusCodes.AUTHORIZATION_FAILED,
                "Unauthorized operation for user: " + username, username);
    }
    try {
        accessDao.createAccessToQuery(query, userGroup, username,
                QueryAccessStatus.ACTIVE);
    }
    catch (Exception e) {
        // FIX: the old unwrap loop could end with cause == null (when e
        // has no cause) and NPE on cause.getMessage(); this walk looks
        // for an SQLException but never leaves cause null.
        Throwable cause = e;
        while (cause.getCause() != null && cause.getCause() != cause) {
            cause = cause.getCause();
            if (cause instanceof SQLException) {
                break;
            }
        }
        throw new KustvaktException(StatusCodes.DB_INSERT_FAILED,
                cause.getMessage());
    }
    // Sharing turns the query into a project resource.
    queryDao.editQuery(query, null, ResourceType.PROJECT, null, null,
            null, null, null, query.isCached(), null, null);
}
/**
 * Returns true if the given user is one of the query-access admins of
 * the given group.
 */
private boolean isQueryAccessAdmin (UserGroup userGroup, String username)
        throws KustvaktException {
    List<UserGroupMember> accessAdmins =
            userGroupService.retrieveQueryAccessAdmins(userGroup);
    for (UserGroupMember member : accessAdmins) {
        if (username.equals(member.getUserId())) {
            return true;
        }
    }
    return false;
}
// public void editVCAccess (VirtualCorpusAccess access, String
// username)
// throws KustvaktException {
//
// // get all the VCA admins
// UserGroup userGroup = access.getUserGroup();
// List<UserGroupMember> accessAdmins =
// userGroupService.retrieveVCAccessAdmins(userGroup);
//
// User user = authManager.getUser(username);
// if (!user.isSystemAdmin()) {
// throw new KustvaktException(StatusCodes.AUTHORIZATION_FAILED,
// "Unauthorized operation for user: " + username, username);
// }
// }
/**
 * Lists query-access records visible to the user: all records for
 * admins, otherwise the active records of every group the user
 * administrates query access for.
 */
public List<QueryAccessDto> listQueryAccessByUsername (String username)
        throws KustvaktException {
    if (adminDao.isAdmin(username)) {
        return accessConverter
                .createQueryAccessDto(accessDao.retrieveAllAccess());
    }
    List<QueryAccess> accessList = new ArrayList<>();
    for (UserGroup group : userGroupService.retrieveUserGroup(username)) {
        if (isQueryAccessAdmin(group, username)) {
            accessList.addAll(
                    accessDao.retrieveActiveAccessByGroup(group.getId()));
        }
    }
    return accessConverter.createQueryAccessDto(accessList);
}
/**
 * Lists access records of a specific query: all records for admins;
 * otherwise only the active records of groups the caller administrates
 * query access for.
 */
public List<QueryAccessDto> listQueryAccessByQuery (String username,
String queryCreator, String queryName) throws KustvaktException {
List<QueryAccess> accessList;
if (adminDao.isAdmin(username)) {
accessList = accessDao.retrieveAllAccessByQuery(queryCreator, queryName);
}
else {
// Non-admins only see accesses of groups they administrate.
accessList = accessDao.retrieveActiveAccessByQuery(queryCreator, queryName);
List<QueryAccess> filteredAccessList = new ArrayList<>();
for (QueryAccess access : accessList) {
UserGroup userGroup = access.getUserGroup();
if (isQueryAccessAdmin(userGroup, username)) {
filteredAccessList.add(access);
}
}
accessList = filteredAccessList;
}
return accessConverter.createQueryAccessDto(accessList);
}
/**
 * Lists access records of a group identified by its numeric id.
 *
 * @deprecated superseded by the group-name based variant
 *             {@code listQueryAccessByGroup(String, String)} below.
 */
@Deprecated
public List<QueryAccessDto> listVCAccessByGroup (String username,
int groupId) throws KustvaktException {
UserGroup userGroup = userGroupService.retrieveUserGroupById(groupId);
List<QueryAccess> accessList;
if (adminDao.isAdmin(username)) {
accessList = accessDao.retrieveAllAccessByGroup(groupId);
}
else if (isQueryAccessAdmin(userGroup, username)) {
accessList = accessDao.retrieveActiveAccessByGroup(groupId);
}
else {
throw new KustvaktException(StatusCodes.AUTHORIZATION_FAILED,
"Unauthorized operation for user: " + username, username);
}
return accessConverter.createQueryAccessDto(accessList);
}
public List<QueryAccessDto> listQueryAccessByGroup (String username,
String groupName) throws KustvaktException {
UserGroup userGroup =
userGroupService.retrieveUserGroupByName(groupName);
List<QueryAccess> accessList;
if (adminDao.isAdmin(username)) {
accessList = accessDao.retrieveAllAccessByGroup(userGroup.getId());
}
else if (isQueryAccessAdmin(userGroup, username)) {
accessList =
accessDao.retrieveActiveAccessByGroup(userGroup.getId());
}
else {
throw new KustvaktException(StatusCodes.AUTHORIZATION_FAILED,
"Unauthorized operation for user: " + username, username);
}
return accessConverter.createQueryAccessDto(accessList);
}
public void deleteQueryAccess (int accessId, String username)
throws KustvaktException {
QueryAccess access = accessDao.retrieveAccessById(accessId);
UserGroup userGroup = access.getUserGroup();
if (isQueryAccessAdmin(userGroup, username)
|| adminDao.isAdmin(username)) {
accessDao.deleteAccess(access, username);
}
else {
throw new KustvaktException(StatusCodes.AUTHORIZATION_FAILED,
"Unauthorized operation for user: " + username, username);
}
}
public JsonNode retrieveKoralQuery (String username, String queryName,
String createdBy, QueryType queryType) throws KustvaktException {
QueryDO query = searchQueryByName(username, queryName, createdBy, queryType);
String koralQuery = query.getKoralQuery();
JsonNode kq = JsonUtils.readTree(koralQuery);
return kq;
}
public QueryDO searchQueryByName (String username, String queryName,
String createdBy, QueryType queryType) throws KustvaktException {
QueryDO query = queryDao.retrieveQueryByName(queryName, createdBy);
if (query == null) {
String code = createdBy + "/" + queryName;
throw new KustvaktException(StatusCodes.NO_RESOURCE_FOUND,
queryType.displayName()+ " " + code + " is not found.",
String.valueOf(code));
}
checkQueryAccess(query, username);
return query;
}
    /**
     * Resolves a query by name (with access check) and converts it to a DTO.
     * Corpus statistics are currently not computed — the disabled code below
     * shows the previous implementation — so the DTO's statistics field is
     * always null.
     */
    public QueryDto retrieveQueryByName (String username, String queryName,
            String createdBy, QueryType queryType) throws KustvaktException {
        QueryDO query = searchQueryByName(username, queryName, createdBy, queryType);
        // String json = query.getKoralQuery();
        String statistics = null;
        // Disabled, presumably because statistics were slow for large
        // virtual corpora — TODO confirm before re-enabling:
        // long start,end;
        // start = System.currentTimeMillis();
        // if (query.getQueryType().equals(QueryType.VIRTUAL_CORPUS)) {
        // statistics = krill.getStatistics(json);
        // }
        // end = System.currentTimeMillis();
        // jlog.debug("{} statistics duration: {}", queryName, (end - start));
        return converter.createQueryDto(query, statistics);
    }
public QueryDto searchQueryById (String username, int queryId)
throws KustvaktException {
QueryDO query = queryDao.retrieveQueryById(queryId);
checkQueryAccess(query, username);
// String json = query.getKoralQuery();
// String statistics = krill.getStatistics(json);
return converter.createQueryDto(query, null);
}
    /**
     * Verifies that the user may read the given query.
     *
     * Admins and the query owner always pass. Otherwise: PRIVATE queries,
     * and PROJECT queries for which the user has no group-based access,
     * are rejected; PUBLISHED queries are readable by everyone and — as a
     * side effect — non-guest readers are added to the query's hidden
     * auto-group; any remaining type (SYSTEM) is readable by all.
     *
     * @throws KustvaktException AUTHORIZATION_FAILED if access is denied
     */
    private void checkQueryAccess (QueryDO query, String username)
            throws KustvaktException {
        ResourceType type = query.getType();
        if (!adminDao.isAdmin(username)
                && !username.equals(query.getCreatedBy())) {
            if (type.equals(ResourceType.PRIVATE)
                    || (type.equals(ResourceType.PROJECT)
                            && !hasAccess(username, query.getId()))) {
                throw new KustvaktException(StatusCodes.AUTHORIZATION_FAILED,
                        "Unauthorized operation for user: " + username,
                        username);
            }
            else if (ResourceType.PUBLISHED.equals(type)
                    && !username.equals("guest")) {
                // add user in the query's auto (hidden) group so the read
                // is recorded as a membership
                UserGroup userGroup = userGroupService
                        .retrieveHiddenUserGroupByQuery(query.getId());
                try {
                    userGroupService.addGroupMember(username, userGroup,
                            "system", GroupMemberStatus.ACTIVE);
                    // member roles are not set (not necessary)
                }
                catch (KustvaktException e) {
                    // member exists
                    // skip adding user to hidden group
                }
            }
            // else VirtualCorpusType.SYSTEM
        }
    }
private boolean hasAccess (String username, int queryId)
throws KustvaktException {
UserGroup userGroup;
List<QueryAccess> accessList =
accessDao.retrieveActiveAccessByQuery(queryId);
for (QueryAccess access : accessList) {
userGroup = access.getUserGroup();
if (userGroupService.isMember(username, userGroup)) {
return true;
}
}
return false;
}
}
|
class Solution {
public:
    // LeetCode 11 "Container With Most Water": two-pointer scan. The area
    // between two lines is bounded by the shorter one, so moving the
    // shorter side inward is the only way the area can grow.
    int maxArea(vector<int>& height) {
        int best = 0;
        int lo = 0;
        int hi = static_cast<int>(height.size()) - 1;
        while (lo < hi) {
            int shorter = height[lo] < height[hi] ? height[lo] : height[hi];
            best = max(best, shorter * (hi - lo));
            if (height[lo] < height[hi]) {
                ++lo;
            } else {
                --hi;
            }
        }
        return best;  // 0 for fewer than two lines
    }
};
|
def generate_model_summary(models=None):
    """Build an empty per-model summary mapping.

    Args:
        models: Optional iterable of model names. When omitted (the
            backward-compatible default), the built-in set of registered
            models is used.

    Returns:
        dict mapping each model name to a fresh empty list.
    """
    registered_models = [
        'DocumentReferencesType_model',
        'DocumentTitleType_model',
        'DocumentTrackingType_model',
        'DocumentTypeType_model',
        'EngineType_model',
        'EntitlementType_model',
    ]
    names = registered_models if models is None else list(models)
    # A comprehension guarantees each model gets its own list instance.
    return {model: [] for model in names}
|
#!/usr/bin/env bash
# Run predict on the test set for all the networks and assemble a final
# submission file.
#
# Requires: conda environment "py27" and the per-model predict scripts below.
set -euo pipefail  # abort on command failure, unset vars, and pipe failures

source activate py27
# ${PYTHONPATH:-} keeps 'set -u' happy when PYTHONPATH is not exported yet.
export PYTHONPATH="$(pwd):${PYTHONPATH:-}"

# Per-model predictions; each script writes its own outputs.
pushd asanakoy >/dev/null
bash predict_scratch.sh
bash predict_vgg11v1.sh
popd >/dev/null

pushd albu >/dev/null
bash predict.sh
popd >/dev/null

pushd ternaus >/dev/null
bash predict.sh
popd >/dev/null

echo "Generate final ensemble"
python generate_sub_final_ensemble.py -j=4

source deactivate
|
#!/bin/sh
# Print the OIDs of all large objects in ticketdb, one per line.
set -eu

# Use an unpredictable temp file instead of a fixed /tmp path, and remove
# it on every exit path.
tmpfile=$(mktemp) || exit 1
trap 'rm -f -- "$tmpfile"' EXIT

# "\lo_list" data rows start with a space followed by the OID; keep those
# rows, then extract the digit runs.
psql -U ticketuser ticketdb -c "\lo_list" | grep '^ [0-9]' | grep -o '[0-9]\+' > "$tmpfile"

while IFS= read -r line; do
  printf '%s\n' "$line"
done < "$tmpfile"
|
package com.g4mesoft.util;
public abstract class GameEvent {
private final Object sender;
boolean cancel;
public GameEvent(Object sender) {
this.sender = sender;
cancel = false;
}
public void cancelEvent() {
cancel = true;
}
public Object getSender() {
return sender;
}
}
|
import nltk
from nltk.corpus import wordnet


def _is_synonym_of_any(token, targets):
    """Return True if any WordNet lemma of ``token`` appears in ``targets``."""
    for synset in wordnet.synsets(token):
        for lemma in synset.lemmas():
            if lemma.name() in targets:
                return True
    return False


def score_answer(student_answer, answer_key):
    """Score a student's answer against an answer key.

    Each student token earns at most ONE point, awarded when it appears
    verbatim in the key or is a WordNet synonym of a key token. (The
    original code incremented the score once per remaining synset after a
    synonym was found, over-counting a single token many times.)

    Args:
        student_answer: the student's free-text answer.
        answer_key: the reference answer.

    Returns:
        float: points divided by the key's token count, rounded to 2
        decimals; 0.0 for an empty key instead of ZeroDivisionError.
    """
    student_tokens = nltk.word_tokenize(student_answer)
    answer_key_tokens = nltk.word_tokenize(answer_key)
    if not answer_key_tokens:
        return 0.0
    score = 0
    for token in student_tokens:
        if token in answer_key_tokens or _is_synonym_of_any(
                token, answer_key_tokens):
            score += 1
    return round(score / len(answer_key_tokens), 2)


if __name__ == "__main__":
    student_answer = "The cat ran across the street."
    answer_key = "The cat ran over the bridge."
    overall_score = score_answer(student_answer, answer_key)
    print("Overall score: {}".format(overall_score))
|
<reponame>practicajs/practica<gh_stars>10-100
import isPortReachable from "is-port-reachable";
import path from "path";
import dockerCompose from "docker-compose";
import { execSync } from "child_process";
module.exports = async () => {
console.time("global-setup");
// ️️️✅ Best Practice: Speed up during development, if already live then do nothing
const isDBReachable = await isPortReachable(54310);
if (!isDBReachable) {
// ️️️✅ Best Practice: Start the infrastructure within a test hook - No failures occur because the DB is down
await dockerCompose.upAll({
cwd: path.join(__dirname),
log: true,
});
await dockerCompose.exec(
"database",
["sh", "-c", "until pg_isready ; do sleep 1; done"],
{
cwd: path.join(__dirname),
}
);
// ️️️✅ Best Practice: Use npm script for data seeding and migrations
execSync("npm run db:migrate");
// ✅ Best Practice: Seed only metadata and not test record, read "Dealing with data" section for further information
execSync("npm run db:seed");
}
// 👍🏼 We're ready
console.timeEnd("global-setup");
};
|
#!/bin/bash
#creates list of parallel jobs to construct gene/exon coverage sums per all samples
#in a Snaptron compilation using a specific annotation
#1) parameter is the TSV of disjoint exon coordinates, format (tab-delimited):
#chrm,start,end,gene_name
#2) parameter is the Snaptron compilation name string (e.g. encode1159)
#3) parameter is the number of splits you want
#get this script's path (split $0 on "/", drop the basename, rejoin)
p=`perl -e '@f=split("/","'$0'"); pop(@f); print "".join("/",@f)."\n";'`
#echo $p
#with auto determined ~balanced splits keeping genes together w/ individual headers on separate filesystems, assumes 1-base input; used for both human/mouse
# Pipeline overview (NOTE(review): behavior inferred from reading the perl;
# confirm against an actual run before changing anything):
#  1) the first perl pass prepends the input filename as a sentinel line so
#     the second pass can `wc -l` it to size the splits, and rewrites each
#     row as "chrm:start-end<TAB>1<TAB>gene";
#  2) the second pass partitions rows into ~equal splits without splitting
#     a gene across files, writes each split (with a header) to a separate
#     /dataB, /dataC, ... filesystem, and prints one
#     bulk_base_intervals.py command line per split.
cat <(echo "$1") $1 | perl -ne 'chomp; $s=$_; if($s=~/\.tsv$/) { print "$s\n"; next; } @f=split(/\t/,$s); ($c,$s,$e,$g)=split(/\t/,$s); print "$c:$s-$e\t1\t$g\n";' | perl -ne 'BEGIN { $g="NA"; open(OUT,">dummyfile"); $i=0; $i2=-1; $A=66; } chomp; $ls=$_; if($ls=~/\.tsv$/) { $fn=$ls; $wc=`wc -l $fn`; @fn=split(/\//,$fn); $fn=pop(@fn); chomp($wc); $nl=int($wc/'${3}'); $mnl='${3}'*$nl; next; } ($c,$m,$g)=split(/\t/,$ls); $SPLIT=1 if($i++ % $nl == 0 && $i < $mnl); if($SPLIT && $g ne $pg) { $SPLIT=undef; $i2++; close(OUT); $fs="/data".chr($A++); $nfn="$fs/'${2}'.$i2"; print "python client/bulk_base_intervals.py --bulk-query-file $nfn --endpoint bases --datasrc '${2}' > $nfn.run 2>&1\n"; open(OUT,">$nfn"); print OUT "region\tcontains\tgroup\n";} $pg=$g; if($c !~ /region/) {$c1=$c; $c1=~s/-/:/; $g.=":".$c1;} print OUT "$c\t$m\t$g\n"; END { `rm dummyfile`;}'
|
var NAVTREEINDEX15 =
{
"_neon_end_to_end_tests_8cpp.xhtml#a4892abb94679c653f8cb78efb6f17152":[8,0,1,10,4,0,1,75],
"_neon_end_to_end_tests_8cpp.xhtml#a4ead8cf58c0c1bbd12cc5e6acfc08338":[8,0,1,10,4,0,1,13],
"_neon_end_to_end_tests_8cpp.xhtml#a519174862e574b4c5b16d854e9703a07":[8,0,1,10,4,0,1,46],
"_neon_end_to_end_tests_8cpp.xhtml#a522a440dc1e26bed45fd3f68be8484e9":[8,0,1,10,4,0,1,95],
"_neon_end_to_end_tests_8cpp.xhtml#a53dfedd8cce454accdd7b637c4f8343a":[8,0,1,10,4,0,1,12],
"_neon_end_to_end_tests_8cpp.xhtml#a54af9f6f8541f7b1aa36b68909c5a454":[8,0,1,10,4,0,1,40],
"_neon_end_to_end_tests_8cpp.xhtml#a56bf4a940fe54032d30583f37dcbc173":[8,0,1,10,4,0,1,66],
"_neon_end_to_end_tests_8cpp.xhtml#a579a7f2b91a1d20355c5c23a628bb531":[8,0,1,10,4,0,1,44],
"_neon_end_to_end_tests_8cpp.xhtml#a59af9ee6527696d071e8dd804b2a72f8":[8,0,1,10,4,0,1,77],
"_neon_end_to_end_tests_8cpp.xhtml#a5bf6ca74dabc25f0c014e6966be3956b":[8,0,1,10,4,0,1,32],
"_neon_end_to_end_tests_8cpp.xhtml#a5f67f26350dfe7a8f74ddc1ddf600741":[8,0,1,10,4,0,1,62],
"_neon_end_to_end_tests_8cpp.xhtml#a64c1dd1b6dd60be9f4a16db9c8f427a5":[8,0,1,10,4,0,1,99],
"_neon_end_to_end_tests_8cpp.xhtml#a66bf08ef789c18b8a63d0fceaefbeea1":[8,0,1,10,4,0,1,17],
"_neon_end_to_end_tests_8cpp.xhtml#a6974361ca12ee218b8f0c62e92acd032":[8,0,1,10,4,0,1,81],
"_neon_end_to_end_tests_8cpp.xhtml#a6a1f2d3bb6bb5dfae6042ba3bf0be92e":[8,0,1,10,4,0,1,11],
"_neon_end_to_end_tests_8cpp.xhtml#a6befe939868d35975b47bce59eaa8b73":[8,0,1,10,4,0,1,49],
"_neon_end_to_end_tests_8cpp.xhtml#a6cf0bfeae35f405a5dc67ed57affd9d9":[8,0,1,10,4,0,1,24],
"_neon_end_to_end_tests_8cpp.xhtml#a756295623f8ac16a2beb184a3030e3aa":[8,0,1,10,4,0,1,21],
"_neon_end_to_end_tests_8cpp.xhtml#a7941253beab47fec145e4ba5d7b94b76":[8,0,1,10,4,0,1,53],
"_neon_end_to_end_tests_8cpp.xhtml#a7afa245d3002692d0c5a51dae551f906":[8,0,1,10,4,0,1,52],
"_neon_end_to_end_tests_8cpp.xhtml#a7cf4601d23d9558a0c8b9a5a330ae854":[8,0,1,10,4,0,1,15],
"_neon_end_to_end_tests_8cpp.xhtml#a815b32e9701ab457eb99845aad24550c":[8,0,1,10,4,0,1,68],
"_neon_end_to_end_tests_8cpp.xhtml#a819925dc2f98ec56b046f90eb7c0dc43":[8,0,1,10,4,0,1,69],
"_neon_end_to_end_tests_8cpp.xhtml#a86faac93dccd9c6833273fbd7d1f3beb":[8,0,1,10,4,0,1,51],
"_neon_end_to_end_tests_8cpp.xhtml#a8c80ce683482e071b23ad8c366ec2200":[8,0,1,10,4,0,1,3],
"_neon_end_to_end_tests_8cpp.xhtml#a8dc2c199ff4fa47a459e170886d1fee2":[8,0,1,10,4,0,1,48],
"_neon_end_to_end_tests_8cpp.xhtml#a8e13f191eb837fdbb3390336154c8fc6":[8,0,1,10,4,0,1,50],
"_neon_end_to_end_tests_8cpp.xhtml#a928273b32d998ba9b1daf93587c46fa1":[8,0,1,10,4,0,1,94],
"_neon_end_to_end_tests_8cpp.xhtml#a991623e504ae7897afef650efe5922a6":[8,0,1,10,4,0,1,64],
"_neon_end_to_end_tests_8cpp.xhtml#a99e26266538c44c9eb1d28f5095b79d8":[8,0,1,10,4,0,1,85],
"_neon_end_to_end_tests_8cpp.xhtml#a9d3555adb8d3f83d5733f4ac8354b77c":[8,0,1,10,4,0,1,9],
"_neon_end_to_end_tests_8cpp.xhtml#a9d5b9e392c2744826c8bb8df575975f1":[8,0,1,10,4,0,1,84],
"_neon_end_to_end_tests_8cpp.xhtml#a9daf87e1460f9304964fcc4f402fb259":[8,0,1,10,4,0,1,10],
"_neon_end_to_end_tests_8cpp.xhtml#a9f327ea14c666c031452bc317ab55023":[8,0,1,10,4,0,1,33],
"_neon_end_to_end_tests_8cpp.xhtml#aa17c111024b09c13383792b5ed933ab9":[8,0,1,10,4,0,1,55],
"_neon_end_to_end_tests_8cpp.xhtml#aa3b1a1d76c1d2d0cb3578af2472d5786":[8,0,1,10,4,0,1,27],
"_neon_end_to_end_tests_8cpp.xhtml#aa69384e2c2aa50739b8fb8e9b2fffaec":[8,0,1,10,4,0,1,36],
"_neon_end_to_end_tests_8cpp.xhtml#aa71a3ef46b213ce5d270e60b0ecc8557":[8,0,1,10,4,0,1,31],
"_neon_end_to_end_tests_8cpp.xhtml#aa77db7cb5a8cdabc3f3f560258f4f21c":[8,0,1,10,4,0,1,8],
"_neon_end_to_end_tests_8cpp.xhtml#ab26c8f73114673f5af878c0cdd52fc54":[8,0,1,10,4,0,1,88],
"_neon_end_to_end_tests_8cpp.xhtml#ab2f2a585b7c20e6410236bc087eac215":[8,0,1,10,4,0,1,67],
"_neon_end_to_end_tests_8cpp.xhtml#ab3c8acea06815a62e228bb9319ddcc60":[8,0,1,10,4,0,1,25],
"_neon_end_to_end_tests_8cpp.xhtml#ab481307a1087080d658579c421ccae14":[8,0,1,10,4,0,1,37],
"_neon_end_to_end_tests_8cpp.xhtml#ab59caffe2ee6be46c08766c055420f17":[8,0,1,10,4,0,1,98],
"_neon_end_to_end_tests_8cpp.xhtml#ab80e6a797df04fbd1dadf34b5288effd":[8,0,1,10,4,0,1,38],
"_neon_end_to_end_tests_8cpp.xhtml#ab921327fa50673c42e5d19699a8d7296":[8,0,1,10,4,0,1,28],
"_neon_end_to_end_tests_8cpp.xhtml#ab9ce61b78b416e66d44acb22137f3999":[8,0,1,10,4,0,1,56],
"_neon_end_to_end_tests_8cpp.xhtml#abed8aefee4420b00529b765385109a07":[8,0,1,10,4,0,1,39],
"_neon_end_to_end_tests_8cpp.xhtml#abf342e51b788bf8e2bf070d18bedae96":[8,0,1,10,4,0,1,6],
"_neon_end_to_end_tests_8cpp.xhtml#abfa50e55ee160bfc64d8c3bb3dc40cc4":[8,0,1,10,4,0,1,92],
"_neon_end_to_end_tests_8cpp.xhtml#ac0981848e4ae57729f14f72bd4caa9f8":[8,0,1,10,4,0,1,0],
"_neon_end_to_end_tests_8cpp.xhtml#ac1cc3b02303e679b5a006f9b98e6aa79":[8,0,1,10,4,0,1,74],
"_neon_end_to_end_tests_8cpp.xhtml#ac58542c104ba70a65e4e125e12a801fa":[8,0,1,10,4,0,1,29],
"_neon_end_to_end_tests_8cpp.xhtml#ac88dc69623e6bacd6204fd86ed16eae2":[8,0,1,10,4,0,1,80],
"_neon_end_to_end_tests_8cpp.xhtml#aca41e6b9e1bda61b09b635c5857aee91":[8,0,1,10,4,0,1,16],
"_neon_end_to_end_tests_8cpp.xhtml#acd1426a216eb03ea1cf4b9b548ef3131":[8,0,1,10,4,0,1,71],
"_neon_end_to_end_tests_8cpp.xhtml#acdb56c7ab178e350bae6bd5dbe006f1d":[8,0,1,10,4,0,1,90],
"_neon_end_to_end_tests_8cpp.xhtml#ad03398894656de4577c25a38ebcacfc5":[8,0,1,10,4,0,1,41],
"_neon_end_to_end_tests_8cpp.xhtml#ad1404f851fb9559ae9b3a403d21b7b6a":[8,0,1,10,4,0,1,34],
"_neon_end_to_end_tests_8cpp.xhtml#ad16c7915847aa89511b528d543e66fb2":[8,0,1,10,4,0,1,86],
"_neon_end_to_end_tests_8cpp.xhtml#ad3804af7f4121ef26a388c1c9f81e06b":[8,0,1,10,4,0,1,23],
"_neon_end_to_end_tests_8cpp.xhtml#ad3f31806f100cfeb7ea2ae529df55e34":[8,0,1,10,4,0,1,30],
"_neon_end_to_end_tests_8cpp.xhtml#ad52602d8dc02485735ac085c0b8dbe0f":[8,0,1,10,4,0,1,43],
"_neon_end_to_end_tests_8cpp.xhtml#ad55af39009f2b0f0191b8854da60f86f":[8,0,1,10,4,0,1,70],
"_neon_end_to_end_tests_8cpp.xhtml#ad792413aef241a122cffe07dd1552477":[8,0,1,10,4,0,1,63],
"_neon_end_to_end_tests_8cpp.xhtml#ad9decb65c57e3ac6f9996632242d29f2":[8,0,1,10,4,0,1,58],
"_neon_end_to_end_tests_8cpp.xhtml#ada422a73ac4e68bcb1b1b1f0b44028d9":[8,0,1,10,4,0,1,79],
"_neon_end_to_end_tests_8cpp.xhtml#adc9fde6d755f2de04d92687f59392f2f":[8,0,1,10,4,0,1,72],
"_neon_end_to_end_tests_8cpp.xhtml#ae15bf71385d1355cc0d85af4d61a008e":[8,0,1,10,4,0,1,19],
"_neon_end_to_end_tests_8cpp.xhtml#ae2dc0f1995ef18fadd72c9b6c74edce7":[8,0,1,10,4,0,1,57],
"_neon_end_to_end_tests_8cpp.xhtml#ae3a453b6efdbf75df7f7b602ee83f1b6":[8,0,1,10,4,0,1,54],
"_neon_end_to_end_tests_8cpp.xhtml#ae47365701f186c00b63d50967d29fcd8":[8,0,1,10,4,0,1,65],
"_neon_end_to_end_tests_8cpp.xhtml#ae4dcafa0cac6f73ce4bd9fffa692280d":[8,0,1,10,4,0,1,26],
"_neon_end_to_end_tests_8cpp.xhtml#aec48a5a5ab6ecf86c8db0f6d0859fe2f":[8,0,1,10,4,0,1,93],
"_neon_end_to_end_tests_8cpp.xhtml#aeeba71439ec1a08d39e80eb365a473fa":[8,0,1,10,4,0,1,96],
"_neon_end_to_end_tests_8cpp.xhtml#afb0e1534489f21f7fe9e5055c3d15cf0":[8,0,1,10,4,0,1,45],
"_neon_end_to_end_tests_8cpp.xhtml#afe48c20bc9f2e0b86d00806b5e17f2a4":[8,0,1,10,4,0,1,1],
"_neon_end_to_end_tests_8cpp_source.xhtml":[8,0,1,10,4,0,1],
"_neon_floor_float_workload_8cpp.xhtml":[8,0,1,10,4,1,32],
"_neon_floor_float_workload_8cpp_source.xhtml":[8,0,1,10,4,1,32],
"_neon_floor_float_workload_8hpp.xhtml":[8,0,1,10,4,1,33],
"_neon_floor_float_workload_8hpp_source.xhtml":[8,0,1,10,4,1,33],
"_neon_fully_connected_workload_8cpp.xhtml":[8,0,1,10,4,1,34],
"_neon_fully_connected_workload_8cpp.xhtml#a0b7897a2a04016aa7fa24e2a1d10e944":[8,0,1,10,4,1,34,0],
"_neon_fully_connected_workload_8cpp_source.xhtml":[8,0,1,10,4,1,34],
"_neon_fully_connected_workload_8hpp.xhtml":[8,0,1,10,4,1,35],
"_neon_fully_connected_workload_8hpp.xhtml#a0b7897a2a04016aa7fa24e2a1d10e944":[8,0,1,10,4,1,35,1],
"_neon_fully_connected_workload_8hpp_source.xhtml":[8,0,1,10,4,1,35],
"_neon_greater_workload_8cpp.xhtml":[8,0,1,10,4,1,36],
"_neon_greater_workload_8cpp.xhtml#ad536149438b0481b7278ad741e18fb5a":[8,0,1,10,4,1,36,0],
"_neon_greater_workload_8cpp_source.xhtml":[8,0,1,10,4,1,36],
"_neon_greater_workload_8hpp.xhtml":[8,0,1,10,4,1,37],
"_neon_greater_workload_8hpp.xhtml#a18b8b3bd9e39c84e36ab560978ab64c7":[8,0,1,10,4,1,37,1],
"_neon_greater_workload_8hpp.xhtml#a9b0bb8592cd6e6cb693d305825fae448":[8,0,1,10,4,1,37,2],
"_neon_greater_workload_8hpp.xhtml#ad536149438b0481b7278ad741e18fb5a":[8,0,1,10,4,1,37,3],
"_neon_greater_workload_8hpp_source.xhtml":[8,0,1,10,4,1,37],
"_neon_instance_normalization_workload_8cpp.xhtml":[8,0,1,10,4,1,38],
"_neon_instance_normalization_workload_8cpp.xhtml#aea722abe239545030f4c6fe4e083816f":[8,0,1,10,4,1,38,0],
"_neon_instance_normalization_workload_8cpp_source.xhtml":[8,0,1,10,4,1,38],
"_neon_instance_normalization_workload_8hpp.xhtml":[8,0,1,10,4,1,39],
"_neon_instance_normalization_workload_8hpp.xhtml#aea722abe239545030f4c6fe4e083816f":[8,0,1,10,4,1,39,1],
"_neon_instance_normalization_workload_8hpp_source.xhtml":[8,0,1,10,4,1,39],
"_neon_interceptor_scheduler_8cpp.xhtml":[8,0,1,10,4,5],
"_neon_interceptor_scheduler_8cpp_source.xhtml":[8,0,1,10,4,5],
"_neon_interceptor_scheduler_8hpp.xhtml":[8,0,1,10,4,6],
"_neon_interceptor_scheduler_8hpp_source.xhtml":[8,0,1,10,4,6],
"_neon_json_printer_tests_8cpp.xhtml":[8,0,1,10,4,0,2],
"_neon_json_printer_tests_8cpp.xhtml#ac92bd62119a283e105a4f2087fba2dbc":[8,0,1,10,4,0,2,0],
"_neon_json_printer_tests_8cpp_source.xhtml":[8,0,1,10,4,0,2],
"_neon_l2_normalization_float_workload_8cpp.xhtml":[8,0,1,10,4,1,40],
"_neon_l2_normalization_float_workload_8cpp.xhtml#ae838df3960d2b5d18d73ed2a07aee917":[8,0,1,10,4,1,40,0],
"_neon_l2_normalization_float_workload_8cpp_source.xhtml":[8,0,1,10,4,1,40],
"_neon_l2_normalization_float_workload_8hpp.xhtml":[8,0,1,10,4,1,41],
"_neon_l2_normalization_float_workload_8hpp.xhtml#ae838df3960d2b5d18d73ed2a07aee917":[8,0,1,10,4,1,41,1],
"_neon_l2_normalization_float_workload_8hpp_source.xhtml":[8,0,1,10,4,1,41],
"_neon_layer_support_8cpp.xhtml":[8,0,1,10,4,7],
"_neon_layer_support_8cpp.xhtml#af0383f6bb43d6d5df6f3265367b6ebf9":[8,0,1,10,4,7,0],
"_neon_layer_support_8cpp_source.xhtml":[8,0,1,10,4,7],
"_neon_layer_support_8hpp.xhtml":[8,0,1,10,4,8],
"_neon_layer_support_8hpp_source.xhtml":[8,0,1,10,4,8],
"_neon_layer_support_tests_8cpp.xhtml":[8,0,1,10,4,0,3],
"_neon_layer_support_tests_8cpp.xhtml#a02d92330ef1c09327f5e099b699c6e42":[8,0,1,10,4,0,3,2],
"_neon_layer_support_tests_8cpp.xhtml#a15137eb9f472e32ce1031c6e3e79958b":[8,0,1,10,4,0,3,7],
"_neon_layer_support_tests_8cpp.xhtml#a18052d0741438beaf81c68b548ae9a3d":[8,0,1,10,4,0,3,0],
"_neon_layer_support_tests_8cpp.xhtml#a1cad5764c2e201e1bf68136447904d15":[8,0,1,10,4,0,3,4],
"_neon_layer_support_tests_8cpp.xhtml#a316dc42ae9bac191e4de45b20a1702ea":[8,0,1,10,4,0,3,5],
"_neon_layer_support_tests_8cpp.xhtml#a758123f024c7582abe67dd9eaa97fd8a":[8,0,1,10,4,0,3,1],
"_neon_layer_support_tests_8cpp.xhtml#a964b4cb9c48564b2e8bbdd1828714506":[8,0,1,10,4,0,3,6],
"_neon_layer_support_tests_8cpp.xhtml#a9debf84a0a29399d54720eb8613231cb":[8,0,1,10,4,0,3,3],
"_neon_layer_support_tests_8cpp_source.xhtml":[8,0,1,10,4,0,3],
"_neon_layer_tests_8cpp.xhtml":[8,0,1,10,4,0,4],
"_neon_layer_tests_8cpp.xhtml#a01a718ba97664b32a3b22ab9bc3bc42b":[8,0,1,10,4,0,4,76],
"_neon_layer_tests_8cpp.xhtml#a0357ab81edd7b23936e57eb2262700ed":[8,0,1,10,4,0,4,17],
"_neon_layer_tests_8cpp.xhtml#a03c473ae995ff60aef73b2d752eb56d7":[8,0,1,10,4,0,4,42],
"_neon_layer_tests_8cpp.xhtml#a0b2a78e98e451799c2066761156bf2d8":[8,0,1,10,4,0,4,33],
"_neon_layer_tests_8cpp.xhtml#a0dcfeb6562b512467bccc5e970a6a181":[8,0,1,10,4,0,4,97],
"_neon_layer_tests_8cpp.xhtml#a196b7e049fcf9f8636c4c95d794f6610":[8,0,1,10,4,0,4,54],
"_neon_layer_tests_8cpp.xhtml#a19774294b1af8e22a22e3c7a147451fc":[8,0,1,10,4,0,4,21],
"_neon_layer_tests_8cpp.xhtml#a1d69b9f69cd9022e871ca4e2ce74eacf":[8,0,1,10,4,0,4,0],
"_neon_layer_tests_8cpp.xhtml#a1eb0392c1e3dd4dda90d946b5e6a6a5b":[8,0,1,10,4,0,4,25],
"_neon_layer_tests_8cpp.xhtml#a1ee9f32a37187865f7f9330f35d9dfa3":[8,0,1,10,4,0,4,43],
"_neon_layer_tests_8cpp.xhtml#a24649c6ed1dd8dc683d200c48f04d7c0":[8,0,1,10,4,0,4,94],
"_neon_layer_tests_8cpp.xhtml#a25f16ab62a126e3cdaad02ac2c116736":[8,0,1,10,4,0,4,28],
"_neon_layer_tests_8cpp.xhtml#a25f5b5dc8973e884e10df2967b5fe6cc":[8,0,1,10,4,0,4,53],
"_neon_layer_tests_8cpp.xhtml#a27a35730c796690312e0cca68c59fe7b":[8,0,1,10,4,0,4,69],
"_neon_layer_tests_8cpp.xhtml#a29130e1a162157d1bd38c6dab5953a68":[8,0,1,10,4,0,4,68],
"_neon_layer_tests_8cpp.xhtml#a2af12ac440c8efe14c5b5a11d214efb2":[8,0,1,10,4,0,4,38],
"_neon_layer_tests_8cpp.xhtml#a2b73aed770e3e60473031628af01041c":[8,0,1,10,4,0,4,36],
"_neon_layer_tests_8cpp.xhtml#a2dd61f31b8891ba4d70f5bf156f402c5":[8,0,1,10,4,0,4,56],
"_neon_layer_tests_8cpp.xhtml#a2f92d0c557e3cf56afb3542b4630debf":[8,0,1,10,4,0,4,14],
"_neon_layer_tests_8cpp.xhtml#a304b92ed3e46f007da845939a97c1fc5":[8,0,1,10,4,0,4,83],
"_neon_layer_tests_8cpp.xhtml#a39b48fd7c61510e221e0e9a4738dd5ff":[8,0,1,10,4,0,4,6],
"_neon_layer_tests_8cpp.xhtml#a3ed921802fefe892dfe0417d2eafa4d9":[8,0,1,10,4,0,4,72],
"_neon_layer_tests_8cpp.xhtml#a455cf1219839ce69c907a584529f6dc4":[8,0,1,10,4,0,4,47],
"_neon_layer_tests_8cpp.xhtml#a4826bd18a588f3f84676203c9d3e0838":[8,0,1,10,4,0,4,78],
"_neon_layer_tests_8cpp.xhtml#a49ec3f6d1929e1757580217a91785e0d":[8,0,1,10,4,0,4,49],
"_neon_layer_tests_8cpp.xhtml#a4ca2339ca1e490c1a9765331d8688b43":[8,0,1,10,4,0,4,31],
"_neon_layer_tests_8cpp.xhtml#a4d00317807fc30f46c7c369e70054110":[8,0,1,10,4,0,4,77],
"_neon_layer_tests_8cpp.xhtml#a4d4318b5fbb8f0ce6805c42babe1db57":[8,0,1,10,4,0,4,96],
"_neon_layer_tests_8cpp.xhtml#a4e18dff13e7e7e9e0efaadd810cac060":[8,0,1,10,4,0,4,5],
"_neon_layer_tests_8cpp.xhtml#a516483aa5a8414da971b69e2f177695b":[8,0,1,10,4,0,4,95],
"_neon_layer_tests_8cpp.xhtml#a521000bbe98868e380852e28af2a1afa":[8,0,1,10,4,0,4,86],
"_neon_layer_tests_8cpp.xhtml#a52ed54cfc16438caa521b255e376d49e":[8,0,1,10,4,0,4,105],
"_neon_layer_tests_8cpp.xhtml#a53e7feb3d85cdd83e81a43a4e8a6f8f0":[8,0,1,10,4,0,4,1],
"_neon_layer_tests_8cpp.xhtml#a5402a88554daa0c3546839c50d19664f":[8,0,1,10,4,0,4,73],
"_neon_layer_tests_8cpp.xhtml#a545328205ddb5989c37b9aab090f9895":[8,0,1,10,4,0,4,16],
"_neon_layer_tests_8cpp.xhtml#a5babfd457bd6f745543fc3c680031c39":[8,0,1,10,4,0,4,58],
"_neon_layer_tests_8cpp.xhtml#a5f6be4f5ad0e3a7001bebc48e26701b8":[8,0,1,10,4,0,4,100],
"_neon_layer_tests_8cpp.xhtml#a6084fdd458b84d2141f1a695ddead7d8":[8,0,1,10,4,0,4,46],
"_neon_layer_tests_8cpp.xhtml#a6150d203b2dac434d75cc5e9a930b37f":[8,0,1,10,4,0,4,98],
"_neon_layer_tests_8cpp.xhtml#a6272b12c511acb0b65cdec23bfbb803d":[8,0,1,10,4,0,4,92],
"_neon_layer_tests_8cpp.xhtml#a639893252fc60b9b3b2dafee03ab1ea2":[8,0,1,10,4,0,4,62],
"_neon_layer_tests_8cpp.xhtml#a6cbbd15b3d2ca4e530b74982aaa53e94":[8,0,1,10,4,0,4,80],
"_neon_layer_tests_8cpp.xhtml#a6f36cafc82a139dd0b85b19b84f74b63":[8,0,1,10,4,0,4,18],
"_neon_layer_tests_8cpp.xhtml#a6f487b9fae5b7e8914b53fed1fcb0f64":[8,0,1,10,4,0,4,57],
"_neon_layer_tests_8cpp.xhtml#a759e0c92c2c2f2db7e8c95e10e41cdc3":[8,0,1,10,4,0,4,3],
"_neon_layer_tests_8cpp.xhtml#a777821acd8376ee4aebaacb7c3b8a4fe":[8,0,1,10,4,0,4,15],
"_neon_layer_tests_8cpp.xhtml#a77b0c43051d7f2feb178cafd69d7c98a":[8,0,1,10,4,0,4,74],
"_neon_layer_tests_8cpp.xhtml#a7a14dd2cf9f1405ac313b5eaaae71376":[8,0,1,10,4,0,4,85],
"_neon_layer_tests_8cpp.xhtml#a7a67fe79a2a37f8b4c53499b00d4d11e":[8,0,1,10,4,0,4,12],
"_neon_layer_tests_8cpp.xhtml#a7aa0cc47b204ae54c5ad026089943fc1":[8,0,1,10,4,0,4,99],
"_neon_layer_tests_8cpp.xhtml#a7cb0769392575ec388c33220b3e6a4fa":[8,0,1,10,4,0,4,59],
"_neon_layer_tests_8cpp.xhtml#a7f812d52c215b35fbee29e2a9cf78dbb":[8,0,1,10,4,0,4,87],
"_neon_layer_tests_8cpp.xhtml#a802925008a5ff635622972f7b9aa21ea":[8,0,1,10,4,0,4,52],
"_neon_layer_tests_8cpp.xhtml#a81140e1a9eac08f4a7d021a0617a2151":[8,0,1,10,4,0,4,7],
"_neon_layer_tests_8cpp.xhtml#a81ddafe27845b2da0dd92878291c0a67":[8,0,1,10,4,0,4,10],
"_neon_layer_tests_8cpp.xhtml#a8618e4263bfd70bbf3148062bef66ea5":[8,0,1,10,4,0,4,24],
"_neon_layer_tests_8cpp.xhtml#a8743b5ac060e8128e03e62720489d316":[8,0,1,10,4,0,4,81],
"_neon_layer_tests_8cpp.xhtml#a88e8f80b23d466d2d30740d5893cc0eb":[8,0,1,10,4,0,4,39],
"_neon_layer_tests_8cpp.xhtml#a8dbe29de3d8ca5a30b9867fb1bad2fcb":[8,0,1,10,4,0,4,22],
"_neon_layer_tests_8cpp.xhtml#a8ee297c983981d909a99b0de595b8805":[8,0,1,10,4,0,4,9],
"_neon_layer_tests_8cpp.xhtml#a8f1031c2a749436a3f4c0ed0101b8fe0":[8,0,1,10,4,0,4,37],
"_neon_layer_tests_8cpp.xhtml#a9397e3fce14e3d25a626471e92164741":[8,0,1,10,4,0,4,44],
"_neon_layer_tests_8cpp.xhtml#a942efc75261663814435830d9e6bf025":[8,0,1,10,4,0,4,65],
"_neon_layer_tests_8cpp.xhtml#a95ba6e0f6df561c5da9ba9edbb4f4029":[8,0,1,10,4,0,4,66],
"_neon_layer_tests_8cpp.xhtml#a96869e7d54584f845f4d2b37c7f1810f":[8,0,1,10,4,0,4,90],
"_neon_layer_tests_8cpp.xhtml#a9accf0643571a1311130e39c285b4908":[8,0,1,10,4,0,4,60],
"_neon_layer_tests_8cpp.xhtml#a9ad391ecc4e862cef1126906c3db42c8":[8,0,1,10,4,0,4,102],
"_neon_layer_tests_8cpp.xhtml#a9c34efb87bd25a39f4c16b7e66c9852e":[8,0,1,10,4,0,4,84],
"_neon_layer_tests_8cpp.xhtml#aa273044e7aaf25e5873f6780aaa31a45":[8,0,1,10,4,0,4,50],
"_neon_layer_tests_8cpp.xhtml#aa40114a97c2d05f86c11161dd2594e14":[8,0,1,10,4,0,4,93],
"_neon_layer_tests_8cpp.xhtml#aa424ccba97642d54abe41e6b6acd9370":[8,0,1,10,4,0,4,40],
"_neon_layer_tests_8cpp.xhtml#aaa5710164f575390c788b3f210159006":[8,0,1,10,4,0,4,71],
"_neon_layer_tests_8cpp.xhtml#aaece6874e244ec1fc15bf0bdc5c914d9":[8,0,1,10,4,0,4,75],
"_neon_layer_tests_8cpp.xhtml#aaf14fe14057876a2a08911ca5d88a058":[8,0,1,10,4,0,4,2],
"_neon_layer_tests_8cpp.xhtml#ab056d2bd8fe20d69f8142b661bf1474e":[8,0,1,10,4,0,4,55],
"_neon_layer_tests_8cpp.xhtml#ab30128d089d5ed238cf0a2d97d0210b2":[8,0,1,10,4,0,4,13],
"_neon_layer_tests_8cpp.xhtml#ab66bc6b631130221e5bb9cd28ab1364f":[8,0,1,10,4,0,4,11],
"_neon_layer_tests_8cpp.xhtml#ab795a0e2710f62867eaf5d48adc740e5":[8,0,1,10,4,0,4,70],
"_neon_layer_tests_8cpp.xhtml#ab8bf7a82bdd3574ddcf0fe5828008f2a":[8,0,1,10,4,0,4,29],
"_neon_layer_tests_8cpp.xhtml#ab95b439385bde10e3a148b07e78c3c0f":[8,0,1,10,4,0,4,103],
"_neon_layer_tests_8cpp.xhtml#abc5710b2d94f048aa4320e2d14c1ff49":[8,0,1,10,4,0,4,82],
"_neon_layer_tests_8cpp.xhtml#abf333a6276e7f49a5393babf9ac7c8fb":[8,0,1,10,4,0,4,48],
"_neon_layer_tests_8cpp.xhtml#abf5c594b8217fd86cb35eb5295c08061":[8,0,1,10,4,0,4,20],
"_neon_layer_tests_8cpp.xhtml#ac445ffeb665bd2ee79276658e63c38ff":[8,0,1,10,4,0,4,67],
"_neon_layer_tests_8cpp.xhtml#acb636f7093e56b479bf25666e58087d7":[8,0,1,10,4,0,4,61],
"_neon_layer_tests_8cpp.xhtml#acdc493dc1fc99bb7ca0b367a0adb3f21":[8,0,1,10,4,0,4,91],
"_neon_layer_tests_8cpp.xhtml#aced4e3ab49ae91101bbd0486b1f042bb":[8,0,1,10,4,0,4,101],
"_neon_layer_tests_8cpp.xhtml#ad1899491c5abd55563454cffd2e301f3":[8,0,1,10,4,0,4,45],
"_neon_layer_tests_8cpp.xhtml#ad28220dbfeda70f38ea78e866abf1207":[8,0,1,10,4,0,4,41],
"_neon_layer_tests_8cpp.xhtml#ad33a32f1e6decba204cbc398a369f730":[8,0,1,10,4,0,4,27],
"_neon_layer_tests_8cpp.xhtml#ad46faea9a8bfdd5311940d15f74d532a":[8,0,1,10,4,0,4,104],
"_neon_layer_tests_8cpp.xhtml#ad628ac88dc32c00330167e2e615d34c7":[8,0,1,10,4,0,4,8],
"_neon_layer_tests_8cpp.xhtml#ada4271a559e7473ea9ae81aa4a2c7899":[8,0,1,10,4,0,4,63],
"_neon_layer_tests_8cpp.xhtml#adea013ff05bb65c4774ae6ff4e255c8e":[8,0,1,10,4,0,4,89],
"_neon_layer_tests_8cpp.xhtml#ae0be66b77ec9e1c2addff0a089a93ced":[8,0,1,10,4,0,4,79],
"_neon_layer_tests_8cpp.xhtml#ae4e67a91d3cbd8474c27438cc6b0292c":[8,0,1,10,4,0,4,19],
"_neon_layer_tests_8cpp.xhtml#ae6dad0e5fbe3055c072f2d7b5ccd2eb1":[8,0,1,10,4,0,4,34],
"_neon_layer_tests_8cpp.xhtml#aeb2f310b7fd4912fbfb1c7b5fc2dc766":[8,0,1,10,4,0,4,51],
"_neon_layer_tests_8cpp.xhtml#aebf563e9b6c556c1d4d3f30fe195585f":[8,0,1,10,4,0,4,88],
"_neon_layer_tests_8cpp.xhtml#af1672a8953967d2334147dcbd16594b6":[8,0,1,10,4,0,4,35],
"_neon_layer_tests_8cpp.xhtml#af223a5dde540ba2ae191184187a9236e":[8,0,1,10,4,0,4,32],
"_neon_layer_tests_8cpp.xhtml#af55e57850201fd9f69bc805ec2904549":[8,0,1,10,4,0,4,4],
"_neon_layer_tests_8cpp.xhtml#af6949b52643529172cdaa54c86746e10":[8,0,1,10,4,0,4,26],
"_neon_layer_tests_8cpp.xhtml#af8660b4327b0db5951d3ca4480debc57":[8,0,1,10,4,0,4,64],
"_neon_layer_tests_8cpp.xhtml#afe3ab054ec25d613c1f2a86504d5be2f":[8,0,1,10,4,0,4,30],
"_neon_layer_tests_8cpp.xhtml#aff648caa56cb924d30ad6ce8516b4748":[8,0,1,10,4,0,4,23],
"_neon_layer_tests_8cpp_source.xhtml":[8,0,1,10,4,0,4],
"_neon_lstm_float_workload_8cpp.xhtml":[8,0,1,10,4,1,42],
"_neon_lstm_float_workload_8cpp.xhtml#a9e06cc2a2ac8b88fc72972695a17910f":[8,0,1,10,4,1,42,0],
"_neon_lstm_float_workload_8cpp_source.xhtml":[8,0,1,10,4,1,42],
"_neon_lstm_float_workload_8hpp.xhtml":[8,0,1,10,4,1,43],
"_neon_lstm_float_workload_8hpp.xhtml#a9e06cc2a2ac8b88fc72972695a17910f":[8,0,1,10,4,1,43,1],
"_neon_lstm_float_workload_8hpp_source.xhtml":[8,0,1,10,4,1,43],
"_neon_maximum_workload_8cpp.xhtml":[8,0,1,10,4,1,44],
"_neon_maximum_workload_8cpp.xhtml#a8d2ea79addd8ef64be2ca0dad3408f00":[8,0,1,10,4,1,44,0],
"_neon_maximum_workload_8cpp_source.xhtml":[8,0,1,10,4,1,44],
"_neon_maximum_workload_8hpp.xhtml":[8,0,1,10,4,1,45],
"_neon_maximum_workload_8hpp.xhtml#a8d2ea79addd8ef64be2ca0dad3408f00":[8,0,1,10,4,1,45,1],
"_neon_maximum_workload_8hpp_source.xhtml":[8,0,1,10,4,1,45]
};
|
package org.bf2.cos.fleetshard.operator.it.debezium.glues;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import org.bf2.cos.fleetshard.it.cucumber.support.StepsSupport;
import org.bf2.cos.fleetshard.support.json.JacksonUtil;
import com.fasterxml.jackson.databind.JsonNode;
import io.cucumber.datatable.DataTable;
import io.cucumber.java.en.And;
import io.cucumber.java.en.Then;
import io.cucumber.java.en.When;
import io.fabric8.kubernetes.client.utils.Serialization;
import io.strimzi.api.kafka.model.KafkaConnect;
import static net.javacrumbs.jsonunit.assertj.JsonAssertions.assertThatJson;
import static org.assertj.core.api.Assertions.assertThat;
import static org.bf2.cos.fleetshard.it.cucumber.assertions.CucumberAssertions.assertThatDataTable;
public class KafkaConnectSteps extends StepsSupport {
    // Polls until the KafkaConnect custom resource has been created.
    @Then("the kc exists")
    public void exists() {
        awaiter.until(() -> kc() != null);
    }
    // Polls until the KafkaConnect custom resource has been deleted.
    // NOTE(review): the step text "does not exists" is grammatically off,
    // but feature files match it verbatim — renaming would break them.
    @Then("the kc does not exists")
    public void does_not_exists() {
        awaiter.until(() -> kc() == null);
    }
    // Replaces the node at the given path inside the live KafkaConnect
    // resource with the supplied JSON payload, then writes it back.
    @When("the kc path {string} is set to json:")
    public void kc_pointer(String path, String payload) {
        kubernetesClient.resources(KafkaConnect.class)
            .inNamespace(ctx.connector().getMetadata().getNamespace())
            .withName(ctx.connector().getMetadata().getName())
            .edit(res -> {
                // Parse the desired replacement value ...
                JsonNode replacement = Serialization.unmarshal(payload, JsonNode.class);
                // ... splice it into a JSON rendering of the current resource ...
                JsonNode replaced = PARSER.parse(Serialization.asJson(res)).set(path, replacement).json();
                // ... and convert the patched tree back into a typed resource.
                return JacksonUtil.treeToValue(replaced, KafkaConnect.class);
            });
    }
@And("the kc has an entry at path {string} with value {string}")
public void kc_has_a_path_matching_value(String path, String value) {
KafkaConnect res = kc();
assertThat(res)
.isNotNull();
assertThatJson(JacksonUtil.asJsonNode(res))
.inPath(path)
.isString()
.isEqualTo(ctx.resolvePlaceholders(value));
}
@And("the kc has an entry at path {string} with value {int}")
public void kc_has_a_path_matching_value(String path, int value) {
KafkaConnect res = kc();
assertThat(res)
.isNotNull();
assertThatJson(JacksonUtil.asJsonNode(res))
.inPath(path)
.isNumber()
.satisfies(bd -> assertThat(bd.intValue()).isEqualTo(value));
}
@And("the kc has an entry at path {string} with value {bool}")
public void kc_has_a_path_matching_value(String path, Boolean value) {
KafkaConnect res = kc();
assertThat(res)
.isNotNull();
assertThatJson(JacksonUtil.asJsonNode(res))
.inPath(path)
.isBoolean()
.isEqualTo(value);
}
@And("the kc has an object at path {string} containing:")
public void kc_has_a_path_matching_object(String path, String content) {
KafkaConnect res = kc();
content = ctx.resolvePlaceholders(content);
assertThat(res)
.isNotNull();
assertThatJson(JacksonUtil.asJsonNode(res))
.inPath(path)
.isObject()
.containsValue(Serialization.unmarshal(content, JsonNode.class));
}
@And("the kc has an array at path {string} containing:")
public void kc_has_a_path_containing_object(String path, DataTable elements) {
KafkaConnect res = kc();
assertThat(res)
.isNotNull();
assertThatJson(JacksonUtil.asJsonNode(res))
.inPath(path)
.isArray()
.containsAll(
elements.asList().stream()
.map(e -> ctx.resolvePlaceholders(e))
.map(e -> Serialization.unmarshal(e, JsonNode.class))
.collect(Collectors.toList()));
}
@And("the kc has annotations containing:")
public void kc_annotation_contains(DataTable table) {
KafkaConnect res = kc();
assertThat(res)
.isNotNull();
assertThatDataTable(table, ctx::resolvePlaceholders)
.matches(res.getMetadata().getAnnotations());
}
@And("the kc has labels containing:")
public void kc_label_contains(DataTable table) {
KafkaConnect res = kc();
assertThat(res)
.isNotNull();
assertThatDataTable(table, ctx::resolvePlaceholders)
.matches(res.getMetadata().getLabels());
}
@And("the kc has config containing:")
public void kc_config_contains(DataTable table) {
KafkaConnect res = kc();
assertThat(res)
.isNotNull();
assertThatDataTable(table, ctx::resolvePlaceholders)
.matches(res.getSpec().getConfig());
}
@Then("the kc path {string} matches json:")
public void kc_path_matches(String path, String payload) {
untilKc(res -> {
JsonNode actual = PARSER.parse(JacksonUtil.asJsonNode(res)).read(path);
JsonNode expected = PARSER.parse(payload).json();
assertThatJson(actual).isEqualTo(expected);
});
}
private KafkaConnect kc() {
return kubernetesClient.resources(KafkaConnect.class)
.inNamespace(ctx.connector().getMetadata().getNamespace())
.withName(ctx.connector().getMetadata().getName())
.get();
}
private void untilKc(Consumer<KafkaConnect> predicate) {
awaiter.untilAsserted(() -> {
KafkaConnect res = kc();
assertThat(res).isNotNull();
assertThat(res).satisfies(predicate);
});
}
}
|
python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/512+512+512-FW/model --tokenizer_name model-configs/1536-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/512+512+512-FW/1024+0+512-N-VB-IP-first-256 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function replace_all_but_nouns_and_verbs_first_two_thirds_sixth --eval_function penultimate_sixth_eval
|
#!/bin/bash
# Prepares a Raspberry Pi for the RFID app: SD-card wear reduction,
# watchdog configuration, and installation of init scripts.
# Run script as root
# Fix: quote the command substitution so the test is robust (SC2046).
if [ "$(id -u)" != "0" ]
then
	sudo "$0" "$@"
	exit $?
fi

######## PRE-START SETUP ##########
# Stop watchdog before update
/etc/init.d/watchdog stop
# Enable rw file system
../app/helpers/mount_rw.sh

# Sdcard life time extend hacks
# http://raspberrypi.stackexchange.com/questions/169/how-can-i-extend-the-life-of-my-sd-card
# Shutdown swap
swapoff --all

####### CONFIGURE MODULES #########
# Redirect frequently-written state directories to tmpfs.
rm -rf /var/lib/dhcp/
ln -svf /tmp /var/lib/dhcp

rm -rf /var/run /var/spool /var/lock
ln -svf /tmp /var/run
ln -svf /tmp /var/spool
ln -svf /tmp /var/lock

# FSTAB and cmdline is node handled as partition can change wildly between installation to installation
# CONFIGURE THEM MANUALLY
# cp -vf ./confs/fstab /etc
# cp -vf ./confs/cmdline.txt /boot

#chown root:root /etc/fstab
#chmod 644 /etc/fstab
#chown root:root /boot/cmdline.txt
#chmod 755 /boot/cmdline.txt

# Add mount path for log sqlitedb mount path
mkdir -p /media/usbDISK

# File based configuration changes start:
# Folder for initial files before update
mkdir -p ../original_confs

cp -n /etc/inittab ../original_confs/ # Don't overwrite if file exists with -n
python2 ./helpers/auto_replace.py --file=/etc/inittab \
	--search="T0:23:respawn:/sbin/getty L ttyAMA0" \
	--replace="#T0:23:respawn:/sbin/getty L ttyAMA0"

# Configure DNS with google servers
cp -n /etc/network/interfaces ../original_confs/ # Don't overwrite if file exists with -n
cp -vf ./confs/interfaces /etc/network
chown root:root /etc/network/interfaces
chmod 644 /etc/network/interfaces

cp -n /etc/resolv.conf.head ../original_confs/ # Don't overwrite if file exists with -n
cp -vf ./confs/resolv.conf.head /etc
chown root:root /etc/resolv.conf.head
chmod 644 /etc/resolv.conf.head

cp -n /etc/watchdog.conf ../original_confs/ # Don't overwrite if file exists with -n
python2 ./helpers/auto_replace.py --file=/etc/watchdog.conf \
	--search="#watchdog-device" \
	--replace="watchdog-device"
python2 ./helpers/auto_replace.py --file=/etc/watchdog.conf \
	--search="#max-load-1" \
	--replace="max-load-1"

cp -n /etc/modules ../original_confs/ # Don't overwrite if file exists with -n
python2 ./helpers/search_append.py --file=/etc/modules \
	--key="bcm2708_wdog"

cp -n /etc/sysctl.conf ../original_confs/ # Don't overwrite if file exists with -n
python2 ./helpers/conf_append.py --file /etc/sysctl.conf \
	--key="#KERNEL_PANIC_BOOT_TIME" \
	--append="kernel.panic = 10"

# Copy Fake Cron
cp ./confs/fakecron /etc/init.d
chmod 755 /etc/init.d/fakecron
cp ./confs/fakecron.sh /usr/local/bin
chmod 755 /usr/local/bin/fakecron.sh

# Copy App Start Script
cp ./confs/rfid_app /etc/init.d
chmod 755 /etc/init.d/rfid_app
cp ./confs/rfid_app.sh /usr/local/bin
chmod 755 /usr/local/bin/rfid_app.sh

# Copy Git Update
cp ./confs/pull_git.sh /usr/local/bin
chmod 755 /usr/local/bin/pull_git.sh

cp -n /etc/rc.local ../original_confs/ # Don't overwrite if file exists with -n
cp ./confs/rc.local /etc
chmod 755 /etc/rc.local

######## POST-STOP SETUP ########
# Switch back to read only file system and enable watchdog
apt-get install -y watchdog
insserv watchdog
../app/helpers/mount_ro.sh
/etc/init.d/watchdog start
|
def determine_next_action(animator_state: str, is_human_exists: bool, fsm_state: str) -> str:
    """Decide the next FSM action from the animator state and human presence.

    Note: ``fsm_state`` is accepted for interface compatibility but is not
    consulted by the current decision logic.
    """
    # No human in sight: go to sleep regardless of animator state.
    if not is_human_exists:
        return "Set Is_Sleep to true"
    # Human present while waiting: initiate contact.
    if animator_state == "Wait":
        return "Transition to State_Contact_Dragon"
    # Human present but animator is busy: nothing to do.
    return "No action needed"
|
-- Fix: ORDER BY must precede LIMIT; the original query put LIMIT before
-- ORDER BY, which is a syntax error in standard SQL/MySQL/PostgreSQL.
-- NOTE(review): TABLE is a reserved word — confirm the real table name.
SELECT * FROM TABLE
WHERE column1 = 'some_value'
ORDER BY column2 DESC
LIMIT 10
|
// License: Apache 2.0. See LICENSE file in root directory.
// Copyright(c) 2017 Intel Corporation. All Rights Reserved
#pragma once
#include <opencv2/core/core.hpp>
#include <map>
// Tracking record for a single detected person.
class PersonData
{
public:
    int Id;             // tracking id; used as the key in PersonDataStorage
    int rid = 0;        // NOTE(review): meaning not evident from this header —
                        // presumably a recognition/registration id; confirm.
    cv::Rect rectangle; // last known bounding box in image coordinates
    bool valid;         // NOTE(review): not initialized here — callers appear
                        // responsible for setting it; confirm.
};
// Keyed store of PersonData records with point-based lookup.
class PersonDataStorage
{
public:
    // Insert or overwrite the record keyed by its Id.
    void Add(PersonData data)
    {
        mPersonsData[data.Id] = data;
    }

    // Return the record with the given id, or nullptr when absent.
    const PersonData* get(int id)
    {
        auto found = mPersonsData.find(id);
        if (found == mPersonsData.end())
            return nullptr;
        return &found->second;
    }

    //void clear() { mPersonsData.clear(); }

    // Return the record whose rectangle contains the point, scanning ids in
    // descending order so the highest (most recent) id wins; nullptr if none.
    PersonData* MatchPersonToPoint(cv::Point point)
    {
        for (auto it = mPersonsData.rbegin(); it != mPersonsData.rend(); ++it)
        {
            PersonData& candidate = it->second;
            if (candidate.rectangle.contains(point))
                return &candidate;
        }
        return nullptr;
    }

private:
    std::map<int, PersonData> mPersonsData;
};
|
package de.hswhameln.typetogether.client.runtime;
import java.beans.PropertyChangeEvent;
import java.beans.PropertyChangeListener;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;
/**
* Class that handles listening to property changes and calling the correct
* methods accordingly. For each Property Id, a list of Method is stored that
* are called when a property with the fitting propertyId is modified.
*
*/
public class PropertyChangeManager implements PropertyChangeListener {
/**
* Map which stores a List of PropertyChangeHandler responsible for a Property
*/
public Map<String, List<Consumer<PropertyChangeEvent>>> propertyChangeHandlers;
public PropertyChangeManager() {
this.propertyChangeHandlers = new HashMap<>();
}
@Override
public void propertyChange(PropertyChangeEvent evt) {
this.propertyChangeHandlers
.computeIfAbsent(evt.getPropertyName(), k -> new ArrayList<>())
.forEach(propertyChangeHandler -> propertyChangeHandler.accept(evt));
}
/**
* Adds a method that will be called when the source Object's property with the
* given name is changed.
*
* @param propertyName Name of the property to be observed
* @param consumer Method to be called on property change
*/
public void onPropertyChange(String propertyName, Consumer<PropertyChangeEvent> consumer) {
this.propertyChangeHandlers.computeIfAbsent(propertyName, k -> new ArrayList<>()).add(consumer);
}
}
|
<reponame>kuhella/ambari<filename>ambari-server/src/main/resources/stacks/ADH/1.4/services/FLINK/package/scripts/flink.py
#!/usr/bin/env python
import sys, os, pwd, grp, signal, time, glob
from resource_management.libraries.functions.check_process_status import check_process_status
from resource_management import *
from subprocess import call
class Master(Script):
    # Ambari service controller for a standalone Flink cluster.
    # NOTE(review): targets Ambari's Python 2 runtime — the octal literal
    # 0644 below is Python-2-only syntax.

    def get_component_name(self):
        # Stack component name used by Ambari for versioned installs.
        return "flink"

    def get_env(self):
        # Environment passed to every Flink shell command.
        import params
        return {'JAVA_HOME': params.java_home, 'FLINK_PID_DIR': params.flink_pid_dir}

    def install(self, env):
        # Install packages and create pid/log directories and the log file.
        import params
        import status_params
        env.set_params(params)
        env.set_params(status_params)
        self.install_packages(env)
        Directory([status_params.flink_pid_dir, params.flink_log_dir],
                  owner=params.flink_user,
                  group=params.flink_group
                  )
        File(params.flink_log_file,
             mode=0644,
             owner=params.flink_user,
             group=params.flink_group,
             content=''
             )

    def configure(self, env, isInstall=False):
        # Create the HDFS user, set up passwordless SSH to localhost
        # (needed by the start/stop cluster scripts), and render flink-conf.yaml.
        import params
        import status_params
        env.set_params(params)
        env.set_params(status_params)
        self.create_hdfs_user(params.flink_user)
        self.config_ssh(params.flink_user)
        #write out config
        properties_content=InlineTemplate(params.flink_yaml_content)
        File(format("{conf_dir}/flink-conf.yaml"), content=properties_content, owner=params.flink_user)

    def config_ssh(self, flink_user):
        # Generate a key pair once and authorize it for localhost SSH.
        if not os.path.exists(format("{flink_home_dir}/.ssh/id_rsa")):
            cmd1 = format("ssh-keygen -f {flink_home_dir}/.ssh/id_rsa -t rsa -N \"\"")
            Execute(cmd1, user=flink_user)
            cmd2 = format("cat {flink_home_dir}/.ssh/id_rsa.pub >> {flink_home_dir}/.ssh/authorized_keys")
            Execute(cmd2, user=flink_user)
            cmd3 = format("echo -e \"Host localhost\n  StrictHostKeyChecking no\" > {flink_home_dir}/.ssh/config")
            Execute(cmd3, user=flink_user)

    def stop(self, env):
        # NOTE(review): other format() calls here use bare names like
        # {conf_dir}; confirm dotted {params.*} placeholders resolve the
        # same way in resource_management's format().
        import params
        cmd = format("{params.bin_dir}/stop-cluster.sh >> {params.flink_log_file}")
        Execute (cmd, user=params.flink_user, environment=self.get_env())

    def start(self, env):
        # Reconfigure, launch the cluster, and drop the install marker file.
        import params
        import status_params
        env.set_params(params)
        env.set_params(status_params)
        self.configure(env, True)
        cmd = format("{params.bin_dir}/start-cluster.sh >> {params.flink_log_file}")
        #cmd = "env >/tmp/1.log"
        Execute (cmd, user=params.flink_user, environment=self.get_env())
        if os.path.exists(params.temp_file):
            os.remove(params.temp_file)

    def status(self, env):
        # Raises ComponentIsNotRunning when the pid file is stale/absent.
        import status_params
        check_process_status(status_params.flink_pid_file)

    def create_hdfs_user(self, user):
        # Create /user/<user> in HDFS and hand ownership to the user.
        Execute('hadoop fs -mkdir -p /user/'+user, user='hdfs', ignore_failures=True)
        Execute('hadoop fs -chown ' + user + ' /user/'+user, user='hdfs')
        Execute('hadoop fs -chgrp ' + user + ' /user/'+user, user='hdfs')
# Entry point: dispatch the Ambari command (install/start/stop/status/...).
if __name__ == "__main__":
    Master().execute()
|
def add(x, y):
    """Return the sum of x and y."""
    return x + y
def subtract(x, y):
    """Return the difference x - y."""
    return x - y
def multiply(x, y):
    """Return the product of x and y."""
    return x * y
def divide(x, y):
    """Return the true-division quotient x / y.

    Raises:
        ZeroDivisionError: if y is zero.
    """
    return x / y
|
/**
 * Basic user profile data. `id` is assigned externally after construction.
 */
export class Profile {
  public id: string;

  // Parameter properties declare and assign the fields in one step,
  // exactly matching the original explicit assignments.
  constructor(
    public firstname: string,
    public lastname: string,
    public avatar?: any
  ) {}
}
|
# Compare the MySQL schema produced by migrating a 5.0 dump against a
# freshly-created schema, and fail when they differ.
# Fixes: replace backticks with $( ), quote all path expansions (SC2086).
# NOTE(review): TMPDIR shadows the standard temp-dir env var, which can
# redirect the second mktemp call below — confirm this is intended.
TMPDIR=$(mktemp -d 2>/dev/null || mktemp -d -t 'tmpConfigDir')
DUMPDIR=$(mktemp -d 2>/dev/null || mktemp -d -t 'dumpDir')

cp config/config.json "$TMPDIR"

echo "Creating databases"
docker exec mattermost-mysql mysql -uroot -pmostest -e "CREATE DATABASE migrated; CREATE DATABASE latest; GRANT ALL PRIVILEGES ON migrated.* TO mmuser; GRANT ALL PRIVILEGES ON latest.* TO mmuser"

echo "Importing mysql dump from version 5.0"
docker exec -i mattermost-mysql mysql -D migrated -uroot -pmostest < "$(pwd)/scripts/mattermost-mysql-5.0.sql"

echo "Setting up config for db migration"
make ARGS="config set SqlSettings.DataSource 'mmuser:mostest@tcp(localhost:3306)/migrated?charset=utf8mb4,utf8&readTimeout=30s&writeTimeout=30s' --config $TMPDIR/config.json" run-cli
make ARGS="config set SqlSettings.DriverName 'mysql' --config $TMPDIR/config.json" run-cli

echo "Running the migration"
make ARGS="version --config $TMPDIR/config.json" run-cli

echo "Setting up config for fresh db setup"
make ARGS="config set SqlSettings.DataSource 'mmuser:mostest@tcp(localhost:3306)/latest?charset=utf8mb4,utf8&readTimeout=30s&writeTimeout=30s' --config $TMPDIR/config.json" run-cli

echo "Setting up fresh db"
make ARGS="version --config $TMPDIR/config.json" run-cli

echo "Ignoring known MySQL mismatch: ChannelMembers.SchemeGuest"
docker exec mattermost-mysql mysql -D migrated -uroot -pmostest -e "ALTER TABLE ChannelMembers DROP COLUMN SchemeGuest;"
docker exec mattermost-mysql mysql -D latest -uroot -pmostest -e "ALTER TABLE ChannelMembers DROP COLUMN SchemeGuest;"

echo "Generating dump"
docker exec mattermost-mysql mysqldump --skip-opt --no-data --compact -u root -pmostest migrated > "$DUMPDIR/migrated.sql"
docker exec mattermost-mysql mysqldump --skip-opt --no-data --compact -u root -pmostest latest > "$DUMPDIR/latest.sql"

echo "Removing databases created for db comparison"
docker exec mattermost-mysql mysql -uroot -pmostest -e "DROP DATABASE migrated; DROP DATABASE latest"

echo "Generating diff"
diff "$DUMPDIR/migrated.sql" "$DUMPDIR/latest.sql" > "$DUMPDIR/diff.txt"
diffErrorCode=$?

if [ "$diffErrorCode" -eq 0 ]; then
	echo "Both schemas are same"
else
	echo "Schema mismatch"
	cat "$DUMPDIR/diff.txt"
fi

rm -rf "$TMPDIR" "$DUMPDIR"

exit "$diffErrorCode"
|
# Redeploy the word-flip app from the latest published image.
# Pull latest word-flip Docker image
docker pull asewdat/word-flip
# Kill existing word-flip app if running
# (kill/rm fail harmlessly when no container exists; the script has no
# `set -e`, so execution continues to the run step)
docker kill word-flip-app
# Remove word-flip-app container if exists
docker rm word-flip-app
# Start word-flip-app using latest image, publishing port 3000 as 80 and
# linking the mongo container under the alias "mongo"
docker run -p 80:3000 --name word-flip-app --link mongo-img-0:mongo -d asewdat/word-flip
|
(function () {
  // CSS triangle directions paired with the corner label shown in the demo.
  const triangleDirs = [
    ['top', 'Top Right'],
    ['right', 'Bottom Right'],
    ['bottom', 'Bottom Left'],
    ['left', 'Top Left']
  ]

  // Wrap `content` in a titled showcase block.
  const block = (title, content) =>
    `<div class="block">
      <h5>${title}</h5>
      ${content}
    </div>`

  // Find (or lazily create) the `.field-body` container inside `selector`.
  const getField = selector => {
    const $owner = $(selector)
    let $field = $owner.find('>.field-body')
    // Fix: jQuery's .size() was deprecated in 1.8 and removed in 3.0;
    // .length is the supported equivalent.
    if ($field.length) return $field
    $field = $('<div class="field-body"></div>')
    $field.appendTo($owner)
    return $field
  }

  // Render usage + demo sections for the corner-triangle classes.
  function setupTriangleBlock ($, {scssVars}) {
    const $field = getField('.corner-triangle-field')
    const { triangleSelectorPrefix } = scssVars
    let html = ''
    html += block(
      'Usage',
      triangleDirs.map(([dir]) => {
        return `<div class="showcase">
          <div class="showcase-icon ${triangleSelectorPrefix}-${dir}"></div>
          ${dir}
        </div>`
      }).join('')
    )
    html += block(
      'Demo',
      triangleDirs.map(([dir, title]) => {
        return `<div class="demo demo-${dir}">
          <div class="demo-icon ${triangleSelectorPrefix}-${dir}"></div>
          ${title}
        </div>`
      }).join('')
    )
    $field.append(html)
  }

  // Render the usage section for the rotating-triangle classes.
  function setupRotateTriangleBlock ($, {scssVars}) {
    const $field = getField('.rotate-triangle-field')
    const { rotateTriangleSelectorPrefix } = scssVars
    let html = ''
    html += block(
      'Usage',
      `<div class="showcase">
        <div class="showcase-icon ${rotateTriangleSelectorPrefix}-top"></div>
      </div>`
    )
    $field.append(html)
  }

  function main($, scssVars) {
    const args = [
      $,
      {
        scssVars
      }
    ]
    setupTriangleBlock(...args)
    setupRotateTriangleBlock(...args)
  }

  // Boot once the DOM is ready and the exported SCSS variables are loaded.
  $(function () {
    $.get('../vars.json').done(v => {
      main($, v)
    })
  })
})()
|
#!/usr/bin/with-contenv bash
# ==============================================================================
# Community Hass.io Add-ons: Pi-hole
# Sets the configured password for the Pi-hole admin interface
# ==============================================================================
# shellcheck disable=SC1091
source /usr/lib/hassio-addons/base.sh

# Warn (but continue) when the add-on configuration provides no password.
hass.config.has_value 'password' \
    || hass.log.warning 'No password set! This is not recommended!'

pihole -a -p "$(hass.config.get 'password')"
|
<reponame>kels-orien/fastify-rest-api<filename>module.js
const { gql } = require("apollo-server-fastify");

// GraphQL schema for user accounts and authentication.
// (Template literal content is the runtime SDL — do not edit casually.)
const typeDefs = gql`
  type Token {
    token: String!
  }
  type User {
    id: ID!
    firstName: String!
    lastName: String!
    password: String!
    bio: String
    profileImage: String
    email: String!
    userName: String!
    createdDate: String
  }
  type Query {
    user(id: ID!): User
    users: [User]
    currentUser: User
  }
  type Mutation {
    signupUser(
      firstName: String!
      lastName: String!
      email: String!
      userName: String!
      password: String!
    ): Token
    signinUser(email: String!, password: String!): Token
    editProfile(email: String!, bio: String!): User
    setProfileIMG(email: String!, profileImage: String!): User
    changeEmail(currentEmail: String!, newEmail: String!): User
    changePassword(email: String!, password: String!): User
    passwordReset(email: String!): User
  }
`;

// NOTE(review): resolvers are intentionally empty stubs here — presumably
// filled in elsewhere or during later development; confirm.
const resolvers = {
  Query: {},
  Mutation: {}
};

module.exports = {
  typeDefs,
  resolvers
};
|
# Library of functions shared by all tests scripts, included by
# test-lib.sh.
#
# Copyright (c) 2005 Junio C Hamano
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/ .
# The semantics of the editor variables are that of invoking
# sh -c "$EDITOR \"$@\"" files ...
#
# If our trash directory contains shell metacharacters, they will be
# interpreted if we just set $EDITOR directly, so do a little dance with
# environment variables to work around this.
#
# In particular, quoting isn't enough, as the path may contain the same quote
# that we're using.
# Point $EDITOR at the given fake editor, indirected through FAKE_EDITOR
# so metacharacters in the path survive the `sh -c "$EDITOR ..."` call.
test_set_editor () {
	FAKE_EDITOR="$1"
	export FAKE_EDITOR
	EDITOR='"$FAKE_EDITOR"'
	export EDITOR
}

# Translate ANSI color escape sequences on stdin into readable tokens
# such as <RED>, <BOLD;GREEN> and <RESET>.
test_decode_color () {
	awk '
		function name(n) {
			if (n == 0) return "RESET";
			if (n == 1) return "BOLD";
			if (n == 30) return "BLACK";
			if (n == 31) return "RED";
			if (n == 32) return "GREEN";
			if (n == 33) return "YELLOW";
			if (n == 34) return "BLUE";
			if (n == 35) return "MAGENTA";
			if (n == 36) return "CYAN";
			if (n == 37) return "WHITE";
			if (n == 40) return "BLACK";
			if (n == 41) return "BRED";
			if (n == 42) return "BGREEN";
			if (n == 43) return "BYELLOW";
			if (n == 44) return "BBLUE";
			if (n == 45) return "BMAGENTA";
			if (n == 46) return "BCYAN";
			if (n == 47) return "BWHITE";
		}
		{
			while (match($0, /\033\[[0-9;]*m/) != 0) {
				printf "%s<", substr($0, 1, RSTART-1);
				codes = substr($0, RSTART+2, RLENGTH-3);
				if (length(codes) == 0)
					printf "%s", name(0)
				else {
					n = split(codes, ary, ";");
					sep = "";
					for (i = 1; i <= n; i++) {
						printf "%s%s", sep, name(ary[i]);
						sep = ";"
					}
				}
				printf ">";
				$0 = substr($0, RSTART + RLENGTH, length($0) - RSTART - RLENGTH + 1);
			}
			print
		}
	'
}

# Stream filters that map NUL and other unprintable bytes to/from
# placeholder letters so binary-ish data can be compared as text.
nul_to_q () {
	perl -pe 'y/\000/Q/'
}

q_to_nul () {
	perl -pe 'y/Q/\000/'
}

q_to_cr () {
	tr Q '\015'
}

q_to_tab () {
	tr Q '\011'
}

qz_to_tab_space () {
	tr QZ '\011\040'
}

append_cr () {
	sed -e 's/$/Q/' | tr Q '\015'
}

remove_cr () {
	tr '\015' Q | sed -e 's/Q$//'
}
# In some bourne shell implementations, the "unset" builtin returns
# nonzero status when a variable to be unset was not set in the first
# place.
#
# Use sane_unset when that should not be considered an error.
# Unset the given variables, always reporting success — some shells'
# "unset" returns non-zero for names that were never set.
sane_unset () {
	unset "$@" || :
}
# Stop execution and start a shell. This is useful for debugging tests and
# only makes sense together with "-v".
#
# Be sure to remove all invocations of this command before submitting.
# Suspend the test and hand the saved stdin/stdout/stderr (fds 6, 3, 4)
# to an interactive shell; only usable under --verbose.
test_pause () {
	if test "$verbose" = t; then
		"$SHELL_PATH" <&6 >&3 2>&4
	else
		error >&5 "test_pause requires --verbose"
	fi
}
# Create an executable script at $1 from stdin, with an optional
# interpreter path in $2 (default: $SHELL_PATH).
write_script () {
	printf '#!%s\n' "${2-"$SHELL_PATH"}" >"$1" &&
	cat >>"$1" &&
	chmod +x "$1"
}
# Use test_set_prereq to tell that a particular prerequisite is available.
# The prerequisite can later be checked for in two ways:
#
# - Explicitly using test_have_prereq.
#
# - Implicitly by specifying the prerequisite tag in the calls to
# test_expect_{success,failure,code}.
#
# The single parameter is the prerequisite tag (a simple word, in all
# capital letters by convention).
# Record a prerequisite tag as satisfied; entries are space-delimited and
# space-padded so substring matching in test_have_prereq is exact.
test_set_prereq () {
	satisfied_prereq="$satisfied_prereq$1 "
}
satisfied_prereq=" "
lazily_testable_prereq= lazily_tested_prereq=

# Usage: test_lazy_prereq PREREQ 'script'
# Registers a prerequisite whose check script runs on first use.
test_lazy_prereq () {
	lazily_testable_prereq="$lazily_testable_prereq$1 "
	eval test_prereq_lazily_$1=\$2
}

# Run a lazy prerequisite's check script in a scratch directory and
# report (fd 3) and return whether it succeeded.
test_run_lazy_prereq_ () {
	script='
mkdir -p "$TRASH_DIRECTORY/prereq-test-dir" &&
(
	cd "$TRASH_DIRECTORY/prereq-test-dir" &&'"$2"'
)'
	say >&3 "checking prerequisite: $1"
	say >&3 "$script"
	test_eval_ "$script"
	eval_ret=$?
	rm -rf "$TRASH_DIRECTORY/prereq-test-dir"
	if test "$eval_ret" = 0; then
		say >&3 "prerequisite $1 ok"
	else
		say >&3 "prerequisite $1 not satisfied"
	fi
	return $eval_ret
}
# Check whether every prerequisite in the comma-separated list is
# satisfied; a leading "!" negates an individual prerequisite.
# Lazy prerequisites are evaluated (once) on demand. On failure,
# $missing_prereq lists the unsatisfied tags.
test_have_prereq () {
	# prerequisites can be concatenated with ','
	save_IFS=$IFS
	IFS=,
	set -- $*
	IFS=$save_IFS

	total_prereq=0
	ok_prereq=0
	missing_prereq=

	for prerequisite
	do
		# Peel off a leading "!" (negated prerequisite).
		case "$prerequisite" in
		!*)
			negative_prereq=t
			prerequisite=${prerequisite#!}
			;;
		*)
			negative_prereq=
		esac

		# Evaluate a lazy prerequisite the first time it is asked for.
		case " $lazily_tested_prereq " in
		*" $prerequisite "*)
			;;
		*)
			case " $lazily_testable_prereq " in
			*" $prerequisite "*)
				eval "script=\$test_prereq_lazily_$prerequisite" &&
				if test_run_lazy_prereq_ "$prerequisite" "$script"
				then
					test_set_prereq $prerequisite
				fi
				lazily_tested_prereq="$lazily_tested_prereq$prerequisite "
			esac
			;;
		esac

		total_prereq=$(($total_prereq + 1))
		case "$satisfied_prereq" in
		*" $prerequisite "*)
			satisfied_this_prereq=t
			;;
		*)
			satisfied_this_prereq=
		esac

		# "t," = satisfied positive; ",t" = unsatisfied negative — both ok.
		case "$satisfied_this_prereq,$negative_prereq" in
		t,|,t)
			ok_prereq=$(($ok_prereq + 1))
			;;
		*)
			# Keep a list of missing prerequisites; restore
			# the negative marker if necessary.
			prerequisite=${negative_prereq:+!}$prerequisite
			if test -z "$missing_prereq"
			then
				missing_prereq=$prerequisite
			else
				missing_prereq="$prerequisite,$missing_prereq"
			fi
		esac
	done

	test $total_prereq = $ok_prereq
}

# Succeed when $1 appears in the comma-separated $test_prereq list
# declared for the current test.
test_declared_prereq () {
	case ",$test_prereq," in
	*,$1,*)
		return 0
		;;
	esac
	return 1
}
# Run a test that documents a known breakage: the test "passes" when the
# script fails. Usage: test_expect_failure [PREREQ] desc script
# NOTE(review): this ends with `echo >&3 ""` while test_expect_success
# below calls test_finish_ — confirm the asymmetry is intentional.
test_expect_failure () {
	test_start_
	test "$#" = 3 && { test_prereq=$1; shift; } || test_prereq=
	test "$#" = 2 ||
	error "bug in the test script: not 2 or 3 parameters to test-expect-failure"
	export test_prereq
	if ! test_skip "$@"
	then
		say >&3 "checking known breakage: $2"
		if test_run_ "$2" expecting_failure
		then
			test_known_broken_ok_ "$1"
		else
			test_known_broken_failure_ "$1"
		fi
	fi
	echo >&3 ""
}

# Run a normal test. Usage: test_expect_success [PREREQ] desc script
test_expect_success () {
	test_start_
	test "$#" = 3 && { test_prereq=$1; shift; } || test_prereq=
	test "$#" = 2 ||
	error "bug in the test script: not 2 or 3 parameters to test-expect-success"
	export test_prereq
	if ! test_skip "$@"
	then
		say >&3 "expecting success: $2"
		if test_run_ "$2"
		then
			test_ok_ "$1"
		else
			test_failure_ "$@"
		fi
	fi
	test_finish_
}
# test_external runs external test scripts that provide continuous
# test output about their progress, and succeeds/fails on
# zero/non-zero exit code. It outputs the test output on stdout even
# in non-verbose mode, and announces the external script with "# run
# <n>: ..." before running it. When providing relative paths, keep in
# mind that all scripts run in "trash directory".
# Usage: test_external description command arguments...
# Example: test_external 'Perl API' perl ../path/to/test.pl
# Run an external test program; success/failure follows its exit code.
# Usage: test_external [PREREQ] desc command args...
# When $test_external_has_tap is non-zero the child emits TAP itself, so
# only counters are updated instead of printing ok/not-ok lines.
test_external () {
	test "$#" = 4 && { test_prereq=$1; shift; } || test_prereq=
	test "$#" = 3 ||
	error >&5 "bug in the test script: not 3 or 4 parameters to test_external"
	descr="$1"
	shift
	export test_prereq
	if ! test_skip "$descr" "$@"
	then
		# Announce the script to reduce confusion about the
		# test output that follows.
		say_color "" "# run $test_count: $descr ($*)"
		# Export TEST_DIRECTORY, TRASH_DIRECTORY and SYDBOX_TEST_LONG
		# to be able to use them in script
		export TEST_DIRECTORY TRASH_DIRECTORY SYDBOX_TEST_LONG
		# Run command; redirect its stderr to &4 as in
		# test_run_, but keep its stdout on our stdout even in
		# non-verbose mode.
		"$@" 2>&4
		if [ "$?" = 0 ]
		then
			if test $test_external_has_tap -eq 0; then
				test_ok_ "$descr"
			else
				say_color "" "# test_external test $descr was ok"
				test_success=$(($test_success + 1))
			fi
		else
			if test $test_external_has_tap -eq 0; then
				test_failure_ "$descr" "$@"
			else
				say_color error "# test_external test $descr failed: $@"
				test_failure=$(($test_failure + 1))
			fi
		fi
	fi
}
# Like test_external, but in addition tests that the command generated
# no output on stderr.
# Like test_external, but additionally fails when the command wrote
# anything to stderr (captured via fd 4 into a temp file).
test_external_without_stderr () {
	# The temporary file has no (and must have no) security
	# implications.
	tmp=${TMPDIR:-/tmp}
	stderr="$tmp/sydbox-external-stderr.$$.tmp"
	test_external "$@" 4> "$stderr"
	[ -f "$stderr" ] || error "Internal error: $stderr disappeared."
	descr="no stderr: $1"
	shift
	say >&3 "# expecting no stderr from previous command"
	if [ ! -s "$stderr" ]; then
		rm "$stderr"

		if test $test_external_has_tap -eq 0; then
			test_ok_ "$descr"
		else
			say_color "" "# test_external_without_stderr test $descr was ok"
			test_success=$(($test_success + 1))
		fi
	else
		if [ "$verbose" = t ]; then
			output=`echo; echo "# Stderr is:"; cat "$stderr"`
		else
			output=
		fi
		# rm first in case test_failure exits.
		rm "$stderr"

		if test $test_external_has_tap -eq 0; then
			test_failure_ "$descr" "$@" "$output"
		else
			say_color error "# test_external_without_stderr test $descr failed: $@: $output"
			test_failure=$(($test_failure + 1))
		fi
	fi
}
# debugging-friendly alternatives to "test [-f|-d|-e]"
# The commands test the existence or non-existence of $1. $2 can be
# given to provide a more precise diagnosis.
# Succeed when $1 is a regular file; otherwise print a diagnostic
# (including any extra message arguments) and fail.
test_path_is_file () {
	test -f "$1" && return 0
	echo "File $1 doesn't exist. $*"
	false
}

# Succeed when $1 is a directory; otherwise print a diagnostic and fail.
test_path_is_dir () {
	test -d "$1" && return 0
	echo "Directory $1 doesn't exist. $*"
	false
}
# Succeed when $1 does not exist; otherwise show the offending entry
# (plus the arguments as a message) and fail.
# NOTE(review): `$# -ge 1` is always true when a path is passed, so the
# extra echo always fires — presumably `-ge 2` was meant; confirm.
test_path_is_missing () {
	if [ -e "$1" ]
	then
		echo "Path exists:"
		ls -ld "$1"
		if [ $# -ge 1 ]; then
			echo "$*"
		fi
		false
	fi
}
# test_line_count checks that a file has the number of lines it
# ought to. For example:
#
# test_expect_success 'produce exactly one line of output' '
# do something >output &&
# test_line_count = 1 output
# '
#
# is like "test $(wc -l <output) = 1" except that it passes the
# output through when the number of lines is wrong.
# Assert `wc -l <$3` $1 $2 (e.g. test_line_count = 1 output); on
# mismatch, dump the file so the unexpected content is visible.
test_line_count () {
	if test $# != 3
	then
		error "bug in the test script: not 3 parameters to test_line_count"
	elif ! test $(wc -l <"$3") "$1" "$2"
	then
		echo "test_line_count: line count for $3 !$1 $2"
		cat "$3"
		return 1
	fi
}
# This is not among top-level (test_expect_success | test_expect_failure)
# but is a prefix that can be used in the test script, like:
#
# test_expect_success 'complain and die' '
# do something &&
# do something else &&
# test_must_fail git checkout ../outerspace
# '
#
# Writing this as "! git checkout ../outerspace" is wrong, because
# the failure could be due to a segv. We want a controlled failure.
# Run a command that is expected to fail in a controlled way: success,
# death by signal (129..192), "not found" (127) and valgrind errors
# (126) are all reported as test failures; any other non-zero exit is ok.
test_must_fail () {
	"$@"
	exit_code=$?
	if test $exit_code = 0; then
		echo >&2 "test_must_fail: command succeeded: $*"
		return 1
	elif test $exit_code -gt 129 -a $exit_code -le 192; then
		echo >&2 "test_must_fail: died by signal: $*"
		return 1
	elif test $exit_code = 127; then
		echo >&2 "test_must_fail: command not found: $*"
		return 1
	elif test $exit_code = 126; then
		echo >&2 "test_must_fail: valgrind error: $*"
		return 1
	fi
	return 0
}
# Similar to test_must_fail, but tolerates success, too. This is
# meant to be used in contexts like:
#
# test_expect_success 'some command works without configuration' '
# test_might_fail git config --unset all.configuration &&
# do something
# '
#
# Writing "git config --unset all.configuration || :" would be wrong,
# because we want to notice if it fails due to segv.
# Run a command that may fail, but must not die by signal (129..192)
# or be missing (127); tolerates both success and ordinary failure.
test_might_fail () {
	"$@"
	exit_code=$?
	if test $exit_code -gt 129 -a $exit_code -le 192; then
		echo >&2 "test_might_fail: died by signal: $*"
		return 1
	elif test $exit_code = 127; then
		echo >&2 "test_might_fail: command not found: $*"
		return 1
	fi
	return 0
}
# Similar to test_must_fail and test_might_fail, but check that a
# given command exited with a given exit code. Meant to be used as:
#
# test_expect_success 'Merge with d/f conflicts' '
# test_expect_code 1 git merge "merge msg" B master
# '
# Run a command and require that it exits with exactly the given code.
# Usage: test_expect_code <code> <command> [args...]
test_expect_code () {
	want_code=$1
	shift
	"$@"
	exit_code=$?
	test "$exit_code" = "$want_code" && return 0

	echo >&2 "test_expect_code: command exited with $exit_code, we wanted $want_code $*"
	return 1
}
# test_cmp is a helper function to compare actual and expected output.
# You can use it like:
#
# test_expect_success 'foo works' '
# echo expected >expected &&
# foo >actual &&
# test_cmp expected actual
# '
#
# This could be written as either "cmp" or "diff -u", but:
# - cmp's output is not nearly as easy to read as diff -u
# - not all diff versions understand "-u"
# Compare expected vs actual output with the configured diff tool.
# $SYDBOX_TEST_CMP is deliberately unquoted so it may carry options
# (e.g. "diff -u").
test_cmp() {
	$SYDBOX_TEST_CMP "$@"
}
# Check if the file expected to be empty is indeed empty, and barfs
# otherwise.
# Fail (and show the contents) when the file given in $1 is non-empty.
test_must_be_empty () {
	test -s "$1" || return 0
	echo "'$1' is not empty, it contains:"
	cat "$1"
	return 1
}
# Print a sequence of numbers or letters in increasing order. This is
# similar to GNU seq(1), but the latter might not be available
# everywhere (and does not do letters). It may be used like:
#
# for i in `test_seq 100`; do
# for j in `test_seq 10 20`; do
# for k in `test_seq a z`; do
# echo $i-$j-$k
# done
# done
# done
# Print an inclusive sequence, one item per line: test_seq [FIRST] LAST.
# With one argument the sequence starts at 1. Implemented via perl's
# range operator so letter ranges (a..z) work too.
test_seq () {
	case $# in
	1)	set 1 "$@" ;;
	2)	;;
	*)	error "bug in the test script: not 1 or 2 parameters to test_seq" ;;
	esac
	perl -le 'print for $ARGV[0]..$ARGV[1]' -- "$@"
}
# This function can be used to schedule some commands to be run
# unconditionally at the end of the test to restore sanity:
#
# test_expect_success 'test core.capslock' '
# git config core.capslock true &&
# test_when_finished "git config --unset core.capslock" &&
# hello world
# '
#
# That would be roughly equivalent to
#
# test_expect_success 'test core.capslock' '
# git config core.capslock true &&
# hello world
# git config --unset core.capslock
# '
#
# except that the greeting and config --unset must both succeed for
# the test to pass.
#
# Note that under --immediate mode, no clean-up is done to help diagnose
# what went wrong.
# Prepend a cleanup command to $test_cleanup; commands run LIFO at the
# end of the test, and each must succeed for the test to pass. The
# escaped $eval_ret plumbing preserves the preceding command's status.
test_when_finished () {
	test_cleanup="{ $*
		} && (exit \"\$eval_ret\"); eval_ret=\$?; $test_cleanup"
}

# Run the configured perl binary; `command` bypasses functions/aliases.
perl () {
	command "$PERL_PATH" "$@"
}
|
# Build an AWS Lambda layer zip containing the sharp native module.
# Fixes: quote $DIR/$TARBALL/$SHARP_VERSION expansions (SC2086) and fail
# fast when the cd does not succeed (SC2164).
# Variables
SHARP_VERSION=$(npm show sharp version)
NODE_VERSION=10.16.3
# NOTE(review): SHARP_DIRECTORY is unused in this chunk — possibly
# consumed by a sibling script; left in place.
SHARP_DIRECTORY=sharp-$SHARP_VERSION
TARBALL=sharp-$SHARP_VERSION-aws-lambda-linux-x64-node-$NODE_VERSION.zip

# current dir where the build.sh is located
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
cd "$DIR" || exit 1

# https://docs.aws.amazon.com/en_pv/lambda/latest/dg/configuration-layers.html#configuration-layers-path
# NPM install sharp
npm install --production --prefix ./nodejs "sharp@$SHARP_VERSION"

# tarball the resulting node_modules
zip -r "$TARBALL" nodejs
mv "$TARBALL" out

# Clean up
# rm -rf nodejs/node_modules
|
# Shape outbound peer-to-peer traffic with HTB: all traffic defaults to
# the full-rate class 1:10; packets firewall-marked "2" fall into the
# rate-limited class 1:11.
#network interface on which to limit traffic
IF="eth0"
#limit of the network interface in question
LINKCEIL="1gbit"
#limit outbound Bitcoin protocol traffic to this rate
LIMIT="160kbit"
#defines the address space for which you wish to disable rate limiting
LOCALNET="192.168.0.0/16"

#delete existing rules
# NOTE(review): this del fails (harmlessly, no set -e) when no qdisc is
# installed yet — confirm the noise on first run is acceptable.
tc qdisc del dev ${IF} root

#add root class
tc qdisc add dev ${IF} root handle 1: htb default 10

#add parent class
tc class add dev ${IF} parent 1: classid 1:1 htb rate ${LINKCEIL} ceil ${LINKCEIL}

#add our two classes. one unlimited, another limited
tc class add dev ${IF} parent 1:1 classid 1:10 htb rate ${LINKCEIL} ceil ${LINKCEIL} prio 0
tc class add dev ${IF} parent 1:1 classid 1:11 htb rate ${LIMIT} ceil ${LIMIT} prio 1

#add handles to our classes so packets marked with <x> go into the class with "... handle <x> fw ..."
tc filter add dev ${IF} parent 1: protocol ip prio 1 handle 1 fw classid 1:10
tc filter add dev ${IF} parent 1: protocol ip prio 2 handle 2 fw classid 1:11

#delete any existing rules
#disable for now
#ret=0
#while [ $ret -eq 0 ]; do
#	iptables -t mangle -D OUTPUT 1
#	ret=$?
#done

#limit outgoing traffic to and from port 30229. but not when dealing with a host on the local network
#  (defined by $LOCALNET)
#  --set-mark marks packages matching these criteria with the number "2"
#  these packages are filtered by the tc filter with "handle 2"
#  this filter sends the packages into the 1:11 class, and this class is limited to ${LIMIT}
iptables -t mangle -A OUTPUT -p tcp -m tcp --dport 30229 ! -d ${LOCALNET} -j MARK --set-mark 0x2
iptables -t mangle -A OUTPUT -p tcp -m tcp --sport 30229 ! -d ${LOCALNET} -j MARK --set-mark 0x2
|
#!/bin/bash
# Run each process-creation benchmark (fork/vfork/clone/vclone) for a
# range of iteration counts, appending all output to result.txt.
Ns=(10000 40000 100000 200000)
for n in "${Ns[@]}"; do
	./fork "$n" >> result.txt
	./vfork "$n" >> result.txt
	./clone "$n" >> result.txt
	./vclone "$n" >> result.txt
done
|
<reponame>asvyazin/purescript-screeps
// PureScript FFI thunk: enables the PathFinder for all built-in pathing.
// PathFinder is a Screeps runtime global -- NOTE(review): PathFinder.use
// is deprecated/removed in newer Screeps API versions; confirm the target
// server still supports it.
exports.usePathFinder = function () {
    return PathFinder.use(true);
}
// Rebuild a CostMatrix from its serialized (JSON) form. Curried and
// wrapped in a thunk so the PureScript side can treat it as an effect.
exports.deserialize = function (json) {
    return function () {
        return PathFinder.CostMatrix.deserialize(json);
    }
}
// Curried FFI wrapper for PathFinder.search(from, to, opts).
// The PureScript record is shallow-copied so the effectful roomCallback
// (a PureScript Eff, i.e. a function returning a thunk) can be unwrapped
// into the plain JS callback the Screeps API expects.
exports.search = function (from) {
    return function (to) {
        return function (opts) {
            var clonedOpts = {
                // unwrap: call the PS callback, then run the returned thunk
                roomCallback: function (rn) { return opts.roomCallback(rn)(); }
                , plainCost: opts.plainCost
                , swampCost: opts.swampCost
                , flee: opts.flee
                , maxOps: opts.maxOps
                , maxRooms: opts.maxRooms
                , maxCost: opts.maxCost
                , heuristicWeight: opts.heuristicWeight
            };
            // effects are deferred: the search only runs when the outer
            // thunk is forced
            return function () {
                return PathFinder.search(from, to, clonedOpts);
            }
        }
    }
}
// Effectful constructor for an empty (all-zero) CostMatrix.
exports.newCostMatrix = function () {
    return new PathFinder.CostMatrix;
}

// Infinity, exposed for use as an "unreachable" cost on the PS side.
exports.infinity = Number.POSITIVE_INFINITY;
|
'use strict';

const semver = require('semver');

//values from http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutRetentionPolicy.html
const validRetentionInDays = [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653];

/**
 * Serverless plugin that stamps a RetentionInDays property onto each
 * function's AWS::Logs::LogGroup in the compiled CloudFormation template.
 * A per-function `logRetentionInDays` setting wins over the global
 * `custom.logRetentionInDays`.
 */
class AwsAddLogRetention {
    /**
     * @param {Object} serverless - the serverless framework instance
     * @param {Object} options - CLI options
     * @throws {Error} when running on a serverless version older than 1.20.2
     */
    constructor(serverless, options) {
        if (!semver.satisfies(serverless.version, '>= 1.20.2')) {
            throw new Error('serverless-plugin-log-retention requires serverless 1.20.2 or higher');
        }
        this.serverless = serverless;
        this.options = options;
        this.provider = this.serverless.getProvider('aws');
        // Run just before the deployment artifacts are packaged so the
        // compiled template already contains the log-group resources.
        this.hooks = {
            'package:createDeploymentArtifacts': this.beforeDeploy.bind(this),
        };
    }

    /**
     * Coerce and validate a retention setting against the values CloudWatch
     * accepts.
     * @param {*} inputValue - raw user-supplied value
     * @returns {number} the validated retention in days
     * @throws {Error} when the value is not one of validRetentionInDays
     */
    sanitizeRetentionValue(inputValue) {
        const value = Number(inputValue);
        if(Number.isInteger(value) && validRetentionInDays.includes(value)) {
            return value;
        } else {
            throw new Error(`RetentionInDays value must be one of ${validRetentionInDays}`);
        }
    }

    /**
     * Walk the compiled template and set RetentionInDays on every log group
     * that belongs to one of the service's functions.
     * @param {?number} globalLogRetentionInDays - already-sanitized global
     *     default, or null when no global setting exists
     */
    addLogRetentionForFunctions(globalLogRetentionInDays) {
        const service = this.serverless.service;
        const template = service.provider.compiledCloudFormationTemplate;
        if (typeof service.functions !== 'object' || typeof template.Resources !== 'object') {
            return;
        }
        Object.keys(template.Resources).forEach((logGroupLogicalId) => {
            const resource = template.Resources[logGroupLogicalId];
            if (resource.Type === 'AWS::Logs::LogGroup') {
                // Match the log group back to its function via the provider's
                // logical-ID naming convention.
                const functionName = Object.keys(service.functions).find(functionName => {
                    return this.provider.naming.getLogGroupLogicalId(functionName) === logGroupLogicalId;
                });
                if (!functionName) return;
                const localLogRetentionInDays = service.functions[functionName].logRetentionInDays;
                if (!localLogRetentionInDays && !globalLogRetentionInDays) {
                    return;
                }
                // Per-function setting (sanitized here) overrides the global
                // one (sanitized earlier in beforeDeploy).
                resource.Properties.RetentionInDays = localLogRetentionInDays ?
                    this.sanitizeRetentionValue(localLogRetentionInDays) :
                    globalLogRetentionInDays;
            }
        });
    }

    /** Hook entry point: resolve the global setting, then patch the template. */
    beforeDeploy() {
        const service = this.serverless.service;
        const globalLogRetentionInDays = service.custom && service.custom.logRetentionInDays
            ? this.sanitizeRetentionValue(service.custom.logRetentionInDays)
            : null;
        this.addLogRetentionForFunctions(globalLogRetentionInDays);
    }
}

module.exports = AwsAddLogRetention;
|
'use strict';

const passport = require('passport');
const LocalStrategy = require('passport-local').Strategy;
const DBWrapper = require('./DBWrapper').createDBWrapper();
const PasswordHelper = require('./PasswordHelper').createPasswordHelper();

/**
 * Wraps passport with a username/password ("local") strategy backed by the
 * `user` table, verifying passwords via PasswordHelper (salted hash).
 */
var PassportWrapper = function(){
    this.passport = passport;
    this.passport.use('local', new LocalStrategy({
        passReqToCallback: true  // needed so the verify callback can use req.flash
    },
    function(req, username, password, done) {
        // Look the user up by name; parameterized query avoids SQL injection.
        DBWrapper.db.query('Select * from user where name=?;', [username], function(err, rows, fields) {
            if (err) {
                return done(err);
            }
            else if (rows.length == 0) {
                return done(null, false, { message: req.flash('loginMessage', 'Incorrect username. If you do not have an account yet, please follow the link above to sign up to the system.') });
            }
            else {
                // Re-hash the supplied password with the stored salt and
                // compare against the stored hash.
                PasswordHelper.verify(password, rows[0].PwdHash, rows[0].PwdSalt, function(err, result) {
                    if(err){
                        return done(null, false, { message: req.flash('loginMessage', 'Something went wrong.') });
                    }else if(!result){
                        return done(null, false, { message: req.flash('loginMessage', 'Incorrect password. If you do not have an account yet, please follow the link above to sign up to the system.') });
                    }
                    var user = rows[0];
                    return done(null, user);
                });
            }
        });
    }
    ));
    // The whole user row (including PwdHash/PwdSalt) is stored in the
    // session -- NOTE(review): consider serializing only the user id to
    // keep credential material out of the session store.
    this.passport.serializeUser(function(user, done) {
        done(null, user);
    });
    this.passport.deserializeUser(function(user, done) {
        done(null, user);
    });
}

/**
 * Express middleware: pass through when the session is authenticated,
 * otherwise redirect to the home page.
 */
PassportWrapper.prototype.isLoggedIn =function (req, res, next) {
    // if user is authenticated in the session, carry on
    if (req.isAuthenticated())
        return next();
    // if they aren't redirect them to the home page
    res.redirect('/');
}

/**
 * Factory function
 *
 * @returns {PassportWrapper}
 */
function createPassportWrapper() {
    return new PassportWrapper();
}

module.exports = {
    createPassportWrapper : createPassportWrapper
};
|
package com.projects.tradingMachine.services.database.sql;
import java.sql.CallableStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.Properties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.projects.tradingMachine.services.database.DataManager;
import com.projects.tradingMachine.services.database.DatabaseProperties;
import com.projects.tradingMachine.utility.Utility;
import com.projects.tradingMachine.utility.marketData.MarketData;
import com.projects.tradingMachine.utility.order.OrderSide;
import com.projects.tradingMachine.utility.order.OrderTimeInForce;
import com.projects.tradingMachine.utility.order.OrderType;
import com.projects.tradingMachine.utility.order.SimpleOrder;
/**
 * MySQL-backed {@link DataManager} that persists and loads orders through
 * the {@code addOrder} and {@code getOrders} stored procedures on a shared
 * {@link MySqlConnection}.
 */
public class MySqlManager implements DataManager {
    private static Logger logger = LoggerFactory.getLogger(MySqlManager.class);
    // Connection wrapper supplied by the caller; released in close().
    private final MySqlConnection mySqlConnection;

    public MySqlManager(final MySqlConnection mySqlConnection) {
        this.mySqlConnection = mySqlConnection;
    }

    /**
     * Persists one order via the addOrder stored procedure. Only the price
     * column matching the order type is populated (limit price for LIMIT,
     * stop price for STOP); the other is stored as SQL NULL, and market
     * orders store NULL for both.
     *
     * @throws RuntimeException wrapping any persistence failure (also logged)
     */
    @Override
    public void storeOrder(final SimpleOrder order) {
        try(final CallableStatement stm = mySqlConnection.getConnection().prepareCall("{call addOrder(?,?,?,?,?,?,?,?,?)}")) {
            stm.setString(1, order.getID());
            stm.setString(2, order.getSymbol());
            stm.setInt(3, order.getQuantity());
            stm.setString(4, order.getSide().toString());
            stm.setString(5, order.getType().toString());
            stm.setString(6, order.getTimeInForce().toString());
            switch(order.getType()) {
            case LIMIT:
                stm.setDouble(7, order.getLimit());
                stm.setNull(8, java.sql.Types.DOUBLE);
                break;
            case STOP:
                stm.setNull(7, java.sql.Types.DOUBLE);
                stm.setDouble(8, order.getStop());
                break;
            default:
                stm.setNull(7, java.sql.Types.DOUBLE);
                stm.setNull(8, java.sql.Types.DOUBLE);
            }
            stm.setDouble(9, order.getAvgPx());
            stm.execute();
        }
        catch(final Exception ex) {
            logger.warn("Failed to store order "+order+", due to: "+ex.getMessage());
            throw new RuntimeException(ex);
        }
    }

    /**
     * Loads orders via the getOrders stored procedure.
     *
     * @param orderType filter to one order type, or empty to fetch all
     *     (passed to the procedure as SQL NULL)
     * @return the materialized orders; the ResultSet is implicitly closed
     *     when the statement closes
     * @throws RuntimeException wrapping any SQLException
     */
    @Override
    public List<SimpleOrder> getOrders(final Optional<OrderType> orderType) {
        logger.info("Starting to get orders data...");
        final List<SimpleOrder> result = new ArrayList<SimpleOrder>();
        try(final CallableStatement stm = mySqlConnection.getConnection().prepareCall("{call getOrders (?)}")) {
            if (orderType.isPresent())
                stm.setString(1, orderType.get().toString());
            else
                stm.setNull(1, java.sql.Types.VARCHAR);
            final ResultSet rs = stm.executeQuery();
            while (rs.next())
                result.add(new SimpleOrder(rs.getString("ID"), rs.getString("symbol"), rs.getInt("quantity"), OrderSide.fromString(rs.getString("side")),
                        OrderType.fromString(rs.getString("type")), OrderTimeInForce.fromString(rs.getString("time_in_force")),
                        rs.getDouble("limit_price"), rs.getDouble("stop_price"), rs.getDouble("price"), rs.getString("original_id"), rs.getDate("fill_date")));
        }
        catch(final SQLException e) {
            throw new RuntimeException(e);
        }
        logger.info("Number of orders retrieved: "+result.size());
        return result;
    }

    /** Market-data persistence is not supported by the SQL backend. */
    @Override
    public void addMarketDataItems(final List<MarketData> marketDataItems, final boolean deleteFirst) {
        throw new UnsupportedOperationException("Not implemented yet.");
    }

    @Override
    public void close() throws Exception {
        mySqlConnection.close();
    }

    /** Ad-hoc smoke test: prints summary stats for all stored orders. */
    public static void main(final String[] args) throws NumberFormatException, ClassNotFoundException, SQLException, Exception {
        final Properties p = Utility.getApplicationProperties("tradingMachineServices.properties");
        try(final DataManager mySqlManager = new MySqlManager(new MySqlConnection(new DatabaseProperties(p.getProperty("mySQL.host"), Integer.valueOf(p.getProperty("mySQL.port")), p.getProperty("mySQL.database"),
                p.getProperty("mySQL.userName"), p.getProperty("mySQL.password"))))) {
            //System.out.println(mySqlManager.getOrders(Optional.of(OrderType.STOP)).stream().mapToDouble(SimpleOrder::getAvgPx).summaryStatistics());
            System.out.println(mySqlManager.getOrders(Optional.ofNullable(null)).stream().mapToDouble(SimpleOrder::getAvgPx).summaryStatistics());
            //mongoDBManager.getOrders(Optional.of(OrderType.LIMIT)).stream().map(SimpleOrder::getAvgPx).forEach(System.out::println);
        }
    }
}
|
import os.path
import page_finder
from util import extract_all_links
# __file__ is undefined when this code is exec()'d (e.g. some interactive
# runners); fall back to a path relative to the current working directory.
try:
    FILE = __file__
except NameError:
    FILE = './tests'

# Test-data root: overridable via the TESTPATH environment variable,
# defaulting to the directory containing this file.
TESTDIR = os.getenv('TESTPATH',
                    os.path.dirname(os.path.realpath(FILE)))
def get_local_url(filename):
    """Return a file:// URL for *filename* inside the test data directory."""
    data_dir = os.path.join(TESTDIR, 'data')
    return 'file:///{0}/{1}'.format(data_dir, filename)
def test_hnews():
    """Pagination links on saved Hacker News pages rank highest after one
    marked example, and survive pruning the annotation store."""
    link_annotation = page_finder.LinkAnnotation()
    link_annotation.load(
        extract_all_links(get_local_url('Hacker News 1.html')))
    # Teach the annotator that the "next page" link is the one we follow.
    link_annotation.mark_link('https://news.ycombinator.com/news?p=2')
    link_annotation.load(
        extract_all_links(get_local_url('Hacker News 2.html')))
    best = link_annotation.best_links_to_follow()
    assert(best[0] == 'https://news.ycombinator.com/news?p=2')
    assert(best[1] == 'https://news.ycombinator.com/news?p=3')
    # Pruning keeps at most 100 links.
    # NOTE(review): 'best' is not recomputed after prune(); the two asserts
    # below re-check the pre-prune list, so they only verify prune() did not
    # mutate it -- confirm that is the intended coverage.
    link_annotation.prune(100)
    assert(len(link_annotation.links) <= 100)
    assert(best[0] == 'https://news.ycombinator.com/news?p=2')
    assert(best[1] == 'https://news.ycombinator.com/news?p=3')
def test_equal_distance():
    """With a single marked link and no page structure, every later link is
    considered equally worth following."""
    link_annotation = page_finder.LinkAnnotation()
    link_annotation.mark_link('http://page_1')
    more_links = [
        'http://page_2',
        'http://page_3',
        'http://page_4',
        'http://page_5',
        'http://page_6',
        'http://page_7',
    ]
    link_annotation.load(more_links)
    for link in more_links:
        assert link_annotation.is_follow_link(link)
|
#!/usr/bin/env bash
set -o errexit
# test_import EXTRA_PKGS IMPORT_STMT
# Creates a throw-away conda env with only the minimal dask deps plus
# EXTRA_PKGS, installs the checkout, and verifies IMPORT_STMT succeeds.
# NOTE(review): 'conda activate' requires the conda shell hook to be
# sourced in this shell -- confirm the CI image does that.
test_import () {
    echo "Create environment: python=$PYTHON_VERSION $1"
    # Create an empty environment
    # shellcheck disable=SC2086 -- $1 is a space-separated package list that
    # must word-split into separate conda arguments (and expand to nothing
    # when empty)
    conda create -q -y -n test-imports -c conda-forge python="$PYTHON_VERSION" pyyaml fsspec toolz partd cloudpickle $1
    conda activate test-imports
    pip install -e .
    echo "python -c '$2'"
    python -c "$2"
    conda deactivate
    conda env remove -n test-imports
}
# Each optional dask subsystem must import with only its one extra
# dependency installed (and the core with none at all).
test_import "" "import dask, dask.base, dask.multiprocessing, dask.threaded, dask.optimization, dask.bag, dask.delayed, dask.graph_manipulation, dask.layers"
test_import "numpy" "import dask.array"
test_import "pandas" "import dask.dataframe"
test_import "bokeh" "import dask.diagnostics"
test_import "distributed" "import dask.distributed"
|
def sort_strings_alphabetically(strings):
    """Return a new list with the strings in ascending lexicographic order.

    The original implementation sorted the caller's list in place
    (list.sort) before returning it -- a surprising side effect for a
    function whose name promises only a result. sorted() leaves the
    input untouched.

    Args:
        strings: iterable of strings (any comparable items work).

    Returns:
        A new sorted list; the input is not modified.
    """
    return sorted(strings)
if __name__ == "__main__":
    # Demo: sort a sample list and print the result.
    strings = ["hello", "bye", "world", "how", "are", "you"]
    sorted_strings = sort_strings_alphabetically(strings)
    print(sorted_strings)
|
#!/bin/bash
# Bootstraps the pcr-channel network: creates the channel, joins peer0 of
# each org, registers each org's anchor peer, installs the chaincode on
# every peer, and finally instantiates it. The original script repeated
# the same four-export block nine times; that is factored into
# set_org_context below, with identical command ordering and output.

export CHANNEL_NAME=pcr-channel

# Shared crypto-material locations inside the CLI container.
CRYPTO=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto
ORDERER_CA=${CRYPTO}/ordererOrganizations/rbi.com/orderers/orderer.rbi.com/msp/tlscacerts/tlsca.rbi.com-cert.pem

# peer0 ports, indexed by org number (org0..org2).
PORTS=(7051 8051 9051)

# set_org_context <org-index> <peer-port>
# Points the peer CLI at peer0 of orgN by exporting the CORE_PEER_*
# environment variables, exactly as the original inline export blocks did.
set_org_context() {
    local org="org$1"
    local port=$2
    export CORE_PEER_MSPCONFIGPATH=${CRYPTO}/peerOrganizations/${org}.rbi.com/users/Admin@${org}.rbi.com/msp
    export CORE_PEER_ADDRESS=peer0.${org}.rbi.com:${port}
    export CORE_PEER_LOCALMSPID="Org$1MSP"
    export CORE_PEER_TLS_ROOTCERT_FILE=${CRYPTO}/peerOrganizations/${org}.rbi.com/peers/peer0.${org}.rbi.com/tls/ca.crt
}

echo "Creating channel"
peer channel create -o orderer.rbi.com:7050 -c "$CHANNEL_NAME" -f ./channel-artifacts/channel.tx --tls --cafile "$ORDERER_CA"

# Join peer0 of each org to the channel.
for i in 0 1 2; do
    echo "Joining Channel org$i"
    set_org_context "$i" "${PORTS[$i]}"
    peer channel join -b pcr-channel.block
    echo "Joined Channel org$i"
done

# Register each org's anchor peer.
for i in 0 1 2; do
    echo "Adding anchor peer: org$i"
    set_org_context "$i" "${PORTS[$i]}"
    peer channel update -o orderer.rbi.com:7050 -c "$CHANNEL_NAME" -f "./channel-artifacts/Org${i}MSPanchors.tx" --tls --cafile "$ORDERER_CA"
    echo "Added anchor peer: org$i"
done

# Install the chaincode on every peer.
for i in 0 1 2; do
    echo "Installing Smart Contract: org$i"
    set_org_context "$i" "${PORTS[$i]}"
    peer chaincode install -n mycc -v 1.0 -l node -p /opt/gopath/src/github.com/chaincode/
    echo "Installed Smart Contract: org$i"
done

# Instantiate once; the loop above leaves org2's context active, matching
# the original script, which instantiated with org2's environment exported.
echo "Instantiating Smart Contract: org2"
peer chaincode instantiate -o orderer.rbi.com:7050 --tls --cafile "$ORDERER_CA" -C "$CHANNEL_NAME" -n mycc -l node -v 1.0 -c '{"Args":[]}' -P "AND ('Org0MSP.member')"
echo "Instantiated Smart Contract: org2"
|
/**
 * Return every permutation of the characters in `string`.
 * Recursive: fix each character in turn as the head, permute the rest.
 * Duplicated characters yield duplicated permutations, as before.
 *
 * @param {string} string - input string
 * @returns {string[]} all permutations (empty array for the empty string)
 */
function getPermutations(string) {
    if (string.length === 1) {
        return [string];
    }
    const permutations = [];
    for (let index = 0; index < string.length; index++) {
        const head = string[index];
        const rest = string.slice(0, index) + string.slice(index + 1);
        for (const tail of getPermutations(rest)) {
            permutations.push(head + tail);
        }
    }
    return permutations;
}
// Demo invocation.
console.log(getPermutations('abc'))
// Output: ["abc", "acb", "bac", "bca", "cab", "cba"]
|
#!/bin/bash
# Roll out a new image to an existing Kubernetes deployment using a
# base64-encoded kubeconfig supplied via the environment.

# exit when any command fails
set -e

# keep track of the last executed command
trap 'last_command=$current_command; current_command=$BASH_COMMAND' DEBUG
# echo an error message before exiting -- but only when something actually
# failed. The original printed the message unconditionally on EXIT, so a
# bogus "failed" line (with code 0) appeared even on success; it also
# misspelled "failed" as "filed".
trap 'code=$?; if [ "$code" -ne 0 ]; then echo "\"${last_command}\" command failed with exit code $code."; fi' EXIT

# Extract the base64 encoded config data and write this to the KUBECONFIG.
# NOTE(review): /tmp/config is a fixed, world-readable path holding cluster
# credentials; consider mktemp with restrictive permissions.
echo "$KUBE_CONFIG_DATA" | base64 --decode > /tmp/config
export KUBECONFIG=/tmp/config

export NAMESPACE="$KUBE_NAMESPACE"
export IMAGE="$KUBE_IMAGE"
export NAME="$SELECTOR_NAME"

kubectl config current-context
echo "${NAMESPACE}"
echo "${IMAGE}"
echo "${NAME}"

kubectl get deployments "${NAME}" --namespace="${NAMESPACE}"
kubectl set image "deployments/${NAME}" "${NAME}=${IMAGE}" --namespace="${NAMESPACE}" --record
kubectl rollout restart deployment "${NAME}" --namespace="${NAMESPACE}"
#kubectl delete pods -l app=${NAME} --namespace=${NAMESPACE}
|
class Histogram:
    """Collects hashable data points and renders them as a text histogram."""

    def __init__(self):
        # All recorded data points, in insertion order.
        self.data_points = []

    def add_data_point(self, value):
        """Record a single data point."""
        self.data_points.append(value)

    def generate_histogram(self):
        """Render one ``value: ***`` line per distinct data point.

        Lines are sorted by data-point value; the number of ``*`` characters
        equals the number of occurrences.

        Returns:
            str: the histogram, each line terminated by a newline.

        Raises:
            TypeError: if no data points have been added. (Kept for
                backward compatibility with the original implementation,
                although ValueError would be more idiomatic.)
        """
        if not self.data_points:
            raise TypeError("Empty histogram")
        # Counter replaces the hand-rolled dict-increment loop.
        from collections import Counter
        counts = Counter(self.data_points)
        return "".join(
            f"{key}: {'*' * count}\n" for key, count in sorted(counts.items())
        )
|
<filename>src/main.js
// Application entry point: wires up the redux store, browser history and
// renders the root container.
import 'babel-polyfill';
import './scss/style.scss';
import React from 'react';
import ReactDom from 'react-dom';
import { browserHistory } from 'react-router';
import { syncHistoryWithStore } from 'react-router-redux';
import configureStore from './store/configureStore';
import bootstrapStore from './store/bootstrapStore';
import Root from './containers/Root';
import moment from 'moment';

// Global localize moment for formatting stuff
moment.locale('it');

const store = configureStore();
// Keep router state mirrored into the redux store.
const history = syncHistoryWithStore(browserHistory, store);

// Fill store with various stuff
bootstrapStore(store);

ReactDom.render(
    <Root store={store} history={history} />,
    document.getElementById('root')
);
|
#!/bin/sh
#
# Skip the test if arch+kernel combination is not supported.
#
# Copyright (c) 2016 Dmitry V. Levin <ldv@altlinux.org>
# Copyright (c) 2016-2017 The strace developers.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
. "${srcdir=.}/init.sh"

# Kernel release string; not referenced directly below --
# NOTE(review): confirm whether helpers sourced from init.sh rely on it
# before removing.
uname_r="$(uname -r)"

# Skip the test unless the running kernel supports syscall-number
# tampering on this architecture.
case "$STRACE_ARCH" in
	arm)
	# PTRACE_SET_SYSCALL is supported by linux kernel
	# starting with commit v2.6.16-rc1~107^2.
	require_min_kernel_version_or_skip 2.6.16 ;;
	aarch64)
	# NT_ARM_SYSTEM_CALL regset is supported by linux kernel
	# starting with commit v3.19-rc1~59^2~16.
	require_min_kernel_version_or_skip 3.19 ;;
	hppa)
	# Syscall number and return value modification did not work
	# properly before commit v4.5-rc7~31^2~1.
	require_min_kernel_version_or_skip 4.5 ;;
	sparc*)
	# Reloading the syscall number from %g1 register is supported
	# by linux kernel starting with commit v4.5-rc7~35^2~3.
	require_min_kernel_version_or_skip 4.5 ;;
	mips)
	# Only the native ABI is supported by the kernel properly, see
	# https://sourceforge.net/p/strace/mailman/message/35587571/
	msg_prefix="mips $MIPS_ABI scno tampering does not work"
	uname_m="$(uname -m)"
	case "$MIPS_ABI:$uname_m" in
		n64:mips64) ;;
		o32:mips)
			# is it really mips32?
			if ../is_linux_mips_n64; then
				skip_ "$msg_prefix on mips n64 yet"
			fi
			;;
		*) skip_ "$msg_prefix on $uname_m yet" ;;
	esac ;;
esac
|
public class Task {
    /** Marks this task as done by printing a confirmation message. */
    public void done() {
        final String message = "Task marked as done";
        System.out.println(message);
    }
}
|
<reponame>horowitz2009/BBGun<filename>src/com/horowitz/bigbusiness/model/Deserializable.java
package com.horowitz.bigbusiness.model;

/**
 * Implemented by model objects that need a hook after deserialization.
 */
public interface Deserializable {
    /**
     * Invoked after the object has been deserialized.
     *
     * @param transientObjects objects used to restore state --
     *     NOTE(review): exact contents inferred from the name only; confirm
     *     against the implementations.
     * @throws Exception if post-deserialization setup fails
     */
    void postDeserialize(Object[] transientObjects) throws Exception;
}
|
class TreeNode:
    """Binary tree node holding a value and optional left/right children."""

    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None


def build_binary_tree(integers):
    """Build a binary tree from a level-order list with None placeholders.

    Index i's children live at indices 2*i+1 and 2*i+2; None marks a
    missing node. The original implementation advanced a flat counter `i`
    once per popped node, which desynchronized from the array index as soon
    as a None placeholder appeared (children of later nodes were looked up
    at the wrong indices) and could raise IndexError popping from an empty
    queue when the list had trailing Nones. Tracking each node together
    with its own array index fixes both.

    Args:
        integers: level-order values, None for absent nodes.

    Returns:
        The root TreeNode, or None for an empty (or None-rooted) input.
    """
    if not integers or integers[0] is None:
        return None
    root = TreeNode(integers[0])
    n = len(integers)
    queue = [(root, 0)]  # (node, its index in `integers`)
    while queue:
        node, idx = queue.pop(0)
        left_i = 2 * idx + 1
        right_i = 2 * idx + 2
        if left_i < n and integers[left_i] is not None:
            node.left = TreeNode(integers[left_i])
            queue.append((node.left, left_i))
        if right_i < n and integers[right_i] is not None:
            node.right = TreeNode(integers[right_i])
            queue.append((node.right, right_i))
    return root
|
#ifndef DVDSHOP_REGULARMOVIE_H
#define DVDSHOP_REGULARMOVIE_H
#include "Movie.h"

// Movie with the standard (non-promotional) pricing scheme.
class RegularMovie : public Movie
{
public:
    // Constructs a regular movie with the given title.
    RegularMovie(const std::string& title);
    // Rental charge for the given number of days rented.
    // NOTE(review): the pricing formula lives in the .cpp, not visible here.
    double determine_amount(int days_rented) const override;
    // Frequent-renter points earned for the rental.
    int determine_points(int days_rented) const override;
};
#endif//DVDSHOP_REGULARMOVIE_H
|
# Log in to the campus captive portal using this machine's MAC and IP.
adapter="en0" # Adapter name (was misspelled 'adpater'; internal variable only)
# NOTE(review): plain-text credentials embedded in the script and sent in
# a GET query string -- consider reading them from the environment or a
# permission-restricted file.
username="username"
password="password"

mac=$(ifconfig "${adapter}" | grep "ether " | awk '{print $2}')
ip=$(ifconfig "${adapter}" | grep "inet " | awk '{print $2}')

# The MAC's colons must be URL-encoded (%3A). The original URL contained
# the mojibake '×tamp=' -- an '&times' HTML-entity corruption of
# '&timestamp=' -- which silently dropped both the 'timestamp' parameter
# and mangled the one before it; restored here.
response=$(curl -s "http://10.69.69.72/quickauth.do?userid=${username}&passwd=${password}&wlanuserip=${ip}&wlanacname=NFV-BASE1&wlanacIp=&ssid=&vlan=&mac=${mac//:/%3A}&version=0&portalpageid=1&timestamp=&uuid=&portaltype=")

# Crudely extract the numeric status code from the JSON-ish response.
code=$(echo "${response}" | awk -F ',' '{print $1}' | cut -c 2- | awk -F ':' '{print $2}' | cut -c 2)

if [ "$code" != 0 ]
then
    # Non-zero code: surface the server-provided error message.
    message=$(echo "${response}" | awk -F ',' '{print $3}' | cut -c 2- | awk -F ':' '{print $2}')
    echo "${message}. ${code}"
fi
|
import numpy as np
from typing import Tuple, Dict
import scipy.linalg
def eigen_solver(matrix: np.ndarray, n_eigenpairs: int, is_symmetric: bool) -> Tuple[np.ndarray, np.ndarray]:
    """Compute eigenvalues and eigenvectors of a dense matrix.

    Args:
        matrix: square matrix to decompose (assumed finite; check_finite
            is disabled for speed).
        n_eigenpairs: number of requested eigenpairs. Currently the full
            decomposition is computed in every case; see the note below.
        is_symmetric: use the symmetric/Hermitian solver (eigh) when True,
            the general solver (eig) otherwise.

    Returns:
        Tuple of (eigenvalues, eigenvectors) as returned by the scipy
        solver (eigh yields ascending real eigenvalues; eig yields
        complex eigenvalues in unspecified order).
    """
    # Select the dense solver once, up front. The original code selected
    # it only inside the n_eigenpairs == matrix.shape[1] branch, so the
    # partial-request branch crashed with a NameError on
    # 'scipy_eigvec_solver' / 'solver_kwargs'.
    if is_symmetric:
        scipy_eigvec_solver = scipy.linalg.eigh
    else:
        scipy_eigvec_solver = scipy.linalg.eig
    solver_kwargs: Dict[str, object] = {
        "check_finite": False
    }  # should be already checked

    if n_eigenpairs == matrix.shape[1]:
        eigenvalues, eigenvectors = scipy_eigvec_solver(matrix, **solver_kwargs)
    else:
        # Partial request: per the original comment, fall back to the same
        # dense solver computing all eigenpairs. TODO(review): a sparse /
        # iterative solver (scipy.sparse.linalg.eigs/eigsh) and truncation
        # to n_eigenpairs could be substituted here.
        eigenvalues, eigenvectors = scipy_eigvec_solver(matrix, **solver_kwargs)
    return eigenvalues, eigenvectors
|
#!/bin/bash
# CI pipeline: SonarCloud analysis wrapped around build + test, then
# coverage upload to Codecov. Requires TOKEN_SONARCLOUD in the environment.
set -e

# Directory containing this script.
__dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Current git branch, parsed from 'git branch' output.
__branch=$(git branch --no-color | grep -E '^\*' | awk '{print $2}')

# Begin the scanner session before building so it can observe the build.
dotnet-sonarscanner begin /key:"jpdillingham_Utility.CommandLine.Arguments" /o:jpdillingham-github /d:sonar.host.url="https://sonarcloud.io" /d:sonar.exclusions="**/*examples*/**" /d:sonar.branch.name=${__branch} /d:sonar.login="${TOKEN_SONARCLOUD}" /d:sonar.cs.opencover.reportsPaths="tests/opencover.xml"
# Sourced (not executed) so any environment they set persists here.
. "${__dir}/build.sh"
. "${__dir}/test.sh"
dotnet-sonarscanner end /d:sonar.login="${TOKEN_SONARCLOUD}"
# Upload coverage. NOTE(review): piping a remote script straight into bash
# executes unreviewed code; consider pinning/verifying the uploader.
bash <(curl -s https://codecov.io/bash) -f tests/opencover.xml
|
package com.testforth.terminal;
import static java.awt.event.KeyEvent.*;
import java.util.HashMap;
import java.util.Map;
import static com.testforth.terminal.TerminalWindow.*;
/**
* @author Dmitry
*/
class KeyCodes {
public static Map<Integer, Integer> keyCode2LanceletKeyCode = new HashMap<>() {
{
put(VK_ESCAPE, KEY_ESCAPE);
put(VK_F1, KEY_F1);
put(VK_F2, KEY_F2);
put(VK_F3, KEY_F3);
put(VK_F4, KEY_F4);
put(VK_F5, KEY_F5);
put(VK_F6, KEY_F6);
put(VK_F7, KEY_F7);
put(VK_F8, KEY_F8);
put(VK_F9, KEY_F9);
put(VK_F10, KEY_F10);
put(VK_F11, KEY_F11);
put(VK_F12, KEY_F12);
put(VK_SHIFT, KEY_SHIFT);
put(VK_CONTROL, KEY_CONTROL);
put(VK_ALT, KEY_ALT);
put(VK_ENTER, KEY_ENTER);
put(VK_BACK_SPACE, KEY_BACK_SPACE);
put(VK_DELETE, KEY_DELETE);
put(VK_COMMA, KEY_COMMA);
put(VK_PERIOD, KEY_PERIOD);
put(VK_UP, KEY_UP);
put(VK_DOWN, KEY_DOWN);
put(VK_LEFT, KEY_LEFT);
put(VK_RIGHT, KEY_RIGHT);
put(VK_TAB, KEY_TAB);
put(VK_BACK_QUOTE, KEY_BACK_QUOTE);
put(VK_MINUS, KEY_MINUS);
put(VK_END, KEY_END);
put(VK_PAGE_UP, KEY_PAGE_UP);
put(VK_PAGE_DOWN, KEY_PAGE_DOWN);
put(VK_SPACE, KEY_SPACE);
put(VK_HOME, KEY_HOME);
put(VK_QUOTE, KEY_QUOTE);
put(VK_SEMICOLON, KEY_SEMICOLON);
put(VK_EQUALS, KEY_EQUALS);
put(VK_OPEN_BRACKET, KEY_OPEN_BRACKET);
put(VK_CLOSE_BRACKET, KEY_CLOSE_BRACKET);
put(VK_BACK_SLASH, KEY_BACK_SLASH);
put(VK_SLASH, KEY_SLASH);
put(VK_0, KEY_0);
put(VK_1, KEY_1);
put(VK_2, KEY_2);
put(VK_3, KEY_3);
put(VK_4, KEY_4);
put(VK_5, KEY_5);
put(VK_6, KEY_6);
put(VK_7, KEY_7);
put(VK_8, KEY_8);
put(VK_9, KEY_9);
put(VK_A, KEY_A);
put(VK_B, KEY_B);
put(VK_C, KEY_C);
put(VK_D, KEY_D);
put(VK_E, KEY_E);
put(VK_F, KEY_F);
put(VK_G, KEY_G);
put(VK_H, KEY_H);
put(VK_I, KEY_I);
put(VK_J, KEY_J);
put(VK_K, KEY_K);
put(VK_L, KEY_L);
put(VK_M, KEY_M);
put(VK_N, KEY_N);
put(VK_O, KEY_O);
put(VK_P, KEY_P);
put(VK_Q, KEY_Q);
put(VK_R, KEY_R);
put(VK_S, KEY_S);
put(VK_T, KEY_T);
put(VK_U, KEY_U);
put(VK_V, KEY_V);
put(VK_W, KEY_W);
put(VK_X, KEY_X);
put(VK_Y, KEY_Y);
put(VK_Z, KEY_Z);
}
};
}
|
#!/bin/bash
### Use this script to remove all dependencies indicated in vcpkg-dependencies
### from the vcpkg submodule
version=1.0.1-for-${PROJECT_NAME}
tripletOverride=

# Minimal option parser; stops at the first non-option or '--'.
while [[ "$1" =~ ^- && ! "$1" == "--" ]]; do case $1 in
  -V | --version )
    echo $version
    exit
    ;;
  -h | --help )
    echo "$0:"
    echo "-h [--help] display this message"
    echo "-V [--version] display version information"
    echo "-t [--triplet] ARG overrides the triplet default [default: $tripletOverride]"
    exit
    ;;
  -t | --triplet )
    shift; tripletOverride=$1
    ;;
esac; shift; done
if [[ "$1" == '--' ]]; then shift; fi

# make sure $VCPKG_ROOT is set
if [[ -z "$VCPKG_ROOT" ]]; then
  export VCPKG_ROOT=`pwd`/extern/vcpkg
fi
echo $VCPKG_ROOT

# pick a triplet based on host os
triplet="x64-linux"
vcpkg_exe="$VCPKG_ROOT/vcpkg"
if [[ "$OSTYPE" == "msys" ]]; then
  triplet="x64-windows-static"
  vcpkg_exe="$VCPKG_ROOT/vcpkg.exe"
elif [[ "$OSTYPE" =~ ^darwin.* ]]; then
  triplet="x64-osx"
fi

# honor the triplet override argument
if [[ ! -z "$tripletOverride" ]]; then
  triplet=$tripletOverride
  echo "using triplet=$triplet"
fi

# create the vcpkg install parameters from the vcpkg-dependencies file
vcpkg_dependencies=(`cat "ci/vcpkg-dependencies" | tr -d '\r'`)
# ^ the tr -d '\r' is needed when git is configured to modify
# line endings on windows to \r\n on checkout
for i in "${!vcpkg_dependencies[@]}"; do
  # line looks like name|platform or name
  # split into name and platform
  # if platform is empty or matches $OSTYPE, include it, otherwise skip it
  # (IFS is switched to '|' only for the read, then restored to a space)
  IFS='|'
  read -ra vcpkgItemArray <<< "${vcpkg_dependencies[$i]}"
  IFS=' '
  if [[ ${#vcpkgItemArray[@]} -eq 2 ]]; then
    iter=0
    # NOTE(review): '[[ iter -eq 0 ]]' works because -eq puts the bare word
    # 'iter' in arithmetic context (it is read as the variable's value),
    # but "$iter" would be clearer.
    for pkg in "${vcpkgItemArray[@]}"; do
      if [[ iter -eq 0 ]]; then
        pkgName=$pkg
        iter=1
      else
        platformName=$pkg
      fi
    done
    if [[ "$platformName" == "windows" ]] && [[ "$OSTYPE" == "msys" ]]; then
      vcpkg_dependencies[$i]="$pkgName:$triplet"
    elif [[ "$platformName" == "osx" ]] && [[ "$OSTYPE" =~ ^darwin.* ]]; then
      vcpkg_dependencies[$i]="$pkgName:$triplet"
    elif [[ "$platformName" == "linux" ]] && [[ "$OSTYPE" =~ ^linux.* ]]; then
      vcpkg_dependencies[$i]="$pkgName:$triplet"
    else
      # platform does not match this host: blank the entry so it is skipped
      vcpkg_dependencies[$i]=""
    fi
  else
    vcpkg_dependencies[$i]="${vcpkg_dependencies[$i]}:$triplet"
  fi
done

# remove all vcpkg dependencies
if [[ -e "$vcpkg_exe" ]]; then
  # create the parameter line passed to vcpkg remove
  for i in "${vcpkg_dependencies[@]}"; do
    vcpkg_install_line="$vcpkg_install_line $i"
  done
  echo "$vcpkg_exe remove$vcpkg_install_line"
  # intentionally unquoted: $vcpkg_install_line must word-split into
  # individual package arguments (blanked entries collapse away)
  $vcpkg_exe remove$vcpkg_install_line
else
  echo "nothing to do: $vcpkg_exe does not exist"
fi
|
// Format December 16, 2012 as MM/DD/YYYY using the en-US locale.
let givenDate = new Date(2012, 11, 16); // month is 0-based: 11 = December
const dateFormatOptions = {
    day: '2-digit',
    month: '2-digit',
    year: 'numeric'
};
let formattedDate = givenDate.toLocaleDateString('en-US', dateFormatOptions);
console.log(formattedDate); // 12/16/2012
|
<gh_stars>0
package gui;
/**
 * Display contract for a calculator-style GUI: a "result" field that acts
 * as an editable buffer, plus an "expression" field for the current
 * expression text.
 */
public interface GraphicPanel {

    // result panel

    /** Returns the current text of the result field. */
    public String getResult();

    /** Replaces the result field contents with the given text. */
    public void setResult(String text);

    /** Appends the given text to the result field. */
    public void addResult(String s);

    /** Removes the trailing character from the result field (backspace). */
    public void backspaceResult();

    /** Empties the result field. */
    public void clearResult();

    /** Returns true when the result field holds no text. */
    public boolean isResultEmpty();

    // expression panel

    /** Returns the current text of the expression field. */
    public String getExpression();

    /** Replaces the expression field contents. */
    public void setExpression(String expressions);

    /** Empties the expression field. */
    public void clearExpression();

    /** Empties both the result and expression fields. */
    public void clearAll();
}
|
package org.museautomation.ui.extend.components;
import javafx.geometry.*;
import javafx.scene.*;
import javafx.scene.control.*;
import javafx.scene.layout.*;
import javafx.util.*;
import org.controlsfx.control.*;
/**
 * Base class for lightweight popup dialogs built on a ControlsFX PopOver.
 * Subclasses supply the content node ({@link #createContent()}) and the
 * OK-button handler ({@link #okPressed()}).
 *
 * @author <NAME> (see LICENSE.txt for license details)
 */
public abstract class PopupDialog
    {
    /**
     * @param ok_button_label label for the OK button; null selects the default "Ok"
     * @param header optional title for the PopOver header; null leaves the header hidden
     */
    public PopupDialog(String ok_button_label, String header)
        {
        _popper = new PopOver();
        GridPane grid = new GridPane();
        grid.setPadding(new Insets(5));
        grid.setVgap(5);
        _popper.setContentNode(grid);
        if (header != null)
            {
            _popper.setTitle(header);
            _popper.setHeaderAlwaysVisible(true);
            }

        // subclass-provided content goes in row 0
        grid.add(createContent(), 0, 0);

        // button area
        HBox button_holder = new HBox();
        button_holder.alignmentProperty().setValue(Pos.CENTER);
        button_holder.setSpacing(5);
        grid.add(button_holder, 0, 1);

        // ok button: closes (and destroys) the dialog only when okPressed()
        // reports success
        if (ok_button_label == null)
            ok_button_label = "Ok";
        _ok_button = new Button(ok_button_label);
        _ok_button.setId(OK_BUTTON_ID);
        _ok_button.setOnAction(event1 ->
            {
            if (okPressed())
                {
                _popper.hide();
                destroy();
                }
            });
        button_holder.getChildren().add(_ok_button);

        // makeFast() applies to exactly one (the next) dialog: consume and
        // clear the flag here
        if (MAKE_FAST)
            {
            _popper.setFadeInDuration(Duration.millis(5));
            _popper.setFadeOutDuration(Duration.millis(5));
            MAKE_FAST = false;
            }
        }

    /**
     * Implement to provide content for the popup dialog
     */
    protected abstract Node createContent();

    /**
     * Implement to handle the ok event
     *
     * @return true to close and destroy the dialog; false keeps it open
     */
    protected abstract boolean okPressed();

    /**
     * Override to cleanup resources, deregister listeners, etc.
     *
     * Call super() to ensure the popup is closed.
     */
    public void destroy()
        {
        _popper.hide();
        }

    /** Shows the popup anchored to the given owner node. */
    public void show(Node owner)
        {
        _popper.show(owner);
        }

    /** Enables or disables the OK button (e.g. while input is invalid). */
    public void setOkButtonEnabled(boolean enabled)
        {
        _ok_button.setDisable(!enabled);
        }

    /**
     * For testing purposes only. This will cause the next constructed PopupDialog to use fast transitions.
     */
    public static void makeFast()
        {
        MAKE_FAST = true;
        }

    private final PopOver _popper;
    private final Button _ok_button;

    // consumed and reset by the next constructor call — see makeFast()
    private static boolean MAKE_FAST = false;

    public final static String OK_BUTTON_ID = "omuc-ok-button";
    }
|
import { Connection, MysqlError } from 'mysql';
import { Report } from '@/typings/command';
/** Plugin configuration for the mysql report store. */
interface Config {
  /**
   * Optional prefix(es) prepended to command names when stored. Entries
   * containing `${...}` template variables are skipped (see VAR_RE).
   */
  commandPrefix?: string | string[];
  /** MySQL database name. */
  database: string;
  /** Table used to store reports. */
  table: string;
}
/* eslint-disable-next-line @typescript-eslint/no-explicit-any */
type Row = any; // one mysql result row (command, date, report)
// Matches `${name:value[, extra[, extra]]}`-style template variables in a command prefix.
const VAR_RE = /\${(.+?):(.+?)(?:,\s*(.+?))?(?:,\s*(.+?))?}/;
/**
 * Picks the first usable command prefix from the config: a non-empty value
 * that contains no `${...}` template variables. Returns undefined when no
 * such prefix is configured.
 */
const getCommandPrefix = (config: Config): string | void => {
  const { commandPrefix } = config;

  if (!commandPrefix) {
    return undefined;
  }

  if (Array.isArray(commandPrefix)) {
    return commandPrefix.find((prefix: string): boolean => Boolean(prefix) && !prefix.match(VAR_RE));
  }

  return commandPrefix.match(VAR_RE) ? undefined : commandPrefix;
};
/**
 * Returns the storage name for a command: "<prefix>-<command>" when a usable
 * prefix is configured, otherwise the command unchanged.
 */
const parseCommand = (command: string, config: Config): string => {
  const prefix = getCommandPrefix(config);

  if (prefix) {
    return `${prefix}-${command}`;
  }

  return command;
};
/**
 * Promisified connection.query. Resolves with the query results, rejects on
 * a mysql error, and resolves with undefined when no connection is
 * configured (so callers degrade to a no-op).
 */
/* eslint-disable-next-line @typescript-eslint/no-explicit-any */
const query = (connection: Connection | void, sql: string, params?: any): Promise<any> =>
  new Promise((resolve, reject): void => {
    if (!connection) {
      resolve();
      return;
    }

    connection.query(
      sql,
      params,
      /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
      (error: MysqlError | null, results: any): any => (error ? reject(error) : resolve(results)),
    );
  });
/**
 * Creates the reports table if it does not exist. No-op (still logs) when
 * no connection is configured.
 */
export const init = async (connection: Connection | void, config: Config): Promise<void> => {
  /* eslint-disable-next-line no-console */
  console.log('[@modus/gimbal-plugin-mysql]', 'Creating table...');

  /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
  const ret: any = await query(
    connection,
    `CREATE TABLE IF NOT EXISTS ${config.table} (
    id INT NOT NULL AUTO_INCREMENT,
    command VARCHAR(255) NOT NULL,
    date DATETIME NOT NULL,
    report LONGTEXT NOT NULL,
    PRIMARY KEY (id, command),
    UNIQUE INDEX id_UNIQUE (id ASC)) ENGINE=INNODB;`,
  );

  /* eslint-disable-next-line no-console */
  console.log('[@modus/gimbal-plugin-mysql]', 'Table created!');

  return ret;
};
/**
 * Fetches the most recent stored report row for `command`.
 *
 * Resolves with the row ({ command, date, report }) or undefined when no
 * report exists or no connection is configured. Return type widened from
 * Promise<void> to Promise<Row | void>: the implementation has always
 * resolved with the row, and callers rely on it.
 */
export const getLastReport = async (
  command: string,
  connection: Connection | void,
  config: Config,
): Promise<Row | void> => {
  const parsedCommand = parseCommand(command, config);

  /* eslint-disable-next-line no-console */
  console.log('[@modus/gimbal-plugin-mysql]', `Getting last report for "${parsedCommand}" command...`);

  const rows: Row[] =
    (await query(
      connection,
      `SELECT command, date, report FROM ${config.table} WHERE command = ? ORDER BY date DESC LIMIT 1;`,
      [parsedCommand],
    )) || [];
  const [row] = rows;

  if (row) {
    /* eslint-disable-next-line no-console */
    console.log('[@modus/gimbal-plugin-mysql]', 'Got last report!');

    if (config.commandPrefix) {
      // Strip the storage prefix so callers see the command name they asked for.
      /* eslint-disable-next-line no-param-reassign */
      row.command = row.command.replace(parsedCommand, command);
    }
  } else {
    /* eslint-disable-next-line no-console */
    console.log('[@modus/gimbal-plugin-mysql]', 'Did not find a last report.');
  }

  return row;
};
/**
 * Inserts a new report row (command, NOW(), serialized report) for the
 * prefixed command name. No-op (still logs) when no connection is
 * configured.
 */
export const saveLastReport = async (
  command: string,
  report: Report,
  connection: Connection | void,
  config: Config,
): Promise<void> => {
  const parsedCommand = parseCommand(command, config);

  /* eslint-disable-next-line no-console */
  console.log('[@modus/gimbal-plugin-mysql]', `Saving new report for "${parsedCommand}" command...`);

  /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
  const ret: any = await query(connection, `INSERT INTO ${config.table} (command, date, report) VALUES (?, NOW(), ?);`, [
    parsedCommand,
    JSON.stringify(report),
  ]);

  /* eslint-disable-next-line no-console */
  console.log('[@modus/gimbal-plugin-mysql]', 'Saved new report!');

  return ret;
};
|
<filename>using-custom-hook.js
/**
 * Hook returning the message for the current count.
 *
 * While `count` indexes into `messages`, the text tracks messages[count];
 * once count walks past the end, `onFinished` is invoked instead.
 *
 * Bug fix: the original wrote `return setText(...)` / `return onFinished()`
 * inside the effect. React treats a non-undefined return value of a
 * useEffect callback as a cleanup function, so whatever `onFinished`
 * returned could be invoked as cleanup. The effect now returns nothing.
 */
const useTextByCount = (count, messages, onFinished) => {
  const [text, setText] = useState(messages[0]);

  useEffect(() => {
    if (count < messages.length) {
      setText(messages[count]);
    } else {
      onFinished();
    }
  }, [count]);

  return text;
};
// Button labels shown on successive clicks; clicking past the last label
// fires the onClick callback (via useTextByCount's onFinished).
const messages = ['Cancel', 'Really?', 'Don\'t leave me!', 'OK, fine!']

// Multi-click confirmation button: each click advances through `messages`.
// NOTE(review): the span's class list repeats "glyphicon" ("glyphicon
// glyphicon glyphicon-ban-circle") — harmless, but confirm it is intended.
const CancelAccountDeletion = ({onClick}) => {
  const [clicks, setClicks] = useState(0);
  const buttonText = useTextByCount(clicks, messages, onClick);
  return (
    <button onClick={() => setClicks(clicks+1)}
      className="btn btn-default btn-lg cancel-account-deletion">
      <span className="glyphicon glyphicon glyphicon-ban-circle"></span>
      {buttonText}
    </button>
  )
};
|
<reponame>p2401kumar/ObjMtlParser
package com.keetarp.parser.mtl_parser;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.HashMap;
import com.keetarp.parser.comman.TheThree;
public class MTL_parser {

    // Material name -> parsed material/texture definition.
    private HashMap<String, MTL_texture> MTL_TEXTURE;

    /**
     * make sure the raw folder contains drawable folder with texture placed
     * inside a folder named with 3d files name as shown
     */
    final String IMAGE_LOCATION = "src/main/resources/drawable/";

    /**
     * Parses src/main/resources/res/&lt;s&gt;.mtl into the material map.
     *
     * @param s base name of the .mtl file (without extension)
     */
    public MTL_parser(String s) {
        MTL_TEXTURE = new HashMap<String, MTL_texture>();
        /*
         * try-with-resources closes the stream on every path; the original
         * leaked the FileInputStream/BufferedReader (it was never closed and
         * carried a @SuppressWarnings("resource")).
         */
        try (BufferedReader br = new BufferedReader(new InputStreamReader(
                new FileInputStream("src/main/resources/res/" + s + ".mtl")))) {
            String line, tex_name = "", img_map = "";
            TheThree Ka = null, Kd = null, Ks = null;
            int illium = 1;
            float d = 1;
            while ((line = br.readLine()) != null) {
                if (line.startsWith("newmtl ")) {
                    // Flush the previously accumulated material. Bug fix: the
                    // original unconditionally inserted a bogus entry keyed by
                    // "" (with null color components) before the first newmtl.
                    if (!tex_name.isEmpty()) {
                        MTL_TEXTURE.put(tex_name, new MTL_texture(Ka, Kd, Ks, d,
                                img_map, illium));
                    }
                    tex_name = line.substring(7);
                    Ka = new TheThree(0f, 0f, 0f);
                    Kd = new TheThree(0f, 0f, 0f);
                    Ks = new TheThree(0f, 0f, 0f);
                    illium = 1;
                    d = 1;
                    img_map = "";
                } else if (line.startsWith("Ka ")) {
                    Ka = new TheThree(line.substring(3).split(" "));
                } else if (line.startsWith("Kd ")) {
                    Kd = new TheThree(line.substring(3).split(" "));
                } else if (line.startsWith("Ks ")) {
                    Ks = new TheThree(line.substring(3).split(" "));
                } else if (line.startsWith("illium ")) {
                    // NOTE(review): the standard .mtl keyword is "illum"; this
                    // parser expects "illium" — confirm against the exporter
                    // that produces these files before changing it.
                    illium = Integer.parseInt(line.substring(7));
                } else if (line.startsWith("d ")) {
                    d = Float.parseFloat(line.substring(2));
                } else if (line.startsWith("map_Ka ")
                        || line.startsWith("map_Kd ")
                        || line.startsWith("map_Ks ")) {
                    img_map = IMAGE_LOCATION
                            + line.substring(7);
                }
            }
            // Flush the final material, if any newmtl was seen.
            if (!tex_name.isEmpty()) {
                MTL_TEXTURE.put(tex_name, new MTL_texture(Ka, Kd, Ks, d, img_map,
                        illium));
            }
        } catch (FileNotFoundException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /** Returns the parsed material map (material name -> MTL_texture). */
    public HashMap<String, MTL_texture> get_texture() {
        return MTL_TEXTURE;
    }
}
|
def is_single_character(word):
    """Return True when ``word`` contains exactly one character."""
    single = len(word) == 1
    return single
|
#ifndef PROJ_TYPE_H_
#define PROJ_TYPE_H_

#include <memory>  // std::shared_ptr — was missing; previously relied on a transitive include

// #include "proto/message_protocol.pb.h"
namespace google {
namespace protobuf {
class MessageLite;
}
}

// NOTE(review): this global-scope forward declaration appears unused (the
// aliases below use google::protobuf::MessageLite) — confirm before removing.
class MessageLite;

namespace ChatProtocol{
class Packet;
}

using MessageBase = google::protobuf::MessageLite;
using MessagePtr = std::shared_ptr<MessageBase>;
using PacketPtr = std::shared_ptr<ChatProtocol::Packet>;

// Message framing constants: a 4-byte header stores the message length.
static const int kMaxBufferSize = 65536;
static const int kMsgHeaderSize = 4;

static const int kDispatcherSize = 3;

#endif  // PROJ_TYPE_H_
|
package Assign_Cookies;
import java.util.Arrays;
public class Solution {

    /**
     * Greedy cookie assignment (LeetCode 455): sort greed factors and cookie
     * sizes ascending, then walk both lists, spending one cookie per
     * comparison; a child is satisfied when the current cookie is at least
     * their greed factor.
     *
     * @param g greed factor of each child
     * @param s size of each cookie
     * @return number of content children
     */
    public int findContentChildren(int[] g, int[] s) {
        Arrays.sort(g);
        Arrays.sort(s);
        int child = 0;
        int cookie = 0;
        while (child < g.length && cookie < s.length) {
            if (s[cookie] >= g[child]) {
                child++;      // this cookie satisfies the current child
            }
            cookie++;         // cookie is consumed either way
        }
        return child;
    }

    public static void main(String[] args) {
        Solution s = new Solution();
        System.out.println(s.findContentChildren(new int[]{1, 2, 3}, new int[]{1, 1}));
        System.out.println(s.findContentChildren(new int[]{1, 2}, new int[]{1, 2, 3}));
    }
}
|
package org.slos;
import org.slos.battle.decision.Choice;
import org.slos.battle.decision.ChoiceGate;
import org.slos.battle.decision.ChoiceGateFactory;
import org.slos.battle.decision.MasterChoiceContext;
import org.slos.battle.decision.strategy.ChoiceStrategyMode;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotSame;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
 * Exercises ChoiceGate decision history: after MasterChoiceContext.reset(),
 * gates replay previously made decisions in order, and decisions beyond the
 * recorded history draw fresh gates.
 *
 * NOTE(review): the whole class is @Disabled — these assertions encode the
 * intended semantics but were not running at review time.
 *
 * Changes from the original: deprecated {@code new Boolean(...)} replaced
 * with {@code Boolean.TRUE}/{@code Boolean.FALSE}, raw Set/HashSet
 * parameterized, and the two heavily copy-pasted permutation tests
 * compressed into loops that reproduce the exact same call/print sequence.
 */
@Disabled
public class ChoiceTests {

    @Test
    public void itShouldDecideUsingHisory() { // (sic) name kept as-is
        MasterChoiceContext masterChoiceContext = new MasterChoiceContext();
        ChoiceGateFactory choiceGateFactory = masterChoiceContext.getChoiceGateFactory();

        // Weights 1:true / 0:false over a period of 2 should alternate
        // true, false on successive draws.
        ChoiceGate<Boolean> choiceGate = choiceGateFactory.buildGate(new ChoiceGateFactory.Configuration<Boolean>()
                .setGateId("Test")
                .setChoiceMode(ChoiceStrategyMode.WEIGHT_ENFORCED)
                .addChoice(new Choice(1, 2, Boolean.TRUE))
                .addChoice(new Choice(0, 2, Boolean.FALSE))
        );

        for (int i = 0; i < 100; i++) {
            System.out.println("i: " + i);
            assertTrue(choiceGate.getResult());
            assertFalse(choiceGate.getResult());
        }
    }

    @Test
    public void itShouldGetNewGateForNestedDecisions() {
        MasterChoiceContext masterChoiceContext = new MasterChoiceContext();
        ChoiceGateFactory choiceGateFactory = masterChoiceContext.getChoiceGateFactory();

        // Two decisions at the same id within one run are distinct gates.
        ChoiceGate gate1 = getGateAndExecuteToExpect("TEST", true, choiceGateFactory);
        ChoiceGate gate2 = getGateAndExecuteToExpect("TEST", true, choiceGateFactory);

        assertNotSame(gate1, gate2);
    }

    @Test
    public void itShouldStartOverWithPreviousHistory() {
        MasterChoiceContext masterChoiceContext = new MasterChoiceContext();
        ChoiceGateFactory choiceGateFactory = masterChoiceContext.getChoiceGateFactory();

        // First run records three decisions.
        ChoiceGate gate1 = getGateAndExecuteToExpect("TEST", true, choiceGateFactory);
        ChoiceGate gate2 = getGateAndExecuteToExpect("TEST", true, choiceGateFactory);
        ChoiceGate gate3 = getGateAndExecuteToExpect("TEST", true, choiceGateFactory);

        // After reset, the first decision replays the recorded gate; later
        // decisions that diverge get fresh gates.
        masterChoiceContext.reset();
        ChoiceGate gateA = getGateAndExecuteToExpect("TEST", false, choiceGateFactory);
        ChoiceGate gateB = getGateAndExecuteToExpect("TEST", true, choiceGateFactory);
        ChoiceGate gateC = getGateAndExecuteToExpect("TEST", true, choiceGateFactory);

        assertSame(gate1, gateA);
        assertNotSame(gate2, gateB);
        assertNotSame(gate2, gateC);
        assertNotSame(gate3, gateC);

        masterChoiceContext.reset();
        ChoiceGate gateI = getGateAndExecuteToExpect("TEST", true, choiceGateFactory);
        ChoiceGate gateII = getGateAndExecuteToExpect("TEST", false, choiceGateFactory);
        ChoiceGate gateIII = getGateAndExecuteToExpect("TEST", true, choiceGateFactory);

        assertSame(gate1, gateI);
        assertSame(gate2, gateII);

        masterChoiceContext.reset();
        ChoiceGate gateX = getGateAndExecuteToExpect("TEST", false, choiceGateFactory);
        ChoiceGate gateY = getGateAndExecuteToExpect("TEST", false, choiceGateFactory);
        ChoiceGate gateZ = getGateAndExecuteToExpect("TEST", true, choiceGateFactory);

        assertSame(gateI, gateX);
    }

    @Test
    public void itShouldHandleMultipleDifferentGates() {
        MasterChoiceContext masterChoiceContext = new MasterChoiceContext();
        ChoiceGateFactory choiceGateFactory = masterChoiceContext.getChoiceGateFactory();

        // History is tracked per gate id.
        ChoiceGate gate1 = getGateAndExecuteToExpect("TEST", true, choiceGateFactory);
        ChoiceGate gate2 = getGateAndExecuteToExpect("TEST2", true, choiceGateFactory);
        ChoiceGate gate3 = getGateAndExecuteToExpect("TEST", true, choiceGateFactory);
        ChoiceGate gate4 = getGateAndExecuteToExpect("TEST2", true, choiceGateFactory);

        masterChoiceContext.reset();
        ChoiceGate gate12 = getGateAndExecuteToExpect("TEST", false, choiceGateFactory);
        ChoiceGate gate22 = getGateAndExecuteToExpect("TEST2", true, choiceGateFactory);
        ChoiceGate gate32 = getGateAndExecuteToExpect("TEST", true, choiceGateFactory);
        ChoiceGate gate42 = getGateAndExecuteToExpect("TEST", true, choiceGateFactory);

        assertSame(gate1, gate12);
        assertNotSame(gate2, gate12);

        masterChoiceContext.reset();
        ChoiceGate gate13 = getGateAndExecuteToExpect("TEST", true, choiceGateFactory);
        ChoiceGate gate23 = getGateAndExecuteToExpect("TEST2", false, choiceGateFactory);
        ChoiceGate gate33 = getGateAndExecuteToExpect("TEST", true, choiceGateFactory);
        ChoiceGate gate43 = getGateAndExecuteToExpect("TEST", true, choiceGateFactory);

        assertSame(gate1, gate13);
        assertSame(gate2, gate23);

        masterChoiceContext.reset();
        ChoiceGate gate14 = getGateAndExecuteToExpect("TEST", false, choiceGateFactory);
        ChoiceGate gate24 = getGateAndExecuteToExpect("TEST2", false, choiceGateFactory);
        ChoiceGate gate34 = getGateAndExecuteToExpect("TEST", true, choiceGateFactory);
        ChoiceGate gate44 = getGateAndExecuteToExpect("TEST", true, choiceGateFactory);

        assertSame(gate22, gate24);

        masterChoiceContext.reset();
        ChoiceGate gate15 = getGateAndExecuteToExpect("TEST", true, choiceGateFactory);
        ChoiceGate gate25 = getGateAndExecuteToExpect("TEST2", true, choiceGateFactory);
        ChoiceGate gate35 = getGateAndExecuteToExpect("TEST", false, choiceGateFactory);
        ChoiceGate gate45 = getGateAndExecuteToExpect("TEST", true, choiceGateFactory);

        assertSame(gate3, gate35);
    }

    @Test
    public void itShouldntPersistPreviousChoices() {
        MasterChoiceContext masterChoiceContext = new MasterChoiceContext();
        ChoiceGateFactory choiceGateFactory = masterChoiceContext.getChoiceGateFactory();

        // Replayed gates return the *new* run's choice payloads, never the
        // objects captured in an earlier run.
        Object one = new Object();
        Object two = new Object();
        Set<Object> firstSet = new HashSet<>();
        firstSet.add(one);
        firstSet.add(two);

        ChoiceGate<Object> testGate = createChoiceGateWithChoices("TEST3", choiceGateFactory, new Choice<Object>(1, 1, one), new Choice(2, 1, two));
        Object one1 = testGate.getResult();
        Object two1 = testGate.getResult();

        assertTrue(firstSet.contains(one1));
        assertTrue(firstSet.contains(two1));

        masterChoiceContext.reset();
        Object oneA = new Object();
        Object twoA = new Object();
        Set<Object> secondSet = new HashSet<>();
        secondSet.add(oneA);
        secondSet.add(twoA);

        ChoiceGate<Object> testGate2 = createChoiceGateWithChoices("TEST3", choiceGateFactory, new Choice<Object>(1, 1, oneA), new Choice(2, 1, twoA));
        Object oneB = testGate2.getResult();
        Object twoB = testGate2.getResult();

        assertFalse(secondSet.contains(one1));
        assertFalse(secondSet.contains(two1));
        assertFalse(firstSet.contains(oneB));
        assertFalse(firstSet.contains(twoB));
        assertTrue(secondSet.contains(oneB));
        assertTrue(secondSet.contains(twoB));

        masterChoiceContext.reset();
        Object oneX = new Object();
        Object twoX = new Object();
        Set<Object> thirdSet = new HashSet<>();
        thirdSet.add(oneX);
        thirdSet.add(twoX);

        ChoiceGate<Object> testGate3 = createChoiceGateWithChoices("TEST3", choiceGateFactory, new Choice<Object>(1, 1, oneX), new Choice(2, 1, twoX));
        Object oneY = testGate3.getResult();
        Object twoY = testGate3.getResult();

        assertTrue(thirdSet.contains(oneY));
        assertTrue(thirdSet.contains(twoY));
        assertFalse(secondSet.contains(oneY));
        assertFalse(secondSet.contains(twoY));
        assertFalse(firstSet.contains(oneY));
        assertFalse(firstSet.contains(twoY));

        masterChoiceContext.reset();
        System.out.println(masterChoiceContext.toJson());
    }

    @Test
    public void itShouldHandleArrayPermutationResultsX() {
        MasterChoiceContext masterChoiceContext = new MasterChoiceContext();
        ChoiceGateFactory choiceGateFactory = masterChoiceContext.getChoiceGateFactory();

        List<String> choices = new ArrayList<>();
        choices.add("One");
        choices.add("Two");
        choices.add("Three");

        // Original: 12 copy-pasted reset/build/print rounds, with a
        // " -- BREAK --" marker before round 7's result and one trailing.
        for (int round = 0; round < 12; round++) {
            if (round > 0) {
                masterChoiceContext.reset();
            }
            ChoiceGate permutationChoiceGate = getPermutationGate("Permutation", choices, choiceGateFactory);
            if (round == 6) {
                System.out.println(" -- BREAK --");
            }
            System.out.println(permutationChoiceGate.getResult());
        }
        System.out.println(" -- BREAK --");
    }

    @Test
    public void itShouldHandleArrayPermutationResults() {
        MasterChoiceContext masterChoiceContext = new MasterChoiceContext();
        ChoiceGateFactory choiceGateFactory = masterChoiceContext.getChoiceGateFactory();

        List<String> choices = new ArrayList<>();
        choices.add("One");
        choices.add("Two");
        choices.add("Three");

        // Original: 12 copy-pasted build/print rounds (no reset), with a
        // " -- CHECK --" marker before round 7's result and one after round 12.
        for (int round = 0; round < 12; round++) {
            ChoiceGate permutationChoiceGate = getPermutationGate("Permutation", choices, choiceGateFactory);
            if (round == 6) {
                System.out.println(" -- CHECK --");
            }
            System.out.println(permutationChoiceGate.getResult());
        }
        System.out.println(" -- CHECK --");

        // ...then 6 more rounds after a reset.
        masterChoiceContext.reset();
        for (int round = 0; round < 6; round++) {
            ChoiceGate permutationChoiceGate = getPermutationGate("Permutation", choices, choiceGateFactory);
            System.out.println(permutationChoiceGate.getResult());
        }
        System.out.println(" -- CHECK --");
    }

    /** Builds a RANDOM_WEIGHTED gate that yields a permutation of {@code choices}. */
    private ChoiceGate getPermutationGate(String id, List<String> choices, ChoiceGateFactory choiceGateFactory) {
        // NOTE(review): Configuration<Boolean> assigned to
        // Configuration<Choice<String>> — kept verbatim; presumably compiles
        // via raw-typed builder returns. Confirm against Configuration's API.
        ChoiceGateFactory.Configuration<Choice<String>> configuration = new ChoiceGateFactory.Configuration<Boolean>()
                .setGateId(id)
                .setChoiceMode(ChoiceStrategyMode.RANDOM_WEIGHTED)
                .returnPermutationOf(choices);

        return choiceGateFactory.buildGate(configuration);
    }

    /** Builds a RANDOM_WEIGHTED gate over the given explicit choices. */
    private ChoiceGate createChoiceGateWithChoices(String id, ChoiceGateFactory choiceGateFactory, Choice... choices) {
        ChoiceGateFactory.Configuration configuration = new ChoiceGateFactory.Configuration<Boolean>()
                .setGateId(id)
                .setChoiceMode(ChoiceStrategyMode.RANDOM_WEIGHTED);

        for (Choice choice : choices) {
            configuration.addChoice(choice);
        }

        return choiceGateFactory.buildGate(configuration);
    }

    private ChoiceGate getGateAndExecuteToExpect(String id, Boolean expect, ChoiceGateFactory choiceGateFactory) {
        return getGateAndExecuteToExpect(id, expect, choiceGateFactory, ChoiceStrategyMode.WEIGHT_ENFORCED);
    }

    /** Builds a true/false gate, draws one result, and returns the gate. */
    private ChoiceGate getGateAndExecuteToExpect(String id, Boolean expect, ChoiceGateFactory choiceGateFactory, ChoiceStrategyMode choiceStrategyMode) {
        ChoiceGate<Boolean> choiceGate = choiceGateFactory.buildGate(new ChoiceGateFactory.Configuration<Boolean>()
                .setGateId(id)
                .setChoiceMode(choiceStrategyMode)
                .addChoice(new Choice(1, 1, Boolean.TRUE))
                .addChoice(new Choice(0, 1, Boolean.FALSE))
        );

        // assertEquals(expect, choiceGate.getResult());
        return choiceGate;
    }
}
|
class PluginManager:
    """Tracks plugin registrations and rejects known-bad plugin names."""

    def __init__(self):
        # (name, plugin_type, api_version) tuples, in registration order.
        self.registered_plugins = []

    def register_plugin(self, name, plugin_type, api_version):
        """Record a plugin registration."""
        entry = (name, plugin_type, api_version)
        self.registered_plugins.append(entry)

    def reject_plugin(self, name):
        """Raise ValueError when the plugin name contains 'imdb'."""
        if name.find('imdb') != -1:
            raise ValueError(f"Plugin '{name}' rejected: 'imdb required'")

    def get_registered_plugins(self):
        """Return the live list of registered plugin tuples."""
        return self.registered_plugins
|
<gh_stars>0
import {
templateForComponentFile,
createYargsForComponentGeneration,
} from '../helpers'
// Builds the output-file map for the "generate service" command: a service
// file plus its companion test file, keyed by output path.
export const files = async ({ name, ...rest }) => {
  console.info(rest) // NOTE(review): looks like leftover debug logging — confirm before removing
  const serviceFile = templateForComponentFile({
    name,
    apiPathSection: 'services',
    templatePath: 'service/service.js.template',
    templateVars: { ...rest },
  })
  const testFile = templateForComponentFile({
    name,
    extension: '.test.js',
    apiPathSection: 'services',
    templatePath: 'service/test.js.template',
    templateVars: { ...rest },
  })

  // Returns
  // {
  //    "path/to/fileA": "<<<template>>>",
  //    "path/to/fileB": "<<<template>>>",
  // }
  return [serviceFile, testFile].reduce((acc, [outputPath, content]) => {
    return {
      [outputPath]: content,
      // acc is spread last, so earlier-accumulated keys win on collision
      ...acc,
    }
  }, {})
}
// Yargs wiring for the "service" generator; file generation is delegated to
// the `files` function via filesFn.
export const {
  command,
  desc,
  builder,
  handler,
} = createYargsForComponentGeneration({
  componentName: 'service',
  filesFn: files,
})
|
<reponame>thomaszdxsn/zoom-meeting-api
import { CreateUserActions, UserType } from '../constants';
/** Request payload for the Zoom "create user" API call. */
export interface CreateUserParams {
  /** Creation mode; see CreateUserActions in ../constants. */
  action: CreateUserActions;
  /** Profile of the user to create; `type` is the Zoom user type (see UserType). */
  user_info: { email: string; type: UserType; first_name?: string; last_name?: string; password?: string };
}
|
def reverse_string(s):
    """Return the characters of ``s`` joined in reverse order.

    Replaces the manual reverse-index loop with the idiomatic
    ``"".join(reversed(s))`` — same result for any sequence of strings,
    without explicit index bookkeeping.
    """
    return "".join(reversed(s))
|
#!/bin/bash
set -e

# Download and cache a nats-server binary under $HOME/nats-server.
# NATS_SERVER_VERSION may be preset in the environment; otherwise the
# default below is used.
export DEFAULT_NATS_SERVER_VERSION=v2.0.4
export NATS_SERVER_VERSION="${NATS_SERVER_VERSION:=$DEFAULT_NATS_SERVER_VERSION}"

# check to see if nats-server folder is empty
# (2>/dev/null: quoting fix + suppress ls noise when the cache dir is missing;
#  an empty substitution still takes the download branch, as before)
if [ ! "$(ls -A "$HOME/nats-server" 2>/dev/null)" ]; then
  (
    mkdir -p "$HOME/nats-server"
    cd "$HOME/nats-server"
    wget "https://github.com/nats-io/nats-server/releases/download/$NATS_SERVER_VERSION/nats-server-$NATS_SERVER_VERSION-linux-amd64.zip" -O nats-server.zip
    unzip nats-server.zip
    cp "nats-server-$NATS_SERVER_VERSION-linux-amd64/nats-server" "$HOME/nats-server/nats-server"
  )
else
  echo 'Using cached directory.'
fi
|
import { Injectable } from '@angular/core';
import 'rxjs/add/operator/toPromise';
import { Course } from 'app/core/model';
import * as moment from 'moment';
import { AuthHttp } from 'angular2-jwt';
import { environment } from 'environments/environment';
@Injectable()
export class CoursesService {

  // Base REST endpoint for course resources.
  courseUrl: string;

  constructor(private http: AuthHttp) {
    this.courseUrl = `${environment.apiURL}/courses`;
  }

  /** Fetches courses from the API and resolves with the parsed JSON body. */
  searchCourse(): Promise<any> {
    return this.http.get(`${this.courseUrl}`)
      .toPromise().then(response => response.json());
  }

  /** Deletes the course with the given id. Resolves with null. */
  deleteCourses(id: number): Promise<void> {
    return this.http.delete(`${this.courseUrl}/${id}`)
      .toPromise().then(() => null);
  }

  /** Creates a new course; resolves with the server's parsed response. */
  saveCourses(course: Course): Promise<Course> {
    return this.http.post(this.courseUrl,
      JSON.stringify(course))
      .toPromise()
      .then(response => response.json());
  }

  /**
   * Updates an existing course and resolves with the updated entity,
   * with dateRegister converted from string to Date.
   */
  update(course: Course): Promise<Course> {
    return this.http.put(`${this.courseUrl}/${course.id}`,
      JSON.stringify(course))
      .toPromise()
      .then(response => {
        const courseModificator = response.json() as Course;
        this.converterStringsParaDatas([courseModificator]);
        return courseModificator;
      });
  }

  // "buscarPorCodigo" = fetch by id (mixed Portuguese/English naming is
  // pre-existing; renaming public methods would break callers).
  /** Fetches one course by id, converting its dateRegister to a Date. */
  buscarPorCodigo(id: number): Promise<Course> {
    return this.http.get(`${this.courseUrl}/${id}`)
      .toPromise()
      .then(response => {
        const course = response.json() as Course;
        this.converterStringsParaDatas([course]);
        return course;
      });
  }

  // Converts each course's dateRegister from a 'YYYY-MM-DD' string into a
  // Date object, in place.
  private converterStringsParaDatas(courses: Course[]) {
    for (const course of courses) {
      course.dateRegister = moment(course.dateRegister,
        'YYYY-MM-DD').toDate();
    }
  }
}
|
#!/usr/bin/env bash
# Docker container hosting the casper-node nctl environment.
# Defaults to "mynctl"; override by passing a container name as $1
# when sourcing this file.
export NCTL_DOCKER_CONTAINER=${1:-"mynctl"}
# Path of the nctl checkout *inside* the container.
export NCTL_HOME=/home/casper/casper-node/utils/nctl
# ###############################################################
# ALIASES
# ###############################################################
# Each alias below forwards its arguments to the matching nctl shell script
# inside the docker container. NOTE: $@ expands inside the outer double
# quotes, so arguments are joined into the bash -c command string (nctl
# scripts take key=value style args, so this is safe). The container
# variable is quoted defensively in case it ever contains spaces.
# Assets.
nctl-assets-dump() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/assets/dump.sh $@"; }
nctl-assets-ls() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/assets/list.sh $@"; }
nctl-assets-setup() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/assets/setup.sh $@"; }
nctl-assets-setup-from-stage() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/assets/setup_from_stage.sh $@"; }
nctl-assets-teardown() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/assets/teardown.sh $@"; }
nctl-assets-upgrade-from-stage() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/assets/upgrade_from_stage.sh $@"; }
# Binaries.
nctl-compile() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/assets/compile.sh $@"; }
nctl-compile-client() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/assets/compile_client.sh $@"; }
nctl-compile-node() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/assets/compile_node.sh $@"; }
nctl-compile-node-launcher() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/assets/compile_node_launcher.sh $@"; }
# Staging.
nctl-stage-build() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/staging/build.sh $@"; }
nctl-stage-build-from-settings() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/staging/build_from_settings.sh $@"; }
nctl-stage-init-settings() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/staging/init_settings.sh $@"; }
nctl-stage-set-remote() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/staging/set_remote.sh $@"; }
nctl-stage-set-remotes() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/staging/set_remotes.sh $@"; }
nctl-stage-teardown() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/staging/teardown.sh $@"; }
# Node control.
nctl-clean() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/node/clean.sh $@"; }
nctl-clean-logs() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/node/clean_logs.sh $@"; }
nctl-interactive() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/node/interactive.sh $@"; }
nctl-join() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/node/join.sh $@"; }
nctl-leave() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/node/leave.sh $@"; }
nctl-ports() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "lsof -i tcp | grep casper-no | grep LISTEN | sort"; }
# Single quotes: $NCTL must be expanded inside the container, not locally.
nctl-processes() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c 'ps -aux | grep "$NCTL"'; }
nctl-restart() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/node/restart.sh $@"; }
nctl-rotate() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/misc/rotate_nodeset.sh $@"; }
nctl-start() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/node/start.sh $@"; }
nctl-start-after-n-blocks() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/node/start_after_n_blocks.sh $@"; }
nctl-start-after-n-eras() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/node/start_after_n_eras.sh $@"; }
nctl-status() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/node/status.sh $@"; }
nctl-stop() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/node/stop.sh $@"; }
nctl-upgrade-protocol() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/node/upgrade.sh $@"; }
nctl-emergency-upgrade() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/node/emergency_upgrade.sh $@"; }
# Blocking commands.
nctl-await-n-blocks() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/misc/await_n_blocks.sh $@"; }
nctl-await-n-eras() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/misc/await_n_eras.sh $@"; }
nctl-await-until-block-n() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/misc/await_until_block_n.sh $@"; }
nctl-await-until-era-n() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/misc/await_until_era_n.sh $@"; }
# Views #1: chain.
nctl-view-chain-account() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_chain_account.sh $@"; }
nctl-view-chain-auction-info() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_chain_auction_info.sh $@"; }
nctl-view-chain-balance() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_chain_balance.sh $@"; }
nctl-view-chain-balances() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_chain_balances.sh $@"; }
nctl-view-chain-block() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_chain_block.sh $@"; }
nctl-view-chain-block-transfers() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_chain_block_transfers.sh $@"; }
nctl-view-chain-deploy() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_chain_deploy.sh $@"; }
nctl-view-chain-era() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_chain_era.sh $@"; }
nctl-view-chain-era-info() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_chain_era_info.sh $@"; }
nctl-view-chain-height() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_chain_height.sh $@"; }
nctl-view-chain-state-root-hash() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_chain_state_root_hash.sh $@"; }
nctl-view-chain-lfb() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_chain_lfb.sh $@"; }
nctl-view-chain-spec() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_chain_spec.sh $@"; }
nctl-view-chain-spec-accounts() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_chain_spec_accounts.sh $@"; }
# Views #2: node.
nctl-view-node-config() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_node_config.sh $@"; }
nctl-view-node-error-log() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_node_log_stderr.sh $@"; }
nctl-view-node-log() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_node_log_stdout.sh $@"; }
nctl-view-node-peers() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_node_peers.sh $@"; }
nctl-view-node-peer-count() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_node_peer_count.sh $@"; }
nctl-view-node-ports() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_node_ports.sh $@"; }
nctl-view-node-rpc-endpoint() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_node_rpc_endpoint.sh $@"; }
nctl-view-node-rpc-schema() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_node_rpc_schema.sh $@"; }
nctl-view-node-status() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_node_status.sh $@"; }
nctl-view-node-storage() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_node_storage.sh $@"; }
# Views #3: node metrics.
nctl-view-node-metrics() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_node_metrics.sh $@"; }
nctl-view-node-pending-deploy-count() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_node_metrics.sh metric=pending_deploy $@"; }
nctl-view-node-finalised-block-count() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_node_metrics.sh metric=amount_of_blocks $@"; }
nctl-view-node-finalisation-time() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_node_metrics.sh metric=finalization_time $@"; }
# Views #4: faucet.
nctl-view-faucet-account() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_faucet_account.sh $@"; }
# Views #5: user.
nctl-view-user-account() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_user_account.sh $@"; }
# Views #6: validator.
nctl-view-validator-account() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/views/view_validator_account.sh $@"; }
# Contracts #1: KV storage.
nctl-contracts-hello-world-install() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/contracts-hello-world/do_install.sh $@"; }
# Contracts #2: Transfers.
nctl-transfer() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/contracts-transfers/do_dispatch_native.sh $@"; }
nctl-transfer-native() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/contracts-transfers/do_dispatch_native.sh $@"; }
nctl-transfer-native-batch-dispatch() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/contracts-transfers/do_dispatch_native_batch.sh $@"; }
nctl-transfer-native-batch-prepare() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/contracts-transfers/do_prepare_native_batch.sh $@"; }
nctl-transfer-wasm() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/contracts-transfers/do_dispatch_wasm.sh $@"; }
nctl-transfer-wasm-batch-dispatch() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/contracts-transfers/do_dispatch_wasm_batch.sh $@"; }
nctl-transfer-wasm-batch-prepare() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/contracts-transfers/do_prepare_wasm_batch.sh $@"; }
# Contracts #3: Auction.
nctl-auction-activate() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/contracts-auction/do_bid_activate.sh $@"; }
nctl-auction-bid() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/contracts-auction/do_bid.sh $@"; }
nctl-auction-withdraw() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/contracts-auction/do_bid_withdraw.sh $@"; }
nctl-auction-delegate() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/contracts-auction/do_delegate.sh $@"; }
nctl-auction-undelegate() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/contracts-auction/do_delegate_withdraw.sh $@"; }
# Contracts #4: ERC-20.
nctl-erc20-approve() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/contracts-erc20/do_approve.sh $@"; }
nctl-erc20-install() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/contracts-erc20/do_install.sh $@"; }
nctl-erc20-fund-users() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/contracts-erc20/do_fund_users.sh $@"; }
nctl-erc20-transfer() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/contracts-erc20/do_transfer.sh $@"; }
nctl-erc20-view-allowances() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/contracts-erc20/view_allowances.sh $@"; }
nctl-erc20-view-details() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/contracts-erc20/view_details.sh $@"; }
nctl-erc20-view-balances() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/contracts-erc20/view_balances.sh $@"; }
# Contracts #5: KV storage.
nctl-kv-storage-get-key() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/contracts-kv/get_key.sh $@"; }
nctl-kv-storage-install() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/contracts-kv/do_install.sh $@"; }
nctl-kv-storage-set-key() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/contracts-kv/set_key.sh $@"; }
# Scenarios #1: Execute protocol upgrade.
nctl-exec-upgrade-scenario-1() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/scenarios-upgrades/upgrade_scenario_01.sh $@"; }
nctl-exec-upgrade-scenario-2() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/scenarios-upgrades/upgrade_scenario_02.sh $@"; }
nctl-exec-upgrade-scenario-3() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "source $NCTL_HOME/sh/scenarios-upgrades/upgrade_scenario_03.sh $@"; }
# Secret keys
# Print the faucet account's secret key for net-1.
nctl-view-faucet-secret-key() { docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "cat $NCTL_HOME/assets/net-1/faucet/secret_key.pem"; }
# Print a user's secret key, e.g. `nctl-view-user-secret-key user=1`.
nctl-view-user-secret-key() {
    # Map "user=1" style args onto the on-disk "user-1" directory name.
    local userx
    userx=$(echo "$1" | sed 's/=/-/')
    docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "cat $NCTL_HOME/assets/net-1/users/$userx/secret_key.pem";
}
# Print a node's secret key, e.g. `nctl-view-node-secret-key node=1`.
nctl-view-node-secret-key() {
    local nodex
    nodex=$(echo "$1" | sed 's/=/-/')
    docker exec -t "$NCTL_DOCKER_CONTAINER" /bin/bash -c "cat $NCTL_HOME/assets/net-1/nodes/$nodex/keys/secret_key.pem";
}
|
package io.github.intellij.dlanguage.run.exception;
/**
 * Thrown when a run configuration cannot resolve the module it should run against.
 */
public class ModuleNotFoundException extends Exception {

    /** Message used when no module reference is available at all. */
    private static final String DEFAULT_MESSAGE = "Module was null";

    /** Creates the exception with the default "Module was null" message. */
    public ModuleNotFoundException() {
        this(DEFAULT_MESSAGE);
    }

    /**
     * Creates the exception with a caller-supplied detail message.
     *
     * @param message explanation of why the module lookup failed
     */
    public ModuleNotFoundException(final String message) {
        super(message);
    }
}
|
"""Project: Eskapade - A python-based package for data analysis.
Macro: esk202_writedata
Created: 2017/02/20
Description:
Macro to illustrate writing pandas dataframes to file.
Authors:
KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands
Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""
from eskapade import ConfigObject, Chain, resources
from eskapade import analysis
from eskapade import process_manager
from eskapade.logger import Logger
logger = Logger()
logger.debug('Now parsing configuration file esk202_writedata')
#########################################################################################
# --- minimal analysis information
settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk202_writedata'
settings['version'] = 0
#########################################################################################
# --- Analysis values, settings, helper functions, configuration flags.
settings['do_readdata'] = True
settings['do_writedata'] = True
#########################################################################################
# --- Set path of data
data_path = resources.fixture('dummy.csv')
#########################################################################################
# --- now set up the chains and links based on configuration flags
# --- readdata with default settings reads all three input files simultaneously.
# all extra key word arguments are passed on to pandas reader.
if settings['do_readdata']:
read = Chain('ReadData')
# --- readdata keeps on opening the next file in the file list.
# all kwargs are passed on to pandas file reader.
read_data = analysis.ReadToDf(name='reader', key='test', sep='|', reader='csv', path=[data_path] * 3)
read.add(read_data)
if settings['do_writedata']:
write = Chain('WriteData')
# --- writedata needs a specified output format ('writer' argument).
# if this is not set, try to determine this from the extension from the filename.
# 'key' is picked up from the datastore. 'path' is the output filename.
# all other kwargs are passed on to pandas file writer.
write_data = analysis.WriteFromDf(name='writer', key='test', path='tmp3.csv', writer='csv')
write.add(write_data)
#########################################################################################
logger.debug('Done parsing configuration file esk202_writedata')
|
from hs_build_tools.nose import eq_,ok_
import hashkernel.bakery as ids
from hashstore.tests import TestSetup
from sqlalchemy import Table, Integer, MetaData, Column, types, select
from hashstore.utils.db import Dbf, IntCast, StringCast
import enum
import logging
logging.basicConfig()
# Echo every SQL statement SQLAlchemy issues, to aid debugging test failures.
logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
# Per-module test scratch directory; ensure_empty wipes any previous contents.
test = TestSetup(__name__, ensure_empty=True)
log = test.log
def test_int_enum():
    """IntCast should round-trip IntEnum values (and NULL) through SQLite."""
    class X(enum.IntEnum):
        a = 1
        b = 2
        c = 3
    meta = MetaData()
    # 'x' is stored as an integer via IntCast but surfaces as an X member.
    tbl = Table("mytable", meta,
                Column("id", Integer, primary_key=True,
                       autoincrement=True),
                Column('name', types.String()),
                Column("x", IntCast(X), nullable=True))
    #'sqlite:///:memory:'
    dbf = Dbf(meta,test.file_path('int_enum.sqlite3'))
    ok_(not dbf.exists())
    dbf.ensure_db()
    # Insert two rows: one with 'x' omitted, one with an explicit NULL.
    with dbf.connect() as conn:
        r = conn.execute(tbl.insert().values(name='abc'))
        id1 = r.inserted_primary_key[0]
        log.debug( id1 )
        r = conn.execute(tbl.insert().values(name='xyz',x=None))
        id2 = r.inserted_primary_key[0]
        log.debug( id2 )
    # Update the first row to carry an enum value, then read both back.
    dbf.execute(tbl.update().where(tbl.c.id == id1)
                .values(name='ed', x = X.c))
    fetch = dbf.execute(select([tbl])).fetchall()
    attach = {r.id: r.x for r in fetch}
    # The enum member (not its int) must come back; NULL must stay None.
    eq_(attach[id1], X.c)
    eq_(attach[id2], None)
def test_cake_type():
    """StringCast(Cake) columns should persist/restore Cake keys and survive
    dropping one table while leaving the other intact on disk."""
    meta = MetaData()
    # Two structurally identical tables keyed by an auto-generated Cake guid.
    tbl = Table("mytable", meta,
                Column("guid", StringCast(ids.Cake),
                       primary_key=True,
                       default=lambda: ids.Cake.new_portal()),
                Column('name', types.String()),
                Column("attachment", StringCast(ids.Cake), nullable=True))
    tbl2 = Table("mytable2", meta,
                 Column("guid", StringCast(ids.Cake),
                        primary_key=True,
                        default=lambda: ids.Cake.new_portal()),
                 Column('name', types.String()),
                 Column("attachment", StringCast(ids.Cake), nullable=True))
    #'sqlite:///:memory:'
    dbf = Dbf(meta,test.file_path('test.sqlite3'))
    def run_scenario(dbf, tbl):
        # Insert two rows, attach a Cake to the first, and return the
        # guid->attachment mapping as read back from the database.
        with dbf.connect() as conn:
            r = conn.execute(tbl.insert().values(name='abc'))
            guid1 = r.last_inserted_params()['guid']
            log.debug(guid1)
            r = conn.execute(
                tbl.insert().values(name='xyz', attachment=None))
            guid2 = r.last_inserted_params()['guid']
            log.debug(guid2)
        dbf.execute(tbl.update().where(tbl.c.guid == guid1)
                    .values(name='ed',
                            attachment=ids.Cake.from_bytes(b'asdf')))
        fetch = dbf.execute(select([tbl])).fetchall()
        attach = {r.guid: r.attachment for r in fetch}
        return attach, guid1, guid2
    ok_(not dbf.exists())
    dbf.ensure_db()
    # Same scenario must behave identically on both tables.
    attach, guid1, guid2 = run_scenario(dbf, tbl)
    eq_(attach[guid1], ids.Cake('01ME5Mi'))
    eq_(attach[guid2], None)
    attach, guid1, guid2 = run_scenario(dbf, tbl2)
    eq_(attach[guid1], ids.Cake('01ME5Mi'))
    eq_(attach[guid2], None)
    # Dropping 'mytable' must leave 'mytable2' on disk, and ensure_db on a
    # fresh Dbf over the same file must recreate the missing table.
    tbl.drop(dbf.engine())
    eq_(dbf.engine().table_names(), ['mytable2'])
    dbf = Dbf(meta,test.file_path('test.sqlite3'))
    eq_(dbf.engine().table_names(), ['mytable2'])
    dbf.ensure_db()
    eq_(dbf.engine().table_names(), ['mytable', 'mytable2'])
|
#!/usr/bin/env bash
# Darken a PDF: rasterize pages to JPG, apply a level adjustment, and
# reassemble them into <name>_darkened.pdf. Requires ImageMagick "convert".
# Based on https://superuser.com/posts/1538073
set -e

FILE_PATH=$1

if [ ! -f "${FILE_PATH}" ]; then
  echo "Invalid file path: ${FILE_PATH}"
  exit 2
fi

# Resolve the input to an absolute path: we pushd into a temporary directory
# below, so a relative path would no longer resolve there.
FILE_PATH="$(cd "$(dirname "${FILE_PATH}")" && pwd)/$(basename "${FILE_PATH}")"

# Density ppi - the higher the value the longer the conversion will take
DENSITY=${DENSITY:-"300"}
DESTINATION_DIRECTORY=${DESTINATION_DIRECTORY:-""}

if [ -z "${DESTINATION_DIRECTORY}" ]; then
  DESTINATION_DIRECTORY=$(pwd)
fi
# Make the destination absolute as well (the final mv runs from the temp dir).
DESTINATION_DIRECTORY=$(cd "${DESTINATION_DIRECTORY}" && pwd)

FILE_NAME=$(basename "${FILE_PATH}" ".pdf")

# NOTE: We support concurrent execution of this script so we create temporary
# directory per file and only move generated pdf to the original directory when
# we are done
TEMP_DIR=$(mktemp -d -t pdf-darken-XXXXXXXXXXXX)

clean_up() {
  popd > /dev/null
  if [ -d "${TEMP_DIR}" ]; then
    echo "Removing temporary directory ${TEMP_DIR}..."
    rm -rf "${TEMP_DIR}"
  fi
}

DESTINATION_PATH_TEMP="${TEMP_DIR}/${FILE_NAME}_darkened.pdf"
DESTINATION_PATH_FINAL="${DESTINATION_DIRECTORY}/${FILE_NAME}_darkened.pdf"

pushd "${TEMP_DIR}" > /dev/null
trap "clean_up" EXIT

echo "Darkening pdf (this may take a while)..."
# 1. Convert it to jpg images
convert -density "${DENSITY}" "${FILE_PATH}" darken_pdf_preprocess_%02d.jpg
# 2. Darken the images
echo "Darkening JPGs..."
convert darken_pdf_preprocess*.jpg -level "50%,100%,0.3" darken_pdf_postprocess_%02d.jpg
# 3. Re-create pdf from darkened jpg images
echo "Converting JPGs to PDF..."
convert darken_pdf_postprocess*.jpg -background none "${DESTINATION_PATH_TEMP}"
# 4. Move generated pdf in place (we do this to avoid potential sync issue if we wrote
# PDF directly to the output instead of temporary dir). Think of it as of an atomic move
mv "${DESTINATION_PATH_TEMP}" "${DESTINATION_PATH_FINAL}"
echo "Darkened PDF stored at ${DESTINATION_PATH_FINAL}"
# NOTE: In some cases generated PDF for some reason won't be readable on RM
# (still haven't figured out way). In that case we can convert it to epub using
# calibre. This will also result in small overall file size
# ebook-convert "${DESTINATION_PATH_FINAL}" "${DESTINATION_PATH_FINAL}.epub" --margin-left 0
# --margin-right 0 --margin-top 0 --margin-bottom 0 --enable-heuristics
# --output-profile tablet
|
// Decorates every line of a string with a configurable prefix and suffix.
// Example: start "> ", end " <" turns "a\nb" into "> a <\n> b <".
function MultiLiner() {
  this.myLinesStartWith = "";
  this.myLinesEndWith = "";
  this.myStringToWrap = "";

  this.setLineStart = function (lineStart) {
    this.myLinesStartWith = lineStart;
  };
  this.setLineEnd = function (lineEnd) {
    this.myLinesEndWith = lineEnd;
  };
  this.setStringToWrap = function (stringToWrap) {
    this.myStringToWrap = stringToWrap;
  };

  this.getLineStart = function () {
    return this.myLinesStartWith;
  };
  this.getLineEnd = function () {
    return this.myLinesEndWith;
  };
  this.getStringToWrap = function () {
    return this.myStringToWrap;
  };

  // Splits on any newline convention (\r\n, \r or \n), decorates every line,
  // and caches the wrapped result back into myStringToWrap before returning it.
  this.wrapString = function () {
    var lines = this.myStringToWrap.split(/\r\n|\r|\n/);
    this.myStringToWrap =
      this.myLinesStartWith +
      lines.join(this.myLinesEndWith + "\n" + this.myLinesStartWith) +
      this.myLinesEndWith;
    return this.myStringToWrap;
  };
}
|
#!/usr/bin/env python3
"""Graph AWS resource data in Neptune"""
import argparse
import os
import sys
from typing import List, Optional
from altimeter.aws.aws2n import generate_scan_id, aws2n
from altimeter.aws.scan.muxer.local_muxer import LocalAWSScanMuxer
from altimeter.core.config import AWSConfig
def main(argv: Optional[List[str]] = None) -> int:
    """Run a local AWS scan and generate an RDF graph (Neptune load disabled).

    The config file path is taken from the first positional CLI argument,
    falling back to the CONFIG_PATH environment variable.

    Returns:
        0 on success; 1 when no config path could be determined.
    """
    args = sys.argv[1:] if argv is None else argv

    parser = argparse.ArgumentParser()
    parser.add_argument("config", type=str, nargs="?")
    parsed = parser.parse_args(args)

    config_path = parsed.config
    if config_path is None:
        config_path = os.environ.get("CONFIG_PATH")
    if config_path is None:
        print("config must be provided as a positional arg or env var 'CONFIG_PATH'")
        return 1

    aws_config = AWSConfig.from_path(config_path)
    scan_id = generate_scan_id()
    muxer = LocalAWSScanMuxer(scan_id=scan_id, config=aws_config)
    result = aws2n(scan_id=scan_id, config=aws_config, muxer=muxer, load_neptune=False)
    print(result.rdf_path)
    return 0
if __name__ == "__main__":
sys.exit(main())
|
import * as React from "react";
import { StaticImageImport } from "src/image";
/** Participant categories used to group people on the conference page. */
export const TEAM_TYPES = ["Team", "Keynote Speaker", "Panelist"] as const;
/** Union of the literal strings in TEAM_TYPES. */
export type TeamType = typeof TEAM_TYPES[number];
/** One person's display card: name, role, headshot, and JSX biography. */
export interface TeamMemberProps {
  name: string;
  title: string;
  type: TeamType;
  image: StaticImageImport;
  bio: React.ReactNode;
}
// Builds the `image` field for a member from a path fragment under
// public/images/bridge/. The webpackInclude magic comment limits the
// require-context bundle to .jpg files; do not remove it.
function image(postfix: string): Pick<TeamMemberProps, "image"> {
  return {
    image:
      require(/* webpackInclude: /\.jpg$/ */ `public/images/bridge/${postfix}.jpg`)
        .default,
  };
}
const TEAM: TeamMemberProps[] = [
{
name: "<NAME>",
type: "Team",
title: "President of the Student Advisory Board, Mission Bit",
...image("team/johnny_lin"),
bio: (
<>
My name is Johnny, and this is my second year helping to organize the
Bridging the Youth Tech Divide conference. It’s a fun process planning
out this event. My primary role is to make sure that we’re meeting
deadlines for the conference, and I also help out wherever help is
needed. If you have any questions or concerns directly for me, you can
reach me at{" "}
<a
href="mailto:<EMAIL>"
target="_blank"
rel="noopener noreferrer"
>
<EMAIL>
</a>
!
</>
),
},
{
name: "<NAME>",
type: "Team",
title: "Student Ambassador, Mission Bit",
...image("team/karina"),
bio: (
<>
I’m Karina, and this is my first year helping to organize the Bridging
the Youth Tech Divide conference. I attended the conference last summer,
and I’m so excited to have the opportunity to help organize this year’s
conference. My primary role is to help with outreach and recruiting
attendees. You can reach me at{" "}
<a
href="mailto:<EMAIL>"
target="_blank"
rel="noopener noreferrer"
>
<EMAIL>
</a>
!
</>
),
},
{
name: "<NAME>",
type: "Team",
title: "Student Ambassador, Mission Bit",
...image("team/tara"),
bio: (
<>
I’m Tara, and this is my second year organizing the Bridging the Youth
Tech Divide. My role is to make sure everyone has an orderly system of
planning. I’m excited to see new faces interested in the tech industry
this year! Contact me at{" "}
<a
href="mailto:<EMAIL>"
target="_blank"
rel="noopener noreferrer"
>
<EMAIL>
</a>
!
</>
),
},
{
name: "<NAME>",
type: "Team",
title: "Student Ambassador, Mission Bit",
...image("team/natalie"),
bio: (
<>
Hi! My name is Natalie, and this is my first time helping to organize
the Bridging the Youth Tech Divide Conference. I’m super excited to meet
everyone and show you what our team has worked on over the last few
months. Feel free to reach out to me anytime at{" "}
<a
href="mailto:<EMAIL>"
target="_blank"
rel="noopener noreferrer"
>
<EMAIL>
</a>
!
</>
),
},
{
name: "<NAME>",
type: "Team",
title: "Student Ambassador, Mission Bit",
...image("team/alyssa"),
bio: (
<>
My name is Alyssa, and this year is my first year contributing to the
Bridging the Youth Tech Divide Conference. It was super cool taking part
in organizing this event, and I look forward to meeting everyone! If you
have any questions or concerns regarding the conference, you may contact
our team or reach me at{" "}
<a
href="mailto:<EMAIL>"
target="_blank"
rel="noopener noreferrer"
>
<EMAIL>
</a>
!
</>
),
},
{
name: "<NAME>",
type: "Team",
title: "Student Ambassador, Mission Bit",
...image("team/oswen"),
bio: (
<>
My name is Oswen, and this is my first year helping organize the
Bridging the Youth Tech Divide Conference. It has been a fun learning
experience working on this event with others. I am extremely excited to
facilitate and meet everyone. Feel free to contact me or others about
any further questions you may have, you can reach me at{" "}
<a
href="mailto:<EMAIL>"
target="_blank"
rel="noopener noreferrer"
>
<EMAIL>
</a>
!
</>
),
},
{
name: "<NAME>",
type: "Team",
title: "Student Ambassador, Mission Bit",
...image("team/angelo"),
bio: (
<>
Hello! My name is Angelo, and this is my first year attending and
organizing the Bridging Youth Tech Divide Conference for 2021. It has
been a super fun experience organizing this event with the student
advisory board. I’m very excited to meet everyone! If you have any
questions regarding the conference, please don’t hesitate to reach out
to me at{" "}
<a
href="mailto:<EMAIL>"
target="_blank"
rel="noopener noreferrer"
>
<EMAIL>
</a>
!
</>
),
},
];
const KEYNOTE_SPEAKERS: TeamMemberProps[] = [
{
name: "<NAME>",
type: "Keynote Speaker",
title:
"Manager, Verizon Crisis Response Team - VA/WV/KY/OH, Public Sector Strategic Operations",
...image("keynote-speakers/tetoya"),
bio: (
<>
Prepared to deploy Verizon response supported mission essential
communication and tech solutions to assist with emergency relief efforts
nationwide. Current response manager for Virginia, West Virginia,
Kentucky and Ohio. Tetoya has 20+ years of public safety experience
assisting military and private sector businesses with Emergency
Operation Centers, Government operations. As a veteran of the United
States Air Force she has developed extensive knowledge in emergency
preparedness and project management. Tetoya has served Eastern North
Carolina as both a Disaster Response and Service to the Armed Services
Emergency Communications volunteer deploying to multiple disaster
response operations across the nation. Tetoya is an active member of the
American Red Cross. Tetoya resides in Moseley, Virginia
</>
),
},
{
name: "<NAME>",
type: "Keynote Speaker",
title: "Senior Engineer - Applied Research",
...image("keynote-speakers/venky"),
bio: (
<>
An engineer with experience in working at the intersection of technology
and strategy in IoT, with an emphasis on applications in industrial and
smart city deployments. Currently, part of the innovation team at
Verizon that is involved in shaping the go-to-market and product
strategy of 5G by working closely with various external partners and
internal stakeholders.
</>
),
},
];
const PANELISTS: TeamMemberProps[] = [
{
name: "<NAME>",
type: "Panelist",
title: "Sr. Manager - Technology and Product Development",
...image("panelists/alelgn"),
bio: (
<>
<NAME> is Senior Manager - Technology responsible for Sports
Technology integration supporting the Technology & Product Development
Organization. Before joining Device Technology, he was a Senior Manager
of Global Operator Collaboration responsible for collaborating with 55+
international carriers to help identify emerging trends in the industry
and advocate for Verizon interest in the global landscape on strategic
initiatives such as 5G core, FWA and mmWave adoption. Alelgn has prior
experience in the Network Implementation organization where he helped
launch projects such as eUICC, VoWiFi and also helped in portfolio
management of various projects. He has also worked in the Network Device
Evaluation Lab where he worked with device OEMs to make sure they adhere
to Verizon and industry standards.
</>
),
},
{
name: "<NAME>",
type: "Panelist",
title: "Principal Engineer System Architecture",
...image("panelists/carlo"),
bio: (
<>
Award-winning Telecommunications Professional with 15+ years of
expertise in wireless network engineering, systems design and
implementation, project planning, and team leadership. Proven ability to
direct teams of 15+ while managing $20M capital expense budgets.
Highly-skilled at adapting to new technologies in dynamic, fast-paced,
environments. Gains stakeholder buy-in to deliver projects on time and
budget. Organized self-starter able to manage multiple competing
priorities.
</>
),
},
{
name: "<NAME>",
type: "Panelist",
title: "Product Marketing",
...image("panelists/destah"),
bio: (
<>
Destah is a technology and innovation enthusiast with a career built
around driving business disruption, customer advocacy and connecting
good ideas and good people. A veteran of several of the smallest Silicon
Valley Startups as well as some of the biggest behemoths, he has picked
up enough knowledge and information to be relatively dangerous. From his
humble beginnings as an Electrical Engineer he has touched the data
networking, wireless data, and machine learning technologies as a
customer facing evangelist and problem solver. When using his powers for
good, he is identifying emerging technologies and finding synergistic
solutions for customers and professional colleagues. The latest shiny
objects to have caught his fancy are XR (mixed reality), Cybersecurity,
and IoT/Smart City technologies.
</>
),
},
{
name: "<NAME>",
type: "Panelist",
title: "5G Infrastructure Planning",
...image("panelists/patricia"),
bio: (
<>
Patricia is a senior Cloud Technology manager in the Verizon Global
Strategy and Technology Planning organization. She is leading and
driving private 5G and private Mobile Edge Computing (MEC) strategy to
commercialization. She was instrumental in the private 5G and MEC
partnerships with Microsoft and AWS. Prior to HQ Network Planning,
Patricia was an Associate Director in the West Area Data Planning
leading VoLTE and Data Networks in Planning, Engineering, and
Performance. Patricia is a technology innovator with over seventy plus
granted patents. She was elected as one of the 2019 Verizon Master
Inventors, and was showcased in Verizon Patent Video in 2015. She earned
her bachelor and master degrees in Electrical Engineering and Computer
Science at New York University School of Engineering in Brooklyn, New
York{" "}
</>
),
},
];
// Sections rendered on the team page, in display order.  Each tuple is
// [heading, member list]; mapped into the { section, members } records
// the page component consumes.
const TeamData = [
  ["Keynote Speakers", KEYNOTE_SPEAKERS],
  ["Panelists", PANELISTS],
  ["Team", TEAM],
].map(([section, members]) => ({ section, members }));

export default TeamData;
|
'use strict';

import angular from 'angular';
// import ngAnimate from 'angular-animate';
import ngCookies from 'angular-cookies';
import ngResource from 'angular-resource';
import ngSanitize from 'angular-sanitize';
// Consistency fix: use an ES import like every other vendor module here
// instead of the lone CommonJS require() this file previously mixed in.
import ngRoute from 'angular-route';
import uiBootstrap from 'angular-ui-bootstrap';

import {
  routeConfig
} from './app.config';

import constants from './app.constants';
import util from '../components/util/util.module';
import navbar from '../components/navbar/navbar.component';
import main from './main/main.component';
import about from './about/about.component';
import recipes from './recipes/recipes.component';
import recipeDetail from './recipeDetail/recipeDetail.component';
import users from './users/users.component';
import userDetail from './userDetail/userDetail.component';

import './app.scss';

// Register the root module with all vendor and feature dependencies.
angular.module('comp3705App', [ngCookies, ngResource, ngSanitize, ngRoute, uiBootstrap,
  constants, util, navbar, main, about, recipes, recipeDetail, users, userDetail])
  .config(routeConfig);

// Bootstrap manually once the DOM is ready; strictDi surfaces
// non-annotated injectables early instead of failing after minification.
angular.element(document)
  .ready(() => {
    angular.bootstrap(document, ['comp3705App'], {
      strictDi: true
    });
  });
|
#!/bin/bash
# Inject the locally built plate-maker tarball into package.json as a
# "file:" dependency so downstream CI installs the freshly packed artifact.
set -euo pipefail  # exit on error, unset var, or failed pipeline stage

DIST="dist"

# Glob instead of parsing `ls`; CI is expected to produce exactly one .tar.
tarfiles=( "${DIST}"/*.tar )
if (( ${#tarfiles[@]} != 1 )) || [[ ! -e "${tarfiles[0]}" ]]; then
  echo "error: expected exactly one .tar file in ${DIST}/" >&2
  exit 1
fi
TARFILE=${tarfiles[0]##*/}  # basename only; the dist/ prefix is re-added below

ORIGINAL="\"dependencies\": {"
REPLACEMENT="\"dependencies\": {\n    \"@abolis/plate-maker\": \"file:${DIST}/${TARFILE}\","

# '|' as the sed delimiter means the slashes in the path need no escaping.
# NOTE: GNU sed semantics assumed (-i without suffix, \n in replacement).
sed -i "s|${ORIGINAL}|${REPLACEMENT}|g" package.json
|
#! /bin/sh
# Copyright (C) 2000-2020 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
# Test for PR 68.
#
# == Report ==
# If an autoconf substitution variable of the form "*_HEADERS" --
# for example "MY_HEADERS" -- is used in an "include_" variable
# in conjunction with EXTRA_HEADERS, $(MY_HEADERS) gets included
# in the HEADERS variable for *all* generated Makefile.ins in the
# project.
# Pull in the automake test-suite harness (provides $ACLOCAL, $AUTOMAKE,
# a scratch directory, and a pre-seeded configure.ac).
. test-init.sh
# Declare a *_HEADERS-style substitution variable plus a C compiler so
# automake runs its header-handling machinery.
cat >> configure.ac << 'END'
AC_SUBST([MY_HEADERS])
AC_PROG_CC
END
# A Makefile.am that never references MY_HEADERS; its generated
# Makefile.in must not pick the variable up either.
cat > Makefile.am << 'END'
bin_PROGRAMS = zoo
zoo_SOURCES = joe.c
END
# Stub source files so automake's existence checks pass.
: > joe.c
: > qq.h
$ACLOCAL
$AUTOMAKE
# The PR-68 bug leaked $(MY_HEADERS) into every Makefile.in; fail if a
# non-@-escaped occurrence shows up in the output.
grep '[^@]MY_HEADERS' Makefile.in && exit 1
exit 0
|
module Foundation
  module Icons
    module Sassc
      module Rails
        # Gem version for foundation-icons-sassc-rails; bump per semantic
        # versioning when cutting a release.
        VERSION = "3.0.0"
      end
    end
  end
end
|
#!/bin/bash
# Sample usage of the script: emit a log line whose prefix is chosen by
# the (case-insensitive) severity level.

#######################################
# Print "<Label>: <message>" for a given severity level.
# Arguments:
#   $1 - log level (info/warn/error, any case)
#   $2 - message text
# Outputs:
#   the formatted log line on stdout
#######################################
log_message() {
  local level=${1^^}   # uppercase so matching is case-insensitive (bash 4+)
  local message=$2
  case "$level" in
    INFO)
      echo "Information: $message"
      ;;
    WARN)
      echo "Warning: $message"
      ;;
    ERROR)
      echo "Error: $message"
      ;;
    *)
      echo "Unknown log level: $level"
      ;;
  esac
}

# Sample invocation matching the original demo values.
log_message "INFO" "This is an informational message"
|
#!/bin/bash
# Build and install an HTTP/2-capable curl from source on CentOS 6/7:
# first nghttp2 (the HTTP/2 C library), then curl linked against it.
## curl http2 2020-01-02
## http://www.aqzt.com
## email: ppabc@qq.com
## robert yu
## CentOS 6 and CentOS 7

# Robustness fix: abort on the first failed download/build step instead
# of silently continuing into a half-built install.
set -e

# Build prerequisites: compression, TLS headers, and the stock libcurl.
yum install -y bzip2 openssl openssl-devel libcurl

# --- nghttp2: provides the HTTP/2 support curl links against ---
wget https://github.com/nghttp2/nghttp2/releases/download/v1.40.0/nghttp2-1.40.0.tar.bz2
tar xvf nghttp2-1.40.0.tar.bz2
cd nghttp2-1.40.0
./configure
make
make install
cd ..

# Make the freshly installed /usr/local/lib visible to the dynamic linker.
echo "/usr/local/lib" >> /etc/ld.so.conf
ldconfig

# --- curl, configured against the nghttp2 just installed ---
wget https://curl.haxx.se/download/curl-7.67.0.tar.gz
tar zxvf curl-7.67.0.tar.gz
cd curl-7.67.0
./configure --with-nghttp2=/usr/local --with-ssl
make
make install
cd ..

echo ok
# Manual smoke test to run afterwards:
echo "/usr/local/bin/curl -kv --http2 -I https://aqzt.com"
|
#!/bin/bash
# Install, instantiate and smoke-test a chaincode across the peer orgs of
# an existing Hyperledger Fabric network.
#
# Usage: $0 -c <channel> -n <chaincode name> -v <version> -g "<org list>"

export GOPATH=/opt/gopath
export GOROOT=/opt/go
export PATH="$PATH:$GOROOT/bin:$GOPATH/bin"

usage() {
  echo "Usage: $0 [-c <channel name>] [-g <orgs of peers>] [-n <chaincode name>] [-v <chaincode version>]" 1>&2
  exit 1
}

while getopts ":c:n:v:g:" o; do
  case "${o}" in
    c) c=${OPTARG} ;;
    n) n=${OPTARG} ;;
    v) v=${OPTARG} ;;
    g) g=${OPTARG} ;;
    *) usage ;;
  esac
done
shift $((OPTIND - 1))

# All four options are mandatory.
if [ -z "${c}" ] || [ -z "${n}" ] || [ -z "${v}" ] || [ -z "${g}" ]; then
  usage
fi

echo "create channel channelID ${c} chaincodeName ${n} with ${v}"

# env.sh supplies DATA, initPeerVars, ORDERER_CONN_ARGS, PEER_HOST, ...
source "$(dirname "$0")/env.sh"

# -g is a whitespace-separated org list; deliberate word-splitting here.
PEER_ORGS=($g)
CHANNEL_NAME=${c}
CHANNEL_TX_FILE=$DATA/$CHANNEL_NAME.tx
QUERY_TIMEOUT=30
PEER_BIN=$GOPATH/src/github.com/hyperledger/fabric/.build/bin/peer

# Install the chaincode on peer 0 of every org.
for ORG in "${PEER_ORGS[@]}"; do
  initPeerVars "$ORG" 0
  echo $ORDERER_CONN_ARGS
  $PEER_BIN chaincode install -n "$n" -v "$v" -p github.com/hyperledger/caliper/src/contract/fabric/simple/go
done
sleep 3
$PEER_BIN chaincode list --installed -C "$CHANNEL_NAME"

# Instantiate once from the first org's peer.  The endorsement policy
# accepts any single member org.
initPeerVars "${PEER_ORGS[0]}" 0
echo $ORDERER_CONN_ARGS
echo "Instantiating chaincode on $PEER_HOST ..."
POLICY="OR('org1MSP.member', 'org2MSP.member', 'org3MSP.member', 'org4MSP.member', 'org5MSP.member')"
# Bug fix: POLICY was defined but never passed, so the default policy was
# silently used; attach it explicitly with -P.
$PEER_BIN chaincode instantiate -C "$CHANNEL_NAME" -n "${n}" -v "${v}" -c '{"Args":["init"]}' -P "$POLICY" $ORDERER_CONN_ARGS
sleep 10

# Smoke test: open one account from each remaining org (5th down to 2nd),
# mirroring the four original per-org invocations.
# NOTE(review): assumes -g listed at least five orgs — confirm with callers.
ACCOUNTS=(aaabbbccc aaabbbddd aaabbbeee aaabbbfff)
for i in 0 1 2 3; do
  initPeerVars "${PEER_ORGS[$((4 - i))]}" 0
  echo $ORDERER_CONN_ARGS
  echo "Invoking chaincode on $PEER_HOST ..."
  $PEER_BIN chaincode invoke -C "$CHANNEL_NAME" -n "${n}" -c "{\"Args\":[\"open\",\"${ACCOUNTS[$i]}\",\"10000\"]}" $ORDERER_CONN_ARGS
done

echo "done"
|
# modify the prompt to contain branch name if applicable
git_branch() {
  # `git current-branch` is a user-defined alias; empty outside a repo.
  branch=$(git current-branch 2> /dev/null)
  case "$branch" in
    master) echo "±" ;;   # compact glyph for the default branch
    *) echo $branch ;;    # any other branch (or nothing) verbatim
  esac
}
hg_bookmark() {
  # Locate the enclosing .hg repository, if any; bail out quietly when
  # we are not inside one.
  repo=$($HOME/.zsh/tools/find_parent_dir .hg)
  [[ -n $repo ]] || return 0
  # Prefer the active bookmark name; fall back to the mercury glyph.
  bookmark=$(cat $repo/bookmarks.current 2> /dev/null)
  if [[ -n $bookmark ]]; then
    echo $bookmark
  else
    echo "☿"
  fi
}
get_branch_name() {
  # Try git first; it takes priority over mercurial.
  branch=$(git_branch)
  if [[ -n $branch ]]; then
    echo "$branch"
    return 0
  fi
  # Fall back to a mercurial bookmark.
  branch=$(hg_bookmark)
  if [[ -n $branch ]]; then
    echo "$branch"
    return 0
  fi
  # Neither VCS detected.
  return 1
}
branch_prompt_info() {
  branch=$(get_branch_name)
  # Show nothing when we're not inside a repo.
  [[ -n $branch ]] || return
  # Leading space separates the bold-green branch from the cwd segment.
  echo " %{$fg_bold[green]%}$branch%{$reset_color%}"
}
# promptsubst makes zsh re-evaluate the $(...) calls inside PS1 on every
# prompt redraw.
setopt promptsubst
# Show a green "py " tag when a virtualenv is active.
# NOTE(review): relies on a virtualenv_name helper defined elsewhere in
# the zsh config — confirm it is sourced before this file.
virtualenv_prompt_info() {
  name=$(virtualenv_name)
  if [[ -n $name ]]; then
    echo "%{$fg[green]%}py %{$reset_color%}"
  fi
}
# Prompt glyph.
icon() {
  echo "λ"
}
# Prompt layout: user@host (only when in an SSH session) + venv tag +
# yellow λ + blue cwd basename + VCS branch segment.
PS1='${SSH_CONNECTION+"%{$fg[cyan]%}%n@%m "}$(virtualenv_prompt_info)%{$fg_bold[yellow]%}$(icon) %{$fg_bold[blue]%}%c%{$reset_color%}$(branch_prompt_info) '
|
# Prepend the JVM's server library directory so libjvm.so can be found by
# the dynamic linker — presumably for programs embedding Java via JNI on
# amd64 Debian/Ubuntu layouts; TODO confirm the consuming application.
export LD_LIBRARY_PATH=/usr/lib/jvm/default-java/jre/lib/amd64/server:$LD_LIBRARY_PATH
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.