text
stringlengths 5
1.04M
|
|---|
/*****************************************************************************
Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
*****************************************************************************/
/**************************************************//**
@file os/os0proc.cc
The interface to the operating system
process control primitives
Created 9/30/1995 Heikki Tuuri
*******************************************************/
#include "os0proc.h"
#ifdef UNIV_NONINL
#include "os0proc.ic"
#endif
#include "ut0mem.h"
#include "ut0byte.h"
/* FreeBSD for example has only MAP_ANON, Linux has MAP_ANONYMOUS and
MAP_ANON but MAP_ANON is marked as deprecated */
#if defined(MAP_ANONYMOUS)
#define OS_MAP_ANON MAP_ANONYMOUS
#elif defined(MAP_ANON)
#define OS_MAP_ANON MAP_ANON
#endif
UNIV_INTERN ibool os_use_large_pages;
/* Large page size. This may be a boot-time option on some platforms */
UNIV_INTERN ulint os_large_page_size;
/****************************************************************//**
Converts the current process id to a number. It is not guaranteed that the
number is unique. In Linux returns the 'process number' of the current
thread. That number is the same as one sees in 'top', for example. In Linux
the thread id is not the same as one sees in 'top'.
@return process id as a number */
UNIV_INTERN
ulint
os_proc_get_number(void)
/*====================*/
{
#ifdef __WIN__
	/* On Windows the id comes straight from the Win32 API. */
	return((ulint)GetCurrentProcessId());
#else
	/* On POSIX systems getpid() returns the process id; per the
	function comment above, on Linux this is the 'process number'
	shown by 'top', which is not the same as the thread id. */
	return((ulint) getpid());
#endif
}
/****************************************************************//**
Allocates large pages memory.
@return allocated memory */
UNIV_INTERN
void*
os_mem_alloc_large(
/*===============*/
	ulint*	n)	/*!< in/out: number of bytes; on return, rounded up
			to the page/huge-page granularity actually used */
{
	void*	ptr;
	ulint	size;
#if defined HAVE_LARGE_PAGES && defined UNIV_LINUX
	int shmid;
	struct shmid_ds buf;

	/* Huge pages are only attempted when explicitly enabled and a
	huge page size is known; otherwise fall through to the generic
	allocation paths below. */
	if (!os_use_large_pages || !os_large_page_size) {
		goto skip;
	}

	/* Align block size to os_large_page_size */
	ut_ad(ut_is_2pow(os_large_page_size));
	size = ut_2pow_round(*n + (os_large_page_size - 1),
			     os_large_page_size);

	/* Allocate a SysV shared memory segment backed by huge pages. */
	shmid = shmget(IPC_PRIVATE, (size_t) size, SHM_HUGETLB | SHM_R | SHM_W);
	if (shmid < 0) {
		/* NOTE(review): unlike the later fprintf calls, `size` is
		passed to %lu without an (ulong) cast here — confirm ulint
		is unsigned long on all supported platforms. */
		fprintf(stderr, "InnoDB: HugeTLB: Warning: Failed to allocate"
			" %lu bytes. errno %d\n", size, errno);
		ptr = NULL;
	} else {
		/* Attach the segment into our address space; shmat returns
		(void*)-1 on failure. */
		ptr = shmat(shmid, NULL, 0);
		if (ptr == (void*)-1) {
			fprintf(stderr, "InnoDB: HugeTLB: Warning: Failed to"
				" attach shared memory segment, errno %d\n",
				errno);
			ptr = NULL;
		}
		/* Remove the shared memory segment so that it will be
		automatically freed after memory is detached or
		process exits */
		shmctl(shmid, IPC_RMID, &buf);
	}

	if (ptr) {
		/* Success via huge pages: report the rounded size to the
		caller and account the allocation under the list mutex. */
		*n = size;
		os_fast_mutex_lock(&ut_list_mutex);
		ut_total_allocated_memory += size;
		os_fast_mutex_unlock(&ut_list_mutex);
		UNIV_MEM_ALLOC(ptr, size);
		return(ptr);
	}

	fprintf(stderr, "InnoDB HugeTLB: Warning: Using conventional"
		" memory pool\n");
skip:
#endif /* HAVE_LARGE_PAGES && UNIV_LINUX */
#ifdef __WIN__
	SYSTEM_INFO	system_info;
	GetSystemInfo(&system_info);
	/* Align block size to system page size */
	ut_ad(ut_is_2pow(system_info.dwPageSize));
	/* system_info.dwPageSize is only 32-bit. Casting to ulint is required
	on 64-bit Windows. */
	size = *n = ut_2pow_round(*n + (system_info.dwPageSize - 1),
				  (ulint) system_info.dwPageSize);
	ptr = VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE,
			   PAGE_READWRITE);
	if (!ptr) {
		fprintf(stderr, "InnoDB: VirtualAlloc(%lu bytes) failed;"
			" Windows error %lu\n",
			(ulong) size, (ulong) GetLastError());
	} else {
		os_fast_mutex_lock(&ut_list_mutex);
		ut_total_allocated_memory += size;
		os_fast_mutex_unlock(&ut_list_mutex);
		UNIV_MEM_ALLOC(ptr, size);
	}
#elif !defined OS_MAP_ANON
	/* No anonymous mmap available on this platform: fall back to the
	InnoDB heap allocator with the size unchanged. */
	size = *n;
	ptr = ut_malloc_low(size, TRUE, FALSE);
#else
# ifdef HAVE_GETPAGESIZE
	size = getpagesize();
# else
	size = UNIV_PAGE_SIZE;
# endif
	/* Align block size to system page size */
	ut_ad(ut_is_2pow(size));
	size = *n = ut_2pow_round(*n + (size - 1), size);
	/* Anonymous private mapping; OS_MAP_ANON abstracts over
	MAP_ANONYMOUS vs. the deprecated MAP_ANON (see top of file). */
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | OS_MAP_ANON, -1, 0);
	if (UNIV_UNLIKELY(ptr == (void*) -1)) {
		fprintf(stderr, "InnoDB: mmap(%lu bytes) failed;"
			" errno %lu\n",
			(ulong) size, (ulong) errno);
		ptr = NULL;
	} else {
		os_fast_mutex_lock(&ut_list_mutex);
		ut_total_allocated_memory += size;
		os_fast_mutex_unlock(&ut_list_mutex);
		UNIV_MEM_ALLOC(ptr, size);
	}
#endif
	/* NULL on failure in the __WIN__ and mmap paths. */
	return(ptr);
}
/****************************************************************//**
Frees large pages memory. */
UNIV_INTERN
void
os_mem_free_large(
/*==============*/
	void	*ptr,	/*!< in: pointer returned by
			os_mem_alloc_large() */
	ulint	size)	/*!< in: size returned by
			os_mem_alloc_large() */
{
	/* Sanity check under the mutex: we must not be asked to free more
	than was ever accounted as allocated. The accounting itself is
	re-checked and decremented inside whichever branch succeeds. */
	os_fast_mutex_lock(&ut_list_mutex);
	ut_a(ut_total_allocated_memory >= size);
	os_fast_mutex_unlock(&ut_list_mutex);
#if defined HAVE_LARGE_PAGES && defined UNIV_LINUX
	/* If the block came from the HugeTLB shared-memory path, shmdt()
	detaches it (zero return means success); the segment itself was
	already marked IPC_RMID in os_mem_alloc_large(), so detaching is
	enough to free it. If shmdt() fails we fall through to the
	generic paths below. */
	if (os_use_large_pages && os_large_page_size && !shmdt(ptr)) {
		os_fast_mutex_lock(&ut_list_mutex);
		ut_a(ut_total_allocated_memory >= size);
		ut_total_allocated_memory -= size;
		os_fast_mutex_unlock(&ut_list_mutex);
		UNIV_MEM_FREE(ptr, size);
		return;
	}
#endif /* HAVE_LARGE_PAGES && UNIV_LINUX */
#ifdef __WIN__
	/* When RELEASE memory, the size parameter must be 0.
	Do not use MEM_RELEASE with MEM_DECOMMIT. */
	if (!VirtualFree(ptr, 0, MEM_RELEASE)) {
		fprintf(stderr, "InnoDB: VirtualFree(%p, %lu) failed;"
			" Windows error %lu\n",
			ptr, (ulong) size, (ulong) GetLastError());
	} else {
		os_fast_mutex_lock(&ut_list_mutex);
		ut_a(ut_total_allocated_memory >= size);
		ut_total_allocated_memory -= size;
		os_fast_mutex_unlock(&ut_list_mutex);
		UNIV_MEM_FREE(ptr, size);
	}
#elif !defined OS_MAP_ANON
	/* Mirror of the ut_malloc_low() fallback in os_mem_alloc_large();
	ut_free() does its own accounting. */
	ut_free(ptr);
#else
# if defined(UNIV_SOLARIS)
	if (munmap(static_cast<caddr_t>(ptr), size)) {
# else
	if (munmap(ptr, size)) {
# endif /* UNIV_SOLARIS */
		fprintf(stderr, "InnoDB: munmap(%p, %lu) failed;"
			" errno %lu\n",
			ptr, (ulong) size, (ulong) errno);
	} else {
		os_fast_mutex_lock(&ut_list_mutex);
		ut_a(ut_total_allocated_memory >= size);
		ut_total_allocated_memory -= size;
		os_fast_mutex_unlock(&ut_list_mutex);
		UNIV_MEM_FREE(ptr, size);
	}
#endif
}
|
/**
* Copyright Soramitsu Co., Ltd. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef IROHA_ALWAYS_VALID_VALIDATORS_HPP_
#define IROHA_ALWAYS_VALID_VALIDATORS_HPP_
#include "validators/abstract_validator.hpp"
/* These classes are supposed to be used in testing cases, where we need to
* create objects bypassing any validation, so purportedly invalid data can be
* made.
*/
namespace shared_model {
namespace validation {
struct AlwaysValidFieldValidator final {
AlwaysValidFieldValidator(std::shared_ptr<ValidatorsConfig>) {}
template <typename... Args>
std::optional<ValidationError> validateAccountId(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateAssetId(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateBytecode(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateEvmHexAddress(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validatePeer(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateAmount(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validatePubkey(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validatePeerAddress(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateRoleId(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateAccountName(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateDomainId(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateDomain(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateAssetName(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateAccountDetailKey(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateAccountDetailValue(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validatePrecision(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateRolePermission(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateGrantablePermission(
Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateQuorum(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateCreatorAccountId(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateAccount(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateCreatedTime(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateCounter(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateSignatureForm(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateSignatures(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateQueryPayloadMeta(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateDescription(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateBatchMeta(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateHeight(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateHash(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateTxPaginationMeta(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateAccountAsset(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateAsset(Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateAccountDetailRecordId(
Args...) const {
return std::nullopt;
}
template <typename... Args>
std::optional<ValidationError> validateAccountDetailPaginationMeta(
Args...) const {
return std::nullopt;
}
};
/// Model validator stub that unconditionally accepts any object of type
/// Model. Used in tests to construct purposely invalid objects that the
/// real validators would reject.
template <typename Model>
struct AlwaysValidModelValidator final : public AbstractValidator<Model> {
  /// @return std::nullopt — never reports a validation error.
  /// Parameter is intentionally unnamed: the value is never inspected
  /// (avoids an unused-parameter warning the original triggered).
  std::optional<ValidationError> validate(const Model &) const override {
    return std::nullopt;
  }  // note: the stray ';' that followed this body has been removed
};
} // namespace validation
} // namespace shared_model
#endif /* IROHA_ALWAYS_VALID_VALIDATORS_HPP_ */
|
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/mlir/tfrt/jit/tf_cpurt_pipeline.h"
#include "mlir/Conversion/ShapeToStandard/ShapeToStandard.h"
#include "mlir/Conversion/VectorToSCF/VectorToSCF.h"
#include "mlir/Dialect/Bufferization/Transforms/Passes.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/MemRef/Transforms/Passes.h"
#include "mlir/Dialect/Shape/Transforms/Passes.h"
#include "mlir/Dialect/StandardOps/Transforms/Passes.h"
#include "mlir/Dialect/Tensor/Transforms/Passes.h"
#include "mlir/Transforms/Passes.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/mhlo/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tfrt/jit/transforms/tf_cpurt_passes.h"
#include "tensorflow/compiler/mlir/tools/kernel_gen/transforms/passes.h"
#include "tensorflow/compiler/mlir/xla/transforms/passes.h"
// -------------------------------------------------------------------------- //
// Custom passes that are missing upstream.
// -------------------------------------------------------------------------- //
namespace tensorflow {
namespace {
// Adds a Tensorflow producer version to the module to enable shape inference.
// Stamps the module with a Tensorflow "producer" version attribute
// (`tf.versions`) so that the shape-inference pass accepts the module.
struct AddTensorflowProducerVersion
    : public mlir::PassWrapper<AddTensorflowProducerVersion,
                               mlir::OperationPass<mlir::ModuleOp>> {
  void runOnOperation() override {
    mlir::ModuleOp module = getOperation();
    // Shape inference only checks that the attribute is present; the actual
    // producer version does not matter, so any value (here 0) will do.
    mlir::Builder builder(module);
    module->setAttr("tf.versions",
                    builder.getDictionaryAttr({builder.getNamedAttr(
                        "producer", builder.getI32IntegerAttr(0))}));
  }
};
} // namespace
// -------------------------------------------------------------------------- //
// Assemble a TF-CPURT pipeline to lower from Tensorflow dialects to Linalg on
// buffers via progressive lowering to MHLO and Linalg.
// -------------------------------------------------------------------------- //
// Assembles the TF->CPURT lowering pipeline: TF dialect -> MHLO -> Linalg on
// tensors -> (optional tiling/peeling/vectorization) -> bufferized Linalg.
// Pass ordering below is deliberate; do not reorder without re-testing.
void CreateTfCpuRtPipeline(mlir::OpPassManager& pm,
                           const TfCpuRtPipelineOptions& options) {
  // Break Tensorflow fused operations into primitive operations before
  // lowering to HLO.
  pm.addNestedPass<mlir::FuncOp>(CreateFissionPass());
  // Run shape inference to propagate potentially specialized input shapes.
  // The producer-version stamp must run first or shape inference will
  // reject the module (see AddTensorflowProducerVersion).
  pm.addPass(std::make_unique<AddTensorflowProducerVersion>());
  pm.addPass(mlir::TF::CreateTFShapeInferencePass());
  pm.addPass(mlir::createCanonicalizerPass());
  // Transform TF operation to HLO.
  pm.addNestedPass<mlir::FuncOp>(mlir::mhlo::createLegalizeTFPass());
  if (options.legalize_i1_tensors) {
    // Convert 'i1' tensors into 'i8' tensors.
    pm.addPass(CreateCpuRtLegalizeI1TypesPass());
  }
  // Resolve all shape constraints (e.g. broadcast constraints that can be
  // proved statically and changed to const witness) early to allow more
  // efficient broadcast operations moving.
  pm.addNestedPass<mlir::FuncOp>(
      CreateSymbolicShapeOptimizationPass(/*constraints_only=*/true));
  // Move up broadcasting operations to allow for more fusion opportunities.
  pm.addNestedPass<mlir::FuncOp>(mlir::mhlo::createBroadcastPropagationPass());
  pm.addPass(mlir::createCSEPass());
  pm.addPass(mlir::createCanonicalizerPass());
  // After all shape constraints removed and broadcasts moved to the top, try
  // to resolve broadcasts that can be converted to linalg generic operations.
  pm.addNestedPass<mlir::FuncOp>(CreateSymbolicShapeOptimizationPass());
  // Transform HLO operations to Linalg.
  pm.addNestedPass<mlir::FuncOp>(mlir::mhlo::createLegalizeHloToLinalgPass());
  // Lower shape dialect to standard to enable linalg canonicalizations (e.g.
  // use linalg inputs instead of outputs for memref.dim operations).
  pm.addNestedPass<mlir::FuncOp>(
      mlir::kernel_gen::transforms::CreateShapeSimplification());
  pm.addNestedPass<mlir::FuncOp>(mlir::createShapeToShapeLowering());
  pm.addPass(mlir::createConvertShapeToStandardPass());
  pm.addNestedPass<mlir::FuncOp>(mlir::createConvertShapeConstraintsPass());
  // Fuse Linalg on tensors operations.
  pm.addPass(mlir::createCSEPass());
  pm.addPass(mlir::memref::createResolveShapedTypeResultDimsPass());
  // Lower index cast on tensors to tensor.generate.
  pm.addNestedPass<mlir::FuncOp>(
      mlir::kernel_gen::transforms::CreateLowerIndexCastPass());
  pm.addPass(mlir::createCanonicalizerPass());
  pm.addNestedPass<mlir::FuncOp>(CreateFusionPass());
  // Perform tiling-peeling-vectorization if vectorization is enabled.
  if (options.vectorize) {
    pm.addNestedPass<mlir::FuncOp>(CreateDetensorizeLinalgPass());
    pm.addNestedPass<mlir::FuncOp>(CreateCodegenStrategyForReductionPass());
    pm.addNestedPass<mlir::FuncOp>(CreateCodegenStrategyForCWisePass());
    pm.addNestedPass<mlir::FuncOp>(CreatePeelTiledLoopsPass());
    pm.addNestedPass<mlir::FuncOp>(mlir::createCSEPass());
    pm.addPass(mlir::createCanonicalizerPass());
    pm.addNestedPass<mlir::FuncOp>(CreateSinkUnusedOutputs());
    pm.addNestedPass<mlir::FuncOp>(CreateVectorizeTiledOpsPass());
  }
  // Bufferize Linalg on tensors program.
  // Always run canonicalizer (which does dead code removal) before bufferizing
  // anything.
  pm.addPass(mlir::createCanonicalizerPass());
  // Now bufferize all the compute operations (hlo + linalg) and func signature.
  pm.addPass(
      mlir::kernel_gen::transforms::CreateComputeOpAndFuncBufferizePass());
  pm.addNestedPass<mlir::FuncOp>(
      mlir::kernel_gen::transforms::CreateTiledLoopBufferizePass());
  // Now that all compute operations are converted to standard (as a side effect
  // of bufferizing to memref dialect) we can remove the remaining references
  // to unsigned types.
  pm.addPass(mlir::kernel_gen::transforms::CreateConvertToSignlessPass());
  // Turn tensor constants into global memrefs.
  // TODO(kramerb): Expose the patterns and add them to the bufferize passes.
  pm.addPass(mlir::createTensorConstantBufferizePass(/*alignment=*/64));
  // Always run canonicalizer (which does dead code removal) before bufferizing
  // anything.
  pm.addPass(mlir::createCanonicalizerPass());
  pm.addPass(mlir::kernel_gen::transforms::CreateFinalBufferizePass());
  pm.addPass(mlir::createCSEPass());
  pm.addPass(mlir::createCanonicalizerPass());
  // Deallocate all temporary buffers.
  pm.addNestedPass<mlir::FuncOp>(
      mlir::bufferization::createBufferDeallocationPass());
  // Do trivial buffer forwarding across linalg.generic operations.
  pm.addNestedPass<mlir::FuncOp>(CreateLinalgTrivialBufferForwardingPass());
  // Remove trivial copy operations.
  pm.addNestedPass<mlir::FuncOp>(CreateLinalgTrivialCopyRemovalPass());
  if (options.vectorize) {
    pm.addNestedPass<mlir::FuncOp>(
        mlir::createConvertLinalgTiledLoopsToSCFPass());
  }
  pm.addPass(mlir::createCSEPass());
  pm.addPass(mlir::createCanonicalizerPass());
  // Lower vector transfers to SCF with full unrolling, then approximate the
  // remaining math ops for performance.
  mlir::VectorTransferToSCFOptions vec_to_scf_options;
  vec_to_scf_options.unroll = true;
  pm.addNestedPass<mlir::FuncOp>(
      mlir::createConvertVectorToSCFPass(vec_to_scf_options));
  pm.addNestedPass<mlir::FuncOp>(CreateMathApproximationPass({"all"}));
}
// Builds the pipeline with default options; only `vectorize` is taken from
// the process-wide CPURT flags.
void CreateDefaultTfCpuRtPipeline(mlir::OpPassManager& pm) {
  TfCpuRtPipelineOptions opts;
  opts.vectorize = tensorflow::GetCpuRtFlags().vectorize;
  CreateTfCpuRtPipeline(pm, opts);
}
// Registers the pipeline under the `tf-cpurt-pipeline` name so it can be
// invoked from mlir-opt-style command line tools.
static mlir::PassPipelineRegistration<TfCpuRtPipelineOptions> tf_cpurt_pipeline(
    "tf-cpurt-pipeline",
    "Convert Tensorflow dialect to TFRT's CPURT compatible dialects",
    CreateTfCpuRtPipeline);
} // namespace tensorflow
|
//
// Created by Khyber on 8/14/2019.
//
#include "Script.h"
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file src/runtime/contrib/cudnn/softmax.cc
* \brief Use external cudnn softmax function
*/
#include <tvm/runtime/device_api.h>
#include <tvm/runtime/registry.h>
#include "cudnn_utils.h"
namespace tvm {
namespace contrib {
using namespace runtime;
// Packed function bridging TVM to cuDNN's softmax forward kernel.
// args: [0] input tensor x, [1] output tensor y, [2] softmax axis.
TVM_REGISTER_GLOBAL("tvm.contrib.cudnn.softmax.forward")
.set_body([](TVMArgs args, TVMRetValue* ret) {
  DLTensor* x = args[0];
  DLTensor* y = args[1];
  int axis = args[2];
  int ndim = x->ndim;
  int64_t* shape = x->shape;
  // Normalize a negative axis (counted from the end) and bound-check it.
  if (axis < 0) axis += ndim;
  CHECK(axis >= 0 && axis < ndim);
  CuDNNThreadEntry* entry_ptr = CuDNNThreadEntry::ThreadLocal();
  entry_ptr->softmax_entry.data_type = CuDNNDataType::DLTypeToCuDNNType(x->dtype);
  // Set mode and shape descriptor
  if (axis == ndim - 1) {
    // Softmax over the innermost axis: flatten all leading dims into N and
    // use INSTANCE mode, which normalizes over the C dim of an (N,C,1,1)
    // descriptor.
    int64_t N = 1;
    for (int i = 0; i < ndim - 1; ++i) {
      N *= shape[i];
    }
    entry_ptr->softmax_entry.mode = CUDNN_SOFTMAX_MODE_INSTANCE;
    CUDNN_CALL(cudnnSetTensor4dDescriptor(entry_ptr->softmax_entry.shape_desc,
                                          CUDNN_TENSOR_NCHW, entry_ptr->softmax_entry.data_type,
                                          static_cast<int>(N),
                                          static_cast<int>(shape[ndim - 1]), 1, 1));
  } else {
    // Softmax over an interior axis: collapse dims before the axis into N
    // and dims after it into H, put the axis itself in C, and use CHANNEL
    // mode (normalizes over C for each (N,H,W) position).
    int64_t pre_axis_dim = 1;
    int64_t post_axis_dim = 1;
    for (int i = 0; i < ndim; ++i) {
      if (i < axis) {
        pre_axis_dim *= shape[i];
      } else if (i > axis) {
        post_axis_dim *= shape[i];
      }
    }
    entry_ptr->softmax_entry.mode = CUDNN_SOFTMAX_MODE_CHANNEL;
    CUDNN_CALL(cudnnSetTensor4dDescriptor(
        entry_ptr->softmax_entry.shape_desc, CUDNN_TENSOR_NCHW,
        entry_ptr->softmax_entry.data_type, static_cast<int>(pre_axis_dim),
        static_cast<int>(shape[axis]), static_cast<int>(post_axis_dim), 1));
  }
  // y = alpha * softmax(x) + beta * y with alpha=1, beta=0, i.e. a plain
  // overwrite of y; ACCURATE mode subtracts the max for numerical stability.
  auto alpha = CuDNNDataType::GetConst<1>(entry_ptr->softmax_entry.data_type);
  auto beta = CuDNNDataType::GetConst<0>(entry_ptr->softmax_entry.data_type);
  CUDNN_CALL(cudnnSoftmaxForward(entry_ptr->handle, CUDNN_SOFTMAX_ACCURATE,
                                 entry_ptr->softmax_entry.mode, alpha,
                                 entry_ptr->softmax_entry.shape_desc, x->data, beta,
                                 entry_ptr->softmax_entry.shape_desc, y->data));
});
} // namespace contrib
} // namespace tvm
|
#include <stdio.h>

/*
 * Prints integers starting from 0 for as long as the current value is even.
 * Because i is incremented every iteration, the condition i % 2 == 0 fails
 * as soon as i becomes 1, so the body executes exactly once and the program
 * prints just "0". (If the intent was to print several even numbers, the
 * condition — not the increment — would need to change.)
 */
int main()
{
    int i = 0;
    while (i % 2 == 0)
    {
        printf("%d", i);
        i++;
    }
    return 0; /* explicit success status (was an implicit fall-through) */
}
|
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
#ifndef _SVX_SIDEBAR_VALUESETWITHTEXT_CONTROL_HXX_
#define _SVX_SIDEBAR_VALUESETWITHTEXT_CONTROL_HXX_
#include "svx/svxdllapi.h"
#include <svtools/valueset.hxx>
#include <limits.h>
#include <com/sun/star/uno/Reference.h>
#include <com/sun/star/uno/Sequence.h>
#include <com/sun/star/lang/Locale.hpp>
#include <vcl/image.hxx>
#include <vector>
namespace com{namespace sun{ namespace star{
namespace container{
class XIndexAccess;
}
namespace beans{
struct PropertyValue;
}
namespace text{
class XNumberingFormatter;
}
}}}
namespace svx { namespace sidebar {
/** Specialization of class <ValueSet>.
    This specialization is a one-columned ValueSet which allows
    items containing an image and a text, or a text and a second text.
    It is used especially for sidebar-related controls.
*/
class SVX_DLLPUBLIC ValueSetWithTextControl : public ValueSet
{
public:
    // control type of specialized <ValueSet>:
    // - image + text
    // - text + text
    enum tControlType
    {
        IMAGE_TEXT,
        TEXT_TEXT
    };

    // The control type is fixed at construction time and decides which
    // AddItem() overload actually adds items.
    ValueSetWithTextControl(
        const tControlType eControlType,
        Window* pParent,
        const ResId& rResId);
    virtual ~ValueSetWithTextControl(void);

    // add item for control type IMAGE_TEXT
    // if control type does not match IMAGE_TEXT no item is added.
    // @param pSelectedItemImage
    // selection item image is optional. if not provided, it is the same as the image item
    // @param pItemHelpText
    // help text is optional. if not provided, it is the same as the item text
    void AddItem(
        const Image& rItemImage,
        const Image* pSelectedItemImage,
        const XubString& rItemText,
        const XubString* pItemHelpText );

    // replace item images for control type IMAGE_TEXT
    void ReplaceItemImages(
        const sal_uInt16 nItemId,
        const Image& rItemImage,
        const Image* pSelectedItemImage );

    // add item for control type TEXT_TEXT
    // if control type does not match TEXT_TEXT no item is added.
    // @param pItemHelpText
    // help text is optional. if not provided, it is the same as the item text
    void AddItem(
        const XubString& rItemText,
        const XubString& rItemText2,
        const XubString* pItemHelpText );

    // custom painting of the items (image/text layout per control type;
    // implementation lives in the .cxx, not visible here)
    virtual void UserDraw( const UserDrawEvent& rUDEvt );

private:
    // Per-item payload; for TEXT_TEXT items the image members stay unused.
    struct ValueSetWithTextItem
    {
        Image maItemImage;
        Image maSelectedItemImage;
        XubString maItemText;
        XubString maItemText2;
    };

    typedef ::std::vector< ValueSetWithTextItem > tItemList;

    const tControlType meControlType;
    tItemList maItems;
};
/** ValueSet holding numbering previews.
    NOTE(review): the drawing logic is in the .cxx (not visible here);
    from this header we can only see that it stores numbering settings,
    a numbering formatter and a locale supplied via SetNumberingSettings(),
    presumably used to render the previews in UserDraw() — confirm against
    the implementation.
*/
class SVX_DLLPUBLIC SvxNumValueSet2 : public ValueSet
{
    Color aLineColor;
    Rectangle aOrgRect;
    VirtualDevice* pVDev;     // off-screen device; ownership handling not visible here
    com::sun::star::uno::Reference<com::sun::star::text::XNumberingFormatter> xFormatter;
    com::sun::star::lang::Locale aLocale;
    // nested sequence: one PropertyValue sequence per numbering level
    com::sun::star::uno::Sequence<
        com::sun::star::uno::Sequence<
            com::sun::star::beans::PropertyValue> > aNumSettings;
public:
    SvxNumValueSet2( Window* pParent, const ResId& rResId);
    ~SvxNumValueSet2();

    virtual void UserDraw( const UserDrawEvent& rUDEvt );

    // Stores the numbering data used for rendering the previews.
    void SetNumberingSettings(
        const com::sun::star::uno::Sequence<
            com::sun::star::uno::Sequence<
                com::sun::star::beans::PropertyValue> >& aNum,
        com::sun::star::uno::Reference<com::sun::star::text::XNumberingFormatter>& xFormatter,
        const com::sun::star::lang::Locale& rLocale );
};
/** ValueSet variant with custom item painting only.
    Unlike SvxNumValueSet2 it carries no state of its own; everything
    happens in UserDraw() (implemented in the .cxx, not visible here). */
class SVX_DLLPUBLIC SvxNumValueSet3 : public ValueSet
{
public:
    SvxNumValueSet3( Window* pParent, const ResId& rResId);
    ~SvxNumValueSet3();

    virtual void UserDraw( const UserDrawEvent& rUDEvt );
};
} } // end of namespace svx::sidebar
#endif
|
#include "move.h"
// Default constructor.
// NOTE(review): members (pieceMoved, firstPosition, player, score, ...) are
// not initialized here; callers appear to populate them before use — confirm,
// since ToString() dereferences pieceMoved and firstPosition unconditionally.
Move::Move()
{
}
// Converts a zero-based board column (0-7) to its file letter "A".."H".
// Out-of-range values yield an empty (null) QString, matching the default
// behaviour of the two duplicated switch statements this helper replaces.
static QString columnLetter(int column)
{
    if (column < 0 || column > 7) {
        return QString();
    }
    return QString(QChar('A' + column));
}

// Builds a human-readable description of this move, e.g.
// "White Pawn: A 1 to A 2 score: 3".
QString Move::ToString()
{
    // Name of the moved piece; unknown types leave the string empty.
    QString pieceType;
    switch (pieceMoved->type)
    {
    case Piece::PAWN:
        pieceType = "Pawn";
        break;
    case Piece::ROOK:
        pieceType = "Rook";
        break;
    case Piece::KNIGHT:
        pieceType = "Knight";
        break;
    case Piece::BISHOP:
        pieceType = "Bishop";
        break;
    case Piece::QUEEN:
        pieceType = "Queen";
        break;
    case Piece::KING:
        pieceType = "King";
        break;
    default:
        break;
    }

    // Name of the side making the move; any other value leaves it empty.
    QString currentPlayer;
    switch (player)
    {
    case Piece::BLACK:
        currentPlayer = "Black";
        break;
    case Piece::WHITE:
        currentPlayer = "White";
        break;
    }

    // Start and end squares formatted as "<file letter> <rank number>".
    // NOTE(review): firstPosition is accessed through a pointer while
    // secondPosition is a value — kept exactly as in the original.
    QString start = columnLetter(firstPosition->XPos()) + " "
            + QString::number(firstPosition->YPos());
    QString end = columnLetter(secondPosition.x()) + " "
            + QString::number(secondPosition.y());

    return currentPlayer + " " + pieceType + ": " + start + " to " + end
            + " score: " + QString::number(score);
}
|
//////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2016-20, Lawrence Livermore National Security, LLC and Umpire
// project contributors. See the COPYRIGHT file for details.
//
// SPDX-License-Identifier: (MIT)
//////////////////////////////////////////////////////////////////////////////
#ifndef UMPIRE_ZeroByteHandler_HPP
#define UMPIRE_ZeroByteHandler_HPP
#include <memory>
#include "umpire/Allocator.hpp"
#include "umpire/strategy/AllocationStrategy.hpp"
#include "umpire/strategy/FixedPool.hpp"
namespace umpire {
namespace strategy {
// Decorator strategy that wraps another AllocationStrategy; per its name it
// exists to handle zero-byte allocation requests, backed by the dedicated
// FixedPool member below.
// NOTE(review): the dispatch logic (when m_zero_byte_pool is used vs. the
// wrapped allocator) lives in the .cpp, which is not visible here — confirm
// before relying on specifics.
class ZeroByteHandler : public AllocationStrategy {
 public:
  // Takes ownership of the strategy that serves non-zero-byte requests.
  ZeroByteHandler(std::unique_ptr<AllocationStrategy>&& allocator) noexcept;

  void* allocate(std::size_t bytes) override;
  void deallocate(void* ptr) override;
  void release() override;

  // Accounting/introspection, forwarded per AllocationStrategy's interface.
  std::size_t getCurrentSize() const noexcept override;
  std::size_t getHighWatermark() const noexcept override;
  std::size_t getActualSize() const noexcept override;
  Platform getPlatform() noexcept override;
  MemoryResourceTraits getTraits() const noexcept override;

  // Exposes the wrapped strategy (non-owning pointer).
  strategy::AllocationStrategy* getAllocationStrategy();

 private:
  std::unique_ptr<strategy::AllocationStrategy> m_allocator;
  FixedPool* m_zero_byte_pool;  // raw pointer; ownership not visible here
};
} // namespace strategy
} // namespace umpire
#endif // UMPIRE_ZeroByteHandler_HPP
|
#include "ofApp.h"
//--------------------------------------------------------------
// One-time scene setup: render state, three light types (point, spot,
// directional), a shiny material and the texture used on the 3D primitives.
void ofApp::setup(){
	ofSetVerticalSync(true);
	ofSetFrameRate(60);
	ofBackground(10, 10, 10);
	// depth testing so nearer geometry occludes farther geometry
	ofEnableDepthTest();

	// turn on smooth lighting //
	bSmoothLighting = true;
	ofSetSmoothLighting(true);

	// lets make a high-res sphere //
	// default is 20 //
	ofSetSphereResolution(128);
	// This is not needed when using gl3 since the lighting
	// is calculated per fragment
	ofSetBoxResolution(30);

	// radius of the sphere //
	radius = 180.f;
	center.set(ofGetWidth()*.5, ofGetHeight()*.5, 0);

	// Point lights emit light in all directions //
	// set the diffuse color, color reflected from the light source //
	pointLight.setDiffuseColor( ofColor(0.f, 255.f, 0.f));
	// specular color, the highlight/shininess color //
	pointLight.setSpecularColor( ofColor(255.f, 255.f, 0.f));
	pointLight.setPointLight();

	spotLight.setDiffuseColor( ofColor(255.f, 0.f, 0.f));
	spotLight.setSpecularColor( ofColor(255.f, 255.f, 255.f));
	// turn the light into spotLight, emit a cone of light //
	spotLight.setSpotlight();
	// size of the cone of emitted light, angle between light axis and side of cone //
	// angle range between 0 - 90 in degrees //
	spotLight.setSpotlightCutOff( 50 );
	// rate of falloff, illumitation decreases as the angle from the cone axis increases //
	// range 0 - 128, zero is even illumination, 128 is max falloff //
	spotLight.setSpotConcentration( 45 );

	// Directional Lights emit light based on their orientation, regardless of their position //
	directionalLight.setDiffuseColor(ofColor(0.f, 0.f, 255.f));
	directionalLight.setSpecularColor(ofColor(255.f, 255.f, 255.f));
	directionalLight.setDirectional();
	// set the direction of the light
	// set it pointing from left to right -> //
	directionalLight.setOrientation( ofVec3f(0, 90, 0) );

	bShiny = true;
	// shininess is a value between 0 - 128, 128 being the most shiny //
	material.setShininess( 120 );
	// the light highlight of the material //
	material.setSpecularColor(ofColor(255, 255, 255, 255));

	// all three light types start enabled
	bPointLight = bSpotLight = bDirLight = true;

	// tex coords for 3D objects in OF are from 0 -> 1, not 0 -> image.width
	// so we must disable the arb rectangle call to allow 0 -> 1
	ofDisableArbTex();
	// load an image to use as the texture //
	ofLogoImage.load("of.png");
	bUseTexture = true;
}
//--------------------------------------------------------------
void ofApp::update() {
    // Orbit the point light around the scene center on all three axes.
    const float orbit = radius * 2;
    const float px =  cos(ofGetElapsedTimef() * .6f) * orbit + center.x;
    const float py =  sin(ofGetElapsedTimef() * .8f) * orbit + center.y;
    const float pz = -cos(ofGetElapsedTimef() * .8f) * orbit + center.z;
    pointLight.setPosition(px, py, pz);
    // Swing the spot light's heading back and forth and pin it to the cursor.
    spotLight.setOrientation(ofVec3f(0, cos(ofGetElapsedTimef()) * RAD_TO_DEG, 0));
    spotLight.setPosition(mouseX, mouseY, 200);
}
//--------------------------------------------------------------
// Renders three lit shapes (a sphere and two boxes) with the currently
// enabled lights and material, then draws light markers and the help text.
// NOTE: the order of GL state changes below (lighting -> material -> lights
// -> texture bind -> geometry -> unbind/disable) is significant.
void ofApp::draw(){
    // enable lighting //
    ofEnableLighting();
    // enable the material, so that it applies to all 3D objects before material.end() call //
    material.begin();
    // activate the lights //
    if (bPointLight) pointLight.enable();
    if (bSpotLight) spotLight.enable();
    if (bDirLight) directionalLight.enable();
    // grab the texture reference and bind it //
    // this will apply the texture to all drawing (vertex) calls before unbind() //
    if(bUseTexture) ofLogoImage.getTexture().bind();
    ofSetColor(255, 255, 255, 255);
    // spinning textured sphere behind the scene center //
    ofPushMatrix();
    ofTranslate(center.x, center.y, center.z-300);
    ofRotateRad(ofGetElapsedTimef() * .8, 0, 1, 0);
    ofDrawSphere( 0,0,0, radius);
    ofPopMatrix();
    // small tumbling box that bobs back and forth in depth //
    ofPushMatrix();
    ofTranslate(300, 300, cos(ofGetElapsedTimef()*1.4) * 300.f);
    ofRotateRad(ofGetElapsedTimef()*.6, 1, 0, 0);
    ofRotateRad(ofGetElapsedTimef()*.8, 0, 1, 0);
    ofDrawBox(0, 0, 0, 60);
    ofPopMatrix();
    // large slowly-rotating backdrop box //
    ofPushMatrix();
    ofTranslate(center.x, center.y, -900);
    ofRotateRad(ofGetElapsedTimef() * .2, 0, 1, 0);
    ofDrawBox( 0, 0, 0, 850);
    ofPopMatrix();
    if(bUseTexture) ofLogoImage.getTexture().unbind();
    // disable only the lights whose toggle is off, so enabled lights persist //
    if (!bPointLight) pointLight.disable();
    if (!bSpotLight) spotLight.disable();
    if (!bDirLight) directionalLight.disable();
    material.end();
    // turn off lighting //
    ofDisableLighting();
    // draw unlit debug markers for the point and spot lights in their diffuse colors //
    ofSetColor( pointLight.getDiffuseColor() );
    if(bPointLight) pointLight.draw();
    ofSetColor(255, 255, 255);
    ofSetColor( spotLight.getDiffuseColor() );
    if(bSpotLight) spotLight.draw();
    ofSetColor(255, 255, 255);
    // on-screen help: current toggle states and spot light parameters //
    ofDrawBitmapString("Point Light On (1) : "+ofToString(bPointLight) +"\n"+
    "Spot Light On (2) : "+ofToString(bSpotLight) +"\n"+
    "Directional Light On (3) : "+ofToString(bDirLight)+"\n"+
    "Shiny Objects On (s) : "+ofToString(bShiny)+"\n"+
    "Spot Light Cutoff (up/down) : "+ofToString(spotLight.getSpotlightCutOff(),0)+"\n"+
    "Spot Light Concentration (right/left) : " + ofToString(spotLight.getSpotConcentration(),0)+"\n"+
    "Smooth Lighting enabled (x) : "+ofToString(bSmoothLighting,0)+"\n"+
    "Textured (t) : "+ofToString(bUseTexture,0),
    20, 20);
}
//--------------------------------------------------------------
// Keyboard controls: toggle the three lights, material shininess, smooth
// lighting and texturing, and tune the spot light's cone at runtime.
void ofApp::keyPressed(int key){
    if (key == '1') {
        bPointLight = !bPointLight;
    } else if (key == '2') {
        bSpotLight = !bSpotLight;
    } else if (key == '3') {
        bDirLight = !bDirLight;
    } else if (key == 's') {
        // toggle between a shiny and a dull material
        bShiny = !bShiny;
        if (bShiny) material.setShininess( 120 );
        else material.setShininess( 30 );
    } else if (key == 'x') {
        bSmoothLighting = !bSmoothLighting;
        ofSetSmoothLighting(bSmoothLighting);
    } else if (key == 't') {
        bUseTexture = !bUseTexture;
    } else if (key == OF_KEY_UP) {
        // setSpotlightCutOff is clamped between 0 - 90 degrees //
        spotLight.setSpotlightCutOff(spotLight.getSpotlightCutOff() + 1);
    } else if (key == OF_KEY_DOWN) {
        spotLight.setSpotlightCutOff(spotLight.getSpotlightCutOff() - 1);
    } else if (key == OF_KEY_RIGHT) {
        // setSpotConcentration is clamped between 0 - 128 //
        spotLight.setSpotConcentration(spotLight.getSpotConcentration() + 1);
    } else if (key == OF_KEY_LEFT) {
        spotLight.setSpotConcentration(spotLight.getSpotConcentration() - 1);
    }
}
//--------------------------------------------------------------
// openFrameworks event callback; unused in this example.
void ofApp::keyReleased(int key){
}
//--------------------------------------------------------------
// openFrameworks event callback; unused (spot light tracks mouseX/mouseY in update()).
void ofApp::mouseMoved(int x, int y ){
}
//--------------------------------------------------------------
// openFrameworks event callback; unused in this example.
void ofApp::mouseDragged(int x, int y, int button){
}
//--------------------------------------------------------------
// openFrameworks event callback; unused in this example.
void ofApp::mousePressed(int x, int y, int button){
}
//--------------------------------------------------------------
// openFrameworks event callback; unused in this example.
void ofApp::mouseReleased(int x, int y, int button){
}
//--------------------------------------------------------------
// openFrameworks event callback; unused in this example.
void ofApp::mouseEntered(int x, int y){
}
//--------------------------------------------------------------
// openFrameworks event callback; unused in this example.
void ofApp::mouseExited(int x, int y){
}
//--------------------------------------------------------------
// openFrameworks event callback; unused (scene center is set once in setup()).
void ofApp::windowResized(int w, int h){
}
//--------------------------------------------------------------
// openFrameworks event callback; unused in this example.
void ofApp::gotMessage(ofMessage msg){
}
//--------------------------------------------------------------
// openFrameworks event callback; unused in this example.
void ofApp::dragEvent(ofDragInfo dragInfo){
}
|
#include <ATen/native/ReduceOps.h>
#include <ATen/ATen.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/WrapDimUtils.h>
#include <ATen/WrapDimUtilsMulti.h>
#include <ATen/native/ReduceOpsUtils.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/native/TensorDimApply.h>
#include <ATen/native/SharedReduceOps.h>
#include <algorithm>
#include <functional>
#include <limits>
#include <numeric>
#include <vector>
#include <map>
#include <cmath>
#include <cfloat>
#include <type_traits>
namespace at {
namespace native {
// Dispatch-table definitions: each stub routes the corresponding
// reduction/scan op to the kernel registered for the tensor's device type.
DEFINE_DISPATCH(sum_stub);
DEFINE_DISPATCH(nansum_stub);
DEFINE_DISPATCH(std_var_stub);
DEFINE_DISPATCH(prod_stub);
DEFINE_DISPATCH(norm_stub);
DEFINE_DISPATCH(mean_stub);
DEFINE_DISPATCH(and_stub);
DEFINE_DISPATCH(or_stub);
DEFINE_DISPATCH(min_values_stub);
DEFINE_DISPATCH(max_values_stub);
DEFINE_DISPATCH(argmax_stub);
DEFINE_DISPATCH(argmin_stub);
DEFINE_DISPATCH(cumsum_stub);
DEFINE_DISPATCH(cumprod_stub);
DEFINE_DISPATCH(logcumsumexp_stub);
// CPU entry point for the raw logcumsumexp primitive: allocates a
// contiguous result and delegates to the out-variant.
Tensor _logcumsumexp_cpu(const Tensor& self, int64_t dim) {
  auto result = at::empty_like(self, MemoryFormat::Contiguous);
  _logcumsumexp_out_cpu(result, self, dim);
  return result;
}
// Writes logcumsumexp(self) along `dim` into `result` via the dispatched kernel.
Tensor& _logcumsumexp_out_cpu(Tensor& result, const Tensor& self, int64_t dim) {
  const auto device = self.device().type();
  logcumsumexp_stub(device, result, self, dim);
  return result;
}
// Public logcumsumexp: runs the raw primitive with named-tensor name
// propagation suppressed, then restores names on the result.
Tensor logcumsumexp(const Tensor& self, int64_t dim) {
  Tensor result;
  {
    NoNamesGuard guard;
    result = at::_logcumsumexp(self, dim);
  }
  namedinference::propagate_names(result, self);
  return result;
}
// Out-variant: `result`'s dtype/device/layout must match `self`; the input
// is converted to the result dtype before the scan runs.
Tensor& logcumsumexp_out(Tensor& result, const Tensor& self, int64_t dim) {
  check_scalar_type_device_layout_equal(result, self);
  {
    NoNamesGuard guard;
    const auto src = self.toType(result.scalar_type());
    at::_logcumsumexp_out(result, src, dim);
  }
  namedinference::propagate_names(result, self);
  return result;
}
// CPU entry point for the raw cumsum primitive.
Tensor _cumsum_cpu(const Tensor& self, int64_t dim) {
  auto result = at::empty_like(self, MemoryFormat::Contiguous);
  _cumsum_out_cpu(result, self, dim);
  return result;
}
// Writes cumsum(self) along `dim` into `result` via the dispatched kernel.
Tensor& _cumsum_out_cpu(Tensor& result, const Tensor& self, int64_t dim) {
  const auto device = self.device().type();
  cumsum_stub(device, result, self, dim);
  return result;
}
// Public cumsum: upcasts integral inputs (integer_upcast) and runs the raw
// primitive with names suppressed, then restores named-tensor names.
Tensor cumsum(const Tensor& self, int64_t dim, c10::optional<ScalarType> dtype) {
  Tensor result;
  {
    NoNamesGuard guard;
    result = at::_cumsum(integer_upcast(self, dtype), dim);
  }
  namedinference::propagate_names(result, self);
  return result;
}
// Out-variant: the result's dtype wins; an explicitly supplied dtype must
// agree with it (NumPy performs no such check).
Tensor& cumsum_out(Tensor& result, const Tensor& self, int64_t dim, c10::optional<ScalarType> dtype) {
  if (dtype.has_value()) {
    TORCH_CHECK(
        result.scalar_type() == dtype.value(),
        "provided dtype must match dtype of result in cumsum. Got ",
        toString(result.scalar_type()),
        " and ",
        toString(dtype.value()),
        ".");
  }
  {
    NoNamesGuard guard;
    at::_cumsum_out(result, self.toType(result.scalar_type()), dim);
  }
  namedinference::propagate_names(result, self);
  return result;
}
// CPU entry point for the raw cumprod primitive.
Tensor _cumprod_cpu(const Tensor& self, int64_t dim) {
  auto result = at::empty_like(self, MemoryFormat::Contiguous);
  _cumprod_out_cpu(result, self, dim);
  return result;
}
// Writes cumprod(self) along `dim` into `result` via the dispatched kernel.
Tensor& _cumprod_out_cpu(Tensor& result, const Tensor& self, int64_t dim) {
  const auto device = self.device().type();
  cumprod_stub(device, result, self, dim);
  return result;
}
// Public cumprod: upcasts integral inputs and runs the raw primitive with
// names suppressed, then restores named-tensor names.
Tensor cumprod(const Tensor& self, int64_t dim, c10::optional<ScalarType> dtype) {
  Tensor result;
  {
    NoNamesGuard guard;
    result = at::_cumprod(integer_upcast(self, dtype), dim);
  }
  namedinference::propagate_names(result, self);
  return result;
}
// Out-variant: the result's dtype wins; an explicitly supplied dtype must
// agree with it (NumPy performs no such check).
Tensor& cumprod_out(Tensor& result, const Tensor& self, int64_t dim, c10::optional<ScalarType> dtype) {
  if (dtype.has_value()) {
    TORCH_CHECK(
        result.scalar_type() == dtype.value(),
        "provided dtype must match dtype of result in cumprod. Got ",
        toString(result.scalar_type()),
        " and ",
        toString(dtype.value()),
        ".");
  }
  {
    NoNamesGuard guard;
    at::_cumprod_out(result, self.toType(result.scalar_type()), dim);
  }
  namedinference::propagate_names(result, self);
  return result;
}
// Reversed cumulative sum along `dim`: out[i] = sum_{j >= i} x[j].
// Implemented with a single forward cumsum of -x plus elementwise fixups,
// so only one scan kernel launch is needed.
static Tensor sum_scan_exclusive(const Tensor& x, int64_t dim) {
  Tensor out = at::cumsum(-x, dim);
  const int64_t last = out.size(dim) - 1;
  // Total of -x along dim (the last entry of the forward scan).
  Tensor total = out.narrow(dim, last, 1).clone(at::MemoryFormat::Preserve);
  out -= total.expand_as(out);
  out += x;
  return out;
}
// Backward of cumprod along `dim`. See the derivation below; a fast O(n)
// path is used when the input has no zeros, otherwise an O(n^2) fallback.
Tensor cumprod_backward(const Tensor& grad, const Tensor& input, int64_t dim) {
  /*
  There are two algorithms to do this. The first one
  is very efficient, but works only when there are no
  zero elements in the input.
  The second one is much more complex, but it doesn't
  assume anything on the input. The main downside is
  that it takes time O(n^2), where n = input.size(self.dim)
  (i.e. the length of the cumulative product). This is in
  contrast to the forward pass and the efficient algorithm,
  which are both O(n).
  The second algorithm is a simple application of the chain
  rule. If x is an n-dimensional vector, and y = cumprod(x),
  and F is the final cost, then
  dF / dx_k = sum_j (dF / dy_j) * (dy_j / dx_k)   (1)
  The term dF / dy_j is just grad_output[j] (assuming again
  everything is one-dimensional).
  The term (dy_j / dx_k) is easily seen to be
  if j >= k
  dy_j / dx_k = prod_{1 <= i <= j, i != k} x_i
  else:
  dy_j / dx_k = 0
  Note that the indicator (j>=k) can be taken out
  by replacing the sum in (1) with a sum from
  j = k to n.
  Thus,
  df / dx_k = sum_{k <= j <= n} grad_output[j] * (dy_j / dx_k)
  with
  dy_j / dx_k = prod_{1 <= i <= j, i != k} x_i     (2)
  Note that this last term is just the cumulative product
  with k omitted. Thus, if x_k (the input) is nonzero, we can
  just express this as
  dy_j / dx_k = (prod_{1 <= i <= j} x_i) / x_k
              = y_j / x_k
  So therefore,
  df / dx_k = sum_{k <= j <= n} grad_output[j] * y_j / x_k
  so
  grad_input = sum_scan_exclusive(grad_output * output) / input
  If the input contains zeros, we need to calculate the dy_j / dx_k
  by using the formula (2), called in the code omitted_products.
  The way the code calculates it is simply by noting that
  prod_{1 <= i <= j, i != k} x_i
      = (prod_{1 <= i <= k} x_i) * (prod_{k + 1 <= i <= j} x_i)
  the first term is calculated as prods_until_k, which since
  doesn't depend on j is easy to vectorize.
  The second term (indexed by j) is the cumulative product of
  x_{k+1}, x_{k+2}, ..., x_n, and it's named in the code
  prods_from_k_plus_1, and it's calculated as a cumprod.
  In order to vectorize this properly, we need to add to
  omitted_products the dimensions where k > j, and therefore
  dy_j / dx_k = 0, which is done right after the assert.
  */
  // Scalar or empty input: gradient passes straight through.
  if (input.dim() == 0 || input.numel() == 0) {
    return grad;
  }
  dim = at::maybe_wrap_dim(dim, input.sizes().size());
  int64_t dim_size = input.size(dim);
  if (dim_size == 1) {
    return grad;
  }
  // Simple case with nonzero elements in the input
  if ((input != 0).all().item<uint8_t>()) {
    Tensor result = at::cumprod(input, dim);
    return sum_scan_exclusive(result * grad, dim) / input;
  }
  // General O(n^2) fallback: build the "product with x_k omitted" terms.
  auto ones_size = input.sizes().vec();
  ones_size[dim] = 1;
  Tensor ones = at::ones({1}, grad.options()).expand(ones_size);
  Tensor grad_input = at::zeros(input.sizes(), grad.options());
  Tensor prods_from_k_plus_1;
  Tensor omitted_products;
  for (int k = 0; k < dim_size; ++k) {
    if (k == 0) {
      // No prefix product; the "omitted" product is just the suffix cumprod.
      prods_from_k_plus_1 = at::cumprod(input.slice(dim, k + 1), dim);
      omitted_products = at::cat({ones, prods_from_k_plus_1}, dim);
    } else if (k == dim_size - 1) {
      // No suffix; only the prefix product remains.
      Tensor prods_until_k = at::prod(input.slice(dim, 0, k), dim, true);
      omitted_products = prods_until_k;
    } else {
      Tensor prods_until_k = at::prod(input.slice(dim, 0, k), dim, true);
      prods_from_k_plus_1 = at::cumprod(input.slice(dim, k+1), dim);
      omitted_products = prods_until_k.expand_as(prods_from_k_plus_1) * prods_from_k_plus_1;
      omitted_products = at::cat({prods_until_k, omitted_products}, dim);
    }
    // At this point omitted_products is the same size
    // as input, except on the dimension dim where it's
    // dim_size - k
    AT_ASSERT(omitted_products.size(dim) == dim_size - k);
    grad_input.select(dim, k).copy_(
        at::sum(grad.slice(dim, k) * omitted_products,dim));
  }
  return grad_input;
}
// Implement std::is_nan<IntegralType> for MSVC.
// MSVC's std::isnan has no overload for integral types, so on that compiler
// we dispatch at compile time: integral inputs are never NaN by definition.
namespace {
#ifdef _MSC_VER
template<typename T>
inline typename std::enable_if<std::is_integral<T>::value, bool>::type isnan_(T x) {
  // Integral values cannot be NaN.
  return false;
}
template<typename T>
inline typename std::enable_if<!std::is_integral<T>::value, bool>::type isnan_(T x) {
  return std::isnan(x);
}
#else
template<typename T>
inline bool isnan_(T x) {
  return std::isnan(x);
}
#endif
}
// Serial scan along one dimension producing the running extremum
// (Operation = greater_equal for cummax, less_equal for cummin) and the
// index at which it was attained. NaNs propagate: once the running value is
// NaN it is never replaced, because the comparison is skipped for NaN.
template<typename T1, typename T2, typename Operation>
void cummax_cummin_helper(const T1* self_data, T1* values_data, T2* indices_data,
      int self_dim_size, int self_stride, int values_stride, int indices_stride) {
      Operation better;
      T1 running = self_data[0];
      int running_idx = 0;
      for (int i = 0; i < self_dim_size; i++) {
        const T1 elem = self_data[i * self_stride];
        // A NaN element always wins; otherwise compare against the running value.
        if (isnan_(elem) || (!isnan_(running) && better(elem, running))) {
          running = elem;
          running_idx = i;
        }
        values_data[i * values_stride] = running;
        indices_data[i * indices_stride] = running_idx;
      }
}
// CPU kernel driver for cummax: dispatches on dtype (all types + bool) and
// applies the shared scan helper with a >= comparison along `dim`.
void cummax_helper_cpu(const Tensor& self, Tensor& values, Tensor& indices, int64_t dim) {
  AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Bool,
    self.scalar_type(), "cummax_cpu",
    [&] {
      at::native::tensor_dim_apply3<scalar_t, int64_t>(self, values, indices, dim, cummax_cummin_helper<scalar_t, int64_t, std::greater_equal<scalar_t>>);
    });
}
// Out-variant of cummax: resizes `values`/`indices` to self's shape and
// fills them with the running maximum and its index along `dim`.
std::tuple<Tensor&, Tensor&> cummax_out(Tensor& values, Tensor& indices, const Tensor& self, int64_t dim) {
  check_scalar_type_device_layout_equal(values, self);
  check_scalar_type_device_layout_equal(indices, at::empty({0}, self.options().dtype(at::kLong)));
  {
    NoNamesGuard guard;
    values.resize_(self.sizes());
    indices.resize_(self.sizes());
    const bool scalar_input = self.dim() == 0;
    if (scalar_input) {
      // 0-d tensor: the scan is the element itself at index 0.
      values.fill_(self);
      indices.fill_(0);
    } else if (self.numel() > 0) {
      at::_cummax_helper(self, values, indices, maybe_wrap_dim(dim, self.dim()));
    }
  }
  namedinference::propagate_names(values, self);
  namedinference::propagate_names(indices, self);
  return std::forward_as_tuple(values, indices);
}
std::tuple<Tensor, Tensor> cummax(const Tensor& self, int64_t dim) {
auto values = at::empty(self.sizes(), self.options());
auto indices = at::empty(self.sizes(), self.options().dtype(at::kLong));
at::cummax_out(values, indices, self, dim);
return std::make_tuple(values, indices);
}
// CPU kernel driver for cummin: dispatches on dtype (all types + bool) and
// applies the shared scan helper with a <= comparison along `dim`.
void cummin_helper_cpu(const Tensor& self, Tensor& values, Tensor& indices, int64_t dim) {
  AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Bool,
    self.scalar_type(), "cummin_cpu",
    [&] {
      at::native::tensor_dim_apply3<scalar_t, int64_t>(self, values, indices, dim, cummax_cummin_helper<scalar_t, int64_t, std::less_equal<scalar_t>>);
    });
}
// Out-variant of cummin: resizes `values`/`indices` to self's shape and
// fills them with the running minimum and its index along `dim`.
std::tuple<Tensor&, Tensor&> cummin_out(Tensor& values, Tensor& indices, const Tensor& self, int64_t dim) {
  check_scalar_type_device_layout_equal(values, self);
  check_scalar_type_device_layout_equal(indices, at::empty({0}, self.options().dtype(at::kLong)));
  {
    NoNamesGuard guard;
    values.resize_(self.sizes());
    indices.resize_(self.sizes());
    const bool scalar_input = self.dim() == 0;
    if (scalar_input) {
      // 0-d tensor: the scan is the element itself at index 0.
      values.fill_(self);
      indices.fill_(0);
    } else if (self.numel() > 0) {
      at::_cummin_helper(self, values, indices, maybe_wrap_dim(dim, self.dim()));
    }
  }
  namedinference::propagate_names(values, self);
  namedinference::propagate_names(indices, self);
  return std::forward_as_tuple(values, indices);
}
std::tuple<Tensor, Tensor> cummin(const Tensor& self, int64_t dim) {
auto values = at::empty(self.sizes(), self.options());
auto indices = at::empty(self.sizes(), self.options().dtype(at::kLong));
at::cummin_out(values, indices, self, dim);
return std::make_tuple(values, indices);
}
// Shared backward for cummax/cummin: route each output-gradient entry back
// to the input position recorded in `indices` (scatter-add along `dim`).
Tensor cummaxmin_backward(const Tensor& grad, const Tensor& input, const Tensor& indices, int64_t dim) {
  if (input.numel() == 0) {
    return input;
  }
  Tensor grad_input = at::zeros(input.sizes(), input.options());
  grad_input.scatter_add_(dim, indices, grad);
  return grad_input;
}
// ALL REDUCE #################################################################
// Resolves the accumulation dtype for a reduction.
// Priority: explicit dtype > dtype of a pre-existing result tensor > input
// dtype (promoted to int64 for integral/bool inputs when requested).
static ScalarType get_dtype(Tensor& result, const Tensor& self, optional<ScalarType> dtype,
                            bool promote_integers=false) {
  if (dtype.has_value()) {
    return *dtype;
  }
  if (result.defined()) {
    return result.scalar_type();
  }
  const ScalarType src_type = self.scalar_type();
  return (promote_integers && at::isIntegralType(src_type, /*includeBool=*/true))
      ? kLong
      : src_type;
}
// Out-variant of sum over the dims in `dim` (empty list = all dims).
Tensor& sum_out(Tensor& result, const Tensor& self, IntArrayRef dim,
                bool keepdim, optional<ScalarType> opt_dtype) {
  const ScalarType dtype = get_dtype(result, self, opt_dtype, /*promote_integers=*/true);
  auto iter = make_reduction("sum", result, self, dim, keepdim, dtype);
  if (iter.numel() != 0) {
    sum_stub(iter.device_type(), iter);
  } else {
    // Empty reduction: the additive identity.
    result.zero_();
  }
  return result;
}
// Full reduction over all elements: delegates with an empty dim list.
Tensor sum(const Tensor &self, c10::optional<ScalarType> dtype) {
  const std::vector<int64_t> all_dims{};
  return at::native::sum(self, all_dims, false, dtype);
}
// Functional sum over `dim`: allocates the result via the out-variant.
Tensor sum(const Tensor& self, IntArrayRef dim, bool keepdim, c10::optional<ScalarType> dtype) {
  Tensor result;
  at::native::sum_out(result, self, dim, keepdim, dtype);
  return result;
}
// Named-dimension overload: translate names to positions and defer.
Tensor sum(const Tensor& self, DimnameList dim, bool keepdim, c10::optional<ScalarType> dtype) {
  const auto positions = dimnames_to_positions(self, dim);
  return at::sum(self, positions, keepdim, dtype);
}
// Named-dimension out-variant: translate names to positions and defer.
Tensor& sum_out(Tensor& result, const Tensor& self, DimnameList dim,
                bool keepdim, optional<ScalarType> opt_dtype) {
  const auto positions = dimnames_to_positions(self, dim);
  return at::sum_out(result, self, positions, keepdim, opt_dtype);
}
// Out-variant of nansum: sums over `dim` treating NaNs as zero.
// Complex inputs are rejected; integral inputs cannot hold NaN, so they
// fall through to the plain sum.
Tensor& nansum_out(Tensor& result, const Tensor& self, IntArrayRef dim,
                   bool keepdim, optional<ScalarType> opt_dtype) {
  TORCH_CHECK(!c10::isComplexType(self.scalar_type()), "nansum does not support complex inputs");
  // For integral types, use existing sum as
  // integral types don't have `Nan`.
  if (c10::isIntegralType(self.scalar_type(), true)){
    return at::sum_out(result, self, dim, keepdim, opt_dtype);
  }
  ScalarType dtype = get_dtype(result, self, opt_dtype, true);
  auto iter = make_reduction("nansum", result, self, dim, keepdim, dtype);
  if (iter.numel() == 0) {
    // Empty reduction: the additive identity. (zero_() mutates in place;
    // the previous self-assignment `result = result.zero_()` was a no-op
    // and inconsistent with sum_out.)
    result.zero_();
  } else {
    nansum_stub(iter.device_type(), iter);
  }
  return result;
}
// Full nansum over all elements: delegates with an empty dim list.
Tensor nansum(const Tensor &self, c10::optional<ScalarType> dtype) {
  const std::vector<int64_t> all_dims{};
  return at::native::nansum(self, all_dims, false, dtype);
}
// Functional nansum over `dim`: allocates the result via the out-variant.
Tensor nansum(const Tensor& self, IntArrayRef dim, bool keepdim, c10::optional<ScalarType> dtype) {
  Tensor result;
  at::native::nansum_out(result, self, dim, keepdim, dtype);
  return result;
}
// Common implementation behind all prod overloads.
static Tensor& prod_out_impl(Tensor& result, const Tensor& self, IntArrayRef dim,
                             bool keepdim, c10::optional<ScalarType> opt_dtype) {
  const ScalarType dtype = get_dtype(result, self, opt_dtype, true);
  auto iter = make_reduction("prod", result, self, dim, keepdim, dtype);
  if (iter.numel() != 0) {
    prod_stub(iter.device_type(), iter);
  } else {
    // Empty reduction: the multiplicative identity.
    result.fill_(1);
  }
  return result;
}
// Functional prod over a single dim: allocates the result via the impl.
Tensor prod(const Tensor& self, int64_t dim, bool keepdim, c10::optional<ScalarType> dtype) {
  Tensor result;
  return native::prod_out_impl(result, self, dim, keepdim, dtype);
}
// Full prod over all elements: delegates with an empty dim list.
Tensor prod(const Tensor &self, c10::optional<ScalarType> dtype) {
  Tensor result;
  return at::native::prod_out_impl(result, self, {}, false, dtype);
}
// Out-variant of prod over a single dim.
Tensor& prod_out(Tensor& result, const Tensor& self, int64_t dim, bool keepdim, c10::optional<ScalarType> dtype) {
  return at::native::prod_out_impl(result, self, dim, keepdim, dtype);
}
// Named-dimension overload: translate the name to a position and defer.
Tensor prod(const Tensor& self, Dimname dim, bool keepdim, c10::optional<ScalarType> dtype) {
  const auto pos = dimname_to_position(self, dim);
  return at::prod(self, pos, keepdim, dtype);
}
// Named-dimension out-variant: translate the name to a position and defer.
Tensor& prod_out(Tensor& result, const Tensor& self, Dimname dim,
                 bool keepdim, optional<ScalarType> opt_dtype) {
  const auto pos = dimname_to_position(self, dim);
  return at::prod_out(result, self, pos, keepdim, opt_dtype);
}
// Out-variant of mean over `dim`. On CPU it is computed as sum / count
// (see the TODO below); elsewhere the dedicated mean kernel is used.
Tensor &mean_out_cpu_gpu(Tensor &result, const Tensor &self, IntArrayRef dim,
                         bool keepdim, c10::optional<ScalarType> opt_dtype) {
  const ScalarType scalarType = opt_dtype.has_value() ? opt_dtype.value() : self.scalar_type();
  TORCH_CHECK(
      at::isFloatingType(scalarType) || at::isComplexType(scalarType),
      "Can only calculate the mean of floating types. Got ",
      toString(scalarType),
      " instead.");
  const ScalarType dtype = get_dtype(result, self, opt_dtype, true);
  // TODO: the TensorIterator reduction implementation of mean
  // (mean_kernel_impl()) is unvectorized and leads to very poor performance
  // for production workloads. Once that's fixed, the following code can be used
  // in lieu of the sum + divide implementation below.
  if (self.device().is_cpu()) {
    // Count of elements collapsed by the reduction: product of the reduced
    // dims, or the whole tensor for a full reduction.
    int64_t reduced_count = 1;
    if (dim.size() == 0 || self.ndimension() == 0) {
      reduced_count = self.numel();
    } else {
      for (auto d : dim) {
        reduced_count *= self.size(d);
      }
    }
    at::sum_out(result, self, dim, keepdim, dtype).div_(reduced_count);
    return result;
  }
  auto iter = make_reduction("mean", result, self, dim, keepdim, dtype);
  if (iter.numel() == 0) {
    // Mean of an empty reduction is undefined: fill with NaN.
    result.fill_(std::numeric_limits<double>::quiet_NaN());
  } else {
    mean_stub(iter.device_type(), iter);
  }
  return result;
}
// Full mean over all elements: delegates with an empty dim list.
Tensor mean_cpu_gpu(const Tensor &self, optional<ScalarType> dtype) {
  const IntArrayRef all_dims{};
  return at::native::mean_cpu_gpu(self, all_dims, false, dtype);
}
// Functional mean over `dim`: allocates the result via the out-variant.
Tensor mean_cpu_gpu(const Tensor& self, IntArrayRef dim, bool keepdim, optional<ScalarType> dtype) {
  Tensor result;
  at::native::mean_out_cpu_gpu(result, self, dim, keepdim, dtype);
  return result;
}
// Named-dimension overload: translate names to positions and defer.
Tensor mean(const Tensor& self, DimnameList dim, bool keepdim, optional<ScalarType> dtype) {
  const auto positions = dimnames_to_positions(self, dim);
  return at::mean(self, positions, keepdim, dtype);
}
// Named-dimension out-variant: translate names to positions and defer.
Tensor& mean_out(Tensor& result, const Tensor& self, DimnameList dim,
                 bool keepdim, c10::optional<ScalarType> opt_dtype) {
  const auto positions = dimnames_to_positions(self, dim);
  return at::mean_out(result, self, positions, keepdim, opt_dtype);
}
// Squeezes every dimension listed in `dims` out of `self`.
static Tensor squeeze_multiple(const Tensor& self, IntArrayRef dims) {
  const int ndims = self.sizes().size();
  const auto mask = at::dim_list_to_bitset(dims, ndims);
  Tensor out = self;
  // Squeeze back-to-front so earlier dim indices stay valid.
  for (int d = ndims - 1; d >= 0; --d) {
    if (mask[d]) {
      out = out.squeeze(d);
    }
  }
  return out;
}
// Numerically stable logsumexp over `dims`: subtract the per-slice max,
// exponentiate, sum, log, then add the max back.
static Tensor& logsumexp_out_impl(Tensor& result, const Tensor& self, IntArrayRef dims, bool keepdim) {
  // can't take max of empty tensor
  if (self.numel() != 0) {
    auto maxes = at::amax(self, dims, true);
    auto maxes_squeezed = (keepdim ? maxes : squeeze_multiple(maxes, dims));
    // Zero out +/-inf maxes so that inf - inf does not produce NaN below.
    // NOTE: when keepdim is true, maxes_squeezed aliases maxes, so this
    // in-place fill also affects the `self - maxes` subtraction.
    maxes_squeezed.masked_fill_(maxes_squeezed.abs() == INFINITY, 0);
    at::sum_out(result, at::exp(self - maxes), dims, keepdim);
    result.log_().add_(maxes_squeezed);
  } else {
    // Empty input: log(sum(exp(x))) degenerates to log of an empty sum.
    at::sum_out(result, at::exp(self), dims, keepdim);
    result.log_();
  }
  return result;
}
// Out-variant of logsumexp: runs the impl with name propagation suppressed,
// then restores reduction-aware names.
Tensor& logsumexp_out(Tensor& result, const Tensor& self, IntArrayRef dims, bool keepdim) {
  {
    NoNamesGuard guard;
    logsumexp_out_impl(result, self, dims, keepdim);
  }
  namedinference::propagate_names_for_reduction(result, self, dims, keepdim);
  return result;
}
// Functional logsumexp: allocates the result via the out-variant.
Tensor logsumexp(const Tensor& self, IntArrayRef dims, bool keepdim) {
  Tensor result = at::empty({0}, self.options());
  return at::native::logsumexp_out(result, self, dims, keepdim);
}
// Named-dimension overload: translate names to positions and defer.
Tensor logsumexp(const Tensor& self, DimnameList dims, bool keepdim) {
  const auto positions = dimnames_to_positions(self, dims);
  return at::logsumexp(self, positions, keepdim);
}
// Named-dimension out-variant: translate names to positions and defer.
Tensor& logsumexp_out(Tensor& result, const Tensor& self, DimnameList dims, bool keepdim) {
  const auto positions = dimnames_to_positions(self, dims);
  return at::logsumexp_out(result, self, positions, keepdim);
}
// Shared implementation behind all strided-tensor norm overloads.
// `opt_p` defaults to 2 (Euclidean norm).
static Tensor& norm_out(Tensor &result, const Tensor &self, optional<Scalar> opt_p,
                        IntArrayRef dim, bool keepdim, optional<ScalarType> opt_dtype) {
  auto p = opt_p.value_or(2.0);
  TORCH_CHECK(!(p.toDouble() == 2 && self.is_complex()), "norm with p=2 not supported for complex tensors");
  TORCH_CHECK(self.device().type() == DeviceType::CPU || self.device().type() == DeviceType::CUDA,
              "norm only supports CPU AND CUDA device type, got: ", self.device().type());
  TORCH_CHECK(self.layout() == Layout::Strided,
              "norm only supports strided layout, got: ", self.layout());
  ScalarType scalarType = opt_dtype.has_value() ? opt_dtype.value() : self.scalar_type();
  // Error message previously said "mean" — a copy-paste from mean_out_cpu_gpu.
  TORCH_CHECK(
      at::isFloatingType(scalarType) || at::isComplexType(scalarType),
      "Can only calculate the norm of floating types. Got ",
      toString(scalarType),
      " instead.");
  ScalarType dtype = get_dtype(result, self, opt_dtype, true);
  auto iter = make_reduction("norm", result, self, dim, keepdim, dtype);
  if (iter.numel() == 0) {
    // Norm of an empty reduction is 0.
    result.zero_();
  } else {
    norm_stub(iter.device_type(), iter, p);
  }
  return result;
}
// Full p-norm over all elements; sparse tensors take a dedicated path.
static inline Tensor _norm(const Tensor &self, Scalar p) {
  if (self.is_sparse()) {
    // Sparse tensors need a different implementation because their values
    // are accessed with a different API than strided tensors
    return at::native_norm(self, p);
  }
  TORCH_CHECK(self.device().type() == DeviceType::CPU || self.device().type() == DeviceType::CUDA,
              "norm only supports CPU AND CUDA device type, got: ", self.device().type());
  TORCH_CHECK(self.layout() == Layout::Strided,
              "norm only supports strided layout, got: ", self.layout());
  TORCH_CHECK(at::isFloatingType(self.scalar_type()) || at::isComplexType(self.scalar_type()),
              "norm only supports floating-point dtypes");
  Tensor result;
  return at::native::norm_out(result, self, p, IntArrayRef{}, false, c10::nullopt);
}
// Out-variant with an explicit dtype: wraps it in optional and defers.
Tensor &norm_out(Tensor& result, const Tensor& self, optional<Scalar> p, IntArrayRef dim, bool keepdim, ScalarType dtype) {
  const optional<ScalarType> opt_dtype(dtype);
  return at::native::norm_out(result, self, p, dim, keepdim, opt_dtype);
}
// Out-variant without a dtype: defer with nullopt.
Tensor &norm_out(Tensor& result, const Tensor& self, optional<Scalar> p, IntArrayRef dim, bool keepdim) {
  return at::native::norm_out(result, self, p, dim, keepdim, c10::nullopt);
}
// Functional dim-norm; sparse tensors take a dedicated path.
static Tensor norm(const Tensor& self, optional<Scalar> p, IntArrayRef dim, bool keepdim,
                   optional<ScalarType> opt_dtype) {
  if (self.is_sparse()) {
    // Sparse tensors need a different implementation because their values
    // are accessed with a different API than strided tensors
    return at::native_norm(self, p, dim, keepdim, opt_dtype);
  }
  Tensor result;
  return at::native::norm_out(result, self, p, dim, keepdim, opt_dtype);
}
// Dim-norm with explicit dtype: wrap and defer.
Tensor norm(const Tensor& self, optional<Scalar> p, IntArrayRef dim, bool keepdim, ScalarType dtype) {
  const optional<ScalarType> opt_dtype(dtype);
  return at::native::norm(self, p, dim, keepdim, opt_dtype);
}
// Full norm with explicit dtype: empty dim list reduces all dims.
Tensor norm(const Tensor& self, optional<Scalar> p, ScalarType dtype) {
  const optional<ScalarType> opt_dtype(dtype);
  return at::native::norm(self, p, IntArrayRef{}, false, opt_dtype);
}
// Dim-norm without a dtype: defer with nullopt.
Tensor norm(const Tensor& self, optional<Scalar> p, IntArrayRef dim, bool keepdim) {
  return at::native::norm(self, p, dim, keepdim, c10::nullopt);
}
// leave it so we support sparse tensors
// Scalar-p overload, kept so sparse tensors are supported (see _norm).
Tensor norm(const Tensor& self, Scalar p) {
  return at::native::_norm(self, p);
}
// Runs the logical-AND reduction kernel over `iter` into `result`.
inline Tensor & _all(Tensor & result, TensorIterator & iter) {
  if (iter.numel() != 0) {
    and_stub(iter.device_type(), iter);
  } else {
    // Empty reduction: "all" is vacuously true.
    result.fill_(1);
  }
  return result;
}
// Full reduction: true iff every element is truthy.
// Restricted to strided uint8/bool tensors on CPU/CUDA.
Tensor all(const Tensor& self) {
  TORCH_CHECK(self.device().type() == DeviceType::CPU || self.device().type() == DeviceType::CUDA,
              "all only supports CPU AND CUDA device type, got: ", self.device().type());
  TORCH_CHECK(self.layout() == Layout::Strided,
              "all only supports strided layout, got: ", self.layout());
  TORCH_CHECK(self.scalar_type() == at::ScalarType::Byte || self.scalar_type() == at::ScalarType::Bool,
              "all only supports torch.uint8 and torch.bool dtypes");
  Tensor result = at::empty({0}, self.options());
  auto iter = make_reduction("all", result, self, {}, false, self.scalar_type());
  return _all(result, iter);
}
// Functional all over one dim: allocates the result via the out-variant.
Tensor all(const Tensor& self, int64_t dim, bool keepdim) {
  Tensor result = at::empty({0}, self.options());
  return at::native::all_out(result, self, dim, keepdim);
}
// Out-variant of all over one dim. A size-1 reduced dim is handled by the
// trivial-copy shortcut (_dimreduce_return_trivial with identity 1).
Tensor &all_out(Tensor &result, const Tensor &self, int64_t dim, bool keepdim) {
  TORCH_CHECK(self.device().type() == DeviceType::CPU || self.device().type() == DeviceType::CUDA,
              "all only supports CPU AND CUDA device type, got: ", self.device().type());
  TORCH_CHECK(self.layout() == Layout::Strided,
              "all only supports strided layout, got: ", self.layout());
  TORCH_CHECK(self.scalar_type() == at::ScalarType::Byte || self.scalar_type() == at::ScalarType::Bool,
              "all only supports torch.uint8 and torch.bool dtypes");
  dim = maybe_wrap_dim(dim, self.dim());
  if (_dimreduce_return_trivial(result, self, 1, dim, keepdim)) {
    return result;
  }
  auto iter = make_reduction("all", result, self, dim, keepdim, self.scalar_type());
  return _all(result, iter);
}
// Runs the logical-OR reduction kernel over `iter` into `result`.
inline Tensor & _any(Tensor & result, TensorIterator & iter) {
  if (iter.numel() != 0) {
    or_stub(iter.device_type(), iter);
  } else {
    // Empty reduction: "any" is vacuously false.
    result.fill_(0);
  }
  return result;
}
// Full reduction: true iff at least one element is truthy.
// Restricted to uint8/bool tensors (strided or sparse) on CPU/CUDA.
Tensor any(const Tensor& self) {
  TORCH_CHECK(self.device().type() == DeviceType::CPU || self.device().type() == DeviceType::CUDA,
              "any only supports CPU AND CUDA device type, got: ", self.device().type());
  TORCH_CHECK(self.layout() == Layout::Strided || self.layout() == Layout::Sparse,
              "any only supports strided AND sparse layout, got: ", self.layout());
  // Message previously said "all" — copy-paste error from the `all` overload.
  TORCH_CHECK(self.scalar_type() == at::ScalarType::Byte || self.scalar_type() == at::ScalarType::Bool,
              "any only supports torch.uint8 and torch.bool dtypes");
  Tensor result = at::empty({0}, self.options());
  auto iter = make_reduction("any", result, self, {}, false, self.scalar_type());
  return _any(result, iter);
}
// Functional any over one dim: allocates the result via the out-variant.
Tensor any(const Tensor& self, int64_t dim, bool keepdim) {
  Tensor result = at::empty({0}, self.options());
  return at::native::any_out(result, self, dim, keepdim);
}
// Out-variant of any over one dim. A size-1 reduced dim is handled by the
// trivial-copy shortcut (_dimreduce_return_trivial with identity 0).
Tensor &any_out(Tensor &result, const Tensor &self, int64_t dim, bool keepdim) {
  TORCH_CHECK(self.device().type() == DeviceType::CPU || self.device().type() == DeviceType::CUDA,
              "any only supports CPU AND CUDA device type, got: ", self.device().type());
  TORCH_CHECK(self.layout() == Layout::Strided,
              "any only supports strided layout, got: ", self.layout());
  // Message previously said "all" — copy-paste error from the `all` overload.
  TORCH_CHECK(self.scalar_type() == at::ScalarType::Byte || self.scalar_type() == at::ScalarType::Bool,
              "any only supports torch.uint8 and torch.bool dtypes");
  dim = maybe_wrap_dim(dim, self.dim());
  if (_dimreduce_return_trivial(result, self, 0, dim, keepdim)) {
    return result;
  } else {
    auto iter = make_reduction("any", result, self, dim, keepdim, self.scalar_type());
    return _any(result, iter);
  }
}
// Out-variant of amin (elementwise minimum over `dim`); min has no
// identity, so empty reductions are rejected.
Tensor &amin_out(Tensor& result, const Tensor& self, IntArrayRef dim, bool keepdim) {
  TORCH_CHECK(self.scalar_type() == result.scalar_type(), "Illegal dtype for self, and out:", self.scalar_type(), result.scalar_type());
  auto iter = make_reduction("amin", result, self, dim, keepdim, self.scalar_type());
  TORCH_CHECK(iter.numel() > 0, "operation does not have an identity");
  min_values_stub(iter.device_type(), iter);
  return result;
}
// Functional amin: allocates the result via the out-variant.
Tensor amin(const Tensor& self, IntArrayRef dim, bool keepdim) {
  Tensor result = at::empty({0}, self.options());
  return at::amin_out(result, self, dim, keepdim);
}
// Out-variant of amax (elementwise maximum over `dim`); max has no
// identity, so empty reductions are rejected.
Tensor &amax_out(Tensor& result, const Tensor& self, IntArrayRef dim, bool keepdim) {
  TORCH_CHECK(self.scalar_type() == result.scalar_type(), "Illegal dtype for self, and out:", self.scalar_type(), result.scalar_type());
  auto iter = make_reduction("amax", result, self, dim, keepdim, self.scalar_type());
  TORCH_CHECK(iter.numel() > 0, "operation does not have an identity");
  max_values_stub(iter.device_type(), iter);
  return result;
}
// Functional amax: allocates the result via the out-variant.
Tensor amax(const Tensor& self, IntArrayRef dim, bool keepdim) {
  Tensor result = at::empty({0}, self.options());
  return at::amax_out(result, self, dim, keepdim);
}
// Out-variant of argmax. With no `dim`, the input is flattened and the
// global argmax (into the flattened view) is returned.
Tensor& argmax_out(Tensor& result, const Tensor& self, c10::optional<int64_t> dim, bool keepdim) {
  TORCH_CHECK(self.numel() > 0, "cannot perform reduction function argmax on a "
      "tensor with no elements because the operation does not have an identity");
  Tensor in;
  if (dim) {
    auto sizes = self.sizes();
    auto wrap_dim = maybe_wrap_dim(dim.value(), self.dim());
    if (sizes[wrap_dim] == 1) {
      // Size-1 reduced dim: every argmax index is 0; build the zeros directly.
      // NOTE(review): this rebinds `result` to a freshly allocated tensor
      // rather than writing into the caller-provided out tensor — confirm
      // callers rely on the returned value, not the original storage.
      if (keepdim) {
        result = at::zeros(sizes, self.options().dtype(at::kLong));
      } else {
        auto sizes_vec = sizes.vec();
        sizes_vec.erase(sizes_vec.begin() + wrap_dim);
        result = at::zeros(sizes_vec, self.options().dtype(at::kLong));
      }
      return result;
    }
    in = self;
  } else {
    // No dim given: reduce over the flattened tensor; keepdim is meaningless.
    in = self.reshape({-1});
    keepdim = false;
  }
  auto itr = make_reduction("argmax", result, in, dim.value_or(0), keepdim,
      self.scalar_type(), at::kLong);
  argmax_stub(itr.device_type(), itr);
  return result;
}
Tensor argmax(const Tensor& self, c10::optional<int64_t> dim, bool keepdims) {
Tensor result = at::empty({0}, self.options().dtype(at::kLong));
return at::native::argmax_out(result, self, dim, keepdims);
}
// Writes the indices of the minimum values along `dim` into `result`
// (a Long tensor). When `dim` is nullopt the input is flattened and a
// single index is produced. Mirrors argmax_out above.
Tensor& argmin_out(Tensor& result, const Tensor& self, c10::optional<int64_t> dim, bool keepdim) {
  TORCH_CHECK(self.numel() > 0, "cannot perform reduction function argmin on a "
      "tensor with no elements because the operation does not have an identity");
  Tensor in;
  if (dim) {
    auto sizes = self.sizes();
    auto wrap_dim = maybe_wrap_dim(dim.value(), self.dim());
    if (sizes[wrap_dim] == 1) {
      // Fast path: a size-1 reduction dimension always yields index 0.
      if (keepdim) {
        result = at::zeros(sizes, self.options().dtype(at::kLong));
      } else {
        auto sizes_vec = sizes.vec();
        sizes_vec.erase(sizes_vec.begin() + wrap_dim);
        result = at::zeros(sizes_vec, self.options().dtype(at::kLong));
      }
      return result;
    }
    in = self;
  } else {
    // No dim given: reduce over the flattened view; keepdim forced off.
    in = self.reshape({-1});
    keepdim = false;
  }
  // dim.value_or(0): after the reshape above, the only dimension is 0.
  auto itr = make_reduction("argmin", result, in, dim.value_or(0), keepdim,
      self.scalar_type(), at::kLong);
  argmin_stub(itr.device_type(), itr);
  return result;
}

// Functional variant: allocates the Long output and delegates.
Tensor argmin(const Tensor& self, c10::optional<int64_t> dim, bool keepdims) {
  Tensor result = at::empty({0}, self.options().dtype(at::kLong));
  return at::native::argmin_out(result, self, dim, keepdims);
}
// Shared implementation for var.out and std.out; `take_sqrt` selects the
// standard deviation. For complex inputs the real and imaginary parts are
// reduced separately and summed (variances add), with sqrt applied last.
static Tensor& std_var_out(Tensor& result, const Tensor& self, IntArrayRef dim, bool unbiased, bool keepdim, bool take_sqrt) {
  TORCH_CHECK(self.device().type() == DeviceType::CPU || self.device().type() == DeviceType::CUDA,
      "std and var only supports CPU AND CUDA device type, got: ", self.device().type());
  TORCH_CHECK(self.layout() == Layout::Strided,
      "std and var only supports strided layout, got: ", self.layout());
  TORCH_CHECK(at::isFloatingType(self.scalar_type()) || at::isComplexType(self.scalar_type()),
      "std and var only support floating-point dtypes");
  if (at::isComplexType(self.scalar_type())){
    // Complex path: accumulate variances of the components (stub called
    // with take_sqrt=false) into temporaries of the real value type.
    ScalarType dtype = c10::toValueType(get_dtype(result, self, {}, true));
    Tensor real_in = at::real(self);
    Tensor real_out = at::empty({0}, self.options().dtype(dtype));
    auto iter = make_reduction("std or var", real_out, real_in, dim, keepdim, dtype);
    if (iter.numel() == 0) {
      // Empty reduction: the statistic is undefined, fill with NaN.
      real_out.fill_(NAN);
    } else {
      std_var_stub(iter.device_type(), iter, unbiased, false);
    }
    Tensor imag_in = at::imag(self);
    Tensor imag_out = at::empty({0}, self.options().dtype(dtype));
    iter = make_reduction("std or var", imag_out, imag_in, dim, keepdim, dtype);
    if (iter.numel() == 0) {
      imag_out.fill_(NAN);
    } else {
      std_var_stub(iter.device_type(), iter, unbiased, false);
    }
    // var(z) = var(re) + var(im); for std, take sqrt of the sum in place.
    at::add_out(result, real_out, imag_out);
    take_sqrt ? at::sqrt_out(result, result) : result;
  } else{
    // Real path: single fused reduction; the stub applies sqrt itself.
    ScalarType dtype = get_dtype(result, self, {}, true);
    auto iter = make_reduction("std or var", result, self, dim, keepdim, dtype);
    if (iter.numel() == 0) {
      result.fill_(NAN);
    } else {
      std_var_stub(iter.device_type(), iter, unbiased, take_sqrt);
    }
  }
  return result;
}
// Shared implementation for var_mean.out and std_mean.out; `take_sqrt`
// selects the standard deviation. result1 receives the statistic,
// result2 the mean. Complex inputs are handled component-wise as in
// std_var_out, with the complex mean reassembled as re + i*im.
static std::tuple<Tensor&,Tensor&> std_var_mean_out(const char* fname, Tensor &result1, Tensor &result2, const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim, bool take_sqrt) {
  AT_ASSERT(result1.defined() && result2.defined());
  TORCH_CHECK(self.device().type() == DeviceType::CPU || self.device().type() == DeviceType::CUDA,
      fname, " only supports CPU AND CUDA device type, got: ", self.device().type());
  TORCH_CHECK(self.layout() == Layout::Strided,
      fname, " only supports strided layout, got: ", self.layout());
  TORCH_CHECK(at::isFloatingType(self.scalar_type()) || at::isComplexType(self.scalar_type()),
      fname, " only support floating-point dtypes");
  TORCH_CHECK(result1.scalar_type() == result2.scalar_type(),
      "provided by result1 dtype must match dtype of result2. Got ",
      toString(result1.scalar_type()),
      " and ",
      toString(result2.scalar_type()),
      ".");
  if (at::isComplexType(self.scalar_type())){
    // Complex path: reduce real and imaginary components into temporaries
    // of the real value type (stub called with take_sqrt=false so that
    // variances, not stds, are accumulated).
    ScalarType dtype = c10::toValueType(get_dtype(result1, self, {}, true));
    Tensor real_in = at::real(self);
    Tensor real_out_var = at::empty({0}, self.options().dtype(dtype));
    Tensor real_out_mean = at::empty({0}, self.options().dtype(dtype));
    auto iter = make_reduction(fname, real_out_var, real_out_mean, real_in, dim, keepdim, dtype);
    if (iter.numel() == 0) {
      // Empty reduction: both statistics are undefined.
      real_out_var.fill_(NAN);
      real_out_mean.fill_(NAN);
    } else {
      std_var_stub(iter.device_type(), iter, unbiased, false);
    }
    Tensor imag_in = at::imag(self);
    Tensor imag_out_var = at::empty({0}, self.options().dtype(dtype));
    Tensor imag_out_mean = at::empty({0}, self.options().dtype(dtype));
    iter = make_reduction(fname, imag_out_var, imag_out_mean, imag_in, dim, keepdim, dtype);
    if (iter.numel() == 0) {
      imag_out_var.fill_(NAN);
      imag_out_mean.fill_(NAN);
    } else {
      std_var_stub(iter.device_type(), iter, unbiased, false);
    }
    // var(z) = var(re) + var(im); sqrt only when std was requested.
    at::add_out(result1, real_out_var, imag_out_var);
    take_sqrt ? at::sqrt_out(result1, result1) : result1;
    // Reassemble the complex mean: mean(re) + i * mean(im).
    at::add_out(result2, real_out_mean, at::mul(imag_out_mean, c10::complex<double>{0.0, 1.0}));
  } else {
    // Real path: one fused reduction producing both outputs; the stub
    // applies sqrt itself when requested.
    ScalarType dtype = get_dtype(result1, self, {}, true);
    auto iter = make_reduction(fname, result1, result2, self, dim, keepdim, dtype);
    if (iter.numel() == 0) {
      result1.fill_(NAN);
      result2.fill_(NAN);
    } else {
      std_var_stub(iter.device_type(), iter, unbiased, take_sqrt);
    }
  }
  return std::tuple<Tensor&, Tensor&>(result1, result2);
}
// Thin wrappers over std_var_mean_out; the trailing boolean is take_sqrt
// (false -> variance, true -> standard deviation).
std::tuple<Tensor&,Tensor&> var_mean_out(Tensor &result1, Tensor &result2, const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim) {
  return std_var_mean_out("var_mean", result1, result2, self, dim, unbiased, keepdim, false);
}

std::tuple<Tensor&,Tensor&> std_mean_out(Tensor &result1, Tensor &result2, const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim) {
  return std_var_mean_out("std_mean", result1, result2, self, dim, unbiased, keepdim, true);
}

// All-reduce variants: an empty dim list reduces over every dimension.
std::tuple<Tensor&,Tensor&> var_mean_out(Tensor &result1, Tensor &result2, const Tensor &self, bool unbiased) {
  return std_var_mean_out("var_mean", result1, result2, self, {}, unbiased, false, false);
}

std::tuple<Tensor&,Tensor&> std_mean_out(Tensor &result1, Tensor &result2, const Tensor &self, bool unbiased) {
  return std_var_mean_out("std_mean", result1, result2, self, {}, unbiased, false, true);
}

// Functional variants: allocate both outputs and delegate to the out= form.
std::tuple<Tensor,Tensor> var_mean(const Tensor& self, IntArrayRef dim, bool unbiased, bool keepdim) {
  Tensor stat_result = at::empty({0}, self.options());
  Tensor mean_result = at::empty({0}, self.options());
  return at::native::var_mean_out(stat_result, mean_result, self, dim, unbiased, keepdim);
}

std::tuple<Tensor,Tensor> std_mean(const Tensor& self, IntArrayRef dim, bool unbiased, bool keepdim) {
  Tensor stat_result = at::empty({0}, self.options());
  Tensor mean_result = at::empty({0}, self.options());
  return at::native::std_mean_out(stat_result, mean_result, self, dim, unbiased, keepdim);
}

std::tuple<Tensor,Tensor> std_mean(const Tensor& self, bool unbiased) {
  Tensor stat_result = at::empty({0}, self.options());
  Tensor mean_result = at::empty({0}, self.options());
  return at::native::std_mean_out(stat_result, mean_result, self, unbiased);
}

std::tuple<Tensor,Tensor> var_mean(const Tensor& self, bool unbiased) {
  Tensor stat_result = at::empty({0}, self.options());
  Tensor mean_result = at::empty({0}, self.options());
  return at::native::var_mean_out(stat_result, mean_result, self, unbiased);
}
// Full (all-dims) variance of `self`. Returns NaN for trivial inputs via
// _allreduce_return_trivial.
Tensor var(const Tensor& self, bool unbiased) {
  TORCH_CHECK(self.device().type() == DeviceType::CPU || self.device().type() == DeviceType::CUDA,
      "var only supports CPU AND CUDA device type, got: ", self.device().type());
  TORCH_CHECK(self.layout() == Layout::Strided,
      "var only supports strided layout, got: ", self.layout());
  TORCH_CHECK(at::isFloatingType(self.scalar_type()) || at::isComplexType(self.scalar_type()),
      "var only supports floating-point dtypes");
  auto trivial_return = _allreduce_return_trivial(self, std::numeric_limits<double>::quiet_NaN());
  if (trivial_return.has_value()) {
    return trivial_return.value();
  }
  // NOTE: CPU performance significantly regressed when attempting to port to ATen,
  // so this dispatches differently based on device type.
  // See https://github.com/pytorch/pytorch/pull/43858.
  if (self.device().type() == kCPU) {
    return at::_var(self, unbiased);
  }
  Tensor result = at::empty({0}, self.options());
  return std_var_out(result, self, std::vector<int64_t>{}, unbiased, false, false);
}
// Dimension-wise variance: allocate the output and delegate.
Tensor var(const Tensor& self, IntArrayRef dim, bool unbiased, bool keepdim) {
  Tensor out = at::empty({0}, self.options());
  return at::native::var_out(out, self, dim, unbiased, keepdim);
}

// out= variant; take_sqrt=false selects variance over std.
Tensor& var_out(Tensor& result, const Tensor& self, IntArrayRef dim, bool unbiased, bool keepdim) {
  return std_var_out(result, self, dim, unbiased, keepdim, /*take_sqrt=*/false);
}
// Full (all-dims) standard deviation of `self`. Returns NaN for trivial
// inputs via _allreduce_return_trivial.
Tensor std(const Tensor& self, bool unbiased) {
  TORCH_CHECK(self.device().type() == DeviceType::CPU || self.device().type() == DeviceType::CUDA,
      "std only supports CPU AND CUDA device type, got: ", self.device().type());
  TORCH_CHECK(self.layout() == Layout::Strided,
      "std only supports strided layout, got: ", self.layout());
  TORCH_CHECK(at::isFloatingType(self.scalar_type()) || at::isComplexType(self.scalar_type()),
      "std only supports floating-point dtypes");
  auto trivial_return = _allreduce_return_trivial(self, std::numeric_limits<double>::quiet_NaN());
  if (trivial_return.has_value()) {
    return trivial_return.value();
  }
  // NOTE: CPU performance significantly regressed when attempting to port to ATen,
  // so this dispatches differently based on device type.
  // See https://github.com/pytorch/pytorch/pull/43858.
  if (self.device().type() == kCPU) {
    return at::_std(self, unbiased);
  }
  Tensor result = at::empty({0}, self.options());
  return std_var_out(result, self, std::vector<int64_t>{}, unbiased, false, true);
}
// Dimension-wise standard deviation: allocate the output and delegate.
Tensor std(const Tensor& self, IntArrayRef dim, bool unbiased, bool keepdim) {
  Tensor out = at::empty({0}, self.options());
  return at::native::std_out(out, self, dim, unbiased, keepdim);
}

// out= variant; take_sqrt=true selects std over variance.
Tensor& std_out(Tensor& result, const Tensor& self, IntArrayRef dim, bool unbiased, bool keepdim) {
  return std_var_out(result, self, dim, unbiased, keepdim, /*take_sqrt=*/true);
}

// Named-dimension overloads: translate names to positions and delegate.
Tensor std(const Tensor& self, DimnameList dim, bool unbiased, bool keepdim) {
  const auto positions = dimnames_to_positions(self, dim);
  return at::std(self, positions, unbiased, keepdim);
}

Tensor& std_out(Tensor& result, const Tensor& self, DimnameList dim, bool unbiased, bool keepdim) {
  const auto positions = dimnames_to_positions(self, dim);
  return at::std_out(result, self, positions, unbiased, keepdim);
}

Tensor var(const Tensor& self, DimnameList dim, bool unbiased, bool keepdim) {
  const auto positions = dimnames_to_positions(self, dim);
  return at::var(self, positions, unbiased, keepdim);
}
// Named-dimension overload of var.out. Previously this forwarded to
// at::std_out, so it silently computed the standard deviation instead of
// the variance; route it to at::var_out so the result matches the
// positional var_out overload above.
Tensor& var_out(Tensor& result, const Tensor& self, DimnameList dim, bool unbiased, bool keepdim) {
  return at::var_out(result, self, dimnames_to_positions(self, dim), unbiased, keepdim);
}
// Named-dimension overloads: translate names to positions and delegate.
std::tuple<Tensor,Tensor> var_mean(const Tensor& self, DimnameList dim, bool unbiased, bool keepdim) {
  const auto positions = dimnames_to_positions(self, dim);
  return at::var_mean(self, positions, unbiased, keepdim);
}

std::tuple<Tensor,Tensor> std_mean(const Tensor& self, DimnameList dim, bool unbiased, bool keepdim) {
  const auto positions = dimnames_to_positions(self, dim);
  return at::std_mean(self, positions, unbiased, keepdim);
}

Tensor& norm_out(Tensor& result, const Tensor& self, optional<Scalar> p, DimnameList dim, bool keepdim, ScalarType dtype) {
  const auto positions = dimnames_to_positions(self, dim);
  return at::norm_out(result, self, p, positions, keepdim, dtype);
}

Tensor& norm_out(Tensor& result, const Tensor& self, optional<Scalar> p, DimnameList dim, bool keepdim) {
  const auto positions = dimnames_to_positions(self, dim);
  return at::norm_out(result, self, p, positions, keepdim);
}

Tensor norm(const Tensor& self, optional<Scalar> p, DimnameList dim, bool keepdim, ScalarType dtype) {
  const auto positions = dimnames_to_positions(self, dim);
  return at::norm(self, p, positions, keepdim, dtype);
}

Tensor norm(const Tensor& self, optional<Scalar> p, DimnameList dim, bool keepdim) {
  const auto positions = dimnames_to_positions(self, dim);
  return at::norm(self, p, positions, keepdim);
}
// Named-dimension overloads of any/all are not implemented yet;
// reportNYIDimnameOverload raises, so these never return a value.
Tensor any(const Tensor& self, Dimname dim, bool keepdim) {
  reportNYIDimnameOverload("any");
}
Tensor& any_out(Tensor& result, const Tensor &self, Dimname dim, bool keepdim) {
  reportNYIDimnameOverload("any");
}
Tensor all(const Tensor& self, Dimname dim, bool keepdim) {
  reportNYIDimnameOverload("all");
}
Tensor& all_out(Tensor& result, const Tensor &self, Dimname dim, bool keepdim) {
  reportNYIDimnameOverload("all");
}
// Named-dimension overloads of the cumulative ops: resolve the name to a
// positional index and delegate to the positional overload.
Tensor logcumsumexp(const Tensor& self, Dimname dim) {
  const auto pos = dimname_to_position(self, dim);
  return at::logcumsumexp(self, pos);
}

Tensor& logcumsumexp_out(Tensor& result, const Tensor& self, Dimname dim) {
  const auto pos = dimname_to_position(self, dim);
  return at::logcumsumexp_out(result, self, pos);
}

Tensor cumsum(const Tensor& self, Dimname dim, c10::optional<ScalarType> dtype) {
  const auto pos = dimname_to_position(self, dim);
  return at::cumsum(self, pos, dtype);
}

Tensor& cumsum_out(Tensor& result, const Tensor& self, Dimname dim, c10::optional<ScalarType> dtype) {
  const auto pos = dimname_to_position(self, dim);
  return at::cumsum_out(result, self, pos, dtype);
}

Tensor cumprod(const Tensor& self, Dimname dim, c10::optional<ScalarType> dtype) {
  const auto pos = dimname_to_position(self, dim);
  return at::cumprod(self, pos, dtype);
}

Tensor& cumprod_out(Tensor& result, const Tensor& self, Dimname dim, c10::optional<ScalarType> dtype) {
  const auto pos = dimname_to_position(self, dim);
  return at::cumprod_out(result, self, pos, dtype);
}

std::tuple<Tensor, Tensor> cummax(const Tensor& self, Dimname dim) {
  const auto pos = dimname_to_position(self, dim);
  return at::cummax(self, pos);
}

std::tuple<Tensor&, Tensor&> cummax_out(Tensor& values, Tensor& indices, const Tensor& self, Dimname dim) {
  const auto pos = dimname_to_position(self, dim);
  return at::cummax_out(values, indices, self, pos);
}

std::tuple<Tensor, Tensor> cummin(const Tensor& self, Dimname dim) {
  const auto pos = dimname_to_position(self, dim);
  return at::cummin(self, pos);
}

std::tuple<Tensor&, Tensor&> cummin_out(Tensor& values, Tensor& indices, const Tensor& self, Dimname dim) {
  const auto pos = dimname_to_position(self, dim);
  return at::cummin_out(values, indices, self, pos);
}
// p-norm of the element-wise difference between two tensors.
Tensor dist(const Tensor &self, const Tensor& other, Scalar p){
  const auto diff = self - other;
  return at::norm(diff, p);
}

// Counts non-zero elements over the given dims by summing the boolean mask.
Tensor count_nonzero(const Tensor& self, IntArrayRef dims){
  return (self != 0).sum(dims);
}

// Optional-dim overload: a missing dim counts over the whole tensor.
Tensor count_nonzero(const Tensor& self, c10::optional<int64_t> dim){
  if (!dim) {
    return at::count_nonzero(self, IntArrayRef{});
  }
  const auto wrap_dim = maybe_wrap_dim(dim.value(), self.dim());
  return at::count_nonzero(self, IntArrayRef{wrap_dim});
}
// CPU kernel for at::equal: true iff the two tensors have equal names,
// device, dtype, shape, and element-wise equal values.
bool cpu_equal(const Tensor& self, const Tensor& other) {
  if (!at::namedinference::are_names_equal(
      self.unsafeGetTensorImpl(), other.unsafeGetTensorImpl())) {
    return false;
  }
  at::NoNamesGuard guard;
  TORCH_CHECK(self.device() == other.device(), "Cannot compare two tensors on "
      "different devices. Got: ", self.device(), " and ", other.device());
  TORCH_CHECK(self.dtype() == other.dtype(),
      "Expected object of scalar type ", self.dtype(), " but got scalar type ",
      other.dtype(), " for argument 'other'");
  if (!self.is_same_size(other)) {
    return false;
  }
  // Shared verdict flag; for_each may run chunks in parallel, and the flag
  // is only ever flipped from true to false, so an atomic bool suffices.
  std::atomic<bool> result{true};
  auto iter = TensorIteratorConfig()
    .add_input(self)
    .add_input(other)
    .allow_cpu_scalars(true)
    .promote_inputs_to_common_dtype(true)
    .build();
  AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kBFloat16, kHalf, iter.input_dtype(), "equal_cpu", [&] {
    iter.for_each([&](char** data, const int64_t *strides, int64_t dim_size) {
      // Early exit: another chunk already found a mismatch.
      if (!result) {
          return;
      }
      char* self_data = data[0];
      char* other_data = data[1];
      for (int64_t i = 0; i < dim_size; ++i) {
        if (*((scalar_t*)self_data) != *((scalar_t*)other_data)) {
          result = false;
          return;
        }
        self_data += strides[0];
        other_data += strides[1];
      }
    });
  });
  return result.load();
}
// max(dim), min(dim), topk(dim), mode(dim), are examples of reduction
// functions that select values. value_selecting_reduction_backward is the
// backward function for those operators; it propagates the grad to the
// specific value locations referred to at `indices`.
// Scatters `grad` back to the positions recorded in `indices`, producing a
// zero tensor of shape `sizes` elsewhere. When the forward reduction
// dropped the dim (!keepdim) it is re-inserted before scattering.
Tensor value_selecting_reduction_backward(const Tensor& grad, int64_t dim, const Tensor& indices, IntArrayRef sizes, bool keepdim) {
  Tensor grad_in = grad;
  Tensor idx = indices;
  if (!keepdim && sizes.size() > 0) {
    grad_in = grad.unsqueeze(dim);
    idx = indices.unsqueeze(dim);
  }
  return at::zeros(sizes, grad_in.options()).scatter_(dim, idx, grad_in);
}
}} // namespace at::native
|
/** @file
*****************************************************************************
Implementation of interfaces for (square-and-multiply) exponentiation.
See exponentiation.hpp .
*****************************************************************************
* @author This file is part of libff, developed by SCIPR Lab
* and contributors (see AUTHORS).
* @copyright MIT license (see LICENSE file)
*****************************************************************************/
#ifndef EXPONENTIATION_TCC_
#define EXPONENTIATION_TCC_
#include <../trusted_libff/libff/common/utils.hpp>
namespace libff {
/**
 * Square-and-multiply exponentiation: returns base^exponent.
 * Scans the exponent's bits from most to least significant; squaring
 * starts only after the first set bit has been seen.
 */
template<typename FieldT, mp_size_t m>
FieldT power(const FieldT &base, const bigint<m> &exponent)
{
    FieldT accum = FieldT::one();
    bool seen_set_bit = false;

    for (long bit = exponent.max_bits() - 1; bit >= 0; --bit)
    {
        if (seen_set_bit)
        {
            accum = accum * accum; // square once per bit after the leading 1
        }

        if (exponent.test_bit(bit))
        {
            seen_set_bit = true;
            accum = accum * base; // multiply where the exponent bit is set
        }
    }

    return accum;
}

/// Convenience overload for machine-word exponents.
template<typename FieldT>
FieldT power(const FieldT &base, const unsigned long exponent)
{
    return power<FieldT>(base, bigint<1>(exponent));
}
} // libff
#endif // EXPONENTIATION_TCC_
|
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "protocol.h"
#include "util.h"
#include "utilstrencodings.h"
#ifndef WIN32
# include <arpa/inet.h>
#endif
// Wire-protocol command strings. Each constant is the literal command name
// placed in the message header.
namespace NetMsgType {
// Handshake and address relay
const char *VERSION="version";
const char *VERACK="verack";
const char *ADDR="addr";
// Inventory and data exchange
const char *INV="inv";
const char *GETDATA="getdata";
const char *MERKLEBLOCK="merkleblock";
const char *GETBLOCKS="getblocks";
const char *GETHEADERS="getheaders";
const char *TX="tx";
const char *HEADERS="headers";
const char *BLOCK="block";
const char *GETADDR="getaddr";
const char *MEMPOOL="mempool";
// Keepalive
const char *PING="ping";
const char *PONG="pong";
const char *NOTFOUND="notfound";
// Bloom filtering (BIP 37)
const char *FILTERLOAD="filterload";
const char *FILTERADD="filteradd";
const char *FILTERCLEAR="filterclear";
const char *REJECT="reject";
const char *SENDHEADERS="sendheaders";
const char *FEEFILTER="feefilter";
// Compact blocks (BIP 152)
const char *SENDCMPCT="sendcmpct";
const char *CMPCTBLOCK="cmpctblock";
const char *GETBLOCKTXN="getblocktxn";
const char *BLOCKTXN="blocktxn";
// Custom vote/block propagation messages added by this project.
const char *VOTE="vote_message";
const char *PUT_BLOCK="put_block";
const char *PUT_VOTE="put_vote";
//const char *BROADCAST_VOTE="111_vote";
};
/** All known message types. Keep this in the same order as the list of
* messages above and in protocol.h.
*/
// The header comment above requires this array to stay in sync with the
// NetMsgType declarations; the project-specific VOTE/PUT_BLOCK/PUT_VOTE
// messages were missing and are now listed so getAllNetMessageTypes()
// reports every known command.
const static std::string allNetMessageTypes[] = {
    NetMsgType::VERSION,
    NetMsgType::VERACK,
    NetMsgType::ADDR,
    NetMsgType::INV,
    NetMsgType::GETDATA,
    NetMsgType::MERKLEBLOCK,
    NetMsgType::GETBLOCKS,
    NetMsgType::GETHEADERS,
    NetMsgType::TX,
    NetMsgType::HEADERS,
    NetMsgType::BLOCK,
    NetMsgType::GETADDR,
    NetMsgType::MEMPOOL,
    NetMsgType::PING,
    NetMsgType::PONG,
    NetMsgType::NOTFOUND,
    NetMsgType::FILTERLOAD,
    NetMsgType::FILTERADD,
    NetMsgType::FILTERCLEAR,
    NetMsgType::REJECT,
    NetMsgType::SENDHEADERS,
    NetMsgType::FEEFILTER,
    NetMsgType::SENDCMPCT,
    NetMsgType::CMPCTBLOCK,
    NetMsgType::GETBLOCKTXN,
    NetMsgType::BLOCKTXN,
    // Project-specific messages declared in NetMsgType above.
    NetMsgType::VOTE,
    NetMsgType::PUT_BLOCK,
    NetMsgType::PUT_VOTE,
};
const static std::vector<std::string> allNetMessageTypesVec(allNetMessageTypes, allNetMessageTypes+ARRAYLEN(allNetMessageTypes));
// Construct a header with only the network magic set; the command and
// checksum are zeroed. nMessageSize is set to -1 as an "unset" sentinel
// (presumably wrapping to the max value of its unsigned type -- confirm
// against the field's declaration in protocol.h).
CMessageHeader::CMessageHeader(const MessageStartChars& pchMessageStartIn)
{
    memcpy(pchMessageStart, pchMessageStartIn, MESSAGE_START_SIZE);
    memset(pchCommand, 0, sizeof(pchCommand));
    nMessageSize = -1;
    memset(pchChecksum, 0, CHECKSUM_SIZE);
}

// Construct a fully-populated header. Note: strncpy may leave pchCommand
// without a NUL terminator when pszCommand is COMMAND_SIZE long;
// GetCommand() and IsValid() both use bounded scans, so that is safe.
CMessageHeader::CMessageHeader(const MessageStartChars& pchMessageStartIn, const char* pszCommand, unsigned int nMessageSizeIn)
{
    memcpy(pchMessageStart, pchMessageStartIn, MESSAGE_START_SIZE);
    memset(pchCommand, 0, sizeof(pchCommand));
    strncpy(pchCommand, pszCommand, COMMAND_SIZE);
    nMessageSize = nMessageSizeIn;
    memset(pchChecksum, 0, CHECKSUM_SIZE);
}

// Returns the command as a string, stopping at the first NUL byte (or at
// COMMAND_SIZE when the buffer is not terminated).
std::string CMessageHeader::GetCommand() const
{
    return std::string(pchCommand, pchCommand + strnlen(pchCommand, COMMAND_SIZE));
}
// Validates a received header: the network magic must match, the command
// must be printable ASCII padded with trailing zeros only, and the
// declared payload size must not exceed MAX_SIZE.
bool CMessageHeader::IsValid(const MessageStartChars& pchMessageStartIn) const
{
    // Check start string
    if (memcmp(pchMessageStart, pchMessageStartIn, MESSAGE_START_SIZE) != 0)
        return false;

    // Check the command string for errors
    for (const char* p1 = pchCommand; p1 < pchCommand + COMMAND_SIZE; p1++)
    {
        if (*p1 == 0)
        {
            // Must be all zeros after the first zero
            for (; p1 < pchCommand + COMMAND_SIZE; p1++)
                if (*p1 != 0)
                    return false;
        }
        else if (*p1 < ' ' || *p1 > 0x7E)
            // Non-printable character in the command name.
            return false;
    }

    // Message size
    if (nMessageSize > MAX_SIZE)
    {
        LogPrintf("CMessageHeader::IsValid(): (%s, %u bytes) nMessageSize > MAX_SIZE\n", GetCommand(), nMessageSize);
        return false;
    }

    return true;
}
// Default-construct a network address with no advertised services.
CAddress::CAddress() : CService()
{
    Init();
}

// Construct from an endpoint plus the peer's advertised service bits.
CAddress::CAddress(CService ipIn, ServiceFlags nServicesIn) : CService(ipIn)
{
    Init();
    nServices = nServicesIn;
}

void CAddress::Init()
{
    nServices = NODE_NONE;
    // Placeholder timestamp (100000000 seconds ~ 1973); NOTE(review):
    // inherited from upstream Bitcoin as an "unknown time" default --
    // confirm before relying on the exact value.
    nTime = 100000000;
}
// Empty inventory entry: type 0 and a null hash.
CInv::CInv() : type(0)
{
    hash.SetNull();
}

// Inventory entry for a specific object type/hash pair.
CInv::CInv(int typeIn, const uint256& hashIn) : type(typeIn), hash(hashIn)
{
}

// Strict weak ordering: first by type, then by hash.
bool operator<(const CInv& a, const CInv& b)
{
    if (a.type != b.type)
        return a.type < b.type;
    return a.type == b.type && a.hash < b.hash;
}
// Maps the inventory type to its protocol command string, prefixing
// "witness-" when the witness flag bit is set. Throws std::out_of_range
// for unknown types.
std::string CInv::GetCommand() const
{
    std::string cmd;
    if (type & MSG_WITNESS_FLAG)
        cmd.append("witness-");
    // Mask off flag bits before dispatching on the base type.
    int masked = type & MSG_TYPE_MASK;
    switch (masked)
    {
    case MSG_TX:             return cmd.append(NetMsgType::TX);
    case MSG_BLOCK:          return cmd.append(NetMsgType::BLOCK);
    case MSG_FILTERED_BLOCK: return cmd.append(NetMsgType::MERKLEBLOCK);
    case MSG_CMPCT_BLOCK:    return cmd.append(NetMsgType::CMPCTBLOCK);
    default:
        throw std::out_of_range(strprintf("CInv::GetCommand(): type=%d unknown type", type));
    }
}

// Human-readable form; falls back to the raw hex type when GetCommand
// does not recognize it.
std::string CInv::ToString() const
{
    try {
        return strprintf("%s %s", GetCommand(), hash.ToString());
    } catch(const std::out_of_range &) {
        return strprintf("0x%08x %s", type, hash.ToString());
    }
}
// Accessor for the static list of every known message type string.
const std::vector<std::string> &getAllNetMessageTypes()
{
    return allNetMessageTypesVec;
}
|
// Read an INI file into easy-to-access name/value pairs.
// inih and INIReader are released under the New BSD license (see LICENSE.txt).
// Go to the project home page for more info:
//
// https://github.com/benhoyt/inih
#ifndef _CRT_SECURE_NO_WARNINGS
#define _CRT_SECURE_NO_WARNINGS
#endif
#include <algorithm>
#include <cctype>
#include <cstdarg>
#include <cstdio>
#include <cstdlib>
#include "ini.h"
#include "INIReader.h"
#include "../libconvert/libconvert.h"
using std::string;
// Default constructor: no data loaded, no parse error. Previously _error
// was left uninitialized here, so ParseError() returned an indeterminate
// value for default-constructed readers.
INIReader::INIReader()
{
    _error = 0;
}

// Parses the given INI file; ParseError() reports the ini_parse result.
INIReader::INIReader(const string& filename)
{
    _error = ini_parse(filename.c_str(), ValueHandler, this);
}

// Parses an in-memory INI string. A NULL pointer is treated as empty
// input (previously _error stayed uninitialized in that branch).
INIReader::INIReader(const char * s)
{
    _error = 0;
    if (s != NULL)
    {
        _error = ini_parse_string(s, ValueHandler, this);
    }
}
// Destructor: scrubs every stored section/key entry. NOTE(review): the
// vector members release their own storage on destruction, so this
// explicit clearing is redundant but harmless.
INIReader::~INIReader()
{
    for (size_t i = 0; i < map.iniSection.size(); i++)
    {
        for (size_t j = 0; j < map.iniSection[i].IniKey.size(); j++)
        {
            map.iniSection[i].IniKey[j].hash = 0;
            map.iniSection[i].IniKey[j].key = "";
            map.iniSection[i].IniKey[j].value = "";
        }
        map.iniSection[i].hash = 0;
        map.iniSection[i].section = "";
    }
}

// Result of the last parse; per inih's ini_parse convention this is 0 on
// success, the first offending line on syntax error, or -1 on file-open
// failure -- confirm against the bundled ini.h.
int INIReader::ParseError() const
{
    return _error;
}
// Case-insensitive lookup of section/name; returns default_value when the
// pair is absent. Matching is done purely on lower-cased string hashes,
// so distinct names that collide would be indistinguishable -- the code
// assumes collisions do not occur in practice.
string INIReader::Get(const string& section, const string& name, const string& default_value) const
{
    //string key = MakeKey(section, name);
    // Use _values.find() here instead of _values.at() to support pre C++11 compilers
    //string s = _values.count(key) ? _values.find(key)->second : default_value;
    std::string s;
    std::string sn;
    s = section;
    std::transform(s.begin(), s.end(), s.begin(), ::tolower);
    sn = name;
    std::transform(sn.begin(), sn.end(), sn.begin(), ::tolower);
    // Hash the lower-cased section and key names for comparison.
    unsigned int h = hashString(s);
    unsigned int hn = hashString(sn);
    for (size_t i = 0; i < map.iniSection.size(); i++)
    {
        if (h == map.iniSection[i].hash)
        {
            for (size_t j = 0; j < map.iniSection[i].IniKey.size(); j++)
            {
                if (hn == map.iniSection[i].IniKey[j].hash)
                {
                    // First hash match wins.
                    return map.iniSection[i].IniKey[j].value;
                }
            }
        }
    }
    return default_value;
}
// Inserts or updates section/name with the given value. Lookup is by
// lower-cased string hash (collisions assumed absent); the section and
// key strings are stored with their original casing. Missing sections
// and keys are appended.
void INIReader::Set(const std::string& section, const std::string& name,
    const std::string& value)
{
    int findSection = -1;
    int findName = -1;
    std::string s = section;
    std::string sn = name;
    std::transform(s.begin(), s.end(), s.begin(), ::tolower);
    std::transform(sn.begin(), sn.end(), sn.begin(), ::tolower);
    unsigned int h = hashString(s);
    unsigned int hn = hashString(sn);
    // Update every matching key in every matching section (normally one).
    for (size_t i = 0; i < map.iniSection.size(); i++)
    {
        if (map.iniSection[i].hash == h)
        {
            findSection = (int)i;
            for (size_t j = 0; j < map.iniSection[i].IniKey.size(); j++)
            {
                if (hn == map.iniSection[i].IniKey[j].hash)
                {
                    findName = j;
                    map.iniSection[i].IniKey[j].value = value;
                }
            }
        }
    }
    if (findSection < 0)
    {
        // Section not present: append a new one.
        map.iniSection.resize(map.iniSection.size() + 1);
        map.iniSection[map.iniSection.size() - 1].hash = h;
        map.iniSection[map.iniSection.size() - 1].section = section;
        findSection = map.iniSection.size() - 1;
    }
    if (findName < 0)
    {
        // Key not present in the (possibly new) section: append it.
        map.iniSection[findSection].IniKey.resize(map.iniSection[findSection].IniKey.size() + 1);
        findName = map.iniSection[findSection].IniKey.size() - 1;
        map.iniSection[findSection].IniKey[findName].hash = hn;
        map.iniSection[findSection].IniKey[findName].key = name;
        map.iniSection[findSection].IniKey[findName].value = value;
    }
}
// Stores a long value as text. Previously the format string was "%d",
// which is undefined behavior for a long argument on LP64 platforms
// (long is 64-bit, %d consumes 32); "%ld" matches the argument type.
void INIReader::SetInteger(const std::string & section, const std::string & name, long value)
{
    std::string v = formatString("%ld", value);
    Set(section, name, v);
}

// Stores a double value as text ("%lf" is equivalent to "%f" for printf).
void INIReader::SetReal(const std::string & section, const std::string & name, double value)
{
    std::string v = formatString("%lf", value);
    Set(section, name, v);
}

// Stores a boolean as "1"/"0", the forms GetBoolean() accepts.
void INIReader::SetBoolean(const std::string & section, const std::string & name, bool value)
{
    std::string v = value ? "1" : "0";
    Set(section, name, v);
}
unsigned int INIReader::GetColor(const std::string & section, const std::string & name, unsigned int value)
{
unsigned char colorData[3] = { unsigned char((value & 0xFF0000) >> 16) ,unsigned char((value & 0xFF00) >> 8) , unsigned char((value & 0xFF)) };
std::string col = convert::formatString("%d", colorData[0]) + "," + convert::formatString("%d", colorData[1]) + "," + convert::formatString("%d", colorData[2]);
col = Get(section, name, col);
std::vector<std::string> c = convert::splitString(col, ",");
for (size_t i = 0; i < (c.size() > 3 ? 3 : c.size()); i++)
{
//char* end = NULL;
//long n = strtol(c[i].c_str(), &end, 0);
//colorData[i] = (unsigned char)(end > c[i].c_str() ? n : colorData[i]);
colorData[i] = (unsigned char)atoi(c[i].c_str());
}
return 0xFF000000 | ((colorData[0] << 16) + (colorData[1] << 8) + colorData[2]);
}
// Returns the value parsed as a long (decimal or 0x-prefixed hex), or
// default_value when the key is missing or not numeric. The old
// `value == NULL` guard was dead code -- std::string::c_str() never
// returns a null pointer -- and has been removed.
long INIReader::GetInteger(const string& section, const string& name, long default_value) const
{
    string valstr = Get(section, name, "");
    const char* value = valstr.c_str();
    char* end;
    // This parses "1234" (decimal) and also "0x4D2" (hex)
    long n = strtol(value, &end, 0);
    // strtol leaves end == value when nothing was consumed.
    return end > value ? n : default_value;
}

// Returns the value parsed as a double, or default_value when the key is
// missing or not numeric. Same dead null-check removal as GetInteger.
double INIReader::GetReal(const string& section, const string& name, double default_value) const
{
    string valstr = Get(section, name, "");
    const char* value = valstr.c_str();
    char* end;
    double n = strtod(value, &end);
    return end > value ? n : default_value;
}
// Returns the value interpreted as a boolean. Accepts true/yes/on/1 and
// false/no/off/0 (case-insensitive); anything else yields default_value.
bool INIReader::GetBoolean(const string& section, const string& name, bool default_value) const
{
    string valstr = Get(section, name, "");
    // Convert to lower case to make string comparisons case-insensitive
    std::transform(valstr.begin(), valstr.end(), valstr.begin(), ::tolower);
    if (valstr == "true" || valstr == "yes" || valstr == "on" || valstr == "1")
        return true;
    if (valstr == "false" || valstr == "no" || valstr == "off" || valstr == "0")
        return false;
    return default_value;
}
// Serializes every section and key back into INI text with CRLF line
// endings, in insertion order.
std::string INIReader::saveToString()
{
    std::string s = "";
    for (size_t i = 0; i < map.iniSection.size(); i++)
    {
        s += "[" + map.iniSection[i].section + "]\r\n";
        for (size_t j = 0; j < map.iniSection[i].IniKey.size(); j++)
        {
            s += map.iniSection[i].IniKey[j].key + "=" + map.iniSection[i].IniKey[j].value + "\r\n";
        }
        s += "\r\n";
    }
    return s;
}

// Writes the serialized INI text to fileName, reporting to stderr when
// the file cannot be opened or the write comes up short.
void INIReader::saveToFile(const std::string & fileName)
{
    std::string s = saveToString();
    FILE * fp = fopen(fileName.c_str(), "wb");
    if (!fp)
    {
        fprintf(stderr, "Can not open file %s\n", fileName.c_str());
        return;
    }
    // fopen("wb") already truncates and positions at offset 0, so the old
    // fseek call was redundant. Also check that the write succeeded.
    if (!s.empty() && fwrite(s.c_str(), s.length(), 1, fp) != 1)
    {
        fprintf(stderr, "Can not write file %s\n", fileName.c_str());
    }
    fclose(fp);
}
// Builds the legacy "section=name" lookup key, folded to lower case so
// that section/name lookups are case-insensitive.
string INIReader::MakeKey(const string& section, const string& name)
{
    string combined = section + "=" + name;
    std::transform(combined.begin(), combined.end(), combined.begin(), ::tolower);
    return combined;
}
// ini_parse callback: stores each parsed (section, name, value) triple
// into the reader passed through `user`. Returns nonzero to tell inih to
// keep parsing.
int INIReader::ValueHandler(void* user, const char* section, const char* name,
    const char* value)
{
    INIReader* reader = (INIReader*)user;
    std::string strSection = section;
    std::string strName = name;
    std::string strValue = value;
    reader->Set(strSection, strName, strValue);
    //string key = MakeKey(section, name);
    //if (reader->_values[key].size() > 0)
    //    reader->_values[key] += "\n";
    //reader->_values[key] += value;
    return 1;
}
#ifdef _MSC_VER
#define vsprintf vsprintf_s
#endif
// printf-style formatting into a std::string. The original used vsprintf
// into a fixed 1000-byte stack buffer, which overflows for long results;
// vsnprintf bounds the write (output longer than the buffer is truncated).
std::string INIReader::formatString(const char * format, ...)
{
    char s[1000];
    va_list arg_ptr;
    va_start(arg_ptr, format);
    vsnprintf(s, sizeof(s), format, arg_ptr);
    va_end(arg_ptr);
    return s;
}
|
// Copyright (c) 2021, Kai Wolf - SW Consulting. All rights reserved.
// For the licensing terms see LICENSE file in the root directory. For the
// list of contributors see the AUTHORS file in the same directory.
#include "kwctoolkit/system/environment.h"
#include <gtest/gtest.h>
using namespace kwc::system;
// Setting a variable makes it visible and readable with the same value.
TEST(EnvironmentTest, SetEnvironmentVariable) {
    std::unique_ptr<Environment> env(Environment::create());
    const char foobar_upper[] = "FOOBAR";
    const char foobar_lower[] = "foobar";
    EXPECT_TRUE(env->setEnvVar(foobar_upper, foobar_lower));
    EXPECT_TRUE(env->hasEnvVar(foobar_upper));
    std::string value;
    EXPECT_TRUE(env->getEnvVar(foobar_upper, &value));
    ASSERT_EQ(value, foobar_lower);
}

// PATH is assumed to exist in any test environment.
TEST(EnvironmentTest, HasEnvironmentVariable) {
    std::unique_ptr<Environment> env(Environment::create());
    ASSERT_TRUE(env->hasEnvVar("PATH"));
}

// Unsetting a previously-set variable removes it.
TEST(EnvironmentTest, UnsetEnvironmentVariable) {
    std::unique_ptr<Environment> env(Environment::create());
    const char foobar_upper[] = "FOOBAR";
    const char foobar_lower[] = "foobar";
    // first set the environment variable
    EXPECT_TRUE(env->setEnvVar(foobar_upper, foobar_lower));
    EXPECT_TRUE(env->hasEnvVar(foobar_upper));
    EXPECT_TRUE(env->unsetEnvVar(foobar_upper));
    ASSERT_FALSE(env->hasEnvVar(foobar_upper));
}
// A variable set in one case should be retrievable via the other case,
// in both directions. NOTE(review): this relies on getEnvVar falling back
// to the alternate-case name -- confirm Environment implements that on
// every supported platform (plain POSIX getenv is case-sensitive).
TEST(EnvironmentTest, GetReverseEnvironmentVariable) {
    std::unique_ptr<Environment> env(Environment::create());
    const char foobar_upper[] = "FOOBAR";
    const char foobar_lower[] = "foobar";
    // set variable in UPPER case
    EXPECT_TRUE(env->setEnvVar(foobar_upper, foobar_lower));
    // now try to get this variable passing the lower case
    std::string env_value;
    EXPECT_TRUE(env->getEnvVar(foobar_lower, &env_value));
    ASSERT_EQ(env_value, foobar_lower);
    EXPECT_TRUE(env->unsetEnvVar(foobar_upper));
    const char bar[] = "bar";
    // now for the opposite
    EXPECT_TRUE(env->setEnvVar(foobar_lower, bar));
    EXPECT_TRUE(env->getEnvVar(foobar_upper, &env_value));
    ASSERT_EQ(env_value, bar);
    EXPECT_TRUE(env->unsetEnvVar(foobar_lower));
}
|
// OJ: https://leetcode.com/problems/clone-graph
// Author: github.com/lzl124631x
// Time: O(N)
// Space: O(N)
class Solution {
private:
    // label -> already-cloned node; registered before recursing so cycles
    // terminate.
    unordered_map<int, UndirectedGraphNode*> cloned;
public:
    // Deep-copies the connected graph reachable from `node`.
    UndirectedGraphNode *cloneGraph(UndirectedGraphNode *node) {
        if (node == NULL) return NULL;
        UndirectedGraphNode *dup = new UndirectedGraphNode(node->label);
        cloned[node->label] = dup;
        for (auto neighbor : node->neighbors) {
            auto it = cloned.find(neighbor->label);
            dup->neighbors.push_back(it != cloned.end() ? it->second : cloneGraph(neighbor));
        }
        return dup;
    }
};
|
#include <iostream>
#include <cstdio>
#include <algorithm>
#include <cstdlib>
#include <cstring>
using std::cerr;
using std::cin;
using std::cout;
using std::endl;
// Prefix sums of the pile sizes; the circle is handled by duplicating the
// input, so sum[1..2n] covers every rotation window.
int sum[310];
// f1[i][j]: minimum cost to merge piles i..j into one; f2[i][j]: maximum cost.
int f1[310][310];
int f2[310][310];
int n;
int main()
{
while (cin >> n)
{
sum[0] = 0;
// 0x3f3f3f3f acts as "infinity" for the minimisation table.
std::memset(f1, 0x3f, sizeof(f1));
std::memset(f2, 0, sizeof(f2));
for (int i = 1; i <= n; i++)
{
cin >> sum[i];
// Duplicate the circle: pile i appears again at position i+n.
sum[i+n] = sum [i];
}
for (int i = 1; i <= n*2 ; i++)
{
// Convert raw sizes to prefix sums; a single pile needs no merging.
sum[i] += sum[i - 1];
f1[i][i] = 0;
}
// Interval DP over every window length l on the doubled array.
for (int l = 2; l <= 2*n; l++)
{
int i = 1;
int j = l;
while (j <= n*2)
{
for (int k = i; k <= j - 1; k++)
{
// Merging [i..k] with [k+1..j] costs the total size of [i..j].
f1[i][j] = std::min(f1[i][j], f1[i][k] + f1[k + 1][j] + sum[j] - sum[i - 1]);
f2[i][j] = std::max(f2[i][j], f2[i][k] + f2[k + 1][j] + sum[j] - sum[i - 1]);
}
i++;
j++;
}
}
// Best window over all start points of the circle.
// NOTE(review): f1[i][i+n] spans n+1 positions of the doubled array; the
// usual circular formulation uses f1[i][i+n-1] — verify against the
// original problem statement.
int mn=99999999,mx=-99999999;
for (int i = 1; i <= n; i++)
{
mn = std::min(mn,f1[i][i+n]);
mx = std::max(mx,f2[i][i+n]);
}
cout<<mn<<' '<<mx<<endl;
}
return 0;
}
|
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/importer/importer_unittest_utils.h"
#include "base/utf_string_conversions.h"
#include "chrome/test/base/testing_profile.h"
// Returns true when |entry| matches |expected| on toolbar placement, folder
// path length and components, URL and title.
bool EqualBookmarkEntry(const ProfileWriter::BookmarkEntry& entry,
                        const BookmarkInfo& expected) {
  if (expected.in_toolbar != entry.in_toolbar)
    return false;
  if (expected.path_size != entry.path.size())
    return false;
  if (expected.url != entry.url.spec())
    return false;
  if (WideToUTF16Hack(expected.title) != entry.title)
    return false;
  // Compare each folder-path component after converting to UTF-16.
  size_t i = 0;
  while (i < expected.path_size) {
    if (WideToUTF16Hack(expected.path[i]) != entry.path[i])
      return false;
    ++i;
  }
  return true;
}
// Scans the |list_size| elements of |list| for one equal to |entry|.
bool FindBookmarkEntry(const ProfileWriter::BookmarkEntry& entry,
                       const BookmarkInfo* list, int list_size) {
  for (const BookmarkInfo* it = list; it != list + list_size; ++it) {
    if (EqualBookmarkEntry(entry, *it))
      return true;
  }
  return false;
}
// Binds both the UI and FILE BrowserThreads to the single test message loop
// so importer code that hops threads runs synchronously in the test.
ImporterTest::ImporterTest()
: profile_(new TestingProfile()),
ui_thread_(content::BrowserThread::UI, &message_loop_),
file_thread_(content::BrowserThread::FILE, &message_loop_) {
}
ImporterTest::~ImporterTest() {
// Release the profile explicitly before the threads/message loop go away.
profile_.reset(NULL);
}
void ImporterTest::SetUp() {
// Each test gets a fresh scratch directory, cleaned up automatically.
ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
}
|
#ifndef BOOST_MP11_TUPLE_HPP_INCLUDED
#define BOOST_MP11_TUPLE_HPP_INCLUDED
// Copyright 2015-2020 Peter Dimov.
//
// Distributed under the Boost Software License, Version 1.0.
//
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
#include <boost/mp11/integer_sequence.hpp>
#include <boost/mp11/list.hpp>
#include <boost/mp11/function.hpp>
#include <boost/mp11/detail/config.hpp>
#include <tuple>
#include <utility>
#include <type_traits>
#include <cstddef>
#if BOOST_MP11_MSVC
# pragma warning( push )
# pragma warning( disable: 4100 ) // unreferenced formal parameter 'tp'
#endif
namespace boost
{
namespace mp11
{
// tuple_apply
namespace detail
{
// Applies f to the tuple elements selected by the index pack J...,
// preserving the value category of both f and tp.
template<class F, class Tp, std::size_t... J> BOOST_MP11_CONSTEXPR auto tuple_apply_impl( F && f, Tp && tp, integer_sequence<std::size_t, J...> )
-> decltype( std::forward<F>(f)( std::get<J>(std::forward<Tp>(tp))... ) )
{
return std::forward<F>(f)( std::get<J>(std::forward<Tp>(tp))... );
}
} // namespace detail
// Invokes f with the elements of tuple tp as arguments (like std::apply).
// Seq defaults to the full index sequence of the (unqualified) tuple type.
template<class F, class Tp,
class Seq = make_index_sequence<std::tuple_size<typename std::remove_reference<Tp>::type>::value>>
BOOST_MP11_CONSTEXPR auto tuple_apply( F && f, Tp && tp )
-> decltype( detail::tuple_apply_impl( std::forward<F>(f), std::forward<Tp>(tp), Seq() ) )
{
return detail::tuple_apply_impl( std::forward<F>(f), std::forward<Tp>(tp), Seq() );
}
// construct_from_tuple
namespace detail
{
// Direct-initializes a T from the tuple elements selected by J...
template<class T, class Tp, std::size_t... J> BOOST_MP11_CONSTEXPR T construct_from_tuple_impl( Tp && tp, integer_sequence<std::size_t, J...> )
{
return T( std::get<J>(std::forward<Tp>(tp))... );
}
} // namespace detail
// Constructs a T using the elements of tuple tp as constructor arguments
// (like std::make_from_tuple).
template<class T, class Tp,
class Seq = make_index_sequence<std::tuple_size<typename std::remove_reference<Tp>::type>::value>>
BOOST_MP11_CONSTEXPR T construct_from_tuple( Tp && tp )
{
return detail::construct_from_tuple_impl<T>( std::forward<Tp>(tp), Seq() );
}
// tuple_for_each
namespace detail
{
// Calls f on each selected element; the braced array initializer guarantees
// left-to-right evaluation order. Returns the (forwarded) f.
template<class Tp, std::size_t... J, class F> BOOST_MP11_CONSTEXPR F tuple_for_each_impl( Tp && tp, integer_sequence<std::size_t, J...>, F && f )
{
using A = int[sizeof...(J)];
return (void)A{ ((void)f(std::get<J>(std::forward<Tp>(tp))), 0)... }, std::forward<F>(f);
}
// Overload for the empty tuple: a zero-sized array would be ill-formed.
template<class Tp, class F> BOOST_MP11_CONSTEXPR F tuple_for_each_impl( Tp && /*tp*/, integer_sequence<std::size_t>, F && f )
{
return std::forward<F>(f);
}
} // namespace detail
// Applies f to every element of tuple tp, in order, and returns f.
template<class Tp, class F> BOOST_MP11_CONSTEXPR F tuple_for_each( Tp && tp, F && f )
{
using seq = make_index_sequence<std::tuple_size<typename std::remove_reference<Tp>::type>::value>;
return detail::tuple_for_each_impl( std::forward<Tp>(tp), seq(), std::forward<F>(f) );
}
// tuple_transform
namespace detail
{
// std::forward_as_tuple is not constexpr in C++11 or libstdc++ 5.x
// Constexpr stand-in: builds a tuple of (possibly rvalue) references.
template<class... T> BOOST_MP11_CONSTEXPR auto tp_forward_r( T&&... t ) -> std::tuple<T&&...>
{
return std::tuple<T&&...>( std::forward<T>( t )... );
}
// Builds a tuple of values (decayed element types) from the arguments.
template<class... T> BOOST_MP11_CONSTEXPR auto tp_forward_v( T&&... t ) -> std::tuple<T...>
{
return std::tuple<T...>( std::forward<T>( t )... );
}
// Collects the J-th element of every input tuple into a reference tuple.
template<std::size_t J, class... Tp>
BOOST_MP11_CONSTEXPR auto tp_extract( Tp&&... tp )
-> decltype( tp_forward_r( std::get<J>( std::forward<Tp>( tp ) )... ) )
{
return tp_forward_r( std::get<J>( std::forward<Tp>( tp ) )... );
}
#if !BOOST_MP11_WORKAROUND( BOOST_MP11_MSVC, < 1900 )
// General case: for each index J, apply f to the J-th elements of all tuples.
template<class F, class... Tp, std::size_t... J>
BOOST_MP11_CONSTEXPR auto tuple_transform_impl( integer_sequence<std::size_t, J...>, F const& f, Tp&&... tp )
-> decltype( tp_forward_v( tuple_apply( f, tp_extract<J>( std::forward<Tp>(tp)... ) )... ) )
{
return tp_forward_v( tuple_apply( f, tp_extract<J>( std::forward<Tp>(tp)... ) )... );
}
#else
// Pre-VS2015 MSVC cannot expand two packs at once; provide fixed-arity
// overloads for one, two and three input tuples instead.
template<class F, class Tp1, std::size_t... J>
BOOST_MP11_CONSTEXPR auto tuple_transform_impl( integer_sequence<std::size_t, J...>, F const& f, Tp1&& tp1 )
-> decltype( tp_forward_v( f( std::get<J>( std::forward<Tp1>(tp1) ) )... ) )
{
return tp_forward_v( f( std::get<J>( std::forward<Tp1>(tp1) ) )... );
}
template<class F, class Tp1, class Tp2, std::size_t... J>
BOOST_MP11_CONSTEXPR auto tuple_transform_impl( integer_sequence<std::size_t, J...>, F const& f, Tp1&& tp1, Tp2&& tp2 )
-> decltype( tp_forward_v( f( std::get<J>( std::forward<Tp1>(tp1) ), std::get<J>( std::forward<Tp2>(tp2) ) )... ) )
{
return tp_forward_v( f( std::get<J>( std::forward<Tp1>(tp1) ), std::get<J>( std::forward<Tp2>(tp2) ) )... );
}
template<class F, class Tp1, class Tp2, class Tp3, std::size_t... J>
BOOST_MP11_CONSTEXPR auto tuple_transform_impl( integer_sequence<std::size_t, J...>, F const& f, Tp1&& tp1, Tp2&& tp2, Tp3&& tp3 )
-> decltype( tp_forward_v( f( std::get<J>( std::forward<Tp1>(tp1) ), std::get<J>( std::forward<Tp2>(tp2) ), std::get<J>( std::forward<Tp3>(tp3) ) )... ) )
{
return tp_forward_v( f( std::get<J>( std::forward<Tp1>(tp1) ), std::get<J>( std::forward<Tp2>(tp2) ), std::get<J>( std::forward<Tp3>(tp3) ) )... );
}
#endif // !BOOST_MP11_WORKAROUND( BOOST_MP11_MSVC, < 1900 )
} // namespace detail
#if BOOST_MP11_WORKAROUND( BOOST_MP11_MSVC, < 1910 )
// Old-MSVC variant: derives the index sequence from the first tuple only
// (no compile-time check that all tuples have equal size).
template<class F, class Tp1, class... Tp,
class Seq = make_index_sequence<std::tuple_size<typename std::remove_reference<Tp1>::type>::value>>
BOOST_MP11_CONSTEXPR auto tuple_transform( F const& f, Tp1&& tp1, Tp&&... tp )
-> decltype( detail::tuple_transform_impl( Seq(), f, std::forward<Tp1>(tp1), std::forward<Tp>(tp)... ) )
{
return detail::tuple_transform_impl( Seq(), f, std::forward<Tp1>(tp1), std::forward<Tp>(tp)... );
}
#else
// Applies f element-wise across the input tuples and returns a tuple of the
// results. E (via mp_if/mp_same) SFINAEs this out unless every tuple has the
// same size.
template<class F, class... Tp,
class Z = mp_list<mp_size_t<std::tuple_size<typename std::remove_reference<Tp>::type>::value>...>,
class E = mp_if<mp_apply<mp_same, Z>, mp_front<Z>>,
class Seq = make_index_sequence<E::value>>
BOOST_MP11_CONSTEXPR auto tuple_transform( F const& f, Tp&&... tp )
-> decltype( detail::tuple_transform_impl( Seq(), f, std::forward<Tp>(tp)... ) )
{
return detail::tuple_transform_impl( Seq(), f, std::forward<Tp>(tp)... );
}
#endif // BOOST_MP11_WORKAROUND( BOOST_MP11_MSVC, < 1910 )
} // namespace mp11
} // namespace boost
#if BOOST_MP11_MSVC
# pragma warning( pop )
#endif
#endif // #ifndef BOOST_MP11_TUPLE_HPP_INCLUDED
|
/**
* firewall API
* Firewall Service
*
* OpenAPI spec version: 2.0
*
* NOTE: This class is auto generated by the swagger code generator program.
* https://github.com/polycube-network/swagger-codegen.git
* branch polycube
*/
/* Do not edit this file manually */
#include "FirewallJsonObject.h"
#include <regex>
namespace io {
namespace swagger {
namespace server {
namespace model {
// Initializes every "IsSet" flag to false and applies the defaults used
// before the corresponding JSON field is parsed: type TC, loglevel INFO,
// interactive mode enabled.
FirewallJsonObject::FirewallJsonObject() {
m_nameIsSet = false;
m_uuidIsSet = false;
m_type = CubeType::TC;
m_typeIsSet = false;
m_loglevel = FirewallLoglevelEnum::INFO;
m_loglevelIsSet = false;
m_portsIsSet = false;
m_ingressPortIsSet = false;
m_egressPortIsSet = false;
m_conntrackIsSet = false;
m_acceptEstablishedIsSet = false;
m_interactive = true;
m_interactiveIsSet = false;
m_sessionTableIsSet = false;
m_chainIsSet = false;
}
FirewallJsonObject::~FirewallJsonObject() {}
// Ensures every key field was provided; 'name' is the only key.
void FirewallJsonObject::validateKeys() {
if (!m_nameIsSet) {
throw std::runtime_error("Variable name is required");
}
}
// This object has no mandatory non-key fields.
void FirewallJsonObject::validateMandatoryFields() {
}
// Checks format constraints: if set, uuid must match the RFC-4122-style
// UUID pattern below.
void FirewallJsonObject::validateParams() {
if (m_uuidIsSet) {
std::string patter_value = R"PATTERN([0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12})PATTERN";
std::regex e (patter_value);
if (!std::regex_match(m_uuid, e))
throw std::runtime_error("Variable uuid has not a valid format");
}
}
// Serializes the object to JSON. Scalar fields are emitted only when their
// "IsSet" flag is true; list fields are emitted only when non-empty.
nlohmann::json FirewallJsonObject::toJson() const {
nlohmann::json val = nlohmann::json::object();
if (m_nameIsSet) {
val["name"] = m_name;
}
if (m_uuidIsSet) {
val["uuid"] = m_uuid;
}
if (m_typeIsSet) {
val["type"] = CubeType_to_string(m_type);
}
if (m_loglevelIsSet) {
val["loglevel"] = FirewallLoglevelEnum_to_string(m_loglevel);
}
{
// Serialize each port; skip the key entirely when the list is empty.
nlohmann::json jsonArray;
for (auto& item : m_ports) {
jsonArray.push_back(JsonObjectBase::toJson(item));
}
if (jsonArray.size() > 0) {
val["ports"] = jsonArray;
}
}
if (m_ingressPortIsSet) {
val["ingress-port"] = m_ingressPort;
}
if (m_egressPortIsSet) {
val["egress-port"] = m_egressPort;
}
if (m_conntrackIsSet) {
val["conntrack"] = FirewallConntrackEnum_to_string(m_conntrack);
}
if (m_acceptEstablishedIsSet) {
val["accept-established"] = FirewallAcceptEstablishedEnum_to_string(m_acceptEstablished);
}
if (m_interactiveIsSet) {
val["interactive"] = m_interactive;
}
{
// Session table entries, only when present.
nlohmann::json jsonArray;
for (auto& item : m_sessionTable) {
jsonArray.push_back(JsonObjectBase::toJson(item));
}
if (jsonArray.size() > 0) {
val["session-table"] = jsonArray;
}
}
{
// Chains, only when present.
nlohmann::json jsonArray;
for (auto& item : m_chain) {
jsonArray.push_back(JsonObjectBase::toJson(item));
}
if (jsonArray.size() > 0) {
val["chain"] = jsonArray;
}
}
return val;
}
// Populates this object from JSON. Unknown keys are rejected with an
// exception before any state is modified; missing keys leave the
// corresponding field (and its "IsSet" flag) untouched.
void FirewallJsonObject::fromJson(nlohmann::json& val) {
  for(nlohmann::json::iterator it = val.begin(); it != val.end(); ++it) {
    std::string key = it.key();
    bool found = (std::find(allowedParameters_.begin(), allowedParameters_.end(), key) != allowedParameters_.end());
    if (!found) {
      // Fix: the original had an unreachable `return;` after this throw.
      throw std::runtime_error(key + " is not a valid parameter");
    }
  }
  if (val.find("name") != val.end()) {
    setName(val.at("name"));
  }
  if (val.find("uuid") != val.end()) {
    setUuid(val.at("uuid"));
  }
  if (val.find("type") != val.end()) {
    setType(string_to_CubeType(val.at("type")));
  }
  if (val.find("loglevel") != val.end()) {
    setLoglevel(string_to_FirewallLoglevelEnum(val.at("loglevel")));
  }
  // Fix: guard the array fields with find(). The original used
  // val["ports"] etc., which default-inserts a null entry into the
  // caller's json object when the key is absent.
  m_ports.clear();
  if (val.find("ports") != val.end()) {
    for (auto& item : val.at("ports")) {
      PortsJsonObject newItem;
      newItem.fromJson(item);
      m_ports.push_back(newItem);
      m_portsIsSet = true;
    }
  }
  if (val.find("ingress-port") != val.end()) {
    setIngressPort(val.at("ingress-port"));
  }
  if (val.find("egress-port") != val.end()) {
    setEgressPort(val.at("egress-port"));
  }
  if (val.find("conntrack") != val.end()) {
    setConntrack(string_to_FirewallConntrackEnum(val.at("conntrack")));
  }
  if (val.find("accept-established") != val.end()) {
    setAcceptEstablished(string_to_FirewallAcceptEstablishedEnum(val.at("accept-established")));
  }
  if (val.find("interactive") != val.end()) {
    setInteractive(val.at("interactive"));
  }
  m_sessionTable.clear();
  if (val.find("session-table") != val.end()) {
    for (auto& item : val.at("session-table")) {
      SessionTableJsonObject newItem;
      newItem.fromJson(item);
      m_sessionTable.push_back(newItem);
      m_sessionTableIsSet = true;
    }
  }
  m_chain.clear();
  if (val.find("chain") != val.end()) {
    for (auto& item : val.at("chain")) {
      ChainJsonObject newItem;
      newItem.fromJson(item);
      m_chain.push_back(newItem);
      m_chainIsSet = true;
    }
  }
}
// Describes the key fields of this object for the CLI/REST help output.
nlohmann::json FirewallJsonObject::helpKeys() {
nlohmann::json val = nlohmann::json::object();
val["name"]["name"] = "name";
val["name"]["type"] = "key";
val["name"]["simpletype"] = "string";
val["name"]["description"] = R"POLYCUBE(Name of the firewall service)POLYCUBE";
val["name"]["example"] = R"POLYCUBE(firewall1)POLYCUBE";
return val;
}
// Describes every element of this object for the CLI/REST help output.
// Fix: removed the dead `["type"] = "leaf"` assignments for "ports",
// "session-table" and "chain" that were immediately overwritten by "list".
nlohmann::json FirewallJsonObject::helpElements() {
  nlohmann::json val = nlohmann::json::object();
  val["uuid"]["name"] = "uuid";
  val["uuid"]["type"] = "leaf"; // Suppose that type is leaf
  val["uuid"]["simpletype"] = "string";
  val["uuid"]["description"] = R"POLYCUBE(UUID of the Cube)POLYCUBE";
  val["uuid"]["example"] = R"POLYCUBE()POLYCUBE";
  val["type"]["name"] = "type";
  val["type"]["type"] = "leaf"; // Suppose that type is leaf
  val["type"]["simpletype"] = "string";
  val["type"]["description"] = R"POLYCUBE(Type of the Cube (TC, XDP_SKB, XDP_DRV))POLYCUBE";
  val["type"]["example"] = R"POLYCUBE(TC)POLYCUBE";
  val["loglevel"]["name"] = "loglevel";
  val["loglevel"]["type"] = "leaf"; // Suppose that type is leaf
  val["loglevel"]["simpletype"] = "string";
  val["loglevel"]["description"] = R"POLYCUBE(Defines the logging level of a service instance, from none (OFF) to the most verbose (TRACE))POLYCUBE";
  val["loglevel"]["example"] = R"POLYCUBE(INFO)POLYCUBE";
  val["ports"]["name"] = "ports";
  val["ports"]["type"] = "list";
  val["ports"]["description"] = R"POLYCUBE(Entry of the ports table)POLYCUBE";
  val["ports"]["example"] = R"POLYCUBE()POLYCUBE";
  val["ingress-port"]["name"] = "ingress-port";
  val["ingress-port"]["type"] = "leaf"; // Suppose that type is leaf
  val["ingress-port"]["simpletype"] = "string";
  val["ingress-port"]["description"] = R"POLYCUBE(Name for the ingress port, from which arrives traffic processed by INGRESS chain (by default it's the first port of the cube))POLYCUBE";
  val["ingress-port"]["example"] = R"POLYCUBE()POLYCUBE";
  val["egress-port"]["name"] = "egress-port";
  val["egress-port"]["type"] = "leaf"; // Suppose that type is leaf
  val["egress-port"]["simpletype"] = "string";
  val["egress-port"]["description"] = R"POLYCUBE(Name for the egress port, from which arrives traffic processed by EGRESS chain (by default it's the second port of the cube))POLYCUBE";
  val["egress-port"]["example"] = R"POLYCUBE()POLYCUBE";
  val["conntrack"]["name"] = "conntrack";
  val["conntrack"]["type"] = "leaf"; // Suppose that type is leaf
  val["conntrack"]["simpletype"] = "string";
  val["conntrack"]["description"] = R"POLYCUBE(Enables the Connection Tracking module. Mandatory if connection tracking rules are needed. Default is ON.)POLYCUBE";
  val["conntrack"]["example"] = R"POLYCUBE()POLYCUBE";
  val["accept-established"]["name"] = "accept-established";
  val["accept-established"]["type"] = "leaf"; // Suppose that type is leaf
  val["accept-established"]["simpletype"] = "string";
  val["accept-established"]["description"] = R"POLYCUBE(If Connection Tracking is enabled, all packets belonging to ESTABLISHED connections will be forwarded automatically. Default is ON.)POLYCUBE";
  val["accept-established"]["example"] = R"POLYCUBE()POLYCUBE";
  val["interactive"]["name"] = "interactive";
  val["interactive"]["type"] = "leaf"; // Suppose that type is leaf
  val["interactive"]["simpletype"] = "boolean";
  val["interactive"]["description"] = R"POLYCUBE(Interactive mode applies new rules immediately; if 'false', the command 'apply-rules' has to be used to apply all the rules at once. Default is TRUE.)POLYCUBE";
  val["interactive"]["example"] = R"POLYCUBE()POLYCUBE";
  val["session-table"]["name"] = "session-table";
  val["session-table"]["type"] = "list";
  val["session-table"]["description"] = R"POLYCUBE()POLYCUBE";
  val["session-table"]["example"] = R"POLYCUBE()POLYCUBE";
  val["chain"]["name"] = "chain";
  val["chain"]["type"] = "list";
  val["chain"]["description"] = R"POLYCUBE()POLYCUBE";
  val["chain"]["example"] = R"POLYCUBE()POLYCUBE";
  return val;
}
// Describes only the leaf fields that can be modified after creation.
nlohmann::json FirewallJsonObject::helpWritableLeafs() {
nlohmann::json val = nlohmann::json::object();
val["loglevel"]["name"] = "loglevel";
val["loglevel"]["simpletype"] = "string";
val["loglevel"]["description"] = R"POLYCUBE(Defines the logging level of a service instance, from none (OFF) to the most verbose (TRACE))POLYCUBE";
val["loglevel"]["example"] = R"POLYCUBE(INFO)POLYCUBE";
val["ingress-port"]["name"] = "ingress-port";
val["ingress-port"]["simpletype"] = "string";
val["ingress-port"]["description"] = R"POLYCUBE(Name for the ingress port, from which arrives traffic processed by INGRESS chain (by default it's the first port of the cube))POLYCUBE";
val["ingress-port"]["example"] = R"POLYCUBE()POLYCUBE";
val["egress-port"]["name"] = "egress-port";
val["egress-port"]["simpletype"] = "string";
val["egress-port"]["description"] = R"POLYCUBE(Name for the egress port, from which arrives traffic processed by EGRESS chain (by default it's the second port of the cube))POLYCUBE";
val["egress-port"]["example"] = R"POLYCUBE()POLYCUBE";
val["conntrack"]["name"] = "conntrack";
val["conntrack"]["simpletype"] = "string";
val["conntrack"]["description"] = R"POLYCUBE(Enables the Connection Tracking module. Mandatory if connection tracking rules are needed. Default is ON.)POLYCUBE";
val["conntrack"]["example"] = R"POLYCUBE()POLYCUBE";
val["accept-established"]["name"] = "accept-established";
val["accept-established"]["simpletype"] = "string";
val["accept-established"]["description"] = R"POLYCUBE(If Connection Tracking is enabled, all packets belonging to ESTABLISHED connections will be forwarded automatically. Default is ON.)POLYCUBE";
val["accept-established"]["example"] = R"POLYCUBE()POLYCUBE";
val["interactive"]["name"] = "interactive";
val["interactive"]["simpletype"] = "boolean";
val["interactive"]["description"] = R"POLYCUBE(Interactive mode applies new rules immediately; if 'false', the command 'apply-rules' has to be used to apply all the rules at once. Default is TRUE.)POLYCUBE";
val["interactive"]["example"] = R"POLYCUBE()POLYCUBE";
return val;
}
// Describes the list-valued (complex) elements of this object.
nlohmann::json FirewallJsonObject::helpComplexElements() {
nlohmann::json val = nlohmann::json::object();
val["ports"]["name"] = "ports";
val["ports"]["type"] = "list";
val["ports"]["description"] = R"POLYCUBE(Entry of the ports table)POLYCUBE";
val["ports"]["example"] = R"POLYCUBE()POLYCUBE";
val["session-table"]["name"] = "session-table";
val["session-table"]["type"] = "list";
val["session-table"]["description"] = R"POLYCUBE()POLYCUBE";
val["session-table"]["example"] = R"POLYCUBE()POLYCUBE";
val["chain"]["name"] = "chain";
val["chain"]["type"] = "list";
val["chain"]["description"] = R"POLYCUBE()POLYCUBE";
val["chain"]["example"] = R"POLYCUBE()POLYCUBE";
return val;
}
// The Firewall service exposes no custom actions, so the list is empty.
std::vector<std::string> FirewallJsonObject::helpActions() {
  return {};
}
// --- name accessors: setter also raises the IsSet flag. ---
std::string FirewallJsonObject::getName() const {
return m_name;
}
void FirewallJsonObject::setName(std::string value) {
m_name = value;
m_nameIsSet = true;
}
bool FirewallJsonObject::nameIsSet() const {
return m_nameIsSet;
}
void FirewallJsonObject::unsetName() {
m_nameIsSet = false;
}
// --- uuid accessors. ---
std::string FirewallJsonObject::getUuid() const {
return m_uuid;
}
void FirewallJsonObject::setUuid(std::string value) {
m_uuid = value;
m_uuidIsSet = true;
}
bool FirewallJsonObject::uuidIsSet() const {
return m_uuidIsSet;
}
void FirewallJsonObject::unsetUuid() {
m_uuidIsSet = false;
}
// --- type (cube type) accessors. ---
CubeType FirewallJsonObject::getType() const {
return m_type;
}
void FirewallJsonObject::setType(CubeType value) {
m_type = value;
m_typeIsSet = true;
}
bool FirewallJsonObject::typeIsSet() const {
return m_typeIsSet;
}
void FirewallJsonObject::unsetType() {
m_typeIsSet = false;
}
// Converts a CubeType enum value to its wire string; throws on an
// unrecognised value.
std::string FirewallJsonObject::CubeType_to_string(const CubeType &value){
switch(value){
case CubeType::TC:
return std::string("TC");
case CubeType::XDP_SKB:
return std::string("XDP_SKB");
case CubeType::XDP_DRV:
return std::string("XDP_DRV");
default:
throw std::runtime_error("Bad Firewall type");
}
}
// Parses a cube-type string (case-insensitive); throws on an unknown string.
CubeType FirewallJsonObject::string_to_CubeType(const std::string &str){
if (JsonObjectBase::iequals("TC", str))
return CubeType::TC;
if (JsonObjectBase::iequals("XDP_SKB", str))
return CubeType::XDP_SKB;
if (JsonObjectBase::iequals("XDP_DRV", str))
return CubeType::XDP_DRV;
throw std::runtime_error("Firewall type is invalid");
}
// --- loglevel accessors. ---
FirewallLoglevelEnum FirewallJsonObject::getLoglevel() const {
return m_loglevel;
}
void FirewallJsonObject::setLoglevel(FirewallLoglevelEnum value) {
m_loglevel = value;
m_loglevelIsSet = true;
}
bool FirewallJsonObject::loglevelIsSet() const {
return m_loglevelIsSet;
}
void FirewallJsonObject::unsetLoglevel() {
m_loglevelIsSet = false;
}
// Converts a loglevel enum value to its lowercase wire string; throws on an
// unrecognised value.
std::string FirewallJsonObject::FirewallLoglevelEnum_to_string(const FirewallLoglevelEnum &value){
switch(value){
case FirewallLoglevelEnum::TRACE:
return std::string("trace");
case FirewallLoglevelEnum::DEBUG:
return std::string("debug");
case FirewallLoglevelEnum::INFO:
return std::string("info");
case FirewallLoglevelEnum::WARN:
return std::string("warn");
case FirewallLoglevelEnum::ERR:
return std::string("err");
case FirewallLoglevelEnum::CRITICAL:
return std::string("critical");
case FirewallLoglevelEnum::OFF:
return std::string("off");
default:
throw std::runtime_error("Bad Firewall loglevel");
}
}
// Parses a loglevel string (case-insensitive); throws on an unknown string.
FirewallLoglevelEnum FirewallJsonObject::string_to_FirewallLoglevelEnum(const std::string &str){
if (JsonObjectBase::iequals("trace", str))
return FirewallLoglevelEnum::TRACE;
if (JsonObjectBase::iequals("debug", str))
return FirewallLoglevelEnum::DEBUG;
if (JsonObjectBase::iequals("info", str))
return FirewallLoglevelEnum::INFO;
if (JsonObjectBase::iequals("warn", str))
return FirewallLoglevelEnum::WARN;
if (JsonObjectBase::iequals("err", str))
return FirewallLoglevelEnum::ERR;
if (JsonObjectBase::iequals("critical", str))
return FirewallLoglevelEnum::CRITICAL;
if (JsonObjectBase::iequals("off", str))
return FirewallLoglevelEnum::OFF;
throw std::runtime_error("Firewall loglevel is invalid");
}
// Maps the service loglevel enum onto the corresponding polycube log level.
polycube::LogLevel FirewallJsonObject::getPolycubeLoglevel() const {
  switch(m_loglevel) {
  case FirewallLoglevelEnum::TRACE:
    return polycube::LogLevel::TRACE;
  case FirewallLoglevelEnum::DEBUG:
    return polycube::LogLevel::DEBUG;
  case FirewallLoglevelEnum::INFO:
    return polycube::LogLevel::INFO;
  case FirewallLoglevelEnum::WARN:
    return polycube::LogLevel::WARN;
  case FirewallLoglevelEnum::ERR:
    return polycube::LogLevel::ERR;
  case FirewallLoglevelEnum::CRITICAL:
    return polycube::LogLevel::CRITICAL;
  case FirewallLoglevelEnum::OFF:
    return polycube::LogLevel::OFF;
  }
  // Fix: the original could fall off the end of this non-void function
  // (undefined behavior) if m_loglevel ever held an out-of-range value.
  throw std::runtime_error("Bad Firewall loglevel");
}
// --- ports accessors. Note: addPorts() does not raise m_portsIsSet; the
// flag is set only by fromJson() — TODO confirm this is intentional. ---
const std::vector<PortsJsonObject>& FirewallJsonObject::getPorts() const{
return m_ports;
}
void FirewallJsonObject::addPorts(PortsJsonObject value) {
m_ports.push_back(value);
}
bool FirewallJsonObject::portsIsSet() const {
return m_portsIsSet;
}
void FirewallJsonObject::unsetPorts() {
m_portsIsSet = false;
}
// --- ingress-port accessors. ---
std::string FirewallJsonObject::getIngressPort() const {
return m_ingressPort;
}
void FirewallJsonObject::setIngressPort(std::string value) {
m_ingressPort = value;
m_ingressPortIsSet = true;
}
bool FirewallJsonObject::ingressPortIsSet() const {
return m_ingressPortIsSet;
}
void FirewallJsonObject::unsetIngressPort() {
m_ingressPortIsSet = false;
}
// --- egress-port accessors. ---
std::string FirewallJsonObject::getEgressPort() const {
return m_egressPort;
}
void FirewallJsonObject::setEgressPort(std::string value) {
m_egressPort = value;
m_egressPortIsSet = true;
}
bool FirewallJsonObject::egressPortIsSet() const {
return m_egressPortIsSet;
}
void FirewallJsonObject::unsetEgressPort() {
m_egressPortIsSet = false;
}
// --- conntrack accessors. ---
FirewallConntrackEnum FirewallJsonObject::getConntrack() const {
return m_conntrack;
}
void FirewallJsonObject::setConntrack(FirewallConntrackEnum value) {
m_conntrack = value;
m_conntrackIsSet = true;
}
bool FirewallJsonObject::conntrackIsSet() const {
return m_conntrackIsSet;
}
void FirewallJsonObject::unsetConntrack() {
m_conntrackIsSet = false;
}
// Converts a conntrack enum value to its wire string; throws otherwise.
std::string FirewallJsonObject::FirewallConntrackEnum_to_string(const FirewallConntrackEnum &value){
switch(value){
case FirewallConntrackEnum::ON:
return std::string("on");
case FirewallConntrackEnum::OFF:
return std::string("off");
default:
throw std::runtime_error("Bad Firewall conntrack");
}
}
// Parses a conntrack string (case-insensitive); throws on an unknown string.
FirewallConntrackEnum FirewallJsonObject::string_to_FirewallConntrackEnum(const std::string &str){
if (JsonObjectBase::iequals("on", str))
return FirewallConntrackEnum::ON;
if (JsonObjectBase::iequals("off", str))
return FirewallConntrackEnum::OFF;
throw std::runtime_error("Firewall conntrack is invalid");
}
// --- accept-established accessors. ---
FirewallAcceptEstablishedEnum FirewallJsonObject::getAcceptEstablished() const {
return m_acceptEstablished;
}
void FirewallJsonObject::setAcceptEstablished(FirewallAcceptEstablishedEnum value) {
m_acceptEstablished = value;
m_acceptEstablishedIsSet = true;
}
bool FirewallJsonObject::acceptEstablishedIsSet() const {
return m_acceptEstablishedIsSet;
}
void FirewallJsonObject::unsetAcceptEstablished() {
m_acceptEstablishedIsSet = false;
}
// Converts an accept-established enum value to its wire string; throws
// on an unrecognised value.
std::string FirewallJsonObject::FirewallAcceptEstablishedEnum_to_string(const FirewallAcceptEstablishedEnum &value){
switch(value){
case FirewallAcceptEstablishedEnum::ON:
return std::string("on");
case FirewallAcceptEstablishedEnum::OFF:
return std::string("off");
default:
throw std::runtime_error("Bad Firewall acceptEstablished");
}
}
// Parses an accept-established string (case-insensitive); throws on an
// unknown string.
FirewallAcceptEstablishedEnum FirewallJsonObject::string_to_FirewallAcceptEstablishedEnum(const std::string &str){
if (JsonObjectBase::iequals("on", str))
return FirewallAcceptEstablishedEnum::ON;
if (JsonObjectBase::iequals("off", str))
return FirewallAcceptEstablishedEnum::OFF;
throw std::runtime_error("Firewall acceptEstablished is invalid");
}
// --- interactive accessors. ---
bool FirewallJsonObject::getInteractive() const {
return m_interactive;
}
void FirewallJsonObject::setInteractive(bool value) {
m_interactive = value;
m_interactiveIsSet = true;
}
bool FirewallJsonObject::interactiveIsSet() const {
return m_interactiveIsSet;
}
void FirewallJsonObject::unsetInteractive() {
m_interactiveIsSet = false;
}
// --- session-table accessors; add* does not raise the IsSet flag. ---
const std::vector<SessionTableJsonObject>& FirewallJsonObject::getSessionTable() const{
return m_sessionTable;
}
void FirewallJsonObject::addSessionTable(SessionTableJsonObject value) {
m_sessionTable.push_back(value);
}
bool FirewallJsonObject::sessionTableIsSet() const {
return m_sessionTableIsSet;
}
void FirewallJsonObject::unsetSessionTable() {
m_sessionTableIsSet = false;
}
// --- chain accessors; add* does not raise the IsSet flag. ---
const std::vector<ChainJsonObject>& FirewallJsonObject::getChain() const{
return m_chain;
}
void FirewallJsonObject::addChain(ChainJsonObject value) {
m_chain.push_back(value);
}
bool FirewallJsonObject::chainIsSet() const {
return m_chainIsSet;
}
void FirewallJsonObject::unsetChain() {
m_chainIsSet = false;
}
}
}
}
}
|
//------------------------------------------------------------------------------
// d3d11memoryvertexbufferloader.cc
// (C) 2007 Radon Labs GmbH
// (C) 2013-2018 Individual contributors, see AUTHORS file
//------------------------------------------------------------------------------
#include "stdneb.h"
#include "coregraphics/vertexlayoutserver.h"
#include "coregraphics/d3d11/d3d11memoryvertexbufferloader.h"
#include "coregraphics/d3d11/d3d11types.h"
#include "coregraphics/renderdevice.h"
#include "coregraphics/vertexbuffer.h"
namespace Direct3D11
{
__ImplementClass(Direct3D11::D3D11MemoryVertexBufferLoader, 'DMVL', Base::MemoryVertexBufferLoaderBase);
using namespace Resources;
using namespace CoreGraphics;
//------------------------------------------------------------------------------
/**
This will create a D3D11 vertex buffer and vertex declaration object
from the data provided in the Setup() method and setup our resource
object (which must be a D3D11VertexBuffer object).
*/
bool
D3D11MemoryVertexBufferLoader::OnLoadRequested()
{
// Synchronous, in-memory load only: resource must be fresh and non-async.
n_assert(this->GetState() == Resource::Initial);
n_assert(this->resource.isvalid());
n_assert(!this->resource->IsAsyncEnabled());
n_assert(this->numVertices > 0);
// Immutable buffers can never be written later, so the initial data is
// mandatory in that case.
if (VertexBuffer::UsageImmutable == this->usage)
{
n_assert(0 != this->vertexDataPtr);
n_assert(0 < this->vertexDataSize);
}
ID3D11Buffer* d3dVertexBuffer = 0;
D3D11_SUBRESOURCE_DATA data;
ID3D11Device* d3d11Device = RenderDevice::Instance()->GetDirect3DDevice();
n_assert(0 != d3d11Device);
// first setup the vertex layout (contains the D3D11 vertex declaration)
Ptr<VertexLayout> vertexLayout = VertexLayoutServer::Instance()->CreateSharedVertexLayout(this->vertexComponents);
if (0 != this->vertexDataPtr)
{
// Sanity check: the caller's byte count must match count * stride.
n_assert((this->numVertices * vertexLayout->GetVertexByteSize()) == this->vertexDataSize);
}
// create a D3D11 vertex buffer object
D3D11_BUFFER_DESC desc;
// Dynamic usage requires CPU write access; everything else gets the
// default (GPU-only) usage.
if (D3D11_USAGE_DYNAMIC == D3D11Types::AsD3D11Usage(this->usage))
{
desc = CD3D11_BUFFER_DESC(
this->numVertices * vertexLayout->GetVertexByteSize(),
D3D11_BIND_VERTEX_BUFFER,
D3D11_USAGE_DYNAMIC,
D3D11_CPU_ACCESS_WRITE);
}
else
{
desc = CD3D11_BUFFER_DESC(
this->numVertices * vertexLayout->GetVertexByteSize(),
D3D11_BIND_VERTEX_BUFFER,
D3D11_USAGE_DEFAULT);
}
desc.StructureByteStride = vertexLayout->GetVertexByteSize();
HRESULT hr;
data.pSysMem = this->vertexDataPtr;
data.SysMemPitch = 0;
data.SysMemSlicePitch = 0;
// Pass initial data only when the caller provided any; otherwise create
// an uninitialized buffer.
if (0 != this->vertexDataPtr)
{
hr = d3d11Device->CreateBuffer(&desc, &data, &d3dVertexBuffer);
}
else
{
hr = d3d11Device->CreateBuffer(&desc, NULL, &d3dVertexBuffer);
}
n_assert(SUCCEEDED(hr));
n_assert(0 != d3dVertexBuffer);
// setup our resource object
const Ptr<VertexBuffer>& res = this->resource.downcast<VertexBuffer>();
n_assert(!res->IsLoaded());
res->SetUsage(this->usage);
res->SetAccess(this->access);
res->SetVertexLayout(vertexLayout);
res->SetNumVertices(this->numVertices);
res->SetD3D11VertexBuffer(d3dVertexBuffer);
// invalidate setup data (because we don't own our data)
this->vertexDataPtr = 0;
this->vertexDataSize = 0;
this->SetState(Resource::Loaded);
return true;
}
} // namespace Direct3D11
|
#include "optimizers/base.h"
// Entry point. Usage: <prog> N target optimizer — creates the named
// optimizer over a Runner for `target` with problem size N and runs it.
int main(int argc, char **argv) {
if (argc != 4) {
fprintf(stderr, "Usage: %s N target optimizer\n", argv[0]);
exit(1);
}
Runner runner(argv[2]);
// argv[1] is parsed with atoll — no error reporting for malformed N.
auto optimizer = Optimizer::Create(argv[3], &runner, atoll(argv[1]));
optimizer->Run();
}
|
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/command_line.h"
#include "chrome/browser/profiles/profile.h"
#include "chrome/browser/ui/browser.h"
#include "chrome/browser/ui/tabs/tab_strip_model.h"
#include "chrome/browser/ui/webui/password_manager_internals/password_manager_internals_ui.h"
#include "chrome/common/url_constants.h"
#include "chrome/test/base/ui_test_utils.h"
#include "chrome/test/base/web_ui_browser_test.h"
#include "components/password_manager/content/browser/password_manager_internals_service_factory.h"
#include "components/password_manager/core/browser/password_manager_internals_service.h"
#include "components/password_manager/core/common/password_manager_switches.h"
#include "content/public/browser/web_contents.h"
// Browser-test fixture for the chrome://password-manager-internals WebUI
// page. SetUpOnMainThread() opens the internals page in the current tab, so
// each test body starts with one internals page already loaded.
class PasswordManagerInternalsWebUIBrowserTest : public WebUIBrowserTest {
 public:
  PasswordManagerInternalsWebUIBrowserTest();
  virtual ~PasswordManagerInternalsWebUIBrowserTest();

  // WebUIBrowserTest:
  virtual void SetUpOnMainThread() OVERRIDE;

 protected:
  // Returns the WebContents of the active tab of the test browser.
  content::WebContents* GetWebContents();

  // Navigates to the internals page in a tab specified by |disposition|. Also
  // assigns the corresponding UI controller to |controller_|.
  void OpenInternalsPage(WindowOpenDisposition disposition);

 private:
  // Controller of the most recently opened internals page. Raw pointer
  // obtained from the page's WebUI; never deleted by this fixture.
  PasswordManagerInternalsUI* controller_;
};
// Starts with no internals page open; |controller_| is assigned later by
// OpenInternalsPage().
PasswordManagerInternalsWebUIBrowserTest::
    PasswordManagerInternalsWebUIBrowserTest()
    : controller_(NULL) {}
// Nothing to release: the raw |controller_| pointer is not owned here.
PasswordManagerInternalsWebUIBrowserTest::
    ~PasswordManagerInternalsWebUIBrowserTest() {}
// Opens the internals page in the current tab before each test body runs.
void PasswordManagerInternalsWebUIBrowserTest::SetUpOnMainThread() {
  WebUIBrowserTest::SetUpOnMainThread();
  OpenInternalsPage(CURRENT_TAB);
}
// Returns the WebContents of the tab currently active in the test browser.
content::WebContents*
PasswordManagerInternalsWebUIBrowserTest::GetWebContents() {
  TabStripModel* tab_strip = browser()->tab_strip_model();
  return tab_strip->GetActiveWebContents();
}
// Navigates to chrome://password-manager-internals using |disposition|,
// waits for the navigation to finish, stores the page's WebUI controller in
// |controller_|, and registers the JS test library for the page.
void PasswordManagerInternalsWebUIBrowserTest::OpenInternalsPage(
    WindowOpenDisposition disposition) {
  std::string spec("chrome://");
  spec.append(chrome::kChromeUIPasswordManagerInternalsHost);
  ui_test_utils::NavigateToURLWithDisposition(
      browser(),
      GURL(spec),
      disposition,
      ui_test_utils::BROWSER_TEST_WAIT_FOR_NAVIGATION);
  controller_ = static_cast<PasswordManagerInternalsUI*>(
      GetWebContents()->GetWebUI()->GetController());
  AddLibrary(base::FilePath(
      FILE_PATH_LITERAL("password_manager_internals_browsertest.js")));
}
// Text logged through the internals service must show up on the (already
// open) internals page; testLogText checks for it on the JS side.
IN_PROC_BROWSER_TEST_F(PasswordManagerInternalsWebUIBrowserTest,
                       LogSavePasswordProgress) {
  password_manager::PasswordManagerInternalsService* service =
      password_manager::PasswordManagerInternalsServiceFactory::
          GetForBrowserContext(browser()->profile());
  ASSERT_TRUE(service);
  service->ProcessLog("<script> text for testing");
  ASSERT_TRUE(RunJavascriptTest("testLogText"));
}
// Test that a single internals page is flushed on reload.
// Test that a single internals page is flushed on reload.
IN_PROC_BROWSER_TEST_F(PasswordManagerInternalsWebUIBrowserTest,
                       LogSavePasswordProgress_FlushedOnReload) {
  password_manager::PasswordManagerInternalsService* service =
      password_manager::PasswordManagerInternalsServiceFactory::
          GetForBrowserContext(browser()->profile());
  ASSERT_TRUE(service);
  // Log while the page is open, then reload; the text must be gone.
  service->ProcessLog("<script> text for testing");
  OpenInternalsPage(CURRENT_TAB);  // Reload.
  ASSERT_TRUE(RunJavascriptTest("testLogTextNotPresent"));
}
// Test that if two tabs with the internals page are open, the second displays
// the same logs. In particular, this checks that both the second tab gets the
// logs created before the second tab was opened, and also that the second tab
// waits with displaying until the internals page is ready (trying to display
// the old logs just on construction time would fail).
IN_PROC_BROWSER_TEST_F(PasswordManagerInternalsWebUIBrowserTest,
                       LogSavePasswordProgress_MultipleTabsIdentical) {
  // First, open one tab with the internals page, and log something.
  password_manager::PasswordManagerInternalsService* service =
      password_manager::PasswordManagerInternalsServiceFactory::
          GetForBrowserContext(browser()->profile());
  ASSERT_TRUE(service);
  service->ProcessLog("<script> text for testing");
  ASSERT_TRUE(RunJavascriptTest("testLogText"));
  // Now open a second tab with the internals page, but do not log anything.
  OpenInternalsPage(NEW_FOREGROUND_TAB);
  // The previously logged text should have made it to the page.
  // (RunJavascriptTest runs against the newly opened, now-active tab.)
  ASSERT_TRUE(RunJavascriptTest("testLogText"));
}
// Test that in the presence of more internals pages, reload does not cause
// flushing the logs.
IN_PROC_BROWSER_TEST_F(PasswordManagerInternalsWebUIBrowserTest,
                       LogSavePasswordProgress_NotFlushedOnReloadIfMultiple) {
  // Open one more tab with the internals page (one is already open from
  // SetUpOnMainThread, so two are open now).
  OpenInternalsPage(NEW_FOREGROUND_TAB);
  // Now log something.
  password_manager::PasswordManagerInternalsService* service =
      password_manager::PasswordManagerInternalsServiceFactory::
          GetForBrowserContext(browser()->profile());
  ASSERT_TRUE(service);
  service->ProcessLog("<script> text for testing");
  // Reload.
  OpenInternalsPage(CURRENT_TAB);
  // The text should still be there, because another page kept the logs alive.
  ASSERT_TRUE(RunJavascriptTest("testLogText"));
}
|
/**
* Copyright (c) 2017-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ThreadPoolExecutor.h"
#include "glow/Backends/DeviceManager.h"
#include "glow/ExecutionContext/ExecutionContext.h"
#include <queue>
#include <unordered_set>
#include <glog/logging.h>
namespace glow {
namespace runtime {
// Decrease the barrier count by decr, waking all waiters when it hits zero.
// Asserts the count never goes below zero.
void InflightBarrier::decrement(unsigned decr) {
  std::unique_lock<std::mutex> lock(mtx_);
  assert(count_ >= decr && "Barrier decrement cannot be less than count!");
  count_ -= decr;
  // If count_ has hit zero, wake up all threads that are waiting.
  if (count_ == 0) {
    cv_.notify_all();
  }
} // NOTE(review): the previous "// namespace runtime" trailer on this brace
  // was wrong; it only closes InflightBarrier::decrement.
// Raise the barrier count by incr under the barrier lock so that decrement()
// and wait() observe a consistent value.
void InflightBarrier::increment(unsigned incr) {
  std::lock_guard<std::mutex> guard(mtx_);
  count_ += incr;
}
// Return a snapshot of the current barrier count, taken under the lock.
unsigned InflightBarrier::count() {
  std::lock_guard<std::mutex> guard(mtx_);
  return count_;
}
// Block the calling thread until the barrier count drops to zero. Returns
// immediately if the count is already zero.
void InflightBarrier::wait() {
  std::unique_lock<std::mutex> lock(mtx_);
  // If count_ is not 0, wait until a signal is received that it is.
  // The second argument below is a predicate that returns true when
  // it is safe to wake up. It preserves correctness in the case of
  // spurious wakeups.
  cv_.wait(lock, [&] { return count_ == 0; });
}
// Record the run id, completion callback, result context and DAG root for one
// run. The heavy per-node setup is deferred to init().
ExecutionState::ExecutionState(RunIdentifierTy id, const DAGNode *root,
                               std::unique_ptr<ExecutionContext> resultContext,
                               ResultCBTy doneCb)
    : runId_(id), cb_(doneCb), resultCtx_(std::move(resultContext)),
      inflightNodes_(0), module_(root->module), root_(root) {}
// Walk the whole DAG once (breadth-first) and, for every node, prepare the
// per-node input ExecutionContext, the parents-done counter, and placeholder
// bindings for intermediate results. Must be called before the run starts
// (executeDAGNode asserts initialized_).
void ExecutionState::init() {
  // Create a queue for the breadth-first traversal through the graph.
  std::queue<const DAGNode *> bfsQueue;
  // Place the root nodes in the queue.
  for (const auto &node : root_->children) {
    bfsQueue.push(node);
  }
  auto *resultTraceContext = resultCtx_->getTraceContext();
  // Breadth-first search.
  while (!bfsQueue.empty()) {
    // Get the next node in the BFS queue.
    const DAGNode *node = bfsQueue.front();
    bfsQueue.pop();
    // Make a counter for the number of node parents done.
    nodeParentsDone_[node] = 0;
    // Make an (empty) input context for the node; mirror the result context's
    // trace level if tracing is enabled.
    auto nodeInputCtx = llvm::make_unique<ExecutionContext>();
    if (resultTraceContext) {
      nodeInputCtx->setTraceContext(
          llvm::make_unique<TraceContext>(resultTraceContext->getTraceLevel()));
    }
    auto nodeInputPhBindings = nodeInputCtx->getPlaceholderBindings();
    // Get the symbol table for the node.
    const SymbolTableTy &symbolTable = node->runtimeBundle->getSymbolTable();
    // Create Placeholders for the symbols of all intermediate nodes. These are
    // not in the ExecutionContext passed to Executor::run, so they must be
    // created by the Executor.
    auto *resultBindings = resultCtx_->getPlaceholderBindings();
    for (const auto &symbolPair : symbolTable) {
      const auto &symbolName = symbolPair.first;
      const auto &symbolInfo = symbolPair.second;
      if (symbolInfo.symbolCategory == SymbolCategory::Placeholder) {
        auto *PH = resultBindings->getPlaceholderByName(symbolName);
        if (!PH) {
          PH = module_->getPlaceholderByName(symbolName);
          DCHECK(PH) << "Placeholder: " << symbolName
                     << " is not in the module";
          // allocate into the resultBindings because they have the longest
          // lifetime.
          resultBindings->insert(PH,
                                 intermediateTensorPool_.get(PH->getType()));
          // Remember it so removeIntermediatePlaceholders() can strip it out
          // before the results are handed back to the caller.
          intermediatePlaceholders_.push_back(PH);
        }
        // The node's input binding aliases (does not own) the result tensor.
        nodeInputPhBindings->insert(
            PH, resultBindings->get(PH)->getUnowned(PH->dims()));
      }
    }
    // Insert the prepared ExecutionContext into the input contexts map.
    inputCtxs_.insert(std::make_pair(node, std::move(nodeInputCtx)));
    // Push all unvisited children onto the BFS queue.
    for (const auto &child : node->children) {
      // Use nodeParentsDone_ as a set of nodes that have been visited already
      // to avoid visiting a node more than once.
      if (!nodeParentsDone_.count(child)) {
        bfsQueue.push(child);
      }
    }
  }
  initialized_ = true;
}
// Transfer ownership of the per-node input context that init() prepared for
// |node|. May be called at most once per node: the map entry is left empty.
std::unique_ptr<ExecutionContext>
ExecutionState::getUniqueNodeContextPtr(const DAGNode *node) {
  auto found = inputCtxs_.find(node);
  assert(found != inputCtxs_.end() &&
         "Input bindings not found but should exist!");
  return std::move(found->second);
}
// Atomically add |increment| to the number of currently executing nodes.
void ExecutionState::incrementInflightNodes(unsigned increment) {
  inflightNodes_.fetch_add(increment);
}
// Atomically subtract |decrement| from the inflight-node counter. Returns
// true for exactly one caller: the one whose subtraction takes the counter
// to zero (fetch_sub yields the pre-decrement value, so the check is
// race-free).
bool ExecutionState::decrementInflightNodes(unsigned decrement) {
  const unsigned before = inflightNodes_.fetch_sub(decrement);
  // Decrementing past zero would mean the inflight bookkeeping is broken.
  assert(before >= decrement &&
         "More decrements than increments to inflight nodes!");
  return before == decrement;
}
// Record that |increment| more parents of |node| have finished. Returns true
// for exactly one caller: the one whose increment makes the count equal the
// node's total number of parents (i.e. the node is ready to execute).
bool ExecutionState::incrementNodeParentsDone(const DAGNode *node,
                                              unsigned increment) {
  // Get the parents done counter for the node. It should have
  // been created in the constructor.
  auto it = nodeParentsDone_.find(node);
  if (it == nodeParentsDone_.end()) {
    assert(!"Node parents done counter should exist but not found!");
  }
  // fetch_add must be used here so that the function returns true to only
  // one caller.
  unsigned numParents = (node->parents).size();
  unsigned previousValue = (it->second).fetch_add(increment);
  unsigned newValue = previousValue + increment;
  // The new value of the counter cannot exceed the number of parents that
  // the node has.
  if (newValue > numParents) {
    assert(!"Node parents done counter incremented beyond limit!");
  }
  // Return true only when the counter hits the total number of parents.
  return (newValue == numParents);
}
// Move the given trace events into the run's result TraceContext. If tracing
// is disabled for this run, the events are simply discarded. In either case
// |events| is left empty/moved-from on return.
void ExecutionState::insertIntoTraceContext(std::vector<TraceEvent> &events) {
  if (!resultCtx_->getTraceContext()) {
    events.clear();
    return;
  }
  // Serialize concurrent completions appending into the shared result context.
  std::lock_guard<std::mutex> lock(bindingsMtx_);
  std::move(
      events.begin(), events.end(),
      std::back_inserter(resultCtx_->getTraceContext()->getTraceEvents()));
}
// Strip from the result bindings every placeholder this run created for
// intermediate results, so the caller only sees the bindings it provided.
void ExecutionState::removeIntermediatePlaceholders() {
  for (auto &placeholder : intermediatePlaceholders_) {
    resultCtx_->getPlaceholderBindings()->erase(placeholder);
  }
  intermediatePlaceholders_.clear();
}
// Transfer ownership of the run's result context to the caller. Valid only
// once; afterwards resultCtx_ is null.
std::unique_ptr<ExecutionContext> ExecutionState::getUniqueResultContextPtr() {
  // The result PlaceholderBindings should have been created in the
  // constructor.
  assert(resultCtx_ && "Execution result bindings should exist!");
  return std::move(resultCtx_);
}
// Borrow (without transferring ownership of) the run's result context.
ExecutionContext *ExecutionState::getRawResultContextPtr() const {
  // The result PlaceholderBindings should have been created in the
  // constructor and should not yet have been moved out if this function is
  // being called.
  assert(resultCtx_ && "Execution result bindings should exist!");
  return resultCtx_.get();
}
// Stop accepting new runs and block until every outstanding
// DeviceManager::runFunction() call has been fully processed.
void ThreadPoolExecutor::shutdown() {
  // Prevent more requests from being processed.
  shuttingDown_ = true;
  // Wait for all inflight DeviceManager::runFunction() calls to return and be
  // processed before starting to destroy state that is used in
  // handleDeviceManagerResult().
  inflightBarrier_.wait();
}
// Kick off asynchronous execution of the DAG rooted at |root| for run |runId|.
// |cb| is invoked exactly once with the run's result (possibly immediately,
// on failure paths). Refuses the request while shutting down or when |runId|
// is already in use.
void ThreadPoolExecutor::run(const DAGNode *root,
                             std::unique_ptr<ExecutionContext> context,
                             RunIdentifierTy runId, ResultCBTy cb) {
  TRACE_EVENT_SCOPE(context->getTraceContext(), "ThreadPoolExecutor::run");
  // Don't process new requests if the executor is shutting down.
  if (shuttingDown_) {
    cb(runId,
       MAKE_ERR(GlowErr::ErrorCode::RUNTIME_REQUEST_REFUSED,
                "ThreadPoolExecutor is shutting down"),
       std::move(context));
    return;
  }
  // A null root means there is nothing to do. Give back the
  // bindings so the caller can reuse it.
  if (!root) {
    cb(runId, llvm::Error::success(), std::move(context));
    return;
  }
  // Shared state for this run; shared_ptr because completion callbacks on
  // other threads keep it alive until the last node finishes.
  std::shared_ptr<ExecutionState> executionState =
      std::make_shared<ExecutionState>(runId, root, std::move(context),
                                       std::move(cb));
  executionState->init();
  bool runIdAlreadyTaken = false;
  {
    std::lock_guard<std::mutex> lock(executionStatesMutex_);
    auto result = executionStates_.emplace(runId, executionState);
    runIdAlreadyTaken = !result.second;
  }
  if (runIdAlreadyTaken) {
    // The callback and context were moved into executionState above, so they
    // must be recovered from it to report the refusal.
    cb = executionState->getCallback();
    cb(runId,
       MAKE_ERR(
           GlowErr::ErrorCode::RUNTIME_REQUEST_REFUSED,
           "ThreadPoolExecutor found another run with the same request id"),
       executionState->getUniqueResultContextPtr());
    return;
  }
  // Execute all child nodes of root.
  // Mark the child nodes as "inflight" (i.e. currently executing). This must be
  // done here instead of inside executeDAGNode() so that a node can be
  // executed while placeholders are being propagated for the next node without
  // the callback for that node deleting the execution state.
  auto numChildren = (root->children).size();
  executionState->incrementInflightNodes(numChildren);
  inflightBarrier_.increment(numChildren);
  for (auto const &node : root->children) {
    // Execute the node.
    executeDAGNode(executionState, node);
  }
}
// Dispatch one DAG node to its DeviceManager. On any early-out (prior error,
// unknown device) the node's inflight accounting is unwound so the run can
// still complete. The node must already be counted as inflight by the caller.
void ThreadPoolExecutor::executeDAGNode(
    std::shared_ptr<ExecutionState> executionState, DAGNode *node) {
  TRACE_EVENT_SCOPE(executionState->getRawResultContextPtr()->getTraceContext(),
                    "ThreadPoolExecutor::executeDAGNode");
  assert(executionState->initialized_ && "Run state must be initialized");
  // If execution has already failed due to another node, don't bother running
  // this one.
  if (executionState->getErrorContainer().containsErr()) {
    // Mark the node as no longer executing.
    executionState->decrementInflightNodes();
    inflightBarrier_.decrement();
    return;
  }
  auto currentDevice = node->getNextDevice();
  // Get the DeviceManager that can run the node.
  auto deviceManagerIt = deviceManagers_.find(currentDevice);
  if (deviceManagerIt == deviceManagers_.end()) {
    // Mark the node as no longer executing.
    executionState->getErrorContainer().set(
        MAKE_ERR(GlowErr::ErrorCode::RUNTIME_DEVICE_NOT_FOUND,
                 "Cannot find the DeviceManager specified."));
    executionState->decrementInflightNodes();
    inflightBarrier_.decrement();
    return;
  }
  auto &deviceManager = deviceManagerIt->second;
  // Get the PlaceholderBindings containing all of the inputs for the node.
  std::unique_ptr<ExecutionContext> nodeCtx =
      executionState->getUniqueNodeContextPtr(node);
  // Run the node using the DeviceManager. The lambdas capture executionState
  // by value (shared_ptr copy), keeping the run state alive across threads.
  deviceManager->runFunction(
      node->name, std::move(nodeCtx),
      [this, executionState,
       node](RunIdentifierTy id, llvm::Error err,
             std::unique_ptr<ExecutionContext> resultCtx) {
        // Immediately move the handling of the result onto threadPool_ to
        // avoid doing work on the DeviceManager thread.
        this->threadPool_.submit([this, executionState, node,
                                  err = std::move(err),
                                  ctx = std::move(resultCtx)]() mutable {
          this->handleDeviceManagerResult(executionState, std::move(err),
                                          std::move(ctx), node);
        });
      });
}
// Process the completion of one node: record any error, schedule children
// whose parents are all done, and — when the last inflight node finishes —
// invoke the run's callback and erase its state. Runs on threadPool_, never
// on a DeviceManager thread.
void ThreadPoolExecutor::handleDeviceManagerResult(
    std::shared_ptr<ExecutionState> executionState, llvm::Error err,
    std::unique_ptr<ExecutionContext> ctx, const DAGNode *node) {
  // If executionState is null, that means that the object was deleted
  // while a node was executing. That should never happen.
  assert(executionState && "Execution state should not be null");
  TraceContext *traceContext = ctx->getTraceContext();
  TRACE_EVENT_BEGIN(traceContext, "ThreadPoolExecutor::handleResult");
  // Capture success before err is consumed by the error container below.
  auto runWasSuccess = !err;
  // Set the result code for the run.
  executionState->getErrorContainer().set(std::move(err));
  // If the DeviceManager executed the node, propagate its output Placeholders
  // to its children or the result PlaceholderBindings as appropriate.
  if (runWasSuccess) {
    for (auto &child : node->children) {
      // Execute any child that has no parent nodes left to execute.
      bool childReadyToExecute =
          executionState->incrementNodeParentsDone(child);
      if (childReadyToExecute) {
        // Mark the node as "inflight" (i.e. currently executing).
        executionState->incrementInflightNodes();
        inflightBarrier_.increment();
        executeDAGNode(executionState, child);
      }
    }
  }
  // Now, check if all nodes in the graph are done. If so, the callback can be
  // called and all state associated with the run can be erased.
  bool noNodesInflight = executionState->decrementInflightNodes();
  if (traceContext) {
    TRACE_EVENT_END(traceContext, "ThreadPoolExecutor::handleResult");
    executionState->insertIntoTraceContext(traceContext->getTraceEvents());
  }
  if (noNodesInflight) {
    // Remove the intermediate placeholders so we don't leak them to the caller.
    executionState->removeIntermediatePlaceholders();
    // If there are no nodes inflight, that means all nodes are done. Call
    // the callback and erase the state information.
    ResultCBTy cb = executionState->getCallback();
    cb(executionState->getRunId(), executionState->getErrorContainer().get(),
       executionState->getUniqueResultContextPtr());
    // Clean up the state stored for the run.
    std::lock_guard<std::mutex> lock(executionStatesMutex_);
    executionStates_.erase(executionState->getRunId());
  }
  // Decrement the inflight barrier for the executor keeping track of all
  // outstanding DeviceManager::runFunction() calls. This must be done here
  // instead of right after executionState->decrementInflightNodes() so that
  // ~ThreadPoolExecutor does not delete executor state before this function
  // is done using it (e.g. when erasing the ExecutionState object for a
  // run).
  inflightBarrier_.decrement();
}
} // namespace runtime
} // namespace glow
|
/**
* Copyright (C) 2014 10gen Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link the
* code of portions of this program with the OpenSSL library under certain
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. You
* must comply with the GNU Affero General Public License in all respects
* for all of the code used other than as permitted herein. If you modify
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do not
* wish to do so, delete this exception statement from your version. If you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kSharding
#include "mongo/platform/basic.h"
#include "mongo/db/json.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/query/canonical_query.h"
#include "mongo/s/chunk_manager.h"
#include "mongo/s/shard_key_pattern.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/log.h"
namespace {
using namespace mongo;
using std::unique_ptr;
using std::make_pair;
/**
* ChunkManager targeting test
*
* TODO:
* Pull the implementation out of chunk.cpp
*/
// Utility function to create a CanonicalQuery
// Parses |queryStr| as JSON and canonicalizes it against the fixed test
// namespace "test.foo". Fails the enclosing test if canonicalization errors.
unique_ptr<CanonicalQuery> canonicalize(const char* queryStr) {
    BSONObj queryObj = fromjson(queryStr);
    auto statusWithCQ = CanonicalQuery::canonicalize("test.foo", queryObj, WhereCallbackNoop());
    ASSERT_OK(statusWithCQ.getStatus());
    return std::move(statusWithCQ.getValue());
}
// Canonicalizes |queryStr|, computes the index bounds for the shard key
// pattern |keyStr|, and compares them field-by-field and interval-by-interval
// against |expectedBounds|, logging any mismatching interval pair.
void checkIndexBoundsWithKey(const char* keyStr,
                             const char* queryStr,
                             const IndexBounds& expectedBounds) {
    unique_ptr<CanonicalQuery> query(canonicalize(queryStr));
    ASSERT(query.get() != NULL);

    BSONObj key = fromjson(keyStr);

    IndexBounds indexBounds = ChunkManager::getIndexBoundsForQuery(key, *query.get());
    ASSERT_EQUALS(indexBounds.size(), expectedBounds.size());

    for (size_t i = 0; i < indexBounds.size(); i++) {
        const OrderedIntervalList& oil = indexBounds.fields[i];
        const OrderedIntervalList& expectedOil = expectedBounds.fields[i];
        ASSERT_EQUALS(oil.intervals.size(), expectedOil.intervals.size());
        // Fix: the inner loop previously reused 'i', shadowing the field index
        // above. Use a distinct index for intervals.
        for (size_t j = 0; j < oil.intervals.size(); j++) {
            if (Interval::INTERVAL_EQUALS != oil.intervals[j].compare(expectedOil.intervals[j])) {
                log() << oil.intervals[j] << " != " << expectedOil.intervals[j];
            }
            ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
                          oil.intervals[j].compare(expectedOil.intervals[j]));
        }
    }
}
// Assume shard key is { a: 1 }
// Assume shard key is { a: 1 }: canonicalizes |queryStr|, computes the bounds
// for the single field 'a', and compares the intervals against |expectedOil|.
// On a size mismatch, logs the actual intervals before failing.
void checkIndexBounds(const char* queryStr, const OrderedIntervalList& expectedOil) {
    unique_ptr<CanonicalQuery> query(canonicalize(queryStr));
    ASSERT(query.get() != NULL);

    BSONObj key = fromjson("{a: 1}");

    IndexBounds indexBounds = ChunkManager::getIndexBoundsForQuery(key, *query.get());
    ASSERT_EQUALS(indexBounds.size(), 1U);
    const OrderedIntervalList& oil = indexBounds.fields.front();

    if (oil.intervals.size() != expectedOil.intervals.size()) {
        for (size_t i = 0; i < oil.intervals.size(); i++) {
            log() << oil.intervals[i];
        }
    }

    ASSERT_EQUALS(oil.intervals.size(), expectedOil.intervals.size());
    for (size_t i = 0; i < oil.intervals.size(); i++) {
        ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
                      oil.intervals[i].compare(expectedOil.intervals[i]));
    }
}
// Shorthand for +infinity, used in expected interval bounds below.
const double INF = std::numeric_limits<double>::infinity();
// { a: 2 } -> a: [2, 2]
TEST(CMCollapseTreeTest, Basic) {
    // Equality predicate on the shard key collapses to a closed point interval.
    OrderedIntervalList expected;
    expected.intervals.push_back(Interval(BSON("" << 2 << "" << 2), true, true));
    checkIndexBounds("{a: 2}", expected);
}
// { b: 2 } -> a: [MinKey, MaxKey]
TEST(CMCollapseTreeTest, AllValue) {
    // A predicate on a non-key field gives the full [MinKey, MaxKey] range.
    OrderedIntervalList expected;
    BSONObjBuilder builder;
    builder.appendMinKey("");
    builder.appendMaxKey("");
    expected.intervals.push_back(Interval(builder.obj(), true, true));
    checkIndexBounds("{b: 2}", expected);
}
// { 'a' : { '$not' : { '$gt' : 1 } } } -> a: [MinKey, 1.0], (inf.0, MaxKey]
TEST(CMCollapseTreeTest, NegativeGT) {
    // $not of $gt splits into two intervals around the excluded range.
    OrderedIntervalList expected;
    {
        // [MinKey, 1.0]
        BSONObjBuilder builder;
        builder.appendMinKey("");
        builder.appendNumber("", 1.0);
        expected.intervals.push_back(Interval(builder.obj(), true, true));
    }
    {
        // (inf.0, MaxKey]
        BSONObjBuilder builder;
        builder.append("", std::numeric_limits<double>::infinity());
        builder.appendMaxKey("");
        expected.intervals.push_back(Interval(builder.obj(), false, true));
    }
    checkIndexBounds("{ 'a' : { '$not' : { '$gt' : 1 } } }", expected);
}
// {$or: [{a: 20}, {$and: [{a:1}, {b:7}]}]} -> a: [1.0, 1.0], [20.0, 20.0]
TEST(CMCollapseTreeTest, OrWithAndChild) {
    // Both $or branches constrain 'a'; the union is two point intervals.
    OrderedIntervalList expected;
    expected.intervals.push_back(Interval(BSON("" << 1.0 << "" << 1.0), true, true));
    expected.intervals.push_back(Interval(BSON("" << 20.0 << "" << 20.0), true, true));
    checkIndexBounds("{$or: [{a: 20}, {$and: [{a:1}, {b:7}]}]}", expected);
}
// {a:20, $or: [{b:1}, {c:7}]} -> a: [20.0, 20.0]
TEST(CMCollapseTreeTest, AndWithUnindexedOrChild) {
    // Logic rewrite could give a tree with root OR; only the 'a' equality
    // contributes to the bounds.
    OrderedIntervalList expected;
    expected.intervals.push_back(Interval(BSON("" << 20.0 << "" << 20.0), true, true));
    checkIndexBounds("{a:20, $or: [{b:1}, {c:7}]}", expected);
}
// {$or: [{a:{$gt:2,$lt:10}}, {a:{$gt:0,$lt:5}}]} -> a: (0.0, 10.0)
TEST(CMCollapseTreeTest, OrOfAnd) {
    // Overlapping ranges in the $or branches merge into one open interval.
    OrderedIntervalList expected;
    expected.intervals.push_back(Interval(BSON("" << 0.0 << "" << 10.0), false, false));
    checkIndexBounds("{$or: [{a:{$gt:2,$lt:10}}, {a:{$gt:0,$lt:5}}]}", expected);
}
// {$or: [{a:{$gt:2,$lt:10}}, {a:{$gt:0,$lt:15}}, {a:{$gt:20}}]}
// -> a: (0.0, 15.0), (20.0, inf.0]
TEST(CMCollapseTreeTest, OrOfAnd2) {
    // Two overlapping ranges merge; the disjoint $gt:20 stays separate.
    OrderedIntervalList expected;
    expected.intervals.push_back(Interval(BSON("" << 0.0 << "" << 15.0), false, false));
    expected.intervals.push_back(Interval(BSON("" << 20.0 << "" << INF), false, true));
    checkIndexBounds("{$or: [{a:{$gt:2,$lt:10}}, {a:{$gt:0,$lt:15}}, {a:{$gt:20}}]}", expected);
}
// "{$or: [{a:{$gt:1,$lt:5},b:6}, {a:3,b:{$gt:0,$lt:10}}]}" -> a: (1.0, 5.0)
TEST(CMCollapseTreeTest, OrOfAnd3) {
    // The point {a:3} lies inside (1, 5), so one merged interval remains.
    OrderedIntervalList expected;
    expected.intervals.push_back(Interval(BSON("" << 1.0 << "" << 5.0), false, false));
    checkIndexBounds("{$or: [{a:{$gt:1,$lt:5},b:6}, {a:3,b:{$gt:0,$lt:10}}]}", expected);
}
//
// Compound shard key
//
// "{$or: [{a:{$gt:1,$lt:5}, b:{$gt:0,$lt:3}, c:6}, "
// "{a:3, b:{$gt:1,$lt:2}, c:{$gt:0,$lt:10}}]}",
// -> a: (1.0, 5.0), b: (0.0, 3.0)
TEST(CMCollapseTreeTest, OrOfAnd4) {
    // Compound key {a, b}: each field gets its own merged interval list.
    IndexBounds expectedBounds;
    expectedBounds.fields.push_back(OrderedIntervalList());
    expectedBounds.fields.push_back(OrderedIntervalList());
    expectedBounds.fields[0].intervals.push_back(
        Interval(BSON("" << 1.0 << "" << 5.0), false, false));
    expectedBounds.fields[1].intervals.push_back(
        Interval(BSON("" << 0.0 << "" << 3.0), false, false));
    checkIndexBoundsWithKey("{a: 1, b: 1}",  // shard key
                            "{$or: [{a:{$gt:1,$lt:5}, b:{$gt:0,$lt:3}, c:6}, "
                            "{a:3, b:{$gt:1,$lt:2}, c:{$gt:0,$lt:10}}]}",
                            expectedBounds);
}
// "{$or: [{a:{$gt:1,$lt:5}, c:6}, "
// "{a:3, b:{$gt:1,$lt:2}, c:{$gt:0,$lt:10}}]}"));
// ->
TEST(CMCollapseTreeTest, OrOfAnd5) {
    // One branch has no 'b' predicate, so 'b' widens to [MinKey, MaxKey].
    IndexBounds expectedBounds;
    expectedBounds.fields.push_back(OrderedIntervalList());
    expectedBounds.fields.push_back(OrderedIntervalList());
    expectedBounds.fields[0].intervals.push_back(
        Interval(BSON("" << 1.0 << "" << 5.0), false, false));
    BSONObjBuilder builder;
    builder.appendMinKey("");
    builder.appendMaxKey("");
    expectedBounds.fields[1].intervals.push_back(Interval(builder.obj(), true, true));
    checkIndexBoundsWithKey("{a: 1, b: 1}",  // shard key
                            "{$or: [{a:{$gt:1,$lt:5}, c:6}, "
                            "{a:3, b:{$gt:1,$lt:2}, c:{$gt:0,$lt:10}}]}",
                            expectedBounds);
}
// {$or: [{a:{$in:[1]},b:{$in:[1]}}, {a:{$in:[1,5]},b:{$in:[1,5]}}]}
// -> a: [1], [5]; b: [1], [5]
TEST(CMCollapseTreeTest, OrOfAnd6) {
    // $in lists on both fields produce point intervals for each listed value.
    IndexBounds expectedBounds;
    expectedBounds.fields.push_back(OrderedIntervalList());
    expectedBounds.fields.push_back(OrderedIntervalList());
    // a: [1], [5]
    expectedBounds.fields[0].intervals.push_back(
        Interval(BSON("" << 1.0 << "" << 1.0), true, true));
    expectedBounds.fields[0].intervals.push_back(
        Interval(BSON("" << 5.0 << "" << 5.0), true, true));
    // b: [1], [5]
    expectedBounds.fields[1].intervals.push_back(
        Interval(BSON("" << 1.0 << "" << 1.0), true, true));
    expectedBounds.fields[1].intervals.push_back(
        Interval(BSON("" << 5.0 << "" << 5.0), true, true));
    checkIndexBoundsWithKey("{a: 1, b: 1}",  // shard key
                            "{$or: [{a:{$in:[1]},b:{$in:[1]}}, {a:{$in:[1,5]},b:{$in:[1,5]}}]}",
                            expectedBounds);
}
//
// Array operators
//
// {a : {$elemMatch: {b:1}}} -> a.b: [MinKey, MaxKey]
// Shard key doesn't allow multikey, but query on array should succeed without error.
TEST(CMCollapseTreeTest, ElemMatchOneField) {
    // Shard key doesn't allow multikey, but query on array should succeed
    // without error.
    // NOTE(review): the header comment above says a.b: [MinKey, MaxKey], but
    // the expected interval asserted here is the point [1, 1] — confirm which
    // is the intended contract.
    IndexBounds expectedBounds;
    expectedBounds.fields.push_back(OrderedIntervalList());
    OrderedIntervalList& oil = expectedBounds.fields.front();
    oil.intervals.push_back(Interval(BSON("" << 1 << "" << 1), true, true));
    checkIndexBoundsWithKey("{'a.b': 1}", "{a : {$elemMatch: {b:1}}}", expectedBounds);
}
// {foo: {$all: [ {$elemMatch: {a:1, b:1}}, {$elemMatch: {a:2, b:2}}]}}
// -> foo.a: [1, 1]
// Or -> foo.a: [2, 2]
TEST(CMCollapseTreeTest, BasicAllElemMatch) {
    // $all with a single $elemMatch; expect the point interval for 'foo.a'.
    Interval expectedInterval(BSON("" << 1 << "" << 1), true, true);

    const char* queryStr = "{foo: {$all: [ {$elemMatch: {a:1, b:1}} ]}}";
    unique_ptr<CanonicalQuery> query(canonicalize(queryStr));
    ASSERT(query.get() != NULL);

    BSONObj key = fromjson("{'foo.a': 1}");

    IndexBounds indexBounds = ChunkManager::getIndexBoundsForQuery(key, *query.get());
    ASSERT_EQUALS(indexBounds.size(), 1U);
    const OrderedIntervalList& oil = indexBounds.fields.front();
    ASSERT_EQUALS(oil.intervals.size(), 1U);
    const Interval& interval = oil.intervals.front();

    // Choose one of the two possible solutions.
    // Two solutions differ only by assignment of index tags.
    ASSERT(Interval::INTERVAL_EQUALS == interval.compare(expectedInterval));
}
// {a : [1, 2, 3]} -> a: [1, 1], [[1, 2, 3], [1, 2, 3]]
TEST(CMCollapseTreeTest, ArrayEquality) {
    // Equality to an array matches both the first element and the whole array.
    OrderedIntervalList expected;
    expected.intervals.push_back(Interval(BSON("" << 1 << "" << 1), true, true));
    BSONArray array(BSON_ARRAY(1 << 2 << 3));

    Interval interval(BSON("" << array << "" << array), true, true);
    expected.intervals.push_back(interval);
    checkIndexBounds("{a : [1, 2, 3]}", expected);
}
//
// Features: Regex, $where, $text, hashed key
//
// { a: /abc/ } -> a: ["", {}), [/abc/, /abc/]
TEST(CMCollapseTreeTest, Regex) {
    // A regex yields the all-strings range plus a point on the regex itself.
    OrderedIntervalList expected;
    expected.intervals.push_back(Interval(BSON(""
                                               << ""
                                               << "" << BSONObj()),
                                          true,
                                          false));
    BSONObjBuilder builder;
    builder.appendRegex("", "abc");
    builder.appendRegex("", "abc");
    expected.intervals.push_back(Interval(builder.obj(), true, true));
    checkIndexBounds("{ a: /abc/ }", expected);
}
// {$where: 'this.credits == this.debits' }
TEST(CMCollapseTreeTest, Where) {
    // $where cannot be bounded; expect the full [MinKey, MaxKey] range.
    OrderedIntervalList expected;
    BSONObjBuilder builder;
    builder.appendMinKey("");
    builder.appendMaxKey("");
    expected.intervals.push_back(Interval(builder.obj(), true, true));
    checkIndexBounds("{$where: 'this.credits == this.debits' }", expected);
}
// { $text: { $search: "coffee -cake" } }
TEST(CMCollapseTreeTest, Text) {
    // $text cannot be bounded; expect the full [MinKey, MaxKey] range.
    OrderedIntervalList expected;
    BSONObjBuilder builder;
    builder.appendMinKey("");
    builder.appendMaxKey("");
    expected.intervals.push_back(Interval(builder.obj(), true, true));
    checkIndexBounds("{ $text: { $search: 'coffee -cake' } }", expected);
}
// { a: 2, $text: { $search: "leche", $language: "es" } }
TEST(CMCollapseTreeTest, TextWithQuery) {
    // Even combined with an equality, $text forces the full range.
    OrderedIntervalList expected;
    BSONObjBuilder builder;
    builder.appendMinKey("");
    builder.appendMaxKey("");
    expected.intervals.push_back(Interval(builder.obj(), true, true));
    checkIndexBounds("{ a: 2, $text: { $search: 'leche', $language: 'es' } }", expected);
}
// { a: 0 } -> hashed a: [hash(0), hash(0)]
TEST(CMCollapseTreeTest, HashedSinglePoint) {
    // With a hashed key, equality maps to a point interval on the hash value;
    // only pointness is asserted since the hash value itself is opaque here.
    const char* queryStr = "{ a: 0 }";
    unique_ptr<CanonicalQuery> query(canonicalize(queryStr));
    ASSERT(query.get() != NULL);

    BSONObj key = fromjson("{a: 'hashed'}");

    IndexBounds indexBounds = ChunkManager::getIndexBoundsForQuery(key, *query.get());
    ASSERT_EQUALS(indexBounds.size(), 1U);
    const OrderedIntervalList& oil = indexBounds.fields.front();
    ASSERT_EQUALS(oil.intervals.size(), 1U);
    const Interval& interval = oil.intervals.front();
    ASSERT(interval.isPoint());
}
// { a: { $lt: 2, $gt: 1} } -> hashed a: [Minkey, Maxkey]
TEST(CMCollapseTreeTest, HashedRange) {
    // Range predicates cannot be translated through a hash; full range.
    IndexBounds expectedBounds;
    expectedBounds.fields.push_back(OrderedIntervalList());
    OrderedIntervalList& expectedOil = expectedBounds.fields.front();
    BSONObjBuilder builder;
    builder.appendMinKey("");
    builder.appendMaxKey("");
    expectedOil.intervals.push_back(Interval(builder.obj(), true, true));
    checkIndexBoundsWithKey("{a: 'hashed'}", "{ a: { $lt: 2, $gt: 1} }", expectedBounds);
}
// { a: /abc/ } -> hashed a: [Minkey, Maxkey]
TEST(CMCollapseTreeTest, HashedRegex) {
    // A regex on a hashed key also degrades to the full [MinKey, MaxKey].
    IndexBounds expectedBounds;
    expectedBounds.fields.push_back(OrderedIntervalList());
    OrderedIntervalList& expectedOil = expectedBounds.fields.front();

    BSONObjBuilder builder;
    builder.appendMinKey("");
    builder.appendMaxKey("");
    expectedOil.intervals.push_back(Interval(builder.obj(), true, true));
    checkIndexBoundsWithKey("{a: 'hashed'}", "{ a: /abc/ }", expectedBounds);
}
/**
* KeyPattern key bounds generation test
*/
// Asserts that |list| and |expected| have the same length and that each
// (start, end) BSON pair compares equal via woCompare.
void CheckBoundList(const BoundList& list, const BoundList& expected) {
    ASSERT_EQUALS(list.size(), expected.size());
    for (size_t i = 0; i < list.size(); i++) {
        ASSERT_EQUALS(list[i].first.woCompare(expected[i].first), 0);
        ASSERT_EQUALS(list[i].second.woCompare(expected[i].second), 0);
    }
}
// Key { a: 1 }, Bounds a: [0]
// => { a: 0 } -> { a: 0 }
TEST(CMKeyBoundsTest, Basic) {
    // A single point interval flattens to one equal min/max pair.
    IndexBounds bounds;
    bounds.fields.push_back(OrderedIntervalList());
    bounds.fields.front().intervals.push_back(Interval(BSON("" << 0 << "" << 0), true, true));
    BoundList expected;
    expected.push_back(make_pair(fromjson("{a: 0}"), fromjson("{a: 0}")));
    ShardKeyPattern pattern(fromjson("{a: 1}"));
    CheckBoundList(pattern.flattenBounds(bounds), expected);
}
// Key { a: 1 }, Bounds a: [2, 3)
// => { a: 2 } -> { a: 3 } // bound inclusion is ignored.
TEST(CMKeyBoundsTest, SingleInterval) {
    // flattenBounds() keeps only the endpoints; the half-open flag on the
    // interval does not affect the produced pair.
    IndexBounds bounds;
    bounds.fields.push_back(OrderedIntervalList());
    bounds.fields.front().intervals.push_back(Interval(BSON("" << 2 << "" << 3), true, false));
    BoundList expected;
    expected.push_back(make_pair(fromjson("{a: 2}"), fromjson("{a: 3}")));
    ShardKeyPattern pattern(fromjson("{a: 1}"));
    CheckBoundList(pattern.flattenBounds(bounds), expected);
}
// Key { a: 1, b: 1, c: 1 }, Bounds a: [2, 3), b: [2, 3), c: [2: 3)
// => { a: 2, b: 2, c: 2 } -> { a: 3, b: 3, c: 3 }
TEST(CMKeyBoundsTest, MultiIntervals) {
    // Give each of the three key fields the same half-open interval [2, 3).
    IndexBounds bounds;
    for (int field = 0; field < 3; ++field) {
        bounds.fields.push_back(OrderedIntervalList());
        bounds.fields.back().intervals.push_back(
            Interval(BSON("" << 2 << "" << 3), true, false));
    }
    BoundList expected;
    expected.push_back(
        make_pair(fromjson("{ a: 2, b: 2, c: 2 }"), fromjson("{ a: 3, b: 3, c: 3 }")));
    ShardKeyPattern pattern(fromjson("{a: 1, b: 1, c: 1}"));
    CheckBoundList(pattern.flattenBounds(bounds), expected);
}
// Key { a: 1, b: 1, c: 1 }, Bounds a: [0, 0], b: { $in: [4, 5, 6] }, c: [2: 3)
// => { a: 0, b: 4, c: 2 } -> { a: 0, b: 4, c: 3 }
//    { a: 0, b: 5, c: 2 } -> { a: 0, b: 5, c: 3 }
//    { a: 0, b: 6, c: 2 } -> { a: 0, b: 6, c: 3 }
TEST(CMKeyBoundsTest, IntervalExpansion) {
    // Field "a" is a point, so the $in-style points on "b" are expanded
    // into one flattened bound per value.
    IndexBounds bounds;
    bounds.fields.push_back(OrderedIntervalList());
    bounds.fields.push_back(OrderedIntervalList());
    bounds.fields.push_back(OrderedIntervalList());
    bounds.fields[0].intervals.push_back(Interval(BSON("" << 0 << "" << 0), true, true));
    for (int v = 4; v <= 6; ++v) {
        bounds.fields[1].intervals.push_back(Interval(BSON("" << v << "" << v), true, true));
    }
    bounds.fields[2].intervals.push_back(Interval(BSON("" << 2 << "" << 3), true, false));
    BoundList expected;
    expected.push_back(
        make_pair(fromjson("{ a: 0, b: 4, c: 2 }"), fromjson("{ a: 0, b: 4, c: 3 }")));
    expected.push_back(
        make_pair(fromjson("{ a: 0, b: 5, c: 2 }"), fromjson("{ a: 0, b: 5, c: 3 }")));
    expected.push_back(
        make_pair(fromjson("{ a: 0, b: 6, c: 2 }"), fromjson("{ a: 0, b: 6, c: 3 }")));
    ShardKeyPattern pattern(fromjson("{a: 1, b: 1, c: 1}"));
    CheckBoundList(pattern.flattenBounds(bounds), expected);
}
// Key { a: 1, b: 1, c: 1 }, Bounds a: [0, 1], b: { $in: [4, 5, 6] }, c: [2: 3)
// => { a: 0, b: 4, c: 2 } -> { a: 1, b: 6, c: 3 }
// Since field "a" is not a point, expansion after "a" is not allowed.
TEST(CMKeyBoundsTest, NonPointIntervalExpasion) {
    IndexBounds bounds;
    bounds.fields.push_back(OrderedIntervalList());
    bounds.fields.push_back(OrderedIntervalList());
    bounds.fields.push_back(OrderedIntervalList());
    // "a" spans [0, 1], so everything after it collapses to one range.
    bounds.fields[0].intervals.push_back(Interval(BSON("" << 0 << "" << 1), true, true));
    for (int v = 4; v <= 6; ++v) {
        bounds.fields[1].intervals.push_back(Interval(BSON("" << v << "" << v), true, true));
    }
    bounds.fields[2].intervals.push_back(Interval(BSON("" << 2 << "" << 3), true, false));
    BoundList expected;
    expected.push_back(
        make_pair(fromjson("{ a: 0, b: 4, c: 2 }"), fromjson("{ a: 1, b: 6, c: 3 }")));
    ShardKeyPattern pattern(fromjson("{a: 1, b: 1, c: 1}"));
    CheckBoundList(pattern.flattenBounds(bounds), expected);
}
} // namespace
|
/*++
Copyright (c) 2011 Microsoft Corporation
Module Name:
seq_decl_plugin.h
Abstract:
decl_plugin for the theory of sequences
Author:
Nikolaj Bjorner (nbjorner) 2011-14-11
Revision History:
--*/
#include "util/gparams.h"
#include "ast/seq_decl_plugin.h"
#include "ast/arith_decl_plugin.h"
#include "ast/array_decl_plugin.h"
#include "ast/ast_pp.h"
#include "ast/bv_decl_plugin.h"
#include <sstream>
// Decode a single hexadecimal digit.
// Returns true and stores the value (0..15) in d when ch is one of
// [0-9A-Fa-f]; otherwise returns false and leaves d untouched.
static bool is_hex_digit(char ch, unsigned& d) {
    if (ch >= '0' && ch <= '9') {
        d = static_cast<unsigned>(ch - '0');
        return true;
    }
    if (ch >= 'A' && ch <= 'F') {
        d = 10u + static_cast<unsigned>(ch - 'A');
        return true;
    }
    if (ch >= 'a' && ch <= 'f') {
        d = 10u + static_cast<unsigned>(ch - 'a');
        return true;
    }
    return false;
}
// Decode a single octal digit.
// Returns true and stores the value (0..7) in d when ch is '0'..'7';
// otherwise returns false and leaves d untouched.
static bool is_octal_digit(char ch, unsigned& d) {
    if (ch < '0' || ch > '7') {
        return false;
    }
    d = static_cast<unsigned>(ch - '0');
    return true;
}
// Try to decode one escape sequence starting at *s.
// Supported forms, tried in this order:
//   \xHH            exactly two hex digits
//   \O, \OO, \OOO   C-style octal (1-3 digits, longest match)
//   \u{H...H}       SMT-LIB style, 1-5 hex digits
//   \uHHHH          1-4 hex digits
//   \a \b \f \n \r \t \v  and, as a fallback, any single escaped char
// On success: stores the decoded character code in 'result', advances s
// past the sequence and returns true. Otherwise s is left unchanged and
// false is returned.
bool zstring::is_escape_char(char const *& s, unsigned& result) {
    unsigned d1, d2, d3;
    // must begin with a backslash followed by at least one character
    if (*s != '\\' || *(s + 1) == 0) {
        return false;
    }
    // \xHH : both hex digits are required
    if (*(s + 1) == 'x' &&
        is_hex_digit(*(s + 2), d1) && is_hex_digit(*(s + 3), d2)) {
        result = d1*16 + d2;
        s += 4;
        return true;
    }
    /* C-standard octal escapes: either 1, 2, or 3 octal digits,
     * stopping either at 3 digits or at the first non-digit character.
     */
    /* 1 octal digit */
    if (is_octal_digit(*(s + 1), d1) && !is_octal_digit(*(s + 2), d2)) {
        result = d1;
        s += 2;
        return true;
    }
    /* 2 octal digits */
    if (is_octal_digit(*(s + 1), d1) && is_octal_digit(*(s + 2), d2) &&
        !is_octal_digit(*(s + 3), d3)) {
        result = d1 * 8 + d2;
        s += 3;
        return true;
    }
    /* 3 octal digits */
    if (is_octal_digit(*(s + 1), d1) && is_octal_digit(*(s + 2), d2) &&
        is_octal_digit(*(s + 3), d3)) {
        result = d1*64 + d2*8 + d3;
        s += 4;
        return true;
    }
    // \u{H...H} : accumulate up to 5 hex digits until the closing brace;
    // anything else inside the braces makes the whole escape fail.
    if (*(s+1) == 'u' && *(s+2) == '{') {
        result = 0;
        for (unsigned i = 0; i < 5; ++i) {
            if (is_hex_digit(*(s+3+i), d1)) {
                result = 16*result + d1;
            }
            else if (*(s+3+i) == '}') {
                // values above 255 only make sense in unicode mode
                if (result > 255 && !uses_unicode())
                    throw default_exception("unicode characters outside of byte range are not supported");
                s += 4 + i;
                return true;
            }
            else {
                break;
            }
        }
        return false;
    }
    // \uHHHH : 1 to 4 hex digits, longest match
    if (*(s+1) == 'u' && is_hex_digit(*(s+2), d1)) {
        result = d1;
        unsigned i = 0;
        for (; i < 4; ++i) {
            if (is_hex_digit(*(s+3+i), d1)) {
                result = 16*result + d1;
            }
            else {
                break;
            }
        }
        if (result > 255 && !uses_unicode())
            throw default_exception("unicode characters outside of byte range are not supported");
        s += 3 + i;
        return true;
    }
    // single-character escapes; the default case accepts any character
    // after the backslash verbatim
    switch (*(s + 1)) {
    case 'a':
        result = '\a';
        s += 2;
        return true;
    case 'b':
        result = '\b';
        s += 2;
        return true;
#if 0
    case 'e':
        result = '\e';
        s += 2;
        return true;
#endif
    case 'f':
        result = '\f';
        s += 2;
        return true;
    case 'n':
        result = '\n';
        s += 2;
        return true;
    case 'r':
        result = '\r';
        s += 2;
        return true;
    case 't':
        result = '\t';
        s += 2;
        return true;
    case 'v':
        result = '\v';
        s += 2;
        return true;
    default:
        result = *(s + 1);
        s += 2;
        return true;
    }
    // unreachable: every switch branch (including default) returns
    return false;
}
// Build a zstring from a C string, decoding escape sequences.
// is_escape_char() advances s itself on a match; otherwise the raw
// character is stored and s advances by one.
zstring::zstring(char const* s) {
    while (*s) {
        unsigned ch = 0;
        if (is_escape_char(s, ch)) {
            m_buffer.push_back(ch);
        }
        else {
            m_buffer.push_back(*s);
            ++s;
        }
    }
    SASSERT(well_formed());
}
// True when the global "unicode" parameter is enabled, i.e. character
// codes may exceed the single-byte range.
bool zstring::uses_unicode() const {
    return gparams::get_value("unicode") == "true";
}
// Check that every stored character code is within the representable
// range (at most max_char()).
bool zstring::well_formed() const {
    for (unsigned i = 0; i < m_buffer.size(); ++i) {
        if (m_buffer[i] > max_char()) {
            return false;
        }
    }
    return true;
}
// Single-character string from a raw character code.
zstring::zstring(unsigned ch) {
    m_buffer.push_back(ch);
}
// Return a copy of this string with the characters in reverse order.
zstring zstring::reverse() const {
    zstring rev;
    unsigned n = length();
    while (n > 0) {
        --n;
        rev.m_buffer.push_back(m_buffer[n]);
    }
    return rev;
}
// Replace the first occurrence of 'src' in this string by 'dst'.
// If 'src' does not occur, a copy of this string is returned.
// An empty 'src' matches at position 0, so 'dst' is prepended.
zstring zstring::replace(zstring const& src, zstring const& dst) const {
    zstring result;
    if (length() < src.length()) {
        return zstring(*this);
    }
    if (src.length() == 0) {
        return dst + zstring(*this);
    }
    bool found = false;
    for (unsigned i = 0; i < length(); ++i) {
        // a candidate match is only considered before the first
        // replacement and when 'src' still fits at position i
        bool eq = !found && i + src.length() <= length();
        for (unsigned j = 0; eq && j < src.length(); ++j) {
            eq = m_buffer[i+j] == src[j];
        }
        if (eq) {
            result.m_buffer.append(dst.m_buffer);
            found = true;
            i += src.length() - 1;  // skip past the matched source text
        }
        else {
            result.m_buffer.push_back(m_buffer[i]);
        }
    }
    return result;
}
// Printable escapes for the 32 ASCII control characters, indexed by
// character code. \n, \v, \f and \r keep their mnemonic form; the rest
// are rendered as \xNN.
static const char esc_table[32][6] =
    { "\\x00", "\\x01", "\\x02", "\\x03", "\\x04", "\\x05", "\\x06", "\\x07", "\\x08", "\\x09", "\\n", "\\v", "\\f", "\\r", "\\x0E", "\\x0F",
      "\\x10", "\\x11", "\\x12", "\\x13", "\\x14", "\\x15", "\\x16", "\\x17", "\\x18", "\\x19", "\\x1A", "\\x1B", "\\x1C", "\\x1D", "\\x1E", "\\x1F"
};
// Render the string in escaped ASCII form: control characters via
// esc_table, backslash doubled, characters >= 256 as \u{...}, bytes in
// [128, 255] as \xNN, everything else verbatim. Plain characters are
// batched in a small stack buffer to avoid per-character stream writes.
std::string zstring::encode() const {
    std::ostringstream strm;
    char buffer[100];
    unsigned offset = 0;
#define _flush() if (offset > 0) { buffer[offset] = 0; strm << buffer; offset = 0; }
    for (unsigned i = 0; i < m_buffer.size(); ++i) {
        unsigned ch = m_buffer[i];
        // 'ch' is unsigned, so the previous '0 <= ch' test was a tautology
        if (ch < 32) {
            _flush();
            strm << esc_table[ch];
        }
        else if (ch == '\\') {
            _flush();
            strm << "\\\\";
        }
        else if (ch >= 256) {
            _flush();
            strm << "\\u{" << std::hex << ch << std::dec << "}";
        }
        else if (ch >= 128) {
            _flush();
            strm << "\\x" << std::hex << ch << std::dec;
        }
        else {
            // keep one slot free for the NUL terminator _flush() writes
            if (offset == 99) {
                _flush();
            }
            buffer[offset++] = (char)ch;
        }
    }
    _flush();
#undef _flush
    return strm.str();
}
// True iff this string is a suffix of 'other'.
bool zstring::suffixof(zstring const& other) const {
    unsigned n = length();
    unsigned m = other.length();
    if (n > m) return false;
    // compare from the back; bail out on the first mismatch
    for (unsigned i = 0; i < n; ++i) {
        if (m_buffer[n - i - 1] != other[m - i - 1]) {
            return false;
        }
    }
    return true;
}
// True iff this string is a prefix of 'other'.
bool zstring::prefixof(zstring const& other) const {
    if (length() > other.length()) return false;
    unsigned i = 0;
    while (i < length() && m_buffer[i] == other[i]) {
        ++i;
    }
    return i == length();
}
// True iff 'other' occurs as a contiguous substring of this string.
// The empty string is contained in every string.
bool zstring::contains(zstring const& other) const {
    if (other.length() > length()) return false;
    unsigned last = length() - other.length();
    // try every feasible start position, including 'last' itself
    for (unsigned i = 0; i <= last; ++i) {
        bool match = true;
        for (unsigned j = 0; match && j < other.length(); ++j) {
            match = other[j] == m_buffer[j + i];
        }
        if (match) {
            return true;
        }
    }
    return false;
}
// Index of the first occurrence of 'other' at or after 'offset';
// -1 when there is none or the offset is out of range.
int zstring::indexofu(zstring const& other, unsigned offset) const {
    // the empty pattern matches at any valid offset, including the end
    if (offset <= length() && other.length() == 0) return offset;
    if (offset == length()) return -1;
    // guard against unsigned wrap-around of other.length() + offset
    if (offset > other.length() + offset) return -1;
    if (other.length() + offset > length()) return -1;
    unsigned last = length() - other.length();
    for (unsigned i = offset; i <= last; ++i) {
        bool prefix = true;
        for (unsigned j = 0; prefix && j < other.length(); ++j) {
            prefix = m_buffer[i + j] == other[j];
        }
        if (prefix) {
            return static_cast<int>(i);
        }
    }
    return -1;
}
// Index of the last occurrence of 'other' in this string; -1 if none.
// An empty pattern matches at the very end and yields length().
int zstring::last_indexof(zstring const& other) const {
    if (other.length() == 0) return length();
    if (other.length() > length()) return -1;
    // Scan candidate start positions from the highest one,
    // length() - other.length(), down to 0 INCLUSIVE. The previous loop
    // initialized 'last' to length() - other.length() and decremented
    // before the first body execution, so the top-most position was
    // never examined (e.g. last_indexof of "b" in "ab" returned -1).
    for (unsigned last = length() - other.length() + 1; last-- > 0; ) {
        bool suffix = true;
        for (unsigned j = 0; suffix && j < other.length(); ++j) {
            suffix = m_buffer[last + j] == other[j];
        }
        if (suffix) {
            return static_cast<int>(last);
        }
    }
    return -1;
}
// Substring of at most 'len' characters starting at 'offset'.
// Returns the empty string when offset + len wraps around; the result
// is clipped to the end of the string, and an out-of-range offset
// simply yields an empty result.
zstring zstring::extract(unsigned offset, unsigned len) const {
    zstring result;
    // guard against unsigned wrap-around of offset + len
    if (offset + len < offset) return result;
    // all-unsigned arithmetic: the previous version mixed 'int' loop
    // variables with unsigned lengths, inviting conversion warnings
    unsigned last = std::min(offset + len, length());
    for (unsigned i = offset; i < last; ++i) {
        result.m_buffer.push_back(m_buffer[i]);
    }
    return result;
}
// Concatenation: a copy of this string followed by 'other'.
zstring zstring::operator+(zstring const& other) const {
    zstring cat(*this);
    cat.m_buffer.append(other.m_buffer);
    return cat;
}
// Two strings are equal iff they have the same length and the same
// character codes at every position.
bool zstring::operator==(const zstring& other) const {
    unsigned n = length();
    if (n != other.length()) {
        return false;
    }
    for (unsigned i = 0; i < n; ++i) {
        if (m_buffer[i] != other[i]) {
            return false;
        }
    }
    return true;
}
// Negation of operator==.
bool zstring::operator!=(const zstring& other) const {
    return !(*this == other);
}
// Stream a zstring in its escaped ASCII form (see encode()).
std::ostream& operator<<(std::ostream &os, const zstring &str) {
    return os << str.encode();
}
// Lexicographic comparison with the same semantics as strcmp():
// compare the common prefix character by character, then break ties by
// length (the shorter string is smaller).
bool operator<(const zstring& lhs, const zstring& rhs) {
    unsigned prefix = lhs.length() < rhs.length() ? lhs.length() : rhs.length();
    for (unsigned i = 0; i < prefix; ++i) {
        unsigned a = lhs[i];
        unsigned b = rhs[i];
        if (a != b) {
            return a < b;
        }
    }
    // all compared characters are equal; decide on relative lengths
    return lhs.length() < rhs.length();
}
// Construct the plugin with all cached sorts unset; they are created in
// set_manager()/init(). The unicode flag is latched from gparams once,
// at construction time.
seq_decl_plugin::seq_decl_plugin(): m_init(false),
                                    m_stringc_sym("String"),
                                    m_charc_sym("Char"),
                                    m_string(nullptr),
                                    m_char(nullptr),
                                    m_reglan(nullptr),
                                    m_has_re(false),
                                    m_has_seq(false) {
    m_unicode = gparams::get_value("unicode") == "true";
}
// Release plugin-owned objects before the manager goes away: the
// signature table and the reference-counted cached sorts.
void seq_decl_plugin::finalize() {
    // NOTE(review): m_sigs contains nullptr entries (see init()); this
    // relies on dealloc accepting nullptr — confirm.
    for (psig* s : m_sigs) {
        dealloc(s);
    }
    m_manager->dec_ref(m_string);
    m_manager->dec_ref(m_char);
    m_manager->dec_ref(m_reglan);
}
// A sort is a "sort parameter" when its name is numerical; the number
// is the parameter index. The comma operator ensures idx is only
// assigned (and true returned) when the name is numerical.
bool seq_decl_plugin::is_sort_param(sort* s, unsigned& idx) {
    return
        s->get_name().is_numerical() &&
        (idx = s->get_name().get_num(), true);
}
// Structurally match concrete sort s against (possibly parametric)
// signature sort sP, accumulating sort-parameter bindings in 'binding'.
// Returns false when s cannot be an instance of sP under the bindings
// collected so far.
bool seq_decl_plugin::match(ptr_vector<sort>& binding, sort* s, sort* sP) {
    if (s == sP) return true;
    unsigned idx;
    if (is_sort_param(sP, idx)) {
        // bind parameter idx to s, unless it is already bound to a
        // different sort
        if (binding.size() <= idx) binding.resize(idx+1);
        if (binding[idx] && (binding[idx] != s)) return false;
        binding[idx] = s;
        return true;
    }
    if (s->get_family_id() == sP->get_family_id() &&
        s->get_decl_kind() == sP->get_decl_kind() &&
        s->get_num_parameters() == sP->get_num_parameters()) {
        // same head constructor: recursively match the sort parameters
        for (unsigned i = 0, sz = s->get_num_parameters(); i < sz; ++i) {
            parameter const& p = s->get_parameter(i);
            if (p.is_ast() && is_sort(p.get_ast())) {
                parameter const& p2 = sP->get_parameter(i);
                if (!match(binding, to_sort(p.get_ast()), to_sort(p2.get_ast()))) return false;
            }
        }
        return true;
    }
    else {
        TRACE("seq", tout << "Could not match " << mk_pp(s, *m_manager) << " and " << mk_pp(sP, *m_manager) << "\n";);
        return false;
    }
}
/*
  \brief Match a right-associative (variadic) operator: every argument
  sort must match the signature's single domain sort under one common
  binding, and the bound result sort is returned through range_out.
  Raises an exception with a diagnostic message on mismatch.
*/
void seq_decl_plugin::match_assoc(psig& sig, unsigned dsz, sort *const* dom, sort* range, sort_ref& range_out) {
    ptr_vector<sort> binding;
    ast_manager& m = *m_manager;
    if (dsz == 0) {
        std::ostringstream strm;
        strm << "Unexpected number of arguments to '" << sig.m_name << "' ";
        strm << "at least one argument expected " << dsz << " given";
        m.raise_exception(strm.str());
    }
    // all arguments must match the one declared domain sort
    bool is_match = true;
    for (unsigned i = 0; is_match && i < dsz; ++i) {
        SASSERT(dom[i]);
        is_match = match(binding, dom[i], sig.m_dom.get(0));
    }
    if (range && is_match) {
        is_match = match(binding, range, sig.m_range);
    }
    if (!is_match) {
        std::ostringstream strm;
        strm << "Sort of function '" << sig.m_name << "' ";
        strm << "does not match the declared type. Given domain: ";
        for (unsigned i = 0; i < dsz; ++i) {
            strm << mk_pp(dom[i], m) << " ";
        }
        if (range) {
            strm << " and range: " << mk_pp(range, m);
        }
        m.raise_exception(strm.str());
    }
    range_out = apply_binding(binding, sig.m_range);
    SASSERT(range_out);
}
// Match a fixed-arity signature: each argument sort is matched against
// the corresponding declared domain sort under the shared m_binding,
// and the instantiated result sort is returned through range_out.
// Raises a diagnostic exception on arity or sort mismatch, and when a
// nullary application leaves the range unconstrained.
void seq_decl_plugin::match(psig& sig, unsigned dsz, sort *const* dom, sort* range, sort_ref& range_out) {
    m_binding.reset();
    ast_manager& m = *m_manager;
    if (sig.m_dom.size() != dsz) {
        std::ostringstream strm;
        strm << "Unexpected number of arguments to '" << sig.m_name << "' ";
        strm << sig.m_dom.size() << " arguments expected " << dsz << " given";
        m.raise_exception(strm.str());
    }
    bool is_match = true;
    for (unsigned i = 0; is_match && i < dsz; ++i) {
        is_match = match(m_binding, dom[i], sig.m_dom[i].get());
    }
    if (range && is_match) {
        is_match = match(m_binding, range, sig.m_range);
    }
    if (!is_match) {
        std::ostringstream strm;
        strm << "Sort of polymorphic function '" << sig.m_name << "' ";
        strm << "does not match the declared type. ";
        strm << "\nGiven domain: ";
        for (unsigned i = 0; i < dsz; ++i) {
            strm << mk_pp(dom[i], m) << " ";
        }
        if (range) {
            strm << " and range: " << mk_pp(range, m);
        }
        strm << "\nExpected domain: ";
        for (unsigned i = 0; i < dsz; ++i) {
            strm << mk_pp(sig.m_dom[i].get(), m) << " ";
        }
        m.raise_exception(strm.str());
    }
    if (!range && dsz == 0) {
        std::ostringstream strm;
        strm << "Sort of polymorphic function '" << sig.m_name << "' ";
        strm << "is ambiguous. Function takes no arguments and sort of range has not been constrained";
        m.raise_exception(strm.str());
    }
    range_out = apply_binding(m_binding, sig.m_range);
    SASSERT(range_out);
}
// Substitute bound sort parameters (numerically-named sorts) in s.
// Seq/RE sorts are rebuilt recursively; (Seq Char) collapses to the
// cached String sort and (RE String) to RegLan so the built-in sorts
// stay canonical.
sort* seq_decl_plugin::apply_binding(ptr_vector<sort> const& binding, sort* s) {
    unsigned i;
    if (is_sort_param(s, i)) {
        if (binding.size() <= i || !binding[i]) {
            m_manager->raise_exception("Expecting type parameter to be bound");
        }
        return binding[i];
    }
    if (is_sort_of(s, m_family_id, SEQ_SORT) || is_sort_of(s, m_family_id, RE_SORT)) {
        SASSERT(s->get_num_parameters() == 1);
        SASSERT(s->get_parameter(0).is_ast());
        SASSERT(is_sort(s->get_parameter(0).get_ast()));
        sort* p = apply_binding(binding, to_sort(s->get_parameter(0).get_ast()));
        parameter param(p);
        if (p == m_char && s->get_decl_kind() == SEQ_SORT)
            return m_string;
        if (p == m_string && s->get_decl_kind() == RE_SORT)
            return m_reglan;
        return mk_sort(s->get_decl_kind(), 1, &param);
    }
    return s;
}
// Lazily build the operator signature table. Sort parameter 'A' is the
// element sort (a numerically-named placeholder, see is_sort_param);
// seqA/reA are the parametric sequence/regex sorts over A, and strT/reT
// their concrete String/RegLan instances. Entries left nullptr
// (OP_STRING_CONST, OP_CHAR_CONST, and OP_CHAR_LE outside unicode mode)
// are handled specially in mk_func_decl.
void seq_decl_plugin::init() {
    if (m_init) return;
    ast_manager& m = *m_manager;
    m_init = true;
    sort* A = m.mk_uninterpreted_sort(symbol(0u));
    sort* strT = m_string;
    parameter paramA(A);
    parameter paramS(strT);
    sort* seqA = m.mk_sort(m_family_id, SEQ_SORT, 1, &paramA);
    parameter paramSA(seqA);
    sort* reA = m.mk_sort(m_family_id, RE_SORT, 1, &paramSA);
    sort* reT = m.mk_sort(m_family_id, RE_SORT, 1, &paramS);
    sort* boolT = m.mk_bool_sort();
    sort* intT = arith_util(m).mk_int();
    sort* predA = array_util(m).mk_array_sort(A, boolT);
    // domain tuples reused by the signature declarations below
    sort* seqAseqAseqA[3] = { seqA, seqA, seqA };
    sort* seqAreAseqA[3] = { seqA, reA, seqA };
    sort* seqAseqA[2] = { seqA, seqA };
    sort* seqAreA[2] = { seqA, reA };
    sort* reAreA[2] = { reA, reA };
    sort* AreA[2] = { A, reA };
    sort* seqAint2T[3] = { seqA, intT, intT };
    sort* seq2AintT[3] = { seqA, seqA, intT };
    sort* str2T[2] = { strT, strT };
    sort* str3T[3] = { strT, strT, strT };
    sort* strTint2T[3] = { strT, intT, intT };
    sort* strTreT[2] = { strT, reT };
    sort* str2TintT[3] = { strT, strT, intT };
    sort* seqAintT[2] = { seqA, intT };
    sort* seq3A[3] = { seqA, seqA, seqA };
    m_sigs.resize(LAST_SEQ_OP);
    // TBD: have (par ..) construct and load parameterized signature from preamble.
    m_sigs[OP_SEQ_UNIT]      = alloc(psig, m, "seq.unit",     1, 1, &A, seqA);
    m_sigs[OP_SEQ_EMPTY]     = alloc(psig, m, "seq.empty",    1, 0, nullptr, seqA);
    m_sigs[OP_SEQ_CONCAT]    = alloc(psig, m, "seq.++",       1, 2, seqAseqA, seqA);
    m_sigs[OP_SEQ_PREFIX]    = alloc(psig, m, "seq.prefixof", 1, 2, seqAseqA, boolT);
    m_sigs[OP_SEQ_SUFFIX]    = alloc(psig, m, "seq.suffixof", 1, 2, seqAseqA, boolT);
    m_sigs[OP_SEQ_CONTAINS]  = alloc(psig, m, "seq.contains", 1, 2, seqAseqA, boolT);
    m_sigs[OP_SEQ_EXTRACT]   = alloc(psig, m, "seq.extract",  1, 3, seqAint2T, seqA);
    m_sigs[OP_SEQ_REPLACE]   = alloc(psig, m, "seq.replace",  1, 3, seq3A, seqA);
    m_sigs[OP_SEQ_INDEX]     = alloc(psig, m, "seq.indexof",  1, 3, seq2AintT, intT);
    m_sigs[OP_SEQ_LAST_INDEX] = alloc(psig, m, "seq.last_indexof", 1, 2, seqAseqA, intT);
    m_sigs[OP_SEQ_AT]        = alloc(psig, m, "seq.at",       1, 2, seqAintT, seqA);
    m_sigs[OP_SEQ_NTH]       = alloc(psig, m, "seq.nth",      1, 2, seqAintT, A);
    m_sigs[OP_SEQ_NTH_I]     = alloc(psig, m, "seq.nth_i",    1, 2, seqAintT, A);
    m_sigs[OP_SEQ_NTH_U]     = alloc(psig, m, "seq.nth_u",    1, 2, seqAintT, A);
    m_sigs[OP_SEQ_LENGTH]    = alloc(psig, m, "seq.len",      1, 1, &seqA, intT);
    m_sigs[OP_RE_PLUS]       = alloc(psig, m, "re.+",         1, 1, &reA, reA);
    m_sigs[OP_RE_STAR]       = alloc(psig, m, "re.*",         1, 1, &reA, reA);
    m_sigs[OP_RE_OPTION]     = alloc(psig, m, "re.opt",       1, 1, &reA, reA);
    m_sigs[OP_RE_RANGE]      = alloc(psig, m, "re.range",     1, 2, seqAseqA, reA);
    m_sigs[OP_RE_CONCAT]     = alloc(psig, m, "re.++",        1, 2, reAreA, reA);
    m_sigs[OP_RE_UNION]      = alloc(psig, m, "re.union",     1, 2, reAreA, reA);
    m_sigs[OP_RE_INTERSECT]  = alloc(psig, m, "re.inter",     1, 2, reAreA, reA);
    m_sigs[OP_RE_DIFF]       = alloc(psig, m, "re.diff",      1, 2, reAreA, reA);
    m_sigs[OP_RE_LOOP]       = alloc(psig, m, "re.loop",      1, 1, &reA, reA);
    m_sigs[OP_RE_POWER]      = alloc(psig, m, "re.^",         1, 1, &reA, reA);
    m_sigs[OP_RE_COMPLEMENT] = alloc(psig, m, "re.comp",      1, 1, &reA, reA);
    m_sigs[OP_RE_EMPTY_SET]  = alloc(psig, m, "re.empty",     1, 0, nullptr, reA);
    m_sigs[OP_RE_FULL_SEQ_SET] = alloc(psig, m, "re.all",     1, 0, nullptr, reA);
    m_sigs[OP_RE_FULL_CHAR_SET] = alloc(psig, m, "re.allchar", 1, 0, nullptr, reA);
    m_sigs[OP_RE_OF_PRED]    = alloc(psig, m, "re.of.pred",   1, 1, &predA, reA);
    m_sigs[OP_RE_REVERSE]    = alloc(psig, m, "re.reverse",   1, 1, &reA, reA);
    m_sigs[OP_RE_DERIVATIVE] = alloc(psig, m, "re.derivative", 1, 2, AreA, reA);
    m_sigs[_OP_RE_ANTIMOROV_UNION] = alloc(psig, m, "re.union", 1, 2, reAreA, reA);
    m_sigs[OP_SEQ_TO_RE]     = alloc(psig, m, "seq.to.re",    1, 1, &seqA, reA);
    m_sigs[OP_SEQ_IN_RE]     = alloc(psig, m, "seq.in.re",    1, 2, seqAreA, boolT);
    m_sigs[OP_SEQ_REPLACE_RE_ALL] = alloc(psig, m, "str.replace_re_all", 1, 3, seqAreAseqA, seqA);
    m_sigs[OP_SEQ_REPLACE_RE] = alloc(psig, m, "str.replace_re", 1, 3, seqAreAseqA, seqA);
    m_sigs[OP_SEQ_REPLACE_ALL] = alloc(psig, m, "str.replace_all", 1, 3, seqAseqAseqA, seqA);
    // literal constants have no fixed signature; see mk_func_decl
    m_sigs[OP_STRING_CONST]  = nullptr;
    m_sigs[OP_CHAR_CONST]    = nullptr;
    sort* charTcharT[2] = { m_char, m_char };
    m_sigs[OP_CHAR_LE] = unicode() ? alloc(psig, m, "char.<=", 0, 2, charTcharT, boolT) : nullptr;
    m_sigs[_OP_STRING_STRIDOF] = alloc(psig, m, "str.indexof", 0, 3, str2TintT, intT);
    m_sigs[_OP_STRING_STRREPL] = alloc(psig, m, "str.replace", 0, 3, str3T, strT);
    m_sigs[_OP_STRING_FROM_CHAR] = alloc(psig, m, "char", 1, 0, nullptr, strT);
    m_sigs[OP_STRING_ITOS]   = alloc(psig, m, "str.from_int", 0, 1, &intT, strT);
    m_sigs[OP_STRING_STOI]   = alloc(psig, m, "str.to_int", 0, 1, &strT, intT);
    m_sigs[OP_STRING_LT]     = alloc(psig, m, "str.<", 0, 2, str2T, boolT);
    m_sigs[OP_STRING_LE]     = alloc(psig, m, "str.<=", 0, 2, str2T, boolT);
    m_sigs[OP_STRING_IS_DIGIT] = alloc(psig, m, "str.is_digit", 0, 1, &strT, boolT);
    m_sigs[OP_STRING_TO_CODE] = alloc(psig, m, "str.to_code", 0, 1, &strT, intT);
    m_sigs[OP_STRING_FROM_CODE] = alloc(psig, m, "str.from_code", 0, 1, &intT, strT);
    m_sigs[_OP_STRING_CONCAT] = alloc(psig, m, "str.++", 1, 2, str2T, strT);
    m_sigs[_OP_STRING_LENGTH] = alloc(psig, m, "str.len", 0, 1, &strT, intT);
    m_sigs[_OP_STRING_STRCTN] = alloc(psig, m, "str.contains", 0, 2, str2T, boolT);
    m_sigs[_OP_STRING_CHARAT] = alloc(psig, m, "str.at", 0, 2, strTint2T, strT);
    m_sigs[_OP_STRING_PREFIX] = alloc(psig, m, "str.prefixof", 0, 2, str2T, boolT);
    m_sigs[_OP_STRING_SUFFIX] = alloc(psig, m, "str.suffixof", 0, 2, str2T, boolT);
    m_sigs[_OP_STRING_IN_REGEXP]  = alloc(psig, m, "str.in_re", 0, 2, strTreT, boolT);
    m_sigs[_OP_STRING_TO_REGEXP]  = alloc(psig, m, "str.to_re", 0, 1, &strT, reT);
    m_sigs[_OP_REGEXP_EMPTY]      = alloc(psig, m, "re.none", 0, 0, nullptr, reT);
    m_sigs[_OP_REGEXP_FULL_CHAR]  = alloc(psig, m, "re.allchar", 0, 0, nullptr, reT);
    m_sigs[_OP_STRING_SUBSTR]     = alloc(psig, m, "str.substr", 0, 3, strTint2T, strT);
}
// Bind the plugin to its manager and create the built-in sorts.
// Characters are a dedicated "Unicode" sort in unicode mode, otherwise
// 8-bit bit-vectors; String = (Seq Char) and RegLan = (RE String).
// All three cached sorts are reference-counted (released in finalize()).
void seq_decl_plugin::set_manager(ast_manager* m, family_id id) {
    decl_plugin::set_manager(m, id);
    bv_util bv(*m);
    if (unicode())
        m_char = m->mk_sort(symbol("Unicode"), sort_info(m_family_id, _CHAR_SORT, 0, nullptr));
    else
        m_char = bv.mk_sort(8);
    m->inc_ref(m_char);
    parameter param(m_char);
    m_string = m->mk_sort(symbol("String"), sort_info(m_family_id, SEQ_SORT, 1, &param));
    m->inc_ref(m_string);
    parameter paramS(m_string);
    m_reglan = m->mk_sort(m_family_id, RE_SORT, 1, &paramS);
    m->inc_ref(m_reglan);
}
// Create (or return a cached) sort for the given kind. (Seq Char) is
// canonicalized to the String sort; the internal _CHAR/_STRING/_REGLAN
// kinds map straight to the cached sorts created in set_manager().
sort * seq_decl_plugin::mk_sort(decl_kind k, unsigned num_parameters, parameter const * parameters) {
    init();
    ast_manager& m = *m_manager;
    switch (k) {
    case SEQ_SORT:
        if (num_parameters != 1) {
            m.raise_exception("Invalid sequence sort, expecting one parameter");
        }
        if (!parameters[0].is_ast() || !is_sort(parameters[0].get_ast())) {
            m.raise_exception("invalid sequence sort, parameter is not a sort");
        }
        if (parameters[0].get_ast() == m_char) {
            // (Seq Char) is the String sort
            return m_string;
        }
        return m.mk_sort(symbol("Seq"), sort_info(m_family_id, SEQ_SORT, num_parameters, parameters));
    case RE_SORT: {
        if (num_parameters != 1) {
            m.raise_exception("Invalid regex sort, expecting one parameter");
        }
        if (!parameters[0].is_ast() || !is_sort(parameters[0].get_ast())) {
            m.raise_exception("invalid regex sort, parameter is not a sort");
        }
        return m.mk_sort(symbol("RegEx"), sort_info(m_family_id, RE_SORT, num_parameters, parameters));
    }
    case _CHAR_SORT:
        return m_char;
    case _STRING_SORT:
        return m_string;
    case _REGLAN_SORT:
        return m_reglan;
    default:
        UNREACHABLE();
        return nullptr;
    }
}
// Declare a sequence operator of kind k. When the first argument is the
// String sort, the string-variant's pretty name (k_string) is used for
// display, but the declaration kind stays k.
func_decl* seq_decl_plugin::mk_seq_fun(decl_kind k, unsigned arity, sort* const* domain, sort* range, decl_kind k_string) {
    ast_manager& m = *m_manager;
    sort_ref rng(m);
    match(*m_sigs[k], arity, domain, range, rng);
    return m.mk_func_decl(m_sigs[(domain[0] == m_string)?k_string:k]->m_name, arity, domain, rng, func_decl_info(m_family_id, k));
}
// Declare a string alias operator: the name comes from the string
// signature k, while the declaration kind is normalized to the generic
// sequence kind k_seq.
func_decl* seq_decl_plugin::mk_str_fun(decl_kind k, unsigned arity, sort* const* domain, sort* range, decl_kind k_seq) {
    ast_manager& m = *m_manager;
    sort_ref rng(m);
    match(*m_sigs[k], arity, domain, range, rng);
    return m.mk_func_decl(m_sigs[k]->m_name, arity, domain, rng, func_decl_info(m_family_id, k_seq));
}
// Right-associative variant of mk_assoc_fun (see the 7-argument overload).
func_decl* seq_decl_plugin::mk_assoc_fun(decl_kind k, unsigned arity, sort* const* domain, sort* range, decl_kind k_seq, decl_kind k_string) {
    return mk_assoc_fun(k, arity, domain, range, k_seq, k_string, true);
}
// Left-associative-only variant of mk_assoc_fun.
func_decl* seq_decl_plugin::mk_left_assoc_fun(decl_kind k, unsigned arity, sort* const* domain, sort* range, decl_kind k_seq, decl_kind k_string) {
    return mk_assoc_fun(k, arity, domain, range, k_seq, k_string, false);
}
// Declare an associative (variadic) operator; always left-associative,
// and additionally right-associative when is_right is set. The string
// variant's name (k_string) is used when the result is the String sort.
func_decl* seq_decl_plugin::mk_assoc_fun(decl_kind k, unsigned arity, sort* const* domain, sort* range, decl_kind k_seq, decl_kind k_string, bool is_right) {
    ast_manager& m = *m_manager;
    sort_ref rng(m);
    if (arity == 0) {
        m.raise_exception("Invalid function application. At least one argument expected");
    }
    match_assoc(*m_sigs[k], arity, domain, range, rng);
    func_decl_info info(m_family_id, k_seq);
    if (is_right)
        info.set_right_associative(true);
    info.set_left_associative(true);
    // NOTE(review): the declaration is built as a binary function over
    // the result sort (rng, rng) -> rng rather than from 'domain' —
    // presumably intentional for chainable assoc operators; confirm.
    return m.mk_func_decl(m_sigs[(rng == m_string)?k_string:k_seq]->m_name, rng, rng, rng, info);
}
// Main operator factory. Normalizes the SMT-LIB string aliases
// (_OP_STRING_*) onto the generic sequence kinds (and vice versa),
// records whether regex/sequence operators were created (m_has_re,
// m_has_seq), and validates parameters for the parametric operators
// (re.loop, re.^, string and character literals).
func_decl * seq_decl_plugin::mk_func_decl(decl_kind k, unsigned num_parameters, parameter const * parameters,
                                          unsigned arity, sort * const * domain, sort * range) {
    init();
    m_has_seq = true;
    ast_manager& m = *m_manager;
    sort_ref rng(m);
    switch(k) {
    case OP_SEQ_EMPTY:
        match(*m_sigs[k], arity, domain, range, rng);
        if (rng == m_string) {
            // the empty string is the string literal ""
            parameter param(symbol(""));
            return mk_func_decl(OP_STRING_CONST, 1, &param, 0, nullptr, m_string);
        }
        else {
            // parameterize the empty sequence by its sort
            parameter param(rng.get());
            func_decl_info info(m_family_id, k, 1, &param);
            return m.mk_func_decl(m_sigs[k]->m_name, arity, domain, rng, info);
        }
    case OP_RE_PLUS:
    case OP_RE_STAR:
    case OP_RE_OPTION:
    case OP_RE_RANGE:
    case OP_RE_OF_PRED:
    case OP_RE_COMPLEMENT:
    case OP_RE_REVERSE:
    case OP_RE_DERIVATIVE:
    case _OP_RE_ANTIMOROV_UNION:
        m_has_re = true;
        // fall-through
    case OP_SEQ_UNIT:
    case OP_STRING_ITOS:
    case OP_STRING_STOI:
    case OP_STRING_LT:
    case OP_STRING_LE:
        match(*m_sigs[k], arity, domain, range, rng);
        return m.mk_func_decl(m_sigs[k]->m_name, arity, domain, rng, func_decl_info(m_family_id, k));
    case OP_STRING_IS_DIGIT:
    case OP_STRING_TO_CODE:
    case OP_STRING_FROM_CODE:
        match(*m_sigs[k], arity, domain, range, rng);
        return m.mk_func_decl(m_sigs[k]->m_name, arity, domain, rng, func_decl_info(m_family_id, k));
    case _OP_REGEXP_FULL_CHAR:
        // alias: declared with the canonical OP_RE_FULL_CHAR_SET kind
        m_has_re = true;
        if (!range) range = m_reglan;
        match(*m_sigs[k], arity, domain, range, rng);
        return m.mk_func_decl(symbol("re.allchar"), arity, domain, rng, func_decl_info(m_family_id, OP_RE_FULL_CHAR_SET));
    case OP_RE_FULL_CHAR_SET:
        m_has_re = true;
        if (!range) range = m_reglan;
        if (range == m_reglan) {
            match(*m_sigs[k], arity, domain, range, rng);
            return m.mk_func_decl(symbol("re.allchar"), arity, domain, rng, func_decl_info(m_family_id, k));
        }
        return m.mk_func_decl(m_sigs[k]->m_name, arity, domain, range, func_decl_info(m_family_id, k));
    case OP_RE_FULL_SEQ_SET:
        m_has_re = true;
        if (!range) range = m_reglan;
        return m.mk_func_decl(m_sigs[k]->m_name, arity, domain, range, func_decl_info(m_family_id, k));
    case _OP_REGEXP_EMPTY:
        // alias: declared with the canonical OP_RE_EMPTY_SET kind
        m_has_re = true;
        if (!range) range = m_reglan;
        match(*m_sigs[k], arity, domain, range, rng);
        return m.mk_func_decl(symbol("re.none"), arity, domain, rng, func_decl_info(m_family_id, OP_RE_EMPTY_SET));
    case OP_RE_EMPTY_SET:
        m_has_re = true;
        if (!range) range = m_reglan;
        if (range == m_reglan) {
            match(*m_sigs[k], arity, domain, range, rng);
            return m.mk_func_decl(symbol("re.none"), arity, domain, rng, func_decl_info(m_family_id, k));
        }
        return m.mk_func_decl(m_sigs[k]->m_name, arity, domain, range, func_decl_info(m_family_id, k));
    case OP_RE_LOOP:
        // re.loop comes in three shapes: one regex argument with the
        // bounds as declaration parameters, or the bounds passed as one
        // or two extra integer arguments.
        m_has_re = true;
        switch (arity) {
        case 1:
            match(*m_sigs[k], arity, domain, range, rng);
            if (num_parameters == 0 || num_parameters > 2 || !parameters[0].is_int() || (num_parameters == 2 && !parameters[1].is_int())) {
                m.raise_exception("Expecting two numeral parameters to function re-loop");
            }
            return m.mk_func_decl(m_sigs[k]->m_name, arity, domain, rng, func_decl_info(m_family_id, k, num_parameters, parameters));
        case 2:
            if (m_reglan != domain[0] || !arith_util(m).is_int(domain[1])) {
                m.raise_exception("Incorrect type of arguments passed to re.loop. Expecting regular expression and two integer parameters");
            }
            return m.mk_func_decl(m_sigs[k]->m_name, arity, domain, domain[0], func_decl_info(m_family_id, k, num_parameters, parameters));
        case 3:
            if (m_reglan != domain[0] || !arith_util(m).is_int(domain[1]) || !arith_util(m).is_int(domain[2])) {
                m.raise_exception("Incorrect type of arguments passed to re.loop. Expecting regular expression and two integer parameters");
            }
            return m.mk_func_decl(m_sigs[k]->m_name, arity, domain, domain[0], func_decl_info(m_family_id, k, num_parameters, parameters));
        default:
            m.raise_exception("Incorrect number of arguments passed to loop. Expected 1 regular expression and two integer parameters");
        }
    case OP_RE_POWER:
        m_has_re = true;
        if (num_parameters == 1 && parameters[0].is_int() && arity == 1 && parameters[0].get_int() >= 0) {
            rng = domain[0];
            return m.mk_func_decl(m_sigs[k]->m_name, arity, domain, rng, func_decl_info(m_family_id, k, num_parameters, parameters));
        }
        m.raise_exception("Incorrect arguments used for re.^. Expected one non-negative integer parameter");
    case OP_STRING_CONST:
        // string literal: the symbol parameter carries the text
        if (!(num_parameters == 1 && arity == 0 && parameters[0].is_symbol())) {
            m.raise_exception("invalid string declaration");
        }
        return m.mk_const_decl(m_stringc_sym, m_string,
                               func_decl_info(m_family_id, OP_STRING_CONST, num_parameters, parameters));
    case OP_RE_UNION:
    case OP_RE_CONCAT:
    case OP_RE_INTERSECT:
    case OP_RE_DIFF:
        m_has_re = true;
        return mk_left_assoc_fun(k, arity, domain, range, k, k);
    case OP_SEQ_REPLACE_RE_ALL:
    case OP_SEQ_REPLACE_RE:
        m_has_re = true;
        // fall-through
    case OP_SEQ_REPLACE_ALL:
        return mk_str_fun(k, arity, domain, range, k);
    case OP_SEQ_CONCAT:
        return mk_assoc_fun(k, arity, domain, range, k, _OP_STRING_CONCAT);
    case _OP_STRING_CONCAT:
        return mk_assoc_fun(k, arity, domain, range, OP_SEQ_CONCAT, k);
    case _OP_STRING_FROM_CHAR: {
        // (char n) literal: folded into a one-character string constant
        if (!(num_parameters == 1 && parameters[0].is_int()))
            m.raise_exception("character literal expects integer parameter");
        zstring zs(parameters[0].get_int());
        parameter p(zs.encode());
        return m.mk_const_decl(m_stringc_sym, m_string,func_decl_info(m_family_id, OP_STRING_CONST, 1, &p));
    }
    case OP_SEQ_REPLACE:
        return mk_seq_fun(k, arity, domain, range, _OP_STRING_STRREPL);
    case _OP_STRING_STRREPL:
        return mk_str_fun(k, arity, domain, range, OP_SEQ_REPLACE);
    case OP_SEQ_INDEX:
        // the 2-argument form is matched against the 3-argument
        // signature by supplying a synthetic integer slot
        if (arity == 2) {
            sort* dom[3] = { domain[0], domain[1], arith_util(m).mk_int() };
            sort_ref rng(m);
            match(*m_sigs[k], 3, dom, range, rng);
            return m.mk_func_decl(m_sigs[(dom[0] == m_string)?_OP_STRING_STRIDOF:k]->m_name, arity, domain, rng, func_decl_info(m_family_id, k));
        }
        return mk_seq_fun(k, arity, domain, range, _OP_STRING_STRIDOF);
    case _OP_STRING_STRIDOF:
        if (arity == 2) {
            sort* dom[3] = { domain[0], domain[1], arith_util(m).mk_int() };
            sort_ref rng(m);
            match(*m_sigs[k], 3, dom, range, rng);
            return m.mk_func_decl(m_sigs[k]->m_name, arity, domain, rng, func_decl_info(m_family_id, OP_SEQ_INDEX));
        }
        return mk_str_fun(k, arity, domain, range, OP_SEQ_INDEX);
    case OP_SEQ_LAST_INDEX:
        if (arity != 2) {
            // NOTE(review): message typo — "tin" should read "in";
            // raise_exception presumably does not return — confirm,
            // since the case otherwise falls through.
            m.raise_exception("two arguments expected tin last_indexof");
        }
        else {
            return mk_seq_fun(k, arity, domain, range, OP_SEQ_LAST_INDEX);
        }
    case OP_SEQ_PREFIX:
        return mk_seq_fun(k, arity, domain, range, _OP_STRING_PREFIX);
    case _OP_STRING_PREFIX:
        return mk_str_fun(k, arity, domain, range, OP_SEQ_PREFIX);
    case OP_SEQ_SUFFIX:
        return mk_seq_fun(k, arity, domain, range, _OP_STRING_SUFFIX);
    case _OP_STRING_SUFFIX:
        return mk_str_fun(k, arity, domain, range, OP_SEQ_SUFFIX);
    case OP_SEQ_LENGTH:
        return mk_seq_fun(k, arity, domain, range, _OP_STRING_LENGTH);
    case _OP_STRING_LENGTH:
        return mk_str_fun(k, arity, domain, range, OP_SEQ_LENGTH);
    case OP_SEQ_CONTAINS:
        return mk_seq_fun(k, arity, domain, range, _OP_STRING_STRCTN);
    case _OP_STRING_STRCTN:
        return mk_str_fun(k, arity, domain, range, OP_SEQ_CONTAINS);
    case OP_SEQ_TO_RE:
        m_has_re = true;
        return mk_seq_fun(k, arity, domain, range, _OP_STRING_TO_REGEXP);
    case _OP_STRING_TO_REGEXP:
        m_has_re = true;
        return mk_str_fun(k, arity, domain, range, OP_SEQ_TO_RE);
    case OP_CHAR_LE:
        if (arity == 2 && domain[0] == m_char && domain[1] == m_char) {
            return m.mk_func_decl(m_sigs[k]->m_name, arity, domain, m.mk_bool_sort(), func_decl_info(m_family_id, k, 0, nullptr));
        }
        m.raise_exception("Incorrect parameters passed to character comparison");
    case OP_CHAR_CONST:
        // character literal: integer parameter within the char range
        if (!(num_parameters == 1 && arity == 0 &&
              parameters[0].is_int() &&
              0 <= parameters[0].get_int() &&
              parameters[0].get_int() < static_cast<int>(zstring::max_char()))) {
            m.raise_exception("invalid character declaration");
        }
        return m.mk_const_decl(m_charc_sym, m_char, func_decl_info(m_family_id, OP_CHAR_CONST, num_parameters, parameters));
    case OP_SEQ_IN_RE:
        m_has_re = true;
        return mk_seq_fun(k, arity, domain, range, _OP_STRING_IN_REGEXP);
    case _OP_STRING_IN_REGEXP:
        m_has_re = true;
        return mk_str_fun(k, arity, domain, range, OP_SEQ_IN_RE);
    case OP_SEQ_AT:
        return mk_seq_fun(k, arity, domain, range, _OP_STRING_CHARAT);
    case _OP_STRING_CHARAT:
        return mk_str_fun(k, arity, domain, range, OP_SEQ_AT);
    case OP_SEQ_NTH:
    case OP_SEQ_NTH_I:
    case OP_SEQ_NTH_U:
        match(*m_sigs[k], arity, domain, range, rng);
        return m.mk_func_decl(m_sigs[k]->m_name, arity, domain, rng, func_decl_info(m_family_id, k));
    case OP_SEQ_EXTRACT:
        return mk_seq_fun(k, arity, domain, range, _OP_STRING_SUBSTR);
    case _OP_STRING_SUBSTR:
        return mk_str_fun(k, arity, domain, range, OP_SEQ_EXTRACT);
    case _OP_SEQ_SKOLEM: {
        // internal skolem functions carry their display name as the
        // first parameter
        if (num_parameters == 0 || !parameters[0].is_symbol()) {
            m.raise_exception("first parameter to skolem symbol should be a parameter");
        }
        symbol s = parameters[0].get_symbol();
        return m.mk_func_decl(s, arity, domain, range, func_decl_info(m_family_id, k, num_parameters, parameters));
    }
    default:
        UNREACHABLE();
        return nullptr;
    }
}
// Registers every sequence/string operator name, plus legacy and
// alternate SMT-LIB spellings, into 'op_names'.
void seq_decl_plugin::get_op_names(svector<builtin_name> & op_names, symbol const & logic) {
    init();
    // Primary name of each initialized signature, keyed by its op-code index.
    for (unsigned k = 0; k < m_sigs.size(); ++k) {
        if (m_sigs[k])
            op_names.push_back(builtin_name(m_sigs[k]->m_name.str(), k));
    }
    // Alternate spellings accepted for backwards compatibility.
    auto alias = [&](char const* n, unsigned k) {
        op_names.push_back(builtin_name(n, k));
    };
    alias("str.in.re",      _OP_STRING_IN_REGEXP);
    alias("str.in-re",      _OP_STRING_IN_REGEXP);
    alias("str.to.re",      _OP_STRING_TO_REGEXP);
    alias("str.to-re",      _OP_STRING_TO_REGEXP);
    alias("str.to-int",     OP_STRING_STOI);
    alias("str.to.int",     OP_STRING_STOI);
    alias("str.from-int",   OP_STRING_ITOS);
    alias("int.to.str",     OP_STRING_ITOS);
    alias("re.nostr",       _OP_REGEXP_EMPTY);
    alias("re.complement",  OP_RE_COMPLEMENT);
}
// Registers the sort names understood by the sequence theory.
void seq_decl_plugin::get_sort_names(svector<builtin_name> & sort_names, symbol const & logic) {
    init();
    auto add = [&](char const* n, unsigned k) {
        sort_names.push_back(builtin_name(n, k));
    };
    add("Seq",   SEQ_SORT);
    add("RegEx", RE_SORT);
    // TBD:
    // add("Unicode", CHAR_SORT);
    // SMTLIB 2.6 names: RegLan, String.
    add("RegLan", _REGLAN_SORT);
    add("String", _STRING_SORT);
    // SMTLIB 2.5 compatibility.
    add("StringSequence", _STRING_SORT);
}
// Creates a string-literal term from a raw symbol. The text is routed
// through zstring so escape sequences are canonicalized before the
// constant declaration is created.
app* seq_decl_plugin::mk_string(symbol const& s) {
    zstring canonStr(s.bare_str());
    symbol canonSym(canonStr.encode());
    parameter param(canonSym);
    // BUG FIX: '&param' had been mangled into the HTML entity residue
    // "¶m" (U+00B6), which does not compile; restore the address-of.
    func_decl* f = m_manager->mk_const_decl(m_stringc_sym, m_string,
                                            func_decl_info(m_family_id, OP_STRING_CONST, 1, &param));
    return m_manager->mk_const(f);
}
// Creates a string-literal term from an already-canonical zstring.
app* seq_decl_plugin::mk_string(zstring const& s) {
    symbol sym(s.encode());
    parameter param(sym);
    // BUG FIX: '&param' had been mangled into "¶m"; restore the address-of.
    func_decl* f = m_manager->mk_const_decl(m_stringc_sym, m_string,
                                            func_decl_info(m_family_id, OP_STRING_CONST, 1, &param));
    return m_manager->mk_const(f);
}
// Creates a unicode character constant for code point 'u'.
// Only valid in unicode mode; bit-vector characters are made elsewhere
// (see seq_util::mk_char).
app* seq_decl_plugin::mk_char(unsigned u) {
    if (unicode()) {
        parameter param(u);
        // BUG FIX: '&param' had been mangled into "¶m"; restore the address-of.
        func_decl* f = m_manager->mk_const_decl(m_charc_sym, m_char, func_decl_info(m_family_id, OP_CHAR_CONST, 1, &param));
        return m_manager->mk_const(f);
    }
    else {
        UNREACHABLE();
        return nullptr;
    }
}
// seq.nth with an out-of-range index is underspecified, so the
// "nth_u" variant is treated as an uninterpreted function.
bool seq_decl_plugin::is_considered_uninterpreted(func_decl * f) {
    return seq_util(*m_manager).str.is_nth_u(f);
}
// Only concrete character constants are guaranteed pairwise-distinct
// values for this plugin.
bool seq_decl_plugin::is_unique_value(app* e) const {
    return is_app_of(e, m_family_id, OP_CHAR_CONST);
}
// Determines whether 'e' denotes a concrete sequence value: the empty
// sequence, a string or character literal, a unit of a value, or a
// concatenation of such values. Concatenations are traversed
// iteratively over the first argument to bound recursion depth: tail
// arguments are checked recursively in the for-loop, then the scan
// continues with the head argument.
bool seq_decl_plugin::is_value(app* e) const {
while (true) {
if (is_app_of(e, m_family_id, OP_SEQ_EMPTY))
return true;
if (is_app_of(e, m_family_id, OP_STRING_CONST))
return true;
if (is_app_of(e, m_family_id, OP_CHAR_CONST))
return true;
if (is_app_of(e, m_family_id, OP_SEQ_UNIT) &&
m_manager->is_value(e->get_arg(0)))
return true;
if (is_app_of(e, m_family_id, OP_SEQ_CONCAT)) {
bool first = true;
// Check every argument except the first recursively.
// NOTE(review): a tail argument that is not an app (e.g. a variable)
// is not rejected by this test — confirm that is intended.
for (expr* arg : *e) {
if (first) {
first = false;
}
else if (is_app(arg) && !is_value(to_app(arg))) {
return false;
}
}
// Continue the outer loop on the first argument.
if (!is_app(e->get_arg(0))) return false;
e = to_app(e->get_arg(0));
continue;
}
return false;
}
}
// Structural identity is the only equality this plugin recognizes.
// Equal-but-differently-shaped concatenations are not normalized here.
bool seq_decl_plugin::are_equal(app* a, app* b) const {
    return a == b;
}
// Syntactic disequality check: returns true only when 'a' and 'b' are
// certainly distinct values; false means "unknown", not "equal".
bool seq_decl_plugin::are_distinct(app* a, app* b) const {
    if (a == b)
        return false;
    // Two different string literals denote different sequences.
    if (is_app_of(a, m_family_id, OP_STRING_CONST) &&
        is_app_of(b, m_family_id, OP_STRING_CONST))
        return true;
    // Two different character constants are distinct values.
    if (is_app_of(a, m_family_id, OP_CHAR_CONST) &&
        is_app_of(b, m_family_id, OP_CHAR_CONST))
        return true;
    bool a_unit = is_app_of(a, m_family_id, OP_SEQ_UNIT);
    bool b_unit = is_app_of(b, m_family_id, OP_SEQ_UNIT);
    // Unit sequences are distinct iff their elements are distinct.
    if (a_unit && b_unit)
        return m_manager->are_distinct(a->get_arg(0), b->get_arg(0));
    // The empty sequence differs from any unit sequence.
    if (b_unit && is_app_of(a, m_family_id, OP_SEQ_EMPTY))
        return true;
    if (a_unit && is_app_of(b, m_family_id, OP_SEQ_EMPTY))
        return true;
    return false;
}
// Produces an arbitrary inhabitant of sort 's': the empty sequence for
// a sequence sort, the language {""} for a regex sort.
expr* seq_decl_plugin::get_some_value(sort* s) {
    seq_util util(*m_manager);
    if (util.is_seq(s))
        return util.str.mk_empty(s);
    sort* seq = nullptr;
    if (util.is_re(s, seq))
        return util.re.mk_to_re(util.str.mk_empty(seq));
    UNREACHABLE();
    return nullptr;
}
// Creates an application of the internal skolem function 'name' over
// 'args' with result sort 'range'. The name is carried as the first
// declaration parameter of _OP_SEQ_SKOLEM.
app* seq_util::mk_skolem(symbol const& name, unsigned n, expr* const* args, sort* range) {
    SASSERT(range);
    parameter param(name);
    // BUG FIX: '&param' had been mangled into "¶m"; restore the address-of.
    func_decl* f = m.mk_func_decl(get_family_id(), _OP_SEQ_SKOLEM, 1, &param, n, args, range);
    return m.mk_app(f, n, args);
}
// Creates a string literal term.
app* seq_util::str::mk_string(zstring const& s) const {
return u.seq.mk_string(s);
}
// Creates the character term for s[idx].
app* seq_util::str::mk_char(zstring const& s, unsigned idx) const {
return u.mk_char(s[idx]);
}
// Creates the character term for code point 'ch'.
app* seq_util::str::mk_char(unsigned ch) const {
return u.mk_char(ch);
}
// Forwards to seq_util::mk_char_bit below.
app* seq_util::str::mk_char_bit(expr* e, unsigned idx) {
return u.mk_char_bit(e, idx);
}
// Boolean skolem predicate "char.bit"(e) denoting bit 'i' of character 'e'.
// The bit index rides along as the second declaration parameter.
app* seq_util::mk_char_bit(expr* e, unsigned i) {
parameter params[2] = { parameter(symbol("char.bit")), parameter(i) };
sort* range = m.mk_bool_sort();
func_decl* f = m.mk_func_decl(get_family_id(), _OP_SEQ_SKOLEM, 2, params, 1, &e, range);
return m.mk_app(f, 1, &e);
}
// Lazily constructed bit-vector utility, used when characters are
// encoded as 8-bit bit-vectors instead of unicode constants.
bv_util& seq_util::bv() const {
if (!m_bv) m_bv = alloc(bv_util, m);
return *m_bv.get();
}
// Saturating addition: UINT_MAX on unsigned overflow.
unsigned seq_util::max_plus(unsigned x, unsigned y) const {
if (x + y < x || x + y < y)
return UINT_MAX;
return x + y;
}
// Saturating multiplication: UINT_MAX when x*y exceeds unsigned range.
unsigned seq_util::max_mul(unsigned x, unsigned y) const {
uint64_t r = ((uint64_t)x)*((uint64_t)y);
return (r > UINT_MAX) ? UINT_MAX : (unsigned)r;
}
// Tests whether 'e' is a concrete character; stores its code point in 'c'.
// Unicode mode: a char constant. Otherwise: an 8-bit bit-vector numeral.
bool seq_util::is_const_char(expr* e, unsigned& c) const {
if (seq.unicode()) {
// comma operator: assign 'c' as a side effect, then yield true
return is_app_of(e, m_fid, OP_CHAR_CONST) && (c = to_app(e)->get_parameter(0).get_int(), true);
}
else {
rational r;
unsigned sz;
return bv().is_numeral(e, r, sz) && sz == 8 && r.is_unsigned() && (c = r.get_unsigned(), true);
}
}
// Recognizes character <= in whichever encoding is active.
bool seq_util::is_char_le(expr const* e) const {
if (seq.unicode())
return is_app_of(e, m_fid, OP_CHAR_LE);
else
return bv().is_bv_ule(e) && is_char(to_app(e)->get_arg(0));
}
// Creates the character constant for 'ch' in the active encoding.
app* seq_util::mk_char(unsigned ch) const {
if (seq.unicode())
return seq.mk_char(ch);
else
return bv().mk_numeral(rational(ch), 8);
}
// Builds ch1 <= ch2 over characters; constant-folds in bit-vector mode.
app* seq_util::mk_le(expr* ch1, expr* ch2) const {
// keep both arguments alive across the construction below
expr_ref _ch1(ch1, m), _ch2(ch2, m);
if (seq.unicode()) {
expr* es[2] = { ch1, ch2 };
return m.mk_app(m_fid, OP_CHAR_LE, 2, es);
}
else {
rational r1, r2;
if (bv().is_numeral(ch1, r1) && bv().is_numeral(ch2, r2)) {
return m.mk_bool_val(r1 <= r2);
}
return bv().mk_ule(ch1, ch2);
}
}
// ch1 < ch2, encoded as !(ch2 <= ch1).
app* seq_util::mk_lt(expr* ch1, expr* ch2) const {
return m.mk_not(mk_le(ch2, ch1));
}
// Extracts the literal value of a string-constant declaration into 's'.
bool seq_util::str::is_string(func_decl const* f, zstring& s) const {
if (is_string(f)) {
s = zstring(f->get_parameter(0).get_symbol().bare_str());
return true;
}
else {
return false;
}
}
// Expression-level variant of the declaration test above.
bool seq_util::str::is_string(expr const* n, zstring& s) const {
return is_app(n) && is_string(to_app(n)->get_decl(), s);
}
// Matches (seq.nth s i) where i is a non-negative integer numeral;
// on success stores the sequence in 's' and the index in 'idx'.
bool seq_util::str::is_nth_i(expr const* n, expr*& s, unsigned& idx) const {
expr* i = nullptr;
if (!is_nth_i(n, s, i)) return false;
return arith_util(m).is_unsigned(i, idx);
}
// Builds (seq.nth s i) with a concrete integer index.
app* seq_util::str::mk_nth_i(expr* s, unsigned i) const {
return mk_nth_i(s, arith_util(m).mk_int(i));
}
// Flattens nested concatenations of 'e' into 'es', dropping empty
// sequences: left subtrees recursively, the right spine iteratively.
void seq_util::str::get_concat(expr* e, expr_ref_vector& es) const {
expr* e1, *e2;
while (is_concat(e, e1, e2)) {
get_concat(e1, es);
e = e2;
}
if (!is_empty(e)) {
es.push_back(e);
}
}
// Like get_concat, but additionally explodes string literals into
// one unit sequence per character.
void seq_util::str::get_concat_units(expr* e, expr_ref_vector& es) const {
expr* e1, *e2;
while (is_concat(e, e1, e2)) {
get_concat_units(e1, es);
e = e2;
}
zstring s;
if (is_string(e, s)) {
unsigned sz = s.length();
for (unsigned j = 0; j < sz; ++j) {
es.push_back(mk_unit(mk_char(s, j)));
}
}
else if (!is_empty(e)) {
es.push_back(e);
}
}
// s = "" expressed as an equation against the empty sequence.
app* seq_util::str::mk_is_empty(expr* s) const {
return m.mk_eq(s, mk_empty(get_sort(s)));
}
// Lower bound on the length of sequence 's': sums unit terms and literal
// string lengths along the concatenation spine; any other term counts 0.
unsigned seq_util::str::min_length(expr* s) const {
SASSERT(u.is_seq(s));
unsigned result = 0;
expr* s1 = nullptr, *s2 = nullptr;
auto get_length = [&](expr* s1) {
zstring st;
if (is_unit(s1))
return 1u;
else if (is_string(s1, st))
return st.length();
else
return 0u;
};
while (is_concat(s, s1, s2)) {
result += get_length(s1);
s = s2;
}
result += get_length(s);
return result;
}
// Upper bound on the length of sequence 's'; UINT_MAX when unbounded.
// NOTE(review): the lambda's parameter shadows the outer 's1' while the
// captured 's2'/'s3'/'n' are mutated by reference, and is_extract
// overwrites the lambda's own parameter — intricate but apparently
// intentional; confirm before refactoring.
unsigned seq_util::str::max_length(expr* s) const {
SASSERT(u.is_seq(s));
unsigned result = 0;
expr* s1 = nullptr, *s2 = nullptr, *s3 = nullptr;
unsigned n = 0;
zstring st;
auto get_length = [&](expr* s1) {
if (is_empty(s1))
return 0u;
else if (is_unit(s1))
return 1u;
else if (is_at(s1))
return 1u;
else if (is_extract(s1, s1, s2, s3))
return (arith_util(m).is_unsigned(s3, n)) ? n : UINT_MAX;
else if (is_string(s1, st))
return st.length();
else
return UINT_MAX;
};
while (is_concat(s, s1, s2)) {
// saturating add so unbounded components propagate UINT_MAX
result = u.max_plus(get_length(s), result);
s = s2;
}
result = u.max_plus(get_length(s), result);
return result;
}
// Minimum length of any word in regex 'r', taken from the cached info.
unsigned seq_util::rex::min_length(expr* r) const {
SASSERT(u.is_re(r));
return get_info(r).min_length;
}
// Maximum length of any word in regex 'r'; UINT_MAX when unbounded.
// Computed structurally (unlike min_length, this is not cached).
unsigned seq_util::rex::max_length(expr* r) const {
SASSERT(u.is_re(r));
expr* r1 = nullptr, *r2 = nullptr, *s = nullptr;
unsigned lo = 0, hi = 0;
if (is_empty(r))
return 0;
if (is_concat(r, r1, r2))
return u.max_plus(max_length(r1), max_length(r2));
if (is_union(r, r1, r2) || m.is_ite(r, s, r1, r2))
return std::max(max_length(r1), max_length(r2));
if (is_intersection(r, r1, r2))
return std::min(max_length(r1), max_length(r2));
if (is_diff(r, r1, r2) || is_reverse(r, r1) || is_opt(r, r1))
return max_length(r1);
if (is_loop(r, r1, lo, hi))
return u.max_mul(hi, max_length(r1));
if (is_to_re(r, s))
return u.str.max_length(s);
if (is_range(r) || is_of_pred(r) || is_full_char(r))
return 1;
// Else: star, plus, complement, full_seq, loop(r,r1,lo), derivative
return UINT_MAX;
}
// The sequence sort a regex sort ranges over (first sort parameter).
sort* seq_util::rex::to_seq(sort* re) {
(void)u; // 'u' is only referenced inside SASSERT in release builds
SASSERT(u.is_re(re));
return to_sort(re->get_parameter(0).get_ast());
}
// r{lo,}: loop with only a lower bound, carried as an integer parameter.
app* seq_util::rex::mk_loop(expr* r, unsigned lo) {
    parameter param(lo);
    // BUG FIX: '&param' had been mangled into "¶m"; restore the address-of.
    return m.mk_app(m_fid, OP_RE_LOOP, 1, &param, 1, &r);
}
// r{lo,hi}: loop with both bounds as integer parameters.
app* seq_util::rex::mk_loop(expr* r, unsigned lo, unsigned hi) {
parameter params[2] = { parameter(lo), parameter(hi) };
return m.mk_app(m_fid, OP_RE_LOOP, 2, params, 1, &r);
}
// r{lo,}: lower bound supplied as an expression argument.
app* seq_util::rex::mk_loop(expr* r, expr* lo) {
expr* rs[2] = { r, lo };
return m.mk_app(m_fid, OP_RE_LOOP, 0, nullptr, 2, rs);
}
// r{lo,hi}: both bounds supplied as expression arguments.
app* seq_util::rex::mk_loop(expr* r, expr* lo, expr* hi) {
expr* rs[3] = { r, lo, hi };
return m.mk_app(m_fid, OP_RE_LOOP, 0, nullptr, 3, rs);
}
// Regex matching any single character, for regex sort 's'.
app* seq_util::rex::mk_full_char(sort* s) {
return m.mk_app(m_fid, OP_RE_FULL_CHAR_SET, 0, nullptr, 0, nullptr, s);
}
// Regex matching every sequence, for regex sort 's'.
app* seq_util::rex::mk_full_seq(sort* s) {
return m.mk_app(m_fid, OP_RE_FULL_SEQ_SET, 0, nullptr, 0, nullptr, s);
}
// The empty regex (matches nothing), for regex sort 's'.
app* seq_util::rex::mk_empty(sort* s) {
return m.mk_app(m_fid, OP_RE_EMPTY_SET, 0, nullptr, 0, nullptr, s);
}
// Regex of unit sequences whose character satisfies predicate 'p'.
app* seq_util::rex::mk_of_pred(expr* p) {
return m.mk_app(m_fid, OP_RE_OF_PRED, 0, nullptr, 1, &p);
}
// Matches r{lo,hi} where both bounds are integer declaration parameters.
bool seq_util::rex::is_loop(expr const* n, expr*& body, unsigned& lo, unsigned& hi) const {
if (is_loop(n)) {
app const* a = to_app(n);
if (a->get_num_args() == 1 && a->get_decl()->get_num_parameters() == 2) {
body = a->get_arg(0);
lo = a->get_decl()->get_parameter(0).get_int();
hi = a->get_decl()->get_parameter(1).get_int();
return true;
}
}
return false;
}
// Matches r{lo,} where the lower bound is an integer declaration parameter.
bool seq_util::rex::is_loop(expr const* n, expr*& body, unsigned& lo) const {
if (is_loop(n)) {
app const* a = to_app(n);
if (a->get_num_args() == 1 && a->get_decl()->get_num_parameters() == 1) {
body = a->get_arg(0);
lo = a->get_decl()->get_parameter(0).get_int();
return true;
}
}
return false;
}
// Matches r{lo,hi} where both bounds are expression arguments.
bool seq_util::rex::is_loop(expr const* n, expr*& body, expr*& lo, expr*& hi) const {
if (is_loop(n)) {
app const* a = to_app(n);
if (a->get_num_args() == 3) {
body = a->get_arg(0);
lo = a->get_arg(1);
hi = a->get_arg(2);
return true;
}
}
return false;
}
// Matches r{lo,} where the lower bound is an expression argument.
bool seq_util::rex::is_loop(expr const* n, expr*& body, expr*& lo) const {
if (is_loop(n)) {
app const* a = to_app(n);
if (a->get_num_args() == 2) {
body = a->get_arg(0);
lo = a->get_arg(1);
return true;
}
}
return false;
}
/**
Returns true iff e is the epsilon regex.
*/
bool seq_util::rex::is_epsilon(expr* r) const {
expr* s;
// epsilon is (str.to_re "")
return is_to_re(r, s) && u.str.is_empty(s);
}
/**
Makes the epsilon regex for a given sequence sort.
*/
app* seq_util::rex::mk_epsilon(sort* seq_sort) {
return mk_to_re(u.str.mk_empty(seq_sort));
}
/*
Produces compact view of concrete concatenations such as (abcd).
*/
std::ostream& seq_util::rex::pp::compact_helper_seq(std::ostream& out, expr* s) const {
SASSERT(re.u.is_seq(s));
if (re.u.str.is_empty(s))
out << "()";
else if (re.u.str.is_unit(s))
seq_unit(out, s);
else if (re.u.str.is_concat(s)) {
// flatten and render each concatenation element in turn
expr_ref_vector es(re.m);
re.u.str.get_concat(s, es);
for (expr* e : es)
compact_helper_seq(out, e);
}
//using braces to indicate 'full' output
//for example an uninterpreted constant X will be printed as {X}
//while a unit sequence "X" will be printed as X
//thus for example (concat "X" "Y" Z "W") where Z is uninterpreted is printed as XY{Z}W
else out << "{" << mk_pp(s, re.m) << "}";
return out;
}
/*
Produces output such as [a-z] for a range.
*/
std::ostream& seq_util::rex::pp::compact_helper_range(std::ostream& out, expr* s1, expr* s2) const {
out << "[";
seq_unit(out, s1) << "-";
seq_unit(out, s2) << "]";
return out;
}
/*
Checks if parenthesis can be omitted in some cases in a loop body or in complement.
*/
bool seq_util::rex::pp::can_skip_parenth(expr* r) const {
expr* s;
// single-character-ish regexes bind tightly enough to omit parens
return ((re.is_to_re(r, s) && re.u.str.is_unit(s)) || re.is_range(r) || re.is_empty(r) || re.is_epsilon(r) || re.is_full_char(r));
}
/*
  Specialize output for a unit sequence, converting to visible ASCII
  characters where possible. In html_encode mode the HTML-special
  characters are emitted as entities.
*/
std::ostream& seq_util::rex::pp::seq_unit(std::ostream& out, expr* s) const {
    expr* e;
    unsigned n = 0;
    if (re.u.str.is_unit(s, e) && re.u.is_const_char(e, n)) {
        char c = (char)n;
        if (c == '\n')
            out << "\\n";
        else if (c == '\r')
            out << "\\r";
        else if (c == '\f')
            out << "\\f";
        else if (c == ' ')
            out << "\\s";
        else if (c == '(' || c == ')' || c == '{' || c == '}' || c == '[' || c == ']' || c == '.' || c == '\\')
            out << "\\" << c;
        else if (32 < n && n < 127) {
            if (html_encode) {
                // BUG FIX: these entity literals had been HTML-unescaped
                // into raw characters (leaving `out << """;` — a syntax
                // error, and `out << "<";` etc. defeating the encoding).
                // Restore the intended HTML entities.
                if (c == '<')
                    out << "&lt;";
                else if (c == '>')
                    out << "&gt;";
                else if (c == '&')
                    out << "&amp;";
                else if (c == '\"')
                    out << "&quot;";
                else
                    // NOTE(review): other printable characters are
                    // hex-escaped in HTML mode — confirm intended.
                    out << "\\x" << std::hex << n;
            }
            else
                out << c;
        }
        else if (n <= 0xF)
            out << "\\x0" << std::hex << n;
        else if (n <= 0xFF)
            out << "\\x" << std::hex << n;
        else if (n <= 0xFFF)
            out << "\\u0" << std::hex << n;
        else
            out << "\\u" << std::hex << n;
    }
    else
        out << "{" << mk_pp(s, re.m) << "}";
    return out;
}
/*
  Pretty prints the regex e (held by this pp object) into 'out'.
*/
std::ostream& seq_util::rex::pp::display(std::ostream& out) const {
    expr* r1 = nullptr, * r2 = nullptr, * s = nullptr, * s2 = nullptr;
    unsigned lo = 0, hi = 0;
    if (re.is_full_char(e))
        return out << ".";
    else if (re.is_full_seq(e))
        return out << ".*";
    else if (re.is_to_re(e, s))
        return compact_helper_seq(out, s);
    else if (re.is_range(e, s, s2))
        return compact_helper_range(out, s, s2);
    else if (re.is_epsilon(e))
        return out << "()";
    else if (re.is_empty(e))
        return out << "[]";
    else if (re.is_concat(e, r1, r2))
        return out << pp(re, r1) << pp(re, r2);
    else if (re.is_union(e, r1, r2))
        return out << pp(re, r1) << "|" << pp(re, r2);
    else if (re.is_intersection(e, r1, r2))
        // BUG FIX: both branches of the ternary were identical (")&(")
        // — the HTML-encoded form must escape '&' as "&amp;".
        return out << "(" << pp(re, r1) << (html_encode ? ")&amp;(" : ")&(") << pp(re, r2) << ")";
    else if (re.is_complement(e, r1)) {
        if (can_skip_parenth(r1))
            return out << "~" << pp(re, r1);
        else
            return out << "~(" << pp(re, r1) << ")";
    }
    else if (re.is_plus(e, r1)) {
        if (can_skip_parenth(r1))
            return out << pp(re, r1) << "+";
        else
            return out << "(" << pp(re, r1) << ")+";
    }
    else if (re.is_star(e, r1)) {
        if (can_skip_parenth(r1))
            return out << pp(re, r1) << "*";
        else
            return out << "(" << pp(re, r1) << ")*";
    }
    else if (re.is_loop(e, r1, lo)) {
        if (can_skip_parenth(r1))
            return out << pp(re, r1) << "{" << lo << ",}";
        else
            return out << "(" << pp(re, r1) << "){" << lo << ",}";
    }
    else if (re.is_loop(e, r1, lo, hi)) {
        if (can_skip_parenth(r1)) {
            if (lo == hi)
                return out << pp(re, r1) << "{" << lo << "}";
            else
                return out << pp(re, r1) << "{" << lo << "," << hi << "}";
        }
        else {
            if (lo == hi)
                return out << "(" << pp(re, r1) << "){" << lo << "}";
            else
                return out << "(" << pp(re, r1) << "){" << lo << "," << hi << "}";
        }
    }
    else if (re.is_diff(e, r1, r2))
        return out << "(" << pp(re, r1) << ")\\(" << pp(re, r2) << ")";
    else if (re.m.is_ite(e, s, r1, r2))
        return out << "if(" << mk_pp(s, re.m) << "," << pp(re, r1) << "," << pp(re, r2) << ")";
    else if (re.is_opt(e, r1)) {
        if (can_skip_parenth(r1))
            return out << pp(re, r1) << "?";
        else
            return out << "(" << pp(re, r1) << ")?";
    }
    else if (re.is_reverse(e, r1))
        return out << "reverse(" << pp(re, r1) << ")";
    else
        // Else: derivative or is_of_pred
        return out << "{" << mk_pp(e, re.m) << "}";
}
/*
Pretty prints the regex r into the output string
*/
std::string seq_util::rex::to_str(expr* r) const {
std::ostringstream out;
out << pp(u.re, r);
return out.str();
}
/*
Returns true iff info has been computed for the regex r
*/
bool seq_util::rex::has_valid_info(expr* r) const {
// m_infos is indexed by expression id
return r->get_id() < m_infos.size() && m_infos[r->get_id()].is_valid();
}
/*
Returns the info in the cache if the info is valid. Returns invalid_info otherwise.
*/
seq_util::rex::info seq_util::rex::get_cached_info(expr* e) const {
if (has_valid_info(e))
return m_infos[e->get_id()];
else
return invalid_info;
}
/*
Get the information value associated with the regular expression e
*/
seq_util::rex::info seq_util::rex::get_info(expr* e) const
{
SASSERT(u.is_re(e));
auto result = get_cached_info(e);
if (result.is_valid())
return result;
// pin the expression so the id-indexed cache entry stays meaningful
m_info_pinned.push_back(e);
return get_info_rec(e);
}
/*
Gets the info value for the given regex e, possibly making a new info recursively over the structure of e.
*/
seq_util::rex::info seq_util::rex::get_info_rec(expr* e) const {
auto result = get_cached_info(e);
if (result.is_valid())
return result;
if (!is_app(e))
result = unknown_info;
else
result = mk_info_rec(to_app(e));
// memoize; setx fills any gap entries with invalid_info
m_infos.setx(e->get_id(), result, invalid_info);
STRACE("re_info", tout << "compute_info(" << pp(u.re, e) << ")=" << result << std::endl;);
return result;
}
/*
Computes the info value for the given regex e recursively over the structure of e.
The regex e does not yet have an entry in the cache.
NOTE(review): the positional info(...) constructor calls below are
presumed to follow the field order used elsewhere in this file
(classical, standard, interpreted, nonbranching, normalized, monadic,
singleton, nullable, min_length, star_height) — confirm against the
info constructor declaration.
*/
seq_util::rex::info seq_util::rex::mk_info_rec(app* e) const {
info i1, i2;
lbool nullable(l_false);
unsigned min_length(0), lower_bound(0), upper_bound(UINT_MAX);
bool is_value(false);
if (e->get_family_id() == u.get_family_id()) {
switch (e->get_decl()->get_decl_kind()) {
case OP_RE_EMPTY_SET:
return info(true, true, true, true, true, true, false, l_false, UINT_MAX, 0);
case OP_RE_FULL_SEQ_SET:
return info(true, true, true, true, true, true, false, l_true, 0, 1);
case OP_RE_STAR:
i1 = get_info_rec(e->get_arg(0));
return i1.star();
case OP_RE_OPTION:
i1 = get_info_rec(e->get_arg(0));
return i1.opt();
case OP_RE_RANGE:
case OP_RE_FULL_CHAR_SET:
case OP_RE_OF_PRED:
//TBD: check if the character predicate contains uninterpreted symbols or is nonground or is unsat
//TBD: check if the range is unsat
return info(true, true, true, true, true, true, true, l_false, 1, 0);
case OP_RE_CONCAT:
i1 = get_info_rec(e->get_arg(0));
i2 = get_info_rec(e->get_arg(1));
// second flag tells concat() whether the lhs is itself a concat
return i1.concat(i2, u.re.is_concat(e->get_arg(0)));
case OP_RE_UNION:
i1 = get_info_rec(e->get_arg(0));
i2 = get_info_rec(e->get_arg(1));
return i1.disj(i2);
case OP_RE_INTERSECT:
i1 = get_info_rec(e->get_arg(0));
i2 = get_info_rec(e->get_arg(1));
return i1.conj(i2);
case OP_SEQ_TO_RE:
// (str.to_re s): nullable iff s is (known to be) the empty string
min_length = u.str.min_length(e->get_arg(0));
is_value = m.is_value(e->get_arg(0));
nullable = (is_value && min_length == 0 ? l_true : (min_length > 0 ? l_false : l_undef));
return info(true, true, is_value, true, true, true, (min_length == 1 && u.str.max_length(e->get_arg(0)) == 1), nullable, min_length, 0);
case OP_RE_REVERSE:
// reversal preserves all tracked properties
return get_info_rec(e->get_arg(0));
case OP_RE_PLUS:
i1 = get_info_rec(e->get_arg(0));
return i1.plus();
case OP_RE_COMPLEMENT:
i1 = get_info_rec(e->get_arg(0));
return i1.complement();
case OP_RE_LOOP:
i1 = get_info_rec(e->get_arg(0));
// bounds may be declaration parameters (one or two)
if (e->get_decl()->get_num_parameters() >= 1)
lower_bound = e->get_decl()->get_parameter(0).get_int();
if (e->get_decl()->get_num_parameters() == 2)
upper_bound = e->get_decl()->get_parameter(1).get_int();
return i1.loop(lower_bound, upper_bound);
case OP_RE_DIFF:
i1 = get_info_rec(e->get_arg(0));
i2 = get_info_rec(e->get_arg(1));
return i1.diff(i2);
}
return unknown_info;
}
expr* c, * t, * f;
if (u.m.is_ite(e, c, t, f)) {
i1 = get_info_rec(t);
i2 = get_info_rec(f);
return i1.orelse(i2);
}
return unknown_info;
}
// Renders the info record; "UNKNOWN" when valid but not known,
// "INVALID" for the sentinel cache entry.
std::ostream& seq_util::rex::info::display(std::ostream& out) const {
if (is_known()) {
out << "info("
<< "nullable=" << (nullable == l_true ? "T" : (nullable == l_false ? "F" : "U")) << ", "
<< "classical=" << (classical ? "T" : "F") << ", "
<< "standard=" << (standard ? "T" : "F") << ", "
<< "nonbranching=" << (nonbranching ? "T" : "F") << ", "
<< "normalized=" << (normalized ? "T" : "F") << ", "
<< "monadic=" << (monadic ? "T" : "F") << ", "
<< "singleton=" << (singleton ? "T" : "F") << ", "
<< "min_length=" << min_length << ", "
<< "star_height=" << star_height << ")";
}
else if (is_valid())
out << "UNKNOWN";
else
out << "INVALID";
return out;
}
/*
String representation of the info.
*/
std::string seq_util::rex::info::str() const {
std::ostringstream out;
display(out);
return out.str();
}
// Info of r* given the info of r: always nullable, min length 0,
// star height bumped by one.
seq_util::rex::info seq_util::rex::info::star() const {
//if is_known() is false then all mentioned properties will remain false
return seq_util::rex::info(classical, classical, interpreted, nonbranching, normalized, monadic, false, l_true, 0, star_height + 1);
}
// Info of r+ given the info of r; nullability and min length carry over.
seq_util::rex::info seq_util::rex::info::plus() const {
if (is_known()) {
//plus never occurs in a normalized regex
return info(classical, classical, interpreted, nonbranching, false, monadic, false, nullable, min_length, star_height + 1);
}
else
return *this;
}
// Info of r? given the info of r: nullable, min length 0.
seq_util::rex::info seq_util::rex::info::opt() const {
//if is_known() is false then all mentioned properties will remain false
//optional construct never occurs in a normalized regex
return seq_util::rex::info(classical, classical, interpreted, nonbranching, false, monadic, false, l_true, 0, star_height);
}
// Info of ~r: nullability flips (unknown stays unknown); min length is
// 1 when the complement is known non-nullable, else 0.
seq_util::rex::info seq_util::rex::info::complement() const {
if (is_known()) {
lbool compl_nullable = (nullable == l_true ? l_false : (nullable == l_false ? l_true : l_undef));
unsigned compl_min_length = (compl_nullable == l_false ? 1 : 0);
return info(false, standard, interpreted, nonbranching, normalized, monadic, false, compl_nullable, compl_min_length, star_height);
}
else
return *this;
}
// Info of (this ++ rhs). 'lhs_is_concat' marks a left-nested concat,
// which is not in normalized (right-associated) form.
seq_util::rex::info seq_util::rex::info::concat(seq_util::rex::info const& rhs, bool lhs_is_concat) const {
if (is_known()) {
if (rhs.is_known()) {
// saturating addition of the two minimum lengths
unsigned m = min_length + rhs.min_length;
if (m < min_length || m < rhs.min_length)
m = UINT_MAX;
// NOTE(review): '&' here vs '&&' on the next line — identical for
// bools, but inconsistent; presumably both were meant as '&&'.
return info(classical & rhs.classical,
classical && rhs.classical, //both args of concat must be classical for it to be standard
interpreted && rhs.interpreted,
nonbranching && rhs.nonbranching,
(normalized && !lhs_is_concat && rhs.normalized),
monadic && rhs.monadic,
false,
((nullable == l_false || rhs.nullable == l_false) ? l_false : ((nullable == l_true && rhs.nullable == l_true) ? l_true : l_undef)),
m,
std::max(star_height, rhs.star_height));
}
else
return rhs;
}
else
return *this;
}
// Info of (this | rhs): nullable if either side is; min length is the
// smaller of the two.
seq_util::rex::info seq_util::rex::info::disj(seq_util::rex::info const& rhs) const {
if (is_known() || rhs.is_known()) {
//works correctly if one of the arguments is unknown
return info(classical & rhs.classical,
standard && rhs.standard,
interpreted && rhs.interpreted,
nonbranching && rhs.nonbranching,
normalized && rhs.normalized,
monadic && rhs.monadic,
singleton && rhs.singleton,
((nullable == l_true || rhs.nullable == l_true) ? l_true : ((nullable == l_false && rhs.nullable == l_false) ? l_false : l_undef)),
std::min(min_length, rhs.min_length),
std::max(star_height, rhs.star_height));
}
else
return rhs;
}
// Info of (this & rhs): nullable only if both sides are; min length is
// the larger of the two.
seq_util::rex::info seq_util::rex::info::conj(seq_util::rex::info const& rhs) const {
if (is_known()) {
if (rhs.is_known()) {
return info(false,
standard && rhs.standard,
interpreted && rhs.interpreted,
nonbranching && rhs.nonbranching,
normalized && rhs.normalized,
monadic && rhs.monadic,
singleton && rhs.singleton,
((nullable == l_true && rhs.nullable == l_true) ? l_true : ((nullable == l_false || rhs.nullable == l_false) ? l_false : l_undef)),
std::max(min_length, rhs.min_length),
std::max(star_height, rhs.star_height));
}
else
return rhs;
}
else
return *this;
}
// Info of (this \ rhs).
// NOTE(review): the nullable computation is asymmetric — when this
// side's nullability is l_undef and rhs.nullable == l_false the result
// is forced to l_false rather than l_undef; confirm that is intended.
seq_util::rex::info seq_util::rex::info::diff(seq_util::rex::info const& rhs) const {
if (is_known()) {
if (rhs.is_known()) {
return info(false,
standard & rhs.standard,
interpreted & rhs.interpreted,
nonbranching & rhs.nonbranching,
normalized & rhs.normalized,
monadic & rhs.monadic,
false,
((nullable == l_true && rhs.nullable == l_false) ? l_true : ((nullable == l_false || rhs.nullable == l_false) ? l_false : l_undef)),
std::max(min_length, rhs.min_length),
std::max(star_height, rhs.star_height));
}
else
return rhs;
}
else
return *this;
}
// Info of if-then-else over regexes; condition-dependent fields (e.g.
// nullable, min_length) conservatively keep this side's values.
seq_util::rex::info seq_util::rex::info::orelse(seq_util::rex::info const& i) const {
if (is_known()) {
if (i.is_known()) {
// unsigned ite_min_length = std::min(min_length, i.min_length);
// lbool ite_nullable = (nullable == i.nullable ? nullable : l_undef);
//TBD: whether ite is interpreted or not depends on whether the condition is interpreted and both branches are interpreted
return info(false, false, false, false, normalized && i.normalized, monadic && i.monadic, singleton && i.singleton, nullable, min_length, std::max(star_height, i.star_height));
}
else
return i;
}
else
return *this;
}
// Info of r{lower,upper} given the info of r.
// NOTE(review): the info(...) calls below pass 'singleton' in the
// position where the other constructors pass 'monadic' — possibly a
// transposed argument; confirm against the info constructor.
seq_util::rex::info seq_util::rex::info::loop(unsigned lower, unsigned upper) const {
if (is_known()) {
// saturating multiplication for the minimum length
unsigned m = min_length * lower;
if (m > 0 && (m < min_length || m < lower))
m = UINT_MAX;
lbool loop_nullable = (nullable == l_true || lower == 0 ? l_true : nullable);
if (upper == UINT_MAX) {
//this means the loop is r{lower,*} and is therefore not normalized
//normalized regex would be r{lower,lower}r* and would in particular not use r{0,} for r*
return info(classical, classical, interpreted, nonbranching, false, singleton, false, loop_nullable, m, star_height + 1);
}
else {
bool loop_normalized = normalized;
//r{lower,upper} is not normalized if r is nullable but lower > 0
//r{0,1} is not normalized: it should be ()|r
//r{1,1} is not normalized: it should be r
//r{lower,upper} is not normalized if lower > upper it should then be [] (empty)
if ((nullable == l_true && lower > 0) || upper == 1 || lower > upper)
loop_normalized = false;
return info(classical, classical, interpreted, nonbranching, loop_normalized, singleton, false, loop_nullable, m, star_height);
}
}
else
return *this;
}
|
// PX2InputManager.hpp
#ifndef PX2INPUTMANAGER_HPP
#define PX2INPUTMANAGER_HPP
#include "PX2UnityPre.hpp"
#include "PX2Singleton_NeedNew.hpp"
#include "PX2InputEventListener.hpp"
namespace PX2
{
// Central registry of input-event listeners, addressable by integer id,
// plus one built-in default listener. Lifetime/access is provided by the
// Singleton base (see PX2_INPUTMAN macro below).
class PX2_UNITY_ITEM InputManager : public Singleton<InputManager>
{
public:
InputManager();
~InputManager();
// Returns the built-in default listener (mInputEventListener).
InputEventListener *GetDefaultListener();
// Creates a listener, registers it under 'id', and returns it.
InputEventListener *CreateAddListener(int id);
// Looks up the listener registered under 'id'.
// NOTE(review): behavior for an unknown id is defined in the .inl/.cpp
// (presumably returns null) — confirm at the implementation.
InputEventListener *GetInputListener(int id);
protected:
InputEventListenerPtr mInputEventListener; // default listener
std::map<int, InputEventListenerPtr> mInputEventListenerMap; // id -> listener
};
#include "PX2InputManager.inl"
#define PX2_INPUTMAN InputManager::GetSingleton()
}
#endif
|
//
// serial_port_base.hpp
// ~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_SERIAL_PORT_BASE_HPP
#define ASIO_SERIAL_PORT_BASE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_SERIAL_PORT) \
|| defined(GENERATING_DOCUMENTATION)
#if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)
# include <termios.h>
#endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)
#include "asio/detail/socket_types.hpp"
#include "asio/error_code.hpp"
#if defined(GENERATING_DOCUMENTATION)
# define ASIO_OPTION_STORAGE implementation_defined
#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)
# define ASIO_OPTION_STORAGE DCB
#else
# define ASIO_OPTION_STORAGE termios
#endif
#include "asio/detail/push_options.hpp"
namespace asio {
/// The serial_port_base class is used as a base for the basic_serial_port class
/// template so that we have a common place to define the serial port options.
class serial_port_base
{
public:
/// Serial port option to permit changing the baud rate.
/**
* Implements changing the baud rate for a given serial port.
*/
class baud_rate
{
public:
explicit baud_rate(unsigned int rate = 0);
// Returns the baud rate held by this option object.
unsigned int value() const;
// Writes this option into the platform storage (DCB or termios).
ASIO_DECL asio::error_code store(
ASIO_OPTION_STORAGE& storage,
asio::error_code& ec) const;
// Reads this option back from the platform storage.
ASIO_DECL asio::error_code load(
const ASIO_OPTION_STORAGE& storage,
asio::error_code& ec);
private:
unsigned int value_;
};
/// Serial port option to permit changing the flow control.
/**
* Implements changing the flow control for a given serial port.
*/
class flow_control
{
public:
enum type { none, software, hardware };
ASIO_DECL explicit flow_control(type t = none);
// Returns the flow-control mode held by this option object.
type value() const;
// Writes this option into the platform storage (DCB or termios).
ASIO_DECL asio::error_code store(
ASIO_OPTION_STORAGE& storage,
asio::error_code& ec) const;
// Reads this option back from the platform storage.
ASIO_DECL asio::error_code load(
const ASIO_OPTION_STORAGE& storage,
asio::error_code& ec);
private:
type value_;
};
/// Serial port option to permit changing the parity.
/**
* Implements changing the parity for a given serial port.
*/
class parity
{
public:
enum type { none, odd, even };
ASIO_DECL explicit parity(type t = none);
// Returns the parity mode held by this option object.
type value() const;
// Writes this option into the platform storage (DCB or termios).
ASIO_DECL asio::error_code store(
ASIO_OPTION_STORAGE& storage,
asio::error_code& ec) const;
// Reads this option back from the platform storage.
ASIO_DECL asio::error_code load(
const ASIO_OPTION_STORAGE& storage,
asio::error_code& ec);
private:
type value_;
};
/// Serial port option to permit changing the number of stop bits.
/**
* Implements changing the number of stop bits for a given serial port.
*/
class stop_bits
{
public:
enum type { one, onepointfive, two };
ASIO_DECL explicit stop_bits(type t = one);
// Returns the stop-bit count held by this option object.
type value() const;
// Writes this option into the platform storage (DCB or termios).
ASIO_DECL asio::error_code store(
ASIO_OPTION_STORAGE& storage,
asio::error_code& ec) const;
// Reads this option back from the platform storage.
ASIO_DECL asio::error_code load(
const ASIO_OPTION_STORAGE& storage,
asio::error_code& ec);
private:
type value_;
};
/// Serial port option to permit changing the character size.
/**
* Implements changing the character size for a given serial port.
*/
class character_size
{
public:
ASIO_DECL explicit character_size(unsigned int t = 8);
// Returns the character size (bits per character) held by this option.
unsigned int value() const;
// Writes this option into the platform storage (DCB or termios).
ASIO_DECL asio::error_code store(
ASIO_OPTION_STORAGE& storage,
asio::error_code& ec) const;
// Reads this option back from the platform storage.
ASIO_DECL asio::error_code load(
const ASIO_OPTION_STORAGE& storage,
asio::error_code& ec);
private:
unsigned int value_;
};
protected:
/// Protected destructor to prevent deletion through this type.
~serial_port_base()
{
}
};
} // namespace asio
#include "asio/detail/pop_options.hpp"
#undef ASIO_OPTION_STORAGE
#include "asio/impl/serial_port_base.hpp"
#if defined(ASIO_HEADER_ONLY)
# include "asio/impl/serial_port_base.ipp"
#endif // defined(ASIO_HEADER_ONLY)
#endif // defined(ASIO_HAS_SERIAL_PORT)
// || defined(GENERATING_DOCUMENTATION)
#endif // ASIO_SERIAL_PORT_BASE_HPP
|
#ifndef BOOST_DESCRIBE_CLASS_HPP_INCLUDED
#define BOOST_DESCRIBE_CLASS_HPP_INCLUDED
// Copyright 2020 Peter Dimov
// Distributed under the Boost Software License, Version 1.0.
// https://www.boost.org/LICENSE_1_0.txt
#include <boost/describe/detail/config.hpp>
#if !defined(BOOST_DESCRIBE_CXX14)
// Pre-C++14 compilers: the macros expand to nothing, so annotated code
// still compiles but carries no reflection metadata.
#define BOOST_DESCRIBE_CLASS(C, Bases, Public, Protected, Private)
#define BOOST_DESCRIBE_STRUCT(C, Bases, Members)
#else
#include <boost/describe/detail/bases.hpp>
#include <boost/describe/detail/members.hpp>
#include <type_traits>
namespace boost
{
namespace describe
{
#if defined(_MSC_VER) && !defined(__clang__)
// MSVC (non-clang) branch: its preprocessor handles __VA_ARGS__ expansion
// differently, so the parenthesised lists are unpacked directly into the
// helper macros' argument lists.
#define BOOST_DESCRIBE_PP_UNPACK(...) __VA_ARGS__
// Describe a class with non-public members: must appear inside the class
// body, because it relies on friend declarations for access.
#define BOOST_DESCRIBE_CLASS(C, Bases, Public, Protected, Private) \
friend BOOST_DESCRIBE_BASES(C, BOOST_DESCRIBE_PP_UNPACK Bases) \
friend BOOST_DESCRIBE_PUBLIC_MEMBERS(C, BOOST_DESCRIBE_PP_UNPACK Public) \
friend BOOST_DESCRIBE_PROTECTED_MEMBERS(C, BOOST_DESCRIBE_PP_UNPACK Protected) \
friend BOOST_DESCRIBE_PRIVATE_MEMBERS(C, BOOST_DESCRIBE_PP_UNPACK Private)
// Describe a struct/class with public members only; usable outside the type.
#define BOOST_DESCRIBE_STRUCT(C, Bases, Members) \
static_assert(std::is_class<C>::value, "BOOST_DESCRIBE_STRUCT should only be used with class types"); \
BOOST_DESCRIBE_BASES(C, BOOST_DESCRIBE_PP_UNPACK Bases) \
BOOST_DESCRIBE_PUBLIC_MEMBERS(C, BOOST_DESCRIBE_PP_UNPACK Members) \
BOOST_DESCRIBE_PROTECTED_MEMBERS(C) \
BOOST_DESCRIBE_PRIVATE_MEMBERS(C)
#else
// GCC 8+ has __VA_OPT__; older GCC/Clang rely on the ,##__VA_ARGS__
// extension so the leading comma disappears when the list is empty.
#if defined(__GNUC__) && __GNUC__ >= 8
# define BOOST_DESCRIBE_PP_UNPACK(...) __VA_OPT__(,) __VA_ARGS__
#else
# define BOOST_DESCRIBE_PP_UNPACK(...) , ##__VA_ARGS__
#endif
// Extra level of indirection so BOOST_DESCRIBE_PP_UNPACK is expanded
// before the helper macro parses its arguments.
#define BOOST_DESCRIBE_BASES_(...) BOOST_DESCRIBE_BASES(__VA_ARGS__)
#define BOOST_DESCRIBE_PUBLIC_MEMBERS_(...) BOOST_DESCRIBE_PUBLIC_MEMBERS(__VA_ARGS__)
#define BOOST_DESCRIBE_PROTECTED_MEMBERS_(...) BOOST_DESCRIBE_PROTECTED_MEMBERS(__VA_ARGS__)
#define BOOST_DESCRIBE_PRIVATE_MEMBERS_(...) BOOST_DESCRIBE_PRIVATE_MEMBERS(__VA_ARGS__)
#define BOOST_DESCRIBE_CLASS(C, Bases, Public, Protected, Private) \
friend BOOST_DESCRIBE_BASES_(C BOOST_DESCRIBE_PP_UNPACK Bases) \
friend BOOST_DESCRIBE_PUBLIC_MEMBERS_(C BOOST_DESCRIBE_PP_UNPACK Public) \
friend BOOST_DESCRIBE_PROTECTED_MEMBERS_(C BOOST_DESCRIBE_PP_UNPACK Protected) \
friend BOOST_DESCRIBE_PRIVATE_MEMBERS_(C BOOST_DESCRIBE_PP_UNPACK Private)
#define BOOST_DESCRIBE_STRUCT(C, Bases, Members) \
static_assert(std::is_class<C>::value, "BOOST_DESCRIBE_STRUCT should only be used with class types"); \
BOOST_DESCRIBE_BASES_(C BOOST_DESCRIBE_PP_UNPACK Bases) \
BOOST_DESCRIBE_PUBLIC_MEMBERS_(C BOOST_DESCRIBE_PP_UNPACK Members) \
BOOST_DESCRIBE_PROTECTED_MEMBERS_(C) \
BOOST_DESCRIBE_PRIVATE_MEMBERS_(C)
#endif
} // namespace describe
} // namespace boost
#endif // !defined(BOOST_DESCRIBE_CXX14)
#endif // #ifndef BOOST_DESCRIBE_CLASS_HPP_INCLUDED
|
#include <doctest/doctest.h>
#include <set>
#include <vector>
// 287. Find the Duplicate Number
// LeetCode 287: report a value that occurs more than once in `nums`.
class Solution {
public:
    // Scans left to right and returns the first value seen a second time,
    // or -1 when all values are distinct.
    int findDuplicate(const std::vector<int>& nums) {
        std::set<int> seen;
        for (int value : nums) {
            // set::insert's .second is false when the value was already present.
            if (!seen.insert(value).second) {
                return value;
            }
        }
        return -1;
    }
};
// doctest cases pinning findDuplicate's behaviour, including the
// all-duplicates input.
TEST_CASE("Find the Duplicate Number") {
Solution s;
REQUIRE_EQ(s.findDuplicate({1, 3, 4, 2, 2}), 2);
REQUIRE_EQ(s.findDuplicate({3, 1, 3, 4, 2}), 3);
REQUIRE_EQ(s.findDuplicate({2, 2, 2, 2, 2}), 2);
}
|
/*
* Copyright (C) 2013-2014 IMS LICENSE
* You may obtain a copy of the License at
*
* http://www.bitbucket.org/italiammarssociety/eras/src/
* LICENSE
*
* @author Kunal Tyagi
*
*/
#include "SwivelPlugin.hh"
using namespace gazebo;
GZ_REGISTER_MODEL_PLUGIN(SwivelPlugin)
/////////////////////////////////////////////////
// Sets plugin defaults; both may be overridden from SDF in Load().
SwivelPlugin::SwivelPlugin()
{
// Default joint effort limit; replaced by the <max_force> SDF element.
this->maxForce = 5.0;
// Target pivot angle in radians; starts centred.
this->jointAngle = 0.0;
}
/////////////////////////////////////////////////
/**
Looks up the named joint on the model and stores it in the joints array.
@param _name the joint name as given in the model SDF
@return 0 on success, 1 if the joint could not be found
*/
int SwivelPlugin::RegisterJoint(const std::string &_name)
{
// if NUMBER_OF_JOINTS != 1 in future, make _index one of the
// parameters of the function call
int _index = SWIVEL_PIVOT;
// Bounds checking on index: this plugin currently supports exactly one
// joint, so anything else is reported (but not treated as fatal).
if (NUMBER_OF_JOINTS != 1)
{
gzerr << "Joint index " << NUMBER_OF_JOINTS << " out of bounds [0, 1] in model " << this->model->GetName()
<< "." << std::endl;
}
// Find the specified joint and add it to our list
this->joints[_index] = this->model->GetJoint(_name);
if (!this->joints[_index])
{
gzerr << "Unable to find the " << _name
<< " joint in model " << this->model->GetName() << "." << std::endl;
return 1;
}
// Success!
return 0;
}
/////////////////////////////////////////////////
/**
Gazebo entry point: wires the plugin to the model, reads SDF parameters
(<max_force>, <axis>) and subscribes to ~/<model>/angle for target angles.
@param _model the model this plugin is attached to
@param _sdf the plugin's SDF element
*/
void SwivelPlugin::Load(physics::ModelPtr _model,
    sdf::ElementPtr _sdf)
{
    this->model = _model;
    this->node = transport::NodePtr(new transport::Node());
    this->node->Init(this->model->GetWorld()->GetName());
    // Register every joint this plugin drives; abort the load on failure.
    int err = 0;
    err += RegisterJoint("swivel_pivot");
    if (err > 0)
        return;
    if (_sdf->HasElement("max_force"))
        this->maxForce = _sdf->GetElement("max_force")->Get<double>();
    else
        gzwarn << "No MaxForce value set in the model sdf, default value is 5.0.\n";
    // Bug fix: this branch previously re-tested "max_force", so a model that
    // provided <axis> (but not <max_force>) never had its axis read. The
    // warning text also named MaxForce. The documented default of 0 (X axis)
    // is now set explicitly when <axis> is absent.
    if (_sdf->HasElement("axis"))
        this->axis = _sdf->GetElement("axis")->Get<int>();
    else
    {
        this->axis = 0;
        gzwarn << "No Axis value set in the model sdf, default value is 0 (X axis).\n";
    }
    // Validity checks: fold jointAngle back into (-pi, pi].
    // NOTE(review): jointAngle is 0.0 here (set in the constructor), so these
    // loops never run; the > pi branch reflects rather than wraps - confirm
    // the intent if this path is ever reachable.
    while (this->jointAngle > M_PI)
    {
        gzwarn << "Angle out of bounds, trying to fit it right in\n"
            << "Maybe because the angle should be in Radians (-pi, pi]" << std::endl;
        this->jointAngle = (2 * M_PI) - this->jointAngle;
    }
    while (this->jointAngle < -M_PI)
    {
        gzwarn << "Angle out of bounds, trying to fit it right in\n"
            << "Maybe because the angle should be in Radians (-pi, pi]" << std::endl;
        this->jointAngle = (2 * M_PI) + this->jointAngle;
    }
    // Listen for integer target angles (degrees) on ~/<model>/angle.
    this->intSub = this->node->Subscribe(
        std::string("~/") + this->model->GetName() + std::string("/angle"),
        &SwivelPlugin::OnIntMsg, this);
}
/////////////////////////////////////////////////
// Handles a target-angle message: the payload is an angle in degrees.
void SwivelPlugin::OnIntMsg(ConstIntPtr &_msg)
{
// gzdbg << "Target angle: " << _msg->data() << std::endl;
// Refresh the effort limit on every joint before applying force.
for (int i = 0; i < NUMBER_OF_JOINTS; i++)
this->joints[i]->SetMaxForce(0, this->maxForce);
// Convert the incoming degrees to radians.
double targetAngle = (_msg->data()) * (M_PI/180);
// this->joints[SWIVEL_PIVOT]->SetPosition((unsigned int)axis, (double)targetAngle);
// Proportional control: force scales with the angular error on `axis`.
this->joints[SWIVEL_PIVOT]->SetForce(axis,
this->maxForce*(targetAngle - this->joints[SWIVEL_PIVOT]->GetAngle(axis).Radian()));
}
|
/*
* Copyright (C) 2016-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#include "JSWebAssemblyModule.h"
#if ENABLE(WEBASSEMBLY)
#include "JSCInlines.h"
#include "JSWebAssemblyCodeBlock.h"
#include "JSWebAssemblyCompileError.h"
#include "WasmFormat.h"
#include "WasmModule.h"
#include "WasmModuleInformation.h"
#include <wtf/StdLibExtras.h>
namespace JSC {
// JSC class metadata for WebAssembly.Module instances.
const ClassInfo JSWebAssemblyModule::s_info = { "WebAssembly.Module", &Base::s_info, nullptr, nullptr, CREATE_METHOD_TABLE(JSWebAssemblyModule) };
// Wraps a Wasm::Module validation result in a new JSWebAssemblyModule cell.
// On validation failure, throws a WebAssembly.CompileError carrying the
// validation error message and returns nullptr.
JSWebAssemblyModule* JSWebAssemblyModule::createStub(VM& vm, JSGlobalObject* globalObject, Structure* structure, Wasm::Module::ValidationResult&& result)
{
auto scope = DECLARE_THROW_SCOPE(vm);
if (!result.has_value()) {
auto* error = JSWebAssemblyCompileError::create(globalObject, vm, structure->globalObject()->webAssemblyCompileErrorStructure(), result.error());
// Creating the error object can itself throw; bail out before throwing it.
RETURN_IF_EXCEPTION(scope, nullptr);
throwException(globalObject, scope, error);
return nullptr;
}
auto* module = new (NotNull, allocateCell<JSWebAssemblyModule>(vm.heap)) JSWebAssemblyModule(vm, structure, result.value().releaseNonNull());
module->finishCreation(vm);
return module;
}
// Creates the Structure used for JSWebAssemblyModule cells.
Structure* JSWebAssemblyModule::createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
{
return Structure::create(vm, globalObject, prototype, TypeInfo(WebAssemblyModuleType, StructureFlags), info());
}
// Takes ownership of the validated Wasm::Module.
JSWebAssemblyModule::JSWebAssemblyModule(VM& vm, Structure* structure, Ref<Wasm::Module>&& module)
: Base(vm, structure)
, m_module(WTFMove(module))
{
}
// Second phase of construction: builds the symbol table that maps each of
// the module's export names to a scope offset.
void JSWebAssemblyModule::finishCreation(VM& vm)
{
Base::finishCreation(vm);
ASSERT(inherits(vm, info()));
// On success, a new WebAssembly.Module object is returned with [[Module]] set to the validated Ast.module.
SymbolTable* exportSymbolTable = SymbolTable::create(vm);
const Wasm::ModuleInformation& moduleInformation = m_module->moduleInformation();
for (auto& exp : moduleInformation.exports) {
// Each export takes the next scope offset; its name is interned as an atom.
auto offset = exportSymbolTable->takeNextScopeOffset(NoLockingNecessary);
String field = String::fromUTF8(exp.field);
exportSymbolTable->set(NoLockingNecessary, AtomString(field).impl(), SymbolTableEntry(VarOffset(offset)));
}
m_exportSymbolTable.set(vm, this, exportSymbolTable);
}
// GC finalizer: runs the destructor explicitly, then lets the signature
// registry drop entries that may no longer be referenced.
void JSWebAssemblyModule::destroy(JSCell* cell)
{
static_cast<JSWebAssemblyModule*>(cell)->JSWebAssemblyModule::~JSWebAssemblyModule();
Wasm::SignatureInformation::tryCleanup();
}
// Parsed information of the underlying module.
const Wasm::ModuleInformation& JSWebAssemblyModule::moduleInformation() const
{
return m_module->moduleInformation();
}
// Symbol table built in finishCreation(); maps export names to offsets.
SymbolTable* JSWebAssemblyModule::exportSymbolTable() const
{
return m_exportSymbolTable.get();
}
// Forwards to the module's function-index-space signature lookup.
Wasm::SignatureIndex JSWebAssemblyModule::signatureIndexFromFunctionIndexSpace(unsigned functionIndexSpace) const
{
return m_module->signatureIndexFromFunctionIndexSpace(functionIndexSpace);
}
// Code block for the given memory mode, or null if none has been set.
JSWebAssemblyCodeBlock* JSWebAssemblyModule::codeBlock(Wasm::MemoryMode mode)
{
return m_codeBlocks[static_cast<size_t>(mode)].get();
}
// The underlying (JS-independent) Wasm module.
Wasm::Module& JSWebAssemblyModule::module()
{
return m_module.get();
}
// Stores the code block compiled for the given memory mode.
void JSWebAssemblyModule::setCodeBlock(VM& vm, Wasm::MemoryMode mode, JSWebAssemblyCodeBlock* codeBlock)
{
m_codeBlocks[static_cast<size_t>(mode)].set(vm, this, codeBlock);
}
// GC marking: keeps the export symbol table and the per-memory-mode code
// blocks alive for as long as this module cell is.
void JSWebAssemblyModule::visitChildren(JSCell* cell, SlotVisitor& visitor)
{
JSWebAssemblyModule* thisObject = jsCast<JSWebAssemblyModule*>(cell);
ASSERT_GC_OBJECT_INHERITS(thisObject, info());
Base::visitChildren(thisObject, visitor);
visitor.append(thisObject->m_exportSymbolTable);
for (unsigned i = 0; i < Wasm::NumberOfMemoryModes; ++i)
visitor.append(thisObject->m_codeBlocks[i]);
}
} // namespace JSC
#endif // ENABLE(WEBASSEMBLY)
|
#include "BossFour.hpp"
#include <iostream>
#include <cmath>
#include <vector>
#include <algorithm>
#include <set>
#include <time.h>
/**
BossFour.cpp
Purpose: to update and control BossFour with regard to location, attacks, and health
*/
//boss four shoots guided beams quickly toward player
namespace g35 {
// Frees the projectiles this boss still owns; the original destructor was
// empty and leaked every remaining Bullet.
BossFour::~BossFour() {
    for (unsigned int i = 0; i < bullets.size(); i++) {
        delete bullets[i];
    }
    bullets.clear();
    // pastMainCharacterLocation is deliberately NOT freed: Update() may leave
    // it aliasing a caller-owned array - TODO confirm ownership before
    // freeing it here.
}
/**
Sets the sprite, health and initial position used by BossFour.
@param data shared game data used to access textures and the window
*/
BossFour::BossFour(GameDataRef data):_data(data)
{
_BossFourSprite.setTexture(_data->assets.GetTexture("BossFourStage1"));
// Spawn at 3/4 of the screen width, vertically centred. NOTE(review): the
// vertical centring uses the sprite's width, not its height - presumably
// the sprite is square; confirm.
_BossFourSprite.setPosition(3*SCREEN_WIDTH/4 - _BossFourSprite.getGlobalBounds().width/2,SCREEN_HEIGHT/2 - _BossFourSprite.getGlobalBounds().width/2);
_health = BOSS_FOUR_STARTING_HEALTH;
rageMode = false; //two modes, one normal mode and one hard mode when its health is low
// Last observed player position, used for linear aim prediction;
// (-1, -1) marks "no sample yet".
pastMainCharacterLocation = new float[2];
pastMainCharacterLocation[0] = -1;
pastMainCharacterLocation[1] = -1;
}
/**
Draws BossFour into the window
*/
void BossFour::Draw()
{
_data->window.draw(_BossFourSprite);
for (int i = 0; i < bullets.size(); i++) {
_data->window.draw(bullets[i]->shape);
}
}
/**
Moves BossFour one step in the specified direction.
@param direction 0 = stay, 1 = up, 2 = right, 3 = down, 4 = left,
       5 = up-right, 6 = down-right, 7 = down-left, 8 = up-left
*/
void BossFour::Move(int direction) {
    // Rage mode uses a faster movement speed.
    float speed;
    if (rageMode) {
        speed = BOSS_FOUR_MOVEMENT_RAGE_SPEED;
    }
    else {
        speed = BOSS_FOUR_MOVEMENT_SPEED;
    }
    // Per-direction displacement table; diagonal components are divided by
    // sqrt(2) so the step length is `speed` regardless of heading.
    const float diag = speed/sqrt(2);
    const float dx[9] = { 0, 0, speed, 0, -speed, diag, diag, -diag, -diag };
    const float dy[9] = { 0, -speed, 0, speed, 0, -diag, diag, diag, -diag };
    // Direction 0 (and anything out of range) performs no movement, matching
    // the original switch's behaviour.
    if (direction >= 1 && direction <= 8) {
        this->_BossFourSprite.move(dx[direction], dy[direction]);
    }
}
/**
Returns a mutable reference to BossFour's sprite.
*/
sf::Sprite &BossFour::GetSprite()
{
return _BossFourSprite;
}
/**
Returns the centre of BossFour's sprite as a heap-allocated {x, y} pair.
NOTE(review): the caller receives ownership and must delete[] the array;
several call sites in this file do not, leaking one array per call.
*/
float* BossFour::getPosition() {
float* returnData = new float[2];
returnData[0] = _BossFourSprite.getGlobalBounds().left + (_BossFourSprite.getGlobalBounds().width)/2;
returnData[1] = _BossFourSprite.getGlobalBounds().top + (_BossFourSprite.getGlobalBounds().height)/2;
return returnData;
}
// Current health; Update() clamps it so it never drops below 0.
float BossFour::getHealth() {
return this->_health;
}
/**
Sets the specified health for BossFour.
@param health the new health value
*/
void BossFour::setHealth(float health) {
this->_health = health;
}
/**
Updates BossFour health, attacks and position
@param dt, and the position of the player
*/
void BossFour::Update(float dt, float * mainCharacterPosition, sf::FloatRect mainCharacterRect) {
if (_health <= BOSS_FOUR_STARTING_HEALTH/3 && !rageMode) {
rageMode = true;
_BossFourSprite.setTexture(_data->assets.GetTexture("BossFourStage2"), true);
}
if (_health > 0) {
if (pastMainCharacterLocation[0] == -1) {
pastMainCharacterLocation = mainCharacterPosition;
}
float * newLocationExpected = new float[2];
newLocationExpected[0] = (mainCharacterPosition[0] - pastMainCharacterLocation[0]) + mainCharacterPosition[0];
newLocationExpected[1] = (mainCharacterPosition[1] - pastMainCharacterLocation[1]) + mainCharacterPosition[1];
shootBeam(newLocationExpected);
std::set<int> impossibleMoves = getImpossibleMoves();
int direction = findMoveToTarget(getPosition(), newLocationExpected, impossibleMoves);
Move(direction);
pastMainCharacterLocation = mainCharacterPosition;
}
else {
_health = 0;
}
for (int i = 0; i < bullets.size();i++) {
bullets[i]->currVelocity.x *= 1.001;
bullets[i]->currVelocity.y *= 1.001;
bullets[i]->shape.move(bullets[i]->currVelocity);
if (bullets[i]->shape.getPosition().x < 0 || bullets[i]->shape.getPosition().x > SCREEN_WIDTH || bullets[i]->shape.getPosition().y < 0 || bullets[i]->shape.getPosition().y > SCREEN_HEIGHT) {
bullets.erase(bullets.begin() + i);
}
}
}
/**
Euclidean distance between two {x, y} points.
@param BossFourMovePosition first point
@param targetPosition second point (the player, in practice)
@return the distance
*/
float BossFour::getDistance(float* BossFourMovePosition, float* targetPosition) {
    // The squared terms are already non-negative, so the original abs()
    // wrapper was redundant - and, being unqualified, it risked resolving to
    // the int overload and truncating. Plain multiplication also avoids the
    // overhead of pow() for a square.
    float dx = BossFourMovePosition[0] - targetPosition[0];
    float dy = BossFourMovePosition[1] - targetPosition[1];
    return sqrt(dx * dx + dy * dy);
}
/**
Computes the set of direction codes (1-8, same encoding as Move()) that
would push the sprite outside the screen this frame.
NOTE(review): the bounds use BOSS_FOUR_MOVEMENT_SPEED even in rage mode,
where the actual step is larger - confirm this is intended.
*/
std::set<int> BossFour::getImpossibleMoves() {
std::set<int> impossibleMoves;
//check for impossible moves
for (int j = 0; j <= 8; j++) {
switch (j) {
case 0:
// Standing still is always allowed.
break;
case 1:
// Up: would the top edge leave the screen?
if (_BossFourSprite.getPosition().y - BOSS_FOUR_MOVEMENT_SPEED < 0 ) {
impossibleMoves.insert(j);
}
break;
case 2:
// Right: would the right edge leave the screen?
if (_BossFourSprite.getPosition().x + _BossFourSprite.getGlobalBounds().width + BOSS_FOUR_MOVEMENT_SPEED > SCREEN_WIDTH) {
impossibleMoves.insert(j);
}
break;
case 3:
// Down: would the bottom edge leave the screen?
if (_BossFourSprite.getPosition().y + _BossFourSprite.getGlobalBounds().height + BOSS_FOUR_MOVEMENT_SPEED > SCREEN_HEIGHT) {
impossibleMoves.insert(j);
}
break;
case 4:
// Left: would the left edge leave the screen?
if (_BossFourSprite.getPosition().x - BOSS_FOUR_MOVEMENT_SPEED < 0) {
impossibleMoves.insert(j);
}
break;
case 5:
// Up-right diagonal (speed/sqrt(2) per axis): check top and right edges.
if (_BossFourSprite.getPosition().y - (BOSS_FOUR_MOVEMENT_SPEED/sqrt(2)) < 0 || _BossFourSprite.getPosition().x + _BossFourSprite.getGlobalBounds().width + (BOSS_FOUR_MOVEMENT_SPEED/sqrt(2)) > SCREEN_WIDTH) {
impossibleMoves.insert(j);
}
break;
case 6:
// Down-right diagonal: check bottom and right edges.
if (_BossFourSprite.getPosition().y + _BossFourSprite.getGlobalBounds().height + (BOSS_FOUR_MOVEMENT_SPEED/sqrt(2)) > SCREEN_HEIGHT || _BossFourSprite.getPosition().x + _BossFourSprite.getGlobalBounds().width + (BOSS_FOUR_MOVEMENT_SPEED/sqrt(2)) > SCREEN_WIDTH) {
impossibleMoves.insert(j);
}
break;
case 7:
// Down-left diagonal: check bottom and left edges.
if (_BossFourSprite.getPosition().y + _BossFourSprite.getGlobalBounds().height + (BOSS_FOUR_MOVEMENT_SPEED/sqrt(2)) > SCREEN_HEIGHT || _BossFourSprite.getPosition().x - (BOSS_FOUR_MOVEMENT_SPEED/sqrt(2)) < 0) {
impossibleMoves.insert(j);
}
break;
case 8:
// Up-left diagonal: check top and left edges.
if (_BossFourSprite.getPosition().y - (BOSS_FOUR_MOVEMENT_SPEED/sqrt(2)) < 0 || _BossFourSprite.getPosition().x - (BOSS_FOUR_MOVEMENT_SPEED/sqrt(2)) < 0) {
impossibleMoves.insert(j);
}
break;
}
}
return impossibleMoves;
}
/**
Picks the movement direction (1-8, same encoding as Move()) that brings
BossFour closest to the target, skipping directions ruled out by
getImpossibleMoves().
@param BossFourPosition current {x, y} centre of the boss
@param targetPosition the {x, y} point to approach
@param impossibleMoves direction codes that may not be used
@return the chosen direction, or 0 if every direction is impossible
*/
int BossFour::findMoveToTarget(float * BossFourPosition, float * targetPosition, std::set<int> impossibleMoves) {
    int direction = 0;
    float minDist = -1;
    // Candidate displacement per direction; diagonals step speed/sqrt(2) on
    // each axis. Index 0 (stand still) is never considered, as before.
    const float step = BOSS_FOUR_MOVEMENT_SPEED;
    const float diag = BOSS_FOUR_MOVEMENT_SPEED/sqrt(2);
    const float dx[9] = { 0, 0, step, 0, -step, diag, diag, -diag, -diag };
    const float dy[9] = { 0, -step, 0, step, 0, -diag, diag, diag, -diag };
    // Stack buffer - the original allocated (and leaked) a new float[2] here.
    float candidate[2];
    for (int j = 1; j <= 8; j++) {
        // std::set has O(log n) member lookup; the original's std::find over
        // the set was a linear scan.
        if (impossibleMoves.count(j) > 0) {
            continue;
        }
        candidate[0] = BossFourPosition[0] + dx[j];
        candidate[1] = BossFourPosition[1] + dy[j];
        float distance = getDistance(candidate, targetPosition);
        if (minDist < 0 || distance < minDist) {
            minDist = distance;
            direction = j;
        }
    }
    return direction;
}
/**
boss Four's beam attacks
*/
void BossFour::shootBeam(float * mainCharacterPosition) {
if (!rageMode) {
if (_clock.getElapsedTime().asSeconds() > 1) {
Bullet * b1 = new Bullet(BOSS_FOUR_BEAM_RADIUS_STAGE1, sf::Color::Green);
b1->shape.setOrigin(BOSS_FOUR_BEAM_RADIUS_STAGE1, BOSS_FOUR_BEAM_RADIUS_STAGE1);
b1->shape.setPosition(this->getPosition()[0],this->getPosition()[1]);
sf::Vector2f directionVector;
directionVector.x = mainCharacterPosition[0] - getPosition()[0];
directionVector.y = mainCharacterPosition[1] - getPosition()[1];
float speedAdjuster = BOSS_FOUR_BEAM_SPEED_STAGE1_SET/sqrt(pow(directionVector.x,2) + pow(directionVector.y,2));
b1->currVelocity = speedAdjuster * directionVector * b1->maxSpeed;
bullets.push_back(b1);
_clock.restart();
}
}
else {
Bullet * b1 = new Bullet(BOSS_FOUR_BEAM_RADIUS_STAGE2, sf::Color(255,150,50,240));
b1->shape.setOrigin(BOSS_FOUR_BEAM_RADIUS_STAGE2, BOSS_FOUR_BEAM_RADIUS_STAGE2);
b1->shape.setPosition(this->getPosition()[0],this->getPosition()[1]);
sf::Vector2f directionVector;
directionVector.x = mainCharacterPosition[0] - getPosition()[0];
directionVector.y = mainCharacterPosition[1] - getPosition()[1];
float speedAdjuster = BOSS_FOUR_BEAM_SPEED_STAGE2_SET/sqrt(pow(directionVector.x,2) + pow(directionVector.y,2));
b1->currVelocity = speedAdjuster * directionVector * b1->maxSpeed;
bullets.push_back(b1);
}
}
// Exposes the live bullet list to outside code (e.g. collision handling,
// which may erase entries).
std::vector<Bullet*>* BossFour::getBulletVector() {
return &this->bullets;
}
}
|
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2012 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "chainparams.h"
#include "db.h"
#include "net.h"
#include "main.h"
#include "addrman.h"
#include "ui_interface.h"
#ifdef WIN32
#include <string.h>
#endif
#ifdef USE_UPNP
#include <miniupnpc/miniwget.h>
#include <miniupnpc/miniupnpc.h>
#include <miniupnpc/upnpcommands.h>
#include <miniupnpc/upnperrors.h>
#endif
// Dump addresses to peers.dat every 15 minutes (900s)
#define DUMP_ADDRESSES_INTERVAL 900
using namespace std;
using namespace boost;
static const int MAX_OUTBOUND_CONNECTIONS = 16;
bool OpenNetworkConnection(const CAddress& addrConnect, CSemaphoreGrant *grantOutbound = NULL, const char *strDest = NULL, bool fOneShot = false);
// Score/port pair tracked for each of our own candidate local addresses.
struct LocalServiceInfo {
int nScore;
int nPort;
};
//
// Global state variables
//
bool fDiscover = true;
uint64_t nLocalServices = NODE_NETWORK;
// Guards mapLocalHost, vfReachable and vfLimited below.
static CCriticalSection cs_mapLocalHost;
static map<CNetAddr, LocalServiceInfo> mapLocalHost;
static bool vfReachable[NET_MAX] = {};
static bool vfLimited[NET_MAX] = {};
static CNode* pnodeLocalHost = NULL;
// The peer currently acting as sync node, if any (see CloseSocketDisconnect).
static CNode* pnodeSync = NULL;
uint64_t nLocalHostNonce = 0;
static std::vector<SOCKET> vhListenSocket;
CAddrMan addrman;
// All currently connected nodes; guarded by cs_vNodes.
vector<CNode*> vNodes;
CCriticalSection cs_vNodes;
map<CInv, CDataStream> mapRelay;
deque<pair<int64_t, CInv> > vRelayExpiration;
CCriticalSection cs_mapRelay;
map<CInv, int64_t> mapAlreadyAskedFor;
// Destinations queued for one-shot connections; guarded by cs_vOneShots.
static deque<string> vOneShots;
CCriticalSection cs_vOneShots;
set<CNetAddr> setservAddNodeAddresses;
CCriticalSection cs_setservAddNodeAddresses;
vector<std::string> vAddedNodes;
CCriticalSection cs_vAddedNodes;
// Caps the number of simultaneous outbound connections.
static CSemaphore *semOutbound = NULL;
// Signals for message handling
static CNodeSignals g_signals;
CNodeSignals& GetNodeSignals() { return g_signals; }
// Queues a destination for a one-shot connection attempt.
void AddOneShot(string strDest)
{
LOCK(cs_vOneShots);
vOneShots.push_back(strDest);
}
// Port we listen on: the -port argument, falling back to the chain default.
unsigned short GetListenPort()
{
return (unsigned short)(GetArg("-port", Params().GetDefaultPort()));
}
// find 'best' local address for a particular peer
bool GetLocal(CService& addr, const CNetAddr *paddrPeer)
{
// Nothing to advertise if we are not listening at all.
if (fNoListen)
return false;
int nBestScore = -1;
int nBestReachability = -1;
{
LOCK(cs_mapLocalHost);
for (map<CNetAddr, LocalServiceInfo>::iterator it = mapLocalHost.begin(); it != mapLocalHost.end(); it++)
{
int nScore = (*it).second.nScore;
int nReachability = (*it).first.GetReachabilityFrom(paddrPeer);
// Reachability dominates; score breaks ties between equally reachable addresses.
if (nReachability > nBestReachability || (nReachability == nBestReachability && nScore > nBestScore))
{
addr = CService((*it).first, (*it).second.nPort);
nBestReachability = nReachability;
nBestScore = nScore;
}
}
}
// True iff at least one candidate local address was found.
return nBestScore >= 0;
}
// get best local address for a particular peer as a CAddress
// Otherwise, return the unroutable 0.0.0.0 but filled in with
// the normal parameters, since the IP may be changed to a useful
// one by discovery.
CAddress GetLocalAddress(const CNetAddr *paddrPeer)
{
CAddress ret(CService("0.0.0.0",GetListenPort()),0);
CService addr;
if (GetLocal(addr, paddrPeer))
{
ret = CAddress(addr);
}
// Stamp the advertised services and the current (network-adjusted) time.
ret.nServices = nLocalServices;
ret.nTime = GetAdjustedTime();
return ret;
}
// Reads one CR-terminated line from a (non-blocking) socket into strLine,
// skipping '\n' characters. Returns true when a line (or 9000 chars, or a
// partial line followed by disconnect) was read; false on close/error with
// nothing accumulated.
bool RecvLine(SOCKET hSocket, string& strLine)
{
strLine = "";
while (true)
{
char c;
int nBytes = recv(hSocket, &c, 1, 0);
if (nBytes > 0)
{
// '\n' is skipped; '\r' terminates the line.
if (c == '\n')
continue;
if (c == '\r')
return true;
strLine += c;
// Guard against unbounded lines from a hostile peer.
if (strLine.size() >= 9000)
return true;
}
else if (nBytes <= 0)
{
boost::this_thread::interruption_point();
if (nBytes < 0)
{
int nErr = WSAGetLastError();
if (nErr == WSAEMSGSIZE)
continue;
// Transient conditions on a non-blocking socket: wait briefly, retry.
if (nErr == WSAEWOULDBLOCK || nErr == WSAEINTR || nErr == WSAEINPROGRESS)
{
MilliSleep(10);
continue;
}
}
// Connection ended: hand back whatever was accumulated, if anything.
if (!strLine.empty())
return true;
if (nBytes == 0)
{
// socket closed
LogPrint("net", "socket closed\n");
return false;
}
else
{
// socket error
int nErr = WSAGetLastError();
LogPrint("net", "recv failed: %d\n", nErr);
return false;
}
}
}
}
// Returns the local-address score for addr, or 0 if it is not a known
// local address.
int GetnScore(const CService& addr)
{
    LOCK(cs_mapLocalHost);
    // Fix: the original compared count() against the LOCAL_NONE enum value.
    // count() returns the number of matching entries, so the absence test is
    // a comparison with 0 (this also avoids default-inserting via operator[]
    // semantics being reached for unknown addresses).
    if (mapLocalHost.count(addr) == 0)
        return 0;
    return mapLocalHost[addr].nScore;
}
// Is our peer's addrLocal potentially useful as an external IP source?
bool IsPeerAddrLocalGood(CNode *pnode)
{
return fDiscover && pnode->addr.IsRoutable() && pnode->addrLocal.IsRoutable() &&
!IsLimited(pnode->addrLocal.GetNetwork());
}
// pushes our own address to a peer
void AdvertizeLocal(CNode *pnode)
{
// Only advertise once the version handshake has completed.
if (!fNoListen && pnode->fSuccessfullyConnected)
{
CAddress addrLocal = GetLocalAddress(&pnode->addr);
// If discovery is enabled, sometimes give our peer the address it
// tells us that it sees us as in case it has a better idea of our
// address than we do.
if (IsPeerAddrLocalGood(pnode) && (!addrLocal.IsRoutable() ||
GetRand((GetnScore(addrLocal) > LOCAL_MANUAL) ? 8:2) == 0))
{
addrLocal.SetIP(pnode->addrLocal);
}
// Never push an unroutable address.
if (addrLocal.IsRoutable())
{
pnode->PushAddress(addrLocal);
}
}
}
// Marks a network class as reachable; reaching IPv6 implies IPv4 too.
void SetReachable(enum Network net, bool fFlag)
{
LOCK(cs_mapLocalHost);
vfReachable[net] = fFlag;
if (net == NET_IPV6 && fFlag)
vfReachable[NET_IPV4] = true;
}
// learn a new local address
bool AddLocal(const CService& addr, int nScore)
{
if (!addr.IsRoutable())
return false;
// Without discovery, only explicitly configured (manual) addresses count.
if (!fDiscover && nScore < LOCAL_MANUAL)
return false;
if (IsLimited(addr))
return false;
LogPrintf("AddLocal(%s,%i)\n", addr.ToString(), nScore);
{
LOCK(cs_mapLocalHost);
bool fAlready = mapLocalHost.count(addr) > 0;
LocalServiceInfo &info = mapLocalHost[addr];
// Re-learning a known address at an equal/better score bumps it by one.
if (!fAlready || nScore >= info.nScore) {
info.nScore = nScore + (fAlready ? 1 : 0);
info.nPort = addr.GetPort();
}
SetReachable(addr.GetNetwork());
}
return true;
}
// Convenience overload: pair the bare address with our listen port.
bool AddLocal(const CNetAddr &addr, int nScore)
{
return AddLocal(CService(addr, GetListenPort()), nScore);
}
/** Make a particular network entirely off-limits (no automatic connects to it) */
void SetLimited(enum Network net, bool fLimited)
{
// Unroutable can never be un-limited.
if (net == NET_UNROUTABLE)
return;
LOCK(cs_mapLocalHost);
vfLimited[net] = fLimited;
}
bool IsLimited(enum Network net)
{
LOCK(cs_mapLocalHost);
return vfLimited[net];
}
bool IsLimited(const CNetAddr &addr)
{
return IsLimited(addr.GetNetwork());
}
/** vote for a local address */
bool SeenLocal(const CService& addr)
{
{
LOCK(cs_mapLocalHost);
// Only addresses previously learned via AddLocal can be voted for.
if (mapLocalHost.count(addr) == 0)
return false;
mapLocalHost[addr].nScore++;
}
return true;
}
/** check whether a given address is potentially local */
bool IsLocal(const CService& addr)
{
LOCK(cs_mapLocalHost);
return mapLocalHost.count(addr) > 0;
}
/** check whether a given address is in a network we can probably connect to */
bool IsReachable(const CNetAddr& addr)
{
LOCK(cs_mapLocalHost);
enum Network net = addr.GetNetwork();
return vfReachable[net] && !vfLimited[net];
}
// Informs the address manager that we are still connected to this address.
void AddressCurrentlyConnected(const CService& addr)
{
addrman.Connected(addr);
}
// Process-wide traffic counters and their locks.
uint64_t CNode::nTotalBytesRecv = 0;
uint64_t CNode::nTotalBytesSent = 0;
CCriticalSection CNode::cs_totalBytesRecv;
CCriticalSection CNode::cs_totalBytesSent;
// Looks up a connected node by IP (port ignored); NULL if none.
CNode* FindNode(const CNetAddr& ip)
{
{
LOCK(cs_vNodes);
BOOST_FOREACH(CNode* pnode, vNodes)
if ((CNetAddr)pnode->addr == ip)
return (pnode);
}
return NULL;
}
// Looks up a connected node by its original address string; NULL if none.
CNode* FindNode(const std::string& addrName)
{
LOCK(cs_vNodes);
BOOST_FOREACH(CNode* pnode, vNodes)
if (pnode->addrName == addrName)
return (pnode);
return NULL;
}
// Looks up a connected node by full service address (IP + port); NULL if none.
CNode* FindNode(const CService& addr)
{
{
LOCK(cs_vNodes);
BOOST_FOREACH(CNode* pnode, vNodes)
if ((CService)pnode->addr == addr)
return (pnode);
}
return NULL;
}
// Establishes (or reuses) a connection to the given address, or to the
// pszDest host name if non-NULL. Returns a referenced CNode on success,
// NULL on failure or when the address is one of our own.
CNode* ConnectNode(CAddress addrConnect, const char *pszDest)
{
if (pszDest == NULL) {
// Never connect to ourselves.
if (IsLocal(addrConnect))
return NULL;
// Look for an existing connection
CNode* pnode = FindNode((CService)addrConnect);
if (pnode)
{
// Reuse the existing connection, adding a reference for the caller.
pnode->AddRef();
return pnode;
}
}
/// debug print
LogPrint("net", "trying connection %s lastseen=%.1fhrs\n",
pszDest ? pszDest : addrConnect.ToString(),
pszDest ? 0 : (double)(GetAdjustedTime() - addrConnect.nTime)/3600.0);
// Connect
SOCKET hSocket;
bool proxyConnectionFailed = false;
if (pszDest ? ConnectSocketByName(addrConnect, hSocket, pszDest, Params().GetDefaultPort(), nConnectTimeout, &proxyConnectionFailed) :
ConnectSocket(addrConnect, hSocket, nConnectTimeout, &proxyConnectionFailed))
{
addrman.Attempt(addrConnect);
LogPrint("net", "connected %s\n", pszDest ? pszDest : addrConnect.ToString());
// Set to non-blocking
#ifdef WIN32
u_long nOne = 1;
if (ioctlsocket(hSocket, FIONBIO, &nOne) == SOCKET_ERROR)
LogPrintf("ConnectSocket() : ioctlsocket non-blocking setting failed, error %d\n", WSAGetLastError());
#else
if (fcntl(hSocket, F_SETFL, O_NONBLOCK) == SOCKET_ERROR)
LogPrintf("ConnectSocket() : fcntl non-blocking setting failed, error %d\n", errno);
#endif
// Add node
CNode* pnode = new CNode(hSocket, addrConnect, pszDest ? pszDest : "", false);
pnode->AddRef();
{
LOCK(cs_vNodes);
vNodes.push_back(pnode);
}
pnode->nTimeConnected = GetTime();
return pnode;
} else if (!proxyConnectionFailed) {
// If connecting to the node failed, and failure is not caused by a problem connecting to
// the proxy, mark this as an attempt.
addrman.Attempt(addrConnect);
}
return NULL;
}
// Flags the node for disconnection and closes its socket immediately.
void CNode::CloseSocketDisconnect()
{
fDisconnect = true;
if (hSocket != INVALID_SOCKET)
{
LogPrint("net", "disconnecting node %s\n", addrName);
closesocket(hSocket);
hSocket = INVALID_SOCKET;
}
// in case this fails, we'll empty the recv buffer when the CNode is deleted
TRY_LOCK(cs_vRecvMsg, lockRecv);
if (lockRecv)
vRecvMsg.clear();
// if this was the sync node, we'll need a new one
if (this == pnodeSync)
pnodeSync = NULL;
}
// Sends our "version" handshake message to this peer.
void CNode::PushVersion()
{
/// when NTP implemented, change to just nTime = GetAdjustedTime()
int64_t nTime = (fInbound ? GetAdjustedTime() : GetTime());
// Only tell the peer its own address if it is routable and not a proxy.
CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? addr : CAddress(CService("0.0.0.0",0)));
CAddress addrMe = GetLocalAddress(&addr);
// Fresh random nonce accompanying this version message.
RAND_bytes((unsigned char*)&nLocalHostNonce, sizeof(nLocalHostNonce));
LogPrint("net", "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%s\n", PROTOCOL_VERSION, nBestHeight, addrMe.ToString(), addrYou.ToString(), addr.ToString());
PushMessage("version", PROTOCOL_VERSION, nLocalServices, nTime, addrYou, addrMe,
nLocalHostNonce, FormatSubVersion(CLIENT_NAME, CLIENT_VERSION, std::vector<string>()), nBestHeight);
}
// Ban list (address -> ban-expiry time), shared by all nodes; guarded by cs_setBanned.
std::map<CNetAddr, int64_t> CNode::setBanned;
CCriticalSection CNode::cs_setBanned;
void CNode::ClearBanned()
{
setBanned.clear();
}
// True if the address has an unexpired entry in the ban list.
bool CNode::IsBanned(CNetAddr ip)
{
bool fResult = false;
{
LOCK(cs_setBanned);
std::map<CNetAddr, int64_t>::iterator i = setBanned.find(ip);
if (i != setBanned.end())
{
int64_t t = (*i).second;
// Expired entries are not removed here; they simply stop counting.
if (GetTime() < t)
fResult = true;
}
}
return fResult;
}
// Adds misbehavior points to this peer; once the -banscore threshold is
// crossed the peer is banned for -bantime seconds and disconnected.
// Returns true iff the peer was disconnected. Local peers are never banned.
bool CNode::Misbehaving(int howmuch)
{
if (addr.IsLocal())
{
LogPrintf("Warning: Local node %s misbehaving (delta: %d)!\n", addrName, howmuch);
return false;
}
nMisbehavior += howmuch;
if (nMisbehavior >= GetArg("-banscore", 100))
{
int64_t banTime = GetTime()+GetArg("-bantime", 60*60*24); // Default 24-hour ban
LogPrintf("Misbehaving: %s (%d -> %d) DISCONNECTING\n", addr.ToString(), nMisbehavior-howmuch, nMisbehavior);
{
LOCK(cs_setBanned);
// Never shorten an existing ban.
if (setBanned[addr] < banTime)
setBanned[addr] = banTime;
}
CloseSocketDisconnect();
return true;
} else
LogPrintf("Misbehaving: %s (%d -> %d)\n", addr.ToString(), nMisbehavior-howmuch, nMisbehavior);
return false;
}
#undef X
// X(name) copies the member `name` into the matching field of `stats`.
#define X(name) stats.name = name
// Snapshots this node's statistics into `stats`.
void CNode::copyStats(CNodeStats &stats)
{
X(nServices);
X(nLastSend);
X(nLastRecv);
X(nTimeConnected);
X(nTimeOffset);
X(addrName);
X(nVersion);
X(strSubVer);
X(fInbound);
X(nStartingHeight);
X(nMisbehavior);
X(nSendBytes);
X(nRecvBytes);
stats.fSyncNode = (this == pnodeSync);
// It is common for nodes with good ping times to suddenly become lagged,
// due to a new block arriving or other large transfer.
// Merely reporting pingtime might fool the caller into thinking the node was still responsive,
// since pingtime does not update until the ping is complete, which might take a while.
// So, if a ping is taking an unusually long time in flight,
// the caller can immediately detect that this is happening.
int64_t nPingUsecWait = 0;
if ((0 != nPingNonceSent) && (0 != nPingUsecStart)) {
nPingUsecWait = GetTimeMicros() - nPingUsecStart;
}
// Raw ping time is in microseconds, but show it to user as whole seconds (Bitcoin users should be well used to small numbers with many decimal places by now :)
stats.dPingTime = (((double)nPingUsecTime) / 1e6);
stats.dPingWait = (((double)nPingUsecWait) / 1e6);
// Leave string empty if addrLocal invalid (not filled in yet)
stats.addrLocal = addrLocal.IsValid() ? addrLocal.ToString() : "";
}
#undef X
// requires LOCK(cs_vRecvMsg)
// Feed nBytes of raw network data into the deque of partially parsed
// messages. Returns false on a malformed header/payload (caller should
// disconnect the peer), true otherwise.
bool CNode::ReceiveMsgBytes(const char *pch, unsigned int nBytes)
{
    while (nBytes > 0) {
        // Start a fresh message when there is none yet, or the last is done.
        if (vRecvMsg.empty() || vRecvMsg.back().complete())
            vRecvMsg.push_back(CNetMessage(SER_NETWORK, nRecvVersion));

        CNetMessage& msg = vRecvMsg.back();

        // Header bytes and payload bytes are consumed by different parsers.
        int nHandled;
        if (msg.in_data)
            nHandled = msg.readData(pch, nBytes);
        else
            nHandled = msg.readHeader(pch, nBytes);

        if (nHandled < 0)
            return false; // parse error

        pch += nHandled;
        nBytes -= nHandled;

        // Stamp the arrival time of each fully received message.
        if (msg.complete())
            msg.nTime = GetTimeMicros();
    }
    return true;
}
int CNetMessage::readHeader(const char *pch, unsigned int nBytes)
{
// copy data to temporary parsing buffer
unsigned int nRemaining = 24 - nHdrPos;
unsigned int nCopy = std::min(nRemaining, nBytes);
memcpy(&hdrbuf[nHdrPos], pch, nCopy);
nHdrPos += nCopy;
// if header incomplete, exit
if (nHdrPos < 24)
return nCopy;
// deserialize to CMessageHeader
try {
hdrbuf >> hdr;
}
catch (std::exception &e) {
return -1;
}
// reject messages larger than MAX_SIZE
if (hdr.nMessageSize > MAX_SIZE)
return -1;
// switch state to reading message data
in_data = true;
return nCopy;
}
// Consume up to nBytes of message payload into vRecv.
// Returns the number of bytes consumed (never negative).
int CNetMessage::readData(const char *pch, unsigned int nBytes)
{
    unsigned int nRemaining = hdr.nMessageSize - nDataPos;
    unsigned int nToCopy = std::min(nRemaining, nBytes);

    // Grow the receive buffer lazily: at most 256 KiB ahead of what has
    // actually arrived, and never beyond the declared message size. This
    // avoids a huge up-front allocation for a peer that never delivers.
    if (vRecv.size() < nDataPos + nToCopy)
        vRecv.resize(std::min(hdr.nMessageSize, nDataPos + nToCopy + 256 * 1024));

    memcpy(&vRecv[nDataPos], pch, nToCopy);
    nDataPos += nToCopy;
    return nToCopy;
}
// requires LOCK(cs_vSend)
// Push as much of pnode->vSendMsg out the socket as the kernel accepts
// without blocking. Fully sent messages are erased from the queue;
// a partial send leaves nSendOffset pointing into the front message.
void SocketSendData(CNode *pnode)
{
    std::deque<CSerializeData>::iterator it = pnode->vSendMsg.begin();
    while (it != pnode->vSendMsg.end()) {
        const CSerializeData &data = *it;
        // Invariant: the resume offset always lies inside the front message.
        assert(data.size() > pnode->nSendOffset);
        int nBytes = send(pnode->hSocket, &data[pnode->nSendOffset], data.size() - pnode->nSendOffset, MSG_NOSIGNAL | MSG_DONTWAIT);
        if (nBytes > 0) {
            pnode->nLastSend = GetTime();
            pnode->nSendBytes += nBytes;
            pnode->nSendOffset += nBytes;
            pnode->RecordBytesSent(nBytes);
            if (pnode->nSendOffset == data.size()) {
                // Message fully on the wire: advance to the next one.
                pnode->nSendOffset = 0;
                pnode->nSendSize -= data.size();
                it++;
            } else {
                // could not send full message; stop sending more
                break;
            }
        } else {
            if (nBytes < 0) {
                // error
                int nErr = WSAGetLastError();
                // These errno values just mean "try again later"; anything
                // else is a real socket failure and we drop the peer.
                if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE && nErr != WSAEINTR && nErr != WSAEINPROGRESS)
                {
                    LogPrintf("socket send error %d\n", nErr);
                    pnode->CloseSocketDisconnect();
                }
            }
            // couldn't send anything at all
            break;
        }
    }
    if (it == pnode->vSendMsg.end()) {
        // Queue fully drained: offset and byte accounting must be zero.
        assert(pnode->nSendOffset == 0);
        assert(pnode->nSendSize == 0);
    }
    // Drop everything that was completely sent.
    pnode->vSendMsg.erase(pnode->vSendMsg.begin(), it);
}
// Peers removed from vNodes are parked here until every reference drops,
// at which point ThreadSocketHandler deletes them.
static list<CNode*> vNodesDisconnected;
// Main socket thread: reaps disconnected peers, select()s on all sockets,
// accepts inbound connections, moves bytes between sockets and per-node
// buffers, and enforces inactivity/ping timeouts. Loops until interrupted.
void ThreadSocketHandler()
{
    unsigned int nPrevNodeCount = 0;
    while (true)
    {
        //
        // Disconnect nodes
        //
        {
            LOCK(cs_vNodes);
            // Disconnect unused nodes
            vector<CNode*> vNodesCopy = vNodes;
            BOOST_FOREACH(CNode* pnode, vNodesCopy)
            {
                // A node is removed when flagged, or when unreferenced with
                // all of its buffers empty.
                if (pnode->fDisconnect ||
                    (pnode->GetRefCount() <= 0 && pnode->vRecvMsg.empty() && pnode->nSendSize == 0 && pnode->ssSend.empty()))
                {
                    // remove from vNodes
                    vNodes.erase(remove(vNodes.begin(), vNodes.end(), pnode), vNodes.end());
                    // release outbound grant (if any)
                    pnode->grantOutbound.Release();
                    // close socket and cleanup
                    pnode->CloseSocketDisconnect();
                    // hold in disconnected pool until all refs are released
                    if (pnode->fNetworkNode || pnode->fInbound)
                        pnode->Release();
                    vNodesDisconnected.push_back(pnode);
                }
            }
        }
        {
            // Delete disconnected nodes
            list<CNode*> vNodesDisconnectedCopy = vNodesDisconnected;
            BOOST_FOREACH(CNode* pnode, vNodesDisconnectedCopy)
            {
                // wait until threads are done using it
                if (pnode->GetRefCount() <= 0)
                {
                    bool fDelete = false;
                    {
                        // Only delete when all three per-node locks can be
                        // acquired, i.e. no other thread is touching it.
                        TRY_LOCK(pnode->cs_vSend, lockSend);
                        if (lockSend)
                        {
                            TRY_LOCK(pnode->cs_vRecvMsg, lockRecv);
                            if (lockRecv)
                            {
                                TRY_LOCK(pnode->cs_inventory, lockInv);
                                if (lockInv)
                                    fDelete = true;
                            }
                        }
                    }
                    if (fDelete)
                    {
                        vNodesDisconnected.remove(pnode);
                        delete pnode;
                    }
                }
            }
        }
        // Notify the UI when the connection count changes.
        if(vNodes.size() != nPrevNodeCount) {
            nPrevNodeCount = vNodes.size();
            uiInterface.NotifyNumConnectionsChanged(nPrevNodeCount);
        }
        //
        // Find which sockets have data to receive
        //
        struct timeval timeout;
        timeout.tv_sec = 0;
        timeout.tv_usec = 50000; // frequency to poll pnode->vSend
        fd_set fdsetRecv;
        fd_set fdsetSend;
        fd_set fdsetError;
        FD_ZERO(&fdsetRecv);
        FD_ZERO(&fdsetSend);
        FD_ZERO(&fdsetError);
        SOCKET hSocketMax = 0;
        bool have_fds = false;
        // Always watch the listening sockets for new inbound connections.
        BOOST_FOREACH(SOCKET hListenSocket, vhListenSocket) {
            FD_SET(hListenSocket, &fdsetRecv);
            hSocketMax = max(hSocketMax, hListenSocket);
            have_fds = true;
        }
        {
            LOCK(cs_vNodes);
            BOOST_FOREACH(CNode* pnode, vNodes)
            {
                if (pnode->hSocket == INVALID_SOCKET)
                    continue;
                {
                    TRY_LOCK(pnode->cs_vSend, lockSend);
                    if (lockSend) {
                        // do not read, if draining write queue
                        if (!pnode->vSendMsg.empty())
                            FD_SET(pnode->hSocket, &fdsetSend);
                        else
                            FD_SET(pnode->hSocket, &fdsetRecv);
                        FD_SET(pnode->hSocket, &fdsetError);
                        hSocketMax = max(hSocketMax, pnode->hSocket);
                        have_fds = true;
                    }
                }
            }
        }
        int nSelect = select(have_fds ? hSocketMax + 1 : 0,
            &fdsetRecv, &fdsetSend, &fdsetError, &timeout);
        boost::this_thread::interruption_point();
        if (nSelect == SOCKET_ERROR)
        {
            if (have_fds)
            {
                int nErr = WSAGetLastError();
                LogPrintf("socket select error %d\n", nErr);
                // Fall back to treating every descriptor as readable so the
                // per-socket recv() calls below surface the real error.
                for (unsigned int i = 0; i <= hSocketMax; i++)
                    FD_SET(i, &fdsetRecv);
            }
            FD_ZERO(&fdsetSend);
            FD_ZERO(&fdsetError);
            MilliSleep(timeout.tv_usec/1000);
        }
        //
        // Accept new connections
        //
        BOOST_FOREACH(SOCKET hListenSocket, vhListenSocket)
        if (hListenSocket != INVALID_SOCKET && FD_ISSET(hListenSocket, &fdsetRecv))
        {
            struct sockaddr_storage sockaddr;
            socklen_t len = sizeof(sockaddr);
            SOCKET hSocket = accept(hListenSocket, (struct sockaddr*)&sockaddr, &len);
            CAddress addr;
            int nInbound = 0;
            if (hSocket != INVALID_SOCKET)
                if (!addr.SetSockAddr((const struct sockaddr*)&sockaddr))
                    LogPrintf("Warning: Unknown socket family\n");
            // Count current inbound peers to enforce the connection cap.
            {
                LOCK(cs_vNodes);
                BOOST_FOREACH(CNode* pnode, vNodes)
                    if (pnode->fInbound)
                        nInbound++;
            }
            if (hSocket == INVALID_SOCKET)
            {
                int nErr = WSAGetLastError();
                if (nErr != WSAEWOULDBLOCK)
                    LogPrintf("socket error accept failed: %d\n", nErr);
            }
            else if (nInbound >= GetArg("-maxconnections", 125) - MAX_OUTBOUND_CONNECTIONS)
            {
                // Inbound slots exhausted: drop the connection immediately.
                closesocket(hSocket);
            }
            else if (CNode::IsBanned(addr))
            {
                LogPrintf("connection from %s dropped (banned)\n", addr.ToString());
                closesocket(hSocket);
            }
            else
            {
                LogPrint("net", "accepted connection %s\n", addr.ToString());
                CNode* pnode = new CNode(hSocket, addr, "", true);
                pnode->AddRef();
                {
                    LOCK(cs_vNodes);
                    vNodes.push_back(pnode);
                }
            }
        }
        //
        // Service each socket
        //
        // Work on a ref-counted snapshot so nodes cannot be deleted under us.
        vector<CNode*> vNodesCopy;
        {
            LOCK(cs_vNodes);
            vNodesCopy = vNodes;
            BOOST_FOREACH(CNode* pnode, vNodesCopy)
                pnode->AddRef();
        }
        BOOST_FOREACH(CNode* pnode, vNodesCopy)
        {
            boost::this_thread::interruption_point();
            //
            // Receive
            //
            if (pnode->hSocket == INVALID_SOCKET)
                continue;
            if (FD_ISSET(pnode->hSocket, &fdsetRecv) || FD_ISSET(pnode->hSocket, &fdsetError))
            {
                TRY_LOCK(pnode->cs_vRecvMsg, lockRecv);
                if (lockRecv)
                {
                    // Flood control: disconnect peers whose unprocessed
                    // receive buffer has grown past the configured limit.
                    if (pnode->GetTotalRecvSize() > ReceiveFloodSize()) {
                        if (!pnode->fDisconnect)
                            LogPrintf("socket recv flood control disconnect (%u bytes)\n", pnode->GetTotalRecvSize());
                        pnode->CloseSocketDisconnect();
                    }
                    else {
                        // typical socket buffer is 8K-64K
                        char pchBuf[0x10000];
                        int nBytes = recv(pnode->hSocket, pchBuf, sizeof(pchBuf), MSG_DONTWAIT);
                        if (nBytes > 0)
                        {
                            if (!pnode->ReceiveMsgBytes(pchBuf, nBytes))
                                pnode->CloseSocketDisconnect();
                            pnode->nLastRecv = GetTime();
                            pnode->nRecvBytes += nBytes;
                            pnode->RecordBytesRecv(nBytes);
                        }
                        else if (nBytes == 0)
                        {
                            // socket closed gracefully
                            if (!pnode->fDisconnect)
                                LogPrint("net", "socket closed\n");
                            pnode->CloseSocketDisconnect();
                        }
                        else if (nBytes < 0)
                        {
                            // error
                            int nErr = WSAGetLastError();
                            // "Try again" errno values are not fatal.
                            if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE && nErr != WSAEINTR && nErr != WSAEINPROGRESS)
                            {
                                if (!pnode->fDisconnect)
                                    LogPrintf("socket recv error %d\n", nErr);
                                pnode->CloseSocketDisconnect();
                            }
                        }
                    }
                }
            }
            //
            // Send
            //
            if (pnode->hSocket == INVALID_SOCKET)
                continue;
            if (FD_ISSET(pnode->hSocket, &fdsetSend))
            {
                TRY_LOCK(pnode->cs_vSend, lockSend);
                if (lockSend)
                    SocketSendData(pnode);
            }
            //
            // Inactivity checking
            //
            int64_t nTime = GetTime();
            // Only start timing out a peer after a 60-second grace period.
            if (nTime - pnode->nTimeConnected > 60)
            {
                if (pnode->nLastRecv == 0 || pnode->nLastSend == 0)
                {
                    LogPrint("net", "socket no message in first 60 seconds, %d %d\n", pnode->nLastRecv != 0, pnode->nLastSend != 0);
                    pnode->fDisconnect = true;
                }
                else if (nTime - pnode->nLastSend > TIMEOUT_INTERVAL)
                {
                    LogPrintf("socket sending timeout: %ds\n", nTime - pnode->nLastSend);
                    pnode->fDisconnect = true;
                }
                else if (nTime - pnode->nLastRecv > (pnode->nVersion > BIP0031_VERSION ? TIMEOUT_INTERVAL : 90*60))
                {
                    // Pre-BIP31 peers do not answer pings, so allow them a
                    // longer (90 minute) receive window.
                    LogPrintf("socket receive timeout: %ds\n", nTime - pnode->nLastRecv);
                    pnode->fDisconnect = true;
                }
                else if (pnode->nPingNonceSent && pnode->nPingUsecStart + TIMEOUT_INTERVAL * 1000000 < GetTimeMicros())
                {
                    LogPrintf("ping timeout: %fs\n", 0.000001 * (GetTimeMicros() - pnode->nPingUsecStart));
                    pnode->fDisconnect = true;
                }
            }
        }
        // Drop the references taken for this pass.
        {
            LOCK(cs_vNodes);
            BOOST_FOREACH(CNode* pnode, vNodesCopy)
                pnode->Release();
        }
    }
}
#ifdef USE_UPNP
// Discover a UPnP internet gateway and keep a TCP port mapping for our
// listen port alive, refreshing it every 20 minutes. On thread interruption
// the mapping is removed before exiting.
void ThreadMapPort()
{
    std::string port = strprintf("%u", GetListenPort());
    const char * multicastif = 0;
    const char * minissdpdpath = 0;
    struct UPNPDev * devlist = 0;
    char lanaddr[64];
// upnpDiscover() gained extra parameters across miniupnpc releases; pick
// the right call for the library version we are building against.
#ifndef UPNPDISCOVER_SUCCESS
    /* miniupnpc 1.5 */
    devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0);
#elif MINIUPNPC_API_VERSION < 14
    /* miniupnpc 1.6 */
    int error = 0;
    devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0, 0, &error);
#else
    /* miniupnpc 1.9.20150730 */
    int error = 0;
    devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0, 0, 2, &error);
#endif
    struct UPNPUrls urls;
    struct IGDdatas data;
    int r;
    // r == 1 means a valid, connected IGD was found.
    r = UPNP_GetValidIGD(devlist, &urls, &data, lanaddr, sizeof(lanaddr));
    if (r == 1)
    {
        if (fDiscover) {
            // Ask the gateway for our external address and publish it.
            char externalIPAddress[40];
            r = UPNP_GetExternalIPAddress(urls.controlURL, data.first.servicetype, externalIPAddress);
            if(r != UPNPCOMMAND_SUCCESS)
                LogPrintf("UPnP: GetExternalIPAddress() returned %d\n", r);
            else
            {
                if(externalIPAddress[0])
                {
                    LogPrintf("UPnP: ExternalIPAddress = %s\n", externalIPAddress);
                    AddLocal(CNetAddr(externalIPAddress), LOCAL_UPNP);
                }
                else
                    LogPrintf("UPnP: GetExternalIPAddress failed.\n");
            }
        }
        string strDesc = "OTOCASH " + FormatFullVersion();
        try {
            while (true) {
#ifndef UPNPDISCOVER_SUCCESS
                /* miniupnpc 1.5 */
                r = UPNP_AddPortMapping(urls.controlURL, data.first.servicetype,
                    port.c_str(), port.c_str(), lanaddr, strDesc.c_str(), "TCP", 0);
#else
                /* miniupnpc 1.6 */
                r = UPNP_AddPortMapping(urls.controlURL, data.first.servicetype,
                    port.c_str(), port.c_str(), lanaddr, strDesc.c_str(), "TCP", 0, "0");
#endif
                if(r!=UPNPCOMMAND_SUCCESS)
                    LogPrintf("AddPortMapping(%s, %s, %s) failed with code %d (%s)\n",
                        port, port, lanaddr, r, strupnperror(r));
                else
                    LogPrintf("UPnP Port Mapping successful.\n");;
                MilliSleep(20*60*1000); // Refresh every 20 minutes
            }
        }
        catch (boost::thread_interrupted)
        {
            // Clean shutdown: remove the mapping and free library state.
            r = UPNP_DeletePortMapping(urls.controlURL, data.first.servicetype, port.c_str(), "TCP", 0);
            LogPrintf("UPNP_DeletePortMapping() returned : %d\n", r);
            freeUPNPDevlist(devlist); devlist = 0;
            FreeUPNPUrls(&urls);
            throw;
        }
    } else {
        LogPrintf("No valid UPnP IGDs found\n");
        freeUPNPDevlist(devlist); devlist = 0;
        if (r != 0)
            FreeUPNPUrls(&urls);
    }
}
// Start or stop the UPnP background thread according to fUseUPnP.
// Any previously running thread is interrupted and joined first.
void MapPort(bool fUseUPnP)
{
    static boost::thread* upnp_thread = NULL;
    if (fUseUPnP)
    {
        if (upnp_thread) {
            upnp_thread->interrupt();
            upnp_thread->join();
            delete upnp_thread;
        }
        upnp_thread = new boost::thread(boost::bind(&TraceThread<void (*)()>, "upnp", &ThreadMapPort));
    }
    else if (upnp_thread) {
        upnp_thread->interrupt();
        upnp_thread->join();
        delete upnp_thread;
        upnp_thread = NULL;
    }
}
#else
// No-op stub when built without UPnP support.
void MapPort(bool)
{
    // Intentionally left blank.
}
#endif
// Query the hard-coded DNS seeds for peer addresses, but only when the
// address manager is empty (or -forcednsseed is set) and we have not already
// found peers on our own within 11 seconds.
void ThreadDNSAddressSeed()
{
    // goal: only query DNS seeds if address need is acute
    if ((addrman.size() > 0) &&
        (!GetBoolArg("-forcednsseed", false))) {
        MilliSleep(11 * 1000);
        LOCK(cs_vNodes);
        if (vNodes.size() >= 2) {
            LogPrintf("P2P peers available. Skipped DNS seeding.\n");
            return;
        }
    }
    const vector<CDNSSeedData> &vSeeds = Params().DNSSeeds();
    int found = 0;
    LogPrintf("Loading addresses from DNS seeds (could take a while)\n");
    BOOST_FOREACH(const CDNSSeedData &seed, vSeeds) {
        if (HaveNameProxy()) {
            // With a name proxy we cannot resolve locally; connect once to
            // the seed host instead and let it feed us addresses.
            AddOneShot(seed.host);
        } else {
            vector<CNetAddr> vIPs;
            vector<CAddress> vAdd;
            if (LookupHost(seed.host.c_str(), vIPs))
            {
                BOOST_FOREACH(CNetAddr& ip, vIPs)
                {
                    int nOneDay = 24*3600;
                    CAddress addr = CAddress(CService(ip, Params().GetDefaultPort()));
                    addr.nTime = GetTime() - 3*nOneDay - GetRand(4*nOneDay); // use a random age between 3 and 7 days old
                    vAdd.push_back(addr);
                    found++;
                }
            }
            // NOTE(review): the seed's name is used as the 'source' address
            // for addrman bucketing -- presumably a lookup-able fake source;
            // confirm CNetAddr(name, true) semantics against netbase.
            addrman.Add(vAdd, CNetAddr(seed.name, true));
        }
    }
    LogPrintf("%d addresses found from DNS seeds\n", found);
}
void DumpAddresses()
{
int64_t nStart = GetTimeMillis();
CAddrDB adb;
adb.Write(addrman);
LogPrint("net", "Flushed %d addresses to peers.dat %dms\n",
addrman.size(), GetTimeMillis() - nStart);
}
// Pop one pending "one-shot" destination and try to connect to it, using a
// non-blocking outbound-semaphore grant. On failure the destination is
// re-queued for a later attempt.
void static ProcessOneShot()
{
    string strDest;
    {
        LOCK(cs_vOneShots);
        if (vOneShots.empty())
            return;
        strDest = vOneShots.front();
        vOneShots.pop_front();
    }
    CAddress addr;
    // try-grant only: if every outbound slot is busy, skip this round.
    CSemaphoreGrant grant(*semOutbound, true);
    if (!grant)
        return;
    if (!OpenNetworkConnection(addr, &grant, strDest.c_str(), true))
        AddOneShot(strDest); // put it back for retry
}
// Outbound connection maker. With -connect it connects only to the listed
// addresses forever; otherwise it repeatedly selects candidates from the
// address manager, one per network group, up to the outbound slot limit.
void ThreadOpenConnections()
{
    // Connect to specific addresses
    if (mapArgs.count("-connect") && mapMultiArgs["-connect"].size() > 0)
    {
        for (int64_t nLoop = 0;; nLoop++)
        {
            ProcessOneShot();
            BOOST_FOREACH(string strAddr, mapMultiArgs["-connect"])
            {
                CAddress addr;
                OpenNetworkConnection(addr, NULL, strAddr.c_str());
                // Back off progressively (up to 5s per address) on later passes.
                for (int i = 0; i < 10 && i < nLoop; i++)
                {
                    MilliSleep(500);
                }
            }
            MilliSleep(500);
        }
    }
    // Initiate network connections
    int64_t nStart = GetTime();
    while (true)
    {
        ProcessOneShot();
        MilliSleep(500);
        // Blocks until an outbound slot is free.
        CSemaphoreGrant grant(*semOutbound);
        boost::this_thread::interruption_point();
        // Add seed nodes if DNS seeds are all down (an infrastructure attack?).
        if (addrman.size() == 0 && (GetTime() - nStart > 60)) {
            static bool done = false;
            if (!done) {
                LogPrintf("Adding fixed seed nodes as DNS doesn't seem to be available.\n");
                addrman.Add(Params().FixedSeeds(), CNetAddr("127.0.0.1"));
                done = true;
            }
        }
        //
        // Choose an address to connect to based on most recently seen
        //
        CAddress addrConnect;
        // Only connect out to one peer per network group (/16 for IPv4).
        // Do this here so we don't have to critsect vNodes inside mapAddresses critsect.
        int nOutbound = 0;
        set<vector<unsigned char> > setConnected;
        {
            LOCK(cs_vNodes);
            BOOST_FOREACH(CNode* pnode, vNodes) {
                if (!pnode->fInbound) {
                    setConnected.insert(pnode->addr.GetGroup());
                    nOutbound++;
                }
            }
        }
        int64_t nANow = GetAdjustedTime();
        int nTries = 0;
        while (true)
        {
            CAddress addr = addrman.Select();
            // if we selected an invalid address, restart
            if (!addr.IsValid() || setConnected.count(addr.GetGroup()) || IsLocal(addr))
                break;
            // If we didn't find an appropriate destination after trying 100 addresses fetched from addrman,
            // stop this loop, and let the outer loop run again (which sleeps, adds seed nodes, recalculates
            // already-connected network ranges, ...) before trying new addrman addresses.
            nTries++;
            if (nTries > 100)
                break;
            if (IsLimited(addr))
                continue;
            // only consider very recently tried nodes after 30 failed attempts
            if (nANow - addr.nLastTry < 600 && nTries < 30)
                continue;
            // do not allow non-default ports, unless after 50 invalid addresses selected already
            if (addr.GetPort() != Params().GetDefaultPort() && nTries < 50)
                continue;
            addrConnect = addr;
            break;
        }
        if (addrConnect.IsValid())
            OpenNetworkConnection(addrConnect, &grant);
    }
}
// Open and maintain connections to the -addnode peers. Every two minutes the
// entries are re-resolved; entries already connected to are skipped, and one
// IP per remaining entry is attempted (rotating through its IPs each pass).
void ThreadOpenAddedConnections()
{
    {
        LOCK(cs_vAddedNodes);
        vAddedNodes = mapMultiArgs["-addnode"];
    }

    if (HaveNameProxy()) {
        // With a name proxy we cannot resolve names ourselves: hand each
        // entry straight to the proxy and retry periodically.
        while(true) {
            list<string> lAddresses(0);
            {
                // Snapshot under lock; connect outside the lock.
                LOCK(cs_vAddedNodes);
                BOOST_FOREACH(string& strAddNode, vAddedNodes)
                    lAddresses.push_back(strAddNode);
            }
            BOOST_FOREACH(string& strAddNode, lAddresses) {
                CAddress addr;
                CSemaphoreGrant grant(*semOutbound);
                OpenNetworkConnection(addr, &grant, strAddNode.c_str());
                MilliSleep(500);
            }
            MilliSleep(120000); // Retry every 2 minutes
        }
    }

    for (unsigned int i = 0; true; i++)
    {
        list<string> lAddresses(0);
        {
            LOCK(cs_vAddedNodes);
            BOOST_FOREACH(string& strAddNode, vAddedNodes)
                lAddresses.push_back(strAddNode);
        }

        // Resolve each entry to its list of service addresses.
        list<vector<CService> > lservAddressesToAdd(0);
        BOOST_FOREACH(string& strAddNode, lAddresses)
        {
            vector<CService> vservNode(0);
            if(Lookup(strAddNode.c_str(), vservNode, Params().GetDefaultPort(), fNameLookup, 0))
            {
                lservAddressesToAdd.push_back(vservNode);
                {
                    LOCK(cs_setservAddNodeAddresses);
                    BOOST_FOREACH(CService& serv, vservNode)
                        setservAddNodeAddresses.insert(serv);
                }
            }
        }
        // Attempt to connect to each IP for each addnode entry until at least one is successful per addnode entry
        // (keeping in mind that addnode entries can have many IPs if fNameLookup)
        {
            LOCK(cs_vNodes);
            BOOST_FOREACH(CNode* pnode, vNodes)
            {
                // Drop every entry that matches an already-connected peer.
                // BUGFIX: the previous code did "it = erase(it); it--;",
                // which is undefined behaviour when the erased element was
                // the first one (decrementing begin()). Iterate without a
                // post-step instead: advance only when nothing was erased.
                for (list<vector<CService> >::iterator it = lservAddressesToAdd.begin(); it != lservAddressesToAdd.end();)
                {
                    bool fConnected = false;
                    BOOST_FOREACH(CService& addrNode, *(it))
                        if (pnode->addr == addrNode)
                        {
                            fConnected = true;
                            break;
                        }
                    if (fConnected)
                        it = lservAddressesToAdd.erase(it);
                    else
                        it++;
                }
            }
        }
        BOOST_FOREACH(vector<CService>& vserv, lservAddressesToAdd)
        {
            CSemaphoreGrant grant(*semOutbound);
            // Rotate through the entry's resolved IPs across passes.
            OpenNetworkConnection(CAddress(vserv[i % vserv.size()]), &grant);
            MilliSleep(500);
        }
        MilliSleep(120000); // Retry every 2 minutes
    }
}
// if successful, this moves the passed grant to the constructed node
// Establish a single outbound connection, either to 'addrConnect' or (when
// non-NULL) to the hostname 'strDest'. Duplicate, local, and banned targets
// are rejected up front. Returns true when a node object was created.
bool OpenNetworkConnection(const CAddress& addrConnect, CSemaphoreGrant *grantOutbound, const char *strDest, bool fOneShot)
{
    //
    // Initiate outbound network connection
    //
    boost::this_thread::interruption_point();
    if (!strDest)
        if (IsLocal(addrConnect) ||
            FindNode((CNetAddr)addrConnect) || CNode::IsBanned(addrConnect) ||
            FindNode(addrConnect.ToStringIPPort().c_str()))
            return false;
    if (strDest && FindNode(strDest))
        return false;

    CNode* pnode = ConnectNode(addrConnect, strDest);
    boost::this_thread::interruption_point();

    if (!pnode)
        return false;
    // Transfer the outbound slot ownership to the new node.
    if (grantOutbound)
        grantOutbound->MoveTo(pnode->grantOutbound);
    pnode->fNetworkNode = true;
    if (fOneShot)
        pnode->fOneShot = true;

    return true;
}
// for now, use a very simple selection metric: the node from which we received
// most recently
// Higher score = better initial-block-sync candidate.
static int64_t NodeSyncScore(const CNode *pnode) {
    return pnode->nLastRecv;
}
// Pick the best-scoring eligible peer from 'vNodes' and flag it as the
// initial-block-sync node (pnodeSync). No-op while importing/reindexing.
void static StartSync(const vector<CNode*> &vNodes) {
    CNode *pnodeNewSync = NULL;
    int64_t nBestScore = 0;

    // fImporting and fReindex are accessed out of cs_main here, but only
    // as an optimization - they are checked again in SendMessages.
    if (fImporting || fReindex)
        return;

    // Iterate over all nodes
    BOOST_FOREACH(CNode* pnode, vNodes) {
        // check preconditions for allowing a sync
        // (full node, not one-shot, fully connected, reasonably caught up,
        // and not in the known no-blocks version window)
        if (!pnode->fClient && !pnode->fOneShot &&
            !pnode->fDisconnect && pnode->fSuccessfullyConnected &&
            (pnode->nStartingHeight > (nBestHeight - 144)) &&
            (pnode->nVersion < NOBLKS_VERSION_START || pnode->nVersion >= NOBLKS_VERSION_END)) {
            // if ok, compare node's score with the best so far
            int64_t nScore = NodeSyncScore(pnode);
            if (pnodeNewSync == NULL || nScore > nBestScore) {
                pnodeNewSync = pnode;
                nBestScore = nScore;
            }
        }
    }
    // if a new sync candidate was found, start sync!
    if (pnodeNewSync) {
        pnodeNewSync->fStartSync = true;
        pnodeSync = pnodeNewSync;
    }
}
// Message-processing thread: repeatedly snapshots the node list, processes
// received messages and sends queued ones for each node, and (re)selects a
// sync node when the current one disappears.
void ThreadMessageHandler()
{
    SetThreadPriority(THREAD_PRIORITY_BELOW_NORMAL);
    while (true)
    {
        bool fHaveSyncNode = false;

        // Ref-counted snapshot so nodes are not deleted while in use.
        vector<CNode*> vNodesCopy;
        {
            LOCK(cs_vNodes);
            vNodesCopy = vNodes;
            BOOST_FOREACH(CNode* pnode, vNodesCopy) {
                pnode->AddRef();
                if (pnode == pnodeSync)
                    fHaveSyncNode = true;
            }
        }

        if (!fHaveSyncNode)
            StartSync(vNodesCopy);

        // Poll the connected nodes for messages
        // One random node per pass receives trickled (rate-limited) relays.
        CNode* pnodeTrickle = NULL;
        if (!vNodesCopy.empty())
            pnodeTrickle = vNodesCopy[GetRand(vNodesCopy.size())];

        bool fSleep = true;

        BOOST_FOREACH(CNode* pnode, vNodesCopy)
        {
            if (pnode->fDisconnect)
                continue;

            // Receive messages
            {
                TRY_LOCK(pnode->cs_vRecvMsg, lockRecv);
                if (lockRecv)
                {
                    if (!g_signals.ProcessMessages(pnode))
                        pnode->CloseSocketDisconnect();

                    // Skip the end-of-pass sleep when there is still work
                    // queued and room in the send buffer.
                    if (pnode->nSendSize < SendBufferSize())
                    {
                        if (!pnode->vRecvGetData.empty() || (!pnode->vRecvMsg.empty() && pnode->vRecvMsg[0].complete()))
                        {
                            fSleep = false;
                        }
                    }
                }
            }
            boost::this_thread::interruption_point();

            // Send messages
            {
                TRY_LOCK(pnode->cs_vSend, lockSend);
                if (lockSend)
                    g_signals.SendMessages(pnode, pnode == pnodeTrickle);
            }
            boost::this_thread::interruption_point();
        }

        // Drop the references taken for this pass.
        {
            LOCK(cs_vNodes);
            BOOST_FOREACH(CNode* pnode, vNodesCopy)
                pnode->Release();
        }

        if (fSleep)
            MilliSleep(100);
    }
}
// Create, configure, bind and listen on a socket for 'addrBind'.
// On success the socket is appended to vhListenSocket and (when routable and
// discovery is on) the address is advertised as local. On failure strError
// receives a human-readable message and false is returned.
bool BindListenPort(const CService &addrBind, string& strError)
{
    strError = "";
    int nOne = 1;

    // Create socket for listening for incoming connections
    struct sockaddr_storage sockaddr;
    socklen_t len = sizeof(sockaddr);
    if (!addrBind.GetSockAddr((struct sockaddr*)&sockaddr, &len))
    {
        strError = strprintf("Error: bind address family for %s not supported", addrBind.ToString());
        LogPrintf("%s\n", strError);
        return false;
    }

    SOCKET hListenSocket = socket(((struct sockaddr*)&sockaddr)->sa_family, SOCK_STREAM, IPPROTO_TCP);
    if (hListenSocket == INVALID_SOCKET)
    {
        strError = strprintf("Error: Couldn't open socket for incoming connections (socket returned error %d)", WSAGetLastError());
        LogPrintf("%s\n", strError);
        return false;
    }

#ifdef SO_NOSIGPIPE
    // Different way of disabling SIGPIPE on BSD
    setsockopt(hListenSocket, SOL_SOCKET, SO_NOSIGPIPE, (void*)&nOne, sizeof(int));
#endif

#ifndef WIN32
    // Allow binding if the port is still in TIME_WAIT state after
    // the program was closed and restarted. Not an issue on windows.
    setsockopt(hListenSocket, SOL_SOCKET, SO_REUSEADDR, (void*)&nOne, sizeof(int));
#endif

#ifdef WIN32
    // Set to non-blocking, incoming connections will also inherit this
    if (ioctlsocket(hListenSocket, FIONBIO, (u_long*)&nOne) == SOCKET_ERROR)
#else
    if (fcntl(hListenSocket, F_SETFL, O_NONBLOCK) == SOCKET_ERROR)
#endif
    {
        strError = strprintf("Error: Couldn't set properties on socket for incoming connections (error %d)", WSAGetLastError());
        LogPrintf("%s\n", strError);
        return false;
    }

    // some systems don't have IPV6_V6ONLY but are always v6only; others do have the option
    // and enable it by default or not. Try to enable it, if possible.
    if (addrBind.IsIPv6()) {
#ifdef IPV6_V6ONLY
#ifdef WIN32
        setsockopt(hListenSocket, IPPROTO_IPV6, IPV6_V6ONLY, (const char*)&nOne, sizeof(int));
#else
        setsockopt(hListenSocket, IPPROTO_IPV6, IPV6_V6ONLY, (void*)&nOne, sizeof(int));
#endif
#endif
#ifdef WIN32
        int nProtLevel = 10 /* PROTECTION_LEVEL_UNRESTRICTED */;
        int nParameterId = 23 /* IPV6_PROTECTION_LEVEl */;
        // this call is allowed to fail
        setsockopt(hListenSocket, IPPROTO_IPV6, nParameterId, (const char*)&nProtLevel, sizeof(int));
#endif
    }

    if (::bind(hListenSocket, (struct sockaddr*)&sockaddr, len) == SOCKET_ERROR)
    {
        int nErr = WSAGetLastError();
        if (nErr == WSAEADDRINUSE)
            strError = strprintf(_("Unable to bind to %s on this computer. OTOCASH is probably already running."), addrBind.ToString());
        else
            strError = strprintf(_("Unable to bind to %s on this computer (bind returned error %d, %s)"), addrBind.ToString(), nErr, strerror(nErr));
        LogPrintf("%s\n", strError);
        return false;
    }
    LogPrintf("Bound to %s\n", addrBind.ToString());

    // Listen for incoming connections
    if (listen(hListenSocket, SOMAXCONN) == SOCKET_ERROR)
    {
        strError = strprintf("Error: Listening for incoming connections failed (listen returned error %d)", WSAGetLastError());
        LogPrintf("%s\n", strError);
        return false;
    }

    vhListenSocket.push_back(hListenSocket);

    // Advertise the bound address when it is publicly reachable.
    if (addrBind.IsRoutable() && fDiscover)
        AddLocal(addrBind, LOCAL_BIND);

    return true;
}
// Discover this machine's own network addresses (so they can be advertised
// to peers): via gethostname() on Windows, via getifaddrs() elsewhere.
// No-op when -discover is off. The threadGroup parameter is currently unused.
void static Discover(boost::thread_group& threadGroup)
{
    if (!fDiscover)
        return;

#ifdef WIN32
    // Get local host IP
    char pszHostName[1000] = "";
    if (gethostname(pszHostName, sizeof(pszHostName)) != SOCKET_ERROR)
    {
        vector<CNetAddr> vaddr;
        if (LookupHost(pszHostName, vaddr))
        {
            BOOST_FOREACH (const CNetAddr &addr, vaddr)
            {
                AddLocal(addr, LOCAL_IF);
            }
        }
    }
#else
    // Get local host ip
    struct ifaddrs* myaddrs;
    if (getifaddrs(&myaddrs) == 0)
    {
        for (struct ifaddrs* ifa = myaddrs; ifa != NULL; ifa = ifa->ifa_next)
        {
            // Skip down interfaces and loopback devices.
            if (ifa->ifa_addr == NULL) continue;
            if ((ifa->ifa_flags & IFF_UP) == 0) continue;
            if (strcmp(ifa->ifa_name, "lo") == 0) continue;
            if (strcmp(ifa->ifa_name, "lo0") == 0) continue;
            if (ifa->ifa_addr->sa_family == AF_INET)
            {
                struct sockaddr_in* s4 = (struct sockaddr_in*)(ifa->ifa_addr);
                CNetAddr addr(s4->sin_addr);
                if (AddLocal(addr, LOCAL_IF))
                    LogPrintf("IPv4 %s: %s\n", ifa->ifa_name, addr.ToString());
            }
            else if (ifa->ifa_addr->sa_family == AF_INET6)
            {
                struct sockaddr_in6* s6 = (struct sockaddr_in6*)(ifa->ifa_addr);
                CNetAddr addr(s6->sin6_addr);
                if (AddLocal(addr, LOCAL_IF))
                    LogPrintf("IPv6 %s: %s\n", ifa->ifa_name, addr.ToString());
            }
        }
        freeifaddrs(myaddrs);
    }
#endif
}
// Bring the P2P layer up: initialize the outbound-connection semaphore and
// the local pseudo-node, discover our own addresses, and spawn all network
// worker threads into 'threadGroup'.
void StartNode(boost::thread_group& threadGroup)
{
    if (semOutbound == NULL) {
        // initialize semaphore
        // Outbound slots: the lesser of the hard cap and -maxconnections.
        int nMaxOutbound = min(MAX_OUTBOUND_CONNECTIONS, (int)GetArg("-maxconnections", 125));
        semOutbound = new CSemaphore(nMaxOutbound);
    }

    if (pnodeLocalHost == NULL)
        pnodeLocalHost = new CNode(INVALID_SOCKET, CAddress(CService("127.0.0.1", 0), nLocalServices));

    Discover(threadGroup);

    //
    // Start threads
    //

    if (!GetBoolArg("-dnsseed", true))
        LogPrintf("DNS seeding disabled\n");
    else
        threadGroup.create_thread(boost::bind(&TraceThread<void (*)()>, "dnsseed", &ThreadDNSAddressSeed));

#ifdef USE_UPNP
    // Map ports with UPnP
    MapPort(GetBoolArg("-upnp", USE_UPNP));
#endif

    // Send and receive from sockets, accept connections
    threadGroup.create_thread(boost::bind(&TraceThread<void (*)()>, "net", &ThreadSocketHandler));

    // Initiate outbound connections from -addnode
    threadGroup.create_thread(boost::bind(&TraceThread<void (*)()>, "addcon", &ThreadOpenAddedConnections));

    // Initiate outbound connections
    threadGroup.create_thread(boost::bind(&TraceThread<void (*)()>, "opencon", &ThreadOpenConnections));

    // Process messages
    threadGroup.create_thread(boost::bind(&TraceThread<void (*)()>, "msghand", &ThreadMessageHandler));

    // Dump network addresses
    threadGroup.create_thread(boost::bind(&LoopForever<void (*)()>, "dumpaddr", &DumpAddresses, DUMP_ADDRESSES_INTERVAL * 1000));
}
// Shut the network layer down: stop UPnP, nudge the mempool, wake every
// thread blocked on the outbound-connection semaphore, and flush the
// address database to disk. Always returns true.
bool StopNode()
{
    LogPrintf("StopNode()\n");
    MapPort(false);
    mempool.AddTransactionsUpdated(1);
    if (semOutbound)
    {
        // Post one permit per possible outbound slot so all waiters wake.
        int nPosted = 0;
        while (nPosted < MAX_OUTBOUND_CONNECTIONS)
        {
            semOutbound->post();
            nPosted++;
        }
    }
    DumpAddresses();
    return true;
}
// Static-destruction cleanup: closes all peer and listening sockets (and,
// on Windows, shuts Winsock down) when the process exits.
class CNetCleanup
{
public:
    CNetCleanup()
    {
    }
    ~CNetCleanup()
    {
        // Close sockets
        BOOST_FOREACH(CNode* pnode, vNodes)
            if (pnode->hSocket != INVALID_SOCKET)
                closesocket(pnode->hSocket);
        BOOST_FOREACH(SOCKET hListenSocket, vhListenSocket)
            if (hListenSocket != INVALID_SOCKET)
                if (closesocket(hListenSocket) == SOCKET_ERROR)
                    LogPrintf("closesocket(hListenSocket) failed with error %d\n", WSAGetLastError());

#ifdef WIN32
        // Shutdown Windows Sockets
        WSACleanup();
#endif
    }
}
// Single global instance whose destructor performs the cleanup.
instance_of_cnetcleanup;
void RelayTransaction(const CTransaction& tx, const uint256& hash)
{
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
ss.reserve(10000);
ss << tx;
RelayTransaction(tx, hash, ss);
}
// Store the serialized transaction in the relay map (expiring entries after
// 15 minutes) and announce its inventory to peers.
void RelayTransaction(const CTransaction& tx, const uint256& hash, const CDataStream& ss)
{
    CInv inv(MSG_TX, hash);
    {
        LOCK(cs_mapRelay);
        // Expire old relay messages
        while (!vRelayExpiration.empty() && vRelayExpiration.front().first < GetTime())
        {
            mapRelay.erase(vRelayExpiration.front().second);
            vRelayExpiration.pop_front();
        }

        // Save original serialized message so newer versions are preserved
        mapRelay.insert(std::make_pair(inv, ss));
        vRelayExpiration.push_back(std::make_pair(GetTime() + 15 * 60, inv));
    }

    RelayInventory(inv);
}
// Add 'bytes' to the global received-bytes counter (thread-safe).
void CNode::RecordBytesRecv(uint64_t bytes)
{
    LOCK(cs_totalBytesRecv);
    nTotalBytesRecv += bytes;
}
// Add 'bytes' to the global sent-bytes counter (thread-safe).
void CNode::RecordBytesSent(uint64_t bytes)
{
    LOCK(cs_totalBytesSent);
    nTotalBytesSent += bytes;
}
// Return the global received-bytes counter (thread-safe).
uint64_t CNode::GetTotalBytesRecv()
{
    LOCK(cs_totalBytesRecv);
    return nTotalBytesRecv;
}
// Return the global sent-bytes counter (thread-safe).
uint64_t CNode::GetTotalBytesSent()
{
    LOCK(cs_totalBytesSent);
    return nTotalBytesSent;
}
//
// CAddrDB
//
// Address database: serializes the address manager to/from peers.dat in the
// data directory.
CAddrDB::CAddrDB()
{
    pathAddr = GetDataDir() / "peers.dat";
}
// Atomically persist 'addr' to peers.dat: serialize (network magic +
// addrman + checksum) into a randomly named temp file, fsync it, then
// rename it over the real file. Returns false (with a logged error) on
// any I/O failure.
bool CAddrDB::Write(const CAddrMan& addr)
{
    // Generate random temporary filename
    // NOTE(review): RAND_bytes' return value is ignored; randv only picks
    // the temp-file name, so a weak value is harmless here.
    unsigned short randv = 0;
    RAND_bytes((unsigned char *)&randv, sizeof(randv));
    std::string tmpfn = strprintf("peers.dat.%04x", randv);

    // serialize addresses, checksum data up to that point, then append csum
    CDataStream ssPeers(SER_DISK, CLIENT_VERSION);
    ssPeers << FLATDATA(Params().MessageStart());
    ssPeers << addr;
    uint256 hash = Hash(ssPeers.begin(), ssPeers.end());
    ssPeers << hash;

    // open temp output file, and associate with CAutoFile
    boost::filesystem::path pathTmp = GetDataDir() / tmpfn;
    FILE *file = fopen(pathTmp.string().c_str(), "wb");
    CAutoFile fileout = CAutoFile(file, SER_DISK, CLIENT_VERSION);
    if (!fileout)
        return error("CAddrman::Write() : open failed");

    // Write and commit header, data
    try {
        fileout << ssPeers;
    }
    catch (std::exception &e) {
        return error("CAddrman::Write() : I/O error");
    }
    FileCommit(fileout);
    fileout.fclose();

    // replace existing peers.dat, if any, with new peers.dat.XXXX
    if (!RenameOver(pathTmp, pathAddr))
        return error("CAddrman::Write() : Rename-into-place failed");

    return true;
}
// Load the address manager contents from peers.dat: read the payload and its
// trailing SHA-256d checksum, verify the checksum and the network magic, then
// deserialize into 'addr'. Returns false (with a logged error) on any failure.
bool CAddrDB::Read(CAddrMan& addr)
{
    // open input file, and associate with CAutoFile
    FILE *file = fopen(pathAddr.string().c_str(), "rb");
    CAutoFile filein = CAutoFile(file, SER_DISK, CLIENT_VERSION);
    if (!filein)
        return error("CAddrman::Read() : open failed");

    // use file size to size memory buffer
    int fileSize = boost::filesystem::file_size(pathAddr);
    int dataSize = fileSize - sizeof(uint256);
    // Don't try to resize to a negative number if file is small
    if ( dataSize < 0 ) dataSize = 0;
    vector<unsigned char> vchData;
    vchData.resize(dataSize);
    uint256 hashIn;

    // read data and checksum from file
    try {
        // BUGFIX: taking &vchData[0] on an empty vector (dataSize == 0 for a
        // truncated file) is undefined behaviour; only dereference when
        // there is payload to read.
        if (dataSize > 0)
            filein.read((char *)&vchData[0], dataSize);
        filein >> hashIn;
    }
    catch (std::exception &e) {
        return error("CAddrman::Read() 2 : I/O error or stream data corrupted");
    }
    filein.fclose();

    CDataStream ssPeers(vchData, SER_DISK, CLIENT_VERSION);

    // verify stored checksum matches input data
    uint256 hashTmp = Hash(ssPeers.begin(), ssPeers.end());
    if (hashIn != hashTmp)
        return error("CAddrman::Read() : checksum mismatch; data corrupted");

    unsigned char pchMsgTmp[4];
    try {
        // de-serialize file header (network specific magic number) and ..
        ssPeers >> FLATDATA(pchMsgTmp);

        // ... verify the network matches ours
        if (memcmp(pchMsgTmp, Params().MessageStart(), sizeof(pchMsgTmp)))
            return error("CAddrman::Read() : invalid network magic number");

        // de-serialize address data into one CAddrMan object
        ssPeers >> addr;
    }
    catch (std::exception &e) {
        return error("CAddrman::Read() : I/O error or stream data corrupted");
    }

    return true;
}
|
//===--- ParseExpr.cpp - Swift Language Parser for Expressions ------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// Expression Parsing and AST Building
//
//===----------------------------------------------------------------------===//
#include "swift/Parse/Parser.h"
#include "swift/AST/DiagnosticsParse.h"
#include "swift/Basic/EditorPlaceholder.h"
#include "swift/Parse/CodeCompletionCallbacks.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "swift/Basic/StringExtras.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
using namespace swift;
/// parseExpr
///
///   expr:
///     expr-sequence(basic | trailing-closure)
///
/// \param Message the diagnostic to emit when an expression is required but
/// missing.
/// \param isExprBasic Whether we're only parsing an expr-basic.
ParserResult<Expr> Parser::parseExprImpl(Diag<> Message, bool isExprBasic) {
  // While parsing a refutable pattern, a leading let/var/is marks the start
  // of a matching pattern rather than an expression. Parse it to an
  // UnresolvedPatternExpr and let name binding perform final validation.
  // This is restricted to pattern position to improve QoI on malformed
  // expressions followed by (e.g.) let/var decls.
  if (InVarOrLetPattern && isOnlyStartOfMatchingPattern()) {
    ParserResult<Pattern> patternResult =
        parseMatchingPattern(/*isExprBasic*/ false);
    if (patternResult.hasCodeCompletion())
      return makeParserCodeCompletionResult<Expr>();
    if (patternResult.isNull())
      return nullptr;
    auto *wrapped = new (Context) UnresolvedPatternExpr(patternResult.get());
    return makeParserResult(wrapped);
  }

  // Ordinary case: defer to expr-sequence parsing.
  ParserResult<Expr> seq = parseExprSequence(Message, isExprBasic);
  if (seq.hasCodeCompletion())
    return seq;
  if (seq.isNull())
    return nullptr;
  return makeParserResult(seq.get());
}
/// parseExprIs
///   expr-is:
///     'is' type
ParserResult<Expr> Parser::parseExprIs() {
  // Consume the 'is' keyword.
  SourceLoc isLoc = consumeToken(tok::kw_is);

  // The right operand of 'is' is a type, not an expression.
  ParserResult<TypeRepr> typeResult = parseType(diag::expected_type_after_is);
  if (typeResult.hasCodeCompletion())
    return makeParserCodeCompletionResult<Expr>();
  if (typeResult.isNull())
    return nullptr;

  auto *check = new (Context) IsExpr(isLoc, typeResult.get());
  return makeParserResult(check);
}
/// parseExprAs
///   expr-as:
///     'as' type
///     'as?' type
///     'as!' type
ParserResult<Expr> Parser::parseExprAs() {
  // Consume the 'as' keyword.
  SourceLoc asLoc = consumeToken(tok::kw_as);

  // An optional postfix '?' or '!' selects the conditional or forced form.
  SourceLoc questionLoc;
  SourceLoc exclaimLoc;
  if (Tok.is(tok::question_postfix))
    questionLoc = consumeToken(tok::question_postfix);
  else if (Tok.is(tok::exclaim_postfix))
    exclaimLoc = consumeToken(tok::exclaim_postfix);

  // The right operand is a type.
  ParserResult<TypeRepr> typeResult = parseType(diag::expected_type_after_as);
  if (typeResult.hasCodeCompletion())
    return makeParserCodeCompletionResult<Expr>();
  if (typeResult.isNull())
    return nullptr;

  // Build the cast node matching whichever suffix (if any) was present.
  Expr *cast;
  if (questionLoc.isValid())
    cast = new (Context) ConditionalCheckedCastExpr(asLoc, questionLoc,
                                                    typeResult.get());
  else if (exclaimLoc.isValid())
    cast = new (Context) ForcedCheckedCastExpr(asLoc, exclaimLoc,
                                               typeResult.get());
  else
    cast = new (Context) CoerceExpr(asLoc, typeResult.get());
  return makeParserResult(cast);
}
/// parseExprArrow
///
///   expr-arrow:
///     '->'
///     'throws' '->'
ParserResult<Expr> Parser::parseExprArrow() {
  SourceLoc throwsTokenLoc, arrowTokenLoc;

  // A 'throws' may legally precede the arrow. If no arrow follows it, the
  // 'throws' is misplaced; diagnose and give up.
  if (Tok.is(tok::kw_throws)) {
    throwsTokenLoc = consumeToken(tok::kw_throws);
    if (!Tok.is(tok::arrow)) {
      diagnose(throwsTokenLoc, diag::throws_in_wrong_position);
      return nullptr;
    }
  }

  arrowTokenLoc = consumeToken(tok::arrow);

  // 'throws' after the arrow is in the wrong position: diagnose it, but
  // still consume it and record its location for recovery.
  if (Tok.is(tok::kw_throws)) {
    diagnose(Tok.getLoc(), diag::throws_in_wrong_position);
    throwsTokenLoc = consumeToken(tok::kw_throws);
  }

  return makeParserResult(
      new (Context) ArrowExpr(throwsTokenLoc, arrowTokenLoc));
}
/// parseExprSequence
///
/// expr-sequence(Mode):
/// expr-sequence-element(Mode) expr-binary(Mode)*
/// expr-binary(Mode):
/// operator-binary expr-sequence-element(Mode)
/// '?' expr-sequence(Mode) ':' expr-sequence-element(Mode)
/// '=' expr-unary
/// expr-is
/// expr-as
///
/// The sequencing for binary exprs is not structural, i.e., binary operators
/// are not inherently right-associative. If present, '?' and ':' tokens must
/// match.
///
/// Similarly, the parsing of 'try' as part of expr-sequence-element
/// is not structural. 'try' is not permitted at arbitrary points in
/// a sequence; in the places it's permitted, it's hoisted out to
/// apply to everything to its right.
ParserResult<Expr> Parser::parseExprSequence(Diag<> Message,
                                             bool isExprBasic,
                                             bool isForConditionalDirective) {
  // Flat list alternating operands and operators. Precedence is NOT applied
  // here; a SequenceExpr is built and folded later.
  SmallVector<Expr*, 8> SequencedExprs;
  SourceLoc startLoc = Tok.getLoc();
  bool HasCodeCompletion = false;

  while (true) {
    // Conditional-compilation condition expressions end at a line break.
    if (isForConditionalDirective && Tok.isAtStartOfLine())
      break;

    // Parse a unary expression.
    ParserResult<Expr> Primary =
      parseExprSequenceElement(Message, isExprBasic);
    HasCodeCompletion |= Primary.hasCodeCompletion();
    if (Primary.isNull()) {
      if (Primary.hasCodeCompletion()) {
        // Hand the operands parsed so far to code completion so it can use
        // the leading sequence for context.
        if (CodeCompletion) {
          CodeCompletion->setLeadingSequenceExprs(SequencedExprs);
        }
        return Primary;
      } else {
        return nullptr;
      }
    }
    SequencedExprs.push_back(Primary.get());

    if (isForConditionalDirective && Tok.isAtStartOfLine())
      break;

parse_operator:
    switch (Tok.getKind()) {
    case tok::oper_binary_spaced:
    case tok::oper_binary_unspaced: {
      // If this is an "&& #available()" expression (or related things that
      // show up in a stmt-condition production), then don't eat it.
      //
      // These are not general expressions, and && is an infix operator,
      // so the code is invalid. We get better recovery if we bail out from
      // this, because then we can produce a fixit to rewrite the && into a ,
      // if we're in a stmt-condition.
      if (Tok.getText() == "&&" &&
          peekToken().isAny(tok::pound_available,
                            tok::kw_let, tok::kw_var, tok::kw_case))
        goto done;

      // Parse the operator.
      Expr *Operator = parseExprOperator();
      SequencedExprs.push_back(Operator);

      // The message is only valid for the first subexpr.
      Message = diag::expected_expr_after_operator;
      break;
    }

    case tok::question_infix: {
      // Save the '?'.
      SourceLoc questionLoc = consumeToken();

      // Parse the middle expression of the ternary.
      ParserResult<Expr> middle =
        parseExprSequence(diag::expected_expr_after_if_question, isExprBasic);
      if (middle.hasCodeCompletion())
        return makeParserCodeCompletionResult<Expr>();
      if (middle.isNull())
        return nullptr;

      // Make sure there's a matching ':' after the middle expr.
      if (!Tok.is(tok::colon)) {
        diagnose(questionLoc, diag::expected_colon_after_if_question);
        return makeParserErrorResult(new (Context) ErrorExpr(
            {startLoc, middle.get()->getSourceRange().End}));
      }

      SourceLoc colonLoc = consumeToken();

      // The ternary participates in the sequence as a single "operator";
      // its true-branch expression is stored in the IfExpr node itself.
      auto *unresolvedIf
        = new (Context) IfExpr(questionLoc,
                               middle.get(),
                               colonLoc);
      SequencedExprs.push_back(unresolvedIf);
      Message = diag::expected_expr_after_if_colon;
      break;
    }

    case tok::equal: {
      // If we're parsing an expression as the body of a refutable var/let
      // pattern, then an assignment doesn't make sense. In a "if let"
      // statement the equals is the start of the condition, so don't parse it
      // as a binary operator.
      if (InVarOrLetPattern)
        goto done;
      SourceLoc equalsLoc = consumeToken();
      auto *assign = new (Context) AssignExpr(equalsLoc);
      SequencedExprs.push_back(assign);
      Message = diag::expected_expr_assignment;

      // Code completion directly after '=': rewire the sequence so the
      // AssignExpr owns its dest and an error placeholder src, then hand
      // the assignment to the completion callback.
      if (Tok.is(tok::code_complete)) {
        if (CodeCompletion) {
          auto RHS = new (Context) ErrorExpr(
            SourceRange(Tok.getRange().getStart(), Tok.getRange().getEnd()));
          assign->setSrc(RHS);
          SequencedExprs.pop_back();
          assign->setDest(SequencedExprs.back());
          SequencedExprs.pop_back();
          SequencedExprs.push_back(assign);
          CodeCompletion->completeAssignmentRHS(assign);
        }
        consumeToken();
        if (SequencedExprs.size() > 0 && (SequencedExprs.size() & 1) == 0) {
          // Make sure we have odd number of sequence exprs.
          SequencedExprs.pop_back();
        }
        auto Result = SequencedExprs.size() == 1 ?
          makeParserResult(SequencedExprs[0]):
          makeParserResult(SequenceExpr::create(Context, SequencedExprs));
        Result.setHasCodeCompletion();
        return Result;
      }
      break;
    }

    case tok::kw_is: {
      // Parse a type after the 'is' token instead of an expression.
      ParserResult<Expr> is = parseExprIs();
      if (is.isNull() || is.hasCodeCompletion())
        return is;

      // Store the expr itself as a placeholder RHS. The real RHS is the
      // type parameter stored in the node itself.
      SequencedExprs.push_back(is.get());
      SequencedExprs.push_back(is.get());

      // We already parsed the right operand as part of the 'is' production.
      // Jump directly to parsing another operator.
      goto parse_operator;
    }

    case tok::kw_as: {
      ParserResult<Expr> as = parseExprAs();
      if (as.isNull() || as.hasCodeCompletion())
        return as;

      // Store the expr itself as a placeholder RHS. The real RHS is the
      // type parameter stored in the node itself.
      SequencedExprs.push_back(as.get());
      SequencedExprs.push_back(as.get());

      // We already parsed the right operand as part of the 'as' production.
      // Jump directly to parsing another operator.
      goto parse_operator;
    }

    case tok::arrow:
    case tok::kw_throws: {
      // '->' (optionally preceded/followed by 'throws') as a sequence
      // element; used when function types appear in expression position.
      ParserResult<Expr> arrow = parseExprArrow();
      if (arrow.isNull() || arrow.hasCodeCompletion())
        return arrow;
      SequencedExprs.push_back(arrow.get());
      break;
    }

    default:
      // If the next token is not a binary operator, we're done.
      goto done;
    }
  }
done:

  // For conditional directives, we stop parsing after a line break. An even
  // element count means the sequence ended on an operator, i.e. the
  // condition is incomplete.
  if (isForConditionalDirective && (SequencedExprs.size() & 1) == 0) {
    diagnose(getEndOfPreviousLoc(),
             diag::incomplete_conditional_compilation_directive);
    return makeParserError();
  }

  // If we had semantic errors, just fail here.
  assert(!SequencedExprs.empty());

  // If we saw no operators, don't build a sequence.
  if (SequencedExprs.size() == 1) {
    auto Result = makeParserResult(SequencedExprs[0]);
    if (HasCodeCompletion)
      Result.setHasCodeCompletion();
    return Result;
  }

  auto Result = makeParserResult(SequenceExpr::create(Context, SequencedExprs));
  if (HasCodeCompletion)
    Result.setHasCodeCompletion();
  return Result;
}
/// parseExprSequenceElement
///
/// expr-sequence-element(Mode):
/// 'try' expr-unary(Mode)
/// 'try' '?' expr-unary(Mode)
/// 'try' '!' expr-unary(Mode)
/// expr-unary(Mode)
///
/// 'try' is not actually allowed at an arbitrary position of a
/// sequence, but this isn't enforced until sequence-folding.
ParserResult<Expr> Parser::parseExprSequenceElement(Diag<> message,
                                                    bool isExprBasic) {
  // Consume a leading 'try' and note an optional '!' or '?' suffix.
  SourceLoc tryLoc;
  bool hadTry = consumeIf(tok::kw_try, tryLoc);
  Optional<Token> trySuffix;
  if (hadTry && Tok.isAny(tok::exclaim_postfix, tok::question_postfix)) {
    trySuffix = Tok;
    consumeToken();
  }

  // Try to parse '@' sign or 'inout' as a attributed typerepr.
  if (Tok.isAny(tok::at_sign, tok::kw_inout)) {
    bool isType = false;
    {
      // Speculative lookahead only; the scope rewinds the parser state on
      // destruction.
      BacktrackingScope backtrack(*this);
      isType = canParseType();
    }
    if (isType) {
      ParserResult<TypeRepr> ty = parseType();
      if (ty.isNonNull())
        return makeParserResult(
            new (Context) TypeExpr(TypeLoc(ty.get(), Type())));
      checkForInputIncomplete();
      return nullptr;
    }
  }

  ParserResult<Expr> sub = parseExprUnary(message, isExprBasic);

  // Wrap the subexpression in the matching try-node, if we saw a 'try'.
  // NUM_TOKENS serves as the "no suffix" sentinel for the switch.
  if (hadTry && !sub.hasCodeCompletion() && !sub.isNull()) {
    switch (trySuffix ? trySuffix->getKind() : tok::NUM_TOKENS) {
    case tok::exclaim_postfix:
      sub = makeParserResult(
          new (Context) ForceTryExpr(tryLoc, sub.get(), trySuffix->getLoc()));
      break;
    case tok::question_postfix:
      sub = makeParserResult(
          new (Context) OptionalTryExpr(tryLoc, sub.get(),
                                        trySuffix->getLoc()));
      break;
    default:
      // If this is a simple "try expr" situation, where the expr is a closure
      // literal, and the next token is a 'catch', then the user wrote
      // try/catch instead of do/catch. Emit a fixit hint to rewrite to the
      // correct do/catch construct.
      if (Tok.is(tok::kw_catch) && isa<ClosureExpr>(sub.get())) {
        diagnose(tryLoc, diag::docatch_not_trycatch)
          .fixItReplace(tryLoc, "do");

        // Eat all of the catch clauses, so we don't trip over them in error
        // recovery.
        while (Tok.is(tok::kw_catch)) {
          ParserResult<CatchStmt> clause = parseStmtCatch();
          if (clause.hasCodeCompletion() && clause.isNull())
            break;
        }

        return makeParserResult(new (Context) ErrorExpr(tryLoc));
      }

      sub = makeParserResult(new (Context) TryExpr(tryLoc, sub.get()));
      break;
    }
  }

  return sub;
}
/// parseExprUnary
///
/// expr-unary(Mode):
/// expr-postfix(Mode)
/// operator-prefix expr-unary(Mode)
/// '&' expr-unary(Mode)
///
ParserResult<Expr> Parser::parseExprUnary(Diag<> Message, bool isExprBasic) {
  UnresolvedDeclRefExpr *Operator;
  switch (Tok.getKind()) {
  default:
    // If the next token is not an operator, just parse this as expr-postfix.
    return parseExprPostfix(Message, isExprBasic);

  case tok::amp_prefix: {
    // '&' introduces an inout expression.
    SourceLoc Loc = consumeToken(tok::amp_prefix);

    ParserResult<Expr> SubExpr = parseExprUnary(Message, isExprBasic);
    if (SubExpr.hasCodeCompletion())
      return makeParserCodeCompletionResult<Expr>();
    if (SubExpr.isNull())
      return nullptr;
    return makeParserResult(
        new (Context) InOutExpr(Loc, SubExpr.get(), Type()));
  }

  case tok::pound_keyPath:
    // '#keyPath(...)' — Objective-C key-path literal.
    return parseExprKeyPathObjC();
  case tok::backslash:
    // '\...' — Swift key-path literal.
    return parseExprKeyPath();

  case tok::oper_postfix:
    // Postfix operators cannot start a subexpression, but can happen
    // syntactically because the operator may just follow whatever precedes this
    // expression (and that may not always be an expression).
    diagnose(Tok, diag::invalid_postfix_operator);
    // Recover by treating it as a prefix operator.
    Tok.setKind(tok::oper_prefix);
    LLVM_FALLTHROUGH;
  case tok::oper_prefix:
    Operator = parseExprOperator();
    break;
  case tok::oper_binary_spaced:
  case tok::oper_binary_unspaced: {
    // For recovery purposes, accept an oper_binary here.
    SourceLoc OperEndLoc = Tok.getLoc().getAdvancedLoc(Tok.getLength());
    Tok.setKind(tok::oper_prefix);
    Operator = parseExprOperator();

    // If the next token begins exactly where the operator ended, the operand
    // is missing entirely; otherwise there was stray whitespace after the
    // operator, which a prefix operator may not have — offer to remove it.
    if (OperEndLoc == Tok.getLoc())
      diagnose(PreviousLoc, diag::expected_expr_after_unary_operator);
    else
      diagnose(PreviousLoc, diag::expected_prefix_operator)
        .fixItRemoveChars(OperEndLoc, Tok.getLoc());
    break;
  }
  }

  ParserResult<Expr> SubExpr = parseExprUnary(Message, isExprBasic);
  if (SubExpr.hasCodeCompletion())
    return makeParserCodeCompletionResult<Expr>();
  if (SubExpr.isNull())
    return nullptr;

  // Check if we have a unary '-' with number literal sub-expression, for
  // example, "-42" or "-1.25".
  if (auto *LE = dyn_cast<NumberLiteralExpr>(SubExpr.get())) {
    if (Operator->hasName() && Operator->getName().getBaseName() == "-") {
      // Fold the sign into the literal itself rather than building a
      // PrefixUnaryExpr.
      LE->setNegative(Operator->getLoc());
      return makeParserResult(LE);
    }
  }

  return makeParserResult(
      new (Context) PrefixUnaryExpr(Operator, SubExpr.get()));
}
/// expr-keypath-swift:
/// \ type? . initial-key-path-component key-path-components
///
/// key-path-components:
///   key-path-component*
/// <empty>
///
/// key-path-component:
/// .identifier
/// ?
/// !
/// [ expression ]
///
/// initial-key-path-component:
/// identifier
/// ?
/// !
/// [ expression ]
ParserResult<Expr> Parser::parseExprKeyPath() {
  // Consume '\'.
  SourceLoc backslashLoc = consumeToken(tok::backslash);
  // Publish the backslash location for nested parsing (restored on exit).
  llvm::SaveAndRestore<SourceLoc> slashLoc(SwiftKeyPathSlashLoc, backslashLoc);

  // FIXME: diagnostics
  ParserResult<Expr> rootResult, pathResult;
  if (!startsWithSymbol(Tok, '.')) {
    // There is an explicit root before the first '.'; parse it with the
    // key-path flag set (restored on scope exit).
    llvm::SaveAndRestore<bool> S(InSwiftKeyPath, true);
    rootResult = parseExprPostfix(diag::expr_keypath_expected_expr,
                                  /*isBasic=*/true);

    if (rootResult.isParseError() || rootResult.hasCodeCompletion())
      return rootResult;
  }

  if (startsWithSymbol(Tok, '.')) {
    llvm::SaveAndRestore<Expr*> S(SwiftKeyPathRoot, rootResult.getPtrOrNull());

    // For uniformity, \.foo is parsed as if it were MAGIC.foo, so we need to
    // make sure the . is there, but parsing the ? in \.? as .? doesn't make
    // sense. This is all made more complicated by .?. being considered an
    // operator token, and a single one at that (which means
    // peekToken().is(tok::identifier) is incorrect: it is true for .?.foo).
    auto position = getParserPosition();
    auto dotLoc = consumeStartingCharacterOfCurrentToken();
    // If a plain identifier follows the '.', rewind so the suffix parser
    // sees the period itself; otherwise keep the split token.
    if (Tok.is(tok::identifier))
      backtrackToPosition(position);

    auto inner = makeParserResult(new (Context) KeyPathDotExpr(dotLoc));
    bool unusedHasBindOptional = false;

    // Inside a keypath's path, the period always behaves normally: the key path
    // behavior is only the separation between type and path.
    pathResult = parseExprPostfixSuffix(inner, /*isExprBasic=*/true,
                                        /*periodHasKeyPathBehavior=*/false,
                                        unusedHasBindOptional);
    if (pathResult.isParseError() || pathResult.hasCodeCompletion())
      return pathResult;
  }

  // Either root or path (or both) may be null here; the node stores
  // whatever was present.
  auto keypath = new (Context) KeyPathExpr(
      backslashLoc, rootResult.getPtrOrNull(), pathResult.getPtrOrNull());
  return makeParserResult(keypath);
}
/// expr-keypath-objc:
/// '#keyPath' '(' unqualified-name ('.' unqualified-name) * ')'
///
ParserResult<Expr> Parser::parseExprKeyPathObjC() {
  // Consume '#keyPath'.
  SourceLoc keywordLoc = consumeToken(tok::pound_keyPath);

  // Parse the leading '('.
  if (!Tok.is(tok::l_paren)) {
    diagnose(Tok, diag::expr_keypath_expected_lparen);
    return makeParserError();
  }
  SourceLoc lParenLoc = consumeToken(tok::l_paren);

  SmallVector<KeyPathExpr::Component, 4> components;
  /// Handler for code completion: build a partial key-path expression from
  /// the components parsed so far (if any) and hand it to the callback.
  auto handleCodeCompletion = [&](bool hasDot) -> ParserResult<Expr> {
    KeyPathExpr *expr = nullptr;
    if (!components.empty()) {
      expr = new (Context)
          KeyPathExpr(Context, keywordLoc, lParenLoc, components, Tok.getLoc());
    }

    if (CodeCompletion)
      CodeCompletion->completeExprKeyPath(expr, hasDot);

    // Eat the code completion token because we handled it.
    consumeToken(tok::code_complete);
    return makeParserCodeCompletionResult(expr);
  };

  // Parse the sequence of unqualified-names.
  ParserStatus status;
  while (true) {
    // Handle code completion.
    if (Tok.is(tok::code_complete))
      return handleCodeCompletion(!components.empty());

    // Parse the next name.
    DeclNameLoc nameLoc;
    bool afterDot = !components.empty();
    auto name = parseUnqualifiedDeclName(
                  afterDot, nameLoc,
                  diag::expr_keypath_expected_property_or_type);
    if (!name) {
      status.setIsParseError();
      break;
    }

    // Record the name we parsed as an unresolved-property component.
    auto component = KeyPathExpr::Component::forUnresolvedProperty(name,
                                      nameLoc.getBaseNameLoc());
    components.push_back(component);

    // Handle code completion.
    if (Tok.is(tok::code_complete))
      return handleCodeCompletion(false);

    // Parse the next period to continue the path.
    if (consumeIf(tok::period))
      continue;

    break;
  }

  // Parse the closing ')'.
  SourceLoc rParenLoc;
  if (status.isError()) {
    // Error recovery: skip forward and grab the ')' if it is still there;
    // otherwise fall back to the previous token's location.
    skipUntilDeclStmtRBrace(tok::r_paren);
    if (Tok.is(tok::r_paren))
      rParenLoc = consumeToken();
    else
      rParenLoc = PreviousLoc;
  } else {
    parseMatchingToken(tok::r_paren, rParenLoc,
                       diag::expr_keypath_expected_rparen, lParenLoc);
  }

  // If we cannot build a useful expression, just return an error
  // expression.
  if (components.empty() || status.isError()) {
    return makeParserResult<Expr>(
             new (Context) ErrorExpr(SourceRange(keywordLoc, rParenLoc)));
  }

  // We're done: create the key-path expression.
  return makeParserResult<Expr>(new (Context) KeyPathExpr(
      Context, keywordLoc, lParenLoc, components, rParenLoc));
}
/// parseExprSelector
///
/// expr-selector:
/// '#selector' '(' expr ')'
/// '#selector' '(' 'getter' ':' expr ')'
/// '#selector' '(' 'setter' ':' expr ')'
///
ParserResult<Expr> Parser::parseExprSelector() {
  // Consume '#selector'.
  SourceLoc keywordLoc = consumeToken(tok::pound_selector);

  // Parse the leading '('.
  if (!Tok.is(tok::l_paren)) {
    diagnose(Tok, diag::expr_selector_expected_lparen);
    return makeParserError();
  }
  SourceLoc lParenLoc = consumeToken(tok::l_paren);
  SourceLoc modifierLoc;

  // Parse possible 'getter:' or 'setter:' modifiers, and determine
  // the kind of selector we're working with.
  ObjCSelectorExpr::ObjCSelectorKind selectorKind;
  if (peekToken().is(tok::colon) &&
      (Tok.isContextualKeyword("getter") ||
       Tok.isContextualKeyword("setter"))) {
    // Parse the modifier.
    if (Tok.isContextualKeyword("getter"))
      selectorKind = ObjCSelectorExpr::Getter;
    else
      selectorKind = ObjCSelectorExpr::Setter;

    modifierLoc = consumeToken(tok::identifier);
    (void)consumeToken(tok::colon);
  } else {
    selectorKind = ObjCSelectorExpr::Method;
  }

  // Translate the selector kind into the context used by code completion.
  ObjCSelectorContext selectorContext;
  switch (selectorKind) {
  case ObjCSelectorExpr::Getter:
    selectorContext = ObjCSelectorContext::GetterSelector;
    break;
  case ObjCSelectorExpr::Setter:
    selectorContext = ObjCSelectorContext::SetterSelector;
    break;
  case ObjCSelectorExpr::Method:
    selectorContext = ObjCSelectorContext::MethodSelector;
  }

  // Parse the subexpression (with the code-completion callback informed,
  // via RAII, that we are inside a #selector).
  CodeCompletionCallbacks::InObjCSelectorExprRAII
    InObjCSelectorExpr(CodeCompletion, selectorContext);
  ParserResult<Expr> subExpr =
    parseExpr(selectorKind == ObjCSelectorExpr::Method
                ? diag::expr_selector_expected_method_expr
                : diag::expr_selector_expected_property_expr);
  if (subExpr.hasCodeCompletion())
    return makeParserCodeCompletionResult<Expr>();

  // Parse the closing ')'.
  SourceLoc rParenLoc;
  if (subExpr.isParseError()) {
    // Error recovery: skip forward and grab the ')' if still present.
    skipUntilDeclStmtRBrace(tok::r_paren);
    if (Tok.is(tok::r_paren))
      rParenLoc = consumeToken();
    else
      rParenLoc = PreviousLoc;
  } else {
    parseMatchingToken(tok::r_paren, rParenLoc,
                       diag::expr_selector_expected_rparen, lParenLoc);
  }

  // If the subexpression was in error, just propagate the error.
  if (subExpr.isParseError())
    return makeParserResult<Expr>(
      new (Context) ErrorExpr(SourceRange(keywordLoc, rParenLoc)));

  return makeParserResult<Expr>(
    new (Context) ObjCSelectorExpr(selectorKind, keywordLoc, lParenLoc,
                                   modifierLoc, subExpr.get(), rParenLoc));
}
/// Map an operator token kind to the declaration-reference kind used when
/// building an unresolved operator reference.
static DeclRefKind getDeclRefKindForOperator(tok kind) {
  switch (kind) {
  case tok::oper_prefix:
    return DeclRefKind::PrefixOperator;
  case tok::oper_postfix:
    return DeclRefKind::PostfixOperator;
  case tok::oper_binary_spaced:
  case tok::oper_binary_unspaced:
    return DeclRefKind::BinaryOperator;
  default:
    llvm_unreachable("bad operator token kind");
  }
}
/// parseExprOperator - Parse an operator reference expression. These
/// are not "proper" expressions; they can only appear in binary/unary
/// operators.
UnresolvedDeclRefExpr *Parser::parseExprOperator() {
  assert(Tok.isAnyOperator());
  // Capture everything we need from the token before consuming it.
  SourceLoc opLoc = Tok.getLoc();
  Identifier opName = Context.getIdentifier(Tok.getText());
  DeclRefKind refKind = getDeclRefKindForOperator(Tok.getKind());
  consumeToken();

  // Operators resolve through operator lookup; bypass local scope lookup.
  return new (Context) UnresolvedDeclRefExpr(opName, refKind,
                                             DeclNameLoc(opLoc));
}
/// Find the implicit 'self' declaration that a 'super' reference at \p Loc
/// should bind to, or return null after emitting a diagnostic when 'super'
/// is not usable in this context.
static VarDecl *getImplicitSelfDeclForSuperContext(Parser &P,
                                                   DeclContext *DC,
                                                   SourceLoc Loc) {
  auto *enclosingMethod = DC->getInnermostMethodContext();
  if (!enclosingMethod) {
    // 'super' outside a method has nothing to refer to.
    P.diagnose(Loc, diag::super_not_in_class_method);
    return nullptr;
  }
  VarDecl *implicitSelf = enclosingMethod->getImplicitSelfDecl();

  // Do an actual lookup for 'self' in case it shows up in a capture list.
  if (auto *scopeSelf = P.lookupInScope(P.Context.Id_self)) {
    if (scopeSelf != implicitSelf) {
      // FIXME: This is the wrong diagnostic for if someone manually declares a
      // variable named 'self' using backticks.
      P.diagnose(Loc, diag::super_in_closure_with_capture);
      P.diagnose(scopeSelf->getLoc(),
                 diag::super_in_closure_with_capture_here);
      return nullptr;
    }
  }
  return implicitSelf;
}
/// parseExprSuper
///
/// expr-super:
/// expr-super-member
/// expr-super-init
/// expr-super-subscript
/// expr-super-member:
/// 'super' '.' identifier
/// expr-super-init:
/// 'super' '.' 'init'
/// expr-super-subscript:
/// 'super' '[' expr ']'
ParserResult<Expr> Parser::parseExprSuper(bool isExprBasic) {
  // Parse the 'super' reference.
  SourceLoc superLoc = consumeToken(tok::kw_super);

  // Resolve the implicit 'self' that this 'super' refers to; null means the
  // context was invalid (already diagnosed by the helper).
  VarDecl *selfDecl = getImplicitSelfDeclForSuperContext(*this,
                                                         CurDeclContext,
                                                         superLoc);
  bool ErrorOccurred = selfDecl == nullptr;

  Expr *superRef = !ErrorOccurred
    ? cast<Expr>(new (Context) SuperRefExpr(selfDecl, superLoc,
                                            /*Implicit=*/false))
    : cast<Expr>(new (Context) ErrorExpr(superLoc));

  if (Tok.isAny(tok::period, tok::period_prefix)) {
    // 'super.' must be followed by a member or initializer ref.
    SourceLoc dotLoc = consumeToken();

    if (Tok.is(tok::code_complete)) {
      if (CodeCompletion) {
        if (auto *SRE = dyn_cast<SuperRefExpr>(superRef))
          CodeCompletion->completeExprSuperDot(SRE);
      }

      // Eat the code completion token because we handled it.
      consumeToken(tok::code_complete);
      return makeParserCodeCompletionResult(superRef);
    }

    DeclNameLoc nameLoc;
    DeclName name = parseUnqualifiedDeclName(/*afterDot=*/true, nameLoc,
                      diag::expected_identifier_after_super_dot_expr);
    if (!name)
      return nullptr;

    return makeParserResult(
             new (Context) UnresolvedDotExpr(superRef, dotLoc, name, nameLoc,
                                             /*Implicit=*/false));
  }

  // NOTE: l_square_lit is for migrating the old object literal syntax.
  // Eventually this block can be removed.
  if (Tok.is(tok::l_square_lit) && !Tok.isAtStartOfLine() &&
      isCollectionLiteralStartingWithLSquareLit()) {
    assert(Tok.getLength() == 1);

    // Reinterpret the literal bracket as an ordinary subscript '['.
    Tok.setKind(tok::l_square);
  }

  if (Tok.isFollowingLSquare()) {
    // super[expr]
    SourceLoc lSquareLoc, rSquareLoc;
    SmallVector<Expr *, 2> indexArgs;
    SmallVector<Identifier, 2> indexArgLabels;
    SmallVector<SourceLoc, 2> indexArgLabelLocs;
    Expr *trailingClosure;

    ParserStatus status = parseExprList(tok::l_square, tok::r_square,
                                        /*isPostfix=*/true, isExprBasic,
                                        lSquareLoc, indexArgs, indexArgLabels,
                                        indexArgLabelLocs,
                                        rSquareLoc,
                                        trailingClosure);
    if (status.hasCodeCompletion())
      return makeParserCodeCompletionResult<Expr>();
    if (status.isError())
      return nullptr;
    return makeParserResult(
      SubscriptExpr::create(Context, superRef, lSquareLoc, indexArgs,
                            indexArgLabels, indexArgLabelLocs, rSquareLoc,
                            trailingClosure, ConcreteDeclRef(),
                            /*implicit=*/false));
  }

  if (Tok.is(tok::code_complete)) {
    if (CodeCompletion) {
      if (auto *SRE = dyn_cast<SuperRefExpr>(superRef))
        CodeCompletion->completeExprSuper(SRE);
    }
    // Eat the code completion token because we handled it.
    consumeToken(tok::code_complete);
    return makeParserCodeCompletionResult(superRef);
  }

  // An unknown token here gets consumed without a diagnostic — presumably it
  // was already diagnosed during lexing; TODO confirm.
  if (consumeIf(tok::unknown))
    return nullptr;

  diagnose(Tok, diag::expected_dot_or_subscript_after_super);
  return nullptr;
}
/// Copy a numeric literal value into AST-owned memory, stripping underscores
/// so the semantic part of the value can be parsed by APInt/APFloat parsers.
static StringRef copyAndStripUnderscores(ASTContext &C, StringRef orig) {
  auto *buffer = static_cast<char *>(C.Allocate(orig.size(), 1));
  char *out = buffer;
  // Guard against a null allocation; in that case the result is empty.
  if (buffer) {
    for (unsigned i = 0, e = orig.size(); i != e; ++i) {
      char c = orig[i];
      if (c != '_')
        *out++ = c;
    }
  }
  return StringRef(buffer, out - buffer);
}
/// Disambiguate the parse after a '{' token that is in a place that might be
/// the start of a trailing closure, or start the variable accessor block.
///
/// Returns true when the '{' is followed by a 'didSet' or 'willSet' label
/// (possibly preceded by attributes), i.e. when it begins a get-set block in
/// a variable definition rather than a trailing closure.
static bool isStartOfGetSetAccessor(Parser &P) {
  assert(P.Tok.is(tok::l_brace) && "not checking a brace?");

  // The only case this can happen is if the accessor label is immediately
  // after a brace (possibly preceded by attributes). "get" is implicit, so it
  // can't be checked for. Conveniently however, get/set properties are not
  // allowed to have initializers, so we don't have an ambiguity, we just have
  // to check for observing accessors.
  //
  // A bare 'didSet'/'willSet' right after the brace decides it immediately.
  const Token &afterBrace = P.peekToken();
  if (afterBrace.isContextualKeyword("didSet") ||
      afterBrace.isContextualKeyword("willSet"))
    return true;

  // Without a leading attribute there is nothing else that could make this
  // an accessor block.
  if (afterBrace.isNot(tok::at_sign))
    return false;

  // Speculatively skip the '{' and any attributes, then look for the labels.
  // The backtracking scope rewinds the parser when we return.
  Parser::BacktrackingScope Backtrack(P);
  P.consumeToken(tok::l_brace);
  while (P.consumeIf(tok::at_sign)) {
    if (!P.consumeIf(tok::identifier))
      return false;
    // Skip an argument clause after the attribute name, e.g. @foo(x).
    if (P.Tok.is(tok::l_paren))
      P.skipSingle();
  }
  return P.Tok.isContextualKeyword("didSet") ||
         P.Tok.isContextualKeyword("willSet");
}
/// Recover invalid uses of trailing closures in a situation
/// where the parser requires an expr-basic (which does not allow them). We
/// handle this by doing some lookahead in common situations. And later, Sema
/// will emit a diagnostic with a fixit to add wrapping parens.
static bool isValidTrailingClosure(bool isExprBasic, Parser &P){
assert(P.Tok.is(tok::l_brace) && "Couldn't be a trailing closure");
// If this is the start of a get/set accessor, then it isn't a trailing
// closure.
if (isStartOfGetSetAccessor(P))
return false;
// If this is a normal expression (not an expr-basic) then trailing closures
// are allowed, so this is obviously one.
// TODO: We could handle try to disambiguate cases like:
// let x = foo
// {...}()
// by looking ahead for the ()'s, but this has been replaced by do{}, so this
// probably isn't worthwhile.
//
if (!isExprBasic)
return true;
// If this is an expr-basic, then a trailing closure is not allowed. However,
// it is very common for someone to write something like:
//
// for _ in numbers.filter {$0 > 4} {
//
// and we want to recover from this very well. We need to perform arbitrary
// look-ahead to disambiguate this case, so we only do this in the case where
// the token after the { is on the same line as the {.
if (P.peekToken().isAtStartOfLine())
return false;
// Determine if the {} goes with the expression by eating it, and looking
// to see if it is immediately followed by '{', 'where', or comma. If so,
// we consider it to be part of the proceeding expression.
Parser::BacktrackingScope backtrack(P);
P.consumeToken(tok::l_brace);
P.skipUntil(tok::r_brace);
SourceLoc endLoc;
if (!P.consumeIf(tok::r_brace, endLoc) ||
P.Tok.isNot(tok::l_brace, tok::kw_where, tok::comma)) {
return false;
}
// Recoverable case. Just return true here and Sema will emit a diagnostic
// later. see: Sema/MiscDiagnostics.cpp#checkStmtConditionTrailingClosure
return true;
}
/// Map magic literal tokens such as #file (and their legacy __FILE__-style
/// spellings) to the corresponding MagicIdentifierLiteralExpr kind.
static MagicIdentifierLiteralExpr::Kind
getMagicIdentifierLiteralKind(tok Kind) {
  switch (Kind) {
  case tok::pound_file:
  case tok::kw___FILE__:
    return MagicIdentifierLiteralExpr::Kind::File;
  case tok::pound_line:
  case tok::kw___LINE__:
    return MagicIdentifierLiteralExpr::Kind::Line;
  case tok::pound_column:
  case tok::kw___COLUMN__:
    return MagicIdentifierLiteralExpr::Kind::Column;
  case tok::pound_function:
  case tok::kw___FUNCTION__:
    return MagicIdentifierLiteralExpr::Kind::Function;
  case tok::pound_dsohandle:
  case tok::kw___DSO_HANDLE__:
    return MagicIdentifierLiteralExpr::Kind::DSOHandle;
  default:
    llvm_unreachable("not a magic literal");
  }
}
/// See if type(of: <expr>) can be parsed backtracking on failure.
static bool canParseTypeOf(Parser &P) {
  // We parsed `type(of:)` as a special syntactic form in Swift 3. In Swift 4
  // it is handled by overload resolution.
  if (!P.Context.LangOpts.isSwiftVersion3())
    return false;

  // Must start with exactly `type(`.
  if (P.Tok.getText() != "type" || !P.peekToken().is(tok::l_paren))
    return false;

  // Speculatively parse the parenthesized expression; the scope rewinds the
  // parser when we return.
  Parser::BacktrackingScope Backtrack(P);
  P.consumeToken(tok::identifier);
  P.consumeToken(tok::l_paren);

  // The first argument label must be 'of'.
  if (P.Tok.getText() != "of" || !P.peekToken().is(tok::colon))
    return false;

  // Scan to the closing paren. Any comma means another argument, which this
  // form doesn't take — bail out so it falls back to an unresolved decl.
  while (!P.Tok.isAny(tok::r_paren, tok::eof)) {
    if (P.Tok.is(tok::comma))
      return false;
    P.skipSingle();
  }
  return true;
}
/// Parse the chain of postfix suffixes (member access '.foo', generic
/// specialization '<T>', calls '(...)', subscripts '[...]', trailing
/// closures, '?', '!', postfix operators, and code-completion tokens) that
/// may follow an already-parsed postfix expression, folding each suffix
/// around \p Result.
///
/// \param Result the expression parsed so far; returned unchanged if null.
/// \param isExprBasic whether we are in a "basic" expression position, which
///        restricts where trailing closures are allowed.
/// \param periodHasKeyPathBehavior when true, '.' is treated with Swift
///        key-path restrictions (e.g. '.[' is left in the token stream for
///        the key-path parser, and operators starting with '.' terminate
///        the suffix chain).
/// \param hasBindOptional out-parameter, set to true when a '?' postfix is
///        consumed so the caller can wrap the whole chain in an
///        OptionalEvaluationExpr.
ParserResult<Expr>
Parser::parseExprPostfixSuffix(ParserResult<Expr> Result, bool isExprBasic,
                               bool periodHasKeyPathBehavior,
                               bool &hasBindOptional) {
  hasBindOptional = false;

  // Handle suffix expressions.
  while (1) {
    // FIXME: Better recovery.
    if (Result.isNull())
      return Result;

    // Check for a .foo suffix.
    SourceLoc TokLoc = Tok.getLoc();
    if (Tok.is(tok::period) || Tok.is(tok::period_prefix)) {
      // A key path is special, because it allows .[, unlike anywhere else. The
      // period itself should be left in the token stream. (.? and .! end up
      // being operators, and so aren't handled here.)
      if (periodHasKeyPathBehavior && peekToken().is(tok::l_square)) {
        break;
      }

      consumeToken();

      // Handle "x.42" - a tuple index.
      if (Tok.is(tok::integer_literal)) {
        DeclName name = Context.getIdentifier(Tok.getText());
        SourceLoc nameLoc = consumeToken(tok::integer_literal);

        // Don't allow '.<integer literal>' following a numeric literal
        // expression (unless in #if env, for 1.2.3.4 version numbers)
        if (!InPoundIfEnvironment && Result.isNonNull() &&
            isa<NumberLiteralExpr>(Result.get())) {
          diagnose(nameLoc, diag::numeric_literal_numeric_member)
              .highlight(Result.get()->getSourceRange());
          continue;
        }

        Result = makeParserResult(new (Context) UnresolvedDotExpr(
            Result.get(), TokLoc, name, DeclNameLoc(nameLoc),
            /*Implicit=*/false));
        continue;
      }

      // Handle "x.self" expr.
      if (Tok.is(tok::kw_self)) {
        Result = makeParserResult(
            new (Context) DotSelfExpr(Result.get(), TokLoc, consumeToken()));
        continue;
      }

      // Handle the deprecated 'x.dynamicType' and migrate it to `type(of: x)`
      if (Tok.getText() == "dynamicType") {
        auto range = Result.get()->getSourceRange();
        auto dynamicTypeExprRange = SourceRange(TokLoc, Tok.getLoc());
        diagnose(TokLoc, diag::expr_dynamictype_deprecated)
            .highlight(dynamicTypeExprRange)
            .fixItReplace(dynamicTypeExprRange, ")")
            .fixItInsert(range.Start, "type(of: ");

        // fallthrough to an UnresolvedDotExpr.
      }

      // If we have '.<keyword><code_complete>', try to recover by creating
      // an identifier with the same spelling as the keyword.
      if (Tok.isKeyword() && peekToken().is(tok::code_complete)) {
        Identifier Name = Context.getIdentifier(Tok.getText());
        Result = makeParserResult(new (Context) UnresolvedDotExpr(
            Result.get(), TokLoc, Name, DeclNameLoc(Tok.getLoc()),
            /*Implicit=*/false));
        consumeToken();
        // Fall into the next code completion handler.
      }

      // Handle "x.<tab>" for code completion.
      if (Tok.is(tok::code_complete)) {
        if (CodeCompletion && Result.isNonNull()) {
          // Inside a key path (or with an explicit key-path root), wrap the
          // parsed-so-far expression in a KeyPathExpr before completing.
          if (InSwiftKeyPath) {
            Result = makeParserResult(
                new (Context) KeyPathExpr(SwiftKeyPathSlashLoc, Result.get(),
                                          nullptr));
          } else if (SwiftKeyPathRoot) {
            Result = makeParserResult(
                new (Context) KeyPathExpr(SwiftKeyPathSlashLoc,
                                          SwiftKeyPathRoot, Result.get()));
          }
          CodeCompletion->completeDotExpr(Result.get(), /*DotLoc=*/TokLoc);
        }
        // Eat the code completion token because we handled it.
        consumeToken(tok::code_complete);
        Result.setHasCodeCompletion();
        return Result;
      }

      DeclNameLoc NameLoc;
      DeclName Name = parseUnqualifiedDeclName(/*afterDot=*/true, NameLoc,
                                               diag::expected_member_name);
      if (!Name)
        return nullptr;

      Result = makeParserResult(
          new (Context) UnresolvedDotExpr(Result.get(), TokLoc, Name, NameLoc,
                                          /*Implicit=*/false));

      // Parse an explicit generic argument list ('x.foo<Int>') if present.
      if (canParseAsGenericArgumentList()) {
        SmallVector<TypeRepr *, 8> args;
        SourceLoc LAngleLoc, RAngleLoc;
        if (parseGenericArguments(args, LAngleLoc, RAngleLoc)) {
          diagnose(LAngleLoc, diag::while_parsing_as_left_angle_bracket);
        }

        SmallVector<TypeLoc, 8> locArgs;
        for (auto ty : args)
          locArgs.push_back(ty);
        Result = makeParserResult(new (Context) UnresolvedSpecializeExpr(
            Result.get(), LAngleLoc, Context.AllocateCopy(locArgs),
            RAngleLoc));
      }

      continue;
    }

    // If there is an expr-call-suffix, parse it and form a call.
    if (Tok.isFollowingLParen()) {
      Result = parseExprCallSuffix(Result, isExprBasic);
      continue;
    }

    // NOTE: l_square_lit is for migrating the old object literal syntax.
    // Eventually this block can be removed.
    if (Tok.is(tok::l_square_lit) && !Tok.isAtStartOfLine() &&
        isCollectionLiteralStartingWithLSquareLit()) {
      assert(Tok.getLength() == 1);
      // Reinterpret '[#' as a plain '[' so the subscript path below fires.
      Tok.setKind(tok::l_square);
    }

    // Check for a [expr] suffix.
    // Note that this cannot be the start of a new line.
    if (Tok.isFollowingLSquare()) {
      SourceLoc lSquareLoc, rSquareLoc;
      SmallVector<Expr *, 2> indexArgs;
      SmallVector<Identifier, 2> indexArgLabels;
      SmallVector<SourceLoc, 2> indexArgLabelLocs;
      Expr *trailingClosure;

      ParserStatus status = parseExprList(
          tok::l_square, tok::r_square,
          /*isPostfix=*/true, isExprBasic, lSquareLoc, indexArgs,
          indexArgLabels, indexArgLabelLocs, rSquareLoc, trailingClosure);
      if (status.hasCodeCompletion())
        return makeParserCodeCompletionResult<Expr>();
      if (status.isError() || Result.isNull())
        return nullptr;
      Result = makeParserResult(SubscriptExpr::create(
          Context, Result.get(), lSquareLoc, indexArgs, indexArgLabels,
          indexArgLabelLocs, rSquareLoc, trailingClosure, ConcreteDeclRef(),
          /*implicit=*/false));
      continue;
    }

    // Check for a trailing closure, if allowed.
    if (Tok.is(tok::l_brace) && isValidTrailingClosure(isExprBasic, *this)) {
      // FIXME: if Result has a trailing closure, break out.

      // Stop after literal expressions, which may never have trailing
      // closures.
      const auto *callee = Result.get();
      if (isa<LiteralExpr>(callee) || isa<CollectionExpr>(callee) ||
          isa<TupleExpr>(callee))
        break;

      ParserResult<Expr> closure =
          parseTrailingClosure(callee->getSourceRange());
      if (closure.isNull())
        return nullptr;

      // Trailing closure implicitly forms a call.
      Result = makeParserResult(
          ParserStatus(closure),
          CallExpr::create(Context, Result.get(), SourceLoc(), {}, {}, {},
                           SourceLoc(), closure.get(), /*implicit=*/false));

      if (Result.hasCodeCompletion())
        return Result;

      // We only allow a single trailing closure on a call. This could be
      // generalized in the future, but needs further design.
      if (Tok.is(tok::l_brace))
        break;
      continue;
    }

    // Check for a ? suffix.
    if (consumeIf(tok::question_postfix)) {
      Result = makeParserResult(
          new (Context) BindOptionalExpr(Result.get(), TokLoc, /*depth*/ 0));
      hasBindOptional = true;
      continue;
    }

    // Check for a ! suffix.
    if (consumeIf(tok::exclaim_postfix)) {
      Result =
          makeParserResult(new (Context) ForceValueExpr(Result.get(), TokLoc));
      continue;
    }

    // Check for a postfix-operator suffix.
    if (Tok.is(tok::oper_postfix)) {
      // KeyPaths are more restricted in what can go after a ., and so we treat
      // them specially.
      if (periodHasKeyPathBehavior && startsWithSymbol(Tok, '.'))
        break;

      Expr *oper = parseExprOperator();
      Result =
          makeParserResult(new (Context) PostfixUnaryExpr(oper, Result.get()));
      continue;
    }

    if (Tok.is(tok::code_complete)) {
      if (Tok.isAtStartOfLine()) {
        // Postfix expression is located on a different line than the code
        // completion token, and thus they are not related.
        return Result;
      }

      if (CodeCompletion && Result.isNonNull()) {
        bool hasSpace = Tok.getLoc() != getEndOfPreviousLoc();
        CodeCompletion->completePostfixExpr(Result.get(), hasSpace);
      }
      // Eat the code completion token because we handled it.
      consumeToken(tok::code_complete);
      return makeParserCodeCompletionResult<Expr>();
    }

    // If we end up with an unknown token on this line, return an ErrorExpr
    // covering the range of the token.
    if (!Tok.isAtStartOfLine() && consumeIf(tok::unknown)) {
      Result = makeParserResult(new (Context)
                                    ErrorExpr(Result.get()->getSourceRange()));
      continue;
    }

    // Otherwise, we don't know what this token is, it must end the
    // expression.
    break;
  }

  return Result;
}
/// parseExprPostfix
///
/// expr-literal:
/// integer_literal
/// floating_literal
/// string_literal
/// nil
/// true
/// false
/// #file
/// #line
/// #column
/// #function
/// #dsohandle
///
/// expr-primary:
/// expr-literal
/// expr-identifier expr-call-suffix?
/// expr-closure
/// expr-anon-closure-argument
/// expr-delayed-identifier
/// expr-paren
/// expr-super
/// expr-discard
/// expr-selector
///
/// expr-delayed-identifier:
/// '.' identifier
///
/// expr-discard:
/// '_'
///
/// expr-dot:
/// expr-postfix '.' 'type'
/// expr-postfix '.' (identifier|keyword) generic-args? expr-call-suffix?
/// expr-postfix '.' integer_literal
///
/// expr-subscript:
/// expr-postfix '[' expr ']'
///
/// expr-call:
/// expr-postfix expr-paren
///
/// expr-force-value:
/// expr-postfix '!'
///
/// expr-trailing-closure:
/// expr-postfix(trailing-closure) expr-closure
///
/// expr-postfix(Mode):
/// expr-postfix(Mode) operator-postfix
///
/// expr-postfix(basic):
/// expr-primary
/// expr-dot
/// expr-metatype
/// expr-init
/// expr-subscript
/// expr-call
/// expr-force-value
///
/// expr-postfix(trailing-closure):
/// expr-postfix(basic)
/// expr-trailing-closure
///
/// Parse a postfix expression: dispatch on the current token to parse one
/// primary expression (literal, identifier, closure, '.member', 'super',
/// parenthesized/collection expression, object literal, etc.), then hand the
/// result to parseExprPostfixSuffix to fold in any trailing suffixes.
/// Returns null on parse errors; wraps the chain in an
/// OptionalEvaluationExpr when a '?' suffix was seen.
///
/// \param ID the diagnostic to emit when no expression can be parsed here.
/// \param isExprBasic whether we are in a "basic" expression position, which
///        restricts trailing closures.
ParserResult<Expr> Parser::parseExprPostfix(Diag<> ID, bool isExprBasic) {
  ParserResult<Expr> Result;
  switch (Tok.getKind()) {
  case tok::integer_literal: {
    StringRef Text = copyAndStripUnderscores(Context, Tok.getText());
    SourceLoc Loc = consumeToken(tok::integer_literal);
    Result = makeParserResult(new (Context) IntegerLiteralExpr(Text, Loc,
                                                          /*Implicit=*/false));
    break;
  }
  case tok::floating_literal: {
    StringRef Text = copyAndStripUnderscores(Context, Tok.getText());
    SourceLoc Loc = consumeToken(tok::floating_literal);
    Result = makeParserResult(new (Context) FloatLiteralExpr(Text, Loc,
                                                          /*Implicit=*/false));
    break;
  }

  case tok::at_sign:
    // Objective-C programmers habitually type @"foo", so recover gracefully
    // with a fixit.  If this isn't @"foo", just handle it like an unknown
    // input.
    if (peekToken().isNot(tok::string_literal))
      goto UnknownCharacter;

    diagnose(Tok.getLoc(), diag::string_literal_no_atsign)
        .fixItRemove(Tok.getLoc());
    consumeToken(tok::at_sign);
    LLVM_FALLTHROUGH;

  case tok::string_literal:  // "foo"
    Result = parseExprStringLiteral();
    break;

  case tok::kw_nil:
    Result = makeParserResult(
        new (Context) NilLiteralExpr(consumeToken(tok::kw_nil)));
    break;

  case tok::kw_true:
  case tok::kw_false: {
    bool isTrue = Tok.is(tok::kw_true);
    Result = makeParserResult(
        new (Context) BooleanLiteralExpr(isTrue, consumeToken()));
    break;
  }

  // Legacy __FILE__-style magic identifiers: diagnose with a fixit to the
  // modern #file spelling, then fall through to the shared handler below.
  case tok::kw___FILE__:
  case tok::kw___LINE__:
  case tok::kw___COLUMN__:
  case tok::kw___FUNCTION__:
  case tok::kw___DSO_HANDLE__: {
    StringRef replacement = "";
    switch (Tok.getKind()) {
    default: llvm_unreachable("can't get here");
    case tok::kw___FILE__: replacement = "#file"; break;
    case tok::kw___LINE__: replacement = "#line"; break;
    case tok::kw___COLUMN__: replacement = "#column"; break;
    case tok::kw___FUNCTION__: replacement = "#function"; break;
    case tok::kw___DSO_HANDLE__: replacement = "#dsohandle"; break;
    }

    diagnose(Tok.getLoc(), diag::snake_case_deprecated,
             Tok.getText(), replacement)
        .fixItReplace(Tok.getLoc(), replacement);
    LLVM_FALLTHROUGH;
  }

  case tok::pound_column:
  case tok::pound_file:
  case tok::pound_function:
  case tok::pound_line:
  case tok::pound_dsohandle: {
    auto Kind = getMagicIdentifierLiteralKind(Tok.getKind());
    SourceLoc Loc = consumeToken();
    Result = makeParserResult(
        new (Context) MagicIdentifierLiteralExpr(Kind, Loc,
                                                 /*implicit=*/false));
    break;
  }

  case tok::identifier:  // foo
    // Attempt to parse for 'type(of: <expr>)'.
    if (canParseTypeOf(*this)) {
      Result = parseExprTypeOf();
      break;
    }

    // If we are parsing a refutable pattern and are inside a let/var pattern,
    // the identifiers change to be value bindings instead of decl references.
    // Parse and return this as an UnresolvedPatternExpr around a binding.
    // This will be resolved (or rejected) by sema when the overall refutable
    // pattern is transformed from an expression into a pattern.
    if ((InVarOrLetPattern == IVOLP_ImplicitlyImmutable ||
         InVarOrLetPattern == IVOLP_InVar ||
         InVarOrLetPattern == IVOLP_InLet) &&
        // If we have "case let x." or "case let x(", we parse x as a normal
        // name, not a binding, because it is the start of an enum pattern or
        // call pattern.
        peekToken().isNot(tok::period, tok::period_prefix, tok::l_paren)) {
      Identifier name;
      SourceLoc loc = consumeIdentifier(&name);
      auto specifier = (InVarOrLetPattern != IVOLP_InVar)
                           ? VarDecl::Specifier::Let
                           : VarDecl::Specifier::Var;
      auto pattern = createBindingFromPattern(loc, name, specifier);
      Result = makeParserResult(new (Context) UnresolvedPatternExpr(pattern));
      break;
    }

    LLVM_FALLTHROUGH;
  case tok::kw_self:     // self
  case tok::kw_Self:     // Self
    Result = makeParserResult(parseExprIdentifier());

    // If there is an expr-call-suffix, parse it and form a call.
    if (Tok.isFollowingLParen()) {
      Result = parseExprCallSuffix(Result, isExprBasic);
      break;
    }

    break;

  case tok::kw_Any: { // Any
    ParserResult<TypeRepr> repr = parseAnyType();
    auto expr = new (Context) TypeExpr(TypeLoc(repr.get()));
    Result = makeParserResult(expr);
    break;
  }

  case tok::dollarident: // $1
    Result = makeParserResult(parseExprAnonClosureArg());
    break;

  // If the next token is '_', parse a discard expression.
  case tok::kw__:
    Result = makeParserResult(
        new (Context) DiscardAssignmentExpr(consumeToken(),
                                            /*Implicit=*/false));
    break;

  case tok::pound_selector: // expr-selector
    Result = parseExprSelector();
    break;

  case tok::l_brace:     // expr-closure
    Result = parseExprClosure();
    break;

  case tok::period:              //=.foo
  case tok::period_prefix: {     // .foo
    SourceLoc DotLoc = consumeToken();

    // Special case ".<integer_literal>" like ".4".  This isn't valid, but the
    // developer almost certainly meant to use "0.4".  Diagnose this, and
    // recover as if they wrote that.
    if (Tok.is(tok::integer_literal) && !Tok.isAtStartOfLine()) {
      diagnose(DotLoc, diag::invalid_float_literal_missing_leading_zero,
               Tok.getText())
          .fixItInsert(DotLoc, "0")
          .highlight({DotLoc, Tok.getLoc()});
      // Synthesize the "0.<digits>" spelling in context-owned memory so the
      // FloatLiteralExpr's text outlives this function.
      char *Ptr = (char*)Context.Allocate(Tok.getLength()+2, 1);
      memcpy(Ptr, "0.", 2);
      memcpy(Ptr+2, Tok.getText().data(), Tok.getLength());
      auto FltText = StringRef(Ptr, Tok.getLength()+2);
      FltText = copyAndStripUnderscores(Context, FltText);

      consumeToken(tok::integer_literal);
      Result = makeParserResult(new (Context)
                                    FloatLiteralExpr(FltText, DotLoc,
                                                     /*Implicit=*/false));
      break;
    }

    DeclName Name;
    DeclNameLoc NameLoc;

    // Handle ".<tab>" — an unresolved member code completion.
    if (Tok.is(tok::code_complete)) {
      auto Expr = UnresolvedMemberExpr::create(
          Context, DotLoc, DeclNameLoc(DotLoc.getAdvancedLoc(1)),
          Context.getIdentifier("_"), /*implicit=*/false);
      Result = makeParserResult(Expr);
      if (CodeCompletion) {
        std::vector<StringRef> Identifiers;

        // Move lexer to the start of the current line.
        L->backtrackToState(L->getStateForBeginningOfTokenLoc(
            L->getLocForStartOfLine(SourceMgr, Tok.getLoc())));

        bool HasReturn = false;

        // Until we see the code completion token, collect identifiers.
        for (L->lex(Tok); !Tok.is(tok::code_complete); consumeToken()) {
          if (!HasReturn)
            HasReturn = Tok.is(tok::kw_return);
          if (Tok.is(tok::identifier)) {
            Identifiers.push_back(Tok.getText());
          }
        }
        CodeCompletion->completeUnresolvedMember(Expr, Identifiers, HasReturn);
      } else {
        Result.setHasCodeCompletion();
      }
      consumeToken();
      return Result;
    }

    Name = parseUnqualifiedDeclName(/*afterDot=*/true, NameLoc,
                                    diag::expected_identifier_after_dot_expr);
    if (!Name) return nullptr;

    // Check for a () suffix, which indicates a call when constructing
    // this member.  Note that this cannot be the start of a new line.
    if (Tok.isFollowingLParen()) {
      SourceLoc lParenLoc, rParenLoc;
      SmallVector<Expr *, 2> args;
      SmallVector<Identifier, 2> argLabels;
      SmallVector<SourceLoc, 2> argLabelLocs;
      Expr *trailingClosure;

      ParserStatus status = parseExprList(tok::l_paren, tok::r_paren,
                                          /*isPostfix=*/true, isExprBasic,
                                          lParenLoc, args, argLabels,
                                          argLabelLocs,
                                          rParenLoc,
                                          trailingClosure);
      if (status.isError())
        return nullptr;

      Result = makeParserResult(
          status,
          UnresolvedMemberExpr::create(Context, DotLoc, NameLoc, Name,
                                       lParenLoc, args, argLabels,
                                       argLabelLocs, rParenLoc,
                                       trailingClosure,
                                       /*implicit=*/false));
      if (Result.hasCodeCompletion())
        return Result;
      break;
    }

    // Check for a trailing closure, if allowed.
    if (Tok.is(tok::l_brace) && isValidTrailingClosure(isExprBasic, *this)) {
      ParserResult<Expr> closure =
          parseTrailingClosure(NameLoc.getSourceRange());
      if (closure.isNull()) return nullptr;

      // Handle .foo by just making an AST node.
      Result = makeParserResult(
          ParserStatus(closure),
          UnresolvedMemberExpr::create(Context, DotLoc, NameLoc, Name,
                                       SourceLoc(), { }, { }, { },
                                       SourceLoc(), closure.get(),
                                       /*implicit=*/false));
      if (Result.hasCodeCompletion())
        return Result;
      break;
    }

    // Handle .foo by just making an AST node.
    Result = makeParserResult(
        UnresolvedMemberExpr::create(Context, DotLoc, NameLoc, Name,
                                     /*implicit=*/false));
    break;
  }

  case tok::kw_super: { // super.foo or super[foo]
    Result = parseExprSuper(isExprBasic);
    break;
  }

  case tok::l_paren:
    Result = parseExprList(tok::l_paren, tok::r_paren);
    break;

  case tok::l_square:
    Result = parseExprCollection();
    break;

  case tok::pound_available: {
    // For better error recovery, parse but reject #available in an expr
    // context.
    diagnose(Tok.getLoc(), diag::availability_query_outside_if_stmt_guard);
    auto res = parseStmtConditionPoundAvailable();
    if (res.hasCodeCompletion())
      return makeParserCodeCompletionStatus();
    if (res.isParseError() || res.isNull())
      return nullptr;
    Result = makeParserResult(
        new (Context) ErrorExpr(res.get()->getSourceRange()));
    break;
  }

  // NOTE: This is for migrating the old object literal syntax.
  // Eventually this block can be removed.
  case tok::l_square_lit: {// [#Color(...)#], [#Image(...)#]
    // If this is actually a collection literal starting with '[#', handle it
    // as such.
    if (isCollectionLiteralStartingWithLSquareLit()) {
      // Split the token into two.
      SourceLoc LSquareLoc = consumeStartingCharacterOfCurrentToken();
      // Consume the '[' token.
      Result = parseExprCollection(LSquareLoc);
      break;
    }

    auto LSquareLoc = Tok.getLoc();
    auto LSquareTokRange = Tok.getRange();
    (void)consumeToken(tok::l_square_lit);

    if (Tok.is(tok::pound)) {
      consumeToken();
      if (!Tok.is(tok::identifier))
        diagnose(LSquareLoc, diag::expected_object_literal_identifier);
      skipUntil(tok::r_square_lit);
      Result = makeParserError();
    }
    else {
      Result = parseExprPostfix(ID, isExprBasic);
    }

    // This should be an invariant based on the check in
    // isCollectionLiteralStartingWithLSquareLit().
    auto RSquareTokRange = Tok.getRange();
    (void)consumeToken(tok::r_square_lit);

    // Issue a diagnostic for the legacy syntax and provide a fixit
    // to strip away the '[#' and '#]'
    diagnose(LSquareLoc, diag::legacy_object_literal_syntax)
        .fixItRemoveChars(LSquareTokRange.getStart(), LSquareTokRange.getEnd())
        .fixItRemoveChars(RSquareTokRange.getStart(),
                          RSquareTokRange.getEnd());

    break;
  }

  // Object literals (#colorLiteral, #imageLiteral, ...) — cases are stamped
  // out from the token-kinds .def file.
#define POUND_OBJECT_LITERAL(Name, Desc, Proto) case tok::pound_##Name: \
  Result = parseExprObjectLiteral(ObjectLiteralExpr::Name, isExprBasic); \
  break;
#include "swift/Syntax/TokenKinds.def"

#define POUND_OLD_OBJECT_LITERAL(Name, NewName, NewArg, OldArg)\
  case tok::pound_##Name: \
    Result = parseExprObjectLiteral(ObjectLiteralExpr::NewName, isExprBasic, \
                                    "#" #NewName); \
    break;
#include "swift/Syntax/TokenKinds.def"

  case tok::code_complete:
    Result = makeParserResult(new (Context) CodeCompletionExpr(Tok.getLoc()));
    Result.setHasCodeCompletion();
    if (CodeCompletion &&
        // We cannot code complete anything after var/let.
        (!InVarOrLetPattern || InVarOrLetPattern == IVOLP_InMatchingPattern))
      CodeCompletion->completePostfixExprBeginning(
          dyn_cast<CodeCompletionExpr>(Result.get()));
    consumeToken(tok::code_complete);
    break;

  // Eat an invalid token in an expression context.  Error tokens are
  // diagnosed by the lexer, so there is no reason to emit another diagnostic.
  case tok::unknown:
    consumeToken(tok::unknown);
    return nullptr;

  default:
  UnknownCharacter:
    checkForInputIncomplete();
    // FIXME: offer a fixit: 'Self' -> 'self'
    diagnose(Tok, ID);
    return nullptr;
  }

  // If we had a parse error, don't attempt to parse suffixes.
  if (Result.isParseError())
    return Result;

  bool hasBindOptional = false;
  Result = parseExprPostfixSuffix(Result, isExprBasic,
                                  /*periodHasKeyPathBehavior=*/InSwiftKeyPath,
                                  hasBindOptional);
  if (Result.isParseError() || Result.hasCodeCompletion())
    return Result;

  // If we had a ? suffix expression, bind the entire postfix chain
  // within an OptionalEvaluationExpr.
  if (hasBindOptional) {
    Result = makeParserResult(
        new (Context) OptionalEvaluationExpr(Result.get()));
  }

  return Result;
}
/// Build a StringLiteralExpr for a single literal (non-interpolated) string
/// segment, allocating the decoded text in the ASTContext when the lexer had
/// to materialize it into temporary storage.
static StringLiteralExpr *
createStringLiteralExprFromSegment(ASTContext &Ctx,
                                   const Lexer *L,
                                   Lexer::StringSegment &Segment,
                                   SourceLoc TokenLoc) {
  assert(Segment.Kind == Lexer::StringSegment::Literal);

  // FIXME: Consider lazily encoding the string when needed.
  llvm::SmallString<256> Scratch;
  StringRef Text = L->getEncodedStringSegment(Segment, Scratch);

  // A non-empty scratch buffer means the returned StringRef points into our
  // stack storage; copy it into context-owned memory so it outlives us.
  if (!Scratch.empty()) {
    assert(Text.begin() == Scratch.begin() &&
           "Returned string is not from buffer?");
    Text = Ctx.AllocateCopy(Text);
  }

  return new (Ctx) StringLiteralExpr(Text, TokenLoc);
}
/// Parse a string literal, including any interpolated "\(expr)" segments.
///
/// expr-literal:
///   string_literal
ParserResult<Expr> Parser::parseExprStringLiteral() {
  SmallVector<Lexer::StringSegment, 1> Segments;
  L->getStringLiteralSegments(Tok, Segments);
  SourceLoc Loc = consumeToken();

  // The simple case: just a single literal segment.
  if (Segments.size() == 1 &&
      Segments.front().Kind == Lexer::StringSegment::Literal) {
    return makeParserResult(
        createStringLiteralExprFromSegment(Context, L, Segments.front(),
                                           Loc));
  }

  // Otherwise build an InterpolatedStringLiteralExpr, re-lexing each
  // expression segment with a temporary lexer.
  ParserStatus Status;
  SmallVector<Expr*, 4> Exprs;
  bool First = true;
  for (auto Segment : Segments) {
    switch (Segment.Kind) {
    case Lexer::StringSegment::Literal: {
      // The first literal segment is attributed to the whole token's
      // location; later segments use their own location.
      auto TokenLoc = First ? Loc : Segment.Loc;
      Exprs.push_back(
          createStringLiteralExprFromSegment(Context, L, Segment, TokenLoc));
      // Since the string is already parsed, Tok already points to the first
      // token after the whole string, but PreviousLoc is not exactly correct.
      PreviousLoc = TokenLoc;
      break;
    }

    case Lexer::StringSegment::Expr: {
      // We are going to mess with Tok to do reparsing for interpolated
      // literals, don't lose our 'next' token.
      llvm::SaveAndRestore<Token> SavedTok(Tok);

      // Create a temporary lexer that lexes from the body of the string.
      Lexer::State BeginState =
          L->getStateForBeginningOfTokenLoc(Segment.Loc);
      // We need to set the EOF at r_paren, to prevent the Lexer from eagerly
      // trying to lex the token beyond it. Parser::parseList() does a special
      // check for a tok::EOF that is spelled with a ')'.
      // FIXME: This seems like a hack, there must be a better way..
      Lexer::State EndState = BeginState.advance(Segment.Length-1);
      Lexer LocalLex(*L, BeginState, EndState);

      // Temporarily swap out the parser's current lexer with our new one.
      llvm::SaveAndRestore<Lexer *> T(L, &LocalLex);

      // Prime the new lexer with a '(' as the first token.
      // We might be at tok::eof now, so ensure that consumeToken() does not
      // assert about lexing past eof.
      Tok.setKind(tok::unknown);
      consumeToken();
      assert(Tok.is(tok::l_paren));

      ParserResult<Expr> E = parseExprList(tok::l_paren, tok::r_paren);
      Status |= E;
      if (E.isNonNull()) {
        Exprs.push_back(E.get());

        // Anything left after the parenthesized expression is an error.
        if (!Tok.is(tok::eof)) {
          diagnose(Tok, diag::string_interpolation_extra);
        }
      }
      break;
    }
    }
    First = false;
  }

  if (Exprs.empty()) {
    Status.setIsParseError();
    return makeParserResult(Status, new (Context) ErrorExpr(Loc));
  }

  return makeParserResult(Status, new (Context) InterpolatedStringLiteralExpr(
                                      Loc, Context.AllocateCopy(Exprs)));
}
/// If the current token looks like an argument label followed by ':', consume
/// both tokens, setting \p name to the label's identifier (or leaving it
/// empty for a plain '_') and \p loc to the label's location. Otherwise do
/// nothing, leaving \p name and \p loc untouched.
void Parser::parseOptionalArgumentLabel(Identifier &name, SourceLoc &loc) {
  // An argument label is any label-capable token immediately followed by ':'.
  if (!Tok.canBeArgumentLabel() || !peekToken().is(tok::colon))
    return;

  const auto text = Tok.getText();

  // If this was an escaped identifier that need not have been escaped, say
  // so. Only _ needs escaping, because we take foo(_: 3) to be equivalent
  // to foo(3), to be more uniform with _ in function declaration as well as
  // the syntax for referring to the function pointer (foo(_:)),
  const bool escaped = Tok.isEscapedIdentifier();
  const bool underscore = Tok.is(tok::kw__) || (escaped && text == "_");
  if (escaped && !underscore && canBeArgumentLabel(text)) {
    SourceLoc begin = Tok.getLoc();
    SourceLoc finish = begin.getAdvancedLoc(Tok.getLength());
    diagnose(Tok, diag::escaped_parameter_name, text)
        .fixItRemoveChars(begin, begin.getAdvancedLoc(1))
        .fixItRemoveChars(finish.getAdvancedLoc(-1), finish);
  }

  // A plain (unescaped) '_' means "no label"; anything else records a name.
  if (!(underscore && !escaped))
    name = Context.getIdentifier(text);
  loc = consumeToken();
  consumeToken(tok::colon);
}
/// Parse an unqualified declaration name, which is either a simple base name
/// ('foo') or a compound name with argument labels ('foo(x:y:)').
///
/// \param afterDot whether the name follows a '.', which permits keywords as
///        base names.
/// \param loc receives the source location(s) of the parsed name.
/// \param diag the diagnostic to emit when no valid base name is present.
/// \param allowOperators whether an operator token may serve as the base name.
/// \param allowZeroArgCompoundNames whether 'foo()' parses as a zero-argument
///        compound name instead of a simple name.
/// \returns the parsed DeclName, or an empty DeclName on error.
DeclName Parser::parseUnqualifiedDeclName(bool afterDot,
                                          DeclNameLoc &loc,
                                          const Diagnostic &diag,
                                          bool allowOperators,
                                          bool allowZeroArgCompoundNames) {
  // Consume the base name.
  Identifier baseName = Context.getIdentifier(Tok.getText());
  SourceLoc baseNameLoc;
  if (Tok.isAny(tok::identifier, tok::kw_Self, tok::kw_self)) {
    baseNameLoc = consumeIdentifier(&baseName);
  } else if (allowOperators && Tok.isAnyOperator()) {
    baseName = Context.getIdentifier(Tok.getText());
    baseNameLoc = consumeToken();
  } else if (afterDot && Tok.isKeyword()) {
    // Keywords are allowed as member names after '.'; baseName already holds
    // the keyword's spelling from the initializer above.
    baseNameLoc = consumeToken();
  } else {
    checkForInputIncomplete();
    diagnose(Tok, diag);
    return DeclName();
  }

  // If the next token isn't a following '(', we don't have a compound name.
  if (!Tok.isFollowingLParen()) {
    loc = DeclNameLoc(baseNameLoc);
    return baseName;
  }

  // If the next token is a ')' then we have a 0-arg compound name. This is
  // explicitly differentiated from "simple" (non-compound) name in DeclName.
  // Unfortunately only some places in the grammar are ok with accepting this
  // kind of name; in other places it's ambiguous with trailing calls.
  if (allowZeroArgCompoundNames && peekToken().is(tok::r_paren)) {
    consumeToken(tok::l_paren);
    consumeToken(tok::r_paren);
    loc = DeclNameLoc(baseNameLoc);
    SmallVector<Identifier, 2> argumentLabels;
    return DeclName(Context, baseName, argumentLabels);
  }

  // If the token after that isn't an argument label or ':', we don't have a
  // compound name.
  if ((!peekToken().canBeArgumentLabel() && !peekToken().is(tok::colon)) ||
      Identifier::isEditorPlaceholder(peekToken().getText())) {
    loc = DeclNameLoc(baseNameLoc);
    return baseName;
  }

  // Try to parse a compound name. The backtracking scope lets us restore the
  // token stream if this turns out not to be a compound name after all.
  BacktrackingScope backtrack(*this);

  SmallVector<Identifier, 2> argumentLabels;
  SmallVector<SourceLoc, 2> argumentLabelLocs;
  SourceLoc lparenLoc = consumeToken(tok::l_paren);
  SourceLoc rparenLoc;
  while (true) {
    // Terminate at ')'.
    if (Tok.is(tok::r_paren)) {
      rparenLoc = consumeToken(tok::r_paren);
      break;
    }

    // If we see a ':', the user forgot the '_';
    if (Tok.is(tok::colon)) {
      diagnose(Tok, diag::empty_arg_label_underscore)
          .fixItInsert(Tok.getLoc(), "_");
      argumentLabels.push_back(Identifier());
      argumentLabelLocs.push_back(consumeToken(tok::colon));
    }

    Identifier argName;
    SourceLoc argLoc;
    parseOptionalArgumentLabel(argName, argLoc);
    if (argLoc.isValid()) {
      argumentLabels.push_back(argName);
      argumentLabelLocs.push_back(argLoc);
      continue;
    }

    // This is not a compound name.
    // FIXME: Could recover better if we "know" it's a compound name.
    // (Backtracking scope unwinds the consumed tokens on return.)
    loc = DeclNameLoc(baseNameLoc);
    return baseName;
  }
  assert(!argumentLabels.empty() && "Logic above should prevent this");
  assert(argumentLabels.size() == argumentLabelLocs.size());

  // We have a compound name. Cancel backtracking and build that name.
  backtrack.cancelBacktrack();
  loc = DeclNameLoc(Context, baseNameLoc, lparenLoc, argumentLabelLocs,
                    rparenLoc);
  return DeclName(Context, baseName, argumentLabels);
}
/// Decide whether a "did you mean 'self.'?" fixit should be offered for an
/// unresolved reference to \p Name from \p Current. Returns true (and sets
/// \p Kind to the matched member's descriptive kind) when \p Current is a
/// non-type context nested in a nominal type that directly declares \p Name.
static bool shouldAddSelfFixit(DeclContext* Current, DeclName Name,
                               DescriptiveDeclKind &Kind) {
  // Only offer the fixit from a non-type context nested inside a type.
  if (Current->isTypeContext())
    return false;
  auto *TypeCtx = Current->getInnermostTypeContext();
  if (!TypeCtx)
    return false;

  auto *Nominal = TypeCtx->getAsNominalTypeOrNominalTypeExtensionContext();
  if (!Nominal)
    return false;

  // FIXME: we cannot resolve members that appear later in the body of the
  // nominal.
  auto Members = Nominal->lookupDirect(Name);
  if (Members.empty())
    return false;

  Kind = Members.front()->getDescriptiveKind();
  return true;
}
/// Parse an identifier expression, resolving it in the current scope when
/// possible and handling explicit generic argument lists and editor
/// placeholders.
///
/// expr-identifier:
///   unqualified-decl-name generic-args?
Expr *Parser::parseExprIdentifier() {
  assert(Tok.isAny(tok::identifier, tok::kw_self, tok::kw_Self));
  // Saved so editor placeholders can re-examine the raw token below.
  Token IdentTok = Tok;

  // Parse the unqualified-decl-name.
  DeclNameLoc loc;
  DeclName name = parseUnqualifiedDeclName(/*afterDot=*/false, loc,
                                           diag::expected_expr);

  SmallVector<TypeRepr*, 8> args;
  SourceLoc LAngleLoc, RAngleLoc;
  bool hasGenericArgumentList = false;

  /// The generic-args case is ambiguous with an expression involving '<'
  /// and '>' operators. The operator expression is favored unless a generic
  /// argument list can be successfully parsed, and the closing bracket is
  /// followed by one of these tokens:
  ///   lparen_following rparen lsquare_following rsquare lbrace rbrace
  ///   period_following comma semicolon
  ///
  if (canParseAsGenericArgumentList()) {
    if (parseGenericArguments(args, LAngleLoc, RAngleLoc)) {
      diagnose(LAngleLoc, diag::while_parsing_as_left_angle_bracket);
    }

    // The result can be empty in error cases.
    hasGenericArgumentList = !args.empty();
  }

  ValueDecl *D = nullptr;
  if (!InPoundIfEnvironment) {
    D = lookupInScope(name);
    // FIXME: We want this to work: "var x = { x() }", but for now it's better
    // to disallow it than to crash.
    if (D) {
      // The name resolved to a declaration that is currently disabled
      // (e.g. a var being referenced inside its own initializer).
      for (auto activeVar : DisabledVars) {
        if (activeVar == D) {
          diagnose(loc.getBaseNameLoc(), DisabledVarReason);
          return new (Context) ErrorExpr(loc.getSourceRange());
        }
      }
    } else {
      // Unresolved name: check whether it matches a disabled var by name,
      // possibly suggesting 'self.' when the member exists on the enclosing
      // nominal type.
      for (auto activeVar : DisabledVars) {
        if (activeVar->getFullName() == name) {
          DescriptiveDeclKind Kind;
          if (DisabledVarReason.ID == diag::var_init_self_referential.ID &&
              shouldAddSelfFixit(CurDeclContext, name, Kind)) {
            diagnose(loc.getBaseNameLoc(), diag::expected_self_before_reference,
                     Kind).fixItInsert(loc.getBaseNameLoc(), "self.");
          } else {
            diagnose(loc.getBaseNameLoc(), DisabledVarReason);
          }
          return new (Context) ErrorExpr(loc.getSourceRange());
        }
      }
    }
  }

  Expr *E;
  if (D == nullptr) {
    if (name.getBaseName().isEditorPlaceholder())
      return parseExprEditorPlaceholder(IdentTok, name.getBaseIdentifier());

    auto refKind = DeclRefKind::Ordinary;
    E = new (Context) UnresolvedDeclRefExpr(name, refKind, loc);
  } else if (auto TD = dyn_cast<TypeDecl>(D)) {
    // When parsing default argument expressions for generic functions,
    // we haven't built a FuncDecl or re-parented the GenericTypeParamDecls
    // to the FuncDecl yet. Other than that, we should only ever find
    // global or local declarations here.
    assert(!TD->getDeclContext()->isTypeContext() ||
           isa<GenericTypeParamDecl>(TD));
    E = TypeExpr::createForDecl(loc.getBaseNameLoc(), TD, /*DC*/nullptr,
                                /*implicit*/false);
  } else {
    E = new (Context) DeclRefExpr(D, loc, /*Implicit=*/false);
  }

  if (hasGenericArgumentList) {
    SmallVector<TypeLoc, 8> locArgs;
    for (auto ty : args)
      locArgs.push_back(ty);
    E = new (Context) UnresolvedSpecializeExpr(E, LAngleLoc,
                                               Context.AllocateCopy(locArgs),
                                               RAngleLoc);
  }
  return E;
}
/// Build an EditorPlaceholderExpr for an editor placeholder token
/// ('<#...#>'), parsing any type annotation embedded in the placeholder text
/// with a temporary lexer over the relevant sub-string.
///
/// \param PlaceholderTok the identifier token spelling the placeholder.
/// \param PlaceholderId the placeholder's identifier.
Expr *Parser::parseExprEditorPlaceholder(Token PlaceholderTok,
                                         Identifier PlaceholderId) {
  assert(PlaceholderTok.is(tok::identifier));
  assert(PlaceholderId.isEditorPlaceholder());

  auto parseTypeForPlaceholder = [&](TypeLoc &TyLoc, TypeRepr *&ExpansionTyR) {
    Optional<EditorPlaceholderData> DataOpt =
        swift::parseEditorPlaceholder(PlaceholderTok.getText());
    if (!DataOpt)
      return;
    StringRef TypeStr = DataOpt->Type;
    if (TypeStr.empty())
      return;

    // Ensure that we restore the parser state at exit.
    ParserPositionRAII PPR(*this);

    auto parseTypeString = [&](StringRef TyStr) -> TypeRepr* {
      // Locate the type sub-string within the placeholder token's source
      // range so the temporary lexer produces correct source locations.
      unsigned Offset = TyStr.data() - PlaceholderTok.getText().data();
      SourceLoc TypeStartLoc = PlaceholderTok.getLoc().getAdvancedLoc(Offset);
      SourceLoc TypeEndLoc = TypeStartLoc.getAdvancedLoc(TyStr.size());

      Lexer::State StartState = L->getStateForBeginningOfTokenLoc(TypeStartLoc);
      Lexer::State EndState = L->getStateForBeginningOfTokenLoc(TypeEndLoc);

      // Create a lexer for the type sub-string.
      Lexer LocalLex(*L, StartState, EndState);

      // Temporarily swap out the parser's current lexer with our new one.
      llvm::SaveAndRestore<Lexer *> T(L, &LocalLex);

      Tok.setKind(tok::unknown); // we might be at tok::eof now.
      consumeToken();
      return parseType().getPtrOrNull();
    };

    TypeRepr *TyR = parseTypeString(TypeStr);
    TyLoc = TyR;
    // The placeholder may carry a distinct "type for expansion"; reuse the
    // parsed repr when it is spelled identically.
    if (DataOpt->TypeForExpansion == TypeStr) {
      ExpansionTyR = TyR;
    } else {
      ExpansionTyR = parseTypeString(DataOpt->TypeForExpansion);
    }
  };

  TypeLoc TyLoc;
  TypeRepr *ExpansionTyR = nullptr;
  parseTypeForPlaceholder(TyLoc, ExpansionTyR);
  return new (Context) EditorPlaceholderExpr(PlaceholderId,
                                             PlaceholderTok.getLoc(),
                                             TyLoc, ExpansionTyR);
}
// Extract names of the tuple elements and preserve the structure
// of the tuple (with any nested tuples inside) to be able to use
// it in the fix-it without any type information provided by user.
static void printTupleNames(const TypeRepr *typeRepr, llvm::raw_ostream &OS) {
if (!typeRepr)
return;
auto tupleRepr = dyn_cast<TupleTypeRepr>(typeRepr);
if (!tupleRepr)
return;
OS << "(";
unsigned elementIndex = 0;
llvm::SmallVector<TypeRepr *, 10> elementTypes;
tupleRepr->getElementTypes(elementTypes);
interleave(elementTypes,
[&](const TypeRepr *element) {
if (isa<TupleTypeRepr>(element)) {
printTupleNames(element, OS);
} else {
auto name = tupleRepr->getElementName(elementIndex);
// If there is no label from the element
// it means that it's malformed and we can
// use the type instead.
if (name.empty())
element->print(OS);
else
OS << name;
}
++elementIndex;
},
[&] { OS << ", "; });
OS << ")";
}
bool Parser::
parseClosureSignatureIfPresent(SmallVectorImpl<CaptureListEntry> &captureList,
ParameterList *¶ms, SourceLoc &throwsLoc,
SourceLoc &arrowLoc,
TypeRepr *&explicitResultType, SourceLoc &inLoc){
// Clear out result parameters.
params = nullptr;
throwsLoc = SourceLoc();
arrowLoc = SourceLoc();
explicitResultType = nullptr;
inLoc = SourceLoc();
// If we have a leading token that may be part of the closure signature, do a
// speculative parse to validate it and look for 'in'.
if (Tok.isAny(tok::l_paren, tok::l_square, tok::identifier, tok::kw__)) {
BacktrackingScope backtrack(*this);
// Skip by a closure capture list if present.
if (consumeIf(tok::l_square)) {
skipUntil(tok::r_square);
if (!consumeIf(tok::r_square))
return false;
}
// Parse pattern-tuple func-signature-result? 'in'.
if (consumeIf(tok::l_paren)) { // Consume the ')'.
// While we don't have '->' or ')', eat balanced tokens.
while (!Tok.is(tok::r_paren) && !Tok.is(tok::eof))
skipSingle();
// Consume the ')', if it's there.
if (consumeIf(tok::r_paren)) {
consumeIf(tok::kw_throws) || consumeIf(tok::kw_rethrows);
// Parse the func-signature-result, if present.
if (consumeIf(tok::arrow)) {
if (!canParseType())
return false;
}
}
// Okay, we have a closure signature.
} else if (Tok.isIdentifierOrUnderscore()) {
// Parse identifier (',' identifier)*
consumeToken();
while (consumeIf(tok::comma)) {
if (Tok.isIdentifierOrUnderscore()) {
consumeToken();
continue;
}
return false;
}
consumeIf(tok::kw_throws) || consumeIf(tok::kw_rethrows);
// Parse the func-signature-result, if present.
if (consumeIf(tok::arrow)) {
if (!canParseType())
return false;
}
}
// Parse the 'in' at the end.
if (Tok.isNot(tok::kw_in))
return false;
// Okay, we have a closure signature.
} else {
// No closure signature.
return false;
}
// At this point, we know we have a closure signature. Parse the capture list
// and parameters.
if (consumeIf(tok::l_square) &&
!consumeIf(tok::r_square)) {
do {
// Check for the strength specifier: "weak", "unowned", or
// "unowned(safe/unsafe)".
SourceLoc loc;
Ownership ownershipKind = Ownership::Strong;
if (Tok.isContextualKeyword("weak")){
loc = consumeToken(tok::identifier);
ownershipKind = Ownership::Weak;
} else if (Tok.isContextualKeyword("unowned")) {
loc = consumeToken(tok::identifier);
ownershipKind = Ownership::Unowned;
// Skip over "safe" and "unsafe" if present.
if (consumeIf(tok::l_paren)) {
if (Tok.getText() == "safe")
ownershipKind = Ownership::Unowned; // FIXME: No "safe" variant.
else if (Tok.getText() == "unsafe")
ownershipKind = Ownership::Unmanaged;
else
diagnose(Tok, diag::attr_unowned_invalid_specifier);
consumeIf(tok::identifier);
if (!consumeIf(tok::r_paren))
diagnose(Tok, diag::attr_unowned_expected_rparen);
}
} else if (Tok.isAny(tok::identifier, tok::kw_self) &&
peekToken().isAny(tok::equal, tok::comma, tok::r_square)) {
// "x = 42", "x," and "x]" are all strong captures of x.
loc = Tok.getLoc();
} else {
diagnose(Tok, diag::expected_capture_specifier);
skipUntil(tok::comma, tok::r_square);
continue;
}
if (Tok.isNot(tok::identifier, tok::kw_self)) {
diagnose(Tok, diag::expected_capture_specifier_name);
skipUntil(tok::comma, tok::r_square);
continue;
}
// The thing being capture specified is an identifier, or as an identifier
// followed by an expression.
Expr *initializer;
Identifier name;
SourceLoc nameLoc = Tok.getLoc();
if (peekToken().isNot(tok::equal)) {
// If this is the simple case, then the identifier is both the name and
// the expression to capture.
name = Context.getIdentifier(Tok.getText());
initializer = parseExprIdentifier();
// It is a common error to try to capture a nested field instead of just
// a local name, reject it with a specific error message.
if (Tok.isAny(tok::period, tok::exclaim_postfix,tok::question_postfix)){
diagnose(Tok, diag::cannot_capture_fields);
skipUntil(tok::comma, tok::r_square);
continue;
}
} else {
// Otherwise, the name is a new declaration.
consumeIdentifier(&name);
consumeToken(tok::equal);
auto ExprResult = parseExpr(diag::expected_init_capture_specifier);
if (ExprResult.isNull())
continue;
initializer = ExprResult.get();
}
// Create the VarDecl and the PatternBindingDecl for the captured
// expression. This uses the parent declcontext (not the closure) since
// the initializer expression is evaluated before the closure is formed.
auto specifierKind = (ownershipKind != Ownership::Weak)
? VarDecl::Specifier::Let
: VarDecl::Specifier::Var;
auto *VD = new (Context) VarDecl(/*isStatic*/false,
specifierKind,
/*isCaptureList*/true,
nameLoc, name, Type(), CurDeclContext);
// Attributes.
if (ownershipKind != Ownership::Strong)
VD->getAttrs().add(new (Context) OwnershipAttr(ownershipKind));
auto pattern = new (Context) NamedPattern(VD, /*implicit*/true);
auto *PBD = PatternBindingDecl::create(Context, /*staticloc*/SourceLoc(),
StaticSpellingKind::None,
nameLoc, pattern, initializer,
CurDeclContext);
captureList.push_back(CaptureListEntry(VD, PBD));
} while (consumeIf(tok::comma));
// The capture list needs to be closed off with a ']'.
if (!consumeIf(tok::r_square)) {
diagnose(Tok, diag::expected_capture_list_end_rsquare);
skipUntil(tok::r_square);
if (Tok.is(tok::r_square))
consumeToken(tok::r_square);
}
}
bool invalid = false;
if (Tok.isNot(tok::kw_in)) {
if (Tok.is(tok::l_paren)) {
// Parse the closure arguments.
auto pattern = parseSingleParameterClause(ParameterContextKind::Closure);
if (pattern.isNonNull())
params = pattern.get();
else
invalid = true;
} else {
// Parse identifier (',' identifier)*
SmallVector<ParamDecl*, 4> elements;
do {
if (Tok.isNot(tok::identifier, tok::kw__)) {
diagnose(Tok, diag::expected_closure_parameter_name);
invalid = true;
break;
}
Identifier name = Tok.is(tok::identifier) ?
Context.getIdentifier(Tok.getText()) : Identifier();
auto var = new (Context) ParamDecl(VarDecl::Specifier::Owned, SourceLoc(),
SourceLoc(), Identifier(),
Tok.getLoc(), name, Type(), nullptr);
elements.push_back(var);
consumeToken();
// Consume a comma to continue.
} while (consumeIf(tok::comma));
params = ParameterList::create(Context, elements);
}
if (Tok.is(tok::kw_throws)) {
throwsLoc = consumeToken();
} else if (Tok.is(tok::kw_rethrows)) {
throwsLoc = consumeToken();
diagnose(throwsLoc, diag::rethrowing_function_type)
.fixItReplace(throwsLoc, "throws");
}
// Parse the optional explicit return type.
if (Tok.is(tok::arrow)) {
// Consume the '->'.
arrowLoc = consumeToken();
// Parse the type.
explicitResultType =
parseType(diag::expected_closure_result_type).getPtrOrNull();
if (!explicitResultType) {
// If we couldn't parse the result type, clear out the arrow location.
arrowLoc = SourceLoc();
invalid = true;
}
}
}
// Parse the 'in'.
if (Tok.is(tok::kw_in)) {
inLoc = consumeToken();
} else {
// Scan forward to see if we can find the 'in'. This re-synchronizes the
// parser so we can at least parse the body correctly.
SourceLoc startLoc = Tok.getLoc();
ParserPosition pos = getParserPosition();
while (Tok.isNot(tok::eof) && !Tok.is(tok::kw_in) &&
Tok.isNot(tok::r_brace)) {
skipSingle();
}
if (Tok.is(tok::kw_in)) {
// We found the 'in'. If this is the first error, complain about the
// junk tokens in-between but re-sync at the 'in'.
if (!invalid) {
diagnose(startLoc, diag::unexpected_tokens_before_closure_in);
}
inLoc = consumeToken();
} else {
// We didn't find an 'in', backtrack to where we started. If this is the
// first error, complain about the missing 'in'.
backtrackToPosition(pos);
if (!invalid) {
diagnose(Tok, diag::expected_closure_in)
.fixItInsert(Tok.getLoc(), "in ");
}
inLoc = Tok.getLoc();
}
}
if (!params)
return invalid;
// If this was a closure declaration (maybe even trailing)
// tuple parameter destructuring is one of the common
// problems, and is misleading to users, so it's imperative
// to detect any tuple splat or destructuring as early as
// possible and give a proper fix-it. See SE-0110 for more details.
auto isTupleDestructuring = [](ParamDecl *param) -> bool {
if (!param->isInvalid())
return false;
auto &typeLoc = param->getTypeLoc();
if (auto typeRepr = typeLoc.getTypeRepr())
return !param->hasName() && isa<TupleTypeRepr>(typeRepr);
return false;
};
for (unsigned i = 0, e = params->size(); i != e; ++i) {
auto *param = params->get(i);
if (!isTupleDestructuring(param))
continue;
auto argName = "arg" + std::to_string(i);
auto typeLoc = param->getTypeLoc();
SmallString<64> fixIt;
llvm::raw_svector_ostream OS(fixIt);
auto isMultiLine = Tok.isAtStartOfLine();
StringRef indent = Lexer::getIndentationForLine(SourceMgr, Tok.getLoc());
if (isMultiLine)
OS << '\n' << indent;
OS << "let ";
printTupleNames(typeLoc.getTypeRepr(), OS);
OS << " = " << argName << (isMultiLine ? "\n" + indent : "; ");
diagnose(param->getStartLoc(), diag::anon_closure_tuple_param_destructuring)
.fixItReplace(param->getSourceRange(), argName)
.fixItInsert(Tok.getLoc(), OS.str());
invalid = true;
}
return invalid;
}
/// Parse a closure expression:
///
///   expr-closure:
///     '{' closure-signature? brace-item-list '}'
///
/// On entry the current token must be the opening '{'. A single-expression
/// body is wrapped in an implicit 'return'; if a capture list is present the
/// result is wrapped in a CaptureListExpr.
ParserResult<Expr> Parser::parseExprClosure() {
  assert(Tok.is(tok::l_brace) && "Not at a left brace?");
  // We may be parsing this closure expr in a matching pattern context. If so,
  // reset our state to not be in a pattern for any recursive pattern parses.
  llvm::SaveAndRestore<decltype(InVarOrLetPattern)>
  T(InVarOrLetPattern, IVOLP_NotInVarOrLet);
  // Parse the opening left brace.
  SourceLoc leftBrace = consumeToken();
  // Parse the closure-signature, if present.
  ParameterList *params = nullptr;
  SourceLoc throwsLoc;
  SourceLoc arrowLoc;
  TypeRepr *explicitResultType;
  SourceLoc inLoc;
  SmallVector<CaptureListEntry, 2> captureList;
  parseClosureSignatureIfPresent(captureList, params, throwsLoc, arrowLoc,
                                 explicitResultType, inLoc);
  // If the closure was created in the context of an array type signature's
  // size expression, there will not be a local context. A parse error will
  // be reported at the signature's declaration site.
  if (!CurLocalContext) {
    // Skip the rest of the closure so parsing can continue after it.
    skipUntil(tok::r_brace);
    if (Tok.is(tok::r_brace))
      consumeToken();
    return makeParserError();
  }
  // Discriminators keep multiple closures in the same context distinct.
  unsigned discriminator = CurLocalContext->claimNextClosureDiscriminator();
  // Create the closure expression and enter its context.
  auto *closure = new (Context) ClosureExpr(params, throwsLoc, arrowLoc, inLoc,
                                            explicitResultType,
                                            discriminator, CurDeclContext);
  // The arguments to the func are defined in their own scope.
  Scope S(this, ScopeKind::ClosureParams);
  ParseFunctionBody cc(*this, closure);
  // Handle parameters.
  if (params) {
    // Add the parameters into scope.
    addParametersToScope(params);
  } else {
    // There are no parameters; allow anonymous closure variables.
    // FIXME: We could do this all the time, and then provide Fix-Its
    // to map $i -> the appropriately-named argument. This might help
    // users who are refactoring code by adding names.
    AnonClosureVars.push_back({ leftBrace, {}});
  }
  // Add capture list variables to scope.
  for (auto c : captureList)
    addToScope(c.Var);
  // Parse the body.
  SmallVector<ASTNode, 4> bodyElements;
  ParserStatus Status;
  Status |= parseBraceItems(bodyElements, BraceItemListKind::Brace);
  // Parse the closing '}'.
  SourceLoc rightBrace;
  parseMatchingToken(tok::r_brace, rightBrace, diag::expected_closure_rbrace,
                     leftBrace);
  // If we didn't have any parameters, create a parameter list from the
  // anonymous closure arguments ($0, $1, ...) the body referenced.
  if (!params) {
    // Create a parameter pattern containing the anonymous variables.
    auto &anonVars = AnonClosureVars.back().second;
    SmallVector<ParamDecl*, 4> elements;
    for (auto anonVar : anonVars)
      elements.push_back(anonVar);
    params = ParameterList::create(Context, leftBrace, elements, leftBrace);
    // Pop out of the anonymous closure variables scope.
    AnonClosureVars.pop_back();
    // Attach the parameters to the closure.
    closure->setParameterList(params);
    closure->setHasAnonymousClosureVars();
  }
  // If the body consists of a single expression, turn it into a return
  // statement.
  //
  // But don't do this transformation during code completion, as the source
  // may be incomplete and the type mismatch in return statement will just
  // confuse the type checker.
  bool hasSingleExpressionBody = false;
  if (!Status.hasCodeCompletion() && bodyElements.size() == 1) {
    // If the closure's only body element is a single return statement,
    // use that instead of creating a new wrapping return expression.
    Expr *returnExpr = nullptr;
    if (bodyElements[0].is<Stmt *>()) {
      if (auto returnStmt =
                  dyn_cast<ReturnStmt>(bodyElements[0].get<Stmt*>())) {
        if (!returnStmt->hasResult()) {
          // Give a bare 'return' an implicit '()' result.
          returnExpr = TupleExpr::createEmpty(Context,
                                              SourceLoc(),
                                              SourceLoc(),
                                              /*implicit*/true);
          returnStmt->setResult(returnExpr);
        }
        hasSingleExpressionBody = true;
      }
    }
    // Otherwise, create the wrapping return.
    if (bodyElements[0].is<Expr *>()) {
      hasSingleExpressionBody = true;
      returnExpr = bodyElements[0].get<Expr*>();
      bodyElements[0] = new (Context) ReturnStmt(SourceLoc(),
                                                 returnExpr);
    }
  }
  // Set the body of the closure.
  closure->setBody(BraceStmt::create(Context, leftBrace, bodyElements,
                                     rightBrace),
                   hasSingleExpressionBody);
  // If the closure includes a capture list, create an AST node for it as well.
  Expr *result = closure;
  if (!captureList.empty())
    result = new (Context) CaptureListExpr(Context.AllocateCopy(captureList),
                                           closure);
  return makeParserResult(Status, result);
}
/// expr-anon-closure-argument:
///   dollarident
///
/// Parse an anonymous closure argument reference ($0, $1, ...). Diagnoses a
/// reference outside a closure or inside a closure with explicit parameters;
/// otherwise lazily creates implicit ParamDecls (up to the referenced index)
/// in the enclosing anonymous-variable scope and returns a DeclRefExpr.
Expr *Parser::parseExprAnonClosureArg() {
  StringRef Name = Tok.getText();
  SourceLoc Loc = consumeToken(tok::dollarident);
  assert(Name[0] == '$' && "Not a dollarident");
  // We know from the lexer that this is all-numeric.
  unsigned ArgNo = 0;
  if (Name.substr(1).getAsInteger(10, ArgNo)) {
    // getAsInteger failed, i.e. the number overflowed 'unsigned'; point the
    // diagnostic just past the '$'.
    diagnose(Loc.getAdvancedLoc(1), diag::dollar_numeric_too_large);
    return new (Context) ErrorExpr(Loc);
  }
  // If this is a closure expression that did not have any named parameters,
  // generate the anonymous variables we need.
  auto closure = dyn_cast_or_null<ClosureExpr>(
      dyn_cast<AbstractClosureExpr>(CurDeclContext));
  if (!closure) {
    diagnose(Loc, diag::anon_closure_arg_not_in_closure);
    return new (Context) ErrorExpr(Loc);
  }
  // When the closure already has explicit parameters, offer their names as
  // replacements.
  if (auto *params = closure->getParameters()) {
    if (ArgNo < params->size() && params->get(ArgNo)->hasName()) {
      // Likely a typo for the named parameter; fix-it to its name.
      auto paramName = params->get(ArgNo)->getNameStr();
      diagnose(Loc, diag::anon_closure_arg_in_closure_with_args_typo, paramName)
        .fixItReplace(Loc, paramName);
      return new (Context) DeclRefExpr(params->get(ArgNo), DeclNameLoc(Loc),
                                       /*Implicit=*/false);
    } else {
      diagnose(Loc, diag::anon_closure_arg_in_closure_with_args);
      return new (Context) ErrorExpr(Loc);
    }
  }
  // Create any anonymous variables up to and including $ArgNo that have not
  // been created by an earlier reference.
  auto leftBraceLoc = AnonClosureVars.back().first;
  auto &decls = AnonClosureVars.back().second;
  while (ArgNo >= decls.size()) {
    unsigned nextIdx = decls.size();
    SmallVector<char, 4> StrBuf;
    StringRef varName = ("$" + Twine(nextIdx)).toStringRef(StrBuf);
    Identifier ident = Context.getIdentifier(varName);
    // The variables are located at the opening brace of the closure.
    SourceLoc varLoc = leftBraceLoc;
    auto *var = new (Context) ParamDecl(VarDecl::Specifier::Owned, SourceLoc(),SourceLoc(),
                                        Identifier(), varLoc, ident, Type(),
                                        closure);
    var->setImplicit();
    decls.push_back(var);
  }
  return new (Context) DeclRefExpr(decls[ArgNo], DeclNameLoc(Loc),
                                   /*Implicit=*/false);
}
/// parseExprList - Parse a parenthesized or bracketed list of expressions
/// and build either a ParenExpr (single unlabeled element) or a TupleExpr.
///
///   expr-paren:
///     lparen-any ')'
///     lparen-any binary-op ')'
///     lparen-any expr-paren-element (',' expr-paren-element)* ')'
///
///   expr-paren-element:
///     (identifier ':')? expr
///
ParserResult<Expr> Parser::parseExprList(tok leftTok, tok rightTok) {
  SmallVector<Expr*, 8> elements;
  SmallVector<Identifier, 8> labels;
  SmallVector<SourceLoc, 8> labelLocs;
  SourceLoc lLoc, rLoc;
  Expr *trailing = nullptr;

  // Delegate the heavy lifting to the full overload; this entry point never
  // accepts a trailing closure.
  ParserStatus status = parseExprList(leftTok, rightTok, /*isPostfix=*/false,
                                      /*isExprBasic=*/true, lLoc, elements,
                                      labels, labelLocs, rLoc, trailing);

  // A tuple with a single, unlabeled element is just parentheses.
  bool isParenthesized =
      elements.size() == 1 && (labels.empty() || labels[0].empty());
  if (isParenthesized) {
    auto *paren = new (Context) ParenExpr(lLoc, elements[0], rLoc,
                                          /*hasTrailingClosure=*/false);
    return makeParserResult(status, paren);
  }

  Expr *tuple = TupleExpr::create(Context, lLoc, elements, labels, labelLocs,
                                  rLoc, /*HasTrailingClosure=*/false,
                                  /*Implicit=*/false);
  return makeParserResult(status, tuple);
}
/// parseExprList - Parse a list of expressions.
///
///   expr-paren:
///     lparen-any ')'
///     lparen-any binary-op ')'
///     lparen-any expr-paren-element (',' expr-paren-element)* ')'
///
///   expr-paren-element:
///     (identifier ':')? expr
///
/// \param leftTok Expected opening delimiter kind.
/// \param rightTok Matching closing delimiter kind.
/// \param isPostfix Whether this is a call/subscript argument list that may
///        be followed by a trailing closure.
/// \param isExprBasic Whether parsing in an expr-basic context (restricts
///        trailing-closure acceptance).
/// \param leftLoc [out] Location of the opening delimiter.
/// \param exprs [out] The parsed subexpressions.
/// \param exprLabels [out] Argument labels; left empty unless at least one
///        element is labeled.
/// \param exprLabelLocs [out] Label locations, parallel to \p exprLabels.
/// \param rightLoc [out] Location of the closing delimiter.
/// \param trailingClosure [out] The trailing closure, or nullptr.
ParserStatus Parser::parseExprList(tok leftTok, tok rightTok,
                                   bool isPostfix,
                                   bool isExprBasic,
                                   SourceLoc &leftLoc,
                                   SmallVectorImpl<Expr *> &exprs,
                                   SmallVectorImpl<Identifier> &exprLabels,
                                   SmallVectorImpl<SourceLoc> &exprLabelLocs,
                                   SourceLoc &rightLoc,
                                   Expr *&trailingClosure) {
  trailingClosure = nullptr;
  StructureMarkerRAII ParsingExprList(*this, Tok);
  leftLoc = consumeToken(leftTok);
  ParserStatus status = parseList(rightTok, leftLoc, rightLoc,
                                  /*AllowSepAfterLast=*/false,
                                  rightTok == tok::r_paren
                                    ? diag::expected_rparen_expr_list
                                    : diag::expected_rsquare_expr_list,
                                  [&] () -> ParserStatus {
    Identifier FieldName;
    SourceLoc FieldNameLoc;
    parseOptionalArgumentLabel(FieldName, FieldNameLoc);
    // See if we have an operator decl ref '(<op>)'. The operator token in
    // this case lexes as a binary operator because it neither leads nor
    // follows a proper subexpression.
    ParserStatus Status;
    Expr *SubExpr = nullptr;
    if (Tok.isBinaryOperator() && peekToken().isAny(rightTok, tok::comma)) {
      SourceLoc Loc;
      Identifier OperName;
      if (parseAnyIdentifier(OperName, Loc, diag::expected_operator_ref)) {
        return makeParserError();
      }
      // Bypass local lookup. Use an 'Ordinary' reference kind so that the
      // reference may resolve to any unary or binary operator based on
      // context.
      SubExpr = new(Context) UnresolvedDeclRefExpr(OperName,
                                                   DeclRefKind::Ordinary,
                                                   DeclNameLoc(Loc));
    } else {
      ParserResult<Expr> ParsedSubExpr
        = parseExpr(diag::expected_expr_in_expr_list);
      SubExpr = ParsedSubExpr.getPtrOrNull();
      Status = ParsedSubExpr;
    }
    // If we got a subexpression, add it.
    if (SubExpr) {
      // Update names and locations.
      if (!exprLabels.empty()) {
        // Labels already started; keep the vectors parallel to 'exprs'.
        exprLabels.push_back(FieldName);
        exprLabelLocs.push_back(FieldNameLoc);
      } else if (FieldName.get()) {
        // First labeled element: backfill empty labels for the preceding
        // unlabeled elements so the label vectors stay parallel to 'exprs'.
        exprLabels.resize(exprs.size());
        exprLabels.push_back(FieldName);
        exprLabelLocs.resize(exprs.size());
        exprLabelLocs.push_back(FieldNameLoc);
      }
      // Add the subexpression.
      exprs.push_back(SubExpr);
    }
    return Status;
  });
  // If we aren't interested in trailing closures, or there isn't a valid one,
  // we're done.
  if (!isPostfix || Tok.isNot(tok::l_brace) ||
      !isValidTrailingClosure(isExprBasic, *this))
    return status;
  // Parse the closure.
  ParserResult<Expr> closure =
    parseTrailingClosure(SourceRange(leftLoc, rightLoc));
  status |= closure;
  if (closure.isNull())
    return status;
  // Record the trailing closure.
  trailingClosure = closure.get();
  return status;
}
/// Parse a trailing closure following a call or subscript whose explicit
/// argument list spans \p calleeRange.
ParserResult<Expr> Parser::parseTrailingClosure(SourceRange calleeRange) {
  SourceLoc openingBraceLoc = Tok.getLoc();

  // Parse the closure itself.
  ParserResult<Expr> closureResult = parseExprClosure();
  if (closureResult.isNull())
    return makeParserError();

  // Warn if the trailing closure is separated from its callee by more than
  // one line. A single-line separation is acceptable for a trailing closure
  // call, and will be diagnosed later only if the call fails to typecheck.
  auto calleeLine = SourceMgr.getLineNumber(calleeRange.End);
  auto closureLine = SourceMgr.getLineNumber(openingBraceLoc);
  if (closureLine > calleeLine + 1) {
    diagnose(openingBraceLoc, diag::trailing_closure_after_newlines);
    diagnose(calleeRange.Start, diag::trailing_closure_callee_here);

    // A bare brace with no parameters may really have been meant as a 'do'
    // statement; suggest that spelling.
    auto *explicitClosure = dyn_cast<ClosureExpr>(closureResult.get());
    if (explicitClosure && explicitClosure->hasAnonymousClosureVars() &&
        explicitClosure->getParameters()->size() == 0) {
      diagnose(openingBraceLoc, diag::brace_stmt_suggest_do)
        .fixItInsert(openingBraceLoc, "do ");
    }
  }

  return closureResult;
}
// NOTE: this is to detect the old object literal syntax.
// This will be removed in the future.
//
// After a '[#' sequence, decide whether what follows is NOT an old-style
// object literal (i.e. the tokens should be treated as an ordinary
// collection literal). Runs under a backtracking scope, so all tokens
// consumed here are restored on exit.
bool Parser::isCollectionLiteralStartingWithLSquareLit() {
  BacktrackingScope backtracking(*this);
  (void)consumeToken(tok::l_square_lit);
  switch (Tok.getKind()) {
    // Handle both degenerate '#' and '# identifier'.
    case tok::pound:
      (void) consumeToken();
      if (Tok.is(tok::identifier)) skipSingle();
      break;
// Generate a case per known object-literal pound keyword (current and
// legacy spellings) from the token definition file.
#define POUND_OBJECT_LITERAL(kw, desc, proto)\
    case tok::pound_##kw: (void)consumeToken(); break;
#define POUND_OLD_OBJECT_LITERAL(kw, new_kw, old_arg, new_arg)\
    case tok::pound_##kw: (void)consumeToken(); break;
#include "swift/Syntax/TokenKinds.def"
    default:
      // Not a pound token at all: definitely a collection literal.
      return true;
  }
  // Skip over a parenthesized argument, if present.
  if (Tok.is(tok::l_paren)) skipSingle();
  // An object literal is closed by ']#'; anything else means collection.
  return Tok.isNot(tok::r_square_lit);
}
/// \brief Parse an object literal expression.
///
/// expr-literal:
///   '#' identifier expr-paren
///
/// \param LitKind The kind of object literal to build.
/// \param isExprBasic Whether we are in an expr-basic context.
/// \param NewName When non-empty, the modern spelling to suggest: the literal
///        was written with a legacy name, so an error plus fix-its are
///        emitted and parsing returns an error result.
ParserResult<Expr>
Parser::parseExprObjectLiteral(ObjectLiteralExpr::LiteralKind LitKind,
                               bool isExprBasic,
                               StringRef NewName) {
  auto PoundTok = Tok;
  SourceLoc PoundLoc = consumeToken();
  // Parse a tuple of args
  if (!Tok.is(tok::l_paren)) {
    diagnose(Tok, diag::expected_arg_list_in_object_literal);
    return makeParserError();
  }
  // Parse the argument list.
  SourceLoc lParenLoc, rParenLoc;
  SmallVector<Expr *, 2> args;
  SmallVector<Identifier, 2> argLabels;
  SmallVector<SourceLoc, 2> argLabelLocs;
  Expr *trailingClosure;
  ParserStatus status = parseExprList(tok::l_paren, tok::r_paren,
                                      /*isPostfix=*/true, isExprBasic,
                                      lParenLoc, args, argLabels,
                                      argLabelLocs,
                                      rParenLoc,
                                      trailingClosure);
  if (status.hasCodeCompletion())
    return makeParserCodeCompletionResult<Expr>();
  if (status.isError())
    return makeParserError();
  // If the legacy name was used (e.g., #Image instead of #imageLiteral)
  // prompt an error and a fixit.
  if (!NewName.empty()) {
    auto diag =
      diagnose(PoundTok, diag::object_literal_legacy_name,
               PoundTok.getText(), NewName);
    auto Range = PoundTok.getRange();
    // Create a FixIt for the keyword.
    diag.fixItReplaceChars(Range.getStart(), Range.getEnd(), NewName);
    // Try and construct a FixIt for the argument label.
    if (argLabelLocs.size() > 0 && !argLabels[0].empty()) {
      auto ArgLoc = argLabelLocs[0];
      auto FirstElementName = argLabels[0];
      if (ArgLoc.isValid() && !FirstElementName.empty()) {
        auto OldArg = FirstElementName.str();
        // Map the legacy argument label to its modern spelling via the
        // table generated from the token definition file.
        auto NewArg =
          llvm::StringSwitch<StringRef>(OldArg)
#define POUND_OLD_OBJECT_LITERAL(kw, new_kw, old_arg, new_arg)\
            .Case(#old_arg, #new_arg)
#include "swift/Syntax/TokenKinds.def"
            .Default("");
        if (!NewArg.empty()) {
          auto Loc = argLabelLocs[0];
          diag.fixItReplaceChars(Loc,
                                 Loc.getAdvancedLocOrInvalid(OldArg.size()),
                                 NewArg);
        }
      }
    }
    return makeParserError();
  }
  return makeParserResult(
    ObjectLiteralExpr::create(Context, PoundLoc, LitKind, lParenLoc, args,
                              argLabels, argLabelLocs, rParenLoc,
                              trailingClosure, /*implicit=*/false));
}
/// \brief Parse an expression call suffix.
///
/// expr-call-suffix:
///   expr-paren
///   expr-closure (except in expr-basic)
///
/// \param fn The already-parsed callee expression.
/// \param isExprBasic Whether we are in an expr-basic context (restricts
///        trailing-closure parsing).
ParserResult<Expr>
Parser::parseExprCallSuffix(ParserResult<Expr> fn, bool isExprBasic) {
  assert(Tok.isFollowingLParen() && "Not a call suffix?");
  // Parse the first argument.
  // If there is a code completion token right after the '(', do a special case
  // callback.
  if (peekToken().is(tok::code_complete) && CodeCompletion) {
    consumeToken(tok::l_paren);
    // Build a placeholder call with a single code-completion argument so the
    // completion callback has a complete AST to work with.
    auto CCE = new (Context) CodeCompletionExpr(Tok.getLoc());
    auto Result = makeParserResult(
      CallExpr::create(Context, fn.get(), SourceLoc(),
                       { CCE },
                       { Identifier() },
                       { },
                       SourceLoc(),
                       /*trailingClosure=*/nullptr,
                       /*implicit=*/false));
    CodeCompletion->completePostfixExprParen(fn.get(), CCE);
    // Eat the code completion token because we handled it.
    consumeToken(tok::code_complete);
    Result.setHasCodeCompletion();
    return Result;
  }
  // Parse the argument list.
  SourceLoc lParenLoc, rParenLoc;
  SmallVector<Expr *, 2> args;
  SmallVector<Identifier, 2> argLabels;
  SmallVector<SourceLoc, 2> argLabelLocs;
  Expr *trailingClosure;
  ParserStatus status = parseExprList(tok::l_paren, tok::r_paren,
                                      /*isPostfix=*/true, isExprBasic,
                                      lParenLoc, args, argLabels,
                                      argLabelLocs,
                                      rParenLoc,
                                      trailingClosure);
  // Form the call.
  auto Result = makeParserResult(status | fn,
                                 CallExpr::create(Context, fn.get(), lParenLoc,
                                                  args, argLabels, argLabelLocs,
                                                  rParenLoc, trailingClosure,
                                                  /*implicit=*/false));
  if (status.hasCodeCompletion()) {
    if (CodeCompletion) {
      CodeCompletion->completeCallArg(Result.get());
    }
    Result.setHasCodeCompletion();
  }
  return Result;
}
/// parseExprCollection - Parse a collection literal expression.
///
///   expr-collection:
///     expr-array
///     expr-dictionary
//      lsquare-starting ']'
ParserResult<Expr> Parser::parseExprCollection(SourceLoc LSquareLoc) {
  // Consume the '[' unless the caller already did so.
  if (LSquareLoc.isInvalid())
    LSquareLoc = consumeToken(tok::l_square);

  Parser::StructureMarkerRAII ParsingCollection(
                                *this, LSquareLoc,
                                StructureMarkerKind::OpenSquare);

  // An immediately-closed '[]' is an empty array literal.
  if (Tok.is(tok::r_square)) {
    SourceLoc closeLoc = consumeToken(tok::r_square);
    auto *emptyArray = ArrayExpr::create(Context, LSquareLoc, {}, {}, closeLoc);
    return makeParserResult(emptyArray);
  }

  // '[:]' is an empty dictionary literal.
  if (Tok.is(tok::colon) && peekToken().is(tok::r_square)) {
    consumeToken(tok::colon);
    SourceLoc closeLoc = consumeToken(tok::r_square);
    auto *emptyDict = DictionaryExpr::create(Context, LSquareLoc, {}, closeLoc);
    return makeParserResult(emptyDict);
  }

  // Parse the leading element (or key, if this turns out to be a dictionary).
  ParserResult<Expr> firstElement
    = parseExpr(diag::expected_expr_in_collection_literal);
  if (firstElement.isNull()) {
    // Recover by skipping to (and consuming) the closing ']'.
    skipUntil(tok::r_square);
    if (Tok.is(tok::r_square))
      consumeToken();
    return firstElement;
  }

  // A ':' after the first element marks a dictionary literal; otherwise we
  // have an array literal.
  if (Tok.is(tok::colon))
    return parseExprDictionary(LSquareLoc, firstElement);
  return parseExprArray(LSquareLoc, firstElement);
}
/// parseExprArray - Parse an array literal expression.
///
/// The lsquare-starting and first expression have already been
/// parsed, and are passed in as parameters.
///
///   expr-array:
///     '[' expr (',' expr)* ','? ']'
///     '[' ']'
ParserResult<Expr> Parser::parseExprArray(SourceLoc LSquareLoc,
                                          ParserResult<Expr> FirstExpr) {
  SmallVector<Expr *, 8> SubExprs;
  SmallVector<SourceLoc, 8> CommaLocs;
  SubExprs.push_back(FirstExpr.get());
  SourceLoc CommaLoc, RSquareLoc;
  ParserStatus Status(FirstExpr);
  // The first element must be followed by ',' or ']'; anything else gets a
  // missing-separator diagnostic (with fix-it) and we keep parsing.
  if (Tok.isNot(tok::r_square) && !consumeIf(tok::comma, CommaLoc)) {
    diagnose(Tok, diag::expected_separator, ",")
      .fixItInsertAfter(PreviousLoc, ",")
    Status.setIsParseError();
  }
  // Record the comma after the first element (invalid loc if it was missing).
  CommaLocs.push_back(CommaLoc);
  // Parse the remaining elements up to the closing ']'.
  Status |= parseList(tok::r_square, LSquareLoc, RSquareLoc,
                      /*AllowSepAfterLast=*/true,
                      diag::expected_rsquare_array_expr,
                      [&] () -> ParserStatus
  {
    ParserResult<Expr> Element
      = parseExpr(diag::expected_expr_in_collection_literal);
    if (Element.isNonNull())
      SubExprs.push_back(Element.get());
    // Remember the comma that follows this element, if any.
    if (Tok.is(tok::comma))
      CommaLocs.push_back(Tok.getLoc());
    return Element;
  });
  assert(SubExprs.size() >= 1);
  return makeParserResult(Status,
          ArrayExpr::create(Context, LSquareLoc, SubExprs, CommaLocs,
                            RSquareLoc));
}
/// parseExprDictionary - Parse a dictionary literal expression.
///
/// The lsquare-starting and first key have already been parsed, and
/// are passed in as parameters.
///
///   expr-dictionary:
///     '[' expr ':' expr (',' expr ':' expr)* ','? ']'
///     '[' ':' ']'
ParserResult<Expr> Parser::parseExprDictionary(SourceLoc LSquareLoc,
                                               ParserResult<Expr> FirstKey) {
  assert(Tok.is(tok::colon));
  // Each subexpression is a (key, value) tuple.
  // FIXME: We're not tracking the colon locations in the AST.
  SmallVector<Expr *, 8> SubExprs;
  SourceLoc RSquareLoc;
  // Function that adds a new key/value pair.
  auto addKeyValuePair = [&](Expr *Key, Expr *Value) -> void {
    Expr *Exprs[] = {Key, Value};
    SubExprs.push_back(TupleExpr::createImplicit(Context, Exprs, { }));
  };
  bool FirstPair = true;
  ParserStatus Status(FirstKey);
  Status |=
      parseList(tok::r_square, LSquareLoc, RSquareLoc,
                /*AllowSepAfterLast=*/true,
                diag::expected_rsquare_array_expr, [&]() -> ParserStatus {
        // Parse the next key.
        ParserResult<Expr> Key;
        if (FirstPair) {
          // The first key was already parsed by the caller; just reuse it.
          Key = makeParserResult(FirstKey.get());
          FirstPair = false;
        } else {
          Key = parseExpr(diag::expected_key_in_dictionary_literal);
          if (Key.isNull())
            return Key;
        }
        // Parse the ':'.
        if (Tok.isNot(tok::colon)) {
          diagnose(Tok, diag::expected_colon_in_dictionary_literal);
          return ParserStatus(Key) | makeParserError();
        }
        consumeToken();
        // Parse the next value.
        ParserResult<Expr> Value =
            parseExpr(diag::expected_value_in_dictionary_literal);
        // Substitute an ErrorExpr so the pair is still recorded.
        if (Value.isNull())
          Value = makeParserResult(Value, new (Context) ErrorExpr(PreviousLoc));
        // Add this key/value pair.
        addKeyValuePair(Key.get(), Value.get());
        return ParserStatus(Key) | ParserStatus(Value);
      });
  assert(SubExprs.size() >= 1);
  return makeParserResult(Status, DictionaryExpr::create(Context, LSquareLoc,
                                                         SubExprs, RSquareLoc));
}
/// Introduce every named variable bound by \p Patterns into the current scope.
void Parser::addPatternVariablesToScope(ArrayRef<Pattern *> Patterns) {
  for (Pattern *pattern : Patterns) {
    pattern->forEachVariable([&](VarDecl *var) {
      // Anonymous bindings ('_') declare nothing visible.
      if (!var->hasName())
        return;
      addToScope(var);
    });
  }
}
/// Introduce each named parameter of \p PL into the current scope.
void Parser::addParametersToScope(ParameterList *PL) {
  for (auto param : *PL) {
    // Unnamed ('_') parameters are not visible to the body.
    if (!param->hasName())
      continue;
    addToScope(param);
  }
}
/// Parse availability query specification.
///
///  availability-spec:
///     '*'
///     language-version-constraint-spec
///     platform-version-constraint-spec
ParserResult<AvailabilitySpec> Parser::parseAvailabilitySpec() {
  // '*' covers every platform not otherwise listed in the query.
  if (Tok.isBinaryOperator() && Tok.getText() == "*") {
    SourceLoc starLoc = Tok.getLoc();
    consumeToken();
    auto *spec = new (Context) OtherPlatformAvailabilitySpec(starLoc);
    return makeParserResult(spec);
  }

  // 'swift' introduces a language-version constraint.
  if (Tok.isIdentifierOrUnderscore() && Tok.getText() == "swift")
    return parseLanguageVersionConstraintSpec();

  // Anything else must name a platform.
  return parsePlatformVersionConstraintSpec();
}
/// Parse language-version constraint specification.
///
///  language-version-constraint-spec:
///     "swift" version-tuple
ParserResult<LanguageVersionConstraintAvailabilitySpec>
Parser::parseLanguageVersionConstraintSpec() {
  // The spec must start with the 'swift' contextual keyword.
  if (!(Tok.isIdentifierOrUnderscore() && Tok.getText() == "swift"))
    return nullptr;
  SourceLoc swiftLoc = Tok.getLoc();
  consumeToken();

  // Parse the version number that follows.
  clang::VersionTuple version;
  SourceRange versionRange;
  if (parseVersionTuple(version, versionRange,
                        diag::avail_query_expected_version_number))
    return nullptr;

  auto *spec = new (Context) LanguageVersionConstraintAvailabilitySpec(
      swiftLoc, version, versionRange);
  return makeParserResult(spec);
}
/// Parse platform-version constraint specification.
///
///  platform-version-constraint-spec:
///     identifier version-comparison version-tuple
ParserResult<PlatformVersionConstraintAvailabilitySpec>
Parser::parsePlatformVersionConstraintSpec() {
  // A code-completion token here asks for the set of platform names.
  if (Tok.is(tok::code_complete)) {
    consumeToken();
    if (CodeCompletion)
      CodeCompletion->completePoundAvailablePlatform();
    return makeParserCodeCompletionStatus();
  }

  Identifier platformName;
  SourceLoc platformLoc;
  if (parseIdentifier(platformName, platformLoc,
                      diag::avail_query_expected_platform_name))
    return nullptr;

  // A '>=' before the version is redundant; diagnose it with a removal
  // fix-it and carry on.
  if (Tok.isBinaryOperator() && Tok.getText() == ">=") {
    diagnose(Tok, diag::avail_query_version_comparison_not_needed)
        .fixItRemove(Tok.getLoc());
    consumeToken();
  }

  clang::VersionTuple version;
  SourceRange versionRange;
  if (parseVersionTuple(version, versionRange,
                        diag::avail_query_expected_version_number))
    return nullptr;

  // Reject identifiers that do not name a known platform.
  Optional<PlatformKind> platform = platformFromString(platformName.str());
  if (!platform.hasValue() || platform.getValue() == PlatformKind::none) {
    diagnose(Tok, diag::avail_query_unrecognized_platform_name, platformName);
    return nullptr;
  }

  auto *spec = new (Context) PlatformVersionConstraintAvailabilitySpec(
      platform.getValue(), platformLoc, version, versionRange);
  return makeParserResult(spec);
}
/// parseExprTypeOf
///
///   expr-dynamictype:
///     'type' '(' 'of:' expr ')'
///
/// Parses the magic dynamic-type expression. On entry the 'type' token is
/// current and a '(' must follow.
ParserResult<Expr> Parser::parseExprTypeOf() {
  // Consume 'type'
  SourceLoc keywordLoc = consumeToken();
  // Parse the leading '('.
  SourceLoc lParenLoc = consumeToken(tok::l_paren);
  // Parse `of` label.
  if (Tok.getText() == "of" && peekToken().is(tok::colon)) {
    // Consume the label.
    consumeToken();
    consumeToken(tok::colon);
  } else {
    // There cannot be a richer diagnostic here because the user may have
    // defined a function `type(...)` that conflicts with the magic expr.
    diagnose(Tok, diag::expr_typeof_expected_label_of);
  }
  // Parse the subexpression.
  ParserResult<Expr> subExpr = parseExpr(diag::expr_typeof_expected_expr);
  if (subExpr.hasCodeCompletion())
    return makeParserCodeCompletionResult<Expr>();
  // Parse the closing ')'
  SourceLoc rParenLoc;
  if (subExpr.isParseError()) {
    // On error, recover to (and consume) the ')' if one can be found;
    // otherwise fall back to the previous token's location.
    skipUntilDeclStmtRBrace(tok::r_paren);
    if (Tok.is(tok::r_paren))
      rParenLoc = consumeToken();
    else
      rParenLoc = PreviousLoc;
  } else {
    parseMatchingToken(tok::r_paren, rParenLoc,
                       diag::expr_typeof_expected_rparen, lParenLoc);
  }
  // If the subexpression was in error, just propagate the error.
  if (subExpr.isParseError())
    return makeParserResult<Expr>(
      new (Context) ErrorExpr(SourceRange(keywordLoc, rParenLoc)));
  return makeParserResult(
           new (Context) DynamicTypeExpr(keywordLoc, lParenLoc,
                                         subExpr.get(), rParenLoc, Type()));
}
|
#include <bits/stdc++.h>
#define ll long long
using namespace std;
// For each test case: an n x n board already holds k non-attacking rooks
// (given by coordinates). Prints the maximum number of additional rooks
// that can be placed without any attacks (n - k), followed by one free
// (row, column) pair per new rook.
//
// Fixes vs. the original: variable-length arrays `ll r[n]` / `ll c[n]`
// (a non-standard compiler extension) are replaced by std::vector, the
// final loop no longer mixes a signed counter with an unsigned size, and
// `endl`'s per-case flush is replaced with '\n'.
int main()
{
    std::ios_base::sync_with_stdio(false);
    std::cin.tie(NULL);
    std::cout.tie(NULL);
    long long t;
    std::cin >> t;
    while (t--)
    {
        long long n, k;
        std::cin >> n >> k;
        // Occupancy flags per row/column (1 = already holds a rook).
        std::vector<long long> r(n, 0), c(n, 0);
        for (long long i = 0; i < k; i++)
        {
            long long x, y;
            std::cin >> x >> y;
            r[x - 1] = 1;
            c[y - 1] = 1;
        }
        std::cout << n - k << " ";
        // Free rows and free columns; both lists contain exactly n - k
        // entries, so pairing them index-by-index is always valid.
        std::vector<long long> rv, cv;
        for (long long i = 0; i < n; i++)
        {
            if (r[i] == 0)
                rv.push_back(i + 1);
            if (c[i] == 0)
                cv.push_back(i + 1);
        }
        for (std::size_t i = 0; i < rv.size(); i++)
            std::cout << rv[i] << " " << cv[i] << " ";
        std::cout << "\n";
    }
    return 0;
}
|
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "table/format.h"
#include "port/port.h"
#include "rocksdb/env.h"
#include "table/block.h"
#include "util/coding.h"
#include "util/crc32c.h"
#include "util/perf_context_imp.h"
namespace rocksdb {
// Appends this handle's (offset, size) pair to *dst as two varint64s.
void BlockHandle::EncodeTo(std::string* dst) const {
  // Sanity check that all fields have been set (~0 is the "unset" marker).
  assert(offset_ != ~static_cast<uint64_t>(0));
  assert(size_ != ~static_cast<uint64_t>(0));
  PutVarint64(dst, offset_);
  PutVarint64(dst, size_);
}
// Parses an (offset, size) varint64 pair from the front of *input,
// consuming the bytes read. Returns Corruption if either varint is
// malformed or the input is exhausted.
Status BlockHandle::DecodeFrom(Slice* input) {
  if (!GetVarint64(input, &offset_) || !GetVarint64(input, &size_)) {
    return Status::Corruption("bad block handle");
  }
  return Status::OK();
}
// Serializes the footer: both block handles, zero padding up to the fixed
// handle area, then the 64-bit magic number as two fixed32s (low word
// first).
// NOTE(review): resize() here is absolute, not relative to the original
// size — this assumes *dst is empty on entry; the assert below only holds
// in that case.
void Footer::EncodeTo(std::string* dst) const {
#ifndef NDEBUG
  const size_t original_size = dst->size();
#endif
  metaindex_handle_.EncodeTo(dst);
  index_handle_.EncodeTo(dst);
  // Pad the varint-encoded handles out to their maximum length so the
  // footer has a fixed total size.
  dst->resize(2 * BlockHandle::kMaxEncodedLength);  // Padding
  PutFixed32(dst, static_cast<uint32_t>(kTableMagicNumber & 0xffffffffu));
  PutFixed32(dst, static_cast<uint32_t>(kTableMagicNumber >> 32));
  assert(dst->size() == original_size + kEncodedLength);
}
// Deserializes a footer from *input. Verifies the magic number (stored as
// two fixed32s, low word first, in the last 8 bytes of the footer),
// decodes both block handles, then advances *input past the whole footer,
// skipping any padding.
Status Footer::DecodeFrom(Slice* input) {
  assert(input != nullptr);
  assert(input->size() >= kEncodedLength);

  // The magic number occupies the last 8 bytes of the encoded footer.
  const char* magic_ptr = input->data() + kEncodedLength - 8;
  const uint32_t magic_lo = DecodeFixed32(magic_ptr);
  const uint32_t magic_hi = DecodeFixed32(magic_ptr + 4);
  const uint64_t magic = ((static_cast<uint64_t>(magic_hi) << 32) |
                          (static_cast<uint64_t>(magic_lo)));
  if (magic != kTableMagicNumber) {
    return Status::InvalidArgument("not an sstable (bad magic number)");
  }

  Status result = metaindex_handle_.DecodeFrom(input);
  if (result.ok()) {
    result = index_handle_.DecodeFrom(input);
  }
  if (result.ok()) {
    // We skip over any leftover data (just padding for now) in "input"
    const char* end = magic_ptr + 8;
    *input = Slice(end, input->data() + input->size() - end);
  }
  return result;
}
// Reads the block identified by `handle` from `file` into *result. The
// on-disk layout (see table_builder.cc) is: block data (handle.size()
// bytes), a 1-byte compression-type tag, then a 4-byte masked crc32c
// covering data + tag. When `do_uncompress` is set and the block is
// compressed, the data is decompressed into a fresh heap buffer.
// result->heap_allocated tells the caller whether it owns result->data;
// cachable mirrors that ownership.
Status ReadBlockContents(RandomAccessFile* file,
                         const ReadOptions& options,
                         const BlockHandle& handle,
                         BlockContents* result,
                         Env* env,
                         bool do_uncompress) {
  result->data = Slice();
  result->cachable = false;
  result->heap_allocated = false;

  // Read the block contents as well as the type/crc footer.
  // See table_builder.cc for the code that built this structure.
  size_t n = static_cast<size_t>(handle.size());
  char* buf = new char[n + kBlockTrailerSize];
  Slice contents;

  StopWatchNano timer(env);
  StartPerfTimer(&timer);
  Status s = file->Read(handle.offset(), n + kBlockTrailerSize, &contents, buf);
  BumpPerfCount(&perf_context.block_read_count);
  BumpPerfCount(&perf_context.block_read_byte, n + kBlockTrailerSize);
  BumpPerfTime(&perf_context.block_read_time, &timer);

  if (!s.ok()) {
    delete[] buf;
    return s;
  }
  if (contents.size() != n + kBlockTrailerSize) {
    delete[] buf;
    return Status::Corruption("truncated block read");
  }

  // Check the crc of the type and the block contents
  const char* data = contents.data();  // Pointer to where Read put the data
  if (options.verify_checksums) {
    // The stored crc covers the data plus the 1-byte type tag.
    const uint32_t crc = crc32c::Unmask(DecodeFixed32(data + n + 1));
    const uint32_t actual = crc32c::Value(data, n + 1);
    if (actual != crc) {
      delete[] buf;
      s = Status::Corruption("block checksum mismatch");
      return s;
    }
    BumpPerfTime(&perf_context.block_checksum_time, &timer);
  }

  // If the caller has requested that the block not be uncompressed
  if (!do_uncompress || data[n] == kNoCompression) {
    if (data != buf) {
      // File implementation gave us pointer to some other data.
      // Use it directly under the assumption that it will be live
      // while the file is open.
      delete[] buf;
      result->data = Slice(data, n);
      result->heap_allocated = false;
      result->cachable = false;  // Do not double-cache
    } else {
      // Ownership of buf transfers to *result.
      result->data = Slice(buf, n);
      result->heap_allocated = true;
      result->cachable = true;
    }
    result->compression_type = (rocksdb::CompressionType)data[n];
    s = Status::OK();
  } else {
    // Decompresses into a new buffer owned by *result; the raw read
    // buffer is no longer needed on either outcome.
    s = UncompressBlockContents(data, n, result);
    delete[] buf;
  }
  BumpPerfTime(&perf_context.block_decompress_time, &timer);
  return s;
}
//
// The 'data' points to the raw block contents read in from file; data[n]
// holds the compression-type tag. This method allocates a new heap buffer
// and the raw block contents are uncompressed into it. The buffer is
// returned via 'result' (heap_allocated = true) and it is up to the
// caller to free it. Returns Corruption if the compression library is
// unavailable, the payload is corrupt, or the type tag is unknown.
Status UncompressBlockContents(const char* data, size_t n,
                               BlockContents* result) {
  char* ubuf = nullptr;
  // NOTE(review): decompress_size is a signed int while block sizes are
  // size_t elsewhere — assumes decompressed blocks stay below INT_MAX.
  int decompress_size = 0;
  assert(data[n] != kNoCompression);
  switch (data[n]) {
    case kSnappyCompression: {
      size_t ulength = 0;
      static char snappy_corrupt_msg[] =
        "Snappy not supported or corrupted Snappy compressed block contents";
      // Snappy reports the uncompressed length up front, so the output
      // buffer is allocated here.
      if (!port::Snappy_GetUncompressedLength(data, n, &ulength)) {
        return Status::Corruption(snappy_corrupt_msg);
      }
      ubuf = new char[ulength];
      if (!port::Snappy_Uncompress(data, n, ubuf)) {
        delete[] ubuf;
        return Status::Corruption(snappy_corrupt_msg);
      }
      result->data = Slice(ubuf, ulength);
      result->heap_allocated = true;
      result->cachable = true;
      break;
    }
    case kZlibCompression:
      // Zlib_Uncompress allocates the output buffer itself.
      ubuf = port::Zlib_Uncompress(data, n, &decompress_size);
      static char zlib_corrupt_msg[] =
        "Zlib not supported or corrupted Zlib compressed block contents";
      if (!ubuf) {
        return Status::Corruption(zlib_corrupt_msg);
      }
      result->data = Slice(ubuf, decompress_size);
      result->heap_allocated = true;
      result->cachable = true;
      break;
    case kBZip2Compression:
      // BZip2_Uncompress allocates the output buffer itself.
      ubuf = port::BZip2_Uncompress(data, n, &decompress_size);
      static char bzip2_corrupt_msg[] =
        "Bzip2 not supported or corrupted Bzip2 compressed block contents";
      if (!ubuf) {
        return Status::Corruption(bzip2_corrupt_msg);
      }
      result->data = Slice(ubuf, decompress_size);
      result->heap_allocated = true;
      result->cachable = true;
      break;
    default:
      return Status::Corruption("bad block type");
  }
  result->compression_type = kNoCompression;  // not compressed any more
  return Status::OK();
}
} // namespace rocksdb
|
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/details/computation_op_handle.h"
#include <string>
namespace paddle {
namespace framework {
namespace details {
// Wraps a single operator: instantiates it from its description and
// records the scope and device placement it will run on.
ComputationOpHandle::ComputationOpHandle(const OpDesc &op_desc, Scope *scope,
                                         platform::Place place)
    : op_(framework::OpRegistry::CreateOp(op_desc)),
      scope_(scope),
      place_(place) {}

// Runs the wrapped operator on place_'s device context. Before running,
// waits on every producer of an input whose device context (for this
// place) differs from ours, so cross-context dependencies are honored.
void ComputationOpHandle::RunImpl() {
  auto *cur_ctx = dev_ctxes_[place_];
  for (auto *in : inputs_) {
    bool need_wait =
        in->generated_op_ && in->generated_op_->dev_ctxes_[place_] != cur_ctx;
    if (need_wait) {
      in->generated_op_->Wait(cur_ctx);
    }
  }

  // Execute inside the local execution scope stored under
  // kLocalExecScopeName in our scope.
  this->RunAndRecordEvent([this] {
    op_->Run(*scope_->FindVar(kLocalExecScopeName)->Get<Scope *>(), place_);
  });
}

// The handle's display name is the wrapped operator's type.
std::string ComputationOpHandle::Name() const { return op_->Type(); }
} // namespace details
} // namespace framework
} // namespace paddle
|
/*
* sdio_sdcard.cpp
*
* Created on: Apr 24, 2015
* Author: walmis
*/
#include "sdio_sdcard.hpp"
using namespace xpcc;
using namespace stm32;
#define SDIO_STATIC_FLAGS (SDIO_HAL::Interrupt) ((uint32_t)0x000005FF)
#define SD_OCR_ERRORBITS ((uint32_t)0xFDFFE008)
#define DATATIMEOUT (0xFFFFFF)
static SDIO_SDCard* inst;
// Constructs the driver on DMA2 stream 3 and records the instance pointer
// used by the SDIO interrupt trampoline below.
SDIO_SDCard::SDIO_SDCard() : dma_stm(dma::Stream::DMA2_3),
		initialized(false), _sectors(0), RCA(0) {
	inst = this;
}

// C-linkage SDIO interrupt vector: forwards to the registered instance.
extern "C"
void SDIO_IRQHandler() {
	IRQWrapper w;
	inst->handleIRQ();
}
// Extracts the inclusive bit field [msb:lsb] from a 128-bit big-endian
// register image (16 bytes, data[0] holding bits 127..120), as used when
// parsing the SD card's CSD register.
static uint32_t ext_bits(unsigned char* data, int msb,
		int lsb) {
	uint32_t result = 0;
	const uint32_t width = 1 + msb - lsb;
	for (uint32_t n = 0; n < width; n++) {
		const uint32_t pos = lsb + n;
		const uint32_t idx = 15 - (pos >> 3);   // byte index, big-endian
		const uint32_t shift = pos & 0x7;       // bit within that byte
		result |= ((data[idx] >> shift) & 1u) << n;
	}
	return result;
}
// Brings the card interface up: configures the SDIO DMA stream, powers the
// peripheral with a slow identification clock and a 1-bit bus, then runs
// card detection. Returns true once a v2 card is fully initialized; v1
// cards are currently unsupported (init_v1_card() always fails). Safe to
// call repeatedly once initialized.
bool xpcc::stm32::SDIO_SDCard::init() {
	if(initialized)
		return true;
	dma::Config dma_cfg;
	// prepare dma channel config: word-wide, burst-of-4 both sides, with
	// the SDIO peripheral as flow controller
	dma_cfg.channel(dma::Channel::Channel_4)
		->mode(dma::Mode::Normal)
		->periphBaseAddress((uint32_t)&SDIO->FIFO)
		->peripheralBurst(dma::PeripheralBurst::INC4)
		->memoryBurst(dma::MemoryBurst::INC4)
		->memoryDataSize(dma::MemoryDataSize::Word)
		->peripheralDataSize(dma::PeripheralDataSize::Word)
		->memoryInc(dma::MemoryInc::Enable)
		->flowControl(dma::FlowControl::Peripheral)
		->fifoMode(dma::FIFOMode::Enable)
		->fifoThreshold(dma::FIFOThreshold::Full)
		->priority(dma::Prioriy::VeryHigh);
	dma_stm.init(dma_cfg);
	// wake waiting threads when a DMA transfer completes
	dma_stm.attachCallback([this]() {
		dma_evt.signal();
	});
	SDIO_HAL::init();
	NVIC_EnableIRQ(SDIO_IRQn);
	// start narrow and slow; widened and sped up after identification
	SDIO_HAL::setBusWidth(SDIO_HAL::BusWidth::_1b);
	SDIO_HAL::setClockDiv(192);
	SDIO_HAL::setPowerState(SDIO_HAL::PowerState::On);
	SDIO_HAL::setClockState(true);
	// CMD0 (GO_IDLE_STATE per SD spec): reset the card
	cmd(0, 0, ResponseType::None);
	// SEND_IF_COND distinguishes v2 cards (respond) from v1 (no response)
	if(!cmd(SDIO_SEND_IF_COND, 0x1AA, ResponseType::Short)) {
		//sdcard v1 — not supported; result intentionally ignored
		init_v1_card();
	} else {
		if(init_v2_card()) {
			initialized = true;
			return true;
		}
	}
	return false;
}
// True once init() has completed successfully.
bool xpcc::stm32::SDIO_SDCard::isInitialized() {
	return initialized;
}

// Records the block cursor for subsequent readData() calls; no card
// command is issued until data is actually requested.
bool xpcc::stm32::SDIO_SDCard::readStart(uint32_t blockNumber) {
	//XPCC_LOG_DEBUG .printf("r+ %d\n", blockNumber);
	rd_block = blockNumber;
	return true;
}

// Nothing to tear down: each readData() call is a self-contained transfer.
bool xpcc::stm32::SDIO_SDCard::readStop() {
	//XPCC_LOG_DEBUG .printf("r- %d\n", rd_block);
	return true;
}
// Reads `length` bytes starting at the block cursor set by readStart().
// Requests of up to one block use READ_SINGLE_BLOCK; anything larger uses
// a multi-block transfer. Advances the cursor by the block count.
bool xpcc::stm32::SDIO_SDCard::readData(uint8_t* buffer, size_t length) {
	if (length <= 512) {
		return readSingleBlock(buffer, rd_block++);
	}
	const uint32_t nBlocks = length / 512;
	const bool ok = readMultipleBlocks(buffer, rd_block, nBlocks);
	rd_block += nBlocks;
	return ok;
}
// Reads `numBlocks` consecutive 512-byte blocks into `buffer` via DMA.
// The open-ended READ_MULT_BLOCK transfer is always terminated with
// STOP_TRANSMISSION, regardless of success. `cdv` scales the block number
// into the card's addressing unit (bytes vs. blocks, see _sd_sectors).
bool xpcc::stm32::SDIO_SDCard::readMultipleBlocks(uint8_t* buffer,
		size_t block_number, uint32_t numBlocks) {
	SDIO_HAL::resetDataPath();
	//initialize dma controller
	if(!dmaInit(buffer, false)) {
		return false;
	}
	//tell SDIO to begin block transfer
	if(!startBlockTransfer(buffer, false, numBlocks*512)) {
		return false;
	}
	//send read cmd to initiate transfer
	if(!cmd(SD_CMD_READ_MULT_BLOCK, block_number*cdv)) {
		return false;
	}
	//wait for DMA + SDIO completion, then terminate the open-ended read
	bool res = waitTransfer();
	cmd(SD_CMD_STOP_TRANSMISSION, 0);
	return res;
}

// Reads one 512-byte block into `buffer` via DMA: arm DMA, arm the SDIO
// data path, issue READ_SINGLE_BLOCK, then wait for completion.
bool xpcc::stm32::SDIO_SDCard::readSingleBlock(uint8_t* buffer,
		size_t block_number) {
	SDIO_HAL::resetDataPath();
	//initialize dma controller
	if(!dmaInit(buffer, false)) {
		return false;
	}
	//tell SDIO to begin block transfer
	if(!startBlockTransfer(buffer, false)) {
		return false;
	}
	//send read cmd to initiate transfer
	if(!cmd(SD_CMD_READ_SINGLE_BLOCK, block_number*cdv)) {
		return false;
	}
	//wait for DMA + SDIO completion
	return waitTransfer();
}
// Begins a multi-block write at blockNumber. If eraseCount > 1, ACMD23
// pre-erases that many blocks before the write. On success the SDIO clock
// is gated off: after writeStart, data blocks may arrive at irregular
// intervals, so writeData() re-enables the clock only while data flows.
bool xpcc::stm32::SDIO_SDCard::writeStart(uint32_t blockNumber,
		uint32_t eraseCount) {
	SDIO_HAL::resetDataPath();
	if (eraseCount > 1) {
		// APP_CMD prefix addressed to our card, then ACMD23
		// (SET_WR_BLK_ERASE_COUNT).
		cmd(SD_CMD_APP_CMD, RCA<<16);
		// The original tested `!cmd(23, eraseCount) != 0`, which only
		// worked by accident of precedence ((!cmd(...)) != 0); test the
		// failure directly.
		if (!cmd(23, eraseCount)) {
			XPCC_LOG_DEBUG.printf("SD_CARD_ERROR_ACMD23\n");
			return false;
		}
	}
	if(!cmd(SD_CMD_WRITE_MULT_BLOCK, blockNumber*cdv)) {
		return false;
	}
	//stop clock. After writeStart data blocks can be written at irregular intervals.
	//We enable the clock when there is something to write
	SDIO_HAL::setClockState(false);
	return true;
}
// Ends a multi-block write: ungates the clock, sends STOP_TRANSMISSION,
// then polls card status until the card leaves the programming state.
bool xpcc::stm32::SDIO_SDCard::writeStop() {
	//XPCC_LOG_DEBUG .printf("w-");
	SDIO_HAL::setClockState(true);
	if(!cmd(SD_CMD_STOP_TRANSMISSION, 0)) {
		return false;
	}
	uint8_t status;
	// wait for the card's internal programming to complete
	while(1) {
		if(!getCardStatus(status)) break;
		if((SDCardState)status == SDCardState::SD_CARD_PROGRAMMING) {
			sleep(1);
		} else {
			break;
		}
	}
	//XPCC_LOG_DEBUG .printf("status 0x%x\n", status);
	return true;
}

// Streams one 512-byte block of an open multi-block write (see
// writeStart). The clock is gated between blocks and enabled only while
// data actually flows.
bool xpcc::stm32::SDIO_SDCard::writeData(const uint8_t* src) {
	//XPCC_LOG_DEBUG .printf("wb\n");
	SDIO_HAL::resetDataPath();
	if(!dmaInit((uint8_t*)src, true)) {
		return false;
	}
	//startBlockTransfer will enable the clock for us
	if(!startBlockTransfer((uint8_t*)src, true)) {
		return false;
	}
	if(!waitTransfer()) {
		// leave the clock running on error so later commands still work
		SDIO_HAL::setClockState(true);
		return false;
	}
	//data is sent, stop clock and wait for more data
	SDIO_HAL::setClockState(false);
	return true;
}

// Token-based write variant of the interface; not used by this driver.
bool xpcc::stm32::SDIO_SDCard::writeData(uint8_t token, const uint8_t* src) {
	//unused
	return false;
}

// Writes a single 512-byte block: arm DMA, issue WRITE_SINGLE_BLOCK, run
// the data phase, and wait for completion.
bool xpcc::stm32::SDIO_SDCard::writeBlock(uint32_t blockNumber,
		const uint8_t* src) {
	SDIO_HAL::resetDataPath();
	if(!dmaInit((uint8_t*)src, true)) {
		return false;
	}
	if(!cmd(SD_CMD_WRITE_SINGLE_BLOCK, blockNumber*cdv)) {
		return false;
	}
	if(!startBlockTransfer((uint8_t*)src, true)) {
		return false;
	}
	return waitTransfer();
}
// Issues SD command `cmd` with argument `arg` and busy-waits (bounded by a
// spin counter) for the controller to report completion. `resp` selects
// the expected response format; R3Resp is a short response whose CRC-fail
// flag is treated as success, since R3 responses carry no valid CRC.
// Returns true (1) on success, false (0) on error or timeout.
// NOTE(review): the `timeout` parameter is currently unused — the wait is
// bounded only by the fixed deadlockPreventer spin count.
bool xpcc::stm32::SDIO_SDCard::cmd(uint32_t cmd, uint32_t arg,
		ResponseType resp, uint32_t timeout) {
	// map the driver's response type onto the HAL's
	SDIO_HAL::ResponseType type;
	switch(resp) {
	case ResponseType::None:
		type = SDIO_HAL::ResponseType::None;
		break;
	case ResponseType::Short:
		type = SDIO_HAL::ResponseType::Short;
		break;
	case ResponseType::Long:
		type = SDIO_HAL::ResponseType::Long;
		break;
	case ResponseType::R3Resp:
		// R3 is transported on the bus as a short response
		type = SDIO_HAL::ResponseType::Short;
		break;
	}
	sdio_evt.reset();
	SDIO_HAL::clearInterrupt(SDIO_STATIC_FLAGS);
	SDIO_HAL::sendCommand(arg, cmd, type);
	// bounded spin until sent / response / timeout / CRC-fail is flagged
	uint32_t deadlockPreventer = 10000;
	while (((SDIO->STA) & (SDIO_STA_CMDREND | SDIO_STA_CTIMEOUT |
			SDIO_STA_CCRCFAIL | SDIO_STA_CMDSENT)) == 0 && --deadlockPreventer)
		;
	uint32_t status = SDIO_HAL::getInterruptFlags();
	if(resp == ResponseType::None) {
		// no response expected: success once the command went out
		if(status & SDIO_HAL::Interrupt::CMDSENT) {
			XPCC_LOG_DEBUG .printf("cmd %d sent\n", cmd);
			return 1;
		}
	} else {
		if(status & (SDIO_HAL::Interrupt::CMDREND|
				SDIO_HAL::Interrupt::CCRCFAIL|SDIO_HAL::Interrupt::CTIMEOUT)) {
			// CMDREND is success; CCRCFAIL also counts as success for R3
			// responses (no CRC in R3)
			if ((status & SDIO_HAL::Interrupt::CMDREND)
					|| ((status & SDIO_HAL::Interrupt::CCRCFAIL)
							&& resp == ResponseType::R3Resp)) {
				return 1;
			}
			XPCC_LOG_DEBUG .printf("cmd %d err st %x\n", cmd, SDIO->STA);
			SDIO_HAL::clearInterrupt(SDIO_STATIC_FLAGS);
			return 0;
		}
	}
	XPCC_LOG_DEBUG .printf("cmd %d timeout\n", cmd);
	return 0;
}
// v1 (pre-2.0) card bring-up is not implemented; such cards are rejected.
bool xpcc::stm32::SDIO_SDCard::init_v1_card() {
	return false;
}

// v2/SDHC bring-up: loop ACMD41 until the card reports ready, read CID and
// RCA, read the CSD for capacity, enter transfer state, set 512-byte
// blocks, switch to 4-bit bus and a fast clock.
bool xpcc::stm32::SDIO_SDCard::init_v2_card() {
	// first ACMD41 (CMD55 prefix) with voltage window and HCS bit set
	cmd(55, 0, ResponseType::Short);
	cmd(41, (uint32_t) 0x80100000 | (uint32_t) 0x40000000, ResponseType::R3Resp);
	// NOTE(review): no timeout here — a card that never clears its busy
	// bit hangs this loop forever.
	while (1) {
		////Send ACMD41
		//CMD55: next command is application-specific
		cmd(55, 0, ResponseType::Short);
		//ACMD41 (Response is R3 which does not contain any CRC)
		//0x40000000 in the argument announces SDHC support; the response
		//tells us whether the card is SC or HC
		cmd(41, (uint32_t) 0x80100000 | (uint32_t) 0x40000000,
				ResponseType::R3Resp);
		uint32_t response = SDIO_HAL::getResponse(SDIO_HAL::SDIOResp::RESP1);
		//Check the ready status in the response (R3)
		if ((response >> 31) == 1) { //When card is busy this bit will be 0
			//Card is now initialized. Check to see if SD is SC or HC
			SDType = (response & 0x40000000) >> 30; //1=HC, 0=SC
			break;
		}
	}
	//Now we are in the Ready State. Ask for CID using CMD2.
	//Response is R2; RESP1-4 are filled with the CID, ignored here.
	cmd(2, 0, ResponseType::Long);
	//Card is in identification mode; request the RCA number with CMD3
	cmd(3, 0, ResponseType::Short);
	//Read the RCA (upper 16 bits of the response)
	RCA = SDIO_HAL::getResponse(SDIO_HAL::SDIOResp::RESP1)>>16;
	// fetch the 128-bit CSD and repack it byte-swapped for ext_bits()
	cmd(SD_CMD_SEND_CSD, RCA<<16, ResponseType::Long);
	uint32_t csd[4];
	csd[0] = __builtin_bswap32(SDIO_HAL::getResponse(SDIO_HAL::SDIOResp::RESP1));
	csd[1] = __builtin_bswap32(SDIO_HAL::getResponse(SDIO_HAL::SDIOResp::RESP2));
	csd[2] = __builtin_bswap32(SDIO_HAL::getResponse(SDIO_HAL::SDIOResp::RESP3));
	csd[3] = __builtin_bswap32(SDIO_HAL::getResponse(SDIO_HAL::SDIOResp::RESP4));
	_sectors = _sd_sectors((uint8_t*)csd);
	//Put the card in transfer mode using CMD7; clock speed and bus width
	//can only be changed in transfer mode
	cmd(7, (RCA << 16));
	//set block size
	cmd(SD_CMD_SET_BLOCKLEN, 512);
	//change bus width
	sd_wide_bus(true);
	SDIO_HAL::setClockDiv(0); // set clock rate 12mhz
	//card is ready
	return true;
}
// Switches the card to 4-bit data mode via ACMD6 and, on success, widens
// the host controller to match. Disabling wide bus is not implemented.
void xpcc::stm32::SDIO_SDCard::sd_wide_bus(bool enable) {
	if (!enable) {
		return;
	}
	// APP_CMD prefix addressed to our card
	if (!cmd(SD_CMD_APP_CMD, RCA << 16)) {
		return;
	}
	/*!< Send ACMD6 APP_CMD with argument as 2 for wide bus mode */
	if (cmd(SD_CMD_APP_SD_SET_BUSWIDTH, 0x02)) {
		SDIO_HAL::setBusWidth(SDIO_HAL::BusWidth::_4b);
	}
}
// Arms one DMA-driven SDIO data transaction of `len` bytes in 512-byte
// blocks (`len` is defaulted at the declaration — call sites pass two
// arguments for single-block transfers). For writes, the bus clock is
// gated until the TX FIFO has been preloaded by DMA — presumably to avoid
// an underrun at the start of the block; confirm against the SDIO manual.
bool xpcc::stm32::SDIO_SDCard::startBlockTransfer(uint8_t* block, bool write, uint32_t len) {
	// keep interrupts out while the data path is being armed
	xpcc::atomic::Lock lock;
	if(write) {
		SDIO_HAL::setClockState(false);
	}
	dma_evt.reset();
	sdio_evt.reset();
	SDIO_HAL::clearInterrupt(SDIO_STATIC_FLAGS);
	SDIO_HAL::startDataTransaction(write ? SDIO_HAL::TransferDir::ToCard :
			SDIO_HAL::TransferDir::ToSDIO, len,
			SDIO_HAL::TransferMode::Block, SDIO_HAL::DataBlockSize::_512b);
	if(write) {
		//wait for fifo to fill up (bounded spin)
		uint32_t deadlockPreventer = 10000;
		while(!SDIO_HAL::getInterruptStatus(SDIO_HAL::Interrupt::TXFIFOF) && --deadlockPreventer);
		//resume clock
		SDIO_HAL::setClockState(true);
	}
	return true;
}
// Decodes card capacity from the raw 128-bit CSD register image and sets
// the address scale `cdv` (512 for byte-addressed CSD v1.0 cards, 1 for
// block-addressed v2.0/SDHC). Returns the number of 512-byte sectors.
// NOTE(review): the 64-bit block count is returned as uint32_t, and the
// v2.0 branch reads only csd[63:48] while CSD v2.0 C_SIZE spans [69:48] —
// confirm behavior for very large cards. printf also uses %d for
// uint32_t values.
uint32_t xpcc::stm32::SDIO_SDCard::_sd_sectors(uint8_t* csd) {
	uint32_t c_size, c_size_mult, read_bl_len;
	uint32_t block_len, mult, blocknr;
	uint32_t hc_c_size;
	uint64_t blocks, capacity;
	// csd_structure : csd[127:126]
	// c_size        : csd[73:62]
	// c_size_mult   : csd[49:47]
	// read_bl_len   : csd[83:80] - the *maximum* read block length
	int csd_structure = ext_bits(csd, 127, 126);
	switch (csd_structure) {
	case 0:
		// CSD v1.0: capacity = (C_SIZE+1) * 2^(C_SIZE_MULT+2) * block_len
		cdv = 512;
		c_size = ext_bits(csd, 73, 62);
		c_size_mult = ext_bits(csd, 49, 47);
		read_bl_len = ext_bits(csd, 83, 80);
		block_len = 1 << read_bl_len;
		mult = 1 << (c_size_mult + 2);
		blocknr = (c_size + 1) * mult;
		capacity = blocknr * block_len;
		blocks = capacity / 512;
		XPCC_LOG_DEBUG.printf(
				"SDCard\nc_size: %d \ncapacity: %lld \nsectors: %lld\n", c_size,
				capacity, blocks);
		break;
	case 1:
		// CSD v2.0 (SDHC): capacity = (C_SIZE+1) * 512 KiB
		cdv = 1;
		hc_c_size = ext_bits(csd, 63, 48);
		blocks = ((hc_c_size + 1) * 1024) /*- 1*/;
		capacity = (blocks * 512) / 1000000;
		XPCC_LOG_DEBUG.printf(
				"SDHC Card \nhc_c_size: %d\ncapacity: %lldMiB \nsectors: %lld\n",
				hc_c_size, capacity, blocks);
		break;
	default:
		XPCC_LOG_DEBUG.printf("CSD struct unsupported\n");
		return 0;
	};
	return blocks;
}

// Sector count cached by init() from the CSD.
uint32_t xpcc::stm32::SDIO_SDCard::getSectorCount() {
	return _sectors;
}
// Blocks until the DMA stream and the SDIO data path both finish the
// transaction armed by startBlockTransfer(). Success requires the DBCKEND
// (data block end) flag; DTIMEOUT/DCRCFAIL/STBITERR also wake the waiter
// and are reported as failure.
bool xpcc::stm32::SDIO_SDCard::waitTransfer() {
	sdio_evt.reset();
	//enable interrupt on data end or any data-phase error
	SDIO_HAL::setInterruptMask(SDIO_HAL::Interrupt::DTIMEOUT|SDIO_HAL::Interrupt::DCRCFAIL|
			SDIO_HAL::Interrupt::DATAEND|SDIO_HAL::Interrupt::STBITERR);
	//wait for both completion events (1000-tick timeout each)
	bool r1 = dma_evt.wait(1000);
	bool r2 = sdio_evt.wait(1000);
	if(!r1 || !r2) {
		XPCC_LOG_DEBUG .printf("r1 r2 timeout\n");
	}
	// bounded wait for the SDIO data state machine to go idle
	Timeout<> t(1000);
	while(SDIO_HAL::getInterruptStatus(SDIO_HAL::Interrupt::TXACT | SDIO_HAL::Interrupt::RXACT)) {
#ifdef XPCC_CHIBI_RTOS
		chThdSleepMicroseconds(100);
#else
		yield();
#endif
		if(t.isExpired()) {
			XPCC_LOG_DEBUG .printf("SDIO ACT wait timeout\n");
			return false;
		}
	}
	if (!SDIO_HAL::getInterruptStatus(SDIO_HAL::Interrupt::DBCKEND)) { //An Error has occured.
		XPCC_LOG_DEBUG .printf("SDIO:Data Transmission Error 0x%x\n", SDIO_HAL::getInterruptFlags());
		return false;
	}
	return true;
}
// Configures the DMA stream for one SDIO transfer. Rejects buffers that
// are not word-aligned (the stream is configured word-wide) or that lie in
// the 0x10000000 CCM region, which this driver treats as unreachable by
// DMA. For reads, SDIO's DMA request is enabled before the stream; for
// writes, after the stream is armed.
bool xpcc::stm32::SDIO_SDCard::dmaInit(uint8_t* block, bool write) {
	if(((uint32_t)block & 3) != 0) {
		XPCC_LOG_DEBUG .printf("Err SD Block (0x%x) is not 4 byte aligned\n", block);
		return false;
	}
	if((uint32_t)block & 0x10000000) {
		XPCC_LOG_DEBUG << "Err SD Block is in CCM memory\n";
		return false;
	}
	//if read, enable sdio dma before initializing dma channel
	if(!write)
		SDIO_HAL::DMACmd(ENABLE);
	dma_stm.disable();
	dma_stm.memoryTargetConfig((uint32_t)block, dma::Memory::Memory_0);
	dma_stm.setXferDirection(write?dma::XferDir::MemoryToPeripheral : dma::XferDir::PeripheralToMemory);
	// the SDIO peripheral is the flow controller, so no count is needed
	dma_stm.setCurrDataCounter(0);
	//Enable the DMA (When it is enabled, it starts to respond dma requests)
	dma_stm.enable();
	if(write)
		SDIO_HAL::DMACmd(ENABLE);
	return 1;
}

// Issues SEND_STATUS (CMD13) and extracts the CURRENT_STATE field (R1
// status bits 12:9, mask 0x1e00) into `status`.
bool xpcc::stm32::SDIO_SDCard::getCardStatus(uint8_t& status) {
	// Send SEND_STATUS command
	if(!cmd(SD_CMD_SEND_STATUS,RCA << 16)) {
		return false;// CMD13
	}
	// Find out card status
	status = (SDIO_HAL::getResponse(SDIO_HAL::SDIOResp::RESP1) & 0x1e00) >> 9;
	return true;
}

// SDIO interrupt: mask further interrupts, then wake the waiting thread.
void xpcc::stm32::SDIO_SDCard::handleIRQ() {
	//disable interrupts
	SDIO->MASK = 0;
	//signal threads
	sdio_evt.signal();
}
|
#include "graphics/window.h"
#include <iostream>
using namespace SnowEngine;
using namespace SnowEngine::Graphics;
// Opens a 1280x720 window and runs the render loop — clear to a blue tint,
// then redraw/update — until the user closes the window.
int main()
{
	Window window("SnowEngine!", 1280, 720);
	glClearColor(0.2f, 0.3f, 0.8f, 1.0f);
	while (!window.shouldClose())
	{
		window.clear();
		window.update();
	}
	return 0;
}
|
/****************************************************************************
Copyright (c) 2010-2012 cocos2d-x.org
Copyright (c) 2013-2016 Chukong Technologies Inc.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#include "audio/include/SimpleAudioEngine.h"
#include <map>
#include <cstdlib>
#include "audio/win32/MciPlayer.h"
#include "platform/CCFileUtils.h"
using namespace cocos2d;
using namespace std;
namespace CocosDenshion {
typedef map<unsigned int, MciPlayer *> EffectList;
typedef pair<unsigned int, MciPlayer *> Effect;
static char s_szRootPath[MAX_PATH];
static DWORD s_dwRootLen;
static char s_szFullPath[MAX_PATH];
static std::string _FullPath(const char * szPath);
static unsigned int _Hash(const char *key);
#define BREAK_IF(cond) if (cond) break;
// Lazily-constructed registry of preloaded effect players, keyed by the
// path hash produced by _Hash().
static EffectList& sharedList()
{
    static EffectList s_List;
    return s_List;
}

// Lazily-constructed player used for background music (single instance).
static MciPlayer& sharedMusic()
{
    static MciPlayer s_Music;
    return s_Music;
}
SimpleAudioEngine::SimpleAudioEngine()
{
}

SimpleAudioEngine::~SimpleAudioEngine()
{
}

// Returns the process-wide engine instance (function-local static).
SimpleAudioEngine* SimpleAudioEngine::getInstance()
{
    static SimpleAudioEngine s_SharedEngine;
    return &s_SharedEngine;
}

// Shuts the engine down: closes the music player and destroys every
// cached effect player.
void SimpleAudioEngine::end()
{
    sharedMusic().Close();

    for (auto& iter : sharedList())
    {
        delete iter.second;
        iter.second = nullptr;
    }
    sharedList().clear();
    return;
}
//////////////////////////////////////////////////////////////////////////
// BackgroundMusic
//////////////////////////////////////////////////////////////////////////

// Opens the resolved file in the shared music player (identified by its
// path hash) and starts playback; -1 repeats are passed for looping.
void SimpleAudioEngine::playBackgroundMusic(const char* pszFilePath, bool bLoop)
{
    if (! pszFilePath)
    {
        return;
    }

    sharedMusic().Open(_FullPath(pszFilePath).c_str(), _Hash(pszFilePath));
    sharedMusic().Play((bLoop) ? -1 : 1);
}

// Stops playback; closing the player (bReleaseData) also releases the
// opened file.
void SimpleAudioEngine::stopBackgroundMusic(bool bReleaseData)
{
    if (bReleaseData)
    {
        sharedMusic().Close();
    }
    else
    {
        sharedMusic().Stop();
    }
}

void SimpleAudioEngine::pauseBackgroundMusic()
{
    sharedMusic().Pause();
}

void SimpleAudioEngine::resumeBackgroundMusic()
{
    sharedMusic().Resume();
}

void SimpleAudioEngine::rewindBackgroundMusic()
{
    sharedMusic().Rewind();
}

// Always false for this backend.
bool SimpleAudioEngine::willPlayBackgroundMusic()
{
    return false;
}

bool SimpleAudioEngine::isBackgroundMusicPlaying()
{
    return sharedMusic().IsPlaying();
}
//////////////////////////////////////////////////////////////////////////
// effect function
//////////////////////////////////////////////////////////////////////////

// Plays the effect at pszFilePath (preloading it if needed) and returns
// the path-hash id used by the other effect calls. pitch/pan/gain are
// accepted for cross-platform API compatibility but ignored here.
unsigned int SimpleAudioEngine::playEffect(const char* pszFilePath, bool bLoop,
                                           float pitch, float pan, float gain)
{
    unsigned int nRet = _Hash(pszFilePath);

    preloadEffect(pszFilePath);

    EffectList::iterator p = sharedList().find(nRet);
    if (p != sharedList().end())
    {
        p->second->Play((bLoop) ? -1 : 1);
    }

    return nRet;
}

// Stops the effect with the given id, if it is cached.
void SimpleAudioEngine::stopEffect(unsigned int nSoundId)
{
    EffectList::iterator p = sharedList().find(nSoundId);
    if (p != sharedList().end())
    {
        p->second->Stop();
    }
}
// Loads the effect at pszFilePath into the cache, keyed by its path hash.
// No-op for a null path or an already-cached effect; if the open fails
// (the player's sound id does not match the requested id), the player is
// destroyed and the cache entry removed.
void SimpleAudioEngine::preloadEffect(const char* pszFilePath)
{
    // Use the hash's own unsigned type: the original stored it in a signed
    // int, silently converting at every lookup against the unsigned map key.
    unsigned int nRet = 0;
    do
    {
        BREAK_IF(! pszFilePath);

        nRet = _Hash(pszFilePath);

        BREAK_IF(sharedList().end() != sharedList().find(nRet));

        sharedList().insert(Effect(nRet, new MciPlayer()));
        MciPlayer * pPlayer = sharedList()[nRet];
        pPlayer->Open(_FullPath(pszFilePath).c_str(), nRet);

        // Open() records the sound id on success; a match means the
        // effect is ready and stays cached.
        BREAK_IF(nRet == pPlayer->GetSoundID());

        // open failed: roll back the cache entry
        delete pPlayer;
        sharedList().erase(nRet);
        nRet = 0;
    } while (0);
}
// Pauses the cached effect with the given id, if any.
void SimpleAudioEngine::pauseEffect(unsigned int nSoundId)
{
    EffectList::iterator p = sharedList().find(nSoundId);
    if (p != sharedList().end())
    {
        p->second->Pause();
    }
}

// Pauses every cached effect.
void SimpleAudioEngine::pauseAllEffects()
{
    for (auto& iter : sharedList())
    {
        iter.second->Pause();
    }
}

// Resumes the cached effect with the given id, if any.
void SimpleAudioEngine::resumeEffect(unsigned int nSoundId)
{
    EffectList::iterator p = sharedList().find(nSoundId);
    if (p != sharedList().end())
    {
        p->second->Resume();
    }
}

// Resumes every cached effect.
void SimpleAudioEngine::resumeAllEffects()
{
    for (auto& iter : sharedList())
    {
        iter.second->Resume();
    }
}

// Stops every cached effect; the players stay cached for reuse.
void SimpleAudioEngine::stopAllEffects()
{
    for (auto& iter : sharedList())
    {
        iter.second->Stop();
    }
}

// Background-music preloading is not implemented for this backend.
void SimpleAudioEngine::preloadBackgroundMusic(const char* pszFilePath)
{
}

// Destroys and removes the cached effect for pszFilePath, if present.
void SimpleAudioEngine::unloadEffect(const char* pszFilePath)
{
    unsigned int nID = _Hash(pszFilePath);

    EffectList::iterator p = sharedList().find(nID);
    if (p != sharedList().end())
    {
        delete p->second;
        p->second = nullptr;
        sharedList().erase(nID);
    }
}
//////////////////////////////////////////////////////////////////////////
// volume interface
//////////////////////////////////////////////////////////////////////////

// Volume control is not implemented by this backend; getters report full
// volume and setters are no-ops.
float SimpleAudioEngine::getBackgroundMusicVolume()
{
    return 1.0;
}

void SimpleAudioEngine::setBackgroundMusicVolume(float volume)
{
}

float SimpleAudioEngine::getEffectsVolume()
{
    return 1.0;
}

void SimpleAudioEngine::setEffectsVolume(float volume)
{
}
//////////////////////////////////////////////////////////////////////////
// static function
//////////////////////////////////////////////////////////////////////////

// Resolves a (possibly relative) resource path through FileUtils.
static std::string _FullPath(const char * szPath)
{
    return FileUtils::getInstance()->fullPathForFilename(szPath);
}
// Case-insensitive multiply/XOR hash over the key (FNV-style prime
// 16777619, but seeded at 0 rather than the FNV offset basis). Used as
// the cache id for sound file paths.
unsigned int _Hash(const char *key)
{
    unsigned int hash = 0;
    for (const char *p = key; *p != '\0'; ++p)
    {
        hash *= 16777619;
        hash ^= (unsigned int) (unsigned char) toupper(*p);
    }
    return (hash);
}
} // end of namespace CocosDenshion
|
/******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include "cyber/init.h"
#include <libgen.h>
#include <stdio.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#include <csignal>
#include <string>
#include "cyber/binary.h"
#include "cyber/common/global_data.h"
#include "cyber/data/data_dispatcher.h"
#include "cyber/logger/async_logger.h"
#include "cyber/scheduler/scheduler.h"
#include "cyber/service_discovery/topology_manager.h"
#include "cyber/task/task.h"
#include "cyber/timer/timer_manager.h"
#include "cyber/transport/transport.h"
namespace apollo {
namespace cyber {
using apollo::cyber::scheduler::Scheduler;
using apollo::cyber::service_discovery::TopologyManager;
namespace {
bool g_atexit_registered = false;
std::mutex g_mutex;
logger::AsyncLogger* async_logger = nullptr;
void InitLogger(const char* binary_name) {
const char* slash = strrchr(binary_name, '/');
if (slash) {
::apollo::cyber::Binary::SetName(slash + 1);
} else {
::apollo::cyber::Binary::SetName(binary_name);
}
CHECK_NOTNULL(common::GlobalData::Instance());
// Init glog
google::InitGoogleLogging(binary_name);
google::SetLogDestination(google::ERROR, "");
google::SetLogDestination(google::WARNING, "");
google::SetLogDestination(google::FATAL, "");
// Init async logger
async_logger = new ::apollo::cyber::logger::AsyncLogger(
google::base::GetLogger(FLAGS_minloglevel), 2 * 1024 * 1024);
google::base::SetLogger(FLAGS_minloglevel, async_logger);
async_logger->Start();
}
// Stops the async logger if InitLogger() created one; no-op otherwise.
void StopLogger() {
  if (async_logger == nullptr) {
    return;
  }
  async_logger->Stop();
}
} // namespace
// SIGINT handler: request a cooperative shutdown unless the framework has
// already fully shut down.
void OnShutdown(int sig) {
  static_cast<void>(sig);
  if (GetState() == STATE_SHUTDOWN) {
    return;
  }
  SetState(STATE_SHUTTING_DOWN);
}
// atexit hook: releases all framework resources on normal process exit.
void ExitHandle() { Clear(); }
// One-time framework initialization: logger, scheduler thread attributes,
// SIGINT handler, and the atexit cleanup hook.
// Returns false if already initialized or if atexit registration fails.
bool Init(const char* binary_name) {
  std::lock_guard<std::mutex> lg(g_mutex);
  // Only the first caller may initialize; later calls are rejected.
  if (GetState() != STATE_UNINITIALIZED) {
    return false;
  }
  InitLogger(binary_name);
  // Hand the async logger's worker thread to the scheduler so it can apply
  // the configured inner-thread attributes (presumably affinity/priority —
  // confirm against the scheduler implementation).
  auto thread = const_cast<std::thread*>(async_logger->LogThread());
  scheduler::Instance()->SetInnerThreadAttr("async_log", thread);
  std::signal(SIGINT, OnShutdown);
  // Register exit handlers
  if (!g_atexit_registered) {
    if (std::atexit(ExitHandle) != 0) {
      AERROR << "Register exit handle failed";
      return false;
    }
    AINFO << "Register exit handle succ.";
    g_atexit_registered = true;
  }
  SetState(STATE_INITIALIZED);
  return true;
}
// Tears down the framework singletons in a fixed order; the logger is
// stopped last so the other modules can still log during their cleanup.
// Idempotent: safe to call from both user code and the atexit handler.
void Clear() {
  std::lock_guard<std::mutex> lg(g_mutex);
  // Nothing to do if never initialized or already shut down.
  if (GetState() == STATE_SHUTDOWN || GetState() == STATE_UNINITIALIZED) {
    return;
  }
  TaskManager::CleanUp();
  TimerManager::CleanUp();
  scheduler::CleanUp();
  service_discovery::TopologyManager::CleanUp();
  transport::Transport::CleanUp();
  StopLogger();
  SetState(STATE_SHUTDOWN);
}
} // namespace cyber
} // namespace apollo
|
/**
* \file
* \copyright
* Copyright (c) 2012-2020, OpenGeoSys Community (http://www.opengeosys.org)
* Distributed under a Modified BSD License.
* See accompanying file LICENSE.txt or
* http://www.opengeosys.org/project/license
*/
#include <numeric>
#include <Eigen/Eigen>
#include "gtest/gtest.h"
#include "MeshLib/MeshGenerators/MeshGenerator.h"
#include "MeshLib/Mesh.h"
#include "MeshLib/Elements/Element.h"
#include "MeshLib/PropertyVector.h"
// Fixture that hands every test case a fresh regular hexahedral mesh of
// mesh_size^3 cells spanning the unit cube.
class MeshLibProperties : public ::testing::Test
{
public:
    MeshLibProperties()
        : mesh(MeshLib::MeshGenerator::generateRegularHexMesh(1.0, mesh_size))
    {
    }
    ~MeshLibProperties() override { delete mesh; }

    static std::size_t const mesh_size = 5;
    MeshLib::Mesh* mesh{nullptr};
};
std::size_t const MeshLibProperties::mesh_size;
// A newly created cell property vector must report the requested name and
// item type, exactly one component, and zero entries.
TEST_F(MeshLibProperties, PropertyVectorTestMetaData)
{
    ASSERT_TRUE(mesh != nullptr);

    std::string const prop_name("TestProperty");
    auto const* const property =
        mesh->getProperties().createNewPropertyVector<double>(
            prop_name, MeshLib::MeshItemType::Cell);

    ASSERT_EQ(0u, property->getPropertyName().compare(prop_name));
    ASSERT_EQ(MeshLib::MeshItemType::Cell, property->getMeshItemType());
    ASSERT_EQ(1u, property->getNumberOfComponents());
    ASSERT_EQ(0u, property->size());
}
// An integration-point property stores n_integration_points values per
// element. This test fills one with the pattern xx.yy (element id xx,
// integration point yy) and verifies it back via per-element offsets.
TEST_F(MeshLibProperties, PropertyVectorTestIntegrationPoint)
{
    ASSERT_TRUE(mesh != nullptr);
    int const n_integration_points = 4;
    int const vector_length = 6;
    std::string const prop_name("ip_field");
    auto* const p_ptr = mesh->getProperties().createNewPropertyVector<double>(
        prop_name, MeshLib::MeshItemType::IntegrationPoint, vector_length);
    ASSERT_TRUE(p_ptr != nullptr);
    auto& p = *p_ptr;
    ASSERT_EQ(p.getPropertyName(), prop_name);
    ASSERT_EQ(MeshLib::MeshItemType::IntegrationPoint, p.getMeshItemType());
    ASSERT_EQ(vector_length, p.getNumberOfComponents());
    ASSERT_EQ(0u, p.size());
    // Fill the property vector with double data in following pattern:
    // for element's id xx and integration point number yy the value is xx.yy.
    p.resize(mesh->getNumberOfElements() * n_integration_points);
    // offsets[e] is the first index in p that belongs to element e.
    std::vector<std::size_t> offsets(mesh->getNumberOfElements());
    std::size_t offset = 0; // last position in the property vector
    for (auto const& e : mesh->getElements())
    {
        offsets[e->getID()] = offset;
        for (int ip = 0; ip < n_integration_points; ++ip)
        {
            p[offset + ip] = e->getID() + ip * 0.01;
        }
        offset += n_integration_points;
    }
    // Check the values at each offset.
    // Last element is checked after the for-loop.
    std::size_t i = 0;
    for (; i < offsets.size() - 1; ++i)
    {
        // Extent of element i is bounded by the next element's offset.
        std::size_t const size = offsets[i + 1] - offsets[i];
        ASSERT_EQ(n_integration_points, size);
        for (int ip = 0; ip < n_integration_points; ++ip)
        {
            ASSERT_EQ(i + ip * 0.01, p[offsets[i] + ip]);
        }
    }
    { // Last element
        // The last element's extent is bounded by the vector's end instead
        // of a following offset.
        std::size_t const size = p.size() - offsets[i];
        ASSERT_EQ(n_integration_points, size);
        for (int ip = 0; ip < n_integration_points; ++ip)
        {
            ASSERT_EQ(i + ip * 0.01, p[offsets[i] + ip]);
        }
    }
}
// Create a scalar double property on cells, fill it with 1..size, read it
// back through the Properties container, and finally remove it again.
TEST_F(MeshLibProperties, AddDoubleProperties)
{
    ASSERT_TRUE(mesh != nullptr);
    const std::size_t size(mesh_size * mesh_size * mesh_size);
    std::string const prop_name("TestProperty");

    auto* const pv = mesh->getProperties().createNewPropertyVector<double>(
        prop_name, MeshLib::MeshItemType::Cell);
    ASSERT_EQ(0u, pv->size());
    pv->resize(size);
    ASSERT_EQ(size, pv->size());

    // Fill with the sequence 1, 2, ..., size and verify it.
    std::iota(pv->begin(), pv->end(), 1);
    for (std::size_t k = 0; k < size; ++k)
    {
        ASSERT_EQ(static_cast<double>(k + 1), (*pv)[k]);
    }

    // Fetch the vector again via its name and compare element-wise.
    ASSERT_TRUE(mesh->getProperties().existsPropertyVector<double>(prop_name));
    auto* const pv_readback =
        mesh->getProperties().getPropertyVector<double>(prop_name);
    for (std::size_t k = 0; k < size; ++k)
    {
        ASSERT_EQ((*pv)[k], (*pv_readback)[k]);
    }

    mesh->getProperties().removePropertyVector(prop_name);
    ASSERT_FALSE(mesh->getProperties().existsPropertyVector<double>(prop_name));
}
// A double* "group" property maps every cell to one of n_prop_val_groups
// shared values. This test builds the cell->group mapping, initializes the
// group values, checks the dereferenced per-cell values, and removes the
// property again.
TEST_F(MeshLibProperties, AddDoublePointerProperties)
{
    ASSERT_TRUE(mesh != nullptr);
    std::string const& prop_name("GroupProperty");
    // check if a property with the name is already assigned to the mesh
    ASSERT_FALSE(mesh->getProperties().hasPropertyVector(prop_name));
    // data needed for the property
    const std::size_t n_prop_val_groups(10);
    const std::size_t n_items(mesh_size*mesh_size*mesh_size);
    std::vector<std::size_t> prop_item2group_mapping(n_items);
    // create simple mat_group to index mapping: group j covers the j-th
    // tenth of the item range
    for (std::size_t j(0); j<n_prop_val_groups; j++) {
        auto const lower(static_cast<std::size_t>(
            (static_cast<double>(j) / n_prop_val_groups) * n_items));
        auto const upper(static_cast<std::size_t>(
            (static_cast<double>(j + 1) / n_prop_val_groups) * n_items));
        for (std::size_t k(lower); k < upper; k++)
        {
            prop_item2group_mapping[k] = j;
        }
    }
    // obtain PropertyVector data structure
    auto* const group_properties =
        mesh->getProperties().createNewPropertyVector<double*>(
            prop_name, n_prop_val_groups, prop_item2group_mapping,
            MeshLib::MeshItemType::Cell);
    ASSERT_EQ(prop_item2group_mapping.size(), group_properties->size());
    // initialize the property values: group i gets the value i+1
    for (std::size_t i(0); i<n_prop_val_groups; i++) {
        group_properties->initPropertyValue(i, static_cast<double>(i+1));
    }
    // check mapping to values
    for (std::size_t i(0); i<n_prop_val_groups; i++) {
        auto const lower(static_cast<std::size_t>(
            (static_cast<double>(i) / n_prop_val_groups) * n_items));
        auto const upper(static_cast<std::size_t>(
            (static_cast<double>(i + 1) / n_prop_val_groups) * n_items));
        for (std::size_t k(lower); k < upper; k++)
        {
            ASSERT_NEAR(static_cast<double>(i + 1), *(*group_properties)[k],
                        std::numeric_limits<double>::epsilon());
        }
    }
    // the mesh should have the property assigned to cells
    ASSERT_TRUE(mesh->getProperties().hasPropertyVector(prop_name));
    // fetch the properties from the container
    auto const* const group_properties_cpy =
        mesh->getProperties().getPropertyVector<double*>(prop_name);
    ASSERT_FALSE(!group_properties_cpy);
    // note: this compares the stored pointers (shared group values), not
    // the pointed-to doubles
    for (std::size_t k(0); k<n_items; k++) {
        ASSERT_EQ((*group_properties)[k], (*group_properties_cpy)[k]);
    }
    mesh->getProperties().removePropertyVector(prop_name);
    ASSERT_FALSE(
        mesh->getProperties().existsPropertyVector<double*>(prop_name));
}
// Same group-property scheme as AddDoublePointerProperties, but each group
// value is a std::array<double, 3> holding (i, i+1, i+2) for group i.
TEST_F(MeshLibProperties, AddArrayPointerProperties)
{
    ASSERT_TRUE(mesh != nullptr);
    std::string const& prop_name("GroupPropertyWithArray");
    const std::size_t n_prop_val_groups(10);
    const std::size_t n_items(mesh_size*mesh_size*mesh_size);
    std::vector<std::size_t> prop_item2group_mapping(n_items);
    // create simple mat_group to index mapping: group j covers the j-th
    // tenth of the item range
    for (std::size_t j(0); j<n_prop_val_groups; j++) {
        auto const lower(static_cast<std::size_t>(
            (static_cast<double>(j) / n_prop_val_groups) * n_items));
        auto const upper(static_cast<std::size_t>(
            (static_cast<double>(j + 1) / n_prop_val_groups) * n_items));
        for (std::size_t k(lower); k < upper; k++)
        {
            prop_item2group_mapping[k] = j;
        }
    }
    auto* const group_prop_vec =
        mesh->getProperties().createNewPropertyVector<std::array<double, 3>*>(
            prop_name, n_prop_val_groups, prop_item2group_mapping,
            MeshLib::MeshItemType::Cell);
    ASSERT_EQ(prop_item2group_mapping.size(), group_prop_vec->size());
    // initialize the property values: group i gets the array (i, i+1, i+2)
    for (std::size_t i(0); i<n_prop_val_groups; i++) {
        group_prop_vec->initPropertyValue(i,
            std::array<double,3>({{static_cast<double>(i),
                                   static_cast<double>(i+1),
                                   static_cast<double>(i+2)}}
            )
        );
    }
    // check the mapping to values
    for (std::size_t i(0); i<n_prop_val_groups; i++) {
        auto const lower(static_cast<std::size_t>(
            (static_cast<double>(i) / n_prop_val_groups) * n_items));
        auto const upper(static_cast<std::size_t>(
            (static_cast<double>(i + 1) / n_prop_val_groups) * n_items));
        for (std::size_t k(lower); k < upper; k++)
        {
            ASSERT_NEAR(static_cast<double>(i), (*(*group_prop_vec)[k])[0],
                        std::numeric_limits<double>::epsilon());
            ASSERT_NEAR(static_cast<double>(i + 1), (*(*group_prop_vec)[k])[1],
                        std::numeric_limits<double>::epsilon());
            ASSERT_NEAR(static_cast<double>(i + 2), (*(*group_prop_vec)[k])[2],
                        std::numeric_limits<double>::epsilon());
        }
    }
    // re-fetch the vector and compare each array component item-wise
    auto* const group_properties_cpy =
        mesh->getProperties().getPropertyVector<std::array<double, 3>*>(
            prop_name);
    ASSERT_FALSE(!group_properties_cpy);
    for (std::size_t k(0); k<n_items; k++) {
        ASSERT_EQ((*((*group_prop_vec)[k]))[0],
                  (*((*group_properties_cpy)[k]))[0]);
        ASSERT_EQ((*((*group_prop_vec)[k]))[1],
                  (*((*group_properties_cpy)[k]))[1]);
        ASSERT_EQ((*((*group_prop_vec)[k]))[2],
                  (*((*group_properties_cpy)[k]))[2]);
    }
    mesh->getProperties().removePropertyVector(prop_name);
    auto exists =
        mesh->getProperties().existsPropertyVector<std::array<double, 3>*>(
            prop_name);
    ASSERT_FALSE(exists);
}
// Exercises several property vectors of different element types living side
// by side in one Properties container: a grouped std::array<double,3>*
// property, an itemwise std::array<float, 9> property, and an itemwise
// Eigen 3x3 row-major matrix property.
//
// Fix: the [1]-component comparison below previously read
// (*((*group_properties_cpy))[k])[1] — semantically identical, but
// parenthesized differently from every sibling comparison in this file;
// it now uses the uniform (*(X[k]))[i] form.
TEST_F(MeshLibProperties, AddVariousDifferentProperties)
{
    ASSERT_TRUE(mesh != nullptr);
    std::string const& prop_name("GroupVectorProperty");
    // check if the property is already assigned to the mesh
    ASSERT_FALSE(mesh->getProperties().hasPropertyVector(prop_name));
    const std::size_t n_prop_val_groups(10);
    const std::size_t n_items(mesh_size*mesh_size*mesh_size);
    std::vector<std::size_t> prop_item2group_mapping(n_items);
    // create simple mat_group to index mapping: group j covers the j-th
    // tenth of the item range
    for (std::size_t j(0); j<n_prop_val_groups; j++) {
        auto const lower(static_cast<std::size_t>(
            (static_cast<double>(j) / n_prop_val_groups) * n_items));
        auto const upper(static_cast<std::size_t>(
            (static_cast<double>(j + 1) / n_prop_val_groups) * n_items));
        for (std::size_t k(lower); k < upper; k++)
        {
            prop_item2group_mapping[k] = j;
        }
    }
    // create data structure for the property
    auto* const group_properties =
        mesh->getProperties().createNewPropertyVector<std::array<double, 3>*>(
            prop_name, n_prop_val_groups, prop_item2group_mapping,
            MeshLib::MeshItemType::Cell);
    // initialize the property values: group i gets the array (i, i+1, i+2)
    for (std::size_t i(0); i<n_prop_val_groups; i++) {
        group_properties->initPropertyValue(i,
            std::array<double,3>({{static_cast<double>(i),
                                   static_cast<double>(i+1),
                                   static_cast<double>(i+2)}}
            )
        );
    }
    // the mesh should have the property assigned to cells
    ASSERT_TRUE(mesh->getProperties().hasPropertyVector(prop_name));
    // fetch the vector filled with property values from mesh
    auto const* const group_properties_cpy =
        mesh->getProperties().getPropertyVector<std::array<double, 3>*>(
            prop_name);
    ASSERT_FALSE(!group_properties_cpy);
    // compare the content component-wise
    const std::size_t n_elements(mesh_size*mesh_size*mesh_size);
    for (std::size_t k(0); k<n_elements; k++) {
        ASSERT_EQ((*((*group_properties)[k]))[0],
                  (*((*group_properties_cpy)[k]))[0]);
        ASSERT_EQ((*((*group_properties)[k]))[1],
                  (*((*group_properties_cpy)[k]))[1]);
        ASSERT_EQ((*((*group_properties)[k]))[2],
                  (*((*group_properties_cpy)[k]))[2]);
    }
    // *** add a 2nd property ***
    std::string const& prop_name_2("ItemwiseMatrixProperties");
    // check if the property is already assigned to the mesh
    ASSERT_FALSE(mesh->getProperties().hasPropertyVector(prop_name_2));
    const std::size_t n_items_2(mesh_size*mesh_size*mesh_size);
    auto* const array_properties =
        mesh->getProperties().createNewPropertyVector<std::array<float, 9>>(
            prop_name_2, MeshLib::MeshItemType::Cell);
    array_properties->resize(n_items_2);
    // initialize the property values: item i holds (i, i+1, ..., i+8)
    for (std::size_t i(0); i<n_items_2; i++) {
        // init property value
        for (std::size_t k(0); k<(*array_properties)[i].size(); k++) {
            (*array_properties)[i][k] = static_cast<float>(i+k);
        }
    }
    EXPECT_EQ(9, (*array_properties)[0].size());
    // the mesh should have the property assigned to cells
    ASSERT_TRUE(mesh->getProperties().hasPropertyVector(prop_name_2));
    // fetch the vector in order to compare the content
    auto const* const array_properties_cpy =
        mesh->getProperties().getPropertyVector<std::array<float, 9>>(
            prop_name_2);
    ASSERT_FALSE(!array_properties_cpy);
    // compare the values/matrices
    for (std::size_t k(0); k<n_items_2; k++) {
        for (std::size_t j(0); j<(*array_properties)[k].size(); j++) {
            ASSERT_EQ((*array_properties)[k][j], (*array_properties_cpy)[k][j]);
        }
    }
    // *** add a 3rd property ***
    std::string const& prop_name_3("ItemwiseEigenMatrixProperties");
    // check if the property is already assigned to the mesh
    ASSERT_FALSE(mesh->getProperties().hasPropertyVector(prop_name_3));
    auto* const matrix_properties =
        mesh->getProperties()
            .createNewPropertyVector<
                Eigen::Matrix<double, 3, 3, Eigen::RowMajor>>(
                prop_name_3, MeshLib::MeshItemType::Cell);
    // init property values: entry (r, c) of matrix m is m*9 + r*3 + c + 1
    for (auto it = matrix_properties->begin(); it != matrix_properties->end();
         it++)
    {
        for (int r(0); r<it->rows(); r++) {
            for (int c(0); c<it->cols(); c++) {
                (*it)(r,c) = static_cast<double>(
                    std::distance(matrix_properties->begin(),it)+r*it->cols()+c+1);
            }
        }
    }
    // the mesh should have the property assigned to cells
    ASSERT_TRUE(mesh->getProperties().hasPropertyVector(prop_name_3));
    // fetch the vector in order to compare the content
    auto const* const matrix_properties_cpy =
        mesh->getProperties()
            .getPropertyVector<Eigen::Matrix<double, 3, 3, Eigen::RowMajor>>(
                prop_name_3);
    ASSERT_FALSE(!matrix_properties_cpy);
    // compare the values/matrices entry by entry
    auto it_cpy = matrix_properties_cpy->begin();
    for (auto it = matrix_properties->begin(); it != matrix_properties->end();
         it++, it_cpy++)
    {
        for (int r(0); r<it->rows(); r++) {
            for (int c(0); c<it->cols(); c++) {
                ASSERT_EQ((*it)(r,c), (*it_cpy)(r,c));
            }
        }
    }
}
// Copying a Properties object must yield a container holding a
// PropertyVector of the same name whose per-item (dereferenced) values
// compare equal to the original's.
TEST_F(MeshLibProperties, CopyConstructor)
{
    ASSERT_TRUE(mesh != nullptr);
    std::string const& prop_name("GroupProperty");
    // data needed for the property
    const std::size_t n_prop_val_groups(10);
    const std::size_t n_items(mesh_size*mesh_size*mesh_size);
    std::vector<std::size_t> prop_item2group_mapping(n_items);
    // create simple mat_group to index mapping: group j covers the j-th
    // tenth of the item range
    for (std::size_t j(0); j<n_prop_val_groups; j++) {
        auto const lower(static_cast<std::size_t>(
            (static_cast<double>(j) / n_prop_val_groups) * n_items));
        auto const upper(static_cast<std::size_t>(
            (static_cast<double>(j + 1) / n_prop_val_groups) * n_items));
        for (std::size_t k(lower); k < upper; k++)
        {
            prop_item2group_mapping[k] = j;
        }
    }
    // obtain PropertyVector data structure
    auto* const group_properties(
        mesh->getProperties().createNewPropertyVector<double*>(
            prop_name, n_prop_val_groups, prop_item2group_mapping,
            MeshLib::MeshItemType::Cell
        )
    );
    // initialize the property values: group i gets the value i+1
    for (std::size_t i(0); i<n_prop_val_groups; i++) {
        group_properties->initPropertyValue(i, static_cast<double>(i + 1));
    }
    // create a copy from the original Properties object
    MeshLib::Properties properties_copy(mesh->getProperties());
    // check if the Properties have a PropertyVector with the correct name
    ASSERT_TRUE(properties_copy.hasPropertyVector(prop_name));
    // fetch the PropertyVector from the copy of the Properties object
    auto const* const group_properties_cpy(
        properties_copy.getPropertyVector<double*>(prop_name));
    ASSERT_FALSE(!group_properties_cpy);
    // check if the values in the PropertyVector of the copy of the Properties
    // are the same
    for (std::size_t k(0); k<n_items; k++) {
        EXPECT_EQ(*(*group_properties)[k], *(*group_properties_cpy)[k]);
    }
}
// A two-component property: size() counts scalar entries while
// getNumberOfTuples() counts (component-grouped) tuples, so after pushing
// two values per tuple, size == 2 * number_of_tuples.
TEST_F(MeshLibProperties, AddDoublePropertiesTupleSize2)
{
    ASSERT_TRUE(mesh != nullptr);
    const std::size_t number_of_tuples(mesh_size*mesh_size*mesh_size);
    std::string const prop_name("TestProperty");
    auto* const pv = mesh->getProperties().createNewPropertyVector<double>(
        prop_name, MeshLib::MeshItemType::Cell, 2);
    // PropertyVector should be created in a correct way
    ASSERT_NE(nullptr, pv);
    ASSERT_EQ(0u, pv->getPropertyName().compare(prop_name));
    ASSERT_EQ(MeshLib::MeshItemType::Cell, pv->getMeshItemType());
    ASSERT_EQ(2u, pv->getNumberOfComponents());
    ASSERT_EQ(0u, pv->getNumberOfTuples());
    ASSERT_EQ(0u, pv->size());
    // push some values (2 tuples) into the vector; both components of
    // tuple k hold the value k
    for (std::size_t k(0); k<number_of_tuples; k++) {
        pv->push_back(static_cast<double>(k));
        pv->push_back(static_cast<double>(k));
    }
    // check the number of tuples
    ASSERT_EQ(number_of_tuples, pv->getNumberOfTuples());
    ASSERT_EQ(pv->getNumberOfTuples()*pv->getNumberOfComponents(), pv->size());
    // check the values
    for (std::size_t k(0); k<number_of_tuples; k++) {
        ASSERT_EQ(static_cast<double>(k), (*pv)[2*k]);
        ASSERT_EQ(static_cast<double>(k), (*pv)[2*k+1]);
    }
}
|
class Solution {
    vector<int> nums;
public:
    Solution(vector<int>& nums) : nums(nums) {}

    /** Resets the array to its original configuration and return it. */
    vector<int> reset() {
        return nums;
    }

    /** Returns a random shuffling of the array. */
    vector<int> shuffle() {
        // Fisher–Yates, forward variant: position i trades places with a
        // random position drawn uniformly-ish (modulo bias of rand()) from
        // 0..i inclusive.
        vector<int> shuffled(nums);
        for (int i = 0; i < size(shuffled); ++i) {
            int j = rand() % (i + 1);
            swap(shuffled[i], shuffled[j]);
        }
        return shuffled;
    }
};
/**
* Your Solution object will be instantiated and called as such:
* Solution* obj = new Solution(nums);
* vector<int> param_1 = obj->reset();
* vector<int> param_2 = obj->shuffle();
*/
|
#include <iostream>
#include <cstdio>
#include <bits/stdc++.h>
using namespace std;
// Returns the largest of the four given integers.
//
// The previous implementation copied the arguments into an array, derived
// the element count from sizeof(numArray)/sizeof(numArray[4]) — which reads
// as an out-of-bounds access and is legal only because sizeof is
// unevaluated — and sorted the whole array just to pick the last element.
// std::max with an initializer list expresses the intent directly in O(1).
int max_of_four(int a, int b, int c, int d){
    return std::max({a, b, c, d});
}
// Reads four whitespace-separated integers from stdin and prints their
// maximum (no trailing newline, matching the original output format).
int main() {
    int w, x, y, z;
    scanf("%d %d %d %d", &w, &x, &y, &z);
    printf("%d", max_of_four(w, x, y, z));
    return 0;
}
|
// Copyright 2022 Alejandro Gallo
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// [[file:../../atrip.org::*Prolog][Prolog:1]]
#pragma once
#include <iostream>
#include <algorithm>
#include <vector>
#include <mpi.h>
#include <atrip/Tuples.hpp>
#include <atrip/Utils.hpp>
#include <atrip/Blas.hpp>
namespace atrip {
// Identity for real scalar types; the specialization below returns the
// complex conjugate when the scalar type is Complex.
template <typename FF> FF maybeConjugate(const FF a) { return a; }
template <> Complex maybeConjugate(const Complex a) { return std::conj(a); }
// Compile-time traits of the scalar type F (double or Complex):
// complex-ness and the matching MPI datatype for communication.
namespace traits {
  template <typename FF> bool isComplex() { return false; }
  template <> bool isComplex<Complex>() { return true; }
  namespace mpi {
    // Maps the scalar type to the MPI datatype used when sending slices.
    template <typename FF> MPI_Datatype datatypeOf(void);
    template <> MPI_Datatype datatypeOf<double>() { return MPI_DOUBLE; }
    template <> MPI_Datatype datatypeOf<Complex>() { return MPI_DOUBLE_COMPLEX; }
  }
}
// A Slice is one distributed piece of tensor data used in the (a,b,c)
// triples loop: it records which partial tuple it holds (Info::tuple), its
// lifecycle State, the MPI Location its payload can be fetched from, and
// the pointer to the payload buffer itself.
// NOTE(review): high-level description inferred from the visible members;
// confirm against atrip.org.
template <typename F=double>
struct Slice {
// Prolog:1 ends here
// [[file:../../atrip.org::*Location][Location:1]]
  // MPI coordinates of a slice's payload: the owning rank and the source
  // (slice index) within that rank.
  struct Location { size_t rank; size_t source; };
// Location:1 ends here
// [[file:../../atrip.org::*Type][Type:1]]
  // Which projection of the (a, b, c) tuple the slice carries.
  enum Type
    { A = 10
    , B
    , C
    // Two-parameter slices
    , AB = 20
    , BC
    , AC
    // for abci and the doubles
    , CB
    , BA
    , CA
    // The non-typed slice
    , Blank = 404
    };
// Type:1 ends here
// [[file:../../atrip.org::*State][State:1]]
  // Lifecycle of a slice. The distinctive numeric values are sentinels
  // that make states easy to spot in debug output.
  enum State {
    Fetch = 0,           // data still has to be requested
    Dispatched = 2,      // an MPI request is in flight (see unwrapAndMarkReady)
    Ready = 1,           // data is available locally
    SelfSufficient = 911,// counts as unwrapped without a fetch (see isUnwrapped)
    Recycled = 123,      // data lives in another slice (see Info::recycling)
    Acceptor = 405       // free slot, ready to accept a new assignment
  };
// State:1 ends here
// [[file:../../atrip.org::*The Info structure][The Info structure:1]]
  struct Info {
    // which part of a,b,c the slice holds
    PartialTuple tuple;
    // The type of slice for the user to retrieve the correct one
    Type type;
    // What is the state of the slice
    State state;
    // Where the slice is to be retrieved
    Location from;
    // If the data are actually to be found in this other slice
    Type recycling;
    Info() : tuple{0,0}
           , type{Blank}
           , state{Acceptor}
           , from{0,0}
           , recycling{Blank}
           {}
  };
  using Ty_x_Tu = std::pair< Type, PartialTuple >;
// The Info structure:1 ends here
// [[file:../../atrip.org::*Name][Name:1]]
  // Which tensor the slice belongs to.
  enum Name
    { TA    = 100
    , VIJKA = 101
    , VABCI = 200
    , TABIJ = 201
    , VABIJ = 202
    };
// Name:1 ends here
// [[file:../../atrip.org::*Database][Database:1]]
  // One entry of the slice database exchanged between ranks: the tensor
  // name plus the slice's full bookkeeping Info.
  struct LocalDatabaseElement {
    Slice<F>::Name name;
    Slice<F>::Info info;
  };
// Database:1 ends here
// [[file:../../atrip.org::*Database][Database:2]]
  using LocalDatabase = std::vector<LocalDatabaseElement>;
  using Database = LocalDatabase;
// Database:2 ends here
// [[file:../../atrip.org::*MPI Types][MPI Types:1]]
  // Builders for the MPI datatypes used to ship the structs above.
  // Each builder commits (and leaks, by design of MPI type caching here)
  // a new datatype per call.
  struct mpi {
    // n contiguous elements of DT as a committed MPI datatype.
    static MPI_Datatype vector(size_t n, MPI_Datatype const& DT) {
      MPI_Datatype dt;
      MPI_Type_vector(n, 1, 1, DT, &dt);
      MPI_Type_commit(&dt);
      return dt;
    }
    // Struct datatype mirroring Location, with member displacements
    // measured on the current architecture.
    static MPI_Datatype sliceLocation () {
      constexpr int n = 2;
      // create a sliceLocation to measure in the current architecture
      // the packing of the struct
      Slice<F>::Location measure;
      MPI_Datatype dt;
      const std::vector<int> lengths(n, 1);
      const MPI_Datatype types[n] = {usizeDt(), usizeDt()};
      static_assert(sizeof(Slice<F>::Location) == 2 * sizeof(size_t),
                    "The Location packing is wrong in your compiler");
      // measure the displacements in the struct
      size_t j = 0;
      MPI_Aint base_address, displacements[n];
      MPI_Get_address(&measure, &base_address);
      MPI_Get_address(&measure.rank, &displacements[j++]);
      MPI_Get_address(&measure.source, &displacements[j++]);
      for (size_t i = 0; i < n; i++)
        displacements[i] = MPI_Aint_diff(displacements[i], base_address);
      MPI_Type_create_struct(n, lengths.data(), displacements, types, &dt);
      MPI_Type_commit(&dt);
      return dt;
    }
    static MPI_Datatype usizeDt() { return MPI_UINT64_T; }
    // Struct datatype mirroring Info; enums are shipped as raw bytes.
    static MPI_Datatype sliceInfo () {
      constexpr int n = 5;
      MPI_Datatype dt;
      Slice<F>::Info measure;
      const std::vector<int> lengths(n, 1);
      const MPI_Datatype types[n]
        = { vector(2, usizeDt())
          , vector(sizeof(enum Type), MPI_CHAR)
          , vector(sizeof(enum State), MPI_CHAR)
          , sliceLocation()
          , vector(sizeof(enum Type), MPI_CHAR)
          // TODO: Why this does not work on intel mpi?
          /*, MPI_UINT64_T*/
          };
      static_assert(sizeof(enum Type) == 4, "Enum type not 4 bytes long");
      static_assert(sizeof(enum State) == 4, "Enum State not 4 bytes long");
      static_assert(sizeof(enum Name) == 4, "Enum Name not 4 bytes long");
      // create the displacements from the info measurement struct
      size_t j = 0;
      MPI_Aint base_address, displacements[n];
      MPI_Get_address(&measure, &base_address);
      MPI_Get_address(&measure.tuple[0], &displacements[j++]);
      MPI_Get_address(&measure.type, &displacements[j++]);
      MPI_Get_address(&measure.state, &displacements[j++]);
      MPI_Get_address(&measure.from, &displacements[j++]);
      MPI_Get_address(&measure.recycling, &displacements[j++]);
      for (size_t i = 0; i < n; i++)
        displacements[i] = MPI_Aint_diff(displacements[i], base_address);
      MPI_Type_create_struct(n, lengths.data(), displacements, types, &dt);
      MPI_Type_commit(&dt);
      return dt;
    }
    static MPI_Datatype localDatabaseElement () {
      constexpr int n = 2;
      MPI_Datatype dt;
      LocalDatabaseElement measure;
      const std::vector<int> lengths(n, 1);
      const MPI_Datatype types[n]
        = { vector(sizeof(enum Name), MPI_CHAR)
          , sliceInfo()
          };
      // measure the displacements in the struct
      size_t j = 0;
      MPI_Aint base_address, displacements[n];
      MPI_Get_address(&measure, &base_address);
      MPI_Get_address(&measure.name, &displacements[j++]);
      MPI_Get_address(&measure.info, &displacements[j++]);
      for (size_t i = 0; i < n; i++)
        displacements[i] = MPI_Aint_diff(displacements[i], base_address);
      static_assert( sizeof(LocalDatabaseElement) == sizeof(measure)
                   , "Measure has bad size");
      MPI_Type_create_struct(n, lengths.data(), displacements, types, &dt);
      MPI_Type_commit(&dt);
      // NOTE(review): the struct datatype `dt` built above is committed but
      // never used — the element is shipped as a plain byte vector instead,
      // so the `return dt;` below is unreachable dead code.
      return vector(sizeof(LocalDatabaseElement), MPI_CHAR);
      // TODO: write tests in order to know if this works
      return dt;
    }
  };
// MPI Types:1 ends here
// [[file:../../atrip.org::*Static utilities][Static utilities:1]]
  // Projects the full (a,b,c) tuple onto the pair relevant for sliceType;
  // one-index slice types pad the second slot with 0.
  static
  PartialTuple subtupleBySlice(ABCTuple abc, Type sliceType) {
    switch (sliceType) {
      case AB: return {abc[0], abc[1]};
      case BC: return {abc[1], abc[2]};
      case AC: return {abc[0], abc[2]};
      case CB: return {abc[2], abc[1]};
      case BA: return {abc[1], abc[0]};
      case CA: return {abc[2], abc[0]};
      case  A: return {abc[0], 0};
      case  B: return {abc[1], 0};
      case  C: return {abc[2], 0};
      default: throw "Switch statement not exhaustive!";
    }
  }
// Static utilities:1 ends here
// [[file:../../atrip.org::*Static utilities][Static utilities:2]]
  // All slices in Recycled state whose data is backed by the slice
  // described by `info`.
  static std::vector<Slice<F>*> hasRecycledReferencingToIt
    ( std::vector<Slice<F>> &slices
    , Info const& info
    ) {
    std::vector<Slice<F>*> result;
    for (auto& s: slices)
      if (  s.info.recycling == info.type
         && s.info.tuple == info.tuple
         && s.info.state == Recycled
         ) result.push_back(&s);
    return result;
  }
// Static utilities:2 ends here
// [[file:../../atrip.org::*Static utilities][Static utilities:3]]
  // First slice of the given type; throws if none exists.
  static Slice<F>& findOneByType(std::vector<Slice<F>> &slices, Slice<F>::Type type) {
    const auto sliceIt
      = std::find_if(slices.begin(), slices.end(),
                     [&type](Slice<F> const& s) {
                       return type == s.info.type;
                     });
    WITH_CRAZY_DEBUG
    WITH_RANK
      << "\t__ looking for " << type << "\n";
    if (sliceIt == slices.end())
      throw std::domain_error("Slice by type not found!");
    return *sliceIt;
  }
// Static utilities:3 ends here
// [[file:../../atrip.org::*Static utilities][Static utilities:4]]
  // The (non-Recycled) slice that actually owns the data a recycled slice
  // described by `info` points at; throws if it cannot be found.
  static Slice<F>&
  findRecycledSource (std::vector<Slice<F>> &slices, Slice<F>::Info info) {
    const auto sliceIt
      = std::find_if(slices.begin(), slices.end(),
                     [&info](Slice<F> const& s) {
                       return info.recycling == s.info.type
                           && info.tuple == s.info.tuple
                           && State::Recycled != s.info.state
                           ;
                     });
    WITH_CRAZY_DEBUG
    WITH_RANK << "__slice__:find: recycling source of "
              << pretty_print(info) << "\n";
    if (sliceIt == slices.end())
      throw std::domain_error( "Slice not found: "
                             + pretty_print(info)
                             + " rank: "
                             + pretty_print(Atrip::rank)
                             );
    WITH_RANK << "__slice__:find: " << pretty_print(sliceIt->info) << "\n";
    return *sliceIt;
  }
// Static utilities:4 ends here
// [[file:../../atrip.org::*Static utilities][Static utilities:5]]
  // The slice of the given type holding the projection of abc; throws if
  // it cannot be found.
  static Slice<F>& findByTypeAbc
    ( std::vector<Slice<F>> &slices
    , Slice<F>::Type type
    , ABCTuple const& abc
    ) {
    const auto tuple = Slice<F>::subtupleBySlice(abc, type);
    const auto sliceIt
      = std::find_if(slices.begin(), slices.end(),
                     [&type, &tuple](Slice<F> const& s) {
                       return type == s.info.type
                           && tuple == s.info.tuple
                           ;
                     });
    WITH_CRAZY_DEBUG
    WITH_RANK << "__slice__:find:" << type << " and tuple "
              << pretty_print(tuple)
              << "\n";
    if (sliceIt == slices.end())
      throw std::domain_error( "Slice not found: "
                             + pretty_print(tuple)
                             + ", "
                             + pretty_print(type)
                             + " rank: "
                             + pretty_print(Atrip::rank)
                             );
    return *sliceIt;
  }
// Static utilities:5 ends here
// [[file:../../atrip.org::*Static utilities][Static utilities:6]]
  // Exact match on type, state, tuple, and origin; throws if absent.
  static Slice<F>& findByInfo(std::vector<Slice<F>> &slices,
                              Slice<F>::Info const& info) {
    const auto sliceIt
      = std::find_if(slices.begin(), slices.end(),
                     [&info](Slice<F> const& s) {
                       // TODO: maybe implement comparison in Info struct
                       return info.type == s.info.type
                           && info.state == s.info.state
                           && info.tuple == s.info.tuple
                           && info.from.rank == s.info.from.rank
                           && info.from.source == s.info.from.source
                            ;
                     });
    WITH_CRAZY_DEBUG
    WITH_RANK << "__slice__:find:looking for " << pretty_print(info) << "\n";
    if (sliceIt == slices.end())
      throw std::domain_error( "Slice by info not found: "
                             + pretty_print(info));
    return *sliceIt;
  }
// Static utilities:6 ends here
// [[file:../../atrip.org::*Attributes][Attributes:1]]
  // Bookkeeping: tuple, type, state, origin, recycling target.
  Info info;
// Attributes:1 ends here
// [[file:../../atrip.org::*Attributes][Attributes:2]]
  // Non-owning pointer to the payload buffer; nullptr for a free slice.
  F *data;
// Attributes:2 ends here
// [[file:../../atrip.org::*Attributes][Attributes:3]]
  // Outstanding request while the slice is in state Dispatched.
  MPI_Request request;
// Attributes:3 ends here
// [[file:../../atrip.org::*Attributes][Attributes:4]]
  // Number of elements in the payload buffer; fixed at construction.
  const size_t size;
// Attributes:4 ends here
// [[file:../../atrip.org::*Member functions][Member functions:1]]
  // Marks the slice's data as locally available and drops any recycling
  // link.
  void markReady() noexcept {
    info.state = Ready;
    info.recycling = Blank;
  }
// Member functions:1 ends here
// [[file:../../atrip.org::*Member functions][Member functions:2]]
  // True when the payload needs no further unwrapping (MPI_Wait).
  bool isUnwrapped() const noexcept {
    return info.state == Ready
        || info.state == SelfSufficient
        ;
  }
// Member functions:2 ends here
// [[file:../../atrip.org::*Member functions][Member functions:3]]
  // True when unwrapAndMarkReady may be called (directly or after
  // resolving the recycled source).
  bool isUnwrappable() const noexcept {
    return isUnwrapped()
        || info.state == Recycled
        || info.state == Dispatched
        ;
  }
  inline bool isDirectlyFetchable() const noexcept {
    return info.state == Ready || info.state == Dispatched;
  }
  // Resets all bookkeeping to the Acceptor defaults and detaches the
  // payload pointer (the buffer itself is not owned here).
  void free() noexcept {
    info.tuple     = {0, 0};
    info.type      = Blank;
    info.state     = Acceptor;
    info.from      = {0, 0};
    info.recycling = Blank;
    data           = nullptr;
  }
  // True iff every field matches the freshly-freed Acceptor defaults.
  inline bool isFree() const noexcept {
    return info.tuple       == PartialTuple{0, 0}
        && info.type        == Blank
        && info.state       == Acceptor
        && info.from.rank   == 0
        && info.from.source == 0
        && info.recycling   == Blank
        && data             == nullptr
        ;
  }
// Member functions:3 ends here
// [[file:../../atrip.org::*Member functions][Member functions:4]]
  // A slice can serve as a recycling source only if it has (or will have)
  // valid data of its own.
  inline bool isRecyclable() const noexcept {
    return (  info.state == Dispatched
           || info.state == Ready
           || info.state == Fetch
           )
        && hasValidDataPointer()
        ;
  }
// Member functions:4 ends here
// [[file:../../atrip.org::*Member functions][Member functions:5]]
  // The data pointer is meaningful only outside the Acceptor/Blank states.
  inline bool hasValidDataPointer() const noexcept {
    return data       != nullptr
        && info.state != Acceptor
        && info.type  != Blank
        ;
  }
// Member functions:5 ends here
// [[file:../../atrip.org::*Member functions][Member functions:6]]
  // Completes the outstanding MPI request of a Dispatched slice and marks
  // it Ready. No-op for an already-Ready slice; throws for any other state
  // and on MPI failure.
  void unwrapAndMarkReady() {
    if (info.state == Ready) return;
    if (info.state != Dispatched)
      throw
        std::domain_error("Can't unwrap a non-ready, non-dispatched slice!");
    markReady();
    MPI_Status status;
#ifdef HAVE_OCD
    WITH_RANK << "__slice__:mpi: waiting " << "\n";
#endif
    const int errorCode = MPI_Wait(&request, &status);
    if (errorCode != MPI_SUCCESS)
      throw "MPI ERROR HAPPENED....";
#ifdef HAVE_OCD
    char errorString[MPI_MAX_ERROR_STRING];
    int errorSize;
    MPI_Error_string(errorCode, errorString, &errorSize);
    WITH_RANK << "__slice__:mpi: status "
              << "{ .source="    << status.MPI_SOURCE
              << ", .tag="       << status.MPI_TAG
              << ", .error="     << status.MPI_ERROR
              << ", .errCode="   << errorCode
              << ", .err="       << errorString
              << " }"
              << "\n";
#endif
  }
// Member functions:6 ends here
// [[file:../../atrip.org::*Epilog][Epilog:1]]
  // A slice starts life as a free Acceptor of fixed capacity.
  Slice(size_t size_)
    : info({})
    , data(nullptr)
    , size(size_)
    {}
}; // struct Slice
// Epilog:1 ends here
// [[file:../../atrip.org::*Debug][Debug:1]]
template <typename F=double>
std::ostream& operator<<(std::ostream& out, typename Slice<F>::Location const& v) {
// TODO: remove me
out << "{.r(" << v.rank << "), .s(" << v.source << ")};";
return out;
}
template <typename F=double>
std::ostream& operator<<(std::ostream& out, typename Slice<F>::Info const& i) {
out << "«t" << i.type << ", s" << i.state << "»"
<< " ⊙ {" << i.from.rank << ", " << i.from.source << "}"
<< " ∴ {" << i.tuple[0] << ", " << i.tuple[1] << "}"
<< " ♲t" << i.recycling
;
return out;
}
} // namespace atrip
// Debug:1 ends here
|
#ifndef FSLGRAPHICS_RENDER_NINESLICEATLASTEXTURE2D_HPP
#define FSLGRAPHICS_RENDER_NINESLICEATLASTEXTURE2D_HPP
/****************************************************************************************************************************************************
* Copyright 2020 NXP
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of the NXP. nor the names of
* its contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************************************************************************************/
#include <FslBase/Math/NineSlice.hpp>
#include <FslGraphics/Render/AtlasTexture2D.hpp>
namespace Fsl
{
struct NineSliceAtlasTexture2D
{
AtlasTexture2D Texture;
NineSlice NSlice;
NineSliceAtlasTexture2D() = default;
explicit NineSliceAtlasTexture2D(const AtlasTexture2D& texture)
: NineSliceAtlasTexture2D(texture, NineSlice())
{
}
explicit NineSliceAtlasTexture2D(const AtlasTexture2D& texture, const NineSlice& nineSlice)
: Texture(texture)
, NSlice(nineSlice)
{
}
bool IsValid() const
{
return Texture.IsValid();
}
bool operator==(const struct NineSliceAtlasTexture2D& rhs) const
{
return Texture == rhs.Texture && NSlice == rhs.NSlice;
}
bool operator!=(const NineSliceAtlasTexture2D& rhs) const
{
return !(*this == rhs);
}
};
}
#endif
|
// Copyright (c) 2012-2013 The PPCoin developers
// Copyright (c) 2015-2019 The PIVX developers
// Copyright (c) 2019-2020 The Altbet developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <boost/assign/list_of.hpp>
#include "db.h"
#include "kernel.h"
#include "script/interpreter.h"
#include "timedata.h"
#include "util.h"
#include "stakeinput.h"
#include "utilmoneystr.h"
#include "zpivchain.h"
using namespace std;
// Modifier interval: time to elapse before new modifier is computed
// Set to 3-hour for production network and 20-minute for test network
unsigned int nModifierInterval;
int nStakeTargetSpacing = 60;
// Returns the stake modifier interval in seconds.
// NOTE(review): fTestNet is accepted for call-site compatibility but is
// intentionally ignored -- both networks use MODIFIER_INTERVAL here.
unsigned int getIntervalVersion(bool fTestNet = false)
{
    (void)fTestNet; // silence unused-parameter warnings
    return MODIFIER_INTERVAL;
}
// Hard checkpoints of stake modifiers to ensure they are deterministic
static std::map<int, unsigned int> mapStakeModifierCheckpoints = {};
// Get the last stake modifier and its generation time from a given block
static bool GetLastStakeModifier(const CBlockIndex* pindex, uint64_t& nStakeModifier, int64_t& nModifierTime)
{
if (!pindex)
return error("%s : null pindex", __func__);
while (pindex && pindex->pprev && !pindex->GeneratedStakeModifier())
pindex = pindex->pprev;
if (!pindex->GeneratedStakeModifier())
return error("%s : no generation at genesis block", __func__);
nStakeModifier = pindex->nStakeModifier;
nModifierTime = pindex->GetBlockTime();
return true;
}
// Get selection interval section (in seconds)
// Length (in seconds) of selection-interval section nSection; sections
// shrink geometrically as nSection grows toward 63.
static int64_t GetStakeModifierSelectionIntervalSection(int nSection)
{
    assert(nSection >= 0 && nSection < 64);
    return getIntervalVersion() * 63 / (63 + ((63 - nSection) * (MODIFIER_INTERVAL_RATIO - 1)));
}
// Get stake modifier selection interval (in seconds)
// Total stake-modifier selection interval: the sum of all 64 sections.
static int64_t GetStakeModifierSelectionInterval()
{
    int64_t nTotal = 0;
    for (int nSection = 0; nSection < 64; ++nSection)
        nTotal += GetStakeModifierSelectionIntervalSection(nSection);
    return nTotal;
}
// select a block from the candidate blocks in vSortedByTimestamp, excluding
// already selected blocks in vSelectedBlocks, and with timestamp up to
// nSelectionIntervalStop.
// Pick, from the timestamp-sorted candidates, the not-yet-selected block
// with the smallest selection hash whose time is <= nSelectionIntervalStop.
// The winner is written to *pindexSelected; returns false if none qualifies.
static bool SelectBlockFromCandidates(
    std::vector<std::pair<int64_t, uint256> >& vSortedByTimestamp,
    std::map<uint256, const CBlockIndex*>& mapSelectedBlocks,
    int64_t nSelectionIntervalStop,
    uint64_t nStakeModifierPrev,
    const CBlockIndex** pindexSelected)
{
    bool fModifierV2 = false;
    bool fFirstRun = true;
    bool fSelected = false;
    uint256 hashBest = 0;
    *pindexSelected = (const CBlockIndex*)0;
    for (const PAIRTYPE(int64_t, uint256) & item : vSortedByTimestamp) {
        if (!mapBlockIndex.count(item.second))
            return error("%s : failed to find block index for candidate block %s", __func__, item.second.ToString().c_str());
        const CBlockIndex* pindex = mapBlockIndex[item.second];
        // Candidates are time-sorted: once something is selected, blocks
        // past the interval stop can be ignored entirely.
        if (fSelected && pindex->GetBlockTime() > nSelectionIntervalStop)
            break;
        //if the lowest block height (vSortedByTimestamp[0]) is >= switch height, use new modifier calc
        if (fFirstRun){
            fModifierV2 = pindex->nHeight >= Params().ModifierUpgradeBlock();
            fFirstRun = false;
        }
        // Skip blocks already chosen in earlier rounds.
        if (mapSelectedBlocks.count(pindex->GetBlockHash()) > 0)
            continue;
        // compute the selection hash by hashing an input that is unique to that block
        uint256 hashProof;
        if(fModifierV2)
            hashProof = pindex->GetBlockHash();
        else
            hashProof = pindex->IsProofOfStake() ? 0 : pindex->GetBlockHash();
        CDataStream ss(SER_GETHASH, 0);
        ss << hashProof << nStakeModifierPrev;
        uint256 hashSelection = Hash(ss.begin(), ss.end());
        // the selection hash is divided by 2**32 so that proof-of-stake block
        // is always favored over proof-of-work block. this is to preserve
        // the energy efficiency property
        if (pindex->IsProofOfStake())
            hashSelection >>= 32;
        // Keep the candidate with the smallest selection hash seen so far.
        if (fSelected && hashSelection < hashBest) {
            hashBest = hashSelection;
            *pindexSelected = (const CBlockIndex*)pindex;
        } else if (!fSelected) {
            fSelected = true;
            hashBest = hashSelection;
            *pindexSelected = (const CBlockIndex*)pindex;
        }
    }
    if (GetBoolArg("-printstakemodifier", false))
        LogPrintf("%s : selection hash=%s\n", __func__, hashBest.ToString().c_str());
    return fSelected;
}
/* NEW MODIFIER */
// Stake Modifier (hash modifier of proof-of-stake):
// The purpose of stake modifier is to prevent a txout (coin) owner from
// computing future proof-of-stake generated by this txout at the time
// of transaction confirmation. To meet kernel protocol, the txout
// must hash with a future stake modifier to generate the proof.
// Derive the next stake modifier by hashing the kernel together with the
// previous block's modifier; a null pindexPrev (genesis) yields zero.
uint256 ComputeStakeModifier(const CBlockIndex* pindexPrev, const uint256& kernel)
{
    if (!pindexPrev)
        return uint256(); // genesis block's modifier is 0
    CHashWriter ss(SER_GETHASH, 0);
    ss << kernel << pindexPrev->nStakeModifier;
    return ss.GetHash();
}
// Stake Modifier (hash modifier of proof-of-stake):
// The purpose of stake modifier is to prevent a txout (coin) owner from
// computing future proof-of-stake generated by this txout at the time
// of transaction confirmation. To meet kernel protocol, the txout
// must hash with a future stake modifier to generate the proof.
// Stake modifier consists of bits each of which is contributed from a
// selected block of a given block group in the past.
// The selection of a block is based on a hash of the block's proof-hash and
// the previous stake modifier.
// Stake modifier is recomputed at a fixed time interval instead of every
// block. This is to make it difficult for an attacker to gain control of
// additional bits in the stake modifier, even after generating a chain of
// blocks.
// Compute the stake modifier for the block after pindexPrev. Returns the
// previous modifier unchanged (fGeneratedStakeModifier=false) unless a full
// MODIFIER_INTERVAL has elapsed, in which case 64 one-bit contributions are
// gathered from hash-selected past blocks.
bool ComputeNextStakeModifier(const CBlockIndex* pindexPrev, uint64_t& nStakeModifier, bool& fGeneratedStakeModifier)
{
    nStakeModifier = 0;
    fGeneratedStakeModifier = false;
    if (!pindexPrev) {
        fGeneratedStakeModifier = true;
        return true; // genesis block's modifier is 0
    }
    if (pindexPrev->nHeight == 0) {
        //Give a stake modifier to the first block
        // NOTE(review): uint64_t("stakemodifier") converts the *address* of
        // the string literal, so the value is build-dependent. This is
        // inherited consensus behavior -- do not "fix" without a fork plan.
        fGeneratedStakeModifier = true;
        nStakeModifier = uint64_t("stakemodifier");
        return true;
    }
    // First find current stake modifier and its generation block time
    // if it's not old enough, return the same stake modifier
    int64_t nModifierTime = 0;
    if (!GetLastStakeModifier(pindexPrev, nStakeModifier, nModifierTime))
        return error("%s : unable to get last modifier", __func__);
    if (GetBoolArg("-printstakemodifier", false))
        LogPrintf("%s : prev modifier= %s time=%s\n", __func__, std::to_string(nStakeModifier).c_str(), DateTimeStrFormat("%Y-%m-%d %H:%M:%S", nModifierTime).c_str());
    // Still inside the same modifier interval: keep the previous modifier.
    if (nModifierTime / MODIFIER_INTERVAL >= pindexPrev->GetBlockTime() / MODIFIER_INTERVAL)
        return true;
    // Sort candidate blocks by timestamp
    vector<pair<int64_t, uint256> > vSortedByTimestamp;
    vSortedByTimestamp.reserve(64 * getIntervalVersion() / nStakeTargetSpacing);
    int64_t nSelectionInterval = GetStakeModifierSelectionInterval();
    int64_t nSelectionIntervalStart = (pindexPrev->GetBlockTime() / getIntervalVersion()) * getIntervalVersion() - nSelectionInterval;
    const CBlockIndex* pindex = pindexPrev;
    // Collect every block within the selection interval (walking backwards).
    while (pindex && pindex->GetBlockTime() >= nSelectionIntervalStart) {
        vSortedByTimestamp.push_back(std::make_pair(pindex->GetBlockTime(), pindex->GetBlockHash()));
        pindex = pindex->pprev;
    }
    int nHeightFirstCandidate = pindex ? (pindex->nHeight + 1) : 0;
    // reverse() puts candidates oldest-first before the stable-by-value sort.
    std::reverse(vSortedByTimestamp.begin(), vSortedByTimestamp.end());
    std::sort(vSortedByTimestamp.begin(), vSortedByTimestamp.end());
    // Select 64 blocks from candidate blocks to generate stake modifier
    uint64_t nStakeModifierNew = 0;
    int64_t nSelectionIntervalStop = nSelectionIntervalStart;
    std::map<uint256, const CBlockIndex*> mapSelectedBlocks;
    for (int nRound = 0; nRound < std::min(64, (int)vSortedByTimestamp.size()); nRound++) {
        // add an interval section to the current selection round
        nSelectionIntervalStop += GetStakeModifierSelectionIntervalSection(nRound);
        // select a block from the candidates of current round
        if (!SelectBlockFromCandidates(vSortedByTimestamp, mapSelectedBlocks, nSelectionIntervalStop, nStakeModifier, &pindex))
            return error("%s : unable to select block at round %d", __func__, nRound);
        // write the entropy bit of the selected block
        nStakeModifierNew |= (((uint64_t)pindex->GetStakeEntropyBit()) << nRound);
        // add the selected block from candidates to selected list
        mapSelectedBlocks.insert(std::make_pair(pindex->GetBlockHash(), pindex));
        if (GetBoolArg("-printstakemodifier", false))
            LogPrintf("%s : selected round %d stop=%s height=%d bit=%d\n", __func__,
                nRound, DateTimeStrFormat("%Y-%m-%d %H:%M:%S", nSelectionIntervalStop).c_str(), pindex->nHeight, pindex->GetStakeEntropyBit());
    }
    // Print selection map for visualization of the selected blocks
    if (GetBoolArg("-printstakemodifier", false)) {
        std::string strSelectionMap = "";
        // '-' indicates proof-of-work blocks not selected
        strSelectionMap.insert(0, pindexPrev->nHeight - nHeightFirstCandidate + 1, '-');
        pindex = pindexPrev;
        while (pindex && pindex->nHeight >= nHeightFirstCandidate) {
            // '=' indicates proof-of-stake blocks not selected
            if (pindex->IsProofOfStake())
                strSelectionMap.replace(pindex->nHeight - nHeightFirstCandidate, 1, "=");
            pindex = pindex->pprev;
        }
        for (const std::pair<const uint256, const CBlockIndex*> &item : mapSelectedBlocks) {
            // 'S' indicates selected proof-of-stake blocks
            // 'W' indicates selected proof-of-work blocks
            strSelectionMap.replace(item.second->nHeight - nHeightFirstCandidate, 1, item.second->IsProofOfStake() ? "S" : "W");
        }
        LogPrintf("%s : selection height [%d, %d] map %s\n", __func__, nHeightFirstCandidate, pindexPrev->nHeight, strSelectionMap.c_str());
    }
    if (GetBoolArg("-printstakemodifier", false)) {
        LogPrintf("%s : new modifier=%s time=%s\n", __func__, std::to_string(nStakeModifierNew).c_str(), DateTimeStrFormat("%Y-%m-%d %H:%M:%S", pindexPrev->GetBlockTime()).c_str());
    }
    nStakeModifier = nStakeModifierNew;
    fGeneratedStakeModifier = true;
    return true;
}
// The stake modifier used to hash for a stake kernel is chosen as the stake
// modifier about a selection interval later than the coin generating the kernel
// Find the stake modifier generated at least one selection interval after
// the block hashBlockFrom, writing it plus its height/time to the out-params.
bool GetKernelStakeModifier(uint256 hashBlockFrom, uint64_t& nStakeModifier, int& nStakeModifierHeight, int64_t& nStakeModifierTime, bool fPrintProofOfStake)
{
    nStakeModifier = 0;
    if (!mapBlockIndex.count(hashBlockFrom))
        return error("GetKernelStakeModifier() : block not indexed");
    const CBlockIndex* pindexFrom = mapBlockIndex[hashBlockFrom];
    nStakeModifierHeight = pindexFrom->nHeight;
    nStakeModifierTime = pindexFrom->GetBlockTime();
    int64_t nStakeModifierSelectionInterval = GetStakeModifierSelectionInterval();
    const CBlockIndex* pindex = pindexFrom;
    CBlockIndex* pindexNext = chainActive[pindexFrom->nHeight + 1];
    // loop to find the stake modifier later by a selection interval
    // NOTE(review): if pindexNext becomes null while nStakeModifier is still
    // 0 (origin block too close to the tip), the pindexNext->nHeight below
    // dereferences null -- confirm callers guarantee sufficient chain depth.
    while (nStakeModifierTime < pindexFrom->GetBlockTime() + nStakeModifierSelectionInterval)
    {
        // Reached the tip but already captured a modifier: accept it.
        if (!pindexNext && nStakeModifier)
            return true;
        pindex = pindexNext;
        pindexNext = chainActive[pindexNext->nHeight + 1];
        if (pindex->GeneratedStakeModifier())
        {
            nStakeModifierHeight = pindex->nHeight;
            nStakeModifierTime = pindex->GetBlockTime();
            nStakeModifier = pindex->nStakeModifier;
        }
    }
    nStakeModifier = pindex->nStakeModifier;
    return true;
}
// Check whether the proof-of-stake hash for `stake` at nTimeTx meets the
// value-weighted target encoded in nBits (target scaled by value/100, so
// larger stakes have proportionally better odds).
bool CheckStakeKernelHash(const CBlockIndex* pindexPrev, const unsigned int nBits, CStakeInput* stake, const unsigned int nTimeTx, uint256& hashProofOfStake, const bool fVerify)
{
    // Calculate the proof of stake hash
    if (!GetHashProofOfStake(pindexPrev, stake, nTimeTx, fVerify, hashProofOfStake)) {
        return error("%s : Failed to calculate the proof of stake hash", __func__);
    }
    const CAmount& nValueIn = stake->GetValue();
    const CDataStream& ssUniqueID = stake->GetUniqueness();
    // Base target
    uint256 bnTarget;
    bnTarget.SetCompact(nBits);
    // Weighted target
    uint256 bnWeight = uint256(nValueIn) / 100;
    bnTarget *= bnWeight;
    // Check if proof-of-stake hash meets target protocol
    const bool res = (hashProofOfStake < bnTarget);
    // Log every attempt when verifying a block; only successes while mining.
    if (fVerify || res) {
        LogPrint("staking", "%s : Proof Of Stake:"
                            "\nssUniqueID=%s"
                            "\nnTimeTx=%d"
                            "\nhashProofOfStake=%s"
                            "\nnBits=%d"
                            "\nweight=%d"
                            "\nbnTarget=%s (res: %d)\n\n",
            __func__, HexStr(ssUniqueID), nTimeTx, hashProofOfStake.GetHex(),
            nBits, nValueIn, bnTarget.GetHex(), res);
    }
    return res;
}
// Compute the proof-of-stake hash: H(modifier ++ originBlockTime ++
// stakeUniqueness ++ txTime). The modifier comes from the v1 per-stake
// lookup or, after the upgrade height, from pindexPrev->nStakeModifierV2.
bool GetHashProofOfStake(const CBlockIndex* pindexPrev, CStakeInput* stake, const unsigned int nTimeTx, const bool fVerify, uint256& hashProofOfStakeRet) {
    // Grab the stake data
    CBlockIndex* pindexfrom = stake->GetIndexFrom();
    if (!pindexfrom) return error("%s : Failed to find the block index for stake origin", __func__);
    const CDataStream& ssUniqueID = stake->GetUniqueness();
    const unsigned int nTimeBlockFrom = pindexfrom->nTime;
    CDataStream modifier_ss(SER_GETHASH, 0);
    // Hash the modifier
    if (!Params().IsStakeModifierV2(pindexPrev->nHeight + 1)) {
        // Modifier v1
        uint64_t nStakeModifier = 0;
        if (!stake->GetModifier(nStakeModifier))
            return error("%s : Failed to get kernel stake modifier", __func__);
        modifier_ss << nStakeModifier;
    } else {
        // Modifier v2
        modifier_ss << pindexPrev->nStakeModifierV2;
    }
    CDataStream ss(modifier_ss);
    // Calculate hash
    ss << nTimeBlockFrom << ssUniqueID << nTimeTx;
    hashProofOfStakeRet = Hash(ss.begin(), ss.end());
    if (fVerify) {
        LogPrint("staking", "%s :{ nStakeModifier=%s\n"
                            "nStakeModifierHeight=%s\n"
                            "}\n",
            __func__, HexStr(modifier_ss), ((stake->IsZPIV()) ? "Not available" : std::to_string(stake->getStakeModifierHeight())));
    }
    return true;
}
// Miner-side search for a valid kernel: tries timestamps from nTimeTx
// upward (bounded by drift and future-blocktime limits). On success sets
// nTimeTx to the winning timestamp and fills hashProofOfStake.
bool Stake(const CBlockIndex* pindexPrev, CStakeInput* stakeInput, unsigned int nBits, unsigned int& nTimeTx, uint256& hashProofOfStake)
{
    int prevHeight = pindexPrev->nHeight;
    // get stake input pindex
    CBlockIndex* pindexFrom = stakeInput->GetIndexFrom();
    if (!pindexFrom || pindexFrom->nHeight < 1) return error("%s : no pindexfrom", __func__);
    const uint32_t nTimeBlockFrom = pindexFrom->nTime;
    const int nHeightBlockFrom = pindexFrom->nHeight;
    // check for maturity (min age/depth) requirements
    if (!Params().HasStakeMinAgeOrDepth(prevHeight + 1, nTimeTx, nHeightBlockFrom, nTimeBlockFrom))
        return error("%s : min age violation - height=%d - nTimeTx=%d, nTimeBlockFrom=%d, nHeightBlockFrom=%d",
                     __func__, prevHeight + 1, nTimeTx, nTimeBlockFrom, nHeightBlockFrom);
    // iterate the hashing
    bool fSuccess = false;
    const unsigned int nHashDrift = 60;
    unsigned int nTryTime = nTimeTx - 1;
    // iterate from nTimeTx up to nTimeTx + nHashDrift
    // but not after the max allowed future blocktime drift (3 minutes for PoS)
    const unsigned int maxTime = std::min(nTimeTx + nHashDrift, Params().MaxFutureBlockTime(GetAdjustedTime(), true));
    while (nTryTime < maxTime)
    {
        //new block came in, move on
        if (chainActive.Height() != prevHeight)
            break;
        ++nTryTime;
        // if stake hash does not meet the target then continue to next iteration
        if (!CheckStakeKernelHash(pindexPrev, nBits, stakeInput, nTryTime, hashProofOfStake))
            continue;
        // if we made it this far, then we have successfully found a valid kernel hash
        fSuccess = true;
        nTimeTx = nTryTime;
        break;
    }
    mapHashedBlocks.clear();
    mapHashedBlocks[chainActive.Tip()->nHeight] = GetTime(); //store a time stamp of when we last hashed on this block
    return fSuccess;
}
// Validate a zABET (zerocoin) stake against chain context: the stake must
// post-date the zerocoin-v2 start, be deep enough, and carry the exact
// accumulator checksum from the required-depth block.
bool ContextualCheckZerocoinStake(int nPreviousBlockHeight, CStakeInput* stake)
{
    if (nPreviousBlockHeight < Params().Zerocoin_Block_V2_Start())
        return error("%s : zABET stake block is less than allowed start height", __func__);
    // Downcast-and-test: only CZPivStake inputs are valid here.
    if (CZPivStake* zABET = dynamic_cast<CZPivStake*>(stake)) {
        CBlockIndex* pindexFrom = zABET->GetIndexFrom();
        if (!pindexFrom)
            return error("%s : failed to get index associated with zABET stake checksum", __func__);
        int depth = (nPreviousBlockHeight + 1) - pindexFrom->nHeight;
        if (depth < Params().Zerocoin_RequiredStakeDepth())
            return error("%s : zABET stake does not have required confirmation depth. Current height %d, stakeInput height %d.", __func__, nPreviousBlockHeight, pindexFrom->nHeight);
        //The checksum needs to be the exact checksum from 200 blocks ago
        uint256 nCheckpoint200 = chainActive[nPreviousBlockHeight - Params().Zerocoin_RequiredStakeDepth()]->nAccumulatorCheckpoint;
        uint32_t nChecksum200 = ParseChecksum(nCheckpoint200, libzerocoin::AmountToZerocoinDenomination(zABET->GetValue()));
        if (nChecksum200 != zABET->GetChecksum())
            return error("%s : accumulator checksum is different than the block 200 blocks previous. stake=%d block200=%d", __func__, zABET->GetChecksum(), nChecksum200);
    } else {
        return error("%s : dynamic_cast of stake ptr failed", __func__);
    }
    return true;
}
bool initStakeInput(const CBlock block, std::unique_ptr<CStakeInput>& stake, int nPreviousBlockHeight) {
const CTransaction tx = block.vtx[1];
if (!tx.IsCoinStake())
return error("%s : called on non-coinstake %s", __func__, tx.GetHash().ToString().c_str());
// Kernel (input 0) must match the stake hash target per coin age (nBits)
const CTxIn& txin = tx.vin[0];
//Construct the stakeinput object
if (txin.IsZerocoinSpend()) {
libzerocoin::CoinSpend spend = TxInToZerocoinSpend(txin);
if (spend.getSpendType() != libzerocoin::SpendType::STAKE)
return error("%s : spend is using the wrong SpendType (%d)", __func__, (int)spend.getSpendType());
stake = std::unique_ptr<CStakeInput>(new CZPivStake(spend));
if (!ContextualCheckZerocoinStake(nPreviousBlockHeight, stake.get()))
return error("%s : staked zABET fails context checks", __func__);
} else {
// First try finding the previous transaction in database
uint256 hashBlock;
CTransaction txPrev;
if (!GetTransaction(txin.prevout.hash, txPrev, hashBlock, true))
return error("%s : INFO: read txPrev failed, tx id prev: %s, block id %s",
__func__, txin.prevout.hash.GetHex(), block.GetHash().GetHex());
//verify signature and script
if (!VerifyScript(txin.scriptSig, txPrev.vout[txin.prevout.n].scriptPubKey, STANDARD_SCRIPT_VERIFY_FLAGS, TransactionSignatureChecker(&tx, 0)))
return error("%s : VerifySignature failed on coinstake %s", __func__, tx.GetHash().ToString().c_str());
CPivStake* pivInput = new CPivStake();
pivInput->SetInput(txPrev, txin.prevout.n);
stake = std::unique_ptr<CStakeInput>(pivInput);
}
return true;
}
// Check kernel hash target and coinstake signature
// Full proof-of-stake validation for a block: build the stake input, apply
// maturity rules (post public-spend activation, non-zerocoin only), then
// verify the kernel hash against the block's nBits.
bool CheckProofOfStake(const CBlock block, uint256& hashProofOfStake, std::unique_ptr<CStakeInput>& stake, int nPreviousBlockHeight)
{
    // Initialize the stake object
    if(!initStakeInput(block, stake, nPreviousBlockHeight))
        return error("%s : stake input object initialization failed", __func__);
    const CTransaction tx = block.vtx[1];
    // Kernel (input 0) must match the stake hash target per coin age (nBits)
    const CTxIn& txin = tx.vin[0];
    // NOTE(review): operator[] default-inserts a null entry if
    // hashPrevBlock is unknown -- presumably callers only pass connected
    // blocks; confirm before relying on pindexPrev being non-null.
    CBlockIndex* pindexPrev = mapBlockIndex[block.hashPrevBlock];
    CBlockIndex* pindexfrom = stake->GetIndexFrom();
    if (!pindexfrom)
        return error("%s : Failed to find the block index for stake origin", __func__);
    unsigned int nBlockFromTime = pindexfrom->nTime;
    unsigned int nTxTime = block.nTime;
    const int nBlockFromHeight = pindexfrom->nHeight;
    if (!txin.IsZerocoinSpend() && nPreviousBlockHeight >= Params().Zerocoin_Block_Public_Spend_Enabled() - 1) {
        //check for maturity (min age/depth) requirements
        if (!Params().HasStakeMinAgeOrDepth(nPreviousBlockHeight+1, nTxTime, nBlockFromHeight, nBlockFromTime))
            return error("%s : min age violation - height=%d - nTimeTx=%d, nTimeBlockFrom=%d, nHeightBlockFrom=%d",
                         __func__, nPreviousBlockHeight, nTxTime, nBlockFromTime, nBlockFromHeight);
    }
    if (!CheckStakeKernelHash(pindexPrev, block.nBits, stake.get(), nTxTime, hashProofOfStake, true))
        return error("%s : INFO: check kernel failed on coinstake %s, hashProof=%s", __func__,
                     tx.GetHash().GetHex(), hashProofOfStake.GetHex());
    return true;
}
// Check whether the coinstake timestamp meets protocol
bool CheckCoinStakeTimestamp(int64_t nTimeBlock, int64_t nTimeTx)
{
    // v0.3 protocol: the coinstake tx time must equal the block time exactly.
    return nTimeBlock == nTimeTx;
}
// Get stake modifier checksum
// 32-bit checksum of a block's stake-modifier state, chained from the
// parent's checksum so checkpoints pin the whole history.
unsigned int GetStakeModifierChecksum(const CBlockIndex* pindex)
{
    assert(pindex->pprev || pindex->GetBlockHash() == Params().HashGenesisBlock());
    // Hash previous checksum with flags, hashProofOfStake and nStakeModifier
    CDataStream ss(SER_GETHASH, 0);
    if (pindex->pprev)
        ss << pindex->pprev->nStakeModifierChecksum;
    ss << pindex->nFlags << pindex->hashProofOfStake << pindex->nStakeModifier;
    uint256 hashChecksum = Hash(ss.begin(), ss.end());
    // Keep only the top 32 bits of the 256-bit hash.
    hashChecksum >>= (256 - 32);
    return hashChecksum.Get64();
}
// Check stake modifier hard checkpoints
// Verify a block's stake-modifier checksum against the hard-coded mainnet
// checkpoints; heights without a checkpoint (and all non-mainnet) pass.
bool CheckStakeModifierCheckpoints(int nHeight, unsigned int nStakeModifierChecksum)
{
    if (Params().NetworkID() != CBaseChainParams::MAIN) return true; // Testnet has no checkpoints
    // Single find() instead of count()+operator[]: one lookup, and no risk
    // of operator[] inserting a default entry into the non-const map.
    std::map<int, unsigned int>::const_iterator it = mapStakeModifierCheckpoints.find(nHeight);
    if (it != mapStakeModifierCheckpoints.end())
        return nStakeModifierChecksum == it->second;
    return true;
}
|
// Copyright (c) 2011-2014 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <QApplication>
#include "guiutil.h"
#include "bitcoinaddressvalidator.h"
#include "walletmodel.h"
#include "bitcoinunits.h"
#include "util.h"
#include "init.h"
#include <QDateTime>
#include <QDoubleValidator>
#include <QFont>
#include <QLineEdit>
#if QT_VERSION >= 0x050000
#include <QUrlQuery>
#else
#include <QUrl>
#endif
#include <QTextDocument> // for Qt::mightBeRichText
#include <QAbstractItemView>
#include <QClipboard>
#include <QFileDialog>
#include <QDesktopServices>
#include <QThread>
#include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp>
#ifdef WIN32
#ifdef _WIN32_WINNT
#undef _WIN32_WINNT
#endif
#define _WIN32_WINNT 0x0501
#ifdef _WIN32_IE
#undef _WIN32_IE
#endif
#define _WIN32_IE 0x0501
#define WIN32_LEAN_AND_MEAN 1
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include "shlwapi.h"
#include "shlobj.h"
#include "shellapi.h"
#endif
namespace GUIUtil {
// Format a timestamp as locale short date plus "hh:mm".
QString dateTimeStr(const QDateTime &date)
{
    const QString dayPart = date.date().toString(Qt::SystemLocaleShortDate);
    const QString timePart = date.toString("hh:mm");
    return dayPart + QString(" ") + timePart;
}
// Unix-time overload: converts and delegates to the QDateTime formatter.
QString dateTimeStr(qint64 nTime)
{
    const QDateTime dt = QDateTime::fromTime_t((qint32)nTime);
    return dateTimeStr(dt);
}
// Monospace font used for rendering addresses.
QFont bitcoinAddressFont()
{
    QFont monoFont("Monospace");
    monoFont.setStyleHint(QFont::TypeWriter);
    return monoFont;
}
// Configure a line edit for address entry: monospace font, length cap,
// and the address validator.
void setupAddressWidget(QLineEdit *widget, QWidget *parent)
{
    widget->setFont(bitcoinAddressFont());
    widget->setMaxLength(BitcoinAddressValidator::MaxAddressLength);
    widget->setValidator(new BitcoinAddressValidator(parent));
}
// Configure a line edit for amount entry: non-negative, 8 decimal places,
// right-aligned.
void setupAmountWidget(QLineEdit *widget, QWidget *parent)
{
    QDoubleValidator *validator = new QDoubleValidator(parent);
    validator->setBottom(0.0);
    validator->setDecimals(8);
    widget->setValidator(validator);
    widget->setAlignment(Qt::AlignRight | Qt::AlignVCenter);
}
// Parse a kumulcoin: URI into a SendCoinsRecipient: address from the path,
// optional "label" and "amount" query parameters. Any unrecognized
// parameter carrying the (BIP21-style) "req-" prefix makes parsing fail.
bool parseBitcoinURI(const QUrl &uri, SendCoinsRecipient *out)
{
    // return if URI is not valid or is no bitcoin URI
    if(!uri.isValid() || uri.scheme() != QString("kumulcoin"))
        return false;
    SendCoinsRecipient rv;
    rv.address = uri.path();
    rv.amount = 0;
#if QT_VERSION < 0x050000
    QList<QPair<QString, QString> > items = uri.queryItems();
#else
    QUrlQuery uriQuery(uri);
    QList<QPair<QString, QString> > items = uriQuery.queryItems();
#endif
    for (QList<QPair<QString, QString> >::iterator i = items.begin(); i != items.end(); i++)
    {
        bool fShouldReturnFalse = false;
        // Strip the "req-" prefix; the parameter must then be recognized
        // below or the whole URI is rejected.
        if (i->first.startsWith("req-"))
        {
            i->first.remove(0, 4);
            fShouldReturnFalse = true;
        }
        if (i->first == "label")
        {
            rv.label = i->second;
            fShouldReturnFalse = false;
        }
        else if (i->first == "amount")
        {
            if(!i->second.isEmpty())
            {
                if(!BitcoinUnits::parse(BitcoinUnits::BTC, i->second, &rv.amount))
                {
                    return false;
                }
            }
            fShouldReturnFalse = false;
        }
        // A required parameter we do not understand: reject.
        if (fShouldReturnFalse)
            return false;
    }
    if(out)
    {
        *out = rv;
    }
    return true;
}
// Normalize "kumulcoin://" to "kumulcoin:" and parse.
//
// Cannot handle this later, because kumulcoin:// will cause Qt to see the
// part after // as host, which will lower-case it (and thus invalidate the
// address).
bool parseBitcoinURI(QString uri, SendCoinsRecipient *out)
{
    if(uri.startsWith("kumulcoin://"))
    {
        // "kumulcoin://" is 12 characters; the previous replace(0, 11, ...)
        // removed one character too few, leaving a stray '/' at the start
        // of the address part.
        uri.replace(0, 12, "kumulcoin:");
    }
    QUrl uriInstance(uri);
    return parseBitcoinURI(uriInstance, out);
}
// HTML-escape a string for safe rich-text display; when fMultiLine is set,
// newlines additionally become <br> so they survive rich-text rendering.
QString HtmlEscape(const QString& str, bool fMultiLine)
{
#if QT_VERSION < 0x050000
    QString escaped = Qt::escape(str);
#else
    QString escaped = str.toHtmlEscaped();
#endif
    if(fMultiLine)
    {
        escaped = escaped.replace("\n", "<br>\n");
    }
    return escaped;
}
// std::string convenience overload: convert then escape.
QString HtmlEscape(const std::string& str, bool fMultiLine)
{
    const QString converted = QString::fromStdString(str);
    return HtmlEscape(converted, fMultiLine);
}
// Copy the given role of the first selected row in `view` to the clipboard
// (both the global clipboard and the X11 primary selection).
void copyEntryData(QAbstractItemView *view, int column, int role)
{
    if(!view || !view->selectionModel())
        return;
    QModelIndexList selection = view->selectionModel()->selectedRows(column);
    if(!selection.isEmpty())
    {
        // Reuse the setClipboard helper (declared in guiutil.h) instead of
        // duplicating the two setText calls; also avoids fetching the model
        // data twice.
        setClipboard(selection.at(0).data(role).toString());
    }
}
void setClipboard(const QString& str)
{
QApplication::clipboard()->setText(str, QClipboard::Clipboard);
QApplication::clipboard()->setText(str, QClipboard::Selection);
}
// Wrapper around QFileDialog::getSaveFileName that defaults to the user's
// Documents folder, appends the filter's suffix when the user omitted one,
// and optionally reports the selected suffix to the caller.
QString getSaveFileName(QWidget *parent, const QString &caption,
    const QString &dir,
    const QString &filter,
    QString *selectedSuffixOut)
{
    QString selectedFilter;
    QString myDir;
    if(dir.isEmpty()) // Default to user documents location
    {
#if QT_VERSION < 0x050000
        myDir = QDesktopServices::storageLocation(QDesktopServices::DocumentsLocation);
#else
        myDir = QStandardPaths::writableLocation(QStandardPaths::DocumentsLocation);
#endif
    }
    else
    {
        myDir = dir;
    }
    QString result = QFileDialog::getSaveFileName(parent, caption, myDir, filter, &selectedFilter);
    /* Extract first suffix from filter pattern "Description (*.foo)" or "Description (*.foo *.bar ...) */
    QRegExp filter_re(".* \\(\\*\\.(.*)[ \\)]");
    QString selectedSuffix;
    if(filter_re.exactMatch(selectedFilter))
    {
        selectedSuffix = filter_re.cap(1);
    }
    /* Add suffix if needed */
    QFileInfo info(result);
    if(!result.isEmpty())
    {
        if(info.suffix().isEmpty() && !selectedSuffix.isEmpty())
        {
            /* No suffix specified, add selected suffix */
            if(!result.endsWith("."))
                result.append(".");
            result.append(selectedSuffix);
        }
    }
    /* Return selected suffix if asked to */
    if(selectedSuffixOut)
    {
        *selectedSuffixOut = selectedSuffix;
    }
    return result;
}
// Connection type for calls that must complete on the GUI thread before
// returning: queued+blocking from other threads, direct when already there
// (a blocking-queued call from the GUI thread would deadlock).
Qt::ConnectionType blockingGUIThreadConnection()
{
    return (QThread::currentThread() != qApp->thread())
               ? Qt::BlockingQueuedConnection
               : Qt::DirectConnection;
}
bool checkPoint(const QPoint &p, const QWidget *w)
{
QWidget *atW = QApplication::widgetAt(w->mapToGlobal(p));
if (!atW) return false;
return atW->topLevelWidget() == w;
}
// A window counts as obscured when any of its four corners or its center
// is covered by another window (De Morgan form of the original check).
bool isObscured(QWidget *w)
{
    return !checkPoint(QPoint(0, 0), w)
        || !checkPoint(QPoint(w->width() - 1, 0), w)
        || !checkPoint(QPoint(0, w->height() - 1), w)
        || !checkPoint(QPoint(w->width() - 1, w->height() - 1), w)
        || !checkPoint(QPoint(w->width() / 2, w->height() / 2), w);
}
void openDebugLogfile()
{
boost::filesystem::path pathDebug = GetDataDir() / "debug.log";
/* Open debug.log with the associated application */
if (boost::filesystem::exists(pathDebug))
QDesktopServices::openUrl(QUrl::fromLocalFile(QString::fromStdString(pathDebug.string())));
}
// Event filter that rewrites long plain-text tooltips as rich text so Qt
// word-wraps them; size_threshold is the length above which rewriting kicks in.
ToolTipToRichTextFilter::ToolTipToRichTextFilter(int size_threshold, QObject *parent) :
    QObject(parent), size_threshold(size_threshold)
{
}
// Intercept ToolTipChange events: tooltips longer than size_threshold that
// are not already rich text get HTML-escaped and wrapped in <qt></qt>.
// The "<qt" prefix check also keeps the re-set tooltip from being
// processed a second time.
bool ToolTipToRichTextFilter::eventFilter(QObject *obj, QEvent *evt)
{
    if(evt->type() == QEvent::ToolTipChange)
    {
        QWidget *widget = static_cast<QWidget*>(obj);
        QString tooltip = widget->toolTip();
        if(tooltip.size() > size_threshold && !tooltip.startsWith("<qt") && !Qt::mightBeRichText(tooltip))
        {
            // Envelop with <qt></qt> to make sure Qt detects this as rich text
            // Escape the current message as HTML and replace \n by <br>
            tooltip = "<qt>" + HtmlEscape(tooltip, true) + "</qt>";
            widget->setToolTip(tooltip);
            return true;
        }
    }
    return QObject::eventFilter(obj, evt);
}
#ifdef WIN32
// Location of the autostart shortcut in the user's Startup folder.
boost::filesystem::path static StartupShortcutPath()
{
    return GetSpecialFolderPath(CSIDL_STARTUP) / "Kumulcoin.lnk";
}
// Autostart is considered enabled iff the Startup-folder shortcut exists.
bool GetStartOnSystemStartup()
{
    return boost::filesystem::exists(StartupShortcutPath());
}
// Enable/disable launch-at-login by creating or removing a Startup-folder
// shortcut (with the "-min" argument) via the IShellLink COM interface.
bool SetStartOnSystemStartup(bool fAutoStart)
{
    // If the shortcut exists already, remove it for updating
    boost::filesystem::remove(StartupShortcutPath());
    if (fAutoStart)
    {
        CoInitialize(NULL);
        // Get a pointer to the IShellLink interface.
        IShellLink* psl = NULL;
        HRESULT hres = CoCreateInstance(CLSID_ShellLink, NULL,
            CLSCTX_INPROC_SERVER, IID_IShellLink,
            reinterpret_cast<void**>(&psl));
        if (SUCCEEDED(hres))
        {
            // Get the current executable path
            TCHAR pszExePath[MAX_PATH];
            // Fixed: GetModuleFileName takes the buffer size in TCHARs, not
            // bytes; plain sizeof() overstates it 2x on UNICODE builds.
            GetModuleFileName(NULL, pszExePath, sizeof(pszExePath) / sizeof(TCHAR));
            TCHAR pszArgs[5] = TEXT("-min");
            // Set the path to the shortcut target
            psl->SetPath(pszExePath);
            PathRemoveFileSpec(pszExePath);
            psl->SetWorkingDirectory(pszExePath);
            psl->SetShowCmd(SW_SHOWMINNOACTIVE);
            psl->SetArguments(pszArgs);
            // Query IShellLink for the IPersistFile interface for
            // saving the shortcut in persistent storage.
            IPersistFile* ppf = NULL;
            hres = psl->QueryInterface(IID_IPersistFile,
                reinterpret_cast<void**>(&ppf));
            if (SUCCEEDED(hres))
            {
                WCHAR pwsz[MAX_PATH];
                // Ensure that the string is ANSI.
                MultiByteToWideChar(CP_ACP, 0, StartupShortcutPath().string().c_str(), -1, pwsz, MAX_PATH);
                // Save the link by calling IPersistFile::Save.
                hres = ppf->Save(pwsz, TRUE);
                ppf->Release();
                psl->Release();
                CoUninitialize();
                return true;
            }
            psl->Release();
        }
        CoUninitialize();
        return false;
    }
    return true;
}
#elif defined(LINUX)
// Follow the Desktop Application Autostart Spec:
// http://standards.freedesktop.org/autostart-spec/autostart-spec-latest.html
//
// Resolve the XDG autostart directory: $XDG_CONFIG_HOME/autostart when set,
// otherwise $HOME/.config/autostart; an empty path when neither is available.
boost::filesystem::path static GetAutostartDir()
{
    namespace fs = boost::filesystem;

    if (char* pszConfigHome = getenv("XDG_CONFIG_HOME"))
        return fs::path(pszConfigHome) / "autostart";
    if (char* pszHome = getenv("HOME"))
        return fs::path(pszHome) / ".config" / "autostart";
    return fs::path();
}
// Full path of the .desktop entry that controls autostart for this app.
boost::filesystem::path static GetAutostartFilePath()
{
    const boost::filesystem::path desktopFile("kumulcoin.desktop");
    return GetAutostartDir() / desktopFile;
}
// Autostart is enabled when the .desktop file exists and does not disable
// itself with a "Hidden=true" line.
// @return true if the app will be started at login, false otherwise.
bool GetStartOnSystemStartup()
{
    boost::filesystem::ifstream optionFile(GetAutostartFilePath());
    if (!optionFile.good())
        return false;
    // Scan through file for "Hidden=true".
    // Loop on getline() itself rather than on eof(): testing eof() before
    // reading means the body also runs after a failed final read, re-examining
    // the stale contents of 'line'.
    std::string line;
    while (std::getline(optionFile, line))
    {
        if (line.find("Hidden") != std::string::npos &&
            line.find("true") != std::string::npos)
            return false;
    }
    optionFile.close();
    return true;
}
// Enable or disable start-on-login by writing or deleting the autostart
// .desktop entry.
// @param fAutoStart  true to install the entry, false to remove it.
// @return false when the running binary's path cannot be resolved or the
//         option file cannot be opened for writing; true otherwise.
bool SetStartOnSystemStartup(bool fAutoStart)
{
    if (!fAutoStart)
    {
        boost::filesystem::remove(GetAutostartFilePath());
        return true;
    }

    // Resolve the absolute path of the currently running executable.
    char pszExePath[MAX_PATH+1];
    memset(pszExePath, 0, sizeof(pszExePath));
    if (readlink("/proc/self/exe", pszExePath, sizeof(pszExePath)-1) == -1)
        return false;

    boost::filesystem::create_directories(GetAutostartDir());

    boost::filesystem::ofstream optionFile(GetAutostartFilePath(), std::ios_base::out|std::ios_base::trunc);
    if (!optionFile.good())
        return false;

    // Write the desktop entry to the autostart directory.
    optionFile << "[Desktop Entry]\n"
               << "Type=Application\n"
               << "Name=Kumulcoin\n"
               << "Exec=" << pszExePath << " -min\n"
               << "Terminal=false\n"
               << "Hidden=false\n";
    optionFile.close();
    return true;
}
#elif defined(Q_OS_MAC)
// based on: https://github.com/Mozketo/LaunchAtLoginController/blob/master/LaunchAtLoginController.m
#include <CoreFoundation/CoreFoundation.h>
#include <CoreServices/CoreServices.h>
LSSharedFileListItemRef findStartupItemInList(LSSharedFileListRef list, CFURLRef findUrl);
// Scan the given login-item list for the entry whose resolved URL equals
// findUrl. Returns the matching item, or NULL when none matches.
LSSharedFileListItemRef findStartupItemInList(LSSharedFileListRef list, CFURLRef findUrl)
{
    // loop through the list of startup items and try to find the bitcoin app
    CFArrayRef listSnapshot = LSSharedFileListCopySnapshot(list, NULL);
    // NOTE(review): listSnapshot comes from a CF "Copy" function and is never
    // CFRelease'd; releasing it here could invalidate the returned item, so a
    // proper fix must CFRetain the item first — TODO confirm and fix.
    for(int i = 0; i < CFArrayGetCount(listSnapshot); i++) {
        LSSharedFileListItemRef item = (LSSharedFileListItemRef)CFArrayGetValueAtIndex(listSnapshot, i);
        UInt32 resolutionFlags = kLSSharedFileListNoUserInteraction | kLSSharedFileListDoNotMountVolumes;
        CFURLRef currentItemURL = NULL;
        // "&currentItemURL" had been corrupted into the HTML entity "&curren;"
        // (rendered as '¤') by a previous edit; restore the address-of
        // expression so the out-parameter is actually populated.
        LSSharedFileListItemResolve(item, resolutionFlags, &currentItemURL, NULL);
        if(currentItemURL && CFEqual(currentItemURL, findUrl)) {
            // found
            CFRelease(currentItemURL);
            return item;
        }
        if(currentItemURL) {
            CFRelease(currentItemURL);
        }
    }
    return NULL;
}
bool GetStartOnSystemStartup()
{
CFURLRef bitcoinAppUrl = CFBundleCopyBundleURL(CFBundleGetMainBundle());
LSSharedFileListRef loginItems = LSSharedFileListCreate(NULL, kLSSharedFileListSessionLoginItems, NULL);
LSSharedFileListItemRef foundItem = findStartupItemInList(loginItems, bitcoinAppUrl);
return !!foundItem; // return boolified object
}
bool SetStartOnSystemStartup(bool fAutoStart)
{
CFURLRef bitcoinAppUrl = CFBundleCopyBundleURL(CFBundleGetMainBundle());
LSSharedFileListRef loginItems = LSSharedFileListCreate(NULL, kLSSharedFileListSessionLoginItems, NULL);
LSSharedFileListItemRef foundItem = findStartupItemInList(loginItems, bitcoinAppUrl);
if(fAutoStart && !foundItem) {
// add bitcoin app to startup item list
LSSharedFileListInsertItemURL(loginItems, kLSSharedFileListItemBeforeFirst, NULL, NULL, bitcoinAppUrl, NULL, NULL);
}
else if(!fAutoStart && foundItem) {
// remove item
LSSharedFileListItemRemove(loginItems, foundItem);
}
return true;
}
#else
// Fallback for platforms without start-on-login support: report the feature
// as disabled and refuse any attempt to change it.
bool GetStartOnSystemStartup() { return false; }
bool SetStartOnSystemStartup(bool fAutoStart) { return false; }
#endif
// Build the help dialog: compose the version/usage header, the core option
// list and the UI-specific options, then configure the message box.
HelpMessageBox::HelpMessageBox(QWidget *parent) :
    QMessageBox(parent)
{
    QString versionLine;
    versionLine += tr("Kumulcoin-Qt") + " " + tr("version") + " ";
    versionLine += QString::fromStdString(FormatFullVersion()) + "\n\n";
    versionLine += tr("Usage:") + "\n";
    versionLine += " kumulcoin-qt [" + tr("command-line options") + "] " + "\n";
    header = versionLine;

    coreOptions = QString::fromStdString(HelpMessage());

    QString guiOptions = tr("UI options") + ":\n";
    guiOptions += " -lang=<lang> " + tr("Set language, for example \"de_DE\" (default: system locale)") + "\n";
    guiOptions += " -min " + tr("Start minimized") + "\n";
    guiOptions += " -splash " + tr("Show splash screen on startup (default: 1)") + "\n";
    uiOptions = guiOptions;

    setWindowTitle(tr("Kumulcoin-Qt"));
    setTextFormat(Qt::PlainText);
    // setMinimumWidth is ignored for QMessageBox so put in non-breaking spaces to make it wider.
    setText(header + QString(QChar(0x2003)).repeated(50));
    setDetailedText(coreOptions + "\n" + uiOptions);
}
void HelpMessageBox::printToConsole()
{
// On other operating systems, the expected action is to print the message to the console.
QString strUsage = header + "\n" + coreOptions + "\n" + uiOptions;
fprintf(stdout, "%s", strUsage.toStdString().c_str());
}
// Present the help text in the way appropriate for the platform: a modal
// dialog on Windows (windowed apps have no console), stdout elsewhere.
void HelpMessageBox::showOrPrint()
{
#if defined(WIN32)
    // On Windows, show a message box, as there is no stderr/stdout in windowed applications
    exec();
#else
    // On other operating systems, print help text to console
    printToConsole();
#endif
}
} // namespace GUIUtil
|
//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Decl nodes as LLVM code.
//
//===----------------------------------------------------------------------===//
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGOpenCLRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
using namespace clang;
using namespace CodeGen;
/// EmitDecl - Emit code for a declaration appearing in a statement context
/// (i.e. inside a DeclStmt). Most kinds either need no code or are not
/// allowed here at all; the interesting cases are local variables, typedefs
/// of variably-modified types, and a few debug-info-only kinds.
void CodeGenFunction::EmitDecl(const Decl &D) {
  switch (D.getKind()) {
  // Kinds that can only appear at namespace/class scope: the parser should
  // never have placed them inside a DeclStmt.
  case Decl::BuiltinTemplate:
  case Decl::TranslationUnit:
  case Decl::ExternCContext:
  case Decl::Namespace:
  case Decl::UnresolvedUsingTypename:
  case Decl::ClassTemplateSpecialization:
  case Decl::ClassTemplatePartialSpecialization:
  case Decl::VarTemplateSpecialization:
  case Decl::VarTemplatePartialSpecialization:
  case Decl::TemplateTypeParm:
  case Decl::UnresolvedUsingValue:
  case Decl::NonTypeTemplateParm:
  case Decl::CXXDeductionGuide:
  case Decl::CXXMethod:
  case Decl::CXXConstructor:
  case Decl::CXXDestructor:
  case Decl::CXXConversion:
  case Decl::Field:
  case Decl::MSProperty:
  case Decl::IndirectField:
  case Decl::ObjCIvar:
  case Decl::ObjCAtDefsField:
  case Decl::ParmVar:
  case Decl::ImplicitParam:
  case Decl::ClassTemplate:
  case Decl::VarTemplate:
  case Decl::FunctionTemplate:
  case Decl::TypeAliasTemplate:
  case Decl::TemplateTemplateParm:
  case Decl::ObjCMethod:
  case Decl::ObjCCategory:
  case Decl::ObjCProtocol:
  case Decl::ObjCInterface:
  case Decl::ObjCCategoryImpl:
  case Decl::ObjCImplementation:
  case Decl::ObjCProperty:
  case Decl::ObjCCompatibleAlias:
  case Decl::PragmaComment:
  case Decl::PragmaDetectMismatch:
  case Decl::AccessSpec:
  case Decl::LinkageSpec:
  case Decl::Export:
  case Decl::ObjCPropertyImpl:
  case Decl::FileScopeAsm:
  case Decl::Friend:
  case Decl::FriendTemplate:
  case Decl::Block:
  case Decl::Captured:
  case Decl::ClassScopeFunctionSpecialization:
  case Decl::UsingShadow:
  case Decl::ConstructorUsingShadow:
  case Decl::ObjCTypeParam:
  case Decl::Binding:
    llvm_unreachable("Declaration should not be in declstmts!");
  // Kinds that may legally appear in a DeclStmt but require no code.
  case Decl::Function:  // void X();
  case Decl::Record:    // struct/union/class X;
  case Decl::Enum:      // enum X;
  case Decl::EnumConstant: // enum ? { X = ? }
  case Decl::CXXRecord: // struct/union/class X; [C++]
  case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
  case Decl::Label:        // __label__ x;
  case Decl::Import:
  case Decl::OMPThreadPrivate:
  case Decl::OMPCapturedExpr:
  case Decl::OMPRequires:
  case Decl::Empty:
    // None of these decls require codegen support.
    return;

  // Kinds that only matter when debug info is being generated.
  case Decl::NamespaceAlias:
    if (CGDebugInfo *DI = getDebugInfo())
        DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(D));
    return;
  case Decl::Using:          // using X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
        DI->EmitUsingDecl(cast<UsingDecl>(D));
    return;
  case Decl::UsingPack:
    // Expanded pack: recurse into each expansion.
    for (auto *Using : cast<UsingPackDecl>(D).expansions())
      EmitDecl(*Using);
    return;
  case Decl::UsingDirective: // using namespace X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D));
    return;
  case Decl::Var:
  case Decl::Decomposition: {
    // Local variables (including structured bindings) — the main case.
    const VarDecl &VD = cast<VarDecl>(D);
    assert(VD.isLocalVarDecl() &&
           "Should not see file-scope variables inside a function!");
    EmitVarDecl(VD);
    // A decomposition's bindings may each introduce a holding variable that
    // must be emitted as well.
    if (auto *DD = dyn_cast<DecompositionDecl>(&VD))
      for (auto *B : DD->bindings())
        if (auto *HD = B->getHoldingVar())
          EmitVarDecl(*HD);
    return;
  }

  case Decl::OMPDeclareReduction:
    return CGM.EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(&D), this);

  case Decl::Typedef:      // typedef int X;
  case Decl::TypeAlias: {  // using X = int; [C++0x]
    // A typedef of a VLA type still requires evaluating the bound expression
    // at the point of declaration.
    const TypedefNameDecl &TD = cast<TypedefNameDecl>(D);
    QualType Ty = TD.getUnderlyingType();
    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }
  }
}
/// EmitVarDecl - This method handles emission of any variable declaration
/// inside a function, including static vars etc. Dispatches to the static,
/// OpenCL-local, or automatic emission path based on storage duration and
/// address space.
void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
  if (D.hasExternalStorage())
    // Don't emit it now, allow it to be emitted lazily on its first use.
    return;

  // Some function-scope variable does not have static storage but still
  // needs to be emitted like a static variable, e.g. a function-scope
  // variable in constant address space in OpenCL.
  if (D.getStorageDuration() != SD_Automatic) {
    // Static sampler variables translated to function calls.
    if (D.getType()->isSamplerT())
      return;

    llvm::GlobalValue::LinkageTypes Linkage =
        CGM.getLLVMLinkageVarDefinition(&D, /*isConstant=*/false);

    // FIXME: We need to force the emission/use of a guard variable for
    // some variables even if we can constant-evaluate them because
    // we can't guarantee every translation unit will constant-evaluate them.
    return EmitStaticVarDecl(D, Linkage);
  }

  // OpenCL __local variables are materialized by the OpenCL runtime rather
  // than as ordinary automatic allocas.
  if (D.getType().getAddressSpace() == LangAS::opencl_local)
    return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);

  assert(D.hasLocalStorage());
  return EmitAutoVarDecl(D);
}
/// Build the global-variable name used for a function-local static.
/// C++ uses the full mangled name; C/ObjC use "<context>.<var>" since the
/// symbol is never externally visible.
static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) {
  if (CGM.getLangOpts().CPlusPlus)
    return CGM.getMangledName(&D).str();

  // If this isn't C++, we don't need a mangled name, just a pretty one.
  assert(!D.isExternallyVisible() && "name shouldn't matter");

  // Captured statements hand off naming to their enclosing non-closure
  // context.
  const DeclContext *DC = D.getDeclContext();
  if (auto *CD = dyn_cast<CapturedDecl>(DC))
    DC = cast<DeclContext>(CD->getNonClosureContext());

  std::string Prefix;
  if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    Prefix = CGM.getMangledName(FD);
  else if (const auto *BD = dyn_cast<BlockDecl>(DC))
    Prefix = CGM.getBlockMangledName(GlobalDecl(), BD);
  else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
    Prefix = OMD->getSelector().getAsString();
  else
    llvm_unreachable("Unknown context for static var decl");

  Prefix += "." + D.getNameAsString();
  return Prefix;
}
/// Get (or lazily create) the LLVM global backing a function-local static
/// variable. Returns a constant of the declaration's expected address
/// space; may be an addrspacecast of the underlying global. Also arranges
/// for the enclosing function to be emitted so the static actually gets
/// initialized.
llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
    const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage) {
  // In general, we don't always emit static var decls once before we reference
  // them. It is possible to reference them before emitting the function that
  // contains them, and it is possible to emit the containing function multiple
  // times.
  if (llvm::Constant *ExistingGV = StaticLocalDeclMap[&D])
    return ExistingGV;

  QualType Ty = D.getType();
  assert(Ty->isConstantSizeType() && "VLAs can't be static");

  // Use the label if the variable is renamed with the asm-label extension.
  std::string Name;
  if (D.hasAttr<AsmLabelAttr>())
    Name = getMangledName(&D);
  else
    Name = getStaticDeclName(*this, D);

  llvm::Type *LTy = getTypes().ConvertTypeForMem(Ty);
  LangAS AS = GetGlobalVarAddressSpace(&D);
  unsigned TargetAS = getContext().getTargetAddressSpace(AS);

  // OpenCL variables in local address space and CUDA shared
  // variables cannot have an initializer.
  llvm::Constant *Init = nullptr;
  if (Ty.getAddressSpace() == LangAS::opencl_local ||
      D.hasAttr<CUDASharedAttr>())
    Init = llvm::UndefValue::get(LTy);
  else
    Init = EmitNullConstant(Ty);

  llvm::GlobalVariable *GV = new llvm::GlobalVariable(
      getModule(), LTy, Ty.isConstant(getContext()), Linkage, Init, Name,
      nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
  GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());

  if (supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));

  if (D.getTLSKind())
    setTLSMode(GV, D);

  setGVProperties(GV, &D);

  // Make sure the result is of the correct type.
  LangAS ExpectedAS = Ty.getAddressSpace();
  llvm::Constant *Addr = GV;
  if (AS != ExpectedAS) {
    Addr = getTargetCodeGenInfo().performAddrSpaceCast(
        *this, GV, AS, ExpectedAS,
        LTy->getPointerTo(getContext().getTargetAddressSpace(ExpectedAS)));
  }

  setStaticLocalDeclAddress(&D, Addr);

  // Ensure that the static local gets initialized by making sure the parent
  // function gets emitted eventually.
  const Decl *DC = cast<Decl>(D.getDeclContext());

  // We can't name blocks or captured statements directly, so try to emit their
  // parents.
  if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC)) {
    DC = DC->getNonClosureContext();
    // FIXME: Ensure that global blocks get emitted.
    if (!DC)
      return Addr;
  }

  GlobalDecl GD;
  if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
    GD = GlobalDecl(CD, Ctor_Base);
  else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
    GD = GlobalDecl(DD, Dtor_Base);
  else if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    GD = GlobalDecl(FD);
  else {
    // Don't do anything for Obj-C method decls or global closures. We should
    // never defer them.
    assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl");
  }
  if (GD.getDecl()) {
    // Disable emission of the parent function for the OpenMP device codegen.
    CGOpenMPRuntime::DisableAutoDeclareTargetRAII NoDeclTarget(*this);
    (void)GetAddrOfGlobal(GD);
  }

  return Addr;
}
/// hasNontrivialDestruction - Determine whether a type's destruction is
/// non-trivial. If so, and the variable uses static initialization, we must
/// register its destructor to run on exit.
static bool hasNontrivialDestruction(QualType T) {
  const CXXRecordDecl *Record =
      T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
  if (!Record)
    return false;
  return !Record->hasTrivialDestructor();
}
/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it. If the initializer
/// has a different type than GV does, this may free GV and return a different
/// one. Otherwise it just returns GV.
llvm::GlobalVariable *
CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
                                               llvm::GlobalVariable *GV) {
  ConstantEmitter emitter(*this);
  llvm::Constant *Init = emitter.tryEmitForInitializer(D);

  // If constant emission failed, then this should be a C++ static
  // initializer.
  if (!Init) {
    if (!getLangOpts().CPlusPlus)
      CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
    else if (HaveInsertPoint()) {
      // Since we have a static initializer, this global variable can't
      // be constant.
      GV->setConstant(false);

      // Fall back to a run-time guarded initialization.
      EmitCXXGuardedInit(D, GV, /*PerformInit*/true);
    }
    return GV;
  }

  // The initializer may differ in type from the global. Rewrite
  // the global to match the initializer. (We have to do this
  // because some types, like unions, can't be completely represented
  // in the LLVM type system.)
  if (GV->getType()->getElementType() != Init->getType()) {
    llvm::GlobalVariable *OldGV = GV;

    // Re-create the global with the initializer's type, copying over all
    // properties of the old one.
    GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
                                  OldGV->isConstant(),
                                  OldGV->getLinkage(), Init, "",
                                  /*InsertBefore*/ OldGV,
                                  OldGV->getThreadLocalMode(),
                           CGM.getContext().getTargetAddressSpace(D.getType()));
    GV->setVisibility(OldGV->getVisibility());
    GV->setDSOLocal(OldGV->isDSOLocal());
    GV->setComdat(OldGV->getComdat());

    // Steal the name of the old global
    GV->takeName(OldGV);

    // Replace all uses of the old global with the new global
    llvm::Constant *NewPtrForOldDecl =
        llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
    OldGV->replaceAllUsesWith(NewPtrForOldDecl);

    // Erase the old global, since it is no longer used.
    OldGV->eraseFromParent();
  }

  GV->setConstant(CGM.isTypeConstant(D.getType(), true));
  GV->setInitializer(Init);

  emitter.finalize(GV);

  if (hasNontrivialDestruction(D.getType()) && HaveInsertPoint()) {
    // We have a constant initializer, but a nontrivial destructor. We still
    // need to perform a guarded "initialization" in order to register the
    // destructor.
    EmitCXXGuardedInit(D, GV, /*PerformInit*/false);
  }

  return GV;
}
/// Emit a function-local static variable: create/find its backing global,
/// attach the initializer, and record the (possibly bit-cast) address in the
/// local declaration maps.
void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
                                    llvm::GlobalValue::LinkageTypes Linkage) {
  // Check to see if we already have a global variable for this
  // declaration. This can happen when double-emitting function
  // bodies, e.g. with complete and base constructors.
  llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage);
  CharUnits alignment = getContext().getDeclAlign(&D);

  // Store into LocalDeclMap before generating initializer to handle
  // circular references.
  setAddrOfLocalVar(&D, Address(addr, alignment));

  // We can't have a VLA here, but we can have a pointer to a VLA,
  // even though that doesn't really make any sense.
  // Make sure to evaluate VLA bounds now so that we have them for later.
  if (D.getType()->isVariablyModifiedType())
    EmitVariablyModifiedType(D.getType());

  // Save the type in case adding the initializer forces a type change.
  llvm::Type *expectedType = addr->getType();

  llvm::GlobalVariable *var =
    cast<llvm::GlobalVariable>(addr->stripPointerCasts());

  // CUDA's local and local static __shared__ variables should not
  // have any non-empty initializers. This is ensured by Sema.
  // Whatever initializer such variable may have when it gets here is
  // a no-op and should not be emitted.
  bool isCudaSharedVar = getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
                         D.hasAttr<CUDASharedAttr>();
  // If this value has an initializer, emit it.
  if (D.getInit() && !isCudaSharedVar)
    var = AddInitializerToStaticVarDecl(D, var);

  var->setAlignment(alignment.getQuantity());

  // Propagate annotation and section attributes onto the global.
  if (D.hasAttr<AnnotateAttr>())
    CGM.AddGlobalAnnotations(&D, var);

  if (auto *SA = D.getAttr<PragmaClangBSSSectionAttr>())
    var->addAttribute("bss-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangDataSectionAttr>())
    var->addAttribute("data-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangRodataSectionAttr>())
    var->addAttribute("rodata-section", SA->getName());

  if (const SectionAttr *SA = D.getAttr<SectionAttr>())
    var->setSection(SA->getName());

  if (D.hasAttr<UsedAttr>())
    CGM.addUsedGlobal(var);

  // We may have to cast the constant because of the initializer
  // mismatch above.
  //
  // FIXME: It is really dangerous to store this in the map; if anyone
  // RAUW's the GV uses of this constant will be invalid.
  llvm::Constant *castedAddr =
    llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
  if (var != castedAddr)
    LocalDeclMap.find(&D)->second = Address(castedAddr, alignment);
  CGM.setStaticLocalDeclAddress(&D, castedAddr);

  CGM.getSanitizerMetadata()->reportGlobalToASan(var, D);

  // Emit global variable debug descriptor for static vars.
  CGDebugInfo *DI = getDebugInfo();
  if (DI &&
      CGM.getCodeGenOpts().getDebugInfo() >= codegenoptions::LimitedDebugInfo) {
    DI->setLocation(D.getLocation());
    DI->EmitGlobalVariable(var, &D);
  }
}
namespace {
  /// Cleanup that runs a destroyer function over an object when its scope
  /// ends (the generic per-object destruction cleanup).
  struct DestroyObject final : EHScopeStack::Cleanup {
    DestroyObject(Address addr, QualType type,
                  CodeGenFunction::Destroyer *destroyer,
                  bool useEHCleanupForArray)
      : addr(addr), type(type), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

    Address addr;
    QualType type;
    CodeGenFunction::Destroyer *destroyer;
    bool useEHCleanupForArray;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Don't use an EH cleanup recursively from an EH cleanup.
      bool useEHCleanupForArray =
        flags.isForNormalCleanup() && this->useEHCleanupForArray;

      CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
    }
  };

  /// CRTP base for destroying a variable that may have been NRVO'd: on the
  /// normal path the destructor call is skipped when the NRVO flag says the
  /// value was moved into the return slot. Derived classes supply
  /// emitDestructorCall().
  template <class Derived>
  struct DestroyNRVOVariable : EHScopeStack::Cleanup {
    DestroyNRVOVariable(Address addr, llvm::Value *NRVOFlag)
        : NRVOFlag(NRVOFlag), Loc(addr) {}

    llvm::Value *NRVOFlag;
    Address Loc;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Along the exceptions path we always execute the dtor.
      bool NRVO = flags.isForNormalCleanup() && NRVOFlag;

      llvm::BasicBlock *SkipDtorBB = nullptr;
      if (NRVO) {
        // If we exited via NRVO, we skip the destructor call.
        llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
        SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
        llvm::Value *DidNRVO =
          CGF.Builder.CreateFlagLoad(NRVOFlag, "nrvo.val");
        CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
        CGF.EmitBlock(RunDtorBB);
      }

      static_cast<Derived *>(this)->emitDestructorCall(CGF);

      if (NRVO) CGF.EmitBlock(SkipDtorBB);
    }

    virtual ~DestroyNRVOVariable() = default;
  };

  /// NRVO cleanup for a C++ class type: call its complete-object destructor.
  struct DestroyNRVOVariableCXX final
      : DestroyNRVOVariable<DestroyNRVOVariableCXX> {
    DestroyNRVOVariableCXX(Address addr, const CXXDestructorDecl *Dtor,
                           llvm::Value *NRVOFlag)
      : DestroyNRVOVariable<DestroyNRVOVariableCXX>(addr, NRVOFlag),
        Dtor(Dtor) {}

    const CXXDestructorDecl *Dtor;

    void emitDestructorCall(CodeGenFunction &CGF) {
      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                /*ForVirtualBase=*/false,
                                /*Delegating=*/false, Loc);
    }
  };

  /// NRVO cleanup for a non-trivial C struct (e.g. one with __strong
  /// members under ARC).
  struct DestroyNRVOVariableC final
      : DestroyNRVOVariable<DestroyNRVOVariableC> {
    DestroyNRVOVariableC(Address addr, llvm::Value *NRVOFlag, QualType Ty)
        : DestroyNRVOVariable<DestroyNRVOVariableC>(addr, NRVOFlag), Ty(Ty) {}

    QualType Ty;

    void emitDestructorCall(CodeGenFunction &CGF) {
      CGF.destroyNonTrivialCStruct(CGF, Loc, Ty);
    }
  };

  /// Cleanup that restores the stack pointer saved before a VLA allocation
  /// via llvm.stackrestore.
  struct CallStackRestore final : EHScopeStack::Cleanup {
    Address Stack;
    CallStackRestore(Address Stack) : Stack(Stack) {}
    void Emit(CodeGenFunction &CGF, Flags flags) override {
      llvm::Value *V = CGF.Builder.CreateLoad(Stack);
      llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
      CGF.Builder.CreateCall(F, V);
    }
  };

  /// Cleanup that keeps a GC-managed object alive to end of scope by
  /// re-loading it and calling objc_gc_lifetime at cleanup time.
  struct ExtendGCLifetime final : EHScopeStack::Cleanup {
    const VarDecl &Var;
    ExtendGCLifetime(const VarDecl *var) : Var(*var) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Compute the address of the local variable, in case it's a
      // byref or something.
      DeclRefExpr DRE(const_cast<VarDecl*>(&Var), false,
                      Var.getType(), VK_LValue, SourceLocation());
      llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE),
                                                SourceLocation());
      CGF.EmitExtendGCLifetime(value);
    }
  };

  /// Cleanup that invokes a __attribute__((cleanup(f))) function with the
  /// variable's address at scope exit.
  struct CallCleanupFunction final : EHScopeStack::Cleanup {
    llvm::Constant *CleanupFn;
    const CGFunctionInfo &FnInfo;
    const VarDecl &Var;

    CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
                        const VarDecl *Var)
      : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      DeclRefExpr DRE(const_cast<VarDecl*>(&Var), false,
                      Var.getType(), VK_LValue, SourceLocation());
      // Compute the address of the local variable, in case it's a byref
      // or something.
      llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getPointer();

      // In some cases, the type of the function argument will be different from
      // the type of the pointer. An example of this is
      //    void f(void* arg);
      //    __attribute__((cleanup(f))) void *g;
      //
      // To fix this we insert a bitcast here.
      QualType ArgTy = FnInfo.arg_begin()->type;
      llvm::Value *Arg =
        CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));

      CallArgList Args;
      Args.add(RValue::get(Arg),
               CGF.getContext().getPointerType(Var.getType()));
      auto Callee = CGCallee::forDirect(CleanupFn);
      CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args);
    }
  };
} // end anonymous namespace
/// EmitAutoVarWithLifetime - Does the setup required for an automatic
/// variable with lifetime. Pushes the appropriate release/destroy cleanup
/// for the variable's ARC ownership qualifier.
static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
                                    Address addr,
                                    Qualifiers::ObjCLifetime lifetime) {
  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_ExplicitNone:
    // nothing to do
    break;

  case Qualifiers::OCL_Strong: {
    // __strong: release on scope exit; the precise-lifetime attribute
    // controls whether the optimizer may shorten the lifetime.
    CodeGenFunction::Destroyer *destroyer =
      (var.hasAttr<ObjCPreciseLifetimeAttr>()
       ? CodeGenFunction::destroyARCStrongPrecise
       : CodeGenFunction::destroyARCStrongImprecise);

    CleanupKind cleanupKind = CGF.getARCCleanupKind();
    CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer,
                    cleanupKind & EHCleanup);
    break;
  }
  case Qualifiers::OCL_Autoreleasing:
    // nothing to do
    break;

  case Qualifiers::OCL_Weak:
    // __weak objects always get EH cleanups; otherwise, exceptions
    // could cause really nasty crashes instead of mere leaks.
    CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(),
                    CodeGenFunction::destroyARCWeak,
                    /*useEHCleanup*/ true);
    break;
  }
}
static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
if (const Expr *e = dyn_cast<Expr>(s)) {
// Skip the most common kinds of expressions that make
// hierarchy-walking expensive.
s = e = e->IgnoreParenCasts();
if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
return (ref->getDecl() == &var);
if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
const BlockDecl *block = be->getBlockDecl();
for (const auto &I : block->captures()) {
if (I.getVariable() == &var)
return true;
}
}
}
for (const Stmt *SubStmt : s->children())
// SubStmt might be null; as in missing decl or conditional of an if-stmt.
if (SubStmt && isAccessedBy(var, SubStmt))
return true;
return false;
}
static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
if (!decl) return false;
if (!isa<VarDecl>(decl)) return false;
const VarDecl *var = cast<VarDecl>(decl);
return isAccessedBy(*var, e);
}
/// Attempt to emit an initialization of a __weak destination directly from
/// another __weak l-value using objc_copyWeak / objc_moveWeak, looking
/// through representation-preserving casts. Returns true if the
/// initialization was fully emitted here.
static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
                                   const LValue &destLV, const Expr *init) {
  bool needsCast = false;

  while (auto castExpr = dyn_cast<CastExpr>(init->IgnoreParens())) {
    switch (castExpr->getCastKind()) {
    // Look through casts that don't require representation changes.
    case CK_NoOp:
    case CK_BitCast:
    case CK_BlockPointerToObjCPointerCast:
      needsCast = true;
      break;

    // If we find an l-value to r-value cast from a __weak variable,
    // emit this operation as a copy or move.
    case CK_LValueToRValue: {
      const Expr *srcExpr = castExpr->getSubExpr();
      if (srcExpr->getType().getObjCLifetime() != Qualifiers::OCL_Weak)
        return false;

      // Emit the source l-value.
      LValue srcLV = CGF.EmitLValue(srcExpr);

      // Handle a formal type change to avoid asserting.
      auto srcAddr = srcLV.getAddress();
      if (needsCast) {
        srcAddr = CGF.Builder.CreateElementBitCast(srcAddr,
                                         destLV.getAddress().getElementType());
      }

      // If it was an l-value, use objc_copyWeak.
      if (srcExpr->getValueKind() == VK_LValue) {
        CGF.EmitARCCopyWeak(destLV.getAddress(), srcAddr);
      } else {
        // An x-value allows the cheaper move, which leaves the source nil.
        assert(srcExpr->getValueKind() == VK_XValue);
        CGF.EmitARCMoveWeak(destLV.getAddress(), srcAddr);
      }
      return true;
    }

    // Stop at anything else.
    default:
      return false;
    }

    init = castExpr->getSubExpr();
  }
  return false;
}
/// Rewrite 'lvalue' so it addresses the __block byref slot for 'var'
/// instead of the stack copy.
static void drillIntoBlockVariable(CodeGenFunction &CGF,
                                   LValue &lvalue,
                                   const VarDecl *var) {
  Address byrefAddr = CGF.emitBlockByrefAddress(lvalue.getAddress(), var);
  lvalue.setAddress(byrefAddr);
}
/// Emit a -fsanitize=nullability-assign check: when storing RHS into a
/// _Nonnull l-value, verify at runtime that RHS is not null. No-op unless
/// the sanitizer is enabled and LHS's type is annotated NonNull.
void CodeGenFunction::EmitNullabilityCheck(LValue LHS, llvm::Value *RHS,
                                           SourceLocation Loc) {
  if (!SanOpts.has(SanitizerKind::NullabilityAssign))
    return;

  auto Nullability = LHS.getType()->getNullability(getContext());
  if (!Nullability || *Nullability != NullabilityKind::NonNull)
    return;

  // Check if the right hand side of the assignment is nonnull, if the left
  // hand side must be nonnull.
  SanitizerScope SanScope(this);
  llvm::Value *IsNotNull = Builder.CreateIsNotNull(RHS);
  // Reuses the TypeMismatch handler's static data layout; alignment is
  // irrelevant for this check so a zero placeholder is passed.
  llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(LHS.getType()),
      llvm::ConstantInt::get(Int8Ty, 0), // The LogAlignment info is unused.
      llvm::ConstantInt::get(Int8Ty, TCK_NonnullAssign)};
  EmitCheck({{IsNotNull, SanitizerKind::NullabilityAssign}},
            SanitizerHandler::TypeMismatch, StaticData, RHS);
}
/// Emit the initialization of a scalar l-value from 'init', honoring the
/// destination's ARC ownership qualifier. Handles __block captures, weak
/// copy/move fast paths, and the zero-init-then-assign dance needed when
/// the initializer might observe the variable.
void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
                                     LValue lvalue, bool capturedByInit) {
  Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
  if (!lifetime) {
    // Unqualified destination: plain scalar store.
    llvm::Value *value = EmitScalarExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitNullabilityCheck(lvalue, value, init->getExprLoc());
    EmitStoreThroughLValue(RValue::get(value), lvalue, true);
    return;
  }

  if (const CXXDefaultInitExpr *DIE = dyn_cast<CXXDefaultInitExpr>(init))
    init = DIE->getExpr();

  // If we're emitting a value with lifetime, we have to do the
  // initialization *before* we leave the cleanup scopes.
  if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(init)) {
    enterFullExpression(ewc);
    init = ewc->getSubExpr();
  }
  CodeGenFunction::RunCleanupsScope Scope(*this);

  // We have to maintain the illusion that the variable is
  // zero-initialized. If the variable might be accessed in its
  // initializer, zero-initialize before running the initializer, then
  // actually perform the initialization with an assign.
  bool accessedByInit = false;
  if (lifetime != Qualifiers::OCL_ExplicitNone)
    accessedByInit = (capturedByInit || isAccessedBy(D, init));
  if (accessedByInit) {
    LValue tempLV = lvalue;
    // Drill down to the __block object if necessary.
    if (capturedByInit) {
      // We can use a simple GEP for this because it can't have been
      // moved yet.
      tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(),
                                              cast<VarDecl>(D),
                                              /*follow*/ false));
    }

    auto ty = cast<llvm::PointerType>(tempLV.getAddress().getElementType());
    llvm::Value *zero = CGM.getNullPointer(ty, tempLV.getType());

    // If __weak, we want to use a barrier under certain conditions.
    if (lifetime == Qualifiers::OCL_Weak)
      EmitARCInitWeak(tempLV.getAddress(), zero);

    // Otherwise just do a simple store.
    else
      EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
  }

  // Emit the initializer.
  llvm::Value *value = nullptr;

  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_ExplicitNone:
    value = EmitARCUnsafeUnretainedScalarExpr(init);
    break;

  case Qualifiers::OCL_Strong: {
    value = EmitARCRetainScalarExpr(init);
    break;
  }

  case Qualifiers::OCL_Weak: {
    // If it's not accessed by the initializer, try to emit the
    // initialization with a copy or move.
    if (!accessedByInit && tryEmitARCCopyWeakInit(*this, lvalue, init)) {
      return;
    }

    // No way to optimize a producing initializer into this. It's not
    // worth optimizing for, because the value will immediately
    // disappear in the common case.
    value = EmitScalarExpr(init);

    if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    if (accessedByInit)
      EmitARCStoreWeak(lvalue.getAddress(), value, /*ignored*/ true);
    else
      EmitARCInitWeak(lvalue.getAddress(), value);
    return;
  }

  case Qualifiers::OCL_Autoreleasing:
    value = EmitARCRetainAutoreleaseScalarExpr(init);
    break;
  }

  if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));

  EmitNullabilityCheck(lvalue, value, init->getExprLoc());

  // If the variable might have been accessed by its initializer, we
  // might have to initialize with a barrier. We have to do this for
  // both __weak and __strong, but __weak got filtered out above.
  if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
    llvm::Value *oldValue = EmitLoadOfScalar(lvalue, init->getExprLoc());
    EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
    EmitARCRelease(oldValue, ARCImpreciseLifetime);
    return;
  }

  EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
}
/// Decide whether we can emit the non-zero parts of the specified initializer
/// with equal or fewer than NumStores scalar stores.
static bool canEmitInitWithFewStoresAfterBZero(llvm::Constant *Init,
                                               unsigned &NumStores) {
  // Zeros and undefs cost nothing extra: the preceding bzero already
  // produced the right bytes.
  if (isa<llvm::ConstantAggregateZero>(Init) ||
      isa<llvm::ConstantPointerNull>(Init) ||
      isa<llvm::UndefValue>(Init))
    return true;

  // Scalar-like leaves each consume one store from the budget, unless they
  // happen to be zero.
  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init)) {
    if (Init->isNullValue())
      return true;
    // Post-decrement: charge the budget and fail only if it was already 0.
    return NumStores-- != 0;
  }

  // Aggregates are cheap enough iff every element fits the remaining budget.
  if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
    for (unsigned Idx = 0, Count = Init->getNumOperands(); Idx != Count; ++Idx)
      if (!canEmitInitWithFewStoresAfterBZero(
              cast<llvm::Constant>(Init->getOperand(Idx)), NumStores))
        return false;
    return true;
  }

  if (auto *CDS = dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned Idx = 0, Count = CDS->getNumElements(); Idx != Count; ++Idx)
      if (!canEmitInitWithFewStoresAfterBZero(CDS->getElementAsConstant(Idx),
                                              NumStores))
        return false;
    return true;
  }

  // Anything else is hard and scary.
  return false;
}
/// For inits that canEmitInitWithFewStoresAfterBZero returned true for, emit
/// the scalar stores that would be required.
static void emitStoresForInitAfterBZero(CodeGenModule &CGM,
                                        llvm::Constant *Init, Address Loc,
                                        bool isVolatile, CGBuilderTy &Builder) {
  assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) &&
         "called emitStoresForInitAfterBZero for zero or undef value.");

  // A scalar-like leaf is emitted with one plain store.
  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init)) {
    Builder.CreateStore(Init, Loc, isVolatile);
    return;
  }

  // Recurse into element Idx at its own offset, skipping elements the bzero
  // already handled (zero or undef).
  auto emitElement = [&](llvm::Constant *Elt, unsigned Idx) {
    if (Elt->isNullValue() || isa<llvm::UndefValue>(Elt))
      return;
    emitStoresForInitAfterBZero(
        CGM, Elt,
        Builder.CreateConstInBoundsGEP2_32(Loc, 0, Idx, CGM.getDataLayout()),
        isVolatile, Builder);
  };

  if (auto *CDS = dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned Idx = 0, Count = CDS->getNumElements(); Idx != Count; ++Idx)
      emitElement(CDS->getElementAsConstant(Idx), Idx);
    return;
  }

  assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
         "Unknown value type!");
  for (unsigned Idx = 0, Count = Init->getNumOperands(); Idx != Count; ++Idx)
    emitElement(cast<llvm::Constant>(Init->getOperand(Idx)), Idx);
}
/// Decide whether we should use bzero plus some stores to initialize a local
/// variable instead of using a memcpy from a constant global.  It is
/// beneficial to use bzero if the global is all zeros, or mostly zeros and
/// large.
static bool shouldUseBZeroPlusStoresToInitialize(llvm::Constant *Init,
                                                 uint64_t GlobalSize) {
  // An all-zero global is always best expressed as a bzero.
  if (isa<llvm::ConstantAggregateZero>(Init))
    return true;

  // A non-zero global that is <= 32 bytes is always a memcpy.
  // TODO: Should the budget depend on the size?  Avoiding a large global
  // warrants plopping in more stores.
  uint64_t SizeLimit = 32;
  if (GlobalSize <= SizeLimit)
    return false;

  // Larger globals use bzero only if at most 6 scalar stores can patch in
  // the non-zero parts afterwards.
  unsigned StoreBudget = 6;
  return canEmitInitWithFewStoresAfterBZero(Init, StoreBudget);
}
/// Decide whether we should use memset to initialize a local variable instead
/// of using a memcpy from a constant global.  Assumes we've already decided
/// to not use bzero.
/// FIXME We could be more clever, as we are for bzero above, and generate
///       memset followed by stores.  It's unclear that's worth the effort.
/// \return the repeated byte value as an llvm::Value, or null if the
///         initializer is not a repeated byte or the global is small.
static llvm::Value *shouldUseMemSetToInitialize(llvm::Constant *Init,
                                                uint64_t GlobalSize) {
  uint64_t SizeLimit = 32;
  // Small globals stay a memcpy; otherwise ask whether the initializer is a
  // single repeated byte.
  return GlobalSize > SizeLimit ? llvm::isBytewiseValue(Init) : nullptr;
}
/// Initialize the memory at \p Loc from \p constant, choosing the cheapest
/// strategy: bzero plus a few fix-up stores, a repeated-byte memset, or a
/// memcpy from a private constant global.
static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
                                  Address Loc, bool isVolatile,
                                  CGBuilderTy &Builder,
                                  llvm::Constant *constant) {
  auto *Int8Ty = llvm::IntegerType::getInt8Ty(CGM.getLLVMContext());
  auto *IntPtrTy = CGM.getDataLayout().getIntPtrType(CGM.getLLVMContext());
  uint64_t ConstantSize =
      CGM.getDataLayout().getTypeAllocSize(constant->getType());
  auto *SizeVal = llvm::ConstantInt::get(IntPtrTy, ConstantSize);

  // Strategy 1: mostly-zero initializers get a bzero, then the few non-zero
  // pieces are stored on top of it.
  if (shouldUseBZeroPlusStoresToInitialize(constant, ConstantSize)) {
    Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
                         isVolatile);

    bool valueAlreadyCorrect =
        constant->isNullValue() || isa<llvm::UndefValue>(constant);
    if (!valueAlreadyCorrect) {
      Loc = Builder.CreateBitCast(
          Loc, constant->getType()->getPointerTo(Loc.getAddressSpace()));
      emitStoresForInitAfterBZero(CGM, constant, Loc, isVolatile, Builder);
    }
    return;
  }

  // Strategy 2: a repeated-byte initializer becomes a single memset with
  // that byte (undef patterns use 0x00).
  if (llvm::Value *Pattern =
          shouldUseMemSetToInitialize(constant, ConstantSize)) {
    uint64_t Value = 0x00;
    if (!isa<llvm::UndefValue>(Pattern)) {
      const llvm::APInt &AP = cast<llvm::ConstantInt>(Pattern)->getValue();
      assert(AP.getBitWidth() <= 8);
      Value = AP.getLimitedValue();
    }
    Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, Value), SizeVal,
                         isVolatile);
    return;
  }

  // Strategy 3: stash the initializer in a private constant global, then
  // memcpy from the global to the alloca.
  std::string Name = getStaticDeclName(CGM, D);
  unsigned AS = CGM.getContext().getTargetAddressSpace(
      CGM.getStringLiteralAddressSpace());
  llvm::Type *BP = llvm::Type::getInt8PtrTy(CGM.getLLVMContext(), AS);

  auto *GV = new llvm::GlobalVariable(
      CGM.getModule(), constant->getType(), /*isConstant=*/true,
      llvm::GlobalValue::PrivateLinkage, constant, Name, nullptr,
      llvm::GlobalValue::NotThreadLocal, AS);
  GV->setAlignment(Loc.getAlignment().getQuantity());
  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  Address SrcPtr = Address(GV, Loc.getAlignment());
  if (SrcPtr.getType() != BP)
    SrcPtr = Builder.CreateBitCast(SrcPtr, BP);

  Builder.CreateMemCpy(Loc, SrcPtr, SizeVal, isVolatile);
}
/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
AutoVarEmission emission = EmitAutoVarAlloca(D);
EmitAutoVarInit(emission);
EmitAutoVarCleanups(emission);
}
/// Emit a lifetime.begin marker if some criteria are satisfied.
/// \return a pointer to the temporary size Value if a marker was emitted,
///         null otherwise
llvm::Value *CodeGenFunction::EmitLifetimeStart(uint64_t Size,
                                                llvm::Value *Addr) {
  // Lifetime markers may be globally disabled for this function.
  if (!ShouldEmitLifetimeMarkers)
    return nullptr;

  assert(Addr->getType()->getPointerAddressSpace() ==
             CGM.getDataLayout().getAllocaAddrSpace() &&
         "Pointer should be in alloca address space");

  // Materialize the size as an i64 and cast the pointer to i8* in the
  // alloca address space, as the intrinsic expects.
  llvm::Value *SizeV = llvm::ConstantInt::get(Int64Ty, Size);
  Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
  llvm::CallInst *Marker =
      Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
  Marker->setDoesNotThrow();
  return SizeV;
}
/// Emit a lifetime.end marker for \p Addr with the size previously returned
/// by EmitLifetimeStart.
void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) {
  assert(Addr->getType()->getPointerAddressSpace() ==
             CGM.getDataLayout().getAllocaAddrSpace() &&
         "Pointer should be in alloca address space");

  // Cast to i8* in the alloca address space, as the intrinsic expects.
  Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
  llvm::CallInst *Marker =
      Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr});
  Marker->setDoesNotThrow();
}
/// Collect the per-dimension VLA size expressions for the array-typed
/// variable \p D and, when debug info is requested, register each of them
/// with the debug-info generator \p DI so DISubranges can reference them.
void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
    CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo) {
  // For each dimension stores its QualType and corresponding
  // size-expression Value.
  SmallVector<CodeGenFunction::VlaSizePair, 4> Dimensions;

  // Break down the array into individual dimensions, outermost first.
  QualType Type1D = D.getType();
  while (getContext().getAsVariableArrayType(Type1D)) {
    auto VlaSize = getVLAElements1D(Type1D);
    if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
      // Constant dimension: record the constant directly.
      Dimensions.emplace_back(C, Type1D.getUnqualifiedType());
    else {
      // Runtime dimension: spill the size into a named temporary so the
      // debugger has an addressable location for it.
      auto SizeExprAddr = CreateDefaultAlignTempAlloca(
          VlaSize.NumElts->getType(), "__vla_expr");
      Builder.CreateStore(VlaSize.NumElts, SizeExprAddr);
      Dimensions.emplace_back(SizeExprAddr.getPointer(),
                              Type1D.getUnqualifiedType());
    }
    Type1D = VlaSize.Type;
  }

  if (!EmitDebugInfo)
    return;

  // Register each dimension's size-expression with a DILocalVariable,
  // so that it can be used by CGDebugInfo when instantiating a DISubrange
  // to describe this array.
  for (auto &VlaSize : Dimensions) {
    llvm::Metadata *MD;
    if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
      MD = llvm::ConstantAsMetadata::get(C);
    else {
      // Create an artificial VarDecl to generate debug info for.
      // Its name mirrors the "__vla_expr" alloca created above.
      IdentifierInfo &NameIdent = getContext().Idents.getOwn(
          cast<llvm::AllocaInst>(VlaSize.NumElts)->getName());
      auto VlaExprTy = VlaSize.NumElts->getType()->getPointerElementType();
      // An unsigned integer type wide enough to hold the stored size.
      auto QT = getContext().getIntTypeForBitwidth(
          VlaExprTy->getScalarSizeInBits(), false);
      auto *ArtificialDecl = VarDecl::Create(
          getContext(), const_cast<DeclContext *>(D.getDeclContext()),
          D.getLocation(), D.getLocation(), &NameIdent, QT,
          getContext().CreateTypeSourceInfo(QT), SC_Auto);
      ArtificialDecl->setImplicit();

      MD = DI->EmitDeclareOfAutoVariable(ArtificialDecl, VlaSize.NumElts,
                                         Builder);
    }
    assert(MD && "No Size expression debug node created");
    DI->registerVLASizeExpression(VlaSize.Type, MD);
  }
}
/// EmitAutoVarAlloca - Emit the alloca and debug information for a
/// local variable. Does not emit initialization or destruction.
///
/// Depending on the variable, storage may instead come from an internal
/// global (constant aggregates), the return slot (NRVO), an
/// OpenMP-runtime-provided address, or a VLA allocation; the returned
/// AutoVarEmission records which case applied for later phases.
CodeGenFunction::AutoVarEmission
CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
  QualType Ty = D.getType();
  assert(
      Ty.getAddressSpace() == Target.getStackAddressSpace(getLangOpts()) ||
      (Ty.getAddressSpace() == LangAS::opencl_private && getLangOpts().OpenCL));

  AutoVarEmission emission(D);

  // __block variables that escape get a byref structure instead of a plain
  // alloca of the variable's own type.
  bool isEscapingByRef = D.isEscapingByref();
  emission.IsEscapingByRef = isEscapingByRef;

  CharUnits alignment = getContext().getDeclAlign(&D);

  // If the type is variably-modified, emit all the VLA sizes for it.
  if (Ty->isVariablyModifiedType())
    EmitVariablyModifiedType(Ty);

  auto *DI = getDebugInfo();
  bool EmitDebugInfo = DI && CGM.getCodeGenOpts().getDebugInfo() >=
                                 codegenoptions::LimitedDebugInfo;

  // 'address' is the variable's user-visible address; 'AllocaAddr' is the
  // raw alloca backing it (used for lifetime markers).
  Address address = Address::invalid();
  Address AllocaAddr = Address::invalid();
  if (Ty->isConstantSizeType()) {
    bool NRVO = getLangOpts().ElideConstructors &&
                D.isNRVOVariable();

    // If this value is an array or struct with a statically determinable
    // constant initializer, there are optimizations we can do.
    //
    // TODO: We should constant-evaluate the initializer of any variable,
    // as long as it is initialized by a constant expression. Currently,
    // isConstantInitializer produces wrong answers for structs with
    // reference or bitfield members, and a few other cases, and checking
    // for POD-ness protects us from some of these.
    if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) &&
        (D.isConstexpr() ||
         ((Ty.isPODType(getContext()) ||
           getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
          D.getInit()->isConstantInitializer(getContext(), false)))) {

      // If the variable's a const type, and it's neither an NRVO
      // candidate nor a __block variable and has no mutable members,
      // emit it as a global instead.
      // Exception is if a variable is located in non-constant address space
      // in OpenCL.
      if ((!getLangOpts().OpenCL ||
           Ty.getAddressSpace() == LangAS::opencl_constant) &&
          (CGM.getCodeGenOpts().MergeAllConstants && !NRVO &&
           !isEscapingByRef && CGM.isTypeConstant(Ty, true))) {
        EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);

        // Signal this condition to later callbacks.
        emission.Addr = Address::invalid();
        assert(emission.wasEmittedAsGlobal());
        return emission;
      }

      // Otherwise, tell the initialization code that we're in this case.
      emission.IsConstantAggregate = true;
    }

    // A normal fixed sized variable becomes an alloca in the entry block,
    // unless:
    // - it's an NRVO variable.
    // - we are compiling OpenMP and it's an OpenMP local variable.
    Address OpenMPLocalAddr =
        getLangOpts().OpenMP
            ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
            : Address::invalid();
    if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
      address = OpenMPLocalAddr;
    } else if (NRVO) {
      // The named return value optimization: allocate this variable in the
      // return slot, so that we can elide the copy when returning this
      // variable (C++0x [class.copy]p34).
      address = ReturnValue;

      if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
        const auto *RD = RecordTy->getDecl();
        const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
        if ((CXXRD && !CXXRD->hasTrivialDestructor()) ||
            RD->isNonTrivialToPrimitiveDestroy()) {
          // Create a flag that is used to indicate when the NRVO was applied
          // to this variable. Set it to zero to indicate that NRVO was not
          // applied.  The destructor cleanup later consults this flag so the
          // object isn't destroyed twice when NRVO fires.
          llvm::Value *Zero = Builder.getFalse();
          Address NRVOFlag =
              CreateTempAlloca(Zero->getType(), CharUnits::One(), "nrvo");
          EnsureInsertPoint();
          Builder.CreateStore(Zero, NRVOFlag);

          // Record the NRVO flag for this variable.
          NRVOFlags[&D] = NRVOFlag.getPointer();
          emission.NRVOFlag = NRVOFlag.getPointer();
        }
      }
    } else {
      CharUnits allocaAlignment;
      llvm::Type *allocaTy;
      if (isEscapingByRef) {
        // Escaping __block variable: allocate the byref structure.
        auto &byrefInfo = getBlockByrefInfo(&D);
        allocaTy = byrefInfo.Type;
        allocaAlignment = byrefInfo.ByrefAlignment;
      } else {
        allocaTy = ConvertTypeForMem(Ty);
        allocaAlignment = alignment;
      }

      // Create the alloca.  Note that we set the name separately from
      // building the instruction so that it's there even in no-asserts
      // builds.
      address = CreateTempAlloca(allocaTy, allocaAlignment, D.getName(),
                                 /*ArraySize=*/nullptr, &AllocaAddr);

      // Don't emit lifetime markers for MSVC catch parameters. The lifetime of
      // the catch parameter starts in the catchpad instruction, and we can't
      // insert code in those basic blocks.
      bool IsMSCatchParam =
          D.isExceptionVariable() && getTarget().getCXXABI().isMicrosoft();

      // Emit a lifetime intrinsic if meaningful. There's no point in doing this
      // if we don't have a valid insertion point (?).
      if (HaveInsertPoint() && !IsMSCatchParam) {
        // If there's a jump into the lifetime of this variable, its lifetime
        // gets broken up into several regions in IR, which requires more work
        // to handle correctly. For now, just omit the intrinsics; this is a
        // rare case, and it's better to just be conservatively correct.
        // PR28267.
        //
        // We have to do this in all language modes if there's a jump past the
        // declaration. We also have to do it in C if there's a jump to an
        // earlier point in the current block because non-VLA lifetimes begin as
        // soon as the containing block is entered, not when its variables
        // actually come into scope; suppressing the lifetime annotations
        // completely in this case is unnecessarily pessimistic, but again, this
        // is rare.
        if (!Bypasses.IsBypassed(&D) &&
            !(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
          uint64_t size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
          emission.SizeForLifetimeMarkers =
              EmitLifetimeStart(size, AllocaAddr.getPointer());
        }
      } else {
        assert(!emission.useLifetimeMarkers());
      }
    }
  } else {
    // Variable-length array: dynamically sized stack allocation.
    EnsureInsertPoint();

    if (!DidCallStackSave) {
      // Save the stack so it can be restored when the VLAs go out of scope.
      Address Stack =
          CreateTempAlloca(Int8PtrTy, getPointerAlign(), "saved_stack");

      llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
      llvm::Value *V = Builder.CreateCall(F);
      Builder.CreateStore(V, Stack);

      DidCallStackSave = true;

      // Push a cleanup block and restore the stack there.
      // FIXME: in general circumstances, this should be an EH cleanup.
      pushStackRestore(NormalCleanup, Stack);
    }

    auto VlaSize = getVLASize(Ty);
    llvm::Type *llvmTy = ConvertTypeForMem(VlaSize.Type);

    // Allocate memory for the array.
    address = CreateTempAlloca(llvmTy, alignment, "vla", VlaSize.NumElts,
                               &AllocaAddr);

    // If we have debug info enabled, properly describe the VLA dimensions for
    // this type by registering the vla size expression for each of the
    // dimensions.
    EmitAndRegisterVariableArrayDimensions(DI, D, EmitDebugInfo);
  }

  setAddrOfLocalVar(&D, address);
  emission.Addr = address;
  emission.AllocaAddr = AllocaAddr;

  // Emit debug info for local var declaration.
  if (EmitDebugInfo && HaveInsertPoint()) {
    DI->setLocation(D.getLocation());
    (void)DI->EmitDeclareOfAutoVariable(&D, address.getPointer(), Builder);
  }

  if (D.hasAttr<AnnotateAttr>())
    EmitVarAnnotations(&D, address.getPointer());

  // Make sure we call @llvm.lifetime.end.
  if (emission.useLifetimeMarkers())
    EHStack.pushCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker,
                                         emission.getOriginalAllocatedAddress(),
                                         emission.getSizeForLifetimeMarkers());

  return emission;
}
static bool isCapturedBy(const VarDecl &, const Expr *);
/// Determines whether the given __block variable is potentially
/// captured by the given statement.
static bool isCapturedBy(const VarDecl &Var, const Stmt *S) {
if (const Expr *E = dyn_cast<Expr>(S))
return isCapturedBy(Var, E);
for (const Stmt *SubStmt : S->children())
if (isCapturedBy(Var, SubStmt))
return true;
return false;
}
/// Determines whether the given __block variable is potentially
/// captured by the given expression.
static bool isCapturedBy(const VarDecl &Var, const Expr *E) {
// Skip the most common kinds of expressions that make
// hierarchy-walking expensive.
E = E->IgnoreParenCasts();
if (const BlockExpr *BE = dyn_cast<BlockExpr>(E)) {
const BlockDecl *Block = BE->getBlockDecl();
for (const auto &I : Block->captures()) {
if (I.getVariable() == &Var)
return true;
}
// No need to walk into the subexpressions.
return false;
}
if (const StmtExpr *SE = dyn_cast<StmtExpr>(E)) {
const CompoundStmt *CS = SE->getSubStmt();
for (const auto *BI : CS->body())
if (const auto *BIE = dyn_cast<Expr>(BI)) {
if (isCapturedBy(Var, BIE))
return true;
}
else if (const auto *DS = dyn_cast<DeclStmt>(BI)) {
// special case declarations
for (const auto *I : DS->decls()) {
if (const auto *VD = dyn_cast<VarDecl>((I))) {
const Expr *Init = VD->getInit();
if (Init && isCapturedBy(Var, Init))
return true;
}
}
}
else
// FIXME. Make safe assumption assuming arbitrary statements cause capturing.
// Later, provide code to poke into statements for capture analysis.
return true;
return false;
}
for (const Stmt *SubStmt : E->children())
if (isCapturedBy(Var, SubStmt))
return true;
return false;
}
/// Determine whether the given initializer is trivial in the sense
/// that it requires no code to be generated.
bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
if (!Init)
return true;
if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
if (CXXConstructorDecl *Constructor = Construct->getConstructor())
if (Constructor->isTrivial() &&
Constructor->isDefaultConstructor() &&
!Construct->requiresZeroInitialization())
return true;
return false;
}
/// Emit the initializer phase for an auto variable previously set up by
/// EmitAutoVarAlloca: byref-structure setup, then either a constant-store
/// fast path or a full expression-based initialization.
void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
  assert(emission.Variable && "emission was not valid!");

  // If this was emitted as a global constant, we're done.
  if (emission.wasEmittedAsGlobal()) return;

  const VarDecl &D = *emission.Variable;
  auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, D.getLocation());
  QualType type = D.getType();

  // If this local has an initializer, emit it now.
  const Expr *Init = D.getInit();

  // If we are at an unreachable point, we don't need to emit the initializer
  // unless it contains a label.
  if (!HaveInsertPoint()) {
    if (!Init || !ContainsLabel(Init)) return;
    EnsureInsertPoint();
  }

  // Initialize the structure of a __block variable.
  if (emission.IsEscapingByRef)
    emitByrefStructureInit(emission);

  // Initialize the variable here if it doesn't have a initializer and it is a
  // C struct that is non-trivial to initialize or an array containing such a
  // struct.
  if (!Init &&
      type.isNonTrivialToPrimitiveDefaultInitialize() ==
          QualType::PDIK_Struct) {
    LValue Dst = MakeAddrLValue(emission.getAllocatedAddress(), type);
    if (emission.IsEscapingByRef)
      drillIntoBlockVariable(*this, Dst, &D);
    defaultInitNonTrivialCStructVar(Dst);
    return;
  }

  if (isTrivialInitializer(Init))
    return;

  // Check whether this is a byref variable that's potentially
  // captured and moved by its own initializer.  If so, we'll need to
  // emit the initializer first, then copy into the variable.
  bool capturedByInit = emission.IsEscapingByRef && isCapturedBy(D, Init);

  // When captured by its own initializer, write through the raw byref
  // address; otherwise use the variable's normal object address.
  Address Loc =
      capturedByInit ? emission.Addr : emission.getObjectAddress(*this);

  llvm::Constant *constant = nullptr;
  if (emission.IsConstantAggregate || D.isConstexpr()) {
    assert(!capturedByInit && "constant init contains a capturing block?");
    constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D);
  }

  // No usable constant: fall back to emitting the initializer expression.
  if (!constant) {
    LValue lv = MakeAddrLValue(Loc, type);
    lv.setNonGC(true);
    return EmitExprAsInit(Init, &D, lv, capturedByInit);
  }

  if (!emission.IsConstantAggregate) {
    // For simple scalar/complex initialization, store the value directly.
    LValue lv = MakeAddrLValue(Loc, type);
    lv.setNonGC(true);
    return EmitStoreThroughLValue(RValue::get(constant), lv, true);
  }

  // If this is a simple aggregate initialization, we can optimize it
  // in various ways.
  bool isVolatile = type.isVolatileQualified();

  // emitStoresForConstant wants an i8* destination.
  llvm::Type *BP = CGM.Int8Ty->getPointerTo(Loc.getAddressSpace());
  if (Loc.getType() != BP)
    Loc = Builder.CreateBitCast(Loc, BP);

  emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant);
}
/// Emit an expression as an initializer for an object (variable, field, etc.)
/// at the given location.  The expression is not necessarily the normal
/// initializer for the object, and the address is not necessarily
/// its normal location.
///
/// \param init the initializing expression
/// \param D the object to act as if we're initializing
/// \param lvalue the lvalue to initialize; its address is a pointer
///   to the LLVM mapping of the object's type
/// \param capturedByInit true if \p D is a __block variable
///   whose address is potentially changed by the initializer
void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
                                     LValue lvalue, bool capturedByInit) {
  QualType type = D->getType();

  // References bind to the result of the expression rather than copying it.
  if (type->isReferenceType()) {
    RValue rvalue = EmitReferenceBindingToExpr(init);
    // The __block header may have moved; re-derive the destination address.
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitStoreThroughLValue(rvalue, lvalue, true);
    return;
  }
  switch (getEvaluationKind(type)) {
  case TEK_Scalar:
    EmitScalarInit(init, D, lvalue, capturedByInit);
    return;
  case TEK_Complex: {
    ComplexPairTy complex = EmitComplexExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitStoreOfComplex(complex, lvalue, /*init*/ true);
    return;
  }
  case TEK_Aggregate:
    if (type->isAtomicType()) {
      EmitAtomicInit(const_cast<Expr*>(init), lvalue);
    } else {
      // Variables being initialized cannot overlap with themselves; field
      // initialization overlap is decided per-field.
      AggValueSlot::Overlap_t Overlap = AggValueSlot::MayOverlap;
      if (isa<VarDecl>(D))
        Overlap = AggValueSlot::DoesNotOverlap;
      else if (auto *FD = dyn_cast<FieldDecl>(D))
        Overlap = overlapForFieldInit(FD);
      // TODO: how can we delay here if D is captured by its initializer?
      EmitAggExpr(init, AggValueSlot::forLValue(lvalue,
                                              AggValueSlot::IsDestructed,
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                              AggValueSlot::IsNotAliased,
                                              Overlap));
    }
    return;
  }
  llvm_unreachable("bad evaluation kind");
}
/// Enter a destroy cleanup for the given local variable.
///
/// Chooses the cleanup flavor from \p dtorKind: C++ destructors and
/// non-trivial C structs get NRVO-aware cleanups when an NRVO flag exists;
/// ARC strong lifetimes may downgrade to a normal-only cleanup and use the
/// imprecise destroyer unless objc_precise_lifetime is requested.
void CodeGenFunction::emitAutoVarTypeCleanup(
    const CodeGenFunction::AutoVarEmission &emission,
    QualType::DestructionKind dtorKind) {
  assert(dtorKind != QualType::DK_none);

  // Note that for __block variables, we want to destroy the
  // original stack object, not the possibly forwarded object.
  Address addr = emission.getObjectAddress(*this);

  const VarDecl *var = emission.Variable;
  QualType type = var->getType();

  CleanupKind cleanupKind = NormalAndEHCleanup;
  CodeGenFunction::Destroyer *destroyer = nullptr;

  switch (dtorKind) {
  case QualType::DK_none:
    llvm_unreachable("no cleanup for trivially-destructible variable");

  case QualType::DK_cxx_destructor:
    // If there's an NRVO flag on the emission, we need a different
    // cleanup.
    if (emission.NRVOFlag) {
      assert(!type->isArrayType());
      CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
      EHStack.pushCleanup<DestroyNRVOVariableCXX>(cleanupKind, addr, dtor,
                                                  emission.NRVOFlag);
      return;
    }
    break;

  case QualType::DK_objc_strong_lifetime:
    // Suppress cleanups for pseudo-strong variables.
    if (var->isARCPseudoStrong()) return;

    // Otherwise, consider whether to use an EH cleanup or not.
    cleanupKind = getARCCleanupKind();

    // Use the imprecise destroyer by default.
    if (!var->hasAttr<ObjCPreciseLifetimeAttr>())
      destroyer = CodeGenFunction::destroyARCStrongImprecise;
    break;

  case QualType::DK_objc_weak_lifetime:
    break;

  case QualType::DK_nontrivial_c_struct:
    destroyer = CodeGenFunction::destroyNonTrivialCStruct;
    if (emission.NRVOFlag) {
      assert(!type->isArrayType());
      EHStack.pushCleanup<DestroyNRVOVariableC>(cleanupKind, addr,
                                                emission.NRVOFlag, type);
      return;
    }
    break;
  }

  // If we haven't chosen a more specific destroyer, use the default.
  if (!destroyer) destroyer = getDestroyer(dtorKind);

  // Use an EH cleanup in array destructors iff the destructor itself
  // is being pushed as an EH cleanup.
  bool useEHCleanup = (cleanupKind & EHCleanup);
  EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
                                     useEHCleanup);
}
/// Register all cleanups required by an auto variable: the type's own
/// destruction, GC lifetime extension, the cleanup attribute's callback,
/// and byref destruction for escaping __block variables.
void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
  assert(emission.Variable && "emission was not valid!");

  // If this was emitted as a global constant, we're done.
  if (emission.wasEmittedAsGlobal()) return;

  // If we don't have an insertion point, we're done.  Sema prevents
  // us from jumping into any of these scopes anyway.
  if (!HaveInsertPoint()) return;

  const VarDecl &D = *emission.Variable;

  // Check the type for a cleanup.
  if (QualType::DestructionKind dtorKind = D.getType().isDestructedType())
    emitAutoVarTypeCleanup(emission, dtorKind);

  // In GC mode, honor objc_precise_lifetime.
  if (getLangOpts().getGC() != LangOptions::NonGC &&
      D.hasAttr<ObjCPreciseLifetimeAttr>()) {
    EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
  }

  // Handle the cleanup attribute: call the named function with the
  // variable's address when it goes out of scope.
  if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
    const FunctionDecl *FD = CA->getFunctionDecl();

    llvm::Constant *F = CGM.GetAddrOfFunction(FD);
    assert(F && "Could not find function!");

    const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
    EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D);
  }

  // If this is a block variable, call _Block_object_destroy
  // (on the unforwarded address). Don't enter this cleanup if we're in pure-GC
  // mode.
  if (emission.IsEscapingByRef &&
      CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
    BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF;
    if (emission.Variable->getType().isObjCGCWeak())
      Flags |= BLOCK_FIELD_IS_WEAK;
    enterByrefCleanup(NormalAndEHCleanup, emission.Addr, Flags,
                      /*LoadBlockVarAddr*/ false,
                      cxxDestructorCanThrow(emission.Variable->getType()));
  }
}
/// Map a destruction kind to the default destroyer callback for it.
CodeGenFunction::Destroyer *
CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
  switch (kind) {
  case QualType::DK_none:
    llvm_unreachable("no destroyer for trivial dtor");
  case QualType::DK_nontrivial_c_struct:
    return destroyNonTrivialCStruct;
  case QualType::DK_objc_weak_lifetime:
    return destroyARCWeak;
  case QualType::DK_objc_strong_lifetime:
    return destroyARCStrongPrecise;
  case QualType::DK_cxx_destructor:
    return destroyCXXObject;
  }
  llvm_unreachable("Unknown DestructionKind");
}
/// pushEHDestroy - Push the standard destructor for the given type as
/// an EH-only cleanup.
void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
                                    Address addr, QualType type) {
  assert(dtorKind && "cannot push destructor for trivial type");
  assert(needsEHCleanup(dtorKind));

  // EH-only cleanup; array elements also get EH cleanups (last arg).
  pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind), true);
}
/// pushDestroy - Push the standard destructor for the given type as
/// at least a normal cleanup.
void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
                                  Address addr, QualType type) {
  assert(dtorKind && "cannot push destructor for trivial type");

  // Derive the cleanup kind from the destruction kind; request an EH array
  // cleanup exactly when the kind includes an EH component.
  CleanupKind kind = getCleanupKind(dtorKind);
  pushDestroy(kind, addr, type, getDestroyer(dtorKind), kind & EHCleanup);
}
/// Push a cleanup that destroys the object at \p addr of type \p type with
/// \p destroyer; \p useEHCleanupForArray controls per-element EH cleanups.
void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
                                  QualType type, Destroyer *destroyer,
                                  bool useEHCleanupForArray) {
  pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type,
                                     destroyer, useEHCleanupForArray);
}
/// Push a cleanup that restores the stack pointer saved at \p SPMem
/// (used to release VLA allocations on scope exit).
void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
  EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
}
/// Push destruction for a lifetime-extended temporary: an immediate EH-only
/// cleanup (if requested) plus a deferred full cleanup at the end of the
/// enclosing full-expression.
void CodeGenFunction::pushLifetimeExtendedDestroy(
    CleanupKind cleanupKind, Address addr, QualType type,
    Destroyer *destroyer, bool useEHCleanupForArray) {
  // Push an EH-only cleanup for the object now.
  // FIXME: When popping normal cleanups, we need to keep this EH cleanup
  // around in case a temporary's destructor throws an exception.
  if (cleanupKind & EHCleanup)
    EHStack.pushCleanup<DestroyObject>(
        static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), addr, type,
        destroyer, useEHCleanupForArray);

  // Remember that we need to push a full cleanup for the object at the
  // end of the full-expression.
  pushCleanupAfterFullExpr<DestroyObject>(
      cleanupKind, addr, type, destroyer, useEHCleanupForArray);
}
/// emitDestroy - Immediately perform the destruction of the given
/// object.
///
/// \param addr - the address of the object; a type*
/// \param type - the type of the object; if an array type, all
///   objects are destroyed in reverse order
/// \param destroyer - the function to call to destroy individual
///   elements
/// \param useEHCleanupForArray - whether an EH cleanup should be
///   used when destroying array elements, in case one of the
///   destructions throws an exception
void CodeGenFunction::emitDestroy(Address addr, QualType type,
                                  Destroyer *destroyer,
                                  bool useEHCleanupForArray) {
  const ArrayType *arrayType = getContext().getAsArrayType(type);

  // Non-arrays are destroyed with a single destroyer call.
  if (!arrayType)
    return destroyer(*this, addr, type);

  // NOTE: elementAlign below relies on 'type' and 'addr' as left by
  // emitArrayLength (both are passed by reference to it).
  llvm::Value *numElements = emitArrayLength(arrayType, type, addr);

  CharUnits elementAlign =
      addr.getAlignment()
          .alignmentOfArrayElement(getContext().getTypeSizeInChars(type));

  // Assume we must guard against a zero-length array unless the length
  // turns out to be a compile-time constant.
  bool checkZeroLength = true;
  if (auto *constLength = dyn_cast<llvm::ConstantInt>(numElements)) {
    // A statically empty array needs no destruction at all.
    if (constLength->isZero())
      return;
    checkZeroLength = false;
  }

  llvm::Value *arrayBegin = addr.getPointer();
  llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements);
  emitArrayDestroy(arrayBegin, arrayEnd, type, elementAlign, destroyer,
                   checkZeroLength, useEHCleanupForArray);
}
/// emitArrayDestroy - Destroys all the elements of the given array,
/// beginning from last to first.  The array cannot be zero-length.
///
/// \param begin - a type* denoting the first element of the array
/// \param end - a type* denoting one past the end of the array
/// \param elementType - the element type of the array
/// \param destroyer - the function to call to destroy elements
/// \param useEHCleanup - whether to push an EH cleanup to destroy
///   the remaining elements in case the destruction of a single
///   element throws
void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
                                       llvm::Value *end,
                                       QualType elementType,
                                       CharUnits elementAlign,
                                       Destroyer *destroyer,
                                       bool checkZeroLength,
                                       bool useEHCleanup) {
  assert(!elementType->isArrayType());

  // The basic structure here is a do-while loop, because we don't
  // need to check for the zero-element case.
  llvm::BasicBlock *bodyBB = createBasicBlock("arraydestroy.body");
  llvm::BasicBlock *doneBB = createBasicBlock("arraydestroy.done");

  // Guard the loop when the length is not statically known to be non-zero.
  if (checkZeroLength) {
    llvm::Value *isEmpty = Builder.CreateICmpEQ(begin, end,
                                                "arraydestroy.isempty");
    Builder.CreateCondBr(isEmpty, doneBB, bodyBB);
  }

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  EmitBlock(bodyBB);
  // PHI holding one-past the element destroyed on the previous iteration
  // (starts at 'end'; destruction runs back-to-front).
  llvm::PHINode *elementPast =
      Builder.CreatePHI(begin->getType(), 2, "arraydestroy.elementPast");
  elementPast->addIncoming(end, entryBB);

  // Shift the address back by one element.
  llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
  llvm::Value *element = Builder.CreateInBoundsGEP(elementPast, negativeOne,
                                                   "arraydestroy.element");

  if (useEHCleanup)
    pushRegularPartialArrayCleanup(begin, element, elementType, elementAlign,
                                   destroyer);

  // Perform the actual destruction there.
  destroyer(*this, Address(element, elementAlign), elementType);

  if (useEHCleanup)
    PopCleanupBlock();

  // Check whether we've reached the end.
  llvm::Value *done = Builder.CreateICmpEQ(element, begin, "arraydestroy.done");
  Builder.CreateCondBr(done, doneBB, bodyBB);
  // Second PHI input: loop back-edge from wherever the destroyer left us.
  elementPast->addIncoming(element, Builder.GetInsertBlock());

  // Done.
  EmitBlock(doneBB);
}
/// Perform partial array destruction as if in an EH cleanup. Unlike
/// emitArrayDestroy, the element type here may still be an array type.
///
/// \param begin - pointer to the first constructed element
/// \param end - pointer one past the last constructed element
/// \param type - the element type, possibly itself an array type
/// \param elementAlign - alignment of the innermost elements
/// \param destroyer - the function used to destroy innermost elements
static void emitPartialArrayDestroy(CodeGenFunction &CGF,
llvm::Value *begin, llvm::Value *end,
QualType type, CharUnits elementAlign,
CodeGenFunction::Destroyer *destroyer) {
// If the element type is itself an array, drill down.
unsigned arrayDepth = 0;
while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
// VLAs don't require a GEP index to walk into.
if (!isa<VariableArrayType>(arrayType))
arrayDepth++;
type = arrayType->getElementType();
}
if (arrayDepth) {
// Decay begin/end to pointers to the innermost element type by adding
// one zero GEP index per constant-size array dimension.
llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
SmallVector<llvm::Value*,4> gepIndices(arrayDepth+1, zero);
begin = CGF.Builder.CreateInBoundsGEP(begin, gepIndices, "pad.arraybegin");
end = CGF.Builder.CreateInBoundsGEP(end, gepIndices, "pad.arrayend");
}
// Destroy the array. We don't ever need an EH cleanup because we
// assume that we're in an EH cleanup ourselves, so a throwing
// destructor causes an immediate terminate.
CGF.emitArrayDestroy(begin, end, type, elementAlign, destroyer,
/*checkZeroLength*/ true, /*useEHCleanup*/ false);
}
namespace {
/// RegularPartialArrayDestroy - a cleanup which performs a partial
/// array destroy where the end pointer is regularly determined and
/// does not need to be loaded from a local.
class RegularPartialArrayDestroy final : public EHScopeStack::Cleanup {
llvm::Value *ArrayBegin;
llvm::Value *ArrayEnd;
QualType ElementType;
CodeGenFunction::Destroyer *Destroyer;
CharUnits ElementAlign;
public:
RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
QualType elementType, CharUnits elementAlign,
CodeGenFunction::Destroyer *destroyer)
: ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
ElementType(elementType), Destroyer(destroyer),
ElementAlign(elementAlign) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
ElementType, ElementAlign, Destroyer);
}
};
/// IrregularPartialArrayDestroy - a cleanup which performs a
/// partial array destroy where the end pointer is irregularly
/// determined and must be loaded from a local.
class IrregularPartialArrayDestroy final : public EHScopeStack::Cleanup {
llvm::Value *ArrayBegin;
Address ArrayEndPointer;
QualType ElementType;
CodeGenFunction::Destroyer *Destroyer;
CharUnits ElementAlign;
public:
IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
Address arrayEndPointer,
QualType elementType,
CharUnits elementAlign,
CodeGenFunction::Destroyer *destroyer)
: ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
ElementType(elementType), Destroyer(destroyer),
ElementAlign(elementAlign) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
ElementType, ElementAlign, Destroyer);
}
};
} // end anonymous namespace
/// pushIrregularPartialArrayCleanup - Push an EH cleanup to destroy
/// already-constructed elements of the given array. The cleanup
/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
///
/// \param elementType - the immediate element type of the array;
/// possibly still an array type
void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
                                                       Address arrayEndPointer,
                                                       QualType elementType,
                                                       CharUnits elementAlign,
                                                       Destroyer *destroyer) {
  // The end pointer is tracked in a local variable, so the cleanup must
  // reload it whenever it fires.
  pushFullExprCleanup<IrregularPartialArrayDestroy>(
      EHCleanup, arrayBegin, arrayEndPointer, elementType, elementAlign,
      destroyer);
}
/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
/// already-constructed elements of the given array. The cleanup
/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
///
/// \param elementType - the immediate element type of the array;
/// possibly still an array type
void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
                                                     llvm::Value *arrayEnd,
                                                     QualType elementType,
                                                     CharUnits elementAlign,
                                                     Destroyer *destroyer) {
  // The end pointer is a plain value here; no reload is needed when the
  // cleanup fires.
  pushFullExprCleanup<RegularPartialArrayDestroy>(
      EHCleanup, arrayBegin, arrayEnd, elementType, elementAlign, destroyer);
}
/// Lazily declare the @llvm.lifetime.start intrinsic.
llvm::Constant *CodeGenModule::getLLVMLifetimeStartFn() {
  // Declare the intrinsic on first use and cache it for later callers.
  if (!LifetimeStartFn)
    LifetimeStartFn = llvm::Intrinsic::getDeclaration(
        &getModule(), llvm::Intrinsic::lifetime_start, AllocaInt8PtrTy);
  return LifetimeStartFn;
}
/// Lazily declare the @llvm.lifetime.end intrinsic.
llvm::Constant *CodeGenModule::getLLVMLifetimeEndFn() {
  // Declare the intrinsic on first use and cache it for later callers.
  if (!LifetimeEndFn)
    LifetimeEndFn = llvm::Intrinsic::getDeclaration(
        &getModule(), llvm::Intrinsic::lifetime_end, AllocaInt8PtrTy);
  return LifetimeEndFn;
}
namespace {
/// A cleanup to perform a release of an object at the end of a
/// function. This is used to balance out the incoming +1 of a
/// ns_consumed argument when we can't reasonably do that just by
/// not doing the initial retain for a __block argument.
struct ConsumeARCParameter final : EHScopeStack::Cleanup {
ConsumeARCParameter(llvm::Value *param,
ARCPreciseLifetime_t precise)
: Param(param), Precise(precise) {}
llvm::Value *Param;
ARCPreciseLifetime_t Precise;
void Emit(CodeGenFunction &CGF, Flags flags) override {
CGF.EmitARCRelease(Param, Precise);
}
};
} // end anonymous namespace
/// Emit an alloca (or GlobalValue depending on target)
/// for the specified parameter and set up LocalDeclMap.
///
/// \param D - the parameter (or implicit parameter) declaration
/// \param Arg - the incoming argument, either a direct value or a
/// pointer to memory it was already spilled to
/// \param ArgNo - argument index used for debug info
void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
unsigned ArgNo) {
// FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
"Invalid argument to EmitParmDecl");
Arg.getAnyValue()->setName(D.getName());
QualType Ty = D.getType();
// Use better IR generation for certain implicit parameters.
if (auto IPD = dyn_cast<ImplicitParamDecl>(&D)) {
// The only implicit argument a block has is its literal.
// This may be passed as an inalloca'ed value on Windows x86.
if (BlockInfo) {
llvm::Value *V = Arg.isIndirect()
? Builder.CreateLoad(Arg.getIndirectAddress())
: Arg.getDirectValue();
setBlockContextParameter(IPD, ArgNo, V);
return;
}
}
Address DeclPtr = Address::invalid();
// DoStore: whether the incoming value still needs to be written into
// DeclPtr at the end of this function.
bool DoStore = false;
bool IsScalar = hasScalarEvaluationKind(Ty);
// If we already have a pointer to the argument, reuse the input pointer.
if (Arg.isIndirect()) {
DeclPtr = Arg.getIndirectAddress();
// If we have a prettier pointer type at this point, bitcast to that.
unsigned AS = DeclPtr.getType()->getAddressSpace();
llvm::Type *IRTy = ConvertTypeForMem(Ty)->getPointerTo(AS);
if (DeclPtr.getType() != IRTy)
DeclPtr = Builder.CreateBitCast(DeclPtr, IRTy, D.getName());
// Indirect argument is in alloca address space, which may be different
// from the default address space.
auto AllocaAS = CGM.getASTAllocaAddressSpace();
auto *V = DeclPtr.getPointer();
auto SrcLangAS = getLangOpts().OpenCL ? LangAS::opencl_private : AllocaAS;
auto DestLangAS = getLangOpts().OpenCL
? LangAS::opencl_private
: getTarget().getStackAddressSpace(getLangOpts());
// Insert an address-space cast when the alloca address space differs
// from the address space the language expects for this pointer.
if (SrcLangAS != DestLangAS) {
assert(getContext().getTargetAddressSpace(SrcLangAS) ==
CGM.getDataLayout().getAllocaAddrSpace());
auto DestAS = getContext().getTargetAddressSpace(DestLangAS);
auto *T = V->getType()->getPointerElementType()->getPointerTo(DestAS);
DeclPtr = Address(getTargetHooks().performAddrSpaceCast(
*this, V, SrcLangAS, DestLangAS, T, true),
DeclPtr.getAlignment());
}
// Push a destructor cleanup for this parameter if the ABI requires it.
// Don't push a cleanup in a thunk for a method that will also emit a
// cleanup.
if (hasAggregateEvaluationKind(Ty) && !CurFuncIsThunk &&
Ty->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
if (QualType::DestructionKind DtorKind = Ty.isDestructedType()) {
assert((DtorKind == QualType::DK_cxx_destructor ||
DtorKind == QualType::DK_nontrivial_c_struct) &&
"unexpected destructor type");
pushDestroy(DtorKind, DeclPtr, Ty);
// Record the cleanup so call sites can find it for this parameter.
CalleeDestructedParamCleanups[cast<ParmVarDecl>(&D)] =
EHStack.stable_begin();
}
}
} else {
// Check if the parameter address is controlled by OpenMP runtime.
Address OpenMPLocalAddr =
getLangOpts().OpenMP
? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
: Address::invalid();
if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
DeclPtr = OpenMPLocalAddr;
} else {
// Otherwise, create a temporary to hold the value.
DeclPtr = CreateMemTemp(Ty, getContext().getDeclAlign(&D),
D.getName() + ".addr");
}
// The direct value must be spilled into the new temporary below.
DoStore = true;
}
llvm::Value *ArgVal = (DoStore ? Arg.getDirectValue() : nullptr);
LValue lv = MakeAddrLValue(DeclPtr, Ty);
if (IsScalar) {
Qualifiers qs = Ty.getQualifiers();
if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
// We honor __attribute__((ns_consumed)) for types with lifetime.
// For __strong, it's handled by just skipping the initial retain;
// otherwise we have to balance out the initial +1 with an extra
// cleanup to do the release at the end of the function.
bool isConsumed = D.hasAttr<NSConsumedAttr>();
// 'self' is always formally __strong, but if this is not an
// init method then we don't want to retain it.
if (D.isARCPseudoStrong()) {
const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CurCodeDecl);
assert(&D == method->getSelfDecl());
assert(lt == Qualifiers::OCL_Strong);
assert(qs.hasConst());
assert(method->getMethodFamily() != OMF_init);
(void) method;
// Demote to ExplicitNone so no retain/release is emitted for 'self'.
lt = Qualifiers::OCL_ExplicitNone;
}
// Load objects passed indirectly.
if (Arg.isIndirect() && !ArgVal)
ArgVal = Builder.CreateLoad(DeclPtr);
if (lt == Qualifiers::OCL_Strong) {
if (!isConsumed) {
if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
// use objc_storeStrong(&dest, value) for retaining the
// object. But first, store a null into 'dest' because
// objc_storeStrong attempts to release its old value.
llvm::Value *Null = CGM.EmitNullConstant(D.getType());
EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
EmitARCStoreStrongCall(lv.getAddress(), ArgVal, true);
// objc_storeStrong already wrote the value; skip the final store.
DoStore = false;
}
else
// Don't use objc_retainBlock for block pointers, because we
// don't want to Block_copy something just because we got it
// as a parameter.
ArgVal = EmitARCRetainNonBlock(ArgVal);
}
} else {
// Push the cleanup for a consumed parameter.
if (isConsumed) {
ARCPreciseLifetime_t precise = (D.hasAttr<ObjCPreciseLifetimeAttr>()
? ARCPreciseLifetime : ARCImpreciseLifetime);
EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), ArgVal,
precise);
}
if (lt == Qualifiers::OCL_Weak) {
EmitARCInitWeak(DeclPtr, ArgVal);
DoStore = false; // The weak init is a store, no need to do two.
}
}
// Enter the cleanup scope.
EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
}
}
// Store the initial value into the alloca.
if (DoStore)
EmitStoreOfScalar(ArgVal, lv, /* isInitialization */ true);
setAddrOfLocalVar(&D, DeclPtr);
// Emit debug info for param declaration.
if (CGDebugInfo *DI = getDebugInfo()) {
if (CGM.getCodeGenOpts().getDebugInfo() >=
codegenoptions::LimitedDebugInfo) {
DI->EmitDeclareOfArgVariable(&D, DeclPtr.getPointer(), ArgNo, Builder);
}
}
if (D.hasAttr<AnnotateAttr>())
EmitVarAnnotations(&D, DeclPtr.getPointer());
// We can only check return value nullability if all arguments to the
// function satisfy their nullability preconditions. This makes it necessary
// to emit null checks for args in the function body itself.
if (requiresReturnValueNullabilityCheck()) {
auto Nullability = Ty->getNullability(getContext());
if (Nullability && *Nullability == NullabilityKind::NonNull) {
SanitizerScope SanScope(this);
RetValNullabilityPrecondition =
Builder.CreateAnd(RetValNullabilityPrecondition,
Builder.CreateIsNotNull(Arg.getAnyValue()));
}
}
}
/// Emit a user-defined OpenMP reduction, but only when OpenMP is enabled
/// and the declaration is actually referenced (or all decls are emitted).
void CodeGenModule::EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D,
                                            CodeGenFunction *CGF) {
  bool wanted = LangOpts.OpenMP && (LangOpts.EmitAllDecls || D->isUsed());
  if (!wanted)
    return;
  getOpenMPRuntime().emitUserDefinedReduction(CGF, D);
}
/// Handle an OpenMP 'requires' directive declaration.
/// Intentionally a no-op in this build; the hook exists so callers that
/// visit all OpenMP declarations link and run without special-casing.
void CodeGenModule::EmitOMPRequiresDecl(const OMPRequiresDecl *D) {
//Do nothing - here to avoid build errors
}
|
#pragma once
#include <queue>
#include <sstream>
#include "JackToken.hpp"
#include "StatementCompiler.hpp"
using namespace std;
// Compiler for Jack 'let' statements: consumes the statement's tokens
// from 'tokens' and appends the generated output to 'result'.
class LetCompiler : public StatementCompiler
{
public:
// NOTE(review): currently an empty stub -- it consumes no tokens and
// writes nothing to 'result'; confirm whether the implementation is
// pending or callers rely on the no-op.
virtual void compile(queue<JackToken*>& tokens, ostringstream& result)
{
}
};
|
#include <argparse.h>
/// Demonstrates the simplest configuration: a single positional argument.
void setup_simple_positional_arg(argparse::argument_parser& parser) {
    parser.add_argument("foo").help("foo argument help.");
}
/// Demonstrates positional and optional arguments, with and without a
/// fixed num_args count.
void setup_multiarg_example(argparse::argument_parser& parser) {
    // Positional arguments.
    parser.add_argument("foo").help("foo argument help.");
    parser.add_argument("bar").num_args(3).help("bar argument help.");
    // Optional arguments, each with a short and a long name.
    parser.add_argument({"-b", "--baz"}).help("baz argument help.");
    parser.add_argument({"-g", "--goo"}).num_args(3).help("goo argument help.");
}
/// Demonstrates sourcing argument values from a config file in addition
/// to the command line.
void setup_config_file_example(argparse::argument_parser& parser) {
    parser.enable_config_file();
    parser.add_argument("foo").help("foo argument help.");
    parser.add_argument("bar").num_args(3).help("bar argument help.");
}
/// Demonstrates greedy positionals: each argument consumes as many
/// values as remain ("*").
void setup_consume_all_arguments(argparse::argument_parser& parser) {
    parser.add_argument("foo").num_args("*").help("foo argument help.");
    parser.add_argument("bar").num_args("*").help("bar argument help.");
}
/// Example driver: configure the parser with one of the setup_* helpers,
/// then parse the command line, reporting argument errors via stdout.
int main(int argc, char *argv[])
{
    // Setup the parser
    auto parser = argparse::argument_parser("MyParser", "Parser Description");
    // Alternative demos; enable at most one setup_* call at a time:
    // setup_multiarg_example(parser);
    // setup_config_file_example(parser);
    setup_consume_all_arguments(parser);
    // Parse exactly once, inside the try block. (Previously parse_args()
    // was also called unguarded before the try, which parsed the command
    // line twice and let any parse error escape before the handler ran.)
    try
    {
        parser.parse_args(argc, argv);
        // Parsed values can then be retrieved, e.g.:
        // auto foo = parser.get<std::string>("foo");
        // auto bar = parser.get<std::vector<std::string>>("bar");
    }
    catch (std::exception& e)
    {
        std::cout << e.what() << std::endl;
        std::exit(1);
    }
    return 0;
}
|
// This file has been generated by Py++.
#include "boost/python.hpp"
#include "wrap_osg.h"
#include "wrap_referenced.h"
#include "Material.pypp.hpp"
namespace bp = boost::python;
/// Py++-generated override shim for osg::Material. It derives from both
/// osg::Material and bp::wrapper so that Python subclasses can override
/// the virtual interface: each virtual forwards to a Python override when
/// one is registered, otherwise it calls the C++ base implementation, and
/// every default_* method exposes that base implementation to Python.
struct Material_wrapper : osg::Material, bp::wrapper< osg::Material > {
Material_wrapper( )
: osg::Material( )
, bp::wrapper< osg::Material >(){
// null constructor
}
virtual void apply( ::osg::State & state ) const {
if( bp::override func_apply = this->get_override( "apply" ) )
func_apply( boost::ref(state) );
else{
this->osg::Material::apply( boost::ref(state) );
}
}
void default_apply( ::osg::State & state ) const {
osg::Material::apply( boost::ref(state) );
}
virtual char const * className( ) const {
if( bp::override func_className = this->get_override( "className" ) )
return func_className( );
else{
return this->osg::Material::className( );
}
}
char const * default_className( ) const {
return osg::Material::className( );
}
virtual ::osg::Object * clone( ::osg::CopyOp const & copyop ) const {
if( bp::override func_clone = this->get_override( "clone" ) )
return func_clone( boost::ref(copyop) );
else{
return this->osg::Material::clone( boost::ref(copyop) );
}
}
::osg::Object * default_clone( ::osg::CopyOp const & copyop ) const {
return osg::Material::clone( boost::ref(copyop) );
}
virtual ::osg::Object * cloneType( ) const {
if( bp::override func_cloneType = this->get_override( "cloneType" ) )
return func_cloneType( );
else{
return this->osg::Material::cloneType( );
}
}
::osg::Object * default_cloneType( ) const {
return osg::Material::cloneType( );
}
virtual bool getModeUsage( ::osg::StateAttribute::ModeUsage & arg0 ) const {
if( bp::override func_getModeUsage = this->get_override( "getModeUsage" ) )
return func_getModeUsage( boost::ref(arg0) );
else{
return this->osg::Material::getModeUsage( boost::ref(arg0) );
}
}
bool default_getModeUsage( ::osg::StateAttribute::ModeUsage & arg0 ) const {
return osg::Material::getModeUsage( boost::ref(arg0) );
}
virtual ::osg::StateAttribute::Type getType( ) const {
if( bp::override func_getType = this->get_override( "getType" ) )
return func_getType( );
else{
return this->osg::Material::getType( );
}
}
::osg::StateAttribute::Type default_getType( ) const {
return osg::Material::getType( );
}
virtual bool isSameKindAs( ::osg::Object const * obj ) const {
if( bp::override func_isSameKindAs = this->get_override( "isSameKindAs" ) )
return func_isSameKindAs( boost::python::ptr(obj) );
else{
return this->osg::Material::isSameKindAs( boost::python::ptr(obj) );
}
}
bool default_isSameKindAs( ::osg::Object const * obj ) const {
return osg::Material::isSameKindAs( boost::python::ptr(obj) );
}
virtual char const * libraryName( ) const {
if( bp::override func_libraryName = this->get_override( "libraryName" ) )
return func_libraryName( );
else{
return this->osg::Material::libraryName( );
}
}
char const * default_libraryName( ) const {
return osg::Material::libraryName( );
}
// From here down the virtuals come from the osg::StateAttribute base,
// so the fallbacks dispatch to osg::StateAttribute:: rather than
// osg::Material::.
virtual ::osg::Texture * asTexture( ) {
if( bp::override func_asTexture = this->get_override( "asTexture" ) )
return func_asTexture( );
else{
return this->osg::StateAttribute::asTexture( );
}
}
::osg::Texture * default_asTexture( ) {
return osg::StateAttribute::asTexture( );
}
virtual ::osg::Texture const * asTexture( ) const {
if( bp::override func_asTexture = this->get_override( "asTexture" ) )
return func_asTexture( );
else{
return this->osg::StateAttribute::asTexture( );
}
}
::osg::Texture const * default_asTexture( ) const {
return osg::StateAttribute::asTexture( );
}
virtual bool checkValidityOfAssociatedModes( ::osg::State & arg0 ) const {
if( bp::override func_checkValidityOfAssociatedModes = this->get_override( "checkValidityOfAssociatedModes" ) )
return func_checkValidityOfAssociatedModes( boost::ref(arg0) );
else{
return this->osg::StateAttribute::checkValidityOfAssociatedModes( boost::ref(arg0) );
}
}
bool default_checkValidityOfAssociatedModes( ::osg::State & arg0 ) const {
return osg::StateAttribute::checkValidityOfAssociatedModes( boost::ref(arg0) );
}
virtual void compileGLObjects( ::osg::State & arg0 ) const {
if( bp::override func_compileGLObjects = this->get_override( "compileGLObjects" ) )
func_compileGLObjects( boost::ref(arg0) );
else{
this->osg::StateAttribute::compileGLObjects( boost::ref(arg0) );
}
}
void default_compileGLObjects( ::osg::State & arg0 ) const {
osg::StateAttribute::compileGLObjects( boost::ref(arg0) );
}
virtual unsigned int getMember( ) const {
if( bp::override func_getMember = this->get_override( "getMember" ) )
return func_getMember( );
else{
return this->osg::StateAttribute::getMember( );
}
}
unsigned int default_getMember( ) const {
return osg::StateAttribute::getMember( );
}
virtual bool isTextureAttribute( ) const {
if( bp::override func_isTextureAttribute = this->get_override( "isTextureAttribute" ) )
return func_isTextureAttribute( );
else{
return this->osg::StateAttribute::isTextureAttribute( );
}
}
bool default_isTextureAttribute( ) const {
return osg::StateAttribute::isTextureAttribute( );
}
virtual void resizeGLObjectBuffers( unsigned int arg0 ) {
if( bp::override func_resizeGLObjectBuffers = this->get_override( "resizeGLObjectBuffers" ) )
func_resizeGLObjectBuffers( arg0 );
else{
this->osg::StateAttribute::resizeGLObjectBuffers( arg0 );
}
}
void default_resizeGLObjectBuffers( unsigned int arg0 ) {
osg::StateAttribute::resizeGLObjectBuffers( arg0 );
}
};
/// Py++-generated registration: exposes osg::Material to Python (via
/// Material_wrapper) as class "Material", including its ColorMode/Face
/// enums, the Material virtuals, and the inherited osg::StateAttribute
/// virtuals. Each virtual is registered together with its default_*
/// companion so Python can call the C++ base implementation explicitly.
void register_Material_class(){
{ //::osg::Material
typedef bp::class_< Material_wrapper, bp::bases< osg::StateAttribute >, osg::ref_ptr< ::osg::Material >, boost::noncopyable > Material_exposer_t;
Material_exposer_t Material_exposer = Material_exposer_t( "Material", "\n Material - encapsulates OpenGL glMaterial state.\n", bp::no_init );
bp::scope Material_scope( Material_exposer );
// Nested enums, exported into the Material class scope.
bp::enum_< osg::Material::ColorMode>("ColorMode")
.value("AMBIENT", osg::Material::AMBIENT)
.value("DIFFUSE", osg::Material::DIFFUSE)
.value("SPECULAR", osg::Material::SPECULAR)
.value("EMISSION", osg::Material::EMISSION)
.value("AMBIENT_AND_DIFFUSE", osg::Material::AMBIENT_AND_DIFFUSE)
.value("OFF", osg::Material::OFF)
.export_values()
;
bp::enum_< osg::Material::Face>("Face")
.value("FRONT", osg::Material::FRONT)
.value("BACK", osg::Material::BACK)
.value("FRONT_AND_BACK", osg::Material::FRONT_AND_BACK)
.export_values()
;
Material_exposer.def( bp::init< >("\n Material - encapsulates OpenGL glMaterial state.\n") );
{ //::osg::Material::apply
typedef void ( ::osg::Material::*apply_function_type )( ::osg::State & ) const;
typedef void ( Material_wrapper::*default_apply_function_type )( ::osg::State & ) const;
Material_exposer.def(
"apply"
, apply_function_type(&::osg::Material::apply)
, default_apply_function_type(&Material_wrapper::default_apply)
, ( bp::arg("state") ) );
}
{ //::osg::Material::className
typedef char const * ( ::osg::Material::*className_function_type )( ) const;
typedef char const * ( Material_wrapper::*default_className_function_type )( ) const;
Material_exposer.def(
"className"
, className_function_type(&::osg::Material::className)
, default_className_function_type(&Material_wrapper::default_className) );
}
{ //::osg::Material::clone
typedef ::osg::Object * ( ::osg::Material::*clone_function_type )( ::osg::CopyOp const & ) const;
typedef ::osg::Object * ( Material_wrapper::*default_clone_function_type )( ::osg::CopyOp const & ) const;
Material_exposer.def(
"clone"
, clone_function_type(&::osg::Material::clone)
, default_clone_function_type(&Material_wrapper::default_clone)
, ( bp::arg("copyop") )
, bp::return_value_policy< bp::reference_existing_object >() );
}
{ //::osg::Material::cloneType
typedef ::osg::Object * ( ::osg::Material::*cloneType_function_type )( ) const;
typedef ::osg::Object * ( Material_wrapper::*default_cloneType_function_type )( ) const;
Material_exposer.def(
"cloneType"
, cloneType_function_type(&::osg::Material::cloneType)
, default_cloneType_function_type(&Material_wrapper::default_cloneType)
, bp::return_value_policy< bp::reference_existing_object >() );
}
{ //::osg::Material::getAmbient
typedef ::osg::Vec4 const & ( ::osg::Material::*getAmbient_function_type )( ::osg::Material::Face ) const;
Material_exposer.def(
"getAmbient"
, getAmbient_function_type( &::osg::Material::getAmbient )
, ( bp::arg("face") )
, bp::return_internal_reference< >() );
}
{ //::osg::Material::getAmbientFrontAndBack
typedef bool ( ::osg::Material::*getAmbientFrontAndBack_function_type )( ) const;
Material_exposer.def(
"getAmbientFrontAndBack"
, getAmbientFrontAndBack_function_type( &::osg::Material::getAmbientFrontAndBack ) );
}
{ //::osg::Material::getColorMode
typedef ::osg::Material::ColorMode ( ::osg::Material::*getColorMode_function_type )( ) const;
Material_exposer.def(
"getColorMode"
, getColorMode_function_type( &::osg::Material::getColorMode ) );
}
{ //::osg::Material::getDiffuse
typedef ::osg::Vec4 const & ( ::osg::Material::*getDiffuse_function_type )( ::osg::Material::Face ) const;
Material_exposer.def(
"getDiffuse"
, getDiffuse_function_type( &::osg::Material::getDiffuse )
, ( bp::arg("face") )
, bp::return_internal_reference< >() );
}
{ //::osg::Material::getDiffuseFrontAndBack
typedef bool ( ::osg::Material::*getDiffuseFrontAndBack_function_type )( ) const;
Material_exposer.def(
"getDiffuseFrontAndBack"
, getDiffuseFrontAndBack_function_type( &::osg::Material::getDiffuseFrontAndBack ) );
}
{ //::osg::Material::getEmission
typedef ::osg::Vec4 const & ( ::osg::Material::*getEmission_function_type )( ::osg::Material::Face ) const;
Material_exposer.def(
"getEmission"
, getEmission_function_type( &::osg::Material::getEmission )
, ( bp::arg("face") )
, bp::return_internal_reference< >()
, " Get the emission value for specified face." );
}
{ //::osg::Material::getEmissionFrontAndBack
typedef bool ( ::osg::Material::*getEmissionFrontAndBack_function_type )( ) const;
Material_exposer.def(
"getEmissionFrontAndBack"
, getEmissionFrontAndBack_function_type( &::osg::Material::getEmissionFrontAndBack )
, " Return whether emission values are equal for front and back faces\n or not." );
}
{ //::osg::Material::getModeUsage
typedef bool ( ::osg::Material::*getModeUsage_function_type )( ::osg::StateAttribute::ModeUsage & ) const;
typedef bool ( Material_wrapper::*default_getModeUsage_function_type )( ::osg::StateAttribute::ModeUsage & ) const;
Material_exposer.def(
"getModeUsage"
, getModeUsage_function_type(&::osg::Material::getModeUsage)
, default_getModeUsage_function_type(&Material_wrapper::default_getModeUsage)
, ( bp::arg("arg0") ) );
}
{ //::osg::Material::getShininess
typedef float ( ::osg::Material::*getShininess_function_type )( ::osg::Material::Face ) const;
Material_exposer.def(
"getShininess"
, getShininess_function_type( &::osg::Material::getShininess )
, ( bp::arg("face") )
, " Get the shininess value for specified face." );
}
{ //::osg::Material::getShininessFrontAndBack
typedef bool ( ::osg::Material::*getShininessFrontAndBack_function_type )( ) const;
Material_exposer.def(
"getShininessFrontAndBack"
, getShininessFrontAndBack_function_type( &::osg::Material::getShininessFrontAndBack )
, " Return whether shininess values are equal for front and back faces\n or not." );
}
{ //::osg::Material::getSpecular
typedef ::osg::Vec4 const & ( ::osg::Material::*getSpecular_function_type )( ::osg::Material::Face ) const;
Material_exposer.def(
"getSpecular"
, getSpecular_function_type( &::osg::Material::getSpecular )
, ( bp::arg("face") )
, bp::return_internal_reference< >()
, " Get the specular value for specified face." );
}
{ //::osg::Material::getSpecularFrontAndBack
typedef bool ( ::osg::Material::*getSpecularFrontAndBack_function_type )( ) const;
Material_exposer.def(
"getSpecularFrontAndBack"
, getSpecularFrontAndBack_function_type( &::osg::Material::getSpecularFrontAndBack )
, " Return whether specular values are equal for front and back faces\n or not." );
}
{ //::osg::Material::getType
typedef ::osg::StateAttribute::Type ( ::osg::Material::*getType_function_type )( ) const;
typedef ::osg::StateAttribute::Type ( Material_wrapper::*default_getType_function_type )( ) const;
Material_exposer.def(
"getType"
, getType_function_type(&::osg::Material::getType)
, default_getType_function_type(&Material_wrapper::default_getType) );
}
{ //::osg::Material::isSameKindAs
typedef bool ( ::osg::Material::*isSameKindAs_function_type )( ::osg::Object const * ) const;
typedef bool ( Material_wrapper::*default_isSameKindAs_function_type )( ::osg::Object const * ) const;
Material_exposer.def(
"isSameKindAs"
, isSameKindAs_function_type(&::osg::Material::isSameKindAs)
, default_isSameKindAs_function_type(&Material_wrapper::default_isSameKindAs)
, ( bp::arg("obj") ) );
}
{ //::osg::Material::libraryName
typedef char const * ( ::osg::Material::*libraryName_function_type )( ) const;
typedef char const * ( Material_wrapper::*default_libraryName_function_type )( ) const;
Material_exposer.def(
"libraryName"
, libraryName_function_type(&::osg::Material::libraryName)
, default_libraryName_function_type(&Material_wrapper::default_libraryName) );
}
{ //::osg::Material::setAlpha
typedef void ( ::osg::Material::*setAlpha_function_type )( ::osg::Material::Face,float ) ;
Material_exposer.def(
"setAlpha"
, setAlpha_function_type( &::osg::Material::setAlpha )
, ( bp::arg("face"), bp::arg("alpha") )
, " Set the alpha value of ambient, diffuse, specular and emission\n colors. Valid transparency range is 0.0 to 1.0." );
}
{ //::osg::Material::setAmbient
typedef void ( ::osg::Material::*setAmbient_function_type )( ::osg::Material::Face,::osg::Vec4 const & ) ;
Material_exposer.def(
"setAmbient"
, setAmbient_function_type( &::osg::Material::setAmbient )
, ( bp::arg("face"), bp::arg("ambient") ) );
}
{ //::osg::Material::setColorMode
typedef void ( ::osg::Material::*setColorMode_function_type )( ::osg::Material::ColorMode ) ;
Material_exposer.def(
"setColorMode"
, setColorMode_function_type( &::osg::Material::setColorMode )
, ( bp::arg("mode") ) );
}
{ //::osg::Material::setDiffuse
typedef void ( ::osg::Material::*setDiffuse_function_type )( ::osg::Material::Face,::osg::Vec4 const & ) ;
Material_exposer.def(
"setDiffuse"
, setDiffuse_function_type( &::osg::Material::setDiffuse )
, ( bp::arg("face"), bp::arg("diffuse") ) );
}
{ //::osg::Material::setEmission
typedef void ( ::osg::Material::*setEmission_function_type )( ::osg::Material::Face,::osg::Vec4 const & ) ;
Material_exposer.def(
"setEmission"
, setEmission_function_type( &::osg::Material::setEmission )
, ( bp::arg("face"), bp::arg("emission") )
, " Set emission value of specified face(s) of the material,\n valid emission[0..3] range is 0.0 to 1.0." );
}
{ //::osg::Material::setShininess
typedef void ( ::osg::Material::*setShininess_function_type )( ::osg::Material::Face,float ) ;
Material_exposer.def(
"setShininess"
, setShininess_function_type( &::osg::Material::setShininess )
, ( bp::arg("face"), bp::arg("shininess") )
, " Set shininess of specified face(s) of the material.\n valid shininess range is 0.0 to 128.0." );
}
{ //::osg::Material::setSpecular
typedef void ( ::osg::Material::*setSpecular_function_type )( ::osg::Material::Face,::osg::Vec4 const & ) ;
Material_exposer.def(
"setSpecular"
, setSpecular_function_type( &::osg::Material::setSpecular )
, ( bp::arg("face"), bp::arg("specular") )
, " Set specular value of specified face(s) of the material,\n valid specular[0..3] range is 0.0 to 1.0." );
}
{ //::osg::Material::setTransparency
typedef void ( ::osg::Material::*setTransparency_function_type )( ::osg::Material::Face,float ) ;
Material_exposer.def(
"setTransparency"
, setTransparency_function_type( &::osg::Material::setTransparency )
, ( bp::arg("face"), bp::arg("trans") )
, " Set the alpha value of ambient, diffuse, specular and emission\n colors of specified face, to 1-transparency.\n Valid transparency range is 0.0 to 1.0." );
}
// Virtuals inherited from osg::StateAttribute, re-exposed here so the
// Python "Material" class carries the full StateAttribute interface.
{ //::osg::StateAttribute::asTexture
typedef ::osg::Texture * ( ::osg::StateAttribute::*asTexture_function_type )( ) ;
typedef ::osg::Texture * ( Material_wrapper::*default_asTexture_function_type )( ) ;
Material_exposer.def(
"asTexture"
, asTexture_function_type(&::osg::StateAttribute::asTexture)
, default_asTexture_function_type(&Material_wrapper::default_asTexture)
, bp::return_internal_reference< >() );
}
{ //::osg::StateAttribute::asTexture
typedef ::osg::Texture const * ( ::osg::StateAttribute::*asTexture_function_type )( ) const;
typedef ::osg::Texture const * ( Material_wrapper::*default_asTexture_function_type )( ) const;
Material_exposer.def(
"asTexture"
, asTexture_function_type(&::osg::StateAttribute::asTexture)
, default_asTexture_function_type(&Material_wrapper::default_asTexture)
, bp::return_internal_reference< >() );
}
{ //::osg::StateAttribute::checkValidityOfAssociatedModes
typedef bool ( ::osg::StateAttribute::*checkValidityOfAssociatedModes_function_type )( ::osg::State & ) const;
typedef bool ( Material_wrapper::*default_checkValidityOfAssociatedModes_function_type )( ::osg::State & ) const;
Material_exposer.def(
"checkValidityOfAssociatedModes"
, checkValidityOfAssociatedModes_function_type(&::osg::StateAttribute::checkValidityOfAssociatedModes)
, default_checkValidityOfAssociatedModes_function_type(&Material_wrapper::default_checkValidityOfAssociatedModes)
, ( bp::arg("arg0") ) );
}
{ //::osg::StateAttribute::compileGLObjects
typedef void ( ::osg::StateAttribute::*compileGLObjects_function_type )( ::osg::State & ) const;
typedef void ( Material_wrapper::*default_compileGLObjects_function_type )( ::osg::State & ) const;
Material_exposer.def(
"compileGLObjects"
, compileGLObjects_function_type(&::osg::StateAttribute::compileGLObjects)
, default_compileGLObjects_function_type(&Material_wrapper::default_compileGLObjects)
, ( bp::arg("arg0") ) );
}
{ //::osg::StateAttribute::getMember
typedef unsigned int ( ::osg::StateAttribute::*getMember_function_type )( ) const;
typedef unsigned int ( Material_wrapper::*default_getMember_function_type )( ) const;
Material_exposer.def(
"getMember"
, getMember_function_type(&::osg::StateAttribute::getMember)
, default_getMember_function_type(&Material_wrapper::default_getMember) );
}
{ //::osg::StateAttribute::isTextureAttribute
typedef bool ( ::osg::StateAttribute::*isTextureAttribute_function_type )( ) const;
typedef bool ( Material_wrapper::*default_isTextureAttribute_function_type )( ) const;
Material_exposer.def(
"isTextureAttribute"
, isTextureAttribute_function_type(&::osg::StateAttribute::isTextureAttribute)
, default_isTextureAttribute_function_type(&Material_wrapper::default_isTextureAttribute) );
}
{ //::osg::StateAttribute::resizeGLObjectBuffers
typedef void ( ::osg::StateAttribute::*resizeGLObjectBuffers_function_type )( unsigned int ) ;
typedef void ( Material_wrapper::*default_resizeGLObjectBuffers_function_type )( unsigned int ) ;
Material_exposer.def(
"resizeGLObjectBuffers"
, resizeGLObjectBuffers_function_type(&::osg::StateAttribute::resizeGLObjectBuffers)
, default_resizeGLObjectBuffers_function_type(&Material_wrapper::default_resizeGLObjectBuffers)
, ( bp::arg("arg0") ) );
}
}
}
|
// ***********************************************************************
// Code Created by James Michael Armstrong (https://github.com/BlazesRus)
// ***********************************************************************
#pragma once
#include "..\DLLAPI.h"
#include "VariableFormula.hpp"
#include "..\AltNum\MediumDec.hpp"
namespace BlazesRusCode
{
template<typename VarType, typename VarStoreType>
class DLL_API BuiltinFormula : public VariableFormula<VarType>
{
public:
/// <summary>
/// Turns the operator element into a boolean literal element
/// (trueVal when Value is true, falseVal otherwise).
/// Note: FormCopy is currently unused; kept for interface symmetry with SwitchOpToVal.
/// </summary>
/// <param name="FormCopy">The formula segment being condensed (currently unused).</param>
/// <param name="OpVal">The operator element being rewritten in place.</param>
/// <param name="Value">The boolean result to store.</param>
void SwitchOpToBoolVal(FormData& FormCopy, FormElement& OpVal, bool Value)
{
if (Value) { OpVal.ElementCat = FormulaElementType::trueVal; }
else { OpVal.ElementCat = FormulaElementType::falseVal; }
}
/// <summary>
/// Turns the operator element into a numeric literal holding Value:
/// stores Value in the segment's NumMap under OpKey and retags the element as Num.
/// </summary>
/// <param name="FormCopy">The formula segment whose NumMap receives the value.</param>
/// <param name="OpVal">The operator element being rewritten in place.</param>
/// <param name="OpKey">The element key the value is stored under.</param>
/// <param name="Value">The value to turn the operator into.</param>
void SwitchOpToVal(FormData& FormCopy, FormElement& OpVal, int OpKey, VarType Value)
{
FormCopy.NumMap.insert_or_assign(OpKey, Value);
OpVal.ElementCat = FormulaElementType::Num;
}
/// <summary>
/// Condenses the formula segment at FormIndex in place by applying every
/// operator whose operands are already known values, following the C++
/// order of operations (https://en.cppreference.com/w/cpp/language/operator_precedence).
/// Operators whose operands are unresolved variables (or nested segments that
/// do not condense to a single value) are left untouched.
/// </summary>
/// <param name="FormIndex">Index of the formula segment inside Data.</param>
void EvaluateOperations(size_t FormIndex = 0)
{
    FormData& FormDRef = this->Data.at(FormIndex);
    FormData::iterator segmentStart = FormDRef.begin();
    bool TempBool = false;//Initialized so an unexpected op never reads an indeterminate value
    VarType leftValue;
    VarType rightValue;
    FormData::iterator OpIterator;
    FormData::iterator LeftVal;
    FormData::iterator RightVal;
    int OpTargetKey;
    int leftKey;
    //Applying operations via C++ variation of order of operations
    for (int opIndex = 0; opIndex < 11; ++opIndex)
    {
        //BUGFIX: bind the reference freshly each pass. The original bound it to
        //OpOrderMap[0] once before the loop and then *assigned* to it, which
        //copied every later precedence level's keys into level 0 instead of
        //iterating that level's own keys.
        IntVector& OpOrderElement = FormDRef.OpOrderMap[opIndex];
        if (OpOrderElement.empty())
            continue;
        for (IntVector::iterator CurrentVal = OpOrderElement.begin(), LastVal = OpOrderElement.end(); CurrentVal != LastVal; ++CurrentVal)
        {
            OpTargetKey = *CurrentVal;
            OpIterator = FormDRef.find(OpTargetKey);
            if (opIndex == 1)
            {
#ifndef Blazes_DisableFormula_NegativeSwapping
                if (OpIterator->second.ElementCat == FormulaElementType::Negative && OpIterator != segmentStart)//Special conditional to potentially swap negative with minus
                {
                    LeftVal = OpIterator - 1;
                    switch (LeftVal->second.ElementCat)
                    {
                    case FormulaElementType::Variable:
                        continue;//Ignore non-set variables for this version
                    case FormulaElementType::Formula:
                    case FormulaElementType::Num:
                    case FormulaElementType::trueVal:
                    case FormulaElementType::falseVal://Swap right-side Negative into a left-right subtraction operation
                    {
                        FormDRef[*CurrentVal].ElementCat = FormulaElementType::Sub;
                        //Keep the moved operation in the correct precedence order
                        if (FormDRef.OpOrderMap[3].empty() || FormDRef.OpOrderMap[3].back() < OpTargetKey)//If higher position than last element, just append
                            FormDRef.OpOrderMap[3].push_back(OpTargetKey);
                        else
                        {//Keys with lower indexes are normally in front
                            for (IntVector::iterator cVal = FormDRef.OpOrderMap[3].begin(), LastSubVal = FormDRef.OpOrderMap[3].end(); cVal != LastSubVal; ++cVal)
                            {
                                if (*cVal > OpTargetKey)
                                {
                                    FormDRef.OpOrderMap[3].insert(cVal, OpTargetKey);
                                    break;
                                }
                            }
                        }
                        continue;
                    }
                    default:
                        leftKey = -1;
                    }
                }
                else
#endif
                    leftKey = -1;
            }
            else if (opIndex != 0 || (OpIterator->second.ElementCat != FormulaElementType::Sqrt && OpIterator->second.ElementCat != FormulaElementType::LN && OpIterator->second.ElementCat != FormulaElementType::LOGTEN))
            {//Every op except Sqrt/LN/LOGTEN consumes a left-hand operand
                LeftVal = OpIterator - 1;
                leftKey = LeftVal->first;
                switch (LeftVal->second.ElementCat)
                {
                case FormulaElementType::Variable:
                    continue;//Ignore non-set variables for this version
                case FormulaElementType::Formula:
                {
                    EvaluateOperations(LeftVal->second.Index);//Condense inner formula if possible
                    FormData& targetSegmentRef = this->Data.at(LeftVal->second.Index);
                    if (targetSegmentRef.size() == 1)
                    {
                        FormData::iterator targetElem = targetSegmentRef.begin();
                        if (targetElem->second.ElementCat != FormulaElementType::Variable)
                            leftValue = targetElem->second.ElementCat == FormulaElementType::trueVal ? VarType::One : (targetElem->second.ElementCat == FormulaElementType::falseVal ? VarType::Zero : targetSegmentRef.NumMap[targetElem->first]);
                        else
                            continue;//Ignore operation with unknown variable value
                    }
                    else
                        continue;//Ignore if not condensed down to single value
                }
                break;
                case FormulaElementType::Num:
                    leftValue = FormDRef.NumMap[leftKey]; break;
                case FormulaElementType::trueVal:
                    leftValue = VarType::One;
                    break;//BUGFIX: previously fell through and clobbered leftValue with Zero
                case FormulaElementType::falseVal:
                    leftValue = VarType::Zero;
                    break;//BUGFIX: previously fell into default and skipped the operation
                default:
                    continue;
                }
            }
            else
                leftKey = -1;
            RightVal = OpIterator + 1;
            switch (RightVal->second.ElementCat)
            {
            case FormulaElementType::Variable:
                continue;//Ignore non-set variables for this version
            case FormulaElementType::Formula:
            {
                EvaluateOperations(RightVal->second.Index);//Condense inner formula first (matches left-side ordering)
                FormData& targetSegmentRef = this->Data.at(RightVal->second.Index);
                if (targetSegmentRef.size() == 1)
                {
                    FormData::iterator targetElem = targetSegmentRef.begin();
                    if (targetElem->second.ElementCat != FormulaElementType::Variable)
                        rightValue = targetElem->second.ElementCat == FormulaElementType::trueVal ? VarType::One : (targetElem->second.ElementCat == FormulaElementType::falseVal ? VarType::Zero : targetSegmentRef.NumMap[targetElem->first]);
                    else
                        continue;//Ignore operation with unknown variable value
                }
                else
                    continue;//Ignore if not condensed down to single value
            }
            break;
            case FormulaElementType::Num:
                rightValue = FormDRef.NumMap[RightVal->first]; break;
            case FormulaElementType::trueVal:
                rightValue = VarType::One;
                break;//BUGFIX: previously fell through and clobbered rightValue with Zero
            case FormulaElementType::falseVal:
                rightValue = VarType::Zero;
                break;//BUGFIX: previously fell into default and skipped the operation
            default:
                continue;
            }
            switch (opIndex)
            {
            default://Unreachable precedence levels
                break;
            case 0://Pow/root/log family
                switch (OpIterator->second.ElementCat)
                {
                case FormulaElementType::Pow:
                    leftValue = VarType::PowOp(leftValue, rightValue);
                    SwitchOpToVal(FormDRef, FormDRef[*CurrentVal], OpTargetKey, leftValue);
                    break;
                case FormulaElementType::Sqrt:
                    rightValue = VarType::Sqrt(rightValue);
                    SwitchOpToVal(FormDRef, FormDRef[*CurrentVal], OpTargetKey, rightValue);
                    break;
                case FormulaElementType::NthRoot:
                    rightValue = VarType::NthRootV2(rightValue, (int)leftValue);
                    SwitchOpToVal(FormDRef, FormDRef[*CurrentVal], OpTargetKey, rightValue);
                    break;
                case FormulaElementType::LN:
                    rightValue = VarType::Ln(rightValue);
                    SwitchOpToVal(FormDRef, FormDRef[*CurrentVal], OpTargetKey, rightValue);
                    break;
                case FormulaElementType::LOGTEN:
                    rightValue = VarType::Log10(rightValue);
                    SwitchOpToVal(FormDRef, FormDRef[*CurrentVal], OpTargetKey, rightValue);
                    break;
                case FormulaElementType::BaseNLog:
                    rightValue = VarType::Log(rightValue, leftValue);
                    SwitchOpToVal(FormDRef, FormDRef[*CurrentVal], OpTargetKey, rightValue);
                    break;
                default:
                    break;
                }
                break;
            case 1://Unary ops: remove the operator element and rewrite the right-side value
                switch (OpIterator->second.ElementCat)
                {
                case FormulaElementType::Not:
                    if (RightVal->second.ElementCat == FormulaElementType::trueVal)
                    {
                        FormDRef.at(RightVal->first).ElementCat = FormulaElementType::falseVal;
                    }
                    else if (RightVal->second.ElementCat == FormulaElementType::falseVal)
                    {
                        FormDRef.at(RightVal->first).ElementCat = FormulaElementType::trueVal;
                    }
                    else if (RightVal->second.ElementCat == FormulaElementType::Formula)
                    {
                        FormDRef.at(RightVal->first).ElementCat = rightValue == VarType::Zero ? FormulaElementType::trueVal : FormulaElementType::falseVal;
                    }
                    else//Assuming is number
                    {
                        if (rightValue == VarType::Zero)//Zero is false; any other number counts as true
                            FormDRef.NumMap[RightVal->first] = VarType::One;
                        else
                            FormDRef.NumMap[RightVal->first] = VarType::Zero;
                    }
                    break;
                case FormulaElementType::Negative://Only applies to numbers or Formulas(for now)
                    if (RightVal->second.ElementCat == FormulaElementType::Formula)
                    {
                        rightValue.SwapNegativeStatus();
                        FormDRef.at(RightVal->first).ElementCat = FormulaElementType::Num;
                        FormDRef.NumMap.insert_or_assign(RightVal->first, rightValue);
                    }
                    else
                    {
                        rightValue.SwapNegativeStatus();//rightValue = -rightValue;
                        FormDRef.NumMap[RightVal->first] = rightValue;
                    }
                    break;
                default:
                    break;
                }
                FormDRef.erase(OpIterator);
                break;
            case 2://Multiplication, division, and remainder
                switch (OpIterator->second.ElementCat)
                {
                case FormulaElementType::Mult:
                    leftValue *= rightValue;
                    SwitchOpToVal(FormDRef, FormDRef[*CurrentVal], OpTargetKey, leftValue);
                    break;
                case FormulaElementType::Div:
                    leftValue /= rightValue;
                    SwitchOpToVal(FormDRef, FormDRef[*CurrentVal], OpTargetKey, leftValue);
                    break;
                case FormulaElementType::Rem:
                    leftValue %= rightValue;
                    SwitchOpToVal(FormDRef, FormDRef[*CurrentVal], OpTargetKey, leftValue);
                    break;
                default:
                    break;
                }
                break;
            case 3://Addition and subtraction
                switch (OpIterator->second.ElementCat)
                {
                case FormulaElementType::Add:
                    leftValue += rightValue;
                    SwitchOpToVal(FormDRef, FormDRef[*CurrentVal], OpTargetKey, leftValue);
                    break;
                case FormulaElementType::Sub:
                    leftValue -= rightValue;
                    SwitchOpToVal(FormDRef, FormDRef[*CurrentVal], OpTargetKey, leftValue);
                    break;
                default:
                    break;
                }
                break;
            case 4://<, <=, >, >=
                switch (OpIterator->second.ElementCat)
                {
                case FormulaElementType::LessThan:
                    TempBool = leftValue < rightValue;
                    break;
                case FormulaElementType::LessOrEqual:
                    TempBool = leftValue <= rightValue;
                    break;
                case FormulaElementType::GreaterThan:
                    TempBool = leftValue > rightValue;
                    break;
                case FormulaElementType::GreaterOrEqual:
                    TempBool = leftValue >= rightValue;
                    break;
                default:
                    break;
                }
                SwitchOpToBoolVal(FormDRef, FormDRef[*CurrentVal], TempBool);
                break;
            case 5://== and !=
                switch (OpIterator->second.ElementCat)
                {
                case FormulaElementType::Equal:
                    TempBool = leftValue == rightValue;
                    break;
                case FormulaElementType::NotEqual:
                    TempBool = leftValue != rightValue;
                    break;
                default:
                    break;
                }
                SwitchOpToBoolVal(FormDRef, FormDRef[*CurrentVal], TempBool);
                break;
            case 6://& (bitwise AND)
                leftValue = leftValue & rightValue;
                SwitchOpToVal(FormDRef, FormDRef[*CurrentVal], OpTargetKey, leftValue);
                break;
            case 7://^ treated as XOR at this level
                leftValue = leftValue ^ rightValue;
                SwitchOpToVal(FormDRef, FormDRef[*CurrentVal], OpTargetKey, leftValue);
                break;
            case 8://| (bitwise inclusive OR)
                leftValue = leftValue | rightValue;//BUGFIX: was '^', which duplicated the XOR level
                SwitchOpToVal(FormDRef, FormDRef[*CurrentVal], OpTargetKey, leftValue);
                break;
            case 9://&&
                TempBool = leftValue && rightValue;
                SwitchOpToBoolVal(FormDRef, FormDRef[*CurrentVal], TempBool);
                break;
            case 10://|| (logical OR)
                TempBool = leftValue || rightValue;
                SwitchOpToBoolVal(FormDRef, FormDRef[*CurrentVal], TempBool);
                break;
            }
            if (opIndex != 1)
            {//Binary ops: remove both consumed operand elements
                int RightKey = RightVal->first;
                if (leftKey != -1)
                {
                    FormDRef.erase(leftKey);
                }
                FormDRef.erase(RightKey);
            }
        }
    }
}
/// <summary>
/// Replaces every resolvable variable element (in this segment and recursively
/// in nested formula segments) with the value referenced in ElementValues.
/// Variables without a matching entry are left untouched.
/// </summary>
/// <param name="ElementValues">Values (by reference) keyed by variable name.</param>
/// <param name="FormIndex">Index of the formula segment inside Data.</param>
void ReplaceVariablesWithRefValues(ReferenceMap ElementValues, size_t FormIndex = 0)
{
    std::string CurString;
    VarType targetResult;
    FormData& FormDRef = this->Data.at(FormIndex);
    for (FormData::iterator CurrentVal = FormDRef.begin(), LastVal = FormDRef.end(); CurrentVal != LastVal; ++CurrentVal)
    {
        if (CurrentVal->second.ElementCat == FormulaElementType::Formula)//Recurse into nested formula segment
        {
            ReplaceVariablesWithRefValues(ElementValues, CurrentVal->second.Index);
        }
        else if (CurrentVal->second.ElementCat == FormulaElementType::Variable)//Swap variable with its value
        {
            //BUGFIX(consistency): look up the variable name by second.Index, as
            //ReplaceVariablesWithValues and RecursivelyAddToString do; the element's
            //map key (first) is not the VariableStore index.
            CurString = this->VariableStore.at(CurrentVal->second.Index);
            tsl::ordered_map<std::string, VarType&>::iterator KeyedElemVal = ElementValues.find(CurString);
            if (KeyedElemVal != ElementValues.end())//Only replace when a matching variable is found
            {
                FormDRef.at(CurrentVal->first).ElementCat = FormulaElementType::Num;
                targetResult = KeyedElemVal.value();
                FormDRef.NumMap.insert_or_assign(CurrentVal->first, targetResult);
            }
        }
    }
}
/// <summary>
/// Returns a copy of this formula with the referenced variable values
/// substituted in and every resolvable operation evaluated.
/// The original formula is left unmodified.
/// </summary>
/// <param name="ElementValues">Values (by reference) keyed by variable name.</param>
/// <returns>BlazesRusCode.BuiltinFormula</returns>
BuiltinFormula EvaluateRefToSimplifiedForm(ReferenceMap ElementValues)
{
    BuiltinFormula simplifiedCopy(*this);//Work on a duplicate so elements can be erased freely
    simplifiedCopy.ReplaceVariablesWithRefValues(ElementValues);
    simplifiedCopy.EvaluateOperations();
    return simplifiedCopy;
}
/// <summary>
/// Replaces every resolvable variable element (in this segment and recursively
/// in nested formula segments) with its value from ElementValues.
/// Variables without a matching entry are left untouched.
/// </summary>
/// <param name="ElementValues">Values keyed by variable name.</param>
/// <param name="FormIndex">Index of the formula segment inside Data.</param>
void ReplaceVariablesWithValues(ValueMap& ElementValues, size_t FormIndex = 0)
{
    std::string varName;
    VarType replacement;
    FormData& segment = this->Data.at(FormIndex);
    for (FormData::iterator elem = segment.begin(), lastElem = segment.end(); elem != lastElem; ++elem)
    {
        if (elem->second.ElementCat == FormulaElementType::Formula)
        {//Recurse into nested formula segment
            ReplaceVariablesWithValues(ElementValues, elem->second.Index);
            continue;
        }
        if (elem->second.ElementCat != FormulaElementType::Variable)
            continue;//Only variable elements are replaced
        varName = this->VariableStore.at(elem->second.Index);
        tsl::ordered_map<std::string, VarType>::iterator match = ElementValues.find(varName);
        if (match == ElementValues.end())
            continue;//No value supplied for this variable; leave it symbolic
        segment.at(elem->first).ElementCat = FormulaElementType::Num;
        replacement = match.value();
        segment.NumMap.insert_or_assign(elem->first, replacement);
    }
}
/// <summary>
/// Returns a copy of this formula with the supplied variable values
/// substituted in and every resolvable operation evaluated.
/// The original formula is left unmodified.
/// </summary>
/// <param name="ElementValues">Values keyed by variable name.</param>
/// <returns>BlazesRusCode.BuiltinFormula</returns>
BuiltinFormula EvaluateToSimplifiedForm(ValueMap ElementValues)
{
    BuiltinFormula simplifiedCopy(*this);//Work on a duplicate so elements can be erased freely
    simplifiedCopy.ReplaceVariablesWithValues(ElementValues);
    simplifiedCopy.EvaluateOperations();
    return simplifiedCopy;
}
/// <summary>
/// Appends the text form of formula segment FormIndex to strBuffer,
/// recursing into nested formula segments (which are wrapped in parentheses).
/// </summary>
/// <param name="strBuffer">The string buffer appended to.</param>
/// <param name="FormIndex">Index of the formula segment inside Data.</param>
void RecursivelyAddToString(std::string& strBuffer, size_t FormIndex)
{
    FormData& FormDRef = this->Data.at(FormIndex);
    for (FormData::iterator CurrentVal = FormDRef.begin(), LastVal = FormDRef.end(); CurrentVal != LastVal; ++CurrentVal)
    {
        switch (CurrentVal->second.ElementCat)
        {
        default:
            break;
        case FormulaElementType::Formula:
            strBuffer += "(";
            RecursivelyAddToString(strBuffer, CurrentVal->second.Index);
            strBuffer += ")";
            break;
        case FormulaElementType::Num:
            strBuffer += FormDRef.NumMap.at(CurrentVal->first).ToString();
            break;
        case FormulaElementType::Variable:
            strBuffer += this->VariableStore.at(CurrentVal->second.Index);
            break;
        case FormulaElementType::trueVal:
            strBuffer += "true";
            break;
        case FormulaElementType::falseVal:
            strBuffer += "false";
            break;
        case FormulaElementType::Equal:
            strBuffer += "==";
            break;
        case FormulaElementType::NotEqual:
            strBuffer += "!=";
            break;
        case FormulaElementType::AND:
            strBuffer += "&&";
            break;
        case FormulaElementType::OR:
            strBuffer += "||";
            break;
        case FormulaElementType::Not:
            strBuffer += "!";
            break;
        case FormulaElementType::LessThan:
            strBuffer += "<";
            break;
        case FormulaElementType::LessOrEqual:
            strBuffer += "<=";
            break;
        case FormulaElementType::GreaterThan:
            strBuffer += ">";
            break;
        case FormulaElementType::GreaterOrEqual:
            strBuffer += ">=";
            break;
        case FormulaElementType::Add:
            strBuffer += "+";
            break;
        case FormulaElementType::Sub:
            strBuffer += "-";
            break;
        case FormulaElementType::Negative:
            strBuffer += "-";
            break;
        case FormulaElementType::Mult:
            strBuffer += "*";
            break;
        case FormulaElementType::Div:
            strBuffer += "/";
            break;
        case FormulaElementType::Pow:
            strBuffer += "^";
            break;
        case FormulaElementType::Sqrt:
            strBuffer += "SqrtOf";
            break;
        case FormulaElementType::NthRoot:
            strBuffer += "thRootOf";
            break;
        case FormulaElementType::LOGTEN:
            strBuffer += "LogTen";
            break;
        case FormulaElementType::LN:
            strBuffer += "Ln";
            break;
        case FormulaElementType::BaseNLog:
            strBuffer += "thBaseLog";
            break;
        case FormulaElementType::Rem:
            strBuffer += "%";
            break;
        case FormulaElementType::PostFixPlus:
            strBuffer += this->VariableStore.at(CurrentVal->second.Index) + "++";
            break;
        case FormulaElementType::PostFixMinus:
            strBuffer += this->VariableStore.at(CurrentVal->second.Index) + "--";//BUGFIX: previously emitted "++"
            break;
        case FormulaElementType::PrefixPlus:
            strBuffer += "++" + this->VariableStore.at(CurrentVal->second.Index);
            break;
        case FormulaElementType::PrefixMinus:
            strBuffer += "--" + this->VariableStore.at(CurrentVal->second.Index);
            break;
        case FormulaElementType::BitwiseAND:
            strBuffer += "&";
            break;
        case FormulaElementType::XOR:
            strBuffer += "XOR";
            break;
        case FormulaElementType::BitwiseOr:
            strBuffer += "|";
            break;
        }
    }
}
/// <summary>
/// Converts the formula to a string (with no extra spacing applied).
/// </summary>
/// <returns>std.string.</returns>
std::string ToString()
{
    //The top-level segment prints exactly like a nested one (minus the wrapping
    //parentheses), so delegate to RecursivelyAddToString instead of maintaining a
    //~100-line duplicate of its switch (the duplicate had already drifted and
    //repeated the PostFixMinus typo).
    std::string strBuffer = "";
    RecursivelyAddToString(strBuffer, 0);
    return strBuffer;
}
/// <summary>
/// Initializes a new instance of the <see cref="BuiltinFormula" /> class.
/// </summary>
/// <param name="ElemValue">The elem value to read in order to create formula data.</param>
BuiltinFormula(std::string& ElemValue)
{
//0 = ???
//1 = Operator
//3 = Variable Value
//4 = Number
short ScanType = 0;
std::string strBuffer = "";
//Extra buffer for saving potential postfix etc(send as variable if confirmed not as postfix op)
std::string secondaryBuffer = "";
size_t FormulaIndex = 0;
bool numberWasLast = false;//Variable,Numbers, and formulas before - sets it to treat it as minus instead of negative
//size_t CurrentFormElement = 0;
Data.push_back(FormData());//Initialize first (Formula) field
//auto targetForm = at(0);
for (std::string::iterator CurrentVal = ElemValue.begin(), LastVal = ElemValue.end(); CurrentVal != LastVal; ++CurrentVal)
{
if (*CurrentVal == '(')
{
numberWasLast = false;
//if(ScanType==10){strBuffer = at(FormulaIndex).back()+strBuffer;at(FormulaIndex).back()=strBuffer;}
if (!strBuffer.empty()) { InsertFromBuffer(strBuffer, FormulaIndex, ScanType, numberWasLast); strBuffer.clear(); }
FormulaIndex = AddFormulaToBuffer(FormulaIndex); ScanType = 0;
}
else if (*CurrentVal == ')')
{
InsertFromBuffer(strBuffer, FormulaIndex, ScanType, numberWasLast);
--FormulaIndex;
numberWasLast = true;
}
else if (ScanType == 0 || ScanType == 10)
{
if (ScanType == 10)//Prefix/postfix detection for ++/--
{
if (*CurrentVal == ' ' || *CurrentVal == '*' || *CurrentVal == '/' || *CurrentVal == '&' || *CurrentVal == '|' || *CurrentVal == '<' || *CurrentVal == '>' || *CurrentVal == '=' || *CurrentVal == '!' || *CurrentVal == '+' || *CurrentVal == '-')//Postfix
{
//Data.at(FormulaIndex).ChangeLastToPostfixOp(strBuffer, FormulaIndex);
ScanType = 0;
}
else//Likely prefix
{
strBuffer += *CurrentVal;
ScanType = 11;
continue;
}
}
//operators = ['==', '!=', '>=', '<=', '&&', '||', '&', '|', '-', '+', '>', '<', '/', '*', '!','++','--']
if (*CurrentVal == '+')
{
strBuffer = '+'; ScanType = 1;
}
else if (*CurrentVal == '-')
{
strBuffer = '-'; ScanType = 1;//Either Number or operator
}
//---Other operations here as well in case of auto-sending variable on whitespace
else if (*CurrentVal == '!')//Negative Operator only valid for in front of NonOperators
{
Data.at(FormulaIndex).AddOp(FormulaElementType::Not); numberWasLast = false;
}
else if (*CurrentVal == '^')
{
Data.at(FormulaIndex).AddOp(FormulaElementType::Pow); numberWasLast = false;
}
else if (*CurrentVal == '&')
{
strBuffer = '&'; ScanType = 1;
}
else if (*CurrentVal == '|')
{
strBuffer = '|'; ScanType = 1;
}
else if (*CurrentVal == '>')
{
strBuffer = '>'; ScanType = 1;
}
else if (*CurrentVal == '<')
{
strBuffer = '<'; ScanType = 1;
}
else if (*CurrentVal == '/')
{
Data.at(FormulaIndex).AddOp(FormulaElementType::Div); numberWasLast = false;
}
else if (*CurrentVal == '*')
{
Data.at(FormulaIndex).AddOp(FormulaElementType::Mult); numberWasLast = false;
}
else if (*CurrentVal == '^')//Power of function
{
Data.at(FormulaIndex).AddOp(FormulaElementType::Pow); numberWasLast = false;
}
else if (*CurrentVal == '$')//Shorthand for XOR for now
{
Data.at(FormulaIndex).AddOp(FormulaElementType::XOR); numberWasLast = false;
}
//---End of extra mid-formula operations
else
{
if (VariableConversionFunctions::IsDigit(*CurrentVal))
{
ScanType = 4;
strBuffer = *CurrentVal;
}
else if (*CurrentVal != ' ' && *CurrentVal != '\t')//If not whitespace, register as potential variable
{
ScanType = 3;
strBuffer = *CurrentVal;
}
}
}
else if (ScanType == 1)
{
if (strBuffer == "-")
{
if (VariableConversionFunctions::IsDigit(*CurrentVal))
{
ScanType = 4;
if (numberWasLast)
{
Data.at(FormulaIndex).AddOp(FormulaElementType::Sub);
numberWasLast = false;
strBuffer = *CurrentVal;
}
else
strBuffer += *CurrentVal;
}
else if (*CurrentVal == '-')//-- Operator
{
strBuffer = "--"; ScanType = 11;
}
else//- Operator
{
Data.at(FormulaIndex).AddOp(FormulaElementType::Negative);
if (*CurrentVal == '!')//Inverse operator detected after
{
Data.at(FormulaIndex).AddOp(FormulaElementType::Not); strBuffer.clear(); ScanType = 0;
}
else
{
strBuffer = *CurrentVal;
}
}
}
else if (strBuffer == "!")
{
numberWasLast = false;
if (*CurrentVal == '=')//!= Operator
{
Data.at(FormulaIndex).AddOp(FormulaElementType::NotEqual); strBuffer.clear(); ScanType = 0;
}
else//- Operator
{
//Invalid operator between variables?
throw "Invalid operator between variables!";
}
}
else if (strBuffer == "+")
{
numberWasLast = false;
//To-Do Create detection of Prefix/postfix detection
if (*CurrentVal == '+')//++ Operator
{
strBuffer = "++"; ScanType = 11;//Data.at(FormulaIndex).push_back("++"); strBuffer.clear(); ScanType = 0;
}
else//+ Operator
{
Data.at(FormulaIndex).AddOp(FormulaElementType::Add);
if (*CurrentVal == '!')//Inverse operator detected after
{
Data.at(FormulaIndex).AddOp(FormulaElementType::Not); strBuffer.clear(); ScanType = 0;
}
else
{
strBuffer = *CurrentVal;
if (*CurrentVal == '-' || VariableConversionFunctions::IsDigit(*CurrentVal))
{
ScanType = 4;
}
else
{
ScanType = 3;
}
}
}
}
else if (strBuffer == "&")
{
numberWasLast = false;
if (*CurrentVal == '&')
{
Data.at(FormulaIndex).AddOp(FormulaElementType::AND); strBuffer.clear(); ScanType = 0;
}
else
{
Data.at(FormulaIndex).AddOp(FormulaElementType::BitwiseAND);
if (*CurrentVal == '!')
{
Data.at(FormulaIndex).AddOp(FormulaElementType::Not); strBuffer.clear(); ScanType = 0;
}
else
{
strBuffer = *CurrentVal;
if (*CurrentVal == '-' || VariableConversionFunctions::IsDigit(*CurrentVal))
{
ScanType = 4;
}
else
{
ScanType = 3;
}
}
}
}
else if (strBuffer == "|")
{
numberWasLast = false;
if (*CurrentVal == '|')
{
Data.at(FormulaIndex).AddOp(FormulaElementType::Not); strBuffer.clear(); ScanType = 0;
}
else
{
Data.at(FormulaIndex).AddOp(FormulaElementType::BitwiseOr);
if (*CurrentVal == '!')
{
Data.at(FormulaIndex).AddOp(FormulaElementType::Not); strBuffer.clear(); ScanType = 0;
}
else
{
strBuffer = *CurrentVal;
if (*CurrentVal == '-' || VariableConversionFunctions::IsDigit(*CurrentVal))
{
ScanType = 4;
}
else
{
ScanType = 3;
}
}
}
}
else//Catch-all for most operators
{
//strBuffer += *CurrentVal;
if (*CurrentVal == '=')
{
//Data.at(FormulaIndex).AddOp(strBuffer + *CurrentVal); strBuffer.clear(); ScanType = 0;
}
else
{
//Data.at(FormulaIndex).AddOp(strBuffer);
if (*CurrentVal == '!')
{
Data.at(FormulaIndex).AddOp(FormulaElementType::Not); strBuffer.clear(); ScanType = 0;
}
else
{
strBuffer = *CurrentVal;
if (*CurrentVal == '-' || VariableConversionFunctions::IsDigit(*CurrentVal))
{
ScanType = 4;
}
else
{
ScanType = 3;
}
}
}
}
}
else if (ScanType == 11)//Prefix Variable (Partial support for prefix increment variables)
{
if (*CurrentVal != ' ' && *CurrentVal != '\t')//Whitespace
{
Data.at(FormulaIndex).AddPrefixOp(strBuffer);
}
else
{
strBuffer += *CurrentVal;
}
}
else
{//Scanning either number or variable at this point
if (*CurrentVal == '+')//++ or +
{
InsertFromBufferV2(strBuffer, FormulaIndex, ScanType, numberWasLast);
strBuffer = '+'; ScanType = 1;
}
else if (*CurrentVal == '-')//--, -, or (unlikely) negative number
{
InsertFromBuffer(strBuffer, FormulaIndex, ScanType, numberWasLast);
strBuffer = '-'; ScanType = 1;
}
else if (*CurrentVal == '!')//!=
{
InsertFromBufferV2(strBuffer, FormulaIndex, ScanType, numberWasLast);
strBuffer = '!'; ScanType = 1;
}
else if (*CurrentVal == '&')
{
InsertFromBufferV2(strBuffer, FormulaIndex, ScanType, numberWasLast);
strBuffer = '&'; ScanType = 1;
}
else if (*CurrentVal == '|')
{
InsertFromBufferV2(strBuffer, FormulaIndex, ScanType, numberWasLast);
strBuffer = '|'; ScanType = 1;
}
else if (*CurrentVal == '=')
{
InsertFromBufferV2(strBuffer, FormulaIndex, ScanType, numberWasLast);
strBuffer = '='; ScanType = 1;
}
else if (*CurrentVal == '>')
{
InsertFromBufferV2(strBuffer, FormulaIndex, ScanType, numberWasLast);
strBuffer = '>'; ScanType = 1;
}
else if (*CurrentVal == '<')
{
InsertFromBufferV2(strBuffer, FormulaIndex, ScanType, numberWasLast);
strBuffer = '<'; ScanType = 1;
}
else if (*CurrentVal == '/')
{
InsertFromBufferV2(strBuffer, FormulaIndex, ScanType, numberWasLast);
Data.at(FormulaIndex).AddOp(FormulaElementType::Div);
}
else if (*CurrentVal == '*')
{
InsertFromBufferV2(strBuffer, FormulaIndex, ScanType, numberWasLast);
Data.at(FormulaIndex).AddOp(FormulaElementType::Mult);
}
else if (*CurrentVal == '^')//Power of function
{
InsertFromBufferV2(strBuffer, FormulaIndex, ScanType, numberWasLast);
Data.at(FormulaIndex).AddOp(FormulaElementType::Pow);
}
else if (*CurrentVal == '$')//Shorthand for XOR for now
{
InsertFromBufferV2(strBuffer, FormulaIndex, ScanType, numberWasLast);
Data.at(FormulaIndex).AddOp(FormulaElementType::XOR);
}
else if (*CurrentVal == ' ' || *CurrentVal == '\t')//Immediately send variable if encounter whitespace
{
InsertFromBuffer(strBuffer, FormulaIndex, ScanType, numberWasLast);
}
else
{
strBuffer += *CurrentVal;
}
}
}
//Finish unfinished potential scans
if (!strBuffer.empty())
InsertFromBuffer(strBuffer, FormulaIndex, ScanType, numberWasLast);
TrimFormula();
}
/// <summary>
/// Initializes a new instance of the <see cref="BuiltinFormula" /> class from a string copy instead of a reference.
/// </summary>
/// <param name="ElemValue">The elem value to read in order to create formula data.</param>
/// <param name="BlankVar">Unused; only disambiguates this overload from the reference-taking constructor.</param>
BuiltinFormula(std::string ElemValue, bool BlankVar) : BuiltinFormula(ElemValue) {}
/// <summary>
/// Initializes a new instance of the <see cref="BuiltinFormula" /> class from a C string
/// literal.(fix for initializing without copying from a string value set)
/// </summary>
/// <param name="strVal">The characters to read in order to create formula data.</param>
BuiltinFormula(const char* strVal) : BuiltinFormula(std::string(strVal), true) {}
};
// Minimal example variable-store type used by MediumDecFormulaV2 below.
class TestFormVarStore
{
public:
MediumDec x = MediumDec::Two;// Sample variable 'x' preset to MediumDec::Two
};
// Concrete formula type operating on MediumDec values with the
// TestFormVarStore variable store; inherits all behavior from BuiltinFormula.
class DLL_API MediumDecFormulaV2 : public BuiltinFormula<MediumDec, TestFormVarStore>
{
};
}
|
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "lithium-allocator-inl.h"
#include "arm/lithium-arm.h"
#include "arm/lithium-codegen-arm.h"
namespace v8 {
namespace internal {
// Expands, for every concrete lithium instruction type in
// LITHIUM_CONCRETE_INSTRUCTION_LIST, the CompileToNative bridge that forwards
// code generation to the matching LCodeGen::Do<type> handler.
#define DEFINE_COMPILE(type) \
void L##type::CompileToNative(LCodeGen* generator) { \
generator->Do##type(this); \
}
LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
// On construction no register has been spilled yet, so every spill-slot
// entry (both general-purpose and double) starts out as NULL.
LOsrEntry::LOsrEntry() {
  for (int gp = 0; gp < Register::kNumAllocatableRegisters; gp++) {
    register_spills_[gp] = NULL;
  }
  for (int fp = 0; fp < DoubleRegister::kNumAllocatableRegisters; fp++) {
    double_register_spills_[fp] = NULL;
  }
}
// Records that the allocatable core register at |allocation_index| was
// spilled to |spill_operand| for this OSR entry. Each index may be marked
// at most once.
void LOsrEntry::MarkSpilledRegister(int allocation_index,
                                    LOperand* spill_operand) {
  ASSERT(spill_operand->IsStackSlot());
  ASSERT(register_spills_[allocation_index] == NULL);
  register_spills_[allocation_index] = spill_operand;
}

#ifdef DEBUG
// Debug-only sanity check of operand policies on instructions marked as calls.
void LInstruction::VerifyCall() {
  // Call instructions can use only fixed registers as temporaries and
  // outputs because all registers are blocked by the calling convention.
  // Inputs operands must use a fixed register or use-at-start policy or
  // a non-register policy.
  ASSERT(Output() == NULL ||
         LUnallocated::cast(Output())->HasFixedPolicy() ||
         !LUnallocated::cast(Output())->HasRegisterPolicy());
  for (UseIterator it(this); !it.Done(); it.Advance()) {
    LUnallocated* operand = LUnallocated::cast(it.Current());
    ASSERT(operand->HasFixedPolicy() ||
           operand->IsUsedAtStart());
  }
  for (TempIterator it(this); !it.Done(); it.Advance()) {
    LUnallocated* operand = LUnallocated::cast(it.Current());
    // Temporaries must either be fixed or avoid registers entirely.
    ASSERT(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
  }
}
#endif

// Double-register counterpart of MarkSpilledRegister.
void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
                                          LOperand* spill_operand) {
  ASSERT(spill_operand->IsDoubleStackSlot());
  ASSERT(double_register_spills_[allocation_index] == NULL);
  double_register_spills_[allocation_index] = spill_operand;
}
// Prints a full textual form of the instruction: mnemonic, output operand,
// operands, then the attached environment and pointer map, if any.
void LInstruction::PrintTo(StringStream* stream) {
  stream->Add("%s ", this->Mnemonic());
  PrintOutputOperandTo(stream);
  PrintDataTo(stream);
  if (HasEnvironment()) {
    stream->Add(" ");
    environment()->PrintTo(stream);
  }
  if (HasPointerMap()) {
    stream->Add(" ");
    pointer_map()->PrintTo(stream);
  }
}

// Default operand printer: "= " followed by each input, space-separated.
// Subclasses override this to show instruction-specific data.
void LInstruction::PrintDataTo(StringStream* stream) {
  stream->Add("= ");
  for (int i = 0; i < InputCount(); i++) {
    if (i > 0) stream->Add(" ");
    InputAt(i)->PrintTo(stream);
  }
}

// Prints the result operand when the instruction defines one.
void LInstruction::PrintOutputOperandTo(StringStream* stream) {
  if (HasResult()) result()->PrintTo(stream);
}

// Prints the gap contents, plus a note when this label stands in for a
// dead block that was replaced during chunk building.
void LLabel::PrintDataTo(StringStream* stream) {
  LGap::PrintDataTo(stream);
  LLabel* rep = replacement();
  if (rep != NULL) {
    stream->Add(" Dead block replaced with B%d", rep->block_id());
  }
}
bool LGap::IsRedundant() const {
for (int i = 0; i < 4; i++) {
if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
return false;
}
}
return true;
}
// Prints the four inner-position parallel moves as "(...) (...) (...) (...)",
// leaving a slot's parentheses empty when it holds no moves.
void LGap::PrintDataTo(StringStream* stream) {
  for (int i = 0; i < 4; i++) {
    stream->Add("(");
    if (parallel_moves_[i] != NULL) {
      parallel_moves_[i]->PrintDataTo(stream);
    }
    stream->Add(") ");
  }
}
const char* LArithmeticD::Mnemonic() const {
switch (op()) {
case Token::ADD: return "add-d";
case Token::SUB: return "sub-d";
case Token::MUL: return "mul-d";
case Token::DIV: return "div-d";
case Token::MOD: return "mod-d";
default:
UNREACHABLE();
return NULL;
}
}
// Maps this tagged-arithmetic instruction's token to its printable mnemonic.
const char* LArithmeticT::Mnemonic() const {
  switch (op()) {
    case Token::ADD: return "add-t";
    case Token::SUB: return "sub-t";
    case Token::MUL: return "mul-t";
    case Token::MOD: return "mod-t";
    case Token::DIV: return "div-t";
    case Token::BIT_AND: return "bit-and-t";
    case Token::BIT_OR: return "bit-or-t";
    case Token::BIT_XOR: return "bit-xor-t";
    case Token::ROR: return "ror-t";
    case Token::SHL: return "shl-t";
    case Token::SAR: return "sar-t";
    case Token::SHR: return "shr-t";
    default:
      UNREACHABLE();
      return NULL;
  }
}
// The PrintDataTo overrides below render each control-flow instruction in the
// form "if <condition> then B<true> else B<false>" (or "B<target>" for
// unconditional jumps) for --print-lithium style debug output.

// Unconditional jump: just the target block id.
void LGoto::PrintDataTo(StringStream* stream) {
  stream->Add("B%d", block_id());
}

// Generic branch: both successor ids and the value being tested.
void LBranch::PrintDataTo(StringStream* stream) {
  stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
  value()->PrintTo(stream);
}

// Integer/double comparison branch with its comparison operator.
void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
  stream->Add("if ");
  left()->PrintTo(stream);
  stream->Add(" %s ", Token::String(op()));
  right()->PrintTo(stream);
  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}

// Null/undefined comparison; shows === vs == and which nil value is tested.
void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
  stream->Add("if ");
  value()->PrintTo(stream);
  stream->Add(kind() == kStrictEquality ? " === " : " == ");
  stream->Add(nil() == kNullValue ? "null" : "undefined");
  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}

void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
  stream->Add("if is_object(");
  value()->PrintTo(stream);
  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}

void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
  stream->Add("if is_string(");
  value()->PrintTo(stream);
  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}

void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
  stream->Add("if is_smi(");
  value()->PrintTo(stream);
  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}

void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
  stream->Add("if is_undetectable(");
  value()->PrintTo(stream);
  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}

// Note: left and right are printed back to back with no separator.
void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
  stream->Add("if string_compare(");
  left()->PrintTo(stream);
  right()->PrintTo(stream);
  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}

void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
  stream->Add("if has_instance_type(");
  value()->PrintTo(stream);
  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}

void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
  stream->Add("if has_cached_array_index(");
  value()->PrintTo(stream);
  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}

// %o prints the class-name object via the StringStream object formatter.
void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
  stream->Add("if class_of_test(");
  value()->PrintTo(stream);
  stream->Add(", \"%o\") then B%d else B%d",
              *hydrogen()->class_name(),
              true_block_id(),
              false_block_id());
}

void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
  stream->Add("if typeof ");
  value()->PrintTo(stream);
  stream->Add(" == \"%s\" then B%d else B%d",
              *hydrogen()->type_literal()->ToCString(),
              true_block_id(), false_block_id());
}
// PrintDataTo overrides for call and property-access instructions. Calls
// print "#<arity> /"; stores print "<target> <- <value>".

void LCallConstantFunction::PrintDataTo(StringStream* stream) {
  stream->Add("#%d / ", arity());
}

void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
  stream->Add("/%s ", hydrogen()->OpName());
  value()->PrintTo(stream);
}

// Context slot read: "<context>[<slot>]".
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
  context()->PrintTo(stream);
  stream->Add("[%d]", slot_index());
}

// Context slot write: "<context>[<slot>] <- <value>".
void LStoreContextSlot::PrintDataTo(StringStream* stream) {
  context()->PrintTo(stream);
  stream->Add("[%d] <- ", slot_index());
  value()->PrintTo(stream);
}

void LInvokeFunction::PrintDataTo(StringStream* stream) {
  stream->Add("= ");
  function()->PrintTo(stream);
  stream->Add(" #%d / ", arity());
}

// The callee key is fixed in r2 on ARM (see DoCallKeyed).
void LCallKeyed::PrintDataTo(StringStream* stream) {
  stream->Add("[r2] #%d / ", arity());
}

void LCallNamed::PrintDataTo(StringStream* stream) {
  SmartArrayPointer<char> name_string = name()->ToCString();
  stream->Add("%s #%d / ", *name_string, arity());
}

void LCallGlobal::PrintDataTo(StringStream* stream) {
  SmartArrayPointer<char> name_string = name()->ToCString();
  stream->Add("%s #%d / ", *name_string, arity());
}

void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
  stream->Add("#%d / ", arity());
}

void LCallNew::PrintDataTo(StringStream* stream) {
  stream->Add("= ");
  constructor()->PrintTo(stream);
  stream->Add(" #%d / ", arity());
}

void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
  arguments()->PrintTo(stream);
  stream->Add(" length ");
  length()->PrintTo(stream);
  stream->Add(" index ");
  index()->PrintTo(stream);
}

// Named in-object store: "<object>.<name> <- <value>".
void LStoreNamedField::PrintDataTo(StringStream* stream) {
  object()->PrintTo(stream);
  stream->Add(".");
  stream->Add(*String::cast(*name())->ToCString());
  stream->Add(" <- ");
  value()->PrintTo(stream);
}

void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
  object()->PrintTo(stream);
  stream->Add(".");
  stream->Add(*String::cast(*name())->ToCString());
  stream->Add(" <- ");
  value()->PrintTo(stream);
}

// Keyed store into an elements backing store: "<elements>[<key>] <- <value>".
void LStoreKeyed::PrintDataTo(StringStream* stream) {
  elements()->PrintTo(stream);
  stream->Add("[");
  key()->PrintTo(stream);
  stream->Add("] <- ");
  value()->PrintTo(stream);
}

void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
  object()->PrintTo(stream);
  stream->Add("[");
  key()->PrintTo(stream);
  stream->Add("] <- ");
  value()->PrintTo(stream);
}

// Elements-kind transition: "<object> <from-map> -> <to-map>".
void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
  object()->PrintTo(stream);
  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
}
// Returns the next free spill slot index. A double-width value consumes two
// consecutive slots; its returned index is the second of the pair.
int LPlatformChunk::GetNextSpillIndex(bool is_double) {
  // Skip a slot for a double-width value.
  if (is_double) spill_slot_count_++;
  return spill_slot_count_++;
}
LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
int index = GetNextSpillIndex(is_double);
if (is_double) {
return LDoubleStackSlot::Create(index, zone());
} else {
return LStackSlot::Create(index, zone());
}
}
// Builds the Lithium chunk for the whole Hydrogen graph: visits each basic
// block in order, passing the following block so end-of-block gotos can be
// elided. Returns NULL if building was aborted.
LPlatformChunk* LChunkBuilder::Build() {
  ASSERT(is_unused());
  chunk_ = new(zone()) LPlatformChunk(info(), graph());
  HPhase phase("L_Building chunk", chunk_);
  status_ = BUILDING;
  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
  for (int i = 0; i < blocks->length(); i++) {
    HBasicBlock* next = NULL;
    if (i < blocks->length() - 1) next = blocks->at(i + 1);
    DoBasicBlock(blocks->at(i), next);
    if (is_aborted()) return NULL;
  }
  status_ = DONE;
  return chunk_;
}

// Records the bailout reason and switches the builder into the aborted state.
void LChunkBuilder::Abort(const char* reason) {
  info()->set_bailout_reason(reason);
  status_ = ABORTED;
}
// The helpers below create LUnallocated operands describing how the register
// allocator may place a use of a Hydrogen value: in a specific register,
// any register, a stack slot, or (for constants) no operand at all.

// Wraps a specific core register as a fixed-register operand.
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
  return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
                                  Register::ToAllocationIndex(reg));
}

// Wraps a specific double register as a fixed-register operand.
LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
  return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
                                  DoubleRegister::ToAllocationIndex(reg));
}

// Use of |value| pinned to a specific core register.
LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
  return Use(value, ToUnallocated(fixed_register));
}

// Use of |value| pinned to a specific double register.
LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
  return Use(value, ToUnallocated(reg));
}

// Use that must live in some register for the whole instruction.
LOperand* LChunkBuilder::UseRegister(HValue* value) {
  return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}

// Register use that is only needed at the start of the instruction, so the
// allocator may reuse the register for the output.
LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
  return Use(value,
             new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
                                      LUnallocated::USED_AT_START));
}

// Use in a register that the instruction is allowed to clobber.
LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
  return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
}

// Unconstrained use: register or stack slot, allocator's choice.
LOperand* LChunkBuilder::Use(HValue* value) {
  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
}

// Unconstrained use needed only at the start of the instruction.
LOperand* LChunkBuilder::UseAtStart(HValue* value) {
  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
                                             LUnallocated::USED_AT_START));
}

// The *OrConstant variants fold constant operands directly into the
// instruction instead of requiring a location for them.
LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
  return value->IsConstant()
      ? chunk_->DefineConstantOperand(HConstant::cast(value))
      : Use(value);
}

LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
  return value->IsConstant()
      ? chunk_->DefineConstantOperand(HConstant::cast(value))
      : UseAtStart(value);
}

LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
  return value->IsConstant()
      ? chunk_->DefineConstantOperand(HConstant::cast(value))
      : UseRegister(value);
}

LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
  return value->IsConstant()
      ? chunk_->DefineConstantOperand(HConstant::cast(value))
      : UseRegisterAtStart(value);
}

// Constant, or any location (register, stack slot) for non-constants.
LOperand* LChunkBuilder::UseAny(HValue* value) {
  return value->IsConstant()
      ? chunk_->DefineConstantOperand(HConstant::cast(value))
      : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
}

// Common tail of all Use* helpers: lazily emit values that are only
// materialized at their uses, then tag the operand with the value's
// virtual register id.
LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
  if (value->EmitAtUses()) {
    HInstruction* instr = HInstruction::cast(value);
    VisitInstruction(instr);
  }
  operand->set_virtual_register(value->id());
  return operand;
}
// The Define* templates attach a result operand to a single-output
// instruction, expressing the placement constraint for its output.

// Common tail: tag the result with the current instruction's virtual
// register id and install it on the instruction.
template<int I, int T>
LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
                                    LUnallocated* result) {
  result->set_virtual_register(current_instruction_->id());
  instr->set_result(result);
  return instr;
}

// Result must live in some register.
template<int I, int T>
LInstruction* LChunkBuilder::DefineAsRegister(
    LTemplateInstruction<1, I, T>* instr) {
  return Define(instr,
                new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}

// Result goes to a specific spill slot.
template<int I, int T>
LInstruction* LChunkBuilder::DefineAsSpilled(
    LTemplateInstruction<1, I, T>* instr, int index) {
  return Define(instr,
                new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
}

// Result shares the location of the first input (two-address form).
template<int I, int T>
LInstruction* LChunkBuilder::DefineSameAsFirst(
    LTemplateInstruction<1, I, T>* instr) {
  return Define(instr,
                new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
}

// Result pinned to a specific core register.
template<int I, int T>
LInstruction* LChunkBuilder::DefineFixed(
    LTemplateInstruction<1, I, T>* instr, Register reg) {
  return Define(instr, ToUnallocated(reg));
}

// Result pinned to a specific double register.
template<int I, int T>
LInstruction* LChunkBuilder::DefineFixedDouble(
    LTemplateInstruction<1, I, T>* instr, DoubleRegister reg) {
  return Define(instr, ToUnallocated(reg));
}
// Attaches a deoptimization environment (built from the current block's last
// Hydrogen environment) to |instr|.
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
  HEnvironment* hydrogen_env = current_block_->last_environment();
  int argument_index_accumulator = 0;
  instr->set_environment(CreateEnvironment(hydrogen_env,
                                           &argument_index_accumulator));
  return instr;
}

// Marks |instr| as a call: verifies operand policies (debug only), attaches
// a pointer map, records any pending lazy-deopt simulate, and attaches a
// deopt environment when the call may need one.
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
                                        HInstruction* hinstr,
                                        CanDeoptimize can_deoptimize) {
#ifdef DEBUG
  instr->VerifyCall();
#endif
  instr->MarkAsCall();
  instr = AssignPointerMap(instr);
  if (hinstr->HasObservableSideEffects()) {
    // Remember the simulate following this call so lazy deoptimization can
    // target the state after the side effect. Only one may be pending.
    ASSERT(hinstr->next()->IsSimulate());
    HSimulate* sim = HSimulate::cast(hinstr->next());
    ASSERT(instruction_pending_deoptimization_environment_ == NULL);
    ASSERT(pending_deoptimization_ast_id_.IsNone());
    instruction_pending_deoptimization_environment_ = instr;
    pending_deoptimization_ast_id_ = sim->ast_id();
  }
  // If instruction does not have side-effects lazy deoptimization
  // after the call will try to deoptimize to the point before the call.
  // Thus we still need to attach environment to this call even if
  // call sequence can not deoptimize eagerly.
  bool needs_environment =
      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
      !hinstr->HasObservableSideEffects();
  if (needs_environment && !instr->HasEnvironment()) {
    instr = AssignEnvironment(instr);
  }
  return instr;
}

// Attaches a fresh pointer map (for GC root tracking at the call site).
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
  ASSERT(!instr->HasPointerMap());
  instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
  return instr;
}
// Allocates a fresh temporary that must live in some register, with its own
// virtual register id. Aborts the build when virtual registers run out.
LUnallocated* LChunkBuilder::TempRegister() {
  LUnallocated* operand =
      new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
  operand->set_virtual_register(allocator_->GetVirtualRegister());
  if (!allocator_->AllocationOk()) Abort("Not enough virtual registers.");
  return operand;
}

// Temporary pinned to a specific core register.
LOperand* LChunkBuilder::FixedTemp(Register reg) {
  LUnallocated* operand = ToUnallocated(reg);
  ASSERT(operand->HasFixedPolicy());
  return operand;
}

// Temporary pinned to a specific double register.
LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
  LUnallocated* operand = ToUnallocated(reg);
  ASSERT(operand->HasFixedPolicy());
  return operand;
}

// A block entry lowers to a bind-able label.
LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
  return new(zone()) LLabel(instr->block());
}

// Both deoptimize variants lower to LDeoptimize with an environment
// describing the frame state to restore.
LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
  return AssignEnvironment(new(zone()) LDeoptimize);
}

LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
  return AssignEnvironment(new(zone()) LDeoptimize);
}
// Lowers a shift (SHL/SHR/SAR/ROR). Tagged operands go through the generic
// binary-op stub (a call with fixed r1/r0 inputs); integer operands produce
// an LShiftI, with the shift amount folded in when it is a constant.
LInstruction* LChunkBuilder::DoShift(Token::Value op,
                                     HBitwiseBinaryOperation* instr) {
  if (instr->representation().IsTagged()) {
    ASSERT(instr->left()->representation().IsTagged());
    ASSERT(instr->right()->representation().IsTagged());
    LOperand* left = UseFixed(instr->left(), r1);
    LOperand* right = UseFixed(instr->right(), r0);
    LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
    return MarkAsCall(DefineFixed(result, r0), instr);
  }
  ASSERT(instr->representation().IsInteger32());
  ASSERT(instr->left()->representation().IsInteger32());
  ASSERT(instr->right()->representation().IsInteger32());
  LOperand* left = UseRegisterAtStart(instr->left());
  HValue* right_value = instr->right();
  LOperand* right = NULL;
  int constant_value = 0;
  if (right_value->IsConstant()) {
    HConstant* constant = HConstant::cast(right_value);
    right = chunk_->DefineConstantOperand(constant);
    // Only the low five bits of the shift amount are significant on ARM.
    constant_value = constant->Integer32Value() & 0x1f;
  } else {
    right = UseRegisterAtStart(right_value);
  }
  // Shift operations can only deoptimize if we do a logical shift
  // by 0 and the result cannot be truncated to int32.
  bool does_deopt = false;
  if (op == Token::SHR && constant_value == 0) {
    if (FLAG_opt_safe_uint32_operations) {
      does_deopt = !instr->CheckFlag(HInstruction::kUint32);
    } else {
      // Without safe-uint32 support, deopt unless every use truncates the
      // result back to int32.
      for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
        if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
          does_deopt = true;
          break;
        }
      }
    }
  }
  LInstruction* result =
      DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
  return does_deopt ? AssignEnvironment(result) : result;
}
// Lowers double-precision arithmetic (everything but MOD, which needs a
// call) to a register-to-register LArithmeticD.
LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
                                           HArithmeticBinaryOperation* instr) {
  ASSERT(instr->representation().IsDouble());
  ASSERT(instr->left()->representation().IsDouble());
  ASSERT(instr->right()->representation().IsDouble());
  ASSERT(op != Token::MOD);
  LOperand* left = UseRegisterAtStart(instr->left());
  LOperand* right = UseRegisterAtStart(instr->right());
  LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
  return DefineAsRegister(result);
}

// Lowers tagged arithmetic to a stub call with the ARM calling convention:
// left in r1, right in r0, result in r0.
LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
                                           HArithmeticBinaryOperation* instr) {
  ASSERT(op == Token::ADD ||
         op == Token::DIV ||
         op == Token::MOD ||
         op == Token::MUL ||
         op == Token::SUB);
  HValue* left = instr->left();
  HValue* right = instr->right();
  ASSERT(left->representation().IsTagged());
  ASSERT(right->representation().IsTagged());
  LOperand* left_operand = UseFixed(left, r1);
  LOperand* right_operand = UseFixed(right, r0);
  LArithmeticT* result =
      new(zone()) LArithmeticT(op, left_operand, right_operand);
  return MarkAsCall(DefineFixed(result, r0), instr);
}
// Lowers one Hydrogen basic block: sets up the block's incoming environment
// and outgoing-argument count, visits each instruction, and records the
// resulting Lithium instruction index range on the block.
void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
  ASSERT(is_building());
  current_block_ = block;
  next_block_ = next_block;
  if (block->IsStartBlock()) {
    block->UpdateEnvironment(graph_->start_environment());
    argument_count_ = 0;
  } else if (block->predecessors()->length() == 1) {
    // We have a single predecessor => copy environment and outgoing
    // argument count from the predecessor.
    ASSERT(block->phis()->length() == 0);
    HBasicBlock* pred = block->predecessors()->at(0);
    HEnvironment* last_environment = pred->last_environment();
    ASSERT(last_environment != NULL);
    // Only copy the environment, if it is later used again.
    if (pred->end()->SecondSuccessor() == NULL) {
      ASSERT(pred->end()->FirstSuccessor() == block);
    } else {
      // The predecessor branches; copy when either successor is visited
      // after this block, since the environment will still be needed then.
      if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
          pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
        last_environment = last_environment->Copy();
      }
    }
    block->UpdateEnvironment(last_environment);
    ASSERT(pred->argument_count() >= 0);
    argument_count_ = pred->argument_count();
  } else {
    // We are at a state join => process phis.
    HBasicBlock* pred = block->predecessors()->at(0);
    // No need to copy the environment, it cannot be used later.
    HEnvironment* last_environment = pred->last_environment();
    for (int i = 0; i < block->phis()->length(); ++i) {
      HPhi* phi = block->phis()->at(i);
      last_environment->SetValueAt(phi->merged_index(), phi);
    }
    // Deleted phis leave their environment slots holding undefined.
    for (int i = 0; i < block->deleted_phis()->length(); ++i) {
      last_environment->SetValueAt(block->deleted_phis()->at(i),
                                   graph_->GetConstantUndefined());
    }
    block->UpdateEnvironment(last_environment);
    // Pick up the outgoing argument count of one of the predecessors.
    argument_count_ = pred->argument_count();
  }
  HInstruction* current = block->first();
  int start = chunk_->instructions()->length();
  while (current != NULL && !is_aborted()) {
    // Code for constants in registers is generated lazily.
    if (!current->EmitAtUses()) {
      VisitInstruction(current);
    }
    current = current->next();
  }
  int end = chunk_->instructions()->length() - 1;
  if (end >= start) {
    block->set_first_instruction_index(start);
    block->set_last_instruction_index(end);
  }
  block->set_argument_count(argument_count_);
  next_block_ = NULL;
  current_block_ = NULL;
}
// Lowers one Hydrogen instruction to Lithium and appends the result to the
// chunk. Saves/restores current_instruction_ because lowering may recurse
// (via Use on emit-at-uses values).
void LChunkBuilder::VisitInstruction(HInstruction* current) {
  HInstruction* old_current = current_instruction_;
  current_instruction_ = current;
  if (current->has_position()) position_ = current->position();
  LInstruction* instr = current->CompileToLithium(this);
  if (instr != NULL) {
    // Stress flags force extra pointer maps / environments for testing.
    if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
      instr = AssignPointerMap(instr);
    }
    if (FLAG_stress_environments && !instr->HasEnvironment()) {
      instr = AssignEnvironment(instr);
    }
    instr->set_hydrogen_value(current);
    chunk_->AddInstruction(instr, current_block_);
  }
  current_instruction_ = old_current;
}
// Translates a (possibly nested) Hydrogen environment into an LEnvironment
// chain for deoptimization. Outer frames are converted first; pushed
// arguments receive consecutive indices via |argument_index_accumulator|,
// which is only advanced at JS_FUNCTION frame boundaries.
LEnvironment* LChunkBuilder::CreateEnvironment(
    HEnvironment* hydrogen_env,
    int* argument_index_accumulator) {
  if (hydrogen_env == NULL) return NULL;
  LEnvironment* outer =
      CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
  BailoutId ast_id = hydrogen_env->ast_id();
  ASSERT(!ast_id.IsNone() ||
         hydrogen_env->frame_type() != JS_FUNCTION);
  int value_count = hydrogen_env->length();
  LEnvironment* result = new(zone()) LEnvironment(
      hydrogen_env->closure(),
      hydrogen_env->frame_type(),
      ast_id,
      hydrogen_env->parameter_count(),
      argument_count_,
      value_count,
      outer,
      hydrogen_env->entry(),
      zone());
  int argument_index = *argument_index_accumulator;
  for (int i = 0; i < value_count; ++i) {
    if (hydrogen_env->is_special_index(i)) continue;
    HValue* value = hydrogen_env->values()->at(i);
    LOperand* op = NULL;
    if (value->IsArgumentsObject()) {
      // The arguments object is materialized lazily; no operand needed.
      op = NULL;
    } else if (value->IsPushArgument()) {
      op = new(zone()) LArgument(argument_index++);
    } else {
      op = UseAny(value);
    }
    result->AddValue(op,
                     value->representation(),
                     value->CheckFlag(HInstruction::kUint32));
  }
  if (hydrogen_env->frame_type() == JS_FUNCTION) {
    *argument_index_accumulator = argument_index;
  }
  return result;
}
// An unconditional goto lowers directly to LGoto.
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
  return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
}

// Lowers a branch. A branch on a constant folds to a goto; otherwise an
// LBranch is emitted, with a deopt environment when the tagged input might
// not be a smi or boolean.
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
  HValue* value = instr->value();
  if (value->EmitAtUses()) {
    // Constant condition: pick the taken successor statically.
    HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
        ? instr->FirstSuccessor()
        : instr->SecondSuccessor();
    return new(zone()) LGoto(successor->block_id());
  }
  LBranch* result = new(zone()) LBranch(UseRegister(value));
  // Tagged values that are not known smis or booleans require a
  // deoptimization environment.
  Representation rep = value->representation();
  HType type = value->type();
  if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
    return AssignEnvironment(result);
  }
  return result;
}

// Map comparison branch; needs a temp register for the loaded map.
LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
  ASSERT(instr->value()->representation().IsTagged());
  LOperand* value = UseRegisterAtStart(instr->value());
  LOperand* temp = TempRegister();
  return new(zone()) LCmpMapAndBranch(value, temp);
}

LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
  LOperand* value = UseRegister(instr->value());
  return DefineAsRegister(new(zone()) LArgumentsLength(value));
}

LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
  return DefineAsRegister(new(zone()) LArgumentsElements);
}
// instanceof lowers to a stub call: left in r0, right in r1, result in r0.
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
  LInstanceOf* result =
      new(zone()) LInstanceOf(UseFixed(instr->left(), r0),
                              UseFixed(instr->right(), r1));
  return MarkAsCall(DefineFixed(result, r0), instr);
}

// instanceof against a known global function; r4 is reserved as a temp for
// the stub's use.
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
    HInstanceOfKnownGlobal* instr) {
  LInstanceOfKnownGlobal* result =
      new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->left(), r0),
                                         FixedTemp(r4));
  return MarkAsCall(DefineFixed(result, r0), instr);
}

// Receiver wrapping for sloppy-mode calls; can deopt, result aliases the
// receiver input.
LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
  LOperand* receiver = UseRegisterAtStart(instr->receiver());
  LOperand* function = UseRegisterAtStart(instr->function());
  LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
  return AssignEnvironment(DefineSameAsFirst(result));
}

// Function.prototype.apply support; all operands in fixed registers per the
// stub's convention, and deopt may happen eagerly during the call sequence.
LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
  LOperand* function = UseFixed(instr->function(), r1);
  LOperand* receiver = UseFixed(instr->receiver(), r0);
  LOperand* length = UseFixed(instr->length(), r2);
  LOperand* elements = UseFixed(instr->elements(), r3);
  LApplyArguments* result = new(zone()) LApplyArguments(function,
                                                        receiver,
                                                        length,
                                                        elements);
  return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
}

// Pushing an argument grows the outgoing-argument count tracked by the
// builder for the enclosing call.
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
  ++argument_count_;
  LOperand* argument = Use(instr->argument());
  return new(zone()) LPushArgument(argument);
}

// Materialize the current function only if it is actually used.
LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
  return instr->HasNoUses()
      ? NULL
      : DefineAsRegister(new(zone()) LThisFunction);
}

LInstruction* LChunkBuilder::DoContext(HContext* instr) {
  return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
}

LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
  LOperand* context = UseRegisterAtStart(instr->value());
  return DefineAsRegister(new(zone()) LOuterContext(context));
}

LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
  return MarkAsCall(new(zone()) LDeclareGlobals, instr);
}

LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
  LOperand* context = UseRegisterAtStart(instr->value());
  return DefineAsRegister(new(zone()) LGlobalObject(context));
}

LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
  LOperand* global_object = UseRegisterAtStart(instr->value());
  return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
}

// Call lowerings: the pushed arguments are consumed here, so the builder's
// outgoing-argument count is decremented by the call's arity.
LInstruction* LChunkBuilder::DoCallConstantFunction(
    HCallConstantFunction* instr) {
  argument_count_ -= instr->argument_count();
  return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, r0), instr);
}

LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
  LOperand* function = UseFixed(instr->function(), r1);
  argument_count_ -= instr->argument_count();
  LInvokeFunction* result = new(zone()) LInvokeFunction(function);
  return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
// Lowers unary math builtins. Transcendentals (log/sin/cos/tan) become runtime
// calls with the input fixed in d2; pow-half computes in place with a d3 temp;
// the remaining ops compute in registers, some requiring deopt support.
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
  BuiltinFunctionId op = instr->op();
  if (op == kMathLog || op == kMathSin || op == kMathCos || op == kMathTan) {
    LOperand* input = UseFixedDouble(instr->value(), d2);
    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL);
    return MarkAsCall(DefineFixedDouble(result, d2), instr);
  } else if (op == kMathPowHalf) {
    LOperand* input = UseFixedDouble(instr->value(), d2);
    LOperand* temp = FixedTemp(d3);
    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
    return DefineFixedDouble(result, d2);
  } else {
    LOperand* input = UseRegisterAtStart(instr->value());
    // Round needs a double scratch register for its conversion.
    LOperand* temp = (op == kMathRound) ? FixedTemp(d3) : NULL;
    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
    switch (op) {
      case kMathAbs:
        // Abs may call a stub (pointer map) and deopt on the smi min value.
        return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
      case kMathFloor:
        return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
      case kMathSqrt:
        return DefineAsRegister(result);
      case kMathRound:
        return AssignEnvironment(DefineAsRegister(result));
      default:
        UNREACHABLE();
        return NULL;
    }
  }
}
// Remaining call lowerings: each consumes its pushed arguments (decrementing
// argument_count_) and returns its result in r0 per the call convention.

// Keyed call: the callee key is fixed in r2 for the stub.
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
  ASSERT(instr->key()->representation().IsTagged());
  argument_count_ -= instr->argument_count();
  LOperand* key = UseFixed(instr->key(), r2);
  return MarkAsCall(DefineFixed(new(zone()) LCallKeyed(key), r0), instr);
}

LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
  argument_count_ -= instr->argument_count();
  return MarkAsCall(DefineFixed(new(zone()) LCallNamed, r0), instr);
}

LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
  argument_count_ -= instr->argument_count();
  return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, r0), instr);
}

LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
  argument_count_ -= instr->argument_count();
  return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, r0), instr);
}

// 'new' expression: the constructor is fixed in r1.
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
  LOperand* constructor = UseFixed(instr->constructor(), r1);
  argument_count_ -= instr->argument_count();
  LCallNew* result = new(zone()) LCallNew(constructor);
  return MarkAsCall(DefineFixed(result, r0), instr);
}

LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
  LOperand* function = UseFixed(instr->function(), r1);
  argument_count_ -= instr->argument_count();
  return MarkAsCall(DefineFixed(new(zone()) LCallFunction(function), r0),
                    instr);
}

LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
  argument_count_ -= instr->argument_count();
  return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, r0), instr);
}

// All four shift/rotate Hydrogen ops funnel into the common DoShift.
LInstruction* LChunkBuilder::DoRor(HRor* instr) {
  return DoShift(Token::ROR, instr);
}

LInstruction* LChunkBuilder::DoShr(HShr* instr) {
  return DoShift(Token::SHR, instr);
}

LInstruction* LChunkBuilder::DoSar(HSar* instr) {
  return DoShift(Token::SAR, instr);
}

LInstruction* LChunkBuilder::DoShl(HShl* instr) {
  return DoShift(Token::SHL, instr);
}
// Bitwise and/or/xor: integer operands compute in registers (constant folded
// on the right); tagged operands go through the generic stub call.
LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
  if (instr->representation().IsInteger32()) {
    ASSERT(instr->left()->representation().IsInteger32());
    ASSERT(instr->right()->representation().IsInteger32());
    // Put the "more constant" operand on the right so it can be folded.
    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
    return DefineAsRegister(new(zone()) LBitI(left, right));
  } else {
    ASSERT(instr->representation().IsTagged());
    ASSERT(instr->left()->representation().IsTagged());
    ASSERT(instr->right()->representation().IsTagged());
    LOperand* left = UseFixed(instr->left(), r1);
    LOperand* right = UseFixed(instr->right(), r0);
    LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
    return MarkAsCall(DefineFixed(result, r0), instr);
  }
}

// Bitwise NOT on int32; elided entirely when the result is unused.
LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
  ASSERT(instr->value()->representation().IsInteger32());
  ASSERT(instr->representation().IsInteger32());
  if (instr->HasNoUses()) return NULL;
  LOperand* value = UseRegisterAtStart(instr->value());
  return DefineAsRegister(new(zone()) LBitNotI(value));
}

// Division: doubles and tagged values go through the arithmetic helpers;
// int32 division calls out and may deopt.
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
  if (instr->representation().IsDouble()) {
    return DoArithmeticD(Token::DIV, instr);
  } else if (instr->representation().IsInteger32()) {
    // TODO(1042) The fixed register allocation
    // is needed because we call TypeRecordingBinaryOpStub from
    // the generated code, which requires registers r0
    // and r1 to be used. We should remove that
    // when we provide a native implementation.
    LOperand* dividend = UseFixed(instr->left(), r0);
    LOperand* divisor = UseFixed(instr->right(), r1);
    return AssignEnvironment(AssignPointerMap(
        DefineFixed(new(zone()) LDivI(dividend, divisor), r0)));
  } else {
    return DoArithmeticT(Token::DIV, instr);
  }
}
// Returns true if an efficient (shift- or magic-number-based) code sequence
// exists for dividing by |divisor|.
bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) {
  // Compute |divisor| in unsigned arithmetic. The previous abs(divisor)
  // invoked undefined behavior for divisor == kMinInt (-2^31), whose
  // absolute value is not representable as a signed int; negating in
  // uint32_t wraps to the correct magnitude 0x80000000 instead.
  uint32_t divisor_abs = static_cast<uint32_t>(divisor);
  if (divisor < 0) divisor_abs = 0u - divisor_abs;
  // Dividing by 0, 1, and powers of 2 is easy.
  // Note that IsPowerOf2(0) returns true;
  ASSERT(IsPowerOf2(0) == true);
  if (IsPowerOf2(divisor_abs)) return true;
  // We have magic numbers for a few specific divisors.
  // Details and proofs can be found in:
  // - Hacker's Delight, Henry S. Warren, Jr.
  // - The PowerPC Compiler Writer's Guide
  // and probably many others.
  //
  // We handle
  //   <divisor with magic numbers> * <power of 2>
  // but not
  //   <divisor with magic numbers> * <other divisor with magic numbers>
  int32_t power_of_2_factor =
      CompilerIntrinsics::CountTrailingZeros(divisor_abs);
  DivMagicNumbers magic_numbers =
      DivMagicNumberFor(divisor_abs >> power_of_2_factor);
  if (magic_numbers.M != InvalidDivMagicNumber.M) return true;
  return false;
}
// Returns an integer32-valued dividend for the Math.floor(a / b) pattern:
// either the value itself (already int32) or, for an int32->double HChange,
// the untouched int32 input.  Returns NULL when no int32 form exists.
HValue* LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
  // A value with an integer representation does not need to be transformed.
  if (dividend->representation().IsInteger32()) {
    return dividend;
    // A change from an integer32 can be replaced by the integer32 value.
  } else if (dividend->IsChange() &&
             HChange::cast(dividend)->from().IsInteger32()) {
    return HChange::cast(dividend)->value();
  }
  return NULL;
}
// Returns an int32 constant copy of 'divisor' when it is a constant we have
// magic numbers for (see HasMagicNumberForDivisor); otherwise NULL, meaning
// the floor-of-div optimization should not be applied.
HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
  // Only optimize when we have magic numbers for the divisor.
  // The standard integer division routine is usually slower than
  // transitioning to VFP.
  if (divisor->IsConstant() &&
      HConstant::cast(divisor)->HasInteger32Value()) {
    HConstant* constant_val = HConstant::cast(divisor);
    int32_t int32_val = constant_val->Integer32Value();
    if (LChunkBuilder::HasMagicNumberForDivisor(int32_val)) {
      return constant_val->CopyToRepresentation(Representation::Integer32(),
                                                divisor->block()->zone());
    }
  }
  return NULL;
}
// Math.floor(a / b) with a constant divisor that has magic numbers (the
// simplification passes above guarantee this, as the ASSERT checks).
// Needs a temp for the remainder and an environment for deoptimization.
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
  HValue* right = instr->right();
  LOperand* dividend = UseRegister(instr->left());
  LOperand* divisor = UseRegisterOrConstant(right);
  LOperand* remainder = TempRegister();
  ASSERT(right->IsConstant() &&
         HConstant::cast(right)->HasInteger32Value() &&
         HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value()));
  return AssignEnvironment(DefineAsRegister(
      new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
}
// Modulo.  Int32 with a power-of-2 divisor uses a compact LModI form; the
// general int32 case needs a core temp plus two fixed double temps
// (d10/d11).  Tagged goes generic; double calls out to C with fixed
// d1/d2 input registers.
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
  if (instr->representation().IsInteger32()) {
    ASSERT(instr->left()->representation().IsInteger32());
    ASSERT(instr->right()->representation().IsInteger32());
    LModI* mod;
    if (instr->HasPowerOf2Divisor()) {
      ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
      LOperand* value = UseRegisterAtStart(instr->left());
      mod = new(zone()) LModI(value, UseOrConstant(instr->right()));
    } else {
      LOperand* dividend = UseRegister(instr->left());
      LOperand* divisor = UseRegister(instr->right());
      mod = new(zone()) LModI(dividend,
                              divisor,
                              TempRegister(),
                              FixedTemp(d10),
                              FixedTemp(d11));
    }
    // Only attach a deopt environment when the operation can actually
    // bail out (-0 result or division by zero).
    if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
        instr->CheckFlag(HValue::kCanBeDivByZero)) {
      return AssignEnvironment(DefineAsRegister(mod));
    } else {
      return DefineAsRegister(mod);
    }
  } else if (instr->representation().IsTagged()) {
    return DoArithmeticT(Token::MOD, instr);
  } else {
    ASSERT(instr->representation().IsDouble());
    // We call a C function for double modulo. It can't trigger a GC.
    // We need to use fixed result register for the call.
    // TODO(fschneider): Allow any register as input registers.
    LOperand* left = UseFixedDouble(instr->left(), d1);
    LOperand* right = UseFixedDouble(instr->right(), d2);
    LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
    return MarkAsCall(DefineFixedDouble(result, d1), instr);
  }
}
// Multiplication.  For int32, the more-constant operand becomes the right
// input; a temp register is reserved when the -0 check cannot be resolved
// statically from a constant right operand.
LInstruction* LChunkBuilder::DoMul(HMul* instr) {
  if (instr->representation().IsInteger32()) {
    ASSERT(instr->left()->representation().IsInteger32());
    ASSERT(instr->right()->representation().IsInteger32());
    LOperand* left;
    LOperand* right = UseOrConstant(instr->MostConstantOperand());
    LOperand* temp = NULL;
    if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
        (instr->CheckFlag(HValue::kCanOverflow) ||
         !right->IsConstantOperand())) {
      left = UseRegister(instr->LeastConstantOperand());
      temp = TempRegister();
    } else {
      left = UseRegisterAtStart(instr->LeastConstantOperand());
    }
    LMulI* mul = new(zone()) LMulI(left, right, temp);
    // Overflow or -0 checks require a deopt environment.
    if (instr->CheckFlag(HValue::kCanOverflow) ||
        instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
      AssignEnvironment(mul);
    }
    return DefineAsRegister(mul);
  } else if (instr->representation().IsDouble()) {
    return DoArithmeticD(Token::MUL, instr);
  } else {
    return DoArithmeticT(Token::MUL, instr);
  }
}
// Subtraction.  Int32 subtraction may reuse the left input register and
// takes a deopt environment only when overflow is possible; double and
// tagged forms dispatch to the shared arithmetic helpers.
LInstruction* LChunkBuilder::DoSub(HSub* instr) {
  if (instr->representation().IsInteger32()) {
    ASSERT(instr->left()->representation().IsInteger32());
    ASSERT(instr->right()->representation().IsInteger32());
    LOperand* left = UseRegisterAtStart(instr->left());
    LOperand* right = UseOrConstantAtStart(instr->right());
    LSubI* sub = new(zone()) LSubI(left, right);
    LInstruction* result = DefineAsRegister(sub);
    if (instr->CheckFlag(HValue::kCanOverflow)) {
      result = AssignEnvironment(result);
    }
    return result;
  } else if (instr->representation().IsDouble()) {
    return DoArithmeticD(Token::SUB, instr);
  } else {
    return DoArithmeticT(Token::SUB, instr);
  }
}
// Addition.  Unlike DoSub, addition is commutative, so the least-constant
// operand is put in a register and the most-constant one may stay a
// constant.  Overflow adds a deopt environment.
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
  if (instr->representation().IsInteger32()) {
    ASSERT(instr->left()->representation().IsInteger32());
    ASSERT(instr->right()->representation().IsInteger32());
    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
    LAddI* add = new(zone()) LAddI(left, right);
    LInstruction* result = DefineAsRegister(add);
    if (instr->CheckFlag(HValue::kCanOverflow)) {
      result = AssignEnvironment(result);
    }
    return result;
  } else if (instr->representation().IsDouble()) {
    return DoArithmeticD(Token::ADD, instr);
  } else {
    ASSERT(instr->representation().IsTagged());
    return DoArithmeticT(Token::ADD, instr);
  }
}
// Math.min/Math.max.  Int32 allows a constant right operand; doubles need
// both operands in registers.  The result is a plain register define in
// either case.
LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
  LOperand* left = NULL;
  LOperand* right = NULL;
  if (instr->representation().IsInteger32()) {
    ASSERT(instr->left()->representation().IsInteger32());
    ASSERT(instr->right()->representation().IsInteger32());
    left = UseRegisterAtStart(instr->LeastConstantOperand());
    right = UseOrConstantAtStart(instr->MostConstantOperand());
  } else {
    ASSERT(instr->representation().IsDouble());
    ASSERT(instr->left()->representation().IsDouble());
    ASSERT(instr->right()->representation().IsDouble());
    left = UseRegisterAtStart(instr->left());
    right = UseRegisterAtStart(instr->right());
  }
  return DefineAsRegister(new(zone()) LMathMinMax(left, right));
}
// Math.pow.  Implemented via a C call with fixed registers: base in d1,
// exponent in d2 (double) or r2 (tagged/int), result in d3.  The call can
// deoptimize eagerly.
LInstruction* LChunkBuilder::DoPower(HPower* instr) {
  ASSERT(instr->representation().IsDouble());
  // We call a C function for double power. It can't trigger a GC.
  // We need to use fixed result register for the call.
  Representation exponent_type = instr->right()->representation();
  ASSERT(instr->left()->representation().IsDouble());
  LOperand* left = UseFixedDouble(instr->left(), d1);
  LOperand* right = exponent_type.IsDouble() ?
      UseFixedDouble(instr->right(), d2) :
      UseFixed(instr->right(), r2);
  LPower* result = new(zone()) LPower(left, right);
  return MarkAsCall(DefineFixedDouble(result, d3),
                    instr,
                    CAN_DEOPTIMIZE_EAGERLY);
}
// Math.random.  Marked as a call: the global object goes in r0 and the
// double result comes back in d7.
LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
  ASSERT(instr->representation().IsDouble());
  ASSERT(instr->global_object()->representation().IsTagged());
  LOperand* global_object = UseFixed(instr->global_object(), r0);
  LRandom* result = new(zone()) LRandom(global_object);
  return MarkAsCall(DefineFixedDouble(result, d7), instr);
}
// Generic (stub-based) comparison of two tagged values; fixed r1/r0 inputs
// and r0 result, as the comparison stub requires.
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
  ASSERT(instr->left()->representation().IsTagged());
  ASSERT(instr->right()->representation().IsTagged());
  LOperand* left = UseFixed(instr->left(), r1);
  LOperand* right = UseFixed(instr->right(), r0);
  LCmpT* result = new(zone()) LCmpT(left, right);
  return MarkAsCall(DefineFixed(result, r0), instr);
}
// Compare-and-branch on int32 or double inputs.  Int32 allows constant
// operands; doubles must both be in registers.  No result is defined --
// the instruction only branches.
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
    HCompareIDAndBranch* instr) {
  Representation r = instr->GetInputRepresentation();
  if (r.IsInteger32()) {
    ASSERT(instr->left()->representation().IsInteger32());
    ASSERT(instr->right()->representation().IsInteger32());
    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
    LOperand* right = UseRegisterOrConstantAtStart(instr->right());
    return new(zone()) LCmpIDAndBranch(left, right);
  } else {
    ASSERT(r.IsDouble());
    ASSERT(instr->left()->representation().IsDouble());
    ASSERT(instr->right()->representation().IsDouble());
    LOperand* left = UseRegisterAtStart(instr->left());
    LOperand* right = UseRegisterAtStart(instr->right());
    return new(zone()) LCmpIDAndBranch(left, right);
  }
}
// Branch on reference equality of two tagged values; both inputs in
// registers, live only at the start of the compare.
LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
    HCompareObjectEqAndBranch* instr) {
  LOperand* left = UseRegisterAtStart(instr->left());
  LOperand* right = UseRegisterAtStart(instr->right());
  return new(zone()) LCmpObjectEqAndBranch(left, right);
}
// Branch on equality of a value with a constant baked into the
// instruction; only the value needs a register.
LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
    HCompareConstantEqAndBranch* instr) {
  LOperand* value = UseRegisterAtStart(instr->value());
  return new(zone()) LCmpConstantEqAndBranch(value);
}
// Branch on whether a tagged value is null/undefined (nil).
LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
  ASSERT(instr->value()->representation().IsTagged());
  return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()));
}
// Branch on whether a tagged value is a JS object; needs one scratch
// register for the type check.
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
  ASSERT(instr->value()->representation().IsTagged());
  LOperand* value = UseRegisterAtStart(instr->value());
  LOperand* temp = TempRegister();
  return new(zone()) LIsObjectAndBranch(value, temp);
}
// Branch on whether a tagged value is a string; needs one scratch
// register for the type check.
LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
  ASSERT(instr->value()->representation().IsTagged());
  LOperand* value = UseRegisterAtStart(instr->value());
  LOperand* temp = TempRegister();
  return new(zone()) LIsStringAndBranch(value, temp);
}
// Branch on the Smi tag bit.  Use() rather than UseRegister(): any operand
// location is acceptable for the tag test.
LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
  ASSERT(instr->value()->representation().IsTagged());
  return new(zone()) LIsSmiAndBranch(Use(instr->value()));
}
// Branch on the map's undetectable bit; needs one scratch register.
LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
    HIsUndetectableAndBranch* instr) {
  ASSERT(instr->value()->representation().IsTagged());
  LOperand* value = UseRegisterAtStart(instr->value());
  return new(zone()) LIsUndetectableAndBranch(value, TempRegister());
}
// String comparison followed by a branch; goes through a runtime/stub call
// with the operands fixed in r1/r0.
LInstruction* LChunkBuilder::DoStringCompareAndBranch(
    HStringCompareAndBranch* instr) {
  ASSERT(instr->left()->representation().IsTagged());
  ASSERT(instr->right()->representation().IsTagged());
  LOperand* left = UseFixed(instr->left(), r1);
  LOperand* right = UseFixed(instr->right(), r0);
  LStringCompareAndBranch* result =
      new(zone()) LStringCompareAndBranch(left, right);
  return MarkAsCall(result, instr);
}
// Branch on the instance type stored in the value's map.  The input only
// needs to be live at the start of the check.
LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
    HHasInstanceTypeAndBranch* instr) {
  ASSERT(instr->value()->representation().IsTagged());
  return new(zone()) LHasInstanceTypeAndBranch(
      UseRegisterAtStart(instr->value()));
}
// Extract the cached array index from a string's hash field into a
// freshly defined register.
LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
    HGetCachedArrayIndex* instr) {
  ASSERT(instr->value()->representation().IsTagged());
  return DefineAsRegister(
      new(zone()) LGetCachedArrayIndex(UseRegisterAtStart(instr->value())));
}
// Branch on whether a string's hash field caches an array index.
LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
    HHasCachedArrayIndexAndBranch* instr) {
  ASSERT(instr->value()->representation().IsTagged());
  return new(zone()) LHasCachedArrayIndexAndBranch(
      UseRegisterAtStart(instr->value()));
}
// Branch on the value's class name; the input stays live past the start
// (plain UseRegister) and a scratch register is needed.
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
    HClassOfTestAndBranch* instr) {
  ASSERT(instr->value()->representation().IsTagged());
  LOperand* value = UseRegister(instr->value());
  return new(zone()) LClassOfTestAndBranch(value, TempRegister());
}
// Load the length field of a JSArray into a new register.
LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
  return DefineAsRegister(
      new(zone()) LJSArrayLength(UseRegisterAtStart(instr->value())));
}
// Load the length field of a FixedArrayBase into a new register.
LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
    HFixedArrayBaseLength* instr) {
  return DefineAsRegister(
      new(zone()) LFixedArrayBaseLength(UseRegisterAtStart(instr->value())));
}
// Load the enum-cache length out of a map into a new register.
LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
  return DefineAsRegister(
      new(zone()) LMapEnumLength(UseRegisterAtStart(instr->value())));
}
// Load the elements kind of an object into a new register.
LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
  return DefineAsRegister(
      new(zone()) LElementsKind(UseRegisterAtStart(instr->value())));
}
// Object.prototype.valueOf-style unwrapping; needs a scratch register and
// keeps the input live (plain UseRegister).
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
  LOperand* object = UseRegister(instr->value());
  LValueOf* result = new(zone()) LValueOf(object, TempRegister());
  return DefineAsRegister(result);
}
// Read a field of a JSDate.  Uses fixed r0 (object, also the result) and
// a fixed r1 temp; may call the runtime and deoptimize eagerly.
LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
  LOperand* object = UseFixed(instr->value(), r0);
  LDateField* result =
      new(zone()) LDateField(object, FixedTemp(r1), instr->index());
  return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
}
// Array bounds check: index may be a constant, length must be in a
// register; deoptimizes (environment) when the check fails.
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
  LOperand* value = UseRegisterOrConstantAtStart(instr->index());
  LOperand* length = UseRegister(instr->length());
  return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
}
LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
  // The control instruction marking the end of a block that completed
  // abruptly (e.g., threw an exception). There is nothing specific to do.
  return NULL;
}
// Throw the value in r0 via a runtime call.
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
  LOperand* value = UseFixed(instr->value(), r0);
  return MarkAsCall(new(zone()) LThrow(value), instr);
}
// HUseConst only keeps a constant alive in Hydrogen; it generates no code.
LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
  return NULL;
}
LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
  // All HForceRepresentation instructions should be eliminated in the
  // representation change phase of Hydrogen.
  UNREACHABLE();
  return NULL;
}
// Representation changes (tagged <-> double <-> int32).  Each direction
// picks its own operand policy, temps and deopt requirements; the temp
// choices here (incl. fixed d11) are allocator-sensitive.
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
  Representation from = instr->from();
  Representation to = instr->to();
  if (from.IsTagged()) {
    if (to.IsDouble()) {
      // Tagged -> double: can deopt on non-number input.
      LOperand* value = UseRegister(instr->value());
      LNumberUntagD* res = new(zone()) LNumberUntagD(value);
      return AssignEnvironment(DefineAsRegister(res));
    } else {
      ASSERT(to.IsInteger32());
      LOperand* value = UseRegisterAtStart(instr->value());
      LInstruction* res = NULL;
      if (instr->value()->type().IsSmi()) {
        // Known Smi: a plain untag, no deopt needed.
        res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
      } else {
        // General tagged -> int32: extra temp only when truncation is
        // allowed; d11 is reserved as a double scratch.
        LOperand* temp1 = TempRegister();
        LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
                                                      : NULL;
        LOperand* temp3 = FixedTemp(d11);
        res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
                                                       temp1,
                                                       temp2,
                                                       temp3));
        res = AssignEnvironment(res);
      }
      return res;
    }
  } else if (from.IsDouble()) {
    if (to.IsTagged()) {
      // Double -> tagged: allocates a heap number, hence the pointer map.
      LOperand* value = UseRegister(instr->value());
      LOperand* temp1 = TempRegister();
      LOperand* temp2 = TempRegister();
      // Make sure that the temp and result_temp registers are
      // different.
      LUnallocated* result_temp = TempRegister();
      LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
      Define(result, result_temp);
      return AssignPointerMap(result);
    } else {
      ASSERT(to.IsInteger32());
      // Double -> int32: deopts when the double is not representable.
      LOperand* value = UseRegister(instr->value());
      LOperand* temp1 = TempRegister();
      LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister() : NULL;
      LDoubleToI* res = new(zone()) LDoubleToI(value, temp1, temp2);
      return AssignEnvironment(DefineAsRegister(res));
    }
  } else if (from.IsInteger32()) {
    if (to.IsTagged()) {
      HValue* val = instr->value();
      LOperand* value = UseRegisterAtStart(val);
      if (val->CheckFlag(HInstruction::kUint32)) {
        // Uint32 may not fit a Smi; tagging can allocate and deopt.
        LNumberTagU* result = new(zone()) LNumberTagU(value);
        return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
      } else if (val->HasRange() && val->range()->IsInSmiRange()) {
        // Provably Smi-sized: tagging is just a shift, no deopt.
        return DefineAsRegister(new(zone()) LSmiTag(value));
      } else {
        LNumberTagI* result = new(zone()) LNumberTagI(value);
        return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
      }
    } else {
      ASSERT(to.IsDouble());
      if (instr->value()->CheckFlag(HInstruction::kUint32)) {
        return DefineAsRegister(
            new(zone()) LUint32ToDouble(UseRegister(instr->value())));
      } else {
        return DefineAsRegister(
            new(zone()) LInteger32ToDouble(Use(instr->value())));
      }
    }
  }
  UNREACHABLE();
  return NULL;
}
// Deoptimize when the value IS a Smi (the check wants a heap object).
LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
  return AssignEnvironment(
      new(zone()) LCheckNonSmi(UseRegisterAtStart(instr->value())));
}
// Deoptimize unless the value's instance type matches the expected range.
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
  LOperand* value = UseRegisterAtStart(instr->value());
  LInstruction* result = new(zone()) LCheckInstanceType(value);
  return AssignEnvironment(result);
}
// Walk the prototype chain checking maps; no value input, two temps, and
// temp1 doubles as the defined output.  Deoptimizes on a map mismatch.
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
  LUnallocated* temp1 = TempRegister();
  LOperand* temp2 = TempRegister();
  LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
  return AssignEnvironment(Define(result, temp1));
}
// Deoptimize unless the value is a Smi.
LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
  return AssignEnvironment(
      new(zone()) LCheckSmi(UseRegisterAtStart(instr->value())));
}
// Deoptimize unless the value is the expected function object.
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
  return AssignEnvironment(
      new(zone()) LCheckFunction(UseRegisterAtStart(instr->value())));
}
// Deoptimize unless the value's map is one of the expected maps.
LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
  LOperand* value = UseRegisterAtStart(instr->value());
  LInstruction* result = new(zone()) LCheckMaps(value);
  return AssignEnvironment(result);
}
// Clamp a value to [0, 255] (pixel-array stores).  Double and int32 inputs
// clamp directly; tagged input needs a double scratch (d11) and can
// deoptimize on non-number values.
LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
  HValue* value = instr->value();
  Representation input_rep = value->representation();
  LOperand* reg = UseRegister(value);
  if (input_rep.IsDouble()) {
    return DefineAsRegister(new(zone()) LClampDToUint8(reg, FixedTemp(d11)));
  } else if (input_rep.IsInteger32()) {
    return DefineAsRegister(new(zone()) LClampIToUint8(reg));
  } else {
    ASSERT(input_rep.IsTagged());
    // Register allocator doesn't (yet) support allocation of double
    // temps. Reserve d11 explicitly.
    LClampTToUint8* result = new(zone()) LClampTToUint8(reg, FixedTemp(d11));
    return AssignEnvironment(DefineAsRegister(result));
  }
}
// Return from the function; the ARM calling convention puts the return
// value in r0.
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
  LOperand* value = UseFixed(instr->value(), r0);
  return new(zone()) LReturn(value);
}
// Materialize a constant in a register; the lithium instruction variant is
// chosen by the requested representation.
LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
  Representation r = instr->representation();
  if (r.IsInteger32()) {
    return DefineAsRegister(new(zone()) LConstantI);
  } else if (r.IsDouble()) {
    return DefineAsRegister(new(zone()) LConstantD);
  } else if (r.IsTagged()) {
    return DefineAsRegister(new(zone()) LConstantT);
  } else {
    UNREACHABLE();
    return NULL;
  }
}
// Load from a global property cell; deoptimizes (environment) when a hole
// check is required and the cell contains the hole.
LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
  LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
  return instr->RequiresHoleCheck()
      ? AssignEnvironment(DefineAsRegister(result))
      : DefineAsRegister(result);
}
// Generic global load through the IC; global object fixed in r0, result
// in r0.
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
  LOperand* global_object = UseFixed(instr->global_object(), r0);
  LLoadGlobalGeneric* result = new(zone()) LLoadGlobalGeneric(global_object);
  return MarkAsCall(DefineFixed(result, r0), instr);
}
// Store to a global property cell.
LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
  LOperand* value = UseRegister(instr->value());
  // Use a temp to check the value in the cell in the case where we perform
  // a hole check.
  return instr->RequiresHoleCheck()
      ? AssignEnvironment(new(zone()) LStoreGlobalCell(value, TempRegister()))
      : new(zone()) LStoreGlobalCell(value, NULL);
}
// Generic global store through the IC; receiver fixed in r1, value in r0.
LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
  LOperand* global_object = UseFixed(instr->global_object(), r1);
  LOperand* value = UseFixed(instr->value(), r0);
  LStoreGlobalGeneric* result =
      new(zone()) LStoreGlobalGeneric(global_object, value);
  return MarkAsCall(result, instr);
}
// Load a slot from a context object; adds a deopt environment only when a
// hole check is required.
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
  LOperand* context = UseRegisterAtStart(instr->value());
  LInstruction* result =
      DefineAsRegister(new(zone()) LLoadContextSlot(context));
  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
// Store into a context slot.  When a write barrier is needed both operands
// must be in temp registers (the barrier clobbers them).
LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
  LOperand* context;
  LOperand* value;
  if (instr->NeedsWriteBarrier()) {
    context = UseTempRegister(instr->context());
    value = UseTempRegister(instr->value());
  } else {
    context = UseRegister(instr->context());
    value = UseRegister(instr->value());
  }
  LInstruction* result = new(zone()) LStoreContextSlot(context, value);
  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
// Monomorphic in-object/backing-store field load.
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
  return DefineAsRegister(
      new(zone()) LLoadNamedField(UseRegisterAtStart(instr->object())));
}
// Polymorphic named load.  With a generic fallback it becomes a call
// (fixed r0); otherwise it stays inline and deoptimizes when no map
// matches.
LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
    HLoadNamedFieldPolymorphic* instr) {
  ASSERT(instr->representation().IsTagged());
  if (instr->need_generic()) {
    LOperand* obj = UseFixed(instr->object(), r0);
    LLoadNamedFieldPolymorphic* result =
        new(zone()) LLoadNamedFieldPolymorphic(obj);
    return MarkAsCall(DefineFixed(result, r0), instr);
  } else {
    LOperand* obj = UseRegisterAtStart(instr->object());
    LLoadNamedFieldPolymorphic* result =
        new(zone()) LLoadNamedFieldPolymorphic(obj);
    return AssignEnvironment(DefineAsRegister(result));
  }
}
// Generic named load through the IC; receiver fixed in r0, result in r0.
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
  LOperand* object = UseFixed(instr->object(), r0);
  LInstruction* result = DefineFixed(new(zone()) LLoadNamedGeneric(object), r0);
  return MarkAsCall(result, instr);
}
// Load a function's prototype; deoptimizes (environment) when the fast
// path cannot produce it.
LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
    HLoadFunctionPrototype* instr) {
  return AssignEnvironment(DefineAsRegister(
      new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
}
// Load the elements backing store of a JS object into a new register.
LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
  return DefineAsRegister(
      new(zone()) LLoadElements(UseRegisterAtStart(instr->value())));
}
// Load the raw data pointer of an external (typed) array into a new
// register.
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
    HLoadExternalArrayPointer* instr) {
  return DefineAsRegister(
      new(zone()) LLoadExternalArrayPointer(UseRegisterAtStart(instr->value())));
}
// Keyed (indexed) load from fast elements or an external array.  Double
// elements need the backing store in a temp register; external-array loads
// of unsigned ints can overflow int32 and therefore may deoptimize.
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
  ASSERT(instr->key()->representation().IsInteger32() ||
         instr->key()->representation().IsTagged());
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
  LLoadKeyed* result = NULL;
  if (!instr->is_external()) {
    LOperand* obj = NULL;
    if (instr->representation().IsDouble()) {
      obj = UseTempRegister(instr->elements());
    } else {
      ASSERT(instr->representation().IsTagged());
      obj = UseRegisterAtStart(instr->elements());
    }
    result = new(zone()) LLoadKeyed(obj, key);
  } else {
    // External arrays: the requested representation must agree with the
    // element kind (float/double kinds produce doubles, the rest int32).
    ASSERT(
        (instr->representation().IsInteger32() &&
         (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
         (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
        (instr->representation().IsDouble() &&
         ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
          (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
    LOperand* external_pointer = UseRegister(instr->elements());
    result = new(zone()) LLoadKeyed(external_pointer, key);
  }
  DefineAsRegister(result);
  // An unsigned int array load might overflow and cause a deopt, make sure it
  // has an environment.
  bool can_deoptimize = instr->RequiresHoleCheck() ||
      (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
  return can_deoptimize ? AssignEnvironment(result) : result;
}
// Generic keyed load through the IC; receiver fixed in r1, key in r0,
// result in r0.
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
  LOperand* object = UseFixed(instr->object(), r1);
  LOperand* key = UseFixed(instr->key(), r0);
  LInstruction* result =
      DefineFixed(new(zone()) LLoadKeyedGeneric(object, key), r0);
  return MarkAsCall(result, instr);
}
// Keyed (indexed) store into fast elements or an external array.  The key
// and value move into temp registers when the write barrier (or a
// clamping/conversion store) will clobber them.
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  bool needs_write_barrier = instr->NeedsWriteBarrier();
  LOperand* key = needs_write_barrier
      ? UseTempRegister(instr->key())
      : UseRegisterOrConstantAtStart(instr->key());
  // Pixel and float stores rewrite the value register during conversion.
  bool val_is_temp_register =
      elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
      elements_kind == EXTERNAL_FLOAT_ELEMENTS;
  LOperand* val = val_is_temp_register || needs_write_barrier
      ? UseTempRegister(instr->value())
      : UseRegister(instr->value());
  LStoreKeyed* result = NULL;
  if (!instr->is_external()) {
    ASSERT(instr->elements()->representation().IsTagged());
    LOperand* object = NULL;
    if (instr->value()->representation().IsDouble()) {
      object = UseRegisterAtStart(instr->elements());
    } else {
      ASSERT(instr->value()->representation().IsTagged());
      object = UseTempRegister(instr->elements());
    }
    result = new(zone()) LStoreKeyed(object, key, val);
  } else {
    // External arrays: the value representation must agree with the
    // element kind (see the matching ASSERT in DoLoadKeyed).
    ASSERT(
        (instr->value()->representation().IsInteger32() &&
         (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
         (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
        (instr->value()->representation().IsDouble() &&
         ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
          (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
    ASSERT(instr->elements()->representation().IsExternal());
    LOperand* external_pointer = UseRegister(instr->elements());
    result = new(zone()) LStoreKeyed(external_pointer, key, val);
  }
  ASSERT(result != NULL);
  return result;
}
// Generic keyed store through the IC; receiver fixed in r2, key in r1,
// value in r0.
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
  LOperand* obj = UseFixed(instr->object(), r2);
  LOperand* key = UseFixed(instr->key(), r1);
  LOperand* val = UseFixed(instr->value(), r0);
  ASSERT(instr->object()->representation().IsTagged());
  ASSERT(instr->key()->representation().IsTagged());
  ASSERT(instr->value()->representation().IsTagged());
  return MarkAsCall(new(zone()) LStoreKeyedGeneric(obj, key, val), instr);
}
// Change an object's elements kind.  A simple map change is done inline
// (new map in a temp register); otherwise the elements must be copied, so
// the operation becomes a call with fixed registers (object r0, temps
// r2/r3).
LInstruction* LChunkBuilder::DoTransitionElementsKind(
    HTransitionElementsKind* instr) {
  ElementsKind from_kind = instr->original_map()->elements_kind();
  ElementsKind to_kind = instr->transitioned_map()->elements_kind();
  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    LOperand* object = UseRegister(instr->object());
    LOperand* new_map_reg = TempRegister();
    LTransitionElementsKind* result =
        new(zone()) LTransitionElementsKind(object, new_map_reg, NULL);
    return DefineSameAsFirst(result);
  } else {
    LOperand* object = UseFixed(instr->object(), r0);
    LOperand* fixed_object_reg = FixedTemp(r2);
    LOperand* new_map_reg = FixedTemp(r3);
    LTransitionElementsKind* result =
        new(zone()) LTransitionElementsKind(object,
                                            new_map_reg,
                                            fixed_object_reg);
    return MarkAsCall(DefineFixed(result, r0), instr);
  }
}
// Monomorphic named-field store.  Operand policies depend on which write
// barriers fire: the value barrier clobbers object/value, and a map
// transition that needs a barrier requires an extra temp.
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
  bool needs_write_barrier = instr->NeedsWriteBarrier();
  bool needs_write_barrier_for_map = !instr->transition().is_null() &&
      instr->NeedsWriteBarrierForMap();
  LOperand* obj;
  if (needs_write_barrier) {
    obj = instr->is_in_object()
        ? UseRegister(instr->object())
        : UseTempRegister(instr->object());
  } else {
    obj = needs_write_barrier_for_map
        ? UseRegister(instr->object())
        : UseRegisterAtStart(instr->object());
  }
  LOperand* val = needs_write_barrier
      ? UseTempRegister(instr->value())
      : UseRegister(instr->value());
  // We need a temporary register for write barrier of the map field.
  LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
  return new(zone()) LStoreNamedField(obj, val, temp);
}
// Generic named store through the IC; receiver fixed in r1, value in r0.
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
  LOperand* obj = UseFixed(instr->object(), r1);
  LOperand* val = UseFixed(instr->value(), r0);
  LInstruction* result = new(zone()) LStoreNamedGeneric(obj, val);
  return MarkAsCall(result, instr);
}
// String concatenation via a call; result fixed in r0.
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
  LOperand* left = UseRegisterAtStart(instr->left());
  LOperand* right = UseRegisterAtStart(instr->right());
  return MarkAsCall(DefineFixed(new(zone()) LStringAdd(left, right), r0),
                    instr);
}
// charCodeAt: both inputs in temp registers; has a pointer map (may call
// the runtime for non-flat strings) and an environment (can deoptimize).
LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
  LOperand* string = UseTempRegister(instr->string());
  LOperand* index = UseTempRegister(instr->index());
  LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(string, index);
  return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
// String.fromCharCode for a single code unit; may allocate, hence the
// pointer map.
LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
  LOperand* char_code = UseRegister(instr->value());
  LStringCharFromCode* result = new(zone()) LStringCharFromCode(char_code);
  return AssignPointerMap(DefineAsRegister(result));
}
// Load the length field of a string into a new register.
LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
  return DefineAsRegister(
      new(zone()) LStringLength(UseRegisterAtStart(instr->value())));
}
// Inline allocation of a new JSObject; two temps, and a pointer map for
// the slow (runtime allocation) path.
LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
  LAllocateObject* result =
      new(zone()) LAllocateObject(TempRegister(), TempRegister());
  return AssignPointerMap(DefineAsRegister(result));
}
// Materialize a fast-cloned literal via a call; result fixed in r0.
LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
  LFastLiteral* result = new(zone()) LFastLiteral;
  return MarkAsCall(DefineFixed(result, r0), instr);
}
// Materialize an array literal via a call; result fixed in r0.
LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
  LArrayLiteral* result = new(zone()) LArrayLiteral;
  return MarkAsCall(DefineFixed(result, r0), instr);
}
// Materialize an object literal via a call; result fixed in r0.
LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
  LObjectLiteral* result = new(zone()) LObjectLiteral;
  return MarkAsCall(DefineFixed(result, r0), instr);
}
// Materialize a regexp literal via a call; result fixed in r0.
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
  LRegExpLiteral* result = new(zone()) LRegExpLiteral;
  return MarkAsCall(DefineFixed(result, r0), instr);
}
// Materialize a function literal (closure) via a call; result fixed in r0.
LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
  LFunctionLiteral* result = new(zone()) LFunctionLiteral;
  return MarkAsCall(DefineFixed(result, r0), instr);
}
// 'delete obj[key]' via a call; object fixed in r0, key in r1, result
// in r0.
LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
  LOperand* object = UseFixed(instr->object(), r0);
  LOperand* key = UseFixed(instr->key(), r1);
  LDeleteProperty* result = new(zone()) LDeleteProperty(object, key);
  return MarkAsCall(DefineFixed(result, r0), instr);
}
// On-stack-replacement entry point: record the entry with the allocator
// and capture the environment at the current AST id.
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
  ASSERT(argument_count_ == 0);
  allocator_->MarkAsOsrEntry();
  current_block_->last_environment()->set_ast_id(instr->ast_id());
  return AssignEnvironment(new(zone()) LOsrEntry);
}
// Function parameter: lives in its caller-assigned stack slot.
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
  int spill_index = chunk()->GetParameterStackSlot(instr->index());
  return DefineAsSpilled(new(zone()) LParameter, spill_index);
}
// Value of unknown provenance at an OSR entry: assign it a fresh spill
// slot, bailing out of compilation if we exceed the fixed-index limit.
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
  int spill_index = chunk()->GetNextSpillIndex(false);  // Not double-width.
  if (spill_index > LUnallocated::kMaxFixedIndex) {
    Abort("Too many spill slots needed for OSR");
    spill_index = 0;
  }
  return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
}
// Call a code stub; pops its arguments (hence the argument_count_
// adjustment) and returns the result in r0.
LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
  argument_count_ -= instr->argument_count();
  return MarkAsCall(DefineFixed(new(zone()) LCallStub, r0), instr);
}
LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
  // There are no real uses of the arguments object.
  // arguments.length and element access are supported directly on
  // stack arguments, and any real arguments object use causes a bailout.
  // So this value is never used.
  return NULL;
}
// Read an element of the (virtual) arguments area; length goes in a temp
// register because the index computation modifies it.
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
  LOperand* args = UseRegister(instr->arguments());
  LOperand* length = UseTempRegister(instr->length());
  LOperand* index = UseRegister(instr->index());
  return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
}
// Convert an object's properties to fast mode via a runtime call; object
// and result fixed in r0.
LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
  LOperand* object = UseFixed(instr->value(), r0);
  LToFastProperties* result = new(zone()) LToFastProperties(object);
  return MarkAsCall(DefineFixed(result, r0), instr);
}
// typeof operator via a call; value and result fixed in r0.
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
  LTypeof* result = new(zone()) LTypeof(UseFixed(instr->value(), r0));
  return MarkAsCall(DefineFixed(result, r0), instr);
}
// Branch on 'typeof x === "literal"'; the input is clobbered by the test,
// hence UseTempRegister.
LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
  return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
// Branch on whether the current frame is a construct call; no value
// input, one scratch register.
LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
    HIsConstructCallAndBranch* instr) {
  return new(zone()) LIsConstructCallAndBranch(TempRegister());
}
// HSimulate updates the simulated environment (pops, pushes and explicit
// binds), and, when a preceding instruction requested a lazy deopt
// environment, emits the LLazyBailout that captures it.
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
  HEnvironment* env = current_block_->last_environment();
  ASSERT(env != NULL);
  env->set_ast_id(instr->ast_id());
  env->Drop(instr->pop_count());
  for (int i = 0; i < instr->values()->length(); ++i) {
    HValue* value = instr->values()->at(i);
    if (instr->HasAssignedIndexAt(i)) {
      env->Bind(instr->GetAssignedIndexAt(i), value);
    } else {
      env->Push(value);
    }
  }
  // If there is an instruction pending deoptimization environment create a
  // lazy bailout instruction to capture the environment.
  if (pending_deoptimization_ast_id_ == instr->ast_id()) {
    LInstruction* result = new(zone()) LLazyBailout;
    result = AssignEnvironment(result);
    // Store the lazy deopt environment with the instruction if needed. Right
    // now it is only used for LInstanceOfKnownGlobal.
    instruction_pending_deoptimization_environment_->
        SetDeferredLazyDeoptimizationEnvironment(result->environment());
    instruction_pending_deoptimization_environment_ = NULL;
    pending_deoptimization_ast_id_ = BailoutId::None();
    return result;
  }
  return NULL;
}
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
  // A function-entry check calls the stack guard; a loop back-edge check
  // only needs a pointer map plus a deoptimization environment.
  if (instr->is_function_entry()) {
    return MarkAsCall(new(zone()) LStackCheck, instr);
  }
  ASSERT(instr->is_backwards_branch());
  LInstruction* check = AssignPointerMap(new(zone()) LStackCheck);
  return AssignEnvironment(check);
}
LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
  // Build the callee environment from the caller's and make it the block's
  // current environment; no lithium instruction is emitted for the entry.
  HEnvironment* caller_env = current_block_->last_environment();
  HConstant* undefined = graph()->GetConstantUndefined();
  HEnvironment* callee_env = caller_env->CopyForInlining(
      instr->closure(),
      instr->arguments_count(),
      instr->function(),
      undefined,
      instr->call_kind(),
      instr->inlining_kind());
  if (instr->arguments_var() != NULL) {
    // Bind the materialized arguments variable if the callee uses one.
    callee_env->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
  }
  callee_env->set_entry(instr);
  current_block_->UpdateEnvironment(callee_env);
  chunk_->AddInlinedClosure(instr->closure());
  return NULL;
}
LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
  // If the inlined function materialized its arguments, pop them now and
  // adjust the pushed-argument accounting.
  LInstruction* pop = NULL;
  HEnvironment* env = current_block_->last_environment();
  if (env->entry()->arguments_pushed()) {
    int count = env->arguments_environment()->parameter_count();
    pop = new(zone()) LDrop(count);
    argument_count_ -= count;
  }
  // Switch the block back to the caller's environment.
  HEnvironment* outer = env->DiscardInlined(false);
  current_block_->UpdateEnvironment(outer);
  return pop;
}
LInstruction* LChunkBuilder::DoIn(HIn* instr) {
  // The 'in' operator is implemented as a call; result is fixed to r0.
  LOperand* key_op = UseRegisterAtStart(instr->key());
  LOperand* object_op = UseRegisterAtStart(instr->object());
  LIn* in = new(zone()) LIn(key_op, object_op);
  return MarkAsCall(DefineFixed(in, r0), instr);
}
LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
  // Runtime call that can deoptimize eagerly; enumerable fixed to r0.
  LOperand* enumerable = UseFixed(instr->enumerable(), r0);
  LForInPrepareMap* prepare = new(zone()) LForInPrepareMap(enumerable);
  return MarkAsCall(DefineFixed(prepare, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
}
LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
  // May deoptimize, hence the assigned environment.
  LOperand* map_op = UseRegister(instr->map());
  LForInCacheArray* cache = new(zone()) LForInCacheArray(map_op);
  return AssignEnvironment(DefineAsRegister(cache));
}
LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
  // Deoptimizes when the object's map does not match the expected one.
  LOperand* checked = UseRegisterAtStart(instr->value());
  LOperand* expected_map = UseRegisterAtStart(instr->map());
  return AssignEnvironment(new(zone()) LCheckMapValue(checked, expected_map));
}
LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
  // Loads an in-object/backing-store field selected by a dynamic index.
  LOperand* obj = UseRegister(instr->object());
  LOperand* idx = UseRegister(instr->index());
  LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(obj, idx);
  return DefineAsRegister(load);
}
} } // namespace v8::internal
|
#include "component.h"
//-META-------------------------------------------------------------------------------------------//
// Reflection metadata for the alien AI component: registers the base class
// and exposes the two tunable speeds to serialization, the editor UI, and
// the undo system.
COFFEE_BeginType(alien_controller::Component);
    COFFEE_Ancestor(plugins::Component);
    COFFEE_Attribute(real, _MoveSpeed, meta::MODE_Serializeable | meta::MODE_Editable | meta::MODE_Undoable);
    COFFEE_Attribute(real, _RollSpeed, meta::MODE_Serializeable | meta::MODE_Editable | meta::MODE_Undoable);
COFFEE_EndType();
namespace alien_controller
{
//-CONSTRUCTORS-------------------------------------------------------------------------------//
Component::Component() :
_MoveSpeed(1.0f),
_RollSpeed(1.0f),
_ReloadTime(0.25f)
{
}
//--------------------------------------------------------------------------------------------//
Component::~Component()
{
}
//-OPERATIONS---------------------------------------------------------------------------------//
void Component::OnStart()
{
_Force = 0.0f;
_Torque = 0.0f;
for (uint32 index=0 ; index<4 ; ++index)
_KeyArray[index] = false;
_ShootTime = 0.0f;
_LifeTime = 0.0f;
}
//--------------------------------------------------------------------------------------------//
void Component::OnStop()
{
_Force = 0.0f;
_Torque = 0.0f;
if (GetNode().HasRigidBody() && GetNode().GetRigidBody().IsRunning())
GetNode().GetRigidBody().SetVelocity(basic::Vector3::Zero);
}
//--------------------------------------------------------------------------------------------//
void Component::OnUpdate(const basic::Time& time_step)
{
if (GetNode().GetParent().HasParent())
{
_LifeTime += time_step;
if (_LifeTime.GetSecondCount()>5.0f)
GetNode().GetParent().GetParent().Destroy();
}
// Is Player near ?
bool it_is_near = false;
bool it_is_targeting = false;
if (GetNode().GetParent().HasParent())
{
scene::Node* player = GetNode().GetRoot().FindChild("CubeShip");
if (player!=NULL)
{
scene::Node* root = player->FindChild("Root");
if (root!=NULL)
{
scene::Node* ship = root->FindChild("Ship");
if (ship!=NULL)
{
real distance = GetNode().GetTransform().GetMatrix(scene::TRS_Local, scene::TRS_World).GetTranslation().GetDistance(
ship->GetTransform().GetTranslation());
it_is_near = distance < 120.0f;
if (it_is_near)
{
_LifeTime = 0.0f;
real dot = GetNode().GetTransform().GetMatrix(scene::TRS_Local, scene::TRS_World).GetZAxis().GetDotProduct(
ship->GetTransform().GetTranslation()-GetNode().GetTransform().GetMatrix(scene::TRS_Local, scene::TRS_World).GetTranslation());
it_is_near = dot > 0.5f;
it_is_targeting = dot > 0.8f;
}
}
}
}
}
_ShootTime += time_step;
if (it_is_near && _ShootTime.GetSecondCount()>=_ReloadTime)
{
_ShootTime = 0.0f;
scene::Instance* projectile = COFFEE_New(scene::Instance, "/projectiles/bullet.scene");
projectile->AddComponent(COFFEE_New(scene::Transform));
projectile->AddComponent(COFFEE_New(scene::Bounding));
// Attach to the root of the ship instance
GetNode().GetParent().Attach(*projectile);
audio::Device::Get().PlaySound("/import/sounds/gun.sound", true, 2.0f, 100.0f,
GetNode().GetTransform().GetMatrix(scene::TRS_Local, scene::TRS_World).GetTranslation());
}
_UpdateTime += time_step;
if (GetNode().GetParent().HasParent() && _UpdateTime.GetSecondCount()>1.0f)
{
for (uint32 index=0 ; index<4 ; ++index)
_KeyArray[index] = basic::GetRandom(24)>10;
if (it_is_targeting)
{
_KeyArray[input::KEY_Left-input::KEY_Left] = false;
_KeyArray[input::KEY_Right-input::KEY_Left] = false;
}
_UpdateTime = 0.0f;
}
}
//--------------------------------------------------------------------------------------------//
void Component::OnFixedUpdate(const basic::Time& time_step)
{
if (GetNode().HasRigidBody() && GetNode().GetRigidBody().IsRunning())
{
_Force = 0.0f;
real distance = _MoveSpeed*time_step.GetSecondCount()*8.0f;
if (_KeyArray[input::KEY_Up-input::KEY_Left]) _Force += _ComputeMove(distance);
if (_KeyArray[input::KEY_Down-input::KEY_Left]) _Force += _ComputeMove(-distance);
basic::Vector3 force = ((_Force*1000.0f-GetNode().GetRigidBody().GetVelocity()*0.6f/time_step.GetSecondCount()));
GetNode().GetRigidBody().AddForce(force);
_Force = 0.0f;
_Torque = 0.0f;
real angle = _RollSpeed*time_step.GetSecondCount();
if (_KeyArray[input::KEY_Right-input::KEY_Left]) _Torque += basic::Vector3(0.0f, angle, 0.0f);
if (_KeyArray[input::KEY_Left-input::KEY_Left]) _Torque += basic::Vector3(0.0f, -angle, 0.0f);
basic::Vector3 torque = ((_Torque*1000.0f-GetNode().GetRigidBody().GetOmega()*0.6f/time_step.GetSecondCount()));
GetNode().GetRigidBody().AddTorque(torque);
_Torque = 0.0f;
}
GetNode().GetTransform().SetTranslation(basic::Vector3(
GetNode().GetTransform().GetTranslation().X,
0.0f,
GetNode().GetTransform().GetTranslation().Z));
basic::Euler rotation = GetNode().GetTransform().GetRotation();
rotation.X = 0.0f;
rotation.Z = 0.0f;
GetNode().GetTransform().SetRotation(rotation);
scene::Node* shield = GetNode().FindChild("Shield");
if (shield!=NULL)
{
plugins::ComponentWrapper* component = shield->FindComponent("shield::Component");
if (component!=NULL)
{
uint32* life = component->Grab<uint32>("Life");
if (life!=NULL && *life<=0)
GetNode().GetParent().GetParent().Destroy();
}
}
}
//--------------------------------------------------------------------------------------------//
void Component::OnRender(const graphics::Viewport& viewport)
{
}
//--------------------------------------------------------------------------------------------//
void Component::OnCollisionBegin(const physics::Collision& collision)
{
}
//--------------------------------------------------------------------------------------------//
void Component::OnCollision(const physics::Collision& collision)
{
}
//--------------------------------------------------------------------------------------------//
void Component::OnCollisionEnd(const physics::Collision& collision)
{
}
//--------------------------------------------------------------------------------------------//
bool Component::OnInputEvent(const input::EVENT& event, const void* parameters)
{
return false;
}
//-OPERATIONS-----------------------------------------------------------------------------//
basic::Vector3 Component::_ComputeMove(real distance)
{
basic::Vector3 movement;
movement = GetNode().GetTransform().GetMatrix(scene::TRS_Local, scene::TRS_World).GetRotation().GetZAxis();
movement *= distance;
return movement;
}
}
|
// Copyright (c) 2009-2019 The Bitcoin Core developers
// Copyright (c) 2014-2019 The BitsCoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <rpc/server.h>
#include <chainparams.h>
#include <clientversion.h>
#include <core_io.h>
#include <validation.h>
#include <net.h>
#include <net_processing.h>
#include <netbase.h>
#include <policy/policy.h>
#include <rpc/protocol.h>
#include <sync.h>
#include <timedata.h>
#include <ui_interface.h>
#include <util.h>
#include <utilstrencodings.h>
#include <version.h>
#include <warnings.h>
#include <univalue.h>
// RPC: return the number of peer connections (inbound + outbound).
static UniValue getconnectioncount(const JSONRPCRequest& request)
{
    if (request.fHelp || request.params.size() != 0) {
        throw std::runtime_error(
            "getconnectioncount\n"
            "\nReturns the number of connections to other nodes.\n"
            "\nResult:\n"
            "n (numeric) The connection count\n"
            "\nExamples:\n"
            + HelpExampleCli("getconnectioncount", "")
            + HelpExampleRpc("getconnectioncount", "")
        );
    }

    if (!g_connman) {
        throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled");
    }

    return static_cast<int>(g_connman->GetNodeCount(CConnman::CONNECTIONS_ALL));
}
// RPC: queue a ping to every connected peer; results show up in
// getpeerinfo's pingtime/pingwait fields.
static UniValue ping(const JSONRPCRequest& request)
{
    if (request.fHelp || request.params.size() != 0) {
        throw std::runtime_error(
            "ping\n"
            "\nRequests that a ping be sent to all other nodes, to measure ping time.\n"
            "Results provided in getpeerinfo, pingtime and pingwait fields are decimal seconds.\n"
            "Ping command is handled in queue with all other commands, so it measures processing backlog, not just network ping.\n"
            "\nExamples:\n"
            + HelpExampleCli("ping", "")
            + HelpExampleRpc("ping", "")
        );
    }

    if (!g_connman) {
        throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled");
    }

    // Request that each node send a ping during next message processing pass
    g_connman->ForEachNode([](CNode* node) {
        node->fPingQueued = true;
    });
    return NullUniValue;
}
// RPC: report per-peer connection and protocol statistics as a JSON array.
static UniValue getpeerinfo(const JSONRPCRequest& request)
{
    if (request.fHelp || request.params.size() != 0)
        throw std::runtime_error(
            "getpeerinfo\n"
            "\nReturns data about each connected network node as a json array of objects.\n"
            "\nResult:\n"
            "[\n"
            " {\n"
            " \"id\": n, (numeric) Peer index\n"
            " \"addr\":\"host:port\", (string) The IP address and port of the peer\n"
            " \"addrbind\":\"ip:port\", (string) Bind address of the connection to the peer\n"
            " \"addrlocal\":\"ip:port\", (string) Local address as reported by the peer\n"
            " \"services\":\"xxxxxxxxxxxxxxxx\", (string) The services offered\n"
            " \"relaytxes\":true|false, (boolean) Whether peer has asked us to relay transactions to it\n"
            " \"lastsend\": ttt, (numeric) The time in seconds since epoch (Jan 1 1970 GMT) of the last send\n"
            " \"lastrecv\": ttt, (numeric) The time in seconds since epoch (Jan 1 1970 GMT) of the last receive\n"
            " \"bytessent\": n, (numeric) The total bytes sent\n"
            " \"bytesrecv\": n, (numeric) The total bytes received\n"
            " \"conntime\": ttt, (numeric) The connection time in seconds since epoch (Jan 1 1970 GMT)\n"
            " \"timeoffset\": ttt, (numeric) The time offset in seconds\n"
            " \"pingtime\": n, (numeric) ping time (if available)\n"
            " \"minping\": n, (numeric) minimum observed ping time (if any at all)\n"
            " \"pingwait\": n, (numeric) ping wait (if non-zero)\n"
            " \"version\": v, (numeric) The peer version, such as 70001\n"
            " \"subver\": \"/Satoshi:0.8.5/\", (string) The string version\n"
            " \"inbound\": true|false, (boolean) Inbound (true) or Outbound (false)\n"
            " \"addnode\": true|false, (boolean) Whether connection was due to addnode/-connect or if it was an automatic/inbound connection\n"
            " \"startingheight\": n, (numeric) The starting height (block) of the peer\n"
            " \"banscore\": n, (numeric) The ban score\n"
            " \"synced_headers\": n, (numeric) The last header we have in common with this peer\n"
            " \"synced_blocks\": n, (numeric) The last block we have in common with this peer\n"
            " \"inflight\": [\n"
            " n, (numeric) The heights of blocks we're currently asking from this peer\n"
            " ...\n"
            " ],\n"
            " \"whitelisted\": true|false, (boolean) Whether the peer is whitelisted\n"
            " \"bytessent_per_msg\": {\n"
            " \"addr\": n, (numeric) The total bytes sent aggregated by message type\n"
            " ...\n"
            " },\n"
            " \"bytesrecv_per_msg\": {\n"
            " \"addr\": n, (numeric) The total bytes received aggregated by message type\n"
            " ...\n"
            " }\n"
            " }\n"
            " ,...\n"
            "]\n"
            "\nExamples:\n"
            + HelpExampleCli("getpeerinfo", "")
            + HelpExampleRpc("getpeerinfo", "")
        );

    if(!g_connman)
        throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled");

    // Snapshot connection-layer stats, then enrich each entry with
    // validation-layer state (ban score, sync heights) where available.
    std::vector<CNodeStats> vstats;
    g_connman->GetNodeStats(vstats);

    UniValue ret(UniValue::VARR);

    for (const CNodeStats& stats : vstats) {
        UniValue obj(UniValue::VOBJ);
        CNodeStateStats statestats;
        // May fail for peers not (yet) tracked by net_processing.
        bool fStateStats = GetNodeStateStats(stats.nodeid, statestats);
        obj.pushKV("id", stats.nodeid);
        obj.pushKV("addr", stats.addrName);
        // Optional addresses: only emitted when known/valid.
        if (!(stats.addrLocal.empty()))
            obj.pushKV("addrlocal", stats.addrLocal);
        if (stats.addrBind.IsValid())
            obj.pushKV("addrbind", stats.addrBind.ToString());
        obj.pushKV("services", strprintf("%016x", stats.nServices));
        obj.pushKV("relaytxes", stats.fRelayTxes);
        obj.pushKV("lastsend", stats.nLastSend);
        obj.pushKV("lastrecv", stats.nLastRecv);
        obj.pushKV("bytessent", stats.nSendBytes);
        obj.pushKV("bytesrecv", stats.nRecvBytes);
        obj.pushKV("conntime", stats.nTimeConnected);
        obj.pushKV("timeoffset", stats.nTimeOffset);
        // Ping fields are omitted when no measurement exists.
        if (stats.dPingTime > 0.0)
            obj.pushKV("pingtime", stats.dPingTime);
        if (stats.dMinPing < static_cast<double>(std::numeric_limits<int64_t>::max())/1e6)
            obj.pushKV("minping", stats.dMinPing);
        if (stats.dPingWait > 0.0)
            obj.pushKV("pingwait", stats.dPingWait);
        obj.pushKV("version", stats.nVersion);
        // Use the sanitized form of subver here, to avoid tricksy remote peers from
        // corrupting or modifying the JSON output by putting special characters in
        // their ver message.
        obj.pushKV("subver", stats.cleanSubVer);
        obj.pushKV("inbound", stats.fInbound);
        obj.pushKV("addnode", stats.m_manual_connection);
        obj.pushKV("startingheight", stats.nStartingHeight);
        if (fStateStats) {
            obj.pushKV("banscore", statestats.nMisbehavior);
            obj.pushKV("synced_headers", statestats.nSyncHeight);
            obj.pushKV("synced_blocks", statestats.nCommonHeight);
            UniValue heights(UniValue::VARR);
            for (int height : statestats.vHeightInFlight) {
                heights.push_back(height);
            }
            obj.pushKV("inflight", heights);
        }
        obj.pushKV("whitelisted", stats.fWhitelisted);

        // Per-message-type traffic breakdown; zero-byte entries are skipped.
        UniValue sendPerMsgCmd(UniValue::VOBJ);
        for (const mapMsgCmdSize::value_type &i : stats.mapSendBytesPerMsgCmd) {
            if (i.second > 0)
                sendPerMsgCmd.pushKV(i.first, i.second);
        }
        obj.pushKV("bytessent_per_msg", sendPerMsgCmd);

        UniValue recvPerMsgCmd(UniValue::VOBJ);
        for (const mapMsgCmdSize::value_type &i : stats.mapRecvBytesPerMsgCmd) {
            if (i.second > 0)
                recvPerMsgCmd.pushKV(i.first, i.second);
        }
        obj.pushKV("bytesrecv_per_msg", recvPerMsgCmd);

        ret.push_back(obj);
    }

    return ret;
}
// RPC: manage the manual-connection ("addnode") list, or dial a peer once.
static UniValue addnode(const JSONRPCRequest& request)
{
    // params[1] is read before the size check; UniValue::operator[]
    // returns NullUniValue for out-of-range indices, so this is safe.
    std::string strCommand;
    if (!request.params[1].isNull())
        strCommand = request.params[1].get_str();
    if (request.fHelp || request.params.size() != 2 ||
        (strCommand != "onetry" && strCommand != "add" && strCommand != "remove"))
        throw std::runtime_error(
            "addnode \"node\" \"add|remove|onetry\"\n"
            "\nAttempts to add or remove a node from the addnode list.\n"
            "Or try a connection to a node once.\n"
            "Nodes added using addnode (or -connect) are protected from DoS disconnection and are not required to be\n"
            "full nodes/support SegWit as other outbound peers are (though such peers will not be synced from).\n"
            "\nArguments:\n"
            "1. \"node\" (string, required) The node (see getpeerinfo for nodes)\n"
            "2. \"command\" (string, required) 'add' to add a node to the list, 'remove' to remove a node from the list, 'onetry' to try a connection to the node once\n"
            "\nExamples:\n"
            + HelpExampleCli("addnode", "\"192.168.0.6:8333\" \"onetry\"")
            + HelpExampleRpc("addnode", "\"192.168.0.6:8333\", \"onetry\"")
        );

    if(!g_connman)
        throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled");

    std::string strNode = request.params[0].get_str();

    if (strCommand == "onetry")
    {
        // One-shot connection attempt; the node is not remembered.
        CAddress addr;
        g_connman->OpenNetworkConnection(addr, false, nullptr, strNode.c_str(), false, false, true);
        return NullUniValue;
    }

    if (strCommand == "add")
    {
        if(!g_connman->AddNode(strNode))
            throw JSONRPCError(RPC_CLIENT_NODE_ALREADY_ADDED, "Error: Node already added");
    }
    else if(strCommand == "remove")
    {
        if(!g_connman->RemoveAddedNode(strNode))
            throw JSONRPCError(RPC_CLIENT_NODE_NOT_ADDED, "Error: Node has not been added.");
    }

    return NullUniValue;
}
// RPC: disconnect a peer, identified by exactly one of address or node id.
static UniValue disconnectnode(const JSONRPCRequest& request)
{
    if (request.fHelp || request.params.size() == 0 || request.params.size() >= 3)
        throw std::runtime_error(
            "disconnectnode \"[address]\" [nodeid]\n"
            "\nImmediately disconnects from the specified peer node.\n"
            "\nStrictly one out of 'address' and 'nodeid' can be provided to identify the node.\n"
            "\nTo disconnect by nodeid, either set 'address' to the empty string, or call using the named 'nodeid' argument only.\n"
            "\nArguments:\n"
            "1. \"address\" (string, optional) The IP address/port of the node\n"
            "2. \"nodeid\" (number, optional) The node ID (see getpeerinfo for node IDs)\n"
            "\nExamples:\n"
            + HelpExampleCli("disconnectnode", "\"192.168.0.6:8333\"")
            + HelpExampleCli("disconnectnode", "\"\" 1")
            + HelpExampleRpc("disconnectnode", "\"192.168.0.6:8333\"")
            + HelpExampleRpc("disconnectnode", "\"\", 1")
        );

    if(!g_connman)
        throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled");

    bool success;
    const UniValue &address_arg = request.params[0];
    const UniValue &id_arg = request.params[1];

    if (!address_arg.isNull() && id_arg.isNull()) {
        /* handle disconnect-by-address */
        success = g_connman->DisconnectNode(address_arg.get_str());
    } else if (!id_arg.isNull() && (address_arg.isNull() || (address_arg.isStr() && address_arg.get_str().empty()))) {
        /* handle disconnect-by-id */
        NodeId nodeid = (NodeId) id_arg.get_int64();
        success = g_connman->DisconnectNode(nodeid);
    } else {
        // Both (non-empty) or neither identifier supplied: ambiguous.
        throw JSONRPCError(RPC_INVALID_PARAMS, "Only one of address and nodeid should be provided.");
    }

    if (!success) {
        throw JSONRPCError(RPC_CLIENT_NODE_NOT_CONNECTED, "Node not found in connected nodes");
    }

    return NullUniValue;
}
// RPC: report connection state for one or all manually-added nodes.
static UniValue getaddednodeinfo(const JSONRPCRequest& request)
{
    if (request.fHelp || request.params.size() > 1)
        throw std::runtime_error(
            "getaddednodeinfo ( \"node\" )\n"
            "\nReturns information about the given added node, or all added nodes\n"
            "(note that onetry addnodes are not listed here)\n"
            "\nArguments:\n"
            "1. \"node\" (string, optional) If provided, return information about this specific node, otherwise all nodes are returned.\n"
            "\nResult:\n"
            "[\n"
            " {\n"
            " \"addednode\" : \"192.168.0.201\", (string) The node IP address or name (as provided to addnode)\n"
            " \"connected\" : true|false, (boolean) If connected\n"
            " \"addresses\" : [ (list of objects) Only when connected = true\n"
            " {\n"
            " \"address\" : \"192.168.0.201:8333\", (string) The bitscoin server IP and port we're connected to\n"
            " \"connected\" : \"outbound\" (string) connection, inbound or outbound\n"
            " }\n"
            " ]\n"
            " }\n"
            " ,...\n"
            "]\n"
            "\nExamples:\n"
            + HelpExampleCli("getaddednodeinfo", "\"192.168.0.201\"")
            + HelpExampleRpc("getaddednodeinfo", "\"192.168.0.201\"")
        );

    if(!g_connman)
        throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled");

    std::vector<AddedNodeInfo> vInfo = g_connman->GetAddedNodeInfo();

    // Optional filter: narrow the list to the single requested node, or
    // error out if it was never added.
    if (!request.params[0].isNull()) {
        bool found = false;
        for (const AddedNodeInfo& info : vInfo) {
            if (info.strAddedNode == request.params[0].get_str()) {
                vInfo.assign(1, info);
                found = true;
                break;
            }
        }
        if (!found) {
            throw JSONRPCError(RPC_CLIENT_NODE_NOT_ADDED, "Error: Node has not been added.");
        }
    }

    UniValue ret(UniValue::VARR);

    for (const AddedNodeInfo& info : vInfo) {
        UniValue obj(UniValue::VOBJ);
        obj.pushKV("addednode", info.strAddedNode);
        obj.pushKV("connected", info.fConnected);
        // "addresses" is emitted as an empty array when not connected.
        UniValue addresses(UniValue::VARR);
        if (info.fConnected) {
            UniValue address(UniValue::VOBJ);
            address.pushKV("address", info.resolvedAddress.ToString());
            address.pushKV("connected", info.fInbound ? "inbound" : "outbound");
            addresses.push_back(address);
        }
        obj.pushKV("addresses", addresses);
        ret.push_back(obj);
    }

    return ret;
}
// RPC: report aggregate network traffic and upload-target status.
static UniValue getnettotals(const JSONRPCRequest& request)
{
    if (request.fHelp || request.params.size() > 0)
        throw std::runtime_error(
            "getnettotals\n"
            "\nReturns information about network traffic, including bytes in, bytes out,\n"
            "and current time.\n"
            "\nResult:\n"
            "{\n"
            " \"totalbytesrecv\": n, (numeric) Total bytes received\n"
            " \"totalbytessent\": n, (numeric) Total bytes sent\n"
            " \"timemillis\": t, (numeric) Current UNIX time in milliseconds\n"
            " \"uploadtarget\":\n"
            " {\n"
            " \"timeframe\": n, (numeric) Length of the measuring timeframe in seconds\n"
            " \"target\": n, (numeric) Target in bytes\n"
            " \"target_reached\": true|false, (boolean) True if target is reached\n"
            " \"serve_historical_blocks\": true|false, (boolean) True if serving historical blocks\n"
            " \"bytes_left_in_cycle\": t, (numeric) Bytes left in current time cycle\n"
            " \"time_left_in_cycle\": t (numeric) Seconds left in current time cycle\n"
            " }\n"
            "}\n"
            "\nExamples:\n"
            + HelpExampleCli("getnettotals", "")
            + HelpExampleRpc("getnettotals", "")
        );
    if(!g_connman)
        throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled");

    UniValue obj(UniValue::VOBJ);
    obj.pushKV("totalbytesrecv", g_connman->GetTotalBytesRecv());
    obj.pushKV("totalbytessent", g_connman->GetTotalBytesSent());
    obj.pushKV("timemillis", GetTimeMillis());

    // The -maxuploadtarget accounting, nested under "uploadtarget".
    UniValue outboundLimit(UniValue::VOBJ);
    outboundLimit.pushKV("timeframe", g_connman->GetMaxOutboundTimeframe());
    outboundLimit.pushKV("target", g_connman->GetMaxOutboundTarget());
    outboundLimit.pushKV("target_reached", g_connman->OutboundTargetReached(false));
    outboundLimit.pushKV("serve_historical_blocks", !g_connman->OutboundTargetReached(true));
    outboundLimit.pushKV("bytes_left_in_cycle", g_connman->GetOutboundTargetBytesLeft());
    outboundLimit.pushKV("time_left_in_cycle", g_connman->GetMaxOutboundTimeLeftInCycle());
    obj.pushKV("uploadtarget", outboundLimit);
    return obj;
}
// Helper for getnetworkinfo: one entry per routable network with its
// -onlynet/reachability/proxy status.
static UniValue GetNetworksInfo()
{
    UniValue networks(UniValue::VARR);
    for (int net_index = 0; net_index < NET_MAX; ++net_index) {
        enum Network network = static_cast<enum Network>(net_index);
        // Skip pseudo-networks that are never dialed directly.
        if (network == NET_UNROUTABLE || network == NET_INTERNAL) {
            continue;
        }
        proxyType proxy;
        GetProxy(network, proxy);
        UniValue obj(UniValue::VOBJ);
        obj.pushKV("name", GetNetworkName(network));
        obj.pushKV("limited", IsLimited(network));
        obj.pushKV("reachable", IsReachable(network));
        obj.pushKV("proxy", proxy.IsValid() ? proxy.proxy.ToStringIPPort() : std::string());
        obj.pushKV("proxy_randomize_credentials", proxy.randomize_credentials);
        networks.push_back(obj);
    }
    return networks;
}
// RPC: report overall P2P networking state (versions, services, per-network
// proxy configuration, local addresses, fees, warnings).
static UniValue getnetworkinfo(const JSONRPCRequest& request)
{
    if (request.fHelp || request.params.size() != 0)
        throw std::runtime_error(
            "getnetworkinfo\n"
            "Returns an object containing various state info regarding P2P networking.\n"
            "\nResult:\n"
            "{\n"
            " \"version\": xxxxx, (numeric) the server version\n"
            " \"subversion\": \"/Satoshi:x.x.x/\", (string) the server subversion string\n"
            " \"protocolversion\": xxxxx, (numeric) the protocol version\n"
            " \"localservices\": \"xxxxxxxxxxxxxxxx\", (string) the services we offer to the network\n"
            " \"localrelay\": true|false, (bool) true if transaction relay is requested from peers\n"
            " \"timeoffset\": xxxxx, (numeric) the time offset\n"
            " \"connections\": xxxxx, (numeric) the number of connections\n"
            " \"networkactive\": true|false, (bool) whether p2p networking is enabled\n"
            " \"networks\": [ (array) information per network\n"
            " {\n"
            " \"name\": \"xxx\", (string) network (ipv4, ipv6 or onion)\n"
            " \"limited\": true|false, (boolean) is the network limited using -onlynet?\n"
            " \"reachable\": true|false, (boolean) is the network reachable?\n"
            " \"proxy\": \"host:port\" (string) the proxy that is used for this network, or empty if none\n"
            " \"proxy_randomize_credentials\": true|false, (string) Whether randomized credentials are used\n"
            " }\n"
            " ,...\n"
            " ],\n"
            " \"relayfee\": x.xxxxxxxx, (numeric) minimum relay fee for transactions in " + CURRENCY_UNIT + "/kB\n"
            " \"incrementalfee\": x.xxxxxxxx, (numeric) minimum fee increment for mempool limiting or BIP 125 replacement in " + CURRENCY_UNIT + "/kB\n"
            " \"localaddresses\": [ (array) list of local addresses\n"
            " {\n"
            " \"address\": \"xxxx\", (string) network address\n"
            " \"port\": xxx, (numeric) network port\n"
            " \"score\": xxx (numeric) relative score\n"
            " }\n"
            " ,...\n"
            // Fixed: the help JSON was missing the comma after the
            // localaddresses array, before the "warnings" field.
            " ],\n"
            " \"warnings\": \"...\" (string) any network and blockchain warnings\n"
            "}\n"
            "\nExamples:\n"
            + HelpExampleCli("getnetworkinfo", "")
            + HelpExampleRpc("getnetworkinfo", "")
        );

    LOCK(cs_main);
    UniValue obj(UniValue::VOBJ);
    obj.pushKV("version", CLIENT_VERSION);
    obj.pushKV("subversion", strSubVersion);
    obj.pushKV("protocolversion", PROTOCOL_VERSION);
    // Connection-manager fields are omitted entirely when networking is
    // disabled (e.g. -nonetwork), instead of erroring out.
    if (g_connman)
        obj.pushKV("localservices", strprintf("%016x", g_connman->GetLocalServices()));
    obj.pushKV("localrelay", fRelayTxes);
    obj.pushKV("timeoffset", GetTimeOffset());
    if (g_connman) {
        obj.pushKV("networkactive", g_connman->GetNetworkActive());
        obj.pushKV("connections", (int)g_connman->GetNodeCount(CConnman::CONNECTIONS_ALL));
    }
    obj.pushKV("networks", GetNetworksInfo());
    obj.pushKV("relayfee", ValueFromAmount(::minRelayTxFee.GetFeePerK()));
    obj.pushKV("incrementalfee", ValueFromAmount(::incrementalRelayFee.GetFeePerK()));
    UniValue localAddresses(UniValue::VARR);
    {
        // mapLocalHost is shared with the discovery code; hold its lock
        // only while copying it out.
        LOCK(cs_mapLocalHost);
        for (const std::pair<const CNetAddr, LocalServiceInfo> &item : mapLocalHost)
        {
            UniValue rec(UniValue::VOBJ);
            rec.pushKV("address", item.first.ToString());
            rec.pushKV("port", item.second.nPort);
            rec.pushKV("score", item.second.nScore);
            localAddresses.push_back(rec);
        }
    }
    obj.pushKV("localaddresses", localAddresses);
    obj.pushKV("warnings", GetWarnings("statusbar"));
    return obj;
}
// RPC: add or remove a manual ban for an IP or subnet.
static UniValue setban(const JSONRPCRequest& request)
{
    // params[1] is read before the size check; UniValue::operator[]
    // returns NullUniValue for out-of-range indices, so this is safe.
    std::string strCommand;
    if (!request.params[1].isNull())
        strCommand = request.params[1].get_str();
    // NOTE(review): the guard is `< 2`, so more than 4 params are
    // accepted silently — presumably matching upstream behavior; verify.
    if (request.fHelp || request.params.size() < 2 ||
        (strCommand != "add" && strCommand != "remove"))
        throw std::runtime_error(
            "setban \"subnet\" \"add|remove\" (bantime) (absolute)\n"
            "\nAttempts to add or remove an IP/Subnet from the banned list.\n"
            "\nArguments:\n"
            "1. \"subnet\" (string, required) The IP/Subnet (see getpeerinfo for nodes IP) with an optional netmask (default is /32 = single IP)\n"
            "2. \"command\" (string, required) 'add' to add an IP/Subnet to the list, 'remove' to remove an IP/Subnet from the list\n"
            "3. \"bantime\" (numeric, optional) time in seconds how long (or until when if [absolute] is set) the IP is banned (0 or empty means using the default time of 24h which can also be overwritten by the -bantime startup argument)\n"
            "4. \"absolute\" (boolean, optional) If set, the bantime must be an absolute timestamp in seconds since epoch (Jan 1 1970 GMT)\n"
            "\nExamples:\n"
            + HelpExampleCli("setban", "\"192.168.0.6\" \"add\" 86400")
            + HelpExampleCli("setban", "\"192.168.0.0/24\" \"add\"")
            + HelpExampleRpc("setban", "\"192.168.0.6\", \"add\", 86400")
        );
    if(!g_connman)
        throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled");

    // A '/' in the first argument selects subnet mode, otherwise a single
    // host is banned.
    CSubNet subNet;
    CNetAddr netAddr;
    bool isSubnet = false;

    if (request.params[0].get_str().find('/') != std::string::npos)
        isSubnet = true;

    if (!isSubnet) {
        // NOTE(review): the LookupHost/LookupSubNet return values are
        // ignored; failure is caught by the IsValid() check below.
        CNetAddr resolved;
        LookupHost(request.params[0].get_str().c_str(), resolved, false);
        netAddr = resolved;
    }
    else
        LookupSubNet(request.params[0].get_str().c_str(), subNet);

    if (! (isSubnet ? subNet.IsValid() : netAddr.IsValid()) )
        throw JSONRPCError(RPC_CLIENT_INVALID_IP_OR_SUBNET, "Error: Invalid IP/Subnet");

    if (strCommand == "add")
    {
        if (isSubnet ? g_connman->IsBanned(subNet) : g_connman->IsBanned(netAddr))
            throw JSONRPCError(RPC_CLIENT_NODE_ALREADY_ADDED, "Error: IP/Subnet already banned");

        int64_t banTime = 0; //use standard bantime if not specified
        if (!request.params[2].isNull())
            banTime = request.params[2].get_int64();

        bool absolute = false;
        if (request.params[3].isTrue())
            absolute = true;

        isSubnet ? g_connman->Ban(subNet, BanReasonManuallyAdded, banTime, absolute) : g_connman->Ban(netAddr, BanReasonManuallyAdded, banTime, absolute);
    }
    else if(strCommand == "remove")
    {
        if (!( isSubnet ? g_connman->Unban(subNet) : g_connman->Unban(netAddr) ))
            throw JSONRPCError(RPC_CLIENT_INVALID_IP_OR_SUBNET, "Error: Unban failed. Requested address/subnet was not previously banned.");
    }
    return NullUniValue;
}
// RPC: list every banned IP/subnet with its ban metadata.
static UniValue listbanned(const JSONRPCRequest& request)
{
    if (request.fHelp || request.params.size() != 0) {
        throw std::runtime_error(
            "listbanned\n"
            "\nList all banned IPs/Subnets.\n"
            "\nExamples:\n"
            + HelpExampleCli("listbanned", "")
            + HelpExampleRpc("listbanned", "")
        );
    }

    if (!g_connman) {
        throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled");
    }

    // Snapshot the ban map and render one JSON object per entry.
    banmap_t banmap;
    g_connman->GetBanned(banmap);

    UniValue result(UniValue::VARR);
    for (const auto& item : banmap) {
        const CBanEntry& ban_entry = item.second;
        UniValue entry(UniValue::VOBJ);
        entry.pushKV("address", item.first.ToString());
        entry.pushKV("banned_until", ban_entry.nBanUntil);
        entry.pushKV("ban_created", ban_entry.nCreateTime);
        entry.pushKV("ban_reason", ban_entry.banReasonToString());
        result.push_back(entry);
    }
    return result;
}
// RPC: drop every entry from the ban list.
static UniValue clearbanned(const JSONRPCRequest& request)
{
    if (request.fHelp || request.params.size() != 0) {
        throw std::runtime_error(
            "clearbanned\n"
            "\nClear all banned IPs.\n"
            "\nExamples:\n"
            + HelpExampleCli("clearbanned", "")
            + HelpExampleRpc("clearbanned", "")
        );
    }
    if (!g_connman) {
        throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled");
    }

    g_connman->ClearBanned();
    return NullUniValue;
}
// RPC: toggle all P2P network activity on or off; returns the new state.
static UniValue setnetworkactive(const JSONRPCRequest& request)
{
    if (request.fHelp || request.params.size() != 1) {
        throw std::runtime_error(
            "setnetworkactive true|false\n"
            "\nDisable/enable all p2p network activity.\n"
            "\nArguments:\n"
            "1. \"state\" (boolean, required) true to enable networking, false to disable\n"
        );
    }
    if (!g_connman) {
        throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled");
    }

    const bool active = request.params[0].get_bool();
    g_connman->SetNetworkActive(active);
    return g_connman->GetNetworkActive();
}
// Dispatch table mapping each net-category RPC name to its handler
// function and the named arguments it accepts; registered with the RPC
// table by RegisterNetRPCCommands() below.
static const CRPCCommand commands[] =
{ //  category              name                      actor (function)         argNames
  //  --------------------- ------------------------  -----------------------  ----------
    { "network",            "getconnectioncount",     &getconnectioncount,     {} },
    { "network",            "ping",                   &ping,                   {} },
    { "network",            "getpeerinfo",            &getpeerinfo,            {} },
    { "network",            "addnode",                &addnode,                {"node","command"} },
    { "network",            "disconnectnode",         &disconnectnode,         {"address", "nodeid"} },
    { "network",            "getaddednodeinfo",       &getaddednodeinfo,       {"node"} },
    { "network",            "getnettotals",           &getnettotals,           {} },
    { "network",            "getnetworkinfo",         &getnetworkinfo,         {} },
    { "network",            "setban",                 &setban,                 {"subnet", "command", "bantime", "absolute"} },
    { "network",            "listbanned",             &listbanned,             {} },
    { "network",            "clearbanned",            &clearbanned,            {} },
    { "network",            "setnetworkactive",       &setnetworkactive,       {"state"} },
};
void RegisterNetRPCCommands(CRPCTable &t)
{
    // Append every net-related RPC command above to the dispatch table.
    for (const auto& command : commands)
        t.appendCommand(command.name, &command);
}
|
/*
//@HEADER
// ************************************************************************
//
// Kokkos v. 2.0
// Copyright (2014) Sandia Corporation
//
// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact H. Carter Edwards (hcedwar@sandia.gov)
//
// ************************************************************************
//@HEADER
*/
#ifndef KOKKOS_SCRATCHSPACE_HPP
#define KOKKOS_SCRATCHSPACE_HPP
#include <stdio.h>
#include <Kokkos_Core_fwd.hpp>
#include <impl/Kokkos_Tags.hpp>
/*--------------------------------------------------------------------------*/
namespace Kokkos {
/** \brief Scratch memory space associated with an execution space.
*
*/
template< class ExecSpace >
class ScratchMemorySpace {
  static_assert (Impl::is_execution_space<ExecSpace>::value,"Instantiating ScratchMemorySpace on non-execution-space type.");
public:
  // Alignment of memory chunks returned by 'get'
  // must be a power of two
  enum { ALIGN = 8 };
private:
  // Bump-allocator cursor and end pointer for scratch level 0.
  // The cursors are 'mutable' because get_shmem() is const yet advances them.
  mutable char * m_iter_L0 ;
  char * m_end_L0 ;
  // Same pair for scratch level 1.
  mutable char * m_iter_L1 ;
  char * m_end_L1 ;
  // Strided addressing parameters set by set_team_thread_mode():
  // each get_shmem() advances the cursor by align(size) * m_multiplier and
  // the caller's chunk starts m_offset * align(size) past the old cursor.
  mutable int m_multiplier;
  mutable int m_offset;
  mutable int m_default_level;
  // Non-constructible/non-assignable except via the pointer-wrapping ctor.
  ScratchMemorySpace();
  ScratchMemorySpace & operator = ( const ScratchMemorySpace & );
  enum { MASK = ALIGN - 1 }; // Alignment used by View::shmem_size
public:
  //! Tag this class as a memory space
  typedef ScratchMemorySpace memory_space ;
  typedef ExecSpace execution_space ;
  //! This execution space preferred device_type
  typedef Kokkos::Device<execution_space,memory_space> device_type;
  typedef typename ExecSpace::array_layout array_layout ;
  typedef typename ExecSpace::size_type size_type ;
  // Round 'size' up to the next multiple of ALIGN.
  template< typename IntType >
  KOKKOS_INLINE_FUNCTION static
  IntType align( const IntType & size )
  { return ( size + MASK ) & ~MASK ; }
  // Allocate 'size' bytes from scratch level 'level' (-1 selects the
  // default level). Returns NULL when the level's capacity is exhausted;
  // on failure the cursor is restored so later requests may still succeed.
  template< typename IntType >
  KOKKOS_INLINE_FUNCTION
  void* get_shmem (const IntType& size, int level = -1) const {
    if(level == -1)
      level = m_default_level;
    if(level == 0) {
      // Caller's chunk begins m_offset aligned slots past the old cursor.
      void* tmp = m_iter_L0 + m_offset * align (size);
      // Advance the cursor for the whole strided region, then check bounds.
      if (m_end_L0 < (m_iter_L0 += align (size) * m_multiplier)) {
        m_iter_L0 -= align (size) * m_multiplier; // put it back like it was
#ifdef KOKKOS_DEBUG
        // mfh 23 Jun 2015: printf call consumes 25 registers
        // in a CUDA build, so only print in debug mode. The
        // function still returns NULL if not enough memory.
        printf ("ScratchMemorySpace<...>::get_shmem: Failed to allocate "
                "%ld byte(s); remaining capacity is %ld byte(s)\n", long(size),
                long(m_end_L0-m_iter_L0));
#endif // KOKKOS_DEBUG
        tmp = 0;
      }
      return tmp;
    } else {
      // Identical bookkeeping for level-1 scratch.
      void* tmp = m_iter_L1 + m_offset * align (size);
      if (m_end_L1 < (m_iter_L1 += align (size) * m_multiplier)) {
        m_iter_L1 -= align (size) * m_multiplier; // put it back like it was
#ifdef KOKKOS_DEBUG
        // mfh 23 Jun 2015: printf call consumes 25 registers
        // in a CUDA build, so only print in debug mode. The
        // function still returns NULL if not enough memory.
        printf ("ScratchMemorySpace<...>::get_shmem: Failed to allocate "
                "%ld byte(s); remaining capacity is %ld byte(s)\n", long(size),
                long(m_end_L1-m_iter_L1));
#endif // KOKKOS_DEBUG
        tmp = 0;
      }
      return tmp;
    }
  }
  // Wrap caller-provided memory for levels 0 and 1; no ownership is taken.
  template< typename IntType >
  KOKKOS_INLINE_FUNCTION
  ScratchMemorySpace( void * ptr_L0 , const IntType & size_L0 , void * ptr_L1 = NULL , const IntType & size_L1 = 0)
    : m_iter_L0( (char *) ptr_L0 )
    , m_end_L0( m_iter_L0 + size_L0 )
    , m_iter_L1( (char *) ptr_L1 )
    , m_end_L1( m_iter_L1 + size_L1 )
    , m_multiplier( 1 )
    , m_offset( 0 )
    , m_default_level( 0 )
  {}
  // Switch to strided team/thread addressing; 'const' because only the
  // mutable bookkeeping members change.
  KOKKOS_INLINE_FUNCTION
  const ScratchMemorySpace& set_team_thread_mode(const int& level, const int& multiplier, const int& offset) const {
    m_default_level = level;
    m_multiplier = multiplier;
    m_offset = offset;
    return *this;
  }
};
} // namespace Kokkos
#endif /* #ifndef KOKKOS_SCRATCHSPACE_HPP */
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
|
// This example uses an Adafruit Huzzah ESP8266
// to connect to shiftr.io.
//
// You can check on your device after a successful
// connection here: https://shiftr.io/try.
//
// by Joël Gähwiler
// https://github.com/256dpi/arduino-mqtt
#include <ESP8266WiFi.h>
#include <MQTT.h>
#include "wificonfig.h"
WiFiClient net;
MQTTClient client;
unsigned long lastMillis = 0;
void setup() {
  // Bring up serial logging, join the WiFi network, then connect to the
  // MQTT broker; block (with progress dots) until both links are up.
  Serial.begin(115200);
  WiFi.begin(WIFI_SSID, WIFI_PSK);
  client.begin(MQTT_URL, MQTT_PORT, net);
  Serial.print("checking wifi...");
  for (;;) {
    if (WiFi.status() == WL_CONNECTED) break;
    Serial.print(".");
    delay(1000);
  }
  Serial.print("\nconnecting...");
  for (;;) {
    if (client.connect(HOSTNAME, MQTT_USER, MQTT_PASSW)) break;
    Serial.print(".");
    delay(1000);
  }
  Serial.println("\nconnected!");
  // Announce ourselves by publishing this device's MAC address.
  client.publish(TOPIC, WiFi.macAddress());
}
void loop() {
  // Service the MQTT connection for ~10 iterations, then sleep forever
  // (deepSleep(0) never wakes without an external reset).
  static int iterations = 0;
  client.loop();
  delay(10);
  ++iterations;
  if (iterations > 10) {
    Serial.println("\nDone...");
    ESP.deepSleep(0);
  }
}
|
/*!
@file
Defines `boost::hana::equal`.
@copyright Louis Dionne 2013-2017
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_HANA_EQUAL_HPP
#define BOOST_HANA_EQUAL_HPP
#include <boost/hana/fwd/equal.hpp>
#include <boost/hana/accessors.hpp>
#include <boost/hana/all_of.hpp>
#include <boost/hana/and.hpp>
#include <boost/hana/at.hpp>
#include <boost/hana/bool.hpp>
#include <boost/hana/concept/comparable.hpp>
#include <boost/hana/concept/constant.hpp>
#include <boost/hana/concept/product.hpp>
#include <boost/hana/concept/sequence.hpp>
#include <boost/hana/concept/struct.hpp>
#include <boost/hana/config.hpp>
#include <boost/hana/core/common.hpp>
#include <boost/hana/core/to.hpp>
#include <boost/hana/core/dispatch.hpp>
#include <boost/hana/core/tag_of.hpp>
#include <boost/hana/core/when.hpp>
#include <boost/hana/detail/concepts.hpp>
#include <boost/hana/detail/has_common_embedding.hpp>
#include <boost/hana/detail/nested_to.hpp> // required by fwd decl
#include <boost/hana/first.hpp>
#include <boost/hana/if.hpp>
#include <boost/hana/length.hpp>
#include <boost/hana/second.hpp>
#include <boost/hana/value.hpp>
#include <cstddef>
namespace boost { namespace hana {
//! @cond
template <typename X, typename Y>
constexpr auto equal_t::operator()(X&& x, Y&& y) const {
    // Dispatch to the equal_impl specialization selected by the two
    // arguments' tags, forwarding both arguments unchanged.
    using TagX = typename hana::tag_of<X>::type;
    using TagY = typename hana::tag_of<Y>::type;
    return equal_impl<TagX, TagY>::apply(static_cast<X&&>(x),
                                         static_cast<Y&&>(y));
}
//! @endcond
// Fallback used when no other specialization matches: objects of
// unrelated tags always compare unequal, while "related" tags (one
// convertible to the other) without a common embedding are rejected at
// compile time, because such a comparison is most likely a bug.
template <typename T, typename U, bool condition>
struct equal_impl<T, U, when<condition>> : default_ {
    template <typename X, typename Y>
    static constexpr auto apply(X const&, Y const&) {
        // Delay the static_assert by ensuring T_ is dependent.
        using T_ = typename hana::tag_of<X>::type;
        static_assert(!hana::is_convertible<T_, U>::value &&
                      !hana::is_convertible<U, T_>::value,
        "No default implementation of hana::equal is provided for related "
        "types that can't be safely embedded into a common type, because "
        "those are most likely programming errors. If this is really what "
        "you want, you can manually convert both objects to a common "
        "Comparable type before performing the comparison. If you think "
        "you have made your types Comparable but you see this, perhaps you "
        "forgot to define some of the necessary methods for an automatic "
        "model of Comparable to kick in. A possible culprit is defining "
        "'operator==' but not 'operator!='.");
        // Unrelated tags: never equal (compile-time false).
        return hana::false_c;
    }
};
// Cross-type overload
template <typename T, typename U>
struct equal_impl<T, U, when<
    detail::has_nontrivial_common_embedding<Comparable, T, U>::value &&
    !detail::EqualityComparable<T, U>::value
>> {
    // Two different Comparable tags that share a common embedding:
    // convert both sides to the common type C and compare there.
    using C = typename hana::common<T, U>::type;
    template <typename X, typename Y>
    static constexpr auto apply(X&& x, Y&& y) {
        return hana::equal(hana::to<C>(static_cast<X&&>(x)),
                           hana::to<C>(static_cast<Y&&>(y)));
    }
};
//////////////////////////////////////////////////////////////////////////
// Model for EqualityComparable data types
//////////////////////////////////////////////////////////////////////////
template <typename T, typename U>
struct equal_impl<T, U, when<detail::EqualityComparable<T, U>::value>> {
    // The underlying types already provide a suitable operator==;
    // simply forward both operands to it.
    template <typename X, typename Y>
    static constexpr auto apply(X&& x, Y&& y) {
        return static_cast<X&&>(x) == static_cast<Y&&>(y);
    }
};
//////////////////////////////////////////////////////////////////////////
// Model for Constants wrapping a Comparable
//////////////////////////////////////////////////////////////////////////
template <typename C>
struct equal_impl<C, C, when<
    hana::Constant<C>::value &&
    Comparable<typename C::value_type>::value
>> {
    // Compare the wrapped compile-time values and lift the boolean result
    // back into a compile-time hana::bool_.
    template <typename X, typename Y>
    static constexpr auto apply(X const&, Y const&) {
        constexpr bool truth_value =
            hana::if_(hana::equal(hana::value<X>(), hana::value<Y>()), true, false);
        return hana::bool_<truth_value>{};
    }
};
//////////////////////////////////////////////////////////////////////////
// Comparable for Products
//////////////////////////////////////////////////////////////////////////
template <typename T, typename U>
struct equal_impl<T, U, when<hana::Product<T>::value && hana::Product<U>::value>> {
    // Two products are equal iff both of their components compare equal.
    template <typename X, typename Y>
    static constexpr auto apply(X const& x, Y const& y) {
        auto firsts_equal  = hana::equal(hana::first(x),  hana::first(y));
        auto seconds_equal = hana::equal(hana::second(x), hana::second(y));
        return hana::and_(firsts_equal, seconds_equal);
    }
};
//////////////////////////////////////////////////////////////////////////
// Comparable for Sequences
//////////////////////////////////////////////////////////////////////////
namespace detail {
    // Element-wise comparison of two finite sequences of length 'Length'.
    // apply<i>(done, equal_so_far) is dispatched on two flags:
    //   - first argument: hana::true_ when index i is one past the end;
    //   - second argument: the result of comparing element i-1, which is
    //     either a compile-time hana::bool_ or a runtime bool.
    template <typename Xs, typename Ys, std::size_t Length>
    struct compare_finite_sequences {
        Xs const& xs;
        Ys const& ys;
        // Not done, and everything so far is compile-time true:
        // compare element i and recurse to i+1.
        template <std::size_t i>
        constexpr auto apply(hana::false_, hana::true_) const {
            return compare_finite_sequences::apply<i+1>(
                hana::bool_<i+1 == Length>{},
                hana::if_(hana::equal(hana::at_c<i>(xs), hana::at_c<i>(ys)),
                          hana::true_c, hana::false_c)
            );
        }
        // A compile-time false element comparison short-circuits the
        // whole recursion to compile-time false.
        template <std::size_t i>
        constexpr auto apply(hana::false_, hana::false_) const
        { return hana::false_c; }
        // All elements processed: the accumulated result is the answer.
        template <std::size_t i, typename Result>
        constexpr auto apply(hana::true_, Result r) const
        { return r; }
        // Runtime comparison result: short-circuit with && and recurse.
        template <std::size_t i>
        constexpr bool apply(hana::false_, bool b) const {
            return b && compare_finite_sequences::apply<i+1>(
                hana::bool_<i+1 == Length>{},
                hana::if_(hana::equal(hana::at_c<i>(xs), hana::at_c<i>(ys)),
                          hana::true_c, hana::false_c)
            );
        }
    };
}
template <typename T, typename U>
struct equal_impl<T, U, when<Sequence<T>::value && hana::Sequence<U>::value>> {
template <typename Xs, typename Ys>
static constexpr auto apply(Xs const& xs, Ys const& ys) {
constexpr std::size_t xs_size = decltype(hana::length(xs))::value;
constexpr std::size_t ys_size = decltype(hana::length(ys))::value;
detail::compare_finite_sequences<Xs, Ys, xs_size> comp{xs, ys};
return comp.template apply<0>(hana::bool_<xs_size == 0>{},
hana::bool_<xs_size == ys_size>{});
}
};
namespace detail {
    // Predicate used by the Struct model below: given one (name, accessor)
    // pair, extract the member from both structs and compare.
    template <typename X, typename Y>
    struct compare_struct_members {
        X const& x;
        Y const& y;
        template <typename Member>
        constexpr auto operator()(Member&& member) const {
            auto get = hana::second(static_cast<Member&&>(member));
            return hana::equal(get(x), get(y));
        }
    };
}
template <typename S>
struct equal_impl<S, S, when<
hana::Struct<S>::value &&
!detail::EqualityComparable<S, S>::value
>> {
template <typename X, typename Y>
static constexpr auto apply(X const& x, Y const& y) {
return hana::all_of(hana::accessors<S>(),
detail::compare_struct_members<X, Y>{x, y});
}
};
}} // end namespace boost::hana
#endif // !BOOST_HANA_EQUAL_HPP
|
/**
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "core/PropertyValidation.h"
namespace org {
namespace apache {
namespace nifi {
namespace minifi {
namespace core {
// Constructs the shared registry of property validators. The string passed
// to each validator is the name reported to clients, which must match the
// names used by java nifi validators for compatibility.
StandardValidators::StandardValidators()
    : INVALID(std::make_shared<AlwaysValid>(false, "INVALID")),
      INTEGER_VALIDATOR(std::make_shared<IntegerValidator>("INTEGER_VALIDATOR")),
      UNSIGNED_INT_VALIDATOR(std::make_shared<UnsignedIntValidator>("NON_NEGATIVE_INTEGER_VALIDATOR")),
      LONG_VALIDATOR(std::make_shared<LongValidator>("LONG_VALIDATOR")),
      // name is used by java nifi validators, so we should keep this LONG and not change to reflect
      // its internal use
      UNSIGNED_LONG_VALIDATOR(std::make_shared<UnsignedLongValidator>("LONG_VALIDATOR")),
      BOOLEAN_VALIDATOR(std::make_shared<BooleanValidator>("BOOLEAN_VALIDATOR")),
      DATA_SIZE_VALIDATOR(std::make_shared<DataSizeValidator>("DATA_SIZE_VALIDATOR")),
      TIME_PERIOD_VALIDATOR(std::make_shared<TimePeriodValidator>("TIME_PERIOD_VALIDATOR")),
      NON_BLANK_VALIDATOR(std::make_shared<NonBlankValidator>("NON_BLANK_VALIDATOR")),
      VALID_VALIDATOR(std::make_shared<AlwaysValid>(true, "VALID")),
      PORT_VALIDATOR(std::make_shared<PortValidator>("PORT_VALIDATOR")),
      // NOTE(review): LISTEN_PORT_VALIDATOR reuses the "PORT_VALIDATOR"
      // name — presumably intentional for client-side compatibility; confirm.
      LISTEN_PORT_VALIDATOR(std::make_shared<ListenPortValidator>("PORT_VALIDATOR")) {}
} /* namespace core */
} /* namespace minifi */
} /* namespace nifi */
} /* namespace apache */
} /* namespace org */
|
/********************************************************************
* @file : time.hpp
* @author : zapline <zhuxianzhang@kingsoft.com>
* @date : 2012/11/19 20:37
* @brief :
*
*
*********************************************************************/
#ifndef _W_TIME_HPP_
#define _W_TIME_HPP_
#include "time_format.hpp"
#include "file_time.hpp"
#include "time_ini.hpp"
#include "time_interval.hpp"
#endif
|
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <map>
#include <iomanip>
#include <iostream>
#include <fstream>
#include "dali/operators/reader/loader/coco_loader.h"
#include "dali/pipeline/util/lookahead_parser.h"
namespace dali {
namespace detail {
// Metadata for one entry of the COCO "images" array.
// NOTE(review): members are left uninitialized; parse_image_infos() only
// fills the fields actually present in the JSON object — confirm inputs
// always carry all four keys.
struct ImageInfo {
  std::string filename_;  // JSON "file_name"
  int original_id_;       // JSON "id"
  int width_;             // JSON "width", in pixels
  int height_;            // JSON "height", in pixels
};
// Parsed COCO annotation record. Members are value-initialized so that a
// JSON object missing a key leaves well-defined values instead of
// uninitialized memory (previously IsOver() could read garbage).
struct Annotation {
  int image_id_ = -1;               // id of the image this annotation belongs to
  int category_id_ = -1;            // raw COCO category id (remapped later)
  std::array<float, 4> box_{};      // [x, y, w, h]; ToLtrb() makes it [l, t, r, b]
  std::vector<int> segm_meta_;      // per-polygon (start offset, length) pairs into segm_coords_
  std::vector<float> segm_coords_;  // flattened polygon coordinates
  // Convert the box from [x, y, w, h] to [left, top, right, bottom].
  void ToLtrb() {
    box_[2] += box_[0];
    box_[3] += box_[1];
  }
  // True when both width and height meet the minimum size threshold.
  // Must be called before ToLtrb(), while box_[2]/box_[3] still hold w/h.
  bool IsOver(float min_size_threshold) const {
    return box_[2] >= min_size_threshold && box_[3] >= min_size_threshold;
  }
};
// Serializes a flat vector to 'path' as: element count (unsigned) followed
// by the raw element payload. Counterpart of load_meta_file().
template <typename T>
void dump_meta_file(std::vector<T> &input, const std::string path) {
  std::ofstream file(path, std::ios_base::binary | std::ios_base::out);
  DALI_ENFORCE(file, "CocoReader meta file error while saving: " + path);
  const unsigned count = input.size();
  file.write(reinterpret_cast<const char*>(&count), sizeof(count));
  file.write(reinterpret_cast<const char*>(input.data()), count * sizeof(T));
}
// Serializes a vector-of-vectors to 'path': outer count first, then each
// inner vector as its count followed by its raw payload.
template <typename T>
void dump_meta_file(std::vector<std::vector<T> > &input, const std::string path) {
  std::ofstream file(path, std::ios_base::binary | std::ios_base::out);
  DALI_ENFORCE(file, "CocoReader meta file error while saving: " + path);
  unsigned count = input.size();
  file.write(reinterpret_cast<char*>(&count), sizeof(unsigned));
  for (auto &inner : input) {
    count = inner.size();
    file.write(reinterpret_cast<char*>(&count), sizeof(unsigned));
    file.write(reinterpret_cast<char*>(inner.data()), count * sizeof(T));
  }
}
// Writes one filename per line; ids are implicit line numbers on reload
// (see load_filenames). Uses '\n' instead of std::endl to avoid flushing
// the stream after every single line.
void dump_filenames(const ImageIdPairs &image_id_pairs, const std::string path) {
  std::ofstream file(path);
  DALI_ENFORCE(file, "CocoReader meta file error while saving: " + path);
  for (const auto &p : image_id_pairs) {
    file << p.first << '\n';
  }
}
// Reads a flat vector previously written by dump_meta_file: an unsigned
// element count followed by the raw element payload.
// BUG FIX: open in binary mode to match the writer — the default text mode
// performs newline translation on some platforms and corrupts the payload.
template <typename T>
void load_meta_file(std::vector<T> &output, const std::string path) {
  std::ifstream file(path, std::ios_base::binary | std::ios_base::in);
  DALI_ENFORCE(file, "CocoReader meta file error while loading for path: " + path);
  unsigned size;
  file.read(reinterpret_cast<char*>(&size), sizeof(unsigned));
  output.resize(size);
  file.read(reinterpret_cast<char*>(output.data()), size * sizeof(T));
}
// Reads a vector-of-vectors previously written by dump_meta_file: outer
// count, then each inner vector as its count followed by its payload.
// BUG FIX: open in binary mode to match the writer — the default text mode
// performs newline translation on some platforms and corrupts the payload.
template <typename T>
void load_meta_file(std::vector<std::vector<T> > &output, const std::string path) {
  std::ifstream file(path, std::ios_base::binary | std::ios_base::in);
  DALI_ENFORCE(file, "CocoReader meta file error while loading for path: " + path);
  unsigned size;
  file.read(reinterpret_cast<char*>(&size), sizeof(unsigned));
  output.resize(size);
  for (size_t i = 0; i < output.size(); ++i) {
    file.read(reinterpret_cast<char*>(&size), sizeof(unsigned));
    output[i].resize(size);
    file.read(reinterpret_cast<char*>(output[i].data()), size * sizeof(T));
  }
}
// Rebuilds (filename, id) pairs from a file written by dump_filenames;
// ids are assigned sequentially in line order, starting at 0.
void load_filenames(ImageIdPairs &image_id_pairs, const std::string path) {
  std::ifstream file(path);
  DALI_ENFORCE(file, "CocoReader meta file error while loading for path: " + path);
  std::string filename;
  for (int id = 0; file >> filename; ++id) {
    image_id_pairs.emplace_back(std::move(filename), id);
  }
}
// Parses the COCO "images" array; appends one ImageInfo per JSON object,
// skipping non-object array entries.
void parse_image_infos(LookaheadParser &parser, std::vector<ImageInfo> &image_infos) {
  RAPIDJSON_ASSERT(parser.PeekType() == kArrayType);
  parser.EnterArray();
  while (parser.NextArrayValue()) {
    if (parser.PeekType() != kObjectType) {
      continue;
    }
    parser.EnterObject();
    ImageInfo image_info;
    // Pull out the fields we need; every other key is skipped to keep the
    // parser's position consistent.
    while (const char* internal_key = parser.NextObjectKey()) {
      if (0 == detail::safe_strcmp(internal_key, "id")) {
        image_info.original_id_ = parser.GetInt();
      } else if (0 == detail::safe_strcmp(internal_key, "width")) {
        image_info.width_ = parser.GetInt();
      } else if (0 == detail::safe_strcmp(internal_key, "height")) {
        image_info.height_ = parser.GetInt();
      } else if (0 == detail::safe_strcmp(internal_key, "file_name")) {
        image_info.filename_ = parser.GetString();
      } else {
        parser.SkipValue();
      }
    }
    image_infos.emplace_back(std::move(image_info));
  }
}
// Parses the COCO "categories" array and builds a mapping from the raw
// (possibly sparse) category ids to contiguous ids starting at 1, in the
// order the categories appear in the file.
void parse_categories(LookaheadParser &parser, std::map<int, int> &category_ids) {
  RAPIDJSON_ASSERT(parser.PeekType() == kArrayType);
  parser.EnterArray();
  int id = -1;
  int new_id = 1;
  while (parser.NextArrayValue()) {
    if (parser.PeekType() != kObjectType) {
      continue;
    }
    id = -1;  // reset so a missing "id" key is detected below
    parser.EnterObject();
    while (const char* internal_key = parser.NextObjectKey()) {
      if (0 == detail::safe_strcmp(internal_key, "id")) {
        id = parser.GetInt();
      } else {
        parser.SkipValue();
      }
    }
    DALI_ENFORCE(id != -1, "Missing category ID in the JSON annotations file");
    category_ids.insert(std::make_pair(id, new_id));
    new_id++;
  }
}
// Parses the COCO "annotations" array. Each kept annotation carries the
// image id, the raw category id, the bbox ([x, y, w, h], converted to
// [l, t, r, b] when 'ltrb' is set) and, when 'read_masks' is set, polygon
// segmentation data. Annotations smaller than 'min_size_threshold' in
// either dimension, and RLE-encoded masks (iscrowd==1), are dropped.
void parse_annotations(
    LookaheadParser &parser,
    std::vector<Annotation> &annotations,
    float min_size_threshold,
    bool ltrb,
    bool read_masks) {
  RAPIDJSON_ASSERT(parser.PeekType() == kArrayType);
  parser.EnterArray();
  while (parser.NextArrayValue()) {
    detail::Annotation annotation;
    if (parser.PeekType() != kObjectType) {
      continue;
    }
    bool to_add = true;
    parser.EnterObject();
    while (const char* internal_key = parser.NextObjectKey()) {
      if (0 == detail::safe_strcmp(internal_key, "image_id")) {
        annotation.image_id_ = parser.GetInt();
      } else if (0 == detail::safe_strcmp(internal_key, "category_id")) {
        annotation.category_id_ = parser.GetInt();
      } else if (0 == detail::safe_strcmp(internal_key, "bbox")) {
        RAPIDJSON_ASSERT(parser.PeekType() == kArrayType);
        parser.EnterArray();
        int i = 0;
        while (parser.NextArrayValue()) {
          // ROBUSTNESS FIX: always consume the value, but guard the write
          // so a malformed bbox with more than 4 entries cannot overflow
          // the std::array.
          double coord = parser.GetDouble();
          if (i < 4) {
            annotation.box_[i] = coord;
          }
          ++i;
        }
      } else if (read_masks && 0 == detail::safe_strcmp(internal_key, "segmentation")) {
        // That means that the mask encoding is not polygons but RLE (iscrowd==1),
        // which is not needed for instance segmentation
        if (parser.PeekType() != kArrayType) {
          to_add = false;
          parser.SkipObject();
          break;
        }
        // Polygons: record (start offset, length) per polygon in
        // segm_meta_ and the flattened coordinates in segm_coords_.
        int coord_offset = 0;
        auto& segm_meta = annotation.segm_meta_;
        auto& segm_coords = annotation.segm_coords_;
        parser.EnterArray();
        while (parser.NextArrayValue()) {
          segm_meta.push_back(coord_offset);
          parser.EnterArray();
          while (parser.NextArrayValue()) {
            segm_coords.push_back(parser.GetDouble());
            coord_offset++;
          }
          segm_meta.push_back(coord_offset - segm_meta.back());
        }
      } else {
        parser.SkipValue();
      }
    }
    // BUG FIX: check 'to_add' before IsOver(). When an object is skipped
    // due to an RLE mask we may have broken out of the key loop before
    // "bbox" was parsed, so IsOver() would have inspected box_ values
    // that were never set.
    if (!to_add) {
      continue;
    }
    if (!annotation.IsOver(min_size_threshold)) {
      continue;
    }
    if (ltrb) {
      annotation.ToLtrb();
    }
    annotations.emplace_back(std::move(annotation));
  }
}
// Reads the whole annotations JSON file into memory and dispatches the
// top-level "images", "categories" and "annotations" sections to their
// dedicated parsers; every other top-level key is skipped.
void parse_json_file(
    const OpSpec &spec,
    std::vector<detail::ImageInfo> &image_infos,
    std::vector<detail::Annotation> &annotations,
    std::map<int, int> &category_ids) {
  const auto annotations_file = spec.GetArgument<string>("annotations_file");
  std::ifstream f(annotations_file);
  DALI_ENFORCE(f, "Could not open JSON annotations file");
  f.seekg(0, std::ios::end);
  size_t file_size = f.tellg();
  // IMPROVEMENT: a zero-initialized std::vector<char> is a simpler owner
  // than unique_ptr with a type-erased std::function deleter, and it
  // guarantees the slack byte(s) after the data read are '\0' instead of
  // uninitialized memory.
  std::vector<char> buff(file_size + 1);
  f.seekg(0, std::ios::beg);
  f.read(buff.data(), file_size);
  f.close();
  detail::LookaheadParser parser(buff.data());
  RAPIDJSON_ASSERT(parser.PeekType() == kObjectType);
  parser.EnterObject();
  while (const char* key = parser.NextObjectKey()) {
    if (0 == detail::safe_strcmp(key, "images")) {
      detail::parse_image_infos(parser, image_infos);
    } else if (0 == detail::safe_strcmp(key, "categories")) {
      detail::parse_categories(parser, category_ids);
    } else if (0 == detail::safe_strcmp(key, "annotations")) {
      parse_annotations(
        parser,
        annotations,
        spec.GetArgument<float>("size_threshold"),
        spec.GetArgument<bool>("ltrb"),
        spec.GetArgument<bool>("masks"));
    } else {
      parser.SkipValue();
    }
  }
}
} // namespace detail
void CocoLoader::DumpMetaFiles(const std::string path, const ImageIdPairs &image_id_pairs) {
detail::dump_meta_file(
offsets_,
path + "/offsets.dat");
detail::dump_meta_file(
boxes_,
path + "/boxes.dat");
detail::dump_meta_file(
labels_,
path + "/labels.dat");
detail::dump_meta_file(
counts_,
path + "/counts.dat");
detail::dump_filenames(
image_id_pairs,
path + "/filenames.dat");
if (read_masks_) {
detail::dump_meta_file(
masks_meta_,
path + "/masks_metas.dat");
detail::dump_meta_file(
masks_meta_,
path + "/masks_coords.dat");
}
if (save_img_ids_) {
detail::dump_meta_file(
original_ids_,
path + "/original_ids.dat");
}
}
void CocoLoader::ParseMetafiles() {
const auto meta_files_path = spec_.GetArgument<string>("meta_files_path");
detail::load_meta_file(
offsets_,
meta_files_path + "/offsets.dat");
detail::load_meta_file(
boxes_,
meta_files_path + "/boxes.dat");
detail::load_meta_file(
labels_,
meta_files_path + "/labels.dat");
detail::load_meta_file(
counts_,
meta_files_path + "/counts.dat");
detail::load_filenames(
image_label_pairs_,
meta_files_path + "/filenames.dat");
if (read_masks_) {
detail::load_meta_file(
masks_meta_,
meta_files_path + "/masks_metas.dat");
detail::load_meta_file(
masks_meta_,
meta_files_path + "/masks_coords.dat");
}
if (save_img_ids_) {
detail::load_meta_file(
original_ids_,
meta_files_path + "/original_ids.dat");
}
}
// Parses the COCO JSON annotations and fills the loader's flat per-sample
// tables: boxes_, labels_, offsets_, counts_, optional masks and ids.
void CocoLoader::ParseJsonAnnotations() {
  std::vector<detail::ImageInfo> image_infos;
  std::vector<detail::Annotation> annotations;
  std::map<int, int> category_ids;
  detail::parse_json_file(
    spec_,
    image_infos,
    annotations,
    category_ids);
  bool skip_empty = spec_.GetArgument<bool>("skip_empty");
  bool ratio = spec_.GetArgument<bool>("ratio");
  // Sort images by original id and annotations (stably) by image id so a
  // single linear merge pass below can match annotations to their image.
  std::sort(image_infos.begin(), image_infos.end(), [](auto &left, auto &right) {
    return left.original_id_ < right.original_id_;});
  std::stable_sort(annotations.begin(), annotations.end(), [](auto &left, auto &right) {
    return left.image_id_ < right.image_id_;});
  // Sentinel terminates the inner while loop below without a bounds check.
  // NOTE(review): the merge assumes every annotation's image_id_ matches
  // some entry in image_infos; an orphan annotation would stall the
  // cursor and detach all later annotations — confirm inputs guarantee this.
  detail::Annotation sentinel;
  sentinel.image_id_ = -1;
  annotations.emplace_back(std::move(sentinel));
  int new_image_id = 0;
  int annotation_id = 0;
  int total_count = 0;
  for (auto &image_info : image_infos) {
    int objects_in_sample = 0;
    std::vector<int> sample_mask_meta;
    std::vector<float> sample_mask_coords;
    // Consume every annotation belonging to this image.
    while (annotations[annotation_id].image_id_ == image_info.original_id_) {
      const auto &annotation = annotations[annotation_id];
      // Remap the raw category id to the contiguous 1-based label.
      labels_.emplace_back(category_ids[annotation.category_id_]);
      if (ratio) {
        // Normalize box coordinates by the image dimensions.
        boxes_.push_back(annotation.box_[0] / image_info.width_);
        boxes_.push_back(annotation.box_[1] / image_info.height_);
        boxes_.push_back(annotation.box_[2] / image_info.width_);
        boxes_.push_back(annotation.box_[3] / image_info.height_);
      } else {
        boxes_.push_back(annotation.box_[0]);
        boxes_.push_back(annotation.box_[1]);
        boxes_.push_back(annotation.box_[2]);
        boxes_.push_back(annotation.box_[3]);
      }
      if (read_masks_) {
        // Rebase each polygon's coordinate offsets to this sample's
        // flattened coordinate buffer.
        auto obj_coords_offset = sample_mask_coords.size();
        for (size_t i = 0; i < annotation.segm_meta_.size(); i += 2) {
          sample_mask_meta.push_back(objects_in_sample);
          sample_mask_meta.push_back(obj_coords_offset + annotation.segm_meta_[i]);
          sample_mask_meta.push_back(obj_coords_offset + annotation.segm_meta_[i + 1]);
        }
        sample_mask_coords.insert(sample_mask_coords.end(),
                                  annotation.segm_coords_.begin(),
                                  annotation.segm_coords_.end());
      }
      ++annotation_id;
      ++objects_in_sample;
    }
    // Keep the sample unless it is empty and skip_empty is requested.
    if (!skip_empty || objects_in_sample != 0) {
      offsets_.push_back(total_count);
      counts_.push_back(objects_in_sample);
      total_count += objects_in_sample;
      if (save_img_ids_) {
        original_ids_.push_back(image_info.original_id_);
      }
      if (read_masks_) {
        masks_meta_.emplace_back(std::move(sample_mask_meta));
        masks_coords_.emplace_back(std::move(sample_mask_coords));
      }
      image_label_pairs_.emplace_back(std::move(image_info.filename_), new_image_id);
      new_image_id++;
    }
  }
  if (spec_.GetArgument<bool>("dump_meta_files")) {
    DumpMetaFiles(
      spec_.GetArgument<std::string>("dump_meta_files_path"),
      image_label_pairs_);
  }
}
} // namespace dali
|
/*!
* @file
* @brief Stuff for representation of pieces of outgoing data.
*/
#pragma once
#include <arataga/acl_handler/buffers.hpp>
#include <arataga/utils/overloaded.hpp>
namespace arataga::acl_handler
{
/*!
* @brief Class for representation a single piece of data to be sent
* into a socket.
*
* The piece of data can be represented by std::string object, or by
* fmt::memory_buffer (in that case all memory_buffer value has to be
* borrowed into out_data_piece_t), or by std::string_view object
* (in that case no move/copy is necessary).
*
* An instance of out_data_piece_t can be used as generic buffer,
* just like instances of out_string_view_buffer_t or out_string_buffer_t.
*/
class out_data_piece_t
{
    // All supported representations of an outgoing piece of data.
    using piece_holder_t = std::variant<
            out_string_view_buffer_t,
            out_string_buffer_t,
            out_fmt_memory_buffer_t
        >;

    piece_holder_t m_piece;

public:
    // Wrap a view; no data is copied or moved.
    out_data_piece_t( std::string_view data )
        : m_piece{ out_string_view_buffer_t{ data } }
    {}
    // Take ownership of a string.
    out_data_piece_t( std::string data )
        : m_piece{ out_string_buffer_t{ std::move(data) } }
    {}
    // Take ownership of a fmt::memory_buffer.
    out_data_piece_t( fmt::memory_buffer data )
        : m_piece{ out_fmt_memory_buffer_t{ std::move(data) } }
    {}

    // Every alternative exposes the same buffer interface, so a single
    // generic lambda per operation replaces three identical overloads.

    //! Number of bytes not yet written.
    [[nodiscard]]
    std::size_t
    remaining() const noexcept
    {
        return std::visit(
                []( const auto & b ) { return b.remaining(); },
                m_piece );
    }

    //! Advance the write position by @a bytes.
    void
    increment_bytes_written( std::size_t bytes ) noexcept
    {
        std::visit(
                [bytes]( auto & b ) { b.increment_bytes_written( bytes ); },
                m_piece );
    }

    //! Asio view of the not-yet-written portion.
    [[nodiscard]]
    asio::const_buffer
    asio_buffer() const noexcept
    {
        return std::visit(
                []( const auto & b ) { return b.asio_buffer(); },
                m_piece );
    }
};
} /* namespace arataga::acl_handler */
|
/*
MIT License
Copyright (c) 2017 Alexander Zaitsev
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "logger.hpp"
std::shared_ptr<spdlog::logger> logger;
// Builds the path used for the log file. At the moment the "filename" is
// simply the working directory itself, unmodified.
// NOTE(review): presumably a file component is meant to be appended here
// eventually — confirm with callers.
std::string createLogFilename(const std::string& workingDir)
{
    std::string logPath = workingDir;
    return logPath;
}
|
//---------------------------------------------------------- -*- Mode: C++ -*-
// $Id: BufferManager.cc 385 2010-05-27 15:58:30Z sriramsrao $
//
// Created 2009/06/06
//
// Copyright 2009 Quantcast Corp.
//
// This file is part of Kosmos File System (KFS).
//
// Licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
//
//----------------------------------------------------------------------------
#include <algorithm>
#include "BufferManager.h"
#include "qcdio/qcutils.h"
#include "libkfsIO/NetManager.h"
#include "libkfsIO/Globals.h"
namespace KFS
{
// Constructs an idle client: no manager attached, no bytes held, nothing
// requested. Also initializes this client's intrusive wait-queue linkage.
BufferManager::Client::Client()
    : mManagerPtr(0),
      mByteCount(0),
      mWaitingForByteCount(0)
{
    WaitQueue::Init(*this);
}
// Constructs a buffer manager with all counters zeroed. When inEnabledFlag
// is false, Modify() becomes a no-op that always grants. Init() must still
// be called before the manager is used with a buffer pool.
BufferManager::BufferManager(
    bool inEnabledFlag /* = true */)
    : ITimeout(),
      mTotalCount(0),
      mMaxClientQuota(0),
      mRemainingCount(0),
      mWaitingByteCount(0),
      mGetRequestCount(0),
      mPutRequestCount(0),
      mClientsWihtBuffersCount(0),
      mMinBufferCount(0),
      mWaitingCount(0),
      mInitedFlag(false),
      mEnabledFlag(inEnabledFlag),
      mCounters()
{
    WaitQueue::Init(mWaitQueuePtr);
    mCounters.Clear();
}
// Destructor: no client may still be waiting (hard assert), and the manager
// unregisters itself from the global net manager's timeout dispatch.
BufferManager::~BufferManager()
{
    QCRTASSERT(WaitQueue::IsEmpty(mWaitQueuePtr));
    libkfsio::globalNetManager().UnRegisterTimeoutHandler(this);
}
// One-time initialization (asserts if called twice). Sets the pool, the
// total byte budget, the per-client quota (capped at the total), and the
// minimum free-buffer threshold, then registers for periodic Timeout()
// callbacks which drain the wait queue.
void
BufferManager::Init(
    QCIoBufferPool* inBufferPoolPtr,
    BufferManager::ByteCount inTotalCount,
    BufferManager::ByteCount inMaxClientQuota,
    int inMinBufferCount)
{
    QCRTASSERT(! mInitedFlag);
    mInitedFlag = true;
    mWaitingCount = 0;
    mWaitingByteCount = 0;
    mGetRequestCount = 0;
    mPutRequestCount = 0;
    mClientsWihtBuffersCount = 0;
    mBufferPoolPtr = inBufferPoolPtr;
    mTotalCount = inTotalCount;
    mRemainingCount = mTotalCount;
    mMinBufferCount = inMinBufferCount;
    // A single client may never be allowed more than the whole budget.
    mMaxClientQuota = std::min(mTotalCount, inMaxClientQuota);
    libkfsio::globalNetManager().RegisterTimeoutHandler(this);
}
// Adjusts a client's byte allocation by inByteCount. Negative counts
// release bytes (always succeed); positive counts are requests, which are
// either granted immediately (return true) or queued until Timeout() can
// satisfy them (return false). When disabled, everything is granted.
bool
BufferManager::Modify(
    BufferManager::Client& inClient,
    BufferManager::ByteCount inByteCount)
{
    if (! mEnabledFlag) {
        return true;
    }
    assert(inClient.mByteCount >= 0 && inClient.mWaitingForByteCount >= 0);
    assert(inClient.mManagerPtr ||
        inClient.mWaitingForByteCount + inClient.mByteCount == 0);
    assert(! inClient.mManagerPtr || inClient.mManagerPtr == this);
    assert(inClient.IsWaiting() || inClient.mWaitingForByteCount == 0);
    assert(mRemainingCount + inClient.mByteCount <= mTotalCount);
    const bool theHadBuffersFlag = inClient.mByteCount > 0;
    // Temporarily return the client's whole allocation to the pool; it is
    // re-deducted below once the new allocation size is known.
    mRemainingCount += inClient.mByteCount;
#if 0
    std::cout << reinterpret_cast<const void*>(&inClient) <<
    " mod: "     << inByteCount <<
    " used: "    << inClient.mByteCount <<
    " waiting: " << inClient.mWaitingForByteCount <<
    std::endl;
#endif
    if (inByteCount < 0) {
        // Release path: shrink the allocation, clamping at zero.
        mPutRequestCount++;
        inClient.mByteCount += inByteCount;
        if (inClient.mByteCount < 0) {
            inClient.mByteCount = 0;
        }
        mRemainingCount -= inClient.mByteCount;
        if (theHadBuffersFlag && inClient.mByteCount <= 0) {
            mClientsWihtBuffersCount--;
        }
        return true;
    }
    mCounters.mRequestCount++;
    mCounters.mRequestByteCount += inByteCount;
    mGetRequestCount++;
    inClient.mManagerPtr = this;
    // Total the client would hold if this request were fully granted.
    const ByteCount theReqCount =
        inClient.mWaitingForByteCount + inClient.mByteCount + inByteCount;
    // Grant only if the client is not already queued, and either the net
    // request is non-positive, or the pool is above its low-water mark,
    // the budget covers it, and the client stays within quota.
    const bool theGrantedFlag = ! inClient.IsWaiting() && (
        theReqCount <= 0 || (
            (! mBufferPoolPtr ||
                mBufferPoolPtr->GetFreeBufferCount() >= mMinBufferCount) &&
            theReqCount < mRemainingCount &&
            ! IsOverQuota(inClient)
        )
    );
    if (theGrantedFlag) {
        inClient.mByteCount = theReqCount;
        mRemainingCount -= theReqCount;
        mCounters.mRequestGrantedCount++;
        mCounters.mRequestGrantedByteCount += inByteCount;
    } else {
        mCounters.mRequestDeniedCount++;
        mCounters.mRequestDeniedByteCount += inByteCount;
        // If already waiting leave him in the same place in the queue.
        if (! inClient.IsWaiting()) {
            WaitQueue::PushBack(mWaitQueuePtr, inClient);
            mWaitingCount++;
        }
        mWaitingByteCount += inByteCount;
        // Re-deduct the bytes the client already holds.
        mRemainingCount -= inClient.mByteCount;
        inClient.mWaitingForByteCount += inByteCount;
    }
    assert(mRemainingCount >= 0 && mRemainingCount <= mTotalCount);
    assert(inClient.IsWaiting() || inClient.mWaitingForByteCount == 0);
    if (! theHadBuffersFlag && inClient.mByteCount > 0) {
        mClientsWihtBuffersCount++;
    }
    return theGrantedFlag;
}
// Fully detaches a client: cancels any pending request, removes it from the
// wait queue, and returns all bytes it holds to the pool. Safe to call on a
// client that was never registered.
void
BufferManager::Unregister(
    BufferManager::Client& inClient)
{
    if (! inClient.mManagerPtr) {
        return;
    }
    QCRTASSERT(inClient.mManagerPtr == this);
    if (IsWaiting(inClient)) {
        mWaitingCount--;
        mWaitingByteCount -= inClient.mWaitingForByteCount;
    }
    WaitQueue::Remove(mWaitQueuePtr, inClient);
    inClient.mWaitingForByteCount = 0;
    // Release everything the client still holds.
    Put(inClient, inClient.mByteCount);
    assert(! inClient.IsWaiting() && inClient.mByteCount == 0);
}
// Cancels a client's pending (not yet granted) request, if any. Bytes the
// client already holds are NOT released — use Put()/Unregister() for that.
void
BufferManager::CancelRequest(
    Client& inClient)
{
    if (! inClient.mManagerPtr) {
        return;
    }
    QCRTASSERT(inClient.mManagerPtr == this);
    if (! IsWaiting(inClient)) {
        assert(inClient.mWaitingForByteCount == 0);
        return;
    }
    WaitQueue::Remove(mWaitQueuePtr, inClient);
    mWaitingCount--;
    mWaitingByteCount -= inClient.mWaitingForByteCount;
    inClient.mWaitingForByteCount = 0;
}
// Returns true when the pool's free-buffer count has fallen below the
// configured minimum. Guards against a null pool pointer (Init() may not
// have run yet, or may have been given no pool); Timeout() treats a missing
// pool as "never low", and this mirrors that convention instead of
// dereferencing unconditionally as before.
bool
BufferManager::IsLowOnBuffers() const
{
    return (mBufferPoolPtr &&
        mBufferPoolPtr->GetFreeBufferCount() < mMinBufferCount);
}
// Periodic callback: while buffers are plentiful, grant queued requests in
// FIFO order, skipping clients that are over quota. Stops at the first
// eligible client whose request no longer fits in the remaining budget, so
// grants stay ordered rather than opportunistic.
/* virtual */ void
BufferManager::Timeout()
{
    while (! mBufferPoolPtr ||
            mBufferPoolPtr->GetFreeBufferCount() > mMinBufferCount) {
        WaitQueue::Iterator theIt(mWaitQueuePtr);
        Client* theClientPtr;
        while ((theClientPtr = theIt.Next())) {
            // Skip all that are over quota.
            if (! IsOverQuota(*theClientPtr)) {
                break;
            }
        }
        if (! theClientPtr ||
                theClientPtr->mWaitingForByteCount > mRemainingCount) {
            break;
        }
        WaitQueue::Remove(mWaitQueuePtr, *theClientPtr);
        mWaitingCount--;
        const ByteCount theGrantedCount = theClientPtr->mWaitingForByteCount;
        assert(theGrantedCount > 0);
        mRemainingCount -= theGrantedCount;
        assert(mRemainingCount <= mTotalCount);
        mWaitingByteCount -= theGrantedCount;
        // Client transitions from holding nothing to holding bytes.
        if (theClientPtr->mByteCount <= 0 && theGrantedCount > 0) {
            mClientsWihtBuffersCount++;
        }
        mCounters.mRequestGrantedCount++;
        mCounters.mRequestGrantedByteCount += theGrantedCount;
        theClientPtr->mByteCount += theGrantedCount;
        theClientPtr->mWaitingForByteCount = 0;
        // Notify the client that its request has been satisfied.
        theClientPtr->Granted(theGrantedCount);
    }
}
} /* namespace KFS */
|
#include <Fsa/Dfs.hh>
#include "BestCell.hh"
#include "EpsilonClosure.hh"
#include "FsaCellMap.hh"
#include "FullCell.hh"
#include "Path.hh"
#include "StartEndCell.hh"
namespace Permute {
// Finds all arcs that match a given label in the given automaton, and adds
// them to the given cell.
  class CellDfs : public Fsa::DfsState {
  protected:
    // Sentence position associated with the matched arcs.
    size_t index_;
    // Input label to match against each explored arc.
    Fsa::LabelId label_;
    // Cell that accumulates the paths built from matching arcs.
    CellRef cell_;
  public:
    CellDfs (Fsa::ConstAutomatonRef fsa, size_t index, Fsa::LabelId label, Cell * cell) :
      Fsa::DfsState (fsa),
      index_ (index),
      label_ (label),
      cell_ (cell)
    {}
    // For each arc whose input matches label_, adds a path to the cell.
    // Epsilon labels produce epsilon paths; all weights are negated when
    // converted to path scores.
    virtual void exploreArc (Fsa::ConstStateRef from, const Fsa::Arc & a) {
      if (a.input () == label_) {
	if (label_ == Fsa::Epsilon) {
	  cell_ -> add (Path::epsilon (from -> id (), a.target (), - float (a.weight ())));
	} else {
	  cell_ -> add (Path::arc (index_, from -> id (), a.target (), - float (a.weight ())));
	}
      }
    }
    // Tree and non-tree arcs are treated identically during the DFS.
    virtual void exploreTreeArc (Fsa::ConstStateRef from, const Fsa::Arc & a) {
      exploreArc (from, a);
    }
    virtual void exploreNonTreeArc (Fsa::ConstStateRef from, const Fsa::Arc & a) {
      exploreArc (from, a);
    }
    // Returns the cell populated by the traversal.
    virtual CellRef getCell () {
      return cell_;
    }
  };
/**********************************************************************/
  // Constructs the map for a given automaton and precomputes the epsilon
  // closure: all epsilon arcs are collected via CellDfs and closed once,
  // so operator() can reuse the result for every label lookup.
  FsaCellMap::FsaCellMap (Fsa::ConstAutomatonRef fsa) :
    fsa_ (fsa)
  {
    CellDfs dfs (fsa, 0, Fsa::Epsilon, new FullCell);
    dfs.dfs ();
    closure_ = Permute::epsilonClosure (dfs.getCell ());
  }
// First, uses CellDfs to find all arcs matching the given label. Then,
// computes the epsilon-closure of those arcs by combining them with the
// epsilon paths stored in closure_ using Cell::build. Adds all the closure
// paths to the cell and returns it.
  // Collects the arcs matching (index, label), then extends them with the
  // precomputed epsilon closure on both sides via Cell::build before
  // merging the closure paths back into the returned cell.
  ConstCellRef FsaCellMap::operator () (size_t index, Fsa::LabelId label) {
    CellDfs dfs (fsa_, index, label, new FullCell);
    dfs.dfs ();
    CellRef cell = dfs.getCell ();
    CellRef closure (new FullCell);
    Cell::build (closure, cell, closure_, 0.0, false, Path::NEITHER);
    Cell::build (closure, cell, closure_, 0.0, false, Path::KEEP);
    for (Cell::PathIterator path = closure -> begin (); path != closure -> end (); ++ path) {
      cell -> add (* path);
    }
    return cell;
  }
  // Accessor for the epsilon closure computed in the constructor.
  ConstCellRef FsaCellMap::epsilonClosure () const {
    return closure_;
  }
// Returns an FsaCellMap and a StartEndCell constructed from the given automaton.
  // Factory: wraps the automaton in an FsaCellMap (ownership transferred to
  // the returned CellMapRef) paired with a StartEndCell sharing its closure.
  std::pair <CellMapRef, CellRef> FsaCellMap::mapFsa (Fsa::ConstAutomatonRef fsa) {
    FsaCellMap * cellMap = new FsaCellMap (fsa);
    return std::make_pair (CellMapRef (cellMap),
			   CellRef (new StartEndCell (fsa, cellMap -> closure_)));
  }
/**********************************************************************/
// Finds all arcs that match a given label in a given automaton and adss them
// to a BestCell with start and end states replaced with 0.
  class UnigramCellDfs : public CellDfs {
  public:
    UnigramCellDfs (Fsa::ConstAutomatonRef fsa, size_t index, Fsa::LabelId label) :
      CellDfs (fsa, index, label, new BestCell)
    {}
    // Like CellDfs::exploreArc, but collapses start/end states to 0 and
    // performs no special handling for epsilon labels.
    virtual void exploreArc (Fsa::ConstStateRef from, const Fsa::Arc & a) {
      if (a.input () == label_) {
	cell_ -> add (Path::arc (index_, 0, 0, - float (a.weight ())));
      }
    }
  };
/**********************************************************************/
  // Delegates to FsaCellMap, which also precomputes the epsilon closure
  // (unused by the unigram operator() below).
  UnigramFsaCellMap::UnigramFsaCellMap (Fsa::ConstAutomatonRef fsa) :
    FsaCellMap (fsa)
  {}
// Uses UnigramCellDfs to find all arcs matching the given label, and returns
// a cell containing them, with start and end state replaced by 0.
  // Returns the BestCell of state-collapsed arcs matching (index, label);
  // no epsilon-closure expansion is applied in the unigram case.
  ConstCellRef UnigramFsaCellMap::operator () (size_t index, Fsa::LabelId label) {
    UnigramCellDfs dfs (fsa_, index, label);
    dfs.dfs ();
    return dfs.getCell ();
  }
// Returns a UnigramFsaCellMap and a BestCell constructed from the given
// automaton.
//
// @bug Shouldn't this use a StartEndCell that takes into account the best
// final weight?
  // Factory: pairs a UnigramFsaCellMap with an empty BestCell (see the @bug
  // note above regarding final weights).
  std::pair <CellMapRef, CellRef> UnigramFsaCellMap::mapFsa (Fsa::ConstAutomatonRef fsa) {
    UnigramFsaCellMap * cellMap = new UnigramFsaCellMap (fsa);
    return std::make_pair (CellMapRef (cellMap),
			   CellRef (new BestCell));
  }
}
|
//////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2020 QMCPACK developers.
//
// File developed by: Peter Doak, doakpw@ornl.gov, Oak Ridge National Laboratory
//
// File created by: Peter Doak, doakpw@ornl.gov, Oak Ridge National Laboratory
//////////////////////////////////////////////////////////////////////////////////////
#include "catch.hpp"
#include "Message/Communicate.h"
#include "QMCDrivers/VMC/VMCDriverInput.h"
#include "QMCDrivers/VMC/VMCBatched.h"
#include "QMCDrivers/tests/ValidQMCInputSections.h"
#include "Particle/tests/MinimalParticlePool.h"
#include "QMCWaveFunctions/tests/MinimalWaveFunctionPool.h"
#include "QMCHamiltonians/tests/MinimalHamiltonianPool.h"
#include "Concurrency/Info.hpp"
#include "Concurrency/UtilityFunctions.hpp"
#include "Particle/SampleStack.h"
namespace qmcplusplus
{
namespace testing
{
/** Test fixture for VMCBatched driver setup.
 *
 *  Initializes the MPI-like communication controller once at construction
 *  and caches it; the actual driver-input / pool construction happens in
 *  testCalcDefaultLocalWalkers().
 */
class VMCBatchedTest
{
public:
  VMCBatchedTest()
  {
    Concurrency::OverrideMaxCapacity<> override(8);
    // Removed an unused local `Communicate* comm;` that shadowed nothing
    // and was never read — the controller is cached in comm_ directly.
    OHMMS::Controller->initialize(0, NULL);
    comm_ = OHMMS::Controller;
  }
  /** Parses a minimal batched-VMC input section and builds the particle,
   *  wavefunction, and hamiltonian pools from it. Failure surfaces as an
   *  exception/assert inside the pool builders.
   */
  void testCalcDefaultLocalWalkers()
  {
    using namespace testing;
    Concurrency::OverrideMaxCapacity<> override(8);
    Communicate* comm;
    OHMMS::Controller->initialize(0, NULL);
    comm = OHMMS::Controller;
    Libxml2Document doc;
    doc.parseFromString(valid_vmc_input_sections[valid_vmc_input_vmc_batch_index]);
    xmlNodePtr node = doc.getRoot();
    QMCDriverInput qmcdriver_input(3);
    qmcdriver_input.readXML(node);
    MinimalParticlePool mpp;
    ParticleSetPool particle_pool = mpp(comm);
    MinimalWaveFunctionPool wfp;
    WaveFunctionPool wavefunction_pool = wfp(comm, particle_pool);
    wavefunction_pool.setPrimary(wavefunction_pool.getWaveFunction("psi0"));
    MinimalHamiltonianPool mhp;
    HamiltonianPool hamiltonian_pool = mhp(comm, particle_pool, wavefunction_pool);
  }
private:
  // Cached communication controller, set in the constructor.
  Communicate* comm_;
};
} // namespace testing
// Smoke test: constructing the fixture and running the pool-building test
// must complete without throwing.
TEST_CASE("VMCBatched::calc_default_local_walkers", "[drivers]")
{
  using namespace testing;
  VMCBatchedTest vbt;
  vbt.testCalcDefaultLocalWalkers();
}
} // namespace qmcplusplus
|
//********************************
//*Author: Jackie Marcano        *
//********************************
#include <iostream>
#include <iomanip>
using namespace std;
//Draws the table
void table(int tableSize)
{
const int row = 13;
const int col = 13;
int nums[row][col];
for (int row = 0; row <= tableSize; row++)
{
for (int col = 0; col <= tableSize; col++)
{
nums[row][col] = row*col;
cout << setw(4) << nums[row][col];
}
cout << endl << endl;
}
}
//Asks user for size of array, then validates.
void sizeDetermination()
{
int tableSize = 0;
cout << "Enter size of multiplication table (Max 12): ";
cin >> tableSize;
while (tableSize > 12)
{
cout << "Invalid number. Please try again: ";
cin >> tableSize;
}
table(tableSize);
}
//Program entry point: prompts for a table size and prints the table.
int main()
{
	sizeDetermination();
	return 0;
}
|
/**
* Touhou Community Reliant Automatic Patcher
* Update module
*
* ----
*
* Digitally signed, automatic updates of thcrap itself.
*/
#include <thcrap.h>
#include <wincrypt.h>
#include "update.h"
#include "server.h"
#include "self.h"
// Buffer size for generated temporary file names (see self_tempname()).
#define TEMP_FN_LEN 41
// Base URL of the self-update server.
const std::string SELF_SERVER = "http://thcrap.thpatch.net/";
// Name of the version/netpath index file fetched from the server.
const char *NETPATHS_FN = "thcrap_update.js";
// printf-style pattern for the backup directory of the replaced build.
const char *PREFIX_BACKUP = "thcrap_old_%s";
// File-name prefix and extension for the downloaded update archive.
const char *PREFIX_NEW = "thcrap_new_";
const char *EXT_NEW = ".zip";
// Version targeted by the last self_update() run, as "0xYYYYMMDD".
static char update_version[sizeof("0x20010101")];
/// Download notification window
/// ----------------------------
/*
* Not meant to be particularly pretty, just to have at least something in
* that regard for now. The move to Qt is going to blow up the download size
* significantly, and while we do want to block for that download because of
* the message box we pop up at the end, we don't want to block without any
* visual indication to the user until an archive of multiple megabytes has
* finished downloading.
*
* Written using raw calls to Ye Olde Win32 API mainly for practice reasons.
* Windows 1.0-style dialog resource scripts are terrible if you want to do
* i18n and automatic layout, being just a simple flat list of widgets
* positioned using "dialog units" (which really are just a way of expressing
* pixels independent of font sizes). Therefore, I think we should stop using
* them even for the smallest of dialogs like this one. Just compare the
* lengths we'll then have to go to in order to simply *translate* them
* automatically (thcrap's dialog.c) with what we're doing here ourselves.
*
* The smartdlg naming scheme is because I'm thinking about writing some
* Win32-specific GUIs in the future that use these same ideas – being easy
* to translate and automatically adjusting their layout while still using as
* much of Ye Olde Win32 API as possible.
*
* ~ Nmlgc
*/
#define RECT_EXPAND(rect) rect.left, rect.top, rect.right, rect.bottom
// Shared state between the thread that requests the download dialog and the
// thread that owns it (self_window_create_and_run()).
struct smartdlg_state_t {
	// Signaled by the dialog thread once hWnd exists and is shown.
	HANDLE event_created = CreateEvent(nullptr, true, false, nullptr);
	DWORD thread_id;
	HFONT hFont;
	HWND hWnd;
	~smartdlg_state_t() {
		CloseHandle(event_created);
	}
};
// Minimal window procedure for the download notification dialog: handles
// close/destroy itself, delegates everything else to DefDlgProcW().
LRESULT CALLBACK smartdlg_proc(
	HWND hWnd, UINT uMsg, WPARAM wParam, LPARAM lParam
)
{
	switch(uMsg) {
	case WM_CLOSE: // Yes, these are not handled by DefDlgProc().
		DestroyWindow(hWnd);
		break;
	case WM_DESTROY:
		// Ends the message loop in self_window_create_and_run().
		PostQuitMessage(0);
		break;
	}
	return DefDlgProcW(hWnd, uMsg, wParam, lParam);
}
// Closes the dialog (if it was created) and frees its font. Safe to call
// even when the window never came up, since both handles are checked.
void smartdlg_close(smartdlg_state_t *state)
{
	assert(state);
	if(state->hWnd) {
		SendMessageW(state->hWnd, WM_CLOSE, 0, 0);
	}
	if(state->hFont) {
		DeleteObject(state->hFont);
	}
}
// Thread entry point: builds the "downloading..." notification window,
// signals state->event_created once it is visible, then runs the message
// loop until WM_QUIT. Layout is computed from the measured text rectangle
// plus font-height padding, centered on the work area.
DWORD WINAPI self_window_create_and_run(void *param)
{
	const char *TEXT =
		"A new build of the ${project} is being downloaded, please wait...";
	const size_t TEXT_SLOT = (size_t)TEXT;
	const char *text_final;
	auto state = (smartdlg_state_t *)param;
	assert(state);
	HMODULE hMod = GetModuleHandle(NULL);
	HDC hDC = GetDC(0);
	HWND label = NULL;
	DWORD wnd_style = WS_BORDER | WS_POPUP | WS_CAPTION;
	DWORD wnd_style_ex = WS_EX_TOPMOST | WS_EX_CLIENTEDGE | WS_EX_CONTROLPARENT | WS_EX_DLGMODALFRAME;
	RECT screen_rect = {0};
	RECT wnd_rect = {0};
	RECT label_rect = {0};
	LONG font_pad = 0;
	NONCLIENTMETRICSW nc_metrics = {sizeof(nc_metrics)};
	// Use the system message font; its height doubles as layout padding.
	if(SystemParametersInfoW(
		SPI_GETNONCLIENTMETRICS, sizeof(nc_metrics), &nc_metrics, 0
	)) {
		int height = nc_metrics.lfMessageFont.lfHeight;
		state->hFont = CreateFontIndirectW(&nc_metrics.lfMessageFont);
		font_pad = (height < 0 ? -height : height);
	}
	// Fall back to the full screen size if the work area is unavailable.
	if(!SystemParametersInfoW(SPI_GETWORKAREA, sizeof(RECT), &screen_rect, 0)) {
		screen_rect.right = GetSystemMetrics(SM_CXSCREEN);
		screen_rect.bottom = GetSystemMetrics(SM_CYSCREEN);
	}
	if(state->hFont) {
		SelectObject(hDC, state->hFont);
	}
	strings_strcat(TEXT_SLOT, TEXT);
	text_final = strings_replace(TEXT_SLOT, "${project}", PROJECT_NAME());
	// DT_CALCRECT only measures; nothing is drawn here.
	DrawText(hDC, text_final, -1, &label_rect, DT_CALCRECT);
	wnd_rect = label_rect;
	label_rect.left += font_pad;
	label_rect.top += font_pad;
	wnd_rect.right += font_pad * 2;
	wnd_rect.bottom += font_pad * 2;
	AdjustWindowRectEx(&wnd_rect, wnd_style, FALSE, wnd_style_ex);
	// Convert right/bottom to width/height, then center on the screen.
	wnd_rect.right -= wnd_rect.left;
	wnd_rect.bottom -= wnd_rect.top;
	wnd_rect.left = (screen_rect.right / 2) - (wnd_rect.right / 2);
	wnd_rect.top = (screen_rect.bottom / 2) - (wnd_rect.bottom / 2);
	state->hWnd = CreateWindowExU(
		wnd_style_ex, (LPSTR)WC_DIALOG, PROJECT_NAME(), wnd_style,
		RECT_EXPAND(wnd_rect), NULL, NULL, hMod, NULL
	);
	label = CreateWindowExU(
		WS_EX_NOPARENTNOTIFY, "Static", text_final, WS_CHILD | WS_VISIBLE,
		RECT_EXPAND(label_rect), state->hWnd, NULL, hMod, NULL
	);
	SetWindowLongPtrW(state->hWnd, GWLP_WNDPROC, (LPARAM)smartdlg_proc);
	if(state->hFont) {
		SendMessageW(state->hWnd, WM_SETFONT, (WPARAM)state->hFont, 0);
		SendMessageW(label, WM_SETFONT, (WPARAM)state->hFont, 0);
	}
	ShowWindow(state->hWnd, SW_SHOW);
	UpdateWindow(state->hWnd);
	// Unblock the thread waiting for the window in self_update().
	SetEvent(state->event_created);
	// We must run this in the same thread anyway, so we might as well
	// combine creation and the message loop into the same function.
	MSG msg;
	BOOL msg_ret;
	while((msg_ret = GetMessage(&msg, nullptr, 0, 0)) != 0) {
		if(msg_ret != -1) {
			TranslateMessage(&msg);
			DispatchMessage(&msg);
		}
	}
	return msg.wParam;
}
/// ----------------------------
// A superior GetTempFileName. Fills [fn] with [len] / 2 random bytes printed
// as their hexadecimal representation. Returns a pointer to the final \0 at
// the end of the file name.
// (See the contract comment above.) Fix: hCryptProv is now zero-initialized
// and CryptReleaseContext() is only called when CryptAcquireContext()
// actually succeeded — previously an uninitialized handle was released
// whenever the CSP was unavailable.
static char* self_tempname(char *fn, size_t len, const char *prefix)
{
	char* p = fn;
	size_t prefix_len = prefix ? strlen(prefix) : 0;
	if(fn && len > 8 && prefix && prefix_len < len - 1) {
		HCRYPTPROV hCryptProv = 0;
		size_t rnd_num = (len - 1) / 2;
		VLA(BYTE, rnd, rnd_num);
		size_t i = 0;
		ZeroMemory(fn, len);
		auto ret = W32_ERR_WRAP(CryptAcquireContext(
			&hCryptProv, NULL, NULL, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT
		));
		if(!ret) {
			CryptGenRandom(hCryptProv, rnd_num, (BYTE*)rnd);
		} else {
			// No CSP available: fall back to a weak but serviceable
			// mix of the performance counter and the process ID.
			LARGE_INTEGER t;
			QueryPerformanceCounter(&t);
			t.HighPart ^= GetCurrentProcessId();
			for(i = 0; i < rnd_num / sizeof(t); i++) {
				memcpy(rnd + (i * sizeof(t)), &t, sizeof(t));
			}
		}
		for(i = 0; i < rnd_num; i++) {
			p += sprintf(p, "%02x", rnd[i]);
		}
		// The prefix deliberately overwrites the start of the hex string.
		memcpy(fn, prefix, prefix_len);
		VLA_FREE(rnd);
		if(!ret) {
			CryptReleaseContext(hCryptProv, 0);
		}
	}
	return p;
}
// Retrieves the certificate context of the first resolvable signer of the
// module containing this code, for later public-key import. Returns 0 on
// success, a Win32 error (or -1) otherwise.
//
// Fixes: the three `&param_len` arguments had been corrupted into the HTML
// entity sequence `¶m_len` (&para;m_len), which does not compile; and
// signer_info leaked when the second CryptMsgGetParam() call failed.
static int self_pubkey_from_signer(PCCERT_CONTEXT *context)
{
	int ret = -1;
	HMODULE self_mod = GetModuleContaining((void*)(uintptr_t)self_pubkey_from_signer);
	HCERTSTORE hStore = NULL;
	HCRYPTMSG hMsg = NULL;
	DWORD msg_type = 0;
	DWORD param_len;
	DWORD signer_num;
	DWORD i;
	if(!context || !self_mod) {
		return -1;
	}
	{
		// CryptQueryObject() forces us to use the W version, but only
		// our U version can calculate the length of the string, so...
		size_t self_fn_len = GetModuleFileNameU(self_mod, NULL, 0) + 1;
		VLA(wchar_t, self_fn, self_fn_len * UTF8_MUL);
		VLA(char, self_fn_utf8, self_fn_len);
		GetModuleFileNameU(self_mod, self_fn_utf8, self_fn_len);
		GetModuleFileNameW(self_mod, self_fn, self_fn_len);
		log_printf(
			"Retrieving public key from the signer certificate of %s... ", self_fn_utf8
		);
		ret = W32_ERR_WRAP(CryptQueryObject(
			CERT_QUERY_OBJECT_FILE, self_fn,
			CERT_QUERY_CONTENT_FLAG_PKCS7_SIGNED_EMBED,
			CERT_QUERY_FORMAT_FLAG_BINARY,
			0, &msg_type, NULL, NULL, &hStore, &hMsg, NULL
		));
		VLA_FREE(self_fn_utf8);
		VLA_FREE(self_fn);
	}
	if(ret) {
		goto end;
	}
	ret = W32_ERR_WRAP(CryptMsgGetParam(
		hMsg, CMSG_SIGNER_COUNT_PARAM, 0, &signer_num, &param_len
	));
	if(ret) {
		goto end;
	}
	// Walk the signers until one yields a certificate from the store.
	for(i = 0; i < signer_num && !(*context); i++) {
		PCERT_INFO signer_info = NULL;
		ret = W32_ERR_WRAP(CryptMsgGetParam(
			hMsg, CMSG_SIGNER_INFO_PARAM, i, NULL, &param_len
		));
		if(ret) {
			continue;
		}
		signer_info = (PCERT_INFO)malloc(param_len);
		// MANDATORY
		ZeroMemory(signer_info, param_len);
		ret = W32_ERR_WRAP(CryptMsgGetParam(
			hMsg, CMSG_SIGNER_CERT_INFO_PARAM, i, signer_info, &param_len
		));
		if(ret) {
			// Don't leak the buffer when this signer can't be read.
			SAFE_FREE(signer_info);
			continue;
		}
		*context = CertGetSubjectCertificateFromStore(
			hStore, msg_type, signer_info
		);
		ret = *context ? 0 : GetLastError();
		SAFE_FREE(signer_info);
	}
	log_printf(ret ? "not found\n" : "OK\n");
end:
	CertCloseStore(hStore, 0);
	CryptMsgClose(hMsg);
	return ret;
}
// Prints at most [len] characters of the value of [hHash] into [buf]. Returns
// a pointer to the final \0 at the end of the printed hash, or [buf] in case
// the hash couldn't be written.
char* self_sprint_hash(char *buf, size_t len, HCRYPTHASH hHash)
{
	char *p = buf;
	DWORD hash_len = 0;
	DWORD hash_len_len = sizeof(hash_len);
	auto crypt_get_hash_param = [&] (DWORD param, BYTE *buf, DWORD *len) {
		return W32_ERR_WRAP(CryptGetHashParam(hHash, param, buf, len, 0));
	};
	if(crypt_get_hash_param(HP_HASHSIZE, (BYTE*)&hash_len, &hash_len_len)) {
		// Contract (see comment above): return [buf] when the hash can't
		// be written. The previous `return 0;` broke callers like
		// self_update(), which compare the result against the input
		// pointer and then memcpy() through the returned pointer —
		// a NULL here meant a guaranteed crash on that path.
		return buf;
	}
	// Since we can't pass a nullptr as the length parameter of
	// CryptGetHashParam(), we might as well get the whole hash upfront and
	// merely truncate the string output.
	VLA(BYTE, hash_val, hash_len);
	if(!crypt_get_hash_param(HP_HASHVAL, hash_val, &hash_len)) {
		size_t bytes_in_suffix = (len - 1) / 2;
		size_t copy_len = min(bytes_in_suffix, hash_len) & ~1;
		for(size_t i = 0; i < copy_len; i++) {
			p += sprintf(p, "%02x", hash_val[i]);
		}
	}
	VLA_FREE(hash_val);
	return p;
}
// Verifies [sig] (a Base64 string) against [file_buf] using the given
// public key and hash algorithm. Fills *hHash with the hash object so the
// caller can reuse it (e.g. for file naming); the caller owns its cleanup.
// Returns 0 when the signature is valid, nonzero otherwise.
static int self_verify_buffer(
	HCRYPTHASH *hHash,
	const void *file_buf,
	const size_t file_len,
	const json_t *sig,
	HCRYPTPROV hCryptProv,
	HCRYPTKEY hPubKey,
	ALG_ID hash_alg
)
{
	int ret = -1;
	const size_t sig_base64_len = json_string_length(sig);
	const char *sig_base64 = json_string_value(sig);
	DWORD i, j;
	DWORD sig_len = 0;
	BYTE *sig_buf = NULL;
	assert(hHash);
	if(!file_buf || !file_len || !sig_base64 || !sig_base64_len) {
		goto end;
	}
	// First call sizes the buffer, second call decodes into it.
	ret = W32_ERR_WRAP(CryptStringToBinaryA(
		sig_base64, sig_base64_len, CRYPT_STRING_BASE64, NULL, &sig_len, NULL, NULL
	));
	if(!ret) {
		sig_buf = (BYTE *)malloc(sig_len);
		ret = W32_ERR_WRAP(CryptStringToBinaryA(
			sig_base64, sig_base64_len, CRYPT_STRING_BASE64, sig_buf, &sig_len, NULL, NULL
		));
	}
	if(ret) {
		log_printf("invalid Base64 string\n");
		goto end;
	}
	// Reverse the signature...
	// (http://www.ruiandrebatista.com/windows-crypto-api-nightmares-rsa-signature-padding-and-byte-order-ramblings)
	for(i = 0, j = sig_len - 1; i < j; i++, j--) {
		BYTE t = sig_buf[i];
		sig_buf[i] = sig_buf[j];
		sig_buf[j] = t;
	}
	ret = W32_ERR_WRAP(CryptCreateHash(hCryptProv, hash_alg, 0, 0, hHash));
	if(ret) {
		log_printf("couldn't create hash object\n");
		goto end;
	}
	ret = W32_ERR_WRAP(CryptHashData(*hHash, (BYTE*)file_buf, file_len, 0));
	if(ret) {
		log_printf("couldn't hash the file data?!?\n");
		goto end;
	}
	ret = W32_ERR_WRAP(CryptVerifySignature(
		*hHash, sig_buf, sig_len, hPubKey, NULL, 0
	));
	log_printf(ret ? "invalid\n" : "valid\n");
end:
	SAFE_FREE(sig_buf);
	return ret;
}
// Maps a case-insensitive hash-algorithm name to its CryptoAPI ALG_ID.
// Returns 0 for a null pointer or an unsupported algorithm.
static ALG_ID self_alg_from_str(const char *hash)
{
	if(!hash) {
		return 0;
	}
	if(!stricmp(hash, "SHA1")) {
		return CALG_SHA1;
	}
	if(!stricmp(hash, "SHA256")) {
		return CALG_SHA_256;
	}
	return 0;
}
// We can't directly create and release the HCRYPTPROV in this function because
// calling CryptReleaseContext() invalidates *all* objects that were created
// from this CSP handle.
// Verifies the downloaded archive against the "sig"/"alg" members of [sig]
// using the signer certificate's public key. *hHash receives the hash
// object created during verification (caller destroys it). Returns SELF_OK
// on a valid signature, or the specific SELF_* failure code.
static self_result_t self_verify(
	HCRYPTPROV hCryptProv,
	HCRYPTHASH *hHash,
	const void *zip_buf,
	size_t zip_len,
	json_t *sig,
	PCCERT_CONTEXT context
)
{
	assert(hCryptProv);
	const json_t *sig_sig = json_object_get(sig, "sig");
	const char *sig_alg = json_object_get_string(sig, "alg");
	const char *key = NULL;
	json_t *val = NULL;
	ALG_ID hash_alg = self_alg_from_str(sig_alg);
	HCRYPTKEY hPubKey = 0;
	if(!zip_buf || !zip_len || !json_is_string(sig_sig) || !context || !sig_alg) {
		return SELF_NO_SIG;
	}
	log_printf("Verifying archive signature... ");
	if(!hash_alg) {
		log_func_printf("Unsupported hash algorithm ('%s')!\n", sig_alg);
		return SELF_NO_SIG;
	}
	if(W32_ERR_WRAP(CryptImportPublicKeyInfo(
		hCryptProv, X509_ASN_ENCODING | PKCS_7_ASN_ENCODING,
		&context->pCertInfo->SubjectPublicKeyInfo, &hPubKey
	))) {
		log_func_printf("Invalid public key!\n");
		return SELF_NO_PUBLIC_KEY;
	}
	return self_verify_buffer(
		hHash, zip_buf, zip_len, sig_sig, hCryptProv, hPubKey, hash_alg
	) ? SELF_SIG_FAIL : SELF_OK;
}
// Moves [fn] into [dst_dir]/[fn], creating intermediate directories as
// needed. Returns the Win32 error of MoveFile() (0 on success). The
// RemoveDirectory() call cleans up [fn] if it was a now-empty directory;
// its failure for regular files is expected and ignored.
static int self_move_to_dir(const char *dst_dir, const char *fn)
{
	int ret;
	size_t full_fn_len = strlen(dst_dir) + 1 + strlen(fn) + 1;
	VLA(char, full_fn, full_fn_len);
	sprintf(full_fn, "%s/%s", dst_dir, fn);
	dir_create_for_fn(full_fn);
	ret = W32_ERR_WRAP(MoveFile(fn, full_fn));
	RemoveDirectory(fn);
	VLA_FREE(full_fn);
	return ret;
}
// Replaces the current engine files with the contents of [zip]: every file
// in the archive is first moved into a version-stamped backup directory,
// then extracted in place. Returns SELF_OK on success, SELF_REPLACE_ERROR
// if any move/unzip step fails (extraction stops at the first failure).
static self_result_t self_replace(zip_t *zip)
{
	self_result_t ret = SELF_REPLACE_ERROR;
	if(zip) {
		// + 1 for the underscore
		size_t prefix_backup_len = _scprintf(PREFIX_BACKUP, PROJECT_VERSION_STRING()) + 1 + 1;
		VLA(char, prefix_backup, prefix_backup_len);
		char backup_dir[TEMP_FN_LEN];
		const char *fn;
		json_t *val;
		size_t i;
		sprintf(prefix_backup, PREFIX_BACKUP, PROJECT_VERSION_STRING());
		// Reuse the plain versioned name if free, otherwise append an
		// underscore and a random suffix to avoid clobbering it.
		if(!PathFileExistsU(prefix_backup)) {
			strncpy(backup_dir, prefix_backup, sizeof(backup_dir));
		} else {
			strcat(prefix_backup, "_");
			self_tempname(backup_dir, sizeof(backup_dir), prefix_backup);
		}
		VLA_FREE(prefix_backup);
		// We don't error-check CreateDirectory(), as that might return
		// ERROR_ALREADY_EXISTS when directories are created recursively.
		// If this fails, writing should fail too.
		log_printf("Replacing engine...\n");
		CreateDirectoryU(backup_dir, NULL);
		json_object_foreach(zip_list(zip), fn, val) {
			int local_ret = self_move_to_dir(backup_dir, fn);
			// A file that doesn't exist locally yet needs no backup.
			if(
				local_ret == ERROR_FILE_NOT_FOUND
				|| local_ret == ERROR_PATH_NOT_FOUND
			) {
				local_ret = 0;
			}
			if(local_ret || zip_file_unzip(zip, fn)) {
				goto end;
			}
		}
		// Files the archive marks as empty are deleted outright.
		json_array_foreach(zip_list_empty(zip), i, val) {
			DeleteFileU(json_string_value(val));
		}
		ret = SELF_OK;
	}
end:
	return ret;
}
// Top-level self-update: checks the server's netpath index for a newer
// build on the current branch, resolves the next applicable milestone,
// downloads and signature-verifies the archive, writes it to disk, and
// swaps the engine files via self_replace(). On success (or any verified
// download) the archive file name is returned through *arc_fn_ptr
// (malloc'd, caller frees). All cleanup is handled via defer() so every
// early return unwinds correctly.
self_result_t self_update(const char *thcrap_dir, char **arc_fn_ptr)
{
	self_result_t ret;
	char arc_fn[TEMP_FN_LEN];
	zip_t *arc = NULL;
	PCCERT_CONTEXT context = NULL;
	HCRYPTPROV hCryptProv = 0;
	HCRYPTHASH hHash = 0;
	log_printf("Checking for engine updates...\n");
	auto [netpaths, netpaths_status] = ServerCache::get().downloadJsonFile(SELF_SERVER + NETPATHS_FN);
	if (!netpaths_status || !netpaths) {
		log_printf("%s%s: %s\n", SELF_SERVER.c_str(), NETPATHS_FN, netpaths_status.toString().c_str());
		return SELF_VERSION_CHECK_ERROR;
	}
	const char* branch = PROJECT_BRANCH();
	json_t* branch_json = json_object_get(*netpaths, branch);
	if (!branch_json) {
		return SELF_NO_UPDATE;
	}
	auto latest_version = json_object_get_hex(branch_json, "version");
	if (!latest_version) {
		return SELF_NO_TARGET_VERSION;
	}
	if (latest_version <= PROJECT_VERSION()) {
		return SELF_NO_UPDATE;
	}
	// Since we already have downloaded NETPATHS_FN and we are trying to resolve the
	// next exact version, we might as well get the right path immediately
	const char* version_key;
	const json_t* value;
	const char* netpath = nullptr;
	auto target_version = latest_version;
	// Pick the smallest milestone that is still newer than this build, so
	// updates step through intermediate milestones instead of jumping.
	json_object_foreach(branch_json, version_key, value) {
		errno = 0;
		uint32_t milestone = strtoul(version_key, NULL, 16);
		// Check if the string isn't an hex
		if (errno > 0 || !milestone) {
			continue;
		}
		if (milestone > PROJECT_VERSION() && milestone < target_version) {
			netpath = json_string_value(value);
			target_version = milestone;
		}
	}
	if (target_version == latest_version) {
		netpath = json_object_get_string(branch_json, "latest");
	}
	// We know for sure which version we need
	str_hexdate_format(update_version, target_version);
	if (!netpath) {
		// If netpath is still null, branch_json is malformed.
		return SELF_INVALID_NETPATH;
	}
	// We are now trying an update
	smartdlg_state_t window;
	defer(smartdlg_close(&window));
	size_t cur_dir_len = GetCurrentDirectory(0, NULL) + 1;
	VLA(char, cur_dir, cur_dir_len);
	defer(VLA_FREE(cur_dir));
	GetCurrentDirectory(cur_dir_len, cur_dir);
	// Work inside the thcrap directory; restored on every return path.
	SetCurrentDirectory(thcrap_dir);
	defer(SetCurrentDirectory(cur_dir));
	if(W32_ERR_WRAP(CryptAcquireContext(
		&hCryptProv, NULL, NULL, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT
	))) {
		return SELF_NO_PUBLIC_KEY;
	}
	defer(CryptReleaseContext(hCryptProv, 0));
	if(self_pubkey_from_signer(&context)) {
		return SELF_NO_PUBLIC_KEY;
	}
	defer(CertFreeCertificateContext(context));
	// Show the progress dialog on its own thread; wait until it exists.
	CreateThread(
		nullptr, 0, self_window_create_and_run, &window, 0, &window.thread_id
	);
	WaitForSingleObject(window.event_created, INFINITE);
	auto [arc_dl, arc_dl_status] = ServerCache::get().downloadFile(SELF_SERVER + netpath);
	if(!arc_dl_status || arc_dl.empty()) {
		log_printf("%s%s: %s\n", SELF_SERVER.c_str(), netpath, arc_dl_status.toString().c_str());
		return SELF_SERVER_ERROR;
	}
	auto [sig, sig_status] = ServerCache::get().downloadJsonFile(SELF_SERVER + netpath + ".sig");
	if(!sig_status || !sig) {
		log_printf("%s%s%s: %s\n", SELF_SERVER.c_str(), netpath, ".sig", sig_status.toString().c_str());
		return SELF_NO_SIG;
	}
	ret = self_verify(hCryptProv, &hHash, arc_dl.data(), arc_dl.size(), *sig, context);
	defer(CryptDestroyHash(hHash));
	if(ret != SELF_OK) {
		return ret;
	}
	// Name the archive thcrap_new_<hash>.zip, falling back to a random
	// suffix if the hash couldn't be printed.
	auto prefix_new_len = strlen(PREFIX_NEW);
	const auto ext_new_len = strlen(EXT_NEW) + 1;
	char *suffix = arc_fn + prefix_new_len;
	size_t suffix_len = TEMP_FN_LEN - prefix_new_len - ext_new_len;
	memcpy(arc_fn, PREFIX_NEW, prefix_new_len);
	char *ext = self_sprint_hash(suffix, suffix_len, hHash);
	if(ext == suffix) {
		ext = self_tempname(suffix, suffix_len, "");
	}
	memcpy(ext, EXT_NEW, ext_new_len);
	if(file_write(arc_fn, arc_dl.data(), arc_dl.size())) {
		return SELF_DISK_ERROR;
	}
	if(arc_fn_ptr) {
		*arc_fn_ptr = (char *)malloc(TEMP_FN_LEN);
		memcpy(*arc_fn_ptr, arc_fn, TEMP_FN_LEN);
	}
	arc = zip_open(arc_fn);
	ret = self_replace(arc);
	zip_close(arc);
	// Keep the archive around for diagnosis if replacement failed.
	if(ret != SELF_REPLACE_ERROR) {
		DeleteFile(arc_fn);
	}
	return ret;
}
// Returns the version ("0xYYYYMMDD") that the last self_update() call
// targeted; the string is empty until self_update() has resolved one.
const char* self_get_target_version()
{
	return update_version;
}
|
// Copyright (c) 2020 ETH Zurich
//
// SPDX-License-Identifier: BSL-1.0
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#pragma once
#include <hpx/properties/property.hpp>
|
/*
This file is a part of the NVDA project.
URL: http://www.nvda-project.org/
Copyright 2006-2010 NVDA contributers.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2.0, as published by
the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
This license can be found at:
http://www.gnu.org/licenses/old-licenses/gpl-2.0.html
*/
#include <string>
#include <queue>
#include <mutex>
#include <crtdbg.h>
#include <remote/nvdaControllerInternal.h>
#include "nvdaHelperRemote.h"
#include <common/log.h>
// Pending (level, message) pairs queued by threads other than the inproc
// manager thread, flushed to NVDA from the manager thread via APC.
std::deque<std::tuple<int, std::wstring>> logQueue;
// Guards all access to logQueue.
std::mutex logQueueLock;
// Forward declare an APC function for flushing the log queue.
void __stdcall log_flushQueue_apcFunc(ULONG_PTR data);
// Fetch all available messages from the queue
// and send them onto NvDA via rpc.
// Drain the in-process log queue, forwarding every queued message to NVDA
// over RPC. Delivery must happen on the inproc manager thread: when called
// from any other thread this reschedules itself there via a queued APC, and
// when no manager thread exists yet it simply returns (the thread flushes
// on startup).
void log_flushQueue() {
	if(!inprocMgrThreadHandle) {
		// No manager thread yet; nothing to do until it starts.
		return;
	}
	if(GetCurrentThreadId() != GetThreadId(inprocMgrThreadHandle)) {
		// Wrong thread: hand the flush to the manager thread as an APC.
		QueueUserAPC(log_flushQueue_apcFunc, inprocMgrThreadHandle, 0);
		return;
	}
	// Take the pending messages under the lock, then send them lock-free so
	// RPC latency never holds up threads that are trying to enqueue.
	std::deque<std::tuple<int, std::wstring>> pending;
	{
		std::lock_guard lock{logQueueLock};
		pending.swap(logQueue);
	}
	for(const auto& [level, text] : pending) {
		nvdaControllerInternal_logMessage(level, GetCurrentProcessId(), text.c_str());
	}
}
// APC thunk so log_flushQueue can be scheduled on the manager thread via
// QueueUserAPC. The `data` argument is unused.
void __stdcall log_flushQueue_apcFunc(ULONG_PTR data) {
	log_flushQueue();
}
// Log a message at the given level.
// The text always goes to any attached debugger. Forwarding to NVDA over RPC
// must happen on the inproc manager thread; messages logged elsewhere (or
// before that thread exists) are queued and flushed later so app threads are
// never blocked by RPC.
void logMessage(int level, const wchar_t* msg) {
	// Always log to any connected debugger.
	OutputDebugString(msg);
	const bool onMgrThread = inprocMgrThreadHandle
		&& GetCurrentThreadId() == GetThreadId(inprocMgrThreadHandle);
	if(onMgrThread) {
		// Already on the manager thread: flush older queued messages first so
		// ordering is preserved, then send this one directly over RPC.
		log_flushQueue();
		nvdaControllerInternal_logMessage(level,GetCurrentProcessId(),msg);
		return;
	}
	// Queue the message for later fetching by the manager thread.
	{
		std::lock_guard lock{logQueueLock};
		logQueue.emplace_back(level, msg);
	}
	if(inprocMgrThreadHandle) {
		// Ask the manager thread to flush the queue as soon as it can.
		QueueUserAPC(log_flushQueue_apcFunc, inprocMgrThreadHandle, 0);
	}
}
// CRT report hook: routes CRT warnings, errors and assertion failures into
// NVDA's log. _CRT_ERROR and _CRT_ASSERT additionally trigger a debug break
// when a debugger is attached. Sets *returnValue to 0 and returns true to
// tell the CRT the report has been fully handled.
int NVDALogCrtReportHook(int reportType,const wchar_t *message,int *returnValue) {
	int level = LOGLEVEL_WARNING;
	bool breakRequested = false;
	if(reportType == _CRT_ERROR) {
		level = LOGLEVEL_ERROR;
		breakRequested = true;
	}
	if(reportType == _CRT_ASSERT) {
		level = LOGLEVEL_CRITICAL;
		breakRequested = true;
	}
	logMessage(level, message);
	if(breakRequested && IsDebuggerPresent()) {
		_CrtDbgBreak();
	}
	*returnValue = 0;
	return true;
}
|
#include "ros/ros.h"
#include <sstream>
#include <bits/stdc++.h>
#include <synchroniz/custommsg.h>
int main (int argc,char** argv) {
ros::init(argc, argv, "Node1");
ros::NodeHandle n;
ROS_INFO_STREAM("Node 1 has started running");
ros::Publisher pub=n.advertise<synchroniz::custommsg>("talker",5);
ros::Rate loop_rate(5);
while(ros::ok()){
synchroniz::custommsg msg;
msg.header.stamp=ros::Time::now();
msg.header.frame_id="/myworld";
std::stringstream ss;
ss<<"Synchronous ";
msg.st=ss.str();
pub.publish(msg);
ros::spinOnce();
loop_rate.sleep();
}
return 0;
}
|
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/toco/python/toco_python_api.h"
#include <fstream>
#include <map>
#include <string>
#include <vector>
#include "google/protobuf/text_format.h"
#include "tensorflow/c/kernels.h"
#include "tensorflow/compiler/mlir/lite/metrics/error_collector.h"
#include "tensorflow/compiler/mlir/lite/python/flatbuffer_to_mlir.h"
#include "tensorflow/compiler/mlir/lite/python/graphdef_to_tfl_flatbuffer.h"
#include "tensorflow/compiler/mlir/lite/python/jax_to_tfl_flatbuffer.h"
#include "tensorflow/compiler/mlir/lite/python/saved_model_to_tfl_flatbuffer.h"
#include "tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.h"
#include "tensorflow/compiler/mlir/lite/sparsity/sparsify_model.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/python/interpreter_wrapper/python_error_reporter.h"
#include "tensorflow/lite/python/interpreter_wrapper/python_utils.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/toco/import_tensorflow.h"
#include "tensorflow/lite/toco/logging/conversion_log_util.h"
#include "tensorflow/lite/toco/logging/toco_conversion_log.pb.h"
#include "tensorflow/lite/toco/model_flags.pb.h"
#include "tensorflow/lite/toco/toco_convert.h"
#include "tensorflow/lite/toco/toco_flags.pb.h"
#include "tensorflow/lite/toco/toco_graphviz_dump_options.h"
#include "tensorflow/lite/toco/toco_port.h"
#include "tensorflow/lite/toco/toco_tooling.h"
#include "tensorflow/lite/toco/toco_types.h"
#include "tensorflow/lite/toco/tooling_util.h"
#include "tensorflow/lite/toco/types.pb.h"
namespace toco {
using mlir::lite::StringSet;
// Dumps pre- and post-conversion TOCO conversion logs (plus graphviz output)
// under toco_flags->conversion_summary_dir() for conversion debugging.
// `input_contents_txt` is the serialized input graph and
// `output_file_contents_txt` the produced TFLite flatbuffer; both are
// re-imported as `toco::Model` instances purely to populate the logs.
// NOTE: mutates `toco_flags` (sets input_format to TFLITE) and
// `dump_options` (points dump_graphviz at the summary dir).
void PopulateConversionLogHelper(const toco::ModelFlags& model_flags,
                                 toco::TocoFlags* toco_flags,
                                 const std::string& input_contents_txt,
                                 const std::string& output_file_contents_txt,
                                 const std::string& error_message,
                                 GraphVizDumpOptions* dump_options) {
  // Make sure the graphviz file will be dumped under the same folder.
  dump_options->dump_graphviz = toco_flags->conversion_summary_dir();
  // Here we construct the `toco::Model` class based on the input graph def,
  // it will then be used to populate the conversion log.
  // TODO(haoliang): Don't depend on `toco::Model`.
  std::unique_ptr<toco::Model> imported_model =
      toco::Import(*toco_flags, model_flags, input_contents_txt);
  // Dump pre-conversion toco logs.
  TocoConversionLog toco_log_before;
  PopulateConversionLog(*imported_model, &toco_log_before);
  std::ofstream osstream_before(toco_flags->conversion_summary_dir() +
                                "/toco_log_before.pb");
  toco_log_before.SerializeToOstream(&osstream_before);
  osstream_before.close();
  toco::LogDump(toco::kLogLevelModelChanged, "tf_graph", *imported_model);
  // Populate the post-conversion log: re-import the generated flatbuffer as a
  // `toco::Model`, hence switching the expected input format to TFLITE.
  toco_flags->set_input_format(toco::FileFormat::TFLITE);
  std::unique_ptr<toco::Model> flatbuffer_model =
      toco::Import(*toco_flags, model_flags, output_file_contents_txt);
  // Dump post-conversion toco logs.
  TocoConversionLog toco_log_after;
  PopulateConversionLog(*flatbuffer_model, &toco_log_after);
  // Make sure we sanitize the error message.
  toco_log_after.set_toco_err_logs(SanitizeErrorMessage(error_message));
  std::ofstream ostream_after(toco_flags->conversion_summary_dir() +
                              "/toco_log_after.pb");
  toco_log_after.SerializeToOstream(&ostream_after);
  ostream_after.close();
  toco::LogDump(toco::kLogLevelModelChanged, "tflite_graph", *flatbuffer_model);
}
// NOTE(aselle): We are using raw PyObject's here because we want to make
// sure we input and output bytes rather than unicode strings for Python3.
// NOTE(aselle): We are using raw PyObject's here because we want to make
// sure we input and output bytes rather than unicode strings for Python3.
//
// Converts a model to a TFLite flatbuffer, driven from Python.
// Inputs are serialized ModelFlags/TocoFlags protos (bytes), the serialized
// input model, and an optional serialized GraphDebugInfo. Returns the
// flatbuffer as Python bytes, or (when `extended_return` is set and the
// legacy converter is used) a dict with the flatbuffer and an op count.
// On any failure a Python exception is set and nullptr is returned.
PyObject* TocoConvert(PyObject* model_flags_proto_txt_raw,
                      PyObject* toco_flags_proto_txt_raw,
                      PyObject* input_contents_txt_raw, bool extended_return,
                      PyObject* debug_info_txt_raw,
                      bool enable_mlir_converter) {
  // Use Python C API to validate and convert arguments. In py3 (bytes),
  // in py2 (str).
  auto ConvertArg = [&](PyObject* obj, bool* error) {
    char* buf;
    Py_ssize_t len;
    if (::tflite::python_utils::ConvertFromPyString(obj, &buf, &len) == -1) {
      *error = true;
      return std::string();
    } else {
      *error = false;
      return std::string(buf, len);
    }
  };
  bool error;
  std::string model_flags_proto_txt =
      ConvertArg(model_flags_proto_txt_raw, &error);
  if (error) {
    PyErr_SetString(PyExc_ValueError, "Model flags are invalid.");
    return nullptr;
  }
  std::string toco_flags_proto_txt =
      ConvertArg(toco_flags_proto_txt_raw, &error);
  if (error) {
    PyErr_SetString(PyExc_ValueError, "Toco flags are invalid.");
    return nullptr;
  }
  // Use TOCO to produce new outputs.
  toco::ModelFlags model_flags;
  if (!model_flags.ParseFromString(model_flags_proto_txt)) {
    PyErr_SetString(PyExc_ValueError,
                    "Failed to convert Model to Python String.");
    return nullptr;
  }
  toco::TocoFlags toco_flags;
  if (!toco_flags.ParseFromString(toco_flags_proto_txt)) {
    PyErr_SetString(PyExc_ValueError,
                    "Failed to convert Toco to Python String.");
    return nullptr;
  }
  // Debug info is optional; Py_None is treated the same as absent.
  tensorflow::GraphDebugInfo debug_info;
  if (debug_info_txt_raw && debug_info_txt_raw != Py_None) {
    std::string debug_info_txt = ConvertArg(debug_info_txt_raw, &error);
    if (error) {
      PyErr_SetString(PyExc_ValueError, "Input DebugInfo is invalid.");
      return nullptr;
    }
    if (!debug_info.ParseFromString(debug_info_txt)) {
      PyErr_SetString(PyExc_ValueError,
                      "Failed to convert DebugInfo to Python String.");
      return nullptr;
    }
  }
  // When converting from a SavedModel dir, the raw input contents are not
  // needed; otherwise validate them early (HLO input skips GraphDef parsing).
  tensorflow::GraphDef graph_def;
  std::string input_contents_txt;
  if (model_flags.saved_model_dir().empty()) {
    input_contents_txt = ConvertArg(input_contents_txt_raw, &error);
    if (error) {
      PyErr_SetString(PyExc_ValueError, "Input GraphDef is invalid.");
      return nullptr;
    }
    if (!model_flags.use_hlo_import() &&
        !graph_def.ParseFromString(input_contents_txt)) {
      PyErr_SetString(PyExc_ValueError,
                      "Failed to convert GraphDef to Python String.");
      return nullptr;
    }
  }
  auto& dump_options = *GraphVizDumpOptions::singleton();
  if (toco_flags.has_dump_graphviz_dir()) {
    dump_options.dump_graphviz = toco_flags.dump_graphviz_dir();
  }
  if (toco_flags.has_dump_graphviz_include_video()) {
    dump_options.dump_graphviz_video = toco_flags.dump_graphviz_include_video();
  }
  std::string output_file_contents_txt;
  tensorflow::Status status;
  // Only populated by the legacy (non-MLIR) converter below.
  int64_t arithmetic_ops_count;
  // Convert model.
  if (enable_mlir_converter) {
    if (model_flags.use_hlo_import() && model_flags.has_saved_model_dir()) {
      PyErr_SetString(PyExc_ValueError,
                      "Cannot specify both saved_model and hlo import.");
      return nullptr;
    }
    if (model_flags.use_hlo_import()) {
      status = tensorflow::ConvertJaxToTFLiteFlatBuffer(
          input_contents_txt, model_flags, toco_flags,
          &output_file_contents_txt);
    } else if (!model_flags.saved_model_dir().empty()) {
      status = tensorflow::ConvertSavedModelToTFLiteFlatBuffer(
          model_flags, toco_flags, &output_file_contents_txt);
    } else {
      tensorflow::GraphDef graph_def;
      if (!graph_def.ParseFromString(input_contents_txt)) {
        PyErr_SetString(PyExc_ValueError,
                        "Failed to convert GraphDef to Python String.");
        return nullptr;
      }
      status = tensorflow::ConvertGraphDefToTFLiteFlatBuffer(
          model_flags, toco_flags, debug_info, graph_def,
          &output_file_contents_txt);
      // Conversion logs are written even when the conversion itself failed,
      // so `status` is inspected only after logging.
      if (!toco_flags.conversion_summary_dir().empty()) {
        PopulateConversionLogHelper(
            model_flags, &toco_flags, input_contents_txt,
            output_file_contents_txt, status.error_message(), &dump_options);
      }
    }
  } else {
    status = Convert(input_contents_txt, toco_flags, model_flags,
                     &output_file_contents_txt, &arithmetic_ops_count);
  }
  if (!status.ok()) {
    PyErr_SetString(PyExc_Exception, status.error_message().c_str());
    return nullptr;
  }
  // The extended return (flatbuffer + op count) only exists for the legacy
  // converter, which is the only path that computes arithmetic_ops_count.
  if (extended_return && !enable_mlir_converter) {
    PyObject* dict = PyDict_New();
    PyDict_SetItemString(
        dict, "flatbuffer",
        ::tflite::python_utils::ConvertToPyString(
            output_file_contents_txt.data(), output_file_contents_txt.size()));
    // NOTE(review): PyLong_FromLong takes a C `long`, which is 32-bit on
    // Windows; a count above 2^31 would presumably truncate there — confirm
    // whether PyLong_FromLongLong is intended.
    PyDict_SetItemString(dict, "arithmetic_ops",
                         PyLong_FromLong(arithmetic_ops_count));
    return dict;
  }
  // Convert arguments back to byte (py3) or str (py2)
  return ::tflite::python_utils::ConvertToPyString(
      output_file_contents_txt.data(), output_file_contents_txt.size());
}
// Maps a toco::IODataType inference-type value onto the corresponding TFLite
// tensor type. Any unrecognized value falls back to FLOAT32.
tflite::TensorType FromTocoDataTypeToTflitToTensorType(int inference_type) {
  switch (inference_type) {
    case toco::IODataType::QUANTIZED_INT16:
      return tflite::TensorType_INT16;
    // Quantized and plain 8-bit unsigned both map to UINT8.
    case toco::IODataType::QUANTIZED_UINT8:
    case toco::IODataType::UINT8:
      return tflite::TensorType_UINT8;
    // Quantized and plain 8-bit signed both map to INT8.
    case toco::IODataType::QUANTIZED_INT8:
    case toco::IODataType::INT8:
      return tflite::TensorType_INT8;
    default:
      return tflite::TensorType_FLOAT32;
  }
}
// Fills `string_set` with the strings contained in `py_denylist`, which may
// be a Python list or set (or null, treated as empty).
// Returns 0 on success and -1 when an element cannot be converted to a
// string (a Python error will have been set by the converter).
int ToStringSet(PyObject* py_denylist, StringSet* string_set) {
  using tflite::python_utils::ConvertFromPyString;
  // Ensure op_denylist is non null
  if (!py_denylist) {
    return 0;
  }
  if (PyList_Check(py_denylist)) {
    for (Py_ssize_t i = 0; i < PyList_GET_SIZE(py_denylist); ++i) {
      // PyList_GetItem returns a borrowed reference; no decref needed.
      PyObject* value = PyList_GetItem(py_denylist, i);
      char* str_buf;
      Py_ssize_t length;
      if (ConvertFromPyString(value, &str_buf, &length) == -1) {
        return -1;
      }
      string_set->emplace(str_buf, length);
    }
  }
  if (PySet_Check(py_denylist)) {
    // Copy the set so it can be destructively popped. PySet_New and
    // PySet_Pop both return NEW references, which must be released here to
    // avoid leaking the copied set and every popped element.
    PyObject* tmp = PySet_New(py_denylist);
    if (!tmp) {
      return -1;
    }
    while (PySet_GET_SIZE(tmp)) {
      PyObject* value = PySet_Pop(tmp);
      char* str_buf;
      Py_ssize_t length;
      if (ConvertFromPyString(value, &str_buf, &length) == -1) {
        Py_DECREF(value);
        Py_DECREF(tmp);
        return -1;
      }
      string_set->emplace(str_buf, length);
      Py_DECREF(value);
    }
    Py_DECREF(tmp);
  }
  return 0;
}
// Quantizes a TFLite flatbuffer (passed as Python bytes in `data`) using the
// MLIR quantizer and returns the quantized flatbuffer as Python bytes.
// `inference_type`, `input_data_type` and `output_data_type` are
// toco::IODataType values; `op_denylist`/`node_denylist` are optional Python
// lists or sets of names to exclude from quantization.
// Returns nullptr with a Python exception set on failure.
PyObject* MlirQuantizeModel(PyObject* data, bool disable_per_channel,
                            bool fully_quantize, int inference_type,
                            int input_data_type, int output_data_type,
                            bool enable_numeric_verify,
                            bool enable_whole_model_verify,
                            PyObject* op_denylist, PyObject* node_denylist) {
  using tflite::interpreter_wrapper::PythonErrorReporter;
  char* buf = nullptr;
  Py_ssize_t length;
  std::unique_ptr<PythonErrorReporter> error_reporter(new PythonErrorReporter);
  if (tflite::python_utils::ConvertFromPyString(data, &buf, &length) == -1) {
    PyErr_Format(PyExc_ValueError, "Failed to convert input PyObject");
    return nullptr;
  }
  StringSet denylisted_ops;
  StringSet denylisted_nodes;
  if (ToStringSet(op_denylist, &denylisted_ops) == -1) {
    PyErr_Format(PyExc_ValueError, "Failed to convert op denylist PyObject");
    return nullptr;
  }
  if (ToStringSet(node_denylist, &denylisted_nodes) == -1) {
    PyErr_Format(PyExc_ValueError, "Failed to convert node denylist PyObject");
    return nullptr;
  }
  std::unique_ptr<tflite::FlatBufferModel> model =
      tflite::FlatBufferModel::BuildFromBuffer(buf, length,
                                               error_reporter.get());
  if (!model) {
    PyErr_Format(PyExc_ValueError, "Invalid model");
    return nullptr;
  }
  // Unpack the flatbuffer into a mutable object representation for the
  // quantizer.
  auto tflite_model = absl::make_unique<tflite::ModelT>();
  model->GetModel()->UnPackTo(tflite_model.get(), nullptr);
  tflite::TensorType inference_tensor_type =
      FromTocoDataTypeToTflitToTensorType(inference_type);
  tflite::TensorType input_type =
      FromTocoDataTypeToTflitToTensorType(input_data_type);
  tflite::TensorType output_type =
      FromTocoDataTypeToTflitToTensorType(output_data_type);
  flatbuffers::FlatBufferBuilder builder;
  auto status = mlir::lite::QuantizeModel(
      *tflite_model, input_type, output_type, inference_tensor_type, {},
      disable_per_channel, fully_quantize, &builder, error_reporter.get(),
      enable_numeric_verify, enable_whole_model_verify,
      /*legacy_float_scale=*/true, denylisted_ops, denylisted_nodes);
  if (status != kTfLiteOk) {
    // Raise the exception the error reporter accumulated.
    error_reporter->exception();
    return nullptr;
  }
  return tflite::python_utils::ConvertToPyString(
      reinterpret_cast<const char*>(builder.GetCurrentBufferPointer()),
      builder.GetSize());
}
// Sparsifies a TFLite flatbuffer (passed as Python bytes in `data`) using the
// MLIR sparsifier and returns the sparsified flatbuffer as Python bytes.
// Returns nullptr with a Python exception set on failure.
PyObject* MlirSparsifyModel(PyObject* data) {
  using tflite::interpreter_wrapper::PythonErrorReporter;
  std::unique_ptr<PythonErrorReporter> error_reporter(new PythonErrorReporter);
  char* model_buf = nullptr;
  Py_ssize_t model_len;
  if (tflite::python_utils::ConvertFromPyString(data, &model_buf, &model_len) == -1) {
    PyErr_Format(PyExc_ValueError, "Failed to convert input PyObject");
    return nullptr;
  }
  std::unique_ptr<tflite::FlatBufferModel> fb_model =
      tflite::FlatBufferModel::BuildFromBuffer(model_buf, model_len,
                                               error_reporter.get());
  if (!fb_model) {
    PyErr_Format(PyExc_ValueError, "Invalid model");
    return nullptr;
  }
  // Unpack the flatbuffer into a mutable object representation.
  auto mutable_model = absl::make_unique<tflite::ModelT>();
  fb_model->GetModel()->UnPackTo(mutable_model.get(), nullptr);
  flatbuffers::FlatBufferBuilder builder;
  auto status =
      mlir::lite::SparsifyModel(*mutable_model, &builder, error_reporter.get());
  if (status != kTfLiteOk) {
    // Raise the exception the error reporter accumulated.
    error_reporter->exception();
    return nullptr;
  }
  return tflite::python_utils::ConvertToPyString(
      reinterpret_cast<const char*>(builder.GetCurrentBufferPointer()),
      builder.GetSize());
}
// Registers extra custom op definitions, given as a Python list of text-format
// OpDef protos, with the TensorFlow global op registry, and registers a no-op
// CPU kernel for each so graphs containing them can be imported.
// Returns Python True on success, nullptr with an exception set on failure.
PyObject* RegisterCustomOpdefs(PyObject* list) {
  if (!PyList_Check(list)) {
    PyErr_SetString(PyExc_TypeError, "Expected list in argument");
    return nullptr;
  }
  int64_t size = PyList_Size(list);
  for (int i = 0; i < size; ++i) {
    // Get character array from Python object.
    char* tf_opdefs;
    Py_ssize_t len;
    if (tflite::python_utils::ConvertFromPyString(PyList_GetItem(list, i),
                                                  &tf_opdefs, &len) == -1) {
      PyErr_Format(PyExc_ValueError,
                   "Failed to convert Python string at index %d of custom op "
                   "defs argument",
                   i);
      return nullptr;
    }
    // Parse op def from character array.
    tensorflow::OpDef opdef;
    if (!tensorflow::protobuf::TextFormat::ParseFromString(tf_opdefs, &opdef)) {
      PyErr_Format(
          PyExc_ValueError,
          "Failed to parse opdefs at index %d of custom op defs argument: %s",
          i, tf_opdefs);
      return nullptr;
    }
    // Register extra opdefs to TensorFlow global op registry.
    tensorflow::OpRegistry::Global()->Register(
        [opdef](
            tensorflow::OpRegistrationData* op_reg_data) -> tensorflow::Status {
          *op_reg_data = tensorflow::OpRegistrationData(opdef);
          return tensorflow::Status::OK();
        });
    // Register the corresponding fake op kernel: a compute function that does
    // nothing, just so kernel lookup succeeds for the custom op.
    const char* node_name = opdef.name().c_str();
    const char* op_name = opdef.name().c_str();
    const char* device_name = "CPU";
    static auto fake_compute_func = [](void* kernel, TF_OpKernelContext* ctx) {
    };
    TF_KernelBuilder* builder =
        TF_NewKernelBuilder(op_name, device_name, /*create_func=*/nullptr,
                            fake_compute_func, /*delete_func=*/nullptr);
    TF_Status* status = TF_NewStatus();
    TF_RegisterKernelBuilder(node_name, builder, status);
    if (TF_GetCode(status) != TF_OK) {
      TF_DeleteStatus(status);
      PyErr_Format(PyExc_ValueError,
                   "Failed to register fake op kernel at index %d of custom op "
                   "defs argument",
                   i);
      return nullptr;
    }
    TF_DeleteStatus(status);
  }
  Py_RETURN_TRUE;
}
// Drains the MLIR error collector: returns every collected error serialized
// to a string, then clears the collector.
const std::vector<std::string> RetrieveCollectedErrors() {
  auto* collector = mlir::TFL::ErrorCollector::GetErrorCollector();
  std::vector<std::string> serialized_errors;
  for (const auto& error_data : collector->CollectedErrors()) {
    serialized_errors.push_back(error_data.SerializeAsString());
  }
  collector->Clear();
  return serialized_errors;
}
// Converts a TFLite flatbuffer (raw bytes, or a file path when
// `input_is_filepath` is true) to its MLIR textual representation.
// Thin wrapper over the tensorflow namespace implementation.
std::string FlatBufferFileToMlir(const std::string& model,
                                 bool input_is_filepath) {
  return ::tensorflow::FlatBufferFileToMlir(model, input_is_filepath);
}
} // namespace toco
|
/*++
Copyright (c) 2012 Microsoft Corporation
Module Name:
mpfx.h
Abstract:
Multi precision fixed point numbers.
Author:
Leonardo de Moura (leonardo) 2012-09-19
Revision History:
--*/
#include <cstring>
#include<sstream>
#include<iomanip>
#include "util/mpfx.h"
#include "util/mpn.h"
#include "util/mpz.h"
#include "util/mpq.h"
#include "util/bit_util.h"
#include "util/trace.h"
// Construct a manager for fixed-point numbers with `int_sz` integer words and
// `frac_sz` fractional words per number, pre-allocating `initial_capacity`
// slots. Slot 0 is reserved (always zero) and used by unallocated numerals.
mpfx_manager::mpfx_manager(unsigned int_sz, unsigned frac_sz, unsigned initial_capacity) {
    SASSERT(initial_capacity > 0);
    SASSERT(int_sz > 0);
    SASSERT(frac_sz > 0);
    m_int_part_sz = int_sz;
    m_frac_part_sz = frac_sz;
    m_total_sz = m_int_part_sz + m_frac_part_sz;
    m_words.resize(initial_capacity * m_total_sz, 0);
    m_capacity = initial_capacity;
    m_to_plus_inf = false;
    // Scratch buffers sized for double-width intermediate results (mul/div).
    m_buffer0.resize(2*m_total_sz, 0);
    m_buffer1.resize(2*m_total_sz, 0);
    m_buffer2.resize(2*m_total_sz, 0);
    // Reserve slot 0 so that sig_idx == 0 always denotes the zero numeral.
    VERIFY(m_id_gen.mk() == 0);
    set(m_one, 1);
}
// Release the cached constant 1; all other storage is owned by the vectors.
mpfx_manager::~mpfx_manager() {
    del(m_one);
}
// Double the number of numeral slots backing this manager.
void mpfx_manager::expand() {
    m_capacity *= 2;
    m_words.resize(m_capacity * m_total_sz, 0);
}
// Assign a fresh storage slot to n (which must currently be the zero
// numeral, i.e. sig_idx == 0). The slot's words are guaranteed zeroed:
// slots are zeroed on creation and on del().
void mpfx_manager::allocate(mpfx & n) {
    SASSERT(n.m_sig_idx == 0);
    unsigned sig_idx = m_id_gen.mk();
    ensure_capacity(sig_idx);
    n.m_sig_idx = sig_idx;
    SASSERT(::is_zero(m_total_sz, words(n)));
}
// Number of significant words in ws (index of the most significant non-zero
// word, plus one). ws must be m_total_sz words long and non-zero.
unsigned mpfx_manager::sz(unsigned * ws) const {
    SASSERT(!::is_zero(m_total_sz, ws));
    unsigned i = m_total_sz;
    while (i > 0) {
        --i;
        if (ws[i] != 0)
            return i + 1;
    }
    // Unreachable: ws is asserted non-zero above.
    return 0;
}
// Return n's storage slot to the free pool, zeroing it first so that a
// future allocate() hands out an already-clean slot. The reserved slot 0
// (the zero numeral) is never recycled.
void mpfx_manager::del(mpfx & n) {
    unsigned sig_idx = n.m_sig_idx;
    if (sig_idx == 0)
        return;
    m_id_gen.recycle(sig_idx);
    unsigned * w = words(n);
    for (unsigned i = 0; i < m_total_sz; i++)
        w[i] = 0;
}
// Set n to zero: release its slot and restore the canonical zero
// representation (non-negative, sig_idx == 0).
void mpfx_manager::reset(mpfx & n) {
    del(n);
    n.m_sign = false;
    n.m_sig_idx = 0;
    SASSERT(check(n));
}
// n is an integer iff every fractional word is zero.
bool mpfx_manager::is_int(mpfx const & n) const {
    return ::is_zero(m_frac_part_sz, words(n));
}
// True iff |n| == 1: no fractional part, lowest integer word is 1, and all
// higher integer words are zero.
bool mpfx_manager::is_abs_one(mpfx const & n) const {
    unsigned * w = words(n);
    return is_int(n) && w[m_frac_part_sz] == 1 && ::is_zero(m_int_part_sz - 1, w + m_frac_part_sz + 1);
}
// True iff a is an integer representable as int64_t.
bool mpfx_manager::is_int64(mpfx const & a) const {
    if (!is_int(a))
        return false;
    // Zero always fits; with at most one integer word the magnitude fits in
    // 32 bits and therefore in int64_t.
    if (is_zero(a) || m_int_part_sz <= 1)
        return true;
    unsigned * w = words(a);
    w += m_frac_part_sz;
    // Two-word magnitude: fits when it is < 2^63, or exactly 2^63 with a
    // negative sign (INT64_MIN). Any non-zero word above the second means
    // the magnitude is >= 2^64.
    if (w[1] < 0x80000000u || (w[1] == 0x80000000u && is_neg(a))) {
        for (unsigned i = 2; i < m_int_part_sz; i++)
            if (w[i] != 0)
                return false;
        return true;
    }
    else {
        return false;
    }
}
// True iff a is a non-negative integer representable as uint64_t.
bool mpfx_manager::is_uint64(mpfx const & a) const {
    if (!is_int(a) || is_neg(a))
        return false;
    // With at most two integer words the magnitude is < 2^64 by construction.
    if (is_zero(a) || m_int_part_sz <= 2)
        return true;
    // Otherwise every integer word above the first two must be zero.
    unsigned * w = words(a);
    for (unsigned i = m_frac_part_sz + 2; i < m_total_sz; i++)
        if (w[i] != 0)
            return false;
    return true;
}
// Set n to the integer value v.
void mpfx_manager::set(mpfx & n, int v) {
    if (v == 0) {
        reset(n);
    }
    else {
        if (v < 0) {
            // Compute |v| in unsigned arithmetic: negating first (-v) is
            // undefined behavior when v == INT_MIN, whereas 0u - (unsigned)v
            // yields the correct magnitude for every value of v.
            set(n, 0u - static_cast<unsigned>(v));
            n.m_sign = 1;
        }
        else {
            set(n, static_cast<unsigned>(v));
        }
    }
    SASSERT(get_int64(n) == v);
    SASSERT(check(n));
}
// Set n to the non-negative integer value v.
void mpfx_manager::set(mpfx & n, unsigned v) {
    if (v == 0) {
        reset(n);
    }
    else {
        allocate_if_needed(n);
        n.m_sign = 0;
        unsigned * w = words(n);
        // Clear every word, then store v as the least significant integer
        // digit (just above the fractional part).
        memset(w, 0, m_total_sz * sizeof(unsigned));
        w[m_frac_part_sz] = v;
    }
    SASSERT(is_int(n));
    SASSERT(get_uint64(n) == v);
    SASSERT(check(n));
}
// Set n to the integer value v.
// Throws overflow_exception when the integer part has a single word and
// |v| does not fit in 32 bits.
void mpfx_manager::set(mpfx & n, int64_t v) {
    if (m_int_part_sz == 1) {
        if (v < -static_cast<int64_t>(static_cast<uint64_t>(UINT_MAX)) ||
            v > static_cast<int64_t>(static_cast<uint64_t>(UINT_MAX)))
            throw overflow_exception();
    }
    if (v == 0) {
        reset(n);
    }
    else {
        if (v < 0) {
            // Compute |v| in unsigned arithmetic: negating first (-v) is
            // undefined behavior when v == INT64_MIN, whereas
            // 0ull - (uint64_t)v yields the correct magnitude for every v.
            set(n, 0ull - static_cast<uint64_t>(v));
            n.m_sign = 1;
        }
        else {
            set(n, static_cast<uint64_t>(v));
        }
    }
    SASSERT(is_int(n));
    SASSERT(get_int64(n) == v);
    SASSERT(check(n));
}
// Set n to the non-negative integer value v.
// Throws overflow_exception when the integer part has a single word and
// v does not fit in 32 bits.
void mpfx_manager::set(mpfx & n, uint64_t v) {
    if (m_int_part_sz == 1) {
        if (v > static_cast<uint64_t>(UINT_MAX))
            throw overflow_exception();
    }
    if (v == 0) {
        reset(n);
    }
    else {
        allocate_if_needed(n);
        n.m_sign = 0;
        unsigned * w = words(n);
        for (unsigned i = 0; i < m_total_sz; i++)
            w[i] = 0;
        // Split v into two 32-bit digits with explicit shifts. Unlike
        // reinterpreting the bytes of v as an unsigned[2] array, this is
        // well-defined and produces the right digit order regardless of the
        // host's endianness.
        unsigned lo = static_cast<unsigned>(v);
        unsigned hi = static_cast<unsigned>(v >> 32);
        w[m_frac_part_sz] = lo;
        if (m_int_part_sz == 1) {
            // Guard above guarantees the high digit is zero here.
            SASSERT(hi == 0);
        }
        else {
            w[m_frac_part_sz+1] = hi;
        }
    }
    SASSERT(is_int(n));
    SASSERT(get_uint64(n) == v);
    SASSERT(check(n));
}
// Set n to the rational num/den, rounded by div() according to the current
// rounding mode.
void mpfx_manager::set(mpfx & n, int num, unsigned den) {
    scoped_mpfx numerator(*this), denominator(*this);
    set(numerator, num);
    set(denominator, den);
    div(numerator, denominator, n);
    SASSERT(check(n));
}
// Set n to the rational num/den (64-bit operands), rounded by div()
// according to the current rounding mode.
void mpfx_manager::set(mpfx & n, int64_t num, uint64_t den) {
    scoped_mpfx numerator(*this), denominator(*this);
    set(numerator, num);
    set(denominator, den);
    div(numerator, denominator, n);
    SASSERT(check(n));
}
// Copy the value of v into n (safe when n and v alias).
void mpfx_manager::set(mpfx & n, mpfx const & v) {
    if (is_zero(v)) {
        reset(n);
        return;
    }
    allocate_if_needed(n);
    n.m_sign = v.m_sign;
    unsigned * dst = words(n);
    unsigned const * src = words(v);
    for (unsigned i = 0; i < m_total_sz; i++)
        dst[i] = src[i];
    SASSERT(check(n));
}
// Set n to the (arbitrary-precision) integer v.
// Throws overflow_exception if v needs more than m_int_part_sz digits.
template<bool SYNCH>
void mpfx_manager::set_core(mpfx & n, mpz_manager<SYNCH> & m, mpz const & v) {
    if (m.is_zero(v)) {
        reset(n);
    }
    else {
        m_tmp_digits.reset();
        allocate_if_needed(n);
        // decompose() fills m_tmp_digits with |v|'s digits and returns v's sign.
        n.m_sign = m.decompose(v, m_tmp_digits);
        auto sz = m_tmp_digits.size();
        if (sz > m_int_part_sz)
            throw overflow_exception();
        unsigned * w = words(n);
        // Integer value: zero the fractional digits, copy |v| into the
        // integer digits (zero-padding the rest).
        for (unsigned i = 0; i < m_frac_part_sz; i++)
            w[i] = 0;
        ::copy(sz, m_tmp_digits.data(), m_int_part_sz, w + m_frac_part_sz);
    }
    SASSERT(check(n));
}
// Public entry points dispatching to the shared mpz implementation.
void mpfx_manager::set(mpfx & n, unsynch_mpz_manager & m, mpz const & v) {
    set_core(n, m, v);
}
#ifndef SINGLE_THREAD
// Thread-safe mpz manager variant (only built in multi-threaded configurations).
void mpfx_manager::set(mpfx & n, synch_mpz_manager & m, mpz const & v) {
    set_core(n, m, v);
}
#endif
// Set n to the rational v, rounding toward the current rounding direction
// (m_to_plus_inf) when v is not exactly representable.
// Throws overflow_exception if the result needs more than m_total_sz digits.
template<bool SYNCH>
void mpfx_manager::set_core(mpfx & n, mpq_manager<SYNCH> & m, mpq const & v) {
    if (m.is_int(v)) {
        set_core(n, m, v.numerator());
    }
    else {
        allocate_if_needed(n);
        _scoped_numeral<mpz_manager<SYNCH> > tmp(m);
        // BUG FIX: the sign must come from the input value v, not from n's
        // own (possibly stale, recycled-slot) state. The rounding decision
        // below also depends on this sign being correct.
        n.m_sign = m.is_neg(v);
        // tmp := |num(v) * 2^(32 * frac_sz)|, then divide by den(v), rounding
        // away from the current rounding direction iff the division is inexact.
        m.mul2k(v.numerator(), 8 * sizeof(unsigned) * m_frac_part_sz, tmp);
        m.abs(tmp);
        if ((n.m_sign == 1) != m_to_plus_inf && !m.divides(v.denominator(), tmp)) {
            m.div(tmp, v.denominator(), tmp);
            m.inc(tmp);
        }
        else {
            m.div(tmp, v.denominator(), tmp);
        }
        m_tmp_digits.reset();
        m.decompose(tmp, m_tmp_digits);
        auto sz = m_tmp_digits.size();
        if (sz > m_total_sz)
            throw overflow_exception();
        unsigned * w = words(n);
        ::copy(sz, m_tmp_digits.data(), m_total_sz, w);
    }
    SASSERT(check(n));
}
// Public entry points dispatching to the shared mpq implementation.
void mpfx_manager::set(mpfx & n, unsynch_mpq_manager & m, mpq const & v) {
    set_core(n, m, v);
}
#ifndef SINGLE_THREAD
// Thread-safe mpq manager variant (only built in multi-threaded configurations).
void mpfx_manager::set(mpfx & n, synch_mpq_manager & m, mpq const & v) {
    set_core(n, m, v);
}
#endif
// Exact equality: equal iff both are zero, or sign and every digit match.
bool mpfx_manager::eq(mpfx const & a, mpfx const & b) const {
    if (is_zero(a))
        return is_zero(b);
    if (is_zero(b) || a.m_sign != b.m_sign)
        return false;
    unsigned * wa = words(a);
    unsigned * wb = words(b);
    for (unsigned i = 0; i < m_total_sz; i++) {
        if (wa[i] != wb[i])
            return false;
    }
    return true;
}
// Returns a < b. Handles zero operands first, then compares magnitudes with
// the word-level ::lt, flipping the comparison when both are negative.
bool mpfx_manager::lt(mpfx const & a, mpfx const & b) const {
    STRACE("mpfx_trace", tout << "[mpfx] ("; display(tout, a); tout << " < "; display(tout, b); tout << ") == ";);
    bool r;
    if (is_zero(a)) {
        // 0 < b iff b is strictly positive.
        r = !is_zero(b) && !is_neg(b);
    }
    else if (is_zero(b)) {
        // a < 0 iff a is negative.
        r = is_neg(a);
    }
    else {
        SASSERT(!is_zero(a));
        SASSERT(!is_zero(b));
        if (is_neg(a)) {
            // negative < positive; among negatives, larger magnitude is smaller.
            r = is_pos(b) || ::lt(m_total_sz, words(b), words(a));
        }
        else {
            SASSERT(is_pos(a));
            // positive a: b must be positive with strictly larger magnitude.
            r = is_pos(b) && ::lt(m_total_sz, words(a), words(b));
        }
    }
    STRACE("mpfx_trace", tout << "(" << r << " == 1)\n";);
    return r;
}
// Shared implementation of addition and subtraction: c := a + b, or
// c := a - b when is_sub. Subtraction is implemented as addition with b's
// sign flipped. Throws overflow_exception if the magnitude overflows.
void mpfx_manager::add_sub(bool is_sub, mpfx const & a, mpfx const & b, mpfx & c) {
    if (is_zero(a)) {
        // 0 +/- b: copy b, negating for subtraction.
        set(c, b);
        if (is_sub)
            neg(c);
        return;
    }
    if (is_zero(b)) {
        // a +/- 0 == a.
        set(c, a);
        return;
    }
    TRACE("mpfx", tout << (is_sub ? "sub" : "add") << "("; display(tout, a); tout << ", "; display(tout, b); tout << ")\n";);
    allocate_if_needed(c);
    bool sgn_a = a.m_sign;
    bool sgn_b = b.m_sign;
    unsigned * w_a = words(a);
    unsigned * w_b = words(b);
    // a - b == a + (-b).
    if (is_sub)
        sgn_b = !sgn_b;
    // Compute c
    unsigned * w_c = words(c);
    if (sgn_a == sgn_b) {
        // Same sign: add magnitudes, keep the sign.
        c.m_sign = sgn_a;
        if (!::add(m_total_sz, w_a, w_b, w_c))
            throw overflow_exception();
    }
    else {
        // Opposite signs: subtract the smaller magnitude from the larger;
        // the result takes the sign of the larger operand.
        unsigned borrow;
        SASSERT(sgn_a != sgn_b);
        if (::lt(m_total_sz, w_a, w_b)) {
            c.m_sign = sgn_b;
            m_mpn_manager.sub(w_b, m_total_sz, w_a, m_total_sz, w_c, &borrow);
            SASSERT(!::is_zero(m_total_sz, w_c));
        }
        else {
            c.m_sign = sgn_a;
            m_mpn_manager.sub(w_a, m_total_sz, w_b, m_total_sz, w_c, &borrow);
            // Equal magnitudes cancel to zero; restore canonical zero form.
            if (::is_zero(m_total_sz, w_c))
                reset(c);
        }
        SASSERT(borrow == 0);
    }
    TRACE("mpfx", tout << "result: "; display(tout, c); tout << "\n";);
    SASSERT(check(c));
}
// c := a + b.
void mpfx_manager::add(mpfx const & a, mpfx const & b, mpfx & c) {
    STRACE("mpfx_trace", tout << "[mpfx] "; display(tout, a); tout << " + "; display(tout, b); tout << " == ";);
    add_sub(false, a, b, c);
    STRACE("mpfx_trace", display(tout, c); tout << "\n";);
}
// c := a - b.
void mpfx_manager::sub(mpfx const & a, mpfx const & b, mpfx & c) {
    STRACE("mpfx_trace", tout << "[mpfx] "; display(tout, a); tout << " - "; display(tout, b); tout << " == ";);
    add_sub(true, a, b, c);
    STRACE("mpfx_trace", display(tout, c); tout << "\n";);
}
// c := a * b, rounded toward the current rounding direction (m_to_plus_inf)
// when the exact product has fractional digits below the representable
// precision. Throws overflow_exception if the integer part overflows.
void mpfx_manager::mul(mpfx const & a, mpfx const & b, mpfx & c) {
    STRACE("mpfx_trace", tout << "[mpfx] ("; display(tout, a); tout << ") * ("; display(tout, b); tout << ") " << (m_to_plus_inf ? "<=" : ">=") << " ";);
    if (is_zero(a) || is_zero(b)) {
        reset(c);
    }
    else {
        allocate_if_needed(c);
        c.m_sign = a.m_sign ^ b.m_sign;
        // Full double-width product into scratch buffer 0.
        unsigned * r = m_buffer0.data();
        m_mpn_manager.mul(words(a), m_total_sz, words(b), m_total_sz, r);
        // round result: the low m_frac_part_sz words of r are discarded
        // precision; if any are non-zero and we must round away from zero
        // (relative to the rounding direction), increment the kept digits.
        unsigned * _r = r + m_frac_part_sz;
        if ((c.m_sign == 1) != m_to_plus_inf && !::is_zero(m_frac_part_sz, r)) {
            if (!::inc(m_total_sz, _r))
                throw overflow_exception();
        }
        // check for overflows: any non-zero word above the kept m_total_sz
        // digits means the integer part did not fit.
        if (!::is_zero(m_int_part_sz, _r + m_total_sz))
            throw overflow_exception();
        // copy result to c
        unsigned * w_c = words(c);
        for (unsigned i = 0; i < m_total_sz; i++)
            w_c[i] = _r[i];
    }
    STRACE("mpfx_trace", display(tout, c); tout << "\n";);
    SASSERT(check(c));
}
// c := a / b, rounded toward the current rounding direction (m_to_plus_inf).
// Results that would round to zero away from the rounding direction are
// replaced by +/- epsilon (the smallest representable magnitude) so a
// non-zero quotient is never silently reported as zero.
// Throws div0_exception when b is zero, overflow_exception on overflow.
void mpfx_manager::div(mpfx const & a, mpfx const & b, mpfx & c) {
    if (is_zero(b))
        throw div0_exception();
    STRACE("mpfx_trace", tout << "[mpfx] ("; display(tout, a); tout << ") / ("; display(tout, b); tout << ") " << (m_to_plus_inf ? "<=" : ">=") << " ";);
    if (is_zero(a)) {
        reset(c);
    }
    else {
        allocate_if_needed(c);
        c.m_sign = a.m_sign ^ b.m_sign;
        unsigned * w_a = words(a);
        unsigned * w_a_shft = m_buffer0.data();
        unsigned a_shft_sz = sz(w_a) + m_frac_part_sz;
        // copy a to buffer 0, and shift by m_frac_part_sz words so that the
        // integer quotient of (a << k) / b carries a's fractional precision.
        for (unsigned i = 0; i < m_frac_part_sz; i++)
            w_a_shft[i] = 0;
        for (unsigned i = 0; i < m_total_sz; i++)
            w_a_shft[i+m_frac_part_sz] = w_a[i];
        unsigned * w_b = words(b);
        unsigned b_sz = sz(w_b);
        unsigned * w_q = m_buffer1.data();
        if (b_sz > a_shft_sz) {
            // Divisor larger than shifted dividend: exact quotient is in
            // (0, 1) below representable precision; round to epsilon or zero.
            if ((c.m_sign == 1) != m_to_plus_inf)
                set_epsilon(c);
            else
                reset(c);
        }
        else {
            unsigned q_sz = a_shft_sz - b_sz + 1;
            unsigned * w_r = m_buffer2.data();
            unsigned r_sz = b_sz;
            m_mpn_manager.div(w_a_shft, a_shft_sz,
                              w_b, b_sz,
                              w_q,
                              w_r);
            // Quotient digits beyond the representable m_total_sz overflow.
            for (unsigned i = m_total_sz; i < q_sz; i++)
                if (w_q[i] != 0)
                    throw overflow_exception();
            // Non-zero remainder: round away from zero when required by the
            // rounding direction.
            if (((c.m_sign == 1) != m_to_plus_inf) && !::is_zero(r_sz, w_r)) {
                // round the result
                if (!::inc(m_total_sz, w_q))
                    throw overflow_exception();
            }
            // Copy quotient into c, zero-padding when it is shorter than
            // m_total_sz, and track whether all copied digits are zero.
            unsigned * w_c = words(c);
            bool zero_q = true;
            if (m_total_sz >= q_sz) {
                unsigned i;
                for (i = 0; i < q_sz; i++) {
                    if (w_q[i] != 0)
                        zero_q = false;
                    w_c[i] = w_q[i];
                }
                for (; i < m_total_sz; i++)
                    w_c[i] = 0;
            }
            else {
                for (unsigned i = 0; i < m_total_sz; i++) {
                    if (w_q[i] != 0)
                        zero_q = false;
                    w_c[i] = w_q[i];
                }
            }
            if (zero_q) {
                // Quotient rounded down to zero although a/b != 0: report
                // epsilon or zero depending on the rounding direction.
                if ((c.m_sign == 1) != m_to_plus_inf)
                    set_epsilon(c);
                else
                    reset(c);
            }
        }
    }
    STRACE("mpfx_trace", display(tout, c); tout << "\n";);
    SASSERT(check(c));
}
// a := a / 2^k (in place), rounded toward the current rounding direction.
void mpfx_manager::div2k(mpfx & a, unsigned k) {
    STRACE("mpfx_trace", tout << "[mpfx] ("; display(tout, a); tout << ") / (2^" << k << ") " << (m_to_plus_inf ? "<=" : ">=") << " ";);
    if (!is_zero(a) && k > 0) {
        unsigned * w = words(a);
        // Round away from zero iff any of the k bits being shifted out is set
        // and the rounding direction requires it.
        bool _inc = ((a.m_sign == 1) != m_to_plus_inf) && has_one_at_first_k_bits(m_total_sz, w, k);
        shr(m_total_sz, w, k, m_total_sz, w);
        if (_inc) {
            VERIFY(::inc(m_total_sz, w));
            SASSERT(!::is_zero(m_total_sz, w));
        }
        else if (::is_zero(m_total_sz, w)) {
            // Shifted down to zero: restore canonical zero representation.
            reset(a);
        }
    }
    STRACE("mpfx_trace", display(tout, a); tout << "\n";);
    SASSERT(check(a));
}
// Set n's magnitude to the smallest representable positive value (only the
// least significant fractional digit is 1). The sign field is left untouched.
void mpfx_manager::set_epsilon(mpfx & n) {
    unsigned * w = words(n);
    for (unsigned i = 0; i < m_total_sz; i++)
        w[i] = 0;
    w[0] = 1;
}
// n := -epsilon (smallest representable negative magnitude).
void mpfx_manager::set_minus_epsilon(mpfx & n) {
    set_epsilon(n);
    n.m_sign = true;
    SASSERT(check(n));
}
// n := +epsilon (smallest representable positive value).
void mpfx_manager::set_plus_epsilon(mpfx & n) {
    set_epsilon(n);
    n.m_sign = 0;
    SASSERT(check(n));
}
// n := floor(n) in place. For negative non-integers the truncated magnitude
// must be incremented (floor moves away from zero on negatives).
void mpfx_manager::floor(mpfx & n) {
    STRACE("mpfx_trace", tout << "[mpfx] Floor["; display(tout, n); tout << "] == ";);
    unsigned * w = words(n);
    if (is_neg(n)) {
        // Clear the fractional digits, remembering whether any were set.
        bool is_int = true;
        for (unsigned i = 0; i < m_frac_part_sz; i++) {
            if (w[i] != 0) {
                is_int = false;
                w[i] = 0;
            }
        }
        // Non-integer negative: bump the magnitude by one.
        if (!is_int && !::inc(m_int_part_sz, w + m_frac_part_sz))
            throw overflow_exception();
    }
    else {
        // Non-negative: truncation is floor.
        for (unsigned i = 0; i < m_frac_part_sz; i++)
            w[i] = 0;
    }
    // Restore canonical zero representation when the result is zero.
    if (::is_zero(m_int_part_sz, w + m_frac_part_sz))
        reset(n);
    SASSERT(check(n));
    STRACE("mpfx_trace", display(tout, n); tout << "\n";);
}
// n := ceil(n) in place. Mirror of floor(): for positive non-integers the
// truncated magnitude must be incremented.
void mpfx_manager::ceil(mpfx & n) {
    STRACE("mpfx_trace", tout << "[mpfx] Ceiling["; display(tout, n); tout << "] == ";);
    unsigned * w = words(n);
    if (is_pos(n)) {
        // Clear the fractional digits, remembering whether any were set.
        bool is_int = true;
        for (unsigned i = 0; i < m_frac_part_sz; i++) {
            if (w[i] != 0) {
                is_int = false;
                w[i] = 0;
            }
        }
        // Non-integer positive: bump the magnitude by one.
        if (!is_int && !::inc(m_int_part_sz, w + m_frac_part_sz))
            throw overflow_exception();
    }
    else {
        // Zero or negative: truncation toward zero is ceil.
        for (unsigned i = 0; i < m_frac_part_sz; i++)
            w[i] = 0;
    }
    // Restore canonical zero representation when the result is zero.
    if (::is_zero(m_int_part_sz, w + m_frac_part_sz))
        reset(n);
    SASSERT(check(n));
    STRACE("mpfx_trace", display(tout, n); tout << "\n";);
}
// b := a^p. Small exponents use repeated multiplication (only safe when
// a and b do not alias); otherwise binary (square-and-multiply)
// exponentiation is used. 0^0 is not defined (asserted).
void mpfx_manager::power(mpfx const & a, unsigned p, mpfx & b) {
#ifdef _TRACE
    scoped_mpfx _a(*this); _a = a;
    unsigned _p = p;
#endif
#define SMALL_POWER 8
    SASSERT(check(a));
    if (is_zero(a)) {
        SASSERT(p != 0);
        reset(b);
    }
    else if (p == 0) {
        set(b, 1);
    }
    else if (p == 1) {
        set(b, a);
    }
    else if (p == 2) {
        mul(a, a, b);
    }
    else if (p <= SMALL_POWER && &a != &b) {
        // Small exponent: p-1 sequential multiplications by a.
        SASSERT(p > 2);
        --p;
        set(b, a);
        while (p > 0) {
            --p;
            mul(a, b, b);
        }
    }
    else {
        // Binary exponentiation: multiply b by a^(2^i) for each set bit of p.
        unsigned mask = 1;
        scoped_mpfx pw(*this);
        set(pw, a);
        set(b, 1);
        while (mask <= p) {
            if (mask & p)
                mul(b, pw, b);
            mul(pw, pw, pw);
            mask = mask << 1;
        }
    }
    STRACE("mpfx_trace", tout << "[mpfx] ("; display(tout, _a); tout << ") ^ " << _p << (m_to_plus_inf ? "<=" : ">="); display(tout, b); tout << "\n";);
    TRACE("mpfx_power", display_raw(tout, b); tout << "\n";);
    SASSERT(check(b));
}
// Return true iff the magnitude of a is an integral power of two; on
// success k receives the exponent (|a| == 2^k). k is untouched on
// failure. NOTE(review): the sign of a is not inspected here —
// presumably callers only pass non-negative values; confirm.
bool mpfx_manager::is_power_of_two(mpfx const & a, unsigned & k) const {
    if (!is_int(a) || is_zero(a))
        return false;
    unsigned * w = words(a);
    unsigned i = m_total_sz;
    // Scan the integer-part words from most significant downward until
    // the first nonzero word.
    while (true) {
        SASSERT (i > m_frac_part_sz);
        --i;
        if (w[i] != 0) {
            if (!::is_power_of_two(w[i]))
                return false;
            // Exponent = bit offset of this word + bit index within it.
            k = (i - m_frac_part_sz) * 8 * sizeof(unsigned) + log2(w[i]);
            // Every lower integer word must be zero, otherwise more
            // than one bit of the value is set.
            while (i > m_frac_part_sz) {
                --i;
                if (w[i] != 0)
                    return false;
            }
            return true;
        }
    }
}
// Convenience overload: power-of-two test that discards the exponent.
bool mpfx_manager::is_power_of_two(mpfx const & a) const {
    unsigned ignored_exponent;
    return is_power_of_two(a, ignored_exponent);
}
// Extract the value of n as a signed 64-bit integer. Precondition
// (asserted): n is integral and fits in int64_t.
int64_t mpfx_manager::get_int64(mpfx const & n) const {
    SASSERT(is_int64(n));
    unsigned * w = words(n);
    w += m_frac_part_sz; // skip fractional words; read the integer part
    uint64_t r = 0;
    // NOTE(review): copies sizeof(uint64_t) bytes unconditionally —
    // assumes m_int_part_sz >= 2 words and little-endian layout; confirm.
    memcpy(&r, w, sizeof(uint64_t));
    if (r == 0x8000000000000000ull) {
        // |INT64_MIN| has no positive int64_t counterpart, so the
        // generic negation below would overflow; special-case it.
        SASSERT(is_neg(n));
        return INT64_MIN;
    }
    else {
        return is_neg(n) ? -static_cast<int64_t>(r) : r;
    }
}
// Extract the value of n as an unsigned 64-bit integer. Precondition
// (asserted): n is integral, non-negative, and fits in uint64_t.
uint64_t mpfx_manager::get_uint64(mpfx const & n) const {
    SASSERT(is_uint64(n));
    uint64_t result = 0;
    // The integer part starts after the fractional words.
    memcpy(&result, words(n) + m_frac_part_sz, sizeof(uint64_t));
    return result;
}
// Convert the integral value n into the multi-precision integer t using
// manager m. Shared implementation behind the synchronized and
// unsynchronized to_mpz entry points.
template<bool SYNCH>
void mpfx_manager::to_mpz_core(mpfx const & n, mpz_manager<SYNCH> & m, mpz & t) {
    SASSERT(is_int(n)); // fractional part must be zero
    unsigned * w = words(n);
    // The integer-part words are the digits of |n|.
    m.set_digits(t, m_int_part_sz, w+m_frac_part_sz);
    if (is_neg(n))
        m.neg(t);
}
// Convert n to an mpz using the single-threaded manager.
void mpfx_manager::to_mpz(mpfx const & n, unsynch_mpz_manager & m, mpz & t) {
    to_mpz_core(n, m, t);
}
#ifndef SINGLE_THREAD
// Convert n to an mpz using the thread-safe manager.
void mpfx_manager::to_mpz(mpfx const & n, synch_mpz_manager & m, mpz & t) {
    to_mpz_core(n, m, t);
}
#endif
// Convert n to the exact rational t. The fixed-point value is the word
// vector interpreted as one integer divided by 2^(32 * m_frac_part_sz).
template<bool SYNCH>
void mpfx_manager::to_mpq_core(mpfx const & n, mpq_manager<SYNCH> & m, mpq & t) {
    _scoped_numeral<mpz_manager<SYNCH> > a(m), b(m);
    unsigned * w = words(n);
    m.set(a, m_total_sz, w);                       // numerator: all digit words
    m.set(b, 1);
    m.mul2k(b, sizeof(unsigned)*8*m_frac_part_sz); // denominator: 2^(fraction bits)
    m.rat_div(a, b, t);                            // t := a/b in lowest terms
    if (is_neg(n))
        m.neg(t);
}
// Convert n to an mpq using the single-threaded manager.
void mpfx_manager::to_mpq(mpfx const & n, unsynch_mpq_manager & m, mpq & t) {
    to_mpq_core(n, m, t);
}
#ifndef SINGLE_THREAD
// Convert n to an mpq using the thread-safe manager.
void mpfx_manager::to_mpq(mpfx const & n, synch_mpq_manager & m, mpq & t) {
    to_mpq_core(n, m, t);
}
#endif
// Dump n word-by-word in zero-padded hexadecimal, most significant word
// first, with a '.' between the integer and fractional parts.
void mpfx_manager::display_raw(std::ostream & out, mpfx const & n) const {
    if (is_neg(n))
        out << "-";
    unsigned * w = words(n);
    unsigned i = m_total_sz;
    // Fix: the original left std::hex and the '0' fill character active
    // on the caller's stream after returning; save and restore them.
    std::ios_base::fmtflags saved_flags = out.flags();
    char saved_fill = out.fill();
    while(i > 0) {
        if (i == m_frac_part_sz)
            out << ".";
        --i;
        out << std::hex << std::setfill('0') << std::setw(2 * sizeof(unsigned)) << w[i];
    }
    out.flags(saved_flags);
    out.fill(saved_fill);
}
// Print n in decimal: "<num>" for integers, "<num>/2^k" (fraction in
// lowest terms with a power-of-two denominator) otherwise.
void mpfx_manager::display(std::ostream & out, mpfx const & n) const {
    if (is_neg(n))
        out << "-";
    unsigned * w = words(n);
    unsigned sz = m_total_sz;
    unsigned shift = UINT_MAX; // UINT_MAX == "no shift was performed"
    if (is_int(n)) {
        // Integral: print only the integer-part words.
        w += m_frac_part_sz;
        sz -= m_frac_part_sz;
    }
    else {
        // Strip trailing zero bits so the numerator/denominator pair is
        // in lowest terms. This temporarily mutates n's digit words in
        // place; the shl below undoes it (no bits are lost because the
        // shift count equals the number of trailing zeros).
        shift = ntz(m_total_sz, w);
        if (shift > 0)
            shr(m_total_sz, w, shift, m_total_sz, w);
    }
    sbuffer<char, 1024> str_buffer(11*sz, 0);
    out << m_mpn_manager.to_string(w, sz, str_buffer.begin(), str_buffer.size());
    if (!is_int(n)) {
        SASSERT(shift != UINT_MAX);
        // reverse effect of shr
        if (shift > 0)
            shl(m_total_sz, w, shift, m_total_sz, w);
        // display denominator as a power of 2
        unsigned k = sizeof(unsigned)*8*m_frac_part_sz - shift;
        out << "/2";
        if (k > 1)
            out << "^" << k;
    }
}
// Print n in SMT-LIB 2 syntax: "(- x)" wraps negatives and
// "(/ num den)" is used for non-integral values, where den is the full
// denominator 2^(32 * m_frac_part_sz).
void mpfx_manager::display_smt2(std::ostream & out, mpfx const & n) const {
    if (is_neg(n))
        out << "(- ";
    unsigned * w = words(n);
    unsigned sz = m_total_sz;
    if (is_int(n)) {
        // Integral: print only the integer-part words.
        w += m_frac_part_sz;
        sz -= m_frac_part_sz;
    }
    else {
        out << "(/ ";
    }
    sbuffer<char, 1024> str_buffer(11*sz, 0);
    out << m_mpn_manager.to_string(w, sz, str_buffer.begin(), str_buffer.size());
    if (!is_int(n)) {
        out << " ";
        // Build the denominator 2^(32*m_frac_part_sz) in the scratch
        // buffer: word m_frac_part_sz is 1, all lower words are 0.
        unsigned * w = m_buffer0.data();
        for (unsigned i = 0; i < m_frac_part_sz; i++)
            w[i] = 0;
        w[m_frac_part_sz] = 1;
        sbuffer<char, 1024> str_buffer2(11*(m_frac_part_sz+1), 0);
        out << m_mpn_manager.to_string(w, m_frac_part_sz + 1, str_buffer2.begin(), str_buffer2.size());
        out << ")";
    }
    if (is_neg(n))
        out << ")";
}
// Print n in decimal positional notation with at most prec digits after
// the point. A trailing '?' marks an expansion truncated at prec digits.
void mpfx_manager::display_decimal(std::ostream & out, mpfx const & n, unsigned prec) const {
    if (is_neg(n))
        out << "-";
    unsigned * w = words(n);
    sbuffer<char, 1024> str_buffer(11*m_int_part_sz, 0);
    out << m_mpn_manager.to_string(w + m_frac_part_sz, m_int_part_sz, str_buffer.begin(), str_buffer.size());
    if (!is_int(n)) {
        out << ".";
        // Classic digit extraction: repeatedly multiply the fraction by
        // 10; the overflow word (index m_frac_part_sz) of each product
        // is the next decimal digit. m_buffer0/m_buffer1 are scratch.
        unsigned * frac = m_buffer0.data();
        ::copy(m_frac_part_sz, w, m_frac_part_sz, frac);
        unsigned ten = 10;
        unsigned * n_frac = m_buffer1.data();
        bool frac_is_zero = false;
        unsigned i = 0;
        while (!frac_is_zero) {
            if (i >= prec) {
                out << "?"; // precision exhausted before termination
                return;
            }
            m_mpn_manager.mul(frac, m_frac_part_sz, &ten, 1, n_frac);
            frac_is_zero = ::is_zero(m_frac_part_sz, n_frac);
            SASSERT(n_frac[m_frac_part_sz] <= 9);
            // Suppress a final zero digit when the fraction terminates.
            if (!frac_is_zero || n_frac[m_frac_part_sz] != 0)
                out << n_frac[m_frac_part_sz];
            n_frac[m_frac_part_sz] = 0; // clear the digit for the next round
            std::swap(frac, n_frac);
            i++;
        }
    }
}
// Render a via display() into a freshly built string.
std::string mpfx_manager::to_string(mpfx const & a) const {
    std::ostringstream strm;
    display(strm, a);
    return strm.str();
}
// For binary fixed-point values the default rendering ("num" or
// "num/2^k") is already an exact rational, so this is just to_string.
std::string mpfx_manager::to_rational_string(mpfx const & a) const {
    return to_string(a);
}
// Debug invariant checker (used inside SASSERT): zero must be canonical
// (sign flag cleared) and the cached zero flag must agree with the
// digit words. Always returns true so it can sit inside an assertion.
bool mpfx_manager::check(mpfx const & a) const {
    SASSERT(!is_zero(a) || a.m_sign == 0);
    SASSERT(is_zero(a) == ::is_zero(m_total_sz, words(a)));
    return true;
}
// Return the position of the most significant set bit of the integer
// part of a (i.e. the largest k with 2^k <= a for a >= 1), or 0 when a
// is not positive. NOTE(review): the fractional part is ignored, so for
// 0 < a < 1 the result depends on nlz of an all-zero vector — confirm
// callers only pass values >= 1.
unsigned mpfx_manager::prev_power_of_two(mpfx const & a) {
    if (!is_pos(a))
        return 0;
    return m_int_part_sz * sizeof(unsigned) * 8 - nlz(m_int_part_sz, words(a) + m_frac_part_sz) - 1;
}
|
//---------------------------------------------------------------------------
#include <vcl.h>
#pragma hdrstop
#include <stdlib.h>
#include <time.h>
#include <ctype.h>
#include <conio.h>
#include <stdio.h>
#include <filectrl.hpp>
#include "ToneThd.h"
#include "UMain.h"
#include "UTone.h"
#include "UEnv.h"
#include "UEdit.h"
#include "Constant.h"
#include "KbdTbl.h"
//---------------------------------------------------------------------------
TONESTM* tone;    // CW tone output stream (created/destroyed in Execute)
LINGBUF* kbdbuff; // keyboard input ring buffer (created/destroyed in Execute)
#define START_MSG ";***Start***"
#define COMPLETE_MSG ";***Complete***"
#define ABORT_MSG ";***Abort***"
//*******( character tables for random code generation )*******
String ernd_table = L"ABCDEFGHIJKLMNOPQRSTUVWXYZ";
String jrnd_table = L"イロハニホヘトチリヌルヲワカヨタレソツネナラムウノオクヤマケフコエテアサキユメミシヒモセスン゙゚";
String xrnd_table = L"イロハニホヘトチリヌルヲワカヨタレソツネナラムウ`ノオクヤマケフコエテアサキユメミシ~ヒモセスン゙゚";
String esym_table = L".,:?\'-()/\"@";
String jsym_table = L"ー、」()";
String num_table = L"1234567890";
String daku1table = L"カキクケコサシスセソタチツテトハヒフヘホ"; // kana that can carry a dakuten (voicing mark)
String daku2table = L"ハヒフヘホ"; // kana that can carry a handakuten (semi-voicing mark)
//---------------------------------------------------------------------------
// Construct the worker thread. 'mode' selects the practice mode run by
// Execute; 'hwin' is the owner window handle needed by DirectSound.
__fastcall ToneThread::ToneThread(bool CreateSuspended, int mode, HWND hwin)
    : TThread(CreateSuspended)
{
    TestTime = 0;
    cw_mode = mode;   // practice mode
    whandle = hwin;   // window handle (required by DirectSound)
    kana_flag = false;
}
//---------------------------------------------------------------------------
// Push one key code into the shared keyboard ring buffer (called from
// the UI thread); returns whatever LINGBUF::bufwr reports.
int __fastcall ToneThread::KbdWr(int key)
{
    return kbdbuff->bufwr(key);
}
//---------------------------------------------------------------------------
int __fastcall ToneThread::ToneChar(Char c)
{
    // Sound a single character.
    // Return value >= 0: number of CW units emitted; == -1: abort detected.
    //
    // Kana/alphanumeric auto-switch detection: kana_judge is a small
    // state machine that watches bracketed prosigns — "[ホレ]" requests a
    // switch to kana, "[ラタ]"/"[BT...]" a switch back to alphanumerics.
    int judge_detect = 0;
    int tmp = kana_judge;
    int cnt;
    kana_judge = 99; // 99 == idle (not currently inside a bracket)
    if (Main->AMode->Checked) {
        switch (c) {
        case L'_':
            c = ' '; // convert to a space character
            break;
        case KANA_ON_CHR:
            kana_flag = true;
            break;
        case KANA_OFF_CHR:
            kana_flag = false;
            break;
        case L'[':
            kana_judge = 0; // bracket opened: start matching
            break;
        case L']':
            if (tmp == 2) {
                judge_detect = 1; // switch to kana
                break;
            }
            if (tmp == -2) {
                judge_detect = -1; // switch to alphanumerics
                break;
            }
            break;
        case L'ホ':
            if (tmp == 0)
                kana_judge = 1; // saw "[ホ"
            break;
        case L'レ':
            if (tmp == 1)
                kana_judge = 2; // saw "[ホレ" completely
            break;
        case L'ラ':
        case L'B':
            if (tmp == 0)
                kana_judge = -1; // saw "[ラ" or "[B"
            break;
        case L'タ':
        case L'T':
            if (tmp == -1)
                kana_judge = -2; // saw "[ラタ" / "[BT" completely
            break;
        }
    }
    // Special-character substitutions
    switch (c) {
    case L'_':
        c = ' '; // convert to a space character
        break;
    case L'#':
        c = '\n'; // convert to a newline character
        break;
    case KANA_ON_CHR:
        kana_flag = true;
        break;
    case KANA_OFF_CHR:
        kana_flag = false;
        break;
    }
    // Emit the character, polling until the tone buffer accepts it
    while ((cnt = tone->ToneWrChar(c)) == 0) { // loop until written
        if (Terminated)
            return -1; // abort detected while the buffer was full
        Sleep(250);
    }
    // Emit the mode-switch control character decided above, if any
    if (judge_detect) {
        if (judge_detect > 0) {
            kana_flag = true;
            c = KANA_ON_CHR;
        }
        else {
            kana_flag = false;
            c = KANA_OFF_CHR;
        }
        while ((tone->ToneWrChar(c)) == 0) { // loop until written
            if (Terminated)
                return -1; // abort detected while the buffer was full
            Sleep(250);
        }
    }
    if (cnt < 0) // control code or similar?
        cnt = 0;
    return cnt; // written successfully (number of CW units)
}
//---------------------------------------------------------------------------
// Sound every character of str in order.
// Returns the total number of CW units emitted, or -1 when an abort was
// detected mid-string.
int __fastcall ToneThread::ToneStr(String str)
{
    int total = 0;
    for (int idx = 1; idx <= str.Length(); idx++) { // VCL strings are 1-indexed
        int units = ToneChar(str[idx]);
        if (units < 0)
            return -1; // abort detected, nothing more can be written
        total += units;
    }
    return total;
}
//---------------------------------------------------------------------------
int __fastcall ToneThread::ToneFile(String fname)
{
    // Sound the contents of a UTF-8 text file line by line.
    // Returns 0 on success, 1 when an abort was detected, -1 on error.
    // Line syntax: ';' starts a comment to end of line; ";|msg" displays
    // msg in the main memo instead of sounding it.
    int i, len;
    Char c;
    PChar s;
    if (!FileExists(fname)) {
        ShowMessage( fname + "ファイルが\n見つかりません");
        return -1; // error: file not found
    }
    ToneFileName = MinimizeName(fname, Main->Status->Canvas, 250);
    Synchronize(OutputStatus); // show the file name in the status bar
    TStreamReader* fs = new TStreamReader(fname, TEncoding::UTF8, true, 1024);
    while (fs->EndOfStream == false)
    {
        String text = fs->ReadLine().UpperCase();
        if ((len = text.Length()) == 0) { // empty line?
            ToneChar('\n');
            continue;
        }
#if 1 /* convert full-width characters to half-width */
        DWORD flags = LCMAP_HALFWIDTH; // map to half-width characters
        const int size = len + 1;
        if (size < 128) { // convert only when the line fits the buffer
            s = _msgbuff;
            LCMapString(GetUserDefaultLCID(),
                flags,
                text.c_str(),
                size,
                (LPWSTR)s, // destination for the converted string
                size);
            s[len] = 0;
        }
        else {
            s = text.c_str(); // too long: sound it unconverted
        }
#else
        s = text.c_str();
#endif
        // ShowMessage(s); /* for debug */
        for (i=0; i<len; i++) {
            c = s[i];
            if (c == L';') {
                if (s[i+1] == L'|') { // message-output directive?
                    c = L'|';
                    wcsncpy(_msgbuff, &s[i+2], 128);
                    // NOTE(review): assumes _msgbuff holds >= 129 wide
                    // chars — confirm its declared size.
                    _msgbuff[128] = 0;
                }
                break;
            }
            if (ToneChar(c) < 0) {
                fs->Close();
                return 1; // abort detected
            }
        }
        if (c == L';') continue; // ignore ';' through end of line
        if (c == L'|') {
            if (Env->CM_Mode->Checked)
                Synchronize(DisplayMessage); // show the message text
            continue;
        }
        ToneChar('\n');
    }
    fs->Close();
    return 0; // success
}
//---------------------------------------------------------------------------
int __fastcall ToneThread::ToneMemo(void)
{
    // Sound every line of the edit-window memo, one newline per line.
    // Lines whose first character is ';' are comments and are skipped.
    int i;
    String s;
    for (i=0; i<EditFile->Memo->Lines->Count; i++) {
        s = EditFile->Memo->Lines->Strings[i];
        if (s.Length() != 0) {
            if (s[1] == ';') // VCL strings are 1-indexed
                continue;    // comment line, ignore
            ToneStr(s);
        }
        ToneChar('\n');
    }
    return 0; // success
}
//---------------------------------------------------------------------------
// Block until the tone output buffer drains, polling every 250 ms.
// Returns 1 if termination was requested while waiting, 0 once empty.
int __fastcall ToneThread::ToneEmpty(void)
{
    for (;;) {
        if (tone->bufchk() == 0)
            return 0; // buffer empty, no abort detected
        if (Terminated)
            return 1; // abort detected while waiting
        Sleep(250);
    }
}
//---------------------------------------------------------------------------
// Synchronize() target: show the current file name in the status bar.
void __fastcall ToneThread::OutputStatus(void)
{
    Main->Status->Panels->Items[0]->Text = ToneFileName;
}
//---------------------------------------------------------------------------
// Synchronize() target: append the buffered ";|" message to the memo.
void __fastcall ToneThread::DisplayMessage(void)
{
    Main->Memo->Lines->Add(_msgbuff);
}
//---------------------------------------------------------------------------
// Synchronize() target: mark the start of a practice run in the memo.
void __fastcall ToneThread::DisplayStart(void)
{
    Main->Memo->Lines->Add("");
    Main->Memo->Lines->Add(START_MSG);
}
//---------------------------------------------------------------------------
// Synchronize() target: mark normal completion of a run in the memo.
void __fastcall ToneThread::DisplayComplete(void)
{
    Main->Memo->Lines->Add(COMPLETE_MSG);
    Main->Memo->Lines->Add("");
}
//---------------------------------------------------------------------------
// Synchronize() target: mark an aborted run in the memo.
void __fastcall ToneThread::DisplayAbort(void)
{
    Main->Memo->Lines->Add(ABORT_MSG);
    Main->Memo->Lines->Add("");
}
//---------------------------------------------------------------------------
void __fastcall ToneThread::Execute()
{
    // Worker-thread main loop: read all settings from the UI, create the
    // tone stream and keyboard buffer, then run the selected practice
    // mode (optionally looping) until completion or termination.
    int i, j, n, units;
    Char c, c1, sep;
    String fn;
    String rndbuf;
    TONESPEC tsp;
    kana_judge = 99;                             // [ホレ]/[ラタ] detection state (99 == idle)
    int j_flag = Main->JMode->ItemIndex & 1;     // Japanese (wabun) mode?
    int l_flag = (Main->LMode->Checked == True); // loop mode?
    int JId = -1;
    HANDLE RSh = INVALID_HANDLE_VALUE;
    // Key-input source: 1 = RS-232C handle, 2 = joystick device.
    switch (Env->KIMode->ItemIndex) {
    case 1:
        RSh = Env->GetRSHandle(); break;
    case 2:
        JId = Env->GetJoyDevNo(); break;
    }
    int Opt = 0;
    if (Env->DispMod->ItemIndex == 2)
        Opt |= 0x10; // show code symbols
    if (j_flag)
        Opt |= 0x20; // Japanese (wabun) mode
    if (Main->AMode->Checked)
        Opt |= 0x40; // automatic kana/alphanumeric switching
    if (Env->DKMode->ItemIndex)
        Opt |= 0x80; // electronic-keyer mode
    tsp.SoundSel = Env->SndSel->ItemIndex;        // sound output selection
    tsp.BeepMode = Env->BeepMod->ItemIndex;       // BEEP mode (machine type)
    tsp.MidiNo = Env->MidiDev->ItemIndex;         // MIDI driver number
    tsp.DSndNo = Env->DSndDev->ItemIndex;         // WAVE driver number
    tsp.DSndWave = Env->DSndWave->ItemIndex;      // waveform number
    tsp.ToneSpd = Main->ToneSpdExe->Position;     // sending speed
    tsp.CrTime = Env->CrTime->Position * 1000;    // newline pause [ms]
    tsp.ToneNo = (BYTE)(Env->ToneNo->Position);   // timbre number
    tsp.ToneKey = (BYTE)(Env->ToneKey->Position); // pitch number
    tsp.ToneVol = (BYTE)(Env->ToneVol->Position); // volume
    tsp.ToneEco = (BYTE)(Env->ToneEco->Position); // reverb
    tsp.AutoCR = Env->SendAutoCR->ItemIndex;      // auto newline in send practice
    kbdbuff = new LINGBUF;
    tone = new TONESTM(cw_mode, &tsp, JId, RSh, Opt, whandle);
    tone->OutOption(tsp.ToneNo, tsp.ToneKey, tsp.ToneVol, tsp.ToneEco);
    //*****( build the character table for random generation )*****
    randomize();
    if (j_flag) {
        // Japanese mode: pick the kana table (X adds the extended
        // table, Y appends punctuation).
        if (Main->XMode->Checked) {
            if (Main->YMode->Checked)
                rndbuf = xrnd_table + jsym_table;
            else
                rndbuf = xrnd_table;
        }
        else {
            if (Main->YMode->Checked)
                rndbuf = jrnd_table + jsym_table;
            else
                rndbuf = jrnd_table;
        }
        kana_flag = true;
        Main->Status->Panels->Items[4]->Text = "カナ";
    }
    else {
        // Alphanumeric mode (Y appends punctuation).
        if (Main->YMode->Checked)
            rndbuf = ernd_table + esym_table;
        else
            rndbuf = ernd_table;
        kana_flag = false;
        Main->Status->Panels->Items[4]->Text = "英数";
    }
    if (Main->NMode->Checked)
        rndbuf = rndbuf + num_table; // N adds digits
    int len = rndbuf.Length();
    //*****( per-mode processing )*****
    Synchronize(DisplayStart);
    do {
        Sleep(500);
        switch (cw_mode) {
        //=====( ear warm-up )=====
        case WarmUp_mode:
            Main->Status->Panels->Items[0]->Text = "耳慣らし受信練習";
            ToneStr(rndbuf);
            ToneChar('\n');
            break;
        //=====( keyboard input )=====
        case KeyIn_mode:
            Main->Status->Panels->Items[0]->Text = "キーボード入力受信練習";
            while (!Terminated) {
                i = kbdbuff->bufrd();
                j = (i >> 8) & 0x01; // shift state
                i &= 0xFF;           // key code
                if (i == 0) {
                    Sleep(50); // wait for key input
                    continue;
                }
                if (kana_flag)
                    j += 2; // kana columns of the keyboard table
                c = keyboard_table[i][j];
                switch (c) {
                case 0:
                    break; // ignore
                case (Char)0xF1:
                    ToneStr(Env->KbdMem1->Text); break;
                case (Char)0xF2:
                    ToneStr(Env->KbdMem2->Text); break;
                case (Char)0xF3:
                    ToneStr(Env->KbdMem3->Text); break;
                case (Char)0xF4:
                    ToneStr(Env->KbdMem4->Text); break;
                case (Char)0xF5:
                    ToneStr(Env->KbdMem5->Text); break;
                case (Char)0xF6:
                    ToneStr(Env->KbdMem6->Text); break;
                case (Char)0xF7:
                    ToneStr(Env->KbdMem7->Text); break;
                case (Char)0xF8:
                    ToneStr(Env->KbdMem8->Text); break;
                case (Char)0xF9:
                    ToneStr(Env->KbdMem9->Text); break;
                case (Char)0xFA:
                    ToneStr(Env->KbdMem10->Text); break;
                case (Char)0x24: /* HOME key? */
                    ToneFile(Main->DataFile->Text); break;
                default:
                    ToneChar(c); break;
                }
            }
            break;
        //=====( random characters from file )=====
        case FileChar_mode:
            // First memo line replaces the random table, then falls
            // through to the random-cipher logic below.
            if (EditFile->Memo->Lines->Count >= 1) {
                rndbuf = EditFile->Memo->Lines->Strings[0];
                len = rndbuf.Length();
            }
        //=====( random cipher groups )=====
        case CharRand_mode:
            Main->Status->Panels->Items[0]->Text = "乱数受信練習";
            i = Env->RndTime->Position;    // practice time
            units = tsp.ToneSpd * 10 * i;  // total CW units for that time
#if 0 /// for debug ///
            i = ToneStr(L"PARIS ");
            Main->Status->Panels->Items[0]->Text = "units=" + IntToStr(i);
#else
            if (j_flag)
                units -= ToneStr(L"HR HR [ホレ] ");
            else
                units -= ToneStr(L"HR HR [BT] ");
#endif
            while (units > 0) {
                if (Terminated)
                    break;
                if (Env->RndRandom->Checked) {
                    // random group length between min and max
                    i = Env->RndCount->Position - Env->RndCntMin->Position;
                    if (i > 0) {
                        n = random(i + 1) + Env->RndCntMin->Position;
                    }
                    else {
                        n = Env->RndCount->Position; // degenerate: fixed
                    }
                }
                else {
                    // fixed group length
                    n = Env->RndCount->Position; // characters per group
                }
                sep = L' ';
                if (j_flag) {
                    switch (Env->RndSeparator->ItemIndex) {
                    case 0:
                        n = 0; // no separator
                        break;
                    case 2:
                        n = 60;
                        sep = L'?';
                        break;
                    default:
                        sep = L' ';
                        break;
                    }
                }
                i = n;
                do {
                    c = rndbuf[random(len)+1];
                    if (c == L'゙') {
                        // dakuten drawn: sound a voiced-capable kana first
                        j = random(daku1table.Length());
                        c1 = daku1table[j+1];
                        units -= ToneChar(c1);
                    }
                    if (c == L'゚') {
                        // handakuten drawn: sound a semi-voiced-capable kana first
                        j = random(daku2table.Length());
                        c1 = daku2table[j+1];
                        units -= ToneChar(c1);
                    }
                    units -= ToneChar(c);
                } while (--i > 0);
                if (n > 0) {
                    units -= ToneChar(sep);
                    if (sep == '?')
                        units -= ToneChar('\n');
                }
            }
            if (j_flag)
                ToneStr(L"[ラタ]");
            else
                ToneStr(L"[AR]");
            ToneChar('\n');
            break;
        //=====( text file )=====
        case FileText_mode:
            fn = Main->DataFile->Text;
            if (ExtractFilePath(fn).IsEmpty())
                fn = Env->DataDir->Text + fn; // prepend the data directory
            ToneFile(fn);
            break;
        //=====( random file )=====
        case FileRand_mode:
            j = Main->get_random(EditFile->Memo->Lines->Count);
            fn = EditFile->Memo->Lines->Strings[j];
            if (ExtractFilePath(fn).IsEmpty()) {
                fn = ExtractFilePath(EditFile->StatusBar->SimpleText) + fn;
            }
            ToneFile(fn);
            break;
        //=====( random word )=====
        case FileWord_mode:
            i = Env->RndTime->Position;    // practice time
            units = tsp.ToneSpd * 10 * i;  // total CW units for that time
            if (j_flag)
                units -= ToneStr(L"HR HR [ホレ] ");
            else
                units -= ToneStr(L"HR HR [BT] ");
            while (units > 0) {
                if (Terminated)
                    break;
                j = Main->get_random(EditFile->Memo->Lines->Count);
                units -= ToneStr(EditFile->Memo->Lines->Strings[j]);
            }
            if (j_flag)
                ToneStr(L"[ラタ]");
            else
                ToneStr(L"[AR]");
            ToneChar('\n');
            break;
        //=====( random line )=====
        case FileLine_mode:
            j = Main->get_random(EditFile->Memo->Lines->Count);
            ToneStr(EditFile->Memo->Lines->Strings[j]);
            ToneChar('\n');
            break;
        //=====( receive-practice repeat )=====
        case Repeat_mode:
            Main->Status->Panels->Items[0]->Text = "受信練習リピート";
            ToneMemo();
            break;
        //=====( send-practice mode )=====
        case CwSend_mode:
            if (Opt & 0x80) // electronic-keyer mode?
                Main->Status->Panels->Items[0]->Text = "エレキー送信練習"
                    " ([Shift]:短点キー, [Ctrl]:長点キー)";
            else
                Main->Status->Panels->Items[0]->Text = "縦振キー送信練習 ([Shift]:キー)";
            if (Env->KIMode->ItemIndex == 1) {
                if (RSh == INVALID_HANDLE_VALUE) {
                    ShowMessage("RS-232Cが未接続です!");
                    break;
                }
            }
            if (Env->KIMode->ItemIndex == 2) {
                if (Env->JoyDevCnt == 0) {
                    ShowMessage("ジョイスティックが未接続です!");
                    break;
                }
            }
            while (!Terminated) {
                i = kbdbuff->bufrd();
                j = (i >> 8) & 0x01; // shift state
                i &= 0xFF;           // key code
                if (i == 0) {
                    Sleep(100); // wait for key input
                    continue;
                }
                c = keyboard_table[i][j];
                // Only the kana on/off control characters are forwarded.
                switch (c) {
                case KANA_ON_CHR:
                    ToneChar(c); break;
                case KANA_OFF_CHR:
                    ToneChar(c); break;
                }
            }
            break;
        //=====( PARIS speed reference )=====
        default:
            l_flag = 0; // never loop the speed measurement
            tone->LapStart();
            ToneStr(L"PARIS ");
            tone->LapStop();
            ToneChar('\n');
            break;
        }
        //=====( wait for sounding to finish )=====
        ToneEmpty(); // wait until the buffer is empty
        if (Terminated) {
            l_flag = 0;
            tone->bufclr(); // discard anything still queued
        }
    } while (l_flag);
    //*****( shutdown )*****
    TestTime = tone->Terminate(); // wait for output to complete
    if (RSh != INVALID_HANDLE_VALUE) {
        CloseHandle(RSh);
    }
    delete tone;
    delete kbdbuff;
    if (Terminated)
        Synchronize(DisplayAbort);
    else
        Synchronize(DisplayComplete);
}
//---------------------------------------------------------------------------
|
// Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "src/core/server.h"
#include <stdint.h>
#include <time.h>
#include <unistd.h>
#include <algorithm>
#include <csignal>
#include <iostream>
#include <memory>
#include <utility>
#include <vector>
#include "src/core/api.pb.h"
#include "src/core/backend.h"
#include "src/core/constants.h"
#include "src/core/logging.h"
#include "src/core/model_config.h"
#include "src/core/model_config.pb.h"
#include "src/core/model_config_utils.h"
#include "src/core/model_repository_manager.h"
#include "src/core/provider.h"
#include "src/core/server.h"
#include "src/core/server_status.pb.h"
namespace nvidia { namespace inferenceserver {
namespace {

// RAII guard: increments the referenced atomic counter on construction
// and decrements it on destruction. Used to keep the in-flight request
// count accurate across every exit path of a handler.
class ScopedAtomicIncrement {
 public:
  explicit ScopedAtomicIncrement(std::atomic<uint64_t>& counter)
      : counter_(counter)
  {
    counter_++;
  }
  ~ScopedAtomicIncrement() { counter_--; }

 private:
  std::atomic<uint64_t>& counter_;
};

}  // namespace
//
// InferenceServer
//
// Construct the server in the INVALID state with default configuration.
// Records the monotonic start time (for UptimeNs) and picks up the
// version string from TENSORRT_SERVER_VERSION if set.
InferenceServer::InferenceServer()
    : ready_state_(ServerReadyState::SERVER_INVALID)
{
  struct timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  start_time_ns_ = ts.tv_sec * NANOS_PER_SECOND + ts.tv_nsec;

  const char* vstr = getenv("TENSORRT_SERVER_VERSION");
  if (vstr != nullptr) {
    version_.assign(vstr);
  }

  id_ = "inference:0";
  strict_model_config_ = true;
  strict_readiness_ = true;
  tracing_enabled_ = false;
  exit_timeout_secs_ = 30;
  tf_soft_placement_enabled_ = true;
  tf_gpu_memory_fraction_ = 0.0;
  tf_vgpu_memory_limits_ = {};
  inflight_request_counter_ = 0;

  status_manager_.reset(new ServerStatusManager(version_));
}
// Initialize the server: validate configuration, create the shared
// memory manager and the model repository manager, then enter READY.
// On failure the state becomes FAILED_TO_INITIALIZE, except when only
// model loading failed (manager was created) — then the server still
// goes READY and the error is returned to the caller.
Status
InferenceServer::Init()
{
  Status status;

  ready_state_ = ServerReadyState::SERVER_INITIALIZING;

  LOG_INFO << "Initializing TensorRT Inference Server";

  if (model_repository_path_.empty()) {
    ready_state_ = ServerReadyState::SERVER_FAILED_TO_INITIALIZE;
    return Status(
        RequestStatusCode::INVALID_ARG, "--model-repository must be specified");
  }

  // Create the shared memory manager that registers / unregisters and returns
  // the shared memory regions that are current registered.
  status =
      SharedMemoryManager::Create(status_manager_, &shared_memory_manager_);
  if (!status.IsOk()) {
    ready_state_ = ServerReadyState::SERVER_FAILED_TO_INITIALIZE;
    return status;
  }

  // Create the model manager for the repository. Unless model control
  // is disabled, all models are eagerly loaded when the manager is created.
  bool polling_enabled = (model_control_mode_ == MODE_POLL);
  bool model_control_enabled = (model_control_mode_ == MODE_EXPLICIT);
  status = ModelRepositoryManager::Create(
      this, version_, status_manager_, model_repository_path_,
      strict_model_config_, tf_gpu_memory_fraction_, tf_soft_placement_enabled_,
      tf_vgpu_memory_limits_, polling_enabled, model_control_enabled,
      &model_repository_manager_);
  if (!status.IsOk()) {
    if (model_repository_manager_ == nullptr) {
      ready_state_ = ServerReadyState::SERVER_FAILED_TO_INITIALIZE;
    } else {
      // If error is returned while the manager is set, we assume the
      // failure is due to a model not loading correctly so we just
      // continue if not exiting on error.
      ready_state_ = ServerReadyState::SERVER_READY;
    }
    return status;
  }

  ready_state_ = ServerReadyState::SERVER_READY;
  return Status::Success;
}
// Graceful shutdown: move to EXITING, unload all models, then wait up to
// exit_timeout_secs_ (polling once per second) for live models and
// in-flight requests to drain. Returns INTERNAL if the timeout expires.
Status
InferenceServer::Stop()
{
  if (ready_state_ != ServerReadyState::SERVER_READY) {
    return Status::Success; // never fully started; nothing to drain
  }

  ready_state_ = ServerReadyState::SERVER_EXITING;

  if (model_repository_manager_ == nullptr) {
    LOG_INFO << "No server context available. Exiting immediately.";
    return Status::Success;
  } else {
    LOG_INFO << "Waiting for in-flight inferences to complete.";
  }

  Status status = model_repository_manager_->UnloadAllModels();
  if (!status.IsOk()) {
    LOG_ERROR << status.Message();
  }

  // Wait for all in-flight requests to complete and all loaded models
  // to unload, or for the exit timeout to expire.
  uint32_t exit_timeout_iters = exit_timeout_secs_;

  while (true) {
    const auto& live_models = model_repository_manager_->GetLiveBackendStates();

    LOG_INFO << "Timeout " << exit_timeout_iters << ": Found "
             << live_models.size() << " live models and "
             << inflight_request_counter_ << " in-flight requests";
    if (LOG_VERBOSE_IS_ON(1)) {
      for (const auto& m : live_models) {
        for (const auto& v : m.second) {
          LOG_VERBOSE(1) << m.first << " v" << v.first << ": " << v.second;
        }
      }
    }

    if ((live_models.size() == 0) && (inflight_request_counter_ == 0)) {
      return Status::Success;
    }
    // exit_timeout_iters is unsigned, so "<= 0" is effectively "== 0".
    if (exit_timeout_iters <= 0) {
      break;
    }

    exit_timeout_iters--;
    std::this_thread::sleep_for(std::chrono::milliseconds(1000));
  }

  return Status(
      RequestStatusCode::INTERNAL,
      "Exit timeout expired. Exiting immediately.");
}
// Periodic poll entry point (MODE_POLL): scan the model repository for
// changes and load/unload models accordingly. No-op unless READY.
Status
InferenceServer::PollModelRepository()
{
  LOG_VERBOSE(1) << "Polling model repository";

  // Look for changes and update the loaded model configurations
  // appropriately.
  if (ready_state_ == ServerReadyState::SERVER_READY) {
    RETURN_IF_ERROR(model_repository_manager_->PollAndUpdate());
  }

  return Status::Success;
}
// Liveness probe. Sets *live = true when the server got past
// initialization without failing. Refused entirely while exiting.
Status
InferenceServer::IsLive(bool* live)
{
  *live = false;

  if (ready_state_ == ServerReadyState::SERVER_EXITING) {
    return Status(RequestStatusCode::UNAVAILABLE, "Server exiting");
  }

  ScopedAtomicIncrement inflight(inflight_request_counter_);

  // Server is considered live if it can respond to this health
  // request and it was able to initialize.
  switch (ready_state_) {
    case ServerReadyState::SERVER_INVALID:
    case ServerReadyState::SERVER_INITIALIZING:
    case ServerReadyState::SERVER_FAILED_TO_INITIALIZE:
      *live = false;
      break;
    default:
      *live = true;
      break;
  }
  return Status::Success;
}
// Readiness probe. Sets *ready = true when the server is READY and — in
// strict mode — every version of every model reports MODEL_READY.
// Refused entirely while exiting.
Status
InferenceServer::IsReady(bool* ready)
{
  *ready = false;

  if (ready_state_ == ServerReadyState::SERVER_EXITING) {
    return Status(RequestStatusCode::UNAVAILABLE, "Server exiting");
  }

  ScopedAtomicIncrement inflight(inflight_request_counter_);

  // Server is considered ready if it is in the ready state.
  // Additionally can report ready only when all models are ready.
  *ready = (ready_state_ == ServerReadyState::SERVER_READY);
  if (*ready && strict_readiness_) {
    // Strict readiness... get the model status and make sure all
    // models are ready.
    ServerStatus server_status;
    Status status = status_manager_->Get(
        &server_status, id_, ready_state_, UptimeNs(),
        model_repository_manager_.get());

    *ready = status.IsOk();
    if (*ready) {
      for (const auto& ms : server_status.model_status()) {
        // If a model status is present but no version status,
        // the model is not ready as there is no proper version to be served
        if (ms.second.version_status().size() == 0) {
          *ready = false;
          goto strict_done; // break out of both loops
        }
        for (const auto& vs : ms.second.version_status()) {
          if (vs.second.ready_state() != ModelReadyState::MODEL_READY) {
            *ready = false;
            goto strict_done; // break out of both loops
          }
        }
      }
    strict_done:;
    }
  }

  return Status::Success;
}
// Run one inference asynchronously on 'backend'. OnCompleteInfer is
// always invoked exactly once — immediately with UNAVAILABLE when the
// server is not READY, otherwise after the backend finishes and the
// response has been finalized.
void
InferenceServer::Infer(
    const std::shared_ptr<InferenceBackend>& backend,
    std::shared_ptr<InferRequestProvider> request_provider,
    std::shared_ptr<InferResponseProvider> response_provider,
    std::shared_ptr<ModelInferStats> infer_stats,
    std::function<void(const Status&)> OnCompleteInfer)
{
  if (ready_state_ != ServerReadyState::SERVER_READY) {
    OnCompleteInfer(Status(RequestStatusCode::UNAVAILABLE, "Server not ready"));
    return;
  }

  // Held via shared_ptr so the in-flight count stays raised until the
  // asynchronous completion callback below has run.
  std::shared_ptr<ScopedAtomicIncrement> inflight(
      new ScopedAtomicIncrement(inflight_request_counter_));

  // Need to capture 'backend' to keep it alive... it goes away when
  // it goes out of scope which can cause the model to be unloaded,
  // and we don't want that to happen when a request is in flight.
  auto OnCompleteHandleInfer = [this, OnCompleteInfer, backend,
                                response_provider,
                                inflight](const Status& status) mutable {
    if (status.IsOk()) {
      OnCompleteInfer(response_provider->FinalizeResponse(*backend));
    } else {
      OnCompleteInfer(status);
    }
  };

  backend->Run(
      infer_stats, request_provider, response_provider, OnCompleteHandleInfer);
}
// Fill 'server_status' with status for every model (when 'model_name'
// is empty) or for just the named model. Refused while exiting.
Status
InferenceServer::GetStatus(
    ServerStatus* server_status, const std::string& model_name)
{
  if (ready_state_ == ServerReadyState::SERVER_EXITING) {
    return Status(RequestStatusCode::UNAVAILABLE, "Server exiting");
  }

  ScopedAtomicIncrement inflight(inflight_request_counter_);

  // If no specific model request just return the entire status
  // object.
  if (model_name.empty()) {
    return status_manager_->Get(
        server_status, id_, ready_state_, UptimeNs(),
        model_repository_manager_.get());
  }
  // Fix: dropped the unreachable trailing "return Status::Success;" —
  // both branches of the original if/else already returned.
  return status_manager_->Get(
      server_status, id_, ready_state_, UptimeNs(), model_name,
      model_repository_manager_.get());
}
// Explicitly load (or reload) the named model. Only valid while READY.
Status
InferenceServer::LoadModel(const std::string& model_name)
{
  if (ready_state_ != ServerReadyState::SERVER_READY) {
    return Status(RequestStatusCode::UNAVAILABLE, "Server not ready");
  }

  ScopedAtomicIncrement inflight(inflight_request_counter_);

  return model_repository_manager_->LoadUnloadModel(
      model_name, ModelRepositoryManager::ActionType::LOAD);
}
// Explicitly unload the named model. Only valid while READY.
Status
InferenceServer::UnloadModel(const std::string& model_name)
{
  if (ready_state_ != ServerReadyState::SERVER_READY) {
    return Status(RequestStatusCode::UNAVAILABLE, "Server not ready");
  }

  ScopedAtomicIncrement inflight(inflight_request_counter_);

  return model_repository_manager_->LoadUnloadModel(
      model_name, ModelRepositoryManager::ActionType::UNLOAD);
}
// Register a named shared-memory region (system V / POSIX key 'shm_key',
// starting at 'offset', 'byte_size' bytes). Only valid while READY.
Status
InferenceServer::RegisterSharedMemory(
    const std::string& name, const std::string& shm_key, const size_t offset,
    const size_t byte_size)
{
  if (ready_state_ != ServerReadyState::SERVER_READY) {
    return Status(RequestStatusCode::UNAVAILABLE, "Server not ready");
  }

  ScopedAtomicIncrement inflight(inflight_request_counter_);

  return shared_memory_manager_->RegisterSharedMemory(
      name, shm_key, offset, byte_size);
}
// Unregister the named shared-memory region. Only valid while READY.
Status
InferenceServer::UnregisterSharedMemory(const std::string& name)
{
  if (ready_state_ != ServerReadyState::SERVER_READY) {
    return Status(RequestStatusCode::UNAVAILABLE, "Server not ready");
  }

  ScopedAtomicIncrement inflight(inflight_request_counter_);

  return shared_memory_manager_->UnregisterSharedMemory(name);
}
// Unregister every shared-memory region. Only valid while READY.
Status
InferenceServer::UnregisterAllSharedMemory()
{
  if (ready_state_ != ServerReadyState::SERVER_READY) {
    return Status(RequestStatusCode::UNAVAILABLE, "Server not ready");
  }

  ScopedAtomicIncrement inflight(inflight_request_counter_);

  return shared_memory_manager_->UnregisterAllSharedMemory();
}
// Resolve the mapped base address of ('name', offset, byte_size) into
// *shm_mapped_addr. NOTE(review): unlike its siblings this performs no
// ready-state check or in-flight accounting — confirm that is intended.
Status
InferenceServer::SharedMemoryAddress(
    const std::string& name, size_t offset, size_t byte_size,
    void** shm_mapped_addr)
{
  return shared_memory_manager_->SharedMemoryAddress(
      name, offset, byte_size, shm_mapped_addr);
}
// Create/point the trace manager at the given collector endpoint.
// Returns UNSUPPORTED when tracing was not compiled in or not enabled
// with --allow-tracing.
Status
InferenceServer::ConfigureTrace(
    const std::string& trace_name, const std::string& hostname, uint32_t port)
{
#ifdef TRTIS_ENABLE_TRACING
  if (!tracing_enabled_) {
    return Status(
        RequestStatusCode::UNSUPPORTED,
        "tracing is not enabled, use --allow-tracing");
  }

  return TraceManager::Create(trace_name, hostname, port);
#else
  return Status(
      RequestStatusCode::UNSUPPORTED,
      "tracing is not supported by this server");
#endif  // TRTIS_ENABLE_TRACING
}
// Adjust the trace verbosity level and sampling rate. Returns
// UNSUPPORTED when tracing was not compiled in or not enabled with
// --allow-tracing.
Status
InferenceServer::SetTraceLevel(uint32_t level, uint32_t rate)
{
#ifdef TRTIS_ENABLE_TRACING
  if (!tracing_enabled_) {
    return Status(
        RequestStatusCode::UNSUPPORTED,
        "tracing is not enabled on the server, use --allow-tracing");
  }

  return TraceManager::SetLevel(level, rate);
#else
  return Status(
      RequestStatusCode::UNSUPPORTED,
      "tracing is not supported by this server");
#endif  // TRTIS_ENABLE_TRACING
}
// Nanoseconds elapsed since construction, measured on CLOCK_MONOTONIC
// so wall-clock adjustments cannot skew the result.
uint64_t
InferenceServer::UptimeNs() const
{
  struct timespec now;
  clock_gettime(CLOCK_MONOTONIC, &now);

  const uint64_t current_ns = now.tv_sec * NANOS_PER_SECOND + now.tv_nsec;
  return current_ns - start_time_ns_;
}
}} // namespace nvidia::inferenceserver
|
#pragma once
#include "base/mappable.hpp"
#include "geometry/grassmann.hpp"
#include "geometry/linear_map.hpp"
#include "geometry/r3_element.hpp"
#include "geometry/sign.hpp"
#include "serialization/geometry.pb.h"
namespace principia {
namespace geometry {
FORWARD_DECLARE_FROM(orthogonal_map,
TEMPLATE(typename FromFrame, typename ToFrame) class,
OrthogonalMap);
namespace internal_permutation {
using base::not_null;
// A permutation of the coordinates. Obviously not coordinate-free, but
// practical. There are no precision losses when composing or applying
// permutations.
template<typename FromFrame, typename ToFrame>
class Permutation : public LinearMap<FromFrame, ToFrame> {
  // Declare shorter names for the protocol buffer enums.
  static int const EVEN = serialization::Permutation::EVEN;
  static int const ODD = serialization::Permutation::ODD;
  static int const X = serialization::Permutation::X;
  static int const Y = serialization::Permutation::Y;
  static int const Z = serialization::Permutation::Z;
  static int const INDEX = serialization::Permutation::INDEX;

 public:
  // Danger, Will Robinson!  This enum is stored in the serialized
  // representation.  Any change to the formulae below is likely to make it
  // impossible to read existing files.
  // Encoding: the source coordinate of each axis is packed into a 2-bit
  // field (shifted by target-axis * 2), the parity (EVEN/ODD) is added in,
  // and the INDEX bits make the six enumerators distinct.
  enum CoordinatePermutation {
    XYZ = EVEN + (X << X * 2) + (Y << Y * 2) + (Z << Z * 2) + (0 << INDEX),
    YZX = EVEN + (Y << X * 2) + (Z << Y * 2) + (X << Z * 2) + (1 << INDEX),
    ZXY = EVEN + (Z << X * 2) + (X << Y * 2) + (Y << Z * 2) + (2 << INDEX),
    XZY = ODD + (X << X * 2) + (Z << Y * 2) + (Y << Z * 2) + (3 << INDEX),
    ZYX = ODD + (Z << X * 2) + (Y << Y * 2) + (X << Z * 2) + (4 << INDEX),
    YXZ = ODD + (Y << X * 2) + (X << Y * 2) + (Z << Z * 2) + (5 << INDEX)
  };

  explicit Permutation(CoordinatePermutation coordinate_permutation);

  // +1 for the even permutations, -1 for the odd ones.
  Sign Determinant() const override;

  Permutation<ToFrame, FromFrame> Inverse() const;

  // Application to the Grassmann algebra: vectors, bivectors, trivectors.
  template<typename Scalar>
  Vector<Scalar, ToFrame> operator()(
      Vector<Scalar, FromFrame> const& vector) const;

  template<typename Scalar>
  Bivector<Scalar, ToFrame> operator()(
      Bivector<Scalar, FromFrame> const& bivector) const;

  template<typename Scalar>
  Trivector<Scalar, ToFrame> operator()(
      Trivector<Scalar, FromFrame> const& trivector) const;

  // Application to any type made mappable over Permutation.
  template<typename T>
  typename base::Mappable<Permutation, T>::type operator()(T const& t) const;

  // Widening conversion to the more general OrthogonalMap.
  OrthogonalMap<FromFrame, ToFrame> Forget() const;

  static Permutation Identity();

  // Serialization, both through the generic LinearMap message and through
  // the concrete Permutation message.
  void WriteToMessage(not_null<serialization::LinearMap*> message) const;
  static Permutation ReadFromMessage(serialization::LinearMap const& message);

  void WriteToMessage(
      not_null<serialization::Permutation*> message) const;
  static Permutation ReadFromMessage(serialization::Permutation const& message);

 private:
  // Applies the permutation to a raw coordinate triple; the public
  // operator() overloads are implemented in terms of this.
  template<typename Scalar>
  R3Element<Scalar> operator()(R3Element<Scalar> const& r3_element) const;

  CoordinatePermutation coordinate_permutation_;

  template<typename From, typename Through, typename To>
  friend Permutation<From, To> operator*(
      Permutation<Through, To> const& left,
      Permutation<From, Through> const& right);
};
template<typename FromFrame, typename ThroughFrame, typename ToFrame>
Permutation<FromFrame, ToFrame> operator*(
Permutation<ThroughFrame, ToFrame> const& left,
Permutation<FromFrame, ThroughFrame> const& right);
} // namespace internal_permutation
using internal_permutation::Permutation;
} // namespace geometry
} // namespace principia
#include "geometry/permutation_body.hpp"
|
/*---------------------------------------------------------------------------*\
Copyright: ICE Stroemungsfoschungs GmbH
Copyright (C) 1991-2008 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is based on CAELUS.
CAELUS is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CAELUS is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with CAELUS. If not, see <http://www.gnu.org/licenses/>.
Class
CML::vectorEntryToExpression
Description
Creates an expression from a vector
SourceFiles
vectorEntryToExpression.cpp
Contributors/Copyright:
2014 Bernhard F.W. Gschaider <bgschaid@ice-sf.at>
\*---------------------------------------------------------------------------*/
#ifndef vectorEntryToExpression_H
#define vectorEntryToExpression_H
#include "entryToExpression.hpp"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace CML
{
/*---------------------------------------------------------------------------*\
Class vectorEntryToExpression Declaration
\*---------------------------------------------------------------------------*/
// Converts a dictionary entry holding a vector into its textual
// swak-expression representation (see toExpr).
class vectorEntryToExpression
:
    public entryToExpression
{
    // Private data

    //- Construct as copy (not implemented: class is non-copyable)
    vectorEntryToExpression(const vectorEntryToExpression&);

    //- Assignment (not implemented: class is non-assignable)
    void operator=(const vectorEntryToExpression&);

public:
    //- Runtime type information
    TypeName("vectorEntryToExpression");

    // Constructors

    //- Construct null
    vectorEntryToExpression();

    // Destructor

    virtual ~vectorEntryToExpression();

    //- Render the given entry as an expression string
    virtual string toExpr(const entry &);
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace CML
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //
|
#include <vtkMultiBlockDataSet.h>
#include <vtkMultiBlockMergeFilter.h>
#include <vtkNew.h>
#include <vtkPoints.h>
#include <vtkPolyData.h>
#include <vtkSphereSource.h>
int main(int, char*[])
{
vtkNew<vtkSphereSource> sphereSource1;
sphereSource1->Update();
vtkNew<vtkSphereSource> sphereSource2;
sphereSource2->SetCenter(10, 10, 10);
sphereSource2->Update();
vtkNew<vtkMultiBlockDataSet> multiBlockDataSet1;
multiBlockDataSet1->SetNumberOfBlocks(1);
multiBlockDataSet1->SetBlock(0, sphereSource1->GetOutput());
vtkNew<vtkMultiBlockDataSet> multiBlockDataSet2;
multiBlockDataSet2->SetNumberOfBlocks(1);
multiBlockDataSet2->SetBlock(0, sphereSource2->GetOutput());
vtkNew<vtkMultiBlockMergeFilter> multiBlockMergeFilter;
multiBlockMergeFilter->AddInputData(multiBlockDataSet1);
multiBlockMergeFilter->AddInputData(multiBlockDataSet2);
multiBlockMergeFilter->Update();
return EXIT_SUCCESS;
}
|
#include "test/mocks/http/stream.h"
using testing::_;
using testing::Invoke;
namespace Envoy {
namespace Http {
MockStream::MockStream() {
  // Default actions make the mock behave like a real stream with respect to
  // callback registration: add/removeCallbacks maintain the callbacks_ list.
  ON_CALL(*this, addCallbacks(_)).WillByDefault(Invoke([this](StreamCallbacks& callbacks) -> void {
    callbacks_.push_back(&callbacks);
  }));

  ON_CALL(*this, removeCallbacks(_))
      .WillByDefault(
          Invoke([this](StreamCallbacks& callbacks) -> void { callbacks_.remove(&callbacks); }));

  // resetStream fans the reset notification out to every currently
  // registered callback, mirroring real stream semantics.
  ON_CALL(*this, resetStream(_)).WillByDefault(Invoke([this](StreamResetReason reason) -> void {
    for (StreamCallbacks* callbacks : callbacks_) {
      callbacks->onResetStream(reason);
    }
  }));
}
MockStream::~MockStream() {}
} // namespace Http
} // namespace Envoy
|
///////////////////////////////////////////////////////////////////////////////
///
/// Authors: Joshua Davis
/// Copyright 2011-2017, DigiPen Institute of Technology
///
///////////////////////////////////////////////////////////////////////////////
#pragma once
namespace Zero
{

// lazy...cleanup later: pull the Physics action types into this namespace.
typedef Physics::TransformAction TransformAction;
typedef Physics::MassAction MassAction;
typedef Physics::BroadPhaseAction BroadPhaseAction;

//Higher level primitives: Used for common actions that carry meaning
//across Transform, Mass and BroadPhase.

///Used to update an object after integration.
struct IntegrationAction
{
  IntegrationAction(Collider* collider);

  //Have to account for the object being moved due to integration. This means
  //that we have to update BroadPhase, update the world transform and also
  //recompute the world inertia tensor since we likely rotated.
  TransformAction mTransformAction;
  BroadPhaseAction mBroadPhaseAction;
  MassAction mMassAction;
};

///used to update an object whenever it is moved by the engine
struct MovementAction
{
  MovementAction(Collider* collider);

  //Have to account for the object being moved outside of physics.
  //This means that we have to update BroadPhase, update the world transform,
  //and we have to worry not only about the world inertia tensor, but the
  //center of mass needs to be moved to account for the position movement.
  TransformAction mTransformAction;
  BroadPhaseAction mBroadPhaseAction;
  MassAction mMassAction;
};

///Used to update a kinematic object moved by the engine.
struct KinematicMovementAction
{
  KinematicMovementAction(RigidBody* body);

  //Kinematic object being moved, need to recompute the world matrix and compute
  //what the velocity should have been. The collider should file a separate
  //action to deal with its update (we may not be in BroadPhase if we are a point cloud).
  TransformAction mTransformAction;
};

///Queues the transform and broadphase work needed when a collider is created.
struct ColliderCreationAction
{
  ColliderCreationAction(Collider* collider);

  TransformAction mTransformAction;
  BroadPhaseAction mBroadPhaseAction;
};

///Queues the mass work needed when a rigid body is created.
struct RigidBodyCreationAction
{
  RigidBodyCreationAction(RigidBody* body);

  MassAction mMassAction;
};

///Queues a full transform update for a collider or physics node.
struct FullTransformAction
{
  FullTransformAction(Collider* collider);
  FullTransformAction(PhysicsNode* node);

  TransformAction mTransformAction;
};

///Queues a world-transform update for a collider.
struct WorldTransformationAction
{
  WorldTransformationAction(Collider* collider);

  TransformAction mTransformAction;
};

///Used when an object is inserted into a BroadPhase.
struct InsertionAction
{
  enum State
  {
    Static = BroadPhaseAction::StaticInsert,
    Dynamic = BroadPhaseAction::DynamicInsert,
  };

  ///Used when we want to be put in the correct BroadPhase (Static/Dynamic)
  InsertionAction(Collider* collider);
  ///Used when we know if we are going to static or dynamic.
  InsertionAction(Collider* collider, byte state);

private:
  //Shared queueing logic for both constructors.
  void Queue(Collider* collider);

  //For now the insertion also implies creation, need to fix...
  //Since this is being created, we need to compute the world transform,
  //Insert in BroadPhase and also compute all mass properties along with
  //updating the world inertia tensor.
  TransformAction mTransformAction;
  BroadPhaseAction mBroadPhaseAction;
};

///Used when removing an object from a BroadPhase.
struct RemovalAction
{
  RemovalAction(Collider* collider);

  //We are being removed, simply remove from BroadPhase.
  BroadPhaseAction mBroadPhaseAction;
};

///Used upon creation or compositing to compute all mass values.
struct MassRecomputationAction
{
  MassRecomputationAction(RigidBody* body);

  //Just wraps recomputing the mass properties for a rigid body.
  MassAction mMassAction;
};

///Recomputes mass, body inertia and world inertia.
void QueueFullMassRecompuation(PhysicsNode* node);
///Recomputes body and world inertia.
void QueueInertiaRecompuation(PhysicsNode* node);
///Reads the transform and updates the world transform
void QueueTransformRead(PhysicsNode* node);
///Updates the world transform and the world inertia. Meant for when a body is integrated.
void QueueBodyIntegration(PhysicsNode* node);
///Removes from the current broadphase and inserts into the desired one
void ChangeBroadPhase(Collider* collider, bool toDynamicBroadphase);
///Inserts into the logically correct broadphase
void InsertIntoBroadPhase(Collider* collider);
///Removes from the last broadphase we were being inserted into
bool RemoveFromBroadPhase(Collider* collider);
///Queues an update in the current broadphase.
void UpdateInBroadPhase(Collider* collider);
///Queues the rigid body to override its old transform values
///(so that we don't teleport on a first frame)
void QueueOverrideOldTransform(RigidBody* body);

}//namespace Zero
|
#include "clang/Basic/Attributes.h"
#include "clang/Basic/AttrSubjectMatchRules.h"
#include "clang/Basic/AttributeCommonInfo.h"
#include "clang/Basic/IdentifierTable.h"
#include "llvm/ADT/StringSwitch.h"
using namespace clang;
// Returns a non-zero version value if the attribute identified by
// Scope/Attr is known for the given syntax, target and language options
// (the __has_attribute family of builtins); 0 otherwise.  The matching code
// is generated by tablegen and included below; it reads the locals Name,
// ScopeName, Syntax, Target and LangOpts, so their names must not change.
int clang::hasAttribute(AttrSyntax Syntax, const IdentifierInfo *Scope,
                        const IdentifierInfo *Attr, const TargetInfo &Target,
                        const LangOptions &LangOpts) {
  StringRef Name = Attr->getName();
  // Normalize the attribute name, __foo__ becomes foo.
  if (Name.size() >= 4 && Name.startswith("__") && Name.endswith("__"))
    Name = Name.substr(2, Name.size() - 4);

  // Normalize the scope name, but only for gnu and clang attributes.
  StringRef ScopeName = Scope ? Scope->getName() : "";
  if (ScopeName == "__gnu__")
    ScopeName = "gnu";
  else if (ScopeName == "_Clang")
    ScopeName = "clang";

#include "clang/Basic/AttrHasAttributeImpl.inc"

  return 0;
}
// Maps a subject match rule enumerator to its user-visible spelling; the
// case list is generated by tablegen from AttrSubMatchRulesList.inc.
const char *attr::getSubjectMatchRuleSpelling(attr::SubjectMatchRule Rule) {
  switch (Rule) {
#define ATTR_MATCH_RULE(NAME, SPELLING, IsAbstract) \
  case attr::NAME: \
    return SPELLING;
#include "clang/Basic/AttrSubMatchRulesList.inc"
  }
  llvm_unreachable("Invalid subject match rule");
}
// Returns the canonical scope name for an attribute: the alternate
// spellings "__gnu__" and "_Clang" are mapped to "gnu" and "clang", but
// only for the double-square-bracket syntaxes (C++11 and C2x).  A null
// scope yields the empty string.
static StringRef
normalizeAttrScopeName(const IdentifierInfo *Scope,
                       AttributeCommonInfo::Syntax SyntaxUsed) {
  if (!Scope)
    return "";

  StringRef ScopeName = Scope->getName();
  const bool IsDoubleSquareSyntax =
      SyntaxUsed == AttributeCommonInfo::AS_CXX11 ||
      SyntaxUsed == AttributeCommonInfo::AS_C2x;
  if (!IsDoubleSquareSyntax)
    return ScopeName;

  if (ScopeName == "__gnu__")
    return "gnu";
  if (ScopeName == "_Clang")
    return "clang";
  return ScopeName;
}
// Folds the reserved __foo__ spelling of an attribute down to foo.  This is
// only allowable for GNU-syntax attributes, and for double-square-bracket
// attributes that are unscoped or in the gnu/clang scopes.
static StringRef normalizeAttrName(const IdentifierInfo *Name,
                                   StringRef NormalizedScopeName,
                                   AttributeCommonInfo::Syntax SyntaxUsed) {
  StringRef AttrName = Name->getName();

  const bool IsDoubleSquareSyntax =
      SyntaxUsed == AttributeCommonInfo::AS_CXX11 ||
      SyntaxUsed == AttributeCommonInfo::AS_C2x;
  const bool ScopeAllowsNormalization =
      NormalizedScopeName.empty() || NormalizedScopeName == "gnu" ||
      NormalizedScopeName == "clang";
  const bool ShouldNormalize =
      SyntaxUsed == AttributeCommonInfo::AS_GNU ||
      (IsDoubleSquareSyntax && ScopeAllowsNormalization);

  if (ShouldNormalize && AttrName.size() >= 4 && AttrName.startswith("__") &&
      AttrName.endswith("__"))
    AttrName = AttrName.slice(2, AttrName.size() - 2);
  return AttrName;
}
// True when the attribute's scope is "gnu" or its reserved alias "__gnu__".
bool AttributeCommonInfo::isGNUScope() const {
  if (!ScopeName)
    return false;
  return ScopeName->isStr("gnu") || ScopeName->isStr("__gnu__");
}
#include "clang/Sema/AttrParsedAttrKinds.inc"
// Builds the canonical "scope::name" spelling (just "name" when unscoped)
// from the normalized scope and attribute names.
static SmallString<64> normalizeName(const IdentifierInfo *Name,
                                     const IdentifierInfo *Scope,
                                     AttributeCommonInfo::Syntax SyntaxUsed) {
  StringRef ScopeName = normalizeAttrScopeName(Scope, SyntaxUsed);
  StringRef AttrName = normalizeAttrName(Name, ScopeName, SyntaxUsed);

  SmallString<64> FullName;
  if (!ScopeName.empty()) {
    // A scope can only occur with the double-square-bracket syntaxes.
    assert(SyntaxUsed == AttributeCommonInfo::AS_CXX11 ||
           SyntaxUsed == AttributeCommonInfo::AS_C2x);
    FullName += ScopeName;
    FullName += "::";
  }
  FullName += AttrName;
  return FullName;
}
// Resolves the parsed attribute kind for the given name/scope/syntax.
// ::getAttrKind is generated into AttrParsedAttrKinds.inc (included above)
// and matches against the normalized "scope::name" string.
AttributeCommonInfo::Kind
AttributeCommonInfo::getParsedKind(const IdentifierInfo *Name,
                                   const IdentifierInfo *ScopeName,
                                   Syntax SyntaxUsed) {
  return ::getAttrKind(normalizeName(Name, ScopeName, SyntaxUsed), SyntaxUsed);
}
std::string AttributeCommonInfo::getNormalizedFullName() const {
return static_cast<std::string>(
normalizeName(getAttrName(), getScopeName(), getSyntax()));
}
// Computes the index of this attribute's spelling in its spelling list.
// The tablegen-generated code included below reads the locals Syntax,
// Scope and Name by exactly these names; do not rename them.
unsigned AttributeCommonInfo::calculateAttributeSpellingListIndex() const {
  // Both variables will be used in tablegen generated
  // attribute spell list index matching code.
  auto Syntax = static_cast<AttributeCommonInfo::Syntax>(getSyntax());
  StringRef Scope = normalizeAttrScopeName(getScopeName(), Syntax);
  StringRef Name = normalizeAttrName(getAttrName(), Scope, Syntax);

#include "clang/Sema/AttrSpellingListIndex.inc"
}
|
//
// SourceManager.cpp
// tinyscript
//
// Created by Amy Parent on 03/07/2018.
// Copyright © 2018 Amy Parent. All rights reserved.
//
#include <tinyscript/compiler/sourcemanager.hpp>
namespace tinyscript {
// Slurps the entire stream into a private heap buffer.  Note: the buffer is
// NOT null-terminated; all accesses must be bounded by length_ (or end()).
SourceManager::SourceManager(std::istream& input) {
    std::string source(std::istreambuf_iterator<char>(input), {});
    length_ = static_cast<std::uint32_t>(source.length());
    source_ = new char[length_];
    std::copy(source.begin(), source.end(), source_);
}
// Keeps a private, length-bounded copy of the given source text.  The
// buffer carries no terminating null; length_ bounds every access.
SourceManager::SourceManager(const std::string& source) {
    const auto size = static_cast<std::uint32_t>(source.length());
    length_ = size;
    source_ = new char[size];
    std::copy(source.cbegin(), source.cend(), source_);
}
// Releases the buffer allocated with new[] in the constructors.
SourceManager::~SourceManager() {
    delete[] source_;
}
// Extracts the token's lexeme: tokens store a byte offset into the source
// buffer plus a byte length.
std::string SourceManager::tokenAsString(const tinyscript::Token &token) const {
    return std::string(source_ + token.location, token.length);
}
// Parses the token's lexeme as a signed 64-bit integer.  std::stoll throws
// std::invalid_argument / std::out_of_range on malformed input.
std::int64_t SourceManager::tokenAsInt(const Token& token) const {
    const std::string lexeme = tokenAsString(token);
    return std::stoll(lexeme);
}
// Parses the token's lexeme as a single-precision float.  std::stof throws
// std::invalid_argument / std::out_of_range on malformed input.
float SourceManager::tokenAsFloat(const tinyscript::Token &token) const {
    const std::string lexeme = tokenAsString(token);
    return std::stof(lexeme);
}
// Prints the full source line containing 'location' to 'out'.
//
// Fixes two defects in the original:
//  * the 'out' parameter was ignored (output went to std::cout);
//  * both scan loops dereferenced before the bounds check, reading
//    source_[-1] when the line starts at the buffer head and *end() when it
//    ends at the buffer tail (the buffer is not null-terminated).
void SourceManager::printLine(std::ostream& out, std::uint32_t location) const {
    const char* loc = source_ + location;

    // Walk back to the character after the previous newline (or the start
    // of the buffer).  Check the bound *before* dereferencing.
    const char* start = loc;
    while(start != source_ && *(start-1) != '\n') {
        start -= 1;
    }

    // Walk forward to the next newline (or the end of the buffer).
    const char* stop = loc;
    while(stop != end() && *stop != '\n') {
        stop += 1;
    }

    std::string line(start, stop-start);
    out << line << std::endl;
}
// Prints the source line containing 'location' to 'out', followed by a
// caret line that underlines the token: spaces up to the token column, '^'
// at its first character, '~' under the remaining 'length - 1' characters.
//
// Fixes three defects in the original: output went to std::cout instead of
// 'out'; the scan loops dereferenced before the bounds check (out-of-bounds
// reads at the buffer edges, since the buffer is not null-terminated); and
// the underline loop compared a signed int against the unsigned 'length'.
void SourceManager::printLineAndToken(std::ostream& out, std::uint32_t location, std::uint32_t length) const {
    const char* loc = source_ + location;

    // Bounds check before dereference at both edges of the buffer.
    const char* start = loc;
    while(start != source_ && *(start-1) != '\n') {
        start -= 1;
    }
    const char* stop = loc;
    while(stop != end() && *stop != '\n') {
        stop += 1;
    }

    std::string line(start, stop-start);
    out << line << std::endl;

    const auto offset = loc - start;
    for(std::ptrdiff_t i = 0; i < offset; ++i) out << " ";
    out << "^";
    for(std::uint32_t i = 1; i < length; ++i) out << "~";
    out << std::endl;
}
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <chrono>
#include <future>
#include <iostream>
#include <random>
#include <thread>
#include <gtest/gtest.h>
#include <geode/AuthInitialize.hpp>
#include <geode/Cache.hpp>
#include <geode/CqAttributes.hpp>
#include <geode/CqAttributesFactory.hpp>
#include <geode/CqEvent.hpp>
#include <geode/CqListener.hpp>
#include <geode/PoolManager.hpp>
#include <geode/QueryService.hpp>
#include <geode/RegionFactory.hpp>
#include <geode/RegionShortcut.hpp>
#include "CacheRegionHelper.hpp"
#include "SimpleAuthInitialize.hpp"
#include "SimpleCqListener.hpp"
#include "framework/Cluster.h"
#include "framework/Framework.h"
#include "framework/Gfsh.h"
using apache::geode::client::AuthenticationFailedException;
using apache::geode::client::AuthInitialize;
using apache::geode::client::Cache;
using apache::geode::client::Cacheable;
using apache::geode::client::CacheableKey;
using apache::geode::client::CacheableString;
using apache::geode::client::CacheFactory;
using apache::geode::client::CqAttributes;
using apache::geode::client::CqAttributesFactory;
using apache::geode::client::CqEvent;
using apache::geode::client::CqListener;
using apache::geode::client::CqOperation;
using apache::geode::client::Exception;
using apache::geode::client::HashMapOfCacheable;
using apache::geode::client::NotConnectedException;
using apache::geode::client::Pool;
using apache::geode::client::Properties;
using apache::geode::client::QueryService;
using apache::geode::client::Region;
using apache::geode::client::RegionShortcut;
using std::chrono::minutes;
const int32_t CQ_PLUS_AUTH_TEST_REGION_ENTRY_COUNT = 100000;
// Builds a client cache wired to the given AuthInitialize implementation.
// Statistics sampling is disabled to keep the test lightweight.
Cache createCache(std::shared_ptr<SimpleAuthInitialize> auth) {
  auto factory = CacheFactory();
  factory.set("log-level", "debug");
  factory.set("log-file", "geode_native.log");
  factory.set("statistic-sampling-enabled", "false");
  factory.setAuthInitialize(auth);
  return factory.create();
}
// Creates the "default" pool pointed at the cluster's locators, with
// single-hop enabled and subscriptions as requested by the caller.
std::shared_ptr<Pool> createPool(Cluster& cluster, Cache& cache,
                                 bool subscriptionEnabled) {
  auto poolFactory = cache.getPoolManager().createFactory();
  cluster.applyLocators(poolFactory);
  poolFactory.setPRSingleHopEnabled(true);
  poolFactory.setSubscriptionEnabled(subscriptionEnabled);
  return poolFactory.create("default");
}
// Creates the client-side "region" as a PROXY region: no local state, all
// operations are routed to the servers through the given pool.
std::shared_ptr<Region> setupRegion(Cache& cache,
                                    const std::shared_ptr<Pool>& pool) {
  auto regionFactory = cache.createRegionFactory(RegionShortcut::PROXY);
  regionFactory.setPoolName(pool->getName());
  return regionFactory.create("region");
}
// With valid credentials, a put/get round trip succeeds and the client's
// AuthInitialize callback is invoked at least once.
TEST(AuthInitializeTest, putGetWithBasicAuth) {
  // One-locator/one-server cluster secured by the Java
  // SimpleSecurityManager, named after the currently running test.
  Cluster cluster(
      Name(std::string(::testing::UnitTest::GetInstance()
                           ->current_test_info()
                           ->test_case_name()) +
           "/" +
           ::testing::UnitTest::GetInstance()->current_test_info()->name()),
      Classpath{getFrameworkString(FrameworkVariable::JavaObjectJarPath)},
      SecurityManager{"javaobject.SimpleSecurityManager"}, User{"root"},
      Password{"root-password"}, LocatorCount{1}, ServerCount{1});

  cluster.getGfsh()
      .create()
      .region()
      .withName("region")
      .withType("PARTITION")
      .execute();

  // Default-constructed SimpleAuthInitialize supplies credentials the
  // security manager accepts (presumably its built-in defaults — see
  // SimpleAuthInitialize).
  auto authInitialize = std::make_shared<SimpleAuthInitialize>();
  auto cache = createCache(authInitialize);
  auto pool = createPool(cluster, cache, false);
  auto region = setupRegion(cache, pool);

  region->put("foo", "bar");
  auto value = region->get("foo");
  auto stringValue = std::dynamic_pointer_cast<CacheableString>(value)->value();

  ASSERT_EQ(stringValue, std::string("bar"));
  ASSERT_GT(authInitialize->getGetCredentialsCallCount(), 0);
}
// With an unknown username, the put must not succeed silently: the client
// ends up not connected (NotConnectedException is the expected failure);
// any other exception fails the test.  The auth callback must still have
// been invoked.
TEST(AuthInitializeTest, putWithBadUsername) {
  Cluster cluster(
      Name(std::string(::testing::UnitTest::GetInstance()
                           ->current_test_info()
                           ->test_case_name()) +
           "/" +
           ::testing::UnitTest::GetInstance()->current_test_info()->name()),
      Classpath{getFrameworkString(FrameworkVariable::JavaObjectJarPath)},
      SecurityManager{"javaobject.SimpleSecurityManager"}, User{"root"},
      Password{"root-password"}, LocatorCount{1}, ServerCount{1});

  cluster.getGfsh()
      .create()
      .region()
      .withName("region")
      .withType("PARTITION")
      .execute();

  // Wrong username, correct password.
  auto authInitialize = std::make_shared<SimpleAuthInitialize>(
      "unauthorized-user", "root-password");
  auto cache = createCache(authInitialize);
  auto pool = createPool(cluster, cache, false);
  auto region = setupRegion(cache, pool);

  try {
    region->put("foo", "bar");
  } catch (const NotConnectedException&) {
    // Expected: authentication failure surfaces as a lost connection.
  } catch (const Exception& ex) {
    std::cerr << "Caught unexpected exception: " << ex.what() << std::endl;
    FAIL();
  }

  ASSERT_GT(authInitialize->getGetCredentialsCallCount(), 0);
}
// With a wrong password, the put must not succeed: NotConnectedException is
// the expected failure mode; any other exception fails the test.  Note this
// test creates no region on the cluster — the put cannot succeed anyway.
TEST(AuthInitializeTest, putWithBadPassword) {
  Cluster cluster(
      Name(std::string(::testing::UnitTest::GetInstance()
                           ->current_test_info()
                           ->test_case_name()) +
           "/" +
           ::testing::UnitTest::GetInstance()->current_test_info()->name()),
      Classpath{getFrameworkString(FrameworkVariable::JavaObjectJarPath)},
      SecurityManager{"javaobject.SimpleSecurityManager"}, User{"root"},
      Password{"root-password"}, LocatorCount{1}, ServerCount{1});

  // Correct username, wrong password.
  auto authInitialize =
      std::make_shared<SimpleAuthInitialize>("root", "bad-password");
  auto cache = createCache(authInitialize);
  auto pool = createPool(cluster, cache, false);
  auto region = setupRegion(cache, pool);

  try {
    region->put("foo", "bar");
  } catch (const NotConnectedException&) {
    // Expected: authentication failure surfaces as a lost connection.
  } catch (const Exception& ex) {
    std::cerr << "Caught unexpected exception: " << ex.what() << std::endl;
    FAIL();
  }

  ASSERT_GT(authInitialize->getGetCredentialsCallCount(), 0);
}
// With subscriptions enabled, pool creation itself establishes the
// subscription connection, so bad credentials are expected to fail fast
// with AuthenticationFailedException; any other exception fails the test.
TEST(AuthInitializeTest, badCredentialsWithSubscriptionEnabled) {
  Cluster cluster(
      Name(std::string(::testing::UnitTest::GetInstance()
                           ->current_test_info()
                           ->test_case_name()) +
           "/" +
           ::testing::UnitTest::GetInstance()->current_test_info()->name()),
      Classpath{getFrameworkString(FrameworkVariable::JavaObjectJarPath)},
      SecurityManager{"javaobject.SimpleSecurityManager"}, User{"root"},
      Password{"root-password"}, LocatorCount{1}, ServerCount{1});

  auto authInitialize =
      std::make_shared<SimpleAuthInitialize>("root", "bad-password");
  auto cache = createCache(authInitialize);

  try {
    createPool(cluster, cache, true);
  } catch (const AuthenticationFailedException&) {
    // Expected with subscriptionEnabled == true and bad credentials.
  } catch (const Exception& ex) {
    std::cerr << "Caught unexpected exception: " << ex.what() << std::endl;
    FAIL();
  }

  ASSERT_GT(authInitialize->getGetCredentialsCallCount(), 0);
}
|
// Copyright (c) Sandeep Mistry. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
#if defined(NRF51) || defined(NRF52) || defined(__RFduino__)
#ifdef __RFduino__
#include <utility/RFduino/ble.h>
#include <utility/RFduino/ble_hci.h>
#include <utility/RFduino/nrf_sdm.h>
#elif defined(NRF5) || defined(NRF51_S130)
#include <ble.h>
#include <ble_hci.h>
#include <nrf_sdm.h>
#elif defined(NRF52) && defined(S132) // ARDUINO_RBL_nRF52832
#ifndef ARDUINO_RBL_nRF52832
#define ARDUINO_RBL_nRF52832
#endif
#include <sdk/softdevice/s132/headers/nrf_ble.h>
#include <sdk/softdevice/s132/headers/nrf_ble_hci.h>
#include <sdk/softdevice/s132/headers/nrf_sdm.h>
#else
#include <s110/ble.h>
#include <s110/ble_hci.h>
#include <s110/nrf_sdm.h>
#endif
#if defined(NRF5) || defined(NRF51_S130) || defined(ARDUINO_RBL_nRF52832)
uint32_t sd_ble_gatts_value_set(uint16_t handle, uint16_t offset, uint16_t* const p_len, uint8_t const * const p_value) {
ble_gatts_value_t val;
val.len = *p_len;
val.offset = offset;
val.p_value = (uint8_t*)p_value;
return sd_ble_gatts_value_set(BLE_CONN_HANDLE_INVALID, handle, &val);
}
#endif
#include "Arduino.h"
#include "BLEAttribute.h"
#include "BLEService.h"
#include "BLECharacteristic.h"
#include "BLEDescriptor.h"
#include "BLEUtil.h"
#include "BLEUuid.h"
#include "nRF51822.h"
// #define NRF_51822_DEBUG
#define BLE_STACK_EVT_MSG_BUF_SIZE (sizeof(ble_evt_t) + (GATT_MTU_SIZE_DEFAULT))
#ifndef BLE_GATTS_ATTR_TAB_SIZE
#define BLE_GATTS_ATTR_TAB_SIZE BLE_GATTS_ATTR_TAB_SIZE_DEFAULT
#endif
// Initializes all bookkeeping to the "not begun / not connected" state;
// real setup (SoftDevice enable, GATT table, advertising) happens in
// begin().
nRF51822::nRF51822() :
  BLEDevice(),

  _advDataLen(0),
  _hasScanData(false),
  _broadcastCharacteristic(NULL),

  _connectionHandle(BLE_CONN_HANDLE_INVALID),

  _txBufferCount(0),

  _numLocalCharacteristics(0),
  _localCharacteristicInfo(NULL),

  _numRemoteServices(0),
  _remoteServiceInfo(NULL),
  _remoteServiceDiscoveryIndex(0),
  _numRemoteCharacteristics(0),
  _remoteCharacteristicInfo(NULL),
  _remoteRequestInProgress(false)
{
  // Alias the API-specific bonding struct onto a flat byte buffer so it can
  // be persisted/restored as raw bytes; which struct is used depends on the
  // SoftDevice generation.
#if defined(NRF5) || defined(NRF51_S130)
  this->_encKey = (ble_gap_enc_key_t*)&this->_bondData;
  memset(&this->_bondData, 0, sizeof(this->_bondData));
#else
  this->_authStatus = (ble_gap_evt_auth_status_t*)&this->_authStatusBuffer;
  memset(&this->_authStatusBuffer, 0, sizeof(this->_authStatusBuffer));
#endif
}
// Tears down BLE state via end() (defined elsewhere in this file).
nRF51822::~nRF51822() {
  this->end();
}
// Rebuilds the advertising and scan-response payloads from the given EIR
// records and hands them to the SoftDevice.
//
// NOTE(review): neither _advData nor the 31-byte srData buffer is bounds-
// checked against the BLE advertising payload limit — confirm that callers
// guarantee the records fit.  Also, _hasScanData is set when scan data is
// present but never reset to false when scanDataSize == 0 — verify that is
// intentional.
void nRF51822::updateAdvertisementData(unsigned char advertisementDataSize,
                                        BLEEirData *advertisementData,
                                        unsigned char scanDataSize,
                                        BLEEirData *scanData)
{
  unsigned char srData[31];
  unsigned char srDataLen = 0;

  this->_advDataLen = 0;

  // flags record: length 2, AD type 0x01 (Flags), value 0x06.
  this->_advData[this->_advDataLen + 0] = 2;
  this->_advData[this->_advDataLen + 1] = 0x01;
  this->_advData[this->_advDataLen + 2] = 0x06;

  this->_advDataLen += 3;

  // Each EIR record is encoded as: length (type + payload), type, payload.
  if (advertisementDataSize && advertisementData) {
    for (int i = 0; i < advertisementDataSize; i++) {
      this->_advData[this->_advDataLen + 0] = advertisementData[i].length + 1;
      this->_advData[this->_advDataLen + 1] = advertisementData[i].type;
      this->_advDataLen += 2;

      memcpy(&this->_advData[this->_advDataLen], advertisementData[i].data, advertisementData[i].length);

      this->_advDataLen += advertisementData[i].length;
    }
  }

  // Scan-response records use the same encoding in a separate buffer.
  if (scanDataSize && scanData) {
    for (int i = 0; i < scanDataSize; i++) {
      srData[srDataLen + 0] = scanData[i].length + 1;
      srData[srDataLen + 1] = scanData[i].type;
      srDataLen += 2;

      memcpy(&srData[srDataLen], scanData[i].data, scanData[i].length);

      srDataLen += scanData[i].length;
      _hasScanData = true;
    }
  }

  sd_ble_gap_adv_data_set(this->_advData, this->_advDataLen, srData, srDataLen);
}
void nRF51822::begin(unsigned char advertisementDataSize,
BLEEirData *advertisementData,
unsigned char scanDataSize,
BLEEirData *scanData,
BLELocalAttribute** localAttributes,
unsigned char numLocalAttributes,
BLERemoteAttribute** remoteAttributes,
unsigned char numRemoteAttributes)
{
#ifdef __RFduino__
sd_softdevice_enable(NRF_CLOCK_LFCLKSRC_SYNTH_250_PPM, NULL);
#elif defined(NRF5) && !defined(S110)
#if defined(USE_LFRC)
nrf_clock_lf_cfg_t cfg = {
.source = NRF_CLOCK_LF_SRC_RC,
.rc_ctiv = 8, //16
.rc_temp_ctiv = 2,
.xtal_accuracy = NRF_CLOCK_LF_XTAL_ACCURACY_250_PPM
};
#elif defined(USE_LFSYNT)
nrf_clock_lf_cfg_t cfg = {
.source = NRF_CLOCK_LF_SRC_SYNTH,
.rc_ctiv = 0,
.rc_temp_ctiv = 0,
.xtal_accuracy = NRF_CLOCK_LF_XTAL_ACCURACY_250_PPM
};
#else
//default USE_LFXO
nrf_clock_lf_cfg_t cfg = {
.source = NRF_CLOCK_LF_SRC_XTAL,
.rc_ctiv = 0,
.rc_temp_ctiv = 0,
.xtal_accuracy = NRF_CLOCK_LF_XTAL_ACCURACY_20_PPM
};
#endif
sd_softdevice_enable(&cfg, NULL);
#else
#if defined(USE_LFRC)
sd_softdevice_enable(NRF_CLOCK_LFCLKSRC_RC_250_PPM_250MS_CALIBRATION, NULL);
#elif defined(USE_LFSYNT)
sd_softdevice_enable(NRF_CLOCK_LFCLKSRC_SYNTH_250_PPM, NULL);
#else
//default USE_LFXO
sd_softdevice_enable(NRF_CLOCK_LFCLKSRC_XTAL_20_PPM, NULL);
#endif
#endif
#if defined(NRF5) && !defined(S110)
extern uint32_t __data_start__;
uint32_t app_ram_base = (uint32_t) &__data_start__;
ble_enable_params_t enableParams;
memset(&enableParams, 0, sizeof(ble_enable_params_t));
enableParams.common_enable_params.vs_uuid_count = 10;
enableParams.gatts_enable_params.attr_tab_size = BLE_GATTS_ATTR_TAB_SIZE;
enableParams.gatts_enable_params.service_changed = 1;
enableParams.gap_enable_params.periph_conn_count = 1;
enableParams.gap_enable_params.central_conn_count = 0;
enableParams.gap_enable_params.central_sec_count = 0;
sd_ble_enable(&enableParams, &app_ram_base);
#elif defined(S110)
ble_enable_params_t enableParams = {
.gatts_enable_params = {
.service_changed = true,
.attr_tab_size = BLE_GATTS_ATTR_TAB_SIZE
}
};
sd_ble_enable(&enableParams);
#elif defined(NRF51_S130)
ble_enable_params_t enableParams = {
.gatts_enable_params = {
.service_changed = true
}
};
sd_ble_enable(&enableParams);
#endif
#ifdef NRF_51822_DEBUG
ble_version_t version;
sd_ble_version_get(&version);
Serial.print(F("version = "));
Serial.print(version.version_number);
Serial.print(F(" "));
Serial.print(version.company_id);
Serial.print(F(" "));
Serial.print(version.subversion_number);
Serial.println();
#endif
ble_gap_conn_params_t gap_conn_params;
gap_conn_params.min_conn_interval = 40; // in 1.25ms units
gap_conn_params.max_conn_interval = 80; // in 1.25ms unit
gap_conn_params.slave_latency = 0;
gap_conn_params.conn_sup_timeout = 4000 / 10; // in 10ms unit
sd_ble_gap_ppcp_set(&gap_conn_params);
sd_ble_gap_tx_power_set(0);
updateAdvertisementData(advertisementDataSize, advertisementData, scanDataSize, scanData);
sd_ble_gap_appearance_set(0);
for (int i = 0; i < numLocalAttributes; i++) {
BLELocalAttribute *localAttribute = localAttributes[i];
if (localAttribute->type() == BLETypeCharacteristic) {
this->_numLocalCharacteristics++;
}
}
this->_numLocalCharacteristics -= 3; // 0x2a00, 0x2a01, 0x2a05
this->_localCharacteristicInfo = (struct localCharacteristicInfo*)malloc(sizeof(struct localCharacteristicInfo) * this->_numLocalCharacteristics);
unsigned char localCharacteristicIndex = 0;
uint16_t handle = 0;
BLEService *lastService = NULL;
for (int i = 0; i < numLocalAttributes; i++) {
BLELocalAttribute *localAttribute = localAttributes[i];
BLEUuid uuid = BLEUuid(localAttribute->uuid());
const unsigned char* uuidData = uuid.data();
unsigned char value[255];
ble_uuid_t nordicUUID;
if (uuid.length() == 2) {
nordicUUID.uuid = (uuidData[1] << 8) | uuidData[0];
nordicUUID.type = BLE_UUID_TYPE_BLE;
} else {
unsigned char uuidDataTemp[16];
memcpy(&uuidDataTemp, uuidData, sizeof(uuidDataTemp));
nordicUUID.uuid = (uuidData[13] << 8) | uuidData[12];
uuidDataTemp[13] = 0;
uuidDataTemp[12] = 0;
sd_ble_uuid_vs_add((ble_uuid128_t*)&uuidDataTemp, &nordicUUID.type);
}
if (localAttribute->type() == BLETypeService) {
BLEService *service = (BLEService *)localAttribute;
if (strcmp(service->uuid(), "1800") == 0 || strcmp(service->uuid(), "1801") == 0) {
continue; // skip
}
sd_ble_gatts_service_add(BLE_GATTS_SRVC_TYPE_PRIMARY, &nordicUUID, &handle);
lastService = service;
} else if (localAttribute->type() == BLETypeCharacteristic) {
BLECharacteristic *characteristic = (BLECharacteristic *)localAttribute;
if (strcmp(characteristic->uuid(), "2a00") == 0) {
ble_gap_conn_sec_mode_t secMode;
BLE_GAP_CONN_SEC_MODE_SET_OPEN(&secMode); // no security is needed
sd_ble_gap_device_name_set(&secMode, characteristic->value(), characteristic->valueLength());
} else if (strcmp(characteristic->uuid(), "2a01") == 0) {
const uint16_t *appearance = (const uint16_t*)characteristic->value();
sd_ble_gap_appearance_set(*appearance);
} else if (strcmp(characteristic->uuid(), "2a05") == 0) {
// do nothing
} else {
uint8_t properties = characteristic->properties() & 0xfe;
uint16_t valueLength = characteristic->valueLength();
this->_localCharacteristicInfo[localCharacteristicIndex].characteristic = characteristic;
this->_localCharacteristicInfo[localCharacteristicIndex].notifySubscribed = false;
this->_localCharacteristicInfo[localCharacteristicIndex].indicateSubscribed = false;
this->_localCharacteristicInfo[localCharacteristicIndex].service = lastService;
ble_gatts_char_md_t characteristicMetaData;
ble_gatts_attr_md_t clientCharacteristicConfigurationMetaData;
ble_gatts_attr_t characteristicValueAttribute;
ble_gatts_attr_md_t characteristicValueAttributeMetaData;
memset(&characteristicMetaData, 0, sizeof(characteristicMetaData));
memcpy(&characteristicMetaData.char_props, &properties, 1);
characteristicMetaData.p_char_user_desc = NULL;
characteristicMetaData.p_char_pf = NULL;
characteristicMetaData.p_user_desc_md = NULL;
characteristicMetaData.p_cccd_md = NULL;
characteristicMetaData.p_sccd_md = NULL;
if (properties & (BLENotify | BLEIndicate)) {
memset(&clientCharacteristicConfigurationMetaData, 0, sizeof(clientCharacteristicConfigurationMetaData));
BLE_GAP_CONN_SEC_MODE_SET_OPEN(&clientCharacteristicConfigurationMetaData.read_perm);
BLE_GAP_CONN_SEC_MODE_SET_OPEN(&clientCharacteristicConfigurationMetaData.write_perm);
clientCharacteristicConfigurationMetaData.vloc = BLE_GATTS_VLOC_STACK;
characteristicMetaData.p_cccd_md = &clientCharacteristicConfigurationMetaData;
}
memset(&characteristicValueAttributeMetaData, 0, sizeof(characteristicValueAttributeMetaData));
if (properties & (BLERead | BLENotify | BLEIndicate)) {
if (this->_bondStore) {
BLE_GAP_CONN_SEC_MODE_SET_ENC_NO_MITM(&characteristicValueAttributeMetaData.read_perm);
} else {
BLE_GAP_CONN_SEC_MODE_SET_OPEN(&characteristicValueAttributeMetaData.read_perm);
}
}
if (properties & (BLEWriteWithoutResponse | BLEWrite)) {
if (this->_bondStore) {
BLE_GAP_CONN_SEC_MODE_SET_ENC_NO_MITM(&characteristicValueAttributeMetaData.write_perm);
} else {
BLE_GAP_CONN_SEC_MODE_SET_OPEN(&characteristicValueAttributeMetaData.write_perm);
}
}
characteristicValueAttributeMetaData.vloc = BLE_GATTS_VLOC_STACK;
characteristicValueAttributeMetaData.rd_auth = 0;
characteristicValueAttributeMetaData.wr_auth = 0;
characteristicValueAttributeMetaData.vlen = !characteristic->fixedLength();
for (int j = (i + 1); j < numLocalAttributes; j++) {
localAttribute = localAttributes[j];
if (localAttribute->type() != BLETypeDescriptor) {
break;
}
BLEDescriptor *descriptor = (BLEDescriptor *)localAttribute;
if (strcmp(descriptor->uuid(), "2901") == 0) {
characteristicMetaData.p_char_user_desc = (uint8_t*)descriptor->value();
characteristicMetaData.char_user_desc_max_size = descriptor->valueLength();
characteristicMetaData.char_user_desc_size = descriptor->valueLength();
} else if (strcmp(descriptor->uuid(), "2904") == 0) {
characteristicMetaData.p_char_pf = (ble_gatts_char_pf_t *)descriptor->value();
}
}
memset(&characteristicValueAttribute, 0, sizeof(characteristicValueAttribute));
characteristicValueAttribute.p_uuid = &nordicUUID;
characteristicValueAttribute.p_attr_md = &characteristicValueAttributeMetaData;
characteristicValueAttribute.init_len = valueLength;
characteristicValueAttribute.init_offs = 0;
characteristicValueAttribute.max_len = characteristic->valueSize();
characteristicValueAttribute.p_value = NULL;
sd_ble_gatts_characteristic_add(BLE_GATT_HANDLE_INVALID, &characteristicMetaData, &characteristicValueAttribute, &this->_localCharacteristicInfo[localCharacteristicIndex].handles);
if (valueLength) {
for (int j = 0; j < valueLength; j++) {
value[j] = (*characteristic)[j];
}
sd_ble_gatts_value_set(this->_localCharacteristicInfo[localCharacteristicIndex].handles.value_handle, 0, &valueLength, value);
}
localCharacteristicIndex++;
}
} else if (localAttribute->type() == BLETypeDescriptor) {
BLEDescriptor *descriptor = (BLEDescriptor *)localAttribute;
if (strcmp(descriptor->uuid(), "2901") == 0 ||
strcmp(descriptor->uuid(), "2902") == 0 ||
strcmp(descriptor->uuid(), "2903") == 0 ||
strcmp(descriptor->uuid(), "2904") == 0) {
continue; // skip
}
uint16_t valueLength = descriptor->valueLength();
ble_gatts_attr_t descriptorAttribute;
ble_gatts_attr_md_t descriptorMetaData;
memset(&descriptorAttribute, 0, sizeof(descriptorAttribute));
memset(&descriptorMetaData, 0, sizeof(descriptorMetaData));
descriptorMetaData.vloc = BLE_GATTS_VLOC_STACK;
descriptorMetaData.vlen = (valueLength == descriptor->valueLength()) ? 0 : 1;
if (this->_bondStore) {
BLE_GAP_CONN_SEC_MODE_SET_ENC_NO_MITM(&descriptorMetaData.read_perm);
} else {
BLE_GAP_CONN_SEC_MODE_SET_OPEN(&descriptorMetaData.read_perm);
}
descriptorAttribute.p_uuid = &nordicUUID;
descriptorAttribute.p_attr_md = &descriptorMetaData;
descriptorAttribute.init_len = valueLength;
descriptorAttribute.max_len = descriptor->valueLength();
descriptorAttribute.p_value = NULL;
sd_ble_gatts_descriptor_add(BLE_GATT_HANDLE_INVALID, &descriptorAttribute, &handle);
if (valueLength) {
for (int j = 0; j < valueLength; j++) {
value[j] = (*descriptor)[j];
}
sd_ble_gatts_value_set(handle, 0, &valueLength, value);
}
}
}
if ( numRemoteAttributes > 0) {
numRemoteAttributes -= 2; // 0x1801, 0x2a05
}
for (int i = 0; i < numRemoteAttributes; i++) {
BLERemoteAttribute *remoteAttribute = remoteAttributes[i];
if (remoteAttribute->type() == BLETypeService) {
this->_numRemoteServices++;
} else if (remoteAttribute->type() == BLETypeCharacteristic) {
this->_numRemoteCharacteristics++;
}
}
this->_remoteServiceInfo = (struct remoteServiceInfo*)malloc(sizeof(struct remoteServiceInfo) * this->_numRemoteServices);
this->_remoteCharacteristicInfo = (struct remoteCharacteristicInfo*)malloc(sizeof(struct remoteCharacteristicInfo) * this->_numRemoteCharacteristics);
BLERemoteService *lastRemoteService = NULL;
unsigned char remoteServiceIndex = 0;
unsigned char remoteCharacteristicIndex = 0;
for (int i = 0; i < numRemoteAttributes; i++) {
BLERemoteAttribute *remoteAttribute = remoteAttributes[i];
BLEUuid uuid = BLEUuid(remoteAttribute->uuid());
const unsigned char* uuidData = uuid.data();
ble_uuid_t nordicUUID;
if (uuid.length() == 2) {
nordicUUID.uuid = (uuidData[1] << 8) | uuidData[0];
nordicUUID.type = BLE_UUID_TYPE_BLE;
} else {
unsigned char uuidDataTemp[16];
memcpy(&uuidDataTemp, uuidData, sizeof(uuidDataTemp));
nordicUUID.uuid = (uuidData[13] << 8) | uuidData[12];
uuidDataTemp[13] = 0;
uuidDataTemp[12] = 0;
sd_ble_uuid_vs_add((ble_uuid128_t*)&uuidDataTemp, &nordicUUID.type);
}
if (remoteAttribute->type() == BLETypeService) {
this->_remoteServiceInfo[remoteServiceIndex].service = lastRemoteService = (BLERemoteService *)remoteAttribute;
this->_remoteServiceInfo[remoteServiceIndex].uuid = nordicUUID;
memset(&this->_remoteServiceInfo[remoteServiceIndex].handlesRange, 0, sizeof(this->_remoteServiceInfo[remoteServiceIndex].handlesRange));
remoteServiceIndex++;
} else if (remoteAttribute->type() == BLETypeCharacteristic) {
this->_remoteCharacteristicInfo[remoteCharacteristicIndex].characteristic = (BLERemoteCharacteristic *)remoteAttribute;
this->_remoteCharacteristicInfo[remoteCharacteristicIndex].service = lastRemoteService;
this->_remoteCharacteristicInfo[remoteCharacteristicIndex].uuid = nordicUUID;
memset(&this->_remoteCharacteristicInfo[remoteCharacteristicIndex].properties, 0, sizeof(this->_remoteCharacteristicInfo[remoteCharacteristicIndex].properties));
this->_remoteCharacteristicInfo[remoteCharacteristicIndex].valueHandle = 0;
remoteCharacteristicIndex++;
}
}
if (this->_bondStore && this->_bondStore->hasData()) {
#ifdef NRF_51822_DEBUG
Serial.println(F("Restoring bond data"));
#endif
#if defined(NRF5) || defined(NRF51_S130)
this->_bondStore->getData(this->_bondData, 0, sizeof(this->_bondData));
#else
this->_bondStore->getData(this->_authStatusBuffer, 0, sizeof(this->_authStatusBuffer));
#endif
}
this->startAdvertising();
#ifdef __RFduino__
RFduinoBLE_enabled = 1;
#endif
}
// Drains one pending BLE event from the SoftDevice (if any) and dispatches it:
// updates connection/tx-buffer bookkeeping, handles security/bonding requests,
// drives GATT client service/characteristic discovery, and forwards
// writes/reads/notifications to the registered _eventListener.
// Intended to be called repeatedly from the sketch's main loop.
void nRF51822::poll() {
  // Event buffer must satisfy the SoftDevice's alignment requirement.
  uint32_t evtBuf[BLE_STACK_EVT_MSG_BUF_SIZE] __attribute__ ((__aligned__(BLE_EVTS_PTR_ALIGNMENT)));
  uint16_t evtLen = sizeof(evtBuf);
  ble_evt_t* bleEvt = (ble_evt_t*)evtBuf;

  if (sd_ble_evt_get((uint8_t*)evtBuf, &evtLen) == NRF_SUCCESS) {
    switch (bleEvt->header.evt_id) {
      case BLE_EVT_TX_COMPLETE:
#ifdef NRF_51822_DEBUG
        Serial.print(F("Evt TX complete "));
        Serial.println(bleEvt->evt.common_evt.params.tx_complete.count);
#endif
        // Transmissions completed: return the buffers to our local credit count.
        this->_txBufferCount += bleEvt->evt.common_evt.params.tx_complete.count;
        break;

      case BLE_GAP_EVT_CONNECTED:
#ifdef NRF_51822_DEBUG
        char address[18];
        BLEUtil::addressToString(bleEvt->evt.gap_evt.params.connected.peer_addr.addr, address);
        Serial.print(F("Evt Connected "));
        Serial.println(address);
#endif
        this->_connectionHandle = bleEvt->evt.gap_evt.conn_handle;

        // Reset the tx-buffer credit count from the stack (API differs per SoftDevice).
#if defined(NRF5) && !defined(S110)
        {
          uint8_t count;
          sd_ble_tx_packet_count_get(this->_connectionHandle, &count);
          this->_txBufferCount = count;
        }
#else
        sd_ble_tx_buffer_count_get(&this->_txBufferCount);
#endif

        if (this->_eventListener) {
          this->_eventListener->BLEDeviceConnected(*this, bleEvt->evt.gap_evt.params.connected.peer_addr.addr);
        }

        // Request the application's preferred connection parameters, if configured.
        if (this->_minimumConnectionInterval >= BLE_GAP_CP_MIN_CONN_INTVL_MIN &&
            this->_maximumConnectionInterval <= BLE_GAP_CP_MAX_CONN_INTVL_MAX) {
          ble_gap_conn_params_t gap_conn_params;

          gap_conn_params.min_conn_interval = this->_minimumConnectionInterval; // in 1.25ms units
          gap_conn_params.max_conn_interval = this->_maximumConnectionInterval; // in 1.25ms unit
          gap_conn_params.slave_latency     = 0;
          gap_conn_params.conn_sup_timeout  = 4000 / 10; // in 10ms unit

          sd_ble_gap_conn_param_update(this->_connectionHandle, &gap_conn_params);
        }

        // Kick off remote service discovery when we act as a GATT client.
        if (this->_numRemoteServices > 0) {
          sd_ble_gattc_primary_services_discover(this->_connectionHandle, 1, NULL);
        }
        break;

      case BLE_GAP_EVT_DISCONNECTED:
#ifdef NRF_51822_DEBUG
        Serial.println(F("Evt Disconnected"));
#endif
        this->_connectionHandle = BLE_CONN_HANDLE_INVALID;
        this->_txBufferCount = 0;

        // Clear local CCCD subscription state and notify listeners of unsubscribes.
        for (int i = 0; i < this->_numLocalCharacteristics; i++) {
          struct localCharacteristicInfo* localCharacteristicInfo = &this->_localCharacteristicInfo[i];

          localCharacteristicInfo->notifySubscribed = false;
          localCharacteristicInfo->indicateSubscribed = false;

          if (localCharacteristicInfo->characteristic->subscribed()) {
            if (this->_eventListener) {
              this->_eventListener->BLEDeviceCharacteristicSubscribedChanged(*this, *localCharacteristicInfo->characteristic, false);
            }
          }
        }

        if (this->_eventListener) {
          this->_eventListener->BLEDeviceDisconnected(*this);
        }

        // clear remote handle info
        for (int i = 0; i < this->_numRemoteServices; i++) {
          memset(&this->_remoteServiceInfo[i].handlesRange, 0, sizeof(this->_remoteServiceInfo[i].handlesRange));
        }

        for (int i = 0; i < this->_numRemoteCharacteristics; i++) {
          memset(&this->_remoteCharacteristicInfo[i].properties, 0, sizeof(this->_remoteCharacteristicInfo[i].properties));
          this->_remoteCharacteristicInfo[i].valueHandle = 0;
        }

        this->_remoteRequestInProgress = false;

        this->startAdvertising();
        break;

      case BLE_GAP_EVT_CONN_PARAM_UPDATE:
#ifdef NRF_51822_DEBUG
        Serial.print(F("Evt Conn Param Update 0x"));
        Serial.print(bleEvt->evt.gap_evt.params.conn_param_update.conn_params.min_conn_interval, HEX);
        Serial.print(F(" 0x"));
        Serial.print(bleEvt->evt.gap_evt.params.conn_param_update.conn_params.max_conn_interval, HEX);
        Serial.print(F(" 0x"));
        Serial.print(bleEvt->evt.gap_evt.params.conn_param_update.conn_params.slave_latency, HEX);
        Serial.print(F(" 0x"));
        Serial.print(bleEvt->evt.gap_evt.params.conn_param_update.conn_params.conn_sup_timeout, HEX);
        Serial.println();
#endif
        break;

      case BLE_GAP_EVT_SEC_PARAMS_REQUEST:
#ifdef NRF_51822_DEBUG
        Serial.print(F("Evt Sec Params Request "));
#if !defined(NRF5) && !defined(NRF51_S130)
        Serial.print(bleEvt->evt.gap_evt.params.sec_params_request.peer_params.timeout);
        Serial.print(F(" "));
#endif
        Serial.print(bleEvt->evt.gap_evt.params.sec_params_request.peer_params.bond);
        Serial.print(F(" "));
        Serial.print(bleEvt->evt.gap_evt.params.sec_params_request.peer_params.mitm);
        Serial.print(F(" "));
        Serial.print(bleEvt->evt.gap_evt.params.sec_params_request.peer_params.io_caps);
        Serial.print(F(" "));
        Serial.print(bleEvt->evt.gap_evt.params.sec_params_request.peer_params.oob);
        Serial.print(F(" "));
        Serial.print(bleEvt->evt.gap_evt.params.sec_params_request.peer_params.min_key_size);
        Serial.print(F(" "));
        Serial.print(bleEvt->evt.gap_evt.params.sec_params_request.peer_params.max_key_size);
        Serial.println();
#endif
        if (this->_bondStore && !this->_bondStore->hasData()) {
          // only allow bonding if bond store exists and there is no data
          ble_gap_sec_params_t gapSecParams;

          memset(&gapSecParams, 0x00, sizeof(ble_gap_sec_params_t));

#if defined(NRF5) && !defined(S110)
          gapSecParams.kdist_own.enc = 1;
#elif defined(NRF51_S130)
          gapSecParams.kdist_periph.enc = 1;
#elif !defined(NRF5)
          gapSecParams.timeout = 30; // must be 30s
#endif
          gapSecParams.bond = true;
          gapSecParams.mitm = false;
          gapSecParams.io_caps = BLE_GAP_IO_CAPS_NONE;
          gapSecParams.oob = false;
          gapSecParams.min_key_size = 7;
          gapSecParams.max_key_size = 16;

#if defined(NRF5) && !defined(S110)
          ble_gap_sec_keyset_t keyset;

          // We only hand out our own encryption key; peer keys are not stored.
          keyset.keys_peer.p_enc_key = NULL;
          keyset.keys_peer.p_id_key = NULL;
          keyset.keys_peer.p_sign_key = NULL;
          keyset.keys_own.p_enc_key = this->_encKey;
          keyset.keys_own.p_id_key = NULL;
          keyset.keys_own.p_sign_key = NULL;

          sd_ble_gap_sec_params_reply(this->_connectionHandle, BLE_GAP_SEC_STATUS_SUCCESS, &gapSecParams, &keyset);
#elif defined(NRF51_S130) || defined(S110)
          ble_gap_sec_keyset_t keyset;

          keyset.keys_central.p_enc_key = NULL;
          keyset.keys_central.p_id_key = NULL;
          keyset.keys_central.p_sign_key = NULL;
          keyset.keys_periph.p_enc_key = this->_encKey;
          keyset.keys_periph.p_id_key = NULL;
          keyset.keys_periph.p_sign_key = NULL;

          sd_ble_gap_sec_params_reply(this->_connectionHandle, BLE_GAP_SEC_STATUS_SUCCESS, &gapSecParams, &keyset);
#else
          sd_ble_gap_sec_params_reply(this->_connectionHandle, BLE_GAP_SEC_STATUS_SUCCESS, &gapSecParams);
#endif
        } else {
          // No bond store (or already bonded): refuse pairing.
#if defined(NRF5) || defined(NRF51_S130)
          sd_ble_gap_sec_params_reply(this->_connectionHandle, BLE_GAP_SEC_STATUS_PAIRING_NOT_SUPP, NULL, NULL);
#else
          sd_ble_gap_sec_params_reply(this->_connectionHandle, BLE_GAP_SEC_STATUS_PAIRING_NOT_SUPP, NULL);
#endif
        }
        break;

      case BLE_GAP_EVT_SEC_INFO_REQUEST:
#ifdef NRF_51822_DEBUG
        Serial.print(F("Evt Sec Info Request "));
        // Serial.print(bleEvt->evt.gap_evt.params.sec_info_request.peer_addr);
        // Serial.print(F(" "));
#if defined(NRF5) || defined(NRF51_S130)
        Serial.print(bleEvt->evt.gap_evt.params.sec_info_request.master_id.ediv);
#else
        Serial.print(bleEvt->evt.gap_evt.params.sec_info_request.div);
#endif
        Serial.print(F(" "));
        Serial.print(bleEvt->evt.gap_evt.params.sec_info_request.enc_info);
        Serial.print(F(" "));
        Serial.print(bleEvt->evt.gap_evt.params.sec_info_request.id_info);
        Serial.print(F(" "));
        Serial.print(bleEvt->evt.gap_evt.params.sec_info_request.sign_info);
        Serial.println();
#endif
        // Re-establish encryption with a previously bonded peer: only reply with
        // our stored key when the peer's EDIV/DIV matches what we saved.
#if defined(NRF5) || defined(NRF51_S130)
        if (this->_encKey->master_id.ediv == bleEvt->evt.gap_evt.params.sec_info_request.master_id.ediv) {
          sd_ble_gap_sec_info_reply(this->_connectionHandle, &this->_encKey->enc_info, NULL, NULL);
        } else {
          sd_ble_gap_sec_info_reply(this->_connectionHandle, NULL, NULL, NULL);
        }
#else
        if (this->_authStatus->periph_keys.enc_info.div == bleEvt->evt.gap_evt.params.sec_info_request.div) {
          sd_ble_gap_sec_info_reply(this->_connectionHandle, &this->_authStatus->periph_keys.enc_info, NULL);
        } else {
          sd_ble_gap_sec_info_reply(this->_connectionHandle, NULL, NULL);
        }
#endif
        break;

      case BLE_GAP_EVT_AUTH_STATUS:
#ifdef NRF_51822_DEBUG
        Serial.println(F("Evt Auth Status"));
        Serial.println(bleEvt->evt.gap_evt.params.auth_status.auth_status);
#endif
        if (BLE_GAP_SEC_STATUS_SUCCESS == bleEvt->evt.gap_evt.params.auth_status.auth_status) {
#if !defined(NRF5) && !defined(NRF51_S130)
          *this->_authStatus = bleEvt->evt.gap_evt.params.auth_status;
#endif
          // Persist the new bond so it survives a reset.
          if (this->_bondStore) {
#ifdef NRF_51822_DEBUG
            Serial.println(F("Storing bond data"));
#endif
#if defined(NRF5) || defined(NRF51_S130)
            this->_bondStore->putData(this->_bondData, 0, sizeof(this->_bondData));
#else
            this->_bondStore->putData(this->_authStatusBuffer, 0, sizeof(this->_authStatusBuffer));
#endif
          }

          if (this->_eventListener) {
            this->_eventListener->BLEDeviceBonded(*this);
          }
        }
        break;

      case BLE_GAP_EVT_CONN_SEC_UPDATE:
#ifdef NRF_51822_DEBUG
        Serial.print(F("Evt Conn Sec Update "));
        Serial.print(bleEvt->evt.gap_evt.params.conn_sec_update.conn_sec.sec_mode.sm);
        Serial.print(F(" "));
        Serial.print(bleEvt->evt.gap_evt.params.conn_sec_update.conn_sec.sec_mode.lv);
        Serial.print(F(" "));
        Serial.print(bleEvt->evt.gap_evt.params.conn_sec_update.conn_sec.encr_key_size);
        Serial.println();
#endif
        break;

      case BLE_GATTS_EVT_WRITE: {
#ifdef NRF_51822_DEBUG
        Serial.print(F("Evt Write, handle = "));
        Serial.println(bleEvt->evt.gatts_evt.params.write.handle, DEC);
        BLEUtil::printBuffer(bleEvt->evt.gatts_evt.params.write.data, bleEvt->evt.gatts_evt.params.write.len);
#endif
        uint16_t handle = bleEvt->evt.gatts_evt.params.write.handle;

        // The written handle is either a characteristic value or its CCCD.
        for (int i = 0; i < this->_numLocalCharacteristics; i++) {
          struct localCharacteristicInfo* localCharacteristicInfo = &this->_localCharacteristicInfo[i];

          if (localCharacteristicInfo->handles.value_handle == handle) {
            if (this->_eventListener) {
              this->_eventListener->BLEDeviceCharacteristicValueChanged(*this, *localCharacteristicInfo->characteristic, bleEvt->evt.gatts_evt.params.write.data, bleEvt->evt.gatts_evt.params.write.len);
            }
            break;
          } else if (localCharacteristicInfo->handles.cccd_handle == handle) {
            // CCCD write: bit 0 = notifications, bit 1 = indications.
            uint8_t* data = &bleEvt->evt.gatts_evt.params.write.data[0];
            uint16_t value = data[0] | (data[1] << 8);

            localCharacteristicInfo->notifySubscribed = (value & 0x0001);
            localCharacteristicInfo->indicateSubscribed = (value & 0x0002);

            bool subscribed = (localCharacteristicInfo->notifySubscribed || localCharacteristicInfo->indicateSubscribed);

            if (subscribed != localCharacteristicInfo->characteristic->subscribed()) {
              if (this->_eventListener) {
                this->_eventListener->BLEDeviceCharacteristicSubscribedChanged(*this, *localCharacteristicInfo->characteristic, subscribed);
              }
              break;
            }
          }
        }
        break;
      }

      case BLE_GATTS_EVT_SYS_ATTR_MISSING:
#ifdef NRF_51822_DEBUG
        Serial.print(F("Evt Sys Attr Missing "));
        Serial.println(bleEvt->evt.gatts_evt.params.sys_attr_missing.hint);
#endif
        // No persisted system attributes: tell the stack to use defaults.
#if defined(NRF5) || defined(NRF51_S130)
        sd_ble_gatts_sys_attr_set(this->_connectionHandle, NULL, 0, 0);
#else
        sd_ble_gatts_sys_attr_set(this->_connectionHandle, NULL, 0);
#endif
        break;

      case BLE_GATTC_EVT_PRIM_SRVC_DISC_RSP:
#ifdef NRF_51822_DEBUG
        Serial.print(F("Evt Prim Srvc Disc Rsp 0x"));
        Serial.println(bleEvt->evt.gattc_evt.gatt_status, HEX);
#endif
        if (bleEvt->evt.gattc_evt.gatt_status == BLE_GATT_STATUS_SUCCESS) {
          // Match discovered services against the ones we are interested in,
          // then continue discovery after the last reported handle range.
          uint16_t count = bleEvt->evt.gattc_evt.params.prim_srvc_disc_rsp.count;
          for (int i = 0; i < count; i++) {
            for (int j = 0; j < this->_numRemoteServices; j++) {
              if ((bleEvt->evt.gattc_evt.params.prim_srvc_disc_rsp.services[i].uuid.type == this->_remoteServiceInfo[j].uuid.type) &&
                  (bleEvt->evt.gattc_evt.params.prim_srvc_disc_rsp.services[i].uuid.uuid == this->_remoteServiceInfo[j].uuid.uuid)) {
                this->_remoteServiceInfo[j].handlesRange = bleEvt->evt.gattc_evt.params.prim_srvc_disc_rsp.services[i].handle_range;
                break;
              }
            }
          }

          uint16_t startHandle = bleEvt->evt.gattc_evt.params.prim_srvc_disc_rsp.services[count - 1].handle_range.end_handle + 1;

          sd_ble_gattc_primary_services_discover(this->_connectionHandle, startHandle, NULL);
        } else {
          // done discovering services; start characteristic discovery on the
          // first matched service.
          for (int i = 0; i < this->_numRemoteServices; i++) {
            if (this->_remoteServiceInfo[i].handlesRange.start_handle != 0 && this->_remoteServiceInfo[i].handlesRange.end_handle != 0) {
              this->_remoteServiceDiscoveryIndex = i;

              sd_ble_gattc_characteristics_discover(this->_connectionHandle, &this->_remoteServiceInfo[i].handlesRange);
              break;
            }
          }
        }
        break;

      case BLE_GATTC_EVT_CHAR_DISC_RSP:
#ifdef NRF_51822_DEBUG
        Serial.print(F("Evt Char Disc Rsp 0x"));
        Serial.println(bleEvt->evt.gattc_evt.gatt_status, HEX);
#endif
        if (bleEvt->evt.gattc_evt.gatt_status == BLE_GATT_STATUS_SUCCESS) {
          ble_gattc_handle_range_t serviceHandlesRange = this->_remoteServiceInfo[this->_remoteServiceDiscoveryIndex].handlesRange;

          uint16_t count = bleEvt->evt.gattc_evt.params.char_disc_rsp.count;

          for (int i = 0; i < count; i++) {
            for (int j = 0; j < this->_numRemoteCharacteristics; j++) {
              if ((this->_remoteServiceInfo[this->_remoteServiceDiscoveryIndex].service == this->_remoteCharacteristicInfo[j].service) &&
                  (bleEvt->evt.gattc_evt.params.char_disc_rsp.chars[i].uuid.type == this->_remoteCharacteristicInfo[j].uuid.type) &&
                  (bleEvt->evt.gattc_evt.params.char_disc_rsp.chars[i].uuid.uuid == this->_remoteCharacteristicInfo[j].uuid.uuid)) {
                this->_remoteCharacteristicInfo[j].properties = bleEvt->evt.gattc_evt.params.char_disc_rsp.chars[i].char_props;
                this->_remoteCharacteristicInfo[j].valueHandle = bleEvt->evt.gattc_evt.params.char_disc_rsp.chars[i].handle_value;
              }
            }

            // Continue discovery from the last characteristic reported.
            serviceHandlesRange.start_handle = bleEvt->evt.gattc_evt.params.char_disc_rsp.chars[i].handle_value;
          }

          sd_ble_gattc_characteristics_discover(this->_connectionHandle, &serviceHandlesRange);
        } else {
          // Current service exhausted: move on to the next matched service,
          // or report discovery complete.
          bool discoverCharacteristics = false;

          for (int i = this->_remoteServiceDiscoveryIndex + 1; i < this->_numRemoteServices; i++) {
            if (this->_remoteServiceInfo[i].handlesRange.start_handle != 0 && this->_remoteServiceInfo[i].handlesRange.end_handle != 0) {
              this->_remoteServiceDiscoveryIndex = i;

              sd_ble_gattc_characteristics_discover(this->_connectionHandle, &this->_remoteServiceInfo[i].handlesRange);
              discoverCharacteristics = true;
              break;
            }
          }

          if (!discoverCharacteristics) {
            if (this->_eventListener) {
              this->_eventListener->BLEDeviceRemoteServicesDiscovered(*this);
            }
          }
        }
        break;

      case BLE_GATTC_EVT_READ_RSP: {
#ifdef NRF_51822_DEBUG
        Serial.print(F("Evt Read Rsp 0x"));
        Serial.println(bleEvt->evt.gattc_evt.gatt_status, HEX);
        Serial.println(bleEvt->evt.gattc_evt.params.read_rsp.handle, DEC);
        BLEUtil::printBuffer(bleEvt->evt.gattc_evt.params.read_rsp.data, bleEvt->evt.gattc_evt.params.read_rsp.len);
#endif
        this->_remoteRequestInProgress = false;

        if (bleEvt->evt.gattc_evt.gatt_status == BLE_GATT_STATUS_ATTERR_INSUF_AUTHENTICATION &&
            this->_bondStore) {
          // Read rejected for lack of encryption: authenticate and let the
          // caller retry once the link is secured.
          ble_gap_sec_params_t gapSecParams;

          memset(&gapSecParams, 0x00, sizeof(ble_gap_sec_params_t));

#if defined(NRF5) && !defined(S110)
          gapSecParams.kdist_own.enc = 1;
#elif defined(NRF51_S130)
          gapSecParams.kdist_periph.enc = 1;
#elif !defined(NRF5)
          gapSecParams.timeout = 30; // must be 30s
#endif
          gapSecParams.bond = true;
          gapSecParams.mitm = false;
          gapSecParams.io_caps = BLE_GAP_IO_CAPS_NONE;
          gapSecParams.oob = false;
          gapSecParams.min_key_size = 7;
          gapSecParams.max_key_size = 16;

          sd_ble_gap_authenticate(this->_connectionHandle, &gapSecParams);
        } else {
          uint16_t handle = bleEvt->evt.gattc_evt.params.read_rsp.handle;

          for (int i = 0; i < this->_numRemoteCharacteristics; i++) {
            if (this->_remoteCharacteristicInfo[i].valueHandle == handle) {
              if (this->_eventListener) {
                this->_eventListener->BLEDeviceRemoteCharacteristicValueChanged(*this, *this->_remoteCharacteristicInfo[i].characteristic, bleEvt->evt.gattc_evt.params.read_rsp.data, bleEvt->evt.gattc_evt.params.read_rsp.len);
              }
              break;
            }
          }
        }
        break;
      }

      case BLE_GATTC_EVT_WRITE_RSP:
#ifdef NRF_51822_DEBUG
        Serial.print(F("Evt Write Rsp 0x"));
        Serial.println(bleEvt->evt.gattc_evt.gatt_status, HEX);
        Serial.println(bleEvt->evt.gattc_evt.params.write_rsp.handle, DEC);
#endif
        this->_remoteRequestInProgress = false;

        if (bleEvt->evt.gattc_evt.gatt_status == BLE_GATT_STATUS_ATTERR_INSUF_AUTHENTICATION &&
            this->_bondStore) {
          // Same insufficient-authentication recovery as the read path.
          ble_gap_sec_params_t gapSecParams;

          memset(&gapSecParams, 0x00, sizeof(ble_gap_sec_params_t));

#if defined(NRF5) && !defined(S110)
          gapSecParams.kdist_own.enc = 1;
#elif defined(NRF51_S130)
          gapSecParams.kdist_periph.enc = 1;
#elif !defined(NRF5)
          gapSecParams.timeout = 30; // must be 30s
#endif
          gapSecParams.bond = true;
          gapSecParams.mitm = false;
          gapSecParams.io_caps = BLE_GAP_IO_CAPS_NONE;
          gapSecParams.oob = false;
          gapSecParams.min_key_size = 7;
          gapSecParams.max_key_size = 16;

          sd_ble_gap_authenticate(this->_connectionHandle, &gapSecParams);
        }
        break;

      case BLE_GATTC_EVT_HVX: {
#ifdef NRF_51822_DEBUG
        Serial.print(F("Evt Hvx 0x"));
        Serial.println(bleEvt->evt.gattc_evt.gatt_status, HEX);
        Serial.println(bleEvt->evt.gattc_evt.params.hvx.handle, DEC);
#endif
        uint16_t handle = bleEvt->evt.gattc_evt.params.hvx.handle;

        // Indications must be confirmed, or the peer will not send further ones.
        if (bleEvt->evt.gattc_evt.params.hvx.type == BLE_GATT_HVX_INDICATION) {
          sd_ble_gattc_hv_confirm(this->_connectionHandle, handle);
        }

        for (int i = 0; i < this->_numRemoteCharacteristics; i++) {
          if (this->_remoteCharacteristicInfo[i].valueHandle == handle) {
            if (this->_eventListener) {
              // FIX: the HVX payload lives in params.hvx — the previous code read
              // params.read_rsp, which only worked by union-layout coincidence.
              this->_eventListener->BLEDeviceRemoteCharacteristicValueChanged(*this, *this->_remoteCharacteristicInfo[i].characteristic, bleEvt->evt.gattc_evt.params.hvx.data, bleEvt->evt.gattc_evt.params.hvx.len);
            }
            break;
          }
        }
        break;
      }

      default:
#ifdef NRF_51822_DEBUG
        Serial.print(F("bleEvt->header.evt_id = 0x"));
        Serial.print(bleEvt->header.evt_id, HEX);
        Serial.print(F(" "));
        Serial.println(bleEvt->header.evt_len);
#endif
        break;
    }
  }

  // sd_app_evt_wait();
}
// Shuts down the SoftDevice and releases all dynamically allocated
// attribute bookkeeping.
//
// FIX: the previous version freed the arrays but left the pointers dangling,
// so calling end() twice (or any later method touching these arrays) was a
// double-free / use-after-free. The pointers are now reset to NULL; free(NULL)
// is a defined no-op, so the explicit null checks are no longer needed.
void nRF51822::end() {
  sd_softdevice_disable();

  free(this->_remoteCharacteristicInfo);
  this->_remoteCharacteristicInfo = NULL;

  free(this->_remoteServiceInfo);
  this->_remoteServiceInfo = NULL;

  free(this->_localCharacteristicInfo);
  this->_localCharacteristicInfo = NULL;

  this->_numLocalCharacteristics = 0;
  this->_numRemoteServices = 0;
  this->_numRemoteCharacteristics = 0;
}
// Pushes the current value of a local characteristic into the GATT table and,
// if a central has subscribed, sends a notification and/or indication.
// Returns false when a subscribed transfer could not be queued because no
// SoftDevice tx buffers were available; true otherwise.
bool nRF51822::updateCharacteristicValue(BLECharacteristic& characteristic) {
  bool success = true;

  for (int index = 0; index < this->_numLocalCharacteristics; index++) {
    struct localCharacteristicInfo* info = &this->_localCharacteristicInfo[index];

    if (info->characteristic != &characteristic) {
      continue;
    }

    // Keep the broadcast advertisement payload in sync with the new value.
    if (&characteristic == this->_broadcastCharacteristic) {
      this->broadcastCharacteristic(characteristic);
    }

    uint16_t valueLength = characteristic.valueLength();

    sd_ble_gatts_value_set(info->handles.value_handle, 0, &valueLength, characteristic.value());

    // p_data == NULL makes the stack send the value just stored in the table.
    ble_gatts_hvx_params_t hvxParams;
    memset(&hvxParams, 0, sizeof(hvxParams));
    hvxParams.handle = info->handles.value_handle;
    hvxParams.offset = 0;
    hvxParams.p_data = NULL;
    hvxParams.p_len  = &valueLength;

    if (info->notifySubscribed) {
      if (this->_txBufferCount > 0) {
        this->_txBufferCount--;
        hvxParams.type = BLE_GATT_HVX_NOTIFICATION;
        sd_ble_gatts_hvx(this->_connectionHandle, &hvxParams);
      } else {
        success = false; // out of tx buffers
      }
    }

    if (info->indicateSubscribed) {
      if (this->_txBufferCount > 0) {
        this->_txBufferCount--;
        hvxParams.type = BLE_GATT_HVX_INDICATION;
        sd_ble_gatts_hvx(this->_connectionHandle, &hvxParams);
      } else {
        success = false; // out of tx buffers
      }
    }
  }

  return success;
}
// Embeds a broadcastable characteristic's value into the advertisement data
// as a Service Data AD structure (type 0x16: 16-bit service UUID + value).
// Returns true when the payload fit into the 31-byte advertisement limit and
// was handed to the stack; false otherwise.
bool nRF51822::broadcastCharacteristic(BLECharacteristic& characteristic) {
  bool success = false;

  for (int index = 0; index < this->_numLocalCharacteristics; index++) {
    struct localCharacteristicInfo* info = &this->_localCharacteristicInfo[index];

    if (info->characteristic != &characteristic) {
      continue;
    }

    if ((characteristic.properties() & BLEBroadcast) && info->service) {
      unsigned char advData[31];
      unsigned char advDataLen = this->_advDataLen;

      // Start from the advertisement data built in begin().
      memcpy(advData, this->_advData, advDataLen);

      // AD structure adds: length, type, 2-byte UUID, then the value.
      advDataLen += (4 + characteristic.valueLength());

      if (advDataLen <= 31) {
        BLEUuid uuid = BLEUuid(info->service->uuid());

        advData[this->_advDataLen + 0] = 3 + characteristic.valueLength();
        advData[this->_advDataLen + 1] = 0x16;

        memcpy(&advData[this->_advDataLen + 2], uuid.data(), 2);
        memcpy(&advData[this->_advDataLen + 4], characteristic.value(), characteristic.valueLength());

        sd_ble_gap_adv_data_set(advData, advDataLen, NULL, 0); // update advertisement data

        success = true;
        this->_broadcastCharacteristic = &characteristic;
      }
    }
    break; // matching characteristic handled; stop scanning
  }

  return success;
}
// A notification can be queued only while the SoftDevice still has a free
// transmit buffer; the characteristic itself does not matter here.
bool nRF51822::canNotifyCharacteristic(BLECharacteristic& /*characteristic*/) {
  return this->_txBufferCount > 0;
}
// Indications share the same transmit-buffer pool as notifications, so the
// check is identical: at least one free buffer must remain.
bool nRF51822::canIndicateCharacteristic(BLECharacteristic& /*characteristic*/) {
  return this->_txBufferCount > 0;
}
// True when the remote characteristic has been discovered (valueHandle set),
// advertises the read property, and no other remote request is in flight.
bool nRF51822::canReadRemoteCharacteristic(BLERemoteCharacteristic& characteristic) {
  for (int index = 0; index < this->_numRemoteCharacteristics; index++) {
    struct remoteCharacteristicInfo* info = &this->_remoteCharacteristicInfo[index];

    if (info->characteristic == &characteristic) {
      return info->valueHandle &&
             info->properties.read &&
             !this->_remoteRequestInProgress;
    }
  }

  return false;
}
// Issues a GATT client read for the remote characteristic. The result arrives
// asynchronously via BLE_GATTC_EVT_READ_RSP in poll(). Returns true when the
// read request was accepted by the stack.
bool nRF51822::readRemoteCharacteristic(BLERemoteCharacteristic& characteristic) {
  bool success = false;

  for (int index = 0; index < this->_numRemoteCharacteristics; index++) {
    struct remoteCharacteristicInfo* info = &this->_remoteCharacteristicInfo[index];

    if (info->characteristic != &characteristic) {
      continue;
    }

    if (info->valueHandle && info->properties.read) {
      this->_remoteRequestInProgress = true;

      success = (sd_ble_gattc_read(this->_connectionHandle, info->valueHandle, 0) == NRF_SUCCESS);
    }
    break;
  }

  return success;
}
// A remote write is possible once discovery has filled in the value handle.
// Acknowledged writes additionally require that no other remote request is
// pending; write-without-response only needs a free tx buffer.
bool nRF51822::canWriteRemoteCharacteristic(BLERemoteCharacteristic& characteristic) {
  bool success = false;

  for (int index = 0; index < this->_numRemoteCharacteristics; index++) {
    struct remoteCharacteristicInfo* info = &this->_remoteCharacteristicInfo[index];

    if (info->characteristic != &characteristic) {
      continue;
    }

    if (info->valueHandle) {
      if (info->properties.write) {
        success = !this->_remoteRequestInProgress;
      } else if (info->properties.write_wo_resp) {
        success = (this->_txBufferCount > 0);
      }
    }
    break;
  }

  return success;
}
// Writes `length` bytes to the remote characteristic, preferring an
// acknowledged write request when the peer supports it and falling back to
// write-without-response otherwise. Consumes one tx buffer. Returns true when
// the stack accepted the operation.
bool nRF51822::writeRemoteCharacteristic(BLERemoteCharacteristic& characteristic, const unsigned char value[], unsigned char length) {
  bool success = false;

  for (int index = 0; index < this->_numRemoteCharacteristics; index++) {
    struct remoteCharacteristicInfo* info = &this->_remoteCharacteristicInfo[index];

    if (info->characteristic != &characteristic) {
      continue;
    }

    bool writable = info->properties.write_wo_resp || info->properties.write;

    if (info->valueHandle && writable && (this->_txBufferCount > 0)) {
      ble_gattc_write_params_t writeParams;

      writeParams.write_op = info->properties.write ? BLE_GATT_OP_WRITE_REQ : BLE_GATT_OP_WRITE_CMD;
#ifndef __RFduino__
      writeParams.flags = 0;
#endif
      writeParams.handle  = info->valueHandle;
      writeParams.offset  = 0;
      writeParams.len     = length;
      writeParams.p_value = (uint8_t*)value;

      this->_remoteRequestInProgress = true;
      this->_txBufferCount--;

      success = (sd_ble_gattc_write(this->_connectionHandle, &writeParams) == NRF_SUCCESS);
    }
    break;
  }

  return success;
}
// True when the remote characteristic has been discovered and supports
// either notifications or indications.
bool nRF51822::canSubscribeRemoteCharacteristic(BLERemoteCharacteristic& characteristic) {
  for (int index = 0; index < this->_numRemoteCharacteristics; index++) {
    struct remoteCharacteristicInfo* info = &this->_remoteCharacteristicInfo[index];

    if (info->characteristic == &characteristic) {
      return info->valueHandle &&
             (info->properties.notify || info->properties.indicate);
    }
  }

  return false;
}
// Subscribes to notifications/indications from a remote characteristic
// by writing its Client Characteristic Configuration Descriptor (CCCD).
// Returns true if the SoftDevice accepted the write request.
bool nRF51822::subscribeRemoteCharacteristic(BLERemoteCharacteristic& characteristic) {
bool success = false;
for (int i = 0; i < this->_numRemoteCharacteristics; i++) {
if (this->_remoteCharacteristicInfo[i].characteristic == &characteristic) {
if (this->_remoteCharacteristicInfo[i].valueHandle &&
(this->_remoteCharacteristicInfo[i].properties.notify || this->_remoteCharacteristicInfo[i].properties.indicate)) {
ble_gattc_write_params_t writeParams;
// CCCD value: 0x0001 enables notifications, 0x002 (== 0x0002)
// enables indications. Notifications are preferred when both are
// supported.
uint16_t value = (this->_remoteCharacteristicInfo[i].properties.notify ? 0x0001 : 0x002);
writeParams.write_op = BLE_GATT_OP_WRITE_REQ;
#ifndef __RFduino__
writeParams.flags = 0;
#endif
// NOTE(review): assumes the CCCD immediately follows the value
// attribute (valueHandle + 1); true for typical layouts but not
// guaranteed without descriptor discovery.
writeParams.handle = (this->_remoteCharacteristicInfo[i].valueHandle + 1); // don't discover descriptors for now
writeParams.offset = 0;
writeParams.len = sizeof(value);
writeParams.p_value = (uint8_t*)&value;
this->_remoteRequestInProgress = true;
success = (sd_ble_gattc_write(this->_connectionHandle, &writeParams) == NRF_SUCCESS);
}
break;
}
}
return success;
}
// Unsubscribing has the same preconditions as subscribing (discovered
// handle plus notify/indicate support), so simply delegate.
bool nRF51822::canUnsubscribeRemoteCharacteristic(BLERemoteCharacteristic& characteristic) {
return this->canSubscribeRemoteCharacteristic(characteristic);
}
// Unsubscribes from a remote characteristic by writing 0x0000 to its
// CCCD, disabling both notifications and indications. Returns true if
// the SoftDevice accepted the write request.
// NOTE(review): the method name ("unsubcribe") carries a typo, but it is
// part of the public interface and must not be renamed here.
bool nRF51822::unsubcribeRemoteCharacteristic(BLERemoteCharacteristic& characteristic) {
bool success = false;
for (int i = 0; i < this->_numRemoteCharacteristics; i++) {
if (this->_remoteCharacteristicInfo[i].characteristic == &characteristic) {
if (this->_remoteCharacteristicInfo[i].valueHandle &&
(this->_remoteCharacteristicInfo[i].properties.notify || this->_remoteCharacteristicInfo[i].properties.indicate)) {
ble_gattc_write_params_t writeParams;
// 0x0000 clears both the notification and indication bits.
uint16_t value = 0x0000;
writeParams.write_op = BLE_GATT_OP_WRITE_REQ;
#ifndef __RFduino__
writeParams.flags = 0;
#endif
// Same valueHandle + 1 CCCD assumption as subscribeRemoteCharacteristic().
writeParams.handle = (this->_remoteCharacteristicInfo[i].valueHandle + 1); // don't discover descriptors for now
writeParams.offset = 0;
writeParams.len = sizeof(value);
writeParams.p_value = (uint8_t*)&value;
this->_remoteRequestInProgress = true;
success = (sd_ble_gattc_write(this->_connectionHandle, &writeParams) == NRF_SUCCESS);
}
break;
}
}
return success;
}
// Rounds the requested TX power up to the nearest level supported by
// the nRF51 radio and applies it through the SoftDevice. Returns true
// if the SoftDevice accepted the setting.
bool nRF51822::setTxPower(int txPower) {
  // Levels accepted by sd_ble_gap_tx_power_set(), in ascending order.
  static const int supportedLevels[] = { -40, -30, -20, -16, -12, -8, -4, 0 };
  int level = 4; // anything above 0 dBm maps to the +4 dBm maximum
  for (unsigned int i = 0; i < sizeof(supportedLevels) / sizeof(supportedLevels[0]); i++) {
    if (txPower <= supportedLevels[i]) {
      level = supportedLevels[i];
      break;
    }
  }
  return (sd_ble_gap_tx_power_set(level) == NRF_SUCCESS);
}
// Starts BLE advertising. The advertising type is chosen from the
// configured flags: connectable -> ADV_IND, otherwise scannable
// (ADV_SCAN_IND) when scan-response data is set, else non-connectable
// (ADV_NONCONN_IND). The return value of sd_ble_gap_adv_start is ignored.
void nRF51822::startAdvertising() {
#ifdef NRF_51822_DEBUG
Serial.println(F("Start advertisement"));
#endif
ble_gap_adv_params_t advertisingParameters;
memset(&advertisingParameters, 0x00, sizeof(advertisingParameters));
advertisingParameters.type = this->_connectable ? BLE_GAP_ADV_TYPE_ADV_IND : ( this->_hasScanData ? BLE_GAP_ADV_TYPE_ADV_SCAN_IND : BLE_GAP_ADV_TYPE_ADV_NONCONN_IND );
advertisingParameters.p_peer_addr = NULL;
// No whitelist filtering: accept scan and connect requests from anyone.
advertisingParameters.fp = BLE_GAP_ADV_FP_ANY;
advertisingParameters.p_whitelist = NULL;
// Convert milliseconds to SoftDevice 0.625 ms units (ms * 16 / 10).
advertisingParameters.interval = (this->_advertisingInterval * 16) / 10; // advertising interval (in units of 0.625 ms)
advertisingParameters.timeout = 0;
sd_ble_gap_adv_start(&advertisingParameters);
}
// Terminates the current connection with the standard
// "remote user terminated connection" HCI reason code.
void nRF51822::disconnect() {
sd_ble_gap_disconnect(this->_connectionHandle, BLE_HCI_REMOTE_USER_TERMINATED_CONNECTION);
}
// Reads the device's own BLE address from the SoftDevice and reports it
// to the registered event listener (if any).
void nRF51822::requestAddress() {
ble_gap_addr_t gapAddress;
sd_ble_gap_address_get(&gapAddress);
if (this->_eventListener) {
this->_eventListener->BLEDeviceAddressReceived(*this, gapAddress.addr);
}
}
// Reads the die temperature via the SoftDevice and reports it to the
// registered event listener. Not available on RFduino builds.
void nRF51822::requestTemperature() {
#ifndef __RFduino__
int32_t rawTemperature = 0;
sd_temp_get(&rawTemperature);
// sd_temp_get reports the temperature in 0.25 degC steps, hence / 4.0.
float temperature = rawTemperature / 4.0;
if (this->_eventListener) {
this->_eventListener->BLEDeviceTemperatureReceived(*this, temperature);
}
#endif
}
// Battery level reporting is not implemented for this backend; the
// request is intentionally a no-op.
void nRF51822::requestBatteryLevel() {
}
#endif
|
#pragma once
#include <cstdint>
namespace NWNXLib {
namespace API {
// Opaque forward-declared stand-in for the engine's NWSync manifest
// metadata type. No members are exposed through this binding; it exists
// only so pointers/references to the engine type can be passed around.
struct NWSync__CNWSync__ManifestMetaData
{
};
}
}
|
#include "keras_model.h"
// Cross-check driver: loads a Keras model exported for the keras2cpp
// runtime and runs a single prediction on one hard-coded 13-feature
// input vector, printing the resulting tensor.
int main() {
KerasModel model;
// NOTE(review): absolute cluster-specific path; this will only load on
// the original author's filesystem.
model.LoadModel("/beegfs/desy/user/amalara/output_varariables/Sequential/model_AK8_pt_300_500/model_crosscheck/mymodel.model");
// One sample with 13 input features.
Tensor in(13);
// in.data_ = {{ 1.52882206e-01, -7.82382991e-01, 4.58294465e-01, 1.62019307e-01, 6.77150193e-02, 9.38621026e-01, 1.40192715e-01, 1.73891120e-01, 1.86060589e+00, 6.80000000e+01, 8.08964333e-01, 7.69246801e-01, 6.84956843e-01 }};
in.data_ = {{ 1.52882206e-01, -7.82382991e-01, 4.58294465e-01, 1.62019307e-01, 6.77150193e-02, 9.38621026e-01, 1.40192715e-01, 1.73891120e-01, 1.86060589e+00, 6.80000000e+01, 8.08964333e-01, 7.69246801e-01, 6.84956843e-01 }};
// Run prediction.
Tensor out;
model.Apply(&in, &out);
out.Print();
return 0;
}
data_train.data_ = {{ 4.46115288e-01, -6.02262921e-02, 5.84577114e-02, 1.12644406e-01, 3.80830124e-02, 9.38310247e-01, 7.89906373e-02, 8.56328135e-02, 1.08000687e+00, 4.40000000e+01, 7.96757582e-01, 7.80584386e-01, 8.12577065e-01 }};
[ 9.77443609e-02, -6.45728378e-01, 4.25412002e-01,
5.03212534e-01, 5.17757809e-02, 9.90454626e-01,
6.57308077e-01, 5.34018396e-01, 3.24002060e+00,
8.60000000e+01, 2.77360290e-01, 2.30378058e-01,
2.71578298e-01],
[ 5.10025063e-01, 4.21661029e-01, 4.53980100e-02,
4.75613230e-01, 6.80359435e-02, 9.22948855e-01,
4.95690912e-01, 3.76836541e-01, 2.01209200e+00,
5.70000000e+01, 2.16479115e-01, 2.06035597e-01,
2.59556104e-01],
[ 2.60651629e-01, -3.85230521e-01, 6.24689055e-01,
2.83430923e-01, 3.81899872e-02, 9.27210975e-01,
3.05284449e-01, 1.30353138e-01, 1.48010842e+00,
4.70000000e+01, 2.66984551e-01, 2.44383310e-01,
6.83723798e-01],
[ 3.58395990e-01, 1.41667743e+00, 6.38215174e-01,
4.63965817e-01, 2.60376551e-01, 9.95649085e-01,
4.98470569e-01, 4.28303166e-01, 3.84240067e+00,
9.20000000e+01, 4.66259775e-01, 4.37455713e-01,
5.57336621e-01],
[ 8.10776942e-01, 3.91318311e-01, 2.73631841e-01,
5.86263649e-01, 9.56354300e-02, 9.88811934e-01,
5.37385758e-01, 1.66101577e-01, 1.87486340e+00,
5.90000000e+01, 1.77722678e-01, 1.67187695e-01,
7.05918619e-01],
[ 3.58395990e-01, -7.69346860e-01, 6.23289801e-01,
1.89238804e-01, 9.00727428e-02, 9.14036139e-01,
2.45521837e-01, 9.11967729e-02, 1.01852133e+00,
6.00000000e+01, 2.17699790e-01, 2.06035597e-01,
6.65228113e-01],
[ 5.62656642e-01, 1.20090699e+00, 1.74751244e-01,
5.58917550e-01, 2.24005135e-01, 9.69055230e-01,
6.17598700e-01, 4.92845096e-01, 2.51466945e+00,
6.90000000e+01, 2.18157543e-01, 1.38843733e-01,
1.31627620e-01],
[ 1.77944862e-01, 9.24001590e-01, 4.90715368e-01,
5.81421111e-02, 9.22122379e-02, 9.19452584e-01,
4.00258111e-02, 4.73458175e-02, 5.44904454e-01,
4.40000000e+01, 7.18634370e-01, 6.58538619e-01,
6.06658446e-01],
[ 1.77944862e-01, 1.85181449e+00, 5.37021922e-01,
2.95584744e-01, 4.16345742e-01, 9.83750666e-01,
3.84901750e-01, 1.32856919e-01, 1.51753440e+00,
3.80000000e+01, 2.07171467e-01, 1.87361927e-01,
6.79408138e-01]])
data_test.data_ = {{ 1.52882206e-01, -7.82382991e-01, 4.58294465e-01, 1.62019307e-01, 6.77150193e-02, 9.38621026e-01, 1.40192715e-01, 1.73891120e-01, 1.86060589e+00, 6.80000000e+01, 8.08964333e-01, 7.69246801e-01, 6.84956843e-01 }};
[ 1.04010025e-01, 2.02443085e+00, 9.82898010e-01,
8.10571293e-02, 4.95079161e-01, 9.43371515e-01,
5.72497534e-02, 4.34162711e-02, 5.09260663e-01,
1.70000000e+01, 4.76940683e-01, 3.65095244e-01,
4.91985203e-01],
[ 1.00250627e-01, -3.15779410e-01, 4.62259017e-01,
1.12897610e-01, 1.65810869e-02, 9.54581779e-01,
6.39507107e-02, 7.81214682e-02, 8.46540035e-01,
4.90000000e+01, 7.50982262e-01, 5.49831187e-01,
4.66091245e-01],
[ 2.45614035e-01, 1.29260943e+00, 1.86256219e-01,
3.80155088e-01, 1.94908002e-01, 9.64393536e-01,
3.81129360e-01, 2.49839167e-01, 2.49684756e+00,
8.10000000e+01, 3.84779706e-01, 3.76766287e-01,
6.57829840e-01],
[ 9.64912281e-02, -1.05569222e+00, 3.42039801e-02,
8.15002374e-02, 1.08258451e-01, 9.16194948e-01,
5.44700970e-02, 7.10274199e-02, 7.25796693e-01,
3.50000000e+01, 7.39385848e-01, 6.33195782e-01,
5.15413070e-01],
[ 5.45112782e-01, 3.45242331e-01, 9.98134328e-02,
4.47507517e-01, 6.45057766e-02, 9.31517492e-01,
3.80732266e-01, 3.05896058e-01, 2.22773693e+00,
6.60000000e+01, 3.36868205e-01, 2.32878996e-01,
2.78668311e-01],
[ 1.60401003e-01, 6.30463884e-01, 5.59701493e-02,
1.50625099e-01, 4.80316645e-02, 9.17471364e-01,
1.01674619e-01, 1.06080364e-01, 1.31258260e+00,
4.10000000e+01, 7.67461377e-01, 6.90550623e-01,
7.38594328e-01],
[ 1.12781955e-02, -4.68616806e-01, 7.14552239e-01,
3.05206520e-01, 2.15019255e-02, 9.23614811e-01,
4.08330283e-01, 3.08678038e-01, 2.40773808e+00,
7.30000000e+01, 3.40835400e-01, 3.22079113e-01,
4.58076449e-01],
[ 1.39097744e-01, -1.08718451e-01, 6.74751244e-01,
5.76135464e-01, 2.59948652e-02, 9.22493784e-01,
7.67303051e-01, 4.47777024e-01, 3.93507452e+00,
7.40000000e+01, 2.91703223e-01, 2.02534284e-01,
3.82860666e-01],
[ 1.12781955e-02, -6.78093944e-01, 3.12500000e-01,
3.19132774e-01, 4.10783055e-02, 9.19341591e-01,
3.29705716e-01, 1.12687566e-01, 1.21099780e+00,
5.20000000e+01, 1.87793248e-01, 1.82860239e-01,
6.64611591e-01
array([[ 0.14476113, 0.74322879, 0.11201009],
[ 0.05147558, 0.87995833, 0.06856609],
[ 0.17300698, 0.420205 , 0.40678802],
[ 0.0369037 , 0.16144098, 0.80165529],
[ 0.0449503 , 0.78200394, 0.17304575],
[ 0.17017224, 0.04697737, 0.78285038],
[ 0.06490728, 0.67274153, 0.26235119],
[ 0.5476616 , 0.06784479, 0.38449362],
[ 0.03523582, 0.01704395, 0.94772023],
[ 0.11124282, 0.08566742, 0.80308974]], dtype=float32)
|
/***********************************************************************************************************************
* OpenStudio(R), Copyright (c) 2008-2021, Alliance for Sustainable Energy, LLC, and other contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
* following conditions are met:
*
* (1) Redistributions of source code must retain the above copyright notice, this list of conditions and the following
* disclaimer.
*
* (2) Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided with the distribution.
*
* (3) Neither the name of the copyright holder nor the names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission from the respective party.
*
* (4) Other than as required in clauses (1) and (2), distributions in any form of modifications or other derivative works
* may not use the "OpenStudio" trademark, "OS", "os", or any other confusingly similar designation without specific prior
* written permission from Alliance for Sustainable Energy, LLC.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND ANY CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S), ANY CONTRIBUTORS, THE UNITED STATES GOVERNMENT, OR THE UNITED
* STATES DEPARTMENT OF ENERGY, NOR ANY OF THEIR EMPLOYEES, BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************************************************************/
#include <gtest/gtest.h>
#include "ModelFixture.hpp"
#include "../AirLoopHVAC.hpp"
#include "../AirLoopHVACOutdoorAirSystem.hpp"
#include "../ControllerOutdoorAir.hpp"
#include "../PlantLoop.hpp"
#include "../Model.hpp"
#include "../Node.hpp"
#include "../Node_Impl.hpp"
#include "../CoilHeatingElectric.hpp"
#include "../CoilHeatingElectric_Impl.hpp"
#include "../CoilCoolingWater.hpp"
#include "../Schedule.hpp"
#include "../AirLoopHVACZoneSplitter.hpp"
using namespace openstudio::model;
// Smoke test: constructing a CoilHeatingElectric with an always-on
// schedule must not crash. Run inside a death-test subprocess so an
// abort/segfault in the constructor is caught as a test failure.
TEST_F(ModelFixture, CoilHeatingElectric_CoilHeatingElectric) {
::testing::FLAGS_gtest_death_test_style = "threadsafe";
ASSERT_EXIT(
{
Model m;
Schedule s = m.alwaysOnDiscreteSchedule();
CoilHeatingElectric coil(m, s);
// A clean exit code signals successful construction.
exit(0);
},
::testing::ExitedWithCode(0), "");
}
// Verifies where a CoilHeatingElectric may be placed: allowed on an
// AirLoopHVAC supply side and on the outdoor-air system's OA/relief
// branches; rejected on air-loop demand nodes and on both sides of a
// PlantLoop. Component counts confirm each accepted/rejected insertion.
TEST_F(ModelFixture, CoilHeatingElectric_addToNode) {
Model m;
Schedule s = m.alwaysOnDiscreteSchedule();
CoilHeatingElectric testObject(m, s);
AirLoopHVAC airLoop(m);
ControllerOutdoorAir controllerOutdoorAir(m);
AirLoopHVACOutdoorAirSystem outdoorAirSystem(m, controllerOutdoorAir);
Node supplyOutletNode = airLoop.supplyOutletNode();
outdoorAirSystem.addToNode(supplyOutletNode);
// Supply side of an air loop: accepted (node + coil added -> 5 components).
EXPECT_TRUE(testObject.addToNode(supplyOutletNode));
EXPECT_EQ((unsigned)5, airLoop.supplyComponents().size());
// Demand side of an air loop: rejected, count unchanged.
Node inletNode = airLoop.zoneSplitter().lastOutletModelObject()->cast<Node>();
EXPECT_FALSE(testObject.addToNode(inletNode));
EXPECT_EQ((unsigned)5, airLoop.demandComponents().size());
// Plant loops reject the coil on both supply and demand sides.
PlantLoop plantLoop(m);
supplyOutletNode = plantLoop.supplyOutletNode();
EXPECT_FALSE(testObject.addToNode(supplyOutletNode));
EXPECT_EQ((unsigned)5, plantLoop.supplyComponents().size());
Node demandOutletNode = plantLoop.demandOutletNode();
EXPECT_FALSE(testObject.addToNode(demandOutletNode));
EXPECT_EQ((unsigned)5, plantLoop.demandComponents().size());
// Outdoor-air and relief branches of the OA system accept fresh coils.
CoilHeatingElectric testObject2(m, s);
CoilHeatingElectric testObject3(m, s);
if (boost::optional<Node> OANode = outdoorAirSystem.outboardOANode()) {
EXPECT_TRUE(testObject2.addToNode(*OANode));
EXPECT_EQ((unsigned)5, airLoop.supplyComponents().size());
EXPECT_EQ((unsigned)3, outdoorAirSystem.oaComponents().size());
}
if (boost::optional<Node> reliefNode = outdoorAirSystem.outboardReliefNode()) {
EXPECT_TRUE(testObject3.addToNode(*reliefNode));
EXPECT_EQ((unsigned)5, airLoop.supplyComponents().size());
EXPECT_EQ((unsigned)3, outdoorAirSystem.reliefComponents().size());
}
// A clone of an already-placed coil can be added again (5 -> 7 components).
CoilHeatingElectric testObjectClone = testObject.clone(m).cast<CoilHeatingElectric>();
supplyOutletNode = airLoop.supplyOutletNode();
EXPECT_TRUE(testObjectClone.addToNode(supplyOutletNode));
EXPECT_EQ((unsigned)7, airLoop.supplyComponents().size());
}
|
/*
###############################################################################
#
# Temboo CoAP Edge Device library
#
# Copyright (C) 2017, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
###############################################################################
*/
#include "utility/TembooGlobal.h"
#include "utility/TembooCoAPSession.h"
#include "utility/TembooTags.h"
#include "TembooCoAPEdgeDevice.h"
#ifndef UINT16_MAX
#define UINT16_MAX (0xFFFF)
#endif
#ifndef UINT32_MAX
#define UINT32_MAX (0xFFFFFFFF)
#endif
//TODO: Maybe. Put these in PROGMEM and
// modify any code that uses them.
// Framing markers used by the Temboo gateway to embed the HTTP status
// of a choreo execution in the CoAP payload (0x0A/0x1F/0x1E separators).
const char HTTP_CODE_PREFIX[] = "HTTP_CODE\x0A\x1F";
const char HTTP_CODE_SUFFIX[] = "\x0A\x1E";
// CoAP URI paths exposed by the gateway: choreo execution and time sync.
const char TembooCoAPClient::URI_PATH[] = "exec";
const char TIME_URI_PATH[] = "time";
// Header name used when requesting the current time from the gateway.
static char HEADER_TIME[] = "x-temboo-time:";
// Monotonically increasing id shared by all TembooCoAPChoreo instances.
uint16_t TembooCoAPChoreo::s_nextRequestId = 0;
// Constructs a client bound to a specific Temboo CoAP gateway
// (address + UDP port). Wires the shared RX buffer through the message
// and request/response layers and zeroes all bookkeeping state; no I/O
// happens until begin()/sendChoreoRequest().
TembooCoAPClient::TembooCoAPClient(TembooCoAPIPStack& ipStack, IPAddress gatewayAddress, uint16_t gatewayPort) :
m_messageLayer(m_rxBuffer, sizeof(m_rxBuffer), ipStack),
m_rrLayer(m_messageLayer, m_rxBuffer, sizeof(m_rxBuffer)),
m_gatewayAddress(gatewayAddress),
m_gatewayPort(gatewayPort),
m_messageID(0),
m_state(STATE_IDLE),
m_blockSize(MAX_BLOCK_SIZE),
m_lastError(NO_ERROR),
m_dataLen(0),
m_txIndex(0),
m_respLen(0),
m_txByteCount(0),
m_respHttpCode(0) {
memset(m_token, 0, sizeof(m_token));
memset(m_dataBuffer, 0, sizeof(m_dataBuffer));
memset(m_respBuffer, 0, sizeof(m_respBuffer));
}
// Discards all per-choreo state (request/response buffers, token, block
// counters) and returns the message and request/response layers to
// their idle states, so a fresh choreo request can be started.
void TembooCoAPClient::resetChoreo() {
memset(m_token, 0, sizeof(m_token));
memset(m_dataBuffer, 0, sizeof(m_dataBuffer));
memset(m_respBuffer, 0, sizeof(m_respBuffer));
m_dataLen = 0;
m_respLen = 0;
m_txIndex = 0;
m_txByteCount = 0;
m_rxBlockNum = 0;
m_rrLayer.setState(CoapRRLayer::STATE_IDLE);
m_messageLayer.setState(CoapMessageLayer::STATE_CLOSED);
}
// Seeds the PRNG and picks a randomized initial CoAP message ID.
void TembooCoAPClient::begin(long seed){
//RFC7252 "strongly recommends" that the initial
//value of messageID be randomized. There's no good way
//to do that reliably on many MCU boards. We will
//use random(), and can instruct the user to call randomSeed
//with input from an unused analog input if it's important to them.
randomSeed(seed);
m_messageID = random(0, UINT16_MAX);
}
// No owned resources to release; all buffers are inline members.
TembooCoAPClient::~TembooCoAPClient() {
}
// Appends one byte to the outgoing request buffer.
// Returns ERROR_BUFFER_FULL (without storing the byte) once the buffer
// is exhausted. Appending data resets the transmission bookkeeping so
// the request is (re)sent from the beginning.
TembooCoAPClient::Result TembooCoAPClient::write(uint8_t value) {
    if (m_dataLen >= sizeof(m_dataBuffer)) {
        return ERROR_BUFFER_FULL;
    }
    m_dataBuffer[m_dataLen] = value;
    m_dataLen++;
    // New data invalidates any in-progress transmission position.
    m_txIndex = 0;
    m_txByteCount = 0;
    return NO_ERROR;
}
// Appends a received payload fragment to the response buffer, clamping
// the copy so at least one byte of headroom remains (presumably for a
// NUL terminator — confirm against the response reader).
// Returns ERROR_BUFFER_FULL when nothing could be stored.
TembooCoAPClient::Result TembooCoAPClient::saveResponse(uint8_t* values, uint16_t len) {
len = len < (sizeof(m_respBuffer) - m_respLen - 1) ? len : (sizeof(m_respBuffer) - m_respLen -1);
TEMBOO_TRACE("DBG: ");
TEMBOO_TRACELN("Saving payload to the buffer");
if ( len > 0) {
memcpy(&m_respBuffer[m_respLen], values, len);
m_respLen += len;
return NO_ERROR;
}
// NOTE(review): a genuinely empty payload (len == 0 on entry) also
// lands here and is reported as ERROR_BUFFER_FULL — verify callers
// treat that case correctly.
TEMBOO_TRACE("ERROR: ");
TEMBOO_TRACELN("Buffer full, payload not saved");
return ERROR_BUFFER_FULL;
}
// Appends `len` bytes to the outgoing request buffer one at a time,
// stopping at (and returning) the first error from the single-byte
// write(), e.g. ERROR_BUFFER_FULL.
TembooCoAPClient::Result TembooCoAPClient::write(uint8_t* values, uint16_t len) {
    Result status = NO_ERROR;
    for (uint16_t i = 0; i < len && status == NO_ERROR; i++) {
        status = write(values[i]);
    }
    return status;
}
// Advances and returns the CoAP message ID, skipping 0 on wraparound
// (0 is reserved as "unset" by this client).
uint16_t TembooCoAPClient::getNextMessageID() {
    if (++m_messageID == 0) {
        ++m_messageID;
    }
    return m_messageID;
}
// Fills m_token with 8 random printable ASCII characters (codes 33-125)
// plus a NUL terminator. RFC 7252 section 5.3.1 suggests randomizing
// the token. Always returns NO_ERROR.
TembooCoAPClient::Result TembooCoAPClient::generateToken() {
    int idx = 0;
    while (idx < 8) {
        m_token[idx++] = (rand() % 93) + 33;
    }
    m_token[8] = '\0';
    return NO_ERROR;
}
// Requests block `blockNum` of a block-wise response from the gateway:
// builds a POST to the "exec" resource carrying only a BLOCK2 option
// (no payload) and sends it reliably. Returns NO_ERROR or a specific
// message-construction/send error.
TembooCoAPClient::Result TembooCoAPClient::sendBlockRequest(uint16_t msgID, uint32_t blockNum) {
CoapMsg msg(m_txBuffer, sizeof (m_txBuffer));
msg.setCode(CoapMsg::COAP_POST);
if (msg.setToken((uint8_t*)m_token, strlen(m_token))) {
TEMBOO_TRACELN("err: setToken");
return ERROR_MSG_TOKEN;
}
msg.setId(msgID);
if (msg.addOption(CoapMsg::COAP_OPTION_URI_PATH, (const uint8_t*)URI_PATH, strlen(URI_PATH))) {
TEMBOO_TRACELN("err: setURI");
return ERROR_MSG_OPTION;
}
uint8_t optionValue[3];
uint16_t optionLen = 0;
// Pack the BLOCK2 option: the block number occupies the upper bits
// spread over up to 3 bytes, the M ("more") bit is deliberately left
// clear (the 0 ? ... : 0 below), and the low 3 bits carry the block
// size exponent. NOTE(review): (m_blockSize >> 5) & 0x07 matches the
// RFC 7959 SZX encoding only for sizes up to 64 — confirm
// MAX_BLOCK_SIZE stays within that range.
optionValue[0] = (blockNum & 0xF000) >> 12;
optionValue[1] = (blockNum & 0x0FF0) >> 4;
optionValue[2] = (blockNum & 0x000F) << 4;
optionValue[2] |= (0 ? 0x08 : 0);
optionValue[2] |= (m_blockSize >> 5) & 0x07;
// Use the shortest encoding that still carries the block number.
optionLen = 1;
if (optionValue[0] > 0) {
optionLen = 3;
} else if (optionValue[1] > 0) {
optionLen = 2;
}
if (msg.addOption(CoapMsg::COAP_OPTION_BLOCK2, (const uint8_t*)&optionValue[3 - optionLen], optionLen)) {
TEMBOO_TRACELN("err: block2");
return ERROR_MSG_OPTION;
}
if (m_rrLayer.reliableSend(msg, m_token, m_gatewayAddress, m_gatewayPort) != CoapRRLayer::NO_ERROR) {
TEMBOO_TRACELN("err: send");
return ERROR_SENDING_MSG;
}
return NO_ERROR;
}
// Sends one block of the choreo request as a POST to "exec".
// `blockNum` is this block's index, `moreBlocks` is the BLOCK1 M bit.
// On the final block a BLOCK2 option advertises our preferred response
// block size. Returns NO_ERROR or a message-construction/send error.
TembooCoAPClient::Result TembooCoAPClient::sendBlock(uint16_t msgID, uint8_t* payload, size_t len, uint32_t blockNum, bool moreBlocks) {
CoapMsg msg(m_txBuffer, sizeof (m_txBuffer));
msg.setCode(CoapMsg::COAP_POST);
if (msg.setToken((uint8_t*)m_token, strlen(m_token))) {
TEMBOO_TRACELN("err: setToken");
return ERROR_MSG_TOKEN;
}
msg.setId(msgID);
if (msg.addOption(CoapMsg::COAP_OPTION_URI_PATH, (const uint8_t*)URI_PATH, strlen(URI_PATH))) {
TEMBOO_TRACELN("err: setURI");
return ERROR_MSG_OPTION;
}
uint8_t optionValue[3];
uint16_t optionLen = 0;
// If this is the last block in a series of blocks (or an only block)
// include a block2 option to let the server know what our
// desired block size is for the response.
if (!moreBlocksToSend()) {
optionValue[0] = (m_blockSize >> 5) & 0x07;
optionLen = (optionValue[0] > 0) ? 1 : 0;
if (msg.addOption(CoapMsg::COAP_OPTION_BLOCK2, (const uint8_t*)optionValue, optionLen)) {
TEMBOO_TRACELN("err: block2");
return ERROR_MSG_OPTION;
}
}
// If this is not the only block in the request,
// include the block1 option.
if (blockNum > 0 || moreBlocks) {
// Same bit-packing as sendBlockRequest(), but here the M bit
// reflects whether further request blocks follow.
optionValue[0] = (blockNum & 0xF000) >> 12;
optionValue[1] = (blockNum & 0x0FF0) >> 4;
optionValue[2] = (blockNum & 0x000F) << 4;
optionValue[2] |= (moreBlocks ? 0x08 : 0);
optionValue[2] |= (m_blockSize >> 5) & 0x07;
// Shortest encoding that still carries the block number.
optionLen = 1;
if (optionValue[0] > 0) {
optionLen = 3;
} else if (optionValue[1] > 0) {
optionLen = 2;
}
if (msg.addOption(CoapMsg::COAP_OPTION_BLOCK1, (const uint8_t*)&optionValue[3 - optionLen], optionLen)) {
TEMBOO_TRACELN("err: block1");
return ERROR_MSG_OPTION;
}
}
if (msg.setPayload((uint8_t*)payload, len)) {
TEMBOO_TRACELN("err: setPayload");
return ERROR_MSG_PAYLOAD;
}
if (m_rrLayer.reliableSend(msg, m_token, m_gatewayAddress, m_gatewayPort) != CoapRRLayer::NO_ERROR) {
TEMBOO_TRACELN("err: send");
return ERROR_SENDING_MSG;
}
return NO_ERROR;
}
// The server echoes a BLOCK1 option when it wants us to use a smaller
// block size for subsequent request blocks. Shrink our block size to
// match; never grow it here.
void TembooCoAPClient::adjustRequestBlockSize(CoapMsg& msg) {
    const uint16_t requestedSize = msg.getBlock1Size();
    if (requestedSize != 0 && requestedSize < m_blockSize) {
        m_blockSize = requestedSize;
    }
}
// Cooperative driver for the CoAP request/response state machine; must
// be called repeatedly. Pumps the request/response layer, feeds further
// request blocks on 2.31 (Continue), aborts on 4.08/4.13, and
// assembles a (possibly block-wise) response into m_respBuffer.
// Returns the result of this iteration (NO_ERROR when nothing failed).
TembooCoAPClient::Result TembooCoAPClient::loop() {
m_lastResult = NO_ERROR;
switch (m_state) {
case STATE_IDLE:
case STATE_RESPONSE_READY:
// Pump the receiver.
// We're not serving anything, so unless there's an outstanding
// request (which would mean we would be in STATE_WAITING, not STATE_IDLE),
// the R/R layer will reject or ignore any incoming traffic.
m_rrLayer.loop();
break;
case STATE_SEND_REQUEST:
case STATE_RESPONSE_STARTED:
case STATE_WAITING_FOR_RESPONSE:
// We're waiting for a response to an earlier request.
switch(m_rrLayer.loop()) {
case CoapRRLayer::NO_ERROR:
// Nothing happened. Nothing to do.
break;
case CoapRRLayer::RESPONSE_RECEIVED: {
// A response to our request was received.
// It may have been a piggybacked ACK or a separate response
CoapMsg msg(m_rxBuffer, sizeof(m_rxBuffer), m_messageLayer.getRXByteCount());
// See if it has a BLOCK1 option. If so, make sure the
// block number matches the one we just sent. If the block
// numbers don't match, we're FUBAR, so abort the request.
// If they do match, adjust our request block size if the
// server requested a different (smaller) size.
if (msg.getOptionCount(CoapMsg::COAP_OPTION_BLOCK1)) {
uint32_t ackBlockNum = msg.getBlock1Num();
if (ackBlockNum != m_txBlockNum) {
TEMBOO_TRACE("ERROR: ");
TEMBOO_TRACELN("Block1 message number does not match");
m_lastResult = ERROR_RECEIVING_RESPONSE;
if (msg.getType() == CoapMsg::COAP_CONFIRMABLE) {
m_messageLayer.rejectMsg(msg);
}
resetChoreo();
break;
}
adjustRequestBlockSize(msg);
}
// Now deal with the response itself.
switch(msg.getCode()) {
case CoapMsg::COAP_CONTINUE: //2.31
// 2.31 means the server is requesting the next block of the request.
// If there are no more blocks to send, we're FUBAR, so abort the
// request. Otherwise, send the next block.
if (m_txIndex >= m_dataLen) {
// no more data to send, bad news
resetChoreo();
m_lastResult = ERROR_REQUEST_FAILED;
m_state = STATE_IDLE;
TEMBOO_TRACE("ERROR: ");
TEMBOO_TRACELN("Gateway requested too many blocks");
break;
}
if (sendChoreoRequest() != NO_ERROR) {
resetChoreo();
m_lastResult = ERROR_REQUEST_FAILED;
m_state = STATE_IDLE;
TEMBOO_TRACE("ERROR: ");
TEMBOO_TRACELN("Send Choreo request failed");
}
break;
case CoapMsg::COAP_REQUEST_ENTITY_INCOMPLETE: //4.08
// 4.08 means the server is missing one or more blocks, so can't
// service the request.
// We're FUBAR, so abort the request.
resetChoreo();
m_lastResult = ERROR_REQUEST_FAILED;
m_state = STATE_IDLE;
TEMBOO_TRACE("ERROR: ");
TEMBOO_TRACELN("Gateway returned 4.08");
break;
case CoapMsg::COAP_REQUEST_ENTITY_TOO_LARGE: //4.13
// 4.13 means the server ran out of memory when receiving the
// request.
// We're FUBAR, so abort the request.
resetChoreo();
m_lastResult = ERROR_REQUEST_FAILED;
m_state = STATE_IDLE;
TEMBOO_TRACE("ERROR: ");
TEMBOO_TRACELN("Gateway returned 4.13");
break;
default:
// Any response code other than the special ones above means the
// server has processed the request and is returning the final result,
// which may be in one or more blocks. If we haven't finished sending
// the request, we're FUBAR, so abort the request. Otherwise, process
// the response.
// NOTE(review): m_dataLen is cleared *before* moreBlocksToSend()
// is consulted, which appears to make the "response before
// request finished" branch below unreachable — verify intent.
m_dataLen = 0;
if (moreBlocksToSend()) {
m_lastResult = ERROR_RECEIVING_RESPONSE;
if (msg.getType() == CoapMsg::COAP_CONFIRMABLE) {
m_messageLayer.rejectMsg(msg);
}
TEMBOO_TRACE("ERROR: ");
TEMBOO_TRACELN("Response received before request finished");
resetChoreo();
m_state = STATE_IDLE;
break;
} else {
if (msg.getOptionCount(CoapMsg::COAP_OPTION_BLOCK2)) {
// The server is sending a multi-block response, make sure
// it's sending the response block we're expecting.
uint32_t respBlockNum = msg.getBlock2Num();
TEMBOO_TRACE("DBG: ");
TEMBOO_TRACELN("Block2 opt recv");
if (respBlockNum > m_rxBlockNum) {
// It sent a newer block than the one we're expecting,
// (i.e. we've somehow missed a block)
// that's an error.
m_lastResult = ERROR_RECEIVING_RESPONSE;
if (msg.getType() == CoapMsg::COAP_CONFIRMABLE) {
m_messageLayer.rejectMsg(msg);
}
resetChoreo();
m_state = STATE_IDLE;
TEMBOO_TRACE("ERROR: ");
TEMBOO_TRACELN("Received block out of order");
break;
} else if (respBlockNum < m_rxBlockNum) {
// It resent a block we've already received,
// (i.e. it didn't see our ACK),
// just accept (ACK) it again.
if (msg.getType() == CoapMsg::COAP_CONFIRMABLE) {
m_messageLayer.acceptMsg(msg);
}
m_lastResult = NO_ERROR;
sendBlockRequest(m_messageID, m_rxBlockNum);
m_state = STATE_RESPONSE_STARTED;
TEMBOO_TRACE("DBG: ");
TEMBOO_TRACELN("Received previous block");
} else {
// Server sent the next block we are expecting.
// Accept it and add the payload to our buffer.
bool block2More = msg.getBlock2More();
m_respHttpCode = msg.getHTTPStatus();
m_lastResult = saveResponse(msg.getPayload(), msg.getPayloadLen());
if (msg.getType() == CoapMsg::COAP_CONFIRMABLE) {
m_rxBlockNum = respBlockNum;
m_messageLayer.acceptMsg(msg);
}
if (block2More) {
m_rxBlockNum++;
m_messageID++;
TEMBOO_TRACE("DBG: ");
TEMBOO_TRACELN("Request next block2 msg");
sendBlockRequest(m_messageID, m_rxBlockNum);
m_state = STATE_RESPONSE_STARTED;
} else {
TEMBOO_TRACE("DBG: ");
TEMBOO_TRACELN("Final block2 msg recv");
TEMBOO_TRACE("DBG: ");
TEMBOO_TRACELN("Response complete");
m_state = STATE_RESPONSE_READY;
}
}
} else {
// There's no Block2 option, so is either
// the one and only block in the response
// or an empty ack.
// check if empty to handle final ack. If empty
// wait for CON with matching token and then
// request other blocks of response
if (msg.getCode() == CoapMsg::COAP_EMPTY) {
m_rrLayer.setState(CoapRRLayer::STATE_WAITING);
m_messageLayer.setState(CoapMessageLayer::STATE_WAITING_FOR_CON);
m_state = STATE_WAITING_FOR_RESPONSE;
TEMBOO_TRACE("DBG: ");
TEMBOO_TRACELN("Empty ACK received, waiting for response");
} else {
m_respHttpCode = msg.getHTTPStatus();
m_lastResult = saveResponse(msg.getPayload(), msg.getPayloadLen());
m_messageLayer.acceptMsg(msg);
m_state = STATE_RESPONSE_READY;
TEMBOO_TRACE("DBG: ");
TEMBOO_TRACELN("Response complete");
}
}
}
}
// Does it have a Block2 option?
// NOTE(review): this runs after the inner switch for every received
// response (including aborted ones); it advances the expected RX
// block counter when the received block matches.
uint32_t responseBlockNum = msg.getBlock2Num();
if (responseBlockNum == m_rxBlockNum && msg.getOptionCount(CoapMsg::COAP_OPTION_BLOCK2)) {
adjustRequestBlockSize(msg);
m_rxBlockNum++;
if (0 == m_rxBlockNum) {
clearData();
}
}
break;
}
case CoapRRLayer::ERROR_RECEIVING_RESPONSE:
m_lastResult = ERROR_REQUEST_FAILED;
m_state = STATE_IDLE;
TEMBOO_TRACE("ERROR: ");
TEMBOO_TRACELN("Error receiving response");
break;
case CoapRRLayer::RST_RECEIVED:
m_lastResult = ERROR_REQUEST_FAILED;
m_state = STATE_IDLE;
TEMBOO_TRACE("ERROR: ");
TEMBOO_TRACELN("RST received");
break;
default:
// Anything else indicates a failure of some sort. Check
// the messageLayer lastResult for specifics.
TEMBOO_TRACE("ERROR: ");
TEMBOO_TRACELN("Request failed");
m_lastResult = ERROR_REQUEST_FAILED;
m_state = STATE_IDLE;
}
break;
default:
break;
}
return m_lastResult;
}
// Abandons an in-flight request: only meaningful while we are waiting
// for a response. Cancels the reliable send, drops the buffered request
// data, and returns to the idle state.
void TembooCoAPClient::cancelWait() {
    if (m_state != STATE_WAITING_FOR_RESPONSE) {
        return;
    }
    m_messageLayer.cancelReliableSend();
    m_dataLen = 0;
    m_state = STATE_IDLE;
}
bool TembooCoAPClient::moreBlocksToSend() {
uint16_t payloadLength = (m_dataLen - m_txByteCount) < m_blockSize ? (m_dataLen - m_txByteCount) : m_blockSize;
return ((m_txByteCount + payloadLength) < m_dataLen);
}
// Sends the next block of the buffered choreo request (generating a
// fresh token each call) and advances the transmit counters on success.
// On failure records ERROR_SENDING_MSG and moves to STATE_ERROR.
// Returns the recorded result.
TembooCoAPClient::Result TembooCoAPClient::sendChoreoRequest() {
uint16_t payloadLength = 0;
generateToken();
// Send either a full block or whatever remains, whichever is smaller.
payloadLength = (m_dataLen - m_txByteCount) < m_blockSize ? (m_dataLen - m_txByteCount) : m_blockSize;
m_txBlockNum = m_txByteCount/m_blockSize;
bool moreBlocks = (m_txByteCount + payloadLength) < m_dataLen;
m_lastError = sendBlock(m_messageID, &m_dataBuffer[m_txIndex], payloadLength, m_txBlockNum, moreBlocks);
m_messageID++;
if (TembooCoAPClient::NO_ERROR == m_lastError) {
m_state = STATE_SEND_REQUEST;
m_txIndex += payloadLength;
m_txByteCount += payloadLength;
} else {
m_lastError = ERROR_SENDING_MSG;
m_state = STATE_ERROR;
}
return m_lastError;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
// Constructs a choreo bound to a client. All configuration pointers
// start NULL and are supplied later via the set*()/add*() methods; no
// network activity happens here.
TembooCoAPChoreo::TembooCoAPChoreo(TembooCoAPClient& client) :
m_client(client),
m_accountName(NULL),
m_appKeyName(NULL),
m_appKeyValue(NULL),
m_path(NULL),
m_requestId(0),
m_availableChars(0),
m_nextChar(NULL),
m_nextState(END)
{
}
// Nothing to release: the choreo does not own the strings it points at.
TembooCoAPChoreo::~TembooCoAPChoreo() {
}
// Sets the Temboo account name used for the request.
// NOTE(review): only the pointer is stored — the String overload keeps
// c_str() of the caller's object, so the argument must outlive the
// choreo run; verify callers do not pass temporaries.
void TembooCoAPChoreo::setAccountName(const String& accountName) {
m_accountName = accountName.c_str();
}
void TembooCoAPChoreo::setAccountName(const char* accountName) {
m_accountName = accountName;
}
// Sets the application key name. Stores only a pointer — the argument
// must outlive the choreo run (same caveat as setAccountName).
void TembooCoAPChoreo::setAppKeyName(const String& appKeyName) {
m_appKeyName = appKeyName.c_str();
}
void TembooCoAPChoreo::setAppKeyName(const char* appKeyName) {
m_appKeyName = appKeyName;
}
// Sets the application key value. Stores only a pointer — the argument
// must outlive the choreo run.
void TembooCoAPChoreo::setAppKey(const String& appKeyValue) {
m_appKeyValue = appKeyValue.c_str();
}
void TembooCoAPChoreo::setAppKey(const char* appKeyValue) {
m_appKeyValue = appKeyValue;
}
// Sets the choreo path (e.g. "/Library/..."). Stores only a pointer —
// the argument must outlive the choreo run.
void TembooCoAPChoreo::setChoreo(const String& path) {
m_path = path.c_str();
}
void TembooCoAPChoreo::setChoreo(const char* path) {
m_path = path;
}
// Records the name of a saved-inputs preset in the preset collection.
void TembooCoAPChoreo::setSavedInputs(const String& savedInputsName) {
m_preset.put(savedInputsName.c_str());
}
void TembooCoAPChoreo::setSavedInputs(const char* savedInputsName) {
m_preset.put(savedInputsName);
}
// Records a named credential in the preset collection.
void TembooCoAPChoreo::setCredential(const String& credentialName) {
m_preset.put(credentialName.c_str());
}
void TembooCoAPChoreo::setCredential(const char* credentialName) {
m_preset.put(credentialName);
}
void TembooCoAPChoreo::setProfile(const String& profileName) {
m_preset.put(profileName.c_str());
}
void TembooCoAPChoreo::setProfile(const char* profileName) {
m_preset.put(profileName);
}
// Adds a named input to send with the choreo request. The four overloads
// only differ in accepting String or const char* for each argument; all
// delegate to m_inputs.put().

void TembooCoAPChoreo::addInput(const String& inputName, const String& inputValue) {
    m_inputs.put(inputName.c_str(), inputValue.c_str());
}

void TembooCoAPChoreo::addInput(const char* inputName, const char* inputValue) {
    m_inputs.put(inputName, inputValue);
}

void TembooCoAPChoreo::addInput(const char* inputName, const String& inputValue) {
    m_inputs.put(inputName, inputValue.c_str());
}

void TembooCoAPChoreo::addInput(const String& inputName, const char* inputValue) {
    m_inputs.put(inputName.c_str(), inputValue);
}
// Registers an output filter (outputName, filterPath, variableName) for
// the choreo response. The eight overloads cover every String /
// const char* combination of the three arguments; all delegate to
// m_outputs.put().

void TembooCoAPChoreo::addOutputFilter(const char* outputName, const char* filterPath, const char* variableName) {
    m_outputs.put(outputName, filterPath, variableName);
}

void TembooCoAPChoreo::addOutputFilter(const String& outputName, const char* filterPath, const char* variableName) {
    m_outputs.put(outputName.c_str(), filterPath, variableName);
}

void TembooCoAPChoreo::addOutputFilter(const char* outputName, const String& filterPath, const char* variableName) {
    m_outputs.put(outputName, filterPath.c_str(), variableName);
}

void TembooCoAPChoreo::addOutputFilter(const String& outputName, const String& filterPath, const char* variableName) {
    m_outputs.put(outputName.c_str(), filterPath.c_str(), variableName);
}

void TembooCoAPChoreo::addOutputFilter(const char* outputName, const char* filterPath, const String& variableName) {
    m_outputs.put(outputName, filterPath, variableName.c_str());
}

void TembooCoAPChoreo::addOutputFilter(const String& outputName, const char* filterPath, const String& variableName) {
    m_outputs.put(outputName.c_str(), filterPath, variableName.c_str());
}

void TembooCoAPChoreo::addOutputFilter(const char* outputName, const String& filterPath, const String& variableName) {
    m_outputs.put(outputName, filterPath.c_str(), variableName.c_str());
}

void TembooCoAPChoreo::addOutputFilter(const String& outputName, const String& filterPath, const String& variableName) {
    m_outputs.put(outputName.c_str(), filterPath.c_str(), variableName.c_str());
}
// Pumps the client state machine until the choreo response has been fully
// received, an error occurs, or the supplied timer expires.
// Returns SUCCESS, TEMBOO_ERROR_TIMEOUT on timer expiry, or FAILURE when
// the client reports a message error other than a full buffer.
int TembooCoAPChoreo::waitForResponse(TembooTimer& timer) {
    int rc = SUCCESS;
    while (m_client.getState() == TembooCoAPClient::STATE_RESPONSE_STARTED || m_client.getState() == TembooCoAPClient::STATE_WAITING_FOR_RESPONSE) {
        if (timer.expired()) {
            TEMBOO_TRACELN("ERROR: Choreo timeout");
            rc = TEMBOO_ERROR_TIMEOUT;
            break;
        }
        // Drive the client so it can process incoming packets.
        m_client.loop();
        // While the buffer may be full, we need to receive all of the data
        // from the gateway even though we discard it. We still return
        // the buffer error code, but the user is still able to see what
        // data was able to fit in the current buffer
        if (m_client.getMessageState() != TembooCoAPClient::NO_ERROR && m_client.getMessageState() != TembooCoAPClient::ERROR_BUFFER_FULL) {
            rc = FAILURE;
            break;
        }
    }
    return rc;
}
// Executes the configured choreo against the Temboo gateway and prepares
// the response for reading through the Stream interface (read()/peek()).
//
// timeoutSecs bounds the whole request/response exchange.
// Returns SUCCESS (0), a TEMBOO_ERROR_* configuration/timeout code, or the
// client's message-state error code on a transport failure.
//
// The outer loop runs at most twice: if the gateway answers HTTP 401 on
// the first attempt, the device clock is resynchronized from the response
// and the request is retried once.
int TembooCoAPChoreo::run(uint16_t timeoutSecs) {
    m_nextChar = NULL;
    // Validate mandatory configuration before sending anything.
    if (IS_EMPTY(m_accountName)) {
        return TEMBOO_ERROR_ACCOUNT_MISSING;
    }
    if (IS_EMPTY(m_path)) {
        return TEMBOO_ERROR_CHOREO_MISSING;
    }
    if (IS_EMPTY(m_appKeyName)) {
        return TEMBOO_ERROR_APPKEY_NAME_MISSING;
    }
    if (IS_EMPTY(m_appKeyValue)) {
        return TEMBOO_ERROR_APPKEY_MISSING;
    }
    int rc = 0;
    TembooTimer timer(timeoutSecs * 1000L);
    for (int i = 0; i < 2; i++) {
        // Reset per-attempt state.
        m_client.resetChoreo();
        TembooCoAPSession session(m_client);
        m_requestId = s_nextRequestId++;
        m_respData = NULL;
        m_availableChars = 0;
        m_nextState = START;
        uint16toa(0, m_httpCodeStr);
        m_client.getNextMessageID();
        TEMBOO_TRACE("DBG: ");
        TEMBOO_TRACELN("Sending request");
        rc = session.executeChoreo(m_requestId, m_accountName, m_appKeyName, m_appKeyValue, m_path, m_inputs, m_expressions, m_sensors, m_outputs, m_preset, m_deviceType, m_deviceName);
        if (SUCCESS != rc) {
            goto ErrorExit;
        }
        // finish sending Choreo request
        while (m_client.getState() == TembooCoAPClient::STATE_SEND_REQUEST) {
            if(m_client.loop() == TembooCoAPClient::ERROR_REQUEST_FAILED) {
                rc = m_client.getMessageState();
                TEMBOO_TRACE("ERROR: ");
                TEMBOO_TRACELN("Choreo request failed");
                goto ErrorExit;
            }
        }
        // choreo request complete, wait for CON from gateway
        // and then request the rest of the response
        rc = waitForResponse(timer);
        if (SUCCESS != rc){
            rc = m_client.getMessageState();
            TEMBOO_TRACE("ERROR: ");
            TEMBOO_TRACELN("Waiting for response failed");
            goto ErrorExit;
        } else {
            // Response received: expose it through the read() state machine,
            // prefixed with the HTTP status code.
            m_respData = (char*)m_client.getPacketBuffer();
            uint16_t httpCode = m_client.getRespHttpCode();
            // Codes >= 700 are out of the HTTP range; treat as unknown (0).
            if (httpCode >= 700) {
                httpCode = 0;
            }
            uint16toa(httpCode, m_httpCodeStr);
            m_availableChars = strlen(m_respData) + strlen(m_httpCodeStr) + strlen(HTTP_CODE_PREFIX) + strlen(HTTP_CODE_SUFFIX);
            m_nextChar = HTTP_CODE_PREFIX;
            // Unauthorized: sync our clock from the response and retry once.
            if (httpCode == 401 && i == 0) {
                find(HEADER_TIME);
                TembooCoAPSession::setTime((unsigned long)this->parseInt());
            } else {
                TEMBOO_TRACE("DBG: ");
                TEMBOO_TRACE(m_availableChars);
                TEMBOO_TRACELN(" CHARS");
                TEMBOO_TRACE("DBG: ");
                TEMBOO_TRACELN("Response buffer data:");
                TEMBOO_TRACELN(m_respData);
                rc = m_client.getMessageState();
                break;
            }
        }
    }
ErrorExit:
    if (SUCCESS != rc) {
        TEMBOO_TRACE(" ERROR:");
        TEMBOO_TRACELN(rc);
    }
    return rc;
}
// Number of unread response characters remaining (this count includes the
// HTTP-code prefix, code digits and suffix emitted before the body).
int TembooCoAPChoreo::available() {
    return m_availableChars;
}

// Returns the next response character without consuming it, or -1 once
// the response has been fully read.
int TembooCoAPChoreo::peek() {
    if (m_availableChars > 0) {
        return (int)*m_nextChar;
    }
    return -1;
}
// Reads one character of the synthesized response stream, or -1 when
// exhausted. The stream is produced by a small state machine that emits,
// in order: HTTP_CODE_PREFIX, the HTTP code digits (m_httpCodeStr),
// HTTP_CODE_SUFFIX, and finally the raw response body (m_respData).
// Each state emits the current character and, when the *next* character
// is the terminator, switches m_nextChar to the next segment.
int TembooCoAPChoreo::read() {
    if (m_availableChars > 0) {
        int c = 0;
        switch(m_nextState) {
            case START:
                // First read: begin with the HTTP-code prefix.
                m_nextChar = HTTP_CODE_PREFIX;
                c = (int)(*m_nextChar++);
                m_nextState = HTTP_CODE_PRE;
                break;
            case HTTP_CODE_PRE:
                c = (int)(*m_nextChar++);
                if ('\0' == *m_nextChar) {
                    // Prefix done; switch to the code digits.
                    m_nextState = HTTP_CODE_VALUE;
                    m_nextChar = m_httpCodeStr;
                }
                break;
            case HTTP_CODE_VALUE:
                c = (int)(*m_nextChar++);
                if (*m_nextChar == '\0') {
                    // Digits done; switch to the suffix.
                    m_nextState = HTTP_CODE_SUF;
                    m_nextChar = HTTP_CODE_SUFFIX;
                }
                break;
            case HTTP_CODE_SUF:
                c = (int)(*m_nextChar++);
                if ('\0' == *m_nextChar) {
                    // Suffix done; switch to the response body.
                    m_nextState = RESP_DATA;
                    m_nextChar = m_respData;
                }
                break;
            case RESP_DATA:
                c = (int)(*m_nextChar++);
                if ('\0' == *m_nextChar || m_availableChars <= 0) {
                    m_nextState = END;
                }
                break;
            case END:
            default:
                c = -1;
        }
        // One character consumed (guard against underflow).
        if (m_availableChars > 0) {
            m_availableChars--;
        }
        return c;
    } else {
        return -1;
    }
}
// The choreo stream is read-only; writes are ignored (0 bytes accepted).
size_t TembooCoAPChoreo::write(uint8_t __attribute__ ((unused)) data) {
    return 0;
}

// Discards any unread response data and resets the read state machine.
void TembooCoAPChoreo::flush() {
    m_nextChar = NULL;
    m_nextState = END;
    m_availableChars = 0;
}
|
#include <iostream>
#include <string>
using namespace std;
void stringReverse( string, size_t );
// Demonstrates stringReverse by printing a sample sentence forward,
// then backward.
int main()
{
    const std::string phrase = "Print this string backward.";

    std::cout << phrase << '\n';   // original order
    stringReverse( phrase, 0 );    // reversed order
    std::cout << std::endl;        // trailing newline + flush
}
// Recursively prints stringToReverse to std::cout in reverse order.
// External callers pass startSubscript == 0; the recursion advances it
// toward the end of the string and prints characters on the way back up.
//
// Fixes over the previous version:
//  - removed the `static size_t stringSize` accumulator, which made the
//    function single-use (a second call started from a stale, decremented
//    size and printed the wrong characters),
//  - handles the empty string (the old code indexed s[size - 1] with
//    size == 0),
//  - the recursion now actually uses startSubscript instead of mutating
//    hidden static state.
void stringReverse( std::string stringToReverse, size_t startSubscript )
{
    // Base case: past the last character -- nothing to print.
    if ( startSubscript >= stringToReverse.size() )
        return;

    // Print the tail reversed first, then this character.
    stringReverse( stringToReverse, startSubscript + 1 );
    std::cout << stringToReverse[ startSubscript ];
}
|
#include "hdrpch.h"
#include "ParticleGravityForce.h"
namespace hedron
{
    // Constant-acceleration gravity generator for the particle system.
    // 'gravity' is the acceleration vector (e.g. {0, -9.81, 0}).
    ParticleGravityForce::ParticleGravityForce(const vec3f32 gravity)
        : m_Gravity{gravity}
    {
    }

    // Applies F = m * g to the particle for this simulation step.
    // 'duration' is unused here; integration over time happens elsewhere.
    void ParticleGravityForce::UpdateForce(Particle* particle, float duration)
    {
        // Skip particles with infinite (non-finite) mass: they are
        // immovable, and scaling gravity by such a mass is meaningless.
        if (!particle->HasFiniteMass())
            return;

        // Gravity is an acceleration, so the applied force scales with mass.
        particle->AddForce(m_Gravity * particle->GetMass());
    }
}
|
// Copyright (c) 2017-2018 The PIVX developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "blocksignature.h"
#include "main.h"
#include "zpivchain.h"
// Signs the block's hash with 'key', storing the signature in
// block.vchBlockSig. Returns false (with an error log) if signing fails.
bool SignBlockWithKey(CBlock& block, const CKey& key)
{
    if (!key.Sign(block.GetHash(), block.vchBlockSig))
        return error("%s: failed to sign block hash with key", __func__);

    return true;
}
// Extracts the key id controlling 'txout' into 'keyID'.
// Only standard pay-to-pubkey and pay-to-pubkey-hash outputs carry a
// single recoverable key id; any other script type is a failure.
//
// Fix: the previous version returned true for solvable-but-unsupported
// script types (multisig, nulldata, ...) while leaving keyID untouched,
// handing callers a stale/uninitialized key id. Those cases now return
// false, which also lets SignBlock's PoW loop correctly skip such outputs.
bool GetKeyIDFromUTXO(const CTxOut& txout, CKeyID& keyID)
{
    std::vector<valtype> vSolutions;
    txnouttype whichType;
    if (!Solver(txout.scriptPubKey, whichType, vSolutions))
        return false;

    if (whichType == TX_PUBKEY) {
        // P2PK: solution is the raw public key.
        keyID = CPubKey(vSolutions[0]).GetID();
    } else if (whichType == TX_PUBKEYHASH) {
        // P2PKH: solution is the 160-bit key hash.
        keyID = CKeyID(uint160(vSolutions[0]));
    } else {
        // No single key id can be derived from this script type.
        return false;
    }

    return true;
}
// Signs 'block' using a key found in 'keystore':
//  - PoW blocks: the first coinbase output (vtx[0]) yielding a usable key id,
//  - PoS blocks: the second output of the coinstake tx (vtx[1].vout[1]).
// Returns false with an error log if no key id is found, the key is not in
// the keystore, or signing fails.
bool SignBlock(CBlock& block, const CKeyStore& keystore)
{
    CKeyID keyID;
    if (block.IsProofOfWork()) {
        bool fFoundID = false;
        for (const CTxOut& txout : block.vtx[0].vout) {
            if (!GetKeyIDFromUTXO(txout, keyID))
                continue;
            fFoundID = true;
            break;
        }
        if (!fFoundID)
            return error("%s: failed to find key for PoW", __func__);
    } else {
        if (!GetKeyIDFromUTXO(block.vtx[1].vout[1], keyID))
            return error("%s: failed to find key for PoS", __func__);
    }

    CKey key;
    if (!keystore.GetKey(keyID, key))
        return error("%s: failed to get key from keystore", __func__);

    return SignBlockWithKey(block, key);
}
// Verifies block.vchBlockSig against the public key of the staked input.
// PoW blocks must carry no signature; PoS blocks must carry one.
// Returns true when the signature is valid (or absent, for PoW).
bool CheckBlockSignature(const CBlock& block)
{
    if (block.IsProofOfWork())
        return block.vchBlockSig.empty();

    if (block.vchBlockSig.empty())
        return error("%s: vchBlockSig is empty!", __func__);

    /** Each block is signed by the private key of the input that is staked. This can be either zPIV or normal UTXO
     *  zPIV: Each zPIV has a keypair associated with it. The serial number is a hash of the public key.
     *  UTXO: The public key that signs must match the public key associated with the first utxo of the coinstake tx.
     */
    CPubKey pubkey;
    bool fzPIVStake = block.vtx[1].IsZerocoinSpend();
    if (fzPIVStake) {
        // Zerocoin stake: recover the pubkey from the spend proof.
        libzerocoin::CoinSpend spend = TxInToZerocoinSpend(block.vtx[1].vin[0]);
        pubkey = spend.getPubKey();
    } else {
        // Normal stake: solve the coinstake's second output for its pubkey.
        txnouttype whichType;
        std::vector<valtype> vSolutions;
        const CTxOut& txout = block.vtx[1].vout[1];
        if (!Solver(txout.scriptPubKey, whichType, vSolutions))
            return false;
        if (whichType == TX_PUBKEY || whichType == TX_PUBKEYHASH) {
            valtype& vchPubKey = vSolutions[0];
            pubkey = CPubKey(vchPubKey);
        }
    }

    if (!pubkey.IsValid())
        return error("%s: invalid pubkey %s", __func__, pubkey.GetHex());

    return pubkey.Verify(block.GetHash(), block.vchBlockSig);
}
|
/*-------------------------------------------------------------------------
* drawElements Quality Program OpenGL (ES) Module
* -----------------------------------------------
*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*//*!
* \file
* \brief Shader execution utilities.
*//*--------------------------------------------------------------------*/
#include "glsShaderExecUtil.hpp"
#include "gluRenderContext.hpp"
#include "gluDrawUtil.hpp"
#include "gluObjectWrapper.hpp"
#include "gluShaderProgram.hpp"
#include "gluTextureUtil.hpp"
#include "gluProgramInterfaceQuery.hpp"
#include "gluPixelTransfer.hpp"
#include "gluStrUtil.hpp"
#include "tcuTestLog.hpp"
#include "glwFunctions.hpp"
#include "glwEnums.hpp"
#include "deSTLUtil.hpp"
#include "deStringUtil.hpp"
#include "deUniquePtr.hpp"
#include "deMemory.h"
#include <map>
namespace deqp
{
namespace gls
{
namespace ShaderExecUtil
{
using std::vector;
// Returns true if 'extension' is advertised by the render context,
// scanning the indexed GL_EXTENSIONS list.
static bool isExtensionSupported (const glu::RenderContext& renderCtx, const std::string& extension)
{
    const glw::Functions&    gl            = renderCtx.getFunctions();
    int                      numExtensions = 0;

    gl.getIntegerv(GL_NUM_EXTENSIONS, &numExtensions);

    for (int extNdx = 0; extNdx < numExtensions; ++extNdx)
    {
        const char* const extName = (const char*)gl.getStringi(GL_EXTENSIONS, extNdx);
        if (extension == extName)
            return true;
    }

    return false;
}
// Throws tcu::NotSupportedError if 'extension' is not supported by the
// render context.
static void checkExtension (const glu::RenderContext& renderCtx, const std::string& extension)
{
    if (!isExtensionSupported(renderCtx, extension))
        throw tcu::NotSupportedError(extension + " is not supported");
}
// Queries the integer implementation limit 'pname' and throws:
//  - tcu::TestError if the query itself fails,
//  - tcu::NotSupportedError if the limit is below 'required'.
static void checkLimit (const glu::RenderContext& renderCtx, deUint32 pname, int required)
{
    const glw::Functions& gl = renderCtx.getFunctions();
    int implementationLimit = -1;
    deUint32 error;

    gl.getIntegerv(pname, &implementationLimit);
    error = gl.getError();

    if (error != GL_NO_ERROR)
        throw tcu::TestError("Failed to query " + de::toString(glu::getGettableStateStr(pname)) + " - got " + de::toString(glu::getErrorStr(error)));
    if (implementationLimit < required)
        throw tcu::NotSupportedError("Test requires " + de::toString(glu::getGettableStateStr(pname)) + " >= " + de::toString(required) + ", got " + de::toString(implementationLimit));
}
// Shader utilities
// Builds vertex-shader source that runs shaderSpec.source in the vertex
// stage. Inputs come from attributes named inputPrefix+name; results go
// out through flat varyings named outputPrefix+name. Boolean outputs are
// converted to highp (i)vec ints, since bools cannot be varyings.
static std::string generateVertexShader (const ShaderSpec& shaderSpec, const std::string& inputPrefix, const std::string& outputPrefix)
{
    // Pre-GLSL-1.3 versions use attribute/varying instead of in/out.
    const bool usesInout = glu::glslVersionUsesInOutQualifiers(shaderSpec.version);
    const char* in = usesInout ? "in" : "attribute";
    const char* out = usesInout ? "out" : "varying";
    std::ostringstream src;
    DE_ASSERT(!inputPrefix.empty() && !outputPrefix.empty());
    src << glu::getGLSLVersionDeclaration(shaderSpec.version) << "\n";
    if (!shaderSpec.globalDeclarations.empty())
        src << shaderSpec.globalDeclarations << "\n";
    src << in << " highp vec4 a_position;\n";
    // Declare one attribute per input symbol.
    for (vector<Symbol>::const_iterator input = shaderSpec.inputs.begin(); input != shaderSpec.inputs.end(); ++input)
        src << in << " " << glu::declare(input->varType, inputPrefix + input->name) << ";\n";
    // Declare one flat varying per output symbol (bools mapped to ints).
    for (vector<Symbol>::const_iterator output = shaderSpec.outputs.begin(); output != shaderSpec.outputs.end(); ++output)
    {
        DE_ASSERT(output->varType.isBasicType());
        if (glu::isDataTypeBoolOrBVec(output->varType.getBasicType()))
        {
            const int vecSize = glu::getDataTypeScalarSize(output->varType.getBasicType());
            const glu::DataType intBaseType = vecSize > 1 ? glu::getDataTypeIntVec(vecSize) : glu::TYPE_INT;
            const glu::VarType intType (intBaseType, glu::PRECISION_HIGHP);
            src << "flat " << out << " " << glu::declare(intType, outputPrefix + output->name) << ";\n";
        }
        else
            src << "flat " << out << " " << glu::declare(output->varType, outputPrefix + output->name) << ";\n";
    }
    src << "\n"
        << "void main (void)\n"
        << "{\n"
        << " gl_Position = a_position;\n"
        << " gl_PointSize = 1.0;\n\n";
    // Declare & fetch local input variables
    for (vector<Symbol>::const_iterator input = shaderSpec.inputs.begin(); input != shaderSpec.inputs.end(); ++input)
        src << "\t" << glu::declare(input->varType, input->name) << " = " << inputPrefix << input->name << ";\n";
    // Declare local output variables
    for (vector<Symbol>::const_iterator output = shaderSpec.outputs.begin(); output != shaderSpec.outputs.end(); ++output)
        src << "\t" << glu::declare(output->varType, output->name) << ";\n";
    // Operation - indented to correct level.
    {
        std::istringstream opSrc (shaderSpec.source);
        std::string line;
        while (std::getline(opSrc, line))
            src << "\t" << line << "\n";
    }
    // Assignments to outputs.
    for (vector<Symbol>::const_iterator output = shaderSpec.outputs.begin(); output != shaderSpec.outputs.end(); ++output)
    {
        if (glu::isDataTypeBoolOrBVec(output->varType.getBasicType()))
        {
            // Cast bool result to the int varying declared above.
            const int vecSize = glu::getDataTypeScalarSize(output->varType.getBasicType());
            const glu::DataType intBaseType = vecSize > 1 ? glu::getDataTypeIntVec(vecSize) : glu::TYPE_INT;
            src << "\t" << outputPrefix << output->name << " = " << glu::getDataTypeName(intBaseType) << "(" << output->name << ");\n";
        }
        else
            src << "\t" << outputPrefix << output->name << " = " << output->name << ";\n";
    }
    src << "}\n";
    return src.str();
}
// Builds geometry-shader source that runs shaderSpec.source per input
// point and emits a single point. Inputs arrive as flat arrays (one
// element, points layout); outputs leave as flat varyings, with boolean
// outputs converted to highp (i)vec ints.
static std::string generateGeometryShader (const ShaderSpec& shaderSpec, const std::string& inputPrefix, const std::string& outputPrefix)
{
    DE_ASSERT(glu::glslVersionUsesInOutQualifiers(shaderSpec.version));
    DE_ASSERT(!inputPrefix.empty() && !outputPrefix.empty());
    std::ostringstream src;
    src << glu::getGLSLVersionDeclaration(shaderSpec.version) << "\n";
    // Geometry shaders are an extension on ES 3.1 and earlier.
    if (glu::glslVersionIsES(shaderSpec.version) && shaderSpec.version <= glu::GLSL_VERSION_310_ES)
        src << "#extension GL_EXT_geometry_shader : require\n";
    if (!shaderSpec.globalDeclarations.empty())
        src << shaderSpec.globalDeclarations << "\n";
    src << "layout(points) in;\n"
        << "layout(points, max_vertices = 1) out;\n";
    // Inputs are arrays in a geometry shader (indexed [0] below).
    for (vector<Symbol>::const_iterator input = shaderSpec.inputs.begin(); input != shaderSpec.inputs.end(); ++input)
        src << "flat in " << glu::declare(input->varType, inputPrefix + input->name) << "[];\n";
    for (vector<Symbol>::const_iterator output = shaderSpec.outputs.begin(); output != shaderSpec.outputs.end(); ++output)
    {
        DE_ASSERT(output->varType.isBasicType());
        if (glu::isDataTypeBoolOrBVec(output->varType.getBasicType()))
        {
            // Bools cannot be varyings: use an int vector of the same size.
            const int vecSize = glu::getDataTypeScalarSize(output->varType.getBasicType());
            const glu::DataType intBaseType = vecSize > 1 ? glu::getDataTypeIntVec(vecSize) : glu::TYPE_INT;
            const glu::VarType intType (intBaseType, glu::PRECISION_HIGHP);
            src << "flat out " << glu::declare(intType, outputPrefix + output->name) << ";\n";
        }
        else
            src << "flat out " << glu::declare(output->varType, outputPrefix + output->name) << ";\n";
    }
    src << "\n"
        << "void main (void)\n"
        << "{\n"
        << " gl_Position = gl_in[0].gl_Position;\n\n";
    // Fetch input variables
    for (vector<Symbol>::const_iterator input = shaderSpec.inputs.begin(); input != shaderSpec.inputs.end(); ++input)
        src << "\t" << glu::declare(input->varType, input->name) << " = " << inputPrefix << input->name << "[0];\n";
    // Declare local output variables.
    for (vector<Symbol>::const_iterator output = shaderSpec.outputs.begin(); output != shaderSpec.outputs.end(); ++output)
        src << "\t" << glu::declare(output->varType, output->name) << ";\n";
    src << "\n";
    // Operation - indented to correct level.
    {
        std::istringstream opSrc (shaderSpec.source);
        std::string line;
        while (std::getline(opSrc, line))
            src << "\t" << line << "\n";
    }
    // Assignments to outputs.
    for (vector<Symbol>::const_iterator output = shaderSpec.outputs.begin(); output != shaderSpec.outputs.end(); ++output)
    {
        if (glu::isDataTypeBoolOrBVec(output->varType.getBasicType()))
        {
            const int vecSize = glu::getDataTypeScalarSize(output->varType.getBasicType());
            const glu::DataType intBaseType = vecSize > 1 ? glu::getDataTypeIntVec(vecSize) : glu::TYPE_INT;
            src << "\t" << outputPrefix << output->name << " = " << glu::getDataTypeName(intBaseType) << "(" << output->name << ");\n";
        }
        else
            src << "\t" << outputPrefix << output->name << " = " << output->name << ";\n";
    }
    src << " EmitVertex();\n"
        << " EndPrimitive();\n"
        << "}\n";
    return src.str();
}
// Returns source for a fragment shader that does no useful work, used
// when the interesting outputs are produced by an earlier stage.
static std::string generateEmptyFragmentSource (glu::GLSLVersion version)
{
    std::ostringstream src;

    src << glu::getGLSLVersionDeclaration(version) << "\n";
    // \todo [2013-08-05 pyry] Do we need one dummy output?
    src << "void main (void)\n{\n";

    // Legacy GLSL (no user-defined outputs) must still write gl_FragColor.
    if (!glu::glslVersionUsesInOutQualifiers(version))
        src << " gl_FragColor = vec4(0.0);\n";

    src << "}\n";
    return src.str();
}
// Builds a vertex shader that only forwards the position and every input
// symbol (inputPrefix+name -> flat outputPrefix+name) unchanged, for use
// when the actual computation happens in a later stage.
static std::string generatePassthroughVertexShader (const ShaderSpec& shaderSpec, const std::string& inputPrefix, const std::string& outputPrefix)
{
    // flat qualifier is not present in earlier versions?
    DE_ASSERT(glu::glslVersionUsesInOutQualifiers(shaderSpec.version));
    std::ostringstream src;
    src << glu::getGLSLVersionDeclaration(shaderSpec.version) << "\n"
        << "in highp vec4 a_position;\n";
    // Each input gets a matching flat output of the same type.
    for (vector<Symbol>::const_iterator input = shaderSpec.inputs.begin(); input != shaderSpec.inputs.end(); ++input)
    {
        src << "in " << glu::declare(input->varType, inputPrefix + input->name) << ";\n"
            << "flat out " << glu::declare(input->varType, outputPrefix + input->name) << ";\n";
    }
    src << "\nvoid main (void)\n{\n"
        << " gl_Position = a_position;\n"
        << " gl_PointSize = 1.0;\n";
    // Copy inputs straight through.
    for (vector<Symbol>::const_iterator input = shaderSpec.inputs.begin(); input != shaderSpec.inputs.end(); ++input)
        src << "\t" << outputPrefix << input->name << " = " << inputPrefix << input->name << ";\n";
    src << "}\n";
    return src.str();
}
// Emits fragment-shader output declarations for every symbol in
// shaderSpec.outputs, at the locations given by outLocationMap.
// Type mapping for readback:
//  - float/vec with useIntOutputs: declared as uint/uvec (values are
//    written with floatBitsToUint, for contexts without float targets),
//  - bool/bvec: declared as int/ivec,
//  - matrix: flattened to one uvec output per column, at consecutive
//    locations suffixed "_<colNdx>",
//  - everything else: declared as-is.
static void generateFragShaderOutputDecl (std::ostream& src, const ShaderSpec& shaderSpec, bool useIntOutputs, const std::map<std::string, int>& outLocationMap, const std::string& outputPrefix)
{
    DE_ASSERT(glu::glslVersionUsesInOutQualifiers(shaderSpec.version));
    for (int outNdx = 0; outNdx < (int)shaderSpec.outputs.size(); ++outNdx)
    {
        const Symbol& output = shaderSpec.outputs[outNdx];
        const int location = de::lookup(outLocationMap, output.name);
        const std::string outVarName = outputPrefix + output.name;
        glu::VariableDeclaration decl (output.varType, outVarName, glu::STORAGE_OUT, glu::INTERPOLATION_LAST, glu::Layout(location));
        TCU_CHECK_INTERNAL(output.varType.isBasicType());
        if (useIntOutputs && glu::isDataTypeFloatOrVec(output.varType.getBasicType()))
        {
            // Float output on an int-only target: declare as uint vector.
            const int vecSize = glu::getDataTypeScalarSize(output.varType.getBasicType());
            const glu::DataType uintBasicType = vecSize > 1 ? glu::getDataTypeUintVec(vecSize) : glu::TYPE_UINT;
            const glu::VarType uintType (uintBasicType, glu::PRECISION_HIGHP);
            decl.varType = uintType;
            src << decl << ";\n";
        }
        else if (glu::isDataTypeBoolOrBVec(output.varType.getBasicType()))
        {
            // Bool output: declare as int vector.
            const int vecSize = glu::getDataTypeScalarSize(output.varType.getBasicType());
            const glu::DataType intBasicType = vecSize > 1 ? glu::getDataTypeIntVec(vecSize) : glu::TYPE_INT;
            const glu::VarType intType (intBasicType, glu::PRECISION_HIGHP);
            decl.varType = intType;
            src << decl << ";\n";
        }
        else if (glu::isDataTypeMatrix(output.varType.getBasicType()))
        {
            // Matrix output: one uvec per column at consecutive locations.
            const int vecSize = glu::getDataTypeMatrixNumRows(output.varType.getBasicType());
            const int numVecs = glu::getDataTypeMatrixNumColumns(output.varType.getBasicType());
            const glu::DataType uintBasicType = glu::getDataTypeUintVec(vecSize);
            const glu::VarType uintType (uintBasicType, glu::PRECISION_HIGHP);
            decl.varType = uintType;
            for (int vecNdx = 0; vecNdx < numVecs; ++vecNdx)
            {
                decl.name = outVarName + "_" + de::toString(vecNdx);
                decl.layout.location = location + vecNdx;
                src << decl << ";\n";
            }
        }
        else
            src << decl << ";\n";
    }
}
// Emits the assignments from local result variables (valuePrefix+name)
// to the fragment-shader outputs (outputPrefix+name) declared by
// generateFragShaderOutputDecl, mirroring its type mapping:
//  - float/vec with useIntOutputs: bit-cast via floatBitsToUint,
//  - matrix: one assignment per column output "_<colNdx>",
//  - bool/bvec: cast to the int output type,
//  - everything else: plain copy.
//
// Fix: the float branch previously hard-coded the "o_" prefix instead of
// using the outputPrefix parameter like every other branch, producing
// wrong shader source for any caller passing a different prefix.
static void generateFragShaderOutAssign (std::ostream& src, const ShaderSpec& shaderSpec, bool useIntOutputs, const std::string& valuePrefix, const std::string& outputPrefix)
{
    for (vector<Symbol>::const_iterator output = shaderSpec.outputs.begin(); output != shaderSpec.outputs.end(); ++output)
    {
        if (useIntOutputs && glu::isDataTypeFloatOrVec(output->varType.getBasicType()))
            src << "\t" << outputPrefix << output->name << " = floatBitsToUint(" << valuePrefix << output->name << ");\n";
        else if (glu::isDataTypeMatrix(output->varType.getBasicType()))
        {
            const int numVecs = glu::getDataTypeMatrixNumColumns(output->varType.getBasicType());
            for (int vecNdx = 0; vecNdx < numVecs; ++vecNdx)
                if (useIntOutputs)
                    src << "\t" << outputPrefix << output->name << "_" << vecNdx << " = floatBitsToUint(" << valuePrefix << output->name << "[" << vecNdx << "]);\n";
                else
                    src << "\t" << outputPrefix << output->name << "_" << vecNdx << " = " << valuePrefix << output->name << "[" << vecNdx << "];\n";
        }
        else if (glu::isDataTypeBoolOrBVec(output->varType.getBasicType()))
        {
            const int vecSize = glu::getDataTypeScalarSize(output->varType.getBasicType());
            const glu::DataType intBaseType = vecSize > 1 ? glu::getDataTypeIntVec(vecSize) : glu::TYPE_INT;
            src << "\t" << outputPrefix << output->name << " = " << glu::getDataTypeName(intBaseType) << "(" << valuePrefix << output->name << ");\n";
        }
        else
            src << "\t" << outputPrefix << output->name << " = " << valuePrefix << output->name << ";\n";
    }
}
// Builds fragment-shader source that runs shaderSpec.source in the
// fragment stage. Inputs arrive as flat varyings (inputPrefix+name);
// results are written to the outputs declared/assigned via the
// generateFragShaderOutputDecl / generateFragShaderOutAssign helpers.
static std::string generateFragmentShader (const ShaderSpec& shaderSpec, bool useIntOutputs, const std::map<std::string, int>& outLocationMap, const std::string& inputPrefix, const std::string& outputPrefix)
{
    DE_ASSERT(glu::glslVersionUsesInOutQualifiers(shaderSpec.version));
    std::ostringstream src;
    src << glu::getGLSLVersionDeclaration(shaderSpec.version) << "\n";
    if (!shaderSpec.globalDeclarations.empty())
        src << shaderSpec.globalDeclarations << "\n";
    for (vector<Symbol>::const_iterator input = shaderSpec.inputs.begin(); input != shaderSpec.inputs.end(); ++input)
        src << "flat in " << glu::declare(input->varType, inputPrefix + input->name) << ";\n";
    generateFragShaderOutputDecl(src, shaderSpec, useIntOutputs, outLocationMap, outputPrefix);
    src << "\nvoid main (void)\n{\n";
    // Declare & fetch local input variables
    for (vector<Symbol>::const_iterator input = shaderSpec.inputs.begin(); input != shaderSpec.inputs.end(); ++input)
        src << "\t" << glu::declare(input->varType, input->name) << " = " << inputPrefix << input->name << ";\n";
    // Declare output variables
    for (vector<Symbol>::const_iterator output = shaderSpec.outputs.begin(); output != shaderSpec.outputs.end(); ++output)
        src << "\t" << glu::declare(output->varType, output->name) << ";\n";
    // Operation - indented to correct level.
    {
        std::istringstream opSrc (shaderSpec.source);
        std::string line;
        while (std::getline(opSrc, line))
            src << "\t" << line << "\n";
    }
    // Results are in the local variables (no extra prefix), so pass "".
    generateFragShaderOutAssign(src, shaderSpec, useIntOutputs, "", outputPrefix);
    src << "}\n";
    return src.str();
}
// Builds a fragment shader that simply copies values computed by an
// earlier stage (arriving as flat varyings inputPrefix+name) to the
// fragment outputs. Boolean symbols arrive already converted to ints by
// the producing stage, so their varyings are declared as int vectors.
static std::string generatePassthroughFragmentShader (const ShaderSpec& shaderSpec, bool useIntOutputs, const std::map<std::string, int>& outLocationMap, const std::string& inputPrefix, const std::string& outputPrefix)
{
    DE_ASSERT(glu::glslVersionUsesInOutQualifiers(shaderSpec.version));
    std::ostringstream src;
    src << glu::getGLSLVersionDeclaration(shaderSpec.version) << "\n";
    if (!shaderSpec.globalDeclarations.empty())
        src << shaderSpec.globalDeclarations << "\n";
    for (vector<Symbol>::const_iterator output = shaderSpec.outputs.begin(); output != shaderSpec.outputs.end(); ++output)
    {
        if (glu::isDataTypeBoolOrBVec(output->varType.getBasicType()))
        {
            // Matches the int conversion done by the producing stage.
            const int vecSize = glu::getDataTypeScalarSize(output->varType.getBasicType());
            const glu::DataType intBaseType = vecSize > 1 ? glu::getDataTypeIntVec(vecSize) : glu::TYPE_INT;
            const glu::VarType intType (intBaseType, glu::PRECISION_HIGHP);
            src << "flat in " << glu::declare(intType, inputPrefix + output->name) << ";\n";
        }
        else
            src << "flat in " << glu::declare(output->varType, inputPrefix + output->name) << ";\n";
    }
    generateFragShaderOutputDecl(src, shaderSpec, useIntOutputs, outLocationMap, outputPrefix);
    src << "\nvoid main (void)\n{\n";
    generateFragShaderOutAssign(src, shaderSpec, useIntOutputs, inputPrefix, outputPrefix);
    src << "}\n";
    return src.str();
}
// ShaderExecutor
// Base class for all shader executors: stores the render context and the
// input/output symbol lists from the shader spec.
ShaderExecutor::ShaderExecutor (const glu::RenderContext& renderCtx, const ShaderSpec& shaderSpec)
    : m_renderCtx (renderCtx)
    , m_inputs (shaderSpec.inputs)
    , m_outputs (shaderSpec.outputs)
{
}

ShaderExecutor::~ShaderExecutor (void)
{
}

// Binds the executor's compiled program as the active GL program.
// Must only be called when the program built successfully (isOk()).
void ShaderExecutor::useProgram (void)
{
    DE_ASSERT(isOk());
    m_renderCtx.getFunctions().useProgram(getProgram());
}
// FragmentOutExecutor
// Mapping of output symbols to fragment-output locations. A symbol that
// needs several locations (e.g. a matrix, one per column) appears once
// per location in locationSymbols.
struct FragmentOutputLayout
{
    std::vector<const Symbol*> locationSymbols; //! Symbols by location
    std::map<std::string, int> locationMap; //! Map from symbol name to start location
};

// Executor that runs the operation in the fragment stage and reads the
// results back from renderbuffer attachments.
class FragmentOutExecutor : public ShaderExecutor
{
public:
    FragmentOutExecutor (const glu::RenderContext& renderCtx, const ShaderSpec& shaderSpec);
    ~FragmentOutExecutor (void);
    void execute (int numValues, const void* const* inputs, void* const* outputs);
protected:
    const FragmentOutputLayout m_outputLayout;
};
// Assigns consecutive fragment-output locations to the given symbols.
// Each symbol starts at the next free location and occupies as many
// locations as its type requires; locationSymbols gets one entry per
// occupied location. Symbol names must be unique.
static FragmentOutputLayout computeFragmentOutputLayout (const std::vector<Symbol>& symbols)
{
    FragmentOutputLayout ret;
    int location = 0;
    for (std::vector<Symbol>::const_iterator it = symbols.begin(); it != symbols.end(); ++it)
    {
        const int numLocations = glu::getDataTypeNumLocations(it->varType.getBasicType());
        TCU_CHECK_INTERNAL(!de::contains(ret.locationMap, it->name));
        de::insert(ret.locationMap, it->name, location);
        location += numLocations;
        for (int ndx = 0; ndx < numLocations; ++ndx)
            ret.locationSymbols.push_back(&*it);
    }
    return ret;
}
// Float color render targets are assumed available on desktop core GL
// contexts only; other contexts read floats back bit-cast through
// integer targets instead.
inline bool hasFloatRenderTargets (const glu::RenderContext& renderCtx)
{
    return glu::isContextTypeGLCore(renderCtx.getType());
}
// Computes and caches the output-location layout for the spec's outputs.
FragmentOutExecutor::FragmentOutExecutor (const glu::RenderContext& renderCtx, const ShaderSpec& shaderSpec)
    : ShaderExecutor (renderCtx, shaderSpec)
    , m_outputLayout (computeFragmentOutputLayout(m_outputs))
{
}

FragmentOutExecutor::~FragmentOutExecutor (void)
{
}

// Convenience wrapper for glGetIntegerv returning a single value.
inline int queryInt (const glw::Functions& gl, deUint32 pname)
{
    int value = 0;
    gl.getIntegerv(pname, &value);
    return value;
}
// Chooses the renderbuffer texture format used to read back a shader
// output of the given type. Channel count follows the output's component
// count (3-component types use RGBA, as RGB renderbuffers are not
// available); channel type follows the scalar type, with floats stored
// as UNSIGNED_INT32 when useIntOutputs is set (floatBitsToUint path).
static tcu::TextureFormat getRenderbufferFormatForOutput (const glu::VarType& outputType, bool useIntOutputs)
{
    const tcu::TextureFormat::ChannelOrder channelOrderMap[] =
    {
        tcu::TextureFormat::R,
        tcu::TextureFormat::RG,
        tcu::TextureFormat::RGBA, // No RGB variants available.
        tcu::TextureFormat::RGBA
    };
    const glu::DataType basicType = outputType.getBasicType();
    const int numComps = glu::getDataTypeNumComponents(basicType);
    tcu::TextureFormat::ChannelType channelType;
    switch (glu::getDataTypeScalarType(basicType))
    {
        case glu::TYPE_UINT: channelType = tcu::TextureFormat::UNSIGNED_INT32; break;
        case glu::TYPE_INT: channelType = tcu::TextureFormat::SIGNED_INT32; break;
        case glu::TYPE_BOOL: channelType = tcu::TextureFormat::SIGNED_INT32; break;
        case glu::TYPE_FLOAT: channelType = useIntOutputs ? tcu::TextureFormat::UNSIGNED_INT32 : tcu::TextureFormat::FLOAT; break;
        default:
            throw tcu::InternalError("Invalid output type");
    }
    DE_ASSERT(de::inRange<int>(numComps, 1, DE_LENGTH_OF_ARRAY(channelOrderMap)));
    return tcu::TextureFormat(channelOrderMap[numComps-1], channelType);
}
void FragmentOutExecutor::execute (int numValues, const void* const* inputs, void* const* outputs)
{
const glw::Functions& gl = m_renderCtx.getFunctions();
const bool useIntOutputs = !hasFloatRenderTargets(m_renderCtx);
const int maxRenderbufferSize = queryInt(gl, GL_MAX_RENDERBUFFER_SIZE);
const int framebufferW = de::min(maxRenderbufferSize, numValues);
const int framebufferH = (numValues / framebufferW) + ((numValues % framebufferW != 0) ? 1 : 0);
glu::Framebuffer framebuffer (m_renderCtx);
glu::RenderbufferVector renderbuffers (m_renderCtx, m_outputLayout.locationSymbols.size());
vector<glu::VertexArrayBinding> vertexArrays;
vector<tcu::Vec2> positions (numValues);
if (framebufferH > maxRenderbufferSize)
throw tcu::NotSupportedError("Value count is too high for maximum supported renderbuffer size");
// Compute positions - 1px points are used to drive fragment shading.
for (int valNdx = 0; valNdx < numValues; valNdx++)
{
const int ix = valNdx % framebufferW;
const int iy = valNdx / framebufferW;
const float fx = -1.0f + 2.0f*((float(ix) + 0.5f) / float(framebufferW));
const float fy = -1.0f + 2.0f*((float(iy) + 0.5f) / float(framebufferH));
positions[valNdx] = tcu::Vec2(fx, fy);
}
// Vertex inputs.
vertexArrays.push_back(glu::va::Float("a_position", 2, numValues, 0, (const float*)&positions[0]));
for (int inputNdx = 0; inputNdx < (int)m_inputs.size(); inputNdx++)
{
const Symbol& symbol = m_inputs[inputNdx];
const std::string attribName = "a_" + symbol.name;
const void* ptr = inputs[inputNdx];
const glu::DataType basicType = symbol.varType.getBasicType();
const int vecSize = glu::getDataTypeScalarSize(basicType);
if (glu::isDataTypeFloatOrVec(basicType))
vertexArrays.push_back(glu::va::Float(attribName, vecSize, numValues, 0, (const float*)ptr));
else if (glu::isDataTypeIntOrIVec(basicType))
vertexArrays.push_back(glu::va::Int32(attribName, vecSize, numValues, 0, (const deInt32*)ptr));
else if (glu::isDataTypeUintOrUVec(basicType))
vertexArrays.push_back(glu::va::Uint32(attribName, vecSize, numValues, 0, (const deUint32*)ptr));
else if (glu::isDataTypeMatrix(basicType))
{
int numRows = glu::getDataTypeMatrixNumRows(basicType);
int numCols = glu::getDataTypeMatrixNumColumns(basicType);
int stride = numRows * numCols * (int)sizeof(float);
for (int colNdx = 0; colNdx < numCols; ++colNdx)
vertexArrays.push_back(glu::va::Float(attribName, colNdx, numRows, numValues, stride, ((const float*)ptr) + colNdx * numRows));
}
else
DE_ASSERT(false);
}
// Construct framebuffer.
gl.bindFramebuffer(GL_FRAMEBUFFER, *framebuffer);
for (int outNdx = 0; outNdx < (int)m_outputLayout.locationSymbols.size(); ++outNdx)
{
const Symbol& output = *m_outputLayout.locationSymbols[outNdx];
const deUint32 renderbuffer = renderbuffers[outNdx];
const deUint32 format = glu::getInternalFormat(getRenderbufferFormatForOutput(output.varType, useIntOutputs));
gl.bindRenderbuffer(GL_RENDERBUFFER, renderbuffer);
gl.renderbufferStorage(GL_RENDERBUFFER, format, framebufferW, framebufferH);
gl.framebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0+outNdx, GL_RENDERBUFFER, renderbuffer);
}
gl.bindRenderbuffer(GL_RENDERBUFFER, 0);
GLU_EXPECT_NO_ERROR(gl.getError(), "Failed to set up framebuffer object");
TCU_CHECK(gl.checkFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE);
{
vector<deUint32> drawBuffers(m_outputLayout.locationSymbols.size());
for (int ndx = 0; ndx < (int)m_outputLayout.locationSymbols.size(); ndx++)
drawBuffers[ndx] = GL_COLOR_ATTACHMENT0+ndx;
gl.drawBuffers((int)drawBuffers.size(), &drawBuffers[0]);
GLU_EXPECT_NO_ERROR(gl.getError(), "glDrawBuffers()");
}
// Render
gl.viewport(0, 0, framebufferW, framebufferH);
glu::draw(m_renderCtx, this->getProgram(), (int)vertexArrays.size(), &vertexArrays[0],
glu::pr::Points(numValues));
GLU_EXPECT_NO_ERROR(gl.getError(), "Error in draw");
// Read back pixels.
{
tcu::TextureLevel tmpBuf;
// \todo [2013-08-07 pyry] Some fast-paths could be added here.
for (int outNdx = 0; outNdx < (int)m_outputs.size(); ++outNdx)
{
const Symbol& output = m_outputs[outNdx];
const int outSize = output.varType.getScalarSize();
const int outVecSize = glu::getDataTypeNumComponents(output.varType.getBasicType());
const int outNumLocs = glu::getDataTypeNumLocations(output.varType.getBasicType());
deUint32* dstPtrBase = static_cast<deUint32*>(outputs[outNdx]);
const tcu::TextureFormat format = getRenderbufferFormatForOutput(output.varType, useIntOutputs);
const tcu::TextureFormat readFormat (tcu::TextureFormat::RGBA, format.type);
const int outLocation = de::lookup(m_outputLayout.locationMap, output.name);
tmpBuf.setStorage(readFormat, framebufferW, framebufferH);
for (int locNdx = 0; locNdx < outNumLocs; ++locNdx)
{
gl.readBuffer(GL_COLOR_ATTACHMENT0 + outLocation + locNdx);
glu::readPixels(m_renderCtx, 0, 0, tmpBuf.getAccess());
GLU_EXPECT_NO_ERROR(gl.getError(), "Reading pixels");
if (outSize == 4 && outNumLocs == 1)
deMemcpy(dstPtrBase, tmpBuf.getAccess().getDataPtr(), numValues*outVecSize*sizeof(deUint32));
else
{
for (int valNdx = 0; valNdx < numValues; valNdx++)
{
const deUint32* srcPtr = (const deUint32*)tmpBuf.getAccess().getDataPtr() + valNdx*4;
deUint32* dstPtr = &dstPtrBase[outSize*valNdx + outVecSize*locNdx];
deMemcpy(dstPtr, srcPtr, outVecSize*sizeof(deUint32));
}
}
}
}
}
// \todo [2013-08-07 pyry] Clear draw buffers & viewport?
gl.bindFramebuffer(GL_FRAMEBUFFER, 0);
}
// VertexShaderExecutor
// Executes the user-provided shader code in the vertex stage; results are
// routed through a generated passthrough fragment shader and captured as
// fragment outputs by the FragmentOutExecutor base class.
class VertexShaderExecutor : public FragmentOutExecutor
{
public:
						VertexShaderExecutor	(const glu::RenderContext& renderCtx, const ShaderSpec& shaderSpec);
						~VertexShaderExecutor	(void);

	bool				isOk					(void) const				{ return m_program.isOk();			}
	void				log						(tcu::TestLog& dst) const	{ dst << m_program;					}
	deUint32			getProgram				(void) const				{ return m_program.getProgram();	}

protected:
	const glu::ShaderProgram	m_program;	// linked vertex + passthrough-fragment program
};
// Builds the program: user code runs in the vertex shader ("a_" attributes ->
// "vtx_out_" varyings); the fragment shader passes varyings through to "o_"
// outputs, converting to integers when float render targets are unavailable.
VertexShaderExecutor::VertexShaderExecutor (const glu::RenderContext& renderCtx, const ShaderSpec& shaderSpec)
	: FragmentOutExecutor	(renderCtx, shaderSpec)
	, m_program				(renderCtx,
							 glu::ProgramSources() << glu::VertexSource(generateVertexShader(shaderSpec, "a_", "vtx_out_"))
												   << glu::FragmentSource(generatePassthroughFragmentShader(shaderSpec, !hasFloatRenderTargets(renderCtx), m_outputLayout.locationMap, "vtx_out_", "o_")))
{
}
VertexShaderExecutor::~VertexShaderExecutor (void)
{
	// Nothing to do; members release their resources in their own destructors.
}
// GeometryShaderExecutor
// Executes the user-provided shader code in the geometry stage, between
// passthrough vertex and fragment shaders. Instances must be created via
// create() so the geometry-shader support check runs first.
class GeometryShaderExecutor : public FragmentOutExecutor
{
public:
	static GeometryShaderExecutor*	create					(const glu::RenderContext& renderCtx, const ShaderSpec& shaderSpec);

									~GeometryShaderExecutor	(void);

	bool							isOk					(void) const				{ return m_program.isOk();			}
	void							log						(tcu::TestLog& dst) const	{ dst << m_program;					}
	deUint32						getProgram				(void) const				{ return m_program.getProgram();	}

protected:
	const glu::ShaderProgram		m_program;	// vertex + geometry + fragment program

private:
	// Private: construction goes through create() to enforce the support check.
									GeometryShaderExecutor	(const glu::RenderContext& renderCtx, const ShaderSpec& shaderSpec);
};
// Factory: verifies geometry-shader availability, then constructs the executor.
// On ES contexts with GLSL <= 3.10 (and no GL 4.5 core support) geometry
// shaders require the GL_EXT_geometry_shader extension.
GeometryShaderExecutor* GeometryShaderExecutor::create (const glu::RenderContext& renderCtx, const ShaderSpec& shaderSpec)
{
	if (glu::glslVersionIsES(shaderSpec.version) && shaderSpec.version <= glu::GLSL_VERSION_310_ES
		&& !contextSupports(renderCtx.getType(), glu::ApiType::core(4, 5)))
		checkExtension(renderCtx, "GL_EXT_geometry_shader");

	return new GeometryShaderExecutor(renderCtx, shaderSpec);
}
// Builds the program: passthrough vertex ("a_" -> "vtx_out_"), user code in the
// geometry stage ("vtx_out_" -> "geom_out_"), and a passthrough fragment shader
// writing "o_" outputs (integer formats when float targets are unsupported).
GeometryShaderExecutor::GeometryShaderExecutor (const glu::RenderContext& renderCtx, const ShaderSpec& shaderSpec)
	: FragmentOutExecutor	(renderCtx, shaderSpec)
	, m_program				(renderCtx,
							 glu::ProgramSources() << glu::VertexSource(generatePassthroughVertexShader(shaderSpec, "a_", "vtx_out_"))
												   << glu::GeometrySource(generateGeometryShader(shaderSpec, "vtx_out_", "geom_out_"))
												   << glu::FragmentSource(generatePassthroughFragmentShader(shaderSpec, !hasFloatRenderTargets(renderCtx), m_outputLayout.locationMap, "geom_out_", "o_")))
{
}
GeometryShaderExecutor::~GeometryShaderExecutor (void)
{
	// Nothing to do; members release their resources in their own destructors.
}
// FragmentShaderExecutor
// Executes the user-provided shader code directly in the fragment stage,
// behind a passthrough vertex shader.
class FragmentShaderExecutor : public FragmentOutExecutor
{
public:
						FragmentShaderExecutor	(const glu::RenderContext& renderCtx, const ShaderSpec& shaderSpec);
						~FragmentShaderExecutor	(void);

	bool				isOk					(void) const				{ return m_program.isOk();			}
	void				log						(tcu::TestLog& dst) const	{ dst << m_program;					}
	deUint32			getProgram				(void) const				{ return m_program.getProgram();	}

protected:
	const glu::ShaderProgram	m_program;	// passthrough-vertex + fragment program
};
// Builds the program: passthrough vertex shader feeding "vtx_out_" varyings
// into a fragment shader that contains the user code and writes "o_" outputs.
FragmentShaderExecutor::FragmentShaderExecutor (const glu::RenderContext& renderCtx, const ShaderSpec& shaderSpec)
	: FragmentOutExecutor	(renderCtx, shaderSpec)
	, m_program				(renderCtx,
							 glu::ProgramSources() << glu::VertexSource(generatePassthroughVertexShader(shaderSpec, "a_", "vtx_out_"))
												   << glu::FragmentSource(generateFragmentShader(shaderSpec, !hasFloatRenderTargets(renderCtx), m_outputLayout.locationMap, "vtx_out_", "o_")))
{
}
FragmentShaderExecutor::~FragmentShaderExecutor (void)
{
	// Nothing to do; members release their resources in their own destructors.
}
// Shared utilities for compute and tess executors
// Returns the std430 base alignment in bytes for a scalar or vector type:
// 4 for scalars, 8 for 2-component vectors, 16 for 3- and 4-component vectors.
static deUint32 getVecStd430ByteAlignment (glu::DataType type)
{
	const int numComps = glu::getDataTypeScalarSize(type);

	if (numComps == 1)
		return 4u;
	else if (numComps == 2)
		return 8u;
	else if (numComps == 3 || numComps == 4)
		return 16u;

	DE_ASSERT(false);	// not a scalar/vector type
	return 0u;
}
// Base class for executors that transfer inputs/outputs through std430 shader
// storage buffers (used by the compute and tessellation executors).
class BufferIoExecutor : public ShaderExecutor
{
public:
							BufferIoExecutor	(const glu::RenderContext& renderCtx, const ShaderSpec& shaderSpec, const glu::ProgramSources& sources);
							~BufferIoExecutor	(void);

	bool					isOk				(void) const				{ return m_program.isOk();			}
	void					log					(tcu::TestLog& dst) const	{ dst << m_program;					}
	deUint32				getProgram			(void) const				{ return m_program.getProgram();	}

protected:
	// Fixed SSBO binding points; the generated GLSL uses the same numbers.
	enum
	{
		INPUT_BUFFER_BINDING	= 0,
		OUTPUT_BUFFER_BINDING	= 1,
	};

	// Allocates both buffers sized for numValues elements.
	void					initBuffers			(int numValues);
	deUint32				getInputBuffer		(void) const	{ return *m_inputBuffer;					}
	deUint32				getOutputBuffer		(void) const	{ return *m_outputBuffer;					}
	deUint32				getInputStride		(void) const	{ return getLayoutStride(m_inputLayout);	}
	deUint32				getOutputStride		(void) const	{ return getLayoutStride(m_outputLayout);	}

	// Copy user-supplied values into / out of the (mapped) buffers.
	void					uploadInputBuffer	(const void* const* inputPtrs, int numValues);
	void					readOutputBuffer	(void* const* outputPtrs, int numValues);

	// Emits GLSL declarations for the Inputs/Outputs structs and buffer blocks.
	static void				declareBufferBlocks	(std::ostream& src, const ShaderSpec& spec);
	// Emits GLSL that loads inputs, runs spec.source, and stores outputs for one invocation.
	static void				generateExecBufferIo(std::ostream& src, const ShaderSpec& spec, const char* invocationNdxName);

	glu::ShaderProgram		m_program;

private:
	// Byte-level placement of one variable inside a buffer element.
	struct VarLayout
	{
		deUint32			offset;			// offset of this variable within an element
		deUint32			stride;			// element-to-element stride (identical for all variables)
		deUint32			matrixStride;	// column stride for matrices; 0 otherwise

		VarLayout (void) : offset(0), stride(0), matrixStride(0) {}
	};

	void					resizeInputBuffer	(int newSize);
	void					resizeOutputBuffer	(int newSize);

	static void				computeVarLayout	(const std::vector<Symbol>& symbols, std::vector<VarLayout>* layout);
	static deUint32			getLayoutStride		(const vector<VarLayout>& layout);

	static void				copyToBuffer		(const glu::VarType& varType, const VarLayout& layout, int numValues, const void* srcBasePtr, void* dstBasePtr);
	static void				copyFromBuffer		(const glu::VarType& varType, const VarLayout& layout, int numValues, const void* srcBasePtr, void* dstBasePtr);

	glu::Buffer				m_inputBuffer;
	glu::Buffer				m_outputBuffer;

	vector<VarLayout>		m_inputLayout;
	vector<VarLayout>		m_outputLayout;
};
// Compiles the supplied program sources and precomputes the std430 layouts
// for the input and output symbol lists.
BufferIoExecutor::BufferIoExecutor (const glu::RenderContext& renderCtx, const ShaderSpec& shaderSpec, const glu::ProgramSources& sources)
	: ShaderExecutor	(renderCtx, shaderSpec)
	, m_program			(renderCtx, sources)
	, m_inputBuffer		(renderCtx)
	, m_outputBuffer	(renderCtx)
{
	computeVarLayout(m_inputs, &m_inputLayout);
	computeVarLayout(m_outputs, &m_outputLayout);
}
BufferIoExecutor::~BufferIoExecutor (void)
{
	// Nothing to do; program and buffers clean up in their own destructors.
}
// (Re)allocates the input SSBO storage to newSize bytes; contents are undefined.
void BufferIoExecutor::resizeInputBuffer (int newSize)
{
	const glw::Functions& gl = m_renderCtx.getFunctions();
	gl.bindBuffer(GL_SHADER_STORAGE_BUFFER, *m_inputBuffer);
	gl.bufferData(GL_SHADER_STORAGE_BUFFER, newSize, DE_NULL, GL_STATIC_DRAW);
	GLU_EXPECT_NO_ERROR(gl.getError(), "Failed to allocate input buffer");
}
// (Re)allocates the output SSBO storage to newSize bytes; contents are undefined.
void BufferIoExecutor::resizeOutputBuffer (int newSize)
{
	const glw::Functions& gl = m_renderCtx.getFunctions();
	gl.bindBuffer(GL_SHADER_STORAGE_BUFFER, *m_outputBuffer);
	gl.bufferData(GL_SHADER_STORAGE_BUFFER, newSize, DE_NULL, GL_STATIC_DRAW);
	GLU_EXPECT_NO_ERROR(gl.getError(), "Failed to allocate output buffer");
}
void BufferIoExecutor::initBuffers (int numValues)
{
const deUint32 inputStride = getLayoutStride(m_inputLayout);
const deUint32 outputStride = getLayoutStride(m_outputLayout);
const int inputBufferSize = numValues * inputStride;
const int outputBufferSize = numValues * outputStride;
resizeInputBuffer(inputBufferSize);
resizeOutputBuffer(outputBufferSize);
}
// Computes an std430-style layout for the given symbols. Scalars and vectors
// are placed at their vector alignment (4/8/16/16 bytes for 1..4 components);
// matrices are laid out column-major with each column aligned as a vector.
// Afterwards, every entry's stride is set to the total element size rounded up
// to the largest member alignment.
void BufferIoExecutor::computeVarLayout (const std::vector<Symbol>& symbols, std::vector<VarLayout>* layout)
{
	deUint32	maxAlignment	= 0;
	deUint32	curOffset		= 0;

	DE_ASSERT(layout->empty());
	layout->resize(symbols.size());

	for (size_t varNdx = 0; varNdx < symbols.size(); varNdx++)
	{
		const Symbol&		symbol		= symbols[varNdx];
		const glu::DataType	basicType	= symbol.varType.getBasicType();
		VarLayout&			layoutEntry	= (*layout)[varNdx];

		if (glu::isDataTypeScalarOrVector(basicType))
		{
			const deUint32	alignment	= getVecStd430ByteAlignment(basicType);
			const deUint32	size		= (deUint32)glu::getDataTypeScalarSize(basicType)*(int)sizeof(deUint32);

			// Align the running offset before placing this variable.
			curOffset		= (deUint32)deAlign32((int)curOffset, (int)alignment);
			maxAlignment	= de::max(maxAlignment, alignment);

			layoutEntry.offset			= curOffset;
			layoutEntry.matrixStride	= 0;

			curOffset += size;
		}
		else if (glu::isDataTypeMatrix(basicType))
		{
			const int				numVecs			= glu::getDataTypeMatrixNumColumns(basicType);
			const glu::DataType		vecType			= glu::getDataTypeFloatVec(glu::getDataTypeMatrixNumRows(basicType));
			const deUint32			vecAlignment	= getVecStd430ByteAlignment(vecType);

			curOffset		= (deUint32)deAlign32((int)curOffset, (int)vecAlignment);
			maxAlignment	= de::max(maxAlignment, vecAlignment);

			layoutEntry.offset			= curOffset;
			layoutEntry.matrixStride	= vecAlignment;	// column-to-column stride

			curOffset += vecAlignment*numVecs;
		}
		else
			DE_ASSERT(false);	// aggregate types are not supported here
	}

	{
		// Element stride = total size padded to largest alignment; shared by all entries.
		const deUint32 totalSize = (deUint32)deAlign32(curOffset, maxAlignment);

		for (vector<VarLayout>::iterator varIter = layout->begin(); varIter != layout->end(); ++varIter)
			varIter->stride = totalSize;
	}
}
// Returns the per-element stride of a layout. All entries carry the same
// stride (assigned in computeVarLayout), so the first one is authoritative;
// an empty layout has stride 0.
inline deUint32 BufferIoExecutor::getLayoutStride (const vector<VarLayout>& layout)
{
	if (layout.empty())
		return 0;

	return layout[0].stride;
}
// Copies tightly-packed host data into the std430-laid-out buffer, one
// vector (matrix column) at a time. Only basic scalar/vector/matrix types
// are supported; anything else raises InternalError.
void BufferIoExecutor::copyToBuffer (const glu::VarType& varType, const VarLayout& layout, int numValues, const void* srcBasePtr, void* dstBasePtr)
{
	if (!varType.isBasicType())
		throw tcu::InternalError("Unsupported type");

	const glu::DataType	basicType	= varType.getBasicType();
	const bool			isMatrix	= glu::isDataTypeMatrix(basicType);
	const int			scalarSize	= glu::getDataTypeScalarSize(basicType);
	const int			numVecs		= isMatrix ? glu::getDataTypeMatrixNumColumns(basicType) : 1;
	const int			numComps	= scalarSize / numVecs;	// components per vector/column

	for (int valueNdx = 0; valueNdx < numValues; ++valueNdx)
	{
		for (int colNdx = 0; colNdx < numVecs; ++colNdx)
		{
			// Source is tightly packed; destination honors element stride and matrix column stride.
			const int	srcOffset	= (int)sizeof(deUint32)*(valueNdx*scalarSize + colNdx*numComps);
			const int	dstOffset	= layout.offset + layout.stride*valueNdx + (isMatrix ? layout.matrixStride*colNdx : 0);

			deMemcpy((deUint8*)dstBasePtr + dstOffset, (const deUint8*)srcBasePtr + srcOffset, sizeof(deUint32)*numComps);
		}
	}
}
// Inverse of copyToBuffer: copies data out of the std430-laid-out buffer into
// tightly-packed host memory, one vector (matrix column) at a time. Only basic
// scalar/vector/matrix types are supported.
void BufferIoExecutor::copyFromBuffer (const glu::VarType& varType, const VarLayout& layout, int numValues, const void* srcBasePtr, void* dstBasePtr)
{
	if (varType.isBasicType())
	{
		const glu::DataType	basicType	= varType.getBasicType();
		const bool			isMatrix	= glu::isDataTypeMatrix(basicType);
		const int			scalarSize	= glu::getDataTypeScalarSize(basicType);
		const int			numVecs		= isMatrix ? glu::getDataTypeMatrixNumColumns(basicType) : 1;
		const int			numComps	= scalarSize / numVecs;	// components per vector/column

		for (int elemNdx = 0; elemNdx < numValues; elemNdx++)
		{
			for (int vecNdx = 0; vecNdx < numVecs; vecNdx++)
			{
				// Source honors element stride and matrix column stride; destination is tightly packed.
				const int		srcOffset	= layout.offset + layout.stride*elemNdx + (isMatrix ? layout.matrixStride*vecNdx : 0);
				const int		dstOffset	= (int)sizeof(deUint32)*(elemNdx*scalarSize + vecNdx*numComps);
				const deUint8*	srcPtr		= (const deUint8*)srcBasePtr + srcOffset;
				deUint8*		dstPtr		= (deUint8*)dstBasePtr + dstOffset;

				deMemcpy(dstPtr, srcPtr, sizeof(deUint32)*numComps);
			}
		}
	}
	else
		throw tcu::InternalError("Unsupported type");
}
// Maps the input SSBO for writing and copies each user-provided input array
// into its std430 slot. The buffer is always unmapped, even if a copy throws.
void BufferIoExecutor::uploadInputBuffer (const void* const* inputPtrs, int numValues)
{
	const glw::Functions&	gl				= m_renderCtx.getFunctions();
	const deUint32			buffer			= *m_inputBuffer;
	const deUint32			inputStride		= getLayoutStride(m_inputLayout);
	const int				inputBufferSize	= inputStride*numValues;

	if (inputBufferSize == 0)
		return; // No inputs

	gl.bindBuffer(GL_SHADER_STORAGE_BUFFER, buffer);
	void* mapPtr = gl.mapBufferRange(GL_SHADER_STORAGE_BUFFER, 0, inputBufferSize, GL_MAP_WRITE_BIT);
	GLU_EXPECT_NO_ERROR(gl.getError(), "glMapBufferRange()");
	TCU_CHECK(mapPtr);

	try
	{
		DE_ASSERT(m_inputs.size() == m_inputLayout.size());
		for (size_t inputNdx = 0; inputNdx < m_inputs.size(); ++inputNdx)
		{
			const glu::VarType&	varType	= m_inputs[inputNdx].varType;
			const VarLayout&	layout	= m_inputLayout[inputNdx];

			copyToBuffer(varType, layout, numValues, inputPtrs[inputNdx], mapPtr);
		}
	}
	catch (...)
	{
		// Unmap before propagating so the buffer is not left mapped.
		gl.unmapBuffer(GL_SHADER_STORAGE_BUFFER);
		throw;
	}

	gl.unmapBuffer(GL_SHADER_STORAGE_BUFFER);
	GLU_EXPECT_NO_ERROR(gl.getError(), "glUnmapBuffer()");
}
// Maps the output SSBO for reading and copies each result variable out into
// the caller's arrays. The buffer is always unmapped, even if a copy throws.
void BufferIoExecutor::readOutputBuffer (void* const* outputPtrs, int numValues)
{
	const glw::Functions&	gl					= m_renderCtx.getFunctions();
	const deUint32			buffer				= *m_outputBuffer;
	const deUint32			outputStride		= getLayoutStride(m_outputLayout);
	const int				outputBufferSize	= numValues*outputStride;

	DE_ASSERT(outputBufferSize > 0); // At least some outputs are required.

	gl.bindBuffer(GL_SHADER_STORAGE_BUFFER, buffer);
	void* mapPtr = gl.mapBufferRange(GL_SHADER_STORAGE_BUFFER, 0, outputBufferSize, GL_MAP_READ_BIT);
	GLU_EXPECT_NO_ERROR(gl.getError(), "glMapBufferRange()");
	TCU_CHECK(mapPtr);

	try
	{
		DE_ASSERT(m_outputs.size() == m_outputLayout.size());
		for (size_t outputNdx = 0; outputNdx < m_outputs.size(); ++outputNdx)
		{
			const glu::VarType&	varType	= m_outputs[outputNdx].varType;
			const VarLayout&	layout	= m_outputLayout[outputNdx];

			copyFromBuffer(varType, layout, numValues, mapPtr, outputPtrs[outputNdx]);
		}
	}
	catch (...)
	{
		// Unmap before propagating so the buffer is not left mapped.
		gl.unmapBuffer(GL_SHADER_STORAGE_BUFFER);
		throw;
	}

	gl.unmapBuffer(GL_SHADER_STORAGE_BUFFER);
	GLU_EXPECT_NO_ERROR(gl.getError(), "glUnmapBuffer()");
}
// Emits GLSL declarations for the Inputs/Outputs structs and for the std430
// buffer blocks at the fixed binding points. The input block is omitted when
// the spec has no inputs; the output block is always declared.
void BufferIoExecutor::declareBufferBlocks (std::ostream& src, const ShaderSpec& spec)
{
	// Input struct
	if (!spec.inputs.empty())
	{
		glu::StructType inputStruct("Inputs");
		for (vector<Symbol>::const_iterator symIter = spec.inputs.begin(); symIter != spec.inputs.end(); ++symIter)
			inputStruct.addMember(symIter->name.c_str(), symIter->varType);
		src << glu::declare(&inputStruct) << ";\n";
	}

	// Output struct
	{
		glu::StructType outputStruct("Outputs");
		for (vector<Symbol>::const_iterator symIter = spec.outputs.begin(); symIter != spec.outputs.end(); ++symIter)
			outputStruct.addMember(symIter->name.c_str(), symIter->varType);
		src << glu::declare(&outputStruct) << ";\n";
	}

	src << "\n";

	if (!spec.inputs.empty())
	{
		src	<< "layout(binding = " << int(INPUT_BUFFER_BINDING) << ", std430) buffer InBuffer\n"
			<< "{\n"
			<< "	Inputs inputs[];\n"
			<< "};\n";
	}

	src	<< "layout(binding = " << int(OUTPUT_BUFFER_BINDING) << ", std430) buffer OutBuffer\n"
		<< "{\n"
		<< "	Outputs outputs[];\n"
		<< "};\n"
		<< "\n";
}
// Emits the per-invocation GLSL body: declare locals initialized from the
// input buffer, declare output locals, splice in the user's source line by
// line, then store output locals back into the output buffer. The invocation
// index expression is supplied by the caller via invocationNdxName.
void BufferIoExecutor::generateExecBufferIo (std::ostream& src, const ShaderSpec& spec, const char* invocationNdxName)
{
	for (vector<Symbol>::const_iterator symIter = spec.inputs.begin(); symIter != spec.inputs.end(); ++symIter)
		src << "\t" << glu::declare(symIter->varType, symIter->name) << " = inputs[" << invocationNdxName << "]." << symIter->name << ";\n";

	for (vector<Symbol>::const_iterator symIter = spec.outputs.begin(); symIter != spec.outputs.end(); ++symIter)
		src << "\t" << glu::declare(symIter->varType, symIter->name) << ";\n";

	src << "\n";

	{
		// Indent the user code one tab to sit inside main().
		std::istringstream	opSrc	(spec.source);
		std::string			line;

		while (std::getline(opSrc, line))
			src << "\t" << line << "\n";
	}

	src << "\n";
	for (vector<Symbol>::const_iterator symIter = spec.outputs.begin(); symIter != spec.outputs.end(); ++symIter)
		src << "\toutputs[" << invocationNdxName << "]." << symIter->name << " = " << symIter->name << ";\n";
}
// ComputeShaderExecutor
// Executes the user-provided shader code in a compute shader; values are
// passed through the SSBOs managed by BufferIoExecutor.
class ComputeShaderExecutor : public BufferIoExecutor
{
public:
						ComputeShaderExecutor	(const glu::RenderContext& renderCtx, const ShaderSpec& shaderSpec);
						~ComputeShaderExecutor	(void);

	void				execute					(int numValues, const void* const* inputs, void* const* outputs);

protected:
	static std::string	generateComputeShader	(const ShaderSpec& spec);

	tcu::IVec3			m_maxWorkSize;	// maximum dispatch size per axis; limits values per dispatch
};
// Generates the compute shader source: local_size_x = 1, one value processed
// per work group. The invocation index is linearized from the work-group ID
// so batched dispatches index the bound buffer ranges consistently.
std::string ComputeShaderExecutor::generateComputeShader (const ShaderSpec& spec)
{
	std::ostringstream src;
	src <<  glu::getGLSLVersionDeclaration(spec.version) << "\n";

	if (!spec.globalDeclarations.empty())
		src << spec.globalDeclarations << "\n";

	src << "layout(local_size_x = 1) in;\n"
		<< "\n";

	declareBufferBlocks(src, spec);

	src << "void main (void)\n"
		<< "{\n"
		<< "	uint invocationNdx = gl_NumWorkGroups.x*gl_NumWorkGroups.y*gl_WorkGroupID.z\n"
		<< "	                   + gl_NumWorkGroups.x*gl_WorkGroupID.y + gl_WorkGroupID.x;\n";

	generateExecBufferIo(src, spec, "invocationNdx");

	src << "}\n";

	return src.str();
}
ComputeShaderExecutor::ComputeShaderExecutor (const glu::RenderContext& renderCtx, const ShaderSpec& shaderSpec)
	: BufferIoExecutor	(renderCtx, shaderSpec,
						 glu::ProgramSources() << glu::ComputeSource(generateComputeShader(shaderSpec)))
{
	// Conservative per-axis dispatch limit; comment in original says this is
	// the minimum guaranteed by "3plus" (presumably ES 3.1+ minima — confirm).
	m_maxWorkSize	= tcu::IVec3(128,128,64); // Minimum in 3plus
}
ComputeShaderExecutor::~ComputeShaderExecutor (void)
{
	// Nothing to do; base class owns the program and buffers.
}
// Runs the compute shader for numValues values. Values are processed in
// batches of at most m_maxWorkSize[0]; for each batch the relevant sub-range
// of the input/output SSBOs is bound before dispatching.
void ComputeShaderExecutor::execute (int numValues, const void* const* inputs, void* const* outputs)
{
	const glw::Functions&	gl						= m_renderCtx.getFunctions();
	const int				maxValuesPerInvocation	= m_maxWorkSize[0];
	const deUint32			inputStride				= getInputStride();
	const deUint32			outputStride			= getOutputStride();

	initBuffers(numValues);

	// Setup input buffer & copy data
	uploadInputBuffer(inputs, numValues);

	// Perform compute invocations
	{
		int curOffset = 0;
		while (curOffset < numValues)
		{
			const int numToExec = de::min(maxValuesPerInvocation, numValues-curOffset);

			// Input binding is skipped entirely when there are no inputs (stride 0).
			if (inputStride > 0)
				gl.bindBufferRange(GL_SHADER_STORAGE_BUFFER, INPUT_BUFFER_BINDING, getInputBuffer(), curOffset*inputStride, numToExec*inputStride);
			gl.bindBufferRange(GL_SHADER_STORAGE_BUFFER, OUTPUT_BUFFER_BINDING, getOutputBuffer(), curOffset*outputStride, numToExec*outputStride);
			GLU_EXPECT_NO_ERROR(gl.getError(), "glBindBufferRange(GL_SHADER_STORAGE_BUFFER)");

			// One work group (local_size_x = 1) per value.
			gl.dispatchCompute(numToExec, 1, 1);
			GLU_EXPECT_NO_ERROR(gl.getError(), "glDispatchCompute()");

			curOffset += numToExec;
		}
	}

	// Read back data
	readOutputBuffer(outputs, numValues);
}
// Tessellation utils
// Generates a minimal vertex shader for the tessellation executors. The
// position is synthesized from gl_VertexID; its value is presumably irrelevant
// since the tessellation stages do their own addressing — confirm with callers.
static std::string generateVertexShaderForTess (glu::GLSLVersion version)
{
	std::ostringstream src;
	src <<  glu::getGLSLVersionDeclaration(version) << "\n";

	src << "void main (void)\n{\n"
		<< "	gl_Position = vec4(gl_VertexID/2, gl_VertexID%2, 0.0, 1.0);\n"
		<< "}\n";

	return src.str();
}
// Verifies that the context can run the tessellation executors: the extension
// (on ES <= 3.10 without GL 4.5 core) and enough shader storage blocks for the
// given tessellation stage.
void checkTessSupport (const glu::RenderContext& renderCtx, const ShaderSpec& shaderSpec, glu::ShaderType stage)
{
	const int numBlockRequired = 2; // highest binding is always 1 (output) i.e. count == 2

	if (glu::glslVersionIsES(shaderSpec.version) && shaderSpec.version <= glu::GLSL_VERSION_310_ES
		&& !contextSupports(renderCtx.getType(), glu::ApiType::core(4, 5)))
		checkExtension(renderCtx, "GL_EXT_tessellation_shader");

	if (stage == glu::SHADERTYPE_TESSELLATION_CONTROL)
		checkLimit(renderCtx, GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS, numBlockRequired);
	else if (stage == glu::SHADERTYPE_TESSELLATION_EVALUATION)
		checkLimit(renderCtx, GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS, numBlockRequired);
	else
		DE_ASSERT(false);	// not a tessellation stage
}
// TessControlExecutor
// Executes the user-provided shader code in the tessellation control stage;
// values are passed through SSBOs. Use create() so support checks run first.
class TessControlExecutor : public BufferIoExecutor
{
public:
	static TessControlExecutor*	create						(const glu::RenderContext& renderCtx, const ShaderSpec& shaderSpec);

								~TessControlExecutor		(void);

	void						execute						(int numValues, const void* const* inputs, void* const* outputs);

protected:
	static std::string			generateTessControlShader	(const ShaderSpec& shaderSpec);

private:
	// Private: construction goes through create() for the support check.
								TessControlExecutor			(const glu::RenderContext& renderCtx, const ShaderSpec& shaderSpec);
};
// Factory: checks tessellation support for the control stage, then constructs.
TessControlExecutor* TessControlExecutor::create (const glu::RenderContext& renderCtx, const ShaderSpec& shaderSpec)
{
	checkTessSupport(renderCtx, shaderSpec, glu::SHADERTYPE_TESSELLATION_CONTROL);

	return new TessControlExecutor(renderCtx, shaderSpec);
}
// Generates the tessellation control shader containing the user code. All
// tess levels are set to 1 (minimal tessellation); one value is processed per
// patch, indexed by gl_PrimitiveID.
std::string TessControlExecutor::generateTessControlShader (const ShaderSpec& shaderSpec)
{
	std::ostringstream src;
	src << glu::getGLSLVersionDeclaration(shaderSpec.version) << "\n";

	if (glu::glslVersionIsES(shaderSpec.version) && shaderSpec.version <= glu::GLSL_VERSION_310_ES)
		src << "#extension GL_EXT_tessellation_shader : require\n";

	if (!shaderSpec.globalDeclarations.empty())
		src << shaderSpec.globalDeclarations << "\n";

	src << "\nlayout(vertices = 1) out;\n\n";

	declareBufferBlocks(src, shaderSpec);

	src << "void main (void)\n{\n";

	// Minimal tessellation: all inner/outer levels set to 1.
	for (int ndx = 0; ndx < 2; ndx++)
		src << "\tgl_TessLevelInner[" << ndx << "] = 1.0;\n";

	for (int ndx = 0; ndx < 4; ndx++)
		src << "\tgl_TessLevelOuter[" << ndx << "] = 1.0;\n";

	src << "\n"
		<< "\thighp uint invocationId = uint(gl_PrimitiveID);\n";

	generateExecBufferIo(src, shaderSpec, "invocationId");

	src << "}\n";

	return src.str();
}
// Generates a trivial tessellation evaluation shader (triangles, ccw) used
// when the interesting work happens in the control stage.
static std::string generateEmptyTessEvalShader (glu::GLSLVersion version)
{
	std::ostringstream src;
	src <<  glu::getGLSLVersionDeclaration(version) << "\n";

	if (glu::glslVersionIsES(version) && version <= glu::GLSL_VERSION_310_ES)
		src << "#extension GL_EXT_tessellation_shader : require\n\n";

	src << "layout(triangles, ccw) in;\n";

	src << "\nvoid main (void)\n{\n"
		<< "\tgl_Position = vec4(gl_TessCoord.xy, 0.0, 1.0);\n"
		<< "}\n";

	return src.str();
}
// Builds the full pipeline: minimal vertex shader, user code in the control
// stage, trivial evaluation shader, and an empty fragment shader.
TessControlExecutor::TessControlExecutor (const glu::RenderContext& renderCtx, const ShaderSpec& shaderSpec)
	: BufferIoExecutor	(renderCtx, shaderSpec, glu::ProgramSources()
							<< glu::VertexSource(generateVertexShaderForTess(shaderSpec.version))
							<< glu::TessellationControlSource(generateTessControlShader(shaderSpec))
							<< glu::TessellationEvaluationSource(generateEmptyTessEvalShader(shaderSpec.version))
							<< glu::FragmentSource(generateEmptyFragmentSource(shaderSpec.version)))
{
}
TessControlExecutor::~TessControlExecutor (void)
{
	// Nothing to do; base class owns the program and buffers.
}
// Runs the control-stage shader for numValues values: one 3-vertex patch per
// value, so the user code executes once per patch (gl_PrimitiveID = value index).
void TessControlExecutor::execute (int numValues, const void* const* inputs, void* const* outputs)
{
	const glw::Functions&	gl	= m_renderCtx.getFunctions();

	initBuffers(numValues);

	// Setup input buffer & copy data
	uploadInputBuffer(inputs, numValues);

	if (!m_inputs.empty())
		gl.bindBufferBase(GL_SHADER_STORAGE_BUFFER, INPUT_BUFFER_BINDING, getInputBuffer());

	gl.bindBufferBase(GL_SHADER_STORAGE_BUFFER, OUTPUT_BUFFER_BINDING, getOutputBuffer());

	// A VAO must be bound even though no vertex attributes are read.
	deUint32 vertexArray;
	gl.genVertexArrays(1, &vertexArray);
	gl.bindVertexArray(vertexArray);

	// Render patches
	gl.patchParameteri(GL_PATCH_VERTICES, 3);
	gl.drawArrays(GL_PATCHES, 0, 3*numValues);

	gl.bindVertexArray(0);
	gl.deleteVertexArrays(1, &vertexArray);

	// Read back data
	readOutputBuffer(outputs, numValues);
}
// TessEvaluationExecutor
// Executes the user-provided shader code in the tessellation evaluation stage;
// values are passed through SSBOs. Use create() so support checks run first.
class TessEvaluationExecutor : public BufferIoExecutor
{
public:
	static TessEvaluationExecutor*	create					(const glu::RenderContext& renderCtx, const ShaderSpec& shaderSpec);

									~TessEvaluationExecutor	(void);

	void							execute					(int numValues, const void* const* inputs, void* const* outputs);

protected:
	static std::string				generateTessEvalShader	(const ShaderSpec& shaderSpec);

private:
	// Private: construction goes through create() for the support check.
									TessEvaluationExecutor	(const glu::RenderContext& renderCtx, const ShaderSpec& shaderSpec);
};
// Factory: checks tessellation support for the evaluation stage, then constructs.
TessEvaluationExecutor* TessEvaluationExecutor::create (const glu::RenderContext& renderCtx, const ShaderSpec& shaderSpec)
{
	checkTessSupport(renderCtx, shaderSpec, glu::SHADERTYPE_TESSELLATION_EVALUATION);

	return new TessEvaluationExecutor(renderCtx, shaderSpec);
}
// Generates a do-nothing tessellation control shader that just sets all tess
// levels to 1, used when the user code lives in the evaluation stage.
static std::string generatePassthroughTessControlShader (glu::GLSLVersion version)
{
	std::ostringstream src;
	src <<  glu::getGLSLVersionDeclaration(version) << "\n";

	if (glu::glslVersionIsES(version) && version <= glu::GLSL_VERSION_310_ES)
		src << "#extension GL_EXT_tessellation_shader : require\n\n";

	src << "layout(vertices = 1) out;\n\n";

	src << "void main (void)\n{\n";

	// Minimal tessellation: all inner/outer levels set to 1.
	for (int ndx = 0; ndx < 2; ndx++)
		src << "\tgl_TessLevelInner[" << ndx << "] = 1.0;\n";

	for (int ndx = 0; ndx < 4; ndx++)
		src << "\tgl_TessLevelOuter[" << ndx << "] = 1.0;\n";

	src << "}\n";

	return src.str();
}
// Generates the evaluation shader with the user code. Isolines with equal
// spacing yield two evaluation invocations of interest per patch; the value
// index is derived from gl_PrimitiveID and which end of the line we are on.
std::string TessEvaluationExecutor::generateTessEvalShader (const ShaderSpec& shaderSpec)
{
	std::ostringstream src;

	src << glu::getGLSLVersionDeclaration(shaderSpec.version) << "\n";

	if (glu::glslVersionIsES(shaderSpec.version) && shaderSpec.version <= glu::GLSL_VERSION_310_ES)
		src << "#extension GL_EXT_tessellation_shader : require\n";

	if (!shaderSpec.globalDeclarations.empty())
		src << shaderSpec.globalDeclarations << "\n";

	src << "\n";

	src << "layout(isolines, equal_spacing) in;\n\n";

	declareBufferBlocks(src, shaderSpec);

	src << "void main (void)\n{\n"
		<< "\tgl_Position = vec4(gl_TessCoord.x, 0.0, 0.0, 1.0);\n"
		<< "\thighp uint invocationId = uint(gl_PrimitiveID)*2u + (gl_TessCoord.x > 0.5 ? 1u : 0u);\n";

	generateExecBufferIo(src, shaderSpec, "invocationId");

	src << "}\n";

	return src.str();
}
// Builds the full pipeline: minimal vertex shader, passthrough control stage,
// user code in the evaluation stage, and an empty fragment shader.
TessEvaluationExecutor::TessEvaluationExecutor (const glu::RenderContext& renderCtx, const ShaderSpec& shaderSpec)
	: BufferIoExecutor	(renderCtx, shaderSpec, glu::ProgramSources()
							<< glu::VertexSource(generateVertexShaderForTess(shaderSpec.version))
							<< glu::TessellationControlSource(generatePassthroughTessControlShader(shaderSpec.version))
							<< glu::TessellationEvaluationSource(generateTessEvalShader(shaderSpec))
							<< glu::FragmentSource(generateEmptyFragmentSource(shaderSpec.version)))
{
}
TessEvaluationExecutor::~TessEvaluationExecutor (void)
{
	// Nothing to do; base class owns the program and buffers.
}
// Runs the evaluation-stage shader. Each 2-vertex isoline patch covers two
// values (see generateTessEvalShader), so the buffers are sized for numValues
// rounded up to an even count; only the first numValues results are read back.
void TessEvaluationExecutor::execute (int numValues, const void* const* inputs, void* const* outputs)
{
	const glw::Functions&	gl				= m_renderCtx.getFunctions();
	const int				alignedValues	= deAlign32(numValues, 2);

	// Initialize buffers with aligned value count to make room for padding
	initBuffers(alignedValues);

	// Setup input buffer & copy data
	uploadInputBuffer(inputs, numValues);

	// \todo [2014-06-26 pyry] Duplicate last value in the buffer to prevent infinite loops for example?

	if (!m_inputs.empty())
		gl.bindBufferBase(GL_SHADER_STORAGE_BUFFER, INPUT_BUFFER_BINDING, getInputBuffer());

	gl.bindBufferBase(GL_SHADER_STORAGE_BUFFER, OUTPUT_BUFFER_BINDING, getOutputBuffer());

	// A VAO must be bound even though no vertex attributes are read.
	deUint32 vertexArray;
	gl.genVertexArrays(1, &vertexArray);
	gl.bindVertexArray(vertexArray);

	// Render patches
	gl.patchParameteri(GL_PATCH_VERTICES, 2);
	gl.drawArrays(GL_PATCHES, 0, alignedValues);

	gl.bindVertexArray(0);
	gl.deleteVertexArrays(1, &vertexArray);

	// Read back data
	readOutputBuffer(outputs, numValues);
}
// Utilities
// Creates a shader executor for the given stage. The caller owns the returned
// pointer. Throws InternalError for stages without an executor implementation;
// the tess/geometry factories may also throw if required support is missing.
ShaderExecutor* createExecutor (const glu::RenderContext& renderCtx, glu::ShaderType shaderType, const ShaderSpec& shaderSpec)
{
	switch (shaderType)
	{
		case glu::SHADERTYPE_VERTEX:					return new VertexShaderExecutor		(renderCtx, shaderSpec);
		case glu::SHADERTYPE_TESSELLATION_CONTROL:		return TessControlExecutor::create	(renderCtx, shaderSpec);
		case glu::SHADERTYPE_TESSELLATION_EVALUATION:	return TessEvaluationExecutor::create(renderCtx, shaderSpec);
		case glu::SHADERTYPE_GEOMETRY:					return GeometryShaderExecutor::create(renderCtx, shaderSpec);
		case glu::SHADERTYPE_FRAGMENT:					return new FragmentShaderExecutor	(renderCtx, shaderSpec);
		case glu::SHADERTYPE_COMPUTE:					return new ComputeShaderExecutor	(renderCtx, shaderSpec);
		default:
			throw tcu::InternalError("Unsupported shader type");
	}
}
// Returns true if createExecutor() has an implementation for the given stage:
// all five graphics stages plus compute.
bool executorSupported(glu::ShaderType shaderType)
{
	return shaderType == glu::SHADERTYPE_VERTEX
		|| shaderType == glu::SHADERTYPE_TESSELLATION_CONTROL
		|| shaderType == glu::SHADERTYPE_TESSELLATION_EVALUATION
		|| shaderType == glu::SHADERTYPE_GEOMETRY
		|| shaderType == glu::SHADERTYPE_FRAGMENT
		|| shaderType == glu::SHADERTYPE_COMPUTE;
}
} // ShaderExecUtil
} // gls
} // deqp
|
// Copyright (c) 2021 DNV AS
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
#include "Reflection\TypeLibraries\TypeLibraryFactory.h"
#include "Reflection\Operators\DefineOperator.h"
#include "Reflection\Objects\Object.h"
#include "Reflection\TypeConversions\BuiltInConversions.h"
#include "gtest\gtest.h"
using namespace DNVS::MoFa::Reflection::TypeConversions;
using namespace DNVS::MoFa::Reflection::TypeLibraries;
using namespace DNVS::MoFa::Reflection::Operators;
using namespace DNVS::MoFa::Reflection::Variants;
using namespace DNVS::MoFa::Reflection::Objects;
using namespace DNVS::MoFa::Operators;
// NOTE(review): the test name says "Double" but the type registered and
// exercised below is int — presumably a leftover name; confirm before renaming
// (gtest filters may reference it).
TEST(UnaryOperatorTests,UnaryMinusDouble)
{
	// Build a type library with built-in conversions and register 'int'.
	TypeLibraryPointer typeLibrary = TypeLibraryFactory::CreateDefaultTypeLibrary();
	AddBuiltInConversions(typeLibrary->GetConversionGraph());
	TypePointer type=typeLibrary->CreateType(typeid(int),"int");
	///Definition of operators
	DefineOperator<int> intClass(type);
	intClass.Operator(-This.Const);		// expose unary minus on const int
	///Testing operator overloads
	Object lhs(typeLibrary,45);
	Object result=-lhs;					// dispatched through the reflection layer
	EXPECT_EQ(-45,result.As<int>());
}
// Minimal fixture type for the dereference-operator test: unary operator*
// returns the wrapped int.
struct MyClass {
public:
	MyClass(int a) : m_a(a) {}
	int operator*() const {return m_a;}	// dereference yields the stored value
private:
	int m_a;
};
// Verifies that a user-defined unary operator* can be registered and invoked
// through the reflection Object wrapper.
TEST(UnaryOperatorTests,UnaryDereference)
{
	// Build a type library with built-in conversions and register MyClass.
	TypeLibraryPointer typeLibrary = TypeLibraryFactory::CreateDefaultTypeLibrary();
	AddBuiltInConversions(typeLibrary->GetConversionGraph());
	TypePointer type = typeLibrary->CreateType(typeid(MyClass), "MyClass");
	///Definition of operators
	DefineOperator<MyClass> myClass(type);
	myClass.Operator(*This.Const);		// expose unary dereference on const MyClass
	///Testing operator overloads
	MyClass a(4);
	Object lhs(typeLibrary,a);
	Object result=*lhs;					// dispatched through the reflection layer
	EXPECT_EQ(4,result.As<int>());
}
|
#include "{{client_context['client-module-name']}}ClientObject.h"
#include "{{client_context['client-module-name']}}PrivatePCH.h"
|
/***************************************************************************
# Copyright (c) 2015-21, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**************************************************************************/
#include "Testing/UnitTest.h"
#include "Utils/Sampling/SampleGenerator.h"
/** GPU tests for the SampleGenerator utility class.
*/
namespace Falcor
{
namespace
{
const char kShaderFile[] = "Tests/Sampling/SampleGeneratorTests.cs.slang";
// The shader uses the first two dispatch dimensions as spatial seed and the last as instance index.
// For each sample generator instance, it generates kDimensions samples.
const uint3 kDispatchDim = { 64, 64, 16 };
const uint32_t kDimensions = 32;
/** Estimates the population Pearson correlation between pairs of
    measurements of a random variable stored in an array 'elems'.
    The two values in each pair are separated a distance 'stride'.
    The function iterates over all samples i, measuring correlation
    between sample i and i+stride, so each value may be part of multiple pairs.
    \return Estimated Pearson correlation coefficient in [-1,1], where 0.0 means no correlation.
*/
double correlation(const float* elems, const size_t numElems, const size_t stride)
{
    // Accumulate the raw moments needed for the Pearson formula.
    double sx = 0.0, sy = 0.0;
    double sxx = 0.0, syy = 0.0, sxy = 0.0;
    size_t pairCount = 0;
    for (size_t first = 0; first + stride < numElems; ++first)
    {
        const double u = elems[first];
        const double v = elems[first + stride];
        sx += u;
        sy += v;
        sxx += u * u;
        syy += v * v;
        sxy += u * v;
        ++pairCount;
    }
    assert(pairCount > 0);
    // r = (n*Sxy - Sx*Sy) / (sqrt(n*Sxx - Sx^2) * sqrt(n*Syy - Sy^2))
    const double n = (double)pairCount;
    const double cov = n * sxy - sx * sy;
    const double normX = std::sqrt(n * sxx - sx * sx);
    const double normY = std::sqrt(n * syy - sy * sy);
    return cov / (normX * normY);
}
/** Runs the sample generator of the given 'type' on the GPU and performs
    statistical sanity checks on the produced samples: range, mean, and
    absolute correlation (which must stay below 'corrThreshold') between
    nearby dimensions, nearby pixels and, optionally, nearby instances.
*/
void testSampleGenerator(GPUUnitTestContext& ctx, uint32_t type, const double corrThreshold, bool testInstances)
{
    // Instantiate the generator under test.
    SampleGenerator::SharedPtr pSampleGenerator = SampleGenerator::create(type);

    // Compile the test shader with the generator's defines. Vars creation is
    // deferred until after shader specialization, then the generator binds its data.
    auto defines = pSampleGenerator->getDefines();
    ctx.createProgram(kShaderFile, "test", defines, Shader::CompilerFlags::None, "6_2");
    pSampleGenerator->setShaderData(ctx.vars().getRootVar());

    const size_t numSamples = kDispatchDim.x * kDispatchDim.y * kDispatchDim.z * kDimensions;
    ctx.allocateStructuredBuffer("result", uint32_t(numSamples));
    ctx["CB"]["gDispatchDim"] = kDispatchDim;
    ctx["CB"]["gDimensions"] = kDimensions;

    // Execute the shader.
    ctx.runProgram(kDispatchDim);

    // Read back the generated samples.
    const float* result = ctx.mapBuffer<const float>("result");

    // Every sample must lie in [0,1) and the overall mean should be ~0.5.
    double sampleSum = 0.0;
    for (size_t i = 0; i < numSamples; i++)
    {
        const float u = result[i];
        sampleSum += u;
        EXPECT(u >= 0.f && u < 1.f) << u;
    }
    const double mean = sampleSum / numSamples;
    EXPECT_GE(mean, 0.499);
    EXPECT_LE(mean, 0.501);

    // Correlation checks between samples at a given stride. This is not a
    // robust statistical test, but catches fundamentally broken generators.
    auto absCorr = [&](size_t stride) -> double
    {
        return std::abs(correlation(result, numSamples, stride));
    };

    // Nearby dimensions within a sample set.
    for (size_t i = 1; i <= 8; i++)
    {
        EXPECT_LE(absCorr(i), corrThreshold) << "i = " << i;
    }

    // Nearby pixels.
    const size_t xStride = kDimensions;
    const size_t yStride = kDispatchDim.x * kDimensions;
    for (size_t y = 0; y < 4; y++)
    {
        for (size_t x = 0; x < 4; x++)
        {
            if (x == 0 && y == 0) continue;
            EXPECT_LE(absCorr(x * xStride + y * yStride), corrThreshold) << "x = " << x << " y = " << y;
        }
    }

    // Nearby instances, when the generator promises uncorrelated instances.
    if (testInstances)
    {
        const size_t instanceStride = kDispatchDim.x * kDispatchDim.y * kDimensions;
        for (size_t i = 1; i <= 4; i++)
        {
            EXPECT_LE(absCorr(i * instanceStride), corrThreshold) << "i = " << i;
        }
    }

    ctx.unmapBuffer("result");
}
}
/** Tests for the different types of sample generators.
For each one, we specify the maximum allowed absolute correlation between samples.
The values have been tweaked based on observed correlations at these sample counts.
*/
// Tiny uniform generator. Threshold 0.0025 tuned to observed correlations at
// these sample counts; instances are expected to be uncorrelated (tested too).
GPU_TEST(SampleGenerator_TinyUniform)
{
    testSampleGenerator(ctx, SAMPLE_GENERATOR_TINY_UNIFORM, 0.0025, true);
}
// Uniform generator. Threshold 0.002 tuned to observed correlations at
// these sample counts; instances are expected to be uncorrelated (tested too).
GPU_TEST(SampleGenerator_Uniform)
{
    testSampleGenerator(ctx, SAMPLE_GENERATOR_UNIFORM, 0.002, true);
}
}
|
#include "tests.h"
#include "test_tools.h"
#include <vector>
#include <iostream>
#include "../RMSDTools.h"
#include "../RMSDCalculator.h"
#include "../QCP/QCPSerialKernel.h"
#include "../QTRFIT/QTRFITOmpKernel.h"
#include "../factory/RMSDCalculatorFactory.h"
#include "../KABSCH/KABSCHSerialKernel.h"
#include "../RMSDCalculationData.h"
#include "../symmGroups.h"
using namespace std;
#define TOPOINTER(vec) (&(vec[0]))
// Checks RMSDTools::initializeTo on a plain array and on a 2D array accessed
// through its first-row pointer.
void test_initialize(){
    print_test_tittle(__FUNCTION__);
    double expected_initialized[] = {3,3,3,3,3,3,3,3,3,3};
    double initialized[10];
    double matrix_mode[3][3];
    double matrix_mode_copy[9];
    double expected_matrix_mode_copy[] = {5,5,5,5,5,5,5,5,5};

    // Fill 10 doubles with the value 3 and verify all of them.
    RMSDTools::initializeTo(initialized, 3, 10);
    compareVectors("\tTesting initialization: ", expected_initialized, initialized, 10, 1e-16);

    // Fill all 9 entries of a 3x3 matrix (contiguous storage) with the value 5,
    // then flatten it row-major for comparison.
    RMSDTools::initializeTo(matrix_mode[0], 5, 9);
    int k = 0;
    for (int i = 0; i < 3; i++){
        for (int j = 0; j < 3; j++){
            matrix_mode_copy[k] = matrix_mode[i][j];
            k = k + 1;
        }
    }
    // Bug fix: all 9 initialized entries are now checked (previously only the
    // first 5 were compared, although the expected array holds 9 values).
    compareVectors("\tTesting initialization (matrix): ", expected_matrix_mode_copy, matrix_mode_copy, 9, 1e-16);
}
// Checks that RMSDTools::copyArrays duplicates a 10-element array exactly.
void test_copy_array(){
    print_test_tittle(__FUNCTION__);
    double source_values[] = {0,1,2,3,4,5,1234,7,8,9};
    double destination[10];
    RMSDTools::copyArrays(destination, source_values, 10);
    compareVectors("\tTesting array copy: ", source_values, destination, 10, 1e-10);
}
// Checks the per-atom mean over a set of conformations.
void test_mean_coordinates(){
    print_test_tittle(__FUNCTION__);
    double computed_mean[9];
    // Three conformations of three atoms each (x,y,z per atom).
    double conformations[] = { 1,1,1, 2,2,2, 3,3,3, // 1st conformation
                               4,4,4, 5,5,5, 6,6,6, // 2nd conformation
                               7,7,7, 8,8,8, 9,9,9 };// 3rd conformation
    double expected_mean[] = {4, 4, 4, 5, 5, 5, 6, 6, 6};
    const int num_atoms = 3;
    const int num_conformations = 3;
    RMSDTools::calculateMeanCoordinates(computed_mean, conformations,
                                        num_conformations, num_atoms);
    compareVectors("\tTesting coordinates mean: ", expected_mean, computed_mean, 9, 1e-10);
}
// Checks both translation helpers: one translation per conformation
// (applyTranslationsToAll) and a single shared translation (applyTranslationToAll).
void test_translations(){
    print_test_tittle(__FUNCTION__);
    // Four conformations of three atoms each.
    double coords[] = {
        1,1,0, 2,2,0, 3,3,0, // 1st conformation
        6,6,0, 5,5,0, 4,4,0, // 2nd conformation (upside down)
        7,7,0, 8,8,0, 9,9,0, // 3rd conformation
        5,4,0, 5,5,0, 4,4,0  // 4th conformation
    };
    // One translation vector per conformation.
    double per_conformation_translations[] = {
        0,0,1,
        0,1,0,
        1,0,0,
        1,0,1
    };
    double expected_after_individual[] = {
        1,1,1, 2,2,1, 3,3,1,
        6,7,0, 5,6,0, 4,5,0,
        8,7,0, 9,8,0, 10,9,0,
        6,4,1, 6,5,1, 5,4,1
    };
    // A single vector applied to every conformation afterwards.
    double shared_translation[] = {1, 2, 4};
    double expected_after_shared[] = {
        2,3,5, 3,4,5, 4,5,5,
        7,9,4, 6,8,4, 5,7,4,
        9,9,4, 10,10,4, 11,11,4,
        7,6,5, 7,7,5, 6,6,5
    };
    RMSDTools::applyTranslationsToAll(3, 4, coords, per_conformation_translations);
    compareVectors("\tTesting translated coordinates: ", expected_after_individual, coords, 3*3*4, 1e-12);
    RMSDTools::applyTranslationToAll(3, 4, coords, shared_translation);
    compareVectors("\tTesting translated coordinates: ", expected_after_shared, coords, 3*3*4, 1e-12);
}
// Checks that swap_atoms exchanges whole (x,y,z) triplets for atom pairs (1,5) and (3,7).
void test_swap_atoms(){
    print_test_tittle(__FUNCTION__);
    double coords[] = {1,2,3, 4,5,6, 7,8,9, 10,11,12, 13,14,15, 16,17,18, 19,20,21, 22,23,24};
    double expected_after_swaps[] = {1,2,3, 16,17,18, 7,8,9, 22,23,24, 13,14,15, 4,5,6, 19,20,21, 10,11,12};
    RMSDTools::swap_atoms(coords, 1, 5);
    RMSDTools::swap_atoms(coords, 3, 7);
    compareVectors("\tAtoms coordinates have been swapped: ", expected_after_swaps, coords, 8*3, 1e-16);
}
void test_apply_symm_group(){
print_test_tittle(__FUNCTION__);
double coordinates [] = {1,2,3, 4,5,6, 7,8,9, 10,11,12, 13,14,15, 16,17,18, 19,20,21, 22,23,24};
double swapped_coords [] = {1,2,3, 16,17,18, 7,8,9, 22,23,24, 13,14,15, 4,5,6, 19,20,21, 10,11,12};
//pair<vector<int>, vector<int> > symm_group;
//symm_group.first.push_back(1);
//symm_group.first.push_back(3);
//symm_group.second.push_back(5);
//symm_group.second.push_back(7);
vector<pair<int, int> > symm_group;
symm_group.push_back(pair<int,int>(1,5));
symm_group.push_back(pair<int,int>(3,7));
RMSDTools::applySymmetryGroup(coordinates, symm_group);
compareVectors("\tSymm group was correctly applied: ", swapped_coords, coordinates, 8*3, 1e-16);
}
// Exercises RMSDTools::calcMinRMSDOfAllSymmetryGroups: it must explore every
// combination of symmetry-group atom exchanges and return the minimum RMSD,
// and it must compute the plain RMSD when the symmetry-group list is empty.
void test_apply_all_symmetries(){
    print_test_tittle(__FUNCTION__);
    // Reference conformation: 10 atoms (x,y,z each).
    double reference [] = { 1,2,3, 4,5,6,
                            7,8,9, 10,11,12,
                            13,14,15, 16,17,18,
                            19,20,21, 22,23,24,
                            25,26,27, 28,29,30};
    double reference_copy [10*3];
    RMSDTools::copyArrays(TOPOINTER(reference_copy),
                          TOPOINTER(reference), 30);
    // Permutation of the first with one atom changed (negated) (rmsd = 13.1453 )
    // This forces a search to get the best value
    double superposed_conformation [] = { 1,2,3, 16,17,18,
                                          7,8,9, 22,23,24,
                                          -13,-14,-15, 4,5,6,
                                          19,20,21, 10,11,12,
                                          25,26,27, 28,29,30,};
    double superposed_conformation_copy [10*3];
    RMSDTools::copyArrays(TOPOINTER(superposed_conformation_copy),
                          TOPOINTER(superposed_conformation), 30);
    // Three symmetry groups; each is a list of exchangeable atom-index pairs.
    vector<pair<int, int> > symm_group_1;
    symm_group_1.push_back(pair<int,int>(1,5));
    symm_group_1.push_back(pair<int,int>(3,7));
    vector<pair<int, int> > symm_group_2;
    symm_group_2.push_back(pair<int,int>(2,6));
    symm_group_2.push_back(pair<int,int>(4,8));
    vector<pair<int, int> > symm_group_3;
    symm_group_3.push_back(pair<int,int>(5,9));
    symmGroups symm_groups;
    symm_groups.push_back(symm_group_1);
    symm_groups.push_back(symm_group_2);
    symm_groups.push_back(symm_group_3);
    symmGroups empty_symm_group;
    // This generates (candidate permutations and their RMSDs):
    // 1,2,3, 4,5,6, 19,20,21, 10,11,12, 25,26,27, 28,29,30, 7,8,9, 22,23,24, -13,-14,-15, 16,17,18, [26.397]
    // 1,2,3, 4,5,6, 19,20,21, 10,11,12, 25,26,27, 16,17,18, 7,8,9, 22,23,24, -13,-14,-15, 28,29,30, [24.7063]
    // 1,2,3, 4,5,6, 7,8,9, 10,11,12, -13,-14,-15, 28,29,30, 19,20,21, 22,23,24, 25,26,27, 16,17,18, [17.9555]
    // 1,2,3, 4,5,6, 7,8,9, 10,11,12, -13,-14,-15, 16,17,18, 19,20,21, 22,23,24, 25,26,27, 28,29,30, [15.3623]
    // 1,2,3, 16,17,18, 19,20,21, 22,23,24, 25,26,27, 28,29,30, 7,8,9, 10,11,12, -13,-14,-15, 4,5,6, [30.9192]
    // 1,2,3, 16,17,18, 19,20,21, 22,23,24, 25,26,27, 4,5,6, 7,8,9, 10,11,12, -13,-14,-15, 28,29,30, [27.9857]
    // 1,2,3, 16,17,18, 7,8,9, 22,23,24, -13,-14,-15, 28,29,30, 19,20,21, 10,11,12, 25,26,27, 4,5,6, [24.1164]
    // 1,2,3, 16,17,18, 7,8,9, 22,23,24, -13,-14,-15, 4,5,6, 19,20,21, 10,11,12, 25,26,27, 28,29,30, [20.2188]
    double rmsd = RMSDTools::calcMinRMSDOfAllSymmetryGroups(reference,
                                                            superposed_conformation,
                                                            10,
                                                            &symm_groups);
    // Minimum of the candidate list above.
    double expected_min_rmsd = 15.3623;
    compareVectors("\tMinimum RMSD must be the expected one: ",
                   &expected_min_rmsd, &rmsd, 1, 1e-4);
    // We have to use copies as the coordinates array has already been permuted
    rmsd = RMSDTools::calcMinRMSDOfAllSymmetryGroups(reference_copy,
                                                     superposed_conformation_copy,
                                                     10,
                                                     &empty_symm_group);
    // With no symmetry groups there is a single candidate: the plain RMSD.
    double expected_empty_rmsd = 20.2188;
    compareVectors("\tAnd if the symm group is empty, it calculates the normal RMSD: ",
                   &expected_empty_rmsd, &rmsd, 1, 1e-4);
}
// Fine grain test of qcp with data from the original files in http://theobald.brandeis.edu/qcp/
void test_QCP_Kernel(){
    print_test_tittle(__FUNCTION__);
    // Two 7-atom fragments taken from the reference QCP distribution.
    int atoms_len = 7;
    double frag_a [] = {-2.803, -15.373, 24.556, 0.893, -16.062, 25.147, 1.368, -12.371, 25.885, -1.651, -12.153, 28.177, -0.440,
                        -15.218, 30.068, 2.551, -13.273, 31.372, 0.105, -11.330, 33.567};
    double frag_b [] = {-14.739, -18.673, 15.040, -12.473, -15.810, 16.074, -14.802, -13.307, 14.408, -17.782, -14.852, 16.171, -16.124, -14.617,
                        19.584, -15.029, -11.037, 18.902, -18.577, -10.001, 17.996};
    double frag_b_copy[atoms_len*3];
    // Golden rotation matrix and rotated coordinates from the reference implementation.
    double expected_rotation [] = {0.7221635837820651, 0.6911893731786908, -0.02714790348982324,
                                   -0.5203825657891069, 0.5170083254696894, -0.6796354733368274,
                                   -0.4557211246823982, 0.5049352847641727, 0.7330474846272469};
    double expected_rotated_coordinates [] = {
        -2.495176411277905, -1.614342696222044, -4.102116562817358,
        1.092050512774367, -2.016077833910722, -2.931179811963272,
        1.185406934426247, 1.622237699041897, -1.827209404202237,
        -2.082389880657943, 1.176002542749938, 0.04307724778849886,
        -0.8152689506610532, -1.884890665341617, 1.908042480017456,
        2.46847299974008, -0.1403083768834845, 2.716757783430188,
        0.6469047956562158, 2.857379330566034, 4.192628267746734
    };
    QCPSerialKernel kernel;
    double rot_matrix [9];
    double translations[3];
    // Do it step by step: center both fragments at the origin, then compute
    // RMSD + optimal rotation, and finally rotate the second fragment.
    RMSDTools::copyArrays(frag_b_copy,frag_b,atoms_len*3);
    RMSDTools::centerAllAtOrigin(atoms_len,1,frag_a,translations);
    RMSDTools::centerAllAtOrigin(atoms_len,1,frag_b_copy,translations);
    double rmsd = kernel.calcRMSDOfTwoConformations(frag_a,frag_b_copy,atoms_len,rot_matrix);
    double expected_rmsd = 0.719106;
    compareVectors("\tTesting RMSD: ", &expected_rmsd, &rmsd, 1, 1e-6);
    compareVectors("\tTesting rotation matrix: ", expected_rotation, rot_matrix, 9, 1e-14);
    RMSDTools::rotate3D(atoms_len, frag_b_copy, rot_matrix);
    compareVectors("\tTesting rotated coordinates: ", expected_rotated_coordinates, frag_b_copy, atoms_len*3, 1e-14);
    // Using the function modifying coords: the kernel superposes and overwrites
    // frag_b_copy in place, so the result must match the step-by-step outcome.
    RMSDTools::copyArrays(frag_b_copy,frag_b,atoms_len*3);
    RMSDTools::centerAllAtOrigin(atoms_len,1,frag_b_copy,translations);
    RMSDCalculationData data(1,atoms_len,frag_b_copy,0,NULL,NULL);
    kernel.oneVsFollowingFitEqualCalcCoords(
        frag_a,
        -1,
        &rmsd,
        &data);
    compareVectors("\tTesting rotated coordinates: ", expected_rotated_coordinates, frag_b_copy, atoms_len*3, 1e-14);
}
// Same fine-grain check as test_QCP_Kernel but for the Kabsch kernel: both
// algorithms must produce the same optimal rotation and RMSD for this data.
void test_KABSCH_Kernel(){
    print_test_tittle(__FUNCTION__);
    int atoms_len = 7;
    double frag_a [] = {-2.803, -15.373, 24.556, 0.893, -16.062, 25.147, 1.368, -12.371, 25.885, -1.651, -12.153, 28.177, -0.440,
                        -15.218, 30.068, 2.551, -13.273, 31.372, 0.105, -11.330, 33.567};
    double frag_b [] = {-14.739, -18.673, 15.040, -12.473, -15.810, 16.074, -14.802, -13.307, 14.408, -17.782, -14.852, 16.171, -16.124, -14.617,
                        19.584, -15.029, -11.037, 18.902, -18.577, -10.001, 17.996};
    double frag_b_copy[atoms_len*3];
    // Golden values shared with the QCP test.
    double expected_rotation [] = {0.7221635837820651, 0.6911893731786908, -0.02714790348982324,
                                   -0.5203825657891069, 0.5170083254696894, -0.6796354733368274,
                                   -0.4557211246823982, 0.5049352847641727, 0.7330474846272469};
    double expected_rotated_coordinates [] = {
        -2.495176411277905, -1.614342696222044, -4.102116562817358,
        1.092050512774367, -2.016077833910722, -2.931179811963272,
        1.185406934426247, 1.622237699041897, -1.827209404202237,
        -2.082389880657943, 1.176002542749938, 0.04307724778849886,
        -0.8152689506610532, -1.884890665341617, 1.908042480017456,
        2.46847299974008, -0.1403083768834845, 2.716757783430188,
        0.6469047956562158, 2.857379330566034, 4.192628267746734
    };
    KABSCHSerialKernel kernel;
    double rot_matrix [9];
    double translations[3];
    double U[3][3];
    // Do it step by step
    RMSDTools::copyArrays(frag_b_copy,frag_b,atoms_len*3);
    RMSDTools::centerAllAtOrigin(atoms_len,1,frag_a,translations);
    RMSDTools::centerAllAtOrigin(atoms_len,1,frag_b_copy,translations);
    double rmsd = kernel.calculate_rotation_rmsd(frag_a,frag_b_copy,atoms_len,U);
    // Bug fix: calculate_rotation_rmsd writes the rotation into U, but the
    // checks below used rot_matrix, which was never filled (uninitialized
    // memory). Flatten U row-major into rot_matrix before using it.
    // NOTE(review): assumes Kabsch's U matches QCP's row-major rotation
    // convention (the golden data is shared) — confirm against KABSCHSerialKernel.
    for (int i = 0; i < 3; i++){
        for (int j = 0; j < 3; j++){
            rot_matrix[i*3 + j] = U[i][j];
        }
    }
    double expected_rmsd = 0.719106;
    compareVectors("\tTesting RMSD: ", &expected_rmsd, &rmsd, 1, 1e-6);
    compareVectors("\tTesting rotation matrix: ", expected_rotation, rot_matrix, 9, 1e-14);
    RMSDTools::rotate3D(atoms_len, frag_b_copy, rot_matrix);
    compareVectors("\tTesting rotated coordinates: ", expected_rotated_coordinates, frag_b_copy, atoms_len*3, 1e-14);
    // Using the function modifying coords
    RMSDTools::copyArrays(frag_b_copy,frag_b,atoms_len*3);
    RMSDTools::centerAllAtOrigin(atoms_len,1,frag_b_copy,translations);
    RMSDCalculationData data(1,atoms_len,frag_b_copy,0,NULL,NULL);
    kernel.oneVsFollowingFitEqualCalcCoords(
        frag_a,
        -1,
        &rmsd,
        &data);
    compareVectors("\tTesting rotated coordinates: ", expected_rotated_coordinates, frag_b_copy, atoms_len*3, 1e-14);
}
// Checks geometricCenter per conformation against precomputed centers, and
// centerAllAtOrigin against coordinates centered externally with Python.
void test_center_coordinates(){
    print_test_tittle(__FUNCTION__);
    vector<double> coordinates, centered_coordinates;
    vector<int> shape, centered_shape;
    // 6 conformations x 3 center components. Shape layout appears to be
    // [conformations, atoms, 3] — presumed from the indexing below; confirm
    // against load_pdb_coords.
    double calculated_centers[18];
    double expected_centers [] = {
        14.3707713, 47.34880717, 25.46220179,
        7.3707713, 2.3488296, 31.46225112,
        -10.62925112, -6.65117489, 30.46225112,
        15.37077578, 21.3488565, 46.46220179,
        -5.62919731, 3.34880269, 21.46222422,
        8.37079821, 96.34881166, 25.46223767};
    RMSDTools::initializeTo(calculated_centers, 0, 18);
    // Not centered coordinates
    load_pdb_coords( coordinates,
                     shape,
                     "data/Models/prot_plus_ligand_similar/prot_plus_ligand_offset.CA.coords");
    // One geometric center per conformation.
    for (int i = 0; i < shape[0]; ++i){
        RMSDTools::geometricCenter(shape[1], TOPOINTER(coordinates)+(i*shape[1]*3) , calculated_centers+(i*3));
    }
    // Centers must be equal to the expected ones
    compareVectors("\tGeometric centers are as expected: ",
                   expected_centers,
                   calculated_centers,
                   18,
                   1e-7);
    // Load the coordinates centered with Python and center them
    load_pdb_coords(centered_coordinates,
                    centered_shape,
                    "data/Models/prot_plus_ligand_similar/prot_plus_ligand_offset.CA.centered.coords");
    RMSDTools::centerAllAtOrigin(shape[1],shape[0],TOPOINTER(coordinates));
    // Coordinates must coincide
    compareVectors("\tCoordinates have been centered: ",
                   TOPOINTER(centered_coordinates),
                   TOPOINTER(coordinates),
                   shape[0]*shape[1]*shape[2],
                   1e-12);
}
// Superposes a trajectory onto its first frame using the given calculator and
// checks both the per-frame RMSDs and the final coordinates against golden
// data produced with Prody.
//   type               - calculator implementation under test
//   initial_coords_file- un-superposed input coordinates
//   final_coords_file  - golden superposed coordinates (Prody)
//   rmsd_results_file  - golden RMSD values
//   precision_of_check - comparison tolerance
void test_superposition_with_fit( RMSDCalculatorType type,
                                  const char* initial_coords_file,
                                  const char* final_coords_file,
                                  const char* rmsd_results_file,
                                  double precision_of_check){
    print_test_tittle(__FUNCTION__);
    print_calculator_and_precission(type, precision_of_check);
    vector<double> not_superposed_fit_coordinates,
        expected_superposed_fit_coordinates,
        calculated_rmsds, expected_rmsds;
    vector<int> expected_superposed_fit_coordinates_shape,
        not_superposed_fit_coordinates_shape;
    load_vector(expected_rmsds, rmsd_results_file);
    load_pdb_coords(not_superposed_fit_coordinates,
                    not_superposed_fit_coordinates_shape,
                    initial_coords_file);
    // Prody's results are superposed but the centering has been canceled,
    // it is necessary then to move them again to their original places
    load_and_center_pdb_coords(expected_superposed_fit_coordinates,
                               expected_superposed_fit_coordinates_shape,
                               final_coords_file);
    calculated_rmsds.resize(not_superposed_fit_coordinates_shape[0],0);
    RMSDCalculator* calculator = RMSDCalculatorFactory::createCalculator(
        type,
        not_superposed_fit_coordinates_shape[0],
        not_superposed_fit_coordinates_shape[1],
        TOPOINTER(not_superposed_fit_coordinates));
    // Frame 0 vs every following frame; superposition happens in place.
    calculator->oneVsFollowing(0, TOPOINTER(calculated_rmsds));
    // RMSDs must be the same. expected_rmsds[0] is skipped — presumably the
    // reference frame's self-RMSD in the golden file; confirm against the data.
    compareVectors("\tCalculated RMSDs coincide with golden: ",
                   &(expected_rmsds[1]),
                   TOPOINTER(calculated_rmsds),
                   not_superposed_fit_coordinates_shape[0]-1, precision_of_check);
    // Final centered coordinates must be the superposed coordinates
    compareVectors("\tInitial coordinates have been superposed: ",
                   TOPOINTER(expected_superposed_fit_coordinates),
                   TOPOINTER(not_superposed_fit_coordinates),
                   not_superposed_fit_coordinates_shape[0] *
                   not_superposed_fit_coordinates_shape[1] *
                   not_superposed_fit_coordinates_shape[2],
                   precision_of_check);
    //save_pdb_coords(not_superposed_fit_coordinates,not_superposed_fit_coordinates_shape,"calculated.coords");
    //save_pdb_coords(expected_superposed_fit_coordinates,expected_superposed_fit_coordinates_shape,"expected.coords");
    delete calculator;
}
// Superposes a trajectory using one set of fitting coordinates (protein) and
// applies the resulting transform to a separate calculation set (ligand),
// checking RMSDs and both final coordinate sets against Prody golden data.
void test_superposition_with_fit_and_calc(RMSDCalculatorType type,
                                          const char* initial_prot_coords_file,
                                          const char* final_prot_coords_file,
                                          const char* initial_lig_coords_file,
                                          const char* final_lig_coords_file,
                                          const char* rmsd_results_file,
                                          double precision_of_check){
    print_test_tittle(__FUNCTION__);
    print_calculator_and_precission(type, precision_of_check);
    vector<double> not_superposed_fit_coordinates,
        expected_superposed_fit_coordinates,
        not_superposed_calc_coordinates,
        expected_superposed_calc_coordinates,
        calculated_rmsds, expected_rmsds,
        centers;
    vector<int> expected_superposed_fit_coordinates_shape,
        not_superposed_fit_coordinates_shape,
        expected_superposed_calc_coordinates_shape,
        not_superposed_calc_coordinates_shape;
    load_vector(expected_rmsds, rmsd_results_file);
    load_pdb_coords(not_superposed_fit_coordinates,
                    not_superposed_fit_coordinates_shape,
                    initial_prot_coords_file);
    // Prody's results are superposed but the centering has been canceled,
    // it is necessary then to move them again to their original places.
    // (Bug fix: the '&centers' argument below had been corrupted by a bad
    // character encoding into '¢ers', which does not compile.)
    load_and_center_pdb_coords(expected_superposed_fit_coordinates,
                               expected_superposed_fit_coordinates_shape,
                               final_prot_coords_file,
                               &centers);
    load_pdb_coords(not_superposed_calc_coordinates,
                    not_superposed_calc_coordinates_shape,
                    initial_lig_coords_file);
    // The case of a different non-centered calculation coordset is a little bit more tricky,
    // to preserve the relative distance to the center, one has to move this coordinates using
    // the same centers got for the fitting coordinates
    load_and_move_pdb_coords(expected_superposed_calc_coordinates,
                             expected_superposed_calc_coordinates_shape,
                             final_lig_coords_file,
                             TOPOINTER(centers));
    calculated_rmsds.resize(not_superposed_fit_coordinates_shape[0],0);
    RMSDCalculator* calculator = RMSDCalculatorFactory::createCalculator(
        type,
        not_superposed_fit_coordinates_shape[0],
        not_superposed_fit_coordinates_shape[1],
        TOPOINTER(not_superposed_fit_coordinates),
        not_superposed_calc_coordinates_shape[1],
        TOPOINTER(not_superposed_calc_coordinates));
    calculator->oneVsFollowing(0, TOPOINTER(calculated_rmsds));
    // RMSDs must be the same (skipping the golden file's first entry, as in
    // test_superposition_with_fit).
    compareVectors("\tCalculated RMSDs coincide with golden: ",
                   &(expected_rmsds[1]),
                   TOPOINTER(calculated_rmsds),
                   not_superposed_fit_coordinates_shape[0]-1, precision_of_check);
    compareVectors("\tInitial fitting coordinates have been superposed: ",
                   TOPOINTER(expected_superposed_fit_coordinates),
                   TOPOINTER(not_superposed_fit_coordinates),
                   not_superposed_fit_coordinates_shape[0] *
                   not_superposed_fit_coordinates_shape[1] *
                   not_superposed_fit_coordinates_shape[2],
                   precision_of_check);
    compareVectors("\tAnd also calculation coordinates: ",
                   TOPOINTER(expected_superposed_calc_coordinates),
                   TOPOINTER(not_superposed_calc_coordinates),
                   not_superposed_calc_coordinates_shape[0] *
                   not_superposed_calc_coordinates_shape[1] *
                   not_superposed_calc_coordinates_shape[2],
                   precision_of_check);
    // Bug fix: the calculator was leaked; every sibling test deletes it.
    delete calculator;
}
// Prody steps
//-------------
// Ensemble @ /usr/local/lib/python2.7/dist-packages/prody/ensemble/ensemble.py
// PDBEnsemble @ /usr/local/lib/python2.7/dist-packages/prody/ensemble/pdbensemble.py
// Steps:
// 1: PDBEnsemble::iterpose -> confs_tmp = confs
// 2: PDBEnsemble::iterpose -> Ensemble::iterpose(confs_tmp)
// Iterative
// 3: PDBEnsemble::_superpose()
// 4: Ensemble::superpose()
// 5: PDBEnsemble::_superpose(trans=True)
// Drives the iterative superposition one step at a time, comparing after each
// step both the mean coordinates and the in-place superposed working
// coordinates against per-step golden files.
void test_step_by_step_iterative_superposition_with_fit(RMSDCalculatorType type,
                                                        const char* step_directory,
                                                        const char* mean_directory,
                                                        const char* initial_prot_coords_file,
                                                        double precision_of_check,
                                                        int expected_number_of_iterations){
    print_test_tittle(__FUNCTION__);
    print_calculator_and_precission(type, precision_of_check);
    vector<double> initial_fit_coordinates,
        expected_iterposed_coords_for_step, expected_mean_coords_for_step;
    double* reference_coords = NULL;
    double* mean_coords = NULL;
    vector<int> initial_fit_coordinates_shape;
    // Initial coordinates are centered within the algorithm
    load_and_center_pdb_coords(initial_fit_coordinates,
                               initial_fit_coordinates_shape,
                               initial_prot_coords_file);
    // The calculator keeps a pointer to initial_fit_coordinates and
    // superposes them in place step by step.
    RMSDCalculator* calculator = RMSDCalculatorFactory::createCalculator(
        type,
        initial_fit_coordinates_shape[0],
        initial_fit_coordinates_shape[1],
        TOPOINTER(initial_fit_coordinates));
    // Init temporary vectors: reference starts as the first conformation.
    reference_coords = new double[initial_fit_coordinates_shape[1]*3];
    RMSDTools::copyArrays(reference_coords,
                          TOPOINTER(initial_fit_coordinates),
                          initial_fit_coordinates_shape[1]*3);
    mean_coords = new double[initial_fit_coordinates_shape[1]*3];
    for (int i = 0; i < expected_number_of_iterations; i++){
        string mean_step_file = string(mean_directory)+"/mean_step_"+toString(i)+".coords";
        string iter_step_file = string(step_directory)+"/iter_step_"+toString(i)+".coords";
        // Separate shapes per golden file (the iterposed file holds the whole
        // ensemble; the mean file a single conformation). Assumes the loaders
        // overwrite their output vectors each call — TODO confirm.
        vector<int> iter_step_shape, mean_step_shape;
        load_and_center_pdb_coords(expected_iterposed_coords_for_step,
                                   iter_step_shape,
                                   iter_step_file.c_str());
        load_and_center_pdb_coords(expected_mean_coords_for_step,
                                   mean_step_shape,
                                   mean_step_file.c_str());
        calculator->iterative_superposition_step(reference_coords, mean_coords);
        compareVectors((string("\tMean coordinates for this step (")+toString(i)+ string("): ")).c_str(),
                       mean_coords,
                       TOPOINTER(expected_mean_coords_for_step),
                       mean_step_shape[0] *
                       mean_step_shape[1] *
                       mean_step_shape[2],
                       precision_of_check);
        // Bug fix: this comparison previously re-checked the mean coordinates.
        // It must check the working coordinates (superposed in place by the
        // calculator) against the golden per-step iterposed coordinates, which
        // were loaded but never used.
        compareVectors((string("\tIteratively superposed until this step (")+toString(i)+ string("): ")).c_str(),
                       TOPOINTER(expected_iterposed_coords_for_step),
                       TOPOINTER(initial_fit_coordinates),
                       iter_step_shape[0] *
                       iter_step_shape[1] *
                       iter_step_shape[2],
                       precision_of_check);
    }
    delete [] reference_coords;
    delete [] mean_coords;
    delete calculator;
}
// Runs the full iterative superposition (until the mean stabilizes below the
// 1e-4 threshold) and checks the final coordinates plus the per-iteration
// RMSD values against golden data.
void test_iterative_superposition_with_fit(RMSDCalculatorType type,
                                           const char* initial_prot_coords_file,
                                           const char* final_prot_coords_file,
                                           const char* iteration_rmsd_results_file,
                                           double precision_of_check,
                                           int expected_number_of_iterations){
    print_test_tittle(__FUNCTION__);
    print_calculator_and_precission(type, precision_of_check);
    vector<double> initial_fit_coordinates,
        expected_final_fit_coordinates,
        calculated_by_step_rmsds, expected_by_step_rmsds;
    vector<int> expected_final_fit_coordinates_shape,
        initial_fit_coordinates_shape;
    load_vector(expected_by_step_rmsds, iteration_rmsd_results_file);
    load_pdb_coords(initial_fit_coordinates,
                    initial_fit_coordinates_shape,
                    initial_prot_coords_file);
    load_and_center_pdb_coords(expected_final_fit_coordinates,
                               expected_final_fit_coordinates_shape,
                               final_prot_coords_file);
    // One RMSD value recorded per iteration.
    calculated_by_step_rmsds.resize(expected_number_of_iterations,0);
    RMSDCalculator* calculator = RMSDCalculatorFactory::createCalculator(
        type,
        initial_fit_coordinates_shape[0],
        initial_fit_coordinates_shape[1],
        TOPOINTER(initial_fit_coordinates));
    // Superposes initial_fit_coordinates in place.
    calculator->iterativeSuperposition(1e-4, TOPOINTER(calculated_by_step_rmsds));
    // print_vector("calculated RMSD: ", TODOUBLEP(calculated_by_step_rmsds), calculated_by_step_rmsds.size(),12);
    // print_vector("expected RMSD: ", TODOUBLEP(expected_by_step_rmsds), expected_by_step_rmsds.size(),12);
    compareVectors("\tFinal iterposed coordinates are as expected: ",
                   TOPOINTER(expected_final_fit_coordinates),
                   TOPOINTER(initial_fit_coordinates),
                   expected_final_fit_coordinates_shape[0] *
                   expected_final_fit_coordinates_shape[1] *
                   expected_final_fit_coordinates_shape[2],
                   precision_of_check);
    compareVectors("\tPer-step rmsd values are the same: ",
                   TOPOINTER(expected_by_step_rmsds),
                   TOPOINTER(calculated_by_step_rmsds),
                   expected_number_of_iterations,
                   precision_of_check);
    delete calculator;
}
// Full iterative superposition with a separate calculation (ligand) coordinate
// set: the fitting transform must also be applied to the ligand. Checks final
// coordinates of both sets and the per-iteration RMSD values.
void test_iterative_superposition_with_fit_and_calc_rotation(RMSDCalculatorType type,
                                                             const char* initial_prot_coords_file,
                                                             const char* initial_lig_coords_file,
                                                             const char* final_prot_coords_file,
                                                             const char* final_lig_coords_file,
                                                             const char* iteration_rmsd_results_file,
                                                             double precision_of_check,
                                                             int expected_number_of_iterations){
    print_test_tittle(__FUNCTION__);
    print_calculator_and_precission(type, precision_of_check);
    vector<double> initial_fit_coordinates, initial_lig_coordinates,
        expected_final_fit_coordinates, expected_final_lig_coordinates,
        calculated_by_step_rmsds, expected_by_step_rmsds;
    vector<int> expected_final_fit_coordinates_shape,expected_final_lig_coordinates_shape,
        initial_fit_coordinates_shape,initial_lig_coordinates_shape;
    vector<double> centers;
    load_vector(expected_by_step_rmsds, iteration_rmsd_results_file);
    load_pdb_coords(initial_fit_coordinates,
                    initial_fit_coordinates_shape,
                    initial_prot_coords_file);
    // Bug fix: the '&centers' argument below had been corrupted by a bad
    // character encoding into '¢ers', which does not compile.
    load_and_center_pdb_coords(expected_final_fit_coordinates,
                               expected_final_fit_coordinates_shape,
                               final_prot_coords_file,
                               &centers);
    load_pdb_coords(initial_lig_coordinates,
                    initial_lig_coordinates_shape,
                    initial_lig_coords_file);
    // The golden ligand coordinates are moved with the centers obtained for
    // the fitting set so relative distances are preserved.
    load_and_move_pdb_coords(expected_final_lig_coordinates,
                             expected_final_lig_coordinates_shape,
                             final_lig_coords_file,
                             TOPOINTER(centers));
    calculated_by_step_rmsds.resize(expected_number_of_iterations,0);
    RMSDCalculator* calculator = RMSDCalculatorFactory::createCalculator(
        type,
        initial_fit_coordinates_shape[0],
        initial_fit_coordinates_shape[1],
        TOPOINTER(initial_fit_coordinates),
        initial_lig_coordinates_shape[1],
        TOPOINTER(initial_lig_coordinates));
    calculator->iterativeSuperposition(1e-4,
                                       TOPOINTER(calculated_by_step_rmsds));
    // print_vector("calculated RMSD: ", TODOUBLEP(calculated_by_step_rmsds), calculated_by_step_rmsds.size(),12);
    // print_vector("expected RMSD: ", TODOUBLEP(expected_by_step_rmsds), expected_by_step_rmsds.size(),12);
    compareVectors("\tFinal iterposed coordinates are as expected: ",
                   TOPOINTER(expected_final_fit_coordinates),
                   TOPOINTER(initial_fit_coordinates),
                   expected_final_fit_coordinates_shape[0] *
                   expected_final_fit_coordinates_shape[1] *
                   expected_final_fit_coordinates_shape[2],
                   precision_of_check);
    compareVectors("\tAnd ligands have been moved to its correct positions : ",
                   TOPOINTER(expected_final_lig_coordinates),
                   TOPOINTER(initial_lig_coordinates),
                   expected_final_lig_coordinates_shape[0] *
                   expected_final_lig_coordinates_shape[1] *
                   expected_final_lig_coordinates_shape[2],
                   precision_of_check);
    compareVectors("\tPer-step rmsd values are the same: ",
                   TOPOINTER(expected_by_step_rmsds),
                   TOPOINTER(calculated_by_step_rmsds),
                   expected_number_of_iterations,
                   precision_of_check);
    delete calculator;
}
// Computes the condensed pairwise RMSD matrix using only fitting coordinates
// and compares it against golden values.
void test_matrix_with_fit_coordinates(RMSDCalculatorType type,
                                      const char* initial_prot_coords_file,
                                      const char* rmsd_results_file,
                                      double precision_of_check){
    print_test_tittle(__FUNCTION__);
    print_calculator_and_precission(type, precision_of_check);
    vector<double> initial_fit_coordinates,
        calculated_rmsds, expected_rmsds;
    vector<int> initial_fit_coordinates_shape;
    load_vector(expected_rmsds, rmsd_results_file);
    load_pdb_coords(initial_fit_coordinates,
                    initial_fit_coordinates_shape,
                    initial_prot_coords_file);
    RMSDCalculator* calculator = RMSDCalculatorFactory::createCalculator(
        type,
        initial_fit_coordinates_shape[0],
        initial_fit_coordinates_shape[1],
        TOPOINTER(initial_fit_coordinates));
    // Fills calculated_rmsds with the upper-triangle (condensed) matrix.
    calculator->calculateRMSDCondensedMatrix(calculated_rmsds);
    compareVectors("\tThe RMSD matrix is as expected: ",
                   TOPOINTER(expected_rmsds),
                   TOPOINTER(calculated_rmsds),
                   expected_rmsds.size(),
                   precision_of_check);
    // Bug fix: the calculator was leaked; sibling tests delete it.
    delete calculator;
}
// Computes the condensed pairwise RMSD matrix where fitting uses one
// coordinate set (protein) and the RMSD is measured on another (ligand),
// comparing against golden values.
void test_matrix_with_fit_and_calculation_coordinates(RMSDCalculatorType type,
                                                      const char* initial_prot_coords_file,
                                                      const char* initial_lig_coords_file,
                                                      const char* rmsd_results_file,
                                                      double precision_of_check){
    print_test_tittle(__FUNCTION__);
    print_calculator_and_precission(type, precision_of_check);
    vector<double> initial_fit_coordinates, initial_lig_coordinates,
        calculated_rmsds, expected_rmsds;
    vector<int> initial_fit_coordinates_shape,initial_lig_coordinates_shape;
    load_vector(expected_rmsds, rmsd_results_file);
    load_pdb_coords(initial_fit_coordinates,
                    initial_fit_coordinates_shape,
                    initial_prot_coords_file);
    load_pdb_coords(initial_lig_coordinates,
                    initial_lig_coordinates_shape,
                    initial_lig_coords_file);
    RMSDCalculator* calculator = RMSDCalculatorFactory::createCalculator(
        type,
        initial_fit_coordinates_shape[0],
        initial_fit_coordinates_shape[1],
        TOPOINTER(initial_fit_coordinates),
        initial_lig_coordinates_shape[1],
        TOPOINTER(initial_lig_coordinates));
    // Fills calculated_rmsds with the upper-triangle (condensed) matrix.
    calculator->calculateRMSDCondensedMatrix(calculated_rmsds);
    compareVectors("\tThe RMSD matrix is as expected: ",
                   TOPOINTER(expected_rmsds),
                   TOPOINTER(calculated_rmsds),
                   expected_rmsds.size(),
                   precision_of_check);
    // Bug fix: the calculator was leaked; sibling tests delete it.
    delete calculator;
}
// Cross-checks the QCP serial (float) and QCP CUDA (float) calculators: both run the same
// iterative superposition with separate fit (protein) and calc (ligand) coordinate sets, and
// the final coordinates and per-step RMSD traces must agree within float-level tolerances.
// Two independent copies of each coordinate file are loaded because the comparisons below
// read the "initial_*" buffers as final coordinates, i.e. iterativeSuperposition appears to
// update them in place.
void test_iterative_superposition_with_fit_and_calc_rotation_comparing_QCP_serial_and_QCP_CUDA(
const char* initial_prot_coords_file,
const char* initial_lig_coords_file){
print_test_tittle(__FUNCTION__);
cout<<"Comparing QCP_SERIAL_FLOAT_CALCULATOR and QCP_CUDA_CALCULATOR (float)"<<endl;
vector<double> initial_qcp_serial_fit_coordinates, initial_qcp_serial_lig_coordinates,
initial_qcp_cuda_fit_coordinates, initial_qcp_cuda_lig_coordinates,
calculated_serial_by_step_rmsds, calculated_cuda_by_step_rmsds;
vector<int> fit_coords_shape, lig_coords_shape;
// Copy #1: consumed by the serial calculator.
load_pdb_coords(initial_qcp_serial_fit_coordinates,
fit_coords_shape,
initial_prot_coords_file);
load_pdb_coords(initial_qcp_serial_lig_coordinates,
lig_coords_shape,
initial_lig_coords_file);
// Copy #2: consumed by the CUDA calculator (same files, same shapes).
load_pdb_coords(initial_qcp_cuda_fit_coordinates,
fit_coords_shape,
initial_prot_coords_file);
load_pdb_coords(initial_qcp_cuda_lig_coordinates,
lig_coords_shape,
initial_lig_coords_file);
// Pre-size the per-step RMSD buffers; 50 is the expected iteration count for this dataset.
int expected_number_of_iterations = 50;
calculated_serial_by_step_rmsds.resize(expected_number_of_iterations,0);
calculated_cuda_by_step_rmsds.resize(expected_number_of_iterations,0);
RMSDCalculator* serial_calculator = RMSDCalculatorFactory::createCalculator(
QCP_SERIAL_FLOAT_CALCULATOR,
fit_coords_shape[0],
fit_coords_shape[1],
TOPOINTER(initial_qcp_serial_fit_coordinates),
lig_coords_shape[1],
TOPOINTER(initial_qcp_serial_lig_coordinates));
serial_calculator->iterativeSuperposition(1e-4,
TOPOINTER(calculated_serial_by_step_rmsds));
RMSDCalculator* cuda_calculator = RMSDCalculatorFactory::createCalculator(
QCP_CUDA_CALCULATOR,
fit_coords_shape[0],
fit_coords_shape[1],
TOPOINTER(initial_qcp_cuda_fit_coordinates),
lig_coords_shape[1],
TOPOINTER(initial_qcp_cuda_lig_coordinates));
cuda_calculator->iterativeSuperposition(1e-4,
TOPOINTER(calculated_cuda_by_step_rmsds));
// print_vector("calculated RMSD: ", TODOUBLEP(calculated_by_step_rmsds), calculated_by_step_rmsds.size(),12);
// print_vector("expected RMSD: ", TODOUBLEP(expected_by_step_rmsds), expected_by_step_rmsds.size(),12);
// print_vector("initial_pfit_coordinates_shape: ", TOPOINTER(initial_fit_coordinates_shape), initial_fit_coordinates_shape.size(),1);
// print_vector("expected_final_fit_coordinates_shape: ", TOPOINTER(expected_final_fit_coordinates_shape), expected_final_fit_coordinates_shape.size(),1);
// print_vector("initial_lig_coordinates_shape: ", TOPOINTER(initial_lig_coordinates_shape), initial_lig_coordinates_shape.size(),1);
// print_vector("expected_final_lig_coordinates_shape: ", TOPOINTER(expected_final_lig_coordinates_shape), expected_final_lig_coordinates_shape.size(),1);
// Tolerances are loose (1e-3 .. 1e-5) since both implementations work in single precision.
compareVectors("\tFinal fitting coordinates of serial (float) and CUDA (float) versions coincide: ",
TOPOINTER(initial_qcp_serial_fit_coordinates),
TOPOINTER(initial_qcp_cuda_fit_coordinates),
fit_coords_shape[0] *
fit_coords_shape[1] *
fit_coords_shape[2],
1e-3);
compareVectors("\tFinal calculation coordinates of serial (float) and CUDA (float) versions coincide: ",
TOPOINTER(initial_qcp_serial_lig_coordinates),
TOPOINTER(initial_qcp_cuda_lig_coordinates),
lig_coords_shape[0] *
lig_coords_shape[1] *
lig_coords_shape[2],
1e-4);
compareVectors("\tPer-step rmsd values are the same for both: ",
TOPOINTER(calculated_serial_by_step_rmsds),
TOPOINTER(calculated_cuda_by_step_rmsds),
expected_number_of_iterations,
1e-5);
delete serial_calculator;
delete cuda_calculator;
}
// Validates symmetry-aware RMSD calculation (fit + calc coordinate sets).
// Strategy: four "native" conformations (symmetry-equivalent permutations of the same ligand)
// are each prepended to the same trajectory, the one-vs-following RMSD is computed for each,
// and the element-wise minimum of the four traces is taken as the hand-made reference. A final
// calculation using symmGroups (which swaps symmetric atom pairs automatically) must reproduce
// that minimum exactly, and must agree with the golden (prody-derived) values.
void test_rmsd_calculation_fit_and_calc_with_symmetry(RMSDCalculatorType type){
print_test_tittle(__FUNCTION__);
// Coordinates
vector<double> native_0_plus_coords_CA,
native_1_plus_coords_CA,
native_2_plus_coords_CA,
native_3_plus_coords_CA,
calculated_rmsds,
calculated_rmsds_0,
calculated_rmsds_1,
calculated_rmsds_2,
calculated_rmsds_3,
min_rmsds,
expected_rmsds;
vector<int> trajectory_with_native_CA_size;
vector<double> native_0_plus_coords_lig,
native_1_plus_coords_lig,
native_2_plus_coords_lig,
native_3_plus_coords_lig;
vector<int> trajectory_with_native_lig_size;
// Load rmsds
load_vector(expected_rmsds, "data/Symmetry/OneVsAllFitAndCalc/minimum.rmsds");
// Load natives+trajectories (CA)
load_and_merge(native_0_plus_coords_CA,
trajectory_with_native_CA_size,
"data/Symmetry/Models/Natives/Native_0.CA.coords",
"data/Symmetry/Models/Trajectory/traj_testset.CA.coords");
load_and_merge(native_1_plus_coords_CA,
trajectory_with_native_CA_size,
"data/Symmetry/Models/Natives/Native_1.CA.coords",
"data/Symmetry/Models/Trajectory/traj_testset.CA.coords");
load_and_merge(native_2_plus_coords_CA,
trajectory_with_native_CA_size,
"data/Symmetry/Models/Natives/Native_2.CA.coords",
"data/Symmetry/Models/Trajectory/traj_testset.CA.coords");
load_and_merge(native_3_plus_coords_CA,
trajectory_with_native_CA_size,
"data/Symmetry/Models/Natives/Native_3.CA.coords",
"data/Symmetry/Models/Trajectory/traj_testset.CA.coords");
// Load natives+trajectories (lig)
load_and_merge(native_0_plus_coords_lig,
trajectory_with_native_lig_size,
"data/Symmetry/Models/Natives/Native_0.ligand.coords",
"data/Symmetry/Models/Trajectory/traj_testset.ligand.coords");
load_and_merge(native_1_plus_coords_lig,
trajectory_with_native_lig_size,
"data/Symmetry/Models/Natives/Native_1.ligand.coords",
"data/Symmetry/Models/Trajectory/traj_testset.ligand.coords");
load_and_merge(native_2_plus_coords_lig,
trajectory_with_native_lig_size,
"data/Symmetry/Models/Natives/Native_2.ligand.coords",
"data/Symmetry/Models/Trajectory/traj_testset.ligand.coords");
load_and_merge(native_3_plus_coords_lig,
trajectory_with_native_lig_size,
"data/Symmetry/Models/Natives/Native_3.ligand.coords",
"data/Symmetry/Models/Trajectory/traj_testset.ligand.coords");
// One RMSD per trajectory frame (conformation 0 is the native itself, hence size-1).
calculated_rmsds_0.resize(trajectory_with_native_CA_size[0]-1,0);
RMSDCalculator* calculator = RMSDCalculatorFactory::createCalculator(
type,
trajectory_with_native_CA_size[0],
trajectory_with_native_CA_size[1],
TOPOINTER(native_0_plus_coords_CA),
trajectory_with_native_lig_size[1],
TOPOINTER(native_0_plus_coords_lig));
calculator->oneVsFollowing(0, TOPOINTER(calculated_rmsds_0));
delete calculator;
calculated_rmsds_1.resize(trajectory_with_native_CA_size[0]-1,0);
calculator = RMSDCalculatorFactory::createCalculator(
type,
trajectory_with_native_CA_size[0],
trajectory_with_native_CA_size[1],
TOPOINTER(native_1_plus_coords_CA),
trajectory_with_native_lig_size[1],
TOPOINTER(native_1_plus_coords_lig));
calculator->oneVsFollowing(0, TOPOINTER(calculated_rmsds_1));
delete calculator;
calculated_rmsds_2.resize(trajectory_with_native_CA_size[0]-1,0);
calculator = RMSDCalculatorFactory::createCalculator(
type,
trajectory_with_native_CA_size[0],
trajectory_with_native_CA_size[1],
TOPOINTER(native_2_plus_coords_CA),
trajectory_with_native_lig_size[1],
TOPOINTER(native_2_plus_coords_lig));
calculator->oneVsFollowing(0, TOPOINTER(calculated_rmsds_2));
delete calculator;
calculated_rmsds_3.resize(trajectory_with_native_CA_size[0]-1,0);
calculator = RMSDCalculatorFactory::createCalculator(
type,
trajectory_with_native_CA_size[0],
trajectory_with_native_CA_size[1],
TOPOINTER(native_3_plus_coords_CA),
trajectory_with_native_lig_size[1],
TOPOINTER(native_3_plus_coords_lig));
calculator->oneVsFollowing(0, TOPOINTER(calculated_rmsds_3));
delete calculator;
// print_vector<double>("calculated RMSD 0: ", TOPOINTER(calculated_rmsds_0), calculated_rmsds_0.size(),12);
// print_vector<double>("calculated RMSD 1: ", TOPOINTER(calculated_rmsds_0), calculated_rmsds_0.size(),12);
// print_vector<double>("calculated RMSD 2: ", TOPOINTER(calculated_rmsds_0), calculated_rmsds_0.size(),12);
// print_vector<double>("calculated RMSD 3: ", TOPOINTER(calculated_rmsds_0), calculated_rmsds_0.size(),12);
// Get the minimum of all the calculated rmsds
for(unsigned int i= 0; i< calculated_rmsds_0.size(); i++){
min_rmsds.push_back(min(min(calculated_rmsds_0[i],calculated_rmsds_1[i]),
min(calculated_rmsds_2[i],calculated_rmsds_3[i])));
}
// We will create two groups, one to substitute the symmetric Cs of the bezene ring
// Now let's do the same defining the symmetry groups for the ligand.
// and the other for the Ns.
symmGroups symm_groups;
//vector<int> group1_first, group1_second;
//group1_first.push_back(0);group1_first.push_back(3);
//group1_second.push_back(2);group1_second.push_back(5);
//symm_groups.push_back(pair<vector<int>, vector<int> >(group1_first, group1_second));
//
//vector<int> group2_first, group2_second;
//group2_first.push_back(7);
//group2_second.push_back(8);
//symm_groups.push_back(pair<vector<int>, vector<int> >(group2_first, group2_second));
// Group 1: symmetric ring atom pairs (0<->2, 3<->5). Group 2: symmetric pair (7<->8).
vector<pair<int, int> > symm_group_1;
symm_group_1.push_back(pair<int,int>(0,2));
symm_group_1.push_back(pair<int,int>(3,5));
vector<pair<int, int> > symm_group_2;
symm_group_2.push_back(pair<int,int>(7,8));
symm_groups.push_back(symm_group_1);
symm_groups.push_back(symm_group_2);
calculated_rmsds.resize(trajectory_with_native_CA_size[0]-1,0);
// NOTE(review): this reuses native_3_plus_coords_CA/lig after the previous oneVsFollowing
// call already ran on those buffers — confirm oneVsFollowing leaves them unmodified (or that
// any modification is irrelevant here), otherwise a fresh load would be safer.
calculator = RMSDCalculatorFactory::createCalculator(
type,
trajectory_with_native_CA_size[0],
trajectory_with_native_CA_size[1],
TOPOINTER(native_3_plus_coords_CA),
trajectory_with_native_lig_size[1],
TOPOINTER(native_3_plus_coords_lig),
&symm_groups);
calculator->oneVsFollowing(0, TOPOINTER(calculated_rmsds));
delete calculator;
// The hand made calculator has to be equal to the automatic calculation
compareVectors("\tHand made calculation is equal to automatic calculation: ",
TOPOINTER(min_rmsds),
TOPOINTER(calculated_rmsds),
min_rmsds.size(),
1e-12);
//print_vector<double>("DBG: *HAND CALCTED MIN RMSD: ", TOPOINTER(min_rmsds), min_rmsds.size(), 12);
//print_vector<double>("DBG: *EXPECTED MIN RMSD: ", TOPOINTER(expected_rmsds), min_rmsds.size(), 12);
//print_vector<double>("DBG: *CALCTED MIN RMSD: ", TOPOINTER(calculated_rmsds), min_rmsds.size(), 12);
// The result has to be similar (at least in the same order) than the one got with prody
compareVectors("\tAnd results are within the safety range: ",
TOPOINTER(expected_rmsds),
TOPOINTER(calculated_rmsds),
expected_rmsds.size(),
1e-12); // Qualitative check
}
// Verifies oneVsFollowing on pre-superposed (only centered) fitting coordinates: since no
// further superposition should change the result, the calculated RMSDs must match the golden
// file to near machine precision.
void test_calculator_with_no_superposition_fit(RMSDCalculatorType type, const char* final_coords_file,const char* rmsd_results_file){
    print_test_tittle(__FUNCTION__);
    cout<<"- Using "<<calculatorTypeToString(type)<<endl;

    vector<double> fit_coords;
    vector<double> rmsds;
    vector<double> golden_rmsds;
    vector<int> fit_shape;

    // Golden values first, then the centered coordinates they were computed from.
    load_vector(golden_rmsds, rmsd_results_file);
    load_and_center_pdb_coords(fit_coords, fit_shape, final_coords_file);

    rmsds.resize(fit_shape[0], 0);
    RMSDCalculator* calc = RMSDCalculatorFactory::createCalculator(
            type,
            fit_shape[0],
            fit_shape[1],
            TOPOINTER(fit_coords));
    calc->oneVsFollowing(0, TOPOINTER(rmsds));

    // RMSDs must be the same (golden file includes the self-RMSD at index 0, so skip it).
    compareVectors("\tCalculated RMSDs coincide with golden: ",
            &(golden_rmsds[1]),
            TOPOINTER(rmsds),
            fit_shape[0]-1, 1e-14);
    delete calc;
}
// Verifies oneVsFollowing with separate fit and calc coordinate sets when both are already
// superposed: fit coordinates are centered, calc coordinates are moved by the same centers,
// and the resulting RMSDs must match the golden file.
//
// final_prot_coords_file - superposed fitting (protein) coordinates.
// final_lig_coords_file  - superposed calculation (ligand) coordinates.
// rmsd_results_file      - golden RMSD values (index 0 is the self-RMSD and is skipped).
void test_calculator_with_no_superposition_fit_and_calc(RMSDCalculatorType type,
        const char* final_prot_coords_file,
        const char* final_lig_coords_file,
        const char* rmsd_results_file){
    print_test_tittle(__FUNCTION__);
    cout<<"- Using "<<calculatorTypeToString(type)<<endl;
    vector<double> superposed_fit_coordinates,
                    superposed_calc_coordinates,
                    calculated_rmsds,
                    expected_rmsds,
                    centers; // Fix: stray double semicolon removed.
    vector<int> superposed_fit_coordinates_shape,
                superposed_calc_coordinates_shape;
    load_vector(expected_rmsds, rmsd_results_file);
    // Center the fit coordinates and capture the per-frame centers...
    load_and_center_pdb_coords(superposed_fit_coordinates,
            superposed_fit_coordinates_shape,
            final_prot_coords_file,
            &centers); // Fix: was mojibake "¢ers" (mis-encoded "&centers").
    // ...then translate the calc coordinates by those same centers so both sets stay aligned.
    load_and_move_pdb_coords(superposed_calc_coordinates,
            superposed_calc_coordinates_shape,
            final_lig_coords_file,
            TOPOINTER(centers));
    calculated_rmsds.resize(superposed_fit_coordinates_shape[0],0);
    RMSDCalculator* calculator = RMSDCalculatorFactory::createCalculator(
            type,
            superposed_fit_coordinates_shape[0],
            superposed_fit_coordinates_shape[1],
            TOPOINTER(superposed_fit_coordinates),
            superposed_calc_coordinates_shape[1],
            TOPOINTER(superposed_calc_coordinates));
    calculator->oneVsFollowing(0, TOPOINTER(calculated_rmsds));
    // print_vector<double>("expected RMSD: ", TOPOINTER(expected_rmsds), expected_rmsds.size(),12);
    // print_vector<double>("calculated RMSD: ", TOPOINTER(calculated_rmsds), calculated_rmsds.size(),12);
    // RMSDs must be the same
    compareVectors("\tCalculated RMSDs coincide with golden: ",
            &(expected_rmsds[1]),
            TOPOINTER(calculated_rmsds),
            superposed_fit_coordinates_shape[0]-1, 1e-13);
    delete calculator;
}
|
/*
***********************************************************************************************************************
*
* Copyright (c) 2016-2020 Advanced Micro Devices, Inc. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************************************************************************/
#if PAL_BUILD_INTERFACE_LOGGER
#include "core/layers/interfaceLogger/interfaceLoggerCmdAllocator.h"
#include "core/layers/interfaceLogger/interfaceLoggerCmdBuffer.h"
#include "core/layers/interfaceLogger/interfaceLoggerDevice.h"
#include "core/layers/interfaceLogger/interfaceLoggerGpuEvent.h"
#include "core/layers/interfaceLogger/interfaceLoggerGpuMemory.h"
#include "core/layers/interfaceLogger/interfaceLoggerImage.h"
#include "core/layers/interfaceLogger/interfaceLoggerPlatform.h"
#include "palAutoBuffer.h"
using namespace Util;
namespace Pal
{
namespace InterfaceLogger
{
// =====================================================================================================================
// Decorator command buffer for the interface-logger layer: records each ICmdBuffer call into
// the platform's interface log (with pre/post timestamps) before forwarding it to the next layer.
CmdBuffer::CmdBuffer(
ICmdBuffer* pNextCmdBuffer,
const Device* pDevice,
uint32 objectId)
:
CmdBufferDecorator(pNextCmdBuffer, pDevice),
m_pPlatform(static_cast<Platform*>(pDevice->GetPlatform())),
m_objectId(objectId)
{
// These entry points are invoked through a per-command-buffer function table rather than
// virtual dispatch; point every slot at this layer's static logging thunks.
m_funcTable.pfnCmdSetUserData[static_cast<uint32>(PipelineBindPoint::Compute)] = CmdSetUserDataCs;
m_funcTable.pfnCmdSetUserData[static_cast<uint32>(PipelineBindPoint::Graphics)] = CmdSetUserDataGfx;
m_funcTable.pfnCmdDraw = CmdDraw;
m_funcTable.pfnCmdDrawOpaque = CmdDrawOpaque;
m_funcTable.pfnCmdDrawIndexed = CmdDrawIndexed;
m_funcTable.pfnCmdDrawIndirectMulti = CmdDrawIndirectMulti;
m_funcTable.pfnCmdDrawIndexedIndirectMulti = CmdDrawIndexedIndirectMulti;
m_funcTable.pfnCmdDispatch = CmdDispatch;
m_funcTable.pfnCmdDispatchIndirect = CmdDispatchIndirect;
m_funcTable.pfnCmdDispatchOffset = CmdDispatchOffset;
}
// =====================================================================================================================
// Forwards Begin() to the next layer, then logs the build info and result with call timing.
Result CmdBuffer::Begin(
    const CmdBufferBuildInfo& info)
{
    BeginFuncInfo callInfo;
    callInfo.funcId       = InterfaceFunc::CmdBufferBegin;
    callInfo.objectId     = m_objectId;
    callInfo.preCallTime  = m_pPlatform->GetTime();
    const Result result = m_pNextLayer->Begin(NextCmdBufferBuildInfo(info));
    callInfo.postCallTime = m_pPlatform->GetTime();

    LogContext* pContext = nullptr;
    if (m_pPlatform->LogBeginFunc(callInfo, &pContext))
    {
        pContext->BeginInput();
        pContext->KeyAndStruct("info", info);
        pContext->EndInput();

        pContext->BeginOutput();
        pContext->KeyAndEnum("result", result);
        pContext->EndOutput();

        m_pPlatform->LogEndFunc(pContext);
    }

    return result;
}
// =====================================================================================================================
// Forwards End() to the next layer, then logs the result with call timing.
Result CmdBuffer::End()
{
    BeginFuncInfo callInfo;
    callInfo.funcId       = InterfaceFunc::CmdBufferEnd;
    callInfo.objectId     = m_objectId;
    callInfo.preCallTime  = m_pPlatform->GetTime();
    const Result result = m_pNextLayer->End();
    callInfo.postCallTime = m_pPlatform->GetTime();

    LogContext* pContext = nullptr;
    if (m_pPlatform->LogBeginFunc(callInfo, &pContext))
    {
        pContext->BeginOutput();
        pContext->KeyAndEnum("result", result);
        pContext->EndOutput();

        m_pPlatform->LogEndFunc(pContext);
    }

    return result;
}
// =====================================================================================================================
// Forwards Reset() (unwrapping the allocator decorator) and logs inputs and result.
Result CmdBuffer::Reset(
    ICmdAllocator* pCmdAllocator,
    bool returnGpuMemory)
{
    BeginFuncInfo callInfo;
    callInfo.funcId       = InterfaceFunc::CmdBufferReset;
    callInfo.objectId     = m_objectId;
    callInfo.preCallTime  = m_pPlatform->GetTime();
    const Result result = m_pNextLayer->Reset(NextCmdAllocator(pCmdAllocator), returnGpuMemory);
    callInfo.postCallTime = m_pPlatform->GetTime();

    LogContext* pContext = nullptr;
    if (m_pPlatform->LogBeginFunc(callInfo, &pContext))
    {
        pContext->BeginInput();
        pContext->KeyAndObject("pCmdAllocator", pCmdAllocator);
        pContext->KeyAndValue("returnGpuMemory", returnGpuMemory);
        pContext->EndInput();

        pContext->BeginOutput();
        pContext->KeyAndEnum("result", result);
        pContext->EndOutput();

        m_pPlatform->LogEndFunc(pContext);
    }

    return result;
}
// =====================================================================================================================
// Pure query with no side effects: simply returns the next layer's embedded-data limit.
uint32 CmdBuffer::GetEmbeddedDataLimit() const
{
// This function is not logged because it doesn't modify the command buffer.
return m_pNextLayer->GetEmbeddedDataLimit();
}
// =====================================================================================================================
void CmdBuffer::CmdBindPipeline(
const PipelineBindParams& params)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdBindPipeline;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdBindPipeline(NextPipelineBindParams(params));
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndStruct("params", params);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdBindMsaaState(
const IMsaaState* pMsaaState)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdBindMsaaState;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdBindMsaaState(NextMsaaState(pMsaaState));
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("msaaState", pMsaaState);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdBindColorBlendState(
const IColorBlendState* pColorBlendState)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdBindColorBlendState;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdBindColorBlendState(NextColorBlendState(pColorBlendState));
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("colorBlendState", pColorBlendState);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdBindDepthStencilState(
const IDepthStencilState* pDepthStencilState)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdBindDepthStencilState;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdBindDepthStencilState(NextDepthStencilState(pDepthStencilState));
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("depthStencilState", pDepthStencilState);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdSetDepthBounds(
const DepthBoundsParams& params)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdSetDepthBounds;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdSetDepthBounds(params);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndStruct("params", params);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
// Forwards the vertex-buffer bind and logs the range plus each buffer view.
void CmdBuffer::CmdSetVertexBuffers(
    uint32 firstBuffer,
    uint32 bufferCount,
    const BufferViewInfo* pBuffers)
{
    BeginFuncInfo callInfo;
    callInfo.funcId       = InterfaceFunc::CmdBufferCmdSetVertexBuffers;
    callInfo.objectId     = m_objectId;
    callInfo.preCallTime  = m_pPlatform->GetTime();
    m_pNextLayer->CmdSetVertexBuffers(firstBuffer, bufferCount, pBuffers);
    callInfo.postCallTime = m_pPlatform->GetTime();

    LogContext* pContext = nullptr;
    if (m_pPlatform->LogBeginFunc(callInfo, &pContext))
    {
        pContext->BeginInput();
        pContext->KeyAndValue("firstBuffer", firstBuffer);
        pContext->KeyAndValue("bufferCount", bufferCount);

        pContext->KeyAndBeginList("buffers", false);
        for (uint32 i = 0; i < bufferCount; ++i)
        {
            pContext->Struct(pBuffers[i]);
        }
        pContext->EndList();

        pContext->EndInput();
        m_pPlatform->LogEndFunc(pContext);
    }
}
// =====================================================================================================================
// Forwards the index-data bind and logs the GPU address, count and index type.
void CmdBuffer::CmdBindIndexData(
    gpusize gpuAddr,
    uint32 indexCount,
    IndexType indexType)
{
    BeginFuncInfo callInfo;
    callInfo.funcId       = InterfaceFunc::CmdBufferCmdBindIndexData;
    callInfo.objectId     = m_objectId;
    callInfo.preCallTime  = m_pPlatform->GetTime();
    m_pNextLayer->CmdBindIndexData(gpuAddr, indexCount, indexType);
    callInfo.postCallTime = m_pPlatform->GetTime();

    LogContext* pContext = nullptr;
    if (m_pPlatform->LogBeginFunc(callInfo, &pContext))
    {
        pContext->BeginInput();
        pContext->KeyAndValue("gpuAddr", gpuAddr);
        pContext->KeyAndValue("indexCount", indexCount);
        pContext->KeyAndEnum("indexType", indexType);
        pContext->EndInput();

        m_pPlatform->LogEndFunc(pContext);
    }
}
// =====================================================================================================================
void CmdBuffer::CmdBindTargets(
const BindTargetParams& params)
{
BindTargetParams nextParams = params;
for (uint32 i = 0; i < params.colorTargetCount; i++)
{
nextParams.colorTargets[i].pColorTargetView = NextColorTargetView(params.colorTargets[i].pColorTargetView);
}
nextParams.depthTarget.pDepthStencilView = NextDepthStencilView(params.depthTarget.pDepthStencilView);
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdBindTargets;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdBindTargets(nextParams);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndStruct("params", params);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdBindStreamOutTargets(
const BindStreamOutTargetParams& params)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdBindStreamOutTargets;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdBindStreamOutTargets(params);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndStruct("params", params);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdSetBlendConst(
const BlendConstParams& params)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdSetBlendConst;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdSetBlendConst(params);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndStruct("params", params);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdSetInputAssemblyState(
const InputAssemblyStateParams& params)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdSetInputAssemblyState;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdSetInputAssemblyState(params);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndStruct("params", params);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdSetTriangleRasterState(
const TriangleRasterStateParams& params)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdSetTriangleRasterState;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdSetTriangleRasterState(params);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndStruct("params", params);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdSetPointLineRasterState(
const PointLineRasterStateParams& params)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdSetPointLineRasterState;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdSetPointLineRasterState(params);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndStruct("params", params);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdSetLineStippleState(
const LineStippleStateParams& params)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdSetLineStippleState;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdSetLineStippleState(params);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndStruct("params", params);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdSetDepthBiasState(
const DepthBiasParams& params)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdSetDepthBiasState;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdSetDepthBiasState(params);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndStruct("params", params);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdSetStencilRefMasks(
const StencilRefMaskParams& params)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdSetStencilRefMasks;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdSetStencilRefMasks(params);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndStruct("params", params);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
// Forwards the user clip-plane update and logs the plane range plus each plane.
void CmdBuffer::CmdSetUserClipPlanes(
    uint32 firstPlane,
    uint32 planeCount,
    const UserClipPlane* pPlanes)
{
    BeginFuncInfo funcInfo;
    funcInfo.funcId       = InterfaceFunc::CmdBufferCmdSetUserClipPlanes;
    funcInfo.objectId     = m_objectId;
    funcInfo.preCallTime  = m_pPlatform->GetTime();
    m_pNextLayer->CmdSetUserClipPlanes(firstPlane, planeCount, pPlanes);
    funcInfo.postCallTime = m_pPlatform->GetTime();

    LogContext* pLogContext = nullptr;
    if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
    {
        pLogContext->BeginInput();
        pLogContext->KeyAndValue("firstPlane", firstPlane);
        // Fix: planeCount was not logged, unlike the analogous count parameters in
        // CmdSetVertexBuffers ("bufferCount") and CmdSetClipRects ("rectCount").
        pLogContext->KeyAndValue("planeCount", planeCount);
        pLogContext->KeyAndBeginList("planes", false);
        for (uint32 idx = 0; idx < planeCount; ++idx)
        {
            pLogContext->Struct(pPlanes[idx]);
        }
        pLogContext->EndList();
        pLogContext->EndInput();
        m_pPlatform->LogEndFunc(pLogContext);
    }
}
// =====================================================================================================================
// Forwards CmdSetClipRects to the next layer, timing the call and logging its inputs when logging is active.
void CmdBuffer::CmdSetClipRects(
    uint16      clipRule,
    uint32      rectCount,
    const Rect* pRectList)
{
    BeginFuncInfo info;
    info.funcId       = InterfaceFunc::CmdBufferCmdSetClipRects;
    info.objectId     = m_objectId;
    info.preCallTime  = m_pPlatform->GetTime();
    m_pNextLayer->CmdSetClipRects(clipRule, rectCount, pRectList);
    info.postCallTime = m_pPlatform->GetTime();

    LogContext* pCtx = nullptr;
    if (m_pPlatform->LogBeginFunc(info, &pCtx))
    {
        pCtx->BeginInput();
        pCtx->KeyAndValue("clipRule", clipRule);
        pCtx->KeyAndValue("rectCount", rectCount);
        // NOTE(review): the capitalized "Rectangles" key differs from the lowercase key style used by the other
        // logger methods ("regions", "ranges", ...) — preserved as-is in case log consumers depend on it.
        pCtx->KeyAndBeginList("Rectangles", false);
        for (uint32 i = 0; i < rectCount; ++i)
        {
            pCtx->Struct(pRectList[i]);
        }
        pCtx->EndList();
        pCtx->EndInput();
        m_pPlatform->LogEndFunc(pCtx);
    }
}
// =====================================================================================================================
// Forwards CmdSetMsaaQuadSamplePattern to the next layer, timing the call and logging its inputs when logging is
// active.
void CmdBuffer::CmdSetMsaaQuadSamplePattern(
    uint32                       numSamplesPerPixel,
    const MsaaQuadSamplePattern& quadSamplePattern)
{
    BeginFuncInfo info;
    info.funcId       = InterfaceFunc::CmdBufferCmdSetMsaaQuadSamplePattern;
    info.objectId     = m_objectId;
    info.preCallTime  = m_pPlatform->GetTime();
    m_pNextLayer->CmdSetMsaaQuadSamplePattern(numSamplesPerPixel, quadSamplePattern);
    info.postCallTime = m_pPlatform->GetTime();

    LogContext* pCtx = nullptr;
    if (m_pPlatform->LogBeginFunc(info, &pCtx))
    {
        pCtx->BeginInput();
        pCtx->KeyAndValue("numSamplesPerPixel", numSamplesPerPixel);
        pCtx->KeyAndStruct("quadSamplePattern", quadSamplePattern);
        pCtx->EndInput();
        m_pPlatform->LogEndFunc(pCtx);
    }
}
// =====================================================================================================================
void CmdBuffer::CmdSetViewports(
const ViewportParams& params)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdSetViewports;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdSetViewports(params);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndStruct("params", params);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdSetScissorRects(
const ScissorRectParams& params)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdSetScissorRects;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdSetScissorRects(params);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndStruct("params", params);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdSetGlobalScissor(
const GlobalScissorParams& params)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdSetGlobalScissor;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdSetGlobalScissor(params);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndStruct("params", params);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
// Decorates CmdBarrier: clones the caller's BarrierInfo with every wrapped object pointer replaced by the next
// layer's object, forwards the call, and logs the caller's original inputs when logging is active.
void CmdBuffer::CmdBarrier(
    const BarrierInfo& barrierInfo)
{
    // Scratch arrays sized for the caller's event/target/transition counts; spill to the heap only when the
    // inline capacity (16/16/32) is exceeded.
    AutoBuffer<const IGpuEvent*, 16, Platform> gpuEvents(barrierInfo.gpuEventWaitCount, m_pPlatform);
    AutoBuffer<const IImage*, 16, Platform> targets(barrierInfo.rangeCheckedTargetWaitCount, m_pPlatform);
    AutoBuffer<BarrierTransition, 32, Platform> transitions(barrierInfo.transitionCount, m_pPlatform);
    if ((gpuEvents.Capacity() < barrierInfo.gpuEventWaitCount) ||
        (targets.Capacity() < barrierInfo.rangeCheckedTargetWaitCount) ||
        (transitions.Capacity() < barrierInfo.transitionCount))
    {
        // If the layers become production code, we must set a flag here and return out of memory on End().
        PAL_ASSERT_ALWAYS();
    }
    else
    {
        // Shallow-copy the info, then repoint each embedded object at its next-layer counterpart.
        BarrierInfo nextBarrierInfo = barrierInfo;
        for (uint32 i = 0; i < barrierInfo.gpuEventWaitCount; i++)
        {
            gpuEvents[i] = NextGpuEvent(barrierInfo.ppGpuEvents[i]);
        }
        nextBarrierInfo.ppGpuEvents = &gpuEvents[0];
        for (uint32 i = 0; i < barrierInfo.rangeCheckedTargetWaitCount; i++)
        {
            targets[i] = NextImage(barrierInfo.ppTargets[i]);
        }
        nextBarrierInfo.ppTargets = &targets[0];
        for (uint32 i = 0; i < barrierInfo.transitionCount; i++)
        {
            // Copy the transition, then unwrap only its image pointer.
            transitions[i] = barrierInfo.pTransitions[i];
            transitions[i].imageInfo.pImage = NextImage(barrierInfo.pTransitions[i].imageInfo.pImage);
        }
        nextBarrierInfo.pTransitions = &transitions[0];
        nextBarrierInfo.pSplitBarrierGpuEvent = NextGpuEvent(barrierInfo.pSplitBarrierGpuEvent);
        BeginFuncInfo funcInfo;
        funcInfo.funcId = InterfaceFunc::CmdBufferCmdBarrier;
        funcInfo.objectId = m_objectId;
        funcInfo.preCallTime = m_pPlatform->GetTime();
        m_pNextLayer->CmdBarrier(nextBarrierInfo);
        funcInfo.postCallTime = m_pPlatform->GetTime();
        LogContext* pLogContext = nullptr;
        if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
        {
            pLogContext->BeginInput();
            // Log the caller's original info (this-layer objects), not the patched nextBarrierInfo.
            pLogContext->KeyAndStruct("barrierInfo", barrierInfo);
            pLogContext->EndInput();
            m_pPlatform->LogEndFunc(pLogContext);
        }
    }
}
// =====================================================================================================================
// Decorates CmdRelease: clones the caller's AcquireReleaseInfo with each barrier's object pointer replaced by the
// next layer's object, forwards the call, and logs the caller's original inputs when logging is active.
void CmdBuffer::CmdRelease(
    const AcquireReleaseInfo& releaseInfo,
    const IGpuEvent* pGpuEvent)
{
    // Scratch arrays for the unwrapped memory/image barriers (inline capacity 32 each).
    AutoBuffer<MemBarrier, 32, Platform> memoryBarriers(releaseInfo.memoryBarrierCount, m_pPlatform);
    AutoBuffer<ImgBarrier, 32, Platform> imageBarriers(releaseInfo.imageBarrierCount, m_pPlatform);
    if ((memoryBarriers.Capacity() < releaseInfo.memoryBarrierCount) ||
        (imageBarriers.Capacity() < releaseInfo.imageBarrierCount))
    {
        // If the layers become production code, we must set a flag here and return out of memory on End().
        PAL_ASSERT_ALWAYS();
    }
    else
    {
        // Shallow-copy the info, then repoint each embedded object at its next-layer counterpart.
        AcquireReleaseInfo nextReleaseInfo = releaseInfo;
        for (uint32 i = 0; i < releaseInfo.memoryBarrierCount; i++)
        {
            memoryBarriers[i] = releaseInfo.pMemoryBarriers[i];
            memoryBarriers[i].memory.pGpuMemory = NextGpuMemory(releaseInfo.pMemoryBarriers[i].memory.pGpuMemory);
        }
        nextReleaseInfo.pMemoryBarriers = &memoryBarriers[0];
        for (uint32 i = 0; i < releaseInfo.imageBarrierCount; i++)
        {
            imageBarriers[i] = releaseInfo.pImageBarriers[i];
            imageBarriers[i].pImage = NextImage(releaseInfo.pImageBarriers[i].pImage);
        }
        nextReleaseInfo.pImageBarriers = &imageBarriers[0];
        BeginFuncInfo funcInfo;
        funcInfo.funcId = InterfaceFunc::CmdBufferCmdRelease;
        funcInfo.objectId = m_objectId;
        funcInfo.preCallTime = m_pPlatform->GetTime();
        m_pNextLayer->CmdRelease(nextReleaseInfo, NextGpuEvent(pGpuEvent));
        funcInfo.postCallTime = m_pPlatform->GetTime();
        LogContext* pLogContext = nullptr;
        if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
        {
            pLogContext->BeginInput();
            // Log the caller's original info and event (this-layer objects), not the unwrapped copies.
            pLogContext->KeyAndStruct("releaseInfo", releaseInfo);
            pLogContext->KeyAndObject("gpuEvent", pGpuEvent);
            pLogContext->EndInput();
            m_pPlatform->LogEndFunc(pLogContext);
        }
    }
}
// =====================================================================================================================
// Decorates CmdAcquire: clones the caller's AcquireReleaseInfo and event list with each object pointer replaced by
// the next layer's object, forwards the call, and logs the caller's original inputs when logging is active.
void CmdBuffer::CmdAcquire(
    const AcquireReleaseInfo& acquireInfo,
    uint32                    gpuEventCount,
    const IGpuEvent*const*    ppGpuEvents)
{
    // Scratch arrays for the unwrapped barriers and events (inline capacity 32/32/16).
    AutoBuffer<MemBarrier, 32, Platform> memoryBarriers(acquireInfo.memoryBarrierCount, m_pPlatform);
    AutoBuffer<ImgBarrier, 32, Platform> imageBarriers(acquireInfo.imageBarrierCount, m_pPlatform);
    AutoBuffer<IGpuEvent*, 16, Platform> nextGpuEvents(gpuEventCount, m_pPlatform);
    if ((memoryBarriers.Capacity() < acquireInfo.memoryBarrierCount) ||
        (imageBarriers.Capacity() < acquireInfo.imageBarrierCount) ||
        (nextGpuEvents.Capacity() < gpuEventCount))
    {
        // If the layers become production code, we must set a flag here and return out of memory on End().
        PAL_ASSERT_ALWAYS();
    }
    else
    {
        // Shallow-copy the info, then repoint each embedded object at its next-layer counterpart.
        AcquireReleaseInfo nextAcquireInfo = acquireInfo;
        for (uint32 i = 0; i < acquireInfo.memoryBarrierCount; i++)
        {
            memoryBarriers[i] = acquireInfo.pMemoryBarriers[i];
            memoryBarriers[i].memory.pGpuMemory = NextGpuMemory(acquireInfo.pMemoryBarriers[i].memory.pGpuMemory);
        }
        nextAcquireInfo.pMemoryBarriers = &memoryBarriers[0];
        for (uint32 i = 0; i < acquireInfo.imageBarrierCount; i++)
        {
            imageBarriers[i] = acquireInfo.pImageBarriers[i];
            imageBarriers[i].pImage = NextImage(acquireInfo.pImageBarriers[i].pImage);
        }
        nextAcquireInfo.pImageBarriers = &imageBarriers[0];
        for (uint32 i = 0; i < gpuEventCount; i++)
        {
            nextGpuEvents[i] = NextGpuEvent(ppGpuEvents[i]);
        }
        BeginFuncInfo funcInfo;
        funcInfo.funcId       = InterfaceFunc::CmdBufferCmdAcquire;
        funcInfo.objectId     = m_objectId;
        funcInfo.preCallTime  = m_pPlatform->GetTime();
        m_pNextLayer->CmdAcquire(nextAcquireInfo, gpuEventCount, &nextGpuEvents[0]);
        funcInfo.postCallTime = m_pPlatform->GetTime();
        LogContext* pLogContext = nullptr;
        if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
        {
            pLogContext->BeginInput();
            pLogContext->KeyAndStruct("acquireInfo", acquireInfo);
            pLogContext->KeyAndBeginList("gpuEvents", false);
            for (uint32 idx = 0; idx < gpuEventCount; ++idx)
            {
                // Fix: log the caller's this-layer events, not the unwrapped next-layer events.  Every sibling
                // method (e.g. CmdRelease, CmdBarrier) logs the original objects so their logged object ids match.
                pLogContext->Object(ppGpuEvents[idx]);
            }
            pLogContext->EndList();
            pLogContext->EndInput();
            m_pPlatform->LogEndFunc(pLogContext);
        }
    }
}
// =====================================================================================================================
// Decorates CmdReleaseThenAcquire: clones the caller's AcquireReleaseInfo with each barrier's object pointer
// replaced by the next layer's object, forwards the call, and logs the caller's original inputs when logging is
// active.
void CmdBuffer::CmdReleaseThenAcquire(
    const AcquireReleaseInfo& barrierInfo)
{
    // Scratch arrays for the unwrapped memory/image barriers (inline capacity 32 each).
    AutoBuffer<MemBarrier, 32, Platform> memoryBarriers(barrierInfo.memoryBarrierCount, m_pPlatform);
    AutoBuffer<ImgBarrier, 32, Platform> imageBarriers(barrierInfo.imageBarrierCount, m_pPlatform);
    if ((memoryBarriers.Capacity() < barrierInfo.memoryBarrierCount) ||
        (imageBarriers.Capacity() < barrierInfo.imageBarrierCount))
    {
        // If the layers become production code, we must set a flag here and return out of memory on End().
        PAL_ASSERT_ALWAYS();
    }
    else
    {
        // Shallow-copy the info, then repoint each embedded object at its next-layer counterpart.
        AcquireReleaseInfo nextBarrierInfo = barrierInfo;
        for (uint32 i = 0; i < barrierInfo.memoryBarrierCount; i++)
        {
            memoryBarriers[i] = barrierInfo.pMemoryBarriers[i];
            memoryBarriers[i].memory.pGpuMemory = NextGpuMemory(barrierInfo.pMemoryBarriers[i].memory.pGpuMemory);
        }
        nextBarrierInfo.pMemoryBarriers = &memoryBarriers[0];
        for (uint32 i = 0; i < barrierInfo.imageBarrierCount; i++)
        {
            imageBarriers[i] = barrierInfo.pImageBarriers[i];
            imageBarriers[i].pImage = NextImage(barrierInfo.pImageBarriers[i].pImage);
        }
        nextBarrierInfo.pImageBarriers = &imageBarriers[0];
        BeginFuncInfo funcInfo;
        funcInfo.funcId = InterfaceFunc::CmdBufferCmdReleaseThenAcquire;
        funcInfo.objectId = m_objectId;
        funcInfo.preCallTime = m_pPlatform->GetTime();
        m_pNextLayer->CmdReleaseThenAcquire(nextBarrierInfo);
        funcInfo.postCallTime = m_pPlatform->GetTime();
        LogContext* pLogContext = nullptr;
        if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
        {
            pLogContext->BeginInput();
            // Log the caller's original info (this-layer objects), not the patched nextBarrierInfo.
            pLogContext->KeyAndStruct("barrierInfo", barrierInfo);
            pLogContext->EndInput();
            m_pPlatform->LogEndFunc(pLogContext);
        }
    }
}
// =====================================================================================================================
void CmdBuffer::CmdCopyMemory(
const IGpuMemory& srcGpuMemory,
const IGpuMemory& dstGpuMemory,
uint32 regionCount,
const MemoryCopyRegion* pRegions)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdCopyMemory;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdCopyMemory(*NextGpuMemory(&srcGpuMemory), *NextGpuMemory(&dstGpuMemory), regionCount, pRegions);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("srcGpuMemory", &srcGpuMemory);
pLogContext->KeyAndObject("dstGpuMemory", &dstGpuMemory);
pLogContext->KeyAndBeginList("regions", false);
for (uint32 idx = 0; idx < regionCount; ++idx)
{
pLogContext->Struct(pRegions[idx]);
}
pLogContext->EndList();
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdCopyImage(
const IImage& srcImage,
ImageLayout srcImageLayout,
const IImage& dstImage,
ImageLayout dstImageLayout,
uint32 regionCount,
const ImageCopyRegion* pRegions,
const Rect* pScissorRect,
uint32 flags)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdCopyImage;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdCopyImage(*NextImage(&srcImage),
srcImageLayout,
*NextImage(&dstImage),
dstImageLayout,
regionCount,
pRegions,
pScissorRect,
flags);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("srcImage", &srcImage);
pLogContext->KeyAndStruct("srcImageLayout", srcImageLayout);
pLogContext->KeyAndObject("dstImage", &dstImage);
pLogContext->KeyAndStruct("dstImageLayout", dstImageLayout);
pLogContext->KeyAndBeginList("regions", false);
for (uint32 idx = 0; idx < regionCount; ++idx)
{
pLogContext->Struct(pRegions[idx]);
}
pLogContext->EndList();
if (pScissorRect != nullptr)
{
pLogContext->KeyAndStruct("scissorRect", *pScissorRect);
}
pLogContext->KeyAndCopyControlFlags("flags", flags);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdCopyMemoryToImage(
const IGpuMemory& srcGpuMemory,
const IImage& dstImage,
ImageLayout dstImageLayout,
uint32 regionCount,
const MemoryImageCopyRegion* pRegions)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdCopyMemoryToImage;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdCopyMemoryToImage(*NextGpuMemory(&srcGpuMemory),
*NextImage(&dstImage),
dstImageLayout,
regionCount,
pRegions);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("srcGpuMemory", &srcGpuMemory);
pLogContext->KeyAndObject("dstImage", &dstImage);
pLogContext->KeyAndStruct("dstImageLayout", dstImageLayout);
pLogContext->KeyAndBeginList("regions", false);
for (uint32 idx = 0; idx < regionCount; ++idx)
{
pLogContext->Struct(pRegions[idx]);
}
pLogContext->EndList();
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdCopyImageToMemory(
const IImage& srcImage,
ImageLayout srcImageLayout,
const IGpuMemory& dstGpuMemory,
uint32 regionCount,
const MemoryImageCopyRegion* pRegions)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdCopyImageToMemory;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdCopyImageToMemory(*NextImage(&srcImage),
srcImageLayout,
*NextGpuMemory(&dstGpuMemory),
regionCount,
pRegions);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("srcImage", &srcImage);
pLogContext->KeyAndStruct("srcImageLayout", srcImageLayout);
pLogContext->KeyAndObject("dstGpuMemory", &dstGpuMemory);
pLogContext->KeyAndBeginList("regions", false);
for (uint32 idx = 0; idx < regionCount; ++idx)
{
pLogContext->Struct(pRegions[idx]);
}
pLogContext->EndList();
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdCopyMemoryToTiledImage(
const IGpuMemory& srcGpuMemory,
const IImage& dstImage,
ImageLayout dstImageLayout,
uint32 regionCount,
const MemoryTiledImageCopyRegion* pRegions)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdCopyMemoryToTiledImage;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdCopyMemoryToTiledImage(*NextGpuMemory(&srcGpuMemory),
*NextImage(&dstImage),
dstImageLayout,
regionCount,
pRegions);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("srcGpuMemory", &srcGpuMemory);
pLogContext->KeyAndObject("dstImage", &dstImage);
pLogContext->KeyAndStruct("dstImageLayout", dstImageLayout);
pLogContext->KeyAndBeginList("regions", false);
for (uint32 idx = 0; idx < regionCount; ++idx)
{
pLogContext->Struct(pRegions[idx]);
}
pLogContext->EndList();
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdCopyTiledImageToMemory(
const IImage& srcImage,
ImageLayout srcImageLayout,
const IGpuMemory& dstGpuMemory,
uint32 regionCount,
const MemoryTiledImageCopyRegion* pRegions)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdCopyTiledImageToMemory;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdCopyTiledImageToMemory(*NextImage(&srcImage),
srcImageLayout,
*NextGpuMemory(&dstGpuMemory),
regionCount,
pRegions);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("srcImage", &srcImage);
pLogContext->KeyAndStruct("srcImageLayout", srcImageLayout);
pLogContext->KeyAndObject("dstGpuMemory", &dstGpuMemory);
pLogContext->KeyAndBeginList("regions", false);
for (uint32 idx = 0; idx < regionCount; ++idx)
{
pLogContext->Struct(pRegions[idx]);
}
pLogContext->EndList();
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdCopyTypedBuffer(
const IGpuMemory& srcGpuMemory,
const IGpuMemory& dstGpuMemory,
uint32 regionCount,
const TypedBufferCopyRegion* pRegions)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdCopyTypedBuffer;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdCopyTypedBuffer(*NextGpuMemory(&srcGpuMemory),
*NextGpuMemory(&dstGpuMemory),
regionCount,
pRegions);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("srcGpuMemory", &srcGpuMemory);
pLogContext->KeyAndObject("dstGpuMemory", &dstGpuMemory);
pLogContext->KeyAndBeginList("regions", false);
for (uint32 idx = 0; idx < regionCount; ++idx)
{
pLogContext->Struct(pRegions[idx]);
}
pLogContext->EndList();
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
// Forwards CmdCopyRegisterToMemory to the next layer (unwrapping the destination memory), timing the call and
// logging its inputs when logging is active.
void CmdBuffer::CmdCopyRegisterToMemory(
    uint32            srcRegisterOffset,
    const IGpuMemory& dstGpuMemory,
    gpusize           dstOffset)
{
    BeginFuncInfo info;
    info.funcId       = InterfaceFunc::CmdBufferCmdCopyRegisterToMemory;
    info.objectId     = m_objectId;
    info.preCallTime  = m_pPlatform->GetTime();
    m_pNextLayer->CmdCopyRegisterToMemory(srcRegisterOffset, *NextGpuMemory(&dstGpuMemory), dstOffset);
    info.postCallTime = m_pPlatform->GetTime();

    LogContext* pCtx = nullptr;
    if (m_pPlatform->LogBeginFunc(info, &pCtx))
    {
        pCtx->BeginInput();
        pCtx->KeyAndValue("srcRegisterOffset", srcRegisterOffset);
        pCtx->KeyAndObject("dstGpuMemory", &dstGpuMemory);
        pCtx->KeyAndValue("dstOffset", dstOffset);
        pCtx->EndInput();
        m_pPlatform->LogEndFunc(pCtx);
    }
}
// =====================================================================================================================
void CmdBuffer::CmdScaledCopyImage(
const ScaledCopyInfo& copyInfo)
{
ScaledCopyInfo nextCopyInfo = copyInfo;
nextCopyInfo.pSrcImage = NextImage(copyInfo.pSrcImage);
nextCopyInfo.pDstImage = NextImage(copyInfo.pDstImage);
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdScaledCopyImage;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdScaledCopyImage(nextCopyInfo);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndStruct("copyInfo", copyInfo);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdGenerateMipmaps(
const GenMipmapsInfo& genInfo)
{
GenMipmapsInfo nextGenInfo = genInfo;
nextGenInfo.pImage = NextImage(genInfo.pImage);
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdGenerateMipmaps;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdGenerateMipmaps(nextGenInfo);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndStruct("genInfo", genInfo);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdColorSpaceConversionCopy(
const IImage& srcImage,
ImageLayout srcImageLayout,
const IImage& dstImage,
ImageLayout dstImageLayout,
uint32 regionCount,
const ColorSpaceConversionRegion* pRegions,
TexFilter filter,
const ColorSpaceConversionTable& cscTable)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdColorSpaceConversionCopy;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdColorSpaceConversionCopy(*NextImage(&srcImage),
srcImageLayout,
*NextImage(&dstImage),
dstImageLayout,
regionCount,
pRegions,
filter,
cscTable);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("srcImage", &srcImage);
pLogContext->KeyAndStruct("srcImageLayout", srcImageLayout);
pLogContext->KeyAndObject("dstImage", &dstImage);
pLogContext->KeyAndStruct("dstImageLayout", dstImageLayout);
pLogContext->KeyAndBeginList("regions", false);
for (uint32 idx = 0; idx < regionCount; ++idx)
{
pLogContext->Struct(pRegions[idx]);
}
pLogContext->EndList();
pLogContext->KeyAndStruct("filter", filter);
pLogContext->KeyAndStruct("cscTable", cscTable);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdCloneImageData(
const IImage& srcImage,
const IImage& dstImage)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdCloneImageData;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdCloneImageData(*NextImage(&srcImage), *NextImage(&dstImage));
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("srcImage", &srcImage);
pLogContext->KeyAndObject("dstImage", &dstImage);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdUpdateMemory(
const IGpuMemory& dstGpuMemory,
gpusize dstOffset,
gpusize dataSize,
const uint32* pData)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdUpdateMemory;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdUpdateMemory(*NextGpuMemory(&dstGpuMemory), dstOffset, dataSize, pData);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("dstGpuMemory", &dstGpuMemory);
pLogContext->KeyAndValue("dstOffset", dstOffset);
pLogContext->KeyAndBeginList("data", false);
for (gpusize idx = 0; idx < NumBytesToNumDwords(static_cast<uint32>(dataSize)); ++idx)
{
pLogContext->Value(pData[idx]);
}
pLogContext->EndList();
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdUpdateBusAddressableMemoryMarker(
const IGpuMemory& dstGpuMemory,
gpusize offset,
uint32 value)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdUpdateBusAddressableMemoryMarker;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdUpdateBusAddressableMemoryMarker(*NextGpuMemory(&dstGpuMemory), offset, value);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("dstGpuMemory", &dstGpuMemory);
pLogContext->KeyAndValue("offset", offset);
pLogContext->KeyAndValue("value", value);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdFillMemory(
const IGpuMemory& dstGpuMemory,
gpusize dstOffset,
gpusize fillSize,
uint32 data)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdFillMemory;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdFillMemory(*NextGpuMemory(&dstGpuMemory), dstOffset, fillSize, data);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("dstGpuMemory", &dstGpuMemory);
pLogContext->KeyAndValue("dstOffset", dstOffset);
pLogContext->KeyAndValue("fillSize", fillSize);
pLogContext->KeyAndValue("data", data);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdClearColorBuffer(
const IGpuMemory& gpuMemory,
const ClearColor& color,
SwizzledFormat bufferFormat,
uint32 bufferOffset,
uint32 bufferExtent,
uint32 rangeCount,
const Range* pRanges)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdClearColorBuffer;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdClearColorBuffer(*NextGpuMemory(&gpuMemory),
color,
bufferFormat,
bufferOffset,
bufferExtent,
rangeCount,
pRanges);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("gpuMemory", &gpuMemory);
pLogContext->KeyAndStruct("color", color);
pLogContext->KeyAndStruct("bufferFormat", bufferFormat);
pLogContext->KeyAndValue("bufferOffset", bufferOffset);
pLogContext->KeyAndValue("bufferExtent", bufferExtent);
pLogContext->KeyAndBeginList("ranges", false);
for (uint32 idx = 0; idx < rangeCount; ++idx)
{
pLogContext->Struct(pRanges[idx]);
}
pLogContext->EndList();
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
// Forwards CmdClearBoundColorTargets to the next layer, timing the call and logging its inputs when logging is
// active.
void CmdBuffer::CmdClearBoundColorTargets(
    uint32                        colorTargetCount,
    const BoundColorTarget*       pBoundColorTargets,
    uint32                        regionCount,
    const ClearBoundTargetRegion* pClearRegions)
{
    BeginFuncInfo info;
    info.funcId       = InterfaceFunc::CmdBufferCmdClearBoundColorTargets;
    info.objectId     = m_objectId;
    info.preCallTime  = m_pPlatform->GetTime();
    m_pNextLayer->CmdClearBoundColorTargets(colorTargetCount, pBoundColorTargets, regionCount, pClearRegions);
    info.postCallTime = m_pPlatform->GetTime();

    LogContext* pCtx = nullptr;
    if (m_pPlatform->LogBeginFunc(info, &pCtx))
    {
        pCtx->BeginInput();
        pCtx->KeyAndBeginList("boundColorTargets", false);
        for (uint32 i = 0; i < colorTargetCount; ++i)
        {
            pCtx->Struct(pBoundColorTargets[i]);
        }
        pCtx->EndList();
        pCtx->KeyAndBeginList("regions", false);
        for (uint32 i = 0; i < regionCount; ++i)
        {
            pCtx->Struct(pClearRegions[i]);
        }
        pCtx->EndList();
        pCtx->EndInput();
        m_pPlatform->LogEndFunc(pCtx);
    }
}
// =====================================================================================================================
void CmdBuffer::CmdClearColorImage(
const IImage& image,
ImageLayout imageLayout,
const ClearColor& color,
uint32 rangeCount,
const SubresRange* pRanges,
uint32 boxCount,
const Box* pBoxes,
uint32 flags)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdClearColorImage;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdClearColorImage(*NextImage(&image),
imageLayout,
color,
rangeCount,
pRanges,
boxCount,
pBoxes,
flags);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("image", &image);
pLogContext->KeyAndStruct("imageLayout", imageLayout);
pLogContext->KeyAndStruct("color", color);
pLogContext->KeyAndBeginList("ranges", false);
for (uint32 idx = 0; idx < rangeCount; ++idx)
{
pLogContext->Struct(pRanges[idx]);
}
pLogContext->EndList();
pLogContext->KeyAndBeginList("boxes", false);
for (uint32 idx = 0; idx < boxCount; ++idx)
{
pLogContext->Struct(pBoxes[idx]);
}
pLogContext->EndList();
pLogContext->KeyAndClearColorImageFlags("flags", flags);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdClearBoundDepthStencilTargets(
float depth,
uint8 stencil,
uint8 stencilWriteMask,
uint32 samples,
uint32 fragments,
DepthStencilSelectFlags flag,
uint32 regionCount,
const ClearBoundTargetRegion* pClearRegions)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdClearBoundDepthStencilTargets;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdClearBoundDepthStencilTargets(depth,
stencil,
stencilWriteMask,
samples,
fragments,
flag,
regionCount,
pClearRegions);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndValue("depth", depth);
pLogContext->KeyAndValue("stencil", stencil);
pLogContext->KeyAndValue("stencilWriteMask", stencilWriteMask);
pLogContext->KeyAndValue("samples", samples);
pLogContext->KeyAndValue("fragments", fragments);
pLogContext->KeyAndStruct("flags", flag);
pLogContext->KeyAndBeginList("regions", false);
for (uint32 idx = 0; idx < regionCount; ++idx)
{
pLogContext->Struct(pClearRegions[idx]);
}
pLogContext->EndList();
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdClearDepthStencil(
const IImage& image,
ImageLayout depthLayout,
ImageLayout stencilLayout,
float depth,
uint8 stencil,
uint8 stencilWriteMask,
uint32 rangeCount,
const SubresRange* pRanges,
uint32 rectCount,
const Rect* pRects,
uint32 flags)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdClearDepthStencil;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdClearDepthStencil(*NextImage(&image),
depthLayout,
stencilLayout,
depth,
stencil,
stencilWriteMask,
rangeCount,
pRanges,
rectCount,
pRects,
flags);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("image", &image);
pLogContext->KeyAndStruct("depthLayout", depthLayout);
pLogContext->KeyAndStruct("stencilLayout", stencilLayout);
pLogContext->KeyAndValue("depth", depth);
pLogContext->KeyAndValue("stencil", stencil);
pLogContext->KeyAndValue("stencilWriteMask", stencilWriteMask);
pLogContext->KeyAndBeginList("ranges", false);
for (uint32 idx = 0; idx < rangeCount; ++idx)
{
pLogContext->Struct(pRanges[idx]);
}
pLogContext->EndList();
pLogContext->KeyAndBeginList("rects", false);
for (uint32 idx = 0; idx < rectCount; ++idx)
{
pLogContext->Struct(pRects[idx]);
}
pLogContext->EndList();
pLogContext->KeyAndClearDepthStencilFlags("flags", flags);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdClearBufferView(
const IGpuMemory& gpuMemory,
const ClearColor& color,
const void* pBufferViewSrd,
uint32 rangeCount,
const Range* pRanges)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdClearBufferView;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdClearBufferView(*NextGpuMemory(&gpuMemory), color, pBufferViewSrd, rangeCount, pRanges);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("gpuMemory", &gpuMemory);
pLogContext->KeyAndStruct("color", color);
pLogContext->KeyAndBeginList("ranges", false);
for (uint32 idx = 0; idx < rangeCount; ++idx)
{
pLogContext->Struct(pRanges[idx]);
}
pLogContext->EndList();
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdClearImageView(
const IImage& image,
ImageLayout imageLayout,
const ClearColor& color,
const void* pImageViewSrd,
uint32 rectCount,
const Rect* pRects)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdClearImageView;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdClearImageView(*NextImage(&image), imageLayout, color, pImageViewSrd, rectCount, pRects);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("image", &image);
pLogContext->KeyAndStruct("imageLayout", imageLayout);
pLogContext->KeyAndStruct("color", color);
pLogContext->KeyAndBeginList("rects", false);
for (uint32 idx = 0; idx < rectCount; ++idx)
{
pLogContext->Struct(pRects[idx]);
}
pLogContext->EndList();
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdResolveImage(
const IImage& srcImage,
ImageLayout srcImageLayout,
const IImage& dstImage,
ImageLayout dstImageLayout,
ResolveMode resolveMode,
uint32 regionCount,
const ImageResolveRegion* pRegions,
uint32 flags)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdResolveImage;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdResolveImage(*NextImage(&srcImage),
srcImageLayout,
*NextImage(&dstImage),
dstImageLayout,
resolveMode,
regionCount,
pRegions,
flags);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("srcImage", &srcImage);
pLogContext->KeyAndStruct("srcImageLayout", srcImageLayout);
pLogContext->KeyAndObject("dstImage", &dstImage);
pLogContext->KeyAndStruct("dstImageLayout", dstImageLayout);
pLogContext->KeyAndEnum("resolveMode", resolveMode);
pLogContext->KeyAndBeginList("regions", false);
for (uint32 idx = 0; idx < regionCount; ++idx)
{
pLogContext->Struct(pRegions[idx]);
}
pLogContext->EndList();
pLogContext->KeyAndResolveImageFlags("flags", flags);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdSetEvent(
const IGpuEvent& gpuEvent,
HwPipePoint setPoint)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdSetEvent;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdSetEvent(*NextGpuEvent(&gpuEvent), setPoint);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("gpuEvent", &gpuEvent);
pLogContext->KeyAndEnum("setPoint", setPoint);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdResetEvent(
const IGpuEvent& gpuEvent,
HwPipePoint resetPoint)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdResetEvent;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdResetEvent(*NextGpuEvent(&gpuEvent), resetPoint);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("gpuEvent", &gpuEvent);
pLogContext->KeyAndEnum("resetPoint", resetPoint);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdPredicateEvent(
const IGpuEvent& gpuEvent)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdPredicateEvent;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdPredicateEvent(*NextGpuEvent(&gpuEvent));
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("gpuEvent", &gpuEvent);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdMemoryAtomic(
const IGpuMemory& dstGpuMemory,
gpusize dstOffset,
uint64 srcData,
AtomicOp atomicOp)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdMemoryAtomic;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdMemoryAtomic(*NextGpuMemory(&dstGpuMemory), dstOffset, srcData, atomicOp);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("dstGpuMemory", &dstGpuMemory);
pLogContext->KeyAndValue("dstOffset", dstOffset);
pLogContext->KeyAndValue("srcData", srcData);
pLogContext->KeyAndEnum("atomicOp", atomicOp);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdBeginQuery(
const IQueryPool& queryPool,
QueryType queryType,
uint32 slot,
QueryControlFlags flags)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdBeginQuery;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdBeginQuery(*NextQueryPool(&queryPool), queryType, slot, flags);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("queryPool", &queryPool);
pLogContext->KeyAndEnum("queryType", queryType);
pLogContext->KeyAndValue("slot", slot);
pLogContext->KeyAndStruct("flags", flags);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdEndQuery(
const IQueryPool& queryPool,
QueryType queryType,
uint32 slot)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdEndQuery;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdEndQuery(*NextQueryPool(&queryPool), queryType, slot);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("queryPool", &queryPool);
pLogContext->KeyAndEnum("queryType", queryType);
pLogContext->KeyAndValue("slot", slot);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdResolveQuery(
const IQueryPool& queryPool,
QueryResultFlags flags,
QueryType queryType,
uint32 startQuery,
uint32 queryCount,
const IGpuMemory& dstGpuMemory,
gpusize dstOffset,
gpusize dstStride)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdResolveQuery;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdResolveQuery(*NextQueryPool(&queryPool),
flags,
queryType,
startQuery,
queryCount,
*NextGpuMemory(&dstGpuMemory),
dstOffset,
dstStride);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("queryPool", &queryPool);
pLogContext->KeyAndQueryResultFlags("flags", flags);
pLogContext->KeyAndEnum("queryType", queryType);
pLogContext->KeyAndValue("startQuery", startQuery);
pLogContext->KeyAndValue("queryCount", queryCount);
pLogContext->KeyAndObject("dstGpuMemory", &dstGpuMemory);
pLogContext->KeyAndValue("dstOffset", dstOffset);
pLogContext->KeyAndValue("dstStride", dstStride);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdResetQueryPool(
const IQueryPool& queryPool,
uint32 startQuery,
uint32 queryCount)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdResetQueryPool;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdResetQueryPool(*NextQueryPool(&queryPool), startQuery, queryCount);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("queryPool", &queryPool);
pLogContext->KeyAndValue("startQuery", startQuery);
pLogContext->KeyAndValue("queryCount", queryCount);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
// Times the next layer's CmdWriteTimestamp call and logs its inputs.
void CmdBuffer::CmdWriteTimestamp(
    HwPipePoint       pipePoint,
    const IGpuMemory& dstGpuMemory,
    gpusize           dstOffset)
{
    BeginFuncInfo info;
    info.objectId    = m_objectId;
    info.funcId      = InterfaceFunc::CmdBufferCmdWriteTimestamp;
    info.preCallTime = m_pPlatform->GetTime();
    m_pNextLayer->CmdWriteTimestamp(pipePoint, *NextGpuMemory(&dstGpuMemory), dstOffset);
    info.postCallTime = m_pPlatform->GetTime();

    LogContext* pCtx = nullptr;
    if (m_pPlatform->LogBeginFunc(info, &pCtx))
    {
        pCtx->BeginInput();
        pCtx->KeyAndEnum("pipePoint", pipePoint);
        pCtx->KeyAndObject("dstGpuMemory", &dstGpuMemory);
        pCtx->KeyAndValue("dstOffset", dstOffset);
        pCtx->EndInput();
        m_pPlatform->LogEndFunc(pCtx);
    }
}
// =====================================================================================================================
// Times the next layer's CmdWriteImmediate call and logs its inputs.
void CmdBuffer::CmdWriteImmediate(
    HwPipePoint        pipePoint,
    uint64             data,
    ImmediateDataWidth dataSize,
    const gpusize      address)
{
    BeginFuncInfo info;
    info.objectId    = m_objectId;
    info.funcId      = InterfaceFunc::CmdBufferCmdWriteImmediate;
    info.preCallTime = m_pPlatform->GetTime();
    m_pNextLayer->CmdWriteImmediate(pipePoint, data, dataSize, address);
    info.postCallTime = m_pPlatform->GetTime();

    LogContext* pCtx = nullptr;
    if (m_pPlatform->LogBeginFunc(info, &pCtx))
    {
        pCtx->BeginInput();
        pCtx->KeyAndEnum("pipePoint", pipePoint);
        pCtx->KeyAndValue("data", data);
        pCtx->KeyAndEnum("dataSize", dataSize);
        pCtx->KeyAndValue("address", address);
        pCtx->EndInput();
        m_pPlatform->LogEndFunc(pCtx);
    }
}
// =====================================================================================================================
void CmdBuffer::CmdLoadBufferFilledSizes(
const gpusize (&gpuVirtAddr)[MaxStreamOutTargets])
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdLoadBufferFilledSizes;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdLoadBufferFilledSizes(gpuVirtAddr);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndBeginList("gpuVirtAddr", false);
for (uint32 idx = 0; idx < MaxStreamOutTargets; ++idx)
{
pLogContext->Value(gpuVirtAddr[idx]);
}
pLogContext->EndList();
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdSaveBufferFilledSizes(
const gpusize (&gpuVirtAddr)[MaxStreamOutTargets])
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdSaveBufferFilledSizes;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdSaveBufferFilledSizes(gpuVirtAddr);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndBeginList("gpuVirtAddr", false);
for (uint32 idx = 0; idx < MaxStreamOutTargets; ++idx)
{
pLogContext->Value(gpuVirtAddr[idx]);
}
pLogContext->EndList();
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
// Times the next layer's CmdSetBufferFilledSize call and logs its inputs.
void CmdBuffer::CmdSetBufferFilledSize(
    uint32 bufferId,
    uint32 offset)
{
    BeginFuncInfo info;
    info.objectId    = m_objectId;
    info.funcId      = InterfaceFunc::CmdBufferCmdSetBufferFilledSize;
    info.preCallTime = m_pPlatform->GetTime();
    m_pNextLayer->CmdSetBufferFilledSize(bufferId, offset);
    info.postCallTime = m_pPlatform->GetTime();

    LogContext* pCtx = nullptr;
    if (m_pPlatform->LogBeginFunc(info, &pCtx))
    {
        pCtx->BeginInput();
        pCtx->KeyAndValue("bufferId", bufferId);
        pCtx->KeyAndValue("offset", offset);
        pCtx->EndInput();
        m_pPlatform->LogEndFunc(pCtx);
    }
}
// =====================================================================================================================
// Times the next layer's CmdBindBorderColorPalette call and logs its inputs.
void CmdBuffer::CmdBindBorderColorPalette(
    PipelineBindPoint          pipelineBindPoint,
    const IBorderColorPalette* pPalette)
{
    BeginFuncInfo info;
    info.objectId    = m_objectId;
    info.funcId      = InterfaceFunc::CmdBufferCmdBindBorderColorPalette;
    info.preCallTime = m_pPlatform->GetTime();
    // pPalette may be null (unbind); NextBorderColorPalette() handles that.
    m_pNextLayer->CmdBindBorderColorPalette(pipelineBindPoint, NextBorderColorPalette(pPalette));
    info.postCallTime = m_pPlatform->GetTime();

    LogContext* pCtx = nullptr;
    if (m_pPlatform->LogBeginFunc(info, &pCtx))
    {
        pCtx->BeginInput();
        pCtx->KeyAndEnum("pipelineBindPoint", pipelineBindPoint);
        pCtx->KeyAndObject("palette", pPalette);
        pCtx->EndInput();
        m_pPlatform->LogEndFunc(pCtx);
    }
}
// =====================================================================================================================
// Times the next layer's CmdSetPredication call and logs its inputs.
void CmdBuffer::CmdSetPredication(
    IQueryPool*       pQueryPool,
    uint32            slot,
    const IGpuMemory* pGpuMemory,
    gpusize           offset,
    PredicateType     predType,
    bool              predPolarity,
    bool              waitResults,
    bool              accumulateData)
{
    BeginFuncInfo funcInfo;
    funcInfo.funcId       = InterfaceFunc::CmdBufferCmdSetPredication;
    funcInfo.objectId     = m_objectId;
    funcInfo.preCallTime  = m_pPlatform->GetTime();
    // Fix: unwrap the logger-layer GPU memory object before forwarding, matching every other
    // method in this file (previously the wrapped pointer was passed straight through).
    // Both pointers may be null; the Next*() helpers are used on nullable pointers elsewhere
    // in this file (see NextQueryPool here and NextBorderColorPalette in
    // CmdBindBorderColorPalette).
    m_pNextLayer->CmdSetPredication(NextQueryPool(pQueryPool),
                                    slot,
                                    NextGpuMemory(pGpuMemory),
                                    offset,
                                    predType,
                                    predPolarity,
                                    waitResults,
                                    accumulateData);
    funcInfo.postCallTime = m_pPlatform->GetTime();
    LogContext* pLogContext = nullptr;
    if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
    {
        pLogContext->BeginInput();
        pLogContext->KeyAndObject("queryPool", pQueryPool);
        pLogContext->KeyAndValue("slot", slot);
        // Fix: key renamed from "GpuMemory" to "gpuMemory" to follow the lowerCamelCase key
        // convention used by every other logged GPU memory argument in this file.
        pLogContext->KeyAndObject("gpuMemory", pGpuMemory);
        pLogContext->KeyAndValue("offset", offset);
        pLogContext->KeyAndEnum("predType", predType);
        pLogContext->KeyAndValue("predPolarity", predPolarity);
        pLogContext->KeyAndValue("waitResults", waitResults);
        pLogContext->KeyAndValue("accumulateData", accumulateData);
        pLogContext->EndInput();
        m_pPlatform->LogEndFunc(pLogContext);
    }
}
// =====================================================================================================================
void CmdBuffer::CmdSuspendPredication(
bool suspend)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdSuspendPredication;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdSuspendPredication(suspend);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndValue("suspend", suspend);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdIf(
const IGpuMemory& gpuMemory,
gpusize offset,
uint64 data,
uint64 mask,
CompareFunc compareFunc)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdIf;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdIf(*NextGpuMemory(&gpuMemory), offset, data, mask, compareFunc);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("gpuMemory", &gpuMemory);
pLogContext->KeyAndValue("offset", offset);
pLogContext->KeyAndValue("data", data);
pLogContext->KeyAndValue("mask", mask);
pLogContext->KeyAndEnum("compareFunc", compareFunc);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdElse()
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdElse;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdElse();
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdEndIf()
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdEndIf;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdEndIf();
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdWhile(
const IGpuMemory& gpuMemory,
gpusize offset,
uint64 data,
uint64 mask,
CompareFunc compareFunc)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdWhile;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdWhile(*NextGpuMemory(&gpuMemory), offset, data, mask, compareFunc);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("gpuMemory", &gpuMemory);
pLogContext->KeyAndValue("offset", offset);
pLogContext->KeyAndValue("data", data);
pLogContext->KeyAndValue("mask", mask);
pLogContext->KeyAndEnum("compareFunc", compareFunc);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdEndWhile()
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdEndWhile;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdEndWhile();
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
// Times the next layer's CmdWaitRegisterValue call and logs its inputs.
void CmdBuffer::CmdWaitRegisterValue(
    uint32      registerOffset,
    uint32      data,
    uint32      mask,
    CompareFunc compareFunc)
{
    BeginFuncInfo info;
    info.objectId    = m_objectId;
    info.funcId      = InterfaceFunc::CmdBufferCmdWaitRegisterValue;
    info.preCallTime = m_pPlatform->GetTime();
    m_pNextLayer->CmdWaitRegisterValue(registerOffset, data, mask, compareFunc);
    info.postCallTime = m_pPlatform->GetTime();

    LogContext* pCtx = nullptr;
    if (m_pPlatform->LogBeginFunc(info, &pCtx))
    {
        pCtx->BeginInput();
        pCtx->KeyAndValue("registerOffset", registerOffset);
        pCtx->KeyAndValue("data", data);
        pCtx->KeyAndValue("mask", mask);
        pCtx->KeyAndEnum("compareFunc", compareFunc);
        pCtx->EndInput();
        m_pPlatform->LogEndFunc(pCtx);
    }
}
// =====================================================================================================================
void CmdBuffer::CmdWaitMemoryValue(
const IGpuMemory& gpuMemory,
gpusize offset,
uint32 data,
uint32 mask,
CompareFunc compareFunc)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdWaitMemoryValue;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdWaitMemoryValue(*NextGpuMemory(&gpuMemory), offset, data, mask, compareFunc);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("gpuMemory", &gpuMemory);
pLogContext->KeyAndValue("offset", offset);
pLogContext->KeyAndValue("data", data);
pLogContext->KeyAndValue("mask", mask);
pLogContext->KeyAndEnum("compareFunc", compareFunc);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdWaitBusAddressableMemoryMarker(
const IGpuMemory& gpuMemory,
uint32 data,
uint32 mask,
CompareFunc compareFunc)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdWaitBusAddressableMemoryMarker;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdWaitBusAddressableMemoryMarker(*NextGpuMemory(&gpuMemory), data, mask, compareFunc);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("gpuMemory", &gpuMemory);
pLogContext->KeyAndValue("data", data);
pLogContext->KeyAndValue("mask", mask);
pLogContext->KeyAndEnum("compareFunc", compareFunc);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdUpdateHiSPretests(
const IImage* pImage,
const HiSPretests& pretests,
uint32 firstMip,
uint32 numMips)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdUpdateHiSPretests;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdUpdateHiSPretests(NextImage(pImage), pretests, firstMip, numMips);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("image", pImage);
pLogContext->KeyAndStruct("pretests", pretests);
pLogContext->KeyAndValue("firstMip", firstMip);
pLogContext->KeyAndValue("numMips", numMips);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdFlglSync()
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdFlglSync;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdFlglSync();
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdFlglEnable()
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdFlglEnable;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdFlglEnable();
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdFlglDisable()
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdFlglDisable;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdFlglDisable();
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
// Unwraps the perf-experiment object and forwards directly to the next layer.
void CmdBuffer::CmdBeginPerfExperiment(
    IPerfExperiment* pPerfExperiment)
{
    // This function is not logged because it should only be called by other debug tools.
    m_pNextLayer->CmdBeginPerfExperiment(NextPerfExperiment(pPerfExperiment));
}
// =====================================================================================================================
// Unwraps the perf-experiment object and forwards directly to the next layer.
void CmdBuffer::CmdUpdatePerfExperimentSqttTokenMask(
    IPerfExperiment*              pPerfExperiment,
    const ThreadTraceTokenConfig& sqttTokenConfig)
{
    // This function is not logged because it should only be called by other debug tools.
    // Consistency fix: use m_pNextLayer directly like every other method in this file
    // (this one previously went through GetNextLayer()).
    m_pNextLayer->CmdUpdatePerfExperimentSqttTokenMask(NextPerfExperiment(pPerfExperiment), sqttTokenConfig);
}
// =====================================================================================================================
void CmdBuffer::CmdUpdateSqttTokenMask(
const ThreadTraceTokenConfig& sqttTokenConfig)
{
// This function is not logged because it should only be called by other debug tools.
GetNextLayer()->CmdUpdateSqttTokenMask(sqttTokenConfig);
}
// =====================================================================================================================
// Unwraps the perf-experiment object and forwards directly to the next layer.
void CmdBuffer::CmdEndPerfExperiment(
    IPerfExperiment* pPerfExperiment)
{
    // This function is not logged because it should only be called by other debug tools.
    m_pNextLayer->CmdEndPerfExperiment(NextPerfExperiment(pPerfExperiment));
}
// =====================================================================================================================
// Forwards a perf-trace marker directly to the next layer.
void CmdBuffer::CmdInsertTraceMarker(
    PerfTraceMarkerType markerType,
    uint32              markerData)
{
    // This function is not logged because it should only be called by other debug tools.
    m_pNextLayer->CmdInsertTraceMarker(markerType, markerData);
}
// =====================================================================================================================
// Forwards an RGP trace marker payload directly to the next layer.
void CmdBuffer::CmdInsertRgpTraceMarker(
    uint32      numDwords,
    const void* pData)
{
    // This function is not logged because it should only be called by other debug tools.
    m_pNextLayer->CmdInsertRgpTraceMarker(numDwords, pData);
}
// =====================================================================================================================
void CmdBuffer::CmdLoadCeRam(
const IGpuMemory& srcGpuMemory,
gpusize memOffset,
uint32 ramOffset,
uint32 dwordSize)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdLoadCeRam;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdLoadCeRam(*NextGpuMemory(&srcGpuMemory), memOffset, ramOffset, dwordSize);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("srcGpuMemory", &srcGpuMemory);
pLogContext->KeyAndValue("memOffset", memOffset);
pLogContext->KeyAndValue("ramOffset", ramOffset);
pLogContext->KeyAndValue("dwordSize", dwordSize);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
void CmdBuffer::CmdDumpCeRam(
const IGpuMemory& dstGpuMemory,
gpusize memOffset,
uint32 ramOffset,
uint32 dwordSize,
uint32 currRingPos,
uint32 ringSize)
{
BeginFuncInfo funcInfo;
funcInfo.funcId = InterfaceFunc::CmdBufferCmdDumpCeRam;
funcInfo.objectId = m_objectId;
funcInfo.preCallTime = m_pPlatform->GetTime();
m_pNextLayer->CmdDumpCeRam(*NextGpuMemory(&dstGpuMemory), memOffset, ramOffset, dwordSize, currRingPos, ringSize);
funcInfo.postCallTime = m_pPlatform->GetTime();
LogContext* pLogContext = nullptr;
if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
{
pLogContext->BeginInput();
pLogContext->KeyAndObject("dstGpuMemory", &dstGpuMemory);
pLogContext->KeyAndValue("memOffset", memOffset);
pLogContext->KeyAndValue("ramOffset", ramOffset);
pLogContext->KeyAndValue("dwordSize", dwordSize);
pLogContext->KeyAndValue("currRingPos", currRingPos);
pLogContext->KeyAndValue("ringSize", ringSize);
pLogContext->EndInput();
m_pPlatform->LogEndFunc(pLogContext);
}
}
// =====================================================================================================================
// Forwards CmdWriteCeRam to the next layer; the log records the full source payload, one dword per list entry.
void CmdBuffer::CmdWriteCeRam(
    const void* pSrcData,
    uint32      ramOffset,
    uint32      dwordSize)
{
    BeginFuncInfo funcInfo;
    funcInfo.funcId       = InterfaceFunc::CmdBufferCmdWriteCeRam;
    funcInfo.objectId     = m_objectId;
    funcInfo.preCallTime  = m_pPlatform->GetTime();
    m_pNextLayer->CmdWriteCeRam(pSrcData, ramOffset, dwordSize);
    funcInfo.postCallTime = m_pPlatform->GetTime();

    LogContext* pLogContext = nullptr;
    if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
    {
        pLogContext->BeginInput();
        pLogContext->KeyAndValue("ramOffset", ramOffset);
        pLogContext->KeyAndBeginList("srcData", false);

        // Dump the actual data contents; pSrcData is treated as dwordSize uint32 values.
        const uint32*const pSrcDwords = static_cast<const uint32*>(pSrcData);

        for (uint32 idx = 0; idx < dwordSize; ++idx)
        {
            pLogContext->Value(pSrcDwords[idx]);
        }

        pLogContext->EndList();
        pLogContext->EndInput();

        m_pPlatform->LogEndFunc(pLogContext);
    }
}
// =====================================================================================================================
// Forwards CmdAllocateEmbeddedData to the next layer; logs the requested size/alignment as inputs and the
// returned GPU virtual address as output. Returns the next layer's CPU pointer to the embedded allocation.
uint32* CmdBuffer::CmdAllocateEmbeddedData(
    uint32   sizeInDwords,
    uint32   alignmentInDwords,
    gpusize* pGpuAddress)
{
    BeginFuncInfo funcInfo;
    funcInfo.funcId       = InterfaceFunc::CmdBufferCmdAllocateEmbeddedData;
    funcInfo.objectId     = m_objectId;
    funcInfo.preCallTime  = m_pPlatform->GetTime();
    uint32*const pCpuAddr = m_pNextLayer->CmdAllocateEmbeddedData(sizeInDwords, alignmentInDwords, pGpuAddress);
    funcInfo.postCallTime = m_pPlatform->GetTime();

    LogContext* pLogContext = nullptr;
    if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
    {
        pLogContext->BeginInput();
        pLogContext->KeyAndValue("sizeInDwords", sizeInDwords);
        pLogContext->KeyAndValue("alignmentInDwords", alignmentInDwords);
        pLogContext->EndInput();

        pLogContext->BeginOutput();
        // NOTE(review): dereferences pGpuAddress unconditionally — assumes the caller always passes a
        // non-null pointer here; confirm against the interface contract.
        pLogContext->KeyAndValue("gpuAddress", *pGpuAddress);
        pLogContext->EndOutput();

        m_pPlatform->LogEndFunc(pLogContext);
    }

    return pCpuAddr;
}
// =====================================================================================================================
// Passes the event straight to the next layer without logging; see comment below for why.
Result CmdBuffer::AllocateAndBindGpuMemToEvent(
    IGpuEvent* pGpuEvent)
{
    // This function is not logged because it doesn't modify the command buffer.
    return GetNextLayer()->AllocateAndBindGpuMemToEvent(NextGpuEvent(pGpuEvent));
}
// =====================================================================================================================
// Unwraps each nested command buffer to its next-layer object, forwards the call, and logs the
// (still-wrapped) command buffers as inputs.
void CmdBuffer::CmdExecuteNestedCmdBuffers(
    uint32            cmdBufferCount,
    ICmdBuffer*const* ppCmdBuffers)
{
    // Temporary array holding the unwrapped next-layer command buffer pointers.
    AutoBuffer<ICmdBuffer*, 16, Platform> nextCmdBuffers(cmdBufferCount, m_pPlatform);

    if (nextCmdBuffers.Capacity() < cmdBufferCount)
    {
        // If the layers become production code, we must set a flag here and return out of memory on End().
        PAL_ASSERT_ALWAYS();
    }
    else
    {
        for (uint32 i = 0; i < cmdBufferCount; ++i)
        {
            nextCmdBuffers[i] = NextCmdBuffer(ppCmdBuffers[i]);
        }

        BeginFuncInfo funcInfo;
        funcInfo.funcId       = InterfaceFunc::CmdBufferCmdExecuteNestedCmdBuffers;
        funcInfo.objectId     = m_objectId;
        funcInfo.preCallTime  = m_pPlatform->GetTime();
        m_pNextLayer->CmdExecuteNestedCmdBuffers(cmdBufferCount, &nextCmdBuffers[0]);
        funcInfo.postCallTime = m_pPlatform->GetTime();

        LogContext* pLogContext = nullptr;
        if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
        {
            pLogContext->BeginInput();
            pLogContext->KeyAndBeginList("cmdBuffers", false);

            // Log the caller's original (this-layer) objects, not the unwrapped ones.
            for (uint32 idx = 0; idx < cmdBufferCount; ++idx)
            {
                pLogContext->Object(ppCmdBuffers[idx]);
            }

            pLogContext->EndList();
            pLogContext->EndInput();

            m_pPlatform->LogEndFunc(pLogContext);
        }
    }
}
// =====================================================================================================================
// Forwards CmdSaveComputeState to the next layer, logging the state flags in decoded form.
void CmdBuffer::CmdSaveComputeState(
    uint32 stateFlags)
{
    BeginFuncInfo funcInfo;
    funcInfo.funcId       = InterfaceFunc::CmdBufferCmdSaveComputeState;
    funcInfo.objectId     = m_objectId;
    funcInfo.preCallTime  = m_pPlatform->GetTime();
    m_pNextLayer->CmdSaveComputeState(stateFlags);
    funcInfo.postCallTime = m_pPlatform->GetTime();

    LogContext* pLogContext = nullptr;
    if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
    {
        pLogContext->BeginInput();
        // KeyAndComputeStateFlags expands the bitfield into readable flag names.
        pLogContext->KeyAndComputeStateFlags("stateFlags", stateFlags);
        pLogContext->EndInput();

        m_pPlatform->LogEndFunc(pLogContext);
    }
}
// =====================================================================================================================
// Forwards CmdRestoreComputeState to the next layer, logging the state flags in decoded form.
void CmdBuffer::CmdRestoreComputeState(
    uint32 stateFlags)
{
    BeginFuncInfo funcInfo;
    funcInfo.funcId       = InterfaceFunc::CmdBufferCmdRestoreComputeState;
    funcInfo.objectId     = m_objectId;
    funcInfo.preCallTime  = m_pPlatform->GetTime();
    m_pNextLayer->CmdRestoreComputeState(stateFlags);
    funcInfo.postCallTime = m_pPlatform->GetTime();

    LogContext* pLogContext = nullptr;
    if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
    {
        pLogContext->BeginInput();
        pLogContext->KeyAndComputeStateFlags("stateFlags", stateFlags);
        pLogContext->EndInput();

        m_pPlatform->LogEndFunc(pLogContext);
    }
}
// =====================================================================================================================
// Forwards CmdExecuteIndirectCmds to the next layer, unwrapping the generator and memory objects,
// and logs all call inputs.
void CmdBuffer::CmdExecuteIndirectCmds(
    const IIndirectCmdGenerator& generator,
    const IGpuMemory&            gpuMemory,
    gpusize                      offset,
    uint32                       maximumCount,
    gpusize                      countGpuAddr)
{
    BeginFuncInfo funcInfo;
    funcInfo.funcId       = InterfaceFunc::CmdBufferCmdExecuteIndirectCmds;
    funcInfo.objectId     = m_objectId;
    funcInfo.preCallTime  = m_pPlatform->GetTime();
    m_pNextLayer->CmdExecuteIndirectCmds(*NextIndirectCmdGenerator(&generator),
                                         *NextGpuMemory(&gpuMemory),
                                         offset,
                                         maximumCount,
                                         countGpuAddr);
    funcInfo.postCallTime = m_pPlatform->GetTime();

    LogContext* pLogContext = nullptr;
    if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
    {
        pLogContext->BeginInput();
        pLogContext->KeyAndObject("generator", &generator);
        pLogContext->KeyAndObject("gpuMemory", &gpuMemory);
        pLogContext->KeyAndValue("offset", offset);
        pLogContext->KeyAndValue("maximumCount", maximumCount);
        pLogContext->KeyAndValue("countGpuAddr", countGpuAddr);
        pLogContext->EndInput();

        m_pPlatform->LogEndFunc(pLogContext);
    }
}
// =====================================================================================================================
// Forwards CmdPostProcessFrame to the next layer, converting the frame info to next-layer objects.
// The next layer writes into a local addedGpuWork so the caller's pAddedGpuWork may be null; the
// caller's flag is only ever set to true, never cleared.
void CmdBuffer::CmdPostProcessFrame(
    const CmdPostProcessFrameInfo& postProcessInfo,
    bool*                          pAddedGpuWork)
{
    // Next-layer copy of the frame info (filled in by NextCmdPostProcessFrameInfo).
    CmdPostProcessFrameInfo nextPostProcessInfo = {};
    bool addedGpuWork = false;

    BeginFuncInfo funcInfo;
    funcInfo.funcId       = InterfaceFunc::CmdBufferCmdPostProcessFrame;
    funcInfo.objectId     = m_objectId;
    funcInfo.preCallTime  = m_pPlatform->GetTime();
    m_pNextLayer->CmdPostProcessFrame(*NextCmdPostProcessFrameInfo(postProcessInfo, &nextPostProcessInfo),
                                      &addedGpuWork);
    funcInfo.postCallTime = m_pPlatform->GetTime();

    LogContext* pLogContext = nullptr;
    if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
    {
        pLogContext->BeginInput();
        pLogContext->KeyAndStruct("postProcessInfo", postProcessInfo);
        pLogContext->EndInput();

        pLogContext->BeginOutput();
        pLogContext->KeyAndValue("addedGpuWork", addedGpuWork);
        pLogContext->EndOutput();

        m_pPlatform->LogEndFunc(pLogContext);
    }

    // Only propagate a true result; leave the caller's flag untouched otherwise.
    if (addedGpuWork && (pAddedGpuWork != nullptr))
    {
        *pAddedGpuWork = true;
    }
}
// =====================================================================================================================
// Forwards CmdCommentString to the next layer and records the comment text in the log.
void CmdBuffer::CmdCommentString(
    const char* pComment)
{
    BeginFuncInfo funcInfo;
    funcInfo.funcId       = InterfaceFunc::CmdBufferCmdCommentString;
    funcInfo.objectId     = m_objectId;
    funcInfo.preCallTime  = m_pPlatform->GetTime();
    m_pNextLayer->CmdCommentString(pComment);
    funcInfo.postCallTime = m_pPlatform->GetTime();

    LogContext* pLogContext = nullptr;
    if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
    {
        pLogContext->BeginInput();
        pLogContext->KeyAndValue("comment", pComment);
        pLogContext->EndInput();

        m_pPlatform->LogEndFunc(pLogContext);
    }
}
// =====================================================================================================================
// Forwards CmdNop to the next layer and logs the payload pointer and size.
void CmdBuffer::CmdNop(
    const void* pPayload,
    uint32      payloadSize)
{
    BeginFuncInfo funcInfo;
    funcInfo.funcId       = InterfaceFunc::CmdBufferCmdNop;
    funcInfo.objectId     = m_objectId;
    funcInfo.preCallTime  = m_pPlatform->GetTime();
    m_pNextLayer->CmdNop(pPayload, payloadSize);
    funcInfo.postCallTime = m_pPlatform->GetTime();

    LogContext* pLogContext = nullptr;
    if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
    {
        pLogContext->BeginInput();
        // NOTE(review): unlike CmdWriteCeRam, this logs only the pointer value, not the payload bytes —
        // confirm whether dumping the payload contents would be more useful here.
        pLogContext->KeyAndValue("payload", pPayload);
        pLogContext->KeyAndValue("payloadSize", payloadSize);
        pLogContext->EndInput();

        m_pPlatform->LogEndFunc(pLogContext);
    }
}
// =====================================================================================================================
// Forwards CmdInsertExecutionMarker to the next layer; only the call itself (no inputs/outputs) is logged.
// Returns the marker value produced by the next layer.
uint32 CmdBuffer::CmdInsertExecutionMarker()
{
    BeginFuncInfo funcInfo;
    funcInfo.funcId       = InterfaceFunc::CmdBufferCmdInsertExecutionMarker;
    funcInfo.objectId     = m_objectId;
    funcInfo.preCallTime  = m_pPlatform->GetTime();
    const uint32 executionMarker = m_pNextLayer->CmdInsertExecutionMarker();
    funcInfo.postCallTime = m_pPlatform->GetTime();

    LogContext* pLogContext = nullptr;
    if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
    {
        m_pPlatform->LogEndFunc(pLogContext);
    }

    return executionMarker;
}
// =====================================================================================================================
// Forwards CmdStartGpuProfilerLogging to the next layer; the call has no parameters to log.
void CmdBuffer::CmdStartGpuProfilerLogging()
{
    BeginFuncInfo funcInfo;
    funcInfo.funcId       = InterfaceFunc::CmdBufferCmdStartGpuProfilerLogging;
    funcInfo.objectId     = m_objectId;
    funcInfo.preCallTime  = m_pPlatform->GetTime();
    m_pNextLayer->CmdStartGpuProfilerLogging();
    funcInfo.postCallTime = m_pPlatform->GetTime();

    LogContext* pLogContext = nullptr;
    if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
    {
        m_pPlatform->LogEndFunc(pLogContext);
    }
}
// =====================================================================================================================
// Forwards CmdStopGpuProfilerLogging to the next layer; the call has no parameters to log.
void CmdBuffer::CmdStopGpuProfilerLogging()
{
    BeginFuncInfo funcInfo;
    funcInfo.funcId       = InterfaceFunc::CmdBufferCmdStopGpuProfilerLogging;
    funcInfo.objectId     = m_objectId;
    funcInfo.preCallTime  = m_pPlatform->GetTime();
    m_pNextLayer->CmdStopGpuProfilerLogging();
    funcInfo.postCallTime = m_pPlatform->GetTime();

    LogContext* pLogContext = nullptr;
    if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
    {
        m_pPlatform->LogEndFunc(pLogContext);
    }
}
// =====================================================================================================================
// Forwards CmdXdmaWaitFlipPending to the next layer; the call has no parameters to log.
void CmdBuffer::CmdXdmaWaitFlipPending()
{
    BeginFuncInfo funcInfo;
    funcInfo.funcId       = InterfaceFunc::CmdBufferCmdXdmaWaitFlipPending;
    funcInfo.objectId     = m_objectId;
    funcInfo.preCallTime  = m_pPlatform->GetTime();
    m_pNextLayer->CmdXdmaWaitFlipPending();
    funcInfo.postCallTime = m_pPlatform->GetTime();

    LogContext* pLogContext = nullptr;
    if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
    {
        m_pPlatform->LogEndFunc(pLogContext);
    }
}
// =====================================================================================================================
// Logs the destroy call, destroys this layer object in place, then destroys the next-layer object.
void CmdBuffer::Destroy()
{
    // Note that we can't time a Destroy call.
    BeginFuncInfo funcInfo;
    funcInfo.funcId       = InterfaceFunc::CmdBufferDestroy;
    funcInfo.objectId     = m_objectId;
    funcInfo.preCallTime  = m_pPlatform->GetTime();
    funcInfo.postCallTime = funcInfo.preCallTime;

    LogContext* pLogContext = nullptr;
    if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
    {
        m_pPlatform->LogEndFunc(pLogContext);
    }

    // Save the next-layer pointer before running our own destructor — members are invalid afterwards.
    ICmdBuffer*const pNextLayer = m_pNextLayer;
    this->~CmdBuffer();
    pNextLayer->Destroy();
}
// =====================================================================================================================
// Static callback bound into the ICmdBuffer function table: forwards a compute user-data update to the
// next layer and logs firstEntry plus the entry values (entryCount is implied by the list length).
void PAL_STDCALL CmdBuffer::CmdSetUserDataCs(
    ICmdBuffer*   pCmdBuffer,
    uint32        firstEntry,
    uint32        entryCount,
    const uint32* pEntryValues)
{
    // Recover the layer object from the opaque interface pointer.
    auto*const pThis = static_cast<CmdBuffer*>(pCmdBuffer);

    BeginFuncInfo funcInfo;
    funcInfo.funcId       = InterfaceFunc::CmdBufferCmdSetUserData;
    funcInfo.objectId     = pThis->m_objectId;
    funcInfo.preCallTime  = pThis->m_pPlatform->GetTime();
    pThis->m_pNextLayer->CmdSetUserData(PipelineBindPoint::Compute, firstEntry, entryCount, pEntryValues);
    funcInfo.postCallTime = pThis->m_pPlatform->GetTime();

    LogContext* pLogContext = nullptr;
    if (pThis->m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
    {
        pLogContext->BeginInput();
        pLogContext->KeyAndValue("firstEntry", firstEntry);
        pLogContext->KeyAndBeginList("values", false);

        for (uint32 idx = 0; idx < entryCount; ++idx)
        {
            pLogContext->Value(pEntryValues[idx]);
        }

        pLogContext->EndList();
        pLogContext->EndInput();

        pThis->m_pPlatform->LogEndFunc(pLogContext);
    }
}
// =====================================================================================================================
// Static callback bound into the ICmdBuffer function table: forwards a graphics user-data update to the
// next layer and logs firstEntry plus the entry values (entryCount is implied by the list length).
void PAL_STDCALL CmdBuffer::CmdSetUserDataGfx(
    ICmdBuffer*   pCmdBuffer,
    uint32        firstEntry,
    uint32        entryCount,
    const uint32* pEntryValues)
{
    auto*const pThis = static_cast<CmdBuffer*>(pCmdBuffer);

    BeginFuncInfo funcInfo;
    funcInfo.funcId       = InterfaceFunc::CmdBufferCmdSetUserData;
    funcInfo.objectId     = pThis->m_objectId;
    funcInfo.preCallTime  = pThis->m_pPlatform->GetTime();
    pThis->m_pNextLayer->CmdSetUserData(PipelineBindPoint::Graphics, firstEntry, entryCount, pEntryValues);
    funcInfo.postCallTime = pThis->m_pPlatform->GetTime();

    LogContext* pLogContext = nullptr;
    if (pThis->m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
    {
        pLogContext->BeginInput();
        pLogContext->KeyAndValue("firstEntry", firstEntry);
        pLogContext->KeyAndBeginList("values", false);

        for (uint32 idx = 0; idx < entryCount; ++idx)
        {
            pLogContext->Value(pEntryValues[idx]);
        }

        pLogContext->EndList();
        pLogContext->EndInput();

        pThis->m_pPlatform->LogEndFunc(pLogContext);
    }
}
// =====================================================================================================================
// Static draw callback: forwards the draw to the next layer and logs every draw argument.
void PAL_STDCALL CmdBuffer::CmdDraw(
    ICmdBuffer* pCmdBuffer,
    uint32      firstVertex,
    uint32      vertexCount,
    uint32      firstInstance,
    uint32      instanceCount,
    uint32      drawId)
{
    auto*const pThis = static_cast<CmdBuffer*>(pCmdBuffer);

    BeginFuncInfo funcInfo;
    funcInfo.funcId       = InterfaceFunc::CmdBufferCmdDraw;
    funcInfo.objectId     = pThis->m_objectId;
    funcInfo.preCallTime  = pThis->m_pPlatform->GetTime();
    pThis->m_pNextLayer->CmdDraw(firstVertex, vertexCount, firstInstance, instanceCount, drawId);
    funcInfo.postCallTime = pThis->m_pPlatform->GetTime();

    LogContext* pLogContext = nullptr;
    if (pThis->m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
    {
        pLogContext->BeginInput();
        pLogContext->KeyAndValue("firstVertex", firstVertex);
        pLogContext->KeyAndValue("vertexCount", vertexCount);
        pLogContext->KeyAndValue("firstInstance", firstInstance);
        pLogContext->KeyAndValue("instanceCount", instanceCount);
        pLogContext->KeyAndValue("drawId", drawId);
        pLogContext->EndInput();

        pThis->m_pPlatform->LogEndFunc(pLogContext);
    }
}
// =====================================================================================================================
// Static draw callback: forwards CmdDrawOpaque to the next layer and logs every draw argument.
// Fix: firstInstance and instanceCount were forwarded to the next layer but omitted from the logged
// inputs, unlike every other draw entry point (see CmdDraw/CmdDrawIndexed); they are now logged too.
void PAL_STDCALL CmdBuffer::CmdDrawOpaque(
    ICmdBuffer* pCmdBuffer,
    gpusize     streamOutFilledSizeVa,
    uint32      streamOutOffset,
    uint32      stride,
    uint32      firstInstance,
    uint32      instanceCount)
{
    auto*const pThis = static_cast<CmdBuffer*>(pCmdBuffer);

    BeginFuncInfo funcInfo;
    funcInfo.funcId       = InterfaceFunc::CmdBufferCmdDrawOpaque;
    funcInfo.objectId     = pThis->m_objectId;
    funcInfo.preCallTime  = pThis->m_pPlatform->GetTime();
    pThis->m_pNextLayer->CmdDrawOpaque(streamOutFilledSizeVa, streamOutOffset, stride, firstInstance, instanceCount);
    funcInfo.postCallTime = pThis->m_pPlatform->GetTime();

    LogContext* pLogContext = nullptr;
    if (pThis->m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
    {
        pLogContext->BeginInput();
        pLogContext->KeyAndValue("streamOutFilledSizeVa", streamOutFilledSizeVa);
        pLogContext->KeyAndValue("streamOutOffset", streamOutOffset);
        pLogContext->KeyAndValue("stride", stride);
        pLogContext->KeyAndValue("firstInstance", firstInstance);
        pLogContext->KeyAndValue("instanceCount", instanceCount);
        pLogContext->EndInput();

        pThis->m_pPlatform->LogEndFunc(pLogContext);
    }
}
// =====================================================================================================================
// Static draw callback: forwards the indexed draw to the next layer and logs every draw argument.
void PAL_STDCALL CmdBuffer::CmdDrawIndexed(
    ICmdBuffer* pCmdBuffer,
    uint32      firstIndex,
    uint32      indexCount,
    int32       vertexOffset,
    uint32      firstInstance,
    uint32      instanceCount,
    uint32      drawId)
{
    auto*const pThis = static_cast<CmdBuffer*>(pCmdBuffer);

    BeginFuncInfo funcInfo;
    funcInfo.funcId       = InterfaceFunc::CmdBufferCmdDrawIndexed;
    funcInfo.objectId     = pThis->m_objectId;
    funcInfo.preCallTime  = pThis->m_pPlatform->GetTime();
    pThis->m_pNextLayer->CmdDrawIndexed(firstIndex, indexCount, vertexOffset, firstInstance, instanceCount, drawId);
    funcInfo.postCallTime = pThis->m_pPlatform->GetTime();

    LogContext* pLogContext = nullptr;
    if (pThis->m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
    {
        pLogContext->BeginInput();
        pLogContext->KeyAndValue("firstIndex", firstIndex);
        pLogContext->KeyAndValue("indexCount", indexCount);
        pLogContext->KeyAndValue("vertexOffset", vertexOffset);
        pLogContext->KeyAndValue("firstInstance", firstInstance);
        pLogContext->KeyAndValue("instanceCount", instanceCount);
        pLogContext->KeyAndValue("drawId", drawId);
        pLogContext->EndInput();

        pThis->m_pPlatform->LogEndFunc(pLogContext);
    }
}
// =====================================================================================================================
// Static draw callback: forwards the indirect multi-draw to the next layer (unwrapping the argument
// buffer memory) and logs all inputs.
void PAL_STDCALL CmdBuffer::CmdDrawIndirectMulti(
    ICmdBuffer*       pCmdBuffer,
    const IGpuMemory& gpuMemory,
    gpusize           offset,
    uint32            stride,
    uint32            maximumCount,
    gpusize           countGpuAddr)
{
    auto*const pThis = static_cast<CmdBuffer*>(pCmdBuffer);

    BeginFuncInfo funcInfo;
    funcInfo.funcId       = InterfaceFunc::CmdBufferCmdDrawIndirectMulti;
    funcInfo.objectId     = pThis->m_objectId;
    funcInfo.preCallTime  = pThis->m_pPlatform->GetTime();
    pThis->m_pNextLayer->CmdDrawIndirectMulti(*NextGpuMemory(&gpuMemory), offset, stride, maximumCount, countGpuAddr);
    funcInfo.postCallTime = pThis->m_pPlatform->GetTime();

    LogContext* pLogContext = nullptr;
    if (pThis->m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
    {
        pLogContext->BeginInput();
        pLogContext->KeyAndObject("gpuMemory", &gpuMemory);
        pLogContext->KeyAndValue("offset", offset);
        pLogContext->KeyAndValue("stride", stride);
        pLogContext->KeyAndValue("maximumCount", maximumCount);
        pLogContext->KeyAndValue("countGpuAddr", countGpuAddr);
        pLogContext->EndInput();

        pThis->m_pPlatform->LogEndFunc(pLogContext);
    }
}
// =====================================================================================================================
// Static draw callback: forwards the indexed indirect multi-draw to the next layer (unwrapping the
// argument buffer memory) and logs all inputs.
void PAL_STDCALL CmdBuffer::CmdDrawIndexedIndirectMulti(
    ICmdBuffer*       pCmdBuffer,
    const IGpuMemory& gpuMemory,
    gpusize           offset,
    uint32            stride,
    uint32            maximumCount,
    gpusize           countGpuAddr)
{
    auto*const pThis = static_cast<CmdBuffer*>(pCmdBuffer);

    BeginFuncInfo funcInfo;
    funcInfo.funcId       = InterfaceFunc::CmdBufferCmdDrawIndexedIndirectMulti;
    funcInfo.objectId     = pThis->m_objectId;
    funcInfo.preCallTime  = pThis->m_pPlatform->GetTime();
    pThis->m_pNextLayer->CmdDrawIndexedIndirectMulti(*NextGpuMemory(&gpuMemory),
                                                     offset,
                                                     stride,
                                                     maximumCount,
                                                     countGpuAddr);
    funcInfo.postCallTime = pThis->m_pPlatform->GetTime();

    LogContext* pLogContext = nullptr;
    if (pThis->m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
    {
        pLogContext->BeginInput();
        pLogContext->KeyAndObject("gpuMemory", &gpuMemory);
        pLogContext->KeyAndValue("offset", offset);
        pLogContext->KeyAndValue("stride", stride);
        pLogContext->KeyAndValue("maximumCount", maximumCount);
        pLogContext->KeyAndValue("countGpuAddr", countGpuAddr);
        pLogContext->EndInput();

        pThis->m_pPlatform->LogEndFunc(pLogContext);
    }
}
// =====================================================================================================================
// Static dispatch callback: forwards the dispatch to the next layer and logs the thread-group counts.
void PAL_STDCALL CmdBuffer::CmdDispatch(
    ICmdBuffer* pCmdBuffer,
    uint32      x,
    uint32      y,
    uint32      z)
{
    auto*const pThis = static_cast<CmdBuffer*>(pCmdBuffer);

    BeginFuncInfo funcInfo;
    funcInfo.funcId       = InterfaceFunc::CmdBufferCmdDispatch;
    funcInfo.objectId     = pThis->m_objectId;
    funcInfo.preCallTime  = pThis->m_pPlatform->GetTime();
    pThis->m_pNextLayer->CmdDispatch(x, y, z);
    funcInfo.postCallTime = pThis->m_pPlatform->GetTime();

    LogContext* pLogContext = nullptr;
    if (pThis->m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
    {
        pLogContext->BeginInput();
        pLogContext->KeyAndValue("x", x);
        pLogContext->KeyAndValue("y", y);
        pLogContext->KeyAndValue("z", z);
        pLogContext->EndInput();

        pThis->m_pPlatform->LogEndFunc(pLogContext);
    }
}
// =====================================================================================================================
// Static dispatch callback: forwards the indirect dispatch to the next layer (unwrapping the argument
// buffer memory) and logs all inputs.
void PAL_STDCALL CmdBuffer::CmdDispatchIndirect(
    ICmdBuffer*       pCmdBuffer,
    const IGpuMemory& gpuMemory,
    gpusize           offset)
{
    auto*const pThis = static_cast<CmdBuffer*>(pCmdBuffer);

    BeginFuncInfo funcInfo;
    funcInfo.funcId       = InterfaceFunc::CmdBufferCmdDispatchIndirect;
    funcInfo.objectId     = pThis->m_objectId;
    funcInfo.preCallTime  = pThis->m_pPlatform->GetTime();
    pThis->m_pNextLayer->CmdDispatchIndirect(*NextGpuMemory(&gpuMemory), offset);
    funcInfo.postCallTime = pThis->m_pPlatform->GetTime();

    LogContext* pLogContext = nullptr;
    if (pThis->m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
    {
        pLogContext->BeginInput();
        pLogContext->KeyAndObject("gpuMemory", &gpuMemory);
        pLogContext->KeyAndValue("offset", offset);
        pLogContext->EndInput();

        pThis->m_pPlatform->LogEndFunc(pLogContext);
    }
}
// =====================================================================================================================
// Static dispatch callback: forwards the offset dispatch to the next layer and logs the offsets and dims.
void PAL_STDCALL CmdBuffer::CmdDispatchOffset(
    ICmdBuffer* pCmdBuffer,
    uint32      xOffset,
    uint32      yOffset,
    uint32      zOffset,
    uint32      xDim,
    uint32      yDim,
    uint32      zDim)
{
    auto*const pThis = static_cast<CmdBuffer*>(pCmdBuffer);

    BeginFuncInfo funcInfo;
    funcInfo.funcId       = InterfaceFunc::CmdBufferCmdDispatchOffset;
    funcInfo.objectId     = pThis->m_objectId;
    funcInfo.preCallTime  = pThis->m_pPlatform->GetTime();
    pThis->m_pNextLayer->CmdDispatchOffset(xOffset, yOffset, zOffset, xDim, yDim, zDim);
    funcInfo.postCallTime = pThis->m_pPlatform->GetTime();

    LogContext* pLogContext = nullptr;
    if (pThis->m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
    {
        pLogContext->BeginInput();
        pLogContext->KeyAndValue("xOffset", xOffset);
        pLogContext->KeyAndValue("yOffset", yOffset);
        pLogContext->KeyAndValue("zOffset", zOffset);
        pLogContext->KeyAndValue("xDim", xDim);
        pLogContext->KeyAndValue("yDim", yDim);
        pLogContext->KeyAndValue("zDim", zDim);
        pLogContext->EndInput();

        pThis->m_pPlatform->LogEndFunc(pLogContext);
    }
}
// =====================================================================================================================
// Forwards CmdCopyImageToPackedPixelImage to the next layer and logs its inputs.
// Fix: removed the redundant "pThis = static_cast<CmdBuffer*>(this)" self-cast — this is a non-static
// member function, so the members are directly accessible; the cast was a no-op copied from the static
// callback pattern used elsewhere in this file.
void CmdBuffer::CmdCopyImageToPackedPixelImage(
    const Pal::IImage&          srcImage,
    const Pal::IImage&          dstImage,
    uint32                      regionCount,
    const Pal::ImageCopyRegion* pRegions,
    Pal::PackedPixelType        packPixelType)
{
    BeginFuncInfo funcInfo;
    funcInfo.funcId       = InterfaceFunc::CmdBufferCopyImageToPackedPixelImage;
    funcInfo.objectId     = m_objectId;
    funcInfo.preCallTime  = m_pPlatform->GetTime();
    // NOTE(review): unlike the other entry points, the images are passed through without a Next*()
    // unwrap — confirm whether the next layer expects the wrapped or unwrapped IImage here.
    m_pNextLayer->CmdCopyImageToPackedPixelImage(srcImage,
                                                 dstImage,
                                                 regionCount,
                                                 pRegions,
                                                 packPixelType);
    funcInfo.postCallTime = m_pPlatform->GetTime();

    LogContext* pLogContext = nullptr;
    if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
    {
        pLogContext->BeginInput();
        pLogContext->KeyAndObject("srcImage", &srcImage);
        pLogContext->KeyAndObject("dstImage", &dstImage);
        pLogContext->KeyAndValue("regionCount", regionCount);
        // NOTE(review): only the first region is logged even when regionCount > 1 — confirm whether
        // the full region list should be dumped like other multi-region entry points.
        pLogContext->KeyAndStruct("Region", *pRegions);
        pLogContext->KeyAndValue("packPixelType", static_cast<uint32>(packPixelType));
        pLogContext->EndInput();

        m_pPlatform->LogEndFunc(pLogContext);
    }
}
// =====================================================================================================================
// Forwards CmdSetViewInstanceMask to the next layer and logs the mask value.
void CmdBuffer::CmdSetViewInstanceMask(
    uint32 mask)
{
    BeginFuncInfo funcInfo;
    funcInfo.funcId       = InterfaceFunc::CmdBufferCmdSetViewInstanceMask;
    funcInfo.objectId     = m_objectId;
    funcInfo.preCallTime  = m_pPlatform->GetTime();
    m_pNextLayer->CmdSetViewInstanceMask(mask);
    funcInfo.postCallTime = m_pPlatform->GetTime();

    LogContext* pLogContext = nullptr;
    if (m_pPlatform->LogBeginFunc(funcInfo, &pLogContext))
    {
        pLogContext->BeginInput();
        pLogContext->KeyAndValue("mask", mask);
        pLogContext->EndInput();

        m_pPlatform->LogEndFunc(pLogContext);
    }
}
} // InterfaceLogger
} // Pal
#endif
|
/*=============================================================================
Copyright (c) 2001-2011 Joel de Guzman
Copyright (c) 2001-2011 Hartmut Kaiser
Copyright (c) 2011 Bryce Lelbach
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
=============================================================================*/
#if !defined(BOOST_SPIRIT_UTREE_DETAIL2)
#define BOOST_SPIRIT_UTREE_DETAIL2
#if defined(BOOST_MSVC)
# pragma warning(push)
# pragma warning(disable: 4800)
#endif
#include <boost/type_traits/remove_pointer.hpp>
#include <boost/type_traits/is_pointer.hpp>
#include <boost/utility/enable_if.hpp>
#include <boost/throw_exception.hpp>
#include <boost/iterator/iterator_traits.hpp>
namespace boost { namespace spirit { namespace detail
{
    // Mutable access to the metadata byte stored at buff[small_string_size]
    // (bit 0: heap-allocated flag; remaining bits: type code).
    inline char& fast_string::info()
    {
        return buff[small_string_size];
    }
    // Read-only copy of the metadata byte (see non-const overload above).
    inline char fast_string::info() const
    {
        return buff[small_string_size];
    }
inline int fast_string::get_type() const
{
return info() >> 1;
}
    // Stores the type code in the upper bits of the metadata byte while
    // preserving bit 0 (the heap-allocation flag).
    inline void fast_string::set_type(int t)
    {
        info() = (t << 1) | (info() & 1);
    }
    // Reads the 16-bit tag stored in the last two bytes of the small-string
    // buffer; memcpy avoids unaligned/aliasing issues.
    inline short fast_string::tag() const
    {
        boost::int16_t tmp;
        std::memcpy(&tmp, &buff[small_string_size-2], sizeof(tmp));
        return tmp;
    }
    // Writes the 16-bit tag into the last two bytes of the small-string
    // buffer; memcpy avoids unaligned/aliasing issues.
    inline void fast_string::tag(short tag)
    {
        boost::int16_t tmp = tag;
        std::memcpy(&buff[small_string_size-2], &tmp, sizeof(tmp));
    }
inline bool fast_string::is_heap_allocated() const
{
return info() & 1;
}
    // Length of the stored string. Heap strings keep an explicit size;
    // in-situ strings encode (max_string_len - size) in buff[max_string_len]
    // (see construct()), so the length is recovered by subtraction.
    inline std::size_t fast_string::size() const
    {
        if (is_heap_allocated())
            return heap.size;
        else
            return max_string_len - buff[max_string_len];
    }
inline char const* fast_string::str() const
{
if (is_heap_allocated())
return heap.str;
else
return buff;
}
    // Initializes this fast_string from the character range [f, l).
    // Short strings (size < max_string_len) are stored in-situ with the
    // remaining capacity encoded in buff[max_string_len]; longer strings go
    // on the heap. Bit 0 of the metadata byte records which form is in use.
    // Assumes the object is uninitialized or already freed — does not
    // release any previous heap allocation.
    template <typename Iterator>
    inline void fast_string::construct(Iterator f, Iterator l)
    {
        std::size_t const size = static_cast<std::size_t>(l-f);
        char* str;
        if (size < max_string_len)
        {
            // if it fits, store it in-situ; small_string_size minus the length
            // of the string is placed in buff[small_string_size - 1]
            str = buff;
            buff[max_string_len] = static_cast<char>(max_string_len - size);
            info() &= ~0x1; // clear the heap flag
        }
        else
        {
            // else, store it in the heap
            str = new char[size + 1]; // add one for the null char
            heap.str = str;
            heap.size = size;
            info() |= 0x1; // set the heap flag
        }
        for (std::size_t i = 0; i != size; ++i)
        {
            *str++ = *f++;
        }
        *str = '\0'; // add the null char
    }
    // Swaps the entire object representation (union storage plus metadata)
    // with another fast_string via std::swap.
    inline void fast_string::swap(fast_string& other)
    {
        std::swap(*this, other);
    }
    // Releases the heap block if one is owned. Note: does not reset the
    // heap flag or pointer — the caller is expected to reinitialize or
    // discard the object afterwards.
    inline void fast_string::free()
    {
        if (is_heap_allocated())
        {
            delete [] heap.str;
        }
    }
    // Deep-copies another fast_string by re-constructing from its
    // character range; picks in-situ or heap storage based on length.
    inline void fast_string::copy(fast_string const& other)
    {
        construct(other.str(), other.str() + other.size());
    }
    // Zero-fills the storage word by word through the overlaid long array
    // (lbuff aliases buff), clearing both the data and the metadata byte.
    inline void fast_string::initialize()
    {
        for (std::size_t i = 0; i != buff_size / (sizeof(long)/sizeof(char)); ++i)
            lbuff[i] = 0;
    }
    // Doubly-linked list node holding one utree value. Noncopyable; nodes
    // are created/destroyed by the owning list.
    struct list::node : boost::noncopyable
    {
        template <typename T>
        node(T const& val, node* next, node* prev)
          : val(val), next(next), prev(prev) {}

        // Detaches this node by stitching its neighbors together; assumes
        // both prev and next are non-null (interior node).
        void unlink()
        {
            prev->next = next;
            next->prev = prev;
        }

        utree val;
        node* next;
        node* prev;
    };
    // Bidirectional iterator over list nodes, built on iterator_facade.
    // Keeps both the current node and its predecessor so that the
    // past-the-end iterator (node == 0) can still be decremented.
    template <typename Value>
    class list::node_iterator
      : public boost::iterator_facade<
            node_iterator<Value>
          , Value
          , boost::bidirectional_traversal_tag>
    {
    public:

        node_iterator()
          : node(0), prev(0) {}

        node_iterator(list::node* node, list::node* prev)
          : node(node), prev(prev) {}

    private:

        friend class boost::iterator_core_access;
        friend class boost::spirit::utree;
        friend struct boost::spirit::detail::list;

        void increment()
        {
            if (node != 0) // not at end
            {
                prev = node;
                node = node->next;
            }
        }

        void decrement()
        {
            if (prev != 0) // not at begin
            {
                node = prev;
                prev = prev->prev;
            }
        }

        // Iterators compare equal when they reference the same node;
        // prev is deliberately ignored.
        bool equal(node_iterator const& other) const
        {
            return node == other.node;
        }

        typename node_iterator::reference dereference() const
        {
            return node->val;
        }

        list::node* node;
        list::node* prev;
    };
    // Specialization for reference_wrapper values: dereference() must return
    // a reference to a reference_wrapper, so a mutable `curr` member caches
    // the wrapper for the current node. A static nil_node provides a target
    // when the iterator is at end (node == 0).
    template <typename Value>
    class list::node_iterator<boost::reference_wrapper<Value> >
      : public boost::iterator_facade<
            node_iterator<boost::reference_wrapper<Value> >
          , boost::reference_wrapper<Value>
          , boost::bidirectional_traversal_tag>
    {
    public:

        node_iterator()
          : node(0), prev(0), curr(nil_node) {}

        node_iterator(list::node* node, list::node* prev)
          : node(node), prev(prev), curr(node ? node->val : nil_node) {}

    private:

        friend class boost::iterator_core_access;
        friend class boost::spirit::utree;
        friend struct boost::spirit::detail::list;

        void increment()
        {
            if (node != 0) // not at end
            {
                prev = node;
                node = node->next;
                // Re-point the cached wrapper at the new node (or nil at end).
                curr = boost::ref(node ? node->val : nil_node);
            }
        }

        void decrement()
        {
            if (prev != 0) // not at begin
            {
                node = prev;
                prev = prev->prev;
                curr = boost::ref(node ? node->val : nil_node);
            }
        }

        bool equal(node_iterator const& other) const
        {
            return node == other.node;
        }

        typename node_iterator::reference dereference() const
        {
            return curr;
        }

        list::node* node;
        list::node* prev;
        static Value nil_node;
        mutable boost::reference_wrapper<Value> curr;
    };

    // Definition of the shared sentinel value used when the iterator is at end.
    template <typename Value>
    Value list::node_iterator<boost::reference_wrapper<Value> >::nil_node = Value();
    // Deletes every node in the list. Note: does not reset first/last/size —
    // callers are expected to reinitialize or discard the list afterwards.
    inline void list::free()
    {
        node* p = first;
        while (p != 0)
        {
            node* next = p->next;
            delete p;
            p = next;
        }
    }
    // Appends a deep copy of every element of `other` onto this list.
    // Assumes this list has already been default-constructed/emptied.
    inline void list::copy(list const& other)
    {
        node* p = other.first;
        while (p != 0)
        {
            push_back(p->val);
            p = p->next;
        }
    }
inline void list::default_construct()
{
first = last = 0;
size = 0;
}
    // Inserts a copy of `val` immediately before `pos`. An end iterator
    // (null node) degenerates to push_back. Updates `first` when inserting
    // at the front; `last` never changes because insertion is always before
    // an existing node.
    template <typename T, typename Iterator>
    inline void list::insert(T const& val, Iterator pos)
    {
        if (!pos.node)
        {
            push_back(val);
            return;
        }

        detail::list::node* new_node =
            new detail::list::node(val, pos.node, pos.node->prev);

        if (pos.node->prev)
            pos.node->prev->next = new_node;
        else
            first = new_node;

        pos.node->prev = new_node;
        ++size;
    }
    // Prepends a copy of `val`. An empty list gets a lone node that is both
    // first and last; otherwise the new node is linked in ahead of `first`.
    template <typename T>
    inline void list::push_front(T const& val)
    {
        detail::list::node* new_node;
        if (first == 0)
        {
            new_node = new detail::list::node(val, 0, 0);
            first = last = new_node;
            ++size;
        }
        else
        {
            // first->prev is 0 here, so the new node becomes the new head.
            new_node = new detail::list::node(val, first, first->prev);
            first->prev = new_node;
            first = new_node;
            ++size;
        }
    }
    // Appends a copy of `val`. An empty list delegates to push_front;
    // otherwise the new node is linked in after `last`.
    template <typename T>
    inline void list::push_back(T const& val)
    {
        if (last == 0)
            push_front(val);
        else {
            detail::list::node* new_node =
                new detail::list::node(val, last->next, last);
            last->next = new_node;
            last = new_node;
            ++size;
        }
    }
    // Removes the first element. Precondition: the list is non-empty.
    inline void list::pop_front()
    {
        BOOST_ASSERT(size != 0);
        if (first == last) // there's only one item
        {
            delete first;
            size = 0;
            first = last = 0;
        }
        else
        {
            node* np = first;
            first = first->next;
            first->prev = 0;
            delete np;
            --size;
        }
    }
    // Removes the last element. Precondition: the list is non-empty.
    inline void list::pop_back()
    {
        BOOST_ASSERT(size != 0);
        if (first == last) // there's only one item
        {
            delete first;
            size = 0;
            first = last = 0;
        }
        else
        {
            node* np = last;
            last = last->prev;
            last->next = 0;
            delete np;
            --size;
        }
    }
    // Removes the node at `pos` and returns the node that followed it
    // (0 when the tail was erased). Head/tail removal is delegated to
    // pop_front/pop_back, which also maintain the size counter.
    inline list::node* list::erase(node* pos)
    {
        BOOST_ASSERT(pos != 0);
        if (pos == first)
        {
            pop_front();
            return first;
        }
        else if (pos == last)
        {
            pop_back();
            return 0;
        }
        else
        {
            // Interior node: unlink it directly and delete.
            node* next(pos->next);
            pos->unlink();
            delete pos;
            --size;
            return next;
        }
    }
///////////////////////////////////////////////////////////////////////////
// simple binder for binary visitation (we don't want to bring in the big guns)
template <typename F, typename X>
struct bind_impl
{
typedef typename F::result_type result_type;
X& x; // always by reference
F f;
bind_impl(F f, X& x) : x(x), f(f) {}
template <typename Y>
typename F::result_type operator()(Y& y) const
{
return f(x, y);
}
template <typename Y>
typename F::result_type operator()(Y const& y) const
{
return f(x, y);
}
};
    // Binds a const first argument; the binder stores it as X const&.
    template <typename F, typename X>
    bind_impl<F, X const> bind(F f, X const& x)
    {
        return bind_impl<F, X const>(f, x);
    }
template <typename F, typename X>
bind_impl<F, X> bind(F f, X& x)
{
return bind_impl<F, X>(f, x);
}
template <typename UTreeX, typename UTreeY = UTreeX>
struct visit_impl
{
        // Single dispatch: inspects the runtime type tag of `x` and invokes
        // the visitor `f` with the corresponding payload (bool, int, double,
        // iterator range, string/symbol/binary range, any_ptr, or function).
        // Reference nodes recurse into the pointed-to utree. An unknown tag
        // indicates corruption and throws bad_type_exception.
        template <typename F>
        typename F::result_type
        static apply(UTreeX& x, F f) // single dispatch
        {
            // Pick the const or mutable iterator to match UTreeX's constness.
            typedef typename
                boost::mpl::if_<boost::is_const<UTreeX>,
                typename UTreeX::const_iterator,
                typename UTreeX::iterator>::type
            iterator;

            typedef boost::iterator_range<iterator> list_range;
            typedef utree_type type;

            switch (x.get_type())
            {
                default:
                    BOOST_THROW_EXCEPTION(
                        bad_type_exception("corrupt utree type", x.get_type()));
                    break;

                case type::invalid_type:
                    return f(invalid);

                case type::nil_type:
                    return f(nil);

                case type::bool_type:
                    return f(x.b);

                case type::int_type:
                    return f(x.i);

                case type::double_type:
                    return f(x.d);

                case type::list_type:
                    return f(list_range(iterator(x.l.first, 0), iterator(0, x.l.last)));

                case type::range_type:
                    return f(list_range(iterator(x.r.first, 0), iterator(0, x.r.last)));

                case type::string_type:
                    return f(utf8_string_range_type(x.s.str(), x.s.size()));

                case type::string_range_type:
                    return f(utf8_string_range_type(x.sr.first, x.sr.last));

                case type::symbol_type:
                    return f(utf8_symbol_range_type(x.s.str(), x.s.size()));

                case type::binary_type:
                    return f(binary_range_type(x.s.str(), x.s.size()));

                case type::reference_type:
                    return apply(*x.p, f);

                case type::any_type:
                    return f(any_ptr(x.v.p, x.v.i));

                case type::function_type:
                    return f(*x.pf);
            }
        }
template <typename F>
typename F::result_type
static apply(UTreeX& x, UTreeY& y, F f) // double dispatch
{
typedef typename
boost::mpl::if_<boost::is_const<UTreeX>,
typename UTreeX::const_iterator,
typename UTreeX::iterator>::type
iterator;
typedef boost::iterator_range<iterator> list_range;
typedef utree_type type;
switch (x.get_type())
{
default:
BOOST_THROW_EXCEPTION(
bad_type_exception("corrupt utree type", x.get_type()));
break;
case type::invalid_type:
return visit_impl::apply(y, detail::bind(f, invalid));
case type::nil_type:
return visit_impl::apply(y, detail::bind(f, nil));
case type::bool_type:
return visit_impl::apply(y, detail::bind(f, x.b));
case type::int_type:
return visit_impl::apply(y, detail::bind(f, x.i));
case type::double_type:
return visit_impl::apply(y, detail::bind(f, x.d));
case type::list_type:
return visit_impl::apply(
y, detail::bind<F, list_range>(f,
list_range(iterator(x.l.first, 0), iterator(0, x.l.last))));
case type::range_type:
return visit_impl::apply(
y, detail::bind<F, list_range>(f,
list_range(iterator(x.r.first, 0), iterator(0, x.r.last))));
case type::string_type:
return visit_impl::apply(y, detail::bind(
f, utf8_string_range_type(x.s.str(), x.s.size())));
case type::string_range_type:
return visit_impl::apply(y, detail::bind(
f, utf8_string_range_type(x.sr.first, x.sr.last)));
case type::symbol_type:
return visit_impl::apply(y, detail::bind(
f, utf8_symbol_range_type(x.s.str(), x.s.size())));
case type::binary_type:
return visit_impl::apply(y, detail::bind(
f, binary_range_type(x.s.str(), x.s.size())));
case type::reference_type:
return apply(*x.p, y, f);
case type::any_type:
return visit_impl::apply(
y, detail::bind(f, any_ptr(x.v.p, x.v.i)));
case type::function_type:
return visit_impl::apply(y, detail::bind(f, *x.pf));
}
}
};
// Random-access (by index) into list-like utrees, used by get().
struct index_impl
{
    // Mutable overload: resolve references/ranges/lists to their head node
    // and walk i links.
    static utree& apply(utree& ut, std::size_t i)
    {
        switch (ut.get_type())
        {
            case utree_type::reference_type:
                return apply(ut.deref(), i);
            case utree_type::range_type:
                return apply(ut.r.first, i);
            case utree_type::list_type:
                return apply(ut.l.first, i);
            default:
                BOOST_THROW_EXCEPTION(
                    bad_type_exception
                        ("index operation performed on non-list utree type",
                            ut.get_type()));
        }
    }
    // Const overload, same resolution logic.
    static utree const& apply(utree const& ut, std::size_t i)
    {
        switch (ut.get_type())
        {
            case utree_type::reference_type:
                return apply(ut.deref(), i);
            case utree_type::range_type:
                return apply(ut.r.first, i);
            case utree_type::list_type:
                return apply(ut.l.first, i);
            default:
                BOOST_THROW_EXCEPTION(
                    bad_type_exception
                        ("index operation performed on non-list utree type",
                            ut.get_type()));
        }
    }
    // Walk i links from node and return that element's value.
    // NOTE(review): no bounds check — an out-of-range i dereferences a null
    // next pointer; presumably callers must validate the index. Confirm.
    static utree& apply(list::node* node, std::size_t i)
    {
        for (; i > 0; --i)
            node = node->next;
        return node->val;
    }
    static utree const& apply(list::node const* node, std::size_t i)
    {
        for (; i > 0; --i)
            node = node->next;
        return node->val;
    }
};
}}}
namespace boost { namespace spirit
{
// stored_function: function_base implementation that OWNS a copy of the
// callable F; this is what utree's function_type slot holds.
template <typename F>
stored_function<F>::stored_function(F f)
  : f(f)
{
}
template <typename F>
stored_function<F>::~stored_function()
{
}
// Invoke the wrapped callable with the environment utree (const overload).
template <typename F>
utree stored_function<F>::operator()(utree const& env) const
{
    return f(env);
}
template <typename F>
utree stored_function<F>::operator()(utree& env) const
{
    return f(env);
}
// Polymorphic copy, used when a function-typed utree is copied.
template <typename F>
function_base*
stored_function<F>::clone() const
{
    return new stored_function<F>(f);
}
// referenced_function: like stored_function but holds only a REFERENCE to
// the callable — the caller must keep f alive as long as the utree uses it.
template <typename F>
referenced_function<F>::referenced_function(F& f)
  : f(f)
{
}
template <typename F>
referenced_function<F>::~referenced_function()
{
}
// Invoke the referenced callable with the environment utree.
template <typename F>
utree referenced_function<F>::operator()(utree const& env) const
{
    return f(env);
}
template <typename F>
utree referenced_function<F>::operator()(utree& env) const
{
    return f(env);
}
// Polymorphic copy; the clone still references the same external callable.
template <typename F>
function_base*
referenced_function<F>::clone() const
{
    return new referenced_function<F>(f);
}
// utree constructors. Every constructor first calls s.initialize() (the
// fast string also carries the type tag), then stores the value in the
// appropriate union member and sets the tag last.
inline utree::utree(utree::invalid_type)
{
    s.initialize();
    set_type(type::invalid_type);
}
inline utree::utree(utree::nil_type)
{
    s.initialize();
    set_type(type::nil_type);
}
inline utree::utree(bool b_)
{
    s.initialize();
    b = b_;
    set_type(type::bool_type);
}
inline utree::utree(char c)
{
    s.initialize();
    // char constructs a single element string
    s.construct(&c, &c+1);
    set_type(type::string_type);
}
inline utree::utree(unsigned int i_)
{
    s.initialize();
    i = i_;
    set_type(type::int_type);
}
inline utree::utree(int i_)
{
    s.initialize();
    i = i_;
    set_type(type::int_type);
}
inline utree::utree(double d_)
{
    s.initialize();
    d = d_;
    set_type(type::double_type);
}
inline utree::utree(char const* str)
{
    s.initialize();
    // deep copy of the NUL-terminated string
    s.construct(str, str + strlen(str));
    set_type(type::string_type);
}
inline utree::utree(char const* str, std::size_t len)
{
    s.initialize();
    s.construct(str, str + len);
    set_type(type::string_type);
}
inline utree::utree(std::string const& str)
{
    s.initialize();
    s.construct(str.begin(), str.end());
    set_type(type::string_type);
}
// basic_string carries its tag (string/symbol/binary) in its type_ argument.
template <typename Base, utree_type::info type_>
inline utree::utree(basic_string<Base, type_> const& bin)
{
    s.initialize();
    s.construct(bin.begin(), bin.end());
    set_type(type_);
}
// Non-owning reference to another utree.
inline utree::utree(boost::reference_wrapper<utree> ref)
{
    s.initialize();
    p = ref.get_pointer();
    set_type(type::reference_type);
}
// Type-erased pointer + type id, stored shallowly.
inline utree::utree(any_ptr const& p)
{
    s.initialize();
    v.p = p.p;
    v.i = p.i;
    set_type(type::any_type);
}
// Function value: clones (owns) the function object.
inline utree::utree(function_base const& pf_)
{
    s.initialize();
    pf = pf_.clone();
    set_type(type::function_type);
}
// Function value: takes ownership of an already-allocated function object.
inline utree::utree(function_base* pf_)
{
    s.initialize();
    pf = pf_;
    set_type(type::function_type);
}
// Deep copy of an arbitrary iterator range into a list.
template <typename Iter>
inline utree::utree(boost::iterator_range<Iter> r)
{
    s.initialize();
    assign(r.begin(), r.end());
}
// Shallow range views: store only the boundary node pointers.
inline utree::utree(range r, shallow_tag)
{
    s.initialize();
    this->r.first = r.begin().node;
    this->r.last = r.end().prev;
    set_type(type::range_type);
}
inline utree::utree(const_range r, shallow_tag)
{
    s.initialize();
    this->r.first = r.begin().node;
    this->r.last = r.end().prev;
    set_type(type::range_type);
}
// Shallow string view: stores the caller's [first, last) pointers.
inline utree::utree(utf8_string_range_type const& str, shallow_tag)
{
    s.initialize();
    this->sr.first = str.begin();
    this->sr.last = str.end();
    set_type(type::string_range_type);
}
// Deep copy constructor.
inline utree::utree(utree const& other)
{
    s.initialize();
    copy(other);
}
// Destructor releases whatever the current type owns (see free()).
inline utree::~utree()
{
    free();
}
// Assignment operators. Each one releases the currently owned value with
// free(), then stores the new value and sets the type tag, mirroring the
// corresponding constructor.
inline utree& utree::operator=(utree const& other)
{
    if (this != &other) // guard against self-assignment
    {
        free();
        copy(other);
    }
    return *this;
}
inline utree& utree::operator=(nil_type)
{
    free();
    set_type(type::nil_type);
    return *this;
}
inline utree& utree::operator=(bool b_)
{
    free();
    b = b_;
    set_type(type::bool_type);
    return *this;
}
inline utree& utree::operator=(char c)
{
    // char constructs a single element string
    free();
    s.construct(&c, &c+1);
    set_type(type::string_type);
    return *this;
}
inline utree& utree::operator=(unsigned int i_)
{
    free();
    i = i_;
    set_type(type::int_type);
    return *this;
}
inline utree& utree::operator=(int i_)
{
    free();
    i = i_;
    set_type(type::int_type);
    return *this;
}
inline utree& utree::operator=(double d_)
{
    free();
    d = d_;
    set_type(type::double_type);
    return *this;
}
inline utree& utree::operator=(char const* s_)
{
    free();
    s.construct(s_, s_ + strlen(s_));
    set_type(type::string_type);
    return *this;
}
inline utree& utree::operator=(std::string const& s_)
{
    free();
    s.construct(s_.begin(), s_.end());
    set_type(type::string_type);
    return *this;
}
template <typename Base, utree_type::info type_>
inline utree& utree::operator=(basic_string<Base, type_> const& bin)
{
    free();
    s.construct(bin.begin(), bin.end());
    set_type(type_);
    return *this;
}
inline utree& utree::operator=(boost::reference_wrapper<utree> ref)
{
    free();
    p = ref.get_pointer();
    set_type(type::reference_type);
    return *this;
}
inline utree& utree::operator=(any_ptr const& p_)
{
    free();
    v.p = p_.p;
    v.i = p_.i;
    set_type(type::any_type);
    return *this;
}
inline utree& utree::operator=(function_base const& pf_)
{
    free();
    pf = pf_.clone();
    set_type(type::function_type);
    return *this;
}
inline utree& utree::operator=(function_base* pf_)
{
    free();
    pf = pf_;
    set_type(type::function_type);
    return *this;
}
template <typename Iter>
inline utree& utree::operator=(boost::iterator_range<Iter> r)
{
    free();
    assign(r.begin(), r.end());
    return *this;
}
// Static visitation entry points: forward to detail::visit_impl with the
// const-ness of each argument encoded in the template parameters.
template <typename F>
typename boost::result_of<F(utree const&)>::type
inline utree::visit(utree const& x, F f)
{
    return detail::visit_impl<utree const>::apply(x, f);
}
template <typename F>
typename boost::result_of<F(utree&)>::type
inline utree::visit(utree& x, F f)
{
    return detail::visit_impl<utree>::apply(x, f);
}
template <typename F>
typename boost::result_of<F(utree const&, utree const&)>::type
inline utree::visit(utree const& x, utree const& y, F f)
{
    return detail::visit_impl<utree const, utree const>::apply(x, y, f);
}
template <typename F>
typename boost::result_of<F(utree const&, utree&)>::type
inline utree::visit(utree const& x, utree& y, F f)
{
    return detail::visit_impl<utree const, utree>::apply(x, y, f);
}
template <typename F>
typename boost::result_of<F(utree&, utree const&)>::type
inline utree::visit(utree& x, utree const& y, F f)
{
    return detail::visit_impl<utree, utree const>::apply(x, y, f);
}
template <typename F>
typename boost::result_of<F(utree&, utree&)>::type
inline utree::visit(utree& x, utree& y, F f)
{
    return detail::visit_impl<utree, utree>::apply(x, y, f);
}
// Index into a list-like utree; delegates to detail::index_impl.
inline utree::reference get(utree::reference ut, utree::size_type i)
{ return detail::index_impl::apply(ut, i); }
inline utree::const_reference
get(utree::const_reference ut, utree::size_type i)
{ return detail::index_impl::apply(ut, i); }
// List mutators. Each first forwards through reference_type (so operations
// on a reference act on the referee), then coerces this node to a list via
// ensure_list_type() before mutating.
template <typename T>
inline void utree::push_front(T const& val)
{
    if (get_type() == type::reference_type)
        return p->push_front(val);
    ensure_list_type("push_front()");
    l.push_front(val);
}
template <typename T>
inline void utree::push_back(T const& val)
{
    if (get_type() == type::reference_type)
        return p->push_back(val);
    ensure_list_type("push_back()");
    l.push_back(val);
}
// Insert val before pos; returns an iterator to the inserted element.
template <typename T>
inline utree::iterator utree::insert(iterator pos, T const& val)
{
    if (get_type() == type::reference_type)
        return p->insert(pos, val);
    ensure_list_type("insert()");
    if (!pos.node) // pos is end(): append
    {
        l.push_back(val);
        return utree::iterator(l.last, l.last->prev);
    }
    l.insert(val, pos);
    // the new element is the node now preceding pos
    return utree::iterator(pos.node->prev, pos.node->prev->prev);
}
// Insert n copies of val before pos.
template <typename T>
inline void utree::insert(iterator pos, std::size_t n, T const& val)
{
    if (get_type() == type::reference_type)
        return p->insert(pos, n, val);
    ensure_list_type("insert()");
    for (std::size_t i = 0; i != n; ++i)
        insert(pos, val);
}
// Insert the range [first, last) before pos.
template <typename Iterator>
inline void utree::insert(iterator pos, Iterator first, Iterator last)
{
    if (get_type() == type::reference_type)
        return p->insert(pos, first, last);
    ensure_list_type("insert()");
    while (first != last)
        insert(pos, *first++);
}
// Replace the contents with a deep copy of [first, last) as a list.
template <typename Iterator>
inline void utree::assign(Iterator first, Iterator last)
{
    if (get_type() == type::reference_type)
        return p->assign(first, last);
    clear();
    set_type(type::list_type);
    while (first != last)
    {
        push_back(*first);
        ++first;
    }
}
// Destructive operations; all forward through reference_type first.
inline void utree::clear()
{
    if (get_type() == type::reference_type)
        return p->clear();
    // clear will always make this an invalid type
    free();
    set_type(type::invalid_type);
}
inline void utree::pop_front()
{
    if (get_type() == type::reference_type)
        return p->pop_front();
    if (get_type() != type::list_type)
        BOOST_THROW_EXCEPTION(
            bad_type_exception
                ("pop_front() called on non-list utree type",
                    get_type()));
    l.pop_front();
}
inline void utree::pop_back()
{
    if (get_type() == type::reference_type)
        return p->pop_back();
    if (get_type() != type::list_type)
        BOOST_THROW_EXCEPTION(
            bad_type_exception
                ("pop_back() called on non-list utree type",
                    get_type()));
    l.pop_back();
}
// Erase the element at pos; returns an iterator to the following element.
inline utree::iterator utree::erase(iterator pos)
{
    if (get_type() == type::reference_type)
        return p->erase(pos);
    if (get_type() != type::list_type)
        BOOST_THROW_EXCEPTION(
            bad_type_exception
                ("erase() called on non-list utree type",
                    get_type()));
    detail::list::node* np = l.erase(pos.node);
    // np == 0 means the tail was erased; prev is then the (new) tail
    return iterator(np, np?np->prev:l.last);
}
// Erase [first, last); returns last.
inline utree::iterator utree::erase(iterator first, iterator last)
{
    if (get_type() == type::reference_type)
        return p->erase(first, last);
    if (get_type() != type::list_type)
        BOOST_THROW_EXCEPTION(
            bad_type_exception
                ("erase() called on non-list utree type",
                    get_type()));
    while (first != last)
        erase(first++); // pass a copy; first advances before invalidation
    return last;
}
// Iterator accessors. references forward to the referee; ranges iterate the
// stored node span; anything else must be (or become) a list.
inline utree::iterator utree::begin()
{
    if (get_type() == type::reference_type)
        return p->begin();
    else if (get_type() == type::range_type)
        return iterator(r.first, 0);
    // otherwise...
    ensure_list_type("begin()");
    return iterator(l.first, 0);
}
inline utree::iterator utree::end()
{
    if (get_type() == type::reference_type)
        return p->end();
    else if (get_type() == type::range_type)
        // NOTE(review): uses r.first as the end iterator's prev node, while
        // visit_impl builds the range end as iterator(0, r.last) — looks
        // inconsistent; confirm before relying on --end() for ranges.
        return iterator(0, r.first);
    // otherwise...
    ensure_list_type("end()");
    return iterator(0, l.last);
}
inline utree::ref_iterator utree::ref_begin()
{
    if (get_type() == type::reference_type)
        return p->ref_begin();
    else if (get_type() == type::range_type)
        return ref_iterator(r.first, 0);
    // otherwise...
    ensure_list_type("ref_begin()");
    return ref_iterator(l.first, 0);
}
inline utree::ref_iterator utree::ref_end()
{
    if (get_type() == type::reference_type)
        return p->ref_end();
    else if (get_type() == type::range_type)
        // NOTE(review): same r.first-vs-r.last question as end() above.
        return ref_iterator(0, r.first);
    // otherwise...
    ensure_list_type("ref_end()");
    return ref_iterator(0, l.last);
}
// Const accessors cannot coerce the type, so they throw on non-lists.
inline utree::const_iterator utree::begin() const
{
    if (get_type() == type::reference_type)
        return ((utree const*)p)->begin();
    if (get_type() == type::range_type)
        return const_iterator(r.first, 0);
    // otherwise...
    if (get_type() != type::list_type)
        BOOST_THROW_EXCEPTION(
            bad_type_exception
                ("begin() called on non-list utree type",
                    get_type()));
    return const_iterator(l.first, 0);
}
inline utree::const_iterator utree::end() const
{
    if (get_type() == type::reference_type)
        return ((utree const*)p)->end();
    if (get_type() == type::range_type)
        // NOTE(review): same r.first-vs-r.last question as end() above.
        return const_iterator(0, r.first);
    // otherwise...
    if (get_type() != type::list_type)
        BOOST_THROW_EXCEPTION(
            bad_type_exception
                ("end() called on non-list utree type",
                    get_type()));
    return const_iterator(0, l.last);
}
// True for empty lists/ranges and for nil/invalid nodes; other scalar
// types are considered non-empty.
inline bool utree::empty() const
{
    type::info t = get_type();
    if (t == type::reference_type)
        return ((utree const*)p)->empty();
    if (t == type::range_type)
        return r.first == 0;
    if (t == type::list_type)
        return l.size == 0;
    return t == type::nil_type || t == type::invalid_type;
}
// Element count for lists/ranges, character count for string-like types,
// 0 for nil; throws for scalar types.
inline std::size_t utree::size() const
{
    type::info t = get_type();
    if (t == type::reference_type)
        return ((utree const*)p)->size();
    if (t == type::range_type)
    {
        // FIXME: O(n), and we have the room to store the size of a range
        // in the union if we compute it when assigned/constructed.
        std::size_t size = 0;
        detail::list::node* n = r.first;
        while (n)
        {
            n = n->next;
            ++size;
        }
        return size;
    }
    if (t == type::list_type)
        return l.size;
    if (t == type::string_type)
        return s.size();
    if (t == type::symbol_type)
        return s.size();
    if (t == type::binary_type)
        return s.size();
    if (t == type::string_range_type)
        return sr.last - sr.first;
    if (t != type::nil_type)
        BOOST_THROW_EXCEPTION(
            bad_type_exception
                ("size() called on non-list and non-string utree type",
                    get_type()));
    return 0;
}
// Expose the runtime type tag (variant-style "which").
inline utree_type::info utree::which() const
{
    return get_type();
}
// First/last element accessors for lists and ranges; references forward to
// the referee; empty containers and non-list types throw.
inline utree& utree::front()
{
    if (get_type() == type::reference_type)
        return p->front();
    if (get_type() == type::range_type)
    {
        if (!r.first)
            BOOST_THROW_EXCEPTION(
                empty_exception("front() called on empty utree range"));
        return r.first->val;
    }
    // otherwise...
    if (get_type() != type::list_type)
        BOOST_THROW_EXCEPTION(
            bad_type_exception
                ("front() called on non-list utree type", get_type()));
    else if (!l.first)
        BOOST_THROW_EXCEPTION(
            empty_exception("front() called on empty utree list"));
    return l.first->val;
}
inline utree& utree::back()
{
    if (get_type() == type::reference_type)
        return p->back();
    if (get_type() == type::range_type)
    {
        if (!r.last)
            BOOST_THROW_EXCEPTION(
                empty_exception("back() called on empty utree range"));
        return r.last->val;
    }
    // otherwise...
    if (get_type() != type::list_type)
        BOOST_THROW_EXCEPTION(
            bad_type_exception
                ("back() called on non-list utree type", get_type()));
    else if (!l.last)
        BOOST_THROW_EXCEPTION(
            empty_exception("back() called on empty utree list"));
    return l.last->val;
}
// Const overloads, same logic.
inline utree const& utree::front() const
{
    if (get_type() == type::reference_type)
        return ((utree const*)p)->front();
    if (get_type() == type::range_type)
    {
        if (!r.first)
            BOOST_THROW_EXCEPTION(
                empty_exception("front() called on empty utree range"));
        return r.first->val;
    }
    // otherwise...
    if (get_type() != type::list_type)
        BOOST_THROW_EXCEPTION(
            bad_type_exception
                ("front() called on non-list utree type", get_type()));
    else if (!l.first)
        BOOST_THROW_EXCEPTION(
            empty_exception("front() called on empty utree list"));
    return l.first->val;
}
inline utree const& utree::back() const
{
    if (get_type() == type::reference_type)
        return ((utree const*)p)->back();
    if (get_type() == type::range_type)
    {
        if (!r.last)
            BOOST_THROW_EXCEPTION(
                empty_exception("back() called on empty utree range"));
        return r.last->val;
    }
    // otherwise...
    if (get_type() != type::list_type)
        BOOST_THROW_EXCEPTION(
            bad_type_exception
                ("back() called on non-list utree type", get_type()));
    else if (!l.last)
        BOOST_THROW_EXCEPTION(
            empty_exception("back() called on empty utree list"));
    return l.last->val;
}
// Swap the raw storage (the fast string overlays the whole union and also
// carries the type tag, so swapping it swaps everything).
inline void utree::swap(utree& other)
{
    s.swap(other.s);
}
inline utree::type::info utree::get_type() const
{
    // the fast string holds the type info
    return static_cast<utree::type::info>(s.get_type());
}
inline void utree::set_type(type::info t)
{
    // the fast string holds the type info
    s.set_type(t);
}
// Coerce this node into a list: an invalid (freshly cleared) node becomes an
// empty list; an existing list is left alone; anything else throws.
// @param failed_in  name of the calling operation, used in the error message.
// @throws bad_type_exception when the node is neither invalid nor a list.
inline void utree::ensure_list_type(char const* failed_in)
{
    type::info t = get_type();
    if (t == type::invalid_type)
    {
        set_type(type::list_type);
        l.default_construct();
    }
    else if (t != type::list_type) // reuse the cached tag (was re-read)
    {
        std::string msg = failed_in;
        // fix: a separating space was missing, producing messages like
        // "push_front()called on ..."
        msg += " called on non-list and non-invalid utree type";
        BOOST_THROW_EXCEPTION(bad_type_exception(msg.c_str(), t));
    }
}
// Release whatever the current type owns (string storage, list nodes, or a
// cloned function object); shallow types own nothing. Leaves the storage
// re-initialized so the node is safe to overwrite.
inline void utree::free()
{
    switch (get_type())
    {
        case type::binary_type:
        case type::symbol_type:
        case type::string_type:
            s.free();
            break;
        case type::list_type:
            l.free();
            break;
        case type::function_type:
            delete pf;
            break;
        default:
            // scalar and shallow (range/reference/any) types: nothing owned
            break;
    };
    s.initialize();
}
// Deep-copy other into this (storage must already be freed/initialized).
// The user-defined tag is copied alongside the value in every case.
inline void utree::copy(utree const& other)
{
    set_type(other.get_type());
    switch (other.get_type())
    {
        default:
            BOOST_THROW_EXCEPTION(
                bad_type_exception("corrupt utree type", other.get_type()));
            break;
        case type::invalid_type:
        case type::nil_type:
            s.tag(other.s.tag());
            break;
        case type::bool_type:
            b = other.b;
            s.tag(other.s.tag());
            break;
        case type::int_type:
            i = other.i;
            s.tag(other.s.tag());
            break;
        case type::double_type:
            d = other.d;
            s.tag(other.s.tag());
            break;
        case type::reference_type:
            // shallow: both trees reference the same referee
            p = other.p;
            s.tag(other.s.tag());
            break;
        case type::any_type:
            v = other.v;
            s.tag(other.s.tag());
            break;
        case type::range_type:
            // shallow: copies the node-pointer span only
            r = other.r;
            s.tag(other.s.tag());
            break;
        case type::string_range_type:
            sr = other.sr;
            s.tag(other.s.tag());
            break;
        case type::function_type:
            // deep: clone the function object
            pf = other.pf->clone();
            s.tag(other.s.tag());
            break;
        case type::string_type:
        case type::symbol_type:
        case type::binary_type:
            s.copy(other.s);
            s.tag(other.s.tag());
            break;
        case type::list_type:
            l.copy(other.l);
            s.tag(other.s.tag());
            break;
    }
}
// Trait: true only for boost::iterator_range specializations.
template <typename T>
struct is_iterator_range
  : boost::mpl::false_
{};
template <typename Iterator>
struct is_iterator_range<boost::iterator_range<Iterator> >
  : boost::mpl::true_
{};
// Visitor used by utree::get<T>(): converts the visited value to To when a
// conversion exists, otherwise throws std::bad_cast.
template <typename To>
struct utree_cast
{
    typedef To result_type;
    template <typename From>
    To dispatch(From const& val, boost::mpl::true_) const
    {
        return To(val); // From is convertible to To
    }
    template <typename From>
    To dispatch(From const&, boost::mpl::false_) const
    {
        // From is NOT convertible to To !!!
        throw std::bad_cast();
        return To(); // unreachable; keeps the compiler happy
    }
    template <typename From>
    To operator()(From const& val) const
    {
        // boost::iterator_range has a templated constructor, accepting
        // any argument and hence any type is 'convertible' to it.
        // For ranges we therefore require an exact type match instead.
        typedef typename boost::mpl::eval_if<
            is_iterator_range<To>
          , boost::is_same<From, To>, boost::is_convertible<From, To>
        >::type is_convertible;
        return dispatch(val, is_convertible());
    }
};
// Pointer specialization: only an any_ptr holding exactly T* converts;
// every other visited type throws.
template <typename T>
struct utree_cast<T*>
{
    typedef T* result_type;
    template <typename From>
    T* operator()(From const&) const
    {
        // From is NOT convertible to T !!!
        throw std::bad_cast();
        return 0; // unreachable
    }
    T* operator()(any_ptr const& p) const
    {
        return p.get<T*>();
    }
};
// Extract the stored value as T via visitation; throws std::bad_cast when
// the stored type is not convertible (see utree_cast).
template <typename T>
inline T utree::get() const
{
    return utree::visit(*this, utree_cast<T>());
}
// Resolve one level of reference indirection (identity for non-references).
inline utree& utree::deref()
{
    return (get_type() == type::reference_type) ? *p : *this;
}
inline utree const& utree::deref() const
{
    return (get_type() == type::reference_type) ? *p : *this;
}
// User-defined tag stored in the fast string, independent of the value.
inline short utree::tag() const
{
    return s.tag();
}
inline void utree::tag(short tag)
{
    s.tag(tag);
}
// Invoke a function-typed utree with env; follows references, throws on
// non-function types.
inline utree utree::eval(utree const& env) const
{
    if (get_type() == type::reference_type)
        return deref().eval(env);
    if (get_type() != type::function_type)
        BOOST_THROW_EXCEPTION(
            bad_type_exception(
                "eval() called on non-function utree type", get_type()));
    return (*pf)(env);
}
inline utree utree::eval(utree& env) const
{
    if (get_type() == type::reference_type)
        return deref().eval(env);
    if (get_type() != type::function_type)
        BOOST_THROW_EXCEPTION(
            bad_type_exception(
                "eval() called on non-function utree type", get_type()));
    return (*pf)(env);
}
// Call-operator sugar for eval().
inline utree utree::operator() (utree const& env) const
{
    return eval(env);
}
inline utree utree::operator() (utree& env) const
{
    return eval(env);
}
}}
#if defined(BOOST_MSVC)
# pragma warning(pop)
#endif
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.