text
stringlengths 5
1.04M
|
|---|
#include "algorithm-median.hpp"
#include "../include-opencv.hpp"
#include <fmo/processing.hpp>
namespace fmo {
/// Registers the "median-v1" algorithm factory with the global Algorithm
/// registry, so the algorithm can be instantiated by name.
void registerMedianV1() {
    auto makeMedianV1 = [](const Algorithm::Config& config, Format format, Dims dims) {
        return std::unique_ptr<Algorithm>(new MedianV1(config, format, dims));
    };
    Algorithm::registerFactory("median-v1", makeMedianV1);
}
// Constructs the algorithm: stores the configuration, primes the source level
// with the expected input format/dimensions (frame number starts at 0), and
// initializes the differentiator from the configured diff parameters.
MedianV1::MedianV1(const Config& cfg, Format format, Dims dims)
: mCfg(cfg), mSourceLevel{{format, dims}, 0}, mDiff(cfg.diff) {}
// Runs one iteration of the detection pipeline on a new input frame.
// The frame is consumed by swapping (the caller's image buffer is exchanged
// for a previously used one); each step below feeds the next, so the call
// order is significant.
void MedianV1::setInputSwap(Image& in) {
swapAndSubsampleInput(in);
computeBinDiff();
findComponents();
findObjects();
matchObjects();
selectObjects();
// add steps here...
}
// Swaps the caller's frame into mSourceLevel and repeatedly halves it until
// its height is at most mCfg.maxImageHeight; the result becomes input [0] of
// the processing level while the two previous results shift to slots [1], [2].
// Throws std::runtime_error if the frame's format/dims don't match the
// configured source level, or if no subsampling step was needed.
void MedianV1::swapAndSubsampleInput(Image& in) {
if (in.format() != mSourceLevel.image.format()) {
throw std::runtime_error("setInputSwap(): bad format");
}
if (in.dims() != mSourceLevel.image.dims()) {
throw std::runtime_error("setInputSwap(): bad dimensions");
}
mSourceLevel.image.swap(in);
mSourceLevel.frameNum++;
// subsample until the image size is below a set height;
// pixelSizeLog2 counts the halvings performed
int pixelSizeLog2 = 0;
Image* input = &mSourceLevel.image;
for (; input->dims().height > mCfg.maxImageHeight; pixelSizeLog2++) {
// grow the cache of intermediate buffers lazily, one per subsampling level
if (int(mCache.subsampled.size()) == pixelSizeLog2) {
mCache.subsampled.emplace_back(new Image);
}
auto& next = *mCache.subsampled[pixelSizeLog2];
mSubsampler(*input, next);
input = &next;
}
// need at least one decimation to happen
// - because strips use integral half heights
// - because we want the source image untouched
if (pixelSizeLog2 == 0) {
throw std::runtime_error("setInputSwap(): input image too small");
}
// swap the product of decimation into the processing level, rotating the two
// previous frames into slots [1] and [2] (consumed by computeBinDiff())
mProcessingLevel.inputs[2].swap(mProcessingLevel.inputs[1]);
mProcessingLevel.inputs[1].swap(mProcessingLevel.inputs[0]);
mProcessingLevel.inputs[0].swap(*input);
mProcessingLevel.pixelSizeLog2 = pixelSizeLog2;
}
// Computes the binarized difference image for the current frame. The
// background is estimated as the per-pixel median of the last three
// subsampled frames (median3); mDiff then turns the difference between the
// newest frame and that background into the binary image level.binDiff.
void MedianV1::computeBinDiff() {
auto& level = mProcessingLevel;
if (mSourceLevel.frameNum < 3) {
// initial frames: just generate a black diff (not enough history for median3)
level.binDiff.resize(Format::GRAY, level.inputs[0].dims());
level.binDiff.wrap().setTo(uint8_t(0x00));
return;
}
fmo::median3(level.inputs[0], level.inputs[1], level.inputs[2], level.background);
mDiff(level.inputs[0], level.background, level.binDiff);
}
}
|
//******************************************************************
//
// Copyright 2014 Samsung Electronics All Rights Reserved.
// Copyright 2014 Intel Mobile Communications GmbH All Rights Reserved.
//
//-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
#include <stdio.h>
#include <stdlib.h>
#include <ocstack.h>
/*
 * Maps an OCStackResult status code to its symbolic name, for logging.
 * Returns a pointer to a static string literal (never NULL); any value not
 * handled below yields "UNKNOWN". The presence-related codes are only
 * compiled in when the stack is built with WITH_PRESENCE.
 */
const char *getResult(OCStackResult result)
{
switch (result)
{
case OC_STACK_OK:
return "OC_STACK_OK";
case OC_STACK_RESOURCE_CREATED:
return "OC_STACK_RESOURCE_CREATED";
case OC_STACK_RESOURCE_DELETED:
return "OC_STACK_RESOURCE_DELETED";
case OC_STACK_RESOURCE_CHANGED:
return "OC_STACK_RESOURCE_CHANGED";
case OC_STACK_INVALID_URI:
return "OC_STACK_INVALID_URI";
case OC_STACK_INVALID_QUERY:
return "OC_STACK_INVALID_QUERY";
case OC_STACK_INVALID_IP:
return "OC_STACK_INVALID_IP";
case OC_STACK_INVALID_PORT:
return "OC_STACK_INVALID_PORT";
case OC_STACK_INVALID_CALLBACK:
return "OC_STACK_INVALID_CALLBACK";
case OC_STACK_INVALID_METHOD:
return "OC_STACK_INVALID_METHOD";
case OC_STACK_NO_MEMORY:
return "OC_STACK_NO_MEMORY";
case OC_STACK_COMM_ERROR:
return "OC_STACK_COMM_ERROR";
case OC_STACK_INVALID_PARAM:
return "OC_STACK_INVALID_PARAM";
case OC_STACK_NOTIMPL:
return "OC_STACK_NOTIMPL";
case OC_STACK_NO_RESOURCE:
return "OC_STACK_NO_RESOURCE";
case OC_STACK_RESOURCE_ERROR:
return "OC_STACK_RESOURCE_ERROR";
case OC_STACK_SLOW_RESOURCE:
return "OC_STACK_SLOW_RESOURCE";
case OC_STACK_NO_OBSERVERS:
return "OC_STACK_NO_OBSERVERS";
#ifdef WITH_PRESENCE
case OC_STACK_PRESENCE_STOPPED:
return "OC_STACK_PRESENCE_STOPPED";
case OC_STACK_PRESENCE_TIMEOUT:
return "OC_STACK_PRESENCE_TIMEOUT";
#endif
case OC_STACK_ERROR:
return "OC_STACK_ERROR";
default:
return "UNKNOWN";
}
}
/*
 * Overwrites every newline character in 'str' with a NUL terminator,
 * effectively truncating the string at its first newline (later newlines
 * are still cleared, matching the original scan-to-end behavior).
 * A NULL pointer is accepted and ignored.
 */
void StripNewLineChar(char* str)
{
    if (str == NULL)
    {
        return;
    }
    for (char *p = str; (p = strchr(p, '\n')) != NULL; ++p)
    {
        *p = '\0';
    }
}
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// SPDX-License-Identifier: MIT
#include "azure/template/template_client.hpp"
#include "azure/template/version.hpp"
#include <string>
using namespace Azure::Template;
// Returns the client library's version string, as provided by the generated
// Version::VersionString() helper.
std::string const TemplateClient::ClientVersion()
{
return Version::VersionString();
}
|
//
// Created by piotrek on 27.12.16.
//
#ifndef CSSP_FOR_HPP
#define CSSP_FOR_HPP
#include <list>
#include "lib/types.hpp"
#include "lib/ast/node/Node.hpp"
namespace CSSP {
class Generator;
namespace AST {
/**
 * AST node representing a `for` loop: iterates a named variable from
 * `from` to `to`, executing `instructionList` on each pass.
 */
class For : public Node {
public:
/**
 * Constructor
 * @param name loop variable name
 * @param from loop start index expression
 * @param to loop end index expression
 * @param instructionList instructions executed inside the loop body
 */
For(std::string name, Value *from, Value *to, NodeListType *instructionList)
: Node("For"),
name(name),
from(from),
to(to),
instructionList(instructionList) {};
virtual ~For();
/**
 * @inherit
 * @param generator code generator used to emit output
 * @return generated code for this loop
 */
virtual const std::string generate(Generator *generator) const override;
/**
 * @inherit
 * @return human-readable description of this node for debugging
 */
virtual const std::string debugString() const override;
protected:
/**
 * Variable name
 */
std::string name;
/**
 * Loop start index
 * NOTE(review): raw pointer — presumably owned by this node since the
 * destructor is defined out of line; confirm ownership in For.cpp.
 */
Value *from = nullptr;
/**
 * Loop end index
 */
Value *to = nullptr;
/**
 * List of instructions inside loop
 */
NodeListType *instructionList = nullptr;
};
}
}
#endif //CSSP_FOR_HPP
|
// Autogenerated from CppHeaderCreator
// Created by Sc2ad
// =========================================================================
#pragma once
// Begin includes
#include "extern/beatsaber-hook/shared/utils/typedefs.h"
// Including type: System.Enum
#include "System/Enum.hpp"
// Completed includes
// Type namespace: System
namespace System {
// Size: 0x4
#pragma pack(push, 1)
// Autogenerated type: System.Enum/ParseFailureKind
// Autogenerated value-type mirror of the il2cpp enum System.Enum/ParseFailureKind
// (int-backed; see constants None..UnhandledException below). Do not hand-edit —
// regenerate via CppHeaderCreator.
struct Enum::ParseFailureKind/*, public System::Enum*/ {
public:
// public System.Int32 value__
// Size: 0x4
// Offset: 0x0
int value;
// Field size check
static_assert(sizeof(int) == 0x4);
// Creating value type constructor for type: ParseFailureKind
constexpr ParseFailureKind(int value_ = {}) noexcept : value{value_} {}
// Creating interface conversion operator: operator System::Enum
operator System::Enum() noexcept {
return *reinterpret_cast<System::Enum*>(this);
}
// Creating conversion operator: operator int
constexpr operator int() const noexcept {
return value;
}
// static field const value: static public System.Enum/ParseFailureKind None
static constexpr const int None = 0;
// Get static field: static public System.Enum/ParseFailureKind None
static System::Enum::ParseFailureKind _get_None();
// Set static field: static public System.Enum/ParseFailureKind None
static void _set_None(System::Enum::ParseFailureKind value);
// static field const value: static public System.Enum/ParseFailureKind Argument
static constexpr const int Argument = 1;
// Get static field: static public System.Enum/ParseFailureKind Argument
static System::Enum::ParseFailureKind _get_Argument();
// Set static field: static public System.Enum/ParseFailureKind Argument
static void _set_Argument(System::Enum::ParseFailureKind value);
// static field const value: static public System.Enum/ParseFailureKind ArgumentNull
static constexpr const int ArgumentNull = 2;
// Get static field: static public System.Enum/ParseFailureKind ArgumentNull
static System::Enum::ParseFailureKind _get_ArgumentNull();
// Set static field: static public System.Enum/ParseFailureKind ArgumentNull
static void _set_ArgumentNull(System::Enum::ParseFailureKind value);
// static field const value: static public System.Enum/ParseFailureKind ArgumentWithParameter
static constexpr const int ArgumentWithParameter = 3;
// Get static field: static public System.Enum/ParseFailureKind ArgumentWithParameter
static System::Enum::ParseFailureKind _get_ArgumentWithParameter();
// Set static field: static public System.Enum/ParseFailureKind ArgumentWithParameter
static void _set_ArgumentWithParameter(System::Enum::ParseFailureKind value);
// static field const value: static public System.Enum/ParseFailureKind UnhandledException
static constexpr const int UnhandledException = 4;
// Get static field: static public System.Enum/ParseFailureKind UnhandledException
static System::Enum::ParseFailureKind _get_UnhandledException();
// Set static field: static public System.Enum/ParseFailureKind UnhandledException
static void _set_UnhandledException(System::Enum::ParseFailureKind value);
}; // System.Enum/ParseFailureKind
#pragma pack(pop)
static check_size<sizeof(Enum::ParseFailureKind), 0 + sizeof(int)> __System_Enum_ParseFailureKindSizeCheck;
static_assert(sizeof(Enum::ParseFailureKind) == 0x4);
}
#include "extern/beatsaber-hook/shared/utils/il2cpp-type-check.hpp"
DEFINE_IL2CPP_ARG_TYPE(System::Enum::ParseFailureKind, "System", "Enum/ParseFailureKind");
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE415_Double_Free__new_delete_char_34.cpp
Label Definition File: CWE415_Double_Free__new_delete.label.xml
Template File: sources-sinks-34.tmpl.cpp
*/
/*
* @description
* CWE: 415 Double Free
* BadSource: Allocate data using new and Deallocate data using delete
* GoodSource: Allocate data using new
* Sinks:
* GoodSink: do nothing
* BadSink : Deallocate data using delete
* Flow Variant: 34 Data flow: use of a union containing two methods of accessing the same data (within the same function)
*
* */
#include "std_testcase.h"
#include <wchar.h>
namespace CWE415_Double_Free__new_delete_char_34
{
typedef union
{
char * unionFirst;
char * unionSecond;
} unionType;
#ifndef OMITBAD
/* CWE-415 (double free) BAD variant — the defect is INTENTIONAL: the source
 * deletes `data`, then the sink deletes the same pointer again, reached via
 * the other member of the union (both members alias the same storage).
 * Do not "fix"; this file exists to exercise analysis tools. */
void bad()
{
char * data;
unionType myUnion;
/* Initialize data */
data = NULL;
data = new char;
/* POTENTIAL FLAW: delete data in the source - the bad sink deletes data as well */
delete data;
myUnion.unionFirst = data;
{
char * data = myUnion.unionSecond;
/* POTENTIAL FLAW: Possibly deleting memory twice */
delete data;
}
}
#endif /* OMITBAD */
#ifndef OMITGOOD
/* goodG2B() uses the GoodSource with the BadSink */
/* goodG2B() uses the GoodSource with the BadSink: the source does not delete,
 * so the sink's single delete is correct (no double free). The union aliasing
 * is deliberate — both members refer to the same pointer. */
static void goodG2B()
{
char * data;
unionType myUnion;
/* Initialize data */
data = NULL;
data = new char;
/* FIX: Do NOT delete data in the source - the bad sink deletes data */
myUnion.unionFirst = data;
{
char * data = myUnion.unionSecond;
/* POTENTIAL FLAW: Possibly deleting memory twice */
delete data;
}
}
/* goodB2G() uses the BadSource with the GoodSink: the source deletes once,
 * and the sink deliberately does nothing, so no double free occurs. */
static void goodB2G()
{
char * data;
unionType myUnion;
/* Initialize data */
data = NULL;
data = new char;
/* POTENTIAL FLAW: delete data in the source - the bad sink deletes data as well */
delete data;
myUnion.unionFirst = data;
{
char * data = myUnion.unionSecond;
/* do nothing */
/* FIX: Don't attempt to delete the memory */
; /* empty statement needed for some flow variants */
}
}
/* Entry point for the "good" (non-flawed) variants; runs both combinations. */
void good()
{
goodG2B();
goodB2G();
}
#endif /* OMITGOOD */
} /* close namespace */
/* Below is the main(). It is only used when building this testcase on
its own for testing or for building a binary to use in testing binary
analysis tools. It is not used when compiling all the testcases as one
application, which is how source code analysis tools are tested. */
#ifdef INCLUDEMAIN
using namespace CWE415_Double_Free__new_delete_char_34; /* so that we can use good and bad easily */
/* Standalone driver (compiled only with INCLUDEMAIN): runs the good and/or
 * bad variants depending on the OMITGOOD/OMITBAD build flags. */
int main(int argc, char * argv[])
{
/* seed randomness */
srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
printLine("Calling good()...");
good();
printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
printLine("Calling bad()...");
bad();
printLine("Finished bad()");
#endif /* OMITBAD */
return 0;
}
#endif
|
/****************************************************************************
* TMesh *
* *
* Consiglio Nazionale delle Ricerche *
* Istituto di Matematica Applicata e Tecnologie Informatiche *
* Sezione di Genova *
* IMATI-GE / CNR *
* *
* Authors: Marco Attene *
* Copyright(C) 2012: IMATI-GE / CNR *
* All rights reserved. *
* *
* This program is dual-licensed as follows: *
* *
* (1) You may use TMesh as free software; you can redistribute it and/or *
* modify it under the terms of the GNU General Public License as published *
* by the Free Software Foundation; either version 3 of the License, or *
* (at your option) any later version. *
* In this case the program is distributed in the hope that it will be *
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License (http://www.gnu.org/licenses/gpl.txt) *
* for more details. *
* *
* (2) You may use TMesh as part of a commercial software. In this case a *
* proper agreement must be reached with the Authors and with IMATI-GE/CNR *
* based on a proper licensing contract. *
* *
****************************************************************************/
#include "coordinates.h"
#include <limits>
#include <cmath>
namespace T_MESH
{
#ifndef NAN
#define NAN std::numeric_limits<double>::quiet_NaN()
#endif
#ifdef USE_HYBRID_KERNEL
// Default behaviour = FILTERED KERNEL
bool PM_Rational::use_rationals = false;
bool PM_Rational::use_filtering = true;
// Converts the stored value from exact rational to double representation.
// No-op if already a double (_whv == false). A rational with zero denominator
// maps to NaN. While _whv is true, _val holds a pointer to a heap-allocated
// EXACT_NT, which is released here.
void PM_Rational::switchToDouble()
{
if (_whv)
{
EXACT_NT * ov = (EXACT_NT *)_val;
double d = (double)((EXACT_NT_DENOMINATOR(ov) != 0) ? (EXACT_NT_TO_DOUBLE((*ov))) : (NAN));
_val = d2int64t(d);
delete ov;
_whv = false;
}
}
// Converts the stored value from double to exact rational representation.
// No-op if already rational (_whv == true). A NaN double maps to the 0/0
// rational, mirroring the reverse mapping in switchToDouble().
void PM_Rational::switchToRational()
{
if (!_whv)
{
double od = int64t2d(_val);
// BUG FIX: the original tested 'od == NAN', which is ALWAYS false —
// NaN compares unequal to everything, including itself. std::isnan()
// is the correct check (<cmath> is already included by this file).
if (std::isnan(od)) _val = (int64_t)new EXACT_NT(0, 0);
else _val = (int64_t)new EXACT_NT(od);
_whv = true;
}
}
// Compound assignment: when the kernel is in rational mode the stored value is
// first converted to exact form and exact arithmetic is used; otherwise it is
// converted to double and floating-point arithmetic is used.
void PM_Rational::operator+=(const PM_Rational& a)
{
if (use_rationals) { switchToRational(); getVal() += a.toRational(); }
else { switchToDouble(); getDVal() += a.toDouble(); }
}
void PM_Rational::operator-=(const PM_Rational& a)
{
if (use_rationals) { switchToRational(); getVal() -= a.toRational(); }
else { switchToDouble(); getDVal() -= a.toDouble(); }
}
void PM_Rational::operator*=(const PM_Rational& a)
{
if (use_rationals) { switchToRational(); getVal() *= a.toRational(); }
else { switchToDouble(); getDVal() *= a.toDouble(); }
}
void PM_Rational::operator/=(const PM_Rational& a)
{
if (use_rationals) { switchToRational(); getVal() /= a.toRational(); }
else { switchToDouble(); getDVal() /= a.toDouble(); }
}
// Binary arithmetic: the result's precision follows the global mode flag;
// operands are not modified.
PM_Rational PM_Rational::operator+(const PM_Rational& a) const
{
if (use_rationals) return PM_Rational(toRational() + a.toRational());
else return PM_Rational(toDouble() + a.toDouble());
}
PM_Rational PM_Rational::operator-(const PM_Rational& a) const
{
if (use_rationals) return PM_Rational(toRational() - a.toRational());
else return PM_Rational(toDouble() - a.toDouble());
}
PM_Rational PM_Rational::operator*(const PM_Rational& a) const
{
if (use_rationals) return PM_Rational(toRational() * a.toRational());
else return PM_Rational(toDouble() * a.toDouble());
}
PM_Rational PM_Rational::operator/(const PM_Rational& a) const
{
if (use_rationals) return PM_Rational(toRational() / a.toRational());
else return PM_Rational(toDouble() / a.toDouble());
}
// Equality comparisons: exact if EITHER operand currently holds a rational,
// regardless of the global mode flag (note the commented-out /*use_rationals*/).
bool PM_Rational::operator==(const PM_Rational& a) const
{
if (_whv || a._whv /*use_rationals*/) return (toRational() == a.toRational());
else return (toDouble() == a.toDouble());
}
bool PM_Rational::operator!=(const PM_Rational& a) const
{
if (_whv || a._whv /*use_rationals*/) return (toRational() != a.toRational());
else return (toDouble() != a.toDouble());
}
// Copy assignment: releases the currently owned exact value (if any), then
// deep-copies the source — a rational source is cloned on the heap, a double
// source's raw bits are copied verbatim.
PM_Rational& PM_Rational::operator=(const PM_Rational& a)
{
// BUG FIX: guard against self-assignment. The original deleted _val and then
// read a.getVal() from the same, already-freed object (use-after-free).
if (this == &a) return *this;
if (_whv) delete ((EXACT_NT *)_val);
_whv = a._whv; _val = (_whv) ? ((int64_t)new EXACT_NT(a.getVal())) : (a._val);
return *this;
}
// Replaces the stored value with an exact copy of 'a', releasing any
// previously owned rational first.
void PM_Rational::setFromRational(const EXACT_NT& a)
{
if (_whv) delete ((EXACT_NT *)_val);
_whv = 1; _val = (int64_t)new EXACT_NT(a);
}
// Ordering comparisons: exact if either operand currently holds a rational.
bool PM_Rational::operator<(const PM_Rational& a) const
{
if (_whv || a._whv) return (toRational() < a.toRational());
else return (toDouble() < a.toDouble());
}
bool PM_Rational::operator>(const PM_Rational& a) const
{
if (_whv || a._whv) return (toRational() > a.toRational());
else return (toDouble() > a.toDouble());
}
// Unary negation; precision follows the GLOBAL mode flag rather than the
// operand's current representation (unlike the comparisons above).
PM_Rational operator-(const PM_Rational& a) // This might be probably changed... do not understand why to switch..
{
if (PM_Rational::isUsingRationals()) return PM_Rational(-(a.toRational()));
else return PM_Rational(-(a.toDouble()));
}
// Smallest integer not less than 'a'. In rational mode the result is computed
// exactly via GMP ceiling division of numerator by denominator; in double
// mode it falls back to ::ceil on the double approximation.
PM_Rational ceil(const PM_Rational& a)
{
if (PM_Rational::isUsingRationals())
{
mpz_t n, d, f;
mpz_init(n); mpz_init(d); mpz_init(f);
#ifdef USE_CGAL_LAZYNT
mpz_set(n, a.toRational().exact().numerator().mpz());
mpz_set(d, a.toRational().exact().denominator().mpz());
mpz_cdiv_q(f, n, d);
mpz_clear(n); mpz_clear(d);
// BUG FIX: 'f' was never mpz_clear'd (memory leak). Copy it into the
// result first, then release it — same pattern as round() below.
PM_Rational result(EXACT_NT(CGAL::Gmpz(f)));
mpz_clear(f);
return result;
#else
mpz_set(n, a.toRational().get_num_mpz_t());
mpz_set(d, a.toRational().get_den_mpz_t());
mpz_cdiv_q(f, n, d);
mpz_clear(n); mpz_clear(d);
// BUG FIX: 'f' was never mpz_clear'd (memory leak); mpz_class(f) copies.
PM_Rational result(mpq_class(mpz_class(f)));
mpz_clear(f);
return result;
#endif
}
else
return PM_Rational(::ceil(a.toDouble()));
}
// Largest integer not greater than 'a'. In rational mode the result is
// computed exactly via GMP floor division; in double mode it falls back to
// ::floor on the double approximation.
PM_Rational floor(const PM_Rational& a)
{
if (PM_Rational::isUsingRationals())
{
mpz_t n, d, f;
mpz_init(n); mpz_init(d); mpz_init(f);
#ifdef USE_CGAL_LAZYNT
mpz_set(n, a.toRational().exact().numerator().mpz());
mpz_set(d, a.toRational().exact().denominator().mpz());
mpz_fdiv_q(f, n, d);
mpz_clear(n); mpz_clear(d);
// BUG FIX: 'f' was never mpz_clear'd (memory leak). Copy it into the
// result first, then release it — same pattern as round() below.
PM_Rational result(EXACT_NT(CGAL::Gmpz(f)));
mpz_clear(f);
return result;
#else
mpz_set(n, a.toRational().get_num_mpz_t());
mpz_set(d, a.toRational().get_den_mpz_t());
mpz_fdiv_q(f, n, d);
mpz_clear(n); mpz_clear(d);
// BUG FIX: 'f' was never mpz_clear'd (memory leak); mpz_class(f) copies.
PM_Rational result(mpq_class(mpz_class(f)));
mpz_clear(f);
return result;
#endif
} else
return PM_Rational(::floor(a.toDouble()));
}
// Nearest integer to 'a'. In rational mode, computes both floor (f) and
// ceiling (c) exactly and returns whichever is closer; exact halves pick the
// ceiling (the '<' makes ties go to cr). In double mode, falls back to ::round.
PM_Rational round(const PM_Rational& a)
{
if (PM_Rational::isUsingRationals())
{
mpz_t n, d, f, c;
mpz_init(n); mpz_init(d); mpz_init(f); mpz_init(c);
#ifdef USE_CGAL_LAZYNT
mpz_set(n, a.toRational().exact().numerator().mpz());
mpz_set(d, a.toRational().exact().denominator().mpz());
mpz_fdiv_q(f, n, d);
mpz_cdiv_q(c, n, d);
mpz_clear(n); mpz_clear(d);
PM_Rational fr = PM_Rational(EXACT_NT(CGAL::Gmpz(f)));
PM_Rational cr = PM_Rational(EXACT_NT(CGAL::Gmpz(c)));
mpz_clear(f); mpz_clear(c);
return ((a - fr) < (cr - a)) ? (fr) : (cr);
#else
mpz_set(n, a.toRational().get_num_mpz_t());
mpz_set(d, a.toRational().get_den_mpz_t());
mpz_fdiv_q(f, n, d);
mpz_cdiv_q(c, n, d);
mpz_clear(n); mpz_clear(d);
PM_Rational fr = PM_Rational(mpq_class(mpz_class(f)));
PM_Rational cr = PM_Rational(mpq_class(mpz_class(c)));
mpz_clear(f); mpz_clear(c);
return ((a - fr) < (cr - a)) ? (fr) : (cr);
#endif
} else
return PM_Rational(::round(a.toDouble()));
}
#endif
} //namespace T_MESH
|
#include <iostream>
using namespace std;
// Minimal demo type that announces its construction and destruction on stdout.
class Random {
public:
    Random() { std::cout << "A constructor is called." << std::endl; }
    ~Random() { std::cout << "A destructor is called." << std::endl; }
};
// Creates two Random objects on the stack; both constructors run here and
// both destructors run (in reverse declaration order) when main returns —
// producing the OUTPUT transcript below.
int main() {
Random r1, r2;
return 0;
}
/** OUTPUT:
A constructor is called.
A constructor is called.
A destructor is called.
A destructor is called.
*/
|
//===- TestConvertGPUKernelToHsaco.cpp - Test gpu kernel hsaco lowering ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "mlir/Conversion/GPUCommon/GPUCommonPass.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Target/ROCDLIR.h"
#include "llvm/Support/TargetSelect.h"
using namespace mlir;
#if MLIR_ROCM_CONVERSIONS_ENABLED
/// Stub "ISA -> hsaco" compiler for the test pass below: ignores all inputs
/// and returns a blob holding the literal bytes "HSACO" (no trailing NUL).
static OwnedBlob compileIsaToHsacoForTesting(const std::string &, Location,
                                             StringRef) {
  static constexpr char kFakeBlob[] = "HSACO";
  return std::make_unique<std::vector<char>>(kFakeBlob,
                                             kFakeBlob + sizeof(kFakeBlob) - 1);
}
namespace mlir {
namespace test {
// Registers the "test-kernel-to-hsaco" pass pipeline: kernel functions are
// translated to ROCDL IR and paired with the fake blob produced by
// compileIsaToHsacoForTesting — no real ISA assembly takes place, which keeps
// the test independent of a ROCm installation.
void registerTestConvertGPUKernelToHsacoPass() {
PassPipelineRegistration<>(
"test-kernel-to-hsaco",
"Convert all kernel functions to ROCm hsaco blobs",
[](OpPassManager &pm) {
// Initialize LLVM AMDGPU backend.
LLVMInitializeAMDGPUTarget();
LLVMInitializeAMDGPUTargetInfo();
LLVMInitializeAMDGPUTargetMC();
LLVMInitializeAMDGPUAsmPrinter();
pm.addPass(createConvertGPUKernelToBlobPass(
translateModuleToROCDLIR, compileIsaToHsacoForTesting,
"amdgcn-amd-amdhsa", "gfx900", "-code-object-v3", "rocdl.hsaco"));
});
}
} // namespace test
} // namespace mlir
#endif
|
#include <iostream>
#include <cmath>
using namespace std;
// Solves a*x^2 + b*x + c = 0 for user-supplied integer coefficients and
// prints the real roots, if any. (The function's closing brace lies beyond
// this chunk; code below is kept byte-identical.)
int main()
{
int a, b, c;
cout << "a=?, b=?, c=?" << endl;
cin >> a >> b >> c;
// discriminant b^2 - 4ac
// NOTE(review): comparing d with == 0 below is fragile for floats in
// general, though for small integer inputs the value is exact.
float d = pow(b, 2) - 4 * a * c;
if (d < 0)
{
cout << "No Real Solutions." << endl;
}
if (d == 0)
{
// NOTE(review): (-b) / (2 * a) is INTEGER division — e.g. b=3, a=1 yields
// -1 instead of -1.5; the operands should be converted to float first.
float x = (-b) / (2 * a);
cout << "d=0" << endl;
cout << "x1=x2" << endl;
cout << "x=" << x << endl;
}
if (d > 0)
{
float x1 = ((-b) + sqrt(d)) / (2 * a);
float x2 = ((-b) - sqrt(d)) / (2 * a);
cout << "d=" << d << endl;
cout << "x1=" << x1 << endl;
cout << "x2=" << x2 << endl;
}
return 0;
|
/*
* Copyright(c) 2020-present simplelog contributors.
* Distributed under the MIT License (http://opensource.org/licenses/MIT)
*/
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <sstream>
#include "config_parser.h"
using namespace simplelog;
using namespace testing;
// An empty stream yields an empty configuration.
TEST(config_parser_tests, empty_stream)
{
auto config = std::make_unique<std::stringstream>("");
auto res = config_parser(std::move(config)).take();
ASSERT_THAT(res, IsEmpty());
}
// Entries that appear before any [section] header are dropped entirely.
TEST(config_parser_tests, no_section)
{
auto config = std::make_unique<std::stringstream>("tag1 = val1\n"
"tag2 = val2\n");
auto res = config_parser(std::move(config)).take();
ASSERT_THAT(res, IsEmpty());
}
// Sections with no entries do not appear in the result at all.
TEST(config_parser_tests, empty_section)
{
auto config = std::make_unique<std::stringstream>("[section1]\n"
"[section2]\n");
auto res = config_parser(std::move(config)).take();
ASSERT_THAT(res, IsEmpty());
}
// Well-formed sections keep their entries; the empty section2 is omitted.
TEST(config_parser_tests, valid_sections)
{
auto config = std::make_unique<std::stringstream>("[section1]\n"
"tag1 = val1\n"
"tag2 = val2\n"
"[section2]\n"
"[section3]\n"
"tag3 = val3\n");
auto res = config_parser(std::move(config)).take();
ASSERT_THAT(res,
UnorderedElementsAre(Pair("section1", UnorderedElementsAre(Pair("tag1", "val1"),
Pair("tag2", "val2"))),
Pair("section3", UnorderedElementsAre(Pair("tag3", "val3")))));
}
// Re-opening a section merges its entries; tag1 and tag3 both land in section1.
TEST(config_parser_tests, duplicate_sections)
{
auto config = std::make_unique<std::stringstream>("[section1]\n"
"tag1 = val1\n"
"[section2]\n"
"tag2 = val2\n"
"[section1]\n"
"tag3 = val3\n");
auto res = config_parser(std::move(config)).take();
ASSERT_THAT(res,
UnorderedElementsAre(Pair("section1", UnorderedElementsAre(Pair("tag1", "val1"),
Pair("tag3", "val3"))),
Pair("section2", UnorderedElementsAre(Pair("tag2", "val2")))));
}
// A repeated tag within a section overwrites the earlier value (last wins).
TEST(config_parser_tests, duplicate_entries)
{
auto config = std::make_unique<std::stringstream>("[section1]\n"
"tag1 = val1\n"
"tag2 = val2\n"
"tag1 = val3\n");
auto res = config_parser(std::move(config)).take();
ASSERT_THAT(res,
UnorderedElementsAre(Pair("section1", UnorderedElementsAre(Pair("tag1", "val3"),
Pair("tag2", "val2")))));
}
// Blank lines anywhere in the stream are ignored.
TEST(config_parser_tests, empty_lines)
{
auto config = std::make_unique<std::stringstream>("\n\n"
"[section1]\n"
"tag1 = val1\n"
"tag2 = val2\n"
"\n"
"[section3]\n"
"tag3 = val3\n\n");
auto res = config_parser(std::move(config)).take();
ASSERT_THAT(res,
UnorderedElementsAre(Pair("section1", UnorderedElementsAre(Pair("tag1", "val1"),
Pair("tag2", "val2"))),
Pair("section3", UnorderedElementsAre(Pair("tag3", "val3")))));
}
// Lines starting with '#' are comments, even when they look like sections
// or entries.
TEST(config_parser_tests, comments)
{
auto config = std::make_unique<std::stringstream>("#\n"
"# comment\n"
"[section1]\n"
"tag1 = val1\n"
"tag2 = val2\n"
"#[section3]\n"
"#tag3 = val3\n\n");
auto res = config_parser(std::move(config)).take();
ASSERT_THAT(res,
UnorderedElementsAre(Pair("section1", UnorderedElementsAre(Pair("tag1", "val1"),
Pair("tag2", "val2")))));
}
// Leading/trailing whitespace (spaces and tabs) around tags, values, section
// headers and comment markers is stripped.
TEST(config_parser_tests, spaces)
{
auto config = std::make_unique<std::stringstream>(" # comment\n"
"[section1] \n"
" tag1= val1\t\n"
" tag2 =val2 \n"
"\ttag3 = val3 \n"
" #[section3]\n"
"#tag3 = val3\n");
auto res = config_parser(std::move(config)).take();
ASSERT_THAT(res,
UnorderedElementsAre(Pair("section1", UnorderedElementsAre(Pair("tag1", "val1"),
Pair("tag2", "val2"),
Pair("tag3", "val3")))));
}
// Malformed section headers: an unterminated '[section1' still opens a
// section; names may contain spaces; trailing garbage after ']' is dropped;
// empty '[]' / bare '[' headers are ignored, so following entries land in
// the previously open section.
// FIX: test name typo "section_bas_syntax" -> "section_bad_syntax"
// (consistent with the sibling bad_syntax test; nothing references the name).
TEST(config_parser_tests, section_bad_syntax)
{
auto config = std::make_unique<std::stringstream>(
"[section1\n"
"tag1 = val1\n"
"[section 2 coucou]\n" // with spaces
"tag2 = val2\n"
"[section3] coucou haha\n" // with garbage after section
"tag3 = val3\n"
"[]\n" // empty
"tag4 = val4\n"
"[\n" // empty
"tag5 = val5\n");
auto res = config_parser(std::move(config)).take();
ASSERT_THAT(res,
UnorderedElementsAre(Pair("section1", UnorderedElementsAre(Pair("tag1", "val1"))),
Pair("section 2 coucou",
UnorderedElementsAre(Pair("tag2", "val2"))),
Pair("section3", UnorderedElementsAre(Pair("tag3", "val3"),
Pair("tag4", "val4"),
Pair("tag5", "val5")))));
}
// Malformed entry lines (missing '=', missing tag, missing value, double '=',
// random characters) are skipped without aborting the parse; surrounding
// valid entries are preserved.
TEST(config_parser_tests, bad_syntax)
{
auto config = std::make_unique<std::stringstream>("[section1]\n"
"tag1 = val1\n"
"=\n" // only =
" \n" // only spaces
"tag2 val2\n" // missing =
"tag2 == val2\n" // double =
"vjsfir()k(gé\n" // random chars
"= val2\n" // missing tag
"tag2 =\n" // missing value
"tag2 = val2 = val3\n" // double value
"tag3 = val3\n");
auto res = config_parser(std::move(config)).take();
ASSERT_THAT(res,
UnorderedElementsAre(Pair("section1", UnorderedElementsAre(Pair("tag1", "val1"),
Pair("tag3", "val3")))));
}
|
#include "mapnik_map.hpp"
#include "utils.hpp"
#include "mapnik_color.hpp" // for Color, Color::constructor
#include "mapnik_image.hpp" // for Image, Image::constructor
#include "mapnik_layer.hpp" // for Layer, Layer::constructor
#include "mapnik_palette.hpp" // for palette_ptr, Palette, etc
// mapnik
#include <mapnik/map.hpp>
#include <mapnik/layer.hpp> // for layer
#include <mapnik/save_map.hpp> // for save_map, etc
// stl
#include <sstream> // for basic_ostringstream, etc
// Persistent reference to the JS constructor; kept alive for the addon's
// lifetime (see SuppressDestruct below).
Napi::FunctionReference Map::constructor;
// Builds and exports the JS "Map" class: registers every instance method and
// accessor with the given property attributes, attaches the ASPECT_* constants
// (mapnik aspect-fix-mode values) on the constructor itself, and stores a
// persistent reference so the constructor survives garbage collection.
Napi::Object Map::Initialize(Napi::Env env, Napi::Object exports, napi_property_attributes prop_attr)
{
// clang-format off
Napi::Function func = DefineClass(env, "Map", {
InstanceMethod<&Map::fonts>("fonts", prop_attr),
InstanceMethod<&Map::fontFiles>("fontFiles", prop_attr),
InstanceMethod<&Map::fontDirectory>("fontDirectory", prop_attr),
InstanceMethod<&Map::memoryFonts>("memoryFonts", prop_attr),
InstanceMethod<&Map::registerFonts>("registerFonts", prop_attr),
InstanceMethod<&Map::loadFonts>("loadFonts", prop_attr),
InstanceMethod<&Map::load>("load", prop_attr),
InstanceMethod<&Map::loadSync>("loadSync", prop_attr),
InstanceMethod<&Map::fromStringSync>("fromStringSync", prop_attr),
InstanceMethod<&Map::fromString>("fromString", prop_attr),
InstanceMethod<&Map::clone>("clone", prop_attr),
InstanceMethod<&Map::save>("save", prop_attr),
InstanceMethod<&Map::clear>("clear", prop_attr),
InstanceMethod<&Map::toXML>("toXML", prop_attr),
InstanceMethod<&Map::resize>("resize", prop_attr),
InstanceMethod<&Map::render>("render", prop_attr),
InstanceMethod<&Map::renderSync>("renderSync", prop_attr),
InstanceMethod<&Map::renderFile>("renderFile", prop_attr),
InstanceMethod<&Map::renderFileSync>("renderFileSync", prop_attr),
InstanceMethod<&Map::zoomAll>("zoomAll", prop_attr),
InstanceMethod<&Map::zoomToBox>("zoomToBox", prop_attr),
InstanceMethod<&Map::scale>("scale", prop_attr),
InstanceMethod<&Map::scaleDenominator>("scaleDenominator", prop_attr),
InstanceMethod<&Map::queryPoint>("queryPoint", prop_attr),
InstanceMethod<&Map::queryMapPoint>("queryMapPoint", prop_attr),
InstanceMethod<&Map::add_layer>("add_layer", prop_attr),
InstanceMethod<&Map::remove_layer>("remove_layer", prop_attr),
InstanceMethod<&Map::get_layer>("get_layer", prop_attr),
InstanceMethod<&Map::layers>("layers", prop_attr),
// accessors (bufferedExtent is read-only; the rest have getter + setter)
InstanceAccessor<&Map::srs, &Map::srs>("srs", prop_attr),
InstanceAccessor<&Map::width, &Map::width>("width", prop_attr),
InstanceAccessor<&Map::height, &Map::height>("height", prop_attr),
InstanceAccessor<&Map::bufferSize, &Map::bufferSize>("bufferSize", prop_attr),
InstanceAccessor<&Map::extent, &Map::extent>("extent", prop_attr),
InstanceAccessor<&Map::bufferedExtent>("bufferedExtent", prop_attr),
InstanceAccessor<&Map::maximumExtent, &Map::maximumExtent>("maximumExtent", prop_attr),
InstanceAccessor<&Map::background, &Map::background>("background", prop_attr),
InstanceAccessor<&Map::parameters, &Map::parameters>("parameters", prop_attr),
InstanceAccessor<&Map::aspect_fix_mode, &Map::aspect_fix_mode>("aspect_fix_mode", prop_attr)
});
// clang-format on
func.Set("ASPECT_GROW_BBOX", Napi::Number::New(env, mapnik::Map::GROW_BBOX));
func.Set("ASPECT_GROW_CANVAS", Napi::Number::New(env, mapnik::Map::GROW_CANVAS));
func.Set("ASPECT_SHRINK_BBOX", Napi::Number::New(env, mapnik::Map::SHRINK_BBOX));
func.Set("ASPECT_SHRINK_CANVAS", Napi::Number::New(env, mapnik::Map::SHRINK_CANVAS));
func.Set("ASPECT_ADJUST_BBOX_WIDTH", Napi::Number::New(env, mapnik::Map::ADJUST_BBOX_WIDTH));
func.Set("ASPECT_ADJUST_BBOX_HEIGHT", Napi::Number::New(env, mapnik::Map::ADJUST_BBOX_HEIGHT));
func.Set("ASPECT_ADJUST_CANVAS_WIDTH", Napi::Number::New(env, mapnik::Map::ADJUST_CANVAS_WIDTH));
func.Set("ASPECT_ADJUST_CANVAS_HEIGHT", Napi::Number::New(env, mapnik::Map::ADJUST_CANVAS_HEIGHT));
func.Set("ASPECT_RESPECT", Napi::Number::New(env, mapnik::Map::RESPECT));
constructor = Napi::Persistent(func);
constructor.SuppressDestruct();
exports.Set("Map", func);
return exports;
}
/**
* **`mapnik.Map`**
*
* A map in mapnik is an object that combines data sources and styles in
* a way that lets you produce styled cartographic output.
*
* @class Map
* @param {int} width in pixels
* @param {int} height in pixels
* @param {string} [projection='+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'] projection as a proj4 code
* typically used with '+init=epsg:3857'
* @property {string} srs
* @property {number} width
* @property {number} height
* @property {number} bufferSize
* @property {Array<number>} extent - extent of the map as an array `[ minx, miny, maxx, maxy ]`
* @property {Array<number>} bufferedExtent - extent of the map's buffer `[ minx, miny, maxx, maxy ]`
* @property {Array<number>} maximumExtent - combination of extent and bufferedExtent `[ minx, miny, maxx, maxy ]`
* @property {mapnik.Color} background - background color as a {@link mapnik.Color} object
* @property {} parameters
* @property {} aspect_fix_mode
* @example
* var map = new mapnik.Map(600, 400, '+init=epsg:3857');
* console.log(map);
* // {
* // aspect_fix_mode: 0,
* // parameters: {},
* // background: undefined,
* // maximumExtent: undefined,
* // bufferedExtent: [ NaN, NaN, NaN, NaN ],
* // extent:
* // [ 1.7976931348623157e+308,
* // 1.7976931348623157e+308,
* // -1.7976931348623157e+308,
* // -1.7976931348623157e+308 ],
* // bufferSize: 0,
* // height: 400,
* // width: 600,
* // srs: '+init=epsg:3857'
* // }
*/
// Constructs the native wrapper. Three accepted call forms:
//   - a single External<map_ptr> (internal use): adopts an existing
//     shared mapnik::Map without creating a new one;
//   - (width, height): creates a new map with default projection;
//   - (width, height, srs): creates a new map with the given projection string.
// Any other argument combination throws a JS TypeError/Error.
Map::Map(Napi::CallbackInfo const& info)
: Napi::ObjectWrap<Map>(info)
{
Napi::Env env = info.Env();
if (info.Length() == 1 && info[0].IsExternal())
{
auto ext = info[0].As<Napi::External<map_ptr>>();
if (ext) map_ = *ext.Data();
return;
}
if (info.Length() == 2)
{
if (!info[0].IsNumber() || !info[1].IsNumber())
{
Napi::TypeError::New(env, "'width' and 'height' must be integers").ThrowAsJavaScriptException();
return;
}
map_ = std::make_shared<mapnik::Map>(info[0].As<Napi::Number>().Int32Value(), info[1].As<Napi::Number>().Int32Value());
return;
}
else if (info.Length() == 3)
{
if (!info[0].IsNumber() || !info[1].IsNumber())
{
Napi::TypeError::New(env, "'width' and 'height' must be integers").ThrowAsJavaScriptException();
return;
}
if (!info[2].IsString())
{
Napi::Error::New(env, "'srs' value must be a string").ThrowAsJavaScriptException();
return;
}
map_ = std::make_shared<mapnik::Map>(info[0].As<Napi::Number>().Int32Value(),
info[1].As<Napi::Number>().Int32Value(),
info[2].As<Napi::String>());
}
else
{
Napi::Error::New(env, "please provide Map width and height and optional srs").ThrowAsJavaScriptException();
}
}
// accessors
// srs
// srs getter: returns the map's current projection string.
Napi::Value Map::srs(Napi::CallbackInfo const& info)
{
    return Napi::String::New(info.Env(), map_->srs());
}
// srs setter: accepts only a string; throws a TypeError otherwise.
void Map::srs(Napi::CallbackInfo const& info, Napi::Value const& value)
{
    if (value.IsString())
    {
        map_->set_srs(value.As<Napi::String>());
        return;
    }
    Napi::TypeError::New(info.Env(), "'srs' must be a string").ThrowAsJavaScriptException();
}
// extent
// extent getter: current extent as a JS array [minx, miny, maxx, maxy].
Napi::Value Map::extent(Napi::CallbackInfo const& info)
{
    Napi::Env env = info.Env();
    Napi::EscapableHandleScope scope(env);
    auto const& box = map_->get_current_extent();
    Napi::Array result = Napi::Array::New(env, 4u);
    result.Set(0u, Napi::Number::New(env, box.minx()));
    result.Set(1u, Napi::Number::New(env, box.miny()));
    result.Set(2u, Napi::Number::New(env, box.maxx()));
    result.Set(3u, Napi::Number::New(env, box.maxy()));
    return scope.Escape(result);
}
// extent setter: zooms the map to the box [minx, miny, maxx, maxy].
void Map::extent(Napi::CallbackInfo const& info, Napi::Value const& value)
{
    Napi::Env env = info.Env();
    if (!value.IsArray())
    {
        Napi::Error::New(env, "Must provide an array of: [minx,miny,maxx,maxy]").ThrowAsJavaScriptException();
        return;
    }
    Napi::Array arr = value.As<Napi::Array>();
    // Robustness fix: validate the array length and element types before
    // reading (mirrors the checks in zoomToBox); previously a short array or
    // non-numeric elements silently produced NaN coordinates.
    if (arr.Length() != 4 || !arr.Get(0u).IsNumber() || !arr.Get(1u).IsNumber() || !arr.Get(2u).IsNumber() || !arr.Get(3u).IsNumber())
    {
        Napi::Error::New(env, "Must provide an array of: [minx,miny,maxx,maxy]").ThrowAsJavaScriptException();
        return;
    }
    double minx = arr.Get(0u).As<Napi::Number>().DoubleValue();
    double miny = arr.Get(1u).As<Napi::Number>().DoubleValue();
    double maxx = arr.Get(2u).As<Napi::Number>().DoubleValue();
    double maxy = arr.Get(3u).As<Napi::Number>().DoubleValue();
    mapnik::box2d<double> box{minx, miny, maxx, maxy};
    map_->zoom_to_box(box);
}
// maximumExtent
// maximumExtent getter: [minx, miny, maxx, maxy], or undefined when unset.
Napi::Value Map::maximumExtent(Napi::CallbackInfo const& info)
{
    Napi::Env env = info.Env();
    Napi::EscapableHandleScope scope(env);
    auto const& box = map_->maximum_extent();
    if (!box) return env.Undefined();
    Napi::Array result = Napi::Array::New(env, 4u);
    result.Set(0u, Napi::Number::New(env, box->minx()));
    result.Set(1u, Napi::Number::New(env, box->miny()));
    result.Set(2u, Napi::Number::New(env, box->maxx()));
    result.Set(3u, Napi::Number::New(env, box->maxy()));
    return scope.Escape(result);
}
// maximumExtent setter: restricts panning/zooming to the given box.
void Map::maximumExtent(Napi::CallbackInfo const& info, Napi::Value const& value)
{
    Napi::Env env = info.Env();
    if (!value.IsArray())
    {
        Napi::Error::New(env, "Must provide an array of: [minx,miny,maxx,maxy]").ThrowAsJavaScriptException();
        return;
    }
    Napi::Array arr = value.As<Napi::Array>();
    // Robustness fix: validate length and element types before reading
    // (mirrors zoomToBox); previously malformed arrays yielded NaN bounds.
    if (arr.Length() != 4 || !arr.Get(0u).IsNumber() || !arr.Get(1u).IsNumber() || !arr.Get(2u).IsNumber() || !arr.Get(3u).IsNumber())
    {
        Napi::Error::New(env, "Must provide an array of: [minx,miny,maxx,maxy]").ThrowAsJavaScriptException();
        return;
    }
    double minx = arr.Get(0u).As<Napi::Number>().DoubleValue();
    double miny = arr.Get(1u).As<Napi::Number>().DoubleValue();
    double maxx = arr.Get(2u).As<Napi::Number>().DoubleValue();
    double maxy = arr.Get(3u).As<Napi::Number>().DoubleValue();
    mapnik::box2d<double> box{minx, miny, maxx, maxy};
    map_->set_maximum_extent(box);
}
// bufferedExtent
// bufferedExtent getter: extent grown by bufferSize, or undefined.
Napi::Value Map::bufferedExtent(Napi::CallbackInfo const& info)
{
    Napi::Env env = info.Env();
    Napi::EscapableHandleScope scope(env);
    auto const& box = map_->get_buffered_extent();
    if (!box) return env.Undefined();
    Napi::Array result = Napi::Array::New(env, 4u);
    result.Set(0u, Napi::Number::New(env, box->minx()));
    result.Set(1u, Napi::Number::New(env, box->miny()));
    result.Set(2u, Napi::Number::New(env, box->maxx()));
    result.Set(3u, Napi::Number::New(env, box->maxy()));
    return scope.Escape(result);
}
// width
// width getter: map width in pixels.
Napi::Value Map::width(Napi::CallbackInfo const& info)
{
    return Napi::Number::New(info.Env(), map_->width());
}
// width setter: accepts only numbers; throws a TypeError otherwise.
void Map::width(Napi::CallbackInfo const& info, Napi::Value const& value)
{
    if (value.IsNumber())
    {
        map_->set_width(value.As<Napi::Number>().Int32Value());
        return;
    }
    Napi::TypeError::New(info.Env(), "Must provide an integer width").ThrowAsJavaScriptException();
}
// height
// height getter: map height in pixels.
Napi::Value Map::height(Napi::CallbackInfo const& info)
{
    return Napi::Number::New(info.Env(), map_->height());
}
// height setter: accepts only numbers; throws a TypeError otherwise.
void Map::height(Napi::CallbackInfo const& info, Napi::Value const& value)
{
    if (value.IsNumber())
    {
        map_->set_height(value.As<Napi::Number>().Int32Value());
        return;
    }
    Napi::TypeError::New(info.Env(), "Must provide an integer height").ThrowAsJavaScriptException();
}
// aspect_fix_mode
// aspect_fix_mode getter: current mode as its numeric constant.
Napi::Value Map::aspect_fix_mode(Napi::CallbackInfo const& info)
{
    return Napi::Number::New(info.Env(), map_->get_aspect_fix_mode());
}
// aspect_fix_mode setter: value must be a number in [0, aspect_fix_mode_MAX).
void Map::aspect_fix_mode(Napi::CallbackInfo const& info, Napi::Value const& value)
{
    Napi::Env env = info.Env();
    if (!value.IsNumber())
    {
        Napi::TypeError::New(env, "'aspect_fix_mode' must be a constant (number)").ThrowAsJavaScriptException();
        return;
    }
    int const mode = value.As<Napi::Number>().Int32Value();
    if (mode >= 0 && mode < mapnik::Map::aspect_fix_mode_MAX)
    {
        map_->set_aspect_fix_mode(static_cast<mapnik::Map::aspect_fix_mode>(mode));
        return;
    }
    Napi::Error::New(env, "'aspect_fix_mode' value is invalid").ThrowAsJavaScriptException();
}
// bufferSize
// bufferSize getter: rendering buffer size in pixels.
Napi::Value Map::bufferSize(Napi::CallbackInfo const& info)
{
    return Napi::Number::New(info.Env(), map_->buffer_size());
}
// bufferSize setter: accepts only numbers; throws a TypeError otherwise.
void Map::bufferSize(Napi::CallbackInfo const& info, Napi::Value const& value)
{
    if (value.IsNumber())
    {
        map_->set_buffer_size(value.As<Napi::Number>().Int32Value());
        return;
    }
    Napi::TypeError::New(info.Env(), "Must provide an integer bufferSize").ThrowAsJavaScriptException();
}
// background
// background getter: a mapnik.Color object, or undefined when unset.
Napi::Value Map::background(Napi::CallbackInfo const& info)
{
    Napi::Env env = info.Env();
    Napi::EscapableHandleScope scope(env);
    boost::optional<mapnik::color> col = map_->background();
    if (!col) return env.Undefined();
    Napi::Value arg = Napi::External<mapnik::color>::New(env, &(*col));
    return scope.Escape(Color::constructor.New({arg}));
}
// background setter: expects a mapnik.Color instance.
void Map::background(Napi::CallbackInfo const& info, Napi::Value const& value)
{
    Napi::Env env = info.Env();
    if (!value.IsObject())
    {
        Napi::TypeError::New(env, "mapnik.Color expected").ThrowAsJavaScriptException();
        return;
    }
    Napi::Object obj = value.As<Napi::Object>();
    if (!obj.InstanceOf(Color::constructor.Value()))
    {
        // Bug fix: the previous message here ("Must provide an integer
        // height") was a copy/paste error from the height setter.
        Napi::TypeError::New(env, "mapnik.Color expected").ThrowAsJavaScriptException();
        return;
    }
    Color* c = Napi::ObjectWrap<Color>::Unwrap(obj);
    map_->set_background(c->color_);
}
// parameters
// parameters getter: the map's extra parameters as a plain JS object.
Napi::Value Map::parameters(Napi::CallbackInfo const& info)
{
    Napi::Env env = info.Env();
    Napi::EscapableHandleScope scope(env);
    Napi::Object result = Napi::Object::New(env);
    for (auto const& kv : map_->get_extra_parameters())
    {
        node_mapnik::params_to_object(env, result, kv.first, kv.second);
    }
    return scope.Escape(result);
}
// parameters setter: replaces the map's extra parameters wholesale from a JS
// object. Only string, number and boolean properties are copied; properties
// of any other type are silently skipped.
void Map::parameters(Napi::CallbackInfo const& info, Napi::Value const& value)
{
    Napi::Env env = info.Env();
    if (!value.IsObject())
    {
        Napi::TypeError::New(env, "object expected for map.parameters").ThrowAsJavaScriptException();
        return;
    }
    mapnik::parameters params;
    Napi::Object obj = value.As<Napi::Object>();
    Napi::Array names = obj.GetPropertyNames();
    std::size_t length = names.Length();
    for (std::size_t index = 0; index < length; ++index)
    {
        std::string name = names.Get(index).ToString();
        Napi::Value val = obj.Get(name);
        if (val.IsString())
        {
            params[name] = val.As<Napi::String>().Utf8Value();
        }
        else if (val.IsNumber())
        {
            double num = val.As<Napi::Number>().DoubleValue();
            // todo - round
            // Integral doubles are stored as integers, fractional ones as
            // doubles. NOTE(review): the Int32Value comparison means integral
            // values outside the int32 range are stored as doubles — confirm
            // this is intended for large integer parameters.
            if (num == val.As<Napi::Number>().Int32Value())
            {
                params[name] = static_cast<node_mapnik::value_integer>(num);
            }
            else
            {
                params[name] = num;
            }
        }
        else if (val.IsBoolean())
        {
            params[name] = val.As<Napi::Boolean>().Value();
        }
    }
    // Replaces (does not merge with) any previously set parameters.
    map_->set_extra_parameters(params);
}
/**
* Load fonts from local or external source
*
* @name loadFonts
* @memberof Map
* @instance
*
*/
// loadFonts: delegates to mapnik::Map::load_fonts() and returns its result.
Napi::Value Map::loadFonts(Napi::CallbackInfo const& info)
{
    bool const ok = map_->load_fonts();
    return Napi::Boolean::New(info.Env(), ok);
}
// memoryFonts: names of fonts in the map's in-memory font cache.
Napi::Value Map::memoryFonts(Napi::CallbackInfo const& info)
{
    Napi::Env env = info.Env();
    Napi::EscapableHandleScope scope(env);
    auto const& cache = map_->get_font_memory_cache();
    Napi::Array names = Napi::Array::New(env, cache.size());
    std::size_t pos = 0u;
    for (auto const& entry : cache)
    {
        names.Set(pos, entry.first);
        ++pos;
    }
    return scope.Escape(names);
}
// registerFonts(path[, options]): registers fonts from a directory.
// options.recurse (Boolean, default false) descends into subdirectories.
// Returns a Boolean success flag from mapnik::Map::register_fonts.
Napi::Value Map::registerFonts(Napi::CallbackInfo const& info)
{
    Napi::Env env = info.Env();
    if (info.Length() == 0 || !info[0].IsString())
    {
        Napi::TypeError::New(env, "first argument must be a path to a directory of fonts")
            .ThrowAsJavaScriptException();
        return env.Undefined();
    }
    bool recurse = false;
    if (info.Length() >= 2)
    {
        if (!info[1].IsObject())
        {
            Napi::TypeError::New(env, "second argument is optional, but if provided must be an object, eg. { recurse: true }")
                .ThrowAsJavaScriptException();
            return env.Undefined();
        }
        Napi::Object options = info[1].As<Napi::Object>();
        if (options.Has("recurse"))
        {
            Napi::Value recurse_opt = options.Get("recurse");
            if (!recurse_opt.IsBoolean())
            {
                Napi::TypeError::New(env, "'recurse' must be a Boolean").ThrowAsJavaScriptException();
                return env.Undefined();
            }
            recurse = recurse_opt.As<Napi::Boolean>();
        }
    }
    std::string path = info[0].As<Napi::String>();
    return Napi::Boolean::New(env, map_->register_fonts(path, recurse));
}
/**
* Get all of the fonts currently registered as part of this map
* @memberof Map
* @instance
* @name font
* @returns {Array<string>} fonts
*/
// fonts: names of all fonts currently registered on this map.
Napi::Value Map::fonts(Napi::CallbackInfo const& info)
{
    Napi::Env env = info.Env();
    Napi::EscapableHandleScope scope(env);
    auto const& file_mapping = map_->get_font_file_mapping();
    Napi::Array names = Napi::Array::New(env, file_mapping.size());
    std::size_t pos = 0u;
    for (auto const& entry : file_mapping)
    {
        names.Set(pos, entry.first);
        ++pos;
    }
    return scope.Escape(names);
}
/**
* Get all of the fonts currently registered as part of this map, as a mapping
* from font to font file
* @memberof Map
* @instance
* @name fontFiles
* @returns {Object} fonts
*/
// fontFiles: object mapping each registered font name to its file path.
Napi::Value Map::fontFiles(Napi::CallbackInfo const& info)
{
    Napi::Env env = info.Env();
    Napi::EscapableHandleScope scope(env);
    Napi::Object result = Napi::Object::New(env);
    for (auto const& entry : map_->get_font_file_mapping())
    {
        result.Set(entry.first, entry.second.second);
    }
    return scope.Escape(result);
}
/**
* Get the currently-registered font directory, if any
* @memberof Map
* @instance
* @name fontDirectory
* @returns {string|undefined} fonts
*/
// fontDirectory: the registered font directory, or undefined when none.
Napi::Value Map::fontDirectory(Napi::CallbackInfo const& info)
{
    Napi::Env env = info.Env();
    auto const& dir = map_->font_directory();
    if (!dir)
    {
        return env.Undefined();
    }
    return Napi::String::New(env, *dir);
}
/**
* Get the map's scale factor. This is the ratio between pixels and geographical
* units like meters.
* @memberof Map
* @instance
* @name scale
* @returns {number} scale
*/
// scale: ratio between pixels and geographical units.
Napi::Value Map::scale(Napi::CallbackInfo const& info)
{
    Napi::Env env = info.Env();
    return Napi::Number::New(env, map_->scale());
}
/**
* Get the map's scale denominator.
*
* @memberof Map
* @instance
* @name scaleDenominator
* @returns {number} scale denominator
*/
// scaleDenominator: the map's current scale denominator.
Napi::Value Map::scaleDenominator(Napi::CallbackInfo const& info)
{
    Napi::Env env = info.Env();
    return Napi::Number::New(env, map_->scale_denominator());
}
/**
* Get all of the currently-added layers in this map
*
* @memberof Map
* @instance
* @name layers
* @returns {Array<mapnik.Layer>} layers
*/
// layers: array of mapnik.Layer wrappers, one per layer (copies, not views).
Napi::Value Map::layers(Napi::CallbackInfo const& info)
{
    Napi::Env env = info.Env();
    Napi::EscapableHandleScope scope(env);
    auto const& lyrs = map_->layers();
    Napi::Array result = Napi::Array::New(env, lyrs.size());
    std::size_t pos = 0u;
    for (auto const& lyr : lyrs)
    {
        auto copy = std::make_shared<mapnik::layer>(lyr);
        Napi::Value arg = Napi::External<layer_ptr>::New(env, &copy);
        result.Set(pos, Layer::constructor.New({arg}));
        ++pos;
    }
    return scope.Escape(result);
}
/**
* Add a new layer to this map
*
* @memberof Map
* @instance
* @name add_layer
* @param {mapnik.Layer} new layer
*/
// add_layer(layer): appends a mapnik.Layer; returns true on success.
Napi::Value Map::add_layer(Napi::CallbackInfo const& info)
{
    Napi::Env env = info.Env();
    if (info[0].IsObject())
    {
        Napi::Object obj = info[0].As<Napi::Object>();
        if (obj.InstanceOf(Layer::constructor.Value()))
        {
            Layer* layer = Napi::ObjectWrap<Layer>::Unwrap(obj);
            map_->add_layer(*layer->impl());
            return Napi::Boolean::New(env, true);
        }
    }
    Napi::TypeError::New(env, "mapnik.Layer expected").ThrowAsJavaScriptException();
    return env.Undefined();
}
/**
* Remove layer from this map
*
* @memberof Map
* @instance
* @name remove_layer
* @param {number} layer index
*/
// remove_layer(index): removes the layer at the given index; true on success.
Napi::Value Map::remove_layer(Napi::CallbackInfo const& info)
{
    Napi::Env env = info.Env();
    if (info.Length() != 1)
    {
        Napi::Error::New(env, "Please provide layer index").ThrowAsJavaScriptException();
        return env.Undefined();
    }
    if (!info[0].IsNumber())
    {
        Napi::TypeError::New(env, "index must be number").ThrowAsJavaScriptException();
        return env.Undefined();
    }
    // Negative indices wrap to large unsigned values and are rejected below.
    unsigned int const idx = info[0].As<Napi::Number>().Int32Value();
    if (idx >= map_->layers().size())
    {
        Napi::TypeError::New(env, "invalid layer index").ThrowAsJavaScriptException();
        return env.Undefined();
    }
    map_->remove_layer(idx);
    return Napi::Boolean::New(env, true);
}
/**
* Get a layer out of this map, given a name or index
*
* @memberof Map
* @instance
* @name get_layer
* @param {string|number} layer name or index
* @returns {mapnik.Layer} the layer
* @throws {Error} if index is incorrect or layer is not found
*/
// get_layer(nameOrIndex): returns a mapnik.Layer wrapper for the matching
// layer. The wrapper holds a copy of the layer, not a reference into the map.
// Throws if the argument type is wrong, the index is out of range, or no
// layer has the given name.
Napi::Value Map::get_layer(Napi::CallbackInfo const& info)
{
    Napi::Env env = info.Env();
    Napi::EscapableHandleScope scope(env);
    if (info.Length() != 1)
    {
        Napi::Error::New(env, "Please provide layer name or index").ThrowAsJavaScriptException();
        return env.Undefined();
    }
    std::vector<mapnik::layer> const& layers = map_->layers();
    Napi::Value key = info[0];
    // Numeric key: direct index lookup.
    if (key.IsNumber())
    {
        // Negative values wrap to large unsigned numbers and fail the bounds check.
        unsigned int index = key.As<Napi::Number>().Int32Value();
        if (index < layers.size())
        {
            auto layer = std::make_shared<mapnik::layer>(layers[index]);
            Napi::Value arg = Napi::External<layer_ptr>::New(env, &layer);
            Napi::Object obj = Layer::constructor.New({arg});
            return scope.Escape(obj);
        }
        else
        {
            Napi::TypeError::New(env, "invalid layer index").ThrowAsJavaScriptException();
            return env.Undefined();
        }
    }
    // String key: linear scan for the first layer with a matching name.
    else if (key.IsString())
    {
        std::string layer_name = key.As<Napi::String>();
        std::size_t index = 0;
        for (mapnik::layer const& lyr : layers)
        {
            if (lyr.name() == layer_name)
            {
                auto layer = std::make_shared<mapnik::layer>(layers[index]);
                Napi::Value arg = Napi::External<layer_ptr>::New(env, &layer);
                Napi::Object obj = Layer::constructor.New({arg});
                return scope.Escape(obj);
            }
            ++index;
        }
        std::ostringstream s;
        s << "Layer name '" << layer_name << "' not found";
        Napi::TypeError::New(env, s.str()).ThrowAsJavaScriptException();
        return env.Undefined();
    }
    Napi::TypeError::New(env, "first argument must be either a layer name(string) or layer index (integer)")
        .ThrowAsJavaScriptException();
    return env.Undefined();
}
/**
* Remove all layers and styles from this map
*
* @memberof Map
* @instance
* @name clear
*/
// clear: drops all layers and styles from the map.
Napi::Value Map::clear(Napi::CallbackInfo const& info)
{
    Napi::Env env = info.Env();
    map_->remove_all();
    return env.Undefined();
}
/**
* Give this map new dimensions
*
* @memberof Map
* @instance
* @name resize
* @param {number} width
* @param {number} height
*/
// resize(width, height): gives the map new pixel dimensions.
Napi::Value Map::resize(Napi::CallbackInfo const& info)
{
    Napi::Env env = info.Env();
    if (info.Length() != 2)
    {
        Napi::Error::New(env, "Please provide width and height").ThrowAsJavaScriptException();
        return env.Undefined();
    }
    if (!info[0].IsNumber() || !info[1].IsNumber())
    {
        Napi::TypeError::New(env, "width and height must be integers").ThrowAsJavaScriptException();
        return env.Undefined();
    }
    int const w = info[0].As<Napi::Number>().Int32Value();
    int const h = info[1].As<Napi::Number>().Int32Value();
    map_->resize(w, h);
    return env.Undefined();
}
/**
* Clone this map object, returning a value which can be changed
* without mutating the original
*
* @instance
* @name clone
* @memberof Map
* @returns {mapnik.Map} clone
*/
// clone: deep-copies the underlying mapnik::Map into a new wrapper object.
Napi::Value Map::clone(Napi::CallbackInfo const& info)
{
    Napi::Env env = info.Env();
    Napi::EscapableHandleScope scope(env);
    try
    {
        auto copy = std::make_shared<mapnik::Map>(*map_);
        Napi::Value arg = Napi::External<map_ptr>::New(env, &copy);
        return scope.Escape(Map::constructor.New({arg}));
    }
    catch (...)
    {
        Napi::Error::New(env, "Could not create new Map instance").ThrowAsJavaScriptException();
        return env.Undefined();
    }
}
/**
* Writes the map to an xml file
*
* @memberof Map
* @instance
* @name save
* @param {string} file path
* @example
* map.save("path/to/map.xml");
*/
// save(path): serializes the map to an XML file; returns a success Boolean.
Napi::Value Map::save(Napi::CallbackInfo const& info)
{
    Napi::Env env = info.Env();
    if (info.Length() != 1 || !info[0].IsString())
    {
        Napi::TypeError::New(env, "first argument must be a path to map.xml to save").ThrowAsJavaScriptException();
        return env.Undefined();
    }
    std::string const filename = info[0].As<Napi::String>();
    bool saved = false;
    try
    {
        bool const explicit_defaults = false;
        mapnik::save_map(*map_, filename, explicit_defaults);
        saved = true;
    }
    catch (...)
    {
        // failure is reported via the Boolean result, not an exception
    }
    return Napi::Boolean::New(env, saved);
}
/**
* Converts the map to an XML string
*
* @memberof Map
* @instance
* @name toXML
* @example
* var xml = map.toXML();
*/
// toXML: serializes the map to an XML string; throws on failure.
Napi::Value Map::toXML(Napi::CallbackInfo const& info)
{
    Napi::Env env = info.Env();
    try
    {
        bool const explicit_defaults = false;
        return Napi::String::New(env, mapnik::save_map_to_string(*map_, explicit_defaults));
    }
    catch (...)
    {
        // fall through to the error below
    }
    Napi::TypeError::New(env, "Failed to export to XML").ThrowAsJavaScriptException();
    return env.Undefined();
}
// zoomAll: fits the extent to all layers; mapnik errors become JS exceptions.
Napi::Value Map::zoomAll(Napi::CallbackInfo const& info)
{
    Napi::Env env = info.Env();
    try
    {
        map_->zoom_all();
        return env.Undefined();
    }
    catch (std::exception const& ex)
    {
        Napi::Error::New(env, ex.what()).ThrowAsJavaScriptException();
        return env.Undefined();
    }
}
// zoomToBox: zooms to a bounding box given either as a single 4-element
// numeric array or as 4 separate numeric arguments (minx, miny, maxx, maxy).
// All validated branches either assign all four coordinates or return early,
// so the variables are always initialized before use at the bottom.
Napi::Value Map::zoomToBox(Napi::CallbackInfo const& info)
{
    Napi::Env env = info.Env();
    double minx;
    double miny;
    double maxx;
    double maxy;
    // Form 1: zoomToBox([minx, miny, maxx, maxy])
    if (info.Length() == 1)
    {
        if (!info[0].IsArray())
        {
            Napi::Error::New(env, "Must provide an array of: [minx,miny,maxx,maxy]").ThrowAsJavaScriptException();
            return env.Undefined();
        }
        Napi::Array arr = info[0].As<Napi::Array>();
        if (arr.Length() != 4 || !arr.Get(0u).IsNumber() || !arr.Get(1u).IsNumber() || !arr.Get(2u).IsNumber() || !arr.Get(3u).IsNumber())
        {
            Napi::Error::New(env, "Must provide an array of: [minx,miny,maxx,maxy]").ThrowAsJavaScriptException();
            return env.Undefined();
        }
        minx = arr.Get(0u).As<Napi::Number>().DoubleValue();
        miny = arr.Get(1u).As<Napi::Number>().DoubleValue();
        maxx = arr.Get(2u).As<Napi::Number>().DoubleValue();
        maxy = arr.Get(3u).As<Napi::Number>().DoubleValue();
    }
    // Any other arity than 1 or 4 is an error.
    else if (info.Length() != 4)
    {
        Napi::Error::New(env, "Must provide 4 arguments: minx,miny,maxx,maxy").ThrowAsJavaScriptException();
        return env.Undefined();
    }
    // Form 2: zoomToBox(minx, miny, maxx, maxy)
    else if (info[0].IsNumber() &&
             info[1].IsNumber() &&
             info[2].IsNumber() &&
             info[3].IsNumber())
    {
        minx = info[0].As<Napi::Number>().DoubleValue();
        miny = info[1].As<Napi::Number>().DoubleValue();
        maxx = info[2].As<Napi::Number>().DoubleValue();
        maxy = info[3].As<Napi::Number>().DoubleValue();
    }
    else
    {
        Napi::Error::New(env, "If you are providing 4 arguments: minx,miny,maxx,maxy - they must be all numbers")
            .ThrowAsJavaScriptException();
        return env.Undefined();
    }
    mapnik::box2d<double> box{minx, miny, maxx, maxy};
    map_->zoom_to_box(box);
    return env.Undefined();
}
|
#include <bits/stdc++.h>
#define endl '\n'
using namespace std;
// Large prime: 291077
// Singly-linked node shared by the Stack and Queue classes below.
struct Node
{
    string data;   // payload
    int index;     // 1-based position; set by Stack::push (Queue leaves it value-initialized)
    Node *link;    // next node (toward the stack bottom / queue rear)
};
class Stack
{
private:
Node *top = NULL;
int topIndex = 0;
public:
bool isEmpty()
{
return (top == NULL) ? true : false;
}
void push(string value)
{
topIndex += 1;
Node *ptr = new Node();
ptr->data = value;
ptr->link = top;
ptr->index = topIndex;
top = ptr;
}
void pop()
{
if (isEmpty())
{
return;
}
else
{
Node *ptr = top;
//Era top aí em ptr, mas deixei pra ver se funciona
top = top->link;
topIndex -= 1;
delete (ptr);
}
}
string getTop()
{
if (isEmpty())
{
return "NULL";
}
else
{
return top->data;
}
}
void showTop()
{
if (isEmpty())
{
cout << "NULL" << endl;
}
else
{
cout << top->data << endl;
}
}
void showIndexTop()
{
if (isEmpty())
{
cout << 0 << endl;
}
else
{
cout << top->index << endl;
}
}
void deleteAll()
{
while (!isEmpty())
{
pop();
}
}
};
class Queue
{
private:
Node *rear = NULL;
Node *front = NULL;
int numPages = 0;
public:
bool isEmpty()
{
return (rear == NULL && front == NULL) ? true : false;
}
//Adiciona ao final
void enqueue(string value)
{
Node *ptr = new Node();
ptr->data = value;
ptr->link = NULL;
if (numPages == 0)
{
front = ptr;
rear = ptr;
}
else
{
rear->link = ptr;
rear = ptr;
}
numPages += 1;
}
//Retira do início
void dequeue()
{
if (isEmpty())
{
cout << "NULL" << endl;
}
else
{
//Caso só tenha um elemento
if (front == rear)
{
/*Estou liberando o lugar que front aponta, mas front e rear ainda existem, quando você usa a função "delete"
você está deletando o ponteiro e o lugar para onde ele está apontando*/
free(front);
front = NULL;
rear = NULL;
}
else
{
Node *ptr = front;
front = front->link;
free(ptr);
}
numPages -= 1;
}
}
void showStart()
{
if (isEmpty())
{
return;
}
else
{
cout << front->data << endl;
}
}
int getNumPages()
{
return numPages;
}
};
// Entry point: intentionally empty — the containers above are the exercise.
int main(int argc, char const *argv[])
{
    return 0;
}
|
#ifndef WYRAZENIEZESP_HH
#define WYRAZENIEZESP_HH
#include "LZespolona.hh"
/*!
 * Models the set of arithmetic operators.
 * Each enumerator's value is the ASCII character of the operator it denotes.
 */
enum Operator
{
  Op_Dodaj = '+', Op_Odejmij = '-', Op_Mnoz = '*', Op_Dziel = '/'
};
/*
 * Models the concept of a two-argument complex-number expression:
 * Arg1 Op Arg2.
 */
struct WyrazenieZesp {
  LZespolona   Arg1;   // First argument of the arithmetic expression
  Operator     Op;     // Operator of the arithmetic expression
  LZespolona   Arg2;   // Second argument of the arithmetic expression
};
/*
 * The functions below are to be defined in the implementation module.
 */
std::istream & operator >> (std::istream & str, WyrazenieZesp &WyrZ);  // read an expression from the stream
std::ostream & operator << (std::ostream & str, WyrazenieZesp WyrZ);   // write an expression to the stream
LZespolona Oblicz(WyrazenieZesp WyrZ);  // "Oblicz" = compute: evaluate Arg1 Op Arg2
#endif
|
///
/// Copyright(c) 2018 Aimin Huang
/// Distributed under the MIT License (http://opensource.org/licenses/MIT)
///
#include "trading_day.h"
#include <time.h>
#include <boost/date_time/gregorian/gregorian.hpp>
/// Returns the current trading day in ISO basic format (YYYYMMDD).
/// From the hour after 17 local time the trading day rolls to the next
/// calendar day, and weekend days roll forward to the following Monday.
std::string current_trading_day(){
    time_t rawtime = 0;
    ::time(&rawtime);
    // NOTE(review): ::localtime returns a pointer to shared static storage
    // and is not thread-safe — consider localtime_r if called concurrently.
    struct tm * timeinfo = ::localtime(&rawtime);
    tm info{};
    if (timeinfo != nullptr){
        // Bug fix: plain struct assignment replaces std::memcpy, which was
        // used without including <cstring>; also guard against a null return.
        info = *timeinfo;
    }
    boost::gregorian::date cur_trading_date = boost::gregorian::day_clock::local_day();
    // NOTE(review): tm_hour > 17 triggers from 18:00, not 17:00 — confirm
    // whether the intended cutoff is 5pm or 6pm.
    if (info.tm_hour > 17){ /// roll to the next settlement day after hour 17
        cur_trading_date += boost::gregorian::days(1);
    }
    // Weekend dates roll forward to Monday.
    switch (cur_trading_date.day_of_week())
    {
    case boost::date_time::Saturday:
        cur_trading_date += boost::gregorian::days(2);
        break;
    case boost::date_time::Sunday:
        cur_trading_date += boost::gregorian::days(1);
        break;
    default:
        break;
    }
    return boost::gregorian::to_iso_string(cur_trading_date);
}
|
#include "string.hpp"
#include <cassert>
#include <iostream>
//===========================================================================
// Test driver for the String char-array constructor: each scope constructs a
// String from a C literal and verifies it compares equal to the expected text.
int main ()
{
  {
    //------------------------------------------------------
    // SETUP FIXTURE
    // TEST
    String str("qw");
    // VERIFY
    //std::cout<<str<<'\n';
    assert(str == "qw");
  }
  {
    //------------------------------------------------------
    // SETUP FIXTURE
    // TEST
    String str("qwerty");
    //std::cout<<str<<'\n';
    // VERIFY
    assert(str == "qwerty");
  }
  {
    //------------------------------------------------------
    // SETUP FIXTURE
    // TEST: String-to-String comparison, including an embedded newline.
    String str("qwerty2\n");
    String str2("qwerty2\n");
    // VERIFY
    assert(str == str2);
  }
  {
    //------------------------------------------------------
    // SETUP FIXTURE
    // TEST
    String str("12345678\n12345");
    // VERIFY
    assert(str == "12345678\n12345");
  }
  {
    //------------------------------------------------------
    // SETUP FIXTURE
    // TEST
    // NOTE(review): "\0" is an empty C string (the NUL terminates it), so
    // this case only exercises empty-string construction/comparison.
    String str("\0");
    // VERIFY
    assert(str == "\0");
  }
  // ADD ADDITIONAL TESTS AS NECESSARY
  std::cout << "Done testing charArray constructor." << std::endl;
}
|
// Copyright (c) 2007-2013 Hartmut Kaiser
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#if !defined(HPX_COMPRESSION_SNAPPY_FEB_26_2013_0415AM)
#define HPX_COMPRESSION_SNAPPY_FEB_26_2013_0415AM
#include <hpx/hpx_fwd.hpp>
#include <hpx/plugins/binary_filter/snappy_serialization_filter.hpp>
#endif
|
/* -*- mode: c++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/*
Copyright (C) 2005, 2006 Klaus Spanderen
This file is part of QuantLib, a free-software/open-source library
for financial quantitative analysts and developers - http://quantlib.org/
QuantLib is free software: you can redistribute it and/or modify it
under the terms of the QuantLib license. You should have received a
copy of the license along with this program; if not, please email
<quantlib-dev@lists.sf.net>. The license is also available online at
<http://quantlib.org/license.shtml>.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the license for more details.
*/
/*! \file gaussianorthogonalpolynomial.hpp
\brief orthogonal polynomials for gaussian quadratures
*/
#ifndef quantlib_gaussian_orthogonal_polynomial_hpp
#define quantlib_gaussian_orthogonal_polynomial_hpp
#include <ql/types.hpp>
namespace QuantLib {
//! orthogonal polynomial for Gaussian quadratures
/*! References:
Gauss quadratures and orthogonal polynomials
       G.H. Golub and J.H. Welsch: Calculation of Gauss quadrature rules.
       Math. Comput. 23 (1969), 221-230
"Numerical Recipes in C", 2nd edition,
Press, Teukolsky, Vetterling, Flannery,
The polynomials are defined by the three-term recurrence relation
\f[
P_{k+1}(x)=(x-\alpha_k) P_k(x) - \beta_k P_{k-1}(x)
\f]
and
\f[
\mu_0 = \int{w(x)dx}
\f]
*/
    class GaussianOrthogonalPolynomial {
      public:
        virtual ~GaussianOrthogonalPolynomial() {}
        //! zeroth moment \f$ \mu_0 = \int{w(x)dx} \f$ of the weight function
        virtual Real mu_0() const = 0;
        //! recurrence coefficient \f$ \alpha_i \f$
        virtual Real alpha(Size i) const = 0;
        //! recurrence coefficient \f$ \beta_i \f$
        virtual Real beta(Size i) const = 0;
        //! weight function evaluated at x
        virtual Real w(Real x) const = 0;
        //! i-th polynomial at x, built from the three-term recurrence
        Real value(Size i, Real x) const;
        //! w(x) * value(i, x)
        Real weightedValue(Size i, Real x) const;
    };
    //! Gauss-Laguerre polynomial
    class GaussLaguerrePolynomial : public GaussianOrthogonalPolynomial {
      public:
        //! s: weight-function parameter; default 0 gives the plain Laguerre case
        GaussLaguerrePolynomial(Real s = 0.0);
        Real mu_0() const;
        Real alpha(Size i) const;
        Real beta(Size i) const;
        Real w(Real x) const;
      private:
        const Real s_;
    };
    //! Gauss-Hermite polynomial
    class GaussHermitePolynomial : public GaussianOrthogonalPolynomial {
      public:
        //! mu: weight-function parameter; default 0 gives the plain Hermite case
        GaussHermitePolynomial(Real mu = 0.0);
        Real mu_0()const;
        Real alpha(Size i) const;
        Real beta(Size i) const;
        Real w(Real x) const;
      private:
        const Real mu_;
    };
    //! Gauss-Jacobi polynomial
    /*! Base for the Legendre/Chebyshev/Gegenbauer special cases below. */
    class GaussJacobiPolynomial : public GaussianOrthogonalPolynomial {
      public:
        GaussJacobiPolynomial(Real alpha, Real beta);
        Real mu_0() const;
        Real alpha(Size i) const;
        Real beta(Size i) const;
        Real w(Real x) const;
      private:
        const Real alpha_;
        const Real beta_;
    };
    //! Gauss-Legendre polynomial
    /*! Special case of the Jacobi polynomial (see base class). */
    class GaussLegendrePolynomial : public GaussJacobiPolynomial {
      public:
        GaussLegendrePolynomial();
    };
    //! Gauss-Chebyshev polynomial
    /*! Special case of the Jacobi polynomial (see base class). */
    class GaussChebyshevPolynomial : public GaussJacobiPolynomial {
      public:
        GaussChebyshevPolynomial();
    };
    //! Gauss-Chebyshev polynomial (second kind)
    /*! Special case of the Jacobi polynomial (see base class). */
    class GaussChebyshev2ndPolynomial : public GaussJacobiPolynomial {
      public:
        GaussChebyshev2ndPolynomial();
    };
    //! Gauss-Gegenbauer polynomial
    /*! Special case of the Jacobi polynomial (see base class). */
    class GaussGegenbauerPolynomial : public GaussJacobiPolynomial {
      public:
        GaussGegenbauerPolynomial(Real lambda);
    };
    //! Gauss hyperbolic polynomial
    class GaussHyperbolicPolynomial : public GaussianOrthogonalPolynomial {
      public:
        Real mu_0()const;
        Real alpha(Size i) const;
        Real beta(Size i) const;
        Real w(Real x) const;
    };
}
#endif
|
#pragma once
#include <boost/optional/optional.hpp>
#include <utility>
// Applies func to the value inside an optional-like argument, returning the
// result wrapped in a boost::optional; an empty input propagates as an empty
// optional.
template< typename A, typename F >
auto optional_apply( A&& val, F func ) -> decltype( boost::make_optional( func( *std::forward< A >( val ) ) ) )
{
    if( !val )
    {
        return {};
    }
    return boost::make_optional( func( *std::forward< A >( val ) ) );
}
|
//=================================================================================================
/*!
// \file src/mathtest/operations/dvecsvecouter/V6bVCa.cpp
// \brief Source file for the V6bVCa dense vector/sparse vector outer product math test
//
// Copyright (C) 2012-2020 Klaus Iglberger - All Rights Reserved
//
// This file is part of the Blaze library. You can redistribute it and/or modify it under
// the terms of the New (Revised) BSD License. Redistribution and use in source and binary
// forms, with or without modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other materials
// provided with the distribution.
// 3. Neither the names of the Blaze development group nor the names of its contributors
// may be used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
*/
//=================================================================================================
//*************************************************************************************************
// Includes
//*************************************************************************************************
#include <cstdlib>
#include <iostream>
#include <blaze/math/CompressedVector.h>
#include <blaze/math/StaticVector.h>
#include <blazetest/mathtest/Creator.h>
#include <blazetest/mathtest/operations/dvecsvecouter/OperationTest.h>
#include <blazetest/system/MathTest.h>
#ifdef BLAZE_USE_HPX_THREADS
# include <hpx/hpx_main.hpp>
#endif
//=================================================================================================
//
// MAIN FUNCTION
//
//=================================================================================================
//*************************************************************************************************
// Test driver: runs the Blaze dense/sparse outer-product operation test for
// every CompressedVector size i in [0, 8] and nonzero count j in [0, i],
// paired with a fixed-size StaticVector<TypeB, 6>.
int main()
{
   std::cout << "   Running 'V6bVCa'..." << std::endl;

   using blazetest::mathtest::TypeA;
   using blazetest::mathtest::TypeB;

   try
   {
      // Vector type definitions
      using V6b = blaze::StaticVector<TypeB,6UL>;
      using VCa = blaze::CompressedVector<TypeA>;

      // Creator type definitions
      using CV6b = blazetest::Creator<V6b>;
      using CVCa = blazetest::Creator<VCa>;

      // Running the tests
      for( size_t i=0UL; i<=8UL; ++i ) {
         for( size_t j=0UL; j<=i; ++j ) {
            RUN_DVECSVECOUTER_OPERATION_TEST( CV6b(), CVCa( i, j ) );
         }
      }
   }
   catch( std::exception& ex ) {
      // Any failed sub-test surfaces here; report and signal failure.
      std::cerr << "\n\n ERROR DETECTED during dense vector/sparse vector outer product:\n"
                << ex.what() << "\n";
      return EXIT_FAILURE;
   }

   return EXIT_SUCCESS;
}
//*************************************************************************************************
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "pch.h"
#include "Header Files/CalcEngine.h"
using namespace std;
using namespace CalcEngine;
constexpr int C_NUM_MAX_DIGITS = MAX_STRLEN;
constexpr int C_EXP_MAX_DIGITS = 4;
// Resets this number section to its canonical empty, non-negative state.
void CalcNumSec::Clear()
{
    m_isNegative = false;
    value.clear();
}
// Resets the whole input: both number sections plus every entry-state flag,
// so the next keystroke starts a fresh number.
void CalcInput::Clear()
{
    m_hasExponent = false;
    m_hasDecimal = false;
    m_decPtIndex = 0;
    m_base.Clear();
    m_exponent.Clear();
}
// Toggles the sign of whichever section is being edited (exponent when
// active, otherwise the base). In integer mode a negative base may refuse to
// become positive when the magnitude would exceed the positive maximum
// (e.g. -128 is a valid signed byte but +128 is not). Returns false only in
// that refusal case.
bool CalcInput::TryToggleSign(bool isIntegerMode, wstring_view maxNumStr)
{
    // Zero is always positive
    if (m_base.IsEmpty())
    {
        m_base.IsNegative(false);
        m_exponent.IsNegative(false);
    }
    else if (m_hasExponent)
    {
        m_exponent.IsNegative(!m_exponent.IsNegative());
    }
    else
    {
        // When in integer only mode, it isn't always allowed to toggle, as toggling can cause the num to be out of
        // bounds. For eg. in byte -128 is valid, but when it toggled it becomes 128, which is more than 127.
        if (isIntegerMode && m_base.IsNegative())
        {
            // Decide if this additional digit will fit for the given bit width
            if (m_base.value.size() >= maxNumStr.size() && m_base.value.back() > maxNumStr.back())
            {
                // Last digit is more than the allowed positive number. Fail
                return false;
            }
        }
        m_base.IsNegative(!m_base.IsNegative());
    }
    return true;
}
// Attempts to append one digit to the section currently being edited
// (exponent when active, otherwise the base). Supports radixes > 10 via
// alpha 'digits'. Returns false when the digit would exceed the allowed
// length/magnitude for the current mode.
//
// value         - numeric value of the digit (0..radix-1).
// radix         - current numeric base; affects the "one extra digit" rules.
// isIntegerMode - true for fixed-width (programmer) integer input.
// maxNumStr     - string form of the largest representable positive value.
// wordBitWidth  - bit width of the current integer word size.
// maxDigits     - maximum number of digits allowed in the base section.
bool CalcInput::TryAddDigit(unsigned int value, uint32_t radix, bool isIntegerMode, wstring_view maxNumStr, long wordBitWidth, int maxDigits)
{
    // Convert from an integer into a character
    // This includes both normal digits and alpha 'digits' for radixes > 10
    auto chDigit = static_cast<wchar_t>((value < 10) ? (L'0' + value) : (L'A' + value - 10));
    CalcNumSec* pNumSec;
    size_t maxCount;
    if (m_hasExponent)
    {
        pNumSec = &m_exponent;
        maxCount = C_EXP_MAX_DIGITS;
    }
    else
    {
        pNumSec = &m_base;
        maxCount = maxDigits;
        // Don't include the decimal point in the count. In that way you can enter the maximum allowed precision.
        // Precision doesn't include decimal point.
        if (HasDecimalPt())
        {
            maxCount++;
        }
        // First leading 0 is not counted in input restriction as the output can be of that form
        // See NumberToString algorithm. REVIEW: We don't have such input restriction mimicking based on output of NumberToString for exponent
        // NumberToString can give 10 digit exponent, but we still restrict the exponent here to be only 4 digits.
        if (!pNumSec->IsEmpty() && pNumSec->value.front() == L'0')
        {
            maxCount++;
        }
    }
    // Ignore leading zeros
    if (pNumSec->IsEmpty() && (value == 0))
    {
        return true;
    }
    if (pNumSec->value.size() < maxCount)
    {
        pNumSec->value += chDigit;
        return true;
    }
    // if we are in integer mode, within the base, and we're on the last digit then
    // there are special cases where we can actually add one more digit.
    if (isIntegerMode && pNumSec->value.size() == maxCount && !m_hasExponent)
    {
        bool allowExtraDigit = false;
        if (radix == 8)
        {
            // Octal packs 3 bits per digit; the leading digit may only use
            // wordBitWidth % 3 bits, which bounds its largest allowed value.
            switch (wordBitWidth % 3)
            {
            case 1:
                // in 16 or 64bit word size, if the first digit is a 1 we can enter 6 (16bit) or 22 (64bit) digits
                allowExtraDigit = (pNumSec->value.front() == L'1');
                break;
            case 2:
                // in 8 or 32bit word size, if the first digit is a 3 or less we can enter 3 (8bit) or 11 (32bit) digits
                allowExtraDigit = (pNumSec->value.front() <= L'3');
                break;
            }
        }
        else if (radix == 10)
        {
            // If value length is at least the max, we know we can't add another digit.
            if(pNumSec->value.size() < maxNumStr.size())
            {
                // Compare value to substring of maxNumStr of value.size() length.
                // If cmpResult > 0:
                // eg. max is "127", and the current number is "20". first digit itself says we are out.
                // Additional digit is not possible
                // If cmpResult < 0:
                // Success case. eg. max is "127", and current number is say "11". The second digit '1' being <
                // corresponding digit '2', means all digits are possible to append, like 119 will still be < 127
                // If cmpResult == 0:
                // Undecided still. The case when max is "127", and current number is "12". Look for the new number being 7 or less to allow
                auto cmpResult = pNumSec->value.compare(0, wstring::npos, maxNumStr, 0, pNumSec->value.size());
                if (cmpResult < 0)
                {
                    allowExtraDigit = true;
                }
                else if (cmpResult == 0)
                {
                    auto lastChar = maxNumStr[pNumSec->value.size()];
                    if (chDigit <= lastChar)
                    {
                        allowExtraDigit = true;
                    }
                    else if (pNumSec->IsNegative() && chDigit <= lastChar + 1)
                    {
                        // Negative value case, eg. max is "127", and current number is "-12". Then 8 is also valid, as the range
                        // is always from -(max+1)...max in signed mode
                        allowExtraDigit = true;
                    }
                }
            }
        }
        if (allowExtraDigit)
        {
            pNumSec->value += chDigit;
            return true;
        }
    }
    return false;
}
// Appends the locale decimal separator to the base number. Fails (returns
// false) when a decimal point already exists or the exponent is being
// entered; an empty base becomes "0." rather than a bare ".".
bool CalcInput::TryAddDecimalPt()
{
    if (m_hasDecimal)
    {
        return false; // only one decimal point per number
    }
    if (m_hasExponent)
    {
        return false; // exponents are integral: no decimal point allowed
    }

    if (m_base.IsEmpty())
    {
        m_base.value += L"0"; // Add a leading zero
    }

    m_decPtIndex = m_base.value.size();
    m_base.value += m_decSymbol;
    m_hasDecimal = true;
    return true;
}
// Returns true when the base number currently contains a decimal point.
bool CalcInput::HasDecimalPt()
{
    return m_hasDecimal;
}
// Switches input to the exponent section. Returns false if the exponent is
// already being entered. (Checking m_hasExponent before TryAddDecimalPt is
// equivalent to the original order: TryAddDecimalPt is a no-op whenever
// m_hasExponent is set.)
bool CalcInput::TryBeginExponent()
{
    if (m_hasExponent) // Already entering exponent
    {
        return false;
    }

    // For compatibility, add a trailing dec point to base num if it doesn't have one
    TryAddDecimalPt();

    m_hasExponent = true; // Entering exponent
    return true;
}
// Removes the most recently entered character. While editing the exponent,
// deleting the last exponent digit keeps exponent mode; a backspace on an
// already-empty exponent drops back to base editing. In base mode, deleting
// past the decimal point clears the decimal state, and an emptied base is
// fully reset (which also clears its sign).
void CalcInput::Backspace()
{
    if (m_hasExponent)
    {
        if (!m_exponent.IsEmpty())
        {
            m_exponent.value.pop_back();
            if (m_exponent.IsEmpty())
            {
                // Reset sign as well, not just the digits.
                m_exponent.Clear();
            }
        }
        else
        {
            m_hasExponent = false;
        }
    }
    else
    {
        if (!m_base.IsEmpty())
        {
            m_base.value.pop_back();
        }
        if (m_base.value.size() <= m_decPtIndex)
        {
            // Backed up over decimal point
            m_hasDecimal = false;
            m_decPtIndex = 0;
        }
        if (m_base.IsEmpty())
        {
            // Reset sign as well, not just the digits.
            m_base.Clear();
        }
    }
}
// Installs a new locale decimal separator and, if a decimal point has
// already been typed, rewrites it in place to the new symbol.
void CalcInput::SetDecimalSymbol(wchar_t decSymbol)
{
    if (decSymbol == m_decSymbol)
    {
        return; // unchanged: nothing to do
    }

    m_decSymbol = decSymbol;
    if (m_hasDecimal)
    {
        // Replace the separator previously entered at the recorded index.
        m_base.value[m_decPtIndex] = m_decSymbol;
    }
}
// Renders the current input as a display string of the form
// [-]base[e|^][+|-]exponent. Returns an empty string if either section
// exceeds MAX_STRLEN or the assembled result exceeds the combined cap.
// NOTE(review): isIntegerMode is currently unused in this function.
wstring CalcInput::ToString(uint32_t radix, bool isIntegerMode)
{
    // In theory both the base and exponent could be C_NUM_MAX_DIGITS long.
    wstringstream resStream;
    if ((m_base.value.size() > MAX_STRLEN) || (m_hasExponent && m_exponent.value.size() > MAX_STRLEN))
    {
        return wstring();
    }
    if (m_base.IsNegative())
    {
        resStream << L'-';
    }
    resStream << (m_base.IsEmpty() ? L"0" : m_base.value);
    if (m_hasExponent)
    {
        // Add a decimal point if it is not already there
        if (!m_hasDecimal)
        {
            resStream << m_decSymbol;
        }
        // Decimal uses scientific 'e'; other radixes mark the exponent with '^'.
        resStream << ((radix == 10) ? L'e' : L'^');
        resStream << (m_exponent.IsNegative() ? L'-' : L'+');
        resStream << (m_exponent.IsEmpty() ? L"0" : m_exponent.value);
    }
    auto result = resStream.str();
    // Base and Exp can each be up to C_NUM_MAX_DIGITS in length, plus 4 characters for sign, dec, exp, and expSign.
    if (result.size() > C_NUM_MAX_DIGITS * 2 + 4)
    {
        return wstring();
    }
    return result;
}
// Converts the current input to a Rational via the ratpak string parser.
// Returns 0 when StringToRat fails (yields nullptr).
Rational CalcInput::ToRational(uint32_t radix, int32_t precision)
{
    PRAT rat = StringToRat(m_base.IsNegative(), m_base.value, m_exponent.IsNegative(), m_exponent.value, radix, precision);
    if (rat == nullptr)
    {
        return 0;
    }
    Rational result{ rat };
    // The PRAT is freed here; Rational{rat} presumably takes its own copy —
    // TODO confirm against the Rational constructor.
    destroyrat(rat);
    return result;
}
|
/// @file
///
/// Copyright Matus Chochlik.
/// Distributed under the Boost Software License, Version 1.0.
/// See accompanying file LICENSE_1_0.txt or copy at
/// http://www.boost.org/LICENSE_1_0.txt
///
#ifndef EAGINE_MATH_CONSTANTS_HPP
#define EAGINE_MATH_CONSTANTS_HPP
#include <cmath>
namespace eagine::math {
/// @var pi
/// @brief The pi constant.
/// @ingroup math
/// @see phi
#ifdef M_PI
static constexpr const auto pi = M_PI;
#else
static constexpr const auto pi = 3.14159265358979323846;
#endif
/// @var phi
/// @brief The phi (golden ratio) constant, (1 + sqrt(5)) / 2.
/// @ingroup math
/// @see pi
/// @note Declared `inline` (C++17) so every translation unit including this
/// header shares a single object; the previous `static const` gave each TU
/// its own copy with its own dynamic initialization.
inline const auto phi = (1.0 + std::sqrt(5.0)) * 0.5;
} // namespace eagine::math
#endif // EAGINE_MATH_CONSTANTS_HPP
|
// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <lib/syslog/global.h>
#include <lib/syslog/logger.h>
#include <initializer_list>
#include <string>
#include "logger.h"
namespace syslog {
// Initializes the global logger with explicit settings and up to
// FX_LOG_MAX_TAGS tags. Returns ZX_ERR_INVALID_ARGS when too many tags are
// supplied, otherwise the status of fx_log_init_with_config.
zx_status_t InitLogger(const syslog::LogSettings& settings,
                       const std::initializer_list<std::string>& tags) {
  if (tags.size() > FX_LOG_MAX_TAGS) {
    return ZX_ERR_INVALID_ARGS;
  }
  // Borrow each tag's internal buffer; assumes fx_log_init_with_config
  // copies what it needs before returning — TODO confirm against the
  // fx_logger API contract.
  const char* ctags[FX_LOG_MAX_TAGS];
  int i = 0;
  for (auto& tag : tags) {
    ctags[i++] = tag.c_str();
  }
  fx_logger_config_t config = {.min_severity = settings.severity,
                               .console_fd = settings.fd,
                               .log_service_channel = ZX_HANDLE_INVALID,
                               .tags = ctags,
                               .num_tags = tags.size()};
  return fx_log_init_with_config(&config);
}
// Convenience overload: INFO severity, no explicit console fd.
zx_status_t InitLogger(const std::initializer_list<std::string>& tags) {
  LogSettings defaults = {};
  defaults.severity = FX_LOG_INFO;
  defaults.fd = -1;
  return InitLogger(defaults, tags);
}
zx_status_t InitLogger() { return InitLogger({}); }
} // namespace syslog
|
/**
* All rights reserved.
* License: see LICENSE.txt
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must display the names 'Denis Zyamaev' and
* in the credits of the application, if such credits exist.
* The authors of this work must be notified via email (code4un@yandex.ru) in
* this case of redistribution.
* 3. Neither the name of copyright holders nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS
* IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
**/
// = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// INCLUDES
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// Include STL
#include <iostream>
#include <string>
#include <cstdio>
// Include Windows SDK
#include <Windows.h>
// Include c0de4un::engine::win::WinGraphics
#ifndef C0DE4UN_ENGINE_WIN_GRAPHICS_HPP
#include "../public/engine/windows/graphics/WinGraphics.hpp"
#endif // !C0DE4UN_ENGINE_WIN_GRAPHICS_HPP
#ifdef DEBUG // DEBUG
// c0de4un::engine::core::Log
#ifndef C0DE4UN_ENGINE_CORE_LOG_HPP
#include "../public/engine/core/utils/metrics/Log.hpp"
#endif // !C0DE4UN_ENGINE_CORE_LOG_HPP
// Include c0de4un::engine::core::DefaultLogger
#ifndef C0DE4UN_ENGINE_CORE_DEFAULT_LOGGER_HPP
#include "../public/engine/core/utils/metrics/DefaultLogger.hpp"
#endif // !C0DE4UN_ENGINE_CORE_DEFAULT_LOGGER_HPP
#endif // DEBUG
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// TYPES
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#ifndef OK
// Process exit code for successful termination.
static constexpr const int OK = 0;
#define OK OK
#endif
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// FIELDS
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
/** GraphicsManager. Non-owning cache of the WinGraphics singleton:
 * assigned from getInstance() in init(), released via
 * engine_WinGraphics::Terminate() and nulled in terminate(). **/
engine_WinGraphics* mGraphics(nullptr);
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// MAIN
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
/**
 * Initializes logging (DEBUG builds only) and the graphics subsystem.
 * Creates/registers the WinGraphics singleton if it does not exist yet,
 * then starts it. All failures are caught and reported (DEBUG builds).
 * @return true when graphics started successfully, false otherwise.
**/
bool init()
{
#ifdef DEBUG // DEBUG
    engine_Log::Initialize( new engine_DefaultLogger() );
#endif // DEBUG
    // Guarded-Block
    try
    {
        if ( !mGraphics )
        {
            // Initialize GraphicsManager instance
            mGraphics = new engine_WinGraphics();
            engine_WinGraphics::Initialize( static_cast<engine_Graphics*>(mGraphics) );
            // Re-fetch through the singleton accessor so the cached pointer
            // matches whatever instance Initialize registered.
            mGraphics = static_cast<engine_WinGraphics*>( engine_WinGraphics::getInstance() );
        }
        if ( !mGraphics->Start() )
        {
#ifdef DEBUG // DEBUG
            engine_Log::error( "main::init: failed to start WinGraphics" );
#endif // DEBUG
            return false;
        }
    }
    catch(const std::exception& e)
    {
#ifdef DEBUG // DEBUG
        engine_Log::error( e.what() );
#endif // DEBUG
        return false;
    }
    return true;
}
/**
 * Stops & releases the graphics subsystem, then shuts down logging.
 * Safe to call even when init() failed part-way.
**/
void terminate()
{
    // Guarded-Block
    try
    {
        // Stop & Release Graphics
        if ( mGraphics )
        {
            // Terminate destroys the singleton registered in init().
            engine_WinGraphics::Terminate();
            mGraphics = nullptr;
        }
    }
    catch(const std::exception& e)
    {
#ifdef DEBUG // DEBUG
        engine_Log::error( e.what() );
#endif // DEBUG
    }
#ifdef DEBUG // DEBUG
    // BUGFIX: engine_Log is only included in DEBUG builds (see includes),
    // so this call must be guarded like every other engine_Log use;
    // previously it was unconditional and broke non-DEBUG compilation.
    engine_Log::Terminate();
#endif // DEBUG
}
/**
 * Application entry point: configures the console for UTF-8 output,
 * initializes the engine, then terminates it.
 * @return OK (0) on completion.
**/
int main()
{
#ifdef DEBUG // DEBUG
    std::cout << "Starting . . ." << std::endl;
#endif // DEBUG
    // Set console code page to UTF-8 so the console knows how to interpret string data
    SetConsoleOutputCP( CP_UTF8 );
    // Enable buffering to prevent VS from chopping up UTF-8 byte sequences
    setvbuf( stdout, nullptr, _IOFBF, 1000 );
    if ( !init() )
    {
#ifdef DEBUG // DEBUG
        // BUGFIX: user-facing message typo "ERORR" corrected to "ERROR".
        std::cout << "ERROR" << std::endl << "Press any key to exit" << std::endl;
        std::cin.get();
#endif // DEBUG
    }
    terminate();
#ifdef DEBUG // DEBUG
    std::cout << "Completed . . ." << std::endl << "Press any key to exit" << std::endl;
    std::cin.get();
#endif // DEBUG
    return OK;
}
// = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
|
/*
MIT License
Copyright (c) 2017 FMI Open Development / Markus Peura, first.last@fmi.fi
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
/*
Part of Rack development has been done in the BALTRAD projects part-financed
by the European Union (European Regional Development Fund and European
Neighbourhood Partnership Instrument, Baltic Sea Region Programme 2007-2013)
*/
//#include <exception>
#include <fstream>
#include <iostream>
#include "drain/util/Log.h"
#include "drain/util/Output.h"
#include "fileio.h"
#include "resources.h"
#include "file-hist.h"
namespace rack {
// Definition of the static helper member (shared parameter template).
const HistEntry CmdHistogram::histEntryHelper;
// Computes a 256-bin histogram of the currently selected data array.
// The result is optionally written to `filename` (with a legend taken from
// what:legend or synthesized from nodata/undetect) and/or stored under
// how:<store> in the HDF5 tree.
void CmdHistogram::exec() const {
    RackContext & ctx = getContext<RackContext>();
    drain::Logger mout(ctx.log, __FUNCTION__, __FILE__); // getResources().mout;
    Hi5Tree & currentHi5 = *ctx.currentHi5;
    ODIMPath path;
    DataSelector selector(ODIMPathElemMatcher::DATA);
    //selector.setParameters(ctx.select);
    // consumeParameters also clears ctx.select, so no explicit clear below.
    selector.consumeParameters(ctx.select);
    selector.getPath3(currentHi5, path);
    //ctx.select.clear();
    PlainData<BasicDst> dstData(currentHi5(path));
    mout.warn() << "data: " << dstData.data << mout.endl;
    mout.note() << "path: " << path << " [" << dstData.odim.quantity << ']' << mout.endl;
    // NO resources.setCurrentImage(selector);
    // drain::image::Image & img = *ctx.currentImage;
    // mout.warn() << "computing hist" << mout.endl;
    drain::Histogram histogram(256);
    // Propagate the data's physical scaling so bin ranges can be reported
    // in physical units by writeHistogram().
    histogram.setScale(dstData.data.getScaling());
    histogram.compute(dstData.data, dstData.data.getType());
    if (!filename.empty()){
        mout.warn() << "writing " << filename << mout.endl;
        legend leg;
        const drain::VariableMap & dstWhat = dstData.getWhat();
        if (dstWhat.hasKey("legend")){
            mout.note() << "Using what:legend" << dstWhat["legend"] << mout.endl;
            //typedef std::map<int, std::string> legend;
            dstWhat["legend"].toMap(leg, ',', ':'); // TOD
        }
        else {
            // No stored legend: mark the special codes so they are labelled.
            setSpecialEntry(leg, dstData.odim.nodata, "nodata");
            setSpecialEntry(leg, dstData.odim.undetect, "undetect");
        }
        writeHistogram(histogram, filename, leg);
    }
    if (!store.empty()){
        dstData.getHow()[store] = histogram.getVector();
        //dstData.updateTree2();
    }
}
// Adds a labelled legend entry for a special marker code (nodata/undetect).
// Warns when the marker value is not an integer, since the legend key is
// its truncated integer form.
void CmdHistogram::setSpecialEntry(legend & leg, double value, const std::string & label) const {
    RackContext & ctx = getContext<RackContext>();
    drain::Logger mout(ctx.log, __FUNCTION__, __FILE__); // getResources().mout;
    legend::key_type i = static_cast<legend::key_type>(value);
    if (static_cast<double>(i) != value){
        mout.warn() << "special code '" << label << "'=" << value << " not integer" << mout;
    }
    /*
    if (i < leg.begin()->first){
        mout.warn() << "special code '" << label << "'=" << value << " smaller than" << mout;
    }
    */
    leg[i] = label;
}
// Writes the histogram counts to `filename` ("-" means stdout) using the
// context's format string (or a default template). With a legend of 3+
// entries only the listed codes are written; otherwise every bin is dumped,
// labelled from the legend where available.
void CmdHistogram::writeHistogram(const drain::Histogram & histogram, const std::string & filename, const legend &leg) const {
    RackContext & ctx = getContext<RackContext>();
    drain::Logger mout(ctx.log, __FUNCTION__, __FILE__);
    drain::Output out((filename == "-") ? filename : ctx.outputPrefix + filename);
    std::ostream & ostr = out;
    drain::StringMapper mapper;
    if (! ctx.formatStr.empty()){
        mapper.parse(ctx.formatStr, true);
    }
    else
        mapper.parse("${count} # '${label}' (${index}) [${min}, ${max}] \n", false); // here \n IS newline...
    // TODO: check tests
    // NEW
    ostr << "# " << mapper << '\n'; // TODO: pick plain keys
    mout.note() << "Legend: " << drain::sprinter(leg) << mout.endl;
    // OLD
    // Header
    ostr << "# [0," << histogram.getSize() << "] ";
    if (histogram.scaling.isPhysical())
        ostr << '[' << histogram.scaling.physRange << ']';
    ostr << '\n';
    HistEntry entry;
    const drain::Histogram::vect_t & v = histogram.getVector();
    //if (!leg.empty()){
    if (leg.size() >= 3){ // KLUDGE
        // Legend-driven output: comment lines listing the codes, then one
        // formatted row per legend entry.
        for (legend::const_iterator it=leg.begin(); it!=leg.end(); ++it){
            ostr << "# " << it->first << '=' << it->second << '\n';
        }
        for (legend::const_iterator it=leg.begin(); it!=leg.end(); ++it){
            entry.index = it->first;
            entry.count = v[it->first];
            // Bin edges mapped back to physical values via the scaling.
            entry.binRange.min = histogram.scaling.fwd(it->first);
            entry.binRange.max = histogram.scaling.fwd(it->first + 1);
            entry.label = it->second; // or parameters.reference?
            mapper.toStream(ostr, entry.getParameters());
        }
        ostr << '\n';
    }
    else {
        mout.note() << "No legend supplied, writing all elements" << mout.endl;
        for (std::size_t i=0; i<v.size(); ++i){
            entry.index = i;
            entry.count = v[i];
            entry.binRange.min = histogram.scaling.fwd(i);
            entry.binRange.max = histogram.scaling.fwd(i+1);
            legend::const_iterator it = leg.find(i);
            if (it == leg.end()){
                entry.label.clear();
            }
            else {
                entry.label = it->second;
            }
            /*
            if (i == dstData.odim.nodata)
                entry.label = "nodata";
            else if (i == dstData.odim.undetect)
                entry.label = "undetect";
            else
                entry.label.clear();
            */
            mapper.toStream(ostr, entry.getParameters());
        }
        ostr << '\n';
    }
    // histogram.dump(out);
}
} // namespace rack
|
//==================================================================================================
/**
EVE - Expressive Vector Engine
Copyright : EVE Contributors & Maintainers
SPDX-License-Identifier: MIT
**/
//==================================================================================================
#include "test.hpp"
#include <eve/module/core.hpp>
#include <eve/module/core.hpp>
#include <eve/module/math.hpp>
#include <eve/module/math/detail/constant/rempio2_limits.hpp>
//==================================================================================================
// Types tests
//==================================================================================================
// Verifies that eve::cos preserves the type of its argument for both wide
// (SIMD) types and their scalar element types.
EVE_TEST_TYPES( "Check return types of cos"
, eve::test::simd::ieee_reals
)
<typename T>(eve::as<T>)
{
using v_t = eve::element_type_t<T>;
TTS_EXPR_IS( eve::cos(T()) , T);
TTS_EXPR_IS( eve::cos(v_t()), v_t);
};
//==================================================================================================
// cos tests
//==================================================================================================
// Range-bound generators for the random inputs below. Each returns the
// limit appropriate to element type T: +/- pi/4, +/- pi/2, +/- pi, and the
// +/- "medium" argument-reduction limit from Rempio2_limit.
auto mquarter_c = []<typename T>(eve::as<T> const & tgt){ return -eve::pio_4(tgt); };
auto quarter_c = []<typename T>(eve::as<T> const & tgt){ return eve::pio_4(tgt); };
auto mhalf_c = []<typename T>(eve::as<T> const & tgt){ return -eve::pio_2(tgt); };
auto half_c = []<typename T>(eve::as<T> const & tgt){ return eve::pio_2(tgt); };
auto mfull_c= []<typename T>(eve::as<T> const & tgt){ return -eve::pi(tgt); };
auto full_c = []<typename T>(eve::as<T> const & tgt){ return eve::pi(tgt); };
auto mmed = []<typename T>(eve::as<T> const & tgt){ return -eve::detail::Rempio2_limit(eve::detail::medium_type(), tgt); };
auto med = []<typename T>(eve::as<T> const & tgt){ return eve::detail::Rempio2_limit(eve::detail::medium_type(), tgt); };
// Compares eve::cos — and its restricted-range decorators quarter_circle /
// half_circle / full_circle — against element-wise std::cos within 2 ULPs.
// Each decorator is only exercised on inputs inside its valid range
// (a0: pi/4, a1: pi/2, a2: pi, a3: medium limit, a4: full value range).
// Also checks the derivative: diff(cos) == -sin.
EVE_TEST( "Check behavior of cos on wide"
, eve::test::simd::ieee_reals
, eve::test::generate( eve::test::randoms(mquarter_c, quarter_c)
, eve::test::randoms(mhalf_c, half_c)
, eve::test::randoms(mfull_c, full_c)
, eve::test::randoms(mmed, med)
, eve::test::randoms(eve::valmin, eve::valmax))
)
<typename T>(T const& a0, T const& a1 , T const& a2, T const& a3, T const& a4)
{
using eve::detail::map;
using eve::cos;
using eve::diff;
using v_t = eve::element_type_t<T>;
auto ref = [](auto e) -> v_t { return std::cos(e); };
TTS_ULP_EQUAL(eve::quarter_circle(cos)(a0) , map(ref, a0), 2);
TTS_ULP_EQUAL(eve::half_circle(cos)(a0) , map(ref, a0), 2);
TTS_ULP_EQUAL(eve::half_circle(cos)(a1) , map(ref, a1), 2);
TTS_ULP_EQUAL(eve::full_circle(cos)(a0) , map(ref, a0), 2);
TTS_ULP_EQUAL(eve::full_circle(cos)(a1) , map(ref, a1), 2);
TTS_ULP_EQUAL(eve::full_circle(cos)(a2) , map(ref, a2), 2);
TTS_ULP_EQUAL(cos(a0) , map(ref, a0), 2);
TTS_ULP_EQUAL(cos(a1) , map(ref, a1), 2);
TTS_ULP_EQUAL(cos(a2) , map(ref, a2), 2);
TTS_ULP_EQUAL(cos(a3) , map(ref, a3), 2);
TTS_ULP_EQUAL(cos(a4) , map(ref, a4), 2);
TTS_ULP_EQUAL(diff(cos)(a0), map([](auto e) -> v_t { return -std::sin(e); }, a0), 2);
};
|
/* Siconos is a program dedicated to modeling, simulation and control
* of non smooth dynamical systems.
*
* Copyright 2016 INRIA.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Export plugin entry points with C linkage (plus dllexport on Windows) so
// the Siconos runtime can resolve them by name.
#ifdef _WIN32
#define SICONOS_EXPORT extern "C" __declspec(dllexport)
#else
#define SICONOS_EXPORT extern "C"
#endif
#include <stdio.h>
#include <math.h>
const double m = 1; // ball mass
const double g = 9.81; // gravity
// Additional time-dependent external force; currently identically zero.
extern "C" double FextFunction(double time)
{
    (void)time; // unused: the extra force is a constant (zero)
    return -0.0;
}
// Plugin: external force vector. Zeroes all components, then applies
// gravity plus the extra time-dependent force to the first coordinate.
SICONOS_EXPORT void ballFExt(double time, double *fExt, unsigned int sizeOfq, unsigned int sizeZ, double* z)
{
    unsigned int k = 0;
    while (k < sizeOfq)
    {
        fExt[k] = 0.0;
        ++k;
    }
    fExt[0] = FextFunction(time) - m * g;
}
// Time-dependent external moment: M(t) = -sin(t).
extern "C" double MextFunction(double time)
{
    return -sin(time);
}
// Plugin: external moment vector. Zeroes all components, then applies the
// time-dependent moment to the first coordinate.
SICONOS_EXPORT void ballMExt(double time, double *mExt, unsigned int sizeOfq, unsigned int sizeZ, double* z)
{
    for (unsigned int k = 0; k < sizeOfq; ++k)
    {
        mExt[k] = 0.0;
    }
    mExt[0] = MextFunction(time);
}
|
/*
* Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#include <aws/s3/model/TopicConfiguration.h>
#include <aws/core/utils/xml/XmlSerializer.h>
#include <aws/core/utils/StringUtils.h>
#include <aws/core/utils/memory/stl/AWSStringStream.h>
#include <utility>
using namespace Aws::Utils::Xml;
using namespace Aws::Utils;
namespace Aws
{
namespace S3
{
namespace Model
{
// Default-constructs an empty configuration. All HasBeenSet flags start
// false, so AddToNode() serializes nothing until fields are assigned.
TopicConfiguration::TopicConfiguration() :
    m_idHasBeenSet(false),
    m_topicArnHasBeenSet(false),
    m_eventsHasBeenSet(false),
    m_filterHasBeenSet(false)
{
}
// Constructs a configuration by parsing an XML node; delegates to
// operator=(const XmlNode&) after clearing all HasBeenSet flags.
TopicConfiguration::TopicConfiguration(const XmlNode& xmlNode) :
    m_idHasBeenSet(false),
    m_topicArnHasBeenSet(false),
    m_eventsHasBeenSet(false),
    m_filterHasBeenSet(false)
{
  *this = xmlNode;
}
// Parses a TopicConfiguration from XML. Recognized child elements:
// Id, Topic, Event (repeatable), Filter. Each successfully parsed field
// also sets its corresponding HasBeenSet flag.
TopicConfiguration& TopicConfiguration::operator =(const XmlNode& xmlNode)
{
  XmlNode resultNode = xmlNode;
  if(!resultNode.IsNull())
  {
    XmlNode idNode = resultNode.FirstChild("Id");
    if(!idNode.IsNull())
    {
      m_id = StringUtils::Trim(idNode.GetText().c_str());
      m_idHasBeenSet = true;
    }
    XmlNode topicArnNode = resultNode.FirstChild("Topic");
    if(!topicArnNode.IsNull())
    {
      m_topicArn = StringUtils::Trim(topicArnNode.GetText().c_str());
      m_topicArnHasBeenSet = true;
    }
    XmlNode eventsNode = resultNode.FirstChild("Event");
    if(!eventsNode.IsNull())
    {
      // <Event> may repeat; walk the sibling chain collecting each one.
      XmlNode eventMember = eventsNode;
      while(!eventMember.IsNull())
      {
        m_events.push_back(EventMapper::GetEventForName(StringUtils::Trim(eventMember.GetText().c_str())));
        eventMember = eventMember.NextNode("Event");
      }
      m_eventsHasBeenSet = true;
    }
    XmlNode filterNode = resultNode.FirstChild("Filter");
    if(!filterNode.IsNull())
    {
      m_filter = filterNode;
      m_filterHasBeenSet = true;
    }
  }
  return *this;
}
// Serializes this configuration under parentNode, mirroring the element
// names that operator=(const XmlNode&) parses: Id, Topic, Event (repeated),
// Filter. Only fields whose HasBeenSet flag is true are emitted.
void TopicConfiguration::AddToNode(XmlNode& parentNode) const
{
  Aws::StringStream ss;
  if(m_idHasBeenSet)
  {
    XmlNode idNode = parentNode.CreateChildElement("Id");
    idNode.SetText(m_id);
  }
  if(m_topicArnHasBeenSet)
  {
    // BUGFIX: was "TopicArn"; the parser and the S3 schema use "Topic".
    XmlNode topicArnNode = parentNode.CreateChildElement("Topic");
    topicArnNode.SetText(m_topicArn);
  }
  if(m_eventsHasBeenSet)
  {
    // One <Event> element per entry.
    for(const auto& item : m_events)
    {
      XmlNode eventsNode = parentNode.CreateChildElement("Event");
      eventsNode.SetText(EventMapper::GetNameForEvent(item));
    }
  }
  if(m_filterHasBeenSet)
  {
    // BUGFIX: was "Event"; must be "Filter" to round-trip with operator=.
    XmlNode filterNode = parentNode.CreateChildElement("Filter");
    m_filter.AddToNode(filterNode);
  }
}
} // namespace Model
} // namespace S3
} // namespace Aws
|
// Ivan Carvalho
// Solution to https://www.urionlinejudge.com.br/judge/problems/view/1026
#include <cstdio>
// Reads pairs of unsigned integers until EOF and prints their bitwise XOR.
int main(){
    unsigned int a;
    unsigned int b;
    for (;;) {
        if (scanf("%u %u", &a, &b) == EOF) {
            break;
        }
        printf("%u\n", a ^ b);
    }
    return 0;
}
|
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencl_kernels_imgproc.hpp"
/****************************************************************************************\
Base Image Filter
\****************************************************************************************/
#if IPP_VERSION_X100 >= 710
#define USE_IPP_SEP_FILTERS 1
#else
#undef USE_IPP_SEP_FILTERS
#endif
namespace cv
{
// Trivial base-class implementations: -1 sentinels mean "not configured";
// the reset() hooks are no-ops by default and may be overridden by stateful
// filters.
BaseRowFilter::BaseRowFilter() { ksize = anchor = -1; }
BaseRowFilter::~BaseRowFilter() {}
BaseColumnFilter::BaseColumnFilter() { ksize = anchor = -1; }
BaseColumnFilter::~BaseColumnFilter() {}
void BaseColumnFilter::reset() {}
BaseFilter::BaseFilter() { ksize = Size(-1,-1); anchor = Point(-1,-1); }
BaseFilter::~BaseFilter() {}
void BaseFilter::reset() {}
// Default state: no filters attached, pixel types unset (-1), replicate
// borders, and zeroed row-buffer bookkeeping. Geometry stays unknown
// until start() is called.
FilterEngine::FilterEngine()
{
    srcType = dstType = bufType = -1;
    rowBorderType = columnBorderType = BORDER_REPLICATE;
    bufStep = startY = startY0 = endY = rowCount = dstY = 0;
    maxWidth = 0;
    wholeSize = Size(-1,-1);
}
// Convenience constructor: forwards all configuration to init().
FilterEngine::FilterEngine( const Ptr<BaseFilter>& _filter2D,
                            const Ptr<BaseRowFilter>& _rowFilter,
                            const Ptr<BaseColumnFilter>& _columnFilter,
                            int _srcType, int _dstType, int _bufType,
                            int _rowBorderType, int _columnBorderType,
                            const Scalar& _borderValue )
{
    init(_filter2D, _rowFilter, _columnFilter, _srcType, _dstType, _bufType,
         _rowBorderType, _columnBorderType, _borderValue);
}
// All members clean up via their own destructors (Ptr/vector RAII).
FilterEngine::~FilterEngine()
{
}
// Configures the engine with either a non-separable 2D filter or a
// separable row/column filter pair. Normalizes the type codes, validates
// border modes and the anchor position, and precomputes the raw constant
// border value when BORDER_CONSTANT is requested.
void FilterEngine::init( const Ptr<BaseFilter>& _filter2D,
                         const Ptr<BaseRowFilter>& _rowFilter,
                         const Ptr<BaseColumnFilter>& _columnFilter,
                         int _srcType, int _dstType, int _bufType,
                         int _rowBorderType, int _columnBorderType,
                         const Scalar& _borderValue )
{
    _srcType = CV_MAT_TYPE(_srcType);
    _bufType = CV_MAT_TYPE(_bufType);
    _dstType = CV_MAT_TYPE(_dstType);
    srcType = _srcType;
    int srcElemSize = (int)getElemSize(srcType);
    dstType = _dstType;
    bufType = _bufType;
    filter2D = _filter2D;
    rowFilter = _rowFilter;
    columnFilter = _columnFilter;
    // Column border defaults to the row border when unspecified (<0).
    if( _columnBorderType < 0 )
        _columnBorderType = _rowBorderType;
    rowBorderType = _rowBorderType;
    columnBorderType = _columnBorderType;
    CV_Assert( columnBorderType != BORDER_WRAP );
    if( isSeparable() )
    {
        CV_Assert( rowFilter && columnFilter );
        ksize = Size(rowFilter->ksize, columnFilter->ksize);
        anchor = Point(rowFilter->anchor, columnFilter->anchor);
    }
    else
    {
        CV_Assert( bufType == srcType );
        ksize = filter2D->ksize;
        anchor = filter2D->anchor;
    }
    // The anchor must lie inside the kernel.
    CV_Assert( 0 <= anchor.x && anchor.x < ksize.width &&
               0 <= anchor.y && anchor.y < ksize.height );
    // Border table entries are stored as ints for >=32-bit depths,
    // otherwise per byte.
    borderElemSize = srcElemSize/(CV_MAT_DEPTH(srcType) >= CV_32S ? sizeof(int) : 1);
    int borderLength = std::max(ksize.width - 1, 1);
    borderTab.resize(borderLength*borderElemSize);
    maxWidth = bufStep = 0;
    constBorderRow.clear();
    if( rowBorderType == BORDER_CONSTANT || columnBorderType == BORDER_CONSTANT )
    {
        // Pre-render the constant border value as raw pixels (up to 4 channels).
        constBorderValue.resize(srcElemSize*borderLength);
        int srcType1 = CV_MAKETYPE(CV_MAT_DEPTH(srcType), MIN(CV_MAT_CN(srcType), 4));
        scalarToRawData(_borderValue, &constBorderValue[0], srcType1,
                        borderLength*CV_MAT_CN(srcType));
    }
    wholeSize = Size(-1,-1);
}
#define VEC_ALIGN CV_MALLOC_ALIGN
/* Prepares the engine to filter the _roi part of a _wholeSize image:
   (re)allocates the row ring buffer, pre-renders the constant-border row
   and precomputes the horizontal border tables. Returns the index of the
   first source row (startY) the caller must feed into proceed(). */
int FilterEngine::start(Size _wholeSize, Rect _roi, int _maxBufRows)
{
    int i, j;

    wholeSize = _wholeSize;
    roi = _roi;
    CV_Assert( roi.x >= 0 && roi.y >= 0 && roi.width >= 0 && roi.height >= 0 &&
               roi.x + roi.width <= wholeSize.width &&
               roi.y + roi.height <= wholeSize.height );

    int esz = (int)getElemSize(srcType);
    int bufElemSize = (int)getElemSize(bufType);
    const uchar* constVal = !constBorderValue.empty() ? &constBorderValue[0] : 0;

    // Ring-buffer height: at least kernel height plus slack, and enough to
    // cover the widest vertical reach of the anchor in both directions.
    if( _maxBufRows < 0 )
        _maxBufRows = ksize.height + 3;
    _maxBufRows = std::max(_maxBufRows, std::max(anchor.y, ksize.height-anchor.y-1)*2+1);

    if( maxWidth < roi.width || _maxBufRows != (int)rows.size() )
    {
        rows.resize(_maxBufRows);
        maxWidth = std::max(maxWidth, roi.width);
        int cn = CV_MAT_CN(srcType);
        srcRow.resize(esz*(maxWidth + ksize.width - 1));
        if( columnBorderType == BORDER_CONSTANT )
        {
            // Build the fully processed "constant border" row once; it is
            // substituted for rows that fall outside the image vertically.
            constBorderRow.resize(getElemSize(bufType)*(maxWidth + ksize.width - 1 + VEC_ALIGN));
            uchar *dst = alignPtr(&constBorderRow[0], VEC_ALIGN), *tdst;
            int n = (int)constBorderValue.size(), N;
            N = (maxWidth + ksize.width - 1)*esz;
            tdst = isSeparable() ? &srcRow[0] : dst;

            // Tile the pre-rendered constant value across the whole row.
            for( i = 0; i < N; i += n )
            {
                n = std::min( n, N - i );
                for(j = 0; j < n; j++)
                    tdst[i+j] = constVal[j];
            }

            // For separable filters the constant row must also pass through
            // the row filter so the column stage sees buffer-typed data.
            if( isSeparable() )
                (*rowFilter)(&srcRow[0], dst, maxWidth, cn);
        }

        int maxBufStep = bufElemSize*(int)alignSize(maxWidth +
            (!isSeparable() ? ksize.width - 1 : 0),VEC_ALIGN);
        ringBuf.resize(maxBufStep*rows.size()+VEC_ALIGN);
    }

    // adjust bufstep so that the used part of the ring buffer stays compact in memory
    bufStep = bufElemSize*(int)alignSize(roi.width + (!isSeparable() ? ksize.width - 1 : 0),16);

    // dx1/dx2: number of pixels to extrapolate on the left/right side.
    dx1 = std::max(anchor.x - roi.x, 0);
    dx2 = std::max(ksize.width - anchor.x - 1 + roi.x + roi.width - wholeSize.width, 0);

    // recompute border tables
    if( dx1 > 0 || dx2 > 0 )
    {
        if( rowBorderType == BORDER_CONSTANT )
        {
            // Constant border: paint the margins once and for all.
            int nr = isSeparable() ? 1 : (int)rows.size();
            for( i = 0; i < nr; i++ )
            {
                uchar* dst = isSeparable() ? &srcRow[0] : alignPtr(&ringBuf[0],VEC_ALIGN) + bufStep*i;
                memcpy( dst, constVal, dx1*esz );
                memcpy( dst + (roi.width + ksize.width - 1 - dx2)*esz, constVal, dx2*esz );
            }
        }
        else
        {
            // Replicate/reflect/etc.: precompute, for every margin element,
            // the index of the interior element it should be copied from.
            int xofs1 = std::min(roi.x, anchor.x) - roi.x;

            int btab_esz = borderElemSize, wholeWidth = wholeSize.width;
            int* btab = (int*)&borderTab[0];

            for( i = 0; i < dx1; i++ )
            {
                int p0 = (borderInterpolate(i-dx1, wholeWidth, rowBorderType) + xofs1)*btab_esz;
                for( j = 0; j < btab_esz; j++ )
                    btab[i*btab_esz + j] = p0 + j;
            }

            for( i = 0; i < dx2; i++ )
            {
                int p0 = (borderInterpolate(wholeWidth + i, wholeWidth, rowBorderType) + xofs1)*btab_esz;
                for( j = 0; j < btab_esz; j++ )
                    btab[(i + dx1)*btab_esz + j] = p0 + j;
            }
        }
    }

    rowCount = dstY = 0;
    startY = startY0 = std::max(roi.y - anchor.y, 0);
    endY = std::min(roi.y + roi.height + ksize.height - anchor.y - 1, wholeSize.height);

    // Reset stateful filters (e.g. running-sum based column filters).
    if( columnFilter )
        columnFilter->reset();
    if( filter2D )
        filter2D->reset();

    return startY;
}
int FilterEngine::start(const Mat& src, const Rect& _srcRoi,
bool isolated, int maxBufRows)
{
Rect srcRoi = _srcRoi;
if( srcRoi == Rect(0,0,-1,-1) )
srcRoi = Rect(0,0,src.cols,src.rows);
CV_Assert( srcRoi.x >= 0 && srcRoi.y >= 0 &&
srcRoi.width >= 0 && srcRoi.height >= 0 &&
srcRoi.x + srcRoi.width <= src.cols &&
srcRoi.y + srcRoi.height <= src.rows );
Point ofs;
Size wsz(src.cols, src.rows);
if( !isolated )
src.locateROI( wsz, ofs );
start( wsz, srcRoi + ofs, maxBufRows );
return startY - ofs.y;
}
/* Number of source rows that still have to be fed into proceed(). */
int FilterEngine::remainingInputRows() const
{
    // Rows already consumed reach up to startY + rowCount; endY is the
    // one-past-last source row needed for the current ROI.
    int consumed = startY + rowCount;
    return endY - consumed;
}
/* Number of destination rows not yet produced for the current ROI. */
int FilterEngine::remainingOutputRows() const
{
    int produced = dstY;
    return roi.height - produced;
}
/* Feeds up to 'count' consecutive source rows (starting from the row
   returned by start()) into the engine and emits as many finished
   destination rows as the buffered data allows. Returns the number of
   destination rows written.

   The engine keeps the most recent row-filtered rows in a ring buffer
   (ringBuf); once a full vertical kernel window is buffered, the column
   stage (or the 2D filter) produces output rows from it. */
int FilterEngine::proceed( const uchar* src, int srcstep, int count,
                           uchar* dst, int dststep )
{
    CV_Assert( wholeSize.width > 0 && wholeSize.height > 0 );

    const int *btab = &borderTab[0];
    int esz = (int)getElemSize(srcType), btab_esz = borderElemSize;
    uchar** brows = &rows[0];
    int bufRows = (int)rows.size();
    int cn = CV_MAT_CN(bufType);
    int width = roi.width, kwidth = ksize.width;
    int kheight = ksize.height, ay = anchor.y;
    int _dx1 = dx1, _dx2 = dx2;
    int width1 = roi.width + kwidth - 1;
    int xofs1 = std::min(roi.x, anchor.x);
    bool isSep = isSeparable();
    bool makeBorder = (_dx1 > 0 || _dx2 > 0) && rowBorderType != BORDER_CONSTANT;
    int dy = 0, i = 0;

    // Step back so that left-of-ROI pixels that exist in the whole image
    // can be read directly from the source row.
    src -= xofs1*esz;
    count = std::min(count, remainingInputRows());

    CV_Assert( src && dst && count > 0 );

    for(;; dst += dststep*i, dy += i)
    {
        // Number of source rows to absorb before the next column pass.
        int dcount = bufRows - ay - startY - rowCount + roi.y;
        dcount = dcount > 0 ? dcount : bufRows - kheight + 1;
        dcount = std::min(dcount, count);
        count -= dcount;
        for( ; dcount-- > 0; src += srcstep )
        {
            // Slot in the ring buffer for this row.
            int bi = (startY - startY0 + rowCount) % bufRows;
            uchar* brow = alignPtr(&ringBuf[0], VEC_ALIGN) + bi*bufStep;
            uchar* row = isSep ? &srcRow[0] : brow;

            // Ring buffer full: evict the oldest row.
            if( ++rowCount > bufRows )
            {
                --rowCount;
                ++startY;
            }

            memcpy( row + _dx1*esz, src, (width1 - _dx2 - _dx1)*esz );

            if( makeBorder )
            {
                // Fill the left/right margins via the precomputed border
                // table; copy whole ints when the element layout allows.
                if( btab_esz*(int)sizeof(int) == esz )
                {
                    const int* isrc = (const int*)src;
                    int* irow = (int*)row;

                    for( i = 0; i < _dx1*btab_esz; i++ )
                        irow[i] = isrc[btab[i]];
                    for( i = 0; i < _dx2*btab_esz; i++ )
                        irow[i + (width1 - _dx2)*btab_esz] = isrc[btab[i+_dx1*btab_esz]];
                }
                else
                {
                    for( i = 0; i < _dx1*esz; i++ )
                        row[i] = src[btab[i]];
                    for( i = 0; i < _dx2*esz; i++ )
                        row[i + (width1 - _dx2)*esz] = src[btab[i+_dx1*esz]];
                }
            }

            // Separable path: run the row filter now, so the ring buffer
            // holds row-filtered (bufType) data.
            if( isSep )
                (*rowFilter)(row, brow, width, CV_MAT_CN(srcType));
        }

        // Collect pointers to the buffered rows forming the vertical
        // window(s) for the next batch of output rows.
        int max_i = std::min(bufRows, roi.height - (dstY + dy) + (kheight - 1));
        for( i = 0; i < max_i; i++ )
        {
            int srcY = borderInterpolate(dstY + dy + i + roi.y - ay,
                            wholeSize.height, columnBorderType);
            if( srcY < 0 ) // can happen only with constant border type
                brows[i] = alignPtr(&constBorderRow[0], VEC_ALIGN);
            else
            {
                CV_Assert( srcY >= startY );
                if( srcY >= startY + rowCount )
                    break;
                int bi = (srcY - startY0) % bufRows;
                brows[i] = alignPtr(&ringBuf[0], VEC_ALIGN) + bi*bufStep;
            }
        }

        // Fewer buffered rows than the kernel height: need more input.
        if( i < kheight )
            break;
        i -= kheight - 1;
        if( isSeparable() )
            (*columnFilter)((const uchar**)brows, dst, dststep, i, roi.width*cn);
        else
            (*filter2D)((const uchar**)brows, dst, dststep, i, roi.width, cn);
    }

    dstY += dy;
    CV_Assert( dstY <= roi.height );
    return dy;
}
/* Convenience wrapper: filters src (or its _srcRoi part) into dst at
   offset dstOfs using a single start()+proceed() sequence. 'isolated'
   disables the use of real pixels outside the ROI of a sub-matrix. */
void FilterEngine::apply(const Mat& src, Mat& dst,
    const Rect& _srcRoi, Point dstOfs, bool isolated)
{
    CV_Assert( src.type() == srcType && dst.type() == dstType );

    Rect srcRoi = _srcRoi;
    // (0,0,-1,-1) is the "whole image" sentinel.
    if( srcRoi == Rect(0,0,-1,-1) )
        srcRoi = Rect(0,0,src.cols,src.rows);

    if( srcRoi.area() == 0 )
        return;

    CV_Assert( dstOfs.x >= 0 && dstOfs.y >= 0 &&
        dstOfs.x + srcRoi.width <= dst.cols &&
        dstOfs.y + srcRoi.height <= dst.rows );

    // start() returns the first source row to feed, relative to src.
    int y = start(src, srcRoi, isolated);
    proceed( src.ptr() + y*src.step + srcRoi.x*src.elemSize(),
             (int)src.step, endY - startY,
             dst.ptr(dstOfs.y) +
             dstOfs.x*dst.elemSize(), (int)dst.step );
}
}
/****************************************************************************************\
* Separable linear filter *
\****************************************************************************************/
/* Classifies a filter kernel and returns a combination of KERNEL_* flags
   (KERNEL_SMOOTH, KERNEL_INTEGER, KERNEL_SYMMETRICAL, KERNEL_ASYMMETRICAL)
   that lets callers choose an optimized filtering path. Symmetry is only
   tested for 1D kernels whose anchor is exactly at the center. */
int cv::getKernelType(InputArray filter_kernel, Point anchor)
{
    Mat _kernel = filter_kernel.getMat();
    CV_Assert( _kernel.channels() == 1 );
    int i, sz = _kernel.rows*_kernel.cols;

    Mat kernel;
    _kernel.convertTo(kernel, CV_64F);

    const double* coeffs = kernel.ptr<double>();
    double sum = 0;
    // Start optimistic; each test below can only clear flags.
    int type = KERNEL_SMOOTH + KERNEL_INTEGER;
    if( (_kernel.rows == 1 || _kernel.cols == 1) &&
        anchor.x*2 + 1 == _kernel.cols &&
        anchor.y*2 + 1 == _kernel.rows )
        type |= (KERNEL_SYMMETRICAL + KERNEL_ASYMMETRICAL);

    for( i = 0; i < sz; i++ )
    {
        // Compare every coefficient with its mirror around the center.
        double a = coeffs[i], b = coeffs[sz - i - 1];
        if( a != b )
            type &= ~KERNEL_SYMMETRICAL;
        if( a != -b )
            type &= ~KERNEL_ASYMMETRICAL;
        // A smoothing kernel must be non-negative...
        if( a < 0 )
            type &= ~KERNEL_SMOOTH;
        if( a != saturate_cast<int>(a) )
            type &= ~KERNEL_INTEGER;
        sum += a;
    }

    // ...and must sum (approximately) to 1.
    if( fabs(sum - 1) > FLT_EPSILON*(fabs(sum) + 1) )
        type &= ~KERNEL_SMOOTH;
    return type;
}
namespace cv
{
// No-op vectorized row filter: operator() reports 0 elements processed, so
// the caller's scalar loop handles the entire row. Used when no SIMD
// implementation is available for the type combination.
struct RowNoVec
{
    RowNoVec() {}
    RowNoVec(const Mat&) {}
    int operator()(const uchar*, uchar*, int, int) const { return 0; }
};
// No-op vectorized column filter counterpart of RowNoVec: always returns 0
// so the scalar fallback processes everything.
struct ColumnNoVec
{
    ColumnNoVec() {}
    ColumnNoVec(const Mat&, int, int, double) {}
    int operator()(const uchar**, uchar*, int) const { return 0; }
};
// No-op vectorizer for small symmetric row kernels: always returns 0
// (nothing vectorized; scalar code does the work).
struct SymmRowSmallNoVec
{
    SymmRowSmallNoVec() {}
    SymmRowSmallNoVec(const Mat&, int) {}
    int operator()(const uchar*, uchar*, int, int) const { return 0; }
};
// No-op vectorizer for small symmetric column kernels: always returns 0.
struct SymmColumnSmallNoVec
{
    SymmColumnSmallNoVec() {}
    SymmColumnSmallNoVec(const Mat&, int, int, double) {}
    int operator()(const uchar**, uchar*, int) const { return 0; }
};
// No-op vectorizer for general (non-separable) 2D filters: always returns 0.
struct FilterNoVec
{
    FilterNoVec() {}
    FilterNoVec(const Mat&, int, double) {}
    int operator()(const uchar**, uchar*, int) const { return 0; }
};
#if CV_SSE2
///////////////// 8u->8u & 8u->16s filtering (32s intermediate rows) /////////////////
// SSE2 row filter: 8u source pixels convolved with an integer kernel into a
// 32s row buffer. Products are formed as 16x16->32 bit via the
// mullo/mulhi pair, which is only exact when every kernel coefficient fits
// in int16 — hence the smallValues check.
struct RowVec_8u32s
{
    RowVec_8u32s() { smallValues = false; }
    RowVec_8u32s( const Mat& _kernel )
    {
        kernel = _kernel;
        smallValues = true;
        int k, ksize = kernel.rows + kernel.cols - 1;
        // Disable the vector path if any coefficient exceeds int16 range.
        for( k = 0; k < ksize; k++ )
        {
            int v = kernel.ptr<int>()[k];
            if( v < SHRT_MIN || v > SHRT_MAX )
            {
                smallValues = false;
                break;
            }
        }
    }

    // Returns the number of elements processed; the caller finishes the
    // remainder with scalar code.
    int operator()(const uchar* _src, uchar* _dst, int width, int cn) const
    {
        if( !checkHardwareSupport(CV_CPU_SSE2) )
            return 0;

        int i = 0, k, _ksize = kernel.rows + kernel.cols - 1;
        int* dst = (int*)_dst;
        const int* _kx = kernel.ptr<int>();
        width *= cn;

        if( smallValues )
        {
            // Main loop: 16 output values per iteration.
            for( ; i <= width - 16; i += 16 )
            {
                const uchar* src = _src + i;
                __m128i f, z = _mm_setzero_si128(), s0 = z, s1 = z, s2 = z, s3 = z;
                __m128i x0, x1, x2, x3;
                for( k = 0; k < _ksize; k++, src += cn )
                {
                    // Broadcast coefficient k to all eight 16-bit lanes.
                    f = _mm_cvtsi32_si128(_kx[k]);
                    f = _mm_shuffle_epi32(f, 0);
                    f = _mm_packs_epi32(f, f);

                    // Widen 16 bytes to 16-bit lanes, multiply, then
                    // recombine lo/hi halves into 32-bit sums.
                    x0 = _mm_loadu_si128((const __m128i*)src);
                    x2 = _mm_unpackhi_epi8(x0, z);
                    x0 = _mm_unpacklo_epi8(x0, z);
                    x1 = _mm_mulhi_epi16(x0, f);
                    x3 = _mm_mulhi_epi16(x2, f);
                    x0 = _mm_mullo_epi16(x0, f);
                    x2 = _mm_mullo_epi16(x2, f);

                    s0 = _mm_add_epi32(s0, _mm_unpacklo_epi16(x0, x1));
                    s1 = _mm_add_epi32(s1, _mm_unpackhi_epi16(x0, x1));
                    s2 = _mm_add_epi32(s2, _mm_unpacklo_epi16(x2, x3));
                    s3 = _mm_add_epi32(s3, _mm_unpackhi_epi16(x2, x3));
                }

                _mm_store_si128((__m128i*)(dst + i), s0);
                _mm_store_si128((__m128i*)(dst + i + 4), s1);
                _mm_store_si128((__m128i*)(dst + i + 8), s2);
                _mm_store_si128((__m128i*)(dst + i + 12), s3);
            }

            // Tail loop: 4 output values per iteration.
            for( ; i <= width - 4; i += 4 )
            {
                const uchar* src = _src + i;
                __m128i f, z = _mm_setzero_si128(), s0 = z, x0, x1;
                for( k = 0; k < _ksize; k++, src += cn )
                {
                    f = _mm_cvtsi32_si128(_kx[k]);
                    f = _mm_shuffle_epi32(f, 0);
                    f = _mm_packs_epi32(f, f);

                    x0 = _mm_cvtsi32_si128(*(const int*)src);
                    x0 = _mm_unpacklo_epi8(x0, z);
                    x1 = _mm_mulhi_epi16(x0, f);
                    x0 = _mm_mullo_epi16(x0, f);
                    s0 = _mm_add_epi32(s0, _mm_unpacklo_epi16(x0, x1));
                }
                _mm_store_si128((__m128i*)(dst + i), s0);
            }
        }
        return i;
    }

    Mat kernel;
    bool smallValues;  // true iff all coefficients fit in int16
};
// SSE2 row filter for small (ksize 1/3/5) symmetric or anti-symmetric
// integer kernels, 8u input -> 32s row buffer. Common fixed kernels
// ({1,2,1}, {1,-2,1}, {-1,0,1}, {1,0,-2,0,1}) get dedicated branches with
// pure 16-bit arithmetic; other coefficient sets use a generic
// mullo/mulhi 16->32 bit path (valid because smallValues guarantees the
// coefficients fit in int16).
struct SymmRowSmallVec_8u32s
{
    SymmRowSmallVec_8u32s() { smallValues = false; }
    SymmRowSmallVec_8u32s( const Mat& _kernel, int _symmetryType )
    {
        kernel = _kernel;
        symmetryType = _symmetryType;
        smallValues = true;
        int k, ksize = kernel.rows + kernel.cols - 1;
        // Disable the vector path if any coefficient exceeds int16 range.
        for( k = 0; k < ksize; k++ )
        {
            int v = kernel.ptr<int>()[k];
            if( v < SHRT_MIN || v > SHRT_MAX )
            {
                smallValues = false;
                break;
            }
        }
    }

    // Returns the number of elements processed (scalar code finishes the rest).
    int operator()(const uchar* src, uchar* _dst, int width, int cn) const
    {
        if( !checkHardwareSupport(CV_CPU_SSE2) )
            return 0;

        int i = 0, j, k, _ksize = kernel.rows + kernel.cols - 1;
        int* dst = (int*)_dst;
        bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0;
        // kx points at the central coefficient; kx[-k]==±kx[k] by symmetry.
        const int* kx = kernel.ptr<int>() + _ksize/2;
        if( !smallValues )
            return 0;

        src += (_ksize/2)*cn;
        width *= cn;

        __m128i z = _mm_setzero_si128();
        if( symmetrical )
        {
            if( _ksize == 1 )
                return 0;
            if( _ksize == 3 )
            {
                // {1,2,1}: pure 16-bit adds, zero-extend to 32 bits.
                if( kx[0] == 2 && kx[1] == 1 )
                    for( ; i <= width - 16; i += 16, src += 16 )
                    {
                        __m128i x0, x1, x2, y0, y1, y2;
                        x0 = _mm_loadu_si128((__m128i*)(src - cn));
                        x1 = _mm_loadu_si128((__m128i*)src);
                        x2 = _mm_loadu_si128((__m128i*)(src + cn));
                        y0 = _mm_unpackhi_epi8(x0, z);
                        x0 = _mm_unpacklo_epi8(x0, z);
                        y1 = _mm_unpackhi_epi8(x1, z);
                        x1 = _mm_unpacklo_epi8(x1, z);
                        y2 = _mm_unpackhi_epi8(x2, z);
                        x2 = _mm_unpacklo_epi8(x2, z);
                        x0 = _mm_add_epi16(x0, _mm_add_epi16(_mm_add_epi16(x1, x1), x2));
                        y0 = _mm_add_epi16(y0, _mm_add_epi16(_mm_add_epi16(y1, y1), y2));
                        _mm_store_si128((__m128i*)(dst + i), _mm_unpacklo_epi16(x0, z));
                        _mm_store_si128((__m128i*)(dst + i + 4), _mm_unpackhi_epi16(x0, z));
                        _mm_store_si128((__m128i*)(dst + i + 8), _mm_unpacklo_epi16(y0, z));
                        _mm_store_si128((__m128i*)(dst + i + 12), _mm_unpackhi_epi16(y0, z));
                    }
                // {1,-2,1} (Laplacian-like): results can be negative, so
                // sign-extend via the unpack+shift trick.
                else if( kx[0] == -2 && kx[1] == 1 )
                    for( ; i <= width - 16; i += 16, src += 16 )
                    {
                        __m128i x0, x1, x2, y0, y1, y2;
                        x0 = _mm_loadu_si128((__m128i*)(src - cn));
                        x1 = _mm_loadu_si128((__m128i*)src);
                        x2 = _mm_loadu_si128((__m128i*)(src + cn));
                        y0 = _mm_unpackhi_epi8(x0, z);
                        x0 = _mm_unpacklo_epi8(x0, z);
                        y1 = _mm_unpackhi_epi8(x1, z);
                        x1 = _mm_unpacklo_epi8(x1, z);
                        y2 = _mm_unpackhi_epi8(x2, z);
                        x2 = _mm_unpacklo_epi8(x2, z);
                        x0 = _mm_add_epi16(x0, _mm_sub_epi16(x2, _mm_add_epi16(x1, x1)));
                        y0 = _mm_add_epi16(y0, _mm_sub_epi16(y2, _mm_add_epi16(y1, y1)));
                        _mm_store_si128((__m128i*)(dst + i), _mm_srai_epi32(_mm_unpacklo_epi16(x0, x0),16));
                        _mm_store_si128((__m128i*)(dst + i + 4), _mm_srai_epi32(_mm_unpackhi_epi16(x0, x0),16));
                        _mm_store_si128((__m128i*)(dst + i + 8), _mm_srai_epi32(_mm_unpacklo_epi16(y0, y0),16));
                        _mm_store_si128((__m128i*)(dst + i + 12), _mm_srai_epi32(_mm_unpackhi_epi16(y0, y0),16));
                    }
                else
                {
                    // Generic symmetric 3-tap: k0*center + k1*(left+right).
                    __m128i k0 = _mm_shuffle_epi32(_mm_cvtsi32_si128(kx[0]), 0),
                            k1 = _mm_shuffle_epi32(_mm_cvtsi32_si128(kx[1]), 0);
                    k0 = _mm_packs_epi32(k0, k0);
                    k1 = _mm_packs_epi32(k1, k1);

                    for( ; i <= width - 16; i += 16, src += 16 )
                    {
                        __m128i x0, x1, x2, y0, y1, t0, t1, z0, z1, z2, z3;
                        x0 = _mm_loadu_si128((__m128i*)(src - cn));
                        x1 = _mm_loadu_si128((__m128i*)src);
                        x2 = _mm_loadu_si128((__m128i*)(src + cn));
                        y0 = _mm_add_epi16(_mm_unpackhi_epi8(x0, z), _mm_unpackhi_epi8(x2, z));
                        x0 = _mm_add_epi16(_mm_unpacklo_epi8(x0, z), _mm_unpacklo_epi8(x2, z));
                        y1 = _mm_unpackhi_epi8(x1, z);
                        x1 = _mm_unpacklo_epi8(x1, z);
                        t1 = _mm_mulhi_epi16(x1, k0);
                        t0 = _mm_mullo_epi16(x1, k0);
                        x2 = _mm_mulhi_epi16(x0, k1);
                        x0 = _mm_mullo_epi16(x0, k1);
                        z0 = _mm_unpacklo_epi16(t0, t1);
                        z1 = _mm_unpackhi_epi16(t0, t1);
                        z0 = _mm_add_epi32(z0, _mm_unpacklo_epi16(x0, x2));
                        z1 = _mm_add_epi32(z1, _mm_unpackhi_epi16(x0, x2));

                        t1 = _mm_mulhi_epi16(y1, k0);
                        t0 = _mm_mullo_epi16(y1, k0);
                        y1 = _mm_mulhi_epi16(y0, k1);
                        y0 = _mm_mullo_epi16(y0, k1);
                        z2 = _mm_unpacklo_epi16(t0, t1);
                        z3 = _mm_unpackhi_epi16(t0, t1);
                        z2 = _mm_add_epi32(z2, _mm_unpacklo_epi16(y0, y1));
                        z3 = _mm_add_epi32(z3, _mm_unpackhi_epi16(y0, y1));
                        _mm_store_si128((__m128i*)(dst + i), z0);
                        _mm_store_si128((__m128i*)(dst + i + 4), z1);
                        _mm_store_si128((__m128i*)(dst + i + 8), z2);
                        _mm_store_si128((__m128i*)(dst + i + 12), z3);
                    }
                }
            }
            else if( _ksize == 5 )
            {
                // {1,0,-2,0,1}: same sign-extending trick as the 3-tap case,
                // sampling at ±2*cn.
                if( kx[0] == -2 && kx[1] == 0 && kx[2] == 1 )
                    for( ; i <= width - 16; i += 16, src += 16 )
                    {
                        __m128i x0, x1, x2, y0, y1, y2;
                        x0 = _mm_loadu_si128((__m128i*)(src - cn*2));
                        x1 = _mm_loadu_si128((__m128i*)src);
                        x2 = _mm_loadu_si128((__m128i*)(src + cn*2));
                        y0 = _mm_unpackhi_epi8(x0, z);
                        x0 = _mm_unpacklo_epi8(x0, z);
                        y1 = _mm_unpackhi_epi8(x1, z);
                        x1 = _mm_unpacklo_epi8(x1, z);
                        y2 = _mm_unpackhi_epi8(x2, z);
                        x2 = _mm_unpacklo_epi8(x2, z);
                        x0 = _mm_add_epi16(x0, _mm_sub_epi16(x2, _mm_add_epi16(x1, x1)));
                        y0 = _mm_add_epi16(y0, _mm_sub_epi16(y2, _mm_add_epi16(y1, y1)));
                        _mm_store_si128((__m128i*)(dst + i), _mm_srai_epi32(_mm_unpacklo_epi16(x0, x0),16));
                        _mm_store_si128((__m128i*)(dst + i + 4), _mm_srai_epi32(_mm_unpackhi_epi16(x0, x0),16));
                        _mm_store_si128((__m128i*)(dst + i + 8), _mm_srai_epi32(_mm_unpacklo_epi16(y0, y0),16));
                        _mm_store_si128((__m128i*)(dst + i + 12), _mm_srai_epi32(_mm_unpackhi_epi16(y0, y0),16));
                    }
                else
                {
                    // Generic symmetric 5-tap:
                    // k0*center + k1*(src[-cn]+src[cn]) + k2*(src[-2cn]+src[2cn]).
                    __m128i k0 = _mm_shuffle_epi32(_mm_cvtsi32_si128(kx[0]), 0),
                            k1 = _mm_shuffle_epi32(_mm_cvtsi32_si128(kx[1]), 0),
                            k2 = _mm_shuffle_epi32(_mm_cvtsi32_si128(kx[2]), 0);
                    k0 = _mm_packs_epi32(k0, k0);
                    k1 = _mm_packs_epi32(k1, k1);
                    k2 = _mm_packs_epi32(k2, k2);

                    for( ; i <= width - 16; i += 16, src += 16 )
                    {
                        __m128i x0, x1, x2, y0, y1, t0, t1, z0, z1, z2, z3;
                        x0 = _mm_loadu_si128((__m128i*)(src - cn));
                        x1 = _mm_loadu_si128((__m128i*)src);
                        x2 = _mm_loadu_si128((__m128i*)(src + cn));
                        y0 = _mm_add_epi16(_mm_unpackhi_epi8(x0, z), _mm_unpackhi_epi8(x2, z));
                        x0 = _mm_add_epi16(_mm_unpacklo_epi8(x0, z), _mm_unpacklo_epi8(x2, z));
                        y1 = _mm_unpackhi_epi8(x1, z);
                        x1 = _mm_unpacklo_epi8(x1, z);
                        t1 = _mm_mulhi_epi16(x1, k0);
                        t0 = _mm_mullo_epi16(x1, k0);
                        x2 = _mm_mulhi_epi16(x0, k1);
                        x0 = _mm_mullo_epi16(x0, k1);
                        z0 = _mm_unpacklo_epi16(t0, t1);
                        z1 = _mm_unpackhi_epi16(t0, t1);
                        z0 = _mm_add_epi32(z0, _mm_unpacklo_epi16(x0, x2));
                        z1 = _mm_add_epi32(z1, _mm_unpackhi_epi16(x0, x2));

                        t1 = _mm_mulhi_epi16(y1, k0);
                        t0 = _mm_mullo_epi16(y1, k0);
                        y1 = _mm_mulhi_epi16(y0, k1);
                        y0 = _mm_mullo_epi16(y0, k1);
                        z2 = _mm_unpacklo_epi16(t0, t1);
                        z3 = _mm_unpackhi_epi16(t0, t1);
                        z2 = _mm_add_epi32(z2, _mm_unpacklo_epi16(y0, y1));
                        z3 = _mm_add_epi32(z3, _mm_unpackhi_epi16(y0, y1));

                        // Outermost taps at ±2*cn with coefficient k2.
                        x0 = _mm_loadu_si128((__m128i*)(src - cn*2));
                        x1 = _mm_loadu_si128((__m128i*)(src + cn*2));
                        y1 = _mm_add_epi16(_mm_unpackhi_epi8(x0, z), _mm_unpackhi_epi8(x1, z));
                        y0 = _mm_add_epi16(_mm_unpacklo_epi8(x0, z), _mm_unpacklo_epi8(x1, z));

                        t1 = _mm_mulhi_epi16(y0, k2);
                        t0 = _mm_mullo_epi16(y0, k2);
                        y0 = _mm_mullo_epi16(y1, k2);
                        y1 = _mm_mulhi_epi16(y1, k2);
                        z0 = _mm_add_epi32(z0, _mm_unpacklo_epi16(t0, t1));
                        z1 = _mm_add_epi32(z1, _mm_unpackhi_epi16(t0, t1));
                        z2 = _mm_add_epi32(z2, _mm_unpacklo_epi16(y0, y1));
                        z3 = _mm_add_epi32(z3, _mm_unpackhi_epi16(y0, y1));

                        _mm_store_si128((__m128i*)(dst + i), z0);
                        _mm_store_si128((__m128i*)(dst + i + 4), z1);
                        _mm_store_si128((__m128i*)(dst + i + 8), z2);
                        _mm_store_si128((__m128i*)(dst + i + 12), z3);
                    }
                }
            }
        }
        else
        {
            // Anti-symmetric kernels: kx[-k] == -kx[k], kx[0] contributes 0.
            if( _ksize == 3 )
            {
                // {-1,0,1} (central difference).
                if( kx[0] == 0 && kx[1] == 1 )
                    for( ; i <= width - 16; i += 16, src += 16 )
                    {
                        __m128i x0, x1, y0;
                        x0 = _mm_loadu_si128((__m128i*)(src + cn));
                        x1 = _mm_loadu_si128((__m128i*)(src - cn));
                        y0 = _mm_sub_epi16(_mm_unpackhi_epi8(x0, z), _mm_unpackhi_epi8(x1, z));
                        x0 = _mm_sub_epi16(_mm_unpacklo_epi8(x0, z), _mm_unpacklo_epi8(x1, z));
                        _mm_store_si128((__m128i*)(dst + i), _mm_srai_epi32(_mm_unpacklo_epi16(x0, x0),16));
                        _mm_store_si128((__m128i*)(dst + i + 4), _mm_srai_epi32(_mm_unpackhi_epi16(x0, x0),16));
                        _mm_store_si128((__m128i*)(dst + i + 8), _mm_srai_epi32(_mm_unpacklo_epi16(y0, y0),16));
                        _mm_store_si128((__m128i*)(dst + i + 12), _mm_srai_epi32(_mm_unpackhi_epi16(y0, y0),16));
                    }
                else
                {
                    // Generic anti-symmetric 3-tap: k1*(right - left).
                    __m128i k1 = _mm_shuffle_epi32(_mm_cvtsi32_si128(kx[1]), 0);
                    k1 = _mm_packs_epi32(k1, k1);

                    for( ; i <= width - 16; i += 16, src += 16 )
                    {
                        __m128i x0, x1, y0, y1, z0, z1, z2, z3;
                        x0 = _mm_loadu_si128((__m128i*)(src + cn));
                        x1 = _mm_loadu_si128((__m128i*)(src - cn));
                        y0 = _mm_sub_epi16(_mm_unpackhi_epi8(x0, z), _mm_unpackhi_epi8(x1, z));
                        x0 = _mm_sub_epi16(_mm_unpacklo_epi8(x0, z), _mm_unpacklo_epi8(x1, z));
                        x1 = _mm_mulhi_epi16(x0, k1);
                        x0 = _mm_mullo_epi16(x0, k1);
                        z0 = _mm_unpacklo_epi16(x0, x1);
                        z1 = _mm_unpackhi_epi16(x0, x1);
                        y1 = _mm_mulhi_epi16(y0, k1);
                        y0 = _mm_mullo_epi16(y0, k1);
                        z2 = _mm_unpacklo_epi16(y0, y1);
                        z3 = _mm_unpackhi_epi16(y0, y1);
                        _mm_store_si128((__m128i*)(dst + i), z0);
                        _mm_store_si128((__m128i*)(dst + i + 4), z1);
                        _mm_store_si128((__m128i*)(dst + i + 8), z2);
                        _mm_store_si128((__m128i*)(dst + i + 12), z3);
                    }
                }
            }
            else if( _ksize == 5 )
            {
                // Generic anti-symmetric 5-tap:
                // k1*(src[cn]-src[-cn]) + k2*(src[2cn]-src[-2cn]).
                __m128i k0 = _mm_shuffle_epi32(_mm_cvtsi32_si128(kx[0]), 0),
                        k1 = _mm_shuffle_epi32(_mm_cvtsi32_si128(kx[1]), 0),
                        k2 = _mm_shuffle_epi32(_mm_cvtsi32_si128(kx[2]), 0);
                k0 = _mm_packs_epi32(k0, k0);
                k1 = _mm_packs_epi32(k1, k1);
                k2 = _mm_packs_epi32(k2, k2);

                for( ; i <= width - 16; i += 16, src += 16 )
                {
                    __m128i x0, x1, x2, y0, y1, t0, t1, z0, z1, z2, z3;
                    x0 = _mm_loadu_si128((__m128i*)(src + cn));
                    x2 = _mm_loadu_si128((__m128i*)(src - cn));
                    y0 = _mm_sub_epi16(_mm_unpackhi_epi8(x0, z), _mm_unpackhi_epi8(x2, z));
                    x0 = _mm_sub_epi16(_mm_unpacklo_epi8(x0, z), _mm_unpacklo_epi8(x2, z));

                    x2 = _mm_mulhi_epi16(x0, k1);
                    x0 = _mm_mullo_epi16(x0, k1);
                    z0 = _mm_unpacklo_epi16(x0, x2);
                    z1 = _mm_unpackhi_epi16(x0, x2);
                    y1 = _mm_mulhi_epi16(y0, k1);
                    y0 = _mm_mullo_epi16(y0, k1);
                    z2 = _mm_unpacklo_epi16(y0, y1);
                    z3 = _mm_unpackhi_epi16(y0, y1);

                    x0 = _mm_loadu_si128((__m128i*)(src + cn*2));
                    x1 = _mm_loadu_si128((__m128i*)(src - cn*2));
                    y1 = _mm_sub_epi16(_mm_unpackhi_epi8(x0, z), _mm_unpackhi_epi8(x1, z));
                    y0 = _mm_sub_epi16(_mm_unpacklo_epi8(x0, z), _mm_unpacklo_epi8(x1, z));

                    t1 = _mm_mulhi_epi16(y0, k2);
                    t0 = _mm_mullo_epi16(y0, k2);
                    y0 = _mm_mullo_epi16(y1, k2);
                    y1 = _mm_mulhi_epi16(y1, k2);
                    z0 = _mm_add_epi32(z0, _mm_unpacklo_epi16(t0, t1));
                    z1 = _mm_add_epi32(z1, _mm_unpackhi_epi16(t0, t1));
                    z2 = _mm_add_epi32(z2, _mm_unpacklo_epi16(y0, y1));
                    z3 = _mm_add_epi32(z3, _mm_unpackhi_epi16(y0, y1));

                    _mm_store_si128((__m128i*)(dst + i), z0);
                    _mm_store_si128((__m128i*)(dst + i + 4), z1);
                    _mm_store_si128((__m128i*)(dst + i + 8), z2);
                    _mm_store_si128((__m128i*)(dst + i + 12), z3);
                }
            }
        }

        // Generic 4-at-a-time tail using the full kernel (handles whatever
        // the specialized 16-wide loops above left over).
        src -= (_ksize/2)*cn;
        kx -= _ksize/2;
        for( ; i <= width - 4; i += 4, src += 4 )
        {
            __m128i f, s0 = z, x0, x1;

            for( k = j = 0; k < _ksize; k++, j += cn )
            {
                f = _mm_cvtsi32_si128(kx[k]);
                f = _mm_shuffle_epi32(f, 0);
                f = _mm_packs_epi32(f, f);

                x0 = _mm_cvtsi32_si128(*(const int*)(src + j));
                x0 = _mm_unpacklo_epi8(x0, z);
                x1 = _mm_mulhi_epi16(x0, f);
                x0 = _mm_mullo_epi16(x0, f);
                s0 = _mm_add_epi32(s0, _mm_unpacklo_epi16(x0, x1));
            }
            _mm_store_si128((__m128i*)(dst + i), s0);
        }
        return i;
    }

    Mat kernel;
    int symmetryType;  // KERNEL_SYMMETRICAL / KERNEL_ASYMMETRICAL flags
    bool smallValues;  // true iff all coefficients fit in int16
};
// SSE2 column filter: sums 32s row-buffer rows with a symmetric or
// anti-symmetric kernel and packs the (saturated) result to 8u. The
// fixed-point kernel is converted to float scaled by 2^-_bits, so the
// accumulation happens in single precision.
struct SymmColumnVec_32s8u
{
    SymmColumnVec_32s8u() { symmetryType=0; }
    SymmColumnVec_32s8u(const Mat& _kernel, int _symmetryType, int _bits, double _delta)
    {
        symmetryType = _symmetryType;
        _kernel.convertTo(kernel, CV_32F, 1./(1 << _bits), 0);
        delta = (float)(_delta/(1 << _bits));
        CV_Assert( (symmetryType & (KERNEL_SYMMETRICAL | KERNEL_ASYMMETRICAL)) != 0 );
    }

    // _src points at the central row of the vertical window; src[k] and
    // src[-k] are the rows k steps below/above it. Returns elements done.
    int operator()(const uchar** _src, uchar* dst, int width) const
    {
        if( !checkHardwareSupport(CV_CPU_SSE2) )
            return 0;

        int ksize2 = (kernel.rows + kernel.cols - 1)/2;
        const float* ky = kernel.ptr<float>() + ksize2;
        int i = 0, k;
        bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0;
        const int** src = (const int**)_src;
        const __m128i *S, *S2;
        __m128 d4 = _mm_set1_ps(delta);

        if( symmetrical )
        {
            // 16 outputs per iteration: center row times ky[0], then pairs
            // (row[k]+row[-k]) times ky[k].
            for( ; i <= width - 16; i += 16 )
            {
                __m128 f = _mm_load_ss(ky);
                f = _mm_shuffle_ps(f, f, 0);
                __m128 s0, s1, s2, s3;
                __m128i x0, x1;
                S = (const __m128i*)(src[0] + i);
                s0 = _mm_cvtepi32_ps(_mm_load_si128(S));
                s1 = _mm_cvtepi32_ps(_mm_load_si128(S+1));
                s0 = _mm_add_ps(_mm_mul_ps(s0, f), d4);
                s1 = _mm_add_ps(_mm_mul_ps(s1, f), d4);
                s2 = _mm_cvtepi32_ps(_mm_load_si128(S+2));
                s3 = _mm_cvtepi32_ps(_mm_load_si128(S+3));
                s2 = _mm_add_ps(_mm_mul_ps(s2, f), d4);
                s3 = _mm_add_ps(_mm_mul_ps(s3, f), d4);

                for( k = 1; k <= ksize2; k++ )
                {
                    S = (const __m128i*)(src[k] + i);
                    S2 = (const __m128i*)(src[-k] + i);
                    f = _mm_load_ss(ky+k);
                    f = _mm_shuffle_ps(f, f, 0);
                    x0 = _mm_add_epi32(_mm_load_si128(S), _mm_load_si128(S2));
                    x1 = _mm_add_epi32(_mm_load_si128(S+1), _mm_load_si128(S2+1));
                    s0 = _mm_add_ps(s0, _mm_mul_ps(_mm_cvtepi32_ps(x0), f));
                    s1 = _mm_add_ps(s1, _mm_mul_ps(_mm_cvtepi32_ps(x1), f));
                    x0 = _mm_add_epi32(_mm_load_si128(S+2), _mm_load_si128(S2+2));
                    x1 = _mm_add_epi32(_mm_load_si128(S+3), _mm_load_si128(S2+3));
                    s2 = _mm_add_ps(s2, _mm_mul_ps(_mm_cvtepi32_ps(x0), f));
                    s3 = _mm_add_ps(s3, _mm_mul_ps(_mm_cvtepi32_ps(x1), f));
                }

                // Round, then saturate 32s -> 16s -> 8u.
                x0 = _mm_packs_epi32(_mm_cvtps_epi32(s0), _mm_cvtps_epi32(s1));
                x1 = _mm_packs_epi32(_mm_cvtps_epi32(s2), _mm_cvtps_epi32(s3));
                x0 = _mm_packus_epi16(x0, x1);
                _mm_storeu_si128((__m128i*)(dst + i), x0);
            }

            // 4-wide tail.
            for( ; i <= width - 4; i += 4 )
            {
                __m128 f = _mm_load_ss(ky);
                f = _mm_shuffle_ps(f, f, 0);
                __m128i x0;
                __m128 s0 = _mm_cvtepi32_ps(_mm_load_si128((const __m128i*)(src[0] + i)));
                s0 = _mm_add_ps(_mm_mul_ps(s0, f), d4);

                for( k = 1; k <= ksize2; k++ )
                {
                    S = (const __m128i*)(src[k] + i);
                    S2 = (const __m128i*)(src[-k] + i);
                    f = _mm_load_ss(ky+k);
                    f = _mm_shuffle_ps(f, f, 0);
                    x0 = _mm_add_epi32(_mm_load_si128(S), _mm_load_si128(S2));
                    s0 = _mm_add_ps(s0, _mm_mul_ps(_mm_cvtepi32_ps(x0), f));
                }

                x0 = _mm_cvtps_epi32(s0);
                x0 = _mm_packs_epi32(x0, x0);
                x0 = _mm_packus_epi16(x0, x0);
                *(int*)(dst + i) = _mm_cvtsi128_si32(x0);
            }
        }
        else
        {
            // Anti-symmetric: center contributes 0, pairs are differences
            // (row[k]-row[-k]) times ky[k].
            for( ; i <= width - 16; i += 16 )
            {
                __m128 f, s0 = d4, s1 = d4, s2 = d4, s3 = d4;
                __m128i x0, x1;

                for( k = 1; k <= ksize2; k++ )
                {
                    S = (const __m128i*)(src[k] + i);
                    S2 = (const __m128i*)(src[-k] + i);
                    f = _mm_load_ss(ky+k);
                    f = _mm_shuffle_ps(f, f, 0);
                    x0 = _mm_sub_epi32(_mm_load_si128(S), _mm_load_si128(S2));
                    x1 = _mm_sub_epi32(_mm_load_si128(S+1), _mm_load_si128(S2+1));
                    s0 = _mm_add_ps(s0, _mm_mul_ps(_mm_cvtepi32_ps(x0), f));
                    s1 = _mm_add_ps(s1, _mm_mul_ps(_mm_cvtepi32_ps(x1), f));
                    x0 = _mm_sub_epi32(_mm_load_si128(S+2), _mm_load_si128(S2+2));
                    x1 = _mm_sub_epi32(_mm_load_si128(S+3), _mm_load_si128(S2+3));
                    s2 = _mm_add_ps(s2, _mm_mul_ps(_mm_cvtepi32_ps(x0), f));
                    s3 = _mm_add_ps(s3, _mm_mul_ps(_mm_cvtepi32_ps(x1), f));
                }

                x0 = _mm_packs_epi32(_mm_cvtps_epi32(s0), _mm_cvtps_epi32(s1));
                x1 = _mm_packs_epi32(_mm_cvtps_epi32(s2), _mm_cvtps_epi32(s3));
                x0 = _mm_packus_epi16(x0, x1);
                _mm_storeu_si128((__m128i*)(dst + i), x0);
            }

            for( ; i <= width - 4; i += 4 )
            {
                __m128 f, s0 = d4;
                __m128i x0;

                for( k = 1; k <= ksize2; k++ )
                {
                    S = (const __m128i*)(src[k] + i);
                    S2 = (const __m128i*)(src[-k] + i);
                    f = _mm_load_ss(ky+k);
                    f = _mm_shuffle_ps(f, f, 0);
                    x0 = _mm_sub_epi32(_mm_load_si128(S), _mm_load_si128(S2));
                    s0 = _mm_add_ps(s0, _mm_mul_ps(_mm_cvtepi32_ps(x0), f));
                }

                x0 = _mm_cvtps_epi32(s0);
                x0 = _mm_packs_epi32(x0, x0);
                x0 = _mm_packus_epi16(x0, x0);
                *(int*)(dst + i) = _mm_cvtsi128_si32(x0);
            }
        }

        return i;
    }

    int symmetryType;  // KERNEL_SYMMETRICAL / KERNEL_ASYMMETRICAL flags
    float delta;       // bias added to every output, pre-scaled by 2^-bits
    Mat kernel;        // CV_32F coefficients, pre-scaled by 2^-bits
};
// SSE2 column filter specialized for 3-tap symmetric/anti-symmetric
// kernels, 32s rows -> 16s output. The common Sobel/Laplacian-style
// kernels ({1,2,1}, {1,-2,1}, {-1,0,1}) run entirely in integer
// arithmetic; other coefficients go through a float path.
struct SymmColumnSmallVec_32s16s
{
    SymmColumnSmallVec_32s16s() { symmetryType=0; }
    SymmColumnSmallVec_32s16s(const Mat& _kernel, int _symmetryType, int _bits, double _delta)
    {
        symmetryType = _symmetryType;
        _kernel.convertTo(kernel, CV_32F, 1./(1 << _bits), 0);
        delta = (float)(_delta/(1 << _bits));
        CV_Assert( (symmetryType & (KERNEL_SYMMETRICAL | KERNEL_ASYMMETRICAL)) != 0 );
    }

    // _src points at the central row; S0/S1/S2 are the rows above/at/below
    // the anchor. Returns the number of elements processed.
    int operator()(const uchar** _src, uchar* _dst, int width) const
    {
        if( !checkHardwareSupport(CV_CPU_SSE2) )
            return 0;

        int ksize2 = (kernel.rows + kernel.cols - 1)/2;
        const float* ky = kernel.ptr<float>() + ksize2;
        int i = 0;
        bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0;
        const int** src = (const int**)_src;
        const int *S0 = src[-1], *S1 = src[0], *S2 = src[1];
        short* dst = (short*)_dst;
        __m128 df4 = _mm_set1_ps(delta);
        // NOTE(review): the integer fast paths add the *rounded* delta; the
        // float paths add the exact delta — matches the scalar code's use.
        __m128i d4 = _mm_cvtps_epi32(df4);

        if( symmetrical )
        {
            if( ky[0] == 2 && ky[1] == 1 )
            {
                // {1,2,1}: S0 + 2*S1 + S2, all in 32-bit integers.
                for( ; i <= width - 8; i += 8 )
                {
                    __m128i s0, s1, s2, s3, s4, s5;
                    s0 = _mm_load_si128((__m128i*)(S0 + i));
                    s1 = _mm_load_si128((__m128i*)(S0 + i + 4));
                    s2 = _mm_load_si128((__m128i*)(S1 + i));
                    s3 = _mm_load_si128((__m128i*)(S1 + i + 4));
                    s4 = _mm_load_si128((__m128i*)(S2 + i));
                    s5 = _mm_load_si128((__m128i*)(S2 + i + 4));
                    s0 = _mm_add_epi32(s0, _mm_add_epi32(s4, _mm_add_epi32(s2, s2)));
                    s1 = _mm_add_epi32(s1, _mm_add_epi32(s5, _mm_add_epi32(s3, s3)));
                    s0 = _mm_add_epi32(s0, d4);
                    s1 = _mm_add_epi32(s1, d4);
                    _mm_storeu_si128((__m128i*)(dst + i), _mm_packs_epi32(s0, s1));
                }
            }
            else if( ky[0] == -2 && ky[1] == 1 )
            {
                // {1,-2,1}: S0 - 2*S1 + S2.
                for( ; i <= width - 8; i += 8 )
                {
                    __m128i s0, s1, s2, s3, s4, s5;
                    s0 = _mm_load_si128((__m128i*)(S0 + i));
                    s1 = _mm_load_si128((__m128i*)(S0 + i + 4));
                    s2 = _mm_load_si128((__m128i*)(S1 + i));
                    s3 = _mm_load_si128((__m128i*)(S1 + i + 4));
                    s4 = _mm_load_si128((__m128i*)(S2 + i));
                    s5 = _mm_load_si128((__m128i*)(S2 + i + 4));
                    s0 = _mm_add_epi32(s0, _mm_sub_epi32(s4, _mm_add_epi32(s2, s2)));
                    s1 = _mm_add_epi32(s1, _mm_sub_epi32(s5, _mm_add_epi32(s3, s3)));
                    s0 = _mm_add_epi32(s0, d4);
                    s1 = _mm_add_epi32(s1, d4);
                    _mm_storeu_si128((__m128i*)(dst + i), _mm_packs_epi32(s0, s1));
                }
            }
            else
            {
                // Generic symmetric 3-tap in float: k0*S1 + k1*(S0+S2).
                __m128 k0 = _mm_set1_ps(ky[0]), k1 = _mm_set1_ps(ky[1]);
                for( ; i <= width - 8; i += 8 )
                {
                    __m128 s0, s1;
                    s0 = _mm_cvtepi32_ps(_mm_load_si128((__m128i*)(S1 + i)));
                    s1 = _mm_cvtepi32_ps(_mm_load_si128((__m128i*)(S1 + i + 4)));
                    s0 = _mm_add_ps(_mm_mul_ps(s0, k0), df4);
                    s1 = _mm_add_ps(_mm_mul_ps(s1, k0), df4);
                    __m128i x0, x1;
                    x0 = _mm_add_epi32(_mm_load_si128((__m128i*)(S0 + i)),
                                       _mm_load_si128((__m128i*)(S2 + i)));
                    x1 = _mm_add_epi32(_mm_load_si128((__m128i*)(S0 + i + 4)),
                                       _mm_load_si128((__m128i*)(S2 + i + 4)));
                    s0 = _mm_add_ps(s0, _mm_mul_ps(_mm_cvtepi32_ps(x0),k1));
                    s1 = _mm_add_ps(s1, _mm_mul_ps(_mm_cvtepi32_ps(x1),k1));
                    x0 = _mm_packs_epi32(_mm_cvtps_epi32(s0), _mm_cvtps_epi32(s1));
                    _mm_storeu_si128((__m128i*)(dst + i), x0);
                }
            }
        }
        else
        {
            if( fabs(ky[1]) == 1 && ky[1] == -ky[-1] )
            {
                // {-1,0,1} / {1,0,-1}: a plain row difference; a swap
                // normalizes the sign.
                if( ky[1] < 0 )
                    std::swap(S0, S2);
                for( ; i <= width - 8; i += 8 )
                {
                    __m128i s0, s1, s2, s3;
                    s0 = _mm_load_si128((__m128i*)(S2 + i));
                    s1 = _mm_load_si128((__m128i*)(S2 + i + 4));
                    s2 = _mm_load_si128((__m128i*)(S0 + i));
                    s3 = _mm_load_si128((__m128i*)(S0 + i + 4));
                    s0 = _mm_add_epi32(_mm_sub_epi32(s0, s2), d4);
                    s1 = _mm_add_epi32(_mm_sub_epi32(s1, s3), d4);
                    _mm_storeu_si128((__m128i*)(dst + i), _mm_packs_epi32(s0, s1));
                }
            }
            else
            {
                // Generic anti-symmetric 3-tap in float: k1*(S2-S0).
                __m128 k1 = _mm_set1_ps(ky[1]);
                for( ; i <= width - 8; i += 8 )
                {
                    __m128 s0 = df4, s1 = df4;
                    __m128i x0, x1;
                    x0 = _mm_sub_epi32(_mm_load_si128((__m128i*)(S2 + i)),
                                       _mm_load_si128((__m128i*)(S0 + i)));
                    x1 = _mm_sub_epi32(_mm_load_si128((__m128i*)(S2 + i + 4)),
                                       _mm_load_si128((__m128i*)(S0 + i + 4)));
                    s0 = _mm_add_ps(s0, _mm_mul_ps(_mm_cvtepi32_ps(x0),k1));
                    s1 = _mm_add_ps(s1, _mm_mul_ps(_mm_cvtepi32_ps(x1),k1));
                    x0 = _mm_packs_epi32(_mm_cvtps_epi32(s0), _mm_cvtps_epi32(s1));
                    _mm_storeu_si128((__m128i*)(dst + i), x0);
                }
            }
        }

        return i;
    }

    int symmetryType;  // KERNEL_SYMMETRICAL / KERNEL_ASYMMETRICAL flags
    float delta;       // bias, pre-scaled by 2^-bits
    Mat kernel;        // CV_32F coefficients, pre-scaled by 2^-bits
};
/////////////////////////////////////// 16s //////////////////////////////////
// SSE2 row filter: 16s source convolved with a float kernel into a 32f row
// buffer. Processes 8 outputs per iteration; the remainder is left for the
// caller's scalar loop.
struct RowVec_16s32f
{
    RowVec_16s32f() {}
    RowVec_16s32f( const Mat& _kernel )
    {
        kernel = _kernel;
        // Hardware capability is checked once at construction time.
        sse2_supported = checkHardwareSupport(CV_CPU_SSE2);
    }

    // Returns the number of elements processed.
    int operator()(const uchar* _src, uchar* _dst, int width, int cn) const
    {
        if( !sse2_supported )
            return 0;

        int i = 0, k, _ksize = kernel.rows + kernel.cols - 1;
        float* dst = (float*)_dst;
        const float* _kx = kernel.ptr<float>();
        width *= cn;

        for( ; i <= width - 8; i += 8 )
        {
            const short* src = (const short*)_src + i;
            __m128 f, s0 = _mm_setzero_ps(), s1 = s0, x0, x1;
            for( k = 0; k < _ksize; k++, src += cn )
            {
                // Broadcast coefficient k to all four float lanes.
                f = _mm_load_ss(_kx+k);
                f = _mm_shuffle_ps(f, f, 0);

                // Sign-extend eight 16-bit samples to 32-bit, then to float.
                __m128i x0i = _mm_loadu_si128((const __m128i*)src);
                __m128i x1i = _mm_srai_epi32(_mm_unpackhi_epi16(x0i, x0i), 16);
                x0i = _mm_srai_epi32(_mm_unpacklo_epi16(x0i, x0i), 16);
                x0 = _mm_cvtepi32_ps(x0i);
                x1 = _mm_cvtepi32_ps(x1i);
                s0 = _mm_add_ps(s0, _mm_mul_ps(x0, f));
                s1 = _mm_add_ps(s1, _mm_mul_ps(x1, f));
            }
            _mm_store_ps(dst + i, s0);
            _mm_store_ps(dst + i + 4, s1);
        }
        return i;
    }

    Mat kernel;
    bool sse2_supported;
};
struct SymmColumnVec_32f16s
{
    // Column filter: combines float intermediate rows with a symmetrical or
    // asymmetrical float kernel and stores saturated signed 16-bit results
    // (SSE2 path).
    // Fix: the default constructor previously left delta and sse2_supported
    // uninitialized.
    SymmColumnVec_32f16s()
    {
        symmetryType = 0;
        delta = 0.f;
        sse2_supported = false;
    }
    SymmColumnVec_32f16s(const Mat& _kernel, int _symmetryType, int, double _delta)
    {
        symmetryType = _symmetryType;
        kernel = _kernel;
        delta = (float)_delta;
        CV_Assert( (symmetryType & (KERNEL_SYMMETRICAL | KERNEL_ASYMMETRICAL)) != 0 );
        sse2_supported = checkHardwareSupport(CV_CPU_SSE2);
    }

    // _src points at the center row of the row ring buffer, so src[-k] and
    // src[k] are the rows tapped by the k-th kernel coefficient pair.
    // Returns the number of pixels processed; caller handles the tail.
    int operator()(const uchar** _src, uchar* _dst, int width) const
    {
        if( !sse2_supported )
            return 0;

        int ksize2 = (kernel.rows + kernel.cols - 1)/2;
        const float* ky = kernel.ptr<float>() + ksize2;
        int i = 0, k;
        bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0;
        const float** src = (const float**)_src;
        const float *S, *S2;
        short* dst = (short*)_dst;
        __m128 d4 = _mm_set1_ps(delta);

        if( symmetrical )
        {
            for( ; i <= width - 16; i += 16 )
            {
                // center tap times the middle row, plus delta
                __m128 f = _mm_load_ss(ky);
                f = _mm_shuffle_ps(f, f, 0);
                __m128 s0, s1, s2, s3;
                __m128 x0, x1;
                S = src[0] + i;
                s0 = _mm_load_ps(S);
                s1 = _mm_load_ps(S+4);
                s0 = _mm_add_ps(_mm_mul_ps(s0, f), d4);
                s1 = _mm_add_ps(_mm_mul_ps(s1, f), d4);
                s2 = _mm_load_ps(S+8);
                s3 = _mm_load_ps(S+12);
                s2 = _mm_add_ps(_mm_mul_ps(s2, f), d4);
                s3 = _mm_add_ps(_mm_mul_ps(s3, f), d4);

                for( k = 1; k <= ksize2; k++ )
                {
                    // symmetric pair: ky[k]*(row[+k] + row[-k])
                    S = src[k] + i;
                    S2 = src[-k] + i;
                    f = _mm_load_ss(ky+k);
                    f = _mm_shuffle_ps(f, f, 0);
                    x0 = _mm_add_ps(_mm_load_ps(S), _mm_load_ps(S2));
                    x1 = _mm_add_ps(_mm_load_ps(S+4), _mm_load_ps(S2+4));
                    s0 = _mm_add_ps(s0, _mm_mul_ps(x0, f));
                    s1 = _mm_add_ps(s1, _mm_mul_ps(x1, f));
                    x0 = _mm_add_ps(_mm_load_ps(S+8), _mm_load_ps(S2+8));
                    x1 = _mm_add_ps(_mm_load_ps(S+12), _mm_load_ps(S2+12));
                    s2 = _mm_add_ps(s2, _mm_mul_ps(x0, f));
                    s3 = _mm_add_ps(s3, _mm_mul_ps(x1, f));
                }

                // round-convert and saturate-pack to int16
                __m128i s0i = _mm_cvtps_epi32(s0);
                __m128i s1i = _mm_cvtps_epi32(s1);
                __m128i s2i = _mm_cvtps_epi32(s2);
                __m128i s3i = _mm_cvtps_epi32(s3);
                _mm_storeu_si128((__m128i*)(dst + i), _mm_packs_epi32(s0i, s1i));
                _mm_storeu_si128((__m128i*)(dst + i + 8), _mm_packs_epi32(s2i, s3i));
            }

            for( ; i <= width - 4; i += 4 )
            {
                __m128 f = _mm_load_ss(ky);
                f = _mm_shuffle_ps(f, f, 0);
                __m128 x0, s0 = _mm_load_ps(src[0] + i);
                s0 = _mm_add_ps(_mm_mul_ps(s0, f), d4);

                for( k = 1; k <= ksize2; k++ )
                {
                    f = _mm_load_ss(ky+k);
                    f = _mm_shuffle_ps(f, f, 0);
                    // was: S/S2 were assigned but the loads re-indexed src[]
                    S = src[k] + i;
                    S2 = src[-k] + i;
                    x0 = _mm_add_ps(_mm_load_ps(S), _mm_load_ps(S2));
                    s0 = _mm_add_ps(s0, _mm_mul_ps(x0, f));
                }

                __m128i s0i = _mm_cvtps_epi32(s0);
                _mm_storel_epi64((__m128i*)(dst + i), _mm_packs_epi32(s0i, s0i));
            }
        }
        else
        {
            for( ; i <= width - 16; i += 16 )
            {
                // antisymmetric kernel: center tap is zero, so start at delta
                // (a dead "S = src[0] + i;" assignment was removed here)
                __m128 f, s0 = d4, s1 = d4, s2 = d4, s3 = d4;
                __m128 x0, x1;
                for( k = 1; k <= ksize2; k++ )
                {
                    // antisymmetric pair: ky[k]*(row[+k] - row[-k])
                    S = src[k] + i;
                    S2 = src[-k] + i;
                    f = _mm_load_ss(ky+k);
                    f = _mm_shuffle_ps(f, f, 0);
                    x0 = _mm_sub_ps(_mm_load_ps(S), _mm_load_ps(S2));
                    x1 = _mm_sub_ps(_mm_load_ps(S+4), _mm_load_ps(S2+4));
                    s0 = _mm_add_ps(s0, _mm_mul_ps(x0, f));
                    s1 = _mm_add_ps(s1, _mm_mul_ps(x1, f));
                    x0 = _mm_sub_ps(_mm_load_ps(S+8), _mm_load_ps(S2+8));
                    x1 = _mm_sub_ps(_mm_load_ps(S+12), _mm_load_ps(S2+12));
                    s2 = _mm_add_ps(s2, _mm_mul_ps(x0, f));
                    s3 = _mm_add_ps(s3, _mm_mul_ps(x1, f));
                }

                __m128i s0i = _mm_cvtps_epi32(s0);
                __m128i s1i = _mm_cvtps_epi32(s1);
                __m128i s2i = _mm_cvtps_epi32(s2);
                __m128i s3i = _mm_cvtps_epi32(s3);
                _mm_storeu_si128((__m128i*)(dst + i), _mm_packs_epi32(s0i, s1i));
                _mm_storeu_si128((__m128i*)(dst + i + 8), _mm_packs_epi32(s2i, s3i));
            }

            for( ; i <= width - 4; i += 4 )
            {
                __m128 f, x0, s0 = d4;
                for( k = 1; k <= ksize2; k++ )
                {
                    f = _mm_load_ss(ky+k);
                    f = _mm_shuffle_ps(f, f, 0);
                    x0 = _mm_sub_ps(_mm_load_ps(src[k]+i), _mm_load_ps(src[-k] + i));
                    s0 = _mm_add_ps(s0, _mm_mul_ps(x0, f));
                }
                __m128i s0i = _mm_cvtps_epi32(s0);
                _mm_storel_epi64((__m128i*)(dst + i), _mm_packs_epi32(s0i, s0i));
            }
        }
        return i;
    }

    int symmetryType;
    float delta;
    Mat kernel;
    bool sse2_supported;
};
/////////////////////////////////////// 32f //////////////////////////////////
struct RowVec_32f
{
    // Row filter for float input with an arbitrary float kernel (SSE path),
    // with an optional IPP pipeline fast path when enabled at build time.
    // Fix: the default constructor previously left the IPP bufsz cache
    // uninitialized; the "bufsz < 0" check in ippiOperator() then read an
    // indeterminate value (the Mat-taking ctor already set it to -1).
    RowVec_32f()
    {
        haveSSE = checkHardwareSupport(CV_CPU_SSE);
#if defined USE_IPP_SEP_FILTERS && IPP_DISABLE_BLOCK
        bufsz = -1;
#endif
    }
    RowVec_32f( const Mat& _kernel )
    {
        kernel = _kernel;
        haveSSE = checkHardwareSupport(CV_CPU_SSE);
#if defined USE_IPP_SEP_FILTERS && IPP_DISABLE_BLOCK
        bufsz = -1;
#endif
    }

    // Returns the number of outputs produced; 0 means "use scalar fallback".
    int operator()(const uchar* _src, uchar* _dst, int width, int cn) const
    {
#if defined USE_IPP_SEP_FILTERS && IPP_DISABLE_BLOCK
        CV_IPP_CHECK()
        {
            int ret = ippiOperator(_src, _dst, width, cn);
            if (ret > 0)
                return ret;
        }
#endif
        int _ksize = kernel.rows + kernel.cols - 1;
        const float* src0 = (const float*)_src;
        float* dst = (float*)_dst;
        const float* _kx = kernel.ptr<float>();

        if( !haveSSE )
            return 0;

        int i = 0, k;
        width *= cn;

        for( ; i <= width - 8; i += 8 )
        {
            const float* src = src0 + i;
            __m128 f, s0 = _mm_setzero_ps(), s1 = s0, x0, x1;
            for( k = 0; k < _ksize; k++, src += cn )
            {
                // broadcast coefficient k and accumulate 8 weighted samples
                f = _mm_load_ss(_kx+k);
                f = _mm_shuffle_ps(f, f, 0);
                x0 = _mm_loadu_ps(src);
                x1 = _mm_loadu_ps(src + 4);
                s0 = _mm_add_ps(s0, _mm_mul_ps(x0, f));
                s1 = _mm_add_ps(s1, _mm_mul_ps(x1, f));
            }
            _mm_store_ps(dst + i, s0);
            _mm_store_ps(dst + i + 4, s1);
        }
        return i;
    }

    Mat kernel;
    bool haveSSE;
#if defined USE_IPP_SEP_FILTERS && IPP_DISABLE_BLOCK
private:
    mutable int bufsz;  // lazily computed IPP work-buffer size; -1 = unknown
    int ippiOperator(const uchar* _src, uchar* _dst, int width, int cn) const
    {
        int _ksize = kernel.rows + kernel.cols - 1;
        if ((1 != cn && 3 != cn) || width < _ksize*8)
            return 0;

        const float* src = (const float*)_src;
        float* dst = (float*)_dst;
        const float* _kx = (const float*)kernel.data;

        IppiSize roisz = { width, 1 };
        if( bufsz < 0 )
        {
            if( (cn == 1 && ippiFilterRowBorderPipelineGetBufferSize_32f_C1R(roisz, _ksize, &bufsz) < 0) ||
                (cn == 3 && ippiFilterRowBorderPipelineGetBufferSize_32f_C3R(roisz, _ksize, &bufsz) < 0))
                return 0;
        }
        AutoBuffer<uchar> buf(bufsz + 64);
        uchar* bufptr = alignPtr((uchar*)buf, 32);
        int step = (int)(width*sizeof(dst[0])*cn);
        float borderValue[] = {0.f, 0.f, 0.f};
        // here is the trick. IPP needs border type and extrapolates the row. We did it already.
        // So we pass anchor=0 and ignore the right tail of results since they are incorrect there.
        if( (cn == 1 && ippiFilterRowBorderPipeline_32f_C1R(src, step, &dst, roisz, _kx, _ksize, 0,
                                                            ippBorderRepl, borderValue[0], bufptr) < 0) ||
            (cn == 3 && ippiFilterRowBorderPipeline_32f_C3R(src, step, &dst, roisz, _kx, _ksize, 0,
                                                            ippBorderRepl, borderValue, bufptr) < 0))
        {
            setIppErrorStatus();
            return 0;
        }
        CV_IMPL_ADD(CV_IMPL_IPP);
        return width - _ksize + 1;
    }
#endif
};
struct SymmRowSmallVec_32f
{
    // Row filter for float input specialized for small (1/3/5-tap) symmetric
    // or antisymmetric kernels, with fast paths for common Sobel/Laplacian
    // style coefficient patterns (SSE path).
    // Fix: the default constructor previously left symmetryType
    // uninitialized.
    SymmRowSmallVec_32f() { symmetryType = 0; }
    SymmRowSmallVec_32f( const Mat& _kernel, int _symmetryType )
    {
        kernel = _kernel;
        symmetryType = _symmetryType;
    }

    // Returns the number of outputs produced; 0 means "use scalar fallback".
    int operator()(const uchar* _src, uchar* _dst, int width, int cn) const
    {
        if( !checkHardwareSupport(CV_CPU_SSE) )
            return 0;

        int i = 0, _ksize = kernel.rows + kernel.cols - 1;
        float* dst = (float*)_dst;
        const float* src = (const float*)_src + (_ksize/2)*cn;  // center tap
        bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0;
        const float* kx = kernel.ptr<float>() + _ksize/2;       // kx[0] = center coeff
        width *= cn;

        if( symmetrical )
        {
            if( _ksize == 1 )
                return 0;
            if( _ksize == 3 )
            {
                if( kx[0] == 2 && kx[1] == 1 )
                    // kernel [1 2 1] — pure adds/shift, no multiplies
                    for( ; i <= width - 8; i += 8, src += 8 )
                    {
                        __m128 x0, x1, x2, y0, y1, y2;
                        x0 = _mm_loadu_ps(src - cn);
                        x1 = _mm_loadu_ps(src);
                        x2 = _mm_loadu_ps(src + cn);
                        y0 = _mm_loadu_ps(src - cn + 4);
                        y1 = _mm_loadu_ps(src + 4);
                        y2 = _mm_loadu_ps(src + cn + 4);
                        x0 = _mm_add_ps(x0, _mm_add_ps(_mm_add_ps(x1, x1), x2));
                        y0 = _mm_add_ps(y0, _mm_add_ps(_mm_add_ps(y1, y1), y2));
                        _mm_store_ps(dst + i, x0);
                        _mm_store_ps(dst + i + 4, y0);
                    }
                else if( kx[0] == -2 && kx[1] == 1 )
                    // kernel [1 -2 1] (Laplacian-style)
                    for( ; i <= width - 8; i += 8, src += 8 )
                    {
                        __m128 x0, x1, x2, y0, y1, y2;
                        x0 = _mm_loadu_ps(src - cn);
                        x1 = _mm_loadu_ps(src);
                        x2 = _mm_loadu_ps(src + cn);
                        y0 = _mm_loadu_ps(src - cn + 4);
                        y1 = _mm_loadu_ps(src + 4);
                        y2 = _mm_loadu_ps(src + cn + 4);
                        x0 = _mm_add_ps(x0, _mm_sub_ps(x2, _mm_add_ps(x1, x1)));
                        y0 = _mm_add_ps(y0, _mm_sub_ps(y2, _mm_add_ps(y1, y1)));
                        _mm_store_ps(dst + i, x0);
                        _mm_store_ps(dst + i + 4, y0);
                    }
                else
                {
                    // general symmetric 3-tap: k0*c + k1*(l + r)
                    __m128 k0 = _mm_set1_ps(kx[0]), k1 = _mm_set1_ps(kx[1]);
                    for( ; i <= width - 8; i += 8, src += 8 )
                    {
                        __m128 x0, x1, x2, y0, y1, y2;
                        x0 = _mm_loadu_ps(src - cn);
                        x1 = _mm_loadu_ps(src);
                        x2 = _mm_loadu_ps(src + cn);
                        y0 = _mm_loadu_ps(src - cn + 4);
                        y1 = _mm_loadu_ps(src + 4);
                        y2 = _mm_loadu_ps(src + cn + 4);
                        x0 = _mm_mul_ps(_mm_add_ps(x0, x2), k1);
                        y0 = _mm_mul_ps(_mm_add_ps(y0, y2), k1);
                        x0 = _mm_add_ps(x0, _mm_mul_ps(x1, k0));
                        y0 = _mm_add_ps(y0, _mm_mul_ps(y1, k0));
                        _mm_store_ps(dst + i, x0);
                        _mm_store_ps(dst + i + 4, y0);
                    }
                }
            }
            else if( _ksize == 5 )
            {
                if( kx[0] == -2 && kx[1] == 0 && kx[2] == 1 )
                    // kernel [1 0 -2 0 1]
                    for( ; i <= width - 8; i += 8, src += 8 )
                    {
                        __m128 x0, x1, x2, y0, y1, y2;
                        x0 = _mm_loadu_ps(src - cn*2);
                        x1 = _mm_loadu_ps(src);
                        x2 = _mm_loadu_ps(src + cn*2);
                        y0 = _mm_loadu_ps(src - cn*2 + 4);
                        y1 = _mm_loadu_ps(src + 4);
                        y2 = _mm_loadu_ps(src + cn*2 + 4);
                        x0 = _mm_add_ps(x0, _mm_sub_ps(x2, _mm_add_ps(x1, x1)));
                        y0 = _mm_add_ps(y0, _mm_sub_ps(y2, _mm_add_ps(y1, y1)));
                        _mm_store_ps(dst + i, x0);
                        _mm_store_ps(dst + i + 4, y0);
                    }
                else
                {
                    // general symmetric 5-tap:
                    // k0*c + k1*(l1 + r1) + k2*(l2 + r2)
                    __m128 k0 = _mm_set1_ps(kx[0]), k1 = _mm_set1_ps(kx[1]), k2 = _mm_set1_ps(kx[2]);
                    for( ; i <= width - 8; i += 8, src += 8 )
                    {
                        __m128 x0, x1, x2, y0, y1, y2;
                        x0 = _mm_loadu_ps(src - cn);
                        x1 = _mm_loadu_ps(src);
                        x2 = _mm_loadu_ps(src + cn);
                        y0 = _mm_loadu_ps(src - cn + 4);
                        y1 = _mm_loadu_ps(src + 4);
                        y2 = _mm_loadu_ps(src + cn + 4);
                        x0 = _mm_mul_ps(_mm_add_ps(x0, x2), k1);
                        y0 = _mm_mul_ps(_mm_add_ps(y0, y2), k1);
                        x0 = _mm_add_ps(x0, _mm_mul_ps(x1, k0));
                        y0 = _mm_add_ps(y0, _mm_mul_ps(y1, k0));
                        x2 = _mm_add_ps(_mm_loadu_ps(src + cn*2), _mm_loadu_ps(src - cn*2));
                        y2 = _mm_add_ps(_mm_loadu_ps(src + cn*2 + 4), _mm_loadu_ps(src - cn*2 + 4));
                        x0 = _mm_add_ps(x0, _mm_mul_ps(x2, k2));
                        y0 = _mm_add_ps(y0, _mm_mul_ps(y2, k2));
                        _mm_store_ps(dst + i, x0);
                        _mm_store_ps(dst + i + 4, y0);
                    }
                }
            }
        }
        else
        {
            // antisymmetric: center coefficient is zero by definition
            if( _ksize == 3 )
            {
                if( kx[0] == 0 && kx[1] == 1 )
                    // kernel [-1 0 1]
                    for( ; i <= width - 8; i += 8, src += 8 )
                    {
                        __m128 x0, x2, y0, y2;
                        x0 = _mm_loadu_ps(src + cn);
                        x2 = _mm_loadu_ps(src - cn);
                        y0 = _mm_loadu_ps(src + cn + 4);
                        y2 = _mm_loadu_ps(src - cn + 4);
                        x0 = _mm_sub_ps(x0, x2);
                        y0 = _mm_sub_ps(y0, y2);
                        _mm_store_ps(dst + i, x0);
                        _mm_store_ps(dst + i + 4, y0);
                    }
                else
                {
                    __m128 k1 = _mm_set1_ps(kx[1]);
                    for( ; i <= width - 8; i += 8, src += 8 )
                    {
                        __m128 x0, x2, y0, y2;
                        x0 = _mm_loadu_ps(src + cn);
                        x2 = _mm_loadu_ps(src - cn);
                        y0 = _mm_loadu_ps(src + cn + 4);
                        y2 = _mm_loadu_ps(src - cn + 4);
                        x0 = _mm_mul_ps(_mm_sub_ps(x0, x2), k1);
                        y0 = _mm_mul_ps(_mm_sub_ps(y0, y2), k1);
                        _mm_store_ps(dst + i, x0);
                        _mm_store_ps(dst + i + 4, y0);
                    }
                }
            }
            else if( _ksize == 5 )
            {
                // k1*(r1 - l1) + k2*(r2 - l2)
                __m128 k1 = _mm_set1_ps(kx[1]), k2 = _mm_set1_ps(kx[2]);
                for( ; i <= width - 8; i += 8, src += 8 )
                {
                    __m128 x0, x2, y0, y2;
                    x0 = _mm_loadu_ps(src + cn);
                    x2 = _mm_loadu_ps(src - cn);
                    y0 = _mm_loadu_ps(src + cn + 4);
                    y2 = _mm_loadu_ps(src - cn + 4);
                    x0 = _mm_mul_ps(_mm_sub_ps(x0, x2), k1);
                    y0 = _mm_mul_ps(_mm_sub_ps(y0, y2), k1);
                    x2 = _mm_sub_ps(_mm_loadu_ps(src + cn*2), _mm_loadu_ps(src - cn*2));
                    y2 = _mm_sub_ps(_mm_loadu_ps(src + cn*2 + 4), _mm_loadu_ps(src - cn*2 + 4));
                    x0 = _mm_add_ps(x0, _mm_mul_ps(x2, k2));
                    y0 = _mm_add_ps(y0, _mm_mul_ps(y2, k2));
                    _mm_store_ps(dst + i, x0);
                    _mm_store_ps(dst + i + 4, y0);
                }
            }
        }
        return i;
    }

    Mat kernel;
    int symmetryType;
};
struct SymmColumnVec_32f
{
    // Column filter: combines float intermediate rows with a symmetrical or
    // asymmetrical float kernel and stores float results (SSE path).
    // Fix: the default constructor previously left delta uninitialized.
    SymmColumnVec_32f()
    {
        symmetryType = 0;
        delta = 0.f;
    }
    SymmColumnVec_32f(const Mat& _kernel, int _symmetryType, int, double _delta)
    {
        symmetryType = _symmetryType;
        kernel = _kernel;
        delta = (float)_delta;
        CV_Assert( (symmetryType & (KERNEL_SYMMETRICAL | KERNEL_ASYMMETRICAL)) != 0 );
    }

    // _src points at the center row of the row ring buffer, so src[-k] and
    // src[k] are the rows tapped by the k-th kernel coefficient pair.
    // Returns the number of pixels processed; caller handles the tail.
    int operator()(const uchar** _src, uchar* _dst, int width) const
    {
        if( !checkHardwareSupport(CV_CPU_SSE) )
            return 0;

        int ksize2 = (kernel.rows + kernel.cols - 1)/2;
        const float* ky = kernel.ptr<float>() + ksize2;
        int i = 0, k;
        bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0;
        const float** src = (const float**)_src;
        const float *S, *S2;
        float* dst = (float*)_dst;
        __m128 d4 = _mm_set1_ps(delta);

        if( symmetrical )
        {
            for( ; i <= width - 16; i += 16 )
            {
                // center tap times the middle row, plus delta
                __m128 f = _mm_load_ss(ky);
                f = _mm_shuffle_ps(f, f, 0);
                __m128 s0, s1, s2, s3;
                __m128 x0, x1;
                S = src[0] + i;
                s0 = _mm_load_ps(S);
                s1 = _mm_load_ps(S+4);
                s0 = _mm_add_ps(_mm_mul_ps(s0, f), d4);
                s1 = _mm_add_ps(_mm_mul_ps(s1, f), d4);
                s2 = _mm_load_ps(S+8);
                s3 = _mm_load_ps(S+12);
                s2 = _mm_add_ps(_mm_mul_ps(s2, f), d4);
                s3 = _mm_add_ps(_mm_mul_ps(s3, f), d4);

                for( k = 1; k <= ksize2; k++ )
                {
                    // symmetric pair: ky[k]*(row[+k] + row[-k])
                    S = src[k] + i;
                    S2 = src[-k] + i;
                    f = _mm_load_ss(ky+k);
                    f = _mm_shuffle_ps(f, f, 0);
                    x0 = _mm_add_ps(_mm_load_ps(S), _mm_load_ps(S2));
                    x1 = _mm_add_ps(_mm_load_ps(S+4), _mm_load_ps(S2+4));
                    s0 = _mm_add_ps(s0, _mm_mul_ps(x0, f));
                    s1 = _mm_add_ps(s1, _mm_mul_ps(x1, f));
                    x0 = _mm_add_ps(_mm_load_ps(S+8), _mm_load_ps(S2+8));
                    x1 = _mm_add_ps(_mm_load_ps(S+12), _mm_load_ps(S2+12));
                    s2 = _mm_add_ps(s2, _mm_mul_ps(x0, f));
                    s3 = _mm_add_ps(s3, _mm_mul_ps(x1, f));
                }

                _mm_storeu_ps(dst + i, s0);
                _mm_storeu_ps(dst + i + 4, s1);
                _mm_storeu_ps(dst + i + 8, s2);
                _mm_storeu_ps(dst + i + 12, s3);
            }

            for( ; i <= width - 4; i += 4 )
            {
                __m128 f = _mm_load_ss(ky);
                f = _mm_shuffle_ps(f, f, 0);
                __m128 x0, s0 = _mm_load_ps(src[0] + i);
                s0 = _mm_add_ps(_mm_mul_ps(s0, f), d4);

                for( k = 1; k <= ksize2; k++ )
                {
                    f = _mm_load_ss(ky+k);
                    f = _mm_shuffle_ps(f, f, 0);
                    // was: S/S2 were assigned but the loads re-indexed src[]
                    S = src[k] + i;
                    S2 = src[-k] + i;
                    x0 = _mm_add_ps(_mm_load_ps(S), _mm_load_ps(S2));
                    s0 = _mm_add_ps(s0, _mm_mul_ps(x0, f));
                }

                _mm_storeu_ps(dst + i, s0);
            }
        }
        else
        {
            for( ; i <= width - 16; i += 16 )
            {
                // antisymmetric kernel: center tap is zero, so start at delta
                // (a dead "S = src[0] + i;" assignment was removed here)
                __m128 f, s0 = d4, s1 = d4, s2 = d4, s3 = d4;
                __m128 x0, x1;
                for( k = 1; k <= ksize2; k++ )
                {
                    // antisymmetric pair: ky[k]*(row[+k] - row[-k])
                    S = src[k] + i;
                    S2 = src[-k] + i;
                    f = _mm_load_ss(ky+k);
                    f = _mm_shuffle_ps(f, f, 0);
                    x0 = _mm_sub_ps(_mm_load_ps(S), _mm_load_ps(S2));
                    x1 = _mm_sub_ps(_mm_load_ps(S+4), _mm_load_ps(S2+4));
                    s0 = _mm_add_ps(s0, _mm_mul_ps(x0, f));
                    s1 = _mm_add_ps(s1, _mm_mul_ps(x1, f));
                    x0 = _mm_sub_ps(_mm_load_ps(S+8), _mm_load_ps(S2+8));
                    x1 = _mm_sub_ps(_mm_load_ps(S+12), _mm_load_ps(S2+12));
                    s2 = _mm_add_ps(s2, _mm_mul_ps(x0, f));
                    s3 = _mm_add_ps(s3, _mm_mul_ps(x1, f));
                }

                _mm_storeu_ps(dst + i, s0);
                _mm_storeu_ps(dst + i + 4, s1);
                _mm_storeu_ps(dst + i + 8, s2);
                _mm_storeu_ps(dst + i + 12, s3);
            }

            for( ; i <= width - 4; i += 4 )
            {
                __m128 f, x0, s0 = d4;
                for( k = 1; k <= ksize2; k++ )
                {
                    f = _mm_load_ss(ky+k);
                    f = _mm_shuffle_ps(f, f, 0);
                    x0 = _mm_sub_ps(_mm_load_ps(src[k]+i), _mm_load_ps(src[-k] + i));
                    s0 = _mm_add_ps(s0, _mm_mul_ps(x0, f));
                }
                _mm_storeu_ps(dst + i, s0);
            }
        }
        return i;
    }

    int symmetryType;
    float delta;
    Mat kernel;
};
struct SymmColumnSmallVec_32f
{
    // Column filter specialized for 3-tap kernels; S0/S1/S2 below are the
    // rows above/at/below the destination row (SSE path).
    // Fix: the default constructor previously left delta uninitialized.
    SymmColumnSmallVec_32f()
    {
        symmetryType = 0;
        delta = 0.f;
    }
    SymmColumnSmallVec_32f(const Mat& _kernel, int _symmetryType, int, double _delta)
    {
        symmetryType = _symmetryType;
        kernel = _kernel;
        delta = (float)_delta;
        CV_Assert( (symmetryType & (KERNEL_SYMMETRICAL | KERNEL_ASYMMETRICAL)) != 0 );
    }

    int operator()(const uchar** _src, uchar* _dst, int width) const
    {
        if( !checkHardwareSupport(CV_CPU_SSE) )
            return 0;

        int ksize2 = (kernel.rows + kernel.cols - 1)/2;
        const float* ky = kernel.ptr<float>() + ksize2;
        int i = 0;
        bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0;
        const float** src = (const float**)_src;
        const float *S0 = src[-1], *S1 = src[0], *S2 = src[1];
        float* dst = (float*)_dst;
        __m128 d4 = _mm_set1_ps(delta);

        if( symmetrical )
        {
            if( ky[0] == 2 && ky[1] == 1 )
            {
                // kernel [1 2 1] — pure adds, no multiplies
                for( ; i <= width - 8; i += 8 )
                {
                    __m128 s0, s1, s2, s3, s4, s5;
                    s0 = _mm_load_ps(S0 + i);
                    s1 = _mm_load_ps(S0 + i + 4);
                    s2 = _mm_load_ps(S1 + i);
                    s3 = _mm_load_ps(S1 + i + 4);
                    s4 = _mm_load_ps(S2 + i);
                    s5 = _mm_load_ps(S2 + i + 4);
                    s0 = _mm_add_ps(s0, _mm_add_ps(s4, _mm_add_ps(s2, s2)));
                    s1 = _mm_add_ps(s1, _mm_add_ps(s5, _mm_add_ps(s3, s3)));
                    s0 = _mm_add_ps(s0, d4);
                    s1 = _mm_add_ps(s1, d4);
                    _mm_storeu_ps(dst + i, s0);
                    _mm_storeu_ps(dst + i + 4, s1);
                }
            }
            else if( ky[0] == -2 && ky[1] == 1 )
            {
                // kernel [1 -2 1] (Laplacian-style)
                for( ; i <= width - 8; i += 8 )
                {
                    __m128 s0, s1, s2, s3, s4, s5;
                    s0 = _mm_load_ps(S0 + i);
                    s1 = _mm_load_ps(S0 + i + 4);
                    s2 = _mm_load_ps(S1 + i);
                    s3 = _mm_load_ps(S1 + i + 4);
                    s4 = _mm_load_ps(S2 + i);
                    s5 = _mm_load_ps(S2 + i + 4);
                    s0 = _mm_add_ps(s0, _mm_sub_ps(s4, _mm_add_ps(s2, s2)));
                    s1 = _mm_add_ps(s1, _mm_sub_ps(s5, _mm_add_ps(s3, s3)));
                    s0 = _mm_add_ps(s0, d4);
                    s1 = _mm_add_ps(s1, d4);
                    _mm_storeu_ps(dst + i, s0);
                    _mm_storeu_ps(dst + i + 4, s1);
                }
            }
            else
            {
                // general symmetric 3-tap: k0*mid + k1*(top + bottom) + delta
                __m128 k0 = _mm_set1_ps(ky[0]), k1 = _mm_set1_ps(ky[1]);
                for( ; i <= width - 8; i += 8 )
                {
                    __m128 s0, s1, x0, x1;
                    s0 = _mm_load_ps(S1 + i);
                    s1 = _mm_load_ps(S1 + i + 4);
                    s0 = _mm_add_ps(_mm_mul_ps(s0, k0), d4);
                    s1 = _mm_add_ps(_mm_mul_ps(s1, k0), d4);
                    x0 = _mm_add_ps(_mm_load_ps(S0 + i), _mm_load_ps(S2 + i));
                    x1 = _mm_add_ps(_mm_load_ps(S0 + i + 4), _mm_load_ps(S2 + i + 4));
                    s0 = _mm_add_ps(s0, _mm_mul_ps(x0,k1));
                    s1 = _mm_add_ps(s1, _mm_mul_ps(x1,k1));
                    _mm_storeu_ps(dst + i, s0);
                    _mm_storeu_ps(dst + i + 4, s1);
                }
            }
        }
        else
        {
            if( fabs(ky[1]) == 1 && ky[1] == -ky[-1] )
            {
                // kernel [-1 0 1]; for [1 0 -1] swap the rows instead of negating
                if( ky[1] < 0 )
                    std::swap(S0, S2);
                for( ; i <= width - 8; i += 8 )
                {
                    __m128 s0, s1, s2, s3;
                    s0 = _mm_load_ps(S2 + i);
                    s1 = _mm_load_ps(S2 + i + 4);
                    s2 = _mm_load_ps(S0 + i);
                    s3 = _mm_load_ps(S0 + i + 4);
                    s0 = _mm_add_ps(_mm_sub_ps(s0, s2), d4);
                    s1 = _mm_add_ps(_mm_sub_ps(s1, s3), d4);
                    _mm_storeu_ps(dst + i, s0);
                    _mm_storeu_ps(dst + i + 4, s1);
                }
            }
            else
            {
                // general antisymmetric 3-tap: k1*(bottom - top) + delta
                __m128 k1 = _mm_set1_ps(ky[1]);
                for( ; i <= width - 8; i += 8 )
                {
                    __m128 s0 = d4, s1 = d4, x0, x1;
                    x0 = _mm_sub_ps(_mm_load_ps(S2 + i), _mm_load_ps(S0 + i));
                    x1 = _mm_sub_ps(_mm_load_ps(S2 + i + 4), _mm_load_ps(S0 + i + 4));
                    s0 = _mm_add_ps(s0, _mm_mul_ps(x0,k1));
                    s1 = _mm_add_ps(s1, _mm_mul_ps(x1,k1));
                    _mm_storeu_ps(dst + i, s0);
                    _mm_storeu_ps(dst + i + 4, s1);
                }
            }
        }
        return i;
    }

    int symmetryType;
    float delta;
    Mat kernel;
};
/////////////////////////////// non-separable filters ///////////////////////////////
///////////////////////////////// 8u<->8u, 8u<->16s /////////////////////////////////
struct FilterVec_8u
{
    // Non-separable 2D filter, 8-bit in / 8-bit out (SSE2 path). The kernel
    // is pre-scaled by 1/2^bits and reduced to its nonzero coefficients;
    // src[k] points at the source pixels for the k-th nonzero tap.
    // Fix: the default constructor previously left _nz and delta
    // uninitialized.
    FilterVec_8u() { _nz = 0; delta = 0.f; }
    FilterVec_8u(const Mat& _kernel, int _bits, double _delta)
    {
        Mat kernel;
        _kernel.convertTo(kernel, CV_32F, 1./(1 << _bits), 0);
        delta = (float)(_delta/(1 << _bits));
        std::vector<Point> coords;
        preprocess2DKernel(kernel, coords, coeffs);
        _nz = (int)coords.size();
    }

    // Returns the number of pixels processed; caller handles the tail.
    int operator()(const uchar** src, uchar* dst, int width) const
    {
        if( !checkHardwareSupport(CV_CPU_SSE2) )
            return 0;

        const float* kf = (const float*)&coeffs[0];
        int i = 0, k, nz = _nz;
        __m128 d4 = _mm_set1_ps(delta);

        for( ; i <= width - 16; i += 16 )
        {
            __m128 s0 = d4, s1 = d4, s2 = d4, s3 = d4;
            __m128i x0, x1, z = _mm_setzero_si128();

            for( k = 0; k < nz; k++ )
            {
                __m128 f = _mm_load_ss(kf+k), t0, t1;
                f = _mm_shuffle_ps(f, f, 0);

                // widen 16 u8 samples to 4x4 f32 and accumulate
                x0 = _mm_loadu_si128((const __m128i*)(src[k] + i));
                x1 = _mm_unpackhi_epi8(x0, z);
                x0 = _mm_unpacklo_epi8(x0, z);

                t0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(x0, z));
                t1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(x0, z));
                s0 = _mm_add_ps(s0, _mm_mul_ps(t0, f));
                s1 = _mm_add_ps(s1, _mm_mul_ps(t1, f));

                t0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(x1, z));
                t1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(x1, z));
                s2 = _mm_add_ps(s2, _mm_mul_ps(t0, f));
                s3 = _mm_add_ps(s3, _mm_mul_ps(t1, f));
            }

            // round-convert and saturate-pack back down to u8
            x0 = _mm_packs_epi32(_mm_cvtps_epi32(s0), _mm_cvtps_epi32(s1));
            x1 = _mm_packs_epi32(_mm_cvtps_epi32(s2), _mm_cvtps_epi32(s3));
            x0 = _mm_packus_epi16(x0, x1);
            _mm_storeu_si128((__m128i*)(dst + i), x0);
        }

        for( ; i <= width - 4; i += 4 )
        {
            __m128 s0 = d4;
            __m128i x0, z = _mm_setzero_si128();

            for( k = 0; k < nz; k++ )
            {
                __m128 f = _mm_load_ss(kf+k), t0;
                f = _mm_shuffle_ps(f, f, 0);

                x0 = _mm_cvtsi32_si128(*(const int*)(src[k] + i));
                x0 = _mm_unpacklo_epi8(x0, z);
                t0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(x0, z));
                s0 = _mm_add_ps(s0, _mm_mul_ps(t0, f));
            }

            x0 = _mm_packs_epi32(_mm_cvtps_epi32(s0), z);
            x0 = _mm_packus_epi16(x0, x0);
            *(int*)(dst + i) = _mm_cvtsi128_si32(x0);
        }
        return i;
    }

    int _nz;                    // number of nonzero kernel coefficients
    std::vector<uchar> coeffs;  // packed float coefficients
    float delta;
};
struct FilterVec_8u16s
{
    // Non-separable 2D filter, 8-bit in / signed 16-bit out (SSE2 path).
    // The kernel is pre-scaled by 1/2^bits and reduced to its nonzero
    // coefficients; src[k] points at the pixels for the k-th nonzero tap.
    // Fix: the default constructor previously left _nz and delta
    // uninitialized.
    FilterVec_8u16s() { _nz = 0; delta = 0.f; }
    FilterVec_8u16s(const Mat& _kernel, int _bits, double _delta)
    {
        Mat kernel;
        _kernel.convertTo(kernel, CV_32F, 1./(1 << _bits), 0);
        delta = (float)(_delta/(1 << _bits));
        std::vector<Point> coords;
        preprocess2DKernel(kernel, coords, coeffs);
        _nz = (int)coords.size();
    }

    // Returns the number of pixels processed; caller handles the tail.
    int operator()(const uchar** src, uchar* _dst, int width) const
    {
        if( !checkHardwareSupport(CV_CPU_SSE2) )
            return 0;

        const float* kf = (const float*)&coeffs[0];
        short* dst = (short*)_dst;
        int i = 0, k, nz = _nz;
        __m128 d4 = _mm_set1_ps(delta);

        for( ; i <= width - 16; i += 16 )
        {
            __m128 s0 = d4, s1 = d4, s2 = d4, s3 = d4;
            __m128i x0, x1, z = _mm_setzero_si128();

            for( k = 0; k < nz; k++ )
            {
                __m128 f = _mm_load_ss(kf+k), t0, t1;
                f = _mm_shuffle_ps(f, f, 0);

                // widen 16 u8 samples to 4x4 f32 and accumulate
                x0 = _mm_loadu_si128((const __m128i*)(src[k] + i));
                x1 = _mm_unpackhi_epi8(x0, z);
                x0 = _mm_unpacklo_epi8(x0, z);

                t0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(x0, z));
                t1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(x0, z));
                s0 = _mm_add_ps(s0, _mm_mul_ps(t0, f));
                s1 = _mm_add_ps(s1, _mm_mul_ps(t1, f));

                t0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(x1, z));
                t1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(x1, z));
                s2 = _mm_add_ps(s2, _mm_mul_ps(t0, f));
                s3 = _mm_add_ps(s3, _mm_mul_ps(t1, f));
            }

            // round-convert and saturate-pack to int16
            x0 = _mm_packs_epi32(_mm_cvtps_epi32(s0), _mm_cvtps_epi32(s1));
            x1 = _mm_packs_epi32(_mm_cvtps_epi32(s2), _mm_cvtps_epi32(s3));
            _mm_storeu_si128((__m128i*)(dst + i), x0);
            _mm_storeu_si128((__m128i*)(dst + i + 8), x1);
        }

        for( ; i <= width - 4; i += 4 )
        {
            __m128 s0 = d4;
            __m128i x0, z = _mm_setzero_si128();

            for( k = 0; k < nz; k++ )
            {
                __m128 f = _mm_load_ss(kf+k), t0;
                f = _mm_shuffle_ps(f, f, 0);

                x0 = _mm_cvtsi32_si128(*(const int*)(src[k] + i));
                x0 = _mm_unpacklo_epi8(x0, z);
                t0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(x0, z));
                s0 = _mm_add_ps(s0, _mm_mul_ps(t0, f));
            }

            x0 = _mm_packs_epi32(_mm_cvtps_epi32(s0), z);
            _mm_storel_epi64((__m128i*)(dst + i), x0);
        }
        return i;
    }

    int _nz;                    // number of nonzero kernel coefficients
    std::vector<uchar> coeffs;  // packed float coefficients
    float delta;
};
struct FilterVec_32f
{
    // Non-separable 2D filter, float in / float out (SSE path). The kernel
    // is reduced to its nonzero coefficients; src[k] points at the source
    // pixels for the k-th nonzero tap.
    // Fix: the default constructor previously left _nz and delta
    // uninitialized.
    FilterVec_32f() { _nz = 0; delta = 0.f; }
    FilterVec_32f(const Mat& _kernel, int, double _delta)
    {
        delta = (float)_delta;
        std::vector<Point> coords;
        preprocess2DKernel(_kernel, coords, coeffs);
        _nz = (int)coords.size();
    }

    // Returns the number of pixels processed; caller handles the tail.
    int operator()(const uchar** _src, uchar* _dst, int width) const
    {
        if( !checkHardwareSupport(CV_CPU_SSE) )
            return 0;

        const float* kf = (const float*)&coeffs[0];
        const float** src = (const float**)_src;
        float* dst = (float*)_dst;
        int i = 0, k, nz = _nz;
        __m128 d4 = _mm_set1_ps(delta);

        for( ; i <= width - 16; i += 16 )
        {
            __m128 s0 = d4, s1 = d4, s2 = d4, s3 = d4;

            for( k = 0; k < nz; k++ )
            {
                __m128 f = _mm_load_ss(kf+k), t0, t1;
                f = _mm_shuffle_ps(f, f, 0);
                const float* S = src[k] + i;

                t0 = _mm_loadu_ps(S);
                t1 = _mm_loadu_ps(S + 4);
                s0 = _mm_add_ps(s0, _mm_mul_ps(t0, f));
                s1 = _mm_add_ps(s1, _mm_mul_ps(t1, f));

                t0 = _mm_loadu_ps(S + 8);
                t1 = _mm_loadu_ps(S + 12);
                s2 = _mm_add_ps(s2, _mm_mul_ps(t0, f));
                s3 = _mm_add_ps(s3, _mm_mul_ps(t1, f));
            }

            _mm_storeu_ps(dst + i, s0);
            _mm_storeu_ps(dst + i + 4, s1);
            _mm_storeu_ps(dst + i + 8, s2);
            _mm_storeu_ps(dst + i + 12, s3);
        }

        for( ; i <= width - 4; i += 4 )
        {
            __m128 s0 = d4;
            for( k = 0; k < nz; k++ )
            {
                __m128 f = _mm_load_ss(kf+k), t0;
                f = _mm_shuffle_ps(f, f, 0);
                t0 = _mm_loadu_ps(src[k] + i);
                s0 = _mm_add_ps(s0, _mm_mul_ps(t0, f));
            }
            _mm_storeu_ps(dst + i, s0);
        }
        return i;
    }

    int _nz;                    // number of nonzero kernel coefficients
    std::vector<uchar> coeffs;  // packed float coefficients
    float delta;
};
#elif CV_NEON
struct SymmRowSmallVec_8u32s
{
SymmRowSmallVec_8u32s() { smallValues = false; }
SymmRowSmallVec_8u32s( const Mat& _kernel, int _symmetryType )
{
kernel = _kernel;
symmetryType = _symmetryType;
smallValues = true;
int k, ksize = kernel.rows + kernel.cols - 1;
for( k = 0; k < ksize; k++ )
{
int v = kernel.ptr<int>()[k];
if( v < SHRT_MIN || v > SHRT_MAX )
{
smallValues = false;
break;
}
}
}
int operator()(const uchar* src, uchar* _dst, int width, int cn) const
{
if( !checkHardwareSupport(CV_CPU_NEON) )
return 0;
int i = 0, _ksize = kernel.rows + kernel.cols - 1;
int* dst = (int*)_dst;
bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0;
const int* kx = kernel.ptr<int>() + _ksize/2;
if( !smallValues )
return 0;
src += (_ksize/2)*cn;
width *= cn;
if( symmetrical )
{
if( _ksize == 1 )
return 0;
if( _ksize == 3 )
{
if( kx[0] == 2 && kx[1] == 1 )
{
uint16x8_t zq = vdupq_n_u16(0);
for( ; i <= width - 8; i += 8, src += 8 )
{
uint8x8_t x0, x1, x2;
x0 = vld1_u8( (uint8_t *) (src - cn) );
x1 = vld1_u8( (uint8_t *) (src) );
x2 = vld1_u8( (uint8_t *) (src + cn) );
uint16x8_t y0, y1, y2;
y0 = vaddl_u8(x0, x2);
y1 = vshll_n_u8(x1, 1);
y2 = vaddq_u16(y0, y1);
uint16x8x2_t str;
str.val[0] = y2; str.val[1] = zq;
vst2q_u16( (uint16_t *) (dst + i), str );
}
}
else if( kx[0] == -2 && kx[1] == 1 )
return 0;
else
{
int32x4_t k32 = vdupq_n_s32(0);
k32 = vld1q_lane_s32(kx, k32, 0);
k32 = vld1q_lane_s32(kx + 1, k32, 1);
int16x4_t k = vqmovn_s32(k32);
uint8x8_t z = vdup_n_u8(0);
for( ; i <= width - 8; i += 8, src += 8 )
{
uint8x8_t x0, x1, x2;
x0 = vld1_u8( (uint8_t *) (src - cn) );
x1 = vld1_u8( (uint8_t *) (src) );
x2 = vld1_u8( (uint8_t *) (src + cn) );
int16x8_t y0, y1;
int32x4_t y2, y3;
y0 = vreinterpretq_s16_u16(vaddl_u8(x1, z));
y1 = vreinterpretq_s16_u16(vaddl_u8(x0, x2));
y2 = vmull_lane_s16(vget_low_s16(y0), k, 0);
y2 = vmlal_lane_s16(y2, vget_low_s16(y1), k, 1);
y3 = vmull_lane_s16(vget_high_s16(y0), k, 0);
y3 = vmlal_lane_s16(y3, vget_high_s16(y1), k, 1);
vst1q_s32((int32_t *)(dst + i), y2);
vst1q_s32((int32_t *)(dst + i + 4), y3);
}
}
}
else if( _ksize == 5 )
{
if( kx[0] == -2 && kx[1] == 0 && kx[2] == 1 )
return 0;
else
{
int32x4_t k32 = vdupq_n_s32(0);
k32 = vld1q_lane_s32(kx, k32, 0);
k32 = vld1q_lane_s32(kx + 1, k32, 1);
k32 = vld1q_lane_s32(kx + 2, k32, 2);
int16x4_t k = vqmovn_s32(k32);
uint8x8_t z = vdup_n_u8(0);
for( ; i <= width - 8; i += 8, src += 8 )
{
uint8x8_t x0, x1, x2, x3, x4;
x0 = vld1_u8( (uint8_t *) (src - cn) );
x1 = vld1_u8( (uint8_t *) (src) );
x2 = vld1_u8( (uint8_t *) (src + cn) );
int16x8_t y0, y1;
int32x4_t accl, acch;
y0 = vreinterpretq_s16_u16(vaddl_u8(x1, z));
y1 = vreinterpretq_s16_u16(vaddl_u8(x0, x2));
accl = vmull_lane_s16(vget_low_s16(y0), k, 0);
accl = vmlal_lane_s16(accl, vget_low_s16(y1), k, 1);
acch = vmull_lane_s16(vget_high_s16(y0), k, 0);
acch = vmlal_lane_s16(acch, vget_high_s16(y1), k, 1);
int16x8_t y2;
x3 = vld1_u8( (uint8_t *) (src - cn*2) );
x4 = vld1_u8( (uint8_t *) (src + cn*2) );
y2 = vreinterpretq_s16_u16(vaddl_u8(x3, x4));
accl = vmlal_lane_s16(accl, vget_low_s16(y2), k, 2);
acch = vmlal_lane_s16(acch, vget_high_s16(y2), k, 2);
vst1q_s32((int32_t *)(dst + i), accl);
vst1q_s32((int32_t *)(dst + i + 4), acch);
}
}
}
}
else
{
if( _ksize == 3 )
{
if( kx[0] == 0 && kx[1] == 1 )
{
uint8x8_t z = vdup_n_u8(0);
for( ; i <= width - 8; i += 8, src += 8 )
{
uint8x8_t x0, x1;
x0 = vld1_u8( (uint8_t *) (src - cn) );
x1 = vld1_u8( (uint8_t *) (src + cn) );
int16x8_t y0;
y0 = vsubq_s16(vreinterpretq_s16_u16(vaddl_u8(x1, z)),
vreinterpretq_s16_u16(vaddl_u8(x0, z)));
vst1q_s32((int32_t *)(dst + i), vmovl_s16(vget_low_s16(y0)));
vst1q_s32((int32_t *)(dst + i + 4), vmovl_s16(vget_high_s16(y0)));
}
}
else
{
int32x4_t k32 = vdupq_n_s32(0);
k32 = vld1q_lane_s32(kx + 1, k32, 1);
int16x4_t k = vqmovn_s32(k32);
uint8x8_t z = vdup_n_u8(0);
for( ; i <= width - 8; i += 8, src += 8 )
{
uint8x8_t x0, x1;
x0 = vld1_u8( (uint8_t *) (src - cn) );
x1 = vld1_u8( (uint8_t *) (src + cn) );
int16x8_t y0;
int32x4_t y1, y2;
y0 = vsubq_s16(vreinterpretq_s16_u16(vaddl_u8(x1, z)),
vreinterpretq_s16_u16(vaddl_u8(x0, z)));
y1 = vmull_lane_s16(vget_low_s16(y0), k, 1);
y2 = vmull_lane_s16(vget_high_s16(y0), k, 1);
vst1q_s32((int32_t *)(dst + i), y1);
vst1q_s32((int32_t *)(dst + i + 4), y2);
}
}
}
else if( _ksize == 5 )
{
int32x4_t k32 = vdupq_n_s32(0);
k32 = vld1q_lane_s32(kx + 1, k32, 1);
k32 = vld1q_lane_s32(kx + 2, k32, 2);
int16x4_t k = vqmovn_s32(k32);
uint8x8_t z = vdup_n_u8(0);
for( ; i <= width - 8; i += 8, src += 8 )
{
uint8x8_t x0, x1;
x0 = vld1_u8( (uint8_t *) (src - cn) );
x1 = vld1_u8( (uint8_t *) (src + cn) );
int32x4_t accl, acch;
int16x8_t y0;
y0 = vsubq_s16(vreinterpretq_s16_u16(vaddl_u8(x1, z)),
vreinterpretq_s16_u16(vaddl_u8(x0, z)));
accl = vmull_lane_s16(vget_low_s16(y0), k, 1);
acch = vmull_lane_s16(vget_high_s16(y0), k, 1);
uint8x8_t x2, x3;
x2 = vld1_u8( (uint8_t *) (src - cn*2) );
x3 = vld1_u8( (uint8_t *) (src + cn*2) );
int16x8_t y1;
y1 = vsubq_s16(vreinterpretq_s16_u16(vaddl_u8(x3, z)),
vreinterpretq_s16_u16(vaddl_u8(x2, z)));
accl = vmlal_lane_s16(accl, vget_low_s16(y1), k, 2);
acch = vmlal_lane_s16(acch, vget_high_s16(y1), k, 2);
vst1q_s32((int32_t *)(dst + i), accl);
vst1q_s32((int32_t *)(dst + i + 4), acch);
}
}
}
return i;
}
Mat kernel;
int symmetryType;
bool smallValues;
};
struct SymmColumnVec_32s8u
{
    // NEON column filter: combines int32 intermediate rows with a float
    // kernel (rescaled by 1/2^bits) and stores saturated 8-bit results.
    // Fix: the default constructor previously left delta uninitialized.
    SymmColumnVec_32s8u()
    {
        symmetryType = 0;
        delta = 0.f;
    }
    SymmColumnVec_32s8u(const Mat& _kernel, int _symmetryType, int _bits, double _delta)
    {
        symmetryType = _symmetryType;
        _kernel.convertTo(kernel, CV_32F, 1./(1 << _bits), 0);
        delta = (float)(_delta/(1 << _bits));
        CV_Assert( (symmetryType & (KERNEL_SYMMETRICAL | KERNEL_ASYMMETRICAL)) != 0 );
    }

    // _src points at the center row of the row ring buffer, so src[-k] and
    // src[k] are the rows tapped by the k-th kernel coefficient pair.
    // Returns the number of pixels processed; caller handles the tail.
    int operator()(const uchar** _src, uchar* dst, int width) const
    {
        if( !checkHardwareSupport(CV_CPU_NEON) )
            return 0;

        int _ksize = kernel.rows + kernel.cols - 1;
        int ksize2 = _ksize / 2;
        const float* ky = kernel.ptr<float>() + ksize2;
        int i = 0, k;
        bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0;
        const int** src = (const int**)_src;
        const int *S, *S2;
        float32x4_t d4 = vdupq_n_f32(delta);

        if( symmetrical )
        {
            if( _ksize == 1 )
                return 0;

            // ky[0] and ky[1] broadcast via lane multiplies below
            float32x2_t k32;
            k32 = vdup_n_f32(0);
            k32 = vld1_lane_f32(ky, k32, 0);
            k32 = vld1_lane_f32(ky + 1, k32, 1);

            for( ; i <= width - 8; i += 8 )
            {
                float32x4_t accl, acch;
                float32x4_t f0l, f0h, f1l, f1h, f2l, f2h;

                S = src[0] + i;

                f0l = vcvtq_f32_s32( vld1q_s32(S) );
                f0h = vcvtq_f32_s32( vld1q_s32(S + 4) );

                S = src[1] + i;
                S2 = src[-1] + i;

                f1l = vcvtq_f32_s32( vld1q_s32(S) );
                f1h = vcvtq_f32_s32( vld1q_s32(S + 4) );
                f2l = vcvtq_f32_s32( vld1q_s32(S2) );
                f2h = vcvtq_f32_s32( vld1q_s32(S2 + 4) );

                accl = acch = d4;
                accl = vmlaq_lane_f32(accl, f0l, k32, 0);
                acch = vmlaq_lane_f32(acch, f0h, k32, 0);
                accl = vmlaq_lane_f32(accl, vaddq_f32(f1l, f2l), k32, 1);
                acch = vmlaq_lane_f32(acch, vaddq_f32(f1h, f2h), k32, 1);

                for( k = 2; k <= ksize2; k++ )
                {
                    // symmetric pair: ky[k]*(row[+k] + row[-k])
                    S = src[k] + i;
                    S2 = src[-k] + i;

                    float32x4_t f3l, f3h, f4l, f4h;
                    f3l = vcvtq_f32_s32( vld1q_s32(S) );
                    f3h = vcvtq_f32_s32( vld1q_s32(S + 4) );
                    f4l = vcvtq_f32_s32( vld1q_s32(S2) );
                    f4h = vcvtq_f32_s32( vld1q_s32(S2 + 4) );

                    accl = vmlaq_n_f32(accl, vaddq_f32(f3l, f4l), ky[k]);
                    acch = vmlaq_n_f32(acch, vaddq_f32(f3h, f4h), ky[k]);
                }

                // convert and saturate-narrow down to u8
                int32x4_t s32l, s32h;
                s32l = vcvtq_s32_f32(accl);
                s32h = vcvtq_s32_f32(acch);

                int16x4_t s16l, s16h;
                s16l = vqmovn_s32(s32l);
                s16h = vqmovn_s32(s32h);

                uint8x8_t u8;
                u8 =  vqmovun_s16(vcombine_s16(s16l, s16h));

                vst1_u8((uint8_t *)(dst + i), u8);
            }
        }
        else
        {
            // antisymmetric kernel: center tap is zero and is skipped
            float32x2_t k32;
            k32 = vdup_n_f32(0);
            k32 = vld1_lane_f32(ky + 1, k32, 1);

            for( ; i <= width - 8; i += 8 )
            {
                float32x4_t accl, acch;
                float32x4_t f1l, f1h, f2l, f2h;

                S = src[1] + i;
                S2 = src[-1] + i;

                f1l = vcvtq_f32_s32( vld1q_s32(S) );
                f1h = vcvtq_f32_s32( vld1q_s32(S + 4) );
                f2l = vcvtq_f32_s32( vld1q_s32(S2) );
                f2h = vcvtq_f32_s32( vld1q_s32(S2 + 4) );

                accl = acch = d4;
                accl = vmlaq_lane_f32(accl, vsubq_f32(f1l, f2l), k32, 1);
                acch = vmlaq_lane_f32(acch, vsubq_f32(f1h, f2h), k32, 1);

                for( k = 2; k <= ksize2; k++ )
                {
                    // antisymmetric pair: ky[k]*(row[+k] - row[-k])
                    S = src[k] + i;
                    S2 = src[-k] + i;

                    float32x4_t f3l, f3h, f4l, f4h;
                    f3l = vcvtq_f32_s32( vld1q_s32(S) );
                    f3h = vcvtq_f32_s32( vld1q_s32(S + 4) );
                    f4l = vcvtq_f32_s32( vld1q_s32(S2) );
                    f4h = vcvtq_f32_s32( vld1q_s32(S2 + 4) );

                    accl = vmlaq_n_f32(accl, vsubq_f32(f3l, f4l), ky[k]);
                    acch = vmlaq_n_f32(acch, vsubq_f32(f3h, f4h), ky[k]);
                }

                // convert and saturate-narrow down to u8
                int32x4_t s32l, s32h;
                s32l = vcvtq_s32_f32(accl);
                s32h = vcvtq_s32_f32(acch);

                int16x4_t s16l, s16h;
                s16l = vqmovn_s32(s32l);
                s16h = vqmovn_s32(s32h);

                uint8x8_t u8;
                u8 =  vqmovun_s16(vcombine_s16(s16l, s16h));

                vst1_u8((uint8_t *)(dst + i), u8);
            }
        }
        return i;
    }

    int symmetryType;
    float delta;
    Mat kernel;   // float kernel, pre-scaled by 1/2^bits
};
struct SymmColumnSmallVec_32s16s
{
SymmColumnSmallVec_32s16s() { symmetryType=0; }
SymmColumnSmallVec_32s16s(const Mat& _kernel, int _symmetryType, int _bits, double _delta)
{
symmetryType = _symmetryType;
_kernel.convertTo(kernel, CV_32F, 1./(1 << _bits), 0);
delta = (float)(_delta/(1 << _bits));
CV_Assert( (symmetryType & (KERNEL_SYMMETRICAL | KERNEL_ASYMMETRICAL)) != 0 );
}
int operator()(const uchar** _src, uchar* _dst, int width) const
{
if( !checkHardwareSupport(CV_CPU_NEON) )
return 0;
int ksize2 = (kernel.rows + kernel.cols - 1)/2;
const float* ky = kernel.ptr<float>() + ksize2;
int i = 0;
bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0;
const int** src = (const int**)_src;
const int *S0 = src[-1], *S1 = src[0], *S2 = src[1];
short* dst = (short*)_dst;
float32x4_t df4 = vdupq_n_f32(delta);
int32x4_t d4 = vcvtq_s32_f32(df4);
if( symmetrical )
{
if( ky[0] == 2 && ky[1] == 1 )
{
for( ; i <= width - 4; i += 4 )
{
int32x4_t x0, x1, x2;
x0 = vld1q_s32((int32_t const *)(S0 + i));
x1 = vld1q_s32((int32_t const *)(S1 + i));
x2 = vld1q_s32((int32_t const *)(S2 + i));
int32x4_t y0, y1, y2, y3;
y0 = vaddq_s32(x0, x2);
y1 = vqshlq_n_s32(x1, 1);
y2 = vaddq_s32(y0, y1);
y3 = vaddq_s32(y2, d4);
int16x4_t t;
t = vqmovn_s32(y3);
vst1_s16((int16_t *)(dst + i), t);
}
}
else if( ky[0] == -2 && ky[1] == 1 )
{
for( ; i <= width - 4; i += 4 )
{
int32x4_t x0, x1, x2;
x0 = vld1q_s32((int32_t const *)(S0 + i));
x1 = vld1q_s32((int32_t const *)(S1 + i));
x2 = vld1q_s32((int32_t const *)(S2 + i));
int32x4_t y0, y1, y2, y3;
y0 = vaddq_s32(x0, x2);
y1 = vqshlq_n_s32(x1, 1);
y2 = vsubq_s32(y0, y1);
y3 = vaddq_s32(y2, d4);
int16x4_t t;
t = vqmovn_s32(y3);
vst1_s16((int16_t *)(dst + i), t);
}
}
else if( ky[0] == 10 && ky[1] == 3 )
{
for( ; i <= width - 4; i += 4 )
{
int32x4_t x0, x1, x2, x3;
x0 = vld1q_s32((int32_t const *)(S0 + i));
x1 = vld1q_s32((int32_t const *)(S1 + i));
x2 = vld1q_s32((int32_t const *)(S2 + i));
x3 = vaddq_s32(x0, x2);
int32x4_t y0;
y0 = vmlaq_n_s32(d4, x1, 10);
y0 = vmlaq_n_s32(y0, x3, 3);
int16x4_t t;
t = vqmovn_s32(y0);
vst1_s16((int16_t *)(dst + i), t);
}
}
else
{
float32x2_t k32 = vdup_n_f32(0);
k32 = vld1_lane_f32(ky, k32, 0);
k32 = vld1_lane_f32(ky + 1, k32, 1);
for( ; i <= width - 4; i += 4 )
{
int32x4_t x0, x1, x2, x3, x4;
x0 = vld1q_s32((int32_t const *)(S0 + i));
x1 = vld1q_s32((int32_t const *)(S1 + i));
x2 = vld1q_s32((int32_t const *)(S2 + i));
x3 = vaddq_s32(x0, x2);
float32x4_t s0, s1, s2;
s0 = vcvtq_f32_s32(x1);
s1 = vcvtq_f32_s32(x3);
s2 = vmlaq_lane_f32(df4, s0, k32, 0);
s2 = vmlaq_lane_f32(s2, s1, k32, 1);
x4 = vcvtq_s32_f32(s2);
int16x4_t x5;
x5 = vqmovn_s32(x4);
vst1_s16((int16_t *)(dst + i), x5);
}
}
}
else
{
if( fabs(ky[1]) == 1 && ky[1] == -ky[-1] )
{
if( ky[1] < 0 )
std::swap(S0, S2);
for( ; i <= width - 4; i += 4 )
{
int32x4_t x0, x1;
x0 = vld1q_s32((int32_t const *)(S0 + i));
x1 = vld1q_s32((int32_t const *)(S2 + i));
int32x4_t y0, y1;
y0 = vsubq_s32(x1, x0);
y1 = vqaddq_s32(y0, d4);
int16x4_t t;
t = vqmovn_s32(y1);
vst1_s16((int16_t *)(dst + i), t);
}
}
else
{
float32x2_t k32 = vdup_n_f32(0);
k32 = vld1_lane_f32(ky + 1, k32, 1);
for( ; i <= width - 4; i += 4 )
{
int32x4_t x0, x1, x2, x3;
x0 = vld1q_s32((int32_t const *)(S0 + i));
x1 = vld1q_s32((int32_t const *)(S2 + i));
x2 = vsubq_s32(x1, x0);
float32x4_t s0, s1;
s0 = vcvtq_f32_s32(x2);
s1 = vmlaq_lane_f32(df4, s0, k32, 1);
x3 = vcvtq_s32_f32(s1);
int16x4_t x4;
x4 = vqmovn_s32(x3);
vst1_s16((int16_t *)(dst + i), x4);
}
}
}
return i;
}
int symmetryType;
float delta;
Mat kernel;
};
// NEON-vectorized column filter for symmetric/asymmetric kernels of arbitrary
// odd length, CV_32F row buffer -> CV_16S destination. Returns the number of
// columns processed; the scalar caller (SymmColumnFilter) finishes the tail.
struct SymmColumnVec_32f16s
{
    // NOTE(review): the default ctor leaves neon_supported uninitialized —
    // presumably only the parameterized ctor is used in practice; verify.
    SymmColumnVec_32f16s() { symmetryType=0; }
    SymmColumnVec_32f16s(const Mat& _kernel, int _symmetryType, int, double _delta)
    {
        symmetryType = _symmetryType;
        kernel = _kernel;
        delta = (float)_delta;
        CV_Assert( (symmetryType & (KERNEL_SYMMETRICAL | KERNEL_ASYMMETRICAL)) != 0 );
        // Cache the CPU capability once; operator() runs once per output row.
        neon_supported = checkHardwareSupport(CV_CPU_NEON);
    }
    int operator()(const uchar** _src, uchar* _dst, int width) const
    {
        if( !neon_supported )
            return 0;
        int _ksize = kernel.rows + kernel.cols - 1;
        int ksize2 = _ksize / 2;
        // ky points at the central tap; ky[k]/ky[-k] are taps at distance k.
        const float* ky = kernel.ptr<float>() + ksize2;
        int i = 0, k;
        bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0;
        const float** src = (const float**)_src;
        const float *S, *S2;
        short* dst = (short*)_dst;
        float32x4_t d4 = vdupq_n_f32(delta);
        if( symmetrical )
        {
            if( _ksize == 1 )
                return 0;
            float32x2_t k32;
            k32 = vdup_n_f32(0);
            k32 = vld1_lane_f32(ky, k32, 0);
            k32 = vld1_lane_f32(ky + 1, k32, 1);
            // Symmetric: acc = delta + ky[0]*S[0] + sum_k ky[k]*(S[k] + S[-k]),
            // 8 columns per iteration (two 4-lane accumulators).
            for( ; i <= width - 8; i += 8 )
            {
                float32x4_t x0l, x0h, x1l, x1h, x2l, x2h;
                float32x4_t accl, acch;
                S = src[0] + i;
                x0l = vld1q_f32(S);
                x0h = vld1q_f32(S + 4);
                S = src[1] + i;
                S2 = src[-1] + i;
                x1l = vld1q_f32(S);
                x1h = vld1q_f32(S + 4);
                x2l = vld1q_f32(S2);
                x2h = vld1q_f32(S2 + 4);
                accl = acch = d4;
                accl = vmlaq_lane_f32(accl, x0l, k32, 0);
                acch = vmlaq_lane_f32(acch, x0h, k32, 0);
                accl = vmlaq_lane_f32(accl, vaddq_f32(x1l, x2l), k32, 1);
                acch = vmlaq_lane_f32(acch, vaddq_f32(x1h, x2h), k32, 1);
                for( k = 2; k <= ksize2; k++ )
                {
                    S = src[k] + i;
                    S2 = src[-k] + i;
                    float32x4_t x3l, x3h, x4l, x4h;
                    x3l = vld1q_f32(S);
                    x3h = vld1q_f32(S + 4);
                    x4l = vld1q_f32(S2);
                    x4h = vld1q_f32(S2 + 4);
                    accl = vmlaq_n_f32(accl, vaddq_f32(x3l, x4l), ky[k]);
                    acch = vmlaq_n_f32(acch, vaddq_f32(x3h, x4h), ky[k]);
                }
                // Convert to int32 and narrow to int16 with saturation.
                int32x4_t s32l, s32h;
                s32l = vcvtq_s32_f32(accl);
                s32h = vcvtq_s32_f32(acch);
                int16x4_t s16l, s16h;
                s16l = vqmovn_s32(s32l);
                s16h = vqmovn_s32(s32h);
                vst1_s16((int16_t *)(dst + i), s16l);
                vst1_s16((int16_t *)(dst + i + 4), s16h);
            }
        }
        else
        {
            float32x2_t k32;
            k32 = vdup_n_f32(0);
            k32 = vld1_lane_f32(ky + 1, k32, 1);
            // Antisymmetric: center tap is zero, so
            // acc = delta + sum_k ky[k]*(S[k] - S[-k]).
            for( ; i <= width - 8; i += 8 )
            {
                float32x4_t x1l, x1h, x2l, x2h;
                float32x4_t accl, acch;
                S = src[1] + i;
                S2 = src[-1] + i;
                x1l = vld1q_f32(S);
                x1h = vld1q_f32(S + 4);
                x2l = vld1q_f32(S2);
                x2h = vld1q_f32(S2 + 4);
                accl = acch = d4;
                accl = vmlaq_lane_f32(accl, vsubq_f32(x1l, x2l), k32, 1);
                acch = vmlaq_lane_f32(acch, vsubq_f32(x1h, x2h), k32, 1);
                for( k = 2; k <= ksize2; k++ )
                {
                    S = src[k] + i;
                    S2 = src[-k] + i;
                    float32x4_t x3l, x3h, x4l, x4h;
                    x3l = vld1q_f32(S);
                    x3h = vld1q_f32(S + 4);
                    x4l = vld1q_f32(S2);
                    x4h = vld1q_f32(S2 + 4);
                    accl = vmlaq_n_f32(accl, vsubq_f32(x3l, x4l), ky[k]);
                    acch = vmlaq_n_f32(acch, vsubq_f32(x3h, x4h), ky[k]);
                }
                int32x4_t s32l, s32h;
                s32l = vcvtq_s32_f32(accl);
                s32h = vcvtq_s32_f32(acch);
                int16x4_t s16l, s16h;
                s16l = vqmovn_s32(s32l);
                s16h = vqmovn_s32(s32h);
                vst1_s16((int16_t *)(dst + i), s16l);
                vst1_s16((int16_t *)(dst + i + 4), s16h);
            }
        }
        return i;
    }
    int symmetryType;
    float delta;
    Mat kernel;
    bool neon_supported;
};
// NEON-vectorized row filter for small (ksize <= 5) symmetric/asymmetric
// kernels over CV_32F data. Only the general 5-tap cases are actually
// vectorized; every other branch returns 0, meaning "nothing processed,
// fall back to the scalar SymmRowSmallFilter code".
struct SymmRowSmallVec_32f
{
    SymmRowSmallVec_32f() {}
    SymmRowSmallVec_32f( const Mat& _kernel, int _symmetryType )
    {
        kernel = _kernel;
        symmetryType = _symmetryType;
    }
    int operator()(const uchar* _src, uchar* _dst, int width, int cn) const
    {
        if( !checkHardwareSupport(CV_CPU_NEON) )
            return 0;
        int i = 0, _ksize = kernel.rows + kernel.cols - 1;
        float* dst = (float*)_dst;
        // src is advanced to the central tap; neighbors are at +/- cn steps.
        const float* src = (const float*)_src + (_ksize/2)*cn;
        bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0;
        const float* kx = kernel.ptr<float>() + _ksize/2;
        width *= cn;
        if( symmetrical )
        {
            if( _ksize == 1 )
                return 0;
            if( _ksize == 3 )
            {
                // 3-tap cases are not vectorized here.
                if( kx[0] == 2 && kx[1] == 1 )
                    return 0;
                else if( kx[0] == -2 && kx[1] == 1 )
                    return 0;
                else
                {
                    return 0;
                }
            }
            else if( _ksize == 5 )
            {
                if( kx[0] == -2 && kx[1] == 0 && kx[2] == 1 )
                    return 0;
                else
                {
                    // General symmetric 5-tap:
                    // y = kx[0]*x0 + kx[1]*(x[-1]+x[+1]) + kx[2]*(x[-2]+x[+2]).
                    float32x2_t k0, k1;
                    k0 = k1 = vdup_n_f32(0);
                    k0 = vld1_lane_f32(kx + 0, k0, 0);
                    k0 = vld1_lane_f32(kx + 1, k0, 1);
                    k1 = vld1_lane_f32(kx + 2, k1, 0);
                    for( ; i <= width - 4; i += 4, src += 4 )
                    {
                        float32x4_t x0, x1, x2, x3, x4;
                        x0 = vld1q_f32(src);
                        x1 = vld1q_f32(src - cn);
                        x2 = vld1q_f32(src + cn);
                        x3 = vld1q_f32(src - cn*2);
                        x4 = vld1q_f32(src + cn*2);
                        float32x4_t y0;
                        y0 = vmulq_lane_f32(x0, k0, 0);
                        y0 = vmlaq_lane_f32(y0, vaddq_f32(x1, x2), k0, 1);
                        y0 = vmlaq_lane_f32(y0, vaddq_f32(x3, x4), k1, 0);
                        vst1q_f32(dst + i, y0);
                    }
                }
            }
        }
        else
        {
            if( _ksize == 3 )
            {
                // 3-tap antisymmetric cases are not vectorized here.
                if( kx[0] == 0 && kx[1] == 1 )
                    return 0;
                else
                {
                    return 0;
                }
            }
            else if( _ksize == 5 )
            {
                // General antisymmetric 5-tap (center tap is zero):
                // y = kx[1]*(x[+1]-x[-1]) + kx[2]*(x[+2]-x[-2]).
                float32x2_t k;
                k = vdup_n_f32(0);
                k = vld1_lane_f32(kx + 1, k, 0);
                k = vld1_lane_f32(kx + 2, k, 1);
                for( ; i <= width - 4; i += 4, src += 4 )
                {
                    float32x4_t x0, x1, x2, x3;
                    x0 = vld1q_f32(src - cn);
                    x1 = vld1q_f32(src + cn);
                    x2 = vld1q_f32(src - cn*2);
                    x3 = vld1q_f32(src + cn*2);
                    float32x4_t y0;
                    y0 = vmulq_lane_f32(vsubq_f32(x1, x0), k, 0);
                    y0 = vmlaq_lane_f32(y0, vsubq_f32(x3, x2), k, 1);
                    vst1q_f32(dst + i, y0);
                }
            }
        }
        return i;
    }
    Mat kernel;
    int symmetryType;
};
// The NEON branch provides no vectorized implementation for the remaining
// functor slots; alias them to the scalar NoVec stand-ins (which return 0,
// i.e. "no columns processed", so the generic scalar loops do all the work).
typedef RowNoVec RowVec_8u32s;
typedef RowNoVec RowVec_16s32f;
typedef RowNoVec RowVec_32f;
typedef ColumnNoVec SymmColumnVec_32f;
typedef SymmColumnSmallNoVec SymmColumnSmallVec_32f;
typedef FilterNoVec FilterVec_8u;
typedef FilterNoVec FilterVec_8u16s;
typedef FilterNoVec FilterVec_32f;
#else
// No SIMD support at all: every vector functor degrades to its scalar
// NoVec counterpart.
typedef RowNoVec RowVec_8u32s;
typedef RowNoVec RowVec_16s32f;
typedef RowNoVec RowVec_32f;
typedef SymmRowSmallNoVec SymmRowSmallVec_8u32s;
typedef SymmRowSmallNoVec SymmRowSmallVec_32f;
typedef ColumnNoVec SymmColumnVec_32s8u;
typedef ColumnNoVec SymmColumnVec_32f16s;
typedef ColumnNoVec SymmColumnVec_32f;
typedef SymmColumnSmallNoVec SymmColumnSmallVec_32s16s;
typedef SymmColumnSmallNoVec SymmColumnSmallVec_32f;
typedef FilterNoVec FilterVec_8u;
typedef FilterNoVec FilterVec_8u16s;
typedef FilterNoVec FilterVec_32f;
#endif
// Generic horizontal (row) convolution: D[x] = sum_k kernel[k]*S[x + k*cn].
// ST is the source element type, DT the (wider) buffer type, VecOp an
// optional SIMD accelerator that processes a prefix of each row.
template<typename ST, typename DT, class VecOp> struct RowFilter : public BaseRowFilter
{
    RowFilter( const Mat& _kernel, int _anchor, const VecOp& _vecOp=VecOp() )
    {
        anchor = _anchor;
        vecOp = _vecOp;
        // Keep a continuous copy so ptr<DT>() walks the taps linearly.
        if( _kernel.isContinuous() )
            kernel = _kernel;
        else
            _kernel.copyTo(kernel);
        ksize = kernel.rows + kernel.cols - 1;
        CV_Assert( kernel.type() == DataType<DT>::type &&
                   (kernel.rows == 1 || kernel.cols == 1));
    }
    void operator()(const uchar* src, uchar* dst, int width, int cn)
    {
        const DT* coeff = kernel.ptr<DT>();
        const int taps = ksize;
        DT* D = (DT*)dst;
        // Let the SIMD functor handle as many leading elements as it can.
        int x = vecOp(src, dst, width, cn);
        width *= cn;
#if CV_ENABLE_UNROLLED
        // Four interleaved accumulators per iteration.
        for( ; x <= width - 4; x += 4 )
        {
            const ST* S = (const ST*)src + x;
            DT f = coeff[0];
            DT s0 = f*S[0], s1 = f*S[1], s2 = f*S[2], s3 = f*S[3];
            for( int k = 1; k < taps; k++ )
            {
                S += cn;
                f = coeff[k];
                s0 += f*S[0]; s1 += f*S[1];
                s2 += f*S[2]; s3 += f*S[3];
            }
            D[x] = s0; D[x+1] = s1;
            D[x+2] = s2; D[x+3] = s3;
        }
#endif
        // Scalar tail.
        for( ; x < width; x++ )
        {
            const ST* S = (const ST*)src + x;
            DT acc = coeff[0]*S[0];
            for( int k = 1; k < taps; k++ )
            {
                S += cn;
                acc += coeff[k]*S[0];
            }
            D[x] = acc;
        }
    }
    Mat kernel;
    VecOp vecOp;
};
// Row filter specialized for small (ksize <= 5) symmetric/asymmetric kernels.
// Exploits symmetry to halve the multiplies and unrolls the common 3- and
// 5-tap kernels ([1 2 1], [1 -2 1], [-1 0 1], [1 0 -2 0 1], ...).
template<typename ST, typename DT, class VecOp> struct SymmRowSmallFilter :
    public RowFilter<ST, DT, VecOp>
{
    SymmRowSmallFilter( const Mat& _kernel, int _anchor, int _symmetryType,
                        const VecOp& _vecOp = VecOp())
        : RowFilter<ST, DT, VecOp>( _kernel, _anchor, _vecOp )
    {
        symmetryType = _symmetryType;
        CV_Assert( (symmetryType & (KERNEL_SYMMETRICAL | KERNEL_ASYMMETRICAL)) != 0 && this->ksize <= 5 );
    }
    void operator()(const uchar* src, uchar* dst, int width, int cn)
    {
        int ksize2 = this->ksize/2, ksize2n = ksize2*cn;
        // kx points at the central tap; kx[k] serves both +k and -k neighbors.
        const DT* kx = this->kernel.template ptr<DT>() + ksize2;
        bool symmetrical = (this->symmetryType & KERNEL_SYMMETRICAL) != 0;
        DT* D = (DT*)dst;
        // SIMD functor first; i is the count of already-finished elements.
        int i = this->vecOp(src, dst, width, cn), j, k;
        const ST* S = (const ST*)src + i + ksize2n;
        width *= cn;
        if( symmetrical )
        {
            if( this->ksize == 1 && kx[0] == 1 )
            {
                // Identity kernel: plain copy, two elements per iteration.
                for( ; i <= width - 2; i += 2 )
                {
                    DT s0 = S[i], s1 = S[i+1];
                    D[i] = s0; D[i+1] = s1;
                }
                S += i;
            }
            else if( this->ksize == 3 )
            {
                if( kx[0] == 2 && kx[1] == 1 )
                    for( ; i <= width - 2; i += 2, S += 2 )
                    {
                        DT s0 = S[-cn] + S[0]*2 + S[cn], s1 = S[1-cn] + S[1]*2 + S[1+cn];
                        D[i] = s0; D[i+1] = s1;
                    }
                else if( kx[0] == -2 && kx[1] == 1 )
                    for( ; i <= width - 2; i += 2, S += 2 )
                    {
                        DT s0 = S[-cn] - S[0]*2 + S[cn], s1 = S[1-cn] - S[1]*2 + S[1+cn];
                        D[i] = s0; D[i+1] = s1;
                    }
                else
                {
                    DT k0 = kx[0], k1 = kx[1];
                    for( ; i <= width - 2; i += 2, S += 2 )
                    {
                        DT s0 = S[0]*k0 + (S[-cn] + S[cn])*k1, s1 = S[1]*k0 + (S[1-cn] + S[1+cn])*k1;
                        D[i] = s0; D[i+1] = s1;
                    }
                }
            }
            else if( this->ksize == 5 )
            {
                DT k0 = kx[0], k1 = kx[1], k2 = kx[2];
                if( k0 == -2 && k1 == 0 && k2 == 1 )
                    for( ; i <= width - 2; i += 2, S += 2 )
                    {
                        DT s0 = -2*S[0] + S[-cn*2] + S[cn*2];
                        DT s1 = -2*S[1] + S[1-cn*2] + S[1+cn*2];
                        D[i] = s0; D[i+1] = s1;
                    }
                else
                    for( ; i <= width - 2; i += 2, S += 2 )
                    {
                        DT s0 = S[0]*k0 + (S[-cn] + S[cn])*k1 + (S[-cn*2] + S[cn*2])*k2;
                        DT s1 = S[1]*k0 + (S[1-cn] + S[1+cn])*k1 + (S[1-cn*2] + S[1+cn*2])*k2;
                        D[i] = s0; D[i+1] = s1;
                    }
            }
            // Generic symmetric tail for the last (odd) element(s).
            for( ; i < width; i++, S++ )
            {
                DT s0 = kx[0]*S[0];
                for( k = 1, j = cn; k <= ksize2; k++, j += cn )
                    s0 += kx[k]*(S[j] + S[-j]);
                D[i] = s0;
            }
        }
        else
        {
            if( this->ksize == 3 )
            {
                if( kx[0] == 0 && kx[1] == 1 )
                    for( ; i <= width - 2; i += 2, S += 2 )
                    {
                        DT s0 = S[cn] - S[-cn], s1 = S[1+cn] - S[1-cn];
                        D[i] = s0; D[i+1] = s1;
                    }
                else
                {
                    DT k1 = kx[1];
                    for( ; i <= width - 2; i += 2, S += 2 )
                    {
                        DT s0 = (S[cn] - S[-cn])*k1, s1 = (S[1+cn] - S[1-cn])*k1;
                        D[i] = s0; D[i+1] = s1;
                    }
                }
            }
            else if( this->ksize == 5 )
            {
                DT k1 = kx[1], k2 = kx[2];
                for( ; i <= width - 2; i += 2, S += 2 )
                {
                    DT s0 = (S[cn] - S[-cn])*k1 + (S[cn*2] - S[-cn*2])*k2;
                    DT s1 = (S[1+cn] - S[1-cn])*k1 + (S[1+cn*2] - S[1-cn*2])*k2;
                    D[i] = s0; D[i+1] = s1;
                }
            }
            // Generic antisymmetric tail (kx[0] may be nonzero here only for
            // ksize == 1, where the loops above were skipped).
            for( ; i < width; i++, S++ )
            {
                DT s0 = kx[0]*S[0];
                for( k = 1, j = cn; k <= ksize2; k++, j += cn )
                    s0 += kx[k]*(S[j] - S[-j]);
                D[i] = s0;
            }
        }
    }
    int symmetryType;
};
// Generic vertical (column) convolution over the intermediate row buffer:
// dst[x] = cast( sum_k kernel[k]*src[k][x] + delta ). CastOp converts the
// wide accumulator type back to the destination type; VecOp optionally
// SIMD-processes a prefix of each row.
template<class CastOp, class VecOp> struct ColumnFilter : public BaseColumnFilter
{
    typedef typename CastOp::type1 ST;
    typedef typename CastOp::rtype DT;

    ColumnFilter( const Mat& _kernel, int _anchor,
                  double _delta, const CastOp& _castOp=CastOp(),
                  const VecOp& _vecOp=VecOp() )
    {
        anchor = _anchor;
        delta = saturate_cast<ST>(_delta);
        castOp0 = _castOp;
        vecOp = _vecOp;
        // Keep a continuous copy so ptr<ST>() walks the taps linearly.
        if( _kernel.isContinuous() )
            kernel = _kernel;
        else
            _kernel.copyTo(kernel);
        ksize = kernel.rows + kernel.cols - 1;
        CV_Assert( kernel.type() == DataType<ST>::type &&
                   (kernel.rows == 1 || kernel.cols == 1));
    }

    void operator()(const uchar** src, uchar* dst, int dststep, int count, int width)
    {
        const ST* coeff = kernel.template ptr<ST>();
        const ST bias = delta;
        const int taps = ksize;
        CastOp castOp = castOp0;
        // Produce `count` output rows; src slides down one input row per pass.
        while( count-- > 0 )
        {
            DT* D = (DT*)dst;
            int x = vecOp(src, dst, width);
#if CV_ENABLE_UNROLLED
            // Four interleaved accumulators per iteration.
            for( ; x <= width - 4; x += 4 )
            {
                ST f = coeff[0];
                const ST* S = (const ST*)src[0] + x;
                ST s0 = f*S[0] + bias, s1 = f*S[1] + bias,
                   s2 = f*S[2] + bias, s3 = f*S[3] + bias;
                for( int k = 1; k < taps; k++ )
                {
                    S = (const ST*)src[k] + x; f = coeff[k];
                    s0 += f*S[0]; s1 += f*S[1];
                    s2 += f*S[2]; s3 += f*S[3];
                }
                D[x] = castOp(s0); D[x+1] = castOp(s1);
                D[x+2] = castOp(s2); D[x+3] = castOp(s3);
            }
#endif
            // Scalar tail.
            for( ; x < width; x++ )
            {
                ST acc = coeff[0]*((const ST*)src[0])[x] + bias;
                for( int k = 1; k < taps; k++ )
                    acc += coeff[k]*((const ST*)src[k])[x];
                D[x] = castOp(acc);
            }
            dst += dststep;
            src++;
        }
    }

    Mat kernel;
    CastOp castOp0;
    VecOp vecOp;
    ST delta;
};
// Column filter that exploits kernel symmetry: for a symmetric kernel the
// taps at +/-k share a coefficient (sum of rows), for an antisymmetric kernel
// the center tap is zero and +/-k rows are differenced.
template<class CastOp, class VecOp> struct SymmColumnFilter : public ColumnFilter<CastOp, VecOp>
{
    typedef typename CastOp::type1 ST;
    typedef typename CastOp::rtype DT;
    SymmColumnFilter( const Mat& _kernel, int _anchor,
                      double _delta, int _symmetryType,
                      const CastOp& _castOp=CastOp(),
                      const VecOp& _vecOp=VecOp())
        : ColumnFilter<CastOp, VecOp>( _kernel, _anchor, _delta, _castOp, _vecOp )
    {
        symmetryType = _symmetryType;
        CV_Assert( (symmetryType & (KERNEL_SYMMETRICAL | KERNEL_ASYMMETRICAL)) != 0 );
    }
    void operator()(const uchar** src, uchar* dst, int dststep, int count, int width)
    {
        int ksize2 = this->ksize/2;
        // ky points at the central tap; ky[k] covers rows src[k] and src[-k].
        const ST* ky = this->kernel.template ptr<ST>() + ksize2;
        int i, k;
        bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0;
        ST _delta = this->delta;
        CastOp castOp = this->castOp0;
        // Shift src so that src[0] is the central row and src[+/-k] its neighbors.
        src += ksize2;
        if( symmetrical )
        {
            for( ; count--; dst += dststep, src++ )
            {
                DT* D = (DT*)dst;
                i = (this->vecOp)(src, dst, width);
#if CV_ENABLE_UNROLLED
                for( ; i <= width - 4; i += 4 )
                {
                    ST f = ky[0];
                    const ST* S = (const ST*)src[0] + i, *S2;
                    ST s0 = f*S[0] + _delta, s1 = f*S[1] + _delta,
                        s2 = f*S[2] + _delta, s3 = f*S[3] + _delta;
                    for( k = 1; k <= ksize2; k++ )
                    {
                        S = (const ST*)src[k] + i;
                        S2 = (const ST*)src[-k] + i;
                        f = ky[k];
                        s0 += f*(S[0] + S2[0]);
                        s1 += f*(S[1] + S2[1]);
                        s2 += f*(S[2] + S2[2]);
                        s3 += f*(S[3] + S2[3]);
                    }
                    D[i] = castOp(s0); D[i+1] = castOp(s1);
                    D[i+2] = castOp(s2); D[i+3] = castOp(s3);
                }
#endif
                for( ; i < width; i++ )
                {
                    ST s0 = ky[0]*((const ST*)src[0])[i] + _delta;
                    for( k = 1; k <= ksize2; k++ )
                        s0 += ky[k]*(((const ST*)src[k])[i] + ((const ST*)src[-k])[i]);
                    D[i] = castOp(s0);
                }
            }
        }
        else
        {
            // Antisymmetric: center contribution is zero, accumulate
            // ky[k]*(src[k] - src[-k]).
            for( ; count--; dst += dststep, src++ )
            {
                DT* D = (DT*)dst;
                i = this->vecOp(src, dst, width);
#if CV_ENABLE_UNROLLED
                for( ; i <= width - 4; i += 4 )
                {
                    ST f = ky[0];
                    const ST *S, *S2;
                    ST s0 = _delta, s1 = _delta, s2 = _delta, s3 = _delta;
                    for( k = 1; k <= ksize2; k++ )
                    {
                        S = (const ST*)src[k] + i;
                        S2 = (const ST*)src[-k] + i;
                        f = ky[k];
                        s0 += f*(S[0] - S2[0]);
                        s1 += f*(S[1] - S2[1]);
                        s2 += f*(S[2] - S2[2]);
                        s3 += f*(S[3] - S2[3]);
                    }
                    D[i] = castOp(s0); D[i+1] = castOp(s1);
                    D[i+2] = castOp(s2); D[i+3] = castOp(s3);
                }
#endif
                for( ; i < width; i++ )
                {
                    ST s0 = _delta;
                    for( k = 1; k <= ksize2; k++ )
                        s0 += ky[k]*(((const ST*)src[k])[i] - ((const ST*)src[-k])[i]);
                    D[i] = castOp(s0);
                }
            }
        }
    }
    int symmetryType;
};
// 3-tap specialization of the symmetric column filter with fully unrolled
// fast paths for the common kernels [1 2 1], [1 -2 1] and [-1 0 1].
template<class CastOp, class VecOp>
struct SymmColumnSmallFilter : public SymmColumnFilter<CastOp, VecOp>
{
    typedef typename CastOp::type1 ST;
    typedef typename CastOp::rtype DT;
    SymmColumnSmallFilter( const Mat& _kernel, int _anchor,
                           double _delta, int _symmetryType,
                           const CastOp& _castOp=CastOp(),
                           const VecOp& _vecOp=VecOp())
        : SymmColumnFilter<CastOp, VecOp>( _kernel, _anchor, _delta, _symmetryType, _castOp, _vecOp )
    {
        CV_Assert( this->ksize == 3 );
    }
    void operator()(const uchar** src, uchar* dst, int dststep, int count, int width)
    {
        int ksize2 = this->ksize/2;
        const ST* ky = this->kernel.template ptr<ST>() + ksize2;
        int i;
        bool symmetrical = (this->symmetryType & KERNEL_SYMMETRICAL) != 0;
        // Recognize the hardwired kernels (taps listed top-to-bottom).
        bool is_1_2_1 = ky[0] == 2 && ky[1] == 1;
        bool is_1_m2_1 = ky[0] == -2 && ky[1] == 1;
        bool is_m1_0_1 = ky[0] == 0 && (ky[1] == 1 || ky[1] == -1);
        ST f0 = ky[0], f1 = ky[1];
        ST _delta = this->delta;
        CastOp castOp = this->castOp0;
        src += ksize2;
        for( ; count--; dst += dststep, src++ )
        {
            DT* D = (DT*)dst;
            i = (this->vecOp)(src, dst, width);
            // S1 is the central row, S0/S2 the rows above/below it.
            const ST* S0 = (const ST*)src[-1];
            const ST* S1 = (const ST*)src[0];
            const ST* S2 = (const ST*)src[1];
            if( symmetrical )
            {
                if( is_1_2_1 )
                {
#if CV_ENABLE_UNROLLED
                    for( ; i <= width - 4; i += 4 )
                    {
                        ST s0 = S0[i] + S1[i]*2 + S2[i] + _delta;
                        ST s1 = S0[i+1] + S1[i+1]*2 + S2[i+1] + _delta;
                        D[i] = castOp(s0);
                        D[i+1] = castOp(s1);
                        s0 = S0[i+2] + S1[i+2]*2 + S2[i+2] + _delta;
                        s1 = S0[i+3] + S1[i+3]*2 + S2[i+3] + _delta;
                        D[i+2] = castOp(s0);
                        D[i+3] = castOp(s1);
                    }
#endif
                    for( ; i < width; i ++ )
                    {
                        ST s0 = S0[i] + S1[i]*2 + S2[i] + _delta;
                        D[i] = castOp(s0);
                    }
                }
                else if( is_1_m2_1 )
                {
#if CV_ENABLE_UNROLLED
                    for( ; i <= width - 4; i += 4 )
                    {
                        ST s0 = S0[i] - S1[i]*2 + S2[i] + _delta;
                        ST s1 = S0[i+1] - S1[i+1]*2 + S2[i+1] + _delta;
                        D[i] = castOp(s0);
                        D[i+1] = castOp(s1);
                        s0 = S0[i+2] - S1[i+2]*2 + S2[i+2] + _delta;
                        s1 = S0[i+3] - S1[i+3]*2 + S2[i+3] + _delta;
                        D[i+2] = castOp(s0);
                        D[i+3] = castOp(s1);
                    }
#endif
                    for( ; i < width; i ++ )
                    {
                        ST s0 = S0[i] - S1[i]*2 + S2[i] + _delta;
                        D[i] = castOp(s0);
                    }
                }
                else
                {
                    // General symmetric 3-tap: f0*center + f1*(above+below).
#if CV_ENABLE_UNROLLED
                    for( ; i <= width - 4; i += 4 )
                    {
                        ST s0 = (S0[i] + S2[i])*f1 + S1[i]*f0 + _delta;
                        ST s1 = (S0[i+1] + S2[i+1])*f1 + S1[i+1]*f0 + _delta;
                        D[i] = castOp(s0);
                        D[i+1] = castOp(s1);
                        s0 = (S0[i+2] + S2[i+2])*f1 + S1[i+2]*f0 + _delta;
                        s1 = (S0[i+3] + S2[i+3])*f1 + S1[i+3]*f0 + _delta;
                        D[i+2] = castOp(s0);
                        D[i+3] = castOp(s1);
                    }
#endif
                    for( ; i < width; i ++ )
                    {
                        ST s0 = (S0[i] + S2[i])*f1 + S1[i]*f0 + _delta;
                        D[i] = castOp(s0);
                    }
                }
            }
            else
            {
                if( is_m1_0_1 )
                {
                    // [-1 0 1] or [1 0 -1]: swap rows so the loop always
                    // computes S2 - S0.
                    if( f1 < 0 )
                        std::swap(S0, S2);
#if CV_ENABLE_UNROLLED
                    for( ; i <= width - 4; i += 4 )
                    {
                        ST s0 = S2[i] - S0[i] + _delta;
                        ST s1 = S2[i+1] - S0[i+1] + _delta;
                        D[i] = castOp(s0);
                        D[i+1] = castOp(s1);
                        s0 = S2[i+2] - S0[i+2] + _delta;
                        s1 = S2[i+3] - S0[i+3] + _delta;
                        D[i+2] = castOp(s0);
                        D[i+3] = castOp(s1);
                    }
#endif
                    for( ; i < width; i ++ )
                    {
                        ST s0 = S2[i] - S0[i] + _delta;
                        D[i] = castOp(s0);
                    }
                    // NOTE(review): this swap-back looks redundant — S0/S2 are
                    // re-read from src at the top of the next iteration; verify.
                    if( f1 < 0 )
                        std::swap(S0, S2);
                }
                else
                {
                    // General antisymmetric 3-tap: f1*(below - above).
#if CV_ENABLE_UNROLLED
                    for( ; i <= width - 4; i += 4 )
                    {
                        ST s0 = (S2[i] - S0[i])*f1 + _delta;
                        ST s1 = (S2[i+1] - S0[i+1])*f1 + _delta;
                        D[i] = castOp(s0);
                        D[i+1] = castOp(s1);
                        s0 = (S2[i+2] - S0[i+2])*f1 + _delta;
                        s1 = (S2[i+3] - S0[i+3])*f1 + _delta;
                        D[i+2] = castOp(s0);
                        D[i+3] = castOp(s1);
                    }
#endif
                    for( ; i < width; i++ )
                        D[i] = castOp((S2[i] - S0[i])*f1 + _delta);
                }
            }
        }
    }
};
// Converts the wide intermediate type ST to the destination type DT with
// saturation (used as the CastOp of the generic column filters).
template<typename ST, typename DT> struct Cast
{
    typedef ST type1;
    typedef DT rtype;

    DT operator()(ST val) const
    {
        return saturate_cast<DT>(val);
    }
};
// Fixed-point downscale with compile-time shift: adds half an LSB for
// rounding, shifts out the fractional bits, then saturates to DT.
template<typename ST, typename DT, int bits> struct FixedPtCast
{
    typedef ST type1;
    typedef DT rtype;
    enum { SHIFT = bits, DELTA = 1 << (bits-1) };

    DT operator()(ST val) const
    {
        return saturate_cast<DT>((val + DELTA) >> SHIFT);
    }
};
// Like FixedPtCast, but the shift amount is chosen at run time; bits == 0
// degenerates to a plain saturating cast (DELTA == 0, SHIFT == 0).
template<typename ST, typename DT> struct FixedPtCastEx
{
    typedef ST type1;
    typedef DT rtype;

    FixedPtCastEx() : SHIFT(0), DELTA(0) {}
    FixedPtCastEx(int bits) : SHIFT(bits), DELTA(bits ? 1 << (bits-1) : 0) {}

    DT operator()(ST val) const
    {
        return saturate_cast<DT>((val + DELTA) >> SHIFT);
    }

    int SHIFT, DELTA;
};
}
// Creates the horizontal-pass filter for a separable convolution. Dispatches
// on (source depth, buffer depth): small symmetric kernels get the unrolled
// SymmRowSmallFilter with a possible SIMD functor, everything else falls back
// to the generic RowFilter. Throws CV_StsNotImplemented for unsupported
// depth combinations.
cv::Ptr<cv::BaseRowFilter> cv::getLinearRowFilter( int srcType, int bufType,
                                                   InputArray _kernel, int anchor,
                                                   int symmetryType )
{
    Mat kernel = _kernel.getMat();
    int sdepth = CV_MAT_DEPTH(srcType), ddepth = CV_MAT_DEPTH(bufType);
    int cn = CV_MAT_CN(srcType);
    // The buffer must be at least CV_32S wide and match the kernel depth.
    CV_Assert( cn == CV_MAT_CN(bufType) &&
        ddepth >= std::max(sdepth, CV_32S) &&
        kernel.type() == ddepth );
    int ksize = kernel.rows + kernel.cols - 1;
    // Small (anti)symmetric kernels: use the specialized unrolled filter.
    if( (symmetryType & (KERNEL_SYMMETRICAL|KERNEL_ASYMMETRICAL)) != 0 && ksize <= 5 )
    {
        if( sdepth == CV_8U && ddepth == CV_32S )
            return makePtr<SymmRowSmallFilter<uchar, int, SymmRowSmallVec_8u32s> >
                (kernel, anchor, symmetryType, SymmRowSmallVec_8u32s(kernel, symmetryType));
        if( sdepth == CV_32F && ddepth == CV_32F )
            return makePtr<SymmRowSmallFilter<float, float, SymmRowSmallVec_32f> >
                (kernel, anchor, symmetryType, SymmRowSmallVec_32f(kernel, symmetryType));
    }
    // Generic path, one instantiation per supported depth pair.
    if( sdepth == CV_8U && ddepth == CV_32S )
        return makePtr<RowFilter<uchar, int, RowVec_8u32s> >
            (kernel, anchor, RowVec_8u32s(kernel));
    if( sdepth == CV_8U && ddepth == CV_32F )
        return makePtr<RowFilter<uchar, float, RowNoVec> >(kernel, anchor);
    if( sdepth == CV_8U && ddepth == CV_64F )
        return makePtr<RowFilter<uchar, double, RowNoVec> >(kernel, anchor);
    if( sdepth == CV_16U && ddepth == CV_32F )
        return makePtr<RowFilter<ushort, float, RowNoVec> >(kernel, anchor);
    if( sdepth == CV_16U && ddepth == CV_64F )
        return makePtr<RowFilter<ushort, double, RowNoVec> >(kernel, anchor);
    if( sdepth == CV_16S && ddepth == CV_32F )
        return makePtr<RowFilter<short, float, RowVec_16s32f> >
            (kernel, anchor, RowVec_16s32f(kernel));
    if( sdepth == CV_16S && ddepth == CV_64F )
        return makePtr<RowFilter<short, double, RowNoVec> >(kernel, anchor);
    if( sdepth == CV_32F && ddepth == CV_32F )
        return makePtr<RowFilter<float, float, RowVec_32f> >
            (kernel, anchor, RowVec_32f(kernel));
    if( sdepth == CV_32F && ddepth == CV_64F )
        return makePtr<RowFilter<float, double, RowNoVec> >(kernel, anchor);
    if( sdepth == CV_64F && ddepth == CV_64F )
        return makePtr<RowFilter<double, double, RowNoVec> >(kernel, anchor);
    CV_Error_( CV_StsNotImplemented,
        ("Unsupported combination of source format (=%d), and buffer format (=%d)",
        srcType, bufType));
    return Ptr<BaseRowFilter>();
}
// Creates the vertical-pass filter for a separable convolution. For
// non-symmetric kernels a plain ColumnFilter is used; (anti)symmetric kernels
// get SymmColumnFilter, with the 3-tap SymmColumnSmallFilter fast path where
// applicable. `bits` is the number of fractional bits when the buffer holds
// fixed-point CV_32S values. Throws CV_StsNotImplemented for unsupported
// depth combinations.
cv::Ptr<cv::BaseColumnFilter> cv::getLinearColumnFilter( int bufType, int dstType,
                                                         InputArray _kernel, int anchor,
                                                         int symmetryType, double delta,
                                                         int bits )
{
    Mat kernel = _kernel.getMat();
    int sdepth = CV_MAT_DEPTH(bufType), ddepth = CV_MAT_DEPTH(dstType);
    int cn = CV_MAT_CN(dstType);
    // The buffer must be at least CV_32S wide and match the kernel depth.
    CV_Assert( cn == CV_MAT_CN(bufType) &&
        sdepth >= std::max(ddepth, CV_32S) &&
        kernel.type() == sdepth );
    if( !(symmetryType & (KERNEL_SYMMETRICAL|KERNEL_ASYMMETRICAL)) )
    {
        // General (non-symmetric) kernel.
        if( ddepth == CV_8U && sdepth == CV_32S )
            return makePtr<ColumnFilter<FixedPtCastEx<int, uchar>, ColumnNoVec> >
                (kernel, anchor, delta, FixedPtCastEx<int, uchar>(bits));
        if( ddepth == CV_8U && sdepth == CV_32F )
            return makePtr<ColumnFilter<Cast<float, uchar>, ColumnNoVec> >(kernel, anchor, delta);
        if( ddepth == CV_8U && sdepth == CV_64F )
            return makePtr<ColumnFilter<Cast<double, uchar>, ColumnNoVec> >(kernel, anchor, delta);
        if( ddepth == CV_16U && sdepth == CV_32F )
            return makePtr<ColumnFilter<Cast<float, ushort>, ColumnNoVec> >(kernel, anchor, delta);
        if( ddepth == CV_16U && sdepth == CV_64F )
            return makePtr<ColumnFilter<Cast<double, ushort>, ColumnNoVec> >(kernel, anchor, delta);
        if( ddepth == CV_16S && sdepth == CV_32F )
            return makePtr<ColumnFilter<Cast<float, short>, ColumnNoVec> >(kernel, anchor, delta);
        if( ddepth == CV_16S && sdepth == CV_64F )
            return makePtr<ColumnFilter<Cast<double, short>, ColumnNoVec> >(kernel, anchor, delta);
        if( ddepth == CV_32F && sdepth == CV_32F )
            return makePtr<ColumnFilter<Cast<float, float>, ColumnNoVec> >(kernel, anchor, delta);
        if( ddepth == CV_64F && sdepth == CV_64F )
            return makePtr<ColumnFilter<Cast<double, double>, ColumnNoVec> >(kernel, anchor, delta);
    }
    else
    {
        int ksize = kernel.rows + kernel.cols - 1;
        // 3-tap (anti)symmetric kernels: fully unrolled fast path.
        if( ksize == 3 )
        {
            if( ddepth == CV_8U && sdepth == CV_32S )
                return makePtr<SymmColumnSmallFilter<
                        FixedPtCastEx<int, uchar>, SymmColumnVec_32s8u> >
                        (kernel, anchor, delta, symmetryType, FixedPtCastEx<int, uchar>(bits),
                        SymmColumnVec_32s8u(kernel, symmetryType, bits, delta));
            if( ddepth == CV_16S && sdepth == CV_32S && bits == 0 )
                return makePtr<SymmColumnSmallFilter<Cast<int, short>,
                        SymmColumnSmallVec_32s16s> >(kernel, anchor, delta, symmetryType,
                        Cast<int, short>(), SymmColumnSmallVec_32s16s(kernel, symmetryType, bits, delta));
            if( ddepth == CV_32F && sdepth == CV_32F )
                return makePtr<SymmColumnSmallFilter<
                        Cast<float, float>,SymmColumnSmallVec_32f> >
                        (kernel, anchor, delta, symmetryType, Cast<float, float>(),
                        SymmColumnSmallVec_32f(kernel, symmetryType, 0, delta));
        }
        // General (anti)symmetric kernels.
        if( ddepth == CV_8U && sdepth == CV_32S )
            return makePtr<SymmColumnFilter<FixedPtCastEx<int, uchar>, SymmColumnVec_32s8u> >
                (kernel, anchor, delta, symmetryType, FixedPtCastEx<int, uchar>(bits),
                SymmColumnVec_32s8u(kernel, symmetryType, bits, delta));
        if( ddepth == CV_8U && sdepth == CV_32F )
            return makePtr<SymmColumnFilter<Cast<float, uchar>, ColumnNoVec> >
                (kernel, anchor, delta, symmetryType);
        if( ddepth == CV_8U && sdepth == CV_64F )
            return makePtr<SymmColumnFilter<Cast<double, uchar>, ColumnNoVec> >
                (kernel, anchor, delta, symmetryType);
        if( ddepth == CV_16U && sdepth == CV_32F )
            return makePtr<SymmColumnFilter<Cast<float, ushort>, ColumnNoVec> >
                (kernel, anchor, delta, symmetryType);
        if( ddepth == CV_16U && sdepth == CV_64F )
            return makePtr<SymmColumnFilter<Cast<double, ushort>, ColumnNoVec> >
                (kernel, anchor, delta, symmetryType);
        if( ddepth == CV_16S && sdepth == CV_32S )
            return makePtr<SymmColumnFilter<Cast<int, short>, ColumnNoVec> >
                (kernel, anchor, delta, symmetryType);
        if( ddepth == CV_16S && sdepth == CV_32F )
            return makePtr<SymmColumnFilter<Cast<float, short>, SymmColumnVec_32f16s> >
                 (kernel, anchor, delta, symmetryType, Cast<float, short>(),
                  SymmColumnVec_32f16s(kernel, symmetryType, 0, delta));
        if( ddepth == CV_16S && sdepth == CV_64F )
            return makePtr<SymmColumnFilter<Cast<double, short>, ColumnNoVec> >
                (kernel, anchor, delta, symmetryType);
        if( ddepth == CV_32F && sdepth == CV_32F )
            return makePtr<SymmColumnFilter<Cast<float, float>, SymmColumnVec_32f> >
                (kernel, anchor, delta, symmetryType, Cast<float, float>(),
                SymmColumnVec_32f(kernel, symmetryType, 0, delta));
        if( ddepth == CV_64F && sdepth == CV_64F )
            return makePtr<SymmColumnFilter<Cast<double, double>, ColumnNoVec> >
                (kernel, anchor, delta, symmetryType);
    }
    CV_Error_( CV_StsNotImplemented,
        ("Unsupported combination of buffer format (=%d), and destination format (=%d)",
        bufType, dstType));
    return Ptr<BaseColumnFilter>();
}
// Builds a complete FilterEngine for a separable convolution: classifies both
// 1D kernels, picks an intermediate buffer depth (fixed-point CV_32S for the
// common 8-bit cases, float/double otherwise), converts the kernels to that
// depth, and wires up the row and column filters.
cv::Ptr<cv::FilterEngine> cv::createSeparableLinearFilter(
    int _srcType, int _dstType,
    InputArray __rowKernel, InputArray __columnKernel,
    Point _anchor, double _delta,
    int _rowBorderType, int _columnBorderType,
    const Scalar& _borderValue )
{
    Mat _rowKernel = __rowKernel.getMat(), _columnKernel = __columnKernel.getMat();
    _srcType = CV_MAT_TYPE(_srcType);
    _dstType = CV_MAT_TYPE(_dstType);
    int sdepth = CV_MAT_DEPTH(_srcType), ddepth = CV_MAT_DEPTH(_dstType);
    int cn = CV_MAT_CN(_srcType);
    CV_Assert( cn == CV_MAT_CN(_dstType) );
    int rsize = _rowKernel.rows + _rowKernel.cols - 1;
    int csize = _columnKernel.rows + _columnKernel.cols - 1;
    // Negative anchor means "center of the kernel".
    if( _anchor.x < 0 )
        _anchor.x = rsize/2;
    if( _anchor.y < 0 )
        _anchor.y = csize/2;
    // Classify the kernels (symmetric/asymmetric/smooth/integer...).
    int rtype = getKernelType(_rowKernel,
        _rowKernel.rows == 1 ? Point(_anchor.x, 0) : Point(0, _anchor.x));
    int ctype = getKernelType(_columnKernel,
        _columnKernel.rows == 1 ? Point(_anchor.y, 0) : Point(0, _anchor.y));
    Mat rowKernel, columnKernel;
    int bdepth = std::max(CV_32F,std::max(sdepth, ddepth));
    int bits = 0;
    // Fixed-point fast path for 8-bit input: 8U->8U smoothing, or
    // integer-(anti)symmetric kernels producing 16S (e.g. Sobel).
    if( sdepth == CV_8U &&
        ((rtype == KERNEL_SMOOTH+KERNEL_SYMMETRICAL &&
          ctype == KERNEL_SMOOTH+KERNEL_SYMMETRICAL &&
          ddepth == CV_8U) ||
         ((rtype & (KERNEL_SYMMETRICAL+KERNEL_ASYMMETRICAL)) &&
          (ctype & (KERNEL_SYMMETRICAL+KERNEL_ASYMMETRICAL)) &&
          (rtype & ctype & KERNEL_INTEGER) &&
          ddepth == CV_16S)) )
    {
        bdepth = CV_32S;
        // Each pass scales by 1<<bits, so the final result carries 2*bits
        // fractional bits; delta must be pre-scaled accordingly.
        bits = ddepth == CV_8U ? 8 : 0;
        _rowKernel.convertTo( rowKernel, CV_32S, 1 << bits );
        _columnKernel.convertTo( columnKernel, CV_32S, 1 << bits );
        bits *= 2;
        _delta *= (1 << bits);
    }
    else
    {
        // Floating-point path: bring both kernels to the buffer depth.
        if( _rowKernel.type() != bdepth )
            _rowKernel.convertTo( rowKernel, bdepth );
        else
            rowKernel = _rowKernel;
        if( _columnKernel.type() != bdepth )
            _columnKernel.convertTo( columnKernel, bdepth );
        else
            columnKernel = _columnKernel;
    }
    int _bufType = CV_MAKETYPE(bdepth, cn);
    Ptr<BaseRowFilter> _rowFilter = getLinearRowFilter(
        _srcType, _bufType, rowKernel, _anchor.x, rtype);
    Ptr<BaseColumnFilter> _columnFilter = getLinearColumnFilter(
        _bufType, _dstType, columnKernel, _anchor.y, ctype, _delta, bits );
    return Ptr<FilterEngine>( new FilterEngine(Ptr<BaseFilter>(), _rowFilter, _columnFilter,
        _srcType, _dstType, _bufType, _rowBorderType, _columnBorderType, _borderValue ));
}
/****************************************************************************************\
* Non-separable linear filter *
\****************************************************************************************/
namespace cv
{
void preprocess2DKernel( const Mat& kernel, std::vector<Point>& coords, std::vector<uchar>& coeffs )
{
int i, j, k, nz = countNonZero(kernel), ktype = kernel.type();
if(nz == 0)
nz = 1;
CV_Assert( ktype == CV_8U || ktype == CV_32S || ktype == CV_32F || ktype == CV_64F );
coords.resize(nz);
coeffs.resize(nz*getElemSize(ktype));
uchar* _coeffs = &coeffs[0];
for( i = k = 0; i < kernel.rows; i++ )
{
const uchar* krow = kernel.ptr(i);
for( j = 0; j < kernel.cols; j++ )
{
if( ktype == CV_8U )
{
uchar val = krow[j];
if( val == 0 )
continue;
coords[k] = Point(j,i);
_coeffs[k++] = val;
}
else if( ktype == CV_32S )
{
int val = ((const int*)krow)[j];
if( val == 0 )
continue;
coords[k] = Point(j,i);
((int*)_coeffs)[k++] = val;
}
else if( ktype == CV_32F )
{
float val = ((const float*)krow)[j];
if( val == 0 )
continue;
coords[k] = Point(j,i);
((float*)_coeffs)[k++] = val;
}
else
{
double val = ((const double*)krow)[j];
if( val == 0 )
continue;
coords[k] = Point(j,i);
((double*)_coeffs)[k++] = val;
}
}
}
}
template<typename ST, class CastOp, class VecOp> struct Filter2D : public BaseFilter
{
typedef typename CastOp::type1 KT;
typedef typename CastOp::rtype DT;
Filter2D( const Mat& _kernel, Point _anchor,
double _delta, const CastOp& _castOp=CastOp(),
const VecOp& _vecOp=VecOp() )
{
anchor = _anchor;
ksize = _kernel.size();
delta = saturate_cast<KT>(_delta);
castOp0 = _castOp;
vecOp = _vecOp;
CV_Assert( _kernel.type() == DataType<KT>::type );
preprocess2DKernel( _kernel, coords, coeffs );
ptrs.resize( coords.size() );
}
void operator()(const uchar** src, uchar* dst, int dststep, int count, int width, int cn)
{
KT _delta = delta;
const Point* pt = &coords[0];
const KT* kf = (const KT*)&coeffs[0];
const ST** kp = (const ST**)&ptrs[0];
int i, k, nz = (int)coords.size();
CastOp castOp = castOp0;
width *= cn;
for( ; count > 0; count--, dst += dststep, src++ )
{
DT* D = (DT*)dst;
for( k = 0; k < nz; k++ )
kp[k] = (const ST*)src[pt[k].y] + pt[k].x*cn;
i = vecOp((const uchar**)kp, dst, width);
#if CV_ENABLE_UNROLLED
for( ; i <= width - 4; i += 4 )
{
KT s0 = _delta, s1 = _delta, s2 = _delta, s3 = _delta;
for( k = 0; k < nz; k++ )
{
const ST* sptr = kp[k] + i;
KT f = kf[k];
s0 += f*sptr[0];
s1 += f*sptr[1];
s2 += f*sptr[2];
s3 += f*sptr[3];
}
D[i] = castOp(s0); D[i+1] = castOp(s1);
D[i+2] = castOp(s2); D[i+3] = castOp(s3);
}
#endif
for( ; i < width; i++ )
{
KT s0 = _delta;
for( k = 0; k < nz; k++ )
s0 += kf[k]*kp[k][i];
D[i] = castOp(s0);
}
}
}
std::vector<Point> coords;
std::vector<uchar> coeffs;
std::vector<uchar*> ptrs;
KT delta;
CastOp castOp0;
VecOp vecOp;
};
#ifdef HAVE_OPENCL
#define DIVUP(total, grain) (((total) + (grain) - 1) / (grain))
#define ROUNDUP(sz, n) ((sz) + (n) - 1 - (((sz) + (n) - 1) % (n)))
// prepare kernel: transpose and make double rows (+align). Returns size of aligned row
// Samples:
// a b c
// Input: d e f
// g h i
// Output, last two zeros is the alignment:
// a d g a d g 0 0
// b e h b e h 0 0
// c f i c f i 0 0
template <typename T>
static int _prepareKernelFilter2D(std::vector<T> & data, const Mat & kernel)
{
Mat _kernel; kernel.convertTo(_kernel, DataDepth<T>::value);
int size_y_aligned = ROUNDUP(kernel.rows * 2, 4);
data.clear(); data.resize(size_y_aligned * kernel.cols, 0);
for (int x = 0; x < kernel.cols; x++)
{
for (int y = 0; y < kernel.rows; y++)
{
data[x * size_y_aligned + y] = _kernel.at<T>(y, x);
data[x * size_y_aligned + y + kernel.rows] = _kernel.at<T>(y, x);
}
}
return size_y_aligned;
}
// OpenCL implementation of filter2D. Picks one of two kernels:
//  - "filter2DSmall": specialized path for small kernels on Intel GPUs,
//    computing several pixels per work item;
//  - "filter2D": the general path, whose workgroup width (BLOCK_SIZE) is
//    negotiated in a retry loop against the device's reported limit.
// Returns false to make the caller fall back to the CPU implementation.
static bool ocl_filter2D( InputArray _src, OutputArray _dst, int ddepth,
                   InputArray _kernel, Point anchor,
                   double delta, int borderType )
{
    int type = _src.type(), sdepth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
    ddepth = ddepth < 0 ? sdepth : ddepth;
    // accumulate in at least CV_32F so integer inputs don't overflow
    int dtype = CV_MAKE_TYPE(ddepth, cn), wdepth = std::max(std::max(sdepth, ddepth), CV_32F),
        wtype = CV_MAKE_TYPE(wdepth, cn);
    if (cn > 4)
        return false;

    Size ksize = _kernel.size();
    // negative anchor means "center of kernel"
    if (anchor.x < 0)
        anchor.x = ksize.width / 2;
    if (anchor.y < 0)
        anchor.y = ksize.height / 2;

    bool isolated = (borderType & BORDER_ISOLATED) != 0;
    borderType &= ~BORDER_ISOLATED;
    const cv::ocl::Device &device = cv::ocl::Device::getDefault();
    bool doubleSupport = device.doubleFPConfig() > 0;
    if (wdepth == CV_64F && !doubleSupport)
        return false;

    // index must line up with the cv::BORDER_* enum values
    const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT",
                                       "BORDER_WRAP", "BORDER_REFLECT_101" };

    cv::Mat kernelMat = _kernel.getMat();
    cv::Size sz = _src.size(), wholeSize;
    size_t globalsize[2] = { (size_t)sz.width, (size_t)sz.height };
    size_t localsize_general[2] = {0, 1};
    size_t* localsize = NULL;   // NULL = let the runtime choose (small path)

    ocl::Kernel k;
    UMat src = _src.getUMat();
    if (!isolated)
    {
        // non-isolated ROI: border pixels may come from the parent matrix
        Point ofs;
        src.locateROI(wholeSize, ofs);
    }

    size_t tryWorkItems = device.maxWorkGroupSize();
    if (device.isIntel() && 128 < tryWorkItems)
        tryWorkItems = 128;
    char cvt[2][40];

    // For smaller filter kernels, there is a special kernel that is more
    // efficient than the general one.
    UMat kernalDataUMat;   // NOTE(review): declared but never used here
    if (device.isIntel() && (device.type() & ocl::Device::TYPE_GPU) &&
        ((ksize.width < 5 && ksize.height < 5) ||
        (ksize.width == 5 && ksize.height == 5 && cn == 1)))
    {
        kernelMat = kernelMat.reshape(0, 1);
        String kerStr = ocl::kernelToStr(kernelMat, CV_32F);
        int h = isolated ? sz.height : wholeSize.height;
        int w = isolated ? sz.width : wholeSize.width;
        if (w < ksize.width || h < ksize.height)
            return false;

        // Figure out what vector size to use for loading the pixels.
        int pxLoadNumPixels = cn != 1 || sz.width % 4 ? 1 : 4;
        int pxLoadVecSize = cn * pxLoadNumPixels;

        // Figure out how many pixels per work item to compute in X and Y
        // directions.  Too many and we run out of registers.
        int pxPerWorkItemX = 1;
        int pxPerWorkItemY = 1;
        if (cn <= 2 && ksize.width <= 4 && ksize.height <= 4)
        {
            // largest power-of-two tile width that divides the image width
            pxPerWorkItemX = sz.width % 8 ? sz.width % 4 ? sz.width % 2 ? 1 : 2 : 4 : 8;
            pxPerWorkItemY = sz.height % 2 ? 1 : 2;
        }
        else if (cn < 4 || (ksize.width <= 4 && ksize.height <= 4))
        {
            pxPerWorkItemX = sz.width % 2 ? 1 : 2;
            pxPerWorkItemY = sz.height % 2 ? 1 : 2;
        }
        globalsize[0] = sz.width / pxPerWorkItemX;
        globalsize[1] = sz.height / pxPerWorkItemY;

        // Need some padding in the private array for pixels
        int privDataWidth = ROUNDUP(pxPerWorkItemX + ksize.width - 1, pxLoadNumPixels);

        // Make the global size a nice round number so the runtime can pick
        // from reasonable choices for the workgroup size
        const int wgRound = 256;
        globalsize[0] = ROUNDUP(globalsize[0], wgRound);

        char build_options[1024];
        sprintf(build_options, "-D cn=%d "
                "-D ANCHOR_X=%d -D ANCHOR_Y=%d -D KERNEL_SIZE_X=%d -D KERNEL_SIZE_Y=%d "
                "-D PX_LOAD_VEC_SIZE=%d -D PX_LOAD_NUM_PX=%d "
                "-D PX_PER_WI_X=%d -D PX_PER_WI_Y=%d -D PRIV_DATA_WIDTH=%d -D %s -D %s "
                "-D PX_LOAD_X_ITERATIONS=%d -D PX_LOAD_Y_ITERATIONS=%d "
                "-D srcT=%s -D srcT1=%s -D dstT=%s -D dstT1=%s -D WT=%s -D WT1=%s "
                "-D convertToWT=%s -D convertToDstT=%s %s",
                cn, anchor.x, anchor.y, ksize.width, ksize.height,
                pxLoadVecSize, pxLoadNumPixels,
                pxPerWorkItemX, pxPerWorkItemY, privDataWidth, borderMap[borderType],
                isolated ? "BORDER_ISOLATED" : "NO_BORDER_ISOLATED",
                privDataWidth / pxLoadNumPixels, pxPerWorkItemY + ksize.height - 1,
                ocl::typeToStr(type), ocl::typeToStr(sdepth), ocl::typeToStr(dtype),
                ocl::typeToStr(ddepth), ocl::typeToStr(wtype), ocl::typeToStr(wdepth),
                ocl::convertTypeStr(sdepth, wdepth, cn, cvt[0]),
                ocl::convertTypeStr(wdepth, ddepth, cn, cvt[1]), kerStr.c_str());

        if (!k.create("filter2DSmall", cv::ocl::imgproc::filter2DSmall_oclsrc, build_options))
            return false;
    }
    else
    {
        localsize = localsize_general;
        std::vector<float> kernelMatDataFloat;
        int kernel_size_y2_aligned = _prepareKernelFilter2D<float>(kernelMatDataFloat, kernelMat);
        String kerStr = ocl::kernelToStr(kernelMatDataFloat, CV_32F);

        // retry with smaller BLOCK_SIZE until the compiled kernel's
        // workgroup limit is satisfied
        for ( ; ; )
        {
            size_t BLOCK_SIZE = tryWorkItems;
            while (BLOCK_SIZE > 32 && BLOCK_SIZE >= (size_t)ksize.width * 2 && BLOCK_SIZE > (size_t)sz.width * 2)
                BLOCK_SIZE /= 2;

            if ((size_t)ksize.width > BLOCK_SIZE)
                return false;

            int requiredTop = anchor.y;
            int requiredLeft = (int)BLOCK_SIZE; // not this: anchor.x;
            int requiredBottom = ksize.height - 1 - anchor.y;
            int requiredRight = (int)BLOCK_SIZE; // not this: ksize.width - 1 - anchor.x;
            int h = isolated ? sz.height : wholeSize.height;
            int w = isolated ? sz.width : wholeSize.width;
            bool extra_extrapolation = h < requiredTop || h < requiredBottom || w < requiredLeft || w < requiredRight;

            if ((w < ksize.width) || (h < ksize.height))
                return false;

            String opts = format("-D LOCAL_SIZE=%d -D cn=%d "
                                 "-D ANCHOR_X=%d -D ANCHOR_Y=%d -D KERNEL_SIZE_X=%d -D KERNEL_SIZE_Y=%d "
                                 "-D KERNEL_SIZE_Y2_ALIGNED=%d -D %s -D %s -D %s%s%s "
                                 "-D srcT=%s -D srcT1=%s -D dstT=%s -D dstT1=%s -D WT=%s -D WT1=%s "
                                 "-D convertToWT=%s -D convertToDstT=%s",
                                 (int)BLOCK_SIZE, cn, anchor.x, anchor.y,
                                 ksize.width, ksize.height, kernel_size_y2_aligned, borderMap[borderType],
                                 extra_extrapolation ? "EXTRA_EXTRAPOLATION" : "NO_EXTRA_EXTRAPOLATION",
                                 isolated ? "BORDER_ISOLATED" : "NO_BORDER_ISOLATED",
                                 doubleSupport ? " -D DOUBLE_SUPPORT" : "", kerStr.c_str(),
                                 ocl::typeToStr(type), ocl::typeToStr(sdepth), ocl::typeToStr(dtype),
                                 ocl::typeToStr(ddepth), ocl::typeToStr(wtype), ocl::typeToStr(wdepth),
                                 ocl::convertTypeStr(sdepth, wdepth, cn, cvt[0]),
                                 ocl::convertTypeStr(wdepth, ddepth, cn, cvt[1]));

            localsize[0] = BLOCK_SIZE;
            globalsize[0] = DIVUP(sz.width, BLOCK_SIZE - (ksize.width - 1)) * BLOCK_SIZE;
            globalsize[1] = sz.height;

            if (!k.create("filter2D", cv::ocl::imgproc::filter2D_oclsrc, opts))
                return false;

            size_t kernelWorkGroupSize = k.workGroupSize();
            if (localsize[0] <= kernelWorkGroupSize)
                break;
            if (BLOCK_SIZE < kernelWorkGroupSize)
                return false;
            tryWorkItems = kernelWorkGroupSize;
        }
    }

    _dst.create(sz, dtype);
    UMat dst = _dst.getUMat();

    // ROI position inside the parent buffer, in pixels
    int srcOffsetX = (int)((src.offset % src.step) / src.elemSize());
    int srcOffsetY = (int)(src.offset / src.step);
    int srcEndX = (isolated ? (srcOffsetX + sz.width) : wholeSize.width);
    int srcEndY = (isolated ? (srcOffsetY + sz.height) : wholeSize.height);

    k.args(ocl::KernelArg::PtrReadOnly(src), (int)src.step, srcOffsetX, srcOffsetY,
           srcEndX, srcEndY, ocl::KernelArg::WriteOnly(dst), (float)delta);

    return k.run(2, globalsize, localsize, false);
}
const int shift_bits = 8;
// OpenCL row pass of the separable filter: convolves src horizontally with
// kernelX into the intermediate buffer `buf` (which is taller than src so
// the column pass has its vertical apron available).
//   fast8uc1   - enables the 4-pixels-at-once CV_8UC1 specialization
//   int_arithm - kernel coefficients are pre-scaled integers (see shift_bits)
static bool ocl_sepRowFilter2D(const UMat & src, UMat & buf, const Mat & kernelX, int anchor,
                               int borderType, int ddepth, bool fast8uc1, bool int_arithm)
{
    int type = src.type(), cn = CV_MAT_CN(type), sdepth = CV_MAT_DEPTH(type);
    bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;
    Size bufSize = buf.size();
    int buf_type = buf.type(), bdepth = CV_MAT_DEPTH(buf_type);

    if (!doubleSupport && (sdepth == CV_64F || ddepth == CV_64F))
        return false;

#ifdef ANDROID
    size_t localsize[2] = {16, 10};
#else
    size_t localsize[2] = {16, 16};
#endif

    size_t globalsize[2] = {DIVUP(bufSize.width, localsize[0]) * localsize[0], DIVUP(bufSize.height, localsize[1]) * localsize[1]};
    if (fast8uc1)
        globalsize[0] = DIVUP((bufSize.width + 3) >> 2, localsize[0]) * localsize[0];   // 4 px / work item

    // radiusY: extra rows above/below contributed by the buffer's apron
    int radiusX = anchor, radiusY = (buf.rows - src.rows) >> 1;
    bool isolated = (borderType & BORDER_ISOLATED) != 0;
    const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", "BORDER_WRAP", "BORDER_REFLECT_101" },
        * const btype = borderMap[borderType & ~BORDER_ISOLATED];

    // the kernel needs a slower extrapolation branch when the image is
    // smaller than the area a workgroup may touch
    bool extra_extrapolation = src.rows < (int)((-radiusY + globalsize[1]) >> 1) + 1;
    extra_extrapolation |= src.rows < radiusY;
    extra_extrapolation |= src.cols < (int)((-radiusX + globalsize[0] + 8 * localsize[0] + 3) >> 1) + 1;
    extra_extrapolation |= src.cols < radiusX;

    char cvt[40];
    cv::String build_options = cv::format("-D RADIUSX=%d -D LSIZE0=%d -D LSIZE1=%d -D CN=%d -D %s -D %s -D %s"
                                          " -D srcT=%s -D dstT=%s -D convertToDstT=%s -D srcT1=%s -D dstT1=%s%s%s",
                                          radiusX, (int)localsize[0], (int)localsize[1], cn, btype,
                                          extra_extrapolation ? "EXTRA_EXTRAPOLATION" : "NO_EXTRA_EXTRAPOLATION",
                                          isolated ? "BORDER_ISOLATED" : "NO_BORDER_ISOLATED",
                                          ocl::typeToStr(type), ocl::typeToStr(buf_type),
                                          ocl::convertTypeStr(sdepth, bdepth, cn, cvt),
                                          ocl::typeToStr(sdepth), ocl::typeToStr(bdepth),
                                          doubleSupport ? " -D DOUBLE_SUPPORT" : "",
                                          int_arithm ? " -D INTEGER_ARITHMETIC" : "");
    build_options += ocl::kernelToStr(kernelX, bdepth);   // embed coefficients as defines

    Size srcWholeSize; Point srcOffset;
    src.locateROI(srcWholeSize, srcOffset);

    String kernelName("row_filter");
    if (fast8uc1)
        kernelName += "_C1_D0";

    ocl::Kernel k(kernelName.c_str(), cv::ocl::imgproc::filterSepRow_oclsrc,
                  build_options);
    if (k.empty())
        return false;

    if (fast8uc1)
        // step expressed in elements for the vectorized kernel
        k.args(ocl::KernelArg::PtrReadOnly(src), (int)(src.step / src.elemSize()), srcOffset.x,
               srcOffset.y, src.cols, src.rows, srcWholeSize.width, srcWholeSize.height,
               ocl::KernelArg::PtrWriteOnly(buf), (int)(buf.step / buf.elemSize()),
               buf.cols, buf.rows, radiusY);
    else
        k.args(ocl::KernelArg::PtrReadOnly(src), (int)src.step, srcOffset.x,
               srcOffset.y, src.cols, src.rows, srcWholeSize.width, srcWholeSize.height,
               ocl::KernelArg::PtrWriteOnly(buf), (int)buf.step, buf.cols, buf.rows, radiusY);

    return k.run(2, globalsize, localsize, false);
}
// OpenCL column pass of the separable filter: convolves the row-pass buffer
// vertically with kernelY, adds `delta` and writes the final result to dst.
// SHIFT_BITS compensates the integer pre-scaling applied to both kernels
// when int_arithm is on (hence 2*shift_bits).
static bool ocl_sepColFilter2D(const UMat & buf, UMat & dst, const Mat & kernelY, double delta, int anchor, bool int_arithm)
{
    bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;
    if (dst.depth() == CV_64F && !doubleSupport)
        return false;

#ifdef ANDROID
    size_t localsize[2] = { 16, 10 };
#else
    size_t localsize[2] = { 16, 16 };
#endif
    size_t globalsize[2] = { 0, 0 };

    int dtype = dst.type(), cn = CV_MAT_CN(dtype), ddepth = CV_MAT_DEPTH(dtype);
    Size sz = dst.size();
    int buf_type = buf.type(), bdepth = CV_MAT_DEPTH(buf_type);

    globalsize[1] = DIVUP(sz.height, localsize[1]) * localsize[1];
    globalsize[0] = DIVUP(sz.width, localsize[0]) * localsize[0];

    char cvt[40];
    cv::String build_options = cv::format("-D RADIUSY=%d -D LSIZE0=%d -D LSIZE1=%d -D CN=%d"
                                          " -D srcT=%s -D dstT=%s -D convertToDstT=%s"
                                          " -D srcT1=%s -D dstT1=%s -D SHIFT_BITS=%d%s%s",
                                          anchor, (int)localsize[0], (int)localsize[1], cn,
                                          ocl::typeToStr(buf_type), ocl::typeToStr(dtype),
                                          ocl::convertTypeStr(bdepth, ddepth, cn, cvt),
                                          ocl::typeToStr(bdepth), ocl::typeToStr(ddepth),
                                          2*shift_bits, doubleSupport ? " -D DOUBLE_SUPPORT" : "",
                                          int_arithm ? " -D INTEGER_ARITHMETIC" : "");
    build_options += ocl::kernelToStr(kernelY, bdepth);   // embed coefficients

    ocl::Kernel k("col_filter", cv::ocl::imgproc::filterSepCol_oclsrc,
                  build_options);
    if (k.empty())
        return false;

    k.args(ocl::KernelArg::ReadOnly(buf), ocl::KernelArg::WriteOnly(dst),
           static_cast<float>(delta));

    return k.run(2, globalsize, localsize, false);
}
const int optimizedSepFilterLocalWidth = 16;
const int optimizedSepFilterLocalHeight = 8;
// Fused single-pass OpenCL separable filter: both the row and column
// convolutions run in one kernel launch using local memory, avoiding the
// intermediate buffer of the two-pass path.  Returns false to fall back.
static bool ocl_sepFilter2D_SinglePass(InputArray _src, OutputArray _dst,
                                       Mat row_kernel, Mat col_kernel,
                                       double delta, int borderType, int ddepth, int bdepth, bool int_arithm)
{
    Size size = _src.size(), wholeSize;
    Point origin;
    int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype),
        esz = CV_ELEM_SIZE(stype), wdepth = std::max(std::max(sdepth, ddepth), bdepth),
        dtype = CV_MAKE_TYPE(ddepth, cn);
    size_t src_step = _src.step(), src_offset = _src.offset();
    bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;

    // reject: misaligned ROI start, unsupported 64F, or exotic border modes
    if ((src_offset % src_step) % esz != 0 || (!doubleSupport && (sdepth == CV_64F || ddepth == CV_64F)) ||
        !(borderType == BORDER_CONSTANT || borderType == BORDER_REPLICATE ||
          borderType == BORDER_REFLECT || borderType == BORDER_WRAP ||
          borderType == BORDER_REFLECT_101))
        return false;

    size_t lt2[2] = { optimizedSepFilterLocalWidth, optimizedSepFilterLocalHeight };
    // one workgroup-row covers the whole image height internally
    size_t gt2[2] = { lt2[0] * (1 + (size.width - 1) / lt2[0]), lt2[1]};

    char cvt[2][40];
    const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", "BORDER_WRAP",
                                       "BORDER_REFLECT_101" };

    // both kernels are compiled in as KERNEL_MATRIX_X / KERNEL_MATRIX_Y defines
    String opts = cv::format("-D BLK_X=%d -D BLK_Y=%d -D RADIUSX=%d -D RADIUSY=%d%s%s"
                             " -D srcT=%s -D convertToWT=%s -D WT=%s -D dstT=%s -D convertToDstT=%s"
                             " -D %s -D srcT1=%s -D dstT1=%s -D WT1=%s -D CN=%d -D SHIFT_BITS=%d%s",
                             (int)lt2[0], (int)lt2[1], row_kernel.cols / 2, col_kernel.cols / 2,
                             ocl::kernelToStr(row_kernel, wdepth, "KERNEL_MATRIX_X").c_str(),
                             ocl::kernelToStr(col_kernel, wdepth, "KERNEL_MATRIX_Y").c_str(),
                             ocl::typeToStr(stype), ocl::convertTypeStr(sdepth, wdepth, cn, cvt[0]),
                             ocl::typeToStr(CV_MAKE_TYPE(wdepth, cn)), ocl::typeToStr(dtype),
                             ocl::convertTypeStr(wdepth, ddepth, cn, cvt[1]), borderMap[borderType],
                             ocl::typeToStr(sdepth), ocl::typeToStr(ddepth), ocl::typeToStr(wdepth),
                             cn, 2*shift_bits, int_arithm ? " -D INTEGER_ARITHMETIC" : "");

    ocl::Kernel k("sep_filter", ocl::imgproc::filterSep_singlePass_oclsrc, opts);
    if (k.empty())
        return false;

    UMat src = _src.getUMat();
    _dst.create(size, dtype);
    UMat dst = _dst.getUMat();

    int src_offset_x = static_cast<int>((src_offset % src_step) / esz);
    int src_offset_y = static_cast<int>(src_offset / src_step);

    src.locateROI(wholeSize, origin);

    k.args(ocl::KernelArg::PtrReadOnly(src), (int)src_step, src_offset_x, src_offset_y,
           wholeSize.height, wholeSize.width, ocl::KernelArg::WriteOnly(dst),
           static_cast<float>(delta));

    return k.run(2, gt2, lt2, false);
}
// OpenCL dispatcher for sepFilter2D.  For 8U->8U smoothing kernels it
// pre-scales the coefficients to fixed point (shift_bits) and enables
// integer arithmetic; then it tries the fused single-pass kernel and falls
// back to the two-pass row+column implementation.  Returns false to fall
// back to the CPU path.
static bool ocl_sepFilter2D( InputArray _src, OutputArray _dst, int ddepth,
                      InputArray _kernelX, InputArray _kernelY, Point anchor,
                      double delta, int borderType )
{
    const ocl::Device & d = ocl::Device::getDefault();
    Size imgSize = _src.size();

    int type = _src.type(), sdepth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
    if (cn > 4)
        return false;

    // both kernels must be odd-length 1D vectors
    Mat kernelX = _kernelX.getMat().reshape(1, 1);
    if (kernelX.cols % 2 != 1)
        return false;
    Mat kernelY = _kernelY.getMat().reshape(1, 1);
    if (kernelY.cols % 2 != 1)
        return false;

    if (ddepth < 0)
        ddepth = sdepth;

    if (anchor.x < 0)
        anchor.x = kernelX.cols >> 1;
    if (anchor.y < 0)
        anchor.y = kernelY.cols >> 1;

    int rtype = getKernelType(kernelX,
        kernelX.rows == 1 ? Point(anchor.x, 0) : Point(0, anchor.x));
    int ctype = getKernelType(kernelY,
        kernelY.rows == 1 ? Point(anchor.y, 0) : Point(0, anchor.y));

    int bdepth = CV_32F;
    bool int_arithm = false;
    if( sdepth == CV_8U && ddepth == CV_8U &&
        rtype == KERNEL_SMOOTH+KERNEL_SYMMETRICAL &&
        ctype == KERNEL_SMOOTH+KERNEL_SYMMETRICAL)
    {
        if (ocl::Device::getDefault().isIntel())
        {
            // NOTE(review): this scales the kernel data in place; reshape()
            // may alias the caller's matrix — confirm callers pass copies.
            for (int i=0; i<kernelX.cols; i++)
                kernelX.at<float>(0, i) = (float) cvRound(kernelX.at<float>(0, i) * (1 << shift_bits));
            if (kernelX.data != kernelY.data)
                // BUGFIX: the bound was kernelX.cols — out-of-bounds access
                // when the kernels differ in length (and an under-scaled
                // kernelY when it is the longer one).  Use kernelY's length.
                for (int i=0; i<kernelY.cols; i++)
                    kernelY.at<float>(0, i) = (float) cvRound(kernelY.at<float>(0, i) * (1 << shift_bits));
        } else
        {
            bdepth = CV_32S;
            kernelX.convertTo( kernelX, bdepth, 1 << shift_bits );
            kernelY.convertTo( kernelY, bdepth, 1 << shift_bits );
        }
        int_arithm = true;
    }

    // try the fused single-pass kernel first (size/vendor restrictions apply)
    CV_OCL_RUN_(kernelY.cols <= 21 && kernelX.cols <= 21 &&
                imgSize.width > optimizedSepFilterLocalWidth + anchor.x &&
                imgSize.height > optimizedSepFilterLocalHeight + anchor.y &&
                (!(borderType & BORDER_ISOLATED) || _src.offset() == 0) &&
                anchor == Point(kernelX.cols >> 1, kernelY.cols >> 1) &&
                (d.isIntel() || (d.isAMD() && !d.hostUnifiedMemory())),
                ocl_sepFilter2D_SinglePass(_src, _dst, kernelX, kernelY, delta,
                                           borderType & ~BORDER_ISOLATED, ddepth, bdepth, int_arithm), true)

    UMat src = _src.getUMat();
    Size srcWholeSize; Point srcOffset;
    src.locateROI(srcWholeSize, srcOffset);

    bool fast8uc1 = type == CV_8UC1 && srcOffset.x % 4 == 0 &&
            src.cols % 4 == 0 && src.step % 4 == 0;

    Size srcSize = src.size();
    // intermediate buffer is taller by the column kernel's apron
    Size bufSize(srcSize.width, srcSize.height + kernelY.cols - 1);
    UMat buf(bufSize, CV_MAKETYPE(bdepth, cn));
    if (!ocl_sepRowFilter2D(src, buf, kernelX, anchor.x, borderType, ddepth, fast8uc1, int_arithm))
        return false;

    _dst.create(srcSize, CV_MAKETYPE(ddepth, cn));
    UMat dst = _dst.getUMat();

    return ocl_sepColFilter2D(buf, dst, kernelY, delta, anchor.y, int_arithm);
}
#endif
}
// Instantiates the Filter2D template matching the (srcType, dstType) pair.
// The kernel is converted to float (or double when either side is 64F);
// CV_32S kernels are rescaled by 1/(1<<bits) since they carry fixed-point
// coefficients.  Throws CV_StsNotImplemented for unsupported combinations.
cv::Ptr<cv::BaseFilter> cv::getLinearFilter(int srcType, int dstType,
                                InputArray filter_kernel, Point anchor,
                                double delta, int bits)
{
    Mat _kernel = filter_kernel.getMat();
    int sdepth = CV_MAT_DEPTH(srcType), ddepth = CV_MAT_DEPTH(dstType);
    int cn = CV_MAT_CN(srcType), kdepth = _kernel.depth();
    CV_Assert( cn == CV_MAT_CN(dstType) && ddepth >= sdepth );

    anchor = normalizeAnchor(anchor, _kernel.size());

    /*if( sdepth == CV_8U && ddepth == CV_8U && kdepth == CV_32S )
        return makePtr<Filter2D<uchar, FixedPtCastEx<int, uchar>, FilterVec_8u> >
            (_kernel, anchor, delta, FixedPtCastEx<int, uchar>(bits),
            FilterVec_8u(_kernel, bits, delta));
    if( sdepth == CV_8U && ddepth == CV_16S && kdepth == CV_32S )
        return makePtr<Filter2D<uchar, FixedPtCastEx<int, short>, FilterVec_8u16s> >
            (_kernel, anchor, delta, FixedPtCastEx<int, short>(bits),
            FilterVec_8u16s(_kernel, bits, delta));*/

    // normalize kernel depth: double only if either image side is double
    kdepth = sdepth == CV_64F || ddepth == CV_64F ? CV_64F : CV_32F;
    Mat kernel;
    if( _kernel.type() == kdepth )
        kernel = _kernel;
    else
        _kernel.convertTo(kernel, kdepth, _kernel.type() == CV_32S ? 1./(1 << bits) : 1.);

    // dispatch table: SIMD-accelerated (FilterVec_*) variants where available,
    // otherwise the scalar FilterNoVec fallback
    if( sdepth == CV_8U && ddepth == CV_8U )
        return makePtr<Filter2D<uchar, Cast<float, uchar>, FilterVec_8u> >
            (kernel, anchor, delta, Cast<float, uchar>(), FilterVec_8u(kernel, 0, delta));
    if( sdepth == CV_8U && ddepth == CV_16U )
        return makePtr<Filter2D<uchar,
            Cast<float, ushort>, FilterNoVec> >(kernel, anchor, delta);
    if( sdepth == CV_8U && ddepth == CV_16S )
        return makePtr<Filter2D<uchar, Cast<float, short>, FilterVec_8u16s> >
            (kernel, anchor, delta, Cast<float, short>(), FilterVec_8u16s(kernel, 0, delta));
    if( sdepth == CV_8U && ddepth == CV_32F )
        return makePtr<Filter2D<uchar,
            Cast<float, float>, FilterNoVec> >(kernel, anchor, delta);
    if( sdepth == CV_8U && ddepth == CV_64F )
        return makePtr<Filter2D<uchar,
            Cast<double, double>, FilterNoVec> >(kernel, anchor, delta);
    if( sdepth == CV_16U && ddepth == CV_16U )
        return makePtr<Filter2D<ushort,
            Cast<float, ushort>, FilterNoVec> >(kernel, anchor, delta);
    if( sdepth == CV_16U && ddepth == CV_32F )
        return makePtr<Filter2D<ushort,
            Cast<float, float>, FilterNoVec> >(kernel, anchor, delta);
    if( sdepth == CV_16U && ddepth == CV_64F )
        return makePtr<Filter2D<ushort,
            Cast<double, double>, FilterNoVec> >(kernel, anchor, delta);
    if( sdepth == CV_16S && ddepth == CV_16S )
        return makePtr<Filter2D<short,
            Cast<float, short>, FilterNoVec> >(kernel, anchor, delta);
    if( sdepth == CV_16S && ddepth == CV_32F )
        return makePtr<Filter2D<short,
            Cast<float, float>, FilterNoVec> >(kernel, anchor, delta);
    if( sdepth == CV_16S && ddepth == CV_64F )
        return makePtr<Filter2D<short,
            Cast<double, double>, FilterNoVec> >(kernel, anchor, delta);
    if( sdepth == CV_32F && ddepth == CV_32F )
        return makePtr<Filter2D<float, Cast<float, float>, FilterVec_32f> >
            (kernel, anchor, delta, Cast<float, float>(), FilterVec_32f(kernel, 0, delta));
    if( sdepth == CV_64F && ddepth == CV_64F )
        return makePtr<Filter2D<double,
            Cast<double, double>, FilterNoVec> >(kernel, anchor, delta);

    CV_Error_( CV_StsNotImplemented,
        ("Unsupported combination of source format (=%d), and destination format (=%d)",
        srcType, dstType));

    return Ptr<BaseFilter>();
}
// Wraps a 2D linear filter produced by getLinearFilter into a FilterEngine
// that handles borders and ROI iteration.  The fixed-point 8U path is
// currently disabled (see the commented block), so bits stays 0.
cv::Ptr<cv::FilterEngine> cv::createLinearFilter( int _srcType, int _dstType,
                                InputArray filter_kernel,
                                Point _anchor, double _delta,
                                int _rowBorderType, int _columnBorderType,
                                const Scalar& _borderValue )
{
    Mat _kernel = filter_kernel.getMat();
    _srcType = CV_MAT_TYPE(_srcType);
    _dstType = CV_MAT_TYPE(_dstType);
    int cn = CV_MAT_CN(_srcType);
    CV_Assert( cn == CV_MAT_CN(_dstType) );

    Mat kernel = _kernel;
    int bits = 0;

    /*int sdepth = CV_MAT_DEPTH(_srcType), ddepth = CV_MAT_DEPTH(_dstType);
    int ktype = _kernel.depth() == CV_32S ? KERNEL_INTEGER : getKernelType(_kernel, _anchor);
    if( sdepth == CV_8U && (ddepth == CV_8U || ddepth == CV_16S) &&
        _kernel.rows*_kernel.cols <= (1 << 10) )
    {
        bits = (ktype & KERNEL_INTEGER) ? 0 : 11;
        _kernel.convertTo(kernel, CV_32S, 1 << bits);
    }*/

    Ptr<BaseFilter> _filter2D = getLinearFilter(_srcType, _dstType,
        kernel, _anchor, _delta, bits);

    // no row/column filters: this engine applies the 2D kernel directly
    return makePtr<FilterEngine>(_filter2D, Ptr<BaseRowFilter>(),
        Ptr<BaseColumnFilter>(), _srcType, _dstType, _srcType,
        _rowBorderType, _columnBorderType, _borderValue );
}
#ifdef HAVE_IPP
namespace cv
{
// Intel IPP implementation of filter2D via ippiFilterBorder.
// Only used when: src/dst depth match, the kernel is 16S or 32F single
// channel, the anchor equals IPP's fixed anchor, delta is 0, and the
// operation is out-of-place.  Returns false to fall back.
static bool ipp_filter2D( InputArray _src, OutputArray _dst, int ddepth,
                   InputArray _kernel, Point anchor0,
                   double delta, int borderType )
{
#if !HAVE_ICV
    Mat src = _src.getMat(), kernel = _kernel.getMat();

    if( ddepth < 0 )
        ddepth = src.depth();

    _dst.create( src.size(), CV_MAKETYPE(ddepth, src.channels()) );
    Mat dst = _dst.getMat();
    Point anchor = normalizeAnchor(anchor0, kernel.size());

    typedef IppStatus (CV_STDCALL * ippiFilterBorder)(const void * pSrc, int srcStep, void * pDst, int dstStep, IppiSize dstRoiSize,
                                                      IppiBorderType border, const void * borderValue,
                                                      const IppiFilterBorderSpec* pSpec, Ipp8u* pBuffer);

    int stype = src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype),
            ktype = kernel.type(), kdepth = CV_MAT_DEPTH(ktype);
    bool isolated = (borderType & BORDER_ISOLATED) != 0;
    // IPP >= 9.0 anchors at the top-left-of-center; older versions need the
    // kernel flipped (handled below) and anchor at center
#if IPP_VERSION_X100 >= 900
    Point ippAnchor((kernel.cols-1)/2, (kernel.rows-1)/2);
#else
    Point ippAnchor(kernel.cols >> 1, kernel.rows >> 1);
#endif
    int borderTypeNI = borderType & ~BORDER_ISOLATED;
    IppiBorderType ippBorderType = ippiGetBorderType(borderTypeNI);

    if (borderTypeNI == BORDER_CONSTANT || borderTypeNI == BORDER_REPLICATE)
    {
        // resolve the IPP entry point for the source type; 0 = unsupported
        ippiFilterBorder ippFunc =
            stype == CV_8UC1 ? (ippiFilterBorder)ippiFilterBorder_8u_C1R :
            stype == CV_8UC3 ? (ippiFilterBorder)ippiFilterBorder_8u_C3R :
            stype == CV_8UC4 ? (ippiFilterBorder)ippiFilterBorder_8u_C4R :
            stype == CV_16UC1 ? (ippiFilterBorder)ippiFilterBorder_16u_C1R :
            stype == CV_16UC3 ? (ippiFilterBorder)ippiFilterBorder_16u_C3R :
            stype == CV_16UC4 ? (ippiFilterBorder)ippiFilterBorder_16u_C4R :
            stype == CV_16SC1 ? (ippiFilterBorder)ippiFilterBorder_16s_C1R :
            stype == CV_16SC3 ? (ippiFilterBorder)ippiFilterBorder_16s_C3R :
            stype == CV_16SC4 ? (ippiFilterBorder)ippiFilterBorder_16s_C4R :
            stype == CV_32FC1 ? (ippiFilterBorder)ippiFilterBorder_32f_C1R :
            stype == CV_32FC3 ? (ippiFilterBorder)ippiFilterBorder_32f_C3R :
            stype == CV_32FC4 ? (ippiFilterBorder)ippiFilterBorder_32f_C4R : 0;

        if (sdepth == ddepth && (ktype == CV_16SC1 || ktype == CV_32FC1) &&
            ippFunc && (int)ippBorderType >= 0 && (!src.isSubmatrix() || isolated) &&
            std::fabs(delta - 0) < DBL_EPSILON && ippAnchor == anchor && dst.data != src.data)
        {
            IppiSize kernelSize = { kernel.cols, kernel.rows }, dstRoiSize = { dst.cols, dst.rows };
            IppDataType dataType = ippiGetDataType(ddepth), kernelType = ippiGetDataType(kdepth);
            Ipp32s specSize = 0, bufsize = 0;
            IppStatus status = (IppStatus)-1;

            if ((status = ippiFilterBorderGetSize(kernelSize, dstRoiSize, dataType, kernelType, cn, &specSize, &bufsize)) >= 0)
            {
                IppAutoBuffer<IppiFilterBorderSpec> spec(specSize);
                IppAutoBuffer<Ipp8u> buffer(bufsize);
                Ipp32f borderValue[4] = { 0, 0, 0, 0 };

                if(kdepth == CV_32F)
                {
                    Ipp32f *pKerBuffer = (Ipp32f*)kernel.data;
                    IppAutoBuffer<Ipp32f> kerTmp;
                    int kerStep = sizeof(Ipp32f)*kernelSize.width;
#if IPP_VERSION_X100 >= 900
                    // IPP needs a densely packed kernel; repack if strided
                    if((int)kernel.step != kerStep)
                    {
                        kerTmp.Alloc(kerStep*kernelSize.height);
                        if(ippiCopy_32f_C1R((Ipp32f*)kernel.data, (int)kernel.step, kerTmp, kerStep, kernelSize) < 0)
                            return false;
                        pKerBuffer = kerTmp;
                    }
#else
                    // pre-9.0 IPP convolves (flipped) rather than correlates
                    kerTmp.Alloc(kerStep*kernelSize.height);
                    Mat kerFlip(Size(kernelSize.width, kernelSize.height), CV_32FC1, kerTmp, kerStep);
                    flip(kernel, kerFlip, -1);
                    pKerBuffer = kerTmp;
#endif
                    if((status = ippiFilterBorderInit_32f(pKerBuffer, kernelSize,
                        dataType, cn, ippRndFinancial, spec)) >= 0 )
                    {
                        status = ippFunc(src.data, (int)src.step, dst.data, (int)dst.step, dstRoiSize,
                            ippBorderType, borderValue, spec, buffer);
                    }
                }
                else if(kdepth == CV_16S)
                {
                    Ipp16s *pKerBuffer = (Ipp16s*)kernel.data;
                    IppAutoBuffer<Ipp16s> kerTmp;
                    int kerStep = sizeof(Ipp16s)*kernelSize.width;
#if IPP_VERSION_X100 >= 900
                    if((int)kernel.step != kerStep)
                    {
                        kerTmp.Alloc(kerStep*kernelSize.height);
                        if(ippiCopy_16s_C1R((Ipp16s*)kernel.data, (int)kernel.step, kerTmp, kerStep, kernelSize) < 0)
                            return false;
                        pKerBuffer = kerTmp;
                    }
#else
                    kerTmp.Alloc(kerStep*kernelSize.height);
                    Mat kerFlip(Size(kernelSize.width, kernelSize.height), CV_16SC1, kerTmp, kerStep);
                    flip(kernel, kerFlip, -1);
                    pKerBuffer = kerTmp;
#endif
                    if((status = ippiFilterBorderInit_16s(pKerBuffer, kernelSize,
                        0, dataType, cn, ippRndFinancial, spec)) >= 0)
                    {
                        status = ippFunc(src.data, (int)src.step, dst.data, (int)dst.step, dstRoiSize,
                            ippBorderType, borderValue, spec, buffer);
                    }
                }

                if (status >= 0)
                {
                    CV_IMPL_ADD(CV_IMPL_IPP);
                    return true;
                }
            }
        }
    }
#else
    CV_UNUSED(_src); CV_UNUSED(_dst); CV_UNUSED(ddepth); CV_UNUSED(_kernel), CV_UNUSED(anchor0), CV_UNUSED(delta), CV_UNUSED(borderType);
#endif
    return false;
}
}
#endif
// Public filter2D entry point.  Dispatch order: OpenCL -> IPP -> Tegra ->
// DFT-based crossCorr (for kernels above dft_filter_size taps) -> the
// direct spatial FilterEngine.
void cv::filter2D( InputArray _src, OutputArray _dst, int ddepth,
                   InputArray _kernel, Point anchor0,
                   double delta, int borderType )
{
    CV_OCL_RUN(_dst.isUMat() && _src.dims() <= 2,
               ocl_filter2D(_src, _dst, ddepth, _kernel, anchor0, delta, borderType))

    Mat src = _src.getMat(), kernel = _kernel.getMat();

    if( ddepth < 0 )
        ddepth = src.depth();

    // threshold (in kernel taps) above which the FFT path wins; raised when
    // SSE3 accelerates the spatial path
#if CV_SSE2
    int dft_filter_size = ((src.depth() == CV_8U && (ddepth == CV_8U || ddepth == CV_16S)) ||
        (src.depth() == CV_32F && ddepth == CV_32F)) && checkHardwareSupport(CV_CPU_SSE3)? 130 : 50;
#else
    int dft_filter_size = 50;
#endif

    _dst.create( src.size(), CV_MAKETYPE(ddepth, src.channels()) );
    Mat dst = _dst.getMat();
    Point anchor = normalizeAnchor(anchor0, kernel.size());

    CV_IPP_RUN(true, ipp_filter2D(_src, _dst, ddepth, _kernel, anchor0, delta, borderType));

#ifdef HAVE_TEGRA_OPTIMIZATION
    if( tegra::useTegra() && tegra::filter2D(src, dst, kernel, anchor, delta, borderType) )
        return;
#endif

    if( kernel.cols*kernel.rows >= dft_filter_size )
    {
        Mat temp;
        // crossCorr doesn't accept non-zero delta with multiple channels
        if( src.channels() != 1 && delta != 0 )
        {
            // The semantics of filter2D require that the delta be applied
            // as floating-point math.  So we need an intermediate Mat
            // with a float datatype.  If the dest is already floats,
            // we just use that.
            int corrDepth = dst.depth();
            if( (dst.depth() == CV_32F || dst.depth() == CV_64F) &&
                src.data != dst.data )
            {
                temp = dst;
            }
            else
            {
                corrDepth = dst.depth() == CV_64F ? CV_64F : CV_32F;
                temp.create( dst.size(), CV_MAKETYPE(corrDepth, dst.channels()) );
            }
            crossCorr( src, kernel, temp, src.size(),
                       CV_MAKETYPE(corrDepth, src.channels()),
                       anchor, 0, borderType );
            add( temp, delta, temp );
            if ( temp.data != dst.data )
            {
                temp.convertTo( dst, dst.type() );
            }
        }
        else
        {
            // in-place call needs a scratch destination
            if( src.data != dst.data )
                temp = dst;
            else
                temp.create(dst.size(), dst.type());
            crossCorr( src, kernel, temp, src.size(),
                       CV_MAKETYPE(ddepth, src.channels()),
                       anchor, delta, borderType );
            if( temp.data != dst.data )
                temp.copyTo(dst);
        }
        return;
    }

    // direct spatial filtering
    Ptr<FilterEngine> f = createLinearFilter(src.type(), dst.type(), kernel,
                                             anchor, delta, borderType & ~BORDER_ISOLATED );
    f->apply(src, dst, Rect(0,0,-1,-1), Point(), (borderType & BORDER_ISOLATED) != 0 );
}
// Public separable filter entry point: tries the OpenCL path, otherwise
// builds a row+column FilterEngine and applies it.
void cv::sepFilter2D( InputArray _src, OutputArray _dst, int ddepth,
                      InputArray _kernelX, InputArray _kernelY, Point anchor,
                      double delta, int borderType )
{
    CV_OCL_RUN(_dst.isUMat() && _src.dims() <= 2,
               ocl_sepFilter2D(_src, _dst, ddepth, _kernelX, _kernelY, anchor, delta, borderType))

    Mat src = _src.getMat(), kernelX = _kernelX.getMat(), kernelY = _kernelY.getMat();

    if( ddepth < 0 )
        ddepth = src.depth();

    _dst.create( src.size(), CV_MAKETYPE(ddepth, src.channels()) );
    Mat dst = _dst.getMat();

    // BORDER_ISOLATED is handled by apply(), not by the engine itself
    Ptr<FilterEngine> f = createSeparableLinearFilter(src.type(),
        dst.type(), kernelX, kernelY, anchor, delta, borderType & ~BORDER_ISOLATED );
    f->apply(src, dst, Rect(0,0,-1,-1), Point(), (borderType & BORDER_ISOLATED) != 0 );
}
// Legacy C API wrapper: forwards to cv::filter2D with the destination's
// depth, zero delta and replicate borders.
CV_IMPL void
cvFilter2D( const CvArr* srcarr, CvArr* dstarr, const CvMat* _kernel, CvPoint anchor )
{
    cv::Mat input     = cv::cvarrToMat(srcarr);
    cv::Mat output    = cv::cvarrToMat(dstarr);
    cv::Mat kernelMat = cv::cvarrToMat(_kernel);

    CV_Assert( input.size() == output.size() && input.channels() == output.channels() );

    cv::filter2D( input, output, output.depth(), kernelMat, anchor, 0, cv::BORDER_REPLICATE );
}
/* End of file. */
|
/*
Open Asset Import Library (assimp)
----------------------------------------------------------------------
Copyright (c) 2006-2018, assimp team
All rights reserved.
Redistribution and use of this software in source and binary forms,
with or without modification, are permitted provided that the
following conditions are met:
* Redistributions of source code must retain the above
copyright notice, this list of conditions and the
following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other
materials provided with the distribution.
* Neither the name of the assimp team, nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior
written permission of the assimp team.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
----------------------------------------------------------------------
*/
#ifndef ASSIMP_BUILD_NO_Q3BSP_IMPORTER
#include "Q3BSPFileParser.h"
#include "Q3BSPFileData.h"
#include "Q3BSPZipArchive.h"
#include <vector>
#include <assimp/DefaultIOSystem.h>
#include <assimp/ai_assert.h>
namespace Assimp {
using namespace Q3BSP;
// ------------------------------------------------------------------------------------------------
// Loads and parses the named BSP file from the given (non-owned) PK3/zip
// archive.  On any failure m_pModel stays nullptr; callers must check
// getModel() for success.
Q3BSPFileParser::Q3BSPFileParser( const std::string &mapName, Q3BSPZipArchive *pZipArchive ) :
    m_sOffset( 0 ),
    m_Data(),
    m_pModel(nullptr),
    m_pZipArchive( pZipArchive )
{
    ai_assert(nullptr != m_pZipArchive );
    ai_assert( !mapName.empty() );

    // pull the raw file contents into m_Data
    if ( !readData( mapName ) )
        return;

    m_pModel = new Q3BSPModel;
    m_pModel->m_ModelName = mapName;
    if ( !parseFile() ) {
        // parsing failed: discard the half-built model
        delete m_pModel;
        m_pModel = nullptr;
    }
}
// ------------------------------------------------------------------------------------------------
// Releases the owned model (the archive is not owned and is left alone).
Q3BSPFileParser::~Q3BSPFileParser() {
    delete m_pModel;
    m_pModel = nullptr;
}
// ------------------------------------------------------------------------------------------------
// Returns the parsed model, or nullptr if loading/parsing failed.
// Ownership stays with the parser.
Q3BSP::Q3BSPModel *Q3BSPFileParser::getModel() const {
    return m_pModel;
}
// ------------------------------------------------------------------------------------------------
// Reads the whole map file out of the zip archive into m_Data.
// @param rMapName  name of the entry inside the archive.
// @return true if the complete file was read, false otherwise.
bool Q3BSPFileParser::readData( const std::string &rMapName ) {
    if ( !m_pZipArchive->Exists( rMapName.c_str() ) )
        return false;

    IOStream *pMapFile = m_pZipArchive->Open( rMapName.c_str() );
    if ( nullptr == pMapFile )
        return false;

    const size_t size = pMapFile->FileSize();
    if ( 0 == size ) {
        // Guard: &m_Data[0] on an empty vector is undefined behaviour.
        m_pZipArchive->Close( pMapFile );
        return false;
    }

    m_Data.resize( size );
    const size_t readSize = pMapFile->Read( &m_Data[0], sizeof( char ), size );

    // Always hand the stream back to the archive, even on a short read
    // (the original leaked the stream on the failure path).
    m_pZipArchive->Close( pMapFile );

    if ( readSize != size ) {
        m_Data.clear();
        return false;
    }
    return true;
}
// ------------------------------------------------------------------------------------------------
// Parses the loaded buffer into the model: validates the header, then reads
// each lump category in turn. Returns false on empty or malformed input.
bool Q3BSPFileParser::parseFile() {
    // No data, or not a valid IBSP file: nothing to parse.
    if ( m_Data.empty() || !validateFormat() ) {
        return false;
    }

    getLumps();     // the level's lump dictionary
    countLumps();   // pre-size the model containers
    getVertices();
    getIndices();
    getFaces();
    getTextures();
    getLightMaps();
    getEntities();

    return true;
}
// ------------------------------------------------------------------------------------------------
// Validates the file header. Checks the "IBSP" magic and advances the
// running parse offset past the header.
// @return true when the buffer begins with a Q3 BSP header.
bool Q3BSPFileParser::validateFormat()
{
    // Guard against truncated files: the original cast the buffer to a
    // header without checking there are enough bytes for one (OOB read).
    if ( m_Data.size() < sizeof( sQ3BSPHeader ) ) {
        return false;
    }

    sQ3BSPHeader *pHeader = (sQ3BSPHeader*) &m_Data[ 0 ];
    m_sOffset += sizeof( sQ3BSPHeader );

    // Version and identify string validation: magic must read "IBSP".
    if ( pHeader->strID[ 0 ] != 'I' || pHeader->strID[ 1 ] != 'B' ||
         pHeader->strID[ 2 ] != 'S' || pHeader->strID[ 3 ] != 'P' ) {
        return false;
    }
    return true;
}
// ------------------------------------------------------------------------------------------------
// Imports the lump dictionary that immediately follows the header: one
// sQ3BSPLump (offset + size) per lump kind, copied out of the raw buffer.
void Q3BSPFileParser::getLumps()
{
    size_t Offset = m_sOffset;
    m_pModel->m_Lumps.resize( kMaxLumps );
    for ( size_t idx=0; idx < kMaxLumps; idx++ )
    {
        sQ3BSPLump *pLump = new sQ3BSPLump;
        memcpy( pLump, &m_Data[ Offset ], sizeof( sQ3BSPLump ) );
        Offset += sizeof( sQ3BSPLump );
        m_pModel->m_Lumps[ idx ] = pLump;
    }
}
// ------------------------------------------------------------------------------------------------
// Pre-sizes each destination container from the byte count of its lump.
void Q3BSPFileParser::countLumps()
{
    const auto &lumps = m_pModel->m_Lumps;
    m_pModel->m_Vertices.resize(  lumps[ kVertices  ]->iSize / sizeof( sQ3BSPVertex ) );
    m_pModel->m_Indices.resize(   lumps[ kMeshVerts ]->iSize / sizeof( int ) );
    m_pModel->m_Faces.resize(     lumps[ kFaces     ]->iSize / sizeof( sQ3BSPFace ) );
    m_pModel->m_Textures.resize(  lumps[ kTextures  ]->iSize / sizeof( sQ3BSPTexture ) );
    m_pModel->m_Lightmaps.resize( lumps[ kLightmaps ]->iSize / sizeof( sQ3BSPLightmap ) );
}
// ------------------------------------------------------------------------------------------------
// Deserializes one sQ3BSPVertex per slot reserved by countLumps().
void Q3BSPFileParser::getVertices()
{
    size_t offset = m_pModel->m_Lumps[ kVertices ]->iOffset;
    for ( auto &vertexSlot : m_pModel->m_Vertices )
    {
        sQ3BSPVertex *pVertex = new sQ3BSPVertex;
        memcpy( pVertex, &m_Data[ offset ], sizeof( sQ3BSPVertex ) );
        vertexSlot = pVertex;
        offset += sizeof( sQ3BSPVertex );
    }
}
// ------------------------------------------------------------------------------------------------
// Bulk-copies the mesh-vertex index lump into the model.
void Q3BSPFileParser::getIndices()
{
    ai_assert(nullptr != m_pModel );

    sQ3BSPLump *lump = m_pModel->m_Lumps[ kMeshVerts ];
    const size_t nIndices = lump->iSize / sizeof( int );
    m_pModel->m_Indices.resize( nIndices );

    // Guard: &m_Indices[0] on an empty vector is undefined behaviour.
    if ( 0 == nIndices ) {
        return;
    }
    memcpy( &m_pModel->m_Indices[ 0 ], &m_Data[ (size_t) lump->iOffset ], lump->iSize );
}
// ------------------------------------------------------------------------------------------------
// Deserializes one sQ3BSPFace per slot reserved by countLumps().
void Q3BSPFileParser::getFaces()
{
    ai_assert(nullptr != m_pModel );

    size_t offset = m_pModel->m_Lumps[ kFaces ]->iOffset;
    for ( auto &faceSlot : m_pModel->m_Faces )
    {
        sQ3BSPFace *pFace = new sQ3BSPFace;
        memcpy( pFace, &m_Data[ offset ], sizeof( sQ3BSPFace ) );
        faceSlot = pFace;
        offset += sizeof( sQ3BSPFace );
    }
}
// ------------------------------------------------------------------------------------------------
// Deserializes one sQ3BSPTexture per slot reserved by countLumps().
void Q3BSPFileParser::getTextures()
{
    ai_assert(nullptr != m_pModel );

    size_t offset = m_pModel->m_Lumps[ kTextures ]->iOffset;
    for ( auto &textureSlot : m_pModel->m_Textures )
    {
        sQ3BSPTexture *pTexture = new sQ3BSPTexture;
        memcpy( pTexture, &m_Data[ offset ], sizeof( sQ3BSPTexture ) );
        textureSlot = pTexture;
        offset += sizeof( sQ3BSPTexture );
    }
}
// ------------------------------------------------------------------------------------------------
// Deserializes one sQ3BSPLightmap per slot reserved by countLumps().
void Q3BSPFileParser::getLightMaps()
{
    ai_assert(nullptr != m_pModel );

    size_t offset = m_pModel->m_Lumps[ kLightmaps ]->iOffset;
    for ( auto &lightmapSlot : m_pModel->m_Lightmaps )
    {
        sQ3BSPLightmap *pLightmap = new sQ3BSPLightmap;
        memcpy( pLightmap, &m_Data[ offset ], sizeof( sQ3BSPLightmap ) );
        lightmapSlot = pLightmap;
        offset += sizeof( sQ3BSPLightmap );
    }
}
// ------------------------------------------------------------------------------------------------
void Q3BSPFileParser::getEntities() {
const int size = m_pModel->m_Lumps[ kEntities ]->iSize;
m_pModel->m_EntityData.resize( size );
if ( size > 0 ) {
size_t Offset = m_pModel->m_Lumps[ kEntities ]->iOffset;
memcpy( &m_pModel->m_EntityData[ 0 ], &m_Data[ Offset ], sizeof( char ) * size );
}
}
// ------------------------------------------------------------------------------------------------
} // Namespace Assimp
#endif // ASSIMP_BUILD_NO_Q3BSP_IMPORTER
|
/* Copyright (C) 2013 Wildfire Games.
* This file is part of 0 A.D.
*
* 0 A.D. is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* 0 A.D. is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with 0 A.D. If not, see <http://www.gnu.org/licenses/>.
*/
#include "precompiled.h"
#include "FontManager.h"
#include "graphics/Font.h"
#include "graphics/TextureManager.h"
#include "ps/CLogger.h"
#include "ps/CStr.h"
#include "ps/Filesystem.h"
#include "renderer/Renderer.h"
#include <cfloat>
// Returns the named font, loading and caching it on first use. Falls back
// to the default font (str_sans_10) if the requested one cannot be read;
// a null pointer is cached if even the default fails.
shared_ptr<CFont> CFontManager::LoadFont(CStrIntern fontName)
{
	// Serve from the cache when possible.
	const FontsMap::iterator cached = m_Fonts.find(fontName);
	if (cached != m_Fonts.end())
		return cached->second;

	shared_ptr<CFont> font(new CFont());
	if (!ReadFont(font.get(), fontName))
	{
		if (fontName == str_sans_10)
			font.reset();               // default font itself failed: cache the failure
		else
			font = LoadFont(str_sans_10); // fall back to default font
	}

	m_Fonts[fontName] = font;
	return font;
}
// Parses a "fonts/<name>.fnt" definition file into 'font' and creates its
// glyph texture from "fonts/<name>.png".
// @param font      destination font object (must be non-null).
// @param fontName  interned font name, e.g. "sans-10".
// @return true on success; false if the file is missing or has the wrong
//         version.
bool CFontManager::ReadFont(CFont* font, CStrIntern fontName)
{
	const VfsPath path(L"fonts/");

	// Read font definition file into a stringstream
	shared_ptr<u8> buf;
	size_t size;
	const VfsPath fntName(fontName.string() + ".fnt");
	if (g_VFS->LoadFile(path / fntName, buf, size) < 0)
	{
		LOGERROR("Failed to open font file %s", (path / fntName).string8());
		return false;
	}
	std::istringstream FNTStream(std::string((const char*)buf.get(), size));

	int Version;
	FNTStream >> Version;
	if (Version != 101) // Make sure this is from a recent version of the font builder
	{
		LOGERROR("Font %s has invalid version", fontName.c_str());
		return false; // BUGFIX: was 'return 0' in a bool-returning function
	}

	int TextureWidth, TextureHeight;
	FNTStream >> TextureWidth >> TextureHeight;

	std::string Format;
	FNTStream >> Format;
	if (Format == "rgba")
		font->m_HasRGB = true;
	else if (Format == "a")
		font->m_HasRGB = false;
	else
		debug_warn(L"Invalid .fnt format string");

	int NumGlyphs;
	FNTStream >> NumGlyphs;
	FNTStream >> font->m_LineSpacing;
	FNTStream >> font->m_Height;

	// Start from an inverted bounding box and grow it over all glyphs.
	font->m_BoundsX0 = FLT_MAX;
	font->m_BoundsY0 = FLT_MAX;
	font->m_BoundsX1 = -FLT_MAX;
	font->m_BoundsY1 = -FLT_MAX;

	for (int i = 0; i < NumGlyphs; ++i)
	{
		int Codepoint, TextureX, TextureY, Width, Height, OffsetX, OffsetY, Advance;
		FNTStream >> Codepoint>>TextureX>>TextureY>>Width>>Height>>OffsetX>>OffsetY>>Advance;

		// Glyph map is keyed by u16, so reject anything outside the BMP.
		if (Codepoint < 0 || Codepoint > 0xFFFF)
		{
			LOGWARNING("Font %s has invalid codepoint 0x%x", fontName.c_str(), Codepoint);
			continue;
		}

		// Normalized texture coordinates for this glyph.
		float u = (float)TextureX / (float)TextureWidth;
		float v = (float)TextureY / (float)TextureHeight;
		float w = (float)Width / (float)TextureWidth;
		float h = (float)Height / (float)TextureHeight;

		CFont::GlyphData g = { u, -v, u+w, -v+h, (i16)OffsetX, (i16)-OffsetY, (i16)(OffsetX+Width), (i16)(-OffsetY+Height), (i16)Advance };
		font->m_Glyphs.set((u16)Codepoint, g);

		font->m_BoundsX0 = std::min(font->m_BoundsX0, (float)g.x0);
		font->m_BoundsY0 = std::min(font->m_BoundsY0, (float)g.y0);
		font->m_BoundsX1 = std::max(font->m_BoundsX1, (float)g.x1);
		font->m_BoundsY1 = std::max(font->m_BoundsY1, (float)g.y1);
	}

	ENSURE(font->m_Height); // Ensure the height has been found (which should always happen if the font includes an 'I')

	// Load glyph texture
	const VfsPath imgName(fontName.string() + ".png");
	CTextureProperties textureProps(path / imgName);
	textureProps.SetFilter(GL_NEAREST);
	if (!font->m_HasRGB)
		textureProps.SetFormatOverride(GL_ALPHA);
	font->m_Texture = g_Renderer.GetTextureManager().CreateTexture(textureProps);

	return true;
}
|
// Autogenerated from CppHeaderCreator
// Created by Sc2ad
// =========================================================================
#pragma once
// Begin includes
#include "beatsaber-hook/shared/utils/typedefs.h"
#include "beatsaber-hook/shared/utils/byref.hpp"
// Including type: Unity.Collections.NativeArray`1
#include "Unity/Collections/NativeArray_1.hpp"
#include "beatsaber-hook/shared/utils/il2cpp-utils-methods.hpp"
#include "beatsaber-hook/shared/utils/il2cpp-utils-properties.hpp"
#include "beatsaber-hook/shared/utils/il2cpp-utils-fields.hpp"
#include "beatsaber-hook/shared/utils/utils.h"
// Completed includes
// Begin forward declares
// Forward declaring namespace: System
namespace System {
// Skipping declaration: ValueType because it is already included!
}
// Completed forward declares
// Type namespace: Unity.Collections.LowLevel.Unsafe
namespace Unity::Collections::LowLevel::Unsafe {
// Forward declaring type: NativeArrayUnsafeUtility
class NativeArrayUnsafeUtility;
}
#include "beatsaber-hook/shared/utils/il2cpp-type-check.hpp"
NEED_NO_BOX(::Unity::Collections::LowLevel::Unsafe::NativeArrayUnsafeUtility);
DEFINE_IL2CPP_ARG_TYPE(::Unity::Collections::LowLevel::Unsafe::NativeArrayUnsafeUtility*, "Unity.Collections.LowLevel.Unsafe", "NativeArrayUnsafeUtility");
// Type namespace: Unity.Collections.LowLevel.Unsafe
namespace Unity::Collections::LowLevel::Unsafe {
// Size: 0x10
#pragma pack(push, 1)
// Autogenerated type: Unity.Collections.LowLevel.Unsafe.NativeArrayUnsafeUtility
// [TokenAttribute] Offset: FFFFFFFF
// [ExtensionAttribute] Offset: FFFFFFFF
// Autogenerated il2cpp binding for the managed static utility class
// Unity.Collections.LowLevel.Unsafe.NativeArrayUnsafeUtility.
class NativeArrayUnsafeUtility : public ::Il2CppObject {
  public:
    // static public Unity.Collections.NativeArray`1<T> ConvertExistingDataToNativeArray(System.Void* dataPointer, System.Int32 length, Unity.Collections.Allocator allocator)
    // Offset: 0xFFFFFFFFFFFFFFFF
    // Resolves the managed generic method once (cached in function-local
    // statics), instantiates it for T, and invokes it through il2cpp.
    // Managed exceptions are rethrown (RunMethodRethrow).
    template<class T>
    static ::Unity::Collections::NativeArray_1<T> ConvertExistingDataToNativeArray(void* dataPointer, int length, ::Unity::Collections::Allocator allocator) {
      static_assert(std::is_convertible_v<T, ::System::ValueType*>);
      static auto ___internal__logger = ::Logger::get().WithContext("::Unity::Collections::LowLevel::Unsafe::NativeArrayUnsafeUtility::ConvertExistingDataToNativeArray");
      static auto* ___internal__method = THROW_UNLESS((::il2cpp_utils::FindMethod("Unity.Collections.LowLevel.Unsafe", "NativeArrayUnsafeUtility", "ConvertExistingDataToNativeArray", std::vector<Il2CppClass*>{::il2cpp_utils::il2cpp_type_check::il2cpp_no_arg_class<T>::get()}, ::std::vector<const Il2CppType*>{::il2cpp_utils::ExtractType(dataPointer), ::il2cpp_utils::ExtractType(length), ::il2cpp_utils::ExtractType(allocator)})));
      static auto* ___generic__method = THROW_UNLESS(::il2cpp_utils::MakeGenericMethod(___internal__method, std::vector<Il2CppClass*>{::il2cpp_utils::il2cpp_type_check::il2cpp_no_arg_class<T>::get()}));
      return ::il2cpp_utils::RunMethodRethrow<::Unity::Collections::NativeArray_1<T>, false>(static_cast<Il2CppObject*>(nullptr), ___generic__method, dataPointer, length, allocator);
    }
}; // Unity.Collections.LowLevel.Unsafe.NativeArrayUnsafeUtility
#pragma pack(pop)
}
#include "beatsaber-hook/shared/utils/il2cpp-utils-methods.hpp"
// Writing MetadataGetter for method: Unity::Collections::LowLevel::Unsafe::NativeArrayUnsafeUtility::ConvertExistingDataToNativeArray
// Il2CppName: ConvertExistingDataToNativeArray
// Cannot write MetadataGetter for generic methods!
|
/* dogleg_method.cpp
*
* Created on: 11 Nov 2020
* Author: Fabian Meyer
*/
#include <lsqcpp.h>
// Implement an objective functor.
// Objective functor: fval(i) = x(2i)^2 + x(2i+1)^2 for consecutive pairs.
struct ParabolicError
{
    void operator()(const Eigen::VectorXd &xval,
                    Eigen::VectorXd &fval,
                    Eigen::MatrixXd &) const
    {
        // The jacobian output is left untouched, so the solver estimates it
        // numerically via finite differences.
        fval.resize(xval.size() / 2);
        for(lsq::Index i = 0; i < fval.size(); ++i)
        {
            const double a = xval(2 * i);
            const double b = xval(2 * i + 1);
            fval(i) = a * a + b * b;
        }
    }
};
int main()
{
    // Create a Dogleg trust-region optimizer for the parabolic error functor.
    // (The original comment wrongly mentioned gradient descent / Barzilai-Borwein.)
    lsq::DoglegMethod<double, ParabolicError> optimizer;
    // Set number of iterations for trust region method.
    optimizer.setMaxIterationsTR(100);
    // Set number of iterations as stop criterion.
    optimizer.setMaxIterations(100);
    // Set the minimum length of the gradient.
    optimizer.setMinGradientLength(1e-6);
    // Set the minimum length of the step.
    optimizer.setMinStepLength(1e-6);
    // Set the minimum least squares error.
    optimizer.setMinError(0);
    // Turn verbosity on, so the optimizer prints status updates after each
    // iteration.
    optimizer.setVerbosity(4);
    // Set initial guess.
    Eigen::VectorXd initialGuess(4);
    initialGuess << 1, 2, 3, 4;
    // Start the optimization.
    auto result = optimizer.minimize(initialGuess);
    std::cout << "Done! Converged: " << (result.converged ? "true" : "false")
              << " Iterations: " << result.iterations << std::endl;
    // do something with final function value
    std::cout << "Final fval: " << result.fval.transpose() << std::endl;
    // do something with final x-value
    std::cout << "Final xval: " << result.xval.transpose() << std::endl;
    return 0;
}
|
#include "ofApp.h"
//--------------------------------------------------------------
// One-time setup: dark-grey backdrop, antialiased drawing, and a circle
// resolution of 6 so ofDrawCircle renders hexagons.
void ofApp::setup(){
    // These calls are independent; order is irrelevant.
    ofSetCircleResolution(6);
    ofEnableSmoothing();
    ofBackground(60);
}
//--------------------------------------------------------------
// Per-frame update hook; this sketch is static, so nothing to do.
void ofApp::update(){
}
//--------------------------------------------------------------
// Per-frame render: a diagonal line plus a large "circle" (hexagon at
// resolution 6) centred in the window.
void ofApp::draw(){
    const int centerX = ofGetWidth() / 2;   // integer division, as before
    const int centerY = ofGetHeight() / 2;
    ofDrawLine(64, 64, 256, 128);
    ofDrawCircle(centerX, centerY, 128);
}
//--------------------------------------------------------------
// Key-down event hook; unused in this sketch.
void ofApp::keyPressed(int key){
}
//--------------------------------------------------------------
// Key-up event hook; unused in this sketch.
void ofApp::keyReleased(int key){
}
//--------------------------------------------------------------
// Mouse-move event hook; unused in this sketch.
void ofApp::mouseMoved(int x, int y ){
}
//--------------------------------------------------------------
// Mouse-drag event hook; unused in this sketch.
void ofApp::mouseDragged(int x, int y, int button){
}
//--------------------------------------------------------------
// Mouse-button-down event hook; unused in this sketch.
void ofApp::mousePressed(int x, int y, int button){
}
//--------------------------------------------------------------
// Mouse-button-up event hook; unused in this sketch.
void ofApp::mouseReleased(int x, int y, int button){
}
//--------------------------------------------------------------
// Cursor-entered-window event hook; unused in this sketch.
void ofApp::mouseEntered(int x, int y){
}
//--------------------------------------------------------------
// Cursor-left-window event hook; unused in this sketch.
void ofApp::mouseExited(int x, int y){
}
//--------------------------------------------------------------
// Window-resize event hook; unused in this sketch.
void ofApp::windowResized(int w, int h){
}
//--------------------------------------------------------------
// Inter-app message event hook; unused in this sketch.
void ofApp::gotMessage(ofMessage msg){
}
//--------------------------------------------------------------
// File drag-and-drop event hook; unused in this sketch.
void ofApp::dragEvent(ofDragInfo dragInfo){
}
|
// Copyright (c) 2018 Graphcore Ltd. All rights reserved.
#include <onnxutil.hpp>
#include <poprithmshosttensor.hpp>
#include <popart/ces/castce.hpp>
#include <popart/op/cast.hpp>
#include <popart/tensor.hpp>
namespace popart {
// Wraps the Cast op for constant-expression (compile-time) evaluation.
ConstExprCast::ConstExprCast(Op *op_) : ConstExprOp(op_) {}
std::vector<char> ConstExprCast::compute() {
const auto in0 = getPoprithmsComputeHostTensor(*inTensor(0));
return in0.to(getPoprithmsDType(outInfo0().dataType())).getNativeCharVector();
}
} // namespace popart
|
/*=========================================================================
Program: Visualization Toolkit
Module: $RCSfile$
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================*/
#include "vtkCgShaderDeviceAdapter.h"
#include "vtkObjectFactory.h"
#include "vtkSmartPointer.h"
#include "vtkShaderProgram.h"
#include "vtkCgShader.h"
#include "vtkCollectionIterator.h"
#include "vtkXMLShader.h"
// PIMPL container: holds the vertex-scope Cg shader located during
// PrepareForRender(); attribute values are forwarded to it as uniforms.
class vtkCgShaderDeviceAdapter::vtkInternal
{
public:
  vtkSmartPointer<vtkCgShader> VertexShader;
};
// Standard VTK object-factory New() implementation and CVS revision tag.
vtkStandardNewMacro(vtkCgShaderDeviceAdapter);
vtkCxxRevisionMacro(vtkCgShaderDeviceAdapter, "$Revision$");
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
// Constructor: allocates the PIMPL state (freed in the destructor).
vtkCgShaderDeviceAdapter::vtkCgShaderDeviceAdapter()
{
  this->Internal = new vtkInternal();
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
// Destructor: releases the PIMPL state.
vtkCgShaderDeviceAdapter::~vtkCgShaderDeviceAdapter()
{
  delete this->Internal;
}
//----------------------------------------------------------------------------
void vtkCgShaderDeviceAdapter::PrepareForRender()
{
// locate the vertex CgShader which can accept varying parameters.
vtkCollectionIterator* shaderIter = this->ShaderProgram->NewShaderIterator();
for (shaderIter->InitTraversal(); !shaderIter->IsDoneWithTraversal();
shaderIter->GoToNextItem())
{
vtkCgShader* shader = vtkCgShader::SafeDownCast(
shaderIter->GetCurrentObject());
if (shader && shader->GetScope() == vtkXMLShader::SCOPE_VERTEX)
{
this->Internal->VertexShader = shader;
break;
}
}
shaderIter->Delete();
}
// Generic path: converts up to the first four attribute components (starting
// at 'offset') to double and forwards them to the adapter.
// NOTE(review): only 4 values are converted, but 'components' is passed
// through unchanged -- presumably callers never pass components > 4; confirm
// before relying on this for wider attributes.
template <class T>
void vtkCgShaderDeviceAdapterSendAttributeInternal(vtkCgShaderDeviceAdapter* self,
  const char* attrname, int components, const T* attribute, unsigned long offset)
{
  double converted_value[4];
  for (int cc=0; cc < 4 && cc < components; cc++)
  {
    converted_value[cc] = static_cast<double>((attribute+offset)[cc]);
  }
  self->SendAttributeInternal(attrname, components, converted_value);
}
// Float specialization: no conversion needed, forward the raw pointer
// (offset applied) directly to the float overload.
VTK_TEMPLATE_SPECIALIZE
void vtkCgShaderDeviceAdapterSendAttributeInternal(vtkCgShaderDeviceAdapter* self,
  const char* attrname, int components, const float* attribute, unsigned long offset)
{
  self->SendAttributeInternal(attrname, components, (attribute+offset));
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
// Pushes a double-valued attribute to the vertex shader as a uniform.
void vtkCgShaderDeviceAdapter::SendAttributeInternal(
  const char* attrname, int components, const double* data)
{
  this->Internal->VertexShader->SetUniformParameter(attrname, components, data);
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
// Pushes a float-valued attribute to the vertex shader as a uniform.
void vtkCgShaderDeviceAdapter::SendAttributeInternal(
  const char* attrname, int components, const float* data)
{
  this->Internal->VertexShader->SetUniformParameter(attrname, components, data);
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
// Dispatches on the VTK scalar 'type' tag: vtkTemplateMacro expands to one
// case per scalar type, invoking the templated helper with VTK_TT bound to
// the concrete element type. Unknown types fall through and are ignored.
void vtkCgShaderDeviceAdapter::SendAttribute(const char* attrname,
  int components, int type,
  const void* attribute, unsigned long offset/*=0*/)
{
  switch (type)
  {
    vtkTemplateMacro(
      vtkCgShaderDeviceAdapterSendAttributeInternal(this,
        attrname, components, static_cast<const VTK_TT*>(attribute), offset));
  }
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
// Standard VTK PrintSelf: no state beyond the superclass to report.
void vtkCgShaderDeviceAdapter::PrintSelf(ostream& os, vtkIndent indent)
{
  this->Superclass::PrintSelf(os, indent);
}
|
/******************************************************************************
*
* Project: OpenGIS Simple Features Reference Implementation
* Purpose: Implement ERMapper projection conversions.
* Author: Frank Warmerdam, warmerdam@pobox.com
*
******************************************************************************
* Copyright (c) 2007, Frank Warmerdam <warmerdam@pobox.com>
* Copyright (c) 2008-2011, Even Rouault <even dot rouault at mines-paris dot org>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
****************************************************************************/
#include "cpl_port.h"
#include "ogr_srs_api.h"
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include "cpl_conv.h"
#include "cpl_error.h"
#include "ogr_core.h"
#include "ogr_spatialref.h"
CPL_CVSID("$Id$");
/************************************************************************/
/* OSRImportFromERM() */
/************************************************************************/
/**
* \brief Create OGR WKT from ERMapper projection definitions.
*
* This function is the same as OGRSpatialReference::importFromERM().
*/
// C wrapper: validates the handle, then forwards to
// OGRSpatialReference::importFromERM().
OGRErr OSRImportFromERM( OGRSpatialReferenceH hSRS, const char *pszProj,
                         const char *pszDatum, const char *pszUnits )
{
    VALIDATE_POINTER1( hSRS, "OSRImportFromERM", OGRERR_FAILURE );

    OGRSpatialReference *poSRS = reinterpret_cast<OGRSpatialReference *>(hSRS);
    return poSRS->importFromERM( pszProj, pszDatum, pszUnits );
}
/************************************************************************/
/* importFromERM() */
/************************************************************************/
/**
* Create OGR WKT from ERMapper projection definitions.
*
* Generates an OGRSpatialReference definition from an ERMapper datum
* and projection name. Based on the ecw_cs.wkt dictionary file from
* gdal/data.
*
* @param pszProj the projection name, such as "NUTM11" or "GEOGRAPHIC".
* @param pszDatum the datum name, such as "NAD83".
* @param pszUnits the linear units "FEET" or "METERS".
*
* @return OGRERR_NONE on success or OGRERR_UNSUPPORTED_SRS if not found.
*/
// Builds this SRS from ERMapper projection/datum/units names, using the
// ecw_cs.wkt dictionary (see the doxygen block above for the contract).
OGRErr OGRSpatialReference::importFromERM( const char *pszProj,
                                           const char *pszDatum,
                                           const char *pszUnits )
{
    Clear();

    // "RAW" means no georeferencing at all: leave the SRS empty.
    if( EQUAL(pszProj, "RAW") )
        return OGRERR_NONE;

    // Explicit EPSG codes short-circuit the dictionary lookup.
    if( STARTS_WITH_CI(pszProj, "EPSG:") )
        return importFromEPSG( atoi(pszProj+5) );
    if( STARTS_WITH_CI(pszDatum, "EPSG:") )
        return importFromEPSG( atoi(pszDatum+5) );

    // Non-geodetic: look the projection up in the dictionary and attach
    // the requested linear units.
    if( !EQUAL(pszProj, "GEODETIC") )
    {
        const OGRErr eProjErr = importFromDict( "ecw_cs.wkt", pszProj );
        if( eProjErr != OGRERR_NONE )
            return eProjErr;

        if( EQUAL(pszUnits, "FEET") )
            SetLinearUnits( SRS_UL_US_FOOT, CPLAtof(SRS_UL_US_FOOT_CONV) );
        else
            SetLinearUnits( SRS_UL_METER, 1.0 );
    }

    // Resolve the datum into a GEOGCS and copy it in (unless this turned
    // out to be a purely local CS).
    OGRSpatialReference oGeogCS;
    const OGRErr eDatumErr = oGeogCS.importFromDict( "ecw_cs.wkt", pszDatum );
    if( eDatumErr != OGRERR_NONE )
    {
        Clear();
        return eDatumErr;
    }
    if( !IsLocal() )
        CopyGeogCSFrom( &oGeogCS );

    return OGRERR_NONE;
}
/************************************************************************/
/* OSRExportToERM() */
/************************************************************************/
/**
* \brief Convert coordinate system to ERMapper format.
*
* This function is the same as OGRSpatialReference::exportToERM().
*/
// C wrapper: validates the handle, then forwards to
// OGRSpatialReference::exportToERM().
OGRErr OSRExportToERM( OGRSpatialReferenceH hSRS,
                       char *pszProj, char *pszDatum, char *pszUnits )
{
    VALIDATE_POINTER1( hSRS, "OSRExportToERM", OGRERR_FAILURE );

    OGRSpatialReference *poSRS = reinterpret_cast<OGRSpatialReference *>(hSRS);
    return poSRS->exportToERM( pszProj, pszDatum, pszUnits );
}
/************************************************************************/
/* exportToERM() */
/************************************************************************/
/**
* Convert coordinate system to ERMapper format.
*
* @param pszProj 32 character buffer to receive projection name.
* @param pszDatum 32 character buffer to receive datum name.
* @param pszUnits 32 character buffer to receive units name.
*
* @return OGRERR_NONE on success, OGRERR_SRS_UNSUPPORTED if not translation is
* found, or OGRERR_FAILURE on other failures.
*/
// Translates this SRS into ERMapper projection/datum/units names. The three
// output buffers must each hold at least 32 bytes (see doxygen above); they
// are pre-filled with "RAW"/"RAW"/"METERS" and overwritten as translations
// are found.
OGRErr OGRSpatialReference::exportToERM( char *pszProj, char *pszDatum,
                                         char *pszUnits )
{
    const int BUFFER_SIZE = 32;

    strcpy( pszProj, "RAW" );
    strcpy( pszDatum, "RAW" );
    strcpy( pszUnits, "METERS" );

    if( !IsProjected() && !IsGeographic() )
        return OGRERR_UNSUPPORTED_SRS;

/* -------------------------------------------------------------------- */
/*      Try to find the EPSG code.                                      */
/* -------------------------------------------------------------------- */
    int nEPSGCode = 0;

    if( IsProjected() )
    {
        const char *pszAuthName = GetAuthorityName( "PROJCS" );

        if( pszAuthName != NULL && EQUAL(pszAuthName, "epsg") )
        {
            nEPSGCode = atoi(GetAuthorityCode( "PROJCS" ));
        }
    }
    else if( IsGeographic() )
    {
        const char *pszAuthName = GetAuthorityName( "GEOGCS" );

        if( pszAuthName != NULL && EQUAL(pszAuthName, "epsg") )
        {
            nEPSGCode = atoi(GetAuthorityCode( "GEOGCS" ));
        }
    }

/* -------------------------------------------------------------------- */
/*      Is our GEOGCS name already defined in ecw_cs.wkt?               */
/* -------------------------------------------------------------------- */
    OGRSpatialReference oSRSWork;

    const char *pszWKTDatum = GetAttrValue( "DATUM" );

    if( pszWKTDatum != NULL
        && oSRSWork.importFromDict( "ecw_cs.wkt", pszWKTDatum ) == OGRERR_NONE)
    {
        strncpy( pszDatum, pszWKTDatum, BUFFER_SIZE );
        pszDatum[BUFFER_SIZE-1] = '\0';
    }

/* -------------------------------------------------------------------- */
/*      Is this a "well known" geographic coordinate system?            */
/* -------------------------------------------------------------------- */
    // Hard-coded mapping from well-known EPSG geographic CS codes to the
    // ERMapper datum names used in the ECW dictionary.
    if( EQUAL(pszDatum, "RAW") )
    {
        int nEPSGGCSCode = GetEPSGGeogCS();

        if( nEPSGGCSCode == 4326 )
            strcpy( pszDatum, "WGS84" );

        else if( nEPSGGCSCode == 4322 )
            strcpy( pszDatum, "WGS72DOD" );

        else if( nEPSGGCSCode == 4267 )
            strcpy( pszDatum, "NAD27" );

        else if( nEPSGGCSCode == 4269 )
            strcpy( pszDatum, "NAD83" );

        else if( nEPSGGCSCode == 4277 )
            strcpy( pszDatum, "OSGB36" );

        else if( nEPSGGCSCode == 4278 )
            strcpy( pszDatum, "OSGB78" );

        else if( nEPSGGCSCode == 4201 )
            strcpy( pszDatum, "ADINDAN" );

        else if( nEPSGGCSCode == 4202 )
            strcpy( pszDatum, "AGD66" );

        else if( nEPSGGCSCode == 4203 )
            strcpy( pszDatum, "AGD84" );

        else if( nEPSGGCSCode == 4209 )
            strcpy( pszDatum, "ARC1950" );

        else if( nEPSGGCSCode == 4210 )
            strcpy( pszDatum, "ARC1960" );

        else if( nEPSGGCSCode == 4275 )
            strcpy( pszDatum, "NTF" );

        else if( nEPSGGCSCode == 4283 )
            strcpy( pszDatum, "GDA94" );

        else if( nEPSGGCSCode == 4284 )
            strcpy( pszDatum, "PULKOVO" );
    }

/* -------------------------------------------------------------------- */
/*      Are we working with a geographic (geodetic) coordinate system?  */
/* -------------------------------------------------------------------- */
    if( IsGeographic() )
    {
        if( EQUAL(pszDatum, "RAW") )
            return OGRERR_UNSUPPORTED_SRS;
        else
        {
            strcpy( pszProj, "GEODETIC" );
            return OGRERR_NONE;
        }
    }

/* -------------------------------------------------------------------- */
/*      Is this a UTM projection?                                       */
/* -------------------------------------------------------------------- */
    int bNorth = FALSE;
    int nZone = 0;

    nZone = GetUTMZone( &bNorth );
    if( nZone > 0 )
    {
        // Southern-hemisphere GDA94 UTM zones map to the MGA series.
        if( EQUAL(pszDatum, "GDA94") && !bNorth && nZone >= 48 && nZone <= 58)
        {
            snprintf( pszProj, BUFFER_SIZE, "MGA%02d", nZone );
        }
        else
        {
            if( bNorth )
                snprintf( pszProj, BUFFER_SIZE, "NUTM%02d", nZone );
            else
                snprintf( pszProj, BUFFER_SIZE, "SUTM%02d", nZone );
        }
    }

/* -------------------------------------------------------------------- */
/*      Is our PROJCS name already defined in ecw_cs.wkt?               */
/* -------------------------------------------------------------------- */
    else
    {
        const char *pszPROJCS = GetAttrValue( "PROJCS" );

        if( pszPROJCS != NULL
            && oSRSWork.importFromDict( "ecw_cs.wkt", pszPROJCS ) == OGRERR_NONE
            && oSRSWork.IsProjected() )
        {
            strncpy( pszProj, pszPROJCS, BUFFER_SIZE );
            pszProj[BUFFER_SIZE-1] = '\0';
        }
    }

/* -------------------------------------------------------------------- */
/*      If we have not translated it yet, but we have an EPSG code      */
/*      then use EPSG:n notation.                                       */
/* -------------------------------------------------------------------- */
    if( (EQUAL(pszDatum, "RAW") || EQUAL(pszProj, "RAW")) && nEPSGCode != 0 )
    {
        snprintf( pszProj, BUFFER_SIZE, "EPSG:%d", nEPSGCode );
        snprintf( pszDatum, BUFFER_SIZE, "EPSG:%d", nEPSGCode );
    }

/* -------------------------------------------------------------------- */
/*      Handle the units.                                               */
/* -------------------------------------------------------------------- */
    // The 0.0001 tolerance around 0.3048 accepts both the international
    // and the US survey foot.
    const double dfUnits = GetLinearUnits();

    if( fabs(dfUnits-0.3048) < 0.0001 )
        strcpy( pszUnits, "FEET" );
    else
        strcpy( pszUnits, "METERS" );

    if( EQUAL(pszProj, "RAW") )
        return OGRERR_UNSUPPORTED_SRS;

    return OGRERR_NONE;
}
|
#include <cassert>
#include <cctype>
#ifndef _MSC_VER
#include <dirent.h>
#include <unistd.h>
#endif
#include <cinttypes>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <algorithm>
#include <chrono>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <map>
#include <set>
#include <sstream>
#include <string>
#include <vector>
#include "linux-perf-events.h"
#ifdef __linux__
#include <libgen.h>
#endif
//#define DEBUG
#include "simdjson/common_defs.h"
#include "simdjson/jsonioutil.h"
#include "simdjson/jsonparser.h"
#include "simdjson/parsedjson.h"
#include "simdjson/stage1_find_marks.h"
#include "simdjson/stage2_build_tape.h"
// Benchmark driver: parses a JSON file repeatedly, reporting per-stage
// hardware performance counters (Linux only) and overall throughput.
//
// Options:
//   -v  verbose progress output
//   -d  dump the raw tape after parsing
//   -j  print the parsed JSON document
//   -1  force a single iteration
//   -t  terse tab-separated data output (Linux only)
int main(int argc, char *argv[]) {
  bool verbose = false;
  bool dump = false;
  bool jsonoutput = false;
  bool forceoneiteration = false;
  bool justdata = false;
#ifndef _MSC_VER
  int c;
  // BUG FIX: 'j' was missing from the option string ("1vdt"), so getopt
  // rejected -j as an invalid option and the 'j' case below was unreachable.
  while ((c = getopt(argc, argv, "1vdtj")) != -1) {
    switch (c) {
    case 't':
      justdata = true;
      break;
    case 'v':
      verbose = true;
      break;
    case 'd':
      dump = true;
      break;
    case 'j':
      jsonoutput = true;
      break;
    case '1':
      forceoneiteration = true;
      break;
    default:
      abort();
    }
  }
#else
  // getopt is unavailable under MSVC; treat the first argument as the file.
  int optind = 1;
#endif
  if (optind >= argc) {
    std::cerr << "Usage: " << argv[0] << " <jsonfile>" << std::endl;
    exit(1);
  }
  const char *filename = argv[optind];
  if (optind + 1 < argc) {
    std::cerr << "warning: ignoring everything after " << argv[optind + 1] << std::endl;
  }
  if (verbose) {
    std::cout << "[verbose] loading " << filename << std::endl;
  }
  padded_string p;
  try {
    get_corpus(filename).swap(p);
  } catch (const std::exception &e) { // caught by reference to base
    std::cout << "Could not load the file " << filename << std::endl;
    return EXIT_FAILURE;
  }
  if (verbose) {
    std::cout << "[verbose] loaded " << filename << " (" << p.size() << " bytes)"
              << std::endl;
  }
  // Small files are parsed more often to get a stable measurement.
#if defined(DEBUG)
  const uint32_t iterations = 1;
#else
  const uint32_t iterations =
      forceoneiteration ? 1 : (p.size() < 1 * 1000 * 1000 ? 1000 : 10);
#endif
  std::vector<double> res;
  res.resize(iterations);
  if (!justdata) printf("number of iterations %u \n", iterations);
#if !defined(__linux__)
#define SQUASH_COUNTERS
  if (justdata) {
    printf("justdata (-t) flag only works under linux.\n");
  }
#endif
#ifndef SQUASH_COUNTERS
  // Per-stage accumulators: cycles (cy), instructions (cl), branch misses
  // (mis), cache references (cref), cache misses (cmis); suffix 0 = memory
  // allocation, 1 = stage 1, 2 = stage 2.
  std::vector<int> evts;
  evts.push_back(PERF_COUNT_HW_CPU_CYCLES);
  evts.push_back(PERF_COUNT_HW_INSTRUCTIONS);
  evts.push_back(PERF_COUNT_HW_BRANCH_MISSES);
  evts.push_back(PERF_COUNT_HW_CACHE_REFERENCES);
  evts.push_back(PERF_COUNT_HW_CACHE_MISSES);
  LinuxEvents<PERF_TYPE_HARDWARE> unified(evts);
  std::vector<unsigned long long> results;
  results.resize(evts.size());
  unsigned long cy0 = 0, cy1 = 0, cy2 = 0;
  unsigned long cl0 = 0, cl1 = 0, cl2 = 0;
  unsigned long mis0 = 0, mis1 = 0, mis2 = 0;
  unsigned long cref0 = 0, cref1 = 0, cref2 = 0;
  unsigned long cmis0 = 0, cmis1 = 0, cmis2 = 0;
#endif
  bool isok = true;
#ifndef SQUASH_COUNTERS
  // First pass: gather hardware counters per stage.
  for (uint32_t i = 0; i < iterations; i++) {
    if (verbose) {
      std::cout << "[verbose] iteration # " << i << std::endl;
    }
    unified.start();
    ParsedJson pj;
    bool allocok = pj.allocateCapacity(p.size());
    if (!allocok) {
      std::cerr << "failed to allocate memory" << std::endl;
      return EXIT_FAILURE;
    }
    unified.end(results);
    cy0 += results[0];
    cl0 += results[1];
    mis0 += results[2];
    cref0 += results[3];
    cmis0 += results[4];
    if (verbose) {
      std::cout << "[verbose] allocated memory for parsed JSON " << std::endl;
    }
    unified.start();
    isok = (find_structural_bits(p.data(), p.size(), pj) == simdjson::SUCCESS);
    unified.end(results);
    cy1 += results[0];
    cl1 += results[1];
    mis1 += results[2];
    cref1 += results[3];
    cmis1 += results[4];
    if (!isok) {
      std::cout << "Failed during stage 1" << std::endl;
      break;
    }
    unified.start();
    isok = isok && (simdjson::SUCCESS == unified_machine(p.data(), p.size(), pj));
    unified.end(results);
    cy2 += results[0];
    cl2 += results[1];
    mis2 += results[2];
    cref2 += results[3];
    cmis2 += results[4];
    if (!isok) {
      std::cout << "Failed during stage 2" << std::endl;
      break;
    }
  }
#endif
  // we do it again, this time just measuring the elapsed time
  for (uint32_t i = 0; i < iterations; i++) {
    if (verbose) {
      std::cout << "[verbose] iteration # " << i << std::endl;
    }
    ParsedJson pj;
    bool allocok = pj.allocateCapacity(p.size());
    if (!allocok) {
      std::cerr << "failed to allocate memory" << std::endl;
      return EXIT_FAILURE;
    }
    if (verbose) {
      std::cout << "[verbose] allocated memory for parsed JSON " << std::endl;
    }
    auto start = std::chrono::steady_clock::now();
    isok = (find_structural_bits(p.data(), p.size(), pj) == simdjson::SUCCESS);
    isok = isok && (simdjson::SUCCESS == unified_machine(p.data(), p.size(), pj));
    auto end = std::chrono::steady_clock::now();
    std::chrono::duration<double> secs = end - start;
    res[i] = secs.count();
    if (!isok) {
      std::cerr << pj.getErrorMsg() << std::endl;
      std::cerr << "Could not parse. " << std::endl;
      return EXIT_FAILURE;
    }
  }
  ParsedJson pj = build_parsed_json(p); // do the parsing again to get the stats
  if (!pj.isValid()) {
    std::cerr << pj.getErrorMsg() << std::endl;
    std::cerr << "Could not parse. " << std::endl;
    return EXIT_FAILURE;
  }
  double min_result = *min_element(res.begin(), res.end());
  double speedinGBs = (p.size()) / (min_result * 1000000000.0);
#ifndef SQUASH_COUNTERS
  unsigned long total = cy0 + cy1 + cy2;
  if (justdata) {
    float cpb0 = (double)cy0 / (iterations * p.size());
    float cpb1 = (double)cy1 / (iterations * p.size());
    float cpb2 = (double)cy2 / (iterations * p.size());
    float cpbtotal = (double)total / (iterations * p.size());
    // Strip the directory and extension from the file name for the report.
    char *newfile = (char *)malloc(strlen(filename) + 1);
    if (newfile == NULL) {
      return EXIT_FAILURE;
    }
    ::strcpy(newfile, filename);
    char *snewfile = ::basename(newfile);
    size_t nl = strlen(snewfile);
    for (size_t j = nl - 1; j > 0; j--) {
      if (snewfile[j] == '.') {
        snewfile[j] = '\0';
        break;
      }
    }
    printf("\"%s\"\t%f\t%f\t%f\t%f\t%f\n", snewfile, cpb0, cpb1, cpb2,
           cpbtotal, speedinGBs);
    free(newfile);
  } else {
    // BUG FIX: p.size() is a size_t, so use %zu rather than %ld.
    printf("number of bytes %zu number of structural chars %u ratio %.3f\n",
           p.size(), pj.n_structural_indexes,
           (double)pj.n_structural_indexes / p.size());
    // BUG FIX: the cache-access column printed cref1 (stage 1's counter)
    // instead of cref0 for the memory-allocation phase.
    printf("mem alloc instructions: %10lu cycles: %10lu (%.2f %%) ins/cycles: "
           "%.2f mis. branches: %10lu (cycles/mis.branch %.2f) cache accesses: "
           "%10lu (failure %10lu)\n",
           cl0 / iterations, cy0 / iterations, 100. * cy0 / total,
           (double)cl0 / cy0, mis0 / iterations, (double)cy0 / mis0,
           cref0 / iterations, cmis0 / iterations);
    printf(" mem alloc runs at %.2f cycles per input byte.\n",
           (double)cy0 / (iterations * p.size()));
    printf("stage 1 instructions: %10lu cycles: %10lu (%.2f %%) ins/cycles: "
           "%.2f mis. branches: %10lu (cycles/mis.branch %.2f) cache accesses: "
           "%10lu (failure %10lu)\n",
           cl1 / iterations, cy1 / iterations, 100. * cy1 / total,
           (double)cl1 / cy1, mis1 / iterations, (double)cy1 / mis1,
           cref1 / iterations, cmis1 / iterations);
    printf(" stage 1 runs at %.2f cycles per input byte.\n",
           (double)cy1 / (iterations * p.size()));
    printf("stage 2 instructions: %10lu cycles: %10lu (%.2f %%) ins/cycles: "
           "%.2f mis. branches: %10lu (cycles/mis.branch %.2f) cache "
           "accesses: %10lu (failure %10lu)\n",
           cl2 / iterations, cy2 / iterations, 100. * cy2 / total,
           (double)cl2 / cy2, mis2 / iterations, (double)cy2 / mis2,
           cref2 / iterations, cmis2 / iterations);
    printf(" stage 2 runs at %.2f cycles per input byte and ",
           (double)cy2 / (iterations * p.size()));
    printf("%.2f cycles per structural character.\n",
           (double)cy2 / (iterations * pj.n_structural_indexes));
    printf(" all stages: %.2f cycles per input byte.\n",
           (double)total / (iterations * p.size()));
    printf("Estimated average frequency: %.3f GHz.\n", (double)total / (iterations * min_result * 1000000000.0));
  }
#endif
  if (!justdata) {
    std::cout << "Min: " << min_result << " bytes read: " << p.size()
              << " Gigabytes/second: " << speedinGBs
              << std::endl;
  }
  if (jsonoutput) {
    isok = isok && pj.printjson(std::cout);
  }
  if (dump) {
    isok = isok && pj.dump_raw_tape(std::cout);
  }
  if (!isok) {
    fprintf(stderr, " Parsing failed. \n ");
    return EXIT_FAILURE;
  }
  return EXIT_SUCCESS;
}
|
/**
* @file src/bin2llvmir/providers/abi/x64.cpp
* @brief ABI information for x86_64.
* @copyright (c) 2017 Avast Software, licensed under the MIT license
*/
#include "retdec/bin2llvmir/providers/abi/x64.h"
using namespace llvm;
namespace retdec {
namespace bin2llvmir {
AbiX64::AbiX64(llvm::Module* m, Config* c) : Abi(m, c)
{
	// Pre-size register bookkeeping to cover every Capstone x86 register id.
	_id2regs.resize(X86_REG_ENDING, nullptr);
	_regs.reserve(X86_REG_ENDING);

	// Stack pointer register.
	_regStackPointerId = X86_REG_RSP;

	// System calls: syscall id and return value travel through EAX,
	// arguments through the registers listed below.
	_regSyscallId = X86_REG_EAX;
	_regSyscallReturn = X86_REG_EAX;
	_syscallRegs = {
			X86_REG_RDI,
			X86_REG_RSI,
			X86_REG_RDX,
			X86_REG_R10,
			X86_REG_R8,
			X86_REG_R9};

	// Default calling convention for this ABI.
	_defcc = CallingConvention::ID::CC_X64;
}
/**
 * @return @c true if @a val maps to one of the 16 x86_64 general-purpose
 *         registers (RAX..RDX, RSP, RBP, RSI, RDI, R8..R15).
 */
bool AbiX64::isGeneralPurposeRegister(const llvm::Value* val) const
{
	switch (getRegisterId(val))
	{
		case X86_REG_RAX:
		case X86_REG_RBX:
		case X86_REG_RCX:
		case X86_REG_RDX:
		case X86_REG_RSP:
		case X86_REG_RBP:
		case X86_REG_RSI:
		case X86_REG_RDI:
		case X86_REG_R8:
		case X86_REG_R9:
		case X86_REG_R10:
		case X86_REG_R11:
		case X86_REG_R12:
		case X86_REG_R13:
		case X86_REG_R14:
		case X86_REG_R15:
			return true;
		default:
			return false;
	}
}
/**
 * Decide whether the given Capstone instruction is an effective no-op.
 * Recognizes the dedicated NOP encodings as well as the common
 * register-to-itself LEA and MOV idioms compilers emit for padding.
 */
bool AbiX64::isNopInstruction(cs_insn* insn)
{
	cs_x86& insn86 = insn->detail->x86;
	// True NOP variants.
	//
	if (insn->id == X86_INS_NOP
			|| insn->id == X86_INS_FNOP
			|| insn->id == X86_INS_FDISI8087_NOP
			|| insn->id == X86_INS_FENI8087_NOP
			|| insn->id == X86_INS_INT3)
	{
		return true;
	}
	// e.g. lea esi, [esi] -- load a register's own address back into itself:
	// memory operand must be plain base register (no segment/index/disp,
	// scale 1) and match the destination register.
	//
	else if (insn->id == X86_INS_LEA
			&& insn86.disp == 0
			&& insn86.op_count == 2
			&& insn86.operands[0].type == X86_OP_REG
			&& insn86.operands[1].type == X86_OP_MEM
			&& insn86.operands[1].mem.segment == X86_REG_INVALID
			&& insn86.operands[1].mem.index == X86_REG_INVALID
			&& insn86.operands[1].mem.scale == 1
			&& insn86.operands[1].mem.disp == 0
			&& insn86.operands[1].mem.base == insn86.operands[0].reg)
	{
		return true;
	}
	// e.g. mov esi, esi -- register moved onto itself.
	//
	else if (insn->id == X86_INS_MOV
			&& insn86.disp == 0
			&& insn86.op_count == 2
			&& insn86.operands[0].type == X86_OP_REG
			&& insn86.operands[1].type == X86_OP_REG
			&& insn86.operands[0].reg == insn86.operands[1].reg)
	{
		return true;
	}
	return false;
}
} // namespace bin2llvmir
} // namespace retdec
|
/*************************************************************************
* libjson-rpc-cpp
*************************************************************************
* @file testredisserver.cpp
* @date 24.08.2017
* @author Jacques Software <software@jacques.com.au>
* @license See attached LICENSE.txt
************************************************************************/
#include "testredisserver.h"
#include <iostream>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>
using namespace std;
using namespace jsonrpc;
#define REDIS_BIN "redis-server"
#define REDIS_BIN_DIR "/usr/bin/"
#define REDIS_CONF "redis.conf"
#define REDIS_MAXLINES 255
#define REDIS_KEY "accept connections"
/**
 * Construct the helper and immediately launch the redis-server process.
 */
TestRedisServer::TestRedisServer()
    : pid(0),
      maxlines(REDIS_MAXLINES),
      key(REDIS_KEY) {
  Start();
}
TestRedisServer::~TestRedisServer() { this->Stop(); }
bool TestRedisServer::Start() {
int pipefd[2];
pipe(pipefd);
pid = fork();
if (pid < 0) {
cerr << "ERROR: Failed to fork for redis server!" << endl;
return -1;
}
if (pid == 0) {
this->StartProcess(pipefd);
}
int ret = this->WaitProcess(pipefd);
if (ret == false) {
cerr << "ERROR: Server failed to start.\n";
this->Stop();
return -1;
}
return true;
}
/**
 * Kill the redis-server child process, if one is running.
 * @return true if a process was killed, false if none was running.
 */
bool TestRedisServer::Stop() {
  if (pid == 0) {
    return false;
  }
  kill(pid, SIGKILL);
  pid = 0;
  return true;
}
/**
 * Read the child's stdout line by line until the readiness marker (key)
 * appears. Gives up after maxlines lines or at end of stream.
 * @param pipefd Pipe whose read end is connected to the child's stdout.
 * @return true if the marker was seen, false otherwise.
 */
bool TestRedisServer::WaitProcess(int pipefd[2]) {
  // Close the unused write end so we observe EOF when the child exits.
  close(pipefd[1]);
  FILE *output = fdopen(pipefd[0], "r");
  if (output == NULL) {
    return false;
  }
  bool found = false;
  unsigned int i = 0;
  do {
    i++;
    char *buffer = NULL;
    size_t n = 0;
    // BUG FIX: check getline's return value (bytes read, or -1 on
    // EOF/error). The old code tested `n`, which is the allocated buffer
    // capacity, so EOF was never detected and stale reads were possible.
    ssize_t nread = getline(&buffer, &n, output);
    if (nread < 0) {
      free(buffer);
      break; // EOF or read error: the server will never report readiness.
    }
    string line(buffer, nread);
    free(buffer);
    if (line.find(key) != string::npos) {
      found = true;
      break;
    }
  } while (i < maxlines);
  // BUG FIX: the stream (and with it pipefd[0]) was leaked on every
  // return path.
  fclose(output);
  return found;
}
/**
 * Child-process half of Start(): wire stdio, then exec redis-server.
 * Never returns; exits with -1 if the exec fails.
 * @param pipefd Pipe whose write end receives the server's stdout.
 */
void TestRedisServer::StartProcess(int pipefd[2]) {
  // Close the unused read end
  close(pipefd[0]);
  // We don't want input going to this process or errors from it.
  FILE *f_null = fopen("/dev/null", "r+");
  if (f_null == NULL) {
    cerr << "ERROR: Failed to open /dev/null for redis server!" << endl;
  } else {
    // BUG FIX: only redirect when the open succeeded; previously
    // fileno(NULL) was called after the error message, crashing the child.
    dup2(fileno(f_null), STDIN_FILENO);
    dup2(fileno(f_null), STDERR_FILENO);
    fclose(f_null);
  }
  // Redirect output to our filedescriptor
  dup2(pipefd[1], STDOUT_FILENO);
  // Start redis server with our redis.conf
  execlp(REDIS_BIN, REDIS_BIN, REDIS_CONF, (char *)NULL);
  exit(-1);
}
|
//----------------------------------*-C++-*----------------------------------//
// Copyright 2020 UT-Battelle, LLC, and other Celeritas developers.
// See the top-level COPYRIGHT file for details.
// SPDX-License-Identifier: (Apache-2.0 OR MIT)
//---------------------------------------------------------------------------//
//! \file DeviceVector.i.hh
//---------------------------------------------------------------------------//
namespace celeritas
{
//---------------------------------------------------------------------------//
/*!
 * Construct with a number of allocated elements.
 *
 * Allocates count * sizeof(T) bytes; both size and capacity are set to
 * \c count.
 */
template<class T>
DeviceVector<T>::DeviceVector(size_type count)
: allocation_(count * sizeof(T)), size_(count), capacity_(count)
{
}
//---------------------------------------------------------------------------//
/*!
 * Swap storage, size, and capacity with another vector.
 *
 * (The previous doc comment, "Get the device data pointer", was a
 * copy-paste error.)
 */
template<class T>
void DeviceVector<T>::swap(DeviceVector& other) noexcept
{
using std::swap;
swap(size_, other.size_);
swap(capacity_, other.capacity_);
swap(allocation_, other.allocation_);
}
//---------------------------------------------------------------------------//
/*!
 * Change the size without changing capacity. There is no reallocation of
 * storage: the vector can only shrink or grow up to the container capacity.
 *
 * \pre size <= capacity()
 */
template<class T>
void DeviceVector<T>::resize(size_type size)
{
REQUIRE(size <= this->capacity());
size_ = size;
}
//---------------------------------------------------------------------------//
/*!
 * Copy data to device.
 *
 * The input span must have exactly size() elements; the elements are
 * reinterpreted as raw bytes for the underlying allocation copy.
 */
template<class T>
void DeviceVector<T>::copy_to_device(constSpan_t data)
{
REQUIRE(data.size() == this->size());
allocation_.copy_to_device(
{reinterpret_cast<const Byte*>(data.data()), data.size() * sizeof(T)});
}
//---------------------------------------------------------------------------//
/*!
 * Copy data to host.
 *
 * The output span must have exactly size() elements; the elements are
 * reinterpreted as raw bytes for the underlying allocation copy.
 */
template<class T>
void DeviceVector<T>::copy_to_host(Span_t data) const
{
REQUIRE(data.size() == this->size());
allocation_.copy_to_host(
{reinterpret_cast<Byte*>(data.data()), data.size() * sizeof(T)});
}
//---------------------------------------------------------------------------//
/*!
 * Get an on-device view to the data.
 *
 * The view covers size() elements of the raw allocation, reinterpreted
 * as T.
 */
template<class T>
auto DeviceVector<T>::device_pointers() -> Span_t
{
return {reinterpret_cast<T*>(allocation_.device_pointers().data()),
this->size()};
}
//---------------------------------------------------------------------------//
/*!
 * Get an on-device view to the data (const overload).
 */
template<class T>
auto DeviceVector<T>::device_pointers() const -> constSpan_t
{
return {reinterpret_cast<const T*>(allocation_.device_pointers().data()),
this->size()};
}
//---------------------------------------------------------------------------//
/*!
 * Free-function swap of two vectors, enabling ADL-based swap idioms.
 */
template<class T>
void swap(DeviceVector<T>& a, DeviceVector<T>& b) noexcept
{
    // Delegate to the member swap; no need to `return` a void expression.
    a.swap(b);
}
//---------------------------------------------------------------------------//
} // namespace celeritas
|
/*
* ServerKnobs.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "fdbclient/ServerKnobs.h"
#define init(...) KNOB_FN(__VA_ARGS__, INIT_ATOMIC_KNOB, INIT_KNOB)(__VA_ARGS__)
// Construct the knob set; all values are populated by initialize(), which
// applies defaults plus optional randomization/simulation overrides.
ServerKnobs::ServerKnobs(Randomize randomize, ClientKnobs* clientKnobs, IsSimulated isSimulated) {
initialize(randomize, clientKnobs, isSimulated);
}
void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSimulated isSimulated) {
// clang-format off
// Versions
init( VERSIONS_PER_SECOND, 1e6 );
init( MAX_VERSIONS_IN_FLIGHT, 100 * VERSIONS_PER_SECOND );
init( MAX_VERSIONS_IN_FLIGHT_FORCED, 6e5 * VERSIONS_PER_SECOND ); //one week of versions
init( MAX_READ_TRANSACTION_LIFE_VERSIONS, 5 * VERSIONS_PER_SECOND ); if (randomize && BUGGIFY) MAX_READ_TRANSACTION_LIFE_VERSIONS = VERSIONS_PER_SECOND; else if (randomize && BUGGIFY) MAX_READ_TRANSACTION_LIFE_VERSIONS = std::max<int>(1, 0.1 * VERSIONS_PER_SECOND); else if( randomize && BUGGIFY ) MAX_READ_TRANSACTION_LIFE_VERSIONS = 10 * VERSIONS_PER_SECOND;
init( MAX_WRITE_TRANSACTION_LIFE_VERSIONS, 5 * VERSIONS_PER_SECOND ); if (randomize && BUGGIFY) MAX_WRITE_TRANSACTION_LIFE_VERSIONS=std::max<int>(1, 1 * VERSIONS_PER_SECOND);
init( MAX_COMMIT_BATCH_INTERVAL, 2.0 ); if( randomize && BUGGIFY ) MAX_COMMIT_BATCH_INTERVAL = 0.5; // Each commit proxy generates a CommitTransactionBatchRequest at least this often, so that versions always advance smoothly
MAX_COMMIT_BATCH_INTERVAL = std::min(MAX_COMMIT_BATCH_INTERVAL, MAX_READ_TRANSACTION_LIFE_VERSIONS/double(2*VERSIONS_PER_SECOND)); // Ensure that the proxy commits 2 times every MAX_READ_TRANSACTION_LIFE_VERSIONS, otherwise the master will not give out versions fast enough
// TLogs
init( TLOG_TIMEOUT, 0.4 ); //cannot buggify because of availability
init( TLOG_SLOW_REJOIN_WARN_TIMEOUT_SECS, 60 ); if( randomize && BUGGIFY ) TLOG_SLOW_REJOIN_WARN_TIMEOUT_SECS = deterministicRandom()->randomInt(5,10);
init( RECOVERY_TLOG_SMART_QUORUM_DELAY, 0.25 ); if( randomize && BUGGIFY ) RECOVERY_TLOG_SMART_QUORUM_DELAY = 0.0; // smaller might be better for bug amplification
init( TLOG_STORAGE_MIN_UPDATE_INTERVAL, 0.5 );
init( BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL, 30 );
init( DESIRED_TOTAL_BYTES, 150000 ); if( randomize && BUGGIFY ) DESIRED_TOTAL_BYTES = 10000;
init( DESIRED_UPDATE_BYTES, 2*DESIRED_TOTAL_BYTES );
init( UPDATE_DELAY, 0.001 );
init( MAXIMUM_PEEK_BYTES, 10e6 );
init( APPLY_MUTATION_BYTES, 1e6 );
init( RECOVERY_DATA_BYTE_LIMIT, 100000 );
init( BUGGIFY_RECOVERY_DATA_LIMIT, 1000 );
init( LONG_TLOG_COMMIT_TIME, 0.25 ); //cannot buggify because of recovery time
init( LARGE_TLOG_COMMIT_BYTES, 4<<20 );
init( BUGGIFY_RECOVER_MEMORY_LIMIT, 1e6 );
init( BUGGIFY_WORKER_REMOVED_MAX_LAG, 30 );
init( UPDATE_STORAGE_BYTE_LIMIT, 1e6 );
init( TLOG_PEEK_DELAY, 0.00005 );
init( LEGACY_TLOG_UPGRADE_ENTRIES_PER_VERSION, 100 );
init( VERSION_MESSAGES_OVERHEAD_FACTOR_1024THS, 1072 ); // Based on a naive interpretation of the gcc version of std::deque, we would expect this to be 16 bytes overhead per 512 bytes data. In practice, it seems to be 24 bytes overhead per 512.
init( VERSION_MESSAGES_ENTRY_BYTES_WITH_OVERHEAD, std::ceil(16.0 * VERSION_MESSAGES_OVERHEAD_FACTOR_1024THS / 1024) );
init( LOG_SYSTEM_PUSHED_DATA_BLOCK_SIZE, 1e5 );
init( MAX_MESSAGE_SIZE, std::max<int>(LOG_SYSTEM_PUSHED_DATA_BLOCK_SIZE, 1e5 + 2e4 + 1) + 8 ); // VALUE_SIZE_LIMIT + SYSTEM_KEY_SIZE_LIMIT + 9 bytes (4 bytes for length, 4 bytes for sequence number, and 1 byte for mutation type)
init( TLOG_MESSAGE_BLOCK_BYTES, 10e6 );
init( TLOG_MESSAGE_BLOCK_OVERHEAD_FACTOR, double(TLOG_MESSAGE_BLOCK_BYTES) / (TLOG_MESSAGE_BLOCK_BYTES - MAX_MESSAGE_SIZE) ); //1.0121466709838096006362758832473
init( PEEK_TRACKER_EXPIRATION_TIME, 600 ); if( randomize && BUGGIFY ) PEEK_TRACKER_EXPIRATION_TIME = deterministicRandom()->coinflip() ? 0.1 : 120;
init( PEEK_USING_STREAMING, true );
init( PARALLEL_GET_MORE_REQUESTS, 32 ); if( randomize && BUGGIFY ) PARALLEL_GET_MORE_REQUESTS = 2;
init( MULTI_CURSOR_PRE_FETCH_LIMIT, 10 );
init( MAX_QUEUE_COMMIT_BYTES, 15e6 ); if( randomize && BUGGIFY ) MAX_QUEUE_COMMIT_BYTES = 5000;
init( DESIRED_OUTSTANDING_MESSAGES, 5000 ); if( randomize && BUGGIFY ) DESIRED_OUTSTANDING_MESSAGES = deterministicRandom()->randomInt(0,100);
init( DESIRED_GET_MORE_DELAY, 0.005 );
init( CONCURRENT_LOG_ROUTER_READS, 5 ); if( randomize && BUGGIFY ) CONCURRENT_LOG_ROUTER_READS = 1;
init( LOG_ROUTER_PEEK_FROM_SATELLITES_PREFERRED, 1 ); if( randomize && BUGGIFY ) LOG_ROUTER_PEEK_FROM_SATELLITES_PREFERRED = 0;
init( DISK_QUEUE_ADAPTER_MIN_SWITCH_TIME, 1.0 );
init( DISK_QUEUE_ADAPTER_MAX_SWITCH_TIME, 5.0 );
init( TLOG_SPILL_REFERENCE_MAX_PEEK_MEMORY_BYTES, 2e9 ); if ( randomize && BUGGIFY ) TLOG_SPILL_REFERENCE_MAX_PEEK_MEMORY_BYTES = 2e6;
init( TLOG_SPILL_REFERENCE_MAX_BATCHES_PER_PEEK, 100 ); if ( randomize && BUGGIFY ) TLOG_SPILL_REFERENCE_MAX_BATCHES_PER_PEEK = 1;
init( TLOG_SPILL_REFERENCE_MAX_BYTES_PER_BATCH, 16<<10 ); if ( randomize && BUGGIFY ) TLOG_SPILL_REFERENCE_MAX_BYTES_PER_BATCH = 500;
init( DISK_QUEUE_FILE_EXTENSION_BYTES, 10<<20 ); // BUGGIFYd per file within the DiskQueue
init( DISK_QUEUE_FILE_SHRINK_BYTES, 100<<20 ); // BUGGIFYd per file within the DiskQueue
init( DISK_QUEUE_MAX_TRUNCATE_BYTES, 2LL<<30 ); if ( randomize && BUGGIFY ) DISK_QUEUE_MAX_TRUNCATE_BYTES = 0;
init( TLOG_DEGRADED_DURATION, 5.0 );
init( MAX_CACHE_VERSIONS, 10e6 );
init( TLOG_IGNORE_POP_AUTO_ENABLE_DELAY, 300.0 );
init( TXS_POPPED_MAX_DELAY, 1.0 ); if ( randomize && BUGGIFY ) TXS_POPPED_MAX_DELAY = deterministicRandom()->random01();
init( TLOG_MAX_CREATE_DURATION, 10.0 );
init( PEEK_LOGGING_AMOUNT, 5 );
init( PEEK_LOGGING_DELAY, 5.0 );
init( PEEK_RESET_INTERVAL, 300.0 ); if ( randomize && BUGGIFY ) PEEK_RESET_INTERVAL = 20.0;
init( PEEK_MAX_LATENCY, 0.5 ); if ( randomize && BUGGIFY ) PEEK_MAX_LATENCY = 0.0;
init( PEEK_COUNT_SMALL_MESSAGES, false ); if ( randomize && BUGGIFY ) PEEK_COUNT_SMALL_MESSAGES = true;
init( PEEK_STATS_INTERVAL, 10.0 );
init( PEEK_STATS_SLOW_AMOUNT, 2 );
init( PEEK_STATS_SLOW_RATIO, 0.5 );
// Buggified value must be larger than the amount of simulated time taken by snapshots, to prevent repeatedly failing
// snapshots due to closed commit proxy connections
init( PUSH_RESET_INTERVAL, 300.0 ); if ( randomize && BUGGIFY ) PUSH_RESET_INTERVAL = 40.0;
init( PUSH_MAX_LATENCY, 0.5 ); if ( randomize && BUGGIFY ) PUSH_MAX_LATENCY = 0.0;
init( PUSH_STATS_INTERVAL, 10.0 );
init( PUSH_STATS_SLOW_AMOUNT, 2 );
init( PUSH_STATS_SLOW_RATIO, 0.5 );
init( TLOG_POP_BATCH_SIZE, 1000 ); if ( randomize && BUGGIFY ) TLOG_POP_BATCH_SIZE = 10;
init( TLOG_POPPED_VER_LAG_THRESHOLD_FOR_TLOGPOP_TRACE, 250e6 );
init( ENABLE_DETAILED_TLOG_POP_TRACE, true );
// disk snapshot max timeout, to be put in TLog, storage and coordinator nodes
init( MAX_FORKED_PROCESS_OUTPUT, 1024 );
init( SNAP_CREATE_MAX_TIMEOUT, 300.0 );
// Data distribution queue
init( HEALTH_POLL_TIME, 1.0 );
init( BEST_TEAM_STUCK_DELAY, 1.0 );
init( BG_REBALANCE_POLLING_INTERVAL, 10.0 );
init( BG_REBALANCE_SWITCH_CHECK_INTERVAL, 5.0 ); if (randomize && BUGGIFY) BG_REBALANCE_SWITCH_CHECK_INTERVAL = 1.0;
init( DD_QUEUE_LOGGING_INTERVAL, 5.0 );
init( RELOCATION_PARALLELISM_PER_SOURCE_SERVER, 2 ); if( randomize && BUGGIFY ) RELOCATION_PARALLELISM_PER_SOURCE_SERVER = 1;
init( DD_QUEUE_MAX_KEY_SERVERS, 100 ); if( randomize && BUGGIFY ) DD_QUEUE_MAX_KEY_SERVERS = 1;
init( DD_REBALANCE_PARALLELISM, 50 );
init( DD_REBALANCE_RESET_AMOUNT, 30 );
init( BG_DD_MAX_WAIT, 120.0 );
init( BG_DD_MIN_WAIT, 0.1 );
init( BG_DD_INCREASE_RATE, 1.10 );
init( BG_DD_DECREASE_RATE, 1.02 );
init( BG_DD_SATURATION_DELAY, 1.0 );
init( INFLIGHT_PENALTY_HEALTHY, 1.0 );
init( INFLIGHT_PENALTY_UNHEALTHY, 500.0 );
init( INFLIGHT_PENALTY_ONE_LEFT, 1000.0 );
init( USE_OLD_NEEDED_SERVERS, false );
init( PRIORITY_RECOVER_MOVE, 110 );
init( PRIORITY_REBALANCE_UNDERUTILIZED_TEAM, 120 );
init( PRIORITY_REBALANCE_OVERUTILIZED_TEAM, 121 );
init( PRIORITY_PERPETUAL_STORAGE_WIGGLE, 139 );
init( PRIORITY_TEAM_HEALTHY, 140 );
init( PRIORITY_TEAM_CONTAINS_UNDESIRED_SERVER, 150 );
init( PRIORITY_TEAM_REDUNDANT, 200 );
init( PRIORITY_MERGE_SHARD, 340 );
init( PRIORITY_POPULATE_REGION, 600 );
init( PRIORITY_TEAM_UNHEALTHY, 700 );
init( PRIORITY_TEAM_2_LEFT, 709 );
init( PRIORITY_TEAM_1_LEFT, 800 );
init( PRIORITY_TEAM_FAILED, 805 );
init( PRIORITY_TEAM_0_LEFT, 809 );
init( PRIORITY_SPLIT_SHARD, 950 ); if( randomize && BUGGIFY ) PRIORITY_SPLIT_SHARD = 350;
// Data distribution
init( RETRY_RELOCATESHARD_DELAY, 0.1 );
init( DATA_DISTRIBUTION_FAILURE_REACTION_TIME, 60.0 ); if( randomize && BUGGIFY ) DATA_DISTRIBUTION_FAILURE_REACTION_TIME = 1.0;
bool buggifySmallShards = randomize && BUGGIFY;
init( MIN_SHARD_BYTES, 200000 ); if( buggifySmallShards ) MIN_SHARD_BYTES = 40000; //FIXME: data distribution tracker (specifically StorageMetrics) relies on this number being larger than the maximum size of a key value pair
init( SHARD_BYTES_RATIO, 4 );
init( SHARD_BYTES_PER_SQRT_BYTES, 45 ); if( buggifySmallShards ) SHARD_BYTES_PER_SQRT_BYTES = 0;//Approximately 10000 bytes per shard
init( MAX_SHARD_BYTES, 500000000 );
init( KEY_SERVER_SHARD_BYTES, 500000000 );
init( SHARD_MAX_READ_DENSITY_RATIO, 8.0); if (randomize && BUGGIFY) SHARD_MAX_READ_DENSITY_RATIO = 2.0;
/*
The bytesRead/byteSize ratio. A shard will be declared read hot when this ratio is larger than the threshold. 8.0 was chosen to avoid reporting a table scan as read hot.
*/
init ( SHARD_READ_HOT_BANDWITH_MIN_PER_KSECONDS, 1666667 * 1000);
/*
The read bandwidth of a given shard needs to be larger than this value in order to be evaluated if it's read hot. The roughly 1.67MB per second is calculated as following:
- Heuristic data suggests that each storage process can do max 500K read operations per second
- Each read has a minimum cost of EMPTY_READ_PENALTY, which is 20 bytes
- Thus that gives a minimum 10MB per second
- But to be conservative, set that number to be 1/6 of 10MB, which is roughly 1,666,667 bytes per second
Shard with a read bandwidth smaller than this value will never be too busy to handle the reads.
*/
init( SHARD_MAX_BYTES_READ_PER_KSEC_JITTER, 0.1 );
bool buggifySmallBandwidthSplit = randomize && BUGGIFY;
init( SHARD_MAX_BYTES_PER_KSEC, 1LL*1000000*1000 ); if( buggifySmallBandwidthSplit ) SHARD_MAX_BYTES_PER_KSEC = 10LL*1000*1000;
/* 1*1MB/sec * 1000sec/ksec
Shards with more than this bandwidth will be split immediately.
For a large shard (100MB), it will be split into multiple shards with sizes < SHARD_SPLIT_BYTES_PER_KSEC;
all but one split shard will be moved; so splitting may cost ~100MB of work or about 10MB/sec over a 10 sec sampling window.
If the sampling window is too much longer, the MVCC window will fill up while we wait.
If SHARD_MAX_BYTES_PER_KSEC is too much lower, we could do a lot of data movement work in response to a small impulse of bandwidth.
If SHARD_MAX_BYTES_PER_KSEC is too high relative to the I/O bandwidth of a given server, a workload can remain concentrated on a single
team indefinitely, limiting performance.
*/
init( SHARD_MIN_BYTES_PER_KSEC, 100 * 1000 * 1000 ); if( buggifySmallBandwidthSplit ) SHARD_MIN_BYTES_PER_KSEC = 200*1*1000;
/* 100*1KB/sec * 1000sec/ksec
Shards with more than this bandwidth will not be merged.
Obviously this needs to be significantly less than SHARD_MAX_BYTES_PER_KSEC, else we will repeatedly merge and split.
It should probably be significantly less than SHARD_SPLIT_BYTES_PER_KSEC, else we will merge right after splitting.
The number of extra shards in the database because of bandwidth splitting can't be more than about W/SHARD_MIN_BYTES_PER_KSEC, where
W is the maximum bandwidth of the entire database in bytes/ksec. For 250MB/sec write bandwidth, (250MB/sec)/(200KB/sec) = 1250 extra
shards.
The bandwidth sample maintained by the storage server needs to be accurate enough to reliably measure this minimum bandwidth. See
BANDWIDTH_UNITS_PER_SAMPLE. If this number is too low, the storage server needs to spend more memory and time on sampling.
*/
init( SHARD_SPLIT_BYTES_PER_KSEC, 250 * 1000 * 1000 ); if( buggifySmallBandwidthSplit ) SHARD_SPLIT_BYTES_PER_KSEC = 50 * 1000 * 1000;
/* 250*1KB/sec * 1000sec/ksec
When splitting a shard, it is split into pieces with less than this bandwidth.
Obviously this should be less than half of SHARD_MAX_BYTES_PER_KSEC.
Smaller values mean that high bandwidth shards are split into more pieces, more quickly utilizing large numbers of servers to handle the
bandwidth.
Too many pieces (too small a value) may stress data movement mechanisms (see e.g. RELOCATION_PARALLELISM_PER_SOURCE_SERVER).
If this value is too small relative to SHARD_MIN_BYTES_PER_KSEC immediate merging work will be generated.
*/
init( STORAGE_METRIC_TIMEOUT, isSimulated ? 60.0 : 600.0 ); if( randomize && BUGGIFY ) STORAGE_METRIC_TIMEOUT = deterministicRandom()->coinflip() ? 10.0 : 30.0;
init( METRIC_DELAY, 0.1 ); if( randomize && BUGGIFY ) METRIC_DELAY = 1.0;
init( ALL_DATA_REMOVED_DELAY, 1.0 );
init( INITIAL_FAILURE_REACTION_DELAY, 30.0 ); if( randomize && BUGGIFY ) INITIAL_FAILURE_REACTION_DELAY = 0.0;
init( CHECK_TEAM_DELAY, 30.0 );
init( PERPETUAL_WIGGLE_DELAY, 50.0 );
init( LOG_ON_COMPLETION_DELAY, DD_QUEUE_LOGGING_INTERVAL );
init( BEST_TEAM_MAX_TEAM_TRIES, 10 );
init( BEST_TEAM_OPTION_COUNT, 4 );
init( BEST_OF_AMT, 4 );
init( SERVER_LIST_DELAY, 1.0 );
init( RECRUITMENT_IDLE_DELAY, 1.0 );
init( STORAGE_RECRUITMENT_DELAY, 10.0 );
init( TSS_HACK_IDENTITY_MAPPING, false ); // THIS SHOULD NEVER BE SET IN PROD. Only for performance testing
init( TSS_RECRUITMENT_TIMEOUT, 3*STORAGE_RECRUITMENT_DELAY ); if (randomize && BUGGIFY ) TSS_RECRUITMENT_TIMEOUT = 1.0; // Super low timeout should cause tss recruitments to fail
init( TSS_DD_CHECK_INTERVAL, 60.0 ); if (randomize && BUGGIFY ) TSS_DD_CHECK_INTERVAL = 1.0; // May kill all TSS quickly
init( DATA_DISTRIBUTION_LOGGING_INTERVAL, 5.0 );
init( DD_ENABLED_CHECK_DELAY, 1.0 );
init( DD_STALL_CHECK_DELAY, 0.4 ); //Must be larger than 2*MAX_BUGGIFIED_DELAY
init( DD_LOW_BANDWIDTH_DELAY, isSimulated ? 15.0 : 240.0 ); if( randomize && BUGGIFY ) DD_LOW_BANDWIDTH_DELAY = 0; //Because of delayJitter, this should be less than 0.9 * DD_MERGE_COALESCE_DELAY
init( DD_MERGE_COALESCE_DELAY, isSimulated ? 30.0 : 300.0 ); if( randomize && BUGGIFY ) DD_MERGE_COALESCE_DELAY = 0.001;
init( STORAGE_METRICS_POLLING_DELAY, 2.0 ); if( randomize && BUGGIFY ) STORAGE_METRICS_POLLING_DELAY = 15.0;
init( STORAGE_METRICS_RANDOM_DELAY, 0.2 );
init( AVAILABLE_SPACE_RATIO_CUTOFF, 0.05 );
init( DESIRED_TEAMS_PER_SERVER, 5 ); if( randomize && BUGGIFY ) DESIRED_TEAMS_PER_SERVER = deterministicRandom()->randomInt(1, 10);
init( MAX_TEAMS_PER_SERVER, 5*DESIRED_TEAMS_PER_SERVER );
init( DD_SHARD_SIZE_GRANULARITY, 5000000 );
init( DD_SHARD_SIZE_GRANULARITY_SIM, 500000 ); if( randomize && BUGGIFY ) DD_SHARD_SIZE_GRANULARITY_SIM = 0;
init( DD_MOVE_KEYS_PARALLELISM, 15 ); if( randomize && BUGGIFY ) DD_MOVE_KEYS_PARALLELISM = 1;
init( DD_FETCH_SOURCE_PARALLELISM, 1000 ); if( randomize && BUGGIFY ) DD_FETCH_SOURCE_PARALLELISM = 1;
init( DD_MERGE_LIMIT, 2000 ); if( randomize && BUGGIFY ) DD_MERGE_LIMIT = 2;
init( DD_SHARD_METRICS_TIMEOUT, 60.0 ); if( randomize && BUGGIFY ) DD_SHARD_METRICS_TIMEOUT = 0.1;
init( DD_LOCATION_CACHE_SIZE, 2000000 ); if( randomize && BUGGIFY ) DD_LOCATION_CACHE_SIZE = 3;
init( MOVEKEYS_LOCK_POLLING_DELAY, 5.0 );
init( DEBOUNCE_RECRUITING_DELAY, 5.0 );
init( DD_FAILURE_TIME, 1.0 ); if( randomize && BUGGIFY ) DD_FAILURE_TIME = 10.0;
init( DD_ZERO_HEALTHY_TEAM_DELAY, 1.0 );
init( REBALANCE_MAX_RETRIES, 100 );
init( DD_OVERLAP_PENALTY, 10000 );
init( DD_EXCLUDE_MIN_REPLICAS, 1 );
init( DD_VALIDATE_LOCALITY, true ); if( randomize && BUGGIFY ) DD_VALIDATE_LOCALITY = false;
init( DD_CHECK_INVALID_LOCALITY_DELAY, 60 ); if( randomize && BUGGIFY ) DD_CHECK_INVALID_LOCALITY_DELAY = 1 + deterministicRandom()->random01() * 600;
init( DD_ENABLE_VERBOSE_TRACING, false ); if( randomize && BUGGIFY ) DD_ENABLE_VERBOSE_TRACING = true;
init( DD_SS_FAILURE_VERSIONLAG, 250000000 );
init( DD_SS_ALLOWED_VERSIONLAG, 200000000 ); if( randomize && BUGGIFY ) { DD_SS_FAILURE_VERSIONLAG = deterministicRandom()->randomInt(15000000, 500000000); DD_SS_ALLOWED_VERSIONLAG = 0.75 * DD_SS_FAILURE_VERSIONLAG; }
init( DD_SS_STUCK_TIME_LIMIT, 300.0 ); if( randomize && BUGGIFY ) { DD_SS_STUCK_TIME_LIMIT = 200.0 + deterministicRandom()->random01() * 100.0; }
init( DD_TEAMS_INFO_PRINT_INTERVAL, 60 ); if( randomize && BUGGIFY ) DD_TEAMS_INFO_PRINT_INTERVAL = 10;
init( DD_TEAMS_INFO_PRINT_YIELD_COUNT, 100 ); if( randomize && BUGGIFY ) DD_TEAMS_INFO_PRINT_YIELD_COUNT = deterministicRandom()->random01() * 1000 + 1;
init( DD_TEAM_ZERO_SERVER_LEFT_LOG_DELAY, 120 ); if( randomize && BUGGIFY ) DD_TEAM_ZERO_SERVER_LEFT_LOG_DELAY = 5;
init( DD_STORAGE_WIGGLE_PAUSE_THRESHOLD, 1 ); if( randomize && BUGGIFY ) DD_STORAGE_WIGGLE_PAUSE_THRESHOLD = 10;
init( DD_STORAGE_WIGGLE_STUCK_THRESHOLD, 50 );
// TeamRemover
init( TR_FLAG_DISABLE_MACHINE_TEAM_REMOVER, false ); if( randomize && BUGGIFY ) TR_FLAG_DISABLE_MACHINE_TEAM_REMOVER = deterministicRandom()->random01() < 0.1 ? true : false; // false by default. disable the consistency check when it's true
init( TR_REMOVE_MACHINE_TEAM_DELAY, 60.0 ); if( randomize && BUGGIFY ) TR_REMOVE_MACHINE_TEAM_DELAY = deterministicRandom()->random01() * 60.0;
init( TR_FLAG_REMOVE_MT_WITH_MOST_TEAMS, true ); if( randomize && BUGGIFY ) TR_FLAG_REMOVE_MT_WITH_MOST_TEAMS = deterministicRandom()->random01() < 0.1 ? true : false;
init( TR_FLAG_DISABLE_SERVER_TEAM_REMOVER, false ); if( randomize && BUGGIFY ) TR_FLAG_DISABLE_SERVER_TEAM_REMOVER = deterministicRandom()->random01() < 0.1 ? true : false; // false by default. disable the consistency check when it's true
init( TR_REMOVE_SERVER_TEAM_DELAY, 60.0 ); if( randomize && BUGGIFY ) TR_REMOVE_SERVER_TEAM_DELAY = deterministicRandom()->random01() * 60.0;
init( TR_REMOVE_SERVER_TEAM_EXTRA_DELAY, 5.0 ); if( randomize && BUGGIFY ) TR_REMOVE_SERVER_TEAM_EXTRA_DELAY = deterministicRandom()->random01() * 10.0;
init( DD_REMOVE_STORE_ENGINE_DELAY, 60.0 ); if( randomize && BUGGIFY ) DD_REMOVE_STORE_ENGINE_DELAY = deterministicRandom()->random01() * 60.0;
// KeyValueStore SQLITE
init( CLEAR_BUFFER_SIZE, 20000 );
init( READ_VALUE_TIME_ESTIMATE, .00005 );
init( READ_RANGE_TIME_ESTIMATE, .00005 );
init( SET_TIME_ESTIMATE, .00005 );
init( CLEAR_TIME_ESTIMATE, .00005 );
init( COMMIT_TIME_ESTIMATE, .005 );
init( CHECK_FREE_PAGE_AMOUNT, 100 ); if( randomize && BUGGIFY ) CHECK_FREE_PAGE_AMOUNT = 5;
init( DISK_METRIC_LOGGING_INTERVAL, 5.0 );
init( SOFT_HEAP_LIMIT, 300e6 );
init( SQLITE_PAGE_SCAN_ERROR_LIMIT, 10000 );
init( SQLITE_BTREE_PAGE_USABLE, 4096 - 8); // pageSize - reserveSize for page checksum
init( SQLITE_CHUNK_SIZE_PAGES, 25600 ); // 100MB
init( SQLITE_CHUNK_SIZE_PAGES_SIM, 1024 ); // 4MB
init( SQLITE_READER_THREADS, 64 ); // number of read threads
init( SQLITE_WRITE_WINDOW_SECONDS, -1 );
init( SQLITE_WRITE_WINDOW_LIMIT, -1 );
if( randomize && BUGGIFY ) {
// Choose a window between .01 and 1.01 seconds.
SQLITE_WRITE_WINDOW_SECONDS = 0.01 + deterministicRandom()->random01();
// Choose random operations per second
int opsPerSecond = deterministicRandom()->randomInt(1000, 5000);
// Set window limit to opsPerSecond scaled down to window size
SQLITE_WRITE_WINDOW_LIMIT = opsPerSecond * SQLITE_WRITE_WINDOW_SECONDS;
}
// Maximum and minimum cell payload bytes allowed on primary page as calculated in SQLite.
// These formulas are copied from SQLite, using its hardcoded constants, so if you are
// changing this you should also be changing SQLite.
init( SQLITE_BTREE_CELL_MAX_LOCAL, (SQLITE_BTREE_PAGE_USABLE - 12) * 64/255 - 23 );
init( SQLITE_BTREE_CELL_MIN_LOCAL, (SQLITE_BTREE_PAGE_USABLE - 12) * 32/255 - 23 );
// Maximum FDB fragment key and value bytes that can fit in a primary btree page
init( SQLITE_FRAGMENT_PRIMARY_PAGE_USABLE,
SQLITE_BTREE_CELL_MAX_LOCAL
- 1 // vdbeRecord header length size
- 2 // max key length size
- 4 // max index length size
- 2 // max value fragment length size
);
// Maximum FDB fragment value bytes in an overflow page
init( SQLITE_FRAGMENT_OVERFLOW_PAGE_USABLE,
SQLITE_BTREE_PAGE_USABLE
- 4 // next pageNumber size
);
init( SQLITE_FRAGMENT_MIN_SAVINGS, 0.20 );
// KeyValueStoreSqlite spring cleaning
init( SPRING_CLEANING_NO_ACTION_INTERVAL, 1.0 ); if( randomize && BUGGIFY ) SPRING_CLEANING_NO_ACTION_INTERVAL = deterministicRandom()->coinflip() ? 0.1 : deterministicRandom()->random01() * 5;
init( SPRING_CLEANING_LAZY_DELETE_INTERVAL, 0.1 ); if( randomize && BUGGIFY ) SPRING_CLEANING_LAZY_DELETE_INTERVAL = deterministicRandom()->coinflip() ? 1.0 : deterministicRandom()->random01() * 5;
init( SPRING_CLEANING_VACUUM_INTERVAL, 1.0 ); if( randomize && BUGGIFY ) SPRING_CLEANING_VACUUM_INTERVAL = deterministicRandom()->coinflip() ? 0.1 : deterministicRandom()->random01() * 5;
init( SPRING_CLEANING_LAZY_DELETE_TIME_ESTIMATE, .010 ); if( randomize && BUGGIFY ) SPRING_CLEANING_LAZY_DELETE_TIME_ESTIMATE = deterministicRandom()->random01() * 5;
init( SPRING_CLEANING_VACUUM_TIME_ESTIMATE, .010 ); if( randomize && BUGGIFY ) SPRING_CLEANING_VACUUM_TIME_ESTIMATE = deterministicRandom()->random01() * 5;
init( SPRING_CLEANING_VACUUMS_PER_LAZY_DELETE_PAGE, 0.0 ); if( randomize && BUGGIFY ) SPRING_CLEANING_VACUUMS_PER_LAZY_DELETE_PAGE = deterministicRandom()->coinflip() ? 1e9 : deterministicRandom()->random01() * 5;
init( SPRING_CLEANING_MIN_LAZY_DELETE_PAGES, 0 ); if( randomize && BUGGIFY ) SPRING_CLEANING_MIN_LAZY_DELETE_PAGES = deterministicRandom()->randomInt(1, 100);
init( SPRING_CLEANING_MAX_LAZY_DELETE_PAGES, 1e9 ); if( randomize && BUGGIFY ) SPRING_CLEANING_MAX_LAZY_DELETE_PAGES = deterministicRandom()->coinflip() ? 0 : deterministicRandom()->randomInt(1, 1e4);
init( SPRING_CLEANING_LAZY_DELETE_BATCH_SIZE, 100 ); if( randomize && BUGGIFY ) SPRING_CLEANING_LAZY_DELETE_BATCH_SIZE = deterministicRandom()->randomInt(1, 1000);
init( SPRING_CLEANING_MIN_VACUUM_PAGES, 1 ); if( randomize && BUGGIFY ) SPRING_CLEANING_MIN_VACUUM_PAGES = deterministicRandom()->randomInt(0, 100);
init( SPRING_CLEANING_MAX_VACUUM_PAGES, 1e9 ); if( randomize && BUGGIFY ) SPRING_CLEANING_MAX_VACUUM_PAGES = deterministicRandom()->coinflip() ? 0 : deterministicRandom()->randomInt(1, 1e4);
// KeyValueStoreMemory
init( REPLACE_CONTENTS_BYTES, 1e5 );
// KeyValueStoreRocksDB
init( ROCKSDB_BACKGROUND_PARALLELISM, 0 );
init( ROCKSDB_READ_PARALLELISM, 4 );
// Use a smaller memtable in simulation to avoid OOMs.
int64_t memtableBytes = isSimulated ? 32 * 1024 : 512 * 1024 * 1024;
init( ROCKSDB_MEMTABLE_BYTES, memtableBytes );
init( ROCKSDB_UNSAFE_AUTO_FSYNC, false );
init( ROCKSDB_PERIODIC_COMPACTION_SECONDS, 0 );
init( ROCKSDB_PREFIX_LEN, 0 );
init( ROCKSDB_BLOCK_CACHE_SIZE, 0 );
init( ROCKSDB_METRICS_DELAY, 60.0 );
init( ROCKSDB_READ_VALUE_TIMEOUT, 5.0 );
init( ROCKSDB_READ_VALUE_PREFIX_TIMEOUT, 5.0 );
init( ROCKSDB_READ_RANGE_TIMEOUT, 5.0 );
// Leader election
bool longLeaderElection = randomize && BUGGIFY;
init( MAX_NOTIFICATIONS, 100000 );
init( MIN_NOTIFICATIONS, 100 );
init( NOTIFICATION_FULL_CLEAR_TIME, 10000.0 );
init( CANDIDATE_MIN_DELAY, 0.05 );
init( CANDIDATE_MAX_DELAY, 1.0 );
init( CANDIDATE_GROWTH_RATE, 1.2 );
init( POLLING_FREQUENCY, 2.0 ); if( longLeaderElection ) POLLING_FREQUENCY = 8.0;
init( HEARTBEAT_FREQUENCY, 0.5 ); if( longLeaderElection ) HEARTBEAT_FREQUENCY = 1.0;
// Commit CommitProxy and GRV CommitProxy
init( START_TRANSACTION_BATCH_INTERVAL_MIN, 1e-6 );
init( START_TRANSACTION_BATCH_INTERVAL_MAX, 0.010 );
init( START_TRANSACTION_BATCH_INTERVAL_LATENCY_FRACTION, 0.5 );
init( START_TRANSACTION_BATCH_INTERVAL_SMOOTHER_ALPHA, 0.1 );
init( START_TRANSACTION_BATCH_QUEUE_CHECK_INTERVAL, 0.001 );
init( START_TRANSACTION_MAX_TRANSACTIONS_TO_START, 100000 );
init( START_TRANSACTION_MAX_REQUESTS_TO_START, 10000 );
init( START_TRANSACTION_RATE_WINDOW, 2.0 );
init( START_TRANSACTION_MAX_EMPTY_QUEUE_BUDGET, 10.0 );
init( START_TRANSACTION_MAX_QUEUE_SIZE, 1e6 );
init( KEY_LOCATION_MAX_QUEUE_SIZE, 1e6 );
init( COMMIT_PROXY_LIVENESS_TIMEOUT, 20.0 );
init( COMMIT_TRANSACTION_BATCH_INTERVAL_FROM_IDLE, 0.0005 ); if( randomize && BUGGIFY ) COMMIT_TRANSACTION_BATCH_INTERVAL_FROM_IDLE = 0.005;
init( COMMIT_TRANSACTION_BATCH_INTERVAL_MIN, 0.001 ); if( randomize && BUGGIFY ) COMMIT_TRANSACTION_BATCH_INTERVAL_MIN = 0.1;
init( COMMIT_TRANSACTION_BATCH_INTERVAL_MAX, 0.020 );
init( COMMIT_TRANSACTION_BATCH_INTERVAL_LATENCY_FRACTION, 0.1 );
init( COMMIT_TRANSACTION_BATCH_INTERVAL_SMOOTHER_ALPHA, 0.1 );
init( COMMIT_TRANSACTION_BATCH_COUNT_MAX, 32768 ); if( randomize && BUGGIFY ) COMMIT_TRANSACTION_BATCH_COUNT_MAX = 1000; // Do NOT increase this number beyond 32768, as CommitIds only budget 2 bytes for storing transaction id within each batch
init( COMMIT_BATCHES_MEM_BYTES_HARD_LIMIT, 8LL << 30 ); if (randomize && BUGGIFY) COMMIT_BATCHES_MEM_BYTES_HARD_LIMIT = deterministicRandom()->randomInt64(100LL << 20, 8LL << 30);
init( COMMIT_BATCHES_MEM_FRACTION_OF_TOTAL, 0.5 );
init( COMMIT_BATCHES_MEM_TO_TOTAL_MEM_SCALE_FACTOR, 5.0 );
// these settings disable batch bytes scaling. Try COMMIT_TRANSACTION_BATCH_BYTES_MAX=1e6, COMMIT_TRANSACTION_BATCH_BYTES_SCALE_BASE=50000, COMMIT_TRANSACTION_BATCH_BYTES_SCALE_POWER=0.5?
init( COMMIT_TRANSACTION_BATCH_BYTES_MIN, 100000 );
init( COMMIT_TRANSACTION_BATCH_BYTES_MAX, 100000 ); if( randomize && BUGGIFY ) { COMMIT_TRANSACTION_BATCH_BYTES_MIN = COMMIT_TRANSACTION_BATCH_BYTES_MAX = 1000000; }
init( COMMIT_TRANSACTION_BATCH_BYTES_SCALE_BASE, 100000 );
init( COMMIT_TRANSACTION_BATCH_BYTES_SCALE_POWER, 0.0 );
init( RESOLVER_COALESCE_TIME, 1.0 );
init( BUGGIFIED_ROW_LIMIT, APPLY_MUTATION_BYTES ); if( randomize && BUGGIFY ) BUGGIFIED_ROW_LIMIT = deterministicRandom()->randomInt(3, 30);
init( PROXY_SPIN_DELAY, 0.01 );
init( UPDATE_REMOTE_LOG_VERSION_INTERVAL, 2.0 );
init( MAX_TXS_POP_VERSION_HISTORY, 1e5 );
init( MIN_CONFIRM_INTERVAL, 0.05 );
bool shortRecoveryDuration = randomize && BUGGIFY;
init( ENFORCED_MIN_RECOVERY_DURATION, 0.085 ); if( shortRecoveryDuration ) ENFORCED_MIN_RECOVERY_DURATION = 0.01;
init( REQUIRED_MIN_RECOVERY_DURATION, 0.080 ); if( shortRecoveryDuration ) REQUIRED_MIN_RECOVERY_DURATION = 0.01;
init( ALWAYS_CAUSAL_READ_RISKY, false );
init( MAX_COMMIT_UPDATES, 2000 ); if( randomize && BUGGIFY ) MAX_COMMIT_UPDATES = 1;
init( MAX_PROXY_COMPUTE, 2.0 );
init( MAX_COMPUTE_PER_OPERATION, 0.1 );
init( PROXY_COMPUTE_BUCKETS, 20000 );
init( PROXY_COMPUTE_GROWTH_RATE, 0.01 );
init( TXN_STATE_SEND_AMOUNT, 4 );
init( REPORT_TRANSACTION_COST_ESTIMATION_DELAY, 0.1 );
init( PROXY_REJECT_BATCH_QUEUED_TOO_LONG, true );
init( RESET_MASTER_BATCHES, 200 );
init( RESET_RESOLVER_BATCHES, 200 );
init( RESET_MASTER_DELAY, 300.0 );
init( RESET_RESOLVER_DELAY, 300.0 );
// Master Server
// masterCommitter() in the master server will allow lower priority tasks (e.g. DataDistribution)
// by delay()ing for this amount of time between accepted batches of TransactionRequests.
bool fastBalancing = randomize && BUGGIFY;
init( COMMIT_SLEEP_TIME, 0.0001 ); if( randomize && BUGGIFY ) COMMIT_SLEEP_TIME = 0;
init( KEY_BYTES_PER_SAMPLE, 2e4 ); if( fastBalancing ) KEY_BYTES_PER_SAMPLE = 1e3;
init( MIN_BALANCE_TIME, 0.2 );
init( MIN_BALANCE_DIFFERENCE, 1e6 ); if( fastBalancing ) MIN_BALANCE_DIFFERENCE = 1e4;
init( SECONDS_BEFORE_NO_FAILURE_DELAY, 8 * 3600 );
init( MAX_TXS_SEND_MEMORY, 1e7 ); if( randomize && BUGGIFY ) MAX_TXS_SEND_MEMORY = 1e5;
init( MAX_RECOVERY_VERSIONS, 200 * VERSIONS_PER_SECOND );
init( MAX_RECOVERY_TIME, 20.0 ); if( randomize && BUGGIFY ) MAX_RECOVERY_TIME = 1.0;
init( PROVISIONAL_START_DELAY, 1.0 );
init( PROVISIONAL_MAX_DELAY, 60.0 );
init( PROVISIONAL_DELAY_GROWTH, 1.5 );
init( SECONDS_BEFORE_RECRUIT_BACKUP_WORKER, 4.0 ); if( randomize && BUGGIFY ) SECONDS_BEFORE_RECRUIT_BACKUP_WORKER = deterministicRandom()->random01() * 8;
init( CC_INTERFACE_TIMEOUT, 10.0 ); if( randomize && BUGGIFY ) CC_INTERFACE_TIMEOUT = 0.0;
// Resolver
init( SAMPLE_OFFSET_PER_KEY, 100 );
init( SAMPLE_EXPIRATION_TIME, 1.0 );
init( SAMPLE_POLL_TIME, 0.1 );
init( RESOLVER_STATE_MEMORY_LIMIT, 1e6 );
init( LAST_LIMITED_RATIO, 2.0 );
// Backup Worker
init( BACKUP_TIMEOUT, 0.4 );
init( BACKUP_NOOP_POP_DELAY, 5.0 );
init( BACKUP_FILE_BLOCK_BYTES, 1024 * 1024 );
init( BACKUP_LOCK_BYTES, 3e9 ); if(randomize && BUGGIFY) BACKUP_LOCK_BYTES = deterministicRandom()->randomInt(1024, 4096) * 1024;
init( BACKUP_UPLOAD_DELAY, 10.0 ); if(randomize && BUGGIFY) BACKUP_UPLOAD_DELAY = deterministicRandom()->random01() * 60;
//Cluster Controller
init( CLUSTER_CONTROLLER_LOGGING_DELAY, 5.0 );
init( MASTER_FAILURE_REACTION_TIME, 0.4 ); if( randomize && BUGGIFY ) MASTER_FAILURE_REACTION_TIME = 10.0;
init( MASTER_FAILURE_SLOPE_DURING_RECOVERY, 0.1 );
init( WORKER_COORDINATION_PING_DELAY, 60 );
init( SIM_SHUTDOWN_TIMEOUT, 10 );
init( SHUTDOWN_TIMEOUT, 600 ); if( randomize && BUGGIFY ) SHUTDOWN_TIMEOUT = 60.0;
init( MASTER_SPIN_DELAY, 1.0 ); if( randomize && BUGGIFY ) MASTER_SPIN_DELAY = 10.0;
init( CC_CHANGE_DELAY, 0.1 );
init( CC_CLASS_DELAY, 0.01 );
init( WAIT_FOR_GOOD_RECRUITMENT_DELAY, 1.0 );
init( WAIT_FOR_GOOD_REMOTE_RECRUITMENT_DELAY, 5.0 );
init( ATTEMPT_RECRUITMENT_DELAY, 0.035 );
init( WAIT_FOR_DISTRIBUTOR_JOIN_DELAY, 1.0 );
init( WAIT_FOR_RATEKEEPER_JOIN_DELAY, 1.0 );
init( WORKER_FAILURE_TIME, 1.0 ); if( randomize && BUGGIFY ) WORKER_FAILURE_TIME = 10.0;
init( CHECK_OUTSTANDING_INTERVAL, 0.5 ); if( randomize && BUGGIFY ) CHECK_OUTSTANDING_INTERVAL = 0.001;
init( VERSION_LAG_METRIC_INTERVAL, 0.5 ); if( randomize && BUGGIFY ) VERSION_LAG_METRIC_INTERVAL = 10.0;
init( MAX_VERSION_DIFFERENCE, 20 * VERSIONS_PER_SECOND );
init( FORCE_RECOVERY_CHECK_DELAY, 5.0 );
init( RATEKEEPER_FAILURE_TIME, 1.0 );
init( REPLACE_INTERFACE_DELAY, 60.0 );
init( REPLACE_INTERFACE_CHECK_DELAY, 5.0 );
init( COORDINATOR_REGISTER_INTERVAL, 5.0 );
init( CLIENT_REGISTER_INTERVAL, 600.0 );
init( CC_ENABLE_WORKER_HEALTH_MONITOR, false );
init( CC_WORKER_HEALTH_CHECKING_INTERVAL, 60.0 );
init( CC_DEGRADED_LINK_EXPIRATION_INTERVAL, 300.0 );
init( CC_MIN_DEGRADATION_INTERVAL, 120.0 );
init( CC_DEGRADED_PEER_DEGREE_TO_EXCLUDE, 3 );
init( CC_MAX_EXCLUSION_DUE_TO_HEALTH, 2 );
init( CC_HEALTH_TRIGGER_RECOVERY, false );
init( CC_TRACKING_HEALTH_RECOVERY_INTERVAL, 3600.0 );
init( CC_MAX_HEALTH_RECOVERY_COUNT, 2 );
init( INCOMPATIBLE_PEERS_LOGGING_INTERVAL, 600 ); if( randomize && BUGGIFY ) INCOMPATIBLE_PEERS_LOGGING_INTERVAL = 60.0;
init( EXPECTED_MASTER_FITNESS, ProcessClass::UnsetFit );
init( EXPECTED_TLOG_FITNESS, ProcessClass::UnsetFit );
init( EXPECTED_LOG_ROUTER_FITNESS, ProcessClass::UnsetFit );
init( EXPECTED_COMMIT_PROXY_FITNESS, ProcessClass::UnsetFit );
init( EXPECTED_GRV_PROXY_FITNESS, ProcessClass::UnsetFit );
init( EXPECTED_RESOLVER_FITNESS, ProcessClass::UnsetFit );
init( RECRUITMENT_TIMEOUT, 600 ); if( randomize && BUGGIFY ) RECRUITMENT_TIMEOUT = deterministicRandom()->coinflip() ? 60.0 : 1.0;
init( POLICY_RATING_TESTS, 200 ); if( randomize && BUGGIFY ) POLICY_RATING_TESTS = 20;
init( POLICY_GENERATIONS, 100 ); if( randomize && BUGGIFY ) POLICY_GENERATIONS = 10;
init( DBINFO_SEND_AMOUNT, 5 );
init( DBINFO_BATCH_DELAY, 0.1 );
//Move Keys
init( SHARD_READY_DELAY, 0.25 );
init( SERVER_READY_QUORUM_INTERVAL, std::min(1.0, std::min(MAX_READ_TRANSACTION_LIFE_VERSIONS, MAX_WRITE_TRANSACTION_LIFE_VERSIONS)/(5.0*VERSIONS_PER_SECOND)) );
init( SERVER_READY_QUORUM_TIMEOUT, 15.0 ); if( randomize && BUGGIFY ) SERVER_READY_QUORUM_TIMEOUT = 1.0;
init( REMOVE_RETRY_DELAY, 1.0 );
init( MOVE_KEYS_KRM_LIMIT, 2000 ); if( randomize && BUGGIFY ) MOVE_KEYS_KRM_LIMIT = 2;
init( MOVE_KEYS_KRM_LIMIT_BYTES, 1e5 ); if( randomize && BUGGIFY ) MOVE_KEYS_KRM_LIMIT_BYTES = 5e4; //This must be sufficiently larger than CLIENT_KNOBS->KEY_SIZE_LIMIT (fdbclient/Knobs.h) to ensure that at least two entries will be returned from an attempt to read a key range map
init( MAX_SKIP_TAGS, 1 ); //The TLogs require tags to be densely packed to be memory efficient, so be careful increasing this knob
init( MAX_ADDED_SOURCES_MULTIPLIER, 2.0 );
//FdbServer
bool longReboots = randomize && BUGGIFY;
init( MIN_REBOOT_TIME, 4.0 ); if( longReboots ) MIN_REBOOT_TIME = 10.0;
init( MAX_REBOOT_TIME, 5.0 ); if( longReboots ) MAX_REBOOT_TIME = 20.0;
init( LOG_DIRECTORY, "."); // Will be set to the command line flag.
init( SERVER_MEM_LIMIT, 8LL << 30 );
init( SYSTEM_MONITOR_FREQUENCY, 5.0 );
//Ratekeeper
bool slowRatekeeper = randomize && BUGGIFY;
init( SMOOTHING_AMOUNT, 1.0 ); if( slowRatekeeper ) SMOOTHING_AMOUNT = 5.0;
init( SLOW_SMOOTHING_AMOUNT, 10.0 ); if( slowRatekeeper ) SLOW_SMOOTHING_AMOUNT = 50.0;
init( METRIC_UPDATE_RATE, .1 ); if( slowRatekeeper ) METRIC_UPDATE_RATE = 0.5;
init( DETAILED_METRIC_UPDATE_RATE, 5.0 );
init (RATEKEEPER_DEFAULT_LIMIT, 1e6 ); if( randomize && BUGGIFY ) RATEKEEPER_DEFAULT_LIMIT = 0;
bool smallStorageTarget = randomize && BUGGIFY;
init( TARGET_BYTES_PER_STORAGE_SERVER, 1000e6 ); if( smallStorageTarget ) TARGET_BYTES_PER_STORAGE_SERVER = 3000e3;
init( SPRING_BYTES_STORAGE_SERVER, 100e6 ); if( smallStorageTarget ) SPRING_BYTES_STORAGE_SERVER = 300e3;
init( AUTO_TAG_THROTTLE_STORAGE_QUEUE_BYTES, 800e6 ); if( smallStorageTarget ) AUTO_TAG_THROTTLE_STORAGE_QUEUE_BYTES = 2500e3;
init( TARGET_BYTES_PER_STORAGE_SERVER_BATCH, 750e6 ); if( smallStorageTarget ) TARGET_BYTES_PER_STORAGE_SERVER_BATCH = 1500e3;
init( SPRING_BYTES_STORAGE_SERVER_BATCH, 100e6 ); if( smallStorageTarget ) SPRING_BYTES_STORAGE_SERVER_BATCH = 150e3;
init( STORAGE_HARD_LIMIT_BYTES, 1500e6 ); if( smallStorageTarget ) STORAGE_HARD_LIMIT_BYTES = 4500e3;
init( STORAGE_DURABILITY_LAG_HARD_MAX, 2000e6 ); if( smallStorageTarget ) STORAGE_DURABILITY_LAG_HARD_MAX = 100e6;
init( STORAGE_DURABILITY_LAG_SOFT_MAX, 250e6 ); if( smallStorageTarget ) STORAGE_DURABILITY_LAG_SOFT_MAX = 10e6;
//FIXME: Low priority reads are disabled by assigning very high knob values, reduce knobs for 7.0
init( LOW_PRIORITY_STORAGE_QUEUE_BYTES, 775e8 ); if( smallStorageTarget ) LOW_PRIORITY_STORAGE_QUEUE_BYTES = 1750e3;
init( LOW_PRIORITY_DURABILITY_LAG, 200e6 ); if( smallStorageTarget ) LOW_PRIORITY_DURABILITY_LAG = 15e6;
bool smallTlogTarget = randomize && BUGGIFY;
init( TARGET_BYTES_PER_TLOG, 2400e6 ); if( smallTlogTarget ) TARGET_BYTES_PER_TLOG = 2000e3;
init( SPRING_BYTES_TLOG, 400e6 ); if( smallTlogTarget ) SPRING_BYTES_TLOG = 200e3;
init( TARGET_BYTES_PER_TLOG_BATCH, 1400e6 ); if( smallTlogTarget ) TARGET_BYTES_PER_TLOG_BATCH = 1400e3;
init( SPRING_BYTES_TLOG_BATCH, 300e6 ); if( smallTlogTarget ) SPRING_BYTES_TLOG_BATCH = 150e3;
init( TLOG_SPILL_THRESHOLD, 1500e6 ); if( smallTlogTarget ) TLOG_SPILL_THRESHOLD = 1500e3; if( randomize && BUGGIFY ) TLOG_SPILL_THRESHOLD = 0;
init( REFERENCE_SPILL_UPDATE_STORAGE_BYTE_LIMIT, 20e6 ); if( (randomize && BUGGIFY) || smallTlogTarget ) REFERENCE_SPILL_UPDATE_STORAGE_BYTE_LIMIT = 1e6;
init( TLOG_HARD_LIMIT_BYTES, 3000e6 ); if( smallTlogTarget ) TLOG_HARD_LIMIT_BYTES = 30e6;
init( TLOG_RECOVER_MEMORY_LIMIT, TARGET_BYTES_PER_TLOG + SPRING_BYTES_TLOG );
init( MAX_TRANSACTIONS_PER_BYTE, 1000 );
init( MIN_AVAILABLE_SPACE, 1e8 );
init( MIN_AVAILABLE_SPACE_RATIO, 0.05 );
init( TARGET_AVAILABLE_SPACE_RATIO, 0.30 );
init( AVAILABLE_SPACE_UPDATE_DELAY, 5.0 );
init( MAX_TL_SS_VERSION_DIFFERENCE, 1e99 ); // if( randomize && BUGGIFY ) MAX_TL_SS_VERSION_DIFFERENCE = std::max(1.0, 0.25 * VERSIONS_PER_SECOND); // spring starts at half this value //FIXME: this knob causes ratekeeper to clamp on idle cluster in simulation that have a large number of logs
init( MAX_TL_SS_VERSION_DIFFERENCE_BATCH, 1e99 );
init( MAX_MACHINES_FALLING_BEHIND, 1 );
init( MAX_TPS_HISTORY_SAMPLES, 600 );
init( NEEDED_TPS_HISTORY_SAMPLES, 200 );
init( TARGET_DURABILITY_LAG_VERSIONS, 350e6 ); // Should be larger than STORAGE_DURABILITY_LAG_SOFT_MAX
init( AUTO_TAG_THROTTLE_DURABILITY_LAG_VERSIONS, 250e6 );
init( TARGET_DURABILITY_LAG_VERSIONS_BATCH, 150e6 ); // Should be larger than STORAGE_DURABILITY_LAG_SOFT_MAX
init( DURABILITY_LAG_UNLIMITED_THRESHOLD, 50e6 );
init( INITIAL_DURABILITY_LAG_MULTIPLIER, 1.02 );
init( DURABILITY_LAG_REDUCTION_RATE, 0.9999 );
init( DURABILITY_LAG_INCREASE_RATE, 1.001 );
init( STORAGE_SERVER_LIST_FETCH_TIMEOUT, 20.0 );
init( MAX_AUTO_THROTTLED_TRANSACTION_TAGS, 5 ); if(randomize && BUGGIFY) MAX_AUTO_THROTTLED_TRANSACTION_TAGS = 1;
init( MAX_MANUAL_THROTTLED_TRANSACTION_TAGS, 40 ); if(randomize && BUGGIFY) MAX_MANUAL_THROTTLED_TRANSACTION_TAGS = 1;
init( MIN_TAG_COST, 200 ); if(randomize && BUGGIFY) MIN_TAG_COST = 0.0;
init( AUTO_THROTTLE_TARGET_TAG_BUSYNESS, 0.1 ); if(randomize && BUGGIFY) AUTO_THROTTLE_TARGET_TAG_BUSYNESS = 0.0;
init( AUTO_TAG_THROTTLE_RAMP_UP_TIME, 120.0 ); if(randomize && BUGGIFY) AUTO_TAG_THROTTLE_RAMP_UP_TIME = 5.0;
init( AUTO_TAG_THROTTLE_DURATION, 240.0 ); if(randomize && BUGGIFY) AUTO_TAG_THROTTLE_DURATION = 20.0;
init( TAG_THROTTLE_PUSH_INTERVAL, 1.0 ); if(randomize && BUGGIFY) TAG_THROTTLE_PUSH_INTERVAL = 0.0;
init( AUTO_TAG_THROTTLE_START_AGGREGATION_TIME, 5.0 ); if(randomize && BUGGIFY) AUTO_TAG_THROTTLE_START_AGGREGATION_TIME = 0.5;
init( AUTO_TAG_THROTTLE_UPDATE_FREQUENCY, 10.0 ); if(randomize && BUGGIFY) AUTO_TAG_THROTTLE_UPDATE_FREQUENCY = 0.5;
init( TAG_THROTTLE_EXPIRED_CLEANUP_INTERVAL, 30.0 ); if(randomize && BUGGIFY) TAG_THROTTLE_EXPIRED_CLEANUP_INTERVAL = 1.0;
init( AUTO_TAG_THROTTLING_ENABLED, true ); if(randomize && BUGGIFY) AUTO_TAG_THROTTLING_ENABLED = false;
//Storage Metrics
init( STORAGE_METRICS_AVERAGE_INTERVAL, 120.0 );
init( STORAGE_METRICS_AVERAGE_INTERVAL_PER_KSECONDS, 1000.0 / STORAGE_METRICS_AVERAGE_INTERVAL ); // milliHz!
init( SPLIT_JITTER_AMOUNT, 0.05 ); if( randomize && BUGGIFY ) SPLIT_JITTER_AMOUNT = 0.2;
init( IOPS_UNITS_PER_SAMPLE, 10000 * 1000 / STORAGE_METRICS_AVERAGE_INTERVAL_PER_KSECONDS / 100 );
init( BANDWIDTH_UNITS_PER_SAMPLE, SHARD_MIN_BYTES_PER_KSEC / STORAGE_METRICS_AVERAGE_INTERVAL_PER_KSECONDS / 25 );
init( BYTES_READ_UNITS_PER_SAMPLE, 100000 ); // 100K bytes
init( READ_HOT_SUB_RANGE_CHUNK_SIZE, 10000000); // 10MB
init( EMPTY_READ_PENALTY, 20 ); // 20 bytes
init( READ_SAMPLING_ENABLED, false ); if ( randomize && BUGGIFY ) READ_SAMPLING_ENABLED = true;// enable/disable read sampling
//Storage Server
init( STORAGE_LOGGING_DELAY, 5.0 );
init( STORAGE_SERVER_POLL_METRICS_DELAY, 1.0 );
init( FUTURE_VERSION_DELAY, 1.0 );
init( STORAGE_LIMIT_BYTES, 500000 );
init( BUGGIFY_LIMIT_BYTES, 1000 );
init( FETCH_USING_STREAMING, true ); if( randomize && BUGGIFY ) FETCH_USING_STREAMING = false; //Determines if fetch keys uses streaming reads
init( FETCH_BLOCK_BYTES, 2e6 );
init( FETCH_KEYS_PARALLELISM_BYTES, 4e6 ); if( randomize && BUGGIFY ) FETCH_KEYS_PARALLELISM_BYTES = 3e6;
init( FETCH_KEYS_PARALLELISM, 2 );
init( FETCH_KEYS_LOWER_PRIORITY, 0 );
init( BUGGIFY_BLOCK_BYTES, 10000 );
init( STORAGE_COMMIT_BYTES, 10000000 ); if( randomize && BUGGIFY ) STORAGE_COMMIT_BYTES = 2000000;
init( STORAGE_FETCH_BYTES, 2500000 ); if( randomize && BUGGIFY ) STORAGE_FETCH_BYTES = 500000;
init( STORAGE_DURABILITY_LAG_REJECT_THRESHOLD, 0.25 );
init( STORAGE_DURABILITY_LAG_MIN_RATE, 0.1 );
init( STORAGE_COMMIT_INTERVAL, 0.5 ); if( randomize && BUGGIFY ) STORAGE_COMMIT_INTERVAL = 2.0;
init( UPDATE_SHARD_VERSION_INTERVAL, 0.25 ); if( randomize && BUGGIFY ) UPDATE_SHARD_VERSION_INTERVAL = 1.0;
init( BYTE_SAMPLING_FACTOR, 250 ); //cannot buggify because of differences in restarting tests
init( BYTE_SAMPLING_OVERHEAD, 100 );
init( MAX_STORAGE_SERVER_WATCH_BYTES, 100e6 ); if( randomize && BUGGIFY ) MAX_STORAGE_SERVER_WATCH_BYTES = 10e3;
init( MAX_BYTE_SAMPLE_CLEAR_MAP_SIZE, 1e9 ); if( randomize && BUGGIFY ) MAX_BYTE_SAMPLE_CLEAR_MAP_SIZE = 1e3;
init( LONG_BYTE_SAMPLE_RECOVERY_DELAY, 60.0 );
init( BYTE_SAMPLE_LOAD_PARALLELISM, 8 ); if( randomize && BUGGIFY ) BYTE_SAMPLE_LOAD_PARALLELISM = 1;
init( BYTE_SAMPLE_LOAD_DELAY, 0.0 ); if( randomize && BUGGIFY ) BYTE_SAMPLE_LOAD_DELAY = 0.1;
init( BYTE_SAMPLE_START_DELAY, 1.0 ); if( randomize && BUGGIFY ) BYTE_SAMPLE_START_DELAY = 0.0;
init( UPDATE_STORAGE_PROCESS_STATS_INTERVAL, 5.0 );
init( BEHIND_CHECK_DELAY, 2.0 );
init( BEHIND_CHECK_COUNT, 2 );
init( BEHIND_CHECK_VERSIONS, 5 * VERSIONS_PER_SECOND );
init( WAIT_METRICS_WRONG_SHARD_CHANCE, isSimulated ? 1.0 : 0.1 );
init( MIN_TAG_READ_PAGES_RATE, 1.0e4 ); if( randomize && BUGGIFY ) MIN_TAG_READ_PAGES_RATE = 0;
init( MIN_TAG_WRITE_PAGES_RATE, 3200 ); if( randomize && BUGGIFY ) MIN_TAG_WRITE_PAGES_RATE = 0;
init( TAG_MEASUREMENT_INTERVAL, 30.0 ); if( randomize && BUGGIFY ) TAG_MEASUREMENT_INTERVAL = 1.0;
init( READ_COST_BYTE_FACTOR, 16384 ); if( randomize && BUGGIFY ) READ_COST_BYTE_FACTOR = 4096;
init( PREFIX_COMPRESS_KVS_MEM_SNAPSHOTS, true ); if( randomize && BUGGIFY ) PREFIX_COMPRESS_KVS_MEM_SNAPSHOTS = false;
init( REPORT_DD_METRICS, true );
init( DD_METRICS_REPORT_INTERVAL, 30.0 );
init( FETCH_KEYS_TOO_LONG_TIME_CRITERIA, 300.0 );
init( MAX_STORAGE_COMMIT_TIME, 120.0 ); //The max fsync stall time on the storage server and tlog before marking a disk as failed
init( RANGESTREAM_LIMIT_BYTES, 2e6 ); if( randomize && BUGGIFY ) RANGESTREAM_LIMIT_BYTES = 1;
init( ENABLE_CLEAR_RANGE_EAGER_READS, true );
//Wait Failure
init( MAX_OUTSTANDING_WAIT_FAILURE_REQUESTS, 250 ); if( randomize && BUGGIFY ) MAX_OUTSTANDING_WAIT_FAILURE_REQUESTS = 2;
init( WAIT_FAILURE_DELAY_LIMIT, 1.0 ); if( randomize && BUGGIFY ) WAIT_FAILURE_DELAY_LIMIT = 5.0;
//Worker
init( WORKER_LOGGING_INTERVAL, 5.0 );
init( HEAP_PROFILER_INTERVAL, 30.0 );
init( UNKNOWN_CC_TIMEOUT, 600.0 );
init( DEGRADED_RESET_INTERVAL, 24*60*60 ); if ( randomize && BUGGIFY ) DEGRADED_RESET_INTERVAL = 10;
init( DEGRADED_WARNING_LIMIT, 1 );
init( DEGRADED_WARNING_RESET_DELAY, 7*24*60*60 );
init( TRACE_LOG_FLUSH_FAILURE_CHECK_INTERVAL_SECONDS, 10 );
init( TRACE_LOG_PING_TIMEOUT_SECONDS, 5.0 );
init( MIN_DELAY_CC_WORST_FIT_CANDIDACY_SECONDS, 10.0 );
init( MAX_DELAY_CC_WORST_FIT_CANDIDACY_SECONDS, 30.0 );
init( DBINFO_FAILED_DELAY, 1.0 );
init( ENABLE_WORKER_HEALTH_MONITOR, false );
init( WORKER_HEALTH_MONITOR_INTERVAL, 60.0 );
init( PEER_LATENCY_CHECK_MIN_POPULATION, 30 );
init( PEER_LATENCY_DEGRADATION_PERCENTILE, 0.90 );
init( PEER_LATENCY_DEGRADATION_THRESHOLD, 0.05 );
init( PEER_TIMEOUT_PERCENTAGE_DEGRADATION_THRESHOLD, 0.1 );
// Test harness
init( WORKER_POLL_DELAY, 1.0 );
// Coordination
init( COORDINATED_STATE_ONCONFLICT_POLL_INTERVAL, 1.0 ); if( randomize && BUGGIFY ) COORDINATED_STATE_ONCONFLICT_POLL_INTERVAL = 10.0;
init( FORWARD_REQUEST_TOO_OLD, 4*24*60*60 ); if( randomize && BUGGIFY ) FORWARD_REQUEST_TOO_OLD = 60.0;
init( ENABLE_CROSS_CLUSTER_SUPPORT, true ); if( randomize && BUGGIFY ) ENABLE_CROSS_CLUSTER_SUPPORT = false;
init( COORDINATOR_LEADER_CONNECTION_TIMEOUT, 20.0 );
// Buggification
init( BUGGIFIED_EVENTUAL_CONSISTENCY, 1.0 );
init( BUGGIFY_ALL_COORDINATION, false ); if( randomize && BUGGIFY ) BUGGIFY_ALL_COORDINATION = true;
// Status
init( STATUS_MIN_TIME_BETWEEN_REQUESTS, 0.0 );
init( MAX_STATUS_REQUESTS_PER_SECOND, 256.0 );
init( CONFIGURATION_ROWS_TO_FETCH, 20000 );
init( DISABLE_DUPLICATE_LOG_WARNING, false );
init( HISTOGRAM_REPORT_INTERVAL, 300.0 );
// IPager
init( PAGER_RESERVED_PAGES, 1 );
// IndirectShadowPager
init( FREE_PAGE_VACUUM_THRESHOLD, 1 );
init( VACUUM_QUEUE_SIZE, 100000 );
init( VACUUM_BYTES_PER_SECOND, 1e6 );
// Timekeeper
init( TIME_KEEPER_DELAY, 10 );
init( TIME_KEEPER_MAX_ENTRIES, 3600 * 24 * 30 * 6 ); if( randomize && BUGGIFY ) { TIME_KEEPER_MAX_ENTRIES = 2; }
// Fast Restore
init( FASTRESTORE_FAILURE_TIMEOUT, 3600 );
init( FASTRESTORE_HEARTBEAT_INTERVAL, 60 );
init( FASTRESTORE_SAMPLING_PERCENT, 100 ); if( randomize && BUGGIFY ) { FASTRESTORE_SAMPLING_PERCENT = deterministicRandom()->random01() * 100; }
init( FASTRESTORE_NUM_LOADERS, 3 ); if( randomize && BUGGIFY ) { FASTRESTORE_NUM_LOADERS = deterministicRandom()->random01() * 10 + 1; }
init( FASTRESTORE_NUM_APPLIERS, 3 ); if( randomize && BUGGIFY ) { FASTRESTORE_NUM_APPLIERS = deterministicRandom()->random01() * 10 + 1; }
init( FASTRESTORE_TXN_BATCH_MAX_BYTES, 1024.0 * 1024.0 ); if( randomize && BUGGIFY ) { FASTRESTORE_TXN_BATCH_MAX_BYTES = deterministicRandom()->random01() * 1024.0 * 1024.0 + 1.0; }
init( FASTRESTORE_VERSIONBATCH_MAX_BYTES, 10.0 * 1024.0 * 1024.0 ); if( randomize && BUGGIFY ) { FASTRESTORE_VERSIONBATCH_MAX_BYTES = deterministicRandom()->random01() < 0.2 ? 10 * 1024 : deterministicRandom()->random01() < 0.4 ? 100 * 1024 * 1024 : deterministicRandom()->random01() * 1000.0 * 1024.0 * 1024.0; } // too small value may increase chance of TooManyFile error
init( FASTRESTORE_VB_PARALLELISM, 5 ); if( randomize && BUGGIFY ) { FASTRESTORE_VB_PARALLELISM = deterministicRandom()->random01() < 0.2 ? 2 : deterministicRandom()->random01() * 10 + 1; }
init( FASTRESTORE_VB_MONITOR_DELAY, 30 ); if( randomize && BUGGIFY ) { FASTRESTORE_VB_MONITOR_DELAY = deterministicRandom()->random01() * 20 + 1; }
init( FASTRESTORE_VB_LAUNCH_DELAY, 1.0 ); if( randomize && BUGGIFY ) { FASTRESTORE_VB_LAUNCH_DELAY = deterministicRandom()->random01() < 0.2 ? 0.1 : deterministicRandom()->random01() * 10.0 + 1; }
init( FASTRESTORE_ROLE_LOGGING_DELAY, 5 ); if( randomize && BUGGIFY ) { FASTRESTORE_ROLE_LOGGING_DELAY = deterministicRandom()->random01() * 60 + 1; }
init( FASTRESTORE_UPDATE_PROCESS_STATS_INTERVAL, 5 ); if( randomize && BUGGIFY ) { FASTRESTORE_UPDATE_PROCESS_STATS_INTERVAL = deterministicRandom()->random01() * 60 + 1; }
init( FASTRESTORE_ATOMICOP_WEIGHT, 1 ); if( randomize && BUGGIFY ) { FASTRESTORE_ATOMICOP_WEIGHT = deterministicRandom()->random01() * 200 + 1; }
init( FASTRESTORE_APPLYING_PARALLELISM, 10000 ); if( randomize && BUGGIFY ) { FASTRESTORE_APPLYING_PARALLELISM = deterministicRandom()->random01() * 10 + 1; }
init( FASTRESTORE_MONITOR_LEADER_DELAY, 5 ); if( randomize && BUGGIFY ) { FASTRESTORE_MONITOR_LEADER_DELAY = deterministicRandom()->random01() * 100; }
init( FASTRESTORE_STRAGGLER_THRESHOLD_SECONDS, 60 ); if( randomize && BUGGIFY ) { FASTRESTORE_STRAGGLER_THRESHOLD_SECONDS = deterministicRandom()->random01() * 240 + 10; }
init( FASTRESTORE_TRACK_REQUEST_LATENCY, false ); if( randomize && BUGGIFY ) { FASTRESTORE_TRACK_REQUEST_LATENCY = false; }
init( FASTRESTORE_TRACK_LOADER_SEND_REQUESTS, false ); if( randomize && BUGGIFY ) { FASTRESTORE_TRACK_LOADER_SEND_REQUESTS = true; }
init( FASTRESTORE_MEMORY_THRESHOLD_MB_SOFT, 6144 ); if( randomize && BUGGIFY ) { FASTRESTORE_MEMORY_THRESHOLD_MB_SOFT = 1; }
init( FASTRESTORE_WAIT_FOR_MEMORY_LATENCY, 10 ); if( randomize && BUGGIFY ) { FASTRESTORE_WAIT_FOR_MEMORY_LATENCY = 60; }
init( FASTRESTORE_HEARTBEAT_DELAY, 10 ); if( randomize && BUGGIFY ) { FASTRESTORE_HEARTBEAT_DELAY = deterministicRandom()->random01() * 120 + 2; }
init( FASTRESTORE_HEARTBEAT_MAX_DELAY, 10 ); if( randomize && BUGGIFY ) { FASTRESTORE_HEARTBEAT_MAX_DELAY = FASTRESTORE_HEARTBEAT_DELAY * 10; }
init( FASTRESTORE_APPLIER_FETCH_KEYS_SIZE, 100 ); if( randomize && BUGGIFY ) { FASTRESTORE_APPLIER_FETCH_KEYS_SIZE = deterministicRandom()->random01() * 10240 + 1; }
init( FASTRESTORE_LOADER_SEND_MUTATION_MSG_BYTES, 1.0 * 1024.0 * 1024.0 ); if( randomize && BUGGIFY ) { FASTRESTORE_LOADER_SEND_MUTATION_MSG_BYTES = deterministicRandom()->random01() < 0.2 ? 1024 : deterministicRandom()->random01() * 5.0 * 1024.0 * 1024.0 + 1; }
init( FASTRESTORE_GET_RANGE_VERSIONS_EXPENSIVE, false ); if( randomize && BUGGIFY ) { FASTRESTORE_GET_RANGE_VERSIONS_EXPENSIVE = deterministicRandom()->random01() < 0.5 ? true : false; }
init( FASTRESTORE_REQBATCH_PARALLEL, 50 ); if( randomize && BUGGIFY ) { FASTRESTORE_REQBATCH_PARALLEL = deterministicRandom()->random01() * 100 + 1; }
init( FASTRESTORE_REQBATCH_LOG, false ); if( randomize && BUGGIFY ) { FASTRESTORE_REQBATCH_LOG = deterministicRandom()->random01() < 0.2 ? true : false; }
init( FASTRESTORE_TXN_CLEAR_MAX, 100 ); if( randomize && BUGGIFY ) { FASTRESTORE_TXN_CLEAR_MAX = deterministicRandom()->random01() * 100 + 1; }
init( FASTRESTORE_TXN_RETRY_MAX, 10 ); if( randomize && BUGGIFY ) { FASTRESTORE_TXN_RETRY_MAX = deterministicRandom()->random01() * 100 + 1; }
init( FASTRESTORE_TXN_EXTRA_DELAY, 0.0 ); if( randomize && BUGGIFY ) { FASTRESTORE_TXN_EXTRA_DELAY = deterministicRandom()->random01() * 1 + 0.001;}
init( FASTRESTORE_NOT_WRITE_DB, false ); // Perf test only: set it to true will cause simulation failure
init( FASTRESTORE_USE_RANGE_FILE, true ); // Perf test only: set it to false will cause simulation failure
init( FASTRESTORE_USE_LOG_FILE, true ); // Perf test only: set it to false will cause simulation failure
init( FASTRESTORE_SAMPLE_MSG_BYTES, 1048576 ); if( randomize && BUGGIFY ) { FASTRESTORE_SAMPLE_MSG_BYTES = deterministicRandom()->random01() * 2048;}
init( FASTRESTORE_SCHED_UPDATE_DELAY, 0.1 ); if( randomize && BUGGIFY ) { FASTRESTORE_SCHED_UPDATE_DELAY = deterministicRandom()->random01() * 2;}
init( FASTRESTORE_SCHED_TARGET_CPU_PERCENT, 70 ); if( randomize && BUGGIFY ) { FASTRESTORE_SCHED_TARGET_CPU_PERCENT = deterministicRandom()->random01() * 100 + 50;} // simulate cpu usage can be larger than 100
init( FASTRESTORE_SCHED_MAX_CPU_PERCENT, 90 ); if( randomize && BUGGIFY ) { FASTRESTORE_SCHED_MAX_CPU_PERCENT = FASTRESTORE_SCHED_TARGET_CPU_PERCENT + deterministicRandom()->random01() * 100;}
init( FASTRESTORE_SCHED_INFLIGHT_LOAD_REQS, 50 ); if( randomize && BUGGIFY ) { FASTRESTORE_SCHED_INFLIGHT_LOAD_REQS = deterministicRandom()->random01() < 0.2 ? 1 : deterministicRandom()->random01() * 30 + 1;}
init( FASTRESTORE_SCHED_INFLIGHT_SEND_REQS, 3 ); if( randomize && BUGGIFY ) { FASTRESTORE_SCHED_INFLIGHT_SEND_REQS = deterministicRandom()->random01() < 0.2 ? 1 : deterministicRandom()->random01() * 10 + 1;}
init( FASTRESTORE_SCHED_LOAD_REQ_BATCHSIZE, 5 ); if( randomize && BUGGIFY ) { FASTRESTORE_SCHED_LOAD_REQ_BATCHSIZE = deterministicRandom()->random01() < 0.2 ? 1 : deterministicRandom()->random01() * 10 + 1;}
init( FASTRESTORE_SCHED_INFLIGHT_SENDPARAM_THRESHOLD, 10 ); if( randomize && BUGGIFY ) { FASTRESTORE_SCHED_INFLIGHT_SENDPARAM_THRESHOLD = deterministicRandom()->random01() < 0.2 ? 1 : deterministicRandom()->random01() * 15 + 1;}
init( FASTRESTORE_SCHED_SEND_FUTURE_VB_REQS_BATCH, 2 ); if( randomize && BUGGIFY ) { FASTRESTORE_SCHED_SEND_FUTURE_VB_REQS_BATCH = deterministicRandom()->random01() < 0.2 ? 1 : deterministicRandom()->random01() * 15 + 1;}
init( FASTRESTORE_NUM_TRACE_EVENTS, 100 ); if( randomize && BUGGIFY ) { FASTRESTORE_NUM_TRACE_EVENTS = deterministicRandom()->random01() < 0.2 ? 1 : deterministicRandom()->random01() * 500 + 1;}
init( FASTRESTORE_EXPENSIVE_VALIDATION, false ); if( randomize && BUGGIFY ) { FASTRESTORE_EXPENSIVE_VALIDATION = deterministicRandom()->random01() < 0.5 ? true : false;}
init( FASTRESTORE_WRITE_BW_MB, 70 ); if( randomize && BUGGIFY ) { FASTRESTORE_WRITE_BW_MB = deterministicRandom()->random01() < 0.5 ? 2 : 100;}
init( FASTRESTORE_RATE_UPDATE_SECONDS, 1.0 ); if( randomize && BUGGIFY ) { FASTRESTORE_RATE_UPDATE_SECONDS = deterministicRandom()->random01() < 0.5 ? 0.1 : 2;}
init( REDWOOD_DEFAULT_PAGE_SIZE, 4096 );
init( REDWOOD_DEFAULT_EXTENT_SIZE, 32 * 1024 * 1024 );
init( REDWOOD_DEFAULT_EXTENT_READ_SIZE, 1024 * 1024 );
init( REDWOOD_EXTENT_CONCURRENT_READS, 4 );
init( REDWOOD_KVSTORE_CONCURRENT_READS, 64 );
init( REDWOOD_KVSTORE_RANGE_PREFETCH, true );
init( REDWOOD_PAGE_REBUILD_MAX_SLACK, 0.33 );
init( REDWOOD_LAZY_CLEAR_BATCH_SIZE_PAGES, 10 );
init( REDWOOD_LAZY_CLEAR_MIN_PAGES, 0 );
init( REDWOOD_LAZY_CLEAR_MAX_PAGES, 1e6 );
init( REDWOOD_REMAP_CLEANUP_WINDOW, 50 );
init( REDWOOD_REMAP_CLEANUP_LAG, 0.1 );
init( REDWOOD_LOGGING_INTERVAL, 5.0 );
// Server request latency measurement
init( LATENCY_SAMPLE_SIZE, 100000 );
init( LATENCY_METRICS_LOGGING_INTERVAL, 60.0 );
// clang-format on
if (clientKnobs) {
clientKnobs->IS_ACCEPTABLE_DELAY =
clientKnobs->IS_ACCEPTABLE_DELAY *
std::min(MAX_READ_TRANSACTION_LIFE_VERSIONS, MAX_WRITE_TRANSACTION_LIFE_VERSIONS) /
(5.0 * VERSIONS_PER_SECOND);
clientKnobs->INIT_MID_SHARD_BYTES = MIN_SHARD_BYTES;
}
}
|
// This file is made available under Elastic License 2.0.
// This file is based on code available under the Apache license here:
// https://github.com/apache/incubator-doris/blob/master/be/src/olap/rowset/segment_v2/zone_map_index.cpp
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "storage/rowset/zone_map_index.h"
#include "runtime/mem_pool.h"
#include "runtime/mem_tracker.h"
#include "storage/column_block.h"
#include "storage/olap_define.h"
#include "storage/olap_type_infra.h"
#include "storage/rowset/encoding_info.h"
#include "storage/rowset/indexed_column_reader.h"
#include "storage/rowset/indexed_column_writer.h"
#include "storage/types.h"
#include "util/unaligned_access.h"
namespace starrocks {
struct ZoneMap {
    // Minimum value seen in the zone (storage is allocated from the writer's
    // MemPool; see ZoneMapIndexWriterImpl's constructor).
    char* min_value = nullptr;
    // Maximum value seen in the zone.
    char* max_value = nullptr;
    // Null-tracking flags. The four combinations mean:
    // - has_null == false && has_not_null == false: the zone has no rows.
    // - has_null == true  && has_not_null == false: all rows are null.
    // - has_null == false && has_not_null == true : no row is null.
    // - has_null == true  && has_not_null == true : a mix of null and non-null rows.
    // Whether the zone contains at least one null value.
    bool has_null = false;
    // Whether the zone contains at least one non-null value.
    bool has_not_null = false;
    // Serialize this zone map into dst, using field to format min/max.
    void to_proto(ZoneMapPB* dst, Field* field) const {
        dst->set_min(field->to_zone_map_string(min_value));
        dst->set_max(field->to_zone_map_string(max_value));
        dst->set_has_null(has_null);
        dst->set_has_not_null(has_not_null);
    }
};
// Zone-map index writer specialized for one concrete field type. It keeps a
// per-page zone map (reset after every flush()) and a segment-wide zone map
// that accumulates all pages.
template <FieldType type>
class ZoneMapIndexWriterImpl final : public ZoneMapIndexWriter {
    using CppType = typename TypeTraits<type>::CppType;

public:
    explicit ZoneMapIndexWriterImpl(starrocks::Field* field);

    // Fold `count` non-null values into the current page's zone map.
    void add_values(const void* values, size_t count) override;

    // Record that the current page contains null values (when count > 0).
    void add_nulls(uint32_t count) override { _page_zone_map.has_null |= count > 0; }

    // mark the end of one data page so that we can finalize the corresponding zone map
    Status flush() override;

    // Write the segment zone map plus all per-page zone maps into wfile.
    Status finish(WritableFile* wfile, ColumnIndexMetaPB* index_meta) override;

    // Estimated serialized size of the index accumulated so far.
    uint64_t size() const override { return _estimated_size; }

private:
    // Prepare zone_map for a fresh page: min starts at the type's maximum and
    // max at the type's minimum, so the first value added updates both bounds.
    void _reset_zone_map(ZoneMap* zone_map) {
        // we should allocate max varchar length and set to max for min value
        _field->set_to_max(zone_map->min_value);
        _field->set_to_min(zone_map->max_value);
        zone_map->has_null = false;
        zone_map->has_not_null = false;
    }

    Field* _field;
    // memory will be managed by MemPool
    ZoneMap _page_zone_map;
    ZoneMap _segment_zone_map;
    // TODO(zc): we should replace this memory pool later; we only allocate
    // min/max for a field, but MemPool allocates at least 4KB, which is
    // wasteful in most cases.
    MemPool _pool;
    // serialized ZoneMapPB for each data page
    std::vector<std::string> _values;
    uint64_t _estimated_size = 0;
};
// Allocates min/max storage for both the page-level and segment-level zone
// maps from the writer's pool, then resets each to the "empty" state.
template <FieldType type>
ZoneMapIndexWriterImpl<type>::ZoneMapIndexWriterImpl(Field* field) : _field(field) {
    _page_zone_map.min_value = _field->allocate_value(&_pool);
    _page_zone_map.max_value = _field->allocate_value(&_pool);
    _reset_zone_map(&_page_zone_map);
    _segment_zone_map.min_value = _field->allocate_value(&_pool);
    _segment_zone_map.max_value = _field->allocate_value(&_pool);
    _reset_zone_map(&_segment_zone_map);
}
// Fold a batch of non-null values into the zone map of the page currently
// being written. A non-empty batch marks the page as containing non-nulls.
template <FieldType type>
void ZoneMapIndexWriterImpl<type>::add_values(const void* values, size_t count) {
    if (count == 0) {
        return;
    }
    _page_zone_map.has_not_null = true;
    const auto* typed = static_cast<const CppType*>(values);
    // One pass over the batch yields both extrema; widen the page bounds only
    // when the batch actually exceeds them.
    const auto extrema = std::minmax_element(typed, typed + count);
    const CppType* batch_min = extrema.first;
    const CppType* batch_max = extrema.second;
    if (unaligned_load<CppType>(batch_min) < unaligned_load<CppType>(_page_zone_map.min_value)) {
        _field->type_info()->direct_copy(_page_zone_map.min_value, batch_min, nullptr);
    }
    if (unaligned_load<CppType>(batch_max) > unaligned_load<CppType>(_page_zone_map.max_value)) {
        _field->type_info()->direct_copy(_page_zone_map.max_value, batch_max, nullptr);
    }
}
// Finalize the zone map of the page that just ended: merge it into the
// segment-level zone map, serialize it for later writing, and reset the
// page-level state for the next page.
template <FieldType type>
Status ZoneMapIndexWriterImpl<type>::flush() {
    ZoneMap& page = _page_zone_map;
    ZoneMap& segment = _segment_zone_map;
    // Widen the segment bounds to cover this page.
    if (_field->compare(segment.min_value, page.min_value) > 0) {
        _field->type_info()->direct_copy(segment.min_value, page.min_value, nullptr);
    }
    if (_field->compare(segment.max_value, page.max_value) < 0) {
        _field->type_info()->direct_copy(segment.max_value, page.max_value, nullptr);
    }
    segment.has_null |= page.has_null;
    segment.has_not_null |= page.has_not_null;
    // Serialize the page zone map, then reset it for the next page.
    ZoneMapPB page_pb;
    page.to_proto(&page_pb, _field);
    _reset_zone_map(&page);
    std::string serialized;
    if (!page_pb.SerializeToString(&serialized)) {
        return Status::InternalError("serialize zone map failed");
    }
    // Account for the payload plus the per-entry length prefix.
    _estimated_size += serialized.size() + sizeof(uint32_t);
    _values.emplace_back(std::move(serialized));
    return Status::OK();
}
// Dispatch functor: instantiates the writer template for a concrete
// FieldType chosen at runtime (see ZoneMapIndexWriter::create below).
struct ZoneMapIndexWriterBuilder {
    template <FieldType ftype>
    std::unique_ptr<ZoneMapIndexWriter> operator()(Field* field) {
        return std::make_unique<ZoneMapIndexWriterImpl<ftype>>(field);
    }
};
// Factory: selects the ZoneMapIndexWriterImpl instantiation matching the
// field's runtime type via the type-dispatch helper.
std::unique_ptr<ZoneMapIndexWriter> ZoneMapIndexWriter::create(starrocks::Field* field) {
    return field_type_dispatch_zonemap_index(field->type(), ZoneMapIndexWriterBuilder(), field);
}
// Persist the index: the segment-level zone map goes directly into the meta,
// and the per-page zone maps are written as an indexed column of serialized
// protobuf blobs.
template <FieldType type>
Status ZoneMapIndexWriterImpl<type>::finish(WritableFile* wfile, ColumnIndexMetaPB* index_meta) {
    index_meta->set_type(ZONE_MAP_INDEX);
    ZoneMapIndexPB* zone_map_meta = index_meta->mutable_zone_map_index();
    // Segment-level zone map is stored inline in the meta.
    _segment_zone_map.to_proto(zone_map_meta->mutable_segment_zone_map(), _field);
    // Per-page zone maps are written as OBJECT-typed values with an ordinal
    // index (lookup by page number) and no value index.
    TypeInfoPtr object_type_info = get_type_info(OLAP_FIELD_TYPE_OBJECT);
    IndexedColumnWriterOptions opts;
    opts.write_ordinal_index = true;
    opts.write_value_index = false;
    opts.encoding = EncodingInfo::get_default_encoding(OLAP_FIELD_TYPE_OBJECT, false);
    opts.compression = NO_COMPRESSION; // currently not compressed
    IndexedColumnWriter column_writer(opts, object_type_info, wfile);
    RETURN_IF_ERROR(column_writer.init());
    for (auto& serialized : _values) {
        Slice blob(serialized);
        RETURN_IF_ERROR(column_writer.add(&blob));
    }
    return column_writer.finish(zone_map_meta->mutable_page_zone_maps());
}
// Load and cache every per-page zone map from the indexed column referenced
// by index_meta. Each entry is a serialized ZoneMapPB read one value at a
// time and parsed into _page_zone_maps.
Status ZoneMapIndexReader::load(FileSystem* fs, const std::string& filename, const ZoneMapIndexPB* index_meta,
                                bool use_page_cache, bool kept_in_memory) {
    IndexedColumnReader column_reader(fs, filename, index_meta->page_zone_maps());
    RETURN_IF_ERROR(column_reader.load(use_page_cache, kept_in_memory));
    std::unique_ptr<IndexedColumnIterator> iter;
    RETURN_IF_ERROR(column_reader.new_iterator(&iter));
    MemPool mem_pool;
    _page_zone_maps.resize(column_reader.num_values());
    constexpr size_t kBatchSize = 1; // one zone map per read
    for (int i = 0; i < column_reader.num_values(); ++i) {
        std::unique_ptr<ColumnVectorBatch> batch;
        RETURN_IF_ERROR(ColumnVectorBatch::create(kBatchSize, false, column_reader.type_info(), nullptr, &batch));
        ColumnBlock block(batch.get(), &mem_pool);
        ColumnBlockView block_view(&block);
        RETURN_IF_ERROR(iter->seek_to_ordinal(i));
        size_t num_read = kBatchSize;
        RETURN_IF_ERROR(iter->next_batch(&num_read, &block_view));
        DCHECK(kBatchSize == num_read);
        auto* blob = reinterpret_cast<Slice*>(batch->data());
        if (!_page_zone_maps[i].ParseFromArray(blob->data, blob->size)) {
            return Status::Corruption("Failed to parse zone map");
        }
        // Release per-iteration allocations; the parsed PB owns its data.
        mem_pool.clear();
    }
    return Status::OK();
}
} // namespace starrocks
|
#include <gli/gli.hpp>
#include <vbte/graphics/texture.hpp>
namespace vbte {
namespace graphics {
// Build a GL texture from a loaded gli 2D texture, uploading every mip level
// contained in the file. Compressed formats go through
// glCompressedTexImage2D, everything else through glTexImage2D.
texture::texture(const gli::texture2D& texture) noexcept
: width_(texture.dimensions().x), height_(texture.dimensions().y), target_(GL_TEXTURE_2D) {
	glGenTextures(1, &id_);
	glBindTexture(target_, id_);
	// Filtering/anisotropy setup; uses mipmapped minification, which relies
	// on the full mip chain being uploaded below.
	apply_settings();
	// adapted from http://gli.g-truc.net/0.5.1/code.html
	if (gli::is_compressed(texture.format())) {
		for (gli::texture2D::size_type level = 0; level < texture.levels(); ++level) {
			glCompressedTexImage2D(target_,
				static_cast<GLint>(level),
				static_cast<GLenum>(gli::internal_format(texture.format())),
				static_cast<GLsizei>(texture[level].dimensions().x),
				static_cast<GLsizei>(texture[level].dimensions().y),
				0,
				static_cast<GLsizei>(texture[level].size()),
				texture[level].data());
		}
	} else {
		for (gli::texture2D::size_type level = 0; level < texture.levels(); ++level) {
			glTexImage2D(target_,
				static_cast<GLint>(level),
				static_cast<GLenum>(gli::internal_format(texture.format())),
				static_cast<GLsizei>(texture[level].dimensions().x),
				static_cast<GLsizei>(texture[level].dimensions().y),
				0,
				static_cast<GLenum>(gli::external_format(texture.format())),
				static_cast<GLenum>(gli::type_format(texture.format())),
				texture[level].data());
		}
	}
}
// Create a single-level 2D texture with the given format; data may be null
// (glTexImage2D then only allocates storage). Since there is no mip chain,
// plain (non-mipmapped) linear filtering is used.
texture::texture(GLenum target, GLint internal_format, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid* data) noexcept
: width_(static_cast<size_t>(width)), height_(static_cast<size_t>(height)), target_(target) {
	glGenTextures(1, &id_);
	glBindTexture(target_, id_);
	glTexImage2D(target_, 0, internal_format, width_, height_, 0, format, type, data);
	glTexParameteri(target_, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTexParameteri(target_, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
}
// Release the GL texture object. Deleting name 0 (the moved-from state) is
// silently ignored by OpenGL, so no guard is needed.
texture::~texture() {
	glDeleteTextures(1, &id_);
}
// Move constructor: steal the GL handle and zero the source's id so its
// destructor becomes a no-op.
texture::texture(texture&& rhs) noexcept
: id_(rhs.id_), target_(rhs.target_), width_(rhs.width_), height_(rhs.height_) {
	rhs.id_ = 0;
}
// Move assignment: release the texture object currently owned by *this and
// steal rhs's handle; rhs is left with id 0 so its destructor is a no-op.
// Fixes two defects in the previous version: the old id_ was overwritten
// without glDeleteTextures (leaking the GL texture object), and self-move
// would zero our own handle.
texture& texture::operator=(texture&& rhs) noexcept {
	if (this != &rhs) {
		// glDeleteTextures silently ignores the name 0, so a freshly
		// moved-from destination is handled correctly too.
		glDeleteTextures(1, &id_);
		id_ = rhs.id_;
		target_ = rhs.target_;
		width_ = rhs.width_;
		height_ = rhs.height_;
		rhs.id_ = 0;
	}
	return *this;
}
// Bind this texture to the given texture unit (unit 0 == GL_TEXTURE0).
void texture::bind(uint32_t unit) const noexcept {
	glActiveTexture(GL_TEXTURE0 + unit);
	glBindTexture(target_, id_);
}
void texture::apply_settings() const noexcept {
glTexParameteri(target_, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(target_, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
auto max_aniso = 0.f;
glGetFloatv(GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT, &max_aniso);
glTexParameteri(target_, GL_TEXTURE_MAX_ANISOTROPY_EXT, max_aniso);
}
}
}
|
/*
* Copyright (c) 2011-2019, The DART development contributors
* All rights reserved.
*
* The list of contributors can be found at:
* https://github.com/dartsim/dart/blob/master/LICENSE
*
* This file is provided under the following "BSD-style" License:
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "dart/common/Console.hpp"
#include "dart/math/Geometry.hpp"
#include "dart/dynamics/SimpleFrame.hpp"
namespace dart {
namespace dynamics {
//==============================================================================
/// Construct a frame attached to _refFrame with the given name and relative
/// transform; relative velocity and acceleration start at zero.
SimpleFrame::SimpleFrame(Frame* _refFrame, const std::string& _name,
                         const Eigen::Isometry3s& _relativeTransform)
  : Entity(ConstructFrame),
    Frame(_refFrame),
    Detachable(),
    ShapeFrame(_refFrame),
    mRelativeTf(_relativeTransform),
    mRelativeVelocity(Eigen::Vector6s::Zero()),
    mRelativeAcceleration(Eigen::Vector6s::Zero()),
    mPartialAcceleration(Eigen::Vector6s::Zero())
{
  setName(_name);
}

//==============================================================================
/// Copy-construct from another SimpleFrame, re-parented onto _refFrame.
/// copy() captures and re-applies the source's kinematic state; then the
/// Composite aspects of the source are duplicated.
SimpleFrame::SimpleFrame(const SimpleFrame& _otherFrame, Frame* _refFrame)
  : Entity(ConstructFrame),
    common::Composite(),
    Frame(_refFrame),
    Detachable(),
    ShapeFrame(_refFrame),
    mRelativeTf(Eigen::Isometry3s::Identity()),
    mRelativeVelocity(Eigen::Vector6s::Zero()),
    mRelativeAcceleration(Eigen::Vector6s::Zero()),
    mPartialAcceleration(Eigen::Vector6s::Zero())
{
  copy(_otherFrame, _refFrame);
  duplicateAspects(&_otherFrame);
}

//==============================================================================
SimpleFrame::~SimpleFrame()
{
  // Do nothing
}
//==============================================================================
/// Rename the frame. Returns the current name unchanged if the new name is
/// identical; otherwise bumps the version and raises the name-changed signal
/// with both the old and the new name.
const std::string& SimpleFrame::setName(const std::string& _name)
{
  if(_name == mName)
    return mName;

  std::string oldName = mName;
  mName = _name;

  incrementVersion();
  Entity::mNameChangedSignal.raise(this, oldName, mName);

  return mName;
}

//==============================================================================
/// Current name of this frame.
const std::string& SimpleFrame::getName() const
{
  return mName;
}

//==============================================================================
/// Create a copy of this frame attached to _refFrame (properties are copied
/// via the copy constructor's copy() call).
SimpleFramePtr SimpleFrame::clone(Frame* _refFrame) const
{
  return SimpleFramePtr(new SimpleFrame(*this, _refFrame));
}

//==============================================================================
/// Convenience overload forwarding to the pointer version of copy().
void SimpleFrame::copy(const Frame& _otherFrame, Frame* _refFrame,
                       bool _copyProperties)
{
  copy(&_otherFrame, _refFrame, _copyProperties);
}
//==============================================================================
/// Make this frame mimic _otherFrame as observed from _refFrame: capture the
/// other frame's transform, spatial velocity, and spatial acceleration
/// relative to _refFrame (expressed in World coordinates), re-parent this
/// frame onto _refFrame, and re-apply that state.
void SimpleFrame::copy(const Frame* _otherFrame, Frame* _refFrame,
                       bool _copyProperties)
{
  if(nullptr == _otherFrame || nullptr == _refFrame)
    return;

  // Copying ourselves without changing parent would be a no-op.
  if( (this == _otherFrame) && (_refFrame == getParentFrame()) )
    return;

  // Capture the state before re-parenting, so it is measured consistently.
  Eigen::Isometry3s relativeTf = _otherFrame->getTransform(_refFrame);
  Eigen::Vector6s relativeVelocity =
      _otherFrame->getSpatialVelocity(_refFrame, Frame::World());
  Eigen::Vector6s relativeAcceleration =
      _otherFrame->getSpatialAcceleration(_refFrame, Frame::World());

  setParentFrame(_refFrame);
  setRelativeTransform(relativeTf);
  setRelativeSpatialVelocity(relativeVelocity, Frame::World());
  setRelativeSpatialAcceleration(relativeAcceleration, Frame::World());

  if(_copyProperties)
  {
    // Composite properties and the name only transfer when the source
    // actually is a ShapeFrame / SimpleFrame, respectively.
    const auto shapeFrame = dynamic_cast<const ShapeFrame*>(_otherFrame);
    if(shapeFrame)
      setCompositeProperties(shapeFrame->getCompositeProperties());

    const auto simpleFrame = dynamic_cast<const SimpleFrame*>(_otherFrame);
    if(simpleFrame)
      setName(simpleFrame->getName());
  }
}

//==============================================================================
/// Copy assignment copies kinematic state only (no properties, no rename)
/// and keeps the current parent frame.
SimpleFrame& SimpleFrame::operator=(const SimpleFrame& _otherFrame)
{
  copy(_otherFrame, getParentFrame(), false);
  return *this;
}

//==============================================================================
/// Create and return a new SimpleFrame that is a child of this frame.
std::shared_ptr<SimpleFrame> SimpleFrame::spawnChildSimpleFrame(
    const std::string& name, const Eigen::Isometry3s& relativeTransform)
{
  return std::make_shared<SimpleFrame>(this, name, relativeTransform);
}
//==============================================================================
/// Set the transform of this frame relative to its parent and invalidate
/// cached world transforms.
void SimpleFrame::setRelativeTransform(
    const Eigen::Isometry3s& _newRelTransform)
{
  mRelativeTf = _newRelTransform;
  dirtyTransform();
}

//==============================================================================
/// Set only the translational part of the relative transform.
void SimpleFrame::setRelativeTranslation(const Eigen::Vector3s& _newTranslation)
{
  mRelativeTf.translation() = _newTranslation;
  dirtyTransform();
}

//==============================================================================
/// Set only the rotational part of the relative transform.
void SimpleFrame::setRelativeRotation(const Eigen::Matrix3s& _newRotation)
{
  mRelativeTf.linear() = _newRotation;
  dirtyTransform();
}

//==============================================================================
/// Set the transform of this frame as expressed with respect to
/// _withRespectTo, converting it into the parent frame's coordinates.
void SimpleFrame::setTransform(const Eigen::Isometry3s& _newTransform,
                               const Frame* _withRespectTo)
{
  setRelativeTransform(
      _withRespectTo->getTransform(getParentFrame()) * _newTransform);
}

//==============================================================================
/// Set the translation of this frame as expressed with respect to
/// _withRespectTo.
void SimpleFrame::setTranslation(const Eigen::Vector3s& _newTranslation,
                                 const Frame* _withRespectTo)
{
  setRelativeTranslation(
      _withRespectTo->getTransform(getParentFrame()) * _newTranslation);
}

//==============================================================================
/// Set the rotation of this frame as expressed with respect to
/// _withRespectTo; only the rotational part of the reference transform is
/// applied.
void SimpleFrame::setRotation(const Eigen::Matrix3s& _newRotation,
                              const Frame* _withRespectTo)
{
  setRelativeRotation(
      _withRespectTo->getTransform(getParentFrame()).linear()
      * _newRotation);
}

//==============================================================================
/// Transform of this frame relative to its parent.
const Eigen::Isometry3s& SimpleFrame::getRelativeTransform() const
{
  return mRelativeTf;
}
//==============================================================================
/// Set the spatial velocity of this frame relative to its parent, expressed
/// in this frame's own coordinates, and invalidate cached velocities.
void SimpleFrame::setRelativeSpatialVelocity(
    const Eigen::Vector6s& _newSpatialVelocity)
{
  mRelativeVelocity = _newSpatialVelocity;
  dirtyVelocity();
}

//==============================================================================
/// Set the relative spatial velocity given in _inCoordinatesOf, rotating it
/// (via math::AdR) into this frame's coordinates when necessary.
void SimpleFrame::setRelativeSpatialVelocity(
    const Eigen::Vector6s& _newSpatialVelocity, const Frame* _inCoordinatesOf)
{
  if(this == _inCoordinatesOf)
    setRelativeSpatialVelocity(_newSpatialVelocity);
  else
    setRelativeSpatialVelocity(math::AdR(_inCoordinatesOf->getTransform(this),
                                         _newSpatialVelocity));
}

//==============================================================================
/// Spatial velocity of this frame relative to its parent, in this frame's
/// coordinates.
const Eigen::Vector6s& SimpleFrame::getRelativeSpatialVelocity() const
{
  return mRelativeVelocity;
}

//==============================================================================
/// Set the spatial acceleration of this frame relative to its parent,
/// expressed in this frame's own coordinates, and invalidate cached
/// accelerations.
void SimpleFrame::setRelativeSpatialAcceleration(
    const Eigen::Vector6s &_newSpatialAcceleration)
{
  mRelativeAcceleration = _newSpatialAcceleration;
  dirtyAcceleration();
}

//==============================================================================
/// Set the relative spatial acceleration given in _inCoordinatesOf, rotating
/// it into this frame's coordinates when necessary.
void SimpleFrame::setRelativeSpatialAcceleration(
    const Eigen::Vector6s& _newSpatialAcceleration,
    const Frame* _inCoordinatesOf)
{
  if(this == _inCoordinatesOf)
    setRelativeSpatialAcceleration(_newSpatialAcceleration);
  else
    setRelativeSpatialAcceleration(
          math::AdR(_inCoordinatesOf->getTransform(this),
                    _newSpatialAcceleration) );
}

//==============================================================================
/// Spatial acceleration of this frame relative to its parent.
const Eigen::Vector6s& SimpleFrame::getRelativeSpatialAcceleration() const
{
  return mRelativeAcceleration;
}

//==============================================================================
/// For a SimpleFrame the primary relative acceleration is the full relative
/// acceleration.
const Eigen::Vector6s& SimpleFrame::getPrimaryRelativeAcceleration() const
{
  return mRelativeAcceleration;
}

//==============================================================================
/// Velocity-dependent (partial) acceleration term ad(V, V_rel), recomputed
/// on every call. NOTE(review): mPartialAcceleration is assigned inside a
/// const member function, so it is presumably declared mutable in the
/// header -- verify there.
const Eigen::Vector6s& SimpleFrame::getPartialAcceleration() const
{
  mPartialAcceleration = math::ad(getSpatialVelocity(),
                                  getRelativeSpatialVelocity());
  return mPartialAcceleration;
}
//==============================================================================
/// Set the relative velocity and acceleration from classic linear/angular
/// derivatives, both interpreted in the parent frame's coordinates.
void SimpleFrame::setClassicDerivatives(
    const Eigen::Vector3s& _linearVelocity,
    const Eigen::Vector3s& _angularVelocity,
    const Eigen::Vector3s& _linearAcceleration,
    const Eigen::Vector3s& _angularAcceleration)
{
  // Spatial (6D) vectors store the angular part first, then the linear part.
  Eigen::Vector6s v, a;
  v << _angularVelocity,
       _linearVelocity;

  // The spatial acceleration's linear part carries the "- w x v" correction:
  // a_spatial = | a_angular         |
  //             | a_linear - w x v  |
  a << _angularAcceleration,
       _linearAcceleration - _angularVelocity.cross(_linearVelocity);

  setRelativeSpatialVelocity(v, getParentFrame());
  setRelativeSpatialAcceleration(a, getParentFrame());
}
} // namespace dynamics
} // namespace dart
|
// Copyright (c) 2020 ETH Zurich
// Copyright (c) 2014 Grant Mercer
// Copyright (c) 2021 Akhil J Nair
//
// SPDX-License-Identifier: BSL-1.0
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
/// \file parallel/container_algorithms/exclusive_scan.hpp
#pragma once
#if defined(DOXYGEN)
namespace hpx { namespace ranges {
///////////////////////////////////////////////////////////////////////////
/// Assigns through each iterator \a i in [result, result + (last - first))
/// the value of
/// GENERALIZED_NONCOMMUTATIVE_SUM(+, init, *first, ...,
/// *(first + (i - result) - 1))
///
/// \note Complexity: O(\a last - \a first) applications of the
/// predicate \a std::plus<T>.
///
/// \tparam InIter The type of the source iterators used (deduced).
/// This iterator type must meet the requirements of an
/// input iterator.
/// \tparam Sent The type of the source sentinel (deduced). This
/// sentinel type must be a sentinel for InIter.
/// \tparam OutIter The type of the iterator representing the
/// destination range (deduced).
/// This iterator type must meet the requirements of an
/// output iterator.
/// \tparam T The type of the value to be used as initial (and
/// intermediate) values (deduced).
///
/// \param first Refers to the beginning of the sequence of elements
/// the algorithm will be applied to.
/// \param last Refers to sentinel value denoting the end of the
/// sequence of elements the algorithm will be applied.
/// \param dest Refers to the beginning of the destination range.
/// \param init The initial value for the generalized sum.
///
/// The reduce operations in the parallel \a exclusive_scan algorithm
/// invoked without an execution policy object will execute in sequential
/// order in the calling thread.
///
/// \returns The \a exclusive_scan algorithm returns \a
/// util::in_out_result<InIter, OutIter>.
/// The \a exclusive_scan algorithm returns an input iterator to
/// the point denoted by the sentinel and an output iterator
/// to the element in the destination range, one past the last
/// element copied.
///
/// \note GENERALIZED_NONCOMMUTATIVE_SUM(+, a1, ..., aN) is defined as:
/// * a1 when N is 1
/// * GENERALIZED_NONCOMMUTATIVE_SUM(+, a1, ..., aK)
/// + GENERALIZED_NONCOMMUTATIVE_SUM(+, aM, ..., aN)
/// where 1 < K+1 = M <= N.
///
/// The difference between \a exclusive_scan and \a inclusive_scan is that
/// \a inclusive_scan includes the ith input element in the ith sum.
///
template <typename InIter, typename Sent, typename OutIter, typename T>
exclusive_scan_result<InIter, OutIter> exclusive_scan(
InIter first, Sent last, OutIter dest, T init);
///////////////////////////////////////////////////////////////////////////
/// Assigns through each iterator \a i in [result, result + (last - first))
/// the value of
/// GENERALIZED_NONCOMMUTATIVE_SUM(+, init, *first, ...,
/// *(first + (i - result) - 1))
///
/// \note Complexity: O(\a last - \a first) applications of the
/// predicate \a std::plus<T>.
///
/// \tparam ExPolicy The type of the execution policy to use (deduced).
/// It describes the manner in which the execution
/// of the algorithm may be parallelized and the manner
/// in which it executes the assignments.
/// \tparam FwdIter1 The type of the source iterators used (deduced).
/// This iterator type must meet the requirements of an
/// forward iterator.
/// \tparam Sent The type of the source sentinel (deduced). This
/// sentinel type must be a sentinel for FwdIter.
/// \tparam FwdIter2 The type of the iterator representing the
/// destination range (deduced).
/// This iterator type must meet the requirements of an
/// forward iterator.
/// \tparam T The type of the value to be used as initial (and
/// intermediate) values (deduced).
///
/// \param policy The execution policy to use for the scheduling of
/// the iterations.
/// \param first Refers to the beginning of the sequence of elements
/// the algorithm will be applied to.
/// \param last Refers to sentinel value denoting the end of the
/// sequence of elements the algorithm will be applied.
/// \param dest Refers to the beginning of the destination range.
/// \param init The initial value for the generalized sum.
///
/// The reduce operations in the parallel \a exclusive_scan algorithm
/// invoked with an execution policy object of type \a sequenced_policy
/// execute in sequential order in the calling thread.
///
/// The reduce operations in the parallel \a exclusive_scan algorithm
/// invoked with an execution policy object of type \a parallel_policy
/// or \a parallel_task_policy are permitted to execute in an unordered
/// fashion in unspecified threads, and indeterminately sequenced
/// within each thread.
///
/// \returns The \a exclusive_scan algorithm returns a
/// \a hpx::future<util::in_out_result<FwdIter1, FwdIter2>> if
/// the execution policy is of type
/// \a sequenced_task_policy or
/// \a parallel_task_policy and
/// returns \a util::in_out_result<FwdIter1, FwdIter2> otherwise.
/// The \a exclusive_scan algorithm returns an input iterator to
/// the point denoted by the sentinel and an output iterator
/// to the element in the destination range, one past the last
/// element copied.
///
/// \note GENERALIZED_NONCOMMUTATIVE_SUM(+, a1, ..., aN) is defined as:
/// * a1 when N is 1
/// * GENERALIZED_NONCOMMUTATIVE_SUM(+, a1, ..., aK)
/// + GENERALIZED_NONCOMMUTATIVE_SUM(+, aM, ..., aN)
/// where 1 < K+1 = M <= N.
///
/// The difference between \a exclusive_scan and \a inclusive_scan is that
/// \a inclusive_scan includes the ith input element in the ith sum.
///
template <typename ExPolicy, typename FwdIter1, typename Sent,
typename FwdIter2, typename T>
typename util::detail::algorithm_result<ExPolicy,
exclusive_scan_result<FwdIter1, FwdIter2>>::type
exclusive_scan(
ExPolicy&& policy, FwdIter1 first, Sent last, FwdIter2 dest, T init);
///////////////////////////////////////////////////////////////////////////
/// Assigns through each iterator \a i in [result, result + (last - first))
/// the value of
/// GENERALIZED_NONCOMMUTATIVE_SUM(binary_op, init, *first, ...,
/// *(first + (i - result) - 1)).
///
/// \note Complexity: O(\a last - \a first) applications of the
/// predicate \a op.
///
/// \tparam InIter The type of the source iterators used (deduced).
/// This iterator type must meet the requirements of an
/// input iterator.
/// \tparam Sent The type of the source sentinel (deduced). This
/// sentinel type must be a sentinel for InIter.
/// \tparam OutIter The type of the iterator representing the
/// destination range (deduced).
/// This iterator type must meet the requirements of an
/// output iterator.
/// \tparam T The type of the value to be used as initial (and
/// intermediate) values (deduced).
/// \tparam Op The type of the binary function object used for
/// the reduction operation.
///
/// \param first Refers to the beginning of the sequence of elements
/// the algorithm will be applied to.
/// \param last Refers to sentinel value denoting the end of the
/// sequence of elements the algorithm will be applied.
/// \param dest Refers to the beginning of the destination range.
/// \param init The initial value for the generalized sum.
/// \param op Specifies the function (or function object) which
/// will be invoked for each of the values of the input
/// sequence. This is a
/// binary predicate. The signature of this predicate
/// should be equivalent to:
/// \code
/// Ret fun(const Type1 &a, const Type1 &b);
/// \endcode \n
/// The signature does not need to have const&, but
/// the function must not modify the objects passed to
/// it.
/// The types \a Type1 and \a Ret must be
/// such that an object of a type as given by the input
/// sequence can be implicitly converted to any
/// of those types.
///
/// The reduce operations in the parallel \a exclusive_scan algorithm
/// invoked without an execution policy object will execute in sequential
/// order in the calling thread.
///
/// \returns The \a exclusive_scan algorithm returns \a
/// util::in_out_result<InIter, OutIter>.
/// The \a exclusive_scan algorithm returns an input iterator to
/// the point denoted by the sentinel and an output iterator
/// to the element in the destination range, one past the last
/// element copied.
///
/// \note GENERALIZED_NONCOMMUTATIVE_SUM(op, a1, ..., aN) is defined as:
/// * a1 when N is 1
/// * op(GENERALIZED_NONCOMMUTATIVE_SUM(op, a1, ..., aK),
/// GENERALIZED_NONCOMMUTATIVE_SUM(op, aM, ..., aN))
/// where 1 < K+1 = M <= N.
///
    /// The difference between \a exclusive_scan and \a inclusive_scan is that
    /// \a inclusive_scan includes the ith input element in the ith sum. If
    /// \a op is not mathematically associative, the behavior of
    /// \a exclusive_scan may be non-deterministic.
///
template <typename InIter, typename Sent, typename OutIter, typename T,
typename Op>
exclusive_scan_result<InIter, OutIter> exclusive_scan(
InIter first, Sent last, OutIter dest, T init, Op&& op);
///////////////////////////////////////////////////////////////////////////
/// Assigns through each iterator \a i in [result, result + (last - first))
/// the value of
/// GENERALIZED_NONCOMMUTATIVE_SUM(binary_op, init, *first, ...,
/// *(first + (i - result) - 1)).
///
/// \note Complexity: O(\a last - \a first) applications of the
/// predicate \a op.
///
/// \tparam ExPolicy The type of the execution policy to use (deduced).
/// It describes the manner in which the execution
/// of the algorithm may be parallelized and the manner
/// in which it executes the assignments.
/// \tparam FwdIter1 The type of the source iterators used (deduced).
/// This iterator type must meet the requirements of an
/// forward iterator.
/// \tparam Sent The type of the source sentinel (deduced). This
/// sentinel type must be a sentinel for FwdIter1.
/// \tparam FwdIter2 The type of the iterator representing the
/// destination range (deduced).
/// This iterator type must meet the requirements of an
/// forward iterator.
/// \tparam T The type of the value to be used as initial (and
/// intermediate) values (deduced).
/// \tparam Op The type of the binary function object used for
/// the reduction operation.
///
/// \param policy The execution policy to use for the scheduling of
/// the iterations.
/// \param first Refers to the beginning of the sequence of elements
/// the algorithm will be applied to.
/// \param last Refers to sentinel value denoting the end of the
/// sequence of elements the algorithm will be applied.
/// \param dest Refers to the beginning of the destination range.
/// \param init The initial value for the generalized sum.
/// \param op Specifies the function (or function object) which
/// will be invoked for each of the values of the input
/// sequence. This is a
/// binary predicate. The signature of this predicate
/// should be equivalent to:
/// \code
/// Ret fun(const Type1 &a, const Type1 &b);
/// \endcode \n
/// The signature does not need to have const&, but
/// the function must not modify the objects passed to
/// it.
/// The types \a Type1 and \a Ret must be
/// such that an object of a type as given by the input
/// sequence can be implicitly converted to any
/// of those types.
///
/// The reduce operations in the parallel \a exclusive_scan algorithm
/// invoked with an execution policy object of type \a sequenced_policy
/// execute in sequential order in the calling thread.
///
/// The reduce operations in the parallel \a exclusive_scan algorithm
/// invoked with an execution policy object of type \a parallel_policy
/// or \a parallel_task_policy are permitted to execute in an unordered
/// fashion in unspecified threads, and indeterminately sequenced
/// within each thread.
///
/// \returns The \a exclusive_scan algorithm returns a
/// \a hpx::future<util::in_out_result<FwdIter1, FwdIter2>> if
/// the execution policy is of type
/// \a sequenced_task_policy or
/// \a parallel_task_policy and
/// returns \a util::in_out_result<FwdIter1, FwdIter2> otherwise.
/// The \a exclusive_scan algorithm returns an input iterator to
/// the point denoted by the sentinel and an output iterator
/// to the element in the destination range, one past the last
/// element copied.
///
/// \note GENERALIZED_NONCOMMUTATIVE_SUM(op, a1, ..., aN) is defined as:
/// * a1 when N is 1
/// * op(GENERALIZED_NONCOMMUTATIVE_SUM(op, a1, ..., aK),
/// GENERALIZED_NONCOMMUTATIVE_SUM(op, aM, ..., aN))
/// where 1 < K+1 = M <= N.
///
/// The difference between \a exclusive_scan and \a inclusive_scan is that
/// \a inclusive_scan includes the ith input element in the ith sum. If
/// \a op is not mathematically associative, the behavior of
/// \a inclusive_scan may be non-deterministic.
///
template <typename ExPolicy, typename FwdIter1, typename Sent,
typename FwdIter2, typename T, typename Op>
typename util::detail::algorithm_result<ExPolicy,
exclusive_scan_result<FwdIter1, FwdIter2>>::type
exclusive_scan(ExPolicy&& policy, FwdIter1 first, Sent last, FwdIter2 dest,
T init, Op&& op);
///////////////////////////////////////////////////////////////////////////
/// Assigns through each iterator \a i in [result, result + (last - first))
/// the value of
/// GENERALIZED_NONCOMMUTATIVE_SUM(+, init, *first, ...,
/// *(first + (i - result) - 1))
///
/// \note Complexity: O(\a last - \a first) applications of the
/// predicate \a std::plus<T>.
///
/// \tparam Rng The type of the source range used (deduced).
/// The iterators extracted from this range type must
/// meet the requirements of an input iterator.
/// \tparam O The type of the iterator representing the
/// destination range (deduced).
/// This iterator type must meet the requirements of an
/// output iterator.
/// \tparam T The type of the value to be used as initial (and
/// intermediate) values (deduced).
///
/// \param rng Refers to the sequence of elements the algorithm
/// will be applied to.
/// \param dest Refers to the beginning of the destination range.
/// \param init The initial value for the generalized sum.
///
/// The reduce operations in the parallel \a exclusive_scan algorithm
/// invoked without an execution policy object will execute in sequential
/// order in the calling thread.
///
/// \returns The \a exclusive_scan algorithm returns
/// \a util::in_out_result<traits::range_iterator_t<Rng>, O>
/// The \a exclusive_scan algorithm returns an input iterator to
/// the point denoted by the sentinel and an output iterator
/// to the element in the destination range, one past the last
/// element copied.
///
/// \note GENERALIZED_NONCOMMUTATIVE_SUM(+, a1, ..., aN) is defined as:
/// * a1 when N is 1
/// * GENERALIZED_NONCOMMUTATIVE_SUM(+, a1, ..., aK)
/// + GENERALIZED_NONCOMMUTATIVE_SUM(+, aM, ..., aN)
/// where 1 < K+1 = M <= N.
///
/// The difference between \a exclusive_scan and \a inclusive_scan is that
/// \a inclusive_scan includes the ith input element in the ith sum.
///
template <typename Rng, typename O, typename T>
exclusive_scan_result<traits::range_iterator_t<Rng>, O> exclusive_scan(
Rng&& rng, O dest, T init);
///////////////////////////////////////////////////////////////////////////
/// Assigns through each iterator \a i in [result, result + (last - first))
/// the value of
/// GENERALIZED_NONCOMMUTATIVE_SUM(+, init, *first, ...,
/// *(first + (i - result) - 1))
///
/// \note Complexity: O(\a last - \a first) applications of the
/// predicate \a std::plus<T>.
///
/// \tparam ExPolicy The type of the execution policy to use (deduced).
/// It describes the manner in which the execution
/// of the algorithm may be parallelized and the manner
/// in which it executes the assignments.
/// \tparam Rng The type of the source range used (deduced).
/// The iterators extracted from this range type must
    ///                     meet the requirements of a forward iterator.
/// \tparam O The type of the iterator representing the
/// destination range (deduced).
    ///                     This iterator type must meet the requirements of a
    ///                     forward iterator.
/// \tparam T The type of the value to be used as initial (and
/// intermediate) values (deduced).
///
/// \param policy The execution policy to use for the scheduling of
/// the iterations.
/// \param rng Refers to the sequence of elements the algorithm
/// will be applied to.
/// \param dest Refers to the beginning of the destination range.
/// \param init The initial value for the generalized sum.
///
/// The reduce operations in the parallel \a exclusive_scan algorithm
/// invoked with an execution policy object of type \a sequenced_policy
/// execute in sequential order in the calling thread.
///
/// The reduce operations in the parallel \a exclusive_scan algorithm
/// invoked with an execution policy object of type \a parallel_policy
/// or \a parallel_task_policy are permitted to execute in an unordered
/// fashion in unspecified threads, and indeterminately sequenced
/// within each thread.
///
/// \returns The \a exclusive_scan algorithm returns a
/// \a hpx::future<util::in_out_result
/// <traits::range_iterator_t<Rng>, O>>
/// if the execution policy is of type
/// \a sequenced_task_policy or
/// \a parallel_task_policy and
/// returns \a util::in_out_result
/// <traits::range_iterator_t<Rng>, O>
/// otherwise.
/// The \a exclusive_scan algorithm returns an input iterator to
/// the point denoted by the sentinel and an output iterator
/// to the element in the destination range, one past the last
/// element copied.
///
/// \note GENERALIZED_NONCOMMUTATIVE_SUM(+, a1, ..., aN) is defined as:
/// * a1 when N is 1
/// * GENERALIZED_NONCOMMUTATIVE_SUM(+, a1, ..., aK)
/// + GENERALIZED_NONCOMMUTATIVE_SUM(+, aM, ..., aN)
/// where 1 < K+1 = M <= N.
///
/// The difference between \a exclusive_scan and \a inclusive_scan is that
/// \a inclusive_scan includes the ith input element in the ith sum.
///
template <typename ExPolicy, typename Rng, typename O, typename T>
typename util::detail::algorithm_result<ExPolicy,
exclusive_scan_result<traits::range_iterator_t<Rng>, O>>::type
exclusive_scan(ExPolicy&& policy, Rng&& rng, O dest, T init);
///////////////////////////////////////////////////////////////////////////
/// Assigns through each iterator \a i in [result, result + (last - first))
/// the value of
/// GENERALIZED_NONCOMMUTATIVE_SUM(+, init, *first, ...,
/// *(first + (i - result) - 1))
///
/// \note Complexity: O(\a last - \a first) applications of the
/// predicate \a std::plus<T>.
///
/// \tparam Rng The type of the source range used (deduced).
/// The iterators extracted from this range type must
/// meet the requirements of an input iterator.
/// \tparam O The type of the iterator representing the
/// destination range (deduced).
/// This iterator type must meet the requirements of an
/// output iterator.
/// \tparam T The type of the value to be used as initial (and
/// intermediate) values (deduced).
/// \tparam Op The type of the binary function object used for
/// the reduction operation.
///
/// \param rng Refers to the sequence of elements the algorithm
/// will be applied to.
/// \param dest Refers to the beginning of the destination range.
/// \param init The initial value for the generalized sum.
/// \param op Specifies the function (or function object) which
/// will be invoked for each of the values of the input
/// sequence. This is a
/// binary predicate. The signature of this predicate
/// should be equivalent to:
/// \code
/// Ret fun(const Type1 &a, const Type1 &b);
/// \endcode \n
/// The signature does not need to have const&, but
/// the function must not modify the objects passed to
/// it.
/// The types \a Type1 and \a Ret must be
/// such that an object of a type as given by the input
/// sequence can be implicitly converted to any
/// of those types.
///
/// The reduce operations in the parallel \a exclusive_scan algorithm
/// invoked without an execution policy object will execute in sequential
/// order in the calling thread.
///
/// \returns The \a exclusive_scan algorithm returns
/// \a util::in_out_result<traits::range_iterator_t<Rng>, O>
/// The \a exclusive_scan algorithm returns an input iterator to
/// the point denoted by the sentinel and an output iterator
/// to the element in the destination range, one past the last
/// element copied.
///
/// \note GENERALIZED_NONCOMMUTATIVE_SUM(+, a1, ..., aN) is defined as:
/// * a1 when N is 1
/// * GENERALIZED_NONCOMMUTATIVE_SUM(+, a1, ..., aK)
/// + GENERALIZED_NONCOMMUTATIVE_SUM(+, aM, ..., aN)
/// where 1 < K+1 = M <= N.
///
/// The difference between \a exclusive_scan and \a inclusive_scan is that
/// \a inclusive_scan includes the ith input element in the ith sum.
///
template <typename Rng, typename O, typename T, typename Op>
exclusive_scan_result<traits::range_iterator_t<Rng>, O> exclusive_scan(
Rng&& rng, O dest, T init, Op&& op);
///////////////////////////////////////////////////////////////////////////
/// Assigns through each iterator \a i in [result, result + (last - first))
/// the value of
/// GENERALIZED_NONCOMMUTATIVE_SUM(+, init, *first, ...,
/// *(first + (i - result) - 1))
///
/// \note Complexity: O(\a last - \a first) applications of the
/// predicate \a std::plus<T>.
///
/// \tparam ExPolicy The type of the execution policy to use (deduced).
/// It describes the manner in which the execution
/// of the algorithm may be parallelized and the manner
/// in which it executes the assignments.
/// \tparam Rng The type of the source range used (deduced).
/// The iterators extracted from this range type must
    ///                     meet the requirements of a forward iterator.
/// \tparam O The type of the iterator representing the
/// destination range (deduced).
    ///                     This iterator type must meet the requirements of a
    ///                     forward iterator.
/// \tparam T The type of the value to be used as initial (and
/// intermediate) values (deduced).
/// \tparam Op The type of the binary function object used for
/// the reduction operation.
///
/// \param policy The execution policy to use for the scheduling of
/// the iterations.
/// \param rng Refers to the sequence of elements the algorithm
/// will be applied to.
/// \param dest Refers to the beginning of the destination range.
/// \param init The initial value for the generalized sum.
/// \param op Specifies the function (or function object) which
/// will be invoked for each of the values of the input
/// sequence. This is a
/// binary predicate. The signature of this predicate
/// should be equivalent to:
/// \code
/// Ret fun(const Type1 &a, const Type1 &b);
/// \endcode \n
/// The signature does not need to have const&, but
/// the function must not modify the objects passed to
/// it.
/// The types \a Type1 and \a Ret must be
/// such that an object of a type as given by the input
/// sequence can be implicitly converted to any
/// of those types.
///
/// The reduce operations in the parallel \a exclusive_scan algorithm
/// invoked with an execution policy object of type \a sequenced_policy
/// execute in sequential order in the calling thread.
///
/// The reduce operations in the parallel \a exclusive_scan algorithm
/// invoked with an execution policy object of type \a parallel_policy
/// or \a parallel_task_policy are permitted to execute in an unordered
/// fashion in unspecified threads, and indeterminately sequenced
/// within each thread.
///
/// \returns The \a exclusive_scan algorithm returns a
/// \a hpx::future<util::in_out_result
/// <traits::range_iterator_t<Rng>, O>>
/// if the execution policy is of type
/// \a sequenced_task_policy or
/// \a parallel_task_policy and
/// returns \a util::in_out_result
/// <traits::range_iterator_t<Rng>, O>
/// otherwise.
/// The \a exclusive_scan algorithm returns an input iterator to
/// the point denoted by the sentinel and an output iterator
/// to the element in the destination range, one past the last
/// element copied.
///
/// \note GENERALIZED_NONCOMMUTATIVE_SUM(+, a1, ..., aN) is defined as:
/// * a1 when N is 1
/// * GENERALIZED_NONCOMMUTATIVE_SUM(+, a1, ..., aK)
/// + GENERALIZED_NONCOMMUTATIVE_SUM(+, aM, ..., aN)
/// where 1 < K+1 = M <= N.
///
/// The difference between \a exclusive_scan and \a inclusive_scan is that
/// \a inclusive_scan includes the ith input element in the ith sum.
///
template <typename ExPolicy, typename Rng, typename O, typename T,
typename Op>
typename util::detail::algorithm_result<ExPolicy,
exclusive_scan_result<traits::range_iterator_t<Rng>, O>>::type
exclusive_scan(ExPolicy&& policy, Rng&& rng, O dest, T init, Op&& op);
}} // namespace hpx::ranges
#else
#include <hpx/config.hpp>
#include <hpx/algorithms/traits/projected_range.hpp>
#include <hpx/execution/algorithms/detail/predicates.hpp>
#include <hpx/executors/execution_policy.hpp>
#include <hpx/functional/detail/tag_fallback_invoke.hpp>
#include <hpx/iterator_support/traits/is_iterator.hpp>
#include <hpx/iterator_support/traits/is_range.hpp>
#include <hpx/parallel/algorithms/exclusive_scan.hpp>
#include <hpx/parallel/util/detail/algorithm_result.hpp>
#include <hpx/parallel/util/detail/sender_util.hpp>
#include <hpx/parallel/util/projection_identity.hpp>
#include <algorithm>
#include <cstddef>
#include <iterator>
#include <type_traits>
#include <utility>
#include <vector>
namespace hpx { namespace ranges {
template <typename I, typename O>
using exclusive_scan_result = parallel::util::in_out_result<I, O>;
    // Customization point object implementing hpx::ranges::exclusive_scan.
    // Dispatch is performed through the tag_fallback_invoke overloads below:
    // two iterator/sentinel overloads (sequential and policy-driven) and two
    // range overloads (sequential and policy-driven).  All four forward to
    // the parallel::v1 exclusive_scan algorithm implementation.
    HPX_INLINE_CONSTEXPR_VARIABLE struct exclusive_scan_t final
      : hpx::detail::tag_parallel_algorithm<exclusive_scan_t>
    {
    private:
        // Overload 1: iterator/sentinel pair, no execution policy.
        // Runs sequentially in the calling thread.
        // clang-format off
        template <typename InIter, typename Sent, typename OutIter,
            typename T, typename Op = std::plus<T>,
            HPX_CONCEPT_REQUIRES_(
                hpx::traits::is_iterator_v<InIter> &&
                hpx::traits::is_sentinel_for<Sent, InIter>::value &&
                hpx::traits::is_iterator_v<OutIter> &&
                hpx::is_invocable_v<Op,
                    typename std::iterator_traits<InIter>::value_type,
                    typename std::iterator_traits<InIter>::value_type
                >
            )>
        // clang-format on
        friend exclusive_scan_result<InIter, OutIter> tag_fallback_invoke(
            hpx::ranges::exclusive_scan_t, InIter first, Sent last,
            OutIter dest, T init, Op&& op = Op())
        {
            static_assert(hpx::traits::is_input_iterator_v<InIter>,
                "Requires at least input iterator.");
            static_assert(hpx::traits::is_output_iterator_v<OutIter>,
                "Requires at least output iterator.");
            using result_type = exclusive_scan_result<InIter, OutIter>;
            // Force the sequential execution policy for the policy-less call.
            return hpx::parallel::v1::detail::exclusive_scan<result_type>()
                .call(hpx::execution::seq, first, last, dest, std::move(init),
                    std::forward<Op>(op));
        }
        // Overload 2: iterator/sentinel pair with an execution policy;
        // may run in parallel depending on the policy.
        // clang-format off
        template <typename ExPolicy, typename FwdIter1, typename Sent,
            typename FwdIter2, typename T, typename Op = std::plus<T>,
            HPX_CONCEPT_REQUIRES_(
                hpx::is_execution_policy<ExPolicy>::value &&
                hpx::traits::is_iterator_v<FwdIter1> &&
                hpx::traits::is_sentinel_for<Sent, FwdIter1>::value &&
                hpx::traits::is_iterator_v<FwdIter2> &&
                hpx::is_invocable_v<Op,
                    typename std::iterator_traits<FwdIter1>::value_type,
                    typename std::iterator_traits<FwdIter1>::value_type
                >
            )>
        // clang-format on
        friend typename parallel::util::detail::algorithm_result<ExPolicy,
            exclusive_scan_result<FwdIter1, FwdIter2>>::type
        tag_fallback_invoke(hpx::ranges::exclusive_scan_t, ExPolicy&& policy,
            FwdIter1 first, Sent last, FwdIter2 dest, T init, Op&& op = Op())
        {
            // Parallel execution requires forward iterators (multi-pass).
            static_assert(hpx::traits::is_forward_iterator_v<FwdIter1>,
                "Requires at least forward iterator.");
            static_assert(hpx::traits::is_forward_iterator_v<FwdIter2>,
                "Requires at least forward iterator.");
            using result_type = exclusive_scan_result<FwdIter1, FwdIter2>;
            return hpx::parallel::v1::detail::exclusive_scan<result_type>()
                .call(std::forward<ExPolicy>(policy), first, last, dest,
                    std::move(init), std::forward<Op>(op));
        }
        // Overload 3: whole range, no execution policy; sequential.
        // clang-format off
        template <typename Rng, typename O, typename T,
            typename Op = std::plus<T>,
            HPX_CONCEPT_REQUIRES_(
                hpx::traits::is_range<Rng>::value &&
                hpx::is_invocable_v<Op,
                    typename hpx::traits::range_traits<Rng>::value_type,
                    typename hpx::traits::range_traits<Rng>::value_type
                >
            )>
        // clang-format on
        friend exclusive_scan_result<traits::range_iterator_t<Rng>, O>
        tag_fallback_invoke(hpx::ranges::exclusive_scan_t, Rng&& rng, O dest,
            T init, Op&& op = Op())
        {
            static_assert(hpx::traits::is_input_iterator<
                              traits::range_iterator_t<Rng>>::value,
                "Requires at least input iterator.");
            using result_type =
                exclusive_scan_result<traits::range_iterator_t<Rng>, O>;
            return hpx::parallel::v1::detail::exclusive_scan<result_type>()
                .call(hpx::execution::seq, std::begin(rng), std::end(rng), dest,
                    std::move(init), std::forward<Op>(op));
        }
        // Overload 4: whole range with an execution policy.
        // clang-format off
        template <typename ExPolicy, typename Rng, typename O, typename T,
            typename Op = std::plus<T>,
            HPX_CONCEPT_REQUIRES_(
                hpx::is_execution_policy<ExPolicy>::value &&
                hpx::traits::is_range<Rng>::value &&
                hpx::is_invocable_v<Op,
                    typename hpx::traits::range_traits<Rng>::value_type,
                    typename hpx::traits::range_traits<Rng>::value_type
                >
            )>
        // clang-format on
        friend typename parallel::util::detail::algorithm_result<ExPolicy,
            exclusive_scan_result<traits::range_iterator_t<Rng>, O>>::type
        tag_fallback_invoke(hpx::ranges::exclusive_scan_t, ExPolicy&& policy,
            Rng&& rng, O dest, T init, Op&& op = Op())
        {
            static_assert(hpx::traits::is_forward_iterator<
                              traits::range_iterator_t<Rng>>::value,
                "Requires at least forward iterator.");
            using result_type =
                exclusive_scan_result<traits::range_iterator_t<Rng>, O>;
            return hpx::parallel::v1::detail::exclusive_scan<result_type>()
                .call(std::forward<ExPolicy>(policy), std::begin(rng),
                    std::end(rng), dest, std::move(init), std::forward<Op>(op));
        }
    } exclusive_scan{};
}} // namespace hpx::ranges
#endif
|
#include "appinforeader.h"
#include <QJsonDocument>
#include <QJsonArray>
#include <QJsonObject>
#include <QFile>
#include <QDebug>
#include "appi18n.h"
// Default constructor: the reader is stateless; each read* call is independent.
AppInfoReader::AppInfoReader()
{
}
// Reads a JSON array of application descriptions from the file at fileName.
// Returns a heap-allocated list (never null; caller owns the list and its
// elements).  On failure to open the file an empty list is returned.
QList<Application*>* AppInfoReader::readFromPath(QString fileName){
    QFile file(fileName);
    // The original code ignored the open() result; an unreadable file now
    // reports a warning instead of silently parsing an empty byte array.
    if(!file.open(QIODevice::ReadOnly)){
        qWarning() << "AppInfoReader::readFromPath: cannot open" << fileName;
        return new QList<Application*>();
    }
    auto byteArray = file.readAll();
    file.close();
    return loadFromJsonArray(byteArray);
}
// Parses a single JSON object (given as raw bytes) into a new Application.
// Always returns a non-null, caller-owned Application; on a parse error or a
// non-object document the returned object is default-constructed.
// Note: unlike loadFromJsonObject(), this variant also consumes the optional
// "i18n" key and mirrors "app_file" into setApplicationName().
Application* AppInfoReader::fromJsonObject(QByteArray byteArray){
    Application * appInfo=new Application();
    QJsonParseError json_error;
    QJsonDocument document=QJsonDocument::fromJson(byteArray,&json_error);
    // Bail out early on malformed input; the empty Application signals "no data".
    if(json_error.error!=QJsonParseError::NoError || !document.isObject())
    {
        return appInfo;
    }
    QJsonObject jsonObject=document.object();
    // take() removes each consumed key from the local copy of the object.
    if(jsonObject.contains("ui_name"))
    {
        QJsonValue ui_name= jsonObject.take("ui_name");
        appInfo->setUiName(ui_name.toString(""));
    }
    if(jsonObject.contains("app_name"))
    {
        QJsonValue app_name= jsonObject.take("app_name");
        appInfo->setName(app_name.toString(""));
    }
    if(jsonObject.contains("app_icon"))
    {
        QJsonValue app_icon= jsonObject.take("app_icon");
        // Default "0" matches loadFromJsonObject(); the original `"0"""`
        // literal was an accidental concatenation with an empty string and
        // evaluated to "0" anyway.
        appInfo->setIcon(app_icon.toString("0"));
    }
    if(jsonObject.contains("app_argv"))
    {
        QJsonValue app_argv= jsonObject.take("app_argv");
        appInfo->setArgv(app_argv.toString("0"));
    }
    if(jsonObject.contains("app_file"))
    {
        QJsonValue app_file= jsonObject.take("app_file");
        appInfo->app_file=app_file.toString("");
        appInfo->setApplicationName(app_file.toString(""));
    }
    if(jsonObject.contains("exit_callback"))
    {
        QJsonValue exit_callback= jsonObject.take("exit_callback");
        appInfo->setExitCallback(exit_callback.toString(""));
    }
    if(jsonObject.contains("i18n"))
    {
        QJsonValue i18n= jsonObject.take("i18n");
        appInfo->seti18n(i18n.toString());
    }
    return appInfo;
}
// Parses the raw bytes as a JSON array and converts every element into an
// Application.  Returns a heap-allocated, caller-owned list; empty (but never
// null) when the bytes are not a valid JSON array.
QList<Application*>* AppInfoReader::loadFromJsonArray(QByteArray byteArray)
{
    auto *result = new QList<Application*>();
    QJsonParseError parseStatus;
    const QJsonDocument doc = QJsonDocument::fromJson(byteArray, &parseStatus);
    if (parseStatus.error != QJsonParseError::NoError)
        return result;
    if (!doc.isArray())
        return result;
    const QJsonArray entries = doc.array();
    for (int idx = 0; idx < entries.count(); ++idx)
    {
        QJsonObject entry = entries.at(idx).toObject();
        result->append(loadFromJsonObject(entry));
    }
    return result;
}
// Builds a new caller-owned Application from an in-memory JSON object.
// NOTE: QJsonObject::take() removes every consumed key from the caller's
// object, exactly as the key-by-key original did.
Application* AppInfoReader::loadFromJsonObject(QJsonObject &jsonObject){
    Application *app = new Application();
    // Removes `key` from jsonObject when present; stores its string value
    // (or `fallback` for non-string values) in `out` and reports presence.
    auto takeString = [&jsonObject](const char *key, const QString &fallback,
                          QString &out) -> bool {
        if (!jsonObject.contains(key))
            return false;
        out = jsonObject.take(key).toString(fallback);
        return true;
    };
    QString value;
    if (takeString("ui_name", "", value))
        app->setUiName(value);
    if (takeString("app_name", "", value))
        app->setName(value);
    if (takeString("app_icon", "0", value))
        app->setIcon(value);
    if (takeString("app_argv", "0", value))
        app->setArgv(value);
    if (takeString("app_file", "", value))
        app->app_file = value;
    if (takeString("exit_callback", "", value))
        app->setExitCallback(value);
    return app;
}
|
/* $Id: VBVABase.cpp $ */
/** @file
* VirtualBox Video driver, common code - VBVA initialisation and helper
* functions.
*/
/*
* Copyright (C) 2006-2017 Oracle Corporation
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <VBoxVideoGuest.h>
#include <VBoxVideoIPRT.h>
#include <HGSMIChannels.h>
/*
* There is a hardware ring buffer in the graphics device video RAM, formerly
* in the VBox VMMDev PCI memory space.
 * All graphics commands go there, serialized by VBoxVBVABufferBeginUpdate
 * and VBoxVBVABufferEndUpdate.
*
* off32Free is writing position. off32Data is reading position.
* off32Free == off32Data means buffer is empty.
* There must be always gap between off32Data and off32Free when data
* are in the buffer.
* Guest only changes off32Free, host changes off32Data.
*/
/* Forward declarations of internal functions. */
static void vboxHwBufferFlush(PHGSMIGUESTCOMMANDCONTEXT pCtx);
static void vboxHwBufferPlaceDataAt(PVBVABUFFERCONTEXT pCtx, const void *p,
uint32_t cb, uint32_t offset);
static bool vboxHwBufferWrite(PVBVABUFFERCONTEXT pCtx,
PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
const void *p, uint32_t cb);
/**
 * Tells the host to enable or disable VBVA for one screen (or all screens).
 *
 * Allocates a VBVAENABLE_EX command in the HGSMI heap, fills it in and
 * submits it.  When cScreen is non-negative the extended/absolute-offset
 * flags are set and the screen id is included in the command.
 *
 * @returns For enable requests: true iff the host reported success in
 *          i32Result; for disable requests: true whenever the command
 *          buffer could be allocated and submitted.
 */
static bool vboxVBVAInformHost(PVBVABUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, int32_t cScreen, bool fEnable)
{
    bool fRc = false;
#if 0 /* All callers check this */
    if (ppdev->bHGSMISupported)
#endif
    {
        VBVAENABLE_EX RT_UNTRUSTED_VOLATILE_HOST *pEnable =
            (VBVAENABLE_EX RT_UNTRUSTED_VOLATILE_HOST *)VBoxHGSMIBufferAlloc(pHGSMICtx, sizeof(VBVAENABLE_EX),
                                                                             HGSMI_CH_VBVA, VBVA_ENABLE);
        if (pEnable != NULL)
        {
            pEnable->Base.u32Flags  = fEnable ? VBVA_F_ENABLE : VBVA_F_DISABLE;
            pEnable->Base.u32Offset = pCtx->offVRAMBuffer;
            /* Pre-set failure; the host overwrites this on success. */
            pEnable->Base.i32Result = VERR_NOT_SUPPORTED;
            if (cScreen >= 0)
            {
                pEnable->Base.u32Flags |= VBVA_F_EXTENDED | VBVA_F_ABSOFFSET;
                pEnable->u32ScreenId    = cScreen;
            }
            VBoxHGSMIBufferSubmit(pHGSMICtx, pEnable);
            if (fEnable)
                fRc = RT_SUCCESS(pEnable->Base.i32Result);
            else
                fRc = true;
            VBoxHGSMIBufferFree(pHGSMICtx, pEnable);
        }
        else
        {
            // LogFunc(("HGSMIHeapAlloc failed\n"));
        }
    }
    return fRc;
}
/*
* Public hardware buffer methods.
*/
/**
 * Initialises the VBVA ring buffer structure and asks the host to enable
 * VBVA for the given screen.
 *
 * Resets all ring-buffer bookkeeping in @a pVBVA (read/write offsets, the
 * record queue and host flags), attaches the buffer to the context and then
 * informs the host.  On failure the context is rolled back via
 * VBoxVBVADisable().
 *
 * @returns true on success, false if the host rejected the request.
 */
DECLHIDDEN(bool) VBoxVBVAEnable(PVBVABUFFERCONTEXT pCtx,
                                PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                                VBVABUFFER *pVBVA, int32_t cScreen)
{
    bool fRc = false;
    // LogFlowFunc(("pVBVA %p\n", pVBVA));
#if 0  /* All callers check this */
    if (ppdev->bHGSMISupported)
#endif
    {
        // LogFunc(("pVBVA %p vbva off 0x%x\n", pVBVA, pCtx->offVRAMBuffer));
        pVBVA->hostFlags.u32HostEvents      = 0;
        pVBVA->hostFlags.u32SupportedOrders = 0;
        pVBVA->off32Data          = 0;
        pVBVA->off32Free          = 0;
        memset(pVBVA->aRecords, 0, sizeof (pVBVA->aRecords));
        pVBVA->indexRecordFirst   = 0;
        pVBVA->indexRecordFree    = 0;
        /* Writes smaller than this are not split across a flush. */
        pVBVA->cbPartialWriteThreshold = 256;
        /* Usable data area: buffer size minus the header, plus the
         * au8Data placeholder already counted inside VBVABUFFER. */
        pVBVA->cbData             = pCtx->cbBuffer - sizeof (VBVABUFFER) + sizeof (pVBVA->au8Data);
        pCtx->fHwBufferOverflow = false;
        pCtx->pRecord    = NULL;
        pCtx->pVBVA      = pVBVA;
        fRc = vboxVBVAInformHost(pCtx, pHGSMICtx, cScreen, true);
    }
    if (!fRc)
    {
        VBoxVBVADisable(pCtx, pHGSMICtx, cScreen);
    }
    return fRc;
}
/**
 * Detaches the ring buffer from the context and tells the host to disable
 * VBVA for the given screen.  Safe to call on an already-disabled context.
 */
DECLHIDDEN(void) VBoxVBVADisable(PVBVABUFFERCONTEXT pCtx,
                                 PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                                 int32_t cScreen)
{
    // LogFlowFunc(("\n"));
    pCtx->fHwBufferOverflow = false;
    pCtx->pRecord           = NULL;
    pCtx->pVBVA             = NULL;
    vboxVBVAInformHost(pCtx, pHGSMICtx, cScreen, false);
}
/**
 * Opens a new partial record in the VBVA record queue for a screen update.
 *
 * Fails when VBVA is not enabled by the host, or when the record queue is
 * full even after flushing.  On success pCtx->pRecord points at the new
 * record, which must be closed with VBoxVBVABufferEndUpdate().
 *
 * @returns true when a record was opened and data may be written.
 */
DECLHIDDEN(bool) VBoxVBVABufferBeginUpdate(PVBVABUFFERCONTEXT pCtx,
                                           PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
{
    bool fRc = false;
    // LogFunc(("flags = 0x%08X\n", pCtx->pVBVA? pCtx->pVBVA->u32HostEvents: -1));
    if (   pCtx->pVBVA
        && (pCtx->pVBVA->hostFlags.u32HostEvents & VBVA_F_MODE_ENABLED))
    {
        uint32_t indexRecordNext;
        /* Updates must not be nested. */
        Assert(!pCtx->fHwBufferOverflow);
        Assert(pCtx->pRecord == NULL);
        indexRecordNext = (pCtx->pVBVA->indexRecordFree + 1) % VBVA_MAX_RECORDS;
        if (indexRecordNext == pCtx->pVBVA->indexRecordFirst)
        {
            /* All slots in the records queue are used. */
            vboxHwBufferFlush (pHGSMICtx);
        }
        if (indexRecordNext == pCtx->pVBVA->indexRecordFirst)
        {
            /* Even after flush there is no place. Fail the request. */
            // LogFunc(("no space in the queue of records!!! first %d, last %d\n",
            //          pCtx->pVBVA->indexRecordFirst, pCtx->pVBVA->indexRecordFree));
        }
        else
        {
            /* Initialize the record. */
            VBVARECORD *pRecord = &pCtx->pVBVA->aRecords[pCtx->pVBVA->indexRecordFree];
            /* Marked partial until VBoxVBVABufferEndUpdate() completes it. */
            pRecord->cbRecord = VBVA_F_RECORD_PARTIAL;
            pCtx->pVBVA->indexRecordFree = indexRecordNext;
            // LogFunc(("indexRecordNext = %d\n", indexRecordNext));
            /* Remember which record we are using. */
            pCtx->pRecord = pRecord;
            fRc = true;
        }
    }
    return fRc;
}
/**
 * Closes the record opened by VBoxVBVABufferBeginUpdate(): clears the
 * partial flag so the host may process the record, and resets the
 * per-update state of the context.
 */
DECLHIDDEN(void) VBoxVBVABufferEndUpdate(PVBVABUFFERCONTEXT pCtx)
{
    // LogFunc(("\n"));
    Assert(pCtx->pVBVA);
    VBVARECORD *pCurrent = pCtx->pRecord;
    Assert(pCurrent && (pCurrent->cbRecord & VBVA_F_RECORD_PARTIAL));
    /* Mark the record completed. */
    pCurrent->cbRecord &= ~VBVA_F_RECORD_PARTIAL;
    pCtx->fHwBufferOverflow = false;
    pCtx->pRecord = NULL;
}
/*
* Private operations.
*/
/**
 * Returns the number of bytes available for writing in the circular data
 * buffer.  When the read and write offsets coincide the buffer is empty,
 * so the whole cbData is reported free.
 */
static uint32_t vboxHwBufferAvail (const VBVABUFFER *pVBVA)
{
    const int32_t cbGap = pVBVA->off32Data - pVBVA->off32Free;
    if (cbGap > 0)
        return cbGap;
    return pVBVA->cbData + cbGap;
}
/**
 * Submits a VBVA_FLUSH command so the host consumes pending ring-buffer
 * data, freeing space and record slots.  Allocation failure is silently
 * ignored (the flush is best-effort).
 */
static void vboxHwBufferFlush(PHGSMIGUESTCOMMANDCONTEXT pCtx)
{
    /* Issue the flush command. */
    VBVAFLUSH RT_UNTRUSTED_VOLATILE_HOST *pFlush =
        (VBVAFLUSH RT_UNTRUSTED_VOLATILE_HOST * )VBoxHGSMIBufferAlloc(pCtx, sizeof(VBVAFLUSH), HGSMI_CH_VBVA, VBVA_FLUSH);
    if (pFlush != NULL)
    {
        pFlush->u32Reserved = 0;
        VBoxHGSMIBufferSubmit(pCtx, pFlush);
        VBoxHGSMIBufferFree(pCtx, pFlush);
    }
    else
    {
        // LogFunc(("HGSMIHeapAlloc failed\n"));
    }
}
/**
 * Copies cb bytes from p into the circular data buffer at the given offset,
 * wrapping around to the start of au8Data when the chunk crosses the end of
 * the buffer.  The caller guarantees enough free space.
 */
static void vboxHwBufferPlaceDataAt(PVBVABUFFERCONTEXT pCtx, const void *p,
                                    uint32_t cb, uint32_t offset)
{
    VBVABUFFER *pVBVA = pCtx->pVBVA;
    const uint32_t cbTillEnd = pVBVA->cbData - offset;
    uint8_t *pu8Dst = &pVBVA->au8Data[offset];
    const int32_t cbWrapped = cb - cbTillEnd;
    if (cbWrapped > 0)
    {
        /* Chunk crosses buffer boundary: split into tail + head copies. */
        memcpy (pu8Dst, p, cbTillEnd);
        memcpy (&pVBVA->au8Data[0], (const uint8_t *)p + cbTillEnd, cbWrapped);
    }
    else
    {
        /* Chunk fits before the end of the buffer. */
        memcpy (pu8Dst, p, cb);
    }
}
/**
 * Writes cb bytes into the hardware ring buffer within the currently open
 * record, flushing and falling back to partial writes when the buffer is
 * too full.  Sets pCtx->fHwBufferOverflow and fails when even a flush
 * cannot free more than cbPartialWriteThreshold bytes.
 *
 * @returns true when all bytes were written.
 */
static bool vboxHwBufferWrite(PVBVABUFFERCONTEXT pCtx,
                              PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                              const void *p, uint32_t cb)
{
    VBVARECORD *pRecord;
    uint32_t cbHwBufferAvail;
    uint32_t cbWritten = 0;
    VBVABUFFER *pVBVA = pCtx->pVBVA;
    Assert(pVBVA);
    if (!pVBVA || pCtx->fHwBufferOverflow)
    {
        return false;
    }
    /* A record must be open (queue cannot look empty while writing). */
    Assert(pVBVA->indexRecordFirst != pVBVA->indexRecordFree);
    pRecord = pCtx->pRecord;
    Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));
    // LogFunc(("%d\n", cb));
    cbHwBufferAvail = vboxHwBufferAvail (pVBVA);
    while (cb > 0)
    {
        uint32_t cbChunk = cb;
        // LogFunc(("pVBVA->off32Free %d, pRecord->cbRecord 0x%08X, cbHwBufferAvail %d, cb %d, cbWritten %d\n",
        //          pVBVA->off32Free, pRecord->cbRecord, cbHwBufferAvail, cb, cbWritten));
        if (cbChunk >= cbHwBufferAvail)
        {
            /* Not enough room: ask the host to drain the buffer first. */
            // LogFunc(("1) avail %d, chunk %d\n", cbHwBufferAvail, cbChunk))
            vboxHwBufferFlush (pHGSMICtx);
            cbHwBufferAvail = vboxHwBufferAvail (pVBVA);
            if (cbChunk >= cbHwBufferAvail)
            {
                // LogFunc(("no place for %d bytes. Only %d bytes available after flush. Going to partial writes.\n",
                //             cb, cbHwBufferAvail));
                if (cbHwBufferAvail <= pVBVA->cbPartialWriteThreshold)
                {
                    /* Host is not keeping up; give up on this write. */
                    // LogFunc(("Buffer overflow!!!\n"));
                    pCtx->fHwBufferOverflow = true;
                    Assert(false);
                    return false;
                }
                /* Write what fits, minus the threshold kept as safety gap. */
                cbChunk = cbHwBufferAvail - pVBVA->cbPartialWriteThreshold;
            }
        }
        Assert(cbChunk <= cb);
        Assert(cbChunk <= vboxHwBufferAvail (pVBVA));
        vboxHwBufferPlaceDataAt (pCtx, (uint8_t *)p + cbWritten, cbChunk, pVBVA->off32Free);
        pVBVA->off32Free   = (pVBVA->off32Free + cbChunk) % pVBVA->cbData;
        pRecord->cbRecord += cbChunk;
        cbHwBufferAvail   -= cbChunk;
        cb                -= cbChunk;
        cbWritten         += cbChunk;
    }
    return true;
}
/*
* Public writer to the hardware buffer.
*/
/**
 * Public entry point for writing data into the open VBVA record; thin
 * wrapper around vboxHwBufferWrite().
 *
 * @returns true when all @a cb bytes were written.
 */
DECLHIDDEN(bool) VBoxVBVAWrite(PVBVABUFFERCONTEXT pCtx,
                               PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                               const void *pv, uint32_t cb)
{
    return vboxHwBufferWrite (pCtx, pHGSMICtx, pv, cb);
}
/* Returns true when the host advertises support for the VBVA order `code`
 * (bit `code` set in hostFlags.u32SupportedOrders). */
DECLHIDDEN(bool) VBoxVBVAOrderSupported(PVBVABUFFERCONTEXT pCtx, unsigned code)
{
    VBVABUFFER *pVBVA = pCtx->pVBVA;
    /* Without an attached buffer no order is supported. */
    return pVBVA
        && (pVBVA->hostFlags.u32SupportedOrders & (1 << code)) != 0;
}
/* Records the VRAM offset and size of the VBVA buffer in the context.
 * Pure bookkeeping: nothing is touched beyond the two context fields. */
DECLHIDDEN(void) VBoxVBVASetupBufferContext(PVBVABUFFERCONTEXT pCtx,
                                            uint32_t offVRAMBuffer,
                                            uint32_t cbBuffer)
{
    pCtx->offVRAMBuffer = offVRAMBuffer;
    pCtx->cbBuffer = cbBuffer;
}
|
/************************************************************
*
* CmdKillOffer.cpp
*
*/
/************************************************************
-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA1
* OPEN TRANSACTIONS
*
* Financial Cryptography and Digital Cash
* Library, Protocol, API, Server, CLI, GUI
*
* -- Anonymous Numbered Accounts.
* -- Untraceable Digital Cash.
* -- Triple-Signed Receipts.
* -- Cheques, Vouchers, Transfers, Inboxes.
* -- Basket Currencies, Markets, Payment Plans.
* -- Signed, XML, Ricardian-style Contracts.
* -- Scripted smart contracts.
*
* Copyright (C) 2010-2013 by "Fellow Traveler" (A pseudonym)
*
* EMAIL:
* FellowTraveler@rayservers.net
*
* BITCOIN: 1NtTPVVjDsUfDWybS4BwvHpG2pdS9RnYyQ
*
* KEY FINGERPRINT (PGP Key in license file):
* 9DD5 90EB 9292 4B48 0484 7910 0308 00ED F951 BB8E
*
* OFFICIAL PROJECT WIKI(s):
* https://github.com/FellowTraveler/Moneychanger
* https://github.com/FellowTraveler/Open-Transactions/wiki
*
* WEBSITE:
* http://www.OpenTransactions.org/
*
* Components and licensing:
* -- Moneychanger..A Java client GUI.....LICENSE:.....GPLv3
* -- otlib.........A class library.......LICENSE:...LAGPLv3
* -- otapi.........A client API..........LICENSE:...LAGPLv3
* -- opentxs/ot....Command-line client...LICENSE:...LAGPLv3
* -- otserver......Server Application....LICENSE:....AGPLv3
* Github.com/FellowTraveler/Open-Transactions/wiki/Components
*
* All of the above OT components were designed and written by
* Fellow Traveler, with the exception of Moneychanger, which
* was contracted out to Vicky C (bitcointrader4@gmail.com).
* The open-source community has since actively contributed.
*
* -----------------------------------------------------
*
* LICENSE:
* This program is free software: you can redistribute it
* and/or modify it under the terms of the GNU Affero
* General Public License as published by the Free Software
* Foundation, either version 3 of the License, or (at your
* option) any later version.
*
* ADDITIONAL PERMISSION under the GNU Affero GPL version 3
* section 7: (This paragraph applies only to the LAGPLv3
* components listed above.) If you modify this Program, or
* any covered work, by linking or combining it with other
* code, such other code is not for that reason alone subject
* to any of the requirements of the GNU Affero GPL version 3.
* (==> This means if you are only using the OT API, then you
* don't have to open-source your code--only your changes to
* Open-Transactions itself must be open source. Similar to
* LGPLv3, except it applies to software-as-a-service, not
* just to distributing binaries.)
*
* Extra WAIVER for OpenSSL, Lucre, and all other libraries
* used by Open Transactions: This program is released under
* the AGPL with the additional exemption that compiling,
* linking, and/or using OpenSSL is allowed. The same is true
* for any other open source libraries included in this
* project: complete waiver from the AGPL is hereby granted to
* compile, link, and/or use them with Open-Transactions,
* according to their own terms, as long as the rest of the
* Open-Transactions terms remain respected, with regard to
* the Open-Transactions code itself.
*
* Lucre License:
* This code is also "dual-license", meaning that Ben Lau-
* rie's license must also be included and respected, since
* the code for Lucre is also included with Open Transactions.
* See Open-Transactions/src/otlib/lucre/LUCRE_LICENSE.txt
* The Laurie requirements are light, but if there is any
* problem with his license, simply remove the Lucre code.
* Although there are no other blind token algorithms in Open
* Transactions (yet. credlib is coming), the other functions
* will continue to operate.
* See Lucre on Github: https://github.com/benlaurie/lucre
* -----------------------------------------------------
* You should have received a copy of the GNU Affero General
* Public License along with this program. If not, see:
* http://www.gnu.org/licenses/
*
* If you would like to use this software outside of the free
* software license, please contact FellowTraveler.
* (Unfortunately many will run anonymously and untraceably,
* so who could really stop them?)
*
* DISCLAIMER:
* This program is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU Affero General Public License for
* more details.
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.9 (Darwin)
iQIcBAEBAgAGBQJRSsfJAAoJEAMIAO35UbuOQT8P/RJbka8etf7wbxdHQNAY+2cC
vDf8J3X8VI+pwMqv6wgTVy17venMZJa4I4ikXD/MRyWV1XbTG0mBXk/7AZk7Rexk
KTvL/U1kWiez6+8XXLye+k2JNM6v7eej8xMrqEcO0ZArh/DsLoIn1y8p8qjBI7+m
aE7lhstDiD0z8mwRRLKFLN2IH5rAFaZZUvj5ERJaoYUKdn4c+RcQVei2YOl4T0FU
LWND3YLoH8naqJXkaOKEN4UfJINCwxhe5Ke9wyfLWLUO7NamRkWD2T7CJ0xocnD1
sjAzlVGNgaFDRflfIF4QhBx1Ddl6wwhJfw+d08bjqblSq8aXDkmFA7HeunSFKkdn
oIEOEgyj+veuOMRJC5pnBJ9vV+7qRdDKQWaCKotynt4sWJDGQ9kWGWm74SsNaduN
TPMyr9kNmGsfR69Q2Zq/FLcLX/j8ESxU+HYUB4vaARw2xEOu2xwDDv6jt0j3Vqsg
x7rWv4S/Eh18FDNDkVRChiNoOIilLYLL6c38uMf1pnItBuxP3uhgY6COm59kVaRh
nyGTYCDYD2TK+fI9o89F1297uDCwEJ62U0Q7iTDp5QuXCoxkPfv8/kX6lS6T3y9G
M9mqIoLbIQ1EDntFv7/t6fUTS2+46uCrdZWbQ5RjYXdrzjij02nDmJAm2BngnZvd
kamH0Y/n11lCvo1oQxM+
=uSzz
-----END PGP SIGNATURE-----
**************************************************************/
#include "CmdKillOffer.hpp"
#include "../ot_made_easy_ot.hpp"
#include <opentxs/client/OTAPI.hpp>
#include <opentxs/core/Log.hpp>
using namespace opentxs;
using namespace std;
// Registers the "killoffer" command: its category, help line, and the
// option descriptors displayed in usage output.
CmdKillOffer::CmdKillOffer()
{
    // Command identity and help text.
    command = "killoffer";
    category = catMarkets;
    help = "Kill an active market offer.";

    // Option descriptors, in the order they are expected.
    args[0] = "--server <server>";
    args[1] = "--mynym <nym>";
    args[2] = "--myacct <account>";
    args[3] = "--id <transactionnr>";
}
// Nothing to clean up; the command holds no resources of its own.
CmdKillOffer::~CmdKillOffer()
{
}
// Pulls the parsed command-line options and forwards them to run().
int32_t CmdKillOffer::runWithOptions()
{
    string server = getOption("server");
    string mynym = getOption("mynym");
    string myacct = getOption("myacct");
    string id = getOption("id");
    return run(server, mynym, myacct, id);
}
// Validates all inputs, then asks the server to kill the market offer with
// the given transaction number and processes the transaction response.
// Returns -1 when any input fails validation.
int32_t CmdKillOffer::run(string server, string mynym, string myacct, string id)
{
    // Each check prints its own diagnostics; short-circuit preserves the
    // original validation order.
    if (!checkServer("server", server) || !checkNym("mynym", mynym) ||
        !checkAccount("myacct", myacct) || !checkTransNum("id", id)) {
        return -1;
    }

    string response = MadeEasy::kill_market_offer(server, mynym, myacct, id);
    return processTxResponse(server, mynym, myacct, response,
                             "kill market offer");
}
|
#include "Rendering/RenderDebugSettings.hpp"
namespace kokko
{

// Starts with no entity selected for debugging and every feature disabled.
RenderDebugSettings::RenderDebugSettings() :
    debugEntity(Entity::Null),
    featureFlags(0)
{
}

// Returns the entity currently selected for render debugging.
Entity RenderDebugSettings::GetDebugEntity() const
{
    return debugEntity;
}

// Selects the entity to debug (Entity::Null clears the selection).
void RenderDebugSettings::SetDebugEntity(Entity entity)
{
    debugEntity = entity;
}

// A feature counts as enabled only when all of its bits are set in the mask.
bool RenderDebugSettings::IsFeatureEnabled(RenderDebugFeatureFlag feature) const
{
    const uint32_t bits = static_cast<uint32_t>(feature);
    return (featureFlags & bits) == bits;
}

// Sets or clears the feature's bits in the flag mask.
void RenderDebugSettings::SetFeatureEnabled(RenderDebugFeatureFlag feature, bool enabled)
{
    const uint32_t bits = static_cast<uint32_t>(feature);
    featureFlags = enabled ? (featureFlags | bits) : (featureFlags & ~bits);
}

}
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*/
#include "productquantizer.h"
#include <algorithm>
#include <iostream>
#include <numeric>
namespace fasttext {
// Squared Euclidean (L2) distance between two d-dimensional vectors.
real distL2(const real* x, const real* y, int32_t d) {
  real dist = 0;
  for (int32_t i = 0; i < d; i++) {
    const real delta = x[i] - y[i];
    dist += delta * delta;
  }
  return dist;
}
// Splits a dim-dimensional space into nsubq_ subquantizers of dsub dims each,
// with ksub_ centroids per subquantizer.  When dim is not a multiple of dsub,
// an extra trailing subquantizer of lastdsub_ = dim % dsub dims is added.
ProductQuantizer::ProductQuantizer(int32_t dim, int32_t dsub): dim_(dim),
  nsubq_(dim / dsub), dsub_(dsub), centroids_(dim * ksub_), rng(seed_) {
  lastdsub_ = dim_ % dsub;
  if (lastdsub_ == 0) {lastdsub_ = dsub_;}
  else {nsubq_++;}
}
// Returns a pointer to centroid i of subquantizer m.
// Fix: the original read `¢roids_` — mojibake for `&centroids_` (the
// "&cent" prefix was swallowed as an HTML entity), which does not compile.
const real* ProductQuantizer::get_centroids(int32_t m, uint8_t i) const {
  // The last subquantizer may have a narrower dimension (lastdsub_).
  if (m == nsubq_ - 1) {
    return &centroids_[m * ksub_ * dsub_ + i * lastdsub_];
  }
  return &centroids_[(m * ksub_ + i) * dsub_];
}
// Mutable overload of get_centroids (same indexing as the const version).
// Fix: restores `&centroids_` from the mojibake `¢roids_`.
real* ProductQuantizer::get_centroids(int32_t m, uint8_t i) {
  if (m == nsubq_ - 1) {
    return &centroids_[m * ksub_ * dsub_ + i * lastdsub_];
  }
  return &centroids_[(m * ksub_ + i) * dsub_];
}
// Finds the centroid nearest to x among the ksub_ centroids laid out
// contiguously starting at c0, writes its index to code[0], and returns
// the squared distance to it.
real ProductQuantizer::assign_centroid(const real * x, const real* c0,
                                       uint8_t* code, int32_t d) const {
  const real* candidate = c0;
  real best = distL2(x, candidate, d);
  code[0] = 0;
  for (int32_t j = 1; j < ksub_; j++) {
    candidate += d;
    const real dcur = distL2(x, candidate, d);
    if (dcur < best) {
      best = dcur;
      code[0] = (uint8_t) j;
    }
  }
  return best;
}
// E-step of k-means: assign each of the n d-dimensional points to its
// nearest centroid, storing the index in codes[i].
void ProductQuantizer::Estep(const real* x, const real* centroids,
                             uint8_t* codes, int32_t d,
                             int32_t n) const {
  for (int32_t i = 0; i < n; i++) {
    assign_centroid(x + i * d, centroids, codes + i, d);
  }
}
// M-step of k-means: recompute each centroid as the mean of its assigned
// points, then re-seed empty clusters by splitting populated ones.
void ProductQuantizer::MStep(const real* x0, real* centroids,
                             const uint8_t* codes,
                             int32_t d, int32_t n) {
  std::vector<int32_t> nelts(ksub_, 0);
  memset(centroids, 0, sizeof(real) * d * ksub_);
  // Accumulate, per centroid, the sum of all points assigned to it.
  const real* x = x0;
  for (auto i = 0; i < n; i++) {
    auto k = codes[i];
    real* c = centroids + k * d;
    for (auto j = 0; j < d; j++) {
      c[j] += x[j];
    }
    nelts[k]++;
    x += d;
  }
  // Divide each sum by its count to get the mean (skip empty clusters).
  real* c = centroids;
  for (auto k = 0; k < ksub_; k++) {
    real z = (real) nelts[k];
    if (z != 0) {
      for (auto j = 0; j < d; j++) {
        c[j] /= z;
      }
    }
    c += d;
  }
  // Re-seed empty clusters: pick a donor cluster m with probability
  // proportional to its size, copy its centroid, perturb the pair by +-eps_
  // in alternating coordinates, and split its points between k and m.
  std::uniform_real_distribution<> runiform(0,1);
  for (auto k = 0; k < ksub_; k++) {
    if (nelts[k] == 0) {
      int32_t m = 0;
      while (runiform(rng) * (n - ksub_) >= nelts[m] - 1) {
        m = (m + 1) % ksub_;
      }
      memcpy(centroids + k * d, centroids + m * d, sizeof(real) * d);
      for (auto j = 0; j < d; j++) {
        int32_t sign = (j % 2) * 2 - 1;
        centroids[k * d + j] += sign * eps_;
        centroids[m * d + j] -= sign * eps_;
      }
      nelts[k] = nelts[m] / 2;
      nelts[m] -= nelts[k];
    }
  }
}
// Runs niter_ iterations of k-means on n d-dimensional points in x,
// refining the ksub_ centroids stored at c in place.
// Fix: the per-point code buffer is now a std::vector instead of a raw
// new[]/delete[] pair, so it cannot leak if Estep/MStep ever throws.
void ProductQuantizer::kmeans(const real *x, real* c, int32_t n, int32_t d) {
  // Initialize centroids from ksub_ distinct randomly chosen points.
  std::vector<int32_t> perm(n, 0);
  std::iota(perm.begin(), perm.end(), 0);
  std::shuffle(perm.begin(), perm.end(), rng);
  for (int32_t i = 0; i < ksub_; i++) {
    memcpy(&c[i * d], x + perm[i] * d, d * sizeof(real));
  }
  std::vector<uint8_t> codes(n);
  for (int32_t i = 0; i < niter_; i++) {
    Estep(x, c, codes.data(), d, n);
    MStep(x, c, codes.data(), d, n);
  }
}
void ProductQuantizer::train(int32_t n, const real * x) {
if (n < ksub_) {
std::cerr<<"Matrix too small for quantization, must have > 256 rows"<<std::endl;
exit(1);
}
std::vector<int32_t> perm(n, 0);
std::iota(perm.begin(), perm.end(), 0);
auto d = dsub_;
auto np = std::min(n, max_points_);
real* xslice = new real[np * dsub_];
for (auto m = 0; m < nsubq_; m++) {
if (m == nsubq_-1) {d = lastdsub_;}
if (np != n) {std::shuffle(perm.begin(), perm.end(), rng);}
for (auto j = 0; j < np; j++) {
memcpy (xslice + j * d, x + perm[j] * dim_ + m * dsub_, d * sizeof(real));
}
kmeans(xslice, get_centroids(m, 0), np, d);
}
delete [] xslice;
}
// Dot product of x with the t-th quantized vector (decoded from codes),
// scaled by alpha.
real ProductQuantizer::mulcode(const Vector& x, const uint8_t* codes,
                               int32_t t, real alpha) const {
  real acc = 0.0;
  const uint8_t* code = codes + nsubq_ * t;
  int32_t d = dsub_;
  for (int32_t m = 0; m < nsubq_; m++) {
    if (m == nsubq_ - 1) { d = lastdsub_; }
    const real* c = get_centroids(m, code[m]);
    for (int32_t n = 0; n < d; n++) {
      acc += x[m * dsub_ + n] * c[n];
    }
  }
  return acc * alpha;
}
// Adds alpha times the t-th quantized vector (decoded from codes) into x.
void ProductQuantizer::addcode(Vector& x, const uint8_t* codes,
                               int32_t t, real alpha) const {
  const uint8_t* code = codes + nsubq_ * t;
  int32_t d = dsub_;
  for (int32_t m = 0; m < nsubq_; m++) {
    if (m == nsubq_ - 1) { d = lastdsub_; }
    const real* c = get_centroids(m, code[m]);
    for (int32_t n = 0; n < d; n++) {
      x[m * dsub_ + n] += alpha * c[n];
    }
  }
}
// Quantizes one dim_-dimensional vector: each dsub_-dimensional slice is
// assigned independently to its nearest centroid, one byte per subquantizer.
void ProductQuantizer::compute_code(const real* x, uint8_t* code) const {
  int32_t d = dsub_;
  for (int32_t m = 0; m < nsubq_; m++) {
    if (m == nsubq_ - 1) { d = lastdsub_; }
    assign_centroid(x + m * dsub_, get_centroids(m, 0), code + m, d);
  }
}
// Quantizes n vectors of dimension dim_, writing nsubq_ code bytes per row.
void ProductQuantizer::compute_codes(const real* x, uint8_t* codes,
                                     int32_t n) const {
  for (int32_t i = 0; i < n; i++) {
    compute_code(x + i * dim_, codes + i * nsubq_);
  }
}
// Serializes the quantizer in raw binary: dim_, nsubq_, dsub_, lastdsub_
// followed by the full centroid table, in the field order load() expects.
void ProductQuantizer::save(std::ostream& out) {
  out.write((char*) &dim_, sizeof(dim_));
  out.write((char*) &nsubq_, sizeof(nsubq_));
  out.write((char*) &dsub_, sizeof(dsub_));
  out.write((char*) &lastdsub_, sizeof(lastdsub_));
  out.write((char*) centroids_.data(), centroids_.size() * sizeof(real));
}
// Deserializes the quantizer written by save(): header fields followed by
// the centroid table.
// Fixes: the element loop read through `¢roids_` — mojibake for
// `&centroids_` — which does not compile; the table is now read with a
// single bulk read (mirroring save()), which is equivalent and avoids the
// per-element loop and its signed/unsigned comparison.
void ProductQuantizer::load(std::istream& in) {
  in.read((char*) &dim_, sizeof(dim_));
  in.read((char*) &nsubq_, sizeof(nsubq_));
  in.read((char*) &dsub_, sizeof(dsub_));
  in.read((char*) &lastdsub_, sizeof(lastdsub_));
  centroids_.resize(dim_ * ksub_);
  in.read((char*) centroids_.data(), centroids_.size() * sizeof(real));
}
}
|
/*
* Copyright 2009-2017 Alibaba Cloud All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <alibabacloud/dysmsapi/model/DeleteSmsSignRequest.h>
using AlibabaCloud::Dysmsapi::Model::DeleteSmsSignRequest;
// Builds the RPC request for the dysmsapi "DeleteSmsSign" action
// (API version 2017-05-25); the request is always sent via HTTP POST.
DeleteSmsSignRequest::DeleteSmsSignRequest() :
	RpcServiceRequest("dysmsapi", "2017-05-25", "DeleteSmsSign")
{
	setMethod(HttpRequest::Method::Post);
}

// Nothing to release beyond the base class.
DeleteSmsSignRequest::~DeleteSmsSignRequest()
{}
// Accessors for the ResourceOwnerId request parameter.  The setter stores
// the value and serializes it into the outgoing parameter map.
long DeleteSmsSignRequest::getResourceOwnerId()const
{
	return resourceOwnerId_;
}

void DeleteSmsSignRequest::setResourceOwnerId(long resourceOwnerId)
{
	resourceOwnerId_ = resourceOwnerId;
	setParameter("ResourceOwnerId", std::to_string(resourceOwnerId));
}
// Accessors for the ResourceOwnerAccount request parameter.  The setter
// stores the value and mirrors it into the outgoing parameter map.
std::string DeleteSmsSignRequest::getResourceOwnerAccount()const
{
	return resourceOwnerAccount_;
}

void DeleteSmsSignRequest::setResourceOwnerAccount(const std::string& resourceOwnerAccount)
{
	resourceOwnerAccount_ = resourceOwnerAccount;
	setParameter("ResourceOwnerAccount", resourceOwnerAccount);
}
// Accessors for the OwnerId request parameter.  The setter stores the
// value and serializes it into the outgoing parameter map.
long DeleteSmsSignRequest::getOwnerId()const
{
	return ownerId_;
}

void DeleteSmsSignRequest::setOwnerId(long ownerId)
{
	ownerId_ = ownerId;
	setParameter("OwnerId", std::to_string(ownerId));
}
// Accessors for the AccessKeyId request parameter.  The setter stores the
// value and mirrors it into the outgoing parameter map.
std::string DeleteSmsSignRequest::getAccessKeyId()const
{
	return accessKeyId_;
}

void DeleteSmsSignRequest::setAccessKeyId(const std::string& accessKeyId)
{
	accessKeyId_ = accessKeyId;
	setParameter("AccessKeyId", accessKeyId);
}
// Accessors for the SignName request parameter (the SMS signature to
// delete).  The setter stores the value and mirrors it into the outgoing
// parameter map.
std::string DeleteSmsSignRequest::getSignName()const
{
	return signName_;
}

void DeleteSmsSignRequest::setSignName(const std::string& signName)
{
	signName_ = signName;
	setParameter("SignName", signName);
}
|
/**
* 10.1 概述
* @Author Bob
* @Eamil 0haizhu0@gmail.com
* @Date 2017/9/20
*/
#include <algorithm>
#include <iostream>
#include <vector>
using namespace std;
int main() {
    /**
     * Most algorithms do not operate on containers directly; they traverse a
     * range of elements delimited by two iterators.
     * Iterators decouple algorithms from containers, but algorithms still
     * depend on operations of the element type.
     * Algorithms never perform container operations: they may change or move
     * elements, but they never change the size of the container.
     */
    int val = 42; // value to search for; type must match the vector's element type
    vector<int> vec;
    for (int i = 0; i < 10; i++) {
        vec.push_back(i * i);
    }
    // std::find returns an iterator (vector<int>::iterator).  It is declared
    // in <algorithm>, which this file previously failed to include.
    auto iter = std::find(vec.begin(), vec.end(), val);
    if (iter == vec.end())
        cout << "ERROR!" << endl;
    else // note how the iterator is dereferenced and how std::distance is used
        cout << "the index of value " << (*iter) << " is " << std::distance(vec.begin(), iter) << std::endl;
    return 0;
}
|
//===- VectorToSCF.cpp - Conversion from Vector to mix of SCF and Std -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements target-dependent lowering of vector transfer operations.
//
//===----------------------------------------------------------------------===//
#include <type_traits>
#include "mlir/Conversion/VectorToSCF/VectorToSCF.h"
#include "mlir/Dialect/Affine/EDSC/Intrinsics.h"
#include "mlir/Dialect/SCF/EDSC/Builders.h"
#include "mlir/Dialect/SCF/EDSC/Intrinsics.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/Dialect/Vector/EDSC/Intrinsics.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/Dialect/Vector/VectorUtils.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Types.h"
using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using vector::TransferReadOp;
using vector::TransferWriteOp;
namespace {
/// Helper class captures the common information needed to lower N>1-D vector
/// transfer operations (read and write).
/// On construction, this class opens an edsc::ScopedContext for simpler IR
/// manipulation.
/// In pseudo-IR, for an n-D vector_transfer_read such as:
///
/// ```
/// vector_transfer_read(%m, %offsets, identity_map, %fill) :
/// memref<(leading_dims) x (major_dims) x (minor_dims) x type>,
/// vector<(major_dims) x (minor_dims) x type>
/// ```
///
/// where rank(minor_dims) is the lower-level vector rank (e.g. 1 for LLVM or
/// higher).
///
/// This is the entry point to emitting pseudo-IR resembling:
///
/// ```
/// %tmp = alloc(): memref<(major_dims) x vector<minor_dim x type>>
/// for (%ivs_major, {0}, {vector_shape}, {1}) { // (N-1)-D loop nest
/// if (any_of(%ivs_major + %offsets, <, major_dims)) {
/// %v = vector_transfer_read(
/// {%offsets_leading, %ivs_major + %offsets_major, %offsets_minor},
/// %ivs_minor):
/// memref<(leading_dims) x (major_dims) x (minor_dims) x type>,
/// vector<(minor_dims) x type>;
/// store(%v, %tmp);
/// } else {
/// %v = splat(vector<(minor_dims) x type>, %fill)
/// store(%v, %tmp, %ivs_major);
/// }
/// }
/// %res = load(%tmp, %0): memref<(major_dims) x vector<minor_dim x type>>):
// vector<(major_dims) x (minor_dims) x type>
/// ```
///
template <typename ConcreteOp>
class NDTransferOpHelper {
public:
  /// Captures the transfer op, derives the major/minor/leading rank split,
  /// and precomputes the vector and memref types used by the lowering.
  NDTransferOpHelper(PatternRewriter &rewriter, ConcreteOp xferOp,
                     const VectorTransferToSCFOptions &options)
      : rewriter(rewriter), options(options), loc(xferOp.getLoc()),
        scope(std::make_unique<ScopedContext>(rewriter, loc)), xferOp(xferOp),
        op(xferOp.getOperation()) {
    vectorType = xferOp.getVectorType();
    // TODO(ntv, ajcbik): when we go to k > 1-D vectors adapt minorRank.
    minorRank = 1;
    majorRank = vectorType.getRank() - minorRank;
    leadingRank = xferOp.getMemRefType().getRank() - (majorRank + minorRank);
    majorVectorType =
        VectorType::get(vectorType.getShape().take_front(majorRank),
                        vectorType.getElementType());
    minorVectorType =
        VectorType::get(vectorType.getShape().take_back(minorRank),
                        vectorType.getElementType());
    /// Memref of minor vector type is used for individual transfers.
    memRefMinorVectorType =
        MemRefType::get(majorVectorType.getShape(), minorVectorType, {},
                        xferOp.getMemRefType().getMemorySpace());
  }

  /// Emits the lowered IR and replaces (or erases) the original transfer op.
  /// Specialized below for TransferReadOp and TransferWriteOp.
  LogicalResult doReplace();

private:
  /// Creates the loop nest on the "major" dimensions and calls the
  /// `loopBodyBuilder` lambda in the context of the loop nest.
  template <typename Lambda>
  void emitLoops(Lambda loopBodyBuilder);

  /// Operate within the body of `emitLoops` to:
  ///   1. Compute the indexings `majorIvs + majorOffsets` and save them in
  ///      `majorIvsPlusOffsets`.
  ///   2. Return a boolean that determines whether the first `majorIvs.rank()`
  ///      dimensions `majorIvs + majorOffsets` are all within `memrefBounds`.
  Value emitInBoundsCondition(ValueRange majorIvs, ValueRange majorOffsets,
                              MemRefBoundsCapture &memrefBounds,
                              SmallVectorImpl<Value> &majorIvsPlusOffsets);

  /// Common state to lower vector transfer ops.
  PatternRewriter &rewriter;
  const VectorTransferToSCFOptions &options;
  Location loc;
  std::unique_ptr<ScopedContext> scope;
  ConcreteOp xferOp;
  Operation *op;
  // A vector transfer copies data between:
  //   - memref<(leading_dims) x (major_dims) x (minor_dims) x type>
  //   - vector<(major_dims) x (minor_dims) x type>
  unsigned minorRank;         // for now always 1
  unsigned majorRank;         // vector rank - minorRank
  unsigned leadingRank;       // memref rank - vector rank
  VectorType vectorType;      // vector<(major_dims) x (minor_dims) x type>
  VectorType majorVectorType; // vector<(major_dims) x type>
  VectorType minorVectorType; // vector<(minor_dims) x type>
  MemRefType memRefMinorVectorType; // memref<vector<(minor_dims) x type>>
};
template <typename ConcreteOp>
template <typename Lambda>
void NDTransferOpHelper<ConcreteOp>::emitLoops(Lambda loopBodyBuilder) {
  /// Loop nest operates on the major dimensions
  MemRefBoundsCapture memrefBoundsCapture(xferOp.memref());

  if (options.unroll) {
    // Unrolled form: enumerate every linear index of the major vector shape
    // and materialize its delinearized coordinates as constant indices,
    // invoking the body builder once per instance.
    auto shape = majorVectorType.getShape();
    auto strides = computeStrides(shape);
    unsigned numUnrolledInstances = computeMaxLinearIndex(shape);
    ValueRange indices(xferOp.indices());
    for (unsigned idx = 0; idx < numUnrolledInstances; ++idx) {
      SmallVector<int64_t, 4> offsets = delinearize(strides, idx);
      SmallVector<Value, 4> offsetValues =
          llvm::to_vector<4>(llvm::map_range(offsets, [](int64_t off) -> Value {
            return std_constant_index(off);
          }));
      loopBodyBuilder(offsetValues, indices.take_front(leadingRank),
                      indices.drop_front(leadingRank).take_front(majorRank),
                      indices.take_back(minorRank), memrefBoundsCapture);
    }
  } else {
    // Loop form: build an affine loop nest over the major dimensions and
    // invoke the body builder with the loop induction variables.
    VectorBoundsCapture vectorBoundsCapture(majorVectorType);
    auto majorLbs = vectorBoundsCapture.getLbs();
    auto majorUbs = vectorBoundsCapture.getUbs();
    auto majorSteps = vectorBoundsCapture.getSteps();
    SmallVector<Value, 8> majorIvs(vectorBoundsCapture.rank());
    AffineLoopNestBuilder(majorIvs, majorLbs, majorUbs, majorSteps)([&] {
      ValueRange indices(xferOp.indices());
      loopBodyBuilder(majorIvs, indices.take_front(leadingRank),
                      indices.drop_front(leadingRank).take_front(majorRank),
                      indices.take_back(minorRank), memrefBoundsCapture);
    });
  }
}
template <typename ConcreteOp>
Value NDTransferOpHelper<ConcreteOp>::emitInBoundsCondition(
    ValueRange majorIvs, ValueRange majorOffsets,
    MemRefBoundsCapture &memrefBounds,
    SmallVectorImpl<Value> &majorIvsPlusOffsets) {
  // Builds the conjunction of `iv + offset < ub` over every masked major
  // dimension.  Stays null when no dimension is masked, which callers treat
  // as "guaranteed in-bounds".
  Value inBoundsCondition;
  majorIvsPlusOffsets.reserve(majorIvs.size());
  unsigned idx = 0;
  for (auto it : llvm::zip(majorIvs, majorOffsets, memrefBounds.getUbs())) {
    Value iv = std::get<0>(it), off = std::get<1>(it), ub = std::get<2>(it);
    using namespace mlir::edsc::op;
    majorIvsPlusOffsets.push_back(iv + off);
    if (xferOp.isMaskedDim(leadingRank + idx)) {
      Value inBounds = majorIvsPlusOffsets.back() < ub;
      inBoundsCondition =
          (inBoundsCondition) ? (inBoundsCondition && inBounds) : inBounds;
    }
    ++idx;
  }
  return inBoundsCondition;
}
/// Lowers an N-D transfer_read to a loop nest (or unrolled sequence) of 1-D
/// transfer reads, each guarded by an in-bounds check; out-of-bounds
/// iterations produce a splat of the padding value.
template <>
LogicalResult NDTransferOpHelper<TransferReadOp>::doReplace() {
  // In unrolled mode the result vector is built up via vector_insert; in
  // loop mode 1-D pieces are staged through a temporary memref `alloc`.
  Value alloc, result;
  if (options.unroll)
    result = std_splat(vectorType, xferOp.padding());
  else
    alloc = std_alloc(memRefMinorVectorType);

  emitLoops([&](ValueRange majorIvs, ValueRange leadingOffsets,
                ValueRange majorOffsets, ValueRange minorOffsets,
                MemRefBoundsCapture &memrefBounds) {
    /// Lambda to load 1-D vector in the current loop ivs + offset context.
    auto load1DVector = [&](ValueRange majorIvsPlusOffsets) -> Value {
      SmallVector<Value, 8> indexing;
      indexing.reserve(leadingRank + majorRank + minorRank);
      indexing.append(leadingOffsets.begin(), leadingOffsets.end());
      indexing.append(majorIvsPlusOffsets.begin(), majorIvsPlusOffsets.end());
      indexing.append(minorOffsets.begin(), minorOffsets.end());
      Value memref = xferOp.memref();
      auto map = TransferReadOp::getTransferMinorIdentityMap(
          xferOp.getMemRefType(), minorVectorType);
      // Propagate masking of the innermost (minor) dimension to the 1-D read.
      ArrayAttr masked;
      if (xferOp.isMaskedDim(xferOp.getVectorType().getRank() - 1)) {
        OpBuilder &b = ScopedContext::getBuilderRef();
        masked = b.getBoolArrayAttr({true});
      }
      return vector_transfer_read(minorVectorType, memref, indexing,
                                  AffineMapAttr::get(map), xferOp.padding(),
                                  masked);
    };

    // 1. Compute the inBoundsCondition in the current loops ivs + offset
    // context.
    SmallVector<Value, 4> majorIvsPlusOffsets;
    Value inBoundsCondition = emitInBoundsCondition(
        majorIvs, majorOffsets, memrefBounds, majorIvsPlusOffsets);

    if (inBoundsCondition) {
      // 2. If the condition is not null, we need an IfOp, which may yield
      // if `options.unroll` is true.
      SmallVector<Type, 1> resultType;
      if (options.unroll)
        resultType.push_back(vectorType);
      auto ifOp = ScopedContext::getBuilderRef().create<scf::IfOp>(
          ScopedContext::getLocation(), resultType, inBoundsCondition,
          /*withElseRegion=*/true);

      // 3.a. If in-bounds, progressively lower to a 1-D transfer read.
      BlockBuilder(&ifOp.thenRegion().front(), Append())([&] {
        Value vector = load1DVector(majorIvsPlusOffsets);
        // 3.a.i. If `options.unroll` is true, insert the 1-D vector in the
        // aggregate. We must yield and merge with the `else` branch.
        if (options.unroll) {
          vector = vector_insert(vector, result, majorIvs);
          (loop_yield(vector));
          return;
        }
        // 3.a.ii. Otherwise, just go through the temporary `alloc`.
        std_store(vector, alloc, majorIvs);
      });

      // 3.b. If not in-bounds, splat a 1-D vector.
      BlockBuilder(&ifOp.elseRegion().front(), Append())([&] {
        Value vector = std_splat(minorVectorType, xferOp.padding());
        // 3.a.i. If `options.unroll` is true, insert the 1-D vector in the
        // aggregate. We must yield and merge with the `then` branch.
        if (options.unroll) {
          vector = vector_insert(vector, result, majorIvs);
          (loop_yield(vector));
          return;
        }
        // 3.b.ii. Otherwise, just go through the temporary `alloc`.
        std_store(vector, alloc, majorIvs);
      });

      if (!resultType.empty())
        result = *ifOp.results().begin();
    } else {
      // 4. Guaranteed in-bounds, progressively lower to a 1-D transfer read.
      Value loaded1D = load1DVector(majorIvsPlusOffsets);
      // 5.a. If `options.unroll` is true, insert the 1-D vector in the
      // aggregate.
      if (options.unroll)
        result = vector_insert(loaded1D, result, majorIvs);
      // 5.b. Otherwise, just go through the temporary `alloc`.
      else
        std_store(loaded1D, alloc, majorIvs);
    }
  });

  assert((!options.unroll ^ result) && "Expected resulting Value iff unroll");
  // In loop mode, reload the fully populated staging buffer as one N-D vector.
  if (!result)
    result = std_load(vector_type_cast(MemRefType::get({}, vectorType), alloc));
  rewriter.replaceOp(op, result);

  return success();
}
/// Lowers an N-D transfer_write to a loop nest (or unrolled sequence) of 1-D
/// transfer writes; masked dimensions are guarded by an in-bounds IfOp so
/// out-of-bounds iterations write nothing.
template <>
LogicalResult NDTransferOpHelper<TransferWriteOp>::doReplace() {
  // In loop mode the source vector is first staged into a temporary memref
  // `alloc`, from which 1-D pieces are loaded per iteration.
  Value alloc;
  if (!options.unroll) {
    alloc = std_alloc(memRefMinorVectorType);
    std_store(xferOp.vector(),
              vector_type_cast(MemRefType::get({}, vectorType), alloc));
  }

  emitLoops([&](ValueRange majorIvs, ValueRange leadingOffsets,
                ValueRange majorOffsets, ValueRange minorOffsets,
                MemRefBoundsCapture &memrefBounds) {
    // Lower to 1-D vector_transfer_write and let recursion handle it.
    auto emitTransferWrite = [&](ValueRange majorIvsPlusOffsets) {
      SmallVector<Value, 8> indexing;
      indexing.reserve(leadingRank + majorRank + minorRank);
      indexing.append(leadingOffsets.begin(), leadingOffsets.end());
      indexing.append(majorIvsPlusOffsets.begin(), majorIvsPlusOffsets.end());
      indexing.append(minorOffsets.begin(), minorOffsets.end());
      Value result;
      // If `options.unroll` is true, extract the 1-D vector from the
      // aggregate.
      if (options.unroll)
        result = vector_extract(xferOp.vector(), majorIvs);
      else
        result = std_load(alloc, majorIvs);
      auto map = TransferWriteOp::getTransferMinorIdentityMap(
          xferOp.getMemRefType(), minorVectorType);
      // Propagate masking of the innermost (minor) dimension to the 1-D write.
      ArrayAttr masked;
      if (xferOp.isMaskedDim(xferOp.getVectorType().getRank() - 1)) {
        OpBuilder &b = ScopedContext::getBuilderRef();
        masked = b.getBoolArrayAttr({true});
      }
      vector_transfer_write(result, xferOp.memref(), indexing,
                            AffineMapAttr::get(map), masked);
    };

    // 1. Compute the inBoundsCondition in the current loops ivs + offset
    // context.
    SmallVector<Value, 4> majorIvsPlusOffsets;
    Value inBoundsCondition = emitInBoundsCondition(
        majorIvs, majorOffsets, memrefBounds, majorIvsPlusOffsets);

    if (inBoundsCondition) {
      // 2.a. If the condition is not null, we need an IfOp, to write
      // conditionally. Progressively lower to a 1-D transfer write.
      auto ifOp = ScopedContext::getBuilderRef().create<scf::IfOp>(
          ScopedContext::getLocation(), TypeRange{}, inBoundsCondition,
          /*withElseRegion=*/false);
      BlockBuilder(&ifOp.thenRegion().front(),
                   Append())([&] { emitTransferWrite(majorIvsPlusOffsets); });
    } else {
      // 2.b. Guaranteed in-bounds. Progressively lower to a 1-D transfer write.
      emitTransferWrite(majorIvsPlusOffsets);
    }
  });

  rewriter.eraseOp(op);

  return success();
}
} // namespace
/// Analyzes the `transfer` to find an access dimension along the fastest remote
/// MemRef dimension. If such a dimension with coalescing properties is found,
/// `pivs` and `vectorBoundsCapture` are swapped so that the invocation of
/// LoopNestBuilder captures it in the innermost loop.
template <typename TransferOpTy>
static int computeCoalescedIndex(TransferOpTy transfer) {
  // rank of the remote memory access, coalescing behavior occurs on the
  // innermost memory dimension.
  auto remoteRank = transfer.getMemRefType().getRank();
  // Iterate over the results expressions of the permutation map to determine
  // the loop order for creating pointwise copies between remote and local
  // memories.
  // Returns the index of the permutation-map result that maps to the
  // innermost memref dimension, or -1 when no such dimension expression
  // exists.
  int coalescedIdx = -1;
  auto exprs = transfer.permutation_map().getResults();
  for (auto en : llvm::enumerate(exprs)) {
    auto dim = en.value().template dyn_cast<AffineDimExpr>();
    if (!dim) {
      continue;
    }
    auto memRefDim = dim.getPosition();
    if (memRefDim == remoteRank - 1) {
      // memRefDim has coalescing properties, it should be swapped in the last
      // position.
      assert(coalescedIdx == -1 && "Unexpected > 1 coalesced indices");
      coalescedIdx = en.index();
    }
  }
  return coalescedIdx;
}
/// Emits remote memory accesses that are clipped to the boundaries of the
/// MemRef.
/// For each memref dimension, clamps the access index (base index, plus the
/// matching loop iv when the permutation map uses that dimension) into the
/// valid range [0, dim_size - 1] using std_select chains.
template <typename TransferOpTy>
static SmallVector<Value, 8>
clip(TransferOpTy transfer, MemRefBoundsCapture &bounds, ArrayRef<Value> ivs) {
  using namespace mlir::edsc;
  Value zero(std_constant_index(0)), one(std_constant_index(1));
  SmallVector<Value, 8> memRefAccess(transfer.indices());
  SmallVector<Value, 8> clippedScalarAccessExprs(memRefAccess.size());
  // Indices accessing to remote memory are clipped and their expressions are
  // returned in clippedScalarAccessExprs.
  for (unsigned memRefDim = 0; memRefDim < clippedScalarAccessExprs.size();
       ++memRefDim) {
    // Linear search on a small number of entries.
    // Find the loop iv (if any) the permutation map associates with this
    // memref dimension.
    int loopIndex = -1;
    auto exprs = transfer.permutation_map().getResults();
    for (auto en : llvm::enumerate(exprs)) {
      auto expr = en.value();
      auto dim = expr.template dyn_cast<AffineDimExpr>();
      // Sanity check.
      assert(
          (dim || expr.template cast<AffineConstantExpr>().getValue() == 0) &&
          "Expected dim or 0 in permutationMap");
      if (dim && memRefDim == dim.getPosition()) {
        loopIndex = en.index();
        break;
      }
    }

    // We cannot distinguish atm between unrolled dimensions that implement
    // the "always full" tile abstraction and need clipping from the other
    // ones. So we conservatively clip everything.
    using namespace edsc::op;
    auto N = bounds.ub(memRefDim);
    auto i = memRefAccess[memRefDim];
    if (loopIndex < 0) {
      // No loop iv: clamp the base index alone to [0, N-1].
      auto N_minus_1 = N - one;
      auto select_1 = std_select(i < N, i, N_minus_1);
      clippedScalarAccessExprs[memRefDim] =
          std_select(i < zero, zero, select_1);
    } else {
      // Clamp base index + loop iv to [0, N-1].
      auto ii = ivs[loopIndex];
      auto i_plus_ii = i + ii;
      auto N_minus_1 = N - one;
      auto select_1 = std_select(i_plus_ii < N, i_plus_ii, N_minus_1);
      clippedScalarAccessExprs[memRefDim] =
          std_select(i_plus_ii < zero, zero, select_1);
    }
  }

  return clippedScalarAccessExprs;
}
namespace mlir {
/// Constructs a rewrite pattern matching TransferOpTy with benefit 1 and
/// stores the lowering options.
template <typename TransferOpTy>
VectorTransferRewriter<TransferOpTy>::VectorTransferRewriter(
    VectorTransferToSCFOptions options, MLIRContext *context)
    : RewritePattern(TransferOpTy::getOperationName(), 1, context),
      options(options) {}
/// Used for staging the transfer in a local buffer: returns a memref type
/// with the same shape and element type as the transferred vector (default
/// layout, memory space 0).
template <typename TransferOpTy>
MemRefType VectorTransferRewriter<TransferOpTy>::tmpMemRefType(
    TransferOpTy transfer) const {
  auto vecTy = transfer.getVectorType();
  return MemRefType::get(vecTy.getShape(), vecTy.getElementType(),
                         /*affineMapComposition=*/{}, /*memorySpace=*/0);
}
/// Lowers TransferReadOp into a combination of:
///   1. local memory allocation;
///   2. perfect loop nest over:
///      a. scalar load from local buffers (viewed as a scalar memref);
///      a. scalar store to original memref (with clipping).
///   3. vector_load from local buffer (viewed as a memref<1 x vector>);
///   4. local memory deallocation.
///
/// Lowers the data transfer part of a TransferReadOp while ensuring no
/// out-of-bounds accesses are possible. Out-of-bounds behavior is handled by
/// clipping. This means that a given value in memory can be read multiple
/// times and concurrently.
///
/// Important notes about clipping and "full-tiles only" abstraction:
/// =================================================================
/// When using clipping for dealing with boundary conditions, the same edge
/// value will appear multiple times (a.k.a edge padding). This is fine if the
/// subsequent vector operations are all data-parallel but **is generally
/// incorrect** in the presence of reductions or extract operations.
///
/// More generally, clipping is a scalar abstraction that is expected to work
/// fine as a baseline for CPUs and GPUs but not for vector_load and DMAs.
/// To deal with real vector_load and DMAs, a "padded allocation + view"
/// abstraction with the ability to read out-of-memref-bounds (but still within
/// the allocated region) is necessary.
///
/// Whether using scalar loops or vector_load/DMAs to perform the transfer,
/// junk values will be materialized in the vectors and generally need to be
/// filtered out and replaced by the "neutral element". This neutral element is
/// op-dependent so, in the future, we expect to create a vector filter and
/// apply it to a splatted constant vector with the proper neutral element at
/// each ssa-use. This filtering is not necessary for pure data-parallel
/// operations.
///
/// In the case of vector_store/DMAs, Read-Modify-Write will be required, which
/// also have concurrency implications. Note that by using clipped scalar stores
/// in the presence of data-parallel only operations, we generate code that
/// writes the same value multiple time on the edge locations.
///
/// TODO(ntv): implement alternatives to clipping.
/// TODO(ntv): support non-data-parallel operations.
/// Performs the rewrite.
template <>
LogicalResult VectorTransferRewriter<TransferReadOp>::matchAndRewrite(
    Operation *op, PatternRewriter &rewriter) const {
  using namespace mlir::edsc::op;
  TransferReadOp transfer = cast<TransferReadOp>(op);
  // Fast path: minor-identity permutation maps are handled elsewhere.
  if (AffineMap::isMinorIdentity(transfer.permutation_map())) {
    // If > 1D, emit a bunch of loops around 1-D vector transfers.
    if (transfer.getVectorType().getRank() > 1)
      return NDTransferOpHelper<TransferReadOp>(rewriter, transfer, options)
          .doReplace();
    // If 1-D this is now handled by the target-specific lowering.
    if (transfer.getVectorType().getRank() == 1)
      return failure();
  }
  // Conservative lowering to scalar load / stores.
  // 1. Setup all the captures.
  ScopedContext scope(rewriter, transfer.getLoc());
  StdIndexedValue remote(transfer.memref());
  MemRefBoundsCapture memRefBoundsCapture(transfer.memref());
  VectorBoundsCapture vectorBoundsCapture(transfer.vector());
  int coalescedIdx = computeCoalescedIndex(transfer);
  // Swap the vectorBoundsCapture which will reorder loop bounds.
  if (coalescedIdx >= 0)
    vectorBoundsCapture.swapRanges(vectorBoundsCapture.rank() - 1,
                                   coalescedIdx);
  auto lbs = vectorBoundsCapture.getLbs();
  auto ubs = vectorBoundsCapture.getUbs();
  SmallVector<Value, 8> steps;
  steps.reserve(vectorBoundsCapture.getSteps().size());
  for (auto step : vectorBoundsCapture.getSteps())
    steps.push_back(std_constant_index(step));
  // 2. Emit alloc-copy-load-dealloc.
  Value tmp = std_alloc(tmpMemRefType(transfer));
  StdIndexedValue local(tmp);
  // View the staging buffer as memref<1 x vector> for the final load.
  Value vec = vector_type_cast(tmp);
  loopNestBuilder(lbs, ubs, steps, [&](ValueRange loopIvs) {
    auto ivs = llvm::to_vector<8>(loopIvs);
    // Swap the ivs which will reorder memory accesses.
    if (coalescedIdx >= 0)
      std::swap(ivs.back(), ivs[coalescedIdx]);
    // Computes clippedScalarAccessExprs in the loop nest scope (ivs exist).
    local(ivs) = remote(clip(transfer, memRefBoundsCapture, ivs));
  });
  Value vectorValue = std_load(vec);
  // Extra parens keep this an expression statement, not a declaration.
  (std_dealloc(tmp)); // vexing parse
  // 3. Propagate.
  rewriter.replaceOp(op, vectorValue);
  return success();
}
/// Lowers TransferWriteOp into a combination of:
///   1. local memory allocation;
///   2. vector_store to local buffer (viewed as a memref<1 x vector>);
///   3. perfect loop nest over:
///      a. scalar load from local buffers (viewed as a scalar memref);
///      a. scalar store to original memref (with clipping).
///   4. local memory deallocation.
///
/// More specifically, lowers the data transfer part while ensuring no
/// out-of-bounds accesses are possible. Out-of-bounds behavior is handled by
/// clipping. This means that a given value in memory can be written to multiple
/// times and concurrently.
///
/// See `Important notes about clipping and full-tiles only abstraction` in the
/// description of the TransferReadOp rewrite above.
///
/// TODO(ntv): implement alternatives to clipping.
/// TODO(ntv): support non-data-parallel operations.
template <>
LogicalResult VectorTransferRewriter<TransferWriteOp>::matchAndRewrite(
    Operation *op, PatternRewriter &rewriter) const {
  using namespace edsc::op;
  TransferWriteOp transfer = cast<TransferWriteOp>(op);
  // Fast path: minor-identity permutation maps are handled elsewhere.
  if (AffineMap::isMinorIdentity(transfer.permutation_map())) {
    // If > 1D, emit a bunch of loops around 1-D vector transfers.
    if (transfer.getVectorType().getRank() > 1)
      return NDTransferOpHelper<TransferWriteOp>(rewriter, transfer, options)
          .doReplace();
    // If 1-D this is now handled by the target-specific lowering.
    if (transfer.getVectorType().getRank() == 1)
      return failure();
  }
  // 1. Setup all the captures.
  ScopedContext scope(rewriter, transfer.getLoc());
  StdIndexedValue remote(transfer.memref());
  MemRefBoundsCapture memRefBoundsCapture(transfer.memref());
  Value vectorValue(transfer.vector());
  VectorBoundsCapture vectorBoundsCapture(transfer.vector());
  int coalescedIdx = computeCoalescedIndex(transfer);
  // Swap the vectorBoundsCapture which will reorder loop bounds.
  if (coalescedIdx >= 0)
    vectorBoundsCapture.swapRanges(vectorBoundsCapture.rank() - 1,
                                   coalescedIdx);
  auto lbs = vectorBoundsCapture.getLbs();
  auto ubs = vectorBoundsCapture.getUbs();
  SmallVector<Value, 8> steps;
  steps.reserve(vectorBoundsCapture.getSteps().size());
  for (auto step : vectorBoundsCapture.getSteps())
    steps.push_back(std_constant_index(step));
  // 2. Emit alloc-store-copy-dealloc.
  Value tmp = std_alloc(tmpMemRefType(transfer));
  StdIndexedValue local(tmp);
  // View the staging buffer as memref<1 x vector> and store the whole vector.
  Value vec = vector_type_cast(tmp);
  std_store(vectorValue, vec);
  loopNestBuilder(lbs, ubs, steps, [&](ValueRange loopIvs) {
    auto ivs = llvm::to_vector<8>(loopIvs);
    // Swap the ivs which will reorder memory accesses.
    if (coalescedIdx >= 0)
      std::swap(ivs.back(), ivs[coalescedIdx]);
    // Computes clippedScalarAccessExprs in the loop nest scope (ivs exist).
    remote(clip(transfer, memRefBoundsCapture, ivs)) = local(ivs);
  });
  // Extra parens keep this an expression statement, not a declaration.
  (std_dealloc(tmp)); // vexing parse...
  rewriter.eraseOp(op);
  return success();
}
/// Populates `patterns` with the vector.transfer_read/transfer_write -> SCF
/// lowering patterns, parameterized by `options`.
void populateVectorToSCFConversionPatterns(
    OwningRewritePatternList &patterns, MLIRContext *context,
    const VectorTransferToSCFOptions &options) {
  patterns.insert<VectorTransferRewriter<vector::TransferReadOp>,
                  VectorTransferRewriter<vector::TransferWriteOp>>(options,
                                                                   context);
}
} // namespace mlir
|
//---------------------------------------------------------------------------//
// Copyright (c) 2017-2021 Mikhail Komarov <nemo@nil.foundation>
// Copyright (c) 2020-2021 Nikita Kaskov <nbering@nil.foundation>
// Copyright (c) 2021 Ilias Khairullin <ilias@nil.foundation>
//
// MIT License
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//---------------------------------------------------------------------------//
#ifndef CRYPTO3_MARSHALLING_FIELD_ELEMENT_HPP
#define CRYPTO3_MARSHALLING_FIELD_ELEMENT_HPP
#include <ratio>
#include <limits>
#include <type_traits>
#include <nil/marshalling/status_type.hpp>
#include <nil/marshalling/options.hpp>
#include <nil/marshalling/types/integral.hpp>
#include <nil/marshalling/types/array_list.hpp>
#include <nil/marshalling/types/tag.hpp>
#include <nil/marshalling/types/detail/adapt_basic_field.hpp>
#include <nil/crypto3/algebra/type_traits.hpp>
#include <nil/crypto3/marshalling/multiprecision/types/integral.hpp>
namespace nil {
namespace crypto3 {
namespace marshalling {
namespace types {
// field_element<TTypeBase, FieldType>: marshalling representation of one
// field element. For an extended field it is a fixed-size array_list of
// FieldType::arity integral limbs; for a prime field it is a single integral.
// NOTE(review): the array_list branch hard-codes little_endian regardless of
// TTypeBase's endianness -- confirm this asymmetry is intended.
template<typename TTypeBase,
         typename FieldType,
         typename = typename std::enable_if<algebra::is_field<FieldType>::value, bool>::type,
         typename... TOptions>
using field_element =
    typename std::conditional<algebra::is_extended_field<FieldType>::value,
                              nil::marshalling::types::array_list<
                                  nil::marshalling::field_type<nil::marshalling::option::little_endian>,
                                  integral<TTypeBase, typename FieldType::integral_type>,
                                  nil::marshalling::option::fixed_size_storage<FieldType::arity>>,
                              integral<TTypeBase, typename FieldType::integral_type>>::type;
namespace detail {
// Flattens a non-extended (prime) field element into an array of integral
// limbs. Only result[0] is written -- assumes FieldType::arity == 1 for
// prime fields (TODO confirm; other slots are left uninitialized).
template<typename FieldType>
typename std::enable_if<!(algebra::is_extended_field<FieldType>::value),
                        std::array<typename FieldType::integral_type, FieldType::arity>>::type
obtain_field_data(const typename FieldType::value_type &field_elem) {
    std::array<typename FieldType::integral_type, FieldType::arity> result;
    result[0] = typename FieldType::integral_type(field_elem.data);
    return result;
}
// Recursively flattens an extended-field element into a contiguous array of
// integral limbs: each of the arity/underlying_arity coefficients contributes
// underlying_field_type::arity limbs, copied to consecutive offsets.
template<typename FieldType>
typename std::enable_if<algebra::is_extended_field<FieldType>::value,
                        std::array<typename FieldType::integral_type, FieldType::arity>>::type
obtain_field_data(const typename FieldType::value_type &field_elem) {
    std::array<typename FieldType::integral_type, FieldType::arity> result;
    for (std::size_t i = 0; i < FieldType::arity / FieldType::underlying_field_type::arity; i++) {
        // Flatten coefficient i via the underlying field's overload.
        std::array<typename FieldType::integral_type, FieldType::underlying_field_type::arity>
            intermediate_res =
                obtain_field_data<typename FieldType::underlying_field_type>(field_elem.data[i]);
        std::copy(intermediate_res.begin(),
                  intermediate_res.end(),
                  result.begin() + i * FieldType::underlying_field_type::arity);
    }
    return result;
}
} // namespace detail
// Serializes an extended-field element: flatten it into raw integral limbs,
// wrap each limb in the marshalling integral type, and pack the wrapped limbs
// into the fixed-size container backing the field_element array_list.
template<typename FieldType, typename Endianness>
typename std::enable_if<algebra::is_field<FieldType>::value &&
                        algebra::is_extended_field<FieldType>::value,
                        field_element<nil::marshalling::field_type<Endianness>, FieldType>>::type
fill_field_element(const typename FieldType::value_type &field_elem) {
    using field_element_type = field_element<nil::marshalling::field_type<Endianness>, FieldType>;
    using integral_type =
        integral<nil::marshalling::field_type<Endianness>, typename FieldType::integral_type>;
    const std::array<typename FieldType::integral_type, FieldType::arity> limbs =
        detail::obtain_field_data<FieldType>(field_elem);
    nil::marshalling::container::static_vector<integral_type, FieldType::arity> container_data;
    for (const auto &limb : limbs) {
        container_data.push_back(integral_type(limb));
    }
    return field_element_type(container_data);
}
// Serializes a prime-field element: a single integral limb wrapped in the
// marshalling integral type.
template<typename FieldType, typename Endianness>
typename std::enable_if<algebra::is_field<FieldType>::value &&
                        !(algebra::is_extended_field<FieldType>::value),
                        field_element<nil::marshalling::field_type<Endianness>, FieldType>>::type
fill_field_element(const typename FieldType::value_type &field_elem) {
    using field_element_type = field_element<nil::marshalling::field_type<Endianness>, FieldType>;
    using integral_type =
        integral<nil::marshalling::field_type<Endianness>, typename FieldType::integral_type>;
    // Extract the raw limb first, then wrap it for marshalling.
    typename FieldType::integral_type raw_limb(field_elem.data);
    return field_element_type(integral_type(raw_limb));
}
// Serializes a vector of field elements into a size-prefixed array_list by
// converting each element independently.
template<typename FieldType, typename Endianness>
nil::marshalling::types::array_list<
    nil::marshalling::field_type<Endianness>,
    field_element<nil::marshalling::field_type<Endianness>, FieldType>,
    nil::marshalling::option::sequence_size_field_prefix<
        nil::marshalling::types::integral<nil::marshalling::field_type<Endianness>, std::size_t>>>
fill_field_element_vector(const std::vector<typename FieldType::value_type> &field_elem_vector) {
    using TTypeBase = nil::marshalling::field_type<Endianness>;
    using field_element_type = field_element<TTypeBase, FieldType>;
    using field_element_vector_type = nil::marshalling::types::array_list<
        TTypeBase,
        field_element_type,
        nil::marshalling::option::sequence_size_field_prefix<
            nil::marshalling::types::integral<TTypeBase, std::size_t>>>;
    field_element_vector_type result;
    auto &packed = result.value();
    packed.reserve(field_elem_vector.size());
    for (const auto &elem : field_elem_vector) {
        packed.push_back(fill_field_element<FieldType, Endianness>(elem));
    }
    return result;
}
namespace detail {
// Rebuilds a prime-field value from the single limb the iterator points at.
// The iterator must reference at least one valid limb.
template<typename FieldType>
typename std::enable_if<algebra::is_field<FieldType>::value &&
                        !(algebra::is_extended_field<FieldType>::value),
                        typename FieldType::value_type>::type
make_field_element(typename std::array<typename FieldType::integral_type,
                                       FieldType::arity>::iterator field_elem_data_iter) {
    return typename FieldType::value_type(*field_elem_data_iter);
}
// Recursively rebuilds an extended-field value from a flat limb array: the
// limbs are split into cur_arity consecutive chunks of
// underlying_field_type::arity limbs, each converted by the underlying
// field's overload, then assembled into the extension-field value.
template<typename FieldType>
typename std::enable_if<algebra::is_extended_field<FieldType>::value,
                        typename FieldType::value_type>::type
make_field_element(typename std::array<typename FieldType::integral_type,
                                       FieldType::arity>::iterator field_elem_data_iter) {
    constexpr static const std::size_t cur_arity =
        FieldType::arity / FieldType::underlying_field_type::arity;
    std::array<typename FieldType::underlying_field_type::value_type, cur_arity> data;
    for (std::size_t i = 0; i < cur_arity; i++) {
        // Chunk i starts i * underlying_arity limbs into the flat array.
        data[i] = make_field_element<typename FieldType::underlying_field_type>(
            field_elem_data_iter + i * FieldType::underlying_field_type::arity);
    }
    return typename FieldType::value_type(data);
}
} // namespace detail
// Deserializes an extended-field element: unwrap each marshalled limb into a
// flat raw-integral array, then rebuild the algebra value recursively.
template<typename FieldType, typename Endianness>
typename std::enable_if<algebra::is_extended_field<FieldType>::value,
                        typename FieldType::value_type>::type
make_field_element(
    const field_element<nil::marshalling::field_type<Endianness>, FieldType> &field_elem) {
    std::array<typename FieldType::integral_type, FieldType::arity> limbs;
    for (std::size_t idx = 0; idx < FieldType::arity; ++idx) {
        limbs[idx] = field_elem.value()[idx].value();
    }
    return detail::make_field_element<FieldType>(limbs.begin());
}
// Deserializes a prime-field element from its single marshalled integral.
template<typename FieldType, typename Endianness>
typename std::enable_if<algebra::is_field<FieldType>::value &&
                        !(algebra::is_extended_field<FieldType>::value),
                        typename FieldType::value_type>::type
make_field_element(
    const field_element<nil::marshalling::field_type<Endianness>, FieldType> &field_elem) {
    return typename FieldType::value_type(field_elem.value());
}
// Deserializes a size-prefixed array_list of field elements back into a
// std::vector of algebra values, converting each entry independently.
template<typename FieldType, typename Endianness>
std::vector<typename FieldType::value_type> make_field_element_vector(
    const nil::marshalling::types::array_list<
        nil::marshalling::field_type<Endianness>,
        field_element<nil::marshalling::field_type<Endianness>, FieldType>,
        nil::marshalling::option::sequence_size_field_prefix<
            nil::marshalling::types::integral<nil::marshalling::field_type<Endianness>, std::size_t>>>
        &field_elem_vector) {
    const auto &packed = field_elem_vector.value();
    std::vector<typename FieldType::value_type> result;
    result.reserve(packed.size());
    for (const auto &elem : packed) {
        result.push_back(make_field_element<FieldType, Endianness>(elem));
    }
    return result;
}
} // namespace types
} // namespace marshalling
} // namespace crypto3
} // namespace nil
#endif // CRYPTO3_MARSHALLING_FIELD_ELEMENT_HPP
|
// Autogenerated from CppHeaderCreator on 7/27/2020 3:09:53 PM
// Created by Sc2ad
// =========================================================================
#pragma once
#pragma pack(push, 8)
// Begin includes
#include "utils/typedefs.h"
// Including type: System.Diagnostics.Tracing.TraceLoggingTypeInfo`1
#include "System/Diagnostics/Tracing/TraceLoggingTypeInfo_1.hpp"
// Including type: System.Diagnostics.Tracing.EventFieldFormat
#include "System/Diagnostics/Tracing/EventFieldFormat.hpp"
#include "utils/il2cpp-utils.hpp"
// Completed includes
// Begin forward declares
// Forward declaring namespace: System::Collections::Generic
namespace System::Collections::Generic {
// Forward declaring type: IEnumerable`1<T>
template<typename T>
class IEnumerable_1;
}
// Forward declaring namespace: System::Diagnostics::Tracing
namespace System::Diagnostics::Tracing {
// Skipping declaration: TraceLoggingTypeInfo`1 because it is already included!
// Forward declaring type: TraceLoggingMetadataCollector
class TraceLoggingMetadataCollector;
// Forward declaring type: TraceLoggingDataCollector
class TraceLoggingDataCollector;
}
// Completed forward declares
// Type namespace: System.Diagnostics.Tracing
namespace System::Diagnostics::Tracing {
// Autogenerated type: System.Diagnostics.Tracing.EnumerableTypeInfo`2
// Autogenerated type: System.Diagnostics.Tracing.EnumerableTypeInfo`2
// C++ bridge for the managed generic type; both overrides forward to the
// managed implementation through the il2cpp runtime (RunMethod) and crash
// the process if invocation fails (CRASH_UNLESS).
template<typename IterableType, typename ElementType>
class EnumerableTypeInfo_2 : public System::Diagnostics::Tracing::TraceLoggingTypeInfo_1<IterableType> {
public:
    // private readonly System.Diagnostics.Tracing.TraceLoggingTypeInfo`1<ElementType> elementInfo
    // Offset: 0x0
    System::Diagnostics::Tracing::TraceLoggingTypeInfo_1<ElementType>* elementInfo;
    // public override System.Void WriteMetadata(System.Diagnostics.Tracing.TraceLoggingMetadataCollector collector, System.String name, System.Diagnostics.Tracing.EventFieldFormat format)
    // Offset: 0x154B834
    // Implemented from: System.Diagnostics.Tracing.TraceLoggingTypeInfo
    // Base method: System.Void TraceLoggingTypeInfo::WriteMetadata(System.Diagnostics.Tracing.TraceLoggingMetadataCollector collector, System.String name, System.Diagnostics.Tracing.EventFieldFormat format)
    void WriteMetadata(System::Diagnostics::Tracing::TraceLoggingMetadataCollector* collector, ::Il2CppString* name, System::Diagnostics::Tracing::EventFieldFormat format) {
        CRASH_UNLESS(il2cpp_utils::RunMethod(this, "WriteMetadata", collector, name, format));
    }
    // public override System.Void WriteData(System.Diagnostics.Tracing.TraceLoggingDataCollector collector, IterableType value)
    // Offset: 0x154B8A0
    // Implemented from: System.Diagnostics.Tracing.TraceLoggingTypeInfo`1
    // Base method: System.Void TraceLoggingTypeInfo`1::WriteData(System.Diagnostics.Tracing.TraceLoggingDataCollector collector, IterableType value)
    void WriteData(System::Diagnostics::Tracing::TraceLoggingDataCollector* collector, IterableType& value) {
        CRASH_UNLESS(il2cpp_utils::RunMethod(this, "WriteData", collector, value));
    }
}; // System.Diagnostics.Tracing.EnumerableTypeInfo`2
}
DEFINE_IL2CPP_ARG_TYPE_GENERIC_CLASS(System::Diagnostics::Tracing::EnumerableTypeInfo_2, "System.Diagnostics.Tracing", "EnumerableTypeInfo`2");
#pragma pack(pop)
|
// Copyright (c) 2014-2015 The Dash developers
// Copyright (c) 2015-2017 The PIVX developers
// Copyright (c) 2018 The LightPayCoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "masternode.h"
#include "addrman.h"
#include "masternodeman.h"
#include "obfuscation.h"
#include "sync.h"
#include "util.h"
#include <boost/lexical_cast.hpp>
// keep track of the scanning errors I've seen
map<uint256, int> mapSeenMasternodeScanningErrors;
// cache block hashes as we calculate them
std::map<int64_t, uint256> mapCacheBlockHashes;
//Get the last hash that matches the modulus given. Processed in reverse order
// Resolves nBlockHeight (0 means "current tip height") to a block hash,
// serving from and filling mapCacheBlockHashes. Returns false if there is no
// tip or the requested height is ahead of the tip.
// NOTE(review): nBlocksAgo is computed from tip->nHeight + 1, so the walk
// stops one block below nBlockHeight as written -- confirm this offset is
// intended before touching it (masternode election depends on it).
bool GetBlockHash(uint256& hash, int nBlockHeight)
{
    if (chainActive.Tip() == NULL) return false;
    // Height 0 is shorthand for the current tip.
    if (nBlockHeight == 0)
        nBlockHeight = chainActive.Tip()->nHeight;
    // Serve cached results without walking the chain.
    if (mapCacheBlockHashes.count(nBlockHeight)) {
        hash = mapCacheBlockHashes[nBlockHeight];
        return true;
    }
    const CBlockIndex* BlockLastSolved = chainActive.Tip();
    const CBlockIndex* BlockReading = chainActive.Tip();
    if (BlockLastSolved == NULL || BlockLastSolved->nHeight == 0 || chainActive.Tip()->nHeight + 1 < nBlockHeight) return false;
    // Number of steps to walk back from the tip.
    int nBlocksAgo = 0;
    if (nBlockHeight > 0) nBlocksAgo = (chainActive.Tip()->nHeight + 1) - nBlockHeight;
    assert(nBlocksAgo >= 0);
    int n = 0;
    for (unsigned int i = 1; BlockReading && BlockReading->nHeight > 0; i++) {
        if (n >= nBlocksAgo) {
            // Target reached: cache and return its hash.
            hash = BlockReading->GetBlockHash();
            mapCacheBlockHashes[nBlockHeight] = hash;
            return true;
        }
        n++;
        if (BlockReading->pprev == NULL) {
            assert(BlockReading);
            break;
        }
        BlockReading = BlockReading->pprev;
    }
    return false;
}
// Default-construct a masternode: empty identity fields, ENABLED state,
// timestamped with the current network-adjusted time.
CMasternode::CMasternode()
{
    LOCK(cs);
    vin = CTxIn();
    addr = CService();
    pubKeyCollateralAddress = CPubKey();
    pubKeyMasternode = CPubKey();
    sig = std::vector<unsigned char>();
    activeState = MASTERNODE_ENABLED;
    sigTime = GetAdjustedTime();
    lastPing = CMasternodePing();
    cacheInputAge = 0;
    cacheInputAgeBlock = 0;
    unitTest = false;
    allowFreeTx = true;
    // Fix: this statement previously ended with a comma operator (`,`),
    // silently chaining it to the next assignment; use a semicolon so each
    // assignment stands alone.
    nActiveState = MASTERNODE_ENABLED;
    protocolVersion = PROTOCOL_VERSION;
    nLastDsq = 0;
    nScanningErrorCount = 0;
    nLastScanningErrorBlockHeight = 0;
    lastTimeChecked = 0;
    nLastDsee = 0;  // temporary, do not save. Remove after migration to v12
    nLastDseep = 0; // temporary, do not save. Remove after migration to v12
}
// Copy constructor.
// NOTE(review): as in the original code, lastTimeChecked is reset to 0 (to
// force a re-Check) and nActiveState is set to MASTERNODE_ENABLED rather
// than copied from `other` -- confirm the latter is intended.
CMasternode::CMasternode(const CMasternode& other)
{
    LOCK(cs);
    vin = other.vin;
    addr = other.addr;
    pubKeyCollateralAddress = other.pubKeyCollateralAddress;
    pubKeyMasternode = other.pubKeyMasternode;
    sig = other.sig;
    activeState = other.activeState;
    sigTime = other.sigTime;
    lastPing = other.lastPing;
    cacheInputAge = other.cacheInputAge;
    cacheInputAgeBlock = other.cacheInputAgeBlock;
    unitTest = other.unitTest;
    allowFreeTx = other.allowFreeTx;
    // Fix: this statement previously ended with a comma operator (`,`),
    // silently chaining it to the next assignment; use a semicolon.
    nActiveState = MASTERNODE_ENABLED;
    protocolVersion = other.protocolVersion;
    nLastDsq = other.nLastDsq;
    nScanningErrorCount = other.nScanningErrorCount;
    nLastScanningErrorBlockHeight = other.nLastScanningErrorBlockHeight;
    lastTimeChecked = 0;
    nLastDsee = other.nLastDsee;   // temporary, do not save. Remove after migration to v12
    nLastDseep = other.nLastDseep; // temporary, do not save. Remove after migration to v12
}
// Construct a masternode from a received broadcast: identity, signature and
// ping come from the broadcast; cache/error counters start fresh.
CMasternode::CMasternode(const CMasternodeBroadcast& mnb)
{
    LOCK(cs);
    vin = mnb.vin;
    addr = mnb.addr;
    pubKeyCollateralAddress = mnb.pubKeyCollateralAddress;
    pubKeyMasternode = mnb.pubKeyMasternode;
    sig = mnb.sig;
    activeState = MASTERNODE_ENABLED;
    sigTime = mnb.sigTime;
    lastPing = mnb.lastPing;
    cacheInputAge = 0;
    cacheInputAgeBlock = 0;
    unitTest = false;
    allowFreeTx = true;
    // Fix: this statement previously ended with a comma operator (`,`),
    // silently chaining it to the next assignment; use a semicolon.
    nActiveState = MASTERNODE_ENABLED;
    protocolVersion = mnb.protocolVersion;
    nLastDsq = mnb.nLastDsq;
    nScanningErrorCount = 0;
    nLastScanningErrorBlockHeight = 0;
    lastTimeChecked = 0;
    nLastDsee = 0;  // temporary, do not save. Remove after migration to v12
    nLastDseep = 0; // temporary, do not save. Remove after migration to v12
}
//
// When a new masternode broadcast is sent, update our information
//
// Adopts the broadcast's identity, signature and ping data only when the
// broadcast is strictly newer (mnb.sigTime > sigTime); returns true in that
// case, false otherwise.
bool CMasternode::UpdateFromNewBroadcast(CMasternodeBroadcast& mnb)
{
    if (mnb.sigTime > sigTime) {
        pubKeyMasternode = mnb.pubKeyMasternode;
        pubKeyCollateralAddress = mnb.pubKeyCollateralAddress;
        sigTime = mnb.sigTime;
        sig = mnb.sig;
        protocolVersion = mnb.protocolVersion;
        addr = mnb.addr;
        // Force a full re-Check on the next Check() call.
        lastTimeChecked = 0;
        int nDoS = 0;
        // Adopt the broadcast's ping when it is empty (no ping yet) or when a
        // non-empty ping validates; only then record it in the seen-ping map.
        if (mnb.lastPing == CMasternodePing() || (mnb.lastPing != CMasternodePing() && mnb.lastPing.CheckAndUpdate(nDoS, false))) {
            lastPing = mnb.lastPing;
            mnodeman.mapSeenMasternodePing.insert(make_pair(lastPing.GetHash(), lastPing));
        }
        return true;
    }
    return false;
}
//
// Deterministically calculate a given "score" for a Masternode depending on how close it's hash is to
// the proof of work for that block. The further away they are the better, the furthest will win the election
// and get paid this block
//
// NOTE(review): the `mod` parameter is never used in the body -- confirm
// whether it can be dropped at the call sites.
uint256 CMasternode::CalculateScore(int mod, int64_t nBlockHeight)
{
    if (chainActive.Tip() == NULL) return 0;
    uint256 hash = 0;
    // Mix in the collateral outpoint so each masternode scores differently
    // for the same block hash.
    uint256 aux = vin.prevout.hash + vin.prevout.n;
    if (!GetBlockHash(hash, nBlockHeight)) {
        LogPrintf("CalculateScore ERROR - nHeight %d - Returned 0\n", nBlockHeight);
        return 0;
    }
    // hash2 = H(blockhash); hash3 = H(blockhash || outpoint).
    CHashWriter ss(SER_GETHASH, PROTOCOL_VERSION);
    ss << hash;
    uint256 hash2 = ss.GetHash();
    CHashWriter ss2(SER_GETHASH, PROTOCOL_VERSION);
    ss2 << hash;
    ss2 << aux;
    uint256 hash3 = ss2.GetHash();
    // Score is the absolute difference of the two digests.
    uint256 r = (hash3 > hash2 ? hash3 - hash2 : hash2 - hash3);
    return r;
}
// Re-evaluates this masternode's activeState (ENABLED / EXPIRED / REMOVE /
// VIN_SPENT). Rate-limited to once per MASTERNODE_CHECK_SECONDS unless
// forceCheck is set.
void CMasternode::Check(bool forceCheck)
{
    if (ShutdownRequested()) return;
    if (!forceCheck && (GetTime() - lastTimeChecked < MASTERNODE_CHECK_SECONDS)) return;
    lastTimeChecked = GetTime();
    //once spent, stop doing the checks
    if (activeState == MASTERNODE_VIN_SPENT) return;
    // Downgrade by ping age: removal threshold first, then expiration.
    if (!IsPingedWithin(MASTERNODE_REMOVAL_SECONDS)) {
        activeState = MASTERNODE_REMOVE;
        return;
    }
    if (!IsPingedWithin(MASTERNODE_EXPIRATION_SECONDS)) {
        activeState = MASTERNODE_EXPIRED;
        return;
    }
    if (!unitTest) {
        CValidationState state;
        // Probe whether the collateral outpoint is still unspent by
        // test-accepting a transaction that would spend just under the
        // collateral amount.
        CMutableTransaction tx = CMutableTransaction();
        CTxOut vout = CTxOut(((Params().MasternodeCollateralLimit() - 0.01)) * COIN, obfuScationPool.collateralPubKey);
        tx.vin.push_back(vin);
        tx.vout.push_back(vout);
        {
            TRY_LOCK(cs_main, lockMain);
            // Don't block the caller; retry on the next Check() instead.
            if (!lockMain) return;
            if (!AcceptableInputs(mempool, state, CTransaction(tx), false, NULL)) {
                activeState = MASTERNODE_VIN_SPENT;
                return;
            }
        }
    }
    activeState = MASTERNODE_ENABLED; // OK
}
// Seconds since this masternode was last paid. If the last payment is under
// 30 days old, the true elapsed time is returned; otherwise a deterministic
// pseudo-random value above 30 days is derived from (vin, sigTime) so
// unknown/unpaid nodes sort stably.
// Fix: removed dead local `pubkeyScript` (computed via GetScriptForDestination
// but never used).
int64_t CMasternode::SecondsSincePayment()
{
    int64_t sec = (GetAdjustedTime() - GetLastPaid());
    int64_t month = 60 * 60 * 24 * 30;
    if (sec < month) return sec; //if it's less than 30 days, give seconds
    CHashWriter ss(SER_GETHASH, PROTOCOL_VERSION);
    ss << vin;
    ss << sigTime;
    uint256 hash = ss.GetHash();
    // return some deterministic value for unknown/unpaid but force it to be more than 30 days old
    return month + hash.GetCompact(false);
}
// Returns the timestamp of the most recent block that paid this masternode,
// plus a deterministic tie-breaking offset (< 150s derived from vin/sigTime),
// or 0 if no payment is found within ~1.25x the enabled masternode count of
// recent blocks.
// Fixes: the function returns int64_t, so return 0 (not the bool literal
// `false`) on a missing tip; the duplicated chainActive.Tip() NULL re-check
// is removed and pindexPrev is reused.
int64_t CMasternode::GetLastPaid()
{
    CBlockIndex* pindexPrev = chainActive.Tip();
    if (pindexPrev == NULL) return 0;
    CScript mnpayee;
    mnpayee = GetScriptForDestination(pubKeyCollateralAddress.GetID());
    CHashWriter ss(SER_GETHASH, PROTOCOL_VERSION);
    ss << vin;
    ss << sigTime;
    uint256 hash = ss.GetHash();
    // use a deterministic offset to break a tie -- 2.5 minutes
    int64_t nOffset = hash.GetCompact(false) % 150;
    const CBlockIndex* BlockReading = pindexPrev;
    // Only scan roughly one payment cycle's worth of recent blocks.
    int nMnCount = mnodeman.CountEnabled() * 1.25;
    int n = 0;
    for (unsigned int i = 1; BlockReading && BlockReading->nHeight > 0; i++) {
        if (n >= nMnCount) {
            return 0;
        }
        n++;
        if (masternodePayments.mapMasternodeBlocks.count(BlockReading->nHeight)) {
            /*
                Search for this payee, with at least 2 votes. This will aid in consensus allowing the network
                to converge on the same payees quickly, then keep the same schedule.
            */
            if (masternodePayments.mapMasternodeBlocks[BlockReading->nHeight].HasPayeeWithVotes(mnpayee, 2)) {
                return BlockReading->nTime + nOffset;
            }
        }
        if (BlockReading->pprev == NULL) {
            assert(BlockReading);
            break;
        }
        BlockReading = BlockReading->pprev;
    }
    return 0;
}
// Translates the cached activation state (nActiveState) into its
// human-readable name; unrecognized states map to "UNKNOWN".
std::string CMasternode::GetStatus()
{
    if (nActiveState == CMasternode::MASTERNODE_PRE_ENABLED) return "PRE_ENABLED";
    if (nActiveState == CMasternode::MASTERNODE_ENABLED) return "ENABLED";
    if (nActiveState == CMasternode::MASTERNODE_EXPIRED) return "EXPIRED";
    if (nActiveState == CMasternode::MASTERNODE_OUTPOINT_SPENT) return "OUTPOINT_SPENT";
    if (nActiveState == CMasternode::MASTERNODE_REMOVE) return "REMOVE";
    if (nActiveState == CMasternode::MASTERNODE_WATCHDOG_EXPIRED) return "WATCHDOG_EXPIRED";
    if (nActiveState == CMasternode::MASTERNODE_POSE_BAN) return "POSE_BAN";
    return "UNKNOWN";
}
// True when the masternode's advertised address is reachable and routable,
// or unconditionally on regtest.
bool CMasternode::IsValidNetAddr()
{
    // TODO: regtest is fine with any addresses for now,
    // should probably be a bit smarter if one day we start to implement tests for this
    return Params().NetworkID() == CBaseChainParams::REGTEST ||
           (IsReachable(addr) && addr.IsRoutable());
}
// Default-construct an empty masternode broadcast stamped with the current
// adjusted time.
CMasternodeBroadcast::CMasternodeBroadcast()
{
    vin = CTxIn();
    addr = CService();
    pubKeyCollateralAddress = CPubKey();
    // NOTE(review): this resets `pubKeyMasternode1`, while every other
    // constructor in this file assigns `pubKeyMasternode` -- confirm the
    // member name is intentional and not a typo.
    pubKeyMasternode1 = CPubKey();
    sig = std::vector<unsigned char>();
    activeState = MASTERNODE_ENABLED;
    sigTime = GetAdjustedTime();
    lastPing = CMasternodePing();
    cacheInputAge = 0;
    cacheInputAgeBlock = 0;
    unitTest = false;
    allowFreeTx = true;
    protocolVersion = PROTOCOL_VERSION;
    nLastDsq = 0;
    nScanningErrorCount = 0;
    nLastScanningErrorBlockHeight = 0;
}
// Construct a broadcast from explicit address, collateral input, keys and
// protocol version; remaining fields take fresh defaults.
CMasternodeBroadcast::CMasternodeBroadcast(CService newAddr, CTxIn newVin, CPubKey pubKeyCollateralAddressNew, CPubKey pubKeyMasternodeNew, int protocolVersionIn)
{
    vin = newVin;
    addr = newAddr;
    pubKeyCollateralAddress = pubKeyCollateralAddressNew;
    pubKeyMasternode = pubKeyMasternodeNew;
    sig = std::vector<unsigned char>();
    activeState = MASTERNODE_ENABLED;
    sigTime = GetAdjustedTime();
    lastPing = CMasternodePing();
    cacheInputAge = 0;
    cacheInputAgeBlock = 0;
    unitTest = false;
    allowFreeTx = true;
    protocolVersion = protocolVersionIn;
    nLastDsq = 0;
    nScanningErrorCount = 0;
    nLastScanningErrorBlockHeight = 0;
}
// Construct a broadcast that mirrors an existing masternode record
// field-for-field (used when relaying our known state).
CMasternodeBroadcast::CMasternodeBroadcast(const CMasternode& mn)
{
    vin = mn.vin;
    addr = mn.addr;
    pubKeyCollateralAddress = mn.pubKeyCollateralAddress;
    pubKeyMasternode = mn.pubKeyMasternode;
    sig = mn.sig;
    activeState = mn.activeState;
    sigTime = mn.sigTime;
    lastPing = mn.lastPing;
    cacheInputAge = mn.cacheInputAge;
    cacheInputAgeBlock = mn.cacheInputAgeBlock;
    unitTest = mn.unitTest;
    allowFreeTx = mn.allowFreeTx;
    protocolVersion = mn.protocolVersion;
    nLastDsq = mn.nLastDsq;
    nScanningErrorCount = mn.nScanningErrorCount;
    nLastScanningErrorBlockHeight = mn.nLastScanningErrorBlockHeight;
}
// Builds a signed masternode broadcast from user-supplied strings: service
// address, masternode secret key, and collateral txid/output index.
// On failure sets strErrorRet and returns false; on success delegates to the
// key/txin overload which fills mnbRet.
// Fixes: the non-mainnet port error message was garbled ("%d is the only
// supported on mainnet."); the already-parsed `service` is reused instead of
// re-parsing strService for the final Create call.
bool CMasternodeBroadcast::Create(std::string strService, std::string strKeyMasternode, std::string strTxHash, std::string strOutputIndex, std::string& strErrorRet, CMasternodeBroadcast& mnbRet, bool fOffline)
{
    CTxIn txin;
    CPubKey pubKeyCollateralAddressNew;
    CKey keyCollateralAddressNew;
    CPubKey pubKeyMasternodeNew;
    CKey keyMasternodeNew;
    //need correct blocks to send ping
    if (!fOffline && !masternodeSync.IsBlockchainSynced()) {
        strErrorRet = "Sync in progress. Must wait until sync is complete to start Masternode";
        LogPrintf("CMasternodeBroadcast::Create -- %s\n", strErrorRet);
        return false;
    }
    if (!obfuScationSigner.GetKeysFromSecret(strKeyMasternode, keyMasternodeNew, pubKeyMasternodeNew)) {
        strErrorRet = strprintf("Invalid masternode key %s", strKeyMasternode);
        LogPrintf("CMasternodeBroadcast::Create -- %s\n", strErrorRet);
        return false;
    }
    if (!pwalletMain->GetMasternodeVinAndKeys(txin, pubKeyCollateralAddressNew, keyCollateralAddressNew, strTxHash, strOutputIndex)) {
        strErrorRet = strprintf("Could not allocate txin %s:%s for masternode %s", strTxHash, strOutputIndex, strService);
        LogPrintf("CMasternodeBroadcast::Create -- %s\n", strErrorRet);
        return false;
    }
    // The mainnet default port is mandatory on mainnet and forbidden on every
    // other network.
    CService service = CService(strService);
    int mainnetDefaultPort = Params(CBaseChainParams::MAIN).GetDefaultPort();
    if (Params().NetworkID() == CBaseChainParams::MAIN) {
        if (service.GetPort() != mainnetDefaultPort) {
            strErrorRet = strprintf("Invalid port %u for masternode %s, only %d is supported on mainnet.", service.GetPort(), strService, mainnetDefaultPort);
            LogPrintf("CMasternodeBroadcast::Create -- %s\n", strErrorRet);
            return false;
        }
    } else if (service.GetPort() == mainnetDefaultPort) {
        strErrorRet = strprintf("Invalid port %u for masternode %s, %d is only supported on mainnet.", service.GetPort(), strService, mainnetDefaultPort);
        LogPrintf("CMasternodeBroadcast::Create -- %s\n", strErrorRet);
        return false;
    }
    return Create(txin, service, keyCollateralAddressNew, pubKeyCollateralAddressNew, keyMasternodeNew, pubKeyMasternodeNew, strErrorRet, mnbRet);
}
// Builds and signs a masternode broadcast from the given collateral input,
// keys and service address. On success mnbRet holds a fully signed broadcast
// with a fresh signed ping attached; on any failure mnbRet is reset to a
// default-constructed broadcast, strErrorRet is set, and false is returned.
bool CMasternodeBroadcast::Create(CTxIn txin, CService service, CKey keyCollateralAddressNew, CPubKey pubKeyCollateralAddressNew, CKey keyMasternodeNew, CPubKey pubKeyMasternodeNew, std::string& strErrorRet, CMasternodeBroadcast& mnbRet)
{
    // wait for reindex and/or import to finish
    if (fImporting || fReindex) return false;

    LogPrint("masternode", "CMasternodeBroadcast::Create -- pubKeyCollateralAddressNew = %s, pubKeyMasternodeNew.GetID() = %s\n",
        CBitcoinAddress(pubKeyCollateralAddressNew.GetID()).ToString(),
        pubKeyMasternodeNew.GetID().ToString());

    // Sign a ping for this collateral with the masternode key first; the
    // ping is embedded in the broadcast below.
    CMasternodePing mnp(txin);
    if (!mnp.Sign(keyMasternodeNew, pubKeyMasternodeNew)) {
        strErrorRet = strprintf("Failed to sign ping, masternode=%s", txin.prevout.hash.ToString());
        LogPrintf("CMasternodeBroadcast::Create -- %s\n", strErrorRet);
        mnbRet = CMasternodeBroadcast();
        return false;
    }

    mnbRet = CMasternodeBroadcast(service, txin, pubKeyCollateralAddressNew, pubKeyMasternodeNew, PROTOCOL_VERSION);

    if (!mnbRet.IsValidNetAddr()) {
        strErrorRet = strprintf("Invalid IP address %s, masternode=%s", mnbRet.addr.ToStringIP (), txin.prevout.hash.ToString());
        LogPrintf("CMasternodeBroadcast::Create -- %s\n", strErrorRet);
        mnbRet = CMasternodeBroadcast();
        return false;
    }

    mnbRet.lastPing = mnp;

    // The broadcast itself is signed with the collateral key (proves
    // ownership of the 1000-coin output).
    if (!mnbRet.Sign(keyCollateralAddressNew)) {
        strErrorRet = strprintf("Failed to sign broadcast, masternode=%s", txin.prevout.hash.ToString());
        LogPrintf("CMasternodeBroadcast::Create -- %s\n", strErrorRet);
        mnbRet = CMasternodeBroadcast();
        return false;
    }

    return true;
}
// Validates an incoming masternode broadcast and, when we already know this
// masternode, refreshes our local entry from it. nDos is set to a
// misbehaviour score for bannable offences (bad signature / malformed keys).
// Returns false when the broadcast must be rejected.
bool CMasternodeBroadcast::CheckAndUpdate(int& nDos)
{
    // make sure signature isn't in the future (past is OK)
    if (sigTime > GetAdjustedTime() + 60 * 60) {
        LogPrintf("mnb - Signature rejected, too far into the future %s\n", vin.prevout.hash.ToString());
        nDos = 1;
        return false;
    }

    // Rebuild the canonical signed message; layout must match Sign().
    std::string vchPubKey(pubKeyCollateralAddress.begin(), pubKeyCollateralAddress.end());
    std::string vchPubKey2(pubKeyMasternode.begin(), pubKeyMasternode.end());
    std::string strMessage = addr.ToString() + boost::lexical_cast<std::string>(sigTime) + vchPubKey + vchPubKey2 + boost::lexical_cast<std::string>(protocolVersion);

    if (protocolVersion < masternodePayments.GetMinMasternodePaymentsProto()) {
        LogPrintf("mnb - ignoring outdated Masternode %s protocol version %d\n", vin.prevout.hash.ToString(), protocolVersion);
        return false;
    }

    // Both keys must produce a standard 25-byte P2PKH script.
    CScript pubkeyScript;
    pubkeyScript = GetScriptForDestination(pubKeyCollateralAddress.GetID());

    if (pubkeyScript.size() != 25) {
        LogPrintf("mnb - pubkey the wrong size\n");
        nDos = 100;
        return false;
    }

    CScript pubkeyScript2;
    pubkeyScript2 = GetScriptForDestination(pubKeyMasternode.GetID());

    if (pubkeyScript2.size() != 25) {
        LogPrintf("mnb - pubkey2 the wrong size\n");
        nDos = 100;
        return false;
    }

    if (!vin.scriptSig.empty()) {
        LogPrintf("mnb - Ignore Not Empty ScriptSig %s\n", vin.prevout.hash.ToString());
        return false;
    }

    std::string errorMessage = "";
    if (!obfuScationSigner.VerifyMessage(pubKeyCollateralAddress, sig, strMessage, errorMessage)) {
        LogPrintf("mnb - Got bad Masternode address signature\n");
        nDos = 100;
        return false;
    }

    // Port policy: 49797 required on mainnet, forbidden elsewhere
    // (presumably this fork's mainnet P2P port — verify against chainparams).
    if (Params().NetworkID() == CBaseChainParams::MAIN) {
        if (addr.GetPort() != 49797) return false;
    } else if (addr.GetPort() == 49797)
        return false;

    //search existing Masternode list, this is where we update existing Masternodes with new mnb broadcasts
    CMasternode* pmn = mnodeman.Find(vin);

    // no such masternode, nothing to update
    if (pmn == NULL)
        return true;
    else {
        // this broadcast older than we have, it's bad.
        if (pmn->sigTime > sigTime) {
            LogPrintf("mnb - Bad sigTime %d for Masternode %s (existing broadcast is at %d)\n",
                sigTime, vin.prevout.hash.ToString(), pmn->sigTime);
            return false;
        }
        // masternode is not enabled yet/already, nothing to update
        if (!pmn->IsEnabled()) return true;
    }

    // mn.pubkey = pubkey, IsVinAssociatedWithPubkey is validated once below,
    // after that they just need to match
    if (pmn->pubKeyCollateralAddress == pubKeyCollateralAddress && !pmn->IsBroadcastedWithin(MASTERNODE_MIN_MNB_SECONDS)) {
        //take the newest entry
        LogPrint("masternode", "mnb - Got updated entry for %s\n", vin.prevout.hash.ToString());
        if (pmn->UpdateFromNewBroadcast((*this))) {
            pmn->Check();
            if (pmn->IsEnabled()) Relay();
        }
        masternodeSync.AddedMasternodeList(GetHash());
    }

    return true;
}
// Verifies the broadcast's collateral input (still unspent, sufficiently
// confirmed, and sigTime not earlier than the block where the collateral
// reached MASTERNODE_MIN_CONFIRMATIONS), then adds the masternode to the
// manager and relays it. nDoS is set via validation state when the dummy
// spend is invalid. Returns false when the entry is rejected or should be
// retried later (lock contention / missing blocks).
bool CMasternodeBroadcast::CheckInputsAndAdd(int& nDoS)
{
    // we are a masternode with the same vin (i.e. already activated) and this mnb is ours (matches our Masternode privkey)
    // so nothing to do here for us
    if (fMasterNode && vin.prevout == activeMasternode.vin.prevout && pubKeyMasternode == activeMasternode.pubKeyMasternode)
        return true;

    // search existing Masternode list
    CMasternode* pmn = mnodeman.Find(vin);
    if (pmn != NULL) {
        // nothing to do here if we already know about this masternode and it's enabled
        if (pmn->IsEnabled()) return true;
        // if it's not enabled, remove old MN first and continue
        else
            mnodeman.Remove(pmn->vin);
    }

    // Build a dummy transaction that spends the collateral for just under the
    // collateral amount: it is acceptable iff the collateral is still unspent.
    CValidationState state;
    CMutableTransaction tx = CMutableTransaction();
    CTxOut vout = CTxOut(((Params().MasternodeCollateralLimit() - 0.01)) * COIN, obfuScationPool.collateralPubKey);
    tx.vin.push_back(vin);
    tx.vout.push_back(vout);

    {
        TRY_LOCK(cs_main, lockMain);
        if (!lockMain) {
            // not mnb fault, let it to be checked again later
            mnodeman.mapSeenMasternodeBroadcast.erase(GetHash());
            masternodeSync.mapSeenSyncMNB.erase(GetHash());
            return false;
        }

        if (!AcceptableInputs(mempool, state, CTransaction(tx), false, NULL)) {
            //set nDos
            state.IsInvalid(nDoS);
            return false;
        }
    }

    LogPrint("masternode", "mnb - Accepted Masternode entry\n");

    if (GetInputAge(vin) < MASTERNODE_MIN_CONFIRMATIONS) {
        LogPrintf("mnb - Input must have at least %d confirmations\n", MASTERNODE_MIN_CONFIRMATIONS);
        // maybe we miss few blocks, let this mnb to be checked again later
        mnodeman.mapSeenMasternodeBroadcast.erase(GetHash());
        masternodeSync.mapSeenSyncMNB.erase(GetHash());
        return false;
    }

    // verify that sig time is legit in past
    // should be at least not earlier than block when 1000 LPC tx got MASTERNODE_MIN_CONFIRMATIONS
    uint256 hashBlock = 0;
    CTransaction tx2;
    GetTransaction(vin.prevout.hash, tx2, hashBlock, true);
    BlockMap::iterator mi = mapBlockIndex.find(hashBlock);
    if (mi != mapBlockIndex.end() && (*mi).second) {
        CBlockIndex* pMNIndex = (*mi).second;                                                        // block for 1000 LightPayCoin tx -> 1 confirmation
        CBlockIndex* pConfIndex = chainActive[pMNIndex->nHeight + MASTERNODE_MIN_CONFIRMATIONS - 1]; // block where tx got MASTERNODE_MIN_CONFIRMATIONS
        // chainActive[] returns NULL for out-of-range heights; guard so a
        // reorg between the GetInputAge() check and here cannot crash us.
        if (pConfIndex != NULL && pConfIndex->GetBlockTime() > sigTime) {
            LogPrintf("mnb - Bad sigTime %d for Masternode %s (%i conf block is at %d)\n",
                sigTime, vin.prevout.hash.ToString(), MASTERNODE_MIN_CONFIRMATIONS, pConfIndex->GetBlockTime());
            return false;
        }
    }

    LogPrintf("mnb - Got NEW Masternode entry - %s - %lli \n", vin.prevout.hash.ToString(), sigTime);
    CMasternode mn(*this);
    mnodeman.Add(mn);

    // if it matches our Masternode privkey, then we've been remotely activated
    if (pubKeyMasternode == activeMasternode.pubKeyMasternode && protocolVersion == PROTOCOL_VERSION) {
        activeMasternode.EnableHotColdMasterNode(vin, addr);
    }

    // Do not relay private/local addresses, except on regtest.
    bool isLocal = addr.IsRFC1918() || addr.IsLocal();
    if (Params().NetworkID() == CBaseChainParams::REGTEST) isLocal = false;

    if (!isLocal) Relay();

    return true;
}
void CMasternodeBroadcast::Relay()
{
CInv inv(MSG_MASTERNODE_ANNOUNCE, GetHash());
RelayInv(inv);
}
// Signs this broadcast with the collateral key. Refreshes sigTime to the
// current adjusted time, signs the canonical message
// (addr + sigTime + both serialized pubkeys + protocolVersion), and verifies
// the produced signature before returning success.
bool CMasternodeBroadcast::Sign(CKey& keyCollateralAddress)
{
    std::string errorMessage;

    // Raw serialized public keys are embedded in the signed message.
    std::string vchPubKey(pubKeyCollateralAddress.begin(), pubKeyCollateralAddress.end());
    std::string vchPubKey2(pubKeyMasternode.begin(), pubKeyMasternode.end());

    sigTime = GetAdjustedTime();

    // NOTE: the message layout must stay in sync with CheckAndUpdate().
    std::string strMessage = addr.ToString() + boost::lexical_cast<std::string>(sigTime) + vchPubKey + vchPubKey2 + boost::lexical_cast<std::string>(protocolVersion);

    if (!obfuScationSigner.SignMessage(strMessage, errorMessage, sig, keyCollateralAddress)) {
        LogPrintf("CMasternodeBroadcast::Sign() - Error: %s\n", errorMessage);
        return false;
    }

    // Sanity-check our own signature immediately.
    if (!obfuScationSigner.VerifyMessage(pubKeyCollateralAddress, sig, strMessage, errorMessage)) {
        LogPrintf("CMasternodeBroadcast::Sign() - Error: %s\n", errorMessage);
        return false;
    }

    return true;
}
// Default-construct an empty, unsigned ping (no input, null block hash,
// zero timestamp, empty signature).
CMasternodePing::CMasternodePing()
    : vin(),
      blockHash(uint256(0)),
      sigTime(0),
      vchSig()
{
}
// Construct a ping for the given collateral input. The referenced block is
// 12 blocks behind the tip so peers on slightly different tips can still
// validate it.
CMasternodePing::CMasternodePing(CTxIn& newVin)
{
    vin = newVin;
    // chainActive[] returns NULL for out-of-range heights; guard against a
    // short chain (early sync / regtest) to avoid dereferencing NULL.
    if (chainActive.Height() >= 12)
        blockHash = chainActive[chainActive.Height() - 12]->GetBlockHash();
    else
        blockHash = uint256(0);
    sigTime = GetAdjustedTime();
    vchSig = std::vector<unsigned char>();
}
// Signs this ping with the masternode key. Refreshes sigTime, signs the
// canonical message (vin + blockHash + sigTime), and verifies the produced
// signature before returning success.
bool CMasternodePing::Sign(CKey& keyMasternode, CPubKey& pubKeyMasternode)
{
    std::string errorMessage;

    sigTime = GetAdjustedTime();

    // NOTE: the message layout must stay in sync with CheckAndUpdate().
    // (Removed unused local strMasterNodeSignMessage.)
    std::string strMessage = vin.ToString() + blockHash.ToString() + boost::lexical_cast<std::string>(sigTime);

    if (!obfuScationSigner.SignMessage(strMessage, errorMessage, vchSig, keyMasternode)) {
        LogPrintf("CMasternodePing::Sign() - Error: %s\n", errorMessage);
        return false;
    }

    // Sanity-check our own signature immediately.
    if (!obfuScationSigner.VerifyMessage(pubKeyMasternode, vchSig, strMessage, errorMessage)) {
        LogPrintf("CMasternodePing::Sign() - Error: %s\n", errorMessage);
        return false;
    }

    return true;
}
// Validates an incoming ping and updates the matching masternode's lastPing.
// Rejects pings more than an hour into the future or past (nDos = 1), bad
// signatures (nDos = 33), and pings referencing a block more than 24 blocks
// behind the tip. When fRequireEnabled is set, pings for disabled masternodes
// are rejected. Accepted pings are relayed. Returns true only when the ping
// was accepted and applied.
bool CMasternodePing::CheckAndUpdate(int& nDos, bool fRequireEnabled)
{
    if (sigTime > GetAdjustedTime() + 60 * 60) {
        LogPrintf("CMasternodePing::CheckAndUpdate - Signature rejected, too far into the future %s\n", vin.prevout.hash.ToString());
        nDos = 1;
        return false;
    }

    if (sigTime <= GetAdjustedTime() - 60 * 60) {
        LogPrintf("CMasternodePing::CheckAndUpdate - Signature rejected, too far into the past %s - %d %d \n", vin.prevout.hash.ToString(), sigTime, GetAdjustedTime());
        nDos = 1;
        return false;
    }

    LogPrint("masternode", "CMasternodePing::CheckAndUpdate - New Ping - %s - %lli\n", blockHash.ToString(), sigTime);

    // see if we have this Masternode
    CMasternode* pmn = mnodeman.Find(vin);
    if (pmn != NULL && pmn->protocolVersion >= masternodePayments.GetMinMasternodePaymentsProto()) {
        if (fRequireEnabled && !pmn->IsEnabled()) return false;

        // LogPrintf("mnping - Found corresponding mn for vin: %s\n", vin.ToString());
        // update only if there is no known ping for this masternode or
        // last ping was more then MASTERNODE_MIN_MNP_SECONDS-60 ago comparing to this one
        if (!pmn->IsPingedWithin(MASTERNODE_MIN_MNP_SECONDS - 60, sigTime)) {
            // Rebuild the canonical signed message; layout must match Sign().
            std::string strMessage = vin.ToString() + blockHash.ToString() + boost::lexical_cast<std::string>(sigTime);

            std::string errorMessage = "";
            if (!obfuScationSigner.VerifyMessage(pmn->pubKeyMasternode, vchSig, strMessage, errorMessage)) {
                LogPrintf("CMasternodePing::CheckAndUpdate - Got bad Masternode address signature %s\n", vin.prevout.hash.ToString());
                nDos = 33;
                return false;
            }

            // The referenced block must be known to us and reasonably fresh.
            BlockMap::iterator mi = mapBlockIndex.find(blockHash);
            if (mi != mapBlockIndex.end() && (*mi).second) {
                if ((*mi).second->nHeight < chainActive.Height() - 24) {
                    LogPrintf("CMasternodePing::CheckAndUpdate - Masternode %s block hash %s is too old\n", vin.prevout.hash.ToString(), blockHash.ToString());
                    // Do nothing here (no Masternode update, no mnping relay)
                    // Let this node to be visible but fail to accept mnping
                    return false;
                }
            } else {
                if (fDebug) LogPrintf("CMasternodePing::CheckAndUpdate - Masternode %s block hash %s is unknown\n", vin.prevout.hash.ToString(), blockHash.ToString());
                // maybe we stuck so we shouldn't ban this node, just fail to accept it
                // TODO: or should we also request this block?
                return false;
            }

            pmn->lastPing = *this;

            //mnodeman.mapSeenMasternodeBroadcast.lastPing is probably outdated, so we'll update it
            CMasternodeBroadcast mnb(*pmn);
            uint256 hash = mnb.GetHash();
            if (mnodeman.mapSeenMasternodeBroadcast.count(hash)) {
                mnodeman.mapSeenMasternodeBroadcast[hash].lastPing = *this;
            }

            pmn->Check(true);
            if (!pmn->IsEnabled()) return false;

            LogPrint("masternode", "CMasternodePing::CheckAndUpdate - Masternode ping accepted, vin: %s\n", vin.prevout.hash.ToString());

            Relay();
            return true;
        }
        LogPrint("masternode", "CMasternodePing::CheckAndUpdate - Masternode ping arrived too early, vin: %s\n", vin.prevout.hash.ToString());
        //nDos = 1; //disable, this is happening frequently and causing banned peers
        return false;
    }
    LogPrint("masternode", "CMasternodePing::CheckAndUpdate - Couldn't find compatible Masternode entry, vin: %s\n", vin.prevout.hash.ToString());

    return false;
}
void CMasternodePing::Relay()
{
CInv inv(MSG_MASTERNODE_PING, GetHash());
RelayInv(inv);
}
|
#include <iostream>
using namespace std;
// Reads an iteration count n and prints the area of the resulting square.
// Starting from side length 2, each iteration grows the side s to 2*s - 1,
// so after n iterations the side is 2^n + 1.
int main() {
    int iterations;
    std::cin >> iterations;

    int side = 2;
    while (iterations-- > 0) {
        side = 2 * side - 1;
    }

    std::cout << side * side << "\n";
    return 0;
}
|
/*++
Copyright (c) Microsoft Corporation.
Licensed under the MIT License.
Abstract:
MsQuic API Unittest
--*/
#include "precomp.h"
#ifdef QUIC_CLOG
#include "ApiTest.cpp.clog.h"
#endif
#pragma warning(disable:6387) // '_Param_(1)' could be '0': this does not adhere to the specification for the function
// Validates the top-level MsQuic library entry points: opening with a null
// out-parameter, closing a null handle, and get/set of the global TLS
// provider parameter.
void QuicTestValidateApi()
{
    TEST_QUIC_STATUS(
        QUIC_STATUS_INVALID_PARAMETER,
        MsQuicOpen2(nullptr));

    // Closing a null API table must be a safe no-op.
    MsQuicClose(nullptr);

    // TODO - Move these into GetParam/SetParam tests
    QUIC_TLS_PROVIDER TlsProvider;
    uint32_t BufferLength = sizeof(TlsProvider);
    TEST_QUIC_SUCCEEDED(
        MsQuic->GetParam(
            nullptr,
            QUIC_PARAM_GLOBAL_TLS_PROVIDER,
            &BufferLength,
            &TlsProvider));

    // Setting the global TLS provider is expected to be rejected.
    TEST_EQUAL(
        MsQuic->SetParam(
            nullptr,
            QUIC_PARAM_GLOBAL_TLS_PROVIDER,
            BufferLength,
            &TlsProvider),
        QUIC_STATUS_INVALID_PARAMETER);
}
// Validates registration parameter handling: a null out-parameter must be
// rejected, and closing a null registration must be a safe no-op.
void QuicTestValidateRegistration()
{
    TEST_QUIC_STATUS(
        QUIC_STATUS_INVALID_PARAMETER,
        MsQuic->RegistrationOpen(nullptr, nullptr));

    MsQuic->RegistrationClose(nullptr);
}
// Exercises configuration parameter validation: ConfigurationOpen with
// null/invalid arguments, ALPN length limits (1-255 bytes valid, 0 and 256
// rejected), multiple ALPNs, credential loading, and (where enabled)
// resumption ticket key SetParam with one and two keys.
void QuicTestValidateConfiguration()
{
    MsQuicRegistration Registration;
    TEST_TRUE(Registration.IsValid());

    QUIC_SETTINGS EmptySettings{0};

    QUIC_SETTINGS GoodSettings{0};
    GoodSettings.IdleTimeoutMs = 30000;
    GoodSettings.IsSet.IdleTimeoutMs = TRUE;

    // ALPN buffer lengths exclude the terminating NUL (sizeof - 1):
    // RawLongAlpn is exactly 255 bytes (the maximum), RawTooLongAlpn is 256.
    const char RawGoodAlpn[] = "Alpn";
    const char RawEmptyAlpn[] = "";
    const char RawLongAlpn[] = "makethisstringjuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuustright";
    const char RawTooLongAlpn[] = "makethisextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextraextrlong";

    const QUIC_BUFFER GoodAlpn = { sizeof(RawGoodAlpn) - 1, (uint8_t*)RawGoodAlpn };
    const QUIC_BUFFER EmptyAlpn = { sizeof(RawEmptyAlpn) - 1, (uint8_t*)RawEmptyAlpn };
    const QUIC_BUFFER LongAlpn = { sizeof(RawLongAlpn) - 1, (uint8_t*)RawLongAlpn };
    const QUIC_BUFFER TooLongAlpn = { sizeof(RawTooLongAlpn) - 1, (uint8_t*)RawTooLongAlpn };

    //
    // Test null out param.
    //
    TEST_QUIC_STATUS(
        QUIC_STATUS_INVALID_PARAMETER,
        MsQuic->ConfigurationOpen(
            Registration,
            &GoodAlpn,
            1,
            nullptr,
            0,
            nullptr,
            nullptr));

    //
    // Null registration.
    //
    {
        ConfigurationScope LocalConfiguration;
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            MsQuic->ConfigurationOpen(
                nullptr,
                &GoodAlpn,
                1,
                nullptr,
                0,
                nullptr,
                &LocalConfiguration.Handle));
    }

    //
    // Null settings.
    //
    {
        ConfigurationScope LocalConfiguration;
        TEST_QUIC_SUCCEEDED(
            MsQuic->ConfigurationOpen(
                Registration,
                &GoodAlpn,
                1,
                nullptr,
                0,
                nullptr,
                &LocalConfiguration.Handle));
    }

    //
    // Empty settings.
    //
    {
        ConfigurationScope LocalConfiguration;
        TEST_QUIC_SUCCEEDED(
            MsQuic->ConfigurationOpen(
                Registration,
                &GoodAlpn,
                1,
                &EmptySettings,
                sizeof(EmptySettings),
                nullptr,
                &LocalConfiguration.Handle));
    }

    //
    // Good settings.
    //
    {
        ConfigurationScope LocalConfiguration;
        TEST_QUIC_SUCCEEDED(
            MsQuic->ConfigurationOpen(
                Registration,
                &GoodAlpn,
                1,
                &GoodSettings,
                sizeof(GoodSettings),
                nullptr,
                &LocalConfiguration.Handle));
    }

    //
    // Invalid settings - TODO
    //

    //
    // Null ALPN.
    //
    {
        ConfigurationScope LocalConfiguration;
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            MsQuic->ConfigurationOpen(
                Registration,
                nullptr,
                0,
                nullptr,
                0,
                nullptr,
                &LocalConfiguration.Handle));
    }

    //
    // Empty ALPN.
    //
    {
        ConfigurationScope LocalConfiguration;
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            MsQuic->ConfigurationOpen(
                Registration,
                &EmptyAlpn,
                1,
                nullptr,
                0,
                nullptr,
                &LocalConfiguration.Handle));
    }

    //
    // 255-byte ALPN.
    //
    {
        ConfigurationScope LocalConfiguration;
        TEST_QUIC_SUCCEEDED(
            MsQuic->ConfigurationOpen(
                Registration,
                &LongAlpn,
                1,
                nullptr,
                0,
                nullptr,
                &LocalConfiguration.Handle));
    }

    //
    // 256-byte ALPN.
    //
    {
        ConfigurationScope LocalConfiguration;
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            MsQuic->ConfigurationOpen(
                Registration,
                &TooLongAlpn,
                1,
                nullptr,
                0,
                nullptr,
                &LocalConfiguration.Handle));
    }

    //
    // Multiple ALPNs
    //
    {
        ConfigurationScope LocalConfiguration;
        const QUIC_BUFFER TwoAlpns[] = {
            { sizeof("alpn1") - 1, (uint8_t*)"alpn1" },
            { sizeof("alpn2") - 1, (uint8_t*)"alpn2" }
        };
        TEST_QUIC_SUCCEEDED(
            MsQuic->ConfigurationOpen(
                Registration,
                TwoAlpns,
                2,
                nullptr,
                0,
                nullptr,
                &LocalConfiguration.Handle));
    }

    //
    // ConfigurationLoad
    //
    {
        ConfigurationScope LocalConfiguration;
        TEST_QUIC_SUCCEEDED(
            MsQuic->ConfigurationOpen(
                Registration,
                &GoodAlpn,
                1,
                nullptr,
                0,
                nullptr,
                &LocalConfiguration.Handle));
        TEST_QUIC_SUCCEEDED(
            MsQuic->ConfigurationLoadCredential(
                LocalConfiguration,
                &ServerSelfSignedCredConfig));
    }

#ifndef QUIC_DISABLE_TICKET_KEY_TESTS
    //
    // Set Ticket Key (single)
    //
    {
        ConfigurationScope LocalConfiguration;
        TEST_QUIC_SUCCEEDED(
            MsQuic->ConfigurationOpen(
                Registration,
                &GoodAlpn,
                1,
                nullptr,
                0,
                nullptr,
                &LocalConfiguration.Handle));
        TEST_QUIC_SUCCEEDED(
            MsQuic->ConfigurationLoadCredential(
                LocalConfiguration,
                &ServerSelfSignedCredConfig));

        QUIC_TICKET_KEY_CONFIG KeyConfig;
        CxPlatZeroMemory(&KeyConfig, sizeof(KeyConfig));
        KeyConfig.MaterialLength = 64;
        TEST_QUIC_SUCCEEDED(
            MsQuic->SetParam(
                LocalConfiguration,
                QUIC_PARAM_CONFIGURATION_TICKET_KEYS,
                sizeof(KeyConfig),
                &KeyConfig));
    }

    //
    // Set Ticket Key (multiple)
    //
    {
        ConfigurationScope LocalConfiguration;
        TEST_QUIC_SUCCEEDED(
            MsQuic->ConfigurationOpen(
                Registration,
                &GoodAlpn,
                1,
                nullptr,
                0,
                nullptr,
                &LocalConfiguration.Handle));
        TEST_QUIC_SUCCEEDED(
            MsQuic->ConfigurationLoadCredential(
                LocalConfiguration,
                &ServerSelfSignedCredConfig));

        // Two distinct keys: the second gets a different key ID.
        QUIC_TICKET_KEY_CONFIG KeyConfigs[2];
        CxPlatZeroMemory(KeyConfigs, sizeof(KeyConfigs));
        KeyConfigs[0].MaterialLength = 64;
        KeyConfigs[1].MaterialLength = 64;
        KeyConfigs[1].Id[0] = 1;
        TEST_QUIC_SUCCEEDED(
            MsQuic->SetParam(
                LocalConfiguration,
                QUIC_PARAM_CONFIGURATION_TICKET_KEYS,
                sizeof(KeyConfigs),
                KeyConfigs));
    }
#endif // QUIC_DISABLE_TICKET_KEY_TESTS
}
static
_Function_class_(QUIC_LISTENER_CALLBACK)
QUIC_STATUS
QUIC_API
DummyListenerCallback(
    HQUIC,
    void* Context,
    QUIC_LISTENER_EVENT* Event
    )
{
    //
    // Signals the stop event (when a context was supplied) on STOP_COMPLETE;
    // every other event is unsupported.
    //
    auto StopEvent = (CxPlatEvent*)Context;
    if (StopEvent == nullptr ||
        Event->Type != QUIC_LISTENER_EVENT_STOP_COMPLETE) {
        return QUIC_STATUS_NOT_SUPPORTED;
    }
    StopEvent->Set();
    return QUIC_STATUS_SUCCESS;
}
static
_Function_class_(QUIC_LISTENER_CALLBACK)
QUIC_STATUS
QUIC_API
AutoCloseListenerCallback(
    HQUIC Listener,
    void* Context,
    QUIC_LISTENER_EVENT* Event
    )
{
    //
    // Like DummyListenerCallback, but also closes the listener from inside
    // the STOP_COMPLETE callback after signalling the event.
    //
    auto StopEvent = (CxPlatEvent*)Context;
    if (StopEvent == nullptr ||
        Event->Type != QUIC_LISTENER_EVENT_STOP_COMPLETE) {
        return QUIC_STATUS_NOT_SUPPORTED;
    }
    StopEvent->Set();
    MsQuic->ListenerClose(Listener);
    return QUIC_STATUS_SUCCESS;
}
// Exercises listener API validation: open with bad parameters, stop/close
// ordering (stop before start, close before stop, double start, double stop),
// null-handle close, and closing the listener from within its own callback.
void QuicTestValidateListener()
{
    MsQuicRegistration Registration;
    TEST_TRUE(Registration.IsValid());
    MsQuicAlpn Alpn("MsQuicTest");

    MsQuicConfiguration LocalConfiguration(Registration, Alpn);
    TEST_TRUE(LocalConfiguration.IsValid());

    HQUIC Listener = nullptr;
    CxPlatEvent StopCompleteEvent;

    //
    // Null listener callback handler.
    //
    TEST_QUIC_STATUS(
        QUIC_STATUS_INVALID_PARAMETER,
        MsQuic->ListenerOpen(
            Registration,
            nullptr,
            nullptr,
            &Listener));

    //
    // Null registration.
    //
    TEST_QUIC_STATUS(
        QUIC_STATUS_INVALID_PARAMETER,
        MsQuic->ListenerOpen(
            nullptr,
            DummyListenerCallback,
            nullptr,
            &Listener));

    //
    // Null out parameter.
    //
    TEST_QUIC_STATUS(
        QUIC_STATUS_INVALID_PARAMETER,
        MsQuic->ListenerOpen(
            Registration,
            DummyListenerCallback,
            nullptr,
            nullptr));

    //
    // Stop before start.
    //
    TEST_QUIC_SUCCEEDED(
        MsQuic->ListenerOpen(
            Registration,
            DummyListenerCallback,
            &StopCompleteEvent,
            &Listener));

    MsQuic->ListenerStop(Listener);
    TEST_FALSE(StopCompleteEvent.WaitTimeout(100)); // Event not should have been set

    TEST_QUIC_SUCCEEDED(
        MsQuic->ListenerStart(
            Listener,
            Alpn,
            Alpn.Length(),
            nullptr));

    MsQuic->ListenerClose(Listener);
    TEST_TRUE(StopCompleteEvent.WaitTimeout(100)); // Event should have been set
    Listener = nullptr;

    //
    // Close before stop.
    //
    TEST_QUIC_SUCCEEDED(
        MsQuic->ListenerOpen(
            Registration,
            DummyListenerCallback,
            &StopCompleteEvent,
            &Listener));

    TEST_QUIC_SUCCEEDED(
        MsQuic->ListenerStart(
            Listener,
            Alpn,
            Alpn.Length(),
            nullptr));

    MsQuic->ListenerClose(Listener);
    TEST_TRUE(StopCompleteEvent.WaitTimeout(100)); // Event should have been set
    Listener = nullptr;

    //
    // Start twice.
    //
    TEST_QUIC_SUCCEEDED(
        MsQuic->ListenerOpen(
            Registration,
            DummyListenerCallback,
            &StopCompleteEvent,
            &Listener));

    TEST_QUIC_SUCCEEDED(
        MsQuic->ListenerStart(
            Listener,
            Alpn,
            Alpn.Length(),
            nullptr));

    TEST_QUIC_STATUS(
        QUIC_STATUS_INVALID_STATE,
        MsQuic->ListenerStart(
            Listener,
            Alpn,
            Alpn.Length(),
            nullptr));

    MsQuic->ListenerClose(Listener);
    Listener = nullptr;

    //
    // Stop twice.
    //
    // BUGFIX: pass &StopCompleteEvent as the context (previously nullptr).
    // With a null context DummyListenerCallback never sets the event, which
    // contradicts the TEST_TRUE check below.
    //
    TEST_QUIC_SUCCEEDED(
        MsQuic->ListenerOpen(
            Registration,
            DummyListenerCallback,
            &StopCompleteEvent,
            &Listener));

    MsQuic->ListenerStop(Listener);
    TEST_TRUE(StopCompleteEvent.WaitTimeout(100)); // Event should have been set

    MsQuic->ListenerStop(Listener);
    TEST_FALSE(StopCompleteEvent.WaitTimeout(100)); // Event not should have been set (again)

    MsQuic->ListenerClose(Listener);
    TEST_FALSE(StopCompleteEvent.WaitTimeout(100)); // Event not should have been set (again)
    Listener = nullptr;

    //
    // Null handle to close.
    //
    MsQuic->ListenerClose(nullptr);

    //
    // Close in callback
    //
    TEST_QUIC_SUCCEEDED(
        MsQuic->ListenerOpen(
            Registration,
            AutoCloseListenerCallback,
            &StopCompleteEvent,
            &Listener));

    TEST_QUIC_SUCCEEDED(
        MsQuic->ListenerStart(
            Listener,
            Alpn,
            Alpn.Length(),
            nullptr));

    MsQuic->ListenerStop(Listener);
    TEST_TRUE(StopCompleteEvent.WaitTimeout(100)); // Event should have been set
    Listener = nullptr;
}
// Connection callback that accepts no events; used where tests only need a
// valid callback pointer for ConnectionOpen.
static
_Function_class_(QUIC_CONNECTION_CALLBACK)
QUIC_STATUS
QUIC_API
DummyConnectionCallback(
    HQUIC,
    void*,
    QUIC_CONNECTION_EVENT*
    )
{
    return QUIC_STATUS_NOT_SUPPORTED;
}
#ifndef QUIC_DISABLE_0RTT_TESTS
// Shared state for the server-side resumption tests: signalled when the
// listener accepts a connection and when the handshake completes.
struct QuicServerSendResumeState {
    CxPlatEvent ListenerAcceptEvent;    // set by the listener accept callback
    CxPlatEvent HandshakeCompleteEvent; // set once CONNECTED is received
};
static
_Function_class_(QUIC_CONNECTION_CALLBACK)
QUIC_STATUS
QUIC_API
ResumptionFailConnectionCallback(
    HQUIC Connection,
    void* Context,
    QUIC_CONNECTION_EVENT* Event
    )
{
    switch (Event->Type) {
    case QUIC_CONNECTION_EVENT_CONNECTED: {
        //
        // Resumption is disabled on this configuration, so sending a ticket
        // must fail with QUIC_STATUS_INVALID_STATE even when connected.
        //
        const QUIC_STATUS Status =
            MsQuic->ConnectionSendResumptionTicket(
                Connection,
                QUIC_SEND_RESUMPTION_FLAG_NONE,
                0,
                nullptr);
        if (Status != QUIC_STATUS_INVALID_STATE) {
            TEST_FAILURE(
                "ConnectionSendResumptionTicket has unexpected error! Expected 0x%x, actual 0x%x",
                QUIC_STATUS_INVALID_STATE,
                Status);
        }
        ((QuicServerSendResumeState*)Context)->HandshakeCompleteEvent.Set();
        return QUIC_STATUS_SUCCESS;
    }
    case QUIC_CONNECTION_EVENT_SHUTDOWN_COMPLETE:
        MsQuic->ConnectionClose(Connection);
        return QUIC_STATUS_SUCCESS;
    default:
        return QUIC_STATUS_NOT_SUPPORTED;
    }
}
// Listener accept callback for the "invalid send resumption" test: verifies
// that sending a resumption ticket from inside the accept callback fails with
// QUIC_STATUS_INVALID_STATE, then installs ResumptionFailConnectionCallback
// on the new connection and signals the accept event.
_Function_class_(NEW_CONNECTION_CALLBACK)
static
bool
ListenerFailSendResumeCallback(
    _In_ TestListener* Listener,
    _In_ HQUIC ConnectionHandle
    )
{
    //
    // Validate sending the resumption ticket fails
    //
    QUIC_STATUS Status =
        MsQuic->ConnectionSendResumptionTicket(
            ConnectionHandle,
            QUIC_SEND_RESUMPTION_FLAG_NONE,
            0,
            nullptr);
    if (Status != QUIC_STATUS_INVALID_STATE) {
        TEST_FAILURE(
            "ConnectionSendResumptionTicket has unexpected error! Expected 0x%x, actual 0x%x",
            QUIC_STATUS_INVALID_STATE,
            Status);
        return false;
    }
    // Hand further events to ResumptionFailConnectionCallback, sharing the
    // listener's QuicServerSendResumeState context.
    MsQuic->SetCallbackHandler(ConnectionHandle, (void*)ResumptionFailConnectionCallback, Listener->Context);
    ((QuicServerSendResumeState*)Listener->Context)->ListenerAcceptEvent.Set();
    return true;
}
#endif
void QuicTestValidateConnection()
{
#ifndef QUIC_DISABLE_0RTT_TESTS
QuicServerSendResumeState ListenerContext;
#endif
MsQuicRegistration Registration(true);
TEST_TRUE(Registration.IsValid());
MsQuicAlpn Alpn("MsQuicTest");
MsQuicConfiguration ServerConfigurationNoResumption(Registration, Alpn, ServerSelfSignedCredConfig);
TEST_TRUE(ServerConfigurationNoResumption.IsValid());
MsQuicSettings Settings;
Settings.SetServerResumptionLevel(QUIC_SERVER_RESUME_ONLY);
MsQuicConfiguration ServerConfiguration(Registration, Alpn, Settings, ServerSelfSignedCredConfig);
TEST_TRUE(ServerConfiguration.IsValid());
Settings.SetIdleTimeoutMs(1000);
MsQuicCredentialConfig ClientCredConfig;
MsQuicConfiguration ClientConfiguration(Registration, Alpn, Settings, ClientCredConfig);
TEST_TRUE(ClientConfiguration.IsValid());
//
// Null out-parameter.
//
{
TestScopeLogger logScope("Null out-parameter");
TEST_QUIC_STATUS(
QUIC_STATUS_INVALID_PARAMETER,
MsQuic->ConnectionOpen(
Registration,
DummyConnectionCallback,
nullptr,
nullptr));
}
//
// Null Callback-parameter.
//
{
TestScopeLogger logScope("Null Callback-parameter");
ConnectionScope Connection;
TEST_QUIC_STATUS(
QUIC_STATUS_INVALID_PARAMETER,
MsQuic->ConnectionOpen(
Registration,
nullptr,
nullptr,
&Connection.Handle));
}
//
// Null registration parameter.
//
{
TestScopeLogger logScope("Null registration parameter");
ConnectionScope Connection;
TEST_QUIC_STATUS(
QUIC_STATUS_INVALID_PARAMETER,
MsQuic->ConnectionOpen(
nullptr,
DummyConnectionCallback,
nullptr,
&Connection.Handle));
}
//
// Null connection parameter.
//
{
TestScopeLogger logScope("Null connection parameter");
TEST_QUIC_STATUS(
QUIC_STATUS_INVALID_PARAMETER,
MsQuic->ConnectionStart(
nullptr,
ClientConfiguration,
QUIC_ADDRESS_FAMILY_INET,
"localhost",
4433));
}
//
// Bad address family
//
{
TestScopeLogger logScope("Bad address family");
ConnectionScope Connection;
TEST_QUIC_SUCCEEDED(
MsQuic->ConnectionOpen(
Registration,
DummyConnectionCallback,
nullptr,
&Connection.Handle));
TEST_QUIC_STATUS(
QUIC_STATUS_INVALID_PARAMETER,
MsQuic->ConnectionStart(
Connection.Handle,
ClientConfiguration,
127,
"localhost",
4433));
}
//
// Null server name
//
{
TestScopeLogger logScope("Null server name");
ConnectionScope Connection;
TEST_QUIC_SUCCEEDED(
MsQuic->ConnectionOpen(
Registration,
DummyConnectionCallback,
nullptr,
&Connection.Handle));
TEST_QUIC_STATUS(
QUIC_STATUS_INVALID_PARAMETER,
MsQuic->ConnectionStart(
Connection.Handle,
ClientConfiguration,
QUIC_ADDRESS_FAMILY_INET,
nullptr,
4433));
}
//
// Bad port
//
{
TestScopeLogger logScope("Bad port");
ConnectionScope Connection;
TEST_QUIC_SUCCEEDED(
MsQuic->ConnectionOpen(
Registration,
DummyConnectionCallback,
nullptr,
&Connection.Handle));
TEST_QUIC_STATUS(
QUIC_STATUS_INVALID_PARAMETER,
MsQuic->ConnectionStart(
Connection.Handle,
ClientConfiguration,
QUIC_ADDRESS_FAMILY_INET,
"localhost",
0));
}
//
// Start connection twice
//
{
TestScopeLogger logScope("Start connection twice");
ConnectionScope Connection;
TEST_QUIC_SUCCEEDED(
MsQuic->ConnectionOpen(
Registration,
DummyConnectionCallback,
nullptr,
&Connection.Handle));
TEST_QUIC_SUCCEEDED(
MsQuic->ConnectionStart(
Connection.Handle,
ClientConfiguration,
QUIC_ADDRESS_FAMILY_INET,
"localhost",
4433));
//
// If ConnectionStart is called immediately for a second time, it will
// likely succeed because the previous one was queued up. It would
// instead eventually fail asynchronously. Instead, this test case
// waits a bit to allow for the previous command to be processed so
// that the second call will fail inline.
//
CxPlatSleep(500);
TEST_QUIC_STATUS(
QUIC_STATUS_INVALID_STATE,
MsQuic->ConnectionStart(
Connection.Handle,
ClientConfiguration,
QUIC_ADDRESS_FAMILY_INET,
"localhost",
4433));
}
//
// Shutdown connection and then start. Make sure there is no crash.
// Depending on the timing it's possible for the ConnectionStart call to
// either fail or succeed. This test case doesn't care about the result,
// just that no crash results because of this.
//
{
TestScopeLogger logScope("Shutdown connection and then start");
ConnectionScope Connection;
TEST_QUIC_SUCCEEDED(
MsQuic->ConnectionOpen(
Registration,
DummyConnectionCallback,
nullptr,
&Connection.Handle));
MsQuic->ConnectionShutdown(
Connection.Handle,
QUIC_CONNECTION_SHUTDOWN_FLAG_NONE,
QUIC_TEST_NO_ERROR);
MsQuic->ConnectionStart(
Connection.Handle,
ClientConfiguration,
QUIC_ADDRESS_FAMILY_INET,
"localhost",
4433);
}
//
// Shutdown connection twice
//
{
TestScopeLogger logScope("Shutdown connection twice");
ConnectionScope Connection;
TEST_QUIC_SUCCEEDED(
MsQuic->ConnectionOpen(
Registration,
DummyConnectionCallback,
nullptr,
&Connection.Handle));
MsQuic->ConnectionShutdown(
Connection.Handle,
QUIC_CONNECTION_SHUTDOWN_FLAG_NONE,
QUIC_TEST_NO_ERROR);
MsQuic->ConnectionShutdown(
Connection.Handle,
QUIC_CONNECTION_SHUTDOWN_FLAG_NONE,
QUIC_TEST_NO_ERROR);
}
//
// ConnectionShutdown null handle
//
{
TestScopeLogger logScope("ConnectionShutdown null handle");
MsQuic->ConnectionShutdown(
nullptr,
QUIC_CONNECTION_SHUTDOWN_FLAG_NONE,
QUIC_TEST_NO_ERROR);
}
//
// ConnectionClose null handle
//
{
TestScopeLogger logScope("ConnectionClose null handle");
MsQuic->ConnectionClose(nullptr);
}
//
// Invalid datagram send calls
//
{
TestScopeLogger logScope("Invalid datagram send calls");
ConnectionScope Connection;
TEST_QUIC_SUCCEEDED(
MsQuic->ConnectionOpen(
Registration,
DummyConnectionCallback,
nullptr,
&Connection.Handle));
uint8_t RawBuffer[] = "datagram";
QUIC_BUFFER DatagramBuffer = { sizeof(RawBuffer), RawBuffer };
TEST_QUIC_STATUS(
QUIC_STATUS_INVALID_PARAMETER,
MsQuic->DatagramSend(
Connection.Handle,
nullptr,
1,
QUIC_SEND_FLAG_NONE,
nullptr));
TEST_QUIC_STATUS(
QUIC_STATUS_INVALID_PARAMETER,
MsQuic->DatagramSend(
Connection.Handle,
&DatagramBuffer,
0,
QUIC_SEND_FLAG_NONE,
nullptr));
}
//
// Successful send datagram calls
//
{
TestScopeLogger logScope("Successful send datagram calls");
ConnectionScope Connection;
TEST_QUIC_SUCCEEDED(
MsQuic->ConnectionOpen(
Registration,
DummyConnectionCallback,
nullptr,
&Connection.Handle));
uint8_t RawBuffer[] = "datagram";
QUIC_BUFFER DatagramBuffer = { sizeof(RawBuffer), RawBuffer };
TEST_QUIC_SUCCEEDED(
MsQuic->DatagramSend(
Connection.Handle,
&DatagramBuffer,
1,
QUIC_SEND_FLAG_NONE,
nullptr));
}
//
// Successful set datagram receive parameter
//
{
TestScopeLogger logScope("Successful set datagram receive parameter");
ConnectionScope Connection;
TEST_QUIC_SUCCEEDED(
MsQuic->ConnectionOpen(
Registration,
DummyConnectionCallback,
nullptr,
&Connection.Handle));
BOOLEAN ReceiveDatagrams = TRUE;
TEST_QUIC_SUCCEEDED(
MsQuic->SetParam(
Connection.Handle,
QUIC_PARAM_CONN_DATAGRAM_RECEIVE_ENABLED,
sizeof(ReceiveDatagrams),
&ReceiveDatagrams));
ReceiveDatagrams = FALSE;
TEST_QUIC_SUCCEEDED(
MsQuic->SetParam(
Connection.Handle,
QUIC_PARAM_CONN_DATAGRAM_RECEIVE_ENABLED,
sizeof(ReceiveDatagrams),
&ReceiveDatagrams));
}
//
// Invalid send resumption
//
{
TestScopeLogger logScope("Invalid send resumption");
ConnectionScope Connection;
TEST_QUIC_SUCCEEDED(
MsQuic->ConnectionOpen(
Registration,
DummyConnectionCallback,
nullptr,
&Connection.Handle));
//
// NULL connection handle.
//
TEST_QUIC_STATUS(
QUIC_STATUS_INVALID_PARAMETER,
MsQuic->ConnectionSendResumptionTicket(
nullptr,
QUIC_SEND_RESUMPTION_FLAG_NONE,
0,
nullptr));
//
// Can only be called on server Connections.
//
TEST_QUIC_STATUS(
QUIC_STATUS_INVALID_PARAMETER,
MsQuic->ConnectionSendResumptionTicket(
Connection.Handle,
QUIC_SEND_RESUMPTION_FLAG_NONE,
0,
nullptr));
//
// Validate flags are within range.
//
TEST_QUIC_STATUS(
QUIC_STATUS_INVALID_PARAMETER,
MsQuic->ConnectionSendResumptionTicket(
Connection.Handle,
(QUIC_SEND_RESUMPTION_FLAGS)4,
0,
nullptr));
}
//
// Invalid send resumption, server-side
// Some of these cases require an actual connection to succeed, so
// they won't work on Schannel in AZP.
// Currently disabling these test cases for TLS platforms without 0-RTT.
//
#ifndef QUIC_DISABLE_0RTT_TESTS
{
TestScopeLogger logScopeouter("Invalid send resumption, server-side");
TestListener MyListener(Registration, ListenerFailSendResumeCallback, ServerConfigurationNoResumption);
TEST_TRUE(MyListener.IsValid());
TEST_QUIC_SUCCEEDED(MyListener.Start(Alpn, Alpn.Length()));
QuicAddr ServerLocalAddr;
TEST_QUIC_SUCCEEDED(MyListener.GetLocalAddr(ServerLocalAddr));
MyListener.Context = &ListenerContext;
{
//
// Validate that the resumption ticket call fails in the listener.
//
{
TestScopeLogger logScope("SendResumption in Listener callback");
MsQuicConnection Connection(Registration);
TEST_QUIC_SUCCEEDED(Connection.GetInitStatus());
TEST_QUIC_SUCCEEDED(Connection.Start(ClientConfiguration, ServerLocalAddr.GetFamily(), QUIC_TEST_LOOPBACK_FOR_AF(ServerLocalAddr.GetFamily()), ServerLocalAddr.GetPort()));
TEST_TRUE(ListenerContext.ListenerAcceptEvent.WaitTimeout(2000));
}
//
// Ensure sending a resumption ticket fails even when connected
// because resumption is not enabled.
//
{
TestScopeLogger logScope("SendResumption with resumption disabled");
MsQuicConnection Connection(Registration);
TEST_QUIC_SUCCEEDED(Connection.GetInitStatus());
TEST_QUIC_SUCCEEDED(Connection.Start(ClientConfiguration, ServerLocalAddr.GetFamily(), QUIC_TEST_LOOPBACK_FOR_AF(ServerLocalAddr.GetFamily()), ServerLocalAddr.GetPort()));
TEST_TRUE(ListenerContext.ListenerAcceptEvent.WaitTimeout(2000));
TEST_TRUE(ListenerContext.HandshakeCompleteEvent.WaitTimeout(2000)); // Wait for server to get connected
}
//
// Enable resumption but ensure failure because the connection
// isn't in connected state yet.
//
{
TestScopeLogger logScope("SendResumption handshake not complete");
MsQuicConnection Connection(Registration);
TEST_QUIC_SUCCEEDED(Connection.GetInitStatus());
TEST_QUIC_SUCCEEDED(Connection.Start(ClientConfiguration, ServerLocalAddr.GetFamily(), QUIC_TEST_LOOPBACK_FOR_AF(ServerLocalAddr.GetFamily()), ServerLocalAddr.GetPort()));
TEST_TRUE(ListenerContext.ListenerAcceptEvent.WaitTimeout(2000));
TEST_TRUE(Connection.HandshakeCompleteEvent.WaitTimeout(2000)); // Wait for client to get connected
//
// TODO: add test case to validate ConnectionSendResumptionTicket:
// * succeeds when resumption is enabled and once connected.
//
}
}
}
#endif
}
//
// Stream-shutdown callback for server-side test streams. Ownership of the
// TestStream was transferred to this callback when the stream was accepted
// (see ServerApiTestNewStream), so it is deleted here once fully shut down.
//
_Function_class_(STREAM_SHUTDOWN_CALLBACK)
static
void
ServerApiTestStreamShutdown(
    _In_ TestStream* Stream
    )
{
    delete Stream;
}
//
// Accepts an incoming stream on a server test connection by wrapping the raw
// handle in a TestStream that deletes itself on shutdown. If the wrapper
// cannot be created or is invalid, it is released and the test is failed.
//
_Function_class_(NEW_STREAM_CALLBACK)
static
void
ServerApiTestNewStream(
    _In_ TestConnection* /* Connection */,
    _In_ HQUIC StreamHandle,
    _In_ QUIC_STREAM_OPEN_FLAGS Flags
    )
{
    auto Accepted = TestStream::FromStreamHandle(StreamHandle, ServerApiTestStreamShutdown, Flags);
    const bool Usable = (Accepted != nullptr) && Accepted->IsValid();
    if (!Usable) {
        delete Accepted;
        TEST_FAILURE("Failed to accept new TestStream.");
    }
}
//
// Accepts an incoming connection on the test listener. The listener's Context
// points at a TestConnection* slot owned by the test body; the new connection
// is stored there so the test can wait on it. Returns false (rejecting the
// connection) if construction fails.
//
_Function_class_(NEW_CONNECTION_CALLBACK)
static
bool
ListenerAcceptCallback(
    _In_ TestListener* Listener,
    _In_ HQUIC ConnectionHandle
    )
{
    auto ConnectionSlot = (TestConnection**)Listener->Context;
    auto Accepted = new(std::nothrow) TestConnection(ConnectionHandle, ServerApiTestNewStream);
    *ConnectionSlot = Accepted;
    if (Accepted != nullptr && Accepted->IsValid()) {
        return true;
    }
    TEST_FAILURE("Failed to accept new TestConnection.");
    delete Accepted;
    return false;
}
//
// Minimal stream callback for negative-path tests: no data and no send
// completions are ever expected, so those events fail the test. Everything
// else is silently accepted.
//
_Function_class_(QUIC_STREAM_CALLBACK)
static
QUIC_STATUS
QUIC_API
DummyStreamCallback(
    _In_ HQUIC /*Stream*/,
    _In_opt_ void* /*Context*/,
    _Inout_ QUIC_STREAM_EVENT* Event
    )
{
    if (Event->Type == QUIC_STREAM_EVENT_RECEIVE) {
        if (Event->RECEIVE.TotalBufferLength != 0) {
            TEST_FAILURE("QUIC_STREAM_EVENT_RECEIVE with data should never be called!");
        }
    } else if (Event->Type == QUIC_STREAM_EVENT_SEND_COMPLETE) {
        TEST_FAILURE("QUIC_STREAM_EVENT_SEND_COMPLETE should never be called!");
    }
    return QUIC_STATUS_SUCCESS;
}
//
// Shared state between a test body and ShutdownStreamCallback, used to
// observe stream start/shutdown completion from the callback thread.
//
struct ShutdownStreamContext {
    QUIC_STATUS StartCompleteStatus { QUIC_STATUS_SUCCESS }; // Status delivered in QUIC_STREAM_EVENT_START_COMPLETE.
    bool ShutdownComplete { false }; // Set once QUIC_STREAM_EVENT_SHUTDOWN_COMPLETE fires.
    CxPlatEvent StartCompleteEvent; // Signaled on start completion.
    CxPlatEvent ShutdownCompleteEvent; // Signaled on shutdown completion.
    ShutdownStreamContext() { }
};
//
// Stream callback that records start/shutdown completion into a
// ShutdownStreamContext (passed as the callback context) so the test body
// can wait on and assert against them. As with DummyStreamCallback, data
// receives and send completions are unexpected and fail the test.
//
_Function_class_(QUIC_STREAM_CALLBACK)
static
QUIC_STATUS
QUIC_API
ShutdownStreamCallback(
    _In_ HQUIC /*Stream*/,
    _In_opt_ void* Context,
    _Inout_ QUIC_STREAM_EVENT* Event
    )
{
    auto Ctx = (ShutdownStreamContext*)Context;
    if (Event->Type == QUIC_STREAM_EVENT_START_COMPLETE) {
        Ctx->StartCompleteStatus = Event->START_COMPLETE.Status;
        Ctx->StartCompleteEvent.Set();
    } else if (Event->Type == QUIC_STREAM_EVENT_RECEIVE) {
        if (Event->RECEIVE.TotalBufferLength != 0) {
            TEST_FAILURE("QUIC_STREAM_EVENT_RECEIVE with data should never be called!");
        }
    } else if (Event->Type == QUIC_STREAM_EVENT_SEND_COMPLETE) {
        TEST_FAILURE("QUIC_STREAM_EVENT_SEND_COMPLETE should never be called!");
    } else if (Event->Type == QUIC_STREAM_EVENT_SHUTDOWN_COMPLETE) {
        Ctx->ShutdownComplete = true;
        Ctx->ShutdownCompleteEvent.Set();
    }
    return QUIC_STATUS_SUCCESS;
}
//
// Like DummyStreamCallback, but send completions are tolerated; only a
// receive event is treated as a test failure.
//
_Function_class_(QUIC_STREAM_CALLBACK)
static
QUIC_STATUS
QUIC_API
AllowSendCompleteStreamCallback(
    _In_ HQUIC /*Stream*/,
    _In_opt_ void* /*Context*/,
    _Inout_ QUIC_STREAM_EVENT* Event
    )
{
    if (Event->Type == QUIC_STREAM_EVENT_RECEIVE) {
        TEST_FAILURE("QUIC_STREAM_EVENT_RECEIVE should never be called!");
    }
    return QUIC_STATUS_SUCCESS;
}
//
// Exercises the stream API surface (StreamOpen/Start/Send/Shutdown/Close)
// with invalid arguments and edge-case call orders. When 'Connect' is true,
// the client connection is first fully connected to a local server so the
// post-connection-shutdown cases at the end can also run; when false, the
// stream-limit (FAIL_BLOCKED) cases are exercised instead.
//
void QuicTestValidateStream(bool Connect)
{
    MsQuicRegistration Registration;
    TEST_TRUE(Registration.IsValid());
    MsQuicAlpn Alpn("MsQuicTest");
    MsQuicSettings Settings;
    Settings.SetPeerBidiStreamCount(32);
    MsQuicConfiguration ServerConfiguration(Registration, Alpn, Settings, ServerSelfSignedCredConfig);
    TEST_TRUE(ServerConfiguration.IsValid());
    MsQuicCredentialConfig ClientCredConfig;
    MsQuicConfiguration ClientConfiguration(Registration, Alpn, ClientCredConfig);
    TEST_TRUE(ClientConfiguration.IsValid());
    QUIC_BUFFER Buffers[1] = {};
    //
    // Force the Client, Server, and Listener to clean up before the Registration.
    //
    {
        TestListener MyListener(Registration, ListenerAcceptCallback, ServerConfiguration);
        TEST_TRUE(MyListener.IsValid());
        UniquePtr<TestConnection> Server;
        MyListener.Context = &Server;
        {
            TestConnection Client(Registration);
            TEST_TRUE(Client.IsValid());
            if (Connect) {
                TEST_QUIC_SUCCEEDED(MyListener.Start(Alpn, Alpn.Length()));
                QuicAddr ServerLocalAddr;
                TEST_QUIC_SUCCEEDED(MyListener.GetLocalAddr(ServerLocalAddr));
                //
                // Start client connection.
                //
                TEST_QUIC_SUCCEEDED(
                    Client.Start(
                        ClientConfiguration,
                        QuicAddrGetFamily(&ServerLocalAddr.SockAddr),
                        QUIC_TEST_LOOPBACK_FOR_AF(
                            QuicAddrGetFamily(&ServerLocalAddr.SockAddr)),
                        ServerLocalAddr.GetPort()));
                //
                // Wait for connection.
                //
                TEST_TRUE(Client.WaitForConnectionComplete());
                TEST_TRUE(Client.GetIsConnected());
                TEST_NOT_EQUAL(nullptr, Server);
                TEST_TRUE(Server->WaitForConnectionComplete());
                TEST_TRUE(Server->GetIsConnected());
            }
            //
            // Null connection.
            //
            {
                TestScopeLogger logScope("Null connection");
                StreamScope Stream;
                TEST_QUIC_STATUS(
                    QUIC_STATUS_INVALID_PARAMETER,
                    MsQuic->StreamOpen(
                        nullptr,
                        QUIC_STREAM_OPEN_FLAG_NONE,
                        DummyStreamCallback,
                        nullptr,
                        &Stream.Handle));
            }
            //
            // Null handler.
            //
            {
                TestScopeLogger logScope("Null handler");
                StreamScope Stream;
                TEST_QUIC_STATUS(
                    QUIC_STATUS_INVALID_PARAMETER,
                    MsQuic->StreamOpen(
                        Client.GetConnection(),
                        QUIC_STREAM_OPEN_FLAG_NONE,
                        nullptr,
                        nullptr,
                        &Stream.Handle));
            }
            //
            // Null out-parameter.
            //
            TEST_QUIC_STATUS(
                QUIC_STATUS_INVALID_PARAMETER,
                MsQuic->StreamOpen(
                    Client.GetConnection(),
                    QUIC_STREAM_OPEN_FLAG_NONE,
                    DummyStreamCallback,
                    nullptr,
                    nullptr));
            //
            // Fail on blocked.
            //
            // When connected, the stream-count limit permits the start; when
            // not connected, FAIL_BLOCKED should complete asynchronously with
            // STREAM_LIMIT_REACHED but not shut the stream down.
            //
            {
                TestScopeLogger logScope("Fail on blocked");
                ShutdownStreamContext Context;
                StreamScope Stream;
                TEST_QUIC_SUCCEEDED(
                    MsQuic->StreamOpen(
                        Client.GetConnection(),
                        QUIC_STREAM_OPEN_FLAG_NONE,
                        ShutdownStreamCallback,
                        &Context,
                        &Stream.Handle));
                if (Connect) {
                    TEST_QUIC_SUCCEEDED(
                        MsQuic->StreamStart(
                            Stream.Handle,
                            QUIC_STREAM_START_FLAG_FAIL_BLOCKED));
                } else {
                    TEST_QUIC_STATUS(
                        QUIC_STATUS_PENDING,
                        MsQuic->StreamStart(
                            Stream.Handle,
                            QUIC_STREAM_START_FLAG_FAIL_BLOCKED));
                    Context.StartCompleteEvent.WaitTimeout(2000);
                    TEST_EQUAL(Context.StartCompleteStatus, QUIC_STATUS_STREAM_LIMIT_REACHED);
                }
                TEST_FALSE(Context.ShutdownComplete);
            }
            //
            // Shutdown on fail.
            //
            // Same as above, but SHUTDOWN_ON_FAIL should additionally drive
            // the stream to shutdown complete after the blocked start fails.
            //
            if (!Connect) {
                TestScopeLogger logScope("Shutdown on fail");
                ShutdownStreamContext Context;
                StreamScope Stream;
                TEST_QUIC_SUCCEEDED(
                    MsQuic->StreamOpen(
                        Client.GetConnection(),
                        QUIC_STREAM_OPEN_FLAG_NONE,
                        ShutdownStreamCallback,
                        &Context,
                        &Stream.Handle));
                TEST_QUIC_STATUS(
                    QUIC_STATUS_PENDING,
                    MsQuic->StreamStart(
                        Stream.Handle,
                        QUIC_STREAM_START_FLAG_FAIL_BLOCKED | QUIC_STREAM_START_FLAG_SHUTDOWN_ON_FAIL));
                Context.StartCompleteEvent.WaitTimeout(2000);
                TEST_EQUAL(Context.StartCompleteStatus, QUIC_STATUS_STREAM_LIMIT_REACHED);
                Context.ShutdownCompleteEvent.WaitTimeout(2000);
                TEST_TRUE(Context.ShutdownComplete);
            }
            //
            // Null stream handle.
            //
            TEST_QUIC_STATUS(
                QUIC_STATUS_INVALID_PARAMETER,
                MsQuic->StreamSend(
                    nullptr,
                    Buffers,
                    ARRAYSIZE(Buffers),
                    QUIC_SEND_FLAG_NONE,
                    nullptr));
            //
            // Never started (close).
            //
            {
                TestScopeLogger logScope("Never started (close)");
                StreamScope Stream;
                TEST_QUIC_SUCCEEDED(
                    MsQuic->StreamOpen(
                        Client.GetConnection(),
                        QUIC_STREAM_OPEN_FLAG_NONE,
                        DummyStreamCallback,
                        nullptr,
                        &Stream.Handle));
            }
            //
            // Never started (shutdown graceful).
            //
            {
                TestScopeLogger logScope("Never started (shutdown graceful)");
                StreamScope Stream;
                TEST_QUIC_SUCCEEDED(
                    MsQuic->StreamOpen(
                        Client.GetConnection(),
                        QUIC_STREAM_OPEN_FLAG_NONE,
                        DummyStreamCallback,
                        nullptr,
                        &Stream.Handle));
                TEST_QUIC_SUCCEEDED(
                    MsQuic->StreamShutdown(
                        Stream.Handle,
                        QUIC_STREAM_SHUTDOWN_FLAG_GRACEFUL,
                        0));
            }
            //
            // Never started (shutdown abortive).
            //
            {
                TestScopeLogger logScope("Never started (shutdown abortive)");
                StreamScope Stream;
                TEST_QUIC_SUCCEEDED(
                    MsQuic->StreamOpen(
                        Client.GetConnection(),
                        QUIC_STREAM_OPEN_FLAG_NONE,
                        DummyStreamCallback,
                        nullptr,
                        &Stream.Handle));
                TEST_QUIC_SUCCEEDED(
                    MsQuic->StreamShutdown(
                        Stream.Handle,
                        QUIC_STREAM_SHUTDOWN_FLAG_ABORT_SEND | QUIC_STREAM_SHUTDOWN_FLAG_ABORT_RECEIVE,
                        0));
            }
            //
            // Null buffer.
            //
            {
                TestScopeLogger logScope("Null buffer");
                StreamScope Stream;
                TEST_QUIC_SUCCEEDED(
                    MsQuic->StreamOpen(
                        Client.GetConnection(),
                        QUIC_STREAM_OPEN_FLAG_NONE,
                        DummyStreamCallback,
                        nullptr,
                        &Stream.Handle));
                TEST_QUIC_SUCCEEDED(
                    MsQuic->StreamStart(
                        Stream.Handle,
                        QUIC_STREAM_START_FLAG_NONE));
                TEST_QUIC_STATUS(
                    QUIC_STATUS_INVALID_PARAMETER,
                    MsQuic->StreamSend(
                        Stream.Handle,
                        nullptr,
                        ARRAYSIZE(Buffers),
                        QUIC_SEND_FLAG_NONE,
                        nullptr));
            }
            //
            // Zero buffers.
            //
            {
                TestScopeLogger logScope("Zero buffers");
                StreamScope Stream;
                TEST_QUIC_SUCCEEDED(
                    MsQuic->StreamOpen(
                        Client.GetConnection(),
                        QUIC_STREAM_OPEN_FLAG_NONE | QUIC_STREAM_OPEN_FLAG_UNIDIRECTIONAL,
                        AllowSendCompleteStreamCallback,
                        nullptr,
                        &Stream.Handle));
                TEST_QUIC_SUCCEEDED(
                    MsQuic->StreamStart(
                        Stream.Handle,
                        QUIC_STREAM_START_FLAG_NONE));
                TEST_QUIC_SUCCEEDED(
                    MsQuic->StreamSend(
                        Stream.Handle,
                        Buffers,
                        0,
                        QUIC_SEND_FLAG_NONE,
                        nullptr));
            }
            //
            // Zero-length buffers.
            //
            {
                TestScopeLogger logScope("Zero-length buffers");
                StreamScope Stream;
                TEST_QUIC_SUCCEEDED(
                    MsQuic->StreamOpen(
                        Client.GetConnection(),
                        QUIC_STREAM_OPEN_FLAG_NONE | QUIC_STREAM_OPEN_FLAG_UNIDIRECTIONAL,
                        AllowSendCompleteStreamCallback,
                        nullptr,
                        &Stream.Handle));
                TEST_QUIC_SUCCEEDED(
                    MsQuic->StreamStart(
                        Stream.Handle,
                        QUIC_STREAM_START_FLAG_NONE));
                TEST_QUIC_SUCCEEDED(
                    MsQuic->StreamSend(
                        Stream.Handle,
                        Buffers,
                        ARRAYSIZE(Buffers),
                        QUIC_SEND_FLAG_NONE,
                        nullptr));
            }
            //
            // Send on shutdown stream.
            //
            {
                TestScopeLogger logScope("Send on shutdown stream");
                StreamScope Stream;
                TEST_QUIC_SUCCEEDED(
                    MsQuic->StreamOpen(
                        Client.GetConnection(),
                        QUIC_STREAM_OPEN_FLAG_NONE | QUIC_STREAM_OPEN_FLAG_UNIDIRECTIONAL,
                        DummyStreamCallback,
                        nullptr,
                        &Stream.Handle));
                TEST_QUIC_SUCCEEDED(
                    MsQuic->StreamStart(
                        Stream.Handle,
                        QUIC_STREAM_START_FLAG_NONE));
                // TODO: try this for each flag type
                TEST_QUIC_SUCCEEDED(
                    MsQuic->StreamShutdown(
                        Stream.Handle,
                        QUIC_STREAM_SHUTDOWN_FLAG_GRACEFUL,
                        QUIC_TEST_NO_ERROR));
                CxPlatSleep(100); // TODO - Ideally wait for shutdown event instead
                TEST_QUIC_STATUS(
                    QUIC_STATUS_INVALID_STATE,
                    MsQuic->StreamSend(
                        Stream.Handle,
                        Buffers,
                        ARRAYSIZE(Buffers),
                        QUIC_SEND_FLAG_NONE,
                        nullptr));
            }
            //
            // Double-shutdown stream.
            //
            // The second graceful shutdown must be a harmless no-op.
            //
            {
                TestScopeLogger logScope("Double-shutdown stream");
                StreamScope Stream;
                TEST_QUIC_SUCCEEDED(
                    MsQuic->StreamOpen(
                        Client.GetConnection(),
                        QUIC_STREAM_OPEN_FLAG_NONE,
                        DummyStreamCallback,
                        nullptr,
                        &Stream.Handle));
                TEST_QUIC_SUCCEEDED(
                    MsQuic->StreamStart(
                        Stream.Handle,
                        QUIC_STREAM_START_FLAG_NONE));
                TEST_QUIC_SUCCEEDED(
                    MsQuic->StreamShutdown(
                        Stream.Handle,
                        QUIC_STREAM_SHUTDOWN_FLAG_GRACEFUL,
                        QUIC_TEST_NO_ERROR));
                TEST_QUIC_SUCCEEDED(
                    MsQuic->StreamShutdown(
                        Stream.Handle,
                        QUIC_STREAM_SHUTDOWN_FLAG_GRACEFUL,
                        QUIC_TEST_NO_ERROR));
            }
            //
            // Shutdown null handle.
            //
            TEST_QUIC_STATUS(
                QUIC_STATUS_INVALID_PARAMETER,
                MsQuic->StreamShutdown(
                    nullptr,
                    QUIC_STREAM_SHUTDOWN_FLAG_GRACEFUL,
                    QUIC_TEST_NO_ERROR));
            //
            // Shutdown no flags.
            //
            {
                TestScopeLogger logScope("Shutdown no flags");
                StreamScope Stream;
                TEST_QUIC_SUCCEEDED(
                    MsQuic->StreamOpen(
                        Client.GetConnection(),
                        QUIC_STREAM_OPEN_FLAG_NONE,
                        DummyStreamCallback,
                        nullptr,
                        &Stream.Handle));
                TEST_QUIC_SUCCEEDED(
                    MsQuic->StreamStart(
                        Stream.Handle,
                        QUIC_STREAM_START_FLAG_NONE));
                TEST_QUIC_STATUS(
                    QUIC_STATUS_INVALID_PARAMETER,
                    MsQuic->StreamShutdown(
                        Stream.Handle,
                        QUIC_STREAM_SHUTDOWN_FLAG_NONE,
                        QUIC_TEST_NO_ERROR));
            }
            //
            // Close nullptr.
            //
            MsQuic->StreamClose(nullptr);
            if (Connect) {
                StreamScope PrevOpenStream; // Opened before shutdown
                TEST_QUIC_SUCCEEDED(
                    MsQuic->StreamOpen(
                        Client.GetConnection(),
                        QUIC_STREAM_OPEN_FLAG_NONE | QUIC_STREAM_OPEN_FLAG_UNIDIRECTIONAL,
                        AllowSendCompleteStreamCallback,
                        nullptr,
                        &PrevOpenStream.Handle));
                StreamScope PrevOpenAndStartedStream; // Started before shutdown
                TEST_QUIC_SUCCEEDED(
                    MsQuic->StreamOpen(
                        Client.GetConnection(),
                        QUIC_STREAM_OPEN_FLAG_NONE | QUIC_STREAM_OPEN_FLAG_UNIDIRECTIONAL,
                        AllowSendCompleteStreamCallback,
                        nullptr,
                        &PrevOpenAndStartedStream.Handle));
                TEST_QUIC_SUCCEEDED(
                    MsQuic->StreamStart(
                        PrevOpenAndStartedStream.Handle,
                        QUIC_STREAM_START_FLAG_NONE));
                //
                // Test after connection has been shutdown.
                //
                Server->Shutdown(QUIC_CONNECTION_SHUTDOWN_FLAG_NONE, 0);
                CxPlatSleep(100); // TODO - Ideally wait for completion event instead
                //
                // Open After Connection Shutdown
                //
                {
                    TestScopeLogger logScope("Open After Connection Shutdown");
                    StreamScope Stream;
                    TEST_QUIC_STATUS(
                        QUIC_STATUS_ABORTED,
                        MsQuic->StreamOpen(
                            Client.GetConnection(),
                            QUIC_STREAM_OPEN_FLAG_NONE | QUIC_STREAM_OPEN_FLAG_UNIDIRECTIONAL,
                            AllowSendCompleteStreamCallback,
                            nullptr,
                            &Stream.Handle));
                }
                //
                // Start After Connection Shutdown
                //
                {
                    TestScopeLogger logScope("Start After Connection Shutdown");
                    TEST_QUIC_STATUS(
                        QUIC_STATUS_ABORTED,
                        MsQuic->StreamStart(
                            PrevOpenStream.Handle,
                            QUIC_STREAM_START_FLAG_NONE));
                }
                //
                // Send+Start After Connection Shutdown
                //
                {
                    TestScopeLogger logScope("Send+Start After Connection Shutdown");
                    TEST_QUIC_STATUS(
                        QUIC_STATUS_ABORTED,
                        MsQuic->StreamSend(
                            PrevOpenStream.Handle,
                            Buffers,
                            ARRAYSIZE(Buffers),
                            QUIC_SEND_FLAG_START,
                            nullptr));
                }
                //
                // Send After Connection Shutdown
                //
                {
                    TestScopeLogger logScope("Send After Connection Shutdown");
                    TEST_QUIC_STATUS(
                        QUIC_STATUS_ABORTED,
                        MsQuic->StreamSend(
                            PrevOpenAndStartedStream.Handle,
                            Buffers,
                            ARRAYSIZE(Buffers),
                            QUIC_SEND_FLAG_START,
                            nullptr));
                }
            }
        }
    }
}
//
// Context used by security-config tests: carries an event to signal
// completion, the status the test expects, and whether a failure occurred.
//
class SecConfigTestContext {
public:
    CXPLAT_EVENT Event; // Signaled when the async operation completes.
    QUIC_STATUS Expected; // Status the test expects to observe.
    bool Failed; // Set if the observed status did not match.
    SecConfigTestContext() : Expected(0), Failed(false)
    {
        // Auto-reset event, initially unsignaled.
        CxPlatEventInitialize(&Event, FALSE, FALSE);
    }
    ~SecConfigTestContext()
    {
        CxPlatEventUninitialize(Event);
    }
};
void QuicTestGlobalSetParam()
{
//
// QUIC_PARAM_GLOBAL_SUPPORTED_VERSIONS
//
{
TestScopeLogger LogScope("QUIC_PARAM_GLOBAL_SUPPORTED_VERSIONS is get only");
TEST_QUIC_STATUS(
QUIC_STATUS_INVALID_PARAMETER,
MsQuic->SetParam(
nullptr,
QUIC_PARAM_GLOBAL_SUPPORTED_VERSIONS,
0,
nullptr));
}
//
// QUIC_PARAM_GLOBAL_PERF_COUNTERS
//
{
TestScopeLogger LogScope("QUIC_PARAM_GLOBAL_PERF_COUNTERS is get only");
TEST_QUIC_STATUS(
QUIC_STATUS_INVALID_PARAMETER,
MsQuic->SetParam(
nullptr,
QUIC_PARAM_GLOBAL_PERF_COUNTERS,
0,
nullptr));
}
//
// QUIC_PARAM_GLOBAL_LIBRARY_VERSION
//
{
TestScopeLogger LogScope("QUIC_PARAM_GLOBAL_LIBRARY_VERSION is get only");
TEST_QUIC_STATUS(
QUIC_STATUS_INVALID_PARAMETER,
MsQuic->SetParam(
nullptr,
QUIC_PARAM_GLOBAL_LIBRARY_VERSION,
0,
nullptr));
}
//
// QUIC_PARAM_GLOBAL_VERSION_SETTINGS
//
{
TestScopeLogger LogScope("QUIC_PARAM_GLOBAL_VERSION_SETTINGS is covered by QuicTestVersionSettings");
}
//
// QUIC_PARAM_GLOBAL_LIBRARY_GIT_HASH
//
{
TestScopeLogger LogScope("QUIC_PARAM_GLOBAL_LIBRARY_GIT_HASH is get only");
TEST_QUIC_STATUS(
QUIC_STATUS_INVALID_PARAMETER,
MsQuic->SetParam(
nullptr,
QUIC_PARAM_GLOBAL_LIBRARY_VERSION,
0,
nullptr));
}
}
//
// Validates SetParam argument checking common to all handle types: null
// handles, global/handle level mismatches, and corrupted handle types.
//
void QuicTestCommonSetParam()
{
    //
    // Null handle
    //
    {
        TestScopeLogger LogScope("Null handle with non-global param");
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            MsQuic->SetParam(
                nullptr,
                0, // Any param other than GLOBAL
                0,
                nullptr));
    }
    MsQuicRegistration Registration;
    TEST_TRUE(Registration.IsValid());
    //
    // Global param with handle
    //
    {
        TestScopeLogger LogScope("Global with handle");
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            MsQuic->SetParam(
                Registration.Handle,
                QUIC_PARAM_PREFIX_GLOBAL,
                0,
                nullptr));
    }
    //
    // Invalid handle type
    //
    // Temporarily overwrite the handle's leading type byte with an
    // out-of-range value so SetParam rejects the handle, then restore it so
    // the connection can clean up normally.
    //
    {
        TestScopeLogger LogScope("Invalid handle type");
        MsQuicConnection Connection(Registration);
        TEST_QUIC_SUCCEEDED(Connection.GetInitStatus());
        auto OriginalType = ((uint8_t*)Connection.Handle)[0];
        ((uint8_t*)Connection.Handle)[0] = 128; // Invalid
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            MsQuic->SetParam(
                Connection.Handle,
                0,
                0,
                nullptr));
        ((uint8_t*)Connection.Handle)[0] = OriginalType;
    }
}
//
// Registration handles expose no settable parameters, so a SetParam call
// with the registration prefix must be rejected.
//
void QuicTestRegistrationSetParam()
{
    //
    // No parameter for Registration
    //
    {
        MsQuicRegistration Registration;
        TEST_TRUE(Registration.IsValid());
        uint32_t Dummy = 0;
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            MsQuic->SetParam(
                Registration.Handle,
                QUIC_PARAM_PREFIX_REGISTRATION,
                sizeof(Dummy),
                &Dummy));
    }
}
//
// Placeholder: both configuration parameters are exercised elsewhere; this
// function only records that fact in the test log.
//
void QuicTestConfigurationSetParam()
{
    //
    // QUIC_PARAM_CONFIGURATION_TICKET_KEYS
    //
    {
        TestScopeLogger LogScope("QUIC_PARAM_CONFIGURATION_TICKET_KEYS is covered by QuicTestValidateConfiguration");
    }
    //
    // QUIC_PARAM_CONFIGURATION_VERSION_SETTINGS
    //
    {
        TestScopeLogger LogScope("QUIC_PARAM_CONFIGURATION_VERSION_SETTINGS is covered by QuicTestVersionSettings");
    }
}
//
// Validates that the get-only listener parameters (local address and
// statistics) reject SetParam.
//
// Fix: removed the unused local 'MsQuicAlpn Alpn("MsQuicTest")' — the
// listener is never started here, so no ALPN is needed.
//
void QuicTestListenerSetParam()
{
    MsQuicRegistration Registration;
    TEST_TRUE(Registration.IsValid());
    MsQuicListener Listener(Registration, DummyListenerCallback, nullptr);
    TEST_TRUE(Listener.IsValid());
    //
    // QUIC_PARAM_LISTENER_LOCAL_ADDRESS
    //
    {
        TestScopeLogger LogScope("QUIC_PARAM_LISTENER_LOCAL_ADDRESS is get only");
        QUIC_ADDR Dummy = {0};
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            Listener.SetParam(
                QUIC_PARAM_LISTENER_LOCAL_ADDRESS,
                sizeof(Dummy),
                &Dummy));
    }
    //
    // QUIC_PARAM_LISTENER_STATS
    //
    {
        TestScopeLogger LogScope("QUIC_PARAM_LISTENER_STATS is get only");
        QUIC_LISTENER_STATISTICS Dummy = {0};
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            Listener.SetParam(
                QUIC_PARAM_LISTENER_STATS,
                sizeof(Dummy),
                &Dummy));
    }
}
//
// Validates that every get-only connection parameter rejects SetParam with
// QUIC_STATUS_INVALID_PARAMETER. Each case uses a fresh connection; the
// Dummy payload size is irrelevant since the call must be rejected before
// the buffer is interpreted.
//
void QuicTestConnectionSetParam()
{
    MsQuicRegistration Registration;
    TEST_TRUE(Registration.IsValid());
    //
    // QUIC_PARAM_CONN_QUIC_VERSION
    //
    {
        TestScopeLogger LogScope("QUIC_PARAM_CONN_QUIC_VERSION is get only");
        MsQuicConnection Connection(Registration);
        TEST_QUIC_SUCCEEDED(Connection.GetInitStatus());
        uint32_t Dummy = 0;
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            Connection.SetParam(
                QUIC_PARAM_CONN_QUIC_VERSION,
                sizeof(Dummy),
                &Dummy));
    }
    //
    // QUIC_PARAM_CONN_IDEAL_PROCESSOR
    //
    {
        TestScopeLogger LogScope("QUIC_PARAM_CONN_IDEAL_PROCESSOR is get only");
        MsQuicConnection Connection(Registration);
        TEST_QUIC_SUCCEEDED(Connection.GetInitStatus());
        uint16_t Dummy = 0;
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            Connection.SetParam(
                QUIC_PARAM_CONN_IDEAL_PROCESSOR,
                sizeof(Dummy),
                &Dummy));
    }
    //
    // QUIC_PARAM_CONN_STATISTICS
    //
    {
        TestScopeLogger LogScope("QUIC_PARAM_CONN_STATISTICS is get only");
        MsQuicConnection Connection(Registration);
        TEST_QUIC_SUCCEEDED(Connection.GetInitStatus());
        uint16_t Dummy = 0;
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            Connection.SetParam(
                QUIC_PARAM_CONN_STATISTICS,
                sizeof(Dummy),
                &Dummy));
    }
    //
    // QUIC_PARAM_CONN_STATISTICS_PLAT
    //
    {
        TestScopeLogger LogScope("QUIC_PARAM_CONN_STATISTICS_PLAT is get only");
        MsQuicConnection Connection(Registration);
        TEST_QUIC_SUCCEEDED(Connection.GetInitStatus());
        uint16_t Dummy = 0;
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            Connection.SetParam(
                QUIC_PARAM_CONN_STATISTICS_PLAT,
                sizeof(Dummy),
                &Dummy));
    }
    //
    // QUIC_PARAM_CONN_LOCAL_BIDI_STREAM_COUNT
    //
    {
        TestScopeLogger LogScope("QUIC_PARAM_CONN_LOCAL_BIDI_STREAM_COUNT is get only");
        MsQuicConnection Connection(Registration);
        TEST_QUIC_SUCCEEDED(Connection.GetInitStatus());
        uint16_t Dummy = 0;
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            Connection.SetParam(
                QUIC_PARAM_CONN_LOCAL_BIDI_STREAM_COUNT,
                sizeof(Dummy),
                &Dummy));
    }
    //
    // QUIC_PARAM_CONN_LOCAL_UNIDI_STREAM_COUNT
    //
    {
        TestScopeLogger LogScope("QUIC_PARAM_CONN_LOCAL_UNIDI_STREAM_COUNT is get only");
        MsQuicConnection Connection(Registration);
        TEST_QUIC_SUCCEEDED(Connection.GetInitStatus());
        uint16_t Dummy = 0;
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            Connection.SetParam(
                QUIC_PARAM_CONN_LOCAL_UNIDI_STREAM_COUNT,
                sizeof(Dummy),
                &Dummy));
    }
    //
    // QUIC_PARAM_CONN_MAX_STREAM_IDS
    //
    {
        TestScopeLogger LogScope("QUIC_PARAM_CONN_MAX_STREAM_IDS is get only");
        MsQuicConnection Connection(Registration);
        TEST_QUIC_SUCCEEDED(Connection.GetInitStatus());
        uint16_t Dummy = 0;
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            Connection.SetParam(
                QUIC_PARAM_CONN_MAX_STREAM_IDS,
                sizeof(Dummy),
                &Dummy));
    }
    //
    // QUIC_PARAM_CONN_DATAGRAM_SEND_ENABLED
    //
    {
        TestScopeLogger LogScope("QUIC_PARAM_CONN_DATAGRAM_SEND_ENABLED is get only");
        MsQuicConnection Connection(Registration);
        TEST_QUIC_SUCCEEDED(Connection.GetInitStatus());
        uint16_t Dummy = 0;
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            Connection.SetParam(
                QUIC_PARAM_CONN_DATAGRAM_SEND_ENABLED,
                sizeof(Dummy),
                &Dummy));
    }
    //
    // QUIC_PARAM_CONN_STATISTICS_V2
    //
    {
        TestScopeLogger LogScope("QUIC_PARAM_CONN_STATISTICS_V2 is get only");
        MsQuicConnection Connection(Registration);
        TEST_QUIC_SUCCEEDED(Connection.GetInitStatus());
        uint16_t Dummy = 0;
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            Connection.SetParam(
                QUIC_PARAM_CONN_STATISTICS_V2,
                sizeof(Dummy),
                &Dummy));
    }
    //
    // QUIC_PARAM_CONN_STATISTICS_V2_PLAT
    //
    {
        TestScopeLogger LogScope("QUIC_PARAM_CONN_STATISTICS_V2_PLAT is get only");
        MsQuicConnection Connection(Registration);
        TEST_QUIC_SUCCEEDED(Connection.GetInitStatus());
        uint16_t Dummy = 0;
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            Connection.SetParam(
                QUIC_PARAM_CONN_STATISTICS_V2_PLAT,
                sizeof(Dummy),
                &Dummy));
    }
}
//
// Validates that the get-only TLS parameters reject SetParam with
// QUIC_STATUS_NOT_SUPPORTED on a started connection.
//
void QuicTestTlsSetParam()
{
    MsQuicRegistration Registration;
    TEST_TRUE(Registration.IsValid());
    MsQuicAlpn Alpn("MsQuicTest");
    MsQuicCredentialConfig ClientCredConfig;
    //
    // NOTE(review): the local 'ClientCredConfig' above is declared but the
    // configuration below uses 'ClientCertCredConfig' (presumably a global
    // elsewhere in this file) — confirm which credential config is intended.
    //
    MsQuicConfiguration ClientConfiguration(Registration, Alpn, ClientCertCredConfig);
    TEST_TRUE(ClientConfiguration.IsValid());
    MsQuicConnection Connection(Registration);
    TEST_QUIC_SUCCEEDED(Connection.GetInitStatus());
    // Start a handshake; no server is required since the SetParam calls
    // below must be rejected regardless of connection progress.
    TEST_QUIC_SUCCEEDED(
        MsQuic->ConnectionStart(
            Connection.Handle,
            ClientConfiguration,
            QUIC_ADDRESS_FAMILY_INET,
            "localhost",
            4433));
    //
    // QUIC_PARAM_TLS_HANDSHAKE_INFO
    //
    {
        TestScopeLogger LogScope("QUIC_PARAM_TLS_HANDSHAKE_INFO is get only");
        QUIC_HANDSHAKE_INFO Dummy = {};
        TEST_QUIC_STATUS(
            QUIC_STATUS_NOT_SUPPORTED,
            Connection.SetParam(
                QUIC_PARAM_TLS_HANDSHAKE_INFO,
                sizeof(Dummy),
                &Dummy));
    }
    //
    // QUIC_PARAM_TLS_NEGOTIATED_ALPN
    //
    {
        TestScopeLogger LogScope("QUIC_PARAM_TLS_NEGOTIATED_ALPN is get only");
        uint8_t Dummy[] = "MsQuicTest";
        TEST_QUIC_STATUS(
            QUIC_STATUS_NOT_SUPPORTED,
            Connection.SetParam(
                QUIC_PARAM_TLS_NEGOTIATED_ALPN,
                sizeof(Dummy),
                &Dummy));
    }
}
//
// Validates that the get-only stream parameters (ID, 0-RTT length, ideal
// send buffer size) reject SetParam.
//
void QuicTestStreamSetParam()
{
    MsQuicRegistration Registration;
    TEST_TRUE(Registration.IsValid());
    //
    // Fix: the scope label previously read "QUIC_PARAM_CONN_QUIC_VERSION is
    // get only" — a copy/paste from the connection test — which made log
    // triage misleading.
    //
    TestScopeLogger LogScope("QuicTestStreamSetParam: stream params are get only");
    MsQuicConnection Connection(Registration);
    TEST_QUIC_SUCCEEDED(Connection.GetInitStatus());
    //
    // QUIC_PARAM_STREAM_ID
    //
    {
        MsQuicStream Stream(Connection, QUIC_STREAM_OPEN_FLAG_NONE);
        QUIC_UINT62 Dummy = 123;
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            MsQuic->SetParam(
                Stream.Handle,
                QUIC_PARAM_STREAM_ID,
                sizeof(Dummy),
                &Dummy));
    }
    //
    // QUIC_PARAM_STREAM_0RTT_LENGTH
    //
    {
        MsQuicStream Stream(Connection, QUIC_STREAM_OPEN_FLAG_NONE);
        uint64_t Dummy = 123;
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            MsQuic->SetParam(
                Stream.Handle,
                QUIC_PARAM_STREAM_0RTT_LENGTH,
                sizeof(Dummy),
                &Dummy));
    }
    //
    // QUIC_PARAM_STREAM_IDEAL_SEND_BUFFER_SIZE
    //
    {
        MsQuicStream Stream(Connection, QUIC_STREAM_OPEN_FLAG_NONE);
        uint64_t Dummy = 123;
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            MsQuic->SetParam(
                Stream.Handle,
                QUIC_PARAM_STREAM_IDEAL_SEND_BUFFER_SIZE,
                sizeof(Dummy),
                &Dummy));
    }
}
//
// Validates QUIC_PARAM_GLOBAL_PERF_COUNTERS GetParam semantics: size probing
// with a null buffer, a full read, and partial-buffer rounding.
//
void
QuicTestGetPerfCounters()
{
    //
    // Test getting the correct size.
    //
    // A zero-length query must fail with BUFFER_TOO_SMALL and report the
    // required size for the full counter array.
    //
    uint32_t BufferLength = 0;
    TEST_EQUAL(
        MsQuic->GetParam(
            nullptr,
            QUIC_PARAM_GLOBAL_PERF_COUNTERS,
            &BufferLength,
            nullptr),
        QUIC_STATUS_BUFFER_TOO_SMALL);
    TEST_EQUAL(BufferLength, sizeof(uint64_t) * QUIC_PERF_COUNTER_MAX);
    //
    // Test getting the full array of counters.
    //
    uint64_t Counters[QUIC_PERF_COUNTER_MAX];
    TEST_QUIC_SUCCEEDED(
        MsQuic->GetParam(
            nullptr,
            QUIC_PARAM_GLOBAL_PERF_COUNTERS,
            &BufferLength,
            Counters));
    //
    // Test a smaller buffer will be rounded to the nearest counter and filled.
    //
    // The extra odd byte must be rounded down to a whole number of counters.
    //
    BufferLength = (sizeof(uint64_t) * (QUIC_PERF_COUNTER_MAX - 4)) + 1;
    TEST_QUIC_SUCCEEDED(
        MsQuic->GetParam(
            nullptr,
            QUIC_PARAM_GLOBAL_PERF_COUNTERS,
            &BufferLength,
            Counters));
    TEST_EQUAL(BufferLength, (sizeof(uint64_t) * (QUIC_PERF_COUNTER_MAX - 4)));
}
//
// Asserts that a QUIC_VERSION_SETTINGS blob read back from GetParam matches
// the expected version list: all three lists (acceptable, offered, fully
// deployed) must have the expected length and contain the expected versions.
// The byte swap accounts for the versions being stored in network byte order.
//
void
ValidateVersionSettings(
    _In_ const QUIC_VERSION_SETTINGS* const OutputVersionSettings,
    _In_reads_bytes_(ValidVersionsLength * sizeof(uint32_t))
        const uint32_t* const ValidVersions,
    _In_ const size_t ValidVersionsLength
    )
{
    TEST_EQUAL(OutputVersionSettings->AcceptableVersionsLength, ValidVersionsLength);
    TEST_EQUAL(OutputVersionSettings->OfferedVersionsLength, ValidVersionsLength);
    TEST_EQUAL(OutputVersionSettings->FullyDeployedVersionsLength, ValidVersionsLength);
    //
    // Test to make sure the version lists are correct.
    //
    for (unsigned i = 0; i < OutputVersionSettings->AcceptableVersionsLength; ++i) {
        TEST_EQUAL(OutputVersionSettings->AcceptableVersions[i], CxPlatByteSwapUint32(ValidVersions[i]));
    }
    for (unsigned i = 0; i < OutputVersionSettings->OfferedVersionsLength; ++i) {
        TEST_EQUAL(OutputVersionSettings->OfferedVersions[i], CxPlatByteSwapUint32(ValidVersions[i]));
    }
    for (unsigned i = 0; i < OutputVersionSettings->FullyDeployedVersionsLength; ++i) {
        TEST_EQUAL(OutputVersionSettings->FullyDeployedVersions[i], CxPlatByteSwapUint32(ValidVersions[i]));
    }
}
//
// Exercises the VERSION_SETTINGS parameter at connection, configuration and
// global scope: invalid/zero version lists must be rejected, and a valid
// list must round-trip through SetParam/GetParam (including the
// BUFFER_TOO_SMALL size-probe path).
//
void
QuicTestVersionSettings()
{
    const uint32_t ValidVersions[] = {0x00000001, 0xabcd0000, 0xff00001d, 0x0a0a0a0a};
    const uint32_t InvalidVersions[] = {0x00000001, 0x00000002};
    const uint32_t ZeroVersion[] = { 0 };
    // Buffer sized for the settings struct plus the three version lists.
    uint8_t OutputVersionBuffer[sizeof(QUIC_VERSION_SETTINGS) + (3 * sizeof(ValidVersions))];
    uint32_t BufferLength = sizeof(OutputVersionBuffer);
    QUIC_VERSION_SETTINGS* OutputVersionSettings = (QUIC_VERSION_SETTINGS*)OutputVersionBuffer;
    MsQuicRegistration Registration;
    TEST_TRUE(Registration.IsValid());
    MsQuicVersionSettings InputSettings;
    //
    // Test setting and getting the desired versions on Connection
    //
    {
        MsQuicConnection Connection(Registration);
        TEST_QUIC_SUCCEEDED(Connection.GetInitStatus());
        //
        // Test invalid versions are failed on Connection
        //
        InputSettings.SetAllVersionLists(InvalidVersions, ARRAYSIZE(InvalidVersions));
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            Connection.SetParam(
                QUIC_PARAM_CONN_VERSION_SETTINGS,
                sizeof(InputSettings),
                &InputSettings));
        InputSettings.SetAllVersionLists(ZeroVersion, ARRAYSIZE(ZeroVersion));
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            Connection.SetParam(
                QUIC_PARAM_CONN_VERSION_SETTINGS,
                sizeof(InputSettings),
                &InputSettings));
        //
        // Test setting/getting valid versions list on Connection
        //
        InputSettings.SetAllVersionLists(ValidVersions, ARRAYSIZE(ValidVersions));
        TEST_QUIC_SUCCEEDED(
            Connection.SetParam(
                QUIC_PARAM_CONN_VERSION_SETTINGS,
                sizeof(InputSettings),
                &InputSettings));
        TEST_QUIC_SUCCEEDED(
            Connection.GetParam(
                QUIC_PARAM_CONN_VERSION_SETTINGS,
                &BufferLength,
                OutputVersionBuffer));
        TEST_EQUAL(BufferLength, sizeof(OutputVersionBuffer));
        ValidateVersionSettings(OutputVersionSettings, ValidVersions, ARRAYSIZE(ValidVersions));
        // Size-probe path: zero-length query reports the required size.
        BufferLength = 0;
        CxPlatZeroMemory(OutputVersionBuffer, sizeof(OutputVersionBuffer));
        TEST_QUIC_STATUS(
            QUIC_STATUS_BUFFER_TOO_SMALL,
            Connection.GetParam(
                QUIC_PARAM_CONN_VERSION_SETTINGS,
                &BufferLength,
                NULL));
        TEST_EQUAL(BufferLength, sizeof(OutputVersionBuffer));
        TEST_QUIC_SUCCEEDED(
            Connection.GetParam(
                QUIC_PARAM_CONN_VERSION_SETTINGS,
                &BufferLength,
                OutputVersionBuffer));
        TEST_EQUAL(BufferLength, sizeof(OutputVersionBuffer));
        ValidateVersionSettings(OutputVersionSettings, ValidVersions, ARRAYSIZE(ValidVersions));
    }
    //
    // Test setting/getting versions on Configuration
    //
    {
        MsQuicAlpn Alpn("MsQuicTest");
        ConfigurationScope Configuration;
        TEST_QUIC_SUCCEEDED(
            MsQuic->ConfigurationOpen(
                Registration,
                Alpn,
                Alpn.Length(),
                nullptr,
                0,
                nullptr,
                &Configuration.Handle));
        InputSettings.SetAllVersionLists(InvalidVersions, ARRAYSIZE(InvalidVersions));
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            MsQuic->SetParam(
                Configuration.Handle,
                QUIC_PARAM_CONFIGURATION_VERSION_SETTINGS,
                sizeof(InputSettings),
                &InputSettings));
        InputSettings.SetAllVersionLists(ZeroVersion, ARRAYSIZE(ZeroVersion));
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            MsQuic->SetParam(
                Configuration.Handle,
                QUIC_PARAM_CONFIGURATION_VERSION_SETTINGS,
                sizeof(InputSettings),
                &InputSettings));
        InputSettings.SetAllVersionLists(ValidVersions, ARRAYSIZE(ValidVersions));
        TEST_QUIC_SUCCEEDED(
            MsQuic->SetParam(
                Configuration.Handle,
                QUIC_PARAM_CONFIGURATION_VERSION_SETTINGS,
                sizeof(InputSettings),
                &InputSettings));
        BufferLength = sizeof(OutputVersionBuffer);
        TEST_QUIC_SUCCEEDED(
            MsQuic->GetParam(
                Configuration.Handle,
                QUIC_PARAM_CONFIGURATION_VERSION_SETTINGS,
                &BufferLength,
                OutputVersionBuffer));
        TEST_EQUAL(BufferLength, sizeof(OutputVersionBuffer));
        ValidateVersionSettings(OutputVersionSettings, ValidVersions, ARRAYSIZE(ValidVersions));
        // Size-probe path on the configuration handle.
        BufferLength = 0;
        CxPlatZeroMemory(OutputVersionBuffer, sizeof(OutputVersionBuffer));
        TEST_QUIC_STATUS(
            QUIC_STATUS_BUFFER_TOO_SMALL,
            MsQuic->GetParam(
                Configuration.Handle,
                QUIC_PARAM_CONFIGURATION_VERSION_SETTINGS,
                &BufferLength,
                NULL));
        TEST_EQUAL(BufferLength, sizeof(OutputVersionBuffer));
        TEST_QUIC_SUCCEEDED(
            MsQuic->GetParam(
                Configuration.Handle,
                QUIC_PARAM_CONFIGURATION_VERSION_SETTINGS,
                &BufferLength,
                OutputVersionBuffer));
        TEST_EQUAL(BufferLength, sizeof(OutputVersionBuffer));
        ValidateVersionSettings(OutputVersionSettings, ValidVersions, ARRAYSIZE(ValidVersions));
    }
    {
        //
        // Test invalid versions are failed on Global
        //
        InputSettings.SetAllVersionLists(InvalidVersions, ARRAYSIZE(InvalidVersions));
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            MsQuic->SetParam(
                NULL,
                QUIC_PARAM_GLOBAL_VERSION_SETTINGS,
                sizeof(InputSettings),
                &InputSettings));
        InputSettings.SetAllVersionLists(ZeroVersion, ARRAYSIZE(ZeroVersion));
        TEST_QUIC_STATUS(
            QUIC_STATUS_INVALID_PARAMETER,
            MsQuic->SetParam(
                NULL,
                QUIC_PARAM_GLOBAL_VERSION_SETTINGS,
                sizeof(InputSettings),
                &InputSettings));
        //
        // Test setting/getting valid desired versions on global
        //
        BufferLength = sizeof(InputSettings);
        InputSettings.SetAllVersionLists(ValidVersions, ARRAYSIZE(ValidVersions));
        TEST_QUIC_SUCCEEDED(
            MsQuic->SetParam(
                NULL,
                QUIC_PARAM_GLOBAL_VERSION_SETTINGS,
                sizeof(InputSettings),
                &InputSettings));
        // Restores the global version list when this scope exits.
        ClearGlobalVersionListScope ClearVersionListScope;
        BufferLength = 0;
        CxPlatZeroMemory(OutputVersionBuffer, sizeof(OutputVersionBuffer));
        TEST_QUIC_STATUS(
            QUIC_STATUS_BUFFER_TOO_SMALL,
            MsQuic->GetParam(
                NULL,
                QUIC_PARAM_GLOBAL_VERSION_SETTINGS,
                &BufferLength,
                NULL));
        TEST_EQUAL(BufferLength, sizeof(OutputVersionBuffer));
        TEST_QUIC_SUCCEEDED(
            MsQuic->GetParam(
                NULL,
                QUIC_PARAM_GLOBAL_VERSION_SETTINGS,
                &BufferLength,
                OutputVersionBuffer));
        TEST_EQUAL(BufferLength, sizeof(OutputVersionBuffer));
        ValidateVersionSettings(OutputVersionSettings, ValidVersions, ARRAYSIZE(ValidVersions));
    }
}
//
// Validates GetParam behavior of the top-level parameter API:
// pre-v2 numeric parameter IDs must be rejected, while the current
// QUIC_PARAM_GLOBAL_* IDs must still work.
//
void
QuicTestValidateParamApi()
{
    //
    // Test backwards compatibility.
    //
    uint16_t LoadBalancingMode;
    uint32_t BufferSize = sizeof(LoadBalancingMode); // (fixed: removed duplicate re-assignment)
    //
    // Raw parameter ID 2 was valid in v1.* but is no longer accepted.
    //
    TEST_QUIC_STATUS(
        QUIC_STATUS_INVALID_PARAMETER,
        MsQuic->GetParam(
            nullptr,
            2, // No longer backwards compatible with v1.*
            &BufferSize,
            (void*)&LoadBalancingMode));
    //
    // The current global parameter ID for the same setting must succeed.
    // (Note: the constant's spelling "BALACING" matches the public header.)
    //
    BufferSize = sizeof(LoadBalancingMode);
    TEST_QUIC_SUCCEEDED(
        MsQuic->GetParam(
            nullptr,
            QUIC_PARAM_GLOBAL_LOAD_BALACING_MODE,
            &BufferSize,
            (void*)&LoadBalancingMode));
}
//
// Listener callback that rejects every incoming connection. If a context
// (CxPlatEvent*) is supplied, the connection is rejected by closing its
// handle and the event is signaled; otherwise rejection happens by
// returning a failure status from the callback.
//
static
_IRQL_requires_max_(PASSIVE_LEVEL)
_Function_class_(QUIC_LISTENER_CALLBACK)
QUIC_STATUS
QUIC_API
RejectListenerCallback(
    _In_ HQUIC /* Listener */,
    _In_opt_ void* Context,
    _Inout_ QUIC_LISTENER_EVENT* Event
    ) noexcept {
    if (Event->Type != QUIC_LISTENER_EVENT_NEW_CONNECTION) {
        return QUIC_STATUS_SUCCESS;
    }
    auto ShutdownSignal = (CxPlatEvent*)Context;
    if (ShutdownSignal == nullptr) {
        //
        // No event provided: reject by failing the callback.
        //
        return QUIC_STATUS_ABORTED;
    }
    //
    // Reject by explicitly closing the new connection, then notify the test.
    //
    MsQuic->ConnectionClose(Event->NEW_CONNECTION.Connection);
    ShutdownSignal->Set();
    return QUIC_STATUS_SUCCESS;
}
//
// Exercises server-side connection rejection. With RejectByClosing == true
// the listener callback closes the new connection handle and signals an
// event; otherwise the callback returns a failure status and the client
// must observe the handshake fail with QUIC_STATUS_CONNECTION_REFUSED.
//
void
QuicTestConnectionRejection(
    bool RejectByClosing
    )
{
    CxPlatEvent ShutdownEvent;
    MsQuicRegistration Registration(true);
    TEST_QUIC_SUCCEEDED(Registration.GetInitStatus());
    MsQuicConfiguration ServerConfiguration(Registration, "MsQuicTest", ServerSelfSignedCredConfig);
    TEST_QUIC_SUCCEEDED(ServerConfiguration.GetInitStatus());
    MsQuicCredentialConfig ClientCredConfig;
    MsQuicConfiguration ClientConfiguration(Registration, "MsQuicTest", ClientCredConfig);
    TEST_QUIC_SUCCEEDED(ClientConfiguration.GetInitStatus());
    // The callback only receives the shutdown event when rejecting by closing;
    // a null context makes it reject by returning a failure status instead.
    MsQuicListener Listener(Registration, RejectListenerCallback, RejectByClosing ? &ShutdownEvent : nullptr);
    TEST_QUIC_SUCCEEDED(Listener.GetInitStatus());
    QUIC_ADDRESS_FAMILY QuicAddrFamily = QUIC_ADDRESS_FAMILY_INET;
    QuicAddr ServerLocalAddr(QuicAddrFamily);
    TEST_QUIC_SUCCEEDED(Listener.Start("MsQuicTest", &ServerLocalAddr.SockAddr));
    TEST_QUIC_SUCCEEDED(Listener.GetLocalAddr(ServerLocalAddr));
    MsQuicConnection Connection(Registration);
    TEST_QUIC_SUCCEEDED(Connection.GetInitStatus());
    TEST_QUIC_SUCCEEDED(Connection.Start(ClientConfiguration, ServerLocalAddr.GetFamily(), QUIC_TEST_LOOPBACK_FOR_AF(ServerLocalAddr.GetFamily()), ServerLocalAddr.GetPort()));
    if (RejectByClosing) {
        // The server closed the connection; wait for the callback's signal.
        TEST_TRUE(ShutdownEvent.WaitTimeout(TestWaitTimeout));
    } else {
        // A failed callback surfaces to the client as a refused handshake.
        TEST_TRUE(Connection.HandshakeCompleteEvent.WaitTimeout(TestWaitTimeout));
        TEST_FALSE(Connection.HandshakeComplete);
        TEST_EQUAL(Connection.TransportShutdownStatus, QUIC_STATUS_CONNECTION_REFUSED);
    }
}
void
QuicTestCredentialLoad(const QUIC_CREDENTIAL_CONFIG* Config)
{
MsQuicRegistration Registration;
TEST_TRUE(Registration.IsValid());
MsQuicConfiguration Configuration(Registration, "MsQuicTest");
TEST_TRUE(Configuration.IsValid());
TEST_QUIC_SUCCEEDED(Configuration.LoadCredential(Config));
}
//
// Verifies that MsQuic picks up settings (InitialRttMs) from persistent
// storage, both the global location and the per-app location, and reverts
// to defaults when the stored value is deleted. Kernel mode uses the
// Zw* registry APIs directly; user-mode Windows uses the Reg* APIs.
//
void
QuicTestStorage()
{
    // Sentinel value unlikely to collide with the library default.
    const uint32_t SpecialInitialRtt = 55;
#ifdef _KERNEL_MODE
    DECLARE_CONST_UNICODE_STRING(GlobalStoragePath, L"\\Registry\\Machine\\System\\CurrentControlSet\\Services\\MsQuic\\Parameters\\");
    DECLARE_CONST_UNICODE_STRING(AppStoragePath, L"\\Registry\\Machine\\System\\CurrentControlSet\\Services\\MsQuic\\Parameters\\Apps\\StorageTest\\");
    DECLARE_CONST_UNICODE_STRING(ValueName, L"InitialRttMs");
    HANDLE GlobalKey, AppKey;
    OBJECT_ATTRIBUTES GlobalAttributes, AppAttributes;
    InitializeObjectAttributes(
        &GlobalAttributes,
        (PUNICODE_STRING)&GlobalStoragePath,
        OBJ_CASE_INSENSITIVE | OBJ_KERNEL_HANDLE,
        NULL,
        NULL);
    InitializeObjectAttributes(
        &AppAttributes,
        (PUNICODE_STRING)&AppStoragePath,
        OBJ_CASE_INSENSITIVE | OBJ_KERNEL_HANDLE,
        NULL,
        NULL);
    // Start from a clean slate: remove any leftover value/key from prior runs.
    TEST_QUIC_SUCCEEDED(
        ZwOpenKey(
            &GlobalKey,
            KEY_READ | KEY_NOTIFY,
            &GlobalAttributes));
    ZwDeleteValueKey(
        GlobalKey,
        (PUNICODE_STRING)&ValueName);
    if (QUIC_SUCCEEDED(
        ZwOpenKey(
            &AppKey,
            KEY_READ | KEY_NOTIFY,
            &AppAttributes))) {
        ZwDeleteKey(AppKey);
        ZwClose(AppKey);
    }
    // Recreate the (now empty) per-app key for the app-settings phase below.
    TEST_QUIC_SUCCEEDED(
        ZwCreateKey(
            &AppKey,
            KEY_READ | KEY_NOTIFY,
            &AppAttributes,
            0,
            NULL,
            REG_OPTION_NON_VOLATILE,
            NULL));
#elif _WIN32
    // Start from a clean slate: remove any leftover value/key from prior runs.
    RegDeleteKeyValueA(
        HKEY_LOCAL_MACHINE,
        "System\\CurrentControlSet\\Services\\MsQuic\\Parameters",
        "InitialRttMs");
    RegDeleteKeyA(
        HKEY_LOCAL_MACHINE,
        "System\\CurrentControlSet\\Services\\MsQuic\\Parameters\\Apps\\StorageTest");
    HKEY Key;
    // NOTE(review): the RegCreateKeyA return value is ignored; on failure
    // Key would be used uninitialized by RegCloseKey — confirm intent.
    RegCreateKeyA(
        HKEY_LOCAL_MACHINE,
        "System\\CurrentControlSet\\Services\\MsQuic\\Parameters\\Apps\\StorageTest",
        &Key);
    RegCloseKey(Key);
#else
    TEST_FAILURE("Storage tests not supported on this platform");
#endif
    MsQuicSettings Settings;
    //
    // Global settings
    //
    // Baseline: with no stored value, the setting must not be the sentinel.
    TEST_QUIC_SUCCEEDED(Settings.GetGlobal());
    TEST_NOT_EQUAL(Settings.InitialRttMs, SpecialInitialRtt);
#ifdef _KERNEL_MODE
    TEST_QUIC_SUCCEEDED(
        ZwSetValueKey(
            GlobalKey,
            (PUNICODE_STRING)&ValueName,
            0,
            REG_DWORD,
            (PVOID)&SpecialInitialRtt,
            sizeof(SpecialInitialRtt)));
#elif _WIN32
    TEST_EQUAL(
        NO_ERROR,
        RegSetKeyValueA(
            HKEY_LOCAL_MACHINE,
            "System\\CurrentControlSet\\Services\\MsQuic\\Parameters",
            "InitialRttMs",
            REG_DWORD,
            &SpecialInitialRtt,
            sizeof(SpecialInitialRtt)));
#else
    TEST_FAILURE("Storage tests not supported on this platform");
#endif
    // Give the storage-change notification time to propagate to the library.
    CxPlatSleep(100);
    TEST_QUIC_SUCCEEDED(Settings.GetGlobal());
    TEST_EQUAL(Settings.InitialRttMs, SpecialInitialRtt);
    // Delete the stored value and confirm the setting reverts.
#ifdef _KERNEL_MODE
    TEST_QUIC_SUCCEEDED(
        ZwDeleteValueKey(
            GlobalKey,
            (PUNICODE_STRING)&ValueName));
    ZwClose(GlobalKey);
#elif _WIN32
    TEST_EQUAL(
        NO_ERROR,
        RegDeleteKeyValueA(
            HKEY_LOCAL_MACHINE,
            "System\\CurrentControlSet\\Services\\MsQuic\\Parameters",
            "InitialRttMs"));
#else
    TEST_FAILURE("Storage tests not supported on this platform");
#endif
    CxPlatSleep(100);
    TEST_QUIC_SUCCEEDED(Settings.GetGlobal());
    TEST_NOT_EQUAL(Settings.InitialRttMs, SpecialInitialRtt);
    //
    // App settings
    //
    // The registration name ("StorageTest") selects the per-app storage key.
    MsQuicRegistration Registration("StorageTest");
    TEST_TRUE(Registration.IsValid());
    MsQuicConfiguration Configuration(Registration, "MsQuicTest");
    TEST_TRUE(Configuration.IsValid());
    TEST_QUIC_SUCCEEDED(Configuration.GetSettings(Settings));
    TEST_NOT_EQUAL(Settings.InitialRttMs, SpecialInitialRtt);
#ifdef _KERNEL_MODE
    TEST_QUIC_SUCCEEDED(
        ZwSetValueKey(
            AppKey,
            (PUNICODE_STRING)&ValueName,
            0,
            REG_DWORD,
            (PVOID)&SpecialInitialRtt,
            sizeof(SpecialInitialRtt)));
#elif _WIN32
    TEST_EQUAL(
        NO_ERROR,
        RegSetKeyValueA(
            HKEY_LOCAL_MACHINE,
            "System\\CurrentControlSet\\Services\\MsQuic\\Parameters\\Apps\\StorageTest",
            "InitialRttMs",
            REG_DWORD,
            &SpecialInitialRtt,
            sizeof(SpecialInitialRtt)));
#else
    TEST_FAILURE("Storage tests not supported on this platform");
#endif
    CxPlatSleep(100);
    TEST_QUIC_SUCCEEDED(Configuration.GetSettings(Settings));
    TEST_EQUAL(Settings.InitialRttMs, SpecialInitialRtt);
    // Delete the per-app value and confirm the configuration reverts too.
#ifdef _KERNEL_MODE
    TEST_QUIC_SUCCEEDED(
        ZwDeleteValueKey(
            AppKey,
            (PUNICODE_STRING)&ValueName));
    ZwClose(AppKey);
#elif _WIN32
    TEST_EQUAL(
        NO_ERROR,
        RegDeleteKeyValueA(
            HKEY_LOCAL_MACHINE,
            "System\\CurrentControlSet\\Services\\MsQuic\\Parameters\\Apps\\StorageTest",
            "InitialRttMs"));
#else
    TEST_FAILURE("Storage tests not supported on this platform");
#endif
    CxPlatSleep(100);
    TEST_QUIC_SUCCEEDED(Configuration.GetSettings(Settings));
    TEST_NOT_EQUAL(Settings.InitialRttMs, SpecialInitialRtt);
}
|
// @file threadedtests.cpp - Tests for threaded code
//
/**
* Copyright (C) 2008 10gen Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link the
* code of portions of this program with the OpenSSL library under certain
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. You
* must comply with the GNU Affero General Public License in all respects
* for all of the code used other than as permitted herein. If you modify
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do not
* wish to do so, delete this exception statement from your version. If you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/
#define MONGO_LOG_DEFAULT_COMPONENT ::mongol::logger::LogComponent::kCommand
#include "mongol/platform/basic.h"
#include <boost/thread/barrier.hpp>
#include <boost/version.hpp>
#include <iostream>
#include "mongol/config.h"
#include "mongol/db/client.h"
#include "mongol/db/concurrency/d_concurrency.h"
#include "mongol/db/concurrency/lock_state.h"
#include "mongol/db/operation_context_impl.h"
#include "mongol/dbtests/dbtests.h"
#include "mongol/platform/atomic_word.h"
#include "mongol/platform/bits.h"
#include "mongol/stdx/functional.h"
#include "mongol/stdx/thread.h"
#include "mongol/util/concurrency/old_thread_pool.h"
#include "mongol/util/concurrency/rwlock.h"
#include "mongol/util/concurrency/synchronization.h"
#include "mongol/util/concurrency/old_thread_pool.h"
#include "mongol/util/concurrency/ticketholder.h"
#include "mongol/util/log.h"
#include "mongol/util/timer.h"
namespace ThreadedTests {
using std::unique_ptr;
using std::cout;
using std::endl;
using std::string;
// Harness that runs the same work ("subthread") on nthreads_param threads
// concurrently and then checks the aggregate result ("validate").
template <int nthreads_param = 10>
class ThreadedTest {
public:
    virtual void setup() {}                    // optional one-time initialization
    virtual void subthread(int remaining) = 0; // each thread whatever test work you want done
    virtual void validate() = 0;               // after work is done
    static const int nthreads = nthreads_param;
    void run() {
        setup();
        launch_subthreads(nthreads);
        validate();
    }
    virtual ~ThreadedTest(){}; // not necessary, but makes compilers happy
private:
    // Recursively spawn one thread per count; joins happen as the recursion
    // unwinds, so all nthreads subthreads are alive at the same time.
    void launch_subthreads(int remaining) {
        if (!remaining)
            return;
        stdx::thread athread(stdx::bind(&ThreadedTest::subthread, this, remaining));
        launch_subthreads(remaining - 1);
        athread.join();
    }
};
// Thread count for the MongoMutexTest stress test below.
#ifdef MONGO_PLATFORM_32
// Avoid OOM on Linux-32 by using fewer threads
const int nthr = 45;
#else
const int nthr = 135;
#endif
// Stress test of the global/database lock manager: many threads cycle
// through a mix of nested global reads/writes, TempRelease, and DB-level
// lock combinations chosen by i % 7 (and, for case 6, i % 11).
class MongoMutexTest : public ThreadedTest<nthr> {
#if defined(MONGO_CONFIG_DEBUG_BUILD)
    enum { N = 2000 };
#else
    enum { N = 4000 /*0*/ };
#endif
    ProgressMeter pm;

public:
    MongoMutexTest() : pm(N * nthreads) {}

    void run() {
        Timer t;
        cout << "MongoMutexTest N:" << N << endl;
        ThreadedTest<nthr>::run();
        cout << "MongoMutexTest " << t.millis() << "ms" << endl;
    }

private:
    virtual void subthread(int tnumber) {
        Client::initThread("mongolmutextest");
        OperationContextImpl txn;
        sleepmillis(0);
        for (int i = 0; i < N; i++) {
            int x = std::rand();
            bool sometimes = (x % 15 == 0);
            if (i % 7 == 0) {
                Lock::GlobalRead r(txn.lockState()); // nested test
                Lock::GlobalRead r2(txn.lockState());
            } else if (i % 7 == 1) {
                Lock::GlobalRead r(txn.lockState());
                ASSERT(txn.lockState()->isReadLocked());
            } else if (i % 7 == 4 && tnumber == 1 /*only one upgrader legal*/) {
                Lock::GlobalWrite w(txn.lockState());
                ASSERT(txn.lockState()->isW());
                // NOTE(review): this condition can never be true inside the
                // i % 7 == 4 branch, so the TempRelease below is dead code.
                // Left unchanged to preserve lock traffic — confirm intent.
                if (i % 7 == 2) {
                    Lock::TempRelease t(txn.lockState());
                }
            } else if (i % 7 == 2) {
                Lock::GlobalWrite w(txn.lockState());
                ASSERT(txn.lockState()->isW());
                if (sometimes) {
                    Lock::TempRelease t(txn.lockState());
                }
            } else if (i % 7 == 3) {
                Lock::GlobalWrite w(txn.lockState());
                { Lock::TempRelease t(txn.lockState()); }
                Lock::GlobalRead r(txn.lockState());
                ASSERT(txn.lockState()->isW());
                if (sometimes) {
                    Lock::TempRelease t(txn.lockState());
                }
            } else if (i % 7 == 5) {
                {
                    ScopedTransaction scopedXact(&txn, MODE_IS);
                    Lock::DBLock r(txn.lockState(), "foo", MODE_S);
                }
                {
                    ScopedTransaction scopedXact(&txn, MODE_IS);
                    Lock::DBLock r(txn.lockState(), "bar", MODE_S);
                }
            } else if (i % 7 == 6) {
                if (i > N / 2) {
                    int q = i % 11;
                    if (q == 0) {
                        // Re-entrant shared DB locks plus a second database.
                        ScopedTransaction scopedXact(&txn, MODE_IS);
                        Lock::DBLock r(txn.lockState(), "foo", MODE_S);
                        ASSERT(txn.lockState()->isDbLockedForMode("foo", MODE_S));
                        Lock::DBLock r2(txn.lockState(), "foo", MODE_S);
                        ASSERT(txn.lockState()->isDbLockedForMode("foo", MODE_S));
                        Lock::DBLock r3(txn.lockState(), "local", MODE_S);
                        ASSERT(txn.lockState()->isDbLockedForMode("foo", MODE_S));
                        ASSERT(txn.lockState()->isDbLockedForMode("local", MODE_S));
                    } else if (q == 1) {
                        // test locking local only -- with no preceding lock
                        {
                            ScopedTransaction scopedXact(&txn, MODE_IS);
                            Lock::DBLock x(txn.lockState(), "local", MODE_S);
                        }
                        {
                            ScopedTransaction scopedXact(&txn, MODE_IX);
                            Lock::DBLock x(txn.lockState(), "local", MODE_X);
                            // No actual writing here, so no WriteUnitOfWork
                            if (sometimes) {
                                Lock::TempRelease t(txn.lockState());
                            }
                        }
                    } else if (q == 2) {
                        // Fixed: this branch was a duplicated 'q == 1' and
                        // therefore unreachable; q == 2 was the only value
                        // missing from the chain. Exercises the admin DB.
                        {
                            ScopedTransaction scopedXact(&txn, MODE_IS);
                            Lock::DBLock x(txn.lockState(), "admin", MODE_S);
                        }
                        {
                            ScopedTransaction scopedXact(&txn, MODE_IX);
                            Lock::DBLock x(txn.lockState(), "admin", MODE_X);
                        }
                    } else if (q == 3) {
                        ScopedTransaction scopedXact(&txn, MODE_IX);
                        Lock::DBLock x(txn.lockState(), "foo", MODE_X);
                        Lock::DBLock y(txn.lockState(), "admin", MODE_S);
                    } else if (q == 4) {
                        ScopedTransaction scopedXact(&txn, MODE_IS);
                        Lock::DBLock x(txn.lockState(), "foo2", MODE_S);
                        Lock::DBLock y(txn.lockState(), "admin", MODE_S);
                    } else {
                        ScopedTransaction scopedXact(&txn, MODE_IX);
                        Lock::DBLock w(txn.lockState(), "foo", MODE_X);
                        { Lock::TempRelease t(txn.lockState()); }
                        Lock::DBLock r2(txn.lockState(), "foo", MODE_S);
                        Lock::DBLock r3(txn.lockState(), "local", MODE_S);
                    }
                } else {
                    ScopedTransaction scopedXact(&txn, MODE_IS);
                    Lock::DBLock r(txn.lockState(), "foo", MODE_S);
                    Lock::DBLock r2(txn.lockState(), "foo", MODE_S);
                    Lock::DBLock r3(txn.lockState(), "local", MODE_S);
                }
            }
            pm.hit();
        }
    }

    // Sanity check after the stress run: a fresh locker can still take the
    // global lock in both exclusive and shared mode.
    virtual void validate() {
        {
            MMAPV1LockerImpl ls;
            Lock::GlobalWrite w(&ls);
        }
        {
            MMAPV1LockerImpl ls;
            Lock::GlobalRead r(&ls);
        }
    }
};
// Hammers fetchAndAdd from many threads to confirm the atomic word type
// loses no increments, then spot-checks the single-threaded arithmetic API.
template <typename _AtomicUInt>
class IsAtomicWordAtomic : public ThreadedTest<> {
    static const int iterations = 1000000;
    typedef typename _AtomicUInt::WordType WordType;
    _AtomicUInt target;
    void subthread(int) {
        for (int i = 0; i < iterations; i++) {
            target.fetchAndAdd(WordType(1));
        }
    }
    void validate() {
        // If any concurrent increment was lost, the total will be short.
        ASSERT_EQUALS(target.load(), unsigned(nthreads * iterations));
        _AtomicUInt u;
        ASSERT_EQUALS(0u, u.load());
        ASSERT_EQUALS(0u, u.fetchAndAdd(WordType(1))); // fetchAndAdd returns the old value
        ASSERT_EQUALS(2u, u.addAndFetch(WordType(1))); // addAndFetch returns the new value
        ASSERT_EQUALS(2u, u.fetchAndSubtract(WordType(1)));
        ASSERT_EQUALS(0u, u.subtractAndFetch(WordType(1)));
        ASSERT_EQUALS(0u, u.load());
        u.fetchAndAdd(WordType(1));
        ASSERT_GREATER_THAN(u.load(), WordType(0));
        u.fetchAndSubtract(WordType(1));
        ASSERT_NOT_GREATER_THAN(u.load(), WordType(0));
    }
};
class ThreadPoolTest {
static const unsigned iterations = 10000;
static const unsigned nThreads = 8;
AtomicUInt32 counter;
void increment(unsigned n) {
for (unsigned i = 0; i < n; i++) {
counter.fetchAndAdd(1);
}
}
public:
void run() {
OldThreadPool tp(nThreads);
for (unsigned i = 0; i < iterations; i++) {
tp.schedule(&ThreadPoolTest::increment, this, 2);
}
tp.join();
ASSERT_EQUALS(counter.load(), iterations * 2);
}
};
// Smoke test: construct a named RWLock and briefly acquire it exclusively
// (with a 1000 ms timeout) via the scoped rwlock helper.
class RWLockTest1 {
public:
    void run() {
        RWLock testLock("eliot");
        {
            rwlock writeGuard(testLock, true, 1000);
        }
    }
};
// Checks that RWLockRecursiveNongreedy really is non-greedy: a pending
// writer (worker1) must not block a later reader (worker2) while the main
// thread still holds the lock shared.
class RWLockTest2 {
public:
    static void worker1(RWLockRecursiveNongreedy* lk, AtomicUInt32* x) {
        x->fetchAndAdd(1); // 1 -- signals "about to request exclusive"
        RWLockRecursiveNongreedy::Exclusive b(*lk); // blocks until main releases
        x->fetchAndAdd(1); // 2 -- exclusive acquired
    }
    static void worker2(RWLockRecursiveNongreedy* lk, AtomicUInt32* x) {
        // Must succeed even though worker1 has an exclusive request pending.
        RWLockRecursiveNongreedy::Shared c(*lk);
        x->fetchAndAdd(1);
    }
    void run() {
        /**
         * note: this test will deadlock if the code breaks
         */
        RWLockRecursiveNongreedy lk("eliot2", 120 * 1000);
        cout << "RWLock impl: " << lk.implType() << endl;
        // Main thread holds the lock shared for the first phase.
        unique_ptr<RWLockRecursiveNongreedy::Shared> a(new RWLockRecursiveNongreedy::Shared(lk));
        AtomicUInt32 x1(0);
        cout << "A : " << &x1 << endl;
        stdx::thread t1(stdx::bind(worker1, &lk, &x1));
        // Wait until worker1 is about to request the exclusive lock...
        while (!x1.load())
            ;
        verify(x1.load() == 1);
        // ...and confirm it stays blocked while we hold the shared lock.
        sleepmillis(500);
        verify(x1.load() == 1);
        AtomicUInt32 x2(0);
        stdx::thread t2(stdx::bind(worker2, &lk, &x2));
        t2.join();
        verify(x2.load() == 1);
        // Release the shared lock; worker1's exclusive should now proceed.
        a.reset();
        for (int i = 0; i < 2000; i++) {
            if (x1.load() == 2)
                break;
            sleepmillis(1);
        }
        verify(x1.load() == 2);
        t1.join();
    }
};
class RWLockTest3 {
public:
static void worker2(RWLockRecursiveNongreedy* lk, AtomicUInt32* x) {
verify(!lk->__lock_try(0));
RWLockRecursiveNongreedy::Shared c(*lk);
x->fetchAndAdd(1);
}
void run() {
/**
* note: this test will deadlock if the code breaks
*/
RWLockRecursiveNongreedy lk("eliot2", 120 * 1000);
unique_ptr<RWLockRecursiveNongreedy::Shared> a(new RWLockRecursiveNongreedy::Shared(lk));
AtomicUInt32 x2(0);
stdx::thread t2(stdx::bind(worker2, &lk, &x2));
t2.join();
verify(x2.load() == 1);
a.reset();
}
};
// Same scenario as RWLockTest2 but exercised directly against the raw
// pthread rwlock, to compare the platform primitive's greediness.
class RWLockTest4 {
public:
#if defined(__linux__) || defined(__APPLE__)
    static void worker1(pthread_rwlock_t* lk, AtomicUInt32* x) {
        x->fetchAndAdd(1); // 1 -- signals "about to request write lock"
        cout << "lock b try" << endl;
        // Spin on trywrlock until the main thread releases its read lock.
        while (1) {
            if (pthread_rwlock_trywrlock(lk) == 0)
                break;
            sleepmillis(10);
        }
        cout << "lock b got" << endl;
        x->fetchAndAdd(1); // 2 -- write lock acquired
        pthread_rwlock_unlock(lk);
    }
    static void worker2(pthread_rwlock_t* lk, AtomicUInt32* x) {
        cout << "lock c try" << endl;
        // Reader should get in even with worker1's writer spinning.
        pthread_rwlock_rdlock(lk);
        x->fetchAndAdd(1);
        cout << "lock c got" << endl;
        pthread_rwlock_unlock(lk);
    }
#endif
    void run() {
        /**
         * note: this test will deadlock if the code breaks
         */
#if defined(__linux__) || defined(__APPLE__)
        // create
        pthread_rwlock_t lk;
        verify(pthread_rwlock_init(&lk, 0) == 0);
        // read lock
        verify(pthread_rwlock_rdlock(&lk) == 0);
        AtomicUInt32 x1(0);
        stdx::thread t1(stdx::bind(worker1, &lk, &x1));
        // Wait for worker1 to start requesting the write lock...
        while (!x1.load())
            ;
        verify(x1.load() == 1);
        // ...and confirm it stays blocked while we hold the read lock.
        sleepmillis(500);
        verify(x1.load() == 1);
        AtomicUInt32 x2(0);
        stdx::thread t2(stdx::bind(worker2, &lk, &x2));
        t2.join();
        verify(x2.load() == 1);
        // Release the read lock; worker1's write lock should now succeed.
        pthread_rwlock_unlock(&lk);
        for (int i = 0; i < 2000; i++) {
            if (x1.load() == 2)
                break;
            sleepmillis(1);
        }
        verify(x1.load() == 2);
        t1.join();
#endif
    }
};
// we don't use upgrade so that part is not important currently but the other aspects of this test
// are interesting; it would be nice to do analogous tests for SimpleRWLock and QLock
class UpgradableTest : public ThreadedTest<7> {
    RWLock m;
public:
    UpgradableTest() : m("utest") {}
private:
    virtual void validate() {}
    // Each of the 7 threads plays one role from the 'what' script below,
    // staggered by 100 ms per thread index so acquisition order is known.
    virtual void subthread(int x) {
        Client::initThread("utest");
        /* r = get a read lock
           R = get a read lock and we expect it to be fast
           u = get upgradable
           U = get upgradable and we expect it to be fast
           w = get a write lock
        */
        // /-- verify upgrade can be done instantly while in a read lock already
        // | /-- verify upgrade acquisition isn't greedy
        // | | /-- verify writes aren't greedy while in upgradable(or are they?)
        // v v v
        const char* what = " RURuRwR";
        sleepmillis(100 * x);
        int Z = 1;
        LOG(Z) << x << ' ' << what[x] << " request" << endl;
        char ch = what[x];
        switch (ch) {
            case 'w': {
                m.lock();
                LOG(Z) << x << " w got" << endl;
                sleepmillis(100);
                LOG(Z) << x << " w unlock" << endl;
                m.unlock();
            } break;
            case 'u':
            case 'U': {
                Timer t;
                RWLock::Upgradable u(m);
                LOG(Z) << x << ' ' << ch << " got" << endl;
                if (ch == 'U') {
                    // 'U' roles must acquire quickly; the allowed latency is
                    // platform-dependent.
#if defined(NTDDI_VERSION) && defined(NTDDI_WIN7) && (NTDDI_VERSION >= NTDDI_WIN7)
                    // SRW locks are neither fair nor FIFO, as per docs
                    if (t.millis() > 2000) {
#else
                    if (t.millis() > 20) {
#endif
                        DEV {
                            // a debug buildbot might be slow, try to avoid false positives
                            mongol::unittest::log() << "warning lock upgrade was slow " << t.millis()
                                                    << endl;
                        }
                        else {
                            mongol::unittest::log()
                                << "assertion failure: lock upgrade was too slow: " << t.millis()
                                << endl;
                            ASSERT(false);
                        }
                    }
                }
                sleepsecs(1);
                LOG(Z) << x << ' ' << ch << " unlock" << endl;
            } break;
            case 'r':
            case 'R': {
                Timer t;
                m.lock_shared();
                LOG(Z) << x << ' ' << ch << " got " << endl;
                if (what[x] == 'R') {
                    if (t.millis() > 15) {
                        // commented out for less chatter, we aren't using upgradeable anyway right
                        // now:
                        // log() << x << " info: when in upgradable, write locks are still greedy "
                        // "on this platform" << endl;
                    }
                }
                sleepmillis(200);
                LOG(Z) << x << ' ' << ch << " unlock" << endl;
                m.unlock_shared();
            } break;
            default:
                ASSERT(false);
        }
    }
};
void sleepalittle() {
Timer t;
while (1) {
stdx::this_thread::yield();
if (t.micros() > 8)
break;
}
}
// Incremented by Slack::validate() so one-time diagnostics (the Boost
// version banner) print only for the first Slack instantiation.
int once;
/* This test is to see how long it takes to get a lock after there has been contention -- the OS
   will need to reschedule us. if a spinlock, it will be fast of course, but these aren't spin
   locks. Experimenting with different # of threads would be a good idea.
*/
// Measures lock "slack" for a given mutex type: one watcher thread samples
// how often it observes the lock held (k == 1) versus free, while the other
// threads repeatedly acquire/release. Reports the useful-work fraction.
template <class whichmutex, class scoped>
class Slack : public ThreadedTest<17> {
public:
    Slack() {
        k = 0;
        done = false;
        a = b = 0;
        locks = 0;
    }
private:
    whichmutex m;
    char pad1[128]; // padding — presumably to keep counters on separate cache lines; confirm
    unsigned a, b;
    char pad2[128];
    unsigned locks;
    char pad3[128];
    volatile int k; // 1 while some thread is inside the critical section
    virtual void validate() {
        if (once++ == 0) {
            // <= 1.35 we use a different rwmutex impl so worth noting
            cout << "Boost version : " << BOOST_VERSION << endl;
        }
        cout << typeid(whichmutex).name() << " Slack useful work fraction: " << ((double)a) / b
             << " locks:" << locks << endl;
    }
    // Sampling loop: b counts samples, a counts samples where the lock was held.
    void watch() {
        while (1) {
            b++;
            //__sync_synchronize();
            if (k) {
                a++;
            }
            sleepmillis(0);
            if (done)
                break;
        }
    }
    volatile bool done;
    virtual void subthread(int x) {
        if (x == 1) {
            // Thread 1 is the watcher; the rest contend on the lock.
            watch();
            return;
        }
        Timer t;
        unsigned lks = 0;
        while (1) {
            scoped lk(m);
            k = 1;
            // not very long, we'd like to simulate about 100K locks per second
            sleepalittle();
            lks++;
            if (done || t.millis() > 1500) {
                locks += lks; // accumulate under the lock before exiting
                k = 0;
                break;
            }
            k = 0;
            //__sync_synchronize();
        }
        done = true;
    }
};
// Same slack measurement as Slack, but worker threads hand the "lock" to
// each other via a Notification instead of a mutex, measuring condition-
// variable wakeup latency rather than mutex handoff.
class CondSlack : public ThreadedTest<17> {
    Notification n;
public:
    CondSlack() {
        k = 0;
        done = false;
        a = b = 0;
        locks = 0;
    }
private:
    unsigned a, b;
    virtual void validate() {
        cout << "CondSlack useful work fraction: " << ((double)a) / b << " locks:" << locks << endl;
    }
    unsigned locks;
    volatile int k; // 1 while a worker is in its critical section
    // Sampling loop: b counts samples, a counts samples where k was set.
    void watch() {
        while (1) {
            b++;
            if (k) {
                a++;
            }
            sleepmillis(0);
            if (done)
                break;
        }
    }
    volatile bool done;
    virtual void subthread(int x) {
        if (x == 1) {
            // Thread 1 seeds the notification chain and becomes the watcher.
            n.notifyOne();
            watch();
            return;
        }
        Timer t;
        while (1) {
            n.waitToBeNotified();
            verify(k == 0); // only one thread may hold the "token" at a time
            k = 1;
            // not very long, we'd like to simulate about 100K locks per second
            sleepalittle();
            k = 0;
            locks++;
            n.notifyOne(); // pass the token to the next waiter
            if (done || t.millis() > 1500)
                break;
        }
        done = true;
    }
};
const int WriteLocksAreGreedy_ThreadCount = 3;
// Verifies writer greediness: thread 1 holds a read lock, thread 2 queues a
// write lock, and thread 3's later read request must wait behind the writer
// (hence the > 50 ms acquisition time assertion).
class WriteLocksAreGreedy : public ThreadedTest<WriteLocksAreGreedy_ThreadCount> {
public:
    WriteLocksAreGreedy() : m("gtest"), _barrier(WriteLocksAreGreedy_ThreadCount) {}
private:
    RWLock m;
    boost::barrier _barrier; // aligns thread start so the sleep offsets are meaningful
    virtual void validate() {}
    virtual void subthread(int x) {
        _barrier.wait();
        int Z = 0;
        Client::initThread("utest");
        if (x == 1) {
            // Thread 1: take the read lock first and hold it 400 ms.
            LOG(Z) << mongol::curTimeMillis64() % 10000 << " 1" << endl;
            rwlock_shared lk(m);
            sleepmillis(400);
            LOG(Z) << mongol::curTimeMillis64() % 10000 << " 1x" << endl;
        }
        if (x == 2) {
            // Thread 2: request the write lock while thread 1 still reads.
            sleepmillis(100);
            LOG(Z) << mongol::curTimeMillis64() % 10000 << " 2" << endl;
            rwlock lk(m, true);
            LOG(Z) << mongol::curTimeMillis64() % 10000 << " 2x" << endl;
        }
        if (x == 3) {
            // Thread 3: a read request after the writer queued must block
            // until the writer is done (greedy write locks).
            sleepmillis(200);
            Timer t;
            LOG(Z) << mongol::curTimeMillis64() % 10000 << " 3" << endl;
            rwlock_shared lk(m);
            LOG(Z) << mongol::curTimeMillis64() % 10000 << " 3x" << endl;
            LOG(Z) << t.millis() << endl;
            ASSERT(t.millis() > 50);
        }
    }
};
// Tests waiting on the TicketHolder by running many more threads than can fit into the "hotel", but
// only max _nRooms threads should ever get in at once
class TicketHolderWaits : public ThreadedTest<10> {
    static const int checkIns = 1000;
    static const int rooms = 3;
public:
    TicketHolderWaits() : _hotel(rooms), _tickets(_hotel._nRooms) {}
private:
    // Tracks concurrent occupancy under a mutex and records the high-water mark.
    class Hotel {
    public:
        Hotel(int nRooms) : _nRooms(nRooms), _checkedIn(0), _maxRooms(0) {}
        void checkIn() {
            stdx::lock_guard<stdx::mutex> lk(_frontDesk);
            _checkedIn++;
            // The ticket holder must have prevented over-admission.
            verify(_checkedIn <= _nRooms);
            if (_checkedIn > _maxRooms)
                _maxRooms = _checkedIn;
        }
        void checkOut() {
            stdx::lock_guard<stdx::mutex> lk(_frontDesk);
            _checkedIn--;
            verify(_checkedIn >= 0);
        }
        stdx::mutex _frontDesk;
        int _nRooms;
        int _checkedIn;  // current occupancy
        int _maxRooms;   // highest occupancy ever observed
    };
    Hotel _hotel;
    TicketHolder _tickets;
    virtual void subthread(int x) {
        string threadName = (str::stream() << "ticketHolder" << x);
        Client::initThread(threadName.c_str());
        for (int i = 0; i < checkIns; i++) {
            _tickets.waitForTicket();
            TicketHolderReleaser whenDone(&_tickets); // releases the ticket on scope exit
            _hotel.checkIn();
            sleepalittle();
            // Last check-in lingers so full occupancy is actually reached.
            if (i == checkIns - 1)
                sleepsecs(2);
            _hotel.checkOut();
            if ((i % (checkIns / 10)) == 0)
                mongol::unittest::log() << "checked in " << i << " times..." << endl;
        }
    }
    virtual void validate() {
        // This should always be true, assuming that it takes < 1 sec for the hardware to process a
        // check-out/check-in Time for test is then ~ #threads / _nRooms * 2 seconds
        verify(_hotel._maxRooms == _hotel._nRooms);
    }
};
// Registers every test in this file under the "threading" suite.
class All : public Suite {
public:
    All() : Suite("threading") {}
    void setupTests() {
        add<WriteLocksAreGreedy>();
        // Slack is a test to see how long it takes for another thread to pick up
        // and begin work after another relinquishes the lock. e.g. a spin lock
        // would have very little slack.
        add<Slack<SimpleMutex, stdx::lock_guard<SimpleMutex>>>();
        add<Slack<SimpleRWLock, SimpleRWLock::Exclusive>>();
        add<CondSlack>();
        add<UpgradableTest>();
        add<IsAtomicWordAtomic<AtomicUInt32>>();
        add<IsAtomicWordAtomic<AtomicUInt64>>();
        add<ThreadPoolTest>();
        add<RWLockTest1>();
        add<RWLockTest2>();
        add<RWLockTest3>();
        add<RWLockTest4>();
        add<MongoMutexTest>();
        add<TicketHolderWaits>();
    }
};
// Global instance whose construction registers the suite with the runner.
SuiteInstance<All> myall;
}
|
/*********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2018, Raghavender Sahdev.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Raghavender Sahdev nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
/* Author: Raghavender Sahdev */
#include <moveit/planning_request_adapter/planning_request_adapter.h>
#include <moveit/robot_state/conversions.h>
#include <moveit/trajectory_processing/trajectory_tools.h>
#include <moveit_msgs/RobotTrajectory.h>
#include <class_loader/class_loader.hpp>
#include <ros/ros.h>
#include <chomp_motion_planner/chomp_planner.h>
#include <chomp_motion_planner/chomp_parameters.h>
#include <moveit/planning_interface/planning_interface.h>
#include <moveit/trajectory_processing/iterative_time_parameterization.h>
#include <moveit/collision_distance_field/collision_detector_allocator_hybrid.h>
#include <moveit/robot_state/conversions.h>
#include <vector>
#include <eigen3/Eigen/Core>
namespace chomp
{
class OptimizerAdapter : public planning_request_adapter::PlanningRequestAdapter
{
public:
/**
 * Reads all CHOMP optimization parameters from the private ROS parameter
 * server namespace, falling back to (and logging) a documented default for
 * each parameter that is not set.
 */
OptimizerAdapter() : planning_request_adapter::PlanningRequestAdapter(), nh_("~")
{
  if (!nh_.getParam("planning_time_limit", params_.planning_time_limit_))
  {
    params_.planning_time_limit_ = 10.0;
    ROS_INFO_STREAM("Param planning_time_limit was not set. Using default value: " << params_.planning_time_limit_);
  }
  if (!nh_.getParam("max_iterations", params_.max_iterations_))
  {
    params_.max_iterations_ = 200;
    ROS_INFO_STREAM("Param max_iterations was not set. Using default value: " << params_.max_iterations_);
  }
  if (!nh_.getParam("max_iterations_after_collision_free", params_.max_iterations_after_collision_free_))
  {
    params_.max_iterations_after_collision_free_ = 5;
    ROS_INFO_STREAM("Param max_iterations_after_collision_free was not set. Using default value: "
                    << params_.max_iterations_after_collision_free_);
  }
  if (!nh_.getParam("smoothness_cost_weight", params_.smoothness_cost_weight_))
  {
    params_.smoothness_cost_weight_ = 0.1;
    ROS_INFO_STREAM(
        "Param smoothness_cost_weight was not set. Using default value: " << params_.smoothness_cost_weight_);
  }
  if (!nh_.getParam("obstacle_cost_weight", params_.obstacle_cost_weight_))
  {
    params_.obstacle_cost_weight_ = 1.0;
    ROS_INFO_STREAM("Param obstacle_cost_weight was not set. Using default value: " << params_.obstacle_cost_weight_);
  }
  if (!nh_.getParam("learning_rate", params_.learning_rate_))
  {
    params_.learning_rate_ = 0.01;
    ROS_INFO_STREAM("Param learning_rate was not set. Using default value: " << params_.learning_rate_);
  }
  if (!nh_.getParam("smoothness_cost_velocity", params_.smoothness_cost_velocity_))
  {
    params_.smoothness_cost_velocity_ = 0.0;
    ROS_INFO_STREAM(
        "Param smoothness_cost_velocity was not set. Using default value: " << params_.smoothness_cost_velocity_);
  }
  if (!nh_.getParam("smoothness_cost_acceleration", params_.smoothness_cost_acceleration_))
  {
    params_.smoothness_cost_acceleration_ = 1.0;
    ROS_INFO_STREAM("Param smoothness_cost_acceleration was not set. Using default value: "
                    << params_.smoothness_cost_acceleration_);
  }
  if (!nh_.getParam("smoothness_cost_jerk", params_.smoothness_cost_jerk_))
  {
    params_.smoothness_cost_jerk_ = 0.0;
    ROS_INFO_STREAM(
        "Param smoothness_cost_jerk_ was not set. Using default value: " << params_.smoothness_cost_jerk_);
  }
  if (!nh_.getParam("ridge_factor", params_.ridge_factor_))
  {
    params_.ridge_factor_ = 0.0;
    ROS_INFO_STREAM("Param ridge_factor_ was not set. Using default value: " << params_.ridge_factor_);
  }
  if (!nh_.getParam("use_pseudo_inverse", params_.use_pseudo_inverse_))
  {
    // Fixed: this flag is a boolean; the previous default assigned 0.0
    // (a double) which relied on implicit narrowing to false.
    params_.use_pseudo_inverse_ = false;
    ROS_INFO_STREAM("Param use_pseudo_inverse_ was not set. Using default value: " << params_.use_pseudo_inverse_);
  }
  if (!nh_.getParam("pseudo_inverse_ridge_factor", params_.pseudo_inverse_ridge_factor_))
  {
    params_.pseudo_inverse_ridge_factor_ = 1e-4;
    ROS_INFO_STREAM("Param pseudo_inverse_ridge_factor was not set. Using default value: "
                    << params_.pseudo_inverse_ridge_factor_);
  }
  if (!nh_.getParam("joint_update_limit", params_.joint_update_limit_))
  {
    params_.joint_update_limit_ = 0.1;
    ROS_INFO_STREAM("Param joint_update_limit was not set. Using default value: " << params_.joint_update_limit_);
  }
  // NOTE: "min_clearence" (sic) matches the parameter name used by the
  // CHOMP planner's parameter struct; do not "fix" the spelling here.
  if (!nh_.getParam("min_clearence", params_.min_clearence_))
  {
    params_.min_clearence_ = 0.2;
    ROS_INFO_STREAM("Param min_clearence was not set. Using default value: " << params_.min_clearence_);
  }
  if (!nh_.getParam("collision_threshold", params_.collision_threshold_))
  {
    params_.collision_threshold_ = 0.07;
    ROS_INFO_STREAM("Param collision_threshold_ was not set. Using default value: " << params_.collision_threshold_);
  }
  if (!nh_.getParam("use_stochastic_descent", params_.use_stochastic_descent_))
  {
    params_.use_stochastic_descent_ = true;
    ROS_INFO_STREAM(
        "Param use_stochastic_descent was not set. Using default value: " << params_.use_stochastic_descent_);
  }
  if (!nh_.getParam("trajectory_initialization_method", params_.trajectory_initialization_method_))
  {
    params_.trajectory_initialization_method_ = std::string("fillTrajectory");
    ROS_INFO_STREAM("Param trajectory_initialization_method was not set. Using New value as: "
                    << params_.trajectory_initialization_method_);
  }
}
std::string getDescription() const override
{
  // Human-readable name reported for this planning request adapter.
  static const std::string kDescription = "CHOMP Optimizer";
  return kDescription;
}
/** Runs the upstream planner, then post-optimizes its trajectory with CHOMP.
 *
 *  @param planner          upstream planning function (e.g. OMPL) invoked first
 *  @param ps               planning scene (a writable diff is created for CHOMP)
 *  @param req              motion plan request forwarded to both planners
 *  @param res              in: upstream result; out: CHOMP-optimized trajectory on success
 *  @param added_path_index unused here (no waypoints are inserted by this adapter)
 *  @return the upstream planner's success flag; NOTE(review): a CHOMP failure is
 *          reported only via res.error_code_, not via the return value — confirm
 *          this is the intended contract
 */
bool adaptAndPlan(const PlannerFn& planner, const planning_scene::PlanningSceneConstPtr& ps,
const planning_interface::MotionPlanRequest& req, planning_interface::MotionPlanResponse& res,
std::vector<std::size_t>& added_path_index) const override
{
// following call to planner() calls the OMPL planner and stores the trajectory inside the MotionPlanResponse res
// variable which is then used by CHOMP for optimization of the computed trajectory
bool solved = planner(ps, req, res);
// create a hybrid collision detector to set the collision checker as hybrid
collision_detection::CollisionDetectorAllocatorPtr hybrid_cd(
collision_detection::CollisionDetectorAllocatorHybrid::create());
// create a writable planning scene
planning_scene::PlanningScenePtr planning_scene = ps->diff();
ROS_INFO_STREAM("Configuring Planning Scene for CHOMP ....");
planning_scene->setActiveCollisionDetector(hybrid_cd, true);
chomp::ChompPlanner chompPlanner;
planning_interface::MotionPlanDetailedResponse res_detailed;
moveit_msgs::MotionPlanDetailedResponse res_detailed_moveit_msgs;
// populate the trajectory to pass to CHOMPPlanner::solve() method. Obtain trajectory from OMPL's
// planning_interface::MotionPlanResponse object and put / populate it in the
// moveit_msgs::MotionPlanDetailedResponse object
moveit_msgs::RobotTrajectory trajectory_msgs_from_response;
res.trajectory_->getRobotTrajectoryMsg(trajectory_msgs_from_response);
res_detailed_moveit_msgs.trajectory.resize(1);
res_detailed_moveit_msgs.trajectory[0] = trajectory_msgs_from_response;
// CHOMP optimizes the seeded trajectory in place inside res_detailed_moveit_msgs
bool planning_success = chompPlanner.solve(planning_scene, req, params_, res_detailed_moveit_msgs);
if (planning_success)
{
// convert the message-level result back into a RobotTrajectory anchored at the
// start state CHOMP reported
res_detailed.trajectory_.resize(1);
res_detailed.trajectory_[0] = robot_trajectory::RobotTrajectoryPtr(
new robot_trajectory::RobotTrajectory(res.trajectory_->getRobotModel(), res.trajectory_->getGroup()));
moveit::core::RobotState start_state(planning_scene->getRobotModel());
robot_state::robotStateMsgToRobotState(res_detailed_moveit_msgs.trajectory_start, start_state);
res_detailed.trajectory_[0]->setRobotTrajectoryMsg(start_state, res_detailed_moveit_msgs.trajectory[0]);
res_detailed.description_.push_back("plan");
res_detailed.processing_time_ = res_detailed_moveit_msgs.processing_time;
res_detailed.error_code_ = res_detailed_moveit_msgs.error_code;
}
else
res_detailed.error_code_ = res_detailed_moveit_msgs.error_code;
// propagate CHOMP's error code regardless of success
res.error_code_ = res_detailed.error_code_;
// populate the original response object 'res' with the CHOMP's optimized trajectory.
if (planning_success)
{
res.trajectory_ = res_detailed.trajectory_[0];
res.planning_time_ = res_detailed.processing_time_[0];
}
return solved;
}
private:
ros::NodeHandle nh_;
chomp::ChompParameters params_;
};
}
CLASS_LOADER_REGISTER_CLASS(chomp::OptimizerAdapter, planning_request_adapter::PlanningRequestAdapter);
|
#include<cstdlib>
#include<iostream>
#include"../include/graph.hpp"
using namespace Graph;
// Default constructor: builds a graph with a single node (node 0).
ListGraph::ListGraph(){
    // Fix: record the node count. The previous code left `size` uninitialized,
    // so the destructor's `i < this->size` loop was undefined behavior.
    this->size = 1;
    this->nodes = (List::LinkedList<int>**) malloc(this->size * sizeof(List::LinkedList<int>*));
    this->nodes[0] = new List::LinkedList<int>();
}
// Builds a graph with `nodes` vertices, each with an initially empty adjacency list.
ListGraph::ListGraph(int nodes){
    this->size = nodes;
    this->nodes = static_cast<List::LinkedList<int>**>(malloc(sizeof(List::LinkedList<int>*) * this->size));
    for(int idx = 0; idx < this->size; ++idx)
        this->nodes[idx] = new List::LinkedList<int>();
}
// Destroys every adjacency list, then releases the array that held them.
ListGraph::~ListGraph(){
    for(int idx = this->size - 1; idx >= 0; --idx)
        delete this->nodes[idx];
    free(this->nodes);
}
int ListGraph::get_size(){
return this->size;
}
// A node id is valid when it lies in the half-open range [0, size).
bool ListGraph::is_valid_node(int node){
    if(node < 0)
        return false;
    return node < this->size;
}
bool ListGraph::has_edge(int source, int dest){
if(!this->is_valid_node(source) || !this->is_valid_node(dest)){
throw("Invalid node");
}
List::LinkedList<int>* source_node = this->nodes[source];
for(List::Cell<int>* it = source_node->begin(); it != nullptr; it = it->get_next()){
if(it->get_object() == dest) return true;
}
return false;
}
// Inserts the directed edge source -> dest unless it already exists.
// Throws a const char* ("Invalid node") when either id is out of range.
void ListGraph::add_edge(int source, int dest){
    if(!this->is_valid_node(source) || !this->is_valid_node(dest)){
        throw("Invalid node");
    }
    // has_edge() re-validates the ids, but keeps the list duplicate-free.
    if(this->has_edge(source, dest))
        return;
    this->nodes[source]->add(dest);
}
void ListGraph::print(){
std::cout << "Graph:" << std::endl;
for(int i = 0; i < this->size; i++){
List::LinkedList<int>* node = this->nodes[i];
std::cout << i << ": [";
for(List::Cell<int>* it = node->begin(); it != nullptr; it = it->get_next()){
std::cout << it->get_object() << ", ";
}
if(node->length() > 0){
std::cout << "\b\b";
}
std::cout << "]" << std::endl;
}
}
// Exposes the raw adjacency-list array; ownership stays with the graph.
List::LinkedList<int>** ListGraph::get_nodes(){
    return nodes;
}
// Adjacency list of `node`. No bounds check — caller must supply a valid id.
List::LinkedList<int>* ListGraph::get_node(int node){
    return nodes[node];
}
|
#include <iostream>
#include "Test.h"
int main(int argc, char** argv)
{
// English language
Language lang;
readDictionary("data/dict/english.txt", lang.dictionary);
lang.wordMatchRatio = 0.4f;
lang.maxTextLengthPerSpace = 7.0f;
lang.minTextLengthPerSpace = 3.0f;
/*
std::cout << testHex() << std::endl;
std::cout << testBase64() << std::endl;
std::cout << testXor() << std::endl;
std::cout << testXorBreaker(lang) << std::endl;
std::cout << findSingleByteXorEncodedMessage(lang) << std::endl;
std::cout << testStreamXorCipher() << std::endl;
*/
std::cout << "Hamming: " << testHammingDistance() << std::endl;
std::cout << "Stream Xor breaker: " << testRepeatingXorBreaker(lang) << std::endl;
return 0;
}
|
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* */
/* This file is part of the program */
/* GCG --- Generic Column Generation */
/* a Dantzig-Wolfe decomposition based extension */
/* of the branch-cut-and-price framework */
/* SCIP --- Solving Constraint Integer Programs */
/* */
/* Copyright (C) 2010-2019 Operations Research, RWTH Aachen University */
/* Zuse Institute Berlin (ZIB) */
/* */
/* This program is free software; you can redistribute it and/or */
/* modify it under the terms of the GNU Lesser General Public License */
/* as published by the Free Software Foundation; either version 3 */
/* of the License, or (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU Lesser General Public License for more details. */
/* */
/* You should have received a copy of the GNU Lesser General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.*/
/* */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/**@file reader_dec.c
* @brief DEC file reader for structure information
* @author Lukas Kirchhart
* @author Martin Bergner
* @author Gerald Gamrath
* @author Christian Puchert
* @author Michael Bastubbe
*/
/*---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2*/
/* #define SCIP_DEBUG */
#include <assert.h>
#include <string.h>
#if defined(_WIN32) || defined(_WIN64)
#else
#include <strings.h> /*lint --e{766}*/ /* needed for strcasecmp() */
#endif
#include <ctype.h>
#include "reader_dec.h"
#include "scip_misc.h"
#include "pub_gcgvar.h"
#include "cons_decomp.h"
#include "pub_decomp.h"
#include "class_seeed.h"
typedef gcg::Seeed* SeeedPtr;
#define READER_NAME "decreader"
#define READER_DESC "file reader for blocks in dec format"
#define READER_EXTENSION "dec"
/*
* Data structures
*/
#define DEC_MAX_LINELEN 65536
#define DEC_MAX_PUSHEDTOKENS 2
/** section in DEC File */
enum DecSection
{
DEC_START, DEC_INCOMPLETE, DEC_PRESOLVED, DEC_NBLOCKS, DEC_BLOCKCONSS, DEC_MASTERCONSS, DEC_BLOCKVARS, DEC_MASTERVARS, DEC_LINKINGVARS, DEC_END
};
typedef enum DecSection DECSECTION;
/** exponent indicator of the a value */
enum DecExpType
{
DEC_EXP_NONE
};
typedef enum DecExpType DECEXPTYPE;
/** DEC reading data */
struct DecInput
{
SCIP_FILE* file; /**< file to read */
char linebuf[DEC_MAX_LINELEN]; /**< line buffer */
char* token; /**< current token */
char* tokenbuf; /**< token buffer */
char* pushedtokens[DEC_MAX_PUSHEDTOKENS]; /**< token stack */
int npushedtokens; /**< size of token buffer */
int linenumber; /**< current line number */
int linepos; /**< current line position (column) */
SCIP_Bool presolved; /**< does the decomposition refer to the presolved problem? */
SCIP_Bool haspresolvesection; /**< does the decomposition have a presolved section */
SCIP_Bool incomplete; /**< if false the unspecified constraints should be forced to the master (for downward compatibility) */
int nblocks; /**< number of blocks */
int blocknr; /**< number of the currentblock between 0 and Nblocks-1*/
DECSECTION section; /**< current section */
SCIP_Bool haserror; /**< flag to indicate an error occurence */
SeeedPtr seeed; /**< incomplete decomposition */
};
typedef struct DecInput DECINPUT;
/** data for dec reader */
struct SCIP_ReaderData
{
SCIP_HASHMAP* constoblock; /**< hashmap key=constaint value=block*/
};
static const int NOVALUE = -1;
static const int LINKINGVALUE = -2;
static const char delimchars[] = " \f\n\r\t\v";
static const char tokenchars[] = "-+:<>=";
static const char commentchars[] = "\\";
/*
* Local methods (for reading)
*/
/** issues an error message and marks the DEC data to have errors */
static
void syntaxError(
   SCIP*                 scip,               /**< SCIP data structure */
   DECINPUT*             decinput,           /**< DEC reading data */
   const char*           msg                 /**< error message */
   )
{
   char formatstr[256];
   size_t buflen;

   assert(decinput != NULL);

   SCIPverbMessage(scip, SCIP_VERBLEVEL_MINIMAL, NULL, "Syntax error in line %d: %s ('%s')\n",
      decinput->linenumber, msg, decinput->token);

   /* fix: guard against an empty line buffer — the previous code indexed
    * linebuf[strlen(linebuf) - 1], which reads out of bounds when the buffer is
    * empty (e.g. after comment stripping truncated the line at column 0) */
   buflen = strlen(decinput->linebuf);
   if( buflen > 0 && decinput->linebuf[buflen - 1] == '\n' )
   {
      SCIPverbMessage(scip, SCIP_VERBLEVEL_MINIMAL, NULL, " input: %s", decinput->linebuf);
   }
   else
   {
      SCIPverbMessage(scip, SCIP_VERBLEVEL_MINIMAL, NULL, " input: %s\n", decinput->linebuf);
   }

   /* print a caret under the offending column */
   (void) SCIPsnprintf(formatstr, 256, " %%%ds\n", decinput->linepos);
   SCIPverbMessage(scip, SCIP_VERBLEVEL_MINIMAL, NULL, formatstr, "^");

   decinput->section = DEC_END;
   decinput->haserror = TRUE;
}
/** returns whether a syntax error was detected so far */
static
SCIP_Bool hasError(
   DECINPUT*             decinput            /**< DEC reading data */
   )
{
   assert(decinput != NULL);

   return decinput->haserror ? TRUE : FALSE;
}
/** returns whether the given character is a token delimiter (or end of string) */
static
SCIP_Bool isDelimChar(
   char                  c                   /**< input character */
   )
{
   if( c == '\0' )
      return TRUE;
   return strchr(delimchars, c) != NULL;
}
/** returns whether the given character forms a single-character token */
static
SCIP_Bool isTokenChar(
   char                  c                   /**< input character */
   )
{
   return strchr(tokenchars, c) != NULL ? TRUE : FALSE;
}
/** returns whether the current character is member of a value string
 *
 *  DEC files only contain nonnegative integer literals, so the dot/exponent
 *  bookkeeping parameters are unused; they are kept for interface symmetry
 *  with the other SCIP file readers.
 */
static
SCIP_Bool isValueChar(
   char                  c,                  /**< input character */
   char                  nextc,              /**< next input character */
   SCIP_Bool             firstchar,          /**< is the given character the first char of the token? */
   SCIP_Bool*            hasdot,             /**< pointer to update the dot flag */
   DECEXPTYPE*           exptype             /**< pointer to update the exponent type */
   )
{  /*lint --e{715}*/
   assert(hasdot != NULL);
   assert(exptype != NULL);

   /* fix: cast to unsigned char — passing a plain char that is negative (e.g. a
    * non-ASCII byte on platforms where char is signed) to isdigit() is
    * undefined behavior per the C standard */
   if( isdigit((unsigned char)c) )
      return TRUE;

   return FALSE;
}
/** reads the next line from the input file into the line buffer; skips comments;
* returns whether a line could be read
*/
static
SCIP_Bool getNextLine(
DECINPUT* decinput /**< DEC reading data */
)
{
int i;
assert(decinput != NULL);
/* clear the line */
BMSclearMemoryArray(decinput->linebuf, DEC_MAX_LINELEN);
/* read next line */
decinput->linepos = 0;
/* sentinel byte: if SCIPfgets overwrites it, the line was longer than the buffer */
decinput->linebuf[DEC_MAX_LINELEN - 2] = '\0';
if( SCIPfgets(decinput->linebuf, DEC_MAX_LINELEN, decinput->file) == NULL )
return FALSE;
decinput->linenumber ++;
/* sentinel was overwritten -> line exceeded the maximum length; flag a hard error */
if( decinput->linebuf[DEC_MAX_LINELEN - 2] != '\0' )
{
SCIPerrorMessage("Error: line %d exceeds %d characters\n", decinput->linenumber, DEC_MAX_LINELEN - 2);
decinput->haserror = TRUE;
return FALSE;
}
decinput->linebuf[DEC_MAX_LINELEN - 1] = '\0';
decinput->linebuf[DEC_MAX_LINELEN - 2] = '\0'; /* we want to use lookahead of one char -> we need two \0 at the end */
/* skip characters after comment symbol */
for( i = 0; commentchars[i] != '\0'; ++ i )
{
char* commentstart;
commentstart = strchr(decinput->linebuf, commentchars[i]);
if( commentstart != NULL )
{
/* truncate at the comment; the second '\0' keeps the one-char lookahead valid */
*commentstart = '\0';
*(commentstart + 1) = '\0'; /* we want to use lookahead of one char -> we need two \0 at the end */
}
}
return TRUE;
}
/** exchanges the targets of two char* pointers */
static
void swapPointers(
   char**                pointer1,           /**< first pointer */
   char**                pointer2            /**< second pointer */
   )
{
   char* oldfirst = *pointer1;

   *pointer1 = *pointer2;
   *pointer2 = oldfirst;
}
/** reads the next token from the input file into the token buffer; returns whether a token was read */
static
SCIP_Bool getNextToken(
DECINPUT* decinput /**< DEC reading data */
)
{
SCIP_Bool hasdot;
DECEXPTYPE exptype;
char* buf;
int tokenlen;
assert(decinput != NULL);
assert(decinput->linepos < DEC_MAX_LINELEN);
/* check the token stack: a previously pushed-back token is returned first */
if( decinput->npushedtokens > 0 )
{
/* buffers are exchanged rather than copied */
swapPointers(&decinput->token, &decinput->pushedtokens[decinput->npushedtokens - 1]);
decinput->npushedtokens --;
SCIPdebugMessage("(line %d) read token again: '%s'\n", decinput->linenumber, decinput->token);
return TRUE;
}
/* skip delimiters; '\0' means end of the current line -> fetch the next one */
buf = decinput->linebuf;
while( isDelimChar(buf[decinput->linepos]) )
{
if( buf[decinput->linepos] == '\0' )
{
if( ! getNextLine(decinput) )
{
decinput->section = DEC_END;
SCIPdebugMessage("(line %d) end of file\n", decinput->linenumber);
return FALSE;
}
assert(decinput->linepos == 0);
}
else
decinput->linepos ++;
}
assert(decinput->linepos < DEC_MAX_LINELEN);
assert(! isDelimChar(buf[decinput->linepos]));
/* check if the token is a value (DEC files only use plain integers, see isValueChar) */
hasdot = FALSE;
exptype = DEC_EXP_NONE;
if( isValueChar(buf[decinput->linepos], buf[decinput->linepos + 1], TRUE, &hasdot, &exptype) ) /*lint !e679*/
{
/* read value token: consume consecutive digit characters */
tokenlen = 0;
do
{
assert(tokenlen < DEC_MAX_LINELEN);
assert(! isDelimChar(buf[decinput->linepos]));
decinput->token[tokenlen] = buf[decinput->linepos];
++tokenlen;
++(decinput->linepos);
assert(decinput->linepos < DEC_MAX_LINELEN-1);
}
while( isValueChar(buf[decinput->linepos], buf[decinput->linepos + 1], FALSE, &hasdot, &exptype) ); /*lint !e679*/
}
else
{
/* read non-value token: stop at delimiters, token chars, or after a lone token char */
tokenlen = 0;
do
{
assert(tokenlen < DEC_MAX_LINELEN);
decinput->token[tokenlen] = buf[decinput->linepos];
tokenlen ++;
decinput->linepos ++;
/* a single token character (from tokenchars) is a complete token by itself */
if( tokenlen == 1 && isTokenChar(decinput->token[0]) )
break;
}
while( ! isDelimChar(buf[decinput->linepos]) && ! isTokenChar(buf[decinput->linepos]) );
/* if the token is an equation sense '<', '>', or '=', skip a following '='
* if the token is an equality token '=' and the next character is a '<' or '>', replace the token by the inequality sense
*/
if( tokenlen >= 1
&& (decinput->token[tokenlen - 1] == '<' || decinput->token[tokenlen - 1] == '>' || decinput->token[tokenlen - 1] == '=')
&& buf[decinput->linepos] == '=' )
{
decinput->linepos ++;
}
else if( decinput->token[tokenlen - 1] == '=' && (buf[decinput->linepos] == '<' || buf[decinput->linepos] == '>') )
{
decinput->token[tokenlen - 1] = buf[decinput->linepos];
decinput->linepos ++;
}
}
assert(tokenlen < DEC_MAX_LINELEN);
decinput->token[tokenlen] = '\0';
SCIPdebugMessage("(line %d) read token: '%s'\n", decinput->linenumber, decinput->token);
return TRUE;
}
/** pushes the current token onto the token stack; getNextToken() returns it next */
static
void pushToken(
   DECINPUT*             decinput            /**< DEC reading data */
   )
{
   assert(decinput != NULL);
   assert(decinput->npushedtokens < DEC_MAX_PUSHEDTOKENS);

   /* hand the token buffer to the stack slot and take that slot's buffer in exchange */
   swapPointers(&decinput->pushedtokens[decinput->npushedtokens], &decinput->token);
   ++(decinput->npushedtokens);
}
/** exchanges the current token with the content of the secondary token buffer */
static
void swapTokenBuffer(
   DECINPUT*             decinput            /**< DEC reading data */
   )
{
   assert(decinput != NULL);

   swapPointers(&decinput->token, &decinput->tokenbuf);
}
/** returns whether the current token is an integer value; on success the value
 *  is stored in *value, otherwise *value is left unchanged
 */
static
SCIP_Bool isInt(
   SCIP*                 scip,               /**< SCIP data structure */
   DECINPUT*             decinput,           /**< DEC reading data */
   int*                  value               /**< pointer to store the value (unchanged, if token is no value) */
   )
{
   char* endptr;
   long parsed;

   assert(decinput != NULL);
   assert(value != NULL);
   assert(!(strcasecmp(decinput->token, "INFINITY") == 0) && !(strcasecmp(decinput->token, "INF") == 0));

   /* guard clauses: the whole token must parse, and the result must fit an int */
   parsed = strtol(decinput->token, &endptr, 0);
   if( endptr == decinput->token || * endptr != '\0' )
      return FALSE;
   if( parsed < INT_MIN || parsed > INT_MAX ) /*lint !e685*/
      return FALSE;

   *value = (int) parsed;
   return TRUE;
}
/** checks whether the current token is a section identifier, and if yes, switches to the corresponding section */
static
SCIP_Bool isNewSection(
SCIP* scip, /**< SCIP data structure */
DECINPUT* decinput /**< DEC reading data */
)
{
assert(decinput != NULL);
/* remember first token by swapping the token buffer */
swapTokenBuffer(decinput);
/* look at next token: if this is a ':', the first token is a name and no section keyword */
/* NOTE(review): the lookahead token is only pushed back here, it is never
* inspected for ':' as the comment suggests — confirm against other SCIP readers */
if( getNextToken(decinput) )
{
pushToken(decinput);
}
/* reinstall the previous token by swapping back the token buffer */
swapTokenBuffer(decinput);
/* keyword comparison is case-insensitive throughout */
if( strcasecmp(decinput->token, "INCOMPLETE") == 0 )
{
SCIPdebugMessage("(line %d) new section: INCOMPLETE\n", decinput->linenumber);
decinput->section = DEC_INCOMPLETE;
return TRUE;
}
if( strcasecmp(decinput->token, "PRESOLVED") == 0 )
{
SCIPdebugMessage("(line %d) new section: PRESOLVED\n", decinput->linenumber);
decinput->section = DEC_PRESOLVED;
return TRUE;
}
if( strcasecmp(decinput->token, "NBLOCKS") == 0 )
{
SCIPdebugMessage("(line %d) new section: NBLOCKS\n", decinput->linenumber);
decinput->section = DEC_NBLOCKS;
return TRUE;
}
if( strcasecmp(decinput->token, "BLOCK") == 0 || strcasecmp(decinput->token, "BLOCKCONSS") == 0 || strcasecmp(decinput->token, "BLOCKCONS") == 0)
{
int blocknr;
decinput->section = DEC_BLOCKCONSS;
if( getNextToken(decinput) )
{
/* read block number (1-based in the file, stored 0-based in blocknr) */
if( isInt(scip, decinput, &blocknr) )
{
assert(blocknr >= 0);
assert(blocknr <= decinput->nblocks);
decinput->blocknr = blocknr - 1;
}
else
syntaxError(scip, decinput, "no block number after block keyword!\n");
}
else
syntaxError(scip, decinput, "no block number after block keyword!\n");
SCIPdebugMessage("new section: BLOCKCONSS %d\n", decinput->blocknr);
return TRUE;
}
if( strcasecmp(decinput->token, "MASTERCONSS") == 0 || strcasecmp(decinput->token, "MASTERCONS") == 0 )
{
decinput->section = DEC_MASTERCONSS;
SCIPdebugMessage("new section: MASTERCONSS\n");
return TRUE;
}
if( strcasecmp(decinput->token, "BLOCKVARS") == 0 || strcasecmp(decinput->token, "BLOCKVAR") == 0 )
{
int blocknr;
decinput->section = DEC_BLOCKVARS;
if( getNextToken(decinput) )
{
/* read block number (1-based in the file, stored 0-based in blocknr) */
if( isInt(scip, decinput, &blocknr) )
{
assert(blocknr >= 0);
assert(blocknr <= decinput->nblocks);
decinput->blocknr = blocknr - 1;
}
else
syntaxError(scip, decinput, "no block number after block keyword!\n");
}
else
syntaxError(scip, decinput, "no block number after block keyword!\n");
SCIPdebugMessage("new section: BLOCKVARS %d\n", decinput->blocknr);
return TRUE;
}
if( strcasecmp(decinput->token, "MASTERVARS") == 0 || strcasecmp(decinput->token, "MASTERVAR") == 0
|| strcasecmp(decinput->token, "STATICVAR") == 0 || strcasecmp(decinput->token, "STATICVARS") == 0 )
{
decinput->section = DEC_MASTERVARS;
SCIPdebugMessage("new section: MASTERVARS\n");
return TRUE;
}
if( strcasecmp(decinput->token, "LINKINGVARS") == 0 || strcasecmp(decinput->token, "LINKINGVAR") == 0 )
{
decinput->section = DEC_LINKINGVARS;
SCIPdebugMessage("new section: LINKINGVARS\n");
return TRUE;
}
return FALSE;
}
/** reads the header of the file */
static
SCIP_RETCODE readStart(
   SCIP*                 scip,               /**< SCIP data structure */
   DECINPUT*             decinput            /**< DEC reading data */
   )
{
   assert(decinput != NULL);

   /* everything before the first section keyword is treated as a comment */
   for( ;; )
   {
      if( ! getNextToken(decinput) )
         return SCIP_OKAY;
      if( isNewSection(scip, decinput) )
         return SCIP_OKAY;
   }
}
/** reads the incomplete section (a single 0/1 flag) */
static
SCIP_RETCODE readIncomplete(
   SCIP*                 scip,               /**< SCIP data structure */
   DECINPUT*             decinput            /**< DEC reading data */
   )
{
   int flag;

   assert(scip != NULL);
   assert(decinput != NULL);

   while( getNextToken(decinput) )
   {
      /* stop as soon as the next section keyword is encountered */
      if( isNewSection(scip, decinput) )
         return SCIP_OKAY;

      /* non-integer tokens are ignored */
      if( ! isInt(scip, decinput, &flag) )
         continue;

      switch( flag )
      {
      case 1:
         decinput->incomplete = TRUE;
         break;
      case 0:
         decinput->incomplete = FALSE;
         break;
      default:
         syntaxError(scip, decinput, "incomplete parameter must be 0 or 1");
         break;
      }

      SCIPdebugMessage("The constraints that are not specified in this decomposition are %s forced to the master\n",
         decinput->incomplete ? "" : " not");
   }

   return SCIP_OKAY;
}
/** reads the presolved section (a single 0/1 flag) */
static
SCIP_RETCODE readPresolved(
   SCIP*                 scip,               /**< SCIP data structure */
   DECINPUT*             decinput            /**< DEC reading data */
   )
{
   int flag;

   assert(scip != NULL);
   assert(decinput != NULL);

   while( getNextToken(decinput) )
   {
      /* stop as soon as the next section keyword is encountered */
      if( isNewSection(scip, decinput) )
         return SCIP_OKAY;

      /* non-integer tokens are ignored */
      if( ! isInt(scip, decinput, &flag) )
         continue;

      /* any integer marks the section as present, even an invalid one */
      decinput->haspresolvesection = TRUE;

      if( flag == 1 )
         decinput->presolved = TRUE;
      else if( flag == 0 )
         decinput->presolved = FALSE;
      else
         syntaxError(scip, decinput, "presolved parameter must be 0 or 1");

      SCIPdebugMessage("Decomposition is%s from presolved problem\n",
         decinput->presolved ? "" : " not");
   }

   return SCIP_OKAY;
}
/** reads the nblocks section */
static
SCIP_RETCODE readNBlocks(
SCIP* scip, /**< SCIP data structure */
DECINPUT* decinput /**< DEC reading data */
)
{
int nblocks;
assert(scip != NULL);
assert(decinput != NULL);
while( getNextToken(decinput) )
{
/* check if we reached a new section */
if( isNewSection(scip, decinput) )
{
if( decinput->nblocks == NOVALUE )
syntaxError(scip, decinput, "no integer value in nblocks section");
else
return SCIP_OKAY;
/* NOTE(review): after the syntaxError above, control falls through to the
* isInt check below instead of returning — harmless because syntaxError sets
* DEC_END/haserror, but worth confirming this is intentional */
}
/* read number of blocks (only the first integer counts; a second one is an error) */
if( isInt(scip, decinput, &nblocks) )
{
if( decinput->nblocks == NOVALUE )
{
decinput->nblocks = nblocks;
SCIPconshdlrDecompUserSeeedSetnumberOfBlocks(scip, nblocks);
}
else
syntaxError(scip, decinput, "2 integer values in nblocks section");
SCIPdebugMessage("Number of blocks = %d\n", decinput->nblocks);
}
}
return SCIP_OKAY;
}
/** reads the blocks section: assigns each listed constraint to the current block */
static
SCIP_RETCODE readBlockconss(
SCIP* scip, /**< SCIP data structure */
DECINPUT* decinput, /**< DEC reading data */
SCIP_READERDATA* readerdata /**< reader data */
)
{
int blockid;
int currblock; /* round-robin target block for constraints without relevant variables */
SCIP_Bool success;
assert(decinput != NULL);
assert(readerdata != NULL);
currblock = 0;
while( getNextToken(decinput) )
{
int i;
SCIP_CONS* cons;
SCIP_VAR** curvars = NULL;
int ncurvars;
SCIP_Bool conshasvar = FALSE;
/* check if we reached a new section */
if( isNewSection(scip, decinput) )
break;
/* the token must be the name of an existing cons; lookup depends on whether
* the decomposition refers to the presolved or the original problem */
if( decinput->presolved )
cons = SCIPfindCons(scip, decinput->token);
else
cons = SCIPfindOrigCons(scip, decinput->token);
if( cons == NULL )
{
syntaxError(scip, decinput, "unknown constraint in block section");
decinput->haserror = TRUE;
break;
}
/* inactive constraints of the presolved problem are silently skipped */
if( !SCIPconsIsActive(cons) && decinput->presolved )
{
assert( !SCIPhashmapExists(readerdata->constoblock, cons));
/* (sic) message text garbled in original: "skip cons, it is not active" */
SCIPdebugMessage("scic cons is not active, scip it \n");
continue;
}
/* get all curvars for the specific constraint */
SCIP_CALL( SCIPgetConsNVars(scip, cons, &ncurvars, &success) );
assert(success);
if( ncurvars > 0 )
{
SCIP_CALL( SCIPallocBufferArray(scip, &curvars, ncurvars) );
SCIP_CALL( SCIPgetConsVars(scip, cons, curvars, ncurvars, &success) );
assert(success);
}
blockid = decinput->blocknr;
/* detect whether the constraint still contains at least one relevant variable */
for( i = 0; i < ncurvars; i ++ )
{
assert(curvars != NULL); /* for flexelint */
if( decinput->presolved )
{
SCIP_VAR* var = SCIPvarGetProbvar(curvars[i]);
if( !GCGisVarRelevant(var) )
continue;
}
conshasvar = TRUE;
break; /* found var */
}
SCIPfreeBufferArrayNull(scip, &curvars);
if( !conshasvar )
{
/* variable-free constraints are distributed round-robin over the blocks */
SCIPdebugMessage("Cons <%s> has been deleted by presolving or has no variable at all, skipped.\n", SCIPconsGetName(cons) );
SCIP_CALL( SCIPhashmapSetImage(readerdata->constoblock, cons, (void*) (size_t) (currblock+1)) );
SCIP_CALL(SCIPconshdlrDecompUserSeeedSetConsToBlock(scip, decinput->token, currblock) );
++currblock;
currblock = currblock % decinput->nblocks;
continue;
}
/*
* saving block <-> constraint
*/
/* a constraint may appear once per block at most; conflicting assignments are an error */
if( (SCIPhashmapGetImage(readerdata->constoblock, cons) != (void*)(size_t) LINKINGVALUE ) && ( SCIPhashmapGetImage(readerdata->constoblock, cons) != (void*) (size_t) (blockid+1) ) )
{
decinput->haserror = TRUE;
SCIPwarningMessage(scip, "cons %s is already assigned to block %d but is supposed to assigned to %d\n", SCIPconsGetName(cons), SCIPhashmapGetImage(readerdata->constoblock, cons), (blockid+1) );
return SCIP_OKAY;
}
SCIPdebugMessage("cons %s is in block %d\n", SCIPconsGetName(cons), blockid);
SCIP_CALL( SCIPhashmapSetImage(readerdata->constoblock, cons, (void*) (size_t) (blockid+1)) );
SCIP_CALL(SCIPconshdlrDecompUserSeeedSetConsToBlock(scip, decinput->token, blockid) );
}
return SCIP_OKAY;
}
/** reads the block vars section: assigns each listed variable to the current block
*
* NOTE(review): readerdata is asserted but otherwise unused here — variables are
* recorded only via SCIPconshdlrDecompUserSeeedSetVarToBlock
*/
static
SCIP_RETCODE readBlockvars(
SCIP* scip, /**< SCIP data structure */
DECINPUT* decinput, /**< DEC reading data */
SCIP_READERDATA* readerdata /**< reader data */
)
{
int blockid;
assert(decinput != NULL);
assert(readerdata != NULL);
while( getNextToken(decinput) )
{
SCIP_Var* var;
// SCIP_VAR** curvars = NULL;
// int ncurvars;
// SCIP_Bool conshasvar = FALSE;
/* check if we reached a new section */
if( isNewSection(scip, decinput) )
break;
/* the token must be the name of an existing variable */
var = SCIPfindVar(scip, decinput->token);
if( var == NULL )
{
syntaxError(scip, decinput, "unknown variable in block section");
break;
}
/* variables removed by presolving cannot be assigned */
if( !SCIPvarIsActive(var) )
{
SCIPwarningMessage(scip, "Var <%s> has been fixed or aggregated by presolving, skipping.\n", SCIPvarGetName(var));
continue;
}
/* blocknr was set when the BLOCKVARS section header was parsed */
blockid = decinput->blocknr;
SCIPconshdlrDecompUserSeeedSetVarToBlock(scip, decinput->token, blockid);
}
return SCIP_OKAY;
}
/** reads the masterconss section: marks each listed constraint as a master (linking) constraint */
static
SCIP_RETCODE readMasterconss(
SCIP* scip, /**< SCIP data structure */
DECINPUT* decinput, /**< DEC reading data */
SCIP_READERDATA* readerdata /**< reader data */
)
{
assert(scip != NULL);
assert(decinput != NULL);
assert(readerdata != NULL);
while( getNextToken(decinput) )
{
SCIP_CONS* cons;
/* check if we reached a new section */
if( isNewSection(scip, decinput) )
break;
/* the token must be the name of an existing constraint; lookup depends on
* whether the decomposition refers to the presolved or the original problem */
if (decinput->presolved )
cons = SCIPfindCons(scip, decinput->token);
else
cons = SCIPfindOrigCons(scip, decinput->token);
if( cons == NULL )
{
syntaxError(scip, decinput, "unknown constraint in masterconss section");
break;
}
else
{
if( !SCIPhashmapExists( readerdata->constoblock, cons) )
{
SCIPwarningMessage(scip, "Cons <%s> has been deleted by presolving, skipping.\n", SCIPconsGetName(cons));
continue;
}
/* constraints start out as LINKINGVALUE, so a master cons must still carry it */
assert(SCIPhashmapGetImage(readerdata->constoblock, cons) == (void*) (size_t) LINKINGVALUE);
SCIPconshdlrDecompUserSeeedSetConsToMaster(scip, decinput->token);
SCIPdebugMessage("cons %s is linking constraint\n", decinput->token);
}
}
return SCIP_OKAY;
}
/** reads the mastervars section: marks each listed variable as a master/static variable
 *
 *  NOTE(review): readerdata is asserted but otherwise unused here.
 */
static
SCIP_RETCODE readMastervars(
   SCIP*                 scip,               /**< SCIP data structure */
   DECINPUT*             decinput,           /**< DEC reading data */
   SCIP_READERDATA*      readerdata          /**< reader data */
   )
{
   assert(scip != NULL);
   assert(decinput != NULL);
   assert(readerdata != NULL);

   while( getNextToken(decinput) )
   {
      SCIP_VAR* var;

      /* check if we reached a new section */
      if( isNewSection(scip, decinput) )
         break;

      /* the token must be the name of an existing variable */
      var = SCIPfindVar(scip, decinput->token);
      if( var == NULL )
      {
         /* fixed message: this section lists variables, not constraints */
         syntaxError(scip, decinput, "unknown variable in mastervars section");
         break;
      }
      else
      {
         /* variables removed by presolving cannot be assigned */
         if( !SCIPvarIsActive(var) )
         {
            SCIPdebugMessage("Var <%s> has been fixed or aggregated by presolving, skipping.\n", SCIPvarGetName(var));
            continue;
         }
         SCIPconshdlrDecompUserSeeedSetVarToMaster(scip, decinput->token);
         /* fixed message: the entity is a master variable, not a constraint */
         SCIPdebugMessage("var %s is master variable\n", decinput->token);
      }
   }

   return SCIP_OKAY;
}
/** reads the linkingvars section: marks each listed variable as a linking variable
 *
 *  NOTE(review): readerdata is asserted but otherwise unused here.
 */
static
SCIP_RETCODE readLinkingvars(
   SCIP*                 scip,               /**< SCIP data structure */
   DECINPUT*             decinput,           /**< DEC reading data */
   SCIP_READERDATA*      readerdata          /**< reader data */
   )
{
   assert(scip != NULL);
   assert(decinput != NULL);
   assert(readerdata != NULL);

   while( getNextToken(decinput) )
   {
      SCIP_VAR* var;

      /* check if we reached a new section */
      if( isNewSection(scip, decinput) )
         break;

      /* the token must be the name of an existing variable */
      var = SCIPfindVar(scip, decinput->token);
      if( var == NULL )
      {
         /* fixed copy-paste message: this is the linkingvars section and the
          * token names a variable, not a masterconss constraint */
         syntaxError(scip, decinput, "unknown variable in linkingvars section");
         break;
      }
      else
      {
         /* variables removed by presolving cannot be assigned */
         if( !SCIPvarIsActive(var) )
         {
            SCIPwarningMessage(scip, "Var <%s> has been fixed or aggregated by presolving, skipping.\n", SCIPvarGetName(var));
            continue;
         }
         SCIPconshdlrDecompUserSeeedSetVarToLinking(scip, decinput->token);
         /* fixed message: the entity is a linking variable, not a constraint */
         SCIPdebugMessage("var %s is linking variable\n", decinput->token);
      }
   }

   return SCIP_OKAY;
}
/** DEPRECATED @TODO: delete */
/** fills the whole Decomp struct after the dec file has been read */
//static
//SCIP_RETCODE fillDecompStruct(
// SCIP* scip, /**< SCIP data structure */
// DECINPUT* decinput, /**< DEC reading data */
// DEC_DECOMP* decomp, /**< DEC_DECOMP structure to fill */
// SCIP_READERDATA* readerdata /**< reader data*/
// )
//{
// int nblocks;
//
// SCIP_CONS** conss;
// int nconss;
// int i;
// SCIP_HASHMAP* constoblock;
// assert(scip != NULL);
// assert(decinput != NULL);
// assert(decomp != NULL);
// assert(readerdata != NULL);
//
// nblocks = decinput->nblocks;
//
// DECdecompSetPresolved(decomp, decinput->presolved);
// DECdecompSetNBlocks(decomp, nblocks);
// DECdecompSetDetector(decomp, NULL);
//
// nconss = SCIPgetNConss(scip);
// conss = SCIPgetConss(scip);
//
// SCIP_CALL( SCIPhashmapCreate(&constoblock, SCIPblkmem(scip), nconss) );
//
// for( i = 0; i < nconss; ++i )
// {
//
// int blockid;
// assert(SCIPhashmapExists(readerdata->constoblock, conss[i]));
// blockid = (int) (size_t) SCIPhashmapGetImage(readerdata->constoblock, conss[i]); /*lint !e507*/
// if( blockid == LINKINGVALUE )
// {
// blockid = decinput->nblocks+1;
// SCIP_CALL( SCIPhashmapSetImage(constoblock, conss[i], (void*) (size_t) (nblocks+1)) );
// }
//
// SCIP_CALL( SCIPhashmapSetImage(constoblock, conss[i], (void*) (size_t) blockid) );
// }
//
//
// SCIP_CALL_QUIET( DECfilloutDecompFromConstoblock(scip, decomp, constoblock, nblocks, FALSE) );
// return SCIP_OKAY;
//}
/** reads a DEC file
 *
 *  Parses the opened file section by section (PRESOLVED, NBLOCKS, BLOCKCONSS,
 *  MASTERCONSS, BLOCKVARS, MASTERVARS, LINKINGVARS) and feeds the resulting
 *  constraint/variable assignment into cons_decomp's user seeed. On success the
 *  seeed is flushed; on a parse error it is rejected.
 */
static
SCIP_RETCODE readDECFile(
   SCIP*                 scip,               /**< SCIP data structure */
   SCIP_READER*          reader,             /**< Reader data structure */
   DECINPUT*             decinput,           /**< DEC reading data */
   const char*           filename            /**< name of the input file */
   )
{
   SCIP_RETCODE retcode;
   SCIP_READERDATA* readerdata;
   SCIP_CONS** conss;
   int nconss;
   int i;
   assert(decinput != NULL);
   assert(scip != NULL);
   assert(reader != NULL);
   /* a decomposition is meaningless without a problem to decompose */
   if( SCIPgetStage(scip) == SCIP_STAGE_INIT || SCIPgetNVars(scip) == 0 || SCIPgetNConss(scip) == 0 )
   {
      SCIPverbMessage(scip, SCIP_VERBLEVEL_DIALOG, NULL, "No problem exists, will not read structure!\n");
      return SCIP_OKAY;
   }
   /* open file */
   decinput->file = SCIPfopen(filename, "r");
   if( decinput->file == NULL )
   {
      SCIPerrorMessage("cannot open file <%s> for reading\n", filename);
      SCIPprintSysError(filename);
      return SCIP_NOFILE;
   }
   readerdata = SCIPreaderGetData(reader);
   assert(readerdata != NULL);
   /* parse the file */
   decinput->section = DEC_START;
   retcode = SCIP_OKAY;
   while( decinput->section != DEC_END && ! hasError(decinput) && retcode == SCIP_OKAY )
   {
      switch( decinput->section )
      {
         case DEC_START:
            SCIP_CALL( readStart(scip, decinput) );
            break;
         case DEC_INCOMPLETE:
            SCIP_CALL( readIncomplete(scip, decinput) );
            break;
         case DEC_PRESOLVED:
            SCIP_CALL( readPresolved(scip, decinput) );
            if( decinput->presolved && SCIPgetStage(scip) < SCIP_STAGE_PRESOLVED )
            {
               SCIPinfoMessage(scip, NULL, "read presolved decomposition but problem is not presolved yet -> presolve()\n");
               /* bug fix: do not silently discard a presolving failure */
               SCIP_CALL( SCIPpresolve(scip) );
               assert(decinput->haspresolvesection);
            }
            /** call cons_decomp to create seeed (and correct seeedpool if necessary) seeed from the right seeedpool */
            if ( decinput->presolved )
            {
               SCIPconshdlrDecompCreateSeeedpool(scip);
            }
            else
            {
               SCIPconshdlrDecompCreateSeeedpoolUnpresolved(scip);
            }
            /* cons -> block mapping: pick the constraint set that matches the
             * decomposition's presolve state */
            if( decinput->presolved )
            {
               conss = SCIPgetConss(scip);
               nconss = SCIPgetNConss(scip);
            }
            else
            {
               conss = SCIPgetOrigConss(scip);
               nconss = SCIPgetNOrigConss(scip);
            }
            /* initialize every constraint as a linking (master) constraint;
             * the section parsers below overwrite the block assignments */
            SCIP_CALL( SCIPhashmapCreate(&readerdata->constoblock, SCIPblkmem(scip), nconss) );
            for( i = 0; i < nconss; i ++ )
            {
               assert( !SCIPhashmapExists(readerdata->constoblock, conss[i] ) );
               SCIP_CALL( SCIPhashmapInsert(readerdata->constoblock, conss[i], (void*) (size_t) LINKINGVALUE) );
               SCIPdebugMessage("init cons block of %s to %ld\n", SCIPconsGetName(conss[i]), (long) SCIPhashmapGetImage(readerdata->constoblock, conss[i]) );
            }
            SCIPconshdlrDecompCreateUserSeeed(scip, decinput->presolved, decinput->incomplete);
            break;
         case DEC_NBLOCKS:
            SCIP_CALL( readNBlocks(scip, decinput) );
            if( decinput->haspresolvesection && !decinput->presolved && SCIPgetStage(scip) >= SCIP_STAGE_PRESOLVED )
            {
               SCIPwarningMessage(scip, "decomposition belongs to the unpresolved problem, but the problem is already presolved, please consider to re-read the problem and read the decomposition without presolving when transforming do not succeed.\n");
               break;
            }
            if( !decinput->haspresolvesection )
            {
               SCIPwarningMessage(scip, "decomposition has no presolve section at beginning. The behaviour is undefined. Please add a presolve section. File reading is aborted. \n");
            }
            break;
         case DEC_BLOCKCONSS:
            SCIP_CALL( readBlockconss(scip, decinput, readerdata) );
            break;
         case DEC_MASTERCONSS:
            SCIP_CALL( readMasterconss(scip, decinput, readerdata) );
            break;
         case DEC_BLOCKVARS:
            SCIP_CALL( readBlockvars(scip, decinput, readerdata) );
            break;
         case DEC_MASTERVARS:
            SCIP_CALL( readMastervars(scip, decinput, readerdata) );
            break;
         case DEC_LINKINGVARS:
            SCIP_CALL( readLinkingvars(scip, decinput, readerdata) );
            break;
         case DEC_END: /* this is already handled in the while() loop */
         default:
            SCIPerrorMessage("invalid DEC file section <%d>\n", decinput->section);
            /* bug fix: close the file before bailing out, otherwise the handle leaks.
             * NOTE(review): readerdata->constoblock may already be allocated on this
             * path and is not freed here -- confirm whether reader data cleanup
             * elsewhere covers it. */
            SCIPfclose(decinput->file);
            return SCIP_INVALIDDATA;
      }
   }
   if( decinput->haserror)
   {
      SCIPinfoMessage(scip, NULL, "error occurred while reading dec file");
      SCIPconshdlrDecompUserSeeedReject(scip);
   }
   else
   {
      SCIPinfoMessage(scip, NULL, "just read dec file:");
      SCIPconshdlrDecompUserSeeedFlush(scip);
   }
   /* free the cons -> block map again; it was only needed while parsing.
    * NOTE(review): this assumes a PRESOLVED section was present so that
    * constoblock was actually created -- confirm behavior for malformed files. */
   SCIPhashmapFree(&readerdata->constoblock);
   /* close file */
   SCIPfclose(decinput->file);
   return retcode;
}
/*
* Callback methods of reader
*/
/** destructor of reader to free user data (called when SCIP is exiting) */
static
SCIP_DECL_READERFREE(readerFreeDec)
{
SCIP_READERDATA* readerdata;
readerdata = SCIPreaderGetData(reader);
assert(readerdata != NULL);
SCIPfreeMemory(scip, &readerdata);
return SCIP_OKAY;
}
/** problem reading method of reader
 *
 *  Guards against being called without a loaded problem, then delegates the
 *  actual parsing to SCIPreadDec().
 */
static
SCIP_DECL_READERREAD(readerReadDec)
{  /*lint --e{715}*/
   /* note: the stage check must come first -- SCIPgetNVars/SCIPgetNConss are
    * only queried once a problem exists (short-circuit evaluation) */
   if( SCIPgetStage(scip) == SCIP_STAGE_INIT || SCIPgetNVars(scip) == 0 || SCIPgetNConss(scip) == 0 )
   {
      SCIPverbMessage(scip, SCIP_VERBLEVEL_DIALOG, NULL, "Please read in a problem before reading in the corresponding structure file!\n");
      return SCIP_OKAY;
   }
   SCIP_CALL( SCIPreadDec(scip, filename, result) );
   return SCIP_OKAY;
}
/** problem writing method of reader
 *
 *  Delegates to cons_decomp's DEC writer and propagates its result.
 */
static
SCIP_DECL_READERWRITE(readerWriteDec)
{  /*lint --e{715}*/
   assert(scip != NULL);
   assert(reader != NULL);

   /* bug fix: the return code of the writer was silently discarded before;
    * wrap it in SCIP_CALL so failures reach the caller */
   SCIP_CALL( SCIPconshdlrDecompWriteDec(scip, file, transformed, result) );

   return SCIP_OKAY;
}
/*
* reader specific interface methods
*/
/** includes the dec file reader in SCIP */
SCIP_RETCODE
SCIPincludeReaderDec(
   SCIP*                 scip                /**< SCIP data structure */
   )
{
   SCIP_READERDATA* readerdata;

   /* allocate the reader-specific data; it is released again in readerFreeDec() */
   SCIP_CALL( SCIPallocMemory(scip, &readerdata) );

   /* register the reader together with its callbacks (no copy callback) */
   SCIP_CALL( SCIPincludeReader(scip, READER_NAME, READER_DESC, READER_EXTENSION,
         NULL, readerFreeDec, readerReadDec, readerWriteDec, readerdata) );

   return SCIP_OKAY;
}
/* reads problem from file */
SCIP_RETCODE SCIPreadDec(
   SCIP*                 scip,               /**< SCIP data structure */
   const char*           filename,           /**< full path and name of file to read, or NULL if stdin should be used */
   SCIP_RESULT*          result              /**< pointer to store the result of the file reading call */
   )
{
   DECINPUT decinput;
   SCIP_READER* reader;
   SCIP_RETCODE parseretcode;
   int t;

   /* the reader operates on the transformed problem */
   if( SCIPgetStage(scip) < SCIP_STAGE_TRANSFORMED )
   {
      SCIP_CALL( SCIPtransformProb(scip) );
   }

   reader = SCIPfindReader(scip, READER_NAME);
   assert(reader != NULL);

   /* set up the token buffers used by the tokenizer */
   decinput.file = NULL;
   decinput.linebuf[0] = '\0';
   SCIP_CALL( SCIPallocMemoryArray(scip, &decinput.token, DEC_MAX_LINELEN) ); /*lint !e506*/
   decinput.token[0] = '\0';
   SCIP_CALL( SCIPallocMemoryArray(scip, &decinput.tokenbuf, DEC_MAX_LINELEN) ); /*lint !e506*/
   decinput.tokenbuf[0] = '\0';
   for( t = 0; t < DEC_MAX_PUSHEDTOKENS; ++t )
   {
      SCIP_CALL( SCIPallocMemoryArray(scip, &decinput.pushedtokens[t], DEC_MAX_LINELEN) ); /*lint !e506 !e866*/
   }

   /* initialize the remaining parser state */
   decinput.npushedtokens = 0;
   decinput.linenumber = 0;
   decinput.linepos = 0;
   decinput.section = DEC_START;
   decinput.presolved = FALSE;
   decinput.haspresolvesection = FALSE;
   decinput.nblocks = NOVALUE;
   decinput.blocknr = -2;
   decinput.haserror = FALSE;
   decinput.incomplete = FALSE;

   /* parse the file */
   parseretcode = readDECFile(scip, reader, &decinput, filename);

   /* release the token buffers again */
   SCIPfreeMemoryArray(scip, &decinput.token);
   SCIPfreeMemoryArray(scip, &decinput.tokenbuf);
   for( t = 0; t < DEC_MAX_PUSHEDTOKENS; ++t )
   {
      SCIPfreeMemoryArray(scip, &decinput.pushedtokens[t]);
   }

   /* evaluate the outcome */
   if( decinput.haserror )
      return SCIP_READERROR;

   if( parseretcode == SCIP_OKAY )
      *result = SCIP_SUCCESS;

   return parseretcode;
}
/** write the data optionally using the decomposition data */
static
SCIP_RETCODE writeData(
   SCIP*                 scip,               /**< SCIP data structure */
   FILE*                 file,               /**< File pointer to write to */
   DEC_DECOMP*           decdecomp           /**< Decomposition pointer */
   )
{
   SCIP_CONS*** subscipconss;
   SCIP_CONS** linkingconss;
   int* nsubscipconss;
   int nlinkingconss;
   int nblocks;
   SCIP_Bool presolved;
   int d;
   int b;
   int c;

   assert(scip != NULL);
   assert(decdecomp != NULL);
   assert(DECdecompGetType(decdecomp) == DEC_DECTYPE_ARROWHEAD
      || DECdecompGetType(decdecomp) == DEC_DECTYPE_BORDERED
      || DECdecompGetType(decdecomp) == DEC_DECTYPE_DIAGONAL
      || DECdecompGetType(decdecomp) == DEC_DECTYPE_UNKNOWN
      || DECdecompGetType(decdecomp) == DEC_DECTYPE_STAIRCASE);
   SCIPdebugMessage("DEC_DECOMP Type: %s\n", DECgetStrType(DECdecompGetType(decdecomp)));

   /* at first: write meta data of decompsition as comment */
   SCIPinfoMessage(scip, file, "%s%s ndetectors \n", commentchars, commentchars );
   SCIPinfoMessage(scip, file, "%s%s %d \n", commentchars, commentchars, DECdecompGetDetectorChainSize(decdecomp) );
   SCIPinfoMessage(scip, file, "%s%s name time nnewblocks %%ofnewborderconss %%ofnewblockconss %%ofnewlinkingvars %%ofnewblockvars \n", commentchars, commentchars );
   for( d = 0; d < DECdecompGetDetectorChainSize(decdecomp); ++d )
   {
      SCIPinfoMessage(scip, file, "%s%s %s %f %d %f %f %f %f \n", commentchars, commentchars,
         DECdetectorGetName(DECdecompGetDetectorChain(decdecomp)[d]),
         DECdecompGetDetectorClockTimes(decdecomp)[d],
         DECdecompGetNNewBlocks(decdecomp)[d],
         DECdecompGetDetectorPctConssToBorder(decdecomp)[d],
         DECdecompGetDetectorPctConssToBlock(decdecomp)[d],
         DECdecompGetDetectorPctVarsToBorder(decdecomp)[d],
         DECdecompGetDetectorPctVarsToBlock(decdecomp)[d]);
   }

   /* if we don't have staircase, but something else, go through the blocks and create the indices */
   /* subscip conss */
   subscipconss = DECdecompGetSubscipconss(decdecomp);
   nsubscipconss = DECdecompGetNSubscipconss(decdecomp);
   assert(subscipconss != NULL);
   assert(nsubscipconss != NULL);

   /* linking cons */
   linkingconss = DECdecompGetLinkingconss(decdecomp);
   nlinkingconss = DECdecompGetNLinkingconss(decdecomp);
   assert(nlinkingconss >= 0 && nlinkingconss < SCIPgetNConss(scip));
   assert(linkingconss != NULL || nlinkingconss == 0 );

   /* presolve flag and block count header */
   presolved = DECdecompGetPresolved(decdecomp);
   SCIPinfoMessage(scip, file, "PRESOLVED\n");
   SCIPinfoMessage(scip, file, "%d\n", presolved ? 1 : 0);
   nblocks = DECdecompGetNBlocks(decdecomp);
   SCIPinfoMessage(scip, file, "NBLOCKS\n");
   SCIPinfoMessage(scip, file, "%d\n", nblocks);

   /* one BLOCK section per block, listing its constraint names (blocks are 1-based in the file) */
   for( b = 0; b < nblocks; ++b )
   {
      SCIPinfoMessage(scip, file, "BLOCK %d\n", b + 1);
      for( c = 0; c < nsubscipconss[b]; ++c )
      {
         SCIPinfoMessage(scip, file, "%s\n", SCIPconsGetName(subscipconss[b][c]));
      }
   }

   /* master constraints, if any */
   if( nlinkingconss > 0 )
   {
      assert(linkingconss != NULL); /* for flexelint */
      SCIPinfoMessage(scip, file, "MASTERCONSS\n");
      for( c = 0; c < nlinkingconss; ++c )
      {
         SCIPinfoMessage(scip, file, "%s\n", SCIPconsGetName(linkingconss[c]));
      }
   }

   return SCIP_OKAY;
}
/** write a DEC file for a given decomposition */
SCIP_RETCODE GCGwriteDecomp(
   SCIP*                 scip,               /**< SCIP data structure */
   FILE*                 file,               /**< File pointer to write to */
   DEC_DECOMP*           decdecomp           /**< Decomposition pointer */
   )
{
   char outname[SCIP_MAXSTRLEN];

   assert(scip != NULL);

   if( decdecomp != NULL )
   {
      /* encode the block count into the name and dump the structure */
      (void) SCIPsnprintf(outname, SCIP_MAXSTRLEN, "%s_%d", SCIPgetProbName(scip), DECdecompGetNBlocks(decdecomp));
      SCIP_CALL( writeData(scip, file, decdecomp) );
   }
   else
   {
      /* nothing to write without a decomposition structure */
      SCIPwarningMessage(scip, "Cannot write decomposed problem if decomposition structure is empty!\n");
      (void) SCIPsnprintf(outname, SCIP_MAXSTRLEN, "%s", SCIPgetProbName(scip));
   }

   return SCIP_OKAY;
}
|
//---------------------------------------------------------------------------
// Greenplum Database
// Copyright (C) 2011 EMC Corp.
//
// @filename:
// CQueryContext.cpp
//
// @doc:
// Implementation of optimization context
//---------------------------------------------------------------------------
#include "gpos/base.h"
#include "gpopt/base/CColumnFactory.h"
#include "gpopt/base/CColRefSetIter.h"
#include "gpopt/base/CDistributionSpecAny.h"
#include "gpopt/base/CQueryContext.h"
#include "gpopt/base/COptCtxt.h"
#include "gpopt/operators/CLogicalLimit.h"
using namespace gpopt;
//---------------------------------------------------------------------------
// @function:
// CQueryContext::CQueryContext
//
// @doc:
// Ctor
//
//---------------------------------------------------------------------------
CQueryContext::CQueryContext(CMemoryPool *mp, CExpression *pexpr,
							 CReqdPropPlan *prpp, CColRefArray *colref_array,
							 CMDNameArray *pdrgpmdname, BOOL fDeriveStats)
	: m_mp(mp),
	  m_prpp(prpp),
	  m_pdrgpcr(colref_array),
	  m_pdrgpcrSystemCols(NULL),
	  m_pdrgpmdname(pdrgpmdname),
	  m_fDeriveStats(fDeriveStats)
{
	// Takes ownership of prpp, colref_array and pdrgpmdname; each is released
	// in the destructor. The input expression pexpr is NOT adopted -- a
	// preprocessed copy is stored in m_pexpr instead.
	GPOS_ASSERT(NULL != pexpr);
	GPOS_ASSERT(NULL != prpp);
	GPOS_ASSERT(NULL != colref_array);
	GPOS_ASSERT(NULL != pdrgpmdname);
	// one output-column name per required output column
	GPOS_ASSERT(colref_array->Size() == pdrgpmdname->Size());
#ifdef GPOS_DEBUG
	const ULONG ulReqdColumns = m_pdrgpcr->Size();
#endif	//GPOS_DEBUG
	// mark unused CTEs
	CCTEInfo *pcteinfo = COptCtxt::PoctxtFromTLS()->Pcteinfo();
	pcteinfo->MarkUnusedCTEs();
	// the preprocessor must keep both the query's output columns and any
	// columns referenced by the required sort order alive
	CColRefSet *pcrsOutputAndOrderingCols = GPOS_NEW(mp) CColRefSet(mp);
	CColRefSet *pcrsOrderSpec = prpp->Peo()->PosRequired()->PcrsUsed(mp);
	pcrsOutputAndOrderingCols->Include(colref_array);
	pcrsOutputAndOrderingCols->Include(pcrsOrderSpec);
	// PcrsUsed() returned an owned set; release it once its columns are copied
	pcrsOrderSpec->Release();
	m_pexpr = CExpressionPreprocessor::PexprPreprocess(
		mp, pexpr, pcrsOutputAndOrderingCols);
	pcrsOutputAndOrderingCols->Release();
	// preprocessing must not change the number of required output columns
	GPOS_ASSERT(m_pdrgpcr->Size() == ulReqdColumns);
	// collect required system columns
	SetSystemCols(mp);
	// collect CTE predicates and add them to CTE producer expressions
	CExpressionPreprocessor::AddPredsToCTEProducers(mp, m_pexpr);
	CColumnFactory *col_factory = COptCtxt::PoctxtFromTLS()->Pcf();
	// create the mapping between the computed column, defined in the expression
	// and all CTEs, and its corresponding used columns
	MapComputedToUsedCols(col_factory, m_pexpr);
	pcteinfo->MapComputedToUsedCols(col_factory);
}
//---------------------------------------------------------------------------
// @function:
// CQueryContext::~CQueryContext
//
// @doc:
// Dtor
//
//---------------------------------------------------------------------------
CQueryContext::~CQueryContext()
{
	// release everything the context owns (see ctor for ownership transfer);
	// m_pdrgpcrSystemCols may be NULL, hence SafeRelease
	m_pexpr->Release();
	m_prpp->Release();
	m_pdrgpcr->Release();
	m_pdrgpmdname->Release();
	CRefCount::SafeRelease(m_pdrgpcrSystemCols);
}
//---------------------------------------------------------------------------
// @function:
// CQueryContext::PopTop
//
// @doc:
// Return top level operator in the given expression
//
//---------------------------------------------------------------------------
COperator *
CQueryContext::PopTop(CExpression *pexpr)
{
	GPOS_ASSERT(NULL != pexpr);

	// descend through any chain of CTE anchors to reach the first
	// "real" operator; an anchor's only relevant child is child 0
	CExpression *pexprRoot = pexpr;
	while (COperator::EopLogicalCTEAnchor == pexprRoot->Pop()->Eopid())
	{
		pexprRoot = (*pexprRoot)[0];
		GPOS_ASSERT(NULL != pexprRoot);
	}

	return pexprRoot->Pop();
}
//---------------------------------------------------------------------------
// @function:
// CQueryContext::SetReqdSystemCols
//
// @doc:
// Collect system columns from output columns
//
//---------------------------------------------------------------------------
void
CQueryContext::SetSystemCols(CMemoryPool *mp)
{
	GPOS_ASSERT(NULL == m_pdrgpcrSystemCols);
	GPOS_ASSERT(NULL != m_pdrgpcr);

	// gather every system column appearing among the required output columns
	m_pdrgpcrSystemCols = GPOS_NEW(mp) CColRefArray(mp);
	const ULONG num_cols = m_pdrgpcr->Size();
	for (ULONG idx = 0; idx < num_cols; idx++)
	{
		CColRef *pcr = (*m_pdrgpcr)[idx];
		if (!pcr->FSystemCol())
		{
			continue;
		}
		m_pdrgpcrSystemCols->Append(pcr);
	}
}
//---------------------------------------------------------------------------
// @function:
// CQueryContext::PqcGenerate
//
// @doc:
// Generate the query context for the given expression and array of
// output column ref ids
//
//---------------------------------------------------------------------------
CQueryContext *
CQueryContext::PqcGenerate(CMemoryPool *mp, CExpression *pexpr,
						   ULongPtrArray *pdrgpulQueryOutputColRefId,
						   CMDNameArray *pdrgpmdname, BOOL fDeriveStats)
{
	GPOS_ASSERT(NULL != pexpr && NULL != pdrgpulQueryOutputColRefId);
	// pcrs and colref_array are created here and handed off: pcrs to the
	// required-properties object, colref_array to the query context
	CColRefSet *pcrs = GPOS_NEW(mp) CColRefSet(mp);
	CColRefArray *colref_array = GPOS_NEW(mp) CColRefArray(mp);
	COptCtxt *poptctxt = COptCtxt::PoctxtFromTLS();
	CColumnFactory *col_factory = poptctxt->Pcf();
	GPOS_ASSERT(NULL != col_factory);
	// Collect required column references (colref_array): resolve each output
	// column ref id via the column factory
	const ULONG length = pdrgpulQueryOutputColRefId->Size();
	for (ULONG ul = 0; ul < length; ul++)
	{
		ULONG *pul = (*pdrgpulQueryOutputColRefId)[ul];
		GPOS_ASSERT(NULL != pul);
		CColRef *colref = col_factory->LookupColRef(*pul);
		GPOS_ASSERT(NULL != colref);
		pcrs->Include(colref);
		colref_array->Append(colref);
	}
	// Collect required properties (prpp) at the top level:
	// By default no sort order requirement is added, unless the root operator in
	// the input logical expression is a LIMIT. This is because Orca always
	// attaches top level Sort to a LIMIT node.
	COrderSpec *pos = NULL;
	CExpression *pexprResult = pexpr;
	COperator *popTop = PopTop(pexpr);
	if (COperator::EopLogicalLimit == popTop->Eopid())
	{
		// top level operator is a limit, copy order spec to query context
		// (AddRef because the limit node keeps its own reference)
		pos = CLogicalLimit::PopConvert(popTop)->Pos();
		pos->AddRef();
	}
	else
	{
		// no order required
		pos = GPOS_NEW(mp) COrderSpec(mp);
	}
	CDistributionSpec *pds = NULL;
	BOOL fDML = CUtils::FLogicalDML(pexpr->Pop());
	poptctxt->MarkDMLQuery(fDML);
	// DML commands do not have distribution requirement. Otherwise the
	// distribution requirement is Singleton.
	if (fDML)
	{
		pds = GPOS_NEW(mp) CDistributionSpecAny(COperator::EopSentinel);
	}
	else
	{
		pds = GPOS_NEW(mp)
			CDistributionSpecSingleton(CDistributionSpecSingleton::EstMaster);
	}
	// By default, no rewindability requirement needs to be satisfied at the top level
	CRewindabilitySpec *prs = GPOS_NEW(mp) CRewindabilitySpec(
		CRewindabilitySpec::ErtNone, CRewindabilitySpec::EmhtNoMotion);
	// Ensure order, distribution and rewindability meet 'satisfy' matching at the top level
	CEnfdOrder *peo = GPOS_NEW(mp) CEnfdOrder(pos, CEnfdOrder::EomSatisfy);
	CEnfdDistribution *ped =
		GPOS_NEW(mp) CEnfdDistribution(pds, CEnfdDistribution::EdmSatisfy);
	CEnfdRewindability *per =
		GPOS_NEW(mp) CEnfdRewindability(prs, CEnfdRewindability::ErmSatisfy);
	// Required CTEs are obtained from the CTEInfo global information in the optimizer context
	CCTEReq *pcter = poptctxt->Pcteinfo()->PcterProducers(mp);
	// NB: Partition propagation requirements are not initialized here. They are
	// constructed later based on derived relation properties (CPartInfo) by
	// CReqdPropPlan::InitReqdPartitionPropagation().
	CReqdPropPlan *prpp =
		GPOS_NEW(mp) CReqdPropPlan(pcrs, peo, ped, per, pcter);
	// Finally, create the CQueryContext
	// (AddRef on pdrgpmdname: the context releases it in its destructor,
	// while the caller keeps its own reference)
	pdrgpmdname->AddRef();
	return GPOS_NEW(mp) CQueryContext(mp, pexprResult, prpp, colref_array,
									  pdrgpmdname, fDeriveStats);
}
#ifdef GPOS_DEBUG
//---------------------------------------------------------------------------
// @function:
// CQueryContext::OsPrint
//
// @doc:
// Debug print
//
//---------------------------------------------------------------------------
IOstream &
CQueryContext::OsPrint(IOstream &os) const
{
	// dump the preprocessed expression followed by the required plan properties
	os << *m_pexpr << std::endl;
	os << *m_prpp;
	return os;
}

void
CQueryContext::DbgPrint() const
{
	// route the debug dump through an auto-trace stream
	CAutoTrace at(m_mp);
	(void) this->OsPrint(at.Os());
}
#endif // GPOS_DEBUG
//---------------------------------------------------------------------------
// @function:
// CQueryContext::MapComputedToUsedCols
//
// @doc:
// Walk the expression and add the mapping between computed column
// and its used columns
//
//---------------------------------------------------------------------------
void
CQueryContext::MapComputedToUsedCols(CColumnFactory *col_factory,
									 CExpression *pexpr)
{
	GPOS_ASSERT(NULL != pexpr);

	// a Project node carries its project list as the second child; register
	// each project element with the column factory
	if (COperator::EopLogicalProject == pexpr->Pop()->Eopid())
	{
		CExpression *pexprProjList = (*pexpr)[1];
		const ULONG ulPrEls = pexprProjList->Arity();
		for (ULONG ulPrEl = 0; ulPrEl < ulPrEls; ulPrEl++)
		{
			col_factory->AddComputedToUsedColsMap((*pexprProjList)[ulPrEl]);
		}
	}

	// recurse into all children
	const ULONG ulArity = pexpr->Arity();
	for (ULONG ulChild = 0; ulChild < ulArity; ulChild++)
	{
		MapComputedToUsedCols(col_factory, (*pexpr)[ulChild]);
	}
}
// EOF
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "common.h"
#include "appdomain.hpp"
#include "peimagelayout.inl"
#include "field.h"
#include "strongnameinternal.h"
#include "excep.h"
#include "eeconfig.h"
#include "gcheaputilities.h"
#include "eventtrace.h"
#include "assemblyname.hpp"
#include "eeprofinterfaces.h"
#include "dbginterface.h"
#ifndef DACCESS_COMPILE
#include "eedbginterfaceimpl.h"
#endif
#include "comdynamic.h"
#include "mlinfo.h"
#include "posterror.h"
#include "assemblynative.hpp"
#include "shimload.h"
#include "stringliteralmap.h"
#include "codeman.h"
#include "comcallablewrapper.h"
#include "eventtrace.h"
#include "comdelegate.h"
#include "siginfo.hpp"
#include "typekey.h"
#include "castcache.h"
#include "caparser.h"
#include "ecall.h"
#include "finalizerthread.h"
#include "threadsuspend.h"
#ifdef FEATURE_PREJIT
#include "corcompile.h"
#include "compile.h"
#endif // FEATURE_PREJIT
#ifdef FEATURE_COMINTEROP
#include "comtoclrcall.h"
#include "runtimecallablewrapper.h"
#include "mngstdinterfaces.h"
#include "olevariant.h"
#include "rcwrefcache.h"
#include "olecontexthelpers.h"
#endif // FEATURE_COMINTEROP
#include "typeequivalencehash.hpp"
#include "appdomain.inl"
#include "typeparse.h"
#include "threadpoolrequest.h"
#include "nativeoverlapped.h"
#ifndef TARGET_UNIX
#include "dwreport.h"
#endif // !TARGET_UNIX
#include "stringarraylist.h"
#include "../binder/inc/bindertracing.h"
#include "../binder/inc/clrprivbindercoreclr.h"
// this file handles string conversion errors for itself
#undef MAKE_TRANSLATIONFAILED
// Define these macros to do strict validation for jit lock and class
// init entry leaks. These defines determine whether the asserts that
// verify for these leaks are defined or not. These asserts can
// sometimes go off even if no entries have been leaked, so these
// defines should be used with caution.
//
// If we are inside a .cctor when the application shuts down, then the
// class init lock's head will be set and this will cause the assert
// to go off.
//
// If we are jitting a method when the application shuts down, then
// the jit lock's head will be set, causing the assert to go off.
//#define STRICT_CLSINITLOCK_ENTRY_LEAK_DETECTION
static const WCHAR DEFAULT_DOMAIN_FRIENDLY_NAME[] = W("DefaultDomain");
static const WCHAR OTHER_DOMAIN_FRIENDLY_NAME_PREFIX[] = W("Domain");
#define STATIC_OBJECT_TABLE_BUCKET_SIZE 1020
// Statics
SPTR_IMPL(AppDomain, AppDomain, m_pTheAppDomain);
SPTR_IMPL(SystemDomain, SystemDomain, m_pSystemDomain);
#ifdef FEATURE_PREJIT
SVAL_IMPL(BOOL, SystemDomain, s_fForceDebug);
SVAL_IMPL(BOOL, SystemDomain, s_fForceProfiling);
SVAL_IMPL(BOOL, SystemDomain, s_fForceInstrument);
#endif
#ifndef DACCESS_COMPILE
// Base Domain Statics
CrstStatic BaseDomain::m_SpecialStaticsCrst;
int BaseDomain::m_iNumberOfProcessors = 0;
// System Domain Statics
GlobalStringLiteralMap* SystemDomain::m_pGlobalStringLiteralMap = NULL;
DECLSPEC_ALIGN(16)
static BYTE g_pSystemDomainMemory[sizeof(SystemDomain)];
CrstStatic SystemDomain::m_SystemDomainCrst;
CrstStatic SystemDomain::m_DelayedUnloadCrst;
ULONG SystemDomain::s_dNumAppDomains = 0;
DWORD SystemDomain::m_dwLowestFreeIndex = 0;
#ifndef CROSSGEN_COMPILE
// Constructor for the LargeHeapHandleBucket class.
// Allocates a pinned object array of 'Size' slots whose elements serve as
// pseudo-handles; the bucket is prepended to the list headed by 'pNext'.
LargeHeapHandleBucket::LargeHeapHandleBucket(LargeHeapHandleBucket *pNext, DWORD Size, BaseDomain *pDomain)
    : m_pNext(pNext)
    , m_ArraySize(Size)
    , m_CurrentPos(0)
    , m_CurrentEmbeddedFreePos(0) // hint for where to start a search for an embedded free item
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(CheckPointer(pDomain));
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;
    PTRARRAYREF HandleArrayObj;
    // Allocate the array in the large object heap.
    OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
    HandleArrayObj = (PTRARRAYREF)AllocateObjectArray(Size, g_pObjectClass, TRUE);
    // Retrieve the pointer to the data inside the array. This is legal since the array
    // is located in the large object heap and is guaranteed not to move.
    // The cached interior pointer is what AllocateHandles() hands out.
    m_pArrayDataPtr = (OBJECTREF *)HandleArrayObj->GetDataPtr();
    // Store the array in a strong handle to keep it alive.
    // A *pinning* handle also guarantees the array (and thus m_pArrayDataPtr)
    // never moves for the lifetime of the bucket.
    m_hndHandleArray = pDomain->CreatePinningHandle((OBJECTREF)HandleArrayObj);
}
// Destructor for the LargeHeapHandleBucket class.
LargeHeapHandleBucket::~LargeHeapHandleBucket()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    // Release the pinning handle so the underlying array becomes collectible.
    if (m_hndHandleArray != NULL)
    {
        DestroyPinningHandle(m_hndHandleArray);
        m_hndHandleArray = NULL;
    }
}
// Allocate handles from the bucket.
// Returns a pointer to the first of nRequested consecutive slots.
OBJECTREF *LargeHeapHandleBucket::AllocateHandles(DWORD nRequested)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    // Caller must have verified capacity; this bucket never grows.
    _ASSERTE(nRequested > 0 && nRequested <= GetNumRemainingHandles());
    _ASSERTE(m_pArrayDataPtr == (OBJECTREF*)((PTRARRAYREF)ObjectFromHandle(m_hndHandleArray))->GetDataPtr());

    // Hand out the next run of slots and advance the cursor.
    OBJECTREF* pFirstSlot = m_pArrayDataPtr + m_CurrentPos;
    m_CurrentPos += nRequested;
    return pFirstSlot;
}
// look for a free item embedded in the table
// Freed slots are marked with the preallocated sentinel object; returns the
// first such slot (cleared to NULL), or NULL if none is found.
OBJECTREF *LargeHeapHandleBucket::TryAllocateEmbeddedFreeHandle()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    OBJECTREF sentinel = ObjectFromHandle(g_pPreallocatedSentinelObject);
    _ASSERTE(sentinel != NULL);

    // Scan forward from the hint; slots before it were checked previously.
    for (int slot = m_CurrentEmbeddedFreePos; slot < m_CurrentPos; slot++)
    {
        if (m_pArrayDataPtr[slot] == sentinel)
        {
            // Remember where we found one so the next search starts here,
            // then clear the slot and hand it out.
            m_CurrentEmbeddedFreePos = slot;
            m_pArrayDataPtr[slot] = NULL;
            return &m_pArrayDataPtr[slot];
        }
    }

    // didn't find it (we don't bother wrapping around for a full search, it's not worth it to try that hard, we'll get it next time)
    m_CurrentEmbeddedFreePos = 0;
    return NULL;
}
// enumerate the handles in the bucket
// Reports every handed-out slot in this bucket's array to the GC callback.
void LargeHeapHandleBucket::EnumStaticGCRefs(promote_func* fn, ScanContext* sc)
{
    for (int idx = 0; idx < m_CurrentPos; idx++)
    {
        fn((Object**)(m_pArrayDataPtr + idx), sc, 0);
    }
}
// Maximum bucket size will be 64K on 32-bit and 128K on 64-bit.
// We subtract out a small amount to leave room for the object
// header and length of the array.
#define MAX_BUCKETSIZE (16384 - 4)
// Constructor for the LargeHeapHandleTable class.
// No bucket is allocated here; the first bucket (of InitialBucketSize slots)
// is created lazily by AllocateHandles() on first use.
LargeHeapHandleTable::LargeHeapHandleTable(BaseDomain *pDomain, DWORD InitialBucketSize)
    : m_pHead(NULL)
    , m_pDomain(pDomain)
    , m_NextBucketSize(InitialBucketSize)
    , m_pFreeSearchHint(NULL)
    , m_cEmbeddedFree(0)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(CheckPointer(pDomain));
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;
#ifdef _DEBUG
    // Debug-only record of the lock callers must hold around Allocate/Release;
    // AllocateHandles()/ReleaseHandles() assert it is non-NULL and owned, so
    // the owner must register it before first use (registration happens
    // elsewhere -- not visible in this file chunk).
    m_pCrstDebug = NULL;
#endif
}
// Destructor for the LargeHeapHandleTable class.
LargeHeapHandleTable::~LargeHeapHandleTable()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    // Walk the singly-linked bucket list, freeing each bucket in turn.
    LargeHeapHandleBucket *pBucket = m_pHead;
    while (pBucket != NULL)
    {
        LargeHeapHandleBucket *pNext = pBucket->GetNext();
        delete pBucket;
        pBucket = pNext;
    }
    m_pHead = NULL;
}
//*****************************************************************************
//
// LOCKING RULES FOR AllocateHandles() and ReleaseHandles() 12/08/2004
//
//
// These functions are not protected by any locking in this location but rather the callers are
// assumed to be doing suitable locking for the handle table. The handle table itself is
// behaving rather like a thread-agnostic collection class -- it doesn't want to know
// much about the outside world and so it is just doing its job with no awareness of
// thread notions.
//
// The instances in question are as follows.
// There are two locations where you can find a LargeHeapHandleTable:
// 1) there is one in every BaseDomain, it is used to keep track of the static members
// in that domain
// 2) there is one in the System Domain that is used for the GlobalStringLiteralMap
//
// the one in (2) is not the same as the one that is in the BaseDomain object that corresponds
// to the SystemDomain -- that one is basically stillborn because the string literals don't go
// there and of course the System Domain has no code loaded into it -- only regular
// AppDomains (like Domain 0) actually execute code. As a result handle tables are in
// practice used either for string literals or for static members but never for both.
// At least not at this writing.
//
// Now it's useful to consider what the locking discipline is for these classes.
//
// ---------
//
// First case: (easiest) is the statics members
//
// Each BaseDomain has its own critical section
//
// BaseDomain::AllocateObjRefPtrsInLargeTable takes a lock with
// CrstHolder ch(&m_LargeHeapHandleTableCrst);
//
// it does this before it calls AllocateHandles which suffices. It does not call ReleaseHandles
// at any time (although ReleaseHandles may be called via AllocateHandles if the request
// doesn't fit in the current block, the remaining handles at the end of the block are released
// automatically as part of allocation/recycling)
//
// note: Recycled handles are only used during String Literal allocation because we only try
// to recycle handles if the allocation request is for exactly one handle.
//
// The handles in the BaseDomain handle table are released when the Domain is unloaded
// as the GC objects become rootless at that time.
//
// This dispenses with all of the Handle tables except the one that is used for string literals
//
// ---------
//
// Second case: Allocation for use in a string literal
//
// AppDomainStringLiteralMap::GetStringLiteral
// leads to calls to
// LargeHeapHandleBlockHolder constructor
// leads to calls to
// m_Data = pOwner->AllocateHandles(nCount);
//
// before doing this AppDomainStringLiteralMap::GetStringLiteral takes this lock
//
// CrstHolder gch(&(SystemDomain::GetGlobalStringLiteralMap()->m_HashTableCrstGlobal));
//
// which is the lock for the hash table that it owns
//
// STRINGREF *AppDomainStringLiteralMap::GetInternedString
//
// has a similar call path and uses the same approach and the same lock
// this covers all the paths which allocate
//
// ---------
//
// Third case: Releases for use in a string literal entry
//
// CrstHolder gch(&(SystemDomain::GetGlobalStringLiteralMap()->m_HashTableCrstGlobal));
// taken in the AppDomainStringLiteralMap functions below protects the 3 ways that this can happen
//
// case 3a)
//
// AppDomainStringLiteralMap::GetStringLiteral() can call StringLiteralEntry::Release in some
// error cases, leading to the same stack as above
//
// case 3b)
//
// AppDomainStringLiteralMap::GetInternedString() can call StringLiteralEntry::Release in some
// error cases, leading to the same stack as above
//
// case 3c)
//
// The same code paths as in 3a and 3b also end up releasing if an exception is thrown
// during their processing. Both these paths use a StringLiteralEntryHolder to assist in cleanup,
// the StaticRelease method of the StringLiteralEntry gets called, which in turn calls the
// Release method.
// Allocate handles from the large heap handle table.
// Returns a pointer to nRequested consecutive OBJECTREF slots. Singleton
// requests may be satisfied by recycling a previously released slot; larger
// requests always come from the head bucket, growing the bucket list if needed.
OBJECTREF* LargeHeapHandleTable::AllocateHandles(DWORD nRequested)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(nRequested > 0);
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;
    // SEE "LOCKING RULES FOR AllocateHandles() and ReleaseHandles()" above
    // the lock must be registered and already held by the caller per contract
#ifdef _DEBUG
    _ASSERTE(m_pCrstDebug != NULL);
    _ASSERTE(m_pCrstDebug->OwnedByCurrentThread());
#endif
    if (nRequested == 1 && m_cEmbeddedFree != 0)
    {
        // special casing singleton requests to look for slots that can be re-used
        // we need to do this because string literals are allocated one at a time and then sometimes
        // released. we do not wish for the number of handles consumed by string literals to
        // increase forever as assemblies are loaded and unloaded
        if (m_pFreeSearchHint == NULL)
            m_pFreeSearchHint = m_pHead;
        // walk buckets starting at the hint; each bucket keeps its own
        // intra-bucket hint for the scan
        while (m_pFreeSearchHint)
        {
            OBJECTREF* pObjRef = m_pFreeSearchHint->TryAllocateEmbeddedFreeHandle();
            if (pObjRef != NULL)
            {
                // the slot is to have been prepared with a null ready to go
                _ASSERTE(*pObjRef == NULL);
                m_cEmbeddedFree--;
                return pObjRef;
            }
            m_pFreeSearchHint = m_pFreeSearchHint->GetNext();
        }
        // the search doesn't wrap around so it's possible that we might have embedded free items
        // and not find them but that's ok, we'll get them on the next alloc... all we're trying to do
        // is to not have big leaks over time.
    }
    // Retrieve the remaining number of handles in the bucket.
    DWORD NumRemainingHandlesInBucket = (m_pHead != NULL) ? m_pHead->GetNumRemainingHandles() : 0;
    // create a new block if this request doesn't fit in the current block
    if (nRequested > NumRemainingHandlesInBucket)
    {
        if (m_pHead != NULL)
        {
            // mark the handles in that remaining region as available for re-use
            ReleaseHandles(m_pHead->CurrentPos(), NumRemainingHandlesInBucket);
            // mark what's left as having been used
            m_pHead->ConsumeRemaining();
        }
        // create a new bucket for this allocation
        // We need a block big enough to hold the requested handles
        DWORD NewBucketSize = max(m_NextBucketSize, nRequested);
        m_pHead = new LargeHeapHandleBucket(m_pHead, NewBucketSize, m_pDomain);
        // double the target size for the next bucket, capped at MAX_BUCKETSIZE
        m_NextBucketSize = min(m_NextBucketSize * 2, MAX_BUCKETSIZE);
    }
    return m_pHead->AllocateHandles(nRequested);
}
//*****************************************************************************
// Release object handles allocated using AllocateHandles().
// The slots are stamped with the preallocated sentinel object, marking them
// as available for re-use by a later singleton allocation.
void LargeHeapHandleTable::ReleaseHandles(OBJECTREF *pObjRef, DWORD nReleased)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_COOPERATIVE;
        PRECONDITION(CheckPointer(pObjRef));
    }
    CONTRACTL_END;

    // SEE "LOCKING RULES FOR AllocateHandles() and ReleaseHandles()" above
    // the lock must be registered and already held by the caller per contract
#ifdef _DEBUG
    _ASSERTE(m_pCrstDebug != NULL);
    _ASSERTE(m_pCrstDebug->OwnedByCurrentThread());
#endif

    // Fetch the sentinel object used to tag free slots.
    OBJECTREF pSentinel = ObjectFromHandle(g_pPreallocatedSentinelObject);
    _ASSERTE(pSentinel != NULL);

    // Stamp each released slot with the sentinel so it can be found again.
    OBJECTREF* pEnd = pObjRef + nReleased;
    for (OBJECTREF* pSlot = pObjRef; pSlot != pEnd; pSlot++)
    {
        SetObjectReference(pSlot, pSentinel);
    }

    // Account for the newly available embedded-free slots.
    m_cEmbeddedFree += nReleased;
}
// Enumerate the handles in the handle table.
// Walks the bucket chain starting at the most recently created bucket and
// reports the GC references held by each bucket to the callback.
void LargeHeapHandleTable::EnumStaticGCRefs(promote_func* fn, ScanContext* sc)
{
    LargeHeapHandleBucket* pCurrent = m_pHead;
    while (pCurrent != nullptr)
    {
        pCurrent->EnumStaticGCRefs(fn, sc);
        pCurrent = pCurrent->GetNext();
    }
}
// Constructor for the ThreadStaticHandleBucket class.
// Allocates an object[] of 'Size' slots on the GC heap and keeps it alive via
// a strong handle in the given domain; the bucket is linked ahead of pNext.
ThreadStaticHandleBucket::ThreadStaticHandleBucket(ThreadStaticHandleBucket *pNext, DWORD Size, BaseDomain *pDomain)
    : m_pNext(pNext)
    , m_ArraySize(Size)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(CheckPointer(pDomain));
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;

    PTRARRAYREF HandleArrayObj;

    // Allocate the array on the GC heap.
    // NOTE(review): the override below appears to assert the required types
    // are already at CLASS_LOADED here — confirm against the macro's definition.
    OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
    HandleArrayObj = (PTRARRAYREF)AllocateObjectArray(Size, g_pObjectClass, FALSE);

    // Store the array in a strong handle to keep it alive.
    m_hndHandleArray = pDomain->CreateStrongHandle((OBJECTREF)HandleArrayObj);
}
// Destructor for the ThreadStaticHandleBucket class.
// Releases the strong handle holding the bucket's object array, if present,
// so the GC may reclaim the array.
ThreadStaticHandleBucket::~ThreadStaticHandleBucket()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    // Nothing to release if the handle was never created.
    if (!m_hndHandleArray)
        return;

    DestroyStrongHandle(m_hndHandleArray);
    m_hndHandleArray = NULL;
}
// Allocate handles from the bucket.
// Returns the strong handle that keeps this bucket's object array alive;
// callers index into the array to obtain individual slots.
OBJECTHANDLE ThreadStaticHandleBucket::GetHandles()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    return m_hndHandleArray;
}
// Constructor for the ThreadStaticHandleTable class.
// Starts with an empty bucket list; buckets are created on demand by
// AllocateHandles(). The domain is remembered for bucket construction.
ThreadStaticHandleTable::ThreadStaticHandleTable(BaseDomain *pDomain)
    : m_pHead(NULL)
    , m_pDomain(pDomain)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        PRECONDITION(CheckPointer(pDomain));
    }
    CONTRACTL_END;
}
// Destructor for the ThreadStaticHandleTable class.
// Deletes every bucket in the singly-linked list; each bucket's destructor
// releases its own strong handle.
ThreadStaticHandleTable::~ThreadStaticHandleTable()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    // Delete the buckets, capturing each next pointer before freeing.
    for (ThreadStaticHandleBucket *pBucket = m_pHead; pBucket != NULL; )
    {
        ThreadStaticHandleBucket *pNext = pBucket->GetNext();
        delete pBucket;
        pBucket = pNext;
    }
    m_pHead = NULL;
}
// Allocate handles from the large heap handle table.
// Unlike LargeHeapHandleTable, every request gets a brand-new bucket sized
// exactly to the request; the bucket's array handle is returned.
OBJECTHANDLE ThreadStaticHandleTable::AllocateHandles(DWORD nRequested)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(nRequested > 0);
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;

    // create a new bucket for this allocation
    m_pHead = new ThreadStaticHandleBucket(m_pHead, nRequested, m_pDomain);

    return m_pHead->GetHandles();
}
#endif // CROSSGEN_COMPILE
//*****************************************************************************
// BaseDomain
//*****************************************************************************
// One-time process startup hook for BaseDomain: initializes the lock that
// guards special (thread/context) statics bookkeeping.
void BaseDomain::Attach()
{
    m_SpecialStaticsCrst.Init(CrstSpecialStatics);
}
// Default constructor: only nulls out fields and pre-initializes locks so the
// domain can be safely destructed if ::Init is never reached.
BaseDomain::BaseDomain()
{
    // initialize fields so the domain can be safely destructed
    // shouldn't call anything that can fail here - use ::Init instead
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        FORBID_FAULT;
    }
    CONTRACTL_END;

    m_pTPABinderContext = NULL;

    // Make sure the container is set to NULL so that it gets loaded when it is used.
    m_pLargeHeapHandleTable = NULL;

#ifndef CROSSGEN_COMPILE
    // Note that m_handleStore is overridden by app domains
    m_handleStore = GCHandleUtilities::GetGCHandleManager()->GetGlobalHandleStore();
#else
    // No GC handle store exists when cross-compiling.
    m_handleStore = NULL;
#endif

#ifdef FEATURE_COMINTEROP
    m_pMngStdInterfacesInfo = NULL;
#endif

    // PreInit only marks the list locks as uninitialized; full Init happens
    // later in BaseDomain::Init.
    m_FileLoadLock.PreInit();
    m_JITLock.PreInit();
    m_ClassInitLock.PreInit();
    m_ILStubGenLock.PreInit();
    m_NativeTypeLoadLock.PreInit();
} //BaseDomain::BaseDomain
//*****************************************************************************
//*****************************************************************************
// Second-phase initialization: creates all of the domain's locks, the interop
// data hash, and miscellaneous counters. May throw on OOM.
void BaseDomain::Init()
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;

    //
    // Initialize the domain locks
    //

    // The system domain lives in static storage (g_pSystemDomainMemory) and
    // uses a distinct crst level from ordinary base domains.
    if (this == reinterpret_cast<BaseDomain*>(&g_pSystemDomainMemory[0]))
        m_DomainCrst.Init(CrstSystemBaseDomain);
    else
        m_DomainCrst.Init(CrstBaseDomain);

    m_DomainCacheCrst.Init(CrstAppDomainCache);
    m_DomainLocalBlockCrst.Init(CrstDomainLocalBlock);

    m_InteropDataCrst.Init(CrstInteropData, CRST_REENTRANCY);

    // NOTE: CRST_UNSAFE_COOPGC prevents a GC mode switch to preemptive when entering this crst.
    // If you remove this flag, we will switch to preemptive mode when entering
    // m_FileLoadLock, which means all functions that enter it will become
    // GC_TRIGGERS.  (This includes all uses of PEFileListLockHolder, LoadLockHolder, etc.)  So be sure
    // to update the contracts if you remove this flag.
    m_FileLoadLock.Init(CrstAssemblyLoader,
                        CrstFlags(CRST_HOST_BREAKABLE), TRUE);

    //
    //   The JIT lock and the CCtor locks are at the same level (and marked as
    //   UNSAFE_SAME_LEVEL) because they are all part of the same deadlock detection mechanism. We
    //   see through cycles of JITting and .cctor execution and then explicitly allow the cycle to
    //   be broken by giving access to uninitialized classes.  If there is no cycle or if the cycle
    //   involves other locks that arent part of this special deadlock-breaking semantics, then
    //   we continue to block.
    //
    m_JITLock.Init(CrstJit, CrstFlags(CRST_REENTRANCY | CRST_UNSAFE_SAMELEVEL), TRUE);
    m_ClassInitLock.Init(CrstClassInit, CrstFlags(CRST_REENTRANCY | CRST_UNSAFE_SAMELEVEL), TRUE);

    m_ILStubGenLock.Init(CrstILStubGen, CrstFlags(CRST_REENTRANCY), TRUE);
    m_NativeTypeLoadLock.Init(CrstInteropData, CrstFlags(CRST_REENTRANCY), TRUE);

    // Large heap handle table CRST.
    m_LargeHeapHandleTableCrst.Init(CrstAppDomainHandleTable);

    m_crstLoaderAllocatorReferences.Init(CrstLoaderAllocatorReferences);
    // Has to switch thread to GC_NOTRIGGER while being held (see code:BaseDomain#AssemblyListLock)
    m_crstAssemblyList.Init(CrstAssemblyList, CrstFlags(
        CRST_GC_NOTRIGGER_WHEN_TAKEN | CRST_DEBUGGER_THREAD | CRST_TAKEN_DURING_SHUTDOWN));

#ifdef FEATURE_COMINTEROP
    // Allocate the managed standard interfaces information.
    m_pMngStdInterfacesInfo = new MngStdInterfacesInfo();
#endif // FEATURE_COMINTEROP

    // Init the COM Interop data hash
    {
        LockOwner lock = {&m_InteropDataCrst, IsOwnerOfCrst};
        m_interopDataHash.Init(0, NULL, false, &lock);
    }

    m_dwSizedRefHandles = 0;
    // Cache the CPU count once for the process.
    if (!m_iNumberOfProcessors)
    {
        m_iNumberOfProcessors = GetCurrentProcessCpuCount();
    }
}
#undef LOADERHEAP_PROFILE_COUNTER
// Initialize virtual stub dispatch support for this domain: seeds the type ID
// map and (outside crossgen) creates the domain's VirtualCallStubManager.
void BaseDomain::InitVSD()
{
    STANDARD_VM_CONTRACT;

    // Type IDs for this domain start above the shared-domain range.
    UINT32 startingId = TypeIDMap::STARTING_UNSHARED_DOMAIN_ID;
    m_typeIDMap.Init(startingId, 2);

#ifndef CROSSGEN_COMPILE
    GetLoaderAllocator()->InitVirtualCallStubManager(this);
#endif
}
#ifndef CROSSGEN_COMPILE
// Release this domain's TPA binder context, if one was created.
// Safe to call more than once: the pointer is nulled after the release so a
// subsequent call is a no-op.
void BaseDomain::ClearBinderContext()
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        MODE_PREEMPTIVE;
    }
    CONTRACTL_END;

    if (m_pTPABinderContext == NULL)
        return;

    m_pTPABinderContext->Release();
    m_pTPABinderContext = NULL;
}
// Drain the delayed loader-allocator unload list: clean up each collectible
// LoaderAllocator's type-init state and handles, then hand it to the system
// domain's delayed-unload list so it is freed only after the GC no longer
// references its MethodTables.
void AppDomain::ShutdownFreeLoaderAllocators()
{
    // If we're called from managed code (i.e. the finalizer thread) we take a lock in
    // LoaderAllocator::CleanupFailedTypeInit, which may throw.  Otherwise we're called
    // from the app-domain shutdown path in which we can avoid taking the lock.
    CONTRACTL
    {
        GC_TRIGGERS;
        THROWS;
        MODE_ANY;
        CAN_TAKE_LOCK;
    }
    CONTRACTL_END;

    CrstHolder ch(GetLoaderAllocatorReferencesLock());

    // Shutdown the LoaderAllocators associated with collectible assemblies
    while (m_pDelayedLoaderAllocatorUnloadList != NULL)
    {
        LoaderAllocator * pCurrentLoaderAllocator = m_pDelayedLoaderAllocatorUnloadList;
        // Remove next loader allocator from the list
        m_pDelayedLoaderAllocatorUnloadList = m_pDelayedLoaderAllocatorUnloadList->m_pLoaderAllocatorDestroyNext;

        // For loader allocator finalization, we need to be careful about cleaning up per-appdomain allocations
        // and synchronizing with GC using delay unload list. We need to wait for next Gen2 GC to finish to ensure
        // that GC heap does not have any references to the MethodTables being unloaded.

        pCurrentLoaderAllocator->CleanupFailedTypeInit();

        pCurrentLoaderAllocator->CleanupHandles();

        // AddToDelayedUnloadList must run in cooperative mode.
        GCX_COOP();
        SystemDomain::System()->AddToDelayedUnloadList(pCurrentLoaderAllocator);
    }
} // AppDomain::ShutdownFreeLoaderAllocators
//---------------------------------------------------------------------------------------
//
// Register the loader allocator for deletion in code:AppDomain::ShutdownFreeLoaderAllocators.
//
//---------------------------------------------------------------------------------------
//
// Register the loader allocator for deletion in code:AppDomain::ShutdownFreeLoaderAllocators.
// Pushes the allocator onto the head of the delayed-unload list under the
// loader-allocator references lock.
//
void AppDomain::RegisterLoaderAllocatorForDeletion(LoaderAllocator * pLoaderAllocator)
{
    CONTRACTL
    {
        GC_TRIGGERS;
        NOTHROW;
        MODE_ANY;
        CAN_TAKE_LOCK;
    }
    CONTRACTL_END;

    CrstHolder ch(GetLoaderAllocatorReferencesLock());

    // Link at the head of the singly-linked delayed-unload list.
    pLoaderAllocator->m_pLoaderAllocatorDestroyNext = m_pDelayedLoaderAllocatorUnloadList;
    m_pDelayedLoaderAllocatorUnloadList = pLoaderAllocator;
}
// Parse the PATH_SEPARATOR-delimited list of native DLL search directories and
// append each non-empty entry (normalized to end with a directory separator)
// to m_NativeDllSearchDirectories.
//
// Fix: the original declared an outer `SString qualifiedPath;` before the loop
// that was immediately shadowed by the loop-local declaration below and never
// used; the dead declaration has been removed.
void AppDomain::SetNativeDllSearchDirectories(LPCWSTR wszNativeDllSearchDirectories)
{
    STANDARD_VM_CONTRACT;

    SString sDirectories(wszNativeDllSearchDirectories);

    if (sDirectories.GetCount() > 0)
    {
        SString::CIterator start = sDirectories.Begin();
        SString::CIterator itr = sDirectories.Begin();
        SString::CIterator end = sDirectories.End();

        while (itr != end)
        {
            start = itr;
            BOOL found = sDirectories.Find(itr, PATH_SEPARATOR_CHAR_W);
            if (!found)
            {
                // No more separators: the last entry runs to the end.
                itr = end;
            }

            // Extract the current entry [start, itr).
            SString qualifiedPath(sDirectories, start, itr);

            if (found)
            {
                // Skip past the separator for the next iteration.
                itr++;
            }

            unsigned len = qualifiedPath.GetCount();

            if (len > 0)
            {
                // Normalize: entries always end with a directory separator.
                if (qualifiedPath[len - 1] != DIRECTORY_SEPARATOR_CHAR_W)
                {
                    qualifiedPath.Append(DIRECTORY_SEPARATOR_CHAR_W);
                }

                // Holder frees the copy if Append throws; released on success.
                NewHolder<SString> stringHolder(new SString(qualifiedPath));
                IfFailThrow(m_NativeDllSearchDirectories.Append(stringHolder.GetValue()));
                stringHolder.SuppressRelease();
            }
        }
    }
}
// Allocate nRequested OBJECTREF slots from the domain's large heap handle
// table. If ppLazyAllocate is supplied, the result is published through it and
// a previously published value is returned directly (double-checked under the
// table lock, so concurrent callers agree on one allocation).
OBJECTREF* BaseDomain::AllocateObjRefPtrsInLargeTable(int nRequested, OBJECTREF** ppLazyAllocate)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        PRECONDITION((nRequested > 0));
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;

    // Fast path: unsynchronized check before taking the lock.
    if (ppLazyAllocate && *ppLazyAllocate)
    {
        // Allocation already happened
        return *ppLazyAllocate;
    }

    // Enter preemptive state, take the lock and go back to cooperative mode.
    {
        CrstHolder ch(&m_LargeHeapHandleTableCrst);
        GCX_COOP();

        // Re-check under the lock: another thread may have won the race.
        if (ppLazyAllocate && *ppLazyAllocate)
        {
            // Allocation already happened
            return *ppLazyAllocate;
        }

        // Make sure the large heap handle table is initialized.
        if (!m_pLargeHeapHandleTable)
            InitLargeHeapHandleTable();

        // Allocate the handles.
        OBJECTREF* result = m_pLargeHeapHandleTable->AllocateHandles(nRequested);

        // Publish while still holding the lock.
        if (ppLazyAllocate)
        {
            *ppLazyAllocate = result;
        }

        return result;
    }
}
#endif // CROSSGEN_COMPILE
#endif // !DACCESS_COMPILE
#ifdef FEATURE_COMINTEROP
#ifndef CROSSGEN_COMPILE
#ifndef DACCESS_COMPILE
// Return the cached System.Reflection.Missing.Value object, lazily creating
// and publishing its handle on first use. Safe for concurrent callers: the
// handle is published with an interlocked compare-exchange and the losing
// thread destroys its redundant handle.
OBJECTREF AppDomain::GetMissingObject()
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    if (!m_hndMissing)
    {
        // Get the field
        FieldDesc *pValueFD = CoreLibBinder::GetField(FIELD__MISSING__VALUE);

        // Ensure the Missing class's .cctor has run before reading the static.
        pValueFD->CheckRunClassInitThrowing();

        // Retrieve the value static field and store it.
        OBJECTHANDLE hndMissing = CreateHandle(pValueFD->GetStaticOBJECTREF());

        if (FastInterlockCompareExchangePointer(&m_hndMissing, hndMissing, NULL) != NULL)
        {
            // Exchanged failed. The m_hndMissing did not equal NULL and was returned.
            DestroyHandle(hndMissing);
        }
    }

    return ObjectFromHandle(m_hndMissing);
}
#endif // DACCESS_COMPILE
#endif //CROSSGEN_COMPILE
#endif // FEATURE_COMINTEROP
#ifndef DACCESS_COMPILE
#ifndef CROSSGEN_COMPILE
// Check whether the given string is already interned in this domain.
// Thin forwarder to the loader allocator's intern table.
STRINGREF *BaseDomain::IsStringInterned(STRINGREF *pString)
{
    CONTRACTL
    {
        GC_TRIGGERS;
        THROWS;
        MODE_COOPERATIVE;
        PRECONDITION(CheckPointer(pString));
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;

    return GetLoaderAllocator()->IsStringInterned(pString);
}
// Intern the given string in this domain, returning the canonical entry
// (adding it if not already present). Thin forwarder to the loader allocator.
STRINGREF *BaseDomain::GetOrInternString(STRINGREF *pString)
{
    CONTRACTL
    {
        GC_TRIGGERS;
        THROWS;
        MODE_COOPERATIVE;
        PRECONDITION(CheckPointer(pString));
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;

    return GetLoaderAllocator()->GetOrInternString(pString);
}
// Create the domain's large heap handle table. Caller must guarantee it has
// not been created yet (see precondition); callers synchronize via
// m_LargeHeapHandleTableCrst, which is also registered for debug lock checks.
void BaseDomain::InitLargeHeapHandleTable()
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        PRECONDITION(m_pLargeHeapHandleTable==NULL);
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;

    m_pLargeHeapHandleTable = new LargeHeapHandleTable(this, STATIC_OBJECT_TABLE_BUCKET_SIZE);

#ifdef _DEBUG
    // Let the table assert that its lock is held on every alloc/release.
    m_pLargeHeapHandleTable->RegisterCrstDebug(&m_LargeHeapHandleTableCrst);
#endif
}
#endif // CROSSGEN_COMPILE
//*****************************************************************************
//*****************************************************************************
//*****************************************************************************
// Placement new: the system domain is constructed in preallocated static
// storage (g_pSystemDomainMemory), so simply return the supplied address.
void *SystemDomain::operator new(size_t size, void *pInPlace)
{
    LIMITED_METHOD_CONTRACT;
    return pInPlace;
}
// Matching delete for the in-place new above: the storage is static, so there
// is nothing to free.
void SystemDomain::operator delete(void *pMem)
{
    LIMITED_METHOD_CONTRACT;
    // Do nothing - new() was in-place
}
#ifdef FEATURE_PREJIT
// Record the NGen compilation override flags (debug / profiling /
// instrumentation) in the static fields read back by GetCompilationOverrides.
void SystemDomain::SetCompilationOverrides(BOOL fForceDebug,
                                           BOOL fForceProfiling,
                                           BOOL fForceInstrument)
{
    LIMITED_METHOD_CONTRACT;
    s_fForceDebug = fForceDebug;
    s_fForceProfiling = fForceProfiling;
    s_fForceInstrument = fForceInstrument;
}
#endif
#endif //!DACCESS_COMPILE
#ifdef FEATURE_PREJIT
// Read back the NGen compilation override flags previously stored by
// SetCompilationOverrides. All three out-parameters are always written.
void SystemDomain::GetCompilationOverrides(BOOL * fForceDebug,
                                           BOOL * fForceProfiling,
                                           BOOL * fForceInstrument)
{
    LIMITED_METHOD_DAC_CONTRACT;
    *fForceDebug = s_fForceDebug;
    *fForceProfiling = s_fForceProfiling;
    *fForceInstrument = s_fForceInstrument;
}
#endif
#ifndef DACCESS_COMPILE
// Process-wide startup of the system domain: initializes the stub managers,
// the system-domain locks, constructs the singleton SystemDomain in its static
// storage, and creates the one-and-only AppDomain. Must run exactly once
// (see precondition).
void SystemDomain::Attach()
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        PRECONDITION(m_pSystemDomain == NULL);
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;

#ifndef CROSSGEN_COMPILE
    // Initialize stub managers
    PrecodeStubManager::Init();
    DelegateInvokeStubManager::Init();
    JumpStubStubManager::Init();
    RangeSectionStubManager::Init();
    ILStubManager::Init();
    InteropDispatchStubManager::Init();
    StubLinkStubManager::Init();
    ThunkHeapStubManager::Init();
    TailCallStubManager::Init();
#ifdef FEATURE_TIERED_COMPILATION
    CallCountingStubManager::Init();
#endif

    PerAppDomainTPCountList::InitAppDomainIndexList();
#endif // CROSSGEN_COMPILE

    m_SystemDomainCrst.Init(CrstSystemDomain, (CrstFlags)(CRST_REENTRANCY | CRST_TAKEN_DURING_SHUTDOWN));
    m_DelayedUnloadCrst.Init(CrstSystemDomainDelayedUnloadList, CRST_UNSAFE_COOPGC);

    // Initialize the ID dispenser that is used for domain neutral module IDs
    g_pModuleIndexDispenser = new IdDispenser();

    // Create the global SystemDomain and initialize it.
    m_pSystemDomain = new (&g_pSystemDomainMemory[0]) SystemDomain();
    // No way it can fail since g_pSystemDomainMemory is a static array.
    CONSISTENCY_CHECK(CheckPointer(m_pSystemDomain));

    LOG((LF_CLASSLOADER,
         LL_INFO10,
         "Created system domain at %p\n",
         m_pSystemDomain));

    // We need to initialize the memory pools etc. for the system domain.
    m_pSystemDomain->BaseDomain::Init(); // Setup the memory heaps

    // Create the one and only app domain
    AppDomain::Create();

    // Each domain gets its own ReJitManager, and ReJitManager has its own static
    // initialization to run
    ReJitManager::InitStatic();
}
#ifndef CROSSGEN_COMPILE
// First phase of process detach: stop the system domain and its children
// without deallocating anything yet. Bails out if called on a thread the
// runtime does not know about.
void SystemDomain::DetachBegin()
{
    WRAPPER_NO_CONTRACT;
    // Shut down the domain and its children (but don't deallocate anything just
    // yet).

    // TODO: we should really not be running managed DLLMain during process detach.
    if (GetThread() == NULL)
    {
        return;
    }

    if(m_pSystemDomain)
        m_pSystemDomain->Stop();
}
// Second phase of process detach: release binder contexts for the system
// domain and the app domain. Still does not deallocate domain memory.
void SystemDomain::DetachEnd()
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;
    // Shut down the domain and its children (but don't deallocate anything just
    // yet).
    if(m_pSystemDomain)
    {
        // ClearBinderContext requires preemptive mode (see its contract).
        GCX_PREEMP();
        m_pSystemDomain->ClearBinderContext();
        AppDomain* pAppDomain = GetAppDomain();
        if (pAppDomain)
            pAppDomain->ClearBinderContext();
    }
}
// Stop every app domain. The iterator argument TRUE selects the iteration
// mode used during shutdown (see AppDomainIterator).
void SystemDomain::Stop()
{
    WRAPPER_NO_CONTRACT;
    AppDomainIterator i(TRUE);

    while (i.Next())
        i.GetDomain()->Stop();
}
void SystemDomain::PreallocateSpecialObjects()
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
INJECT_FAULT(COMPlusThrowOM(););
}
CONTRACTL_END;
_ASSERTE(g_pPreallocatedSentinelObject == NULL);
OBJECTREF pPreallocatedSentinalObject = AllocateObject(g_pObjectClass);
g_pPreallocatedSentinelObject = CreatePinningHandle( pPreallocatedSentinalObject );
#ifdef FEATURE_PREJIT
if (SystemModule()->HasNativeImage())
{
CORCOMPILE_EE_INFO_TABLE *pEEInfo = SystemModule()->GetNativeImage()->GetNativeEEInfoTable();
pEEInfo->emptyString = (CORINFO_Object **)StringObject::GetEmptyStringRefPtr();
}
#endif
}
// Preallocate the exception objects the runtime must be able to throw even
// when allocation is impossible (OOM, stack overflow, etc.). Each object gets
// its HResult/XCode set and is published through a global strong handle.
void SystemDomain::CreatePreallocatedExceptions()
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;

    // Base System.Exception.
    EXCEPTIONREF pBaseException = (EXCEPTIONREF)AllocateObject(g_pExceptionClass);
    pBaseException->SetHResult(COR_E_EXCEPTION);
    pBaseException->SetXCode(EXCEPTION_COMPLUS);
    _ASSERTE(g_pPreallocatedBaseException == NULL);
    g_pPreallocatedBaseException = CreateHandle(pBaseException);

    // OutOfMemoryException — must exist before the first real OOM.
    EXCEPTIONREF pOutOfMemory = (EXCEPTIONREF)AllocateObject(g_pOutOfMemoryExceptionClass);
    pOutOfMemory->SetHResult(COR_E_OUTOFMEMORY);
    pOutOfMemory->SetXCode(EXCEPTION_COMPLUS);
    _ASSERTE(g_pPreallocatedOutOfMemoryException == NULL);
    g_pPreallocatedOutOfMemoryException = CreateHandle(pOutOfMemory);

    // StackOverflowException — cannot be allocated at overflow time.
    EXCEPTIONREF pStackOverflow = (EXCEPTIONREF)AllocateObject(g_pStackOverflowExceptionClass);
    pStackOverflow->SetHResult(COR_E_STACKOVERFLOW);
    pStackOverflow->SetXCode(EXCEPTION_COMPLUS);
    _ASSERTE(g_pPreallocatedStackOverflowException == NULL);
    g_pPreallocatedStackOverflowException = CreateHandle(pStackOverflow);

    // ExecutionEngineException.
    EXCEPTIONREF pExecutionEngine = (EXCEPTIONREF)AllocateObject(g_pExecutionEngineExceptionClass);
    pExecutionEngine->SetHResult(COR_E_EXECUTIONENGINE);
    pExecutionEngine->SetXCode(EXCEPTION_COMPLUS);
    _ASSERTE(g_pPreallocatedExecutionEngineException == NULL);
    g_pPreallocatedExecutionEngineException = CreateHandle(pExecutionEngine);

    // Rude thread abort — a distinct instance of ThreadAbortException.
    EXCEPTIONREF pRudeAbortException = (EXCEPTIONREF)AllocateObject(g_pThreadAbortExceptionClass);
    pRudeAbortException->SetHResult(COR_E_THREADABORTED);
    pRudeAbortException->SetXCode(EXCEPTION_COMPLUS);
    _ASSERTE(g_pPreallocatedRudeThreadAbortException == NULL);
    g_pPreallocatedRudeThreadAbortException = CreateHandle(pRudeAbortException);

    // Ordinary thread abort.
    EXCEPTIONREF pAbortException = (EXCEPTIONREF)AllocateObject(g_pThreadAbortExceptionClass);
    pAbortException->SetHResult(COR_E_THREADABORTED);
    pAbortException->SetXCode(EXCEPTION_COMPLUS);
    _ASSERTE(g_pPreallocatedThreadAbortException == NULL);
    g_pPreallocatedThreadAbortException = CreateHandle( pAbortException );
}
#endif // CROSSGEN_COMPILE
// Main system-domain initialization: locates the install directory and
// CoreLib path, loads the base system classes, preallocates special objects,
// and brings CoreLib fully active.
void SystemDomain::Init()
{
    STANDARD_VM_CONTRACT;

    HRESULT hr = S_OK;

#ifdef _DEBUG
    LOG((
        LF_EEMEM,
        LL_INFO10,
        "sizeof(EEClass)     = %d\n"
        "sizeof(MethodTable) = %d\n"
        "sizeof(MethodDesc)= %d\n"
        "sizeof(FieldDesc)   = %d\n"
        "sizeof(Module)      = %d\n",
        sizeof(EEClass),
        sizeof(MethodTable),
        sizeof(MethodDesc),
        sizeof(FieldDesc),
        sizeof(Module)
    ));
#endif // _DEBUG

    // The base domain is initialized in SystemDomain::Attach()
    // to allow stub caches to use the memory pool. Do not
    // initialize it here!

    // Honor the ZapDisable config switch by disabling native images globally.
    if (CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_ZapDisable) != 0)
        g_fAllowNativeImages = false;

    m_pSystemFile = NULL;
    m_pSystemAssembly = NULL;

    DWORD size = 0;

    // Get the install directory so we can find CoreLib
    hr = GetInternalSystemDirectory(NULL, &size);
    // First call is a size probe and must fail with INSUFFICIENT_BUFFER.
    if (hr != HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER))
        ThrowHR(hr);

    // GetInternalSystemDirectory returns a size, including the null!
    WCHAR* buffer = m_SystemDirectory.OpenUnicodeBuffer(size - 1);
    IfFailThrow(GetInternalSystemDirectory(buffer, &size));
    m_SystemDirectory.CloseBuffer();
    m_SystemDirectory.Normalize();

    // At this point m_SystemDirectory should already be canonicalized
    // Build the full path to the base library (CoreLib).
    m_BaseLibrary.Append(m_SystemDirectory);
    if (!m_BaseLibrary.EndsWith(DIRECTORY_SEPARATOR_CHAR_W))
    {
        m_BaseLibrary.Append(DIRECTORY_SEPARATOR_CHAR_W);
    }
    m_BaseLibrary.Append(g_pwBaseLibrary);
    m_BaseLibrary.Normalize();

    LoadBaseSystemClasses();

    {
        // We are about to start allocating objects, so we must be in cooperative mode.
        // However, many of the entrypoints to the system (DllGetClassObject and all
        // N/Direct exports) get called multiple times.  Sometimes they initialize the EE,
        // but generally they remain in preemptive mode.  So we really want to push/pop
        // the state here:
        GCX_COOP();

#ifndef CROSSGEN_COMPILE
        if (!NingenEnabled())
        {
            CreatePreallocatedExceptions();

            PreallocateSpecialObjects();
        }
#endif

        // Finish loading CoreLib now.
        m_pSystemAssembly->GetDomainAssembly()->EnsureActive();
    }

#ifdef _DEBUG
    // Optional debug hook: spin until a debugger flips fPause.
    BOOL fPause = EEConfig::GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_PauseOnLoad, FALSE);

    while (fPause)
    {
        ClrSleepEx(20, TRUE);
    }
#endif // _DEBUG
}
#ifndef CROSSGEN_COMPILE
// Lazily create the process-wide string literal map. Concurrent callers race
// on the compare-exchange; only the winner's map is kept (the loser's holder
// frees its instance automatically).
void SystemDomain::LazyInitGlobalStringLiteralMap()
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;

    // Allocate the global string literal map.
    NewHolder<GlobalStringLiteralMap> pGlobalStringLiteralMap(new GlobalStringLiteralMap());

    // Initialize the global string literal map.
    pGlobalStringLiteralMap->Init();

    if (InterlockedCompareExchangeT<GlobalStringLiteralMap *>(&m_pGlobalStringLiteralMap, pGlobalStringLiteralMap, NULL) == NULL)
    {
        // We won the race: detach from the holder so the map survives.
        pGlobalStringLiteralMap.SuppressRelease();
    }
}
// Report all static GC references in the (single) active app domain to the
// GC's promote callback. Only legal from a server-GC special thread while a
// GC is in progress (see assert below and the safety argument in comments).
/*static*/ void SystemDomain::EnumAllStaticGCRefs(promote_func* fn, ScanContext* sc)
{
    CONTRACT_VOID
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_COOPERATIVE;
    }
    CONTRACT_END;

    // We don't do a normal AppDomainIterator because we can't take the SystemDomain lock from
    // here.
    // We're only supposed to call this from a Server GC. We're walking here m_appDomainIdList
    // m_appDomainIdList will have an AppDomain* or will be NULL. So the only danger is if we
    // Fetch an AppDomain and then in some other thread the AppDomain is deleted.
    //
    // If the thread deleting the AppDomain (AppDomain::~AppDomain)was in Preemptive mode
    // while doing SystemDomain::EnumAllStaticGCRefs we will issue a GCX_COOP(), which will wait
    // for the GC to finish, so we are safe
    //
    // If the thread is in cooperative mode, it must have been suspended for the GC so a delete
    // can't happen.

    _ASSERTE(GCHeapUtilities::IsGCInProgress() &&
             GCHeapUtilities::IsServerHeap()   &&
             IsGCSpecialThread());

    SystemDomain* sysDomain = SystemDomain::System();
    if (sysDomain)
    {
        AppDomain* pAppDomain = ::GetAppDomain();
        if (pAppDomain && pAppDomain->IsActive())
        {
            pAppDomain->EnumStaticGCRefs(fn, sc);
        }
    }

    RETURN;
}
// Only called when EE is suspended.
DWORD SystemDomain::GetTotalNumSizedRefHandles()
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
SystemDomain* sysDomain = SystemDomain::System();
DWORD dwTotalNumSizedRefHandles = 0;
if (sysDomain)
{
AppDomain* pAppDomain = ::GetAppDomain();
if (pAppDomain && pAppDomain->IsActive())
{
dwTotalNumSizedRefHandles += pAppDomain->GetNumSizedRefHandles();
}
}
return dwTotalNumSizedRefHandles;
}
#endif // CROSSGEN_COMPILE
// Open and partially load CoreLib, then eagerly resolve the core types and
// methods (Object, ValueType, String, primitives, exceptions, delegates, ...)
// into their global g_p* caches. Ordering constraints between loads are noted
// inline and must be preserved.
void SystemDomain::LoadBaseSystemClasses()
{
    STANDARD_VM_CONTRACT;

    ETWOnStartup(LdSysBases_V1, LdSysBasesEnd_V1);

    {
        m_pSystemFile = PEAssembly::OpenSystem(NULL);
    }
    // Only partially load the system assembly. Other parts of the code will want to access
    // the globals in this function before finishing the load.
    m_pSystemAssembly = DefaultDomain()->LoadDomainAssembly(NULL, m_pSystemFile, FILE_LOAD_POST_LOADLIBRARY)->GetCurrentAssembly();

    // Set up binder for CoreLib
    CoreLibBinder::AttachModule(m_pSystemAssembly->GetManifestModule());

    // Load Object
    g_pObjectClass = CoreLibBinder::GetClass(CLASS__OBJECT);

    // Now that ObjectClass is loaded, we can set up
    // the system for finalizers.  There is no point in deferring this, since we need
    // to know this before we allocate our first object.
    g_pObjectFinalizerMD = CoreLibBinder::GetMethod(METHOD__OBJECT__FINALIZE);

    g_pCanonMethodTableClass = CoreLibBinder::GetClass(CLASS____CANON);

    // NOTE: !!!IMPORTANT!!! ValueType and Enum MUST be loaded one immediately after
    //       the other, because we have coded MethodTable::IsChildValueType
    //       in such a way that it depends on this behaviour.
    // Load the ValueType class
    g_pValueTypeClass = CoreLibBinder::GetClass(CLASS__VALUE_TYPE);

    // Load the enum class
    g_pEnumClass = CoreLibBinder::GetClass(CLASS__ENUM);
    _ASSERTE(!g_pEnumClass->IsValueType());

    // Load System.RuntimeType
    g_pRuntimeTypeClass = CoreLibBinder::GetClass(CLASS__CLASS);
    _ASSERTE(g_pRuntimeTypeClass->IsFullyLoaded());

    // Load Array class
    g_pArrayClass = CoreLibBinder::GetClass(CLASS__ARRAY);

    // Calling a method on IList<T> for an array requires redirection to a method on
    // the SZArrayHelper class. Retrieving such methods means calling
    // GetActualImplementationForArrayGenericIListMethod, which calls FetchMethod for
    // the corresponding method on SZArrayHelper. This basically results in a class
    // load due to a method call, which the debugger cannot handle, so we pre-load
    // the SZArrayHelper class here.
    g_pSZArrayHelperClass = CoreLibBinder::GetClass(CLASS__SZARRAYHELPER);

    // Load ByReference class
    //
    // NOTE: ByReference<T> must be the first by-ref-like system type to be loaded,
    //       because MethodTable::ClassifyEightBytesWithManagedLayout depends on it.
    g_pByReferenceClass = CoreLibBinder::GetClass(CLASS__BYREFERENCE);

    // Load Nullable class
    g_pNullableClass = CoreLibBinder::GetClass(CLASS__NULLABLE);

    // Load the Object array class.
    g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT] = ClassLoader::LoadArrayTypeThrowing(TypeHandle(g_pObjectClass));

    // We have delayed allocation of CoreLib's static handles until we load the object class
    CoreLibBinder::GetModule()->AllocateRegularStaticHandles(DefaultDomain());

    // Make sure all primitive types are loaded
    for (int et = ELEMENT_TYPE_VOID; et <= ELEMENT_TYPE_R8; et++)
        CoreLibBinder::LoadPrimitiveType((CorElementType)et);

    CoreLibBinder::LoadPrimitiveType(ELEMENT_TYPE_I);
    CoreLibBinder::LoadPrimitiveType(ELEMENT_TYPE_U);

    g_TypedReferenceMT = CoreLibBinder::GetClass(CLASS__TYPED_REFERENCE);

    // unfortunately, the following cannot be delay loaded since the jit
    // uses it to compute method attributes within a function that cannot
    // handle Complus exception and the following call goes through a path
    // where a complus exception can be thrown. It is unfortunate, because
    // we know that the delegate class and multidelegate class are always
    // guaranteed to be found.
    g_pDelegateClass = CoreLibBinder::GetClass(CLASS__DELEGATE);
    g_pMulticastDelegateClass = CoreLibBinder::GetClass(CLASS__MULTICAST_DELEGATE);

#ifndef CROSSGEN_COMPILE
    CrossLoaderAllocatorHashSetup::EnsureTypesLoaded();
#endif

    // further loading of nonprimitive types may need casting support.
    // initialize cast cache here.
#ifndef CROSSGEN_COMPILE
    CastCache::Initialize();
    ECall::PopulateManagedCastHelpers();
#endif // CROSSGEN_COMPILE

    // used by IsImplicitInterfaceOfSZArray
    CoreLibBinder::GetClass(CLASS__IENUMERABLEGENERIC);
    CoreLibBinder::GetClass(CLASS__ICOLLECTIONGENERIC);
    CoreLibBinder::GetClass(CLASS__ILISTGENERIC);
    CoreLibBinder::GetClass(CLASS__IREADONLYCOLLECTIONGENERIC);
    CoreLibBinder::GetClass(CLASS__IREADONLYLISTGENERIC);

    // Load String
    g_pStringClass = CoreLibBinder::LoadPrimitiveType(ELEMENT_TYPE_STRING);

#ifndef CROSSGEN_COMPILE
    ECall::PopulateManagedStringConstructors();
#endif // CROSSGEN_COMPILE

    // Cache the exception types thrown by the runtime itself.
    g_pExceptionClass = CoreLibBinder::GetClass(CLASS__EXCEPTION);
    g_pOutOfMemoryExceptionClass = CoreLibBinder::GetException(kOutOfMemoryException);
    g_pStackOverflowExceptionClass = CoreLibBinder::GetException(kStackOverflowException);
    g_pExecutionEngineExceptionClass = CoreLibBinder::GetException(kExecutionEngineException);
    g_pThreadAbortExceptionClass = CoreLibBinder::GetException(kThreadAbortException);

    g_pThreadClass = CoreLibBinder::GetClass(CLASS__THREAD);

#ifdef FEATURE_COMINTEROP
    g_pBaseCOMObject = CoreLibBinder::GetClass(CLASS__COM_OBJECT);
#endif

    g_pIDynamicInterfaceCastableInterface = CoreLibBinder::GetClass(CLASS__IDYNAMICINTERFACECASTABLE);

#ifdef FEATURE_ICASTABLE
    g_pICastableInterface = CoreLibBinder::GetClass(CLASS__ICASTABLE);
#endif // FEATURE_ICASTABLE

    // Make sure that FCall mapping for Monitor.Enter is initialized. We need it in case Monitor.Enter is used only as JIT helper.
    // For more details, see comment in code:JITutil_MonEnterWorker around "__me = GetEEFuncEntryPointMacro(JIT_MonEnter)".
    ECall::GetFCallImpl(CoreLibBinder::GetMethod(METHOD__MONITOR__ENTER));

#ifdef PROFILING_SUPPORTED
    // Note that g_profControlBlock.fBaseSystemClassesLoaded must be set to TRUE only after
    // all base system classes are loaded. Profilers are not allowed to call any type-loading
    // APIs until g_profControlBlock.fBaseSystemClassesLoaded is TRUE. It is important that
    // all base system classes need to be loaded before profilers can trigger the type loading.
    g_profControlBlock.fBaseSystemClassesLoaded = TRUE;
#endif // PROFILING_SUPPORTED

#if defined(_DEBUG) && !defined(CROSSGEN_COMPILE)
    if (!NingenEnabled())
    {
        g_CoreLib.Check();
    }
#endif

#if defined(HAVE_GCCOVER) && defined(FEATURE_PREJIT)
    if (GCStress<cfg_instr_ngen>::IsEnabled())
    {
        // Setting up gc coverage requires the base system classes
        //  to be initialized. So we have deferred it until now for CoreLib.
        Module *pModule = CoreLibBinder::GetModule();
        _ASSERTE(pModule->IsSystem());
        if(pModule->HasNativeImage())
        {
            SetupGcCoverageForNativeImage(pModule);
        }
    }
#endif // defined(HAVE_GCCOVER) && defined(FEATURE_PREJIT)
}
/*static*/
void SystemDomain::LoadDomain(AppDomain *pDomain)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
PRECONDITION(CheckPointer(System()));
INJECT_FAULT(COMPlusThrowOM(););
}
CONTRACTL_END;
SystemDomain::System()->AddDomain(pDomain);
}
#endif // !DACCESS_COMPILE
#ifndef DACCESS_COMPILE
#if defined(FEATURE_COMINTEROP_APARTMENT_SUPPORT) && !defined(CROSSGEN_COMPILE)
// Determine the COM apartment state requested by the entry-point method's
// custom attributes (STAThread / MTAThread). Throws if both attributes are
// present; returns AS_Unknown if neither is.
Thread::ApartmentState SystemDomain::GetEntryPointThreadAptState(IMDInternalImport* pScope, mdMethodDef mdMethod)
{
    STANDARD_VM_CONTRACT;

    HRESULT hr;
    // S_OK means the MTA attribute is present; S_FALSE means absent.
    IfFailThrow(hr = pScope->GetCustomAttributeByName(mdMethod,
                                                      DEFAULTDOMAIN_MTA_TYPE,
                                                      NULL,
                                                      NULL));
    BOOL fIsMTA = FALSE;
    if(hr == S_OK)
        fIsMTA = TRUE;

    IfFailThrow(hr = pScope->GetCustomAttributeByName(mdMethod,
                                                      DEFAULTDOMAIN_STA_TYPE,
                                                      NULL,
                                                      NULL));
    BOOL fIsSTA = FALSE;
    if (hr == S_OK)
        fIsSTA = TRUE;

    // Both attributes at once is a malformed entry point.
    if (fIsSTA && fIsMTA)
        COMPlusThrowHR(COR_E_CUSTOMATTRIBUTEFORMAT);

    if (fIsSTA)
        return Thread::AS_InSTA;
    else if (fIsMTA)
        return Thread::AS_InMTA;

    return Thread::AS_Unknown;
}
// Apply the requested apartment state to the current thread. Anything other
// than an explicit STA request defaults to MTA.
void SystemDomain::SetThreadAptState (Thread::ApartmentState state)
{
    STANDARD_VM_CONTRACT;

    Thread* pThread = GetThread();
    _ASSERTE(pThread);

    if(state == Thread::AS_InSTA)
    {
        Thread::ApartmentState pState = pThread->SetApartment(Thread::AS_InSTA);
        _ASSERTE(pState == Thread::AS_InSTA);
    }
    else
    {
        // If an apartment state was not explicitly requested, default to MTA
        Thread::ApartmentState pState = pThread->SetApartment(Thread::AS_InMTA);
        _ASSERTE(pState == Thread::AS_InMTA);
    }
}
#endif // defined(FEATURE_COMINTEROP_APARTMENT_SUPPORT) && !defined(CROSSGEN_COMPILE)
/*static*/
/*static*/
// Return true if the given method belongs to one of the CoreLib reflection/
// invocation types that stack walks must skip to find the "true" caller.
bool SystemDomain::IsReflectionInvocationMethod(MethodDesc* pMeth)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;

    MethodTable* pCaller = pMeth->GetMethodTable();

    // All Reflection Invocation methods are defined in CoreLib
    if (!pCaller->GetModule()->IsSystem())
        return false;

    /* List of types that should be skipped to identify true caller */
    static const BinderClassID reflectionInvocationTypes[] = {
        CLASS__METHOD,
        CLASS__METHOD_BASE,
        CLASS__METHOD_INFO,
        CLASS__CONSTRUCTOR,
        CLASS__CONSTRUCTOR_INFO,
        CLASS__CLASS,
        CLASS__TYPE_HANDLE,
        CLASS__METHOD_HANDLE,
        CLASS__FIELD_HANDLE,
        CLASS__TYPE,
        CLASS__FIELD,
        CLASS__RT_FIELD_INFO,
        CLASS__FIELD_INFO,
        CLASS__EVENT,
        CLASS__EVENT_INFO,
        CLASS__PROPERTY,
        CLASS__PROPERTY_INFO,
        CLASS__ACTIVATOR,
        CLASS__ARRAY,
        CLASS__ASSEMBLYBASE,
        CLASS__ASSEMBLY,
        CLASS__TYPE_DELEGATOR,
        CLASS__RUNTIME_HELPERS,
        CLASS__DYNAMICMETHOD,
        CLASS__DELEGATE,
        CLASS__MULTICAST_DELEGATE
    };

    // One-time preload of the listed types so the lookup loop below can use
    // the non-loading GetExistingClass(); published with a volatile store.
    static bool fInited = false;

    if (!VolatileLoad(&fInited))
    {
        // Make sure all types are loaded so that we can use faster GetExistingClass()
        for (unsigned i = 0; i < NumItems(reflectionInvocationTypes); i++)
        {
            CoreLibBinder::GetClass(reflectionInvocationTypes[i]);
        }

        VolatileStore(&fInited, true);
    }

    // Generic instantiations are never in the list, so skip the scan for them.
    if (!pCaller->HasInstantiation())
    {
        for (unsigned i = 0; i < NumItems(reflectionInvocationTypes); i++)
        {
            if (CoreLibBinder::GetExistingClass(reflectionInvocationTypes[i]) == pCaller)
                return true;
        }
    }

    return false;
}
#ifndef CROSSGEN_COMPILE
// State threaded through CallersMethodCallbackWithStackMark during the stack
// walks performed by SystemDomain::GetCallersMethod/Type/Module.
struct CallersDataWithStackMark
{
    StackCrawlMark* stackMark;   // mark identifying the reference frame; may be NULL
    BOOL foundMe;                // presumably tracks whether the marked frame was seen -- not used in this file's visible code
    MethodDesc* pFoundMethod;    // out: the caller's method once identified
    MethodDesc* pPrevMethod;     // most recently seen method above the marked frame
};
/*static*/
// Walks the stack to find the method that called the method identified by
// stackMark. Returns NULL when no caller could be determined.
MethodDesc* SystemDomain::GetCallersMethod(StackCrawlMark* stackMark)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;
    GCX_COOP();
    CallersDataWithStackMark walkData;
    ZeroMemory(&walkData, sizeof(CallersDataWithStackMark));
    walkData.stackMark = stackMark;
    GetThread()->StackWalkFrames(CallersMethodCallbackWithStackMark, &walkData, FUNCTIONSONLY | LIGHTUNWIND);
    // pFoundMethod is already NULL when nothing was found.
    return walkData.pFoundMethod;
}
/*static*/
// Walks the stack to find the type containing the caller identified by
// stackMark. Requires cooperative GC mode (no GCX_COOP here, unlike
// GetCallersMethod/GetCallersModule). Returns NULL when no caller is found.
MethodTable* SystemDomain::GetCallersType(StackCrawlMark* stackMark)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;
    CallersDataWithStackMark walkData;
    ZeroMemory(&walkData, sizeof(CallersDataWithStackMark));
    walkData.stackMark = stackMark;
    GetThread()->StackWalkFrames(CallersMethodCallbackWithStackMark, &walkData, FUNCTIONSONLY | LIGHTUNWIND);
    return (walkData.pFoundMethod != NULL) ? walkData.pFoundMethod->GetMethodTable() : NULL;
}
/*static*/
// Walks the stack to find the module containing the caller identified by
// stackMark. Returns NULL when no caller is found.
Module* SystemDomain::GetCallersModule(StackCrawlMark* stackMark)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;
    GCX_COOP();
    CallersDataWithStackMark walkData;
    ZeroMemory(&walkData, sizeof(CallersDataWithStackMark));
    walkData.stackMark = stackMark;
    GetThread()->StackWalkFrames(CallersMethodCallbackWithStackMark, &walkData, FUNCTIONSONLY | LIGHTUNWIND);
    return (walkData.pFoundMethod != NULL) ? walkData.pFoundMethod->GetModule() : NULL;
}
// State for CallersMethodCallback: skip a fixed number of frames, then
// capture the next method seen.
struct CallersData
{
    int skip;             // number of frames still to skip before capturing
    MethodDesc* pMethod;  // out: the captured method
};
/*static*/
// Convenience wrapper: the assembly owning the caller's module, or NULL when
// no caller module can be determined.
Assembly* SystemDomain::GetCallersAssembly(StackCrawlMark *stackMark)
{
    WRAPPER_NO_CONTRACT;
    Module* pCallerModule = GetCallersModule(stackMark);
    return (pCallerModule != NULL) ? pCallerModule->GetAssembly() : NULL;
}
/*private static*/
// Stack-walk callback that locates the "real" caller relative to a stack mark,
// skipping reflection/remoting infrastructure frames. Populates the
// CallersDataWithStackMark passed via 'data'. Returns SWA_ABORT once the
// result is final, SWA_CONTINUE to keep walking.
StackWalkAction SystemDomain::CallersMethodCallbackWithStackMark(CrawlFrame* pCf, VOID* data)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;
    MethodDesc *pFunc = pCf->GetFunction();
    /* We asked to be called back only for functions */
    _ASSERTE(pFunc);
    CallersDataWithStackMark* pCaller = (CallersDataWithStackMark*) data;
    if (pCaller->stackMark)
    {
        if (!pCf->IsInCalleesFrames(pCaller->stackMark))
        {
            // Still above (newer than) the marked frame:
            // save the current in case it is the one we want
            pCaller->pPrevMethod = pFunc;
            return SWA_CONTINUE;
        }
        // LookForMe stack crawl marks needn't worry about reflection or
        // remoting frames on the stack. Each frame above (newer than) the
        // target will be captured by the logic above. Once we transition to
        // finding the stack mark below the AofRA, we know that we hit the
        // target last time round and immediately exit with the cached result.
        if (*(pCaller->stackMark) == LookForMe)
        {
            pCaller->pFoundMethod = pCaller->pPrevMethod;
            return SWA_ABORT;
        }
    }
    // Skip reflection and remoting frames that could lie between a stack marked
    // method and its true caller (or that caller and its own caller). These
    // frames are infrastructure and logically transparent to the stack crawling
    // algorithm.
    // Skipping remoting frames. We always skip entire client to server spans
    // (though we see them in the order server then client during a stack crawl
    // obviously).
    // We spot the server dispatcher end because all calls are dispatched
    // through a single method: StackBuilderSink._PrivateProcessMessage.
    Frame* frame = pCf->GetFrame();
    _ASSERTE(pCf->IsFrameless() || frame);
    // Skipping reflection frames. We don't need to be quite as exhaustive here
    // as the security or reflection stack walking code since we know this logic
    // is only invoked for selected methods in CoreLib itself. So we're
    // reasonably sure we won't have any sensitive methods late bound invoked on
    // constructors, properties or events. This leaves being invoked via
    // MethodInfo, Type or Delegate (and depending on which invoke overload is
    // being used, several different reflection classes may be involved).
    g_IBCLogger.LogMethodDescAccess(pFunc);
    if (SystemDomain::IsReflectionInvocationMethod(pFunc))
        return SWA_CONTINUE;
    if (frame && frame->GetFrameType() == Frame::TYPE_MULTICAST)
    {
        // This must be a multicast delegate invocation; skip it like the
        // reflection frames above.
        _ASSERTE(pFunc->GetMethodTable()->IsDelegate());
        DELEGATEREF del = (DELEGATEREF)((MulticastFrame*)frame)->GetThis(); // This can throw.
        _ASSERTE(COMDelegate::IsTrueMulticastDelegate(del));
        return SWA_CONTINUE;
    }
    // Return the first non-reflection/remoting frame if no stack mark was
    // supplied.
    if (!pCaller->stackMark)
    {
        pCaller->pFoundMethod = pFunc;
        return SWA_ABORT;
    }
    // If we got here, we must already be in the frame containing the stack mark and we are not looking for "me".
    _ASSERTE(pCaller->stackMark &&
             pCf->IsInCalleesFrames(pCaller->stackMark) &&
             *(pCaller->stackMark) != LookForMe);
    // When looking for caller's caller, we delay returning results for another
    // round (the way this is structured, we will still be able to skip
    // reflection and remoting frames between the caller and the caller's
    // caller).
    if ((*(pCaller->stackMark) == LookForMyCallersCaller) &&
        (pCaller->pFoundMethod == NULL))
    {
        pCaller->pFoundMethod = pFunc;
        return SWA_CONTINUE;
    }
    pCaller->pFoundMethod = pFunc;
    return SWA_ABORT;
}
/*private static*/
// Simple stack-walk callback: skip the first CallersData::skip function
// frames, then record the next one and stop the walk.
StackWalkAction SystemDomain::CallersMethodCallback(CrawlFrame* pCf, VOID* data)
{
    LIMITED_METHOD_CONTRACT;
    MethodDesc *pFunc = pCf->GetFunction();
    /* We asked to be called back only for functions */
    _ASSERTE(pFunc);
    CallersData* pState = (CallersData*) data;
    if (pState->skip != 0)
    {
        // Still frames left to skip; keep walking.
        pState->skip--;
        return SWA_CONTINUE;
    }
    pState->pMethod = pFunc;
    return SWA_ABORT;
}
#endif // CROSSGEN_COMPILE
#ifdef CROSSGEN_COMPILE
// defined in compile.cpp
extern CompilationDomain * theDomain;
#endif
// Creates (or, for crossgen, adopts) the single AppDomain of the process,
// initializes it, and publishes it in m_pTheAppDomain.
void AppDomain::Create()
{
    STANDARD_VM_CONTRACT;
#ifdef CROSSGEN_COMPILE
    // Crossgen uses the pre-built CompilationDomain instead of a new AppDomain.
    AppDomainRefHolder pDomain(theDomain);
#else
    AppDomainRefHolder pDomain(new AppDomain());
#endif
    pDomain->Init();
    // allocate a Virtual Call Stub Manager for the default domain
    pDomain->InitVSD();
    pDomain->SetStage(AppDomain::STAGE_OPEN);
    // Init succeeded: keep the reference alive past this scope.
    pDomain.SuppressRelease();
    m_pTheAppDomain = pDomain;
    LOG((LF_CLASSLOADER | LF_CORDB,
         LL_INFO10,
         "Created the app domain at %p\n", m_pTheAppDomain));
}
#ifdef DEBUGGING_SUPPORTED
// Adds the given appdomain to the debugger's IPC list so an attached (or
// attaching) debugger can see it. Failures inside the publisher are ignored.
void SystemDomain::PublishAppDomainAndInformDebugger (AppDomain *pDomain)
{
    CONTRACTL
    {
        // During EE startup the throwing/GC-triggering machinery is not
        // available yet, hence the conditional contract.
        if(!g_fEEInit) {THROWS;} else {DISABLED(NOTHROW);};
        if(!g_fEEInit) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);};
        MODE_ANY;
    }
    CONTRACTL_END;
    LOG((LF_CORDB, LL_INFO100, "SD::PADAID: Adding 0x%x\n", pDomain));
    // Call the publisher API to add this appdomain entry to the list
    // The publisher will handle failures, so we don't care if this succeeds or fails.
    if (g_pDebugInterface != NULL)
    {
        g_pDebugInterface->AddAppDomainToIPC(pDomain);
    }
}
#endif // DEBUGGING_SUPPORTED
// Records the given appdomain with the SystemDomain, normalizing its stage
// back to STAGE_OPEN if it had already progressed.
void SystemDomain::AddDomain(AppDomain* pDomain)
{
    CONTRACTL
    {
        NOTHROW;
        MODE_ANY;
        GC_TRIGGERS;
        PRECONDITION(CheckPointer((pDomain)));
    }
    CONTRACTL_END;
    {
        // Stage is examined and reset under the SystemDomain lock.
        LockHolder lh;
        _ASSERTE (pDomain->m_Stage != AppDomain::STAGE_CREATING);
        if (pDomain->m_Stage == AppDomain::STAGE_READYFORMANAGEDCODE ||
            pDomain->m_Stage == AppDomain::STAGE_ACTIVE)
        {
            pDomain->SetStage(AppDomain::STAGE_OPEN);
        }
    }
    // Note that if you add another path that can reach here without calling
    // PublishAppDomainAndInformDebugger, then you should go back & make sure
    // that PADAID gets called. Right after this call, if not sooner.
    LOG((LF_CORDB, LL_INFO1000, "SD::AD:Would have added domain here! 0x%x\n",
        pDomain));
}
#ifdef PROFILING_SUPPORTED
// Raises the profiler's AppDomainCreationStarted/Finished events for both the
// SystemDomain and the default AppDomain, for profilers that track appdomain
// loads. Each notification is wrapped in its own BEGIN/END_PIN_PROFILER scope.
void SystemDomain::NotifyProfilerStartup()
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        MODE_PREEMPTIVE;
    }
    CONTRACTL_END;
    {
        BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
        _ASSERTE(System());
        g_profControlBlock.pProfInterface->AppDomainCreationStarted((AppDomainID) System());
        END_PIN_PROFILER();
    }
    {
        BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
        _ASSERTE(System());
        g_profControlBlock.pProfInterface->AppDomainCreationFinished((AppDomainID) System(), S_OK);
        END_PIN_PROFILER();
    }
    {
        BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
        _ASSERTE(System()->DefaultDomain());
        g_profControlBlock.pProfInterface->AppDomainCreationStarted((AppDomainID) System()->DefaultDomain());
        END_PIN_PROFILER();
    }
    {
        BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
        _ASSERTE(System()->DefaultDomain());
        g_profControlBlock.pProfInterface->AppDomainCreationFinished((AppDomainID) System()->DefaultDomain(), S_OK);
        END_PIN_PROFILER();
    }
}
// Raises the profiler's AppDomainShutdownStarted/Finished events for both the
// SystemDomain and the default AppDomain. Always returns S_OK.
HRESULT SystemDomain::NotifyProfilerShutdown()
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        MODE_PREEMPTIVE;
    }
    CONTRACTL_END;
    {
        BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
        _ASSERTE(System());
        g_profControlBlock.pProfInterface->AppDomainShutdownStarted((AppDomainID) System());
        END_PIN_PROFILER();
    }
    {
        BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
        _ASSERTE(System());
        g_profControlBlock.pProfInterface->AppDomainShutdownFinished((AppDomainID) System(), S_OK);
        END_PIN_PROFILER();
    }
    {
        BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
        _ASSERTE(System()->DefaultDomain());
        g_profControlBlock.pProfInterface->AppDomainShutdownStarted((AppDomainID) System()->DefaultDomain());
        END_PIN_PROFILER();
    }
    {
        BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
        _ASSERTE(System()->DefaultDomain());
        g_profControlBlock.pProfInterface->AppDomainShutdownFinished((AppDomainID) System()->DefaultDomain(), S_OK);
        END_PIN_PROFILER();
    }
    return (S_OK);
}
#endif // PROFILING_SUPPORTED
// Constructor: zero/default every field so the appdomain can be safely
// destructed even if Init() is never run. Must not perform fallible work.
AppDomain::AppDomain()
{
    // initialize fields so the appdomain can be safely destructed
    // shouldn't call anything that can fail here - use ::Init instead
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        FORBID_FAULT;
    }
    CONTRACTL_END;
    m_cRef=1;               // creator holds the initial reference
    m_pRootAssembly = NULL;
    m_dwFlags = 0;
#ifdef FEATURE_COMINTEROP
    m_pRCWCache = NULL;
    m_pRCWRefCache = NULL;
#endif // FEATURE_COMINTEROP
    m_handleStore = NULL;
#ifdef _DEBUG
    m_Assemblies.Debug_SetAppDomain(this);
#endif // _DEBUG
#ifdef FEATURE_COMINTEROP
    m_pRefDispIDCache = NULL;
    m_hndMissing = NULL;
#endif
    m_pRefClassFactHash = NULL;
    m_ForceTrivialWaitOperations = false;
    m_Stage=STAGE_CREATING;
#ifdef _DEBUG
    m_dwIterHolders=0;
#endif
#ifdef FEATURE_TYPEEQUIVALENCE
    m_pTypeEquivalenceTable = NULL;
#endif // FEATURE_TYPEEQUIVALENCE
#ifdef FEATURE_PREJIT
    m_pDomainFileWithNativeImageList = NULL;
#endif
} // AppDomain::AppDomain
// Destructor: returns the threadpool index and clears the assembly cache.
AppDomain::~AppDomain()
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;
#ifndef CROSSGEN_COMPILE
    // release the TPIndex. note that since TPIndex values are recycled the TPIndex
    // can only be released once all threads in the AppDomain have exited.
    if (GetTPIndex().m_dwIndex != 0)
        PerAppDomainTPCountList::ResetAppDomainIndex(GetTPIndex());
    m_AssemblyCache.Clear();
#endif // CROSSGEN_COMPILE
}
//*****************************************************************************
//*****************************************************************************
//*****************************************************************************
// Second-phase (fallible) initialization of the appdomain: locks, threadpool
// index, binding caches, handle store, and tiered compilation. Leaves the
// domain at STAGE_READYFORMANAGEDCODE. Initialization order here matters --
// see the inline comments.
void AppDomain::Init()
{
    CONTRACTL
    {
        STANDARD_VM_CHECK;
    }
    CONTRACTL_END;
    m_pDelayedLoaderAllocatorUnloadList = NULL;
    SetStage( STAGE_CREATING);
    // The lock is taken also during stack walking (GC or profiler)
    //  - To prevent deadlock with GC thread, we cannot trigger GC while holding the lock
    //  - To prevent deadlock with profiler thread, we cannot allow thread suspension
    m_crstHostAssemblyMap.Init(
        CrstHostAssemblyMap,
        (CrstFlags)(CRST_GC_NOTRIGGER_WHEN_TAKEN
                    | CRST_DEBUGGER_THREAD
                    INDEBUG(| CRST_DEBUG_ONLY_CHECK_FORBID_SUSPEND_THREAD)));
    m_crstHostAssemblyMapAdd.Init(CrstHostAssemblyMapAdd);
#ifndef CROSSGEN_COMPILE
    //Allocate the threadpool entry before the appdomain id list. Otherwise,
    //the thread pool list will be out of sync if insertion of id in
    //the appdomain fails.
    m_tpIndex = PerAppDomainTPCountList::AddNewTPIndex();
#endif // CROSSGEN_COMPILE
    BaseDomain::Init();
    // Set up the binding caches
    m_AssemblyCache.Init(&m_DomainCacheCrst, GetHighFrequencyHeap());
    m_UnmanagedCache.InitializeTable(this, &m_DomainCacheCrst);
    m_MemoryPressure = 0;
#ifndef CROSSGEN_COMPILE
    // Default domain reuses the handletablemap that was created during EEStartup
    m_handleStore = GCHandleUtilities::GetGCHandleManager()->GetGlobalHandleStore();
    if (!m_handleStore)
    {
        COMPlusThrowOM();
    }
#endif // CROSSGEN_COMPILE
#ifdef FEATURE_TYPEEQUIVALENCE
    m_TypeEquivalenceCrst.Init(CrstTypeEquivalenceMap);
#endif
    m_ReflectionCrst.Init(CrstReflection, CRST_UNSAFE_ANYMODE);
    m_RefClassFactCrst.Init(CrstClassFactInfoHash);
    SetStage(STAGE_READYFORMANAGEDCODE);
#ifndef CROSSGEN_COMPILE
#ifdef FEATURE_TIERED_COMPILATION
    m_tieredCompilationManager.Init();
#endif
#endif // CROSSGEN_COMPILE
    m_nativeImageLoadCrst.Init(CrstNativeImageLoad);
} // AppDomain::Init
/*********************************************************************/
// True when this appdomain was created for AOT compilation (crossgen).
BOOL AppDomain::IsCompilationDomain()
{
    LIMITED_METHOD_CONTRACT;
    const BOOL fCompilation = ((m_dwFlags & COMPILATION_DOMAIN) != 0);
#ifdef FEATURE_PREJIT
    // A compilation domain can only exist inside a compilation process.
    _ASSERTE(!fCompilation || IsCompilationProcess());
#endif // FEATURE_PREJIT
    return fCompilation;
}
#ifndef CROSSGEN_COMPILE
// Shuts down per-domain services: stops multicore JIT profiling, marks the
// loader allocator unloaded, and notifies/unregisters with the debugger.
void AppDomain::Stop()
{
    CONTRACTL
    {
        NOTHROW;
        MODE_ANY;
        GC_TRIGGERS;
    }
    CONTRACTL_END;
#ifdef FEATURE_MULTICOREJIT
    GetMulticoreJitManager().StopProfile(true);
#endif
    // Set the unloaded flag before notifying the debugger
    GetLoaderAllocator()->SetIsUnloaded();
#ifdef DEBUGGING_SUPPORTED
    if (IsDebuggerAttached())
        NotifyDebuggerUnload();
    if (NULL != g_pDebugInterface)
    {
        // Call the publisher API to delete this appdomain entry from the list
        CONTRACT_VIOLATION(ThrowsViolation);
        g_pDebugInterface->RemoveAppDomainFromIPC (this);
    }
#endif // DEBUGGING_SUPPORTED
}
#endif // !CROSSGEN_COMPILE
#endif //!DACCESS_COMPILE
#ifndef DACCESS_COMPILE
// Adds a DomainAssembly to this domain's assembly list, reusing a NULL slot
// (left behind by RemoveAssembly) when one is available, otherwise appending.
void AppDomain::AddAssembly(DomainAssembly * assem)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;
    {
        CrstHolder ch(GetAssemblyListLock());
        // Prefer recycling an empty slot over growing the list.
        DWORD cAssemblies = m_Assemblies.GetCount_Unlocked();
        for (DWORD idx = 0; idx < cAssemblies; ++idx)
        {
            if (m_Assemblies.Get_UnlockedNoReference(idx) != NULL)
                continue;
            m_Assemblies.Set_Unlocked(idx, assem);
            return;
        }
        // No free slot found - append at the end of the list.
        IfFailThrow(m_Assemblies.Append_Unlocked(assem));
    }
}
// Removes a DomainAssembly from this domain's assembly list by NULLing its
// slot (the slot may be recycled by a later AddAssembly). The assembly is
// expected to be present.
void AppDomain::RemoveAssembly(DomainAssembly * pAsm)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;
    CrstHolder ch(GetAssemblyListLock());
    const DWORD cAssemblies = m_Assemblies.GetCount_Unlocked();
    DWORD idx = 0;
    while (idx < cAssemblies)
    {
        if (m_Assemblies.Get_UnlockedNoReference(idx) == pAsm)
        {
            m_Assemblies.Set_Unlocked(idx, NULL);
            return;
        }
        ++idx;
    }
    _ASSERTE(!"Unreachable");
}
// Returns TRUE when the given Assembly is loaded (for execution) in this
// domain, by scanning the loaded-assembly iterator.
BOOL AppDomain::ContainsAssembly(Assembly * assem)
{
    WRAPPER_NO_CONTRACT;
    AssemblyIterator i = IterateAssembliesEx((AssemblyIterationFlags)(
        kIncludeLoaded | kIncludeExecution));
    CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
    while (i.Next(pDomainAssembly.This()))
    {
        CollectibleAssemblyHolder<Assembly *> pAssembly = pDomainAssembly->GetLoadedAssembly();
        if (pAssembly == assem)
            return TRUE;
    }
    return FALSE;
}
// Lazily creates (under m_ReflectionCrst) the class-factory-info hash table,
// allocated from the low-frequency heap. Returns the existing table when one
// has already been created.
EEClassFactoryInfoHashTable* AppDomain::SetupClassFactHash()
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;
    CrstHolder ch(&m_ReflectionCrst);
    if (m_pRefClassFactHash == NULL)
    {
        // Placement-new into loader-heap memory; the heap owns the lifetime.
        AllocMemHolder<void> pCache(GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof (EEClassFactoryInfoHashTable))));
        EEClassFactoryInfoHashTable *tmp = new (pCache) EEClassFactoryInfoHashTable;
        LockOwner lock = {&m_RefClassFactCrst,IsOwnerOfCrst};
        if (!tmp->Init(20, &lock))
            COMPlusThrowOM();
        pCache.SuppressRelease();
        m_pRefClassFactHash = tmp;
    }
    return m_pRefClassFactHash;
}
#ifdef FEATURE_COMINTEROP
// Lazily creates (under m_ReflectionCrst) the COM DISPID cache, allocated
// from the low-frequency heap. Returns the existing cache when present.
DispIDCache* AppDomain::SetupRefDispIDCache()
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;
    CrstHolder ch(&m_ReflectionCrst);
    if (m_pRefDispIDCache == NULL)
    {
        // Placement-new into loader-heap memory; the heap owns the lifetime.
        AllocMemHolder<void> pCache = GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof (DispIDCache)));
        DispIDCache *tmp = new (pCache) DispIDCache;
        tmp->Init();
        pCache.SuppressRelease();
        m_pRefDispIDCache = tmp;
    }
    return m_pRefDispIDCache;
}
#endif // FEATURE_COMINTEROP
// Factory: creates a FileLoadLock for pFile, links it into pLock's list, and
// takes an extra reference on behalf of the list. The caller must already
// hold pLock and there must be no existing lock for pFile.
FileLoadLock *FileLoadLock::Create(PEFileListLock *pLock, PEFile *pFile, DomainFile *pDomainFile)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        PRECONDITION(pLock->HasLock());
        PRECONDITION(pLock->FindFileLock(pFile) == NULL);
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;
    NewHolder<FileLoadLock> result(new FileLoadLock(pLock, pFile, pDomainFile));
    pLock->AddElement(result);
    result->AddRef(); // Add one ref on behalf of the ListLock's reference. The corresponding Release() happens in FileLoadLock::CompleteLoadLevel.
    return result.Extract();
}
// Destructor: drops the PEFile reference taken in the constructor
// (the PEFile pointer is stored in the base class's m_data).
FileLoadLock::~FileLoadLock()
{
    CONTRACTL
    {
        DESTRUCTOR_CHECK;
        NOTHROW;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;
    ((PEFile *) m_data)->Release();
}
// Accessor for the DomainFile whose load this lock serializes.
DomainFile *FileLoadLock::GetDomainFile()
{
    LIMITED_METHOD_CONTRACT;
    return m_pDomainFile;
}
// Accessor for the load level this lock has completed so far.
FileLoadLevel FileLoadLock::GetLoadLevel()
{
    LIMITED_METHOD_CONTRACT;
    return m_level;
}
// Acquire will return FALSE and not take the lock if the file
// has already been loaded to the target level. Otherwise,
// it will return TRUE and take the lock.
//
// Note that the taker must release the lock via IncrementLoadLevel.
BOOL FileLoadLock::Acquire(FileLoadLevel targetLevel)
{
    WRAPPER_NO_CONTRACT;
    // If we are already loaded to the desired level, the lock is "free".
    if (m_level >= targetLevel)
        return FALSE;
    if (!DeadlockAwareEnter())
    {
        // We failed to get the lock due to a deadlock.
        return FALSE;
    }
    // Re-check after entry: another thread may have completed the level while
    // we were waiting (classic double-checked acquire).
    if (m_level >= targetLevel)
    {
        Leave();
        return FALSE;
    }
    return TRUE;
}
// Non-blocking variant of Acquire used by asserts: reports whether the lock
// could be taken for targetLevel without actually taking it.
BOOL FileLoadLock::CanAcquire(FileLoadLevel targetLevel)
{
    // If we are already loaded to the desired level, the lock is "free".
    if (m_level >= targetLevel)
        return FALSE;
    return CanDeadlockAwareEnter();
}
#if !defined(DACCESS_COMPILE) && (defined(LOGGING) || defined(STRESS_LOG))
// Human-readable names for logging, indexed by FileLoadLevel. The entries
// must stay in sync (same order and count) with the FileLoadLevel enum.
static const char *fileLoadLevelName[] =
{
    "CREATE",                             // FILE_LOAD_CREATE
    "BEGIN",                              // FILE_LOAD_BEGIN
    "FIND_NATIVE_IMAGE",                  // FILE_LOAD_FIND_NATIVE_IMAGE
    "VERIFY_NATIVE_IMAGE_DEPENDENCIES",   // FILE_LOAD_VERIFY_NATIVE_IMAGE_DEPENDENCIES
    "ALLOCATE",                           // FILE_LOAD_ALLOCATE
    "ADD_DEPENDENCIES",                   // FILE_LOAD_ADD_DEPENDENCIES
    "PRE_LOADLIBRARY",                    // FILE_LOAD_PRE_LOADLIBRARY
    "LOADLIBRARY",                        // FILE_LOAD_LOADLIBRARY
    "POST_LOADLIBRARY",                   // FILE_LOAD_POST_LOADLIBRARY
    "EAGER_FIXUPS",                       // FILE_LOAD_EAGER_FIXUPS
    "DELIVER_EVENTS",                     // FILE_LOAD_DELIVER_EVENTS
    "VTABLE FIXUPS",                      // FILE_LOAD_VTABLE_FIXUPS
    "LOADED",                             // FILE_LOADED
    "ACTIVE",                             // FILE_ACTIVE
};
#endif // !DACCESS_COMPILE && (LOGGING || STRESS_LOG)
// Records completion of a load level. Returns TRUE when the level was
// advanced, FALSE when m_level was already at or past 'level' (reentrancy).
// On reaching FILE_ACTIVE the lock is unlinked from the list and the list's
// reference (taken in Create) is released.
BOOL FileLoadLock::CompleteLoadLevel(FileLoadLevel level, BOOL success)
{
    CONTRACTL
    {
        MODE_ANY;
        GC_TRIGGERS;
        THROWS;
        PRECONDITION(HasLock());
    }
    CONTRACTL_END;
    // Increment may happen more than once if reentrancy occurs (e.g. LoadLibrary)
    if (level > m_level)
    {
        // Must complete each level in turn, unless we have an error
        CONSISTENCY_CHECK(m_pDomainFile->IsError() || (level == (m_level+1)));
        // Remove the lock from the list if the load is completed
        if (level >= FILE_ACTIVE)
        {
            {
                GCX_COOP();
                PEFileListLockHolder lock((PEFileListLock*)m_pList);
#if _DEBUG
                BOOL fDbgOnly_SuccessfulUnlink =
#endif
                m_pList->Unlink(this);
                _ASSERTE(fDbgOnly_SuccessfulUnlink);
                m_pDomainFile->ClearLoading();
                CONSISTENCY_CHECK(m_dwRefCount >= 2); // Caller (LoadDomainFile) should have 1 refcount and m_pList should have another which was acquired in FileLoadLock::Create.
                m_level = (FileLoadLevel)level;
                // Dev11 bug 236344
                // In AppDomain::IsLoading, if the lock is taken on m_pList and then FindFileLock returns NULL,
                // we depend on the DomainFile's load level being up to date. Hence we must update the load
                // level while the m_pList lock is held.
                if (success)
                    m_pDomainFile->SetLoadLevel(level);
            }
            Release(); // Release m_pList's refcount on this lock, which was acquired in FileLoadLock::Create
        }
        else
        {
            m_level = (FileLoadLevel)level;
            if (success)
                m_pDomainFile->SetLoadLevel(level);
        }
#ifndef DACCESS_COMPILE
        // Stress-log progress for the interesting stages only.
        switch(level)
        {
            case FILE_LOAD_ALLOCATE:
            case FILE_LOAD_ADD_DEPENDENCIES:
            case FILE_LOAD_DELIVER_EVENTS:
            case FILE_LOADED:
            case FILE_ACTIVE: // The timing of stress logs is not critical, so even for the FILE_ACTIVE stage we need not do it while the m_pList lock is held.
                STRESS_LOG3(LF_CLASSLOADER, LL_INFO100, "Completed Load Level %s for DomainFile %p - success = %i\n", fileLoadLevelName[level], m_pDomainFile, success);
                break;
            default:
                break;
        }
#endif
        return TRUE;
    }
    else
        return FALSE;
}
// Poisons the lock with a load error: caches the failure HRESULT, records the
// exception on the DomainFile, and completes through FILE_ACTIVE with
// success = FALSE so waiters are released.
void FileLoadLock::SetError(Exception *ex)
{
    CONTRACTL
    {
        MODE_ANY;
        GC_TRIGGERS;
        THROWS;
        PRECONDITION(CheckPointer(ex));
        PRECONDITION(HasLock());
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;
    m_cachedHR = ex->GetHR();
    LOG((LF_LOADER, LL_WARNING, "LOADER: %x:***%s*\t!!!Non-transient error 0x%x\n",
         m_pDomainFile->GetAppDomain(), m_pDomainFile->GetSimpleName(), m_cachedHR));
    m_pDomainFile->SetError(ex);
    CompleteLoadLevel(FILE_ACTIVE, FALSE);
}
// Thread-safe reference count increment.
void FileLoadLock::AddRef()
{
    LIMITED_METHOD_CONTRACT;
    FastInterlockIncrement((LONG *) &m_dwRefCount);
}
// Thread-safe reference count decrement; deletes the lock when the count
// reaches zero. Returns the post-decrement count.
UINT32 FileLoadLock::Release()
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;
    LONG count = FastInterlockDecrement((LONG *) &m_dwRefCount);
    if (count == 0)
        delete this;
    return count;
}
// Constructor: starts at FILE_LOAD_CREATE and takes a reference on the PEFile
// (released in the destructor via m_data).
FileLoadLock::FileLoadLock(PEFileListLock *pLock, PEFile *pFile, DomainFile *pDomainFile)
  : ListLockEntry(pLock, pFile, "File load lock"),
    m_level((FileLoadLevel) (FILE_LOAD_CREATE)),
    m_pDomainFile(pDomainFile),
    m_cachedHR(S_OK)
{
    WRAPPER_NO_CONTRACT;
    pFile->AddRef();
}
// Holder release function: leaves (but does not release a reference on) the lock.
void FileLoadLock::HolderLeave(FileLoadLock *pThis)
{
    LIMITED_METHOD_CONTRACT;
    pThis->Leave();
}
//
// Assembly loading:
//
// Assembly loading is carefully layered to avoid deadlocks in the
// presence of circular loading dependencies.
// A LoadLevel is associated with each assembly as it is being loaded. During the
// act of loading (abstractly, increasing its load level), its lock is
// held, and the current load level is stored on the thread. Any
// recursive loads during that period are automatically restricted to
// only partially load the dependent assembly to the same level as the
// caller (or to one short of that level in the presence of a deadlock
// loop.)
//
// Each loading stage must be carefully constructed so that
// this constraint is expected and can be dealt with.
//
// Note that there is one case where this still doesn't handle recursion, and that is the
// security subsystem. The security system runs managed code, and thus must typically fully
// initialize assemblies of permission sets it is trying to use. (And of course, these may be used
// while those assemblies are initializing.) This is dealt with in the historical manner - namely
// the security system passes in a special flag which says that it will deal with null return values
// in the case where a load cannot be safely completed due to such issues.
//
// Fully loads (to FILE_ACTIVE) the system assembly into this domain.
void AppDomain::LoadSystemAssemblies()
{
    STANDARD_VM_CONTRACT;
    // The only reason to make an assembly a "system assembly" is if the EE is caching
    // pointers to stuff in the assembly. Because this is going on, we need to preserve
    // the invariant that the assembly is loaded into every app domain.
    //
    // Right now we have only one system assembly. We shouldn't need to add any more.
    LoadAssembly(NULL, SystemDomain::System()->SystemFile(), FILE_ACTIVE);
}
// Reports the current load level of pFile. While a load is in flight the
// FileLoadLock is authoritative; otherwise the DomainFile's own level is used.
FileLoadLevel AppDomain::GetDomainFileLoadLevel(DomainFile *pFile)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END
    LoadLockHolder lock(this);
    FileLoadLock* pLockEntry = (FileLoadLock *) lock->FindFileLock(pFile->GetFile());
    if (pLockEntry == NULL)
        return pFile->GetLoadLevel();
    else
        return pLockEntry->GetLoadLevel();
}
// This checks if the thread has initiated (or completed) loading at the given level. A false guarantees that
// (a) The current thread (or a thread blocking on the current thread) has not started loading the file
// at the given level, and
// (b) No other thread had started loading the file at this level at the start of this function call.
// Note that another thread may start loading the file at that level in a race with the completion of
// this function. However, the caller still has the guarantee that such a load started after this
// function was called (and e.g. any state in place before the function call will be seen by the other thread.)
//
// Conversely, a true guarantees that either the current thread has started the load step, or another
// thread has completed the load step.
//
// See the contract description in the comment block above: returns whether
// loading of pFile at 'level' has been initiated (or completed) by some thread.
BOOL AppDomain::IsLoading(DomainFile *pFile, FileLoadLevel level)
{
    // Cheap out
    if (pFile->GetLoadLevel() < level)
    {
        FileLoadLock *pLock = NULL;
        {
            LoadLockHolder lock(this);
            pLock = (FileLoadLock *) lock->FindFileLock(pFile->GetFile());
            if (pLock == NULL)
            {
                // No thread involved with loading
                return pFile->GetLoadLevel() >= level;
            }
            // Keep the lock object alive after releasing the list lock.
            pLock->AddRef();
        }
        FileLoadLockRefHolder lockRef(pLock);
        if (pLock->Acquire(level))
        {
            // We got the lock - therefore no other thread has started this loading step yet.
            pLock->Leave();
            return FALSE;
        }
        // We didn't get the lock - either this thread is already doing the load,
        // or else the load has already finished.
    }
    return TRUE;
}
// CheckLoading is a weaker form of IsLoading, which will not block on
// other threads waiting for their status. This is appropriate for asserts.
// Assert-friendly variant of IsLoading: never blocks on other threads.
// Fails the CHECK when no thread has initiated loading at 'level'.
CHECK AppDomain::CheckLoading(DomainFile *pFile, FileLoadLevel level)
{
    // Cheap out
    if (pFile->GetLoadLevel() < level)
    {
        FileLoadLock *pLock = NULL;
        LoadLockHolder lock(this);
        pLock = (FileLoadLock *) lock->FindFileLock(pFile->GetFile());
        if (pLock != NULL
            && pLock->CanAcquire(level))
        {
            // We can get the lock - therefore no other thread has started this loading step yet.
            CHECK_FAILF(("Loading step %d has not been initiated yet", level));
        }
        // We didn't get the lock - either this thread is already doing the load,
        // or else the load has already finished.
    }
    CHECK_OK;
}
// CHECK that types from pAssembly may be loaded while executing in this
// domain (i.e. its manifest module is valid here).
CHECK AppDomain::CheckCanLoadTypes(Assembly *pAssembly)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;
    CHECK_MSG(CheckValidModule(pAssembly->GetManifestModule()),
              "Type loading can occur only when executing in the assembly's app domain");
    CHECK_OK;
}
// CHECK that pMD may execute in this domain: its module must be valid here,
// and (for methods requiring activation) the module must be activated.
CHECK AppDomain::CheckCanExecuteManagedCode(MethodDesc* pMD)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;
    Module* pModule=pMD->GetModule();
    CHECK_MSG(CheckValidModule(pModule),
              "Managed code can only run when executing in the module's app domain");
    if (!pMD->IsInterface() || pMD->IsStatic()) //interfaces require no activation for instance methods
    {
        //cctor could have been interrupted by ADU
        CHECK_MSG(pModule->CheckActivated(),
                  "Managed code can only run when its module has been activated in the current app domain");
    }
    CHECK_OK;
}
#endif // !DACCESS_COMPILE
// Drives pFile up to targetLevel, performing (a portion of) the remaining
// load work on this thread when the file is still loading. Throws the cached
// load error if the file previously failed to reach targetLevel.
void AppDomain::LoadDomainFile(DomainFile *pFile,
                               FileLoadLevel targetLevel)
{
    CONTRACTL
    {
        if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
        if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
        // Fault injection must be conditional: FORBID_FAULT and INJECT_FAULT
        // are contradictory, so the previously-present unconditional
        // INJECT_FAULT line has been removed.
        if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM();); }
    }
    CONTRACTL_END;
    // Quick exit if finished
    if (pFile->GetLoadLevel() >= targetLevel)
        return;
    // Handle the error case
    pFile->ThrowIfError(targetLevel);
#ifndef DACCESS_COMPILE
    if (pFile->IsLoading())
    {
        GCX_PREEMP();
        // Load some more if appropriate
        LoadLockHolder lock(this);
        FileLoadLock* pLockEntry = (FileLoadLock *) lock->FindFileLock(pFile->GetFile());
        if (pLockEntry == NULL)
        {
            // Another thread finished the load between the IsLoading check
            // above and taking the list lock.
            _ASSERTE (!pFile->IsLoading());
            return;
        }
        pLockEntry->AddRef();
        lock.Release();
        // Ownership of the AddRef'd lock passes to the overload below.
        LoadDomainFile(pLockEntry, targetLevel);
    }
#else // DACCESS_COMPILE
    DacNotImpl();
#endif // DACCESS_COMPILE
}
#ifndef DACCESS_COMPILE
// Returns the maximum load level this thread may currently drive a file to:
// FILE_ACTIVE when no limiter is installed, otherwise one level below the
// limiter's level (to break circular-load deadlocks).
FileLoadLevel AppDomain::GetThreadFileLoadLevel()
{
    WRAPPER_NO_CONTRACT;
    auto pLimiter = GetThread()->GetLoadLevelLimiter();
    if (pLimiter == NULL)
        return FILE_ACTIVE;
    return (FileLoadLevel)(pLimiter->GetLoadLevel()-1);
}
// Loads pFile to targetLevel and returns its Assembly. Thin wrapper over
// LoadDomainAssembly; may return NULL in the recursive-load case per the
// postcondition.
Assembly *AppDomain::LoadAssembly(AssemblySpec* pIdentity,
                                  PEAssembly *pFile,
                                  FileLoadLevel targetLevel)
{
    CONTRACT(Assembly *)
    {
        GC_TRIGGERS;
        THROWS;
        MODE_ANY;
        PRECONDITION(CheckPointer(pFile));
        POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); // May be NULL in recursive load case
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACT_END;
    DomainAssembly *pAssembly = LoadDomainAssembly(pIdentity, pFile, targetLevel);
    PREFIX_ASSUME(pAssembly != NULL);
    RETURN pAssembly->GetAssembly();
}
extern BOOL AreSameBinderInstance(ICLRPrivBinder *pBinderA, ICLRPrivBinder *pBinderB);
// Loads an assembly, caching non-transient failures against pSpec so repeated
// load attempts rethrow the same (wrapped) exception. With no spec, caching
// is skipped and the internal load is called directly.
DomainAssembly* AppDomain::LoadDomainAssembly(AssemblySpec* pSpec,
                                              PEAssembly *pFile,
                                              FileLoadLevel targetLevel)
{
    STATIC_CONTRACT_THROWS;
    if (pSpec == nullptr)
    {
        // skip caching, since we don't have anything to base it on
        return LoadDomainAssemblyInternal(pSpec, pFile, targetLevel);
    }
    DomainAssembly* pRetVal = NULL;
    EX_TRY
    {
        pRetVal = LoadDomainAssemblyInternal(pSpec, pFile, targetLevel);
    }
    EX_HOOK
    {
        // On failure, record the exception in the per-spec exception cache
        // (non-transient errors only) so later loads fail fast and uniformly.
        Exception* pEx = GET_EXCEPTION();
        if (!pEx->IsTransient())
        {
            // Setup the binder reference in AssemblySpec from the PEAssembly if one is not already set.
            ICLRPrivBinder* pCurrentBindingContext = pSpec->GetBindingContext();
            ICLRPrivBinder* pBindingContextFromPEAssembly = pFile->GetBindingContext();
            if (pCurrentBindingContext == NULL)
            {
                // Set the binding context we got from the PEAssembly if AssemblySpec does not
                // have that information
                _ASSERTE(pBindingContextFromPEAssembly != NULL);
                pSpec->SetBindingContext(pBindingContextFromPEAssembly);
            }
#if defined(_DEBUG)
            else
            {
                // Binding context in the spec should be the same as the binding context in the PEAssembly
                _ASSERTE(AreSameBinderInstance(pCurrentBindingContext, pBindingContextFromPEAssembly));
            }
#endif // _DEBUG
            if (!EEFileLoadException::CheckType(pEx))
            {
                // Wrap foreign exception types in EEFileLoadException so the
                // failure carries the assembly name, then rethrow the wrapper.
                StackSString name;
                pSpec->GetFileOrDisplayName(0, name);
                pEx=new EEFileLoadException(name, pEx->GetHR(), pEx);
                AddExceptionToCache(pSpec, pEx);
                PAL_CPP_THROW(Exception *, pEx);
            }
            else
                AddExceptionToCache(pSpec, pEx);
        }
    }
    EX_END_HOOK;
    return pRetVal;
}
// Loads (or finds) the DomainAssembly for 'pFile' in this AppDomain and drives it to at
// least 'targetLevel'. 'pIdentity' is the AssemblySpec the caller bound with (may be
// NULL); it is used only to key the binding-cache insertion at the end. Returns a
// non-NULL DomainAssembly or throws; throws COR_E_ASSEMBLYEXPECTED when the metadata
// actually refers to a module rather than an assembly.
DomainAssembly *AppDomain::LoadDomainAssemblyInternal(AssemblySpec* pIdentity,
                                                      PEAssembly *pFile,
                                                      FileLoadLevel targetLevel)
{
    CONTRACT(DomainAssembly *)
    {
        GC_TRIGGERS;
        THROWS;
        MODE_ANY;
        PRECONDITION(CheckPointer(pFile));
        PRECONDITION(pFile->IsSystem() || ::GetAppDomain()==this);
        POSTCONDITION(CheckPointer(RETVAL));
        POSTCONDITION(RETVAL->GetLoadLevel() >= GetThreadFileLoadLevel()
                      || RETVAL->GetLoadLevel() >= targetLevel);
        POSTCONDITION(RETVAL->CheckNoError(targetLevel));
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACT_END;
    DomainAssembly * result;
    // Go into preemptive mode since this may take a while.
    GCX_PREEMP();
    // Check for existing fully loaded assembly, or for an assembly which has failed during the loading process.
    result = FindAssembly(pFile, FindAssemblyOptions_IncludeFailedToLoad);
    if (result == NULL)
    {
        LoaderAllocator *pLoaderAllocator = NULL;
#ifndef CROSSGEN_COMPILE
        ICLRPrivBinder *pFileBinder = pFile->GetBindingContext();
        if (pFileBinder != NULL)
        {
            // Assemblies loaded with AssemblyLoadContext need to use a different LoaderAllocator if
            // marked as collectible
            pFileBinder->GetLoaderAllocator((LPVOID*)&pLoaderAllocator);
        }
#endif // !CROSSGEN_COMPILE
        if (pLoaderAllocator == NULL)
        {
            // Non-collectible case: fall back to the AppDomain's own allocator.
            pLoaderAllocator = this->GetLoaderAllocator();
        }
        // Allocate the DomainAssembly a bit early to avoid GC mode problems. We could potentially avoid
        // a rare redundant allocation by moving this closer to FileLoadLock::Create, but it's not worth it.
        NewHolder<DomainAssembly> pDomainAssembly = new DomainAssembly(this, pFile, pLoaderAllocator);
        LoadLockHolder lock(this);
        // Find the list lock entry
        FileLoadLock * fileLock = (FileLoadLock *)lock->FindFileLock(pFile);
        if (fileLock == NULL)
        {
            // Check again in case we were racing
            result = FindAssembly(pFile, FindAssemblyOptions_IncludeFailedToLoad);
            if (result == NULL)
            {
                // We are the first one in - create the DomainAssembly
                fileLock = FileLoadLock::Create(lock, pFile, pDomainAssembly);
                // The lock now owns the DomainAssembly; disarm the holder.
                pDomainAssembly.SuppressRelease();
#ifndef CROSSGEN_COMPILE
                if (pDomainAssembly->IsCollectible())
                {
                    // We add the assembly to the LoaderAllocator only when we are sure that it can be added
                    // and won't be deleted in case of a concurrent load from the same ALC
                    ((AssemblyLoaderAllocator *)pLoaderAllocator)->AddDomainAssembly(pDomainAssembly);
                }
#endif // !CROSSGEN_COMPILE
            }
        }
        else
        {
            // Someone else already created the lock; just take a reference on it.
            fileLock->AddRef();
        }
        lock.Release();
        if (result == NULL)
        {
            // We pass our ref on fileLock to LoadDomainFile to release.
            // Note that if we throw here, we will poison fileLock with an error condition,
            // so it will not be removed until app domain unload. So there is no need
            // to release our ref count.
            result = (DomainAssembly *)LoadDomainFile(fileLock, targetLevel);
        }
        else
        {
            result->EnsureLoadLevel(targetLevel);
        }
    }
    else
        result->EnsureLoadLevel(targetLevel);
    // Malformed metadata may contain a Module reference to what is actually
    // an Assembly. In this case we need to throw an exception, since returning
    // a DomainModule as a DomainAssembly is a type safety violation.
    if (!result->IsAssembly())
    {
        ThrowHR(COR_E_ASSEMBLYEXPECTED);
    }
    // Cache result in all cases, since found pFile could be from a different AssemblyRef than pIdentity
    if (pIdentity == NULL)
    {
        AssemblySpec spec;
        spec.InitializeSpec(result->GetFile());
        if (spec.CanUseWithBindingCache() && result->CanUseWithBindingCache())
            GetAppDomain()->AddAssemblyToCache(&spec, result);
    }
    else if (pIdentity->CanUseWithBindingCache() && result->CanUseWithBindingCache())
    {
        GetAppDomain()->AddAssemblyToCache(pIdentity, result);
    }
    RETURN result;
} // AppDomain::LoadDomainAssembly
// Argument bundle for helpers that perform a domain-file load.
// NOTE(review): not referenced elsewhere in this chunk — presumably consumed by a
// cross-call/marshaling helper defined elsewhere in the file; confirm before removing.
struct LoadFileArgs
{
    FileLoadLock *pLock;        // load lock identifying the file being loaded
    FileLoadLevel targetLevel;  // level the load should be driven to
    DomainFile *result;         // out: the loaded DomainFile
};
// Drives the incremental load of the DomainFile described by 'pLock' up to
// 'targetLevel', honoring the per-thread anti-deadlock load-level limiter.
// Consumes (releases) the caller's reference on 'pLock'. May legitimately stop one
// level short of the target when a deadlock with another loading thread is detected;
// RequireLoadLevel at the end throws if even that reduced level was not reached.
DomainFile *AppDomain::LoadDomainFile(FileLoadLock *pLock, FileLoadLevel targetLevel)
{
    CONTRACT(DomainFile *)
    {
        STANDARD_VM_CHECK;
        PRECONDITION(CheckPointer(pLock));
        PRECONDITION(pLock->GetDomainFile()->GetAppDomain() == this);
        POSTCONDITION(RETVAL->GetLoadLevel() >= GetThreadFileLoadLevel()
                      || RETVAL->GetLoadLevel() >= targetLevel);
        POSTCONDITION(RETVAL->CheckNoError(targetLevel));
    }
    CONTRACT_END;
    DomainFile *pFile = pLock->GetDomainFile();
    // Make sure we release the lock on exit
    FileLoadLockRefHolder lockRef(pLock);
    // We need to perform the early steps of loading CoreLib without a domain transition. This is
    // important for bootstrapping purposes - we need to get CoreLib at least partially loaded
    // into a domain before we can run serialization code to do the transition.
    //
    // Note that we cannot do this in general for all assemblies, because some of the security computations
    // require the managed exposed object, which must be created in the correct app domain.
    if (this != GetAppDomain()
        && pFile->GetFile()->IsSystem()
        && targetLevel > FILE_LOAD_ALLOCATE)
    {
        // Re-call the routine with a limited load level. This will cause the first part of the load to
        // get performed in the current app domain.
        pLock->AddRef();
        LoadDomainFile(pLock, targetLevel > FILE_LOAD_ALLOCATE ? FILE_LOAD_ALLOCATE : targetLevel);
        // Now continue on to complete the rest of the load, if any.
    }
    // Do a quick out check for the already loaded case.
    if (pLock->GetLoadLevel() >= targetLevel)
    {
        pFile->ThrowIfError(targetLevel);
        RETURN pFile;
    }
    // Initialize a loading queue. This will hold any loads which are triggered recursively but
    // which cannot be immediately satisfied due to anti-deadlock constraints.
    // PendingLoadQueues are allocated on the stack during a load, and
    // shared with all nested loads on the same thread. (Note that we won't use
    // "candidate" if we are in a recursive load; that's OK since they are cheap to
    // construct.)
    FileLoadLevel immediateTargetLevel = targetLevel;
    {
        LoadLevelLimiter limit;
        limit.Activate();
        // We cannot set a target level higher than that allowed by the limiter currently.
        // This is because of anti-deadlock constraints.
        if (immediateTargetLevel > limit.GetLoadLevel())
            immediateTargetLevel = limit.GetLoadLevel();
        LOG((LF_LOADER, LL_INFO100, "LOADER: %x:***%s*\t>>>Load initiated, %s/%s\n",
             pFile->GetAppDomain(), pFile->GetSimpleName(),
             fileLoadLevelName[immediateTargetLevel], fileLoadLevelName[targetLevel]));
        // Now loop and do the load incrementally to the target level.
        if (pLock->GetLoadLevel() < immediateTargetLevel)
        {
            // Acquire succeeds until the lock reaches immediateTargetLevel (or deadlock
            // detection kicks in); each iteration performs exactly one load step.
            while (pLock->Acquire(immediateTargetLevel))
            {
                FileLoadLevel workLevel;
                {
                    FileLoadLockHolder fileLock(pLock);
                    // Work level is next step to do
                    workLevel = (FileLoadLevel)(fileLock->GetLoadLevel()+1);
                    // Set up the anti-deadlock constraint: we cannot safely recursively load any assemblies
                    // on this thread to a higher level than this assembly is being loaded now.
                    // Note that we do allow work at a parallel level; any deadlocks caused here will
                    // be resolved by the deadlock detection in the FileLoadLocks.
                    limit.SetLoadLevel(workLevel);
                    LOG((LF_LOADER,
                         (workLevel == FILE_LOAD_BEGIN
                          || workLevel == FILE_LOADED
                          || workLevel == FILE_ACTIVE)
                         ? LL_INFO10 : LL_INFO1000,
                         "LOADER: %p:***%s*\t   loading at level %s\n",
                         this, pFile->GetSimpleName(), fileLoadLevelName[workLevel]));
                    TryIncrementalLoad(pFile, workLevel, fileLock);
                }
            }
            if (pLock->GetLoadLevel() == immediateTargetLevel-1)
            {
                LOG((LF_LOADER, LL_INFO100, "LOADER: %x:***%s*\t<<<Load limited due to detected deadlock, %s\n",
                     pFile->GetAppDomain(), pFile->GetSimpleName(),
                     fileLoadLevelName[immediateTargetLevel-1]));
            }
        }
        LOG((LF_LOADER, LL_INFO100, "LOADER: %x:***%s*\t<<<Load completed, %s\n",
             pFile->GetAppDomain(), pFile->GetSimpleName(),
             fileLoadLevelName[pLock->GetLoadLevel()]));
    }
    // There may have been an error stored on the domain file by another thread, or from a previous load
    pFile->ThrowIfError(targetLevel);
    // There are two normal results from the above loop.
    //
    // 1. We succeeded in loading the file to the current thread's load level.
    // 2. We succeeded in loading the file to the current thread's load level - 1, due
    //    to deadlock condition with another thread loading the same assembly.
    //
    // Either of these are considered satisfactory results, as code inside a load must expect
    // a partial load result.
    //
    // However, if load level elevation has occurred, then it is possible for a deadlock to
    // prevent us from loading an assembly which was loading before the elevation at a radically
    // lower level. In such a case, we throw an exception which transiently fails the current
    // load, since it is likely we have not satisfied the caller.
    // (An alternate, and possibly preferable, strategy here would be for all callers to explicitly
    // identify the minimum load level acceptable via CheckLoadDomainFile and throw from there.)
    pFile->RequireLoadLevel((FileLoadLevel)(immediateTargetLevel-1));
    RETURN pFile;
}
// Performs one incremental load step ('workLevel') for 'pFile', handling the special
// cases where the file-load lock must be dropped across the step (LoadLibrary callbacks,
// event delivery). On a non-transient failure that leaves the file unloaded, the error
// is poisoned into the lock so future loads fail fast, then rethrown (wrapped in an
// EEFileLoadException if it is not one already).
void AppDomain::TryIncrementalLoad(DomainFile *pFile, FileLoadLevel workLevel, FileLoadLockHolder &lockHolder)
{
    STANDARD_VM_CONTRACT;
    // This is factored out so we don't call EX_TRY in a loop (EX_TRY can _alloca)
    BOOL released = FALSE;
    FileLoadLock* pLoadLock = lockHolder.GetValue();
    EX_TRY
    {
        // Special case: for LoadLibrary, we cannot hold the lock during the
        // actual LoadLibrary call, because we might get a callback from _CorDllMain on any
        // other thread.  (Note that this requires DomainFile's LoadLibrary to be independently threadsafe.)
        if (workLevel == FILE_LOAD_LOADLIBRARY)
        {
            lockHolder.Release();
            released = TRUE;
        }
        // Do the work
        BOOL success = pFile->DoIncrementalLoad(workLevel);
        if (released)
        {
            // Reobtain lock to increment level. (Note that another thread may
            // have already done it which is OK.)
            if (pLoadLock->Acquire(workLevel))
            {
                // note lockHolder.Acquire isn't wired up to actually take the lock
                lockHolder = pLoadLock;
                released = FALSE;
            }
        }
        if (!released)
        {
            // Complete the level.
            if (pLoadLock->CompleteLoadLevel(workLevel, success) &&
                pLoadLock->GetLoadLevel()==FILE_LOAD_DELIVER_EVENTS)
            {
                // Events must be delivered outside the lock: handlers can trigger
                // further loads on this or other threads.
                lockHolder.Release();
                released = TRUE;
                pFile->DeliverAsyncEvents();
            };
        }
    }
    EX_HOOK
    {
        Exception *pEx = GET_EXCEPTION();
        //We will cache this error and wire this load to forever fail,
        // unless the exception is transient or the file is loaded OK but just cannot execute
        if (!pEx->IsTransient() && !pFile->IsLoaded())
        {
            if (released)
            {
                // Reobtain lock to increment level. (Note that another thread may
                // have already done it which is OK.)
                if (pLoadLock->Acquire(workLevel)) // note pLockHolder->Acquire isn't wired up to actually take the lock
                {
                    // note lockHolder.Acquire isn't wired up to actually take the lock
                    lockHolder = pLoadLock;
                    released = FALSE;
                }
            }
            if (!released)
            {
                // Report the error in the lock
                pLoadLock->SetError(pEx);
            }
            if (!EEFileLoadException::CheckType(pEx))
                EEFileLoadException::Throw(pFile->GetFile(), pEx->GetHR(), pEx);
        }
        // Otherwise, we simply abort this load, and can retry later on.
        // @todo cleanup: make sure that each level is restartable after an exception, and
        // leaves no bad side effects
    }
    EX_END_HOOK;
}
// Checks whether the module is valid to be in the given app domain (need not be yet loaded)
CHECK AppDomain::CheckValidModule(Module * pModule)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;
    // NOTE(review): both paths below end in CHECK_OK, so the conditional is dead code —
    // every module is currently considered valid for any domain. Likely a husk left by
    // removed domain-neutrality checks; consider simplifying to a single CHECK_OK.
    if (pModule->GetDomainFile() != NULL)
        CHECK_OK;
    CHECK_OK;
}
// Canonicalizes 'pSpec' for use as a key in the native-image dependency table:
// public keys are reduced to tokens and all four version fields are wildcarded,
// so that specs differing only in version (which the CoreCLR binder unifies)
// collide and expose mismatches.
static void NormalizeAssemblySpecForNativeDependencies(AssemblySpec * pSpec)
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;
    if (pSpec->IsStrongNamed() && pSpec->HasPublicKey())
    {
        // Compare by public key token, not full key, to match binder identity rules.
        pSpec->ConvertPublicKeyToToken();
    }
    //
    // CoreCLR binder unifies assembly versions. Ignore assembly version here to
    // detect more types of potential mismatches.
    //
    AssemblyMetaDataInternal * pContext = pSpec->GetContext();
    pContext->usMajorVersion = (USHORT)-1;
    pContext->usMinorVersion = (USHORT)-1;
    pContext->usBuildNumber = (USHORT)-1;
    pContext->usRevisionNumber = (USHORT)-1;
}
// Records the (normalized assembly spec -> MVID) mapping for a native-image dependency,
// and throws a FileLoadException if a different MVID was previously recorded for the
// same spec — i.e. native images were generated against multiple versions of the assembly.
void AppDomain::CheckForMismatchedNativeImages(AssemblySpec * pSpec, const GUID * pGuid)
{
    STANDARD_VM_CONTRACT;
    //
    // The native images are ever used only for trusted images in CoreCLR.
    // We don't wish to open the IL file at runtime so we just forgo any
    // eager consistency checking. But we still want to prevent mismatched
    // NGen images from being used. We record all mappings between assembly
    // names and MVID, and fail once we detect mismatch.
    //
    NormalizeAssemblySpecForNativeDependencies(pSpec);
    // The dependency table is shared; all lookups/inserts happen under the domain lock.
    CrstHolder ch(&m_DomainCrst);
    const NativeImageDependenciesEntry * pEntry = m_NativeImageDependencies.Lookup(pSpec);
    if (pEntry != NULL)
    {
        if (*pGuid != pEntry->m_guidMVID)
        {
            SString msg;
            msg.Printf("ERROR: Native images generated against multiple versions of assembly %s. ", pSpec->GetName());
            WszOutputDebugString(msg.GetUnicode());
            COMPlusThrowNonLocalized(kFileLoadException, msg.GetUnicode());
        }
    }
    else
    {
        //
        // No entry yet - create one
        //
        // Clone the spec's fields so the entry owns its strings and outlives the caller's spec.
        NativeImageDependenciesEntry * pNewEntry = new NativeImageDependenciesEntry();
        pNewEntry->m_AssemblySpec.CopyFrom(pSpec);
        pNewEntry->m_AssemblySpec.CloneFields(AssemblySpec::ALL_OWNED);
        pNewEntry->m_guidMVID = *pGuid;
        m_NativeImageDependencies.Add(pNewEntry);
    }
}
// Removes the native-image dependency entry recorded for 'pSpec' (after the same
// normalization applied when it was added). Returns TRUE if an entry was found and
// deleted, FALSE if no matching entry existed.
BOOL AppDomain::RemoveNativeImageDependency(AssemblySpec * pSpec)
{
    CONTRACTL
    {
        GC_NOTRIGGER;
        PRECONDITION(CheckPointer(pSpec));
    }
    CONTRACTL_END;
    NormalizeAssemblySpecForNativeDependencies(pSpec);
    // Table mutations are serialized on the domain lock.
    CrstHolder dependencyLock(&m_DomainCrst);
    const NativeImageDependenciesEntry * pFound = m_NativeImageDependencies.Lookup(pSpec);
    if (pFound == NULL)
        return FALSE;
    // Unlink first, then free the entry we own.
    m_NativeImageDependencies.Remove(pSpec);
    delete pFound;
    return TRUE;
}
// One-time initialization of shared static state backed by CoreLib: currently this
// just writes String.Empty into its precomputed static slot, because the JIT treats
// the field as intrinsic and the static constructor would never run to initialize it.
void AppDomain::SetupSharedStatics()
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;
#ifndef CROSSGEN_COMPILE
    if (NingenEnabled())
        return;
    LOG((LF_CLASSLOADER, LL_INFO10000, "STATICS: SetupSharedStatics()"));
    // don't do any work in init stage. If not init only do work in non-shared case if are default domain
    _ASSERTE(!g_fEEInit);
    // Because we are allocating/referencing objects, need to be in cooperative mode
    GCX_COOP();
    DomainLocalModule *pLocalModule = CoreLibBinder::GetModule()->GetDomainLocalModule();
    // This is a convenient place to initialize String.Empty.
    // It is treated as intrinsic by the JIT as so the static constructor would never run.
    // Leaving it uninitialized would confuse debuggers.
    // String should not have any static constructors.
    _ASSERTE(g_pStringClass->IsClassPreInited());
    FieldDesc * pEmptyStringFD = CoreLibBinder::GetField(FIELD__STRING__EMPTY);
    // Compute the raw address of the static slot within the precomputed GC statics block.
    OBJECTREF* pEmptyStringHandle = (OBJECTREF*)
        ((TADDR)pLocalModule->GetPrecomputedGCStaticsBasePointer()+pEmptyStringFD->GetOffset());
    // SetObjectReference performs the GC-safe write (write barrier).
    SetObjectReference( pEmptyStringHandle, StringObject::GetEmptyString());
#endif // CROSSGEN_COMPILE
}
// Finds the DomainAssembly in this domain corresponding to 'pFile', or NULL.
// With FindAssemblyOptions_IncludeFailedToLoad, assemblies whose load previously
// failed are also returned (callers use this to rediscover poisoned loads).
DomainAssembly * AppDomain::FindAssembly(PEAssembly * pFile, FindAssemblyOptions options/* = FindAssemblyOptions_None*/)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;
    const bool includeFailedToLoad = (options & FindAssemblyOptions_IncludeFailedToLoad) != 0;
    if (pFile->HasHostAssembly())
    {
        // Host-bound (ALC) assemblies have a direct back-pointer; no need to iterate.
        DomainAssembly * pDA = FindAssembly(pFile->GetHostAssembly());
        if (pDA != nullptr && (pDA->IsLoaded() || (includeFailedToLoad && pDA->IsError())))
        {
            return pDA;
        }
        return nullptr;
    }
    // Fall back to walking the domain's assembly list.
    AssemblyIterator i = IterateAssembliesEx((AssemblyIterationFlags)(
        kIncludeLoaded |
        (includeFailedToLoad ? kIncludeFailedToLoad : 0) |
        kIncludeExecution));
    CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
    while (i.Next(pDomainAssembly.This()))
    {
        PEFile * pManifestFile = pDomainAssembly->GetFile();
        if (pManifestFile &&
            !pManifestFile->IsResource() &&
            pManifestFile->Equals(pFile))
        {
            // Caller already has PEAssembly, so we can give DomainAssembly away freely without AddRef
            return pDomainAssembly.Extract();
        }
    }
    return NULL;
}
// Iteration flags shared by the IJW (mixed-mode interop) assembly walkers:
// consider assemblies that are loaded or still loading for execution, but
// never collectible ones (IJW modules cannot be collectible).
static const AssemblyIterationFlags STANDARD_IJW_ITERATOR_FLAGS =
    (AssemblyIterationFlags)(kIncludeLoaded | kIncludeLoading | kIncludeExecution | kExcludeCollectible);
// Sets the domain's friendly name. When 'pwzFriendlyName' is NULL, derives a name
// from the root assembly's simple name (extension stripped), or falls back to the
// default-domain name. Notifies an attached debugger unless 'fDebuggerCares' is FALSE.
void AppDomain::SetFriendlyName(LPCWSTR pwzFriendlyName, BOOL fDebuggerCares/*=TRUE*/)
{
    CONTRACTL
    {
        THROWS;
        if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
        MODE_ANY;
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;
    // Do all computations into a temporary until we're ensured of success
    SString tmpFriendlyName;
    if (pwzFriendlyName)
        tmpFriendlyName.Set(pwzFriendlyName);
    else
    {
        // If there is an assembly, try to get the name from it.
        // If no assembly, but if it's the DefaultDomain, then give it a name
        if (m_pRootAssembly)
        {
            tmpFriendlyName.SetUTF8(m_pRootAssembly->GetSimpleName());
            // Strip the file extension, if any (everything after the last '.').
            SString::Iterator i = tmpFriendlyName.End();
            if (tmpFriendlyName.FindBack(i, '.'))
                tmpFriendlyName.Truncate(i);
        }
        else
        {
            tmpFriendlyName.Set(DEFAULT_DOMAIN_FRIENDLY_NAME);
        }
    }
    tmpFriendlyName.Normalize();
    m_friendlyName = tmpFriendlyName;
    // NOTE(review): Normalize() is called again on the member after assignment —
    // looks redundant with the call above; confirm whether assignment can denormalize.
    m_friendlyName.Normalize();
    if(g_pDebugInterface)
    {
        // update the name in the IPC publishing block
        if (SUCCEEDED(g_pDebugInterface->UpdateAppDomainEntryInIPC(this)))
        {
            // inform the attached debugger that the name of this appdomain has changed.
            if (IsDebuggerAttached() && fDebuggerCares)
                g_pDebugInterface->NameChangeEvent(this, NULL);
        }
    }
}
// Returns the domain's friendly name, lazily computing it (via SetFriendlyName(NULL))
// on first use. May throw while computing; the result may be NULL per the postcondition.
LPCWSTR AppDomain::GetFriendlyName(BOOL fDebuggerCares/*=TRUE*/)
{
    CONTRACT (LPCWSTR)
    {
        THROWS;
        if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
        MODE_ANY;
        POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACT_END;
    if (m_friendlyName.IsEmpty())
        SetFriendlyName(NULL, fDebuggerCares);
    RETURN m_friendlyName;
}
// Non-throwing accessor for logging: returns the friendly name as currently set,
// or the empty string if it has not been computed yet (never triggers computation).
LPCWSTR AppDomain::GetFriendlyNameForLogging()
{
    CONTRACT(LPCWSTR)
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        POSTCONDITION(CheckPointer(RETVAL,NULL_OK));
    }
    CONTRACT_END;
    // Guard clause instead of a ternary: an unset name logs as "".
    if (m_friendlyName.IsEmpty())
        RETURN W("");
    RETURN (LPCWSTR)m_friendlyName;
}
// Debugger-facing accessor: attempts the lazy name computation but swallows any
// exception it raises (this path must be NOTHROW), returning "" on failure.
LPCWSTR AppDomain::GetFriendlyNameForDebugger()
{
    CONTRACT (LPCWSTR)
    {
        NOTHROW;
        if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
        MODE_ANY;
        POSTCONDITION(CheckPointer(RETVAL));
    }
    CONTRACT_END;
    if (m_friendlyName.IsEmpty())
    {
        BOOL fSuccess = FALSE;
        EX_TRY
        {
            SetFriendlyName(NULL);
            fSuccess = TRUE;
        }
        EX_CATCH
        {
            // Gobble all exceptions.
        }
        EX_END_CATCH(SwallowAllExceptions);
        if (!fSuccess)
        {
            RETURN W("");
        }
    }
    RETURN m_friendlyName;
}
#endif // !DACCESS_COMPILE
#ifdef DACCESS_COMPILE
// DAC-only accessor: returns the raw buffer of the best available name without
// triggering any computation. '*isUtf8' tells the caller how to decode the buffer
// (the root assembly's simple name is UTF-8; the others are wide strings).
PVOID AppDomain::GetFriendlyNameNoSet(bool* isUtf8)
{
    SUPPORTS_DAC;
    if (!m_friendlyName.IsEmpty())
    {
        *isUtf8 = false;
        return m_friendlyName.DacGetRawContent();
    }
    else if (m_pRootAssembly)
    {
        *isUtf8 = true;
        return (PVOID)m_pRootAssembly->GetSimpleName();
    }
    else if (dac_cast<TADDR>(this) ==
             dac_cast<TADDR>(SystemDomain::System()->DefaultDomain()))
    {
        *isUtf8 = false;
        return (PVOID)DEFAULT_DOMAIN_FRIENDLY_NAME;
    }
    else
    {
        // No name is determinable for this domain.
        return NULL;
    }
}
#endif // DACCESS_COMPILE
#ifndef DACCESS_COMPILE
// Records the (spec -> PEAssembly) binding in the domain's assembly cache.
// On a store conflict: throws FUSION_E_CACHEFILE_FAILED unless 'fAllowFailure'
// (callers racing on post-bind resolve events pass TRUE). Always returns TRUE
// when it returns at all.
BOOL AppDomain::AddFileToCache(AssemblySpec* pSpec, PEAssembly *pFile, BOOL fAllowFailure)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        PRECONDITION(CheckPointer(pSpec));
        // Hosted fusion binder makes an exception here, so we cannot assert.
        //PRECONDITION(pSpec->CanUseWithBindingCache());
        //PRECONDITION(pFile->CanUseWithBindingCache());
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;
    CrstHolder holder(&m_DomainCacheCrst);
    // !!! suppress exceptions
    if(!m_AssemblyCache.StoreFile(pSpec, pFile) && !fAllowFailure)
    {
        // TODO: Disabling the below assertion as currently we experience
        // inconsistency on resolving the Microsoft.Office.Interop.MSProject.dll
        // This causes below assertion to fire and crashes the VS. This issue
        // is being tracked with Dev10 Bug 658555. Bring it back when this bug
        // is fixed.
        // _ASSERTE(FALSE);
        EEFileLoadException::Throw(pSpec, FUSION_E_CACHEFILE_FAILED, NULL);
    }
    return TRUE;
}
// Records the (spec -> DomainAssembly) binding in the domain's assembly cache.
// Returns whatever the underlying store reports; a failed store simply means
// another thread won the race.
BOOL AppDomain::AddAssemblyToCache(AssemblySpec* pSpec, DomainAssembly *pAssembly)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        PRECONDITION(CheckPointer(pSpec));
        PRECONDITION(CheckPointer(pAssembly));
        PRECONDITION(pSpec->CanUseWithBindingCache());
        PRECONDITION(pAssembly->CanUseWithBindingCache());
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;
    // All binding-cache mutations are serialized on the domain cache lock.
    CrstHolder cacheLock(&m_DomainCacheCrst);
    // !!! suppress exceptions
    return m_AssemblyCache.StoreAssembly(pSpec, pAssembly);
}
// Records a (non-transient) load failure for 'pSpec' in the binding cache so future
// binds of the same spec fail immediately with the same error. Transient exceptions
// are never cached (the load may succeed on retry); TRUE is returned for them.
BOOL AppDomain::AddExceptionToCache(AssemblySpec* pSpec, Exception *ex)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        PRECONDITION(CheckPointer(pSpec));
        PRECONDITION(pSpec->CanUseWithBindingCache());
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;
    if (ex->IsTransient())
        return TRUE;
    CrstHolder holder(&m_DomainCacheCrst);
    // !!! suppress exceptions
    return m_AssemblyCache.StoreException(pSpec, ex);
}
// Caches the native library handle 'hMod' under 'libraryName' (treated as a
// codebase-keyed AssemblySpec) in the domain's unmanaged image cache.
// A NULL name is silently ignored.
void AppDomain::AddUnmanagedImageToCache(LPCWSTR libraryName, NATIVE_LIBRARY_HANDLE hMod)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        PRECONDITION(CheckPointer(libraryName));
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;
    // Guard clause: nothing to record without a name.
    if (libraryName == NULL)
        return;
    AssemblySpec spec;
    spec.SetCodeBase(libraryName);
    m_UnmanagedCache.InsertEntry(&spec, hMod);
}
// Looks up a previously cached native library handle by name (codebase-keyed),
// returning NULL when the name is NULL or no entry exists.
NATIVE_LIBRARY_HANDLE AppDomain::FindUnmanagedImageInCache(LPCWSTR libraryName)
{
    CONTRACT(NATIVE_LIBRARY_HANDLE)
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        PRECONDITION(CheckPointer(libraryName,NULL_OK));
        POSTCONDITION(CheckPointer(RETVAL,NULL_OK));
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACT_END;
    if(libraryName == NULL) RETURN NULL;
    // Keyed the same way AddUnmanagedImageToCache stores entries.
    AssemblySpec spec;
    spec.SetCodeBase(libraryName);
    RETURN (NATIVE_LIBRARY_HANDLE) m_UnmanagedCache.LookupEntry(&spec, 0);
}
// Unlinks the file-load lock for 'pFile' from the domain's load-lock list and
// drops the list's reference on it. Returns FALSE if no lock exists for the file.
BOOL AppDomain::RemoveFileFromCache(PEAssembly *pFile)
{
    CONTRACTL
    {
        GC_TRIGGERS;
        PRECONDITION(CheckPointer(pFile));
    }
    CONTRACTL_END;
    LoadLockHolder lock(this);
    FileLoadLock *fileLock = (FileLoadLock *)lock->FindFileLock(pFile);
    if (fileLock == NULL)
        return FALSE;
    // Unlink must succeed since we found it under the same lock.
    VERIFY(lock->Unlink(fileLock));
    fileLock->Release();
    return TRUE;
}
// Removes 'pAssembly' from the domain's binding cache (used when a load is being
// backed out). Returns the underlying cache's success indication.
BOOL AppDomain::RemoveAssemblyFromCache(DomainAssembly* pAssembly)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        PRECONDITION(CheckPointer(pAssembly));
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;
    // Same lock that guards every other binding-cache mutation.
    CrstHolder cacheLock(&m_DomainCacheCrst);
    return m_AssemblyCache.RemoveAssembly(pAssembly);
}
// Returns whether 'pSpec' is already resolvable without a fresh bind: CoreLib
// references are always treated as cached (they bind to the system assembly
// directly), otherwise the binding cache is consulted.
BOOL AppDomain::IsCached(AssemblySpec *pSpec)
{
    WRAPPER_NO_CONTRACT;
    // Check to see if this fits our rather loose idea of a reference to CoreLib.
    // If so, don't use fusion to bind it - do it ourselves.
    return pSpec->IsCoreLib() || m_AssemblyCache.Contains(pSpec);
}
// Snapshots every DomainAssembly currently recorded in the binding cache into
// 'assemblyList', under the domain cache lock.
void AppDomain::GetCacheAssemblyList(SetSHash<PTR_DomainAssembly>& assemblyList)
{
    CrstHolder holder(&m_DomainCacheCrst);
    m_AssemblyCache.GetAllAssemblies(assemblyList);
}
// Looks up the PEAssembly cached for 'pSpec'. CoreLib references short-circuit to
// the system assembly (with an AddRef for the caller). When 'fThrow', a cached
// failure is rethrown by the underlying lookup.
PEAssembly* AppDomain::FindCachedFile(AssemblySpec* pSpec, BOOL fThrow /*=TRUE*/)
{
    CONTRACTL
    {
        if (fThrow) {
            GC_TRIGGERS;
            THROWS;
        }
        else {
            GC_NOTRIGGER;
            NOTHROW;
        }
        MODE_ANY;
    }
    CONTRACTL_END;
    // Check to see if this fits our rather loose idea of a reference to CoreLib.
    // If so, don't use fusion to bind it - do it ourselves.
    // (Only on the throwing path: the CONSISTENCY_CHECK/AddRef below may not be
    // safe under NOTHROW/GC_NOTRIGGER.)
    if (fThrow && pSpec->IsCoreLib())
    {
        CONSISTENCY_CHECK(SystemDomain::System()->SystemAssembly() != NULL);
        PEAssembly *pFile = SystemDomain::System()->SystemFile();
        pFile->AddRef();
        return pFile;
    }
    return m_AssemblyCache.LookupFile(pSpec, fThrow);
}
// Invoked after a failed bind: for not-found / ref-def-mismatch / invalid-name
// failures, raises the managed AssemblyResolve event and, if a handler supplies an
// assembly, caches it under the pre- (and, when distinct, post-) policy specs.
// Returns TRUE when the bind is still considered failed, FALSE when the event
// produced a usable assembly. '*ppFailedSpec' identifies which spec failed.
BOOL AppDomain::PostBindResolveAssembly(AssemblySpec  *pPrePolicySpec,
                                        AssemblySpec  *pPostPolicySpec,
                                        HRESULT        hrBindResult,
                                        AssemblySpec **ppFailedSpec)
{
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;
    PRECONDITION(CheckPointer(pPrePolicySpec));
    PRECONDITION(CheckPointer(pPostPolicySpec));
    PRECONDITION(CheckPointer(ppFailedSpec));
    BOOL fFailure = TRUE;
    *ppFailedSpec = pPrePolicySpec;
    PEAssemblyHolder result;
    if ((EEFileLoadException::GetFileLoadKind(hrBindResult) == kFileNotFoundException) ||
        (hrBindResult == FUSION_E_REF_DEF_MISMATCH) ||
        (hrBindResult == FUSION_E_INVALID_NAME))
    {
        result = TryResolveAssemblyUsingEvent(*ppFailedSpec);
        if (result != NULL && pPrePolicySpec->CanUseWithBindingCache() && result->CanUseWithBindingCache())
        {
            fFailure = FALSE;
            // Given the post-policy resolve event construction of the CLR binder,
            // chained managed resolve events can race with each other, therefore we do allow
            // the adding of the result to fail. Checking for already cached specs
            // is not an option as it would introduce another race window.
            // The binder does a re-fetch of the
            // original binding spec and therefore will not cause inconsistency here.
            // For the purposes of the resolve event, failure to add to the cache still is a success.
            AddFileToCache(pPrePolicySpec, result, TRUE /* fAllowFailure */);
            if (*ppFailedSpec != pPrePolicySpec && pPostPolicySpec->CanUseWithBindingCache())
            {
                AddFileToCache(pPostPolicySpec, result, TRUE /* fAllowFailure */ );
            }
        }
    }
    return fFailure;
}
//---------------------------------------------------------------------------------------------------------------------
// Binds 'pSpec' to a PEAssembly using the CoreCLR binder, consulting and populating the
// domain binding cache, and raising the managed AssemblyResolve event on bind failure.
// Returns an AddRef'ed PEAssembly, NULL (only when !fThrowOnFileNotFound), or throws.
// The EX_CATCH implements the failure-caching and rethrow-dedup protocol; exceptions
// from the managed resolve event (fForceReThrow) and transient errors always propagate.
PEAssembly * AppDomain::BindAssemblySpec(
    AssemblySpec *         pSpec,
    BOOL                   fThrowOnFileNotFound)
{
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;
    PRECONDITION(CheckPointer(pSpec));
    PRECONDITION(pSpec->GetAppDomain() == this);
    PRECONDITION(this==::GetAppDomain());
    GCX_PREEMP();
    BOOL fForceReThrow = FALSE;
    BinderTracing::AssemblyBindOperation bindOperation(pSpec);
    if (pSpec->HasUniqueIdentity())
    {
        HRESULT hrBindResult = S_OK;
        PEAssemblyHolder result;
        bool isCached = false;
        EX_TRY
        {
            isCached = IsCached(pSpec);
            if (!isCached)
            {
                {
                    // Use CoreClr's fusion alternative
                    CoreBindResult bindResult;
                    pSpec->Bind(this, FALSE /* fThrowOnFileNotFound */, &bindResult, FALSE /* fNgenExplicitBind */, FALSE /* fExplicitBindToNativeImage */);
                    hrBindResult = bindResult.GetHRBindResult();
                    if (bindResult.Found())
                    {
                        if (SystemDomain::SystemFile() && bindResult.IsCoreLib())
                        {
                            // Avoid rebinding to another copy of CoreLib
                            result = SystemDomain::SystemFile();
                            result.SuppressRelease(); // Didn't get a refcount
                        }
                        else
                        {
                            // IsSystem on the PEFile should be false, even for CoreLib satellites
                            result = PEAssembly::Open(&bindResult,
                                                      FALSE);
                        }
                        // Setup the reference to the binder, which performed the bind, into the AssemblySpec
                        ICLRPrivBinder* pBinder = result->GetBindingContext();
                        _ASSERTE(pBinder != NULL);
                        pSpec->SetBindingContext(pBinder);
                        if (pSpec->CanUseWithBindingCache() && result->CanUseWithBindingCache())
                        {
                            // Failure to add simply means someone else beat us to it. In that case
                            // the FindCachedFile call below (after catch block) will update result
                            // to the cached value.
                            AddFileToCache(pSpec, result, TRUE /*fAllowFailure*/);
                        }
                    }
                    else
                    {
                        // Don't trigger the resolve event for the CoreLib satellite assembly. A misbehaving resolve event may
                        // return an assembly that does not match, and this can cause recursive resource lookups during error
                        // reporting. The CoreLib satellite assembly is loaded from relative locations based on the culture, see
                        // AssemblySpec::Bind().
                        if (!pSpec->IsCoreLibSatellite())
                        {
                            // Trigger the resolve event also for non-throw situation.
                            AssemblySpec NewSpec(this);
                            AssemblySpec *pFailedSpec = NULL;
                            fForceReThrow = TRUE; // Managed resolve event handler can throw
                            BOOL fFailure = PostBindResolveAssembly(pSpec, &NewSpec, hrBindResult, &pFailedSpec);
                            if (fFailure && fThrowOnFileNotFound)
                            {
                                EEFileLoadException::Throw(pFailedSpec, COR_E_FILENOTFOUND, NULL);
                            }
                        }
                    }
                }
            }
        }
        EX_CATCH
        {
            Exception *ex = GET_EXCEPTION();
            AssemblySpec NewSpec(this);
            AssemblySpec *pFailedSpec = NULL;
            // Let transient exceptions or managed resolve event handler exceptions propagate
            if (ex->IsTransient() || fForceReThrow)
            {
                EX_RETHROW;
            }
            {
                // Give the resolve event one more chance before declaring failure.
                BOOL fFailure = PostBindResolveAssembly(pSpec, &NewSpec, ex->GetHR(), &pFailedSpec);
                if (fFailure)
                {
                    BOOL bFileNotFoundException =
                        (EEFileLoadException::GetFileLoadKind(ex->GetHR()) == kFileNotFoundException);
                    if (!bFileNotFoundException)
                    {
                        fFailure = AddExceptionToCache(pFailedSpec, ex);
                    } // else, fFailure stays TRUE
                    // Effectively, fFailure == bFileNotFoundException || AddExceptionToCache(pFailedSpec, ex)
                    // Only throw this exception if we are the first in the cache
                    if (fFailure)
                    {
                        // Store the failure information for DAC to read
                        if (IsDebuggerAttached()) {
                            FailedAssembly *pFailed = new FailedAssembly();
                            pFailed->Initialize(pFailedSpec, ex);
                            IfFailThrow(m_failedAssemblies.Append(pFailed));
                        }
                        if (!bFileNotFoundException || fThrowOnFileNotFound)
                        {
                            // V1.1 App-compatibility workaround. See VSW530166 if you want to whine about it.
                            //
                            // In Everett, if we failed to download an assembly because of a broken network cable,
                            // we returned a FileNotFoundException with a COR_E_FILENOTFOUND hr embedded inside
                            // (which would be exposed when marshaled to native.)
                            //
                            // In Whidbey, we now set the more appropriate INET_E_RESOURCE_NOT_FOUND hr. But
                            // the online/offline switch code in VSTO for Everett hardcoded a check for
                            // COR_E_FILENOTFOUND.
                            //
                            // So now, to keep that code from breaking, we have to remap INET_E_RESOURCE_NOT_FOUND
                            // back to COR_E_FILENOTFOUND. We're doing it here rather down in Fusion so as to affect
                            // the least number of callers.
                            if (ex->GetHR() == INET_E_RESOURCE_NOT_FOUND)
                            {
                                EEFileLoadException::Throw(pFailedSpec, COR_E_FILENOTFOUND, ex);
                            }
                            if (EEFileLoadException::CheckType(ex))
                            {
                                if (pFailedSpec == pSpec)
                                {
                                    EX_RETHROW; //preserve the information
                                }
                                else
                                {
                                    StackSString exceptionDisplayName, failedSpecDisplayName;
                                    ((EEFileLoadException*)ex)->GetName(exceptionDisplayName);
                                    pFailedSpec->GetFileOrDisplayName(0, failedSpecDisplayName);
                                    if (exceptionDisplayName.CompareCaseInsensitive(failedSpecDisplayName) == 0)
                                    {
                                        EX_RETHROW; // Throw the original exception. Otherwise, we'd throw an exception that contains the same message twice.
                                    }
                                }
                            }
                            EEFileLoadException::Throw(pFailedSpec, ex->GetHR(), ex);
                        }
                    }
                }
            }
        }
        EX_END_CATCH(RethrowTerminalExceptions);
        // Now, if it's a cacheable bind we need to re-fetch the result from the cache, as we may have been racing with another
        // thread to store our result.  Note that we may throw from here, if there is a cached exception.
        // This will release the refcount of the current result holder (if any), and will replace
        // it with a non-addref'ed result
        if (pSpec->CanUseWithBindingCache() && (result== NULL || result->CanUseWithBindingCache()))
        {
            result = FindCachedFile(pSpec);
            if (result != NULL)
                result->AddRef();
        }
        bindOperation.SetResult(result.GetValue(), isCached);
        return result.Extract();
    }
    else
    {
        // Unsupported content type
        if (fThrowOnFileNotFound)
        {
            ThrowHR(COR_E_BADIMAGEFORMAT);
        }
        return nullptr;
    }
} // AppDomain::BindAssemblySpec
// Raises the managed AssemblyResolve event for 'pSpec' and returns the AddRef'ed
// PEAssembly a handler supplied, or nullptr. Codebase binds (no name) never raise
// the event. Non-transient handler exceptions are cached for the spec and, if not
// already EEFileLoadExceptions, wrapped and rethrown.
PEAssembly *AppDomain::TryResolveAssemblyUsingEvent(AssemblySpec *pSpec)
{
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;
    STATIC_CONTRACT_MODE_ANY;
    // No assembly resolve on codebase binds
    if (pSpec->GetName() == nullptr)
        return nullptr;
    PEAssembly *result = nullptr;
    EX_TRY
    {
        Assembly *pAssembly = RaiseAssemblyResolveEvent(pSpec);
        if (pAssembly != nullptr)
        {
            // Hand the caller its own reference on the manifest file.
            PEAssembly *pFile = pAssembly->GetManifestFile();
            pFile->AddRef();
            result = pFile;
        }
        BinderTracing::ResolutionAttemptedOperation::TraceAppDomainAssemblyResolve(pSpec, result);
    }
    EX_HOOK
    {
        Exception *pEx = GET_EXCEPTION();
        BinderTracing::ResolutionAttemptedOperation::TraceAppDomainAssemblyResolve(pSpec, nullptr, pEx);
        if (!pEx->IsTransient())
        {
            AddExceptionToCache(pSpec, pEx);
            if (!EEFileLoadException::CheckType(pEx))
                EEFileLoadException::Throw(pSpec, pEx->GetHR(), pEx);
        }
    }
    EX_END_HOOK;
    return result;
}
// Thread-safe refcount increment for the AppDomain; returns the new count.
ULONG AppDomain::AddRef()
{
    LIMITED_METHOD_CONTRACT;
    return InterlockedIncrement(&m_cRef);
}
// Thread-safe refcount decrement; deletes the AppDomain when the count hits zero.
ULONG AppDomain::Release()
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        MODE_ANY;
        PRECONDITION(m_cRef > 0);
    }
    CONTRACTL_END;
    ULONG cRef = InterlockedDecrement(&m_cRef);
    if (!cRef)
    {
        // The count can only reach zero while the domain is still being created
        // (i.e. creation failed before the domain was published).
        _ASSERTE (m_Stage == STAGE_CREATING);
        delete this;
    }
    return (cRef);
}
#ifndef CROSSGEN_COMPILE
// Raises the managed AssemblyLoadContext.AssemblyLoad event for 'pAssembly'.
// Skipped for the system assembly; best-effort — any exception from the managed
// handler is swallowed so a misbehaving handler cannot fail the load.
void AppDomain::RaiseLoadingAssemblyEvent(DomainAssembly *pAssembly)
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        PRECONDITION(this == GetAppDomain());
        MODE_ANY;
    }
    CONTRACTL_END;
    if (pAssembly->GetFile()->IsSystem())
    {
        return;
    }
    GCX_COOP();
    FAULT_NOT_FATAL();
    OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
    EX_TRY
    {
        // Only dispatch if at least one handler is subscribed.
        if (CoreLibBinder::GetField(FIELD__ASSEMBLYLOADCONTEXT__ASSEMBLY_LOAD)->GetStaticOBJECTREF() != NULL)
        {
            struct _gc {
                OBJECTREF    orThis;
            } gc;
            ZeroMemory(&gc, sizeof(gc));
            ARG_SLOT args[1];
            GCPROTECT_BEGIN(gc);
            gc.orThis = pAssembly->GetExposedAssemblyObject();
            MethodDescCallSite onAssemblyLoad(METHOD__ASSEMBLYLOADCONTEXT__ON_ASSEMBLY_LOAD);
            // GetExposedAssemblyObject may cause a gc, so call this before filling args[0]
            args[0] = ObjToArgSlot(gc.orThis);
            onAssemblyLoad.Call(args);
            GCPROTECT_END();
        }
    }
    EX_CATCH
    {
        // Swallow: the load event is purely informational.
    }
    EX_END_CATCH(SwallowAllExceptions);
}
// Static-style entry point used by the EH machinery: forwards to the current
// domain's RaiseUnhandledExceptionEvent, swallowing any secondary exception.
// Returns whether any managed handler was invoked.
BOOL AppDomain::OnUnhandledException(OBJECTREF *pThrowable, BOOL isTerminating/*=TRUE*/)
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_TRIGGERS;
    STATIC_CONTRACT_MODE_ANY;
    BOOL retVal = FALSE;
    GCX_COOP();
    EX_TRY
    {
        retVal = GetAppDomain()->RaiseUnhandledExceptionEvent(pThrowable, isTerminating);
    }
    EX_CATCH
    {
        // Swallow: a failing handler must not mask the original unhandled exception.
    }
    EX_END_CATCH(SwallowAllExceptions)  // Swallow any errors.
    return retVal;
}
// Invokes the managed AppContext.OnProcessExit callback during shutdown.
// May only be called by the finalizer thread in the Finalize1 shutdown phase,
// in cooperative GC mode, and only once the EE has fully started.
void AppDomain::RaiseExitProcessEvent()
{
    if (!g_fEEStarted)
        return;
    STATIC_CONTRACT_MODE_COOPERATIVE;
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;
    // Only finalizer thread during shutdown can call this function.
    _ASSERTE ((g_fEEShutDown&ShutDown_Finalize1) && GetThread() == FinalizerThread::GetFinalizerThread());
    _ASSERTE (GetThread()->PreemptiveGCDisabled());
    MethodDescCallSite onProcessExit(METHOD__APPCONTEXT__ON_PROCESS_EXIT);
    onProcessExit.Call(NULL);
}
// Invokes the managed AppContext unhandled-exception delegate for the given
// throwable.
//
// Parameters:
//   pThrowable    - GC-protected reference to the exception object
//   isTerminating - whether the process is terminating (passed through to
//                   the managed handlers)
//
// Returns TRUE if a handler delegate was registered (and thus invoked),
// FALSE if no one subscribed to the event.
//
// Fix: the original re-checked `orDelegate != NULL` after the early
// `return FALSE`, which made the second check always-true dead code; the
// redundant branch is removed.
BOOL
AppDomain::RaiseUnhandledExceptionEvent(OBJECTREF *pThrowable, BOOL isTerminating)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;
    _ASSERTE(pThrowable != NULL && IsProtectedByGCFrame(pThrowable));
    _ASSERTE(this == GetThread()->GetDomain());

    OBJECTREF orDelegate = CoreLibBinder::GetField(FIELD__APPCONTEXT__UNHANDLED_EXCEPTION)->GetStaticOBJECTREF();
    if (orDelegate == NULL)
        return FALSE;

    struct _gc {
        OBJECTREF Delegate;
        OBJECTREF Sender;
    } gc;
    ZeroMemory(&gc, sizeof(gc));
    GCPROTECT_BEGIN(gc);
    gc.Delegate = orDelegate;
    // orDelegate is known to be non-NULL here (checked above), so invoke
    // the handlers unconditionally.
    DistributeUnhandledExceptionReliably(&gc.Delegate, &gc.Sender, pThrowable, isTerminating);
    GCPROTECT_END();
    return TRUE;
}
#endif // CROSSGEN_COMPILE
// Lazily creates (and caches) the TPA binder used for default-context
// assembly loads in CoreCLR; subsequent calls return the cached binder.
// NOTE(review): no lock is visible around the check-then-create here —
// presumably initialization is serialized by callers; confirm before
// relying on concurrent first calls.
CLRPrivBinderCoreCLR *AppDomain::CreateBinderContext()
{
    CONTRACT(CLRPrivBinderCoreCLR *)
    {
        GC_TRIGGERS;
        THROWS;
        MODE_ANY;
        POSTCONDITION(CheckPointer(RETVAL));
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACT_END;
    if (!m_pTPABinderContext)
    {
        ETWOnStartup (FusionAppCtx_V1, FusionAppCtxEnd_V1);
        GCX_PREEMP();
        // Initialize the assembly binder for the default context loads for CoreCLR.
        IfFailThrow(CCoreCLRBinderHelper::DefaultBinderSetupContext(DefaultADID, &m_pTPABinderContext));
    }
    RETURN m_pTPABinderContext;
}
//---------------------------------------------------------------------------------------
//
// AppDomain::IsDebuggerAttached - is a debugger attached to this process
//
// Arguments:
// None
//
// Return Value:
// TRUE if a debugger is attached to this process, FALSE otherwise.
//
// Notes:
// This is identical to CORDebuggerAttached. This exists independently for legacy reasons - we used to
// support attaching to individual AppDomains. This should probably go away eventually.
//
// Reports whether a debugger is attached to this process. Simply forwards
// to the process-wide CORDebuggerAttached check (per-AppDomain attach is
// a legacy concept).
BOOL AppDomain::IsDebuggerAttached()
{
    LIMITED_METHOD_CONTRACT;
    return CORDebuggerAttached() ? TRUE : FALSE;
}
#ifdef DEBUGGING_SUPPORTED
// This is called from the debugger to request notification events from
// Assemblies, Modules, Types in this appdomain.
// Called from the debugger to request load notification events from all
// assemblies in this appdomain. Returns TRUE if any assembly produced a
// notification; FALSE immediately when no debugger is attached and we are
// not in the middle of an attach.
BOOL AppDomain::NotifyDebuggerLoad(int flags, BOOL attaching)
{
    WRAPPER_NO_CONTRACT;
    BOOL result = FALSE;
    if (!attaching && !IsDebuggerAttached())
        return FALSE;
    AssemblyIterator i;
    // Attach to our assemblies
    LOG((LF_CORDB, LL_INFO100, "AD::NDA: Iterating assemblies\n"));
    i = IterateAssembliesEx((AssemblyIterationFlags)(kIncludeLoaded | kIncludeLoading | kIncludeExecution));
    CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
    while (i.Next(pDomainAssembly.This()))
    {
        // NotifyDebuggerLoad is the left operand of || so every assembly is
        // notified even after one of them has already returned TRUE.
        result = (pDomainAssembly->NotifyDebuggerLoad(flags, attaching) ||
                  result);
    }
    return result;
}
// Notifies every loaded/loading execution assembly in this domain that the
// debugger is detaching. No-op when no debugger is attached.
//
// Fix: corrected the misspelled diagnostic log message
// ("Interating" -> "Iterating").
void AppDomain::NotifyDebuggerUnload()
{
    WRAPPER_NO_CONTRACT;
    if (!IsDebuggerAttached())
        return;
    LOG((LF_CORDB, LL_INFO10, "AD::NDD domain %#08x %ls\n",
         this, GetFriendlyNameForLogging()));
    LOG((LF_CORDB, LL_INFO100, "AD::NDD: Iterating domain bound assemblies\n"));
    AssemblyIterator i = IterateAssembliesEx((AssemblyIterationFlags)(kIncludeLoaded | kIncludeLoading | kIncludeExecution));
    CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
    // Detach from our assemblies
    while (i.Next(pDomainAssembly.This()))
    {
        LOG((LF_CORDB, LL_INFO100, "AD::NDD: Iterating assemblies\n"));
        pDomainAssembly->NotifyDebuggerUnload();
    }
}
#endif // DEBUGGING_SUPPORTED
#ifndef CROSSGEN_COMPILE
#ifdef FEATURE_COMINTEROP
// Returns the domain's RCW reference cache, creating it lazily on first
// use. Creation is lock-free: a new cache is allocated and published with
// an interlocked compare-exchange; if another thread won the race, our
// allocation is released by the NewHolder and the winner's cache is
// returned.
RCWRefCache *AppDomain::GetRCWRefCache()
{
    CONTRACT(RCWRefCache*)
    {
        THROWS;
        GC_NOTRIGGER;
        MODE_ANY;
        POSTCONDITION(CheckPointer(RETVAL));
    }
    CONTRACT_END;
    if (!m_pRCWRefCache) {
        NewHolder<RCWRefCache> pRCWRefCache = new RCWRefCache(this);
        // Only the thread whose CAS installs the pointer keeps its
        // allocation; losers let the holder free theirs.
        if (FastInterlockCompareExchangePointer(&m_pRCWRefCache, (RCWRefCache *)pRCWRefCache, NULL) == NULL)
        {
            pRCWRefCache.SuppressRelease();
        }
    }
    RETURN m_pRCWRefCache;
}
// Returns the domain's RCW cache, creating it lazily. Also ensures the
// process-global RCW cleanup list exists (double-checked under the
// SystemDomain lock) so that cleanup machinery is in place before any RCW
// can be created. The per-domain cache itself is created under the
// BaseDomain lock.
RCWCache *AppDomain::CreateRCWCache()
{
    CONTRACT(RCWCache*)
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        INJECT_FAULT(COMPlusThrowOM(););
        POSTCONDITION(CheckPointer(RETVAL));
    }
    CONTRACT_END;
    // Initialize the global RCW cleanup list here as well. This is so that it
    // it guaranteed to exist if any RCW's are created, but it is not created
    // unconditionally.
    if (!g_pRCWCleanupList)
    {
        SystemDomain::LockHolder lh;
        // Re-check under the lock (double-checked initialization).
        if (!g_pRCWCleanupList)
            g_pRCWCleanupList = new RCWCleanupList();
    }
    _ASSERTE(g_pRCWCleanupList);
    {
        BaseDomain::LockHolder lh(this);
        if (!m_pRCWCache)
            m_pRCWCache = new RCWCache(this);
    }
    RETURN m_pRCWCache;
}
// Releases all RCW wrappers associated with the given context cookie.
// No-op when the RCW cache was never created.
void AppDomain::ReleaseRCWs(LPVOID pCtxCookie)
{
    WRAPPER_NO_CONTRACT;
    RCWCache* pCache = m_pRCWCache;
    if (pCache != NULL)
        pCache->ReleaseWrappersWorker(pCtxCookie);
}
void AppDomain::DetachRCWs()
{
WRAPPER_NO_CONTRACT;
if (m_pRCWCache)
m_pRCWCache->DetachWrappersWorker();
}
#endif // FEATURE_COMINTEROP
// Called during exception unwind when pFrame is being popped. In this
// build the visible work is diagnostic logging only; the asserts document
// that a managed Thread must exist on this path.
void AppDomain::ExceptionUnwind(Frame *pFrame)
{
    CONTRACTL
    {
        DISABLED(GC_TRIGGERS); // EEResourceException
        DISABLED(THROWS); // EEResourceException
        MODE_ANY;
    }
    CONTRACTL_END;
    LOG((LF_APPDOMAIN, LL_INFO10, "AppDomain::ExceptionUnwind for %8.8x\n", pFrame));
    Thread *pThread = GetThread();
    _ASSERTE(pThread);
    LOG((LF_APPDOMAIN, LL_INFO10, "AppDomain::ExceptionUnwind: not first transition or abort\n"));
}
#endif // CROSSGEN_COMPILE
#endif // !DACCESS_COMPILE
// Returns the ClassInit flags for pMT's statics in this module.
// iClassIndex may be supplied to avoid recomputing pMT->GetClassIndex();
// (DWORD)-1 means "compute it here".
// NOTE(review): for dynamic statics an out-of-range entry ID returns
// FALSE (0), i.e. "no flags set" rather than an error — callers appear to
// rely on that; confirm before changing.
DWORD DomainLocalModule::GetClassFlags(MethodTable* pMT, DWORD iClassIndex /*=(DWORD)-1*/)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;
    {
        // Statics for this type must live in this module.
        CONSISTENCY_CHECK(GetDomainFile()->GetModule() == pMT->GetModuleForStatics());
    }
    if (pMT->IsDynamicStatics())
    {
        _ASSERTE(!pMT->ContainsGenericVariables());
        DWORD dynamicClassID = pMT->GetModuleDynamicEntryID();
        if(m_aDynamicEntries <= dynamicClassID)
            return FALSE;
        return (m_pDynamicClassTable[dynamicClassID].m_dwFlags);
    }
    else
    {
        if (iClassIndex == (DWORD)-1)
            iClassIndex = pMT->GetClassIndex();
        return GetPrecomputedStaticsClassData()[iClassIndex];
    }
}
#ifndef DACCESS_COMPILE
// Marks pMT's statics as fully initialized. Takes the domain-local block
// lock; asserts the class is neither already initialized nor in the
// init-error state.
void DomainLocalModule::SetClassInitialized(MethodTable* pMT)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;
    BaseDomain::DomainLocalBlockLockHolder lh(GetDomainFile()->GetAppDomain());
    _ASSERTE(!IsClassInitialized(pMT));
    _ASSERTE(!IsClassInitError(pMT));
    SetClassFlags(pMT, ClassInitFlags::INITIALIZED_FLAG);
}
// Marks pMT's statics as having failed class initialization (cctor threw).
// Takes the domain-local block lock before flipping the flag.
void DomainLocalModule::SetClassInitError(MethodTable* pMT)
{
    WRAPPER_NO_CONTRACT;
    BaseDomain::DomainLocalBlockLockHolder lh(GetDomainFile()->GetAppDomain());
    SetClassFlags(pMT, ClassInitFlags::ERROR_FLAG);
}
// ORs dwFlags into pMT's ClassInit flags. Caller must already hold the
// domain-local block lock (see PRECONDITION). Dynamic-statics classes get
// their dynamic table entry grown on demand; others write into the
// precomputed per-class flag array.
void DomainLocalModule::SetClassFlags(MethodTable* pMT, DWORD dwFlags)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        PRECONDITION(GetDomainFile()->GetModule() == pMT->GetModuleForStatics());
        // Assumes BaseDomain::DomainLocalBlockLockHolder is taken
        PRECONDITION(GetDomainFile()->GetAppDomain()->OwnDomainLocalBlockLock());
    } CONTRACTL_END;
    if (pMT->IsDynamicStatics())
    {
        _ASSERTE(!pMT->ContainsGenericVariables());
        DWORD dwID = pMT->GetModuleDynamicEntryID();
        EnsureDynamicClassIndex(dwID);
        m_pDynamicClassTable[dwID].m_dwFlags |= dwFlags;
    }
    else
    {
        GetPrecomputedStaticsClassData()[pMT->GetClassIndex()] |= dwFlags;
    }
}
// Grows the dynamic class table (if needed) so that index dwID is valid.
// Capacity starts at 16 and doubles until it covers dwID. The old table is
// intentionally not freed: it lives on the loader heap and lock-free
// readers may still be scanning it.
void DomainLocalModule::EnsureDynamicClassIndex(DWORD dwID)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        INJECT_FAULT(COMPlusThrowOM(););
        // Assumes BaseDomain::DomainLocalBlockLockHolder is taken
        PRECONDITION(GetDomainFile()->GetAppDomain()->OwnDomainLocalBlockLock());
    }
    CONTRACTL_END;
    if (dwID < m_aDynamicEntries)
    {
        _ASSERTE(m_pDynamicClassTable.Load() != NULL);
        return;
    }
    SIZE_T aDynamicEntries = max(16, m_aDynamicEntries.Load());
    while (aDynamicEntries <= dwID)
    {
        aDynamicEntries *= 2;
    }
    DynamicClassInfo* pNewDynamicClassTable;
    pNewDynamicClassTable = (DynamicClassInfo*)
        (void*)GetDomainFile()->GetLoaderAllocator()->GetHighFrequencyHeap()->AllocMem(
            S_SIZE_T(sizeof(DynamicClassInfo)) * S_SIZE_T(aDynamicEntries));
    memcpy(pNewDynamicClassTable, m_pDynamicClassTable, sizeof(DynamicClassInfo) * m_aDynamicEntries);
    // Note: Memory allocated on loader heap is zero filled
    // memset(pNewDynamicClassTable + m_aDynamicEntries, 0, (aDynamicEntries - m_aDynamicEntries) * sizeof(DynamicClassInfo));
    _ASSERTE(m_aDynamicEntries%2 == 0);
    // Commit new dynamic table. The lock-free helpers depend on the order.
    // Publish the (already-copied) table pointer first, then the larger
    // count, with barriers in between so a reader never observes a count
    // that exceeds the table it can see.
    MemoryBarrier();
    m_pDynamicClassTable = pNewDynamicClassTable;
    MemoryBarrier();
    m_aDynamicEntries = aDynamicEntries;
}
#ifndef CROSSGEN_COMPILE
// Allocates the statics storage (DynamicEntry) for a dynamic-statics class.
// Caller must hold the domain-local block lock. Non-collectible classes get
// their non-GC statics inline in a NormalDynamicEntry on the loader heap;
// collectible classes get a CollectibleDynamicEntry whose statics live in
// GC-allocated arrays referenced through loader-allocator handles (so they
// can be collected with the assembly).
void DomainLocalModule::AllocateDynamicClass(MethodTable *pMT)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        // Assumes BaseDomain::DomainLocalBlockLockHolder is taken
        PRECONDITION(GetDomainFile()->GetAppDomain()->OwnDomainLocalBlockLock());
    }
    CONTRACTL_END;
    _ASSERTE(!pMT->ContainsGenericVariables());
    _ASSERTE(!pMT->IsSharedByGenericInstantiations());
    _ASSERTE(GetDomainFile()->GetModule() == pMT->GetModuleForStatics());
    _ASSERTE(pMT->IsDynamicStatics());
    DWORD dynamicEntryIDIndex = pMT->GetModuleDynamicEntryID();
    EnsureDynamicClassIndex(dynamicEntryIDIndex);
    _ASSERTE(m_aDynamicEntries > dynamicEntryIDIndex);
    EEClass *pClass = pMT->GetClass();
    DWORD dwStaticBytes = pClass->GetNonGCRegularStaticFieldBytes();
    DWORD dwNumHandleStatics = pClass->GetNumHandleRegularStatics();
    _ASSERTE(!IsClassAllocated(pMT));
    _ASSERTE(!IsClassInitialized(pMT));
    _ASSERTE(!IsClassInitError(pMT));
    DynamicEntry *pDynamicStatics = m_pDynamicClassTable[dynamicEntryIDIndex].m_pDynamicEntry;
    // We need this check because maybe a class had a cctor but no statics
    if (dwStaticBytes > 0 || dwNumHandleStatics > 0)
    {
        if (pDynamicStatics == NULL)
        {
            LoaderHeap * pLoaderAllocator = GetDomainFile()->GetLoaderAllocator()->GetHighFrequencyHeap();
            if (pMT->Collectible())
            {
                // Collectible entry holds only handles; the data itself is
                // GC-allocated below.
                pDynamicStatics = (DynamicEntry*)(void*)pLoaderAllocator->AllocMem(S_SIZE_T(sizeof(CollectibleDynamicEntry)));
            }
            else
            {
                // Non-collectible: non-GC statics live inline after the header.
                SIZE_T dynamicEntrySize = DynamicEntry::GetOffsetOfDataBlob() + dwStaticBytes;
#ifdef FEATURE_64BIT_ALIGNMENT
                // Allocate memory with extra alignment only if it is really necessary
                if (dwStaticBytes >= MAX_PRIMITIVE_FIELD_SIZE)
                {
                    static_assert_no_msg(sizeof(NormalDynamicEntry) % MAX_PRIMITIVE_FIELD_SIZE == 0);
                    pDynamicStatics = (DynamicEntry*)(void*)pLoaderAllocator->AllocAlignedMem(dynamicEntrySize, MAX_PRIMITIVE_FIELD_SIZE);
                }
                else
#endif
                    pDynamicStatics = (DynamicEntry*)(void*)pLoaderAllocator->AllocMem(S_SIZE_T(dynamicEntrySize));
            }
            // Note: Memory allocated on loader heap is zero filled
            m_pDynamicClassTable[dynamicEntryIDIndex].m_pDynamicEntry = pDynamicStatics;
        }
        if (pMT->Collectible() && (dwStaticBytes != 0))
        {
            // Back the non-GC statics of a collectible class with a
            // GC-protected primitive array, referenced via a handle.
            GCX_COOP();
            OBJECTREF nongcStaticsArray = NULL;
            GCPROTECT_BEGIN(nongcStaticsArray);
#ifdef FEATURE_64BIT_ALIGNMENT
            // Allocate memory with extra alignment only if it is really necessary
            if (dwStaticBytes >= MAX_PRIMITIVE_FIELD_SIZE)
                nongcStaticsArray = AllocatePrimitiveArray(ELEMENT_TYPE_I8, (dwStaticBytes + (sizeof(CLR_I8)-1)) / (sizeof(CLR_I8)));
            else
#endif
                nongcStaticsArray = AllocatePrimitiveArray(ELEMENT_TYPE_U1, dwStaticBytes);
            ((CollectibleDynamicEntry *)pDynamicStatics)->m_hNonGCStatics = GetDomainFile()->GetModule()->GetLoaderAllocator()->AllocateHandle(nongcStaticsArray);
            GCPROTECT_END();
        }
        if (dwNumHandleStatics > 0)
        {
            if (!pMT->Collectible())
            {
                GetAppDomain()->AllocateStaticFieldObjRefPtrs(dwNumHandleStatics,
                                                              &((NormalDynamicEntry *)pDynamicStatics)->m_pGCStatics);
            }
            else
            {
                // GC statics of a collectible class live in an object array
                // referenced via a loader-allocator handle.
                GCX_COOP();
                OBJECTREF gcStaticsArray = NULL;
                GCPROTECT_BEGIN(gcStaticsArray);
                gcStaticsArray = AllocateObjectArray(dwNumHandleStatics, g_pObjectClass);
                ((CollectibleDynamicEntry *)pDynamicStatics)->m_hGCStatics = GetDomainFile()->GetModule()->GetLoaderAllocator()->AllocateHandle(gcStaticsArray);
                GCPROTECT_END();
            }
        }
    }
}
// Ensures pMT's statics storage exists and the per-class flags are set.
// Uses double-checked IsClassAllocated around the domain-local block lock.
// Classes with no cctor and no boxed statics are marked INITIALIZED
// immediately so the init check can be skipped later.
void DomainLocalModule::PopulateClass(MethodTable *pMT)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
    }
    CONTRACTL_END;
    _ASSERTE(!pMT->ContainsGenericVariables());
    // <todo> the only work actually done here for non-dynamics is the freezing related work.
    // See if we can eliminate this and make this a dynamic-only path </todo>
    DWORD iClassIndex = pMT->GetClassIndex();
    if (!IsClassAllocated(pMT, iClassIndex))
    {
        BaseDomain::DomainLocalBlockLockHolder lh(GetDomainFile()->GetAppDomain());
        // Re-check under the lock: another thread may have populated it.
        if (!IsClassAllocated(pMT, iClassIndex))
        {
            // Allocate dynamic space if necessary
            if (pMT->IsDynamicStatics())
                AllocateDynamicClass(pMT);
            // determine flags to set on the statics block
            DWORD dwFlags = ClassInitFlags::ALLOCATECLASS_FLAG;
            if (!pMT->HasClassConstructor() && !pMT->HasBoxedRegularStatics())
            {
                _ASSERTE(!IsClassInitialized(pMT));
                _ASSERTE(!IsClassInitError(pMT));
                dwFlags |= ClassInitFlags::INITIALIZED_FLAG;
            }
            if (pMT->Collectible())
            {
                dwFlags |= ClassInitFlags::COLLECTIBLE_FLAG;
            }
            // Set all flags at the same time to avoid races
            SetClassFlags(pMT, dwFlags);
        }
    }
    return;
}
#endif // CROSSGEN_COMPILE
#ifndef CROSSGEN_COMPILE
// Raises the managed AssemblyLoadContext.OnTypeResolve event for szName.
// Returns the resolving DomainAssembly, or NULL if no handler resolved the
// type. If the handler returns a collectible assembly and the caller did
// not supply pResultingAssemblyRef (to keep it alive), throws
// NotSupportedException.
DomainAssembly* AppDomain::RaiseTypeResolveEventThrowing(DomainAssembly* pAssembly, LPCSTR szName, ASSEMBLYREF *pResultingAssemblyRef)
{
    CONTRACTL
    {
        MODE_ANY;
        GC_TRIGGERS;
        THROWS;
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;
    OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
    DomainAssembly* pResolvedAssembly = NULL;
    _ASSERTE(strcmp(szName, g_AppDomainClassName));
    GCX_COOP();
    struct _gc {
        OBJECTREF AssemblyRef;
        STRINGREF str;
    } gc;
    ZeroMemory(&gc, sizeof(gc));
    GCPROTECT_BEGIN(gc);
    if (pAssembly != NULL)
        gc.AssemblyRef = pAssembly->GetExposedAssemblyObject();
    MethodDescCallSite onTypeResolve(METHOD__ASSEMBLYLOADCONTEXT__ON_TYPE_RESOLVE);
    gc.str = StringObject::NewString(szName);
    ARG_SLOT args[2] =
    {
        ObjToArgSlot(gc.AssemblyRef),
        ObjToArgSlot(gc.str)
    };
    ASSEMBLYREF ResultingAssemblyRef = (ASSEMBLYREF) onTypeResolve.Call_RetOBJECTREF(args);
    if (ResultingAssemblyRef != NULL)
    {
        pResolvedAssembly = ResultingAssemblyRef->GetDomainAssembly();
        if (pResultingAssemblyRef)
            *pResultingAssemblyRef = ResultingAssemblyRef;
        else
        {
            // Without a GC ref to hand back, a collectible result could be
            // collected out from under the caller — refuse it.
            if (pResolvedAssembly->IsCollectible())
            {
                COMPlusThrow(kNotSupportedException, W("NotSupported_CollectibleBoundNonCollectible"));
            }
        }
    }
    GCPROTECT_END();
    return pResolvedAssembly;
}
// Raises the managed AssemblyLoadContext.OnResourceResolve event for
// szName. Returns the resolving Assembly or NULL. Collectible results are
// rejected with NotSupportedException (no GC reference can be returned to
// keep them alive).
Assembly* AppDomain::RaiseResourceResolveEvent(DomainAssembly* pAssembly, LPCSTR szName)
{
    CONTRACT(Assembly*)
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACT_END;
    Assembly* pResolvedAssembly = NULL;
    GCX_COOP();
    struct _gc {
        OBJECTREF AssemblyRef;
        STRINGREF str;
    } gc;
    ZeroMemory(&gc, sizeof(gc));
    GCPROTECT_BEGIN(gc);
    if (pAssembly != NULL)
        gc.AssemblyRef=pAssembly->GetExposedAssemblyObject();
    MethodDescCallSite onResourceResolve(METHOD__ASSEMBLYLOADCONTEXT__ON_RESOURCE_RESOLVE);
    gc.str = StringObject::NewString(szName);
    ARG_SLOT args[2] =
    {
        ObjToArgSlot(gc.AssemblyRef),
        ObjToArgSlot(gc.str)
    };
    ASSEMBLYREF ResultingAssemblyRef = (ASSEMBLYREF) onResourceResolve.Call_RetOBJECTREF(args);
    if (ResultingAssemblyRef != NULL)
    {
        pResolvedAssembly = ResultingAssemblyRef->GetAssembly();
        if (pResolvedAssembly->IsCollectible())
        {
            COMPlusThrow(kNotSupportedException, W("NotSupported_CollectibleAssemblyResolve"));
        }
    }
    GCPROTECT_END();
    RETURN pResolvedAssembly;
}
// Raises the managed AssemblyLoadContext.OnAssemblyResolve event for the
// assembly described by pSpec. Returns the resolving Assembly (after
// validating its public key token against the spec) or NULL. Collectible
// results are rejected with NotSupportedException.
Assembly *
AppDomain::RaiseAssemblyResolveEvent(
    AssemblySpec * pSpec)
{
    CONTRACT(Assembly*)
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACT_END;
    StackSString ssName;
    pSpec->GetFileOrDisplayName(0, ssName);
    // Elevate threads allowed loading level. This allows the host to load an assembly even in a restricted
    // condition. Note, however, that this exposes us to possible recursion failures, if the host tries to
    // load the assemblies currently being loaded. (Such cases would then throw an exception.)
    OVERRIDE_LOAD_LEVEL_LIMIT(FILE_ACTIVE);
    OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
    GCX_COOP();
    Assembly* pAssembly = NULL;
    struct _gc {
        OBJECTREF AssemblyRef;
        STRINGREF str;
    } gc;
    ZeroMemory(&gc, sizeof(gc));
    GCPROTECT_BEGIN(gc);
    {
        if (pSpec->GetParentAssembly() != NULL)
        {
            gc.AssemblyRef=pSpec->GetParentAssembly()->GetExposedAssemblyObject();
        }
        MethodDescCallSite onAssemblyResolve(METHOD__ASSEMBLYLOADCONTEXT__ON_ASSEMBLY_RESOLVE);
        gc.str = StringObject::NewString(ssName);
        ARG_SLOT args[2] = {
            ObjToArgSlot(gc.AssemblyRef),
            ObjToArgSlot(gc.str)
        };
        ASSEMBLYREF ResultingAssemblyRef = (ASSEMBLYREF) onAssemblyResolve.Call_RetOBJECTREF(args);
        if (ResultingAssemblyRef != NULL)
        {
            pAssembly = ResultingAssemblyRef->GetAssembly();
            if (pAssembly->IsCollectible())
            {
                COMPlusThrow(kNotSupportedException, W("NotSupported_CollectibleAssemblyResolve"));
            }
        }
    }
    GCPROTECT_END();
    if (pAssembly != NULL)
    {
        // Check that the public key token matches the one specified in the spec
        // MatchPublicKeys throws as appropriate
        pSpec->MatchPublicKeys(pAssembly);
    }
    RETURN pAssembly;
} // AppDomain::RaiseAssemblyResolveEvent
// Bit flags describing kinds of deferred runtime work. The consumers of
// these flags are outside this part of the file; the per-flag meanings
// below are inferred from the names — confirm against the users of
// s_WorkType before relying on them.
enum WorkType
{
    WT_UnloadDomain = 0x1,   // presumably: appdomain-unload work pending
    WT_ThreadAbort = 0x2,    // presumably: thread-abort work pending
    WT_FinalizerThread = 0x4 // presumably: finalizer-thread work pending
};
// Combined WorkType flags; Volatile<> because it is accessed across threads.
static Volatile<DWORD> s_WorkType = 0;
// Walks the delayed-unload list and destroys every loader allocator whose
// recorded GC ref-point is older than the current collection count (i.e.
// whose references are provably gone). A concurrent GC in progress makes
// the current count not yet trustworthy, so we subtract one. Unlinking
// happens under m_DelayedUnloadCrst; actual deletion happens outside the
// lock.
void SystemDomain::ProcessDelayedUnloadLoaderAllocators()
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;
    int iGCRefPoint=GCHeapUtilities::GetGCHeap()->CollectionCount(GCHeapUtilities::GetGCHeap()->GetMaxGeneration());
    if (GCHeapUtilities::GetGCHeap()->IsConcurrentGCInProgress())
        iGCRefPoint--;
    LoaderAllocator * pAllocatorsToDelete = NULL;
    {
        CrstHolder lh(&m_DelayedUnloadCrst);
        LoaderAllocator ** ppAllocator=&m_pDelayedUnloadListOfLoaderAllocators;
        while (*ppAllocator!= NULL)
        {
            LoaderAllocator * pAllocator = *ppAllocator;
            if (0 < iGCRefPoint - pAllocator->GetGCRefPoint())
            {
                // Old enough: unlink and move onto the local delete list.
                *ppAllocator = pAllocator->m_pLoaderAllocatorDestroyNext;
                pAllocator->m_pLoaderAllocatorDestroyNext = pAllocatorsToDelete;
                pAllocatorsToDelete = pAllocator;
            }
            else
            {
                ppAllocator = &pAllocator->m_pLoaderAllocatorDestroyNext;
            }
        }
    }
    // Delete collected loader allocators on the finalizer thread. We cannot offload it to an appdomain
    // unload thread because there is not guaranteed to be one, and it is not that expensive an operation anyway.
    while (pAllocatorsToDelete != NULL)
    {
        LoaderAllocator * pAllocator = pAllocatorsToDelete;
        pAllocatorsToDelete = pAllocator->m_pLoaderAllocatorDestroyNext;
        delete pAllocator;
    }
}
#endif // CROSSGEN_COMPILE
// Reports the domain's static GC references (via the large-heap handle
// table) to the GC. Only valid on the GC's special scanning thread during
// a server-GC collection, as the asserts document.
void AppDomain::EnumStaticGCRefs(promote_func* fn, ScanContext* sc)
{
    CONTRACT_VOID
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACT_END;
    _ASSERTE(GCHeapUtilities::IsGCInProgress() &&
             GCHeapUtilities::IsServerHeap() &&
             IsGCSpecialThread());
#ifndef CROSSGEN_COMPILE
    if (m_pLargeHeapHandleTable != nullptr)
    {
        m_pLargeHeapHandleTable->EnumStaticGCRefs(fn, sc);
    }
#endif // CROSSGEN_COMPILE
    RETURN;
}
#endif // !DACCESS_COMPILE
//------------------------------------------------------------------------
// Returns the loader allocator for this domain. With a single process-wide
// domain this is always the global (non-unloadable) loader allocator.
PTR_LoaderAllocator BaseDomain::GetLoaderAllocator()
{
    WRAPPER_NO_CONTRACT;
    return SystemDomain::GetGlobalLoaderAllocator(); // The one and only domain is not unloadable
}
//------------------------------------------------------------------------
// Returns the (possibly newly assigned) type ID for pMT from the domain's
// type-ID map. May allocate, hence THROWS.
UINT32 BaseDomain::GetTypeID(PTR_MethodTable pMT) {
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        PRECONDITION(pMT->GetDomain() == this);
    } CONTRACTL_END;
    return m_typeIDMap.GetTypeID(pMT);
}
//------------------------------------------------------------------------
// Returns the ID of the type if found. If not found, returns INVALID_TYPE_ID
// Returns the ID of the type if found. If not found, returns INVALID_TYPE_ID
// (lookup only — never assigns a new ID, hence NOTHROW).
UINT32 BaseDomain::LookupTypeID(PTR_MethodTable pMT)
{
    CONTRACTL {
        NOTHROW;
        WRAPPER(GC_TRIGGERS);
        PRECONDITION(pMT->GetDomain() == this);
    } CONTRACTL_END;
    return m_typeIDMap.LookupTypeID(pMT);
}
//------------------------------------------------------------------------
// Reverse lookup: maps a type ID back to its MethodTable. The consistency
// checks document that the result is expected to be a valid interface type.
PTR_MethodTable BaseDomain::LookupType(UINT32 id) {
    CONTRACTL {
        NOTHROW;
        WRAPPER(GC_TRIGGERS);
        CONSISTENCY_CHECK(id != TYPE_ID_THIS_CLASS);
    } CONTRACTL_END;
    PTR_MethodTable pMT = m_typeIDMap.LookupType(id);
    CONSISTENCY_CHECK(CheckPointer(pMT));
    CONSISTENCY_CHECK(pMT->IsInterface());
    return pMT;
}
#ifndef DACCESS_COMPILE
//---------------------------------------------------------------------------------------
// Purges all type-ID map entries belonging to a (collectible) loader
// allocator that is going away.
void BaseDomain::RemoveTypesFromTypeIDMap(LoaderAllocator* pLoaderAllocator)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;
    m_typeIDMap.RemoveTypes(pLoaderAllocator);
}
#endif // DACCESS_COMPILE
//---------------------------------------------------------------------------------------
//
// Advances the iterator to the next assembly matching the iteration flags,
// taking the appdomain's assembly-list lock for the duration of the step.
// Returns FALSE when the enumeration is exhausted.
BOOL
AppDomain::AssemblyIterator::Next(
    CollectibleAssemblyHolder<DomainAssembly *> * pDomainAssemblyHolder)
{
    CONTRACTL {
        NOTHROW;
        WRAPPER(GC_TRIGGERS); // Triggers only in MODE_COOPERATIVE (by taking the lock)
        MODE_ANY;
    } CONTRACTL_END;
    CrstHolder ch(m_pAppDomain->GetAssemblyListLock());
    return Next_Unlocked(pDomainAssemblyHolder);
}
//---------------------------------------------------------------------------------------
//
// Note: Does not lock the assembly list, but locks collectible assemblies for adding references.
//
// Core of the assembly enumeration: walks the list, skipping entries that
// do not match m_assemblyIterationFlags (load status, execution status,
// collectibility). For live collectible assemblies the holder takes a
// loader-allocator reference so the assembly cannot be collected while the
// caller uses it. Caller must already hold the assembly-list lock.
BOOL
AppDomain::AssemblyIterator::Next_Unlocked(
    CollectibleAssemblyHolder<DomainAssembly *> * pDomainAssemblyHolder)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    } CONTRACTL_END;
#ifndef DACCESS_COMPILE
    _ASSERTE(m_pAppDomain->GetAssemblyListLock()->OwnedByCurrentThread());
#endif
    while (m_Iterator.Next())
    {
        // Get element from the list/iterator (without adding reference to the assembly)
        DomainAssembly * pDomainAssembly = dac_cast<PTR_DomainAssembly>(m_Iterator.GetElement());
        if (pDomainAssembly == NULL)
        {
            continue;
        }
        if (pDomainAssembly->IsError())
        {
            if (m_assemblyIterationFlags & kIncludeFailedToLoad)
            {
                *pDomainAssemblyHolder = pDomainAssembly;
                return TRUE;
            }
            continue; // reject
        }
        // First, reject DomainAssemblies whose load status is not to be included in
        // the enumeration
        if (pDomainAssembly->IsAvailableToProfilers() &&
            (m_assemblyIterationFlags & kIncludeAvailableToProfilers))
        {
            // The assembly has reached the state at which we would notify profilers,
            // and we're supposed to include such assemblies in the enumeration. So
            // don't reject it (i.e., noop here, and don't bother with the rest of
            // the load status checks). Check for this first, since
            // kIncludeAvailableToProfilers contains some loaded AND loading
            // assemblies.
        }
        else if (pDomainAssembly->IsLoaded())
        {
            // A loaded assembly
            if (!(m_assemblyIterationFlags & kIncludeLoaded))
            {
                continue; // reject
            }
        }
        else
        {
            // A loading assembly
            if (!(m_assemblyIterationFlags & kIncludeLoading))
            {
                continue; // reject
            }
        }
        // Next, reject DomainAssemblies whose execution status is
        // not to be included in the enumeration
        // execution assembly
        if (!(m_assemblyIterationFlags & kIncludeExecution))
        {
            continue; // reject
        }
        // Next, reject collectible assemblies
        if (pDomainAssembly->IsCollectible())
        {
            if (m_assemblyIterationFlags & kExcludeCollectible)
            {
                _ASSERTE(!(m_assemblyIterationFlags & kIncludeCollected));
                continue; // reject
            }
            // Un-tenured collectible assemblies should not be returned. (This can only happen in a brief
            // window during collectible assembly creation. No thread should need to have a pointer
            // to the just allocated DomainAssembly at this stage.)
            if (!pDomainAssembly->GetAssembly()->GetManifestModule()->IsTenured())
            {
                continue; // reject
            }
            if (pDomainAssembly->GetLoaderAllocator()->AddReferenceIfAlive())
            {   // The assembly is alive
                // Set the holder value (incl. increasing ref-count)
                *pDomainAssemblyHolder = pDomainAssembly;
                // Now release the reference we took in the if-condition
                pDomainAssembly->GetLoaderAllocator()->Release();
                return TRUE;
            }
            // The assembly is not alive anymore (and we didn't increase its ref-count in the
            // if-condition)
            if (!(m_assemblyIterationFlags & kIncludeCollected))
            {
                continue; // reject
            }
            // Set the holder value to assembly with 0 ref-count without increasing the ref-count (won't
            // call Release either)
            pDomainAssemblyHolder->Assign(pDomainAssembly, FALSE);
            return TRUE;
        }
        // Non-collectible assembly that passed all filters.
        *pDomainAssemblyHolder = pDomainAssembly;
        return TRUE;
    }
    // Enumeration exhausted.
    *pDomainAssemblyHolder = NULL;
    return FALSE;
} // AppDomain::AssemblyIterator::Next_Unlocked
#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
// Returns S_OK if the assembly was successfully loaded
// Invokes the managed AssemblyLoadContext resolution pipeline for an
// assembly bind: (1) ALC.Load override, (2) TPA-binder fallback,
// (3) ResolveSatelliteAssembly for culture-specific requests,
// (4) the Resolving event. Returns S_OK with *ppLoadedAssembly AddRef'd on
// success, COR_E_FILENOTFOUND when nothing resolved; exceptions from the
// managed callouts propagate after being recorded on the binder tracer.
HRESULT RuntimeInvokeHostAssemblyResolver(INT_PTR pManagedAssemblyLoadContextToBindWithin, IAssemblyName *pIAssemblyName, CLRPrivBinderCoreCLR *pTPABinder, BINDER_SPACE::AssemblyName *pAssemblyName, ICLRPrivAssembly **ppLoadedAssembly)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        PRECONDITION(ppLoadedAssembly != NULL);
    }
    CONTRACTL_END;
    HRESULT hr = E_FAIL;
    // Switch to COOP mode since we are going to work with managed references
    GCX_COOP();
    struct
    {
        ASSEMBLYNAMEREF oRefAssemblyName;
        ASSEMBLYREF oRefLoadedAssembly;
    } _gcRefs;
    ZeroMemory(&_gcRefs, sizeof(_gcRefs));
    GCPROTECT_BEGIN(_gcRefs);
    ICLRPrivAssembly *pResolvedAssembly = NULL;
    // Prepare to invoke System.Runtime.Loader.AssemblyLoadContext.Resolve method.
    //
    // First, initialize an assembly spec for the requested assembly
    //
    AssemblySpec spec;
    hr = spec.Init(pIAssemblyName);
    if (SUCCEEDED(hr))
    {
        bool fResolvedAssembly = false;
        BinderTracing::ResolutionAttemptedOperation tracer{pAssemblyName, 0 /*binderID*/, pManagedAssemblyLoadContextToBindWithin, hr};
        // Allocate an AssemblyName managed object
        _gcRefs.oRefAssemblyName = (ASSEMBLYNAMEREF) AllocateObject(CoreLibBinder::GetClass(CLASS__ASSEMBLY_NAME));
        // Initialize the AssemblyName object from the AssemblySpec
        spec.AssemblyNameInit(&_gcRefs.oRefAssemblyName, NULL);
        // A non-neutral culture means this is a satellite-assembly request.
        bool isSatelliteAssemblyRequest = !spec.IsNeutralCulture();
        EX_TRY
        {
            if (pTPABinder != NULL)
            {
                // Step 2 (of CLRPrivBinderAssemblyLoadContext::BindUsingAssemblyName) - Invoke Load method
                // This is not invoked for TPA Binder since it always returns NULL.
                tracer.GoToStage(BinderTracing::ResolutionAttemptedOperation::Stage::AssemblyLoadContextLoad);
                // Finally, setup arguments for invocation
                MethodDescCallSite methLoadAssembly(METHOD__ASSEMBLYLOADCONTEXT__RESOLVE);
                // Setup the arguments for the call
                ARG_SLOT args[2] =
                {
                    PtrToArgSlot(pManagedAssemblyLoadContextToBindWithin), // IntPtr for managed assembly load context instance
                    ObjToArgSlot(_gcRefs.oRefAssemblyName), // AssemblyName instance
                };
                // Make the call
                _gcRefs.oRefLoadedAssembly = (ASSEMBLYREF) methLoadAssembly.Call_RetOBJECTREF(args);
                if (_gcRefs.oRefLoadedAssembly != NULL)
                {
                    fResolvedAssembly = true;
                }
                hr = fResolvedAssembly ? S_OK : COR_E_FILENOTFOUND;
                // Step 3 (of CLRPrivBinderAssemblyLoadContext::BindUsingAssemblyName)
                if (!fResolvedAssembly && !isSatelliteAssemblyRequest)
                {
                    tracer.GoToStage(BinderTracing::ResolutionAttemptedOperation::Stage::DefaultAssemblyLoadContextFallback);
                    // If we could not resolve the assembly using Load method, then attempt fallback with TPA Binder.
                    // Since TPA binder cannot fallback to itself, this fallback does not happen for binds within TPA binder.
                    //
                    // Switch to pre-emp mode before calling into the binder
                    GCX_PREEMP();
                    ICLRPrivAssembly *pCoreCLRFoundAssembly = NULL;
                    hr = pTPABinder->BindAssemblyByName(pIAssemblyName, &pCoreCLRFoundAssembly);
                    if (SUCCEEDED(hr))
                    {
                        _ASSERTE(pCoreCLRFoundAssembly != NULL);
                        pResolvedAssembly = pCoreCLRFoundAssembly;
                        fResolvedAssembly = true;
                    }
                }
            }
            if (!fResolvedAssembly && isSatelliteAssemblyRequest)
            {
                // Step 4 (of CLRPrivBinderAssemblyLoadContext::BindUsingAssemblyName)
                //
                // Attempt to resolve it using the ResolveSatelliteAssembly method.
                // Finally, setup arguments for invocation
                tracer.GoToStage(BinderTracing::ResolutionAttemptedOperation::Stage::ResolveSatelliteAssembly);
                MethodDescCallSite methResolveSatelitteAssembly(METHOD__ASSEMBLYLOADCONTEXT__RESOLVESATELLITEASSEMBLY);
                // Setup the arguments for the call
                ARG_SLOT args[2] =
                {
                    PtrToArgSlot(pManagedAssemblyLoadContextToBindWithin), // IntPtr for managed assembly load context instance
                    ObjToArgSlot(_gcRefs.oRefAssemblyName), // AssemblyName instance
                };
                // Make the call
                _gcRefs.oRefLoadedAssembly = (ASSEMBLYREF) methResolveSatelitteAssembly.Call_RetOBJECTREF(args);
                if (_gcRefs.oRefLoadedAssembly != NULL)
                {
                    // Set the flag indicating we found the assembly
                    fResolvedAssembly = true;
                }
                hr = fResolvedAssembly ? S_OK : COR_E_FILENOTFOUND;
            }
            if (!fResolvedAssembly)
            {
                // Step 5 (of CLRPrivBinderAssemblyLoadContext::BindUsingAssemblyName)
                //
                // If we couldn't resolve the assembly using TPA LoadContext as well, then
                // attempt to resolve it using the Resolving event.
                // Finally, setup arguments for invocation
                tracer.GoToStage(BinderTracing::ResolutionAttemptedOperation::Stage::AssemblyLoadContextResolvingEvent);
                MethodDescCallSite methResolveUsingEvent(METHOD__ASSEMBLYLOADCONTEXT__RESOLVEUSINGEVENT);
                // Setup the arguments for the call
                ARG_SLOT args[2] =
                {
                    PtrToArgSlot(pManagedAssemblyLoadContextToBindWithin), // IntPtr for managed assembly load context instance
                    ObjToArgSlot(_gcRefs.oRefAssemblyName), // AssemblyName instance
                };
                // Make the call
                _gcRefs.oRefLoadedAssembly = (ASSEMBLYREF) methResolveUsingEvent.Call_RetOBJECTREF(args);
                if (_gcRefs.oRefLoadedAssembly != NULL)
                {
                    // Set the flag indicating we found the assembly
                    fResolvedAssembly = true;
                }
                hr = fResolvedAssembly ? S_OK : COR_E_FILENOTFOUND;
            }
            if (fResolvedAssembly && pResolvedAssembly == NULL)
            {
                // If we are here, assembly was successfully resolved via Load or Resolving events.
                _ASSERTE(_gcRefs.oRefLoadedAssembly != NULL);
                // We were able to get the assembly loaded. Now, get its name since the host could have
                // performed the resolution using an assembly with different name.
                DomainAssembly *pDomainAssembly = _gcRefs.oRefLoadedAssembly->GetDomainAssembly();
                PEAssembly *pLoadedPEAssembly = NULL;
                bool fFailLoad = false;
                if (!pDomainAssembly)
                {
                    // Reflection emitted assemblies will not have a domain assembly.
                    fFailLoad = true;
                }
                else
                {
                    pLoadedPEAssembly = pDomainAssembly->GetFile();
                    if (!pLoadedPEAssembly->HasHostAssembly())
                    {
                        // Reflection emitted assemblies will not have a domain assembly.
                        fFailLoad = true;
                    }
                }
                // The loaded assembly's ICLRPrivAssembly* is saved as HostAssembly in PEAssembly
                if (fFailLoad)
                {
                    SString name;
                    spec.GetFileOrDisplayName(0, name);
                    COMPlusThrowHR(COR_E_INVALIDOPERATION, IDS_HOST_ASSEMBLY_RESOLVER_DYNAMICALLY_EMITTED_ASSEMBLIES_UNSUPPORTED, name);
                }
                pResolvedAssembly = pLoadedPEAssembly->GetHostAssembly();
            }
            if (fResolvedAssembly)
            {
                _ASSERTE(pResolvedAssembly != NULL);
                // Get the ICLRPrivAssembly reference to return back to.
                *ppLoadedAssembly = clr::SafeAddRef(pResolvedAssembly);
                hr = S_OK;
                tracer.SetFoundAssembly(static_cast<BINDER_SPACE::Assembly *>(pResolvedAssembly));
            }
            else
            {
                hr = COR_E_FILENOTFOUND;
            }
        }
        EX_HOOK
        {
            // Record the managed exception on the tracer; the hook does not
            // swallow it — the exception continues to propagate.
            Exception* ex = GET_EXCEPTION();
            tracer.SetException(ex);
        }
        EX_END_HOOK
    }
    GCPROTECT_END();
    return hr;
}
#endif // !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
//approximate size of loader data
//maintained for each assembly
// NOTE(review): 8196 looks like it may have been intended as 8192 (2^13);
// the value is only a rough approximation either way.
#define APPROX_LOADER_DATA_PER_ASSEMBLY 8196
// Rough estimate of native memory consumed by this appdomain: the
// AppDomain object itself, its loader allocator's estimate, plus a fixed
// per-assembly approximation.
size_t AppDomain::EstimateSize()
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;
    size_t retval = sizeof(AppDomain);
    retval += GetLoaderAllocator()->EstimateSize();
    //very rough estimate
    retval += GetAssemblyCount() * APPROX_LOADER_DATA_PER_ASSEMBLY;
    return retval;
}
#ifdef DACCESS_COMPILE
// DAC: enumerates the memory backing this DomainLocalModule (the module
// object, its DomainFile, and every dynamic class entry) so it is captured
// in minidumps.
void
DomainLocalModule::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
    SUPPORTS_DAC;
    // Enumerate the DomainLocalModule itself. DLMs are allocated to be larger than
    // sizeof(DomainLocalModule) to make room for ClassInit flags and non-GC statics.
    // "DAC_ENUM_DTHIS()" probably does not account for this, so we might not enumerate
    // all of the ClassInit flags and non-GC statics.
    // sizeof(DomainLocalModule) == 0x28
    DAC_ENUM_DTHIS();
    if (m_pDomainFile.IsValid())
    {
        m_pDomainFile->EnumMemoryRegions(flags);
    }
    if (m_pDynamicClassTable.Load().IsValid())
    {
        // Capture the dynamic class table array itself...
        DacEnumMemoryRegion(dac_cast<TADDR>(m_pDynamicClassTable.Load()),
                            m_aDynamicEntries * sizeof(DynamicClassInfo));
        // ...and each entry it points to.
        for (SIZE_T i = 0; i < m_aDynamicEntries; i++)
        {
            PTR_DynamicEntry entry = dac_cast<PTR_DynamicEntry>(m_pDynamicClassTable[i].m_pDynamicEntry.Load());
            if (entry.IsValid())
            {
                // sizeof(DomainLocalModule::DynamicEntry) == 8
                entry.EnumMem();
            }
        }
    }
}
// DAC: base-class part of domain memory enumeration. Deliberately does NOT
// enumerate `this` even when asked (see comment) — derived classes own that.
void
BaseDomain::EnumMemoryRegions(CLRDataEnumMemoryFlags flags,
                              bool enumThis)
{
    SUPPORTS_DAC;
    if (enumThis)
    {
        // This is wrong. Don't do it.
        // BaseDomain cannot be instantiated.
        // The only thing this code can hope to accomplish is to potentially break
        // memory enumeration walking through the derived class if we
        // explicitly call the base class enum first.
//        DAC_ENUM_VTHIS();
    }
    EMEM_OUT(("MEM: %p BaseDomain\n", dac_cast<TADDR>(this)));
}
void
AppDomain::EnumMemoryRegions(CLRDataEnumMemoryFlags flags,
                             bool enumThis)
{
    SUPPORTS_DAC;
    // DAC minidump support: record the AppDomain object, its friendly name
    // (except in triage dumps), the assembly list, and every loaded
    // execution assembly.
    if (enumThis)
    {
        //sizeof(AppDomain) == 0xeb0
        DAC_ENUM_VTHIS();
    }
    BaseDomain::EnumMemoryRegions(flags, false);
    // We don't need AppDomain name in triage dumps.
    if (flags != CLRDATA_ENUM_MEM_TRIAGE)
    {
        m_friendlyName.EnumMemoryRegions(flags);
    }
    m_Assemblies.EnumMemoryRegions(flags);
    AssemblyIterator assem = IterateAssembliesEx((AssemblyIterationFlags)(kIncludeLoaded | kIncludeExecution));
    CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
    while (assem.Next(pDomainAssembly.This()))
    {
        pDomainAssembly->EnumMemoryRegions(flags);
    }
}
void
SystemDomain::EnumMemoryRegions(CLRDataEnumMemoryFlags flags,
                                bool enumThis)
{
    SUPPORTS_DAC;
    // DAC minidump support: record the SystemDomain, the system file and
    // assembly (mscorlib/System.Private.CoreLib), and then recurse into
    // the current AppDomain.
    if (enumThis)
    {
        DAC_ENUM_VTHIS();
    }
    BaseDomain::EnumMemoryRegions(flags, false);
    if (m_pSystemFile.IsValid())
    {
        m_pSystemFile->EnumMemoryRegions(flags);
    }
    if (m_pSystemAssembly.IsValid())
    {
        m_pSystemAssembly->EnumMemoryRegions(flags);
    }
    if (AppDomain::GetCurrentDomain())
    {
        AppDomain::GetCurrentDomain()->EnumMemoryRegions(flags, true);
    }
}
#endif //DACCESS_COMPILE
PTR_LoaderAllocator SystemDomain::GetGlobalLoaderAllocator()
{
    // DAC-safe accessor: forms a marshaled pointer to the singleton
    // SystemDomain's embedded m_GlobalAllocator member.
    return PTR_LoaderAllocator(PTR_HOST_MEMBER_TADDR(SystemDomain,System(),m_GlobalAllocator));
}
#if defined(FEATURE_TYPEEQUIVALENCE)
#ifndef DACCESS_COMPILE
TypeEquivalenceHashTable * AppDomain::GetTypeEquivalenceCache()
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        INJECT_FAULT(COMPlusThrowOM());
        MODE_ANY;
    }
    CONTRACTL_END;
    // Lazily creates the per-domain type-equivalence hash table using
    // double-checked locking: the unlocked Load() below is the fast path,
    // and the table is only created under m_TypeEquivalenceCrst.
    // Take the critical section all of the time in debug builds to ensure that it is safe to take
    // the critical section in the unusual times when it may actually be needed in retail builds
#ifdef _DEBUG
    CrstHolder ch(&m_TypeEquivalenceCrst);
#endif
    if (m_pTypeEquivalenceTable.Load() == NULL)
    {
#ifndef _DEBUG
        CrstHolder ch(&m_TypeEquivalenceCrst);
#endif
        // Re-check under the lock: another thread may have created it.
        if (m_pTypeEquivalenceTable.Load() == NULL)
        {
            m_pTypeEquivalenceTable = TypeEquivalenceHashTable::Create(this, /* bucket count */ 12, &m_TypeEquivalenceCrst);
        }
    }
    return m_pTypeEquivalenceTable;
}
#endif //!DACCESS_COMPILE
#endif //FEATURE_TYPEEQUIVALENCE
#if !defined(DACCESS_COMPILE)
//---------------------------------------------------------------------------------------------------------------------
void AppDomain::PublishHostedAssembly(
    DomainAssembly * pDomainAssembly)
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END
    // Registers a host-bound assembly in m_hostAssemblyMap so it can be
    // found by FindAssembly(). The add is staged so that the step taken
    // under m_crstHostAssemblyMap (which may be taken during stack walks,
    // inside a ForbidSuspend region) performs no allocation.
    if (pDomainAssembly->GetFile()->HasHostAssembly())
    {
        // We have to serialize all Add operations
        CrstHolder lockAdd(&m_crstHostAssemblyMapAdd);
        _ASSERTE(m_hostAssemblyMap.Lookup(pDomainAssembly->GetFile()->GetHostAssembly()) == nullptr);
        // Wrapper for m_hostAssemblyMap.Add that avoids call out into host
        HostAssemblyMap::AddPhases addCall;
        // 1. Preallocate one element
        addCall.PreallocateForAdd(&m_hostAssemblyMap);
        {
            // 2. Take the reader lock which can be taken during stack walking
            // We cannot call out into host from ForbidSuspend region (i.e. no allocations/deallocations)
            ForbidSuspendThreadHolder suspend;
            {
                CrstHolder lock(&m_crstHostAssemblyMap);
                // 3. Add the element to the hash table (no call out into host)
                addCall.Add(pDomainAssembly);
            }
        }
        // 4. Cleanup the old memory (if any)
        addCall.DeleteOldTable();
    }
    else
    {
        // Not a host assembly: nothing to publish.
    }
}
//---------------------------------------------------------------------------------------------------------------------
void AppDomain::UpdatePublishHostedAssembly(
    DomainAssembly * pAssembly,
    PTR_PEFile pFile)
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
        MODE_ANY;
        CAN_TAKE_LOCK;
    }
    CONTRACTL_END
    // Swaps the PEFile backing pAssembly. Because the host-assembly hash
    // key is derived from the PEFile, the entry must be removed and
    // re-added atomically (under m_crstHostAssemblyMap, inside a
    // ForbidSuspend region so no host callouts/allocations happen).
    if (pAssembly->GetFile()->HasHostAssembly())
    {
        // We have to serialize all Add operations
        CrstHolder lockAdd(&m_crstHostAssemblyMapAdd);
        {
            // Wrapper for m_hostAssemblyMap.Add that avoids call out into host
            OriginalFileHostAssemblyMap::AddPhases addCall;
            bool fAddOrigFile = false;
            // For cases where the pefile is being updated
            // 1. Preallocate one element
            if (pFile != pAssembly->GetFile())
            {
                addCall.PreallocateForAdd(&m_hostAssemblyMapForOrigFile);
                fAddOrigFile = true;
            }
            {
                // We cannot call out into host from ForbidSuspend region (i.e. no allocations/deallocations)
                ForbidSuspendThreadHolder suspend;
                {
                    CrstHolder lock(&m_crstHostAssemblyMap);
                    // Remove from hash table.
                    _ASSERTE(m_hostAssemblyMap.Lookup(pAssembly->GetFile()->GetHostAssembly()) != nullptr);
                    m_hostAssemblyMap.Remove(pAssembly->GetFile()->GetHostAssembly());
                    // Update PEFile on DomainAssembly. (This may cause the key for the hash to change, which is why we need this function)
                    pAssembly->UpdatePEFileWorker(pFile);
                    _ASSERTE(fAddOrigFile == (pAssembly->GetOriginalFile() != pAssembly->GetFile()));
                    if (fAddOrigFile)
                    {
                        // Add to the orig file hash table if we might be in a case where we've cached the original pefile and not the final pe file (for use during GetAssemblyIfLoaded)
                        addCall.Add(pAssembly);
                    }
                    // Add back to the hashtable (the call to Remove above guarantees that we will not call into host for table reallocation)
                    _ASSERTE(m_hostAssemblyMap.Lookup(pAssembly->GetFile()->GetHostAssembly()) == nullptr);
                    m_hostAssemblyMap.Add(pAssembly);
                }
            }
            // 4. Cleanup the old memory (if any)
            if (fAddOrigFile)
                addCall.DeleteOldTable();
        }
    }
    else
    {
        // No host assembly involved: just swap the PEFile.
        pAssembly->UpdatePEFileWorker(pFile);
    }
}
//---------------------------------------------------------------------------------------------------------------------
void AppDomain::UnPublishHostedAssembly(
    DomainAssembly * pAssembly)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        CAN_TAKE_LOCK;
    }
    CONTRACTL_END
    // Removes pAssembly from the host-assembly lookup map(s). Removal
    // does not allocate, so it is done directly under the map lock inside
    // a ForbidSuspend region.
    if (pAssembly->GetFile()->HasHostAssembly())
    {
        ForbidSuspendThreadHolder suspend;
        {
            CrstHolder lock(&m_crstHostAssemblyMap);
            _ASSERTE(m_hostAssemblyMap.Lookup(pAssembly->GetFile()->GetHostAssembly()) != nullptr);
            m_hostAssemblyMap.Remove(pAssembly->GetFile()->GetHostAssembly());
            // We also have an entry in m_hostAssemblyMapForOrigFile. Handle that case.
            if (pAssembly->GetOriginalFile() != pAssembly->GetFile())
            {
                m_hostAssemblyMapForOrigFile.Remove(pAssembly->GetOriginalFile()->GetHostAssembly());
            }
        }
    }
}
#endif //!DACCESS_COMPILE
//---------------------------------------------------------------------------------------------------------------------
PTR_DomainAssembly AppDomain::FindAssembly(PTR_ICLRPrivAssembly pHostAssembly)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        SUPPORTS_DAC;
    }
    CONTRACTL_END
    // Maps a host ICLRPrivAssembly back to its DomainAssembly, consulting
    // the primary map first and the original-file map as a fallback.
    // Returns NULL when the host assembly is unknown to this domain.
    if (pHostAssembly == nullptr)
        return NULL;
    {
        ForbidSuspendThreadHolder suspend;
        {
            CrstHolder lock(&m_crstHostAssemblyMap);
            PTR_DomainAssembly returnValue = m_hostAssemblyMap.Lookup(pHostAssembly);
            if (returnValue == NULL)
            {
                // If not found in the m_hostAssemblyMap, look in the m_hostAssemblyMapForOrigFile
                // This is necessary as it may happen during in a second AppDomain that the PEFile
                // first discovered in the AppDomain may not be used by the DomainFile, but the CLRPrivBinderFusion
                // will in some cases find the pHostAssembly associated with this no longer used PEFile
                // instead of the PEFile that was finally decided upon.
                returnValue = m_hostAssemblyMapForOrigFile.Lookup(pHostAssembly);
            }
            return returnValue;
        }
    }
}
#ifndef DACCESS_COMPILE
// Return native image for a given composite image file name, NULL when not found.
PTR_NativeImage AppDomain::GetNativeImage(LPCUTF8 simpleFileName)
{
    // Look up a previously registered composite native image by its simple
    // file name; returns nullptr when no image has been registered.
    // Access to m_nativeImageMap is serialized with SetNativeImage.
    CrstHolder holder(&m_nativeImageLoadCrst);
    PTR_NativeImage pFound;
    if (!m_nativeImageMap.Lookup(simpleFileName, &pFound))
    {
        return nullptr;
    }
    return pFound;
}
PTR_NativeImage AppDomain::SetNativeImage(LPCUTF8 simpleFileName, PTR_NativeImage pNativeImage)
{
    // Register pNativeImage under simpleFileName unless another thread got
    // there first; in that case return the already-registered image and
    // leave the map unchanged. Returns nullptr when the insert succeeded.
    CrstHolder holder(&m_nativeImageLoadCrst);
    PTR_NativeImage pPrior;
    if (m_nativeImageMap.Lookup(simpleFileName, &pPrior))
    {
        return pPrior;
    }
    m_nativeImageMap.Add(simpleFileName, pNativeImage);
    return nullptr;
}
#endif//DACCESS_COMPILE
#if !defined(DACCESS_COMPILE) && defined(FEATURE_NATIVE_IMAGE_GENERATION)
void ZapperSetBindingPaths(ICorCompilationDomain *pDomain, SString &trustedPlatformAssemblies, SString &platformResourceRoots, SString &appPaths, SString &appNiPaths)
{
    // Forward the NGen probing paths to the TPA binder owned by the
    // compilation domain.
    CompilationDomain *pCompilationDomain = (CompilationDomain *)pDomain;
    CLRPrivBinderCoreCLR *pTPABinder = pCompilationDomain->GetTPABinderContext();
    _ASSERTE(pTPABinder != NULL);
    pTPABinder->SetupBindingPaths(trustedPlatformAssemblies, platformResourceRoots, appPaths, appNiPaths);
}
#endif
|
//
// EnvironmentData.cpp
// libraries/environment/src
//
// Created by Andrzej Kapolka on 5/6/13.
// Copyright 2013 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <cstring>
#include "EnvironmentData.h"
#include "PacketHeaders.h"
// initial values from Sean O'Neil's GPU Gems entry (http://http.developer.nvidia.com/GPUGems2/gpugems2_chapter16.html),
// GameEngine.cpp
// Constructs an environment with the default atmosphere parameters.
// Scattering coefficients and wavelengths come from Sean O'Neil's
// GPU Gems 2 atmospheric-scattering sample (see comment above).
EnvironmentData::EnvironmentData(int id) :
    _id(id),
    _flat(true),
    _gravity(0.0f),
    _atmosphereCenter(0, -1000, 0),
    _atmosphereInnerRadius(1000.0),
    _atmosphereOuterRadius(1025.0),
    _rayleighScattering(0.0025f),
    _mieScattering(0.0010f),
    _scatteringWavelengths(0.650f, 0.570f, 0.475f),
    _sunLocation(1000, 900, 1000),
    _sunBrightness(20.0f),
    _hasStars(true) {
}
glm::vec3 EnvironmentData::getAtmosphereCenter(const glm::vec3& cameraPosition) const {
    // In "flat" mode the atmosphere tracks the camera horizontally
    // (x/z only); otherwise the configured center is used as-is.
    glm::vec3 offset;
    if (_flat) {
        offset = glm::vec3(cameraPosition.x, 0.0f, cameraPosition.z);
    }
    return _atmosphereCenter + offset;
}
// Returns the fixed sun location. cameraPosition is currently unused;
// it is kept for signature symmetry with getAtmosphereCenter().
glm::vec3 EnvironmentData::getSunLocation(const glm::vec3& cameraPosition) const {
    return _sunLocation;
}
// Serializes the environment fields into destinationBuffer as a raw
// byte-copy of each member, in wire order. Returns the number of bytes
// written. parseData() reads the same layout back.
int EnvironmentData::getBroadcastData(unsigned char* destinationBuffer) const {
    unsigned char* cursor = destinationBuffer;
    // Copy one field and advance the write cursor.
    auto append = [&cursor](const void* field, size_t bytes) {
        memcpy(cursor, field, bytes);
        cursor += bytes;
    };
    append(&_id, sizeof(_id));
    append(&_flat, sizeof(_flat));
    append(&_gravity, sizeof(_gravity));
    append(&_atmosphereCenter, sizeof(_atmosphereCenter));
    append(&_atmosphereInnerRadius, sizeof(_atmosphereInnerRadius));
    append(&_atmosphereOuterRadius, sizeof(_atmosphereOuterRadius));
    append(&_rayleighScattering, sizeof(_rayleighScattering));
    append(&_mieScattering, sizeof(_mieScattering));
    append(&_scatteringWavelengths, sizeof(_scatteringWavelengths));
    append(&_sunLocation, sizeof(_sunLocation));
    append(&_sunBrightness, sizeof(_sunBrightness));
    return cursor - destinationBuffer;
}
// Deserializes the environment fields from sourceBuffer (the layout
// written by getBroadcastData). Returns the number of bytes consumed,
// or 0 if the buffer is too short to hold a complete record.
int EnvironmentData::parseData(const unsigned char* sourceBuffer, int numBytes) {
    // Total bytes this layout requires; must stay in sync with getBroadcastData().
    const int expectedBytes = (int)(sizeof(_id) + sizeof(_flat) + sizeof(_gravity) +
        sizeof(_atmosphereCenter) + sizeof(_atmosphereInnerRadius) +
        sizeof(_atmosphereOuterRadius) + sizeof(_rayleighScattering) +
        sizeof(_mieScattering) + sizeof(_scatteringWavelengths) +
        sizeof(_sunLocation) + sizeof(_sunBrightness));
    // Previously numBytes was ignored entirely, so a truncated packet
    // caused a read past the end of sourceBuffer. Reject short buffers.
    if (numBytes < expectedBytes) {
        return 0;
    }
    const unsigned char* startPosition = sourceBuffer;
    memcpy(&_id, sourceBuffer, sizeof(_id));
    sourceBuffer += sizeof(_id);
    memcpy(&_flat, sourceBuffer, sizeof(_flat));
    sourceBuffer += sizeof(_flat);
    memcpy(&_gravity, sourceBuffer, sizeof(_gravity));
    sourceBuffer += sizeof(_gravity);
    memcpy(&_atmosphereCenter, sourceBuffer, sizeof(_atmosphereCenter));
    sourceBuffer += sizeof(_atmosphereCenter);
    memcpy(&_atmosphereInnerRadius, sourceBuffer, sizeof(_atmosphereInnerRadius));
    sourceBuffer += sizeof(_atmosphereInnerRadius);
    memcpy(&_atmosphereOuterRadius, sourceBuffer, sizeof(_atmosphereOuterRadius));
    sourceBuffer += sizeof(_atmosphereOuterRadius);
    memcpy(&_rayleighScattering, sourceBuffer, sizeof(_rayleighScattering));
    sourceBuffer += sizeof(_rayleighScattering);
    memcpy(&_mieScattering, sourceBuffer, sizeof(_mieScattering));
    sourceBuffer += sizeof(_mieScattering);
    memcpy(&_scatteringWavelengths, sourceBuffer, sizeof(_scatteringWavelengths));
    sourceBuffer += sizeof(_scatteringWavelengths);
    memcpy(&_sunLocation, sourceBuffer, sizeof(_sunLocation));
    sourceBuffer += sizeof(_sunLocation);
    memcpy(&_sunBrightness, sourceBuffer, sizeof(_sunBrightness));
    sourceBuffer += sizeof(_sunBrightness);
    return sourceBuffer - startPosition;
}
|
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file has been auto-generated by code_generator_v8.py. DO NOT MODIFY!
#include "config.h"
#include "V8WebGLActiveInfo.h"
#include "bindings/core/v8/ExceptionState.h"
#include "bindings/core/v8/V8DOMConfiguration.h"
#include "bindings/core/v8/V8ObjectConstructor.h"
#include "core/dom/ContextFeatures.h"
#include "core/dom/Document.h"
#include "platform/RuntimeEnabledFeatures.h"
#include "platform/TraceEvent.h"
#include "wtf/GetPtr.h"
#include "wtf/RefPtr.h"
namespace blink {
// Suppress warning: global constructors, because struct WrapperTypeInfo is trivial
// and does not depend on another global objects.
#if defined(COMPONENT_BUILD) && defined(WIN32) && COMPILER(CLANG)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wglobal-constructors"
#endif
const WrapperTypeInfo V8WebGLActiveInfo::wrapperTypeInfo = { gin::kEmbedderBlink, V8WebGLActiveInfo::domTemplate, V8WebGLActiveInfo::refObject, V8WebGLActiveInfo::derefObject, V8WebGLActiveInfo::trace, 0, 0, V8WebGLActiveInfo::preparePrototypeObject, V8WebGLActiveInfo::installConditionallyEnabledProperties, "WebGLActiveInfo", 0, WrapperTypeInfo::WrapperTypeObjectPrototype, WrapperTypeInfo::ObjectClassId, WrapperTypeInfo::NotInheritFromEventTarget, WrapperTypeInfo::Independent, WrapperTypeInfo::WillBeGarbageCollectedObject };
#if defined(COMPONENT_BUILD) && defined(WIN32) && COMPILER(CLANG)
#pragma clang diagnostic pop
#endif
// This static member must be declared by DEFINE_WRAPPERTYPEINFO in WebGLActiveInfo.h.
// For details, see the comment of DEFINE_WRAPPERTYPEINFO in
// bindings/core/v8/ScriptWrappable.h.
const WrapperTypeInfo& WebGLActiveInfo::s_wrapperTypeInfo = V8WebGLActiveInfo::wrapperTypeInfo;
namespace WebGLActiveInfoV8Internal {
// Auto-generated attribute getters for WebGLActiveInfo (size, type, name).
// Each getter has a thin *Callback wrapper that flips the trace sampling
// state to "DOMGetter" around the actual work and back to "V8Execution".
static void sizeAttributeGetter(const v8::FunctionCallbackInfo<v8::Value>& info)
{
    v8::Local<v8::Object> holder = info.Holder();
    WebGLActiveInfo* impl = V8WebGLActiveInfo::toImpl(holder);
    v8SetReturnValueInt(info, impl->size());
}
static void sizeAttributeGetterCallback(const v8::FunctionCallbackInfo<v8::Value>& info)
{
    TRACE_EVENT_SET_SAMPLING_STATE("blink", "DOMGetter");
    WebGLActiveInfoV8Internal::sizeAttributeGetter(info);
    TRACE_EVENT_SET_SAMPLING_STATE("v8", "V8Execution");
}
static void typeAttributeGetter(const v8::FunctionCallbackInfo<v8::Value>& info)
{
    v8::Local<v8::Object> holder = info.Holder();
    WebGLActiveInfo* impl = V8WebGLActiveInfo::toImpl(holder);
    v8SetReturnValueUnsigned(info, impl->type());
}
static void typeAttributeGetterCallback(const v8::FunctionCallbackInfo<v8::Value>& info)
{
    TRACE_EVENT_SET_SAMPLING_STATE("blink", "DOMGetter");
    WebGLActiveInfoV8Internal::typeAttributeGetter(info);
    TRACE_EVENT_SET_SAMPLING_STATE("v8", "V8Execution");
}
static void nameAttributeGetter(const v8::FunctionCallbackInfo<v8::Value>& info)
{
    v8::Local<v8::Object> holder = info.Holder();
    WebGLActiveInfo* impl = V8WebGLActiveInfo::toImpl(holder);
    v8SetReturnValueString(info, impl->name(), info.GetIsolate());
}
static void nameAttributeGetterCallback(const v8::FunctionCallbackInfo<v8::Value>& info)
{
    TRACE_EVENT_SET_SAMPLING_STATE("blink", "DOMGetter");
    WebGLActiveInfoV8Internal::nameAttributeGetter(info);
    TRACE_EVENT_SET_SAMPLING_STATE("v8", "V8Execution");
}
} // namespace WebGLActiveInfoV8Internal
static const V8DOMConfiguration::AccessorConfiguration V8WebGLActiveInfoAccessors[] = {
{"size", WebGLActiveInfoV8Internal::sizeAttributeGetterCallback, 0, 0, 0, 0, static_cast<v8::AccessControl>(v8::DEFAULT), static_cast<v8::PropertyAttribute>(v8::None), V8DOMConfiguration::ExposedToAllScripts, V8DOMConfiguration::OnPrototype, V8DOMConfiguration::CheckHolder},
{"type", WebGLActiveInfoV8Internal::typeAttributeGetterCallback, 0, 0, 0, 0, static_cast<v8::AccessControl>(v8::DEFAULT), static_cast<v8::PropertyAttribute>(v8::None), V8DOMConfiguration::ExposedToAllScripts, V8DOMConfiguration::OnPrototype, V8DOMConfiguration::CheckHolder},
{"name", WebGLActiveInfoV8Internal::nameAttributeGetterCallback, 0, 0, 0, 0, static_cast<v8::AccessControl>(v8::DEFAULT), static_cast<v8::PropertyAttribute>(v8::None), V8DOMConfiguration::ExposedToAllScripts, V8DOMConfiguration::OnPrototype, V8DOMConfiguration::CheckHolder},
};
// Configures the V8 function template for WebGLActiveInfo: installs the
// accessor configuration (size/type/name) and a custom toString.
static void installV8WebGLActiveInfoTemplate(v8::Local<v8::FunctionTemplate> functionTemplate, v8::Isolate* isolate)
{
    functionTemplate->ReadOnlyPrototype();
    v8::Local<v8::Signature> defaultSignature;
    defaultSignature = V8DOMConfiguration::installDOMClassTemplate(isolate, functionTemplate, "WebGLActiveInfo", v8::Local<v8::FunctionTemplate>(), V8WebGLActiveInfo::internalFieldCount,
        0, 0,
        V8WebGLActiveInfoAccessors, WTF_ARRAY_LENGTH(V8WebGLActiveInfoAccessors),
        0, 0);
    v8::Local<v8::ObjectTemplate> instanceTemplate = functionTemplate->InstanceTemplate();
    ALLOW_UNUSED_LOCAL(instanceTemplate);
    v8::Local<v8::ObjectTemplate> prototypeTemplate = functionTemplate->PrototypeTemplate();
    ALLOW_UNUSED_LOCAL(prototypeTemplate);
    // Custom toString template
    functionTemplate->Set(v8AtomicString(isolate, "toString"), V8PerIsolateData::from(isolate)->toStringTemplate());
}
// Returns the (cached, per-isolate) function template for this interface.
v8::Local<v8::FunctionTemplate> V8WebGLActiveInfo::domTemplate(v8::Isolate* isolate)
{
    return V8DOMConfiguration::domClassTemplate(isolate, const_cast<WrapperTypeInfo*>(&wrapperTypeInfo), installV8WebGLActiveInfoTemplate);
}
// True if v8Value wraps a WebGLActiveInfo instance.
bool V8WebGLActiveInfo::hasInstance(v8::Local<v8::Value> v8Value, v8::Isolate* isolate)
{
    return V8PerIsolateData::from(isolate)->hasInstance(&wrapperTypeInfo, v8Value);
}
// Walks v8Value's prototype chain looking for a WebGLActiveInfo wrapper.
v8::Local<v8::Object> V8WebGLActiveInfo::findInstanceInPrototypeChain(v8::Local<v8::Value> v8Value, v8::Isolate* isolate)
{
    return V8PerIsolateData::from(isolate)->findInstanceInPrototypeChain(&wrapperTypeInfo, v8Value);
}
// Type-checked unwrap: returns the implementation object, or 0 when value
// is not a WebGLActiveInfo wrapper.
WebGLActiveInfo* V8WebGLActiveInfo::toImplWithTypeCheck(v8::Isolate* isolate, v8::Local<v8::Value> value)
{
    return hasInstance(value, isolate) ? toImpl(v8::Local<v8::Object>::Cast(value)) : 0;
}
// Reference-count increment hook; a no-op under Oilpan (GC-managed).
void V8WebGLActiveInfo::refObject(ScriptWrappable* scriptWrappable)
{
#if !ENABLE(OILPAN)
    scriptWrappable->toImpl<WebGLActiveInfo>()->ref();
#endif
}
// Reference-count decrement hook; a no-op under Oilpan (GC-managed).
void V8WebGLActiveInfo::derefObject(ScriptWrappable* scriptWrappable)
{
#if !ENABLE(OILPAN)
    scriptWrappable->toImpl<WebGLActiveInfo>()->deref();
#endif
}
} // namespace blink
|
#include <sql.hpp>
#include <mutex>
/**********************************************************
_SQLStatisticsRSB
Column bindings for result set for SQLStatistics.
**********************************************************/
// One sCOLBIND per SQLStatistics result-set column, built with the
// _colbindChar/_colbindShort/_colbindLong helper macros.
static sCOLBIND _SQLStatisticsRSB[12] =
{
    _colbindChar(1,sSQLSTATISTICSRESULTSET,szTableQualifier,TABLE_QUALIFIER_MAX),
    _colbindChar(2,sSQLSTATISTICSRESULTSET,szTableOwner,TABLE_OWNER_MAX),
    _colbindChar(3,sSQLSTATISTICSRESULTSET,szTableName,TABLE_NAME_MAX),
    _colbindShort(4,sSQLSTATISTICSRESULTSET,fNonUnique),
    _colbindChar(5,sSQLSTATISTICSRESULTSET,szIndexQualifier,INDEX_QUALIFIER_MAX),
    _colbindChar(6,sSQLSTATISTICSRESULTSET,szIndexName,INDEX_NAME_MAX),
    _colbindShort(7,sSQLSTATISTICSRESULTSET,fType),
    _colbindShort(8,sSQLSTATISTICSRESULTSET,fSeqInIndex),
    _colbindChar(9,sSQLSTATISTICSRESULTSET,szColumnName,COLUMN_NAME_MAX),
    _colbindChar(10,sSQLSTATISTICSRESULTSET,cCollation,COLLATION_SIZE),
    _colbindLong(11,sSQLSTATISTICSRESULTSET,nCardinality),
    _colbindLong(12,sSQLSTATISTICSRESULTSET,nPages),
};
/**********************************************************
_SQLTypeInfoRSB
Column bindings for result set for SQLGetTypeInfo.
**********************************************************/
// One sCOLBIND per SQLGetTypeInfo result-set column:
// { column number, C type, struct offset, transfer size }.
// FIX: entries 8 (fCaseSensitive) and 10 (fUnsigned) previously declared a
// size of 0 despite binding SQL_C_SHORT; every sibling SQL_C_SHORT entry
// uses sizeof(SWORD), so 0 looked like a copy-paste slip and would bind a
// zero-length buffer for those columns.
static sCOLBIND _SQLTypeInfoRSB[13] =
{
    {
        1,
        SQL_C_CHAR,
        offsetof(sSQLTYPEINFORESULTSET,szTypeName),
        STRING1_MAX,
    },
    {
        2,
        SQL_C_SHORT,
        offsetof(sSQLTYPEINFORESULTSET,fDataType),
        sizeof(SWORD),
    },
    {
        3,
        SQL_C_LONG,
        offsetof(sSQLTYPEINFORESULTSET,fPrecision),
        sizeof(SDWORD),
    },
    {
        4,
        SQL_C_CHAR,
        offsetof(sSQLTYPEINFORESULTSET,szLiteralPrefix),
        STRING1_MAX,
    },
    {
        5,
        SQL_C_CHAR,
        offsetof(sSQLTYPEINFORESULTSET,szLiteralSuffix),
        STRING1_MAX,
    },
    {
        6,
        SQL_C_CHAR,
        offsetof(sSQLTYPEINFORESULTSET,szCreateParams),
        STRING1_MAX,
    },
    {
        7,
        SQL_C_SHORT,
        offsetof(sSQLTYPEINFORESULTSET,fNullable),
        sizeof(SWORD),
    },
    {
        8,
        SQL_C_SHORT,
        offsetof(sSQLTYPEINFORESULTSET,fCaseSensitive),
        sizeof(SWORD),
    },
    {
        9,
        SQL_C_SHORT,
        offsetof(sSQLTYPEINFORESULTSET,fSearchable),
        sizeof(SWORD),
    },
    {
        10,
        SQL_C_SHORT,
        offsetof(sSQLTYPEINFORESULTSET,fUnsigned),
        sizeof(SWORD),
    },
    {
        11,
        SQL_C_SHORT,
        offsetof(sSQLTYPEINFORESULTSET,fMoney),
        sizeof(SWORD),
    },
    {
        12,
        SQL_C_SHORT,
        offsetof(sSQLTYPEINFORESULTSET,fAutoIncrement),
        sizeof(SWORD),
    },
    {
        13,
        SQL_C_CHAR,
        offsetof(sSQLTYPEINFORESULTSET,szLocalTypeName),
        STRING1_MAX,
    },
};
/**********************************************************
_SQLTablesRSB
Column bindings for result set for SQLTables.
**********************************************************/
// One sCOLBIND per SQLTables result-set column:
// { column number, C type, struct offset, transfer size }.
static sCOLBIND _SQLTablesRSB[5] =
{
    {
        1,
        SQL_C_CHAR,
        offsetof(sSQLTABLESRESULTSET,szTableQualifier),
        TABLE_QUALIFIER_MAX,
    },
    {
        2,
        SQL_C_CHAR,
        offsetof(sSQLTABLESRESULTSET,szTableOwner),
        TABLE_OWNER_MAX,
    },
    {
        3,
        SQL_C_CHAR,
        offsetof(sSQLTABLESRESULTSET,szTableName),
        TABLE_NAME_MAX,
    },
    {
        4,
        SQL_C_CHAR,
        offsetof(sSQLTABLESRESULTSET,szTableType),
        TABLE_TYPE_MAX,
    },
    {
        5,
        SQL_C_CHAR,
        offsetof(sSQLTABLESRESULTSET,Remarks),
        REMARKS_MAX,
    },
};
/**********************************************************
_SQLColumnsRSB
Column bindings for result set for SQLColumns.
**********************************************************/
// One sCOLBIND per SQLColumns result-set column:
// { column number, C type, struct offset, transfer size }.
static sCOLBIND _SQLColumnsRSB[12] =
{
    {
        1,
        SQL_C_CHAR,
        offsetof(sSQLCOLUMNSRESULTSET,szTableQualifier),
        TABLE_QUALIFIER_MAX,
    },
    {
        2,
        SQL_C_CHAR,
        offsetof(sSQLCOLUMNSRESULTSET,szTableOwner),
        TABLE_OWNER_MAX,
    },
    {
        3,
        SQL_C_CHAR,
        offsetof(sSQLCOLUMNSRESULTSET,szTableName),
        TABLE_NAME_MAX,
    },
    {
        4,
        SQL_C_CHAR,
        offsetof(sSQLCOLUMNSRESULTSET,szColumnName),
        COLUMN_NAME_MAX,
    },
    {
        5,
        SQL_C_SHORT,
        offsetof(sSQLCOLUMNSRESULTSET,fDataType),
        sizeof(SWORD),
    },
    {
        6,
        SQL_C_CHAR,
        offsetof(sSQLCOLUMNSRESULTSET,szTypeName),
        TYPE_NAME_MAX,
    },
    {
        7,
        SQL_C_LONG,
        offsetof(sSQLCOLUMNSRESULTSET,fPrecision),
        sizeof(SDWORD),
    },
    {
        8,
        SQL_C_LONG,
        offsetof(sSQLCOLUMNSRESULTSET,fLength),
        sizeof(SDWORD),
    },
    {
        9,
        SQL_C_SHORT,
        offsetof(sSQLCOLUMNSRESULTSET,Scale),
        sizeof(SWORD),
    },
    {
        10,
        SQL_C_SHORT,
        offsetof(sSQLCOLUMNSRESULTSET,Radix),
        sizeof(SWORD),
    },
    {
        11,
        SQL_C_SHORT,
        offsetof(sSQLCOLUMNSRESULTSET,Nullable),
        sizeof(SWORD),
    },
    {
        12,
        SQL_C_CHAR,
        offsetof(sSQLCOLUMNSRESULTSET,Remarks),
        REMARKS_MAX,
    },
};
/**********************************************************
_SQLSpecialColumnsRSB
Column bindings for result set for SQLSpecialColumns.
**********************************************************/
// One sCOLBIND per SQLSpecialColumns result-set column:
// { column number, C type, struct offset, transfer size }.
static sCOLBIND _SQLSpecialColumnsRSB[7] =
{
    {
        1,
        SQL_C_SHORT,
        offsetof(sSQLSPECIALCOLRESULTSET,fScope),
        sizeof(SWORD),
    },
    {
        2,
        SQL_C_CHAR,
        offsetof(sSQLSPECIALCOLRESULTSET,szColumnName),
        COLUMN_NAME_MAX,
    },
    {
        3,
        SQL_C_SHORT,
        offsetof(sSQLSPECIALCOLRESULTSET,fDataType),
        sizeof(SWORD),
    },
    {
        4,
        SQL_C_CHAR,
        offsetof(sSQLSPECIALCOLRESULTSET,szTypeName),
        TYPE_NAME_MAX,
    },
    {
        5,
        SQL_C_LONG,
        offsetof(sSQLSPECIALCOLRESULTSET,fPrecision),
        sizeof(SDWORD),
    },
    {
        6,
        SQL_C_LONG,
        offsetof(sSQLSPECIALCOLRESULTSET,fLength),
        sizeof(SDWORD),
    },
    {
        7,
        SQL_C_SHORT,
        offsetof(sSQLSPECIALCOLRESULTSET,Scale),
        sizeof(SWORD),
    },
};
// new in v2.0
// Extended (v2.0) sCOLBIND entries for SQLColumnPrivileges, carrying the
// reported column name, SQL type and nullability flag in addition to the
// C-type/offset/size binding.
// FIX: column 4 was reported as "column_name" (lowercase) while every other
// column name in these tables is uppercase; normalized to "COLUMN_NAME" to
// match the ODBC catalog-function convention.
static sCOLBIND _SQLColumnPrivilegesRSB[8] =
{
    { 1,
      SQL_C_CHAR,
      offsetof(sCOLUMNPRIVILEGESRESULTSET, szTable_qualifier),
      TABLE_QUALIFIER_SIZE,
      0,
      false,
      "TABLE_QUALIFIER",
      SQL_VARCHAR,
      0,
      1,
      nullptr,
    },
    { 2,
      SQL_C_CHAR,
      offsetof(sCOLUMNPRIVILEGESRESULTSET, szTable_owner),
      TABLE_OWNER_SIZE,
      0,
      false,
      "TABLE_OWNER",
      SQL_VARCHAR,
      0,
      1,
      nullptr,
    },
    { 3,
      SQL_C_CHAR,
      offsetof(sCOLUMNPRIVILEGESRESULTSET, szTable_name),
      TABLE_NAME_SIZE,
      0,
      false,
      "TABLE_NAME",
      SQL_VARCHAR,
      0,
      1,
      nullptr,
    },
    { 4,
      SQL_C_CHAR,
      offsetof(sCOLUMNPRIVILEGESRESULTSET, szColumn_name),
      COLUMN_NAME_SIZE,
      0,
      false,
      "COLUMN_NAME",
      SQL_VARCHAR,
      0,
      1,
      nullptr,
    },
    { 5,
      SQL_C_CHAR,
      offsetof(sCOLUMNPRIVILEGESRESULTSET, szGrantor),
      GRANTOR_SIZE,
      0,
      false,
      "GRANTOR",
      SQL_VARCHAR,
      0,
      1,
      nullptr,
    },
    { 6,
      SQL_C_CHAR,
      offsetof(sCOLUMNPRIVILEGESRESULTSET, szGrantee),
      GRANTEE_SIZE,
      0,
      false,
      "GRANTEE",
      SQL_VARCHAR,
      0,
      1,
      nullptr,
    },
    { 7,
      SQL_C_CHAR,
      offsetof(sCOLUMNPRIVILEGESRESULTSET, szPrivilege),
      PRIVILEGE_SIZE,
      0,
      false,
      "PRIVILEGE",
      SQL_VARCHAR,
      0,
      1,
      nullptr,
    },
    { 8,
      SQL_C_CHAR,
      offsetof(sCOLUMNPRIVILEGESRESULTSET, szIs_grantable),
      IS_GRANTABLE_SIZE,
      0,
      false,
      "IS_GRANTABLE",
      SQL_VARCHAR,
      0,
      1,
      nullptr,
    },
};
const int _SQLColumnPrivilegesRSBCount
    = sizeof(_SQLColumnPrivilegesRSB) / sizeof(_SQLColumnPrivilegesRSB[0]);
// Extended (v2.0) sCOLBIND entries for SQLProcedures, carrying the reported
// column name, SQL type and nullability flag in addition to the
// C-type/offset/size binding.
static sCOLBIND _SQLProceduresRSB[7] =
{
    { 1,
      SQL_C_CHAR,
      offsetof(sPROCEDURESRESULTSET, szProcedure_qualifier),
      PROCEDURE_QUALIFIER_SIZE,
      0,
      false,
      "PROCEDURE_QUALIFIER",
      SQL_VARCHAR,
      0,
      1,
      nullptr,
    },
    { 2,
      SQL_C_CHAR,
      offsetof(sPROCEDURESRESULTSET, szProcedure_owner),
      PROCEDURE_OWNER_SIZE,
      0,
      false,
      "PROCEDURE_OWNER",
      SQL_VARCHAR,
      0,
      0,
      nullptr,
    },
    { 3,
      SQL_C_CHAR,
      offsetof(sPROCEDURESRESULTSET, szProcedure_name),
      PROCEDURE_NAME_SIZE,
      0,
      false,
      "PROCEDURE_NAME",
      SQL_VARCHAR,
      0,
      0,
      nullptr,
    },
    { 4,
      SQL_C_LONG,
      offsetof(sPROCEDURESRESULTSET, lNum_input_params),
      sizeof(SDWORD),
      0,
      false,
      "NUM_INPUT_PARAMS",
      SQL_INTEGER,
      0,
      0,
      nullptr,
    },
    { 5,
      SQL_C_LONG,
      offsetof(sPROCEDURESRESULTSET, lNum_output_params),
      sizeof(SDWORD),
      0,
      false,
      "NUM_OUTPUT_PARAMS",
      SQL_INTEGER,
      0,
      0,
      nullptr,
    },
    { 6,
      SQL_C_LONG,
      offsetof(sPROCEDURESRESULTSET, lNum_result_sets),
      sizeof(SDWORD),
      0,
      false,
      "NUM_RESULT_SETS",
      SQL_INTEGER,
      0,
      0,
      nullptr,
    },
    { 7,
      SQL_C_CHAR,
      offsetof(sPROCEDURESRESULTSET, szRemarks),
      REMARKS_SIZE,
      0,
      false,
      "REMARKS",
      SQL_VARCHAR,
      0,
      1,
      nullptr,
    },
};
const int _SQLProceduresRSBCount
    = sizeof(_SQLProceduresRSB) / sizeof(_SQLProceduresRSB[0]);
// Extended (v2.0) sCOLBIND entries for SQLProcedureColumns.
// FIX: entry 11 (RADIX) binds SQL_C_SHORT but declared a transfer size of
// sizeof(SDWORD); every other SQL_C_SHORT entry declares sizeof(SWORD), so
// the over-long size risked writing past the SWORD field in the result
// struct. Corrected to sizeof(SWORD).
static sCOLBIND _SQLProcedureColumnsRSB[13] =
{
    { 1,
      SQL_C_CHAR,
      offsetof(sPROCEDURECOLUMNSRESULTSET, szProcedure_qualifier),
      PROCEDURE_QUALIFIER_SIZE,
      0,
      false,
      "PROCEDURE_QUALIFIER",
      SQL_VARCHAR,
      0,
      1,
      nullptr,
    },
    { 2,
      SQL_C_CHAR,
      offsetof(sPROCEDURECOLUMNSRESULTSET, szProcedure_owner),
      PROCEDURE_OWNER_SIZE,
      0,
      false,
      "PROCEDURE_OWNER",
      SQL_VARCHAR,
      0,
      0,
      nullptr,
    },
    { 3,
      SQL_C_CHAR,
      offsetof(sPROCEDURECOLUMNSRESULTSET, szProcedure_name),
      PROCEDURE_NAME_SIZE,
      0,
      false,
      "PROCEDURE_NAME",
      SQL_VARCHAR,
      0,
      0,
      nullptr,
    },
    { 4,
      SQL_C_CHAR,
      offsetof(sPROCEDURECOLUMNSRESULTSET, szColumn_name),
      COLUMN_NAME_SIZE,
      0,
      false,
      "COLUMN_NAME",
      SQL_VARCHAR,
      0,
      0,
      nullptr,
    },
    { 5,
      SQL_C_SHORT,
      offsetof(sPROCEDURECOLUMNSRESULTSET, nColumn_type),
      sizeof(SWORD),
      0,
      false,
      "COLUMN_TYPE",
      SQL_SMALLINT,
      0,
      0,
      nullptr,
    },
    { 6,
      SQL_C_CHAR,
      offsetof(sPROCEDURECOLUMNSRESULTSET, szData_type),
      DATA_TYPE_SIZE,
      0,
      false,
      "DATA_TYPE",
      SQL_VARCHAR,
      0,
      0,
      nullptr,
    },
    { 7,
      SQL_C_CHAR,
      offsetof(sPROCEDURECOLUMNSRESULTSET, szType_name),
      TYPE_NAME_SIZE,
      0,
      false,
      "TYPE_NAME",
      SQL_VARCHAR,
      0,
      0,
      nullptr,
    },
    { 8,
      SQL_C_LONG,
      offsetof(sPROCEDURECOLUMNSRESULTSET, lPrecision),
      sizeof(SDWORD),
      0,
      false,
      "PRECISION",
      SQL_INTEGER,
      0,
      1,
      nullptr,
    },
    { 9,
      SQL_C_LONG,
      offsetof(sPROCEDURECOLUMNSRESULTSET, lLength),
      sizeof(SDWORD),
      0,
      false,
      "LENGTH",
      SQL_INTEGER,
      0,
      1,
      nullptr,
    },
    { 10,
      SQL_C_SHORT,
      offsetof(sPROCEDURECOLUMNSRESULTSET, nScale),
      sizeof(SWORD),
      0,
      false,
      "SCALE",
      SQL_SMALLINT,
      0,
      1,
      nullptr,
    },
    { 11,
      SQL_C_SHORT,
      offsetof(sPROCEDURECOLUMNSRESULTSET, nRadix),
      sizeof(SWORD),
      0,
      false,
      "RADIX",
      SQL_SMALLINT,
      0,
      0,
      nullptr,
    },
    { 12,
      SQL_C_SHORT,
      offsetof(sPROCEDURECOLUMNSRESULTSET, nNullable),
      sizeof(SWORD),
      0,
      false,
      "NULLABLE",
      SQL_SMALLINT,
      0,
      1,
      nullptr,
    },
    { 13,
      SQL_C_CHAR,
      offsetof(sPROCEDURECOLUMNSRESULTSET, szRemarks),
      REMARKS_SIZE,
      0,
      false,
      "REMARKS",
      SQL_VARCHAR,
      0,
      1,
      nullptr,
    },
};
const int _SQLProcedureColumnsRSBCount
    = sizeof(_SQLProcedureColumnsRSB) / sizeof(_SQLProcedureColumnsRSB[0]);
// Extended (v2.0) sCOLBIND entries for SQLForeignKeys.
// FIX: columns 10 and 11 were reported as "Update_Rule"/"Delete_Rule"
// (mixed case) while every other column name in these tables is uppercase;
// normalized to "UPDATE_RULE"/"DELETE_RULE" per the ODBC catalog-function
// convention.
static sCOLBIND _SQLForeignKeysRSB[12] =
{
    { 1,
      SQL_C_CHAR,
      offsetof(sFOREIGNKEYSRESULTSET, szPktable_qualifier),
      PKTABLE_QUALIFIER_SIZE,
      0,
      false,
      "PKTABLE_QUALIFIER",
      SQL_VARCHAR,
      0,
      1,
      nullptr,
    },
    { 2,
      SQL_C_CHAR,
      offsetof(sFOREIGNKEYSRESULTSET, szPktable_owner),
      PKTABLE_OWNER_SIZE,
      0,
      false,
      "PKTABLE_OWNER",
      SQL_VARCHAR,
      0,
      0,
      nullptr,
    },
    { 3,
      SQL_C_CHAR,
      offsetof(sFOREIGNKEYSRESULTSET, szPktable_name),
      PKTABLE_NAME_SIZE,
      0,
      false,
      "PKTABLE_NAME",
      SQL_VARCHAR,
      0,
      0,
      nullptr,
    },
    { 4,
      SQL_C_CHAR,
      offsetof(sFOREIGNKEYSRESULTSET, szPkcolumn_name),
      PKCOLUMN_NAME_SIZE,
      0,
      false,
      "PKCOLUMN_NAME",
      SQL_VARCHAR,
      0,
      0,
      nullptr,
    },
    { 5,
      SQL_C_CHAR,
      offsetof(sFOREIGNKEYSRESULTSET, szFktable_qualifier),
      FKTABLE_QUALIFIER_SIZE,
      0,
      false,
      "FKTABLE_QUALIFIER",
      SQL_VARCHAR,
      0,
      1,
      nullptr,
    },
    { 6,
      SQL_C_CHAR,
      offsetof(sFOREIGNKEYSRESULTSET, szFktable_owner),
      FKTABLE_OWNER_SIZE,
      0,
      false,
      "FKTABLE_OWNER",
      SQL_VARCHAR,
      0,
      0,
      nullptr,
    },
    { 7,
      SQL_C_CHAR,
      offsetof(sFOREIGNKEYSRESULTSET, szFktable_name),
      FKTABLE_NAME_SIZE,
      0,
      false,
      "FKTABLE_NAME",
      SQL_VARCHAR,
      0,
      0,
      nullptr,
    },
    { 8,
      SQL_C_CHAR,
      offsetof(sFOREIGNKEYSRESULTSET, szFkcolumn_name),
      FKCOLUMN_NAME_SIZE,
      0,
      false,
      "FKCOLUMN_NAME",
      SQL_VARCHAR,
      0,
      0,
      nullptr,
    },
    { 9,
      SQL_C_SHORT,
      offsetof(sFOREIGNKEYSRESULTSET, nKey_seq),
      sizeof(SWORD),
      0,
      false,
      "KEY_SEQ",
      SQL_SMALLINT,
      0,
      1,
      nullptr,
    },
    { 10,
      SQL_C_SHORT,
      offsetof(sFOREIGNKEYSRESULTSET, nUpdate_rule),
      sizeof(SWORD),
      0,
      false,
      "UPDATE_RULE",
      SQL_SMALLINT,
      0,
      0,
      nullptr,
    },
    { 11,
      SQL_C_SHORT,
      offsetof(sFOREIGNKEYSRESULTSET, nDelete_rule),
      sizeof(SWORD),
      0,
      false,
      "DELETE_RULE",
      SQL_SMALLINT,
      0,
      0,
      nullptr,
    },
    { 12,
      SQL_C_CHAR,
      offsetof(sFOREIGNKEYSRESULTSET, szRole_name),
      ROLE_NAME_SIZE,
      0,
      false,
      "ROLE_NAME",
      SQL_VARCHAR,
      0,
      0,
      nullptr,
    },
};
const int _SQLForeignKeysRSBCount
    = sizeof(_SQLForeignKeysRSB) / sizeof(_SQLForeignKeysRSB[0]);
// Extended (v2.0) sCOLBIND entries for SQLTablePrivileges, carrying the
// reported column name, SQL type and nullability flag in addition to the
// C-type/offset/size binding.
static sCOLBIND _SQLTablePrivilegesRSB[7] =
{
    { 1,
      SQL_C_CHAR,
      offsetof(sTABLEPRIVILEGESRESULTSET, szTable_qualifier),
      TABLE_QUALIFIER_SIZE,
      0,
      false,
      "TABLE_QUALIFIER",
      SQL_VARCHAR,
      0,
      1,
      nullptr,
    },
    { 2,
      SQL_C_CHAR,
      offsetof(sTABLEPRIVILEGESRESULTSET, szTable_owner),
      TABLE_OWNER_SIZE,
      0,
      false,
      "TABLE_OWNER",
      SQL_VARCHAR,
      0,
      1,
      nullptr,
    },
    { 3,
      SQL_C_CHAR,
      offsetof(sTABLEPRIVILEGESRESULTSET, szTable_name),
      TABLE_NAME_SIZE,
      0,
      false,
      "TABLE_NAME",
      SQL_VARCHAR,
      0,
      1,
      nullptr,
    },
    { 4,
      SQL_C_CHAR,
      offsetof(sTABLEPRIVILEGESRESULTSET, szGrantor),
      GRANTOR_SIZE,
      0,
      false,
      "GRANTOR",
      SQL_VARCHAR,
      0,
      1,
      nullptr,
    },
    { 5,
      SQL_C_CHAR,
      offsetof(sTABLEPRIVILEGESRESULTSET, szGrantee),
      GRANTEE_SIZE,
      0,
      false,
      "GRANTEE",
      SQL_VARCHAR,
      0,
      1,
      nullptr,
    },
    { 6,
      SQL_C_CHAR,
      offsetof(sTABLEPRIVILEGESRESULTSET, szPrivilege),
      PRIVILEGE_SIZE,
      0,
      false,
      "PRIVILEGE",
      SQL_VARCHAR,
      0,
      1,
      nullptr,
    },
    { 7,
      SQL_C_CHAR,
      offsetof(sTABLEPRIVILEGESRESULTSET, szIs_grantable),
      IS_GRANTABLE_SIZE,
      0,
      false,
      "IS_GRANTABLE",
      SQL_VARCHAR,
      0,
      1,
      nullptr,
    },
};
const int _SQLTablePrivilegesRSBCount
    = sizeof(_SQLTablePrivilegesRSB) / sizeof(_SQLTablePrivilegesRSB[0]);
// Extended (v2.0) sCOLBIND entries for SQLPrimaryKeys.
// FIX: column 4 was reported as "column_name" (lowercase) while every other
// column name in these tables is uppercase; normalized to "COLUMN_NAME" to
// match the ODBC catalog-function convention.
static sCOLBIND _SQLPrimaryKeysRSB[5] =
{
    { 1,
      SQL_C_CHAR,
      offsetof(sPRIMARYKEYSRESULTSET, szTable_qualifier),
      TABLE_QUALIFIER_SIZE,
      0,
      false,
      "TABLE_QUALIFIER",
      SQL_VARCHAR,
      0,
      1,
      nullptr,
    },
    { 2,
      SQL_C_CHAR,
      offsetof(sPRIMARYKEYSRESULTSET, szTable_owner),
      TABLE_OWNER_SIZE,
      0,
      false,
      "TABLE_OWNER",
      SQL_VARCHAR,
      0,
      0,
      nullptr,
    },
    { 3,
      SQL_C_CHAR,
      offsetof(sPRIMARYKEYSRESULTSET, szTable_name),
      TABLE_NAME_SIZE,
      0,
      false,
      "TABLE_NAME",
      SQL_VARCHAR,
      0,
      0,
      nullptr,
    },
    { 4,
      SQL_C_CHAR,
      offsetof(sPRIMARYKEYSRESULTSET, szColumn_name),
      COLUMN_NAME_SIZE,
      0,
      false,
      "COLUMN_NAME",
      SQL_VARCHAR,
      0,
      0,
      nullptr,
    },
    { 5,
      SQL_C_SHORT,
      offsetof(sPRIMARYKEYSRESULTSET, nKey_seq),
      sizeof(SWORD),
      0,
      false,
      "KEY_SEQ",
      SQL_SMALLINT,
      0,
      1,
      nullptr,
    },
};
const int _SQLPrimaryKeysRSBCount
    = sizeof(_SQLPrimaryKeysRSB) / sizeof(_SQLPrimaryKeysRSB[0]);
// end new in v2.0
static std::mutex g_AllocCritSec;
///////////////////////////////////////////////////////////
//////////////////////// connections
///////////////////////////////////////////////////////////
/**********************************************************
odbcCONNECT
constructor. Environment handle is required; other
arguments are optional.
**********************************************************/
odbcCONNECT::odbcCONNECT(
    odbcENV* penv,       // owning environment; must be non-null (dereferenced below)
    LPUSTR szDSN,        // data source name; pass NULL to skip the auto-connect
    LPUSTR szUID,        // user id; pass NULL to skip the auto-connect
    LPUSTR szAuthStr,    // authentication string; pass NULL to skip the auto-connect
    UDWORD udTimeout     // login timeout in seconds; 0 leaves the driver default
)
{
    pEnv = penv;
    // pStmtList = 0;    // legacy per-connection statement list (retired)
    isConnected = 0;
    hdbc = nullptr;
    bTrimAllTrailingBlanks = false;
    // enable automatic error retrieval/reporting until the environment's
    // own settings are copied in at the bottom of this constructor
    AutoRetrieve(odbcREPERRS);
    {
        // handle allocation is serialized across all connections
        std::lock_guard<std::mutex> guard(g_AllocCritSec);
        RETCODE rc = SQLAllocConnect(
            pEnv->GetHenv(),
            &hdbc
        );
        SetRC(rc);
    }
    // apply the login timeout before connecting (only when one was requested)
    if (sqlsuccess() && udTimeout > 0)
    {
#if (ODBCVER >= 0x0300)
        SQLSetConnectAttr(
            hdbc,
            SQL_LOGIN_TIMEOUT,
            (SQLPOINTER)udTimeout,
            0);
#else
        SQLSetConnectOption(
            hdbc,
            SQL_LOGIN_TIMEOUT,
            udTimeout);
#endif
    }
    if (sqlsuccess())
    {
#if (ODBCVER >= 0x0200)
        // propagate the environment's cursor-library preference
        SetRC(
            SetConnectOption(SQL_ODBC_CURSORS, pEnv->nCursorLibUsage)
        );
#endif // ODBCVER >= 0x0200
        // auto-connect only when all three connect arguments were supplied
        if (szDSN && szUID && szAuthStr && sqlsuccess())
            Connect(
                szDSN,
                szUID,
                szAuthStr
            );
    }
    // inherit error handling from environment
    // NOTE(review): this copy happens after Connect(), so connect-time errors
    // are handled with the AutoRetrieve() default set above -- confirm intended.
    ErrHandler = pEnv->ErrHandler;
    bGetErrorInfo = pEnv->bGetErrorInfo;
    bReportErrorInfo = pEnv->bReportErrorInfo;
    hwnd_ = pEnv->hwnd_;
    flags = pEnv->flags;
    pEnv->RegisterConnection(this);
}
// Narrow-string (LPSTR) overload; mirrors the LPUSTR constructor above.
odbcCONNECT::odbcCONNECT(
    odbcENV* penv,       // owning environment; must be non-null (dereferenced below)
    LPSTR szDSN,         // data source name; pass NULL to skip the auto-connect
    LPSTR szUID,         // user id; pass NULL to skip the auto-connect
    LPSTR szAuthStr,     // authentication string; pass NULL to skip the auto-connect
    UDWORD udTimeout     // login timeout in seconds; 0 leaves the driver default
)
{
    pEnv = penv;
    // pStmtList = 0;    // legacy per-connection statement list (retired)
    isConnected = 0;
    hdbc = nullptr;
    bTrimAllTrailingBlanks = false;
    // enable automatic error retrieval/reporting until the environment's
    // own settings are copied in at the bottom of this constructor
    AutoRetrieve(odbcREPERRS);
    {
        // handle allocation is serialized across all connections
        std::lock_guard<std::mutex> guard(g_AllocCritSec);
        RETCODE rc = SQLAllocConnect(
            pEnv->GetHenv(),
            &hdbc
        );
        SetRC(rc);
    }
    // apply the login timeout before connecting (only when one was requested)
    if (sqlsuccess() && udTimeout > 0)
    {
#if (ODBCVER >= 0x0300)
        SQLSetConnectAttr(
            hdbc,
            SQL_LOGIN_TIMEOUT,
            (SQLPOINTER)udTimeout,
            0);
#else
        SQLSetConnectOption(
            hdbc,
            SQL_LOGIN_TIMEOUT,
            udTimeout);
#endif
    }
    if (sqlsuccess())
    {
#if (ODBCVER >= 0x0200)
        // propagate the environment's cursor-library preference
        SetRC(
            SetConnectOption(SQL_ODBC_CURSORS, pEnv->nCursorLibUsage)
        );
#endif // ODBCVER >= 0x0200
        // auto-connect only when all three connect arguments were supplied
        if (szDSN && szUID && szAuthStr && sqlsuccess())
            Connect(
                szDSN,
                szUID,
                szAuthStr
            );
    }
    // inherit error handling from environment
    // NOTE(review): this copy happens after Connect(), so connect-time errors
    // are handled with the AutoRetrieve() default set above -- confirm intended.
    ErrHandler = pEnv->ErrHandler;
    bGetErrorInfo = pEnv->bGetErrorInfo;
    bReportErrorInfo = pEnv->bReportErrorInfo;
    hwnd_ = pEnv->hwnd_;
    flags = pEnv->flags;
    pEnv->RegisterConnection(this);
}
/**********************************************************
~odbcCONNECT
Destructor.
**********************************************************/
// Tear down in dependency order: disconnect first, then free the handle.
odbcCONNECT::~odbcCONNECT()
{
    // close cursors first
    /* if (pStmtList)
    {
        delete pStmtList;
        pStmtList = NULL;
    }
    */
    // then disconnect
    if (isConnected)
        Disconnect();
    // then free the connection handle (freeing is serialized like allocation)
    if (hdbc)
    {
        std::lock_guard<std::mutex> guard(g_AllocCritSec);
        SQLFreeConnect(hdbc);
        hdbc = nullptr;
    }
    // NOTE(review): the connection is never unregistered from pEnv (the code
    // below was disabled) -- confirm the environment tolerates stale entries.
    // if ( pEnv && !IsBadReadPtr( pEnv, sizeof(*pEnv)))
    //     pEnv->UnregisterConnection(this);
}
/**********************************************************
Connect
Call to SQLConnect, passing data set name, user ID,
and password.
**********************************************************/
static std::mutex g_ConnectCritSec;
RETCODE odbcCONNECT::Connect(
    LPUSTR szDSN,
    LPUSTR szUID,
    LPUSTR szAuthStr
)
{
    // no connection handle means construction failed earlier
    if (!hdbc)
    {
        SetRC(SQL_ALLOC_FAILED);
        return lastRC();
    }
    // drop any live connection before opening a new one
    if (isConnected)
        Disconnect();
    // serialize SQLConnect across threads
    std::lock_guard<std::mutex> guard(g_ConnectCritSec);
    RETCODE retcode = SQLConnect(hdbc,
                                 szDSN, SQL_NTS,
                                 szUID, SQL_NTS,
                                 szAuthStr, SQL_NTS);
    SetRC(retcode);
    if (sqlsuccess())
        isConnected = true;
    return lastRC();
}
/**********************************************************
Disconnect
Call to SQLDisconnect.
**********************************************************/
RETCODE odbcCONNECT::Disconnect()
{
    // no connection handle means construction failed earlier
    if (!hdbc)
    {
        SetRC(SQL_ALLOC_FAILED);
        return lastRC();
    }
    // only touch the driver when a live connection exists
    if (isConnected)
    {
        std::lock_guard<std::mutex> guard(g_ConnectCritSec);
        RETCODE retcode = SQLDisconnect(hdbc);
        SetRC(retcode);
        if (sqlsuccess())
            isConnected = false;
    }
    return lastRC();
}
/**********************************************************
Commit
Call to SQLTransact to commit a transaction.
**********************************************************/
RETCODE odbcCONNECT::Commit()
{
    // cannot transact without a connection handle
    if (!hdbc)
    {
        SetRC(SQL_ALLOC_FAILED);
        return lastRC();
    }
    // commit on this connection only (environment handle deliberately null)
    RETCODE retcode = SQLTransact(SQL_NULL_HENV, hdbc, SQL_COMMIT);
    SetRC(retcode);
    return lastRC();
}
/**********************************************************
RollBack
Call to SQLTransact to roll back a transaction.
**********************************************************/
RETCODE odbcCONNECT::RollBack()
{
    // cannot transact without a connection handle
    if (!hdbc)
    {
        SetRC(SQL_ALLOC_FAILED);
        return lastRC();
    }
    // roll back on this connection only (environment handle deliberately null)
    RETCODE retcode = SQLTransact(SQL_NULL_HENV, hdbc, SQL_ROLLBACK);
    SetRC(retcode);
    return lastRC();
}
/**********************************************************
RegisterError
Get more information on the most recent error code
from an ODBC operation. Results can be retrieved using
member functions in the parent odbcBASE class.
This function calls the base class member function Error()
with arguments appropriate for this object type.
**********************************************************/
RETCODE odbcCONNECT::RegisterError()
{
    // report at connection scope: env + dbc handles, no statement handle
    HDBC hReport = (hdbc != nullptr) ? hdbc : SQL_NULL_HDBC;
    return Error(henv, hReport, SQL_NULL_HSTMT);
}
/**********************************************************
AllocStmt
Allocate a statement object.
lpszStmt: SQL statement to use.
bPrepare: if non-zero, call SQLPrepare.
bExecute: Call SQLExecDirect (or SQLExecute
if bPrepare was non-zero).
psParmBindings: address of array of parameter bindings.
uParmCount: count of array elements.
pvParmStruct: Address of structure containing
parameter values.
**********************************************************/
odbcSTMT* odbcCONNECT::AllocStmt
(
    LPUSTR lpszSentStmt,
    sPARMBIND* psParmBindings,
    UWORD uParmCount,
    void* pvParmStruct
)
{
    // construct a statement object bound to this connection; caller owns it
    odbcSTMT* pStmt = new odbcSTMT(this, lpszSentStmt, psParmBindings,
                                   uParmCount, pvParmStruct);
    if (pStmt == nullptr)
    {
        // record the failure so callers can query lastRC()
        SetRC(SQL_ALLOC_FAILED);
        return nullptr;
    }
    return pStmt;
}
/**********************************************************
RegisterStmt
Register an odbcSTMT object.
**********************************************************/
// Intentionally empty: statement registration was retired along with the
// per-connection statement list; kept for interface compatibility.
void odbcCONNECT::RegisterStmt(odbcSTMT* /*pStmt*/)
{
    // Has no use now
}
/**********************************************************
UnregisterStmt
Unregister an odbcSTMT object.
**********************************************************/
// Intentionally empty counterpart of RegisterStmt; kept for compatibility.
void odbcCONNECT::UnregisterStmt(odbcSTMT* /*pStmt*/)
{
    // Has no use now
}
/**********************************************************
AllocCursor
Allocate a cursor object.
lpszStmt: SQL statement to use.
bPrepare: if non-zero, call SQLPrepare.
bExecute: Call SQLExecDirect (or SQLExecute
if bPrepare was non-zero).
bAutoBind: Automatically bind columns
to dynamically allocated struct?
psParmBindings: Address of array of parameter bindings.
uParmCount: Count of array elements.
pvParmStruct: Address of structure containing
parameter values.
pColBindings: Address of array of column bindings.
uColcount: Count of array elements.
pvColStruct: Address of structure for column bindings.
**********************************************************/
odbcCURSOR* odbcCONNECT::AllocCursor
(
    LPUSTR lpszSentStmt,
    bool bAutoBind,
    sPARMBIND* psParmBindings,
    UWORD uParmCount,
    void* pvParmStruct,
    sCOLBIND* psColBindings,
    UWORD uColCount,
    void* pvColStruct
)
{
    // construct a cursor bound to this connection; caller owns it
    odbcCURSOR* pCursor = new odbcCURSOR(this, lpszSentStmt, bAutoBind,
                                         psParmBindings, uParmCount,
                                         pvParmStruct, psColBindings,
                                         uColCount, pvColStruct);
    if (pCursor == nullptr)
    {
        // record the failure so callers can query lastRC()
        SetRC(SQL_ALLOC_FAILED);
        return nullptr;
    }
    return pCursor;
}
/**********************************************************
DeAllocStmt
Deallocate a statement object.
**********************************************************/
void odbcCONNECT::DeAllocStmt(
    odbcSTMT* pS    // object to destroy; delete on nullptr is a no-op
)
{
    delete pS;
}
/**********************************************************
AllocRecInserter
Allocate a RecInserter object.
lpszSentTableName Name of the table into which you
are inserting records. This must be a
table on the current connection.
pColBinds
uNumColBindings
pRecord If supplied, these are used for data
dictionary-style column binding instead
of the default AutoBind() mechanism.
The user is responsible for ensuring'
that the dictionary entry's column
definitions will match the order
of the query result set's columns
(matching SELECT * FROM <tablename>),
and that the conversion types make
sense (e.g., if you bind a SQL_NUMERIC
column to a storage location of type
SQL_DATE, you are responsible for the
resulting garbaggio in your record).
**********************************************************/
odbcRECINSERTER* odbcCONNECT::AllocRecInserter
(
    LPCSTR lpszSentTblName,
    sCOLBIND* pColBinds /* = NULL */,
    UWORD uNumColBindings /* = 0 */,
    void* pRecord /* = NULL */
)
{
    // construct an inserter for the named table on this connection
    odbcRECINSERTER* pInserter = new odbcRECINSERTER(this, lpszSentTblName,
                                                     pColBinds,
                                                     uNumColBindings,
                                                     pRecord);
    if (pInserter == nullptr)
    {
        // record the failure so callers can query lastRC()
        SetRC(SQL_ALLOC_FAILED);
        return nullptr;
    }
    return pInserter;
}
/**********************************************************
DeAllocRecInserter
Deallocate a statement object.
**********************************************************/
void odbcCONNECT::DeAllocRecInserter(
    odbcRECINSERTER* pS    // object to destroy; delete on nullptr is a no-op
)
{
    delete pS;
}
/**********************************************************
AllocRecUpdater
Allocate a RecUpdater object.
lpszSentTableName Name of the table into which you
are inserting records. This must be a
table on the current connection.
pColBinds
uNumColBindings
pRecord If supplied, these are used for data
dictionary-style column binding instead
of the default AutoBind() mechanism.
The user is responsible for ensuring'
that the dictionary entry's column
definitions will match the order
of the query result set's columns
(matching SELECT * FROM <tablename>),
and that the conversion types make
sense (e.g., if you bind a SQL_NUMERIC
column to a storage location of type
SQL_DATE, you are responsible for the
resulting garbaggio in your record).
**********************************************************/
odbcRECUPDATER* odbcCONNECT::AllocRecUpdater
(
    LPCSTR lpszSentTblName,
    LPCSTR lpszSelectStmt,
    sCOLBIND* pColBinds /* = NULL*/,
    UWORD uNumColBindings /* = 0*/,
    void* pRecord /* = NULL*/,
    bool bExecDirect /* = true*/,
    UWORD fConcur /* = SQL_CONCUR_VALUES*/,
    SDWORD fKeyset /* = SQL_CURSOR_STATIC*/,
    UWORD fRowSet /* = 1 */
)
{
    // construct an updater over the given SELECT for the named table
    odbcRECUPDATER* pUpdater = new odbcRECUPDATER(this, lpszSentTblName,
                                                  lpszSelectStmt, pColBinds,
                                                  uNumColBindings, pRecord,
                                                  bExecDirect, fConcur,
                                                  fKeyset, fRowSet);
    if (pUpdater == nullptr)
    {
        // record the failure so callers can query lastRC()
        SetRC(SQL_ALLOC_FAILED);
        return nullptr;
    }
    return pUpdater;
}
/**********************************************************
DeAllocRecUpdater
Deallocate a RecUpdater object.
**********************************************************/
void odbcCONNECT::DeAllocRecUpdater(
    odbcRECUPDATER* pS    // object to destroy; delete on nullptr is a no-op
)
{
    delete pS;
}
/**********************************************************
AllocTableCreator
Allocate a TableCreator object.
**********************************************************/
odbcTABLECREATOR* odbcCONNECT::AllocTableCreator()
{
    // construct a table-creator bound to this connection; caller owns it
    odbcTABLECREATOR* pCreator = new odbcTABLECREATOR(this);
    if (pCreator == nullptr)
    {
        // record the failure so callers can query lastRC()
        SetRC(SQL_ALLOC_FAILED);
        return nullptr;
    }
    return pCreator;
}
/**********************************************************
DeAllocTableCreator
Deallocate a TableCreator object.
**********************************************************/
void odbcCONNECT::DeAllocTableCreator(
    odbcTABLECREATOR* pS    // object to destroy; delete on nullptr is a no-op
)
{
    delete pS;
}
/**********************************************************
AllocBLOB
Allocate a BLOB object.
Pass address of owning cursor, number of the
associated column, and maximum size of the parameter;
optionally also pass the column's SQL data type and
put- and get-chunk granularities.
**********************************************************/
odbcBLOB* odbcCONNECT::AllocBLOB(
    odbcCURSOR* pCurs,
    UWORD iSentCol,
    UDWORD cbSentMaxSize,
    SWORD fSentSqlType, /* = SQL_LONGVARBINARY */
    UWORD iSentParam, /* = 0 */
    SDWORD cbSentPutChunkSize, /* = BLOB_CHUNK_PUT_SIZE */
    SDWORD cbSentGetChunkSize /* = BLOB_CHUNK_GET_SIZE */
)
{
    // construct a BLOB helper for the given cursor column; caller owns it
    odbcBLOB* pBlob = new odbcBLOB(pCurs, iSentCol, cbSentMaxSize,
                                   fSentSqlType, iSentParam,
                                   cbSentPutChunkSize, cbSentGetChunkSize);
    if (pBlob == nullptr)
    {
        // record the failure so callers can query lastRC()
        SetRC(SQL_ALLOC_FAILED);
        return nullptr;
    }
    return pBlob;
}
/**********************************************************
DeAllocBLOB
Deallocate a BLOB object.
pS BLOB to deallocate.
**********************************************************/
void odbcCONNECT::DeAllocBLOB(
    odbcBLOB* pS    // object to destroy; delete on nullptr is a no-op
)
{
    delete pS;
}
/**********************************************************
AllocColumnIterator
Allocate a ColumnIterator object.
**********************************************************/
odbcColumnIterator* odbcCONNECT::AllocColumnIterator(
    LPSTR szTableQualifier,
    LPSTR szTableOwner,
    LPSTR szTableName,
    LPSTR szColumnName
)
{
    // construct a catalog iterator for SQLColumns; caller owns it
    odbcColumnIterator* pIter = new odbcColumnIterator(this,
                                                       szTableQualifier,
                                                       szTableOwner,
                                                       szTableName,
                                                       szColumnName);
    if (pIter == nullptr)
    {
        // record the failure so callers can query lastRC()
        SetRC(SQL_ALLOC_FAILED);
        return nullptr;
    }
    return pIter;
}
/**********************************************************
DeAllocColumnIterator
Deallocate a ColumnIterator object.
**********************************************************/
void odbcCONNECT::DeAllocColumnIterator(
    odbcColumnIterator* pS    // object to destroy; delete on nullptr is a no-op
)
{
    delete pS;
}
/**********************************************************
AllocColumnPrivilegesIterator
Allocate a ColumnPrivilegesIterator object.
**********************************************************/
odbcColumnPrivilegesIterator* odbcCONNECT::AllocColumnPrivilegesIterator(
    LPSTR szTableQualifier,
    LPSTR szTableOwner,
    LPSTR szTableName,
    LPSTR szColumnName
)
{
    // construct a catalog iterator for SQLColumnPrivileges; caller owns it
    odbcColumnPrivilegesIterator* pIter =
        new odbcColumnPrivilegesIterator(this,
                                         szTableQualifier,
                                         szTableOwner,
                                         szTableName,
                                         szColumnName);
    if (pIter == nullptr)
    {
        // record the failure so callers can query lastRC()
        SetRC(SQL_ALLOC_FAILED);
        return nullptr;
    }
    return pIter;
}
/**********************************************************
DeAllocColumnPrivilegesIterator
Deallocate a ColumnPrivilegesIterator object.
**********************************************************/
void odbcCONNECT::DeAllocColumnPrivilegesIterator(
    odbcColumnPrivilegesIterator* pS    // object to destroy; delete on nullptr is a no-op
)
{
    delete pS;
}
/**********************************************************
AllocForeignKeysIterator
Allocate a ForeignKeysIterator object.
**********************************************************/
odbcForeignKeysIterator* odbcCONNECT::AllocForeignKeysIterator(
    LPSTR szPkTableQualifier,
    LPSTR szPkTableOwner,
    LPSTR szPkTableName,
    LPSTR szFkTableQualifier,
    LPSTR szFkTableOwner,
    LPSTR szFkTableName
)
{
    // construct a catalog iterator for SQLForeignKeys; caller owns it
    odbcForeignKeysIterator* pIter =
        new odbcForeignKeysIterator(this,
                                    szPkTableQualifier,
                                    szPkTableOwner,
                                    szPkTableName,
                                    szFkTableQualifier,
                                    szFkTableOwner,
                                    szFkTableName);
    if (pIter == nullptr)
    {
        // record the failure so callers can query lastRC()
        SetRC(SQL_ALLOC_FAILED);
        return nullptr;
    }
    return pIter;
}
/**********************************************************
DeAllocForeignKeysIterator
Deallocate a ForeignKeysIterator object.
**********************************************************/
void odbcCONNECT::DeAllocForeignKeysIterator(
    odbcForeignKeysIterator* pS    // object to destroy; delete on nullptr is a no-op
)
{
    delete pS;
}
/**********************************************************
AllocPrimaryKeysIterator
Allocate a PrimaryKeysIterator object.
**********************************************************/
odbcPrimaryKeysIterator* odbcCONNECT::AllocPrimaryKeysIterator(
    LPSTR szTableQualifier,
    LPSTR szTableOwner,
    LPSTR szTableName
)
{
    // construct a catalog iterator for SQLPrimaryKeys; caller owns it
    odbcPrimaryKeysIterator* pIter =
        new odbcPrimaryKeysIterator(this,
                                    szTableQualifier,
                                    szTableOwner,
                                    szTableName);
    if (pIter == nullptr)
    {
        // record the failure so callers can query lastRC()
        SetRC(SQL_ALLOC_FAILED);
        return nullptr;
    }
    return pIter;
}
/**********************************************************
DeAllocPrimaryKeysIterator
Deallocate a PrimaryKeysIterator object.
**********************************************************/
void odbcCONNECT::DeAllocPrimaryKeysIterator(
    odbcPrimaryKeysIterator* pS    // object to destroy; delete on nullptr is a no-op
)
{
    delete pS;
}
/**********************************************************
AllocProcedureColumnsIterator
Allocate a ProcedureColumnsIterator object.
**********************************************************/
odbcProcedureColumnsIterator* odbcCONNECT::AllocProcedureColumnsIterator(
    LPSTR szProcQualifier,
    LPSTR szProcOwner,
    LPSTR szProcName,
    LPSTR szProcColumn
)
{
    // construct a catalog iterator for SQLProcedureColumns; caller owns it
    odbcProcedureColumnsIterator* pIter =
        new odbcProcedureColumnsIterator(this,
                                         szProcQualifier,
                                         szProcOwner,
                                         szProcName,
                                         szProcColumn);
    if (pIter == nullptr)
    {
        // record the failure so callers can query lastRC()
        SetRC(SQL_ALLOC_FAILED);
        return nullptr;
    }
    return pIter;
}
/**********************************************************
DeAllocProcedureColumnsIterator
Deallocate a ProcedureColumnsIterator object.
**********************************************************/
void odbcCONNECT::DeAllocProcedureColumnsIterator(
    odbcProcedureColumnsIterator* pS    // object to destroy; delete on nullptr is a no-op
)
{
    delete pS;
}
/**********************************************************
AllocProceduresIterator
Allocate a ProceduresIterator object.
**********************************************************/
odbcProceduresIterator* odbcCONNECT::AllocProceduresIterator(
    LPSTR szProcQualifier,
    LPSTR szProcOwner,
    LPSTR szProcName
)
{
    // construct a catalog iterator for SQLProcedures; caller owns it
    odbcProceduresIterator* pIter =
        new odbcProceduresIterator(this,
                                   szProcQualifier,
                                   szProcOwner,
                                   szProcName);
    if (pIter == nullptr)
    {
        // record the failure so callers can query lastRC()
        SetRC(SQL_ALLOC_FAILED);
        return nullptr;
    }
    return pIter;
}
/**********************************************************
DeAllocProceduresIterator
Deallocate a ProceduresIterator object.
**********************************************************/
void odbcCONNECT::DeAllocProceduresIterator(
    odbcProceduresIterator* pS    // object to destroy; delete on nullptr is a no-op
)
{
    delete pS;
}
/**********************************************************
AllocSpecialColumnIterator
Allocate a SpecialColumnIterator object.
**********************************************************/
odbcSpecialColumnIterator* odbcCONNECT::AllocSpecialColumnIterator(
    UWORD fColType,
    LPSTR szTableQualifier,
    LPSTR szTableOwner,
    LPSTR szTableName,
    UWORD fScope,
    UWORD fNullable
)
{
    // construct a catalog iterator for SQLSpecialColumns; caller owns it
    odbcSpecialColumnIterator* pIter =
        new odbcSpecialColumnIterator(this,
                                      fColType,
                                      szTableQualifier,
                                      szTableOwner,
                                      szTableName,
                                      fScope,
                                      fNullable);
    if (pIter == nullptr)
    {
        // record the failure so callers can query lastRC()
        SetRC(SQL_ALLOC_FAILED);
        return nullptr;
    }
    return pIter;
}
/**********************************************************
DeAllocSpecialColumnIterator
Deallocate a SpecialColumnIterator object.
**********************************************************/
void odbcCONNECT::DeAllocSpecialColumnIterator(
    odbcSpecialColumnIterator* pS    // object to destroy; delete on nullptr is a no-op
)
{
    delete pS;
}
/**********************************************************
AllocStatisticsIterator
Allocate a StatisticsIterator object.
**********************************************************/
odbcStatisticsIterator* odbcCONNECT::AllocStatisticsIterator(
    LPSTR szTableQualifier,
    LPSTR szTableOwner,
    LPSTR szTableName,
    UWORD fTblUnique,
    UWORD fTblAccuracy
)
{
    // construct a catalog iterator for SQLStatistics; caller owns it
    odbcStatisticsIterator* pIter =
        new odbcStatisticsIterator(this,
                                   szTableQualifier,
                                   szTableOwner,
                                   szTableName,
                                   fTblUnique,
                                   fTblAccuracy);
    if (pIter == nullptr)
    {
        // record the failure so callers can query lastRC()
        SetRC(SQL_ALLOC_FAILED);
        return nullptr;
    }
    return pIter;
}
/**********************************************************
DeAllocStatisticsIterator
Deallocate a StatisticsIterator object.
**********************************************************/
void odbcCONNECT::DeAllocStatisticsIterator(
    odbcStatisticsIterator* pS    // object to destroy; delete on nullptr is a no-op
)
{
    delete pS;
}
/**********************************************************
AllocTableIterator
Allocate a TableIterator object.
**********************************************************/
odbcTableIterator* odbcCONNECT::AllocTableIterator(
    LPSTR szTableQualifier,
    LPSTR szTableOwner,
    LPSTR szTableName,
    LPSTR szTableType
)
{
    // construct a catalog iterator for SQLTables; caller owns it
    odbcTableIterator* pIter =
        new odbcTableIterator(this,
                              szTableQualifier,
                              szTableOwner,
                              szTableName,
                              szTableType);
    if (pIter == nullptr)
    {
        // record the failure so callers can query lastRC()
        SetRC(SQL_ALLOC_FAILED);
        return nullptr;
    }
    return pIter;
}
/**********************************************************
DeAllocTableIterator
Deallocate a TableIterator object.
**********************************************************/
void odbcCONNECT::DeAllocTableIterator(
    odbcTableIterator* pS    // object to destroy; delete on nullptr is a no-op
)
{
    delete pS;
}
/**********************************************************
AllocTablePrivilegesIterator
Allocate a TablePrivilegesIterator object.
**********************************************************/
odbcTablePrivilegesIterator* odbcCONNECT::AllocTablePrivilegesIterator(
    LPSTR szTableQualifier,
    LPSTR szTableOwner,
    LPSTR szTableName
)
{
    // construct a catalog iterator for SQLTablePrivileges; caller owns it
    odbcTablePrivilegesIterator* pIter =
        new odbcTablePrivilegesIterator(this,
                                        szTableQualifier,
                                        szTableOwner,
                                        szTableName);
    if (pIter == nullptr)
    {
        // record the failure so callers can query lastRC()
        SetRC(SQL_ALLOC_FAILED);
        return nullptr;
    }
    return pIter;
}
/**********************************************************
DeAllocTablePrivilegesIterator
Deallocate a TablePrivilegesIterator object.
**********************************************************/
void odbcCONNECT::DeAllocTablePrivilegesIterator(
    odbcTablePrivilegesIterator* pS    // object to destroy; delete on nullptr is a no-op
)
{
    delete pS;
}
/**********************************************************
AllocTypeInfoIterator
Allocate a TypeInfoIterator object.
**********************************************************/
odbcTypeInfoIterator* odbcCONNECT::AllocTypeInfoIterator()
{
    // construct a catalog iterator for SQLGetTypeInfo; caller owns it
    odbcTypeInfoIterator* pIter = new odbcTypeInfoIterator(this);
    if (pIter == nullptr)
    {
        // record the failure so callers can query lastRC()
        SetRC(SQL_ALLOC_FAILED);
        return nullptr;
    }
    return pIter;
}
/**********************************************************
DeAllocTypeInfoIterator
Deallocate a TypeInfoIterator object.
**********************************************************/
void odbcCONNECT::DeAllocTypeInfoIterator(
    odbcTypeInfoIterator* pS    // object to destroy; delete on nullptr is a no-op
)
{
    delete pS;
}
/**********************************************************
GetConnectOption
Call SQLGetConnectOption.
The various Get... member functions call the same
function with the appropriate flag.
**********************************************************/
UDWORD odbcCONNECT::GetConnectOption(UWORD fOption)
{
#if (ODBCVER >= 0x0300)
    // ODBC 3.x path: use the attribute API that superseded SQLGetConnectOption
    // NOTE(review): SQLULEN may be 64-bit; the return value narrows to UDWORD.
    SQLULEN Result = 0;
    SetRC(
        SQLGetConnectAttr(
            hdbc,
            fOption,
            &Result,
            SQL_IS_UINTEGER,
            nullptr
        ));
    return Result;
#else
    UDWORD Result = 0;
    SetRC(
        SQLGetConnectOption(
            hdbc,
            fOption,
            &Result
        ));
    return Result;
#endif
}
/**********************************************************
SetConnectOption
Call SQLSetConnectOption.
The various Set... member functions call the same
function with the appropriate flag.
**********************************************************/
RETCODE odbcCONNECT::SetConnectOption(
    UWORD fOption,     // SQL_... connection option / attribute code
    UDWORD ulValue     // option value; passed as an integer
)
{
#if (ODBCVER >= 0x0300)
    // ODBC 3.x path: integer attributes are passed through the SQLPOINTER
    // argument, as the SQLSetConnectAttr contract specifies
    SetRC(
        SQLSetConnectAttr(
            hdbc,
            fOption,
            (SQLPOINTER)ulValue,
            0
        ));
#else
    SetRC(
        SQLSetConnectOption(
            hdbc,
            fOption,
            ulValue
        ));
#endif
    return lastRC();
}
/**********************************************************
DriverConnect
Call SQLDriverConnect; pass-through function.
**********************************************************/
RETCODE odbcCONNECT::DriverConnect(
    HWND hwnd,                 // parent window for any driver prompt; may be null
    LPUCSTR szConnStrIn,       // input connection string (NUL-terminated)
    LPUSTR szConnStrOut,       // receives the completed connection string
    SWORD cbConnStrOutMax,     // capacity of szConnStrOut
    SWORD *pcbConnStrOut,      // receives the completed string's length
    UWORD fDriverCompletion    // SQL_DRIVER_... prompting mode
)
{
    // if we didn't construct, don't try to connect
    if (!hdbc)
    {
        SetRC(SQL_ALLOC_FAILED);
        return lastRC();
    }
    // if already connected, disconnect
    if (isConnected)
        Disconnect();
    // Reject a bogus (non-window) prompt handle.
    // Fix: record the error via SetRC() before returning -- previously this
    // path returned SQL_ERROR without updating the stored return code, so
    // the DriverConnect* wrappers (which return lastRC()) reported a stale,
    // possibly successful code.
    if (hwnd && !IsWindow(hwnd))
    {
        SetRC(SQL_ERROR);
        return lastRC();
    }
    SetRC(
        SQLDriverConnect(
            hdbc,
            hwnd,
            (LPUSTR)szConnStrIn,
            SQL_NTS,
            szConnStrOut,
            cbConnStrOutMax,
            pcbConnStrOut,
            fDriverCompletion
        ));
    if (sqlsuccess())
        isConnected = true;
    return lastRC();
}
/**********************************************************
DriverConnectNoPrompt
Call SQLDriverConnect, using contents of szConnStrIn;
do not prompt with any dialog, but return an error if
the connection information in the string is not enough.
Use strlen() to obtain the length of szConnStrOut,
but only if the function succeeded.
**********************************************************/
RETCODE odbcCONNECT::DriverConnectNoPrompt(
    LPUCSTR szConnStrIn,
    LPUSTR szConnStrOut,
    SWORD cbConnStrOutMax
)
{
    // no window handle and no prompting: szConnStrIn must be complete,
    // otherwise the driver reports an error instead of asking the user
    SWORD cbOut = 0;
    DriverConnect(nullptr, szConnStrIn, szConnStrOut,
                  cbConnStrOutMax, &cbOut, SQL_DRIVER_NOPROMPT);
    return lastRC();
}
/**********************************************************
DriverConnectComplete
Call SQLDriverConnect; prompt to complete the connection,
if insufficient information is provided in szConnStrIn.
Use strlen() to obtain the length of szConnStrOut,
but only if the function succeeded.
**********************************************************/
RETCODE odbcCONNECT::DriverConnectComplete(
    HWND hwnd,
    LPUCSTR szConnStrIn,
    LPUSTR szConnStrOut,
    SWORD cbConnStrOutMax
)
{
    // prompt only if szConnStrIn lacks information the driver needs
    SWORD cbOut = 0;
    DriverConnect(hwnd, szConnStrIn, szConnStrOut,
                  cbConnStrOutMax, &cbOut, SQL_DRIVER_COMPLETE);
    return lastRC();
}
/**********************************************************
DriverConnectPrompt
Call SQLDriverConnect; tells driver to prompt for
connection information.
Use strlen() to obtain the length of szConnStrOut,
but only if the function succeeded.
**********************************************************/
RETCODE odbcCONNECT::DriverConnectPrompt(
    HWND hwnd,
    LPUSTR szConnStrOut,
    SWORD cbConnStrOutMax
)
{
    // always prompt: hand the driver an empty input connection string
    UCHAR szConnStrIn[255];
    szConnStrIn[0] = 0;
    SWORD cbOut = 0;
    DriverConnect(hwnd, szConnStrIn, szConnStrOut,
                  cbConnStrOutMax, &cbOut, SQL_DRIVER_PROMPT);
    return lastRC();
}
/**********************************************************
DriverConnectCompleteRequired
Calls SQLDriverConnect; tells driver to complete any
required items not in the szConnStrIn argument.
Use strlen() to obtain the length of szConnStrOut,
but only if the function succeeded.
**********************************************************/
RETCODE odbcCONNECT::DriverConnectCompleteRequired(
    HWND hwnd,
    LPUCSTR szConnStrIn,
    LPUSTR szConnStrOut,
    SWORD cbConnStrOutMax
)
{
    // prompt only for required items missing from szConnStrIn
    SWORD cbOut = 0;
    DriverConnect(hwnd, szConnStrIn, szConnStrOut,
                  cbConnStrOutMax, &cbOut, SQL_DRIVER_COMPLETE_REQUIRED);
    return lastRC();
}
/**********************************************************
GetFunctions
Call SQLGetFunctions.
**********************************************************/
UWORD odbcCONNECT::GetFunctions(UWORD fFunction)
{
    // ask the driver whether it supports the given ODBC function
    UWORD uExists = 0;
    RETCODE retcode = SQLGetFunctions(hdbc, fFunction, &uExists);
    SetRC(retcode);
    return uExists;
}
/**********************************************************
GetInfo
Call SQLGetInfo.
**********************************************************/
RETCODE odbcCONNECT::GetInfo(
    UWORD fInfoType,
    void* rgbInfoValue,
    SWORD cbInfoValueMax,
    SWORD *pcbInfoValue)
{
    // thin pass-through to SQLGetInfo; the return code is also recorded
    RETCODE retcode = SQLGetInfo(hdbc, fInfoType, rgbInfoValue,
                                 cbInfoValueMax, pcbInfoValue);
    SetRC(retcode);
    return lastRC();
}
/**********************************************************
SQLColumnsRSBCount
Count of structures in array of column bindings for the
result set of SQLColumns.
**********************************************************/
UWORD odbcCONNECT::SQLColumnsRSBCount()
{
    // array-length idiom: total bytes / bytes per element
    return sizeof(_SQLColumnsRSB) / sizeof(_SQLColumnsRSB[0]);
}
/**********************************************************
    SQLColumnsRSB
    Address of array of structures describing column bindings
    for the result set of SQLColumns.
**********************************************************/
sCOLBIND* odbcCONNECT::SQLColumnsRSB()
{
    // returns the file-scope static table; never null, never freed
    return _SQLColumnsRSB;
}
/**********************************************************
SQLSpecialColumnsRSBCount
Count of structures in array of column bindings for the
result set of SQLSpecialColumns.
**********************************************************/
UWORD odbcCONNECT::SQLSpecialColumnsRSBCount()
{
    // array-length idiom: total bytes / bytes per element
    return sizeof(_SQLSpecialColumnsRSB) / sizeof(_SQLSpecialColumnsRSB[0]);
}
/**********************************************************
    SQLSpecialColumnsRSB
    Address of array of structures describing column bindings
    for the result set of SQLSpecialColumns.
**********************************************************/
sCOLBIND* odbcCONNECT::SQLSpecialColumnsRSB()
{
    // returns the file-scope static table; never null, never freed
    return _SQLSpecialColumnsRSB;
}
/**********************************************************
SQLTypeInfoRSBCount
Count of structures in array of column bindings for the
result set of SQLTypeInfo.
**********************************************************/
UWORD odbcCONNECT::SQLTypeInfoRSBCount()
{
    // array-length idiom: total bytes / bytes per element
    return sizeof(_SQLTypeInfoRSB) / sizeof(_SQLTypeInfoRSB[0]);
}
/**********************************************************
    SQLTypeInfoRSB
    Address of array of structures describing column bindings
    for the result set of SQLTypeInfo.
**********************************************************/
sCOLBIND* odbcCONNECT::SQLTypeInfoRSB()
{
    // returns the file-scope static table; never null, never freed
    return _SQLTypeInfoRSB;
}
/**********************************************************
SQLProcedureColumnsRSBCount
Count of structures in array of column bindings for the
result set of SQLProcedureColumns.
**********************************************************/
UWORD odbcCONNECT::SQLProcedureColumnsRSBCount()
{
    // array-length idiom: total bytes / bytes per element
    return sizeof(_SQLProcedureColumnsRSB) / sizeof(_SQLProcedureColumnsRSB[0]);
}
/**********************************************************
    SQLProcedureColumnsRSB
    Address of array of structures describing column bindings
    for the result set of SQLProcedureColumns.
**********************************************************/
sCOLBIND* odbcCONNECT::SQLProcedureColumnsRSB()
{
    // returns the file-scope static table; never null, never freed
    return _SQLProcedureColumnsRSB;
}
/**********************************************************
SQLTablesRSBCount
Count of structures in array of column bindings for the
result set of SQLTables.
**********************************************************/
UWORD odbcCONNECT::SQLTablesRSBCount()
{
    // array-length idiom: total bytes / bytes per element
    return sizeof(_SQLTablesRSB) / sizeof(_SQLTablesRSB[0]);
}
/**********************************************************
    SQLTablesRSB
    Address of array of structures describing column bindings
    for the result set of SQLTables.
**********************************************************/
sCOLBIND* odbcCONNECT::SQLTablesRSB()
{
    // returns the file-scope static table; never null, never freed
    return _SQLTablesRSB;
}
/**********************************************************
SQLStatisticsRSBCount
Count of structures in array of column bindings for the
result set of SQLStatistics.
**********************************************************/
UWORD odbcCONNECT::SQLStatisticsRSBCount()
{
    // array-length idiom: total bytes / bytes per element
    return sizeof(_SQLStatisticsRSB) / sizeof(_SQLStatisticsRSB[0]);
}
/**********************************************************
    SQLStatisticsRSB
    Address of array of structures describing column bindings
    for the result set of SQLStatistics.
**********************************************************/
sCOLBIND* odbcCONNECT::SQLStatisticsRSB()
{
    // returns the file-scope static table; never null, never freed
    return _SQLStatisticsRSB;
}
/**********************************************************
SQLColumnPrivilegesRSBCount
Count of structures in array of column bindings for the
result set of SQLColumnPrivileges.
**********************************************************/
UWORD odbcCONNECT::SQLColumnPrivilegesRSBCount()
{
    // array-length idiom: total bytes / bytes per element
    return sizeof(_SQLColumnPrivilegesRSB) / sizeof(_SQLColumnPrivilegesRSB[0]);
}
/**********************************************************
    SQLColumnPrivilegesRSB
    Address of array of structures describing column bindings
    for the result set of SQLColumnPrivileges.
**********************************************************/
sCOLBIND* odbcCONNECT::SQLColumnPrivilegesRSB()
{
    // returns the file-scope static table; never null, never freed
    return _SQLColumnPrivilegesRSB;
}
/**********************************************************
SQLProceduresRSBCount
Count of structures in array of column bindings for the
result set of SQLProcedures.
**********************************************************/
UWORD odbcCONNECT::SQLProceduresRSBCount()
{
    // array-length idiom: total bytes / bytes per element
    return sizeof(_SQLProceduresRSB) / sizeof(_SQLProceduresRSB[0]);
}
/**********************************************************
    SQLProceduresRSB
    Address of array of structures describing column bindings
    for the result set of SQLProcedures.
**********************************************************/
sCOLBIND* odbcCONNECT::SQLProceduresRSB()
{
    // returns the file-scope static table; never null, never freed
    return _SQLProceduresRSB;
}
/**********************************************************
SQLForeignKeysRSBCount
Count of structures in array of column bindings for the
result set of SQLForeignKeys.
**********************************************************/
UWORD odbcCONNECT::SQLForeignKeysRSBCount()
{
    // array-length idiom: total bytes / bytes per element
    return sizeof(_SQLForeignKeysRSB) / sizeof(_SQLForeignKeysRSB[0]);
}
/**********************************************************
    SQLForeignKeysRSB
    Address of array of structures describing column bindings
    for the result set of SQLForeignKeys.
**********************************************************/
sCOLBIND* odbcCONNECT::SQLForeignKeysRSB()
{
    // returns the file-scope static table; never null, never freed
    return _SQLForeignKeysRSB;
}
/**********************************************************
SQLTablePrivilegesRSBCount
Count of structures in array of column bindings for the
result set of SQLTablePrivileges.
**********************************************************/
UWORD odbcCONNECT::SQLTablePrivilegesRSBCount()
{
    // array-length idiom: total bytes / bytes per element
    return sizeof(_SQLTablePrivilegesRSB) / sizeof(_SQLTablePrivilegesRSB[0]);
}
/**********************************************************
    SQLTablePrivilegesRSB
    Address of array of structures describing column bindings
    for the result set of SQLTablePrivileges.
**********************************************************/
sCOLBIND* odbcCONNECT::SQLTablePrivilegesRSB()
{
    // returns the file-scope static table; never null, never freed
    return _SQLTablePrivilegesRSB;
}
/**********************************************************
SQLPrimaryKeysRSBCount
Count of structures in array of column bindings for the
result set of SQLPrimaryKeys.
**********************************************************/
UWORD odbcCONNECT::SQLPrimaryKeysRSBCount()
{
    // array-length idiom: total bytes / bytes per element
    return sizeof(_SQLPrimaryKeysRSB) / sizeof(_SQLPrimaryKeysRSB[0]);
}
/**********************************************************
    SQLPrimaryKeysRSB
    Address of array of structures describing column bindings
    for the result set of SQLPrimaryKeys.
**********************************************************/
sCOLBIND* odbcCONNECT::SQLPrimaryKeysRSB()
{
    // returns the file-scope static table; never null, never freed
    return _SQLPrimaryKeysRSB;
}
/**********************************************************
EnumColumns
Calls SQLColumns and enumerates the result set, passing
each column's data attributes as a structure to the
callback function supplied by the caller.
**********************************************************/
RETCODE odbcCONNECT::EnumColumns(
    LPUSTR szTableQualifier,
    LPUSTR szTableOwner,
    LPUSTR szTableName,
    LPUSTR szColumnName,
    pfENUMCOLUMNS pfEnum,    // callback invoked once per result-set row
    void* pUser              // opaque context forwarded to the callback
)
{
    // make a cursor
    odbcCURSOR* pCursor = AllocCursor();
    RETCODE ret;
    if (!pCursor)
    {
        SetRC(SQL_ALLOC_FAILED);
        return lastRC();
    }
    auto psResultSet = new sSQLCOLUMNSRESULTSET;
    if (!psResultSet)
    {
        // NOTE(review): plain new throws rather than returning null, so this
        // branch is dead unless a non-throwing allocator is in effect.
        SetRC(SQL_ALLOC_FAILED);
        DeAllocCursor(pCursor);
        return lastRC();
    }
    // bind columns for result set
    pCursor->BindCol(
        SQLColumnsRSB(),
        SQLColumnsRSBCount(),
        psResultSet);
    if (!pCursor->sqlsuccess())
    {
        // propagate the cursor's error code, then clean up both allocations
        ret = pCursor->lastRC();
        delete psResultSet;
        DeAllocCursor(pCursor);
        return ret;
    }
    // run the catalog query (SQLColumns) with the caller's filter patterns
    pCursor->Columns(
        szTableQualifier,
        szTableOwner,
        szTableName,
        szColumnName
    );
    if (!pCursor->sqlsuccess())
    {
        ret = pCursor->lastRC();
        delete psResultSet;
        DeAllocCursor(pCursor);
        return ret;
    }
    // cycle through the result set; stop early when the callback returns
    // anything other than SQL_SUCCESS (its value becomes our return value)
    for (
        pCursor->Fetch();
        pCursor->sqlsuccess();
        pCursor->Fetch()
    )
    {
        ret = (*pfEnum)(*psResultSet, pUser);
        if (ret != SQL_SUCCESS)
        {
            DeAllocCursor(pCursor);
            delete psResultSet;
            return ret;
        }
    }
    delete psResultSet;
    DeAllocCursor(pCursor);
    return lastRC();
}
/**********************************************************
EnumSpecialColumns
Calls SQLSpecialColumns and enumerates the result set,
passing each column's data attributes as a structure to
the callback function supplied by the caller.
**********************************************************/
RETCODE odbcCONNECT::EnumSpecialColumns(
    UWORD fColType,
    LPUSTR szTableQualifier,
    LPUSTR szTableOwner,
    LPUSTR szTableName,
    UWORD fScope,
    UWORD fNullable,
    pfENUMSPECIALCOL pfEnum,
    void* pUser
    )
{
    // Calls SQLSpecialColumns and passes each fetched row to pfEnum.
    // Returns the first non-success code encountered, or lastRC().
    odbcCURSOR* pCursor = AllocCursor();
    RETCODE ret;
    if (!pCursor)
    {
        SetRC(SQL_ALLOC_FAILED);
        return lastRC();
    }
    // Row buffer filled by the column bindings on each fetch.
    auto psResultSet = new sSQLSPECIALCOLRESULTSET;
    if (!psResultSet)
    {
        SetRC(SQL_ALLOC_FAILED);
        DeAllocCursor(pCursor);
        return lastRC();
    }
    // bind columns for result set
    pCursor->BindCol(
        SQLSpecialColumnsRSB(),
        SQLSpecialColumnsRSBCount(),
        psResultSet);
    if (!pCursor->sqlsuccess())
    {
        // Fix: previously reported SQL_ALLOC_FAILED here, masking the real
        // bind error; propagate the cursor's return code as every sibling
        // Enum* routine does.
        ret = pCursor->lastRC();
        delete psResultSet;
        DeAllocCursor(pCursor);
        return ret;
    }
    // run SQLSpecialColumns with the caller's filters
    pCursor->SpecialColumns(
        fColType,
        szTableQualifier,
        szTableOwner,
        szTableName,
        fScope,
        fNullable
        );
    if (!pCursor->sqlsuccess())
    {
        ret = pCursor->lastRC();
        delete psResultSet;
        DeAllocCursor(pCursor);
        return ret;
    }
    // cycle through the result set, handing each row to the callback
    for (
        pCursor->Fetch();
        pCursor->sqlsuccess();
        pCursor->Fetch()
        )
    {
        ret = (*pfEnum)(*psResultSet, pUser);
        if (ret != SQL_SUCCESS)
        {
            DeAllocCursor(pCursor);
            delete psResultSet;
            return ret;
        }
    }
    delete psResultSet;
    DeAllocCursor(pCursor);
    return lastRC();
}
/**********************************************************
EnumTables
Calls SQLTables and enumerates the result set,
passing each table's data attributes as a structure to
the callback function supplied by the caller.
**********************************************************/
RETCODE odbcCONNECT::EnumTables(
    LPUSTR szTableQualifier,
    LPUSTR szTableOwner,
    LPUSTR szTableName,
    LPUSTR szTableType,
    pfENUMTABLES pfEnum,
    void* pUser
    )
{
    // Cursor for the catalog query.
    odbcCURSOR* pCursor = AllocCursor();
    if (!pCursor)
    {
        SetRC(SQL_ALLOC_FAILED);
        return lastRC();
    }
    // Row buffer filled through the column bindings on every fetch.
    auto psResultSet = new sSQLTABLESRESULTSET;
    if (!psResultSet)
    {
        SetRC(SQL_ALLOC_FAILED);
        DeAllocCursor(pCursor);
        return lastRC();
    }
    // Single cleanup routine for all exit paths below.
    auto cleanup = [&]()
    {
        delete psResultSet;
        DeAllocCursor(pCursor);
    };
    // Bind result-set columns, then execute SQLTables.
    pCursor->BindCol(SQLTablesRSB(), SQLTablesRSBCount(), psResultSet);
    if (pCursor->sqlsuccess())
        pCursor->Tables(szTableQualifier, szTableOwner, szTableName, szTableType);
    if (!pCursor->sqlsuccess())
    {
        RETCODE ret = pCursor->lastRC();
        cleanup();
        return ret;
    }
    // Enumerate rows; stop as soon as the callback reports non-success.
    for (pCursor->Fetch(); pCursor->sqlsuccess(); pCursor->Fetch())
    {
        RETCODE ret = (*pfEnum)(*psResultSet, pUser);
        if (ret != SQL_SUCCESS)
        {
            cleanup();
            return ret;
        }
    }
    cleanup();
    return lastRC();
}
/**********************************************************
EnumStatistics
Calls SQLStatistics and enumerates the result set,
passing each table's data attributes as a structure to
the callback function supplied by the caller.
**********************************************************/
RETCODE odbcCONNECT::EnumStatistics(
    LPUSTR szTableQualifier,
    LPUSTR szTableOwner,
    LPUSTR szTableName,
    UWORD fUnique,
    UWORD fAccuracy,
    pfENUMSTATISTICS pfEnum,
    void* pUser
    )
{
    // Cursor for the catalog query.
    odbcCURSOR* pCursor = AllocCursor();
    if (!pCursor)
    {
        SetRC(SQL_ALLOC_FAILED);
        return lastRC();
    }
    // Row buffer filled through the column bindings on every fetch.
    auto psResultSet = new sSQLSTATISTICSRESULTSET;
    if (!psResultSet)
    {
        SetRC(SQL_ALLOC_FAILED);
        DeAllocCursor(pCursor);
        return lastRC();
    }
    // Single cleanup routine for all exit paths below.
    auto cleanup = [&]()
    {
        delete psResultSet;
        DeAllocCursor(pCursor);
    };
    // Bind result-set columns, then execute SQLStatistics.
    pCursor->BindCol(SQLStatisticsRSB(), SQLStatisticsRSBCount(), psResultSet);
    if (pCursor->sqlsuccess())
        pCursor->Statistics(szTableQualifier, szTableOwner, szTableName, fUnique, fAccuracy);
    if (!pCursor->sqlsuccess())
    {
        RETCODE ret = pCursor->lastRC();
        cleanup();
        return ret;
    }
    // Enumerate rows; stop as soon as the callback reports non-success.
    for (pCursor->Fetch(); pCursor->sqlsuccess(); pCursor->Fetch())
    {
        RETCODE ret = (*pfEnum)(*psResultSet, pUser);
        if (ret != SQL_SUCCESS)
        {
            cleanup();
            return ret;
        }
    }
    cleanup();
    return lastRC();
}
/**********************************************************
GetTypeInfo
Calls SQLGetTypeInfo. Pass the flag of a specific type,
and it returns the type's attributes in the result
set structure.
**********************************************************/
RETCODE odbcCONNECT::GetTypeInfo(
    UWORD fSQLType,
    sSQLTYPEINFORESULTSET& rsResultSet
    )
{
    // Calls SQLGetTypeInfo for a single type and copies the first (and
    // only expected) row of the result set into rsResultSet.
    odbcCURSOR* pCursor = AllocCursor();
    RETCODE ret;
    // Fix: the original dereferenced pCursor without checking the
    // allocation result, unlike every sibling Enum*/Get* routine.
    if (!pCursor)
    {
        SetRC(SQL_ALLOC_FAILED);
        return lastRC();
    }
    auto psResultSet = new sSQLTYPEINFORESULTSET;
    if (!psResultSet)
    {
        SetRC(SQL_ALLOC_FAILED);
        DeAllocCursor(pCursor);
        return lastRC();
    }
    // bind columns for result set
    pCursor->BindCol(
        SQLTypeInfoRSB(),
        SQLTypeInfoRSBCount(),
        psResultSet);
    if (!pCursor->sqlsuccess())
    {
        ret = pCursor->lastRC();
        delete psResultSet;
        DeAllocCursor(pCursor);
        return ret;
    }
    // request the attributes of the requested type
    pCursor->GetTypeInfo(
        fSQLType
        );
    if (!pCursor->sqlsuccess())
    {
        ret = pCursor->lastRC();
        delete psResultSet;
        DeAllocCursor(pCursor);
        return ret;
    }
    // get the result set (first one only; there should be only one).
    pCursor->Fetch();
    // return the result set to the user
    ret = pCursor->lastRC();
    if (pCursor->sqlsuccess())
    {
        rsResultSet = *psResultSet;
    }
    delete psResultSet;
    DeAllocCursor(pCursor);
    return ret;
}
/**********************************************************
EnumTypeInfo
Calls SQLGetTypeInfo with flag to get attributes of
all types and enumerates the result set,
passing each table's data attributes as a structure to
the callback function supplied by the caller.
**********************************************************/
RETCODE odbcCONNECT::EnumTypeInfo(
    pfENUMTYPEINFO pfEnum,
    void* pUser)
{
    // Cursor for the catalog query.
    odbcCURSOR* pCursor = AllocCursor();
    if (!pCursor)
    {
        SetRC(SQL_ALLOC_FAILED);
        return lastRC();
    }
    // Row buffer filled by the column bindings on each fetch.
    auto psResultSet = new sSQLTYPEINFORESULTSET;
    if (!psResultSet)
    {
        SetRC(SQL_ALLOC_FAILED);
        DeAllocCursor(pCursor);
        return lastRC();
    }
    // Single exit-path cleanup for the two resources above.
    auto cleanup = [&]()
    {
        delete psResultSet;
        DeAllocCursor(pCursor);
    };
    // Bind the result-set columns, then request info on every type.
    pCursor->BindCol(SQLTypeInfoRSB(), SQLTypeInfoRSBCount(), psResultSet);
    if (pCursor->sqlsuccess())
        pCursor->GetTypeInfo(SQL_ALL_TYPES);
    if (!pCursor->sqlsuccess())
    {
        RETCODE ret = pCursor->lastRC();
        cleanup();
        return ret;
    }
    // Pass each fetched row to the callback; stop on first non-success.
    for (pCursor->Fetch(); pCursor->sqlsuccess(); pCursor->Fetch())
    {
        RETCODE ret = (*pfEnum)(*psResultSet, pUser);
        if (ret != SQL_SUCCESS)
        {
            cleanup();
            return ret;
        }
    }
    cleanup();
    return lastRC();
}
// new in v2.0
/**********************************************************
EnumProcedureColumns
Calls SQLProcedureColumns and enumerates the result set,
passing each column's data attributes as a structure to
the callback function supplied by the caller.
**********************************************************/
RETCODE odbcCONNECT::EnumProcedureColumns(
    LPUSTR szProcQualifier,
    LPUSTR szProcOwner,
    LPUSTR szProcName,
    LPUSTR szColumnName,
    pfENUMPROCEDURECOL pfEnum,
    void* pUser
    )
{
    // Cursor for the catalog query.
    odbcCURSOR* pCursor = AllocCursor();
    if (!pCursor)
    {
        SetRC(SQL_ALLOC_FAILED);
        return lastRC();
    }
    // Row buffer filled by the column bindings on each fetch.
    auto psResultSet = new sPROCEDURECOLUMNSRESULTSET;
    if (!psResultSet)
    {
        SetRC(SQL_ALLOC_FAILED);
        DeAllocCursor(pCursor);
        return lastRC();
    }
    // Releases both resources on every exit path below.
    auto cleanup = [&]()
    {
        delete psResultSet;
        DeAllocCursor(pCursor);
    };
    // Bind the result-set columns, then execute SQLProcedureColumns.
    pCursor->BindCol(SQLProcedureColumnsRSB(), SQLProcedureColumnsRSBCount(), psResultSet);
    if (pCursor->sqlsuccess())
        pCursor->ProcedureColumns(szProcQualifier, szProcOwner, szProcName, szColumnName);
    if (!pCursor->sqlsuccess())
    {
        RETCODE ret = pCursor->lastRC();
        cleanup();
        return ret;
    }
    // Enumerate rows; stop as soon as the callback reports non-success.
    for (pCursor->Fetch(); pCursor->sqlsuccess(); pCursor->Fetch())
    {
        RETCODE ret = (*pfEnum)(*psResultSet, pUser);
        if (ret != SQL_SUCCESS)
        {
            cleanup();
            return ret;
        }
    }
    cleanup();
    return lastRC();
}
/**********************************************************
EnumProcedures
Calls SQLProcedures and enumerates the result set,
passing each column's data attributes as a structure to
the callback function supplied by the caller.
**********************************************************/
RETCODE odbcCONNECT::EnumProcedures(
    LPUSTR szProcQualifier,
    LPUSTR szProcOwner,
    LPUSTR szProcName,
    pfENUMPROCEDURES pfEnum,
    void* pUser
    )
{
    // Cursor for the catalog query.
    odbcCURSOR* pCursor = AllocCursor();
    if (!pCursor)
    {
        SetRC(SQL_ALLOC_FAILED);
        return lastRC();
    }
    // Row buffer filled by the column bindings on each fetch.
    auto psResultSet = new sPROCEDURESRESULTSET;
    if (!psResultSet)
    {
        SetRC(SQL_ALLOC_FAILED);
        DeAllocCursor(pCursor);
        return lastRC();
    }
    // Releases both resources on every exit path below.
    auto cleanup = [&]()
    {
        delete psResultSet;
        DeAllocCursor(pCursor);
    };
    // Bind the result-set columns, then execute SQLProcedures.
    pCursor->BindCol(SQLProceduresRSB(), SQLProceduresRSBCount(), psResultSet);
    if (pCursor->sqlsuccess())
        pCursor->Procedures(szProcQualifier, szProcOwner, szProcName);
    if (!pCursor->sqlsuccess())
    {
        RETCODE ret = pCursor->lastRC();
        cleanup();
        return ret;
    }
    // Enumerate rows; stop as soon as the callback reports non-success.
    for (pCursor->Fetch(); pCursor->sqlsuccess(); pCursor->Fetch())
    {
        RETCODE ret = (*pfEnum)(*psResultSet, pUser);
        if (ret != SQL_SUCCESS)
        {
            cleanup();
            return ret;
        }
    }
    cleanup();
    return lastRC();
}
/**********************************************************
EnumColumnPrivileges
Calls SQLColumnPrivileges and enumerates the result set,
passing each column's data attributes as a structure to
the callback function supplied by the caller.
**********************************************************/
RETCODE odbcCONNECT::EnumColumnPrivileges(
    LPUSTR szTableQualifier,
    LPUSTR szTableOwner,
    LPUSTR szTableName,
    LPUSTR szColumnName,
    pfENUMCOLPRIVS pfEnum,
    void* pUser
    )
{
    // Cursor for the catalog query.
    odbcCURSOR* pCursor = AllocCursor();
    if (!pCursor)
    {
        SetRC(SQL_ALLOC_FAILED);
        return lastRC();
    }
    // Row buffer filled by the column bindings on each fetch.
    auto psResultSet = new sCOLUMNPRIVILEGESRESULTSET;
    if (!psResultSet)
    {
        SetRC(SQL_ALLOC_FAILED);
        DeAllocCursor(pCursor);
        return lastRC();
    }
    // Releases both resources on every exit path below.
    auto cleanup = [&]()
    {
        delete psResultSet;
        DeAllocCursor(pCursor);
    };
    // Bind the result-set columns, then execute SQLColumnPrivileges.
    pCursor->BindCol(SQLColumnPrivilegesRSB(), SQLColumnPrivilegesRSBCount(), psResultSet);
    if (pCursor->sqlsuccess())
        pCursor->ColumnPrivileges(szTableQualifier, szTableOwner, szTableName, szColumnName);
    if (!pCursor->sqlsuccess())
    {
        RETCODE ret = pCursor->lastRC();
        cleanup();
        return ret;
    }
    // Enumerate rows; stop as soon as the callback reports non-success.
    for (pCursor->Fetch(); pCursor->sqlsuccess(); pCursor->Fetch())
    {
        RETCODE ret = (*pfEnum)(*psResultSet, pUser);
        if (ret != SQL_SUCCESS)
        {
            cleanup();
            return ret;
        }
    }
    cleanup();
    return lastRC();
}
/**********************************************************
EnumTablePrivileges
Calls SQLTablePrivileges and enumerates the result set,
passing each column's data attributes as a structure to
the callback function supplied by the caller.
**********************************************************/
RETCODE odbcCONNECT::EnumTablePrivileges(
    LPUSTR szTableQualifier,
    LPUSTR szTableOwner,
    LPUSTR szTableName,
    pfENUMTABLEPRIVS pfEnum,
    void* pUser
    )
{
    // Cursor for the catalog query.
    odbcCURSOR* pCursor = AllocCursor();
    if (!pCursor)
    {
        SetRC(SQL_ALLOC_FAILED);
        return lastRC();
    }
    // Row buffer filled by the column bindings on each fetch.
    auto psResultSet = new sTABLEPRIVILEGESRESULTSET;
    if (!psResultSet)
    {
        SetRC(SQL_ALLOC_FAILED);
        DeAllocCursor(pCursor);
        return lastRC();
    }
    // Releases both resources on every exit path below.
    auto cleanup = [&]()
    {
        delete psResultSet;
        DeAllocCursor(pCursor);
    };
    // Bind the result-set columns, then execute SQLTablePrivileges.
    pCursor->BindCol(SQLTablePrivilegesRSB(), SQLTablePrivilegesRSBCount(), psResultSet);
    if (pCursor->sqlsuccess())
        pCursor->TablePrivileges(szTableQualifier, szTableOwner, szTableName);
    if (!pCursor->sqlsuccess())
    {
        RETCODE ret = pCursor->lastRC();
        cleanup();
        return ret;
    }
    // Enumerate rows; stop as soon as the callback reports non-success.
    for (pCursor->Fetch(); pCursor->sqlsuccess(); pCursor->Fetch())
    {
        RETCODE ret = (*pfEnum)(*psResultSet, pUser);
        if (ret != SQL_SUCCESS)
        {
            cleanup();
            return ret;
        }
    }
    cleanup();
    return lastRC();
}
/**********************************************************
EnumForeignKeys
Calls SQLForeignKeys and enumerates the result set,
passing each column's data attributes as a structure to
the callback function supplied by the caller.
**********************************************************/
RETCODE odbcCONNECT::EnumForeignKeys(
    LPUSTR szPkTableQualifier,
    LPUSTR szPkTableOwner,
    LPUSTR szPkTableName,
    LPUSTR szFkTableQualifier,
    LPUSTR szFkTableOwner,
    LPUSTR szFkTableName,
    pfENUMFOREIGNKEYS pfEnum,
    void* pUser
    )
{
    // Cursor for the catalog query.
    odbcCURSOR* pCursor = AllocCursor();
    if (!pCursor)
    {
        SetRC(SQL_ALLOC_FAILED);
        return lastRC();
    }
    // Row buffer filled by the column bindings on each fetch.
    auto psResultSet = new sFOREIGNKEYSRESULTSET;
    if (!psResultSet)
    {
        SetRC(SQL_ALLOC_FAILED);
        DeAllocCursor(pCursor);
        return lastRC();
    }
    // Releases both resources on every exit path below.
    auto cleanup = [&]()
    {
        delete psResultSet;
        DeAllocCursor(pCursor);
    };
    // Bind the result-set columns, then execute SQLForeignKeys over the
    // primary-key side and foreign-key side filters.
    pCursor->BindCol(SQLForeignKeysRSB(), SQLForeignKeysRSBCount(), psResultSet);
    if (pCursor->sqlsuccess())
        pCursor->ForeignKeys(szPkTableQualifier, szPkTableOwner, szPkTableName,
                             szFkTableQualifier, szFkTableOwner, szFkTableName);
    if (!pCursor->sqlsuccess())
    {
        RETCODE ret = pCursor->lastRC();
        cleanup();
        return ret;
    }
    // Enumerate rows; stop as soon as the callback reports non-success.
    for (pCursor->Fetch(); pCursor->sqlsuccess(); pCursor->Fetch())
    {
        RETCODE ret = (*pfEnum)(*psResultSet, pUser);
        if (ret != SQL_SUCCESS)
        {
            cleanup();
            return ret;
        }
    }
    cleanup();
    return lastRC();
}
/**********************************************************
EnumPrimaryKeys
Calls SQLPrimaryKeys and enumerates the result set,
passing each column's data attributes as a structure to
the callback function supplied by the caller.
**********************************************************/
RETCODE odbcCONNECT::EnumPrimaryKeys(
    LPUSTR szTableQualifier,
    LPUSTR szTableOwner,
    LPUSTR szTableName,
    pfENUMPRIMARYKEYS pfEnum,
    void* pUser
    )
{
    // Cursor for the catalog query.
    odbcCURSOR* pCursor = AllocCursor();
    if (!pCursor)
    {
        SetRC(SQL_ALLOC_FAILED);
        return lastRC();
    }
    // Row buffer filled by the column bindings on each fetch.
    auto psResultSet = new sPRIMARYKEYSRESULTSET;
    if (!psResultSet)
    {
        SetRC(SQL_ALLOC_FAILED);
        DeAllocCursor(pCursor);
        return lastRC();
    }
    // Releases both resources on every exit path below.
    auto cleanup = [&]()
    {
        delete psResultSet;
        DeAllocCursor(pCursor);
    };
    // Bind the result-set columns, then execute SQLPrimaryKeys.
    pCursor->BindCol(SQLPrimaryKeysRSB(), SQLPrimaryKeysRSBCount(), psResultSet);
    if (pCursor->sqlsuccess())
        pCursor->PrimaryKeys(szTableQualifier, szTableOwner, szTableName);
    if (!pCursor->sqlsuccess())
    {
        RETCODE ret = pCursor->lastRC();
        cleanup();
        return ret;
    }
    // Enumerate rows; stop as soon as the callback reports non-success.
    for (pCursor->Fetch(); pCursor->sqlsuccess(); pCursor->Fetch())
    {
        RETCODE ret = (*pfEnum)(*psResultSet, pUser);
        if (ret != SQL_SUCCESS)
        {
            cleanup();
            return ret;
        }
    }
    cleanup();
    return lastRC();
}
// end new in v2.0
/**********************************************************
NativeSql
Calls SQLNativeSql to have the driver translate the
given SQL statement into the driver's native SQL dialect.
**********************************************************/
RETCODE odbcCONNECT::NativeSql(
    UCHAR *szSqlStrIn,
    UCHAR *szSqlStr,
    SDWORD cbSqlStrMax,
    SDWORD *pcbSqlStr)
{
    // Ask the driver to translate the statement; record the return code
    // on the connection and hand it back to the caller.
    RETCODE rc = SQLNativeSql(
        hdbc,
        szSqlStrIn,
        SQL_NTS,
        szSqlStr,
        cbSqlStrMax,
        pcbSqlStr);
    SetRC(rc);
    return lastRC();
}
|
// { Driver Code Starts
#include<bits/stdc++.h>
const int mod=1e9+7;
using namespace std;
int lcs(int, int, string, string);
int main()
{
int t,n,k,x,y;
cin>>t;
while(t--)
{
cin>>x>>y; // Take size of both the strings as input
string s1,s2;
cin>>s1>>s2; // Take both the string as input
cout << lcs(x, y, s1, s2) << endl;
}
return 0;
}
// } Driver Code Ends
// function to find longest common subsequence
// Length of the longest common subsequence of s1 (length x) and s2 (length y).
// Classic DP, but with a rolling pair of rows: the original used the
// non-standard VLA `int dp[x+1][y+1]`, which is not legal C++ and risks a
// stack overflow for long strings; this version needs only O(y) heap space.
int lcs(int x, int y, string s1, string s2){
    // prev[j] = LCS(s1[0..i-1), s2[0..j)); cur is the row being built.
    vector<int> prev(y + 1, 0), cur(y + 1, 0);
    for(int i = 1; i <= x; i++){
        for(int j = 1; j <= y; j++){
            if(s1[i-1] == s2[j-1]){
                // Matching tail characters extend the diagonal solution.
                cur[j] = 1 + prev[j-1];
            }else{
                // Otherwise drop one character from either string.
                cur[j] = max(prev[j], cur[j-1]);
            }
        }
        swap(prev, cur);   // cur becomes prev for the next row
    }
    return prev[y];
}
|
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "softmax.h"
#include <ie_parallel.hpp>
#include <cpu/x64/jit_generator.hpp>
#include <cpu/x64/injectors/jit_uni_eltwise_injector.hpp>
#include <onednn/dnnl.h>
#include "utils/bfloat16.hpp"
#include "emitters/jit_bf16_emitters.hpp"
#include <algorithm>
#include <cassert>
#include <vector>
using namespace InferenceEngine;
using namespace dnnl;
using namespace dnnl::impl::cpu;
using namespace dnnl::impl::cpu::x64;
using namespace dnnl::impl::utils;
#define GET_OFF(field) offsetof(jit_args_softmax, field)
namespace ov {
namespace intel_cpu {
// Runtime arguments for one invocation of the JIT softmax kernel: the kernel
// reduces 'work_amount' elements that are 'stride' bytes apart (callers pass
// the channel count and the H*W plane size in bytes, respectively).
struct jit_args_softmax {
    const void* src;    // first input element of the line to normalize
    void* dst;          // first output element
    size_t src_stride;  // byte distance between consecutive input elements
    size_t dst_stride;  // byte distance between consecutive output elements
    size_t work_amount; // number of elements to reduce over
};
// Compile-time kernel configuration: input/output precisions (FP32 or BF16).
struct jit_softmax_config_params {
    Precision src_dt;  // source data type
    Precision dst_dt;  // destination data type
};
// Type-erased interface to the generated softmax kernel.  ker_ holds the
// JIT-compiled entry point; operator() dispatches one argument block to it.
struct jit_uni_softmax_kernel {
    void (*ker_)(const jit_args_softmax *);  // set by create_ker(); null until then
    void operator()(const jit_args_softmax *args) { assert(ker_); ker_(args); }
    jit_uni_softmax_kernel() : ker_(nullptr) {}
    virtual ~jit_uni_softmax_kernel() {}
    // Compiles the kernel and populates ker_.
    virtual void create_ker() = 0;
};
// JIT generator for softmax over a strided line of 'work_amount' elements.
// Emits three passes over the line: (1) find the maximum, (2) compute
// exp(x - max) while accumulating the sum, (3) divide by the sum.
template <cpu_isa_t isa>
struct jit_uni_softmax_kernel_f32 : public jit_uni_softmax_kernel, public jit_generator {
    DECLARE_CPU_JIT_AUX_FUNCTIONS(jit_uni_softmax_kernel_f32)

    jit_uni_softmax_kernel_f32(jit_softmax_config_params jcp) : jcp_(jcp), jit_uni_softmax_kernel(), jit_generator() {}

    void create_ker() override {
        jit_generator::create_kernel();
        ker_ = (decltype(ker_))jit_ker();
    }

    void generate() override {
        exp_injector.reset(new jit_uni_eltwise_injector_f32<isa>(this, dnnl::impl::alg_kind::eltwise_exp, 0.f, 0.f, 1.0f));

        // Software bf16 down-conversion is required where the native
        // avx512_core_bf16 instructions are unavailable.
        if (!mayiuse(avx512_core_bf16) && mayiuse(avx512_core))
            emu_vcvtneps2bf16.reset(new jit_emu_vcvtneps2bf16(this, isa));

        this->preamble();

        mov(reg_src, ptr[reg_params + GET_OFF(src)]);
        mov(reg_dst, ptr[reg_params + GET_OFF(dst)]);
        mov(reg_src_stride, ptr[reg_params + GET_OFF(src_stride)]);
        mov(reg_dst_stride, ptr[reg_params + GET_OFF(dst_stride)]);
        mov(reg_work_amount, ptr[reg_params + GET_OFF(work_amount)]);

        Xbyak::Label max_loop_label;
        Xbyak::Label max_loop_end_label;
        Xbyak::Label exp_loop_label;
        Xbyak::Label exp_loop_end_label;
        Xbyak::Label div_loop_label;
        Xbyak::Label div_loop_end_label;

        // Pass 1: running maximum over the line (numerical stability).
        mov(aux_reg_work_amount, reg_work_amount);
        mov(aux_reg_src, reg_src);
        load_vector(vmm_max, ptr[aux_reg_src], jcp_.src_dt);
        L(max_loop_label); {
            cmp(aux_reg_work_amount, 0);
            jle(max_loop_end_label, T_NEAR);

            load_vector(vmm_val, ptr[aux_reg_src], jcp_.src_dt);

            // Compare val > max.  SSE4.1/AVX2 produce a vector mask in
            // vmm_mask; AVX-512 writes an opmask register directly.
            if (isa == x64::sse41) {
                uni_vmovups(vmm_mask, vmm_val);
                uni_vcmpgtps(vmm_mask, vmm_mask, vmm_max);
            } else if (isa == x64::avx2) {
                uni_vcmpgtps(vmm_mask, vmm_val, vmm_max);
            } else {
                vcmpps(k_mask, vmm_val, vmm_max, _cmp_nle_us);
            }

            // Fix: the previous code executed vptestmd(k_mask, vmm_mask,
            // vmm_mask) before the blend, overwriting the comparison result
            // in k_mask with a test of vmm_mask — a register that is never
            // written on the AVX-512 path.  The vcmpps above already leaves
            // the correct mask in k_mask.
            if (isa == x64::avx512_common) {
                vblendmps(vmm_max | k_mask, vmm_max, vmm_val);
            } else {
                uni_vblendvps(vmm_max, vmm_max, vmm_val, vmm_mask);
            }

            add(aux_reg_src, reg_src_stride);
            sub(aux_reg_work_amount, 1);

            jmp(max_loop_label, T_NEAR);
        }
        L(max_loop_end_label);

        // Pass 2: dst = exp(src - max), accumulating the normalizer.
        mov(aux_reg_work_amount, reg_work_amount);
        mov(aux_reg_src, reg_src);
        mov(aux_reg_dst, reg_dst);
        uni_vpxor(vmm_exp_sum, vmm_exp_sum, vmm_exp_sum);
        L(exp_loop_label); {
            cmp(aux_reg_work_amount, 0);
            jle(exp_loop_end_label, T_NEAR);

            load_vector(vmm_val, ptr[aux_reg_src], jcp_.src_dt);

            uni_vsubps(vmm_val, vmm_val, vmm_max);
            exp_injector->compute_vector_range(vmm_val.getIdx(), vmm_val.getIdx() + 1);
            uni_vaddps(vmm_exp_sum, vmm_exp_sum, vmm_val);

            store_vector(ptr[aux_reg_dst], vmm_val, jcp_.dst_dt);

            add(aux_reg_src, reg_src_stride);
            add(aux_reg_dst, reg_dst_stride);
            sub(aux_reg_work_amount, 1);

            jmp(exp_loop_label, T_NEAR);
        }
        L(exp_loop_end_label);

        // Pass 3: dst /= sum(exp).
        mov(aux_reg_work_amount, reg_work_amount);
        mov(aux_reg_dst, reg_dst);
        L(div_loop_label); {
            cmp(aux_reg_work_amount, 0);
            jle(div_loop_end_label, T_NEAR);

            load_vector(vmm_val, ptr[aux_reg_dst], jcp_.dst_dt);

            uni_vdivps(vmm_val, vmm_val, vmm_exp_sum);

            store_vector(ptr[aux_reg_dst], vmm_val, jcp_.dst_dt);

            add(aux_reg_dst, reg_dst_stride);
            sub(aux_reg_work_amount, 1);

            jmp(div_loop_label, T_NEAR);
        }
        L(div_loop_end_label);

        this->postamble();

        if (!mayiuse(avx512_core_bf16) && mayiuse(avx512_core))
            emu_vcvtneps2bf16->emit_data();

        exp_injector->prepare_table();
    }

private:
    using Vmm = typename conditional3<isa == x64::sse41, Xbyak::Xmm, isa == x64::avx2, Xbyak::Ymm, Xbyak::Zmm>::type;
    size_t vlen = cpu_isa_traits<isa>::vlen;

    // General-purpose registers used by the generated code.
    Xbyak::Reg64 reg_src = r8;
    Xbyak::Reg64 aux_reg_src = r13;
    Xbyak::Reg64 reg_dst = r9;
    Xbyak::Reg64 aux_reg_dst = r15;
    Xbyak::Reg64 reg_work_amount = r11;
    Xbyak::Reg64 aux_reg_work_amount = r12;
    Xbyak::Reg64 reg_src_stride = r14;
    Xbyak::Reg64 reg_dst_stride = r10;
    Xbyak::Reg64 reg_params = abi_param1;

    Vmm vmm_mask = Vmm(0);      // SSE/AVX2 comparison mask
    Vmm vmm_val = Vmm(1);       // current element(s)
    Vmm vmm_max = Vmm(2);       // running maximum
    Vmm vmm_exp_sum = Vmm(3);   // running sum of exponentials
    const Xbyak::Opmask k_mask = Xbyak::Opmask(1);  // AVX-512 comparison mask

    std::unique_ptr<jit_emu_vcvtneps2bf16> emu_vcvtneps2bf16;
    std::shared_ptr<jit_uni_eltwise_injector_f32<isa>> exp_injector;

    jit_softmax_config_params jcp_;

    // Loads one vector, widening BF16 to FP32 via zero-extend + shift.
    inline void load_vector(Vmm vmm_src, const Xbyak::Address &op, Precision src_dt) {
        switch (src_dt) {
            case Precision::FP32:
                uni_vmovups(vmm_src, op);
                break;
            case Precision::BF16:
                vpmovzxwd(vmm_src, op);
                uni_vpslld(vmm_src, vmm_src, 16);
                break;
            default:
                assert(!"unknown src_dt");
        }
    }

    // Stores one vector, narrowing FP32 to BF16 natively or via the emulator.
    inline void store_vector(const Xbyak::Address &op, Vmm vmm_dst, Precision dst_dt) {
        Xbyak::Ymm ymm_dst = Xbyak::Ymm(vmm_dst.getIdx());
        switch (dst_dt) {
            case Precision::FP32:
                uni_vmovups(op, vmm_dst);
                break;
            case Precision::BF16:
                if (mayiuse(avx512_core_bf16))
                    vcvtneps2bf16(ymm_dst, vmm_dst);
                else
                    emu_vcvtneps2bf16->emit_code({static_cast<size_t>(vmm_dst.getIdx())}, {static_cast<size_t>(ymm_dst.getIdx())});
                vmovdqu16(op, ymm_dst);
                break;
            default:
                assert(!"unknown dst_dt");
        }
    }
};
SoftmaxGeneric::SoftmaxGeneric(Precision inpPrc, Precision outPrc)
    : input_prec(inpPrc), output_prec(outPrc) {
    // BF16 output is only available on avx512_core capable hardware.
    if (output_prec == Precision::BF16 && !mayiuse(avx512_core)) {
        IE_THROW() << "SoftmaxGeneric doesn't support BF16 precision on this target.";
    }

    // The kernel configuration carries just the two precisions.
    auto jcp = jit_softmax_config_params();
    jcp.src_dt = inpPrc;
    jcp.dst_dt = outPrc;

    // Select the widest supported ISA; block_size is the SIMD lane count,
    // staying at 1 (scalar-only fallback) when no JIT kernel is built.
    block_size = 1;
    if (mayiuse(x64::avx512_common)) {
        block_size = 16;
        softmax_kernel.reset(new jit_uni_softmax_kernel_f32<x64::avx512_common>(jcp));
    } else if (mayiuse(x64::avx2)) {
        block_size = 8;
        softmax_kernel.reset(new jit_uni_softmax_kernel_f32<x64::avx2>(jcp));
    } else if (mayiuse(x64::sse41)) {
        block_size = 4;
        softmax_kernel.reset(new jit_uni_softmax_kernel_f32<x64::sse41>(jcp));
    }

    if (softmax_kernel)
        softmax_kernel->create_ker();
}
// Computes softmax over the channel axis (C) independently for every
// (b, h, w) position of a B x C x H x W tensor.  Vector-width blocks of the
// H*W plane go through the JIT kernel; the remainder uses the scalar path.
template<typename in_data_t, typename out_data_t>
void SoftmaxGeneric::calculate(const in_data_t *src_data, out_data_t *dst_data, int B, int C, int H, int W) {
    for (int b = 0; b < B; b++) {
        int tail_start = 0;
        if (softmax_kernel) {
            int blocks_num = H*W / block_size;
            parallel_for(blocks_num, [&](int ib) {
                auto arg = jit_args_softmax();
                // Each block starts at a vector-aligned offset within the plane.
                arg.src = src_data + b * C * H * W + ib * block_size;
                arg.dst = dst_data + b * C * H * W + ib * block_size;
                // Byte stride between the same spatial position in
                // consecutive channels.
                arg.src_stride = static_cast<size_t>((size_t)(H) * W * sizeof(in_data_t));
                arg.dst_stride = static_cast<size_t>((size_t)(H) * W * sizeof(out_data_t));
                arg.work_amount = static_cast<size_t>(C);
                (*softmax_kernel)(&arg);
            });
            tail_start = (H*W / block_size) * block_size;
        }
        // Scalar fallback for positions not covered by full vector blocks.
        parallel_for(H * W - tail_start, [&](int i) {
            int offset = i + tail_start;
            // Max over channels for numerical stability.
            float max = src_data[b * C * H * W + offset];
            for (int c = 0; c < C; c++) {
                float val = src_data[b * C * H * W + c * H * W + offset];
                if (val > max) max = val;
            }
            // Shifted exponentials plus the normalizer.
            float expSum = 0;
            for (int c = 0; c < C; c++) {
                dst_data[b * C * H * W + c * H * W + offset] = exp(src_data[b * C * H * W + c * H * W + offset] - max);
                expSum += dst_data[b * C * H * W + c * H * W + offset];
            }
            // Normalize so each channel line sums to one.
            for (int c = 0; c < C; c++) {
                dst_data[b * C * H * W + c * H * W + offset] = dst_data[b * C * H * W + c * H * W + offset] / expSum;
            }
        });
    }
}
// Dispatches to the typed calculate() instantiation matching the configured
// input/output precisions (FP32/BF16 in either position).
void SoftmaxGeneric::execute(const uint8_t *src_data, uint8_t *dst_data, int B, int C, int H, int W) {
    if (Precision::FP32 == input_prec) {
        auto float_src_data = reinterpret_cast<const float*>(src_data);
        if (Precision::FP32 == output_prec) {
            auto float_dst_data = reinterpret_cast<float*>(dst_data);
            calculate(float_src_data, float_dst_data, B, C, H, W);
        } else if (Precision::BF16 == output_prec) {
            auto bf16_dst_data = reinterpret_cast<bfloat16_t*>(dst_data);
            calculate(float_src_data, bf16_dst_data, B, C, H, W);
        } else {
            IE_THROW() << "Unsupported output precision: " << output_prec.name();
        }
    } else if (Precision::BF16 == input_prec) {
        auto bf16_src_data = reinterpret_cast<const bfloat16_t*>(src_data);
        if (Precision::FP32 == output_prec) {
            auto float_dst_data = reinterpret_cast<float*>(dst_data);
            calculate(bf16_src_data, float_dst_data, B, C, H, W);
        } else if (Precision::BF16 == output_prec) {
            auto bf16_dst_data = reinterpret_cast<bfloat16_t*>(dst_data);
            // Fix: the BF16->BF16 path previously passed bf16_dst_data as the
            // source argument, reading from the (uninitialized) destination
            // buffer instead of src_data.
            calculate(bf16_src_data, bf16_dst_data, B, C, H, W);
        } else {
            IE_THROW() << "Unsupported output precision: " << output_prec.name();
        }
    } else {
        IE_THROW() << "Unsupported input precision: " << input_prec.name();
    }
}
} // namespace intel_cpu
} // namespace ov
|
//=================================================================================================
/*!
// \file src/mathtest/smatsmatsub/MCbLCa.cpp
// \brief Source file for the MCbLCa sparse matrix/sparse matrix subtraction math test
//
// Copyright (C) 2012-2018 Klaus Iglberger - All Rights Reserved
//
// This file is part of the Blaze library. You can redistribute it and/or modify it under
// the terms of the New (Revised) BSD License. Redistribution and use in source and binary
// forms, with or without modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other materials
// provided with the distribution.
// 3. Neither the names of the Blaze development group nor the names of its contributors
// may be used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
*/
//=================================================================================================
//*************************************************************************************************
// Includes
//*************************************************************************************************
#include <cstdlib>
#include <iostream>
#include <blaze/math/CompressedMatrix.h>
#include <blaze/math/LowerMatrix.h>
#include <blazetest/mathtest/Creator.h>
#include <blazetest/mathtest/smatsmatsub/OperationTest.h>
#include <blazetest/system/MathTest.h>
//=================================================================================================
//
// MAIN FUNCTION
//
//=================================================================================================
//*************************************************************************************************
int main()
{
std::cout << " Running 'MCbLCa'..." << std::endl;
using blazetest::mathtest::TypeA;
using blazetest::mathtest::TypeB;
try
{
// Matrix type definitions
using MCb = blaze::CompressedMatrix<TypeB>;
using LCa = blaze::LowerMatrix< blaze::CompressedMatrix<TypeA> >;
// Creator type definitions
using CMCb = blazetest::Creator<MCb>;
using CLCa = blazetest::Creator<LCa>;
// Running tests with small matrices
for( size_t i=0UL; i<=6UL; ++i ) {
for( size_t j=0UL; j<=i*i; ++j ) {
for( size_t k=0UL; k<=LCa::maxNonZeros( i ); ++k ) {
RUN_SMATSMATSUB_OPERATION_TEST( CMCb( i, i, j ), CLCa( i, k ) );
}
}
}
// Running tests with large matrices
RUN_SMATSMATSUB_OPERATION_TEST( CMCb( 67UL, 67UL, 7UL ), CLCa( 67UL, 13UL ) );
RUN_SMATSMATSUB_OPERATION_TEST( CMCb( 128UL, 128UL, 16UL ), CLCa( 128UL, 8UL ) );
}
catch( std::exception& ex ) {
std::cerr << "\n\n ERROR DETECTED during sparse matrix/sparse matrix subtraction:\n"
<< ex.what() << "\n";
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
//*************************************************************************************************
|
/*
* Copyright (c) Contributors to the Open 3D Engine Project.
* For complete copyright and license terms please see the LICENSE at the root of this distribution.
*
* SPDX-License-Identifier: Apache-2.0 OR MIT
*
*/
#include <AzCore/Asset/AssetManagerBus.h>
#include <AzCore/Component/TickBus.h>
#include <AzCore/IO/SystemFile.h>
#include <AzFramework/Asset/AssetSystemBus.h>
#include <AzFramework/IO/FileOperations.h>
#include <AzToolsFramework/API/EditorAssetSystemAPI.h>
#include <AzToolsFramework/SourceControl/SourceControlAPI.h>
#include <Editor/View/Windows/Tools/UpgradeTool/FileSaver.h>
#include <ScriptCanvas/Assets/ScriptCanvasFileHandling.h>
namespace ScriptCanvasEditor
{
namespace VersionExplorer
{
// Stores the two callbacks driving the save flow: m_onComplete receives the
// final FileSaveResult (see PerformMove); m_onReadOnlyFile is presumably
// consulted when the target file is write-protected — its use is outside
// this translation-unit excerpt, verify against the save entry point.
FileSaver::FileSaver
    ( AZStd::function<bool()> onReadOnlyFile
    , AZStd::function<void(const FileSaveResult& result)> onComplete)
    : m_onReadOnlyFile(onReadOnlyFile)
    , m_onComplete(onComplete)
{}
// Read-only access to the handle of the source file being saved.
const SourceHandle& FileSaver::GetSource() const
{
    return m_source;
}
// Moves the temporary file produced by a save onto the target path, retrying
// up to remainingAttempts times.  Completion callbacks are always queued on
// the system tick bus so they execute on the main-thread tick.
void FileSaver::PerformMove
    ( AZStd::string tmpFileName
    , AZStd::string target
    , size_t remainingAttempts)
{
    if (remainingAttempts == 0)
    {
        // Out of retries: report the failure and clean up the temp file.
        AZ::SystemTickBus::QueueFunction([this, tmpFileName]()
        {
            FileSaveResult result;
            result.fileSaveError = "Failed to move updated file from temporary location to original destination.";
            result.tempFileRemovalError = RemoveTempFile(tmpFileName);
            m_onComplete(result);
        });
    }
    else if (remainingAttempts == 2)
    {
        auto streamer = AZ::Interface<AZ::IO::IStreamer>::Get();
        // before the last attempt, flush all the caches
        AZ::IO::FileRequestPtr flushRequest = streamer->FlushCaches();
        streamer->SetRequestCompleteCallback(flushRequest
            , [this, remainingAttempts, tmpFileName, target]([[maybe_unused]] AZ::IO::FileRequestHandle request)
        {
            // One last try
            AZ::SystemTickBus::QueueFunction(
                [this, remainingAttempts, tmpFileName, target]() { PerformMove(tmpFileName, target, remainingAttempts - 1); });
        });
        streamer->QueueRequest(flushRequest);
    }
    else
    {
        // the actual move attempt
        auto moveResult = AZ::IO::SmartMove(tmpFileName.c_str(), target.c_str());
        if (moveResult.GetResultCode() == AZ::IO::ResultCode::Success)
        {
            auto streamer = AZ::Interface<AZ::IO::IStreamer>::Get();
            // NOTE(review): this flushRequest is created but never queued on
            // the success path — confirm whether QueueRequest was intended.
            AZ::IO::FileRequestPtr flushRequest = streamer->FlushCache(target.c_str());
            // Bump the slice asset up in the asset processor's queue.
            AzFramework::AssetSystemRequestBus::Broadcast
                (&AzFramework::AssetSystem::AssetSystemRequests::EscalateAssetBySearchTerm, target.c_str());
            // Success: remove the temp file and notify the caller.
            AZ::SystemTickBus::QueueFunction([this, tmpFileName]()
            {
                FileSaveResult result;
                result.tempFileRemovalError = RemoveTempFile(tmpFileName);
                m_onComplete(result);
            });
        }
        else
        {
            AZ_Warning(ScriptCanvas::k_VersionExplorerWindow.data(), false
                , "moving converted file to tmpFileName destination failed: %s, trying again", target.c_str());
            auto streamer = AZ::Interface<AZ::IO::IStreamer>::Get();
            // Flush this file's cache, then retry the move from the
            // completion callback with one fewer attempt remaining.
            AZ::IO::FileRequestPtr flushRequest = streamer->FlushCache(target.c_str());
            streamer->SetRequestCompleteCallback(flushRequest
                , [this, tmpFileName, target, remainingAttempts]([[maybe_unused]] AZ::IO::FileRequestHandle request)
            {
                // Continue saving.
                AZ::SystemTickBus::QueueFunction(
                    [this, tmpFileName, target, remainingAttempts]() { PerformMove(tmpFileName, target, remainingAttempts - 1); });
            });
            streamer->QueueRequest(flushRequest);
        }
    }
}
//! Serializes the graph to a temp file, then — after a source-control edit
//! request resolves — moves the temp file over the original path via
//! PerformMove. The temp-file indirection keeps the Asset Processor from
//! seeing a half-written graph file.
void FileSaver::OnSourceFileReleased(const SourceHandle& source)
{
    AZStd::string fullPath = source.Path().c_str();
    AZStd::string tmpFileName;
    // here we are saving the graph to a temp file instead of the original file and then copying the temp file to the original file.
    // This ensures that AP will not a get a file change notification on an incomplete graph file causing it to fail processing.
    // Temp files are ignored by AP.
    if (!AZ::IO::CreateTempFileName(fullPath.c_str(), tmpFileName))
    {
        FileSaveResult result;
        result.fileSaveError = "Failure to create temporary file name";
        m_onComplete(result);
        return;
    }

    // Write the graph to the temp file.
    AZStd::string saveError;
    AZ::IO::FileIOStream fileStream(tmpFileName.c_str(), AZ::IO::OpenMode::ModeWrite | AZ::IO::OpenMode::ModeText);
    if (fileStream.IsOpen())
    {
        auto saveOutcome = ScriptCanvasEditor::SaveToStream(source, fileStream);
        if (!saveOutcome.IsSuccess())
        {
            saveError = saveOutcome.TakeError();
        }
        fileStream.Close();
    }
    // NOTE(review): a stream that failed to open falls through with an empty
    // saveError and is treated as success — confirm whether that is intended.

    if (!saveError.empty())
    {
        FileSaveResult result;
        result.fileSaveError = AZStd::string::format("Save asset data to temporary file failed: %s", saveError.c_str());
        m_onComplete(result);
        return;
    }

    // Ask source control to make the destination editable; the lambda runs
    // when the edit request resolves and performs the actual move.
    AzToolsFramework::SourceControlCommandBus::Broadcast
        ( &AzToolsFramework::SourceControlCommandBus::Events::RequestEdit
        , fullPath.c_str()
        , true
        , [this, fullPath, tmpFileName]([[maybe_unused]] bool success, const AzToolsFramework::SourceControlFileInfo& info)
        {
            constexpr const size_t k_maxAttemps = 10;

            if (!info.IsReadOnly())
            {
                PerformMove(tmpFileName, fullPath, k_maxAttemps);
            }
            else if (m_onReadOnlyFile && m_onReadOnlyFile())
            {
                // Client approved clearing the read-only flag manually.
                AZ::IO::SystemFile::SetWritable(info.m_filePath.c_str(), true);
                PerformMove(tmpFileName, fullPath, k_maxAttemps);
            }
            else
            {
                // Destination stayed read-only; report failure and clean up.
                FileSaveResult result;
                result.fileSaveError = "Source file was and remained read-only";
                result.tempFileRemovalError = RemoveTempFile(tmpFileName);
                m_onComplete(result);
            }
        });
}
//! Deletes the temporary save file, if it still exists.
//! @param tempFile path of the temp file to remove
//! @return empty string on success, otherwise a human-readable error
AZStd::string FileSaver::RemoveTempFile(AZStd::string_view tempFile)
{
    AZ::IO::FileIOBase* fileIO = AZ::IO::FileIOBase::GetInstance();
    if (!fileIO)
    {
        return "No FileIO instance";
    }

    // string_view::data() is not guaranteed to be null-terminated; materialize
    // a proper string before handing the path to C-string based FileIO APIs
    // (the original passed .data() directly, which can over-read).
    const AZStd::string tempFilePath(tempFile);

    if (fileIO->Exists(tempFilePath.c_str()) && !fileIO->Remove(tempFilePath.c_str()))
    {
        return AZStd::string::format("Failed to remove temporary file: %s", tempFilePath.c_str());
    }

    return "";
}
//! Entry point: saves `source` to its own path.
//! Flushes the streamer cache for the file first; the actual write happens in
//! OnSourceFileReleased, queued on the system tick bus from the flush callback.
void FileSaver::Save(const SourceHandle& source)
{
    m_source = source;

    if (source.Path().empty())
    {
        FileSaveResult result;
        result.fileSaveError = "No save location specified";
        m_onComplete(result);
    }
    else
    {
        auto streamer = AZ::Interface<AZ::IO::IStreamer>::Get();
        AZ::IO::FileRequestPtr flushRequest = streamer->FlushCache(source.Path().c_str());
        streamer->SetRequestCompleteCallback(flushRequest, [this]([[maybe_unused]] AZ::IO::FileRequestHandle request)
        {
            // The guard makes the save single-shot even if the completion
            // callback were ever invoked more than once.
            AZStd::lock_guard<AZStd::mutex> lock(m_mutex);
            if (!m_sourceFileReleased)
            {
                m_sourceFileReleased = true;
                AZ::SystemTickBus::QueueFunction([this]() { this->OnSourceFileReleased(m_source); });
            }
        });
        streamer->QueueRequest(flushRequest);
    }
}
}
}
|
//-----------------------------------------------------------------------------
// Copyright (c) 2013 GarageGames, LLC
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//-----------------------------------------------------------------------------
#include "persistence/taml/binary/tamlBinaryReader.h"
#ifndef _ZIPSUBSTREAM_H_
#include "core/util/zip/zipSubStream.h"
#endif
// Debug Profiling.
#include "platform/profiler.h"
//-----------------------------------------------------------------------------
// Reads one object graph from a binary Taml stream.
// Stream layout: signature string, version Id (U32), compressed flag (bool),
// then the root element — optionally wrapped in a zip stream.
// Returns the root SimObject, or NULL if the signature does not match.
SimObject* TamlBinaryReader::read( FileStream& stream )
{
    // Debug Profiling.
    PROFILE_SCOPE(TamlBinaryReader_Read);

    // Read Taml signature.
    StringTableEntry tamlSignature = stream.readSTString();

    // Is the signature correct?
    if ( tamlSignature != StringTable->insert( TAML_SIGNATURE ) )
    {
        // Warn.
        Con::warnf("Taml: Cannot read binary file as signature is incorrect '%s'.", tamlSignature );
        return NULL;
    }

    // Read version Id.
    U32 versionId;
    stream.read( &versionId );

    // Read compressed flag.
    bool compressed;
    stream.read( &compressed );

    SimObject* pSimObject = NULL;

    // Is the stream compressed?
    if ( compressed )
    {
        // Yes, so attach zip stream and parse the payload through it.
        ZipSubRStream zipStream;
        zipStream.attachStream( &stream );

        // Parse element.
        pSimObject = parseElement( zipStream, versionId );

        // Detach zip stream.
        zipStream.detachStream();
    }
    else
    {
        // No, so parse element.
        pSimObject = parseElement( stream, versionId );
    }

    return pSimObject;
}
//-----------------------------------------------------------------------------
// Clears per-parse state so the reader can be reused on another stream:
// stale reference Ids from a previous file must not resolve here.
void TamlBinaryReader::resetParse( void )
{
    // Debug Profiling.
    PROFILE_SCOPE(TamlBinaryReader_ResetParse);

    // Clear object reference map.
    mObjectReferenceMap.clear();
}
//-----------------------------------------------------------------------------
// Parses one element from the stream into a registered SimObject.
// Stream layout: type name, object name, reference Id (U32), reference-to Id
// (U32), then attributes, children and custom elements.
// A non-zero reference-to Id short-circuits to a previously parsed object;
// a non-zero reference Id registers this object for later back-references.
// @param versionId format version — forwarded to the sub-parsers.
// Returns NULL on failure.
SimObject* TamlBinaryReader::parseElement( Stream& stream, const U32 versionId )
{
    // Debug Profiling.
    PROFILE_SCOPE(TamlBinaryReader_ParseElement);

    SimObject* pSimObject = NULL;

#ifdef TORQUE_DEBUG
    // Format the type location (stream offset) for diagnostics.
    char typeLocationBuffer[64];
    dSprintf( typeLocationBuffer, sizeof(typeLocationBuffer), "Taml [format='binary' offset=%u]", stream.getPosition() );
#endif

    // Fetch element name.
    StringTableEntry typeName = stream.readSTString();

    // Fetch object name.
    StringTableEntry objectName = stream.readSTString();

    // Read references.
    U32 tamlRefId;
    U32 tamlRefToId;
    stream.read( &tamlRefId );
    stream.read( &tamlRefToId );

    // Do we have a reference to Id?
    if ( tamlRefToId != 0 )
    {
        // Yes, so fetch reference.
        typeObjectReferenceHash::Iterator referenceItr = mObjectReferenceMap.find( tamlRefToId );

        // Did we find the reference?
        if ( referenceItr == mObjectReferenceMap.end() )
        {
            // No, so warn.
            Con::warnf( "Taml: Could not find a reference Id of '%d'", tamlRefToId );
            return NULL;
        }

        // Return object.
        return referenceItr->value;
    }

#ifdef TORQUE_DEBUG
    // Create type.
    pSimObject = Taml::createType( typeName, mpTaml, typeLocationBuffer );
#else
    // Create type.
    pSimObject = Taml::createType( typeName, mpTaml );
#endif

    // Finish if we couldn't create the type.
    if ( pSimObject == NULL )
        return NULL;

    // Find Taml callbacks.
    TamlCallbacks* pCallbacks = dynamic_cast<TamlCallbacks*>( pSimObject );

    // Are there any Taml callbacks?
    if ( pCallbacks != NULL )
    {
        // Yes, so call it.
        mpTaml->tamlPreRead( pCallbacks );
    }

    // Parse attributes (must precede registration so fields are set first).
    parseAttributes( stream, pSimObject, versionId );

    // Does the object require a name?
    if ( objectName == StringTable->EmptyString() )
    {
        // No, so just register anonymously.
        pSimObject->registerObject();
    }
    else
    {
        // Yes, so register a named object.
        pSimObject->registerObject( objectName );

        // Was the name assigned?
        if ( pSimObject->getName() != objectName )
        {
            // No, so warn that the name was rejected.
#ifdef TORQUE_DEBUG
            Con::warnf( "Taml::parseElement() - Registered an instance of type '%s' but a request to name it '%s' was rejected. This is typically because an object of that name already exists. '%s'", typeName, objectName, typeLocationBuffer );
#else
            Con::warnf( "Taml::parseElement() - Registered an instance of type '%s' but a request to name it '%s' was rejected. This is typically because an object of that name already exists.", typeName, objectName );
#endif
        }
    }

    // Do we have a reference Id?
    if ( tamlRefId != 0 )
    {
        // Yes, so insert reference for later tamlRefToId lookups.
        mObjectReferenceMap.insertUnique( tamlRefId, pSimObject );
    }

    // Parse custom elements.
    TamlCustomNodes customProperties;

    // Parse children.
    parseChildren( stream, pCallbacks, pSimObject, versionId );

    // Parse custom elements.
    parseCustomElements( stream, pCallbacks, customProperties, versionId );

    // Are there any Taml callbacks?
    if ( pCallbacks != NULL )
    {
        // Yes, so call it.
        mpTaml->tamlPostRead( pCallbacks, customProperties );
    }

    // Return object.
    return pSimObject;
}
//-----------------------------------------------------------------------------
// Reads the element's attribute list and applies each entry as a data field
// on the target object.
// Stream layout: attribute count (U32), then count × (name string, value string).
// @param versionId accepted for parser-interface symmetry; not consulted here.
void TamlBinaryReader::parseAttributes( Stream& stream, SimObject* pSimObject, const U32 versionId )
{
    // Debug Profiling.
    PROFILE_SCOPE(TamlBinaryReader_ParseAttributes);

    // Sanity!
    AssertFatal( pSimObject != NULL, "Taml: Cannot parse attributes on a NULL object." );

    // Fetch attribute count.
    U32 attributeCount;
    stream.read( &attributeCount );

    // Finish if no attributes.
    if ( attributeCount == 0 )
        return;

    // Keep the buffer size and the read limit in lock-step so a future resize
    // cannot silently truncate or overflow (previously 4096 appeared twice).
    const U32 maxValueLength = 4096;
    char valueBuffer[maxValueLength];

    // Iterate attributes.
    for ( U32 index = 0; index < attributeCount; ++index )
    {
        // Fetch attribute name and value.
        StringTableEntry attributeName = stream.readSTString();
        stream.readLongString( maxValueLength, valueBuffer );

        // We can assume this is a field for now.
        pSimObject->setPrefixedDataField( attributeName, NULL, valueBuffer );
    }
}
//-----------------------------------------------------------------------------
// Reads this element's child-object list and attaches each child to the
// parent through its TamlChildren interface.
// Stream layout: child count (U32), then count nested elements.
// NOTE(review): pCallbacks is accepted but unused here — the per-child
// callback is fetched from each child itself; confirm the parameter is
// intentionally reserved.
void TamlBinaryReader::parseChildren( Stream& stream, TamlCallbacks* pCallbacks, SimObject* pSimObject, const U32 versionId )
{
    // Debug Profiling.
    PROFILE_SCOPE(TamlBinaryReader_ParseChildren);

    // Sanity!
    AssertFatal( pSimObject != NULL, "Taml: Cannot parse children on a NULL object." );

    // Fetch children count.
    U32 childrenCount;
    stream.read( &childrenCount );

    // Finish if no children.
    if ( childrenCount == 0 )
        return;

    // Fetch the Taml children.
    TamlChildren* pChildren = dynamic_cast<TamlChildren*>( pSimObject );

    // Is this a sim set?
    if ( pChildren == NULL )
    {
        // No, so warn.
        Con::warnf("Taml: Child element found under parent but object cannot have children." );
        return;
    }

    // Fetch any container child class specifier.
    AbstractClassRep* pContainerChildClass = pSimObject->getClassRep()->getContainerChildClass( true );

    // Iterate children.
    for ( U32 index = 0; index < childrenCount; ++ index )
    {
        // Parse child element.
        SimObject* pChildSimObject = parseElement( stream, versionId );

        // Finish if child failed (stream is no longer trustworthy).
        if ( pChildSimObject == NULL )
            return;

        // Do we have a container child class?
        if ( pContainerChildClass != NULL )
        {
            // Yes, so is the child object the correctly derived type?
            if ( !pChildSimObject->getClassRep()->isClass( pContainerChildClass ) )
            {
                // No, so warn.
                Con::warnf("Taml: Child element '%s' found under parent '%s' but object is restricted to children of type '%s'.",
                    pChildSimObject->getClassName(),
                    pSimObject->getClassName(),
                    pContainerChildClass->getClassName() );

                // NOTE: We can't delete the object as it may be referenced elsewhere!
                pChildSimObject = NULL;

                // Skip.
                continue;
            }
        }

        // Add child.
        pChildren->addTamlChild( pChildSimObject );

        // Find Taml callbacks for child.
        TamlCallbacks* pChildCallbacks = dynamic_cast<TamlCallbacks*>( pChildSimObject );

        // Do we have callbacks on the child?
        if ( pChildCallbacks != NULL )
        {
            // Yes, so perform callback.
            mpTaml->tamlAddParent( pChildCallbacks, pSimObject );
        }
    }
}
//-----------------------------------------------------------------------------
// Reads the element's custom-node section into customNodes and hands it to
// the object's custom-read callback.
// Stream layout: node count (U32), then count × (name string, node payload).
void TamlBinaryReader::parseCustomElements( Stream& stream, TamlCallbacks* pCallbacks, TamlCustomNodes& customNodes, const U32 versionId )
{
    // Debug Profiling.
    PROFILE_SCOPE(TamlBinaryReader_ParseCustomElement);

    // Read custom node count.
    U32 customNodeCount;
    stream.read( &customNodeCount );

    // Finish if no custom nodes.
    if ( customNodeCount == 0 )
        return;

    // Iterate custom nodes. The stream data is consumed even when no
    // callback target exists, so the stream stays correctly positioned.
    for ( U32 nodeIndex = 0; nodeIndex < customNodeCount; ++nodeIndex )
    {
        //Read custom node name.
        StringTableEntry nodeName = stream.readSTString();

        // Add custom node.
        TamlCustomNode* pCustomNode = customNodes.addNode( nodeName );

        // Parse the custom node.
        parseCustomNode( stream, pCustomNode, versionId );
    }

    // Do we have callbacks?
    if ( pCallbacks == NULL )
    {
        // No, so warn.
        Con::warnf( "Taml: Encountered custom data but object does not support custom data." );
        return;
    }

    // Custom read callback.
    mpTaml->tamlCustomRead( pCallbacks, customNodes );
}
//-----------------------------------------------------------------------------
// Recursively reads one custom node into pCustomNode.
// Stream layout: proxy flag (bool); if set, a full nested element follows.
// Otherwise: node name, node text, child-node count + children, then
// field count + (name, value) pairs.
void TamlBinaryReader::parseCustomNode( Stream& stream, TamlCustomNode* pCustomNode, const U32 versionId )
{
    // Fetch if a proxy object.
    bool isProxyObject;
    stream.read( &isProxyObject );

    // Is this a proxy object?
    if ( isProxyObject )
    {
        // Yes, so parse proxy object.
        SimObject* pProxyObject = parseElement( stream, versionId );

        // Add child node.
        pCustomNode->addNode( pProxyObject );

        return;
    }

    // No, so read custom node name.
    StringTableEntry nodeName = stream.readSTString();

    // Add child node.
    TamlCustomNode* pChildNode = pCustomNode->addNode( nodeName );

    // Read child node text.
    char childNodeTextBuffer[MAX_TAML_NODE_FIELDVALUE_LENGTH];
    stream.readLongString( MAX_TAML_NODE_FIELDVALUE_LENGTH, childNodeTextBuffer );
    pChildNode->setNodeText( childNodeTextBuffer );

    // Read child node count.
    U32 childNodeCount;
    stream.read( &childNodeCount );

    // Do we have any children nodes?
    if ( childNodeCount > 0 )
    {
        // Yes, so parse children nodes.
        for( U32 childIndex = 0; childIndex < childNodeCount; ++childIndex )
        {
            // Parse child node.
            parseCustomNode( stream, pChildNode, versionId );
        }
    }

    // Read child field count.
    U32 childFieldCount;
    stream.read( &childFieldCount );

    // Do we have any child fields?
    if ( childFieldCount > 0 )
    {
        // Yes, so parse child fields.
        for( U32 childFieldIndex = 0; childFieldIndex < childFieldCount; ++childFieldIndex )
        {
            // Read field name.
            StringTableEntry fieldName = stream.readSTString();

            // Read field value.
            char valueBuffer[MAX_TAML_NODE_FIELDVALUE_LENGTH];
            stream.readLongString( MAX_TAML_NODE_FIELDVALUE_LENGTH, valueBuffer );

            // Add field.
            pChildNode->addField( fieldName, valueBuffer );
        }
    }
}
|
#pragma once
#include "Clove/Graphics/GhaPresentQueue.hpp"
#include <MetalKit/MetalKit.h>
@class MetalView;
namespace clove {
/**
 * GhaPresentQueue implementation for Metal.
 * Holds the Metal command queue handle and the view to present into.
 * Non-copyable; move-only.
 */
class MetalPresentQueue : public GhaPresentQueue {
    //VARIABLES
private:
    id<MTLCommandQueue> commandQueue;
    // View whose drawable is presented. NOTE(review): lifetime/ownership of
    // this pointer is managed elsewhere — confirm against the .mm file.
    MetalView *view{ nullptr };

    //FUNCTIONS
public:
    MetalPresentQueue() = delete;
    MetalPresentQueue(id<MTLCommandQueue> commandQueue, MetalView *view);

    MetalPresentQueue(MetalPresentQueue const &other) = delete;
    MetalPresentQueue(MetalPresentQueue &&other) noexcept;

    MetalPresentQueue &operator=(MetalPresentQueue const &other) = delete;
    MetalPresentQueue &operator=(MetalPresentQueue &&other) noexcept;

    ~MetalPresentQueue();

    /** Presents according to presentInfo; see GhaPresentQueue::present. */
    Result present(PresentInfo const &presentInfo) override;
};
}
|
/*
* Copyright 2020 Autoware Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file cpu_monitor_base.cpp
* @brief CPU monitor base class
*/
#include <algorithm>
#include <regex>
#include <string>
#include <boost/filesystem.hpp>
#include <boost/process.hpp>
#include <boost/property_tree/json_parser.hpp>
#include <boost/property_tree/ptree.hpp>
#include <fmt/format.h>
#include <system_monitor/cpu_monitor/cpu_monitor_base.h>
namespace bp = boost::process;
namespace fs = boost::filesystem;
namespace pt = boost::property_tree;
// Constructs the monitor: captures host name and core count, probes for the
// `mpstat` binary, reads threshold parameters from the private node handle
// (falling back to the defaults in the init list), and registers the five
// diagnostic tasks with the updater.
CPUMonitorBase::CPUMonitorBase(const ros::NodeHandle & nh, const ros::NodeHandle & pnh)
: nh_(nh),
  pnh_(pnh),
  updater_(),
  hostname_(),
  num_cores_(0),
  temps_(),
  freqs_(),
  mpstat_exists_(false),
  temp_warn_(90.0),
  temp_error_(95.0),
  usage_warn_(0.90),
  usage_error_(1.00),
  usage_avg_(true),
  load1_warn_(0.90),
  load5_warn_(0.80)
{
  gethostname(hostname_, sizeof(hostname_));
  num_cores_ = boost::thread::hardware_concurrency();

  // Check if command exists — checkUsage depends on mpstat being installed.
  fs::path p = bp::search_path("mpstat");
  mpstat_exists_ = (p.empty()) ? false : true;

  // ROS parameters override the defaults set in the initializer list.
  pnh_.param<float>("temp_warn", temp_warn_, 90.0);
  pnh_.param<float>("temp_error", temp_error_, 95.0);
  pnh_.param<float>("usage_warn", usage_warn_, 0.90);
  pnh_.param<float>("usage_error", usage_error_, 1.00);
  pnh_.param<bool>("usage_avg", usage_avg_, true);
  pnh_.param<float>("load1_warn", load1_warn_, 0.90);
  pnh_.param<float>("load5_warn", load5_warn_, 0.80);

  updater_.setHardwareID(hostname_);
  updater_.add("CPU Temperature", this, &CPUMonitorBase::checkTemp);
  updater_.add("CPU Usage", this, &CPUMonitorBase::checkUsage);
  updater_.add("CPU Load Average", this, &CPUMonitorBase::checkLoad);
  updater_.add("CPU Thermal Throttling", this, &CPUMonitorBase::checkThrottling);
  updater_.add("CPU Frequency", this, &CPUMonitorBase::checkFrequency);
}
void CPUMonitorBase::run(void)
{
ros::Rate rate(1.0);
while (ros::ok()) {
ros::spinOnce();
updater_.force_update();
rate.sleep();
}
}
// Reports per-sensor CPU temperature (sysfs value is millidegrees C).
// Summary is ERROR on any file problem, otherwise the worst threshold level.
void CPUMonitorBase::checkTemp(diagnostic_updater::DiagnosticStatusWrapper & stat)
{
  if (temps_.empty()) {
    stat.summary(DiagStatus::ERROR, "temperature files not found");
    return;
  }

  int level = DiagStatus::OK;
  std::string error_str = "";

  for (auto itr = temps_.begin(); itr != temps_.end(); ++itr) {
    // Read temperature file
    const fs::path path(itr->path_);
    fs::ifstream ifs(path, std::ios::in);
    if (!ifs) {
      stat.add("file open error", itr->path_);
      error_str = "file open error";
      continue;
    }

    float temp = 0.0;
    if (!(ifs >> temp)) {
      // The file opened but held no readable number; the original used the
      // uninitialized value here, producing a garbage reading and level.
      ifs.close();
      stat.add("file format error", itr->path_);
      error_str = "file format error";
      continue;
    }
    ifs.close();
    temp /= 1000;  // millidegC -> degC
    stat.addf(itr->label_, "%.1f DegC", temp);

    if (temp >= temp_error_)
      level = std::max(level, static_cast<int>(DiagStatus::ERROR));
    else if (temp >= temp_warn_)
      level = std::max(level, static_cast<int>(DiagStatus::WARN));
  }

  if (!error_str.empty())
    stat.summary(DiagStatus::ERROR, error_str);
  else
    stat.summary(level, temp_dict_.at(level));
}
// Reports per-CPU usage parsed from `mpstat -o JSON`.
// usage = (usr + nice) / 100; summary is either the "all" row's level
// (usage_avg_) or the worst per-core level.
void CPUMonitorBase::checkUsage(diagnostic_updater::DiagnosticStatusWrapper & stat)
{
  if (!mpstat_exists_) {
    stat.summary(DiagStatus::ERROR, "mpstat error");
    stat.add(
      "mpstat", "Command 'mpstat' not found, but can be installed with: sudo apt install sysstat");
    return;
  }

  // Get CPU Usage
  bp::ipstream is_out;
  bp::ipstream is_err;
  bp::child c("mpstat -P ALL 1 1 -o JSON", bp::std_out > is_out, bp::std_err > is_err);
  c.wait();
  if (c.exit_code() != 0) {
    std::ostringstream os;
    is_err >> os.rdbuf();
    stat.summary(DiagStatus::ERROR, "mpstat error");
    stat.add("mpstat", os.str().c_str());
    return;
  }

  // Initialize every field: the original left these uninitialized, so a JSON
  // record missing any key fed garbage into the usage computation below.
  std::string cpu_name;
  float usr = 0.0;
  float nice = 0.0;
  float sys = 0.0;
  float idle = 0.0;
  float usage = 0.0;
  int level = DiagStatus::OK;
  int whole_level = DiagStatus::OK;

  pt::ptree pt;

  try {
    // Analyze JSON output
    read_json(is_out, pt);

    for (const pt::ptree::value_type & child1 : pt.get_child("sysstat.hosts")) {
      const pt::ptree & hosts = child1.second;

      for (const pt::ptree::value_type & child2 : hosts.get_child("statistics")) {
        const pt::ptree & statistics = child2.second;

        for (const pt::ptree::value_type & child3 : statistics.get_child("cpu-load")) {
          const pt::ptree & cpu_load = child3.second;

          if (boost::optional<std::string> v = cpu_load.get_optional<std::string>("cpu"))
            cpu_name = v.get();
          if (boost::optional<float> v = cpu_load.get_optional<float>("usr")) usr = v.get();
          if (boost::optional<float> v = cpu_load.get_optional<float>("nice")) nice = v.get();
          if (boost::optional<float> v = cpu_load.get_optional<float>("sys")) sys = v.get();
          if (boost::optional<float> v = cpu_load.get_optional<float>("idle")) idle = v.get();

          usage = (usr + nice) * 1e-2;

          level = DiagStatus::OK;
          if (usage >= usage_error_)
            level = DiagStatus::ERROR;
          else if (usage >= usage_warn_)
            level = DiagStatus::WARN;

          stat.add(fmt::format("CPU {}: status", cpu_name), load_dict_.at(level));
          stat.addf(fmt::format("CPU {}: usr", cpu_name), "%.2f%%", usr);
          stat.addf(fmt::format("CPU {}: nice", cpu_name), "%.2f%%", nice);
          stat.addf(fmt::format("CPU {}: sys", cpu_name), "%.2f%%", sys);
          stat.addf(fmt::format("CPU {}: idle", cpu_name), "%.2f%%", idle);

          if (usage_avg_ == true) {
            if (cpu_name == "all") whole_level = level;
          } else {
            whole_level = std::max(whole_level, level);
          }
        }
      }
    }
  } catch (const std::exception & e) {
    stat.summary(DiagStatus::ERROR, "mpstat exception");
    stat.add("mpstat", e.what());
    return;
  }

  stat.summary(whole_level, load_dict_.at(whole_level));
}
// Reports 1/5/15-minute load averages from /proc/loadavg, normalized per core.
// WARN when the 1-min value exceeds load1_warn_ or the 5-min exceeds load5_warn_.
void CPUMonitorBase::checkLoad(diagnostic_updater::DiagnosticStatusWrapper & stat)
{
  double loadavg[3];

  std::ifstream ifs("/proc/loadavg", std::ios::in);
  if (!ifs) {
    stat.summary(DiagStatus::ERROR, "uptime error");
    stat.add("uptime", strerror(errno));
    return;
  }

  std::string line;
  if (!std::getline(ifs, line)) {
    stat.summary(DiagStatus::ERROR, "uptime error");
    stat.add("uptime", "format error");
    return;
  }

  if (sscanf(line.c_str(), "%lf %lf %lf", &loadavg[0], &loadavg[1], &loadavg[2]) != 3) {
    stat.summary(DiagStatus::ERROR, "uptime error");
    stat.add("uptime", "format error");
    return;
  }

  // boost::thread::hardware_concurrency() may report 0; guard the
  // normalization so we never divide by zero (the original divided blindly).
  const int cores = (num_cores_ > 0) ? num_cores_ : 1;
  loadavg[0] /= cores;
  loadavg[1] /= cores;
  loadavg[2] /= cores;

  int level = DiagStatus::OK;
  if (loadavg[0] > load1_warn_ || loadavg[1] > load5_warn_) level = DiagStatus::WARN;

  stat.summary(level, load_dict_.at(level));
  stat.addf("1min", "%.2f%%", loadavg[0] * 1e2);
  stat.addf("5min", "%.2f%%", loadavg[1] * 1e2);
  stat.addf("15min", "%.2f%%", loadavg[2] * 1e2);
}
// Base-class stub: thermal-throttling detection is platform specific and is
// expected to be provided by a derived monitor; this version only logs.
void CPUMonitorBase::checkThrottling(diagnostic_updater::DiagnosticStatusWrapper & stat)
{
  ROS_INFO("CPUMonitorBase::checkThrottling not implemented.");
}
// Reports each CPU's current clock from scaling_cur_freq (value is kHz;
// reported in MHz). Unreadable files are skipped, as before.
void CPUMonitorBase::checkFrequency(diagnostic_updater::DiagnosticStatusWrapper & stat)
{
  if (freqs_.empty()) {
    stat.summary(DiagStatus::ERROR, "frequency files not found");
    return;
  }

  for (auto itr = freqs_.begin(); itr != freqs_.end(); ++itr) {
    // Read scaling_cur_freq file
    const fs::path path(itr->path_);
    fs::ifstream ifs(path, std::ios::in);
    if (ifs) {
      std::string line;
      if (std::getline(ifs, line)) {
        try {
          stat.addf(fmt::format("CPU {}: clock", itr->index_), "%d MHz", std::stoi(line) / 1000);
        } catch (const std::exception &) {
          // std::stoi throws on non-numeric or out-of-range content; the
          // original let that exception escape and terminate the node.
          stat.add("file format error", itr->path_);
        }
      }
    }
    ifs.close();
  }

  stat.summary(DiagStatus::OK, "OK");
}
// Base-class stub: locating temperature sysfs entries is platform specific
// and is expected to populate temps_ in a derived monitor; this only logs.
void CPUMonitorBase::getTempNames(void)
{
  ROS_INFO("CPUMonitorBase::getTempNames not implemented.");
}
// Populates freqs_ with one scaling_cur_freq path per /sys cpu[0-9]+
// directory, sorted by CPU index.
void CPUMonitorBase::getFreqNames(void)
{
  const fs::path root("/sys/devices/system/cpu");
  // Loop-invariant pattern, compiled once instead of per directory entry.
  const std::regex cpu_dir_re(".*cpu(\\d+)");

  for (const fs::path & path :
       boost::make_iterator_range(fs::directory_iterator(root), fs::directory_iterator())) {
    if (!fs::is_directory(path)) continue;

    std::cmatch match;
    // Keep the string alive while `match` refers into it: the original called
    // .c_str() on the temporary returned by generic_string(), leaving `match`
    // holding dangling pointers (undefined behavior).
    const std::string cpu_dir = path.generic_string();

    // /sys/devices/system/cpu[0-9] ?
    if (!std::regex_match(cpu_dir.c_str(), match, cpu_dir_re)) continue;

    // /sys/devices/system/cpu[0-9]/cpufreq/scaling_cur_freq
    cpu_freq_info freq;
    const fs::path freq_path = path / "cpufreq/scaling_cur_freq";
    freq.index_ = std::stoi(match[1].str());
    freq.path_ = freq_path.generic_string();
    freqs_.push_back(freq);
  }

  std::sort(freqs_.begin(), freqs_.end(), [](const cpu_freq_info & c1, const cpu_freq_info & c2) {
    return c1.index_ < c2.index_;
  });  // NOLINT
}
|
#include "util.h"
#include "Option.h"
/* 1) Initialize all factor matrices and a core tensor, and 2) Build a test tensor if there exists a test input */
/* All factor matrices share one contiguous allocation: FactorMat[0][0] is the
 * payload, FactorMat[0] the row-pointer table, and FactorMat[i] aliases into
 * that table at the cumulative row offset of mode i. */
void Initialize(double ***&FactorMat, Tensor &G, int *&Dims, Tensor &Xtest) {
	printf("Initializing all factor matrices and a core tensor ... ");
	int n, i, j, k, g;
	int order = Option::tensorOrder;
	int rankSize = Option::rankSize;
	int gridSize = Option::gridSize;
	char *InputPath = Option::trainPath;
	Dims = (int *)malloc(sizeof(int)*order);
	// Per-mode dimensionalities come from the grid info of the training input;
	// the grid tables themselves are only needed transiently here.
	int **gridDimCnt, **gridDims;
	Read_Grid_Info(InputPath, gridDimCnt, gridDims);
	for (i = 0; i < order; i++) Dims[i] = gridDimCnt[i][gridSize];
	free(gridDimCnt[0]); free(gridDimCnt);
	free(gridDims[0]); free(gridDims);
	G.buildCoreTensor(rankSize);
	// Single contiguous block for all factor matrices (see note above).
	int cntDims = 0, totalDims = 0;
	for (i = 0; i < order; i++) totalDims += Dims[i];
	FactorMat = (double ***)malloc(sizeof(double **)*order);
	FactorMat[0] = (double **)malloc(sizeof(double *)*totalDims);
	FactorMat[0][0] = (double *)malloc(sizeof(double)*totalDims*rankSize);
	for (i = 0; i < order; i++) {
		if (i >= 1) {
			FactorMat[i] = FactorMat[i-1] + Dims[i-1];
			cntDims += Dims[i-1];
		}
		int row = Dims[i], col = G.Dims[i];
		for (j = 0; j < row; j++) {
			FactorMat[i][j] = &FactorMat[0][0][(cntDims + j)*col];
			for (k = 0; k < col; k++) {
				// Mode 0 starts at zero; other modes start uniform in [0, 1).
				if (i == 0) FactorMat[i][j][k] = 0;
				else FactorMat[i][j][k] = frand(0, 1);
			}
		}
	}
	if(Option::testPath != NULL) {
		// First pass counts nonzeros, second pass reads index/value rows.
		// NOTE(review): fopen/fscanf results are unchecked — a missing or
		// malformed test file crashes or silently misparses; confirm inputs
		// are pre-validated upstream.
		FILE *fp;
		char tmp[1005];
		int idx; double val;
		fp = fopen(Option::testPath, "r");
		while (fgets(tmp, 1005, fp)) Xtest.nnz++;
		fclose(fp);
		Xtest.val = (double *)malloc(sizeof(double)*Xtest.nnz);
		Xtest.IndexMat = (int **)malloc(sizeof(int *)*Xtest.nnz);
		Xtest.IndexMat[0] = (int *)malloc(sizeof(int)*Xtest.nnz*order);
		for (n = 0; n < Xtest.nnz; n++) Xtest.IndexMat[n] = &(Xtest.IndexMat[0][n*order]);
		fp = fopen(Option::testPath, "r");
		for (n = 0; n < Xtest.nnz; n++) {
			for (i = 0; i < order; i++) {
				fscanf(fp, "%d", &idx);
				// Input indices are 1-based; stored 0-based.
				Xtest.IndexMat[n][i] = idx-1;
			}
			fscanf(fp, "%lf", &val);
			Xtest.val[n] = val;
		}
		fclose(fp);
	}
	printf("Done ");
	printf("=> Dimensionalities: %d ", Dims[0]);
	for (i = 1; i < order; i++) printf("x %d ", Dims[i]);
	printf("\n\n");
}
/* Save the factor matrices and the core tensor in the result path */
/* Writes FACTOR<i> (one row per dimension, tab-separated) and CORETENSOR
 * (index columns then value per nonzero). Frees Dims, as before. */
void Print(double ***FactorMat, Tensor &G, int *Dims) {
	printf("Writing all the factor matrices and the core tensor to file ... ");
	char temp[1024];
	int i, j, k = 0;
	char *ResultPath = Option::resultPath;
	int order = Option::tensorOrder;
	for (i = 0; i < order; i++) {
		sprintf(temp, "%s/FACTOR%d", ResultPath, i);
		FILE *fp_factor = fopen(temp, "w");
		if (fp_factor == NULL) {
			// Unwritable result path: report and skip rather than crash.
			fprintf(stderr, "Cannot open %s for writing\n", temp);
			continue;
		}
		for (j = 0; j < Dims[i]; j++) {
			for (k = 0; k < G.Dims[i]; k++) {
				fprintf(fp_factor, "%e\t", FactorMat[i][j][k]);
			}
			fprintf(fp_factor, "\n");
		}
		// The original never closed these handles: one FILE* leaked per
		// factor matrix and buffered output could stay unflushed.
		fclose(fp_factor);
	}
	sprintf(temp, "%s/CORETENSOR", ResultPath);
	FILE *fp_core = fopen(temp, "w");
	if (fp_core != NULL) {
		for (i = 0; i < G.nnz; i++) {
			for (j = 0; j < order; j++) {
				fprintf(fp_core, "%d\t", G.IndexMat[i][j]);
			}
			fprintf(fp_core, "%e\n", G.val[i]);
		}
		fclose(fp_core);	// also leaked in the original
	} else {
		fprintf(stderr, "Cannot open %s for writing\n", temp);
	}
	printf("Done\n");
	free(Dims);
}
/* Allocate and free the cache table */
/* Allocates a tableSize x CoreDim matrix as one contiguous payload plus a
 * row-pointer index over it; rows are reachable as DeltaMat[row][col]. */
void Alloc_DeltaMat(double **&DeltaMat, int tableSize, int CoreDim) {
	DeltaMat = (double **)malloc(sizeof(double *)*tableSize);
	double *payload = (double *)malloc(sizeof(double)*tableSize*CoreDim);
	for (int row = 0; row < tableSize; row++)
		DeltaMat[row] = payload + row*CoreDim;
}
/* Releases a table built by Alloc_DeltaMat: the contiguous payload first,
 * then the row-pointer index. */
void Free_DeltaMat(double **DeltaMat) {
	free(DeltaMat[0]);
	free(DeltaMat);
}
/* Create the (N-1)-order permutation table for efficient access to the set of subtensors */
/* Row r is the (order-1)-digit base-gridSize representation of r, most
 * significant digit first; rows live in one contiguous allocation. */
void Precompute_Permutation(int **&gridPermu) {
	int gridSize = Option::gridSize;
	int order = Option::tensorOrder;
	int rowLen = order - 1;                 // digits per permutation row
	int rowCnt = pow(gridSize, rowLen);     // gridSize^(order-1) rows
	gridPermu = (int **)malloc(sizeof(int *)*rowCnt);
	gridPermu[0] = (int *)malloc(sizeof(int)*rowCnt*rowLen);
	memset(gridPermu[0], 0, sizeof(int)*rowLen);
	// Each row is the previous one incremented like an odometer in base gridSize.
	for (int row = 1; row < rowCnt; row++) {
		gridPermu[row] = &gridPermu[0][row*rowLen];
		memcpy(gridPermu[row], gridPermu[row-1], sizeof(int)*rowLen);
		int digit = rowLen - 1;
		gridPermu[row][digit]++;
		while (gridPermu[row][digit] >= gridSize) {
			gridPermu[row][digit] -= gridSize;
			gridPermu[row][--digit]++;
		}
	}
}
/* Conversion operations from grid index to cell index, or from cell index to grid index */
/* for example, in case of 3-order tensor and G = 4, grid index: [1, 0, 3] <-> cell index: [49] */
/* Treats gridIdx as base-gridSize digits, least significant first. */
int gridIdx2cellIdx(int *gridIdx) {
	int order = Option::tensorOrder;
	int gridSize = Option::gridSize;
	int cellIdx = 0;
	int stride = 1;
	// Pure integer accumulation: the original used pow(), whose double result
	// can land just below the exact power and truncate to the wrong cell.
	for (int i = 0; i < order; i++) {
		cellIdx += gridIdx[i]*stride;
		stride *= gridSize;
	}
	return cellIdx;
}
/* Inverse of gridIdx2cellIdx: peels one base-gridSize digit per mode,
 * least significant first, into gridIdx. */
void cellIdx2gridIdx(int *gridIdx, int cellIdx) {
	int order = Option::tensorOrder;
	int gridSize = Option::gridSize;
	for (int mode = 0; mode < order; mode++) {
		int quotient = cellIdx / gridSize;
		gridIdx[mode] = cellIdx - quotient*gridSize;
		cellIdx = quotient;
	}
}
/* Lock operations for thread-safe access to the cache table */
/* Busy-waits until the flag is successfully claimed; the acquire ordering
 * makes prior writes by the releasing thread visible here. */
void acquireSpinLock(spinLock &lock){
	do {
	} while (lock.test_and_set(std::memory_order_acquire));
}
/* Clears the flag with release ordering so the next acquirer observes the
 * writes made inside the critical section. */
void releaseSpinLock(spinLock &lock){
	lock.clear(std::memory_order_release);
}
/* Basic random and arithmetic operations */
/* Uniform sample in [x, y]: maps rand()'s unit interval onto the range. */
double frand(double x, double y) {
	double unit = (double)rand() / RAND_MAX;
	return x + (y - x)*unit;
}
/* Absolute value of a double. */
double abss(double x) {
	if (x > 0)
		return x;
	return -x;
}
|
// link to the problem:
// https://leetcode.com/problems/populating-next-right-pointers-in-each-node/
#include <algorithm>
#include <array>
#include <iostream>
#include <iterator>
#include <numeric>
#include <sstream>
#include <string>
#include <unordered_map>
#include <vector>
// Definition for a Node.
class Node {
 public:
  int val;
  Node* left;
  Node* right;
  Node* next;

  Node() : val(0), left(nullptr), right(nullptr), next(nullptr) {}
  Node(int _val) : val(_val), left(nullptr), right(nullptr), next(nullptr) {}
  Node(int _val, Node* _left, Node* _right, Node* _next)
      : val(_val), left(_left), right(_right), next(_next) {}

  // Dump the tree level by level: walk this level via the `next` chain,
  // emit a '#' terminator, then recurse into the leftmost child.
  void print() {
    for (Node* node = this; node != nullptr; node = node->next) {
      std::cout << node->val << " ";
    }
    std::cout << "#";
    if (left != nullptr) left->print();
  }
};
// Solves "Populating Next Right Pointers in Each Node": records every node's
// parent in a hash map, then derives each node's `next` neighbor by climbing
// to the nearest ancestor reached through a left edge and descending the
// mirrored path on the right side.
// Assumes a *perfect* binary tree, as the problem guarantees: the code checks
// only `left` before inserting both children, and the descent below always
// follows existing `left` pointers.
class Solution {
 public:
  // Child -> parent map; the tree root has no entry.
  std::unordered_map<Node*, Node*> parents;

  // Preorder DFS that records parent pointers for every node.
  void find_parents(Node* root) {
    if (root == nullptr) return;
    if (root->left != nullptr) {
      // Perfect tree: left != null implies right != null.
      parents.insert({root->left, root});
      parents.insert({root->right, root});
    }
    find_parents(root->left);
    find_parents(root->right);
  }

  // Returns the node immediately to the right of `root` on the same level,
  // or nullptr if root is the rightmost node of its level (or the tree root).
  Node* find_next(Node* root) {
    Node *parent, *it = root;
    if (parents.find(root) == parents.end()) return nullptr;  // tree root
    parent = parents[it];
    int up = 1;  // levels climbed so far
    // Climb until we reach an ancestor via its LEFT child; crossing into that
    // ancestor's right subtree leads back to our level, one node to the right.
    while (it != parent->left) {
      it = parent;
      if (parents.find(parent) == parents.end()) return nullptr;  // rightmost
      parent = parents[parent];
      up++;
    }
    // Descend `up` levels along the left spine of the right sibling subtree.
    it = parent->right;
    while (up) {
      parent = it;
      it = it->left;
      up--;
    }
    return parent;
  }

  // Preorder DFS setting `next` on every node.
  void recursive_connect(Node* root) {
    if (root == nullptr) return;
    root->next = find_next(root);
    recursive_connect(root->left);
    recursive_connect(root->right);
  }

  // Entry point. Roughly O(n * height) time and O(n) extra space for the
  // parent map (the classic pointer-threading approach achieves O(n)/O(1)).
  Node* connect(Node* root) {
    find_parents(root);
    recursive_connect(root);
    return (root);
  }
};
int main() {
Node* root = new Node(1);
root->left = new Node(2);
root->right = new Node(3);
root->left->left = new Node(4);
root->left->right = new Node(5);
root->right->left = new Node(6);
root->right->right = new Node(7);
Solution s;
root = s.connect(root);
root->print();
}
|
// KRATOS __ __ _____ ____ _ _ ___ _ _ ____
// | \/ | ____/ ___|| | | |_ _| \ | |/ ___|
// | |\/| | _| \___ \| |_| || || \| | | _
// | | | | |___ ___) | _ || || |\ | |_| |
// |_| |_|_____|____/|_| |_|___|_| \_|\____| APPLICATION
//
// License: BSD License
// license: MeshingApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
// Project includes
#include "utilities/math_utils.h"
#include "utilities/variable_utils.h"
#include "utilities/geometry_utilities.h"
#include "custom_utilities/metrics_math_utils.h"
#include "processes/compute_nodal_gradient_process.h"
#include "custom_processes/metrics_hessian_process.h"
namespace Kratos
{
/**
 * @brief Constructor taking a scalar (double) variable.
 * @param rThisModelPart Model part whose nodal Hessian drives the metric
 * @param rVariable Scalar variable to differentiate
 * @param ThisParameters User settings, validated against the defaults
 */
ComputeHessianSolMetricProcess::ComputeHessianSolMetricProcess(
    ModelPart& rThisModelPart,
    Variable<double>& rVariable,
    Parameters ThisParameters
    ):mThisModelPart(rThisModelPart)
{
    // Track the scalar variable whose Hessian will be computed
    mrOriginVariableDoubleList.push_back(&rVariable);
    // Validate the input settings against the defaults, then cache them
    // into the member variables
    ThisParameters.RecursivelyValidateAndAssignDefaults(GetDefaultParameters());
    InitializeVariables(ThisParameters);
}
/***********************************************************************************/
/***********************************************************************************/
/**
 * @brief Constructor taking a variable component.
 * @param rThisModelPart Model part whose nodal Hessian drives the metric
 * @param rVariable Component variable to differentiate
 * @param ThisParameters User settings, validated against the defaults
 */
ComputeHessianSolMetricProcess::ComputeHessianSolMetricProcess(
    ModelPart& rThisModelPart,
    ComponentType& rVariable,
    Parameters ThisParameters
    ):mThisModelPart(rThisModelPart)
{
    // Track the component variable whose Hessian will be computed
    mrOriginVariableComponentsList.push_back(&rVariable);
    // Validate the input settings against the defaults, then cache them
    // into the member variables
    ThisParameters.RecursivelyValidateAndAssignDefaults(GetDefaultParameters());
    InitializeVariables(ThisParameters);
}
/***********************************************************************************/
/***********************************************************************************/
/**
 * @brief Runs the process: assembles the nodal Hessian and computes the
 * metric tensor on every node, dispatching on the problem dimension.
 */
void ComputeHessianSolMetricProcess::Execute()
{
    // Assemble the auxiliar nodal Hessian of the origin variable first
    CalculateAuxiliarHessian();
    // Check that the origin variable actually exists on the nodes
    NodesArrayType& nodes_array = mThisModelPart.Nodes();
    if (!mrOriginVariableDoubleList.empty()) {
        VariableUtils().CheckVariableExists(*mrOriginVariableDoubleList[0], nodes_array);
    } else {
        VariableUtils().CheckVariableExists(*mrOriginVariableComponentsList[0], nodes_array);
    }
    // NODAL_H is required to bound the metric element sizes
    for (const auto& i_node : nodes_array)
        KRATOS_ERROR_IF_NOT(i_node.Has(NODAL_H)) << "NODAL_H must be computed" << std::endl;
    // Dispatch the templated metric computation on the spatial dimension
    const std::size_t dimension = mThisModelPart.GetProcessInfo()[DOMAIN_SIZE];
    switch (dimension) {
        case 2:
            CalculateMetric<2>();
            break;
        case 3:
            CalculateMetric<3>();
            break;
        default:
            KRATOS_ERROR << "Dimension can be only 2D or 3D. Dimension: " << dimension << std::endl;
    }
}
/***********************************************************************************/
/***********************************************************************************/
/**
 * @brief Builds the nodal metric tensor (in Voigt-like vector form) from the
 * nodal Hessian of the origin variable.
 * Follows remark 4.2.2 / eq. 4.4 of "Metric-Based Anisotropic Mesh
 * Adaptation": eigenvalues of the Hessian are scaled by the interpolation
 * error and clamped by the admissible element sizes, then optionally made
 * (an)isotropic before recomposing the tensor.
 * @param rHessian Nodal Hessian stored as a symmetric-tensor vector
 * @param AnisotropicRatio hmin/hmax ratio; < 1.0 activates anisotropy
 * @param ElementMinSize Minimum admissible element size at this node
 * @param ElementMaxSize Maximum admissible element size at this node
 * @param NodalH Current nodal size (used when estimating the interpolation error)
 * @return The metric tensor as a 3-component (2D) or 6-component (3D) array
 */
template<SizeType TDim>
array_1d<double, 3 * (TDim - 1)> ComputeHessianSolMetricProcess::ComputeHessianMetricTensor(
    const Vector& rHessian,
    const double AnisotropicRatio,
    const double ElementMinSize, // This way we can impose as minimum as the previous size if we desire
    const double ElementMaxSize, // This way we can impose as maximum as the previous size if we desire
    const double NodalH
    )
{
    /// The type of array considered for the tensor
    typedef typename std::conditional<TDim == 2, array_1d<double, 3>, array_1d<double, 6>>::type TensorArrayType;
    /// Matrix type definition
    typedef BoundedMatrix<double, TDim, TDim> MatrixType;
    // We first transform the Hessian into a matrix
    const MatrixType hessian_matrix = MathUtils<double>::VectorToSymmetricTensor<Vector, MatrixType>(rHessian);
    // Calculating Metric parameters (using equation from remark 4.2.2 on Metric-Based Anisotropic Mesh Adaptation)
    double interpolation_error = mInterpError;
    if (mEstimateInterpError) {
        interpolation_error = mMeshConstant * MathUtils<double>::Max(NodalH, NodalH * norm_frobenius(hessian_matrix)); // NOTE: To compute it properly instead of iterating over the nodes you should iterate over the elements and instead of ElementMaxSize you should iterate over the edges, this is equivalent when using nodes and computing NodalH previously
    }
    // Declaring the eigen system (iterative decomposition, tol 1e-18, max 20 iterations)
    MatrixType eigen_vector_matrix, eigen_values_matrix;
    MathUtils<double>::EigenSystem<TDim>(hessian_matrix, eigen_vector_matrix, eigen_values_matrix, 1e-18, 20);
    // We check is the interpolation error is near zero. If it is we will correct it
    if (interpolation_error < std::numeric_limits<double>::epsilon()) { // In practice, the Hessian of function u can be 0, e.g. if u is linear, then |Hu| is not definite. In this particular case, the interpolation error is 0 and we want to prescribe a mesh size which is infinite. To solve this issue, this infinite size prescription is truncated by imposing maximal size hmax . This is equivalent to truncate tiny eigenvalues by lambda = 1/hmax^2 . See [1] pag. 34
        KRATOS_WARNING("ComputeHessianSolMetricProcess") << "WARNING: Your interpolation error is near zero: " << interpolation_error << ". Computing a local L(inf) upper bound of the interpolation error"<< std::endl;
        const double l_square_minus1 = 1.0/std::pow(ElementMaxSize, 2);
        for (IndexType i = 0; i < TDim; ++i) {
            eigen_values_matrix(i, i) = l_square_minus1;
        }
    } else { // Equation 4.4 from Metric-Based Anisotropic Mesh Adaptation
        const double c_epsilon = mMeshConstant/interpolation_error;
        const double min_ratio = 1.0/std::pow(ElementMinSize, 2);
        const double max_ratio = 1.0/std::pow(ElementMaxSize, 2);
        // Recalculate the Metric eigen values: scale by c_epsilon and clamp to
        // [1/hmax^2, 1/hmin^2] (larger eigenvalue <=> smaller element size)
        for (IndexType i = 0; i < TDim; ++i) {
            eigen_values_matrix(i, i) = MathUtils<double>::Min(MathUtils<double>::Max(c_epsilon * std::abs(eigen_values_matrix(i, i)), max_ratio), min_ratio);
        }
    }
    // Considering anisotropic
    if (AnisotropicRatio < 1.0) {
        // Shrink the eigenvalue spread towards eigen_max according to the ratio
        double eigen_max = eigen_values_matrix(0, 0);
        double eigen_min = eigen_values_matrix(0, 0);
        for (IndexType i = 1; i < TDim; ++i) {
            eigen_max = MathUtils<double>::Max(eigen_max, eigen_values_matrix(i, i));
            eigen_min = MathUtils<double>::Min(eigen_min, eigen_values_matrix(i, i));
        }
        const double eigen_radius = std::abs(eigen_max - eigen_min) * (1.0 - AnisotropicRatio);
        const double relative_eigen_radius = std::abs(eigen_max - eigen_radius);
        for (IndexType i = 0; i < TDim; ++i)
            eigen_values_matrix(i, i) = MathUtils<double>::Max(MathUtils<double>::Min(eigen_values_matrix(i, i), eigen_max), relative_eigen_radius);
    } else { // NOTE: For isotropic we should consider the maximum of the eigenvalues
        double eigen_max = eigen_values_matrix(0, 0);
        for (IndexType i = 1; i < TDim; ++i)
            eigen_max = MathUtils<double>::Max(eigen_max, eigen_values_matrix(i, i));
        for (IndexType i = 0; i < TDim; ++i)
            eigen_values_matrix(i, i) = eigen_max;
        // All eigenvalues equal => any orthonormal basis works; use identity
        eigen_vector_matrix = IdentityMatrix(TDim, TDim);
    }
    // We compute the product: M = P^T * D * P recomposes the metric tensor
    const MatrixType& metric_matrix = prod(trans(eigen_vector_matrix), prod<MatrixType>(eigen_values_matrix, eigen_vector_matrix));
    // Finally we transform to a vector
    const TensorArrayType& metric = MathUtils<double>::StressTensorToVector<MatrixType, TensorArrayType>(metric_matrix);
    return metric;
}
/***********************************************************************************/
/***********************************************************************************/
/**
 * @brief Assembles the nodal Hessian of the origin variable.
 * Two passes: (1) a ComputeNodalGradientProcess fills AUXILIAR_GRADIENT and
 * NODAL_AREA; (2) an element loop integrates the gradient of that gradient
 * at each Gauss point and scatters it to the nodes (OpenMP-parallel, with
 * atomic accumulation), then divides by the nodal area.
 * The 2D and 3D branches are structurally identical and differ only in the
 * tensor sizes (3 vs 6 Voigt components).
 */
void ComputeHessianSolMetricProcess::CalculateAuxiliarHessian()
{
    // Iterate in the elements
    ElementsArrayType& elements_array = mThisModelPart.Elements();
    const int num_elements = static_cast<int>(elements_array.size());
    const auto& it_element_begin = elements_array.begin();
    // Geometry information
    const std::size_t dimension = mThisModelPart.GetProcessInfo()[DOMAIN_SIZE];
    // Declaring auxiliar vector (3 components in 2D, 6 in 3D)
    const Vector aux_zero_hessian = ZeroVector(3 * (dimension - 1));
    const array_1d<double, 3> aux_zero_vector = ZeroVector(3);
    // Iterate in the nodes
    NodesArrayType& nodes_array = mThisModelPart.Nodes();
    const int num_nodes = static_cast<int>(nodes_array.size());
    // Initialize auxiliar variables: zero the accumulators on every node
    const auto& it_nodes_begin = nodes_array.begin();
    #pragma omp parallel for
    for(int i_node = 0; i_node < num_nodes; ++i_node) {
        auto it_node = it_nodes_begin + i_node;
        it_node->SetValue(NODAL_AREA, 0.0);
        it_node->SetValue(AUXILIAR_HESSIAN, aux_zero_hessian);
        it_node->SetValue(AUXILIAR_GRADIENT, aux_zero_vector);
    }
    // Compute auxiliar gradient of the origin variable (scalar or component)
    if (mrOriginVariableDoubleList.size() > 0) {
        auto gradient_process = ComputeNodalGradientProcess<ComputeNodalGradientProcessSettings::SaveAsNonHistoricalVariable>(mThisModelPart, *mrOriginVariableDoubleList[0], AUXILIAR_GRADIENT, NODAL_AREA);
        gradient_process.Execute();
    } else {
        auto gradient_process = ComputeNodalGradientProcess<ComputeNodalGradientProcessSettings::SaveAsNonHistoricalVariable>(mThisModelPart, *mrOriginVariableComponentsList[0], AUXILIAR_GRADIENT, NODAL_AREA);
        gradient_process.Execute();
    }
    // Auxiliar containers (firstprivate below gives each thread its own copy)
    Matrix DN_DX, J0;
    Vector N;
    #pragma omp parallel for firstprivate(DN_DX, N, J0)
    for(int i_elem = 0; i_elem < num_elements; ++i_elem) {
        auto it_elem = it_element_begin + i_elem;
        auto& r_geometry = it_elem->GetGeometry();
        // Current geometry information
        const std::size_t local_space_dimension = r_geometry.LocalSpaceDimension();
        const std::size_t number_of_nodes = r_geometry.PointsNumber();
        // Resize if needed (containers are reused across elements)
        if (DN_DX.size1() != number_of_nodes || DN_DX.size2() != dimension)
            DN_DX.resize(number_of_nodes, dimension);
        if (N.size() != number_of_nodes)
            N.resize(number_of_nodes);
        if (J0.size1() != dimension || J0.size2() != local_space_dimension)
            J0.resize(dimension, local_space_dimension);
        // The integration points
        const auto& integration_method = r_geometry.GetDefaultIntegrationMethod();
        const auto& integration_points = r_geometry.IntegrationPoints(integration_method);
        const std::size_t number_of_integration_points = integration_points.size();
        // The containers of the shape functions and the local gradients
        const auto& rNcontainer = r_geometry.ShapeFunctionsValues(integration_method);
        const auto& rDN_DeContainer = r_geometry.ShapeFunctionsLocalGradients(integration_method);
        // 2D case
        if (dimension == 2) {
            for ( IndexType point_number = 0; point_number < number_of_integration_points; ++point_number ) {
                // Getting the shape functions
                noalias(N) = row(rNcontainer, point_number);
                // Getting the jacobians and local gradients
                GeometryUtils::JacobianOnInitialConfiguration(r_geometry, integration_points[point_number], J0);
                double detJ0;
                Matrix InvJ0;
                MathUtils<double>::InvertMatrix(J0, InvJ0, detJ0);
                const Matrix& rDN_De = rDN_DeContainer[point_number];
                GeometryUtils::ShapeFunctionsGradients(rDN_De, InvJ0, DN_DX);
                const double gauss_point_volume = integration_points[point_number].Weight() * detJ0;
                // Gather the nodal gradients of this element into a matrix
                Matrix values(number_of_nodes, 2);
                for(IndexType i_node = 0; i_node < number_of_nodes; ++i_node) {
                    const array_1d<double, 3>& aux_grad = r_geometry[i_node].GetValue(AUXILIAR_GRADIENT);
                    for (IndexType i_dim = 0; i_dim < 2; ++i_dim)
                        values(i_node, i_dim) = aux_grad[i_dim];
                }
                // Hessian at the Gauss point = gradient of the nodal gradients
                const BoundedMatrix<double,2, 2>& hessian = prod(trans(DN_DX), values);
                const array_1d<double, 3>& hessian_cond = MathUtils<double>::StressTensorToVector<BoundedMatrix<double, 2, 2>, array_1d<double, 3>>(hessian);
                // Scatter the weighted Hessian to the element nodes; atomic
                // because several elements (threads) may share a node
                for(IndexType i_node = 0; i_node < number_of_nodes; ++i_node) {
                    auto& aux_hessian = r_geometry[i_node].GetValue(AUXILIAR_HESSIAN);
                    for(IndexType k = 0; k < 3; ++k) {
                        #pragma omp atomic
                        aux_hessian[k] += N[i_node] * gauss_point_volume * hessian_cond[k];
                    }
                }
            }
        } else { // 3D case (same algorithm with 3x3 tensors / 6 Voigt components)
            for ( IndexType point_number = 0; point_number < number_of_integration_points; ++point_number ) {
                // Getting the shape functions
                noalias(N) = row(rNcontainer, point_number);
                // Getting the jacobians and local gradients
                GeometryUtils::JacobianOnInitialConfiguration(r_geometry, integration_points[point_number], J0);
                double detJ0;
                Matrix InvJ0;
                MathUtils<double>::InvertMatrix(J0, InvJ0, detJ0);
                const Matrix& rDN_De = rDN_DeContainer[point_number];
                GeometryUtils::ShapeFunctionsGradients(rDN_De, InvJ0, DN_DX);
                const double gauss_point_volume = integration_points[point_number].Weight() * detJ0;
                Matrix values(number_of_nodes, 3);
                for(IndexType i_node = 0; i_node < number_of_nodes; ++i_node) {
                    const array_1d<double, 3>& aux_grad = r_geometry[i_node].GetValue(AUXILIAR_GRADIENT);
                    for (IndexType i_dim = 0; i_dim < 3; ++i_dim)
                        values(i_node, i_dim) = aux_grad[i_dim];
                }
                const BoundedMatrix<double, 3, 3> hessian = prod(trans(DN_DX), values);
                const array_1d<double, 6>& hessian_cond = MathUtils<double>::StressTensorToVector<BoundedMatrix<double, 3, 3>, array_1d<double, 6>>(hessian);
                for(IndexType i_node = 0; i_node < number_of_nodes; ++i_node) {
                    auto& aux_hessian = r_geometry[i_node].GetValue(AUXILIAR_HESSIAN);
                    for(IndexType k = 0; k < 6; ++k) {
                        #pragma omp atomic
                        aux_hessian[k] += N[i_node] * gauss_point_volume * hessian_cond[k];
                    }
                }
            }
        }
    }
    // Normalize: divide the accumulated Hessian by the nodal area (skip
    // nodes with zero area, e.g. nodes not attached to any element)
    #pragma omp parallel for
    for(int i_node = 0; i_node < num_nodes; ++i_node) {
        auto it_node = nodes_array.begin() + i_node;
        const double nodal_area = it_node->GetValue(NODAL_AREA);
        if (nodal_area > std::numeric_limits<double>::epsilon()) {
            it_node->GetValue(AUXILIAR_HESSIAN) /= nodal_area;
        }
    }
}
/***********************************************************************************/
/***********************************************************************************/
/**
 * @brief Computes the anisotropic ratio at a node from its distance to the
 * reference surface.
 * Outside the boundary layer (|Distance| > BoundLayer) or when the configured
 * ratio is already 1.0 the mesh stays isotropic (ratio 1.0); inside, the
 * ratio is interpolated between AnisotropicRatio and 1.0 per the chosen law.
 * @param Distance Signed distance value of the reference variable
 * @param AnisotropicRatio Configured hmin/hmax ratio (< 1.0 => anisotropic)
 * @param BoundLayer Maximum distance of the anisotropic boundary layer
 * @param rInterpolation Interpolation law (CONSTANT/LINEAR/EXPONENTIAL)
 * @return The ratio in (0, 1.0]
 */
double ComputeHessianSolMetricProcess::CalculateAnisotropicRatio(
    const double Distance,
    const double AnisotropicRatio,
    const double BoundLayer,
    const Interpolation rInterpolation
    )
{
    const double tolerance = 1.0e-12;
    double ratio = 1.0; // NOTE: Isotropic mesh by default
    const double abs_distance = std::abs(Distance);
    if (AnisotropicRatio < 1.0 && abs_distance <= BoundLayer) {
        switch (rInterpolation) {
            case Interpolation::CONSTANT:
                ratio = AnisotropicRatio;
                break;
            case Interpolation::LINEAR:
                ratio = AnisotropicRatio + (abs_distance/BoundLayer) * (1.0 - AnisotropicRatio);
                break;
            case Interpolation::EXPONENTIAL:
                ratio = - std::log(abs_distance/BoundLayer) * AnisotropicRatio + tolerance;
                if (ratio > 1.0) ratio = 1.0;
                break;
        }
    }
    return ratio;
}
/***********************************************************************************/
/***********************************************************************************/
/**
 * @brief Computes and stores METRIC_TENSOR_2D/3D on every node.
 * For each node: bounds the admissible element sizes by the current NODAL_H
 * (if enforce_current is active), evaluates the anisotropic ratio from the
 * reference variable, builds the Hessian-based metric, and intersects it
 * with any previously stored metric. OpenMP-parallel over nodes.
 */
template<SizeType TDim>
void ComputeHessianSolMetricProcess::CalculateMetric()
{
    /// The type of array considered for the tensor
    typedef typename std::conditional<TDim == 2, array_1d<double, 3>, array_1d<double, 6>>::type TensorArrayType;
    // Iterate in the nodes
    NodesArrayType& nodes_array = mThisModelPart.Nodes();
    const int num_nodes = static_cast<int>(nodes_array.size());
    // Tensor variable definition (registered as METRIC_TENSOR_2D / METRIC_TENSOR_3D)
    const Variable<TensorArrayType>& tensor_variable = KratosComponents<Variable<TensorArrayType>>::Get("METRIC_TENSOR_"+std::to_string(TDim)+"D");
    // Setting metric in case not defined (checks only the first node;
    // presumably all nodes are initialized uniformly — the zero metric is
    // then broadcast to the whole array)
    const auto it_node_begin = nodes_array.begin();
    if (!it_node_begin->Has(tensor_variable)) {
        // Declaring auxiliar vector
        const TensorArrayType aux_zero_vector = ZeroVector(3 * (TDim - 1));
        VariableUtils().SetNonHistoricalVariable(tensor_variable, aux_zero_vector, nodes_array);
    }
    // Ratio reference variable (e.g. DISTANCE) must be a registered double variable
    KRATOS_ERROR_IF_NOT(KratosComponents<Variable<double>>::Has(mRatioReferenceVariable)) << "Variable " << mRatioReferenceVariable << " is not a double variable" << std::endl;
    const auto& reference_var = KratosComponents<Variable<double>>::Get(mRatioReferenceVariable);
    #pragma omp parallel for
    for(int i = 0; i < num_nodes; ++i) {
        auto it_node = it_node_begin + i;
        const Vector& hessian = it_node->GetValue(AUXILIAR_HESSIAN);
        const double nodal_h = it_node->GetValue(NODAL_H);
        // Clamp the configured sizes by the current nodal size when requested
        double element_min_size = mMinSize;
        if ((element_min_size > nodal_h) && mEnforceCurrent) element_min_size = nodal_h;
        double element_max_size = mMaxSize;
        if ((element_max_size > nodal_h) && mEnforceCurrent) element_max_size = nodal_h;
        // Isotropic by default
        double ratio = 1.0;
        if (it_node->SolutionStepsDataHas(reference_var)) {
            const double ratio_reference = it_node->FastGetSolutionStepValue(reference_var);
            ratio = CalculateAnisotropicRatio(ratio_reference, mAnisotropicRatio, mBoundLayer, mInterpolation);
        }
        // For postprocess pourposes
        it_node->SetValue(ANISOTROPIC_RATIO, ratio);
        // We compute the metric
        KRATOS_DEBUG_ERROR_IF_NOT(it_node->Has(tensor_variable)) << "METRIC_TENSOR_" + std::to_string(TDim) + "D not defined for node " << it_node->Id() << std::endl;
        TensorArrayType& metric = it_node->GetValue(tensor_variable);
        const double norm_metric = norm_2(metric);
        if (norm_metric > 0.0) {// NOTE: This means we combine differents metrics, at the same time means that the metric should be reseted each time
            const TensorArrayType& old_metric = it_node->GetValue(tensor_variable);
            const TensorArrayType& new_metric = ComputeHessianMetricTensor<TDim>(hessian, ratio, element_min_size, element_max_size, nodal_h);
            metric = MetricsMathUtils<TDim>::IntersectMetrics(old_metric, new_metric);
        } else {
            metric = ComputeHessianMetricTensor<TDim>(hessian, ratio, element_min_size, element_max_size, nodal_h);
        }
    }
}
/***********************************************************************************/
/***********************************************************************************/
/**
 * @brief Returns the default settings of the process.
 * The mesh-dependent constant is dimension-dependent and is overwritten
 * after parsing: 2/9 in 2D, 9/32 in 3D (the JSON default 0.28125 equals
 * 9/32 and is only a placeholder).
 * @return The defaults as a Parameters object
 */
Parameters ComputeHessianSolMetricProcess::GetDefaultParameters() const
{
    Parameters default_parameters = Parameters(R"(
    {
        "minimal_size"                        : 0.1,
        "maximal_size"                        : 10.0,
        "enforce_current"                     : true,
        "hessian_strategy_parameters":
        {
            "metric_variable"                  : ["DISTANCE"],
            "estimate_interpolation_error"     : false,
            "interpolation_error"              : 1.0e-6,
            "mesh_dependent_constant"          : 0.28125
        },
        "anisotropy_remeshing"                : true,
        "anisotropy_parameters":
        {
            "reference_variable_name"          : "DISTANCE",
            "hmin_over_hmax_anisotropic_ratio" : 1.0,
            "boundary_layer_max_distance"      : 1.0,
            "interpolation"                    : "Linear"
        }
    })" );
    // Identify the dimension first
    const SizeType dimension = mThisModelPart.GetProcessInfo()[DOMAIN_SIZE];
    // The mesh dependent constant depends on dimension
    if (dimension == 2) {
        default_parameters["hessian_strategy_parameters"]["mesh_dependent_constant"].SetDouble(2.0/9.0);
    } else if (dimension == 3) {
        default_parameters["hessian_strategy_parameters"]["mesh_dependent_constant"].SetDouble(9.0/32.0);
    } else {
        KRATOS_ERROR << "Dimension can be only 2D or 3D. Dimension: " << dimension << std::endl;
    }
    return default_parameters;
}
/***********************************************************************************/
/***********************************************************************************/
/**
 * @brief Caches the validated parameters into member variables.
 * When "anisotropy_remeshing" is disabled the hessian/anisotropy settings
 * are read from the defaults (yielding an isotropic configuration) instead
 * of the user-provided values; otherwise the user values are used.
 * Refactored to select the parameter source once instead of duplicating the
 * seven assignments in both branches (the two branches previously differed
 * only in whether they read from ThisParameters or from the defaults).
 * @param ThisParameters The already-validated user settings
 */
void ComputeHessianSolMetricProcess::InitializeVariables(Parameters ThisParameters)
{
    // Get default variables (fallback when anisotropic remeshing is disabled)
    Parameters default_parameters = GetDefaultParameters();
    // Sizing options are always taken from the user settings
    mMinSize = ThisParameters["minimal_size"].GetDouble();
    mMaxSize = ThisParameters["maximal_size"].GetDouble();
    mEnforceCurrent = ThisParameters["enforce_current"].GetBool();
    // Select where the hessian/anisotropy settings come from:
    // user values when anisotropic remeshing is enabled, defaults otherwise
    Parameters& r_source = ThisParameters["anisotropy_remeshing"].GetBool() ? ThisParameters : default_parameters;
    mEstimateInterpError = r_source["hessian_strategy_parameters"]["estimate_interpolation_error"].GetBool();
    mInterpError = r_source["hessian_strategy_parameters"]["interpolation_error"].GetDouble();
    mMeshConstant = r_source["hessian_strategy_parameters"]["mesh_dependent_constant"].GetDouble();
    mRatioReferenceVariable = r_source["anisotropy_parameters"]["reference_variable_name"].GetString();
    mAnisotropicRatio = r_source["anisotropy_parameters"]["hmin_over_hmax_anisotropic_ratio"].GetDouble();
    mBoundLayer = r_source["anisotropy_parameters"]["boundary_layer_max_distance"].GetDouble();
    mInterpolation = ConvertInter(r_source["anisotropy_parameters"]["interpolation"].GetString());
}
};// namespace Kratos.
|
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2014 The Bitcoin developers
// Copyright (c) 2015-2017 The WIRE developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "merkleblock.h"
#include "hash.h"
#include "primitives/block.h" // for MAX_BLOCK_SIZE
#include "utilstrencodings.h"
using namespace std;
// Build a merkle block for SPV clients: keep the header, record which
// transactions match the bloom filter (filter may be updated as a side
// effect of IsRelevantAndUpdate), and encode all txids in a partial
// merkle tree covering the matches.
CMerkleBlock::CMerkleBlock(const CBlock& block, CBloomFilter& filter)
{
    header = block.GetBlockHeader();
    const unsigned int nTx = block.vtx.size();
    vector<bool> vMatch;
    vector<uint256> vHashes;
    vMatch.reserve(nTx);
    vHashes.reserve(nTx);
    for (unsigned int i = 0; i < nTx; i++) {
        const uint256& hash = block.vtx[i].GetHash();
        const bool fMatched = filter.IsRelevantAndUpdate(block.vtx[i]);
        vMatch.push_back(fMatched);
        vHashes.push_back(hash);
        // Remember (index, txid) of each filter hit for the caller
        if (fMatched)
            vMatchedTxn.push_back(make_pair(i, hash));
    }
    txn = CPartialMerkleTree(vHashes, vMatch);
}
// Compute the merkle hash of the node at (height, pos) from the full txid
// list. Leaves (height 0) are the txids themselves; internal nodes hash the
// concatenation of their children, duplicating the left child when the right
// one falls past the end of the row (standard Bitcoin merkle rule).
uint256 CPartialMerkleTree::CalcHash(int height, unsigned int pos, const std::vector<uint256>& vTxid)
{
    if (height == 0)
        return vTxid[pos];
    uint256 left = CalcHash(height - 1, pos * 2, vTxid);
    uint256 right = (pos * 2 + 1 < CalcTreeWidth(height - 1))
                        ? CalcHash(height - 1, pos * 2 + 1, vTxid)
                        : left;
    return Hash(BEGIN(left), END(left), BEGIN(right), END(right));
}
// Recursively encode the subtree rooted at (height, pos) in depth-first
// order: one flag bit per visited node (does its subtree contain a match?),
// and a stored hash for every node that is a leaf or whose subtree contains
// no match (those subtrees are pruned).
void CPartialMerkleTree::TraverseAndBuild(int height, unsigned int pos, const std::vector<uint256>& vTxid, const std::vector<bool>& vMatch)
{
    // determine whether this node is the parent of at least one matched txid
    // (p ranges over the leaf/txid indices covered by this subtree)
    bool fParentOfMatch = false;
    for (unsigned int p = pos << height; p < (pos + 1) << height && p < nTransactions; p++)
        fParentOfMatch |= vMatch[p];
    // store as flag bit
    vBits.push_back(fParentOfMatch);
    if (height == 0 || !fParentOfMatch) {
        // if at height 0, or nothing interesting below, store hash and stop
        vHash.push_back(CalcHash(height, pos, vTxid));
    } else {
        // otherwise, don't store any hash, but descend into the subtrees
        TraverseAndBuild(height - 1, pos * 2, vTxid, vMatch);
        if (pos * 2 + 1 < CalcTreeWidth(height - 1))
            TraverseAndBuild(height - 1, pos * 2 + 1, vTxid, vMatch);
    }
}
// Recursively decode the partial tree at (height, pos), consuming flag bits
// and stored hashes, collecting matched txids into vMatch, and returning the
// reconstructed hash of this node. Sets fBad on any inconsistency.
// FIX: reject identical left/right subtree hashes. Duplicated subtrees let an
// attacker produce two distinct transaction lists with the same merkle root
// (CVE-2012-2459), which could be used to poison a client's view of a block;
// legitimate trees never hash two identical children at this point.
uint256 CPartialMerkleTree::TraverseAndExtract(int height, unsigned int pos, unsigned int& nBitsUsed, unsigned int& nHashUsed, std::vector<uint256>& vMatch)
{
    if (nBitsUsed >= vBits.size()) {
        // overflowed the bits array - failure
        fBad = true;
        return 0;
    }
    bool fParentOfMatch = vBits[nBitsUsed++];
    if (height == 0 || !fParentOfMatch) {
        // if at height 0, or nothing interesting below, use stored hash and do not descend
        if (nHashUsed >= vHash.size()) {
            // overflowed the hash array - failure
            fBad = true;
            return 0;
        }
        const uint256& hash = vHash[nHashUsed++];
        if (height == 0 && fParentOfMatch) // in case of height 0, we have a matched txid
            vMatch.push_back(hash);
        return hash;
    } else {
        // otherwise, descend into the subtrees to extract matched txids and hashes
        uint256 left = TraverseAndExtract(height - 1, pos * 2, nBitsUsed, nHashUsed, vMatch), right;
        if (pos * 2 + 1 < CalcTreeWidth(height - 1)) {
            right = TraverseAndExtract(height - 1, pos * 2 + 1, nBitsUsed, nHashUsed, vMatch);
            // The left and right branches should never be identical, as the
            // transaction hashes covered by them must each be unique
            // (CVE-2012-2459 mitigation).
            if (right == left)
                fBad = true;
        } else {
            right = left;
        }
        // and combine them before returning
        return Hash(BEGIN(left), END(left), BEGIN(right), END(right));
    }
}
// Construct a partial merkle tree over the given txids, pruning every
// subtree that contains no matched transaction.
CPartialMerkleTree::CPartialMerkleTree(const std::vector<uint256>& vTxid, const std::vector<bool>& vMatch) : nTransactions(vTxid.size()), fBad(false)
{
    // start from an empty flag/hash encoding
    vBits.clear();
    vHash.clear();
    // height of the smallest tree whose top row holds a single node
    int nHeight;
    for (nHeight = 0; CalcTreeWidth(nHeight) > 1; ++nHeight)
        ;
    // depth-first build of the flag bits and pruned hash list
    TraverseAndBuild(nHeight, 0, vTxid, vMatch);
}
// Default-constructed trees are marked bad so ExtractMatches() rejects them.
CPartialMerkleTree::CPartialMerkleTree() : nTransactions(0), fBad(true) {}
// Validate the encoded partial tree and extract the txids it marks as
// matched into vMatch (cleared first).  Returns the reconstructed merkle
// root on success, or 0 when the encoding is malformed or inconsistent.
uint256 CPartialMerkleTree::ExtractMatches(std::vector<uint256>& vMatch)
{
    vMatch.clear();
    // An empty set will not work
    if (nTransactions == 0)
        return 0;
    // check for excessively high numbers of transactions
    if (nTransactions > MAX_BLOCK_SIZE_CURRENT / 60) // 60 is the lower bound for the size of a serialized CTransaction
        return 0;
    // there can never be more hashes provided than one for every txid
    if (vHash.size() > nTransactions)
        return 0;
    // there must be at least one bit per node in the partial tree, and at least one node per hash
    if (vBits.size() < vHash.size())
        return 0;
    // calculate height of tree
    int nHeight = 0;
    while (CalcTreeWidth(nHeight) > 1)
        nHeight++;
    // traverse the partial tree
    unsigned int nBitsUsed = 0, nHashUsed = 0;
    uint256 hashMerkleRoot = TraverseAndExtract(nHeight, 0, nBitsUsed, nHashUsed, vMatch);
    // verify that no problems occured during the tree traversal
    if (fBad)
        return 0;
    // verify that all bits were consumed (except for the padding caused by serializing it as a byte sequence)
    if ((nBitsUsed + 7) / 8 != (vBits.size() + 7) / 8)
        return 0;
    // verify that all hashes were consumed
    if (nHashUsed != vHash.size())
        return 0;
    return hashMerkleRoot;
}
|
// Copyright (c) 2011-2013 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <QApplication>
#include "bitcoingui.h"
#include "transactiontablemodel.h"
#include "optionsdialog.h"
#include "aboutdialog.h"
#include "clientmodel.h"
#include "walletmodel.h"
#include "walletframe.h"
#include "optionsmodel.h"
#include "transactiondescdialog.h"
#include "bitcoinunits.h"
#include "guiconstants.h"
#include "notificator.h"
#include "guiutil.h"
#include "rpcconsole.h"
#include "ui_interface.h"
#include "wallet.h"
#include "init.h"
#ifdef Q_OS_MAC
#include "macdockiconhandler.h"
#endif
#include <QMenuBar>
#include <QMenu>
#include <QIcon>
#include <QVBoxLayout>
#include <QToolBar>
#include <QStatusBar>
#include <QLabel>
#include <QMessageBox>
#include <QProgressBar>
#include <QStackedWidget>
#include <QDateTime>
#include <QMovie>
#include <QTimer>
#include <QDragEnterEvent>
#if QT_VERSION < 0x050000
#include <QUrl>
#endif
#include <QMimeData>
#include <QStyle>
#include <QSettings>
#include <QDesktopWidget>
#include <QListWidget>
#include <iostream>
const QString BitcoinGUI::DEFAULT_WALLET = "~Default";
// Main window constructor: restores geometry, builds the wallet frame,
// actions, menus, toolbars, tray icon, status bar widgets and the RPC
// console.  Uses Qt4-style SIGNAL/SLOT string-based connections throughout.
// NOTE(review): child widgets created with `new` here are parented to Qt
// objects (layouts / this window) and are presumably cleaned up by Qt's
// parent-child ownership — confirm against the Qt object tree docs.
BitcoinGUI::BitcoinGUI(QWidget *parent) :
    QMainWindow(parent),
    clientModel(0),
    encryptWalletAction(0),
    changePassphraseAction(0),
    aboutQtAction(0),
    trayIcon(0),
    notificator(0),
    rpcConsole(0),
    prevBlocks(0)
{
    restoreWindowGeometry();
    setWindowTitle(tr("Bitblocks") + " - " + tr("Wallet"));
#ifndef Q_OS_MAC
    QApplication::setWindowIcon(QIcon(":icons/bitcoin"));
    setWindowIcon(QIcon(":icons/bitcoin"));
#else
    // On macOS the app icon comes from the bundle; integrate with the native
    // unified toolbar and menu conventions instead
    setUnifiedTitleAndToolBarOnMac(true);
    QApplication::setAttribute(Qt::AA_DontShowIconsInMenus);
#endif
    // Create wallet frame and make it the central widget
    walletFrame = new WalletFrame(this);
    setCentralWidget(walletFrame);
    // Accept D&D of URIs
    setAcceptDrops(true);
    // Create actions for the toolbar, menu bar and tray/dock icon
    // Needs walletFrame to be initialized
    createActions();
    // Create application menu bar
    createMenuBar();
    // Create the toolbars
    createToolBars();
    // Create system tray icon and notification
    createTrayIcon();
    // Create status bar
    statusBar();
    // Status bar notification icons (encryption / connections / sync state),
    // laid out in a fixed-width frame on the right of the status bar
    QFrame *frameBlocks = new QFrame();
    frameBlocks->setContentsMargins(0,0,0,0);
    frameBlocks->setMinimumWidth(56);
    frameBlocks->setMaximumWidth(56);
    QHBoxLayout *frameBlocksLayout = new QHBoxLayout(frameBlocks);
    frameBlocksLayout->setContentsMargins(3,0,3,0);
    frameBlocksLayout->setSpacing(3);
    labelEncryptionIcon = new QLabel();
    labelConnectionsIcon = new QLabel();
    labelBlocksIcon = new QLabel();
    frameBlocksLayout->addStretch();
    frameBlocksLayout->addWidget(labelEncryptionIcon);
    frameBlocksLayout->addStretch();
    frameBlocksLayout->addWidget(labelConnectionsIcon);
    frameBlocksLayout->addStretch();
    frameBlocksLayout->addWidget(labelBlocksIcon);
    frameBlocksLayout->addStretch();
    // Progress bar and label for blocks download
    progressBarLabel = new QLabel();
    progressBarLabel->setVisible(false);
    progressBar = new QProgressBar();
    progressBar->setAlignment(Qt::AlignCenter);
    progressBar->setVisible(false);
    // Override style sheet for progress bar for styles that have a segmented progress bar,
    // as they make the text unreadable (workaround for issue #1071)
    // See https://qt-project.org/doc/qt-4.8/gallery.html
    QString curStyle = QApplication::style()->metaObject()->className();
    if(curStyle == "QWindowsStyle" || curStyle == "QWindowsXPStyle")
    {
        progressBar->setStyleSheet("QProgressBar { background-color: #e8e8e8; border: 1px solid grey; border-radius: 7px; padding: 1px; text-align: center; } QProgressBar::chunk { background: QLinearGradient(x1: 0, y1: 0, x2: 1, y2: 0, stop: 0 #FF8000, stop: 1 orange); border-radius: 7px; margin: 0px; }");
    }
    statusBar()->addWidget(progressBarLabel);
    statusBar()->addWidget(progressBar);
    statusBar()->addPermanentWidget(frameBlocks);
    // Spinner animation shown while the block chain is syncing
    syncIconMovie = new QMovie(":/movies/update_spinner", "mng", this);
    rpcConsole = new RPCConsole(this);
    connect(openRPCConsoleAction, SIGNAL(triggered()), rpcConsole, SLOT(show()));
    // prevents an open debug window from becoming stuck/unusable on client shutdown
    connect(quitAction, SIGNAL(triggered()), rpcConsole, SLOT(hide()));
    // Install event filter to be able to catch status tip events (QEvent::StatusTip)
    this->installEventFilter(this);
    // Initially wallet actions should be disabled
    setWalletActionsEnabled(false);
}
// Destructor: persist the window geometry, then hide the tray icon before
// teardown — deleting it would let it linger until quit (on Ubuntu).
BitcoinGUI::~BitcoinGUI()
{
    saveWindowGeometry();
    if (trayIcon != 0) {
        trayIcon->hide();
    }
#ifdef Q_OS_MAC
    // The menu bar is created without a parent on macOS, so delete it
    // explicitly and detach the dock icon handler from this window
    delete appMenuBar;
    MacDockIconHandler::instance()->setMainWindow(NULL);
#endif
}
// Creates every QAction used by the menus, toolbar and tray icon, and wires
// their triggered() signals. Statement order matters: actions are created
// before the connect() calls below that reference them.
void BitcoinGUI::createActions()
{
    // The page-switching (tab) actions are checkable and mutually exclusive.
    QActionGroup *tabGroup = new QActionGroup(this);
    overviewAction = new QAction(QIcon(":/icons/overview"), tr("&Overview"), this);
    overviewAction->setStatusTip(tr("Show general overview of wallet"));
    overviewAction->setToolTip(overviewAction->statusTip());
    overviewAction->setCheckable(true);
    overviewAction->setShortcut(QKeySequence(Qt::ALT + Qt::Key_1));
    tabGroup->addAction(overviewAction);
    sendCoinsAction = new QAction(QIcon(":/icons/send"), tr("&Send"), this);
    sendCoinsAction->setStatusTip(tr("Send coins to a Bitblocks address"));
    sendCoinsAction->setToolTip(sendCoinsAction->statusTip());
    sendCoinsAction->setCheckable(true);
    sendCoinsAction->setShortcut(QKeySequence(Qt::ALT + Qt::Key_2));
    tabGroup->addAction(sendCoinsAction);
    receiveCoinsAction = new QAction(QIcon(":/icons/receiving_addresses"), tr("&Receive"), this);
    receiveCoinsAction->setStatusTip(tr("Show the list of addresses for receiving payments"));
    receiveCoinsAction->setToolTip(receiveCoinsAction->statusTip());
    receiveCoinsAction->setCheckable(true);
    receiveCoinsAction->setShortcut(QKeySequence(Qt::ALT + Qt::Key_3));
    tabGroup->addAction(receiveCoinsAction);
    historyAction = new QAction(QIcon(":/icons/history"), tr("&Transactions"), this);
    historyAction->setStatusTip(tr("Browse transaction history"));
    historyAction->setToolTip(historyAction->statusTip());
    historyAction->setCheckable(true);
    historyAction->setShortcut(QKeySequence(Qt::ALT + Qt::Key_4));
    tabGroup->addAction(historyAction);
    addressBookAction = new QAction(QIcon(":/icons/address-book"), tr("&Addresses"), this);
    addressBookAction->setStatusTip(tr("Edit the list of stored addresses and labels"));
    addressBookAction->setToolTip(addressBookAction->statusTip());
    addressBookAction->setCheckable(true);
    addressBookAction->setShortcut(QKeySequence(Qt::ALT + Qt::Key_5));
    tabGroup->addAction(addressBookAction);
    // Each tab action first restores the window, then switches the page.
    connect(overviewAction, SIGNAL(triggered()), this, SLOT(showNormalIfMinimized()));
    connect(overviewAction, SIGNAL(triggered()), this, SLOT(gotoOverviewPage()));
    connect(sendCoinsAction, SIGNAL(triggered()), this, SLOT(showNormalIfMinimized()));
    connect(sendCoinsAction, SIGNAL(triggered()), this, SLOT(gotoSendCoinsPage()));
    connect(receiveCoinsAction, SIGNAL(triggered()), this, SLOT(showNormalIfMinimized()));
    connect(receiveCoinsAction, SIGNAL(triggered()), this, SLOT(gotoReceiveCoinsPage()));
    connect(historyAction, SIGNAL(triggered()), this, SLOT(showNormalIfMinimized()));
    connect(historyAction, SIGNAL(triggered()), this, SLOT(gotoHistoryPage()));
    connect(addressBookAction, SIGNAL(triggered()), this, SLOT(showNormalIfMinimized()));
    connect(addressBookAction, SIGNAL(triggered()), this, SLOT(gotoAddressBookPage()));
    // Application-level and wallet-level actions (menus / tray icon).
    quitAction = new QAction(QIcon(":/icons/quit"), tr("E&xit"), this);
    quitAction->setStatusTip(tr("Quit application"));
    quitAction->setShortcut(QKeySequence(Qt::CTRL + Qt::Key_Q));
    quitAction->setMenuRole(QAction::QuitRole);
    aboutAction = new QAction(QIcon(":/icons/bitcoin"), tr("&About Bitblocks"), this);
    aboutAction->setStatusTip(tr("Show information about Bitblocks"));
    aboutAction->setMenuRole(QAction::AboutRole);
    aboutQtAction = new QAction(QIcon(":/trolltech/qmessagebox/images/qtlogo-64.png"), tr("About &Qt"), this);
    aboutQtAction->setStatusTip(tr("Show information about Qt"));
    aboutQtAction->setMenuRole(QAction::AboutQtRole);
    optionsAction = new QAction(QIcon(":/icons/options"), tr("&Options..."), this);
    optionsAction->setStatusTip(tr("Modify configuration options for Bitblocks"));
    optionsAction->setMenuRole(QAction::PreferencesRole);
    toggleHideAction = new QAction(QIcon(":/icons/bitcoin"), tr("&Show / Hide"), this);
    toggleHideAction->setStatusTip(tr("Show or hide the main Window"));
    encryptWalletAction = new QAction(QIcon(":/icons/lock_closed"), tr("&Encrypt Wallet..."), this);
    encryptWalletAction->setStatusTip(tr("Encrypt the private keys that belong to your wallet"));
    encryptWalletAction->setCheckable(true);
    backupWalletAction = new QAction(QIcon(":/icons/filesave"), tr("&Backup Wallet..."), this);
    backupWalletAction->setStatusTip(tr("Backup wallet to another location"));
    changePassphraseAction = new QAction(QIcon(":/icons/key"), tr("&Change Passphrase..."), this);
    changePassphraseAction->setStatusTip(tr("Change the passphrase used for wallet encryption"));
    signMessageAction = new QAction(QIcon(":/icons/edit"), tr("Sign &message..."), this);
    signMessageAction->setStatusTip(tr("Sign messages with your Bitblocks addresses to prove you own them"));
    verifyMessageAction = new QAction(QIcon(":/icons/transaction_0"), tr("&Verify message..."), this);
    verifyMessageAction->setStatusTip(tr("Verify messages to ensure they were signed with specified Bitblocks addresses"));
    openRPCConsoleAction = new QAction(QIcon(":/icons/debugwindow"), tr("&Debug window"), this);
    openRPCConsoleAction->setStatusTip(tr("Open debugging and diagnostic console"));
    connect(quitAction, SIGNAL(triggered()), qApp, SLOT(quit()));
    connect(aboutAction, SIGNAL(triggered()), this, SLOT(aboutClicked()));
    connect(aboutQtAction, SIGNAL(triggered()), qApp, SLOT(aboutQt()));
    connect(optionsAction, SIGNAL(triggered()), this, SLOT(optionsClicked()));
    connect(toggleHideAction, SIGNAL(triggered()), this, SLOT(toggleHidden()));
    // NOTE(review): these connects assume walletFrame was constructed before
    // createActions() runs — confirm against the constructor (outside this view).
    connect(encryptWalletAction, SIGNAL(triggered(bool)), walletFrame, SLOT(encryptWallet(bool)));
    connect(backupWalletAction, SIGNAL(triggered()), walletFrame, SLOT(backupWallet()));
    connect(changePassphraseAction, SIGNAL(triggered()), walletFrame, SLOT(changePassphrase()));
    connect(signMessageAction, SIGNAL(triggered()), this, SLOT(gotoSignMessageTab()));
    connect(verifyMessageAction, SIGNAL(triggered()), this, SLOT(gotoVerifyMessageTab()));
}
void BitcoinGUI::createMenuBar()
{
#ifdef Q_OS_MAC
// Create a decoupled menu bar on Mac which stays even if the window is closed
appMenuBar = new QMenuBar();
#else
// Get the main window's menu bar on other platforms
appMenuBar = menuBar();
#endif
// Configure the menus
QMenu *file = appMenuBar->addMenu(tr("&File"));
file->addAction(backupWalletAction);
file->addAction(signMessageAction);
file->addAction(verifyMessageAction);
file->addSeparator();
file->addAction(quitAction);
QMenu *settings = appMenuBar->addMenu(tr("&Settings"));
settings->addAction(encryptWalletAction);
settings->addAction(changePassphraseAction);
settings->addSeparator();
settings->addAction(optionsAction);
QMenu *help = appMenuBar->addMenu(tr("&Help"));
help->addAction(openRPCConsoleAction);
help->addSeparator();
help->addAction(aboutAction);
help->addAction(aboutQtAction);
}
void BitcoinGUI::createToolBars()
{
QToolBar *toolbar = addToolBar(tr("Tabs toolbar"));
toolbar->setToolButtonStyle(Qt::ToolButtonTextBesideIcon);
toolbar->addAction(overviewAction);
toolbar->addAction(sendCoinsAction);
toolbar->addAction(receiveCoinsAction);
toolbar->addAction(historyAction);
toolbar->addAction(addressBookAction);
}
// Attaches the client (node) model to the GUI: applies testnet branding,
// creates the tray menu, and subscribes to connection/block/message updates.
// A null model detaches nothing here; child widgets keep their old model.
void BitcoinGUI::setClientModel(ClientModel *clientModel)
{
    this->clientModel = clientModel;
    if(clientModel)
    {
        // Replace some strings and icons, when using the testnet
        if(clientModel->isTestNet())
        {
            setWindowTitle(windowTitle() + QString(" ") + tr("[testnet]"));
#ifndef Q_OS_MAC
            QApplication::setWindowIcon(QIcon(":icons/bitcoin_testnet"));
            setWindowIcon(QIcon(":icons/bitcoin_testnet"));
#else
            MacDockIconHandler::instance()->setIcon(QIcon(":icons/bitcoin_testnet"));
#endif
            if(trayIcon)
            {
                // Just attach " [testnet]" to the existing tooltip
                trayIcon->setToolTip(trayIcon->toolTip() + QString(" ") + tr("[testnet]"));
                trayIcon->setIcon(QIcon(":/icons/toolbar_testnet"));
            }
            toggleHideAction->setIcon(QIcon(":/icons/toolbar_testnet"));
            aboutAction->setIcon(QIcon(":/icons/toolbar_testnet"));
        }
        // Create the system tray menu (or set up the dock menu) this late to prevent
        // users from calling actions while the client has not yet fully loaded.
        createTrayIconMenu();
        // Keep up to date with client: seed the current values, then stay
        // subscribed for changes.
        setNumConnections(clientModel->getNumConnections());
        connect(clientModel, SIGNAL(numConnectionsChanged(int)), this, SLOT(setNumConnections(int)));
        setNumBlocks(clientModel->getNumBlocks(), clientModel->getNumBlocksOfPeers());
        connect(clientModel, SIGNAL(numBlocksChanged(int,int)), this, SLOT(setNumBlocks(int,int)));
        // Receive and report messages from network/worker thread
        connect(clientModel, SIGNAL(message(QString,QString,unsigned int)), this, SLOT(message(QString,QString,unsigned int)));
        // Propagate the model to the child widgets that need it.
        rpcConsole->setClientModel(clientModel);
        walletFrame->setClientModel(clientModel);
    }
}
// Registers a wallet with the wallet frame and re-enables the wallet actions.
// Returns whether the wallet frame accepted the wallet.
bool BitcoinGUI::addWallet(const QString& name, WalletModel *walletModel)
{
    // A wallet is (about to be) available, so its actions become usable.
    setWalletActionsEnabled(true);
    bool added = walletFrame->addWallet(name, walletModel);
    return added;
}
// Switches the active wallet by name; the wallet frame owns the wallet views.
bool BitcoinGUI::setCurrentWallet(const QString& name)
{
    bool switched = walletFrame->setCurrentWallet(name);
    return switched;
}
// Detaches every wallet from the UI and disables wallet-dependent actions.
void BitcoinGUI::removeAllWallets()
{
    // Disable first so no wallet action can fire while wallets are torn down.
    setWalletActionsEnabled(false);
    walletFrame->removeAllWallets();
}
void BitcoinGUI::setWalletActionsEnabled(bool enabled)
{
overviewAction->setEnabled(enabled);
sendCoinsAction->setEnabled(enabled);
receiveCoinsAction->setEnabled(enabled);
historyAction->setEnabled(enabled);
encryptWalletAction->setEnabled(enabled);
backupWalletAction->setEnabled(enabled);
changePassphraseAction->setEnabled(enabled);
signMessageAction->setEnabled(enabled);
verifyMessageAction->setEnabled(enabled);
addressBookAction->setEnabled(enabled);
}
// Creates the system tray icon (non-Mac) and the desktop notificator.
// On Mac the dock icon takes the tray icon's role, so none is created there.
void BitcoinGUI::createTrayIcon()
{
#ifndef Q_OS_MAC
    trayIcon = new QSystemTrayIcon(this);
    trayIcon->setToolTip(tr("Bitblocks client"));
    trayIcon->setIcon(QIcon(":/icons/toolbar"));
    trayIcon->show();
#endif
    // On Mac trayIcon stays null here; Notificator accepts that.
    notificator = new Notificator(QApplication::applicationName(), trayIcon);
}
// Builds the context menu for the tray icon (non-Mac) or the dock icon (Mac).
// Called from setClientModel() once the client is sufficiently loaded.
void BitcoinGUI::createTrayIconMenu()
{
    QMenu *trayIconMenu;
#ifndef Q_OS_MAC
    // return if trayIcon is unset (only on non-Mac OSes)
    if (!trayIcon)
        return;
    trayIconMenu = new QMenu(this);
    trayIcon->setContextMenu(trayIconMenu);
    connect(trayIcon, SIGNAL(activated(QSystemTrayIcon::ActivationReason)),
            this, SLOT(trayIconActivated(QSystemTrayIcon::ActivationReason)));
#else
    // Note: On Mac, the dock icon is used to provide the tray's functionality.
    MacDockIconHandler *dockIconHandler = MacDockIconHandler::instance();
    dockIconHandler->setMainWindow((QMainWindow *)this);
    trayIconMenu = dockIconHandler->dockMenu();
#endif
    // Configuration of the tray icon (or dock icon) icon menu
    trayIconMenu->addAction(toggleHideAction);
    trayIconMenu->addSeparator();
    trayIconMenu->addAction(sendCoinsAction);
    trayIconMenu->addAction(receiveCoinsAction);
    trayIconMenu->addSeparator();
    trayIconMenu->addAction(signMessageAction);
    trayIconMenu->addAction(verifyMessageAction);
    trayIconMenu->addSeparator();
    trayIconMenu->addAction(optionsAction);
    trayIconMenu->addAction(openRPCConsoleAction);
#ifndef Q_OS_MAC // This is built-in on Mac
    trayIconMenu->addSeparator();
    trayIconMenu->addAction(quitAction);
#endif
}
#ifndef Q_OS_MAC
// Tray icon click handler (non-Mac only; Mac uses the dock icon instead).
void BitcoinGUI::trayIconActivated(QSystemTrayIcon::ActivationReason reason)
{
    // Only a plain left-click (Trigger) toggles the main window; other
    // activation reasons (context menu, double-click, ...) are ignored.
    if(reason != QSystemTrayIcon::Trigger)
        return;
    toggleHideAction->trigger();
}
#endif
// Persists the window's size and position so the next session can restore it.
void BitcoinGUI::saveWindowGeometry()
{
    QSettings settings;
    settings.setValue("nWindowSize", size());
    settings.setValue("nWindowPos", pos());
}
void BitcoinGUI::restoreWindowGeometry()
{
QSettings settings;
QPoint pos = settings.value("nWindowPos").toPoint();
QSize size = settings.value("nWindowSize", QSize(850, 550)).toSize();
if (!pos.x() && !pos.y())
{
QRect screen = QApplication::desktop()->screenGeometry();
pos.setX((screen.width()-size.width())/2);
pos.setY((screen.height()-size.height())/2);
}
resize(size);
move(pos);
}
void BitcoinGUI::optionsClicked()
{
if(!clientModel || !clientModel->getOptionsModel())
return;
OptionsDialog dlg;
dlg.setModel(clientModel->getOptionsModel());
dlg.exec();
}
void BitcoinGUI::aboutClicked()
{
AboutDialog dlg;
dlg.setModel(clientModel);
dlg.exec();
}
// Switches the wallet frame to the overview page; no-op without a wallet UI.
void BitcoinGUI::gotoOverviewPage()
{
    if (!walletFrame)
        return;
    walletFrame->gotoOverviewPage();
}
// Switches the wallet frame to the transaction history page.
void BitcoinGUI::gotoHistoryPage()
{
    if (!walletFrame)
        return;
    walletFrame->gotoHistoryPage();
}
// Switches the wallet frame to the address book page.
void BitcoinGUI::gotoAddressBookPage()
{
    if (!walletFrame)
        return;
    walletFrame->gotoAddressBookPage();
}
// Switches the wallet frame to the receive-coins page.
void BitcoinGUI::gotoReceiveCoinsPage()
{
    if (!walletFrame)
        return;
    walletFrame->gotoReceiveCoinsPage();
}
// Switches to the send-coins page, optionally pre-filling the recipient.
void BitcoinGUI::gotoSendCoinsPage(QString addr)
{
    if (!walletFrame)
        return;
    walletFrame->gotoSendCoinsPage(addr);
}
// Opens the sign-message tab, optionally pre-filling the signing address.
void BitcoinGUI::gotoSignMessageTab(QString addr)
{
    if (!walletFrame)
        return;
    walletFrame->gotoSignMessageTab(addr);
}
// Opens the verify-message tab, optionally pre-filling the address.
void BitcoinGUI::gotoVerifyMessageTab(QString addr)
{
    if (!walletFrame)
        return;
    walletFrame->gotoVerifyMessageTab(addr);
}
void BitcoinGUI::setNumConnections(int count)
{
QString icon;
switch(count)
{
case 0: icon = ":/icons/connect_0"; break;
case 1: case 2: case 3: icon = ":/icons/connect_1"; break;
case 4: case 5: case 6: icon = ":/icons/connect_2"; break;
case 7: case 8: case 9: icon = ":/icons/connect_3"; break;
default: icon = ":/icons/connect_4"; break;
}
labelConnectionsIcon->setPixmap(QIcon(icon).pixmap(STATUSBAR_ICONSIZE,STATUSBAR_ICONSIZE));
labelConnectionsIcon->setToolTip(tr("%n active connection(s) to Bitblocks network", "", count));
}
// Updates the status bar's sync state: block-source label, progress bar,
// "up to date" vs "catching up" icon, and the shared tooltip.
// count: our current block height; nTotalBlocks: estimated height of peers.
void BitcoinGUI::setNumBlocks(int count, int nTotalBlocks)
{
    // Fix: guard against a slot invocation before a client model is attached
    // (every line below dereferences clientModel), mirroring optionsClicked().
    if(!clientModel)
        return;
    // Prevent orphan statusbar messages (e.g. hover Quit in main menu, wait until chain-sync starts -> garbled text)
    statusBar()->clearMessage();
    // Acquire current block source
    enum BlockSource blockSource = clientModel->getBlockSource();
    switch (blockSource) {
        case BLOCK_SOURCE_NETWORK:
            progressBarLabel->setText(tr("Synchronizing with network..."));
            break;
        case BLOCK_SOURCE_DISK:
            progressBarLabel->setText(tr("Importing blocks from disk..."));
            break;
        case BLOCK_SOURCE_REINDEX:
            progressBarLabel->setText(tr("Reindexing blocks on disk..."));
            break;
        case BLOCK_SOURCE_NONE:
            // Case: not Importing, not Reindexing and no network connection
            progressBarLabel->setText(tr("No block source available..."));
            break;
    }
    QString tooltip;
    QDateTime lastBlockDate = clientModel->getLastBlockDate();
    QDateTime currentDate = QDateTime::currentDateTime();
    int secs = lastBlockDate.secsTo(currentDate);
    if(count < nTotalBlocks)
    {
        tooltip = tr("Processed %1 of %2 (estimated) blocks of transaction history.").arg(count).arg(nTotalBlocks);
    }
    else
    {
        tooltip = tr("Processed %1 blocks of transaction history.").arg(count);
    }
    // Set icon state: spinning if catching up, tick otherwise.
    // "Up to date" means caught up with peers AND the tip is < 90 minutes old.
    if(secs < 90*60 && count >= nTotalBlocks)
    {
        tooltip = tr("Up to date") + QString(".<br>") + tooltip;
        labelBlocksIcon->setPixmap(QIcon(":/icons/synced").pixmap(STATUSBAR_ICONSIZE, STATUSBAR_ICONSIZE));
        walletFrame->showOutOfSyncWarning(false);
        progressBarLabel->setVisible(false);
        progressBar->setVisible(false);
    }
    else
    {
        // Represent time from last generated block in human readable text
        QString timeBehindText;
        if(secs < 48*60*60)
        {
            timeBehindText = tr("%n hour(s)","",secs/(60*60));
        }
        else if(secs < 14*24*60*60)
        {
            timeBehindText = tr("%n day(s)","",secs/(24*60*60));
        }
        else
        {
            timeBehindText = tr("%n week(s)","",secs/(7*24*60*60));
        }
        progressBarLabel->setVisible(true);
        progressBar->setFormat(tr("%1 behind").arg(timeBehindText));
        // Scale verification progress [0,1] onto an integer bar; +0.5 rounds.
        progressBar->setMaximum(1000000000);
        progressBar->setValue(clientModel->getVerificationProgress() * 1000000000.0 + 0.5);
        progressBar->setVisible(true);
        tooltip = tr("Catching up...") + QString("<br>") + tooltip;
        labelBlocksIcon->setMovie(syncIconMovie);
        // Advance the spinner only when the height actually changed.
        if(count != prevBlocks)
            syncIconMovie->jumpToNextFrame();
        prevBlocks = count;
        walletFrame->showOutOfSyncWarning(true);
        tooltip += QString("<br>");
        tooltip += tr("Last received block was generated %1 ago.").arg(timeBehindText);
        tooltip += QString("<br>");
        tooltip += tr("Transactions after this will not yet be visible.");
    }
    // Don't word-wrap this (fixed-width) tooltip
    tooltip = QString("<nobr>") + tooltip + QString("</nobr>");
    labelBlocksIcon->setToolTip(tooltip);
    progressBarLabel->setToolTip(tooltip);
    progressBar->setToolTip(tooltip);
}
// Central user notification routine. MODAL style shows a message box (and,
// via *ret, reports whether the user pressed OK); otherwise a desktop
// notification is emitted through the notificator.
void BitcoinGUI::message(const QString &title, const QString &message, unsigned int style, bool *ret)
{
    QString strTitle = tr("Bitblocks"); // default title
    // Default to information icon
    int nMBoxIcon = QMessageBox::Information;
    int nNotifyIcon = Notificator::Information;
    QString msgType;
    // Prefer supplied title over style based title
    if (!title.isEmpty()) {
        msgType = title;
    }
    else {
        switch (style) {
        case CClientUIInterface::MSG_ERROR:
            msgType = tr("Error");
            break;
        case CClientUIInterface::MSG_WARNING:
            msgType = tr("Warning");
            break;
        case CClientUIInterface::MSG_INFORMATION:
            msgType = tr("Information");
            break;
        default:
            break;
        }
    }
    // Append the type to form "Bitblocks - <type>"
    if (!msgType.isEmpty())
        strTitle += " - " + msgType;
    // Check for error/warning icon
    if (style & CClientUIInterface::ICON_ERROR) {
        nMBoxIcon = QMessageBox::Critical;
        nNotifyIcon = Notificator::Critical;
    }
    else if (style & CClientUIInterface::ICON_WARNING) {
        nMBoxIcon = QMessageBox::Warning;
        nNotifyIcon = Notificator::Warning;
    }
    // Display message
    if (style & CClientUIInterface::MODAL) {
        // Check for buttons, use OK as default, if none was supplied
        QMessageBox::StandardButton buttons;
        if (!(buttons = (QMessageBox::StandardButton)(style & CClientUIInterface::BTN_MASK)))
            buttons = QMessageBox::Ok;
        // Ensure we get users attention
        showNormalIfMinimized();
        QMessageBox mBox((QMessageBox::Icon)nMBoxIcon, strTitle, message, buttons, this);
        int r = mBox.exec();
        if (ret != NULL)
            *ret = r == QMessageBox::Ok;
    }
    else
        notificator->notify((Notificator::Class)nNotifyIcon, strTitle, message);
}
// Intercepts window-state changes to implement "minimize to tray": when the
// user minimizes and the option is enabled, the window is hidden instead.
void BitcoinGUI::changeEvent(QEvent *e)
{
    QMainWindow::changeEvent(e);
#ifndef Q_OS_MAC // Ignored on Mac
    if(e->type() == QEvent::WindowStateChange)
    {
        if(clientModel && clientModel->getOptionsModel()->getMinimizeToTray())
        {
            QWindowStateChangeEvent *wsevt = static_cast<QWindowStateChangeEvent*>(e);
            // Only react to the transition INTO the minimized state.
            if(!(wsevt->oldState() & Qt::WindowMinimized) && isMinimized())
            {
                // hide() is deferred via a queued single-shot because hiding
                // directly inside the state-change handler is unreliable.
                QTimer::singleShot(0, this, SLOT(hide()));
                e->ignore();
            }
        }
    }
#endif
}
// Window close handler: unless the user opted to keep the client running in
// the tray (minimize-to-tray or minimize-on-close), closing quits the app.
void BitcoinGUI::closeEvent(QCloseEvent *event)
{
    if(clientModel)
    {
#ifndef Q_OS_MAC // Ignored on Mac
        if(!clientModel->getOptionsModel()->getMinimizeToTray() &&
           !clientModel->getOptionsModel()->getMinimizeOnClose())
        {
            QApplication::quit();
        }
#endif
    }
    QMainWindow::closeEvent(event);
}
// Asks the user to confirm paying the given transaction fee.
// The decision is written to *payFee (true when the user accepts).
void BitcoinGUI::askFee(qint64 nFeeRequired, bool *payFee)
{
    QString feeAmount = BitcoinUnits::formatWithUnit(BitcoinUnits::BTC, nFeeRequired);
    QString strMessage = tr("This transaction is over the size limit. You can still send it for a fee of %1, "
        "which goes to the nodes that process your transaction and helps to support the network. "
        "Do you want to pay the fee?").arg(feeAmount);
    QMessageBox::StandardButton retval = QMessageBox::question(this, tr("Confirm transaction fee"), strMessage,
        QMessageBox::Yes | QMessageBox::Cancel, QMessageBox::Yes);
    *payFee = (retval == QMessageBox::Yes);
}
void BitcoinGUI::incomingTransaction(const QString& date, int unit, qint64 amount, const QString& type, const QString& address)
{
// On new transaction, make an info balloon
message((amount)<0 ? tr("Sent transaction") : tr("Incoming transaction"),
tr("Date: %1\n"
"Amount: %2\n"
"Type: %3\n"
"Address: %4\n")
.arg(date)
.arg(BitcoinUnits::formatWithUnit(unit, amount, true))
.arg(type)
.arg(address), CClientUIInterface::MSG_INFORMATION);
}
// Drag-and-drop entry point: only drags that carry URLs are accepted.
void BitcoinGUI::dragEnterEvent(QDragEnterEvent *event)
{
    if(!event->mimeData()->hasUrls())
        return;
    event->acceptProposedAction();
}
// Handles dropped URLs: each one is offered to the wallet frame as a URI.
// If at least one parsed, jump to the send page; otherwise warn the user.
void BitcoinGUI::dropEvent(QDropEvent *event)
{
    if(event->mimeData()->hasUrls())
    {
        QList<QUrl> droppedUrls = event->mimeData()->urls();
        int acceptedUris = 0;
        for (QList<QUrl>::const_iterator it = droppedUrls.constBegin(); it != droppedUrls.constEnd(); ++it)
        {
            if (walletFrame->handleURI(it->toString()))
                acceptedUris++;
        }
        if (acceptedUris > 0)
            walletFrame->gotoSendCoinsPage();
        else
            message(tr("URI handling"), tr("URI can not be parsed! This can be caused by an invalid Bitblocks address or malformed URI parameters."),
                    CClientUIInterface::ICON_WARNING);
    }
    event->acceptProposedAction();
}
// Event filter installed on the main window itself: while the status bar is
// showing sync progress, status-tip events are swallowed so hover text
// cannot overwrite the progress display.
bool BitcoinGUI::eventFilter(QObject *object, QEvent *event)
{
    if (event->type() == QEvent::StatusTip &&
        (progressBarLabel->isVisible() || progressBar->isVisible()))
        return true;
    return QMainWindow::eventFilter(object, event);
}
// Forwards an externally received URI to the wallet frame; warns the user
// when it cannot be parsed.
void BitcoinGUI::handleURI(QString strURI)
{
    if (walletFrame->handleURI(strURI))
        return;
    message(tr("URI handling"), tr("URI can not be parsed! This can be caused by an invalid Bitblocks address or malformed URI parameters."),
            CClientUIInterface::ICON_WARNING);
}
// Reflects the wallet's encryption state (a WalletModel status value) in the
// status bar lock icon and the encrypt/change-passphrase action states.
void BitcoinGUI::setEncryptionStatus(int status)
{
    if(status == WalletModel::Unencrypted)
    {
        labelEncryptionIcon->hide();
        encryptWalletAction->setChecked(false);
        changePassphraseAction->setEnabled(false);
        encryptWalletAction->setEnabled(true);
    }
    else if(status == WalletModel::Unlocked)
    {
        labelEncryptionIcon->show();
        labelEncryptionIcon->setPixmap(QIcon(":/icons/lock_open").pixmap(STATUSBAR_ICONSIZE,STATUSBAR_ICONSIZE));
        labelEncryptionIcon->setToolTip(tr("Wallet is <b>encrypted</b> and currently <b>unlocked</b>"));
        encryptWalletAction->setChecked(true);
        changePassphraseAction->setEnabled(true);
        encryptWalletAction->setEnabled(false); // TODO: decrypt currently not supported
    }
    else if(status == WalletModel::Locked)
    {
        labelEncryptionIcon->show();
        labelEncryptionIcon->setPixmap(QIcon(":/icons/lock_closed").pixmap(STATUSBAR_ICONSIZE,STATUSBAR_ICONSIZE));
        labelEncryptionIcon->setToolTip(tr("Wallet is <b>encrypted</b> and currently <b>locked</b>"));
        encryptWalletAction->setChecked(true);
        changePassphraseAction->setEnabled(true);
        encryptWalletAction->setEnabled(false); // TODO: decrypt currently not supported
    }
}
// Brings the main window to the foreground, un-hiding or restoring it as
// needed. If it is already fully visible and fToggleHidden is set, hide it
// instead (tray icon toggle behavior).
void BitcoinGUI::showNormalIfMinimized(bool fToggleHidden)
{
    // activateWindow() (sometimes) helps with keyboard focus on Windows
    if (isHidden())
    {
        show();
        activateWindow();
        return;
    }
    if (isMinimized())
    {
        showNormal();
        activateWindow();
        return;
    }
    if (GUIUtil::isObscured(this))
    {
        raise();
        activateWindow();
        return;
    }
    // Already fully visible: optionally toggle to hidden.
    if(fToggleHidden)
        hide();
}
// Slot for the tray/dock "Show / Hide" action: show the window if it is
// hidden/minimized/obscured, hide it otherwise.
void BitcoinGUI::toggleHidden()
{
    showNormalIfMinimized(true);
}
// Polled slot: once the core requests shutdown, post a queued quit so the
// application exits from the event loop rather than mid-callback.
void BitcoinGUI::detectShutdown()
{
    if (!ShutdownRequested())
        return;
    QMetaObject::invokeMethod(QCoreApplication::instance(), "quit", Qt::QueuedConnection);
}
|
// VK tests
//
// Copyright (c) 2015-2019 The Khronos Group Inc.
// Copyright (c) 2015-2019 Valve Corporation
// Copyright (c) 2015-2019 LunarG, Inc.
// Copyright (c) 2015-2019 Google, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "vktestframeworkandroid.h"
#include "shaderc/shaderc.hpp"
#include <android/log.h>
// The Android test framework needs no per-instance setup or teardown.
VkTestFramework::VkTestFramework() {}
VkTestFramework::~VkTestFramework() {}
// Define static elements (storage for the class's static members).
bool VkTestFramework::m_devsim_layer = false;
ANativeWindow *VkTestFramework::window = nullptr;
// Picks a color format usable as a color attachment (linear or optimal
// tiling), preferring BGRA8 over RGBA8. Exits the process if neither format
// is supported. NOTE: the `instance` parameter is unused here; the query
// only needs the physical device.
VkFormat VkTestFramework::GetFormat(VkInstance instance, vk_testing::Device *device) {
    VkFormatProperties format_props;
    vkGetPhysicalDeviceFormatProperties(device->phy().handle(), VK_FORMAT_B8G8R8A8_UNORM, &format_props);
    if (format_props.linearTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT ||
        format_props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) {
        return VK_FORMAT_B8G8R8A8_UNORM;
    }
    vkGetPhysicalDeviceFormatProperties(device->phy().handle(), VK_FORMAT_R8G8B8A8_UNORM, &format_props);
    if (format_props.linearTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT ||
        format_props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) {
        return VK_FORMAT_R8G8B8A8_UNORM;
    }
    printf("Error - device does not support VK_FORMAT_B8G8R8A8_UNORM nor VK_FORMAT_R8G8B8A8_UNORM - exiting\n");
    // Bug fix: this is a fatal error path, so exit with a failure status
    // instead of 0 (which signals success to the harness).
    exit(1);
}
// Command-line arguments are ignored on Android; nothing to initialize.
void VkTestFramework::InitArgs(int *argc, char *argv[]) {}
void VkTestFramework::Finish() {}
// Install the error callback used by the vk_testing library for this suite.
void TestEnvironment::SetUp() { vk_testing::set_error_callback(test_error_callback); }
void TestEnvironment::TearDown() {}
// Android specific helper functions for shaderc.
// Associates one Vulkan shader stage with the equivalent shaderc shader kind.
struct shader_type_mapping {
    VkShaderStageFlagBits vkshader_type;
    shaderc_shader_kind shaderc_type;
};
// Lookup table consumed by MapShadercType(); one entry per supported stage.
static const shader_type_mapping shader_map_table[] = {
    {VK_SHADER_STAGE_VERTEX_BIT, shaderc_glsl_vertex_shader},
    {VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, shaderc_glsl_tess_control_shader},
    {VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, shaderc_glsl_tess_evaluation_shader},
    {VK_SHADER_STAGE_GEOMETRY_BIT, shaderc_glsl_geometry_shader},
    {VK_SHADER_STAGE_FRAGMENT_BIT, shaderc_glsl_fragment_shader},
    {VK_SHADER_STAGE_COMPUTE_BIT, shaderc_glsl_compute_shader},
};
// Translates a Vulkan shader stage into the matching shaderc shader kind.
// Unknown stages assert in debug builds and fall back to source inference.
shaderc_shader_kind MapShadercType(VkShaderStageFlagBits vkShader) {
    const size_t entry_count = sizeof(shader_map_table) / sizeof(shader_map_table[0]);
    for (size_t i = 0; i < entry_count; ++i) {
        if (shader_map_table[i].vkshader_type == vkShader) {
            return shader_map_table[i].shaderc_type;
        }
    }
    assert(false);
    return shaderc_glsl_infer_from_source;
}
// Compile a given string containing GLSL into SPIR-V
// Return value of false means an error was encountered
// Compiles a GLSL source string to SPIR-V via shaderc (Android builds use
// shaderc instead of glslang). The resulting words are appended to `spirv`.
// Returns false (after logging the compiler message) on failure.
bool VkTestFramework::GLSLtoSPV(const VkShaderStageFlagBits shader_type, const char *pshader, std::vector<unsigned int> &spirv,
                                bool debug) {
    shaderc::Compiler compiler;
    shaderc::CompileOptions options;
    if (debug) {
        // Debug requests: no optimization, embed debug info.
        options.SetOptimizationLevel(shaderc_optimization_level_zero);
        options.SetGenerateDebugInfo();
    }
    shaderc::SpvCompilationResult result =
        compiler.CompileGlslToSpv(pshader, strlen(pshader), MapShadercType(shader_type), "shader", options);
    if (result.GetCompilationStatus() != shaderc_compilation_status_success) {
        __android_log_print(ANDROID_LOG_ERROR, "VkLayerValidationTests", "GLSLtoSPV compilation failed: %s",
                            result.GetErrorMessage().c_str());
        return false;
    }
    // Append all SPIR-V words in one shot instead of one push_back per word.
    spirv.insert(spirv.end(), result.cbegin(), result.cend());
    return true;
}
//
// Compile a given string containing SPIR-V assembly into SPV for use by VK
// Return value of false means an error was encountered.
//
bool VkTestFramework::ASMtoSPV(const spv_target_env target_env, const uint32_t options, const char *pasm,
std::vector<unsigned int> &spv) {
spv_binary binary;
spv_diagnostic diagnostic = nullptr;
spv_context context = spvContextCreate(target_env);
spv_result_t error = spvTextToBinaryWithOptions(context, pasm, strlen(pasm), options, &binary, &diagnostic);
spvContextDestroy(context);
if (error) {
__android_log_print(ANDROID_LOG_ERROR, "VkLayerValidationTest", "ASMtoSPV compilation failed");
spvDiagnosticDestroy(diagnostic);
return false;
}
spv.insert(spv.end(), binary->code, binary->code + binary->wordCount);
spvBinaryDestroy(binary);
return true;
}
|
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/public/browser/context_factory.h"
#include "content/public/common/result_codes.h"
#include "content/shell/browser/shell_browser_context.h"
#include "ui/aura/window.h"
#include "ui/views_content_client/views_content_client.h"
#include "ui/views_content_client/views_content_client_main_parts_aura.h"
#include "ui/wm/test/wm_test_helper.h"
namespace ui {
namespace {
// ChromeOS/Ash variant of the views_content_client main parts: stands up a
// minimal views::corewm environment (via WMTestHelper) instead of a full
// desktop window manager.
class ViewsContentClientMainPartsChromeOS
    : public ViewsContentClientMainPartsAura {
 public:
  ViewsContentClientMainPartsChromeOS(
      content::MainFunctionParams content_params,
      ViewsContentClient* views_content_client);
  ViewsContentClientMainPartsChromeOS(
      const ViewsContentClientMainPartsChromeOS&) = delete;
  ViewsContentClientMainPartsChromeOS& operator=(
      const ViewsContentClientMainPartsChromeOS&) = delete;
  ~ViewsContentClientMainPartsChromeOS() override {}
  // content::BrowserMainParts:
  int PreMainMessageLoopRun() override;
  void PostMainMessageLoopRun() override;
 private:
  // Enable a minimal set of views::corewm to be initialized.
  // Created in PreMainMessageLoopRun(), destroyed in PostMainMessageLoopRun().
  std::unique_ptr<::wm::WMTestHelper> wm_test_helper_;
};
// Forwards construction to the Aura base; wm_test_helper_ stays null until
// PreMainMessageLoopRun().
ViewsContentClientMainPartsChromeOS::ViewsContentClientMainPartsChromeOS(
    content::MainFunctionParams content_params,
    ViewsContentClient* views_content_client)
    : ViewsContentClientMainPartsAura(std::move(content_params),
                                      views_content_client) {}
// Initializes the Aura base, creates the minimal window-manager environment,
// and hands the root window to the embedding client.
// NOTE(review): the base-class PreMainMessageLoopRun() return value is
// discarded and RESULT_CODE_NORMAL_EXIT returned unconditionally — confirm
// that is intended.
int ViewsContentClientMainPartsChromeOS::PreMainMessageLoopRun() {
  ViewsContentClientMainPartsAura::PreMainMessageLoopRun();
  // Set up basic pieces of views::corewm.
  wm_test_helper_ = std::make_unique<wm::WMTestHelper>(gfx::Size(1024, 768));
  // Ensure the X window gets mapped.
  wm_test_helper_->host()->Show();
  // Ensure Aura knows where to open new windows.
  aura::Window* root_window = wm_test_helper_->host()->window();
  views_content_client()->OnPreMainMessageLoopRun(browser_context(),
                                                  root_window);
  return content::RESULT_CODE_NORMAL_EXIT;
}
// Tears down in reverse order of setup: destroy the WM helper (and with it
// the root window) before the Aura base cleans up.
void ViewsContentClientMainPartsChromeOS::PostMainMessageLoopRun() {
  wm_test_helper_.reset();
  ViewsContentClientMainPartsAura::PostMainMessageLoopRun();
}
} // namespace
// static
// Factory selecting the ChromeOS implementation for this build configuration.
std::unique_ptr<ViewsContentClientMainParts>
ViewsContentClientMainParts::Create(content::MainFunctionParams content_params,
                                    ViewsContentClient* views_content_client) {
  return std::make_unique<ViewsContentClientMainPartsChromeOS>(
      std::move(content_params), views_content_client);
}
} // namespace ui
|
/*
* (C) Copyright 2015 ETH Zurich Systems Group (http://www.systems.ethz.ch/) and others.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Contributors:
* Markus Pilman <mpilman@inf.ethz.ch>
* Simon Loesing <sloesing@inf.ethz.ch>
* Thomas Etter <etterth@gmail.com>
* Kevin Bocksrocker <kevin.bocksrocker@gmail.com>
* Lucas Braun <braunl@inf.ethz.ch>
*/
#pragma once
#include <functional>
#include <memory>
#include <utility>
#include <vector>
#include <initializer_list>
#include <array>
#include <atomic>
#include <mutex>
#include <type_traits>
#include <limits>
#include <stdint.h>
namespace crossbow {
template <
typename Key,
typename T,
typename Hash = std::hash<Key>,
typename KeyEqual = std::equal_to<Key>,
typename Allocator = std::allocator<std::pair<const Key, T> >,
typename MutexType = std::mutex,
size_t ConcurrencyLevel = 32,
size_t InitialCapacity = 32,
size_t LoadFactor = 75
>
class concurrent_map {
public:
    // Standard associative-container member types, mirroring std::map's
    // interface so the class can be used as a drop-in in generic code.
    typedef Key key_type;
    typedef T mapped_type;
    typedef const T const_mapped_type;
    typedef size_t size_type;
    typedef std::ptrdiff_t difference_type;
    typedef Hash hasher;
    typedef KeyEqual key_equal;
    typedef Allocator allocator_type;
    typedef typename allocator_type::pointer pointer;
    typedef typename allocator_type::const_pointer const_pointer;
    typedef MutexType mutex_type;
private:
    // Lifecycle state of one storage slot.
    // NOTE(review): DELETED is not referenced by the code visible here —
    // presumably used by erase/rehash logic further down; confirm.
    enum class ElemState {
        UNASSIGNED,
        DELETED,
        VALID
    };
    // One bucket slot: a key/value pair tagged with its occupancy state.
    struct KeyValueElement {
        // Constructs an occupied (VALID) slot from forwarded key and value.
        template <typename K, typename V>
        KeyValueElement(K && key, V && value) : state(ElemState::VALID), key(std::forward<K>(key)), value(std::forward<V>(value)) {}
        // Default-constructed slots start out empty.
        KeyValueElement(): state(ElemState::UNASSIGNED) {}
        KeyValueElement(const KeyValueElement &) = default;
        KeyValueElement(KeyValueElement &&) = default;
        KeyValueElement &operator= (const KeyValueElement &) = default;
        // NOTE(review): the source element keeps its state flag after a move
        // (only key/value are moved from) — looks intentional for
        // rehash-style relocation, but confirm callers never reuse the
        // moved-from slot as if it were still VALID.
        KeyValueElement &operator= (KeyValueElement && other) {
            state = other.state;
            key = std::move(other.key);
            value = std::move(other.value);
            return *this;
        }
        ElemState state;
        key_type key;
        mapped_type value;
    };
public: // private types
struct Bucket {
template <typename K, typename V>
std::pair<bool, mapped_type> insert(K && key, V && value) {
for (auto & el : arr) {
if (el.state == ElemState::UNASSIGNED) {
el.state = ElemState::VALID;
el.value = value;//TODO: forward this
el.key = key;
return std::make_pair(true, mapped_type());
} else if (el.state == ElemState::VALID && el.key == key) {
mapped_type old_value = std::move(el.value);
el.value = value;//TODO: forward this
return std::make_pair(false, std::move(old_value));
}
}
for (auto & el : overflow) {
//no need to check for validity, all entries in the vector are valid
if (el.key == key) {
mapped_type old_value = std::move(el.value);
el.value = value;//TODO: forward this
return std::make_pair(false, std::move(old_value));
}
}
//nothing with this key exists, array is full -> insert in vector
overflow.push_back(KeyValueElement(std::forward<K>(key), std::forward<V>(value)));
return std::make_pair(true, mapped_type());
}
void insertNoDuplicateCheck(KeyValueElement && element) {
for (auto & el : arr) {
if (el.state == ElemState::UNASSIGNED) {
el = std::move(element);
return;
}
}
overflow.push_back(std::move(element));
}
std::pair<bool, mapped_type> erase(const key_type &key) {
for (auto el = arr.begin(); el != arr.end(); ++el) {
if (el->state == ElemState::VALID && el->key == key) {
return erase(el);
}
}
for (auto iter = overflow.begin(); iter != overflow.end(); ++iter) {
auto &el = *iter;
//no need to check for validity, all entries in the vector are valid
if (el.key == key) {
return erase(iter);
}
}
//not found
return std::make_pair(false, mapped_type());
}
std::pair<bool, mapped_type> at(const key_type &key) {
auto is_valid_and_equal = [&](KeyValueElement & el) {
return el.state == ElemState::VALID && el.key == key;
};
for (auto & el : arr) {
if (is_valid_and_equal(el)) {
return std::make_pair(true, el.value);
}
}
for (auto & el : overflow) {
if (is_valid_and_equal(el)) {
return std::make_pair(true, el.value);
}
}
return std::make_pair(false, mapped_type());
}
template<typename Fun>
void for_each(const Fun &fun) {
for (auto i = arr.begin(); i < arr.end(); ++i) {
if (i->state == ElemState::VALID) {
fun(i->key, i->value);
}
}
for (auto i = overflow.begin(); i < overflow.end(); ++i) {
if (i->state == ElemState::VALID)
fun(i->key, i->value);
}
}
template<typename Fun>
void exec_on(size_t hash, const key_type &key, const Fun &fun, std::atomic_size_t &global_count) {
auto is_valid_and_equal = [&](KeyValueElement & el) {
return el.state == ElemState::VALID && el.key == key;
};
for (auto i = arr.begin(); i != arr.end(); ++i) {
if (is_valid_and_equal(*i)) {
if (fun(i->value)) {
erase(i);
global_count.fetch_sub(1);
}
return;
}
}
for (auto i = overflow.begin(); i != overflow.end(); ++i) {
if (is_valid_and_equal(*i)) {
if (fun(i->value)) {
erase(i);
global_count.fetch_sub(1);
}
return;
}
}
auto p = KeyValueElement(key, mapped_type());
if (!fun(p.value)) {
insertNoDuplicateCheck(std::move(p));
global_count.fetch_add(1);
}
}
void clear() {
for (auto i = arr.begin(); i < arr.end(); ++i) {
if (i->state == ElemState::VALID) {
erase(i);
}
}
for (auto i = overflow.begin(); i < overflow.end(); ++i) {
if (i->state == ElemState::VALID)
erase(i);
}
}
typedef typename allocator_type::template rebind<KeyValueElement>::other key_value_alloc;
std::array<KeyValueElement, 1> arr;
std::vector<KeyValueElement, key_value_alloc> overflow;
private:
std::pair<bool, mapped_type> erase(decltype(arr.begin()) i) {
auto res = std::move(i->value);
{
auto garbage = std::move(i->key);
(void) garbage;
}
i->state = ElemState::UNASSIGNED;
if (!overflow.empty()) {
*i = std::move(overflow.back());
overflow.pop_back();
}
return std::make_pair(true, res);
}
std::pair<bool, mapped_type> erase(decltype(overflow.begin()) i) {
auto res = std::move(i->value);
i->state = ElemState::UNASSIGNED;
overflow.erase(i);
return std::make_pair(true, res);
}
};
private: // data members
hasher hash_;
key_equal equal_;
allocator_type allocator_;
std::vector<Bucket, typename allocator_type::template rebind<Bucket>::other> _buckets;
std::array<mutex_type, ConcurrencyLevel> _locks;
std::atomic_size_t _count;
std::atomic_size_t _upper_bound;
size_t bucket_flag;
public: // construction and destruction
explicit concurrent_map(const hasher &hash = hasher(),
const key_equal &equal = key_equal(),
const allocator_type &allocator = allocator_type())
: hash_(hash),
equal_(equal),
allocator_(allocator),
_buckets(InitialCapacity),
_count(0),
_upper_bound(InitialCapacity* LoadFactor / 100),
bucket_flag(((size_t) - 1) % _buckets.size()) {
}
concurrent_map(const concurrent_map<Key, T, Hash, KeyEqual, Allocator, MutexType, ConcurrencyLevel, InitialCapacity, LoadFactor> &) = default;
concurrent_map(concurrent_map<Key, T, Hash, KeyEqual, Allocator, MutexType, ConcurrencyLevel, InitialCapacity, LoadFactor> &&) = default;
concurrent_map<Key, T, Hash, KeyEqual, Allocator, MutexType, ConcurrencyLevel, InitialCapacity, LoadFactor> &
operator= (const concurrent_map<Key, T, Hash, KeyEqual, Allocator, MutexType, ConcurrencyLevel, InitialCapacity, LoadFactor> &) = default;
concurrent_map<Key, T, Hash, KeyEqual, Allocator, MutexType, ConcurrencyLevel, InitialCapacity, LoadFactor> &
operator= (concurrent_map<Key, T, Hash, KeyEqual, Allocator, MutexType, ConcurrencyLevel, InitialCapacity, LoadFactor> &&) = default;
allocator_type get_allocator() const {
return allocator_;
}
public:
size_t size() {
return _count;
}
template <typename K, typename V>
std::pair<bool, mapped_type> insert(K && key, V && value) {
size_t hash = hash_(key);
reserve(_count);
std::lock_guard<mutex_type> l(getMutex(hash));
auto ret = getBucket(hash).insert(std::forward<K>(key), std::forward<V>(value));
if (ret.first)//new entry was inserted
_count.fetch_add(1);
return ret;
}
std::pair<bool, mapped_type> erase(const key_type &key) {
size_t hash = hash_(key);
std::lock_guard<mutex_type> l(getMutex(hash));
auto ret = getBucket(hash).erase(key);
if (ret.first)//an entry was removed
_count.fetch_sub(1);
return ret;
}
std::pair<bool, mapped_type> at(const key_type &key) {
size_t hash = hash_(key);
std::lock_guard<mutex_type> l(getMutex(hash));
return getBucket(hash).at(key);
}
void clear() {
clear(0);
}
template<typename Fun>
void exec_on(const key_type &key, const Fun &fun) {
size_t hash = hash_(key);
reserve(_count);
std::lock_guard<mutex_type> l(getMutex(hash));
getBucket(hash).exec_on(hash, key, fun, _count);
}
template<typename Fun>
void for_each(const Fun &fun) {
for_each(fun, 0);
}
//allocates space to fit count elements
void reserve(size_t count) {
if (count < _upper_bound.load(std::memory_order_acquire))
return;
std::lock_guard<mutex_type> l(_locks[0]);
if (count < _upper_bound.load(std::memory_order_acquire))
return;
auto new_size = _buckets.size() * 2;
while (new_size * LoadFactor / 100 < count) {
new_size *= 2;
}
return resize(new_size, 1);
}
private:
template<typename Fun>
void for_each(const Fun &fun, size_t lock) {
if (lock < ConcurrencyLevel) {
std::lock_guard<mutex_type> l(_locks[lock]);
for_each(fun, lock + 1);
return;
}
for (Bucket & b : _buckets) {
b.for_each(fun);
}
}
void clear(size_t lock) {
if (lock < ConcurrencyLevel) {
std::lock_guard<mutex_type> l(_locks[lock]);
clear(lock + 1);
return;
}
for (Bucket & b : _buckets) {
b.clear();
}
}
mutex_type &getMutex(size_t hash) {
return _locks[hash % ConcurrencyLevel];
}
Bucket &getBucket(size_t hash) {
return _buckets[hash & bucket_flag];
}
void resize(size_t new_size, size_t lock_index) {
if (lock_index < ConcurrencyLevel) {
std::lock_guard<mutex_type> l(_locks[lock_index]);
return resize(new_size, lock_index + 1);
}
//everything is locked
//move old buckets
std::vector<Bucket, typename allocator_type::template rebind<Bucket>::other> old_buckets(std::move(_buckets));
bucket_flag = ((size_t) - 1) % new_size;
_upper_bound.store(new_size * LoadFactor / 100, std::memory_order_release);
_buckets.resize(new_size);
for (Bucket & buk : old_buckets) {
bool done = false;
for (auto & el : buk.arr) {
if (el.state == ElemState::UNASSIGNED) {
done = true;
break;
}
getBucket(hash_(el.key)).insertNoDuplicateCheck(std::move(el));
}
if (done)
continue;
for (auto & el : buk.overflow) {
getBucket(hash_(el.key)).insertNoDuplicateCheck(std::move(el));
}
}
}
};
} // namespace crossbow
|
#include <fstream>
#include "Filesystem/File.h"
namespace FILESYSTEM
{
/// Reads the entire contents of a file as raw bytes.
/// @param[in] path - The path of the file to read.
/// @return The file's binary data on success; an empty string otherwise
///     (a stream that failed to open simply yields no characters).
std::string File::ReadBinary(const std::filesystem::path& path)
{
    std::ifstream file(path, std::ios::binary | std::ios::in);
    return std::string(
        std::istreambuf_iterator<char>(file),
        std::istreambuf_iterator<char>());
}
}
|
//=========================================================================
// Copyright (C) 2012 The Elastos Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//=========================================================================
#include "Elastos.Droid.Content.h"
#include "Elastos.Droid.View.h"
#include "elastos/droid/ext/frameworkext.h"
#include "elastos/droid/text/method/QwertyKeyListener.h"
#include "elastos/droid/text/method/TextKeyListener.h"
#include "elastos/droid/text/method/CTextKeyListener.h"
#include "elastos/droid/text/method/CQwertyKeyListener.h"
#include "elastos/droid/text/SpannableStringInternal.h"
#include "elastos/droid/text/TextUtils.h"
#include "elastos/droid/text/Selection.h"
#include "elastos/droid/text/CAutoText.h"
#include "elastos/droid/view/KeyEvent.h"
#include <elastos/core/Math.h>
#include <elastos/core/Character.h>
#include <stdio.h>
#include <elastos/core/StringUtils.h>
using namespace Elastos::Core;
using Elastos::Core::StringUtils;
using Elastos::Droid::Text::Selection;
using Elastos::Droid::Text::INoCopySpan;
using Elastos::Droid::View::IKeyCharacterMap;
using Elastos::Droid::View::KeyEvent;
namespace Elastos {
namespace Droid {
namespace Text {
namespace Method {
// Number of Capitalize modes cached in sInstance (doubled for autotext on/off).
const Int32 QwertyKeyListener::CAPITALIZELENGTH = 4;
// Listener singleton cache, indexed by (capitalize mode * 2 + autotext flag).
AutoPtr<ArrayOf<IQwertyKeyListener*> > QwertyKeyListener::sInstance =
    ArrayOf<IQwertyKeyListener*>::Alloc(QwertyKeyListener::CAPITALIZELENGTH * 2);
// Lazily created singleton used for full hardware keyboards.
AutoPtr<IQwertyKeyListener> QwertyKeyListener::sFullKeyboardInstance;

CAR_INTERFACE_IMPL_2(QwertyKeyListener::Replaced, Object, IReplacedSpan, INoCopySpan)

QwertyKeyListener::Replaced::Replaced()
{
}

QwertyKeyListener::Replaced::~Replaced()
{
}

// Records the characters an AutoText replacement overwrote, so the
// replacement can be undone when the user presses DEL right after it.
ECode QwertyKeyListener::Replaced::constructor(
    /* [in] */ ArrayOf<Char32>* text)
{
    mText = text;
    return NOERROR;
}
// Populates PICKER_SETS: for each base character, the string of alternate
// characters offered by the long-press character-picker dialog. Runs once at
// static-initialization time (see sInitPickerSet below). Always returns TRUE.
static Boolean InitStaticPICKER_SETS()
{
    // Accented variants for Latin letters, upper case first.
    QwertyKeyListener::PICKER_SETS['A'] = String("\u00C0\u00C1\u00C2\u00C4\u00C6\u00C3\u00C5\u0104\u0100");
    QwertyKeyListener::PICKER_SETS['C'] = String("\u00C7\u0106\u010C");
    QwertyKeyListener::PICKER_SETS['D'] = String("\u010E");
    QwertyKeyListener::PICKER_SETS['E'] = String("\u00C8\u00C9\u00CA\u00CB\u0118\u011A\u0112");
    QwertyKeyListener::PICKER_SETS['G'] = String("\u011E");
    QwertyKeyListener::PICKER_SETS['L'] = String("\u0141");
    QwertyKeyListener::PICKER_SETS['I'] = String("\u00CC\u00CD\u00CE\u00CF\u012A\u0130");
    QwertyKeyListener::PICKER_SETS['N'] = String("\u00D1\u0143\u0147");
    QwertyKeyListener::PICKER_SETS['O'] = String("\u00D8\u0152\u00D5\u00D2\u00D3\u00D4\u00D6\u014C");
    QwertyKeyListener::PICKER_SETS['R'] = String("\u0158");
    QwertyKeyListener::PICKER_SETS['S'] = String("\u015A\u0160\u015E");
    QwertyKeyListener::PICKER_SETS['T'] = String("\u0164");
    QwertyKeyListener::PICKER_SETS['U'] = String("\u00D9\u00DA\u00DB\u00DC\u016E\u016A");
    QwertyKeyListener::PICKER_SETS['Y'] = String("\u00DD\u0178");
    QwertyKeyListener::PICKER_SETS['Z'] = String("\u0179\u017B\u017D");
    // Lower-case counterparts.
    QwertyKeyListener::PICKER_SETS['a'] = String("\u00E0\u00E1\u00E2\u00E4\u00E6\u00E3\u00E5\u0105\u0101");
    QwertyKeyListener::PICKER_SETS['c'] = String("\u00E7\u0107\u010D");
    QwertyKeyListener::PICKER_SETS['d'] = String("\u010F");
    QwertyKeyListener::PICKER_SETS['e'] = String("\u00E8\u00E9\u00EA\u00EB\u0119\u011B\u0113");
    QwertyKeyListener::PICKER_SETS['g'] = String("\u011F");
    QwertyKeyListener::PICKER_SETS['i'] = String("\u00EC\u00ED\u00EE\u00EF\u012B\u0131");
    QwertyKeyListener::PICKER_SETS['l'] = String("\u0142");
    QwertyKeyListener::PICKER_SETS['n'] = String("\u00F1\u0144\u0148");
    QwertyKeyListener::PICKER_SETS['o'] = String("\u00F8\u0153\u00F5\u00F2\u00F3\u00F4\u00F6\u014D");
    QwertyKeyListener::PICKER_SETS['r'] = String("\u0159");
    QwertyKeyListener::PICKER_SETS['s'] = String("\u00A7\u00DF\u015B\u0161\u015F");
    QwertyKeyListener::PICKER_SETS['t'] = String("\u0165");
    QwertyKeyListener::PICKER_SETS['u'] = String("\u00F9\u00FA\u00FB\u00FC\u016F\u016B");
    QwertyKeyListener::PICKER_SETS['y'] = String("\u00FD\u00FF");
    QwertyKeyListener::PICKER_SETS['z'] = String("\u017A\u017C\u017E");
    // QwertyKeyListener::PICKER_SETS[CKeyCharacterMap::PICKER_DIALOG_INPUT] = String("\u2026\u00A5\u2022\u00AE\u00A9\u00B1[]{}\\|");
    QwertyKeyListener::PICKER_SETS['/'] = String("\\");
    // From packages/inputmethods/LatinIME/res/xml/kbd_symbols.xml
    QwertyKeyListener::PICKER_SETS['1'] = String("\u00b9\u00bd\u2153\u00bc\u215b");
    QwertyKeyListener::PICKER_SETS['2'] = String("\u00b2\u2154");
    QwertyKeyListener::PICKER_SETS['3'] = String("\u00b3\u00be\u215c");
    QwertyKeyListener::PICKER_SETS['4'] = String("\u2074");
    QwertyKeyListener::PICKER_SETS['5'] = String("\u215d");
    QwertyKeyListener::PICKER_SETS['7'] = String("\u215e");
    QwertyKeyListener::PICKER_SETS['0'] = String("\u207f\u2205");
    QwertyKeyListener::PICKER_SETS['$'] = String("\u00a2\u00a3\u20ac\u00a5\u20a3\u20a4\u20b1");
    QwertyKeyListener::PICKER_SETS['%'] = String("\u2030");
    QwertyKeyListener::PICKER_SETS['*'] = String("\u2020\u2021");
    QwertyKeyListener::PICKER_SETS['-'] = String("\u2013\u2014");
    QwertyKeyListener::PICKER_SETS['+'] = String("\u00b1");
    QwertyKeyListener::PICKER_SETS['('] = String("[{<");
    QwertyKeyListener::PICKER_SETS[')'] = String("]}>");
    QwertyKeyListener::PICKER_SETS['!'] = String("\u00a1");
    QwertyKeyListener::PICKER_SETS['"'] = String("\u201c\u201d\u00ab\u00bb\u02dd");
    QwertyKeyListener::PICKER_SETS['?'] = String("\u00bf");
    QwertyKeyListener::PICKER_SETS[','] = String("\u201a\u201e");
    // From packages/inputmethods/LatinIME/res/xml/kbd_symbols_shift.xml
    QwertyKeyListener::PICKER_SETS['='] = String("\u2260\u2248\u221e");
    QwertyKeyListener::PICKER_SETS['<'] = String("\u2264\u00ab\u2039");
    QwertyKeyListener::PICKER_SETS['>'] = String("\u2265\u00bb\u203a");
    return TRUE;
};
// Base character -> alternate characters for the picker dialog.
HashMap<Char32, String> QwertyKeyListener::PICKER_SETS;
// Forces InitStaticPICKER_SETS() to run during static initialization.
Boolean QwertyKeyListener::sInitPickerSet = InitStaticPICKER_SETS();
QwertyKeyListener::QwertyKeyListener()
{}

QwertyKeyListener::~QwertyKeyListener()
{}

CAR_INTERFACE_IMPL(QwertyKeyListener, BaseKeyListener, IQwertyKeyListener)

// Convenience overload: constructs a listener that is not bound to a full
// hardware keyboard.
ECode QwertyKeyListener::constructor(
    /* [in] */ Capitalize cap,
    /* [in] */ Boolean autotext)
{
    return constructor(cap, autotext, FALSE);
}

// @param cap          auto-capitalization mode applied while typing
// @param autotext     whether AutoText replacement is enabled
// @param fullKeyboard TRUE for full hardware keyboards, which skip the
//                     repeat-key character-picker behavior in OnKeyDown
ECode QwertyKeyListener::constructor(
    /* [in] */ Capitalize cap,
    /* [in] */ Boolean autotext,
    /* [in] */ Boolean fullKeyboard)
{
    mAutoCap = cap;
    mAutoText = autotext;
    mFullKeyboard = fullKeyboard;
    return NOERROR;
}
// Returns the shared listener for the given capitalization/autotext
// combination, creating and caching it on first use.
// NOTE(review): the sInstance cache update is not synchronized; concurrent
// first calls could race -- confirm callers stay on a single (UI) thread.
ECode QwertyKeyListener::GetInstance(
    /* [in] */ Boolean autoText,
    /* [in] */ Capitalize cap,
    /* [out] */ IQwertyKeyListener** ret)
{
    VALIDATE_NOT_NULL(ret)

    // cache slot: two entries per capitalization mode (autotext off/on)
    Int32 off = cap * 2 + (autoText ? 1 : 0);

    if ((*sInstance)[off] == NULL) {
        AutoPtr<IQwertyKeyListener> listener;
        CQwertyKeyListener::New(cap, autoText, (IQwertyKeyListener**)&listener);
        sInstance->Set(off, listener);
    }

    *ret = (*sInstance)[off];
    REFCOUNT_ADD(*ret)
    return NOERROR;
}
// Returns the shared listener configured for full hardware keyboards
// (no auto-capitalization, no autotext), creating it on first use.
ECode QwertyKeyListener::GetInstanceForFullKeyboard(
    /* [out] */ IQwertyKeyListener** ret)
{
    VALIDATE_NOT_NULL(ret)

    if (sFullKeyboardInstance == NULL) {
        CQwertyKeyListener::New(Capitalize_NONE, FALSE, TRUE, (IQwertyKeyListener**)&sFullKeyboardInstance);
    }
    *ret = sFullKeyboardInstance;
    REFCOUNT_ADD(*ret);
    return NOERROR;
}
// Reports the text content type matching this listener's capitalization and
// autotext configuration.
ECode QwertyKeyListener::GetInputType(
    /* [out] */ Int32* ret)
{
    return MakeTextContentType(mAutoCap, mAutoText, ret);
}
// Handles a hardware key press on `content`. In order, this deals with:
// repeat-key character picker, the explicit picker-dialog key, hex code-point
// input, dead-key/accent composition, auto-capitalization, AutoText
// replacement (including the "double space -> period + space" rule), and the
// DEL-undoes-autotext behavior. Sets *ret to TRUE when the event is consumed;
// otherwise defers to BaseKeyListener::OnKeyDown. The method works by editing
// `content` in place and marking state with spans (ACTIVE, CAPPED, LAST_TYPED,
// INHIBIT_REPLACEMENT, OLD_SEL_START).
ECode QwertyKeyListener::OnKeyDown(
    /* [in] */ IView* view,
    /* [in] */ IEditable* content,
    /* [in] */ Int32 keyCode,
    /* [in] */ IKeyEvent* event,
    /* [out] */ Boolean* ret)
{
    VALIDATE_NOT_NULL(ret)

    Int32 selStart, selEnd;
    Int32 pref = 0;

    // Pick up the user's text-correction preferences (auto-cap, autotext,
    // auto-period) from the view's context, when a view is available.
    if (view != NULL) {
        AutoPtr<IContext> context;
        view->GetContext((IContext**)&context);
        AutoPtr<ITextKeyListener> listener;
        TextKeyListener::GetInstance((ITextKeyListener**)&listener);
        ((TextKeyListener*)listener.Get())->GetPrefs(context, &pref);
    }

    // Normalize the selection so selStart <= selEnd; collapse an invalid
    // selection to the start of the text.
    {
        Int32 a = Selection::GetSelectionStart(ICharSequence::Probe(content));
        Int32 b = Selection::GetSelectionEnd(ICharSequence::Probe(content));

        selStart = Elastos::Core::Math::Min(a, b);
        selEnd = Elastos::Core::Math::Max(a, b);

        if (selStart < 0 || selEnd < 0) {
            selStart = selEnd = 0;
            Selection::SetSelection(ISpannable::Probe(content), 0, 0);
        }
    }

    // Region currently marked as an in-progress dead-key composition.
    Int32 activeStart;
    ISpanned::Probe(content)->GetSpanStart(
        TextKeyListener::ACTIVE, &activeStart);
    Int32 activeEnd;
    ISpanned::Probe(content)->GetSpanEnd(
        TextKeyListener::ACTIVE, &activeEnd);

    // QWERTY keyboard normal case
    Int32 i;
    Int32 eMetaState;
    MetaKeyKeyListener::GetMetaState(ICharSequence::Probe(content), event, &eMetaState);
    event->GetUnicodeChar(eMetaState, &i);

    // On soft/compact keyboards, holding a key repeats it; if the repeated
    // character matches the one just typed, open the character picker.
    if (!mFullKeyboard) {
        Int32 count;
        event->GetRepeatCount(&count);
        if (count > 0 && selStart == selEnd && selStart > 0) {
            Char32 c;
            ICharSequence::Probe(content)->GetCharAt(selStart - 1, &c);

            if ((c == i || c == Character::ToUpperCase(i)) && view != NULL) {
                if (ShowCharacterPicker(view, content, c, FALSE, count)) {
                    ResetMetaState(ISpannable::Probe(content));
                    *ret = TRUE;
                    return NOERROR;
                }
            }
        }
    }

    // Dedicated "open picker dialog" key.
    if (i == IKeyCharacterMap::PICKER_DIALOG_INPUT) {
        if (view != NULL) {
            ShowCharacterPicker(view, content,
                IKeyCharacterMap::PICKER_DIALOG_INPUT, TRUE, 1);
        }
        ResetMetaState(ISpannable::Probe(content));
        *ret = TRUE;
        return NOERROR;
    }

    // Hex input: interpret up to 4 preceding hex digits (or the selection)
    // as a code point and replace them with that character.
    if (i == IKeyCharacterMap::HEX_INPUT) {
        Int32 start;

        if (selStart == selEnd) {
            start = selEnd;

            Char32 ch;
            while (start > 0 && selEnd - start < 4 &&
                Character::ToDigit((ICharSequence::Probe(content)->GetCharAt(start - 1, &ch), ch), 16) >= 0) {
                start--;
            }
        }
        else {
            start = selStart;
        }

        Int32 ch = -1;

        String hex;
        hex = TextUtils::Substring(ICharSequence::Probe(content), start, selEnd);
        ch = StringUtils::ParseInt32(hex, 16);

        if (ch >= 0) {
            selStart = start;
            Selection::SetSelection(ISpannable::Probe(content), selStart, selEnd);
            i = ch;
        }
        else {
            i = 0;
        }
    }

    Boolean bHasNoModifiers, bHasModifiers;
    if (i != 0) {
        Boolean dead = FALSE;

        // A combining accent becomes a pending dead key.
        if ((i & IKeyCharacterMap::COMBINING_ACCENT) != 0) {
            dead = TRUE;
            i = i & IKeyCharacterMap::COMBINING_ACCENT_MASK;
        }

        // If the selection is exactly the pending dead-key region, try to
        // compose accent + new key into a single character.
        if (activeStart == selStart && activeEnd == selEnd) {
            Boolean replace = FALSE;

            if (selEnd - selStart - 1 == 0) {
                Char32 accent;
                ICharSequence::Probe(content)->GetCharAt(selStart, &accent);
                Int32 composed = KeyEvent::GetDeadChar(accent, i);

                if (composed != 0) {
                    i = composed;
                    replace = TRUE;
                    dead = FALSE;
                }
            }

            if (!replace) {
                // Composition failed: keep the accent as-is and type after it.
                Selection::SetSelection(ISpannable::Probe(content), selEnd);
                ISpannable::Probe(content)->RemoveSpan(TextKeyListener::ACTIVE);
                selStart = selEnd;
            }
        }

        // Auto-capitalization: upper-case the typed character where the
        // Capitalize mode says so, with the CAPPED span remembering the
        // original character so a second press can undo it.
        Boolean bTmp = FALSE;
        if ((pref & TextKeyListener::AUTO_CAP) != 0 &&
            Character::IsLowerCase(i) &&
            (TextKeyListener::ShouldCap(mAutoCap, ICharSequence::Probe(content), selStart, &bTmp), bTmp)) {
            Int32 where;
            ISpanned::Probe(content)->GetSpanEnd(TextKeyListener::CAPPED, &where);
            Int32 flags;
            ISpanned::Probe(content)->GetSpanFlags(TextKeyListener::CAPPED, &flags);

            if (where == selStart && (((flags >> 16) & 0xFFFF) == i)) {
                ISpannable::Probe(content)->RemoveSpan(TextKeyListener::CAPPED);
            }
            else {
                // stash the original character in the upper 16 span-flag bits
                flags = i << 16;
                i = Character::ToUpperCase(i);

                if (selStart == 0)
                    ISpannable::Probe(content)->SetSpan(TextKeyListener::CAPPED, 0, 0,
                        ISpanned::SPAN_MARK_MARK | flags);
                else
                    ISpannable::Probe(content)->SetSpan(TextKeyListener::CAPPED,
                        selStart - 1, selStart,
                        ISpanned::SPAN_EXCLUSIVE_EXCLUSIVE |
                        flags);
            }
        }

        // Replace the selection (or insert at the cursor) with the character.
        if (selStart != selEnd) {
            Selection::SetSelection(ISpannable::Probe(content), selEnd);
        }
        ISpannable::Probe(content)->SetSpan(OLD_SEL_START, selStart, selStart,
            ISpanned::SPAN_MARK_MARK);

        String str("");
        str.Append(i);
        AutoPtr<ICharSequence> cs;
        CString::New(str, (ICharSequence**)&cs);
        content->Replace(selStart, selEnd, cs);

        Int32 oldStart;
        ISpanned::Probe(content)->GetSpanStart(OLD_SEL_START, &oldStart);
        selEnd = Selection::GetSelectionEnd(ICharSequence::Probe(content));

        if (oldStart < selEnd) {
            ISpannable::Probe(content)->SetSpan(TextKeyListener::LAST_TYPED,
                oldStart, selEnd,
                ISpanned::SPAN_EXCLUSIVE_EXCLUSIVE);

            if (dead) {
                // keep the accent selected so the next key can compose with it
                Selection::SetSelection(ISpannable::Probe(content), oldStart, selEnd);
                ISpannable::Probe(content)->SetSpan(TextKeyListener::ACTIVE, oldStart, selEnd,
                    ISpanned::SPAN_EXCLUSIVE_EXCLUSIVE);
            }
        }

        AdjustMetaAfterKeypress(ISpannable::Probe(content));

        // potentially do autotext replacement if the character
        // that was typed was an autotext terminator
        Int32 end;
        if ((pref & TextKeyListener::AUTO_TEXT) != 0 && mAutoText &&
            (i == ' ' || i == '\t' || i == '\n' ||
                i == ',' || i == '.' || i == '!' || i == '?' ||
                i == '"' || Character::GetType(i) == Character::END_PUNCTUATION) &&
            (ISpanned::Probe(content)->GetSpanEnd(TextKeyListener::INHIBIT_REPLACEMENT, &end), end)
                != oldStart) {
            // scan back to the start of the word that was just terminated
            Int32 x;

            for (x = oldStart; x > 0; x--) {
                Char32 c;
                ICharSequence::Probe(content)->GetCharAt(x - 1, &c);
                if (c != '\'' && !Character::IsLetter(c)) {
                    break;
                }
            }

            String rep = GetReplacement(ICharSequence::Probe(content), x, oldStart, view);

            if (!rep.IsNull()) {
                // drop any previous Replaced spans, then remember the original
                // word so DEL can undo this replacement
                AutoPtr<ArrayOf<IInterface*> > repl;
                Int32 len;
                ICharSequence::Probe(content)->GetLength(&len);
                ISpanned::Probe(content)->GetSpans(0, len, EIID_IReplacedSpan, (ArrayOf<IInterface*>**)&repl);

                for (Int32 a = 0; a < repl->GetLength(); a++)
                    ISpannable::Probe(content)->RemoveSpan((*repl)[a]);

                AutoPtr<ArrayOf<Char32> > orig = ArrayOf<Char32>::Alloc((oldStart - x) * 4);
                //TODO
                TextUtils::GetChars(ICharSequence::Probe(content), x, oldStart, (ArrayOf<Char32>*)orig.Get(), 0);

                AutoPtr<Replaced> r = new Replaced();
                r->constructor(orig.Get());
                ISpannable::Probe(content)->SetSpan((INoCopySpan*)r, x, oldStart,
                    ISpanned::SPAN_EXCLUSIVE_EXCLUSIVE);

                AutoPtr<ICharSequence> cs;
                CString::New(rep, (ICharSequence**)&cs);
                content->Replace(x, oldStart, cs);
            }
        }

        // Replace two spaces by a period and a space.
        if ((pref & TextKeyListener::AUTO_PERIOD) != 0 && mAutoText) {
            selEnd = Selection::GetSelectionEnd(ICharSequence::Probe(content));
            if (selEnd - 3 >= 0) {
                Char32 ch;
                if ((ICharSequence::Probe(content)->GetCharAt(selEnd - 1, &ch), ch) == ' ' &&
                    (ICharSequence::Probe(content)->GetCharAt(selEnd - 2, &ch), ch) == ' ') {
                    Char32 c;
                    ICharSequence::Probe(content)->GetCharAt(selEnd - 3, &c);
                    // skip over closing quotes/punctuation before the spaces
                    for (Int32 j = selEnd - 3; j > 0; j--) {
                        if (c == '"' ||
                            Character::GetType(c) == Character::END_PUNCTUATION) {
                            ICharSequence::Probe(content)->GetCharAt(j - 1, &c);
                        }
                        else {
                            break;
                        }
                    }

                    if (Character::IsLetter(c) || Character::IsDigit(c)) {
                        AutoPtr<ICharSequence> cs;
                        CString::New(String("."), (ICharSequence**)&cs);
                        content->Replace(selEnd - 2, selEnd - 1, cs);
                    }
                }
            }
        }

        *ret = TRUE;
        return NOERROR;
    }
    else if ((keyCode == IKeyEvent::KEYCODE_DEL && (event->HasNoModifiers(&bHasNoModifiers), bHasNoModifiers)) ||
        ((event->HasModifiers(IKeyEvent::META_ALT_ON, &bHasModifiers), bHasModifiers) && selStart == selEnd)) {
        // special backspace case for undoing autotext

        Int32 consider = 1;

        // if backspacing over the last typed character,
        // it undoes the autotext prior to that character
        // (unless the character typed was newline, in which
        // case this behavior would be confusing)
        Int32 end;
        ISpanned::Probe(content)->GetSpanEnd(TextKeyListener::LAST_TYPED, &end);
        if (end == selStart) {
            Char32 c;
            if ((ICharSequence::Probe(content)->GetCharAt(selStart - 1, &c), c) != '\n')
                consider = 2;
        }

        // NOTE(review): repl is assumed non-NULL after GetSpans even when no
        // spans match -- confirm GetSpans always allocates the array.
        AutoPtr<ArrayOf<IInterface*> > repl = NULL;
        ISpanned::Probe(content)->GetSpans(
            selStart - consider, selStart, EIID_IReplacedSpan, (ArrayOf<IInterface*>**)&repl);

        if (repl->GetLength() > 0) {
            Int32 st;
            ISpanned::Probe(content)->GetSpanStart((*repl)[0], &st);
            Int32 en;
            ISpanned::Probe(content)->GetSpanEnd((*repl)[0], &en);
            Replaced* replaced = (Replaced*)IObject::Probe((*repl)[0]);
            String old(*(replaced->mText));

            ISpannable::Probe(content)->RemoveSpan((*repl)[0]);

            // only cancel the autocomplete if the cursor is at the end of
            // the replaced span (or after it, because the user is
            // backspacing over the space after the word, not the word
            // itself).
            if (selStart >= en) {
                ISpannable::Probe(content)->SetSpan(TextKeyListener::INHIBIT_REPLACEMENT,
                    en, en, ISpanned::SPAN_POINT_POINT);

                AutoPtr<ICharSequence> oldCs;
                CString::New(old, (ICharSequence**)&oldCs);
                content->Replace(st, en, oldCs);

                ISpanned::Probe(content)->GetSpanStart(TextKeyListener::INHIBIT_REPLACEMENT, &en);
                if (en - 1 >= 0) {
                    ISpannable::Probe(content)->SetSpan(
                        TextKeyListener::INHIBIT_REPLACEMENT,
                        en - 1, en, ISpanned::SPAN_EXCLUSIVE_EXCLUSIVE);
                }
                else {
                    ISpannable::Probe(content)->RemoveSpan(TextKeyListener::INHIBIT_REPLACEMENT);
                }
                AdjustMetaAfterKeypress(ISpannable::Probe(content));
            }
            else {
                AdjustMetaAfterKeypress(ISpannable::Probe(content));
                return BaseKeyListener::OnKeyDown(view, content, keyCode, event, ret);
            }

            *ret = TRUE;
            return NOERROR;
        }
    }

    return BaseKeyListener::OnKeyDown(view, content, keyCode, event, ret);
}
// Looks up the AutoText replacement for src[start, end). Tries the text as
// typed first, then lower-cased; when the lower-cased form matched, the
// original capitalization (none / first letter / all caps) is re-applied to
// the replacement. Returns a null String when there is no replacement, or
// when the replacement is identical to the original text.
String QwertyKeyListener::GetReplacement(
    /* [in] */ ICharSequence* src,
    /* [in] */ Int32 start,
    /* [in] */ Int32 end,
    /* [in] */ IView* view)
{
    Int32 len = end - start;
    Boolean changecase = FALSE;

    String replacement = CAutoText::Get(src, start, end, view);

    if (replacement.IsNull()) {
        // retry in lower case and remember to restore capitalization
        String key;
        key = TextUtils::Substring(src, start, end);
        key = key.ToLowerCase();
        AutoPtr<ICharSequence> csKey;
        CString::New(key, (ICharSequence**)&csKey);
        replacement = CAutoText::Get(csKey, 0, end - start, view);
        changecase = TRUE;

        if (replacement.IsNull())
            return String(NULL);
    }

    // count how many of the original characters were upper case
    Int32 caps = 0;

    if (changecase) {
        for (Int32 j = start; j < end; j++) {
            Char32 c;
            src->GetCharAt(j, &c);
            if (Character::IsUpperCase(c))
                caps++;
        }
    }

    String out;

    if (caps == 0)
        out = replacement;
    else if (caps == 1)
        out = ToTitleCase(replacement);
    else if (caps == len)
        out = replacement.ToUpperCase();
    else
        // mixed case that is neither title nor all caps: fall back to title
        out = ToTitleCase(replacement);

    AutoPtr<ICharSequence> cs;
    CString::New(out, (ICharSequence**)&cs);
    if (out.GetLength() == len &&
        TextUtils::RegionMatches(src, start, cs, 0, len))
        return String(NULL);

    return out;
}
/**
 * Marks the specified region of <code>content</code> as having
 * contained <code>original</code> prior to AutoText replacement.
 * Call this method when you have done or are about to do an
 * AutoText-style replacement on a region of text and want to let
 * the same mechanism (the user pressing DEL immediately after the
 * change) undo the replacement.
 *
 * @param content the Editable text where the replacement was made
 * @param start the start of the replaced region
 * @param end the end of the replaced region; the location of the cursor
 * @param original the text to be restored if the user presses DEL
 */
ECode QwertyKeyListener::MarkAsReplaced(
    /* [in] */ ISpannable* content,
    /* [in] */ Int32 start,
    /* [in] */ Int32 end,
    /* [in] */ const String& original)
{
    // Only one Replaced span may exist at a time: remove all earlier ones.
    AutoPtr<ArrayOf<IInterface*> > repl;
    Int32 len;
    ICharSequence::Probe(content)->GetLength(&len);
    ISpanned::Probe(content)->GetSpans(0, len, EIID_IReplacedSpan, (ArrayOf<IInterface*>**)&repl);
    for (Int32 a = 0; a < repl->GetLength(); a++) {
        content->RemoveSpan((*repl)[a]);
    }

    // NOTE(review): this copies the raw UTF-8 bytes of `original` into a
    // Char32 array with memcpy -- one byte per Char32 slot is NOT performed
    // (the bytes are packed), so the stored text is only correct if a later
    // consumer reinterprets the payload as bytes. Verify against the
    // String(*mText) reconstruction in OnKeyDown's backspace path.
    len = original.GetByteLength();
    AutoPtr<ArrayOf<Char32> > orig = ArrayOf<Char32>::Alloc(len);
    memcpy(orig->GetPayload(), original.string(), len);

    AutoPtr<Replaced> r = new Replaced();
    r->constructor(orig.Get());
    content->SetSpan(INoCopySpan::Probe(r), start, end, ISpanned::SPAN_EXCLUSIVE_EXCLUSIVE);
    return NOERROR;
}
// Shows the character-picker dialog for base character `c`, if `c` has a set
// of alternate characters registered in PICKER_SETS.
// @param view    the view the editable is attached to
// @param content the text being edited
// @param c       the base character whose alternates should be offered
// @param insert  TRUE to insert the picked character, FALSE to replace `c`
// @param count   key repeat count; the dialog is only opened on the first
//                repeat (count == 1) so auto-repeat does not re-open it
// @return TRUE when a picker set exists for `c` (event consumed), else FALSE
Boolean QwertyKeyListener::ShowCharacterPicker(
    /* [in] */ IView* view,
    /* [in] */ IEditable* content,
    /* [in] */ Char32 c,
    /* [in] */ Boolean insert,
    /* [in] */ Int32 count)
{
    HashMap<Char32, String>::Iterator iter = PICKER_SETS.Find(c);
    // FIX: the previous code dereferenced the iterator unconditionally;
    // when `c` has no registered picker set, Find() returns the end iterator
    // and dereferencing it is undefined behavior.
    if (iter == PICKER_SETS.End()) {
        return FALSE;
    }

    String set = iter->mSecond;
    if (set == NULL) {
        return FALSE;
    }

    if (count == 1) {
        // new CharacterPickerDialog(view.getContext(),
        //         view, content, set, insert).show();
    }
    return TRUE;
}
// Upper-cases only the first character of `src` ("title case").
String QwertyKeyListener::ToTitleCase(
    /* [in] */ const String& src)
{
    return src.ToUpperCase(0, 1);
}
// Key-up events need no QWERTY-specific handling; delegate to the meta-key
// machinery so shift/alt latching is updated.
ECode QwertyKeyListener::OnKeyUp(
    /* [in] */ IView* view,
    /* [in] */ IEditable* content,
    /* [in] */ Int32 keyCode,
    /* [in] */ IKeyEvent* event,
    /* [out] */ Boolean* ret)
{
    return MetaKeyKeyListener::OnKeyUp(view, content, keyCode, event, ret);
}

// Clears the requested latched meta-key states from the content.
ECode QwertyKeyListener::ClearMetaKeyState(
    /* [in] */ IView* view,
    /* [in] */ IEditable* content,
    /* [in] */ Int32 states)
{
    return MetaKeyKeyListener::ClearMetaKeyState(view, content, states);
}
} // namespace Method
} // namespace Text
} // namespace Droid
} // namespace Elastos
|
// Copyright Larry Gritz (et al)
// SPDX-License-Identifier: BSD-3-Clause
#include <Proto/Proto.h>
#include <pybind11/pybind11.h>
namespace py = pybind11;
#if PY_MAJOR_VERSION == 2
// Preferred Python string caster for Python2 is py::bytes, so it's a byte
// string (not unicode).
# define PY_STR py::bytes
#else
// Python3 is always unicode, so return a true str
# define PY_STR py::str
#endif
namespace PyProto {

// This DECLARE_PYMODULE mojo is necessary if we want to pass in the
// MODULE name as a #define. Google for Argument-Prescan for additional
// info on why this is necessary
#define DECLARE_PYMODULE(x) PYBIND11_MODULE(x, m)

// Defines the Python extension module (its name comes from the PYMODULE_NAME
// macro supplied by the build) and exposes the Proto helpers:
//   hello()   -> Proto::hello
//   add(a, b) -> Proto::add, with Python keyword arguments "a" and "b"
DECLARE_PYMODULE(PYMODULE_NAME)
{
    using namespace pybind11::literals;
    m.def("hello", &Proto::hello);
    m.def("add", &Proto::add, "a"_a, "b"_a);
}

} // namespace PyProto
|
#include "Data.hpp"
// Singly-linked LIFO stack built on Data<Type> nodes.
// NOTE(review): no copy constructor / copy assignment are declared even
// though the destructor frees owned nodes (Rule of Three) -- copying a Stack
// would double-delete the shared nodes; confirm Stacks are never copied or
// delete those special members.
template <typename Type>
class Stack
{
    Data <Type> *first;       // top-most node; nullptr when the stack is empty
    unsigned long int count;  // number of stored elements
public:
    Stack();
    ~Stack();
    // NOTE(review): declared to return unsigned int although `count` is
    // unsigned long, so very large sizes would be truncated on return.
    const unsigned int length();
    const bool empty();
    const Type top();     // throws a std::string message when empty
    void push(Type data);
    void pop();           // throws a std::string message when empty
    void show();          // prints the contents to std::cout, top first
};
// Constructs an empty stack: no nodes, zero elements.
template <typename Type>
Stack<Type>::Stack() : first(nullptr), count(0)
{
}
// Releases every node still owned by the stack.
template <typename Type>
Stack<Type>::~Stack()
{
    while (first != nullptr)
    {
        Data <Type> *next = first->return_p_next_data();
        delete first;
        first = next;
    }
    count = 0;
}
// Returns the number of stored elements.
// NOTE(review): narrows `count` (unsigned long) to unsigned int on return.
template <typename Type>
const unsigned int Stack<Type>::length()
{
    return count;
}
// True when the stack holds no elements.
template <typename Type>
const bool Stack<Type>::empty()
{
    return count == 0;
}
// Returns a copy of the top element.
// Throws a std::string message when the stack is empty.
template <typename Type>
const Type Stack<Type>::top()
{
    if (empty()) {
        std::string ExcEmpty = "Stos jest pusty";  // "The stack is empty"
        throw ExcEmpty;
    } else
        return first->return_data();
}
// Pushes a copy of `data` on top of the stack.
template <typename Type>
void Stack<Type>::push(Type data)
{
    Data <Type> *new_data = new Data <Type>;
    new_data->new_data(data);              // store the payload in the node
    new_data->change_p_next_data(first);   // link the previous top below it
    first = new_data;
    count++;
}
// Prints every element to std::cout, top-most first, one "Nr <i> : <value>"
// line per element.
template <typename Type>
void Stack<Type>::show()
{
    Data <Type> *pointer = first;
    unsigned int long counter = 0;
    while(pointer!=nullptr)
    {
        std::cout << "Nr " << ++counter << " : " << pointer->return_data() << std::endl;
        pointer = pointer->return_p_next_data();
    }
    pointer = nullptr;
}
// Removes the top element.
// Throws a std::string message when the stack is empty.
template <typename Type>
void Stack<Type>::pop()
{
    if (empty()) {
        std::string ExcEmpty = "Stos jest pusty";
        throw ExcEmpty;
    }
    Data <Type> *next = first->return_p_next_data();
    delete first;
    first = next;
    count--;
}
|
#include <iterator>
#include <efsw/String.hpp>
#include <efsw/Utf.hpp>
namespace efsw {
const std::size_t String::InvalidPos = StringType::npos;

/// Splits `str` on `splitchar`. Empty segments are emitted only when
/// `pushEmptyString` is true; a trailing non-empty segment is always pushed.
std::vector < std::string > String::split ( const std::string& str, const char& splitchar, const bool& pushEmptyString )
{
	std::vector<std::string> parts;
	std::string current;
	for ( char ch : str )
	{
		if ( ch != splitchar )
		{
			current += ch;
			continue;
		}
		if ( pushEmptyString || !current.empty() )
		{
			parts.push_back( current );
			current.clear();
		}
	}
	if ( !current.empty() )
	{
		parts.push_back( current );
	}
	return parts;
}
std::vector < String > String::split ( const String& str, const Uint32& splitchar, const bool& pushEmptyString )
{
std::vector < String > tmp;
String tmpstr;
for ( size_t i = 0; i < str.size(); i++ )
{
if ( str[i] == splitchar )
{
if ( pushEmptyString || tmpstr.size() )
{
tmp.push_back(tmpstr);
tmpstr = "";
}
}
else
{
tmpstr += str[i];
}
}
if ( tmpstr.size() )
{
tmp.push_back( tmpstr );
}
return tmp;
}
// Tests whether `str` begins with `start`.
// Contract (unusual, kept as-is): returns the index of the LAST matched
// character (start.size() - 1) on success, and -1 when `str` is shorter than
// `start`, when any character differs, or when `start` is empty.
int String::strStartsWith( const std::string& start, const std::string& str )
{
int pos = -1;
size_t size = start.size();
if ( str.size() >= size )
{
for ( std::size_t i = 0; i < size; i++ )
{
if ( start[i] == str[i] )
{
pos = (int)i;
}
else
{
// mismatch: reset and stop scanning
pos = -1;
break;
}
}
}
return pos;
}
// UTF-32 overload of strStartsWith; same unusual contract as the std::string
// overload: index of the last matched code point on success, otherwise -1
// (also -1 for an empty `start`).
int String::strStartsWith( const String& start, const String& str )
{
int pos = -1;
size_t size = start.size();
if ( str.size() >= size )
{
for ( std::size_t i = 0; i < size; i++ )
{
if ( start[i] == str[i] )
{
pos = (int)i;
}
else
{
// mismatch: reset and stop scanning
pos = -1;
break;
}
}
}
return pos;
}
// --- Constructors -----------------------------------------------------------
// String stores text internally as UTF-32 (mString); each constructor below
// converts its input encoding (ANSI / wide / UTF-8 / UTF-32) on the way in.

// Empty string.
String::String()
{
}
// Single ANSI character, decoded using the given locale.
String::String(char ansiChar, const std::locale& locale)
{
mString += Utf32::DecodeAnsi(ansiChar, locale);
}
#ifndef EFSW_NO_WIDECHAR
// Single wide character.
String::String(wchar_t wideChar)
{
mString += Utf32::DecodeWide(wideChar);
}
#endif
// Single UTF-32 code point, stored as-is.
String::String(StringBaseType utf32Char)
{
mString += utf32Char;
}
// Null-terminated UTF-8 string (parameter name "uf8String" is a historical
// typo for "utf8String"). Null pointer yields an empty String.
String::String( const char* uf8String ) {
if (uf8String)
{
std::size_t length = strlen(uf8String);
if (length > 0)
{
// reserve is a lower-bound hint; UTF-32 output has at most `length` units
mString.reserve(length + 1);
Utf8::ToUtf32(uf8String, uf8String + length, std::back_inserter(mString));
}
}
}
// UTF-8 std::string.
String::String( const std::string& utf8String ) {
mString.reserve( utf8String.length() + 1 );
Utf8::ToUtf32( utf8String.begin(), utf8String.end(), std::back_inserter( mString ) );
}
// Null-terminated ANSI string, decoded via the locale.
String::String(const char* ansiString, const std::locale& locale)
{
if (ansiString)
{
std::size_t length = strlen(ansiString);
if (length > 0)
{
mString.reserve(length + 1);
Utf32::FromAnsi(ansiString, ansiString + length, std::back_inserter(mString), locale);
}
}
}
// ANSI std::string, decoded via the locale.
String::String(const std::string& ansiString, const std::locale& locale)
{
mString.reserve(ansiString.length() + 1);
Utf32::FromAnsi(ansiString.begin(), ansiString.end(), std::back_inserter(mString), locale);
}
#ifndef EFSW_NO_WIDECHAR
// Null-terminated wide string. Null pointer yields an empty String.
String::String(const wchar_t* wideString)
{
if (wideString)
{
std::size_t length = std::wcslen(wideString);
if (length > 0)
{
mString.reserve(length + 1);
Utf32::FromWide(wideString, wideString + length, std::back_inserter(mString));
}
}
}
// std::wstring.
String::String(const std::wstring& wideString)
{
mString.reserve(wideString.length() + 1);
Utf32::FromWide(wideString.begin(), wideString.end(), std::back_inserter(mString));
}
#endif
// Null-terminated UTF-32 string; no conversion needed.
String::String(const StringBaseType* utf32String)
{
if (utf32String)
mString = utf32String;
}
// Internal UTF-32 storage type; direct copy.
String::String(const StringType& utf32String) :
mString(utf32String)
{
}
// Copy constructor.
String::String(const String& str) :
mString(str.mString)
{
}
// --- Conversions ------------------------------------------------------------

// Named factory: build a String from UTF-8 (same conversion as the
// String(const std::string&) constructor).
String String::fromUtf8( const std::string& utf8String )
{
String::StringType utf32;
utf32.reserve( utf8String.length() + 1 );
Utf8::ToUtf32( utf8String.begin(), utf8String.end(), std::back_inserter( utf32 ) );
return String( utf32 );
}
// Implicit conversion to std::string goes through the ANSI encoder
// (default locale), not UTF-8.
String::operator std::string() const
{
return toAnsiString();
}
// Encode to ANSI using `locale`; unrepresentable code points are replaced
// with 0 (the 4th argument of Utf32::ToAnsi).
std::string String::toAnsiString(const std::locale& locale) const
{
// Prepare the output string
std::string output;
output.reserve(mString.length() + 1);
// Convert
Utf32::ToAnsi(mString.begin(), mString.end(), std::back_inserter(output), 0, locale);
return output;
}
#ifndef EFSW_NO_WIDECHAR
// Encode to the platform wide encoding; unrepresentable code points become 0.
std::wstring String::toWideString() const
{
// Prepare the output string
std::wstring output;
output.reserve(mString.length() + 1);
// Convert
Utf32::ToWide(mString.begin(), mString.end(), std::back_inserter(output), 0);
return output;
}
#endif
// Encode to UTF-8 (lossless for any valid code point).
std::string String::toUtf8() const {
// Prepare the output string
std::string output;
output.reserve(mString.length() + 1);
// Convert
Utf32::toUtf8(mString.begin(), mString.end(), std::back_inserter(output) );
return output;
}
// --- Assignment, concatenation and element access ---------------------------

// Copy assignment.
String& String::operator =(const String& right)
{
mString = right.mString;
return *this;
}
// Assign a single UTF-32 code point (the string becomes length 1).
String& String::operator =( const StringBaseType& right )
{
mString = right;
return *this;
}
// Append another String.
String& String::operator +=(const String& right)
{
mString += right.mString;
return *this;
}
// Append a single UTF-32 code point.
String& String::operator +=( const StringBaseType& right )
{
mString += right;
return *this;
}
// Unchecked element read (by value).
String::StringBaseType String::operator [](std::size_t index) const
{
return mString[index];
}
// Unchecked element access (writable reference).
String::StringBaseType& String::operator [](std::size_t index)
{
return mString[index];
}
// Bounds-checked element read; throws std::out_of_range like std::basic_string::at.
String::StringBaseType String::at( std::size_t index ) const
{
return mString.at( index );
}
// Append one code point at the end.
void String::push_back( StringBaseType c )
{
mString.push_back( c );
}
// Constant-time exchange of contents with another String.
void String::swap ( String& str )
{
mString.swap( str.mString );
}
// --- std::basic_string-style forwarders -------------------------------------
// The members below forward directly to the underlying UTF-32 storage
// (mString) and keep std::basic_string semantics, including exceptions.

void String::clear()
{
mString.clear();
}
// size()/length() count UTF-32 code points, not bytes.
std::size_t String::size() const
{
return mString.size();
}
std::size_t String::length() const
{
return mString.length();
}
bool String::empty() const
{
return mString.empty();
}
void String::erase(std::size_t position, std::size_t count)
{
mString.erase(position, count);
}
String& String::insert(std::size_t position, const String& str)
{
mString.insert(position, str.mString);
return *this;
}
String& String::insert( std::size_t pos1, const String& str, std::size_t pos2, std::size_t n )
{
mString.insert( pos1, str.mString, pos2, n );
return *this;
}
// char* input is converted from UTF-8 first.
// NOTE(review): here `n` counts UTF-32 code points of the converted string,
// not bytes of `s` — confirm callers expect that.
String& String::insert ( size_t pos1, const char* s, size_t n )
{
String tmp( s );
mString.insert( pos1, tmp.data(), n );
return *this;
}
// Insert n copies of the (implicitly widened) character c.
String& String::insert ( size_t pos1, size_t n, char c )
{
mString.insert( pos1, n, c );
return *this;
}
String& String::insert ( size_t pos1, const char* s )
{
String tmp( s );
mString.insert( pos1, tmp.data() );
return *this;
}
String::Iterator String::insert ( Iterator p, char c )
{
return mString.insert( p, c );
}
void String::insert ( Iterator p, size_t n, char c )
{
mString.insert( p, n, c );
}
// Null-terminated UTF-32 data.
const String::StringBaseType* String::c_str() const
{
return mString.c_str();
}
const String::StringBaseType* String::data() const
{
return mString.data();
}
String::Iterator String::begin()
{
return mString.begin();
}
String::ConstIterator String::begin() const
{
return mString.begin();
}
String::Iterator String::end()
{
return mString.end();
}
String::ConstIterator String::end() const
{
return mString.end();
}
String::ReverseIterator String::rbegin()
{
return mString.rbegin();
}
String::ConstReverseIterator String::rbegin() const
{
return mString.rbegin();
}
String::ReverseIterator String::rend()
{
return mString.rend();
}
String::ConstReverseIterator String::rend() const
{
return mString.rend();
}
// Grow/shrink; new positions are filled with c (first overload) or
// value-initialized (second).
void String::resize( std::size_t n, StringBaseType c )
{
mString.resize( n, c );
}
void String::resize( std::size_t n )
{
mString.resize( n );
}
std::size_t String::max_size() const
{
return mString.max_size();
}
void String::reserve( size_t res_arg )
{
mString.reserve( res_arg );
}
std::size_t String::capacity() const
{
return mString.capacity();
}
// --- assign overloads -------------------------------------------------------
// Mirror std::basic_string::assign; char* input is converted from UTF-8
// before being stored.

String& String::assign ( const String& str )
{
	mString.assign( str.mString );
	return *this;
}
// Assign the substring [pos, pos+n) of str; throws std::out_of_range if
// pos > str.size(), like std::basic_string.
String& String::assign ( const String& str, size_t pos, size_t n )
{
	mString.assign( str.mString, pos, n );
	return *this;
}
// Assign the first n BYTES of s (converted from UTF-8).
// Bug fix: `n` was previously ignored and the whole null-terminated string
// was assigned, unlike std::basic_string::assign(const CharT*, size_type).
String& String::assign ( const char* s, size_t n )
{
	String tmp( std::string( s, n ) );
	mString.assign( tmp.mString );
	return *this;
}
// Assign the whole null-terminated UTF-8 string s.
String& String::assign ( const char* s )
{
	String tmp( s );
	mString.assign( tmp.mString );
	return *this;
}
// Assign n copies of the (implicitly widened) character c.
String& String::assign ( size_t n, char c )
{
	mString.assign( n, c );
	return *this;
}
// --- append overloads -------------------------------------------------------
// Mirror std::basic_string::append; char* input is converted from UTF-8
// before being appended.

String& String::append ( const String& str )
{
	mString.append( str.mString );
	return *this;
}
// Append the substring [pos, pos+n) of str.
String& String::append ( const String& str, size_t pos, size_t n )
{
	mString.append( str.mString, pos, n );
	return *this;
}
// Append the first n BYTES of s (converted from UTF-8).
// Bug fix: `n` was previously ignored and the whole null-terminated string
// was appended, unlike std::basic_string::append(const CharT*, size_type).
String& String::append ( const char* s, size_t n )
{
	String tmp( std::string( s, n ) );
	mString.append( tmp.mString );
	return *this;
}
// Append the whole null-terminated UTF-8 string s.
String& String::append ( const char* s )
{
	String tmp( s );
	mString.append( tmp.mString );
	return *this;
}
// Append n copies of a narrow char (implicitly widened to UTF-32).
String& String::append ( size_t n, char c )
{
	mString.append( n, c );
	return *this;
}
// Append n copies of a UTF-32 code point.
String& String::append ( std::size_t n, StringBaseType c )
{
	mString.append( n, c );
	return *this;
}
// --- replace overloads ------------------------------------------------------
// Forward to std::basic_string::replace; char* input is converted from UTF-8
// first, so in the (s, n2) overloads `n2` counts UTF-32 code points of the
// CONVERTED text, not bytes of `s` — a difference from std::string semantics.

String& String::replace ( size_t pos1, size_t n1, const String& str )
{
mString.replace( pos1, n1, str.mString );
return *this;
}
String& String::replace ( Iterator i1, Iterator i2, const String& str )
{
mString.replace( i1, i2, str.mString );
return *this;
}
String& String::replace ( size_t pos1, size_t n1, const String& str, size_t pos2, size_t n2 )
{
mString.replace( pos1, n1, str.mString, pos2, n2 );
return *this;
}
// NOTE(review): n2 applies after UTF-8 -> UTF-32 conversion (see header note).
String& String::replace ( size_t pos1, size_t n1, const char* s, size_t n2 )
{
String tmp( s );
mString.replace( pos1, n1, tmp.data(), n2 );
return *this;
}
String& String::replace ( Iterator i1, Iterator i2, const char* s, size_t n2 )
{
String tmp( s );
mString.replace( i1, i2, tmp.data(), n2 );
return *this;
}
String& String::replace ( size_t pos1, size_t n1, const char* s )
{
String tmp( s );
mString.replace( pos1, n1, tmp.mString );
return *this;
}
String& String::replace ( Iterator i1, Iterator i2, const char* s )
{
String tmp( s );
mString.replace( i1, i2, tmp.mString );
return *this;
}
// Replace with n2 copies of a narrow char, widened to UTF-32.
String& String::replace ( size_t pos1, size_t n1, size_t n2, char c )
{
mString.replace( pos1, n1, n2, (StringBaseType)c );
return *this;
}
String& String::replace ( Iterator i1, Iterator i2, size_t n2, char c )
{
mString.replace( i1, i2, n2, (StringBaseType)c );
return *this;
}
// --- find / rfind / copy ----------------------------------------------------
// All return positions in UTF-32 code points; InvalidPos (npos) on failure.

std::size_t String::find( const String& str, std::size_t start ) const
{
	return mString.find( str.mString, start );
}
// Search for the first n BYTES of s (converted from UTF-8).
// Bug fix: `n` was previously ignored and the whole null-terminated string
// was searched for, unlike std::basic_string::find(const CharT*, size_type,
// size_type).
std::size_t String::find ( const char* s, std::size_t pos, std::size_t n ) const
{
	return find( String( std::string( s, n ) ), pos );
}
// Search for the whole null-terminated UTF-8 string s.
std::size_t String::find ( const char* s, std::size_t pos ) const
{
	return find( String( s ), pos );
}
// Search for a single narrow char (widened to UTF-32).
size_t String::find ( char c, std::size_t pos ) const
{
	return mString.find( (StringBaseType)c, pos );
}
std::size_t String::rfind ( const String& str, std::size_t pos ) const
{
	return mString.rfind( str.mString, pos );
}
// Reverse search for the first n BYTES of s (converted from UTF-8).
// Bug fix: `n` was previously ignored (see find above).
std::size_t String::rfind ( const char* s, std::size_t pos, std::size_t n ) const
{
	return rfind( String( std::string( s, n ) ), pos );
}
std::size_t String::rfind ( const char* s, std::size_t pos ) const
{
	return rfind( String( s ), pos );
}
std::size_t String::rfind ( char c, std::size_t pos ) const
{
	return mString.rfind( c, pos );
}
// Copy up to n code points starting at pos into s (NOT null-terminated),
// returning the number copied — std::basic_string::copy semantics.
std::size_t String::copy ( StringBaseType* s, std::size_t n, std::size_t pos ) const
{
	return mString.copy( s, n, pos );
}
// Substring [pos, pos+n); throws std::out_of_range if pos > size().
String String::substr ( std::size_t pos, std::size_t n ) const
{
return String( mString.substr( pos, n ) );
}
// Lexicographic comparison of UTF-32 code units; <0, 0, >0 like
// std::basic_string::compare. char* arguments are converted from UTF-8 first.
int String::compare ( const String& str ) const
{
return mString.compare( str.mString );
}
int String::compare ( const char* s ) const
{
return compare( String( s ) );
}
int String::compare ( std::size_t pos1, std::size_t n1, const String& str ) const
{
return mString.compare( pos1, n1, str.mString );
}
int String::compare ( std::size_t pos1, std::size_t n1, const char* s) const
{
return compare( pos1, n1, String( s ) );
}
int String::compare ( std::size_t pos1, std::size_t n1, const String& str, std::size_t pos2, std::size_t n2 ) const
{
return mString.compare( pos1, n1, str.mString, pos2, n2 );
}
// NOTE(review): n2 here limits the CONVERTED UTF-32 text, not bytes of s —
// confirm callers expect that.
int String::compare ( std::size_t pos1, std::size_t n1, const char* s, std::size_t n2) const
{
return compare( pos1, n1, String( s ), 0, n2 );
}
// --- character-class searches -----------------------------------------------
// find_first_of / find_last_of / find_first_not_of / find_last_not_of with
// std::basic_string semantics. char* input is converted from UTF-8 first.
// NOTE(review): in every (s, pos, n) overload below, `n` is IGNORED — the
// whole null-terminated string is used as the character set. This deviates
// from std::basic_string; confirm before relying on the n parameter.

std::size_t String::find_first_of ( const String& str, std::size_t pos ) const
{
return mString.find_first_of( str.mString, pos );
}
std::size_t String::find_first_of ( const char* s, std::size_t pos, std::size_t n ) const
{
return find_first_of( String( s ), pos );
}
std::size_t String::find_first_of ( const char* s, std::size_t pos ) const
{
return find_first_of( String( s ), pos );
}
std::size_t String::find_first_of ( StringBaseType c, std::size_t pos ) const
{
return mString.find_first_of( c, pos );
}
std::size_t String::find_last_of ( const String& str, std::size_t pos ) const
{
return mString.find_last_of( str.mString, pos );
}
std::size_t String::find_last_of ( const char* s, std::size_t pos, std::size_t n ) const
{
return find_last_of( String( s ), pos );
}
std::size_t String::find_last_of ( const char* s, std::size_t pos ) const
{
return find_last_of( String( s ), pos );
}
std::size_t String::find_last_of ( StringBaseType c, std::size_t pos) const
{
return mString.find_last_of( c, pos );
}
std::size_t String::find_first_not_of ( const String& str, std::size_t pos ) const
{
return mString.find_first_not_of( str.mString, pos );
}
std::size_t String::find_first_not_of ( const char* s, std::size_t pos, std::size_t n ) const
{
return find_first_not_of( String( s ), pos );
}
std::size_t String::find_first_not_of ( const char* s, std::size_t pos ) const
{
return find_first_not_of( String( s ), pos );
}
std::size_t String::find_first_not_of ( StringBaseType c, std::size_t pos ) const
{
return mString.find_first_not_of( c, pos );
}
std::size_t String::find_last_not_of ( const String& str, std::size_t pos ) const
{
return mString.find_last_not_of( str.mString, pos );
}
std::size_t String::find_last_not_of ( const char* s, std::size_t pos, std::size_t n ) const
{
return find_last_not_of( String( s ), pos );
}
std::size_t String::find_last_not_of ( const char* s, std::size_t pos ) const
{
return find_last_not_of( String( s ), pos );
}
std::size_t String::find_last_not_of ( StringBaseType c, std::size_t pos ) const
{
return mString.find_last_not_of( c, pos );
}
// --- non-member relational operators and concatenation ----------------------
// All comparisons delegate to the underlying UTF-32 storage, so ordering is
// by code point value.

bool operator ==(const String& left, const String& right)
{
	return left.mString == right.mString;
}
bool operator !=(const String& left, const String& right)
{
	return left.mString != right.mString;
}
bool operator <(const String& left, const String& right)
{
	return left.mString < right.mString;
}
bool operator >(const String& left, const String& right)
{
	return left.mString > right.mString;
}
bool operator <=(const String& left, const String& right)
{
	return left.mString <= right.mString;
}
bool operator >=(const String& left, const String& right)
{
	return left.mString >= right.mString;
}
// Concatenation: copy the left operand, then append the right.
String operator +(const String& left, const String& right)
{
	String result( left );
	result += right;
	return result;
}
}
|
#include <stdlib.h>
#include "Solution.h"
#include <algorithm>
#include <fstream>
#include <iostream>
#include <sstream>
#include <math.h>
#include <cmath>
#include "cuda.h"
#include "cuda_runtime.h"
using namespace std;
// Default constructor: leaves the field arrays unallocated; callers must
// invoke assign_memory() before using the object.
Solution::Solution(){
}
// Allocating constructor: sizes the per-node fields (rho, u, v, w) for
// `_total_nodes` nodes and zeroes them.
// The body previously duplicated assign_memory() statement-for-statement;
// delegating keeps the two allocation paths from drifting apart.
Solution::Solution(int _total_nodes)
{
    assign_memory(_total_nodes);
}
// Destructor: release every field array and null the pointers so a stray
// use-after-free fails predictably. delete[] on nullptr is a no-op, so a
// default-constructed (never-allocated) Solution destructs safely only if
// the members were zero-initialized by the caller's path.
Solution::~Solution()
{
    delete [] rho;
    delete [] u;
    delete [] v;
    delete [] w;
    rho = nullptr;
    u = nullptr;
    v = nullptr;
    w = nullptr;
}
// Allocate the per-node solution fields for `_total_nodes` nodes (one extra
// slot is kept at the end, matching the original allocation convention) and
// zero them via Initialise().
// Changes from the original:
//  - the `if (ptr==NULL) exit(1)` checks were dead code: operator new throws
//    std::bad_alloc on failure instead of returning null, so they are removed;
//  - `new double[...]()` value-initializes every element, covering the extra
//    trailing slot that Initialise() does not touch.
// NOTE(review): previously-held arrays are NOT freed here — calling this on
// an already-allocated Solution leaks; confirm it is only called once.
void Solution::assign_memory(int _total_nodes)
{
    total_nodes = _total_nodes;
    rho = new double[total_nodes + 1]();
    u = new double[total_nodes + 1]();
    v = new double[total_nodes + 1]();
    w = new double[total_nodes + 1]();
    Initialise();
}
// Reset the solution to the zero state: clear the first `total_nodes` entries
// of every field and the average density.
// NOTE(review): the arrays are allocated with total_nodes + 1 elements; the
// final slot is left untouched here — confirm whether it is a deliberate
// sentinel or should be cleared as well.
void Solution::Initialise() {
std::fill_n(rho, total_nodes , 0.00);
std::fill_n(u, total_nodes, 0.0);
std::fill_n(v, total_nodes , 0.0);
std::fill_n(w, total_nodes , 0.0);
// std::fill_n(error, total_nodes , 0.0);
// std::fill_n(u_exact, total_nodes , 0.0);
average_rho = 0.0; //default value
}
// Initialise the density field.
// testcase 3: analytic cosine density field scaled by the reference velocity
// (periodic-flow initial condition; L excludes 4 ghost/boundary cells).
// Otherwise: linear variation — density at each cell is the magnitude of the
// line through `gradient_origin` with slope `_gradient` evaluated at the
// cell-centroid displacement.
void Solution::assign_pressure_gradient( vector_var _gradient, vector_var gradient_origin,
vector_var origin_magnitude, Mesh &Mesh, global_variables &globals){
vector_var displacement;
vector_var rho_temp;
if (globals.testcase ==3){
double rho_0, rho_coeff, L,PI;
rho_0 = origin_magnitude.Magnitude();
rho_coeff = rho_0 * pow(globals.max_velocity,2)/4.0*3.0;
L = (Mesh.get_num_x()-4)*Mesh.get_dx();
PI = globals.PI;
for( int t =0 ; t< Mesh.get_total_cells(); t++){
rho[t] = rho_0 - rho_coeff* (cos( 4*PI*Mesh.get_centroid_x(t)/L )
+cos(4*PI*Mesh.get_centroid_y(t)/L));
}
}else{
for( int t =0 ; t< Mesh.get_total_cells(); t++){
displacement.x = Mesh.get_centroid_x(t)-gradient_origin.x;
displacement.y = Mesh.get_centroid_y(t)- gradient_origin.y;
displacement.z = Mesh.get_centroid_z(t) - gradient_origin.z;
rho_temp = rho_temp.line_magnitude(origin_magnitude,_gradient,displacement);
rho[t] = rho_temp.Magnitude();
}
// NOTE(review): this add() mutates a local that is discarded immediately —
// appears to be dead leftover code.
displacement.add(rho_temp) ;
}
}
// Unstructured-mesh variant of assign_pressure_gradient: linear density
// variation only (no analytic testcase branch).
void Solution::uns_assign_pressure_gradient( vector_var _gradient, vector_var gradient_origin,
vector_var origin_magnitude, unstructured_mesh &Mesh, global_variables &globals){
vector_var displacement;
vector_var rho_temp;
for( int t =0 ; t< Mesh.get_total_cells(); t++){
displacement.x = Mesh.get_centroid_x(t)-gradient_origin.x;
displacement.y = Mesh.get_centroid_y(t)- gradient_origin.y;
displacement.z = Mesh.get_centroid_z(t) - gradient_origin.z;
rho_temp = rho_temp.line_magnitude(origin_magnitude,_gradient,displacement);
rho[t] = rho_temp.Magnitude();
}
// NOTE(review): dead leftover — mutates a local that goes out of scope.
displacement.add(rho_temp) ;
}
// Initialise the velocity field.
// testcase 3: analytic sinusoidal u/v field (amplitude = max_velocity;
// L excludes 4 ghost/boundary cells). Otherwise: linear variation of u only —
// the x/y/z components of the interpolated vector are summed into u; v and w
// are left unchanged.
void Solution::assign_velocity_gradient( vector_var _gradient, vector_var gradient_origin,
vector_var origin_magnitude, Mesh &Mesh, global_variables &globals){
vector_var displacement;
vector_var vel_temp;
if (globals.testcase ==3){
double U_0, L,PI;
U_0 = globals.max_velocity;
L = (Mesh.get_num_x()-4)*Mesh.get_dx();
PI = globals.PI;
for( int t =0 ; t< Mesh.get_total_cells(); t++){
u[t] = -U_0* ( cos( 2*PI*Mesh.get_centroid_x(t)/L )
* sin(2*PI*Mesh.get_centroid_y(t)/L));
v[t] = U_0* ( sin( 2*PI*Mesh.get_centroid_x(t)/L )
* cos(2*PI*Mesh.get_centroid_y(t)/L));
}
}else{
for( int t =0 ; t< Mesh.get_total_cells(); t++){
displacement.x = Mesh.get_centroid_x(t)-gradient_origin.x;
displacement.y = Mesh.get_centroid_y(t)- gradient_origin.y;
displacement.z = Mesh.get_centroid_z(t) - gradient_origin.z;
vel_temp = vel_temp.line_magnitude(origin_magnitude,_gradient,displacement);
u[t] = vel_temp.x + vel_temp.y +vel_temp.z;
}
// NOTE(review): dead leftover — mutates a local that goes out of scope.
displacement.add(vel_temp) ;
}
}
// Unstructured-mesh variant of assign_velocity_gradient: linear variation of
// u only (components of the interpolated vector are summed); v and w are
// left unchanged.
void Solution::uns_assign_velocity_gradient( vector_var _gradient, vector_var gradient_origin,
vector_var origin_magnitude, unstructured_mesh &Mesh, global_variables &globals){
vector_var displacement;
vector_var vel_temp;
for( int t =0 ; t< Mesh.get_total_cells(); t++){
displacement.x = Mesh.get_centroid_x(t)-gradient_origin.x;
displacement.y = Mesh.get_centroid_y(t)- gradient_origin.y;
displacement.z = Mesh.get_centroid_z(t) - gradient_origin.z;
vel_temp = vel_temp.line_magnitude(origin_magnitude,_gradient,displacement);
u[t] = vel_temp.x + vel_temp.y +vel_temp.z;
}
// NOTE(review): dead leftover — mutates a local that goes out of scope.
displacement.add(vel_temp) ;
}
// Overwrite the macroscopic variables stored for node i.
void Solution::update ( double _rho, double _u, double _v, double _w , int i){
    rho[i] = _rho;
    w[i] = _w;
    v[i] = _v;
    u[i] = _u;
}
// Dump rho/u/v/w as "<node index> ,<value>" rows, one file per field, under
// `output_location`. `globals` and `geometry` are unused here but kept for
// the established call signature.
// Changes from the original: streams are opened via the constructor and
// closed by their destructors (RAII), and '\n' replaces std::endl to avoid
// an explicit flush per line (identical file contents).
void Solution::output (std::string output_location, global_variables &globals,
domain_geometry &geometry){
    std::ofstream rho_txt(output_location + "/rho.txt", ios::out);
    std::ofstream u_txt(output_location + "/u.txt", ios::out);
    std::ofstream v_txt(output_location + "/v.txt", ios::out);
    std::ofstream w_txt(output_location + "/w.txt", ios::out);
    for (int i = 0; i < total_nodes; i++) {
        rho_txt << i << " ," << rho[i] << '\n';
        u_txt << i << " ," << u[i] << '\n';
        v_txt << i << " ," << v[i] << '\n';
        w_txt << i << " ," << w[i] << '\n';
    }
    // ofstream destructors flush and close the files.
}
// Write the vertical (u vs y) and horizontal (v vs x) centreline profiles,
// normalised by the reference velocity and domain lengths, to
// <output_location>/uy/<time>.dat and <output_location>/vx/<time>.dat.
// Boundary rows/columns (first and last) are skipped.
// Changes from the original: the `rho_txt` stream (declared, never opened,
// but close()d — setting failbit on an unopened stream) and the unused
// `rho_file` string are removed; '\n' replaces std::endl.
void Solution::output_centrelines (std::string output_location, global_variables &globals,
Mesh &mesh, double time){
    std::ostringstream u_file, v_file;
    u_file << output_location << "/uy/" << time << ".dat";
    v_file << output_location << "/vx/" << time << ".dat";
    std::ofstream u_txt(u_file.str(), ios::out);
    std::ofstream v_txt(v_file.str(), ios::out);
    // Centreline indices (integer division, so ceil() is a no-op — kept for
    // parity with the original behavior).
    int mid_x = ceil(mesh.get_num_x()/2);
    int mid_y = ceil(mesh.get_num_y()/2);
    int counter = 0;  // flat cell index, row-major
    for ( int j =0 ; j < mesh.get_num_y(); j++){
        for( int i = 0; i < mesh.get_num_x(); i++){
            if (j == mid_y && (j >0) && ( j< (mesh.get_num_y()-1))) {
                v_txt << mesh.get_centroid_x(counter)/mesh.get_X() << " ," << v[counter]/globals.max_velocity << '\n';
            }
            if ( i == mid_x && (i >0) && ( i< (mesh.get_num_x()-1))){
                u_txt << u[counter]/globals.max_velocity << " ," << mesh.get_centroid_y(counter)/mesh.get_Y() << '\n';
            }
            counter = counter + 1;
        }
    }
}
// Deep-copy the per-node fields and the average density from another
// Solution. Assumes both objects were allocated for the same total_nodes.
void Solution::clone( Solution &soln_a){
    for (int node = 0; node < total_nodes; ++node) {
        rho[node] = soln_a.get_rho(node);
        u[node] = soln_a.get_u(node);
        v[node] = soln_a.get_v(node);
        w[node] = soln_a.get_w(node);
    }
    average_rho = soln_a.get_average_rho();
}
// Unpack a GPU-style array of double4 into the separate host arrays:
// component w holds density, x/y/z hold the velocity components.
// NOTE(review): unlike clone(Solution&), average_rho is not updated here —
// confirm that is intended.
void Solution::clone(double4* soln_a) {
    for (int node = 0; node < total_nodes; ++node) {
        const double4& packed = soln_a[node];
        rho[node] = packed.w;
        u[node] = packed.x;
        v[node] = packed.y;
        w[node] = packed.z;
    }
}
//
//void Solution::post_process(double gamma, Mesh &mesh, global_variables &globals,
// initial_conditions &initials){
//
//
// if( globals.testcase == 1){
//
//
// for (int i =0; i< total_nodes; i++){
// rho[i] = rho[i] /gamma;
// u_exact[i] = mesh.get_centroid_y(i) *globals.max_velocity / mesh.get_Y() ;
// error[i] = (u[i] - u_exact[i]) *100;
// }
// }else if( globals.testcase == 2){
// for (int i =0; i< total_nodes; i++){
// rho[i] = rho[i] /gamma;
// u_exact[i] = -initials.rho_gradient.x /2* mesh.get_centroid_y(i)*
// (mesh.get_Y()- mesh.get_centroid_y(i)) / ( (globals.tau - 0.5) /3) /3 ;
// //second divide by 3 for rho to P conversion
// error[i] = (u[i] - u_exact[i]) *100;
// }
//
// }
//
//}
// update gradients for each cell
// Finite-difference gradients of u, v and rho from `src`, stored INTO this
// object's u/v/rho arrays (this Solution acts as a gradient buffer).
// direction == 1 computes d/dx using east/west neighbours, otherwise d/dy
// using north/south neighbours. Cells missing a neighbour (negative node id
// marks a boundary) fall back to one-sided differences; interior cells use
// the central difference across both neighbours.
void Solution::update_gradients(Boundary_Conditions &bcs,Mesh &mesh,domain_geometry &domain,
int direction, Solution &src ){
int i1,i2 ;
double dx,dy;
for(int i =0; i< mesh.get_total_cells();i++){
// x direction
if (direction == 1) {
i1 = mesh.get_e_node(i);
i2 = mesh.get_w_node(i);
if (mesh.get_w_node(i) < 0) {
// no west neighbour: forward difference toward the east
dx = mesh.get_centroid_x(i1) - mesh.get_centroid_x(i);
u[i] = (src.get_u(i1) - src.get_u(i)) /dx;
v[i] = (src.get_v(i1) - src.get_v(i)) /dx;
rho[i] = (src.get_rho(i1) - src.get_rho(i)) /dx;
}else if (mesh.get_e_node(i) < 0) {
// no east neighbour: backward difference toward the west
dx = mesh.get_centroid_x(i) - mesh.get_centroid_x(i2);
u[i] = (src.get_u(i) - src.get_u(i2)) /dx;
v[i] = (src.get_v(i) - src.get_v(i2)) /dx;
rho[i] = (src.get_rho(i) - src.get_rho(i2)) /dx;
}else{
// interior: central difference across east and west neighbours
dx = mesh.get_centroid_x(i1) - mesh.get_centroid_x(i2);
u[i] = (src.get_u(i1) - src.get_u(i2)) /dx;
v[i] = (src.get_v(i1) - src.get_v(i2)) /dx;
rho[i] = (src.get_rho(i1) - src.get_rho(i2)) /dx;
}
}else{
i1 = mesh.get_n_node(i);
i2 = mesh.get_s_node(i);
if (mesh.get_s_node(i) < 0) {
// no south neighbour: forward difference toward the north
dy = mesh.get_centroid_y(i1) - mesh.get_centroid_y(i);
u[i] = (src.get_u(i1) - src.get_u(i)) /dy;
v[i] = (src.get_v(i1) - src.get_v(i)) /dy;
rho[i] = (src.get_rho(i1) - src.get_rho(i)) /dy;
}else if (mesh.get_n_node(i) < 0) {
// no north neighbour: backward difference toward the south
dy = mesh.get_centroid_y(i) - mesh.get_centroid_y(i2);
u[i] = (src.get_u(i) - src.get_u(i2)) /dy;
v[i] = (src.get_v(i) - src.get_v(i2)) /dy;
rho[i] = (src.get_rho(i) - src.get_rho(i2)) /dy;
}else{
// interior: central difference across north and south neighbours
dy = mesh.get_centroid_y(i1) - mesh.get_centroid_y(i2);
u[i] = (src.get_u(i1) - src.get_u(i2)) /dy;
v[i] = (src.get_v(i1) - src.get_v(i2)) /dy;
rho[i] = (src.get_rho(i1) - src.get_rho(i2)) /dy;
}
}
}
}
// update gradients for each cell
// Weighted-least-squares gradients of u, v and rho from `src`, stored into
// this object's arrays (this Solution acts as a gradient buffer).
// direction == 1 computes d/dx from the east/west neighbour pair, otherwise
// d/dy from north/south. Boundary cells (a neighbour id < 0) fall back to
// one-sided finite differences. Interior cells accumulate the 1-D normal
// equations with inverse-distance-squared weights (w = 1/dx^2), unrolled
// over the two neighbours, then solve grad = RHS / LHS.
void Solution::update_gradients_least_squares(Boundary_Conditions &bcs,Mesh &mesh,domain_geometry &domain,
int direction, Solution &src ){
int i1,i2 ;
double dx,dy , LHS_xx, LHS_yy, RHS_x_u,RHS_x_v,RHS_x_rho,
RHS_y_u,RHS_y_v,RHS_y_rho , d_u, d_v,d_rho;
double w; // weighting
for(int i =0; i< mesh.get_total_cells();i++){
// reset the per-cell accumulators
LHS_xx = 0;
LHS_yy = 0;
RHS_x_u =0;
RHS_x_v =0;
RHS_x_rho =0;
RHS_y_u =0;
RHS_y_v =0 ;
RHS_y_rho =0;
// x direction
if (direction == 1) {
i1 = mesh.get_e_node(i);
i2 = mesh.get_w_node(i);
if (mesh.get_w_node(i) < 0) {
// boundary: forward difference toward the east
dx = mesh.get_centroid_x(i1) - mesh.get_centroid_x(i);
u[i] = (src.get_u(i1) - src.get_u(i)) /dx;
v[i] = (src.get_v(i1) - src.get_v(i)) /dx;
rho[i] = (src.get_rho(i1) - src.get_rho(i)) /dx;
}else if (mesh.get_e_node(i) < 0) {
// boundary: backward difference toward the west
dx = mesh.get_centroid_x(i) - mesh.get_centroid_x(i2);
u[i] = (src.get_u(i) - src.get_u(i2)) /dx;
v[i] = (src.get_v(i) - src.get_v(i2)) /dx;
rho[i] = (src.get_rho(i) - src.get_rho(i2)) /dx;
}else{
//least squares formulation -quick write -> unrolled or the moment
// get delta_distance
dx = mesh.get_centroid_x(i1) - mesh.get_centroid_x(i);
w = 1/pow(dx,2.0);
//delta macros
d_u = (src.get_u(i1) - src.get_u(i)) ;
d_v = (src.get_v(i1) - src.get_v(i));
d_rho = (src.get_rho(i1) - src.get_rho(i)) ;
//populate LHS and RHS
LHS_xx = LHS_xx + w *dx*dx;
RHS_x_u = RHS_x_u + w *dx*d_u;
RHS_x_v = RHS_x_v + w *dx*d_v;
RHS_x_rho = RHS_x_rho + w *dx*d_rho;
/// second cell
dx = mesh.get_centroid_x(i2) - mesh.get_centroid_x(i);
w = 1/pow(dx,2.0);
//delta macros
d_u = (src.get_u(i2) - src.get_u(i)) ;
d_v = (src.get_v(i2) - src.get_v(i));
d_rho = (src.get_rho(i2) - src.get_rho(i)) ;
//populate LHS and RHS
LHS_xx = LHS_xx + w *dx*dx;
RHS_x_u = RHS_x_u + w *dx*d_u;
RHS_x_v = RHS_x_v + w *dx*d_v;
RHS_x_rho = RHS_x_rho + w *dx*d_rho;
///calc gradients
u[i] = RHS_x_u/LHS_xx;
v[i] = RHS_x_v/LHS_xx;
rho[i] = RHS_x_rho/LHS_xx;
}
}else{
i1 = mesh.get_n_node(i);
i2 = mesh.get_s_node(i);
if (mesh.get_s_node(i) < 0) {
// boundary: forward difference toward the north
dy = mesh.get_centroid_y(i1) - mesh.get_centroid_y(i);
u[i] = (src.get_u(i1) - src.get_u(i)) /dy;
v[i] = (src.get_v(i1) - src.get_v(i)) /dy;
rho[i] = (src.get_rho(i1) - src.get_rho(i)) /dy;
}else if (mesh.get_n_node(i) < 0) {
// boundary: backward difference toward the south
dy = mesh.get_centroid_y(i) - mesh.get_centroid_y(i2);
u[i] = (src.get_u(i) - src.get_u(i2)) /dy;
v[i] = (src.get_v(i) - src.get_v(i2)) /dy;
rho[i] = (src.get_rho(i) - src.get_rho(i2)) /dy;
}else{
//least squares formulation -quick write -> unrolled or the moment
// get delta_distance
dy = mesh.get_centroid_y(i1) - mesh.get_centroid_y(i);
w = 1/pow(dy,2.0);
//delta macros
d_u = (src.get_u(i1) - src.get_u(i)) ;
d_v = (src.get_v(i1) - src.get_v(i));
d_rho = (src.get_rho(i1) - src.get_rho(i)) ;
//populate LHS and RHS
LHS_yy = LHS_yy + w *dy*dy;
RHS_y_u = RHS_y_u + w *dy*d_u;
RHS_y_v = RHS_y_v + w *dy*d_v;
RHS_y_rho = RHS_y_rho + w *dy*d_rho;
/// second cell
dy = mesh.get_centroid_y(i2) - mesh.get_centroid_y(i);
w = 1/pow(dy,2.0);
//delta macros
d_u = (src.get_u(i2) - src.get_u(i)) ;
d_v = (src.get_v(i2) - src.get_v(i));
d_rho = (src.get_rho(i2) - src.get_rho(i)) ;
//populate LHS and RHS
LHS_yy= LHS_yy + w *dy*dy;
RHS_y_u = RHS_y_u + w *dy*d_u;
RHS_y_v = RHS_y_v + w *dy*d_v;
RHS_y_rho = RHS_y_rho + w *dy*d_rho;
///calc gradients
u[i] = RHS_y_u/LHS_yy;
v[i] = RHS_y_v/LHS_yy;
rho[i] = RHS_y_rho/LHS_yy;
}
}
}
}
//
// update gradients for each cell
// Green-Gauss gradients of u, v and rho from `src`, stored into this
// object's arrays (this Solution acts as a gradient buffer).
// direction == 1 computes d/dx from face values interpolated between the
// cell and its east/west neighbours, otherwise d/dy from north/south faces.
// Face values use inverse-distance interpolation: alpha = |face-to-neighbour|
// / |cell-to-neighbour|. Boundary cells (neighbour id < 0) fall back to
// one-sided finite differences.
// Bug fix: in the y-branch the south-face spacing was assigned to `dx`
// instead of `dy`, so `alpha` for the south face was computed with the stale
// NORTH spacing. It now mirrors the x-branch exactly.
void Solution::update_gradients_green_gauss(Boundary_Conditions &bcs,Mesh &mesh,domain_geometry &domain,
int direction, Solution &src ){
    int i1, i2;
    double dx, dy;
    double df, alpha;
    double rho_n, rho_e, rho_w, rho_s;
    double u_n, u_e, u_w, u_s;
    double v_n, v_e, v_w, v_s;
    for (int i = 0; i < mesh.get_total_cells(); i++) {
        if (direction == 1) {
            // --- x direction: east/west faces ---
            i1 = mesh.get_e_node(i);
            i2 = mesh.get_w_node(i);
            if (mesh.get_w_node(i) < 0) {
                // no west neighbour: forward difference toward the east
                dx = mesh.get_centroid_x(i1) - mesh.get_centroid_x(i);
                u[i] = (src.get_u(i1) - src.get_u(i)) / dx;
                v[i] = (src.get_v(i1) - src.get_v(i)) / dx;
                rho[i] = (src.get_rho(i1) - src.get_rho(i)) / dx;
            } else if (mesh.get_e_node(i) < 0) {
                // no east neighbour: backward difference toward the west
                dx = mesh.get_centroid_x(i) - mesh.get_centroid_x(i2);
                u[i] = (src.get_u(i) - src.get_u(i2)) / dx;
                v[i] = (src.get_v(i) - src.get_v(i2)) / dx;
                rho[i] = (src.get_rho(i) - src.get_rho(i2)) / dx;
            } else {
                // east face value
                dx = mesh.get_centroid_x(i1) - mesh.get_centroid_x(i);
                df = mesh.get_centroid_x(i1) - mesh.get_east_x(i);
                alpha = abs(df / dx);
                rho_e = alpha * src.get_rho(i) + (1 - alpha) * src.get_rho(i1);
                u_e = alpha * src.get_u(i) + (1 - alpha) * src.get_u(i1);
                v_e = alpha * src.get_v(i) + (1 - alpha) * src.get_v(i1);
                // west face value
                dx = mesh.get_centroid_x(i2) - mesh.get_centroid_x(i);
                df = mesh.get_centroid_x(i2) - mesh.get_west_x(i);
                alpha = abs(df / dx);
                rho_w = alpha * src.get_rho(i) + (1 - alpha) * src.get_rho(i2);
                u_w = alpha * src.get_u(i) + (1 - alpha) * src.get_u(i2);
                v_w = alpha * src.get_v(i) + (1 - alpha) * src.get_v(i2);
                // surface integral over the two faces / cell volume
                rho[i] = 1 / mesh.get_cell_volume(i)
                    * (rho_e * mesh.get_e_area(i) - rho_w * mesh.get_w_area(i));
                u[i] = 1 / mesh.get_cell_volume(i)
                    * (u_e * mesh.get_e_area(i) - u_w * mesh.get_w_area(i));
                v[i] = 1 / mesh.get_cell_volume(i)
                    * (v_e * mesh.get_e_area(i) - v_w * mesh.get_w_area(i));
            }
        } else {
            // --- y direction: north/south faces ---
            i1 = mesh.get_n_node(i);
            i2 = mesh.get_s_node(i);
            if (mesh.get_s_node(i) < 0) {
                // no south neighbour: forward difference toward the north
                dy = mesh.get_centroid_y(i1) - mesh.get_centroid_y(i);
                u[i] = (src.get_u(i1) - src.get_u(i)) / dy;
                v[i] = (src.get_v(i1) - src.get_v(i)) / dy;
                rho[i] = (src.get_rho(i1) - src.get_rho(i)) / dy;
            } else if (mesh.get_n_node(i) < 0) {
                // no north neighbour: backward difference toward the south
                dy = mesh.get_centroid_y(i) - mesh.get_centroid_y(i2);
                u[i] = (src.get_u(i) - src.get_u(i2)) / dy;
                v[i] = (src.get_v(i) - src.get_v(i2)) / dy;
                rho[i] = (src.get_rho(i) - src.get_rho(i2)) / dy;
            } else {
                // north face value
                dy = mesh.get_centroid_y(i1) - mesh.get_centroid_y(i);
                df = mesh.get_centroid_y(i1) - mesh.get_north_y(i);
                alpha = abs(df / dy);
                rho_n = alpha * src.get_rho(i) + (1 - alpha) * src.get_rho(i1);
                u_n = alpha * src.get_u(i) + (1 - alpha) * src.get_u(i1);
                v_n = alpha * src.get_v(i) + (1 - alpha) * src.get_v(i1);
                // south face value
                // FIX: was `dx = ...`, leaving alpha divided by the stale
                // north-face dy.
                dy = mesh.get_centroid_y(i2) - mesh.get_centroid_y(i);
                df = mesh.get_centroid_y(i2) - mesh.get_south_y(i);
                alpha = abs(df / dy);
                rho_s = alpha * src.get_rho(i) + (1 - alpha) * src.get_rho(i2);
                u_s = alpha * src.get_u(i) + (1 - alpha) * src.get_u(i2);
                v_s = alpha * src.get_v(i) + (1 - alpha) * src.get_v(i2);
                // surface integral over the two faces / cell volume
                rho[i] = 1 / mesh.get_cell_volume(i)
                    * (rho_n * mesh.get_n_area(i) - rho_s * mesh.get_s_area(i));
                u[i] = 1 / mesh.get_cell_volume(i)
                    * (u_n * mesh.get_n_area(i) - u_s * mesh.get_s_area(i));
                v[i] = 1 / mesh.get_cell_volume(i)
                    * (v_n * mesh.get_n_area(i) - v_s * mesh.get_s_area(i));
            }
        }
    }
}
// update bc nodes to allow for changes in solution
// Refresh ghost/boundary cells from the interior solution.
// Type codes: 1 = Dirichlet (mirror about the prescribed value),
// 2 = Neumann (copy neighbour plus dx * prescribed gradient — note dx is
// fixed at 0 here, so this currently reduces to a plain copy),
// 3 = periodic, 4 = parabolic (Poiseuille) profile in y,
// 5 = parabolic profile in x.
void Solution::update_bcs(Boundary_Conditions &bcs,Mesh &mesh,domain_geometry &domain){
double dx =0;
for(int i =0; i< mesh.get_total_cells();i++){
// if bc present
if (bcs.get_bc(i)){
///NEEDS to be modified for non-uniform solver
// 1 = dirichlet, 2 = neumann, 3 = periodic
if(bcs.get_rho_type(i) == 1){
rho[i] = bcs.get_rho(i) - (rho[bcs.get_neighbour(i)] -bcs.get_rho(i));
}else if(bcs.get_rho_type(i) == 2){
rho[i] = rho[bcs.get_neighbour(i)] + dx*bcs.get_rho(i);
}else if(bcs.get_rho_type(i) == 3){
rho[i] = rho[bcs.get_periodic_node(i)];
}
if(bcs.get_vel_type(i) == 1){
u[i] = bcs.get_u(i) - (u[bcs.get_neighbour(i)] - bcs.get_u(i));
v[i] = bcs.get_v(i) - (v[bcs.get_neighbour(i)] -bcs.get_v(i));
}else if(bcs.get_vel_type(i) == 2){
u[i] = u[bcs.get_neighbour(i)] + dx*bcs.get_u(i);
v[i] = v[bcs.get_neighbour(i)] + dx*bcs.get_v(i);
}else if(bcs.get_vel_type(i) == 3){
u[i] = u[bcs.get_periodic_node(i)];
v[i] = v[bcs.get_periodic_node(i)];
}else if(bcs.get_vel_type(i) == 4){
u[i] = 4*bcs.get_u(i)/pow(domain.Y,2) * mesh.get_centroid_y(i)*
(domain.Y - mesh.get_centroid_y(i)) ;
v[i] = 4*bcs.get_v(i)/pow(domain.Y,2) * mesh.get_centroid_y(i)*
(domain.Y - mesh.get_centroid_y(i)) ;
}else if(bcs.get_vel_type(i) == 5){
// NOTE(review): this term mixes centroid_y into the X-direction
// parabola ((domain.X - centroid_y)); the v line below uses
// centroid_x — looks like a typo, confirm intended profile.
u[i] = 4*bcs.get_u(i)/pow(domain.X,2) * mesh.get_centroid_x(i)*
(domain.X - mesh.get_centroid_y(i)) ;
v[i] = 4*bcs.get_v(i)/pow(domain.X,2) * mesh.get_centroid_x(i)*
(domain.X - mesh.get_centroid_x(i)) ;
}
}
}
}
// update bc nodes to allow for changes in solution
// Unstructured-mesh variant: bc values live in ghost cells j = i + n_cells;
// the owning interior cell nb is found through the boundary face adjacency.
// rho types: 1 dirichlet, 2 neumann (dx == 0 -> copy), 3 periodic,
//            6/7 fixed value, 8 zero-gradient copy.
// vel types: 1..5 as in update_bcs, 6 fixed (tapered) velocity, 8 slip wall
//            (v mirrored, u/w copied).
void Solution::update_unstructured_bcs(Boundary_Conditions &bcs, unstructured_mesh &mesh, domain_geometry &domain,
                                       int time) {
    double dx = 0; // neumann spacing fixed at 0 -> copies the neighbour value
    int j, face, nb;
    double taper = min((time + 1) / 3000.0, 1.0);
    taper = 1.0; // NOTE(review): start-up ramp disabled; taper forced to 1
    for (int i = 0; i < mesh.get_num_bc(); i++) {
        // if bc present
        if (bcs.get_bc(i)) {
            j = i + mesh.get_n_cells();         // ghost-cell index
            face = i + mesh.get_n_neighbours(); // boundary face index
            nb = mesh.get_mesh_owner(face);     // interior neighbour cell
            /// NEEDS to be modified for non-uniform solver
            // 1 = dirichlet, 2 = neumann, 3 = periodic
            if (bcs.get_rho_type(i) == 1) {
                rho[j] = bcs.get_rho(i) - (rho[nb] - bcs.get_rho(i));
            } else if (bcs.get_rho_type(i) == 2) {
                rho[j] = rho[nb] + dx * bcs.get_rho(i);
            } else if (bcs.get_rho_type(i) == 3) {
                rho[j] = rho[bcs.get_periodic_node(i)];
            } else if (bcs.get_rho_type(i) == 6) {
                rho[j] = bcs.get_rho(i);
            } else if (bcs.get_rho_type(i) == 7) { // wall condition - doesn't get used
                rho[j] = bcs.get_rho(i);
            } else if (bcs.get_rho_type(i) == 8) {
                rho[j] = rho[nb];
            }
            if (bcs.get_vel_type(i) == 1) {
                u[j] = bcs.get_u(i) - (u[nb] - bcs.get_u(i));
                v[j] = bcs.get_v(i) - (v[nb] - bcs.get_v(i));
                w[j] = bcs.get_w(i) - (w[nb] - bcs.get_w(i));
            } else if (bcs.get_vel_type(i) == 2) {
                u[j] = u[nb] + dx * bcs.get_u(i);
                v[j] = v[nb] + dx * bcs.get_v(i);
                w[j] = w[nb] + dx * bcs.get_w(i);
            } else if (bcs.get_vel_type(i) == 3) {
                u[j] = u[bcs.get_periodic_node(i)];
                v[j] = v[bcs.get_periodic_node(i)];
                w[j] = w[bcs.get_periodic_node(i)];
            } else if (bcs.get_vel_type(i) == 4) {
                // parabolic profile across y
                // NOTE(review): centroid is looked up with the bc index i, not the
                // ghost cell j — TODO confirm against the mesh accessor contract.
                u[j] = 4 * bcs.get_u(i) / pow(domain.Y, 2) * mesh.get_centroid_y(i) *
                       (domain.Y - mesh.get_centroid_y(i));
                v[j] = 4 * bcs.get_v(i) / pow(domain.Y, 2) * mesh.get_centroid_y(i) *
                       (domain.Y - mesh.get_centroid_y(i));
            } else if (bcs.get_vel_type(i) == 5) {
                // parabolic profile across x
                // BUGFIX: the u-component used get_centroid_y in the (X - x) factor,
                // unlike the matching v-component line.
                u[j] = 4 * bcs.get_u(i) / pow(domain.X, 2) * mesh.get_centroid_x(i) *
                       (domain.X - mesh.get_centroid_x(i));
                v[j] = 4 * bcs.get_v(i) / pow(domain.X, 2) * mesh.get_centroid_x(i) *
                       (domain.X - mesh.get_centroid_x(i));
            } else if (bcs.get_vel_type(i) == 6) {
                u[j] = bcs.get_u(i) * taper;
                v[j] = bcs.get_v(i) * taper;
                w[j] = bcs.get_w(i) * taper;
            } else if (bcs.get_vel_type(i) == 8) {
                // slip wall: tangential components copied, normal (v) mirrored
                u[j] = u[nb];
                v[j] = -v[nb];
                w[j] = w[nb];
            }
        }
    }
}
void Solution::remove_double_errors(){
double tolerance;
tolerance = numeric_limits<double>::epsilon();
for (int i= 0; i< total_nodes; i++){
if( fabs(rho[i]) < tolerance){
rho[i] = 0.0;
}
if( fabs(u[i]) < tolerance){
u[i] = 0.0;
}
if( fabs(v[i]) < tolerance){
v[i] = 0.0;
}
}
}
// Restricts (averages) this fine-grid solution onto the coarse grid.
// Each interior fine cell contributes a quarter of its value to the coarse
// cell that contains it (uniform mesh: fine/coarse area ratio is 1/4).
void Solution::restriction(Solution &coarse_soln, Mesh &coarse_mesh,
                           Mesh &fine_mesh, Boundary_Conditions &bc) {
    coarse_soln.set_average_rho(average_rho);
    // may need to swap the approach here around for parallelisation,
    // i.e. loop through the coarse mesh instead
    for (int fine = 0; fine < total_nodes; fine++) {
        if (bc.get_bc(fine)) {
            continue; // boundary/ghost cells are not restricted
        }
        // fine index -> coarse (x, y); the +0.5 accounts for ghost cells
        const int cx = floor((fine / fine_mesh.get_num_y()) / 2.0 + 0.5);
        const int cy = floor(fmod(fine, fine_mesh.get_num_y()) / 2.0 + 0.5);
        const int coarse = coarse_mesh.get_num_y() * cx + cy;
        // uniform mesh -> get_area is not needed, just divide by 4
        coarse_soln.add_rho(coarse, rho[fine] / 4.0);
        coarse_soln.add_u(coarse, u[fine] / 4.0);
        coarse_soln.add_v(coarse, v[fine] / 4.0);
        coarse_soln.add_w(coarse, w[fine] / 4.0);
    }
}
// Bilinear prolongation of the multigrid correction (Q2h - Q2h_(0)) from the
// coarse mesh onto the fine-mesh solution Qh. For each interior fine cell the
// correction is a weighted sum of four coarse cells: nearest (9/16),
// North/South edge (3/16), East/West edge (3/16) and vertex (1/16) neighbours.
// `fmg` is currently unused but kept for interface compatibility.
void Solution::prolongation(Solution &coarse_soln, Solution &temp_soln, Solution &soln,
                            Mesh &coarse_mesh, Mesh &fine_mesh,
                            Boundary_Conditions &bc, bool fmg) {
    double mg_delta_rho, mg_delta_u, mg_delta_v, mg_delta_w;
    // loop through the finer mesh as this will enable parallelisation later
    int edge_cell_x, edge_cell_y, coarse_i, coarse_x, coarse_y;
    double mg_factor[4] = {9.0 / 16.0, 3.0 / 16.0, 3.0 / 16.0, 1. / 16.0};
    for (int i = 0; i < total_nodes; i++) {
        if (!bc.get_bc(i)) {
            // coarse-cell index in x and y; the +0.5 accounts for ghost cells
            coarse_x = floor(i / fine_mesh.get_num_y() / 2.0 + 0.5);
            coarse_y = floor(fmod(i, fine_mesh.get_num_y()) / 2.0 + 0.5);
            // four coarse-cell contributions per fine cell
            for (int j = 0; j < 4; j++) {
                switch (j) {
                    case 0: // nearest coarse cell
                        coarse_i = coarse_mesh.get_num_y() * coarse_x + coarse_y;
                        break;
                    case 1: // North/South edge cell contribution
                        edge_cell_y = coarse_y + pow(-1.0, floor(fmod(fmod(i, fine_mesh.get_num_y()), 2.0)));
                        coarse_i = coarse_mesh.get_num_y() * coarse_x + edge_cell_y;
                        break;
                    case 2: // East/West edge cell contribution
                        edge_cell_x = coarse_x + pow(-1.0, floor(fmod(floor(i / fine_mesh.get_num_y()), 2.0)));
                        coarse_i = coarse_mesh.get_num_y() * edge_cell_x + coarse_y;
                        break;
                    case 3: // vertex coarse cell contribution; relies on cases 1
                            // and 2 having set edge_cell_y/edge_cell_x this pass
                        coarse_i = coarse_mesh.get_num_y() * edge_cell_x + edge_cell_y;
                        break;
                }
                /// coarse_soln = Q2h, temp_soln = Q2h_(0), soln = Qh
                mg_delta_rho = (coarse_soln.get_rho(coarse_i) - temp_soln.get_rho(coarse_i)) * mg_factor[j];
                mg_delta_u = (coarse_soln.get_u(coarse_i) - temp_soln.get_u(coarse_i)) * mg_factor[j];
                mg_delta_v = (coarse_soln.get_v(coarse_i) - temp_soln.get_v(coarse_i)) * mg_factor[j];
                mg_delta_w = (coarse_soln.get_w(coarse_i) - temp_soln.get_w(coarse_i)) * mg_factor[j];
                soln.add_rho(i, mg_delta_rho);
                soln.add_u(i, mg_delta_u);
                soln.add_v(i, mg_delta_v);
                // BUGFIX: w was incremented with mg_delta_v (copy-paste error)
                soln.add_w(i, mg_delta_w);
            }
        }
    }
    // NOTE(review): removed the always-true `calculate` flag, the dead
    // trailing `calculate = true;` statement, and the write-only `debug`
    // Solution that was allocated and filled but never read.
}
// Parses one value per line from `filename` into `dest` (node order, index 0
// upward). Lines may carry trailing comma-separated fields; the LAST non-empty
// token of each line is taken as the value, matching what the original
// extraction logic effectively stored for multi-field lines.
// NOTE(review): the exporter's exact column layout is not visible here —
// confirm the value is the final field of each line.
template <typename FieldArray>
static void load_field_file(const std::string &filename, FieldArray &dest) {
    std::ifstream file(filename.c_str(), std::ios::in);
    std::string line, token, last;
    int i = 0;
    while (std::getline(file, line)) {
        last.clear();
        std::istringstream iss(line);
        while (std::getline(iss, token, ',')) {
            if (!token.empty()) last = token;
        }
        if (!last.empty()) {
            std::istringstream value(last);
            value >> dest[i];
        }
        i++;
    }
    file.close();
}
// Reads previously exported rho/u/v/w fields back from
// <output_file>/{rho,u,v,w}.txt.
// BUGFIX: the streams were opened with ios::out (misleading — ifstream forces
// ios::in as well, but the flag documented the wrong intent), and the
// `getline(iss, token, ',')` followed by `iss >> field[i]` pattern failed to
// store anything for single-value lines (the stream was already exhausted).
void Solution::import(global_variables &globals) {
    load_field_file(globals.output_file + "/rho.txt", rho);
    load_field_file(globals.output_file + "/u.txt", u);
    load_field_file(globals.output_file + "/v.txt", v);
    load_field_file(globals.output_file + "/w.txt", w);
}
|
//==================================================================================================
/**
EVE - Expressive Vector Engine
Copyright : EVE Contributors & Maintainers
SPDX-License-Identifier: MIT
**/
//==================================================================================================
#include <eve/detail/diff_div.hpp>
#include <eve/function/diff/acos.hpp>
#include <eve/function/sqrt.hpp>
#include <type_traits>
// Return-type check: eve::diff(eve::acos) applied to a floating SIMD value T
// must yield T.
// BUGFIX: the test description said "diff(cos)" although the case exercises
// acos — renamed for consistency with the behaviour test below.
TTS_CASE_TPL("Check diff(acos) return type", EVE_TYPE)
{
  if constexpr(eve::floating_value<T>)
  {
    TTS_EXPR_IS(eve::diff(eve::acos)(T()), T);
  }
}
// Numerical check: eve::diff(eve::acos) is compared against a centered
// finite-difference approximation of acos at a few sample points.
TTS_CASE_TPL("Check eve::diff(eve::acos) behavior", EVE_TYPE)
{
  if constexpr(eve::floating_value<T>)
  {
    using elt_t = eve::element_type_t<T>;
    // finite differences are inexact, hence the very wide ULP budget
    // (1e4 for single precision, 1e8 for double)
    auto ulp = (sizeof(elt_t) == 4) ? 1.0e4 : 1.0e8;
    auto df = [](auto f, auto x){return eve::detail::centered_diffdiv(f, x); };
    TTS_ULP_EQUAL(eve::diff(eve::acos)(T{0.25}), df(eve::acos, T(0.25)) , ulp);
    TTS_ULP_EQUAL(eve::diff(eve::acos)(T{0}), df(eve::acos, T(0)) , ulp);
    TTS_ULP_EQUAL(eve::diff(eve::acos)(T{-0.25}), df(eve::acos, T(-0.25)), ulp);
  }
}
|
#include "planta.h"
// Loads the plant OBJ model, normalizes it and computes its normals.
// Terminates the program if the model file cannot be read.
void planta::abrirMalla() {
    // NOTE(review): the previous `objmodel_ptr = NULL; if (!objmodel_ptr)`
    // pattern always entered the branch, so the guard was dead code.
    objmodel_ptr = glmReadOBJ("./modelos/planta1.obj");
    if (!objmodel_ptr)
        exit(1); // BUGFIX: was exit(0), which reported success on a fatal error
    glmUnitize(objmodel_ptr);           // scale/centre model into unit cube
    glmFacetNormals(objmodel_ptr);
    glmVertexNormals(objmodel_ptr, 90.0); // smooth normals, 90-degree crease angle
}
// Draws the loaded OBJ model translated to (x, y, z), using the materials
// stored in the model itself (GLM_MATERIAL).
// NOTE(review): the previous material_Ka/Kd/Ks/Ke/Se locals were declared but
// never passed to glMaterialfv, so they had no effect and were removed.
void planta::dibujarMalla(float x, float y, float z) {
    glPushMatrix();
    glTranslatef(x, y, z);
    glmDraw(objmodel_ptr, GLM_SMOOTH | GLM_MATERIAL);
    glPopMatrix();
}
|
/*=============================================================================
Copyright (c) 2001-2011 Joel de Guzman
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
=============================================================================*/
#if !defined(SPIRIT_DIFFERENCE_FEBRUARY_11_2007_1250PM)
#define SPIRIT_DIFFERENCE_FEBRUARY_11_2007_1250PM
#if defined(_MSC_VER)
#pragma once
#endif
#include <boost/spirit/home/qi/domain.hpp>
#include <boost/spirit/home/qi/meta_compiler.hpp>
#include <boost/spirit/home/qi/parser.hpp>
#include <boost/spirit/home/qi/detail/attributes.hpp>
#include <boost/spirit/home/support/info.hpp>
#include <boost/spirit/home/support/has_semantic_action.hpp>
#include <boost/spirit/home/support/handles_container.hpp>
#include <boost/fusion/include/at.hpp>
namespace boost { namespace spirit
{
    ///////////////////////////////////////////////////////////////////////////
    // Enablers
    ///////////////////////////////////////////////////////////////////////////
    // Opt the binary minus operator into the qi domain: `a - b` on qi parsers
    // is compiled into qi::difference by the make_composite machinery below.
    template <>
    struct use_operator<qi::domain, proto::tag::minus> // enables -
      : mpl::true_ {};
}}
namespace boost { namespace spirit { namespace qi
{
    // difference parser: matches Left only where Right does NOT match.
    // The exposed attribute is Left's attribute; Right is matched attribute-less.
    template <typename Left, typename Right>
    struct difference : binary_parser<difference<Left, Right> >
    {
        typedef Left left_type;
        typedef Right right_type;
        template <typename Context, typename Iterator>
        struct attribute
        {
            // attribute of `a - b` is the attribute of `a`
            typedef typename
                traits::attribute_of<left_type, Context, Iterator>::type
            type;
        };
        difference(Left const& left_, Right const& right_)
          : left(left_), right(right_) {}
        template <typename Iterator, typename Context
          , typename Skipper, typename Attribute>
        bool parse(Iterator& first, Iterator const& last
          , Context& context, Skipper const& skipper
          , Attribute& attr_) const
        {
            // Unlike classic Spirit, with this version of difference, the rule
            // lit("policeman") - "police" will always fail to match.
            // Spirit2 does not count the matching chars while parsing and
            // there is no reliable and fast way to check if the LHS matches
            // more than the RHS.
            // Try RHS first
            Iterator start = first;
            if (right.parse(first, last, context, skipper, unused))
            {
                // RHS succeeds, we fail.
                // Rewind the iterator so no input is consumed on failure.
                first = start;
                return false;
            }
            // RHS fails, now try LHS
            return left.parse(first, last, context, skipper, attr_);
        }
        template <typename Context>
        info what(Context& context) const
        {
            // diagnostic name plus the descriptions of both operands
            return info("difference",
                std::make_pair(left.what(context), right.what(context)));
        }
        Left left;
        Right right;
    };
    ///////////////////////////////////////////////////////////////////////////
    // Parser generators: make_xxx function (objects)
    ///////////////////////////////////////////////////////////////////////////
    // Builds a qi::difference from the two operands of proto's minus node.
    template <typename Elements, typename Modifiers>
    struct make_composite<proto::tag::minus, Elements, Modifiers>
      : make_binary_composite<Elements, difference>
    {};
}}}
namespace boost { namespace spirit { namespace traits
{
    ///////////////////////////////////////////////////////////////////////////
    // difference has a semantic action iff either operand has one
    template <typename Left, typename Right>
    struct has_semantic_action<qi::difference<Left, Right> >
      : binary_has_semantic_action<Left, Right> {};
    ///////////////////////////////////////////////////////////////////////////
    // container handling is delegated to both operands
    template <typename Left, typename Right, typename Attribute
      , typename Context, typename Iterator>
    struct handles_container<qi::difference<Left, Right>, Attribute, Context
      , Iterator>
      : binary_handles_container<Left, Right, Attribute, Context, Iterator> {};
}}}
#endif
|
/*
MIT License
Copyright 2002 Ifara Tecnologias S.L.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "serial.h"
// Opens serial port
// Input : PortSettingType
// Output : handle to the device
// Returns 0 on success
// Returns -1 if not capable of opening the serial port handler,
// or if port config cannot be read.
// Returns -2 to -7 from specific failures in setup_serial
int open_port(PortSettingsType ps, HANDLE *handle) {
    *handle = open(ps.port, O_RDWR | O_NOCTTY | O_NONBLOCK);
    if (*handle == -1) {
        return -1; // cannot open serial port
    }
    int rc = setup_serial(*handle, ps.baudrate, ps.databits, ps.stopbits, ps.parity);
    if (rc != 0) {
        // configuration failed: don't leak the descriptor
        close(*handle);
        return rc;
    }
    return 0; // Serial port opened and configured correctly
}
// Close the serial port handle
// Returns 0 on success
// Returns -1 on error , and errno is updated.
// Thin wrapper over close(2); the result is passed through unchanged.
int close_port(HANDLE handle) {
return close(handle);
}
// Send buffer of bytes
// Input: serial port handler, pointer to first byte of buffer, number of bytes to send
// Returns 0 on success
// Returns -1 on error
int send_buffer(HANDLE fd, unsigned char *tx_array, short bytes_to_send) {
    return (write(fd, tx_array, bytes_to_send) == -1) ? -1 : 0;
}
// Send one byte
// Input: serial port handler, byte to send
// Returns 0 on success
// Returns -1 on error
int send_byte(HANDLE fd, unsigned char car) {
    return (write(fd, &car, 1) == -1) ? -1 : 0;
}
// Check if there is a byte or not waiting to be read (RX)
// Returns 'n' > 0 as numbers of bytes to read
// Returns 0 is there is no byte waiting
int rxbyte_waiting(HANDLE fd) {
    int pending = 0;
    if (ioctl(fd, FIONREAD, &pending) != 0) {
        return 0; // ioctl failed: report nothing waiting
    }
    return pending;
}
// Check if there is a byte or not waiting to be sent (TX)
// Returns 'n' > 0 as numbers of bytes still queued for transmission
// Returns 0 is there is no byte waiting
int txbyte_waiting(HANDLE fd) {
    int n = 0;
    // BUGFIX: this used TIOCINQ, which reports the *input* queue (it is an
    // alias of FIONREAD); the output queue is queried with TIOCOUTQ.
    if (ioctl(fd, TIOCOUTQ, &n) == 0) {
        return n;
    }
    return 0;
}
// Read a byte within a period of time
// plazo is the wait budget in microseconds.
// Returns byte read (255 is ambiguous with the timeout marker -1)
// Returns -1 if timeout happened.
// timeout = 0 if no timeout, timeout = 1 if timeout
unsigned char read_byte_time(HANDLE fd, int plazo, int *timeout) {
    fd_set leer;
    struct timeval tout;
    unsigned char c = 0; // BUGFIX: defined value even if read() fails below
    tout.tv_sec = 0;
    tout.tv_usec = plazo;
    FD_ZERO(&leer);
    FD_SET(fd, &leer);
    // BUGFIX: nfds must be the highest descriptor plus one (was fd+2)
    if (select(fd + 1, &leer, NULL, NULL, &tout) == 0) {
        *timeout = 1;
        return (unsigned char)-1;
    }
    *timeout = 0;
    read(fd, &c, 1);
    return c;
}
// Read a byte. Blocking call. waits until byte is received
// Returns byte read
unsigned char read_byte(HANDLE fd) {
    unsigned char c;
    do {
        /* busy-wait until at least one byte is buffered */
    } while (!rxbyte_waiting(fd));
    read(fd, &c, 1);
    return c;
}
// Flush TX buffer (discards data written but not yet transmitted)
// Returns -1 if error
// Returns 0 if OK
int flush_buffer_tx(HANDLE fd) {
    return (tcflush(fd, TCOFLUSH) == -1) ? -1 : 0;
}
// Flush RX buffer (discards data received but not yet read)
// Returns -1 if error
// Returns 0 if OK
int flush_buffer_rx(HANDLE fd) {
    return (tcflush(fd, TCIFLUSH) == -1) ? -1 : 0;
}
// Sends break signal (a stream of zero bits for an implementation-defined duration)
// Returns -1 if error
// Returns 0 if OK
int send_break(HANDLE fd) {
    return (tcsendbreak(fd, 1) == -1) ? -1 : 0;
}
// Define serial port settings
// str1 -> serial port name
// str2 -> "baud,databits,parity,stopbits", e.g. "115200,8,n,1"
//         parity: e/o/m/s/n; stopbits: "1", "1.5" or "2"
// Returns the populated PortSettingsType; defaults (9600 8N1) are kept for
// any field that does not parse.
PortSettingsType str2ps(char *str1, char *str2) {
    PortSettingsType ps;
    char parity;
    char stopbits[4];
    // Default values (just in case)
    ps.baudrate = 9600;
    ps.databits = 8;
    ps.parity = NOPARITY;
    ps.stopbits = ONESTOPBIT;
    // NOTE(review): ps.port capacity is defined in serial.h — confirm str1
    // cannot exceed it, or switch to snprintf with the real size.
    sprintf(ps.port, "%s", str1);
    // BUGFIX: bound the stop-bits field to 3 chars + NUL; an unbounded %s
    // could overflow the 4-byte stopbits buffer on malformed input.
    if (sscanf(str2, "%d,%d,%c,%3s", &ps.baudrate, &ps.databits, &parity, stopbits) == 4) {
        switch (parity) {
            case 'e':
                ps.parity = EVENPARITY;
                break;
            case 'o':
                ps.parity = ODDPARITY;
                break;
            case 'm':
                ps.parity = MARKPARITY;
                break;
            case 's':
                ps.parity = SPACEPARITY;
                break;
            case 'n':
                ps.parity = NOPARITY;
                break;
        }
        if (!strcmp(stopbits, "1")) {
            ps.stopbits = ONESTOPBIT;
        }
        else if (!strcmp(stopbits, "1.5")) {
            ps.stopbits = ONE5STOPBITS;
        }
        else if (!strcmp(stopbits, "2")) {
            ps.stopbits = TWOSTOPBITS;
        }
    }
    return ps;
}
// Set the serial port configuration ( baudrate, databits, stopbits, parity)
// Returns 0 if OK
// Returns -1 if cannot get serial port configuration
// Returns -2 if baudrate not supported
// Returns -3 if selected baudrate didn't work
// Returns -4 if databits selection failed
// Returns -5 if parity selection failed
// Returns -6 if stopbits selection failed
// Returns -7 if cannot update new options
int setup_serial(int fdes,int baud,int databits,int stopbits,int parity) {
int n;
struct termios options;
// Get the current options
if (tcgetattr(fdes,&options) != 0) {
// error getting the serial port configuration options
return -1;
}
// Set the baud rate (both directions; cfset* return 0 on success)
switch (baud) {
case 2400:
n = cfsetospeed(&options,B2400);
n += cfsetispeed(&options,B2400);
break;
case 4800:
n = cfsetospeed(&options,B4800);
n += cfsetispeed(&options,B4800);
break;
case 9600:
n = cfsetospeed(&options,B9600);
n += cfsetispeed(&options,B9600);
break;
case 19200:
n = cfsetospeed(&options,B19200);
n += cfsetispeed(&options,B19200);
break;
case 38400:
n = cfsetospeed(&options,B38400);
n += cfsetispeed(&options,B38400);
break;
case 57600:
n = cfsetospeed(&options,B57600);
n += cfsetispeed(&options,B57600);
break;
case 115200:
n = cfsetospeed(&options,B115200);
n += cfsetispeed(&options,B115200);
break;
case 230400:
n = cfsetospeed(&options,B230400);
n += cfsetispeed(&options,B230400);
break;
case 921600:
n = cfsetospeed(&options,B921600);
n += cfsetispeed(&options,B921600);
break;
default:
// not supported baudrate
return -2;
}
// If n != 0 then Baud Rate selection didn't work
if (n != 0) {
return -3; // Error setting the baud rate
}
// Set the data size
options.c_cflag &= ~CSIZE;
switch (databits) {
case 7:
options.c_cflag |= CS7;
break;
case 8:
options.c_cflag |= CS8;
break;
default:
// Not supported data size
return -4;
}
// Set up parity
switch (parity) {
case NOPARITY:
options.c_cflag &= ~PARENB; // Clear parity enable
options.c_iflag &= ~INPCK; // Disable input parity checking
break;
case ODDPARITY:
options.c_cflag |= (PARODD | PARENB); // Enable odd parity
options.c_iflag |= INPCK; // Enable input parity checking
break;
case EVENPARITY:
options.c_cflag |= PARENB; // Enable parity
options.c_cflag &= ~PARODD; // Turn odd off => even
options.c_iflag |= INPCK; // Enable input parity checking
break;
default:
// Unsupported parity
return -5;
}
// Set up stop bits
// NOTE(review): ONE5STOPBITS (possible output of str2ps) is not
// representable in termios and falls into the -6 branch here.
switch (stopbits) {
case ONESTOPBIT:
options.c_cflag &= ~CSTOPB;
break;
case TWOSTOPBITS:
options.c_cflag |= CSTOPB;
break;
default:
// Unsupported stop bits
return -6;
}
// Set input parity option
if (parity != NOPARITY)
options.c_iflag |= INPCK;
// Deal with hardware or software flow control
options.c_cflag &= ~CRTSCTS; // Disable RTS/CTS
//options.c_iflag |= (IXANY); // xon/xoff flow control
options.c_iflag &= ~(IXON|IXOFF|IXANY); // disable xon/xoff flow control
// Output processing
options.c_oflag &= ~OPOST; // No output processing
options.c_oflag &= ~ONLCR; // Don't convert linefeeds
// Input processing
options.c_iflag |= IGNBRK; // Ignore break conditions
options.c_iflag &= ~IUCLC; // Don't map upper to lower case
options.c_iflag &= ~BRKINT; // Ignore break signals
options.c_iflag &= ~INLCR; // Don't map NL to CR
options.c_iflag &= ~ICRNL; // Don't map CR to NL
// Miscellaneous stuff
options.c_cflag |= (CLOCAL | CREAD); // Enable receiver, set local
// Linux seems to have problem with the following ?
// options.c_cflag |= (IXON | IXOFF); // Software flow control
options.c_lflag = 0; // no local flags (raw, non-canonical input)
options.c_cflag |= HUPCL; // Drop DTR on close
// Setup non blocking, return on 1 character
// (VMIN=0/VTIME=1: read returns after at most 0.1 s even with no data)
options.c_cc[VMIN] = 0;
options.c_cc[VTIME] = 1;
// Clear the line
tcflush(fdes,TCIFLUSH);
// Update the options and do it NOW
if (tcsetattr(fdes,TCSANOW,&options) != 0) {
return -7;
}
return 0; // Serial port configured correctly
}
|
#include <dmzEventConsts.h>
#include <dmzEventModule.h>
#include "dmzNetExtPacketCodecEventBasic.h"
#include <dmzNetModuleAttributeMap.h>
#include <dmzObjectModule.h>
#include <dmzRuntimeConfigToTypesBase.h>
#include <dmzRuntimeDefinitions.h>
#include <dmzRuntimeEventType.h>
#include <dmzRuntimeObjectType.h>
#include <dmzRuntimePluginFactoryLinkSymbol.h>
#include <dmzRuntimePluginInfo.h>
#include <dmzRuntimeUUID.h>
#include <dmzSystemMarshal.h>
#include <dmzSystemUnmarshal.h>
#include <dmzTypesMask.h>
#include <dmzTypesMatrix.h>
#include <dmzTypesUUID.h>
#include <dmzTypesVector.h>
/*!
\class dmz::NetExtPacketCodecEventBasic
\ingroup Net
\brief Basic event network codec.
*/
//! \cond
// Constructor. Runtime attribute handles are resolved in _init; note that
// _launchHandle/_detonateHandle/_collisionHandle (1, 2, 3) are fixed
// wire-format discriminator values, not runtime handles.
dmz::NetExtPacketCodecEventBasic::NetExtPacketCodecEventBasic (
const PluginInfo &Info,
Config &local) :
Plugin (Info),
NetExtPacketCodecEvent (Info),
_SysID (get_runtime_uuid (Info)),
_log (Info),
_time (Info.get_context ()),
_defaultHandle (0),
_sourceHandle (0),
_targetHandle (0),
_munitionsHandle (0),
_launchHandle (1),
_detonateHandle (2),
_collisionHandle (3),
_eventMod (0),
_attrMod (0),
_objMod (0) {
_init (local);
}
// Destructor. Module pointers are observed, not owned — nothing to release.
dmz::NetExtPacketCodecEventBasic::~NetExtPacketCodecEventBasic () {
}
// Plugin Interface
// Caches the event, attribute-map and object modules as sibling plugins are
// discovered, and drops the cached pointer when the matching plugin is removed.
void
dmz::NetExtPacketCodecEventBasic::discover_plugin (
const PluginDiscoverEnum Mode,
const Plugin *PluginPtr) {

   if (Mode == PluginDiscoverAdd) {
      if (!_eventMod) { _eventMod = EventModule::cast (PluginPtr); }
      if (!_attrMod) { _attrMod = NetModuleAttributeMap::cast (PluginPtr); }
      if (!_objMod) { _objMod = ObjectModule::cast (PluginPtr); }
   }
   else if (Mode == PluginDiscoverRemove) {
      // BUGFIX: this branch used `_eventMod = EventModule::cast (PluginPtr)`
      // (assignment, not comparison), which nulled _eventMod whenever ANY
      // non-EventModule plugin was removed. Compare, matching the
      // _attrMod/_objMod handling below.
      if (_eventMod && (_eventMod == EventModule::cast (PluginPtr))) {
         _eventMod = 0;
      }
      if (_attrMod && (_attrMod == NetModuleAttributeMap::cast (PluginPtr))) {
         _attrMod = 0;
      }
      if (_objMod && (_objMod == ObjectModule::cast (PluginPtr))) { _objMod = 0; }
   }
}
// NetExtPacketCodecEvent Interface
// Decodes one event packet. Wire layout (see encode_event for the writer):
// system UUID, type enum (1 launch / 2 detonate / 3 collision), source UUID,
// target UUID, [munitions UUID + 3-part net object type, launch/detonate
// only], position, velocity, target offset vector.
// Sets isLoopback when the packet originated from this system.
// Returns True only when the event was created and closed successfully.
dmz::Boolean
dmz::NetExtPacketCodecEventBasic::decode (Unmarshal &data, Boolean &isLoopback) {
Boolean result (False);
if (_objMod && _attrMod && _eventMod) {
UUID sysID;
data.get_next_uuid (sysID);
if (_SysID == sysID) {
isLoopback = True;
}
else {
const UInt32 TypeEnum (data.get_next_uint32 ());
UUID sourceID;
data.get_next_uuid (sourceID);
EventType type;
ObjectType munitionType;
Handle sourceHandle (0), targetHandle (0), munitionsHandle (0);
sourceHandle = _objMod->lookup_handle_from_uuid (sourceID);
// NOTE(review): if TypeEnum matches none of the three discriminators,
// `type` stays default-constructed and is passed to create_event as-is —
// confirm create_event rejects an unset EventType.
if (TypeEnum == _launchHandle) { type = _launchType; }
else if (TypeEnum == _detonateHandle) { type = _detonateType; }
else if (TypeEnum == _collisionHandle) { type = _collisionType; }
UUID targetID;
data.get_next_uuid (targetID);
targetHandle = _objMod->lookup_handle_from_uuid (targetID);
if ((TypeEnum == _launchHandle) || (TypeEnum == _detonateHandle)) {
// munitions block is only present for launch/detonate packets
UUID munitionsID;
data.get_next_uuid (munitionsID);
munitionsHandle = _objMod->lookup_handle_from_uuid (munitionsID);
ArrayUInt32 typeArray;
typeArray.set (0, data.get_next_uint32 ());
typeArray.set (1, data.get_next_uint32 ());
typeArray.set (2, data.get_next_uint32 ());
_attrMod->to_internal_object_type (typeArray, munitionType);
}
Vector pos, vel, offset;
data.get_next_vector (pos);
data.get_next_vector (vel);
data.get_next_vector (offset);
const Handle EventHandle (_eventMod->create_event (type, EventRemote));
if (EventHandle) {
_eventMod->store_object_handle (EventHandle, _sourceHandle, sourceHandle);
_eventMod->store_object_handle (EventHandle, _targetHandle, targetHandle);
if ((TypeEnum == _launchHandle) || (TypeEnum == _detonateHandle)) {
_eventMod->store_object_handle (
EventHandle,
_munitionsHandle,
munitionsHandle);
_eventMod->store_object_type (
EventHandle,
_munitionsHandle,
munitionType);
}
_eventMod->store_position (EventHandle, _defaultHandle, pos);
_eventMod->store_velocity (EventHandle, _defaultHandle, vel);
_eventMod->store_vector (EventHandle, _targetHandle, offset);
result = _eventMod->close_event (EventHandle);
}
}
}
return result;
}
// Encodes one event into the wire layout consumed by decode (system UUID,
// type enum, source UUID, target UUID, optional munitions block for
// launch/detonate, then position/velocity/target-offset vectors).
// Missing lookups serialize as default UUIDs/zero values.
// Returns True whenever all three required modules are present.
dmz::Boolean
dmz::NetExtPacketCodecEventBasic::encode_event (
const Handle EventHandle,
Marshal &data) {
Boolean result (False);
if (_objMod && _attrMod && _eventMod) {
data.set_next_uuid (_SysID);
UInt32 typeEnum (0);
EventType type;
_eventMod->lookup_event_type (EventHandle, type);
// NOTE(review): an event of none of these types is sent with typeEnum 0,
// which decode ignores — confirm such events should reach this codec.
if (type.is_of_type (_detonateType)) { typeEnum = _detonateHandle; }
else if (type.is_of_type (_launchType)) { typeEnum = _launchHandle; }
else if (type.is_of_type (_collisionType)) { typeEnum = _collisionHandle; }
data.set_next_uint32 (typeEnum);
Handle sourceHandle (0);
UUID sourceID, targetID, munitionsID;
if (_eventMod->lookup_object_handle (EventHandle, _sourceHandle, sourceHandle)) {
_objMod->lookup_uuid (sourceHandle, sourceID);
}
data.set_next_uuid (sourceID);
Handle targetHandle (0);
if (_eventMod->lookup_object_handle (EventHandle, _targetHandle, targetHandle)) {
_objMod->lookup_uuid (targetHandle, targetID);
}
data.set_next_uuid (targetID);
if ((typeEnum == _detonateHandle) || (typeEnum == _launchHandle)) {
// munitions block: UUID plus the 3-part network object type
Handle munitionsHandle (0);
if (_eventMod->lookup_object_handle (
EventHandle,
_munitionsHandle,
munitionsHandle)) {
_objMod->lookup_uuid (munitionsHandle, munitionsID);
}
data.set_next_uuid (munitionsID);
ObjectType munitionType;
ArrayUInt32 array;
if (_eventMod->lookup_object_type (
EventHandle,
_munitionsHandle,
munitionType)) {
_attrMod->to_net_object_type (munitionType, array);
}
data.set_next_uint32 (array.get (0));
data.set_next_uint32 (array.get (1));
data.set_next_uint32 (array.get (2));
}
Vector pos;
_eventMod->lookup_position (EventHandle, _defaultHandle, pos);
data.set_next_vector (pos);
Vector vel;
_eventMod->lookup_velocity (EventHandle, _defaultHandle, vel);
data.set_next_vector (vel);
Vector offset;
_eventMod->lookup_vector (EventHandle, _targetHandle, offset);
data.set_next_vector (offset);
result = True;
}
return result;
}
// Resolves the named attribute handles and the three event types; the type
// names default to the runtime constants but can be overridden in the local
// config (events.launch.name / events.detonate.name / events.collision.name).
void
dmz::NetExtPacketCodecEventBasic::_init (Config &local) {
RuntimeContext *context (get_plugin_runtime_context ());
Definitions defs (context, &_log);
_defaultHandle = defs.create_named_handle (EventAttributeDefaultName);
_sourceHandle = defs.create_named_handle (EventAttributeSourceName);
_targetHandle = defs.create_named_handle (EventAttributeTargetName);
_munitionsHandle = defs.create_named_handle (EventAttributeMunitionsName);
_launchType.set_type (
config_to_string ("events.launch.name", local, EventLaunchName),
context);
_detonateType.set_type (
config_to_string ("events.detonate.name", local, EventDetonationName),
context);
_collisionType.set_type (
config_to_string ("events.collision.name", local, EventCollisionName),
context);
}
//! \endcond
// Plugin factory entry point; `global` is part of the factory signature but
// unused by this codec.
extern "C" {
DMZ_PLUGIN_FACTORY_LINK_SYMBOL dmz::Plugin *
create_dmzNetExtPacketCodecEventBasic (
const dmz::PluginInfo &Info,
dmz::Config &local,
dmz::Config &global) {
return new dmz::NetExtPacketCodecEventBasic (Info, local);
}
};
|
/*
###############################################################################
#
# Temboo Arduino library
#
# Copyright 2016, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
###############################################################################
*/
#include <Arduino.h>
#include <Mailbox.h>
#ifdef ARDUINO_ARCH_SAMD
#include "avr/dtostrf.h"
#endif
#include "utility/TembooGlobal.h"
#include "TembooMonitoring.h"
const int MAX_MAILBOX_MESSAGE_SIZE = 128;
const unsigned long POLL_TIMEOUT = 50;
const unsigned long INITIATE_TIMEOUT_MS = 5000;
const unsigned long MCU_PING_PERIOD_MS = 5000;
// Writes a debug line to the Yun Console, prefixed with "Debug: ".
void logTembooDebug(const char *c) {
Console.print("Debug: ");
Console.println(c);
}
// Free-function wrapper so non-member code can push a pin update onto the
// websocket; forwards straight to TembooMessaging::sendData.
void addWebSocketPinData(int pin, int pinVal, bool requestResponse) {
TembooMessaging::sendData(pin, pinVal, requestResponse);
}
// Mails an "Mi|V<interval>" message telling the bridge process to change the
// sensor reporting interval.
void updateIntervalTime(int intervalTime) {
uint8_t msg[MAX_MAILBOX_MESSAGE_SIZE] = {0};
int messageSize = snprintf((char*)msg, MAX_MAILBOX_MESSAGE_SIZE, "Mi|V%i", intervalTime);
Mailbox.writeMessage(msg, messageSize);
}
// Builds a messaging manager over a caller-supplied sensor table.
// The table is owned by the caller; all slots are cleared here and sensors
// are registered later via addTembooSensor.
TembooMessaging::TembooMessaging(TembooSensor** sensorTable, int sensorTableSize) {
    m_sensorTable = sensorTable;
    m_sensorTableSize = sensorTableSize;
    m_sensorTableDepth = 0;
    // no credentials until the setters are called
    m_accountName = NULL;
    m_appKeyName = NULL;
    m_appKey = NULL;
    m_deviceID = NULL;
    m_connectionStatus = false;
    m_connectionAttemptTime = millis();
    m_lastPingTime = millis();
    // empty every slot so addTembooSensor can find free entries
    for (int slot = 0; slot < m_sensorTableSize; slot++) {
        m_sensorTable[slot] = NULL;
    }
}
// Registers a sensor in the first free table slot.
// Returns 0 on success, -1 if the sensor is already registered or the table
// is full.
int TembooMessaging::addTembooSensor(TembooSensor* sensor) {
    for (int slot = 0; slot < m_sensorTableSize; slot++) {
        TembooSensor* entry = m_sensorTable[slot];
        if (entry == sensor) {
            logTembooDebug("Sensor already added");
            return -1;
        }
        if (entry == NULL) {
            m_sensorTable[slot] = sensor;
            m_sensorTableDepth++;
            return 0;
        }
    }
    logTembooDebug("Sensor table full, sensor not added");
    return -1;
}
// Launches the Linux-side "tembooMessaging" bridge process if it is not
// already running; the connection is marked down until a handshake succeeds.
void TembooMessaging::startMessaging() {
    if (!running()) {
        // BUGFIX: corrected "messanger" typo in the trace output
        TEMBOO_TRACELN("starting messenger");
        m_connectionStatus = false;
        Process::begin("tembooMessaging");
        runAsynchronously();
    }
}
void TembooMessaging::setSensorsToDefaultState() {
int i = 0;
for (; i < m_sensorTableDepth; i++) {
if (m_sensorTable[i]->write != NULL) {
m_sensorTable[i]->write(m_sensorTable[i]->sensorConfig, m_sensorTable[i]->defaultValue);
}
}
}
// Initializes the Mailbox transport and launches the messaging bridge.
void TembooMessaging::begin() {
Mailbox.begin();
startMessaging();
}
// Credential setters. Each stores a raw pointer only — no copy is made, so
// the caller must keep the backing storage alive for the lifetime of this
// object.
// NOTE(review): the String& overloads store the result of c_str(); that
// pointer dangles if the caller's String is a temporary or is destroyed or
// reallocated before the connection is initiated — TODO confirm callers, or
// copy into owned buffers.
void TembooMessaging::setAccountName(const String& accountName) {
m_accountName = accountName.c_str();
}
void TembooMessaging::setAccountName(const char* accountName) {
m_accountName = accountName;
}
void TembooMessaging::setAppKeyName(const String& appKeyName) {
m_appKeyName = appKeyName.c_str();
}
void TembooMessaging::setAppKeyName(const char* appKeyName) {
m_appKeyName = appKeyName;
}
void TembooMessaging::setAppKey(const String& appKey) {
m_appKey = appKey.c_str();
}
void TembooMessaging::setAppKey(const char* appKey) {
m_appKey = appKey;
}
void TembooMessaging::setDeviceID(const String& deviceID) {
m_deviceID = deviceID.c_str();
}
void TembooMessaging::setDeviceID(const char* deviceID) {
m_deviceID = deviceID;
}
// Attempts to start/refresh the websocket connection handshake, rate-limited
// to one attempt per INITIATE_TIMEOUT_MS (polling in between). All four
// credentials must be set first.
// Returns a TEMBOO_MONITORING_ERROR_* code (OK when the request was mailed).
int TembooMessaging::initiateConnection() {
    unsigned long now = millis();
    if (now - m_connectionAttemptTime < INITIATE_TIMEOUT_MS) {
        poll();
        return TEMBOO_MONITORING_ERROR_NOT_CONNECTION_TIME;
    }
    if (m_accountName == NULL || *m_accountName == '\0') {
        return TEMBOO_MONITORING_ERROR_ACCOUNT_MISSING;
    }
    if (m_appKeyName == NULL || *m_appKeyName == '\0') {
        return TEMBOO_MONITORING_ERROR_APPKEY_NAME_MISSING;
    }
    if (m_deviceID == NULL || *m_deviceID == '\0') {
        return TEMBOO_MONITORING_ERROR_DEVICEID_MISSING;
    }
    if (m_appKey == NULL || *m_appKey == '\0') {
        return TEMBOO_MONITORING_ERROR_APPKEY_MISSING;
    }
    startMessaging();
    // 10 literal chars in "MI|N|K|B|A" plus the terminating NUL
    int messageSize = strlen(m_accountName) + strlen(m_appKey) + strlen(m_appKeyName) + strlen(m_deviceID) + 11;
    // BUGFIX: validate the size BEFORE declaring the VLA; previously an
    // oversized request still reserved messageSize bytes on the stack.
    if (messageSize >= MAX_MAILBOX_MESSAGE_SIZE) {
        return TEMBOO_MONITORING_ERROR_REQUEST_TOO_LARGE;
    }
    uint8_t msg[messageSize];
    messageSize = snprintf((char*)msg, messageSize, "MI|N%s|K%s|B%s|A%s", m_accountName, m_appKeyName, m_deviceID, m_appKey);
    Mailbox.writeMessage(msg, messageSize);
    m_connectionAttemptTime = now;
    return TEMBOO_MONITORING_ERROR_OK;
}
// Pumps the mailbox for up to POLL_TIMEOUT ms, dispatching incoming websocket
// messages and emitting a keep-alive ping every MCU_PING_PERIOD_MS.
// Updates m_connectionStatus from connect/disconnect notifications.
// Returns the last message-handling result (WS_NO_MESSAGE when idle).
WSMessageRequest TembooMessaging::poll() {
    startMessaging();
    // BUGFIX: millis() returns unsigned long; the previous signed `long int`
    // made the elapsed-time arithmetic mix signedness and overflow after
    // ~25 days of uptime.
    unsigned long now = millis();
    WSMessageRequest rc = WS_NO_MESSAGE;
    while (millis() - now < POLL_TIMEOUT) {
        if (millis() - m_lastPingTime >= MCU_PING_PERIOD_MS) {
            m_lastPingTime = millis();
            sendPing();
        }
        if (Mailbox.messageAvailable()) {
            uint8_t msg[MAX_MAILBOX_MESSAGE_SIZE] = {0};
            int recvLen = Mailbox.readMessage(msg, MAX_MAILBOX_MESSAGE_SIZE);
            if (recvLen > 0) {
                rc = handleResponse(msg, m_sensorTable, m_sensorTableDepth, m_connectionStatus);
                if (rc == WS_UPDATE_CONNECTED) {
                    m_connectionStatus = true;
                } else if (rc == WS_UPDATE_DISCONNECTED) {
                    m_connectionStatus = false;
                } else if (rc == WS_REQUEST_ERROR) {
                    // surface the failure to the bridge side
                    sendError("Message request error");
                }
            }
        }
    }
    return rc;
}
// Looks up |pinNum| in the sensor table and forwards |pinVal| to Temboo.
// Input pins (no write callback) are sent without requesting a response;
// actuator pins request one. Logs a debug message if the pin is unknown.
void TembooMessaging::updatePinValue(int pinNum, int pinVal) {
    // save the data to the structure and then send to Temboo
    int i = 0;
    for (; i < m_sensorTableDepth; i++) {
        if (m_sensorTable[i]->getSensorPin(m_sensorTable[i]->sensorConfig) == pinNum) {
            // if pin has pinWrite as NULL, it is an input
            // pin and needs to be stored. If not NULL,
            // pin is an actuator and should not be stored
            if (m_sensorTable[i]->write == NULL) {
                sendData(pinNum, pinVal, false);
            } else {
                sendData(pinNum, pinVal, true);
            }
            return;
        }
    }
    logTembooDebug("Unable to update pin");
}
// Returns the current value of |pinNum| by invoking its read callback from
// the sensor table. Returns 0 (and logs) when the pin is not registered —
// note this is indistinguishable from a genuine 0 reading.
int TembooMessaging::retrievePinValue(int pinNum) {
    // search through pin structure and return the pin value
    int i = 0;
    for (; i < m_sensorTableDepth; i++) {
        if (m_sensorTable[i]->getSensorPin(m_sensorTable[i]->sensorConfig) == pinNum) {
            return m_sensorTable[i]->read(m_sensorTable[i]->sensorConfig);
        }
    }
    logTembooDebug("Unable to obtain pin value");
    return 0;
}
// Sends an "ME" (error) message containing |errorText| to the mailbox.
// The text is truncated to the mailbox buffer size if necessary.
void TembooMessaging::sendError(const char* errorText) {
    uint8_t msg[MAX_MAILBOX_MESSAGE_SIZE] = {0};
    int messageSize = snprintf((char*)msg, MAX_MAILBOX_MESSAGE_SIZE, "ME|T%s", errorText);
    if (messageSize < 0) {
        // Encoding error: nothing usable in the buffer.
        return;
    }
    // snprintf returns the length the full string WOULD have had; for a long
    // |errorText| the previous code passed that oversized length straight to
    // writeMessage, reading past the valid data. Clamp to what was actually
    // written (buffer size minus the NUL terminator).
    if (messageSize >= MAX_MAILBOX_MESSAGE_SIZE) {
        messageSize = MAX_MAILBOX_MESSAGE_SIZE - 1;
    }
    Mailbox.writeMessage(msg, messageSize);
}
// Sends an "MF" message asking the bridge to close the WebSocket connection
// with close code |closeCode| and text |closeReason|.
void TembooMessaging::disconnectWSConnection(int closeCode, const char* closeReason) {
    // 6 literal characters ("MF|O" and "|r"), up to 11 for a signed 32-bit
    // decimal int, and 1 for the NUL terminator. The previous fixed "+ 11"
    // left only 4 characters for |closeCode|, so larger/negative codes were
    // truncated while snprintf's would-be length was still passed to
    // writeMessage (an out-of-bounds read).
    int bufferSize = strlen(closeReason) + 18;
    uint8_t msg[bufferSize];
    int messageSize = snprintf((char*)msg, bufferSize, "MF|O%i|r%s", closeCode, closeReason);
    if (messageSize < 0) {
        return;
    }
    if (messageSize >= bufferSize) {
        messageSize = bufferSize - 1;
    }
    Mailbox.writeMessage(msg, messageSize);
}
// Sends an "MD" (data) message with a pin number and integer value.
// When |requestResponse| is true, "|Q" is appended to ask the agent for an
// acknowledgement (used for actuator pins; see updatePinValue()).
void TembooMessaging::sendData(int pin, int pinVal, bool requestResponse) {
    uint8_t msg[MAX_MAILBOX_MESSAGE_SIZE] = {0};
    int messageSize = snprintf((char*)msg, MAX_MAILBOX_MESSAGE_SIZE, "MD|P%i|V%i%s", pin, pinVal, requestResponse ? "|Q" : "");
    Mailbox.writeMessage(msg, messageSize);
}
// Sends an "MD" (data) message with a pin number and floating-point value.
// dtostrf formats the value (min width 4, 2 decimal places) into a local
// buffer first — presumably because %f is unavailable in the embedded
// snprintf; confirm against the target's libc.
void TembooMessaging::sendData(int pin, float pinVal) {
    uint8_t msg[MAX_MAILBOX_MESSAGE_SIZE] = {0};
    char floatStr[12] = {0};
    dtostrf(pinVal, 4, 2, floatStr);
    int messageSize = snprintf((char*)msg, MAX_MAILBOX_MESSAGE_SIZE, "MD|P%i|V%s", pin, floatStr);
    Mailbox.writeMessage(msg, messageSize);
}
// Reports whether a live Temboo connection is currently established.
// The cached connection status is only meaningful while the messaging
// service is running; otherwise the answer is always false.
bool TembooMessaging::isConnected() {
    return running() && m_connectionStatus;
}
// Sends the single-byte keep-alive ping "P" to the mailbox.
void TembooMessaging::sendPing() {
    Mailbox.writeMessage((uint8_t*)"P", 1);
}
|
#include "projectionwidget.h"
// Constructs the projection canvas with an opaque, near-white background.
projectionWidget::projectionWidget(QWidget *parent) : QWidget(parent)
{
    // RGB(251,251,251): light-grey background for the drawing area.
    setPalette(QPalette(QColor(251,251,251)));
    setAutoFillBackground(true);
}
// Repaints the widget by delegating to o3d's projection renderer, drawing
// with an origin offset of (52, 102).
void projectionWidget::paintEvent(QPaintEvent*)
{
    // Construct the QPainter on the stack so it is properly ended and
    // destroyed when the event handler returns. The previous heap-allocated
    // painter was never deleted, leaking one QPainter per repaint.
    QPainter painter(this);
    painter.translate(52, 102);
    o3d.ShowProjection(&painter);
}
|
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <memory>
#include <new>
#include <stdexcept>
#include <string>
#include <string_view>
#include <type_traits>
#include <utility>
#include <vector>
using namespace std;
#pragma warning(disable : 28251) // Inconsistent annotation for 'new': this instance has no annotations.
// Number of allocations observed through the replaced operator new below;
// the tests assert exact allocation counts against it.
int allocationCount = 0;
int canCreate = 10; // Counter to force an exception when constructing a
// sufficiently large ReportAddress array
struct ReportAddress;
// Construction/destruction order logs: every ReportAddress ctor appends its
// address to the first buffer, every dtor to the second.
vector<ReportAddress*> ascendingAddressBuffer;
vector<ReportAddress*> descendingAddressBuffer;
// According to N4849, the default behavior of operator new[](size) is to return
// operator new(size), so only the latter needs to be replaced.
// Replacement throwing operator new: forwards to the nothrow overload below
// (which does the counting) and throws bad_alloc on failure, mimicking the
// default behavior.
void* operator new(size_t size) {
    void* const p = ::operator new(size, nothrow);
    if (p) {
        return p;
    } else {
        throw bad_alloc();
    }
}
// Replacement nothrow operator new: every allocation in the program funnels
// through here, so allocationCount tracks the global allocation total.
// malloc(1) for zero-sized requests keeps the returned pointer non-null and
// unique, matching operator new's contract.
void* operator new(size_t size, const nothrow_t&) noexcept {
    void* const result = malloc(size == 0 ? 1 : size);
    ++allocationCount;
    return result;
}
// Default-constructs to the sentinel 106; the (a, b) constructor stores
// a + b. Used to verify that make_shared/allocate_shared default-initialize
// elements and forward constructor arguments correctly.
struct InitialValue {
    int value = 106;
    InitialValue() = default;
    InitialValue(int a, int b) : value(a + b) {}
};
// Plain aggregate used to test brace-initialization of array elements
// ({2, 8, 9} fills v1..v3).
struct ThreeIntWrap {
    int v1;
    int v2;
    int v3;
};
// Over-aligned type (alignas(32)) used to verify that make_shared and
// allocate_shared return suitably aligned storage.
struct alignas(32) HighlyAligned {
    uint64_t a;
    uint64_t b;
    uint64_t c;
    uint64_t d;
};
// Logs the address of every construction and destruction so tests can verify
// that array elements are constructed at ascending addresses and destroyed
// in descending (reverse) order. Throws once the shared canCreate budget is
// exhausted, letting tests exercise cleanup of partially constructed arrays;
// the destructor refunds the budget so subsequent tests start fresh.
struct ReportAddress {
    ReportAddress() {
        if (canCreate > 0) {
            ascendingAddressBuffer.push_back(this);
            --canCreate;
        } else {
            throw runtime_error("Can't create more ReportAddress objects.");
        }
    }
    ~ReportAddress() {
        ++canCreate;
        descendingAddressBuffer.push_back(this);
    }
};
// Verifies that the logged ReportAddress constructions happened at strictly
// increasing addresses (i.e. array elements were initialized in order), then
// resets the log for the next test case.
void assert_ascending_init() {
    if (!ascendingAddressBuffer.empty()) {
        auto prev = ascendingAddressBuffer.begin();
        for (auto cur = prev + 1; cur != ascendingAddressBuffer.end(); ++cur, ++prev) {
            assert(*prev < *cur);
        }
    }
    ascendingAddressBuffer.clear();
}
// Verifies that the logged ReportAddress destructions happened at strictly
// decreasing addresses (i.e. array elements were destroyed in reverse order
// of construction), then resets the log for the next test case.
void assert_descending_destruct() {
    if (!descendingAddressBuffer.empty()) {
        auto prev = descendingAddressBuffer.begin();
        for (auto cur = prev + 1; cur != descendingAddressBuffer.end(); ++cur, ++prev) {
            assert(*prev > *cur);
        }
    }
    descendingAddressBuffer.clear();
}
// Asserts that |sp| owns an object (non-null) and is its sole owner.
template <class T>
void assert_shared_use_get(const shared_ptr<T>& sp) {
    assert(sp.get() != nullptr);
    assert(sp.use_count() == 1);
}
// Calls make_shared<T> with |vals|, asserting that exactly one allocation
// occurred and that the result is a unique, non-null owner.
template <class T, class... Args>
shared_ptr<T> make_shared_assert(Args&&... vals) {
    const int allocationsBefore = allocationCount;
    shared_ptr<T> sp = make_shared<T>(forward<Args>(vals)...);
    assert_shared_use_get(sp);
    assert(allocationCount == allocationsBefore + 1);
    return sp;
}
// Dispatch helpers for the fill-initialization form of make_shared:
// the bounded-array overload takes just the fill value, while the
// unbounded-array overload additionally takes the runtime element count.
template <class T, enable_if_t<extent_v<T> != 0, int> = 0>
shared_ptr<T> make_shared_init_assert(const remove_extent_t<T>& val) {
    return make_shared_assert<T>(val);
}
template <class T, enable_if_t<is_array_v<T> && extent_v<T> == 0, int> = 0>
shared_ptr<T> make_shared_init_assert(size_t size, const remove_extent_t<T>& val) {
    return make_shared_assert<T>(size, val);
}
// Constructs a shared_ptr<T> (T is a ReportAddress array type); whether
// construction succeeds or throws partway through, element construction must
// have occurred at ascending addresses and destruction in descending order.
template <class T, class... Args>
void test_make_init_destruct_order(Args&&... vals) {
    try {
        shared_ptr<T> sp = make_shared<T>(forward<Args>(vals)...);
        assert_shared_use_get(sp);
    } catch (const runtime_error& exc) {
        // Thrown by ReportAddress once the canCreate budget is exhausted.
        assert(exc.what() == "Can't create more ReportAddress objects."sv);
    }
    assert_ascending_init();
    assert_descending_destruct();
}
// make_shared for scalar (non-array) types: default construction, argument
// forwarding, value-initialization, and over-aligned types.
void test_make_shared_not_array() {
    shared_ptr<vector<int>> p0 = make_shared<vector<int>>();
    assert_shared_use_get(p0);
    assert(p0->empty());
    shared_ptr<InitialValue> p1 = make_shared_assert<InitialValue>();
    assert(p1->value == 106);
    // string(const char*, pos, count) — substring "ow!" of "Meow!".
    shared_ptr<string> p2 = make_shared<string>("Meow!", 2u, 3u);
    assert_shared_use_get(p2);
    assert(p2->compare("ow!") == 0);
    shared_ptr<InitialValue> p3 = make_shared_assert<InitialValue>(40, 2);
    assert(p3->value == 42);
    // make_shared<T>() value-initializes: *p4 must be zero.
    shared_ptr<int> p4 = make_shared<int>();
    assert_shared_use_get(p4);
    assert(*p4 == 0);
    shared_ptr<HighlyAligned> p5 = make_shared<HighlyAligned>();
    assert_shared_use_get(p5);
    assert(reinterpret_cast<uintptr_t>(p5.get()) % alignof(HighlyAligned) == 0);
    assert(p5->a == 0 && p5->b == 0 && p5->c == 0 && p5->d == 0);
}
// make_shared for arrays of known bound: value-initialization,
// multidimensional element initialization, per-element fill initialization,
// alignment, and construction/destruction ordering (including the case where
// an element constructor throws partway through).
void test_make_shared_array_known_bounds() {
    shared_ptr<string[100]> p0 = make_shared<string[100]>();
    assert_shared_use_get(p0);
    for (int i = 0; i < 100; ++i) {
        assert(p0[i].empty());
    }
    shared_ptr<InitialValue[2][8][9]> p1 = make_shared_assert<InitialValue[2][8][9]>();
    for (int i = 0; i < 2; ++i) {
        for (int j = 0; j < 8; ++j) {
            for (int k = 0; k < 9; ++k) {
                assert(p1[i][j][k].value == 106);
            }
        }
    }
    // The fill value is one inner element ({"Meow!", "Purr"} is a string[2]);
    // every element of the outer dimension is initialized from it.
    shared_ptr<string[10][2]> p2 = make_shared<string[10][2]>({"Meow!", "Purr"});
    assert_shared_use_get(p2);
    for (int i = 0; i < 10; ++i) {
        assert(p2[i][0].compare("Meow!") == 0);
        assert(p2[i][1].compare("Purr") == 0);
    }
    shared_ptr<vector<int>[3]> p3 = make_shared<vector<int>[3]>({9, 9, 9});
    assert_shared_use_get(p3);
    for (int i = 0; i < 3; ++i) {
        assert(p3[i].size() == 3);
        for (const auto& val : p3[i]) {
            assert(val == 9);
        }
    }
    shared_ptr<ThreeIntWrap[5]> p4 = make_shared_init_assert<ThreeIntWrap[5]>({2, 8, 9});
    for (int i = 0; i < 5; ++i) {
        assert(p4[i].v1 == 2 && p4[i].v2 == 8 && p4[i].v3 == 9);
    }
    shared_ptr<int[1][7][2][9]> p5 = make_shared<int[1][7][2][9]>();
    assert_shared_use_get(p5);
    for (int i = 0; i < 7; ++i) {
        for (int j = 0; j < 2; ++j) {
            for (int k = 0; k < 9; ++k) {
                assert(p5[0][i][j][k] == 0);
            }
        }
    }
    shared_ptr<HighlyAligned[6]> p6 = make_shared<HighlyAligned[6]>();
    assert_shared_use_get(p6);
    assert(reinterpret_cast<uintptr_t>(p6.get()) % alignof(HighlyAligned) == 0);
    for (int i = 0; i < 6; ++i) {
        assert(p6[i].a == 0 && p6[i].b == 0 && p6[i].c == 0 && p6[i].d == 0);
    }
    test_make_init_destruct_order<ReportAddress[5]>(); // success one dimensional
    test_make_init_destruct_order<ReportAddress[20]>(); // failure one dimensional
    test_make_init_destruct_order<ReportAddress[2][2][2]>(); // success multidimensional
    test_make_init_destruct_order<ReportAddress[3][3][3]>(); // failure multidimensional
    shared_ptr<int[7]> p7 = make_shared<int[7]>(0);
    for (int i = 0; i < 7; ++i) {
        assert(p7[i] == 0);
    }
}
// make_shared for arrays of unknown bound: the element count is a runtime
// argument; also covers the zero-length case and fill initialization.
void test_make_shared_array_unknown_bounds() {
    shared_ptr<string[]> p0 = make_shared<string[]>(100);
    assert_shared_use_get(p0);
    for (int i = 0; i < 100; ++i) {
        assert(p0[i].empty());
    }
    shared_ptr<InitialValue[][8][9]> p1 = make_shared_assert<InitialValue[][8][9]>(2u);
    for (int i = 0; i < 2; ++i) {
        for (int j = 0; j < 8; ++j) {
            for (int k = 0; k < 9; ++k) {
                assert(p1[i][j][k].value == 106);
            }
        }
    }
    shared_ptr<string[][2]> p2 = make_shared<string[][2]>(10, {"Meow!", "Purr"});
    assert_shared_use_get(p2);
    for (int i = 0; i < 10; ++i) {
        assert(p2[i][0].compare("Meow!") == 0);
        assert(p2[i][1].compare("Purr") == 0);
    }
    shared_ptr<vector<int>[]> p3 = make_shared<vector<int>[]>(3, {9, 9, 9});
    assert_shared_use_get(p3);
    for (int i = 0; i < 3; ++i) {
        assert(p3[i].size() == 3);
        for (const auto& val : p3[i]) {
            assert(val == 9);
        }
    }
    shared_ptr<ThreeIntWrap[]> p4 = make_shared_init_assert<ThreeIntWrap[]>(5, {2, 8, 9});
    for (int i = 0; i < 5; ++i) {
        assert(p4[i].v1 == 2 && p4[i].v2 == 8 && p4[i].v3 == 9);
    }
    // Zero-length array must still produce a unique non-null owner.
    shared_ptr<int[]> p5 = make_shared_assert<int[]>(0u); // p5 cannot be dereferenced
    shared_ptr<int[][5][6]> p6 = make_shared<int[][5][6]>(4u);
    assert_shared_use_get(p6);
    for (int i = 0; i < 4; ++i) {
        for (int j = 0; j < 5; ++j) {
            for (int k = 0; k < 6; ++k) {
                assert(p6[i][j][k] == 0);
            }
        }
    }
    shared_ptr<HighlyAligned[]> p7 = make_shared<HighlyAligned[]>(7u);
    assert_shared_use_get(p7);
    assert(reinterpret_cast<uintptr_t>(p7.get()) % alignof(HighlyAligned) == 0);
    for (int i = 0; i < 7; ++i) {
        assert(p7[i].a == 0 && p7[i].b == 0 && p7[i].c == 0 && p7[i].d == 0);
    }
    test_make_init_destruct_order<ReportAddress[]>(5u); // success one dimensional
    test_make_init_destruct_order<ReportAddress[]>(20u); // failure one dimensional
    test_make_init_destruct_order<ReportAddress[][2][2]>(2u); // success multidimensional
    test_make_init_destruct_order<ReportAddress[][3][3]>(3u); // failure multidimensional
    shared_ptr<int[]> p8 = make_shared<int[]>(7u, 0);
    for (int i = 0; i < 7; ++i) {
        assert(p8[i] == 0);
    }
}
// Totals of construct()/destroy() calls made through
// ConstructConstrainingAllocator; they must balance after every test scope.
int constructCount = 0;
int destroyCount = 0;
inline void assert_construct_destruct_equal() {
    assert(constructCount == destroyCount);
}
// Minimal allocator that additionally verifies allocator_traits::construct/
// destroy are only ever invoked with the expected element type
// (ConstructAssert), and counts those calls in the global counters above.
template <class T, class ConstructAssert>
struct ConstructConstrainingAllocator {
    using value_type = T;
    ConstructConstrainingAllocator() = default;
    // Rebinding converting constructor required of allocators.
    template <class Other>
    ConstructConstrainingAllocator(const ConstructConstrainingAllocator<Other, ConstructAssert>&) {}
    ConstructConstrainingAllocator(const ConstructConstrainingAllocator&) = default;
    ConstructConstrainingAllocator& operator=(const ConstructConstrainingAllocator&) = delete;
    T* allocate(size_t n) {
        return allocator<T>{}.allocate(n);
    }
    void deallocate(T* p, size_t n) noexcept {
        return allocator<T>{}.deallocate(p, n);
    }
    // allocate_shared must construct array elements through the allocator;
    // the static_asserts pin the element type it may be called with.
    template <class Other, class... Args>
    void construct(Other* p, Args&&... vals) {
        allocator<Other> a;
        static_assert(is_same_v<Other, value_type> && is_same_v<ConstructAssert, Other>, "incorrect construct call");
        allocator_traits<allocator<Other>>::construct(a, p, forward<Args>(vals)...);
        ++constructCount;
    }
    template <class Other>
    void destroy(Other* p) noexcept {
        allocator<Other> a;
        static_assert(is_same_v<Other, value_type> && is_same_v<ConstructAssert, Other>, "incorrect destroy call");
        allocator_traits<allocator<Other>>::destroy(a, p);
        ++destroyCount;
    }
};
// CustomAlloc<T>'s declared value_type is void, so allocate_shared must
// rebind it; T is also the only type construct()/destroy() may be called on.
template <typename T>
using CustomAlloc = ConstructConstrainingAllocator<void, T>;
// Calls allocate_shared<T>, asserting exactly one allocation and exactly
// |elemCount| element constructions.
template <class T, class... Args>
shared_ptr<T> allocate_shared_assert(int elemCount, Args&&... vals) {
    int aCount = allocationCount;
    int cCount = constructCount;
    shared_ptr<T> sp = allocate_shared<T>(forward<Args>(vals)...);
    assert_shared_use_get(sp);
    assert(aCount + 1 == allocationCount);
    assert(cCount + elemCount == constructCount);
    return sp;
}
// Dispatch helpers for the fill-initialization form of allocate_shared:
// the bounded-array overload takes the allocator and fill value, while the
// unbounded-array overload additionally takes the runtime element count.
template <class T, class A, enable_if_t<extent_v<T> != 0, int> = 0>
shared_ptr<T> allocate_shared_init_assert(int elemCount, const A& a, const remove_extent_t<T>& val) {
    return allocate_shared_assert<T>(elemCount, a, val);
}
template <class T, class A, enable_if_t<is_array_v<T> && extent_v<T> == 0, int> = 0>
shared_ptr<T> allocate_shared_init_assert(int elemCount, const A& a, size_t size, const remove_extent_t<T>& val) {
    return allocate_shared_assert<T>(elemCount, a, size, val);
}
// allocate_shared analogue of test_make_init_destruct_order(): whether the
// construction succeeds or throws partway, the allocator's construct/destroy
// calls must balance and the address ordering invariants must hold.
template <class T, class... Args>
void test_allocate_init_destruct_order(Args&&... vals) {
    CustomAlloc<remove_all_extents_t<T>> a{};
    try {
        shared_ptr<T> sp = allocate_shared<T>(a, forward<Args>(vals)...);
        assert_shared_use_get(sp);
    } catch (const runtime_error& exc) {
        // Thrown by ReportAddress once the canCreate budget is exhausted.
        assert(exc.what() == "Can't create more ReportAddress objects."sv);
    }
    assert_construct_destruct_equal();
    assert_ascending_init();
    assert_descending_destruct();
}
// allocate_shared counterparts of the scalar tests; each scope is followed
// by a check that construct() and destroy() calls through the allocator
// balance once the shared_ptr is gone.
void test_allocate_shared_not_array() {
    CustomAlloc<vector<int>> a0{};
    {
        shared_ptr<vector<int>> p0 = allocate_shared<vector<int>>(a0);
        assert_shared_use_get(p0);
        assert(p0->empty());
    }
    assert_construct_destruct_equal();
    CustomAlloc<InitialValue> a1{};
    {
        shared_ptr<InitialValue> p1 = allocate_shared_assert<InitialValue>(1, a1);
        assert(p1->value == 106);
    }
    assert_construct_destruct_equal();
    CustomAlloc<string> a2{};
    {
        shared_ptr<string> p2 = allocate_shared<string>(a2, "Meow!", 2u, 3u);
        assert_shared_use_get(p2);
        assert(p2->compare("ow!") == 0);
    }
    assert_construct_destruct_equal();
    {
        shared_ptr<InitialValue> p3 = allocate_shared_assert<InitialValue>(1, a1, 40, 2);
        assert(p3->value == 42);
    }
    assert_construct_destruct_equal();
    CustomAlloc<int> a4{};
    {
        shared_ptr<int> p4 = allocate_shared<int>(a4);
        assert_shared_use_get(p4);
        assert(*p4 == 0);
    }
    assert_construct_destruct_equal();
    CustomAlloc<HighlyAligned> a5{};
    {
        shared_ptr<HighlyAligned> p5 = allocate_shared<HighlyAligned>(a5);
        assert_shared_use_get(p5);
        assert(reinterpret_cast<uintptr_t>(p5.get()) % alignof(HighlyAligned) == 0);
        assert(p5->a == 0 && p5->b == 0 && p5->c == 0 && p5->d == 0);
    }
    assert_construct_destruct_equal();
}
// allocate_shared counterparts of the known-bound array tests; construct/
// destroy calls through the allocator must balance after every scope.
void test_allocate_shared_array_known_bounds() {
    CustomAlloc<string> a0{};
    {
        shared_ptr<string[100]> p0 = allocate_shared<string[100]>(a0);
        assert_shared_use_get(p0);
        for (int i = 0; i < 100; ++i) {
            assert(p0[i].empty());
        }
    }
    assert_construct_destruct_equal();
    CustomAlloc<InitialValue> a1{};
    {
        // 2 * 8 * 9 == 144 element constructions expected.
        shared_ptr<InitialValue[2][8][9]> p1 = allocate_shared_assert<InitialValue[2][8][9]>(144, a1);
        for (int i = 0; i < 2; ++i) {
            for (int j = 0; j < 8; ++j) {
                for (int k = 0; k < 9; ++k) {
                    assert(p1[i][j][k].value == 106);
                }
            }
        }
    }
    assert_construct_destruct_equal();
    {
        shared_ptr<string[10][2]> p2 = allocate_shared<string[10][2]>(a0, {"Meow!", "Purr"});
        assert_shared_use_get(p2);
        for (int i = 0; i < 10; ++i) {
            assert(p2[i][0].compare("Meow!") == 0);
            assert(p2[i][1].compare("Purr") == 0);
        }
    }
    assert_construct_destruct_equal();
    CustomAlloc<vector<int>> a3{};
    {
        shared_ptr<vector<int>[3]> p3 = allocate_shared<vector<int>[3]>(a3, {9, 9, 9});
        assert_shared_use_get(p3);
        for (int i = 0; i < 3; ++i) {
            assert(p3[i].size() == 3);
            for (const auto& val : p3[i]) {
                assert(val == 9);
            }
        }
    }
    assert_construct_destruct_equal();
    CustomAlloc<ThreeIntWrap> a4{};
    {
        shared_ptr<ThreeIntWrap[5]> p4 = allocate_shared_init_assert<ThreeIntWrap[5]>(5, a4, {2, 8, 9});
        for (int i = 0; i < 5; ++i) {
            assert(p4[i].v1 == 2 && p4[i].v2 == 8 && p4[i].v3 == 9);
        }
    }
    assert_construct_destruct_equal();
    CustomAlloc<int> a5{};
    {
        shared_ptr<int[1][7][2][9]> p5 = allocate_shared<int[1][7][2][9]>(a5);
        assert_shared_use_get(p5);
        for (int i = 0; i < 7; ++i) {
            for (int j = 0; j < 2; ++j) {
                for (int k = 0; k < 9; ++k) {
                    assert(p5[0][i][j][k] == 0);
                }
            }
        }
    }
    assert_construct_destruct_equal();
    CustomAlloc<HighlyAligned> a6{};
    {
        shared_ptr<HighlyAligned[6]> p6 = allocate_shared<HighlyAligned[6]>(a6);
        assert_shared_use_get(p6);
        assert(reinterpret_cast<uintptr_t>(p6.get()) % alignof(HighlyAligned) == 0);
        for (int i = 0; i < 6; ++i) {
            assert(p6[i].a == 0 && p6[i].b == 0 && p6[i].c == 0 && p6[i].d == 0);
        }
    }
    assert_construct_destruct_equal();
    test_allocate_init_destruct_order<ReportAddress[5]>(); // success one dimensional
    test_allocate_init_destruct_order<ReportAddress[20]>(); // failure one dimensional
    test_allocate_init_destruct_order<ReportAddress[2][2][2]>(); // success multidimensional
    test_allocate_init_destruct_order<ReportAddress[3][3][3]>(); // failure multidimensional
    allocator<int> a7;
    shared_ptr<int[7]> p7 = allocate_shared<int[7]>(a7, 0);
    for (int i = 0; i < 7; ++i) {
        assert(p7[i] == 0);
    }
}
// allocate_shared counterparts of the unknown-bound array tests; the element
// count is a runtime argument, and construct/destroy calls must balance.
void test_allocate_shared_array_unknown_bounds() {
    CustomAlloc<string> a0{};
    {
        shared_ptr<string[]> p0 = allocate_shared<string[]>(a0, 100);
        assert_shared_use_get(p0);
        for (int i = 0; i < 100; ++i) {
            assert(p0[i].empty());
        }
    }
    assert_construct_destruct_equal();
    CustomAlloc<InitialValue> a1{};
    {
        shared_ptr<InitialValue[][8][9]> p1 = allocate_shared_assert<InitialValue[][8][9]>(144, a1, 2u);
        for (int i = 0; i < 2; ++i) {
            for (int j = 0; j < 8; ++j) {
                for (int k = 0; k < 9; ++k) {
                    assert(p1[i][j][k].value == 106);
                }
            }
        }
    }
    assert_construct_destruct_equal();
    {
        shared_ptr<string[][2]> p2 = allocate_shared<string[][2]>(a0, 10, {"Meow!", "Purr"});
        assert_shared_use_get(p2);
        for (int i = 0; i < 10; ++i) {
            assert(p2[i][0].compare("Meow!") == 0);
            assert(p2[i][1].compare("Purr") == 0);
        }
    }
    assert_construct_destruct_equal();
    CustomAlloc<vector<int>> a3{};
    {
        shared_ptr<vector<int>[]> p3 = allocate_shared<vector<int>[]>(a3, 3, {9, 9, 9});
        assert_shared_use_get(p3);
        for (int i = 0; i < 3; ++i) {
            assert(p3[i].size() == 3);
            for (const auto& val : p3[i]) {
                assert(val == 9);
            }
        }
    }
    assert_construct_destruct_equal();
    CustomAlloc<ThreeIntWrap> a4{};
    {
        shared_ptr<ThreeIntWrap[]> p4 = allocate_shared_init_assert<ThreeIntWrap[]>(5, a4, 5, {2, 8, 9});
        for (int i = 0; i < 5; ++i) {
            assert(p4[i].v1 == 2 && p4[i].v2 == 8 && p4[i].v3 == 9);
        }
    }
    assert_construct_destruct_equal();
    CustomAlloc<int> a5{};
    // Zero-length array: no element constructions may occur.
    { shared_ptr<int[]> p5 = allocate_shared_assert<int[]>(0, a5, 0u); } // p5 cannot be dereferenced
    assert_construct_destruct_equal();
    {
        shared_ptr<int[][5][6]> p6 = allocate_shared<int[][5][6]>(a5, 4u);
        assert_shared_use_get(p6);
        for (int i = 0; i < 4; ++i) {
            for (int j = 0; j < 5; ++j) {
                for (int k = 0; k < 6; ++k) {
                    assert(p6[i][j][k] == 0);
                }
            }
        }
    }
    assert_construct_destruct_equal();
    CustomAlloc<HighlyAligned> a7{};
    {
        shared_ptr<HighlyAligned[]> p7 = allocate_shared<HighlyAligned[]>(a7, 7u);
        assert_shared_use_get(p7);
        assert(reinterpret_cast<uintptr_t>(p7.get()) % alignof(HighlyAligned) == 0);
        for (int i = 0; i < 7; ++i) {
            assert(p7[i].a == 0 && p7[i].b == 0 && p7[i].c == 0 && p7[i].d == 0);
        }
    }
    assert_construct_destruct_equal();
    test_allocate_init_destruct_order<ReportAddress[]>(5u); // success one dimensional
    test_allocate_init_destruct_order<ReportAddress[]>(20u); // failure one dimensional
    test_allocate_init_destruct_order<ReportAddress[][2][2]>(2u); // success multidimensional
    test_allocate_init_destruct_order<ReportAddress[][3][3]>(3u); // failure multidimensional
    allocator<int> a8;
    shared_ptr<int[]> p8 = allocate_shared<int[]>(a8, 7u, 0);
    for (int i = 0; i < 7; ++i) {
        assert(p8[i] == 0);
    }
}
// Test GH-1733 "<memory>: error C2694 when calling make_shared on class with throwing destructor"
// Non-trivial destructor that is allowed to throw; shared_ptr machinery must
// still compile for such element types.
struct NontrivialThrowingDtor {
    ~NontrivialThrowingDtor() noexcept(false) {}
};
static_assert(!is_nothrow_destructible_v<NontrivialThrowingDtor>);
static_assert(!is_trivially_destructible_v<NontrivialThrowingDtor>);
// Trivial but potentially-throwing destructor: exercises the trivially
// destructible path of the unbounded-array control block.
struct TrivialThrowingDtor {
    ~TrivialThrowingDtor() noexcept(false) = default;
};
#ifndef __EDG__ // TRANSITION, VSO-1292292
static_assert(!is_nothrow_destructible_v<TrivialThrowingDtor>);
#endif // ^^^ no workaround ^^^
static_assert(is_trivially_destructible_v<TrivialThrowingDtor>);
// Deleter whose own destructor may throw; used with the resource-owning
// control blocks in test_GH_1733().
template <class T>
struct WeirdDeleter {
    void operator()(T* const ptr) const {
        delete ptr;
    }
    ~WeirdDeleter() noexcept(false) {}
};
static_assert(!is_nothrow_destructible_v<WeirdDeleter<int>>);
// GH-1733 regression: each shared_ptr control-block flavor (the comments
// below name the MSVC STL internal types) must compile and run when the
// owned type's — or the deleter's — destructor is noexcept(false).
void test_GH_1733() {
    WeirdDeleter<NontrivialThrowingDtor> del;
    allocator<int> al;
    // _Ref_count
    (void) shared_ptr<NontrivialThrowingDtor>{new NontrivialThrowingDtor};
    // _Ref_count_resource
    (void) shared_ptr<NontrivialThrowingDtor>{new NontrivialThrowingDtor, del};
    // _Ref_count_resource_alloc
    (void) shared_ptr<NontrivialThrowingDtor>{new NontrivialThrowingDtor, del, al};
    // _Ref_count_obj2
    (void) make_shared<NontrivialThrowingDtor>();
    // _Ref_count_obj_alloc3
    (void) allocate_shared<NontrivialThrowingDtor>(al);
    // _Ref_count_unbounded_array<_Ty, true>
    (void) make_shared<TrivialThrowingDtor[]>(10);
    // _Ref_count_unbounded_array<_Ty, false>
    (void) make_shared<NontrivialThrowingDtor[]>(10);
    // _Ref_count_bounded_array
    (void) make_shared<NontrivialThrowingDtor[10]>();
    // _Ref_count_unbounded_array_alloc
    (void) allocate_shared<NontrivialThrowingDtor[]>(al, 10);
    // _Ref_count_bounded_array_alloc
    (void) allocate_shared<NontrivialThrowingDtor[10]>(al);
}
// Runs every test group; each group leaves the global address logs and
// construct/destroy counters balanced for the next one.
int main() {
    test_make_shared_not_array();
    test_make_shared_array_known_bounds();
    test_make_shared_array_unknown_bounds();
    test_allocate_shared_not_array();
    test_allocate_shared_array_known_bounds();
    test_allocate_shared_array_unknown_bounds();
    test_GH_1733();
}
|
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/task_scheduler_util/common/variations_util.h"
#include "base/command_line.h"
#include "base/logging.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_piece.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "base/task_scheduler/initialization_util.h"
#include "base/time/time.h"
#include "components/variations/variations_associated_data.h"
namespace task_scheduler_util {
namespace {
// Worker pool parameters that can be tuned via variation params; produced by
// StringToVariableWorkerPoolParams().
struct SchedulerCustomizableWorkerPoolParams {
  // NOTE(review): left uninitialized here — only set on a successful parse.
  // Callers currently validate max_threads/detach_period before reading it;
  // confirm no path reads it from a default-constructed instance.
  base::SchedulerWorkerPoolParams::StandbyThreadPolicy standby_thread_policy;
  int max_threads = 0;
  base::TimeDelta detach_period;
};
#if !defined(OS_IOS)
// Command-line switch used to forward BrowserScheduler variation params from
// the browser process to child processes.
constexpr char kTaskSchedulerVariationParamsSwitch[] =
    "task-scheduler-variation-params";
// Separator between the serialized keys and values ("k|v|k|v...").
constexpr char kSeparator[] = "|";
bool ContainsSeparator(const std::string& str) {
return str.find(kSeparator) != std::string::npos;
}
#endif // !defined(OS_IOS)
// Converts |pool_descriptor| to a SchedulerWorkerPoolVariableParams. Returns a
// default SchedulerWorkerPoolVariableParams on failure.
//
// |pool_descriptor| is a semi-colon separated value string with the following
// items:
// 0. Minimum Thread Count (int)
// 1. Maximum Thread Count (int)
// 2. Thread Count Multiplier (double)
// 3. Thread Count Offset (int)
// 4. Detach Time in Milliseconds (int)
// 5. Standby Thread Policy (string)
// Additional values may appear as necessary and will be ignored.
SchedulerCustomizableWorkerPoolParams StringToVariableWorkerPoolParams(
    const base::StringPiece pool_descriptor) {
  using StandbyThreadPolicy =
      base::SchedulerWorkerPoolParams::StandbyThreadPolicy;
  const std::vector<base::StringPiece> tokens = SplitStringPiece(
      pool_descriptor, ";", base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
  // Normally, we wouldn't initialize the values below because we don't read
  // from them before we write to them. However, some compilers (like MSVC)
  // complain about uninitialized variables due to the as_string() call below.
  int min = 0;
  int max = 0;
  double cores_multiplier = 0.0;
  int offset = 0;
  int detach_milliseconds = 0;
  // Checking for a size greater than the expected amount allows us to be
  // forward compatible if we add more variation values.
  if (tokens.size() >= 5 && base::StringToInt(tokens[0], &min) &&
      base::StringToInt(tokens[1], &max) &&
      base::StringToDouble(tokens[2].as_string(), &cores_multiplier) &&
      base::StringToInt(tokens[3], &offset) &&
      base::StringToInt(tokens[4], &detach_milliseconds)) {
    SchedulerCustomizableWorkerPoolParams params;
    params.max_threads = base::RecommendedMaxNumberOfThreadsInPool(
        min, max, cores_multiplier, offset);
    params.detach_period =
        base::TimeDelta::FromMilliseconds(detach_milliseconds);
    // Token 5 is optional; any value other than "lazy" (including its
    // absence) selects the ONE standby-thread policy.
    params.standby_thread_policy = (tokens.size() >= 6 && tokens[5] == "lazy")
                                       ? StandbyThreadPolicy::LAZY
                                       : StandbyThreadPolicy::ONE;
    return params;
  }
  // Parse failure: return default params (max_threads == 0), which callers
  // reject via their own validation.
  DLOG(ERROR) << "Invalid Worker Pool Descriptor: " << pool_descriptor;
  return SchedulerCustomizableWorkerPoolParams();
}
} // namespace
// Compile-time part of a worker pool's configuration. Only the |name|
// pointer is stored, so it must outlive this object.
SchedulerImmutableWorkerPoolParams::SchedulerImmutableWorkerPoolParams(
    const char* name,
    base::ThreadPriority priority_hint)
    : name_(name), priority_hint_(priority_hint) {}
// Combines each pool's immutable params with the variable params found in
// |variation_params| (keyed by pool name). Returns an empty vector if any
// pool's entry is missing or invalid, letting callers fall back to defaults.
std::vector<base::SchedulerWorkerPoolParams> GetWorkerPoolParams(
    const std::vector<SchedulerImmutableWorkerPoolParams>&
        constant_worker_pool_params_vector,
    const std::map<std::string, std::string>& variation_params) {
  std::vector<base::SchedulerWorkerPoolParams> worker_pool_params_vector;
  for (const auto& constant_worker_pool_params :
       constant_worker_pool_params_vector) {
    const char* const worker_pool_name = constant_worker_pool_params.name();
    auto it = variation_params.find(worker_pool_name);
    if (it == variation_params.end()) {
      // Non-branded builds don't have access to external worker pool
      // configurations.
      return std::vector<base::SchedulerWorkerPoolParams>();
    }
    const auto variable_worker_pool_params =
        StringToVariableWorkerPoolParams(it->second);
    // StringToVariableWorkerPoolParams() returns default params
    // (max_threads == 0) on parse failure; this range check rejects them.
    if (variable_worker_pool_params.max_threads <= 0 ||
        variable_worker_pool_params.detach_period <= base::TimeDelta()) {
      DLOG(ERROR) << "Invalid Worker Pool Configuration: " << worker_pool_name
                  << " [" << it->second << "]";
      return std::vector<base::SchedulerWorkerPoolParams>();
    }
    worker_pool_params_vector.emplace_back(
        worker_pool_name, constant_worker_pool_params.priority_hint(),
        variable_worker_pool_params.standby_thread_policy,
        variable_worker_pool_params.max_threads,
        variable_worker_pool_params.detach_period);
  }
  return worker_pool_params_vector;
}
#if !defined(OS_IOS)
// Serializes all "BrowserScheduler" variation params whose key starts with
// |key_prefix| onto |command_line| as a single "k|v|k|v..." switch value.
// Adds nothing at all if any key or value contains the separator.
void AddVariationParamsToCommandLine(base::StringPiece key_prefix,
                                     base::CommandLine* command_line) {
  DCHECK(command_line);
  std::map<std::string, std::string> variation_params;
  if (!variations::GetVariationParams("BrowserScheduler", &variation_params))
    return;
  std::vector<std::string> parts;
  for (const auto& key_value : variation_params) {
    if (base::StartsWith(key_value.first, key_prefix,
                         base::CompareCase::SENSITIVE)) {
      // A key or value containing the separator could not be parsed back;
      // refuse to serialize anything in that case.
      if (ContainsSeparator(key_value.first) ||
          ContainsSeparator(key_value.second)) {
        DLOG(ERROR)
            << "Unexpected Character in Task Scheduler Variation Params: "
            << key_value.first << " [" << key_value.second << "]";
        return;
      }
      parts.push_back(key_value.first);
      parts.push_back(key_value.second);
    }
  }
  if (!parts.empty()) {
    command_line->AppendSwitchASCII(kTaskSchedulerVariationParamsSwitch,
                                    base::JoinString(parts, kSeparator));
  }
}
// Inverse of AddVariationParamsToCommandLine(): parses the "k|v|k|v..."
// switch value back into a map. An odd number of parts indicates a malformed
// switch; NOTREACHED() fires and an empty map is returned.
std::map<std::string, std::string> GetVariationParamsFromCommandLine(
    const base::CommandLine& command_line) {
  const auto serialized_variation_params =
      command_line.GetSwitchValueASCII(kTaskSchedulerVariationParamsSwitch);
  const auto parts =
      base::SplitStringPiece(serialized_variation_params, kSeparator,
                             base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
  std::map<std::string, std::string> variation_params;
  // Consume the parts pairwise: a key, then its value.
  for (auto it = parts.begin(); it != parts.end(); ++it) {
    base::StringPiece key = *it;
    ++it;
    if (it == parts.end()) {
      NOTREACHED();
      return std::map<std::string, std::string>();
    }
    base::StringPiece value = *it;
    variation_params[key.as_string()] = value.as_string();
  }
  return variation_params;
}
#endif // !defined(OS_IOS)
} // namespace task_scheduler_util
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.