hexsha
stringlengths
40
40
size
int64
19
11.4M
ext
stringclasses
13 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
3
270
max_stars_repo_name
stringlengths
5
110
max_stars_repo_head_hexsha
stringlengths
40
40
max_stars_repo_licenses
listlengths
1
9
max_stars_count
float64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
3
270
max_issues_repo_name
stringlengths
5
116
max_issues_repo_head_hexsha
stringlengths
40
78
max_issues_repo_licenses
listlengths
1
9
max_issues_count
float64
1
67k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
3
270
max_forks_repo_name
stringlengths
5
116
max_forks_repo_head_hexsha
stringlengths
40
78
max_forks_repo_licenses
listlengths
1
9
max_forks_count
float64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
19
11.4M
avg_line_length
float64
1.93
229k
max_line_length
int64
12
688k
alphanum_fraction
float64
0.07
0.99
matches
listlengths
1
10
0e7e8d1ed549c6bff75f9b85c9f90b431a171149
7,105
cpp
C++
GRTlib/GRT/CoreModules/Regressifier.cpp
vladnis/grt-android-lib
291401bfcc163677cb21f58fc28cab94fd97553b
[ "MIT" ]
1
2015-07-12T09:32:02.000Z
2015-07-12T09:32:02.000Z
GRTlib/src/CoreModules/Regressifier.cpp
vladnis/GRT-Benchmark
5cbce6919f596fea3677660d89d8d41060bacbb6
[ "Apache-2.0" ]
null
null
null
GRTlib/src/CoreModules/Regressifier.cpp
vladnis/GRT-Benchmark
5cbce6919f596fea3677660d89d8d41060bacbb6
[ "Apache-2.0" ]
null
null
null
/* GRT MIT License Copyright (c) <2012> <Nicholas Gillian, Media Lab, MIT> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #include "Regressifier.h" namespace GRT{ Regressifier::StringRegressifierMap* Regressifier::stringRegressifierMap = NULL; UINT Regressifier::numRegressifierInstances = 0; Regressifier* Regressifier::createInstanceFromString(string const &regressifierType){ StringRegressifierMap::iterator iter = getMap()->find( regressifierType ); if( iter == getMap()->end() ){ return NULL; } return iter->second(); } Regressifier* Regressifier::createNewInstance() const{ return createInstanceFromString( regressifierType ); } Regressifier* Regressifier::deepCopy() const{ Regressifier *newInstance = createInstanceFromString( regressifierType ); if( newInstance == NULL ) return NULL; if( !newInstance->deepCopyFrom( this ) ){ delete newInstance; return NULL; } return newInstance; } Regressifier::Regressifier(void){ baseType = MLBase::REGRESSIFIER; regressifierType = "NOT_SET"; numOutputDimensions = 0; minNumEpochs = 0; maxNumEpochs = 100; validationSetSize = 20; minChange = 1.0e-5; learningRate = 0.1; useValidationSet = false; randomiseTrainingOrder = true; rootMeanSquaredTrainingError = 0; totalSquaredTrainingError = 0; numRegressifierInstances++; } Regressifier::~Regressifier(void){ if( --numRegressifierInstances == 0 ){ delete stringRegressifierMap; stringRegressifierMap = NULL; } } bool Regressifier::copyBaseVariables(const Regressifier *regressifier){ if( regressifier == NULL ){ errorLog << "copyBaseVariables(Regressifier *regressifier) - regressifier pointer is NULL!" 
<< endl; return false; } if( !this->copyMLBaseVariables( regressifier ) ){ return false; } this->regressifierType = regressifier->regressifierType; this->minNumEpochs = regressifier->minNumEpochs; this->maxNumEpochs = regressifier->maxNumEpochs; this->validationSetSize = regressifier->validationSetSize; this->minChange = regressifier->minChange; this->learningRate = regressifier->learningRate; this->rootMeanSquaredTrainingError = regressifier->rootMeanSquaredTrainingError; this->totalSquaredTrainingError = regressifier->totalSquaredTrainingError; this->useValidationSet = regressifier->useValidationSet; this->randomiseTrainingOrder = regressifier->randomiseTrainingOrder; this->regressionData = regressifier->regressionData; this->inputVectorRanges = regressifier->inputVectorRanges; this->targetVectorRanges = regressifier->targetVectorRanges; return true; } bool Regressifier::reset(){ //Reset the base class MLBase::reset(); rootMeanSquaredTrainingError = 0; totalSquaredTrainingError = 0; return true; } bool Regressifier::clear(){ //Clear the MLBase variables MLBase::clear(); //Clear the regressifier variables rootMeanSquaredTrainingError = 0; totalSquaredTrainingError = 0; regressionData.clear(); inputVectorRanges.clear(); targetVectorRanges.clear(); return true; } string Regressifier::getRegressifierType() const{ return regressifierType; } UINT Regressifier::getMinNumEpochs() const{ return minNumEpochs; } UINT Regressifier::getMaxNumEpochs() const{ return maxNumEpochs; } UINT Regressifier::getValidationSetSize() const{ return validationSetSize; } VectorDouble Regressifier::getRegressionData() const{ if( trained ){ return regressionData; } return VectorDouble(); } vector< MinMax > Regressifier::getInputRanges() const{ return inputVectorRanges; } vector< MinMax > Regressifier::getOutputRanges() const{ return targetVectorRanges; } double Regressifier::getLearningRate() const{ return learningRate; } double Regressifier::getMinChange() const{ return minChange; } bool 
Regressifier::getUseValidationSet() const{ return useValidationSet; } bool Regressifier::getRandomiseTrainingOrder() const{ return randomiseTrainingOrder; } double Regressifier::getRootMeanSquaredTrainingError() const { return rootMeanSquaredTrainingError; } double Regressifier::getTotalSquaredTrainingError() const { return totalSquaredTrainingError; } bool Regressifier::setMaxNumEpochs(const UINT maxNumEpochs){ if( maxNumEpochs == 0 ) return false; this->maxNumEpochs = maxNumEpochs; return true; } bool Regressifier::setMinNumEpochs(const UINT minNumEpochs){ this->minNumEpochs = minNumEpochs; return true; } bool Regressifier::setMinChange(const double minChange){ if( minChange < 0 ) return false; this->minChange = minChange; return true; } bool Regressifier::setLearningRate(double learningRate){ if( learningRate > 0 ){ this->learningRate = learningRate; return true; } return false; } bool Regressifier::setValidationSetSize(const UINT validationSetSize){ if( validationSetSize > 0 && validationSetSize < 100 ){ this->validationSetSize = validationSetSize; return true; } warningLog << "setValidationSetSize(const UINT validationSetSize) - The validation size must be in the range [1 99]!" << endl; return false; } bool Regressifier::setUseValidationSet(const bool useValidationSet){ this->useValidationSet = useValidationSet; return true; } bool Regressifier::setRandomiseTrainingOrder(const bool randomiseTrainingOrder){ this->randomiseTrainingOrder = randomiseTrainingOrder; return true; } const Regressifier& Regressifier::getBaseRegressifier() const{ return *this; } } //End of namespace GRT
29.978903
131
0.698241
[ "vector" ]
0e8069d8d5f4e7c38514d5e56e4e8e609d67c675
46,574
cpp
C++
lib/IRGen/ESTreeIRGen.cpp
Naturalclar/hermes
9dc56f66a48203e83bc23d43549b50afd49756d0
[ "MIT" ]
1
2020-10-28T01:09:17.000Z
2020-10-28T01:09:17.000Z
lib/IRGen/ESTreeIRGen.cpp
Naturalclar/hermes
9dc56f66a48203e83bc23d43549b50afd49756d0
[ "MIT" ]
null
null
null
lib/IRGen/ESTreeIRGen.cpp
Naturalclar/hermes
9dc56f66a48203e83bc23d43549b50afd49756d0
[ "MIT" ]
null
null
null
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include "ESTreeIRGen.h" #include "llvh/ADT/StringSet.h" #include "llvh/Support/Debug.h" #include "llvh/Support/SaveAndRestore.h" namespace hermes { namespace irgen { //===----------------------------------------------------------------------===// // Free standing helpers. Instruction *emitLoad(IRBuilder &builder, Value *from, bool inhibitThrow) { if (auto *var = llvh::dyn_cast<Variable>(from)) { if (Variable::declKindNeedsTDZ(var->getDeclKind()) && var->getRelatedVariable()) { builder.createThrowIfUndefinedInst( builder.createLoadFrameInst(var->getRelatedVariable())); } return builder.createLoadFrameInst(var); } else if (auto *globalProp = llvh::dyn_cast<GlobalObjectProperty>(from)) { if (globalProp->isDeclared() || inhibitThrow) return builder.createLoadPropertyInst( builder.getGlobalObject(), globalProp->getName()); else return builder.createTryLoadGlobalPropertyInst(globalProp); } else { llvm_unreachable("unvalid value to load from"); } } Instruction * emitStore(IRBuilder &builder, Value *storedValue, Value *ptr, bool declInit) { if (auto *var = llvh::dyn_cast<Variable>(ptr)) { if (!declInit && Variable::declKindNeedsTDZ(var->getDeclKind()) && var->getRelatedVariable()) { // Must verify whether the variable is initialized. 
builder.createThrowIfUndefinedInst( builder.createLoadFrameInst(var->getRelatedVariable())); } auto *store = builder.createStoreFrameInst(storedValue, var); if (declInit && Variable::declKindNeedsTDZ(var->getDeclKind()) && var->getRelatedVariable()) { builder.createStoreFrameInst( builder.getLiteralBool(true), var->getRelatedVariable()); } return store; } else if (auto *globalProp = llvh::dyn_cast<GlobalObjectProperty>(ptr)) { if (globalProp->isDeclared() || !builder.getFunction()->isStrictMode()) return builder.createStorePropertyInst( storedValue, builder.getGlobalObject(), globalProp->getName()); else return builder.createTryStoreGlobalPropertyInst(storedValue, globalProp); } else { llvm_unreachable("unvalid value to load from"); } } /// \returns true if \p node is a constant expression. bool isConstantExpr(ESTree::Node *node) { // TODO: a little more agressive constant folding. switch (node->getKind()) { case ESTree::NodeKind::StringLiteral: case ESTree::NodeKind::NumericLiteral: case ESTree::NodeKind::NullLiteral: case ESTree::NodeKind::BooleanLiteral: return true; default: return false; } } //===----------------------------------------------------------------------===// // LReference IRBuilder &LReference::getBuilder() { return irgen_->Builder; } Value *LReference::emitLoad() { auto &builder = getBuilder(); IRBuilder::ScopedLocationChange slc(builder, loadLoc_); switch (kind_) { case Kind::Empty: assert(false && "empty cannot be loaded"); return builder.getLiteralUndefined(); case Kind::Member: return builder.createLoadPropertyInst(base_, property_); case Kind::VarOrGlobal: return irgen::emitLoad(builder, base_); case Kind::Destructuring: assert(false && "destructuring cannot be loaded"); return builder.getLiteralUndefined(); case Kind::Error: return builder.getLiteralUndefined(); } llvm_unreachable("invalid LReference kind"); } void LReference::emitStore(Value *value) { auto &builder = getBuilder(); switch (kind_) { case Kind::Empty: return; case 
Kind::Member: builder.createStorePropertyInst(value, base_, property_); return; case Kind::VarOrGlobal: irgen::emitStore(builder, value, base_, declInit_); return; case Kind::Error: return; case Kind::Destructuring: return irgen_->emitDestructuringAssignment( declInit_, destructuringTarget_, value); } llvm_unreachable("invalid LReference kind"); } bool LReference::canStoreWithoutSideEffects() const { return kind_ == Kind::VarOrGlobal && llvh::isa<Variable>(base_); } Variable *LReference::castAsVariable() const { return kind_ == Kind::VarOrGlobal ? dyn_cast_or_null<Variable>(base_) : nullptr; } GlobalObjectProperty *LReference::castAsGlobalObjectProperty() const { return kind_ == Kind::VarOrGlobal ? dyn_cast_or_null<GlobalObjectProperty>(base_) : nullptr; } //===----------------------------------------------------------------------===// // ESTreeIRGen ESTreeIRGen::ESTreeIRGen( ESTree::Node *root, const DeclarationFileListTy &declFileList, Module *M, const ScopeChain &scopeChain) : Mod(M), Builder(Mod), instrumentIR_(M, Builder), Root(root), DeclarationFileList(declFileList), lexicalScopeChain(resolveScopeIdentifiers(scopeChain)), identEval_(Builder.createIdentifier("eval")), identLet_(Builder.createIdentifier("let")), identDefaultExport_(Builder.createIdentifier("?default")) {} void ESTreeIRGen::doIt() { LLVM_DEBUG(dbgs() << "Processing top level program.\n"); ESTree::ProgramNode *Program; Program = llvh::dyn_cast<ESTree::ProgramNode>(Root); if (!Program) { Builder.getModule()->getContext().getSourceErrorManager().error( SMLoc{}, "missing 'Program' AST node"); return; } LLVM_DEBUG(dbgs() << "Found Program decl.\n"); // The function which will "execute" the module. Function *topLevelFunction; // Function context used only when compiling in an existing lexical scope // chain. It is only initialized if we have a lexical scope chain. 
llvh::Optional<FunctionContext> wrapperFunctionContext{}; if (!lexicalScopeChain) { topLevelFunction = Builder.createTopLevelFunction( ESTree::isStrict(Program->strictness), Program->getSourceRange()); } else { // If compiling in an existing lexical context, we need to install the // scopes in a wrapper function, which represents the "global" code. Function *wrapperFunction = Builder.createFunction( "", Function::DefinitionKind::ES5Function, ESTree::isStrict(Program->strictness), Program->getSourceRange(), true); // Initialize the wrapper context. wrapperFunctionContext.emplace(this, wrapperFunction, nullptr); // Populate it with dummy code so it doesn't crash the back-end. genDummyFunction(wrapperFunction); // Restore the previously saved parent scopes. materializeScopesInChain(wrapperFunction, lexicalScopeChain, -1); // Finally create the function which will actually be executed. topLevelFunction = Builder.createFunction( "eval", Function::DefinitionKind::ES5Function, ESTree::isStrict(Program->strictness), Program->getSourceRange(), false); } Mod->setTopLevelFunction(topLevelFunction); // Function context for topLevelFunction. FunctionContext topLevelFunctionContext{ this, topLevelFunction, Program->getSemInfo()}; // IRGen needs a pointer to the outer-most context, which is either // topLevelContext or wrapperFunctionContext, depending on whether the latter // was created. // We want to set the pointer to that outer-most context, but ensure that it // doesn't outlive the context it is pointing to. llvh::SaveAndRestore<FunctionContext *> saveTopLevelContext( topLevelContext, !wrapperFunctionContext.hasValue() ? &topLevelFunctionContext : &wrapperFunctionContext.getValue()); // Now declare all externally supplied global properties, but only if we don't // have a lexical scope chain. 
if (!lexicalScopeChain) { for (auto declFile : DeclarationFileList) { processDeclarationFile(declFile); } } emitFunctionPrologue( Program, Builder.createBasicBlock(topLevelFunction), InitES5CaptureState::Yes, DoEmitParameters::Yes); Value *retVal; { // Allocate the return register, initialize it to undefined. curFunction()->globalReturnRegister = Builder.createAllocStackInst(genAnonymousLabelName("ret")); Builder.createStoreStackInst( Builder.getLiteralUndefined(), curFunction()->globalReturnRegister); genBody(Program->_body); // Terminate the top-level scope with a return statement. retVal = Builder.createLoadStackInst(curFunction()->globalReturnRegister); } emitFunctionEpilogue(retVal); } void ESTreeIRGen::doCJSModule( Function *topLevelFunction, sem::FunctionInfo *semInfo, uint32_t id, llvh::StringRef filename) { assert(Root && "no root in ESTreeIRGen"); auto *func = cast<ESTree::FunctionExpressionNode>(Root); assert(func && "doCJSModule without a module"); FunctionContext topLevelFunctionContext{this, topLevelFunction, semInfo}; llvh::SaveAndRestore<FunctionContext *> saveTopLevelContext( topLevelContext, &topLevelFunctionContext); // Now declare all externally supplied global properties, but only if we don't // have a lexical scope chain. 
assert( !lexicalScopeChain && "Lexical scope chain not supported for CJS modules"); for (auto declFile : DeclarationFileList) { processDeclarationFile(declFile); } Identifier functionName = Builder.createIdentifier("cjs_module"); Function *newFunc = genES5Function(functionName, nullptr, func); Builder.getModule()->addCJSModule( id, Builder.createIdentifier(filename), newFunc); } static int getDepth(const std::shared_ptr<SerializedScope> chain) { int depth = 0; const SerializedScope *current = chain.get(); while (current) { depth += 1; current = current->parentScope.get(); } return depth; } std::pair<Function *, Function *> ESTreeIRGen::doLazyFunction( hbc::LazyCompilationData *lazyData) { // Create a top level function that will never be executed, because: // 1. IRGen assumes the first function always has global scope // 2. It serves as the root for dummy functions for lexical data Function *topLevel = Builder.createTopLevelFunction(lazyData->strictMode, {}); FunctionContext topLevelFunctionContext{this, topLevel, nullptr}; // Save the top-level context, but ensure it doesn't outlive what it is // pointing to. llvh::SaveAndRestore<FunctionContext *> saveTopLevelContext( topLevelContext, &topLevelFunctionContext); auto *node = cast<ESTree::FunctionLikeNode>(Root); // We restore scoping information in two separate ways: // 1. By adding them to ExternalScopes for resolution here // 2. By adding dummy functions for lexical scoping debug info later // // Instruction selection determines the delta between the ExternalScope // and the dummy function chain, so we add the ExternalScopes with // positive depth. lexicalScopeChain = lazyData->parentScope; materializeScopesInChain( topLevel, lexicalScopeChain, getDepth(lexicalScopeChain) - 1); // If lazyData->closureAlias is specified, we must create an alias binding // between originalName (which must be valid) and the variable identified by // closureAlias. 
Variable *parentVar = nullptr; if (lazyData->closureAlias.isValid()) { assert(lazyData->originalName.isValid() && "Original name invalid"); assert( lazyData->originalName != lazyData->closureAlias && "Original name must be different from the alias"); // NOTE: the closureAlias target must exist and must be a Variable. parentVar = cast<Variable>(nameTable_.lookup(lazyData->closureAlias)); // Re-create the alias. nameTable_.insert(lazyData->originalName, parentVar); } assert( !llvh::isa<ESTree::ArrowFunctionExpressionNode>(node) && "lazy compilation not supported for arrow functions"); auto *func = genES5Function( lazyData->originalName, parentVar, node, lazyData->isGeneratorInnerFunction); addLexicalDebugInfo(func, topLevel, lexicalScopeChain); return {func, topLevel}; } std::pair<Value *, bool> ESTreeIRGen::declareVariableOrGlobalProperty( Function *inFunc, VarDecl::Kind declKind, Identifier name) { Value *found = nameTable_.lookup(name); // If the variable is already declared in this scope, do not create a // second instance. if (found) { if (auto *var = llvh::dyn_cast<Variable>(found)) { if (var->getParent()->getFunction() == inFunc) return {found, false}; } else { assert( llvh::isa<GlobalObjectProperty>(found) && "Invalid value found in name table"); if (inFunc->isGlobalScope()) return {found, false}; } } // Create a property if global scope, variable otherwise. Value *res; if (inFunc->isGlobalScope() && declKind == VarDecl::Kind::Var) { res = Builder.createGlobalObjectProperty(name, true); } else { Variable::DeclKind vdc; if (declKind == VarDecl::Kind::Let) vdc = Variable::DeclKind::Let; else if (declKind == VarDecl::Kind::Const) vdc = Variable::DeclKind::Const; else { assert(declKind == VarDecl::Kind::Var); vdc = Variable::DeclKind::Var; } auto *var = Builder.createVariable(inFunc->getFunctionScope(), vdc, name); // For "let" and "const" create the related TDZ flag. 
if (Variable::declKindNeedsTDZ(vdc) && Mod->getContext().getCodeGenerationSettings().enableTDZ) { llvh::SmallString<32> strBuf{"tdz$"}; strBuf.append(name.str()); auto *related = Builder.createVariable( var->getParent(), Variable::DeclKind::Var, genAnonymousLabelName(strBuf)); var->setRelatedVariable(related); related->setRelatedVariable(var); } res = var; } // Register the variable in the scoped hash table. nameTable_.insert(name, res); return {res, true}; } GlobalObjectProperty *ESTreeIRGen::declareAmbientGlobalProperty( Identifier name) { // Avoid redefining global properties. auto *prop = dyn_cast_or_null<GlobalObjectProperty>(nameTable_.lookup(name)); if (prop) return prop; LLVM_DEBUG( llvh::dbgs() << "declaring ambient global property " << name << " " << name.getUnderlyingPointer() << "\n"); prop = Builder.createGlobalObjectProperty(name, false); nameTable_.insertIntoScope(&topLevelContext->scope, name, prop); return prop; } namespace { /// This visitor structs collects declarations within a single closure without /// descending into child closures. struct DeclHoisting { /// The list of collected identifiers (variables and functions). llvh::SmallVector<ESTree::VariableDeclaratorNode *, 8> decls{}; /// A list of functions that need to be hoisted and materialized before we /// can generate the rest of the function. llvh::SmallVector<ESTree::FunctionDeclarationNode *, 8> closures; explicit DeclHoisting() = default; ~DeclHoisting() = default; /// Extract the variable name from the nodes that can define new variables. 
/// The nodes that can define a new variable in the scope are: /// VariableDeclarator and FunctionDeclaration> void collectDecls(ESTree::Node *V) { if (auto VD = llvh::dyn_cast<ESTree::VariableDeclaratorNode>(V)) { return decls.push_back(VD); } if (auto FD = llvh::dyn_cast<ESTree::FunctionDeclarationNode>(V)) { return closures.push_back(FD); } } bool shouldVisit(ESTree::Node *V) { // Collect declared names, even if we don't descend into children nodes. collectDecls(V); // Do not descend to child closures because the variables they define are // not exposed to the outside function. if (llvh::isa<ESTree::FunctionDeclarationNode>(V) || llvh::isa<ESTree::FunctionExpressionNode>(V) || llvh::isa<ESTree::ArrowFunctionExpressionNode>(V)) return false; return true; } void enter(ESTree::Node *V) {} void leave(ESTree::Node *V) {} }; } // anonymous namespace. void ESTreeIRGen::processDeclarationFile(ESTree::ProgramNode *programNode) { auto Program = dyn_cast_or_null<ESTree::ProgramNode>(programNode); if (!Program) return; DeclHoisting DH; Program->visit(DH); // Create variable declarations for each of the hoisted variables. for (auto vd : DH.decls) declareAmbientGlobalProperty(getNameFieldFromID(vd->_id)); for (auto fd : DH.closures) declareAmbientGlobalProperty(getNameFieldFromID(fd->_id)); } Value *ESTreeIRGen::ensureVariableExists(ESTree::IdentifierNode *id) { assert(id && "id must be a valid Identifier node"); Identifier name = getNameFieldFromID(id); // Check if this is a known variable. if (auto *var = nameTable_.lookup(name)) return var; if (curFunction()->function->isStrictMode()) { // Report a warning in strict mode. 
auto currentFunc = Builder.getInsertionBlock()->getParent(); Builder.getModule()->getContext().getSourceErrorManager().warning( Warning::UndefinedVariable, id->getSourceRange(), Twine("the variable \"") + name.str() + "\" was not declared in " + currentFunc->getDescriptiveDefinitionKindStr() + " \"" + currentFunc->getInternalNameStr() + "\""); } // Undeclared variable is an ambient global property. return declareAmbientGlobalProperty(name); } Value *ESTreeIRGen::genMemberExpressionProperty( ESTree::MemberExpressionLikeNode *Mem) { // If computed is true, the node corresponds to a computed (a[b]) member // lookup and '_property' is an Expression. Otherwise, the node // corresponds to a static (a.b) member lookup and '_property' is an // Identifier. // Details of the computed field are available here: // https://github.com/estree/estree/blob/master/spec.md#memberexpression if (getComputed(Mem)) { return genExpression(getProperty(Mem)); } // Arrays and objects may be accessed with integer indices. if (auto N = llvh::dyn_cast<ESTree::NumericLiteralNode>(getProperty(Mem))) { return Builder.getLiteralNumber(N->_value); } // ESTree encodes property access as MemberExpression -> Identifier. auto Id = cast<ESTree::IdentifierNode>(getProperty(Mem)); Identifier fieldName = getNameFieldFromID(Id); LLVM_DEBUG( dbgs() << "Emitting direct label access to field '" << fieldName << "'\n"); return Builder.getLiteralString(fieldName); } bool ESTreeIRGen::canCreateLRefWithoutSideEffects( hermes::ESTree::Node *target) { // Check for an identifier bound to an existing local variable. 
if (auto *iden = llvh::dyn_cast<ESTree::IdentifierNode>(target)) { return dyn_cast_or_null<Variable>( nameTable_.lookup(getNameFieldFromID(iden))); } return false; } LReference ESTreeIRGen::createLRef(ESTree::Node *node, bool declInit) { SMLoc sourceLoc = node->getDebugLoc(); IRBuilder::ScopedLocationChange slc(Builder, sourceLoc); if (llvh::isa<ESTree::EmptyNode>(node)) { LLVM_DEBUG(dbgs() << "Creating an LRef for EmptyNode.\n"); return LReference( LReference::Kind::Empty, this, false, nullptr, nullptr, sourceLoc); } /// Create lref for member expression (ex: o.f). if (auto *ME = llvh::dyn_cast<ESTree::MemberExpressionNode>(node)) { LLVM_DEBUG(dbgs() << "Creating an LRef for member expression.\n"); Value *obj = genExpression(ME->_object); Value *prop = genMemberExpressionProperty(ME); return LReference( LReference::Kind::Member, this, false, obj, prop, sourceLoc); } /// Create lref for identifiers (ex: a). if (auto *iden = llvh::dyn_cast<ESTree::IdentifierNode>(node)) { LLVM_DEBUG(dbgs() << "Creating an LRef for identifier.\n"); LLVM_DEBUG( dbgs() << "Looking for identifier \"" << getNameFieldFromID(iden) << "\"\n"); auto *var = ensureVariableExists(iden); return LReference( LReference::Kind::VarOrGlobal, this, declInit, var, nullptr, sourceLoc); } /// Create lref for variable decls (ex: var a). if (auto *V = llvh::dyn_cast<ESTree::VariableDeclarationNode>(node)) { LLVM_DEBUG(dbgs() << "Creating an LRef for variable declaration.\n"); assert(V->_declarations.size() == 1 && "Malformed variable declaration"); auto *decl = cast<ESTree::VariableDeclaratorNode>(&V->_declarations.front()); return createLRef(decl->_id, true); } // Destructuring assignment. 
if (auto *pat = llvh::dyn_cast<ESTree::PatternNode>(node)) { return LReference(this, declInit, pat); } Builder.getModule()->getContext().getSourceErrorManager().error( node->getSourceRange(), "unsupported assignment target"); return LReference( LReference::Kind::Error, this, false, nullptr, nullptr, sourceLoc); } Value *ESTreeIRGen::genHermesInternalCall( StringRef name, Value *thisValue, ArrayRef<Value *> args) { return Builder.createCallInst( Builder.createLoadPropertyInst( Builder.createTryLoadGlobalPropertyInst("HermesInternal"), name), thisValue, args); } Value *ESTreeIRGen::genBuiltinCall( hermes::BuiltinMethod::Enum builtinIndex, ArrayRef<Value *> args) { return Builder.createCallBuiltinInst(builtinIndex, args); } void ESTreeIRGen::emitEnsureObject(Value *value, StringRef message) { // TODO: use "thisArg" when builtins get fixed to support it. genBuiltinCall( BuiltinMethod::HermesBuiltin_ensureObject, {value, Builder.getLiteralString(message)}); } Value *ESTreeIRGen::emitIteratorSymbol() { // FIXME: use the builtin value of @@iterator. Symbol could have been // overridden. 
return Builder.createLoadPropertyInst( Builder.createTryLoadGlobalPropertyInst("Symbol"), "iterator"); } ESTreeIRGen::IteratorRecordSlow ESTreeIRGen::emitGetIteratorSlow(Value *obj) { auto *method = Builder.createLoadPropertyInst(obj, emitIteratorSymbol()); auto *iterator = Builder.createCallInst(method, obj, {}); emitEnsureObject(iterator, "iterator is not an object"); auto *nextMethod = Builder.createLoadPropertyInst(iterator, "next"); return {iterator, nextMethod}; } Value *ESTreeIRGen::emitIteratorNextSlow(IteratorRecordSlow iteratorRecord) { auto *nextResult = Builder.createCallInst( iteratorRecord.nextMethod, iteratorRecord.iterator, {}); emitEnsureObject(nextResult, "iterator.next() did not return an object"); return nextResult; } Value *ESTreeIRGen::emitIteratorCompleteSlow(Value *iterResult) { return Builder.createLoadPropertyInst(iterResult, "done"); } Value *ESTreeIRGen::emitIteratorValueSlow(Value *iterResult) { return Builder.createLoadPropertyInst(iterResult, "value"); } void ESTreeIRGen::emitIteratorCloseSlow( hermes::irgen::ESTreeIRGen::IteratorRecordSlow iteratorRecord, bool ignoreInnerException) { auto *haveReturn = Builder.createBasicBlock(Builder.getFunction()); auto *noReturn = Builder.createBasicBlock(Builder.getFunction()); auto *returnMethod = genBuiltinCall( BuiltinMethod::HermesBuiltin_getMethod, {iteratorRecord.iterator, Builder.getLiteralString("return")}); Builder.createCompareBranchInst( returnMethod, Builder.getLiteralUndefined(), BinaryOperatorInst::OpKind::StrictlyEqualKind, noReturn, haveReturn); Builder.setInsertionBlock(haveReturn); if (ignoreInnerException) { emitTryCatchScaffolding( noReturn, // emitBody. [this, returnMethod, &iteratorRecord]() { Builder.createCallInst(returnMethod, iteratorRecord.iterator, {}); }, // emitNormalCleanup. []() {}, // emitHandler. [this](BasicBlock *nextBlock) { // We need to catch the exception, even if we don't used it. 
Builder.createCatchInst(); Builder.createBranchInst(nextBlock); }); } else { auto *innerResult = Builder.createCallInst(returnMethod, iteratorRecord.iterator, {}); emitEnsureObject(innerResult, "iterator.return() did not return an object"); Builder.createBranchInst(noReturn); } Builder.setInsertionBlock(noReturn); } ESTreeIRGen::IteratorRecord ESTreeIRGen::emitGetIterator(Value *obj) { // Each of these will be modified by "next", so we use a stack storage. auto *iterStorage = Builder.createAllocStackInst(genAnonymousLabelName("iter")); auto *sourceOrNext = Builder.createAllocStackInst(genAnonymousLabelName("sourceOrNext")); Builder.createStoreStackInst(obj, sourceOrNext); auto *iter = Builder.createIteratorBeginInst(sourceOrNext); Builder.createStoreStackInst(iter, iterStorage); return IteratorRecord{iterStorage, sourceOrNext}; } void ESTreeIRGen::emitDestructuringAssignment( bool declInit, ESTree::PatternNode *target, Value *source) { if (auto *APN = llvh::dyn_cast<ESTree::ArrayPatternNode>(target)) return emitDestructuringArray(declInit, APN, source); else if (auto *OPN = llvh::dyn_cast<ESTree::ObjectPatternNode>(target)) return emitDestructuringObject(declInit, OPN, source); else { Mod->getContext().getSourceErrorManager().error( target->getSourceRange(), "unsupported destructuring target"); } } void ESTreeIRGen::emitDestructuringArray( bool declInit, ESTree::ArrayPatternNode *targetPat, Value *source) { const IteratorRecord iteratorRecord = emitGetIterator(source); /// iteratorDone = undefined. auto *iteratorDone = Builder.createAllocStackInst(genAnonymousLabelName("iterDone")); Builder.createStoreStackInst(Builder.getLiteralUndefined(), iteratorDone); auto *value = Builder.createAllocStackInst(genAnonymousLabelName("iterValue")); SharedExceptionHandler handler{}; handler.exc = Builder.createAllocStackInst(genAnonymousLabelName("exc")); // All exception handlers branch to this block. 
handler.exceptionBlock = Builder.createBasicBlock(Builder.getFunction()); bool first = true; bool emittedRest = false; // The LReference created in the previous iteration of the destructuring // loop. We need it because we want to put the previous store and the creation // of the next LReference under one try block. llvh::Optional<LReference> lref; /// If the previous LReference is valid and non-empty, store "value" into /// it and reset the LReference. auto storePreviousValue = [&lref, &handler, this, value]() { if (lref && !lref->isEmpty()) { if (lref->canStoreWithoutSideEffects()) { lref->emitStore(Builder.createLoadStackInst(value)); } else { // If we can't store without side effects, wrap the store in try/catch. emitTryWithSharedHandler(&handler, [this, &lref, value]() { lref->emitStore(Builder.createLoadStackInst(value)); }); } lref.reset(); } }; for (auto &elem : targetPat->_elements) { ESTree::Node *target = &elem; ESTree::Node *init = nullptr; if (auto *rest = llvh::dyn_cast<ESTree::RestElementNode>(target)) { storePreviousValue(); emitRestElement(declInit, rest, iteratorRecord, iteratorDone, &handler); emittedRest = true; break; } // If we have an initializer, unwrap it. if (auto *assign = llvh::dyn_cast<ESTree::AssignmentPatternNode>(target)) { target = assign->_left; init = assign->_right; } // Can we create the new LReference without side effects and avoid a // try/catch. The complexity comes from having to check whether the last // LReference also can avoid a try/catch or not. if (canCreateLRefWithoutSideEffects(target)) { // We don't need a try/catch, but last lref might. Just let the routine // do the right thing. storePreviousValue(); lref = createLRef(target, declInit); } else { // We need a try/catch, but last lref might not. If it doesn't, emit it // directly and clear it, so we won't do anything inside our try/catch. 
if (lref && lref->canStoreWithoutSideEffects()) { lref->emitStore(Builder.createLoadStackInst(value)); lref.reset(); } emitTryWithSharedHandler( &handler, [this, &lref, value, target, declInit]() { // Store the previous value, if we have one. if (lref && !lref->isEmpty()) lref->emitStore(Builder.createLoadStackInst(value)); lref = createLRef(target, declInit); }); } // Pseudocode of the algorithm for a step: // // value = undefined; // if (iteratorDone) goto nextBlock // notDoneBlock: // stepResult = IteratorNext(iteratorRecord) // stepDone = IteratorComplete(stepResult) // iteratorDone = stepDone // if (stepDone) goto nextBlock // newValueBlock: // value = IteratorValue(stepResult) // nextBlock: // if (value !== undefined) goto storeBlock [if initializer present] // value = initializer [if initializer present] // storeBlock: // lref.emitStore(value) auto *notDoneBlock = Builder.createBasicBlock(Builder.getFunction()); auto *newValueBlock = Builder.createBasicBlock(Builder.getFunction()); auto *nextBlock = Builder.createBasicBlock(Builder.getFunction()); auto *getDefaultBlock = init ? Builder.createBasicBlock(Builder.getFunction()) : nullptr; auto *storeBlock = init ? Builder.createBasicBlock(Builder.getFunction()) : nullptr; Builder.createStoreStackInst(Builder.getLiteralUndefined(), value); // In the first iteration we know that "done" is false. if (first) { first = false; Builder.createBranchInst(notDoneBlock); } else { Builder.createCondBranchInst( Builder.createLoadStackInst(iteratorDone), nextBlock, notDoneBlock); } // notDoneBlock: Builder.setInsertionBlock(notDoneBlock); auto *stepValue = emitIteratorNext(iteratorRecord); auto *stepDone = emitIteratorComplete(iteratorRecord); Builder.createStoreStackInst(stepDone, iteratorDone); Builder.createCondBranchInst( stepDone, init ? 
getDefaultBlock : nextBlock, newValueBlock); // newValueBlock: Builder.setInsertionBlock(newValueBlock); Builder.createStoreStackInst(stepValue, value); Builder.createBranchInst(nextBlock); // nextBlock: Builder.setInsertionBlock(nextBlock); // NOTE: we can't use emitOptionalInitializationHere() because we want to // be able to jump directly to getDefaultBlock. if (init) { // if (value !== undefined) goto storeBlock [if initializer present] // value = initializer [if initializer present] // storeBlock: Builder.createCondBranchInst( Builder.createBinaryOperatorInst( Builder.createLoadStackInst(value), Builder.getLiteralUndefined(), BinaryOperatorInst::OpKind::StrictlyNotEqualKind), storeBlock, getDefaultBlock); Identifier nameHint = llvh::isa<ESTree::IdentifierNode>(target) ? getNameFieldFromID(target) : Identifier{}; // getDefaultBlock: Builder.setInsertionBlock(getDefaultBlock); Builder.createStoreStackInst(genExpression(init, nameHint), value); Builder.createBranchInst(storeBlock); // storeBlock: Builder.setInsertionBlock(storeBlock); } } storePreviousValue(); // If in the end the iterator is not done, close it. We only need to do // that if we didn't end with a rest element because it would have exhausted // the iterator. if (!emittedRest) { auto *notDoneBlock = Builder.createBasicBlock(Builder.getFunction()); auto *doneBlock = Builder.createBasicBlock(Builder.getFunction()); Builder.createCondBranchInst( Builder.createLoadStackInst(iteratorDone), doneBlock, notDoneBlock); Builder.setInsertionBlock(notDoneBlock); emitIteratorClose(iteratorRecord, false); Builder.createBranchInst(doneBlock); Builder.setInsertionBlock(doneBlock); } // If we emitted at least one try block, generate the exception handler. 
if (handler.emittedTry) { IRBuilder::SaveRestore saveRestore{Builder}; Builder.setInsertionBlock(handler.exceptionBlock); auto *notDoneBlock = Builder.createBasicBlock(Builder.getFunction()); auto *doneBlock = Builder.createBasicBlock(Builder.getFunction()); Builder.createCondBranchInst( Builder.createLoadStackInst(iteratorDone), doneBlock, notDoneBlock); Builder.setInsertionBlock(notDoneBlock); emitIteratorClose(iteratorRecord, true); Builder.createBranchInst(doneBlock); Builder.setInsertionBlock(doneBlock); Builder.createThrowInst(Builder.createLoadStackInst(handler.exc)); } else { // If we didn't use the exception block, we need to delete it, otherwise // it fails IR validation even though it will be never executed. handler.exceptionBlock->eraseFromParent(); // Delete the not needed exception stack allocation. It would be optimized // out later, but it is nice to produce cleaner non-optimized IR, if it is // easy to do so. assert( !handler.exc->hasUsers() && "should not have any users if no try/catch was emitted"); handler.exc->eraseFromParent(); } } void ESTreeIRGen::emitRestElement( bool declInit, ESTree::RestElementNode *rest, hermes::irgen::ESTreeIRGen::IteratorRecord iteratorRecord, hermes::AllocStackInst *iteratorDone, SharedExceptionHandler *handler) { // 13.3.3.8 BindingRestElement:...BindingIdentifier auto *notDoneBlock = Builder.createBasicBlock(Builder.getFunction()); auto *newValueBlock = Builder.createBasicBlock(Builder.getFunction()); auto *doneBlock = Builder.createBasicBlock(Builder.getFunction()); llvh::Optional<LReference> lref; if (canCreateLRefWithoutSideEffects(rest->_argument)) { lref = createLRef(rest->_argument, declInit); } else { emitTryWithSharedHandler(handler, [this, &lref, rest, declInit]() { lref = createLRef(rest->_argument, declInit); }); } auto *A = Builder.createAllocArrayInst({}, 0); auto *n = Builder.createAllocStackInst(genAnonymousLabelName("n")); // n = 0. 
Builder.createStoreStackInst(Builder.getLiteralPositiveZero(), n); Builder.createCondBranchInst( Builder.createLoadStackInst(iteratorDone), doneBlock, notDoneBlock); // notDoneBlock: Builder.setInsertionBlock(notDoneBlock); auto *stepValue = emitIteratorNext(iteratorRecord); auto *stepDone = emitIteratorComplete(iteratorRecord); Builder.createStoreStackInst(stepDone, iteratorDone); Builder.createCondBranchInst(stepDone, doneBlock, newValueBlock); // newValueBlock: Builder.setInsertionBlock(newValueBlock); auto *nVal = Builder.createLoadStackInst(n); nVal->setType(Type::createNumber()); // A[n] = stepValue; // Unfortunately this can throw because our arrays can have limited range. // The spec doesn't specify what to do in this case, but the reasonable thing // to do is to what we would if this was a for-of loop doing the same thing. // See section BindingRestElement:...BindingIdentifier, step f and g: // https://www.ecma-international.org/ecma-262/9.0/index.html#sec-destructuring-binding-patterns-runtime-semantics-iteratorbindinginitialization emitTryWithSharedHandler(handler, [this, stepValue, A, nVal]() { Builder.createStorePropertyInst(stepValue, A, nVal); }); // ++n; auto add = Builder.createBinaryOperatorInst( nVal, Builder.getLiteralNumber(1), BinaryOperatorInst::OpKind::AddKind); add->setType(Type::createNumber()); Builder.createStoreStackInst(add, n); Builder.createBranchInst(notDoneBlock); // doneBlock: Builder.setInsertionBlock(doneBlock); if (lref->canStoreWithoutSideEffects()) { lref->emitStore(A); } else { emitTryWithSharedHandler(handler, [&lref, A]() { lref->emitStore(A); }); } } void ESTreeIRGen::emitDestructuringObject( bool declInit, ESTree::ObjectPatternNode *target, Value *source) { // Keep track of which keys have been destructured. llvh::SmallVector<Value *, 4> excludedItems{}; if (target->_properties.empty() || llvh::isa<ESTree::RestElementNode>(target->_properties.front())) { // ES10.0 13.3.3.5 // 1. Perform ? RequireObjectCoercible(value). 
// The extremely unlikely case that the user is attempting to destructure // into {} or {...rest}. Any other object destructuring will fail upon // attempting to retrieve a real property from `source`. // We must check that the source can be destructured, // and the only time this will throw is if source is undefined or null. auto *throwBB = Builder.createBasicBlock(Builder.getFunction()); auto *doneBB = Builder.createBasicBlock(Builder.getFunction()); // Use == instead of === to account for both undefined and null. Builder.createCondBranchInst( Builder.createBinaryOperatorInst( source, Builder.getLiteralNull(), BinaryOperatorInst::OpKind::EqualKind), throwBB, doneBB); Builder.setInsertionBlock(throwBB); genBuiltinCall( BuiltinMethod::HermesBuiltin_throwTypeError, {source, Builder.getLiteralString( "Cannot destructure 'undefined' or 'null'.")}); // throwTypeError will always throw. // This return is here to ensure well-formed IR, and will not run. Builder.createReturnInst(Builder.getLiteralUndefined()); Builder.setInsertionBlock(doneBB); } for (auto &elem : target->_properties) { if (auto *rest = llvh::dyn_cast<ESTree::RestElementNode>(&elem)) { emitRestProperty(declInit, rest, excludedItems, source); break; } auto *propNode = cast<ESTree::PropertyNode>(&elem); ESTree::Node *valueNode = propNode->_value; ESTree::Node *init = nullptr; // If we have an initializer, unwrap it. if (auto *assign = llvh::dyn_cast<ESTree::AssignmentPatternNode>(valueNode)) { valueNode = assign->_left; init = assign->_right; } Identifier nameHint = llvh::isa<ESTree::IdentifierNode>(valueNode) ? 
getNameFieldFromID(valueNode) : Identifier{}; if (llvh::isa<ESTree::IdentifierNode>(propNode->_key) && !propNode->_computed) { Identifier key = getNameFieldFromID(propNode->_key); excludedItems.push_back(Builder.getLiteralString(key)); auto *loadedValue = Builder.createLoadPropertyInst(source, key); createLRef(valueNode, declInit) .emitStore(emitOptionalInitialization(loadedValue, init, nameHint)); } else { Value *key = genExpression(propNode->_key); excludedItems.push_back(key); auto *loadedValue = Builder.createLoadPropertyInst(source, key); createLRef(valueNode, declInit) .emitStore(emitOptionalInitialization(loadedValue, init, nameHint)); } } } void ESTreeIRGen::emitRestProperty( bool declInit, ESTree::RestElementNode *rest, const llvh::SmallVectorImpl<Value *> &excludedItems, hermes::Value *source) { auto lref = createLRef(rest->_argument, declInit); // Construct the excluded items. HBCAllocObjectFromBufferInst::ObjectPropertyMap exMap{}; llvh::SmallVector<Value *, 4> computedExcludedItems{}; // Keys need de-duping so we don't create a dummy exclusion object with // duplicate keys. llvh::DenseSet<Literal *> keyDeDupeSet; auto *zeroValue = Builder.getLiteralPositiveZero(); for (Value *key : excludedItems) { if (auto *lit = llvh::dyn_cast<Literal>(key)) { // If the key is a literal, we can place it in the // HBCAllocObjectFromBufferInst buffer. if (keyDeDupeSet.insert(lit).second) { exMap.emplace_back(std::make_pair(lit, zeroValue)); } } else { // If the key is not a literal, then we have to dynamically populate the // excluded object with it after creation from the buffer. computedExcludedItems.push_back(key); } } Value *excludedObj; if (excludedItems.empty()) { excludedObj = Builder.getLiteralUndefined(); } else { // This size is only a hint as the true size may change if there are // duplicates when computedExcludedItems is processed at run-time. 
auto excludedSizeHint = exMap.size() + computedExcludedItems.size(); if (exMap.empty()) { excludedObj = Builder.createAllocObjectInst(excludedSizeHint); } else { excludedObj = Builder.createHBCAllocObjectFromBufferInst(exMap, excludedSizeHint); } for (Value *key : computedExcludedItems) { Builder.createStorePropertyInst(zeroValue, excludedObj, key); } } auto *restValue = genBuiltinCall( BuiltinMethod::HermesBuiltin_copyDataProperties, {Builder.createAllocObjectInst(0), source, excludedObj}); lref.emitStore(restValue); } Value *ESTreeIRGen::emitOptionalInitialization( Value *value, ESTree::Node *init, Identifier nameHint) { if (!init) return value; auto *currentBlock = Builder.getInsertionBlock(); auto *getDefaultBlock = Builder.createBasicBlock(Builder.getFunction()); auto *storeBlock = Builder.createBasicBlock(Builder.getFunction()); // if (value !== undefined) goto storeBlock [if initializer present] // value = initializer [if initializer present] // storeBlock: Builder.createCondBranchInst( Builder.createBinaryOperatorInst( value, Builder.getLiteralUndefined(), BinaryOperatorInst::OpKind::StrictlyNotEqualKind), storeBlock, getDefaultBlock); // getDefaultBlock: Builder.setInsertionBlock(getDefaultBlock); auto *defaultValue = genExpression(init, nameHint); auto *defaultResultBlock = Builder.getInsertionBlock(); Builder.createBranchInst(storeBlock); // storeBlock: Builder.setInsertionBlock(storeBlock); return Builder.createPhiInst( {value, defaultValue}, {currentBlock, defaultResultBlock}); } std::shared_ptr<SerializedScope> ESTreeIRGen::resolveScopeIdentifiers( const ScopeChain &chain) { std::shared_ptr<SerializedScope> current{}; for (auto it = chain.functions.rbegin(), end = chain.functions.rend(); it < end; it++) { auto next = std::make_shared<SerializedScope>(); next->variables.reserve(it->variables.size()); for (auto var : it->variables) { next->variables.push_back(std::move(Builder.createIdentifier(var))); } next->parentScope = current; current = next; } 
return current; } void ESTreeIRGen::materializeScopesInChain( Function *wrapperFunction, const std::shared_ptr<const SerializedScope> &scope, int depth) { if (!scope) return; assert(depth < 1000 && "Excessive scope depth"); // First materialize parent scopes. materializeScopesInChain(wrapperFunction, scope->parentScope, depth - 1); // If scope->closureAlias is specified, we must create an alias binding // between originalName (which must be valid) and the variable identified by // closureAlias. // // We do this *before* inserting the other variables below to reflect that // the closure alias is conceptually in an outside scope and also avoid the // closure name incorrectly shadowing the same name inside the closure. if (scope->closureAlias.isValid()) { assert(scope->originalName.isValid() && "Original name invalid"); assert( scope->originalName != scope->closureAlias && "Original name must be different from the alias"); // NOTE: the closureAlias target must exist and must be a Variable. auto *closureVar = cast<Variable>(nameTable_.lookup(scope->closureAlias)); // Re-create the alias. nameTable_.insert(scope->originalName, closureVar); } // Create an external scope. ExternalScope *ES = Builder.createExternalScope(wrapperFunction, depth); for (auto variableId : scope->variables) { auto *variable = Builder.createVariable(ES, Variable::DeclKind::Var, variableId); nameTable_.insert(variableId, variable); } } namespace { void buildDummyLexicalParent( IRBuilder &builder, Function *parent, Function *child) { // FunctionScopeAnalysis works through CreateFunctionInsts, so we have to add // that even though these functions are never invoked. auto *block = builder.createBasicBlock(parent); builder.setInsertionBlock(block); builder.createUnreachableInst(); auto *inst = builder.createCreateFunctionInst(child); builder.createReturnInst(inst); } } // namespace /// Add dummy functions for lexical scope debug info. 
// They are never executed and serve no purpose other than filling in debug // info. This is currently necessary because we can't rely on parent bytecode // modules for lexical scoping data. void ESTreeIRGen::addLexicalDebugInfo( Function *child, Function *global, const std::shared_ptr<const SerializedScope> &scope) { if (!scope || !scope->parentScope) { buildDummyLexicalParent(Builder, global, child); return; } auto *current = Builder.createFunction( scope->originalName, Function::DefinitionKind::ES5Function, false, {}, false); for (auto &var : scope->variables) { Builder.createVariable( current->getFunctionScope(), Variable::DeclKind::Var, var); } buildDummyLexicalParent(Builder, current, child); addLexicalDebugInfo(current, global, scope->parentScope); } std::shared_ptr<SerializedScope> ESTreeIRGen::serializeScope( FunctionContext *ctx, bool includeGlobal) { // Serialize the global scope if and only if it's the only scope. // We serialize the global scope to avoid re-declaring variables, // and only do it once to avoid creating spurious scopes. if (!ctx || (ctx->function->isGlobalScope() && !includeGlobal)) return lexicalScopeChain; auto scope = std::make_shared<SerializedScope>(); auto *func = ctx->function; assert(func && "Missing function when saving scope"); scope->originalName = func->getOriginalOrInferredName(); if (auto *closure = func->getLazyClosureAlias()) { scope->closureAlias = closure->getName(); } for (auto *var : func->getFunctionScope()->getVariables()) { scope->variables.push_back(var->getName()); } scope->parentScope = serializeScope(ctx->getPreviousContext(), false); return scope; } } // namespace irgen } // namespace hermes
35.498476
146
0.696419
[ "object" ]
0e8b81e51801f66bf0310f40a99ee53274c1da26
1,282
cpp
C++
MyComponentEngine/GameObject.cpp
0ddarri/ComponentEngine
0641589c695f54c5c6bc940a1769c5cbdc772aea
[ "MIT" ]
null
null
null
MyComponentEngine/GameObject.cpp
0ddarri/ComponentEngine
0641589c695f54c5c6bc940a1769c5cbdc772aea
[ "MIT" ]
1
2022-03-30T08:27:58.000Z
2022-03-30T08:27:58.000Z
MyComponentEngine/GameObject.cpp
0ddarri/ComponentEngine
0641589c695f54c5c6bc940a1769c5cbdc772aea
[ "MIT" ]
null
null
null
#include "DXUT.h" #include "GameObject.h" #include "Component.h" #include "Transform.h" #include "MeshRenderer.h" GameObject::GameObject() { transform = new Transform(); } GameObject::~GameObject() { } Component* GameObject::AddComponent(Component* comp) { Component* component = comp; component->SetParent(this); g_inspector.push_back(component); std::cout << g_inspector.size() << std::endl; return component; } Component* GameObject::GetComponent(Component* comp) { for (Component* it : g_inspector) { if (it == comp) { std::wcout << "Find Component" << std::endl; return it; } } return nullptr; } void GameObject::ResetDevice() { for (auto it : g_inspector) { it->ResetDevice(); } } void GameObject::LostDevice() { for (auto it : g_inspector) { it->LostDevice(); } } void GameObject::Release() { for (auto& it : g_inspector) { it->Release(); } g_inspector.clear(); } //Component* GameObject::AddComponent(Component* component) //{ // Component* comp = new Component(component); // comp->Initialize(); // g_inspector.push_back(comp); // return comp; //} // //Component* GameObject::GetComponent(Component* component) //{ // for (Component* comp : g_inspector) // { // if (comp == component) return comp; // } // return nullptr; //}
16.227848
59
0.668487
[ "transform" ]
0e8d4f9774c5a7ff3eb48a5f8dd5e7f7340f7c55
1,988
hpp
C++
include/muse_armcl/evaluation/ground_truth_particle_set_distance.hpp
cogsys-tuebingen/muse_armcl
63eb0c8d3a1d03d84222acbb5b0c9978065bcc3c
[ "BSD-3-Clause" ]
5
2020-01-19T09:35:28.000Z
2021-11-04T10:08:24.000Z
include/muse_armcl/evaluation/ground_truth_particle_set_distance.hpp
cxdcxd/muse_armcl
63eb0c8d3a1d03d84222acbb5b0c9978065bcc3c
[ "BSD-3-Clause" ]
null
null
null
include/muse_armcl/evaluation/ground_truth_particle_set_distance.hpp
cxdcxd/muse_armcl
63eb0c8d3a1d03d84222acbb5b0c9978065bcc3c
[ "BSD-3-Clause" ]
1
2019-11-10T23:40:59.000Z
2019-11-10T23:40:59.000Z
#ifndef GROUND_TRUTH_PARTICLE_SET_DISTANCE_HPP #define GROUND_TRUTH_PARTICLE_SET_DISTANCE_HPP #include <sys/stat.h> #include <unistd.h> #include <string> #include <sstream> #include <fstream> namespace muse_armcl { struct GroundTruthParticleSetDistance { int true_point; int link; double likely_hood; double distance; double angle; double contact_force; double contact_force_true; inline std::string to_string(const std::string delimiter = std::string(";")) const { std::stringstream sstream; sstream << true_point << delimiter; sstream << link << delimiter; sstream << likely_hood << delimiter; sstream << distance << delimiter; sstream << angle << delimiter; sstream << contact_force << delimiter; sstream << contact_force_true << delimiter; return sstream.str(); } inline std::string header(const std::string delimiter = std::string(";")) const { std::stringstream sstream; sstream << "true_point" << delimiter; sstream << "link" << delimiter; sstream << "likely_hood" << delimiter; sstream << "distance" << delimiter; sstream << "angle" << delimiter; sstream << "contact_force" << delimiter; sstream << "contact_force_true" << delimiter; return sstream.str(); } }; inline void save(std::vector<GroundTruthParticleSetDistance>& data, const std::string& file) { auto exists = [](const std::string& name) { std::ifstream f(name.c_str()); return f.good(); }; bool print_header = !exists(file); std::ofstream of(file, std::ofstream::out | std::ofstream::app ); if(print_header) of << data.front().header() << std::endl; for(const GroundTruthParticleSetDistance& d : data){ of << d.to_string() << std::endl; } of.close(); data.clear(); } } #endif // GROUND_TRUTH_PARTICLE_SET_DISTANCE_HPP
30.121212
92
0.623742
[ "vector" ]
0e8fff4726b798856b4e832592497c81c529e097
4,416
cpp
C++
src/tool-humsort.cpp
humdrum-tools/minHumdrum
1f1e6a1281b40a6d6c9fed6666d96221cd619dc0
[ "BSD-2-Clause" ]
null
null
null
src/tool-humsort.cpp
humdrum-tools/minHumdrum
1f1e6a1281b40a6d6c9fed6666d96221cd619dc0
[ "BSD-2-Clause" ]
null
null
null
src/tool-humsort.cpp
humdrum-tools/minHumdrum
1f1e6a1281b40a6d6c9fed6666d96221cd619dc0
[ "BSD-2-Clause" ]
null
null
null
// // Programmer: Craig Stuart Sapp <craig@ccrma.stanford.edu> // Creation Date: Sat Jun 17 15:24:23 CEST 2017 // Last Modified: Sat Jul 8 17:17:21 CEST 2017 // Filename: tool-humsort.cpp // URL: https://github.com/craigsapp/humlib/blob/master/src/tool-humsort.cpp // Syntax: C++11; humlib // vim: ts=3 noexpandtab // // Description: Sort data spines in a Humdrum file. // #include "tool-humsort.h" #include "Convert.h" #include "HumRegex.h" #include <algorithm> #include <functional> #include <cmath> using namespace std; namespace hum { // START_MERGE ///////////////////////////////// // // Tool_humsort::Tool_humsort -- Set the recognized options for the tool. // Tool_humsort::Tool_humsort(void) { // add options here define("n|numeric=b", "Sort numerically"); define("r|reverse=b", "Sort in reversed order"); define("s|spine=i:1", "Spine to sort (1-indexed)"); define("I|do-not-ignore-case=b", "Do not ignore case when sorting alphabetically"); define("i|e|x|interp|exclusive-interpretation=s", "Exclusive interpretation to sort"); } ///////////////////////////////// // // Tool_humsort::run -- Do the main work of the tool. 
// bool Tool_humsort::run(HumdrumFileSet& infiles) { bool status = true; for (int i=0; i<infiles.getCount(); i++) { status &= run(infiles[i]); } return status; } bool Tool_humsort::run(const string& indata, ostream& out) { HumdrumFile infile(indata); bool status = run(infile); if (hasAnyText()) { getAllText(out); } else { out << infile; } return status; } bool Tool_humsort::run(HumdrumFile& infile, ostream& out) { bool status = run(infile); if (hasAnyText()) { getAllText(out); } else { out << infile; } return status; } bool Tool_humsort::run(HumdrumFile& infile) { processFile(infile); return true; } ////////////////////////////// // // Tool_humsort::processFile -- // void Tool_humsort::processFile(HumdrumFile& infile) { vector<HTp> sstarts; infile.getSpineStartList(sstarts); int spine = getInteger("spine"); if (getBoolean("exclusive-interpretation")) { string datatype = getString("exclusive-interpretation"); if (datatype.compare(0, 2, "**")) { datatype = "**" + datatype; } else if (datatype.compare(0, 1, "*")) { datatype = "*" + datatype; } for (int i=0; i<(int)sstarts.size(); i++) { if (sstarts[i]->isDataType(datatype)) { spine = sstarts[i]->getTrack(); break; } } } vector<HTp> data; data.reserve(infile.getLineCount()); HTp current = sstarts.at(spine-1); current = current->getNextToken(); while (current) { if (current->isData()) { data.push_back(current); } current = current->getNextToken(); } if (getBoolean("numeric")) { std::sort(data.begin(), data.end(), [](HTp a, HTp b) { if (*a == *b) { return 0; } if (*a == ".") { return -1; } if (*b == ".") { return 0; } char cha = a->at(0); char chb = b->at(0); if ((isdigit(cha) || cha == '-' || cha == '+' || cha == '.') && (isdigit(chb) || chb == '-' || chb == '+' || chb == '.')) { int A = stod(*a); int B = stod(*b); if (A < B) { return -1; } else { return 0; } } // one value is not a number for some reason, so compare as string return *a < *b ? 
-1 : 0; }); } else { // alphabetic sorting if (!getBoolean("do-not-ignore-case")) { std::sort(data.begin(), data.end(), [](HTp a, HTp b) { string A = *a; string B = *b; std::transform(A.begin(), A.end(), A.begin(), ::tolower); std::transform(B.begin(), B.end(), B.begin(), ::tolower); return A < B; }); } else { std::sort(data.begin(), data.end(), [](HTp a, HTp b) { return *a < *b; }); } } for (int i=0; i<infile.getLineCount(); i++) { if (infile[i].hasSpines()) { m_humdrum_text << infile[i] << endl; break; } m_humdrum_text << infile[i] << endl; } if (getBoolean("reverse")) { for (int i=(int)data.size()-1; i>=0; i--) { m_humdrum_text << data[i]->getOwner() << endl; } } else { for (int i=0; i<(int)data.size(); i++) { m_humdrum_text << data[i]->getOwner() << endl; } } for (int i=0; i<infile.getLineCount(); i++) { if (*infile[i].token(0) != "*-") { continue; } for (int j=i; j<infile.getLineCount(); j++) { m_humdrum_text << infile[j] << endl; } } } // END_MERGE } // end namespace hum
21.970149
87
0.574955
[ "vector", "transform" ]
0e9383fae9ceb72da755ed3259ee85cafd5fb5a9
4,167
cc
C++
RecoPixelVertexing/PixelTrackFitting/plugins/KFBasedPixelFitterProducer.cc
nistefan/cmssw
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
[ "Apache-2.0" ]
null
null
null
RecoPixelVertexing/PixelTrackFitting/plugins/KFBasedPixelFitterProducer.cc
nistefan/cmssw
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
[ "Apache-2.0" ]
null
null
null
RecoPixelVertexing/PixelTrackFitting/plugins/KFBasedPixelFitterProducer.cc
nistefan/cmssw
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
[ "Apache-2.0" ]
null
null
null
#include "FWCore/Framework/interface/Frameworkfwd.h" #include "FWCore/Framework/interface/global/EDProducer.h" #include "FWCore/Framework/interface/Event.h" #include "FWCore/Framework/interface/MakerMacros.h" #include "FWCore/Framework/interface/EventSetup.h" #include "FWCore/Framework/interface/ESHandle.h" #include "FWCore/ParameterSet/interface/ParameterSet.h" #include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" #include "FWCore/ParameterSet/interface/ParameterSetDescription.h" #include "RecoPixelVertexing/PixelTrackFitting/interface/PixelFitter.h" #include "RecoPixelVertexing/PixelTrackFitting/interface/KFBasedPixelFitter.h" #include "DataFormats/BeamSpot/interface/BeamSpot.h" #include "TrackingTools/Records/interface/TransientRecHitRecord.h" #include "TrackingTools/Records/interface/TrackingComponentsRecord.h" #include "MagneticField/Records/interface/IdealMagneticFieldRecord.h" #include "Geometry/TrackerGeometryBuilder/interface/TrackerGeometry.h" #include "TrackingTools/Records/interface/TrackingComponentsRecord.h" class KFBasedPixelFitterProducer: public edm::global::EDProducer<> { public: explicit KFBasedPixelFitterProducer(const edm::ParameterSet& iConfig); ~KFBasedPixelFitterProducer() override {} static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: void produce(edm::StreamID, edm::Event& iEvent, const edm::EventSetup& iSetup) const override; std::string thePropagatorLabel; std::string thePropagatorOppositeLabel; std::string theTTRHBuilderName; edm::EDGetTokenT<reco::BeamSpot> theBeamSpotToken; }; KFBasedPixelFitterProducer::KFBasedPixelFitterProducer(const edm::ParameterSet& iConfig): thePropagatorLabel(iConfig.getParameter<std::string>("propagator")), thePropagatorOppositeLabel(iConfig.getParameter<std::string>("propagator")), theTTRHBuilderName(iConfig.getParameter<std::string>("TTRHBuilder")) { if(iConfig.getParameter<bool>("useBeamSpotConstraint")) { theBeamSpotToken = 
consumes<reco::BeamSpot>(iConfig.getParameter<edm::InputTag>("beamSpotConstraint")); } produces<PixelFitter>(); } void KFBasedPixelFitterProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { edm::ParameterSetDescription desc; desc.add<bool>("useBeamSpotConstraint", true); desc.add<edm::InputTag>("beamSpotConstraint", edm::InputTag("offlineBeamSpot")); desc.add<std::string>("propagator", "PropagatorWithMaterial"); desc.add<std::string>("propagatorOpposite", "PropagatorWithMaterialOpposite"); desc.add<std::string>("TTRHBuilder", "PixelTTRHBuilderWithoutAngle"); descriptions.add("kfBasedPixelFitter", desc); } void KFBasedPixelFitterProducer::produce(edm::StreamID, edm::Event& iEvent, const edm::EventSetup& iSetup) const { edm::ESHandle<TransientTrackingRecHitBuilder> ttrhb; iSetup.get<TransientRecHitRecord>().get( theTTRHBuilderName, ttrhb); edm::ESHandle<Propagator> propagator; iSetup.get<TrackingComponentsRecord>().get(thePropagatorLabel, propagator); edm::ESHandle<Propagator> opropagator; iSetup.get<TrackingComponentsRecord>().get(thePropagatorOppositeLabel, opropagator); edm::ESHandle<TrackerGeometry> tracker; iSetup.get<TrackerDigiGeometryRecord>().get(tracker); edm::ESHandle<MagneticField> field; iSetup.get<IdealMagneticFieldRecord>().get(field); const reco::BeamSpot *beamspot = nullptr; if(!theBeamSpotToken.isUninitialized()) { edm::Handle<reco::BeamSpot> hbs; iEvent.getByToken(theBeamSpotToken, hbs); beamspot = hbs.product(); } auto impl = std::make_unique<KFBasedPixelFitter>(&iSetup, propagator.product(), opropagator.product(), ttrhb.product(), tracker.product(), field.product(), beamspot); auto prod = std::make_unique<PixelFitter>(std::move(impl)); iEvent.put(std::move(prod)); } DEFINE_FWK_MODULE(KFBasedPixelFitterProducer);
42.520408
114
0.737221
[ "geometry" ]
0e97d663e2c1be4dd34146556292dce10c5e5e6e
2,796
cpp
C++
src/ReplyKeyboard.cpp
miguelfazenda/AsyncTelegram
d6499369d9bc597b8f9f66ef48ce8362729c3ad5
[ "MIT" ]
null
null
null
src/ReplyKeyboard.cpp
miguelfazenda/AsyncTelegram
d6499369d9bc597b8f9f66ef48ce8362729c3ad5
[ "MIT" ]
null
null
null
src/ReplyKeyboard.cpp
miguelfazenda/AsyncTelegram
d6499369d9bc597b8f9f66ef48ce8362729c3ad5
[ "MIT" ]
null
null
null
#include "ReplyKeyboard.h" #include "Utilities.h" ReplyKeyboard::ReplyKeyboard() { m_json = "{\"keyboard\":[[]]}\""; } ReplyKeyboard::~ReplyKeyboard() {} bool ReplyKeyboard::addRow() { if(m_jsonSize < BUFFER_SMALL) m_jsonSize = BUFFER_SMALL; DynamicJsonDocument doc(m_jsonSize + 64); // Current size + space for new row (empty) deserializeJson(doc, m_json); JsonArray rows = doc["keyboard"]; rows.createNestedArray(); m_json.clear(); serializeJson(doc, m_json); m_jsonSize = doc.memoryUsage(); return true; } bool ReplyKeyboard::addButton(const char* text, ReplyKeyboardButtonType buttonType) { if ((buttonType != KeyboardButtonContact) && (buttonType != KeyboardButtonLocation) && (buttonType != KeyboardButtonSimple)) return false; // As reccomended use local JsonDocument instead global // inline keyboard json structure will be stored in a String var if(m_jsonSize < BUFFER_SMALL) m_jsonSize = BUFFER_SMALL; DynamicJsonDocument doc(m_jsonSize + 128); // Current size + space for new object (button) deserializeJson(doc, m_json); JsonArray rows = doc["keyboard"]; JsonObject button = rows[rows.size()-1].createNestedObject(); button["text"] = text; switch (buttonType){ case KeyboardButtonContact: button["request_contact"] = true; break; case KeyboardButtonLocation: button["request_location"] = true; break; default: break; } // Store inline keyboard json structure m_json.clear(); serializeJson(doc, m_json); m_jsonSize = doc.memoryUsage(); return true; } void ReplyKeyboard::enableResize() { if(m_jsonSize < BUFFER_SMALL) m_jsonSize = BUFFER_SMALL; DynamicJsonDocument doc(m_jsonSize + 64); // Current size + space for new field deserializeJson(doc, m_json); doc["resize_keyboard"] = true; m_json.clear(); serializeJson(doc, m_json); } void ReplyKeyboard::enableOneTime() { if(m_jsonSize < BUFFER_SMALL) m_jsonSize = BUFFER_SMALL; DynamicJsonDocument doc(m_jsonSize + 64); // Current size + space for new field deserializeJson(doc, m_json); doc["one_time_keyboard"] = true; m_json.clear(); 
serializeJson(doc, m_json); } void ReplyKeyboard::enableSelective() { if(m_jsonSize < BUFFER_SMALL) m_jsonSize = BUFFER_SMALL; DynamicJsonDocument doc(m_jsonSize + 64); // Current size + space for new field deserializeJson(doc, m_json); doc["selective"] = true; m_json.clear(); serializeJson(doc, m_json); } String ReplyKeyboard::getJSON() const { return m_json; } String ReplyKeyboard::getJSONPretty() const { uint16_t jsonSize; if(m_jsonSize < BUFFER_SMALL) jsonSize = BUFFER_SMALL; DynamicJsonDocument doc(jsonSize + 64); // Current size + space for new lines deserializeJson(doc, m_json); String serialized; serializeJsonPretty(doc, serialized); return serialized; }
24.964286
92
0.730687
[ "object" ]
0e9f8c6b11d5204abe2e7bf1cee1d0bc34afb572
5,016
cc
C++
src/Core/Algorithms/Legacy/Fields/TransformMesh/ScaleFieldMeshAndData.cc
benjaminlarson/SCIRunGUIPrototype
ed34ee11cda114e3761bd222a71a9f397517914d
[ "Unlicense" ]
null
null
null
src/Core/Algorithms/Legacy/Fields/TransformMesh/ScaleFieldMeshAndData.cc
benjaminlarson/SCIRunGUIPrototype
ed34ee11cda114e3761bd222a71a9f397517914d
[ "Unlicense" ]
null
null
null
src/Core/Algorithms/Legacy/Fields/TransformMesh/ScaleFieldMeshAndData.cc
benjaminlarson/SCIRunGUIPrototype
ed34ee11cda114e3761bd222a71a9f397517914d
[ "Unlicense" ]
null
null
null
/* For more information, please see: http://software.sci.utah.edu The MIT License Copyright (c) 2009 Scientific Computing and Imaging Institute, University of Utah. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #include <Core/Algorithms/Legacy/Fields/TransformMesh/ScaleFieldMeshAndData.h> #include <Core/Datatypes/Legacy/Field/VField.h> #include <Core/Datatypes/Legacy/Field/VMesh.h> #include <Core/Datatypes/Legacy/Field/Field.h> using namespace SCIRun; using namespace SCIRun::Core::Datatypes; using namespace SCIRun::Core::Algorithms::Fields; using namespace SCIRun::Core::Geometry; using namespace SCIRun::Core::Utility; using namespace SCIRun::Core::Algorithms; ALGORITHM_PARAMETER_DEF(Fields, data_scale); ALGORITHM_PARAMETER_DEF(Fields, mesh_scale); ALGORITHM_PARAMETER_DEF(Fields, scale_from_center); ScaleFieldMeshAndDataAlgo::ScaleFieldMeshAndDataAlgo() { addParameter(Parameters::data_scale,1.0); addParameter(Parameters::mesh_scale,1.0); addParameter(Parameters::scale_from_center,false); } namespace { template <class T> void ScaleFieldMeshAndDataAlgoT(double scale, FieldHandle output) { std::vector<T> values; output->vfield()->get_values(values); for (size_t j=0;j<values.size();j++) values[j] = static_cast<T>(scale*values[j]); output->vfield()->set_values(values); } } bool ScaleFieldMeshAndDataAlgo::runImpl(FieldHandle input, FieldHandle& output) const { ScopedAlgorithmStatusReporter asr(this, "ScaleFieldMeshAndData"); if (!input) { error("No input field"); return (false); } bool scale_from_center = get(Parameters::scale_from_center).toBool(); double datascale = get(Parameters::data_scale).toDouble(); double meshscale = get(Parameters::mesh_scale).toDouble(); // scale mesh, only when needed if (scale_from_center || (meshscale != 1.0)) { output.reset(input->deep_clone()); Transform tf; BBox box = input->vmesh()->get_bounding_box(); Vector center = 0.5*(box.get_min()+box.get_max()); tf.load_identity(); if (scale_from_center) tf.pre_translate(-center); tf.pre_scale(Vector(meshscale,meshscale,meshscale)); if (scale_from_center) tf.pre_translate(center); output->vmesh()->transform(tf); } else { output.reset(input->clone()); } if (!output) { error("Could not allocate output 
field"); return (false); } if (datascale != 1.0) { VField* ofield = output->vfield(); if (ofield->is_tensor()) ScaleFieldMeshAndDataAlgoT<Tensor>(datascale,output); if (ofield->is_vector()) ScaleFieldMeshAndDataAlgoT<Vector>(datascale,output); if (ofield->is_double()) ScaleFieldMeshAndDataAlgoT<double>(datascale,output); if (ofield->is_float()) ScaleFieldMeshAndDataAlgoT<float>(datascale,output); if (ofield->is_char()) ScaleFieldMeshAndDataAlgoT<char>(datascale,output); if (ofield->is_unsigned_char()) ScaleFieldMeshAndDataAlgoT<unsigned char>(datascale,output); if (ofield->is_short()) ScaleFieldMeshAndDataAlgoT<short>(datascale,output); if (ofield->is_unsigned_short()) ScaleFieldMeshAndDataAlgoT<unsigned short>(datascale,output); if (ofield->is_int()) ScaleFieldMeshAndDataAlgoT<int>(datascale,output); if (ofield->is_unsigned_int()) ScaleFieldMeshAndDataAlgoT<unsigned int>(datascale,output); if (ofield->is_longlong()) ScaleFieldMeshAndDataAlgoT<long long>(datascale,output); if (ofield->is_unsigned_longlong()) ScaleFieldMeshAndDataAlgoT<unsigned long long>(datascale,output); } CopyProperties(*input, *output); return true; } AlgorithmOutput ScaleFieldMeshAndDataAlgo::run_generic(const AlgorithmInput& input) const { throw "todo"; //auto field = input.get<Field>(Variables::InputField); //FieldHandle outputField; //if (!runImpl(field, outputField)) // THROW_ALGORITHM_PROCESSING_ERROR("False returned on legacy run call."); //AlgorithmOutput output; //output[Variables::OutputField] = outputField; //return output; }
36.086331
105
0.742624
[ "mesh", "geometry", "vector", "transform" ]
0ea87239ad01f0395d8cdb43db125c5296a84545
1,241
hpp
C++
range.hpp
LiadBenMoshe/Cpp-itertools-Task5
5d5052b953bf16a8ecc17572adceba8bdada467c
[ "MIT" ]
null
null
null
range.hpp
LiadBenMoshe/Cpp-itertools-Task5
5d5052b953bf16a8ecc17572adceba8bdada467c
[ "MIT" ]
null
null
null
range.hpp
LiadBenMoshe/Cpp-itertools-Task5
5d5052b953bf16a8ecc17572adceba8bdada467c
[ "MIT" ]
null
null
null
/* AUTHORS: Liad Ben Moshe */ #pragma once #include<iostream> #include<vector> using namespace std; namespace itertools{ class range{ int start; int finish; public: range(){} range(int start1,int end1){ start=start1; finish=end1; } range(int end1){ start=0; finish=end1; } class iterator{ int current; public: iterator(int num){ current=num; } int operator*() { return current; } iterator& operator=(iterator& other){ return other; } bool operator==( iterator other) { return current == other.current; } bool operator!=( iterator other) { return !(current == other.current); } iterator& operator++() { current++; return *this; } iterator& operator++(int) { ++current; return *this; } }; iterator begin() const{ return iterator(start); } iterator end() const{ return iterator(finish); } }; }
16.328947
86
0.450443
[ "vector" ]
0eb5442f0bb2dc4d12af282e23d3bdfacde2b8f5
16,597
hpp
C++
third_party/amo/amo/network.hpp
amoylel/NCUI
a3b315ebf97d9903766efdafa42c24d4212d5ad6
[ "BSD-2-Clause" ]
24
2018-11-20T14:45:57.000Z
2021-12-30T13:38:42.000Z
third_party/amo/amo/network.hpp
amoylel/NCUI
a3b315ebf97d9903766efdafa42c24d4212d5ad6
[ "BSD-2-Clause" ]
null
null
null
third_party/amo/amo/network.hpp
amoylel/NCUI
a3b315ebf97d9903766efdafa42c24d4212d5ad6
[ "BSD-2-Clause" ]
11
2018-11-29T00:09:14.000Z
2021-11-23T08:13:17.000Z
/* * Copyright (c) 2014, Peter Thorson. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the WebSocket++ Project nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL PETER THORSON BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #ifndef AMO_NETWORK_HPP #define AMO_NETWORK_HPP #include <utility> #include <string> #include <vector> #include <sstream> #include <amo/config.hpp> #include <amo/stdint.hpp> #include <iomanip> #include <amo/utility.hpp> namespace amo { #define TYP_INIT 0 #define TYP_SMLE 1 #define TYP_BIGE 2 /// Convert 64 bit value to network byte order /** * This method is prefixed to avoid conflicts with operating system level * macros for this functionality. 
* * TODO: figure out if it would be beneficial to use operating system level * macros for this. * * @param src The integer in host byte order * @return src converted to network byte order */ inline uint64_t _htonll(uint64_t src) { static int typ = TYP_INIT; unsigned char c; union { uint64_t ull; unsigned char c[8]; } x; if (typ == TYP_INIT) { x.ull = 0x01; typ = (x.c[7] == 0x01ULL) ? TYP_BIGE : TYP_SMLE; } if (typ == TYP_BIGE) { return src; } x.ull = src; c = x.c[0]; x.c[0] = x.c[7]; x.c[7] = c; c = x.c[1]; x.c[1] = x.c[6]; x.c[6] = c; c = x.c[2]; x.c[2] = x.c[5]; x.c[5] = c; c = x.c[3]; x.c[3] = x.c[4]; x.c[4] = c; return x.ull; } /// Convert 64 bit value to host byte order /** * This method is prefixed to avoid conflicts with operating system level * macros for this functionality. * * TODO: figure out if it would be beneficial to use operating system level * macros for this. * * @param src The integer in network byte order * @return src converted to host byte order */ inline uint64_t _ntohll(uint64_t src) { return _htonll(src); } // Looks for first MAC address of any network device, any size. 
static bool get_any_mac(std::vector<unsigned char> &_node) { $windows({ PIP_ADAPTER_INFO pAdapterInfo; PIP_ADAPTER_INFO pAdapter = 0; ULONG len = sizeof(IP_ADAPTER_INFO); pAdapterInfo = reinterpret_cast<IP_ADAPTER_INFO*>(new char[len]); // Make an initial call to GetAdaptersInfo to get // the necessary size into len DWORD rc = GetAdaptersInfo(pAdapterInfo, &len); if (rc == ERROR_BUFFER_OVERFLOW) { delete[] reinterpret_cast<char*>(pAdapterInfo); pAdapterInfo = reinterpret_cast<IP_ADAPTER_INFO*>(new char[len]); } else if (rc != ERROR_SUCCESS) { return ("cannot get network adapter list"), false; } bool found = false, gotten = false; if (GetAdaptersInfo(pAdapterInfo, &len) == NO_ERROR) { gotten = true; pAdapter = pAdapterInfo; while (pAdapter && !found) { if (pAdapter->Type == MIB_IF_TYPE_ETHERNET && pAdapter->AddressLength > 0) { _node.resize(pAdapter->AddressLength); std::memcpy(_node.data(), pAdapter->Address, _node.size()); found = true; } pAdapter = pAdapter->Next; } } delete[] reinterpret_cast<char*>(pAdapterInfo); if (!gotten) return ("cannot get network adapter list"), false; if (!found) return ("no Ethernet adapter found"), false; return true; }) $bsd({ struct ifaddrs* ifaphead; int rc = getifaddrs(&ifaphead); if (rc) return ("cannot get network adapter list"), false; bool foundAdapter = false; for (struct ifaddrs* ifap = ifaphead; ifap; ifap = ifap->ifa_next) { if (ifap->ifa_addr && ifap->ifa_addr->sa_family == AF_LINK) { struct sockaddr_dl* sdl = reinterpret_cast<struct sockaddr_dl*>(ifap->ifa_addr); caddr_t ap = (caddr_t)(sdl->sdl_data + sdl->sdl_nlen); int alen = sdl->sdl_alen; if (ap && alen > 0) { _node.resize(alen); std::memcpy(_node.data(), ap, _node.size()); foundAdapter = true; break; } } } freeifaddrs(ifaphead); if (!foundAdapter) return ("cannot determine MAC address (no suitable network adapter found)"), false; return true; }) $osx({ struct ifaddrs* ifaphead; int rc = getifaddrs(&ifaphead); if (rc) return ("cannot get network adapter list"), 
false; bool foundAdapter = false; for (struct ifaddrs* ifap = ifaphead; ifap; ifap = ifap->ifa_next) { if (ifap->ifa_addr && ifap->ifa_addr->sa_family == AF_LINK) { struct sockaddr_dl* sdl = reinterpret_cast<struct sockaddr_dl*>(ifap->ifa_addr); caddr_t ap = (caddr_t)(sdl->sdl_data + sdl->sdl_nlen); int alen = sdl->sdl_alen; if (ap && alen > 0) { _node.resize(alen); std::memcpy(_node.data(), ap, _node.size()); foundAdapter = true; break; } } } freeifaddrs(ifaphead); if (!foundAdapter) return ("cannot determine MAC address (no suitable network adapter found)"), false; return true; }) $linux({ struct ifreq ifr; int s = ::socket(PF_INET, SOCK_DGRAM, 0); if (s == -1) return ("cannot open socket"), false; std::strcpy(ifr.ifr_name, "eth0"); int rc = ioctl(s, SIOCGIFHWADDR, &ifr); close(s); if (rc < 0) return ("cannot get MAC address"), false; struct sockaddr* sa = reinterpret_cast<struct sockaddr*>(&ifr.ifr_addr); _node.resize(sizeof(sa->sa_data)); std::memcpy(_node.data(), sa->sa_data, _node.size()); return true; }) $unix({ char name[MAXHOSTNAMELEN]; if (gethostname(name, sizeof(name))) return ("cannot get host name"), false; struct hostent* pHost = gethostbyname(name); if (!pHost) return ("cannot get host IP address"), false; int s = socket(PF_INET, SOCK_DGRAM, IPPROTO_UDP); if (s == -1) return ("cannot open socket"), false; struct arpreq ar; std::memset(&ar, 0, sizeof(ar)); struct sockaddr_in* pAddr = reinterpret_cast<struct sockaddr_in*>(&ar.arp_pa); pAddr->sin_family = AF_INET; std::memcpy(&pAddr->sin_addr, *pHost->h_addr_list, sizeof(struct in_addr)); int rc = ioctl(s, SIOCGARP, &ar); close(s); if (rc < 0) return ("cannot get MAC address"), false; _node.resize(sizeof(ar.arp_ha.sa_data)); std::memcpy(_node.data(), ar.arp_ha.sa_data, _node.size()); return true; }) } // Looks for first MAC address of any network device, size truncated to 48bits. 
static uint64_t get_any_mac48() { std::vector<unsigned char> node; if (get_any_mac(node)) { std::stringstream ss; ss << std::hex << std::setfill('0'); node.resize(6); for (unsigned i = 0; i < 6; ++i) { ss << std::setw(2) << int(node[i]); } uint64_t t; if (ss >> t) { return t; } } return 0; } static bool is_valid_ip(const std::string& ip) { return true; /*using namespace boost::xpressive; cregex reg_ip = cregex::compile("(25[0-4]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[1-9])[.](25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])[.](25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])[.](25[0-4]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[1-9])"); return regex_match(ip.c_str(), reg_ip);*/ } static bool is_empty_ip(const std::string& ip) { return ip == std::string("0.0.0.0"); } #ifdef WIN32 typedef struct tagIpAdapterInfo { std::string name; //网卡名称 std::string description; //网卡描述 uint32_t type; //网卡类型 std::string addr; //IP地址 std::string mac; //MAC std::string mask; //子网掩码 std::string gateway; //网关 bool active() { return is_valid_ip(gateway) && !is_empty_ip(gateway); //return !gateway.empty(); } } IpAdapterInfo; static std::vector<IpAdapterInfo> get_adapters() { std::vector<IpAdapterInfo> rec; //PIP_ADAPTER_INFO结构体指针存储本机网卡信息 PIP_ADAPTER_INFO pIpAdapterInfo = new IP_ADAPTER_INFO(); //得到结构体大小,用于GetAdaptersInfo参数 unsigned long stSize = sizeof(IP_ADAPTER_INFO); //调用GetAdaptersInfo函数,填充pIpAdapterInfo指针变量;其中stSize参数既是一个输入量也是一个输出量 int nRel = GetAdaptersInfo(pIpAdapterInfo, &stSize); //记录网卡数量 int netCardNum = 0; //记录每张网卡上的IP地址数量 int IPnumPerNetCard = 0; if (ERROR_BUFFER_OVERFLOW == nRel) { //如果函数返回的是ERROR_BUFFER_OVERFLOW //则说明GetAdaptersInfo参数传递的内存空间不够,同时其传出stSize,表示需要的空间大小 //这也是说明为什么stSize既是一个输入量也是一个输出量 //释放原来的内存空间 delete pIpAdapterInfo; //重新申请内存空间用来存储所有网卡信息 pIpAdapterInfo = (PIP_ADAPTER_INFO)new BYTE[stSize]; //再次调用GetAdaptersInfo函数,填充pIpAdapterInfo指针变量 nRel = GetAdaptersInfo(pIpAdapterInfo, &stSize); } if (ERROR_SUCCESS == nRel) { //输出网卡信息 //可能有多网卡,因此通过循环去判断 while (pIpAdapterInfo) { 
IpAdapterInfo adapter_info; adapter_info.name = pIpAdapterInfo->AdapterName; adapter_info.description = pIpAdapterInfo->Description; adapter_info.type = pIpAdapterInfo->Type; for (DWORD i = 0; i < pIpAdapterInfo->AddressLength; i++) { if (i < pIpAdapterInfo->AddressLength - 1) { char tmp[4] = { 0 }; ::sprintf(tmp, "%02x-", pIpAdapterInfo->Address[i]); adapter_info.mac += tmp; //adapter_info.mac += (boost::format("%02x-") % pIpAdapterInfo->Address[i]).str(); } else { char tmp[4] = { 0 }; ::sprintf(tmp, "%02x", pIpAdapterInfo->Address[i]); adapter_info.mac += tmp; //adapter_info.mac += (boost::format("%02x") % pIpAdapterInfo->Address[i]).str(); } } //可能网卡有多IP,因此通过循环去判断 IP_ADDR_STRING *pIpAddrString = &(pIpAdapterInfo->IpAddressList); do { adapter_info.addr = pIpAddrString->IpAddress.String; adapter_info.mask = pIpAddrString->IpMask.String; adapter_info.gateway = pIpAdapterInfo->GatewayList.IpAddress.String; pIpAddrString = pIpAddrString->Next; } while (pIpAddrString); rec.push_back(adapter_info); pIpAdapterInfo = pIpAdapterInfo->Next; } } //释放内存空间 if (pIpAdapterInfo) { delete pIpAdapterInfo; } return rec; } class IpAdapterInfos : public std::vector < IpAdapterInfo > { public: IpAdapterInfos() { std::vector<IpAdapterInfo> vecs = get_adapters(); std::copy(vecs.begin(), vecs.end(), std::back_inserter(*this)); } iterator get_best_adapter() { IpAdapterInfos::iterator iter = begin(); for (; iter != end(); ++iter) { if (iter->active()) { break; } } return iter; } }; #endif static int32_t address_to_number(const std::string& strIP) { //IP转化为数值 //没有格式检查 //返回值就是结果 int32_t a[4]; std::string IP = strIP; std::string strTemp; size_t pos; size_t i = 3; do { pos = IP.find("."); if (pos != std::string::npos) { strTemp = IP.substr(0, pos); a[i] = atoi(strTemp.c_str()); i--; IP.erase(0, pos + 1); } else { strTemp = IP; a[i] = atoi(strTemp.c_str()); break; } } while (1); int32_t nResult = (a[3] << 24) + (a[2] << 16) + (a[1] << 8) + a[0]; return nResult; } static std::string 
number_to_address(const int32_t& nValue) { //数值转化为IP //没有格式检查 //返回值就是结果 char strTemp[20]; ::sprintf(strTemp, "%d.%d.%d.%d", (nValue & 0xff000000) >> 24, (nValue & 0x00ff0000) >> 16, (nValue & 0x0000ff00) >> 8, (nValue & 0x000000ff)); return std::string(strTemp); } static std::pair<int32_t, int32_t> get_ip_scope(const std::string& ip, const std::string& mask) { int32_t ip_number = 0; int32_t net_mask_number = 0, ip_count = 0; ip_number = address_to_number(ip); if (mask.empty()) { return std::pair<int32_t, int32_t>(ip_number, ip_number); } net_mask_number = address_to_number(mask); ip_count = address_to_number("255.255.255.255") - net_mask_number; int32_t net_ip = ip_number & net_mask_number; int32_t host_scope = net_ip + ip_count; return std::pair<int32_t, int32_t>(net_ip, host_scope); } } // amo #endif // AMO_NETWORK_HPP
34.794549
253
0.50967
[ "vector" ]
0eb60b8d247e6d879498607171a9ec75c33f3eb7
57,037
cpp
C++
src/gui/event/EventLocatorPanel.cpp
rockstorm101/GMAT
00b6b61a40560c095da3d83dab4ab1e9157f01c7
[ "Apache-2.0" ]
1
2018-09-18T07:09:36.000Z
2018-09-18T07:09:36.000Z
src/gui/event/EventLocatorPanel.cpp
rockstorm101/GMAT
00b6b61a40560c095da3d83dab4ab1e9157f01c7
[ "Apache-2.0" ]
null
null
null
src/gui/event/EventLocatorPanel.cpp
rockstorm101/GMAT
00b6b61a40560c095da3d83dab4ab1e9157f01c7
[ "Apache-2.0" ]
2
2020-06-18T04:45:30.000Z
2021-07-20T02:11:54.000Z
//$Id$ //------------------------------------------------------------------------------ // EventLocatorPanel //------------------------------------------------------------------------------ // GMAT: General Mission Analysis Tool // // // Copyright (c) 2002 - 2015 United States Government as represented by the // Administrator of the National Aeronautics and Space Administration. // All Other Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // You may not use this file except in compliance with the License. // You may obtain a copy of the License at: // http://www.apache.org/licenses/LICENSE-2.0. // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language // governing permissions and limitations under the License. // // Author: Wendy Shoan // Created: 2015.04.01 /** * This class sets up Event Locator parameters. */ //------------------------------------------------------------------------------ #include "EventLocatorPanel.hpp" #include "MessageInterface.hpp" #include "StringUtil.hpp" #include "GmatStaticBoxSizer.hpp" #include "TimeSystemConverter.hpp" #include "GmatAppData.hpp" #include "EventLocator.hpp" #include "GmatDefaults.hpp" #include <wx/config.h> // ?? 
#include "bitmaps/OpenFolder.xpm" //#define DEBUG_EVENTPANEL_CREATE //#define DEBUG_EVENTPANEL_LOAD //#define DEBUG_EVENTPANEL_SAVE //#define DEBUG_EVENTPANEL_SAVE_COEFS //#define DEBUG_EVENTPANEL_PANEL_COMBOBOX //#define DEBUG_ECLIPSE_OBSERVERS //------------------------------ // event tables for wxWidgets //------------------------------ BEGIN_EVENT_TABLE(EventLocatorPanel, GmatPanel) EVT_BUTTON(ID_BUTTON_OK, GmatPanel::OnOK) EVT_BUTTON(ID_BUTTON_APPLY, GmatPanel::OnApply) EVT_BUTTON(ID_BUTTON_CANCEL, GmatPanel::OnCancel) EVT_BUTTON(ID_BUTTON_SCRIPT, GmatPanel::OnScript) EVT_TEXT(ID_TEXTCTRL, EventLocatorPanel::OnTextChange) EVT_COMBOBOX(ID_COMBOBOX, EventLocatorPanel::OnComboBoxChange) EVT_COMBOBOX(ID_RUNMODE_COMBOBOX, EventLocatorPanel::OnComboBoxChange) EVT_BUTTON(ID_BUTTON_BROWSE, EventLocatorPanel::OnBrowseButton) EVT_CHECKBOX(ID_CHECKBOX, EventLocatorPanel::OnCheckBoxChange) EVT_CHECKLISTBOX(ID_CHECKLISTBOX_BODIES, EventLocatorPanel::OnCheckListBoxChange) EVT_CHECKLISTBOX(ID_CHECKLISTBOX_ECLIPSE, EventLocatorPanel::OnCheckListBoxChange) EVT_CHECKLISTBOX(ID_CHECKLISTBOX_OBSERVER, EventLocatorPanel::OnCheckListBoxChange) EVT_LISTBOX(ID_CHECKLISTBOX_BODIES, EventLocatorPanel::OnCheckListBoxSelect) EVT_LISTBOX(ID_CHECKLISTBOX_ECLIPSE, EventLocatorPanel::OnCheckListBoxSelect) EVT_LISTBOX(ID_CHECKLISTBOX_OBSERVER, EventLocatorPanel::OnCheckListBoxSelect) END_EVENT_TABLE() //------------------------------ // public methods //------------------------------ //------------------------------------------------------------------------------ // EventLocatorPanel(wxWindow *parent, const wxString &name) //------------------------------------------------------------------------------ /** * Constructs EventLocatorPanel object. 
*/ //------------------------------------------------------------------------------ EventLocatorPanel::EventLocatorPanel(wxWindow *parent, const wxString &name) : GmatPanel(parent, true, true) { #ifdef DEBUG_EVENTPANEL_CREATE MessageInterface::ShowMessage("Entering ELP::Constructor, name = %s\n", name.WX_TO_STD_STRING.c_str()); #endif mObjectName = name.c_str(); theObject = theGuiInterpreter->GetConfiguredObject(name.c_str()); #ifdef DEBUG_EVENTPANEL_CREATE if (!theObject) MessageInterface::ShowMessage("In ELP::Constructor, theObject is NULL!!!\n"); #endif if (theObject->IsOfType("EclipseLocator")) isEclipse = true; else // ContactLocator isEclipse = false; #ifdef DEBUG_EVENTPANEL_CREATE MessageInterface::ShowMessage ("EventLocatorPanel() constructor entered, theObject=<%p>'%s'\n", theObject, theObject->GetTypeName().c_str()); #endif ResetChangedFlags(); // theGuiManager = GuiItemManager::GetInstance(); // theGuiInterpreter = GmatAppData::Instance()->GetGuiInterpreter(); ss = theGuiInterpreter->GetSolarSystemInUse(); // To set panel object and show warning if object is NULL if (SetObject(theObject)) { Create(); Show(); } } //------------------------------------------------------------------------------ // ~EventLocatorPanel() //------------------------------------------------------------------------------ EventLocatorPanel::~EventLocatorPanel() { if (localObject != NULL) delete localObject; theGuiManager->UnregisterCheckListBox("SpacePoint", bodiesCheckListBox); theGuiManager->UnregisterComboBox("Spacecraft", scTargetComboBox); if (!isEclipse) theGuiManager->UnregisterCheckListBox("GroundStation", observersCheckListBox); } //------------------------------- // private methods //------------------------------- //---------------------------------- // methods inherited from GmatPanel //---------------------------------- //------------------------------------------------------------------------------ // void Create() 
//------------------------------------------------------------------------------
/**
 * Builds every widget on the panel and arranges them in nested sizers.
 * Left column: spacecraft/target, occulting bodies, eclipse types or
 * observers, report-file controls.  Right column: epoch interval controls
 * (upper box) and light-time / step-size controls (lower box).
 */
void EventLocatorPanel::Create()
{
   #ifdef DEBUG_EVENTPANEL_CREATE
   MessageInterface::ShowMessage("EventLocatorPanel::Create() entered\n");
   #endif

   localObject = NULL;

   bool centeredCheckboxes = false; // set to true for checkboxes centered horizontally
   Integer staticTextWidth = 110;
   if (centeredCheckboxes)
      staticTextWidth = 140;
   Integer staticTextLarger = staticTextWidth + 12;

   Integer bsize = 2; // border size

   // get the config object
   wxConfigBase *pConfig = wxConfigBase::Get();
   // SetPath() understands ".."
   pConfig->SetPath(wxT("/Event Locator"));

   wxArrayString emptyList;

   int epochWidth = 170;
   int buttonWidth = 25;
   #ifdef __WXMAC__
   epochWidth = 178;
   buttonWidth = 40;
   #endif

   wxBitmap openBitmap = wxBitmap(OpenFolder_xpm);

   //-----------------------------------------------------------------
   // create sizers
   //-----------------------------------------------------------------
   wxBoxSizer *eventSizer = new wxBoxSizer(wxHORIZONTAL);
   wxFlexGridSizer *leftGridSizer = new wxFlexGridSizer( 3, 0, 0 );
//   wxFlexGridSizer *rightGridSizer = new wxFlexGridSizer( 1, 0, 0 );
   wxBoxSizer *leftSizer = new wxBoxSizer(wxVERTICAL);
   wxBoxSizer *rightSizer = new wxBoxSizer(wxVERTICAL);
   wxFlexGridSizer *upperRightGridSizer = new wxFlexGridSizer( 3, 0, 0 );
   wxFlexGridSizer *lowerRightGridSizer = new wxFlexGridSizer( 3, 0, 0 );
   GmatStaticBoxSizer *sizerUpperRight = new GmatStaticBoxSizer( wxVERTICAL, this, "" );
   GmatStaticBoxSizer *sizerLowerRight = new GmatStaticBoxSizer( wxVERTICAL, this, "" );

   #ifdef DEBUG_EVENTPANEL_CREATE
   MessageInterface::ShowMessage("EventLocatorPanel::Create() sizers created ...\n");
   #endif

   //-----------------------------------------------------------------
   // Spacecraft/Target
   //-----------------------------------------------------------------
   // label for spacecraft/target (label text depends on the locator type)
   if (isEclipse)
      scTargetTxt = new wxStaticText( this, ID_TEXT, ""GUI_ACCEL_KEY"Spacecraft",
         wxDefaultPosition, wxSize(staticTextWidth,-1), 0 );
   else
      scTargetTxt = new wxStaticText( this, ID_TEXT, ""GUI_ACCEL_KEY"Target",
         wxDefaultPosition, wxSize(staticTextWidth,-1), 0 );
   scTargetComboBox = theGuiManager->GetSpacecraftComboBox(this, ID_COMBOBOX,
      wxSize(150,-1));
   scTargetComboBox->SetToolTip(pConfig->Read(_T("SpacecraftOrTargetHint")));

   //-----------------------------------------------------------------
   // Occulting Bodies
   //-----------------------------------------------------------------
   bodiesTxt = new wxStaticText( this, ID_TEXT, ""GUI_ACCEL_KEY"Occulting Bodies",
      wxDefaultPosition, wxSize(staticTextWidth,-1), 0 );
   bodiesCheckListBox = theGuiManager->GetSpacePointCheckListBox(this,
      ID_CHECKLISTBOX_BODIES, wxSize(200,-1), true, false, false, false);

   #ifdef DEBUG_EVENTPANEL_CREATE
   MessageInterface::ShowMessage("EventLocatorPanel::Create() spacecraft and bodies widgets created ...\n");
   #endif

   //-----------------------------------------------------------------
   // Eclipse/Observers
   //-----------------------------------------------------------------
   #ifdef DEBUG_ECLIPSE_OBSERVERS
   MessageInterface::ShowMessage("-- About to create eclipseTypesCLB OR observersCLB and isEclipse = %s\n",
      (isEclipse? "true":"false"));
   #endif
   if (isEclipse)
   {
      // Eclipse locators choose among eclipse types (Umbra/Penumbra/...)
      eclipseTypesTxt = new wxStaticText( this, ID_TEXT, ""GUI_ACCEL_KEY"Eclipse Types",
         wxDefaultPosition, wxSize(staticTextWidth,-1), 0 );
      eclipseTypesCheckListBox = new wxCheckListBox(this, ID_CHECKLISTBOX_ECLIPSE,
         wxDefaultPosition, wxSize(200,-1), emptyList, wxLB_SINGLE|wxLB_SORT|wxLB_HSCROLL);
   }
   else
   {
      // Contact locators choose among ground-station observers
      observersTxt = new wxStaticText( this, ID_TEXT, ""GUI_ACCEL_KEY"Observers",
         wxDefaultPosition, wxSize(staticTextWidth,-1), 0 );
      observersCheckListBox = theGuiManager->GetGroundStationCheckListBox(this,
         ID_CHECKLISTBOX_OBSERVER, wxSize(200,-1));
//      theGuiManager->GetSpacePointCheckListBox(this, ID_CHECKLISTBOX, wxSize(200,100),
//      false, false, false, true);
      #ifdef DEBUG_EVENTPANEL_CREATE
      MessageInterface::ShowMessage("EventLocatorPanel::Create() observers widgets created!!!!! ...\n");
      #endif
   }
   #ifdef DEBUG_EVENTPANEL_CREATE
   MessageInterface::ShowMessage("EventLocatorPanel::Create() eclipse/observers widgets created ...\n");
   #endif

   //-----------------------------------------------------------------
   // Filename/Report
   //-----------------------------------------------------------------
   fileNameTxt = new wxStaticText( this, ID_TEXT, ""GUI_ACCEL_KEY"Filename",
      wxDefaultPosition, wxSize(staticTextWidth,-1), 0 );
   fileNameTxtCtrl = new wxTextCtrl(this, ID_TEXTCTRL, wxT(""), wxDefaultPosition,
      wxSize(200, -1), 0);
   fileNameBrowseButton = new wxBitmapButton(this, ID_BUTTON_BROWSE, openBitmap,
      wxDefaultPosition, wxSize(buttonWidth, -1));
   runModeTxt = new wxStaticText( this, ID_TEXT, ""GUI_ACCEL_KEY"Run Mode",
      wxDefaultPosition, wxSize(staticTextWidth,-1), 0 );
   runModeComboBox = new wxComboBox ( this, ID_RUNMODE_COMBOBOX, wxT(""),
      wxDefaultPosition, wxSize(epochWidth,-1), emptyList, wxCB_DROPDOWN | wxCB_READONLY );
   runModeComboBox->SetToolTip(pConfig->Read(_T("RunMode")));
   writeReportCheckBox = new wxCheckBox(this, ID_CHECKBOX, wxT("Write Report"),
      wxDefaultPosition, wxDefaultSize, 0);
   #ifdef DEBUG_EVENTPANEL_CREATE
   MessageInterface::ShowMessage("EventLocatorPanel::Create() report widgets created ...\n");
   #endif

   //-----------------------------------------------------------------
   // Epoch
   //-----------------------------------------------------------------
   // check box
   entireIntervalCheckBox = new wxCheckBox(this, ID_CHECKBOX, wxT("Use Entire Interval"),
      wxDefaultPosition, wxDefaultSize, 0);
   // label for epoch format
   epochFormatTxt = new wxStaticText( this, ID_TEXT, "Epoch "GUI_ACCEL_KEY"Format",
      wxDefaultPosition, wxSize(staticTextLarger,-1), 0 );
   // combo box for the epoch format
   epochFormatComboBox = new wxComboBox ( this, ID_COMBOBOX, wxT(""),
      wxDefaultPosition, wxSize(epochWidth,-1), emptyList, wxCB_DROPDOWN | wxCB_READONLY );
   epochFormatComboBox->SetToolTip(pConfig->Read(_T("EpochFormatHint")));
   // label for epoch
   initialEpochTxt = new wxStaticText( this, ID_TEXT, ""GUI_ACCEL_KEY"Initial Epoch",
      wxDefaultPosition, wxSize(staticTextLarger,-1), 0 );
   // textfield for the initial epoch value
   initialEpochTxtCtrl = new wxTextCtrl( this, ID_TEXTCTRL, wxT(""), wxDefaultPosition,
      wxSize(epochWidth,-1), 0, wxTextValidator(wxGMAT_FILTER_NUMERIC) );
   initialEpochTxtCtrl->SetToolTip(pConfig->Read(_T("EpochHint")));
   // label for epoch
   finalEpochTxt = new wxStaticText( this, ID_TEXT, ""GUI_ACCEL_KEY"Final Epoch",
      wxDefaultPosition, wxSize(staticTextLarger,-1), 0 );
   // textfield for the final epoch value
   finalEpochTxtCtrl = new wxTextCtrl( this, ID_TEXTCTRL, wxT(""), wxDefaultPosition,
      wxSize(epochWidth,-1), 0, wxTextValidator(wxGMAT_FILTER_NUMERIC) );
   finalEpochTxtCtrl->SetToolTip(pConfig->Read(_T("EpochHint")));
   #ifdef DEBUG_EVENTPANEL_CREATE
   MessageInterface::ShowMessage("EventLocatorPanel::Create() epoch widgets created ...\n");
   #endif

   //-----------------------------------------------------------------
   // Light-time and stellar aberration
   //-----------------------------------------------------------------
   // check boxes
   lightTimeDelayCheckBox = new wxCheckBox(this, ID_CHECKBOX,
      wxT("Use light-time delay"), wxDefaultPosition, wxDefaultSize, 0);
   stellarAberrationCheckBox = new wxCheckBox(this, ID_CHECKBOX,
      wxT("Use stellar aberration"), wxDefaultPosition, wxDefaultSize, 0);
   if (!isEclipse)
   {
      // Light-time direction only applies to contact locators
      lightTimeDirectionTxt = new wxStaticText( this, ID_TEXT,
         ""GUI_ACCEL_KEY"Light-time direction", wxDefaultPosition,
         wxSize(staticTextWidth,-1), 0 );
      lightTimeDirectionComboBox = new wxComboBox ( this, ID_COMBOBOX, wxT(""),
         wxDefaultPosition, wxSize(epochWidth,-1), emptyList,
         wxCB_DROPDOWN | wxCB_READONLY );
      lightTimeDirectionComboBox->SetToolTip(pConfig->Read(_T("LightTimeDirection")));
      #ifdef DEBUG_EVENTPANEL_CREATE
      MessageInterface::ShowMessage("EventLocatorPanel::Create() light-time direction widgets created!!!!! ...\n");
      #endif
   }
   stepSizeTxt = new wxStaticText( this, ID_TEXT, ""GUI_ACCEL_KEY"Step size",
      wxDefaultPosition, wxSize(staticTextWidth,-1), 0 );
   stepSizeTxtCtrl = new wxTextCtrl( this, ID_TEXTCTRL, wxT(""), wxDefaultPosition,
      wxSize(epochWidth,-1), 0, wxTextValidator(wxGMAT_FILTER_NUMERIC) );
   stepSizeUnitsTxt = new wxStaticText( this, ID_TEXT, "s",
      wxDefaultPosition, wxSize(10,-1), 0 );

   // Make s small blank string to match up the static box sizers
   // NOTE(review): this single blankTxt window is Add()ed to the right-hand
   // grid sizers several times below; wxWidgets expects a window to occupy
   // only one sizer slot — confirm the layout renders as intended.
   wxStaticText *blankTxt = new wxStaticText( this, ID_TEXT, " ",
      wxDefaultPosition, wxSize(10,-1), 0 );
   #ifdef DEBUG_EVENTPANEL_CREATE
   MessageInterface::ShowMessage("EventLocatorPanel::Create() ALL widgets created ...\n");
   #endif

   // ---- Left column layout ----
   leftGridSizer->Add( scTargetTxt, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
   leftGridSizer->Add( scTargetComboBox, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
   leftGridSizer->Add(20,20);
   leftGridSizer->Add( bodiesTxt, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
   leftGridSizer->Add( bodiesCheckListBox, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
   leftGridSizer->Add(20,20);
   if (isEclipse)
   {
      leftGridSizer->Add( eclipseTypesTxt, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
      leftGridSizer->Add( eclipseTypesCheckListBox, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
   }
   else
   {
      leftGridSizer->Add( observersTxt, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
      leftGridSizer->Add( observersCheckListBox, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
   }
   leftGridSizer->Add(20,20);
   leftGridSizer->Add( fileNameTxt, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
   leftGridSizer->Add( fileNameTxtCtrl, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
   leftGridSizer->Add( fileNameBrowseButton, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
   leftGridSizer->Add( runModeTxt, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
   leftGridSizer->Add( runModeComboBox, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
   leftGridSizer->Add(20,20);
   if (centeredCheckboxes)
   {
      leftGridSizer->Add(20,20);
      leftGridSizer->Add( writeReportCheckBox, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
      leftGridSizer->Add(20,20);
   }
   else
   {
      leftGridSizer->Add( writeReportCheckBox, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
      leftGridSizer->Add(20,20);
      leftGridSizer->Add(20,20);
   }
   leftSizer->Add(leftGridSizer, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );

   #ifdef DEBUG_EVENTPANEL_CREATE
   MessageInterface::ShowMessage("EventLocatorPanel::Create() leftGridSizer set up ...\n");
   #endif

   // ---- Upper-right (epoch interval) layout ----
   if (centeredCheckboxes)
   {
      upperRightGridSizer->Add(20,20);
      upperRightGridSizer->Add( entireIntervalCheckBox, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
   }
   else
   {
      upperRightGridSizer->Add( entireIntervalCheckBox, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
      upperRightGridSizer->Add(20,20);
   }
   upperRightGridSizer->Add( blankTxt, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
   upperRightGridSizer->Add( epochFormatTxt, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
   upperRightGridSizer->Add( epochFormatComboBox, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
   upperRightGridSizer->Add( blankTxt, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
   upperRightGridSizer->Add( initialEpochTxt, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
   upperRightGridSizer->Add( initialEpochTxtCtrl, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
   upperRightGridSizer->Add( blankTxt, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
   upperRightGridSizer->Add( finalEpochTxt, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
   upperRightGridSizer->Add( finalEpochTxtCtrl, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
   upperRightGridSizer->Add( blankTxt, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );

   // ---- Lower-right (light-time / step size) layout ----
   if (centeredCheckboxes)
   {
      lowerRightGridSizer->Add(20,20);
      lowerRightGridSizer->Add( lightTimeDelayCheckBox, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
      lowerRightGridSizer->Add( blankTxt, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
      lowerRightGridSizer->Add(20,20);
      lowerRightGridSizer->Add( stellarAberrationCheckBox, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
      lowerRightGridSizer->Add( blankTxt, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
   }
   else
   {
      lowerRightGridSizer->Add( lightTimeDelayCheckBox, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
      lowerRightGridSizer->Add(20,20);
      lowerRightGridSizer->Add( blankTxt, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
      lowerRightGridSizer->Add( stellarAberrationCheckBox, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
      lowerRightGridSizer->Add(20,20);
      lowerRightGridSizer->Add( blankTxt, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
   }
   if (!isEclipse)
   {
      lowerRightGridSizer->Add( lightTimeDirectionTxt, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
      lowerRightGridSizer->Add( lightTimeDirectionComboBox, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
      lowerRightGridSizer->Add( blankTxt, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
   }
   lowerRightGridSizer->Add( stepSizeTxt, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
   lowerRightGridSizer->Add( stepSizeTxtCtrl, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
   lowerRightGridSizer->Add( stepSizeUnitsTxt, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );

   #ifdef DEBUG_EVENTPANEL_CREATE
   MessageInterface::ShowMessage("EventLocatorPanel::Create() right sizers set up ...\n");
   #endif

   // Upper right
   sizerUpperRight->Add(upperRightGridSizer, 0, wxALIGN_CENTRE|wxALL, bsize);
   // LowerRight
   sizerLowerRight->Add(lowerRightGridSizer, 0, wxALIGN_CENTRE|wxALL, bsize);

   rightSizer->Add(sizerUpperRight, 0, wxGROW|wxALIGN_LEFT|wxALL, bsize);
   rightSizer->Add(sizerLowerRight, 0, wxGROW|wxALIGN_LEFT|wxALL, bsize);

   eventSizer->Add( leftSizer, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );
   eventSizer->Add( rightSizer, 0, wxGROW|wxALIGN_LEFT | wxALL, bsize );

   // theMiddleSizer is provided by the GmatPanel base class
   theMiddleSizer->Add(eventSizer, 0, wxALIGN_CENTRE|wxALL, bsize);

   #ifdef DEBUG_EVENTPANEL_CREATE
   MessageInterface::ShowMessage("EventLocatorPanel::Create() exiting\n");
   #endif
}


//------------------------------------------------------------------------------
// void LoadData()
//------------------------------------------------------------------------------
/**
 * Populates every widget from the configured EventLocator object.
 */
void EventLocatorPanel::LoadData()
{
   #ifdef DEBUG_EVENTPANEL_LOAD
   MessageInterface::ShowMessage("EventLocatorPanel::LoadData() entered\n");
   #endif

   // Set object pointer for "Show Script"
   mObject = theObject;
   Integer paramID;
   wxString valueString;

   EventLocator *theLocator = (EventLocator*) theObject;

   // Load the epoch formats
   StringArray reps = TimeConverterUtil::GetValidTimeRepresentations();
   for (unsigned int i = 0; i < reps.size(); i++)
      epochFormatComboBox->Append(reps[i].c_str());

   #ifdef DEBUG_EVENTPANEL_LOAD
   MessageInterface::ShowMessage
      (" epochFormatComboBox has loaded its data ...\n");
   #endif

   // Load the spacecraft/target data ("Spacecraft" for eclipse locators,
   // "Target" for contact locators)
   std::string scTargetName;
   if (isEclipse)
      scTargetName = theLocator->GetStringParameter("Spacecraft");
   else
      scTargetName = theLocator->GetStringParameter("Target");
   if (scTargetName == "")
   {
      scTargetComboBox->SetSelection(0);
   }
   else
   {
      valueString = wxString(scTargetName.c_str());
      scTargetComboBox->SetValue(valueString);
   }

   // load the occulting bodies: check each configured body in the list box
   wxString wxBody;
   paramID = theLocator->GetParameterID("OccultingBodies");
   StringArray bodies = theLocator->GetStringArrayParameter(paramID);
   #ifdef DEBUG_EVENTPANEL_LOAD
   MessageInterface::ShowMessage (" occulting bodies are: ...\n");
   for (Integer tt = 0; tt < bodies.size(); tt++)
      MessageInterface::ShowMessage(" %d %s\n", tt, bodies.at(tt).c_str());
   #endif
   unsigned int bodiesSz = bodies.size();
   for (unsigned int ii = 0; ii < bodiesSz; ii++)
   {
      wxBody =
bodies.at(ii).c_str(); int itsPos = bodiesCheckListBox->FindString(wxBody); if (itsPos != wxNOT_FOUND) { bodiesCheckListBox->Check(itsPos); } } // load the eclipse types or observers wxString wxEcObs; int itsPos = -1; if (isEclipse) { paramID = theLocator->GetParameterID("EclipseTypes"); StringArray possibleEclipseTypes = theLocator->GetPropertyEnumStrings(paramID); unsigned int posEcSz = possibleEclipseTypes.size(); #ifdef DEBUG_ECLIPSE_OBSERVERS MessageInterface::ShowMessage("-- The possible EclipseTypes are:\n"); for (Integer ll = 0; ll < posEcSz; ll++) MessageInterface::ShowMessage(" %d %s\n", ll, possibleEclipseTypes.at(ll).c_str()); #endif wxString *wxPossibleEcTypes = new wxString[posEcSz]; for (unsigned int kk = 0; kk < posEcSz; kk++) { wxString asWx = STD_TO_WX_STRING(possibleEclipseTypes.at(kk)); wxPossibleEcTypes[kk] = asWx; #ifdef DEBUG_ECLIPSE_OBSERVERS MessageInterface::ShowMessage("-- Adding to Eclipse Types CBL:\n"); MessageInterface::ShowMessage(" %s\n", asWx.WX_TO_C_STRING); #endif } eclipseTypesCheckListBox->InsertItems(posEcSz, wxPossibleEcTypes, 0); StringArray eclipseTypes = theLocator->GetStringArrayParameter(paramID); unsigned int ecSz = eclipseTypes.size(); #ifdef DEBUG_ECLIPSE_OBSERVERS MessageInterface::ShowMessage("-- And these are the selected ones:\n"); for (Integer ll = 0; ll < ecSz; ll++) MessageInterface::ShowMessage(" %d %s\n", ll, eclipseTypes.at(ll).c_str()); #endif for (unsigned int kk = 0; kk < ecSz; kk++) { wxEcObs = eclipseTypes.at(kk).c_str(); itsPos = eclipseTypesCheckListBox->FindString(wxEcObs); if (itsPos != wxNOT_FOUND) { eclipseTypesCheckListBox->Check(itsPos); #ifdef DEBUG_ECLIPSE_OBSERVERS MessageInterface::ShowMessage("-- Checking the box ON for: "); MessageInterface::ShowMessage(" %s\n", wxEcObs.WX_TO_C_STRING); #endif } } } else // contact locator { paramID = theLocator->GetParameterID("Observers"); StringArray observers = theLocator->GetStringArrayParameter(paramID); unsigned int obsSz = observers.size(); #ifdef 
DEBUG_ECLIPSE_OBSERVERS MessageInterface::ShowMessage("-- The observers are:\n"); for (Integer ll = 0; ll < obsSz; ll++) MessageInterface::ShowMessage(" %d %s\n", ll, observers.at(ll).c_str()); #endif for (unsigned int ii = 0; ii < obsSz; ii++) { wxEcObs = observers.at(ii).c_str(); itsPos = observersCheckListBox->FindString(wxEcObs); if (itsPos != wxNOT_FOUND) { observersCheckListBox->Check(itsPos); } } } // load the filename paramID = theLocator->GetParameterID("Filename"); std::string filename = theLocator->GetStringParameter(paramID); fileNameTxtCtrl->SetValue(wxString(filename.c_str())); // load the write report flag paramID = theLocator->GetParameterID("WriteReport"); bool writeReport = theLocator->GetBooleanParameter(paramID); writeReportCheckBox->SetValue(writeReport); // load the run mode paramID = theLocator->GetParameterID("RunMode"); StringArray theModes = theLocator->GetPropertyEnumStrings(paramID); unsigned int modeSz = theModes.size(); for (unsigned int kk = 0; kk < modeSz; kk++) { runModeComboBox->Append(theModes.at(kk).c_str()); } std::string currentMode = theLocator->GetStringParameter(paramID); runModeComboBox->SetValue(wxString(currentMode.c_str())); // load the entire interval flag paramID = theLocator->GetParameterID("UseEntireInterval"); bool useEntireInterval = theLocator->GetBooleanParameter(paramID); entireIntervalCheckBox->SetValue(useEntireInterval); // load the epoch std::string epochFormat = theLocator->GetStringParameter("InputEpochFormat"); std::string initEpochStr = theLocator->GetStringParameter("InitialEpoch"); std::string finalEpochStr = theLocator->GetStringParameter("FinalEpoch"); epochFormatComboBox->SetValue(wxString(epochFormat.c_str())); fromEpochFormat = epochFormat; #ifdef DEBUG_EVENTPANEL_LOAD MessageInterface::ShowMessage (" epochFormat=%s, initEpochStr=%s, finalEpochStr=%s\n", epochFormat.c_str(), initEpochStr.c_str(), finalEpochStr.c_str()); #endif #ifdef DEBUG_EVENTPANEL_LOAD MessageInterface::ShowMessage (" loaded 
the epoch format ...\n"); #endif // Save to TAIModJulian string to avoid keep reading the field // and convert to proper format when ComboBox is changed. if (epochFormat == "TAIModJulian") { taiMjdInitialEpochStr = initEpochStr; taiMjdFinalEpochStr = finalEpochStr; } else { Real fromMjd = -999.999; Real outMjd; std::string outStr; #ifdef DEBUG_EVENTPANEL_LOAD MessageInterface::ShowMessage (" about to convert from the epoch format %s to TAIModJulian ...\n", fromEpochFormat.c_str()); #endif TimeConverterUtil::Convert(fromEpochFormat, fromMjd, initEpochStr, "TAIModJulian", outMjd, outStr); taiMjdInitialEpochStr = outStr; fromMjd = -999.999; TimeConverterUtil::Convert(fromEpochFormat, fromMjd, finalEpochStr, "TAIModJulian", outMjd, outStr); taiMjdFinalEpochStr = outStr; #ifdef DEBUG_EVENTPANEL_LOAD MessageInterface::ShowMessage(" taiMjdInitialEpochStr=%s\n", taiMjdInitialEpochStr.c_str()); MessageInterface::ShowMessage(" taiMjdFinalEpochStr =%s\n", taiMjdFinalEpochStr.c_str()); #endif } theInitEpochStr = initEpochStr; initialEpochTxtCtrl->SetValue(initEpochStr.c_str()); theFinalEpochStr = finalEpochStr; finalEpochTxtCtrl->SetValue(finalEpochStr.c_str()); // load the light-time delay flag paramID = theLocator->GetParameterID("UseLightTimeDelay"); bool useLightTime = theLocator->GetBooleanParameter(paramID); lightTimeDelayCheckBox->SetValue(useLightTime); // load the stellar aberration flag paramID = theLocator->GetParameterID("UseStellarAberration"); bool useStellarAberration = theLocator->GetBooleanParameter(paramID); stellarAberrationCheckBox->SetValue(useStellarAberration); if (!isEclipse) { paramID = theLocator->GetParameterID("LightTimeDirection"); StringArray possibleDirections = theLocator->GetPropertyEnumStrings(paramID); unsigned int dirSz = possibleDirections.size(); for (unsigned int kk = 0; kk < dirSz; kk++) { lightTimeDirectionComboBox->Append(possibleDirections.at(kk).c_str()); } std::string currentDir = theLocator->GetStringParameter(paramID); 
      lightTimeDirectionComboBox->SetValue(wxString(currentDir.c_str()));
   }

   // load the step size
   paramID = theLocator->GetParameterID("StepSize");
   Real step = theLocator->GetRealParameter(paramID);
//   std::string stepString = GmatStringUtil::Trim(GmatStringUtil::ToString(step));
   stepSizeTxtCtrl->SetValue(ToString(step));

   #ifdef DEBUG_EVENTPANEL_LOAD
   MessageInterface::ShowMessage (" about to enable or disable widgets as needed\n");
   MessageInterface::ShowMessage("is useEntireInterval box checked? %s\n",
      (entireIntervalCheckBox->IsChecked()? "true": "false"));
   MessageInterface::ShowMessage("is lightTimeDelay box checked? %s\n",
      (lightTimeDelayCheckBox->IsChecked()? "true": "false"));
   #endif
   // Enable/disable as needed: epoch fields are only editable when the user
   // is NOT locating over the entire interval
   if (entireIntervalCheckBox->IsChecked())
   {
//      epochFormatTxt->Disable();
      epochFormatComboBox->Disable();
//      initialEpochTxt->Disable();
      initialEpochTxtCtrl->Disable();
//      finalEpochTxt->Disable();
      finalEpochTxtCtrl->Disable();
      #ifdef DEBUG_EVENTPANEL_LOAD
      MessageInterface::ShowMessage (" time-related ones disabled\n");
      #endif
   }
   else
   {
//      epochFormatTxt->Enable();
      epochFormatComboBox->Enable();
//      initialEpochTxt->Enable();
      initialEpochTxtCtrl->Enable();
//      finalEpochTxt->Enable();
      finalEpochTxtCtrl->Enable();
      #ifdef DEBUG_EVENTPANEL_LOAD
      MessageInterface::ShowMessage (" time-related ones enabled\n");
      #endif
   }
   // Stellar aberration and light-time direction are meaningful only when
   // light-time delay is in use
   if (lightTimeDelayCheckBox->IsChecked())
   {
      stellarAberrationCheckBox->Enable();
      if (!isEclipse)
      {
//         lightTimeDirectionTxt->Enable();
         lightTimeDirectionComboBox->Enable();
      }
      #ifdef DEBUG_EVENTPANEL_LOAD
      MessageInterface::ShowMessage (" light-time-related ones enabled\n");
      #endif
   }
   else
   {
      stellarAberrationCheckBox->Disable();
      stellarAberrationCheckBox->SetValue(false);
      #ifdef DEBUG_EVENTPANEL_LOAD
      MessageInterface::ShowMessage (" stellar-aberration-related ones disabled\n");
      #endif
      if (!isEclipse)
      {
//         lightTimeDirectionTxt->Disable();
         lightTimeDirectionComboBox->Disable();
      }
      #ifdef DEBUG_EVENTPANEL_LOAD
      MessageInterface::ShowMessage (" light-time-related ones disabled\n");
      #endif
   }
   #ifdef DEBUG_EVENTPANEL_LOAD
   MessageInterface::ShowMessage("EventLocatorPanel::LoadData() exiting\n");
   #endif
}

//------------------------------------------------------------------------------
// void SaveData()
//------------------------------------------------------------------------------
/**
 * Validates and saves the panel data: saves into a local clone first, and
 * only copies the clone back onto the configured object if no errors were
 * detected (canClose remains true).
 */
void EventLocatorPanel::SaveData()
{
   #ifdef DEBUG_EVENTPANEL_SAVE
   MessageInterface::ShowMessage("EventLocatorPanel::SaveData() entered\n");
   #endif

   // create local copy of mObject
   if (localObject != NULL)
   {
      delete localObject;
   }
   localObject = mObject->Clone();
   SaveData(localObject);
   #ifdef DEBUG_EVENTPANEL_SAVE
   MessageInterface::ShowMessage("EventLocatorPanel::SaveData() done setting local object\n");
   #endif

   // if no errors, save again
   if (canClose)
   {
      #ifdef DEBUG_EVENTPANEL_SAVE
      MessageInterface::ShowMessage("EventLocatorPanel::SaveData() copying data back to object\n");
      #endif
//      mObject->Copy(localObject);
      theObject->Copy(localObject);
      #ifdef DEBUG_EVENTPANEL_SAVE
      MessageInterface::ShowMessage("EventLocatorPanel::SaveData() about to reset changed flags\n");
      #endif
//      // reset changed flags
//      ResetChangedFlags();
   }
   #ifdef DEBUG_EVENTPANEL_SAVE
   MessageInterface::ShowMessage("EventLocatorPanel::SaveData() exiting\n");
   #endif
}

//------------------------------------------------------------------------------
// void SaveData(GmatBase *forObject)
//------------------------------------------------------------------------------
/**
 * Writes the widget values onto forObject, validating each changed field.
 * Sets canClose to false when validation fails so the caller keeps the
 * panel open.
 *
 * @param forObject  object (normally the local clone) receiving the data
 */
void EventLocatorPanel::SaveData(GmatBase *forObject)
{
   #ifdef DEBUG_EVENTPANEL_SAVE
   MessageInterface::ShowMessage("EventLocatorPanel::SaveData(obj) entered\n");
   #endif
   canClose = true;
   std::string str;
   wxString wxStr;
   Real theStepSize;
   // NOTE(review): theWriteReport/theEntireInterval/theLightTimeDelay are
   // declared here; presumably assigned in the checkbox-save section below
   // (beyond this view) — confirm they are all used.
   bool theWriteReport, theEntireInterval, theLightTimeDelay;
   bool realDataChanged = false;
   bool isValid = true;

   //-----------------------------------------------------------------
   // check values from text fields
   //-----------------------------------------------------------------
   if (isStepSizeChanged)
   {
      #ifdef DEBUG_EVENTPANEL_SAVE
      MessageInterface::ShowMessage("EventLocatorPanel::SaveData(obj) - there has been a text mod!!\n");
      #endif
      str = stepSizeTxtCtrl->GetValue();
      // NOTE(review): isValid is assigned but the code below only tests
      // canClose (which CheckReal is expected to set) — confirm intent.
      isValid = CheckReal(theStepSize, str, "StepSize", "Real Number >= 0", false, true, true, true);
      realDataChanged = true;
   }

   if (!canClose)
      return;

   try
   {
      Integer paramID;
      std::string epochFormat = epochFormatComboBox->GetValue().WX_TO_STD_STRING;
      std::string newInitEpoch = initialEpochTxtCtrl->GetValue().WX_TO_STD_STRING;
      std::string newFinalEpoch = finalEpochTxtCtrl->GetValue().WX_TO_STD_STRING;
      Real fromMjd = -999.999;
      // NOTE(review): a1mjd and outStr appear unused in the visible portion
      // of this method — verify against the remainder of the function.
      Real a1mjd = -999.999;
      std::string outStr;
      #ifdef DEBUG_EVENTPANEL_SAVE
      MessageInterface::ShowMessage
         (" newInitEpoch=%s, newFinalEpoch=%s, epochFormat=%s\n",
          newInitEpoch.c_str(), newFinalEpoch.c_str(), epochFormat.c_str());
      #endif

      // Report file name
      if (isFileNameChanged || isFileNameTextChanged)
      {
         str = fileNameTxtCtrl->GetValue().WX_TO_STD_STRING;
         canClose = CheckFileName(str, "Filename");
         if (!canClose)
            return;
         paramID = forObject->GetParameterID("Filename");
         forObject->SetStringParameter(paramID, str);
         isFileNameChanged = false;
         isFileNameTextChanged = false;
      }
      // Only save the epoch information if the UseEntireInterval box is NOT checked
      if (!entireIntervalCheckBox->IsChecked())
      {
         // Save epoch format and epoch
         if (isEpochFormatChanged || isInitialEpochChanged || isInitialEpochTextChanged ||
             isFinalEpochChanged || isFinalEpochTextChanged)
         {
            bool timeOK = CheckTimeFormatAndValue(epochFormat, newInitEpoch,
               "InitialEpoch", true);
            timeOK = timeOK && CheckTimeFormatAndValue(epochFormat, newFinalEpoch,
               "FinalEpoch", true);
            #ifdef DEBUG_EVENTPANEL_SAVE
            MessageInterface::ShowMessage (" timeOK = %s\n", (timeOK? "YES!" : "no"));
            MessageInterface::ShowMessage(" new init epoch = %s\n", newInitEpoch.c_str());
            MessageInterface::ShowMessage(" new final epoch = %s\n", newFinalEpoch.c_str());
            #endif
            if (timeOK)
            {
               #ifdef DEBUG_EVENTPANEL_SAVE
               MessageInterface::ShowMessage (" About to set format and time on the object ---------\n");
               #endif
               paramID = forObject->GetParameterID("InputEpochFormat");
               forObject->SetStringParameter(paramID, epochFormat);
               paramID = forObject->GetParameterID("InitialEpoch");
               forObject->SetStringParameter(paramID, newInitEpoch);
               paramID = forObject->GetParameterID("FinalEpoch");
               forObject->SetStringParameter(paramID, newFinalEpoch);
               #ifdef DEBUG_EVENTPANEL_SAVE
               MessageInterface::ShowMessage (" DONE setting format and time on the object ---------\n");
               #endif
               isEpochFormatChanged = false;
               isInitialEpochChanged = false;
               isInitialEpochTextChanged = false;
               isFinalEpochChanged = false;
               isFinalEpochTextChanged = false;
            }
            else
            {
               #ifdef DEBUG_EVENTPANEL_SAVE
               MessageInterface::ShowMessage("EventLocatorPanel::SaveData() setting canClose to false inside (epoch) try\n");
               #endif
               canClose = false;
            }
         }
      }
      // Save Real Data
      if (realDataChanged)
      {
         #ifdef DEBUG_EVENTPANEL_SAVE
         MessageInterface::ShowMessage("EventLocatorPanel::SaveData(obj) - attempting to real data!!\n");
         #endif
         paramID = forObject->GetParameterID("StepSize");
         forObject->SetRealParameter(paramID, theStepSize);
         realDataChanged = false;
      }
      // SC/Target
      if (isSCTargetChanged)
      {
         #ifdef DEBUG_EVENTPANEL_SAVE
         MessageInterface::ShowMessage("EventLocatorPanel::SaveData(obj) - saving SC/Target!!\n");
         #endif
         str = scTargetComboBox->GetValue().WX_TO_STD_STRING;
         if (isEclipse)
            paramID = forObject->GetParameterID("Spacecraft");
         else
            paramID = forObject->GetParameterID("Target");
         forObject->SetStringParameter(paramID, str);
         isSCTargetChanged = false;
      }
      int count;
      // Occulting bodies: clear the object's list, then re-add every body
      // currently checked in the list box
      if (isBodyListChanged)
      {
         #ifdef DEBUG_EVENTPANEL_SAVE
         MessageInterface::ShowMessage("EventLocatorPanel::SaveData(obj) - saving occulting bodies!!\n");
         #endif
         forObject->TakeAction("Clear", "OccultingBodies");
         count = bodiesCheckListBox->GetCount();
         paramID = forObject->GetParameterID("OccultingBodies");
         for (int i = 0; i < count; i++)
         {
            if (bodiesCheckListBox->IsChecked(i))
            {
               std::string bodyName = bodiesCheckListBox->GetString(i).WX_TO_STD_STRING;
               #ifdef DEBUG_EVENTPANEL_SAVE
               MessageInterface::ShowMessage("--- adding %s to the list of bodies\n", bodyName.c_str());
               #endif
               forObject->SetStringParameter(paramID, bodyName);
            }
         }
         isBodyListChanged = false;
      }
      // Eclipse types OR Observers (same clear-then-re-add pattern)
      if (isEclipse && isEclipseTypesChanged)
      {
         count = eclipseTypesCheckListBox->GetCount();
         forObject->TakeAction("Clear", "EclipseTypes");
         paramID = forObject->GetParameterID("EclipseTypes");
         for (int i = 0; i < count; i++)
         {
            if (eclipseTypesCheckListBox->IsChecked(i))
            {
               std::string str = eclipseTypesCheckListBox->GetString(i).WX_TO_STD_STRING;
               #ifdef DEBUG_EVENTPANEL_SAVE
               MessageInterface::ShowMessage("--- adding %s to the list of eclObs\n", str.c_str());
               #endif
               forObject->SetStringParameter(paramID, str);
            }
         }
         isEclipseTypesChanged = false;
      }
      if (!isEclipse && isObserverListChanged)
      {
         #ifdef DEBUG_EVENTPANEL_SAVE
         MessageInterface::ShowMessage("EventLocatorPanel::SaveData(obj) - saving eclipse types or observers!!\n");
         #endif
         count = observersCheckListBox->GetCount();
         forObject->TakeAction("Clear", "Observers");
         paramID = forObject->GetParameterID("Observers");
         for (int i = 0; i < count; i++)
         {
            if (observersCheckListBox->IsChecked(i))
            {
               std::string str = observersCheckListBox->GetString(i).WX_TO_STD_STRING;
               #ifdef DEBUG_EVENTPANEL_SAVE
               MessageInterface::ShowMessage("--- adding %s to the list of eclObs\n", str.c_str());
               #endif
               forObject->SetStringParameter(paramID, str);
            }
         }
         isObserverListChanged = false;
      }
      // run mode change
      if (isRunModeChanged)
      {
         str = runModeComboBox->GetValue().WX_TO_STD_STRING;
         paramID = forObject->GetParameterID("RunMode");
         forObject->SetStringParameter(paramID, str);
         isRunModeChanged = false;
      }
      // Checkbox values
if (isWriteReportChanged) { paramID = forObject->GetParameterID("WriteReport"); if (writeReportCheckBox->IsChecked()) forObject->SetBooleanParameter(paramID, true); else forObject->SetBooleanParameter(paramID, false); isWriteReportChanged = false; } if (isEntireIntervalChanged) { paramID = forObject->GetParameterID("UseEntireInterval"); if (entireIntervalCheckBox->IsChecked()) forObject->SetBooleanParameter(paramID, true); else forObject->SetBooleanParameter(paramID, false); isEntireIntervalChanged = false; } if (isLightTimeDelayChanged) { paramID = forObject->GetParameterID("UseLightTimeDelay"); if (lightTimeDelayCheckBox->IsChecked()) forObject->SetBooleanParameter(paramID, true); else forObject->SetBooleanParameter(paramID, false); isLightTimeDelayChanged = false; } // Stellar aberration could be set to false if the light time delay is turned off if (isStellarAberrationChanged) { paramID = forObject->GetParameterID("UseStellarAberration"); if (stellarAberrationCheckBox->IsChecked()) forObject->SetBooleanParameter(paramID, true); else forObject->SetBooleanParameter(paramID, false); isStellarAberrationChanged = false; } if (lightTimeDelayCheckBox->IsChecked()) { // if (isStellarAberrationChanged) // { // paramID = forObject->GetParameterID("UseStellarAberration"); // if (stellarAberrationCheckBox->IsChecked()) // forObject->SetBooleanParameter(paramID, true); // else // forObject->SetBooleanParameter(paramID, false); // isStellarAberrationChanged = false; // } // light time direction // SC/Target if (!isEclipse && isLightTimeDirectionChanged) { str = lightTimeDirectionComboBox->GetValue().WX_TO_STD_STRING; paramID = forObject->GetParameterID("LightTimeDirection"); forObject->SetStringParameter(paramID, str); isLightTimeDirectionChanged = false; } } } catch(BaseException &ex) { MessageInterface::PopupMessage(Gmat::ERROR_, ex.GetFullMessage()); canClose = false; } #ifdef DEBUG_EVENTPANEL_SAVE MessageInterface::ShowMessage("EventLocatorPanel::SaveData(obj) 
exiting\n"); #endif } //------------------------------------------------------------------------------ // void OnTextChange() //------------------------------------------------------------------------------ void EventLocatorPanel::OnTextChange(wxCommandEvent &event) { if (fileNameTxtCtrl->IsModified()) isFileNameTextChanged = true; if (initialEpochTxtCtrl->IsModified()) isInitialEpochTextChanged = true; if (finalEpochTxtCtrl->IsModified()) isFinalEpochTextChanged = true; if (stepSizeTxtCtrl->IsModified()) isStepSizeChanged = true; EnableUpdate(true); } //------------------------------------------------------------------------------ // void OnComboBoxChange() //------------------------------------------------------------------------------ void EventLocatorPanel::OnComboBoxChange(wxCommandEvent &event) { std::string toEpochFormat = epochFormatComboBox->GetValue().WX_TO_STD_STRING; #ifdef DEBUG_EVENTPANEL_PANEL_COMBOBOX MessageInterface::ShowMessage ("\nEventLocatorPanel::OnComboBoxChange() toEpochFormat=%s\n", toEpochFormat.c_str()); #endif //----------------------------------------------------------------- // epoch format change //----------------------------------------------------------------- if (event.GetEventObject() == epochFormatComboBox) { isInitialEpochChanged = true; isFinalEpochChanged = true; #ifdef DEBUG_EVENT_PANEL_COMBOBOX MessageInterface::ShowMessage ("\nEventLocatorPanel::OnComboBoxChange() attempting to change epoch format ...\n"); #endif try { Real fromMjd = -999.999; Real outMjd; std::string outStr; // if modified by user, check if initial epoch is valid first if (isInitialEpochTextChanged) { std::string theEpochStr = initialEpochTxtCtrl->GetValue().WX_TO_STD_STRING; // Save to TAIModJulian string to avoid keep reading the field // and convert to proper format when ComboBox is changed. 
if (fromEpochFormat == "TAIModJulian") { taiMjdInitialEpochStr = theEpochStr; } else { TimeConverterUtil::Convert(fromEpochFormat, fromMjd, theEpochStr, "TAIModJulian", outMjd, outStr); taiMjdInitialEpochStr = outStr; } // Convert to desired format with new date TimeConverterUtil::Convert(fromEpochFormat, fromMjd, theEpochStr, toEpochFormat, outMjd, outStr); initialEpochTxtCtrl->SetValue(outStr.c_str()); // isInitialEpochChanged = false; // fromEpochFormat = toEpochFormat; } else { #ifdef DEBUG_EVENTPANEL_PANEL_COMBOBOX MessageInterface::ShowMessage ("\nEventLocatorPanel::OnComboBoxChange() converting from %s to %s\n", "TAIModJulian", toEpochFormat.c_str()); #endif // Convert to desired format using TAIModJulian date TimeConverterUtil::Convert("TAIModJulian", fromMjd, taiMjdInitialEpochStr, toEpochFormat, outMjd, outStr); initialEpochTxtCtrl->SetValue(outStr.c_str()); // fromEpochFormat = toEpochFormat; } // if modified by user, check if final epoch is valid first if (isFinalEpochTextChanged) { std::string theEpochStr = finalEpochTxtCtrl->GetValue().WX_TO_STD_STRING; // Save to TAIModJulian string to avoid keep reading the field // and convert to proper format when ComboBox is changed. 
if (fromEpochFormat == "TAIModJulian") { taiMjdFinalEpochStr = theEpochStr; } else { TimeConverterUtil::Convert(fromEpochFormat, fromMjd, theEpochStr, "TAIModJulian", outMjd, outStr); taiMjdFinalEpochStr = outStr; } // Convert to desired format with new date TimeConverterUtil::Convert(fromEpochFormat, fromMjd, theEpochStr, toEpochFormat, outMjd, outStr); finalEpochTxtCtrl->SetValue(outStr.c_str()); // isFinalEpochChanged = false; // fromEpochFormat = toEpochFormat; } else { #ifdef DEBUG_EVENTPANEL_PANEL_COMBOBOX MessageInterface::ShowMessage ("\nEventLocatorPanel::OnComboBoxChange() converting from %s to %s\n", "TAIModJulian", toEpochFormat.c_str()); #endif // Convert to desired format using TAIModJulian date TimeConverterUtil::Convert("TAIModJulian", fromMjd, taiMjdFinalEpochStr, toEpochFormat, outMjd, outStr); finalEpochTxtCtrl->SetValue(outStr.c_str()); // fromEpochFormat = toEpochFormat; } fromEpochFormat = toEpochFormat; } catch (BaseException &e) { epochFormatComboBox->SetValue(fromEpochFormat.c_str()); MessageInterface::PopupMessage (Gmat::ERROR_, e.GetFullMessage() + "\nPlease enter valid Epoch before changing the Epoch Format\n"); } } else if (event.GetEventObject() == scTargetComboBox) { scTargetStr = scTargetComboBox->GetValue().WX_TO_STD_STRING; isSCTargetChanged = true; } else if (event.GetEventObject() == runModeComboBox) { runModeStr = runModeComboBox->GetValue().WX_TO_STD_STRING; isRunModeChanged = true; } else if (!isEclipse && event.GetEventObject() == lightTimeDirectionComboBox) { lightTimeDirectionStr = lightTimeDirectionComboBox->GetValue().WX_TO_STD_STRING; isLightTimeDirectionChanged = true; } EnableUpdate(true); #ifdef DEBUG_EVENTPANEL_PANEL_COMBOBOX MessageInterface::ShowMessage ("\nEventLocatorPanel::OnComboBoxChange() EXITing\n"); #endif } //------------------------------------------------------------------------------ // void OnBrowseButton() //------------------------------------------------------------------------------ void 
EventLocatorPanel::OnBrowseButton(wxCommandEvent &event) { wxString prevFilename = fileNameTxtCtrl->GetValue(); wxFileDialog dialog(this, _T("Choose a file"), _T(""), _T(""), _T("*.*")); if (dialog.ShowModal() == wxID_OK) { wxString filename; filename = dialog.GetPath().c_str(); if (!filename.IsSameAs(prevFilename)) { fileNameTxtCtrl->SetValue(filename); isFileNameChanged = true; isFileNameTextChanged = true; EnableUpdate(true); } } } //------------------------------------------------------------------------------ // void OnCheckBoxChange(wxCommandEvent& event) //------------------------------------------------------------------------------ void EventLocatorPanel::OnCheckBoxChange(wxCommandEvent& event) { if (event.GetEventObject() == writeReportCheckBox) { isWriteReportChanged = true; } else if (event.GetEventObject() == entireIntervalCheckBox) { isEntireIntervalChanged = true; if (entireIntervalCheckBox->IsChecked()) { // epochFormatTxt->Disable(); epochFormatComboBox->Disable(); // initialEpochTxt->Disable(); initialEpochTxtCtrl->Disable(); // finalEpochTxt->Disable(); finalEpochTxtCtrl->Disable(); } else { // epochFormatTxt->Enable(); epochFormatComboBox->Enable(); // initialEpochTxt->Enable(); initialEpochTxtCtrl->Enable(); // finalEpochTxt->Enable(); finalEpochTxtCtrl->Enable(); } } else if (event.GetEventObject() == lightTimeDelayCheckBox) { isLightTimeDelayChanged = true; if (lightTimeDelayCheckBox->IsChecked()) { stellarAberrationCheckBox->Enable(); if (!isEclipse) { // lightTimeDirectionTxt->Enable(); lightTimeDirectionComboBox->Enable(); } } else { stellarAberrationCheckBox->Disable(); stellarAberrationCheckBox->SetValue(false); isStellarAberrationChanged = true; if (!isEclipse) { // lightTimeDirectionTxt->Disable(); lightTimeDirectionComboBox->Disable(); } } } else if (event.GetEventObject() == stellarAberrationCheckBox) { isStellarAberrationChanged = true; } EnableUpdate(true); } 
//------------------------------------------------------------------------------ // void OnCheckListBoxChange(wxCommandEvent& event) //------------------------------------------------------------------------------ void EventLocatorPanel::OnCheckListBoxChange(wxCommandEvent& event) { if (event.GetEventObject() == bodiesCheckListBox) { isBodyListChanged = true; } else if (isEclipse && event.GetEventObject() == eclipseTypesCheckListBox) { isEclipseTypesChanged = true; } else if (!isEclipse && event.GetEventObject() == observersCheckListBox) { isObserverListChanged = true; } EnableUpdate(true); } //------------------------------------------------------------------------------ // void OnCheckListBoxSelect(wxCommandEvent& event) //------------------------------------------------------------------------------ void EventLocatorPanel::OnCheckListBoxSelect(wxCommandEvent& event) { EnableUpdate(true); } //------------------------------------------------------------------------------ // wxString ToString(Real rval) //------------------------------------------------------------------------------ /** * Converts a real number to a wxString. 
* * @param <rval> real number to convert * * @return wxString representation of the input real number */ //------------------------------------------------------------------------------ wxString EventLocatorPanel::ToString(Real rval) { return theGuiManager->ToWxString(rval); } //------------------------------------------------------------------------------ // void ResetChangedFlags() //------------------------------------------------------------------------------ void EventLocatorPanel::ResetChangedFlags() { isSCTargetChanged = false; isBodyListChanged = false; isEclipseTypesChanged = false; isObserverListChanged = false; isFileNameChanged = false; isFileNameTextChanged = false; isWriteReportChanged = false; isRunModeChanged = false; isEntireIntervalChanged = false; isEpochFormatChanged = false; isInitialEpochChanged = false; isFinalEpochChanged = false; isInitialEpochTextChanged = false; isFinalEpochTextChanged = false; isLightTimeDelayChanged = false; isStellarAberrationChanged = false; isLightTimeDirectionChanged = false; isStepSizeChanged = false; }
39.308753
129
0.589617
[ "object" ]
0eb923ef61ffcd4f28b753e2c331c82a5e985b93
1,417
hpp
C++
src/config.hpp
rainstormstudio/TetrisD3
5079a33781e08b00eac2e01a3149a1d2b10b9e36
[ "MIT" ]
null
null
null
src/config.hpp
rainstormstudio/TetrisD3
5079a33781e08b00eac2e01a3149a1d2b10b9e36
[ "MIT" ]
null
null
null
src/config.hpp
rainstormstudio/TetrisD3
5079a33781e08b00eac2e01a3149a1d2b10b9e36
[ "MIT" ]
null
null
null
#ifndef CONFIG_HPP #define CONFIG_HPP #include <string> #include <vector> #include "inputManager.hpp" class Config { std::string configFilePath; std::string comment(std::string content); std::string item(std::string tag, std::string value); std::string item(std::string tag, unsigned int value); std::string item(std::string tag, bool value); std::string trim(std::string str); bool fullscreen; Uint32 fullscreenFlag; public: std::string savePath; std::string tilesetPath; std::string fontPath; std::string titlePath; std::string UIPath; std::string tetroPath; std::string musicPath; std::string rotateSFXPath; std::string softdropSFXPath; std::string harddropSFXPath; std::string clearsingleSFXPath; std::string cleardoubleSFXPath; std::string cleartripleSFXPath; std::string cleartetrisSFXPath; std::string levelupSFXPath; std::string gameoverSFXPath; std::string countdown1SFXPath; std::string countdown2SFXPath; unsigned int screenWidth; unsigned int screenHeight; int music_volume; // 0 - 128 int sfx_volume; // 0 - 128 bool mute_music; bool mute_sfx; std::vector<std::string> input; Config(std::string filename); void setFullscreen(bool value); bool isFullscreen() const; Uint32 getFullscreenFlag() const; void saveToFile(); }; #endif
24.431034
58
0.685956
[ "vector" ]
0eb9e3daaf9c48ccc7cbb10634ac3eb84f35dbbe
3,733
cpp
C++
integ_trajectory_tracking_HT/src/CTC_controller.cpp
NSicre/integration_project_ecn
0f9161eef442173d532bca8548d36ec89b6248ba
[ "MIT" ]
8
2022-01-31T13:56:14.000Z
2022-02-09T13:06:33.000Z
integ_trajectory_tracking_HT/src/CTC_controller.cpp
NSicre/integration_project_ecn
0f9161eef442173d532bca8548d36ec89b6248ba
[ "MIT" ]
null
null
null
integ_trajectory_tracking_HT/src/CTC_controller.cpp
NSicre/integration_project_ecn
0f9161eef442173d532bca8548d36ec89b6248ba
[ "MIT" ]
13
2022-01-31T13:56:16.000Z
2022-02-21T17:03:37.000Z
#include <ros/ros.h> #include <ros/package.h> #include <string> #include <math.h> #include <sstream> #include <iostream> #include <control_toolbox/SetPidGains.h> #include <cmath> //#include <gkd_models/Dynamic.h> #include <std_msgs/Float64.h> #include <sensor_msgs/JointState.h> //inutile mais peut servir pour creer nos propres messages //#include <Suivi_traj_EK/Commande.h> using namespace std; using namespace control_toolbox; // global variables for subscriber sensor_msgs::JointState robot_state; sensor_msgs::JointState robot_trajectory; sensor_msgs::JointState commande; std_msgs::Float64 torque_q1_command; std_msgs::Float64 torque_q2_command; sensor_msgs::JointState jt_state; //gkd_models::Dynamic srv; control_toolbox::SetPidGainsRequest gains; void robot_stateCallback(const sensor_msgs::JointStatePtr & msg) { robot_state = *msg; } void robot_trajectoryCallback(const sensor_msgs::JointStatePtr & msg) //changer de message { robot_trajectory = *msg; } int main (int argc, char** argv) { ros::init(argc, argv, "CTC_controller"); ros::NodeHandle nh; // subscriber Etat // Position, velocity, acceleration of each motors ros::Subscriber robot_state_sub = nh.subscribe ("/joint_states", 10, robot_stateCallback); // subscriber Trajectoire // Position, velocity, acceleration desired of each motors ros::Subscriber robot_trajectory_sub = nh.subscribe ("/trajectory", 10, robot_trajectoryCallback); // ros::ServiceClient client = nh.serviceClient<>(""); // publisher effort q1 (Torque of first motor) ros::Publisher torque1_publisher = nh.advertise<std_msgs::Float64>("/joint1_effort_controller/command", 10); // publisher effort q2 (Torque of second motor) ros::Publisher torque2_publisher = nh.advertise<std_msgs::Float64>("/joint2_effort_controller/command", 10); float Te=0.01; // choice of PID coefficients (Index 1 for first motor, Index 2 for second motors) float Kp1=120, Kp2=100, Kd1=0.1, Kd2=0.1, Ki1=0.1, Ki2=0.1; float integral_error[2] = {}; float position_error[2] = {}, 
velocity_error[2] = {}; float sum_before_matrices[2]={}; ros::Rate rate(1/Te); torque_q1_command.data = 0; torque_q2_command.data = 0; //effort = acceleration robot_state.position.resize(2); robot_state.velocity.resize(2); robot_state.effort.resize(2); robot_trajectory.position.resize(2); robot_trajectory.velocity.resize(2); robot_trajectory.effort.resize(2); while(ros::ok()) { //calculus of errors sums position_error[0] = robot_trajectory.position[0] - robot_state.position[0]; velocity_error[0] = robot_trajectory.velocity[0] - robot_state.velocity[0]; integral_error[0] += position_error[0]*Te; position_error[1] = robot_trajectory.position[1] - robot_state.position[1]; velocity_error[1] = robot_trajectory.velocity[1] - robot_state.velocity[1]; integral_error[1] += position_error[1]*Te; sum_before_matrices[0]=Kp1*position_error[0] + Kd1*velocity_error[0]/Te + Ki1*integral_error[0]+robot_trajectory.effort[0]; sum_before_matrices[1]=Kp2*position_error[1] + Kd2*velocity_error[1]/Te + Ki2*integral_error[1]+robot_trajectory.effort[1]; //then we ask for N and M matrices through a fonction c++ std::vector<int> M = {0, 0, 0, 0}; std::vector<int> N = {0, 0}; torque_q1_command.data = M[0]*sum_before_matrices[0] + M[1]*sum_before_matrices[1] + N[0]; torque_q2_command.data = M[2]*sum_before_matrices[0] + M[3]*sum_before_matrices[1] + N[1]; // publish setpoint torque1_publisher.publish(torque_q1_command); torque2_publisher.publish(torque_q2_command); ros::spinOnce(); rate.sleep(); } return 0; }
31.635593
127
0.721672
[ "vector" ]
0ec1e211806dc8108f2ce5a57ba78ece16061dc0
10,510
cpp
C++
components/settingsmanager.cpp
Acidburn0zzz/symphytum
2f3eae0d923ffd5d12a412fbd595d4df37cf15a3
[ "BSD-2-Clause" ]
null
null
null
components/settingsmanager.cpp
Acidburn0zzz/symphytum
2f3eae0d923ffd5d12a412fbd595d4df37cf15a3
[ "BSD-2-Clause" ]
null
null
null
components/settingsmanager.cpp
Acidburn0zzz/symphytum
2f3eae0d923ffd5d12a412fbd595d4df37cf15a3
[ "BSD-2-Clause" ]
null
null
null
/* * Copyright (c) 2012 Giorgio Wicklein <giowckln@gmail.com> */ //----------------------------------------------------------------------------- // Hearders //----------------------------------------------------------------------------- #include "settingsmanager.h" #include "../utils/definitionholder.h" #include <QtCore/QString> #include <QtCore/QStringList> #include <QtCore/QByteArray> #include <QtCore/QSettings> #include <QtCore/QVariant> #include <QtCore/QDate> //----------------------------------------------------------------------------- // Public //----------------------------------------------------------------------------- SettingsManager::SettingsManager() { if (DefinitionHolder::WIN_PORTABLE) { m_settings = new QSettings("portable_data/settings.ini", QSettings::IniFormat); } else { m_settings = new QSettings(); } } SettingsManager::~SettingsManager() { delete m_settings; } void SettingsManager::saveGeometry(const QString &objectName, const QByteArray &geometry) { m_settings->beginGroup(objectName); m_settings->setValue("geometry", geometry); m_settings->endGroup(); } QByteArray SettingsManager::restoreGeometry(const QString &objectName) const { QByteArray g; m_settings->beginGroup(objectName); g = m_settings->value("geometry").toByteArray(); m_settings->endGroup(); return g; } void SettingsManager::saveState(const QString &objectName, const QByteArray &state) { m_settings->beginGroup(objectName); m_settings->setValue("state", state); m_settings->endGroup(); } QByteArray SettingsManager::restoreState(const QString &objectName) const { QByteArray s; m_settings->beginGroup(objectName); s = m_settings->value("state").toByteArray(); m_settings->endGroup(); return s; } void SettingsManager::saveProperty(const QString &propertyName, const QString &objectName, const QVariant &value) { m_settings->beginGroup(objectName); m_settings->setValue(propertyName, value); m_settings->endGroup(); } QVariant SettingsManager::restoreProperty(const QString &propertyName, const 
QString &objectName) const { QVariant v; m_settings->beginGroup(objectName); v = m_settings->value(propertyName); m_settings->endGroup(); return v; } void SettingsManager::removeAllSettings() { m_settings->clear(); } void SettingsManager::deleteObjectProperties(const QString &objectName) { m_settings->remove(objectName); } void SettingsManager::duplicateObjectProperties(const QString &originalObjectName, const QString &duplicateObjectName) { QHash<QString, QVariant> originalProperties; m_settings->beginGroup(originalObjectName); QStringList originalKeys = m_settings->allKeys(); foreach (QString key, originalKeys) { originalProperties.insert(key, m_settings->value(key)); } m_settings->endGroup(); m_settings->beginGroup(duplicateObjectName); QHash<QString, QVariant>::const_iterator it; for (it = originalProperties.begin(); it != originalProperties.end(); ++it) { m_settings->setValue(it.key(), it.value()); } m_settings->endGroup(); } void SettingsManager::saveSoftwareBuild() { m_settings->beginGroup(DefinitionHolder::NAME.toLower()); m_settings->setValue("build", DefinitionHolder::SOFTWARE_BUILD); m_settings->endGroup(); } int SettingsManager::restoreSoftwareBuild() const { int i; m_settings->beginGroup(DefinitionHolder::NAME.toLower()); i = m_settings->value("build", 0).toInt(); //0 means no previous software launch m_settings->endGroup(); //return current build if no build was saved return (i > 0) ? 
i : DefinitionHolder::SOFTWARE_BUILD; } void SettingsManager::saveViewMode(int mode) { m_settings->beginGroup("mainWindow"); m_settings->setValue("viewMode", mode); m_settings->endGroup(); } int SettingsManager::restoreViewMode() const { int i; m_settings->beginGroup("mainWindow"); i = m_settings->value("viewMode", 0).toInt(); //0 is form view m_settings->endGroup(); return i; } void SettingsManager::saveLastUsedRecord(int row) { m_settings->beginGroup("mainWindow"); m_settings->setValue("lastUsedRecord", row); m_settings->endGroup(); } int SettingsManager::restoreLastUsedRecord() { int i; m_settings->beginGroup("mainWindow"); i = m_settings->value("lastUsedRecord", -1).toInt(); //-1 means invalid m_settings->endGroup(); return i; } bool SettingsManager::isCloudSyncActive() { bool a; m_settings->beginGroup("cloudSync"); a = m_settings->value("cloudSyncActive", false).toBool(); m_settings->endGroup(); return a; } void SettingsManager::setCloudSyncActive(bool a) { m_settings->beginGroup("cloudSync"); m_settings->setValue("cloudSyncActive", a); m_settings->endGroup(); } void SettingsManager::saveEncodedAccessToken(const QString &token) { m_settings->beginGroup("cloudSync"); m_settings->setValue("axTk", token); m_settings->endGroup(); } QString SettingsManager::restoreEncodedAccessToken() { QString t; m_settings->beginGroup("cloudSync"); t = m_settings->value("axTk", "").toString(); m_settings->endGroup(); return t; } void SettingsManager::saveCurrentCloudSyncService(int id) { m_settings->beginGroup("cloudSync"); m_settings->setValue("service", id); m_settings->endGroup(); } int SettingsManager::restoreCurrentCloudSyncService() { int id; m_settings->beginGroup("cloudSync"); id = m_settings->value("service", -1).toInt(); m_settings->endGroup(); return id; } void SettingsManager::setCloudSyncFirstTime(bool b) { m_settings->beginGroup("cloudSync"); m_settings->setValue("cloudSyncFirstTime", b); m_settings->endGroup(); } bool SettingsManager::isCloudSyncFirstTime() { bool 
b; m_settings->beginGroup("cloudSync"); b = m_settings->value("cloudSyncFirstTime", true).toBool(); m_settings->endGroup(); return b; } void SettingsManager::setCloudSyncInitialized(bool b) { m_settings->beginGroup("cloudSync"); m_settings->setValue("cloudSyncInit", b); m_settings->endGroup(); } bool SettingsManager::isCloudSyncInitialized() { bool b; m_settings->beginGroup("cloudSync"); b = m_settings->value("cloudSyncInit", false).toBool(); m_settings->endGroup(); return b; } void SettingsManager::saveCloudLocalDataChanged(bool b) { m_settings->beginGroup("cloudSync"); m_settings->setValue("localDataChanged", b); m_settings->endGroup(); } bool SettingsManager::restoreCloudLocalDataChanged() { bool b; m_settings->beginGroup("cloudSync"); b = m_settings->value("localDataChanged", false).toBool(); m_settings->endGroup(); return b; } void SettingsManager::saveCloudSyncRevision(quint64 revision) { m_settings->beginGroup("cloudSync"); m_settings->setValue("revision", revision); m_settings->endGroup(); } quint64 SettingsManager::restoreCloudSyncRevision() { quint64 r; m_settings->beginGroup("cloudSync"); r = m_settings->value("revision", 0).toULongLong(); m_settings->endGroup(); return r; } void SettingsManager::saveCloudSessionKey(const QString &sessionKey) { //SM has that private method for ding it, on sync deactivation m_settings->beginGroup("cloudSync"); m_settings->setValue("sessionKey", sessionKey); m_settings->endGroup(); } QString SettingsManager::restoreCloudSessionKey() { QString k; m_settings->beginGroup("cloudSync"); k = m_settings->value("sessionKey", "invalid").toString(); m_settings->endGroup(); return k; } void SettingsManager::saveCheckUpdates(bool b) { m_settings->beginGroup(DefinitionHolder::NAME.toLower()); m_settings->setValue("checkUpdates", b); m_settings->endGroup(); } bool SettingsManager::restoreCheckUpdates() { bool b; m_settings->beginGroup(DefinitionHolder::NAME.toLower()); b = m_settings->value("checkUpdates", true).toBool(); 
m_settings->endGroup(); return b; } void SettingsManager::saveToUploadList(const QStringList &list) { m_settings->beginGroup("cloudSync"); m_settings->setValue("toUpload", list); m_settings->endGroup(); } QStringList SettingsManager::restoreToUploadList() { QStringList s; m_settings->beginGroup("cloudSync"); s = m_settings->value("toUpload").toStringList(); m_settings->endGroup(); return s; } void SettingsManager::saveToDeleteList(const QStringList &list) { m_settings->beginGroup("cloudSync"); m_settings->setValue("toDelete", list); m_settings->endGroup(); } QStringList SettingsManager::restoreToDeleteList() { QStringList s; m_settings->beginGroup("cloudSync"); s = m_settings->value("toDelete").toStringList(); m_settings->endGroup(); return s; } void SettingsManager::saveToWatchList(const QHash<QString, QDateTime> &map) { QList<QVariant> keys; QList<QVariant> values; QList<QString> t_keys = map.keys(); for (int i = 0; i < map.size(); i++) { keys.append(t_keys.at(i)); values.append(map.value(t_keys.at(i))); } m_settings->beginGroup("cloudSync"); m_settings->setValue("toWatchKeys", keys); m_settings->setValue("toWatchValues", values); m_settings->endGroup(); } QHash<QString, QDateTime> SettingsManager::restoreToWatchList() { QHash<QString,QDateTime> map; QList<QVariant> keys; QList<QVariant> values; m_settings->beginGroup("cloudSync"); keys = m_settings->value("toWatchKeys").toList(); values = m_settings->value("toWatchValues").toList(); m_settings->endGroup(); for (int i = 0; i < keys.size(); i++) { map.insert(keys.at(i).toString(), values.at(i).toDateTime()); } return map; } void SettingsManager::saveCustomDatabaseDir(const QString &dbDir) { m_settings->beginGroup("database"); m_settings->setValue("customDirectory", dbDir); m_settings->endGroup(); } QString SettingsManager::restoreCustomDatabaseDir() { QString d; m_settings->beginGroup("database"); d = m_settings->value("customDirectory").toString(); m_settings->endGroup(); return d; } 
//----------------------------------------------------------------------------- // Private //-----------------------------------------------------------------------------
24.21659
103
0.662702
[ "geometry" ]
b1bfa69a486142a4f6a27e1c2b982f7d93cb93be
54,389
cpp
C++
source/Lib/TAppCommon/TAppComCamPara.cpp
PharrellWANG/HTM-16.2
1dcb9fa5397206640bee01bfa40803d863eb8985
[ "BSD-3-Clause" ]
null
null
null
source/Lib/TAppCommon/TAppComCamPara.cpp
PharrellWANG/HTM-16.2
1dcb9fa5397206640bee01bfa40803d863eb8985
[ "BSD-3-Clause" ]
1
2020-07-21T08:28:22.000Z
2020-07-21T08:28:22.000Z
source/Lib/TAppCommon/TAppComCamPara.cpp
PharrellWANG/HTM_16_2_CNN
fd802ebcffeccdc0df73d64d79412a2ba15514aa
[ "BSD-3-Clause" ]
null
null
null
/* The copyright in this software is being made available under the BSD * License, included below. This software may be subject to other third party * and contributor rights, including patent rights, and no such rights are * granted under this license. * * Copyright (c) 2010-2016, ITU/ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * Neither the name of the ISO/IEC nor the names of its contributors may * be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ // Include files #include "TAppComCamPara.h" #include <stdlib.h> #include <math.h> #include <errno.h> #include <assert.h> #include <vector> #include <iostream> #include <fstream> #include <algorithm> #include <functional> #include <string> #if NH_3D_VSO || NH_3D Void TAppComCamPara::xCreateLUTs( UInt uiNumberSourceViews, UInt uiNumberTargetViews, Double****& radLUT, Int****& raiLUT, Double***& radShiftParams, Int64***& raiShiftParams ) { AOF( m_uiBitDepthForLUT == 8 ); AOF( radShiftParams == NULL && raiShiftParams == NULL && radLUT == NULL && raiLUT == NULL ); uiNumberSourceViews = std::max( (UInt) 1, uiNumberSourceViews ); uiNumberTargetViews = std::max( (UInt) 1, uiNumberTargetViews ); radShiftParams = new Double** [ uiNumberSourceViews ]; raiShiftParams = new Int64 ** [ uiNumberSourceViews ]; radLUT = new Double***[ uiNumberSourceViews ]; raiLUT = new Int ***[ uiNumberSourceViews ]; for( UInt uiSourceView = 0; uiSourceView < uiNumberSourceViews; uiSourceView++ ) { radShiftParams[ uiSourceView ] = new Double* [ uiNumberTargetViews ]; raiShiftParams[ uiSourceView ] = new Int64 * [ uiNumberTargetViews ]; radLUT [ uiSourceView ] = new Double**[ uiNumberTargetViews ]; raiLUT [ uiSourceView ] = new Int **[ uiNumberTargetViews ]; for( UInt uiTargetView = 0; uiTargetView < uiNumberTargetViews; uiTargetView++ ) { radShiftParams[ uiSourceView ][ uiTargetView ] = new Double [ 2 ]; raiShiftParams[ uiSourceView ][ uiTargetView ] = new Int64 [ 2 ]; radLUT [ uiSourceView ][ uiTargetView ] = new Double*[ 2 ]; radLUT [ uiSourceView ][ uiTargetView ][ 0 ] = new Double [ 257 ]; radLUT [ uiSourceView ][ uiTargetView ][ 1 ] = new Double [ 257 ]; raiLUT [ uiSourceView ][ uiTargetView ] = new Int* [ 2 ]; raiLUT [ uiSourceView ][ uiTargetView ][ 0 ] = new Int [ 257 ]; raiLUT [ uiSourceView ][ uiTargetView ][ 1 ] = new Int [ 257 ]; } } } Void TAppComCamPara::xCreate2dArray( UInt uiNum1Ids, UInt uiNum2Ids, Int**& raaiArray ) { AOT( raaiArray || uiNum1Ids == 0 || uiNum2Ids == 0 ); 
raaiArray = new Int* [ uiNum1Ids ]; for( UInt uiId1 = 0; uiId1 < uiNum1Ids; uiId1++ ) { raaiArray[ uiId1 ] = new Int [ uiNum2Ids ]; } } Void TAppComCamPara::xInit2dArray( UInt uiNum1Ids, UInt uiNum2Ids, Int**& raaiArray, Int iValue ) { for( UInt uiId1 = 0; uiId1 < uiNum1Ids; uiId1++ ) { for( UInt uiId2 = 0; uiId2 < uiNum2Ids; uiId2++ ) { raaiArray[ uiId1 ][ uiId2 ] = iValue; } } } Void TAppComCamPara::convertNumberString( TChar* pchViewNumberString, std::vector<Int>& raiViewNumbers, Double dViewNumPrec ) { Bool bStringIsRange = false; Int iIdx = 0; std::vector<Double> adViewNumbers; while( pchViewNumberString != 0 && pchViewNumberString[ iIdx ] != 0 ) { if( pchViewNumberString[ iIdx ] == ':' ) { bStringIsRange = true; pchViewNumberString[ iIdx ] = ' '; } iIdx++; } TChar* pcNextStart = pchViewNumberString; TChar* pcEnd = pcNextStart + iIdx; TChar* pcOldStart = 0; while( pcNextStart < pcEnd ) { errno = 0; adViewNumbers.push_back( ( strtod( pcNextStart, &pcNextStart ) ) ); if( errno == ERANGE || pcNextStart == pcOldStart ) { std::cerr << "Error Parsing View Number String: `" << pchViewNumberString << "'" << std::endl; AOT(true); exit( EXIT_FAILURE ); }; while( pcNextStart < pcEnd && ( *pcNextStart == ' ' || *pcNextStart == '\t' || *pcNextStart == '\r' ) ) pcNextStart++; pcOldStart = pcNextStart; } if( bStringIsRange ) { if( adViewNumbers.size() != 3 ) { std::cerr << "Error Parsing SynthViewNumbers: `" << pchViewNumberString << "'" << std::endl; AOT(true); exit( EXIT_FAILURE ); } Double dRangeBegin = adViewNumbers[0]; Double dRangeStep = adViewNumbers[1]; Double dRangeEnd = adViewNumbers[2]; if( ( ( dRangeEnd - dRangeBegin > 0 ) != ( dRangeStep > 0 ) ) || dRangeStep == 0 ) { std::cerr << "Error Parsing SynthViewNumbers: `" << pchViewNumberString << "'" << std::endl; AOT(true); exit( EXIT_FAILURE ); } raiViewNumbers.clear(); Double dFac = ( dRangeBegin > dRangeEnd ? 
-1 : 1 ); for( Double dViewNumber = dRangeBegin; ( dViewNumber - dRangeEnd ) * dFac <= 0; dViewNumber += dRangeStep ) { raiViewNumbers.push_back( (Int)( dViewNumber * dViewNumPrec ) ); } } else { for( UInt uiViewNum = 0; uiViewNum < adViewNumbers.size(); uiViewNum++ ) { raiViewNumbers.push_back( (Int)( adViewNumbers[ uiViewNum ] * dViewNumPrec ) ); } } } Void TAppComCamPara::xReadCameraParameterFile( TChar* pchCfgFileName ) { assert( pchCfgFileName != NULL ); std::ifstream cCfgStream( pchCfgFileName, std::ifstream::in ); if( !cCfgStream ) { std::cerr << "Failed to open camera parameter file: `" << pchCfgFileName << "'" << std::endl; exit( EXIT_FAILURE ); } Int iLineNumber = 0; do { std::string cLine; getline( cCfgStream, cLine ); iLineNumber++; size_t iStart = cLine.find_first_not_of( " \t\n\r" ); if( iStart == std::string::npos ) { continue; } if( cLine[iStart] == '#' ) { continue; } TChar* pcNextStart = (TChar*) cLine.data(); TChar* pcEnd = pcNextStart + cLine.length(); std::vector<Double> caNewLine; caNewLine.clear(); TChar* pcOldStart = 0; while( pcNextStart < pcEnd ) { errno = 0; caNewLine.push_back( strtod( pcNextStart, &pcNextStart ) ) ; if( errno == ERANGE || ( pcNextStart == pcOldStart ) ) { std::cerr << "Failed reading config file: `" << pchCfgFileName << "' Error parsing double values in Line: " << iLineNumber << ' ' << std::endl; assert( 0 ); exit( EXIT_FAILURE ); }; pcOldStart = pcNextStart; while( ( pcNextStart < pcEnd ) && ( *pcNextStart == ' ' || *pcNextStart == '\t' || *pcNextStart == '\r' ) ) pcNextStart++; } if ( ( caNewLine.size() != 2 ) && ( caNewLine.size() != 7 ) && ( caNewLine.size() != 6 ) && ( caNewLine.size() != 8 ) ) { std::cerr << "Failed reading config file: `" << pchCfgFileName << "'" << std::endl; std::cerr << "Invalid number of entries" << std::endl; AOF(false); exit( EXIT_FAILURE ); } m_aadCameraParameters.push_back( caNewLine ); } while( cCfgStream ); } Void TAppComCamPara::xGetCodedCameraData( UInt uiSourceView, UInt 
uiTargetView, Bool bByIdx, UInt uiFrame, Int& riScale, Int& riOffset, Int& riPrecision ) { if( bByIdx ) { uiSourceView = m_aiBaseViews[ uiSourceView ]; uiTargetView = m_aiBaseViews[ uiTargetView ]; } Int iFoundLine = -1; for( UInt uiCurViewLine = 0; uiCurViewLine < m_aadCameraParameters.size(); uiCurViewLine++ ) { if ( m_aadCameraParameters[uiCurViewLine].size() == 2 ) continue; if( ( (Int)( m_aadCameraParameters[ uiCurViewLine ][ 3 ] * m_dViewNumPrec ) == uiSourceView ) && ( (Int)( m_aadCameraParameters[ uiCurViewLine ][ 2 ] * m_dViewNumPrec ) == uiTargetView ) ) { if( ( (UInt)m_aadCameraParameters[ uiCurViewLine ][ 0 ] <= uiFrame ) && ( (UInt)m_aadCameraParameters[ uiCurViewLine ][ 1 ] >= uiFrame ) ) { if( iFoundLine != -1 ) { std::cerr << "Error CameraParameters for SourceView " << (Double) uiSourceView / m_dViewNumPrec << " and Target View " << (Double) uiTargetView / m_dViewNumPrec << " and Frame " << uiFrame << " given multiple times." << std::endl; AOT(true); exit( EXIT_FAILURE ); } else { iFoundLine = uiCurViewLine; } } } } if ( iFoundLine == -1 ) { std::cerr << "Error CameraParameters for SourceView " << (Double) uiSourceView / m_dViewNumPrec << " and Target View " << (Double) uiTargetView / m_dViewNumPrec << " and Frame " << uiFrame << " not found." 
<< std::endl; AOT(true); exit( EXIT_FAILURE ); } riScale = (Int)( m_aadCameraParameters[ iFoundLine ][ 4 ] ); riOffset = (Int)( m_aadCameraParameters[ iFoundLine ][ 5 ] ); riPrecision = (Int)( m_aadCameraParameters[ iFoundLine ][ 6 ] ); } Bool TAppComCamPara::xGetCameraDataRow( Int iView, UInt uiFrame, UInt& ruiFoundLine ) { ruiFoundLine = -1; for( UInt uiCurViewLine = 0; uiCurViewLine < m_aadCameraParameters.size(); uiCurViewLine++ ) { if( (Int)( m_aadCameraParameters[ uiCurViewLine ][ 0 ] * m_dViewNumPrec ) == iView ) { if( ( (UInt)m_aadCameraParameters[ uiCurViewLine ][ 1 ] <= uiFrame ) && ( (UInt)m_aadCameraParameters[ uiCurViewLine ][ 2 ] >= uiFrame ) ) { if( ruiFoundLine != -1 ) { std::cerr << "Error CameraParameters for View " << (Double) iView / m_dViewNumPrec << " and Frame " << uiFrame << " given multiple times." << std::endl; exit( EXIT_FAILURE ); } else { ruiFoundLine = uiCurViewLine; } } } } return ( ruiFoundLine == -1 ); } Void TAppComCamPara::xGetSortedViewList( const std::vector<Int>& raiViews, std::vector<Int>& raiSortedViews, std::vector<Int>& raiId2SortedId, std::vector<Int>& raiSortedId2Id ) { AOF( raiViews.size() > 0 ); Int iNumViews = (Int)raiViews.size(); raiId2SortedId = std::vector<Int>( raiViews.size(), -1 ); raiSortedId2Id.clear(); raiSortedViews.clear(); for( Int iSortId = 0; iSortId < iNumViews; iSortId++ ) { Int iLeftMostBaseId = -1; for( Int iBaseId = 0; iLeftMostBaseId == -1 && iBaseId < iNumViews; iBaseId++ ) { if( raiId2SortedId[ iBaseId ] == -1 ) { UInt uiFoundLine = -1; xGetCameraDataRow( raiViews[ iBaseId ], 0, uiFoundLine ); AOT( uiFoundLine == -1 ); // something wrong Double dXPos = m_aadCameraParameters[ uiFoundLine ][ 4 ]; Double dZNear = m_aadCameraParameters[ uiFoundLine ][ 6 ]; Double dZFar = m_aadCameraParameters[ uiFoundLine ][ 7 ]; Double dSign = ( dZFar > 0 ? 
1.0 : -1.0 ); Bool bLeftMost = true; AOF( dZNear * dZFar > 0.0 ); // otherwise, z parameters are not correct for( Int iTestBaseId = 0; bLeftMost && iTestBaseId < iNumViews; iTestBaseId++ ) { if( iTestBaseId != iBaseId && raiId2SortedId[ iTestBaseId ] == -1 ) { UInt uiFoundLineTest = -1; xGetCameraDataRow( raiViews[ iTestBaseId ], 0, uiFoundLineTest ); AOT( uiFoundLineTest == -1 ); // something wrong Double dXPosTest = m_aadCameraParameters[ uiFoundLineTest ][ 4 ]; Double dZNearTest = m_aadCameraParameters[ uiFoundLineTest ][ 6 ]; Double dZFarTest = m_aadCameraParameters[ uiFoundLineTest ][ 7 ]; AOF( dZNearTest * dZFarTest > 0.0 ); // otherwise, z parameters are not correct AOF( dZNearTest * dSign > 0.0 ); // otherwise, z parameters are not consistent Double dDeltaXPos = dSign * ( dXPosTest - dXPos ); bLeftMost = ( bLeftMost && dDeltaXPos > 0.0 ); } } if( bLeftMost ) { iLeftMostBaseId = iBaseId; } } } AOT( iLeftMostBaseId == -1 ); // something wrong raiId2SortedId[ iLeftMostBaseId ] = iSortId; raiSortedId2Id.push_back( iLeftMostBaseId ); raiSortedViews.push_back( raiViews[ iLeftMostBaseId ] ); } // sanity check if( iNumViews > 2 ) { Int iDeltaView = gSign( raiSortedViews[ 1 ] - raiSortedViews[ 0 ] ); Bool bOutOfOrder = false; for( Int iSIdx = 2; iSIdx < iNumViews; iSIdx++ ) { bOutOfOrder = ( bOutOfOrder || iDeltaView * gSign( raiSortedViews[ iSIdx ] - raiSortedViews[ iSIdx - 1 ] ) < 0 ); } if( bOutOfOrder ) { std::cerr << "ERROR: View numbering must be strictly increasing or decreasing from left to right" << std::endl; exit(EXIT_FAILURE); } } } Bool TAppComCamPara::xGetCamParsChangeFlag() { Bool bChangeDetected = false; for( Int iBaseViewId = 0; !bChangeDetected && iBaseViewId < m_iNumberOfBaseViews; iBaseViewId++ ) { if ( m_bSetupFromCoded ) { for( Int iTargetViewId = 0; !bChangeDetected && iTargetViewId < m_iNumberOfBaseViews; iTargetViewId++ ) { Int iTargetView = m_aiBaseViews[iTargetViewId]; Int iSourceView = m_aiBaseViews[iBaseViewId ]; Int iS1 ,iSX; Int iO1 
,iOX; Int iP1 ,iPX; if ( iSourceView == iTargetView ) continue; xGetCodedCameraData( iSourceView, iTargetView, false, 0, iS1, iO1, iP1 ); for( UInt uiFrameId = m_uiFirstFrameId + 1; !bChangeDetected && uiFrameId <= m_uiLastFrameId; uiFrameId++ ) { xGetCodedCameraData( iSourceView, iTargetView, false, uiFrameId, iSX, iOX, iPX ); if( iS1 != iSX || iO1 != iOX || iP1 != iPX ) { bChangeDetected = true; } } } } else { Int iBaseView = m_aiBaseViews[ iBaseViewId ]; Double dFL1, dFLX; Double dCP1, dCPX; Double dCS1, dCSX; Double dZN1, dZNX; Double dZF1, dZFX; Bool bInterpolated; xGetGeometryData( iBaseView, m_uiFirstFrameId, dFL1, dCP1, dCS1, bInterpolated ); AOT( bInterpolated ); xGetZNearZFar ( iBaseView, m_uiFirstFrameId, dZN1, dZF1 ); for( UInt uiFrameId = m_uiFirstFrameId + 1; !bChangeDetected && uiFrameId <= m_uiLastFrameId; uiFrameId++ ) { xGetGeometryData( iBaseView, uiFrameId, dFLX, dCPX, dCSX, bInterpolated ); AOT( bInterpolated ); xGetZNearZFar ( iBaseView, uiFrameId, dZNX, dZFX ); if( dFL1 != dFLX || dCP1 != dCPX || dCS1 != dCSX || dZN1 != dZNX || dZF1 != dZFX ) { bChangeDetected = true; } } } } return bChangeDetected; } Int TAppComCamPara::xGetViewId( std::vector<Int> aiViewList, Int iBaseView ) { Int iViewId = -1; for( Int iId = 0; iId < (Int)aiViewList.size(); iId++ ) { if( aiViewList[ iId ] == iBaseView ) { iViewId = iId; break; } } AOT( iViewId == -1 ); return iViewId; } Int TAppComCamPara::xGetBaseViewId( Int iBaseView ) { return xGetViewId( m_aiBaseViews, iBaseView ); } Bool TAppComCamPara::xGetLeftRightView( Int iView, std::vector<Int> aiSortedViews, Int& riLeftView, Int& riRightView, Int& riLeftSortedViewIdx, Int& riRightSortedViewIdx ) { Bool bFoundLRView = false; Int iLeftView = -1; Int iRightView = -1; Int iLeftViewIdx = -1; Int iRightViewIdx = -1; Bool bDecencdingVN = ( aiSortedViews.size() >= 2 && aiSortedViews[ 0 ] > aiSortedViews[ 1 ] ); Int iFactor = ( bDecencdingVN ? 
-1 : 1 ); for( Int iIdx = -1; iIdx < (Int)aiSortedViews.size(); iIdx++ ) { if( iIdx == -1 ) { if( ( aiSortedViews[ iIdx + 1 ] - iView ) * iFactor > 0 ) { bFoundLRView = false; iLeftView = -1; iRightView = aiSortedViews[ iIdx + 1 ]; iLeftViewIdx = -1; iRightViewIdx = iIdx + 1; break; } } else if ( iIdx == (Int)aiSortedViews.size() - 1 ) { if( ( aiSortedViews[ iIdx ] - iView ) * iFactor < 0 ) { bFoundLRView = false; iLeftView = aiSortedViews[ iIdx ]; iRightView = -1; iLeftViewIdx = iIdx; iRightViewIdx = -1; break; } } else { if( ( ( aiSortedViews[ iIdx ] - iView ) * iFactor <= 0 ) && ( ( aiSortedViews[ iIdx + 1 ] - iView ) * iFactor >= 0 ) ) { bFoundLRView = true; iLeftView = aiSortedViews[ iIdx ]; iRightView = aiSortedViews[ iIdx + 1 ]; iLeftViewIdx = iIdx; iRightViewIdx = iIdx + 1; break; } } } if ( ( iView == iLeftView ) || ( iView == iRightView ) ) { iLeftViewIdx = ( iView == iLeftView ) ? iLeftViewIdx : iRightViewIdx; iRightViewIdx = iLeftViewIdx; iLeftView = iView; iRightView = iView; bFoundLRView = false; } riLeftView = iLeftView; riRightView = iRightView; riLeftSortedViewIdx = iLeftViewIdx; riRightSortedViewIdx = iRightViewIdx; return bFoundLRView; } Void TAppComCamPara::xGetPrevAndNextBaseView( Int iSourceViewNum, Int iTargetViewNum, Int& riPrevBaseViewNum, Int& riNextBaseViewNum ) { Int iLeftView; Int iRightView; Int iDummy; xGetLeftRightView( iTargetViewNum, m_aiSortedBaseViews, iLeftView, iRightView, iDummy, iDummy ); if( iLeftView == iRightView ) { riPrevBaseViewNum = iLeftView; riNextBaseViewNum = iLeftView; } else { Bool bDecencdingVN = ( m_aiSortedBaseViews.size() >= 2 && m_aiSortedBaseViews[ 0 ] > m_aiSortedBaseViews[ 1 ] ); Bool bNextViewIsLeft = ( bDecencdingVN ? 
( iSourceViewNum < iTargetViewNum ) : ( iSourceViewNum > iTargetViewNum ) ); if ( bNextViewIsLeft ) { riPrevBaseViewNum = iRightView; riNextBaseViewNum = iLeftView; } else { riPrevBaseViewNum = iLeftView; riNextBaseViewNum = iRightView; } } } Void TAppComCamPara::xGetZNearZFar( Int iView, UInt uiFrame, Double& rdZNear, Double& rdZFar ) { UInt uiFoundLine = -1; if( !xGetCameraDataRow( iView, uiFrame, uiFoundLine ) || !( m_aadCameraParameters[ uiFoundLine ].size() < 8 ) ) { rdZNear = m_aadCameraParameters[ uiFoundLine ][ 6 ]; rdZFar = m_aadCameraParameters[ uiFoundLine ][ 7 ]; } else { std::cerr << "No ZNear or no ZFar for View " << (Double)iView / m_dViewNumPrec << " and Frame " << uiFrame << " given in CameraParameterFile" << std::endl; exit( EXIT_FAILURE ); } } Void TAppComCamPara::xGetGeometryData( Int iView, UInt uiFrame, Double& rdFocalLength, Double& rdPosition, Double& rdCameraShift, Bool& rbInterpolated ) { UInt uiFoundLine = -1; if ( !xGetCameraDataRow( iView, uiFrame, uiFoundLine ) && xIsIn( m_aiSortedBaseViews, iView )) { AOT( m_aadCameraParameters[ uiFoundLine ].size() < 6 ); rbInterpolated = false; rdFocalLength = m_aadCameraParameters[ uiFoundLine ][ 3 ]; rdPosition = m_aadCameraParameters[ uiFoundLine ][ 4 ]; rdCameraShift = m_aadCameraParameters[ uiFoundLine ][ 5 ]; } else { UInt uiLeftViewLine; UInt uiRightViewLine; Int iLeftView; Int iRightView; Int iDummy; if( !xGetLeftRightView( iView, m_aiSortedBaseViews, iLeftView, iRightView, iDummy, iDummy ) || xGetCameraDataRow( iLeftView, uiFrame, uiLeftViewLine ) || xGetCameraDataRow( iRightView, uiFrame, uiRightViewLine ) ) { std::cerr << "No left or no right base view next to view " << (Double)iView / m_dViewNumPrec << " for Frame " << uiFrame << " given in CameraParameterFile" << std::endl; AOT(true); exit( EXIT_FAILURE ); } AOT( m_aadCameraParameters[ uiLeftViewLine ].size() < 6 ); AOT( m_aadCameraParameters[ uiRightViewLine ].size() < 6 ); // Linear Interpolation Double dFactor = ( (Double)( iView - 
iLeftView ) ) / ( (Double)( iRightView - iLeftView ) ); rdFocalLength = m_aadCameraParameters[ uiLeftViewLine ][ 3 ] + dFactor * ( m_aadCameraParameters[ uiRightViewLine ][ 3 ] - m_aadCameraParameters[ uiLeftViewLine ][ 3 ] ); rdPosition = m_aadCameraParameters[ uiLeftViewLine ][ 4 ] + dFactor * ( m_aadCameraParameters[ uiRightViewLine ][ 4 ] - m_aadCameraParameters[ uiLeftViewLine ][ 4 ] ); rdCameraShift = m_aadCameraParameters[ uiLeftViewLine ][ 5 ] + dFactor * ( m_aadCameraParameters[ uiRightViewLine ][ 5 ] - m_aadCameraParameters[ uiLeftViewLine ][ 5 ] ); rbInterpolated = true; } } Bool TAppComCamPara::xGetShiftParameterReal( UInt uiSourceView, UInt uiTargetView, UInt uiFrame, Bool bExternal, Bool bByIdx, Double& rdScale, Double& rdOffset ) { AOT( m_bSetupFromCoded ); Bool bInterpolatedSource; Double dMinDepthSource; Double dMaxDepthSource; Double dFocalLengthSource; Double dPositionSource; Double dIntersectionSource; Bool bInterpolatedTarget; Double dPositionTarget; Double dIntersectionTarget; Double dFocalLengthTarget; Int iTargetViewNum; Int iSourceViewNum; if( bByIdx ) { iSourceViewNum = m_aiBaseViews[ uiSourceView ]; iTargetViewNum = ( bExternal ? 
m_aiSynthViews[ uiTargetView ] : m_aiBaseViews[ uiTargetView ] ); } else { iSourceViewNum = (Int) uiSourceView; iTargetViewNum = (Int) uiTargetView; } xGetGeometryData( iSourceViewNum, uiFrame, dFocalLengthSource, dPositionSource, dIntersectionSource, bInterpolatedSource ); xGetZNearZFar ( iSourceViewNum, uiFrame, dMinDepthSource, dMaxDepthSource ); xGetGeometryData( iTargetViewNum, uiFrame, dFocalLengthTarget, dPositionTarget, dIntersectionTarget, bInterpolatedTarget ); Double dFactor = dFocalLengthSource * ( dPositionTarget - dPositionSource ); rdScale = dFactor * ( 1.0 / dMinDepthSource - 1.0 / dMaxDepthSource ) / (Double)( ( 1 << m_uiInputBitDepth ) - 1 ); rdOffset = dFactor / dMaxDepthSource - dIntersectionTarget + dIntersectionSource; return ( bInterpolatedSource || bInterpolatedTarget ); } Void TAppComCamPara::xGetShiftParameterCoded( UInt uiSourceView, UInt uiTargetView, UInt uiFrame, Bool bByIdx, Int& riScale, Int& riOffset ) { if ( m_bSetupFromCoded ) { if ( uiSourceView == uiTargetView ) { riScale = 0; riOffset = 0; return; } Int iCamParsCodedPrecision; xGetCodedCameraData( uiSourceView, uiTargetView, bByIdx, uiFrame, riScale, riOffset, iCamParsCodedPrecision ); if ( m_bCamParsCodedPrecSet ) { AOT( m_uiCamParsCodedPrecision != (UInt) iCamParsCodedPrecision ); } else { m_uiCamParsCodedPrecision = (UInt) iCamParsCodedPrecision; m_bCamParsCodedPrecSet = true; } } else { Double dScale, dOffset; Bool bInterpolated = xGetShiftParameterReal( uiSourceView, uiTargetView, uiFrame, false, bByIdx, dScale, dOffset ); AOT( bInterpolated ); // must be base view Double dMultOffset = (Double)( 1 << ( m_uiCamParsCodedPrecision + 1 ) ); Double dMultScale = (Double)( 1 << ( m_uiCamParsCodedPrecision + 1 + m_uiInputBitDepth ) ); riOffset = (Int)floor( dMultOffset * dOffset + .5 ); riScale = (Int)floor( dMultScale * dScale + .5 ); } } Void TAppComCamPara::xGetShiftParameterInt( UInt uiSourceView, UInt uiTargetView, UInt uiFrame, Bool bExternal, Bool bByIdx, Int64& riScale, 
Int64& riOffset ) { Int iTargetViewNum; Int iSourceViewNum; Int iPrevBaseViewNum; Int iNextBaseViewNum; Int iTargetViewRelNum; if( bByIdx ) { iSourceViewNum = m_aiBaseViews[ uiSourceView ]; if ( bExternal ) { iTargetViewNum = m_aiSynthViews [ uiTargetView ]; iTargetViewRelNum = m_aiRelSynthViewsNum[ uiTargetView ]; } else { iTargetViewNum = m_aiBaseViews [ uiTargetView ]; iTargetViewRelNum = m_aiBaseId2SortedId [ uiTargetView ] * ((Int) m_dViewNumPrec ); } } else { iSourceViewNum = (Int) uiSourceView; iTargetViewNum = (Int) uiTargetView; if ( bExternal ) { iTargetViewRelNum = m_aiRelSynthViewsNum[ xGetViewId( m_aiSynthViews, (Int) uiTargetView )]; } else { iTargetViewRelNum = m_aiBaseId2SortedId[ xGetBaseViewId( uiTargetView) ] * ((Int) m_dViewNumPrec ); } } xGetPrevAndNextBaseView( iSourceViewNum, iTargetViewNum, iPrevBaseViewNum, iNextBaseViewNum ); AOT( iPrevBaseViewNum == -1 ); // should not happen AOT( iNextBaseViewNum == -1 ); // should not happen Int iSrcId = xGetBaseViewId( iSourceViewNum ); Int iPrevId = xGetBaseViewId( iPrevBaseViewNum ); Int iNextId = xGetBaseViewId( iNextBaseViewNum ); AOF( m_aaiScaleAndOffsetSet[ iSrcId ][ iPrevId ] ); // coded scale and offset must be set AOF( m_aaiScaleAndOffsetSet[ iSrcId ][ iNextId ] ); // coded scale and offset must be set Int iNextBaseViewRelNum = m_aiBaseId2SortedId[ iNextId ] * ((Int) m_dViewNumPrec ); Int iPrevBaseViewRelNum = m_aiBaseId2SortedId[ iPrevId ] * ((Int) m_dViewNumPrec ); Int64 iPrevScale = (Int64)m_aaiCodedScale [ iSrcId ][ iPrevId ]; Int64 iNextScale = (Int64)m_aaiCodedScale [ iSrcId ][ iNextId ]; Int64 iPrevOffset = (Int64)m_aaiCodedOffset[ iSrcId ][ iPrevId ] << m_uiBitDepthForLUT; Int64 iNextOffset = (Int64)m_aaiCodedOffset[ iSrcId ][ iNextId ] << m_uiBitDepthForLUT; if( iPrevBaseViewNum == iNextBaseViewNum ) { riScale = iNextScale; riOffset = iNextOffset; } else { riScale = Int64( iTargetViewRelNum - iPrevBaseViewRelNum ) * iNextScale; riScale += Int64( iNextBaseViewRelNum - iTargetViewRelNum 
) * iPrevScale; riOffset = Int64( iTargetViewRelNum - iPrevBaseViewRelNum ) * iNextOffset; riOffset += Int64( iNextBaseViewRelNum - iTargetViewRelNum ) * iPrevOffset; Int64 iD = Int64( iNextBaseViewRelNum - iPrevBaseViewRelNum ); Int64 iSA = ( riScale > 0 ? iD / 2 : -iD / 2 ); Int64 iOA = ( riOffset > 0 ? iD / 2 : -iD / 2 ); riScale = ( riScale + iSA ) / iD; riOffset = ( riOffset + iOA ) / iD; } } Void TAppComCamPara::xSetCodedScaleOffset( UInt uiFrame ) { for( UInt uiSourceId = 0; uiSourceId < m_iNumberOfBaseViews; uiSourceId++ ) { for( UInt uiTargetId = 0; uiTargetId < m_iNumberOfBaseViews; uiTargetId++ ) { Int iScale, iOffset; xGetShiftParameterCoded( uiSourceId, uiTargetId, uiFrame, true, iScale, iOffset ); m_aaiCodedScale [ uiSourceId ][ uiTargetId ] = iScale; m_aaiCodedOffset [ uiSourceId ][ uiTargetId ] = iOffset; m_aaiScaleAndOffsetSet [ uiSourceId ][ uiTargetId ] = 1; } } } Void TAppComCamPara::xSetShiftParametersAndLUT( UInt uiNumberSourceViews, UInt uiNumberTargetViews, UInt uiFrame, Bool bExternalReference , Double****& radLUT, Int****& raiLUT, Double***& radShiftParams, Int64***& raiShiftParams ) { if( uiNumberSourceViews <= 1 || uiNumberTargetViews == 0 ) { return; } AOF( radShiftParams != NULL && raiShiftParams != NULL && radLUT != NULL && raiLUT != NULL ); AOF( m_uiBitDepthForLUT == 8 ); Int iLog2DivLuma = m_uiBitDepthForLUT + m_uiCamParsCodedPrecision + 1 - m_iLog2Precision; AOF( iLog2DivLuma > 0 ); Int iLog2DivChroma = iLog2DivLuma + 1; Double dMaxDispDev = 0.0; Double dMaxRndDispDvL = 0.0; Double dMaxRndDispDvC = 0.0; for( UInt uiSourceView = 0; uiSourceView < uiNumberSourceViews; uiSourceView++ ) { for( UInt uiTargetView = 0; uiTargetView < uiNumberTargetViews; uiTargetView++ ) { // integer-valued scale and offset Int64 iScale, iOffset; xGetShiftParameterInt ( uiSourceView, uiTargetView, uiFrame, bExternalReference, true, iScale, iOffset ); raiShiftParams[ uiSourceView][ uiTargetView ][ 0 ] = iScale; raiShiftParams[ uiSourceView][ uiTargetView 
][ 1 ] = iOffset; // offsets including rounding offsets Int64 iOffsetLuma = iOffset + ( ( 1 << iLog2DivLuma ) >> 1 ); Int64 iOffsetChroma = iOffset + ( ( 1 << iLog2DivChroma ) >> 1 ); // real-valued scale and offset Double dScale, dOffset; if ( m_bSetupFromCoded ) { dScale = (Double) iScale / (( Double ) ( 1 << iLog2DivLuma )); dOffset = (Double) iOffset / (( Double ) ( 1 << iLog2DivLuma )); } else { xGetShiftParameterReal( uiSourceView, uiTargetView, uiFrame, bExternalReference, true, dScale, dOffset ); } radShiftParams[ uiSourceView][ uiTargetView ][ 0 ] = dScale; radShiftParams[ uiSourceView][ uiTargetView ][ 1 ] = dOffset; for( UInt uiDepthValue = 0; uiDepthValue < 256; uiDepthValue++ ) { // real-valued look-up tables Double dShiftLuma = ( (Double)uiDepthValue * dScale + dOffset ) * Double( 1 << m_iLog2Precision ); Double dShiftChroma = dShiftLuma / 2; radLUT[ uiSourceView ][ uiTargetView ][ 0 ][ uiDepthValue ] = dShiftLuma; radLUT[ uiSourceView ][ uiTargetView ][ 1 ][ uiDepthValue ] = dShiftChroma; // integer-valued look-up tables Int64 iTempScale = (Int64)uiDepthValue * iScale; Int64 iTestScale = ( iTempScale + iOffset ); // for checking accuracy of camera parameters Int64 iShiftLuma = ( iTempScale + iOffsetLuma ) >> iLog2DivLuma; Int64 iShiftChroma = ( iTempScale + iOffsetChroma ) >> iLog2DivChroma; raiLUT[ uiSourceView ][ uiTargetView ][ 0 ][ uiDepthValue ] = (Int)iShiftLuma; raiLUT[ uiSourceView ][ uiTargetView ][ 1 ][ uiDepthValue ] = (Int)iShiftChroma; // maximum deviation #if NH_3D_REN_MAX_DEV_OUT m_dMaxShiftDeviation = std::max( m_dMaxShiftDeviation, fabs( Double( (Int) iShiftLuma ) - dShiftLuma ) / Double( 1 << m_iLog2Precision ) ); #endif dMaxDispDev = std::max( dMaxDispDev, fabs( Double( (Int) iTestScale ) - dShiftLuma * Double( 1 << iLog2DivLuma ) ) / Double( 1 << iLog2DivLuma ) ); dMaxRndDispDvL = std::max( dMaxRndDispDvL, fabs( Double( (Int) iShiftLuma ) - dShiftLuma ) ); dMaxRndDispDvC = std::max( dMaxRndDispDvC, fabs( Double( (Int) iShiftChroma 
) - dShiftChroma ) ); } radLUT[ uiSourceView ][ uiTargetView ][ 0 ][ 256 ] = radLUT[ uiSourceView ][ uiTargetView ][ 0 ][ 255 ]; radLUT[ uiSourceView ][ uiTargetView ][ 1 ][ 256 ] = radLUT[ uiSourceView ][ uiTargetView ][ 1 ][ 255 ]; raiLUT[ uiSourceView ][ uiTargetView ][ 0 ][ 256 ] = raiLUT[ uiSourceView ][ uiTargetView ][ 0 ][ 255 ]; raiLUT[ uiSourceView ][ uiTargetView ][ 1 ][ 256 ] = raiLUT[ uiSourceView ][ uiTargetView ][ 1 ][ 255 ]; } } // check maximum deviation Double dMaxAllowedDispDev = Double( 1 << m_iLog2Precision ) / Double( 1 << m_uiCamParsCodedPrecision ); // counting only the impact of camera parameter rounding Double dMaxAllowedRndDispDvL = 0.5 + Double( 1 << m_iLog2Precision ) / Double( 1 << m_uiCamParsCodedPrecision ); // final rounding and impact of camera parameter rounding Double dMaxAllowedRndDispDvC = 0.5 + Double( 1 << m_iLog2Precision ) / Double( 1 << m_uiCamParsCodedPrecision ) / 2.0; // final rounding and impact of camera parameter rounding if( ( dMaxDispDev >= dMaxAllowedDispDev || dMaxRndDispDvL >= dMaxAllowedRndDispDvL || dMaxRndDispDvC >= dMaxAllowedRndDispDvC ) && !m_bSetupFromCoded ) { std::cout << "Warning: Something wrong with the accuracy of coded camera parameters:" << std::endl; if( dMaxDispDev >= dMaxAllowedDispDev ) { std::cout << " max disparity difference is " << dMaxDispDev << " (allowed: " << dMaxAllowedDispDev << ")" << std::endl; } if( dMaxRndDispDvL >= dMaxAllowedRndDispDvL ) { std::cout << " max rnd luma disp diff is " << dMaxRndDispDvL << " (allowed: " << dMaxAllowedRndDispDvL << ")" << std::endl; } if( dMaxRndDispDvC >= dMaxAllowedRndDispDvC ) { std::cout << " max rnd chroma disp diff is " << dMaxRndDispDvC << " (allowed: " << dMaxAllowedRndDispDvC << ")" << std::endl; } } } Void TAppComCamPara::xSetShiftParametersAndLUT( UInt uiFrame ) { xInit2dArray ( (UInt)m_iNumberOfBaseViews, (UInt)m_iNumberOfBaseViews, m_aaiScaleAndOffsetSet, 0 ); xSetCodedScaleOffset ( uiFrame ); xSetShiftParametersAndLUT( 
(UInt)m_iNumberOfBaseViews, (UInt)m_iNumberOfBaseViews, uiFrame, false, m_adBaseViewShiftLUT, m_aiBaseViewShiftLUT, m_adBaseViewShiftParameter, m_aiBaseViewShiftParameter ); xSetShiftParametersAndLUT( (UInt)m_iNumberOfBaseViews, (UInt)m_iNumberOfSynthViews, uiFrame, true, m_adSynthViewShiftLUT, m_aiSynthViewShiftLUT, m_adSynthViewShiftParameter, m_aiSynthViewShiftParameter ); }; Void TAppComCamPara::xGetCameraShifts( UInt uiSourceView, UInt uiTargetView, UInt uiFrame, Double& rdCamPosShift, Double& rdPicPosShift ) { Double dDummy, dCamPosSource, dCamPosTarget, dPicPosSource, dPicPosTarget; Bool bInterpolatedSource, bInterpolatedTarget; Int iTargetViewNum = m_aiBaseViews[ uiTargetView ]; Int iSourceViewNum = m_aiBaseViews[ uiSourceView ]; xGetGeometryData( iSourceViewNum, uiFrame, dDummy, dCamPosSource, dPicPosSource, bInterpolatedSource ); xGetGeometryData( iTargetViewNum, uiFrame, dDummy, dCamPosTarget, dPicPosTarget, bInterpolatedTarget ); AOT( bInterpolatedSource || bInterpolatedTarget ); rdCamPosShift = ( dCamPosTarget - dCamPosSource ); rdPicPosShift = -( dPicPosTarget - dPicPosSource ); // to be consistent } TAppComCamPara::TAppComCamPara() { m_dViewNumPrec = VIEW_NUM_PREC; // fixed m_iLog2Precision = -1; m_uiInputBitDepth = 0; m_uiBitDepthForLUT = 8; // fixed m_uiFirstFrameId = 0; m_uiLastFrameId = 0; m_iNumberOfBaseViews = -1; m_iNumberOfSynthViews = -1; m_uiCamParsCodedPrecision = 0; m_bCamParsVaryOverTime = true; m_aaiCodedScale = 0; m_aaiCodedOffset = 0; m_aaiScaleAndOffsetSet = 0; m_adBaseViewShiftParameter = 0; m_aiBaseViewShiftParameter = 0; m_adSynthViewShiftParameter = 0; m_aiSynthViewShiftParameter = 0; m_adBaseViewShiftLUT = 0; m_aiBaseViewShiftLUT = 0; m_adSynthViewShiftLUT = 0; m_aiSynthViewShiftLUT = 0; m_bSetupFromCoded = false; m_bCamParsCodedPrecSet = false; #if NH_3D_REN_MAX_DEV_OUT m_dMaxShiftDeviation = -1; #endif } TAppComCamPara::~TAppComCamPara() { xDeleteArray( m_adBaseViewShiftParameter, m_iNumberOfBaseViews, m_iNumberOfBaseViews ); 
xDeleteArray( m_aiBaseViewShiftParameter, m_iNumberOfBaseViews, m_iNumberOfBaseViews ); xDeleteArray( m_adBaseViewShiftLUT, m_iNumberOfBaseViews, m_iNumberOfBaseViews, 2 ); xDeleteArray( m_aiBaseViewShiftLUT, m_iNumberOfBaseViews, m_iNumberOfBaseViews, 2 ); xDeleteArray( m_adSynthViewShiftParameter, m_iNumberOfBaseViews, std::max((Int) 1 ,m_iNumberOfSynthViews)); xDeleteArray( m_aiSynthViewShiftParameter, m_iNumberOfBaseViews, std::max((Int) 1 ,m_iNumberOfSynthViews)); xDeleteArray( m_adSynthViewShiftLUT, m_iNumberOfBaseViews, std::max((Int) 1 ,m_iNumberOfSynthViews), 2 ); xDeleteArray( m_aiSynthViewShiftLUT, m_iNumberOfBaseViews, std::max( (Int)1 ,m_iNumberOfSynthViews), 2 ); xDeleteArray( m_aaiCodedScale, m_iNumberOfBaseViews ); xDeleteArray( m_aaiCodedOffset, m_iNumberOfBaseViews ); xDeleteArray( m_aaiScaleAndOffsetSet, m_iNumberOfBaseViews ); } Void TAppComCamPara::xSetupBaseViewsFromCoded() { //===== get and sort views given in camera parameter file and set list of base views and related arrays ===== // get left-right order and coding order from cfg-file std::vector<Int> aiViewOrderIdx; // Left Right Order std::vector<Int> aiViewId ; // Coding Order Int iMinViewOrderIdx = MAX_INT; for( UInt uiRow = 0; uiRow < m_aadCameraParameters.size(); uiRow++ ) { if (m_aadCameraParameters[uiRow].size() != 2 ) break; Int iViewOrderIdx = (Int)( m_aadCameraParameters[ uiRow ][ 1 ] ); iMinViewOrderIdx = std::min( iViewOrderIdx, iMinViewOrderIdx ); aiViewOrderIdx .push_back( iViewOrderIdx ); aiViewId .push_back( (Int) m_aadCameraParameters[ uiRow ][ 0 ] ); } // create base view numbers AOT( aiViewId.size() != aiViewOrderIdx.size() ); m_iNumberOfBaseViews = (Int) aiViewId.size(); for (Int iCurBaseView = 0; iCurBaseView < m_iNumberOfBaseViews; iCurBaseView++ ) { aiViewOrderIdx[iCurBaseView] = ( aiViewOrderIdx[iCurBaseView] - iMinViewOrderIdx); m_aiBaseViews .push_back( aiViewOrderIdx[iCurBaseView] * ( (Int) m_dViewNumPrec) ); m_aiBaseId2SortedId.push_back( iCurBaseView ); 
m_aiBaseSortedId2Id.push_back( iCurBaseView ); } m_iNumberOfBaseViews = (Int) m_aiBaseViews.size(); std::vector<Int> aiSortedViewOrderIdx = aiViewOrderIdx; // sort base views according to View Order Idx m_aiSortedBaseViews = m_aiBaseViews; for (Int iCurBaseView = 1; iCurBaseView < m_iNumberOfBaseViews; iCurBaseView++ ) { Int iCurViewOrder = aiSortedViewOrderIdx[iCurBaseView]; for (Int iCurSearchPos = iCurBaseView; iCurSearchPos >= 0; iCurSearchPos-- ) { if ( iCurViewOrder < aiSortedViewOrderIdx[iCurSearchPos] ) { Int iTempViewId = m_aiSortedBaseViews[iCurSearchPos]; m_aiSortedBaseViews[iCurSearchPos] = m_aiSortedBaseViews[iCurBaseView]; m_aiSortedBaseViews[iCurBaseView ] = iTempViewId; Int iTempViewOrderIdx = aiSortedViewOrderIdx[iCurSearchPos]; aiSortedViewOrderIdx[iCurSearchPos] = aiSortedViewOrderIdx[iCurBaseView]; aiSortedViewOrderIdx[iCurBaseView ] = iTempViewOrderIdx; Int iTempPos = m_aiBaseSortedId2Id[iCurSearchPos]; m_aiBaseSortedId2Id[iCurSearchPos] = m_aiBaseSortedId2Id[iCurBaseView]; m_aiBaseSortedId2Id[iCurBaseView] = iTempPos; iCurBaseView--; } } } for (Int iCurBaseView = 0; iCurBaseView < m_iNumberOfBaseViews; iCurBaseView++ ) { m_aiBaseId2SortedId[m_aiBaseSortedId2Id[iCurBaseView]] = iCurBaseView; } m_aiViewsInCfgFile = m_aiSortedBaseViews; // check if( m_aiViewsInCfgFile.size() < 2 ) { std::cerr << "Failed reading camera parameter file" << std::endl; std::cerr << "At least two views must be given" << std::endl; AOT(true); exit( EXIT_FAILURE ); } // translate coding order to view order for( UInt uiRow = 0; uiRow < m_aadCameraParameters.size(); uiRow++ ) { if (m_aadCameraParameters[uiRow].size() == 2 ) continue; m_aadCameraParameters[ uiRow ][ 2 ] = (Double) aiViewOrderIdx[ xGetViewId( aiViewId, (Int) m_aadCameraParameters[ uiRow ][ 2 ] ) ]; m_aadCameraParameters[ uiRow ][ 3 ] = (Double) aiViewOrderIdx[ xGetViewId( aiViewId, (Int) m_aadCameraParameters[ uiRow ][ 3 ] ) ]; } } Void TAppComCamPara::xSetupBaseViews( TChar* pchBaseViewNumbers, UInt 
uiNumBaseViews ) { // init list std::vector<Int> aiViewsInCfg; for( UInt uiRow = 0; uiRow < m_aadCameraParameters.size(); uiRow++ ) { aiViewsInCfg.push_back( (Int)( m_aadCameraParameters[ uiRow ][ 0 ] * m_dViewNumPrec ) ); } // remove duplicated items std::sort( aiViewsInCfg.begin(), aiViewsInCfg.end() ); std::vector<Int>::iterator cIterNewEnd = std::unique( aiViewsInCfg.begin(), aiViewsInCfg.end() ); aiViewsInCfg.erase( cIterNewEnd, aiViewsInCfg.end() ); // sort (from left to right) std::vector<Int> aiDummyI2SI, aiDummySI2I; xGetSortedViewList( aiViewsInCfg, m_aiViewsInCfgFile, aiDummyI2SI, aiDummySI2I ); // check if( m_aiViewsInCfgFile.size() < 2 ) { std::cerr << "Failed reading config file" << std::endl; std::cerr << "At least two views must be given" << std::endl; exit( EXIT_FAILURE ); } //===== set list of base views and related arrays ===== if( pchBaseViewNumbers == 0 ) { std::cerr << "BaseViewCameraNumbers must be given" << std::endl; exit( EXIT_FAILURE ); }; convertNumberString( pchBaseViewNumbers, m_aiBaseViews, m_dViewNumPrec ); while( (UInt)m_aiBaseViews.size() > uiNumBaseViews ) { m_aiBaseViews.pop_back(); } xGetSortedViewList( m_aiBaseViews, m_aiSortedBaseViews, m_aiBaseId2SortedId, m_aiBaseSortedId2Id ); m_iNumberOfBaseViews = (Int)m_aiBaseViews.size(); } Void TAppComCamPara::init( UInt uiNumBaseViews, UInt uiInputBitDepth, UInt uiCodedCamParsPrecision, UInt uiStartFrameId, UInt uiNumFrames, TChar* pchCfgFileName, TChar* pchBaseViewNumbers, TChar* pchSynthViewNumbers, std::vector<Int>* paiSynthViewNumbers, Int iLog2Precision ) { //===== set miscellaneous variables ===== m_uiInputBitDepth = uiInputBitDepth; m_uiFirstFrameId = uiStartFrameId; m_uiLastFrameId = uiStartFrameId + uiNumFrames - 1; m_uiCamParsCodedPrecision = uiCodedCamParsPrecision; m_iLog2Precision = iLog2Precision; xReadCameraParameterFile( pchCfgFileName ); m_bSetupFromCoded = ( m_aadCameraParameters[ 0 ].size() == 2 ); if ( m_bSetupFromCoded ) { std::cout << "Detected decoded camera 
parameter file. Overwriting base view settings from cfg file. " << std::endl; xSetupBaseViewsFromCoded(); } else { xSetupBaseViews( pchBaseViewNumbers, uiNumBaseViews ); } //===== set list of external (virtual) views ===== m_aiSynthViews.clear(); if( pchSynthViewNumbers != 0 || paiSynthViewNumbers != 0) { std::vector<Int> aiTmpSynthViews; AOT( ( pchSynthViewNumbers != NULL ) && ( paiSynthViewNumbers != NULL ) ); if ( pchSynthViewNumbers != NULL ) { convertNumberString( pchSynthViewNumbers, aiTmpSynthViews, m_dViewNumPrec ); } else { aiTmpSynthViews = (*paiSynthViewNumbers); } for( UInt uiSId = 0; uiSId < (UInt)aiTmpSynthViews.size(); uiSId++ ) { Int iViewNumPrec = (Int) m_dViewNumPrec; Int iLeftBaseViewIdx = aiTmpSynthViews[ uiSId ] / iViewNumPrec; Int iRightBaseViewIdx = ( aiTmpSynthViews[ uiSId ] + (iViewNumPrec - 1) ) / iViewNumPrec; if ( iLeftBaseViewIdx < 0 || iRightBaseViewIdx >= m_iNumberOfBaseViews ) { std::cerr << "SynthViewCameraNumbers must be greater and equal to 0 and smaller than number of base views" << std::endl; AOT(true); exit( EXIT_FAILURE ); } Int64 iLeftBaseViewRelNum = iLeftBaseViewIdx * iViewNumPrec; Int64 iRightBaseViewRelNum = iRightBaseViewIdx * iViewNumPrec; Int64 iDiffBaseViewRelNum = iRightBaseViewRelNum - iLeftBaseViewRelNum; Int64 iSynthViewRelNum = aiTmpSynthViews[ uiSId ]; Int64 iLeftBaseNum = m_aiSortedBaseViews[ iLeftBaseViewIdx ]; Int64 iRightBaseNum = m_aiSortedBaseViews[ iRightBaseViewIdx ]; Int64 iDiffBaseNum = iRightBaseNum - iLeftBaseNum; Int64 iSynthViewNum; if ( iDiffBaseViewRelNum != 0) { AOT( (Int) iDiffBaseViewRelNum != iViewNumPrec ); Int iFact = iDiffBaseNum > 0 ? 
1 : -1; iSynthViewNum = iLeftBaseNum + ( iDiffBaseNum * ( iSynthViewRelNum - iLeftBaseViewRelNum ) + (iViewNumPrec >> 1) * iFact ) / ( iViewNumPrec ); } else { iSynthViewNum = iLeftBaseNum; } m_aiRelSynthViewsNum.push_back( aiTmpSynthViews[ uiSId ] ); m_aiSynthViews .push_back( (Int) iSynthViewNum ); } } m_iNumberOfSynthViews = (Int)m_aiSynthViews.size(); //===== set derived parameters ===== m_bCamParsVaryOverTime = xGetCamParsChangeFlag(); //===== create arrays ===== xCreateLUTs ( (UInt)m_iNumberOfBaseViews, (UInt)m_iNumberOfBaseViews, m_adBaseViewShiftLUT, m_aiBaseViewShiftLUT, m_adBaseViewShiftParameter, m_aiBaseViewShiftParameter ); xCreateLUTs ( (UInt)m_iNumberOfBaseViews, (UInt)m_iNumberOfSynthViews, m_adSynthViewShiftLUT, m_aiSynthViewShiftLUT, m_adSynthViewShiftParameter, m_aiSynthViewShiftParameter ); xCreate2dArray( (UInt)m_iNumberOfBaseViews, (UInt)m_iNumberOfBaseViews, m_aaiCodedScale ); xCreate2dArray( (UInt)m_iNumberOfBaseViews, (UInt)m_iNumberOfBaseViews, m_aaiCodedOffset ); xCreate2dArray( (UInt)m_iNumberOfBaseViews, (UInt)m_iNumberOfBaseViews, m_aaiScaleAndOffsetSet ); xInit2dArray ( (UInt)m_iNumberOfBaseViews, (UInt)m_iNumberOfBaseViews, m_aaiScaleAndOffsetSet, 0 ); //===== init arrays for first frame ===== xSetShiftParametersAndLUT( m_uiFirstFrameId ); } Void TAppComCamPara::check( Bool bCheckViewRange, Bool bCheckFrameRange ) { if( bCheckFrameRange ) { Double dDummy; for( UInt uiBaseView = 0; uiBaseView < m_aiBaseViews.size(); uiBaseView++ ) { if ( m_bSetupFromCoded ) { for( UInt uiTargetView = 0; uiTargetView < m_aiBaseViews.size(); uiTargetView++ ) { if ( uiTargetView == uiBaseView ) continue; for( UInt uiFrame = m_uiFirstFrameId; uiFrame <= m_uiLastFrameId; uiFrame++ ) { Int iDummy; xGetCodedCameraData( uiBaseView, uiTargetView, true , uiFrame, iDummy, iDummy, iDummy ); } } } else { for( UInt uiFrame = m_uiFirstFrameId; uiFrame <= m_uiLastFrameId; uiFrame++ ) { Bool bInterpolatedCur; xGetGeometryData( m_aiBaseViews[ uiBaseView ], uiFrame, 
dDummy, dDummy, dDummy, bInterpolatedCur ); xGetZNearZFar ( m_aiBaseViews[ uiBaseView ], uiFrame, dDummy, dDummy ); if( bInterpolatedCur ) { std::cerr << "Error: CameraParameters for BaseView " << (Double)m_aiBaseViews[ uiBaseView ] / m_dViewNumPrec << " and Frame " << uiFrame << " not defined. " << std::endl; exit( EXIT_FAILURE ); } } } } Bool bIgnoreFirst = true; for( UInt uiERView = 0; uiERView < m_aiSynthViews.size() && !m_bSetupFromCoded; uiERView++ ) { if ( xIsIn(m_aiViewsInCfgFile, m_aiSynthViews[ uiERView ] ) ) { if ( bIgnoreFirst ) { std::cout << "Ignoring CameraParameterFile entries for virtual view(s): " ; //GT: Integer precision virtual view camera parameters are always interpolated from coded views camera parameters. bIgnoreFirst = false; } std::cout << (Double)m_aiSynthViews[ uiERView ] / m_dViewNumPrec << " " ; } } if ( !bIgnoreFirst ) { std::cout << std::endl; } Bool bInterpolateFirst = true; Bool bAnyInterpolated = false; for( UInt uiERView = 0; uiERView < m_aiSynthViews.size() && !m_bSetupFromCoded; uiERView++ ) { Bool bInterpolated = false; for( UInt uiFrame = m_uiFirstFrameId; uiFrame <= m_uiLastFrameId; uiFrame++ ) { Bool bInterpolatedCur; xGetGeometryData( m_aiSynthViews[ uiERView ], uiFrame, dDummy, dDummy, dDummy, bInterpolatedCur ); bInterpolated |= bInterpolatedCur; } if( bInterpolated ) { bAnyInterpolated = true; if ( bInterpolateFirst ) { std::cout << "Interpolating camera parameters for virtual view(s): " ; bInterpolateFirst = false; } std::cout << (Double)m_aiSynthViews[ uiERView ] / m_dViewNumPrec << " " ; } } if ( bAnyInterpolated ) std::cout << std::endl; } if( bCheckViewRange ) { Bool bAllExist = true; for( Int iSynthViewIdx = 0; iSynthViewIdx < m_iNumberOfSynthViews; iSynthViewIdx++ ) { Bool bIsBaseView; Int iDummy; Bool bExist = getLeftRightBaseView( iSynthViewIdx, iDummy, iDummy, iDummy, bIsBaseView ); bAllExist &= ( bExist || bIsBaseView ); } if( !bAllExist ) { std::cerr << "SynthViewNumbers must be within the range of 
BaseViewNumbers" << std::endl; exit( EXIT_FAILURE ); } } } Void TAppComCamPara::update( UInt uiFrameId ) { m_iCurrentFrameId = uiFrameId; m_bCamParsCodedPrecSet = false; if ( m_bCamParsVaryOverTime ) { xSetShiftParametersAndLUT( m_uiFirstFrameId + uiFrameId ); } } #if NH_3D_VSO Void TAppComCamPara::setDispCoeff( UInt uiFrameId, Int iViewIdx ) { UInt uiFrame = m_uiFirstFrameId + uiFrameId; Int iSourceViewNum = m_aiBaseViews[ iViewIdx ]; Double dBaseLine = 0.0; Double dFL1 = 1.0, dCS1 = 1.0, dCP1 = 1.0, dZN1 = 1.0, dZF1 = 1.0; Bool bInterpolated = false; double dPos[3] = {0.0, 0.0, 0.0}; if( m_iNumberOfBaseViews == 3 ) { xGetGeometryData( m_aiBaseViews[0], uiFrame, dFL1, dPos[0], dCS1, bInterpolated ); xGetGeometryData( m_aiBaseViews[1], uiFrame, dFL1, dPos[1], dCS1, bInterpolated ); xGetGeometryData( m_aiBaseViews[2], uiFrame, dFL1, dPos[2], dCS1, bInterpolated ); xGetGeometryData( iSourceViewNum, uiFrame, dFL1, dCP1, dCS1, bInterpolated ); xGetZNearZFar ( iSourceViewNum, uiFrame, dZN1, dZF1 ); dBaseLine = ( std::max( dPos[0], std::max( dPos[1], dPos[2] ) ) - std::min( dPos[0], std::min( dPos[1], dPos[2] ) ) ) / 2.0; } else if( m_iNumberOfBaseViews == 2 ) { xGetGeometryData( m_aiBaseViews[0], uiFrame, dFL1, dPos[0], dCS1, bInterpolated ); xGetGeometryData( m_aiBaseViews[1], uiFrame, dFL1, dPos[1], dCS1, bInterpolated ); xGetGeometryData( iSourceViewNum, uiFrame, dFL1, dCP1, dCS1, bInterpolated ); xGetZNearZFar ( iSourceViewNum, uiFrame, dZN1, dZF1 ); dBaseLine = dPos[0] - dPos[1]; } m_dDispCoeff = fabs( dFL1 * ( dBaseLine / 2.0 ) / 255.0 * ( 1.0/dZN1 - 1.0/dZF1 ) ); } #endif Bool TAppComCamPara::getLeftRightBaseView( Int iSynthViewIdx, Int &riLeftViewIdx, Int &riRightViewIdx, Int &riRelDistToLeft, Bool& rbIsBaseView ) { Int iLeftSortedViewIdx, iRightSortedViewIdx, iDummy; Bool bExist = xGetLeftRightView( m_aiSynthViews[ iSynthViewIdx ], m_aiSortedBaseViews, iDummy, iDummy, iLeftSortedViewIdx, iRightSortedViewIdx ); rbIsBaseView = ( iLeftSortedViewIdx == 
iRightSortedViewIdx && iLeftSortedViewIdx != -1 ); Int iLeftViewIdx = ( iLeftSortedViewIdx != -1 ? m_aiBaseSortedId2Id[ iLeftSortedViewIdx ] : -1 ); Int iRightViewIdx = ( iRightSortedViewIdx != -1 ? m_aiBaseSortedId2Id[ iRightSortedViewIdx ] : -1 ); if ( iLeftSortedViewIdx != -1 && iRightSortedViewIdx != -1 ) { riRelDistToLeft = getRelDistLeft( iSynthViewIdx, iLeftViewIdx, iRightViewIdx); } else { riRelDistToLeft = -1; } riLeftViewIdx = iLeftViewIdx; riRightViewIdx = iRightViewIdx; return bExist; } Bool TAppComCamPara::xIsIn( std::vector<Int>& rVec, Int iElem) { Bool bFound = false; for (Int idx = 0; idx < rVec.size() && !bFound; idx++) { bFound = bFound || rVec[idx] == iElem; } return bFound; } Int TAppComCamPara::getRelDistLeft( Int iSynthViewIdx, Int iLeftViewIdx, Int iRightViewIdx ) { //GT: Get normalized distance Int iLeftViewDist = abs ( m_aiBaseId2SortedId[ iLeftViewIdx ] * ((Int) m_dViewNumPrec) - m_aiRelSynthViewsNum [ iSynthViewIdx ]); Int iRightViewDist = abs ( m_aiBaseId2SortedId[ iRightViewIdx ] * ((Int) m_dViewNumPrec) - m_aiRelSynthViewsNum [ iSynthViewIdx ]); Int64 iDistSum = iLeftViewDist + iRightViewDist; return (iDistSum == 0) ? (1 << (REN_VDWEIGHT_PREC -1) ) : (Int) (( (((Int64) iLeftViewDist ) << REN_VDWEIGHT_PREC ) + (iDistSum >> 1) ) / iDistSum ); } Int TAppComCamPara::synthRelNum2Idx( Int iRelNum ) { return xGetViewId(m_aiRelSynthViewsNum, iRelNum ); } #endif
37.303841
242
0.632793
[ "vector" ]
b1c522e82ac7098c923641edf10b106a9d2ef25b
3,179
hpp
C++
numerical-methods-5_newtons_method/newton.hpp
DmitriBogdanov/numerical-methods-5_newtons_method
ad8885e715940428b83f3d0f0201deca168ad2c4
[ "MIT" ]
null
null
null
numerical-methods-5_newtons_method/newton.hpp
DmitriBogdanov/numerical-methods-5_newtons_method
ad8885e715940428b83f3d0f0201deca168ad2c4
[ "MIT" ]
null
null
null
numerical-methods-5_newtons_method/newton.hpp
DmitriBogdanov/numerical-methods-5_newtons_method
ad8885e715940428b83f3d0f0201deca168ad2c4
[ "MIT" ]
null
null
null
#pragma once #include <tuple> #include <vector> #include "core_math.hpp" // @return 1 => Solution // @return 2 => Iterations // @return 3 => convergence orders inline std::tuple<double, uint, dvector> secant_solve(ScalarFunction *f, double A, double B, double precision, uint maxIterations = 50) { dvector approx; // vector of aproximations dvector orders; uint iterations = 0; double x0 = A; double x = x0; approx.push_back(x0); while (true) { x = x0 - f(x0) * (B - x0) / (f(B) - f(x0)); approx.push_back(x); ++iterations; if (std::abs(x - x0) < precision || iterations > maxIterations) break; x0 = x; } // Compute convergence orders const auto N = approx.size(); orders.reserve(N > 2 ? N - 2 : 0); for (size_t k = 1; k < N - 2; ++k) { const double err_ = std::abs(approx[k - 1] - x); const double _err_ = std::abs(approx[k] - x); const double _err = std::abs(approx[k + 1] - x); const double p = std::log(_err / _err_) / std::log(_err_ / err_); orders.push_back(p); } return { x, iterations, orders }; } // @return 1 => Solution // @return 2 => Iterations // @return 3 => convergence orders inline std::tuple<double, uint, dvector> newton_solve(ScalarFunction *f, double x0, double precision, bool dIsNumeric = true, uint maxIterations = 50) { dvector approx; // vector of aproximations dvector orders; uint iterations = 0; double x = x0; approx.push_back(x0); while (true) { const auto Derivative = dIsNumeric ? derivative(f, x0) : AnalythicalDerivative(x0); x = x0 - f(x0) / Derivative; approx.push_back(x); ++iterations; if (std::abs(x - x0) < precision || iterations > maxIterations) break; x0 = x; } // Compute convergence orders const auto N = approx.size(); orders.reserve(N > 2 ? 
N - 2 : 0); for (size_t k = 1; k < N - 2; ++k) { const double err_ = std::abs(approx[k - 1] - x); const double _err_ = std::abs(approx[k] - x); const double _err = std::abs(approx[k + 1] - x); const double p = std::log(_err / _err_) / std::log(_err_ / err_); orders.push_back(p); } return { x, iterations, orders }; } // @return 1 => Solution // @return 2 => Iterations // @return 3 => convergence orders inline std::tuple<Vector, uint, dvector> newton_solve_system(VectorFunction *F, Vector X0, double precision, bool dIsNumeric = true, uint maxIterations = 50) { vvector approx; // vector of aproximations dvector orders; uint iterations = 0; Vector X = X0; approx.push_back(X0); // Fill approximations while (true) { const auto Jacobian = dIsNumeric ? jacobian(F, X0) : AnalythicalJacobian(X0); X = X0 - Jacobian.inverse() * F(X0); approx.push_back(X); ++iterations; if ((X - X0).norm() < precision || iterations > maxIterations) break; X0 = X; } // Compute convergence orders const auto N = approx.size(); orders.reserve(N > 2 ? N - 2 : 0); for (size_t k = 1; k < N - 2; ++k) { const double err_ = (approx[k - 1] - X).norm(); const double _err_ = (approx[k] - X).norm(); const double _err = (approx[k + 1] - X).norm(); const double p = std::log(_err / _err_) / std::log(_err_ / err_); orders.push_back(p); } return { X, iterations, orders }; }
24.835938
159
0.63888
[ "vector" ]
b1cae7a3d9b6b47f974b322a58f34590a4634f7a
5,479
cpp
C++
source/engine/util/file.cpp
compix/CUDA-Path-Tracer
429334456d75e8c939b94e1db288a51542f70926
[ "MIT" ]
2
2017-11-25T14:26:45.000Z
2020-06-28T21:10:25.000Z
source/engine/util/file.cpp
compix/CUDA-Path-Tracer
429334456d75e8c939b94e1db288a51542f70926
[ "MIT" ]
1
2021-05-03T04:39:49.000Z
2021-05-03T21:33:43.000Z
source/engine/util/file.cpp
compix/CUDA-Path-Tracer
429334456d75e8c939b94e1db288a51542f70926
[ "MIT" ]
null
null
null
#include "file.h" #include <fstream> #include <vector> #include <sys/types.h> #include <sys/stat.h> #include <iostream> #include <GL/glew.h> #include <engine/resource/ResourceManager.h> namespace file { Path::Path(const std::string& path) : m_path(path) {} std::string Path::getExtension() const { size_t dotPos = m_path.find_last_of('.'); if (dotPos < m_path.npos) return m_path.substr(dotPos); return ""; } std::string Path::getFilename() const { size_t slashPos = m_path.find_last_of('/') + 1; if (slashPos < m_path.npos) return m_path.substr(slashPos, m_path.find_last_of('.') - slashPos); return m_path; } std::string Path::getFilenameWithExtension() const { size_t slashPos = m_path.find_last_of('/') + 1; if (slashPos < m_path.npos) return m_path.substr(m_path.find_last_of('/') + 1); return m_path; } Path Path::getParent() const { size_t slashPos = m_path.find_last_of('/'); if (slashPos < m_path.npos) { auto path = m_path.substr(0, slashPos); return Path(path.substr(0, path.find_last_of('/') + 1)); } return Path(""); } } #if defined(WIN32) || defined(_WIN32) || defined(__WIN32) #include <Windows.h> void file::forEachFileInDirectory(const std::string& directoryPath, bool recursive, const file::DirectoryIterationFunction& fileFunc) { WIN32_FIND_DATAA findFileData; HANDLE dirHandle = FindFirstFileA((directoryPath + "/*").c_str(), &findFileData); if (dirHandle == INVALID_HANDLE_VALUE) return; do { std::string filename = findFileData.cFileName; std::string fullFilename = directoryPath + "/" + filename; bool isDirectory = (findFileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0; if (filename[0] == '.') continue; if (isDirectory) { fileFunc(directoryPath, filename, isDirectory); if (recursive) forEachFileInDirectory(fullFilename, recursive, fileFunc); continue; } fileFunc(directoryPath, filename, isDirectory); } while (FindNextFileA(dirHandle, &findFileData)); FindClose(dirHandle); } #else // TODO: Write unix version #endif file::ShaderSourceInfo 
resolveShaderIncludes(const std::string& shaderPath, const std::string& source, size_t lineStart) { file::ShaderSourceInfo info(shaderPath, ""); info.lineStart = lineStart; std::istringstream stream(source); size_t lineNumber = lineStart; std::string line; while (std::getline(stream, line)) { size_t incPos = line.find("#include"); if (incPos != line.npos) { size_t p0 = line.find_first_of("\"", incPos); size_t p1 = line.npos; if (p0 != line.npos) p1 = line.find_first_of("\"", p0 + 1); if (p1 != line.npos && p1 > p0) { std::string includePath = line.substr(p0 + 1, p1 - p0 - 1); auto& includeSource = ResourceManager::getIncludeSource(includePath); file::ShaderSourceInfo includeInfo = resolveShaderIncludes(includePath, includeSource, lineNumber); info.children.push_back(includeInfo); info.source += includeInfo.source + "\n"; lineNumber += includeInfo.lineEnd - includeInfo.lineStart; } else { std::cout << "Failed to parse " << shaderPath << ": unexpected #include input at line: " << lineNumber << std::endl; } } else info.source += line + "\n"; ++lineNumber; } info.lineEnd = lineNumber; return info; } file::ShaderSourceInfo file::getShaderSource(const std::string& path) { std::string source = readAsString(path); if (glewIsSupported("GL_ARB_shading_language_include") == GL_TRUE) return ShaderSourceInfo(path, source); return resolveShaderIncludes(path, source, 1); } std::string file::readAsString(const std::string& path) { std::string fileAsString = ""; std::ifstream stream(path, std::ios::in); if (!stream.is_open()) { std::cerr << "Could not open file: " << path << std::endl; return ""; } std::string line = ""; while (getline(stream, line)) fileAsString += line + "\n"; stream.close(); return fileAsString; } void file::saveToFile(const std::string& path, const std::string& text) { std::string fileAsString = ""; std::ofstream stream(path, std::ios::out); if (!stream.is_open()) { std::cerr << "Could not open file: " << path << std::endl; return; } stream << text; 
stream.close(); } void file::loadRawBuffer(const std::string& path, std::vector<char>& outBuffer, uint32_t& outNumValues) { std::ifstream input(path, std::ios::binary); outBuffer = { std::istreambuf_iterator<char>(input), std::istreambuf_iterator<char>() }; outNumValues = *reinterpret_cast<uint32_t*>(&outBuffer[0]); } bool file::exists(const std::string& filename) noexcept { struct stat buffer; return stat(filename.c_str(), &buffer) == 0; } size_t file::getSize(const std::string& filename) { struct stat buffer; return stat(filename.c_str(), &buffer) == 0 ? buffer.st_size : 0; }
26.726829
133
0.602117
[ "vector" ]
b1cba7fa06f3d9f722dbddc07781b1b93a713915
1,069
cpp
C++
LeetCode/C++/1414. Find the Minimum Number of Fibonacci Numbers Whose Sum Is K.cpp
shreejitverma/GeeksforGeeks
d7bcb166369fffa9a031a258e925b6aff8d44e6c
[ "MIT" ]
2
2022-02-18T05:14:28.000Z
2022-03-08T07:00:08.000Z
LeetCode/C++/1414. Find the Minimum Number of Fibonacci Numbers Whose Sum Is K.cpp
shivaniverma1/Competitive-Programming-1
d7bcb166369fffa9a031a258e925b6aff8d44e6c
[ "MIT" ]
6
2022-01-13T04:31:04.000Z
2022-03-12T01:06:16.000Z
LeetCode/C++/1414. Find the Minimum Number of Fibonacci Numbers Whose Sum Is K.cpp
shivaniverma1/Competitive-Programming-1
d7bcb166369fffa9a031a258e925b6aff8d44e6c
[ "MIT" ]
2
2022-02-14T19:53:53.000Z
2022-02-18T05:14:30.000Z
//Greedy //Runtime: 4 ms, faster than 80.46% of C++ online submissions for Find the Minimum Number of Fibonacci Numbers Whose Sum Is K. //Memory Usage: 6.5 MB, less than 100.00% of C++ online submissions for Find the Minimum Number of Fibonacci Numbers Whose Sum Is K. class Solution { public: int findMinFibonacciNumbers(int k) { vector<int> fNums = {1, 1}; int i; while(fNums[fNums.size()-1] < k){ fNums.push_back(fNums[fNums.size()-1] + fNums[fNums.size()-2]); } // for(int i = 0; i < fNums.size(); i++){ // cout << fNums[i] << " "; // } // cout << endl; int ans = 0; i = fNums.size()-1; while(k > 0){ while(i >= 0 && fNums[i] > k){ i--; } //the first fibonacci number <= k // cout << i << " " << fNums[i] << " "; k -= fNums[i]; ans++; } // cout << endl; // cout << endl; return ans; } };
28.891892
132
0.451824
[ "vector" ]
b1cd0ef77d6c0bca6c7c7c6fdb6f0fc73efb94aa
388
hpp
C++
cpp/walker.hpp
altayhunter/xkcd-2529
8ab52c33135e3cedfc4d3816a5a4eb3a21c8a3bb
[ "MIT" ]
1
2021-11-22T23:17:55.000Z
2021-11-22T23:17:55.000Z
cpp/walker.hpp
altayhunter/xkcd-2529
8ab52c33135e3cedfc4d3816a5a4eb3a21c8a3bb
[ "MIT" ]
null
null
null
cpp/walker.hpp
altayhunter/xkcd-2529
8ab52c33135e3cedfc4d3816a5a4eb3a21c8a3bb
[ "MIT" ]
null
null
null
#include "point.hpp" // Point #include <unordered_set> // unordered_set #include <vector> // vector class Walker { public: Walker(unsigned n, unsigned k); int intersections() const; int steps() const; private: Point randomNeighbor(const Point& p) const; bool trapped(const Point& p) const; std::vector<Point> marbles; std::unordered_set<Point, Point::Hash> visited; };
24.25
48
0.706186
[ "vector" ]
b1ce372110e79c2b1cde28d3769c9ee5b611688c
4,842
cpp
C++
src/examples/webgpu/common/imguiapp.cpp
bfierz/vcl
6ef8d446b6a2f46543a5b3f9f76cad0d8f691969
[ "MIT" ]
15
2015-05-15T09:14:42.000Z
2022-02-20T13:00:17.000Z
src/examples/webgpu/common/imguiapp.cpp
bfierz/vcl
6ef8d446b6a2f46543a5b3f9f76cad0d8f691969
[ "MIT" ]
54
2015-05-14T09:21:51.000Z
2021-05-28T06:09:06.000Z
src/examples/webgpu/common/imguiapp.cpp
bfierz/vcl
6ef8d446b6a2f46543a5b3f9f76cad0d8f691969
[ "MIT" ]
4
2017-04-18T06:16:42.000Z
2021-07-16T08:00:12.000Z
/* * This file is part of the Visual Computing Library (VCL) release under the * MIT license. * * Copyright (c) 2020 Basil Fierz * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "imguiapp.h" // IMGUI #define NOMINMAX #include "imgui_impl_glfw.h" #include "imgui_impl_wgpu.h" ImGuiApplication::ImGuiApplication(const char* title) : Application(title) { // Setup Dear ImGui context IMGUI_CHECKVERSION(); ImGui::CreateContext(); ImGuiIO& io = ImGui::GetIO(); (void)io; //io.ConfigFlags |= ImGuiConfigFlags_NavEnableKeyboard; // Enable Keyboard Controls //io.ConfigFlags |= ImGuiConfigFlags_NavEnableGamepad; // Enable Gamepad Controls // Setup Dear ImGui style ImGui::StyleColorsDark(); //ImGui::StyleColorsClassic(); // Setup Platform/Renderer bindings ImGui_ImplGlfw_InitForVulkan(windowHandle(), true); ImGui_ImplWGPU_Init(_wgpuDevice, NumberOfFrames, WGPUTextureFormat_RGBA8Unorm); // Load Fonts // - If no fonts are loaded, dear imgui will use the default font. 
You can also load multiple fonts and use ImGui::PushFont()/PopFont() to select them. // - AddFontFromFileTTF() will return the ImFont* so you can store it if you need to select the font among multiple. // - If the file cannot be loaded, the function will return NULL. Please handle those errors in your application (e.g. use an assertion, or display an error and quit). // - The fonts will be rasterized at a given size (w/ oversampling) and stored into a texture when calling ImFontAtlas::Build()/GetTexDataAsXXXX(), which ImGui_ImplXXXX_NewFrame below will call. // - Read 'docs/FONTS.txt' for more instructions and details. // - Remember that in C/C++ if you want to include a backslash \ in a string literal you need to write a double backslash \\ ! //io.Fonts->AddFontDefault(); //io.Fonts->AddFontFromFileTTF("../../misc/fonts/Roboto-Medium.ttf", 16.0f); //io.Fonts->AddFontFromFileTTF("../../misc/fonts/Cousine-Regular.ttf", 15.0f); //io.Fonts->AddFontFromFileTTF("../../misc/fonts/DroidSans.ttf", 16.0f); //io.Fonts->AddFontFromFileTTF("../../misc/fonts/ProggyTiny.ttf", 10.0f); //ImFont* font = io.Fonts->AddFontFromFileTTF("c:\\Windows\\Fonts\\ArialUni.ttf", 18.0f, NULL, io.Fonts->GetGlyphRangesJapanese()); //IM_ASSERT(font != NULL); } ImGuiApplication::~ImGuiApplication() { ImGui_ImplWGPU_Shutdown(); ImGui_ImplGlfw_Shutdown(); ImGui::DestroyContext(); } void ImGuiApplication::updateFrame() { ImGui_ImplWGPU_NewFrame(); ImGui_ImplGlfw_NewFrame(); ImGui::NewFrame(); } void ImGuiApplication::invalidateDeviceObjects() { Application::invalidateDeviceObjects(); ImGui_ImplWGPU_InvalidateDeviceObjects(); } void ImGuiApplication::createDeviceObjects() { Application::createDeviceObjects(); ImGui_ImplWGPU_CreateDeviceObjects(); } void ImGuiApplication::renderFrame(WGPUTextureView back_buffer) { wgpu::Device device{ _wgpuDevice }; auto color_attachments = wgpu::RenderPassColorAttachment{}; color_attachments.loadOp = wgpu::LoadOp::Load; color_attachments.storeOp = wgpu::StoreOp::Store; 
color_attachments.clearColor = { 1.0f, 0.0f, 1.0f, 0.0f }; color_attachments.view = wgpu::TextureView{ back_buffer }; auto render_pass_desc = wgpu::RenderPassDescriptor{}; render_pass_desc.colorAttachmentCount = 1; render_pass_desc.colorAttachments = &color_attachments; render_pass_desc.depthStencilAttachment = nullptr; auto enc_desc = wgpu::CommandEncoderDescriptor{}; auto encoder = device.CreateCommandEncoder(&enc_desc); auto pass = encoder.BeginRenderPass(&render_pass_desc); ImGui::Render(); ImGui_ImplWGPU_RenderDrawData(ImGui::GetDrawData(), pass.Get()); wgpuRenderPassEncoderEndPass(pass.Get()); auto cmd_buffer_desc = wgpu::CommandBufferDescriptor{}; auto cmd_buffer = encoder.Finish(&cmd_buffer_desc); device.GetQueue().Submit(1, &cmd_buffer); }
39.688525
195
0.76043
[ "render" ]
b1d55f9e933b36a4766d890297c7fba905fef5f7
207,386
cc
C++
tensorflow/compiler/xla/service/sharding_propagation_test.cc
wpv-chan/tensorflow
5361fa77705845f88b17a156b60fa8d88332b362
[ "Apache-2.0" ]
3
2019-11-19T14:07:27.000Z
2020-10-04T12:57:40.000Z
tensorflow/compiler/xla/service/sharding_propagation_test.cc
wpv-chan/tensorflow
5361fa77705845f88b17a156b60fa8d88332b362
[ "Apache-2.0" ]
1
2020-08-28T18:17:58.000Z
2020-08-28T18:17:58.000Z
tensorflow/compiler/xla/service/sharding_propagation_test.cc
wpv-chan/tensorflow
5361fa77705845f88b17a156b60fa8d88332b362
[ "Apache-2.0" ]
4
2022-01-13T11:23:44.000Z
2022-03-02T11:11:42.000Z
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/compiler/xla/service/sharding_propagation.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "tensorflow/compiler/xla/protobuf_util.h" #include "tensorflow/compiler/xla/service/hlo_matchers.h" #include "tensorflow/compiler/xla/service/hlo_op_metadata.h" #include "tensorflow/compiler/xla/service/hlo_parser.h" #include "tensorflow/compiler/xla/status_macros.h" #include "tensorflow/compiler/xla/tests/hlo_test_base.h" #include "tensorflow/compiler/xla/xla_data.pb.h" namespace op = xla::testing::opcode_matchers; namespace xla { namespace { using ShardingPropagationTest = HloTestBase; void ClearMetadata(HloModule* module) { for (HloComputation* computation : module->computations()) { for (HloInstruction* instruction : computation->instructions()) { if (instruction->metadata().ByteSizeLong() != 0) { instruction->set_metadata(OpMetadata()); } if (!instruction->has_sharding()) { continue; } instruction->set_sharding(instruction->sharding().WithoutMetadata()); } } } struct MetadataTestParameter { explicit MetadataTestParameter(bool propagate_metadata, bool clear_metadata) : propagate_metadata(propagate_metadata), clear_metadata(clear_metadata) {} bool propagate_metadata = false; bool clear_metadata = false; }; struct MetadataTestParameterWithOutput { explicit 
MetadataTestParameterWithOutput(bool propagate_metadata, bool clear_metadata, bool allow_root_sharding_propagation) : propagate_metadata(propagate_metadata), clear_metadata(clear_metadata), allow_root_sharding_propagation(allow_root_sharding_propagation) {} bool propagate_metadata = false; bool clear_metadata = false; bool allow_root_sharding_propagation = false; }; class ParameterizedMetadataTest : public HloTestBase, public ::testing::WithParamInterface<MetadataTestParameter> {}; class ParameterizedMetadataTestWithOutput : public HloTestBase, public ::testing::WithParamInterface<MetadataTestParameterWithOutput> {}; std::string OpMetadataListToString(absl::Span<const OpMetadata> metadata) { std::vector<std::string> metadata_strings; metadata_strings.reserve(metadata.size()); for (const OpMetadata& element : metadata) { metadata_strings.push_back( absl::StrCat("{", OpMetadataToString(element), "}")); } return absl::StrCat("{", absl::StrJoin(metadata_strings, ", "), "}"); } class HloShardingMetadataMatcher : public ::testing::MatcherInterface<const HloSharding&> { public: explicit HloShardingMetadataMatcher(absl::Span<const OpMetadata> metadata) : metadata_(metadata.begin(), metadata.end()) {} bool MatchAndExplain( const HloSharding& sharding, ::testing::MatchResultListener* listener) const override { if (sharding.metadata().size() != metadata_.size()) { *listener << sharding.ToString(/*include_metadata=*/true) << " has incorrect sharding metadata (expected: " << OpMetadataListToString(metadata_) << ")"; return false; } for (int i = 0, e = metadata_.size(); i < e; ++i) { if (!protobuf_util::ProtobufEquals(sharding.metadata()[i], metadata_[i])) { *listener << sharding.ToString(/*include_metadata=*/true) << " has incorrect sharding metadata (expected: " << OpMetadataListToString(metadata_) << ")"; return false; } } return true; } void DescribeTo(std::ostream* os) const override { *os << OpMetadataListToString(metadata_); } private: std::vector<OpMetadata> metadata_; 
}; ::testing::Matcher<const HloSharding&> ShardingMetadata( absl::Span<const OpMetadata> metadata) { return ::testing::MakeMatcher(new HloShardingMetadataMatcher(metadata)); } OpMetadata CreateMetadata(const std::string& op_name) { OpMetadata metadata; metadata.set_op_name(op_name); return metadata; } INSTANTIATE_TEST_SUITE_P( ShardingPropagation, ParameterizedMetadataTest, ::testing::Values(MetadataTestParameter(/*propagate_metadata=*/false, /*clear_metadata=*/false), MetadataTestParameter(/*propagate_metadata=*/false, /*clear_metadata=*/true), MetadataTestParameter(/*propagate_metadata=*/true, /*clear_metadata=*/false), MetadataTestParameter(/*propagate_metadata=*/true, /*clear_metadata=*/true)), [](const ::testing::TestParamInfo<MetadataTestParameter>& info) { return absl::StrCat(info.param.propagate_metadata ? "MetadataPropagation" : "NoMetadataPropagation", "_", info.param.clear_metadata ? "NoMetadataInModule" : "MetadataInModule"); }); INSTANTIATE_TEST_SUITE_P( ShardingPropagation, ParameterizedMetadataTestWithOutput, ::testing::Values(MetadataTestParameterWithOutput( /*propagate_metadata=*/false, /*clear_metadata=*/false, /*allow_root_sharding_propagation=*/false), MetadataTestParameterWithOutput( /*propagate_metadata=*/false, /*clear_metadata=*/true, /*allow_root_sharding_propagation=*/false), MetadataTestParameterWithOutput( /*propagate_metadata=*/true, /*clear_metadata=*/false, /*allow_root_sharding_propagation=*/false), MetadataTestParameterWithOutput( /*propagate_metadata=*/true, /*clear_metadata=*/true, /*allow_root_sharding_propagation=*/false), MetadataTestParameterWithOutput( /*propagate_metadata=*/false, /*clear_metadata=*/false, /*allow_root_sharding_propagation=*/true), MetadataTestParameterWithOutput( /*propagate_metadata=*/false, /*clear_metadata=*/true, /*allow_root_sharding_propagation=*/true), MetadataTestParameterWithOutput( /*propagate_metadata=*/true, /*clear_metadata=*/false, /*allow_root_sharding_propagation=*/true), 
MetadataTestParameterWithOutput( /*propagate_metadata=*/true, /*clear_metadata=*/true, /*allow_root_sharding_propagation=*/true)), [](const ::testing::TestParamInfo<MetadataTestParameterWithOutput>& info) { return absl::StrCat( info.param.propagate_metadata ? "MetadataPropagation" : "NoMetadataPropagation", "_", info.param.clear_metadata ? "NoMetadataInModule" : "MetadataInModule", "_", info.param.allow_root_sharding_propagation ? "PropagateToRoot" : "NoPropagateToRoot"); }); TEST_P(ParameterizedMetadataTest, ShardingMetadataFromInstruction) { const char* const hlo_string = R"( HloModule module ENTRY %elementwise { %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0), sharding={devices=[1,2,2,1]0,1,2,3}, metadata={op_name="test"} ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%param0) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_EQ(changed, GetParam().propagate_metadata && !GetParam().clear_metadata); auto* instruction = FindInstruction(module.get(), "param0"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("test")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_F(ShardingPropagationTest, ShardingMetadataFromInstructionNoOverwrite) { const char* const hlo_string = R"( HloModule module ENTRY %elementwise { %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0), sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="name"}}, metadata={op_name="test"} ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%param0) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, 
ShardingPropagation(/*is_spmd=*/false, /*propagate_metadata=*/true) .Run(module.get())); EXPECT_FALSE(changed); auto* instruction = FindInstruction(module.get(), "param0"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}")); EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("name")})); }

// Sharding metadata is kept even when the instruction carries no metadata of
// its own; the pass makes no changes.
TEST_F(ShardingPropagationTest, ShardingMetadataFromInstructionNoMetadata) { const char* const hlo_string = R"( HloModule module ENTRY %elementwise { %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0), sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="name"}} ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%param0) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardingPropagation(/*is_spmd=*/false, /*propagate_metadata=*/true) .Run(module.get())); EXPECT_FALSE(changed); auto* instruction = FindInstruction(module.get(), "param0"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}")); EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("name")})); }

// When neither the sharding nor the instruction has metadata, nothing is
// added and the pass reports no change.
TEST_F(ShardingPropagationTest, ShardingNoMetadataAndInstructionNoMetadata) { const char* const hlo_string = R"( HloModule module ENTRY %elementwise { %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0), sharding={devices=[1,2,2,1]0,1,2,3} ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%param0) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardingPropagation(/*is_spmd=*/false, /*propagate_metadata=*/true) .Run(module.get())); EXPECT_FALSE(changed); auto* instruction = FindInstruction(module.get(), "param0"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}")); EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); }

// Forward propagation: the sharded operand's tiling (and, per test param, its
// metadata) flows onto the elementwise add.
TEST_P(ParameterizedMetadataTest, ElementwiseOperationForwardPass) { const char* const hlo_string
= R"( HloModule module ENTRY %elementwise { %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0), sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}} %param1 = f32[5,7,11,13]{3,2,1,0} parameter(1) %add = f32[5,7,11,13]{3,2,1,0} add(%param0, %param1) ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%add) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "add"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// Backward propagation: the root copy's sharding flows back onto the add.
TEST_P(ParameterizedMetadataTest, ElementwiseOperationBackwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %elementwise { %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0) %param1 = f32[5,7,11,13]{3,2,1,0} parameter(1) %add = f32[5,7,11,13]{3,2,1,0} add(%param0, %param1) ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%add), sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "add"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// Regression Test for b/129569657.
// Forward propagation through broadcast: operand tiling extends with a
// size-1 tile on the new broadcast dimension; optionally reaches the root.
TEST_P(ParameterizedMetadataTestWithOutput, BroadcastForwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %broadcast { %param0 = f32[3,2048,2048]{2,1,0} parameter(0), sharding={devices=[1,2,2]0,1,2,3 metadata={op_name="a"}} %broadcast = f32[3,2048,2048,3]{3,2,1,0} broadcast(%param0), dimensions={0,1,2} ROOT %copy = f32[3,2048,2048,3]{3,2,1,0} copy(%broadcast) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata, GetParam().allow_root_sharding_propagation) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "broadcast"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } if (GetParam().allow_root_sharding_propagation) { EXPECT_THAT(module->entry_computation()->root_instruction(), op::Sharding("{devices=[1,2,2,1]0,1,2,3}")); } }

// Backward propagation: the root copy's sharding flows back to the
// broadcast instruction.
TEST_P(ParameterizedMetadataTest, BroadcastBackwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %broadcast { %param0 = f32[13]{0} parameter(0) %broadcast = f32[5,7,11,13]{3,2,1,0} broadcast(%param0), dimensions={3} ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%broadcast), sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get()));
EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "broadcast"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// Forward propagation of a partially-replicated sharding through broadcast:
// the last_tile_dim_replicate factor is preserved on the result.
TEST_P(ParameterizedMetadataTestWithOutput, BroadcastForwardPartial) { const char* const hlo_string = R"( HloModule module ENTRY %broadcast { %param0 = f32[3,2048]parameter(0), sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}} %broadcast = f32[3,2048,3] broadcast(%param0), dimensions={0,1} ROOT %copy = f32[3,2048,3] copy(%broadcast) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata, GetParam().allow_root_sharding_propagation) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "broadcast"); ASSERT_NE(instruction, nullptr); EXPECT_THAT( instruction, op::Sharding("{devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } if (GetParam().allow_root_sharding_propagation) { EXPECT_THAT( module->entry_computation()->root_instruction(), op::Sharding("{devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}")); } }

// SPMD merge: partial shardings arriving from the operand ("a") and the user
// ("b") combine into a fully tiled sharding carrying both metadata entries.
TEST_P(ParameterizedMetadataTest, BroadcastMerge) { const char* const hlo_string = R"( HloModule module ENTRY %broadcast { %param0 = f32[3,2048]parameter(0), sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}} %broadcast = f32[3,2048,3] broadcast(%param0),
dimensions={0,1} ROOT %copy = f32[3,2048,3] copy(%broadcast), sharding={devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate metadata={op_name="b"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "broadcast"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2]0,1,2,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a"), CreateMetadata("b")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// Backward propagation through broadcast to the operand: only the broadcast
// dimensions that exist in the operand keep their tiling.
TEST_P(ParameterizedMetadataTest, BroadcastUser) { const char* const hlo_string = R"( HloModule module ENTRY %broadcast { %param0 = f32[24,8]{0,1} parameter(0) %copy = f32[24,8]{0,1} copy(%param0) ROOT %broadcast = f32[4,24,6,8]{3,2,1,0} broadcast(%copy), dimensions={1,3}, sharding={devices=[1,2,1,4]0,1,2,3,4,5,6,7 metadata={op_name="a"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "copy"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2,4]0,1,2,3,4,5,6,7}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// SPMD backward propagation through broadcast: tiling on dimensions the
// operand lacks becomes partial replication on the operand.
TEST_P(ParameterizedMetadataTestWithOutput, BroadcastUserPartial) { const char* const hlo_string = R"( HloModule module
ENTRY %broadcast { %param0 = f32[24,8]{0,1} parameter(0) %copy = f32[24,8]{0,1} copy(%param0) ROOT %broadcast = f32[4,24,6,8] broadcast(%copy), dimensions={1,3}, sharding={devices=[4,2,1,1]0,1,2,3,4,5,6,7 metadata={op_name="a"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata, GetParam().allow_root_sharding_propagation) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "copy"); ASSERT_NE(instruction, nullptr); EXPECT_THAT( instruction, op::Sharding("{devices=[2,1,4]0,2,4,6,1,3,5,7 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } if (GetParam().allow_root_sharding_propagation) { EXPECT_THAT(module->entry_computation()->root_instruction(), op::Sharding("{devices=[4,2,1,1]0,1,2,3,4,5,6,7}")); } }

// Reducing away every tiled dimension collapses the result's sharding to
// {replicated} (non-SPMD mode).
TEST_P(ParameterizedMetadataTest, MaximalReduceForwardPass) { const char* const hlo_string = R"( HloModule module %add { %lhs = f32[] parameter(0) %rhs = f32[] parameter(1) ROOT %add = f32[] add(%lhs, %rhs) } ENTRY %reduce { %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0), sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}} %init = f32[] parameter(1) %reduce = f32[5,7]{1,0} reduce(%param0, %init), dimensions={2,3}, to_apply=%add ROOT %copy = f32[5,7]{0,1} copy(%reduce) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(),
ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{replicated}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// Reducing only untiled dimensions keeps the remaining dims' tiling on the
// reduce result.
TEST_P(ParameterizedMetadataTest, ShardedReduceForwardPass) { const char* const hlo_string = R"( HloModule module %add { %lhs = f32[] parameter(0) %rhs = f32[] parameter(1) ROOT %add = f32[] add(%lhs, %rhs) } ENTRY %reduce { %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0), sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}} %init = f32[] parameter(1) %reduce = f32[7,11]{1,0} reduce(%param0, %init), dimensions={0,3}, to_apply=%add ROOT %copy = f32[7,11]{0,1} copy(f32[7,11]{1,0} %reduce) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "reduce"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,1,2,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// SPMD: reducing a tiled dimension converts its tiling into partial
// replication (last_tile_dim_replicate) on the result.
TEST_P(ParameterizedMetadataTest, ReducePartiallyOnTiledDims) { const char* const hlo_string = R"( HloModule module %add { %lhs = f32[] parameter(0) %rhs = f32[] parameter(1) ROOT %add = f32[] add(%lhs, %rhs) } ENTRY %reduce { %param0 = f32[8,8] parameter(0), sharding={devices=[2,2]0,1,2,3 metadata={op_name="a"}} %init = f32[] parameter(1) %reduce = f32[8] reduce(%param0, %init), dimensions={0}, to_apply=%add ROOT %copy = f32[8] copy(%reduce) })"; TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "reduce"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,2,1,3 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// Same as above but the operand is already partially replicated; the reduced
// dim's tiling folds into a larger replication factor.
TEST_P(ParameterizedMetadataTest, ReducePartiallyOnTiledDims2) { const char* const hlo_string = R"( HloModule module %add { %lhs = f32[] parameter(0) %rhs = f32[] parameter(1) ROOT %add = f32[] add(%lhs, %rhs) } ENTRY %reduce { %param0 = f32[8,8] parameter(0), sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="a"}} %init = f32[] parameter(1) %reduce = f32[8] reduce(%param0, %init), dimensions={0}, to_apply=%add ROOT %copy = f32[8] copy(%reduce) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "reduce"); ASSERT_NE(instruction, nullptr); EXPECT_THAT( instruction, op::Sharding("{devices=[2,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// Backward: a partially replicated sharding on the reduce propagates to the
// reduce's input, re-inserting the reduced dimension as size-1 tile.
TEST_P(ParameterizedMetadataTest, ReducePartiallyBackward) { const char* const hlo_string = R"( HloModule module %add { %lhs = f32[]
parameter(0) %rhs = f32[] parameter(1) ROOT %add = f32[] add(%lhs, %rhs) } ENTRY %reduce { %param0 = f32[8,8] parameter(0) %input = f32[8,8] copy(%param0) %init = f32[] parameter(1) %reduce = f32[8] reduce(%input, %init), dimensions={0}, to_apply=%add, sharding={devices=[2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}} ROOT %copy = f32[8] copy(%reduce) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "input"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// Variadic (tuple-shaped) reduce: the sharded operand drives both tuple
// elements of the reduce result forward AND the unsharded operand backward.
TEST_P(ParameterizedMetadataTestWithOutput, ShardedTupleReduceForwardAndBackwardPass) { const char* const hlo_string = R"( HloModule module %minmax_func { %lhs_value = f32[] parameter(0) %rhs_value = f32[] parameter(2) %compare.2 = pred[] compare(%lhs_value, %rhs_value), direction=GT %select.4 = f32[] select(%compare.2, %lhs_value, %rhs_value) %lhs_index = s32[] parameter(1) %rhs_index = s32[] parameter(3) %select.5 = s32[] select(%compare.2, %lhs_index, %rhs_index) ROOT %tuple.2 = (f32[], s32[]) tuple(%select.4, %select.5) } ENTRY %main { %param0 = f32[28,10] parameter(0) %param1 = s32[28,10] parameter(1), sharding={devices=[2,1]0,1 metadata={op_name="a"}} %copy_param0 = f32[28,10] copy(%param0) %init0 = f32[] parameter(2) %init1 = s32[] parameter(3) %reduce = (f32[28], s32[28]) reduce(%copy_param0, %param1, %init0, %init1), dimensions={1}, to_apply=%minmax_func %gte0 = f32[28] get-tuple-element(%reduce), index=0
%gte1 = s32[28] get-tuple-element(%reduce), index=1 %copy0 = f32[28] copy(%gte0) %copy1 = s32[28] copy(%gte1) ROOT %tuple = (f32[28], s32[28]) tuple(%copy0, %copy1) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata, GetParam().allow_root_sharding_propagation) .Run(module.get())); EXPECT_TRUE(changed); auto* reduce = FindInstruction(module.get(), "reduce"); ASSERT_NE(reduce, nullptr); EXPECT_THAT(reduce, op::Sharding("{{devices=[2]0,1},{devices=[2]0,1}}")); auto* copy_param0 = FindInstruction(module.get(), "copy_param0"); ASSERT_NE(copy_param0, nullptr); EXPECT_THAT(copy_param0, op::Sharding("{devices=[2,1]0,1}")); for (const HloSharding& sharding : {copy_param0->sharding(), reduce->sharding().tuple_elements()[0], reduce->sharding().tuple_elements()[1]}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(sharding, ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(sharding, ShardingMetadata({})); } } if (GetParam().allow_root_sharding_propagation) { EXPECT_THAT(module->entry_computation()->root_instruction(), op::Sharding("{{devices=[2]0,1},{devices=[2]0,1}}")); } }

// get-tuple-element picks out the matching element sharding (and metadata)
// from a nested tuple sharding annotated on the tuple producer.
TEST_P(ParameterizedMetadataTestWithOutput, GetTupleElementForwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %gte { %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0) %tuple = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) tuple( %param0, %param0) %tuple.1 = (f32[5,7,11,13]{3,2,1,0}, (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0})) tuple( %param0, %tuple), sharding={{devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}}, {replicated metadata={op_name="b"}}, {devices=[1,2,2,1]0,1,2,3 metadata={op_name="c"}}} %gte = f32[5,7,11,13]{3,2,1,0} get-tuple-element(%tuple.1), index=0 %gte.1 = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0})
get-tuple-element( %tuple.1), index=1 %gte.2 = f32[5,7,11,13]{3,2,1,0} get-tuple-element(%gte.1), index=0 ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%gte.2) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata, GetParam().allow_root_sharding_propagation) .Run(module.get())); EXPECT_TRUE(changed); auto* gte = FindInstruction(module.get(), "gte"); ASSERT_NE(gte, nullptr); EXPECT_THAT(gte, op::Sharding("{devices=[1,2,2,1]0,1,2,3}")); auto* gte1 = FindInstruction(module.get(), "gte.1"); ASSERT_NE(gte1, nullptr); EXPECT_THAT(gte1, op::Sharding("{{replicated}, {devices=[1,2,2,1]0,1,2,3}}")); auto* gte2 = FindInstruction(module.get(), "gte.2"); ASSERT_NE(gte2, nullptr); EXPECT_THAT(gte2, op::Sharding("{replicated}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(gte->sharding(), ShardingMetadata({CreateMetadata("a")})); EXPECT_THAT(gte1->sharding().tuple_elements()[0], ShardingMetadata({CreateMetadata("b")})); EXPECT_THAT(gte1->sharding().tuple_elements()[1], ShardingMetadata({CreateMetadata("c")})); EXPECT_THAT(gte2->sharding(), ShardingMetadata({CreateMetadata("b")})); } else { for (const HloSharding& sharding : {gte->sharding(), gte1->sharding().tuple_elements()[0], gte1->sharding().tuple_elements()[1], gte2->sharding()}) { EXPECT_THAT(sharding, ShardingMetadata({})); } } if (GetParam().allow_root_sharding_propagation) { EXPECT_THAT(module->entry_computation()->root_instruction(), op::Sharding("{replicated}")); } }

// Building tuples from sharded/replicated/unannotated operands composes the
// element shardings into tuple shardings; unannotated slots stay replicated.
TEST_P(ParameterizedMetadataTest, TupleForwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %tuple { %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0), sharding={replicated metadata={op_name="a"}} %param1 = f32[5,7,11,13]{3,2,1,0} parameter(1), sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="b"}}
%param2 = f32[5,7,11,13]{3,2,1,0} parameter(2) %tuple = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) tuple( %param1, %param2) %tuple.1 = (f32[5,7,11,13]{3,2,1,0}, (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0})) tuple( %param0, %tuple) ROOT %copy = (f32[5,7,11,13]{3,2,1,0}, (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0})) copy( %tuple.1) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* tuple = FindInstruction(module.get(), "tuple"); ASSERT_NE(tuple, nullptr); EXPECT_THAT(tuple, op::Sharding("{{devices=[1,2,2,1]0,1,2,3}," " {replicated}}")); auto* tuple1 = FindInstruction(module.get(), "tuple.1"); ASSERT_NE(tuple1, nullptr); EXPECT_THAT(tuple1, op::Sharding("{{replicated}," " {devices=[1,2,2,1]0,1,2,3}," " {replicated}}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(tuple->sharding().tuple_elements()[0], ShardingMetadata({CreateMetadata("b")})); EXPECT_THAT(tuple->sharding().tuple_elements()[1], ShardingMetadata({})); EXPECT_THAT(tuple1->sharding().tuple_elements()[0], ShardingMetadata({})); EXPECT_THAT(tuple1->sharding().tuple_elements()[1], ShardingMetadata({CreateMetadata("b")})); EXPECT_THAT(tuple1->sharding().tuple_elements()[2], ShardingMetadata({})); } else { for (const HloSharding& tuple_sharding : {tuple->sharding(), tuple1->sharding()}) { for (const HloSharding& sub_sharding : tuple_sharding.tuple_elements()) { EXPECT_THAT(sub_sharding, ShardingMetadata({})); } } } }

// Forward propagation through convolution: the lhs (activations) sharding
// carries over to the conv output unchanged.
TEST_P(ParameterizedMetadataTest, ForwardConvolutionForwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %conv { %lhs = f32[5,7,11,13]{3,2,1,0} parameter(0), sharding={devices=[2,2,2,1]0,1,2,3,4,5,6,7 metadata={op_name="a"}} %rhs = f32[3,3,13,17]{3,2,1,0} parameter(1)
%convolution = f32[5,7,11,17]{3,2,1,0} convolution(%lhs, %rhs), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f ROOT %copy = f32[5,7,11,17]{3,2,1,0} copy(%convolution) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "convolution"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2,2,2,1]0,1,2,3,4,5,6,7}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// Forward propagation through a convolution with large rhs dilation still
// keeps the lhs spatial-dimension tiling on the output.
TEST_P(ParameterizedMetadataTest, ForwardConvolutionLargeDilationForwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %conv { %lhs = f32[8,64,2]{2,1,0} parameter(0), sharding={devices=[1,4,1]0,1,2,3 metadata={op_name="a"}} %rhs = f32[3,2,2]{2,1,0} parameter(1) %convolution = f32[8,32,2]{2,1,0} convolution(%lhs, %rhs), window={size=3 rhs_dilate=16}, dim_labels=b0f_0io->b0f ROOT %copy = f32[8,32,2]{2,1,0} copy(%convolution) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "convolution"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,4,1]0,1,2,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(),
ShardingMetadata({})); } }

// SPMD forward propagation through a strided 3D convolution with a small
// kernel: the spatially tiled lhs sharding transfers to the output.
TEST_P(ParameterizedMetadataTest, ForwardConvolution3DSmallKernel) { const char* const hlo_string = R"( HloModule module ENTRY %conv { %lhs = bf16[32,32,8,7,128]{4,3,2,1,0} parameter(0), sharding={devices=[1,4,1,1,1]0,1,2,3 metadata={op_name="a"}} %rhs = bf16[3,3,3,128,256]{4,3,2,1,0} parameter(1) %convolution = bf16[16,16,8,3,256]{4,3,2,1,0} convolution(bf16[32,32,8,7,128]{4,3,2,1,0} %lhs, bf16[3,3,3,128,256]{4,3,2,1,0} %rhs), window={size=3x3x3 stride=2x2x2 pad=1_1x1_1x0_0}, dim_labels=01b2f_012io->01b2f ROOT %copy = bf16[16,16,8,3,256]{4,3,2,1,0} copy(%convolution) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "convolution"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,4,1,1,1]0,1,2,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// Forward propagation through transpose: tile dimensions are permuted along
// with the data dimensions.
TEST_P(ParameterizedMetadataTest, TransposeForwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %transpose { %param = f32[7,11,13]{2,1,0} parameter(0), sharding={devices=[2,1,2]0,1,2,3 metadata={op_name="a"}} %transpose = f32[11,13,7]{2,1,0} transpose(%param), dimensions={1,2,0} ROOT %copy = f32[11,13,7]{2,1,0} copy(%transpose) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(),
"transpose"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2]0,2,1,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// Backward propagation through transpose: the output sharding is inverse-
// permuted onto the operand copy.
TEST_P(ParameterizedMetadataTest, TransposeBackwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %transpose { %param = f32[7,11,13]{2,1,0} parameter(0) %copy = f32[7,11,13]{2,1,0} copy(%param) ROOT %transpose = f32[11,13,7]{2,1,0} transpose(%copy), dimensions={1,2,0}, sharding={devices=[1,2,2]0,1,2,3 metadata={op_name="a"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "copy"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2,1,2]0,2,1,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// Forward propagation through reshape: the tiled major dimension maps onto
// the corresponding reshaped dimension.
TEST_P(ParameterizedMetadataTest, ReshapeForwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %reshape { %param0 = f32[1430,1]{1,0} parameter(0), sharding={devices=[2,1]0,1 metadata={op_name="a"}} %reshape = f32[10,11,13]{2,1,0} reshape(%param0) ROOT %copy = f32[10,11,13]{2,1,0} copy(%reshape) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction =
FindInstruction(module.get(), "reshape"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2,1,1]0,1}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// Backward propagation through reshape: the output sharding maps back onto
// the operand copy's corresponding dimension.
TEST_P(ParameterizedMetadataTest, ReshapeBackwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %reshape { %param0 = f32[2002,1]{1,0} parameter(0) %copy = f32[2002,1]{1,0} copy(f32[2002,1]{1,0} %param0) ROOT %reshape = f32[14,11,13]{2,1,0} reshape(%copy), sharding={devices=[2,1,1]0,1 metadata={op_name="a"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "copy"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2,1]0,1}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// Forward propagation through pad: the input's tiling carries to the padded
// result.
TEST_P(ParameterizedMetadataTest, PadForwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %pad { %input = f32[11,17]{1,0} parameter(0), sharding={devices=[2,2]0,1,2,3 metadata={op_name="a"}} %pad_value = f32[] parameter(1) %pad = f32[27,51]{1,0} pad(%input, %pad_value), padding=2_4_1x1_1_2 ROOT %copy = f32[27,51]{1,0} copy(%pad) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get()));
EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "pad"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,1,2,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, PadBackwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %pad { %input = f32[11,17]{1,0} parameter(0) %copy = f32[11,17]{1,0} copy(%input) %pad_value = f32[] parameter(1) %pad = f32[27,51]{1,0} pad(%copy, %pad_value), padding=2_4_1x1_1_2, sharding={devices=[2,2]0,1,2,3 metadata={op_name="a"}} ROOT %result = f32[27,51]{1,0} copy(%pad) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "copy"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,1,2,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, PartialReplicatedPadForwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %pad { %input = f32[11,17]{1,0} parameter(0), sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="a"}} %pad_value = f32[] parameter(1) %pad = f32[27,51]{1,0} pad(%input, %pad_value), padding=2_4_1x1_1_2 ROOT %copy = f32[27,51]{1,0} copy(%pad) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { 
ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "pad"); ASSERT_NE(instruction, nullptr); EXPECT_THAT( instruction, op::Sharding("{devices=[2,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ShardedPreferredOverReplicated) { const char* const hlo_string = R"( HloModule module ENTRY %replicated { %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0), sharding={replicated metadata={op_name="a"}} %copy = f32[5,7,11,13]{3,2,1,0} copy(%param0) %param1 = f32[5,7,11,13]{3,2,1,0} parameter(1), sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="b"}} %copy.1 = f32[5,7,11,13]{3,2,1,0} copy(%param1) %add = f32[5,7,11,13]{3,2,1,0} add(%copy, %copy.1) ROOT %copy.2 = f32[5,7,11,13]{3,2,1,0} copy(%add) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* copy = FindInstruction(module.get(), "copy"); ASSERT_NE(copy, nullptr); EXPECT_THAT(copy, op::Sharding("{devices=[1,2,2,1]0,1,2,3}")); auto* copy1 = FindInstruction(module.get(), "copy.1"); ASSERT_NE(copy1, nullptr); EXPECT_THAT(copy1, op::Sharding("{devices=[1,2,2,1]0,1,2,3}")); auto* add = FindInstruction(module.get(), "add"); ASSERT_NE(add, nullptr); EXPECT_THAT(add, op::Sharding("{devices=[1,2,2,1]0,1,2,3}")); for (const HloSharding& sharding : {copy->sharding(), copy1->sharding(), add->sharding()}) { if (GetParam().propagate_metadata && 
        !GetParam().clear_metadata) {
      // All winning shardings originate from the tiled %param1, so they carry
      // its metadata ("b") when propagation is on.
      EXPECT_THAT(sharding, ShardingMetadata({CreateMetadata("b")}));
    } else {
      EXPECT_THAT(sharding, ShardingMetadata({}));
    }
  }
}

// SPMD forward propagation through a reshape keeps the partially-replicated
// last dimension of the tile assignment.
TEST_P(ParameterizedMetadataTest, PartialReplicateReshapeForwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
  %param0 = f32[1430,1]{1,0} parameter(0),
    sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
  %reshape = f32[10,11,13]{2,1,0} reshape(%param0)
  ROOT %copy = f32[10,11,13]{2,1,0} copy(%reshape)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
          .Run(module.get()));
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "reshape");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding("{devices=[2,1,1,2]0,1,2,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

// SPMD backward propagation through a reshape: the partially-replicated
// result sharding is transferred to the reshape's operand (%copy).
TEST_P(ParameterizedMetadataTest, PartialReplicateReshapeBackwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
  %param0 = f32[2002,1]{1,0} parameter(0)
  %copy = f32[2002,1]{1,0} copy(f32[2002,1]{1,0} %param0)
  ROOT %reshape = f32[14,11,13]{2,1,0} reshape(%copy),
    sharding={devices=[2,1,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
          .Run(module.get()));
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "copy");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

// A tuple whose operands all carry maximal (single-device) shardings gets no
// sharding itself. The pass only reports a change when metadata assignment is
// requested without being cleared.
TEST_P(ParameterizedMetadataTest, DontShardTuplesIfAllInputIsMaximal) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %tuple {
  %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
    sharding={maximal device=0 metadata={op_name="a"}}
  %param1 = f32[5,7,11,13]{3,2,1,0} parameter(1),
    sharding={maximal device=1 metadata={op_name="b"}}
  %tuple = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) tuple(
    %param0, %param1)
  ROOT %copy = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) copy(%tuple)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  EXPECT_EQ(changed,
            !GetParam().propagate_metadata && !GetParam().clear_metadata);
  auto* instruction = FindInstruction(module.get(), "tuple");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::NoSharding());
}

// A sharding on the convolution's LHS batch dimension propagates forward to
// the convolution output's batch dimension (dim_labels=b0f_i0o->b0f).
TEST_P(ParameterizedMetadataTest, ValidConvolution) {
  const char* const hlo_string = R"(
HloModule module
ENTRY conv {
  %lhs = f32[13,17,19]{2,1,0} parameter(0),
    sharding={devices=[1,2,1]0,1 metadata={op_name="a"}}
  %rhs = f32[19,5,19]{2,1,0} parameter(1)
  %conv = f32[13,13,19]{2,1,0} convolution(%lhs, %rhs),
    window={size=5}, dim_labels=b0f_i0o->b0f
  ROOT %tuple = (f32[13,13,19]{2,1,0}) tuple(%conv)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "conv");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,1]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

// A sharding on the operand of a strided slice propagates forward onto the
// slice.
TEST_P(ParameterizedMetadataTest, StridedSlice) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %slice {
  %param = f32[17,13]{1,0} parameter(0),
    sharding={devices=[2,1]0,1 metadata={op_name="a"}}
  %slice = f32[7,5]{1,0} slice(%param), slice={[1:15:2], [5:10:1]}
  ROOT %tuple = (f32[7,5]{1,0}) tuple(%slice)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "slice");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,1]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

// Same as StridedSlice, but with a partially-replicated operand sharding that
// must be preserved through the slice.
TEST_P(ParameterizedMetadataTest, PartialReplicatedStridedSlice) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %slice {
  %param = f32[17,13]{1,0} parameter(0),
    sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
  %slice = f32[7,5]{1,0} slice(%param), slice={[1:15:2], [5:10:1]}
  ROOT %tuple = (f32[7,5]{1,0}) tuple(%slice)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false,
                          GetParam().propagate_metadata)
          .Run(module.get()));
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "slice");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

// A sharding annotated on a reduce-window propagates backward onto its data
// operand (%param.copy).
TEST_P(ParameterizedMetadataTest, ReduceWindowBackwardPass) {
  const char* const hlo_string = R"(
HloModule module
%add (lhs: f32[], rhs: f32[]) -> f32[] {
  %lhs = f32[] parameter(0)
  %rhs = f32[] parameter(1)
  ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce_window {
  %param = f32[13,17]{1,0} parameter(0)
  %param.copy = f32[13,17]{1,0} copy(%param)
  %init = f32[] parameter(1)
  ROOT %reduce-window = f32[7,17]{1,0} reduce-window(%param.copy, %init),
    window={size=3x2 stride=2x1 pad=1_1x0_1}, to_apply=%add,
    sharding={devices=[2,1]0,1 metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  EXPECT_TRUE(changed);
  auto* param_copy = FindInstruction(module.get(), "param.copy");
  ASSERT_NE(param_copy, nullptr);
  EXPECT_THAT(param_copy, op::Sharding("{devices=[2,1]0,1}"));
  auto* reduce_window = FindInstruction(module.get(), "reduce-window");
  ASSERT_NE(reduce_window, nullptr);
  EXPECT_THAT(reduce_window, op::Sharding("{devices=[2,1]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(param_copy->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(reduce_window->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(param_copy->sharding(), ShardingMetadata({}));
    EXPECT_THAT(reduce_window->sharding(),
                ShardingMetadata({}));
  }
}

// Variadic (multi-output) reduce-window: per-element tuple shardings with
// distinct metadata ("a"/"b") propagate backward to the matching data
// operands.
TEST_P(ParameterizedMetadataTest, VariadicReduceWindowBackwardPass) {
  const char* const hlo_string = R"(
HloModule module
%add (a: f32[], b: s32[], c: f32[], d: s32[]) -> (f32[], s32[]) {
  %a = f32[] parameter(0)
  %b = s32[] parameter(1)
  %c = f32[] parameter(2)
  %d = s32[] parameter(3)
  %add.0 = f32[] add(%a, %c)
  %add.1 = s32[] add(%b, %d)
  ROOT %t = tuple(%add.0, %add.1)
}
ENTRY %reduce_window {
  %param.0 = f32[13,17]{1,0} parameter(0)
  %param.0.copy = f32[13,17]{1,0} copy(%param.0)
  %param.1 = s32[13,17]{1,0} parameter(1)
  %param.1.copy = s32[13,17]{1,0} copy(%param.1)
  %init.0 = f32[] parameter(2)
  %init.1 = s32[] parameter(3)
  ROOT %reduce-window = (f32[7,17]{1,0}, s32[7,17]{1,0})
    reduce-window(%param.0.copy, %param.1.copy, %init.0, %init.1),
    window={size=3x2 stride=2x1 pad=1_1x0_1}, to_apply=%add,
    sharding={{devices=[2,1]0,1 metadata={op_name="a"}},
              {devices=[2,1]0,1 metadata={op_name="b"}}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  EXPECT_TRUE(changed);
  auto* param_0_copy = FindInstruction(module.get(), "param.0.copy");
  ASSERT_NE(param_0_copy, nullptr);
  EXPECT_THAT(param_0_copy, op::Sharding("{devices=[2,1]0,1}"));
  auto* param_1_copy = FindInstruction(module.get(), "param.1.copy");
  ASSERT_NE(param_1_copy, nullptr);
  EXPECT_THAT(param_1_copy, op::Sharding("{devices=[2,1]0,1}"));
  auto* reduce_window = FindInstruction(module.get(), "reduce-window");
  ASSERT_NE(reduce_window, nullptr);
  EXPECT_THAT(reduce_window,
              op::Sharding("{{devices=[2,1]0,1}, {devices=[2,1]0,1}}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(param_0_copy->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(param_1_copy->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
    EXPECT_THAT(reduce_window->sharding().tuple_elements()[0],
                ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(reduce_window->sharding().tuple_elements()[1],
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(param_0_copy->sharding(), ShardingMetadata({}));
    EXPECT_THAT(param_1_copy->sharding(), ShardingMetadata({}));
    EXPECT_THAT(reduce_window->sharding(), ShardingMetadata({}));
  }
}

// A replicated LHS propagates a replicated sharding onto the convolution.
TEST_P(ParameterizedMetadataTest, ReplicatedConvolutionLhs) {
  const char* const hlo_string = R"(
HloModule module
ENTRY conv {
  %lhs = f32[3,2,3]{2,1,0} parameter(0),
    sharding={replicated metadata={op_name="a"}}
  %rhs = f32[2,2,1]{2,1,0} parameter(1)
  %conv = f32[3,2,3]{2,1,0} convolution(%lhs, %rhs),
    window={size=1}, dim_labels=bf0_oi0->bf0
  ROOT %tuple = (f32[3,2,3]{2,1,0}) tuple(%conv)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  EXPECT_TRUE(changed);
  auto* lhs = FindInstruction(module.get(), "lhs");
  ASSERT_NE(lhs, nullptr);
  EXPECT_THAT(lhs, op::Sharding("{replicated}"));
  auto* conv = FindInstruction(module.get(), "conv");
  ASSERT_NE(conv, nullptr);
  EXPECT_THAT(conv, op::Sharding("{replicated}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(lhs->sharding(), ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(conv->sharding(), ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(lhs->sharding(), ShardingMetadata({}));
    EXPECT_THAT(conv->sharding(), ShardingMetadata({}));
  }
}

// An LHS tiled along the (contracted) feature dimension yields a replicated
// convolution result, while still carrying the origin metadata.
TEST_P(ParameterizedMetadataTest, ConvolutionShardedFeature) {
  const char* const hlo_string = R"(
HloModule module
ENTRY conv {
  %lhs = f32[3,2,3]{2,1,0} parameter(0),
    sharding={devices=[1,2,1]0,1 metadata={op_name="a"}}
  %rhs = f32[2,2,1]{2,1,0} parameter(1)
  %conv = f32[3,2,3]{2,1,0} convolution(%lhs, %rhs),
    window={size=1},
    dim_labels=bf0_oi0->bf0
  ROOT %tuple = (f32[3,2,3]{2,1,0}) tuple(%conv)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "conv");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{replicated}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

// Non-default dim_labels (f0b_i0o->0bf): the tile on the LHS spatial dim is
// remapped to the corresponding output dimension.
TEST_P(ParameterizedMetadataTest, ConvolutionDifferentDimensionNumbers) {
  const char* const hlo_string = R"(
HloModule module
ENTRY conv {
  %lhs = f32[8,16,512] parameter(0),
    sharding={devices=[1,2,1]0,1 metadata={op_name="a"}}
  %rhs = f32[8,2,512] parameter(1)
  %conv = f32[3,512,512] convolution(%lhs, %rhs),
    window={size=2 stride=5},
    dim_labels=f0b_i0o->0bf
  ROOT %tuple = (f32[3,512,512]) tuple(%conv)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "conv");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,1,1]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

// Both concatenate operands carry the same tiling (with different metadata);
// the concat takes that tiling and — when metadata propagates — keeps "a".
TEST_P(ParameterizedMetadataTest, Concatenate) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %concat {
  %param.0 = f32[5,7] parameter(0),
    sharding={devices=[2,1]0,1 metadata={op_name="a"}}
  %param.1 = f32[5,9] parameter(1),
    sharding={devices=[2,1]0,1 metadata={op_name="b"}}
  %concat = f32[5,16] concatenate(%param.0, %param.1),
    dimensions={1}
  ROOT %tuple = (f32[5,16]) tuple(%concat)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "concat");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,1]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

// A tuple-shaped sharding on the root propagates backward element-wise onto
// the tuple's operands.
TEST_P(ParameterizedMetadataTest, TupleBackwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %tuple {
  %param.0 = f32[1] parameter(0)
  %param.1 = f32[3] parameter(1)
  %copy.0 = f32[1] copy(%param.0)
  %copy.1 = f32[3] copy(%param.1)
  ROOT %tuple = (f32[1], f32[3]) tuple(%copy.0, %copy.1),
    sharding={{replicated metadata={op_name="a"}},
              {devices=[2]0,1 metadata={op_name="b"}}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  EXPECT_TRUE(changed);
  auto* copy0 = FindInstruction(module.get(), "copy.0");
  ASSERT_NE(copy0, nullptr);
  EXPECT_THAT(copy0, op::Sharding("{replicated}"));
  auto* copy1 = FindInstruction(module.get(), "copy.1");
  ASSERT_NE(copy1, nullptr);
  EXPECT_THAT(copy1, op::Sharding("{devices=[2]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(copy0->sharding(), ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(copy1->sharding(), ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(copy0->sharding(), ShardingMetadata({}));
    EXPECT_THAT(copy1->sharding(), ShardingMetadata({}));
  }
}

// all-reduce: forward propagation through a cross-replica all-reduce keeps
// the operand tiling; a cross-partition (channel_id) all-reduce gets no
// sharding; a replicated user propagates replication backward.
TEST_P(ParameterizedMetadataTest, AllReduce) {
  const char* const hlo_string = R"(
HloModule module
%add (lhs: f32[], rhs: f32[]) -> f32[] {
  %add_lhs = f32[] parameter(0)
  %add_rhs = f32[] parameter(1)
  ROOT %add = f32[] add(f32[] %add_lhs, f32[] %add_rhs)
}
ENTRY %entry {
  %param.0 = f32[3] parameter(0)
  %param.1 = f32[3] parameter(1)
  %copy_f_t = f32[3] copy(%param.1),
    sharding={devices=[2]0,1 metadata={op_name="a"}}
  %crs_f.tiled = f32[3] all-reduce(%copy_f_t), to_apply=%add
  %crs_f.none = f32[3] all-reduce(%copy_f_t), to_apply=%add,
    channel_id=1
  %crs_b.replicated = f32[3] all-reduce(%param.0), to_apply=%add
  %copy_b_r = f32[3] copy(%crs_b.replicated),
    sharding={replicated metadata={op_name="b"}}
  ROOT %tuple = (f32[3], f32[3], f32[3]) tuple(
    %crs_f.tiled, crs_f.none, %copy_b_r)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  EXPECT_TRUE(changed);
  auto* crs_f_tiled = FindInstruction(module.get(), "crs_f.tiled");
  ASSERT_NE(crs_f_tiled, nullptr);
  EXPECT_THAT(crs_f_tiled, op::Sharding("{devices=[2]0,1}"));
  auto* crs_f_none = FindInstruction(module.get(), "crs_f.none");
  ASSERT_NE(crs_f_none, nullptr);
  EXPECT_THAT(crs_f_none, op::NoSharding());
  auto* crs_b_replicated = FindInstruction(module.get(), "crs_b.replicated");
  ASSERT_NE(crs_b_replicated, nullptr);
  EXPECT_THAT(crs_b_replicated, op::Sharding("{replicated}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(crs_f_tiled->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(crs_b_replicated->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(crs_f_tiled->sharding(), ShardingMetadata({}));
    EXPECT_THAT(crs_b_replicated->sharding(), ShardingMetadata({}));
  }
}

// While-loop propagation: a sharding introduced anywhere in the loop (body
// root, a body instruction, or the loop operand) must converge to a
// consistent sharding on the while, its body root, and both computation
// parameters. Three scenarios below share the `while_is_sharded` checker.
TEST_P(ParameterizedMetadataTest, While) {
  const char* const hlo_string = R"(
HloModule module
%cond {
  %vars.cond = (u32[], f32[10,10]) parameter(0)
  %count.cond = u32[] get-tuple-element((u32[], f32[10,10]) %vars.cond), index=0
  %limit = u32[] constant(10)
  ROOT %lt = pred[] compare(u32[] %count.cond, u32[] %limit), direction=LT
}
%body {
  %vars = (u32[], f32[10,10]) parameter(0)
  %count = u32[] get-tuple-element(%vars), index=0
  %acc = f32[10,10] get-tuple-element((u32[], f32[10,10]) %vars), index=1
  %one = u32[] constant(1)
  %count.1 = u32[] add(u32[] %count, u32[] %one), sharding={replicated}
  %acc.1 = f32[10,10] add(f32[10,10] %acc, f32[10,10] %acc)
  ROOT %tuple = (u32[], f32[10,10]) tuple(u32[] %count.1, f32[10,10] %acc.1)
}
ENTRY %entry {
  %p0 = f32[10,10] parameter(0)
  %p0.copy = f32[10,10] copy(f32[10,10] %p0)
  %p1 = f32[10,10] parameter(1)
  %zero = u32[] constant(0)
  %init = (u32[], f32[10,10]) tuple(u32[] %zero, f32[10,10] %p0.copy)
  %while = (u32[], f32[10,10]) while((u32[], f32[10,10]) %init),
    body=%body, condition=%cond
  %res = f32[10,10] get-tuple-element((u32[], f32[10,10]) %while), index=1
  %prev = f32[10,10] get-tuple-element((u32[], f32[10,10]) %init), index=1
  %res.1 = f32[10,10] multiply(f32[10,10] %res, %prev)
  ROOT %res_tuple = (f32[10,10]) tuple(f32[10,10] %res.1)
})";
  // Runs SPMD propagation on `module` and verifies the while instruction,
  // body root, and body/condition parameters all end up with `sharding`,
  // with per-tuple-element metadata matching `sharding_metadata` (or empty
  // metadata when propagation is off / metadata was cleared).
  auto while_is_sharded =
      [this](HloModule* module, const HloSharding& sharding,
             absl::Span<const absl::Span<const OpMetadata>>
                 sharding_metadata) {
        if (GetParam().clear_metadata) {
          ClearMetadata(module);
        }
        TF_ASSERT_OK_AND_ASSIGN(
            bool changed,
            ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
                .Run(module));
        EXPECT_TRUE(changed);
        auto while_instr = FindInstruction(module, "while");
        EXPECT_NE(nullptr, while_instr);
        std::vector<const HloInstruction*> instructions{
            while_instr, while_instr->while_body()->root_instruction(),
            while_instr->while_body()->parameter_instruction(0),
            while_instr->while_condition()->parameter_instruction(0)};
        for (auto instr : instructions) {
          ASSERT_TRUE(instr->has_sharding());
          EXPECT_EQ(sharding, instr->sharding());
          ASSERT_EQ(instr->sharding().tuple_elements().size(),
                    sharding_metadata.size());
          for (int i = 0, e = sharding_metadata.size(); i < e; ++i) {
            if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
              EXPECT_THAT(instr->sharding().tuple_elements()[i],
                          ShardingMetadata(sharding_metadata[i]));
            } else {
              EXPECT_THAT(instr->sharding().tuple_elements()[i],
                          ShardingMetadata({}));
            }
          }
        }
      };
  {
    // Propagation of user-defined partial sharding of while-related
    // instruction (body root in this test).
    TF_ASSERT_OK_AND_ASSIGN(auto module,
                            ParseAndReturnVerifiedModule(hlo_string));
    auto body_root = FindInstruction(module.get(), "tuple");
    EXPECT_NE(nullptr, body_root);
    auto sharding = ParseSharding(
                        "{{replicated metadata={op_name=\"b\"}}, "
                        "{devices=[2,1]0,1 metadata={op_name=\"c\"}}}")
                        .ConsumeValueOrDie();
    body_root->set_sharding(sharding);
    while_is_sharded(module.get(), sharding.WithoutMetadata(),
                     {{CreateMetadata("b")}, {CreateMetadata("c")}});
  }
  {
    // Propagation from acc.1 to the rest of the loop.
    TF_ASSERT_OK_AND_ASSIGN(auto module,
                            ParseAndReturnVerifiedModule(hlo_string));
    auto acc_1 = FindInstruction(module.get(), "acc.1");
    EXPECT_NE(nullptr, acc_1);
    acc_1->set_sharding(
        ParseSharding("{devices=[2,1]0,1 metadata={op_name=\"b\"}}")
            .ConsumeValueOrDie());
    while_is_sharded(
        module.get(),
        ParseSharding("{{replicated}, {devices=[2,1]0,1}}").ConsumeValueOrDie(),
        {{}, {CreateMetadata("b")}});
  }
  {
    // Merge partial sharding from operand and body.
    TF_ASSERT_OK_AND_ASSIGN(auto module,
                            ParseAndReturnVerifiedModule(hlo_string));
    auto acc_1 = FindInstruction(module.get(), "acc.1");
    EXPECT_NE(nullptr, acc_1);
    acc_1->set_sharding(
        ParseSharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate "
                      "metadata={op_name=\"b\"}}")
            .ConsumeValueOrDie());
    auto p0 = FindInstruction(module.get(), "p0");
    p0->set_sharding(
        ParseSharding("{devices=[1,2,2]0,2,1,3 last_tile_dim_replicate "
                      "metadata={op_name=\"c\"}}")
            .ConsumeValueOrDie());
    while_is_sharded(module.get(),
                     ParseSharding("{{replicated}, "
                                   "{devices=[2,2]0,1,2,3}}")
                         .ConsumeValueOrDie(),
                     {{}, {CreateMetadata("c"), CreateMetadata("b")}});
  }
}

// A maximal-device sharding on a recv inside the while body is adopted by the
// whole loop (while, body root, and parameters), without metadata.
TEST_P(ParameterizedMetadataTest, WhileGetShardingFromRecvInBody) {
  const char* const hlo_string = R"(
HloModule module
%cond {
  %vars.cond = (u32[], f32[]) parameter(0)
  %count.cond = u32[] get-tuple-element(%vars.cond), index=0
  %limit = u32[] constant(10)
  ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
  %param = (u32[], f32[]) parameter(0)
  %count = u32[] get-tuple-element(%param), index=0
  %after-all = token[] after-all()
  %recv = (f32[], u32[], token[]) recv(%after-all), channel_id=1,
    sharding={maximal device=1 metadata={op_name="a"}}
  %recv-done = (f32[], token[]) recv-done(%recv), channel_id=1
  %data = f32[] get-tuple-element(%recv-done), index=0
  ROOT %tuple = (u32[], f32[]) tuple(%count, %data)
}
ENTRY %entry {
  %p0 = f32[] parameter(0)
  %zero = u32[] constant(0)
  %init = (u32[], f32[]) tuple(%zero, %p0)
  %while = (u32[], f32[]) while(%init), body=%body, condition=%cond
  ROOT %result = f32[] get-tuple-element(%while), index=1
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  // The change happens before the fixpt loop
  EXPECT_EQ(changed, !GetParam().propagate_metadata &&
                         !GetParam().clear_metadata);
  auto sharding = ParseSharding("{{maximal device=1}, {maximal device=1}}")
                      .ConsumeValueOrDie();
  auto while_instr = FindInstruction(module.get(), "while");
  ASSERT_NE(nullptr, while_instr);
  std::vector<const HloInstruction*> instructions{
      while_instr, while_instr->while_body()->root_instruction(),
      while_instr->while_body()->parameter_instruction(0),
      while_instr->while_condition()->parameter_instruction(0)};
  for (auto instr : instructions) {
    ASSERT_TRUE(instr->has_sharding());
    EXPECT_EQ(sharding, instr->sharding());
    for (const HloSharding& sub_sharding : instr->sharding().tuple_elements()) {
      EXPECT_THAT(sub_sharding, ShardingMetadata({}));
    }
  }
}

// A body instruction pinned to device 0 before the recv (pinned to device 1)
// makes propagation fail with a device-conflict error.
TEST_P(ParameterizedMetadataTest, WhileConflictingShardingInBodyBeforeRecv) {
  const char* const hlo_string = R"(
HloModule module
%cond {
  %vars.cond = (u32[], f32[]) parameter(0)
  %count.cond = u32[] get-tuple-element(%vars.cond), index=0
  %limit = u32[] constant(10)
  ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
  %param = (u32[], f32[]) parameter(0)
  %count = u32[] get-tuple-element(%param), index=0,
    sharding={maximal device=0 metadata={op_name="a"}}
  %after-all = token[] after-all()
  %recv = (f32[], u32[], token[]) recv(%after-all), channel_id=1,
    sharding={maximal device=1 metadata={op_name="b"}}
  %recv-done = (f32[], token[]) recv-done(%recv), channel_id=1
  %data = f32[] get-tuple-element(%recv-done), index=0
  ROOT %tuple = (u32[], f32[]) tuple(%count, %data)
}
ENTRY %entry {
  %p0 = f32[] parameter(0)
  %zero = u32[] constant(0)
  %init = (u32[], f32[]) tuple(%zero, %p0)
  %while = (u32[], f32[]) while(%init), body=%body, condition=%cond
  ROOT %result = f32[] get-tuple-element(%while), index=1
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  auto result =
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get());
  EXPECT_THAT(result.status().error_message(),
              ::testing::HasSubstr(
                  "Instruction: count is on device: 0, which conflicts with "
                  "device: 1 of channel instruction: recv"));
}

// Same conflict, but on an instruction that consumes the recv's result.
TEST_P(ParameterizedMetadataTest, WhileConflictingShardingInBodyAfterRecv) {
  const char* const hlo_string = R"(
HloModule module
%cond {
  %vars.cond = (u32[], f32[]) parameter(0)
  %count.cond = u32[] get-tuple-element(%vars.cond), index=0
  %limit = u32[] constant(10)
  ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
  %param = (u32[], f32[]) parameter(0)
  %count = u32[] get-tuple-element(%param), index=0
  %after-all = token[] after-all()
  %recv = (f32[], u32[], token[]) recv(%after-all), channel_id=1,
    sharding={maximal device=1 metadata={op_name="a"}}
  %recv-done = (f32[], token[]) recv-done(%recv), channel_id=1
  %data = f32[] get-tuple-element(%recv-done), index=0,
    sharding={maximal device=0 metadata={op_name="b"}}
  ROOT %tuple = (u32[], f32[]) tuple(%count, %data)
}
ENTRY %entry {
  %p0 = f32[] parameter(0)
  %zero = u32[] constant(0)
  %init = (u32[], f32[]) tuple(%zero, %p0)
  %while = (u32[], f32[]) while(%init), body=%body, condition=%cond
  ROOT %result = f32[] get-tuple-element(%while), index=1
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  auto result =
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get());
  EXPECT_THAT(result.status().error_message(),
              ::testing::HasSubstr(
                  "Instruction: data is on device: 0, which conflicts with "
                  "device: 1 of channel instruction: recv"));
}

// Same conflict, but the conflicting device assignment sits on the while
// instruction itself.
TEST_P(ParameterizedMetadataTest, WhileConflictingShardingOnWhileInstruction) {
  const char* const hlo_string = R"(
HloModule module
%cond {
  %vars.cond = (u32[], f32[]) parameter(0)
  %count.cond = u32[] get-tuple-element(%vars.cond), index=0
  %limit = u32[] constant(10)
  ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
  %param = (u32[], f32[]) parameter(0)
  %count = u32[] get-tuple-element(%param), index=0
  %after-all = token[] after-all()
  %recv = (f32[], u32[], token[]) recv(%after-all), channel_id=1,
    sharding={maximal device=1 metadata={op_name="a"}}
  %recv-done = (f32[], token[]) recv-done(%recv), channel_id=1
  %data = f32[] get-tuple-element(%recv-done), index=0
  ROOT %tuple = (u32[], f32[]) tuple(%count, %data)
}
ENTRY %entry {
  %p0 = f32[] parameter(0)
  %zero = u32[] constant(0)
  %init = (u32[], f32[]) tuple(%zero, %p0)
  %while = (u32[], f32[]) while(%init), body=%body, condition=%cond,
    sharding={maximal device=0 metadata={op_name="b"}}
  ROOT %result = f32[] get-tuple-element(%while), index=1
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  auto result =
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get());
  EXPECT_THAT(result.status().error_message(),
              ::testing::HasSubstr(
                  "Instruction: while is on device: 0, which conflicts with "
                  "device: 1 of channel instruction: recv"));
}

// SPMD propagation through a while containing a convolution: the kernel
// get-tuple-element inside the body picks up the partially-replicated
// sharding of the corresponding loop operand %p0.
TEST_P(ParameterizedMetadataTest, WhileConv) {
  const char* const hlo_string = R"(
HloModule module
%cond {
  %vars.cond = (u32[], bf16[2048, 768], bf16[128,512,2048], bf16[128,512,768])
    parameter(0)
  %count.cond = u32[] get-tuple-element(%vars.cond), index=0
  %limit = u32[] constant(2)
  ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
  %param = (u32[], bf16[2048, 768], bf16[128,512,2048], bf16[128,512,768])
    parameter(0)
  %count = u32[] get-tuple-element(%param), index=0
  %kernel = bf16[2048, 768]{1,0} get-tuple-element(%param), index=1
  %lhs = bf16[128,512,2048]{2,1,0} get-tuple-element(%param), index=2,
    sharding={devices=[8,1,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
  %reshape = bf16[2048,768,1]{2,1,0} reshape(bf16[2048,768]{1,0} %kernel)
  %convolution = bf16[128,512,768]{2,1,0}
    convolution(bf16[128,512,2048]{2,1,0} %lhs,
    bf16[2048,768,1]{2,1,0} %reshape),
    window={size=1}, dim_labels=0bf_io0->0bf,
    sharding={devices=[8,1,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
  ROOT %tuple = (u32[], bf16[2048, 768], bf16[128,512,2048], bf16[128,512,768])
    tuple(%count, %kernel, %lhs, %convolution)
}
ENTRY %entry {
  %p0 = bf16[2048,768] parameter(0),
    sharding={devices=[2,1,8]0,2,4,6,8,10,12,14,1,3,5,7,9,11,13,15 last_tile_dim_replicate}
  %p1 = bf16[128,512,2048] parameter(1),
    sharding={devices=[8,1,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
  %p2 = bf16[128,512,768] parameter(2)
  %zero = u32[] constant(0)
  %init = (u32[], bf16[2048, 768], bf16[128,512,2048], bf16[128,512,768])
    tuple(%zero, %p0, %p1, %p2)
  %while = (u32[], bf16[2048, 768], bf16[128,512,2048], bf16[128,512,768])
    while(%init), body=%body, condition=%cond
  ROOT %result = bf16[128,512,768] get-tuple-element(%while), index=3,
    sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
          .Run(module.get()));
  EXPECT_TRUE(changed);
  auto* kernel = FindInstruction(module.get(), "kernel");
  ASSERT_NE(kernel, nullptr);
  EXPECT_THAT(kernel, op::Sharding("{devices=[2,1,8]0,2,4,6,8,10,12,14,1,3,5,"
                                   "7,9,11,13,15 last_tile_dim_replicate}"));
}

// Dot propagation in several directions: forward from a sharded LHS/RHS into
// dots (including a matrix-vector dot), and backward from a sharded user
// (%copy_back_prop_rhs) through a dot onto both of its operand copies.
TEST_P(ParameterizedMetadataTest, Dot) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %param.0 = f32[8,256,128] parameter(0)
  %param.1 = f32[8,128,512] parameter(1)
  %param.2 = f32[8,128] parameter(2)
  %p0_copy_0 = f32[8,256,128] copy(%param.0),
    sharding={devices=[1,4,1]0,1,2,3 metadata={op_name="a"}}
  %p1_copy_0 = f32[8,128,512] copy(%param.1),
    sharding={devices=[1,1,4]0,1,2,3 metadata={op_name="b"}}
  %p2_copy = f32[8,128] copy(%param.2)
  %dot_prop_rhs = f32[8,256,512] dot(%p0_copy_0, %p1_copy_0),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2}, rhs_contracting_dims={1}
  %dot_prop_lhs = f32[8,512,256] dot(%p1_copy_0, %p0_copy_0),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={1}, rhs_contracting_dims={2}
  %dot_mat_vec = f32[8,256] dot(%p0_copy_0, %p2_copy),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2}, rhs_contracting_dims={1}
  %p0_copy_1 = f32[8,256,128] copy(%param.0)
  %p1_copy_1 = f32[8,128,512] copy(%param.1)
  %dot_back_prop_rhs = f32[8,256,512] dot(%p0_copy_1, %p1_copy_1),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2}, rhs_contracting_dims={1}
  %copy_back_prop_rhs = f32[8,256,512] copy(%dot_back_prop_rhs),
    sharding={devices=[1,2,2]0,1,2,3 metadata={op_name="c"}}
  ROOT %tuple = (f32[8,512,256], f32[8,256,512], f32[8,256], f32[8,256,512])
    tuple(%dot_prop_lhs, %dot_prop_rhs, %dot_mat_vec, %copy_back_prop_rhs)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  EXPECT_TRUE(changed);
  auto* dot_prop_rhs = FindInstruction(module.get(), "dot_prop_rhs");
  ASSERT_NE(dot_prop_rhs, nullptr);
  EXPECT_THAT(dot_prop_rhs, op::Sharding("{devices=[1,1,4]0,1,2,3}"));
  auto* dot_prop_lhs = FindInstruction(module.get(), "dot_prop_lhs");
  ASSERT_NE(dot_prop_lhs, nullptr);
  EXPECT_THAT(dot_prop_lhs, op::Sharding("{devices=[1,4,1]0,1,2,3}"));
  auto* dot_mat_vec = FindInstruction(module.get(), "dot_mat_vec");
  ASSERT_NE(dot_mat_vec, nullptr);
  EXPECT_THAT(dot_mat_vec, op::Sharding("{devices=[1,4]0,1,2,3}"));
  auto* p0_copy_1 = FindInstruction(module.get(), "p0_copy_1");
  ASSERT_NE(p0_copy_1, nullptr);
  EXPECT_THAT(
      p0_copy_1,
      op::Sharding("{devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}"));
  auto* p1_copy_1 = FindInstruction(module.get(), "p1_copy_1");
  ASSERT_NE(p1_copy_1, nullptr);
  EXPECT_THAT(
      p1_copy_1,
      op::Sharding("{devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}"));
  auto* dot_back_prop_rhs = FindInstruction(module.get(), "dot_back_prop_rhs");
  ASSERT_NE(dot_back_prop_rhs, nullptr);
  EXPECT_THAT(dot_back_prop_rhs, op::Sharding("{devices=[1,2,2]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(dot_prop_rhs->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
    EXPECT_THAT(dot_prop_lhs->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
    EXPECT_THAT(dot_mat_vec->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(p0_copy_1->sharding(),
                ShardingMetadata({CreateMetadata("c")}));
    EXPECT_THAT(p1_copy_1->sharding(),
                ShardingMetadata({CreateMetadata("c")}));
    EXPECT_THAT(dot_back_prop_rhs->sharding(),
                ShardingMetadata({CreateMetadata("c")}));
  } else {
    for (HloInstruction* instruction :
         {dot_prop_rhs, dot_prop_lhs, dot_mat_vec, p0_copy_1, p1_copy_1,
          dot_back_prop_rhs}) {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest, DotTiledBatchDim) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %p0 = f32[8,256,512] parameter(0)
  %p1 = f32[8,512,128] parameter(1)
  %add = f32[8,256,512] add(%p0, %p0)
  %dot = f32[8,256,128] dot(%add, %p1),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2}, rhs_contracting_dims={1}
  %res = f32[8,32768] reshape(%dot),
    sharding={devices=[2,2]0,1,2,3 metadata={op_name="a"}}
  ROOT %tuple = (f32[8,32768]) tuple(%res)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "add");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,2,1]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(),
ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, DotMergeOperands) { const char* const hlo_string = R"( HloModule module ENTRY %conv { %p0 = f32[8,256,512] parameter(0), sharding={devices=[2,2,1,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="a"}} %p1 = f32[8,128,512] parameter(1), sharding={devices=[2,2,1,2]0,2,1,3,4,6,5,7 last_tile_dim_replicate metadata={op_name="b"}} %dot = f32[8,256,128] dot(%p0, %p1), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={2} ROOT %copy = f32[8,256,128] copy(%dot) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "dot"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2,2,2]0,1,2,3,4,5,6,7}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("b"), CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, DotMergeOperands2) { const char* const hlo_string = R"( HloModule module ENTRY %conv { %p0 = f32[8,256,512] parameter(0), sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 metadata={op_name="a"}} %p1 = f32[8,128,512] parameter(1), sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 metadata={op_name="b"}} %dot = f32[8,256,128] dot(%p0, %p1), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={2} ROOT %copy = f32[8,256,128] copy(%dot) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, 
GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "dot"); ASSERT_NE(instruction, nullptr); EXPECT_THAT( instruction, op::Sharding( "{devices=[2,2,1,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, DotMergeOperands3) { const char* const hlo_string = R"( HloModule module ENTRY %conv { %p0 = f32[256,512] parameter(0), sharding={devices=[2,4]0,1,2,3,4,5,6,7 metadata={op_name="a"}} %p1 = f32[128,512] parameter(1), sharding={devices=[4,2]0,4,2,6,3,7,1,5 metadata={op_name="b"}} %dot = f32[256,128] dot(%p0, %p1), lhs_contracting_dims={1}, rhs_contracting_dims={1} ROOT %copy = f32[256,128] copy(%dot) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "dot"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2,4]0,2,3,1,4,6,7,5}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("b"), CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, BackwardDotFromContracting) { const char* const hlo_string = R"( HloModule module ENTRY %conv { %p0 = f32[8,256,512] parameter(0), sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 metadata={op_name="a"}} %p1 = f32[8,128,512] parameter(1) %copy1 = f32[8,128,512] copy(%p1) %dot = f32[8,256,128] dot(%p0, %copy1), lhs_batch_dims={0}, 
rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={2}, sharding={devices=[2,1,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="b"}} ROOT %copy = f32[8,256,128] copy(%dot) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "copy1"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2,2,2]0,1,2,3,4,5,6,7}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a"), CreateMetadata("b")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, BackwardDotFromContractingWithManual) { const char* const hlo_string = R"( HloModule module ENTRY %dot { %p0 = f32[8,512] parameter(0), sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}} %p1 = f32[512,128] parameter(1) %copy1 = f32[512,128] copy(%p1) %dot = f32[8,128] dot(%p0, %copy1), lhs_batch_dims={}, rhs_batch_dims={}, lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[1,1,2,2]0,1,2,3 last_tile_dims={replicated, manual} metadata={op_name="b"}} ROOT %copy = f32[8,128] copy(%dot) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "copy1"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dims={manual}}")); if 
(GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ConvAsDotOnTrivialDims) { const char* const hlo_string = R"( HloModule module ENTRY %conv { %lhs = f32[128,1,1,1001] parameter(0), sharding={devices=[1,2,1,1]0,1 metadata={op_name="a"}} %rhs = f32[1,1,1024,1001] parameter(1), sharding={devices=[1,2,1,1]0,1 metadata={op_name="b"}} %convolution = f32[128,1,1,1024] convolution(%lhs, %rhs), window={size=1x1 rhs_reversal=1x1}, dim_labels=b01f_01oi->b01f ROOT %copy = f32[128,1,1,1024] copy(%convolution) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "convolution"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,1,2,1]0,1}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("b")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ConvAsDotOnTrivialDimsBackward) { const char* const hlo_string = R"( HloModule module ENTRY %conv { %p0 = f32[128,5,5,128] parameter(0) %lhs = f32[128,5,5,128] copy(%p0) %p1 = f32[5,5,128,768] parameter(1) %rhs = f32[5,5,128,768] copy(%p1) %convolution = f32[128,1,1,768] convolution(%lhs, %rhs), window={size=5x5}, dim_labels=b01f_01io->b01f, sharding={devices=[1,2,1,1]0,1 metadata={op_name="a"}} ROOT %copy = f32[128,1,1,768] copy(%convolution) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { 
ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* lhs = FindInstruction(module.get(), "lhs"); ASSERT_NE(lhs, nullptr); auto* rhs = FindInstruction(module.get(), "rhs"); ASSERT_NE(rhs, nullptr); for (HloInstruction* instruction : {lhs, rhs}) { EXPECT_THAT(instruction, op::Sharding("{replicated}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, ConvolutionFilterIFOFPartitionedInputPartialReplicate) { const char* const hlo_string = R"( HloModule module ENTRY entry { %lhs = f32[128,112,112,12] parameter(0) %lhs.copy = f32[128,112,112,12] copy(f32[128,112,112,12] %lhs), sharding={devices=[1,1,1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}} %rhs = f32[7,7,12,64] parameter(1) %rhs.copy = f32[7,7,12,64] copy(f32[7,7,12,64] %rhs), sharding={devices=[1,1,2,2]0,1,2,3 metadata={op_name="b"}} %conv = f32[128,56,56,64] convolution( f32[128,112,112,12] %lhs.copy, f32[7,7,12,64] %rhs.copy), window={size=7x7 stride=2x2 pad=3_3x3_3}, dim_labels=b01f_01io->b01f ROOT %copy = f32[128,56,56,64] copy(conv) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "conv"); ASSERT_NE(instruction, nullptr); EXPECT_THAT( instruction, op::Sharding("{devices=[1,1,1,2,2]0,2,1,3 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("b")})); } 
else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ConcatFromUserUnshardedDim) { const char* const hlo_string = R"( HloModule module ENTRY %conv { %p0 = f32[8,128] parameter(0) %p1 = f32[8,128] parameter(1) %c0 = f32[8,128] copy(%p0) %c1 = f32[8,128] copy(%p1) %concat = f32[16,128] concatenate(%c0, %c1), dimensions={0}, sharding={devices=[1,2]0,1 metadata={op_name="a"}} ROOT %tuple = (f32[16,128]) tuple(%concat) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* c0 = FindInstruction(module.get(), "c0"); ASSERT_NE(c0, nullptr); auto* c1 = FindInstruction(module.get(), "c1"); ASSERT_NE(c1, nullptr); for (HloInstruction* instruction : {c0, c1}) { EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, ConcatFromUserShardedDim) { const char* const hlo_string = R"( HloModule module ENTRY %conv { %p0 = f32[8,128] parameter(0) %p1 = f32[8,128] parameter(1) %c0 = f32[8,128] copy(%p0) %c1 = f32[8,128] copy(%p1) %concat = f32[16,128] concatenate(%c0, %c1), dimensions={0}, sharding={devices=[3,1]0,1,2 metadata={op_name="a"}} ROOT %tuple = (f32[16,128]) tuple(%concat) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* c0 = FindInstruction(module.get(), "c0"); 
EXPECT_THAT(c0, op::Sharding("{devices=[2,1]0,1}")); ASSERT_NE(c0, nullptr); auto* c1 = FindInstruction(module.get(), "c1"); ASSERT_NE(c1, nullptr); EXPECT_THAT(c1, op::Sharding("{devices=[2,1]1,2}")); for (HloInstruction* instruction : {c0, c1}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, ConcatFromUserShardedDimMaximalOperand) { const char* const hlo_string = R"( HloModule module ENTRY %conv { %p0 = f32[8,128] parameter(0) %p1 = f32[24,128] parameter(1) %c0 = f32[8,128] copy(%p0) %c1 = f32[24,128] copy(%p1) %concat = f32[32,128] concatenate(%c0, %c1), dimensions={0}, sharding={devices=[4,1]0,1,2,3 metadata={op_name="a"}} ROOT %tuple = (f32[32,128]) tuple(%concat) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* c0 = FindInstruction(module.get(), "c0"); ASSERT_NE(c0, nullptr); EXPECT_THAT(c0, op::NoSharding()); auto* c1 = FindInstruction(module.get(), "c1"); ASSERT_NE(c1, nullptr); EXPECT_THAT(c1, op::Sharding("{devices=[3,1]1,2,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(c1->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(c1->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ReplicatedToSideEffecting) { const char* const hlo_string = R"( HloModule module ENTRY entry_computation { %const.0 = s32[] constant(0), sharding={replicated metadata={op_name="a"}} %const.1 = s32[] constant(2147483647), sharding={replicated metadata={op_name="b"}} %rng = s32[4]{0} rng(%const.0, %const.1), distribution=rng_uniform ROOT 
%root = (s32[4]{0}) tuple(%rng) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_EQ(changed, !GetParam().propagate_metadata && !GetParam().clear_metadata); auto* instruction = FindInstruction(module.get(), "rng"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::NoSharding()); } /* The root tuple's per-element shardings (maximal device=0 / replicated) should propagate to the inner %tuple.0 it wraps. */ TEST_P(ParameterizedMetadataTest, PartReplicatedTupleUser) { const char* const hlo_string = R"( HloModule module ENTRY entry_computation { %param.0 = f32[5] parameter(0) %param.1 = f32[7] parameter(1) %param.2 = f32[9] parameter(2) %tuple.0 = (f32[5], f32[7]) tuple(%param.0, %param.1) ROOT %tuple.1 = ((f32[5], f32[7]), f32[9]) tuple(%tuple.0, %param.2), sharding={{maximal device=0 metadata={op_name="a"}}, {replicated metadata={op_name="b"}}, {maximal device=1 metadata={op_name="c"}}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "tuple.0"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{{maximal device=0}, {replicated}}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding().tuple_elements()[0], ShardingMetadata({CreateMetadata("a")})); EXPECT_THAT(instruction->sharding().tuple_elements()[1], ShardingMetadata({CreateMetadata("b")})); } else { for (const HloSharding& sub_sharding : instruction->sharding().tuple_elements()) { EXPECT_THAT(sub_sharding, ShardingMetadata({})); } } } /* Branch-parameter shardings should propagate through both conditional branch computations (including through the transpose in the true branch) and to the conditional's result. */ TEST_P(ParameterizedMetadataTest, Conditional) { const char* const hlo_string = R"( HloModule 
module %true_comp { %tp = (f32[3,5]) parameter(0) %tgte = f32[3,5] get-tuple-element(%tp), index=0 %ttr = f32[5,3] transpose(%tgte), dimensions={1,0} ROOT %tr = (f32[5,3]) tuple(%ttr) } %false_comp { %fp = (f32[5,3]) parameter(0) %fgte = f32[5,3] get-tuple-element(%fp), index=0 ROOT %fr = (f32[5,3]) tuple(%fgte) } ENTRY entry { %cond = pred[] parameter(0) %true_param = (f32[3,5]) parameter(1), sharding={{devices=[1,2]0,1 metadata={op_name="a"}}} %false_param = (f32[5,3]) parameter(2), sharding={{devices=[1,3]0,1,2 metadata={op_name="b"}}} %conditional = (f32[5,3]) conditional( %cond, %true_param, %false_param), true_computation=%true_comp, false_computation=%false_comp ROOT %root = f32[5,3] get-tuple-element(%conditional), index=0 })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* tp = FindInstruction(module.get(), "tp"); ASSERT_NE(tp, nullptr); EXPECT_THAT(tp, op::Sharding("{{devices=[1,2]0,1}}")); auto* tgte = FindInstruction(module.get(), "tgte"); ASSERT_NE(tgte, nullptr); EXPECT_THAT(tgte, op::Sharding("{devices=[1,2]0,1}")); auto* ttr = FindInstruction(module.get(), "ttr"); ASSERT_NE(ttr, nullptr); EXPECT_THAT(ttr, op::Sharding("{devices=[2,1]0,1}")); auto* tr = FindInstruction(module.get(), "tr"); ASSERT_NE(tr, nullptr); EXPECT_THAT(tr, op::Sharding("{{devices=[1,3]0,1,2}}")); auto* fp = FindInstruction(module.get(), "fp"); ASSERT_NE(fp, nullptr); EXPECT_THAT(fp, op::Sharding("{{devices=[1,3]0,1,2}}")); auto* fgte = FindInstruction(module.get(), "fgte"); ASSERT_NE(fgte, nullptr); EXPECT_THAT(fgte, op::Sharding("{devices=[1,3]0,1,2}")); auto* fr = FindInstruction(module.get(), "fr"); ASSERT_NE(fr, nullptr); EXPECT_THAT(fr, op::Sharding("{{devices=[1,3]0,1,2}}")); auto* conditional = 
FindInstruction(module.get(), "conditional"); ASSERT_NE(conditional, nullptr); EXPECT_THAT(conditional, op::Sharding("{{devices=[1,3]0,1,2}}")); auto check_metadata = [](const HloSharding& sharding, const OpMetadata& metadata) { if (sharding.IsTuple()) { EXPECT_THAT(sharding.tuple_elements()[0], ShardingMetadata({metadata})); } else { EXPECT_THAT(sharding, ShardingMetadata({metadata})); } }; auto check_empty_metadata = [](const HloSharding& sharding) { if (sharding.IsTuple()) { EXPECT_THAT(sharding.tuple_elements()[0], ShardingMetadata({})); } else { EXPECT_THAT(sharding, ShardingMetadata({})); } }; for (HloInstruction* instruction : {tp, tgte, ttr}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { check_metadata(instruction->sharding(), CreateMetadata("a")); } else { check_empty_metadata(instruction->sharding()); } } for (HloInstruction* instruction : {tr, fp, fgte, fr, conditional}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { check_metadata(instruction->sharding(), CreateMetadata("b")); } else { check_empty_metadata(instruction->sharding()); } } } /* Element shardings on the root tuple %t2 should flow backward through the get-tuple-elements to the source tuples %t0 and %t1. */ TEST_P(ParameterizedMetadataTest, TupleFromUser) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %p0 = f32[13] parameter(0) %p1 = f32[15] parameter(1) %p2 = f32[17] parameter(2) %t0 = (f32[13], f32[15]) tuple(%p0, %p1) %t1 = ((f32[13], f32[15]), f32[17]) tuple(%t0, %p2) %gte.0 = (f32[13], f32[15]) get-tuple-element(%t1), index=0 %gte.1 = f32[13] get-tuple-element(%gte.0), index=0 %gte.2 = f32[15] get-tuple-element(%gte.0), index=1 %gte.3 = f32[17] get-tuple-element(%t1), index=1 ROOT %t2 = (f32[13], f32[15], f32[17]) tuple(%gte.1, %gte.2, %gte.3), sharding={{replicated metadata={op_name="a"}}, {devices=[2]0,1 metadata={op_name="b"}}, {devices=[3]1,2,3 metadata={op_name="c"}}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool 
changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* t0 = FindInstruction(module.get(), "t0"); ASSERT_NE(t0, nullptr); EXPECT_THAT(t0, op::Sharding("{{replicated}, {devices=[2]0,1}}")); auto* t1 = FindInstruction(module.get(), "t1"); ASSERT_NE(t1, nullptr); EXPECT_THAT( t1, op::Sharding("{{replicated}, {devices=[2]0,1}, {devices=[3]1,2,3}}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(t0->sharding().tuple_elements()[0], ShardingMetadata({CreateMetadata("a")})); EXPECT_THAT(t0->sharding().tuple_elements()[1], ShardingMetadata({CreateMetadata("b")})); EXPECT_THAT(t1->sharding().tuple_elements()[0], ShardingMetadata({CreateMetadata("a")})); EXPECT_THAT(t1->sharding().tuple_elements()[1], ShardingMetadata({CreateMetadata("b")})); EXPECT_THAT(t1->sharding().tuple_elements()[2], ShardingMetadata({CreateMetadata("c")})); } else { for (HloInstruction* instruction : {t0, t1}) { for (const HloSharding& sub_sharding : instruction->sharding().tuple_elements()) { EXPECT_THAT(sub_sharding, ShardingMetadata({})); } } } } /* Data-operand sharding should propagate forward to the dynamic-slice result. */ TEST_P(ParameterizedMetadataTest, DynamicSliceForwardPass) { const char* hlo_string = R"( HloModule module ENTRY %entry { %p0 = f32[11,13,15] parameter(0) %c0 = f32[11,13,15] copy(%p0), sharding={devices=[1,1,2]0,1 metadata={op_name="a"}} %p1 = s32[] parameter(1) %i0 = s32[] constant(0) %ds = f32[11,1,15] dynamic-slice(%c0, %i0, %p1, %i0), dynamic_slice_sizes={11,1,15} ROOT %root = (f32[11,1,15]) tuple(%ds) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "ds"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,1,2]0,1}")); 
if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } /* dynamic-slice sharding should propagate backward to its data operand %c0. */ TEST_P(ParameterizedMetadataTest, DynamicSliceBackwardPass) { const char* hlo_string = R"( HloModule module ENTRY %entry { %p0 = f32[11,13,15] parameter(0) %c0 = f32[11,13,15] copy(%p0) %p1 = s32[] parameter(1) %i0 = s32[] constant(0) %ds = f32[11,1,15] dynamic-slice(%c0, %i0, %p1, %i0), dynamic_slice_sizes={11,1,15}, sharding={devices=[1,1,2]0,1 metadata={op_name="a"}} ROOT %root = (f32[11,1,15]) tuple(%ds) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "c0"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,1,2]0,1}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } /* Base-operand sharding should propagate to dynamic-update-slice and its update operand %c1. */ TEST_P(ParameterizedMetadataTest, DynamicUpdateSliceForwardPassBase) { const char* hlo_string = R"( HloModule module ENTRY %entry { %p0 = f32[11,13,15] parameter(0) %c0 = f32[11,13,15] copy(%p0), sharding={devices=[1,1,2]0,1 metadata={op_name="a"}} %p1 = f32[11,1,15] parameter(1) %c1 = f32[11,1,15] copy(%p1) %p2 = s32[] parameter(2) %i0 = s32[] constant(0) %dus = f32[11,13,15] dynamic-update-slice(%c0, %c1, %i0, %p2, %i0) ROOT %root = (f32[11,13,15]) tuple(%dus) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, 
GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* dus = FindInstruction(module.get(), "dus"); ASSERT_NE(dus, nullptr); EXPECT_THAT(dus, op::Sharding("{devices=[1,1,2]0,1}")); auto* c1 = FindInstruction(module.get(), "c1"); ASSERT_NE(c1, nullptr); EXPECT_THAT(c1, op::Sharding("{devices=[1,1,2]0,1}")); for (HloInstruction* instruction : {dus, c1}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } /* Update-operand sharding should propagate to dynamic-update-slice and its base operand %c0. */ TEST_P(ParameterizedMetadataTest, DynamicUpdateSliceForwardPassUpdate) { const char* hlo_string = R"( HloModule module ENTRY %entry { %p0 = f32[11,13,15] parameter(0) %c0 = f32[11,13,15] copy(%p0) %p1 = f32[11,1,15] parameter(1) %c1 = f32[11,1,15] copy(%p1), sharding={devices=[1,1,2]0,1 metadata={op_name="a"}} %p2 = s32[] parameter(2) %i0 = s32[] constant(0) %dus = f32[11,13,15] dynamic-update-slice(%c0, %c1, %i0, %p2, %i0) ROOT %root = (f32[11,13,15]) tuple(%dus) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* dus = FindInstruction(module.get(), "dus"); ASSERT_NE(dus, nullptr); EXPECT_THAT(dus, op::Sharding("{devices=[1,1,2]0,1}")); auto* c0 = FindInstruction(module.get(), "c0"); ASSERT_NE(c0, nullptr); EXPECT_THAT(c0, op::Sharding("{devices=[1,1,2]0,1}")); for (HloInstruction* instruction : {dus, c0}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } /* dynamic-update-slice sharding should propagate backward to both the base and update operands. */ TEST_P(ParameterizedMetadataTest, DynamicUpdateSliceBackwardPass) { const char* hlo_string = R"( 
HloModule module ENTRY %entry { %p0 = f32[11,13,15] parameter(0) %c0 = f32[11,13,15] copy(%p0) %p1 = f32[11,1,15] parameter(1) %c1 = f32[11,1,15] copy(%p1) %p2 = s32[] parameter(2) %i0 = s32[] constant(0) %dus = f32[11,13,15] dynamic-update-slice(%c0, %c1, %i0, %p2, %i0), sharding={devices=[1,1,2]0,1 metadata={op_name="a"}} ROOT %root = (f32[11,13,15]) tuple(%dus) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* c0 = FindInstruction(module.get(), "c0"); ASSERT_NE(c0, nullptr); EXPECT_THAT(c0, op::Sharding("{devices=[1,1,2]0,1}")); auto* c1 = FindInstruction(module.get(), "c1"); ASSERT_NE(c1, nullptr); EXPECT_THAT(c1, op::Sharding("{devices=[1,1,2]0,1}")); for (HloInstruction* instruction : {c0, c1}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } /* LHS batch-partitioned sharding should propagate to the RHS copy and the convolution (einsum); with allow_root_sharding_propagation it also reaches the root. */ TEST_P(ParameterizedMetadataTestWithOutput, EinsumLHSBatchPartitioned) { const char* hlo_string = R"( HloModule module ENTRY entry { %lhs = f32[32,24,64] parameter(0) %lhs.copy = f32[32,24,64] copy(%lhs), sharding={devices=[2,1,1]0,1 metadata={op_name="a"}} %rhs = f32[32,39296,64] parameter(1) %rhs.copy = f32[32,39296,64] copy(%rhs) %conv = f32[32,24,39296] convolution(%lhs.copy, %rhs.copy), dim_labels=0bf_0oi->0bf, window={size=32 stride=31 lhs_dilate=32} ROOT %copy = f32[32,24,39296] copy(%conv) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata, GetParam().allow_root_sharding_propagation) 
.Run(module.get())); EXPECT_TRUE(changed); auto* rhs_copy = FindInstruction(module.get(), "rhs.copy"); ASSERT_NE(rhs_copy, nullptr); EXPECT_THAT(rhs_copy, op::Sharding("{devices=[2,1,1]0,1}")); auto* conv = FindInstruction(module.get(), "conv"); ASSERT_NE(conv, nullptr); EXPECT_THAT(conv, op::Sharding("{devices=[2,1,1]0,1}")); for (HloInstruction* instruction : {rhs_copy, conv}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } if (GetParam().allow_root_sharding_propagation) { EXPECT_THAT(module->entry_computation()->root_instruction(), op::Sharding("{devices=[2,1,1]0,1}")); } } /* Batch-partitioned output sharding should propagate backward to both convolution operands. */ TEST_P(ParameterizedMetadataTest, EinsumOutputBatchPartitioned) { const char* hlo_string = R"( HloModule module ENTRY entry { %lhs = f32[32,24,64] parameter(0) %lhs.copy = f32[32,24,64] copy(%lhs) %rhs = f32[32,39296,64] parameter(1) %rhs.copy = f32[32,39296,64] copy(%rhs) %conv = f32[32,24,39296] convolution(%lhs.copy, %rhs.copy), dim_labels=0bf_0oi->0bf, window={size=32 stride=31 lhs_dilate=32}, sharding={devices=[2,1,1]0,1 metadata={op_name="a"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* lhs_copy = FindInstruction(module.get(), "lhs.copy"); ASSERT_NE(lhs_copy, nullptr); EXPECT_THAT(lhs_copy, op::Sharding("{devices=[2,1,1]0,1}")); auto* rhs_copy = FindInstruction(module.get(), "rhs.copy"); ASSERT_NE(rhs_copy, nullptr); EXPECT_THAT(rhs_copy, op::Sharding("{devices=[2,1,1]0,1}")); for (HloInstruction* instruction : {lhs_copy, rhs_copy}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), 
ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } /* LHS non-contracting-dimension sharding should propagate forward to the convolution output. */ TEST_P(ParameterizedMetadataTest, EinsumLHSNonContractingPartitioned) { const char* hlo_string = R"( HloModule module ENTRY entry { %lhs = f32[32,24,64,128] parameter(0) %lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,2,1,2]0,1,2,3 metadata={op_name="a"}} %rhs = f32[32,39296,64,1] parameter(1) %rhs.copy = f32[32,39296,64,1] copy(%rhs) %conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy), dim_labels=0bf1_0oi1->0bf1, window={size=32x1 stride=31x1 lhs_dilate=32x1} ROOT %copy = f32[32,24,39296,128] copy(%conv) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "conv"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,1,2]0,1,2,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } /* Output sharding on the LHS non-contracting dimension should propagate backward to the LHS operand. */ TEST_P(ParameterizedMetadataTest, EinsumOutputLHSNonContractingPartitioned) { const char* hlo_string = R"( HloModule module ENTRY entry { %lhs = f32[32,24,64,128] parameter(0) %lhs.copy = f32[32,24,64,128] copy(%lhs) %rhs = f32[32,39296,64,1] parameter(1) %rhs.copy = f32[32,39296,64,1] copy(%rhs) ROOT %conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy), dim_labels=0bf1_0oi1->0bf1, window={size=32x1 stride=31x1 lhs_dilate=32x1}, sharding={devices=[1,2,1,2]0,1,2,3 metadata={op_name="a"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } 
TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "lhs.copy"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,1,2]0,1,2,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } /* RHS non-contracting-dimension sharding should propagate forward to the convolution output. */ TEST_P(ParameterizedMetadataTest, EinsumRHSNonContractingPartitioned) { const char* hlo_string = R"( HloModule module ENTRY entry { %lhs = f32[32,24,64,1] parameter(0) %lhs.copy = f32[32,24,64,1] copy(%lhs) %rhs = f32[32,39296,64,128] parameter(1) %rhs.copy = f32[32,39296,64,128] copy(%rhs), sharding={devices=[1,2,1,2]0,1,2,3 metadata={op_name="a"}} %conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy), dim_labels=0bf1_0oi1->0bf1, window={size=32x128 stride=31x1 pad=0_0x127_127 lhs_dilate=32x1 rhs_reversal=0x1} ROOT %copy = f32[32,24,39296,128] copy(%conv) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "conv"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,1,2,2]0,1,2,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } /* Output sharding on the RHS non-contracting dimension should propagate backward to the RHS operand. */ TEST_P(ParameterizedMetadataTest, EinsumOutputRHSNonContractingPartitioned) { const char* hlo_string = R"( HloModule module ENTRY entry { %lhs = f32[32,24,64,1] parameter(0) %lhs.copy = f32[32,24,64,1] 
copy(%lhs) %rhs = f32[32,39296,64,128] parameter(1) %rhs.copy = f32[32,39296,64,128] copy(%rhs) ROOT %conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy), dim_labels=0bf1_0oi1->0bf1, window={size=32x128 stride=31x1 pad=0_0x127_127 lhs_dilate=32x1 rhs_reversal=0x1}, sharding={devices=[1,1,2,2]0,1,2,3 metadata={op_name="a"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "rhs.copy"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,1,2]0,1,2,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } /* With conflicting operand shardings, the output should take the sharding of the larger (RHS, "b") operand. */ TEST_P(ParameterizedMetadataTest, EinsumChooseLargerOperand) { const char* hlo_string = R"( HloModule module ENTRY entry { %lhs = f32[32,24,64,1] parameter(0) %lhs.copy = f32[32,24,64,1] copy(%lhs), sharding={devices=[1,4,1,1]0,1,2,3 metadata={op_name="a"}} %rhs = f32[32,39296,64,128] parameter(1) %rhs.copy = f32[32,39296,64,128] copy(%rhs), sharding={devices=[1,2,1,2]0,1,2,3 metadata={op_name="b"}} %conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy), dim_labels=0bf1_0oi1->0bf1, window={size=32x128 stride=31x1 pad=0_0x127_127 lhs_dilate=32x1 rhs_reversal=0x1} ROOT %copy = f32[32,24,39296,128] copy(%conv) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), 
"conv"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,1,2,2]0,1,2,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("b")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } /* With conflicting operand shardings, the batch-dimension sharding (RHS, "b") should win for the output. */ TEST_P(ParameterizedMetadataTest, EinsumChooseBatchFirst) { const char* hlo_string = R"( HloModule module ENTRY entry { %lhs = f32[32,24,64,1] parameter(0) %lhs.copy = f32[32,24,64,1] copy(%lhs), sharding={devices=[1,2,1,1]0,1 metadata={op_name="a"}} %rhs = f32[32,39296,64,128] parameter(1) %rhs.copy = f32[32,39296,64,128] copy(%rhs), sharding={devices=[2,1,1,1]0,1 metadata={op_name="b"}} %conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy), dim_labels=0bf1_0oi1->0bf1, window={size=32x128 stride=31x1 pad=0_0x127_127 lhs_dilate=32x1 rhs_reversal=0x1} ROOT %copy = f32[32,24,39296,128] copy(%conv) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "conv"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2,1,1,1]0,1}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("b")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } /* Index-operand sharding should propagate forward to the gather result. */ TEST_P(ParameterizedMetadataTest, GatherFromIndex) { const char* hlo_string = R"( HloModule module ENTRY entry { %input = f32[2,2,9] parameter(0), sharding={replicated metadata={op_name="a"}} %indices = s32[2,3,4] parameter(1), sharding={devices=[1,2,1]0,1 metadata={op_name="b"}} %gather = f32[3,4,9] gather(%input, %indices), offset_dims={2}, collapsed_slice_dims={0,1}, 
start_index_map={0,1}, index_vector_dim=0, slice_sizes={1,1,9} ROOT %copy = f32[3,4,9] copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "gather"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2,1,1]0,1}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("b")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } /* Partially-replicated index sharding (last_tile_dim_replicate) should propagate forward to the gather result. */ TEST_P(ParameterizedMetadataTest, GatherFromIndex_PartialReplicate) { const char* hlo_string = R"( HloModule module ENTRY entry { %input = f32[2,9] parameter(0), sharding={replicated metadata={op_name="a"}} %indices = s32[3] parameter(1), sharding={devices=[2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}} %gather = f32[3,9] gather(%input, %indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,9} ROOT %copy = f32[3,9] copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "gather"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("b")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } 
/* Data-operand sharding should propagate forward to the gather result (SPMD mode). */ TEST_P(ParameterizedMetadataTest, GatherFromDataOperand) { const char* hlo_string = R"( HloModule module ENTRY entry { %input = f32[2,9] parameter(0), sharding={devices=[1,2]0,1 metadata={op_name="a"}} %indices = s32[3] parameter(1), sharding={replicated metadata={op_name="b"}} %gather = f32[3,9] gather(%input, %indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,9} ROOT %copy = f32[3,9] copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "gather"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } /* Partially-replicated data-operand sharding should propagate forward to the gather result (SPMD mode). */ TEST_P(ParameterizedMetadataTest, GatherFromDataOperand_PartialReplicate) { const char* hlo_string = R"( HloModule module ENTRY entry { %input = f32[2,9] parameter(0), sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}} %indices = s32[3] parameter(1), sharding={replicated metadata={op_name="b"}} %gather = f32[3,9] gather(%input, %indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,9} ROOT %copy = f32[3,9] copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), 
"gather"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } /* Gather output sharding should propagate backward to the index operand. */ TEST_P(ParameterizedMetadataTest, GatherToIndex) { const char* hlo_string = R"( HloModule module ENTRY entry { %input = f32[2,9] parameter(0), sharding={replicated metadata={op_name="a"}} %p1 = s32[3] parameter(1) %indices = s32[3] copy(%p1) ROOT %gather = f32[3,9] gather(%input, %indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,9}, sharding={devices=[2,1]0,1 metadata={op_name="b"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "indices"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2]0,1}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("b")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } /* Partially-replicated gather sharding should propagate backward to the index operand. */ TEST_P(ParameterizedMetadataTest, GatherToIndex_PartialReplicate) { const char* hlo_string = R"( HloModule module ENTRY entry { %input = f32[2,9] parameter(0), sharding={replicated metadata={op_name="a"}} %p1 = s32[3] parameter(1) %indices = s32[3] copy(%p1) ROOT %gather = f32[3,9] gather(%input, %indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,9}, sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}} })"; 
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "indices"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,1,2,3 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("b")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, GatherToIndex2) { const char* hlo_string = R"( HloModule module ENTRY entry { %input = bf16[2,4819,4] parameter(0), sharding={replicated metadata={op_name="a"}} %p1 = s32[2,1000,2] parameter(1) %indices = s32[2,1000,2] copy(%p1) ROOT %gather = bf16[2,1000,4] gather(bf16[2,4819,4] %input, s32[2,1000,2] %indices), offset_dims={2}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=2, slice_sizes={1,1,4}, sharding={devices=[1,2,1]0,1 metadata={op_name="b"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "indices"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,1]0,1}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("b")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, GatherToIndex2_PartialReplicate) { const char* hlo_string = R"( HloModule 
module ENTRY entry { %input = bf16[2,4819,4] parameter(0), sharding={replicated metadata={op_name="a"}} %p1 = s32[2,1000,2] parameter(1) %indices = s32[2,1000,2] copy(%p1) ROOT %gather = bf16[2,1000,4] gather(bf16[2,4819,4] %input, s32[2,1000,2] %indices), offset_dims={2}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=2, slice_sizes={1,1,4}, sharding={devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "indices"); ASSERT_NE(instruction, nullptr); EXPECT_THAT( instruction, op::Sharding("{devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("b")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// Like GatherToIndex2 but with index_vector_dim=1 (index vector in the middle
// dimension); the output's [1,2,1] sharding must land on the indices' last
// dimension instead (expected {devices=[1,1,2]} — checked on the next line).
TEST_P(ParameterizedMetadataTest, GatherToIndex3) { const char* hlo_string = R"( HloModule module ENTRY entry { %input = bf16[2,4819,4] parameter(0), sharding={replicated metadata={op_name="a"}} %p1 = s32[2,2,1000] parameter(1) %indices = s32[2,2,1000] copy(%p1) ROOT %gather = bf16[2,1000,4] gather(bf16[2,4819,4] %input, s32[2,2,1000] %indices), offset_dims={2}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=1, slice_sizes={1,1,4}, sharding={devices=[1,2,1]0,1 metadata={op_name="b"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction =
FindInstruction(module.get(), "indices"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,1,2]0,1}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("b")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// Checks backward propagation (SPMD mode) from a gather output sharded along
// the offset dimension ({devices=[1,2]}, op_name "b") to the %input data
// operand, which should become {devices=[1,2]0,1}.
TEST_P(ParameterizedMetadataTest, GatherToDataOperand) { const char* hlo_string = R"( HloModule module ENTRY entry { %p0 = f32[2,9] parameter(0) %input = f32[2,9] copy(%p0) %indices = s32[3] parameter(1), sharding={replicated metadata={op_name="a"}} ROOT %gather = f32[3,9] gather(%input, %indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,9}, sharding={devices=[1,2]0,1 metadata={op_name="b"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "input"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("b")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// Partial-replication variant of GatherToDataOperand: the gather output is
// {devices=[1,2,2] ... last_tile_dim_replicate}; %input should match
// (assertions continue on the next source line).
TEST_P(ParameterizedMetadataTest, GatherToDataOperand_PartialReplicate) { const char* hlo_string = R"( HloModule module ENTRY entry { %p0 = f32[2,9] parameter(0) %input = f32[2,9] copy(%p0) %indices = s32[3] parameter(1), sharding={replicated metadata={op_name="a"}} ROOT %gather = f32[3,9] gather(%input, %indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,9}, sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}} })";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "input"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("b")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// Checks forward propagation (SPMD mode) from a scatter's sharded data
// operand ({devices=[1,2]}, op_name "a") to the scatter result itself; the
// indices and updates are replicated.
TEST_P(ParameterizedMetadataTest, DataOperandToScatter) { const char* const hlo_string = R"( HloModule module add (lhs: f32[], rhs: f32[]) -> f32[] { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT sum = f32[] add(lhs, rhs) } ENTRY entry { %input = f32[2,9] parameter(0), sharding={devices=[1,2]0,1 metadata={op_name="a"}} %indices = s32[3] parameter(1), sharding={replicated metadata={op_name="b"}} %updates = f32[3,9] parameter(2), sharding={replicated metadata={op_name="c"}} %scatter = f32[2,9] scatter(%input, %indices, %updates), to_apply=add, update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1 ROOT %copy = f32[2,9] copy(%scatter) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "scatter"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")}));
} else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// Same as DataOperandToScatter, but the data operand is partially replicated;
// the scatter result should receive the identical partially-replicated tiling
// and metadata "a".
TEST_P(ParameterizedMetadataTest, DataOperandToScatter_PartialReplicate) { const char* const hlo_string = R"( HloModule module add (lhs: f32[], rhs: f32[]) -> f32[] { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT sum = f32[] add(lhs, rhs) } ENTRY entry { %input = f32[2,9] parameter(0), sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}} %indices = s32[3] parameter(1), sharding={replicated metadata={op_name="b"}} %updates = f32[3,9] parameter(2), sharding={replicated metadata={op_name="c"}} %scatter = f32[2,9] scatter(%input, %indices, %updates), to_apply=add, update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1 ROOT %copy = f32[2,9] copy(%scatter) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "scatter"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// Checks forward propagation from a scatter's sharded %updates operand
// (op_name "c") to the scatter result (HLO string continues on the next
// source line inside the raw string literal).
TEST_P(ParameterizedMetadataTest, UpdateOperandToScatter) { const char* const hlo_string = R"( HloModule module add (lhs: f32[], rhs: f32[]) -> f32[] { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT sum = f32[] add(lhs, rhs) } ENTRY entry { %input = f32[2,9] parameter(0), sharding={replicated metadata={op_name="a"}} %indices = s32[3] parameter(1), sharding={replicated metadata={op_name="b"}} %updates = f32[3,9] parameter(2), sharding={devices=[1,2]0,1
metadata={op_name="c"}} %scatter = f32[2,9] scatter(%input, %indices, %updates), to_apply=add, update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1 ROOT %copy = f32[2,9] copy(%scatter) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "scatter"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("c")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// Partial-replication variant of UpdateOperandToScatter: the %updates operand
// carries a partially-replicated tiling (op_name "c") which should propagate
// to the scatter result (assertions continue on the next source line).
TEST_P(ParameterizedMetadataTest, UpdateOperandToScatter_PartialReplicate) { const char* const hlo_string = R"( HloModule module add (lhs: f32[], rhs: f32[]) -> f32[] { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT sum = f32[] add(lhs, rhs) } ENTRY entry { %input = f32[2,9] parameter(0), sharding={replicated metadata={op_name="a"}} %indices = s32[3] parameter(1), sharding={replicated metadata={op_name="b"}} %updates = f32[3,9] parameter(2), sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="c"}} %scatter = f32[2,9] scatter(%input, %indices, %updates), to_apply=add, update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1 ROOT %copy = f32[2,9] copy(%scatter) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction =
FindInstruction(module.get(), "scatter"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("c")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// Checks backward propagation from a partially-replicated scatter result
// (op_name "c") onto the %input data operand (non-SPMD mode).
TEST_P(ParameterizedMetadataTest, ScatterToDataOperand_PartialReplicate) { const char* const hlo_string = R"( HloModule module add (lhs: f32[], rhs: f32[]) -> f32[] { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT sum = f32[] add(lhs, rhs) } ENTRY entry { %p0 = f32[2,9] parameter(0) %input = f32[2,9] copy(%p0) %indices = s32[3] parameter(1), sharding={replicated metadata={op_name="a"}} %updates = f32[3,9] parameter(2), sharding={replicated metadata={op_name="b"}} ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates), to_apply=add, update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="c"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "input"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("c")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// Fully-tiled counterpart: the scatter result's {devices=[1,2]} sharding
// (op_name "c") should propagate back to %input (HLO string continues on the
// next source line inside the raw string literal).
TEST_P(ParameterizedMetadataTest, ScatterToDataOperand) { const char* const hlo_string = R"( HloModule module add (lhs: f32[], rhs: f32[]) -> f32[] { lhs = f32[]
parameter(0) rhs = f32[] parameter(1) ROOT sum = f32[] add(lhs, rhs) } ENTRY entry { %p0 = f32[2,9] parameter(0) %input = f32[2,9] copy(%p0) %indices = s32[3] parameter(1), sharding={replicated metadata={op_name="a"}} %updates = f32[3,9] parameter(2), sharding={replicated metadata={op_name="b"}} ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates), to_apply=add, update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, sharding={devices=[1,2]0,1 metadata={op_name="c"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "input"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("c")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// Checks backward propagation (SPMD mode) from a partially-replicated scatter
// result (op_name "b") onto the %updates operand (assertions continue on the
// next source line).
TEST_P(ParameterizedMetadataTest, ScatterToUpdateOperand_PartialReplicate) { const char* const hlo_string = R"( HloModule module add (lhs: f32[], rhs: f32[]) -> f32[] { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT sum = f32[] add(lhs, rhs) } ENTRY entry { %input = f32[2,9] parameter(0) %indices = s32[3] parameter(1), sharding={replicated metadata={op_name="a"}} %p2 = f32[3,9] parameter(2) %updates = f32[3,9] copy(%p2) ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates), to_apply=add, update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if
(GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "updates"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("b")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// Fully-tiled counterpart: the scatter result's {devices=[1,2]} sharding
// (op_name "b") should propagate back onto the %updates copy (SPMD mode).
TEST_P(ParameterizedMetadataTest, ScatterToUpdateOperand) { const char* const hlo_string = R"( HloModule module add (lhs: f32[], rhs: f32[]) -> f32[] { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT sum = f32[] add(lhs, rhs) } ENTRY entry { %input = f32[2,9] parameter(0) %indices = s32[3] parameter(1), sharding={replicated metadata={op_name="a"}} %p2 = f32[3,9] parameter(2) %updates = f32[3,9] copy(%p2) ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates), to_apply=add, update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, sharding={devices=[1,2]0,1 metadata={op_name="b"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "updates"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("b")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } }

// Checks propagation from a scatter's sharded %updates operand (op_name "c")
// to the %indices copy; the test body begins on the next source line.
TEST_P(ParameterizedMetadataTest, ScatterUpdateToIndex) {
const char* const hlo_string = R"( HloModule module add (lhs: f32[], rhs: f32[]) -> f32[] { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT sum = f32[] add(lhs, rhs) } ENTRY entry { %input = f32[2,9] parameter(0), sharding={replicated metadata={op_name="a"}} %p1 = s32[3] parameter(1), sharding={replicated metadata={op_name="b"}} %indices = s32[3] copy(%p1) %updates = f32[3,9] parameter(2), sharding={devices=[2,1]0,1 metadata={op_name="c"}} ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates), to_apply=add, update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, sharding={replicated metadata={op_name="d"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "indices"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2]0,1}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("c")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ScatterUpdateToIndex_PartialReplicate) { const char* const hlo_string = R"( HloModule module add (lhs: f32[], rhs: f32[]) -> f32[] { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT sum = f32[] add(lhs, rhs) } ENTRY entry { %input = f32[2,9] parameter(0), sharding={replicated metadata={op_name="a"}} %p1 = s32[3] parameter(1), sharding={replicated metadata={op_name="b"}} %indices = s32[3] copy(%p1) %updates = f32[3,9] parameter(2), sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="c"}} ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates), to_apply=add, 
update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, sharding={replicated metadata={op_name="d"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "indices"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,1,2,3 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("c")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ScatterUpdateToIndex_RankMismatch) { const char* const hlo_string = R"( HloModule module add (lhs: f32[], rhs: f32[]) -> f32[] { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT sum = f32[] add(lhs, rhs) } ENTRY entry { %input = f32[1,24,24,24,3,3] parameter(0), sharding={replicated metadata={op_name="a"}} %p1 = s32[1,24,24,24,5] parameter(1), sharding={replicated metadata={op_name="b"}} %indices = s32[1,24,24,24,5] copy(%p1) %updates = f32[1,24,24,24,3] parameter(2), sharding={devices=[1,2,2,2,1]0,1,2,3,4,5,6,7 metadata={op_name="c"}} %scatter = f32[1,24,24,24,3,3] scatter(%input, %indices, %updates), to_apply=add, update_window_dims={4}, inserted_window_dims={0,1,2,3,4}, scatter_dims_to_operand_dims={0,1,2,3,4}, index_vector_dim=4, sharding={replicated metadata={op_name="d"}} ROOT %copy = f32[1,24,24,24,3,3] copy(%scatter) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) 
.Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "indices"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,2,1]0,1,2,3,4,5,6,7}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("c")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ScatterIndexToUpdate) { const char* const hlo_string = R"( HloModule module add (lhs: f32[], rhs: f32[]) -> f32[] { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT sum = f32[] add(lhs, rhs) } ENTRY entry { %input = f32[2,9] parameter(0), sharding={replicated metadata={op_name="a"}} %indices = s32[3] parameter(1), sharding={devices=[2]0,1 metadata={op_name="b"}} %p2 = f32[3,9] parameter(2), sharding={replicated metadata={op_name="c"}} %updates = f32[3,9] copy(%p2) ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates), to_apply=add, update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, sharding={replicated metadata={op_name="d"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "updates"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2,1]0,1}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("b")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ScatterIndexToUpdate_PartialReplicate) { const char* const hlo_string = R"( HloModule module add (lhs: f32[], rhs: 
f32[]) -> f32[] { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT sum = f32[] add(lhs, rhs) } ENTRY entry { %input = f32[2,9] parameter(0), sharding={replicated metadata={op_name="a"}} %indices = s32[3] parameter(1), sharding={devices=[2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}} %p2 = f32[3,9] parameter(2), sharding={replicated metadata={op_name="c"}} %updates = f32[3,9] copy(%p2) ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates), to_apply=add, update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, sharding={replicated metadata={op_name="d"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "updates"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("b")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, PartialShardingOnElementwise) { const char* const hlo_string = R"( HloModule module ENTRY entry { %p0 = f32[2,9] parameter(0), sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}} %p1 = f32[2,9] parameter(1), sharding={devices=[2,1,2]0,2,1,3 last_tile_dim_replicate metadata={op_name="b"}} %lhs = f32[2,9] copy(%p0) %rhs = f32[2,9] copy(%p1) %add = f32[2,9] add(%lhs, %rhs) ROOT %copy = f32[2,9] copy(%add) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, 
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* lhs = FindInstruction(module.get(), "lhs"); ASSERT_NE(lhs, nullptr); EXPECT_THAT(lhs, op::Sharding("{devices=[2,2]0,2,1,3}")); auto* rhs = FindInstruction(module.get(), "rhs"); ASSERT_NE(rhs, nullptr); EXPECT_THAT(rhs, op::Sharding("{devices=[2,2]0,2,1,3}")); auto* add = FindInstruction(module.get(), "add"); ASSERT_NE(add, nullptr); EXPECT_THAT(add, op::Sharding("{devices=[2,2]0,2,1,3}")); for (HloInstruction* instruction : {lhs, rhs, add}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("b"), CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, PartialShardingOnElementwise2) { const char* const hlo_string = R"( HloModule module ENTRY entry { %p0 = f32[2,9] parameter(0), sharding={devices=[1,2,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="a"}} %p1 = f32[2,9] parameter(1), sharding={devices=[2,1,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="b"}} %lhs = f32[2,9] copy(%p0) %rhs = f32[2,9] copy(%p1) %add = f32[2,9] add(%lhs, %rhs) ROOT %copy = f32[2,9] copy(%add) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* lhs = FindInstruction(module.get(), "lhs"); ASSERT_NE(lhs, nullptr); EXPECT_THAT( lhs, op::Sharding("{devices=[2,2,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}")); auto* rhs = FindInstruction(module.get(), "rhs"); ASSERT_NE(rhs, nullptr); EXPECT_THAT( rhs, op::Sharding("{devices=[2,2,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}")); auto* add = FindInstruction(module.get(), "add"); ASSERT_NE(add, 
nullptr); EXPECT_THAT( add, op::Sharding("{devices=[2,2,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(lhs->sharding(), ShardingMetadata({CreateMetadata("b"), CreateMetadata("a")})); EXPECT_THAT(rhs->sharding(), ShardingMetadata({CreateMetadata("b"), CreateMetadata("a")})); EXPECT_THAT(add->sharding(), ShardingMetadata({CreateMetadata("b"), CreateMetadata("a")})); } else { for (HloInstruction* instruction : {lhs, rhs}) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, PartialShardingTransposeForwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %transpose { %param = f32[7,11,13]{2,1,0} parameter(0), sharding={devices=[2,1,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="a"}} %transpose = f32[11,13,7]{2,1,0} transpose(%param), dimensions={1,2,0} ROOT %copy = f32[11,13,7]{2,1,0} copy(%transpose) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "transpose"); ASSERT_NE(instruction, nullptr); EXPECT_THAT( instruction, op::Sharding( "{devices=[1,2,2,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, PartialShardingTransposeBackwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %transpose { %param = f32[7,11,13]{2,1,0} parameter(0) %copy = f32[7,11,13]{2,1,0} copy(%param) ROOT %transpose = f32[11,13,7]{2,1,0} transpose(%copy), dimensions={1,2,0}, 
sharding={devices=[1,2,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="a"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "copy"); ASSERT_NE(instruction, nullptr); EXPECT_THAT( instruction, op::Sharding( "{devices=[2,1,2,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ParallelGatherFromOperandForwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %module { %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}} %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %gather = s32[8,4,2,2]{3,2,1,0} gather( s32[8,4,2,2]{3,2,1,0} %parameter.0, s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0, slice_sizes={1,1,2,2} ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "gather"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, 
op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ParallelGatherFromIndexForwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %module { %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0) %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1, sharding={devices=[1,8,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}} %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %gather = s32[8,4,2,2]{3,2,1,0} gather( s32[8,4,2,2]{3,2,1,0} %parameter.0, s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0, slice_sizes={1,1,2,2} ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "gather"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ParallelGatherBackwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %module { %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0) %copy.p = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0) %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %gather = s32[8,4,2,2]{3,2,1,0} gather( s32[8,4,2,2]{3,2,1,0} %copy.p, s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0, slice_sizes={1,1,2,2}, sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}} ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* concatenate = FindInstruction(module.get(), "concatenate"); ASSERT_NE(concatenate, nullptr); EXPECT_THAT(concatenate, op::Sharding("{devices=[1,8,1]0,1,4,5,2,3,6,7}")); auto* copy_p = FindInstruction(module.get(), "copy.p"); ASSERT_NE(copy_p, nullptr); EXPECT_THAT(copy_p, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}")); for (HloInstruction* instruction : {concatenate, copy_p}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, ParallelGatherBackwardPass2) { const char* const hlo_string = R"( HloModule module ENTRY %module { %parameter.0 = s32[4,8,2,2]{3,2,1,0} parameter(0) %copy.p = s32[4,8,2,2]{3,2,1,0} copy(%parameter.0) %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %gather = s32[8,4,2,2]{3,2,1,0} gather( s32[4,8,2,2]{3,2,1,0} %copy.p, s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3}, 
collapsed_slice_dims={0,1}, start_index_map={1,0}, index_vector_dim=0, slice_sizes={1,1,2,2}, sharding={devices=[1,4,1,1]0,1,4,5 metadata={op_name="a"}} ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* concatenate = FindInstruction(module.get(), "concatenate"); ASSERT_NE(concatenate, nullptr); EXPECT_THAT(concatenate, op::Sharding("{devices=[1,1,4]0,1,4,5}")); auto* copy_p = FindInstruction(module.get(), "copy.p"); ASSERT_NE(copy_p, nullptr); EXPECT_THAT(copy_p, op::Sharding("{devices=[4,1,1,1]0,1,4,5}")); for (HloInstruction* instruction : {concatenate, copy_p}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, PartialShardingParallelGatherFromOperandForwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %module { %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}} %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %gather = s32[8,4,2,2]{3,2,1,0} gather( s32[8,4,2,2]{3,2,1,0} %parameter.0, s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0, slice_sizes={1,1,2,2} ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if 
(GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "gather"); ASSERT_NE(instruction, nullptr); EXPECT_THAT( instruction, op::Sharding( "{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, PartialShardingParallelGatherFromIndexForwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %module { %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0) %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1, sharding={devices=[1,4,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}} %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %gather = s32[8,4,2,2]{3,2,1,0} gather( s32[8,4,2,2]{3,2,1,0} %parameter.0, s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0, slice_sizes={1,1,2,2} ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "gather"); ASSERT_NE(instruction, nullptr); EXPECT_THAT( instruction, op::Sharding( "{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { 
EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, PartialShardingParallelGatherBackwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %module { %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0) %copy.p = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0) %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %gather = s32[8,4,2,2]{3,2,1,0} gather( s32[8,4,2,2]{3,2,1,0} %copy.p, s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0, slice_sizes={1,1,2,2}, sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}} ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* concatenate = FindInstruction(module.get(), "concatenate"); ASSERT_NE(concatenate, nullptr); EXPECT_THAT( concatenate, op::Sharding( "{devices=[1,4,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}")); auto* copy_p = FindInstruction(module.get(), "copy.p"); ASSERT_NE(copy_p, nullptr); EXPECT_THAT( copy_p, op::Sharding( "{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}")); for (HloInstruction* instruction : {concatenate, copy_p}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, 
PartialShardingParallelGatherBackwardPass2) { const char* const hlo_string = R"( HloModule module ENTRY %module { %parameter.0 = s32[4,8,2,2]{3,2,1,0} parameter(0) %copy.p = s32[4,8,2,2]{3,2,1,0} copy(%parameter.0) %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %gather = s32[8,4,2,2]{3,2,1,0} gather( s32[4,8,2,2]{3,2,1,0} %copy.p, s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3}, collapsed_slice_dims={0,1}, start_index_map={1,0}, index_vector_dim=0, slice_sizes={1,1,2,2}, sharding={devices=[1,2,1,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}} ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* concatenate = FindInstruction(module.get(), "concatenate"); ASSERT_NE(concatenate, nullptr); EXPECT_THAT( concatenate, op::Sharding("{devices=[1,1,2,2]0,1,4,5 last_tile_dim_replicate}")); auto* copy_p = FindInstruction(module.get(), "copy.p"); ASSERT_NE(copy_p, nullptr); EXPECT_THAT( copy_p, op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}")); for (HloInstruction* instruction : {concatenate, copy_p}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, GatherParallelAndPassthroughMerged) { absl::string_view hlo_string = R"( HloModule module ENTRY %module { %arg0 = s32[4,8,2,2]{3,2,1,0} parameter(0) %arg1 = s32[4]{0} parameter(1) %input = s32[4,8,2,2]{3,2,1,0} copy(%arg0), 
sharding={devices=[2,1,2,1]0,1,4,5 metadata={op_name="a"}} %seq_size = s32[4]{0} copy(s32[4]{0} %arg1) %seq_b = s32[1,4,8]{2,1,0} broadcast(s32[4]{0} %seq_size ), dimensions={1} %iota.11 = s32[1,4,8]{2,1,0} iota(), iota_dimension=1 %concatenate = s32[2,4,8]{2,1,0} concatenate(s32[1,4,8]{2,1,0} %iota.11, s32[1,4,8]{2,1,0} %seq_b), dimensions={0} %gather = s32[4,8,2,2]{3,2,1,0} gather(s32[4,8,2,2]{3,2,1,0} %input, s32[2,4,8]{2,1,0} %concatenate), offset_dims={2,3}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0, slice_sizes={1,1,2,2} ROOT %copy = s32[4,8,2,2]{3,2,1,0} copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); const HloInstruction* input = FindInstruction(module.get(), "input"); ASSERT_NE(input, nullptr); EXPECT_THAT(input, op::Sharding("{devices=[2,1,2,1]0,1,4,5 }")); const HloInstruction* concatenate = FindInstruction(module.get(), "concatenate"); ASSERT_NE(concatenate, nullptr); EXPECT_THAT( concatenate, op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}")); const HloInstruction* gather = FindInstruction(module.get(), "gather"); ASSERT_NE(gather, nullptr); EXPECT_THAT(gather, op::Sharding("{devices=[2,1,2,1]0,1,4,5}")); for (const HloInstruction* instruction : {input, gather}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, GatherParallelAndTrivialMerged) { absl::string_view hlo_string = R"( HloModule module ENTRY %module { %arg0 = s32[4,8,2,2]{3,2,1,0} parameter(0) %arg1 = s32[4]{0} parameter(1) %input = s32[4,8,2,2]{3,2,1,0} copy(%arg0), 
sharding={devices=[2,2,1,1]0,1,4,5 metadata={op_name="a"}} %seq_size = s32[4]{0} copy(s32[4]{0} %arg1) %seq_b = s32[1,4,1]{2,1,0} broadcast(s32[4]{0} %seq_size), dimensions={1} %iota.11 = s32[1,4,1]{2,1,0} iota(), iota_dimension=1 %concatenate = s32[2,4,1]{2,1,0} concatenate(s32[1,4,1]{2,1,0} %iota.11, s32[1,4,1]{2,1,0} %seq_b), dimensions={0} %gather = s32[4,1,2,2]{3,2,1,0} gather(s32[4,8,2,2]{3,2,1,0} %input, s32[2,4,1]{2,1,0} %concatenate), offset_dims={2,3}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0, slice_sizes={1,1,2,2} ROOT %copy = s32[4,1,2,2]{3,2,1,0} copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); const HloInstruction* input = FindInstruction(module.get(), "input"); ASSERT_NE(input, nullptr); EXPECT_THAT(input, op::Sharding("{devices=[2,2,1,1]0,1,4,5}")); const HloInstruction* concatenate = FindInstruction(module.get(), "concatenate"); ASSERT_NE(concatenate, nullptr); EXPECT_THAT( concatenate, op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}")); const HloInstruction* gather = FindInstruction(module.get(), "gather"); ASSERT_NE(gather, nullptr); EXPECT_THAT( gather, op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}")); for (const HloInstruction* instruction : {input, gather}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, GatherParallelAndPassthroughMergedBackwardPass) { absl::string_view hlo_string = R"( HloModule module ENTRY %module { %arg0 = s32[4,8,2,2]{3,2,1,0} parameter(0) %arg1 = s32[4]{0} parameter(1) %input = 
s32[4,8,2,2]{3,2,1,0} copy(%arg0) %seq_size = s32[4]{0} copy(s32[4]{0} %arg1) %seq_b = s32[1,4,8]{2,1,0} broadcast(s32[4]{0} %seq_size ), dimensions={1} %iota.11 = s32[1,4,8]{2,1,0} iota(), iota_dimension=1 %concatenate = s32[2,4,8]{2,1,0} concatenate(s32[1,4,8]{2,1,0} %iota.11, s32[1,4,8]{2,1,0} %seq_b), dimensions={0} %gather = s32[2,2,4,8]{3,2,1,0} gather(s32[4,8,2,2]{3,2,1,0} %input, s32[2,4,8]{2,1,0} %concatenate), offset_dims={0,1}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0, slice_sizes={1,1,2,2}, sharding={devices=[2,1,2,1]0,4,1,5 metadata={op_name="a"}} ROOT %copy = s32[2,2,4,8]{3,2,1,0} copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); const HloInstruction* input = FindInstruction(module.get(), "input"); ASSERT_NE(input, nullptr); EXPECT_THAT(input, op::Sharding("{devices=[2,1,2,1]0,1,4,5 }")); const HloInstruction* concatenate = FindInstruction(module.get(), "concatenate"); ASSERT_NE(concatenate, nullptr); EXPECT_THAT( concatenate, op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}")); const HloInstruction* gather = FindInstruction(module.get(), "gather"); ASSERT_NE(gather, nullptr); EXPECT_THAT(gather, op::Sharding("{devices=[2,1,2,1]0,4,1,5}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(gather->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(gather->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, CorrectlyReplicateGatherIndex) { absl::string_view hlo_string = R"( HloModule module ENTRY %module { %parameter.0 = bf16[1,2,2,2,8]{4,3,2,1,0} parameter(0) %parameter.1 = s32[1,2,2]{2,1,0} parameter(1) %index = s32[1,2,2]{2,1,0} copy(%parameter.1) %gather = 
bf16[1,2,2,2,8]{4,3,2,1,0} gather( bf16[1,2,2,2,8]{4,3,2,1,0} %parameter.0, s32[1,2,2]{2,1,0} %index), offset_dims={2,3,4}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=2, slice_sizes={1,1,2,2,8}, sharding={devices=[1,1,2,1,1]0,1 metadata={op_name="a"}} ROOT %copy = bf16[1,2,2,2,8]{4,3,2,1,0} copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); const HloInstruction* index = FindInstruction(module.get(), "index"); ASSERT_NE(index, nullptr); EXPECT_THAT(index, op::Sharding("{replicated}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(index->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(index->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, GatherToOperand_ParallelDimIsNotPartitioned) { absl::string_view hlo_string = R"( HloModule module ENTRY %module { %parameter.0 = s32[2,1000,1]{2,1,0} parameter(0) %parameter.1 = bf16[2,4819,4]{2,1,0} parameter(1) %iota = s32[2,1000,1]{1,0,2} iota(), iota_dimension=0 %operand = bf16[2,4819,4]{2,1,0} copy(%parameter.1) %index = s32[2,1000,2]{2,1,0} concatenate(s32[2,1000,1]{1,0,2} %parameter.0, s32[2,1000,1]{2,1,0} %iota), dimensions={2}, sharding={devices=[1,4,1]0,1,2,3} ROOT %gather = bf16[2,1000,4]{2,1,0} gather(bf16[2,4819,4]{2,1,0} %operand, s32[2,1000,2]{2,1,0} %index), offset_dims={2}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=2, slice_sizes={1,1,4}, sharding={devices=[1,4,1]0,1,2,3} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, 
GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); const HloInstruction* operand = FindInstruction(module.get(), "operand"); EXPECT_THAT(operand, op::Sharding("{replicated}")); } TEST_P(ParameterizedMetadataTest, ManualSubgroupForward) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[6,3]{1,0} parameter(0), sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}} %copy = f32[6,3]{1,0} copy(%param0) %param1 = f32[6,3]{1,0} parameter(1), sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}} %copy.1 = f32[6,3]{1,0} copy(%param1) %add = f32[6,3]{1,0} add(%copy, %copy.1) ROOT %copy.2 = f32[6,3]{1,0} copy(%add) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "add"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ManualSubgroup_SingleOperandHasSharding) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[6,3]{1,0} parameter(0), sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}} %copy = f32[6,3]{1,0} copy(%param0) %param1 = f32[6,3]{1,0} parameter(1) %copy.1 = f32[6,3]{1,0} copy(%param1) %add = f32[6,3]{1,0} add(%copy, %copy.1) ROOT %copy.2 = f32[6,3]{1,0} copy(%add) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { 
ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "add"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } // Check other operand's sharding auto* operand = FindInstruction(module.get(), "copy"); ASSERT_NE(operand, nullptr); EXPECT_THAT(operand, op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(operand->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(operand->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ManualSubgroup_OneOperandReplicate) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[6,3]{1,0} parameter(0), sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}} %copy = f32[6,3]{1,0} copy(%param0) %param1 = f32[6,3]{1,0} parameter(1), sharding={devices=[1,1,2,2]0,1,2,3 last_tile_dims={replicated, manual} metadata={op_name="a"}} %copy.1 = f32[6,3]{1,0} copy(%param1) %add = f32[6,3]{1,0} add(%copy, %copy.1) ROOT %copy.2 = f32[6,3]{1,0} copy(%add) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "add"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, 
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } // Check other operand's sharding auto* operand = FindInstruction(module.get(), "copy"); ASSERT_NE(operand, nullptr); EXPECT_THAT(operand, op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(operand->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(operand->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ManualSubgroupBackward) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[6,3]{1,0} parameter(0) %copy = f32[6,3]{1,0} copy(%param0) %param1 = f32[6,3]{1,0} parameter(1) %copy.1 = f32[6,3]{1,0} copy(%param1) %add = f32[6,3]{1,0} add(%copy, %copy.1), sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}} ROOT %copy.2 = f32[6,3]{1,0} copy(%add) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "copy"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_F(ShardingPropagationTest, SimpleManual) { const char* const hlo_string = R"( HloModule module %add { %lhs = f32[] parameter(0) %rhs = f32[] 
parameter(1) ROOT %add = f32[] add(%lhs, %rhs) } ENTRY %entry { %param0 = f32[6,3] parameter(0) %copy = f32[6,3] copy(%param0), sharding={devices=[2,1]0,1} %annotate = f32[6,3] custom-call(%copy), custom_call_target="Sharding", sharding={devices=[2,1]0,1} %to_manual = f32[3,3] custom-call(%annotate), custom_call_target="SPMDFullToShardShape", sharding={manual} %zero = f32[] constant(0) %reduce = f32[3] reduce(%to_manual, %zero), dimensions={1}, to_apply=%add %annotate2 = f32[3] custom-call(%reduce), custom_call_target="Sharding", sharding={manual} %to_auto = f32[6] custom-call(%annotate2), custom_call_target="SPMDShardToFullShape", sharding={devices=[2,1]0,1} ROOT %copy.2 = f32[6] copy(%to_auto) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "reduce"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{manual}")); } TEST_F(ShardingPropagationTest, RefineUnspecifiedDims) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[6,3] parameter(0) %copy = f32[6,3] copy(%param0), sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate} %annotate = f32[6,3] custom-call(%copy), custom_call_target="Sharding", backend_config="unspecified_dims=[1]", sharding={devices=[2,1,2]0,2,1,3 last_tile_dim_replicate} %copy.2 = f32[6,3] copy(%annotate) ROOT %copy.3 = f32[6,3] copy(%copy.2) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "copy.2"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,2,1,3}")); } 
TEST_F(ShardingPropagationTest, RefineUnspecifiedDimsWithManualConversion) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[6,3,8] parameter(0) %copy = f32[6,3,8] copy(%param0), sharding={devices=[1,2,1,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate} %annotate = f32[6,3,8] custom-call(%copy), custom_call_target="Sharding", backend_config="unspecified_dims=[1,2]", sharding={devices=[2,1,1,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate} %to_manual = f32[3,3,8] custom-call(%annotate), custom_call_target="SPMDFullToShardShape", backend_config="unspecified_dims=[1,2]", sharding={devices=[1,1,1,4,2]0,2,1,3,4,6,5,7 last_tile_dims={replicated,manual}} %annotate2 = f32[3,3,8] custom-call(%to_manual), custom_call_target="Sharding", backend_config="unspecified_dims=[1,2]", sharding={devices=[1,1,1,4,2]0,2,1,3,4,6,5,7 last_tile_dims={replicated,manual}} %to_auto = f32[6,3,8] custom-call(%annotate2), custom_call_target="SPMDShardToFullShape", backend_config="unspecified_dims=[1,2]", sharding={devices=[2,1,1,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate} %copy.2 = f32[6,3,8] copy(%to_auto) ROOT %copy.3 = f32[6,3,8] copy(%copy.2), sharding={devices=[1,1,2,4]0,2,4,6,1,3,5,7 last_tile_dim_replicate} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true) .Run(module.get())); EXPECT_TRUE(changed); auto* copy2 = FindInstruction(module.get(), "copy.2"); ASSERT_NE(copy2, nullptr); EXPECT_THAT(copy2, op::Sharding("{devices=[2,2,2]0,1,4,5,2,3,6,7}")); auto* to_manual = FindInstruction(module.get(), "to_manual"); ASSERT_NE(to_manual, nullptr); EXPECT_THAT( to_manual, op::Sharding( "{devices=[1,2,2,2]0,2,1,3,4,6,5,7 last_tile_dims={manual}}")); auto* to_auto = FindInstruction(module.get(), "to_auto"); ASSERT_NE(to_auto, nullptr); EXPECT_THAT(to_auto, op::Sharding("{devices=[2,2,2]0,1,4,5,2,3,6,7}")); } TEST_F(ShardingPropagationTest, 
ReshapeNoMatchSubgroupManual) { const char* const hlo_string = R"( HloModule module ENTRY %reshape { %param0 = f32[1,3,3] parameter(0), sharding={devices=[2,1,1,2]0,1,2,3 last_tile_dims={manual}} %reshape = f32[3,1,3,1] reshape(%param0) ROOT %copy = f32[3,1,3,1] copy(%reshape) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true) .Run(module.get())); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "reshape"); ASSERT_NE(instruction, nullptr); EXPECT_THAT( instruction, op::Sharding( "{devices=[1,1,1,1,2,2]0,2,1,3 last_tile_dims={manual,replicated}}")); } } // namespace } // namespace xla
37.94109
122
0.654379
[ "vector" ]
b1dc66a5c3d077d6ad4e271987e5e5c8d8c2243d
16,761
hpp
C++
contracts/identity/identity.hpp
camielvanramele/Graduation-Internship
ab0e9b8db65b775459ddb3cb813e8e51b55ad1d1
[ "MIT" ]
1
2018-05-16T07:04:50.000Z
2018-05-16T07:04:50.000Z
contracts/identity/identity.hpp
ElementhFoundation/blockchain
5f63038c0e6fc90bc4bc0bc576410087785d8099
[ "MIT" ]
null
null
null
contracts/identity/identity.hpp
ElementhFoundation/blockchain
5f63038c0e6fc90bc4bc0bc576410087785d8099
[ "MIT" ]
null
null
null
#pragma once #include <eosiolib/chain.h> #include <eosiolib/dispatcher.hpp> #include <eosiolib/singleton.hpp> #include <eosiolib/multi_index.hpp> #include <eosiolib/vector.hpp> namespace identity { using eosio::action_meta; using eosio::singleton; using eosio::key256; using std::string; using std::vector; /** * This contract maintains a graph database of certified statements about an * identity. An identity is separated from the concept of an account because * the mapping of identity to accounts is subject to community consensus. * * Some use cases need a global source of trust, this trust rooted in the voter * who selects block producers. A block producer's opinion is "trusted" and so * is the opinion of anyone the block producer marks as "trusted". * * When a block producer is voted out the implicit trust in every certification * they made or those they trusted made is removed. All users are liable for * making false certifications. * * An account needs to claim the identity and a trusted account must certify the * claim. * * Data for an identity is stored: * * DeployToAccount / identity / certs / [property, trusted, certifier] => value * * Questions database is designed to answer: * * 1. has $identity.$unique been certified a "trusted" certifier * 2. has $identity.$property been certified by $account * 3. has $identity.$trusted been certified by a "trusted" certifier * 4. what account has authority to speak on behalf of identity? * - for each trusted owner certification * check to see if the account has claimed it * * 5. what identity does account have authority to speak on behalf? * - check what identity the account has self certified owner * - verify that a trusted certifier has confirmed owner * * This database structure enables parallel opeartions on independent identities. 
* * When an account certs a property we check to see if that */ template<uint64_t DeployToAccount> class contract { public: static const uint64_t code = DeployToAccount; typedef uint64_t identity_name; typedef uint64_t property_name; typedef uint64_t property_type_name; /** * This action create a new globally unique 64 bit identifier, * to minimize collisions each account is automatically assigned * a 32 bit identity prefix based upon hash(account_name) ^ hash(tapos). * * With this method no two accounts are likely to be assigned the same * 32 bit prefix consistantly due to the constantly changing tapos. This prevents * abuse of 'creator' selection to generate intentional conflicts with other users. * * The creator can determine the last 32 bits using an algorithm of their choice. We * presume the creator's algorithm can avoid collisions with itself. * * Even if two accounts get a collision in first 32 bits, a proper creator algorithm * should generate randomness in last 32 bits that will minimize collisions. In event * of collision transaction will fail and creator can try again. * * A 64 bit identity is used because the key is used frequently and it makes for more * effecient tables/scopes/etc. 
*/ struct create : public action_meta< code, N(create) > { account_name creator; uint64_t identity = 0; ///< first 32 bits determinsitically derived from creator and tapos EOSLIB_SERIALIZE( create, (creator)(identity) ) }; struct certvalue { property_name property; ///< name of property, base32 encoded i64 string type; ///< defines type serialized in data vector<char> data; ///< string memo; ///< meta data documenting basis of certification uint8_t confidence = 1; ///< used to define liability for lies, /// 0 to delete EOSLIB_SERIALIZE( certvalue, (property)(type)(data)(memo)(confidence) ) }; struct certprop : public action_meta< code, N(certprop) > { account_name bill_storage_to; ///< account which is paying for storage account_name certifier; identity_name identity; vector<certvalue> values; EOSLIB_SERIALIZE( certprop, (bill_storage_to)(certifier)(identity)(values) ) }; struct settrust : public action_meta< code, N(settrust) > { account_name trustor; ///< the account authorizing the trust account_name trusting; ///< the account receiving the trust uint8_t trust = 0; /// 0 to remove, -1 to mark untrusted, 1 to mark trusted EOSLIB_SERIALIZE( settrust, (trustor)(trusting)(trust) ) }; struct certrow { uint64_t id; property_name property; uint64_t trusted; account_name certifier; uint8_t confidence = 0; string type; vector<char> data; uint64_t primary_key() const { return id; } /* constexpr */ static key256 key(uint64_t property, uint64_t trusted, uint64_t certifier) { /* key256 key; key.uint64s[0] = property; key.uint64s[1] = trusted; key.uint64s[2] = certifier; key.uint64s[3] = 0; */ return key256::make_from_word_sequence<uint64_t>(property, trusted, certifier); } key256 get_key() const { return key(property, trusted, certifier); } EOSLIB_SERIALIZE( certrow , (property)(trusted)(certifier)(confidence)(type)(data)(id) ) }; struct identrow { uint64_t identity; account_name creator; uint64_t primary_key() const { return identity; } EOSLIB_SERIALIZE( identrow , 
(identity)(creator) ) }; struct trustrow { account_name account; uint64_t primary_key() const { return account; } EOSLIB_SERIALIZE( trustrow, (account) ) }; typedef eosio::multi_index<N(certs), certrow, eosio::indexed_by< N(bytuple), eosio::const_mem_fun<certrow, key256, &certrow::get_key> > > certs_table; typedef eosio::multi_index<N(ident), identrow> idents_table; typedef singleton<code, N(account), code, identity_name> accounts_table; typedef eosio::multi_index<N(trust), trustrow> trust_table; static identity_name get_claimed_identity( account_name acnt ) { return accounts_table::get_or_default(acnt, 0); } static account_name get_owner_for_identity( identity_name ident ) { // for each trusted owner certification // check to see if the certification is still trusted // check to see if the account has claimed it certs_table certs( code, ident ); auto idx = certs.template get_index<N(bytuple)>(); auto itr = idx.lower_bound(certrow::key(N(owner), 1, 0)); account_name owner = 0; while (itr != idx.end() && itr->property == N(owner) && itr->trusted) { if (sizeof(account_name) == itr->data.size()) { account_name account = *reinterpret_cast<const account_name*>(itr->data.data()); if (ident == get_claimed_identity(account)) { if (is_trusted(itr->certifier) ) { // the certifier is still trusted if (!owner || owner == account) { owner = account; } else { //contradiction found: different owners certified for the same identity return 0; } } else if (DeployToAccount == current_receiver()){ //the certifier is no longer trusted, need to unset the flag idx.modify(itr, 0, [&](certrow& r) { r.trusted = 0; }); } else { // the certifier is no longer trusted, but the code runs in read-only mode } } } else { // bad row - skip it } ++itr; } if (owner) { //owner found, no contradictions among certifications flaged as trusted return owner; } // trusted certification not found // let's see if any untrusted certifications became trusted itr = idx.lower_bound(certrow::key(N(owner), 0, 0)); 
while (itr != idx.end() && itr->property == N(owner) && !itr->trusted) { if (sizeof(account_name) == itr->data.size()) { account_name account = *reinterpret_cast<const account_name*>(itr->data.data()); if (ident == get_claimed_identity(account) && is_trusted(itr->certifier)) { if (DeployToAccount == current_receiver()) { // the certifier became trusted and we have permissions to update the flag idx.modify(itr, 0, [&](certrow& r) { r.trusted = 1; }); } if (!owner || owner == account) { owner = account; } else { //contradiction found: different owners certified for the same identity return 0; } } } else { // bad row - skip it } ++itr; } return owner; } static identity_name get_identity_for_account( account_name acnt ) { // check what identity the account has self certified owner // verify that a trusted certifier has confirmed owner auto identity = get_claimed_identity(acnt); return (identity != 0 && acnt == get_owner_for_identity(identity)) ? identity : 0; } static bool is_trusted_by( account_name trusted, account_name by ) { trust_table t( code, by ); return t.find( trusted ) != t.end(); } static bool is_trusted( account_name acnt ) { account_name active_producers[21]; auto count = get_active_producers( active_producers, sizeof(active_producers) ); for( size_t i = 0; i < count; ++i ) { if( active_producers[i] == acnt ) return true; } for( size_t i = 0; i < count; ++i ) { if( is_trusted_by( acnt, active_producers[i] ) ) return true; } return false; } static void on( const settrust& t ) { require_auth( t.trustor ); require_recipient( t.trusting ); trust_table table( code, t.trustor ); auto itr = table.find(t.trusting); if( itr == table.end() && t.trust > 0 ) { table.emplace( t.trustor, [&](trustrow& row) { row.account = t.trusting; }); } else if( itr != table.end() && t.trust == 0 ) { table.erase(itr); } } static void on( const create& c ) { require_auth( c.creator ); idents_table t( code, code); auto itr = t.find( c.identity ); eosio_assert( itr == t.end(), 
"identity already exists" ); eosio_assert( c.identity != 0, "identity=0 is not allowed" ); t.emplace(c.creator, [&](identrow& i) { i.identity = c.identity; i.creator = c.creator; }); } static void on( const certprop& cert ) { require_auth( cert.certifier ); if( cert.bill_storage_to != cert.certifier ) require_auth( cert.bill_storage_to ); idents_table t( code, code ); eosio_assert( t.find( cert.identity ) != t.end(), "identity does not exist" ); /// the table exists in the scope of the identity certs_table certs( code, cert.identity ); bool trusted = is_trusted( cert.certifier ); for( const auto& value : cert.values ) { auto idx = certs.template get_index<N(bytuple)>(); if (value.confidence) { eosio_assert(value.type.size() <= 32, "certrow::type should be not longer than 32 bytes"); auto itr = idx.lower_bound( certrow::key(value.property, trusted, cert.certifier) ); if (itr != idx.end() && itr->property == value.property && itr->trusted == trusted && itr->certifier == cert.certifier) { idx.modify(itr, 0, [&](certrow& row) { row.confidence = value.confidence; row.type = value.type; row.data = value.data; }); } else { auto pk = certs.available_primary_key(); certs.emplace(code, [&](certrow& row) { row.id = pk; row.property = value.property; row.trusted = trusted; row.certifier = cert.certifier; row.confidence = value.confidence; row.type = value.type; row.data = value.data; }); } auto itr_old = idx.lower_bound( certrow::key(value.property, !trusted, cert.certifier) ); if (itr_old != idx.end() && itr_old->property == value.property && itr_old->trusted == !trusted && itr_old->certifier == cert.certifier) { idx.erase(itr_old); } //special handling for owner if (value.property == N(owner)) { eosio_assert(sizeof(account_name) == value.data.size(), "data size doesn't match account_name size"); account_name acnt = *reinterpret_cast<const account_name*>(value.data.data()); if (cert.certifier == acnt) { //only self-certitication affects accounts_table accounts_table::set( 
cert.identity, acnt ); } } } else { bool removed = false; auto itr = idx.lower_bound( certrow::key(value.property, trusted, cert.certifier) ); if (itr != idx.end() && itr->property == value.property && itr->trusted == trusted && itr->certifier == cert.certifier) { idx.erase(itr); } else { removed = true; } itr = idx.lower_bound( certrow::key(value.property, !trusted, cert.certifier) ); if (itr != idx.end() && itr->property == value.property && itr->trusted == !trusted && itr->certifier == cert.certifier) { idx.erase(itr); } else { removed = true; } //special handling for owner if (value.property == N(owner)) { eosio_assert(sizeof(account_name) == value.data.size(), "data size doesn't match account_name size"); account_name acnt = *reinterpret_cast<const account_name*>(value.data.data()); if (cert.certifier == acnt) { //only self-certitication affects accounts_table accounts_table::remove( acnt ); } } } } } static void apply( account_name c, action_name act) { eosio::dispatch<contract, create, certprop, settrust>(c,act); } }; } /// namespace identity
44.815508
156
0.529921
[ "vector" ]
b1de31481582a3ef70d8a00ee5419eb108a8e3d7
2,823
cpp
C++
3C1V_Donkey_Kong/DonkeyKong_Solution/Source/HowHigh.cpp
unaidiaz/pryecto1
aa074c32587e8207cb89d6634391bb43aa9a4657
[ "BSD-3-Clause" ]
2
2020-05-20T15:48:29.000Z
2020-08-17T03:35:56.000Z
3C1V_Donkey_Kong/DonkeyKong_Solution/Source/HowHigh.cpp
unaidiaz/proyecto1
aa074c32587e8207cb89d6634391bb43aa9a4657
[ "BSD-3-Clause" ]
null
null
null
3C1V_Donkey_Kong/DonkeyKong_Solution/Source/HowHigh.cpp
unaidiaz/proyecto1
aa074c32587e8207cb89d6634391bb43aa9a4657
[ "BSD-3-Clause" ]
null
null
null
#include "HowHigh.h" #include "ModuleObjet.h" #include "stdio.h" #include <time.h> #include "Application.h" #include "ModuleInput.h" #include "Globals.h" #include "Module.h" #include "ModuleTextures.h" #include "ModuleRender.h" #include "ModuleAudio.h" #include "ModuleCollisions.h" #include "ModuleEnemies.h" #include "ModulePlayer.h" #include "Animation.h" #include "ModuleFonts.h" #include "SDL/include/SDL_scancode.h" #include "ModuleFadeToBlack.h" HowHigh::HowHigh(bool startEnabled) : Module(startEnabled) { } HowHigh::~HowHigh() { } // Load assets bool HowHigh::Start() { srand(time(NULL)); LOG("Loading background assets"); bool ret = true; App->audio->PlayMusic("Assets/5. How High Can You Get.ogg"); background = App->textures->Load("Assets/howhigh1.png"); return ret; } void HowHigh::DebugDrawGamepadInfo() { GamePad& pad = App->input->pads[0]; sprintf_s(_scoreText, 150, "pad 0 %s, press 1/2/3 for rumble", (pad.enabled) ? "plugged" : "not detected"); App->fonts->BlitText(5, 10, blancas, _scoreText); sprintf_s(_scoreText, 150, "buttons %s %s %s %s %s %s %s %s %s %s %s", (pad.a) ? "a" : "", (pad.b) ? "b" : "", (pad.x) ? "x" : "", (pad.y) ? "y" : "", (pad.start) ? "start" : "", (pad.back) ? "back" : "", (pad.guide) ? "guide" : "", (pad.l1) ? "lb" : "", (pad.r1) ? "rb" : "", (pad.l3) ? "l3" : "", (pad.r3) ? "r3" : "" ); App->fonts->BlitText(5, 20, blancas, _scoreText); sprintf_s(_scoreText, 150, "dpad %s %s %s %s", (pad.up) ? "up" : "", (pad.down) ? "down" : "", (pad.left) ? "left" : "", (pad.right) ? 
"right" : "" ); App->fonts->BlitText(5, 30, blancas, _scoreText); sprintf_s(_scoreText, 150, "left trigger %0.2f", pad.l2); App->fonts->BlitText(5, 40, blancas, _scoreText); sprintf_s(_scoreText, 150, "right trigger %0.2f", pad.r2); App->fonts->BlitText(5, 50, blancas, _scoreText); sprintf_s(_scoreText, 150, "left thumb %.2fx, %0.2fy", pad.l_x, pad.l_y); App->fonts->BlitText(5, 60, blancas, _scoreText); sprintf_s(_scoreText, 150, " deadzone %0.2f", pad.l_dz); App->fonts->BlitText(5, 70, blancas, _scoreText); sprintf_s(_scoreText, 150, "right thumb %.2fx, %0.2fy", pad.r_x, pad.r_y); App->fonts->BlitText(5, 80, blancas, _scoreText); sprintf_s(_scoreText, 150, " deadzone %0.2f", pad.r_dz); App->fonts->BlitText(5, 90, blancas, _scoreText); } update_status HowHigh::Update() { App->render->Blit(background, 0, 0, nullptr); if (App->input->keys[SDL_SCANCODE_ESCAPE] == KEY_STATE::KEY_DOWN) { exit(0); } return update_status::UPDATE_CONTINUE; } // Update: draw background update_status HowHigh::PostUpdate() { App->fade->FadeToBlack((Module*)App->howhigh, (Module*)App->scene1, 90); return update_status::UPDATE_CONTINUE; } bool HowHigh::CleanUp() { App->textures->CleanUp(); return true; }
23.722689
108
0.642579
[ "render" ]
b1ead2aff9bed4f625105deb43e3a18607eabb30
1,444
hpp
C++
epoch/lucca/include/lucca/viewport.hpp
oprogramadorreal/vize
042c16f96d8790303563be6787200558e1ec00b2
[ "MIT" ]
47
2020-03-30T14:36:46.000Z
2022-03-06T07:44:54.000Z
epoch/lucca/include/lucca/viewport.hpp
oprogramadorreal/vize
042c16f96d8790303563be6787200558e1ec00b2
[ "MIT" ]
null
null
null
epoch/lucca/include/lucca/viewport.hpp
oprogramadorreal/vize
042c16f96d8790303563be6787200558e1ec00b2
[ "MIT" ]
8
2020-04-01T01:22:45.000Z
2022-01-02T13:06:09.000Z
#ifndef LUCCA_VIEWPORT_HPP #define LUCCA_VIEWPORT_HPP #include "lucca/config.hpp" #include "lucca/serialization/viewport_serializer.hpp" namespace lucca { class RenderTarget; /** * A rectangular area on a render target. * * @see lucca::RenderTarget * * @author O Programador */ class LUCCA_API Viewport final { public: Viewport(); Viewport(Float relativeX, Float relativeY, Float relativeWidth, Float relativeHeight); Viewport(Float relativeX, Float relativeY, Float relativeWidth, Float relativeHeight, RenderTarget* renderTarget); ~Viewport(); public: Float getRelativeX() const; Float getRelativeY() const; Float getRelativeWidth() const; Float getRelativeHeight() const; SizeType getAbsoluteX() const; SizeType getAbsoluteY() const; SizeType getAbsoluteWidth() const; SizeType getAbsoluteHeight() const; void setRelativeX(Float value); void setRelativeY(Float value); void setRelativeWidth(Float value); void setRelativeHeight(Float value); void setRenderTarget(RenderTarget* renderTarget); SizeType getDefaultFramebufferObject() const; void postRedisplay(); private: RenderTarget* _renderTarget = nullptr; Float _relativeX = Float(0.0f); Float _relativeY = Float(0.0f); Float _relativeWidth = Float(1.0f); Float _relativeHeight = Float(1.0f); template<class Archive> friend void boost::serialization::serialize(Archive&, lucca::Viewport&, const unsigned int); }; } #endif // LUCCA_VIEWPORT_HPP
24.066667
117
0.772853
[ "render" ]
b1ebbe8e18a7b3232cf5633ad1f4b931eb749d20
18,636
hpp
C++
include/am/graphchi/shards/slidingshard.hpp
izenecloud/izenelib
9d5958100e2ce763fc75f27217adf982d7c9d902
[ "Apache-2.0" ]
31
2015-03-03T19:13:42.000Z
2020-09-03T08:11:56.000Z
include/am/graphchi/shards/slidingshard.hpp
izenecloud/izenelib
9d5958100e2ce763fc75f27217adf982d7c9d902
[ "Apache-2.0" ]
1
2016-12-24T00:12:11.000Z
2016-12-24T00:12:11.000Z
include/am/graphchi/shards/slidingshard.hpp
izenecloud/izenelib
9d5958100e2ce763fc75f27217adf982d7c9d902
[ "Apache-2.0" ]
8
2015-09-06T01:55:21.000Z
2021-12-20T02:16:13.000Z
/** * @file * @author Aapo Kyrola <akyrola@cs.cmu.edu> * @version 1.0 * * @section LICENSE * * Copyright [2012] [Aapo Kyrola, Guy Blelloch, Carlos Guestrin / Carnegie Mellon University] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @section DESCRIPTION * * The sliding shard. */ #ifdef DYNAMICEDATA #include <am/graphchi/shards/dynamicdata/slidingshard.hpp> #else #ifndef DEF_GRAPHCHI_SLIDINGSHARD #define DEF_GRAPHCHI_SLIDINGSHARD #include <iostream> #include <cstdio> #include <sstream> #include <vector> #include <fcntl.h> #include <unistd.h> #include <assert.h> #include <string> #include <am/graphchi/api/graph_objects.hpp> #include <am/graphchi/metrics/metrics.hpp> #include <am/graphchi/logger/logger.hpp> #include <am/graphchi/io/stripedio.hpp> #include <am/graphchi/graphchi_types.hpp> namespace graphchi { /** * A streaming block. 
*/ struct sblock { int writedesc; int readdesc; size_t offset; size_t end; uint8_t * data; uint8_t * ptr; bool active; bool is_edata_block; sblock() : writedesc(0), readdesc(0), active(false) { data = NULL; } sblock(int wdesc, int rdesc, bool is_edata_block=false) : writedesc(wdesc), readdesc(rdesc), active(false), is_edata_block(is_edata_block){ data = NULL; } void commit_async(stripedio * iomgr) { if (active && data != NULL && writedesc >= 0) { if (is_edata_block) { iomgr->managed_pwritea_async(writedesc, &data, end-offset, 0, true, true); data = NULL; } else { iomgr->managed_pwritea_async(writedesc, &data, end-offset, offset, true); } } } void commit_now(stripedio * iomgr) { if (active && data != NULL && writedesc >= 0) { size_t len = ptr-data; if (len > end-offset) len = end-offset; if (is_edata_block) { iomgr->managed_pwritea_now(writedesc, &data, end - offset, 0); /* Need to write whole block in the compressed regime */ } else { iomgr->managed_pwritea_now(writedesc, &data, len, offset); } } } void read_async(stripedio * iomgr) { if (is_edata_block) { iomgr->managed_preada_async(readdesc, &data, (end - offset), 0); } else { iomgr->managed_preada_async(readdesc, &data, end - offset, offset); } } void read_now(stripedio * iomgr) { if (is_edata_block) { iomgr->managed_preada_now(readdesc, &data, end-offset, 0); } else { iomgr->managed_preada_now(readdesc, &data, end-offset, offset); } } void release(stripedio * iomgr) { if (data != NULL) { iomgr->managed_release(readdesc, &data); if (is_edata_block) { iomgr->close_session(readdesc); } } data = NULL; } }; struct indexentry { size_t adjoffset, edataoffset; indexentry(size_t a, size_t e) : adjoffset(a), edataoffset(e) {} }; /* * Graph shard that is streamed. I.e, it can only read in one direction, a chunk * a time. 
*/ template <typename VT, typename ET, typename svertex_t = graphchi_vertex<VT, ET>, typename ETspecial = ET> class sliding_shard { stripedio * iomgr; std::string filename_edata; std::string filename_adj; vid_t range_st, range_end; size_t blocksize; vid_t curvid; size_t adjoffset, edataoffset, adjfilesize, edatafilesize; size_t window_start_edataoffset; std::vector<sblock> activeblocks; int adjfile_session; int writedesc; sblock * curblock; sblock * curadjblock; metrics &m; std::map<int, indexentry> sparse_index; // Sparse index that can be created in the fly bool disable_writes; bool async_edata_loading; // bool need_read_outedges; // Disabled - does not work with compressed data: whole block needs to be read. public: bool only_adjacency; sliding_shard(stripedio * iomgr, std::string _filename_edata, std::string _filename_adj, vid_t _range_st, vid_t _range_en, size_t _blocksize, metrics &_m, bool _disable_writes=false, bool onlyadj = false) : iomgr(iomgr), filename_edata(_filename_edata), filename_adj(_filename_adj), range_st(_range_st), range_end(_range_en), blocksize(_blocksize), m(_m), disable_writes(_disable_writes) { curvid = 0; adjoffset = 0; edataoffset = 0; disable_writes = false; only_adjacency = onlyadj; curblock = NULL; curadjblock = NULL; window_start_edataoffset = 0; while(blocksize % sizeof(ET) != 0) blocksize++; assert(blocksize % sizeof(ET)==0); adjfilesize = get_filesize(filename_adj); if (!only_adjacency) { edatafilesize = get_shard_edata_filesize<ET>(filename_edata); logstream(LOG_DEBUG) << "Total edge data size: " << edatafilesize << ", " << filename_edata << "sizeof(ET): " << sizeof(ET) << std::endl; } else { // Nothing } adjfile_session = iomgr->open_session(filename_adj, true); save_offset(); async_edata_loading = !svertex_t().computational_edges(); #ifdef SUPPORT_DELETIONS async_edata_loading = false; // See comment above for memshard, async_edata_loading = false; #endif } ~sliding_shard() { release_prior_to_offset(true); if (curblock != 
NULL) { curblock->release(iomgr); delete curblock; curblock = NULL; } if (curadjblock != NULL) { curadjblock->release(iomgr); delete curadjblock; curadjblock = NULL; } iomgr->close_session(adjfile_session); } size_t num_edges() { return edatafilesize / sizeof(ET); } protected: size_t get_adjoffset() { return adjoffset; } size_t get_edataoffset() { return edataoffset; } void save_offset() { // Note, so that we can use the lower bound operation in map, we need // to insert indices in reverse order sparse_index.insert(std::pair<int, indexentry>(-((int)curvid), indexentry(adjoffset, edataoffset))); } void move_close_to(vid_t v) { if (curvid >= v) return; std::map<int,indexentry>::iterator lowerbd_iter = sparse_index.lower_bound(-((int)v)); int closest_vid = -((int)lowerbd_iter->first); assert(closest_vid>=0); indexentry closest_offset = lowerbd_iter->second; assert(closest_vid <= (int)v); if (closest_vid > (int)curvid) { /* Note: this will fail if we have over 2B vertices! */ if (curblock != NULL) // Move the pointer - this may invalidate the curblock, but it is being checked later curblock->ptr += closest_offset.edataoffset - edataoffset; if (curadjblock != NULL) curadjblock->ptr += closest_offset.adjoffset - adjoffset; curvid = (vid_t)closest_vid; adjoffset = closest_offset.adjoffset; edataoffset = closest_offset.edataoffset; return; } else { // Do nothing - just continue from current pos. return; } } inline void check_curblock(size_t toread) { if (curblock == NULL || curblock->end < edataoffset+toread) { if (curblock != NULL) { if (!curblock->active) { curblock->release(iomgr); } } // Load next std::string blockfilename = filename_shard_edata_block(filename_edata, (int) (edataoffset / blocksize), blocksize); int edata_session = iomgr->open_session(blockfilename, false, true); sblock newblock(edata_session, edata_session, true); // We align blocks always to the blocksize, even if that requires // allocating and reading some unnecessary data. 
newblock.offset = (edataoffset / blocksize) * blocksize; // Align size_t correction = edataoffset - newblock.offset; newblock.end = std::min(edatafilesize, newblock.offset + blocksize); assert(newblock.end >= newblock.offset); iomgr->managed_malloc(edata_session, &newblock.data, newblock.end - newblock.offset, newblock.offset); newblock.ptr = newblock.data + correction; activeblocks.push_back(newblock); curblock = &activeblocks[activeblocks.size()-1]; } } inline void check_adjblock(size_t toread) { if (curadjblock == NULL || curadjblock->end <= adjoffset + toread) { if (curadjblock != NULL) { curadjblock->release(iomgr); delete curadjblock; curadjblock = NULL; } sblock * newblock = new sblock(0, adjfile_session); newblock->offset = adjoffset; newblock->end = std::min(adjfilesize, adjoffset+blocksize); assert(newblock->end > 0); assert(newblock->end >= newblock->offset); iomgr->managed_malloc(adjfile_session, &newblock->data, newblock->end - newblock->offset, adjoffset); newblock->ptr = newblock->data; metrics_entry me = m.start_time(); iomgr->managed_preada_now(adjfile_session, &newblock->data, newblock->end - newblock->offset, adjoffset); m.stop_time(me, "blockload"); curadjblock = newblock; } } template <typename U> inline U read_val() { check_adjblock(sizeof(U)); U res = *((U*)curadjblock->ptr); adjoffset += sizeof(U); curadjblock->ptr += sizeof(U); return res; } template <typename U> inline U * read_edgeptr() { if (only_adjacency) return NULL; check_curblock(sizeof(U)); U * resptr = ((U*)curblock->ptr); edataoffset += sizeof(U); curblock->ptr += sizeof(U); return resptr; } inline void skip(int n, int sz) { size_t tot = n * sz; adjoffset += tot; if (curadjblock != NULL) curadjblock->ptr += tot; edataoffset += sizeof(ET)*n; if (curblock != NULL) curblock->ptr += sizeof(ET)*n; } public: /** * Read out-edges for vertices. 
*/ void read_next_vertices(int nvecs, vid_t start, std::vector<svertex_t> & prealloc, bool record_index=false, bool disable_writes=false) { metrics_entry me = m.start_time(); if (!record_index) move_close_to(start); /* Release the blocks we do not need anymore */ curblock = NULL; release_prior_to_offset(false, disable_writes); assert(activeblocks.size() <= 1); /* Read next */ if (!activeblocks.empty() && !only_adjacency) { curblock = &activeblocks[0]; } vid_t lastrec = start; window_start_edataoffset = edataoffset; for(int i=((int)curvid) - ((int)start); i<nvecs; i++) { if (adjoffset >= adjfilesize) break; // TODO: skip unscheduled vertices. int n; if (record_index && (size_t)(curvid - lastrec) >= (size_t) std::max((int)100000, nvecs/16)) { save_offset(); lastrec = curvid; } uint8_t ns = read_val<uint8_t>(); if (ns == 0x00) { curvid++; uint8_t nz = read_val<uint8_t>(); curvid += nz; i += nz; continue; } if (ns == 0xff) { n = read_val<uint32_t>(); } else { n = ns; } if (i<0) { // Just skipping skip(n, sizeof(vid_t)); } else { svertex_t& vertex = prealloc[i]; assert(vertex.id() == curvid); if (vertex.scheduled) { while(--n >= 0) { bool special_edge = false; vid_t target = (sizeof(ET) == sizeof(ETspecial) ? read_val<vid_t>() : translate_edge(read_val<vid_t>(), special_edge)); ET * evalue = (special_edge ? (ET*)read_edgeptr<ETspecial>(): read_edgeptr<ET>()); if (!only_adjacency) { if (!curblock->active) { if (async_edata_loading) { curblock->read_async(iomgr); } else { curblock->read_now(iomgr); } } // Note: this needs to be set always because curblock might change during this loop. 
curblock->active = true; // This block has an scheduled vertex - need to commit } vertex.add_outedge(target, evalue, special_edge); if (!((target >= range_st && target <= range_end))) { logstream(LOG_ERROR) << "Error : " << target << " not in [" << range_st << " - " << range_end << "]" << std::endl; iomgr->print_session(adjfile_session); } assert(target >= range_st && target <= range_end); } } else { // This vertex was not scheduled, so we can just skip its edges. skip(n, sizeof(vid_t)); } } curvid++; } m.stop_time(me, "read_next_vertices"); curblock = NULL; } /** * Commit modifications. */ void commit(sblock &b, bool synchronously, bool disable_writes=false) { if (synchronously) { metrics_entry me = m.start_time(); if (!disable_writes) b.commit_now(iomgr); m.stop_time(me, "commit"); b.release(iomgr); } else { if (!disable_writes) b.commit_async(iomgr); else b.release(iomgr); } } /** * Release all buffers */ void flush() { release_prior_to_offset(true); if (curadjblock != NULL) { curadjblock->release(iomgr); delete curadjblock; curadjblock = NULL; } } /** * Set the position of the sliding shard. 
*/ void set_offset(size_t newoff, vid_t _curvid, size_t edgeptr) { this->adjoffset = newoff; this->curvid = _curvid; this->edataoffset = edgeptr; if (curadjblock != NULL) { curadjblock->release(iomgr); delete curadjblock; curadjblock = NULL; } } /** * Release blocks that come prior to the current offset/ */ void release_prior_to_offset(bool all=false, bool disable_writes=false) { // disable writes is for the dynamic case for(int i=(int)activeblocks.size() - 1; i >= 0; i--) { sblock &b = activeblocks[i]; if (b.end <= edataoffset || all) { commit(b, all, disable_writes); activeblocks.erase(activeblocks.begin() + (unsigned int)i); } } } std::string get_info_json() { std::stringstream json; json << "\"size\": "; json << edatafilesize << std::endl; json << ", \"windowStart\": "; json << window_start_edataoffset; json << ", \"windowEnd\": "; json << edataoffset; json << ", \"intervalStart\": "; json << range_st; json << ", \"intervalEnd\": "; json << range_end; return json.str(); } }; }; #endif #endif
36.469667
162
0.495385
[ "vector" ]
b1f1388557c34fc975c2b0ad5066f4f8f3edf7a1
22,561
cc
C++
demo_drivers/young_laplace/refineable_young_laplace.cc
PuneetMatharu/oomph-lib
edd590cbb4f3ef9940b9738f18275ea2fb828c55
[ "RSA-MD" ]
null
null
null
demo_drivers/young_laplace/refineable_young_laplace.cc
PuneetMatharu/oomph-lib
edd590cbb4f3ef9940b9738f18275ea2fb828c55
[ "RSA-MD" ]
1
2022-03-23T16:16:41.000Z
2022-03-23T16:16:41.000Z
demo_drivers/young_laplace/refineable_young_laplace.cc
PuneetMatharu/oomph-lib
edd590cbb4f3ef9940b9738f18275ea2fb828c55
[ "RSA-MD" ]
null
null
null
//LIC// ==================================================================== //LIC// This file forms part of oomph-lib, the object-oriented, //LIC// multi-physics finite-element library, available //LIC// at http://www.oomph-lib.org. //LIC// //LIC// Copyright (C) 2006-2022 Matthias Heil and Andrew Hazel //LIC// //LIC// This library is free software; you can redistribute it and/or //LIC// modify it under the terms of the GNU Lesser General Public //LIC// License as published by the Free Software Foundation; either //LIC// version 2.1 of the License, or (at your option) any later version. //LIC// //LIC// This library is distributed in the hope that it will be useful, //LIC// but WITHOUT ANY WARRANTY; without even the implied warranty of //LIC// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU //LIC// Lesser General Public License for more details. //LIC// //LIC// You should have received a copy of the GNU Lesser General Public //LIC// License along with this library; if not, write to the Free Software //LIC// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA //LIC// 02110-1301 USA. //LIC// //LIC// The authors may be contacted at oomph-lib@maths.man.ac.uk. //LIC// //LIC//==================================================================== //Driver for refineable Young Laplace problem //Generic routines #include "generic.h" // The YoungLaplace equations #include "young_laplace.h" // The mesh #include "meshes/rectangular_quadmesh.h" // Namespaces using namespace std; using namespace oomph; // Namespace (shared with non-refineable version) #include "common_young_laplace_stuff.h" //====== start_of_problem_class======================================= /// 2D RefineableYoungLaplace problem on rectangular domain, discretised with /// 2D QRefineableYoungLaplace elements. The specific type of element is /// specified via the template parameter. 
//==================================================================== template<class ELEMENT> class RefineableYoungLaplaceProblem : public Problem { public: /// Constructor: RefineableYoungLaplaceProblem(); /// Destructor (empty) ~RefineableYoungLaplaceProblem(){}; /// Update the problem specs before solve: Empty void actions_before_newton_solve(){}; /// Update the problem after solve: Empty void actions_after_newton_solve(){}; /// Actions before adapt: Wipe the mesh of contact angle elements void actions_before_adapt() { // Kill the contact angle elements and wipe contact angle mesh if (Contact_angle_mesh_pt!=0) delete_contact_angle_elements(); // Rebuild the Problem's global mesh from its various sub-meshes rebuild_global_mesh(); } /// Actions after adapt: Rebuild the mesh of contact angle elements void actions_after_adapt() { // Create contact angle elements on boundaries 1 and 3 of bulk mesh if (GlobalParameters::Case== GlobalParameters::T_junction_with_nonzero_contact_angle) { create_contact_angle_elements(1); create_contact_angle_elements(3); // Set function pointers for contact-angle elements unsigned nel=Contact_angle_mesh_pt->nelement(); for (unsigned e=0;e<nel;e++) { // Upcast from GeneralisedElement to YoungLaplace contact angle // element YoungLaplaceContactAngleElement<ELEMENT> *el_pt = dynamic_cast<YoungLaplaceContactAngleElement<ELEMENT>*>( Contact_angle_mesh_pt->element_pt(e)); // Set the pointer to the prescribed contact angle el_pt->prescribed_cos_gamma_pt() = &GlobalParameters::Cos_gamma; } } // Rebuild the Problem's global mesh from its various sub-meshes rebuild_global_mesh(); } /// Increase the problem parameters before each solve void increment_parameters(); /// Doc the solution. 
DocInfo object stores flags/labels for where the /// output gets written to and the trace file void doc_solution(DocInfo& doc_info, ofstream& trace_file); private: /// Create YoungLaplace contact angle elements on the /// b-th boundary of the bulk mesh and add them to contact angle mesh void create_contact_angle_elements(const unsigned& b); /// Delete contact angle elements void delete_contact_angle_elements(); /// Pointer to the "bulk" mesh RefineableRectangularQuadMesh<ELEMENT>* Bulk_mesh_pt; /// Pointer to the contact angle mesh Mesh* Contact_angle_mesh_pt; /// Pointer to mesh containing the height control element Mesh* Height_control_mesh_pt; /// Pointer to height control element HeightControlElement* Height_control_element_pt; /// Node at which the height (displacement along spine) is controlled/doced Node* Control_node_pt; }; // end of problem class //=====start_of_constructor=============================================== /// Constructor for RefineableYoungLaplace problem //======================================================================== template<class ELEMENT> RefineableYoungLaplaceProblem<ELEMENT>::RefineableYoungLaplaceProblem() { // Setup dependent parameters in namespace GlobalParameters::setup_dependent_parameters_and_sanity_check(); // Setup bulk mesh //---------------- // # of elements in x-direction unsigned n_x=GlobalParameters::N_x; // # of elements in y-direction unsigned n_y=GlobalParameters::N_y; // Domain length in x-direction double l_x=GlobalParameters::L_x; // Domain length in y-direction double l_y=GlobalParameters::L_y; // Print Size of the mesh cout << "Lx = " << l_x << " and Ly = " << l_y << endl; // Build and assign mesh Bulk_mesh_pt=new RefineableRectangularQuadMesh<ELEMENT>(n_x,n_y,l_x,l_y); // Create/set error estimator Bulk_mesh_pt->spatial_error_estimator_pt()=new Z2ErrorEstimator; // Set targets for spatial adaptivity Bulk_mesh_pt->max_permitted_error()=1.0e-4; Bulk_mesh_pt->min_permitted_error()=1.0e-6; // Add bulk mesh 
to the global mesh add_sub_mesh(Bulk_mesh_pt); // Prescribed height? //------------------- // Choose the prescribed height element ELEMENT* prescribed_height_element_pt= dynamic_cast<ELEMENT*>( Bulk_mesh_pt->element_pt(GlobalParameters::Control_element)); // ...and the associated control node (node 0 in that element) // (we're storing this node even if there's no height-control, for // output purposes...) Control_node_pt= static_cast<Node*>( prescribed_height_element_pt->node_pt(0)); cout << "Controlling height at (x,y) : (" << Control_node_pt->x(0) << "," << Control_node_pt->x(1) << ")" << endl; // If needed, create a height control element and store the // pointer to the Kappa Data created by this object Height_control_element_pt=0; Height_control_mesh_pt=0; if (GlobalParameters::Use_height_control) { Height_control_element_pt=new HeightControlElement( Control_node_pt,&GlobalParameters::Controlled_height); GlobalParameters::Kappa_pt=Height_control_element_pt->kappa_pt(); Height_control_element_pt->kappa_pt()-> set_value(0,GlobalParameters::Kappa_initial); // Add to mesh Height_control_mesh_pt = new Mesh; Height_control_mesh_pt->add_element_pt(Height_control_element_pt); // Add height control mesh to the global mesh add_sub_mesh(Height_control_mesh_pt); } //...otherwise create a kappa data item from scratch and pin its // single unknown value else { if (GlobalParameters::Kappa_pt!=0) delete GlobalParameters::Kappa_pt; GlobalParameters::Kappa_pt=new Data(1); GlobalParameters::Kappa_pt->set_value(0,GlobalParameters::Kappa_initial); GlobalParameters::Kappa_pt->pin(0); } // Contact angle elements //----------------------- // Create prescribed-contact-angle elements from all elements that are // adjacent to boundary 1 and 3 and add them to their own mesh Contact_angle_mesh_pt=0; if (GlobalParameters::Case== GlobalParameters::T_junction_with_nonzero_contact_angle) { // set up new mesh Contact_angle_mesh_pt=new Mesh; // creation of contact angle elements 
create_contact_angle_elements(1); create_contact_angle_elements(3); // Add contact angle mesh to the global mesh add_sub_mesh(Contact_angle_mesh_pt); } // Build global mesh //------------------ build_global_mesh(); // Boundary conditions //-------------------- // Set the boundary conditions for this problem: All nodes are // free by default -- only need to pin the ones that have Dirichlet conditions // here. unsigned n_bound = Bulk_mesh_pt->nboundary(); for(unsigned b=0;b<n_bound;b++) { // Pin all boundaries for three cases and only boundaries // 0 and 2 in all others: if ((GlobalParameters::Case==GlobalParameters::All_pinned)|| (b==0)|| (b==2)) { unsigned n_node = Bulk_mesh_pt->nboundary_node(b); for (unsigned n=0;n<n_node;n++) { Bulk_mesh_pt->boundary_node_pt(b,n)->pin(0); } } } // Complete build of elements //--------------------------- // Complete the build of all elements so they are fully functional unsigned n_bulk=Bulk_mesh_pt->nelement(); for(unsigned i=0;i<n_bulk;i++) { // Upcast from GeneralsedElement to the present element ELEMENT *el_pt = dynamic_cast<ELEMENT*>(Bulk_mesh_pt->element_pt(i)); if ( GlobalParameters::Use_spines ) { //Set the spine function pointers el_pt->spine_base_fct_pt() = GlobalParameters::spine_base_function; el_pt->spine_fct_pt() = GlobalParameters::spine_function; } // Set the curvature data for the element el_pt->set_kappa(GlobalParameters::Kappa_pt); } // Set function pointers for contact-angle elements if (GlobalParameters::Case== GlobalParameters::T_junction_with_nonzero_contact_angle) { // Set function pointers for contact-angle elements unsigned nel=Contact_angle_mesh_pt->nelement(); for (unsigned e=0;e<nel;e++) { // Upcast from GeneralisedElement to YoungLaplace contact angle // element YoungLaplaceContactAngleElement<ELEMENT> *el_pt = dynamic_cast<YoungLaplaceContactAngleElement<ELEMENT>*>( Contact_angle_mesh_pt->element_pt(e)); // Set the pointer to the prescribed contact angle el_pt->prescribed_cos_gamma_pt() = 
&GlobalParameters::Cos_gamma; } } // Setup equation numbering scheme cout <<"\nNumber of equations: " << assign_eqn_numbers() << endl; cout << "\n********************************************\n" << endl; } // end of constructor //============start_of_create_contact_angle_elements===================== /// Create YoungLaplace contact angle elements on the b-th boundary of the /// bulk mesh and add them to the contact angle mesh //======================================================================= template<class ELEMENT> void RefineableYoungLaplaceProblem<ELEMENT>::create_contact_angle_elements( const unsigned &b) { // How many bulk elements are adjacent to boundary b? unsigned n_element = Bulk_mesh_pt->nboundary_element(b); // Loop over the bulk elements adjacent to boundary b? for(unsigned e=0;e<n_element;e++) { // Get pointer to the bulk element that is adjacent to boundary b ELEMENT* bulk_elem_pt = dynamic_cast<ELEMENT*>( Bulk_mesh_pt->boundary_element_pt(b,e)); // What is the index of the face of the bulk element at the boundary int face_index = Bulk_mesh_pt->face_index_at_boundary(b,e); // Build the corresponding contact angle element YoungLaplaceContactAngleElement<ELEMENT>* contact_angle_element_pt = new YoungLaplaceContactAngleElement<ELEMENT>(bulk_elem_pt,face_index); //Add the contact angle element to the contact angle mesh Contact_angle_mesh_pt->add_element_pt(contact_angle_element_pt); } //end of loop over bulk elements adjacent to boundary b } // end of create_contact_angle_elements //============start_of_delete_contact_angle_elements===================== /// Delete YoungLaplace contact angle elements //======================================================================= template<class ELEMENT> void RefineableYoungLaplaceProblem<ELEMENT>::delete_contact_angle_elements() { // How many contact angle elements are there? 
unsigned n_element = Contact_angle_mesh_pt->nelement(); // Loop over the surface elements for(unsigned e=0;e<n_element;e++) { // Kill surface element delete Contact_angle_mesh_pt->element_pt(e); } // Wipe the mesh Contact_angle_mesh_pt->flush_element_and_node_storage(); } // end of delete_contact_angle_elements //===============start_of_update_parameters============================ /// Update (increase/decrease) parameters //===================================================================== template<class ELEMENT> void RefineableYoungLaplaceProblem<ELEMENT>::increment_parameters() { // Increment kappa or height value if (!GlobalParameters::Use_height_control) { double kappa=GlobalParameters::Kappa_pt->value(0); kappa+=GlobalParameters::Kappa_increment; GlobalParameters::Kappa_pt->set_value(0,kappa); cout << "Solving for Prescribed KAPPA Value = " ; cout << GlobalParameters::Kappa_pt->value(0) << "\n" << endl; } else { GlobalParameters::Controlled_height+= GlobalParameters::Controlled_height_increment; cout << "Solving for Prescribed HEIGHT Value = " ; cout << GlobalParameters::Controlled_height << "\n" << endl; } } //===============start_of_doc============================================= /// Doc the solution: doc_info contains labels/output directory etc. 
//======================================================================== template<class ELEMENT> void RefineableYoungLaplaceProblem<ELEMENT>::doc_solution(DocInfo& doc_info, ofstream& trace_file) { // Output kappa vs height //----------------------- trace_file << -1.0*GlobalParameters::Kappa_pt->value(0) << " "; trace_file << GlobalParameters::get_exact_kappa() << " "; trace_file << Control_node_pt->value(0) ; trace_file << endl; // Number of plot points: npts x npts unsigned npts=5; // Output full solution //--------------------- ofstream some_file; char filename[100]; //YoungLaplaceEquations::Output_meniscus_and_spines=false; sprintf(filename,"%s/soln%i.dat",doc_info.directory().c_str(), doc_info.number()); some_file.open(filename); Bulk_mesh_pt->output(some_file,npts); some_file.close(); // Output contact angle //--------------------- //Doc contact angle stuff if (GlobalParameters::Case== GlobalParameters::T_junction_with_nonzero_contact_angle) { ofstream tangent_file; sprintf(filename,"%s/tangent_to_contact_line%i.dat", doc_info.directory().c_str(), doc_info.number()); tangent_file.open(filename); ofstream normal_file; sprintf(filename,"%s/normal_to_contact_line%i.dat", doc_info.directory().c_str(), doc_info.number()); normal_file.open(filename); ofstream contact_angle_file; sprintf(filename,"%s/contact_angle%i.dat", doc_info.directory().c_str(), doc_info.number()); contact_angle_file.open(filename); // Tangent and normal vectors to contact line Vector<double> tangent(3); Vector<double> normal(3); Vector<double> r_contact(3); // How many contact angle elements are there? 
unsigned n_element = Contact_angle_mesh_pt->nelement(); // Loop over the surface elements for(unsigned e=0;e<n_element;e++) { tangent_file << "ZONE" << std::endl; normal_file << "ZONE" << std::endl; contact_angle_file << "ZONE" << std::endl; // Upcast from GeneralisedElement to YoungLaplace contact angle element YoungLaplaceContactAngleElement<ELEMENT>* el_pt = dynamic_cast<YoungLaplaceContactAngleElement<ELEMENT>*>( Contact_angle_mesh_pt->element_pt(e)); // Loop over a few points in the contact angle element Vector<double> s(1); for (unsigned i=0;i<npts;i++) { s[0]=-1.0+2.0*double(i)/double(npts-1); dynamic_cast<ELEMENT*>(el_pt->bulk_element_pt())-> position(el_pt->local_coordinate_in_bulk(s),r_contact); el_pt->contact_line_vectors(s,tangent,normal); tangent_file << r_contact[0] << " " << r_contact[1] << " " << r_contact[2] << " " << tangent[0] << " " << tangent[1] << " " << tangent[2] << " " << std::endl; normal_file << r_contact[0] << " " << r_contact[1] << " " << r_contact[2] << " " << normal[0] << " " << normal[1] << " " << normal[2] << " " << std::endl; contact_angle_file << r_contact[1] << " " << el_pt->actual_cos_contact_angle(s) << std::endl; } } // end of loop over both boundaries tangent_file.close(); normal_file.close(); contact_angle_file.close(); } cout << "\n********************************************" << endl << endl; } // end of doc //======================================================================== /// Run code for current setting of parameter values -- specify name /// of output directory //======================================================================== void run_it(const string& output_directory) { // Create label for output //------------------------ DocInfo doc_info; // Set outputs //------------ // Trace file ofstream trace_file; // Set output directory doc_info.set_directory(output_directory); // Open a trace file char filename[100]; sprintf(filename,"%s/trace.dat",doc_info.directory().c_str()); trace_file.open(filename); // 
Write kappa, exact kappa if exists and height values trace_file << "VARIABLES=\"<GREEK>k</GREEK>\",\"<GREEK>k</GREEK>_{ex}\",\"h\"" << std::endl; trace_file << "ZONE" << std::endl; //Set up the problem //------------------ // Create the problem with 2D nine-node elements from the // RefineableQYoungLaplaceElement family. RefineableYoungLaplaceProblem<RefineableQYoungLaplaceElement<3> > problem; problem.refine_uniformly(); //Output the solution problem.doc_solution(doc_info,trace_file); //Increment counter for solutions doc_info.number()++; // Parameter incrementation //------------------------- // Loop over steps for (unsigned istep=0;istep<GlobalParameters::Nsteps;istep++) { // Bump up parameters problem.increment_parameters(); // Solve the problem unsigned max_adapt=1; problem.newton_solve(max_adapt); //Output the solution problem.doc_solution(doc_info,trace_file); //Increment counter for solutions doc_info.number()++; } // Close output file trace_file.close(); } //end of run_it() //===== start_of_main===================================================== /// Driver code for 2D RefineableYoungLaplace problem. Input arguments: none /// (for validation) or case (0,1,2,3 for all pinned, barrel with spines, /// barrel without spines, and T junction), and number of steps. //======================================================================== int main(int argc, char* argv[]) { // Store command line arguments CommandLineArgs::setup(argc,argv); // Cases to run (By default (validation) run all unsigned case_lo=0; unsigned case_hi=3; // No command line args: Running every case with // limited number of steps if (CommandLineArgs::Argc==1) { std::cout << "Running every case with limited number of steps for validation" << std::endl; // Number of steps GlobalParameters::Nsteps=2; } else { // Which case to run? 
case_lo=atoi(argv[1]); case_hi=atoi(argv[1]); // Number of steps GlobalParameters::Nsteps=atoi(argv[2]); } // Loop over chosen case(s) //------------------------- for (unsigned my_case=case_lo;my_case<=case_hi;my_case++) { // Choose switch (my_case) { case 0: cout << endl << endl << "//////////////////////////////////////////////////////////\n" << "All pinned solution \n" << "//////////////////////////////////////////////////////////\n\n"; GlobalParameters::Case=GlobalParameters::All_pinned; // Run with spines GlobalParameters::Use_spines=true; run_it("RESLT_adapt_all_pinned"); break; case 1: cout << endl << endl << "//////////////////////////////////////////////////////////\n" << "Barrel-shaped solution with spine \n" << "/////////////////////////////////////////////////////////\n\n"; GlobalParameters::Case= GlobalParameters::Barrel_shape; GlobalParameters::Controlled_height_increment=0.025; GlobalParameters::Use_spines=true; run_it("RESLT_adapt_barrel_shape"); break; case 2: cout << endl << endl << "//////////////////////////////////////////////////////////\n" << "Barrel-shaped solution without spines \n" << "/////////////////////////////////////////////////////////\n\n"; GlobalParameters::Case= GlobalParameters::Barrel_shape; GlobalParameters::Controlled_height_increment=0.025; GlobalParameters::Use_spines=false; run_it("RESLT_adapt_barrel_shape_without_spines"); break; case 3: cout << endl << endl << "//////////////////////////////////////////////////////////\n" << "T-junction solution \n" << "//////////////////////////////////////////////////////////\n\n"; GlobalParameters::Case= GlobalParameters::T_junction_with_nonzero_contact_angle; GlobalParameters::Gamma=MathematicalConstants::Pi/6.0; GlobalParameters::L_x=1.0; GlobalParameters::L_y=5.0; // Run with spines GlobalParameters::Use_spines=true; run_it("RESLT_adapt_T_junction"); break; default: std::cout << "Wrong case! 
Options are:\n" << "0: adaptive All pinned\n" << "1: adaptive Barrel with spines\n" << "2: adaptive Barrel without spines\n" << "3: adaptive T_junction\n" << std::endl; assert(false); } } } //end of main
30.61194
79
0.62218
[ "mesh", "object", "vector" ]
b1fad03a4e6ee314ec97967a953c80032ddc328f
6,588
cpp
C++
src/pyinterp/core/module/dateutils.cpp
CNES/pangeo-pyinterp
5f75f62a6c681db89c5aa8c74e43fc04a77418c3
[ "BSD-3-Clause" ]
67
2019-07-09T09:10:22.000Z
2022-03-01T09:46:35.000Z
src/pyinterp/core/module/dateutils.cpp
CNES/pangeo-pyinterp
5f75f62a6c681db89c5aa8c74e43fc04a77418c3
[ "BSD-3-Clause" ]
8
2019-07-15T13:54:31.000Z
2021-06-28T05:06:34.000Z
src/pyinterp/core/module/dateutils.cpp
CNES/pangeo-pyinterp
5f75f62a6c681db89c5aa8c74e43fc04a77418c3
[ "BSD-3-Clause" ]
7
2019-07-15T17:28:16.000Z
2022-01-19T19:43:47.000Z
// Copyright (c) 2021 CNES // // All rights reserved. Use of this source code is governed by a // BSD-style license that can be found in the LICENSE file. #include "pyinterp/dateutils.hpp" #include <datetime.h> #include <pybind11/numpy.h> #include <pybind11/pybind11.h> namespace py = pybind11; namespace dateutils = pyinterp::dateutils; namespace detail { static auto date(const py::array& array) -> py::array_t<dateutils::Date> { auto frac = dateutils::FractionalSeconds(array.dtype()); auto result = py::array_t<dateutils::Date>(py::array::ShapeContainer({array.size()})); auto _array = array.unchecked<int64_t, 1>(); auto _result = result.mutable_unchecked<1>(); { auto gil = py::gil_scoped_release(); for (auto ix = 0; ix < array.size(); ++ix) { _result[ix] = dateutils::year_month_day(frac.seconds(_array[ix])); } } return result; } static auto time(const py::array& array) -> py::array_t<dateutils::Time> { auto frac = dateutils::FractionalSeconds(array.dtype()); auto result = py::array_t<dateutils::Time>(py::array::ShapeContainer({array.size()})); auto _array = array.unchecked<int64_t, 1>(); auto _result = result.mutable_unchecked<1>(); { auto gil = py::gil_scoped_release(); for (auto ix = 0; ix < array.size(); ++ix) { _result[ix] = dateutils::hour_minute_second(frac.seconds(_array[ix])); } } return result; } static auto isocalendar(const py::array& array) -> py::array_t<dateutils::ISOCalendar> { auto frac = dateutils::FractionalSeconds(array.dtype()); auto result = py::array_t<dateutils::ISOCalendar>( py::array::ShapeContainer({array.size()})); auto _array = array.unchecked<int64_t, 1>(); auto _result = result.mutable_unchecked<1>(); { auto gil = py::gil_scoped_release(); for (auto ix = 0; ix < array.size(); ++ix) { _result[ix] = dateutils::isocalendar(frac.seconds(_array[ix])); } } return result; } static auto weekday(const py::array& array) -> py::array_t<unsigned> { auto frac = dateutils::FractionalSeconds(array.dtype()); auto result = 
py::array_t<unsigned>(py::array::ShapeContainer({array.size()})); auto _array = array.unchecked<int64_t, 1>(); auto _result = result.mutable_unchecked<1>(); { auto gil = py::gil_scoped_release(); for (auto ix = 0; ix < array.size(); ++ix) { _result[ix] = dateutils::weekday(frac.seconds(_array[ix])); } } return result; } static auto timedelta_since_january(const py::array& array) -> py::array { auto frac = dateutils::FractionalSeconds(array.dtype()); auto result = py::array(py::dtype("timedelta64[" + frac.units() + "]"), py::array::ShapeContainer({array.size()}), nullptr); auto _array = array.unchecked<int64_t, 1>(); auto _result = result.mutable_unchecked<int64_t, 1>(); { auto gil = py::gil_scoped_release(); for (auto ix = 0; ix < array.size(); ++ix) { auto epoch = frac.seconds(_array[ix]); auto days_since_january = dateutils::days_since_january(dateutils::year_month_day(epoch)); auto hms = dateutils::hour_minute_second(epoch); _result[ix] = (days_since_january * 86400LL + hms.hour * 3600LL + hms.minute * 60LL + hms.second) * frac.scale() + frac.fractional(_array[ix]); } } return result; } static auto datetime(const py::array& array) -> py::array { auto frac = dateutils::FractionalSeconds(array.dtype()); auto* buffer = new PyObject*[array.size()]; auto _array = array.unchecked<int64_t, 1>(); if (PyDateTimeAPI == nullptr) { PyDateTime_IMPORT; } for (auto ix = 0; ix < array.size(); ++ix) { auto epoch = frac.seconds(_array[ix]); auto date = dateutils::year_month_day(epoch); auto time = dateutils::hour_minute_second(epoch); auto msec = frac.microsecond(_array[ix]); buffer[ix] = PyDateTime_FromDateAndTime(date.year, date.month, date.day, time.hour, time.minute, time.second, static_cast<int>(msec)); } auto capsule = py::capsule( buffer, [](void* ptr) { delete[] static_cast<PyObject*>(ptr); }); return py::array(py::dtype("object"), pybind11::array::ShapeContainer({array.size()}), buffer, capsule); } } // namespace detail void init_dateutils(py::module& m) { 
PYBIND11_NUMPY_DTYPE(dateutils::Date, year, month, day); PYBIND11_NUMPY_DTYPE(dateutils::Time, hour, minute, second); PYBIND11_NUMPY_DTYPE(dateutils::ISOCalendar, year, week, weekday); m.def("date", &detail::date, py::arg("array"), R"__doc__( Return the date part of the dates. Args: array (numpy.ndarray): Numpy array of datetime64 to process. Returns: numpy.ndarray: A structured numpy array containing three fields: ``year``, ``month`` and ``day``. )__doc__") .def("datetime", &detail::datetime, py::arg("array"), R"__doc__( Return the data as an array of native Python datetime objects. Args: array (numpy.ndarray): Numpy array of datetime64 to process. Returns: numpy.ndarray: Object dtype array containing native Python datetime objects. )__doc__") .def("timedelta_since_january", &detail::timedelta_since_january, py::arg("array"), R"__doc__( Return the number the timedelta since the first January. Args: array (numpy.ndarray): Numpy array of datetime64 to process. Returns: numpy.ndarray: timedelta64 dtype array containing the time delta since the first January. )__doc__") .def("isocalendar", &detail::isocalendar, py::arg("array"), R"__doc__( Return the ISO calendar of dates. Args: array (numpy.ndarray): Numpy array of datetime64 to process. Returns: numpy.ndarray: A structured numpy array containing three fields: ``year``, ``week`` and ``weekday``. .. seealso:: datetime.date.isocalendar. )__doc__") .def("time", &detail::time, py::arg("array"), R"__doc__( Return the time part of the dates. Args: array (numpy.ndarray): Numpy array of datetime64 to process. Returns: numpy.ndarray: A structured numpy array containing three fields: ``hour``, ``minute`` and ``second``. )__doc__") .def("weekday", &detail::weekday, py::arg("array"), R"__doc__( Return the weekday of the dates; Sunday is 0 ... Saturday is 6. Args: array (numpy.ndarray): Numpy array of datetime64 to process. Returns: numpy.ndarray: int dtype array containing weekday of the dates. )__doc__"); }
31.826087
80
0.653461
[ "object" ]
b1fbb6d106b4db79d012e8718df1d9753b6c574b
19,015
cpp
C++
src/IO/H5/VolumeData.cpp
Ambrou/spectre
a819ebbcca607d8af9683db3683bea14bf4ac23c
[ "MIT" ]
null
null
null
src/IO/H5/VolumeData.cpp
Ambrou/spectre
a819ebbcca607d8af9683db3683bea14bf4ac23c
[ "MIT" ]
1
2022-03-25T18:26:16.000Z
2022-03-25T19:30:39.000Z
src/IO/H5/VolumeData.cpp
isaaclegred/spectre
5765da85dad680cad992daccd479376c67458a8c
[ "MIT" ]
1
2019-01-03T21:47:04.000Z
2019-01-03T21:47:04.000Z
// Distributed under the MIT License. // See LICENSE.txt for details. #include "IO/H5/VolumeData.hpp" #include <algorithm> #include <boost/algorithm/string.hpp> #include <boost/iterator/transform_iterator.hpp> #include <hdf5.h> #include <memory> #include <ostream> #include <string> #include <vector> #include "DataStructures/DataVector.hpp" #include "DataStructures/Tensor/TensorData.hpp" #include "IO/Connectivity.hpp" #include "IO/H5/AccessType.hpp" #include "IO/H5/Header.hpp" #include "IO/H5/Helpers.hpp" #include "IO/H5/SpectralIo.hpp" #include "IO/H5/Version.hpp" #include "Utilities/Algorithm.hpp" #include "Utilities/ErrorHandling/Assert.hpp" #include "Utilities/ErrorHandling/Error.hpp" #include "Utilities/ErrorHandling/ExpectsAndEnsures.hpp" #include "Utilities/GetOutput.hpp" #include "Utilities/Gsl.hpp" #include "Utilities/Literals.hpp" #include "Utilities/MakeString.hpp" #include "Utilities/Numeric.hpp" /// \cond HIDDEN_SYMBOLS namespace h5 { namespace { // Append the element extents and connectevity to the total extents and // connectivity void append_element_extents_and_connectivity( const gsl::not_null<std::vector<size_t>*> total_extents, const gsl::not_null<std::vector<int>*> total_connectivity, const gsl::not_null<int*> total_points_so_far, const size_t dim, const ExtentsAndTensorVolumeData& element) noexcept { // Process the element extents const auto& extents = element.extents; if (extents.size() != dim) { ERROR("Trying to write data of dimensionality" << extents.size() << "but the VolumeData file has dimensionality" << dim << "."); } total_extents->insert(total_extents->end(), extents.begin(), extents.end()); // Find the number of points in the local connectivity const int element_num_points = alg::accumulate(extents, 1, std::multiplies<>{}); // Generate the connectivity data for the element // Possible optimization: local_connectivity.reserve(BLAH) if we can figure // out size without computing all the connectivities. 
const std::vector<int> connectivity = [&extents, &total_points_so_far]() noexcept { std::vector<int> local_connectivity; for (const auto& cell : vis::detail::compute_cells(extents)) { for (const auto& bounding_indices : cell.bounding_indices) { local_connectivity.emplace_back(*total_points_so_far + static_cast<int>(bounding_indices)); } } return local_connectivity; }(); *total_points_so_far += element_num_points; total_connectivity->insert(total_connectivity->end(), connectivity.begin(), connectivity.end()); } // Append the name of an element to the string of grid names void append_element_name(const gsl::not_null<std::string*> grid_names, const ExtentsAndTensorVolumeData& element) noexcept { // Get the name of the element const auto& first_tensor_name = element.tensor_components.front().name; ASSERT(first_tensor_name.find_last_of('/') != std::string::npos, "The expected format of the tensor component names is " "'GROUP_NAME/COMPONENT_NAME' but could not find a '/' in '" << first_tensor_name << "'."); const auto spatial_name = first_tensor_name.substr(0, first_tensor_name.find_last_of('/')); *grid_names += spatial_name + VolumeData::separator(); } } // namespace VolumeData::VolumeData(const bool subfile_exists, detail::OpenGroup&& group, const hid_t /*location*/, const std::string& name, const uint32_t version) noexcept : group_(std::move(group)), name_(name.size() > extension().size() ? (extension() == name.substr(name.size() - extension().size()) ? name : name + extension()) : name + extension()), version_(version), volume_data_group_(group_.id(), name_, h5::AccessType::ReadWrite) { if (subfile_exists) { // We treat this as an internal version for now. We'll need to deal with // proper versioning later. 
const Version open_version(true, detail::OpenGroup{}, volume_data_group_.id(), "version"); version_ = open_version.get_version(); const Header header(true, detail::OpenGroup{}, volume_data_group_.id(), "header"); header_ = header.get_header(); } else { // file does not exist // Subfiles are closed as they go out of scope, so we have the extra // braces here to add the necessary scope { Version open_version(false, detail::OpenGroup{}, volume_data_group_.id(), "version", version_); } { Header header(false, detail::OpenGroup{}, volume_data_group_.id(), "header"); header_ = header.get_header(); } } } // Write Volume Data stored in a vector of `ExtentsAndTensorVolumeData` to // an `observation_group` in a `VolumeData` file. void VolumeData::write_volume_data( const size_t observation_id, const double observation_value, const std::vector<ElementVolumeData>& elements) noexcept { const std::string path = "ObservationId" + std::to_string(observation_id); detail::OpenGroup observation_group(volume_data_group_.id(), path, AccessType::ReadWrite); if (contains_attribute(observation_group.id(), "", "observation_value")) { ERROR("Trying to write ObservationId " << std::to_string(observation_id) << " with observation_value " << observation_group.id() << " which already exists in file at " << path << "."); } h5::write_to_attribute(observation_group.id(), "observation_value", observation_value); // Get first element to extract the component names and dimension const auto get_component_name = [](const auto& component) noexcept { ASSERT(component.name.find_last_of('/') != std::string::npos, "The expected format of the tensor component names is " "'GROUP_NAME/COMPONENT_NAME' but could not find a '/' in '" << component.name << "'."); return component.name.substr(component.name.find_last_of('/') + 1); }; const std::vector<std::string> component_names( boost::make_transform_iterator(elements.front().tensor_components.begin(), get_component_name), 
boost::make_transform_iterator(elements.front().tensor_components.end(), get_component_name)); // The dimension of the grid is the number of extents per element. I.e., if // the extents are [8,5,7] for any element, the dimension of the grid is 3. // Only written once per VolumeData file (All volume data in a single file // should have the same dimensionality) if (not contains_attribute(volume_data_group_.id(), "", "dimension")) { h5::write_to_attribute(volume_data_group_.id(), "dimension", elements.front().extents.size()); } const auto dim = h5::read_value_attribute<size_t>(volume_data_group_.id(), "dimension"); // Extract Tensor Data one component at a time std::vector<size_t> total_extents; std::string grid_names; std::vector<int> total_connectivity; std::vector<int> quadratures; std::vector<int> bases; // Keep a running count of the number of points so far to use as a global // index for the connectivity int total_points_so_far = 0; // Loop over tensor componenents for (size_t i = 0; i < component_names.size(); i++) { std::string component_name = component_names[i]; // Write the data for the tensor component if (h5::contains_dataset_or_group(observation_group.id(), "", component_name)) { ERROR("Trying to write tensor component '" << component_name << "' which already exists in HDF5 file in group '" << name_ << '/' << "ObservationId" << std::to_string(observation_id) << "'"); } std::vector<double> contiguous_tensor_data{}; for (const auto& element : elements) { if (UNLIKELY(i == 0)) { // True if first tensor component being accessed append_element_name(&grid_names, element); // append element basis alg::transform(element.basis, std::back_inserter(bases), [](const Spectral::Basis t) noexcept { return static_cast<int>(t); }); // append element quadraature alg::transform(element.quadrature, std::back_inserter(quadratures), [](const Spectral::Quadrature t) noexcept { return static_cast<int>(t); }); append_element_extents_and_connectivity( &total_extents, 
&total_connectivity, &total_points_so_far, dim, element); } const DataVector& tensor_data_on_grid = element.tensor_components[i].data; contiguous_tensor_data.insert(contiguous_tensor_data.end(), tensor_data_on_grid.begin(), tensor_data_on_grid.end()); } // for each element h5::write_data(observation_group.id(), contiguous_tensor_data, {contiguous_tensor_data.size()}, component_name); } // for each component // Write the grid extents contiguously, the first `dim` belong to the // First grid, the second `dim` belong to the second grid, and so on, // Ordering is `x, y, z, ... ` h5::write_data(observation_group.id(), total_extents, {total_extents.size()}, "total_extents"); // Write the names of the grids as vector of chars with individual names // separated by `separator()` std::vector<char> grid_names_as_chars(grid_names.begin(), grid_names.end()); h5::write_data(observation_group.id(), grid_names_as_chars, {grid_names_as_chars.size()}, "grid_names"); // Write the coded quadrature, along with the dictionary const auto io_quadratures = h5_detail::allowed_quadratures(); std::vector<std::string> quadrature_dict(io_quadratures.size()); alg::transform(io_quadratures, quadrature_dict.begin(), get_output<Spectral::Quadrature>); h5_detail::write_dictionary("Quadrature dictionary", quadrature_dict, observation_group); h5::write_data(observation_group.id(), quadratures, {quadratures.size()}, "quadratures"); // Write the coded basis, along with the dictionary const auto io_bases = h5_detail::allowed_bases(); std::vector<std::string> basis_dict(io_bases.size()); alg::transform(io_bases, basis_dict.begin(), get_output<Spectral::Basis>); h5_detail::write_dictionary("Basis dictionary", basis_dict, observation_group); h5::write_data(observation_group.id(), bases, {bases.size()}, "bases"); // Write the Connectivity h5::write_data(observation_group.id(), total_connectivity, {total_connectivity.size()}, "connectivity"); } std::vector<size_t> VolumeData::list_observation_ids() const 
noexcept { const auto names = get_group_names(volume_data_group_.id(), ""); const auto helper = [](const std::string& s) noexcept { return std::stoul(s.substr(std::string("ObservationId").size())); }; return {boost::make_transform_iterator(names.begin(), helper), boost::make_transform_iterator(names.end(), helper)}; } double VolumeData::get_observation_value( const size_t observation_id) const noexcept { const std::string path = "ObservationId" + std::to_string(observation_id); detail::OpenGroup observation_group(volume_data_group_.id(), path, AccessType::ReadOnly); return h5::read_value_attribute<double>(observation_group.id(), "observation_value"); } std::vector<std::string> VolumeData::list_tensor_components( const size_t observation_id) const noexcept { auto tensor_components = get_group_names(volume_data_group_.id(), "ObservationId" + std::to_string(observation_id)); auto remove_data_name = [&tensor_components](const std::string& data_name) { // NOLINTNEXTLINE(bugprone-unused-return-value) alg::remove(tensor_components, data_name); }; remove_data_name("connectivity"); remove_data_name("total_extents"); remove_data_name("grid_names"); remove_data_name("quadratures"); remove_data_name("bases"); // std::remove moves the element to the end of the vector, so we still need to // actually erase it from the vector tensor_components.erase(tensor_components.end() - 5, tensor_components.end()); return tensor_components; } std::vector<std::string> VolumeData::get_grid_names( const size_t observation_id) const noexcept { const std::string path = "ObservationId" + std::to_string(observation_id); detail::OpenGroup observation_group(volume_data_group_.id(), path, AccessType::ReadOnly); const std::vector<char> names = h5::read_data<1, std::vector<char>>(observation_group.id(), "grid_names"); const std::string all_names(names.begin(), names.end()); std::vector<std::string> grid_names{}; boost::split(grid_names, all_names, [](const char c) noexcept { return c == 
h5::VolumeData::separator(); }); // boost::split counts the last separator as a split even though there are no // characters after it, so the last entry of the vector is empty grid_names.pop_back(); return grid_names; } DataVector VolumeData::get_tensor_component( const size_t observation_id, const std::string& tensor_component) const noexcept { const std::string path = "ObservationId" + std::to_string(observation_id); detail::OpenGroup observation_group(volume_data_group_.id(), path, AccessType::ReadOnly); const hid_t dataset_id = h5::open_dataset(observation_group.id(), tensor_component); const hid_t dataspace_id = h5::open_dataspace(dataset_id); const auto rank = static_cast<size_t>(H5Sget_simple_extent_ndims(dataspace_id)); h5::close_dataspace(dataspace_id); h5::close_dataset(dataset_id); switch (rank) { case 1: return h5::read_data<1, DataVector>(observation_group.id(), tensor_component); case 2: return h5::read_data<2, DataVector>(observation_group.id(), tensor_component); case 3: return h5::read_data<3, DataVector>(observation_group.id(), tensor_component); default: ERROR("Rank must be 1, 2, or 3. 
Received data with Rank = " << rank); } } std::vector<std::vector<size_t>> VolumeData::get_extents( const size_t observation_id) const noexcept { const std::string path = "ObservationId" + std::to_string(observation_id); detail::OpenGroup observation_group(volume_data_group_.id(), path, AccessType::ReadOnly); const auto dim = h5::read_value_attribute<size_t>(volume_data_group_.id(), "dimension"); const auto extents_per_element = static_cast<long>(dim); const auto total_extents = h5::read_data<1, std::vector<size_t>>( observation_group.id(), "total_extents"); std::vector<std::vector<size_t>> individual_extents; individual_extents.reserve(total_extents.size() / dim); for (auto iter = total_extents.begin(); iter != total_extents.end(); iter += extents_per_element) { individual_extents.emplace_back(iter, iter + extents_per_element); } return individual_extents; } std::pair<size_t, size_t> offset_and_length_for_grid( const std::string& grid_name, const std::vector<std::string>& all_grid_names, const std::vector<std::vector<size_t>>& all_extents) noexcept { auto found_grid_name = alg::find(all_grid_names, grid_name); if (found_grid_name == all_grid_names.end()) { ERROR("Found no grid named '" + grid_name + "'."); } else { const auto element_index = std::distance(all_grid_names.begin(), found_grid_name); const size_t element_data_offset = std::accumulate( all_extents.begin(), all_extents.begin() + element_index, 0_st, [](const size_t offset, const std::vector<size_t>& extents) noexcept { return offset + alg::accumulate(extents, 1_st, std::multiplies<>{}); }); const size_t element_data_length = alg::accumulate( gsl::at(all_extents, element_index), 1_st, std::multiplies<>{}); return {element_data_offset, element_data_length}; } } size_t VolumeData::get_dimension() const noexcept { return h5::read_value_attribute<double>(volume_data_group_.id(), "dimension"); } std::vector<std::vector<std::string>> VolumeData::get_bases( const size_t observation_id) const noexcept { const 
std::string path = "ObservationId" + std::to_string(observation_id); detail::OpenGroup observation_group(volume_data_group_.id(), path, AccessType::ReadOnly); const auto dim = h5::read_value_attribute<size_t>(volume_data_group_.id(), "dimension"); const auto bases_per_element = static_cast<long>(dim); const std::vector<int> bases_coded = h5::read_data<1, std::vector<int>>(observation_group.id(), "bases"); auto all_bases = h5_detail::decode_with_dictionary_name( "Basis dictionary", bases_coded, observation_group); std::vector<std::vector<std::string>> element_bases; for (auto iter = all_bases.begin(); iter != all_bases.end(); std::advance(iter, bases_per_element)) { element_bases.emplace_back(iter, std::next(iter, bases_per_element)); } return element_bases; } std::vector<std::vector<std::string>> VolumeData::get_quadratures( const size_t observation_id) const noexcept { const std::string path = "ObservationId" + std::to_string(observation_id); detail::OpenGroup observation_group(volume_data_group_.id(), path, AccessType::ReadOnly); const auto dim = h5::read_value_attribute<size_t>(volume_data_group_.id(), "dimension"); const auto quadratures_per_element = static_cast<long>(dim); const std::vector<int> quadratures_coded = h5::read_data<1, std::vector<int>>(observation_group.id(), "quadratures"); auto all_quadratures = h5_detail::decode_with_dictionary_name( "Quadrature dictionary", quadratures_coded, observation_group); std::vector<std::vector<std::string>> element_quadratures; for (auto iter = all_quadratures.begin(); iter != all_quadratures.end(); std::advance(iter, quadratures_per_element)) { element_quadratures.emplace_back(iter, std::next(iter, quadratures_per_element)); } return element_quadratures; } } // namespace h5 /// \endcond HIDDEN_SYMBOLS
45.381862
80
0.667946
[ "vector", "transform" ]
b1fbcc00c2d97da248592bf1d2533efbd1680db7
21,008
cc
C++
device/fido/pin.cc
Ron423c/chromium
2edf7b980065b648f8b2a6e52193d83832fe36b7
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
device/fido/pin.cc
Ron423c/chromium
2edf7b980065b648f8b2a6e52193d83832fe36b7
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
device/fido/pin.cc
Ron423c/chromium
2edf7b980065b648f8b2a6e52193d83832fe36b7
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
1
2021-03-07T14:20:02.000Z
2021-03-07T14:20:02.000Z
// Copyright 2019 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "device/fido/pin.h" #include <numeric> #include <string> #include <utility> #include "base/i18n/char_iterator.h" #include "base/strings/string_util.h" #include "base/strings/utf_string_conversions.h" #include "components/cbor/reader.h" #include "components/cbor/values.h" #include "components/cbor/writer.h" #include "device/fido/fido_constants.h" #include "device/fido/pin_internal.h" #include "third_party/boringssl/src/include/openssl/aes.h" #include "third_party/boringssl/src/include/openssl/ec.h" #include "third_party/boringssl/src/include/openssl/nid.h" #include "third_party/boringssl/src/include/openssl/sha.h" namespace device { namespace pin { namespace { uint8_t PermissionsToByte(base::span<const pin::Permissions> permissions) { return std::accumulate(permissions.begin(), permissions.end(), 0, [](uint8_t byte, pin::Permissions flag) { return byte |= static_cast<uint8_t>(flag); }); } } // namespace // HasAtLeastFourCodepoints returns true if |pin| is UTF-8 encoded and contains // four or more code points. This reflects the "4 Unicode characters" // requirement in CTAP2. 
static bool HasAtLeastFourCodepoints(const std::string& pin) { base::i18n::UTF8CharIterator it(pin); return it.Advance() && it.Advance() && it.Advance() && it.Advance(); } PINEntryError ValidatePIN(const std::string& pin, uint32_t min_pin_length, base::Optional<std::string> current_pin) { if (pin.size() < min_pin_length) { return PINEntryError::kTooShort; } if (pin.size() > kMaxBytes || pin.back() == 0 || !base::IsStringUTF8(pin)) { return PINEntryError::kInvalidCharacters; } if (!HasAtLeastFourCodepoints(pin)) { return PINEntryError::kTooShort; } if (pin == current_pin) { return pin::PINEntryError::kSameAsCurrentPIN; } return PINEntryError::kNoError; } PINEntryError ValidatePIN(const base::string16& pin16, uint32_t min_pin_length, base::Optional<std::string> current_pin) { std::string pin; if (!base::UTF16ToUTF8(pin16.c_str(), pin16.size(), &pin)) { return pin::PINEntryError::kInvalidCharacters; } return ValidatePIN(std::move(pin), min_pin_length, std::move(current_pin)); } // EncodePINCommand returns a CTAP2 PIN command for the operation |subcommand|. // Additional elements of the top-level CBOR map can be added with the optional // |add_additional| callback. 
static std::pair<CtapRequestCommand, base::Optional<cbor::Value>> EncodePINCommand( PINUVAuthProtocol protocol_version, Subcommand subcommand, std::function<void(cbor::Value::MapValue*)> add_additional = nullptr) { cbor::Value::MapValue map; map.emplace(static_cast<int>(RequestKey::kProtocol), static_cast<uint8_t>(protocol_version)); map.emplace(static_cast<int>(RequestKey::kSubcommand), static_cast<int>(subcommand)); if (add_additional) { add_additional(&map); } return std::make_pair(CtapRequestCommand::kAuthenticatorClientPin, cbor::Value(std::move(map))); } RetriesResponse::RetriesResponse() = default; // static base::Optional<RetriesResponse> RetriesResponse::ParsePinRetries( const base::Optional<cbor::Value>& cbor) { return RetriesResponse::Parse(std::move(cbor), static_cast<int>(ResponseKey::kRetries)); } // static base::Optional<RetriesResponse> RetriesResponse::ParseUvRetries( const base::Optional<cbor::Value>& cbor) { return RetriesResponse::Parse(std::move(cbor), static_cast<int>(ResponseKey::kUvRetries)); } // static base::Optional<RetriesResponse> RetriesResponse::Parse( const base::Optional<cbor::Value>& cbor, const int retries_key) { if (!cbor || !cbor->is_map()) { return base::nullopt; } const auto& response_map = cbor->GetMap(); auto it = response_map.find(cbor::Value(retries_key)); if (it == response_map.end() || !it->second.is_unsigned()) { return base::nullopt; } const int64_t retries = it->second.GetUnsigned(); if (retries > INT_MAX) { return base::nullopt; } RetriesResponse ret; ret.retries = static_cast<int>(retries); return ret; } KeyAgreementResponse::KeyAgreementResponse() = default; // static base::Optional<KeyAgreementResponse> KeyAgreementResponse::Parse( const base::Optional<cbor::Value>& cbor) { if (!cbor || !cbor->is_map()) { return base::nullopt; } const auto& response_map = cbor->GetMap(); // The ephemeral key is encoded as a COSE structure. 
auto it = response_map.find( cbor::Value(static_cast<int>(ResponseKey::kKeyAgreement))); if (it == response_map.end() || !it->second.is_map()) { return base::nullopt; } const auto& cose_key = it->second.GetMap(); return ParseFromCOSE(cose_key); } // static base::Optional<KeyAgreementResponse> KeyAgreementResponse::ParseFromCOSE( const cbor::Value::MapValue& cose_key) { // The COSE key must be a P-256 point. See // https://tools.ietf.org/html/rfc8152#section-7.1 for (const auto& pair : std::vector<std::pair<int, int>>({ {1 /* key type */, 2 /* elliptic curve, uncompressed */}, {3 /* algorithm */, -25 /* ECDH, ephemeral–static, HKDF-SHA-256 */}, {-1 /* curve */, 1 /* P-256 */}, })) { auto it = cose_key.find(cbor::Value(pair.first)); if (it == cose_key.end() || !it->second.is_integer() || it->second.GetInteger() != pair.second) { return base::nullopt; } } // See https://tools.ietf.org/html/rfc8152#section-13.1.1 const auto& x_it = cose_key.find(cbor::Value(-2)); const auto& y_it = cose_key.find(cbor::Value(-3)); if (x_it == cose_key.end() || y_it == cose_key.end() || !x_it->second.is_bytestring() || !y_it->second.is_bytestring()) { return base::nullopt; } const auto& x = x_it->second.GetBytestring(); const auto& y = y_it->second.GetBytestring(); KeyAgreementResponse ret; if (x.size() != sizeof(ret.x) || y.size() != sizeof(ret.y)) { return base::nullopt; } memcpy(ret.x, x.data(), sizeof(ret.x)); memcpy(ret.y, y.data(), sizeof(ret.y)); bssl::UniquePtr<EC_GROUP> group( EC_GROUP_new_by_curve_name(NID_X9_62_prime256v1)); // Check that the point is on the curve. 
auto point = PointFromKeyAgreementResponse(group.get(), ret); if (!point) { return base::nullopt; } return ret; } std::array<uint8_t, kP256X962Length> KeyAgreementResponse::X962() const { std::array<uint8_t, kP256X962Length> ret; static_assert(ret.size() == 1 + sizeof(this->x) + sizeof(this->y), "Bad length for return type"); ret[0] = POINT_CONVERSION_UNCOMPRESSED; memcpy(&ret[1], this->x, sizeof(this->x)); memcpy(&ret[1 + sizeof(this->x)], this->y, sizeof(this->y)); return ret; } SetRequest::SetRequest(PINUVAuthProtocol protocol, const std::string& pin, const KeyAgreementResponse& peer_key) : protocol_(protocol), peer_key_(peer_key) { DCHECK_EQ(ValidatePIN(pin), PINEntryError::kNoError); memset(pin_, 0, sizeof(pin_)); memcpy(pin_, pin.data(), pin.size()); } cbor::Value::MapValue EncodeCOSEPublicKey( base::span<const uint8_t, kP256X962Length> x962) { cbor::Value::MapValue cose_key; cose_key.emplace(1 /* key type */, 2 /* uncompressed elliptic curve */); cose_key.emplace(3 /* algorithm */, -25 /* ECDH, ephemeral–static, HKDF-SHA-256 */); cose_key.emplace(-1 /* curve */, 1 /* P-256 */); cose_key.emplace(-2 /* x */, x962.subspan(1, 32)); cose_key.emplace(-3 /* y */, x962.subspan(33, 32)); return cose_key; } ChangeRequest::ChangeRequest(PINUVAuthProtocol protocol, const std::string& old_pin, const std::string& new_pin, const KeyAgreementResponse& peer_key) : protocol_(protocol), peer_key_(peer_key) { uint8_t digest[SHA256_DIGEST_LENGTH]; SHA256(reinterpret_cast<const uint8_t*>(old_pin.data()), old_pin.size(), digest); memcpy(old_pin_hash_, digest, sizeof(old_pin_hash_)); DCHECK_EQ(ValidatePIN(new_pin), PINEntryError::kNoError); memset(new_pin_, 0, sizeof(new_pin_)); memcpy(new_pin_, new_pin.data(), new_pin.size()); } // static base::Optional<EmptyResponse> EmptyResponse::Parse( const base::Optional<cbor::Value>& cbor) { // Yubikeys can return just the status byte, and no CBOR bytes, for the empty // response, which will end up here with |cbor| being |nullopt|. 
This seems // wrong, but is handled. (The response should, instead, encode an empty CBOR // map.) if (cbor && (!cbor->is_map() || !cbor->GetMap().empty())) { return base::nullopt; } EmptyResponse ret; return ret; } TokenResponse::TokenResponse(PINUVAuthProtocol protocol) : protocol_(protocol) {} TokenResponse::~TokenResponse() = default; TokenResponse::TokenResponse(const TokenResponse&) = default; TokenResponse& TokenResponse::operator=(const TokenResponse&) = default; base::Optional<TokenResponse> TokenResponse::Parse( PINUVAuthProtocol protocol, base::span<const uint8_t> shared_key, const base::Optional<cbor::Value>& cbor) { if (!cbor || !cbor->is_map()) { return base::nullopt; } const auto& response_map = cbor->GetMap(); auto it = response_map.find(cbor::Value(static_cast<int>(ResponseKey::kPINToken))); if (it == response_map.end() || !it->second.is_bytestring()) { return base::nullopt; } const auto& encrypted_token = it->second.GetBytestring(); if (encrypted_token.size() % AES_BLOCK_SIZE != 0) { return base::nullopt; } std::vector<uint8_t> token = ProtocolVersion(protocol).Decrypt(shared_key, encrypted_token); // The token must have the correct size for the given protocol. switch (protocol) { case PINUVAuthProtocol::kV1: // In CTAP2.1, V1 tokens are fixed at 16 or 32 bytes. But in CTAP2.0 they // may be any multiple of 16 bytes. We don't know the CTAP version, so // only enforce the latter. 
if (token.empty() || token.size() % AES_BLOCK_SIZE != 0) { return base::nullopt; } break; case PINUVAuthProtocol::kV2: if (token.size() != 32u) { return base::nullopt; } break; } TokenResponse ret(protocol); ret.token_ = std::move(token); return ret; } std::pair<PINUVAuthProtocol, std::vector<uint8_t>> TokenResponse::PinAuth( base::span<const uint8_t> client_data_hash) const { return {protocol_, ProtocolVersion(protocol_).Authenticate(token_, client_data_hash)}; } // static std::pair<CtapRequestCommand, base::Optional<cbor::Value>> AsCTAPRequestValuePair(const PinRetriesRequest& request) { return EncodePINCommand(request.protocol, Subcommand::kGetRetries); } // static std::pair<CtapRequestCommand, base::Optional<cbor::Value>> AsCTAPRequestValuePair(const UvRetriesRequest& request) { return EncodePINCommand(request.protocol, Subcommand::kGetUvRetries); } // static std::pair<CtapRequestCommand, base::Optional<cbor::Value>> AsCTAPRequestValuePair(const KeyAgreementRequest& request) { return EncodePINCommand(request.protocol, Subcommand::kGetKeyAgreement); } // static std::pair<CtapRequestCommand, base::Optional<cbor::Value>> AsCTAPRequestValuePair(const SetRequest& request) { // See // https://fidoalliance.org/specs/fido-v2.0-rd-20180702/fido-client-to-authenticator-protocol-v2.0-rd-20180702.html#settingNewPin std::vector<uint8_t> shared_key; const Protocol& pin_protocol = ProtocolVersion(request.protocol_); auto cose_key = EncodeCOSEPublicKey( pin_protocol.Encapsulate(request.peer_key_, &shared_key)); static_assert((sizeof(request.pin_) % AES_BLOCK_SIZE) == 0, "pin_ is not a multiple of the AES block size"); std::vector<uint8_t> encrypted_pin = pin_protocol.Encrypt(shared_key, request.pin_); std::vector<uint8_t> pin_auth = pin_protocol.Authenticate(shared_key, encrypted_pin); return EncodePINCommand( request.protocol_, Subcommand::kSetPIN, [&cose_key, &encrypted_pin, &pin_auth](cbor::Value::MapValue* map) { map->emplace(static_cast<int>(RequestKey::kKeyAgreement), 
std::move(cose_key)); map->emplace(static_cast<int>(RequestKey::kNewPINEnc), std::move(encrypted_pin)); map->emplace(static_cast<int>(RequestKey::kPINAuth), std::move(pin_auth)); }); } // static std::pair<CtapRequestCommand, base::Optional<cbor::Value>> AsCTAPRequestValuePair(const ChangeRequest& request) { // See // https://fidoalliance.org/specs/fido-v2.0-rd-20180702/fido-client-to-authenticator-protocol-v2.0-rd-20180702.html#changingExistingPin std::vector<uint8_t> shared_key; const Protocol& pin_protocol = ProtocolVersion(request.protocol_); auto cose_key = EncodeCOSEPublicKey( pin_protocol.Encapsulate(request.peer_key_, &shared_key)); static_assert((sizeof(request.new_pin_) % AES_BLOCK_SIZE) == 0, "new_pin_ is not a multiple of the AES block size"); std::vector<uint8_t> encrypted_pin = pin_protocol.Encrypt(shared_key, request.new_pin_); static_assert((sizeof(request.old_pin_hash_) % AES_BLOCK_SIZE) == 0, "old_pin_hash_ is not a multiple of the AES block size"); std::vector<uint8_t> old_pin_hash_enc = pin_protocol.Encrypt(shared_key, request.old_pin_hash_); std::vector<uint8_t> ciphertexts_concat(encrypted_pin.size() + old_pin_hash_enc.size()); memcpy(ciphertexts_concat.data(), encrypted_pin.data(), encrypted_pin.size()); memcpy(ciphertexts_concat.data() + encrypted_pin.size(), old_pin_hash_enc.data(), old_pin_hash_enc.size()); std::vector<uint8_t> pin_auth = pin_protocol.Authenticate(shared_key, ciphertexts_concat); return EncodePINCommand( request.protocol_, Subcommand::kChangePIN, [&cose_key, &encrypted_pin, &old_pin_hash_enc, &pin_auth](cbor::Value::MapValue* map) { map->emplace(static_cast<int>(RequestKey::kKeyAgreement), std::move(cose_key)); map->emplace(static_cast<int>(RequestKey::kPINHashEnc), std::move(old_pin_hash_enc)); map->emplace(static_cast<int>(RequestKey::kNewPINEnc), std::move(encrypted_pin)); map->emplace(static_cast<int>(RequestKey::kPINAuth), std::move(pin_auth)); }); } // static std::pair<CtapRequestCommand, base::Optional<cbor::Value>> 
AsCTAPRequestValuePair(const ResetRequest&) { return std::make_pair(CtapRequestCommand::kAuthenticatorReset, base::nullopt); } TokenRequest::TokenRequest(PINUVAuthProtocol protocol, const KeyAgreementResponse& peer_key) : protocol_(protocol), public_key_( ProtocolVersion(protocol_).Encapsulate(peer_key, &shared_key_)) {} TokenRequest::~TokenRequest() = default; TokenRequest::TokenRequest(TokenRequest&& other) = default; const std::vector<uint8_t>& TokenRequest::shared_key() const { return shared_key_; } PinTokenRequest::PinTokenRequest(PINUVAuthProtocol protocol, const std::string& pin, const KeyAgreementResponse& peer_key) : TokenRequest(protocol, peer_key) { uint8_t digest[SHA256_DIGEST_LENGTH]; SHA256(reinterpret_cast<const uint8_t*>(pin.data()), pin.size(), digest); memcpy(pin_hash_, digest, sizeof(pin_hash_)); } PinTokenRequest::~PinTokenRequest() = default; PinTokenRequest::PinTokenRequest(PinTokenRequest&& other) = default; // static std::pair<CtapRequestCommand, base::Optional<cbor::Value>> AsCTAPRequestValuePair(const PinTokenRequest& request) { static_assert((sizeof(request.pin_hash_) % AES_BLOCK_SIZE) == 0, "pin_hash_ is not a multiple of the AES block size"); std::vector<uint8_t> encrypted_pin = ProtocolVersion(request.protocol_) .Encrypt(request.shared_key_, request.pin_hash_); return EncodePINCommand( request.protocol_, Subcommand::kGetPINToken, [&request, &encrypted_pin](cbor::Value::MapValue* map) { map->emplace(static_cast<int>(RequestKey::kKeyAgreement), EncodeCOSEPublicKey(request.public_key_)); map->emplace(static_cast<int>(RequestKey::kPINHashEnc), std::move(encrypted_pin)); }); } PinTokenWithPermissionsRequest::PinTokenWithPermissionsRequest( PINUVAuthProtocol protocol, const std::string& pin, const KeyAgreementResponse& peer_key, base::span<const pin::Permissions> permissions, const base::Optional<std::string> rp_id) : PinTokenRequest(protocol, pin, peer_key), permissions_(PermissionsToByte(permissions)), rp_id_(rp_id) {} // static 
std::pair<CtapRequestCommand, base::Optional<cbor::Value>> AsCTAPRequestValuePair(const PinTokenWithPermissionsRequest& request) { std::vector<uint8_t> encrypted_pin = ProtocolVersion(request.protocol_) .Encrypt(request.shared_key_, request.pin_hash_); return EncodePINCommand( request.protocol_, Subcommand::kGetPinUvAuthTokenUsingPinWithPermissions, [&request, &encrypted_pin](cbor::Value::MapValue* map) { map->emplace(static_cast<int>(RequestKey::kKeyAgreement), EncodeCOSEPublicKey(request.public_key_)); map->emplace(static_cast<int>(RequestKey::kPINHashEnc), std::move(encrypted_pin)); map->emplace(static_cast<int>(RequestKey::kPermissions), std::move(request.permissions_)); if (request.rp_id_) { map->emplace(static_cast<int>(RequestKey::kPermissionsRPID), *request.rp_id_); } }); } PinTokenWithPermissionsRequest::~PinTokenWithPermissionsRequest() = default; PinTokenWithPermissionsRequest::PinTokenWithPermissionsRequest( PinTokenWithPermissionsRequest&& other) = default; UvTokenRequest::UvTokenRequest(PINUVAuthProtocol protocol, const KeyAgreementResponse& peer_key, base::Optional<std::string> rp_id, base::span<const pin::Permissions> permissions) : TokenRequest(protocol, peer_key), rp_id_(rp_id), permissions_(PermissionsToByte(permissions)) {} UvTokenRequest::~UvTokenRequest() = default; UvTokenRequest::UvTokenRequest(UvTokenRequest&& other) = default; // static std::pair<CtapRequestCommand, base::Optional<cbor::Value>> AsCTAPRequestValuePair(const UvTokenRequest& request) { return EncodePINCommand( request.protocol_, Subcommand::kGetUvToken, [&request](cbor::Value::MapValue* map) { map->emplace(static_cast<int>(RequestKey::kKeyAgreement), EncodeCOSEPublicKey(request.public_key_)); map->emplace(static_cast<int>(RequestKey::kPermissions), request.permissions_); if (request.rp_id_) { map->emplace(static_cast<int>(RequestKey::kPermissionsRPID), *request.rp_id_); } }); } static std::vector<uint8_t> ConcatSalts( base::span<const uint8_t, 32> salt1, const 
base::Optional<std::array<uint8_t, 32>>& salt2) { const size_t salts_size = salt1.size() + (salt2.has_value() ? salt2->size() : 0); std::vector<uint8_t> salts(salts_size); memcpy(salts.data(), salt1.data(), salt1.size()); if (salt2.has_value()) { memcpy(salts.data() + salt1.size(), salt2->data(), salt2->size()); } return salts; } HMACSecretRequest::HMACSecretRequest( PINUVAuthProtocol protocol, const KeyAgreementResponse& peer_key, base::span<const uint8_t, 32> salt1, const base::Optional<std::array<uint8_t, 32>>& salt2) : protocol_(protocol), public_key_x962( ProtocolVersion(protocol_).Encapsulate(peer_key, &shared_key_)), encrypted_salts( ProtocolVersion(protocol_).Encrypt(shared_key_, ConcatSalts(salt1, salt2))), salts_auth(ProtocolVersion(protocol_).Authenticate(shared_key_, encrypted_salts)) {} HMACSecretRequest::~HMACSecretRequest() = default; HMACSecretRequest::HMACSecretRequest(const HMACSecretRequest& other) = default; base::Optional<std::vector<uint8_t>> HMACSecretRequest::Decrypt( base::span<const uint8_t> ciphertext) { if (ciphertext.size() != this->encrypted_salts.size()) { return base::nullopt; } return pin::ProtocolVersion(protocol_).Decrypt(shared_key_, ciphertext); } } // namespace pin } // namespace device
36.472222
137
0.676409
[ "vector" ]
b1fd685e5ba0df8bfc03f8e48392ff61990e27bb
1,662
cpp
C++
sse-sumbytes/int8_t/benchmark.cpp
clayne/toys
ec06411e2d3b920403607888d4a573e41390ee5b
[ "BSD-2-Clause" ]
null
null
null
sse-sumbytes/int8_t/benchmark.cpp
clayne/toys
ec06411e2d3b920403607888d4a573e41390ee5b
[ "BSD-2-Clause" ]
null
null
null
sse-sumbytes/int8_t/benchmark.cpp
clayne/toys
ec06411e2d3b920403607888d4a573e41390ee5b
[ "BSD-2-Clause" ]
null
null
null
#include <cstdio> #include <vector> #include "benchmark.h" #include "all.h" class Benchmark { std::vector<int8_t> input; size_t result; public: Benchmark(size_t size) : input(size) {} public: void run() { test("scalar", scalar_sumsignedbytes); test("scalar (C++)", scalar_cpp_sumsignedbytes); test("SSE", sse_sumsignedbytes); test("SSE (v2)", sse_sumsignedbytes_variant2); test("SSE (sadbw)", sse_sadbw_sumsignedbytes); test("SSE (sadbw, unrolled)", sse_sadbw_sumsignedbytes); #ifdef HAVE_AVX2 test("AVX2", avx2_sumsignedbytes); test("AVX2 (v2)", avx2_sumsignedbytes_variant2); test("AVX2 (sadbw)", avx2_sadbw_sumsignedbytes); test("AVX2 (sadbw, unrolled)", avx2_sadbw_unrolled4_sumsignedbytes); test("AVX2 (sadbw, variant)", avx2_sadbw_variant_sumsignedbytes); test("AVX2 (maddubs)", avx2_maddubs_sumsignedbytes); #endif } private: template <typename FUN> void test(const char* name, FUN function) { const size_t repeat = 10000; const size_t size = input.size(); auto wrapper = [this, function]() { result = function(&input[0], input.size()); }; BEST_TIME(/**/, wrapper(), name, repeat, size); } }; int main() { std::vector<size_t> sizes = {1024*4, 1024*16, 1024*32}; for (size_t size: sizes) { printf("element count %lu\n", size); Benchmark bench(size); bench.run(); } return 0; }
27.245902
78
0.561974
[ "vector" ]
b1fd79363bf9600a8e265f563b0d36cd43574dcc
9,245
cpp
C++
src/TGUI/Global.cpp
cyanskies/TGUI
9d84916313aacdfc33dc9a8b9e60609449fddce7
[ "Zlib" ]
null
null
null
src/TGUI/Global.cpp
cyanskies/TGUI
9d84916313aacdfc33dc9a8b9e60609449fddce7
[ "Zlib" ]
null
null
null
src/TGUI/Global.cpp
cyanskies/TGUI
9d84916313aacdfc33dc9a8b9e60609449fddce7
[ "Zlib" ]
null
null
null
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // // TGUI - Texus's Graphical User Interface // Copyright (C) 2012-2017 Bruno Van de Velde (vdv_b@tgui.eu) // // This software is provided 'as-is', without any express or implied warranty. // In no event will the authors be held liable for any damages arising from the use of this software. // // Permission is granted to anyone to use this software for any purpose, // including commercial applications, and to alter it and redistribute it freely, // subject to the following restrictions: // // 1. The origin of this software must not be misrepresented; // you must not claim that you wrote the original software. // If you use this software in a product, an acknowledgment // in the product documentation would be appreciated but is not required. // // 2. Altered source versions must be plainly marked as such, // and must not be misrepresented as being the original software. // // 3. This notice may not be removed or altered from any source distribution. 
// ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #include <TGUI/Global.hpp> #include <TGUI/Clipboard.hpp> #include <TGUI/Texture.hpp> #include <TGUI/Loading/Deserializer.hpp> #include <functional> #include <cctype> #include <cmath> ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// namespace tgui { Clipboard TGUI_Clipboard; bool TGUI_TabKeyUsageEnabled = true; std::string TGUI_ResourcePath = ""; ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void enableTabKeyUsage() { TGUI_TabKeyUsageEnabled = true; } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void disableTabKeyUsage() { TGUI_TabKeyUsageEnabled = false; } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void setResourcePath(const std::string& path) { TGUI_ResourcePath = path; if (!TGUI_ResourcePath.empty()) { if (TGUI_ResourcePath[TGUI_ResourcePath.length()-1] != '/') TGUI_ResourcePath.push_back('/'); } } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// const std::string& getResourcePath() { return TGUI_ResourcePath; } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// bool compareFloats(float x, float y) { return (std::abs(x - y) < 0.0000001f); } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// bool isWhitespace(char character) { if (character == ' ' || character == '\t' || character == '\r' || character == '\n') return true; else return false; } 
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// int stoi(const std::string& value) { int result = 0; std::istringstream iss(value); iss.imbue(std::locale::classic()); iss >> result; if (iss.fail()) result = 0; return result; } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// float stof(const std::string& value) { float result = 0; std::istringstream iss(value); iss.imbue(std::locale::classic()); iss >> result; if (iss.fail()) result = 0; return result; } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// bool extractBoolFromString(const std::string& property, const std::string& value) { if ((value == "true") || (value == "True") || (value == "TRUE") || (value == "1")) return true; else if ((value == "false") || (value == "False") || (value == "FALSE") || (value == "0")) return false; else throw Exception{"Failed to parse boolean value of property '" + property + "'."}; } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// bool removeWhitespace(const std::string& line, std::string::const_iterator& c) { while (c != line.end()) { if ((*c == ' ') || (*c == '\t') || (*c == '\r')) ++c; else return true; } return false; } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// std::string toLower(std::string str) { for (std::string::iterator i = str.begin(); i != str.end(); ++i) *i = static_cast<char>(std::tolower(*i)); return str; } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// std::string trim(std::string str) { str.erase(str.begin(), std::find_if(str.begin(), str.end(), std::not1(std::ptr_fun<int, int>(std::isspace)))); 
str.erase(std::find_if(str.rbegin(), str.rend(), std::not1(std::ptr_fun<int, int>(std::isspace))).base(), str.end()); return str; } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// std::vector<std::string> split(const std::string& str, char delim) { std::vector<std::string> tokens; std::size_t start = 0; std::size_t end = 0; while ((end = str.find(delim, start)) != std::string::npos) { tokens.push_back(str.substr(start, end - start)); start = end + 1; } tokens.push_back(str.substr(start)); return tokens; } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// sf::Color calcColorOpacity(const sf::Color& color, float alpha) { if (alpha == 1) return color; else return {color.r, color.g, color.b, static_cast<sf::Uint8>(color.a * alpha)}; } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// float getTextVerticalCorrection(const std::shared_ptr<sf::Font>& font, unsigned int characterSize, sf::Uint32 style) { if (!font) return 0; bool bold = (style & sf::Text::Bold) != 0; // Calculate the height of the first line (char size = everything above baseline, height + top = part below baseline) float lineHeight = characterSize + font->getGlyph('g', characterSize, bold).bounds.height + font->getGlyph('g', characterSize, bold).bounds.top; // Get the line spacing sfml returns float lineSpacing = font->getLineSpacing(characterSize); // Calculate the offset of the text return lineHeight - lineSpacing; } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// unsigned int findBestTextSize(const std::shared_ptr<sf::Font>& font, float height, int fit) { if (!font) return 0; if (height < 2) return 1; std::vector<unsigned int> textSizes(static_cast<std::size_t>(height)); for (unsigned int i = 0; i < 
static_cast<unsigned int>(height); ++i) textSizes[i] = i + 1; auto high = std::lower_bound(textSizes.begin(), textSizes.end(), height, [&font](unsigned int charSize, float h){ return font->getLineSpacing(charSize) < h; }); if (high == textSizes.end()) return static_cast<unsigned int>(height); float highLineSpacing = font->getLineSpacing(*high); if (highLineSpacing == height) return *high; auto low = high - 1; float lowLineSpacing = font->getLineSpacing(*low); if (fit < 0) return *low; else if (fit > 0) return *high; else { if (std::abs(height - lowLineSpacing) < std::abs(height - highLineSpacing)) return *low; else return *high; } } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
35.018939
168
0.40119
[ "vector" ]
5900e714fc75f116dc0eb416e69d4bac60139b5d
16,513
hpp
C++
include/System/Linq/Expressions/BlockExpressionList.hpp
v0idp/virtuoso-codegen
6f560f04822c67f092d438a3f484249072c1d21d
[ "Unlicense" ]
null
null
null
include/System/Linq/Expressions/BlockExpressionList.hpp
v0idp/virtuoso-codegen
6f560f04822c67f092d438a3f484249072c1d21d
[ "Unlicense" ]
null
null
null
include/System/Linq/Expressions/BlockExpressionList.hpp
v0idp/virtuoso-codegen
6f560f04822c67f092d438a3f484249072c1d21d
[ "Unlicense" ]
1
2022-03-30T21:07:35.000Z
2022-03-30T21:07:35.000Z
// Autogenerated from CppHeaderCreator // Created by Sc2ad // ========================================================================= #pragma once // Begin includes #include "beatsaber-hook/shared/utils/typedefs.h" #include "beatsaber-hook/shared/utils/byref.hpp" // Including type: System.Collections.Generic.IList`1 #include "System/Collections/Generic/IList_1.hpp" #include "beatsaber-hook/shared/utils/il2cpp-utils-methods.hpp" #include "beatsaber-hook/shared/utils/il2cpp-utils-properties.hpp" #include "beatsaber-hook/shared/utils/il2cpp-utils-fields.hpp" #include "beatsaber-hook/shared/utils/utils.h" #include "beatsaber-hook/shared/utils/typedefs-array.hpp" // Completed includes // Begin forward declares // Forward declaring namespace: System::Linq::Expressions namespace System::Linq::Expressions { // Forward declaring type: Expression class Expression; // Forward declaring type: BlockExpression class BlockExpression; } // Forward declaring namespace: System::Collections::Generic namespace System::Collections::Generic { // Forward declaring type: IEnumerator`1<T> template<typename T> class IEnumerator_1; } // Forward declaring namespace: System::Collections namespace System::Collections { // Forward declaring type: IEnumerator class IEnumerator; } // Completed forward declares // Type namespace: System.Linq.Expressions namespace System::Linq::Expressions { // Forward declaring type: BlockExpressionList class BlockExpressionList; } #include "beatsaber-hook/shared/utils/il2cpp-type-check.hpp" NEED_NO_BOX(::System::Linq::Expressions::BlockExpressionList); DEFINE_IL2CPP_ARG_TYPE(::System::Linq::Expressions::BlockExpressionList*, "System.Linq.Expressions", "BlockExpressionList"); // Type namespace: System.Linq.Expressions namespace System::Linq::Expressions { // Size: 0x20 #pragma pack(push, 1) // Autogenerated type: System.Linq.Expressions.BlockExpressionList // [TokenAttribute] Offset: FFFFFFFF // [DefaultMemberAttribute] Offset: 6A0CBC class BlockExpressionList : 
public ::Il2CppObject/*, public ::System::Collections::Generic::IList_1<::System::Linq::Expressions::Expression*>*/ { public: // Nested type: ::System::Linq::Expressions::BlockExpressionList::$GetEnumerator$d__18 class $GetEnumerator$d__18; public: // private readonly System.Linq.Expressions.BlockExpression _block // Size: 0x8 // Offset: 0x10 ::System::Linq::Expressions::BlockExpression* block; // Field size check static_assert(sizeof(::System::Linq::Expressions::BlockExpression*) == 0x8); // private readonly System.Linq.Expressions.Expression _arg0 // Size: 0x8 // Offset: 0x18 ::System::Linq::Expressions::Expression* arg0; // Field size check static_assert(sizeof(::System::Linq::Expressions::Expression*) == 0x8); public: // Creating interface conversion operator: operator ::System::Collections::Generic::IList_1<::System::Linq::Expressions::Expression*> operator ::System::Collections::Generic::IList_1<::System::Linq::Expressions::Expression*>() noexcept { return *reinterpret_cast<::System::Collections::Generic::IList_1<::System::Linq::Expressions::Expression*>*>(this); } // Get instance field reference: private readonly System.Linq.Expressions.BlockExpression _block [[deprecated("Use field access instead!")]] ::System::Linq::Expressions::BlockExpression*& dyn__block(); // Get instance field reference: private readonly System.Linq.Expressions.Expression _arg0 [[deprecated("Use field access instead!")]] ::System::Linq::Expressions::Expression*& dyn__arg0(); // public System.Linq.Expressions.Expression get_Item(System.Int32 index) // Offset: 0x11F4F94 ::System::Linq::Expressions::Expression* get_Item(int index); // public System.Void set_Item(System.Int32 index, System.Linq.Expressions.Expression value) // Offset: 0x11F4FC0 void set_Item(int index, ::System::Linq::Expressions::Expression* value); // public System.Int32 get_Count() // Offset: 0x11F51F4 int get_Count(); // public System.Boolean get_IsReadOnly() // Offset: 0x11F5214 bool get_IsReadOnly(); // System.Void 
.ctor(System.Linq.Expressions.BlockExpression provider, System.Linq.Expressions.Expression arg0) // Offset: 0x11F4E70 template<::il2cpp_utils::CreationType creationType = ::il2cpp_utils::CreationType::Temporary> static BlockExpressionList* New_ctor(::System::Linq::Expressions::BlockExpression* provider, ::System::Linq::Expressions::Expression* arg0) { static auto ___internal__logger = ::Logger::get().WithContext("::System::Linq::Expressions::BlockExpressionList::.ctor"); return THROW_UNLESS((::il2cpp_utils::New<BlockExpressionList*, creationType>(provider, arg0))); } // public System.Int32 IndexOf(System.Linq.Expressions.Expression item) // Offset: 0x11F4EA8 int IndexOf(::System::Linq::Expressions::Expression* item); // public System.Void Insert(System.Int32 index, System.Linq.Expressions.Expression item) // Offset: 0x11F4F3C void Insert(int index, ::System::Linq::Expressions::Expression* item); // public System.Void RemoveAt(System.Int32 index) // Offset: 0x11F4F68 void RemoveAt(int index); // public System.Void Add(System.Linq.Expressions.Expression item) // Offset: 0x11F4FEC void Add(::System::Linq::Expressions::Expression* item); // public System.Void Clear() // Offset: 0x11F5018 void Clear(); // public System.Boolean Contains(System.Linq.Expressions.Expression item) // Offset: 0x11F5044 bool Contains(::System::Linq::Expressions::Expression* item); // public System.Void CopyTo(System.Linq.Expressions.Expression[] array, System.Int32 index) // Offset: 0x11F5060 void CopyTo(::ArrayW<::System::Linq::Expressions::Expression*> array, int index); // public System.Boolean Remove(System.Linq.Expressions.Expression item) // Offset: 0x11F5240 bool Remove(::System::Linq::Expressions::Expression* item); // public System.Collections.Generic.IEnumerator`1<System.Linq.Expressions.Expression> GetEnumerator() // Offset: 0x11F526C ::System::Collections::Generic::IEnumerator_1<::System::Linq::Expressions::Expression*>* GetEnumerator(); // private System.Collections.IEnumerator 
System.Collections.IEnumerable.GetEnumerator() // Offset: 0x11F52D8 ::System::Collections::IEnumerator* System_Collections_IEnumerable_GetEnumerator(); }; // System.Linq.Expressions.BlockExpressionList #pragma pack(pop) static check_size<sizeof(BlockExpressionList), 24 + sizeof(::System::Linq::Expressions::Expression*)> __System_Linq_Expressions_BlockExpressionListSizeCheck; static_assert(sizeof(BlockExpressionList) == 0x20); } #include "beatsaber-hook/shared/utils/il2cpp-utils-methods.hpp" // Writing MetadataGetter for method: System::Linq::Expressions::BlockExpressionList::get_Item // Il2CppName: get_Item template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<::System::Linq::Expressions::Expression* (System::Linq::Expressions::BlockExpressionList::*)(int)>(&System::Linq::Expressions::BlockExpressionList::get_Item)> { static const MethodInfo* get() { static auto* index = &::il2cpp_utils::GetClassFromName("System", "Int32")->byval_arg; return ::il2cpp_utils::FindMethod(classof(System::Linq::Expressions::BlockExpressionList*), "get_Item", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{index}); } }; // Writing MetadataGetter for method: System::Linq::Expressions::BlockExpressionList::set_Item // Il2CppName: set_Item template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<void (System::Linq::Expressions::BlockExpressionList::*)(int, ::System::Linq::Expressions::Expression*)>(&System::Linq::Expressions::BlockExpressionList::set_Item)> { static const MethodInfo* get() { static auto* index = &::il2cpp_utils::GetClassFromName("System", "Int32")->byval_arg; static auto* value = &::il2cpp_utils::GetClassFromName("System.Linq.Expressions", "Expression")->byval_arg; return ::il2cpp_utils::FindMethod(classof(System::Linq::Expressions::BlockExpressionList*), "set_Item", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{index, value}); } }; // Writing MetadataGetter for method: 
System::Linq::Expressions::BlockExpressionList::get_Count // Il2CppName: get_Count template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<int (System::Linq::Expressions::BlockExpressionList::*)()>(&System::Linq::Expressions::BlockExpressionList::get_Count)> { static const MethodInfo* get() { return ::il2cpp_utils::FindMethod(classof(System::Linq::Expressions::BlockExpressionList*), "get_Count", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{}); } }; // Writing MetadataGetter for method: System::Linq::Expressions::BlockExpressionList::get_IsReadOnly // Il2CppName: get_IsReadOnly template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<bool (System::Linq::Expressions::BlockExpressionList::*)()>(&System::Linq::Expressions::BlockExpressionList::get_IsReadOnly)> { static const MethodInfo* get() { return ::il2cpp_utils::FindMethod(classof(System::Linq::Expressions::BlockExpressionList*), "get_IsReadOnly", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{}); } }; // Writing MetadataGetter for method: System::Linq::Expressions::BlockExpressionList::New_ctor // Il2CppName: .ctor // Cannot get method pointer of value based method overload from template for constructor! // Try using FindMethod instead! 
// Writing MetadataGetter for method: System::Linq::Expressions::BlockExpressionList::IndexOf // Il2CppName: IndexOf template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<int (System::Linq::Expressions::BlockExpressionList::*)(::System::Linq::Expressions::Expression*)>(&System::Linq::Expressions::BlockExpressionList::IndexOf)> { static const MethodInfo* get() { static auto* item = &::il2cpp_utils::GetClassFromName("System.Linq.Expressions", "Expression")->byval_arg; return ::il2cpp_utils::FindMethod(classof(System::Linq::Expressions::BlockExpressionList*), "IndexOf", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{item}); } }; // Writing MetadataGetter for method: System::Linq::Expressions::BlockExpressionList::Insert // Il2CppName: Insert template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<void (System::Linq::Expressions::BlockExpressionList::*)(int, ::System::Linq::Expressions::Expression*)>(&System::Linq::Expressions::BlockExpressionList::Insert)> { static const MethodInfo* get() { static auto* index = &::il2cpp_utils::GetClassFromName("System", "Int32")->byval_arg; static auto* item = &::il2cpp_utils::GetClassFromName("System.Linq.Expressions", "Expression")->byval_arg; return ::il2cpp_utils::FindMethod(classof(System::Linq::Expressions::BlockExpressionList*), "Insert", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{index, item}); } }; // Writing MetadataGetter for method: System::Linq::Expressions::BlockExpressionList::RemoveAt // Il2CppName: RemoveAt template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<void (System::Linq::Expressions::BlockExpressionList::*)(int)>(&System::Linq::Expressions::BlockExpressionList::RemoveAt)> { static const MethodInfo* get() { static auto* index = &::il2cpp_utils::GetClassFromName("System", "Int32")->byval_arg; return ::il2cpp_utils::FindMethod(classof(System::Linq::Expressions::BlockExpressionList*), "RemoveAt", 
std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{index}); } }; // Writing MetadataGetter for method: System::Linq::Expressions::BlockExpressionList::Add // Il2CppName: Add template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<void (System::Linq::Expressions::BlockExpressionList::*)(::System::Linq::Expressions::Expression*)>(&System::Linq::Expressions::BlockExpressionList::Add)> { static const MethodInfo* get() { static auto* item = &::il2cpp_utils::GetClassFromName("System.Linq.Expressions", "Expression")->byval_arg; return ::il2cpp_utils::FindMethod(classof(System::Linq::Expressions::BlockExpressionList*), "Add", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{item}); } }; // Writing MetadataGetter for method: System::Linq::Expressions::BlockExpressionList::Clear // Il2CppName: Clear template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<void (System::Linq::Expressions::BlockExpressionList::*)()>(&System::Linq::Expressions::BlockExpressionList::Clear)> { static const MethodInfo* get() { return ::il2cpp_utils::FindMethod(classof(System::Linq::Expressions::BlockExpressionList*), "Clear", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{}); } }; // Writing MetadataGetter for method: System::Linq::Expressions::BlockExpressionList::Contains // Il2CppName: Contains template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<bool (System::Linq::Expressions::BlockExpressionList::*)(::System::Linq::Expressions::Expression*)>(&System::Linq::Expressions::BlockExpressionList::Contains)> { static const MethodInfo* get() { static auto* item = &::il2cpp_utils::GetClassFromName("System.Linq.Expressions", "Expression")->byval_arg; return ::il2cpp_utils::FindMethod(classof(System::Linq::Expressions::BlockExpressionList*), "Contains", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{item}); } }; // Writing MetadataGetter for method: 
System::Linq::Expressions::BlockExpressionList::CopyTo // Il2CppName: CopyTo template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<void (System::Linq::Expressions::BlockExpressionList::*)(::ArrayW<::System::Linq::Expressions::Expression*>, int)>(&System::Linq::Expressions::BlockExpressionList::CopyTo)> { static const MethodInfo* get() { static auto* array = &il2cpp_functions::array_class_get(::il2cpp_utils::GetClassFromName("System.Linq.Expressions", "Expression"), 1)->byval_arg; static auto* index = &::il2cpp_utils::GetClassFromName("System", "Int32")->byval_arg; return ::il2cpp_utils::FindMethod(classof(System::Linq::Expressions::BlockExpressionList*), "CopyTo", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{array, index}); } }; // Writing MetadataGetter for method: System::Linq::Expressions::BlockExpressionList::Remove // Il2CppName: Remove template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<bool (System::Linq::Expressions::BlockExpressionList::*)(::System::Linq::Expressions::Expression*)>(&System::Linq::Expressions::BlockExpressionList::Remove)> { static const MethodInfo* get() { static auto* item = &::il2cpp_utils::GetClassFromName("System.Linq.Expressions", "Expression")->byval_arg; return ::il2cpp_utils::FindMethod(classof(System::Linq::Expressions::BlockExpressionList*), "Remove", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{item}); } }; // Writing MetadataGetter for method: System::Linq::Expressions::BlockExpressionList::GetEnumerator // Il2CppName: GetEnumerator template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<::System::Collections::Generic::IEnumerator_1<::System::Linq::Expressions::Expression*>* (System::Linq::Expressions::BlockExpressionList::*)()>(&System::Linq::Expressions::BlockExpressionList::GetEnumerator)> { static const MethodInfo* get() { return ::il2cpp_utils::FindMethod(classof(System::Linq::Expressions::BlockExpressionList*), 
"GetEnumerator", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{}); } }; // Writing MetadataGetter for method: System::Linq::Expressions::BlockExpressionList::System_Collections_IEnumerable_GetEnumerator // Il2CppName: System.Collections.IEnumerable.GetEnumerator template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<::System::Collections::IEnumerator* (System::Linq::Expressions::BlockExpressionList::*)()>(&System::Linq::Expressions::BlockExpressionList::System_Collections_IEnumerable_GetEnumerator)> { static const MethodInfo* get() { return ::il2cpp_utils::FindMethod(classof(System::Linq::Expressions::BlockExpressionList*), "System.Collections.IEnumerable.GetEnumerator", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{}); } };
63.511538
279
0.749349
[ "vector" ]
59071357bc54e89c3f8b43d760fed8a4be0806d4
4,209
hpp
C++
src/common/comm/l0/devices/ccl_gpu_base_comm.hpp
sazanovd/oneCCL
18a54aafb4e441e0f51d143d047520457b710c0d
[ "Apache-2.0" ]
null
null
null
src/common/comm/l0/devices/ccl_gpu_base_comm.hpp
sazanovd/oneCCL
18a54aafb4e441e0f51d143d047520457b710c0d
[ "Apache-2.0" ]
null
null
null
src/common/comm/l0/devices/ccl_gpu_base_comm.hpp
sazanovd/oneCCL
18a54aafb4e441e0f51d143d047520457b710c0d
[ "Apache-2.0" ]
null
null
null
/* Copyright 2016-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include <map> #include <memory> #include <list> #include <set> #include <vector> #include "coll/algorithms/algorithms_enum.hpp" #include "common/comm/l0/device_group_routing_schema.hpp" #include "common/comm/l0/gpu_device_types.hpp" #include "common/comm/l0/modules/ring/allgatherv_entry_module.hpp" #include "common/comm/l0/modules/ring/allreduce_entry_module.hpp" #include "common/comm/l0/modules/ring/alltoallv_entry_module.hpp" #include "common/comm/l0/modules/ring/bcast_entry_module.hpp" #include "common/comm/l0/modules/ring/reduce_entry_module.hpp" #include "common/comm/l0/modules/a2a/allreduce_module.hpp" #include "common/comm/l0/modules/supported_modules.hpp" #include "common/comm/l0/modules/modules_source_data.hpp" #include "common/comm/l0/gpu_comm_utils.hpp" namespace native { template <class gpu_impl, gpu_types type> class ccl_gpu_base_comm { public: using comm_rank_t = int; using type_idx_t = typename std::underlying_type<gpu_types>::type; ccl_gpu_base_comm(ccl_device& assigned_device, comm_rank_t idx) : index_in_group(idx), device(assigned_device) {} ~ccl_gpu_base_comm() = default; gpu_impl* get_this() { return static_cast<gpu_impl*>(this); } const gpu_impl* get_this() const { return static_cast<const gpu_impl*>(this); } static constexpr const char* name() { return gpu_impl::name_impl(); } std::string to_string() const { return get_this()->to_string_impl(); } static constexpr type_idx_t 
type_idx() { return static_cast<type_idx_t>(type); } ccl_device& get_device() { return device; } [[deprecated]] comm_rank_t get_index_in_group() const { return index_in_group; } template <ccl::group_split_type group_id, ccl::device_topology_type class_id> bool reset_rank(comm_rank_t new_rank, comm_rank_t new_size) { rank = new_rank; size = new_size; return device_routing_web.insert<group_id, class_id>(new_rank, new_size); //consider inheritance } template <ccl::group_split_type group_id, ccl::device_topology_type class_id> const topology_addr<group_id, class_id>& get_comm_data() const { return device_routing_web.get<group_id, class_id>(); } template <ccl::group_split_type group_id, ccl::device_topology_type class_id> bool is_registered() const { return device_routing_web.is_registered<group_id, class_id>(); } template <ccl::group_split_type group_id, ccl::device_topology_type class_id> std::string comm_to_str() const { return device_routing_web.to_string<group_id, class_id>(); } std::string comm_to_str() const { return device_routing_web.to_string(); } template <ccl_coll_type module_type, ccl::group_split_type group_id, ccl::device_topology_type class_id, template <ccl_coll_type, ccl::group_split_type, ccl::device_topology_type> class module_impl> static std::shared_ptr<module_impl<module_type, group_id, class_id>>& get_gpu_module_unsafe( supported_device_modules<module_impl>& modules) { return std::get<utils::enum_to_underlying(class_id)>( std::get<utils::enum_to_underlying(group_id)>(std::get<module_type>(modules))); } protected: size_t index_in_group; aggregated_topology_addr device_routing_web; ccl_device& device; mutable int rank; //TODO mutable int size; //TODO }; } // namespace native
31.886364
96
0.707531
[ "vector" ]
590917e255dc7dc8fab8b1b2aaef73fe8d71066b
7,278
cc
C++
Microassembler/Path.cc
LukeGoodsell/scalpel-shareddeps
07653cad8a2917855b86b0c25b29c48536253fb0
[ "MIT" ]
null
null
null
Microassembler/Path.cc
LukeGoodsell/scalpel-shareddeps
07653cad8a2917855b86b0c25b29c48536253fb0
[ "MIT" ]
null
null
null
Microassembler/Path.cc
LukeGoodsell/scalpel-shareddeps
07653cad8a2917855b86b0c25b29c48536253fb0
[ "MIT" ]
null
null
null
#include "Path.hh" /****************************************************************** ** Path.cc ** ** Path of a de Bruijn graph ** Routines to extract sequence and coverage information ** ** Authors: Giuseppe Narzisi & Michael C. Schatz ** Date: December 11, 2013 ** *******************************************************************/ string Path_t::pathstr() { string retval; for (unsigned int i = 0; i < nodes_m.size(); i++) { if (i) { retval += ":"; } retval += nodes_m[i]->nodeid_m; if (i < edgedir_m.size()) { retval += ":"; retval += Edge_t::toString(edgedir_m[i]); } } return retval; } // pathlen ////////////////////////////////////////////////////////////// int Path_t::pathlen() { int len = 0; for (unsigned int i = 0; i < nodes_m.size(); i++) { Node_t * n = nodes_m[i]; if (!n->isRef_m) { len++; } } return len; } // str ////////////////////////////////////////////////////////////// string Path_t::str() { string retval; Ori_t dir = Edge_t::edgedir_start(edgedir_m[0]); for (unsigned int i = 0; i < nodes_m.size(); i++) { Node_t * n = nodes_m[i]; string nstr = n->str_m; if (dir == R) { nstr = rc_str(nstr); } if (!n->isRef_m) { if (retval.length() > 0) { assert(retval.substr(retval.length()-K+1) == nstr.substr(0, K-1)); retval += nstr.substr(K-1); } else { retval = nstr; } } if (i < edgedir_m.size()) { dir = Edge_t::edgedir_dest(edgedir_m[i]); } } return retval; } // coverage distribution for nodes in string format ////////////////////////////////////////////////////////////// string Path_t::covstr() { stringstream ss; vector<float> node_coverage; Ori_t dir = Edge_t::edgedir_start(edgedir_m[0]); for (unsigned int i = 0; i < nodes_m.size(); i++) { node_coverage.clear(); Node_t * n = nodes_m[i]; if (dir == R) { for (unsigned int j=n->cov_distr.size(); j>0; j--) { node_coverage.push_back(n->cov_distr[j-1]); } } else { for (unsigned int j=0; j < n->cov_distr.size(); j++) { node_coverage.push_back(n->cov_distr[j]); } } if (!n->isRef_m) { if ((ss.str()).size() == 0) { // 
first node for (unsigned int j=0; j < node_coverage.size(); j++) { if (i != (nodes_m.size()-1)) { ss << node_coverage[j] << " "; } else { ss << node_coverage[j]; } } } else { // not the first node: update coverage of overlapping region // add coverage info for the new base-pairs for (unsigned int j = (K-1); j < node_coverage.size(); j++) { if (i != (nodes_m.size()-1)) { ss << node_coverage[j] << " "; } else { ss << node_coverage[j]; } } } } if (i < edgedir_m.size()) { dir = Edge_t::edgedir_dest(edgedir_m[i]); } } return ss.str(); } // coverage distribution for nodes ////////////////////////////////////////////////////////////// vector<int> Path_t::covDistr() { vector<int> path_coverage; vector<int> node_coverage; Ori_t dir = Edge_t::edgedir_start(edgedir_m[0]); //cerr << "Num nodes in path: " << nodes_m.size() << endl; path_coverage.clear(); for (unsigned int i = 0; i < nodes_m.size(); i++) { node_coverage.clear(); Node_t * n = nodes_m[i]; if (dir == R) { for (unsigned int j=n->cov_distr.size(); j>0; j--) { node_coverage.push_back(n->cov_distr[j-1]); } } else { for (unsigned int j=0; j < n->cov_distr.size(); j++) { node_coverage.push_back(n->cov_distr[j]); } } if (!n->isRef_m) { if (path_coverage.size() == 0) { // first node for (unsigned int j=0; j < node_coverage.size(); j++) { path_coverage.push_back(node_coverage[j]); } } else { // not the first node: update coverage of overlapping region // add coverage info for the new base-pairs for (unsigned int j = (K-1); j < node_coverage.size(); j++) { path_coverage.push_back(node_coverage[j]); } } } if (i < edgedir_m.size()) { dir = Edge_t::edgedir_dest(edgedir_m[i]); } } return path_coverage; } // coverage at position ////////////////////////////////////////////////////////////// int Path_t::covAt(int pos) { int retval = -1; int p = 0; vector<int> coverage; Ori_t dir = Edge_t::edgedir_start(edgedir_m[0]); for (unsigned int i = 0; i < nodes_m.size(); i++) { coverage.clear(); Node_t * n = nodes_m[i]; if (dir == R) { for 
(unsigned int j=n->cov_distr.size(); j>0; j--) { coverage.push_back(n->cov_distr[j-1]); } } else { for (unsigned int j=0; j < n->cov_distr.size(); j++) { coverage.push_back(n->cov_distr[j]); } } if (!n->isRef_m) { unsigned int j = 0; if (p > 0) { // if not first node, scan only the extra base-pairs for j = K-1; } //for (; j < n->cov_distr.size(); j++) { for (; j < coverage.size(); j++) { //if(p == pos) { return n->cov_distr[j]; } if(p == pos) { return coverage[j]; } p++; } } if (i < edgedir_m.size()) { dir = Edge_t::edgedir_dest(edgedir_m[i]); } } return retval; } // coverage distribution for edges ////////////////////////////////////////////////////////////// vector<float> Path_t::readCovNodes() { vector<float> nodes_coverage; //cerr << "Num Nodes in path: " << nodes_m.size() << endl; nodes_coverage.clear(); for (unsigned int i = 1; i < nodes_m.size(); i++) { Node_t* n1 = nodes_m[i-1]; Node_t* n2 = nodes_m[i]; nodes_coverage.push_back(n1->readOverlaps(*n2)); } return nodes_coverage; } // cov ////////////////////////////////////////////////////////////// float Path_t::cov() { float covsum = 0; float strlen = 0; for (unsigned int i = 0; i < nodes_m.size(); i++) { Node_t * n = nodes_m[i]; if (!n->isRef_m) { int merlen = n->strlen() - K + 1; covsum += n->cov_m * merlen; strlen += merlen; } } return covsum / strlen; } // mincov ////////////////////////////////////////////////////////////// float Path_t::mincov() { float mincov = -1; for (unsigned int i = 0; i < nodes_m.size(); i++) { Node_t * n = nodes_m[i]; if (!n->isRef_m) { if ((mincov == -1) || (n->cov_m < mincov)) { mincov = n->cov_m; } } } return mincov; } // maxcov ////////////////////////////////////////////////////////////// float Path_t::maxcov() { float maxcov = -1; for (unsigned int i = 0; i < nodes_m.size(); i++) { Node_t * n = nodes_m[i]; if (!n->isRef_m) { if ((maxcov == -1) || (n->cov_m > maxcov)) { maxcov = n->cov_m; } } } return maxcov; } // pathcontig 
////////////////////////////////////////////////////////////// Node_t * Path_t::pathcontig(int pos) { int curpos = 0; for (unsigned int i = 0; i < nodes_m.size(); i++) { Node_t * n = nodes_m[i]; if (!n->isRef_m) { int span = n->str_m.length(); if (curpos + span >= pos) { // in the right node return n; } curpos += span - K + 1; } } return NULL; } // contains ////////////////////////////////////////////////////////////// int Path_t::hasCycle(Node_t * node) { if (hasCycle_m) return hasCycle_m; for (vector<Node_t *>::iterator ni = nodes_m.begin(); ni != nodes_m.end(); ni++) { if (*ni == node) { hasCycle_m = 1; return 1; } } return 0; }
18.806202
81
0.497389
[ "vector" ]
590920ea4e6cb64483f9d4b94d9bd8c84c105237
1,836
cc
C++
src/command.cc
oftc/oftc-ircd
4d03219f6b7e2aeda8b7848eea4294b7b85aad12
[ "MIT" ]
2
2015-01-11T19:14:24.000Z
2016-12-08T16:00:10.000Z
src/command.cc
oftc/oftc-ircd
4d03219f6b7e2aeda8b7848eea4294b7b85aad12
[ "MIT" ]
null
null
null
src/command.cc
oftc/oftc-ircd
4d03219f6b7e2aeda8b7848eea4294b7b85aad12
[ "MIT" ]
3
2021-05-02T17:15:51.000Z
2021-05-02T17:16:04.000Z
/* Copyright (c) 2012 Stuart Walsh Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "stdinc.h" #include "command.h" #include <algorithm> using std::transform; Command::Command() { Logging::trace << "Created Command: " << this << Logging::endl; } Command::~Command() { Logging::trace << "Destroyed Command: " << this << Logging::endl; } Command::Command(CommandHandler handler, string name, AccessLevel level=Registered, unsigned int min_args=0, unsigned int max_args=0, int rate_control=0, void *data=0) : handler(handler), name(name), min_access(level), min_args(min_args), max_args(max_args), rate_control(rate_control), data(data) { Logging::trace << "Created Command: " << this << Logging::endl; transform(name.begin(), name.end(), name.begin(), toupper); }
32.785714
82
0.732026
[ "transform" ]
590be72953d90142f802f1a10dcd2d60ed4f332d
5,302
cpp
C++
src/stereo_visual_slam_main/visualization.cpp
shangzhouye/stereo-visual-slam
23abdb95b08a69e0ae630d4f4e6c3a64248b1284
[ "MIT" ]
34
2020-03-25T02:36:15.000Z
2022-03-25T07:20:51.000Z
src/stereo_visual_slam_main/visualization.cpp
ujasmandavia/STEREO-VISUAL-SLAM
cb34d91319f4f03c3b047d12016f40dd9bae6ec3
[ "MIT" ]
2
2020-05-07T20:21:36.000Z
2021-05-27T17:55:02.000Z
src/stereo_visual_slam_main/visualization.cpp
ujasmandavia/STEREO-VISUAL-SLAM
cb34d91319f4f03c3b047d12016f40dd9bae6ec3
[ "MIT" ]
7
2020-10-23T22:58:38.000Z
2021-11-23T04:13:34.000Z
/// \file /// \brief Visualization module #include <cmath> #include <iostream> #include <stereo_visual_slam_main/library_include.hpp> #include <stereo_visual_slam_main/types_def.hpp> #include <vector> #include <string> #include <unistd.h> #include <stereo_visual_slam_main/visualization.hpp> #include <tf/transform_broadcaster.h> #include <tf/transform_datatypes.h> #include <visualization_msgs/Marker.h> namespace vslam { int VslamVisual::points_to_feature_map(const std::vector<cv::Point3f> &point_3d) { if (point_3d.size() == 0) { std::cout << "Invalid input: empty 3D points" << std::endl; } const int num_channels = 3; // x y z feature_map_ = sensor_msgs::PointCloud2(); feature_map_.header.stamp = ros::Time::now(); // Modify this to current frame feature_map_.header.frame_id = "/map"; feature_map_.height = 1; feature_map_.width = point_3d.size(); feature_map_.is_bigendian = false; feature_map_.is_dense = true; feature_map_.point_step = num_channels * sizeof(float); feature_map_.row_step = feature_map_.point_step * feature_map_.width; feature_map_.fields.resize(num_channels); std::string channel_id[] = {"x", "y", "z"}; for (int i = 0; i < num_channels; i++) { feature_map_.fields[i].name = channel_id[i]; feature_map_.fields[i].offset = i * sizeof(float); feature_map_.fields[i].count = 1; feature_map_.fields[i].datatype = sensor_msgs::PointField::FLOAT32; } feature_map_.data.resize(feature_map_.row_step * feature_map_.height); unsigned char *feature_map__data_ptr = &(feature_map_.data[0]); float data_array[num_channels]; for (unsigned int i = 0; i < feature_map_.width; i++) { data_array[0] = point_3d.at(i).x; data_array[1] = point_3d.at(i).y; data_array[2] = point_3d.at(i).z; memcpy(feature_map__data_ptr + (i * feature_map_.point_step), data_array, num_channels * sizeof(float)); } return 0; } int VslamVisual::publish_feature_map(const std::vector<cv::Point3f> &point_3d) { points_to_feature_map(point_3d); feature_map_publisher_.publish(feature_map_); return 0; } int 
VslamVisual::publish_transform(const SE3 &T_c_w) { SE3 T_w_c; T_w_c = T_c_w.inverse(); // T_world_current(camera) Eigen::Matrix3d rotation = T_w_c.rotationMatrix(); Eigen::Vector3d translation = T_w_c.translation(); // extract sohpus transformation to tf format tf::Matrix3x3 tf_rotation(rotation(0, 0), rotation(0, 1), rotation(0, 2), rotation(1, 0), rotation(1, 1), rotation(1, 2), rotation(2, 0), rotation(2, 1), rotation(2, 2)); tf::Vector3 tf_translation(translation(0), translation(1), translation(2)); tf::Transform tf_transformation(tf_rotation, tf_translation); // publish the tf static tf::TransformBroadcaster tf_broadcaster; tf_broadcaster.sendTransform(tf::StampedTransform(tf_transformation, ros::Time::now(), "/map", "/camera")); } void VslamVisual::publish_fixed_pose(const Frame &frame) { visualization_msgs::Marker marker; marker.header.frame_id = "/map"; marker.header.stamp = ros::Time(0); marker.ns = "fixed_pose"; marker.id = frame.frame_id_; uint32_t shape = visualization_msgs::Marker::CUBE; marker.type = shape; marker.action = visualization_msgs::Marker::ADD; SE3 T_w_c = frame.T_c_w_.inverse(); marker.pose.position.x = T_w_c.translation()(0); marker.pose.position.y = T_w_c.translation()(1); marker.pose.position.z = T_w_c.translation()(2); marker.pose.orientation.x = T_w_c.unit_quaternion().x(); marker.pose.orientation.y = T_w_c.unit_quaternion().y(); marker.pose.orientation.z = T_w_c.unit_quaternion().z(); marker.pose.orientation.w = T_w_c.unit_quaternion().w(); marker.scale.x = 5; marker.scale.y = 5; marker.scale.z = 5; marker.color.r = 0.0; marker.color.g = 0.0; marker.color.b = 1.0; marker.color.a = 1.0; marker.lifetime = ros::Duration(); fixed_pose_pub_.publish(marker); ros::spinOnce(); } visualization_msgs::Marker VslamVisual::create_pose_marker(const Frame &frame) { visualization_msgs::Marker marker; marker.header.frame_id = "/map"; marker.header.stamp = ros::Time(0); marker.ns = "fixed_pose"; marker.id = frame.frame_id_; uint32_t shape = 
visualization_msgs::Marker::CUBE; marker.type = shape; marker.action = visualization_msgs::Marker::ADD; SE3 T_w_c = frame.T_c_w_.inverse(); marker.pose.position.x = T_w_c.translation()(0); marker.pose.position.y = T_w_c.translation()(1); marker.pose.position.z = T_w_c.translation()(2); marker.pose.orientation.x = T_w_c.unit_quaternion().x(); marker.pose.orientation.y = T_w_c.unit_quaternion().y(); marker.pose.orientation.z = T_w_c.unit_quaternion().z(); marker.pose.orientation.w = T_w_c.unit_quaternion().w(); marker.scale.x = 5; marker.scale.y = 5; marker.scale.z = 5; marker.color.r = 0.0; marker.color.g = 1.0; marker.color.b = 0.0; marker.color.a = 1.0; // publishing at around 4 Hz marker.lifetime = ros::Duration(1.0 / 4.0); return marker; } } // namespace vslam
29.455556
112
0.671256
[ "shape", "vector", "transform", "3d" ]
590bf269c835ea4be602f0033c09925a1944a8f3
2,546
cc
C++
third_party/blink/renderer/core/workers/global_scope_creation_params.cc
zipated/src
2b8388091c71e442910a21ada3d97ae8bc1845d3
[ "BSD-3-Clause" ]
2,151
2020-04-18T07:31:17.000Z
2022-03-31T08:39:18.000Z
third_party/blink/renderer/core/workers/global_scope_creation_params.cc
cangulcan/src
2b8388091c71e442910a21ada3d97ae8bc1845d3
[ "BSD-3-Clause" ]
395
2020-04-18T08:22:18.000Z
2021-12-08T13:04:49.000Z
third_party/blink/renderer/core/workers/global_scope_creation_params.cc
cangulcan/src
2b8388091c71e442910a21ada3d97ae8bc1845d3
[ "BSD-3-Clause" ]
338
2020-04-18T08:03:10.000Z
2022-03-29T12:33:22.000Z
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "third_party/blink/renderer/core/workers/global_scope_creation_params.h" #include <memory> #include "third_party/blink/renderer/platform/network/content_security_policy_parsers.h" namespace blink { GlobalScopeCreationParams::GlobalScopeCreationParams( const KURL& script_url, ScriptType script_type, const String& user_agent, const Vector<CSPHeaderAndType>* content_security_policy_parsed_headers, ReferrerPolicy referrer_policy, const SecurityOrigin* starter_origin, bool starter_secure_context, WorkerClients* worker_clients, mojom::IPAddressSpace address_space, const Vector<String>* origin_trial_tokens, const base::UnguessableToken& parent_devtools_token, std::unique_ptr<WorkerSettings> worker_settings, V8CacheOptions v8_cache_options, WorkletModuleResponsesMap* module_responses_map, service_manager::mojom::blink::InterfaceProviderPtrInfo interface_provider_info, BeginFrameProviderParams begin_frame_provider_params) : script_url(script_url.Copy()), script_type(script_type), user_agent(user_agent.IsolatedCopy()), referrer_policy(referrer_policy), starter_origin(starter_origin ? 
starter_origin->IsolatedCopy() : nullptr), starter_secure_context(starter_secure_context), worker_clients(worker_clients), address_space(address_space), parent_devtools_token(parent_devtools_token), worker_settings(std::move(worker_settings)), v8_cache_options(v8_cache_options), module_responses_map(module_responses_map), interface_provider(std::move(interface_provider_info)), begin_frame_provider_params(std::move(begin_frame_provider_params)) { this->content_security_policy_parsed_headers = std::make_unique<Vector<CSPHeaderAndType>>(); if (content_security_policy_parsed_headers) { for (const auto& header : *content_security_policy_parsed_headers) { CSPHeaderAndType copied_header(header.first.IsolatedCopy(), header.second); this->content_security_policy_parsed_headers->push_back(copied_header); } } this->origin_trial_tokens = std::make_unique<Vector<String>>(); if (origin_trial_tokens) { for (const String& token : *origin_trial_tokens) this->origin_trial_tokens->push_back(token.IsolatedCopy()); } } } // namespace blink
41.064516
88
0.764336
[ "vector" ]
59123faf78fb3d09b085cdda55e0dd2fdac45e05
28,715
hpp
C++
kernel/src/modelingTools/NewtonEulerDS.hpp
siconos/siconos-deb
2739a23f23d797dbfecec79d409e914e13c45c67
[ "Apache-2.0" ]
null
null
null
kernel/src/modelingTools/NewtonEulerDS.hpp
siconos/siconos-deb
2739a23f23d797dbfecec79d409e914e13c45c67
[ "Apache-2.0" ]
null
null
null
kernel/src/modelingTools/NewtonEulerDS.hpp
siconos/siconos-deb
2739a23f23d797dbfecec79d409e914e13c45c67
[ "Apache-2.0" ]
null
null
null
/* Siconos is a program dedicated to modeling, simulation and control * of non smooth dynamical systems. * * Copyright 2016 INRIA. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** \file NewtonEulerDS.hpp */ #ifndef NEWTONEULERNLDS_H #define NEWTONEULERNLDS_H #include "DynamicalSystem.hpp" #include "BoundaryCondition.hpp" /** Pointer to function for plug-in. */ typedef void (*FInt_NE)(double t, double* q, double* v, double *f, unsigned int size_z, double* z); typedef void (*FExt_NE)(double t, double* f, unsigned int size_z, double *z); void computeMObjToAbs(SP::SiconosVector q, SP::SimpleMatrix mObjToAbs); void computeT(SP::SiconosVector q, SP::SimpleMatrix T); /** \class NewtonEulerDS * \brief NewtonEuler non linear dynamical systems - Second Order Non Linear Dynamical Systems. * NewtonEuler non linear dynamical systems - Derived from DynamicalSystem - * * The equations of motion in the Newton-Euler formalism can be stated as * \f{equation} * \label{eq:NewtonEuler} * \left\{\begin{array}{rcl} * M \dot v + F_{int}(q,v, \Omega, t)&=& F_{ext}(t), \\ * I \dot \Omega + \Omega \wedge I\Omega + M_{int}(q,v, \Omega, t) &=& M_{ext}(t), \\ * \dot q &=& T(q) [ v, \Omega] \\ * \dot R &=& R \tilde \Omega,\quad R^{-1}=R^T,\quad \det(R)=1 . * \end{array}\right. 
* \f} * with * <ul> * <li> \f$x_G,v_G\f$ position and velocity of the center of mass expressed in a inertial frame of * reference (world frame) </li> * <li> \f$\Omega\f$ angular velocity vector expressed in the body-fixed frame (frame attached to the object) </li> * <li> \f$R\f$ rotation matrix form the inertial frame to the bosy-fixed frame \f$R^{-1}=R^T, \det(R)=1\f$, i.e \f$ R\in SO^+(3)\f$ </li> * <li> \f$M=m\,I_{3\times 3}\f$ diagonal mass matrix with \f$m \in \mathbb{R}\f$ the scalar mass </li> * <li> \f$I\f$ constant inertia matrix </li> * <li> \f$F_{ext}\f$ and \f$ M_{ext}\f$ are the external applied forces and torques </li> * </ul> * * * In the current implementation, \f$R\f$ is parametrized by a unit quaternion. * */ class NewtonEulerDS : public DynamicalSystem { public: enum WorkNames {xfree, sizeWorkVec}; protected: /** serialization hooks */ ACCEPT_SERIALIZATION(NewtonEulerDS); void internalInit(SP::SiconosVector Q0, SP::SiconosVector Velocity0, double mass , SP::SiconosMatrix inertialMatrix); // -- MEMBERS -- /** _v contains the velocity of the Newton Euler dynamical system. * _v[0:2] : \f$v_G \in \RR^3 \f$ velocity of the center of mass in * the inertial frame of reference (world frame). * _v[3:5] : \f$\Omega\in\RR^3\f$ angular velocity expressed in the body-fixed frame */ SP::SiconosVector _v; /** Initial velocity */ SP::SiconosVector _v0; /** Memory vectors that stores the values within the time--step */ SP::SiconosMemory _vMemory; SP::SiconosMemory _qMemory; SP::SiconosMemory _forcesMemory; SP::SiconosMemory _dotqMemory; /** _q dimension, is not necessary _n. In our case, _qDim = 7 and _n =6*/ unsigned int _qDim; /** _q contains the representation of the system * In the current implementation, we have * _q[0:2] : the coordinates of the center of mass expressed * in the inertial frame of reference (world frame) * _q[3:6] : an unit quaternion representing the orientation of the solid. 
* This unit quaternion encodes the rotation mapping from the inertial frame of reference * to the body-fixed frame */ SP::SiconosVector _q; //SP::SiconosVector _deltaq; /** Initial position */ SP::SiconosVector _q0; /** The time derivative of \f$q\f$, \f$\dot q\f$*/ SP::SiconosVector _dotq; /* the rotation matrix that converts a vector in body coordinates (in the body fixed frame) * in the absolute coordinates in the inertial frame of reference. */ SP::SimpleMatrix _MObjToAbs; /** Inertial matrix */ SP::SiconosMatrix _I; /** Scalar mass of the system */ double _scalarMass; /** used for concatenate _I and _scalarMass.I_3 */ SP::SimpleMatrix _massMatrix; /** Contains the LU factorization of the Mass (or the iteration matrix.). */ SP::SimpleMatrix _luW; /** Matrix depending on the parametrization of the orientation * \f$v = T(q) \dot q\f$ */ SP::SimpleMatrix _T; /** Time derivative of T. * * \f$\dot v = \dot T(q) \dot q + T(q) \ddot q\f$ */ SP::SimpleMatrix _Tdot; /** "Reaction" due to the non smooth law - The index corresponds to the dynamic levels. 
*/ std::vector<SP::SiconosVector> _p; /** external forces of the system */ SP::SiconosVector _fExt; /** internal forces of the system */ SP::SiconosVector _fInt; /** external moment of the forces */ SP::SiconosVector _mExt; /** internal moment of the forces */ SP::SiconosVector _mInt; /** jacobian_q FInt*/ SP::SimpleMatrix _jacobianFIntq; /** jacobian_{v} FInt*/ SP::SimpleMatrix _jacobianFIntv; /** jacobian_q MInt*/ SP::SimpleMatrix _jacobianMIntq; /** jacobian_{v} MInt*/ SP::SimpleMatrix _jacobianMIntv; /** internal forces of the system */ SP::SiconosVector _fGyr; /** jacobian_v FGyr*/ SP::SimpleMatrix _jacobianFGyrv; /** If true, we compute the missing Jacobian by forward finite difference */ bool _computeJacobianFIntqByFD; /** If true, we compute the missing Jacobian by forward finite difference */ bool _computeJacobianFIntvByFD; /** If true, we compute the missing Jacobian by forward finite difference */ bool _computeJacobianMIntqByFD; /** If true, we compute the missing Jacobian by forward finite difference */ bool _computeJacobianMIntvByFD; /** value of the step in finite difference */ double _epsilonFD; /** Plugin to compute strength of external forces */ SP::PluggedObject _pluginFExt; /** Plugin to compute moments of external forces */ SP::PluggedObject _pluginMExt; /** Plugin to compute strength of internal forces */ SP::PluggedObject _pluginFInt; /** Plugin to compute moments of internal forces */ SP::PluggedObject _pluginMInt; /** The following code is commented because the jacobian of _mInt and _fInt * are not yet used by the numerical scheme. * Will be needed by a fully implicit scheme for instance. 
*/ /** jacobian_q */ // SP::SimpleMatrix _jacobianqmInt; /** jacobian_{qDot} */ // SP::SimpleMatrix _jacobianqDotmInt; /** NewtonEulerDS plug-in to compute \f$\nabla_qF_{Int}(\dot q, q, t)\f$, id = "jacobianFIntq" * @param time : current time * @param sizeOfq : size of vector q * @param q : pointer to the first element of q * @param velocity : pointer to the first element of velocity * @param[in,out] jacob : pointer to the first element of the jacobian * @param size of vector z * @param[in,out] z : a vector of user-defined parameters */ SP::PluggedObject _pluginJacqFInt; /** NewtonEulerDS plug-in to compute \f$\nabla_{\dot q}F_{Int}(\dot q, q, t)\f$, id = "jacobianFIntv" * @param time : current time * @param sizeOfq : size of vector q * @param q : pointer to the first element of q * @param velocity : pointer to the first element of velocity * @param[in,out] jacob : pointer to the first element of the jacobian * @param size of vector z * @param[in,out] z : a vector of user-defined parameters */ SP::PluggedObject _pluginJacvFInt; /** NewtonEulerDS plug-in to compute \f$\nabla_qM_{Int}(\dot q, q, t)\f$, id = "jacobianMIntq" * @param time : current time * @param sizeOfq : size of vector q * @param q : pointer to the first element of q * @param velocity : pointer to the first element of velocity * @param[in,out] jacob : pointer to the first element of the jacobian * @param size of vector z * @param[in,out] z : a vector of user-defined parameters */ SP::PluggedObject _pluginJacqMInt; /** NewtonEulerDS plug-in to compute \f$\nabla_{\dot q}M_{Int}(\dot q, q, t)\f$, id = "jacobianMIntv" * @param time : current time * @param sizeOfq : size of vector q * @param q : pointer to the first element of q * @param velocity : pointer to the first element of velocity * @param[in,out] jacob : pointer to the first element of the jacobian * @param size of vector z * @param[in,out] z : a vector of user-defined parameters */ SP::PluggedObject _pluginJacvMInt; /** forces(q,v,t)= fExt - fInt 
- fGyr */ SP::SiconosVector _forces; /** jacobian_q forces*/ SP::SimpleMatrix _jacobianqForces; /** jacobian_{v} forces*/ SP::SimpleMatrix _jacobianvForces; /** Boundary condition applied to a dynamical system*/ SP::BoundaryCondition _boundaryConditions; /** Reaction to an applied boundary condition */ SP::SiconosVector _reactionToBoundaryConditions; /** set links with DS members */ void connectToDS(); /** Default constructor */ NewtonEulerDS(); void zeroPlugin(); public: // === CONSTRUCTORS - DESTRUCTOR === /** constructor from a minimum set of data * \param position initial coordinates of this DynamicalSystem * \param velocity initial velocity of this DynamicalSystem * \param mass the mass * \param inertia the inertia matrix */ NewtonEulerDS(SP::SiconosVector position, SP::SiconosVector velocity, double mass, SP::SiconosMatrix inertia); /** destructor */ virtual ~NewtonEulerDS(); /** check that the system is complete (ie all required data are well set) * \return a bool */ bool checkDynamicalSystem(); /** allocate memory for forces and its jacobians, if required. */ void initForces(); /** Initialization function for the rhs and its jacobian. * \param time the time of initialization */ void initRhs(double time) ; /** dynamical system initialization function except for _p: * mainly set memory and compute plug-in for initial state values. * \param time the time of initialization, default value = 0 * \param size the size of the memory, default size = 1. */ void initialize(double time = 0, unsigned int size = 1) ; /** dynamical system initialization function for _p * \param level for _p */ void initializeNonSmoothInput(unsigned int level) ; // === GETTERS AND SETTERS === /** return the dim. of the system (n for first order). * Useful to avoid if(typeOfDS) when size is required. * \return an unsigned int. 
*/ virtual inline unsigned int dimension() const { return _n; } virtual inline unsigned int getqDim() const { return _qDim; } // -- q -- /** get q * \return pointer on a SiconosVector */ inline SP::SiconosVector q() const { return _q; } // inline SP::SiconosVector deltaq() const // { // return _deltaq; // } // -- q0 -- /** get q0 * \return pointer on a SiconosVector */ inline SP::SiconosVector q0() const { return _q0; } inline SP::SiconosVector v0() const { return _v0; } // Q memory /** get all the values of the state vector q stored in memory * \return a memory */ inline SP::SiconosMemory qMemory() const { return _qMemory; } inline SP::SiconosMemory vMemory() const { return _vMemory; } // -- velocity -- /** get velocity * \return pointer on a SiconosVector */ inline SP::SiconosVector velocity() const { return _v; } // -- velocity0 -- /** get velocity0 * \return pointer on a SiconosVector */ inline SP::SiconosVector velocity0() const { return _v0; } // Velocity memory /** get all the values of the state vector velocity stored in memory * \return a memory */ inline SP::SiconosMemory velocityMemory() const { return _vMemory; } // -- p -- /** get p * \param level unsigned int, required level for p, default = 2 * \return pointer on a SiconosVector */ inline SP::SiconosVector p(unsigned int level = 2) const { return _p[level]; } // -- Mass -- /** get mass value * \return a double */ inline double scalarMass() const { return _scalarMass; }; // -- Fext -- /** get fExt * \return pointer on a plugged vector */ inline SP::SiconosVector fExt() const { return _fExt; } /** set fExt to pointer newPtr * \param newPtr a SP to a Simple vector */ inline void setFExtPtr(SP::SiconosVector newPtr) { _fExt = newPtr; } /** set mExt to pointer newPtr * \param newPtr a SP to a Simple vector */ inline void setMExtPtr(SP::SiconosVector newPtr) { _mExt = newPtr; } // -- forces -- /** get forces * \return pointer on a SiconosVector */ inline SP::SiconosVector forces() const { return _forces; } 
// -- Jacobian Forces w.r.t q -- /** get JacobianqForces * \return pointer on a SiconosMatrix */ inline SP::SimpleMatrix jacobianqForces() const { return _jacobianqForces; } /** get JacobianvForces * \return pointer on a SiconosMatrix */ inline SP::SimpleMatrix jacobianvForces() const { return _jacobianvForces; } // inline SP::SiconosMatrix jacobianZFL() const { return jacobianZFL; } inline void setComputeJacobianFIntqByFD(bool value) { _computeJacobianFIntqByFD=value; } inline void setComputeJacobianFIntvByFD(bool value) { _computeJacobianFIntvByFD=value; } inline void setComputeJacobianMIntqByFD(bool value) { _computeJacobianMIntqByFD=value; } inline void setComputeJacobianMIntvByFD(bool value) { _computeJacobianMIntvByFD=value; } // --- PLUGINS RELATED FUNCTIONS --- /** allow to set a specified function to compute _fExt * \param pluginPath the complete path to the plugin * \param functionName the name of the function to use in this plugin */ void setComputeFExtFunction(const std::string& pluginPath, const std::string& functionName) { _pluginFExt->setComputeFunction(pluginPath, functionName); } /** allow to set a specified function to compute _mExt * \param pluginPath the complete path to the plugin * \param functionName the name of the function to use in this plugin */ void setComputeMExtFunction(const std::string& pluginPath, const std::string& functionName) { _pluginMExt->setComputeFunction(pluginPath, functionName); } /** set a specified function to compute _fExt * \param fct a pointer on the plugin function */ void setComputeFExtFunction(FExt_NE fct) { _pluginFExt->setComputeFunction((void*)fct); } /** set a specified function to compute _mExt * \param fct a pointer on the plugin function */ void setComputeMExtFunction(FExt_NE fct) { _pluginMExt->setComputeFunction((void*)fct); } /** allow to set a specified function to compute _fInt * \param pluginPath the complete path to the plugin * \param functionName the name of the function to use in this plugin */ 
void setComputeFIntFunction(const std::string& pluginPath, const std::string& functionName) { _pluginFInt->setComputeFunction(pluginPath, functionName); } /** allow to set a specified function to compute _mInt * \param pluginPath the complete path to the plugin * \param functionName the name of the function to use in this plugin */ void setComputeMIntFunction(const std::string& pluginPath, const std::string& functionName) { _pluginMInt->setComputeFunction(pluginPath, functionName); } /** set a specified function to compute _fInt * \param fct a pointer on the plugin function */ void setComputeFIntFunction(FInt_NE fct) { _pluginFInt->setComputeFunction((void*)fct); } /** set a specified function to compute _mInt * \param fct a pointer on the plugin function */ void setComputeMExtFunction(FInt_NE fct) { _pluginMInt->setComputeFunction((void*)fct); } /** allow to set a specified function to compute the jacobian w.r.t q of the internal forces * \param pluginPath std::string : the complete path to the plugin * \param functionName std::string : the name of the function to use in this plugin */ void setComputeJacobianFIntqFunction(const std::string& pluginPath, const std::string& functionName); /** allow to set a specified function to compute the jacobian following v of the internal forces w.r.t. 
* \param pluginPath std::string : the complete path to the plugin * \param functionName std::string : the name of the function to use in this plugin */ void setComputeJacobianFIntvFunction(const std::string& pluginPath, const std::string& functionName); /** set a specified function to compute jacobian following q of the FInt * \param fct a pointer on the plugin function */ void setComputeJacobianFIntqFunction(FInt_NE fct); /** set a specified function to compute jacobian following v of the FInt * \param fct a pointer on the plugin function */ void setComputeJacobianFIntvFunction(FInt_NE fct); /** allow to set a specified function to compute the jacobian w.r.t q of the internal forces * \param pluginPath std::string : the complete path to the plugin * \param functionName std::string : the name of the function to use in this plugin */ void setComputeJacobianMIntqFunction(const std::string& pluginPath, const std::string& functionName); /** allow to set a specified function to compute the jacobian following v of the internal forces w.r.t. 
* \param pluginPath std::string : the complete path to the plugin * \param functionName std::string : the name of the function to use in this plugin */ void setComputeJacobianMIntvFunction(const std::string& pluginPath, const std::string& functionName); /** set a specified function to compute jacobian following q of the FInt * \param fct a pointer on the plugin function */ void setComputeJacobianMIntqFunction(FInt_NE fct); /** set a specified function to compute jacobian following v of the FInt * \param fct a pointer on the plugin function */ void setComputeJacobianMIntvFunction(FInt_NE fct); /** default function to compute the external forces * \param time the current time */ virtual void computeFExt(double time); /** default function to compute the external moments * \param time the current time */ virtual void computeMExt(double time); /** default function to compute the internal forces * \param time the current time */ void computeFInt(double time); /** default function to compute the internal moments * \param time the current time */ void computeMInt(double time); /** default function to compute the internal forces * \param time the current time * \param q * \param v */ void computeFInt(double time, SP::SiconosVector q, SP::SiconosVector v); /** default function to compute the internal moments * \param time the current time * \param q * \param v */ void computeMInt(double time, SP::SiconosVector q, SP::SiconosVector v); /** default function to compute the internal forces * \param time the current time * \param q * \param v * \param fInt the computed internal force vector */ virtual void computeFInt(double time, SP::SiconosVector q, SP::SiconosVector v, SP::SiconosVector fInt); /** default function to compute the internal moments * \param time the current time * \param q * \param v * \param mInt the computed internal moment vector */ virtual void computeMInt(double time, SP::SiconosVector q, SP::SiconosVector v, SP::SiconosVector mInt); /** Default function to 
compute the right-hand side term * \param time current time * \param isDSup flag to avoid recomputation of operators */ virtual void computeRhs(double time, bool isDSup = false); /** Default function to compute jacobian of the right-hand side term according to x * \param time current time * \param isDup flag to avoid recomputation of operators */ virtual void computeJacobianRhsx(double time, bool isDup = false); /** Default function to compute forces * \param time double, the current time */ virtual void computeForces(double time); /** function to compute forces with some specific values for q and velocity (ie not those of the current state). * \param time double : the current time * \param q SP::SiconosVector: pointers on q * \param velocity SP::SiconosVector: pointers on velocity */ virtual void computeForces(double time, SP::SiconosVector q, SP::SiconosVector velocity); /** Default function to compute the jacobian w.r.t. q of forces * \param time double, the current time */ virtual void computeJacobianqForces(double time); /** Default function to compute the jacobian w.r.t. v of forces * \param time double, the current time */ virtual void computeJacobianvForces(double time); /** function to compute gyroscopic forces with some specific values for q and velocity (ie not those of the current state). * \param velocity SP::SiconosVector: pointers on velocity vector */ virtual void computeFGyr(SP::SiconosVector velocity); /** function to compute gyroscopic forces with some specific values for q and velocity (ie not those of the current state). 
* \param velocity SP::SiconosVector: pointers on velocity vector * \param SP::SiconosVector fGyr */ virtual void computeFGyr(SP::SiconosVector velocity, SP::SiconosVector fGyr); /** Default function to compute the jacobian following q of fGyr * \param time the current time */ virtual void computeJacobianFGyrv(double time); /** Default function to compute the jacobian following q of fGyr * by forward finite difference * \param time the current time */ virtual void computeJacobianFGyrvByFD(double time, SP::SiconosVector q, SP::SiconosVector velocity); // /** Default function to compute the jacobian following v of fGyr // * \param time the current time // */ // virtual void computeJacobianvForces(double time); /** To compute the jacobian w.r.t q of the internal forces * \param time double : the current time */ void computeJacobianFIntq(double time); /** To compute the jacobian w.r.t v of the internal forces * \param time double : the current time */ void computeJacobianFIntv(double time); /** To compute the jacobian w.r.t q of the internal forces * \param time double * \param position SP::SiconosVector * \param velocity SP::SiconosVector */ virtual void computeJacobianFIntq(double time, SP::SiconosVector position, SP::SiconosVector velocity); /** To compute the jacobian w.r.t q of the internal forces * by forward finite difference * \param time double * \param position SP::SiconosVector * \param velocity SP::SiconosVector */ void computeJacobianFIntqByFD(double time, SP::SiconosVector position, SP::SiconosVector velocity); /** To compute the jacobian w.r.t. 
v of the internal forces * \param time double: the current time * \param position SP::SiconosVector * \param velocity SP::SiconosVector */ virtual void computeJacobianFIntv(double time, SP::SiconosVector position, SP::SiconosVector velocity); /** To compute the jacobian w.r.t v of the internal forces * by forward finite difference * \param time double * \param position SP::SiconosVector * \param velocity SP::SiconosVector */ void computeJacobianFIntvByFD(double time, SP::SiconosVector position, SP::SiconosVector velocity); /** To compute the jacobian w.r.t q of the internal forces * \param time double : the current time */ virtual void computeJacobianMIntq(double time); /** To compute the jacobian w.r.t v of the internal forces * \param time double : the current time */ virtual void computeJacobianMIntv(double time); /** To compute the jacobian w.r.t q of the internal forces * \param time double : the current time, * \param position SP::SiconosVector * \param velocity SP::SiconosVector */ virtual void computeJacobianMIntq(double time, SP::SiconosVector position, SP::SiconosVector velocity); /** To compute the jacobian w.r.t q of the internal moments * by forward finite difference * \param time double * \param position SP::SiconosVector * \param velocity SP::SiconosVector */ void computeJacobianMIntqByFD(double time, SP::SiconosVector position, SP::SiconosVector velocity); /** To compute the jacobian w.r.t. 
v of the internal forces * \param time double: the current time * \param position SP::SiconosVector * \param velocity SP::SiconosVector */ virtual void computeJacobianMIntv(double time, SP::SiconosVector position, SP::SiconosVector velocity); /** To compute the jacobian w.r.t v of the internal moments * by forward finite difference * \param time double * \param position SP::SiconosVector * \param velocity SP::SiconosVector */ void computeJacobianMIntvByFD(double time, SP::SiconosVector position, SP::SiconosVector velocity); /** To compute the kinetic energy */ double computeKineticEnergy(); // --- miscellaneous --- /** print the data to the screen */ void display() const; /** initialize the SiconosMemory objects with a positive size. * \param steps the size of the SiconosMemory (i) */ void initMemory(unsigned int steps); /** push the current values of x, q and r in the stored previous values * xMemory, qMemory, rMemory, * \todo Modify the function swapIn Memory with the new Object Memory */ void swapInMemory(); /** set p[...] to zero */ void resetAllNonSmoothPart(); /** set p[...] 
to zero for a given level * \param level */ void resetNonSmoothPart(unsigned int level); virtual void computeT(); virtual void computeTdot(); virtual void normalizeq(); inline SP::SimpleMatrix mass() { return _massMatrix; } inline SP::SimpleMatrix T() { return _T; } inline SP::SimpleMatrix Tdot() { assert(_Tdot); return _Tdot; } inline SP::SiconosMemory forcesMemory() { return _forcesMemory; } inline SP::SiconosMemory dotqMemory() { return _dotqMemory; } inline SP::SiconosVector dotq() { return _dotq; } /** set Boundary Conditions * \param newbd BoundaryConditions */ inline void setBoundaryConditions(SP::BoundaryCondition newbd) { _boundaryConditions = newbd; }; /** get Boundary Conditions * \return SP::BoundaryCondition pointer on a BoundaryConditions */ inline SP::BoundaryCondition boundaryConditions() { return _boundaryConditions; }; /** set Reaction to Boundary Conditions * \param newrbd BoundaryConditions pointer */ inline void setReactionToBoundaryConditions(SP::SiconosVector newrbd) { _reactionToBoundaryConditions = newrbd; }; /** get Reaction to Boundary Conditions * \return pointer on a BoundaryConditions */ inline SP::SiconosVector reactionToBoundaryConditions() { return _reactionToBoundaryConditions; }; /** get the matrix converting the object coordinates in the absolute coordinates. \return SP::SimpleMatrix */ SP::SimpleMatrix MObjToAbs() { return _MObjToAbs; } /*update the _MObjToAbs from the current quaternion.*/ void computeMObjToAbs(); // /* update the _MObjToAbs from a given quaternion. // * \param q // */ // void computeMObjToAbs(SP::SiconosVector q); ACCEPT_STD_VISITORS(); }; #endif // NEWTONEULERNLDS_H
29.664256
139
0.675779
[ "object", "vector", "solid" ]
5912fb4e02d70755effbb677140fba6d7c3bec63
4,329
hpp
C++
lib/boost/mpl/vector/aux_/preprocessed/typeof_based/vector20.hpp
efedo/Utilogeny
03b7e7d2650c326f8493df35c14470f21de3be78
[ "MIT" ]
null
null
null
lib/boost/mpl/vector/aux_/preprocessed/typeof_based/vector20.hpp
efedo/Utilogeny
03b7e7d2650c326f8493df35c14470f21de3be78
[ "MIT" ]
null
null
null
lib/boost/mpl/vector/aux_/preprocessed/typeof_based/vector20.hpp
efedo/Utilogeny
03b7e7d2650c326f8493df35c14470f21de3be78
[ "MIT" ]
null
null
null
// Copyright Aleksey Gurtovoy 2000-2004 // // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // // Preprocessed version of "Utilogeny/lib/boost/mpl/vector/vector20.hpp" header // -- DO NOT modify by hand! namespace boost { namespace mpl { template< typename T0, typename T1, typename T2, typename T3, typename T4 , typename T5, typename T6, typename T7, typename T8, typename T9 , typename T10 > struct vector11 : v_item< T10 , vector10< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9 > > { typedef vector11 type; }; template< typename T0, typename T1, typename T2, typename T3, typename T4 , typename T5, typename T6, typename T7, typename T8, typename T9 , typename T10, typename T11 > struct vector12 : v_item< T11 , vector11< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10 > > { typedef vector12 type; }; template< typename T0, typename T1, typename T2, typename T3, typename T4 , typename T5, typename T6, typename T7, typename T8, typename T9 , typename T10, typename T11, typename T12 > struct vector13 : v_item< T12 , vector12< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11 > > { typedef vector13 type; }; template< typename T0, typename T1, typename T2, typename T3, typename T4 , typename T5, typename T6, typename T7, typename T8, typename T9 , typename T10, typename T11, typename T12, typename T13 > struct vector14 : v_item< T13 , vector13< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12 > > { typedef vector14 type; }; template< typename T0, typename T1, typename T2, typename T3, typename T4 , typename T5, typename T6, typename T7, typename T8, typename T9 , typename T10, typename T11, typename T12, typename T13, typename T14 > struct vector15 : v_item< T14 , vector14< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13 > > { typedef vector15 type; }; template< typename T0, typename T1, typename T2, typename T3, typename T4 , typename T5, typename T6, typename T7, typename T8, typename T9 , typename T10, typename T11, 
typename T12, typename T13, typename T14 , typename T15 > struct vector16 : v_item< T15 , vector15< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14 > > { typedef vector16 type; }; template< typename T0, typename T1, typename T2, typename T3, typename T4 , typename T5, typename T6, typename T7, typename T8, typename T9 , typename T10, typename T11, typename T12, typename T13, typename T14 , typename T15, typename T16 > struct vector17 : v_item< T16 , vector16< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15 > > { typedef vector17 type; }; template< typename T0, typename T1, typename T2, typename T3, typename T4 , typename T5, typename T6, typename T7, typename T8, typename T9 , typename T10, typename T11, typename T12, typename T13, typename T14 , typename T15, typename T16, typename T17 > struct vector18 : v_item< T17 , vector17< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16 > > { typedef vector18 type; }; template< typename T0, typename T1, typename T2, typename T3, typename T4 , typename T5, typename T6, typename T7, typename T8, typename T9 , typename T10, typename T11, typename T12, typename T13, typename T14 , typename T15, typename T16, typename T17, typename T18 > struct vector19 : v_item< T18 , vector18< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17 > > { typedef vector19 type; }; template< typename T0, typename T1, typename T2, typename T3, typename T4 , typename T5, typename T6, typename T7, typename T8, typename T9 , typename T10, typename T11, typename T12, typename T13, typename T14 , typename T15, typename T16, typename T17, typename T18, typename T19 > struct vector20 : v_item< T19 , vector19< T0,T1,T2,T3,T4,T5,T6,T7,T8,T9,T10,T11,T12,T13,T14,T15,T16,T17,T18 > > { typedef vector20 type; }; }}
27.05625
87
0.641488
[ "vector" ]
5917da54c967d8c5e99110b61212f681e0c5cda0
1,643
cpp
C++
data_structure/dual_segment_tree.cpp
kmyk/noshi91-Library
c03d1be31b13fc3d25e05d7af272c418d835b6de
[ "CC0-1.0" ]
1
2022-02-04T09:46:07.000Z
2022-02-04T09:46:07.000Z
data_structure/dual_segment_tree.cpp
kmyk/noshi91-Library
c03d1be31b13fc3d25e05d7af272c418d835b6de
[ "CC0-1.0" ]
null
null
null
data_structure/dual_segment_tree.cpp
kmyk/noshi91-Library
c03d1be31b13fc3d25e05d7af272c418d835b6de
[ "CC0-1.0" ]
null
null
null
#include <cassert> #include <cstddef> #include <vector> template <class Monoid> class dual_segment_tree { using size_t = std::size_t; using T = typename Monoid::value_type; public: using value_type = T; using size_type = size_t; private: static size_t lsb(const size_t x) { return __builtin_ctz(x); } static size_t msb(const size_t x) { return 31 - __builtin_clz(x); } static void add(T &x, const T y) { x = Monoid::operation(x, y); } std::vector<T> tree; void push(const size_t index) { add(tree[index * 2], tree[index]); add(tree[index * 2 + 1], tree[index]); tree[index] = Monoid::identity; } void propagate(const size_t index) { if (index == 0) return; const size_t lsb_ = lsb(index); for (size_t h = msb(index); h != lsb_; h -= 1) push(index >> h); } public: dual_segment_tree() = default; explicit dual_segment_tree(const size_t n) : tree(n * 2, Monoid::identity) {} size_t size() const noexcept { return tree.size() / 2; } T fold(size_t index) const { assert(index < size()); index += size(); T ret = tree[index]; while (index != 1) { index /= 2; add(ret, tree[index]); } return ret; } void update(size_t first, size_t last, const T x) { assert(first <= last); assert(last <= size()); first += size(); last += size(); propagate(first); propagate(last); while (first != last) { if (first % 2 != 0) { add(tree[first], x); first += 1; } first /= 2; if (last % 2 != 0) { last -= 1; add(tree[last], x); } last /= 2; } } };
23.471429
79
0.570298
[ "vector" ]
591a0fade5c2c60a1ff70fb3f96feddfff919757
3,997
cpp
C++
tests/RaZ/Utils/TypeUtils.cpp
Razakhel/RaZ
d7bc8d4631a2ebd212950f8001f192bcd7d3e80a
[ "MIT" ]
339
2017-09-24T17:26:15.000Z
2022-03-20T13:25:39.000Z
tests/RaZ/Utils/TypeUtils.cpp
xiaohunqupo/RaZ
ad0a1e0f336d8beb20afc73c0a5e6ee8a319a8f1
[ "MIT" ]
24
2017-09-22T10:30:12.000Z
2022-01-05T21:32:20.000Z
tests/RaZ/Utils/TypeUtils.cpp
xiaohunqupo/RaZ
ad0a1e0f336d8beb20afc73c0a5e6ee8a319a8f1
[ "MIT" ]
24
2018-01-21T17:38:18.000Z
2022-02-02T11:16:22.000Z
#include "Catch.hpp" #include "RaZ/Utils/TypeUtils.hpp" namespace { class AttributeTest { public: AttributeTest() = default; AttributeTest(const AttributeTest&) = default; AttributeTest(AttributeTest&&) = delete; AttributeTest& operator=(const AttributeTest&) = default; bool operator==(const AttributeTest&) { return true; } operator bool() { return true; } ~AttributeTest() = default; protected: bool operator!=(const AttributeTest&) { return true; } private: AttributeTest& operator=(AttributeTest&&) = default; }; } // namespace enum class EnumTest { VALUE, TEST, AGAIN }; TEST_CASE("TypeUtils type str") { int testInt {}; const int& testIntRef = testInt; CHECK_NOFAIL(Raz::TypeUtils::getTypeStr<int>() == "int"); CHECK_NOFAIL(Raz::TypeUtils::getTypeStr<decltype(testInt)>() == "int"); #if defined(RAZ_COMPILER_CLANG) CHECK_NOFAIL(Raz::TypeUtils::getTypeStr<const int*>() == "const int *"); CHECK_NOFAIL(Raz::TypeUtils::getTypeStr<decltype(testIntRef)>() == "const int &"); CHECK_NOFAIL(Raz::TypeUtils::getTypeStr<decltype("Hello world!")>() == "char const (&)[13]"); #if __clang_major__ == 11 CHECK_NOFAIL(Raz::TypeUtils::getTypeStr<std::string_view>() == "std::basic_string_view<char>"); #else CHECK_NOFAIL(Raz::TypeUtils::getTypeStr<std::string_view>() == "std::basic_string_view<char, std::char_traits<char> >"); #endif #elif defined(RAZ_COMPILER_GCC) CHECK_NOFAIL(Raz::TypeUtils::getTypeStr<const int*>() == "const int*"); CHECK_NOFAIL(Raz::TypeUtils::getTypeStr<decltype(testIntRef)>() == "const int&"); CHECK_NOFAIL(Raz::TypeUtils::getTypeStr<decltype("Hello world!")>() == "const char (&)[13]"); CHECK_NOFAIL(Raz::TypeUtils::getTypeStr<std::string_view>() == "std::basic_string_view<char>"); #elif defined(RAZ_COMPILER_MSVC) CHECK_NOFAIL(Raz::TypeUtils::getTypeStr<const int*>() == "const int*"); CHECK_NOFAIL(Raz::TypeUtils::getTypeStr<decltype(testIntRef)>() == "const int&"); CHECK_NOFAIL(Raz::TypeUtils::getTypeStr<decltype("Hello world!")>() == "const char(&)[13]"); 
CHECK_NOFAIL(Raz::TypeUtils::getTypeStr<std::string_view>() == "class std::basic_string_view<char,struct std::char_traits<char> >"); #endif } TEST_CASE("TypeUtils enum str") { #if defined(RAZ_COMPILER_GCC) && __GNUC__ < 9 // Prior to version 9, GCC prints enum values as (Type)value CHECK_NOFAIL(Raz::TypeUtils::getEnumStr<EnumTest::VALUE>() == "(EnumTest)0"); CHECK_NOFAIL(Raz::TypeUtils::getEnumStr<EnumTest::TEST>() == "(EnumTest)1"); CHECK_NOFAIL(Raz::TypeUtils::getEnumStr<EnumTest::AGAIN>() == "(EnumTest)2"); #else CHECK_NOFAIL(Raz::TypeUtils::getEnumStr<EnumTest::VALUE>() == "EnumTest::VALUE"); CHECK_NOFAIL(Raz::TypeUtils::getEnumStr<EnumTest::TEST>() == "EnumTest::TEST"); CHECK_NOFAIL(Raz::TypeUtils::getEnumStr<EnumTest::AGAIN>() == "EnumTest::AGAIN"); #endif } #if !defined(RAZ_COMPILER_MSVC) TEST_CASE("TypeUtils has attribute") { CHECK(Raz::TypeUtils::hasDefaultConstructor<AttributeTest>()); CHECK(Raz::TypeUtils::hasCopyConstructor<AttributeTest>()); CHECK_FALSE(Raz::TypeUtils::hasMoveConstructor<AttributeTest>()); // Deleted CHECK(Raz::TypeUtils::hasCopyAssignmentOperator<AttributeTest>()); CHECK_FALSE(Raz::TypeUtils::hasMoveAssignmentOperator<AttributeTest>()); // Existing but private CHECK(Raz::TypeUtils::hasEqualityOperator<AttributeTest>()); CHECK_FALSE(Raz::TypeUtils::hasInequalityOperator<AttributeTest>()); // Existing but protected CHECK(Raz::TypeUtils::hasDefaultDestructor<AttributeTest>()); CHECK(Raz::TypeUtils::hasReturnType<bool, Raz::TypeUtils::Attribute::EqualityOperator, AttributeTest>()); CHECK(Raz::TypeUtils::hasReturnType<AttributeTest&, Raz::TypeUtils::Attribute::CopyAssignmentOperator, AttributeTest>()); // AttributeTest has a non-explicit operator bool(); the created object is then implicitly convertible to bool CHECK(Raz::TypeUtils::hasReturnTypeConvertible<bool, Raz::TypeUtils::Attribute::DefaultConstructor, AttributeTest>()); } #endif
39.97
134
0.727796
[ "object" ]
591d72fdf8de99e66d4f9a5830dde9e0fb2ecbf5
1,874
hpp
C++
libhail/src/hail/query/backend/compile.hpp
tdeboer-ilmn/hail
98fffc9b4e13cd5d5ced8322112894361d0b7052
[ "MIT" ]
789
2016-09-05T04:14:25.000Z
2022-03-30T09:51:54.000Z
libhail/src/hail/query/backend/compile.hpp
tdeboer-ilmn/hail
98fffc9b4e13cd5d5ced8322112894361d0b7052
[ "MIT" ]
5,724
2016-08-29T18:58:40.000Z
2022-03-31T23:49:42.000Z
libhail/src/hail/query/backend/compile.hpp
tdeboer-ilmn/hail
98fffc9b4e13cd5d5ced8322112894361d0b7052
[ "MIT" ]
233
2016-08-31T20:42:38.000Z
2022-02-17T16:42:39.000Z
#ifndef HAIL_QUERY_BACKEND_COMPILE_HPP_INCLUDED #define HAIL_QUERY_BACKEND_COMPILE_HPP_INCLUDED 1 #include <llvm/IR/IRBuilder.h> #include "hail/query/backend/stype.hpp" #include "hail/query/ir_type.hpp" namespace llvm { class LLVMContext; class Type; class Module; class Function; class AllocaInst; } namespace hail { class TypeContext; class Module; class Function; class CompileModule { public: TypeContext &tc; STypeContext &stc; llvm::LLVMContext &llvm_context; llvm::Module *llvm_module; CompileModule(TypeContext &tc, STypeContext &stc, Module *module, const std::vector<EmitType> &param_types, EmitType return_type, llvm::LLVMContext &llvm_context, llvm::Module *llvm_module); }; class CompileFunction { public: TypeContext &tc; STypeContext &stc; Function *function; const std::vector<EmitType> &param_types; EmitType return_type; llvm::LLVMContext &llvm_context; llvm::Module *llvm_module; /* Indexed by parameter index, the entry is the index of the first `llvm_function` parameter. */ std::vector<size_t> param_llvm_start; llvm::Function *llvm_function; // FIXME rename llvm_builder llvm::IRBuilder<> llvm_ir_builder; IRType ir_type; // FIXME move to SType const SType *get_default_stype(const Type *t); llvm::Type *get_llvm_type(PrimitiveType pt) const; llvm::AllocaInst *make_entry_alloca(llvm::Type *llvm_type); EmitValue emit(Block *x); EmitValue emit(Input *x); EmitValue emit(Literal *x); EmitValue emit(NA *x); EmitValue emit(IsNA *x); EmitValue emit(MakeTuple *x); EmitValue emit(GetTupleElement *x); EmitValue emit(IR *x); CompileFunction(TypeContext &tc, STypeContext &stc, Function *function, const std::vector<EmitType> &param_types, EmitType return_type, llvm::LLVMContext &llvm_context, llvm::Module *llvm_module); }; } #endif
21.295455
68
0.738527
[ "vector" ]
59224dd7a9dbb2feef3d8aeb0fc48859bed6ad9c
3,327
cc
C++
tests/unit/test_random_access_sequence_file.cc
isovic/raptor
171e0f1b94366f20250a00389400a2fcd267bcc6
[ "BSD-3-Clause-Clear" ]
60
2019-07-09T14:57:48.000Z
2022-03-29T06:53:39.000Z
tests/unit/test_random_access_sequence_file.cc
isovic/raptor
171e0f1b94366f20250a00389400a2fcd267bcc6
[ "BSD-3-Clause-Clear" ]
2
2019-05-28T01:59:50.000Z
2021-05-18T13:15:10.000Z
tests/unit/test_random_access_sequence_file.cc
isovic/raptor
171e0f1b94366f20250a00389400a2fcd267bcc6
[ "BSD-3-Clause-Clear" ]
4
2019-05-25T15:41:56.000Z
2019-07-10T11:44:22.000Z
#include <gtest/gtest.h> #include <fstream> #include <iostream> #include <string> #include <sstream> #include <vector> #include <log/log_tools.h> #include <lib/argparser.h> #include <version.h> #include <sequences/sequence_file.h> #include <sequences/sequence_file_composite_fofn.h> #include <sequences/random_access_sequence_file.h> #include <sequences/sequence_serializer.h> #include <utility/memtime.h> #include <raptor_fetch/params_raptor_fetch.h> TEST(RandomAccessSequenceFile, FetchSequenceUsingId) { // The RaptorDB was generated using: "$ raptor-reshape -r test-data/ecoli-small/reads.6x.fwd.fasta -o test-data/raptordb-fetch/test-1 --split-blocks --block-size 0.1". std::string in_path("test-data/raptordb-fetch/test-1.rdb"); std::string in_exp_path("test-data/ecoli-small/reads.6x.fwd.fasta"); int64_t max_open_files = 50; auto random_seq_file = mindex::createRandomAccessSequenceFile(in_path, max_open_files); auto seq_file_parser = mindex::createSequenceFileCompositeFofn({in_exp_path}, mindex::SequenceFormat::Auto); auto seq_file = seq_file_parser->YieldAll(); for (const auto& seq: seq_file->seqs()) { auto result_seq = random_seq_file->FetchSequence(seq->abs_id()); // std::cerr << "Testing: abs_id = " << seq->abs_id() << ", name: '" << seq->header() << "'\n"; ASSERT_NE(result_seq, nullptr); ASSERT_EQ(result_seq->header(), seq->header()); ASSERT_EQ(result_seq->GetSequenceAsString(), seq->GetSequenceAsString()); ASSERT_EQ(result_seq->GetQualityAsString(), seq->GetQualityAsString()); } } TEST(RandomAccessSequenceFile, FetchSequenceUsingQname) { std::string in_path("test-data/raptordb-fetch/test-1.rdb"); std::string in_exp_path("test-data/ecoli-small/reads.6x.fwd.fasta"); int64_t max_open_files = 50; auto random_seq_file = mindex::createRandomAccessSequenceFile(in_path, max_open_files); auto seq_file_parser = mindex::createSequenceFileCompositeFofn({in_exp_path}, mindex::SequenceFormat::Auto); auto seq_file = seq_file_parser->YieldAll(); for (const auto& seq: 
seq_file->seqs()) { auto result_seq = random_seq_file->FetchSequence(seq->header()); ASSERT_NE(result_seq, nullptr); ASSERT_EQ(result_seq->header(), seq->header()); ASSERT_EQ(result_seq->GetSequenceAsString(), seq->GetSequenceAsString()); ASSERT_EQ(result_seq->GetQualityAsString(), seq->GetQualityAsString()); } } TEST(RandomAccessSequenceFile, FetchSequenceUsingQnameSingleFile) { std::string in_path("test-data/raptordb-fetch/test-2.rdb"); std::string in_exp_path("test-data/ecoli-small/reads.6x.fwd.fasta"); int64_t max_open_files = 50; auto random_seq_file = mindex::createRandomAccessSequenceFile(in_path, max_open_files); auto seq_file_parser = mindex::createSequenceFileCompositeFofn({in_exp_path}, mindex::SequenceFormat::Auto); auto seq_file = seq_file_parser->YieldAll(); for (const auto& seq: seq_file->seqs()) { auto result_seq = random_seq_file->FetchSequence(seq->header()); ASSERT_NE(result_seq, nullptr); ASSERT_EQ(result_seq->header(), seq->header()); ASSERT_EQ(result_seq->GetSequenceAsString(), seq->GetSequenceAsString()); ASSERT_EQ(result_seq->GetQualityAsString(), seq->GetQualityAsString()); } }
41.074074
171
0.727983
[ "vector" ]
5924abaa9e4f0bce296c4d157da1916e1c3f73ca
2,119
hpp
C++
apps/opencs/view/render/unpagedworldspacewidget.hpp
Bodillium/openmw
5fdd264d0704e33b44b1ccf17ab4fb721f362e34
[ "Unlicense" ]
null
null
null
apps/opencs/view/render/unpagedworldspacewidget.hpp
Bodillium/openmw
5fdd264d0704e33b44b1ccf17ab4fb721f362e34
[ "Unlicense" ]
null
null
null
apps/opencs/view/render/unpagedworldspacewidget.hpp
Bodillium/openmw
5fdd264d0704e33b44b1ccf17ab4fb721f362e34
[ "Unlicense" ]
null
null
null
#ifndef OPENCS_VIEW_UNPAGEDWORLDSPACEWIDGET_H #define OPENCS_VIEW_UNPAGEDWORLDSPACEWIDGET_H #include <string> #include <memory> #include "worldspacewidget.hpp" #include "cell.hpp" class QModelIndex; namespace CSMDoc { class Document; } namespace CSMWorld { class IdTable; } namespace CSVRender { class UnpagedWorldspaceWidget : public WorldspaceWidget { Q_OBJECT std::string mCellId; CSMWorld::IdTable *mCellsModel; CSMWorld::IdTable *mReferenceablesModel; std::auto_ptr<Cell> mCell; void update(); protected: virtual void addVisibilitySelectorButtons (CSVWidget::SceneToolToggle *tool); public: UnpagedWorldspaceWidget (const std::string& cellId, CSMDoc::Document& document, QWidget *parent); virtual dropRequirments getDropRequirements(DropType type) const; /// \return Drop handled? virtual bool handleDrop (const std::vector<CSMWorld::UniversalId>& data, DropType type); private: virtual void referenceableDataChanged (const QModelIndex& topLeft, const QModelIndex& bottomRight); virtual void referenceableAboutToBeRemoved (const QModelIndex& parent, int start, int end); virtual void referenceableAdded (const QModelIndex& index, int start, int end); virtual void referenceDataChanged (const QModelIndex& topLeft, const QModelIndex& bottomRight); virtual void referenceAboutToBeRemoved (const QModelIndex& parent, int start, int end); virtual void referenceAdded (const QModelIndex& index, int start, int end); virtual std::string getStartupInstruction(); private slots: void cellDataChanged (const QModelIndex& topLeft, const QModelIndex& bottomRight); void cellRowsAboutToBeRemoved (const QModelIndex& parent, int start, int end); signals: void cellChanged(const CSMWorld::UniversalId& id); }; } #endif
26.4875
107
0.657386
[ "vector" ]
5925cdb024087eec68b37540a89f28956d8f9249
3,461
cpp
C++
test/testMain.cpp
mera-company/cpp-serialization-library
b49d16e4c5e02c55dae122fe2d4f6cc7649f5ff1
[ "Apache-2.0" ]
12
2019-11-01T15:06:38.000Z
2021-03-24T09:08:01.000Z
test/testMain.cpp
mera-company/cpp-serialization-library
b49d16e4c5e02c55dae122fe2d4f6cc7649f5ff1
[ "Apache-2.0" ]
2
2020-02-17T18:25:10.000Z
2020-02-24T08:01:13.000Z
test/testMain.cpp
mera-company/cpp-serialization-library
b49d16e4c5e02c55dae122fe2d4f6cc7649f5ff1
[ "Apache-2.0" ]
2
2020-02-16T10:40:35.000Z
2020-05-21T11:17:18.000Z
/** * @file testMain.cpp * * @author Alexander Ganyukhin (alexander.ganyukhin@mera.com) * * @date 2019-October-23 * * Copyright 2019 Mera * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ #include <cstdint> #include <iostream> #include <object_invoke.h> template<typename T> struct InstanceCounter { static size_t instances; InstanceCounter() { this->incAndLog(); } InstanceCounter(InstanceCounter const &) { this->incAndLog(); } InstanceCounter(InstanceCounter &&) noexcept { this->incAndLog(); } ~InstanceCounter() { this->decAndLog(); } InstanceCounter & operator=(InstanceCounter const &) = default; InstanceCounter & operator=(InstanceCounter &&) = default; void incAndLog() const { ++instances; std::cout << " +++New object \'" << typeid(T).name() << "\', addr: \'" << static_cast<void const *>(this) << "\' created, total: \'" << instances << "\'" << std::endl; } void decAndLog() const { --instances; std::cout << " ---Object \'" << typeid(T).name() << "\', addr: \'" << static_cast<void const *>(this) << "\' destroyed, total: \'" << instances << "\'" << std::endl; } }; template<typename T> size_t InstanceCounter<T>::instances { 0ull }; struct Object1 : public InstanceCounter<Object1> { void getValue(int & i) { // std::cout << " getValue invoked!" << std::endl; static int ii { 11111 }; ii += 11111; i = ii; } }; struct Object2 : public InstanceCounter<Object2> { void getObject1(Object1 & obj) { // std::cout << " getObject1 invoked!" 
<< std::endl; (void)obj; } }; struct Object3 : public InstanceCounter<Object3> { void getObject2(Object2 * obj) { //std::cout << " getObject2 invoked!" << std::endl; } }; struct Serializer { template<typename ... T> void operator()(char const * tag, std::tuple<T...> const & aTuple) { std::cout << "\'" << tag << "\': \'"; putStream(std::cout, aTuple, std::make_index_sequence<sizeof...(T)>{}); std::cout << "\'\n"; } template<typename Tuple, size_t ... Idx> static std::ostream & putStream(std::ostream & aOs, Tuple const & t, std::index_sequence<Idx...>) { return (aOs << ... << std::get<Idx>(t)); } }; constexpr mil::object_invoke invoke { mil::useAcceptor<Serializer>(), mil::delayedInvoke<&Object3::getObject2, &Object2::getObject1, &Object1::getValue>("call1"), mil::delayedInvoke<&Object3::getObject2, &Object2::getObject1, &Object1::getValue>("call2"), mil::delayedInvoke<&Object3::getObject2, &Object2::getObject1, &Object1::getValue>("call3"), mil::delayedInvoke<&Object3::getObject2, &Object2::getObject1, &Object1::getValue>("call4") }; int main() { Object3 obj {}; Serializer si; invoke(obj, si); return 0; }
29.836207
103
0.60705
[ "object" ]
592688fdb2f6ab64421658406ad52526c0b49de0
4,071
cpp
C++
src/route_planner.cpp
TypHo22/A_Star_Algorithm_OpenStreetMaps
30a718ca6bbbea95db235e372ad6db16ff334dd2
[ "MIT" ]
2
2021-08-20T11:26:36.000Z
2021-12-27T14:44:12.000Z
src/route_planner.cpp
TypHo22/A_Star_Algorithm_OpenStreetMaps
30a718ca6bbbea95db235e372ad6db16ff334dd2
[ "MIT" ]
null
null
null
src/route_planner.cpp
TypHo22/A_Star_Algorithm_OpenStreetMaps
30a718ca6bbbea95db235e372ad6db16ff334dd2
[ "MIT" ]
2
2021-09-05T16:33:27.000Z
2021-12-27T14:44:10.000Z
#include "route_planner.h" #include <algorithm> RoutePlanner::RoutePlanner(RouteModel &model, float start_x, float start_y, float end_x, float end_y): m_Model(model) { // Convert inputs to percentage: start_x *= 0.01; start_y *= 0.01; end_x *= 0.01; end_y *= 0.01; start_node = &m_Model.FindClosestNode(start_x,start_y); end_node = &m_Model.FindClosestNode(end_x,end_y); } float RoutePlanner::CalculateHValue(RouteModel::Node const *node) { //-Task 3 start float hValue = node->distance(*this->end_node); return hValue; //-Task 3 end } void RoutePlanner::AddNeighbors(RouteModel::Node *current_node) { current_node->FindNeighbors(); for(size_t a = 0; a < current_node->neighbors.size();a++) { if(!current_node->neighbors[a]->visited) { current_node->neighbors[a]->parent = current_node; current_node->neighbors[a]->g_value = current_node->g_value + current_node->neighbors[a]->distance(*current_node); current_node->neighbors[a]->h_value = CalculateHValue(current_node->neighbors[a]); this->open_list.push_back(current_node->neighbors[a]); current_node->neighbors[a]->visited = true; } } } // Function to sort the map according // to value in a (key-value) pairs namespace local { bool cmp(std::pair<RouteModel::Node*,float>& a, std::pair<RouteModel::Node*,float>& b) { return a.second < b.second; } std::vector<std::pair<RouteModel::Node*,float>> sort(std::unordered_map<RouteModel::Node*,float>& M) { // Declare vector of pairs std::vector<std::pair<RouteModel::Node*,float> > A; // Copy key-value pair from Map // to vector of pairs for (auto& it : M) { A.push_back(it); } // Sort using comparator function sort(A.begin(), A.end(), cmp); return A; } } RouteModel::Node *RoutePlanner::NextNode() { std::unordered_map<RouteModel::Node*,float> myMap; for(size_t a = 0; a < open_list.size();a++) { myMap.insert({open_list[a],open_list[a]->h_value + open_list[a]->g_value}); } std::vector<std::pair<RouteModel::Node*,float>> sorted = local::sort(myMap); for(size_t b = 0; b < sorted.size();b++) { 
open_list[b] = sorted[b].first; } open_list.erase(open_list.begin()); return sorted[0].first; } std::vector<RouteModel::Node> RoutePlanner::ConstructFinalPath(RouteModel::Node *current_node) { // Create path_found vector distance = 0.0f; std::vector<RouteModel::Node> path_found; // TODO: Implement your solution here. // For each node in the path while (current_node->parent != nullptr) { // Add the distance distance += current_node->distance(*current_node->parent); // Store node in the path path_found.push_back(*current_node); // Move to the parent node current_node = current_node->parent; } // Push back initial node path_found.push_back(*current_node); // Sort path (end2start) -> (start2end) std::reverse(path_found.begin(), path_found.end()); distance *= m_Model.MetricScale(); // Multiply the distance by the scale of the map to get meters. return path_found; } void RoutePlanner::AStarSearch() { RouteModel::Node *current_node = nullptr; // TODO: Implement your solution here. this->open_list.push_back(this->start_node); // Add start node to open list this->start_node->visited = true; // While there are still nodes to check while (open_list.size() > 0) { // Get the next node current_node = this->NextNode(); if (current_node == this->end_node) { this->m_Model.path = this->ConstructFinalPath(current_node); return; } // If not done, expand search to current node's neighbors this->AddNeighbors(current_node); } // If the search is not successful std::cout << "No path was found! :(\n"; return; }
28.468531
125
0.629329
[ "vector", "model" ]
592bc92e8a43a81d58a9cc9168134e770198327d
998
cpp
C++
src/cell-simulation/simulation/resource.cpp
firestack/cell-simulation
11eacc685afe7c283c1fc2ed6f8b312785f45f98
[ "MIT" ]
null
null
null
src/cell-simulation/simulation/resource.cpp
firestack/cell-simulation
11eacc685afe7c283c1fc2ed6f8b312785f45f98
[ "MIT" ]
null
null
null
src/cell-simulation/simulation/resource.cpp
firestack/cell-simulation
11eacc685afe7c283c1fc2ed6f8b312785f45f98
[ "MIT" ]
null
null
null
#include "resource.h" #include <nex/math/mathhelper.h> Resource::Resource(real32 max, vec2f location, World& world, type::ResourceType type) : Entity(location, world, type::Resource), m_max(max), m_amount(max), m_resourceType(type) { m_shape.setPosition(location.x, location.y); m_friction = vec2f(0.98f); } real32 Resource::consume(real32 amount) { real32 toEat = nx::clamp(amount, 0.0f, m_amount); m_amount -= toEat; return toEat; } void Resource::update(const float dt) { // Scale the radius to the amount of resource left. m_radius = (m_amount / m_max) * 8.0f; // Update the shape to reflect the new radius. m_shape.setRadius(m_radius); m_shape.setOrigin(m_radius, m_radius); //m_amount += 0.01f * dt; // Don't let the resource over regenerate. if (m_amount > m_max) { m_amount = m_max; } // Die when out of resource. if (m_amount < 1.0f) { m_alive = false; } Entity::update(dt); }
22.681818
87
0.642285
[ "shape" ]
592bc9f06684a912b77937c834c19a6da87926b6
429,763
cc
C++
project4/mariadb/server/storage/rocksdb/ha_rocksdb.cc
jiunbae/ITE4065
3b9fcf9317e93ca7c829f1438b85f0f5ea2885db
[ "MIT" ]
11
2017-10-28T08:41:08.000Z
2021-06-24T07:24:21.000Z
project4/mariadb/server/storage/rocksdb/ha_rocksdb.cc
jiunbae/ITE4065
3b9fcf9317e93ca7c829f1438b85f0f5ea2885db
[ "MIT" ]
null
null
null
project4/mariadb/server/storage/rocksdb/ha_rocksdb.cc
jiunbae/ITE4065
3b9fcf9317e93ca7c829f1438b85f0f5ea2885db
[ "MIT" ]
4
2017-09-07T09:33:26.000Z
2021-02-19T07:45:08.000Z
/* Copyright (c) 2012, Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation #endif #define MYSQL_SERVER 1 /* For use of 'PRIu64': */ #define __STDC_FORMAT_MACROS #include <my_config.h> #include <inttypes.h> /* The C++ file's header */ #include "./ha_rocksdb.h" /* C++ standard header files */ #include <algorithm> #include <limits> #include <map> #include <queue> #include <set> #include <string> #include <vector> /* MySQL includes */ #include "./debug_sync.h" #include "./my_bit.h" #include "./my_stacktrace.h" #include "./my_sys.h" #include "./sql_audit.h" #include "./sql_table.h" #include "./sql_hset.h" #include <mysql/psi/mysql_table.h> #ifdef MARIAROCKS_NOT_YET #include <mysql/thread_pool_priv.h> #endif #include <mysys_err.h> // Both MySQL and RocksDB define the same constant. To avoid compilation errors // till we make the fix in RocksDB, we'll temporary undefine it here. 
#undef CACHE_LINE_SIZE /* RocksDB includes */ #include "monitoring/histogram.h" #include "rocksdb/compaction_filter.h" #include "rocksdb/env.h" #include "rocksdb/persistent_cache.h" #include "rocksdb/rate_limiter.h" #include "rocksdb/slice_transform.h" #include "rocksdb/thread_status.h" #include "rocksdb/utilities/checkpoint.h" #include "rocksdb/utilities/convenience.h" #include "rocksdb/utilities/memory_util.h" #include "rocksdb/utilities/sim_cache.h" #include "util/stop_watch.h" /* MyRocks includes */ #include "./event_listener.h" #include "./ha_rocksdb_proto.h" #include "./logger.h" #include "./rdb_cf_manager.h" #include "./rdb_cf_options.h" #include "./rdb_datadic.h" #include "./rdb_i_s.h" #include "./rdb_index_merge.h" #include "./rdb_mutex_wrapper.h" #include "./rdb_psi.h" #include "./rdb_threads.h" #include "./rdb_mariadb_server_port.h" // Internal MySQL APIs not exposed in any header. extern "C" { /** Mark transaction to rollback and mark error as fatal to a sub-statement. @param thd Thread handle @param all TRUE <=> rollback main transaction. */ void thd_mark_transaction_to_rollback(MYSQL_THD thd, bool all); /** * Get the user thread's binary logging format * @param thd user thread * @return Value to be used as index into the binlog_format_names array */ int thd_binlog_format(const MYSQL_THD thd); /** * Check if binary logging is filtered for thread's current db. * @param thd Thread handle * @retval 1 the query is not filtered, 0 otherwise. */ bool thd_binlog_filter_ok(const MYSQL_THD thd); } MYSQL_PLUGIN_IMPORT bool my_disable_leak_check; namespace myrocks { static st_global_stats global_stats; static st_export_stats export_stats; static st_memory_stats memory_stats; static st_io_stall_stats io_stall_stats; const std::string DEFAULT_CF_NAME("default"); const std::string DEFAULT_SYSTEM_CF_NAME("__system__"); const std::string PER_INDEX_CF_NAME("$per_index_cf"); /** Updates row counters based on the table type and operation type. 
*/ void ha_rocksdb::update_row_stats(const operation_type &type) { DBUG_ASSERT(type < ROWS_MAX); // Find if we are modifying system databases. if (table->s && m_tbl_def->m_is_mysql_system_table) global_stats.system_rows[type].inc(); else global_stats.rows[type].inc(); } void dbug_dump_database(rocksdb::DB *db); static handler *rocksdb_create_handler(my_core::handlerton *hton, my_core::TABLE_SHARE *table_arg, my_core::MEM_ROOT *mem_root); bool can_use_bloom_filter(THD *thd, const Rdb_key_def &kd, const rocksdb::Slice &eq_cond, const bool use_all_keys); static rocksdb::CompactRangeOptions getCompactRangeOptions() { rocksdb::CompactRangeOptions compact_range_options; compact_range_options.bottommost_level_compaction = rocksdb::BottommostLevelCompaction::kForce; compact_range_options.exclusive_manual_compaction = false; return compact_range_options; } /////////////////////////////////////////////////////////// // Parameters and settings /////////////////////////////////////////////////////////// static char *rocksdb_default_cf_options = nullptr; static char *rocksdb_override_cf_options = nullptr; static char *rocksdb_update_cf_options = nullptr; /////////////////////////////////////////////////////////// // Globals /////////////////////////////////////////////////////////// handlerton *rocksdb_hton; rocksdb::TransactionDB *rdb = nullptr; rocksdb::HistogramImpl *commit_latency_stats = nullptr; static std::shared_ptr<rocksdb::Statistics> rocksdb_stats; static std::unique_ptr<rocksdb::Env> flashcache_aware_env; static std::shared_ptr<Rdb_tbl_prop_coll_factory> properties_collector_factory; Rdb_dict_manager dict_manager; Rdb_cf_manager cf_manager; Rdb_ddl_manager ddl_manager; Rdb_binlog_manager binlog_manager; #if !defined(_WIN32) && !defined(__APPLE__) Rdb_io_watchdog *io_watchdog = nullptr; #endif /** MyRocks background thread control N.B. 
This is besides RocksDB's own background threads (@see rocksdb::CancelAllBackgroundWork()) */ static Rdb_background_thread rdb_bg_thread; // List of table names (using regex) that are exceptions to the strict // collation check requirement. Regex_list_handler *rdb_collation_exceptions; static const char **rdb_get_error_messages(int nr); static void rocksdb_flush_all_memtables() { const Rdb_cf_manager &cf_manager = rdb_get_cf_manager(); for (const auto &cf_handle : cf_manager.get_all_cf()) { rdb->Flush(rocksdb::FlushOptions(), cf_handle); } } static void rocksdb_compact_column_family_stub( THD *const thd, struct st_mysql_sys_var *const var, void *const var_ptr, const void *const save) {} static int rocksdb_compact_column_family(THD *const thd, struct st_mysql_sys_var *const var, void *const var_ptr, struct st_mysql_value *const value) { char buff[STRING_BUFFER_USUAL_SIZE]; int len = sizeof(buff); DBUG_ASSERT(value != nullptr); if (const char *const cf = value->val_str(value, buff, &len)) { auto cfh = cf_manager.get_cf(cf); if (cfh != nullptr && rdb != nullptr) { sql_print_information("RocksDB: Manual compaction of column family: %s\n", cf); rdb->CompactRange(getCompactRangeOptions(), cfh, nullptr, nullptr); } } return HA_EXIT_SUCCESS; } /////////////////////////////////////////////////////////// // Hash map: table name => open table handler /////////////////////////////////////////////////////////// namespace // anonymous namespace = not visible outside this source file { const ulong TABLE_HASH_SIZE = 32; typedef Hash_set<Rdb_table_handler> Rdb_table_set; struct Rdb_open_tables_map { /* Hash table used to track the handlers of open tables */ Rdb_table_set m_hash; /* The mutex used to protect the hash table */ mutable mysql_mutex_t m_mutex; static uchar *get_hash_key(const Rdb_table_handler *const table_handler, size_t *const length, my_bool not_used MY_ATTRIBUTE((__unused__))); Rdb_table_handler *get_table_handler(const char *const table_name); void 
release_table_handler(Rdb_table_handler *const table_handler); Rdb_open_tables_map() : m_hash(get_hash_key, system_charset_info) { } std::vector<std::string> get_table_names(void) const; }; } // anonymous namespace static Rdb_open_tables_map rdb_open_tables; static std::string rdb_normalize_dir(std::string dir) { while (dir.size() > 0 && dir.back() == '/') { dir.resize(dir.size() - 1); } return dir; } static int rocksdb_create_checkpoint( THD *const thd MY_ATTRIBUTE((__unused__)), struct st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), void *const save MY_ATTRIBUTE((__unused__)), struct st_mysql_value *const value) { char buf[FN_REFLEN]; int len = sizeof(buf); const char *const checkpoint_dir_raw = value->val_str(value, buf, &len); if (checkpoint_dir_raw) { if (rdb != nullptr) { std::string checkpoint_dir = rdb_normalize_dir(checkpoint_dir_raw); // NO_LINT_DEBUG sql_print_information("RocksDB: creating checkpoint in directory : %s\n", checkpoint_dir.c_str()); rocksdb::Checkpoint *checkpoint; auto status = rocksdb::Checkpoint::Create(rdb, &checkpoint); // We can only return HA_EXIT_FAILURE/HA_EXIT_SUCCESS here which is why // the return code is ignored, but by calling into rdb_error_to_mysql, // it will call my_error for us, which will propogate up to the client. 
int rc __attribute__((__unused__)); if (status.ok()) { status = checkpoint->CreateCheckpoint(checkpoint_dir.c_str()); delete checkpoint; if (status.ok()) { sql_print_information( "RocksDB: created checkpoint in directory : %s\n", checkpoint_dir.c_str()); return HA_EXIT_SUCCESS; } else { rc = ha_rocksdb::rdb_error_to_mysql(status); } } else { rc = ha_rocksdb::rdb_error_to_mysql(status); } } } return HA_EXIT_FAILURE; } /* This method is needed to indicate that the ROCKSDB_CREATE_CHECKPOINT command is not read-only */ static void rocksdb_create_checkpoint_stub(THD *const thd, struct st_mysql_sys_var *const var, void *const var_ptr, const void *const save) {} static void rocksdb_force_flush_memtable_now_stub( THD *const thd, struct st_mysql_sys_var *const var, void *const var_ptr, const void *const save) {} static int rocksdb_force_flush_memtable_now( THD *const thd, struct st_mysql_sys_var *const var, void *const var_ptr, struct st_mysql_value *const value) { sql_print_information("RocksDB: Manual memtable flush."); rocksdb_flush_all_memtables(); return HA_EXIT_SUCCESS; } static void rocksdb_force_flush_memtable_and_lzero_now_stub( THD *const thd, struct st_mysql_sys_var *const var, void *const var_ptr, const void *const save) {} static int rocksdb_force_flush_memtable_and_lzero_now( THD *const thd, struct st_mysql_sys_var *const var, void *const var_ptr, struct st_mysql_value *const value) { sql_print_information("RocksDB: Manual memtable and L0 flush."); rocksdb_flush_all_memtables(); const Rdb_cf_manager &cf_manager = rdb_get_cf_manager(); rocksdb::CompactionOptions c_options = rocksdb::CompactionOptions(); rocksdb::ColumnFamilyMetaData metadata; rocksdb::ColumnFamilyDescriptor cf_descr; for (const auto &cf_handle : cf_manager.get_all_cf()) { rdb->GetColumnFamilyMetaData(cf_handle, &metadata); cf_handle->GetDescriptor(&cf_descr); c_options.output_file_size_limit = cf_descr.options.target_file_size_base; DBUG_ASSERT(metadata.levels[0].level == 0); 
std::vector<std::string> file_names; for (auto &file : metadata.levels[0].files) { file_names.emplace_back(file.db_path + file.name); } if (!file_names.empty()) { rocksdb::Status s; s = rdb->CompactFiles(c_options, cf_handle, file_names, 1); if (!s.ok() && !s.IsAborted()) { rdb_handle_io_error(s, RDB_IO_ERROR_GENERAL); return HA_EXIT_FAILURE; } } } return HA_EXIT_SUCCESS; } static void rocksdb_drop_index_wakeup_thread( my_core::THD *const thd MY_ATTRIBUTE((__unused__)), struct st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save); static my_bool rocksdb_pause_background_work = 0; static mysql_mutex_t rdb_sysvars_mutex; static void rocksdb_set_pause_background_work( my_core::THD *const thd MY_ATTRIBUTE((__unused__)), struct st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save) { RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); const bool pause_requested = *static_cast<const bool *>(save); if (rocksdb_pause_background_work != pause_requested) { if (pause_requested) { rdb->PauseBackgroundWork(); } else { rdb->ContinueBackgroundWork(); } rocksdb_pause_background_work = pause_requested; } RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); } static void rocksdb_set_compaction_options(THD *thd, struct st_mysql_sys_var *var, void *var_ptr, const void *save); static void rocksdb_set_table_stats_sampling_pct(THD *thd, struct st_mysql_sys_var *var, void *var_ptr, const void *save); static void rocksdb_set_rate_limiter_bytes_per_sec(THD *thd, struct st_mysql_sys_var *var, void *var_ptr, const void *save); static void rocksdb_set_sst_mgr_rate_bytes_per_sec(THD *thd, struct st_mysql_sys_var *var, void *var_ptr, const void *save); static void rocksdb_set_delayed_write_rate(THD *thd, struct st_mysql_sys_var *var, void *var_ptr, const void *save); static void rocksdb_set_max_latest_deadlocks(THD *thd, struct st_mysql_sys_var *var, void *var_ptr, const 
void *save); static void rdb_set_collation_exception_list(const char *exception_list); static void rocksdb_set_collation_exception_list(THD *thd, struct st_mysql_sys_var *var, void *var_ptr, const void *save); void rocksdb_set_update_cf_options(THD *thd, struct st_mysql_sys_var *var, void *var_ptr, const void *save); static void rocksdb_set_bulk_load(THD *thd, struct st_mysql_sys_var *var MY_ATTRIBUTE((__unused__)), void *var_ptr, const void *save); static void rocksdb_set_bulk_load_allow_unsorted( THD *thd, struct st_mysql_sys_var *var MY_ATTRIBUTE((__unused__)), void *var_ptr, const void *save); static void rocksdb_set_max_background_jobs(THD *thd, struct st_mysql_sys_var *const var, void *const var_ptr, const void *const save); ////////////////////////////////////////////////////////////////////////////// // Options definitions ////////////////////////////////////////////////////////////////////////////// static long long rocksdb_block_cache_size; static long long rocksdb_sim_cache_size; static my_bool rocksdb_use_clock_cache; /* Use unsigned long long instead of uint64_t because of MySQL compatibility */ static unsigned long long // NOLINT(runtime/int) rocksdb_rate_limiter_bytes_per_sec; static unsigned long long // NOLINT(runtime/int) rocksdb_sst_mgr_rate_bytes_per_sec; static unsigned long long rocksdb_delayed_write_rate; static uint32_t rocksdb_max_latest_deadlocks; static unsigned long // NOLINT(runtime/int) rocksdb_persistent_cache_size_mb; static ulong rocksdb_info_log_level; static char *rocksdb_wal_dir; static char *rocksdb_persistent_cache_path; static ulong rocksdb_index_type; static uint32_t rocksdb_flush_log_at_trx_commit; static uint32_t rocksdb_debug_optimizer_n_rows; static my_bool rocksdb_force_compute_memtable_stats; static uint32_t rocksdb_force_compute_memtable_stats_cachetime; static my_bool rocksdb_debug_optimizer_no_zero_cardinality; static uint32_t rocksdb_wal_recovery_mode; static uint32_t rocksdb_access_hint_on_compaction_start; static 
char *rocksdb_compact_cf_name; static char *rocksdb_checkpoint_name; static my_bool rocksdb_signal_drop_index_thread; static my_bool rocksdb_strict_collation_check = 1; static my_bool rocksdb_enable_2pc = 0; static char *rocksdb_strict_collation_exceptions; static my_bool rocksdb_collect_sst_properties = 1; static my_bool rocksdb_force_flush_memtable_now_var = 0; static my_bool rocksdb_force_flush_memtable_and_lzero_now_var = 0; static my_bool rocksdb_enable_ttl = 1; static my_bool rocksdb_enable_ttl_read_filtering = 1; static int rocksdb_debug_ttl_rec_ts = 0; static int rocksdb_debug_ttl_snapshot_ts = 0; static int rocksdb_debug_ttl_read_filter_ts = 0; static my_bool rocksdb_debug_ttl_ignore_pk = 0; static my_bool rocksdb_reset_stats = 0; static uint32_t rocksdb_io_write_timeout_secs = 0; static uint64_t rocksdb_number_stat_computes = 0; static uint32_t rocksdb_seconds_between_stat_computes = 3600; static long long rocksdb_compaction_sequential_deletes = 0l; static long long rocksdb_compaction_sequential_deletes_window = 0l; static long long rocksdb_compaction_sequential_deletes_file_size = 0l; static uint32_t rocksdb_validate_tables = 1; static char *rocksdb_datadir; static uint32_t rocksdb_table_stats_sampling_pct; static my_bool rocksdb_enable_bulk_load_api = 1; static my_bool rocksdb_print_snapshot_conflict_queries = 0; static my_bool rocksdb_large_prefix = 0; char *compression_types_val= const_cast<char*>(get_rocksdb_supported_compression_types()); std::atomic<uint64_t> rocksdb_snapshot_conflict_errors(0); std::atomic<uint64_t> rocksdb_wal_group_syncs(0); static std::unique_ptr<rocksdb::DBOptions> rdb_init_rocksdb_db_options(void) { auto o = std::unique_ptr<rocksdb::DBOptions>(new rocksdb::DBOptions()); o->create_if_missing = true; o->listeners.push_back(std::make_shared<Rdb_event_listener>(&ddl_manager)); o->info_log_level = rocksdb::InfoLogLevel::INFO_LEVEL; o->max_subcompactions = DEFAULT_SUBCOMPACTIONS; o->concurrent_prepare = true; o->manual_wal_flush = 
true; return o; } /* DBOptions contains Statistics and needs to be destructed last */ static std::unique_ptr<rocksdb::BlockBasedTableOptions> rocksdb_tbl_options = std::unique_ptr<rocksdb::BlockBasedTableOptions>( new rocksdb::BlockBasedTableOptions()); static std::unique_ptr<rocksdb::DBOptions> rocksdb_db_options = rdb_init_rocksdb_db_options(); static std::shared_ptr<rocksdb::RateLimiter> rocksdb_rate_limiter; /* This enum needs to be kept up to date with rocksdb::InfoLogLevel */ static const char *info_log_level_names[] = {"debug_level", "info_level", "warn_level", "error_level", "fatal_level", NullS}; static TYPELIB info_log_level_typelib = { array_elements(info_log_level_names) - 1, "info_log_level_typelib", info_log_level_names, nullptr}; static void rocksdb_set_rocksdb_info_log_level( THD *const thd, struct st_mysql_sys_var *const var, void *const var_ptr, const void *const save) { DBUG_ASSERT(save != nullptr); RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); rocksdb_info_log_level = *static_cast<const uint64_t *>(save); rocksdb_db_options->info_log->SetInfoLogLevel( static_cast<const rocksdb::InfoLogLevel>(rocksdb_info_log_level)); RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); } static void rocksdb_set_reset_stats( my_core::THD *const /* unused */, my_core::st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), void *const var_ptr, const void *const save) { DBUG_ASSERT(save != nullptr); DBUG_ASSERT(rdb != nullptr); DBUG_ASSERT(rocksdb_stats != nullptr); RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); *static_cast<bool *>(var_ptr) = *static_cast<const bool *>(save); if (rocksdb_reset_stats) { rocksdb::Status s = rdb->ResetStats(); // RocksDB will always return success. Let's document this assumption here // as well so that we'll get immediately notified when contract changes. 
DBUG_ASSERT(s == rocksdb::Status::OK()); s = rocksdb_stats->Reset(); DBUG_ASSERT(s == rocksdb::Status::OK()); } RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); } static void rocksdb_set_io_write_timeout( my_core::THD *const thd MY_ATTRIBUTE((__unused__)), my_core::st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save) { DBUG_ASSERT(save != nullptr); DBUG_ASSERT(rdb != nullptr); #if !defined(_WIN32) && !defined(__APPLE__) DBUG_ASSERT(io_watchdog != nullptr); #endif RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); const uint32_t new_val = *static_cast<const uint32_t *>(save); rocksdb_io_write_timeout_secs = new_val; #if !defined(_WIN32) && !defined(__APPLE__) io_watchdog->reset_timeout(rocksdb_io_write_timeout_secs); #endif RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); } static const char *index_type_names[] = {"kBinarySearch", "kHashSearch", NullS}; static TYPELIB index_type_typelib = {array_elements(index_type_names) - 1, "index_type_typelib", index_type_names, nullptr}; const ulong RDB_MAX_LOCK_WAIT_SECONDS = 1024 * 1024 * 1024; const ulong RDB_MAX_ROW_LOCKS = 1024 * 1024 * 1024; const ulong RDB_DEFAULT_BULK_LOAD_SIZE = 1000; const ulong RDB_MAX_BULK_LOAD_SIZE = 1024 * 1024 * 1024; const size_t RDB_DEFAULT_MERGE_BUF_SIZE = 64 * 1024 * 1024; const size_t RDB_MIN_MERGE_BUF_SIZE = 100; const size_t RDB_DEFAULT_MERGE_COMBINE_READ_SIZE = 1024 * 1024 * 1024; const size_t RDB_MIN_MERGE_COMBINE_READ_SIZE = 100; const size_t RDB_DEFAULT_MERGE_TMP_FILE_REMOVAL_DELAY = 0; const size_t RDB_MIN_MERGE_TMP_FILE_REMOVAL_DELAY = 0; const int64 RDB_DEFAULT_BLOCK_CACHE_SIZE = 512 * 1024 * 1024; const int64 RDB_MIN_BLOCK_CACHE_SIZE = 1024; const int RDB_MAX_CHECKSUMS_PCT = 100; const ulong RDB_DEADLOCK_DETECT_DEPTH = 50; // TODO: 0 means don't wait at all, and we don't support it yet? 
static MYSQL_THDVAR_ULONG(lock_wait_timeout, PLUGIN_VAR_RQCMDARG, "Number of seconds to wait for lock", nullptr, nullptr, /*default*/ 1, /*min*/ 1, /*max*/ RDB_MAX_LOCK_WAIT_SECONDS, 0); static MYSQL_THDVAR_BOOL(deadlock_detect, PLUGIN_VAR_RQCMDARG, "Enables deadlock detection", nullptr, nullptr, FALSE); static MYSQL_THDVAR_ULONG(deadlock_detect_depth, PLUGIN_VAR_RQCMDARG, "Number of transactions deadlock detection will " "traverse through before assuming deadlock", nullptr, nullptr, /*default*/ RDB_DEADLOCK_DETECT_DEPTH, /*min*/ 2, /*max*/ ULONG_MAX, 0); static MYSQL_THDVAR_BOOL( trace_sst_api, PLUGIN_VAR_RQCMDARG, "Generate trace output in the log for each call to the SstFileWriter", nullptr, nullptr, FALSE); static MYSQL_THDVAR_BOOL( bulk_load, PLUGIN_VAR_RQCMDARG, "Use bulk-load mode for inserts. This disables " "unique_checks and enables rocksdb_commit_in_the_middle.", nullptr, rocksdb_set_bulk_load, FALSE); static MYSQL_THDVAR_BOOL(bulk_load_allow_unsorted, PLUGIN_VAR_RQCMDARG, "Allow unsorted input during bulk-load. 
" "Can be changed only when bulk load is disabled.", nullptr, rocksdb_set_bulk_load_allow_unsorted, FALSE); static MYSQL_SYSVAR_BOOL(enable_bulk_load_api, rocksdb_enable_bulk_load_api, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "Enables using SstFileWriter for bulk loading", nullptr, nullptr, rocksdb_enable_bulk_load_api); static MYSQL_THDVAR_STR(tmpdir, PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_MEMALLOC, "Directory for temporary files during DDL operations.", nullptr, nullptr, ""); static MYSQL_THDVAR_STR( skip_unique_check_tables, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC, "Skip unique constraint checking for the specified tables", nullptr, nullptr, ".*"); static MYSQL_THDVAR_BOOL( commit_in_the_middle, PLUGIN_VAR_RQCMDARG, "Commit rows implicitly every rocksdb_bulk_load_size, on bulk load/insert, " "update and delete", nullptr, nullptr, FALSE); static MYSQL_THDVAR_BOOL( blind_delete_primary_key, PLUGIN_VAR_RQCMDARG, "Deleting rows by primary key lookup, without reading rows (Blind Deletes)." " Blind delete is disabled if the table has secondary key", nullptr, nullptr, FALSE); static MYSQL_THDVAR_STR( read_free_rpl_tables, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC, "List of tables that will use read-free replication on the slave " "(i.e. not lookup a row during replication)", nullptr, nullptr, ""); static MYSQL_THDVAR_BOOL(skip_bloom_filter_on_read, PLUGIN_VAR_RQCMDARG, "Skip using bloom filter for reads", nullptr, nullptr, FALSE); static MYSQL_THDVAR_ULONG(max_row_locks, PLUGIN_VAR_RQCMDARG, "Maximum number of locks a transaction can have", nullptr, nullptr, /*default*/ RDB_MAX_ROW_LOCKS, /*min*/ 1, /*max*/ RDB_MAX_ROW_LOCKS, 0); static MYSQL_THDVAR_ULONGLONG( write_batch_max_bytes, PLUGIN_VAR_RQCMDARG, "Maximum size of write batch in bytes. 
0 means no limit.", nullptr, nullptr, /* default */ 0, /* min */ 0, /* max */ SIZE_T_MAX, 1); static MYSQL_THDVAR_BOOL( lock_scanned_rows, PLUGIN_VAR_RQCMDARG, "Take and hold locks on rows that are scanned but not updated", nullptr, nullptr, FALSE); static MYSQL_THDVAR_ULONG(bulk_load_size, PLUGIN_VAR_RQCMDARG, "Max #records in a batch for bulk-load mode", nullptr, nullptr, /*default*/ RDB_DEFAULT_BULK_LOAD_SIZE, /*min*/ 1, /*max*/ RDB_MAX_BULK_LOAD_SIZE, 0); static MYSQL_THDVAR_ULONGLONG( merge_buf_size, PLUGIN_VAR_RQCMDARG, "Size to allocate for merge sort buffers written out to disk " "during inplace index creation.", nullptr, nullptr, /* default (64MB) */ RDB_DEFAULT_MERGE_BUF_SIZE, /* min (100B) */ RDB_MIN_MERGE_BUF_SIZE, /* max */ SIZE_T_MAX, 1); static MYSQL_THDVAR_ULONGLONG( merge_combine_read_size, PLUGIN_VAR_RQCMDARG, "Size that we have to work with during combine (reading from disk) phase " "of " "external sort during fast index creation.", nullptr, nullptr, /* default (1GB) */ RDB_DEFAULT_MERGE_COMBINE_READ_SIZE, /* min (100B) */ RDB_MIN_MERGE_COMBINE_READ_SIZE, /* max */ SIZE_T_MAX, 1); static MYSQL_THDVAR_ULONGLONG( merge_tmp_file_removal_delay_ms, PLUGIN_VAR_RQCMDARG, "Fast index creation creates a large tmp file on disk during index " "creation. Removing this large file all at once when index creation is " "complete can cause trim stalls on Flash. This variable specifies a " "duration to sleep (in milliseconds) between calling chsize() to truncate " "the file in chunks. 
The chunk size is the same as merge_buf_size.", nullptr, nullptr, /* default (0ms) */ RDB_DEFAULT_MERGE_TMP_FILE_REMOVAL_DELAY, /* min (0ms) */ RDB_MIN_MERGE_TMP_FILE_REMOVAL_DELAY, /* max */ SIZE_T_MAX, 1); static MYSQL_SYSVAR_BOOL( create_if_missing, *reinterpret_cast<my_bool *>(&rocksdb_db_options->create_if_missing), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::create_if_missing for RocksDB", nullptr, nullptr, rocksdb_db_options->create_if_missing); static MYSQL_SYSVAR_BOOL( concurrent_prepare, *reinterpret_cast<my_bool *>(&rocksdb_db_options->concurrent_prepare), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::concurrent_prepare for RocksDB", nullptr, nullptr, rocksdb_db_options->concurrent_prepare); static MYSQL_SYSVAR_BOOL( manual_wal_flush, *reinterpret_cast<my_bool *>(&rocksdb_db_options->manual_wal_flush), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::manual_wal_flush for RocksDB", nullptr, nullptr, rocksdb_db_options->manual_wal_flush); static MYSQL_SYSVAR_BOOL( create_missing_column_families, *reinterpret_cast<my_bool *>( &rocksdb_db_options->create_missing_column_families), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::create_missing_column_families for RocksDB", nullptr, nullptr, rocksdb_db_options->create_missing_column_families); static MYSQL_SYSVAR_BOOL( error_if_exists, *reinterpret_cast<my_bool *>(&rocksdb_db_options->error_if_exists), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::error_if_exists for RocksDB", nullptr, nullptr, rocksdb_db_options->error_if_exists); static MYSQL_SYSVAR_BOOL( paranoid_checks, *reinterpret_cast<my_bool *>(&rocksdb_db_options->paranoid_checks), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::paranoid_checks for RocksDB", nullptr, nullptr, rocksdb_db_options->paranoid_checks); static MYSQL_SYSVAR_ULONGLONG( rate_limiter_bytes_per_sec, rocksdb_rate_limiter_bytes_per_sec, PLUGIN_VAR_RQCMDARG, "DBOptions::rate_limiter bytes_per_sec for RocksDB", nullptr, 
rocksdb_set_rate_limiter_bytes_per_sec, /* default */ 0L, /* min */ 0L, /* max */ MAX_RATE_LIMITER_BYTES_PER_SEC, 0); static MYSQL_SYSVAR_ULONGLONG( sst_mgr_rate_bytes_per_sec, rocksdb_sst_mgr_rate_bytes_per_sec, PLUGIN_VAR_RQCMDARG, "DBOptions::sst_file_manager rate_bytes_per_sec for RocksDB", nullptr, rocksdb_set_sst_mgr_rate_bytes_per_sec, /* default */ DEFAULT_SST_MGR_RATE_BYTES_PER_SEC, /* min */ 0L, /* max */ UINT64_MAX, 0); static MYSQL_SYSVAR_ULONGLONG(delayed_write_rate, rocksdb_delayed_write_rate, PLUGIN_VAR_RQCMDARG, "DBOptions::delayed_write_rate", nullptr, rocksdb_set_delayed_write_rate, rocksdb_db_options->delayed_write_rate, 0, UINT64_MAX, 0); static MYSQL_SYSVAR_UINT(max_latest_deadlocks, rocksdb_max_latest_deadlocks, PLUGIN_VAR_RQCMDARG, "Maximum number of recent " "deadlocks to store", nullptr, rocksdb_set_max_latest_deadlocks, rocksdb::kInitialMaxDeadlocks, 0, UINT32_MAX, 0); static MYSQL_SYSVAR_ENUM( info_log_level, rocksdb_info_log_level, PLUGIN_VAR_RQCMDARG, "Filter level for info logs to be written mysqld error log. " "Valid values include 'debug_level', 'info_level', 'warn_level'" "'error_level' and 'fatal_level'.", nullptr, rocksdb_set_rocksdb_info_log_level, rocksdb::InfoLogLevel::ERROR_LEVEL, &info_log_level_typelib); static MYSQL_THDVAR_INT( perf_context_level, PLUGIN_VAR_RQCMDARG, "Perf Context Level for rocksdb internal timer stat collection", nullptr, nullptr, /* default */ rocksdb::PerfLevel::kUninitialized, /* min */ rocksdb::PerfLevel::kUninitialized, /* max */ rocksdb::PerfLevel::kOutOfBounds - 1, 0); static MYSQL_SYSVAR_UINT( wal_recovery_mode, rocksdb_wal_recovery_mode, PLUGIN_VAR_RQCMDARG, "DBOptions::wal_recovery_mode for RocksDB. 
Default is kAbsoluteConsistency", nullptr, nullptr, /* default */ (uint)rocksdb::WALRecoveryMode::kAbsoluteConsistency, /* min */ (uint)rocksdb::WALRecoveryMode::kTolerateCorruptedTailRecords, /* max */ (uint)rocksdb::WALRecoveryMode::kSkipAnyCorruptedRecords, 0); static MYSQL_SYSVAR_SIZE_T(compaction_readahead_size, rocksdb_db_options->compaction_readahead_size, PLUGIN_VAR_RQCMDARG, "DBOptions::compaction_readahead_size for RocksDB", nullptr, nullptr, rocksdb_db_options->compaction_readahead_size, /* min */ 0L, /* max */ SIZE_T_MAX, 0); static MYSQL_SYSVAR_BOOL( new_table_reader_for_compaction_inputs, *reinterpret_cast<my_bool *>( &rocksdb_db_options->new_table_reader_for_compaction_inputs), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::new_table_reader_for_compaction_inputs for RocksDB", nullptr, nullptr, rocksdb_db_options->new_table_reader_for_compaction_inputs); static MYSQL_SYSVAR_UINT( access_hint_on_compaction_start, rocksdb_access_hint_on_compaction_start, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::access_hint_on_compaction_start for RocksDB", nullptr, nullptr, /* default */ (uint)rocksdb::Options::AccessHint::NORMAL, /* min */ (uint)rocksdb::Options::AccessHint::NONE, /* max */ (uint)rocksdb::Options::AccessHint::WILLNEED, 0); static MYSQL_SYSVAR_BOOL( allow_concurrent_memtable_write, *reinterpret_cast<my_bool *>( &rocksdb_db_options->allow_concurrent_memtable_write), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::allow_concurrent_memtable_write for RocksDB", nullptr, nullptr, false); static MYSQL_SYSVAR_BOOL( enable_write_thread_adaptive_yield, *reinterpret_cast<my_bool *>( &rocksdb_db_options->enable_write_thread_adaptive_yield), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::enable_write_thread_adaptive_yield for RocksDB", nullptr, nullptr, false); static MYSQL_SYSVAR_INT(max_open_files, rocksdb_db_options->max_open_files, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::max_open_files for RocksDB", nullptr, 
nullptr, rocksdb_db_options->max_open_files, /* min */ -1, /* max */ INT_MAX, 0); static MYSQL_SYSVAR_UINT64_T(max_total_wal_size, rocksdb_db_options->max_total_wal_size, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::max_total_wal_size for RocksDB", nullptr, nullptr, rocksdb_db_options->max_total_wal_size, /* min */ 0, /* max */ LONGLONG_MAX, 0); static MYSQL_SYSVAR_BOOL( use_fsync, *reinterpret_cast<my_bool *>(&rocksdb_db_options->use_fsync), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::use_fsync for RocksDB", nullptr, nullptr, rocksdb_db_options->use_fsync); static MYSQL_SYSVAR_STR(wal_dir, rocksdb_wal_dir, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::wal_dir for RocksDB", nullptr, nullptr, rocksdb_db_options->wal_dir.c_str()); static MYSQL_SYSVAR_STR( persistent_cache_path, rocksdb_persistent_cache_path, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "Path for BlockBasedTableOptions::persistent_cache for RocksDB", nullptr, nullptr, ""); static MYSQL_SYSVAR_ULONG( persistent_cache_size_mb, rocksdb_persistent_cache_size_mb, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "Size of cache in MB for BlockBasedTableOptions::persistent_cache " "for RocksDB", nullptr, nullptr, rocksdb_persistent_cache_size_mb, /* min */ 0L, /* max */ ULONG_MAX, 0); static MYSQL_SYSVAR_UINT64_T( delete_obsolete_files_period_micros, rocksdb_db_options->delete_obsolete_files_period_micros, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::delete_obsolete_files_period_micros for RocksDB", nullptr, nullptr, rocksdb_db_options->delete_obsolete_files_period_micros, /* min */ 0, /* max */ LONGLONG_MAX, 0); static MYSQL_SYSVAR_INT(max_background_jobs, rocksdb_db_options->max_background_jobs, PLUGIN_VAR_RQCMDARG, "DBOptions::max_background_jobs for RocksDB", nullptr, rocksdb_set_max_background_jobs, rocksdb_db_options->max_background_jobs, /* min */ -1, /* max */ MAX_BACKGROUND_JOBS, 0); static MYSQL_SYSVAR_UINT(max_subcompactions, rocksdb_db_options->max_subcompactions, 
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::max_subcompactions for RocksDB", nullptr, nullptr, rocksdb_db_options->max_subcompactions, /* min */ 1, /* max */ MAX_SUBCOMPACTIONS, 0); static MYSQL_SYSVAR_SIZE_T(max_log_file_size, rocksdb_db_options->max_log_file_size, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::max_log_file_size for RocksDB", nullptr, nullptr, rocksdb_db_options->max_log_file_size, /* min */ 0L, /* max */ SIZE_T_MAX, 0); static MYSQL_SYSVAR_SIZE_T(log_file_time_to_roll, rocksdb_db_options->log_file_time_to_roll, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::log_file_time_to_roll for RocksDB", nullptr, nullptr, rocksdb_db_options->log_file_time_to_roll, /* min */ 0L, /* max */ SIZE_T_MAX, 0); static MYSQL_SYSVAR_SIZE_T(keep_log_file_num, rocksdb_db_options->keep_log_file_num, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::keep_log_file_num for RocksDB", nullptr, nullptr, rocksdb_db_options->keep_log_file_num, /* min */ 0L, /* max */ SIZE_T_MAX, 0); static MYSQL_SYSVAR_UINT64_T(max_manifest_file_size, rocksdb_db_options->max_manifest_file_size, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::max_manifest_file_size for RocksDB", nullptr, nullptr, rocksdb_db_options->max_manifest_file_size, /* min */ 0L, /* max */ ULONGLONG_MAX, 0); static MYSQL_SYSVAR_INT(table_cache_numshardbits, rocksdb_db_options->table_cache_numshardbits, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::table_cache_numshardbits for RocksDB", nullptr, nullptr, rocksdb_db_options->table_cache_numshardbits, /* min */ 0, /* max */ INT_MAX, 0); static MYSQL_SYSVAR_UINT64_T(wal_ttl_seconds, rocksdb_db_options->WAL_ttl_seconds, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::WAL_ttl_seconds for RocksDB", nullptr, nullptr, rocksdb_db_options->WAL_ttl_seconds, /* min */ 0L, /* max */ LONGLONG_MAX, 0); static MYSQL_SYSVAR_UINT64_T(wal_size_limit_mb, rocksdb_db_options->WAL_size_limit_MB, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, 
"DBOptions::WAL_size_limit_MB for RocksDB", nullptr, nullptr, rocksdb_db_options->WAL_size_limit_MB, /* min */ 0L, /* max */ LONGLONG_MAX, 0); static MYSQL_SYSVAR_SIZE_T(manifest_preallocation_size, rocksdb_db_options->manifest_preallocation_size, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::manifest_preallocation_size for RocksDB", nullptr, nullptr, rocksdb_db_options->manifest_preallocation_size, /* min */ 0L, /* max */ SIZE_T_MAX, 0); static MYSQL_SYSVAR_BOOL( use_direct_reads, *reinterpret_cast<my_bool *>(&rocksdb_db_options->use_direct_reads), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::use_direct_reads for RocksDB", nullptr, nullptr, rocksdb_db_options->use_direct_reads); static MYSQL_SYSVAR_BOOL( use_direct_io_for_flush_and_compaction, *reinterpret_cast<my_bool *>(&rocksdb_db_options->use_direct_io_for_flush_and_compaction), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::use_direct_io_for_flush_and_compaction for RocksDB", nullptr, nullptr, rocksdb_db_options->use_direct_io_for_flush_and_compaction); static MYSQL_SYSVAR_BOOL( allow_mmap_reads, *reinterpret_cast<my_bool *>(&rocksdb_db_options->allow_mmap_reads), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::allow_mmap_reads for RocksDB", nullptr, nullptr, rocksdb_db_options->allow_mmap_reads); static MYSQL_SYSVAR_BOOL( allow_mmap_writes, *reinterpret_cast<my_bool *>(&rocksdb_db_options->allow_mmap_writes), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::allow_mmap_writes for RocksDB", nullptr, nullptr, rocksdb_db_options->allow_mmap_writes); static MYSQL_SYSVAR_BOOL( is_fd_close_on_exec, *reinterpret_cast<my_bool *>(&rocksdb_db_options->is_fd_close_on_exec), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::is_fd_close_on_exec for RocksDB", nullptr, nullptr, rocksdb_db_options->is_fd_close_on_exec); static MYSQL_SYSVAR_UINT(stats_dump_period_sec, rocksdb_db_options->stats_dump_period_sec, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, 
"DBOptions::stats_dump_period_sec for RocksDB", nullptr, nullptr, rocksdb_db_options->stats_dump_period_sec, /* min */ 0, /* max */ INT_MAX, 0); static MYSQL_SYSVAR_BOOL( advise_random_on_open, *reinterpret_cast<my_bool *>(&rocksdb_db_options->advise_random_on_open), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::advise_random_on_open for RocksDB", nullptr, nullptr, rocksdb_db_options->advise_random_on_open); static MYSQL_SYSVAR_SIZE_T(db_write_buffer_size, rocksdb_db_options->db_write_buffer_size, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::db_write_buffer_size for RocksDB", nullptr, nullptr, rocksdb_db_options->db_write_buffer_size, /* min */ 0L, /* max */ SIZE_T_MAX, 0); static MYSQL_SYSVAR_BOOL( use_adaptive_mutex, *reinterpret_cast<my_bool *>(&rocksdb_db_options->use_adaptive_mutex), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::use_adaptive_mutex for RocksDB", nullptr, nullptr, rocksdb_db_options->use_adaptive_mutex); static MYSQL_SYSVAR_UINT64_T(bytes_per_sync, rocksdb_db_options->bytes_per_sync, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::bytes_per_sync for RocksDB", nullptr, nullptr, rocksdb_db_options->bytes_per_sync, /* min */ 0L, /* max */ ULONGLONG_MAX, 0); static MYSQL_SYSVAR_UINT64_T(wal_bytes_per_sync, rocksdb_db_options->wal_bytes_per_sync, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::wal_bytes_per_sync for RocksDB", nullptr, nullptr, rocksdb_db_options->wal_bytes_per_sync, /* min */ 0L, /* max */ ULONGLONG_MAX, 0); static MYSQL_SYSVAR_BOOL( enable_thread_tracking, *reinterpret_cast<my_bool *>(&rocksdb_db_options->enable_thread_tracking), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::enable_thread_tracking for RocksDB", nullptr, nullptr, true); static MYSQL_SYSVAR_LONGLONG(block_cache_size, rocksdb_block_cache_size, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "block_cache size for RocksDB", nullptr, nullptr, /* default */ RDB_DEFAULT_BLOCK_CACHE_SIZE, /* min */ RDB_MIN_BLOCK_CACHE_SIZE, /* 
max */ LONGLONG_MAX, /* Block size */ RDB_MIN_BLOCK_CACHE_SIZE); static MYSQL_SYSVAR_LONGLONG(sim_cache_size, rocksdb_sim_cache_size, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "Simulated cache size for RocksDB", nullptr, nullptr, /* default */ 0, /* min */ 0, /* max */ LONGLONG_MAX, /* Block size */ 0); static MYSQL_SYSVAR_BOOL( use_clock_cache, rocksdb_use_clock_cache, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "Use ClockCache instead of default LRUCache for RocksDB", nullptr, nullptr, false); static MYSQL_SYSVAR_BOOL( cache_index_and_filter_blocks, *reinterpret_cast<my_bool *>( &rocksdb_tbl_options->cache_index_and_filter_blocks), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "BlockBasedTableOptions::cache_index_and_filter_blocks for RocksDB", nullptr, nullptr, true); // When pin_l0_filter_and_index_blocks_in_cache is true, RocksDB will use the // LRU cache, but will always keep the filter & idndex block's handle checked // out (=won't call ShardedLRUCache::Release), plus the parsed out objects // the LRU cache will never push flush them out, hence they're pinned. // // This fixes the mutex contention between :ShardedLRUCache::Lookup and // ShardedLRUCache::Release which reduced the QPS ratio (QPS using secondary // index / QPS using PK). 
// Remaining BlockBasedTableOptions passthroughs (all read-only at startup).
static MYSQL_SYSVAR_BOOL(
    pin_l0_filter_and_index_blocks_in_cache,
    *reinterpret_cast<my_bool *>(
        &rocksdb_tbl_options->pin_l0_filter_and_index_blocks_in_cache),
    PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
    "pin_l0_filter_and_index_blocks_in_cache for RocksDB", nullptr, nullptr,
    true);

static MYSQL_SYSVAR_ENUM(index_type, rocksdb_index_type,
                         PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
                         "BlockBasedTableOptions::index_type for RocksDB",
                         nullptr, nullptr,
                         (ulong)rocksdb_tbl_options->index_type,
                         &index_type_typelib);

static MYSQL_SYSVAR_BOOL(
    hash_index_allow_collision,
    *reinterpret_cast<my_bool *>(
        &rocksdb_tbl_options->hash_index_allow_collision),
    PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
    "BlockBasedTableOptions::hash_index_allow_collision for RocksDB", nullptr,
    nullptr, rocksdb_tbl_options->hash_index_allow_collision);

static MYSQL_SYSVAR_BOOL(
    no_block_cache,
    *reinterpret_cast<my_bool *>(&rocksdb_tbl_options->no_block_cache),
    PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
    "BlockBasedTableOptions::no_block_cache for RocksDB", nullptr, nullptr,
    rocksdb_tbl_options->no_block_cache);

static MYSQL_SYSVAR_SIZE_T(block_size, rocksdb_tbl_options->block_size,
                           PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
                           "BlockBasedTableOptions::block_size for RocksDB",
                           nullptr, nullptr, rocksdb_tbl_options->block_size,
                           /* min */ 1L, /* max */ SIZE_T_MAX, 0);

static MYSQL_SYSVAR_INT(
    block_size_deviation, rocksdb_tbl_options->block_size_deviation,
    PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
    "BlockBasedTableOptions::block_size_deviation for RocksDB", nullptr,
    nullptr, rocksdb_tbl_options->block_size_deviation,
    /* min */ 0, /* max */ INT_MAX, 0);

static MYSQL_SYSVAR_INT(
    block_restart_interval, rocksdb_tbl_options->block_restart_interval,
    PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
    "BlockBasedTableOptions::block_restart_interval for RocksDB", nullptr,
    nullptr, rocksdb_tbl_options->block_restart_interval,
    /* min */ 1, /* max */ INT_MAX, 0);

static MYSQL_SYSVAR_BOOL(
    whole_key_filtering,
    *reinterpret_cast<my_bool *>(&rocksdb_tbl_options->whole_key_filtering),
    PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
    "BlockBasedTableOptions::whole_key_filtering for RocksDB", nullptr, nullptr,
    rocksdb_tbl_options->whole_key_filtering);

// Column-family option strings; only update_cf_options is changeable at
// runtime (via rocksdb_set_update_cf_options).
static MYSQL_SYSVAR_STR(default_cf_options, rocksdb_default_cf_options,
                        PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
                        "default cf options for RocksDB", nullptr, nullptr, "");

static MYSQL_SYSVAR_STR(override_cf_options, rocksdb_override_cf_options,
                        PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
                        "option overrides per cf for RocksDB", nullptr, nullptr,
                        "");

static MYSQL_SYSVAR_STR(update_cf_options, rocksdb_update_cf_options,
                        PLUGIN_VAR_RQCMDARG |
                            PLUGIN_VAR_MEMALLOC
                        /* psergey-merge: need this? : PLUGIN_VAR_ALLOCATED*/,
                        "Option updates per column family for RocksDB", nullptr,
                        rocksdb_set_update_cf_options, nullptr);

// Valid values for @@rocksdb_flush_log_at_trx_commit (mirrors the InnoDB
// variable of the same name).
enum rocksdb_flush_log_at_trx_commit_type : unsigned int {
  FLUSH_LOG_NEVER = 0,
  FLUSH_LOG_SYNC,
  FLUSH_LOG_BACKGROUND,
  FLUSH_LOG_MAX /* must be last */
};

static MYSQL_SYSVAR_UINT(flush_log_at_trx_commit,
                         rocksdb_flush_log_at_trx_commit, PLUGIN_VAR_RQCMDARG,
                         "Sync on transaction commit. Similar to "
                         "innodb_flush_log_at_trx_commit. 1: sync on commit, "
                         "0,2: not sync on commit",
                         nullptr, nullptr,
                         /* default */ FLUSH_LOG_SYNC,
                         /* min */ FLUSH_LOG_NEVER,
                         /* max */ FLUSH_LOG_BACKGROUND, 0);

// Per-session variables (THDVAR) controlling write/read behavior.
static MYSQL_THDVAR_BOOL(write_disable_wal, PLUGIN_VAR_RQCMDARG,
                         "WriteOptions::disableWAL for RocksDB", nullptr,
                         nullptr, rocksdb::WriteOptions().disableWAL);

static MYSQL_THDVAR_BOOL(
    write_ignore_missing_column_families, PLUGIN_VAR_RQCMDARG,
    "WriteOptions::ignore_missing_column_families for RocksDB", nullptr,
    nullptr, rocksdb::WriteOptions().ignore_missing_column_families);

static MYSQL_THDVAR_BOOL(skip_fill_cache, PLUGIN_VAR_RQCMDARG,
                         "Skip filling block cache on read requests", nullptr,
                         nullptr, FALSE);

static MYSQL_THDVAR_BOOL(
    unsafe_for_binlog, PLUGIN_VAR_RQCMDARG,
    "Allowing statement based binary logging which may break consistency",
    nullptr, nullptr, FALSE);

// Test/override hooks for the optimizer's row estimates.
static MYSQL_THDVAR_UINT(records_in_range, PLUGIN_VAR_RQCMDARG,
                         "Used to override the result of records_in_range(). "
                         "Set to a positive number to override",
                         nullptr, nullptr, 0,
                         /* min */ 0, /* max */ INT_MAX, 0);

static MYSQL_THDVAR_UINT(force_index_records_in_range, PLUGIN_VAR_RQCMDARG,
                         "Used to override the result of records_in_range() "
                         "when FORCE INDEX is used.",
                         nullptr, nullptr, 0,
                         /* min */ 0, /* max */ INT_MAX, 0);

static MYSQL_SYSVAR_UINT(
    debug_optimizer_n_rows, rocksdb_debug_optimizer_n_rows,
    PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY | PLUGIN_VAR_NOSYSVAR,
    "Test only to override rocksdb estimates of table size in a memtable",
    nullptr, nullptr, 0, /* min */ 0, /* max */ INT_MAX, 0);

static MYSQL_SYSVAR_BOOL(force_compute_memtable_stats,
                         rocksdb_force_compute_memtable_stats,
                         PLUGIN_VAR_RQCMDARG,
                         "Force to always compute memtable stats", nullptr,
                         nullptr, TRUE);

static MYSQL_SYSVAR_UINT(force_compute_memtable_stats_cachetime,
                         rocksdb_force_compute_memtable_stats_cachetime,
                         PLUGIN_VAR_RQCMDARG,
                         "Time in usecs to cache memtable estimates", nullptr,
                         nullptr, /* default */ 60 * 1000 * 1000,
                         /* min */ 0, /* max */ INT_MAX, 0);

static
MYSQL_SYSVAR_BOOL( debug_optimizer_no_zero_cardinality, rocksdb_debug_optimizer_no_zero_cardinality, PLUGIN_VAR_RQCMDARG, "In case if cardinality is zero, overrides it with some value", nullptr, nullptr, TRUE); static MYSQL_SYSVAR_STR(compact_cf, rocksdb_compact_cf_name, PLUGIN_VAR_RQCMDARG, "Compact column family", rocksdb_compact_column_family, rocksdb_compact_column_family_stub, ""); static MYSQL_SYSVAR_STR(create_checkpoint, rocksdb_checkpoint_name, PLUGIN_VAR_RQCMDARG, "Checkpoint directory", rocksdb_create_checkpoint, rocksdb_create_checkpoint_stub, ""); static MYSQL_SYSVAR_BOOL(signal_drop_index_thread, rocksdb_signal_drop_index_thread, PLUGIN_VAR_RQCMDARG, "Wake up drop index thread", nullptr, rocksdb_drop_index_wakeup_thread, FALSE); static MYSQL_SYSVAR_BOOL(pause_background_work, rocksdb_pause_background_work, PLUGIN_VAR_RQCMDARG, "Disable all rocksdb background operations", nullptr, rocksdb_set_pause_background_work, FALSE); static MYSQL_SYSVAR_BOOL( enable_ttl, rocksdb_enable_ttl, PLUGIN_VAR_RQCMDARG, "Enable expired TTL records to be dropped during compaction.", nullptr, nullptr, TRUE); static MYSQL_SYSVAR_BOOL( enable_ttl_read_filtering, rocksdb_enable_ttl_read_filtering, PLUGIN_VAR_RQCMDARG, "For tables with TTL, expired records are skipped/filtered out during " "processing and in query results. Disabling this will allow these records " "to be seen, but as a result rows may disappear in the middle of " "transactions as they are dropped during compaction. Use with caution.", nullptr, nullptr, TRUE); static MYSQL_SYSVAR_INT( debug_ttl_rec_ts, rocksdb_debug_ttl_rec_ts, PLUGIN_VAR_RQCMDARG, "For debugging purposes only. Overrides the TTL of records to " "now() + debug_ttl_rec_ts. The value can be +/- to simulate " "a record inserted in the past vs a record inserted in the 'future'. " "A value of 0 denotes that the variable is not set. 
This variable is a " "no-op in non-debug builds.", nullptr, nullptr, 0, /* min */ -3600, /* max */ 3600, 0); static MYSQL_SYSVAR_INT( debug_ttl_snapshot_ts, rocksdb_debug_ttl_snapshot_ts, PLUGIN_VAR_RQCMDARG, "For debugging purposes only. Sets the snapshot during compaction to " "now() + debug_set_ttl_snapshot_ts. The value can be +/- to simulate " "a snapshot in the past vs a snapshot created in the 'future'. " "A value of 0 denotes that the variable is not set. This variable is a " "no-op in non-debug builds.", nullptr, nullptr, 0, /* min */ -3600, /* max */ 3600, 0); static MYSQL_SYSVAR_INT( debug_ttl_read_filter_ts, rocksdb_debug_ttl_read_filter_ts, PLUGIN_VAR_RQCMDARG, "For debugging purposes only. Overrides the TTL read filtering time to " "time + debug_ttl_read_filter_ts. A value of 0 denotes that the variable " "is not set. This variable is a no-op in non-debug builds.", nullptr, nullptr, 0, /* min */ -3600, /* max */ 3600, 0); static MYSQL_SYSVAR_BOOL( debug_ttl_ignore_pk, rocksdb_debug_ttl_ignore_pk, PLUGIN_VAR_RQCMDARG, "For debugging purposes only. If true, compaction filtering will not occur " "on PK TTL data. 
This variable is a no-op in non-debug builds.", nullptr, nullptr, FALSE); static MYSQL_SYSVAR_BOOL( reset_stats, rocksdb_reset_stats, PLUGIN_VAR_RQCMDARG, "Reset the RocksDB internal statistics without restarting the DB.", nullptr, rocksdb_set_reset_stats, FALSE); static MYSQL_SYSVAR_UINT(io_write_timeout, rocksdb_io_write_timeout_secs, PLUGIN_VAR_RQCMDARG, "Timeout for experimental I/O watchdog.", nullptr, rocksdb_set_io_write_timeout, /* default */ 0, /* min */ 0L, /* max */ UINT_MAX, 0); static MYSQL_SYSVAR_BOOL(enable_2pc, rocksdb_enable_2pc, PLUGIN_VAR_RQCMDARG, "Enable two phase commit for MyRocks", nullptr, nullptr, TRUE); static MYSQL_SYSVAR_BOOL(strict_collation_check, rocksdb_strict_collation_check, PLUGIN_VAR_RQCMDARG, "Enforce case sensitive collation for MyRocks indexes", nullptr, nullptr, TRUE); static MYSQL_SYSVAR_STR(strict_collation_exceptions, rocksdb_strict_collation_exceptions, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC, "List of tables (using regex) that are excluded " "from the case sensitive collation enforcement", nullptr, rocksdb_set_collation_exception_list, ""); static MYSQL_SYSVAR_BOOL(collect_sst_properties, rocksdb_collect_sst_properties, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "Enables collecting SST file properties on each flush", nullptr, nullptr, rocksdb_collect_sst_properties); static MYSQL_SYSVAR_BOOL( force_flush_memtable_now, rocksdb_force_flush_memtable_now_var, PLUGIN_VAR_RQCMDARG, "Forces memstore flush which may block all write requests so be careful", rocksdb_force_flush_memtable_now, rocksdb_force_flush_memtable_now_stub, FALSE); static MYSQL_SYSVAR_BOOL( force_flush_memtable_and_lzero_now, rocksdb_force_flush_memtable_and_lzero_now_var, PLUGIN_VAR_RQCMDARG, "Acts similar to force_flush_memtable_now, but also compacts all L0 files.", rocksdb_force_flush_memtable_and_lzero_now, rocksdb_force_flush_memtable_and_lzero_now_stub, FALSE); static MYSQL_THDVAR_BOOL( flush_memtable_on_analyze, PLUGIN_VAR_RQCMDARG, "Forces 
memtable flush on ANALZYE table to get accurate cardinality", nullptr, nullptr, true); static MYSQL_SYSVAR_UINT( seconds_between_stat_computes, rocksdb_seconds_between_stat_computes, PLUGIN_VAR_RQCMDARG, "Sets a number of seconds to wait between optimizer stats recomputation. " "Only changed indexes will be refreshed.", nullptr, nullptr, rocksdb_seconds_between_stat_computes, /* min */ 0L, /* max */ UINT_MAX, 0); static MYSQL_SYSVAR_LONGLONG(compaction_sequential_deletes, rocksdb_compaction_sequential_deletes, PLUGIN_VAR_RQCMDARG, "RocksDB will trigger compaction for the file if " "it has more than this number sequential deletes " "per window", nullptr, rocksdb_set_compaction_options, DEFAULT_COMPACTION_SEQUENTIAL_DELETES, /* min */ 0L, /* max */ MAX_COMPACTION_SEQUENTIAL_DELETES, 0); static MYSQL_SYSVAR_LONGLONG( compaction_sequential_deletes_window, rocksdb_compaction_sequential_deletes_window, PLUGIN_VAR_RQCMDARG, "Size of the window for counting rocksdb_compaction_sequential_deletes", nullptr, rocksdb_set_compaction_options, DEFAULT_COMPACTION_SEQUENTIAL_DELETES_WINDOW, /* min */ 0L, /* max */ MAX_COMPACTION_SEQUENTIAL_DELETES_WINDOW, 0); static MYSQL_SYSVAR_LONGLONG( compaction_sequential_deletes_file_size, rocksdb_compaction_sequential_deletes_file_size, PLUGIN_VAR_RQCMDARG, "Minimum file size required for compaction_sequential_deletes", nullptr, rocksdb_set_compaction_options, 0L, /* min */ -1L, /* max */ LONGLONG_MAX, 0); static MYSQL_SYSVAR_BOOL( compaction_sequential_deletes_count_sd, rocksdb_compaction_sequential_deletes_count_sd, PLUGIN_VAR_RQCMDARG, "Counting SingleDelete as rocksdb_compaction_sequential_deletes", nullptr, nullptr, rocksdb_compaction_sequential_deletes_count_sd); static MYSQL_SYSVAR_BOOL( print_snapshot_conflict_queries, rocksdb_print_snapshot_conflict_queries, PLUGIN_VAR_RQCMDARG, "Logging queries that got snapshot conflict errors into *.err log", nullptr, nullptr, rocksdb_print_snapshot_conflict_queries); static 
MYSQL_THDVAR_INT(checksums_pct, PLUGIN_VAR_RQCMDARG, "How many percentages of rows to be checksummed", nullptr, nullptr, RDB_MAX_CHECKSUMS_PCT, /* min */ 0, /* max */ RDB_MAX_CHECKSUMS_PCT, 0); static MYSQL_THDVAR_BOOL(store_row_debug_checksums, PLUGIN_VAR_RQCMDARG, "Include checksums when writing index/table records", nullptr, nullptr, false /* default value */); static MYSQL_THDVAR_BOOL(verify_row_debug_checksums, PLUGIN_VAR_RQCMDARG, "Verify checksums when reading index/table records", nullptr, nullptr, false /* default value */); static MYSQL_THDVAR_BOOL(master_skip_tx_api, PLUGIN_VAR_RQCMDARG, "Skipping holding any lock on row access. " "Not effective on slave.", nullptr, nullptr, false); static MYSQL_SYSVAR_UINT( validate_tables, rocksdb_validate_tables, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "Verify all .frm files match all RocksDB tables (0 means no verification, " "1 means verify and fail on error, and 2 means verify but continue", nullptr, nullptr, 1 /* default value */, 0 /* min value */, 2 /* max value */, 0); static MYSQL_SYSVAR_STR(datadir, rocksdb_datadir, PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY, "RocksDB data directory", nullptr, nullptr, "./.rocksdb"); static MYSQL_SYSVAR_STR(supported_compression_types, compression_types_val, PLUGIN_VAR_NOCMDOPT | PLUGIN_VAR_READONLY, "Compression algorithms supported by RocksDB", nullptr, nullptr, compression_types_val); static MYSQL_SYSVAR_UINT( table_stats_sampling_pct, rocksdb_table_stats_sampling_pct, PLUGIN_VAR_RQCMDARG, "Percentage of entries to sample when collecting statistics about table " "properties. Specify either 0 to sample everything or percentage " "[" STRINGIFY_ARG(RDB_TBL_STATS_SAMPLE_PCT_MIN) ".." STRINGIFY_ARG( RDB_TBL_STATS_SAMPLE_PCT_MAX) "]. 
" "By default " STRINGIFY_ARG( RDB_DEFAULT_TBL_STATS_SAMPLE_PCT) "% " "of" " e" "nt" "ri" "es" " a" "re" " " "sa" "mp" "le" "d" ".", nullptr, rocksdb_set_table_stats_sampling_pct, /* default */ RDB_DEFAULT_TBL_STATS_SAMPLE_PCT, /* everything */ 0, /* max */ RDB_TBL_STATS_SAMPLE_PCT_MAX, 0); static MYSQL_SYSVAR_BOOL( large_prefix, rocksdb_large_prefix, PLUGIN_VAR_RQCMDARG, "Support large index prefix length of 3072 bytes. If off, the maximum " "index prefix length is 767.", nullptr, nullptr, FALSE); static const int ROCKSDB_ASSUMED_KEY_VALUE_DISK_SIZE = 100; static struct st_mysql_sys_var *rocksdb_system_variables[] = { MYSQL_SYSVAR(lock_wait_timeout), MYSQL_SYSVAR(deadlock_detect), MYSQL_SYSVAR(deadlock_detect_depth), MYSQL_SYSVAR(max_row_locks), MYSQL_SYSVAR(write_batch_max_bytes), MYSQL_SYSVAR(lock_scanned_rows), MYSQL_SYSVAR(bulk_load), MYSQL_SYSVAR(bulk_load_allow_unsorted), MYSQL_SYSVAR(skip_unique_check_tables), MYSQL_SYSVAR(trace_sst_api), MYSQL_SYSVAR(commit_in_the_middle), MYSQL_SYSVAR(blind_delete_primary_key), MYSQL_SYSVAR(read_free_rpl_tables), MYSQL_SYSVAR(bulk_load_size), MYSQL_SYSVAR(merge_buf_size), MYSQL_SYSVAR(enable_bulk_load_api), MYSQL_SYSVAR(tmpdir), MYSQL_SYSVAR(merge_combine_read_size), MYSQL_SYSVAR(merge_tmp_file_removal_delay_ms), MYSQL_SYSVAR(skip_bloom_filter_on_read), MYSQL_SYSVAR(create_if_missing), MYSQL_SYSVAR(concurrent_prepare), MYSQL_SYSVAR(manual_wal_flush), MYSQL_SYSVAR(create_missing_column_families), MYSQL_SYSVAR(error_if_exists), MYSQL_SYSVAR(paranoid_checks), MYSQL_SYSVAR(rate_limiter_bytes_per_sec), MYSQL_SYSVAR(sst_mgr_rate_bytes_per_sec), MYSQL_SYSVAR(delayed_write_rate), MYSQL_SYSVAR(max_latest_deadlocks), MYSQL_SYSVAR(info_log_level), MYSQL_SYSVAR(max_open_files), MYSQL_SYSVAR(max_total_wal_size), MYSQL_SYSVAR(use_fsync), MYSQL_SYSVAR(wal_dir), MYSQL_SYSVAR(persistent_cache_path), MYSQL_SYSVAR(persistent_cache_size_mb), MYSQL_SYSVAR(delete_obsolete_files_period_micros), MYSQL_SYSVAR(max_background_jobs), 
MYSQL_SYSVAR(max_log_file_size), MYSQL_SYSVAR(max_subcompactions), MYSQL_SYSVAR(log_file_time_to_roll), MYSQL_SYSVAR(keep_log_file_num), MYSQL_SYSVAR(max_manifest_file_size), MYSQL_SYSVAR(table_cache_numshardbits), MYSQL_SYSVAR(wal_ttl_seconds), MYSQL_SYSVAR(wal_size_limit_mb), MYSQL_SYSVAR(manifest_preallocation_size), MYSQL_SYSVAR(use_direct_reads), MYSQL_SYSVAR(use_direct_io_for_flush_and_compaction), MYSQL_SYSVAR(allow_mmap_reads), MYSQL_SYSVAR(allow_mmap_writes), MYSQL_SYSVAR(is_fd_close_on_exec), MYSQL_SYSVAR(stats_dump_period_sec), MYSQL_SYSVAR(advise_random_on_open), MYSQL_SYSVAR(db_write_buffer_size), MYSQL_SYSVAR(use_adaptive_mutex), MYSQL_SYSVAR(bytes_per_sync), MYSQL_SYSVAR(wal_bytes_per_sync), MYSQL_SYSVAR(enable_thread_tracking), MYSQL_SYSVAR(perf_context_level), MYSQL_SYSVAR(wal_recovery_mode), MYSQL_SYSVAR(access_hint_on_compaction_start), MYSQL_SYSVAR(new_table_reader_for_compaction_inputs), MYSQL_SYSVAR(compaction_readahead_size), MYSQL_SYSVAR(allow_concurrent_memtable_write), MYSQL_SYSVAR(enable_write_thread_adaptive_yield), MYSQL_SYSVAR(block_cache_size), MYSQL_SYSVAR(sim_cache_size), MYSQL_SYSVAR(use_clock_cache), MYSQL_SYSVAR(cache_index_and_filter_blocks), MYSQL_SYSVAR(pin_l0_filter_and_index_blocks_in_cache), MYSQL_SYSVAR(index_type), MYSQL_SYSVAR(hash_index_allow_collision), MYSQL_SYSVAR(no_block_cache), MYSQL_SYSVAR(block_size), MYSQL_SYSVAR(block_size_deviation), MYSQL_SYSVAR(block_restart_interval), MYSQL_SYSVAR(whole_key_filtering), MYSQL_SYSVAR(default_cf_options), MYSQL_SYSVAR(override_cf_options), MYSQL_SYSVAR(update_cf_options), MYSQL_SYSVAR(flush_log_at_trx_commit), MYSQL_SYSVAR(write_disable_wal), MYSQL_SYSVAR(write_ignore_missing_column_families), MYSQL_SYSVAR(skip_fill_cache), MYSQL_SYSVAR(unsafe_for_binlog), MYSQL_SYSVAR(records_in_range), MYSQL_SYSVAR(force_index_records_in_range), MYSQL_SYSVAR(debug_optimizer_n_rows), MYSQL_SYSVAR(force_compute_memtable_stats), MYSQL_SYSVAR(force_compute_memtable_stats_cachetime), 
MYSQL_SYSVAR(debug_optimizer_no_zero_cardinality), MYSQL_SYSVAR(compact_cf), MYSQL_SYSVAR(signal_drop_index_thread), MYSQL_SYSVAR(pause_background_work), MYSQL_SYSVAR(enable_2pc), MYSQL_SYSVAR(strict_collation_check), MYSQL_SYSVAR(strict_collation_exceptions), MYSQL_SYSVAR(collect_sst_properties), MYSQL_SYSVAR(force_flush_memtable_now), MYSQL_SYSVAR(force_flush_memtable_and_lzero_now), MYSQL_SYSVAR(enable_ttl), MYSQL_SYSVAR(enable_ttl_read_filtering), MYSQL_SYSVAR(debug_ttl_rec_ts), MYSQL_SYSVAR(debug_ttl_snapshot_ts), MYSQL_SYSVAR(debug_ttl_read_filter_ts), MYSQL_SYSVAR(debug_ttl_ignore_pk), MYSQL_SYSVAR(reset_stats), MYSQL_SYSVAR(io_write_timeout), MYSQL_SYSVAR(flush_memtable_on_analyze), MYSQL_SYSVAR(seconds_between_stat_computes), MYSQL_SYSVAR(compaction_sequential_deletes), MYSQL_SYSVAR(compaction_sequential_deletes_window), MYSQL_SYSVAR(compaction_sequential_deletes_file_size), MYSQL_SYSVAR(compaction_sequential_deletes_count_sd), MYSQL_SYSVAR(print_snapshot_conflict_queries), MYSQL_SYSVAR(datadir), MYSQL_SYSVAR(supported_compression_types), MYSQL_SYSVAR(create_checkpoint), MYSQL_SYSVAR(checksums_pct), MYSQL_SYSVAR(store_row_debug_checksums), MYSQL_SYSVAR(verify_row_debug_checksums), MYSQL_SYSVAR(master_skip_tx_api), MYSQL_SYSVAR(validate_tables), MYSQL_SYSVAR(table_stats_sampling_pct), MYSQL_SYSVAR(large_prefix), nullptr}; static rocksdb::WriteOptions rdb_get_rocksdb_write_options(my_core::THD *const thd) { rocksdb::WriteOptions opt; opt.sync = (rocksdb_flush_log_at_trx_commit == FLUSH_LOG_SYNC); opt.disableWAL = THDVAR(thd, write_disable_wal); opt.ignore_missing_column_families = THDVAR(thd, write_ignore_missing_column_families); return opt; } /////////////////////////////////////////////////////////////////////////////////////////// /** @brief Function we use in the creation of our hash to get key. 
*/ uchar * Rdb_open_tables_map::get_hash_key(const Rdb_table_handler *const table_handler, size_t *const length, my_bool not_used MY_ATTRIBUTE((__unused__))) { *length = table_handler->m_table_name_length; return reinterpret_cast<uchar *>(table_handler->m_table_name); } /* Drop index thread's control */ static Rdb_drop_index_thread rdb_drop_idx_thread; static void rocksdb_drop_index_wakeup_thread( my_core::THD *const thd MY_ATTRIBUTE((__unused__)), struct st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save) { if (*static_cast<const bool *>(save)) { rdb_drop_idx_thread.signal(); } } static inline uint32_t rocksdb_perf_context_level(THD *const thd) { DBUG_ASSERT(thd != nullptr); const int session_perf_context_level = THDVAR(thd, perf_context_level); if (session_perf_context_level > rocksdb::PerfLevel::kUninitialized) { return session_perf_context_level; } /* Fallback to global thdvar, if session specific one was not set to a valid value. */ const int global_perf_context_level = THDVAR(nullptr, perf_context_level); if (global_perf_context_level > rocksdb::PerfLevel::kUninitialized) { return global_perf_context_level; } return rocksdb::PerfLevel::kDisable; } /* Very short (functor-like) interface to be passed to Rdb_transaction::walk_tx_list() */ interface Rdb_tx_list_walker { virtual ~Rdb_tx_list_walker() {} virtual void process_tran(const Rdb_transaction *const) = 0; }; /* This is a helper class that is passed to RocksDB to get notifications when a snapshot gets created. 
*/ class Rdb_snapshot_notifier : public rocksdb::TransactionNotifier { Rdb_transaction *m_owning_tx; void SnapshotCreated(const rocksdb::Snapshot *snapshot) override; public: Rdb_snapshot_notifier(const Rdb_snapshot_notifier &) = delete; Rdb_snapshot_notifier &operator=(const Rdb_snapshot_notifier &) = delete; explicit Rdb_snapshot_notifier(Rdb_transaction *const owning_tx) : m_owning_tx(owning_tx) {} // If the owning Rdb_transaction gets destructed we need to not reference // it anymore. void detach() { m_owning_tx = nullptr; } }; #ifdef MARIAROCKS_NOT_YET // ER_LOCK_WAIT_TIMEOUT error also has a reason in facebook/mysql-5.6 #endif String timeout_message(const char *command, const char *name1, const char *name2) { String msg; msg.append("Timeout on "); msg.append(command); msg.append(": "); msg.append(name1); if (name2 && name2[0]) { msg.append("."); msg.append(name2); } return msg; } /* This is the base class for transactions when interacting with rocksdb. */ class Rdb_transaction { protected: ulonglong m_write_count = 0; ulonglong m_insert_count = 0; ulonglong m_update_count = 0; ulonglong m_delete_count = 0; ulonglong m_lock_count = 0; bool m_is_delayed_snapshot = false; bool m_is_two_phase = false; THD *m_thd = nullptr; rocksdb::ReadOptions m_read_opts; static std::multiset<Rdb_transaction *> s_tx_list; static mysql_mutex_t s_tx_list_mutex; Rdb_io_perf *m_tbl_io_perf; bool m_tx_read_only = false; int m_timeout_sec; /* Cached value of @@rocksdb_lock_wait_timeout */ /* Maximum number of locks the transaction can have */ ulonglong m_max_row_locks; bool m_is_tx_failed = false; bool m_rollback_only = false; std::shared_ptr<Rdb_snapshot_notifier> m_notifier; // This should be used only when updating binlog information. 
  virtual rocksdb::WriteBatchBase *get_write_batch() = 0;
  virtual bool commit_no_binlog() = 0;
  virtual rocksdb::Iterator *
  get_iterator(const rocksdb::ReadOptions &options,
               rocksdb::ColumnFamilyHandle *column_family) = 0;

 public:
  // Binlog position captured at commit time (see commit()).
  const char *m_mysql_log_file_name;
  my_off_t m_mysql_log_offset;
#ifdef MARIAROCKS_NOT_YET
  // TODO: MariaDB probably doesn't need these at all:
  const char *m_mysql_gtid;
  const char *m_mysql_max_gtid;
#endif
  String m_detailed_error;
  int64_t m_snapshot_timestamp = 0;
  bool m_ddl_transaction;

  /*
    Tracks the number of tables in use through external_lock.
    This should not be reset during start_tx().
  */
  int64_t m_n_mysql_tables_in_use = 0;

  /* MariaDB's group commit: */
  bool commit_ordered_done;
  bool commit_ordered_res;

  /*
    for distinction between rdb_transaction_impl and rdb_writebatch_impl
    when using walk tx list
  */
  virtual bool is_writebatch_trx() const = 0;

  static void init_mutex() {
    mysql_mutex_init(key_mutex_tx_list, &s_tx_list_mutex, MY_MUTEX_INIT_FAST);
  }

  static void term_mutex() {
    DBUG_ASSERT(s_tx_list.size() == 0);
    mysql_mutex_destroy(&s_tx_list_mutex);
  }

  // Invoke walker->process_tran() on every live transaction while holding
  // the list mutex.
  static void walk_tx_list(Rdb_tx_list_walker *walker) {
    DBUG_ASSERT(walker != nullptr);

    RDB_MUTEX_LOCK_CHECK(s_tx_list_mutex);

    for (auto it : s_tx_list)
      walker->process_tran(it);

    RDB_MUTEX_UNLOCK_CHECK(s_tx_list_mutex);
  }

  // Translate a failed rocksdb::Status into a handler-level error code,
  // updating per-table counters and m_detailed_error as a side effect.
  int set_status_error(THD *const thd, const rocksdb::Status &s,
                       const Rdb_key_def &kd, Rdb_tbl_def *const tbl_def,
                       Rdb_table_handler *const table_handler) {
    DBUG_ASSERT(!s.ok());
    DBUG_ASSERT(tbl_def != nullptr);

    if (s.IsTimedOut()) {
      /*
        SQL layer has weird expectations. If we return an error when
        doing a read in DELETE IGNORE, it will ignore the error ("because it's
        an IGNORE command!) but then will fail an assert, because "error code
        was returned, but no error happened".  Do what InnoDB's
        convert_error_code_to_mysql() does: force a statement
        rollback before returning HA_ERR_LOCK_WAIT_TIMEOUT:
      */
      my_core::thd_mark_transaction_to_rollback(thd, false /*just statement*/);
      m_detailed_error.copy(timeout_message(
          "index", tbl_def->full_tablename().c_str(), kd.get_name().c_str()));
      table_handler->m_lock_wait_timeout_counter.inc();

      return HA_ERR_LOCK_WAIT_TIMEOUT;
    }

    if (s.IsDeadlock()) {
      my_core::thd_mark_transaction_to_rollback(thd,
                                                false /* just statement */);
      m_detailed_error = String();
      table_handler->m_deadlock_counter.inc();
      return HA_ERR_LOCK_DEADLOCK;
    } else if (s.IsBusy()) {
      // Snapshot conflicts are surfaced to the SQL layer as deadlocks so the
      // statement gets retried.
      rocksdb_snapshot_conflict_errors++;
      if (rocksdb_print_snapshot_conflict_queries) {
        char user_host_buff[MAX_USER_HOST_SIZE + 1];
        make_user_name(thd, user_host_buff);
        // NO_LINT_DEBUG
        sql_print_warning("Got snapshot conflict errors: User: %s "
                          "Query: %s",
                          user_host_buff, thd->query());
      }
      m_detailed_error = String(" (snapshot conflict)", system_charset_info);
      table_handler->m_deadlock_counter.inc();
      return HA_ERR_LOCK_DEADLOCK;
    }

    if (s.IsIOError() || s.IsCorruption()) {
      rdb_handle_io_error(s, RDB_IO_ERROR_GENERAL);
    }

    return ha_rocksdb::rdb_error_to_mysql(s);
  }

  THD *get_thd() const { return m_thd; }

  /* Used for tracking io_perf counters */
  void io_perf_start(Rdb_io_perf *const io_perf) {
    /*
      Since perf_context is tracked per thread, it is difficult and expensive
      to maintain perf_context on a per table basis. Therefore, roll all
      perf_context data into the first table used in a query. This works well
      for single table queries and is probably good enough for queries that hit
      multiple tables.

      perf_context stats gathering is started when the table lock is acquired
      or when ha_rocksdb::start_stmt is called in case of LOCK TABLES. They
      are recorded when the table lock is released, or when commit/rollback
      is called on the transaction, whichever comes first. Table lock release
      and commit/rollback can happen in different orders. In the case where
      the lock is released before commit/rollback is called, an extra step to
      gather stats during commit/rollback is needed.
    */
    if (m_tbl_io_perf == nullptr &&
        io_perf->start(rocksdb_perf_context_level(m_thd))) {
      m_tbl_io_perf = io_perf;
    }
  }

  void io_perf_end_and_record(void) {
    if (m_tbl_io_perf != nullptr) {
      m_tbl_io_perf->end_and_record(rocksdb_perf_context_level(m_thd));
      m_tbl_io_perf = nullptr;
    }
  }

  // Only records if io_perf is the perf object registered by io_perf_start().
  void io_perf_end_and_record(Rdb_io_perf *const io_perf) {
    if (m_tbl_io_perf == io_perf) {
      io_perf_end_and_record();
    }
  }

  void update_bytes_written(ulonglong bytes_written) {
    if (m_tbl_io_perf != nullptr) {
      m_tbl_io_perf->update_bytes_written(rocksdb_perf_context_level(m_thd),
                                          bytes_written);
    }
  }

  void set_params(int timeout_sec_arg, int max_row_locks_arg) {
    m_timeout_sec = timeout_sec_arg;
    m_max_row_locks = max_row_locks_arg;
    // NOTE: implementations of set_lock_timeout() read m_timeout_sec (set
    // just above) rather than their argument — see Rdb_transaction_impl.
    set_lock_timeout(timeout_sec_arg);
  }

  virtual void set_lock_timeout(int timeout_sec_arg) = 0;

  ulonglong get_write_count() const { return m_write_count; }

  ulonglong get_insert_count() const { return m_insert_count; }

  ulonglong get_update_count() const { return m_update_count; }

  ulonglong get_delete_count() const { return m_delete_count; }

  void incr_insert_count() { ++m_insert_count; }

  void incr_update_count() { ++m_update_count; }

  void incr_delete_count() { ++m_delete_count; }

  int get_timeout_sec() const { return m_timeout_sec; }

  ulonglong get_lock_count() const { return m_lock_count; }

  virtual void set_sync(bool sync) = 0;

  virtual void release_lock(rocksdb::ColumnFamilyHandle *const column_family,
                            const std::string &rowkey) = 0;

  virtual bool prepare(const rocksdb::TransactionName &name) = 0;

  // Roll back if the transaction was marked failed, otherwise commit.
  // Returns the commit() result, or false after a rollback.
  bool commit_or_rollback() {
    bool res;
    if (m_is_tx_failed) {
      rollback();
      res = false;
    } else
      res = commit();
    return res;
  }

  // Commit, recording the binlog position first. Empty transactions are
  // turned into rollbacks. Returns true on error.
  bool commit() {
    if (get_write_count() == 0) {
      rollback();
      return false;
    } else if (m_rollback_only) {
      /*
        Transactions marked as rollback_only are expected to be rolled back at
        prepare(). But there are some exceptions like below that prepare() is
        never called and commit() is called instead.
         1. Binlog is disabled
         2. No modification exists in binlog cache for the transaction (#195)
        In both cases, rolling back transaction is safe. Nothing is written to
        binlog.
      */
      my_error(ER_ROLLBACK_ONLY, MYF(0));
      rollback();
      return true;
    } else {
      mysql_bin_log_commit_pos(m_thd, &m_mysql_log_offset,
                               &m_mysql_log_file_name);
      binlog_manager.update(m_mysql_log_file_name, m_mysql_log_offset,
                            get_write_batch());
      return commit_no_binlog();
    }
  }

  virtual void rollback() = 0;

  // Callback target for Rdb_snapshot_notifier: remember the snapshot and
  // its creation time.
  void snapshot_created(const rocksdb::Snapshot *const snapshot) {
    DBUG_ASSERT(snapshot != nullptr);

    m_read_opts.snapshot = snapshot;
    rdb->GetEnv()->GetCurrentTime(&m_snapshot_timestamp);
    m_is_delayed_snapshot = false;
  }

  virtual void acquire_snapshot(bool acquire_now) = 0;
  virtual void release_snapshot() = 0;

  bool has_snapshot() const { return m_read_opts.snapshot != nullptr; }

 private:
  // The tables we are currently loading.  In a partitioned table this can
  // have more than one entry
  std::vector<ha_rocksdb *> m_curr_bulk_load;

 public:
  // Finalize all in-flight bulk loads; returns the first non-zero error.
  // NOTE(review): the loop re-reads begin() each iteration — it presumably
  // relies on finalize_bulk_load() removing the entry from m_curr_bulk_load
  // (via end_bulk_load); confirm, otherwise this would never terminate.
  int finish_bulk_load() {
    int rc = 0;

    std::vector<ha_rocksdb *>::iterator it;
    while ((it = m_curr_bulk_load.begin()) != m_curr_bulk_load.end()) {
      int rc2 = (*it)->finalize_bulk_load();
      if (rc2 != 0 && rc == 0) {
        rc = rc2;
      }
    }

    DBUG_ASSERT(m_curr_bulk_load.size() == 0);

    return rc;
  }

  void start_bulk_load(ha_rocksdb *const bulk_load) {
    /*
      If we already have an open bulk load of a table and the name doesn't
      match the current one, close out the currently running one.  This allows
      multiple bulk loads to occur on a partitioned table, but then closes
      them all out when we switch to another table.
    */
    DBUG_ASSERT(bulk_load != nullptr);

    if (!m_curr_bulk_load.empty() &&
        !bulk_load->same_table(*m_curr_bulk_load[0])) {
      const auto res = finish_bulk_load();
      SHIP_ASSERT(res == 0);
    }

    m_curr_bulk_load.push_back(bulk_load);
  }

  // Remove 'bulk_load' from the in-flight list; it must be present.
  void end_bulk_load(ha_rocksdb *const bulk_load) {
    for (auto it = m_curr_bulk_load.begin(); it != m_curr_bulk_load.end();
         it++) {
      if (*it == bulk_load) {
        m_curr_bulk_load.erase(it);
        return;
      }
    }

    // Should not reach here
    SHIP_ASSERT(0);
  }

  int num_ongoing_bulk_load() const { return m_curr_bulk_load.size(); }

  /*
    Flush the data accumulated so far. This assumes we're doing a bulk insert.

    @detail
      This should work like transaction commit, except that we don't
      synchronize with the binlog (there is no API that would allow to have
      binlog flush the changes accumulated so far and return its current
      position)

    @todo
      Add test coverage for what happens when somebody attempts to do bulk
      inserts while inside a multi-statement transaction.
  */
  bool flush_batch() {
    if (get_write_count() == 0)
      return false;

    /* Commit the current transaction */
    if (commit_no_binlog())
      return true;

    /* Start another one */
    start_tx();
    return false;
  }

  virtual rocksdb::Status put(rocksdb::ColumnFamilyHandle *const column_family,
                              const rocksdb::Slice &key,
                              const rocksdb::Slice &value) = 0;

  virtual rocksdb::Status
  delete_key(rocksdb::ColumnFamilyHandle *const column_family,
             const rocksdb::Slice &key) = 0;

  virtual rocksdb::Status
  single_delete(rocksdb::ColumnFamilyHandle *const column_family,
                const rocksdb::Slice &key) = 0;

  virtual bool has_modifications() const = 0;

  virtual rocksdb::WriteBatchBase *get_indexed_write_batch() = 0;

  /*
    Return a WriteBatch that one can write to. The writes will skip any
    transaction locking. The writes will NOT be visible to the transaction.
  */
  rocksdb::WriteBatchBase *get_blind_write_batch() {
    return get_indexed_write_batch()->GetWriteBatch();
  }

  virtual rocksdb::Status get(rocksdb::ColumnFamilyHandle *const column_family,
                              const rocksdb::Slice &key,
                              std::string *value) const = 0;

  virtual rocksdb::Status
  get_for_update(rocksdb::ColumnFamilyHandle *const column_family,
                 const rocksdb::Slice &key, std::string *const value,
                 bool exclusive) = 0;

  // Non-virtual front-end: builds the ReadOptions (bloom filter vs
  // total-order seek, cache fill, optional snapshot) and delegates to the
  // pure-virtual get_iterator(options, cf).
  rocksdb::Iterator *
  get_iterator(rocksdb::ColumnFamilyHandle *const column_family,
               bool skip_bloom_filter, bool fill_cache,
               bool read_current = false, bool create_snapshot = true) {
    // Make sure we are not doing both read_current (which implies we don't
    // want a snapshot) and create_snapshot which makes sure we create
    // a snapshot
    DBUG_ASSERT(column_family != nullptr);
    DBUG_ASSERT(!read_current || !create_snapshot);

    if (create_snapshot)
      acquire_snapshot(true);

    rocksdb::ReadOptions options = m_read_opts;

    if (skip_bloom_filter) {
      options.total_order_seek = true;
    } else {
      // With this option, Iterator::Valid() returns false if key
      // is outside of the prefix bloom filter range set at Seek().
      // Must not be set to true if not using bloom filter.
      options.prefix_same_as_start = true;
    }
    options.fill_cache = fill_cache;
    if (read_current) {
      options.snapshot = nullptr;
    }
    return get_iterator(options, column_family);
  }

  virtual bool is_tx_started() const = 0;
  virtual void start_tx() = 0;
  virtual void start_stmt() = 0;
  virtual void rollback_stmt() = 0;

  void set_tx_failed(bool failed_arg) { m_is_tx_failed = failed_arg; }

  bool can_prepare() const {
    if (m_rollback_only) {
      my_error(ER_ROLLBACK_ONLY, MYF(0));
      return false;
    }
    return true;
  }

  // Rollback-to-savepoint is not supported when the transaction has
  // modifications: the whole transaction is marked rollback-only instead.
  // (The 'savepoint' argument is deliberately unused.)
  int rollback_to_savepoint(void *const savepoint) {
    if (has_modifications()) {
      my_error(ER_ROLLBACK_TO_SAVEPOINT, MYF(0));
      m_rollback_only = true;
      return HA_EXIT_FAILURE;
    }
    return HA_EXIT_SUCCESS;
  }

  /*
    This is used by transactions started with "START TRANSACTION WITH "
    "CONSISTENT [ROCKSDB] SNAPSHOT". When tx_read_only is turned on,
    snapshot has to be created via DB::GetSnapshot(), not via Transaction
    API.
  */
  bool is_tx_read_only() const { return m_tx_read_only; }

  bool is_two_phase() const { return m_is_two_phase; }

  void set_tx_read_only(bool val) { m_tx_read_only = val; }

  // Registers the transaction in the global s_tx_list.
  explicit Rdb_transaction(THD *const thd)
      : m_thd(thd), m_tbl_io_perf(nullptr) {
    RDB_MUTEX_LOCK_CHECK(s_tx_list_mutex);
    s_tx_list.insert(this);
    RDB_MUTEX_UNLOCK_CHECK(s_tx_list_mutex);
  }

  virtual ~Rdb_transaction() {
    RDB_MUTEX_LOCK_CHECK(s_tx_list_mutex);
    s_tx_list.erase(this);
    RDB_MUTEX_UNLOCK_CHECK(s_tx_list_mutex);
  }
};

/*
  This is a rocksdb transaction. Its members represent the current transaction,
  which consists of:
  - the snapshot
  - the changes we've made but are not seeing yet.

  The changes are made to individual tables, which store them here and then
  this object commits them on commit.
*/
class Rdb_transaction_impl : public Rdb_transaction {
  rocksdb::Transaction *m_rocksdb_tx = nullptr;
  rocksdb::Transaction *m_rocksdb_reuse_tx = nullptr;

 public:
  // NOTE(review): uses m_timeout_sec, not timeout_sec_arg — callers go
  // through set_params() which assigns m_timeout_sec first, so the two are
  // equal on that path.
  void set_lock_timeout(int timeout_sec_arg) override {
    if (m_rocksdb_tx)
      m_rocksdb_tx->SetLockTimeout(rdb_convert_sec_to_ms(m_timeout_sec));
  }

  void set_sync(bool sync) override {
    m_rocksdb_tx->GetWriteOptions()->sync = sync;
  }

  void release_lock(rocksdb::ColumnFamilyHandle *const column_family,
                    const std::string &rowkey) override {
    if (!THDVAR(m_thd, lock_scanned_rows)) {
      m_rocksdb_tx->UndoGetForUpdate(column_family, rocksdb::Slice(rowkey));
    }
  }

  virtual bool is_writebatch_trx() const override { return false; }

 private:
  void release_tx(void) {
    // We are done with the current active transaction object.  Preserve it
    // for later reuse.
DBUG_ASSERT(m_rocksdb_reuse_tx == nullptr); m_rocksdb_reuse_tx = m_rocksdb_tx; m_rocksdb_tx = nullptr; } bool prepare(const rocksdb::TransactionName &name) override { rocksdb::Status s; s = m_rocksdb_tx->SetName(name); if (!s.ok()) { rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT); return false; } s = m_rocksdb_tx->Prepare(); if (!s.ok()) { rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT); return false; } return true; } bool commit_no_binlog() override { bool res = false; release_snapshot(); const rocksdb::Status s = m_rocksdb_tx->Commit(); if (!s.ok()) { rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT); res = true; } /* Save the transaction object to be reused */ release_tx(); m_write_count = 0; m_insert_count = 0; m_update_count = 0; m_delete_count = 0; m_lock_count = 0; set_tx_read_only(false); m_rollback_only = false; return res; } public: void rollback() override { m_write_count = 0; m_insert_count = 0; m_update_count = 0; m_delete_count = 0; m_lock_count = 0; m_ddl_transaction = false; if (m_rocksdb_tx) { release_snapshot(); /* This will also release all of the locks: */ m_rocksdb_tx->Rollback(); /* Save the transaction object to be reused */ release_tx(); set_tx_read_only(false); m_rollback_only = false; } } void acquire_snapshot(bool acquire_now) override { if (m_read_opts.snapshot == nullptr) { if (is_tx_read_only()) { snapshot_created(rdb->GetSnapshot()); } else if (acquire_now) { m_rocksdb_tx->SetSnapshot(); snapshot_created(m_rocksdb_tx->GetSnapshot()); } else if (!m_is_delayed_snapshot) { m_rocksdb_tx->SetSnapshotOnNextOperation(m_notifier); m_is_delayed_snapshot = true; } } } void release_snapshot() override { bool need_clear = m_is_delayed_snapshot; if (m_read_opts.snapshot != nullptr) { m_snapshot_timestamp = 0; if (is_tx_read_only()) { rdb->ReleaseSnapshot(m_read_opts.snapshot); need_clear = false; } else { need_clear = true; } m_read_opts.snapshot = nullptr; } if (need_clear && m_rocksdb_tx != nullptr) m_rocksdb_tx->ClearSnapshot(); } bool 
has_snapshot() { return m_read_opts.snapshot != nullptr; }

  /*
    Write a key/value pair through the pessimistic transaction.  Each write
    also acquires a row lock, so both the write and lock counters are bumped
    and checked against the session's max_row_locks limit before touching
    RocksDB.
  */
  rocksdb::Status put(rocksdb::ColumnFamilyHandle *const column_family,
                      const rocksdb::Slice &key,
                      const rocksdb::Slice &value) override {
    ++m_write_count;
    ++m_lock_count;
    if (m_write_count > m_max_row_locks || m_lock_count > m_max_row_locks)
      // Refuse to grow the transaction past the configured lock limit.
      return rocksdb::Status::Aborted(rocksdb::Status::kLockLimit);
    return m_rocksdb_tx->Put(column_family, key, value);
  }

  // Delete a key inside the transaction; counted and limited like put().
  rocksdb::Status delete_key(rocksdb::ColumnFamilyHandle *const column_family,
                             const rocksdb::Slice &key) override {
    ++m_write_count;
    ++m_lock_count;
    if (m_write_count > m_max_row_locks || m_lock_count > m_max_row_locks)
      return rocksdb::Status::Aborted(rocksdb::Status::kLockLimit);
    return m_rocksdb_tx->Delete(column_family, key);
  }

  // SingleDelete variant.  NOTE(review): RocksDB requires the key to have
  // been Put() at most once since the last delete -- that contract is the
  // caller's responsibility, it is not checked here.
  rocksdb::Status
  single_delete(rocksdb::ColumnFamilyHandle *const column_family,
                const rocksdb::Slice &key) override {
    ++m_write_count;
    ++m_lock_count;
    if (m_write_count > m_max_row_locks || m_lock_count > m_max_row_locks)
      return rocksdb::Status::Aborted(rocksdb::Status::kLockLimit);
    return m_rocksdb_tx->SingleDelete(column_family, key);
  }

  // True if the transaction's underlying write batch holds at least one
  // entry.  Each GetWriteBatch() level is null-checked before Count().
  bool has_modifications() const override {
    return m_rocksdb_tx->GetWriteBatch() &&
           m_rocksdb_tx->GetWriteBatch()->GetWriteBatch() &&
           m_rocksdb_tx->GetWriteBatch()->GetWriteBatch()->Count() > 0;
  }

  // For two-phase commit the commit-time batch is returned (applied by
  // RocksDB at Commit()); otherwise the transaction's regular batch.
  rocksdb::WriteBatchBase *get_write_batch() override {
    if (is_two_phase()) {
      return m_rocksdb_tx->GetCommitTimeWriteBatch();
    }
    return m_rocksdb_tx->GetWriteBatch()->GetWriteBatch();
  }

  /*
    Return a WriteBatch that one can write to. The writes will skip any
    transaction locking. The writes WILL be visible to the transaction.
*/ rocksdb::WriteBatchBase *get_indexed_write_batch() override { ++m_write_count; return m_rocksdb_tx->GetWriteBatch(); } rocksdb::Status get(rocksdb::ColumnFamilyHandle *const column_family, const rocksdb::Slice &key, std::string *value) const override { global_stats.queries[QUERIES_POINT].inc(); return m_rocksdb_tx->Get(m_read_opts, column_family, key, value); } rocksdb::Status get_for_update(rocksdb::ColumnFamilyHandle *const column_family, const rocksdb::Slice &key, std::string *const value, bool exclusive) override { if (++m_lock_count > m_max_row_locks) return rocksdb::Status::Aborted(rocksdb::Status::kLockLimit); return m_rocksdb_tx->GetForUpdate(m_read_opts, column_family, key, value, exclusive); } rocksdb::Iterator * get_iterator(const rocksdb::ReadOptions &options, rocksdb::ColumnFamilyHandle *const column_family) override { global_stats.queries[QUERIES_RANGE].inc(); return m_rocksdb_tx->GetIterator(options, column_family); } const rocksdb::Transaction *get_rdb_trx() const { return m_rocksdb_tx; } bool is_tx_started() const override { return (m_rocksdb_tx != nullptr); } void start_tx() override { rocksdb::TransactionOptions tx_opts; rocksdb::WriteOptions write_opts; tx_opts.set_snapshot = false; tx_opts.lock_timeout = rdb_convert_sec_to_ms(m_timeout_sec); tx_opts.deadlock_detect = THDVAR(m_thd, deadlock_detect); tx_opts.deadlock_detect_depth = THDVAR(m_thd, deadlock_detect_depth); tx_opts.max_write_batch_size = THDVAR(m_thd, write_batch_max_bytes); write_opts.sync = (rocksdb_flush_log_at_trx_commit == FLUSH_LOG_SYNC); write_opts.disableWAL = THDVAR(m_thd, write_disable_wal); write_opts.ignore_missing_column_families = THDVAR(m_thd, write_ignore_missing_column_families); m_is_two_phase = rocksdb_enable_2pc; commit_ordered_done= false; /* If m_rocksdb_reuse_tx is null this will create a new transaction object. Otherwise it will reuse the existing one. 
*/ m_rocksdb_tx = rdb->BeginTransaction(write_opts, tx_opts, m_rocksdb_reuse_tx); m_rocksdb_reuse_tx = nullptr; m_read_opts = rocksdb::ReadOptions(); m_ddl_transaction = false; } /* Start a statement inside a multi-statement transaction. @todo: are we sure this is called once (and not several times) per statement start? For hooking to start of statement that is its own transaction, see ha_rocksdb::external_lock(). */ void start_stmt() override { // Set the snapshot to delayed acquisition (SetSnapshotOnNextOperation) acquire_snapshot(false); m_rocksdb_tx->SetSavePoint(); } /* This must be called when last statement is rolled back, but the transaction continues */ void rollback_stmt() override { /* TODO: here we must release the locks taken since the start_stmt() call */ if (m_rocksdb_tx) { const rocksdb::Snapshot *const org_snapshot = m_rocksdb_tx->GetSnapshot(); m_rocksdb_tx->RollbackToSavePoint(); const rocksdb::Snapshot *const cur_snapshot = m_rocksdb_tx->GetSnapshot(); if (org_snapshot != cur_snapshot) { if (org_snapshot != nullptr) m_snapshot_timestamp = 0; m_read_opts.snapshot = cur_snapshot; if (cur_snapshot != nullptr) rdb->GetEnv()->GetCurrentTime(&m_snapshot_timestamp); else m_is_delayed_snapshot = true; } } } explicit Rdb_transaction_impl(THD *const thd) : Rdb_transaction(thd), m_rocksdb_tx(nullptr) { // Create a notifier that can be called when a snapshot gets generated. m_notifier = std::make_shared<Rdb_snapshot_notifier>(this); } virtual ~Rdb_transaction_impl() { rollback(); // Theoretically the notifier could outlive the Rdb_transaction_impl // (because of the shared_ptr), so let it know it can't reference // the transaction anymore. m_notifier->detach(); // Free any transaction memory that is still hanging around. delete m_rocksdb_reuse_tx; DBUG_ASSERT(m_rocksdb_tx == nullptr); } }; /* This is a rocksdb write batch. This class doesn't hold or wait on any transaction locks (skips rocksdb transaction API) thus giving better performance. 
The commit is done through rdb->GetBaseDB()->Commit(). Currently this is only used for replication threads which are guaranteed to be non-conflicting. Any further usage of this class should completely be thought thoroughly. */ class Rdb_writebatch_impl : public Rdb_transaction { rocksdb::WriteBatchWithIndex *m_batch; rocksdb::WriteOptions write_opts; // Called after commit/rollback. void reset() { m_batch->Clear(); m_read_opts = rocksdb::ReadOptions(); m_ddl_transaction = false; } private: bool prepare(const rocksdb::TransactionName &name) override { return true; } bool commit_no_binlog() override { bool res = false; release_snapshot(); const rocksdb::Status s = rdb->GetBaseDB()->Write(write_opts, m_batch->GetWriteBatch()); if (!s.ok()) { rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT); res = true; } reset(); m_write_count = 0; m_insert_count = 0; m_update_count = 0; m_delete_count = 0; set_tx_read_only(false); m_rollback_only = false; return res; } public: bool is_writebatch_trx() const override { return true; } void set_lock_timeout(int timeout_sec_arg) override { // Nothing to do here. } void set_sync(bool sync) override { write_opts.sync = sync; } void release_lock(rocksdb::ColumnFamilyHandle *const column_family, const std::string &rowkey) override { // Nothing to do here since we don't hold any row locks. 
}

  // Roll back the batch: drop all accumulated writes and reset counters.
  // No row locks are ever taken by this implementation, so there is
  // nothing to release beyond the snapshot.
  void rollback() override {
    m_write_count = 0;
    m_insert_count = 0;
    m_update_count = 0;
    m_delete_count = 0;
    m_lock_count = 0;
    release_snapshot();
    reset();
    set_tx_read_only(false);
    m_rollback_only = false;
  }

  // Snapshots are always taken eagerly from the base DB -- there is no
  // transaction object to defer snapshot creation to, so acquire_now is
  // effectively ignored.
  void acquire_snapshot(bool acquire_now) override {
    if (m_read_opts.snapshot == nullptr)
      snapshot_created(rdb->GetSnapshot());
  }

  void release_snapshot() override {
    if (m_read_opts.snapshot != nullptr) {
      rdb->ReleaseSnapshot(m_read_opts.snapshot);
      m_read_opts.snapshot = nullptr;
    }
  }

  rocksdb::Status put(rocksdb::ColumnFamilyHandle *const column_family,
                      const rocksdb::Slice &key,
                      const rocksdb::Slice &value) override {
    ++m_write_count;
    m_batch->Put(column_family, key, value);
    // Note Put/Delete in write batch doesn't return any error code. We
    // simply return OK here.
    return rocksdb::Status::OK();
  }

  rocksdb::Status delete_key(rocksdb::ColumnFamilyHandle *const column_family,
                             const rocksdb::Slice &key) override {
    ++m_write_count;
    m_batch->Delete(column_family, key);
    return rocksdb::Status::OK();
  }

  rocksdb::Status
  single_delete(rocksdb::ColumnFamilyHandle *const column_family,
                const rocksdb::Slice &key) override {
    ++m_write_count;
    m_batch->SingleDelete(column_family, key);
    return rocksdb::Status::OK();
  }

  bool has_modifications() const override {
    return m_batch->GetWriteBatch()->Count() > 0;
  }

  rocksdb::WriteBatchBase *get_write_batch() override { return m_batch; }

  rocksdb::WriteBatchBase *get_indexed_write_batch() override {
    ++m_write_count;
    return m_batch;
  }

  // Reads merge the not-yet-committed batch contents with the base DB.
  rocksdb::Status get(rocksdb::ColumnFamilyHandle *const column_family,
                      const rocksdb::Slice &key,
                      std::string *const value) const override {
    return m_batch->GetFromBatchAndDB(rdb, m_read_opts, column_family, key,
                                      value);
  }

  // No locking in this implementation: "get for update" degrades to a
  // plain read; the exclusive flag is ignored.
  rocksdb::Status
  get_for_update(rocksdb::ColumnFamilyHandle *const column_family,
                 const rocksdb::Slice &key, std::string *const value,
                 bool exclusive) override {
    return get(column_family, key, value);
  }

  rocksdb::Iterator *
  get_iterator(const rocksdb::ReadOptions &options,
rocksdb::ColumnFamilyHandle *const column_family) override { const auto it = rdb->NewIterator(options); return m_batch->NewIteratorWithBase(it); } bool is_tx_started() const override { return (m_batch != nullptr); } void start_tx() override { commit_ordered_done= false; // Do we need this here? reset(); write_opts.sync = (rocksdb_flush_log_at_trx_commit == FLUSH_LOG_SYNC); write_opts.disableWAL = THDVAR(m_thd, write_disable_wal); write_opts.ignore_missing_column_families = THDVAR(m_thd, write_ignore_missing_column_families); } void start_stmt() override { m_batch->SetSavePoint(); } void rollback_stmt() override { if (m_batch) m_batch->RollbackToSavePoint(); } explicit Rdb_writebatch_impl(THD *const thd) : Rdb_transaction(thd), m_batch(nullptr) { m_batch = new rocksdb::WriteBatchWithIndex(rocksdb::BytewiseComparator(), 0, true); } virtual ~Rdb_writebatch_impl() { rollback(); delete m_batch; } }; void Rdb_snapshot_notifier::SnapshotCreated( const rocksdb::Snapshot *const snapshot) { if (m_owning_tx != nullptr) { m_owning_tx->snapshot_created(snapshot); } } std::multiset<Rdb_transaction *> Rdb_transaction::s_tx_list; mysql_mutex_t Rdb_transaction::s_tx_list_mutex; static Rdb_transaction *&get_tx_from_thd(THD *const thd) { return *reinterpret_cast<Rdb_transaction **>( my_core::thd_ha_data(thd, rocksdb_hton)); } namespace { class Rdb_perf_context_guard { Rdb_io_perf m_io_perf; Rdb_io_perf *m_io_perf_ptr; Rdb_transaction *m_tx; uint m_level; public: Rdb_perf_context_guard(const Rdb_perf_context_guard &) = delete; Rdb_perf_context_guard &operator=(const Rdb_perf_context_guard &) = delete; explicit Rdb_perf_context_guard(Rdb_io_perf *io_perf, uint level) : m_io_perf_ptr(io_perf), m_tx(nullptr), m_level(level) { m_io_perf_ptr->start(m_level); } explicit Rdb_perf_context_guard(Rdb_transaction *tx, uint level) : m_io_perf_ptr(nullptr), m_tx(tx), m_level(level) { /* if perf_context information is already being recorded, this becomes a no-op */ if (tx != nullptr) { 
tx->io_perf_start(&m_io_perf); } } ~Rdb_perf_context_guard() { if (m_tx != nullptr) { m_tx->io_perf_end_and_record(); } else if (m_io_perf_ptr != nullptr) { m_io_perf_ptr->end_and_record(m_level); } } }; } // anonymous namespace /* TODO: maybe, call this in external_lock() and store in ha_rocksdb.. */ static Rdb_transaction *get_or_create_tx(THD *const thd) { Rdb_transaction *&tx = get_tx_from_thd(thd); // TODO: this is called too many times.. O(#rows) if (tx == nullptr) { bool rpl_skip_tx_api= false; // MARIAROCKS_NOT_YET. if ((rpl_skip_tx_api && thd->rgi_slave) || false /* MARIAROCKS_NOT_YET: THDVAR(thd, master_skip_tx_api) && !thd->rgi_slave)*/) { tx = new Rdb_writebatch_impl(thd); } else { tx = new Rdb_transaction_impl(thd); } tx->set_params(THDVAR(thd, lock_wait_timeout), THDVAR(thd, max_row_locks)); tx->start_tx(); } else { tx->set_params(THDVAR(thd, lock_wait_timeout), THDVAR(thd, max_row_locks)); if (!tx->is_tx_started()) { tx->start_tx(); } } return tx; } static int rocksdb_close_connection(handlerton *const hton, THD *const thd) { Rdb_transaction *&tx = get_tx_from_thd(thd); if (tx != nullptr) { int rc = tx->finish_bulk_load(); if (rc != 0) { // NO_LINT_DEBUG sql_print_error("RocksDB: Error %d finalizing last SST file while " "disconnecting", rc); abort_with_stack_traces(); } delete tx; tx = nullptr; } return HA_EXIT_SUCCESS; } /* * Serializes an xid to a string so that it can * be used as a rocksdb transaction name */ static std::string rdb_xid_to_string(const XID &src) { DBUG_ASSERT(src.gtrid_length >= 0 && src.gtrid_length <= MAXGTRIDSIZE); DBUG_ASSERT(src.bqual_length >= 0 && src.bqual_length <= MAXBQUALSIZE); std::string buf; buf.reserve(RDB_XIDHDR_LEN + src.gtrid_length + src.bqual_length); /* * expand formatID to fill 8 bytes if it doesn't already * then reinterpret bit pattern as unsigned and store in network order */ uchar fidbuf[RDB_FORMATID_SZ]; int64 signed_fid8 = src.formatID; const uint64 raw_fid8 = *reinterpret_cast<uint64 *>(&signed_fid8); 
rdb_netbuf_store_uint64(fidbuf, raw_fid8); buf.append(reinterpret_cast<const char *>(fidbuf), RDB_FORMATID_SZ); buf.push_back(src.gtrid_length); buf.push_back(src.bqual_length); buf.append(src.data, (src.gtrid_length) + (src.bqual_length)); return buf; } #if 0 // MARIAROCKS: MariaDB doesn't have flush_wal method /** Called by hton->flush_logs after MySQL group commit prepares a set of transactions. */ static bool rocksdb_flush_wal(handlerton* hton __attribute__((__unused__))) DBUG_ASSERT(rdb != nullptr); rocksdb::Status s; /* target_lsn is set to 0 when MySQL wants to sync the wal files */ if (target_lsn == 0 || rocksdb_flush_log_at_trx_commit != FLUSH_LOG_NEVER) { rocksdb_wal_group_syncs++; s = rdb->FlushWAL(target_lsn == 0 || rocksdb_flush_log_at_trx_commit == FLUSH_LOG_SYNC); } if (!s.ok()) { rdb_log_status_error(s); return HA_EXIT_FAILURE; } return HA_EXIT_SUCCESS; } #endif /** For a slave, prepare() updates the slave_gtid_info table which tracks the replication progress. */ static int rocksdb_prepare(handlerton* hton, THD* thd, bool prepare_tx) { bool async=false; // This is "ASYNC_COMMIT" feature which is only present in webscalesql Rdb_transaction *&tx = get_tx_from_thd(thd); if (!tx->can_prepare()) { return HA_EXIT_FAILURE; } if (prepare_tx || (!my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { /* We were instructed to prepare the whole transaction, or this is an SQL statement end and autocommit is on */ #ifdef MARIAROCKS_NOT_YET // Crash-safe slave does not work yet std::vector<st_slave_gtid_info> slave_gtid_info; my_core::thd_slave_gtid_info(thd, &slave_gtid_info); for (const auto &it : slave_gtid_info) { rocksdb::WriteBatchBase *const write_batch = tx->get_blind_write_batch(); binlog_manager.update_slave_gtid_info(it.id, it.db, it.gtid, write_batch); } #endif if (tx->is_two_phase()) { /* MariaDB: the following branch is never taken. We always flush at Prepare and rely on RocksDB's internal Group Commit to do some grouping. 
*/ if (thd->durability_property == HA_IGNORE_DURABILITY || async) { tx->set_sync(false); } /* MariaDB: do not flush logs if we are running in a non-crash-safe mode. */ if (!rocksdb_flush_log_at_trx_commit) tx->set_sync(false); XID xid; thd_get_xid(thd, reinterpret_cast<MYSQL_XID *>(&xid)); if (!tx->prepare(rdb_xid_to_string(xid))) { return HA_EXIT_FAILURE; } /* MariaDB: our Group Commit implementation does not use the hton->flush_logs call (at least currently) so the following is not needed (TODO: will we need this for binlog rotation?) */ #ifdef MARIAROCKS_NOT_YET if (thd->durability_property == HA_IGNORE_DURABILITY ) (rocksdb_flush_log_at_trx_commit != FLUSH_LOG_NEVER)) && THDVAR(thd, flush_log_at_trx_commit)) #endif #ifdef MARIAROCKS_NOT_YET { // MariaRocks: disable the // "write/sync redo log before flushing binlog cache to file" // feature. See a869c56d361bb44f46c0efeb11a8f03561676247 /** we set the log sequence as '1' just to trigger hton->flush_logs */ thd_store_lsn(thd, 1, DB_TYPE_ROCKSDB); } #endif } DEBUG_SYNC(thd, "rocksdb.prepared"); } return HA_EXIT_SUCCESS; } /** do nothing for prepare/commit by xid this is needed to avoid crashes in XA scenarios */ static int rocksdb_commit_by_xid(handlerton *const hton, XID *const xid) { DBUG_ENTER_FUNC(); DBUG_ASSERT(hton != nullptr); DBUG_ASSERT(xid != nullptr); DBUG_ASSERT(commit_latency_stats != nullptr); rocksdb::StopWatchNano timer(rocksdb::Env::Default(), true); const auto name = rdb_xid_to_string(*xid); DBUG_ASSERT(!name.empty()); rocksdb::Transaction *const trx = rdb->GetTransactionByName(name); if (trx == nullptr) { DBUG_RETURN(HA_EXIT_FAILURE); } const rocksdb::Status s = trx->Commit(); if (!s.ok()) { rdb_log_status_error(s); DBUG_RETURN(HA_EXIT_FAILURE); } delete trx; // `Add()` is implemented in a thread-safe manner. 
commit_latency_stats->Add(timer.ElapsedNanos() / 1000);

  DBUG_RETURN(HA_EXIT_SUCCESS);
}

/*
  XA ROLLBACK of an externally prepared transaction: look the prepared
  RocksDB transaction up by its serialized-XID name and roll it back.
*/
static int rocksdb_rollback_by_xid(handlerton *const hton
                                   MY_ATTRIBUTE((__unused__)),
                                   XID *const xid) {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(hton != nullptr);
  DBUG_ASSERT(xid != nullptr);
  DBUG_ASSERT(rdb != nullptr);

  const auto name = rdb_xid_to_string(*xid);

  rocksdb::Transaction *const trx = rdb->GetTransactionByName(name);

  if (trx == nullptr) {
    // No prepared transaction with that XID is known to RocksDB.
    DBUG_RETURN(HA_EXIT_FAILURE);
  }

  const rocksdb::Status s = trx->Rollback();

  if (!s.ok()) {
    rdb_log_status_error(s);
    DBUG_RETURN(HA_EXIT_FAILURE);
  }

  delete trx;

  DBUG_RETURN(HA_EXIT_SUCCESS);
}

/**
  Rebuilds an XID from a serialized version stored in a string.
  Inverse of rdb_xid_to_string(): [8-byte formatID][1-byte gtrid_length]
  [1-byte bqual_length][gtrid+bqual payload].
*/
static void rdb_xid_from_string(const std::string &src, XID *const dst) {
  DBUG_ASSERT(dst != nullptr);
  uint offset = 0;
  // First 8 bytes: formatID, stored in network byte order and
  // reinterpreted back to a signed value.
  uint64 raw_fid8 =
      rdb_netbuf_to_uint64(reinterpret_cast<const uchar *>(src.data()));
  const int64 signed_fid8 = *reinterpret_cast<int64 *>(&raw_fid8);
  dst->formatID = signed_fid8;
  offset += RDB_FORMATID_SZ;
  // One byte each for the gtrid and bqual lengths ...
  dst->gtrid_length = src.at(offset);
  offset += RDB_GTRID_SZ;
  dst->bqual_length = src.at(offset);
  offset += RDB_BQUAL_SZ;

  DBUG_ASSERT(dst->gtrid_length >= 0 && dst->gtrid_length <= MAXGTRIDSIZE);
  DBUG_ASSERT(dst->bqual_length >= 0 && dst->bqual_length <= MAXBQUALSIZE);

  // ... followed by the concatenated gtrid and bqual payload, copied
  // straight into dst->data (starting right after the serialized header).
  src.copy(dst->data, (dst->gtrid_length) + (dst->bqual_length),
           RDB_XIDHDR_LEN);
}

/**
  Reading last committed binary log info from RocksDB system row.
  The info is needed for crash safe slave/master to work.
*/ static int rocksdb_recover(handlerton* hton, XID* xid_list, uint len) #ifdef MARIAROCKS_NOT_YET char* const binlog_file, my_off_t *const binlog_pos, Gtid *const binlog_max_gtid) { #endif { #ifdef MARIAROCKS_NOT_YET if (binlog_file && binlog_pos) { char file_buf[FN_REFLEN + 1] = {0}; my_off_t pos; char gtid_buf[FN_REFLEN + 1] = {0}; if (binlog_manager.read(file_buf, &pos, gtid_buf)) { if (is_binlog_advanced(binlog_file, *binlog_pos, file_buf, pos)) { memcpy(binlog_file, file_buf, FN_REFLEN + 1); *binlog_pos = pos; fprintf(stderr, "RocksDB: Last binlog file position %llu," " file name %s\n", pos, file_buf); if (*gtid_buf) { global_sid_lock->rdlock(); binlog_max_gtid->parse(global_sid_map, gtid_buf); global_sid_lock->unlock(); fprintf(stderr, "RocksDB: Last MySQL Gtid %s\n", gtid_buf); } } } } #endif if (len == 0 || xid_list == nullptr) { return HA_EXIT_SUCCESS; } std::vector<rocksdb::Transaction *> trans_list; rdb->GetAllPreparedTransactions(&trans_list); uint count = 0; for (auto &trans : trans_list) { if (count >= len) { break; } auto name = trans->GetName(); rdb_xid_from_string(name, &xid_list[count]); count++; } return count; } /* Handle a commit checkpoint request from server layer. InnoDB does this: We put the request in a queue, so that we can notify upper layer about checkpoint complete when we have flushed the redo log. If we have already flushed all relevant redo log, we notify immediately. MariaRocks just flushes everything right away ATM */ static void rocksdb_checkpoint_request(handlerton *hton, void *cookie) { const rocksdb::Status s= rdb->SyncWAL(); //TODO: what to do on error? 
if (s.ok()) {
    rocksdb_wal_group_syncs++;
    commit_checkpoint_notify_ha(hton, cookie);
  }
}

/*
  @param all:   TRUE - commit the transaction
                FALSE - SQL statement ended
*/
static void rocksdb_commit_ordered(handlerton *hton, THD* thd, bool all)
{
  // Same assert as InnoDB has
  DBUG_ASSERT(all || (!thd_test_options(thd, OPTION_NOT_AUTOCOMMIT |
                                        OPTION_BEGIN)));
  Rdb_transaction *&tx = get_tx_from_thd(thd);
  if (!tx->is_two_phase()) {
    /*
      ordered_commit is supposedly slower as it is done sequentially
      in order to preserve commit order.

      if we are not required do 2-phase commit with the binlog, do not do
      anything here.
    */
    return;
  }

  // No WAL sync here -- ordering is preserved, durability is handled later.
  tx->set_sync(false);

  /* This will note the master position also */
  tx->commit_ordered_res= tx->commit();
  // Record that the real commit already happened so rocksdb_commit()
  // only has to report the saved result.
  tx->commit_ordered_done= true;

}

static int rocksdb_commit(handlerton* hton, THD* thd, bool commit_tx)
{
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(hton != nullptr);
  DBUG_ASSERT(thd != nullptr);
  DBUG_ASSERT(commit_latency_stats != nullptr);

  rocksdb::StopWatchNano timer(rocksdb::Env::Default(), true);

  /* note: h->external_lock(F_UNLCK) is called after this function is called) */
  Rdb_transaction *&tx = get_tx_from_thd(thd);

  /* this will trigger saving of perf_context information */
  Rdb_perf_context_guard guard(tx, rocksdb_perf_context_level(thd));

  if (tx != nullptr) {
    if (commit_tx || (!my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT |
                                                          OPTION_BEGIN))) {
      /*
        This will not add anything to commit_latency_stats, and this is
        correct right?
      */
      if (tx->commit_ordered_done) {
        // commit_ordered() already committed; wake waiters in the commit
        // queue and just report the saved result.
        thd_wakeup_subsequent_commits(thd, 0);
        DBUG_RETURN((tx->commit_ordered_res? HA_ERR_INTERNAL_ERROR: 0));
      }

      /*
        We get here
        - For a COMMIT statement that finishes a multi-statement transaction
        - For a statement that has its own transaction
      */
      if (tx->commit()) {
        DBUG_RETURN(HA_ERR_ROCKSDB_COMMIT_FAILED);
      }
      thd_wakeup_subsequent_commits(thd, 0);
    } else {
      /*
        We get here when committing a statement within a transaction.

        We don't need to do anything here.
tx->start_stmt() will notify Rdb_transaction_impl that another statement has started. */ tx->set_tx_failed(false); } if (my_core::thd_tx_isolation(thd) <= ISO_READ_COMMITTED) { // For READ_COMMITTED, we release any existing snapshot so that we will // see any changes that occurred since the last statement. tx->release_snapshot(); } } // `Add()` is implemented in a thread-safe manner. commit_latency_stats->Add(timer.ElapsedNanos() / 1000); DBUG_RETURN(HA_EXIT_SUCCESS); } static int rocksdb_rollback(handlerton *const hton, THD *const thd, bool rollback_tx) { Rdb_transaction *&tx = get_tx_from_thd(thd); Rdb_perf_context_guard guard(tx, rocksdb_perf_context_level(thd)); if (tx != nullptr) { if (rollback_tx) { /* We get here, when - ROLLBACK statement is issued. Discard the changes made by the transaction */ tx->rollback(); } else { /* We get here when - a statement with AUTOCOMMIT=1 is being rolled back (because of some error) - a statement inside a transaction is rolled back */ tx->rollback_stmt(); tx->set_tx_failed(true); } if (my_core::thd_tx_isolation(thd) <= ISO_READ_COMMITTED) { // For READ_COMMITTED, we release any existing snapshot so that we will // see any changes that occurred since the last statement. tx->release_snapshot(); } } return HA_EXIT_SUCCESS; } static bool print_stats(THD *const thd, std::string const &type, std::string const &name, std::string const &status, stat_print_fn *stat_print) { return stat_print(thd, type.c_str(), type.size(), name.c_str(), name.size(), status.c_str(), status.size()); } static std::string format_string(const char *const format, ...) 
{
  // Body of format_string(): printf-style formatting into a std::string.
  std::string res;
  va_list args;
  va_list args_copy;
  char static_buff[256];

  DBUG_ASSERT(format != nullptr);

  va_start(args, format);
  // args is consumed by the sizing vsnprintf below; keep a copy for the
  // second, real formatting pass.
  va_copy(args_copy, args);

  // Calculate how much space we will need
  int len = vsnprintf(nullptr, 0, format, args);
  va_end(args);

  if (len < 0) {
    res = std::string("<format error>");
  } else if (len == 0) {
    // Shortcut for an empty string
    res = std::string("");
  } else {
    // For short enough output use a static buffer
    char *buff = static_buff;
    std::unique_ptr<char[]> dynamic_buff = nullptr;

    len++;  // Add one for null terminator

    // for longer output use an allocated buffer
    if (static_cast<uint>(len) > sizeof(static_buff)) {
      dynamic_buff.reset(new char[len]);
      buff = dynamic_buff.get();
    }

    // Now re-do the vsnprintf with the buffer which is now large enough
    (void)vsnprintf(buff, len, format, args_copy);

    // Convert to a std::string. Note we could have created a std::string
    // large enough and then converted the buffer to a 'char*' and created
    // the output in place. This would probably work but feels like a hack.
    // Since this isn't code that needs to be super-performant we are going
    // with this 'safer' method.
res = std::string(buff);
  }
  va_end(args_copy);
  return res;
}

/*
  Collects a human-readable report of all active RocksDB snapshots and the
  latest detected deadlocks. Driven through Rdb_transaction::walk_tx_list()
  (see Rdb_tx_list_walker interface); the result is consumed by
  SHOW ENGINE ROCKSDB TRANSACTION STATUS (see rocksdb_show_snapshot_status).
*/
class Rdb_snapshot_status : public Rdb_tx_list_walker {
 private:
  std::string m_data;  // accumulated report text; seeded with the header

  // Current local time formatted as "YYYY-MM-DD HH:MM:SS".
  static std::string current_timestamp(void) {
    static const char *const format = "%d-%02d-%02d %02d:%02d:%02d";
    time_t currtime;
    struct tm currtm;
    time(&currtime);
    localtime_r(&currtime, &currtm);
    return format_string(format, currtm.tm_year + 1900, currtm.tm_mon + 1,
                         currtm.tm_mday, currtm.tm_hour, currtm.tm_min,
                         currtm.tm_sec);
  }

  // Banner printed at the top of the report, including a timestamp.
  static std::string get_header(void) {
    return "\n============================================================\n" +
           current_timestamp() +
           " ROCKSDB TRANSACTION MONITOR OUTPUT\n"
           "============================================================\n"
           "---------\n"
           "SNAPSHOTS\n"
           "---------\n"
           "LIST OF SNAPSHOTS FOR EACH SESSION:\n";
  }

  // Banner appended at the bottom of the report by getResult().
  static std::string get_footer(void) {
    return "-----------------------------------------\n"
           "END OF ROCKSDB TRANSACTION MONITOR OUTPUT\n"
           "=========================================\n";
  }

  /*
    Formats one transaction entry of a deadlock path: txn id, column family,
    waiting key (hex), lock type, and the index/table names resolved from
    gl_index_id via ddl_manager. When is_last_path is false, a
    "WAITING FOR" separator is appended after the entry.
  */
  static std::string get_dlock_txn_info(const rocksdb::DeadlockInfo &txn,
                                        const GL_INDEX_ID &gl_index_id,
                                        bool is_last_path = false) {
    std::string txn_data;

    /* extract table name and index names using the index id */
    std::string table_name = ddl_manager.safe_get_table_name(gl_index_id);
    if (table_name.empty()) {
      table_name =
          "NOT FOUND; INDEX_ID: " + std::to_string(gl_index_id.index_id);
    }

    auto kd = ddl_manager.safe_find(gl_index_id);
    std::string idx_name =
        (kd) ? kd->get_name()
             : "NOT FOUND; INDEX_ID: " + std::to_string(gl_index_id.index_id);

    /* get the name of the column family */
    // NOTE(review): get_cf() is not checked for nullptr before the
    // GetName() call below — presumably the CF for a deadlocking txn is
    // always live, but confirm (a dropped CF would crash here).
    rocksdb::ColumnFamilyHandle *cfh = cf_manager.get_cf(txn.m_cf_id);
    std::string cf_name = cfh->GetName();

    txn_data += format_string(
        "TRANSACTIONID: %u\n"
        "COLUMN FAMILY NAME: %s\n"
        "WAITING KEY: %s\n"
        "LOCK TYPE: %s\n"
        "INDEX NAME: %s\n"
        "TABLE NAME: %s\n",
        txn.m_txn_id, cf_name.c_str(),
        rdb_hexdump(txn.m_waiting_key.c_str(), txn.m_waiting_key.length())
            .c_str(),
        txn.m_exclusive ? "EXCLUSIVE" : "SHARED", idx_name.c_str(),
        table_name.c_str());
    if (!is_last_path) {
      txn_data += "---------------WAITING FOR---------------\n";
    }
    return txn_data;
  }

  /*
    Formats one complete deadlock path. Each waiting key is prefixed with the
    big-endian index number (decoded with rdb_netbuf_to_uint32) which,
    together with the CF id, identifies the index involved. The first txn is
    printed again at the end to close the displayed cycle.
  */
  static std::string get_dlock_path_info(
      const rocksdb::DeadlockPath &path_entry) {
    std::string path_data;
    if (path_entry.limit_exceeded) {
      path_data += "\n-------DEADLOCK EXCEEDED MAX DEPTH-------\n";
    } else {
      path_data +=
          "\n*** DEADLOCK PATH\n"
          "=========================================\n";
      for (auto it = path_entry.path.begin(); it != path_entry.path.end();
           it++) {
        auto txn = *it;
        const GL_INDEX_ID gl_index_id = {
            txn.m_cf_id, rdb_netbuf_to_uint32(reinterpret_cast<const uchar *>(
                             txn.m_waiting_key.c_str()))};
        path_data += get_dlock_txn_info(txn, gl_index_id);
      }

      DBUG_ASSERT_IFF(path_entry.limit_exceeded, path_entry.path.empty());
      /* print the first txn in the path to display the full deadlock cycle */
      if (!path_entry.path.empty() && !path_entry.limit_exceeded) {
        auto txn = path_entry.path[0];
        const GL_INDEX_ID gl_index_id = {
            txn.m_cf_id, rdb_netbuf_to_uint32(reinterpret_cast<const uchar *>(
                             txn.m_waiting_key.c_str()))};
        path_data += get_dlock_txn_info(txn, gl_index_id, true);

        /* prints the txn id of the transaction that caused the deadlock */
        auto deadlocking_txn = *(path_entry.path.end() - 1);
        path_data +=
            format_string("\n--------TRANSACTIONID: %u GOT DEADLOCK---------\n",
                          deadlocking_txn.m_txn_id);
      }
    }

    return path_data;
  }

 public:
  Rdb_snapshot_status() : m_data(get_header()) {}

  // Full report: everything accumulated so far plus the footer.
  std::string getResult() { return m_data + get_footer(); }

  /* Implement Rdb_transaction interface */
  /* Create one row in the snapshot status table */
  void process_tran(const Rdb_transaction *const tx) override {
    DBUG_ASSERT(tx != nullptr);

    /* Calculate the duration the snapshot has existed */
    int64_t snapshot_timestamp = tx->m_snapshot_timestamp;
    if (snapshot_timestamp != 0) {
      int64_t curr_time;
      rdb->GetEnv()->GetCurrentTime(&curr_time);

      // NOTE(review): under MariaDB the thd_security_context() call is
      // compiled out, so `buffer` is used uninitialized in the format
      // below — confirm whether this output is reachable in MariaDB builds.
      char buffer[1024];
#ifdef MARIAROCKS_NOT_YET
      thd_security_context(tx->get_thd(), buffer, sizeof buffer, 0);
#endif
      m_data += format_string(
          "---SNAPSHOT, ACTIVE %lld sec\n"
          "%s\n"
          "lock count %llu, write count %llu\n"
          "insert count %llu, update count %llu, delete count %llu\n",
          (longlong)(curr_time - snapshot_timestamp), buffer,
          tx->get_lock_count(), tx->get_write_count(), tx->get_insert_count(),
          tx->get_update_count(), tx->get_delete_count());
    }
  }

  // Appends the formatted contents of RocksDB's deadlock info buffer.
  void populate_deadlock_buffer() {
    auto dlock_buffer = rdb->GetDeadlockInfoBuffer();
    m_data += "----------LATEST DETECTED DEADLOCKS----------\n";

    for (auto path_entry : dlock_buffer) {
      m_data += get_dlock_path_info(path_entry);
    }
  }
};

/**
 * @brief
 * walks through all non-replication transactions and copies
 * out relevant information for information_schema.rocksdb_trx
 */
class Rdb_trx_info_aggregator : public Rdb_tx_list_walker {
 private:
  std::vector<Rdb_trx_info> *m_trx_info;  // output rows; owned by the caller

 public:
  explicit Rdb_trx_info_aggregator(std::vector<Rdb_trx_info> *const trx_info)
      : m_trx_info(trx_info) {}

  // Appends one Rdb_trx_info row for the given transaction.
  void process_tran(const Rdb_transaction *const tx) override {
    // Maps rocksdb::Transaction states to the strings shown in the
    // information_schema table.
    static const std::map<int, std::string> state_map = {
        {rocksdb::Transaction::STARTED, "STARTED"},
        {rocksdb::Transaction::AWAITING_PREPARE, "AWAITING_PREPARE"},
        {rocksdb::Transaction::PREPARED, "PREPARED"},
        {rocksdb::Transaction::AWAITING_COMMIT, "AWAITING_COMMIT"},
        {rocksdb::Transaction::COMMITED, "COMMITED"},
        {rocksdb::Transaction::AWAITING_ROLLBACK, "AWAITING_ROLLBACK"},
        {rocksdb::Transaction::ROLLEDBACK, "ROLLEDBACK"},
    };

    DBUG_ASSERT(tx != nullptr);

    THD *const thd = tx->get_thd();
    ulong thread_id = thd_get_thread_id(thd);

    if (tx->is_writebatch_trx()) {
      // Write-batch transactions bypass the rocksdb transaction API, so most
      // fields are filled with placeholder values.
      const auto wb_impl = static_cast<const Rdb_writebatch_impl *>(tx);
      DBUG_ASSERT(wb_impl);
      m_trx_info->push_back(
          {"",                             /* name */
           0,                             /* trx_id */
           wb_impl->get_write_count(), 0, /* lock_count */
           0,                             /* timeout_sec */
           "",                            /* state */
           "",                            /* waiting_key */
           0,                             /* waiting_cf_id */
           1,                             /*is_replication */
           1,                             /* skip_trx_api */
           wb_impl->is_tx_read_only(), 0, /* deadlock detection */
           wb_impl->num_ongoing_bulk_load(), thread_id,
"" /* query string */}); } else { const auto tx_impl = static_cast<const Rdb_transaction_impl *>(tx); DBUG_ASSERT(tx_impl); const rocksdb::Transaction *rdb_trx = tx_impl->get_rdb_trx(); if (rdb_trx == nullptr) { return; } char query_buf[NAME_LEN+1]; thd_query_safe(thd, query_buf, sizeof(query_buf)); std::string query_str(query_buf); const auto state_it = state_map.find(rdb_trx->GetState()); DBUG_ASSERT(state_it != state_map.end()); const int is_replication = (thd->rgi_slave != nullptr); uint32_t waiting_cf_id; std::string waiting_key; rdb_trx->GetWaitingTxns(&waiting_cf_id, &waiting_key), m_trx_info->push_back( {rdb_trx->GetName(), rdb_trx->GetID(), tx_impl->get_write_count(), tx_impl->get_lock_count(), tx_impl->get_timeout_sec(), state_it->second, waiting_key, waiting_cf_id, is_replication, 0, /* skip_trx_api */ tx_impl->is_tx_read_only(), rdb_trx->IsDeadlockDetect(), tx_impl->num_ongoing_bulk_load(), thread_id, query_str}); } } }; /* returns a vector of info for all non-replication threads for use by information_schema.rocksdb_trx */ std::vector<Rdb_trx_info> rdb_get_all_trx_info() { std::vector<Rdb_trx_info> trx_info; Rdb_trx_info_aggregator trx_info_agg(&trx_info); Rdb_transaction::walk_tx_list(&trx_info_agg); return trx_info; } #ifdef MARIAROCKS_NOT_YET /* Generate the snapshot status table */ static bool rocksdb_show_snapshot_status(handlerton *const hton, THD *const thd, stat_print_fn *const stat_print) { Rdb_snapshot_status showStatus; Rdb_transaction::walk_tx_list(&showStatus); showStatus.populate_deadlock_buffer(); /* Send the result data back to MySQL */ return print_stats(thd, "rocksdb", "", showStatus.getResult(), stat_print); } #endif /* This is called for SHOW ENGINE ROCKSDB STATUS | LOGS | etc. For now, produce info about live files (which gives an imprecise idea about what column families are there). 
*/
/*
  Handlerton show_status hook: builds the text shown by
  SHOW ENGINE ROCKSDB STATUS. For HA_ENGINE_STATUS it emits global
  statistics, per-DB stats, per-column-family stats and memory statistics.
  Returns false on success (true signals a failure to the SQL layer).
*/
static bool rocksdb_show_status(handlerton *const hton, THD *const thd,
                                stat_print_fn *const stat_print,
                                enum ha_stat_type stat_type) {
  DBUG_ASSERT(hton != nullptr);
  DBUG_ASSERT(thd != nullptr);
  DBUG_ASSERT(stat_print != nullptr);

  bool res = false;
  char buf[100] = {'\0'};  // scratch buffer for one formatted stats line

  if (stat_type == HA_ENGINE_STATUS) {
    DBUG_ASSERT(rdb != nullptr);

    std::string str;

    /* Global DB Statistics */
    if (rocksdb_stats) {
      str = rocksdb_stats->ToString();

      // Use the same format as internal RocksDB statistics entries to make
      // sure that output will look unified.
      DBUG_ASSERT(commit_latency_stats != nullptr);

      snprintf(buf, sizeof(buf),
               "rocksdb.commit_latency statistics "
               "Percentiles :=> 50 : %.2f 95 : %.2f "
               "99 : %.2f 100 : %.2f\n",
               commit_latency_stats->Percentile(50),
               commit_latency_stats->Percentile(95),
               commit_latency_stats->Percentile(99),
               commit_latency_stats->Percentile(100));
      str.append(buf);

      uint64_t v = 0;

      // Retrieve additional stalling related numbers from RocksDB and append
      // them to the buffer meant for displaying detailed statistics. The
      // intent here is to avoid adding another row to the query output
      // because of just two numbers.
      //
      // NB! We're replacing hyphens with underscores in output to better
      // match the existing naming convention.
      if (rdb->GetIntProperty("rocksdb.is-write-stopped", &v)) {
        snprintf(buf, sizeof(buf), "rocksdb.is_write_stopped COUNT : %llu\n",
                 (ulonglong)v);
        str.append(buf);
      }

      if (rdb->GetIntProperty("rocksdb.actual-delayed-write-rate", &v)) {
        snprintf(buf, sizeof(buf),
                 "rocksdb.actual_delayed_write_rate "
                 "COUNT : %llu\n",
                 (ulonglong)v);
        str.append(buf);
      }

      res |= print_stats(thd, "STATISTICS", "rocksdb", str, stat_print);
    }

    /* Per DB stats */
    if (rdb->GetProperty("rocksdb.dbstats", &str)) {
      res |= print_stats(thd, "DBSTATS", "rocksdb", str, stat_print);
    }

    /* Per column family stats */
    for (const auto &cf_name : cf_manager.get_cf_names()) {
      rocksdb::ColumnFamilyHandle *cfh = cf_manager.get_cf(cf_name);
      if (cfh == nullptr) {
        continue;
      }

      if (!rdb->GetProperty(cfh, "rocksdb.cfstats", &str)) {
        continue;
      }

      res |= print_stats(thd, "CF_COMPACTION", cf_name, str, stat_print);
    }

    /* Memory Statistics */
    std::vector<rocksdb::DB *> dbs;
    std::unordered_set<const rocksdb::Cache *> cache_set;
    size_t internal_cache_count = 0;
    size_t kDefaultInternalCacheSize = 8 * 1024 * 1024;

    dbs.push_back(rdb);
    cache_set.insert(rocksdb_tbl_options->block_cache.get());

    // Collect every distinct block cache used by block-based table
    // factories; CFs without an explicit block cache use a RocksDB-internal
    // one (counted in internal_cache_count, estimated below).
    for (const auto &cf_handle : cf_manager.get_all_cf()) {
      rocksdb::ColumnFamilyDescriptor cf_desc;
      cf_handle->GetDescriptor(&cf_desc);
      auto *const table_factory = cf_desc.options.table_factory.get();

      if (table_factory != nullptr) {
        std::string tf_name = table_factory->Name();

        if (tf_name.find("BlockBasedTable") != std::string::npos) {
          const rocksdb::BlockBasedTableOptions *const bbt_opt =
              reinterpret_cast<rocksdb::BlockBasedTableOptions *>(
                  table_factory->GetOptions());

          if (bbt_opt != nullptr) {
            if (bbt_opt->block_cache.get() != nullptr) {
              cache_set.insert(bbt_opt->block_cache.get());
            } else {
              internal_cache_count++;
            }
            cache_set.insert(bbt_opt->block_cache_compressed.get());
          }
        }
      }
    }

    std::map<rocksdb::MemoryUtil::UsageType, uint64_t> temp_usage_by_type;
    str.clear();
    rocksdb::MemoryUtil::GetApproximateMemoryUsageByType(dbs, cache_set,
                                                         &temp_usage_by_type);
    snprintf(buf, sizeof(buf), "\nMemTable Total: %llu",
             (ulonglong)temp_usage_by_type[rocksdb::MemoryUtil::kMemTableTotal]);
    str.append(buf);
    snprintf(buf, sizeof(buf), "\nMemTable Unflushed: %llu",
             (ulonglong)
                 temp_usage_by_type[rocksdb::MemoryUtil::kMemTableUnFlushed]);
    str.append(buf);
    snprintf(buf, sizeof(buf), "\nTable Readers Total: %llu",
             (ulonglong)
                 temp_usage_by_type[rocksdb::MemoryUtil::kTableReadersTotal]);
    str.append(buf);
    snprintf(buf, sizeof(buf), "\nCache Total: %llu",
             (ulonglong)temp_usage_by_type[rocksdb::MemoryUtil::kCacheTotal]);
    str.append(buf);
    snprintf(buf, sizeof(buf), "\nDefault Cache Capacity: %llu",
             (ulonglong)internal_cache_count * kDefaultInternalCacheSize);
    str.append(buf);
    res |= print_stats(thd, "MEMORY_STATS", "rocksdb", str, stat_print);
#ifdef MARIAROCKS_NOT_YET
    /* Show the background thread status */
    std::vector<rocksdb::ThreadStatus> thread_list;
    rocksdb::Status s = rdb->GetEnv()->GetThreadList(&thread_list);

    if (!s.ok()) {
      sql_print_error("RocksDB: Returned error (%s) from GetThreadList.\n",
                      s.ToString().c_str());
      res |= true;
    } else {
      /* For each background thread retrieved, print out its information */
      for (auto &it : thread_list) {
        /* Only look at background threads. Ignore user threads, if any. */
        if (it.thread_type > rocksdb::ThreadStatus::LOW_PRIORITY) {
          continue;
        }

        str = "\nthread_type: " + it.GetThreadTypeName(it.thread_type) +
              "\ncf_name: " + it.cf_name +
              "\noperation_type: " + it.GetOperationName(it.operation_type) +
              "\noperation_stage: " +
              it.GetOperationStageName(it.operation_stage) +
              "\nelapsed_time_ms: " + it.MicrosToString(it.op_elapsed_micros);

        for (auto &it_props : it.InterpretOperationProperties(
                 it.operation_type, it.op_properties)) {
          str += "\n" + it_props.first + ": " +
                 std::to_string(it_props.second);
        }

        str += "\nstate_type: " + it.GetStateName(it.state_type);

        res |= print_stats(thd, "BG_THREADS", std::to_string(it.thread_id),
                           str, stat_print);
      }
    }
#endif

#ifdef MARIAROCKS_NOT_YET
  } else if (stat_type == HA_ENGINE_TRX) {
    /* Handle the SHOW ENGINE ROCKSDB TRANSACTION STATUS command */
    res |= rocksdb_show_snapshot_status(hton, thd, stat_print);
#endif
  }

  return res;
}

/*
  Registers the transaction with the SQL layer for the current statement,
  and additionally for the whole transaction when running inside an explicit
  or multi-statement transaction (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN).
*/
static inline void rocksdb_register_tx(handlerton *const hton, THD *const thd,
                                       Rdb_transaction *const tx) {
  DBUG_ASSERT(tx != nullptr);

  trans_register_ha(thd, FALSE, rocksdb_hton);
  if (my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) {
    tx->start_stmt();
    trans_register_ha(thd, TRUE, rocksdb_hton);
  }
}

// MyRocks has no auxiliary table files; the extension list is empty.
static const char *ha_rocksdb_exts[] = {NullS};

/*
  Supporting START TRANSACTION WITH CONSISTENT [ROCKSDB] SNAPSHOT

  Features:
  1. Supporting START TRANSACTION WITH CONSISTENT SNAPSHOT
  2. Getting current binlog position in addition to #1.

  The second feature is done by START TRANSACTION WITH
  CONSISTENT ROCKSDB SNAPSHOT. This is Facebook's extension, and
  it works like existing START TRANSACTION WITH CONSISTENT INNODB SNAPSHOT.

  - When not setting engine, START TRANSACTION WITH CONSISTENT SNAPSHOT
  takes both InnoDB and RocksDB snapshots, and both InnoDB and RocksDB
  participate in transaction. When executing COMMIT, both InnoDB and
  RocksDB modifications are committed. Remember that XA is not supported yet,
  so mixing engines is not recommended anyway.
- When setting engine, START TRANSACTION WITH CONSISTENT.. takes
  snapshot for the specified engine only. But it starts both
  InnoDB and RocksDB transactions.
*/
/*
  Handlerton start_consistent_snapshot hook. Requires REPEATABLE READ
  isolation; registers the transaction and acquires a RocksDB snapshot
  immediately. Returns HA_EXIT_SUCCESS / HA_EXIT_FAILURE.
*/
static int rocksdb_start_tx_and_assign_read_view(
    handlerton *const hton, /*!< in: RocksDB handlerton */
    THD* thd)               /*!< in: MySQL thread handle of the
                                 user for whom the transaction should
                                 be committed */
{
  ulong const tx_isolation = my_core::thd_tx_isolation(thd);

  // A consistent snapshot is only meaningful under REPEATABLE READ.
  if (tx_isolation != ISO_REPEATABLE_READ) {
    my_error(ER_ISOLATION_LEVEL_WITH_CONSISTENT_SNAPSHOT, MYF(0));
    return HA_EXIT_FAILURE;
  }

  /*
    MariaDB: there is no need to call mysql_bin_log_lock_commits and then
    unlock back.
    SQL layer calls start_consistent_snapshot() for all engines, including
    the binlog under LOCK_commit_ordered mutex.
    The mutex prevents binlog commits from happening (right?) while the
    storage engine(s) allocate read snapshots. That way, each storage engine
    is synchronized with current binlog position.
  */
  mysql_mutex_assert_owner(&LOCK_commit_ordered);

  Rdb_transaction *const tx = get_or_create_tx(thd);
  Rdb_perf_context_guard guard(tx, rocksdb_perf_context_level(thd));

  DBUG_ASSERT(!tx->has_snapshot());
  tx->set_tx_read_only(true);
  rocksdb_register_tx(hton, thd, tx);
  // true = acquire the snapshot now rather than lazily on first read.
  tx->acquire_snapshot(true);

  return HA_EXIT_SUCCESS;
}

/* Dummy SAVEPOINT support. This is needed for long running transactions
 * like mysqldump (https://bugs.mysql.com/bug.php?id=71017).
 * Current SAVEPOINT does not correctly handle ROLLBACK and does not return
 * errors. This needs to be addressed in future versions (Issue#96).
 */
// Savepoint set hook: intentionally a no-op (see comment above).
static int rocksdb_savepoint(handlerton *const hton, THD *const thd,
                             void *const savepoint) {
  return HA_EXIT_SUCCESS;
}

// Savepoint rollback hook: delegates to the transaction object.
static int rocksdb_rollback_to_savepoint(handlerton *const hton,
                                         THD *const thd,
                                         void *const savepoint) {
  Rdb_transaction *&tx = get_tx_from_thd(thd);
  return tx->rollback_to_savepoint(savepoint);
}

// Rolling back to a savepoint may always release metadata locks here.
static bool rocksdb_rollback_to_savepoint_can_release_mdl(
    handlerton *const hton, THD *const thd) {
  return true;
}

#ifdef MARIAROCKS_NOT_YET
/* This is called for INFORMATION_SCHEMA */
static void rocksdb_update_table_stats(
    /* per-table stats callback */
    void (*cb)(const char *db, const char *tbl, bool is_partition,
               my_io_perf_t *r, my_io_perf_t *w, my_io_perf_t *r_blob,
               my_io_perf_t *r_primary, my_io_perf_t *r_secondary,
               page_stats_t *page_stats, comp_stats_t *comp_stats,
               int n_lock_wait, int n_lock_wait_timeout, int n_lock_deadlock,
               const char *engine)) {
  my_io_perf_t io_perf_read;
  my_io_perf_t io_perf_write;
  my_io_perf_t io_perf;
  page_stats_t page_stats;
  comp_stats_t comp_stats;
  uint lock_wait_timeout_stats;
  uint deadlock_stats;
  std::vector<std::string> tablenames;

  /*
    Most of these are for innodb, so setting them to 0.
    TODO: possibly separate out primary vs. secondary index reads
  */
  memset(&io_perf, 0, sizeof(io_perf));
  memset(&page_stats, 0, sizeof(page_stats));
  memset(&comp_stats, 0, sizeof(comp_stats));
  memset(&io_perf_write, 0, sizeof(io_perf_write));

  tablenames = rdb_open_tables.get_table_names();

  for (const auto &it : tablenames) {
    Rdb_table_handler *table_handler;
    std::string str, dbname, tablename, partname;
    char dbname_sys[NAME_LEN + 1];
    char tablename_sys[NAME_LEN + 1];
    bool is_partition;

    if (rdb_normalize_tablename(it, &str) != HA_EXIT_SUCCESS) {
      /* Function needs to return void because of the interface and we've
       * detected an error which shouldn't happen. There's no way to let
       * caller know that something failed. */
      SHIP_ASSERT(false);
      return;
    }

    if (rdb_split_normalized_tablename(str, &dbname, &tablename, &partname)) {
      continue;
    }

    is_partition = (partname.size() != 0);

    table_handler = rdb_open_tables.get_table_handler(it.c_str());
    if (table_handler == nullptr) {
      continue;
    }

    io_perf_read.bytes = table_handler->m_io_perf_read.bytes.load();
    io_perf_read.requests = table_handler->m_io_perf_read.requests.load();
    io_perf_write.bytes = table_handler->m_io_perf_write.bytes.load();
    io_perf_write.requests = table_handler->m_io_perf_write.requests.load();
    lock_wait_timeout_stats =
        table_handler->m_lock_wait_timeout_counter.load();
    deadlock_stats = table_handler->m_deadlock_counter.load();

    /*
      Convert from rocksdb timer to mysql timer. RocksDB values are
      in nanoseconds, but table statistics expect the value to be
      in my_timer format.
    */
    io_perf_read.svc_time = my_core::microseconds_to_my_timer(
        table_handler->m_io_perf_read.svc_time.load() / 1000);
    io_perf_read.svc_time_max = my_core::microseconds_to_my_timer(
        table_handler->m_io_perf_read.svc_time_max.load() / 1000);
    io_perf_read.wait_time = my_core::microseconds_to_my_timer(
        table_handler->m_io_perf_read.wait_time.load() / 1000);
    io_perf_read.wait_time_max = my_core::microseconds_to_my_timer(
        table_handler->m_io_perf_read.wait_time_max.load() / 1000);
    io_perf_read.slow_ios = table_handler->m_io_perf_read.slow_ios.load();
    rdb_open_tables.release_table_handler(table_handler);

    /*
      Table stats expects our database and table name to be in system
      encoding, not filename format. Convert before calling callback.
    */
    my_core::filename_to_tablename(dbname.c_str(), dbname_sys,
                                   sizeof(dbname_sys));
    my_core::filename_to_tablename(tablename.c_str(), tablename_sys,
                                   sizeof(tablename_sys));
    (*cb)(dbname_sys, tablename_sys, is_partition, &io_perf_read,
          &io_perf_write, &io_perf, &io_perf, &io_perf, &page_stats,
          &comp_stats, 0, lock_wait_timeout_stats, deadlock_stats,
          rocksdb_hton_name);
  }
}
#endif

/*
  Compares the options persisted in the data directory against the options
  we are about to open the database with. Refusing to open on mismatch
  protects against data corruption from incompatible option changes.
*/
static rocksdb::Status check_rocksdb_options_compatibility(
    const char *const dbpath, const rocksdb::Options &main_opts,
    const std::vector<rocksdb::ColumnFamilyDescriptor> &cf_descr) {
  DBUG_ASSERT(rocksdb_datadir != nullptr);

  rocksdb::DBOptions loaded_db_opt;
  std::vector<rocksdb::ColumnFamilyDescriptor> loaded_cf_descs;
  rocksdb::Status status =
      LoadLatestOptions(dbpath, rocksdb::Env::Default(), &loaded_db_opt,
                        &loaded_cf_descs);

  // If we're starting from scratch and there are no options saved yet then
  // this is a valid case. Therefore we can't compare the current set of
  // options to anything.
  if (status.IsNotFound()) {
    return rocksdb::Status::OK();
  }

  if (!status.ok()) {
    return status;
  }

  if (loaded_cf_descs.size() != cf_descr.size()) {
    return rocksdb::Status::NotSupported("Mismatched size of column family "
                                         "descriptors.");
  }

  // Please see RocksDB documentation for more context about why we need to
  // set user-defined functions and pointer-typed options manually.
  // Pointer-typed options (filters, comparators, factories) cannot be
  // reconstructed from the persisted options file, so copy ours in before
  // comparing.
  for (size_t i = 0; i < loaded_cf_descs.size(); i++) {
    loaded_cf_descs[i].options.compaction_filter =
        cf_descr[i].options.compaction_filter;
    loaded_cf_descs[i].options.compaction_filter_factory =
        cf_descr[i].options.compaction_filter_factory;
    loaded_cf_descs[i].options.comparator = cf_descr[i].options.comparator;
    loaded_cf_descs[i].options.memtable_factory =
        cf_descr[i].options.memtable_factory;
    loaded_cf_descs[i].options.merge_operator =
        cf_descr[i].options.merge_operator;
    loaded_cf_descs[i].options.prefix_extractor =
        cf_descr[i].options.prefix_extractor;
    loaded_cf_descs[i].options.table_factory =
        cf_descr[i].options.table_factory;
  }

  // This is the essence of the function - determine if it's safe to open the
  // database or not.
  status = CheckOptionsCompatibility(dbpath, rocksdb::Env::Default(),
                                     main_opts, loaded_cf_descs);

  return status;
}

/*
  Storage Engine initialization function, invoked when plugin is loaded.
  Sets up mutexes/threads, fills in the handlerton function table, builds the
  RocksDB options, opens the TransactionDB, and initializes the dictionary,
  binlog and DDL managers. Returns HA_EXIT_SUCCESS or HA_EXIT_FAILURE.
*/
static int rocksdb_init_func(void *const p) {
  DBUG_ENTER_FUNC();

  // Validate the assumption about the size of
  // ROCKSDB_SIZEOF_HIDDEN_PK_COLUMN.
  static_assert(sizeof(longlong) == 8, "Assuming that longlong is 8 bytes.");

  init_rocksdb_psi_keys();

  rocksdb_hton = (handlerton *)p;
  mysql_mutex_init(rdb_psi_open_tbls_mutex_key, &rdb_open_tables.m_mutex,
                   MY_MUTEX_INIT_FAST);
#ifdef HAVE_PSI_INTERFACE
  rdb_bg_thread.init(rdb_signal_bg_psi_mutex_key, rdb_signal_bg_psi_cond_key);
  rdb_drop_idx_thread.init(rdb_signal_drop_idx_psi_mutex_key,
                           rdb_signal_drop_idx_psi_cond_key);
#else
  rdb_bg_thread.init();
  rdb_drop_idx_thread.init();
#endif
  mysql_mutex_init(rdb_collation_data_mutex_key, &rdb_collation_data_mutex,
                   MY_MUTEX_INIT_FAST);
  mysql_mutex_init(rdb_mem_cmp_space_mutex_key, &rdb_mem_cmp_space_mutex,
                   MY_MUTEX_INIT_FAST);

#if defined(HAVE_PSI_INTERFACE)
  rdb_collation_exceptions =
      new Regex_list_handler(key_rwlock_collation_exception_list);
#else
  rdb_collation_exceptions = new Regex_list_handler();
#endif

  mysql_mutex_init(rdb_sysvars_psi_mutex_key, &rdb_sysvars_mutex,
                   MY_MUTEX_INIT_FAST);
  Rdb_transaction::init_mutex();

  // Fill in the handlerton function table.
  rocksdb_hton->state = SHOW_OPTION_YES;
  rocksdb_hton->create = rocksdb_create_handler;
  rocksdb_hton->close_connection = rocksdb_close_connection;

  rocksdb_hton->prepare = rocksdb_prepare;
  rocksdb_hton->prepare_ordered = NULL; // Do not need it

  rocksdb_hton->commit_by_xid = rocksdb_commit_by_xid;
  rocksdb_hton->rollback_by_xid = rocksdb_rollback_by_xid;
  rocksdb_hton->recover = rocksdb_recover;

  rocksdb_hton->commit_ordered= rocksdb_commit_ordered;
  rocksdb_hton->commit = rocksdb_commit;

  rocksdb_hton->commit_checkpoint_request= rocksdb_checkpoint_request;

  rocksdb_hton->rollback = rocksdb_rollback;
  rocksdb_hton->show_status = rocksdb_show_status;
  rocksdb_hton->start_consistent_snapshot =
      rocksdb_start_tx_and_assign_read_view;
  rocksdb_hton->savepoint_set = rocksdb_savepoint;
  rocksdb_hton->savepoint_rollback = rocksdb_rollback_to_savepoint;
  rocksdb_hton->savepoint_rollback_can_release_mdl =
      rocksdb_rollback_to_savepoint_can_release_mdl;
#ifdef MARIAROCKS_NOT_YET
  rocksdb_hton->update_table_stats = rocksdb_update_table_stats;
#endif // MARIAROCKS_NOT_YET

  /*
  Not needed in MariaDB:
  rocksdb_hton->flush_logs = rocksdb_flush_wal;
  */

  rocksdb_hton->flags = HTON_TEMPORARY_NOT_SUPPORTED |
                        HTON_SUPPORTS_EXTENDED_KEYS | HTON_CAN_RECREATE;

  rocksdb_hton->tablefile_extensions= ha_rocksdb_exts;
  DBUG_ASSERT(!mysqld_embedded);

  rocksdb_stats = rocksdb::CreateDBStatistics();
  rocksdb_db_options->statistics = rocksdb_stats;

  if (rocksdb_rate_limiter_bytes_per_sec != 0) {
    rocksdb_rate_limiter.reset(
        rocksdb::NewGenericRateLimiter(rocksdb_rate_limiter_bytes_per_sec));
    rocksdb_db_options->rate_limiter = rocksdb_rate_limiter;
  }

  rocksdb_db_options->delayed_write_rate = rocksdb_delayed_write_rate;

  // Wrap RocksDB's own logger (if it could be created) in ours so RocksDB
  // messages flow into the server log.
  std::shared_ptr<Rdb_logger> myrocks_logger = std::make_shared<Rdb_logger>();
  rocksdb::Status s = rocksdb::CreateLoggerFromOptions(
      rocksdb_datadir, *rocksdb_db_options, &rocksdb_db_options->info_log);
  if (s.ok()) {
    myrocks_logger->SetRocksDBLogger(rocksdb_db_options->info_log);
  }

  rocksdb_db_options->info_log = myrocks_logger;
  myrocks_logger->SetInfoLogLevel(
      static_cast<rocksdb::InfoLogLevel>(rocksdb_info_log_level));
  rocksdb_db_options->wal_dir = rocksdb_wal_dir;

  rocksdb_db_options->wal_recovery_mode =
      static_cast<rocksdb::WALRecoveryMode>(rocksdb_wal_recovery_mode);

  rocksdb_db_options->access_hint_on_compaction_start =
      static_cast<rocksdb::Options::AccessHint>(
          rocksdb_access_hint_on_compaction_start);

  if (rocksdb_db_options->allow_mmap_reads &&
      rocksdb_db_options->use_direct_reads) {
    // allow_mmap_reads implies !use_direct_reads and RocksDB will not open
    // if mmap_reads and direct_reads are both on.   (NO_LINT_DEBUG)
    sql_print_error("RocksDB: Can't enable both use_direct_reads "
                    "and allow_mmap_reads\n");
    DBUG_RETURN(HA_EXIT_FAILURE);
  }

  if (rocksdb_db_options->allow_mmap_writes &&
      rocksdb_db_options->use_direct_io_for_flush_and_compaction) {
    // See above comment for allow_mmap_reads. (NO_LINT_DEBUG)
    sql_print_error("RocksDB: Can't enable both "
                    "use_direct_io_for_flush_and_compaction and "
                    "allow_mmap_writes\n");
    DBUG_RETURN(HA_EXIT_FAILURE);
  }

  // sst_file_manager will move deleted rocksdb sst files to trash_dir
  // to be deleted in a background thread.
  std::string trash_dir = std::string(rocksdb_datadir) + "/trash";
  rocksdb_db_options->sst_file_manager.reset(
      NewSstFileManager(rocksdb_db_options->env, myrocks_logger, trash_dir));

  rocksdb_db_options->sst_file_manager->SetDeleteRateBytesPerSecond(
      rocksdb_sst_mgr_rate_bytes_per_sec);

  std::vector<std::string> cf_names;
  rocksdb::Status status;
  status = rocksdb::DB::ListColumnFamilies(*rocksdb_db_options,
                                           rocksdb_datadir, &cf_names);
  if (!status.ok()) {
    /*
      When we start on an empty datadir, ListColumnFamilies returns IOError,
      and RocksDB doesn't provide any way to check what kind of error it was.
      Checking system errno happens to work right now.
    */
    if (status.IsIOError()
#ifndef _WIN32
        && errno == ENOENT
#endif
        ) {
      sql_print_information("RocksDB: Got ENOENT when listing column families");
      sql_print_information(
          "RocksDB: assuming that we're creating a new database");
    } else {
      rdb_log_status_error(status, "Error listing column families");
      DBUG_RETURN(HA_EXIT_FAILURE);
    }
  } else
    sql_print_information("RocksDB: %ld column families found",
                          cf_names.size());

  std::vector<rocksdb::ColumnFamilyDescriptor> cf_descr;
  std::vector<rocksdb::ColumnFamilyHandle *> cf_handles;

  rocksdb_tbl_options->index_type =
      (rocksdb::BlockBasedTableOptions::IndexType)rocksdb_index_type;

  if (!rocksdb_tbl_options->no_block_cache) {
    std::shared_ptr<rocksdb::Cache> block_cache = rocksdb_use_clock_cache ?
        rocksdb::NewClockCache(rocksdb_block_cache_size) :
        rocksdb::NewLRUCache(rocksdb_block_cache_size);
    if (rocksdb_sim_cache_size > 0) {
      // Simulated cache enabled
      // Wrap block cache inside a simulated cache and pass it to RocksDB
      rocksdb_tbl_options->block_cache =
          rocksdb::NewSimCache(block_cache, rocksdb_sim_cache_size, 6);
    } else {
      // Pass block cache to RocksDB
      rocksdb_tbl_options->block_cache = block_cache;
    }
  }
  // Using newer BlockBasedTable format version for better compression
  // and better memory allocation.
  // See:
  // https://github.com/facebook/rocksdb/commit/9ab5adfc59a621d12357580c94451d9f7320c2dd
  rocksdb_tbl_options->format_version = 2;

  if (rocksdb_collect_sst_properties) {
    properties_collector_factory =
        std::make_shared<Rdb_tbl_prop_coll_factory>(&ddl_manager);

    rocksdb_set_compaction_options(nullptr, nullptr, nullptr, nullptr);

    RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex);

    DBUG_ASSERT(rocksdb_table_stats_sampling_pct <=
                RDB_TBL_STATS_SAMPLE_PCT_MAX);
    properties_collector_factory->SetTableStatsSamplingPct(
        rocksdb_table_stats_sampling_pct);

    RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex);
  }

  if (rocksdb_persistent_cache_size_mb > 0) {
    std::shared_ptr<rocksdb::PersistentCache> pcache;
    uint64_t cache_size_bytes=
        rocksdb_persistent_cache_size_mb * 1024 * 1024;
    rocksdb::NewPersistentCache(
        rocksdb::Env::Default(), std::string(rocksdb_persistent_cache_path),
        cache_size_bytes, myrocks_logger, true, &pcache);
    rocksdb_tbl_options->persistent_cache = pcache;
  } else if (strlen(rocksdb_persistent_cache_path)) {
    // A cache path without a size is a configuration error.
    sql_print_error("RocksDB: Must specify rocksdb_persistent_cache_size_mb");
    DBUG_RETURN(HA_EXIT_FAILURE);
  }

  std::unique_ptr<Rdb_cf_options> cf_options_map(new Rdb_cf_options());
  if (!cf_options_map->init(*rocksdb_tbl_options,
                            properties_collector_factory,
                            rocksdb_default_cf_options,
                            rocksdb_override_cf_options)) {
    // NO_LINT_DEBUG
    sql_print_error("RocksDB: Failed to initialize CF options map.");
    DBUG_RETURN(HA_EXIT_FAILURE);
  }

  /*
    If there are no column families, we're creating the new database.
    Create one column family named "default".
  */
  if (cf_names.size() == 0)
    cf_names.push_back(DEFAULT_CF_NAME);

  std::vector<int> compaction_enabled_cf_indices;
  sql_print_information("RocksDB: Column Families at start:");
  for (size_t i = 0; i < cf_names.size(); ++i) {
    rocksdb::ColumnFamilyOptions opts;
    cf_options_map->get_cf_options(cf_names[i], &opts);

    sql_print_information(" cf=%s", cf_names[i].c_str());
    sql_print_information(" write_buffer_size=%ld", opts.write_buffer_size);
    sql_print_information(" target_file_size_base=%" PRIu64,
                          opts.target_file_size_base);

    /*
      Temporarily disable compactions to prevent a race condition where
      compaction starts before compaction filter is ready.
    */
    if (!opts.disable_auto_compactions) {
      compaction_enabled_cf_indices.push_back(i);
      opts.disable_auto_compactions = true;
    }
    cf_descr.push_back(rocksdb::ColumnFamilyDescriptor(cf_names[i], opts));
  }

  rocksdb::Options main_opts(*rocksdb_db_options,
                             cf_options_map->get_defaults());

  rocksdb::TransactionDBOptions tx_db_options;
  tx_db_options.transaction_lock_timeout = 2; // 2 seconds
  tx_db_options.custom_mutex_factory = std::make_shared<Rdb_mutex_factory>();

  status =
      check_rocksdb_options_compatibility(rocksdb_datadir, main_opts,
                                          cf_descr);

  // We won't start if we'll determine that there's a chance of data
  // corruption because of incompatible options.
  if (!status.ok()) {
    rdb_log_status_error(
        status, "Compatibility check against existing database options failed");
    DBUG_RETURN(HA_EXIT_FAILURE);
  }

  status = rocksdb::TransactionDB::Open(
      main_opts, tx_db_options, rocksdb_datadir, cf_descr, &cf_handles, &rdb);

  if (!status.ok()) {
    rdb_log_status_error(status, "Error opening instance");
    DBUG_RETURN(HA_EXIT_FAILURE);
  }
  cf_manager.init(std::move(cf_options_map), &cf_handles);

  if (dict_manager.init(rdb->GetBaseDB(), &cf_manager)) {
    // NO_LINT_DEBUG
    sql_print_error("RocksDB: Failed to initialize data dictionary.");
    DBUG_RETURN(HA_EXIT_FAILURE);
  }

  if (binlog_manager.init(&dict_manager)) {
    // NO_LINT_DEBUG
    sql_print_error("RocksDB: Failed to initialize binlog manager.");
    DBUG_RETURN(HA_EXIT_FAILURE);
  }

  if (ddl_manager.init(&dict_manager, &cf_manager, rocksdb_validate_tables)) {
    // NO_LINT_DEBUG
    sql_print_error("RocksDB: Failed to initialize DDL manager.");
    DBUG_RETURN(HA_EXIT_FAILURE);
  }

  Rdb_sst_info::init(rdb);

  /*
    Enable auto compaction, things needed for compaction filter are finished
    initializing
  */
  std::vector<rocksdb::ColumnFamilyHandle *> compaction_enabled_cf_handles;
  compaction_enabled_cf_handles.reserve(compaction_enabled_cf_indices.size());
  for (const auto &index : compaction_enabled_cf_indices) {
    compaction_enabled_cf_handles.push_back(cf_handles[index]);
  }

  status = rdb->EnableAutoCompaction(compaction_enabled_cf_handles);

  if (!status.ok()) {
    rdb_log_status_error(status, "Error enabling compaction");
    DBUG_RETURN(HA_EXIT_FAILURE);
  }

  auto err = rdb_bg_thread.create_thread(BG_THREAD_NAME
#ifdef HAVE_PSI_INTERFACE
                                         ,
                                         rdb_background_psi_thread_key
#endif
  );
  if (err != 0) {
    sql_print_error("RocksDB: Couldn't start the background thread: (errno=%d)",
                    err);
    DBUG_RETURN(HA_EXIT_FAILURE);
  }

  err = rdb_drop_idx_thread.create_thread(INDEX_THREAD_NAME
#ifdef HAVE_PSI_INTERFACE
                                          ,
                                          rdb_drop_idx_psi_thread_key
#endif
  );
  if (err != 0) {
    sql_print_error("RocksDB: Couldn't start the drop index thread: (errno=%d)",
                    err);
    DBUG_RETURN(HA_EXIT_FAILURE);
  }

  rdb_set_collation_exception_list(rocksdb_strict_collation_exceptions);

  if (rocksdb_pause_background_work) {
    rdb->PauseBackgroundWork();
  }

  // NO_LINT_DEBUG
  sql_print_information("RocksDB: global statistics using %s indexer",
                        STRINGIFY_ARG(RDB_INDEXER));
#if defined(HAVE_SCHED_GETCPU)
  if (sched_getcpu() == -1) {
    // NO_LINT_DEBUG
    sql_print_information(
        "RocksDB: sched_getcpu() failed - "
        "global statistics will use thread_id_indexer_t instead");
  }
#endif

  /**
    Rocksdb does not always shutdown its threads, when
    plugin is shut down. Disable server's leak check
    at exit to avoid crash.
  */
  my_disable_leak_check = true;

  err = my_error_register(rdb_get_error_messages, HA_ERR_ROCKSDB_FIRST,
                          HA_ERR_ROCKSDB_LAST);
  if (err != 0) {
    // NO_LINT_DEBUG
    sql_print_error("RocksDB: Couldn't initialize error messages");
    rdb_open_tables.m_hash.~Rdb_table_set();
    DBUG_RETURN(HA_EXIT_FAILURE);
  }

  // Creating an instance of HistogramImpl should only happen after RocksDB
  // has been successfully initialized.
  commit_latency_stats = new rocksdb::HistogramImpl();

  // Construct a list of directories which will be monitored by I/O watchdog
  // to make sure that we won't lose write access to them.
  std::vector<std::string> directories;

  // 1. Data directory.
  directories.push_back(mysql_real_data_home);

  // 2. Transaction logs.
  if (myrocks::rocksdb_wal_dir && *myrocks::rocksdb_wal_dir) {
    directories.push_back(myrocks::rocksdb_wal_dir);
  }

#if !defined(_WIN32) && !defined(__APPLE__)
  io_watchdog = new Rdb_io_watchdog(directories);
  io_watchdog->reset_timeout(rocksdb_io_write_timeout_secs);
#endif

  // NO_LINT_DEBUG
  sql_print_information("MyRocks storage engine plugin has been successfully "
                        "initialized.");

  DBUG_RETURN(HA_EXIT_SUCCESS);
}

/*
  Storage Engine deinitialization function, invoked when plugin is unloaded.
*/ static int rocksdb_done_func(void *const p) { DBUG_ENTER_FUNC(); int error = 0; // signal the drop index thread to stop rdb_drop_idx_thread.signal(true); // Flush all memtables for not losing data, even if WAL is disabled. rocksdb_flush_all_memtables(); // Stop all rocksdb background work CancelAllBackgroundWork(rdb->GetBaseDB(), true); // Signal the background thread to stop and to persist all stats collected // from background flushes and compactions. This will add more keys to a new // memtable, but since the memtables were just flushed, it should not trigger // a flush that can stall due to background threads being stopped. As long // as these keys are stored in a WAL file, they can be retrieved on restart. rdb_bg_thread.signal(true); // Wait for the background thread to finish. auto err = rdb_bg_thread.join(); if (err != 0) { // We'll log the message and continue because we're shutting down and // continuation is the optimal strategy. // NO_LINT_DEBUG sql_print_error("RocksDB: Couldn't stop the background thread: (errno=%d)", err); } // Wait for the drop index thread to finish. err = rdb_drop_idx_thread.join(); if (err != 0) { // NO_LINT_DEBUG sql_print_error("RocksDB: Couldn't stop the index thread: (errno=%d)", err); } if (rdb_open_tables.m_hash.size()) { // Looks like we are getting unloaded and yet we have some open tables // left behind. 
error = 1; } /* destructors for static objects can be called at _exit(), but we want to free the memory at dlclose() */ rdb_open_tables.m_hash.~Rdb_table_set(); mysql_mutex_destroy(&rdb_open_tables.m_mutex); mysql_mutex_destroy(&rdb_sysvars_mutex); delete rdb_collation_exceptions; mysql_mutex_destroy(&rdb_collation_data_mutex); mysql_mutex_destroy(&rdb_mem_cmp_space_mutex); Rdb_transaction::term_mutex(); for (auto &it : rdb_collation_data) { delete it; it = nullptr; } ddl_manager.cleanup(); binlog_manager.cleanup(); dict_manager.cleanup(); cf_manager.cleanup(); delete rdb; rdb = nullptr; delete commit_latency_stats; commit_latency_stats = nullptr; #if !defined(_WIN32) && !defined(__APPLE__) delete io_watchdog; io_watchdog = nullptr; #endif // Disown the cache data since we're shutting down. // This results in memory leaks but it improved the shutdown time. // Don't disown when running under valgrind #ifndef HAVE_purify if (rocksdb_tbl_options->block_cache) { rocksdb_tbl_options->block_cache->DisownData(); } #endif /* HAVE_purify */ rocksdb_db_options = nullptr; rocksdb_tbl_options = nullptr; rocksdb_stats = nullptr; my_error_unregister(HA_ERR_ROCKSDB_FIRST, HA_ERR_ROCKSDB_LAST); DBUG_RETURN(error); } static inline void rocksdb_smart_seek(bool seek_backward, rocksdb::Iterator *const iter, const rocksdb::Slice &key_slice) { if (seek_backward) { iter->SeekForPrev(key_slice); } else { iter->Seek(key_slice); } } static inline void rocksdb_smart_next(bool seek_backward, rocksdb::Iterator *const iter) { if (seek_backward) { iter->Prev(); } else { iter->Next(); } } /** @brief Example of simple lock controls. The "table_handler" it creates is a structure we will pass to each ha_rocksdb handler. Do you have to have one of these? Well, you have pieces that are used for locking, and they are needed to function. 
*/

Rdb_table_handler *
Rdb_open_tables_map::get_table_handler(const char *const table_name) {
  Rdb_table_handler *table_handler;
  uint length;
  char *tmp_name;

  DBUG_ASSERT(table_name != nullptr);
  length = (uint)strlen(table_name);

  // First, look up the table in the hash map.
  RDB_MUTEX_LOCK_CHECK(m_mutex);
  if (!m_hash.size() || !(table_handler = m_hash.find(table_name, length))) {
    // Since we did not find it in the hash map, attempt to create and add it
    // to the hash map.
    // Note: one my_multi_malloc() call allocates both the struct and the
    // copy of the table name, so a single my_free() releases both.
    if (!(table_handler = reinterpret_cast<Rdb_table_handler *>(my_multi_malloc(
              MYF(MY_WME | MY_ZEROFILL), &table_handler, sizeof(*table_handler),
              &tmp_name, length + 1, NullS)))) {
      // Allocating a new Rdb_table_handler and a new table name failed.
      RDB_MUTEX_UNLOCK_CHECK(m_mutex);
      return nullptr;
    }

    table_handler->m_ref_count = 0;
    table_handler->m_table_name_length = length;
    table_handler->m_table_name = tmp_name;
    strmov(table_handler->m_table_name, table_name);

    if (m_hash.insert(table_handler)) {
      // Inserting into the hash map failed.
      RDB_MUTEX_UNLOCK_CHECK(m_mutex);
      my_free(table_handler);
      return nullptr;
    }

    thr_lock_init(&table_handler->m_thr_lock);
#ifdef MARIAROCKS_NOT_YET
    table_handler->m_io_perf_read.init();
    table_handler->m_io_perf_write.init();
#endif
  }
  DBUG_ASSERT(table_handler->m_ref_count >= 0);
  table_handler->m_ref_count++;

  RDB_MUTEX_UNLOCK_CHECK(m_mutex);

  return table_handler;
}

// Free-function wrapper so other translation units can list the names of
// all currently-open MyRocks tables.
std::vector<std::string> rdb_get_open_table_names(void) {
  return rdb_open_tables.get_table_names();
}

// Snapshot the names of every table in the open-tables hash; the mutex is
// held for the whole iteration so the hash cannot change underneath us.
std::vector<std::string> Rdb_open_tables_map::get_table_names(void) const {
  size_t i;
  const Rdb_table_handler *table_handler;
  std::vector<std::string> names;

  RDB_MUTEX_LOCK_CHECK(m_mutex);
  for (i = 0; (table_handler = m_hash.at(i)); i++) {
    DBUG_ASSERT(table_handler != nullptr);
    names.push_back(table_handler->m_table_name);
  }
  DBUG_ASSERT(i == m_hash.size());
  RDB_MUTEX_UNLOCK_CHECK(m_mutex);

  return names;
}

/*
  Initialize the table's auto-increment counter from the current maximum
  value of the auto-increment column, read via an index-only last-row
  lookup.  Saves and restores handler scan state around the probe.
*/
void ha_rocksdb::load_auto_incr_value() {
  const int save_active_index = active_index;
  active_index = table->s->next_number_index;
  const uint8 save_table_status = table->status;

  /*
    load_auto_incr_value() may be called by statements that
    do not execute implicit commits (i.e. SHOW CREATE TABLE).
    index_last() creates a snapshot. When a snapshot is created
    here, it has to be released as well. (GitHub issue#189)
  */
  Rdb_transaction *const tx = get_or_create_tx(table->in_use);
  const bool is_new_snapshot = !tx->has_snapshot();

  // Do a lookup. We only need index column, so it should be index-only.
  // (another reason to make it index-only is that table->read_set is
  //  not set appropriately and non-index-only lookup will not read the value)
  const bool save_keyread_only = m_keyread_only;
  m_keyread_only = true;

  if (!index_last(table->record[0]))
    update_auto_incr_val();

  m_keyread_only = save_keyread_only;

  if (is_new_snapshot) {
    tx->release_snapshot();
  }

  table->status = save_table_status;
  active_index = save_active_index;

  /*
    Do what ha_rocksdb::index_end() does.
    (Why don't we use index_init/index_end? class handler defines index_init
     as private, for some reason).
  */
  release_scan_iterator();
}

/* Get PK value from table->record[0]. */
/*
  TODO(alexyang): No existing support for auto_increment on non-pk columns,
  see end of ha_rocksdb::create. Also see opened issue here:
  https://github.com/facebook/mysql-5.6/issues/153
*/
void ha_rocksdb::update_auto_incr_val() {
  Field *field;
  longlong new_val;
  field = table->key_info[table->s->next_number_index].key_part[0].field;

  my_bitmap_map *const old_map =
      dbug_tmp_use_all_columns(table, table->read_set);
  new_val = field->val_int();
  // don't increment if we would wrap around
  if (new_val != std::numeric_limits<longlong>::max()) {
    new_val++;
  }

  dbug_tmp_restore_column_map(table->read_set, old_map);

  // Lock-free raise of the shared counter: a CAS loop guarantees a larger
  // value stored concurrently is never overwritten with a smaller one.
  longlong auto_incr_val = m_tbl_def->m_auto_incr_val;
  while (auto_incr_val < new_val &&
         !m_tbl_def->m_auto_incr_val.compare_exchange_weak(auto_incr_val,
                                                           new_val)) {
    // Do nothing - just loop until auto_incr_val is >= new_val or
    // we successfully set it
  }
}

/*
  Initialize the hidden-PK counter (tables without an explicit PK) from the
  largest hidden PK id currently stored in the last index.  Returns
  HA_EXIT_SUCCESS or an error from decoding the row key.
*/
int ha_rocksdb::load_hidden_pk_value() {
  const int save_active_index = active_index;
  active_index = m_tbl_def->m_key_count - 1;
  const uint8 save_table_status = table->status;

  Rdb_transaction *const tx = get_or_create_tx(table->in_use);
  const bool is_new_snapshot = !tx->has_snapshot();

  // Do a lookup.
  if (!index_last(table->record[0])) {
    /*
      Decode PK field from the key
    */
    longlong hidden_pk_id = 0;
    auto err = read_hidden_pk_id_from_rowkey(&hidden_pk_id);
    if (err) {
      if (is_new_snapshot) {
        tx->release_snapshot();
      }
      return err;
    }

    hidden_pk_id++;
    // Same only-ever-raise CAS pattern as update_auto_incr_val().
    longlong old = m_tbl_def->m_hidden_pk_val;
    while (
        old < hidden_pk_id &&
        !m_tbl_def->m_hidden_pk_val.compare_exchange_weak(old, hidden_pk_id)) {
    }
  }

  if (is_new_snapshot) {
    tx->release_snapshot();
  }

  table->status = save_table_status;
  active_index = save_active_index;

  release_scan_iterator();

  return HA_EXIT_SUCCESS;
}

/*
  Get PK value from m_tbl_def->m_hidden_pk_info.
*/
longlong ha_rocksdb::update_hidden_pk_val() {
  DBUG_ASSERT(has_hidden_pk(table));
  // Atomic post-increment: each inserted row gets a unique hidden PK id.
  const longlong new_val = m_tbl_def->m_hidden_pk_val++;
  return new_val;
}

/* Get the id of the hidden pk id from m_last_rowkey */
int ha_rocksdb::read_hidden_pk_id_from_rowkey(longlong *const hidden_pk_id) {
  DBUG_ASSERT(hidden_pk_id != nullptr);
  DBUG_ASSERT(table != nullptr);
  DBUG_ASSERT(has_hidden_pk(table));

  rocksdb::Slice rowkey_slice(m_last_rowkey.ptr(), m_last_rowkey.length());

  // Get hidden primary key from old key slice
  Rdb_string_reader reader(&rowkey_slice);
  // Skip over the leading index-number prefix of the key.
  if ((!reader.read(Rdb_key_def::INDEX_NUMBER_SIZE)))
    return HA_ERR_ROCKSDB_CORRUPT_DATA;

  const int length= 8; /* was Field_longlong::PACK_LENGTH in FB MySQL tree */
  const uchar *from = reinterpret_cast<const uchar *>(reader.read(length));
  if (from == nullptr) {
    /* Mem-comparable image doesn't have enough bytes */
    return HA_ERR_ROCKSDB_CORRUPT_DATA;
  }

  // The hidden PK id is stored big-endian (network byte order) in the key.
  *hidden_pk_id = rdb_netbuf_read_uint64(&from);
  return HA_EXIT_SUCCESS;
}

/**
  @brief
  Free lock controls. We call this whenever we close a table. If the table
  had the last reference to the table_handler, then we free the memory
  associated with it.
*/
void Rdb_open_tables_map::release_table_handler(
    Rdb_table_handler *const table_handler) {
  RDB_MUTEX_LOCK_CHECK(m_mutex);

  DBUG_ASSERT(table_handler != nullptr);
  DBUG_ASSERT(table_handler->m_ref_count > 0);
  if (!--table_handler->m_ref_count) {
    // Last reference was released. Tear down the hash entry.
    const auto ret MY_ATTRIBUTE((__unused__)) = m_hash.remove(table_handler);
    DBUG_ASSERT(!ret);  // the hash entry must actually be found and deleted
    my_core::thr_lock_delete(&table_handler->m_thr_lock);
    // The struct and its name string came from a single my_multi_malloc()
    // in get_table_handler(), so one my_free() releases both.
    my_free(table_handler);
  }

  RDB_MUTEX_UNLOCK_CHECK(m_mutex);
}

// handlerton "create" callback: construct a ha_rocksdb handler instance on
// the supplied MEM_ROOT.
static handler *rocksdb_create_handler(my_core::handlerton *const hton,
                                       my_core::TABLE_SHARE *const table_arg,
                                       my_core::MEM_ROOT *const mem_root) {
  return new (mem_root) ha_rocksdb(hton, table_arg);
}

// Handler constructor: zero/clear all per-handler state; real setup happens
// later (e.g. in open()/alloc_key_buffers()).
ha_rocksdb::ha_rocksdb(my_core::handlerton *const hton,
                       my_core::TABLE_SHARE *const table_arg)
    : handler(hton, table_arg), m_table_handler(nullptr), m_scan_it(nullptr),
      m_scan_it_skips_bloom(false), m_scan_it_snapshot(nullptr),
      m_tbl_def(nullptr), m_pk_descr(nullptr), m_key_descr_arr(nullptr),
      m_pk_can_be_decoded(false), m_maybe_unpack_info(false),
      m_pk_tuple(nullptr), m_pk_packed_tuple(nullptr),
      m_sk_packed_tuple(nullptr), m_end_key_packed_tuple(nullptr),
      m_sk_match_prefix(nullptr), m_sk_match_prefix_buf(nullptr),
      m_sk_packed_tuple_old(nullptr), m_dup_sk_packed_tuple(nullptr),
      m_dup_sk_packed_tuple_old(nullptr), m_pack_buffer(nullptr),
      m_lock_rows(RDB_LOCK_NONE), m_keyread_only(FALSE),
      m_bulk_load_tx(nullptr), m_encoder_arr(nullptr),
      m_row_checksums_checked(0), m_in_rpl_delete_rows(false),
      m_in_rpl_update_rows(false), m_force_skip_unique_check(false) {
  // TODO(alexyang): create a valid PSI_mutex_key for this mutex
  mysql_mutex_init(0, &m_bulk_load_mutex, MY_MUTEX_INIT_FAST);
}

// Two handlers refer to the same table iff their base table names match.
bool ha_rocksdb::same_table(const ha_rocksdb &other) const {
  return m_tbl_def->base_tablename() == other.m_tbl_def->base_tablename();
}

/**
  @return
    false  OK
    other  Error unpacking the data
*/
bool ha_rocksdb::init_with_fields() {
  DBUG_ENTER_FUNC();

  const uint pk = table_share->primary_key;
  if (pk != MAX_KEY) {
    const uint key_parts = table_share->key_info[pk].user_defined_key_parts;
    // Determines (as a side effect) whether index-only reads can decode
    // the PK image; result is cached in m_pk_can_be_decoded.
    check_keyread_allowed(pk /*PK*/, key_parts - 1, true);
  } else
    m_pk_can_be_decoded = false;

  cached_table_flags = table_flags();

  DBUG_RETURN(false); /* Ok */
}

/*
  If the key is a TTL key, we may need to filter it out.

  The purpose of read filtering for tables with TTL is to ensure that
  during a transaction a key which has expired already but not removed by
  compaction yet is not returned to the user.

  Without this the user might be hit with problems such as disappearing
  rows within a transaction, etc, because the compaction filter ignores
  snapshots when filtering keys.
*/
bool ha_rocksdb::should_hide_ttl_rec(const Rdb_key_def &kd,
                                     const rocksdb::Slice &ttl_rec_val,
                                     const int64_t curr_ts) {
  DBUG_ASSERT(kd.has_ttl());
  DBUG_ASSERT(kd.m_ttl_rec_offset != UINT_MAX);

  /*
    Curr_ts can only be 0 if there are no snapshots open.
    should_hide_ttl_rec can only be called when there is >=1 snapshots,
    unless we are filtering on the write path (single INSERT/UPDATE) in
    which case we are passed in the current time as curr_ts.

    In the event curr_ts is 0, we always decide not to filter the record.
    We also log a warning and increment a diagnostic counter.
  */
  if (curr_ts == 0) {
    update_row_stats(ROWS_HIDDEN_NO_SNAPSHOT);
    return false;
  }

  if (!rdb_is_ttl_read_filtering_enabled() || !rdb_is_ttl_enabled()) {
    return false;
  }

  Rdb_string_reader reader(&ttl_rec_val);

  /*
    Find where the 8-byte ttl is for each record in this index.
  */
  uint64 ts;
  if (!reader.read(kd.m_ttl_rec_offset) || reader.read_uint64(&ts)) {
    /*
      This condition should never be reached since all TTL records have an
      8 byte ttl field in front. Don't filter the record out, and log an
      error.
    */
    std::string buf;
    buf = rdb_hexdump(ttl_rec_val.data(), ttl_rec_val.size(),
                      RDB_MAX_HEXDUMP_LEN);
    const GL_INDEX_ID gl_index_id = kd.get_gl_index_id();
    // NO_LINT_DEBUG
    sql_print_error("Decoding ttl from PK value failed, "
                    "for index (%u,%u), val: %s",
                    gl_index_id.cf_id, gl_index_id.index_id, buf.c_str());
    DBUG_ASSERT(0);
    return false;
  }

  /* Hide record if it has expired before the current snapshot time. */
  uint64 read_filter_ts = 0;
#ifndef NDEBUG
  // Debug/test hook: lets tests shift the effective filtering timestamp.
  read_filter_ts += rdb_dbug_set_ttl_read_filter_ts();
#endif
  return ts + kd.m_ttl_duration + read_filter_ts <=
         static_cast<uint64>(curr_ts);
}

// Advance the iterator past any records whose TTL has expired relative to
// the current transaction's snapshot timestamp.
void ha_rocksdb::rocksdb_skip_expired_records(const Rdb_key_def &kd,
                                              rocksdb::Iterator *const iter,
                                              bool seek_backward) {
  if (kd.has_ttl()) {
    while (iter->Valid() &&
           should_hide_ttl_rec(
               kd, iter->value(),
               get_or_create_tx(table->in_use)->m_snapshot_timestamp)) {
      rocksdb_smart_next(seek_backward, iter);
    }
  }
}

/**
  Convert record from table->record[0] form into a form that can be written
  into rocksdb.

  Value layout built in m_storage_record:
    [8-byte TTL (only if the index has TTL)]
    [NULL-bit bytes]
    [unpack_info block (only if m_maybe_unpack_info)]
    [field images for all STORE_ALL fields]
    [optional checksum chunk]

  @param pk_packed_slice      Packed PK tuple. We need it in order to
                              compute and store its CRC.
  @param packed_rec      OUT  Data slice with record data.
*/
int ha_rocksdb::convert_record_to_storage_format(
    const struct update_row_info &row_info, rocksdb::Slice *const packed_rec) {
  DBUG_ASSERT_IMP(m_maybe_unpack_info, row_info.new_pk_unpack_info);
  DBUG_ASSERT(m_pk_descr != nullptr);

  const rocksdb::Slice &pk_packed_slice = row_info.new_pk_slice;
  Rdb_string_writer *const pk_unpack_info = row_info.new_pk_unpack_info;
  bool has_ttl = m_pk_descr->has_ttl();
  bool has_ttl_column = !m_pk_descr->m_ttl_column.empty();
  bool ttl_in_pk = has_ttl_column && (row_info.ttl_pk_offset != UINT_MAX);

  m_storage_record.length(0);

  if (has_ttl) {
    /* If it's a TTL record, reserve space for 8 byte TTL value in front. */
    m_storage_record.fill(ROCKSDB_SIZEOF_TTL_RECORD + m_null_bytes_in_rec, 0);
    m_ttl_bytes_updated = false;

    /*
      If the TTL is contained within the key, we use the offset to find the
      TTL value and place it in the beginning of the value record.
    */
    if (ttl_in_pk) {
      Rdb_string_reader reader(&pk_packed_slice);
      const char *ts;
      if (!reader.read(row_info.ttl_pk_offset) ||
          !(ts = reader.read(ROCKSDB_SIZEOF_TTL_RECORD))) {
        std::string buf;
        buf = rdb_hexdump(pk_packed_slice.data(), pk_packed_slice.size(),
                          RDB_MAX_HEXDUMP_LEN);
        const GL_INDEX_ID gl_index_id = m_pk_descr->get_gl_index_id();
        // NO_LINT_DEBUG
        sql_print_error("Decoding ttl from PK failed during insert, "
                        "for index (%u,%u), key: %s",
                        gl_index_id.cf_id, gl_index_id.index_id, buf.c_str());
        return HA_EXIT_FAILURE;
      }

      char *const data = const_cast<char *>(m_storage_record.ptr());
      memcpy(data, ts, ROCKSDB_SIZEOF_TTL_RECORD);
#ifndef NDEBUG
      // Adjust for test case if needed
      rdb_netbuf_store_uint64(
          reinterpret_cast<uchar *>(data),
          rdb_netbuf_to_uint64(reinterpret_cast<const uchar *>(data)) +
              rdb_dbug_set_ttl_rec_ts());
#endif
      // Also store in m_ttl_bytes to propagate to update_sk
      memcpy(m_ttl_bytes, data, ROCKSDB_SIZEOF_TTL_RECORD);
    } else if (!has_ttl_column) {
      /*
        For implicitly generated TTL records we need to copy over the old
        TTL value from the old record in the event of an update. It was
        stored in m_ttl_bytes.

        Otherwise, generate a timestamp using the current time.
      */
      if (!row_info.old_pk_slice.empty()) {
        char *const data = const_cast<char *>(m_storage_record.ptr());
        memcpy(data, m_ttl_bytes, sizeof(uint64));
      } else {
        uint64 ts = static_cast<uint64>(std::time(nullptr));
#ifndef NDEBUG
        ts += rdb_dbug_set_ttl_rec_ts();
#endif
        char *const data = const_cast<char *>(m_storage_record.ptr());
        rdb_netbuf_store_uint64(reinterpret_cast<uchar *>(data), ts);
        // Also store in m_ttl_bytes to propagate to update_sk
        memcpy(m_ttl_bytes, data, ROCKSDB_SIZEOF_TTL_RECORD);
      }
    }
  } else {
    /* All NULL bits are initially 0 */
    m_storage_record.fill(m_null_bytes_in_rec, 0);
  }

  // If a primary key may have non-empty unpack_info for certain values,
  // (m_maybe_unpack_info=TRUE), we write the unpack_info block. The block
  // itself was prepared in Rdb_key_def::pack_record.
  if (m_maybe_unpack_info) {
    m_storage_record.append(reinterpret_cast<char *>(pk_unpack_info->ptr()),
                            pk_unpack_info->get_current_pos());
  }

  for (uint i = 0; i < table->s->fields; i++) {
    /* Don't pack decodable PK key parts */
    if (m_encoder_arr[i].m_storage_type != Rdb_field_encoder::STORE_ALL) {
      continue;
    }

    Field *const field = table->field[i];
    if (m_encoder_arr[i].maybe_null()) {
      char *data = const_cast<char *>(m_storage_record.ptr());
      if (has_ttl) {
        // NULL-bit bytes sit right after the TTL prefix.
        data += ROCKSDB_SIZEOF_TTL_RECORD;
      }

      if (field->is_null()) {
        data[m_encoder_arr[i].m_null_offset] |= m_encoder_arr[i].m_null_mask;
        /* Don't write anything for NULL values */
        continue;
      }
    }

    if (m_encoder_arr[i].m_field_type == MYSQL_TYPE_BLOB) {
      my_core::Field_blob *blob = (my_core::Field_blob *)field;
      /* Get the number of bytes needed to store length*/
      const uint length_bytes = blob->pack_length() - portable_sizeof_char_ptr;

      /* Store the length of the value */
      m_storage_record.append(reinterpret_cast<char *>(blob->ptr),
                              length_bytes);

      /* Store the blob value itself */
      char *data_ptr;
      memcpy(&data_ptr, blob->ptr + length_bytes, sizeof(uchar **));
      m_storage_record.append(data_ptr, blob->get_length());
    } else if (m_encoder_arr[i].m_field_type == MYSQL_TYPE_VARCHAR) {
      Field_varstring *const field_var = (Field_varstring *)field;
      uint data_len;
      /* field_var->length_bytes is 1 or 2 */
      if (field_var->length_bytes == 1) {
        data_len = field_var->ptr[0];
      } else {
        DBUG_ASSERT(field_var->length_bytes == 2);
        data_len = uint2korr(field_var->ptr);
      }
      // Length prefix and value are stored together, exactly as in the
      // in-memory record.
      m_storage_record.append(reinterpret_cast<char *>(field_var->ptr),
                              field_var->length_bytes + data_len);
    } else {
      /* Copy the field data */
      const uint len = field->pack_length_in_rec();
      m_storage_record.append(reinterpret_cast<char *>(field->ptr), len);

      /*
        Check if this is the TTL field within the table, if so store the
        TTL in the front of the record as well here.
      */
      if (has_ttl && has_ttl_column &&
          i == m_pk_descr->get_ttl_field_offset()) {
        DBUG_ASSERT(len == ROCKSDB_SIZEOF_TTL_RECORD);
        DBUG_ASSERT(field->real_type() == MYSQL_TYPE_LONGLONG);
        DBUG_ASSERT(m_pk_descr->get_ttl_field_offset() != UINT_MAX);

        char *const data = const_cast<char *>(m_storage_record.ptr());
        uint64 ts = uint8korr(field->ptr);
#ifndef NDEBUG
        ts += rdb_dbug_set_ttl_rec_ts();
#endif
        rdb_netbuf_store_uint64(reinterpret_cast<uchar *>(data), ts);

        // If this is an update and the timestamp has been updated, take
        // note so we can avoid updating SKs unnecessarily.
        if (!row_info.old_pk_slice.empty()) {
          m_ttl_bytes_updated =
              memcmp(m_ttl_bytes, data, ROCKSDB_SIZEOF_TTL_RECORD);
        }
        // Store timestamp in m_ttl_bytes to propagate to update_sk
        memcpy(m_ttl_bytes, data, ROCKSDB_SIZEOF_TTL_RECORD);
      }
    }
  }

  if (should_store_row_debug_checksums()) {
    // Debug-only: append tag + CRC32 of the key and of the value so the
    // read path can verify integrity.
    const uint32_t key_crc32 = my_core::crc32(
        0, rdb_slice_to_uchar_ptr(&pk_packed_slice), pk_packed_slice.size());
    const uint32_t val_crc32 =
        my_core::crc32(0, rdb_mysql_str_to_uchar_str(&m_storage_record),
                       m_storage_record.length());
    uchar key_crc_buf[RDB_CHECKSUM_SIZE];
    uchar val_crc_buf[RDB_CHECKSUM_SIZE];
    rdb_netbuf_store_uint32(key_crc_buf, key_crc32);
    rdb_netbuf_store_uint32(val_crc_buf, val_crc32);
    m_storage_record.append((const char *)&RDB_CHECKSUM_DATA_TAG, 1);
    m_storage_record.append((const char *)key_crc_buf, RDB_CHECKSUM_SIZE);
    m_storage_record.append((const char *)val_crc_buf, RDB_CHECKSUM_SIZE);
  }

  *packed_rec =
      rocksdb::Slice(m_storage_record.ptr(), m_storage_record.length());

  return HA_EXIT_SUCCESS;
}

/*
  @brief
    Setup which fields will be unpacked when reading rows

  @detail
    Two special cases when we still unpack all fields:
    - When this table is being updated (m_lock_rows==RDB_LOCK_WRITE).
    - When @@rocksdb_verify_row_debug_checksums is ON (In this mode, we
      need to read all fields to find whether there is a row checksum at
      the end. We could skip the fields instead of decoding them, but
      currently we do decoding.)
  @seealso
    ha_rocksdb::setup_field_converters()
    ha_rocksdb::convert_record_from_storage_format()
*/
void ha_rocksdb::setup_read_decoders() {
  m_decoders_vect.clear();

  int last_useful = 0;
  int skip_size = 0;

  for (uint i = 0; i < table->s->fields; i++) {
    // We only need the decoder if the whole record is stored.
    if (m_encoder_arr[i].m_storage_type != Rdb_field_encoder::STORE_ALL) {
      continue;
    }

    // bitmap is cleared on index merge, but it still needs to decode columns
    if (m_lock_rows == RDB_LOCK_WRITE || m_verify_row_debug_checksums ||
        bitmap_is_clear_all(table->read_set) ||
        bitmap_is_set(table->read_set, table->field[i]->field_index)) {
      // We will need to decode this field
      m_decoders_vect.push_back({&m_encoder_arr[i], true, skip_size});
      last_useful = m_decoders_vect.size();
      skip_size = 0;
    } else {
      if (m_encoder_arr[i].uses_variable_len_encoding() ||
          m_encoder_arr[i].maybe_null()) {
        // For variable-length field, we need to read the data and skip it
        m_decoders_vect.push_back({&m_encoder_arr[i], false, skip_size});
        skip_size = 0;
      } else {
        // Fixed-width field can be skipped without looking at it.
        // Add appropriate skip_size to the next field.
        skip_size += m_encoder_arr[i].m_pack_length_in_rec;
      }
    }
  }

  // It could be that the last few elements are varchars that just do
  // skipping. Remove them.
  m_decoders_vect.erase(m_decoders_vect.begin() + last_useful,
                        m_decoders_vect.end());
}

#ifndef NDEBUG
// Debug-build fault-injection helpers used by DBUG_EXECUTE_IF hooks to
// corrupt records/keys and exercise the corruption-handling paths.
void dbug_append_garbage_at_end(std::string &on_disk_rec) {
  on_disk_rec.append("abc");
}

void dbug_truncate_record(std::string &on_disk_rec) { on_disk_rec.resize(0); }

void dbug_modify_rec_varchar12(std::string &on_disk_rec) {
  std::string res;
  // The record is NULL-byte followed by VARCHAR(10).
  // Put the NULL-byte
  res.append("\0", 1);
  // Then, add a valid VARCHAR(12) value.
  res.append("\xC", 1);
  res.append("123456789ab", 12);

  on_disk_rec.assign(res);
}

void dbug_modify_key_varchar8(String &on_disk_rec) {
  std::string res;
  // The key starts with index number
  res.append(on_disk_rec.ptr(), Rdb_key_def::INDEX_NUMBER_SIZE);

  // Then, a mem-comparable form of a varchar(8) value.
  res.append("ABCDE\0\0\0\xFC", 9);
  on_disk_rec.length(0);
  on_disk_rec.append(res.data(), res.size());
}

void dbug_create_err_inplace_alter() {
  my_printf_error(ER_UNKNOWN_ERROR,
                  "Intentional failure in inplace alter occurred.", MYF(0));
}
#endif

// Convenience overload: unpack the previously-fetched m_retrieved_record
// (after optional debug fault injection) via the three-argument variant.
int ha_rocksdb::convert_record_from_storage_format(
    const rocksdb::Slice *const key, uchar *const buf) {
  DBUG_EXECUTE_IF("myrocks_simulate_bad_row_read1",
                  dbug_append_garbage_at_end(m_retrieved_record););
  DBUG_EXECUTE_IF("myrocks_simulate_bad_row_read2",
                  dbug_truncate_record(m_retrieved_record););
  DBUG_EXECUTE_IF("myrocks_simulate_bad_row_read3",
                  dbug_modify_rec_varchar12(m_retrieved_record););

  const rocksdb::Slice retrieved_rec_slice(&m_retrieved_record.front(),
                                           m_retrieved_record.size());
  return convert_record_from_storage_format(key, &retrieved_rec_slice, buf);
}

// Decode one BLOB field image from the stored value: copy the length
// prefix into the field and, when decoding, point the field's 8-byte data
// pointer at the bytes inside the reader's buffer (no copy of the blob
// payload is made — the field references m_retrieved_record).
int ha_rocksdb::convert_blob_from_storage_format(
    my_core::Field_blob *const blob, Rdb_string_reader *const reader,
    bool decode) {
  /* Get the number of bytes needed to store length*/
  const uint length_bytes = blob->pack_length() - portable_sizeof_char_ptr;

  const char *data_len_str;
  if (!(data_len_str = reader->read(length_bytes))) {
    return HA_ERR_ROCKSDB_CORRUPT_DATA;
  }

  memcpy(blob->ptr, data_len_str, length_bytes);

  const uint32 data_len = blob->get_length(
      reinterpret_cast<const uchar*>(data_len_str), length_bytes);
  const char *blob_ptr;
  if (!(blob_ptr = reader->read(data_len))) {
    return HA_ERR_ROCKSDB_CORRUPT_DATA;
  }

  if (decode) {
    // set 8-byte pointer to 0, like innodb does (relevant for 32-bit
    // platforms)
    memset(blob->ptr + length_bytes, 0, 8);
    memcpy(blob->ptr + length_bytes, &blob_ptr, sizeof(uchar **));
  }

  return HA_EXIT_SUCCESS;
}

// Decode one VARCHAR field image: validate its stored length against the
// DDL-declared maximum and, when decoding, copy prefix + data into the
// field in one memcpy.
int
ha_rocksdb::convert_varchar_from_storage_format(
  my_core::Field_varstring *const field_var,
  Rdb_string_reader *const reader,
  bool decode)
{
  const char *data_len_str;
  if (!(data_len_str = reader->read(field_var->length_bytes)))
    return HA_ERR_ROCKSDB_CORRUPT_DATA;

  uint data_len;
  /* field_var->length_bytes is 1 or 2 */
  if (field_var->length_bytes == 1) {
    data_len = (uchar)data_len_str[0];
  } else {
    DBUG_ASSERT(field_var->length_bytes == 2);
    data_len = uint2korr(data_len_str);
  }

  if (data_len > field_var->field_length) {
    /* The data on disk is longer than table DDL allows? */
    return HA_ERR_ROCKSDB_CORRUPT_DATA;
  }

  // Advance past the payload; data_len_str still points at prefix+payload
  // contiguously, which the memcpy below relies on.
  if (!reader->read(data_len)) {
    return HA_ERR_ROCKSDB_CORRUPT_DATA;
  }

  if (decode) {
    memcpy(field_var->ptr, data_len_str, field_var->length_bytes + data_len);
  }

  return HA_EXIT_SUCCESS;
}

// Decode one fixed-width field image of `len` bytes (len==0 is a no-op).
int ha_rocksdb::convert_field_from_storage_format(
    my_core::Field *const field, Rdb_string_reader *const reader,
    bool decode, uint len) {
  const char *data_bytes;
  if (len > 0) {
    if ((data_bytes = reader->read(len)) == nullptr) {
      return HA_ERR_ROCKSDB_CORRUPT_DATA;
    }

    if (decode)
      memcpy(field->ptr, data_bytes, len);
  }

  return HA_EXIT_SUCCESS;
}

/*
  @brief
    Unpack the record in this->m_retrieved_record and this->m_last_rowkey
    from storage format into buf (which can be table->record[0] or
    table->record[1]).

  @param  key   Table record's key in mem-comparable form.
  @param  buf   Store record in table->record[0] format here

  @detail
    If the table has blobs, the unpacked data in buf may keep pointers to
    the data in this->m_retrieved_record.

    The key is only needed to check its checksum value (the checksum is in
    m_retrieved_record).

  @seealso
    ha_rocksdb::setup_read_decoders()  Sets up data structures which tell
    which columns to decode.
  @return
    0      OK
    other  Error unpacking the data
*/
int ha_rocksdb::convert_record_from_storage_format(
    const rocksdb::Slice *const key, const rocksdb::Slice *const value,
    uchar *const buf) {
  DBUG_ASSERT(key != nullptr);
  DBUG_ASSERT(buf != nullptr);

  Rdb_string_reader reader(value);

  /*
    Decode PK fields from the key
  */
  DBUG_EXECUTE_IF("myrocks_simulate_bad_pk_read1",
                  dbug_modify_key_varchar8(m_last_rowkey););

  const rocksdb::Slice rowkey_slice(m_last_rowkey.ptr(),
                                    m_last_rowkey.length());
  const char *unpack_info = nullptr;
  uint16 unpack_info_len = 0;
  rocksdb::Slice unpack_slice;

  /* If it's a TTL record, skip the 8 byte TTL value */
  const char *ttl_bytes;
  if (m_pk_descr->has_ttl()) {
    if ((ttl_bytes = reader.read(ROCKSDB_SIZEOF_TTL_RECORD))) {
      // Keep the TTL so a later update can carry it over (see
      // convert_record_to_storage_format()).
      memcpy(m_ttl_bytes, ttl_bytes, ROCKSDB_SIZEOF_TTL_RECORD);
    } else {
      return HA_ERR_ROCKSDB_CORRUPT_DATA;
    }
  }

  /* Other fields are decoded from the value */
  const char *null_bytes = nullptr;
  if (m_null_bytes_in_rec &&
      !(null_bytes = reader.read(m_null_bytes_in_rec))) {
    return HA_ERR_ROCKSDB_CORRUPT_DATA;
  }

  if (m_maybe_unpack_info) {
    unpack_info = reader.get_current_ptr();
    if (!unpack_info || !Rdb_key_def::is_unpack_data_tag(unpack_info[0]) ||
        !reader.read(Rdb_key_def::get_unpack_header_size(unpack_info[0]))) {
      return HA_ERR_ROCKSDB_CORRUPT_DATA;
    }

    unpack_info_len =
        rdb_netbuf_to_uint16(reinterpret_cast<const uchar *>(unpack_info + 1));
    unpack_slice = rocksdb::Slice(unpack_info, unpack_info_len);

    // Skip the remainder of the unpack_info block (header already read).
    reader.read(unpack_info_len -
                Rdb_key_def::get_unpack_header_size(unpack_info[0]));
  }

  int err = m_pk_descr->unpack_record(table, buf, &rowkey_slice,
                                      unpack_info ? &unpack_slice : nullptr,
                                      false /* verify_checksum */);
  if (err != HA_EXIT_SUCCESS) {
    return err;
  }

  for (auto it = m_decoders_vect.begin(); it != m_decoders_vect.end(); it++) {
    const Rdb_field_encoder *const field_dec = it->m_field_enc;
    const bool decode = it->m_decode;
    const bool isNull =
        field_dec->maybe_null() &&
        ((null_bytes[field_dec->m_null_offset] & field_dec->m_null_mask) !=
         0);

    Field *const field = table->field[field_dec->m_field_index];

    /* Skip the bytes we need to skip */
    if (it->m_skip && !reader.read(it->m_skip)) {
      return HA_ERR_ROCKSDB_CORRUPT_DATA;
    }

    // Temporarily repoint the Field into buf (which may be record[1]),
    // since Field objects are wired to table->record[0] by default.
    uint field_offset = field->ptr - table->record[0];
    uint null_offset = field->null_offset();
    bool maybe_null = field->real_maybe_null();
    field->move_field(buf + field_offset,
                      maybe_null ? buf + null_offset : nullptr,
                      field->null_bit);
    // WARNING! - Don't return before restoring field->ptr and
    // field->null_ptr!

    if (isNull) {
      if (decode) {
        /* This sets the NULL-bit of this record */
        field->set_null();
        /*
          Besides that, set the field value to default value. CHECKSUM
          TABLE depends on this.
        */
        memcpy(field->ptr, table->s->default_values + field_offset,
               field->pack_length());
      }
    } else {
      if (decode) {
        field->set_notnull();
      }

      if (field_dec->m_field_type == MYSQL_TYPE_BLOB) {
        err = convert_blob_from_storage_format(
            (my_core::Field_blob *) field, &reader, decode);
      } else if (field_dec->m_field_type == MYSQL_TYPE_VARCHAR) {
        err = convert_varchar_from_storage_format(
            (my_core::Field_varstring *) field, &reader, decode);
      } else {
        err = convert_field_from_storage_format(
            field, &reader, decode, field_dec->m_pack_length_in_rec);
      }
    }

    // Restore field->ptr and field->null_ptr
    field->move_field(table->record[0] + field_offset,
                      maybe_null ? table->record[0] + null_offset : nullptr,
                      field->null_bit);

    if (err != HA_EXIT_SUCCESS) {
      return err;
    }
  }

  if (m_verify_row_debug_checksums) {
    // Optional trailing checksum chunk: tag byte + key CRC32 + value CRC32
    // (written by convert_record_to_storage_format()).
    if (reader.remaining_bytes() == RDB_CHECKSUM_CHUNK_SIZE &&
        reader.read(1)[0] == RDB_CHECKSUM_DATA_TAG) {
      uint32_t stored_key_chksum =
          rdb_netbuf_to_uint32((const uchar *)reader.read(RDB_CHECKSUM_SIZE));
      uint32_t stored_val_chksum =
          rdb_netbuf_to_uint32((const uchar *)reader.read(RDB_CHECKSUM_SIZE));

      const uint32_t computed_key_chksum =
          my_core::crc32(0, rdb_slice_to_uchar_ptr(key), key->size());
      const uint32_t computed_val_chksum =
          my_core::crc32(0, rdb_slice_to_uchar_ptr(value),
                         value->size() - RDB_CHECKSUM_CHUNK_SIZE);

      DBUG_EXECUTE_IF("myrocks_simulate_bad_pk_checksum1",
                      stored_key_chksum++;);

      if (stored_key_chksum != computed_key_chksum) {
        m_pk_descr->report_checksum_mismatch(true, key->data(), key->size());
        return HA_ERR_ROCKSDB_CHECKSUM_MISMATCH;
      }

      DBUG_EXECUTE_IF("myrocks_simulate_bad_pk_checksum2",
                      stored_val_chksum++;);
      if (stored_val_chksum != computed_val_chksum) {
        m_pk_descr->report_checksum_mismatch(false, value->data(),
                                             value->size());
        return HA_ERR_ROCKSDB_CHECKSUM_MISMATCH;
      }

      m_row_checksums_checked++;
    }
    if (reader.remaining_bytes())
      return HA_ERR_ROCKSDB_CORRUPT_DATA;
  }

  return HA_EXIT_SUCCESS;
}

// Decide how much of a PK key-part must also be stored in the value:
// STORE_SOME when decoding needs extra unpack_info, STORE_NONE when the
// key image alone can be decoded back, otherwise the STORE_ALL default set
// by the caller stands.
void ha_rocksdb::get_storage_type(Rdb_field_encoder *const encoder,
                                  const uint &kp) {
  // STORE_SOME uses unpack_info.
  if (m_pk_descr->has_unpack_info(kp)) {
    DBUG_ASSERT(m_pk_descr->can_unpack(kp));
    encoder->m_storage_type = Rdb_field_encoder::STORE_SOME;
    m_maybe_unpack_info = true;
  } else if (m_pk_descr->can_unpack(kp)) {
    encoder->m_storage_type = Rdb_field_encoder::STORE_NONE;
  }
}

/*
  Setup data needed to convert table->record[] to and from record storage
  format.
  @seealso
    ha_rocksdb::convert_record_to_storage_format,
    ha_rocksdb::convert_record_from_storage_format
*/
void ha_rocksdb::setup_field_converters() {
  uint i;
  uint null_bytes = 0;
  uchar cur_null_mask = 0x1;

  DBUG_ASSERT(m_encoder_arr == nullptr);
  m_encoder_arr = static_cast<Rdb_field_encoder *>(
      my_malloc(table->s->fields * sizeof(Rdb_field_encoder), MYF(0)));
  // NOTE(review): on allocation failure we return silently with
  // m_encoder_arr == nullptr — callers presumably detect this; confirm.
  if (m_encoder_arr == nullptr) {
    return;
  }

  for (i = 0; i < table->s->fields; i++) {
    Field *const field = table->field[i];
    m_encoder_arr[i].m_storage_type = Rdb_field_encoder::STORE_ALL;

    /*
      Check if this field is
      - a part of primary key, and
      - it can be decoded back from its key image.
      If both hold, we don't need to store this field in the value part of
      RocksDB's key-value pair.

      If hidden pk exists, we skip this check since the field will never be
      part of the hidden pk.
    */
    if (!has_hidden_pk(table)) {
      KEY *const pk_info = &table->key_info[table->s->primary_key];
      for (uint kp = 0; kp < pk_info->user_defined_key_parts; kp++) {
        /* key_part->fieldnr is counted from 1 */
        if (field->field_index + 1 == pk_info->key_part[kp].fieldnr) {
          get_storage_type(&m_encoder_arr[i], kp);
          break;
        }
      }
    }

    m_encoder_arr[i].m_field_type = field->real_type();
    m_encoder_arr[i].m_field_index = i;
    m_encoder_arr[i].m_pack_length_in_rec = field->pack_length_in_rec();

    // Assign every nullable field a (byte offset, bit mask) slot inside the
    // null-bits prefix of the stored record.
    if (field->real_maybe_null()) {
      m_encoder_arr[i].m_null_mask = cur_null_mask;
      m_encoder_arr[i].m_null_offset = null_bytes;
      if (cur_null_mask == 0x80) {
        cur_null_mask = 0x1;
        null_bytes++;
      } else
        cur_null_mask = cur_null_mask << 1;
    } else {
      m_encoder_arr[i].m_null_mask = 0;
    }
  }

  /* Count the last, unfinished NULL-bits byte */
  if (cur_null_mask != 0x1)
    null_bytes++;

  m_null_bytes_in_rec = null_bytes;
}

/*
  Allocate the fixed-size buffers used for packing and unpacking keys.

  @param table_arg           table whose keys the buffers are sized for
  @param tbl_def_arg         MyRocks table definition (owns key descriptors)
  @param alloc_alter_buffers also allocate the duplicate-check buffers used
                             by inplace ALTER on unique secondary indexes
  @return HA_EXIT_SUCCESS or HA_ERR_OUT_OF_MEM
*/
int ha_rocksdb::alloc_key_buffers(const TABLE *const table_arg,
                                  const Rdb_tbl_def *const tbl_def_arg,
                                  bool alloc_alter_buffers) {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(m_pk_tuple == nullptr);
  DBUG_ASSERT(tbl_def_arg != nullptr);

  std::shared_ptr<Rdb_key_def> *const kd_arr =
      tbl_def_arg->m_key_descr_arr;
  uint key_len = 0;
  uint max_packed_sk_len = 0;
  uint pack_key_len = 0;

  m_pk_descr = kd_arr[pk_index(table_arg, tbl_def_arg)];
  if (has_hidden_pk(table_arg)) {
    // Hidden PK is one internally-generated keypart; key_len stays 0 so
    // m_pk_tuple gets a zero-sized allocation below.
    m_pk_key_parts = 1;
  } else {
    m_pk_key_parts =
        table->key_info[table->s->primary_key].user_defined_key_parts;
    key_len = table->key_info[table->s->primary_key].key_length;
  }

  // move this into get_table_handler() ??
  m_pk_descr->setup(table_arg, tbl_def_arg);

  m_pk_tuple = reinterpret_cast<uchar *>(my_malloc(key_len, MYF(0)));

  pack_key_len = m_pk_descr->max_storage_fmt_length();
  m_pk_packed_tuple =
      reinterpret_cast<uchar *>(my_malloc(pack_key_len, MYF(0)));

  /* Sometimes, we may use m_sk_packed_tuple for storing packed PK */
  max_packed_sk_len = pack_key_len;
  for (uint i = 0; i < table_arg->s->keys; i++) {
    if (i == table_arg->s->primary_key) /* Primary key was processed above */
      continue;

    // TODO: move this into get_table_handler() ??
    kd_arr[i]->setup(table_arg, tbl_def_arg);

    // Every secondary-key buffer below is sized for the widest secondary
    // key (or the PK, whichever packs larger).
    const uint packed_len = kd_arr[i]->max_storage_fmt_length();
    if (packed_len > max_packed_sk_len) {
      max_packed_sk_len = packed_len;
    }
  }

  m_sk_packed_tuple =
      reinterpret_cast<uchar *>(my_malloc(max_packed_sk_len, MYF(0)));
  m_sk_match_prefix_buf =
      reinterpret_cast<uchar *>(my_malloc(max_packed_sk_len, MYF(0)));
  m_sk_packed_tuple_old =
      reinterpret_cast<uchar *>(my_malloc(max_packed_sk_len, MYF(0)));
  m_end_key_packed_tuple =
      reinterpret_cast<uchar *>(my_malloc(max_packed_sk_len, MYF(0)));
  m_pack_buffer =
      reinterpret_cast<uchar *>(my_malloc(max_packed_sk_len, MYF(0)));

  /*
    If inplace alter is happening, allocate special buffers for unique
    secondary index duplicate checking.
  */
  if (alloc_alter_buffers) {
    m_dup_sk_packed_tuple =
        reinterpret_cast<uchar *>(my_malloc(max_packed_sk_len, MYF(0)));
    m_dup_sk_packed_tuple_old =
        reinterpret_cast<uchar *>(my_malloc(max_packed_sk_len, MYF(0)));
  }

  // NOTE(review): m_sk_match_prefix_buf is allocated above but not part of
  // this null-check — confirm whether that omission is intentional.
  if (m_pk_tuple == nullptr || m_pk_packed_tuple == nullptr ||
      m_sk_packed_tuple == nullptr || m_sk_packed_tuple_old == nullptr ||
      m_end_key_packed_tuple == nullptr || m_pack_buffer == nullptr ||
      (alloc_alter_buffers && (m_dup_sk_packed_tuple == nullptr ||
                               m_dup_sk_packed_tuple_old == nullptr))) {
    // One or more of the above allocations failed. Clean up and exit
    free_key_buffers();

    DBUG_RETURN(HA_ERR_OUT_OF_MEM);
  }

  DBUG_RETURN(HA_EXIT_SUCCESS);
}

/*
  Release every buffer allocated by alloc_key_buffers().  Each pointer is
  reset to nullptr, so calling this twice is harmless.
*/
void ha_rocksdb::free_key_buffers() {
  my_free(m_pk_tuple);
  m_pk_tuple = nullptr;

  my_free(m_pk_packed_tuple);
  m_pk_packed_tuple = nullptr;

  my_free(m_sk_packed_tuple);
  m_sk_packed_tuple = nullptr;

  my_free(m_sk_match_prefix_buf);
  m_sk_match_prefix_buf = nullptr;

  my_free(m_sk_packed_tuple_old);
  m_sk_packed_tuple_old = nullptr;

  my_free(m_end_key_packed_tuple);
  m_end_key_packed_tuple = nullptr;

  my_free(m_pack_buffer);
  m_pack_buffer = nullptr;

  my_free(m_dup_sk_packed_tuple);
  m_dup_sk_packed_tuple = nullptr;

  my_free(m_dup_sk_packed_tuple_old);
  m_dup_sk_packed_tuple_old = nullptr;
}

#ifdef MARIAROCKS_NOT_YET
/*
  Enable read-free replication for this table if its name matches the
  configured whitelist of regex patterns.
*/
void ha_rocksdb::set_use_read_free_rpl(const char *const whitelist) {
  DBUG_ASSERT(whitelist != nullptr);

#if defined(HAVE_PSI_INTERFACE)
  Regex_list_handler regex_handler(key_rwlock_read_free_rpl_tables);
#else
  Regex_list_handler regex_handler;
#endif

  if (!regex_handler.set_patterns(whitelist)) {
    warn_about_bad_patterns(&regex_handler, "read_free_rpl_tables");
  }

  m_use_read_free_rpl = regex_handler.matches(m_tbl_def->base_tablename());
}
#endif

/*
  Skip unique-key checking for this table if its name matches the
  configured whitelist of regex patterns.
*/
void ha_rocksdb::set_skip_unique_check_tables(const char *const whitelist) {
  DBUG_ASSERT(whitelist != nullptr);

#if defined(HAVE_PSI_INTERFACE)
  Regex_list_handler regex_handler(key_rwlock_skip_unique_check_tables);
#else
  Regex_list_handler regex_handler;
#endif

  if
  (!regex_handler.set_patterns(whitelist)) {
    warn_about_bad_patterns(&regex_handler, "skip_unique_check_tables");
  }

  m_skip_unique_check = regex_handler.matches(m_tbl_def->base_tablename());
}

/**
  Open the table: attach to the shared table handler, look the table up in
  the MyRocks data dictionary and allocate the key packing buffers.

  @return
    HA_EXIT_SUCCESS  OK
    other            HA_ERR error code (can be SE-specific)
*/
int ha_rocksdb::open(const char *const name, int mode, uint test_if_locked) {
  DBUG_ENTER_FUNC();

  // Reset any previous state of this handler object first.
  int err = close();
  if (err) {
    DBUG_RETURN(err);
  }

  m_table_handler = rdb_open_tables.get_table_handler(name);

  if (m_table_handler == nullptr) {
    DBUG_RETURN(HA_ERR_OUT_OF_MEM);
  }

  my_core::thr_lock_data_init(&m_table_handler->m_thr_lock, &m_db_lock,
                              nullptr);
  m_io_perf.init(&m_table_handler->m_table_perf_context,
                 &m_table_handler->m_io_perf_read,
                 &m_table_handler->m_io_perf_write, &stats);
  Rdb_perf_context_guard guard(&m_io_perf,
                               rocksdb_perf_context_level(ha_thd()));

  std::string fullname;
  err = rdb_normalize_tablename(name, &fullname);
  if (err != HA_EXIT_SUCCESS) {
    DBUG_RETURN(err);
  }

  m_tbl_def = ddl_manager.find(fullname);
  if (m_tbl_def == nullptr) {
    my_error(ER_INTERNAL_ERROR, MYF(0),
             "Attempt to open a table that is not present in RocksDB-SE data "
             "dictionary");
    DBUG_RETURN(HA_ERR_ROCKSDB_INVALID_TABLE);
  }
  m_lock_rows = RDB_LOCK_NONE;

  m_key_descr_arr = m_tbl_def->m_key_descr_arr;

  /*
    Full table scan actually uses primary key
    (UPDATE needs to know this, otherwise it will go into infinite loop on
    queries like "UPDATE tbl SET pk=pk+100")
  */
  key_used_on_scan = table->s->primary_key;

  // close() above has already called free_key_buffers(). No need to do it
  // here.
  err = alloc_key_buffers(table, m_tbl_def);

  if (err) {
    DBUG_RETURN(err);
  }

  /*
    init_with_fields() is used to initialize table flags based on the field
    definitions in table->field[].
    It is called by open_binary_frm(), but that function calls the method for
    a temporary ha_rocksdb object which is later destroyed.

    If we are here in ::open(), then init_with_fields() has not been called
    for this object.
    Call it ourselves, we want all member variables to be properly
    initialized.
  */
  init_with_fields();

  setup_field_converters();

  info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST);

  /*
    The following load_XXX code calls row decode functions, and they do
    that without having done ::external_lock() or index_init()/rnd_init().
    (Note: this also means we're doing a read when there was no
    setup_field_converters() call)

    Initialize the necessary variables for them:
  */
  m_verify_row_debug_checksums = false;

  /* TODO: move the following to where TABLE_SHARE is opened: */
  if (table->found_next_number_field)
    load_auto_incr_value();

  if (has_hidden_pk(table) &&
      (err = load_hidden_pk_value()) != HA_EXIT_SUCCESS) {
    free_key_buffers();
    DBUG_RETURN(err);
  }

  /* Index block size in MyRocks: used by MySQL in query optimization */
  stats.block_size = rocksdb_tbl_options->block_size;

#ifdef MARIAROCKS_NOT_YET // MDEV-10976
  /* Determine at open whether we can use Read Free Replication or not */
  set_use_read_free_rpl(THDVAR(ha_thd(), read_free_rpl_tables));
#endif

  /* Determine at open whether we should skip unique checks for this table */
  set_skip_unique_check_tables(THDVAR(ha_thd(), skip_unique_check_tables));

  DBUG_RETURN(HA_EXIT_SUCCESS);
}

/*
  Close the table: detach from the shared handler and release all
  per-handler buffers.  Always succeeds.
*/
int ha_rocksdb::close(void) {
  DBUG_ENTER_FUNC();

  m_pk_descr = nullptr;
  m_key_descr_arr = nullptr;

  free_key_buffers();

  my_free(m_encoder_arr);
  m_encoder_arr = nullptr;

  if (m_table_handler != nullptr) {
    rdb_open_tables.release_table_handler(m_table_handler);
    m_table_handler = nullptr;
  }

  // These are needed to suppress valgrind errors in rocksdb.partition
  m_storage_record.free();
  m_last_rowkey.free();
  m_sk_tails.free();
  m_sk_tails_old.free();
  m_pk_unpack_info.free();

  DBUG_RETURN(HA_EXIT_SUCCESS);
}

/*
  Human-readable messages for the SE-specific error codes, indexed by
  (error - HA_ERR_ROCKSDB_FIRST).  Order must match the error-code enum;
  the static_assert below enforces the count.
*/
static const char *rdb_error_messages[] = {
    "Table must have a PRIMARY KEY.",
    "Specifying DATA DIRECTORY for an individual table is not supported.",
    "Specifying INDEX DIRECTORY for an individual table is not supported.",
    "RocksDB commit failed.",
    "Failure during bulk load operation.",
    "Found data corruption.",
    "CRC checksum mismatch.",
    "Invalid table.",
    "Could not access RocksDB properties.",
    "File I/O error during merge/sort operation.",
    "RocksDB status: not found.",
    "RocksDB status: corruption.",
    "RocksDB status: invalid argument.",
    "RocksDB status: io error.",
    "RocksDB status: no space.",
    "RocksDB status: merge in progress.",
    "RocksDB status: incomplete.",
    "RocksDB status: shutdown in progress.",
    "RocksDB status: timed out.",
    "RocksDB status: aborted.",
    "RocksDB status: lock limit reached.",
    "RocksDB status: busy.",
    "RocksDB status: deadlock.",
    "RocksDB status: expired.",
    "RocksDB status: try again.",
};

static_assert((sizeof(rdb_error_messages) / sizeof(rdb_error_messages[0])) ==
                  ((HA_ERR_ROCKSDB_LAST - HA_ERR_ROCKSDB_FIRST) + 1),
              "Number of error messages doesn't match number of error codes");

//psergey-merge: do we need this in MariaDB: we have get_error_messages
//below...
#if 0
static const char *rdb_get_error_message(int nr) {
  return rdb_error_messages[nr - HA_ERR_ROCKSDB_FIRST];
}
#endif

// Returns the whole message table; 'nr' is unused by design of this hook.
static const char **rdb_get_error_messages(int nr) {
  return rdb_error_messages;
}

/*
  Translate an SE error code into a message appended to 'buf'.
  Lock-wait/deadlock errors use the transaction's detailed error text.
  Returns true when the message is "temporary" (per handler API contract).
*/
bool ha_rocksdb::get_error_message(const int error, String *const buf) {
  DBUG_ENTER_FUNC();

  static_assert(HA_ERR_ROCKSDB_LAST > HA_ERR_FIRST,
                "HA_ERR_ROCKSDB_LAST > HA_ERR_FIRST");
  static_assert(HA_ERR_ROCKSDB_LAST > HA_ERR_LAST,
                "HA_ERR_ROCKSDB_LAST > HA_ERR_LAST");

  DBUG_ASSERT(buf != nullptr);

  if (error == HA_ERR_LOCK_WAIT_TIMEOUT || error == HA_ERR_LOCK_DEADLOCK) {
    Rdb_transaction *const tx = get_tx_from_thd(ha_thd());
    DBUG_ASSERT(tx != nullptr);
    buf->append(tx->m_detailed_error);
    DBUG_RETURN(true);
  }

  if (error >= HA_ERR_ROCKSDB_FIRST && error <= HA_ERR_ROCKSDB_LAST) {
    buf->append(rdb_error_messages[error - HA_ERR_ROCKSDB_FIRST]);
  }

  // We can be called with the values which are < HA_ERR_FIRST because most
  // MySQL internal functions will just return HA_EXIT_FAILURE in case of
  // an error.
  DBUG_RETURN(false);
}

/*
  Generalized way to convert RocksDB status errors into MySQL error code, and
  print error message.

  Each error code below maps to a RocksDB status code found in:
  rocksdb/include/rocksdb/status.h
*/
int ha_rocksdb::rdb_error_to_mysql(const rocksdb::Status &s,
                                   const char *opt_msg) {
  DBUG_ASSERT(!s.ok());

  int err;
  switch (s.code()) {
  case rocksdb::Status::Code::kOk:
    // Unreachable given the assertion above; kept for switch completeness.
    err = HA_EXIT_SUCCESS;
    break;
  case rocksdb::Status::Code::kNotFound:
    err = HA_ERR_ROCKSDB_STATUS_NOT_FOUND;
    break;
  case rocksdb::Status::Code::kCorruption:
    err = HA_ERR_ROCKSDB_STATUS_CORRUPTION;
    break;
  case rocksdb::Status::Code::kNotSupported:
    err = HA_ERR_ROCKSDB_STATUS_NOT_SUPPORTED;
    break;
  case rocksdb::Status::Code::kInvalidArgument:
    err = HA_ERR_ROCKSDB_STATUS_INVALID_ARGUMENT;
    break;
  case rocksdb::Status::Code::kIOError:
    // Subcode distinguishes disk-full from other I/O failures.
    err = (s.IsNoSpace()) ? HA_ERR_ROCKSDB_STATUS_NO_SPACE
                          : HA_ERR_ROCKSDB_STATUS_IO_ERROR;
    break;
  case rocksdb::Status::Code::kMergeInProgress:
    err = HA_ERR_ROCKSDB_STATUS_MERGE_IN_PROGRESS;
    break;
  case rocksdb::Status::Code::kIncomplete:
    err = HA_ERR_ROCKSDB_STATUS_INCOMPLETE;
    break;
  case rocksdb::Status::Code::kShutdownInProgress:
    err = HA_ERR_ROCKSDB_STATUS_SHUTDOWN_IN_PROGRESS;
    break;
  case rocksdb::Status::Code::kTimedOut:
    err = HA_ERR_ROCKSDB_STATUS_TIMED_OUT;
    break;
  case rocksdb::Status::Code::kAborted:
    err = (s.IsLockLimit()) ? HA_ERR_ROCKSDB_STATUS_LOCK_LIMIT
                            : HA_ERR_ROCKSDB_STATUS_ABORTED;
    break;
  case rocksdb::Status::Code::kBusy:
    err = (s.IsDeadlock()) ? HA_ERR_ROCKSDB_STATUS_DEADLOCK
                           : HA_ERR_ROCKSDB_STATUS_BUSY;
    break;
  case rocksdb::Status::Code::kExpired:
    err = HA_ERR_ROCKSDB_STATUS_EXPIRED;
    break;
  case rocksdb::Status::Code::kTryAgain:
    err = HA_ERR_ROCKSDB_STATUS_TRY_AGAIN;
    break;
  default:
    DBUG_ASSERT(0);
    return -1;
  }

  // Emit the status text either with the caller-supplied context message or
  // as a generic status error.
  if (opt_msg) {
    my_error(ER_RDB_STATUS_MSG, MYF(0), opt_msg, s.code(),
             s.ToString().c_str());
  } else {
    my_error(ER_RDB_STATUS_GENERAL, MYF(0), s.code(), s.ToString().c_str());
  }

  return err;
}

/* MyRocks supports only the following collations for indexed columns */
static const std::set<uint> RDB_INDEX_COLLATIONS = {
    COLLATION_BINARY, COLLATION_UTF8_BIN, COLLATION_LATIN1_BIN};

/*
  Check whether a field's collation is one MyRocks can build a
  memory-comparable index image for.  Non-string types always qualify.
*/
static bool
rdb_is_index_collation_supported(const my_core::Field *const field) {
  const my_core::enum_field_types type = field->real_type();
  /* Handle [VAR](CHAR|BINARY) or TEXT|BLOB */
  if (type == MYSQL_TYPE_VARCHAR || type == MYSQL_TYPE_STRING ||
      type == MYSQL_TYPE_BLOB) {
    return RDB_INDEX_COLLATIONS.find(field->charset()->number) !=
           RDB_INDEX_COLLATIONS.end();
  }
  return true;
}

/*
  Create structures needed for storing data in rocksdb. This is called when
  the table is created. The structures will be shared by all TABLE* objects.

  @param
    table_arg        Table with definition
    db_table         "dbname.tablename"
    len              strlen of the above
    tbl_def_arg      tbl_def whose key_descr is being created/populated
    old_tbl_def_arg  tbl_def from which keys are being copied over from
                     (for use during inplace alter)

  @return
    0      - Ok
    other  - error, either given table ddl is not supported by rocksdb or OOM.
*/
int ha_rocksdb::create_key_defs(
    const TABLE *const table_arg, Rdb_tbl_def *const tbl_def_arg,
    const TABLE *const old_table_arg /* = nullptr */,
    const Rdb_tbl_def *const old_tbl_def_arg /* = nullptr */) const {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(table_arg != nullptr);
  DBUG_ASSERT(table_arg->s != nullptr);

  uint i;

  /*
    These need to be one greater than MAX_INDEXES since the user can create
    MAX_INDEXES secondary keys and no primary key which would cause us
    to generate a hidden one.
  */
  std::array<key_def_cf_info, MAX_INDEXES + 1> cfs;

  /*
    NOTE: All new column families must be created before new index numbers are
    allocated to each key definition. See below for more details.
    http://github.com/MySQLOnRocksDB/mysql-5.6/issues/86#issuecomment-138515501
  */
  if (create_cfs(table_arg, tbl_def_arg, &cfs)) {
    DBUG_RETURN(HA_EXIT_FAILURE);
  }

  if (!old_tbl_def_arg) {
    /*
      old_tbl_def doesn't exist. this means we are in the process of creating
      a new table.

      Get the index numbers (this will update the next_index_number)
      and create Rdb_key_def structures.
    */
    for (i = 0; i < tbl_def_arg->m_key_count; i++) {
      if (create_key_def(table_arg, i, tbl_def_arg, &m_key_descr_arr[i],
                         cfs[i])) {
        DBUG_RETURN(HA_EXIT_FAILURE);
      }
    }
  } else {
    /*
      old_tbl_def exists.  This means we are creating a new tbl_def as part of
      in-place alter table.  Copy over existing keys from the old_tbl_def and
      generate the necessary new key definitions if any.
    */
    if (create_inplace_key_defs(table_arg, tbl_def_arg, old_table_arg,
                                old_tbl_def_arg, cfs)) {
      DBUG_RETURN(HA_EXIT_FAILURE);
    }
  }

  DBUG_RETURN(HA_EXIT_SUCCESS);
}

/*
  Checks index parameters and creates column families needed for storing data
  in rocksdb if necessary.
  @param in
    table_arg     Table with definition
    db_table      Table name
    tbl_def_arg   Table def structure being populated

  @param out
    cfs           CF info for each key definition in 'key_info' order

  @return
    0      - Ok
    other  - error
*/
int ha_rocksdb::create_cfs(
    const TABLE *const table_arg, Rdb_tbl_def *const tbl_def_arg,
    std::array<struct key_def_cf_info, MAX_INDEXES + 1> *const cfs) const {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(table_arg != nullptr);
  DBUG_ASSERT(table_arg->s != nullptr);
  DBUG_ASSERT(tbl_def_arg != nullptr);

  char tablename_sys[NAME_LEN + 1];
  bool tsys_set= false;

  /*
    The first loop checks the index parameters and creates
    column families if necessary.
  */
  for (uint i = 0; i < tbl_def_arg->m_key_count; i++) {
    rocksdb::ColumnFamilyHandle *cf_handle;

    // Reject indexed columns with collations MyRocks cannot make
    // memory-comparable, unless the table matches the exceptions list.
    // Temporary tables (tmp_file_prefix) and the hidden PK are exempt.
    if (rocksdb_strict_collation_check &&
        !is_hidden_pk(i, table_arg, tbl_def_arg) &&
        tbl_def_arg->base_tablename().find(tmp_file_prefix) != 0) {
      if (!tsys_set)
      {
        tsys_set= true;
        my_core::filename_to_tablename(tbl_def_arg->base_tablename().c_str(),
                                   tablename_sys, sizeof(tablename_sys));
      }

      for (uint part = 0; part < table_arg->key_info[i].ext_key_parts;
           part++)
      {
        if (!rdb_is_index_collation_supported(
                table_arg->key_info[i].key_part[part].field) &&
            !rdb_collation_exceptions->matches(tablename_sys)) {

          std::string collation_err;
          for (const auto &coll : RDB_INDEX_COLLATIONS) {
            if (collation_err != "") {
              collation_err += ", ";
            }
            collation_err += get_charset_name(coll);
          }
          my_error(ER_UNSUPPORTED_COLLATION, MYF(0),
                   tbl_def_arg->full_tablename().c_str(),
                   table_arg->key_info[i].key_part[part].field->field_name,
                   collation_err.c_str());
          DBUG_RETURN(HA_EXIT_FAILURE);
        }
      }
    }

    // Internal consistency check to make sure that data in TABLE and
    // Rdb_tbl_def structures matches. Either both are missing or both are
    // specified. Yes, this is critical enough to make it into SHIP_ASSERT.
    SHIP_ASSERT(!table_arg->part_info == tbl_def_arg->base_partition().empty());

    // Generate the name for the column family to use.
    bool per_part_match_found = false;
    std::string cf_name = generate_cf_name(i, table_arg, tbl_def_arg,
      &per_part_match_found);

    // Prevent create from using the system column family.
    if (cf_name == DEFAULT_SYSTEM_CF_NAME) {
      my_error(ER_WRONG_ARGUMENTS, MYF(0),
               "column family not valid for storing index data.");
      DBUG_RETURN(HA_EXIT_FAILURE);
    }

    // Here's how `get_or_create_cf` will use the input parameters:
    //
    // `cf_name` - will be used as a CF name.
    cf_handle = cf_manager.get_or_create_cf(rdb, cf_name);

    if (!cf_handle) {
      DBUG_RETURN(HA_EXIT_FAILURE);
    }

    auto &cf = (*cfs)[i];

    cf.cf_handle = cf_handle;
    cf.is_reverse_cf = Rdb_cf_manager::is_cf_name_reverse(cf_name.c_str());
    cf.is_per_partition_cf = per_part_match_found;
  }

  DBUG_RETURN(HA_EXIT_SUCCESS);
}

/*
  Create key definition needed for storing data in rocksdb during ADD index
  inplace operations.

  @param in
    table_arg         Table with definition
    tbl_def_arg       New table def structure being populated
    old_tbl_def_arg   Old(current) table def structure
    cfs               Struct array which contains column family information

  @return
    0      - Ok
    other  - error, either given table ddl is not supported by rocksdb or OOM.
*/
int ha_rocksdb::create_inplace_key_defs(
    const TABLE *const table_arg, Rdb_tbl_def *const tbl_def_arg,
    const TABLE *const old_table_arg,
    const Rdb_tbl_def *const old_tbl_def_arg,
    const std::array<key_def_cf_info, MAX_INDEXES + 1> &cfs) const {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(table_arg != nullptr);
  DBUG_ASSERT(tbl_def_arg != nullptr);
  DBUG_ASSERT(old_tbl_def_arg != nullptr);

  std::shared_ptr<Rdb_key_def> *const old_key_descr =
      old_tbl_def_arg->m_key_descr_arr;
  std::shared_ptr<Rdb_key_def> *const new_key_descr =
      tbl_def_arg->m_key_descr_arr;
  // Map of key name -> position in the OLD definition, for keys that
  // survive the alter unchanged.
  const std::unordered_map<std::string, uint> old_key_pos =
      get_old_key_positions(table_arg, tbl_def_arg, old_table_arg,
                            old_tbl_def_arg);

  uint i;
  for (i = 0; i < tbl_def_arg->m_key_count; i++) {
    const auto &it = old_key_pos.find(get_key_name(i, table_arg, tbl_def_arg));
    if (it != old_key_pos.end()) {
      /*
        Found matching index in old table definition, so copy it over to the
        new one created.
      */
      const Rdb_key_def &okd = *old_key_descr[it->second];

      const GL_INDEX_ID gl_index_id = okd.get_gl_index_id();
      struct Rdb_index_info index_info;
      if (!dict_manager.get_index_info(gl_index_id, &index_info)) {
        // NO_LINT_DEBUG
        sql_print_error("RocksDB: Could not get index information "
                        "for Index Number (%u,%u), table %s",
                        gl_index_id.cf_id, gl_index_id.index_id,
                        old_tbl_def_arg->full_tablename().c_str());
        DBUG_RETURN(HA_EXIT_FAILURE);
      }

      // Only compute a TTL record offset when the index actually carries
      // the TTL flag; UINT_MAX marks "no TTL".
      uint32 ttl_rec_offset =
          Rdb_key_def::has_index_flag(index_info.m_index_flags,
                                      Rdb_key_def::TTL_FLAG)
              ? Rdb_key_def::calculate_index_flag_offset(
                    index_info.m_index_flags, Rdb_key_def::TTL_FLAG)
              : UINT_MAX;

      /*
        We can't use the copy constructor because we need to update the
        keynr within the pack_info for each field and the keyno of the keydef
        itself.
      */
      new_key_descr[i] = std::make_shared<Rdb_key_def>(
          okd.get_index_number(), i, okd.get_cf(),
          index_info.m_index_dict_version, index_info.m_index_type,
          index_info.m_kv_version, okd.m_is_reverse_cf,
          okd.m_is_per_partition_cf, okd.m_name.c_str(),
          dict_manager.get_stats(gl_index_id), index_info.m_index_flags,
          ttl_rec_offset, index_info.m_ttl_duration);
    } else if (create_key_def(table_arg, i, tbl_def_arg, &new_key_descr[i],
                              cfs[i])) {
      // Genuinely new index: allocate a fresh key definition.
      DBUG_RETURN(HA_EXIT_FAILURE);
    }

    DBUG_ASSERT(new_key_descr[i] != nullptr);
    new_key_descr[i]->setup(table_arg, tbl_def_arg);
  }

  DBUG_RETURN(HA_EXIT_SUCCESS);
}

/*
  Build a map of key name -> index position in the old table definition,
  containing only keys that are structurally identical between old and new
  definitions (and therefore can be carried over during inplace alter).
*/
std::unordered_map<std::string, uint> ha_rocksdb::get_old_key_positions(
    const TABLE *const table_arg, const Rdb_tbl_def *const tbl_def_arg,
    const TABLE *const old_table_arg,
    const Rdb_tbl_def *const old_tbl_def_arg) const {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(table_arg != nullptr);
  DBUG_ASSERT(old_table_arg != nullptr);
  DBUG_ASSERT(tbl_def_arg != nullptr);
  DBUG_ASSERT(old_tbl_def_arg != nullptr);

  std::shared_ptr<Rdb_key_def> *const old_key_descr =
      old_tbl_def_arg->m_key_descr_arr;
  std::unordered_map<std::string, uint> old_key_pos;
  std::unordered_map<std::string, uint> new_key_pos;
  uint i;

  for (i = 0; i < tbl_def_arg->m_key_count; i++) {
    new_key_pos[get_key_name(i, table_arg, tbl_def_arg)] = i;
  }

  for (i = 0; i < old_tbl_def_arg->m_key_count; i++) {
    if (is_hidden_pk(i, old_table_arg, old_tbl_def_arg)) {
      // Hidden PK always survives the alter as-is.
      old_key_pos[old_key_descr[i]->m_name] = i;
      continue;
    }

    /*
      In case of matching key name, need to check key parts of keys as well,
      in case a simultaneous drop + add is performed, where the key name is
      the same but the key parts are different.
      Example:
      CREATE TABLE t1 (a INT, b INT, KEY ka(a)) ENGINE=RocksDB;
      ALTER TABLE t1 DROP INDEX ka, ADD INDEX ka(b), ALGORITHM=INPLACE;
    */
    const KEY *const old_key = &old_table_arg->key_info[i];
    const auto &it = new_key_pos.find(old_key->name);
    if (it == new_key_pos.end()) {
      continue;
    }

    KEY *const new_key = &table_arg->key_info[it->second];

    /*
      Check that the key is identical between old and new tables.
      If not, we still need to create a new index.

      The exception is if there is an index changed from unique to non-unique,
      in these cases we don't need to rebuild as they are stored the same way
      in RocksDB.
    */
    bool unique_to_non_unique =
        ((old_key->flags ^ new_key->flags) == HA_NOSAME) &&
        (old_key->flags & HA_NOSAME);

    if (compare_keys(old_key, new_key) && !unique_to_non_unique) {
      continue;
    }

    /* Check to make sure key parts match. */
    if (compare_key_parts(old_key, new_key)) {
      continue;
    }

    old_key_pos[old_key->name] = i;
  }

  DBUG_RETURN(old_key_pos);
}

/*
  Check to see if two keys are identical.
  Returns HA_EXIT_SUCCESS (0) when identical, HA_EXIT_FAILURE otherwise.
*/
int ha_rocksdb::compare_keys(const KEY *const old_key,
                             const KEY *const new_key) const {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(old_key != nullptr);
  DBUG_ASSERT(new_key != nullptr);

  /* Check index name. */
  if (strcmp(old_key->name, new_key->name) != 0) {
    DBUG_RETURN(HA_EXIT_FAILURE);
  }

  /* If index algorithms are different then keys are different. */
  if (old_key->algorithm != new_key->algorithm) {
    DBUG_RETURN(HA_EXIT_FAILURE);
  }

  /* Check that the key is identical between old and new tables.  */
  if ((old_key->flags ^ new_key->flags) & HA_KEYFLAG_MASK) {
    DBUG_RETURN(HA_EXIT_FAILURE);
  }

  /* Check index comment.
     (for column family changes) */
  std::string old_comment(old_key->comment.str, old_key->comment.length);
  std::string new_comment(new_key->comment.str, new_key->comment.length);
  if (old_comment.compare(new_comment) != 0) {
    DBUG_RETURN(HA_EXIT_FAILURE);
  }

  DBUG_RETURN(HA_EXIT_SUCCESS);
}

/*
  Check two keys to ensure that key parts within keys match.
  Returns HA_EXIT_SUCCESS (0) when they match, HA_EXIT_FAILURE otherwise.
*/
int ha_rocksdb::compare_key_parts(const KEY *const old_key,
                                  const KEY *const new_key) const {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(old_key != nullptr);
  DBUG_ASSERT(new_key != nullptr);

  /* Skip if key parts do not match, as it is a different key */
  if (new_key->user_defined_key_parts != old_key->user_defined_key_parts) {
    DBUG_RETURN(HA_EXIT_FAILURE);
  }

  /* Check to see that key parts themselves match */
  for (uint i = 0; i < old_key->user_defined_key_parts; i++) {
    if (strcmp(old_key->key_part[i].field->field_name,
               new_key->key_part[i].field->field_name) != 0) {
      DBUG_RETURN(HA_EXIT_FAILURE);
    }

    /* Check if prefix index key part length has changed */
    if (old_key->key_part[i].length != new_key->key_part[i].length) {
      DBUG_RETURN(HA_EXIT_FAILURE);
    }
  }

  DBUG_RETURN(HA_EXIT_SUCCESS);
}

/*
  Create key definition needed for storing data in rocksdb.
  This can be called either during CREATE table or doing ADD index
  operations.

  @param in
    table_arg     Table with definition
    i             Position of index being created inside table_arg->key_info
    tbl_def_arg   Table def structure being populated
    cf_info       Struct which contains column family information

  @param out
    new_key_def  Newly created index definition.

  @return
    0      - Ok
    other  - error, either given table ddl is not supported by rocksdb or OOM.
*/ int ha_rocksdb::create_key_def(const TABLE *const table_arg, const uint &i, const Rdb_tbl_def *const tbl_def_arg, std::shared_ptr<Rdb_key_def> *const new_key_def, const struct key_def_cf_info &cf_info) const { DBUG_ENTER_FUNC(); DBUG_ASSERT(new_key_def != nullptr); DBUG_ASSERT(*new_key_def == nullptr); uint64 ttl_duration = 0; std::string ttl_column; uint ttl_field_offset; uint err; if ((err = Rdb_key_def::extract_ttl_duration(table_arg, tbl_def_arg, &ttl_duration))) { DBUG_RETURN(err); } if ((err = Rdb_key_def::extract_ttl_col(table_arg, tbl_def_arg, &ttl_column, &ttl_field_offset))) { DBUG_RETURN(err); } /* We don't currently support TTL on tables with hidden primary keys. */ if (ttl_duration > 0 && is_hidden_pk(i, table_arg, tbl_def_arg)) { my_error(ER_RDB_TTL_UNSUPPORTED, MYF(0)); DBUG_RETURN(HA_EXIT_FAILURE); } /* If TTL duration is not specified but TTL column was specified, throw an error because TTL column requires duration. */ if (ttl_duration == 0 && !ttl_column.empty()) { my_error(ER_RDB_TTL_COL_FORMAT, MYF(0), ttl_column.c_str()); DBUG_RETURN(HA_EXIT_FAILURE); } const uint index_id = ddl_manager.get_and_update_next_number(&dict_manager); const uint16_t index_dict_version = Rdb_key_def::INDEX_INFO_VERSION_LATEST; uchar index_type; uint16_t kv_version; if (is_hidden_pk(i, table_arg, tbl_def_arg)) { index_type = Rdb_key_def::INDEX_TYPE_HIDDEN_PRIMARY; kv_version = Rdb_key_def::PRIMARY_FORMAT_VERSION_LATEST; } else if (i == table_arg->s->primary_key) { index_type = Rdb_key_def::INDEX_TYPE_PRIMARY; uint16 pk_latest_version = Rdb_key_def::PRIMARY_FORMAT_VERSION_LATEST; kv_version = pk_latest_version; } else { index_type = Rdb_key_def::INDEX_TYPE_SECONDARY; uint16 sk_latest_version = Rdb_key_def::SECONDARY_FORMAT_VERSION_LATEST; kv_version = sk_latest_version; } // Use PRIMARY_FORMAT_VERSION_UPDATE1 here since it is the same value as // SECONDARY_FORMAT_VERSION_UPDATE1 so it doesn't matter if this is a // primary key or secondary key. 
DBUG_EXECUTE_IF("MYROCKS_LEGACY_VARBINARY_FORMAT", { kv_version = Rdb_key_def::PRIMARY_FORMAT_VERSION_UPDATE1; }); DBUG_EXECUTE_IF("MYROCKS_NO_COVERED_BITMAP_FORMAT", { if (index_type == Rdb_key_def::INDEX_TYPE_SECONDARY) { kv_version = Rdb_key_def::SECONDARY_FORMAT_VERSION_UPDATE2; } }); uint32 index_flags = (ttl_duration > 0 ? Rdb_key_def::TTL_FLAG : 0); uint32 ttl_rec_offset = Rdb_key_def::has_index_flag(index_flags, Rdb_key_def::TTL_FLAG) ? Rdb_key_def::calculate_index_flag_offset(index_flags, Rdb_key_def::TTL_FLAG) : UINT_MAX; const char *const key_name = get_key_name(i, table_arg, m_tbl_def); *new_key_def = std::make_shared<Rdb_key_def>( index_id, i, cf_info.cf_handle, index_dict_version, index_type, kv_version, cf_info.is_reverse_cf, cf_info.is_per_partition_cf, key_name, Rdb_index_stats(), index_flags, ttl_rec_offset, ttl_duration); if (!ttl_column.empty()) { (*new_key_def)->m_ttl_column = ttl_column; } DBUG_RETURN(HA_EXIT_SUCCESS); } int rdb_normalize_tablename(const std::string &tablename, std::string *const strbuf) { DBUG_ASSERT(strbuf != nullptr); if (tablename.size() < 2 || tablename[0] != '.' || tablename[1] != FN_LIBCHAR) { DBUG_ASSERT(0); // We were not passed table name? return HA_ERR_ROCKSDB_INVALID_TABLE; } size_t pos = tablename.find_first_of(FN_LIBCHAR, 2); if (pos == std::string::npos) { DBUG_ASSERT(0); // We were not passed table name? return HA_ERR_ROCKSDB_INVALID_TABLE; } *strbuf = tablename.substr(2, pos - 2) + "." 
+ tablename.substr(pos + 1); return HA_EXIT_SUCCESS; } /* Check to see if the user's original statement includes foreign key references */ bool ha_rocksdb::contains_foreign_key(THD *const thd) { bool success; const char *str = thd_query_string(thd)->str; DBUG_ASSERT(str != nullptr); while (*str != '\0') { // Scan from our current pos looking for 'FOREIGN' str = rdb_find_in_string(str, "FOREIGN", &success); if (!success) { return false; } // Skip past the found "FOREIGN' str = rdb_check_next_token(&my_charset_bin, str, "FOREIGN", &success); DBUG_ASSERT(success); if (!my_isspace(&my_charset_bin, *str)) { return false; } // See if the next token is 'KEY' str = rdb_check_next_token(&my_charset_bin, str, "KEY", &success); if (!success) { continue; } // See if the next token is '(' str = rdb_check_next_token(&my_charset_bin, str, "(", &success); if (!success) { // There is an optional index id after 'FOREIGN KEY', skip it str = rdb_skip_id(&my_charset_bin, str); // Now check for '(' again str = rdb_check_next_token(&my_charset_bin, str, "(", &success); } // If we have found 'FOREIGN KEY [<word>] (' we can be confident we have // a foreign key clause. return success; } // We never found a valid foreign key clause return false; } /** @brief splits the normalized table name of <dbname>.<tablename>#P#<part_no> into the <dbname>, <tablename> and <part_no> components. @param dbbuf returns database name/table_schema @param tablebuf returns tablename @param partitionbuf returns partition suffix if there is one @return HA_EXIT_SUCCESS on success, non-zero on failure to split */ int rdb_split_normalized_tablename(const std::string &fullname, std::string *const db, std::string *const table, std::string *const partition) { DBUG_ASSERT(!fullname.empty()); #define RDB_PARTITION_STR "#P#" /* Normalize returns dbname.tablename. */ size_t dotpos = fullname.find('.'); /* Invalid table name? 
  */
  if (dotpos == std::string::npos) {
    return HA_ERR_ROCKSDB_INVALID_TABLE;
  }

  // Table must have a database name associated with it.
  DBUG_ASSERT(dotpos > 0);

  if (db != nullptr) {
    *db = fullname.substr(0, dotpos);
  }

  dotpos++;

  const size_t partpos =
      fullname.find(RDB_PARTITION_STR, dotpos, strlen(RDB_PARTITION_STR));

  if (partpos != std::string::npos) {
    DBUG_ASSERT(partpos >= dotpos);

    if (table != nullptr) {
      *table = fullname.substr(dotpos, partpos - dotpos);
    }

    if (partition != nullptr) {
      *partition = fullname.substr(partpos + strlen(RDB_PARTITION_STR));
    }
  } else if (table != nullptr) {
    *table = fullname.substr(dotpos);
  }

  return HA_EXIT_SUCCESS;
}

/**
  @brief
  create() is called to create a database. The variable name will have the
  name of the table.

  @details
  When create() is called you do not need to worry about
  opening the table. Also, the .frm file will have already been
  created so adjusting create_info is not necessary. You can overwrite
  the .frm file at this point if you wish to change the table
  definition, but there are no methods currently provided for doing
  so.

  Called from handle.cc by ha_create_table().

  @return
    HA_EXIT_SUCCESS  OK
    other            HA_ERR error code (can be SE-specific)

  @see
  ha_create_table() in handle.cc
*/
int ha_rocksdb::create(const char *const name, TABLE *const table_arg,
                       HA_CREATE_INFO *const create_info) {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(table_arg != nullptr);
  DBUG_ASSERT(create_info != nullptr);

  if (create_info->data_file_name) {
    // DATA DIRECTORY is used to create tables under a specific location
    // outside the MySQL data directory. We don't support this for MyRocks.
    // The `rocksdb_datadir` setting should be used to configure RocksDB data
    // directory.
    print_error(HA_ERR_ROCKSDB_TABLE_DATA_DIRECTORY_NOT_SUPPORTED,
                MYF(ME_JUST_WARNING));
    DBUG_RETURN(HA_WRONG_CREATE_OPTION);
  }

  if (create_info->index_file_name) {
    // Similar check for INDEX DIRECTORY as well.
print_error(HA_ERR_ROCKSDB_TABLE_INDEX_DIRECTORY_NOT_SUPPORTED, MYF(ME_JUST_WARNING)); DBUG_RETURN(HA_WRONG_CREATE_OPTION); } std::string str; int err; THD *const thd = my_core::thd_get_current_thd(); if (get_table_if_exists(name)) { if (thd->lex->sql_command == SQLCOM_TRUNCATE) { err = delete_table(name); if (err != HA_EXIT_SUCCESS) { DBUG_RETURN(err); } } else { err = rdb_normalize_tablename(name, &str); if (err != HA_EXIT_SUCCESS) { DBUG_RETURN(err); } my_error(ER_METADATA_INCONSISTENCY, MYF(0), str.c_str(), name); DBUG_RETURN(HA_ERR_ROCKSDB_CORRUPT_DATA); } } /* Construct dbname.tablename ourselves, because parititioning passes strings like "./test/t14#P#p0" for individual partitions, while table_arg->s->table_name has none of that. */ err = rdb_normalize_tablename(name, &str); if (err != HA_EXIT_SUCCESS) { DBUG_RETURN(err); } if (contains_foreign_key(thd)) { my_error(ER_NOT_SUPPORTED_YET, MYF(0), "FOREIGN KEY for the RocksDB storage engine"); DBUG_RETURN(HA_ERR_UNSUPPORTED); } const std::unique_ptr<rocksdb::WriteBatch> wb = dict_manager.begin(); rocksdb::WriteBatch *const batch = wb.get(); /* Create table/key descriptions and put them into the data dictionary */ m_tbl_def = new Rdb_tbl_def(str); uint n_keys = table_arg->s->keys; /* If no primary key found, create a hidden PK and place it inside table definition */ if (has_hidden_pk(table_arg)) { n_keys += 1; } m_key_descr_arr = new std::shared_ptr<Rdb_key_def>[n_keys]; m_tbl_def->m_key_count = n_keys; m_tbl_def->m_key_descr_arr = m_key_descr_arr; err = create_key_defs(table_arg, m_tbl_def); if (err != HA_EXIT_SUCCESS) { goto error; } m_pk_descr = m_key_descr_arr[pk_index(table_arg, m_tbl_def)]; dict_manager.lock(); err = ddl_manager.put_and_write(m_tbl_def, batch); if (err != HA_EXIT_SUCCESS) { dict_manager.unlock(); goto error; } err = dict_manager.commit(batch); if (err != HA_EXIT_SUCCESS) { dict_manager.unlock(); goto error; } dict_manager.unlock(); if (create_info->auto_increment_value) 
m_tbl_def->m_auto_incr_val = create_info->auto_increment_value; /* We only support auto_increment at start of the PRIMARY KEY. */ // Field *field; // if ((field= table_arg->next_number_field)) /* TODO mdcallag: disable this for now to let UNIQUE indexes kind of work if ((field= table_arg->found_next_number_field)) { int pk= table_arg->s->primary_key; Field *pk_field= table_arg->key_info[pk].key_part[0].field; if (field->field_index != pk_field->field_index) DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } */ DBUG_RETURN(HA_EXIT_SUCCESS); error: /* Delete what we have allocated so far */ delete m_tbl_def; m_tbl_def = nullptr; m_key_descr_arr = nullptr; DBUG_RETURN(err); } /** @note This function is used only when the table has not yet been opened, and keyread_allowed bitmap doesn't have the correct values yet. See comment in ha_rocksdb::index_flags() for details. */ bool ha_rocksdb::check_keyread_allowed(uint inx, uint part, bool all_parts) const { bool res = true; KEY *const key_info = &table_share->key_info[inx]; Rdb_field_packing dummy1; res = dummy1.setup(nullptr, key_info->key_part[part].field, inx, part, key_info->key_part[part].length); if (res && all_parts) { for (uint i = 0; i < part; i++) { Field *field; if ((field = key_info->key_part[i].field)) { Rdb_field_packing dummy; if (!dummy.setup(nullptr, field, inx, i, key_info->key_part[i].length)) { /* Cannot do index-only reads for this column */ res = false; break; } } } } const uint pk = table_share->primary_key; if (inx == pk && all_parts && part + 1 == table_share->key_info[pk].user_defined_key_parts) { m_pk_can_be_decoded = res; } return res; } int ha_rocksdb::read_key_exact(const Rdb_key_def &kd, rocksdb::Iterator *const iter, const bool &full_key_match, const rocksdb::Slice &key_slice, const int64_t ttl_filter_ts) { DBUG_ASSERT(iter != nullptr); /* We are looking for the first record such that index_tuple= lookup_tuple. lookup_tuple may be a prefix of the index. 
*/ rocksdb_smart_seek(kd.m_is_reverse_cf, iter, key_slice); while (iter->Valid() && kd.value_matches_prefix(iter->key(), key_slice)) { /* If TTL is enabled we need to check if the given key has already expired from the POV of the current transaction. If it has, try going to the next key. */ if (kd.has_ttl() && should_hide_ttl_rec(kd, iter->value(), ttl_filter_ts)) { rocksdb_smart_next(kd.m_is_reverse_cf, iter); continue; } return HA_EXIT_SUCCESS; } /* Got a record that is not equal to the lookup value, or even a record from another table.index. */ return HA_ERR_KEY_NOT_FOUND; } int ha_rocksdb::read_before_key(const Rdb_key_def &kd, const bool &full_key_match, const rocksdb::Slice &key_slice, const int64_t ttl_filter_ts) { /* We are looking for record with the biggest t.key such that t.key < lookup_tuple. */ rocksdb_smart_seek(!kd.m_is_reverse_cf, m_scan_it, key_slice); while (m_scan_it->Valid()) { /* We are using full key and we've hit an exact match, or... If TTL is enabled we need to check if the given key has already expired from the POV of the current transaction. If it has, try going to the next key. */ if ((full_key_match && kd.value_matches_prefix(m_scan_it->key(), key_slice)) || (kd.has_ttl() && should_hide_ttl_rec(kd, m_scan_it->value(), ttl_filter_ts))) { rocksdb_smart_next(!kd.m_is_reverse_cf, m_scan_it); continue; } return HA_EXIT_SUCCESS; } return HA_ERR_KEY_NOT_FOUND; } int ha_rocksdb::read_after_key(const Rdb_key_def &kd, const rocksdb::Slice &key_slice, const int64_t ttl_filter_ts) { /* We are looking for the first record such that index_tuple $GT lookup_tuple with HA_READ_AFTER_KEY, $GT = '>', with HA_READ_KEY_OR_NEXT, $GT = '>=' */ rocksdb_smart_seek(kd.m_is_reverse_cf, m_scan_it, key_slice); /* If TTL is enabled we need to check if the given key has already expired from the POV of the current transaction. If it has, try going to the next key. 
*/ while (m_scan_it->Valid() && kd.has_ttl() && should_hide_ttl_rec(kd, m_scan_it->value(), ttl_filter_ts)) { rocksdb_smart_next(kd.m_is_reverse_cf, m_scan_it); } return m_scan_it->Valid() ? HA_EXIT_SUCCESS : HA_ERR_KEY_NOT_FOUND; } int ha_rocksdb::position_to_correct_key( const Rdb_key_def &kd, const enum ha_rkey_function &find_flag, const bool &full_key_match, const uchar *const key, const key_part_map &keypart_map, const rocksdb::Slice &key_slice, bool *const move_forward, const int64_t ttl_filter_ts) { int rc = 0; *move_forward = true; switch (find_flag) { case HA_READ_KEY_EXACT: rc = read_key_exact(kd, m_scan_it, full_key_match, key_slice, ttl_filter_ts); break; case HA_READ_BEFORE_KEY: *move_forward = false; rc = read_before_key(kd, full_key_match, key_slice, ttl_filter_ts); if (rc == 0 && !kd.covers_key(m_scan_it->key())) { /* The record we've got is not from this index */ rc = HA_ERR_KEY_NOT_FOUND; } break; case HA_READ_AFTER_KEY: case HA_READ_KEY_OR_NEXT: rc = read_after_key(kd, key_slice, ttl_filter_ts); if (rc == 0 && !kd.covers_key(m_scan_it->key())) { /* The record we've got is not from this index */ rc = HA_ERR_KEY_NOT_FOUND; } break; case HA_READ_KEY_OR_PREV: case HA_READ_PREFIX: /* This flag is not used by the SQL layer, so we don't support it yet. */ rc = HA_ERR_UNSUPPORTED; break; case HA_READ_PREFIX_LAST: case HA_READ_PREFIX_LAST_OR_PREV: *move_forward = false; /* Find the last record with the specified index prefix lookup. - HA_READ_PREFIX_LAST requires that the record has the prefix=lookup (if there are no such records, HA_ERR_KEY_NOT_FOUND should be returned). - HA_READ_PREFIX_LAST_OR_PREV has no such requirement. If there are no records with prefix=lookup, we should return the last record before that. 
*/ rc = read_before_key(kd, full_key_match, key_slice, ttl_filter_ts); if (rc == 0) { const rocksdb::Slice &rkey = m_scan_it->key(); if (!kd.covers_key(rkey)) { /* The record we've got is not from this index */ rc = HA_ERR_KEY_NOT_FOUND; } else if (find_flag == HA_READ_PREFIX_LAST) { uint size = kd.pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple, key, keypart_map); rocksdb::Slice lookup_tuple(reinterpret_cast<char *>(m_sk_packed_tuple), size); // We need to compare the key we've got with the original search prefix. if (!kd.value_matches_prefix(rkey, lookup_tuple)) { rc = HA_ERR_KEY_NOT_FOUND; } } } break; default: DBUG_ASSERT(0); break; } return rc; } int ha_rocksdb::calc_eq_cond_len(const Rdb_key_def &kd, const enum ha_rkey_function &find_flag, const rocksdb::Slice &slice, const int &bytes_changed_by_succ, const key_range *const end_key, uint *const end_key_packed_size) { if (find_flag == HA_READ_KEY_EXACT) return slice.size(); if (find_flag == HA_READ_PREFIX_LAST) { /* We have made the kd.successor(m_sk_packed_tuple) call above. The slice is at least Rdb_key_def::INDEX_NUMBER_SIZE bytes long. */ return slice.size() - bytes_changed_by_succ; } if (end_key) { *end_key_packed_size = kd.pack_index_tuple(table, m_pack_buffer, m_end_key_packed_tuple, end_key->key, end_key->keypart_map); /* Calculating length of the equal conditions here. 4 byte index id is included. Example1: id1 BIGINT, id2 INT, id3 BIGINT, PRIMARY KEY (id1, id2, id3) WHERE id1=1 AND id2=1 AND id3>=2 => eq_cond_len= 4+8+4= 16 WHERE id1=1 AND id2>=1 AND id3>=2 => eq_cond_len= 4+8= 12 Example2: id1 VARCHAR(30), id2 INT, PRIMARY KEY (id1, id2) WHERE id1 = 'AAA' and id2 < 3; => eq_cond_len=13 (varchar used 9 bytes) */ rocksdb::Slice end_slice(reinterpret_cast<char *>(m_end_key_packed_tuple), *end_key_packed_size); return slice.difference_offset(end_slice); } /* On range scan without any end key condition, there is no eq cond, and eq cond length is the same as index_id size (4 bytes). 
Example1: id1 BIGINT, id2 INT, id3 BIGINT, PRIMARY KEY (id1, id2, id3) WHERE id1>=1 AND id2 >= 2 and id2 <= 5 => eq_cond_len= 4 */ return Rdb_key_def::INDEX_NUMBER_SIZE; } int ha_rocksdb::read_row_from_primary_key(uchar *const buf) { DBUG_ASSERT(buf != nullptr); int rc; const rocksdb::Slice &rkey = m_scan_it->key(); const uint pk_size = rkey.size(); const char *pk_data = rkey.data(); memcpy(m_pk_packed_tuple, pk_data, pk_size); m_last_rowkey.copy(pk_data, pk_size, &my_charset_bin); if (m_lock_rows != RDB_LOCK_NONE) { /* We need to put a lock and re-read */ rc = get_row_by_rowid(buf, m_pk_packed_tuple, pk_size); } else { /* Unpack from the row we've read */ const rocksdb::Slice &value = m_scan_it->value(); rc = convert_record_from_storage_format(&rkey, &value, buf); } return rc; } int ha_rocksdb::read_row_from_secondary_key(uchar *const buf, const Rdb_key_def &kd, bool move_forward) { DBUG_ASSERT(buf != nullptr); int rc = 0; uint pk_size; /* Get the key columns and primary key value */ const rocksdb::Slice &rkey = m_scan_it->key(); const rocksdb::Slice &value = m_scan_it->value(); bool covered_lookup = m_keyread_only || kd.covers_lookup(table, &value, &m_lookup_bitmap); if (covered_lookup && m_lock_rows == RDB_LOCK_NONE && !has_hidden_pk(table)) { pk_size = kd.get_primary_key_tuple(table, *m_pk_descr, &rkey, m_pk_packed_tuple); if (pk_size == RDB_INVALID_KEY_LEN) { rc = HA_ERR_ROCKSDB_CORRUPT_DATA; } else { rc = kd.unpack_record(table, buf, &rkey, &value, m_verify_row_debug_checksums); global_stats.covered_secondary_key_lookups.inc(); } } else { if (kd.m_is_reverse_cf) move_forward = !move_forward; rc = find_icp_matching_index_rec(move_forward, buf); if (!rc) { const rocksdb::Slice &rkey = m_scan_it->key(); pk_size = kd.get_primary_key_tuple(table, *m_pk_descr, &rkey, m_pk_packed_tuple); if (pk_size == RDB_INVALID_KEY_LEN) { rc = HA_ERR_ROCKSDB_CORRUPT_DATA; } else { rc = get_row_by_rowid(buf, m_pk_packed_tuple, pk_size); } } } if (!rc) { m_last_rowkey.copy((const 
char *)m_pk_packed_tuple, pk_size, &my_charset_bin); } return rc; } /** @note The problem with this function is that SQL layer calls it, when - the table has not been yet opened (no ::open() call done) - this->table_share already exists, but it is in the process of being filled, so some of fields are still NULL. - In particular, table_share->key_info[inx].key_part[] is filled only up to part #part. Subsequent key parts are not yet filled. To complicate things further, SQL layer will call index_flags() with all_parts=TRUE. Essentially, we're asked to provide flags for reading keyparts whose datatype is not yet known. We walk around this problem by using check_keyread_allowed(), which uses table_share object and is careful not to step on unitialized data. When we get a call with all_parts=TRUE, we try to analyze all parts but ignore those that have key_part->field==nullptr (these are not initialized yet). */ ulong ha_rocksdb::index_flags(uint inx, uint part, bool all_parts) const { DBUG_ENTER_FUNC(); ulong base_flags = HA_READ_NEXT | // doesn't seem to be used HA_READ_ORDER | HA_READ_RANGE | HA_READ_PREV; if (check_keyread_allowed(inx, part, all_parts)) base_flags |= HA_KEYREAD_ONLY; if (inx == table_share->primary_key) { /* Index-only reads on primary key are the same as table scan for us. Still, we need to explicitly "allow" them, otherwise SQL layer will miss some plans. */ base_flags |= HA_KEYREAD_ONLY | HA_CLUSTERED_INDEX; } else { /* We can Index Condition Pushdown any key except the primary. With primary key, we get (pk, record) pair immediately, there is no place to put the ICP check. */ base_flags |= HA_DO_INDEX_COND_PUSHDOWN; } DBUG_RETURN(base_flags); } /** @brief Read next index tuple through the secondary index. @details m_scan_it points at the index key-value pair that we should read the (pk,row) pair for. 
*/ int ha_rocksdb::secondary_index_read(const int keyno, uchar *const buf) { DBUG_ASSERT(buf != nullptr); DBUG_ASSERT(table != nullptr); #ifdef MARIAROCKS_NOT_YET stats.rows_requested++; #endif /* Use STATUS_NOT_FOUND when record not found or some error occurred */ table->status = STATUS_NOT_FOUND; if (m_scan_it->Valid()) { rocksdb::Slice key = m_scan_it->key(); /* Check if we've ran out of records of this index */ if (m_key_descr_arr[keyno]->covers_key(key)) { int rc = 0; // TODO: We could here check if we have ran out of range we're scanning const uint size = m_key_descr_arr[keyno]->get_primary_key_tuple( table, *m_pk_descr, &key, m_pk_packed_tuple); if (size == RDB_INVALID_KEY_LEN) { return HA_ERR_ROCKSDB_CORRUPT_DATA; } m_last_rowkey.copy((const char *)m_pk_packed_tuple, size, &my_charset_bin); rocksdb::Slice value = m_scan_it->value(); bool covered_lookup = m_keyread_only || m_key_descr_arr[keyno]->covers_lookup( table, &value, &m_lookup_bitmap); if (covered_lookup && m_lock_rows == RDB_LOCK_NONE && !has_hidden_pk(table)) { rc = m_key_descr_arr[keyno]->unpack_record( table, buf, &key, &value, m_verify_row_debug_checksums); global_stats.covered_secondary_key_lookups.inc(); } else { rc = get_row_by_rowid(buf, m_pk_packed_tuple, size); } if (!rc) { table->status = 0; #ifdef MARIAROCKS_NOT_YET stats.rows_read++; stats.rows_index_next++; #endif update_row_stats(ROWS_READ); } return rc; } } return HA_ERR_END_OF_FILE; } bool ha_rocksdb::is_using_full_key(key_part_map keypart_map, uint actual_key_parts) { return (keypart_map == HA_WHOLE_KEY) || (keypart_map == ((key_part_map(1) << actual_key_parts) - 1)); } /* ha_rocksdb::read_range_first overrides handler::read_range_first. The only difference from handler::read_range_first is that ha_rocksdb::read_range_first passes end_key to ha_rocksdb::index_read_map_impl function. 
@return HA_EXIT_SUCCESS OK other HA_ERR error code (can be SE-specific) */ int ha_rocksdb::read_range_first(const key_range *const start_key, const key_range *const end_key, bool eq_range_arg, bool sorted) { DBUG_ENTER_FUNC(); int result; eq_range = eq_range_arg; /* MariaDB: Pass RANGE_SCAN_ASC when we support ICP on backwards scans */ set_end_range(end_key); range_key_part = table->key_info[active_index].key_part; if (!start_key) // Read first record result = ha_index_first(table->record[0]); else { #ifdef MARIAROCKS_NOT_YET if (is_using_prohibited_gap_locks( is_using_full_unique_key(active_index, start_key->keypart_map, start_key->flag))) { DBUG_RETURN(HA_ERR_LOCK_DEADLOCK); } #endif increment_statistics(&SSV::ha_read_key_count); MYSQL_TABLE_IO_WAIT(m_psi, PSI_TABLE_FETCH_ROW, active_index, 0, { result = index_read_map_impl(table->record[0], start_key->key, start_key->keypart_map, start_key->flag, end_key); }) } if (result) DBUG_RETURN((result == HA_ERR_KEY_NOT_FOUND) ? HA_ERR_END_OF_FILE : result); if (compare_key(end_range) <= 0) { DBUG_RETURN(HA_EXIT_SUCCESS); } else { /* The last read row does not fall in the range. So request storage engine to release row lock if possible. 
*/ unlock_row(); DBUG_RETURN(HA_ERR_END_OF_FILE); } } int ha_rocksdb::prepare_index_scan() { range_key_part= table->key_info[active_index].key_part; set_end_range(NULL); return 0; } int ha_rocksdb::prepare_range_scan(const key_range *start_key, const key_range *end_key) { range_key_part= table->key_info[active_index].key_part; set_end_range(end_key); return 0; } /** @return HA_EXIT_SUCCESS OK other HA_ERR error code (can be SE-specific) */ int ha_rocksdb::index_read_map(uchar *const buf, const uchar *const key, key_part_map keypart_map, enum ha_rkey_function find_flag) { DBUG_ENTER_FUNC(); DBUG_RETURN(index_read_map_impl(buf, key, keypart_map, find_flag, nullptr)); } /* See storage/rocksdb/rocksdb-range-access.txt for description of how MySQL index navigation commands are converted into RocksDB lookup commands. This function takes end_key as an argument, and it is set on range scan. MyRocks needs to decide whether prefix bloom filter can be used or not. To decide to use prefix bloom filter or not, calculating equal condition length is needed. On equal lookups (find_flag == HA_READ_KEY_EXACT), equal condition length is the same as rocksdb::Slice.size() of the start key. On range scan, equal condition length is MIN(start_key, end_key) of the rocksdb::Slice expression. 
@return HA_EXIT_SUCCESS OK other HA_ERR error code (can be SE-specific) */ int ha_rocksdb::index_read_map_impl(uchar *const buf, const uchar *const key, key_part_map keypart_map, enum ha_rkey_function find_flag, const key_range *end_key) { DBUG_ENTER_FUNC(); int rc = 0; const Rdb_key_def &kd = *m_key_descr_arr[active_index]; const uint actual_key_parts = kd.get_key_parts(); bool using_full_key = is_using_full_key(keypart_map, actual_key_parts); if (!end_key) end_key = end_range; /* By default, we don't need the retrieved records to match the prefix */ m_sk_match_prefix = nullptr; #ifdef MARIAROCKS_NOT_YET stats.rows_requested++; #endif if (active_index == table->s->primary_key && find_flag == HA_READ_KEY_EXACT && using_full_key) { /* Equality lookup over primary key, using full tuple. This is a special case, use DB::Get. */ const uint size = kd.pack_index_tuple(table, m_pack_buffer, m_pk_packed_tuple, key, keypart_map); bool skip_lookup = is_blind_delete_enabled(); rc = get_row_by_rowid(buf, m_pk_packed_tuple, size, skip_lookup, false); if (!rc && !skip_lookup) { #ifdef MARIAROCKS_NOT_YET stats.rows_read++; stats.rows_index_first++; #endif update_row_stats(ROWS_READ); } DBUG_RETURN(rc); } /* Unique secondary index performs lookups without the extended key fields */ uint packed_size; if (active_index != table->s->primary_key && table->key_info[active_index].flags & HA_NOSAME && find_flag == HA_READ_KEY_EXACT && using_full_key) { key_part_map tmp_map = (key_part_map(1) << table->key_info[active_index] .user_defined_key_parts) - 1; packed_size = kd.pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple, key, tmp_map); if (table->key_info[active_index].user_defined_key_parts != kd.get_key_parts()) using_full_key = false; } else { packed_size = kd.pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple, key, keypart_map); } if ((pushed_idx_cond && pushed_idx_cond_keyno == active_index) && (find_flag == HA_READ_KEY_EXACT || find_flag == HA_READ_PREFIX_LAST)) { /* We 
are doing a point index lookup, and ICP is enabled. It is possible that this call will be followed by ha_rocksdb->index_next_same() call. Do what InnoDB does: save the lookup tuple now. We will need it in index_next_same/find_icp_matching_index_rec in order to stop scanning as soon as index record doesn't match the lookup tuple. When not using ICP, handler::index_next_same() will make sure that rows that don't match the lookup prefix are not returned. row matches the lookup prefix. */ m_sk_match_prefix = m_sk_match_prefix_buf; m_sk_match_length = packed_size; memcpy(m_sk_match_prefix, m_sk_packed_tuple, packed_size); } int bytes_changed_by_succ = 0; if (find_flag == HA_READ_PREFIX_LAST_OR_PREV || find_flag == HA_READ_PREFIX_LAST || find_flag == HA_READ_AFTER_KEY) { /* See below */ bytes_changed_by_succ = kd.successor(m_sk_packed_tuple, packed_size); } rocksdb::Slice slice(reinterpret_cast<const char *>(m_sk_packed_tuple), packed_size); uint end_key_packed_size = 0; const uint eq_cond_len = calc_eq_cond_len(kd, find_flag, slice, bytes_changed_by_succ, end_key, &end_key_packed_size); bool use_all_keys = false; if (find_flag == HA_READ_KEY_EXACT && my_count_bits(keypart_map) == kd.get_key_parts()) use_all_keys = true; Rdb_transaction *const tx = get_or_create_tx(table->in_use); const bool is_new_snapshot = !tx->has_snapshot(); // Loop as long as we get a deadlock error AND we end up creating the // snapshot here (i.e. it did not exist prior to this) for (;;) { /* This will open the iterator and position it at a record that's equal or greater than the lookup tuple. 
*/ setup_scan_iterator(kd, &slice, use_all_keys, eq_cond_len); /* Once we are positioned on from above, move to the position we really want: See storage/rocksdb/rocksdb-range-access.txt */ bool move_forward; rc = position_to_correct_key(kd, find_flag, using_full_key, key, keypart_map, slice, &move_forward, tx->m_snapshot_timestamp); if (rc) { /* This status is returned on any error */ table->status = STATUS_NOT_FOUND; DBUG_RETURN(rc); } m_skip_scan_it_next_call = false; /* Now get the data for the row into 'buf'. If we were using a primary key then we have all the rows we need. For a secondary key we now need to lookup the primary key. */ if (active_index == table->s->primary_key) rc = read_row_from_primary_key(buf); else rc = read_row_from_secondary_key(buf, kd, move_forward); if (rc != HA_ERR_LOCK_DEADLOCK || !is_new_snapshot) break; /* Exit the loop */ // release the snapshot and iterator so they will be regenerated tx->release_snapshot(); release_scan_iterator(); } if (rc) { /* the only possible error condition is record-not-found */ table->status = STATUS_NOT_FOUND; } else { table->status = 0; #ifdef MARIAROCKS_NOT_YET stats.rows_read++; stats.rows_index_first++; #endif update_row_stats(ROWS_READ); } DBUG_RETURN(rc); } /* @brief Scan the secondary index until we find an index record that satisfies ICP @param move_forward TRUE <=> move m_scan_it forward FALSE <=> move m_scan_it backward @param buf Record buffer (must be the same buffer that pushed index condition points to, in practice it is table->record[0]) @detail Move the current iterator m_scan_it until we get an index tuple that satisfies the pushed Index Condition. (if there is no pushed index condition, return right away) @return 0 - Index tuple satisfies ICP, can do index read. 
other - error code */ int ha_rocksdb::find_icp_matching_index_rec(const bool &move_forward, uchar *const buf) { DBUG_ASSERT(buf != nullptr); if (pushed_idx_cond && pushed_idx_cond_keyno == active_index) { const Rdb_key_def &kd = *m_key_descr_arr[active_index]; while (1) { rocksdb_skip_expired_records(kd, m_scan_it, !move_forward); if (!m_scan_it->Valid()) { table->status = STATUS_NOT_FOUND; return HA_ERR_END_OF_FILE; } const rocksdb::Slice rkey = m_scan_it->key(); if (!kd.covers_key(rkey)) { table->status = STATUS_NOT_FOUND; return HA_ERR_END_OF_FILE; } if (m_sk_match_prefix) { const rocksdb::Slice prefix((const char *)m_sk_match_prefix, m_sk_match_length); if (!kd.value_matches_prefix(rkey, prefix)) { table->status = STATUS_NOT_FOUND; return HA_ERR_END_OF_FILE; } } const rocksdb::Slice value = m_scan_it->value(); int err = kd.unpack_record(table, buf, &rkey, &value, m_verify_row_debug_checksums); if (err != HA_EXIT_SUCCESS) { return err; } const enum icp_result icp_status= handler_index_cond_check(this); if (icp_status == ICP_NO_MATCH) { rocksdb_smart_next(!move_forward, m_scan_it); continue; /* Get the next (or prev) index tuple */ } else if (icp_status == ICP_OUT_OF_RANGE || icp_status == ICP_ABORTED_BY_USER) { /* We have walked out of range we are scanning */ table->status = STATUS_NOT_FOUND; return HA_ERR_END_OF_FILE; } else /* icp_status == ICP_MATCH */ { /* Index Condition is satisfied. We have rc==0, proceed to fetch the * row. */ break; } /* TODO: should we have this here, or RockDB handles this internally? 
if (my_core::thd_killed(current_thd)) { rc= HA_ERR_INTERNAL_ERROR; // doesn't matter break; } */ } } return HA_EXIT_SUCCESS; } /** @return HA_EXIT_SUCCESS OK other HA_ERR error code (can be SE-specific) */ /** @return HA_ADMIN_OK OK other HA_ADMIN error code */ int ha_rocksdb::check(THD *const thd, HA_CHECK_OPT *const check_opt) { DBUG_ENTER_FUNC(); DBUG_ASSERT(thd != nullptr); DBUG_ASSERT(check_opt != nullptr); const uint pk = pk_index(table, m_tbl_def); String rowkey_copy; String sec_key_copy; const char *const table_name = table->s->table_name.str; // Only when debugging: don't use snapshot when reading // Rdb_transaction *tx= get_or_create_tx(table->in_use); // tx->snapshot= nullptr; const bool save_verify_row_debug_checksums = m_verify_row_debug_checksums; m_verify_row_debug_checksums = true; /* For each secondary index, check that we can get a PK value from it */ // NO_LINT_DEBUG sql_print_information("CHECKTABLE %s: Checking table %s", table_name, table_name); ha_rows UNINIT_VAR(row_checksums_at_start); // set/used iff first_index==true ha_rows row_checksums = ha_rows(-1); bool first_index = true; for (uint keyno = 0; keyno < table->s->keys; keyno++) { if (keyno != pk) { extra(HA_EXTRA_KEYREAD); ha_index_init(keyno, true); ha_rows rows = 0; ha_rows checksums = 0; if (first_index) row_checksums_at_start = m_row_checksums_checked; int res; // NO_LINT_DEBUG sql_print_information("CHECKTABLE %s: Checking index %s", table_name, table->key_info[keyno].name); while (1) { if (!rows) res = index_first(table->record[0]); else res = index_next(table->record[0]); if (res == HA_ERR_END_OF_FILE) break; if (res) { // error // NO_LINT_DEBUG sql_print_error("CHECKTABLE %s: .. 
row %lld: index scan error %d", table_name, rows, res); goto error; } rocksdb::Slice key = m_scan_it->key(); sec_key_copy.copy(key.data(), key.size(), &my_charset_bin); rowkey_copy.copy(m_last_rowkey.ptr(), m_last_rowkey.length(), &my_charset_bin); if (m_key_descr_arr[keyno]->unpack_info_has_checksum( m_scan_it->value())) { checksums++; } if ((res = get_row_by_rowid(table->record[0], rowkey_copy.ptr(), rowkey_copy.length()))) { // NO_LINT_DEBUG sql_print_error("CHECKTABLE %s: .. row %lld: " "failed to fetch row by rowid", table_name, rows); goto error; } longlong hidden_pk_id = 0; if (has_hidden_pk(table) && read_hidden_pk_id_from_rowkey(&hidden_pk_id)) goto error; /* Check if we get the same PK value */ uint packed_size = m_pk_descr->pack_record( table, m_pack_buffer, table->record[0], m_pk_packed_tuple, nullptr, false, hidden_pk_id); if (packed_size != rowkey_copy.length() || memcmp(m_pk_packed_tuple, rowkey_copy.ptr(), packed_size)) { // NO_LINT_DEBUG sql_print_error("CHECKTABLE %s: .. row %lld: PK value mismatch", table_name, rows); goto print_and_error; } /* Check if we get the same secondary key value */ packed_size = m_key_descr_arr[keyno]->pack_record( table, m_pack_buffer, table->record[0], m_sk_packed_tuple, &m_sk_tails, false, hidden_pk_id); if (packed_size != sec_key_copy.length() || memcmp(m_sk_packed_tuple, sec_key_copy.ptr(), packed_size)) { // NO_LINT_DEBUG sql_print_error("CHECKTABLE %s: .. 
row %lld: " "secondary index value mismatch", table_name, rows); goto print_and_error; } rows++; continue; print_and_error : { std::string buf; buf = rdb_hexdump(rowkey_copy.ptr(), rowkey_copy.length(), RDB_MAX_HEXDUMP_LEN); // NO_LINT_DEBUG sql_print_error("CHECKTABLE %s: rowkey: %s", table_name, buf.c_str()); buf = rdb_hexdump(m_retrieved_record.data(), m_retrieved_record.size(), RDB_MAX_HEXDUMP_LEN); // NO_LINT_DEBUG sql_print_error("CHECKTABLE %s: record: %s", table_name, buf.c_str()); buf = rdb_hexdump(sec_key_copy.ptr(), sec_key_copy.length(), RDB_MAX_HEXDUMP_LEN); // NO_LINT_DEBUG sql_print_error("CHECKTABLE %s: index: %s", table_name, buf.c_str()); goto error; } } // NO_LINT_DEBUG sql_print_information("CHECKTABLE %s: ... %lld index entries checked " "(%lld had checksums)", table_name, rows, checksums); if (first_index) { row_checksums = m_row_checksums_checked - row_checksums_at_start; first_index = false; } ha_index_end(); } } if (row_checksums != ha_rows(-1)) { // NO_LINT_DEBUG sql_print_information("CHECKTABLE %s: %lld table records had checksums", table_name, row_checksums); } extra(HA_EXTRA_NO_KEYREAD); m_verify_row_debug_checksums = save_verify_row_debug_checksums; /* TODO: we should check also for PK records that are missing in the secondary indexes. For that, need to walk through the PK and check that every PK record has a proper counterpart in each secondary index. */ DBUG_RETURN(HA_ADMIN_OK); error: m_verify_row_debug_checksums = save_verify_row_debug_checksums; ha_index_or_rnd_end(); extra(HA_EXTRA_NO_KEYREAD); DBUG_RETURN(HA_ADMIN_CORRUPT); } static void dbug_dump_str(FILE *const out, const char *const str, int len) { fprintf(out, "\""); for (int i = 0; i < len; i++) { if (str[i] > 32) fprintf(out, "%c", str[i]); else fprintf(out, "\\%d", str[i]); } fprintf(out, "\""); } /* Debugging help: dump the whole database into a human-readable file. 
Usage: dbug_dump_database(rdb); */ void dbug_dump_database(rocksdb::DB *const db) { FILE *const out = fopen("/tmp/rocksdb.dump", "wt"); if (!out) return; rocksdb::Iterator *it = db->NewIterator(rocksdb::ReadOptions()); for (it->SeekToFirst(); it->Valid(); it->Next()) { rocksdb::Slice key = it->key(); rocksdb::Slice val = it->value(); dbug_dump_str(out, key.data(), key.size()); fprintf(out, " -> "); dbug_dump_str(out, val.data(), val.size()); fprintf(out, "\n"); } delete it; fclose(out); } rocksdb::Status ha_rocksdb::get_for_update( Rdb_transaction *const tx, rocksdb::ColumnFamilyHandle *const column_family, const rocksdb::Slice &key, std::string *const value) const { DBUG_ASSERT(m_lock_rows != RDB_LOCK_NONE); const bool exclusive = m_lock_rows != RDB_LOCK_READ; rocksdb::Status s = tx->get_for_update(column_family, key, value, exclusive); // If we have a lock conflict and we are running in READ COMMITTTED mode // release and reacquire the snapshot and then retry the get_for_update(). if (s.IsBusy() && !s.IsDeadlock() && my_core::thd_tx_isolation(ha_thd()) == ISO_READ_COMMITTED) { tx->release_snapshot(); tx->acquire_snapshot(false); s = tx->get_for_update(column_family, key, value, exclusive); } return s; } bool ha_rocksdb::is_blind_delete_enabled() { THD *thd = ha_thd(); return (THDVAR(thd, blind_delete_primary_key) && thd->lex->sql_command == SQLCOM_DELETE && thd->lex->table_count == 1 && table->s->keys == 1 && !has_hidden_pk(table) && !thd->rgi_slave); } /* Given a rowid (i.e. packed PK) as a parameter, get the record. 
@return
    HA_EXIT_SUCCESS  OK
    other            HA_ERR error code (can be SE-specific)
*/
int ha_rocksdb::get_row_by_rowid(uchar *const buf, const char *const rowid,
                                 const uint rowid_size, const bool skip_lookup,
                                 const bool skip_ttl_check) {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(buf != nullptr);
  DBUG_ASSERT(rowid != nullptr);
  DBUG_ASSERT(table != nullptr);

  int rc;

  rocksdb::Slice key_slice(rowid, rowid_size);

  Rdb_transaction *const tx = get_or_create_tx(table->in_use);
  DBUG_ASSERT(tx != nullptr);

  DEBUG_SYNC(ha_thd(), "rocksdb.get_row_by_rowid");
  DBUG_EXECUTE_IF("dbug.rocksdb.get_row_by_rowid", {
    THD *thd = ha_thd();
    const char act[] = "now signal Reached "
                       "wait_for signal.rocksdb.get_row_by_rowid_let_running";
    DBUG_ASSERT(opt_debug_sync_timeout > 0);
    DBUG_ASSERT(!debug_sync_set_action(thd, STRING_WITH_LEN(act)));
  };);

  bool found;
  rocksdb::Status s;

  /* Pretend row found without looking up (blind-delete path). */
  if (skip_lookup) {
#ifdef MARIAROCKS_NOT_YET
    stats.rows_deleted_blind++;
#endif
    update_row_stats(ROWS_DELETED_BLIND);
    m_last_rowkey.copy((const char *)rowid, rowid_size, &my_charset_bin);
    table->status = 0;
    DBUG_RETURN(0);
  }

  if (m_lock_rows == RDB_LOCK_NONE) {
    // Plain read: make sure we have a snapshot, then do a point lookup.
    tx->acquire_snapshot(true);
    s = tx->get(m_pk_descr->get_cf(), key_slice, &m_retrieved_record);
  } else {
    // Locking read: takes the row lock as a side effect.
    s = get_for_update(tx, m_pk_descr->get_cf(), key_slice,
                       &m_retrieved_record);
  }

  if (!s.IsNotFound() && !s.ok()) {
    DBUG_RETURN(tx->set_status_error(table->in_use, s, *m_pk_descr, m_tbl_def,
                                     m_table_handler));
  }
  found = !s.IsNotFound();

  table->status = STATUS_NOT_FOUND;
  if (found) {
    /*
      If we found the record, but it's expired, pretend we didn't find it.
    */
    if (!skip_ttl_check && m_pk_descr->has_ttl() &&
        should_hide_ttl_rec(*m_pk_descr,
                            rocksdb::Slice(&m_retrieved_record.front(),
                                           m_retrieved_record.size()),
                            tx->m_snapshot_timestamp)) {
      DBUG_RETURN(HA_ERR_KEY_NOT_FOUND);
    }

    m_last_rowkey.copy((const char *)rowid, rowid_size, &my_charset_bin);
    rc = convert_record_from_storage_format(&key_slice, buf);
    if (!rc) {
      table->status = 0;
    }
  } else {
    /*
      Note: we don't need to unlock the row. It is intentional that we keep
      locks on rows that don't exist.
    */
    rc = HA_ERR_KEY_NOT_FOUND;
  }

  DBUG_RETURN(rc);
}

/**
  Move to the next record of the active index, accounting for reverse
  column families.

  @return
    HA_EXIT_SUCCESS  OK
    other            HA_ERR error code (can be SE-specific)
*/
int ha_rocksdb::index_next(uchar *const buf) {
  DBUG_ENTER_FUNC();

  bool moves_forward = true;
  if (m_key_descr_arr[active_index]->m_is_reverse_cf) {
    // In a reverse column family the physical iteration order is inverted.
    moves_forward = false;
  }

  int rc = index_next_with_direction(buf, moves_forward);
  if (rc == HA_ERR_KEY_NOT_FOUND) rc = HA_ERR_END_OF_FILE;

  DBUG_RETURN(rc);
}

/**
  Move to the previous record of the active index.

  @return
    HA_EXIT_SUCCESS  OK
    other            HA_ERR error code (can be SE-specific)
*/
int ha_rocksdb::index_prev(uchar *const buf) {
  DBUG_ENTER_FUNC();

  bool moves_forward = false;
  if (m_key_descr_arr[active_index]->m_is_reverse_cf) {
    moves_forward = true;
  }

  int rc = index_next_with_direction(buf, moves_forward);
  if (rc == HA_ERR_KEY_NOT_FOUND) rc = HA_ERR_END_OF_FILE;

  DBUG_RETURN(rc);
}

/*
  Common implementation of index_next()/index_prev(): advance m_scan_it in
  the given physical direction, skip expired (TTL) records, and read the
  matching row into 'buf'.
*/
int ha_rocksdb::index_next_with_direction(uchar *const buf, bool move_forward) {
  DBUG_ENTER_FUNC();

  int rc;

  if (active_index == pk_index(table, m_tbl_def)) {
    rc = rnd_next_with_direction(buf, move_forward);
  } else {
    if (m_skip_scan_it_next_call) {
      // Iterator is already positioned on the record to return.
      m_skip_scan_it_next_call = false;
    } else {
      if (move_forward)
        m_scan_it->Next(); /* this call cannot fail */
      else
        m_scan_it->Prev();
    }
    rocksdb_skip_expired_records(*m_key_descr_arr[active_index], m_scan_it,
                                 !move_forward);
    rc = find_icp_matching_index_rec(move_forward, buf);
    if (!rc) rc = secondary_index_read(active_index, buf);
  }

  DBUG_RETURN(rc);
}

/**
  @return
    HA_EXIT_SUCCESS  OK
    other            HA_ERR error code (can be SE-specific)
*/
int
ha_rocksdb::index_first(uchar *const buf) {
  DBUG_ENTER_FUNC();

  m_sk_match_prefix = nullptr;
  // In a reverse column family, "first" in index order is physically last.
  int rc = m_key_descr_arr[active_index]->m_is_reverse_cf
               ? index_last_intern(buf)
               : index_first_intern(buf);
  if (rc == HA_ERR_KEY_NOT_FOUND) rc = HA_ERR_END_OF_FILE;

  DBUG_RETURN(rc);
}

/**
  @return
    HA_EXIT_SUCCESS  OK
    other            HA_ERR error code (can be SE-specific)
*/
int ha_rocksdb::index_last(uchar *const buf) {
  DBUG_ENTER_FUNC();

  m_sk_match_prefix = nullptr;
  int rc = m_key_descr_arr[active_index]->m_is_reverse_cf
               ? index_first_intern(buf)
               : index_last_intern(buf);
  if (rc == HA_ERR_KEY_NOT_FOUND) rc = HA_ERR_END_OF_FILE;

  DBUG_RETURN(rc);
}

/*
  Start scanning from the "first" value.

  The 'first' here means "the first from start of the key space".
  For reverse-ordered key spaces, the first value will be the biggest,
  'zzzz'.

  A picture of a forward-ordered keyspace (remember, the keys have form
  'indexnr-keyval'. Suppose the index we are at has number n)

     (n-1) - ...
     ( n )        <--- 1. (n) doesn't exist in the db but it would be here.
     ( n ) - aaa  <--- 2. Seek("n") will put us here on the first index
     ( n ) - bbb         record.
     ( n ) - cc

  So, need to do: Seek(n);

  A backward-ordered keyspace:

     (n+1) - bbb
     (n+1) - aaa
     (n+1)        <--- (n+1) doesn't exist in the db but would be here.
     ( n ) - ccc  <--- 1. We need to be here.
     ( n ) - bbb
     ( n ) - aaa
     ( n )

  So, need to: Seek(n+1);
*/
int ha_rocksdb::index_first_intern(uchar *const buf) {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(buf != nullptr);

  uchar *key;
  uint key_size;
  int rc;

  // Pick the packed-key buffer that matches the active index type.
  if (is_pk(active_index, table, m_tbl_def)) {
    key = m_pk_packed_tuple;
  } else {
    key = m_sk_packed_tuple;
  }

  DBUG_ASSERT(key != nullptr);

  const Rdb_key_def &kd = *m_key_descr_arr[active_index];
  // Seek target: the start of the index in physical key order.
  if (kd.m_is_reverse_cf) {
    kd.get_supremum_key(key, &key_size);
  } else {
    kd.get_infimum_key(key, &key_size);
  }

  rocksdb::Slice index_key((const char *)key, key_size);

  Rdb_transaction *const tx = get_or_create_tx(table->in_use);
  DBUG_ASSERT(tx != nullptr);

  const bool is_new_snapshot = !tx->has_snapshot();
  // Loop as long as we get a deadlock error AND we end up creating the
  // snapshot here (i.e. it did not exist prior to this)
  for (;;) {
    setup_scan_iterator(kd, &index_key, false, Rdb_key_def::INDEX_NUMBER_SIZE);
    m_scan_it->Seek(index_key);
    m_skip_scan_it_next_call = true;

    rc = index_next_with_direction(buf, true);
    if (rc != HA_ERR_LOCK_DEADLOCK || !is_new_snapshot)
      break;  // exit the loop

    // release the snapshot and iterator so they will be regenerated
    tx->release_snapshot();
    release_scan_iterator();
  }

  if (!rc) {
    /*
      index_next is always incremented on success, so decrement if it is
      index_first instead
     */
#ifdef MARIAROCKS_NOT_YET
    stats.rows_index_first++;
    stats.rows_index_next--;
#endif
  }

  DBUG_RETURN(rc);
}

/**
  @details
  Start scanning from the "last" value

  The 'last' here means "the last from start of the key space".
  For reverse-ordered key spaces, we will actually read the smallest value.

  A picture of a forward-ordered keyspace (remember, the keys have form
  'indexnr-keyval'. Suppose we are at a key that has number n)

    (n-1)-something
    ( n )-aaa
    ( n )-bbb
    ( n )-ccc        <----------- Need to seek to here.
    (n+1)            <---- Doesn't exist, but would be here.
    (n+1)-smth, or no value at all

  RocksDB's Iterator::SeekForPrev($val) seeks to "at $val or last value
  that's smaller".
We can't seek to "(n)-ccc" directly, because we don't know what is the value of 'ccc' (the biggest record with prefix (n)). Instead, we seek to "(n+1)", which is the least possible value that's greater than any value in index #n. So, need to: it->SeekForPrev(n+1) A backward-ordered keyspace: (n+1)-something ( n ) - ccc ( n ) - bbb ( n ) - aaa <---------------- (*) Need to seek here. ( n ) <--- Doesn't exist, but would be here. (n-1)-smth, or no value at all So, need to: it->SeekForPrev(n) */ int ha_rocksdb::index_last_intern(uchar *const buf) { DBUG_ENTER_FUNC(); DBUG_ASSERT(buf != nullptr); uchar *key; uint key_size; int rc; if (is_pk(active_index, table, m_tbl_def)) { key = m_pk_packed_tuple; } else { key = m_sk_packed_tuple; } DBUG_ASSERT(key != nullptr); const Rdb_key_def &kd = *m_key_descr_arr[active_index]; if (kd.m_is_reverse_cf) { kd.get_infimum_key(key, &key_size); } else { kd.get_supremum_key(key, &key_size); } rocksdb::Slice index_key((const char *)key, key_size); Rdb_transaction *const tx = get_or_create_tx(table->in_use); DBUG_ASSERT(tx != nullptr); bool is_new_snapshot = !tx->has_snapshot(); // Loop as long as we get a deadlock error AND we end up creating the // snapshot here (i.e. 
it did not exist prior to this) for (;;) { setup_scan_iterator(kd, &index_key, false, Rdb_key_def::INDEX_NUMBER_SIZE); m_scan_it->SeekForPrev(index_key); m_skip_scan_it_next_call = false; if (is_pk(active_index, table, m_tbl_def)) { m_skip_scan_it_next_call = true; rc = rnd_next_with_direction(buf, false); } else { rc = find_icp_matching_index_rec(false /*move_forward*/, buf); if (!rc) rc = secondary_index_read(active_index, buf); } if (rc != HA_ERR_LOCK_DEADLOCK || !is_new_snapshot) break; /* exit the loop */ // release the snapshot and iterator so they will be regenerated tx->release_snapshot(); release_scan_iterator(); } if (!rc) { /* index_next is always incremented on success, so decrement if it is index_first instead */ #ifdef MARIAROCKS_NOT_YET stats.rows_index_first++; stats.rows_index_next--; #endif } DBUG_RETURN(rc); } void ha_rocksdb::unlock_row() { DBUG_ENTER_FUNC(); if (m_lock_rows != RDB_LOCK_NONE) { Rdb_transaction *const tx = get_or_create_tx(table->in_use); tx->release_lock(m_pk_descr->get_cf(), std::string(m_last_rowkey.ptr(), m_last_rowkey.length())); } DBUG_VOID_RETURN; } /* Returning true if SingleDelete can be used. - Secondary Indexes can always use SingleDelete. - If the index is PRIMARY KEY, and if all of the columns of the table are covered by the PRIMARY KEY, SingleDelete can be used. */ bool ha_rocksdb::can_use_single_delete(const uint &index) const { return (index != pk_index(table, m_tbl_def) || (!has_hidden_pk(table) && table->key_info[index].ext_key_parts == table->s->fields)); } bool ha_rocksdb::skip_unique_check() const { /* We want to skip unique checks if: 1) bulk_load is on 2) this table is in the whitelist of tables to skip and the replication lag has reached a large enough value (see unique_check_lag_threshold and unique_check_lage_reset_threshold) 3) the user set unique_checks option to 0, and the table does not have any indexes. 
If the table has secondary keys, then those might become
         inconsistent/corrupted
  */
  return THDVAR(table->in_use, bulk_load) ||
         (m_force_skip_unique_check && m_skip_unique_check) ||
         (my_core::thd_test_options(table->in_use,
                                    OPTION_RELAXED_UNIQUE_CHECKS) &&
          m_tbl_def->m_key_count == 1);
}

#ifdef MARIAROCKS_NOT_YET  // MDEV-10975
void ha_rocksdb::set_force_skip_unique_check(bool skip) {
  DBUG_ENTER_FUNC();

  m_force_skip_unique_check = skip;

  DBUG_VOID_RETURN;
}
#endif

/* True when writes should be committed in batches (bulk load / explicit
   commit_in_the_middle session option). */
bool ha_rocksdb::commit_in_the_middle() {
  return THDVAR(table->in_use, bulk_load) ||
         THDVAR(table->in_use, commit_in_the_middle);
}

/*
  Executing bulk commit if it should.
  @retval true if bulk commit failed
  @retval false if bulk commit was skipped or succeeded
*/
bool ha_rocksdb::do_bulk_commit(Rdb_transaction *const tx) {
  DBUG_ASSERT(tx != nullptr);
  // Note: flush_batch() is only invoked once the write count reaches
  // bulk_load_size; its return value is the "failed" indicator.
  return commit_in_the_middle() &&
         tx->get_write_count() >= THDVAR(table->in_use, bulk_load_size) &&
         tx->flush_batch();
}

/*
  If table was created without primary key, SQL layer represents the primary
  key number as MAX_INDEXES.  Hence, this function returns true if the table
  does not contain a primary key. (In which case we generate a hidden
  'auto-incremented' pk.)
*/
bool ha_rocksdb::has_hidden_pk(const TABLE *const table) const {
  DBUG_ASSERT(table != nullptr);
  return Rdb_key_def::table_has_hidden_pk(table);
}

/*
  Returns true if given index number is a hidden_pk.
  - This is used when a table is created with no primary key.
*/
bool ha_rocksdb::is_hidden_pk(const uint index, const TABLE *const table_arg,
                              const Rdb_tbl_def *const tbl_def_arg) {
  DBUG_ASSERT(table_arg != nullptr);
  DBUG_ASSERT(table_arg->s != nullptr);
  DBUG_ASSERT(tbl_def_arg != nullptr);

  // The hidden PK is stored as the last key in the table definition.
  return (table_arg->s->primary_key == MAX_INDEXES &&
          index == tbl_def_arg->m_key_count - 1);
}

/* Returns index of primary key */
uint ha_rocksdb::pk_index(const TABLE *const table_arg,
                          const Rdb_tbl_def *const tbl_def_arg) {
  DBUG_ASSERT(table_arg != nullptr);
  DBUG_ASSERT(table_arg->s != nullptr);
  DBUG_ASSERT(tbl_def_arg != nullptr);

  return table_arg->s->primary_key == MAX_INDEXES ? tbl_def_arg->m_key_count - 1
                                                  : table_arg->s->primary_key;
}

/* Returns true if given index number is a primary key */
bool ha_rocksdb::is_pk(const uint index, const TABLE *const table_arg,
                       const Rdb_tbl_def *const tbl_def_arg) {
  DBUG_ASSERT(table_arg != nullptr);
  DBUG_ASSERT(table_arg->s != nullptr);
  DBUG_ASSERT(tbl_def_arg != nullptr);

  return index == table_arg->s->primary_key ||
         is_hidden_pk(index, table_arg, tbl_def_arg);
}

/* Maximum supported length of one index key part, depending on the
   rocksdb_large_prefix setting. */
uint ha_rocksdb::max_supported_key_part_length() const {
  DBUG_ENTER_FUNC();
  DBUG_RETURN(rocksdb_large_prefix ?
MAX_INDEX_COL_LEN_LARGE : MAX_INDEX_COL_LEN_SMALL);
}

/*
  Return the name of the given key; the generated hidden primary key has the
  fixed name HIDDEN_PK_NAME.
*/
const char *ha_rocksdb::get_key_name(const uint index,
                                     const TABLE *const table_arg,
                                     const Rdb_tbl_def *const tbl_def_arg) {
  DBUG_ASSERT(table_arg != nullptr);
  DBUG_ASSERT(tbl_def_arg != nullptr);

  if (is_hidden_pk(index, table_arg, tbl_def_arg)) {
    return HIDDEN_PK_NAME;
  }

  DBUG_ASSERT(table_arg->key_info != nullptr);
  DBUG_ASSERT(table_arg->key_info[index].name != nullptr);

  return table_arg->key_info[index].name;
}

/*
  Return the index comment for the given key, or nullptr for the hidden
  primary key (which has no user-visible definition).
*/
const char *ha_rocksdb::get_key_comment(const uint index,
                                        const TABLE *const table_arg,
                                        const Rdb_tbl_def *const tbl_def_arg) {
  DBUG_ASSERT(table_arg != nullptr);
  DBUG_ASSERT(tbl_def_arg != nullptr);

  if (is_hidden_pk(index, table_arg, tbl_def_arg)) {
    return nullptr;
  }

  DBUG_ASSERT(table_arg->key_info != nullptr);

  return table_arg->key_info[index].comment.str;
}

/*
  Derive the column family name for the given index from its index comment.
  '*per_part_match_found' is set when a partition-specific qualifier matched.
*/
const std::string ha_rocksdb::generate_cf_name(
    const uint index, const TABLE *const table_arg,
    const Rdb_tbl_def *const tbl_def_arg, bool *per_part_match_found) {
  DBUG_ASSERT(table_arg != nullptr);
  DBUG_ASSERT(tbl_def_arg != nullptr);
  DBUG_ASSERT(per_part_match_found != nullptr);

  // When creating CF-s the caller needs to know if there was a custom CF name
  // specified for a given partition.
  *per_part_match_found = false;

  // Index comment is used to define the column family name specification(s).
  // If there was no comment, we get an empty string, and it means "use the
  // default column family".
  const char *const comment = get_key_comment(index, table_arg, tbl_def_arg);

  // `get_key_comment` can return `nullptr`, that's why this.
  std::string key_comment = comment ? comment : "";

  std::string cf_name = Rdb_key_def::parse_comment_for_qualifier(
      key_comment, table_arg, tbl_def_arg, per_part_match_found,
      RDB_CF_NAME_QUALIFIER);

  if (table_arg->part_info != nullptr && !*per_part_match_found) {
    // At this point we tried to search for a custom CF name for a partition,
    // but none was specified. Therefore default one will be used.
    return "";
  }

  // If we didn't find any partitioned/non-partitioned qualifiers, return the
  // comment itself.  NOTE: this currently handles returning the cf name
  // specified in the index comment in the case of no partitions, which doesn't
  // use any qualifiers at the moment. (aka its a special case)
  if (cf_name.empty() && !key_comment.empty()) {
    return key_comment;
  }

  return cf_name;
}

/* Return the table-level comment string. */
const std::string ha_rocksdb::get_table_comment(const TABLE *const table_arg) {
  DBUG_ASSERT(table_arg != nullptr);
  DBUG_ASSERT(table_arg->s != nullptr);

  return table_arg->s->comment.str;
}

/**
  @return
    HA_EXIT_SUCCESS  OK
    other            HA_ERR error code (can be SE-specific)
*/
int ha_rocksdb::write_row(uchar *const buf) {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(buf != nullptr);
  DBUG_ASSERT(buf == table->record[0]);
  DBUG_ASSERT(m_lock_rows == RDB_LOCK_WRITE);

  /*
    Note: "buf == table->record[0]" is copied from innodb. I am not aware of
    any use cases where this condition is not true.
  */
  if (table->next_number_field && buf == table->record[0]) {
    // Generate the auto-increment value before packing the new row.
    int err;
    if ((err = update_auto_increment())) {
      DBUG_RETURN(err);
    }
  }

  const int rv = update_write_row(nullptr, buf, skip_unique_check());

  if (rv == 0) {
#ifdef MARIAROCKS_NOT_YET
    stats.rows_inserted++;
#endif
    update_row_stats(ROWS_INSERTED);
  }

  DBUG_RETURN(rv);
}

/**
  Constructing m_last_rowkey (MyRocks key expression) from
  before_update|delete image (MySQL row expression).
  m_last_rowkey is normally set during lookup phase, such as
  rnd_next_with_direction() and rnd_pos(). With Read Free Replication,
  these read functions are skipped and update_rows(), delete_rows() are
  called without setting m_last_rowkey. This function sets m_last_rowkey
  for Read Free Replication.
*/
void ha_rocksdb::set_last_rowkey(const uchar *const old_data) {
#ifdef MARIAROCKS_NOT_YET
  if (old_data && use_read_free_rpl()) {
    const int old_pk_size = m_pk_descr->pack_record(
        table, m_pack_buffer, old_data, m_pk_packed_tuple, nullptr, false);
    m_last_rowkey.copy((const char *)m_pk_packed_tuple, old_pk_size,
                       &my_charset_bin);
  }
#endif
}

/*
  Pack the primary key of the row being written into m_pk_packed_tuple and
  publish it as row_info->new_pk_slice. Also fills in hidden_pk_id when the
  table uses a hidden PK.
*/
int ha_rocksdb::get_pk_for_update(struct update_row_info *const row_info) {
  int size;

  /*
    Get new row key for any insert, and any update where the pk is not hidden.
    Row key for updates with hidden pk is handled below.
  */
  if (!has_hidden_pk(table)) {
    row_info->hidden_pk_id = 0;

    row_info->new_pk_unpack_info = &m_pk_unpack_info;

    size = m_pk_descr->pack_record(table, m_pack_buffer, row_info->new_data,
                                   m_pk_packed_tuple,
                                   row_info->new_pk_unpack_info, false, 0, 0,
                                   nullptr, &row_info->ttl_pk_offset);
  } else if (row_info->old_data == nullptr) {
    // INSERT with a hidden PK: allocate the next hidden PK value.
    row_info->hidden_pk_id = update_hidden_pk_val();
    size =
        m_pk_descr->pack_hidden_pk(row_info->hidden_pk_id, m_pk_packed_tuple);
  } else {
    /*
      If hidden primary key, rowkey for new record will always be the same as
      before
    */
    size = row_info->old_pk_slice.size();
    memcpy(m_pk_packed_tuple, row_info->old_pk_slice.data(), size);
    int err = read_hidden_pk_id_from_rowkey(&row_info->hidden_pk_id);
    if (err) {
      return err;
    }
  }

  row_info->new_pk_slice =
      rocksdb::Slice((const char *)m_pk_packed_tuple, size);

  return HA_EXIT_SUCCESS;
}

/*
  Check whether the new PK value collides with an existing row, taking a row
  lock on it. Sets '*found' when a duplicate exists and '*pk_changed' when an
  UPDATE moves the row to a different PK value.
*/
int ha_rocksdb::check_and_lock_unique_pk(const uint &key_id,
                                         const struct update_row_info &row_info,
                                         bool *const found,
                                         bool *const pk_changed) {
  DBUG_ASSERT(found != nullptr);
  DBUG_ASSERT(pk_changed != nullptr);

  *pk_changed = false;

  /*
    For UPDATEs, if the key has changed, we need to obtain a lock. INSERTs
    always require locking.
  */
  if (row_info.old_pk_slice.size() > 0) {
    /*
      If the keys are the same, then no lock is needed
    */
    if (!Rdb_pk_comparator::bytewise_compare(row_info.new_pk_slice,
                                             row_info.old_pk_slice)) {
      *found = false;
      return HA_EXIT_SUCCESS;
    }

    *pk_changed = true;
  }

  /*
    Perform a read to determine if a duplicate entry exists. For primary
    keys, a point lookup will be sufficient.

    note: we intentionally don't set options.snapshot here. We want to read
    the latest committed data.
  */

  /*
    To prevent race conditions like below, it is necessary to
    take a lock for a target row. get_for_update() holds a gap lock if
    target key does not exist, so below conditions should never happen.

    1) T1 Get(empty) -> T2 Get(empty) -> T1 Put(insert) -> T1 commit
       -> T2 Put(overwrite) -> T2 commit
    2) T1 Get(empty) -> T1 Put(insert, not committed yet) -> T2 Get(empty)
       -> T2 Put(insert, blocked) -> T1 commit -> T2 commit(overwrite)
  */
  const rocksdb::Status s =
      get_for_update(row_info.tx, m_pk_descr->get_cf(), row_info.new_pk_slice,
                     &m_retrieved_record);
  if (!s.ok() && !s.IsNotFound()) {
    return row_info.tx->set_status_error(table->in_use, s,
                                         *m_key_descr_arr[key_id], m_tbl_def,
                                         m_table_handler);
  }

  *found = !s.IsNotFound();
  return HA_EXIT_SUCCESS;
}

/*
  Check whether the new value of secondary index 'key_id' collides with an
  existing entry; for unique indexes this also takes the lock guarding the
  key value.
*/
int ha_rocksdb::check_and_lock_sk(const uint &key_id,
                                  const struct update_row_info &row_info,
                                  bool *const found) {
  DBUG_ASSERT(found != nullptr);
  *found = false;

  /*
    Can skip checking this key if none of the key fields have changed.
  */
  if (row_info.old_data != nullptr && !m_update_scope.is_set(key_id)) {
    return HA_EXIT_SUCCESS;
  }

  KEY *key_info = nullptr;
  uint n_null_fields = 0;
  uint user_defined_key_parts = 1;

  key_info = &table->key_info[key_id];
  user_defined_key_parts = key_info->user_defined_key_parts;
  /*
    If there are no uniqueness requirements, there's no need to obtain a
    lock for this key.
*/
  if (!(key_info->flags & HA_NOSAME)) {
    return HA_EXIT_SUCCESS;
  }

  const Rdb_key_def &kd = *m_key_descr_arr[key_id];

  /*
    Calculate the new key for obtaining the lock

    For unique secondary indexes, the key used for locking does not
    include the extended fields.
  */
  int size =
      kd.pack_record(table, m_pack_buffer, row_info.new_data, m_sk_packed_tuple,
                     nullptr, false, 0, user_defined_key_parts, &n_null_fields);
  if (n_null_fields > 0) {
    /*
      If any fields are marked as NULL this will never match another row as
      to NULL never matches anything else including another NULL.
     */
    return HA_EXIT_SUCCESS;
  }

  const rocksdb::Slice new_slice =
      rocksdb::Slice((const char *)m_sk_packed_tuple, size);

  /*
    For UPDATEs, if the key has changed, we need to obtain a lock. INSERTs
    always require locking.
  */
  if (row_info.old_data != nullptr) {
    size = kd.pack_record(table, m_pack_buffer, row_info.old_data,
                          m_sk_packed_tuple_old, nullptr, false, 0,
                          user_defined_key_parts);
    const rocksdb::Slice old_slice =
        rocksdb::Slice((const char *)m_sk_packed_tuple_old, size);

    /*
      For updates, if the keys are the same, then no lock is needed

      Also check to see if the key has any fields set to NULL. If it does,
      then this key is unique since NULL is not equal to each other, so no
      lock is needed.
    */
    if (!Rdb_pk_comparator::bytewise_compare(new_slice, old_slice)) {
      return HA_EXIT_SUCCESS;
    }
  }

  /*
    Perform a read to determine if a duplicate entry exists - since this is
    a secondary index a range scan is needed.

    note: we intentionally don't set options.snapshot here. We want to read
    the latest committed data.
  */

  const bool all_parts_used = (user_defined_key_parts == kd.get_key_parts());

  /*
    This iterator seems expensive since we need to allocate and free
    memory for each unique index.

    If this needs to be optimized, for keys without NULL fields, the
    extended primary key fields can be migrated to the value portion of the
    key. This enables using Get() instead of Seek() as in the primary key
    case.

    The bloom filter may need to be disabled for this lookup.
  */
  const bool total_order_seek =
      !can_use_bloom_filter(ha_thd(), kd, new_slice, all_parts_used);
  const bool fill_cache = !THDVAR(ha_thd(), skip_fill_cache);

  /*
    psergey-todo: we just need to take lock, lookups not needed:
  */
  std::string dummy_value;
  const rocksdb::Status s =
      get_for_update(row_info.tx, kd.get_cf(), new_slice, &dummy_value);
  if (!s.ok() && !s.IsNotFound()) {
    return row_info.tx->set_status_error(table->in_use, s, kd, m_tbl_def,
                                         m_table_handler);
  }

  rocksdb::Iterator *const iter = row_info.tx->get_iterator(
      kd.get_cf(), total_order_seek, fill_cache, true /* read current data */,
      false /* acquire snapshot */);
  /*
    Need to scan the transaction to see if there is a duplicate key.
    Also need to scan RocksDB and verify the key has not been deleted
    in the transaction.
  */
  iter->Seek(new_slice);
  *found = !read_key_exact(kd, iter, all_parts_used, new_slice,
                           row_info.tx->m_snapshot_timestamp);
  delete iter;

  return HA_EXIT_SUCCESS;
}

/*
  For every key of the table, check uniqueness (when required) and lock the
  new key values before any writes are applied to the transaction.
*/
int ha_rocksdb::check_uniqueness_and_lock(
    const struct update_row_info &row_info, bool *const pk_changed) {
  /*
    Go through each index and determine if the index has uniqueness
    requirements. If it does, then try to obtain a row lock on the new values.
    Once all locks have been obtained, then perform the changes needed to
    update/insert the row.
  */
  for (uint key_id = 0; key_id < m_tbl_def->m_key_count; key_id++) {
    bool found;
    int rc;

    if (is_pk(key_id, table, m_tbl_def)) {
      rc = check_and_lock_unique_pk(key_id, row_info, &found, pk_changed);
    } else {
      rc = check_and_lock_sk(key_id, row_info, &found);
    }

    if (rc != HA_EXIT_SUCCESS) {
      return rc;
    }

    /*
      If the pk key has ttl, we may need to pretend the row wasn't
      found if it is already expired. The pk record is read into
      m_retrieved_record by check_and_lock_unique_pk().
*/
    if (is_pk(key_id, table, m_tbl_def) && found && m_pk_descr->has_ttl() &&
        should_hide_ttl_rec(*m_pk_descr,
                            rocksdb::Slice(&m_retrieved_record.front(),
                                           m_retrieved_record.size()),
                            (row_info.tx->m_snapshot_timestamp
                                 ? row_info.tx->m_snapshot_timestamp
                                 : static_cast<int64_t>(std::time(nullptr))))) {
      found = false;
    }

    if (found) {
      /* There is a row with this key already, so error out. */
      errkey = key_id;
      m_dupp_errkey = errkey;
      return HA_ERR_FOUND_DUPP_KEY;
    }
  }

  return HA_EXIT_SUCCESS;
}

/*
  Compare the memcmp form of 'key' (without the extended PK tail) against the
  previously seen key stored in 'sk_info'. Returns 1 when they are equal
  (i.e. a duplicate secondary key), 0 otherwise; the current key becomes the
  new "previous" key.
*/
int ha_rocksdb::check_duplicate_sk(const TABLE *table_arg,
                                   const Rdb_key_def &index,
                                   const rocksdb::Slice *key,
                                   struct unique_sk_buf_info *sk_info) {
  uint n_null_fields = 0;
  const rocksdb::Comparator *index_comp = index.get_cf()->GetComparator();

  /* Get proper SK buffer. */
  uchar *sk_buf = sk_info->swap_and_get_sk_buf();

  /* Get memcmp form of sk without extended pk tail */
  uint sk_memcmp_size =
      index.get_memcmp_sk_parts(table_arg, *key, sk_buf, &n_null_fields);

  sk_info->sk_memcmp_key =
      rocksdb::Slice(reinterpret_cast<char *>(sk_buf), sk_memcmp_size);

  // Keys containing NULL fields are never treated as duplicates.
  if (sk_info->sk_memcmp_key_old.size() > 0 && n_null_fields == 0 &&
      index_comp->Compare(sk_info->sk_memcmp_key, sk_info->sk_memcmp_key_old) ==
          0) {
    return 1;
  }

  sk_info->sk_memcmp_key_old = sk_info->sk_memcmp_key;
  return 0;
}

/*
  Add one key/value pair to an ongoing bulk load. With sort == true the pair
  goes through an external merge-sort buffer (Rdb_index_merge); otherwise it
  is appended directly to the SST file writer (Rdb_sst_info).
*/
int ha_rocksdb::bulk_load_key(Rdb_transaction *const tx, const Rdb_key_def &kd,
                              const rocksdb::Slice &key,
                              const rocksdb::Slice &value, bool sort) {
  DBUG_ENTER_FUNC();

  rocksdb::ColumnFamilyHandle *cf = kd.get_cf();
  DBUG_ASSERT(cf != nullptr);

  int res = HA_EXIT_SUCCESS;

  if (sort) {
    GL_INDEX_ID kd_gl_id = kd.get_gl_index_id();
    auto it = m_key_merge.find(kd_gl_id);
    if (it == m_key_merge.end()) {
      // First key for this index: lazily create its merge-sort buffer.
      m_key_merge.emplace(
          std::piecewise_construct, std::make_tuple(kd_gl_id),
          std::make_tuple(
              thd_rocksdb_tmpdir(), THDVAR(ha_thd(), merge_buf_size),
              THDVAR(ha_thd(), merge_combine_read_size),
              THDVAR(ha_thd(), merge_tmp_file_removal_delay_ms), cf));
      it = m_key_merge.find(kd_gl_id);
      if ((res = it->second.init()) != 0) {
        DBUG_RETURN(res);
      }

      if (m_bulk_load_tx == nullptr) {
        tx->start_bulk_load(this);
        m_bulk_load_tx = tx;
      }
    }
    res = it->second.add(key, value);
  } else {
    if (!m_sst_info) {
      m_sst_info.reset(new Rdb_sst_info(rdb, m_table_handler->m_table_name,
                                        kd.get_name(), cf, *rocksdb_db_options,
                                        THDVAR(ha_thd(), trace_sst_api)));
      tx->start_bulk_load(this);
      m_bulk_load_tx = tx;
    }

    DBUG_ASSERT(m_sst_info);

    res = m_sst_info->put(key, value);
  }

  DBUG_RETURN(res);
}

/*
  Finish any ongoing bulk load: commit the direct SST writer, then drain
  every per-index merge buffer into its own SST file and commit those as
  well. Protected by m_bulk_load_mutex.
*/
int ha_rocksdb::finalize_bulk_load() {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT_IMP(!m_key_merge.empty() || m_sst_info,
                  m_bulk_load_tx != nullptr);

  /* Skip if there are no possible ongoing bulk loads */
  if (m_key_merge.empty() && !m_sst_info && m_bulk_load_tx == nullptr) {
    DBUG_RETURN(HA_EXIT_SUCCESS);
  }

  int res = HA_EXIT_SUCCESS;

  RDB_MUTEX_LOCK_CHECK(m_bulk_load_mutex);

  if (m_sst_info) {
    res = m_sst_info->commit();
    m_sst_info.reset();
  }

  if (!m_key_merge.empty()) {
    rocksdb::Slice merge_key;
    rocksdb::Slice merge_val;
    for (auto it = m_key_merge.begin(); it != m_key_merge.end(); it++) {
      const std::string &index_name =
          ddl_manager.safe_find(it->first)->get_name();
      Rdb_index_merge &rdb_merge = it->second;
      Rdb_sst_info sst_info(rdb, m_table_handler->m_table_name, index_name,
                            rdb_merge.get_cf(), *rocksdb_db_options,
                            THDVAR(ha_thd(), trace_sst_api));

      while ((res = rdb_merge.next(&merge_key, &merge_val)) == 0) {
        if ((res = sst_info.put(merge_key, merge_val)) != 0) {
          break;
        }
      }

      // res == -1 => finished ok; res > 0 => error
      if (res <= 0) {
        if ((res = sst_info.commit()) != 0) {
          break;
        }
      }
    }
    m_key_merge.clear();

    /*
      Explicitly tell jemalloc to clean up any unused dirty pages at this
      point.
      See https://reviews.facebook.net/D63723 for more details.
*/
    purge_all_jemalloc_arenas();
  }

  if (m_bulk_load_tx != nullptr) {
    m_bulk_load_tx->end_bulk_load(this);
    m_bulk_load_tx = nullptr;
  }

  RDB_MUTEX_UNLOCK_CHECK(m_bulk_load_mutex);

  DBUG_RETURN(res);
}

/*
  Write the new primary key entry for an INSERT/UPDATE, deleting the old PK
  entry first when needed, and account for the bytes written.
*/
int ha_rocksdb::update_pk(const Rdb_key_def &kd,
                          const struct update_row_info &row_info,
                          const bool &pk_changed) {
  const uint key_id = kd.get_keyno();
  const bool hidden_pk = is_hidden_pk(key_id, table, m_tbl_def);
  ulonglong bytes_written = 0;

  /*
    If the PK has changed, or if this PK uses single deletes and this is an
    update, the old key needs to be deleted. In the single delete case, it
    might be possible to have this sequence of keys: PUT(X), PUT(X), SD(X),
    resulting in the first PUT(X) showing up.
  */
  if (!hidden_pk && (pk_changed || ((row_info.old_pk_slice.size() > 0) &&
                                    can_use_single_delete(key_id)))) {
    const rocksdb::Status s = delete_or_singledelete(
        key_id, row_info.tx, kd.get_cf(), row_info.old_pk_slice);
    if (!s.ok()) {
      return row_info.tx->set_status_error(table->in_use, s, kd, m_tbl_def,
                                           m_table_handler);
    } else {
      bytes_written = row_info.old_pk_slice.size();
    }
  }

  if (table->next_number_field) {
    update_auto_incr_val();
  }

  int rc = HA_EXIT_SUCCESS;
  rocksdb::Slice value_slice;
  /* Prepare the new record to be written into RocksDB */
  if ((rc = convert_record_to_storage_format(row_info, &value_slice))) {
    return rc;
  }

  const auto cf = m_pk_descr->get_cf();
  if (rocksdb_enable_bulk_load_api && THDVAR(table->in_use, bulk_load) &&
      !hidden_pk) {
    /*
      Write the primary key directly to an SST file using an SstFileWriter
    */
    rc = bulk_load_key(row_info.tx, kd, row_info.new_pk_slice, value_slice,
                       THDVAR(table->in_use, bulk_load_allow_unsorted));
  } else if (row_info.skip_unique_check || row_info.tx->m_ddl_transaction) {
    /*
      It is the responsibility of the user to make sure that the data being
      inserted doesn't violate any unique keys.
    */
    row_info.tx->get_indexed_write_batch()->Put(cf, row_info.new_pk_slice,
                                                value_slice);
  } else {
    // Normal path: locked Put that can report a duplicate-key conflict.
    const auto s = row_info.tx->put(cf, row_info.new_pk_slice, value_slice);
    if (!s.ok()) {
      if (s.IsBusy()) {
        errkey = table->s->primary_key;
        m_dupp_errkey = errkey;
        rc = HA_ERR_FOUND_DUPP_KEY;
      } else {
        rc = row_info.tx->set_status_error(table->in_use, s, *m_pk_descr,
                                           m_tbl_def, m_table_handler);
      }
    }
  }

  if (rc == HA_EXIT_SUCCESS) {
    row_info.tx->update_bytes_written(
        bytes_written + row_info.new_pk_slice.size() + value_slice.size());
  }
  return rc;
}

/*
  Write the new entry (and remove the old one, if any) for one secondary
  index as part of an INSERT/UPDATE.
*/
int ha_rocksdb::update_sk(const TABLE *const table_arg, const Rdb_key_def &kd,
                          const struct update_row_info &row_info) {
  int new_packed_size;
  int old_packed_size;

  rocksdb::Slice new_key_slice;
  rocksdb::Slice new_value_slice;
  rocksdb::Slice old_key_slice;

  const uint key_id = kd.get_keyno();

  ulonglong bytes_written = 0;

  /*
    Can skip updating this key if none of the key fields have changed and, if
    this table has TTL, the TTL timestamp has not changed.
  */
  if (row_info.old_data != nullptr && !m_update_scope.is_set(key_id) &&
      (!kd.has_ttl() || !m_ttl_bytes_updated)) {
    return HA_EXIT_SUCCESS;
  }

  const bool store_row_debug_checksums = should_store_row_debug_checksums();

  new_packed_size =
      kd.pack_record(table_arg, m_pack_buffer, row_info.new_data,
                     m_sk_packed_tuple, &m_sk_tails, store_row_debug_checksums,
                     row_info.hidden_pk_id, 0, nullptr, nullptr, m_ttl_bytes);

  if (row_info.old_data != nullptr) {
    // The old value
    old_packed_size = kd.pack_record(
        table_arg, m_pack_buffer, row_info.old_data, m_sk_packed_tuple_old,
        &m_sk_tails_old, store_row_debug_checksums, row_info.hidden_pk_id, 0,
        nullptr, nullptr, m_ttl_bytes);

    /*
      Check if we are going to write the same value. This can happen when
      one does
        UPDATE tbl SET col='foo'
      and we are looking at the row that already has col='foo'.

      We also need to compare the unpack info.
Suppose, the collation is case-insensitive, and unpack info contains information about whether the letters were uppercase and lowercase. Then, both 'foo' and 'FOO' will have the same key value, but different data in unpack_info. (note: anyone changing bytewise_compare should take this code into account) */ if (old_packed_size == new_packed_size && m_sk_tails_old.get_current_pos() == m_sk_tails.get_current_pos() && !(kd.has_ttl() && m_ttl_bytes_updated) && memcmp(m_sk_packed_tuple_old, m_sk_packed_tuple, old_packed_size) == 0 && memcmp(m_sk_tails_old.ptr(), m_sk_tails.ptr(), m_sk_tails.get_current_pos()) == 0) { return HA_EXIT_SUCCESS; } /* Deleting entries from secondary index should skip locking, but be visible to the transaction. (also note that DDL statements do not delete rows, so this is not a DDL statement) */ old_key_slice = rocksdb::Slice( reinterpret_cast<const char *>(m_sk_packed_tuple_old), old_packed_size); row_info.tx->get_indexed_write_batch()->SingleDelete(kd.get_cf(), old_key_slice); bytes_written = old_key_slice.size(); } new_key_slice = rocksdb::Slice( reinterpret_cast<const char *>(m_sk_packed_tuple), new_packed_size); new_value_slice = rocksdb::Slice(reinterpret_cast<const char *>(m_sk_tails.ptr()), m_sk_tails.get_current_pos()); row_info.tx->get_indexed_write_batch()->Put(kd.get_cf(), new_key_slice, new_value_slice); row_info.tx->update_bytes_written(bytes_written + new_key_slice.size() + new_value_slice.size()); return HA_EXIT_SUCCESS; } int ha_rocksdb::update_indexes(const struct update_row_info &row_info, const bool &pk_changed) { int rc; // The PK must be updated first to pull out the TTL value. rc = update_pk(*m_pk_descr, row_info, pk_changed); if (rc != HA_EXIT_SUCCESS) { return rc; } // Update the remaining indexes. 
for (uint key_id = 0; key_id < m_tbl_def->m_key_count; key_id++) {
    if (is_pk(key_id, table, m_tbl_def)) {
      continue;  // PK was already handled by update_pk() above
    }

    rc = update_sk(table, *m_key_descr_arr[key_id], row_info);
    if (rc != HA_EXIT_SUCCESS) {
      return rc;
    }
  }

  return HA_EXIT_SUCCESS;
}

/*
  Common implementation behind write_row()/update_row(): check uniqueness,
  take locks, then apply the row change to all indexes inside the current
  transaction.

  @param old_data          previous row image, or nullptr for an INSERT
  @param new_data          new row image
  @param skip_unique_check when true, skip duplicate-key checking/locking

  @return HA_EXIT_SUCCESS or an HA_ERR error code.
*/
int ha_rocksdb::update_write_row(const uchar *const old_data,
                                 const uchar *const new_data,
                                 const bool skip_unique_check) {
  DBUG_ENTER_FUNC();

  bool pk_changed = false;
  struct update_row_info row_info;

  row_info.old_data = old_data;
  row_info.new_data = new_data;
  row_info.skip_unique_check = skip_unique_check;
  row_info.new_pk_unpack_info = nullptr;

  set_last_rowkey(old_data);

  row_info.tx = get_or_create_tx(table->in_use);

  if (old_data != nullptr) {
    // This is an UPDATE: remember the old PK (m_last_rowkey was set from
    // old_data above) and figure out which indexes are affected.
    row_info.old_pk_slice =
        rocksdb::Slice(m_last_rowkey.ptr(), m_last_rowkey.length());

    /* Determine which indexes need updating. */
    calc_updated_indexes();
  }

  /*
    Get the new row key into row_info.new_pk_slice
  */
  int rc = get_pk_for_update(&row_info);
  if (rc != HA_EXIT_SUCCESS) {
    DBUG_RETURN(rc);
  }

  if (!skip_unique_check) {
    /*
      Check to see if we are going to have failures because of unique
      keys.  Also lock the appropriate key values.
    */
    rc = check_uniqueness_and_lock(row_info, &pk_changed);
    if (rc != HA_EXIT_SUCCESS) {
      DBUG_RETURN(rc);
    }
  }

  DEBUG_SYNC(ha_thd(), "rocksdb.update_write_row_after_unique_check");

  /*
    At this point, all locks have been obtained, and all checks for duplicate
    keys have been performed. No further errors can be allowed to occur from
    here because updates to the transaction will be made and those updates
    cannot be easily removed without rolling back the entire transaction.
  */
  rc = update_indexes(row_info, pk_changed);
  if (rc != HA_EXIT_SUCCESS) {
    DBUG_RETURN(rc);
  }

  if (old_data != nullptr) {
    row_info.tx->incr_update_count();
  } else {
    row_info.tx->incr_insert_count();
  }

  if (do_bulk_commit(row_info.tx)) {
    DBUG_RETURN(HA_ERR_ROCKSDB_BULK_LOAD);
  }

  DBUG_RETURN(HA_EXIT_SUCCESS);
}

/*
  Open a cursor
*/

/*
  Create (or reuse) the RocksDB iterator used for scans over index kd.

  @param slice        key prefix the scan starts from; must be at least
                      eq_cond_len bytes
  @param use_all_keys passed to can_use_bloom_filter() to decide whether the
                      prefix bloom filter may be consulted
  @param eq_cond_len  length of the equality-condition prefix of *slice
*/
void ha_rocksdb::setup_scan_iterator(const Rdb_key_def &kd,
                                     rocksdb::Slice *const slice,
                                     const bool use_all_keys,
                                     const uint eq_cond_len) {
  DBUG_ASSERT(slice != nullptr);
  DBUG_ASSERT(slice->size() >= eq_cond_len);

  Rdb_transaction *const tx = get_or_create_tx(table->in_use);

  bool skip_bloom = true;

  const rocksdb::Slice eq_cond(slice->data(), eq_cond_len);
  if (can_use_bloom_filter(ha_thd(), kd, eq_cond, use_all_keys)) {
    skip_bloom = false;
  }

  /*
    In some cases, setup_scan_iterator() is called multiple times from
    the same query but bloom filter can not always be used.
    Suppose the following query example. id2 is VARCHAR(30) and PRIMARY KEY
    (id1, id2).
      select count(*) from t2 WHERE id1=100 and id2 IN ('00000000000000000000',
      '100');
    In this case, setup_scan_iterator() is called twice, the first time is for
    (id1, id2)=(100, '00000000000000000000') and the second time is for (100,
    '100').
    If prefix bloom filter length is 24 bytes, prefix bloom filter can be used
    for the first condition but not for the second condition.
    If bloom filter condition is changed, currently it is necessary to destroy
    and re-create Iterator.
  */
  if (m_scan_it_skips_bloom != skip_bloom) {
    release_scan_iterator();
  }

  /*
    SQL layer can call rnd_init() multiple times in a row.
    In that case, re-use the iterator, but re-position it at the table start.
  */
  if (!m_scan_it) {
    const bool fill_cache = !THDVAR(ha_thd(), skip_fill_cache);
    if (commit_in_the_middle()) {
      // When committing mid-statement (bulk load style), the transaction's
      // snapshot would be released by the commits, so pin our own snapshot
      // for the lifetime of this iterator.
      DBUG_ASSERT(m_scan_it_snapshot == nullptr);
      m_scan_it_snapshot = rdb->GetSnapshot();
      auto read_opts = rocksdb::ReadOptions();
      read_opts.total_order_seek = true;  // TODO: set based on WHERE conditions
      read_opts.snapshot = m_scan_it_snapshot;
      m_scan_it = rdb->NewIterator(read_opts, kd.get_cf());
    } else {
      m_scan_it = tx->get_iterator(kd.get_cf(), skip_bloom, fill_cache);
    }
    m_scan_it_skips_bloom = skip_bloom;
  }
}

/*
  Destroy the scan iterator and release the snapshot that was pinned for it
  (if any).
*/
void ha_rocksdb::release_scan_iterator() {
  delete m_scan_it;
  m_scan_it = nullptr;

  if (m_scan_it_snapshot) {
    rdb->ReleaseSnapshot(m_scan_it_snapshot);
    m_scan_it_snapshot = nullptr;
  }
}

/*
  Position the scan iterator at the start (or, for a reverse column family,
  the end) of this table's primary key range, for a full table scan.
*/
void ha_rocksdb::setup_iterator_for_rnd_scan() {
  uint key_size;

  if (m_pk_descr->m_is_reverse_cf)
    m_pk_descr->get_supremum_key(m_pk_packed_tuple, &key_size);
  else
    m_pk_descr->get_infimum_key(m_pk_packed_tuple, &key_size);

  rocksdb::Slice table_key((const char *)m_pk_packed_tuple, key_size);

  setup_scan_iterator(*m_pk_descr, &table_key);
  m_scan_it->Seek(table_key);
  // The first rnd_next() must consume the row we just sought to, not advance.
  m_skip_scan_it_next_call = true;
}

/**
  @return
    HA_EXIT_SUCCESS  OK
    other            HA_ERR error code (can be SE-specific)
*/
int ha_rocksdb::rnd_init(bool scan) {
  DBUG_ENTER_FUNC();

  Rdb_transaction *const tx = get_or_create_tx(table->in_use);
  setup_read_decoders();

  if (scan) {
    m_rnd_scan_is_new_snapshot = !tx->has_snapshot();
    setup_iterator_for_rnd_scan();
  } else {
    /* We don't need any preparations for rnd_pos() calls. */
  }

  // If m_lock_rows is on then we will be doing a get_for_update when accessing
  // the index, so don't acquire the snapshot right away.  Otherwise acquire
  // the snapshot immediately.
  tx->acquire_snapshot(m_lock_rows == RDB_LOCK_NONE);

  DBUG_RETURN(HA_EXIT_SUCCESS);
}

/**
  @return
    HA_EXIT_SUCCESS  OK
    other            HA_ERR error code (can be SE-specific)
*/
int ha_rocksdb::rnd_next(uchar *const buf) {
  DBUG_ENTER_FUNC();

  int rc;
  // Retry loop: a deadlock on the very first read of a fresh snapshot is
  // handled by dropping the snapshot/iterator and starting over.
  for (;;) {
    rc = rnd_next_with_direction(buf, true);
    if (rc != HA_ERR_LOCK_DEADLOCK || !m_rnd_scan_is_new_snapshot)
      break; /* exit the loop */

    // release the snapshot and iterator and then regenerate them
    Rdb_transaction *tx = get_or_create_tx(table->in_use);
    tx->release_snapshot();
    release_scan_iterator();
    setup_iterator_for_rnd_scan();
  }

  m_rnd_scan_is_new_snapshot = false;

  if (rc == HA_ERR_KEY_NOT_FOUND)
    rc = HA_ERR_END_OF_FILE;

  DBUG_RETURN(rc);
}

/*
  See also secondary_index_read().
*/
int ha_rocksdb::rnd_next_with_direction(uchar *const buf, bool move_forward) {
  DBUG_ENTER_FUNC();

  int rc;

  table->status = STATUS_NOT_FOUND;
#ifdef MARIAROCKS_NOT_YET
  stats.rows_requested++;
#endif
  if (!m_scan_it || !m_scan_it->Valid()) {
    /*
      We can get here when SQL layer has called

        h->index_init(PRIMARY);
        h->index_read_map(full index tuple, HA_READ_KEY_EXACT);

      In this case, we should return EOF.
    */
    DBUG_RETURN(HA_ERR_END_OF_FILE);
  }

  for (;;) {
    if (m_skip_scan_it_next_call) {
      // Iterator already sits on the next row (set up by the Seek).
      m_skip_scan_it_next_call = false;
    } else {
      if (move_forward)
        m_scan_it->Next(); /* this call cannot fail */
      else
        m_scan_it->Prev(); /* this call cannot fail */
    }

    if (!m_scan_it->Valid()) {
      rc = HA_ERR_END_OF_FILE;
      break;
    }

    /* check if we're out of this table */
    const rocksdb::Slice key = m_scan_it->key();
    if (!m_pk_descr->covers_key(key)) {
      rc = HA_ERR_END_OF_FILE;
      break;
    }

    if (m_lock_rows != RDB_LOCK_NONE) {
      /*
        Lock the row we've just read.

        Now we call get_for_update which will 1) Take a lock and 2) Will fail
        if the row was deleted since the snapshot was taken.
      */
      Rdb_transaction *const tx = get_or_create_tx(table->in_use);
      DEBUG_SYNC(ha_thd(), "rocksdb_concurrent_delete");

      // Skip rows whose TTL has expired relative to the snapshot time.
      if (m_pk_descr->has_ttl() &&
          should_hide_ttl_rec(*m_pk_descr, m_scan_it->value(),
                              tx->m_snapshot_timestamp)) {
        continue;
      }

      const rocksdb::Status s =
          get_for_update(tx, m_pk_descr->get_cf(), key, &m_retrieved_record);
      if (s.IsNotFound() &&
          my_core::thd_tx_isolation(ha_thd()) == ISO_READ_COMMITTED) {
        // This occurs if we accessed a row, tried to lock it, failed,
        // released and reacquired the snapshot (because of READ COMMITTED
        // mode) and the row was deleted by someone else in the meantime.
        // If so, we just want to move on to the next row.
        continue;
      }

      if (!s.ok()) {
        DBUG_RETURN(tx->set_status_error(table->in_use, s, *m_pk_descr,
                                         m_tbl_def, m_table_handler));
      }

      // If we called get_for_update() use the value from that call not from
      // the iterator as it may be stale since we don't have a snapshot
      // when m_lock_rows is not RDB_LOCK_NONE.
      m_last_rowkey.copy(key.data(), key.size(), &my_charset_bin);
      rc = convert_record_from_storage_format(&key, buf);
    } else {
      // Use the value from the iterator
      rocksdb::Slice value = m_scan_it->value();

      if (m_pk_descr->has_ttl() &&
          should_hide_ttl_rec(
              *m_pk_descr, value,
              get_or_create_tx(table->in_use)->m_snapshot_timestamp)) {
        continue;
      }

      m_last_rowkey.copy(key.data(), key.size(), &my_charset_bin);
      rc = convert_record_from_storage_format(&key, &value, buf);
    }

    table->status = 0;
    break;
  }

  if (!rc) {
#ifdef MARIAROCKS_NOT_YET
    stats.rows_read++;
    stats.rows_index_next++;
#endif
    update_row_stats(ROWS_READ);
  }

  DBUG_RETURN(rc);
}

// End of a full table scan: drop the scan iterator.
int ha_rocksdb::rnd_end() {
  DBUG_ENTER_FUNC();

  release_scan_iterator();

  DBUG_RETURN(HA_EXIT_SUCCESS);
}

/**
  @return
    HA_EXIT_SUCCESS  OK
    other            HA_ERR error code (can be SE-specific)
*/
int ha_rocksdb::index_init(uint idx, bool sorted) {
  DBUG_ENTER_FUNC();

  Rdb_transaction *const tx = get_or_create_tx(table->in_use);
  DBUG_ASSERT(tx != nullptr);

  setup_read_decoders();

  if (!m_keyread_only) {
    // Not an index-only read: compute which PK columns need to be looked up.
    m_key_descr_arr[idx]->get_lookup_bitmap(table, &m_lookup_bitmap);
  }

  // If m_lock_rows is not RDB_LOCK_NONE then we will be doing a get_for_update
  // when accessing the index, so don't acquire the snapshot right away.
  // Otherwise acquire the snapshot immediately.
  tx->acquire_snapshot(m_lock_rows == RDB_LOCK_NONE);

  active_index = idx;

  DBUG_RETURN(HA_EXIT_SUCCESS);
}

/**
  @return
    HA_EXIT_SUCCESS  OK
*/
int ha_rocksdb::index_end() {
  DBUG_ENTER_FUNC();

  release_scan_iterator();

  bitmap_free(&m_lookup_bitmap);

  active_index = MAX_KEY;
  in_range_check_pushed_down = FALSE;

  DBUG_RETURN(HA_EXIT_SUCCESS);
}

/**
  TRUNCATE TABLE: reset auto-increment / hidden-PK counters and delete all
  rows.

  @return
    HA_EXIT_SUCCESS  OK
    other            HA_ERR error code (can be SE-specific)
*/
int ha_rocksdb::truncate() {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(m_tbl_def != nullptr);

  m_tbl_def->m_auto_incr_val = 1;
  m_tbl_def->m_hidden_pk_val = 1;

  DBUG_RETURN(remove_rows(m_tbl_def));
}

/*
  Delete the row we've last read. The row is also passed as parameter.

  @detail
    The caller guarantees table buf points to the row that was just read.
    The row is either table->record[0] or table->record[1].
    (Check out InnoDB: row_update_for_mysql() has "UT_NOT_USED(mysql_rec)"

  @return
    HA_EXIT_SUCCESS  OK
    other            HA_ERR error code (can be SE-specific)
*/
int ha_rocksdb::delete_row(const uchar *const buf) {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(buf != nullptr);

  set_last_rowkey(buf);

  rocksdb::Slice key_slice(m_last_rowkey.ptr(), m_last_rowkey.length());
  Rdb_transaction *const tx = get_or_create_tx(table->in_use);
  ulonglong bytes_written = 0;

  const uint index = pk_index(table, m_tbl_def);
  rocksdb::Status s =
      delete_or_singledelete(index, tx, m_pk_descr->get_cf(), key_slice);
  if (!s.ok()) {
    DBUG_RETURN(tx->set_status_error(table->in_use, s, *m_pk_descr, m_tbl_def,
                                     m_table_handler));
  } else {
    bytes_written = key_slice.size();
  }

  // A hidden PK id is only needed to pack secondary keys, so skip reading it
  // when the PK is the only index.
  longlong hidden_pk_id = 0;
  if (m_tbl_def->m_key_count > 1 && has_hidden_pk(table)) {
    int err = read_hidden_pk_id_from_rowkey(&hidden_pk_id);
    if (err) {
      DBUG_RETURN(err);
    }
  }

  // Delete the record for every secondary index
  for (uint i = 0; i < m_tbl_def->m_key_count; i++) {
    if (!is_pk(i, table, m_tbl_def)) {
      int packed_size;
      const Rdb_key_def &kd = *m_key_descr_arr[i];
      packed_size = kd.pack_record(table, m_pack_buffer, buf, m_sk_packed_tuple,
                                   nullptr, false, hidden_pk_id);
      rocksdb::Slice secondary_key_slice(
          reinterpret_cast<const char *>(m_sk_packed_tuple), packed_size);
      /* Deleting on secondary key doesn't need any locks: */
      tx->get_indexed_write_batch()->SingleDelete(kd.get_cf(),
                                                  secondary_key_slice);
      bytes_written += secondary_key_slice.size();
    }
  }

  tx->incr_delete_count();

  if (do_bulk_commit(tx)) {
    DBUG_RETURN(HA_ERR_ROCKSDB_BULK_LOAD);
  }
#ifdef MARIAROCKS_NOT_YET
  stats.rows_deleted++;
#endif
  update_row_stats(ROWS_DELETED);
  tx->update_bytes_written(bytes_written);

  DBUG_RETURN(HA_EXIT_SUCCESS);
}

/*
  Issue a SingleDelete where the index allows it (key written at most once
  since the last delete), otherwise a regular Delete.
*/
rocksdb::Status ha_rocksdb::delete_or_singledelete(
    uint index, Rdb_transaction *const tx,
    rocksdb::ColumnFamilyHandle *const column_family,
    const rocksdb::Slice &key) {
  if (can_use_single_delete(index))
    return tx->single_delete(column_family, key);
  return tx->delete_key(column_family, key);
}

/*
  Refresh handler::stats (records, data/index file lengths) from the cached
  per-index statistics in the key descriptors.
*/
void ha_rocksdb::update_stats(void) {
  DBUG_ENTER_FUNC();

  stats.records = 0;
  stats.index_file_length = 0ul;
  stats.data_file_length = 0ul;
  stats.mean_rec_length = 0;

  for (uint i = 0; i < m_tbl_def->m_key_count; i++) {
    if (is_pk(i, table, m_tbl_def)) {
      stats.data_file_length = m_pk_descr->m_stats.m_actual_disk_size;
      stats.records = m_pk_descr->m_stats.m_rows;
    } else {
      stats.index_file_length += m_key_descr_arr[i]->m_stats.m_actual_disk_size;
    }
  }

  DBUG_VOID_RETURN;
}

/**
  @return
    HA_EXIT_SUCCESS  OK
    HA_EXIT_FAILURE  Error
*/
int ha_rocksdb::info(uint flag) {
  DBUG_ENTER_FUNC();

  if (!table)
    DBUG_RETURN(HA_EXIT_FAILURE);

  if (flag & HA_STATUS_VARIABLE) {
    /*
      Test only to simulate corrupted stats
    */
    DBUG_EXECUTE_IF("myrocks_simulate_negative_stats",
                    m_pk_descr->m_stats.m_actual_disk_size =
                        -m_pk_descr->m_stats.m_actual_disk_size;);

    update_stats();

    /*
      If any stats are negative due to bad cached stats, re-run analyze table
      and re-retrieve the stats.
    */
    if (static_cast<longlong>(stats.data_file_length) < 0 ||
        static_cast<longlong>(stats.index_file_length) < 0 ||
        static_cast<longlong>(stats.records) < 0) {
      if (analyze(nullptr, nullptr)) {
        DBUG_RETURN(HA_EXIT_FAILURE);
      }

      update_stats();
    }

    // if number of records is hardcoded, we do not want to force computation
    // of memtable cardinalities
    if (stats.records == 0 ||
        (rocksdb_force_compute_memtable_stats &&
         rocksdb_debug_optimizer_n_rows == 0)) {
      // First, compute SST files stats
      uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2];
      auto r = get_range(pk_index(table, m_tbl_def), buf);
      uint64_t sz = 0;
      uint8_t include_flags = rocksdb::DB::INCLUDE_FILES;
      // recompute SST files stats only if records count is 0
      if (stats.records == 0) {
        rdb->GetApproximateSizes(m_pk_descr->get_cf(), &r, 1, &sz,
                                 include_flags);
        // Estimate the row count from the on-disk size and an assumed
        // per-row footprint.
        stats.records += sz / ROCKSDB_ASSUMED_KEY_VALUE_DISK_SIZE;
        stats.data_file_length += sz;
      }
      // Second, compute memtable stats. This call is expensive, so cache
      // values computed for some time.
uint64_t cachetime = rocksdb_force_compute_memtable_stats_cachetime;
      uint64_t time = (cachetime == 0) ? 0 : my_interval_timer() / 1000;
      if (cachetime == 0 ||
          time > m_table_handler->m_mtcache_last_update + cachetime) {
        uint64_t memtableCount;
        uint64_t memtableSize;

        rdb->GetApproximateMemTableStats(m_pk_descr->get_cf(), r,
                                         &memtableCount, &memtableSize);

        // Atomically update all of these fields at the same time
        if (cachetime > 0) {
          // Only the first concurrent updater (fetch_add returns 0) writes
          // the cached values; others skip to avoid torn updates.
          if (m_table_handler->m_mtcache_lock.fetch_add(
                  1, std::memory_order_acquire) == 0) {
            m_table_handler->m_mtcache_count = memtableCount;
            m_table_handler->m_mtcache_size = memtableSize;
            m_table_handler->m_mtcache_last_update = time;
          }

          m_table_handler->m_mtcache_lock.fetch_sub(1,
                                                    std::memory_order_release);
        }

        stats.records += memtableCount;
        stats.data_file_length += memtableSize;
      } else {
        // Cached data is still valid, so use it instead
        stats.records += m_table_handler->m_mtcache_count;
        stats.data_file_length += m_table_handler->m_mtcache_size;
      }

      if (rocksdb_debug_optimizer_n_rows > 0)
        stats.records = rocksdb_debug_optimizer_n_rows;
    }

    if (stats.records != 0)
      stats.mean_rec_length = stats.data_file_length / stats.records;
  }

  if (flag & HA_STATUS_CONST) {
    ref_length = m_pk_descr->max_storage_fmt_length();

    // TODO: Needs to reimplement after having real index statistics
    for (uint i = 0; i < m_tbl_def->m_key_count; i++) {
      if (is_hidden_pk(i, table, m_tbl_def)) {
        continue;
      }
      KEY *const k = &table->key_info[i];
      for (uint j = 0; j < k->ext_key_parts; j++) {
        const Rdb_index_stats &k_stats = m_key_descr_arr[i]->m_stats;
        // rec_per_key estimate: rows / distinct keys for this prefix length,
        // 0 when no cardinality data is available.
        uint x = k_stats.m_distinct_keys_per_prefix.size() > j &&
                         k_stats.m_distinct_keys_per_prefix[j] > 0
                     ? k_stats.m_rows / k_stats.m_distinct_keys_per_prefix[j]
                     : 0;
        if (x > stats.records)
          x = stats.records;
        if ((x == 0 && rocksdb_debug_optimizer_no_zero_cardinality) ||
            rocksdb_debug_optimizer_n_rows > 0) {
          // Fake cardinality implementation. For example, (idx1, idx2, idx3)
          // index
          /*
            Make MariaRocks behave the same way as MyRocks does:
            1. SQL layer thinks that unique secondary indexes are not extended
               with PK columns (both in MySQL and MariaDB)
            2. MariaDB also thinks that indexes with partially-covered columns
               are not extended with PK columns. Use the same number of
               keyparts that MyRocks would use.
          */
          uint ext_key_parts2;
          if (k->flags & HA_NOSAME)
            ext_key_parts2 = k->ext_key_parts;  // This is #1
          else
            ext_key_parts2 = m_key_descr_arr[i]->get_key_parts();  // This is #2.

          // will have rec_per_key for (idx1)=4, (idx1,2)=2, and (idx1,2,3)=1.
          // rec_per_key for the whole index is 1, and multiplied by 2^n if
          // n suffix columns of the index are not used.
          x = 1 << (ext_key_parts2 - j - 1);
        }
        k->rec_per_key[j] = x;
      }
    }
  }

  if (flag & HA_STATUS_ERRKEY) {
    /*
      Currently we support only primary keys so we know which key had a
      uniqueness violation.
    */
    errkey = m_dupp_errkey;
    dup_ref = m_pk_tuple;  // TODO(?): this should store packed PK.
  }

  if (flag & HA_STATUS_AUTO) {
    stats.auto_increment_value = m_tbl_def->m_auto_incr_val;
  }

  DBUG_RETURN(HA_EXIT_SUCCESS);
}

/*
  Store the row's position (its packed primary key) into handler::ref so the
  SQL layer can come back to it later via rnd_pos().
*/
void ha_rocksdb::position(const uchar *const record) {
  DBUG_ENTER_FUNC();

  longlong hidden_pk_id = 0;
  if (has_hidden_pk(table) && read_hidden_pk_id_from_rowkey(&hidden_pk_id))
    DBUG_ASSERT(false);  // should never reach here

  /*
    Get packed primary key value from the record.

    (Note: m_last_rowkey has the packed PK of last-read-row, which allows to
    handle most cases, but there is an exception to this: when slave applies
    RBR events, it fills the record and then calls position(); rnd_pos())

    Also note that we *can* rely on PK column values being available. This is
    because table_flags() includes HA_PRIMARY_KEY_REQUIRED_FOR_POSITION bit.
    When that is true, table->prepare_for_position() adds PK columns into the
    read set (this may potentially disable index-only access when PK column
    cannot be restored from its mem-comparable form in the secondary indexes).
  */
  const uint packed_size = m_pk_descr->pack_record(
      table, m_pack_buffer, record, ref, nullptr, false, hidden_pk_id);

  /*
    It could be that mem-comparable form of PK occupies less than ref_length
    bytes. Fill the remainder with zeros.
  */
  if (ref_length > packed_size)
    memset(ref + packed_size, 0, ref_length - packed_size);

  DBUG_VOID_RETURN;
}

/**
  Read the row whose position (packed PK) is in pos.

  @return
    HA_EXIT_SUCCESS  OK
    other            HA_ERR error code (can be SE-specific)
*/
int ha_rocksdb::rnd_pos(uchar *const buf, uchar *const pos) {
  DBUG_ENTER_FUNC();

  int rc;
  size_t len;

#ifdef MARIAROCKS_NOT_YET
  stats.rows_requested++;
#endif
  // Recover the true key length from the zero-padded ref buffer.
  len = m_pk_descr->key_length(table,
                               rocksdb::Slice((const char *)pos, ref_length));
  if (len == size_t(-1)) {
    DBUG_RETURN(HA_ERR_ROCKSDB_CORRUPT_DATA); /* Data corruption? */
  }

  rc = get_row_by_rowid(buf, pos, len);

  if (!rc) {
#ifdef MARIAROCKS_NOT_YET
    stats.rows_read++;
#endif
    update_row_stats(ROWS_READ);
  }

  DBUG_RETURN(rc);
}

/*
  @brief
    Calculate (if needed) the bitmap of indexes that are modified by the
    current query

  @detail
    The calculation done by checking index definitions against the
    table->write_set column bitmap.  The result is cached in m_update_scope
    until m_update_scope_is_valid is reset.
*/
void ha_rocksdb::calc_updated_indexes() {
  if (!m_update_scope_is_valid) {
    m_update_scope_is_valid = true;
    m_update_scope.clear_all();

    for (uint keynr = 0; keynr < table->s->keys; keynr++) {
      const Rdb_key_def &kd = *m_key_descr_arr[keynr];
      /* Walk over all key parts, including the "extended key" suffix */
      const uint key_parts = kd.get_key_parts();
      for (uint kp = 0; kp < key_parts; kp++) {
        // A hidden PK occupies the last key part and has no table field.
        if (has_hidden_pk(table) && kp + 1 == key_parts)
          break;

        Field *const field = kd.get_table_field_for_part_no(table, kp);
        if (bitmap_is_set(table->write_set, field->field_index)) {
          m_update_scope.set_bit(keynr);
          break;
        }
      }
    }
  }
}

/**
  @return
    HA_EXIT_SUCCESS  OK
    other            HA_ERR error code (can be SE-specific)
*/
int ha_rocksdb::update_row(const uchar *const old_data, uchar *const new_data) {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(old_data != nullptr);
  DBUG_ASSERT(new_data != nullptr);
  DBUG_ASSERT(m_lock_rows == RDB_LOCK_WRITE);
  /*
    old_data points to record we're updating. It is the same as the record
    we've just read (for multi-table UPDATE, too, because SQL layer will make
    an rnd_pos() call to re-read the record before calling update_row())
  */
  DBUG_ASSERT(new_data == table->record[0]);

  const int rv = update_write_row(old_data, new_data, false);

  if (rv == 0) {
#ifdef MARIAROCKS_NOT_YET
    stats.rows_updated++;
#endif
    update_row_stats(ROWS_UPDATED);
  }

  DBUG_RETURN(rv);
}

/* MariaDB's temporary: MyRocks has this function in sql/handler.cc: */
bool can_hold_read_locks_on_select(THD *thd, thr_lock_type lock_type) {
  return (lock_type == TL_READ_WITH_SHARED_LOCKS ||
          lock_type == TL_READ_NO_INSERT ||
          (lock_type != TL_IGNORE && thd->lex->sql_command != SQLCOM_SELECT));
}

/* The following function was copied from ha_blackhole::store_lock: */
THR_LOCK_DATA **ha_rocksdb::store_lock(THD *const thd, THR_LOCK_DATA **to,
                                       enum thr_lock_type lock_type) {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(thd != nullptr);
  DBUG_ASSERT(to != nullptr);

  bool in_lock_tables = my_core::thd_in_lock_tables(thd);

  /* First, make a decision about
MyRocks's internal locking */
  if (lock_type >= TL_WRITE_ALLOW_WRITE) {
    m_lock_rows = RDB_LOCK_WRITE;
  } else if (lock_type == TL_READ_WITH_SHARED_LOCKS) {
    m_lock_rows = RDB_LOCK_READ;
  } else {
    m_lock_rows = RDB_LOCK_NONE;
    if (THDVAR(thd, lock_scanned_rows)) {
      /*
        The following logic was copied directly from
        ha_innobase::store_lock_with_x_type() in storage/innobase/handler/
        ha_innodb.cc and causes MyRocks to leave locks in place on rows that
        are in a table that is not being updated.
      */
      const uint sql_command = my_core::thd_sql_command(thd);
      if ((lock_type == TL_READ && in_lock_tables) ||
          (lock_type == TL_READ_HIGH_PRIORITY && in_lock_tables) ||
          can_hold_read_locks_on_select(thd, lock_type)) {
        ulong tx_isolation = my_core::thd_tx_isolation(thd);
        if (sql_command != SQLCOM_CHECKSUM &&
            ((my_core::thd_test_options(thd, OPTION_BIN_LOG) &&
              tx_isolation > ISO_READ_COMMITTED) ||
             tx_isolation == ISO_SERIALIZABLE ||
             (lock_type != TL_READ && lock_type != TL_READ_NO_INSERT) ||
             (sql_command != SQLCOM_INSERT_SELECT &&
              sql_command != SQLCOM_REPLACE_SELECT &&
              sql_command != SQLCOM_UPDATE && sql_command != SQLCOM_DELETE &&
              sql_command != SQLCOM_CREATE_TABLE))) {
          m_lock_rows = RDB_LOCK_READ;
        }
      }
    }
  }

  /* Then, tell the SQL layer what kind of locking it should use: */
  if (lock_type != TL_IGNORE && m_db_lock.type == TL_UNLOCK) {
    /*
      Here is where we get into the guts of a row level lock.
      If TL_UNLOCK is set
      If we are not doing a LOCK TABLE or DISCARD/IMPORT
      TABLESPACE, then allow multiple writers
    */

    if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && lock_type <= TL_WRITE) &&
        !in_lock_tables && !my_core::thd_tablespace_op(thd)) {
      lock_type = TL_WRITE_ALLOW_WRITE;
    }

    /*
      In queries of type INSERT INTO t1 SELECT ... FROM t2 ...
      MySQL would use the lock TL_READ_NO_INSERT on t2, and that
      would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts
      to t2. Convert the lock to a normal read lock to allow
      concurrent inserts to t2.
    */

    if (lock_type == TL_READ_NO_INSERT && !in_lock_tables) {
      lock_type = TL_READ;
    }

    m_db_lock.type = lock_type;
  }

  *to++ = &m_db_lock;

  DBUG_RETURN(to);
}

// Cache session variables the handler consults on the hot write path.
void ha_rocksdb::read_thd_vars(THD *const thd) {
  m_store_row_debug_checksums = THDVAR(thd, store_row_debug_checksums);
  m_verify_row_debug_checksums = THDVAR(thd, verify_row_debug_checksums);
  m_checksums_pct = THDVAR(thd, checksums_pct);
}

/*
  Return the session's rocksdb tmpdir setting, or nullptr when unset/empty.
*/
const char *ha_rocksdb::thd_rocksdb_tmpdir() {
  const char *tmp_dir = THDVAR(ha_thd(), tmpdir);

  /*
    We want to treat an empty string as nullptr, in these cases DDL operations
    will use the default --tmpdir passed to mysql instead.
  */
  if (tmp_dir != nullptr && *tmp_dir == '\0') {
    tmp_dir = nullptr;
  }

  return (tmp_dir);
}

/**
  @return
    HA_EXIT_SUCCESS  OK
    other            HA_ERR error code (cannot be SE-specific)
*/
int ha_rocksdb::external_lock(THD *const thd, int lock_type) {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(thd != nullptr);

  int res = HA_EXIT_SUCCESS;
  int binlog_format = my_core::thd_binlog_format(thd);
  bool unsafe_for_binlog = THDVAR(ha_thd(), unsafe_for_binlog);

  /*
    A note about (*) below: In fb/mysql, LOCK TABLE myrocks_table WRITE
    - produces an error when @@binlog_format='ROW'.
    - does not produce an error with @@binlog_format='STATEMENT'

    We cannot easily provide this behavior in MariaDB, because LOCK ROWS
    changes @@binlog_format for the duration of the statement.

    Instead we will make the check in ha_rocksdb::start_stmt and will produce
    an error in the first DML statement done under LOCK TABLE

    (**) - The same happens for other "DDL-type" commands, so allow them here
    as well.
  */
  if (lock_type == F_WRLCK && !thd->rgi_slave && !unsafe_for_binlog &&
      binlog_format != BINLOG_FORMAT_ROW &&
      binlog_format != BINLOG_FORMAT_UNSPEC &&
      thd->lex->sql_command != SQLCOM_LOCK_TABLES &&  // (*)
      thd->lex->sql_command != SQLCOM_ANALYZE &&      // (**)
      thd->lex->sql_command != SQLCOM_OPTIMIZE &&     // (**)
      my_core::thd_binlog_filter_ok(thd)) {
    my_error(ER_REQUIRE_ROW_BINLOG_FORMAT, MYF(0));
    DBUG_RETURN(HA_ERR_UNSUPPORTED);
  }

  if (lock_type == F_UNLCK) {
    Rdb_transaction *const tx = get_or_create_tx(thd);

    tx->io_perf_end_and_record(&m_io_perf);
    tx->m_n_mysql_tables_in_use--;
    if (tx->m_n_mysql_tables_in_use == 0 &&
        !my_core::thd_test_options(thd,
                                   OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) {
      /*
        Do like InnoDB: when we get here, it's time to commit a
        single-statement transaction.

        If the statement involved multiple tables, this code will be executed
        for each of them, but that's ok because non-first tx->commit() calls
        will be no-ops.
      */
      if (tx->commit_or_rollback()) {
        res = HA_ERR_INTERNAL_ERROR;
      }
    }
  } else {
    // Locking the table: validate isolation level first.
    if (my_core::thd_tx_isolation(thd) < ISO_READ_COMMITTED ||
        my_core::thd_tx_isolation(thd) > ISO_REPEATABLE_READ) {
      my_error(ER_ISOLATION_MODE_NOT_SUPPORTED, MYF(0),
               tx_isolation_names[my_core::thd_tx_isolation(thd)]);
      DBUG_RETURN(HA_ERR_UNSUPPORTED);
    }
    /*
      It's nice to do the following on start of every statement. The problem
      is, handler->start_stmt() is not called for INSERTs.
      So, we put this code here.
    */
    Rdb_transaction *const tx = get_or_create_tx(thd);
    read_thd_vars(thd);

    // Force recomputation of the updated-indexes bitmap for this statement.
    m_update_scope_is_valid = false;

    if (skip_unique_check()) {
      // Unique checks are disabled: reject statements whose semantics
      // depend on duplicate detection.
      if ((thd->lex->sql_command == SQLCOM_INSERT ||
           thd->lex->sql_command == SQLCOM_LOAD ||
           thd->lex->sql_command == SQLCOM_REPLACE) &&
          (thd->lex->duplicates == DUP_REPLACE ||
           thd->lex->duplicates == DUP_UPDATE)) {
        my_error(ER_ON_DUPLICATE_DISABLED, MYF(0), thd->query());
        DBUG_RETURN(HA_ERR_UNSUPPORTED);
      }
    }

    if (lock_type == F_WRLCK) {
      if (tx->is_tx_read_only()) {
        my_error(ER_UPDATES_WITH_CONSISTENT_SNAPSHOT, MYF(0));
        DBUG_RETURN(HA_ERR_UNSUPPORTED);
      }

      /*
        SQL layer signals us to take a write lock. It does so when starting
        DML statement. We should put locks on the rows we're reading.

        Note: sometimes, external_lock() can be called without a prior
        ::store_lock call.  That's why we need to set lock_* members here,
        too.
      */
      m_lock_rows = RDB_LOCK_WRITE;

      if (thd->lex->sql_command == SQLCOM_CREATE_INDEX ||
          thd->lex->sql_command == SQLCOM_DROP_INDEX ||
          thd->lex->sql_command == SQLCOM_ALTER_TABLE) {
        tx->m_ddl_transaction = true;
      }
    }
    tx->m_n_mysql_tables_in_use++;
    rocksdb_register_tx(rocksdb_hton, thd, tx);
    tx->io_perf_start(&m_io_perf);
  }

  DBUG_RETURN(res);
}

/**
  @note
  A quote from ha_innobase::start_stmt():
  <quote>
  MySQL calls this function at the start of each SQL statement inside LOCK
  TABLES. Inside LOCK TABLES the ::external_lock method does not work to mark
  SQL statement borders.
  </quote>

  @return
    HA_EXIT_SUCCESS  OK
*/
int ha_rocksdb::start_stmt(THD *const thd, thr_lock_type lock_type) {
  DBUG_ENTER_FUNC();

  /*
    MariaDB: the following is a copy of the check in
    ha_rocksdb::external_lock:
  */
  int binlog_format = my_core::thd_binlog_format(thd);
  bool unsafe_for_binlog = THDVAR(ha_thd(), unsafe_for_binlog);
  if (lock_type >= TL_WRITE_ALLOW_WRITE &&
      !thd->rgi_slave && !unsafe_for_binlog &&
      binlog_format != BINLOG_FORMAT_ROW &&
      binlog_format != BINLOG_FORMAT_UNSPEC &&
      my_core::thd_binlog_filter_ok(thd)) {
    my_error(ER_REQUIRE_ROW_BINLOG_FORMAT, MYF(0));
    DBUG_RETURN(HA_ERR_UNSUPPORTED);
  }

  DBUG_ASSERT(thd != nullptr);

  Rdb_transaction *const tx = get_or_create_tx(thd);
  read_thd_vars(thd);
  rocksdb_register_tx(ht, thd, tx);
  tx->io_perf_start(&m_io_perf);

  DBUG_RETURN(HA_EXIT_SUCCESS);
}

/*
  Build a rocksdb::Range [index_number + offset1, index_number + offset2)
  using buf as backing storage for the two serialized index numbers.
*/
rocksdb::Range get_range(uint32_t i,
                         uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2],
                         int offset1, int offset2) {
  uchar *buf_begin = buf;
  uchar *buf_end = buf + Rdb_key_def::INDEX_NUMBER_SIZE;
  rdb_netbuf_store_index(buf_begin, i + offset1);
  rdb_netbuf_store_index(buf_end, i + offset2);

  return rocksdb::Range(
      rocksdb::Slice((const char *)buf_begin, Rdb_key_def::INDEX_NUMBER_SIZE),
      rocksdb::Slice((const char *)buf_end, Rdb_key_def::INDEX_NUMBER_SIZE));
}

static rocksdb::Range get_range(const Rdb_key_def &kd,
                                uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2],
                                int offset1, int offset2) {
  return get_range(kd.get_index_number(), buf, offset1, offset2);
}

/*
  Full range of one index; for a reverse column family the bounds are
  swapped (start = index+1, limit = index).
*/
rocksdb::Range get_range(const Rdb_key_def &kd,
                         uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2]) {
  if (kd.m_is_reverse_cf) {
    return myrocks::get_range(kd, buf, 1, 0);
  } else {
    return myrocks::get_range(kd, buf, 0, 1);
  }
}

rocksdb::Range
ha_rocksdb::get_range(const int &i,
                      uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2]) const {
  return myrocks::get_range(*m_key_descr_arr[i], buf);
}

/*
  Seek to the first key of index index_id and report whether the index has
  no remaining entries in the given column family.
*/
static bool is_myrocks_index_empty(rocksdb::ColumnFamilyHandle *cfh,
                                   const bool is_reverse_cf,
                                   const rocksdb::ReadOptions &read_opts,
                                   const uint index_id) {
  bool index_removed = false;
  uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE] = {0};
  rdb_netbuf_store_uint32(key_buf, index_id);
  const rocksdb::Slice key =
      rocksdb::Slice(reinterpret_cast<char *>(key_buf), sizeof(key_buf));
  std::unique_ptr<rocksdb::Iterator> it(rdb->NewIterator(read_opts, cfh));
  rocksdb_smart_seek(is_reverse_cf, it.get(), key);
  if (!it->Valid()) {
    index_removed = true;
  } else {
    if (memcmp(it->key().data(), key_buf, Rdb_key_def::INDEX_NUMBER_SIZE)) {
      // Key does not have same prefix
      index_removed = true;
    }
  }
  return index_removed;
}

/*
  Drop index thread's main logic
*/

void Rdb_drop_index_thread::run() {
  RDB_MUTEX_LOCK_CHECK(m_signal_mutex);

  for (;;) {
    // The stop flag might be set by shutdown command
    // after drop_index_thread releases signal_mutex
    // (i.e. while executing expensive Seek()). To prevent drop_index_thread
    // from entering long cond_timedwait, checking if stop flag
    // is true or not is needed, with drop_index_interrupt_mutex held.
    if (m_stop) {
      break;
    }

    // Wake up quickly (60s) while there are indexes pending removal,
    // otherwise only once a day.
    timespec ts;
    int sec = dict_manager.is_drop_index_empty()
                  ? 24 * 60 * 60  // no filtering
                  : 60;           // filtering set
    set_timespec(ts, sec);

    const auto ret MY_ATTRIBUTE((__unused__)) =
        mysql_cond_timedwait(&m_signal_cond, &m_signal_mutex, &ts);
    if (m_stop) {
      break;
    }
    // make sure, no program error is returned
    DBUG_ASSERT(ret == 0 || ret == ETIMEDOUT);
    RDB_MUTEX_UNLOCK_CHECK(m_signal_mutex);

    std::unordered_set<GL_INDEX_ID> indices;
    dict_manager.get_ongoing_drop_indexes(&indices);
    if (!indices.empty()) {
      std::unordered_set<GL_INDEX_ID> finished;
      rocksdb::ReadOptions read_opts;
      read_opts.total_order_seek = true;  // disable bloom filter

      for (const auto d : indices) {
        uint32 cf_flags = 0;
        if (!dict_manager.get_cf_flags(d.cf_id, &cf_flags)) {
          sql_print_error("RocksDB: Failed to get column family flags "
                          "from cf id %u. MyRocks data dictionary may "
                          "get corrupted.",
                          d.cf_id);
          abort_with_stack_traces();
        }
        rocksdb::ColumnFamilyHandle *cfh = cf_manager.get_cf(d.cf_id);
        DBUG_ASSERT(cfh);
        const bool is_reverse_cf = cf_flags & Rdb_key_def::REVERSE_CF_FLAG;

        if (is_myrocks_index_empty(cfh, is_reverse_cf, read_opts,
                                   d.index_id)) {
          finished.insert(d);
          continue;
        }
        // Drop whole SST files covered by the index range first (cheap),
        // then compact the remainder away.
        uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2];
        rocksdb::Range range = get_range(d.index_id, buf,
                                         is_reverse_cf ? 1 : 0,
                                         is_reverse_cf ? 0 : 1);
        rocksdb::Status status = DeleteFilesInRange(
            rdb->GetBaseDB(), cfh, &range.start, &range.limit);
        if (!status.ok()) {
          if (status.IsShutdownInProgress()) {
            break;
          }
          rdb_handle_io_error(status, RDB_IO_ERROR_BG_THREAD);
        }
        status = rdb->CompactRange(getCompactRangeOptions(), cfh,
                                   &range.start, &range.limit);
        if (!status.ok()) {
          if (status.IsShutdownInProgress()) {
            break;
          }
          rdb_handle_io_error(status, RDB_IO_ERROR_BG_THREAD);
        }
        if (is_myrocks_index_empty(cfh, is_reverse_cf, read_opts,
                                   d.index_id)) {
          finished.insert(d);
        }
      }

      if (!finished.empty()) {
        dict_manager.finish_drop_indexes(finished);
      }
    }
    RDB_MUTEX_LOCK_CHECK(m_signal_mutex);
  }

  RDB_MUTEX_UNLOCK_CHECK(m_signal_mutex);
}

/*
  Look up a table's definition in the DDL manager by its "./db/table" path.
  Returns nullptr when the table is unknown.
*/
Rdb_tbl_def *ha_rocksdb::get_table_if_exists(const char *const tablename) {
  DBUG_ASSERT(tablename != nullptr);

  std::string str;
  if (rdb_normalize_tablename(tablename, &str) != HA_EXIT_SUCCESS) {
    // We were not passed table name?
    DBUG_ASSERT(0);
    return nullptr;
  }

  return ddl_manager.find(str);
}

/*
  Note: the following function is called when the table is not open. That is,
  this->table==nullptr, pk_key_descr==nullptr, etc.

  tablename points to line in form "./dbname/tablename".
  @return
    HA_EXIT_SUCCESS  OK
    other            HA_ERR error code (can be SE-specific)
*/

int ha_rocksdb::delete_table(const char *const tablename) {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(tablename != nullptr);

  const std::unique_ptr<rocksdb::WriteBatch> wb = dict_manager.begin();
  rocksdb::WriteBatch *const batch = wb.get();

  /* Find the table in the hash */
  Rdb_tbl_def *const tbl = get_table_if_exists(tablename);
  if (!tbl) DBUG_RETURN(HA_ERR_NO_SUCH_TABLE);

  // Record the indexes as "ongoing drops" so the background drop-index
  // thread reclaims their data asynchronously.
  dict_manager.add_drop_table(tbl->m_key_descr_arr, tbl->m_key_count, batch);

  /*
    Remove the table entry in data dictionary (this will also remove it from
    the persistent data dictionary).
  */
  ddl_manager.remove(tbl, batch, true);

  int err = dict_manager.commit(batch);
  if (err) {
    DBUG_RETURN(err);
  }

  // Wake the drop-index thread so the physical cleanup starts promptly.
  rdb_drop_idx_thread.signal();

  DBUG_RETURN(HA_EXIT_SUCCESS);
}

/*
  Delete every row of every index of the given table, one key at a time
  (used e.g. for TRUNCATE-style operations). Returns HA_EXIT_SUCCESS or a
  transaction error.
*/
int ha_rocksdb::remove_rows(Rdb_tbl_def *const tbl) {
  const rocksdb::WriteOptions wo =
      rdb_get_rocksdb_write_options(handler::ha_thd());

  rocksdb::ReadOptions opts;
  opts.total_order_seek = true;
  Rdb_transaction *const tx = get_or_create_tx(table->in_use);

  char key_buf[MAX_KEY_LENGTH];
  uint key_len;
  ulonglong bytes_written = 0;
  /*
    Remove all records in each index.
    (This is not crash-safe, but it doesn't matter, because bulk row
    deletion will be handled on rocksdb side)
  */
  for (uint i = 0; i < tbl->m_key_count; i++) {
    const Rdb_key_def &kd = *tbl->m_key_descr_arr[i];
    kd.get_infimum_key(reinterpret_cast<uchar *>(key_buf), &key_len);
    rocksdb::ColumnFamilyHandle *cf = kd.get_cf();
    std::unique_ptr<rocksdb::Iterator> it(rdb->NewIterator(opts, cf));

    const rocksdb::Slice table_key(key_buf, key_len);
    it->Seek(table_key);
    while (it->Valid()) {
      const rocksdb::Slice key = it->key();
      // Stop once we walk off this index's key range.
      if (!kd.covers_key(key)) {
        break;
      }

      rocksdb::Status s;
      if (can_use_single_delete(i)) {
        s = rdb->SingleDelete(wo, cf, key);
      } else {
        s = rdb->Delete(wo, cf, key);
      }

      if (!s.ok()) {
        return tx->set_status_error(table->in_use, s, *m_pk_descr, m_tbl_def,
                                    m_table_handler);
      }
      bytes_written += key.size();
      it->Next();
    }
  }

  tx->update_bytes_written(bytes_written);

  return HA_EXIT_SUCCESS;
}

/**
  @return
    HA_EXIT_SUCCESS  OK
    other            HA_ERR error code (cannot be SE-specific)
*/
int ha_rocksdb::rename_table(const char *const from, const char *const to) {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(from != nullptr);
  DBUG_ASSERT(to != nullptr);

  std::string from_str;
  std::string to_str;
  std::string from_db;
  std::string to_db;

  int rc = rdb_normalize_tablename(from, &from_str);
  if (rc != HA_EXIT_SUCCESS) {
    DBUG_RETURN(rc);
  }

  rc = rdb_split_normalized_tablename(from_str, &from_db);
  if (rc != HA_EXIT_SUCCESS) {
    DBUG_RETURN(rc);
  }

  rc = rdb_normalize_tablename(to, &to_str);
  if (rc != HA_EXIT_SUCCESS) {
    DBUG_RETURN(rc);
  }

  rc = rdb_split_normalized_tablename(to_str, &to_db);
  if (rc != HA_EXIT_SUCCESS) {
    DBUG_RETURN(rc);
  }

  // If the user changed the database part of the name then validate that the
  // 'to' database exists.
  if (from_db != to_db && !rdb_database_exists(to_db)) {
    // If we return a RocksDB specific error code here we get
    // "error: 206 - Unknown error 206". InnoDB gets
    // "error -1 - Unknown error -1" so let's match them.
    DBUG_RETURN(-1);
  }

  DBUG_EXECUTE_IF("gen_sql_table_name", to_str = to_str + "#sql-test";);

  const std::unique_ptr<rocksdb::WriteBatch> wb = dict_manager.begin();
  rocksdb::WriteBatch *const batch = wb.get();
  // The rename must appear atomic to readers of the dictionary.
  dict_manager.lock();

  if (ddl_manager.rename(from_str, to_str, batch)) {
    rc = HA_ERR_NO_SUCH_TABLE;
  } else {
    rc = dict_manager.commit(batch);
  }
  dict_manager.unlock();

  DBUG_RETURN(rc);
}

/**
  check_if_incompatible_data() called if ALTER TABLE can't detect otherwise
  if new and old definition are compatible

  @details If there are no other explicit signs like changed number of
  fields this function will be called by compare_tables()
  (sql/sql_tables.cc) to decide should we rewrite whole table or only .frm
  file.
*/
bool ha_rocksdb::check_if_incompatible_data(HA_CREATE_INFO *const info,
                                            uint table_changes) {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(info != nullptr);

  // this function is needed only for online alter-table
  DBUG_RETURN(COMPATIBLE_DATA_NO);
}

/**
  Handle storage-engine hints from the SQL layer.

  @return
    HA_EXIT_SUCCESS  OK
*/
int ha_rocksdb::extra(enum ha_extra_function operation) {
  DBUG_ENTER_FUNC();

  switch (operation) {
  case HA_EXTRA_KEYREAD:
    m_keyread_only = true;
    break;
  case HA_EXTRA_NO_KEYREAD:
    m_keyread_only = false;
    break;
  case HA_EXTRA_FLUSH:
    /*
      If the table has blobs, then they are part of m_retrieved_record.
      This call invalidates them.
    */
    m_retrieved_record.clear();
    break;
  default:
    break;
  }

  DBUG_RETURN(HA_EXIT_SUCCESS);
}

/*
  Given a starting key and an ending key, estimate the number of rows that
  will exist between the two keys.
*/
ha_rows ha_rocksdb::records_in_range(uint inx, key_range *const min_key,
                                     key_range *const max_key) {
  DBUG_ENTER_FUNC();

  // Session variables can force a fixed estimate (for testing/tuning).
  ha_rows ret = THDVAR(ha_thd(), records_in_range);
  if (ret) {
    DBUG_RETURN(ret);
  }
  if (table->force_index) {
    const ha_rows force_rows = THDVAR(ha_thd(), force_index_records_in_range);
    if (force_rows) {
      DBUG_RETURN(force_rows);
    }
  }

  const Rdb_key_def &kd = *m_key_descr_arr[inx];

  // Pack the lower bound (or the index infimum when unbounded).
  uint size1 = 0;
  if (min_key) {
    size1 = kd.pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple,
                                min_key->key, min_key->keypart_map);
    if (min_key->flag == HA_READ_PREFIX_LAST_OR_PREV ||
        min_key->flag == HA_READ_PREFIX_LAST ||
        min_key->flag == HA_READ_AFTER_KEY) {
      kd.successor(m_sk_packed_tuple, size1);
    }
  } else {
    kd.get_infimum_key(m_sk_packed_tuple, &size1);
  }

  // Pack the upper bound (or the index supremum when unbounded).
  uint size2 = 0;
  if (max_key) {
    size2 = kd.pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple_old,
                                max_key->key, max_key->keypart_map);
    if (max_key->flag == HA_READ_PREFIX_LAST_OR_PREV ||
        max_key->flag == HA_READ_PREFIX_LAST ||
        max_key->flag == HA_READ_AFTER_KEY) {
      kd.successor(m_sk_packed_tuple_old, size2);
    }
    // pad the upper key with FFFFs to make sure it is more than the lower
    if (size1 > size2) {
      memset(m_sk_packed_tuple_old + size2, 0xff, size1 - size2);
      size2 = size1;
    }
  } else {
    kd.get_supremum_key(m_sk_packed_tuple_old, &size2);
  }

  const rocksdb::Slice slice1((const char *)m_sk_packed_tuple, size1);
  const rocksdb::Slice slice2((const char *)m_sk_packed_tuple_old, size2);

  // slice1 >= slice2 means no row will match
  if (slice1.compare(slice2) >= 0) {
    DBUG_RETURN(HA_EXIT_SUCCESS);
  }

  // Reverse column families store keys in descending order, so swap bounds.
  rocksdb::Range r(kd.m_is_reverse_cf ? slice2 : slice1,
                   kd.m_is_reverse_cf ? slice1 : slice2);

  uint64_t sz = 0;
  auto disk_size = kd.m_stats.m_actual_disk_size;
  if (disk_size == 0) disk_size = kd.m_stats.m_data_size;
  auto rows = kd.m_stats.m_rows;
  if (rows == 0 || disk_size == 0) {
    rows = 1;
    disk_size = ROCKSDB_ASSUMED_KEY_VALUE_DISK_SIZE;
  }

  // Getting statistics, including from Memtables
  uint8_t include_flags = rocksdb::DB::INCLUDE_FILES;
  rdb->GetApproximateSizes(kd.get_cf(), &r, 1, &sz, include_flags);
  // Scale the byte estimate into a row count using per-index stats.
  ret = rows * sz / disk_size;
  uint64_t memTableCount;
  rdb->GetApproximateMemTableStats(kd.get_cf(), r, &memTableCount, &sz);
  ret += memTableCount;

  /*
    GetApproximateSizes() gives estimates so ret might exceed stats.records.
    MySQL then decides to use full index scan rather than range scan, which
    is not efficient for most cases.
    To prevent this, changing estimated records slightly smaller than
    stats.records.
  */
  if (ret >= stats.records) {
    ret = stats.records * 0.99;
  }

  if (rocksdb_debug_optimizer_n_rows > 0) {
    ret = rocksdb_debug_optimizer_n_rows;
  } else if (ret == 0) {
    // Never return 0: the optimizer treats 0 as "no rows match".
    ret = 1;
  }

  DBUG_RETURN(ret);
}

/*
  Report the current auto-increment value to SHOW CREATE TABLE / CREATE
  TABLE ... LIKE unless the statement supplied one explicitly.
*/
void ha_rocksdb::update_create_info(HA_CREATE_INFO *const create_info) {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(create_info != nullptr);

  if (!(create_info->used_fields & HA_CREATE_USED_AUTO)) {
    create_info->auto_increment_value = m_tbl_def->m_auto_incr_val;
  }

  DBUG_VOID_RETURN;
}

/**
  @brief
  Doing manual compaction on OPTIMIZE TABLE in RocksDB.
  Compaction itself is executed by background thread in RocksDB, but
  CompactRange() waits until compaction completes so this function
  may take a long time.
  Since RocksDB dataset is allocated per index id, OPTIMIZE TABLE
  triggers manual compaction for all indexes of the table.
  @details
  Compaction range is from the beginning of the index id to
  the first row of the next index id. When using reverse order
  column family, the first row of the next index id should be
  the last row of the previous index id.
  @return
    HA_ADMIN_OK      OK
    other            HA_ADMIN error code
*/
int ha_rocksdb::optimize(THD *const thd, HA_CHECK_OPT *const check_opt) {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(thd != nullptr);
  DBUG_ASSERT(check_opt != nullptr);

  for (uint i = 0; i < table->s->keys; i++) {
    uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2];
    auto range = get_range(i, buf);
    const rocksdb::Status s = rdb->CompactRange(getCompactRangeOptions(),
                                                m_key_descr_arr[i]->get_cf(),
                                                &range.start, &range.limit);
    if (!s.ok()) {
      DBUG_RETURN(rdb_error_to_mysql(s));
    }
  }

  DBUG_RETURN(HA_EXIT_SUCCESS);
}

/*
  Recompute per-index statistics for this table by reading index-stats
  entries out of the SST file properties, then cache and persist them via
  the DDL manager. Returns HA_EXIT_SUCCESS or an error code.
*/
int ha_rocksdb::calculate_stats(const TABLE *const table_arg, THD *const thd,
                                HA_CHECK_OPT *const check_opt) {
  DBUG_ENTER_FUNC();

  // find per column family key ranges which need to be queried
  std::unordered_map<rocksdb::ColumnFamilyHandle *, std::vector<rocksdb::Range>>
      ranges;
  std::unordered_set<GL_INDEX_ID> ids_to_check;
  std::unordered_map<GL_INDEX_ID, uint> ids_to_keyparts;
  // One [start,limit) key pair per index, all kept alive in this buffer.
  std::vector<uchar> buf(table_arg->s->keys * 2 *
                         Rdb_key_def::INDEX_NUMBER_SIZE);
  for (uint i = 0; i < table_arg->s->keys; i++) {
    const auto bufp = &buf[i * 2 * Rdb_key_def::INDEX_NUMBER_SIZE];
    const Rdb_key_def &kd = *m_key_descr_arr[i];
    ranges[kd.get_cf()].push_back(get_range(i, bufp));
    ids_to_check.insert(kd.get_gl_index_id());
    ids_to_keyparts[kd.get_gl_index_id()] = kd.get_key_parts();
  }

  // for analyze statements, force flush on memtable to get accurate cardinality
  Rdb_cf_manager &cf_manager = rdb_get_cf_manager();
  if (thd != nullptr && THDVAR(thd, flush_memtable_on_analyze) &&
      !rocksdb_pause_background_work) {
    for (auto it : ids_to_check) {
      rdb->Flush(rocksdb::FlushOptions(), cf_manager.get_cf(it.cf_id));
    }
  }

  // get RocksDB table properties for these ranges
  rocksdb::TablePropertiesCollection props;
  for (auto it : ranges) {
    const auto old_size MY_ATTRIBUTE((__unused__)) = props.size();
    const auto status = rdb->GetPropertiesOfTablesInRange(
        it.first, &it.second[0], it.second.size(), &props);
    DBUG_ASSERT(props.size() >= old_size);
    if (!status.ok()) {
      DBUG_RETURN(
          rdb_error_to_mysql(status, "Could not access RocksDB properties"));
    }
  }

  int num_sst = 0;
  // group stats per index id
  std::unordered_map<GL_INDEX_ID, Rdb_index_stats> stats;
  for (const auto &it : ids_to_check) {
    // Initialize the stats to 0. If there are no files that contain
    // this gl_index_id, then 0 should be stored for the cached stats.
    stats[it] = Rdb_index_stats(it);
    DBUG_ASSERT(ids_to_keyparts.count(it) > 0);
    stats[it].m_distinct_keys_per_prefix.resize(ids_to_keyparts[it]);
  }

  for (const auto &it : props) {
    std::vector<Rdb_index_stats> sst_stats;
    Rdb_tbl_prop_coll::read_stats_from_tbl_props(it.second, &sst_stats);
    /*
      sst_stats is a list of index statistics for indexes that have entries
      in the current SST file.
    */
    for (const auto &it1 : sst_stats) {
      /*
        Only update statistics for indexes that belong to this SQL table.

        The reason is: We are walking through all SST files that have
        entries from this table (and so can compute good statistics). For
        other SQL tables, it can be that we're only seeing a small fraction
        of table's entries (and so we can't update statistics based on
        that).
      */
      if (ids_to_check.find(it1.m_gl_index_id) == ids_to_check.end()) continue;

      auto kd = ddl_manager.safe_find(it1.m_gl_index_id);
      DBUG_ASSERT(kd != nullptr);
      stats[it1.m_gl_index_id].merge(it1, true, kd->max_storage_fmt_length());
    }
    num_sst++;
  }

  // set and persist new stats
  ddl_manager.set_stats(stats);
  ddl_manager.persist_stats(true);

  DBUG_RETURN(HA_EXIT_SUCCESS);
}

/*
  @return
    HA_ADMIN_OK      OK
    other            HA_ADMIN error code
*/
int ha_rocksdb::analyze(THD *const thd, HA_CHECK_OPT *const check_opt) {
  DBUG_ENTER_FUNC();

  if (table && calculate_stats(table, thd, check_opt) != HA_EXIT_SUCCESS) {
    DBUG_RETURN(HA_ADMIN_FAILED);
  }

  DBUG_RETURN(HA_ADMIN_OK);
}

void ha_rocksdb::get_auto_increment(ulonglong off, ulonglong inc,
                                    ulonglong nb_desired_values,
                                    ulonglong *const first_value,
                                    ulonglong *const nb_reserved_values) {
  /*
    MySQL has a somewhat complicated way of handling the auto-increment
    value. The first time get_auto_increment is called for a statement,
    nb_desired_values is the estimate for how many values will be needed.
    The engine can then reserve some values, and those will be automatically
    used by MySQL, until a hard-coded value shows up in the insert
    statement, after which MySQL again calls this function to reset its
    starting value.

    For simplicity we will just ignore nb_desired_values - we aren't going
    to reserve any extra values for a multi-insert statement. Each row will
    simply acquire the next value as needed and we will always tell MySQL
    that we only reserved 1 value. Since we are using an atomic value for
    m_auto_incr_val this should be safe - if we had to grab a mutex, doing
    an actual reserve of some values might be a better solution.
   */
  DEBUG_SYNC(ha_thd(), "rocksdb.autoinc_vars");

  if (off > inc) {
    off = 1;
  }

  longlong new_val;

  // Local variable reference to simplify code below
  std::atomic<longlong> &auto_incr = m_tbl_def->m_auto_incr_val;

  if (inc == 1 && off == 1) {
    // Optimization for the standard case where we are always simply
    // incrementing from the last position

    // Use CAS operation in a loop to make sure we atomically get the next
    // auto increment value while ensuring that we don't wrap around to a
    // negative number.
    new_val = auto_incr;
    while (new_val != std::numeric_limits<longlong>::max()) {
      if (auto_incr.compare_exchange_weak(new_val, new_val + 1)) {
        break;
      }
    }
  } else {
    // The next value can be more complicated if either `inc` or 'off' is not 1
    longlong last_val = auto_incr;

    // Loop until we can correctly update the atomic value
    do {
      if (((last_val - off) / inc) ==
          (std::numeric_limits<longlong>::max() - off) / inc) {
        // The 'last_val' value is already equal to or larger than the largest
        // value in the sequence.  Continuing would wrap around (technically
        // the behavior would be undefined).  What should we do?
        // We could:
        //   1) set the new value to the last possible number in our sequence
        //      as described above.  The problem with this is that this
        //      number could be smaller than a value in an existing row.
        //   2) set the new value to the largest possible number.  This number
        //      may not be in our sequence, but it is guaranteed to be equal
        //      to or larger than any other value already inserted.
        //
        //  For now I'm going to take option @2.
        new_val = std::numeric_limits<longlong>::max();
        auto_incr = new_val;  // Store the largest value into auto_incr
        break;
      }

      // Calculate the next value in the auto increment series:
      //   offset + N * increment
      // where N is 0, 1, 2, ...
      //
      // For further information please visit:
      // http://dev.mysql.com/doc/refman/5.7/en/replication-options-master.html
      //
      // The following is confusing so here is an explanation:
      // To get the next number in the sequence above you subtract out
      // the offset, calculate the next sequence (N * increment) and then add
      // the offset back in.
      new_val = (((last_val - off) + (inc - 1)) / inc) * inc + off;

      // Attempt to store the new value (plus 1 since m_auto_incr_val contains
      // the next available value) into the atomic value.  If the current
      // value no longer matches what we have in 'last_val' this will fail and
      // we will repeat the loop (`last_val` will automatically get updated
      // with the current value).
    } while (!auto_incr.compare_exchange_weak(last_val, new_val + 1));
  }

  *first_value = new_val;
  *nb_reserved_values = 1;
}

#ifndef DBUG_OFF
/* Debugger help function */
static char dbug_item_print_buf[512];

/* Render an Item tree into a static buffer for use from a debugger. */
const char *dbug_print_item(Item *const item) {
  char *const buf = dbug_item_print_buf;
  String str(buf, sizeof(dbug_item_print_buf), &my_charset_bin);
  str.length(0);
  if (!item) return "(Item*)nullptr";
  item->print(&str, QT_ORDINARY);
  // If String had to reallocate, the result no longer lives in our static
  // buffer and cannot be returned safely.
  if (str.c_ptr() == buf)
    return buf;
  else
    return "Couldn't fit into buffer";
}
#endif /*DBUG_OFF*/

/**
  SQL layer calls this function to push an index condition.

  @details
    The condition is for index keyno (only one condition can be pushed at a
    time).
    The caller guarantees that condition refers only to index fields; besides
    that, fields must have

      $field->part_of_key.set_bit(keyno)

    which means that

      (handler->index_flags(keyno, $keypart, 0) & HA_KEYREAD_ONLY) == 1

    which means that field value can be restored from the index tuple.

  @return
    Part of condition we couldn't check (always nullptr).
*/
class Item *ha_rocksdb::idx_cond_push(uint keyno, class Item *const idx_cond) {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(keyno != MAX_KEY);
  DBUG_ASSERT(idx_cond != nullptr);

  pushed_idx_cond = idx_cond;
  pushed_idx_cond_keyno = keyno;
  in_range_check_pushed_down = TRUE;

  /* We will check the whole condition */
  DBUG_RETURN(nullptr);
}

/*
  Checks if inplace alter is supported for a given operation.
*/
my_core::enum_alter_inplace_result ha_rocksdb::check_if_supported_inplace_alter(
    TABLE *altered_table, my_core::Alter_inplace_info *const ha_alter_info) {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(ha_alter_info != nullptr);

  // Reject the ALTER if it needs anything beyond index add/drop (and
  // partitioned-table variants thereof).
  if (ha_alter_info->handler_flags &
      ~(my_core::Alter_inplace_info::DROP_INDEX |
        my_core::Alter_inplace_info::DROP_UNIQUE_INDEX |
        my_core::Alter_inplace_info::ADD_INDEX |
        my_core::Alter_inplace_info::ALTER_PARTITIONED |
        my_core::Alter_inplace_info::ADD_UNIQUE_INDEX)) {
    DBUG_RETURN(my_core::HA_ALTER_INPLACE_NOT_SUPPORTED);
  }

  /* We don't support unique keys on table w/ no primary keys */
  if ((ha_alter_info->handler_flags &
       my_core::Alter_inplace_info::ADD_UNIQUE_INDEX) &&
      has_hidden_pk(altered_table)) {
    DBUG_RETURN(my_core::HA_ALTER_INPLACE_NOT_SUPPORTED);
  }

  DBUG_RETURN(my_core::HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE);
}

/**
  Allows the storage engine to update internal structures with concurrent
  writes blocked. If check_if_supported_inplace_alter() returns
  HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE or
  HA_ALTER_INPLACE_SHARED_AFTER_PREPARE, this function is called with
  exclusive lock otherwise the same level of locking as for
  inplace_alter_table() will be used.

  @note Storage engines are responsible for reporting any errors by
  calling my_error()/print_error()

  @note If this function reports error, commit_inplace_alter_table()
  will be called with commit= false.

  @note For partitioning, failing to prepare one partition, means that
  commit_inplace_alter_table() will be called to roll back changes for
  all partitions. This means that commit_inplace_alter_table() might be
  called without prepare_inplace_alter_table() having been called first
  for a given partition.

  @param    altered_table     TABLE object for new version of table.
  @param    ha_alter_info     Structure describing changes to be done
                              by ALTER TABLE and holding data used
                              during in-place alter.

  @retval   true              Error
  @retval   false             Success
*/
bool ha_rocksdb::prepare_inplace_alter_table(
    TABLE *const altered_table,
    my_core::Alter_inplace_info *const ha_alter_info) {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(altered_table != nullptr);
  DBUG_ASSERT(ha_alter_info != nullptr);

  const uint old_n_keys = m_tbl_def->m_key_count;
  uint new_n_keys = altered_table->s->keys;

  // A hidden PK occupies one extra key slot not visible in s->keys.
  if (has_hidden_pk(altered_table)) {
    new_n_keys += 1;
  }

  const TABLE *const old_table = table;
  std::shared_ptr<Rdb_key_def> *const old_key_descr =
      m_tbl_def->m_key_descr_arr;
  std::shared_ptr<Rdb_key_def> *const new_key_descr =
      new std::shared_ptr<Rdb_key_def>[new_n_keys];

  Rdb_tbl_def *const new_tdef = new Rdb_tbl_def(m_tbl_def->full_tablename());
  new_tdef->m_key_descr_arr = new_key_descr;
  new_tdef->m_key_count = new_n_keys;
  new_tdef->m_auto_incr_val =
      m_tbl_def->m_auto_incr_val.load(std::memory_order_relaxed);
  new_tdef->m_hidden_pk_val =
      m_tbl_def->m_hidden_pk_val.load(std::memory_order_relaxed);

  if (ha_alter_info->handler_flags &
      (my_core::Alter_inplace_info::DROP_INDEX |
       my_core::Alter_inplace_info::DROP_UNIQUE_INDEX |
       my_core::Alter_inplace_info::ADD_INDEX |
       my_core::Alter_inplace_info::ADD_UNIQUE_INDEX) &&
      create_key_defs(altered_table, new_tdef, table, m_tbl_def)) {
    /* Delete the new key descriptors */
    delete[] new_key_descr;

    /*
      Explicitly mark as nullptr so we don't accidentally remove entries
      from data dictionary on cleanup (or cause double delete[]).
    */
    new_tdef->m_key_descr_arr = nullptr;
    delete new_tdef;

    my_error(ER_KEY_CREATE_DURING_ALTER, MYF(0));
    DBUG_RETURN(HA_EXIT_FAILURE);
  }

  std::unordered_set<std::shared_ptr<Rdb_key_def>> added_indexes;
  std::unordered_set<GL_INDEX_ID> dropped_index_ids;

  uint i;
  uint j;

  /* Determine which(if any) key definition(s) need to be dropped */
  for (i = 0; i < ha_alter_info->index_drop_count; i++) {
    const KEY *const dropped_key = ha_alter_info->index_drop_buffer[i];
    for (j = 0; j < old_n_keys; j++) {
      const KEY *const old_key =
          &old_table->key_info[old_key_descr[j]->get_keyno()];

      if (!compare_keys(old_key, dropped_key)) {
        dropped_index_ids.insert(old_key_descr[j]->get_gl_index_id());
        break;
      }
    }
  }

  /* Determine which(if any) key definitions(s) need to be added */
  int identical_indexes_found = 0;
  for (i = 0; i < ha_alter_info->index_add_count; i++) {
    const KEY *const added_key =
        &ha_alter_info->key_info_buffer[ha_alter_info->index_add_buffer[i]];
    for (j = 0; j < new_n_keys; j++) {
      const KEY *const new_key =
          &altered_table->key_info[new_key_descr[j]->get_keyno()];
      if (!compare_keys(new_key, added_key)) {
        /*
          Check for cases where an 'identical' index is being dropped and
          re-added in a single ALTER statement.  Turn this into a no-op as
          the index has not changed.

          E.G. Unique index -> non-unique index requires no change

          Note that cases where the index name remains the same but the
          key-parts are changed is already handled in
          create_inplace_key_defs. In these cases the index needs to be
          rebuilt.
*/ if (dropped_index_ids.count(new_key_descr[j]->get_gl_index_id())) { dropped_index_ids.erase(new_key_descr[j]->get_gl_index_id()); identical_indexes_found++; } else { added_indexes.insert(new_key_descr[j]); } break; } } } const uint n_dropped_keys = ha_alter_info->index_drop_count - identical_indexes_found; const uint n_added_keys = ha_alter_info->index_add_count - identical_indexes_found; DBUG_ASSERT(dropped_index_ids.size() == n_dropped_keys); DBUG_ASSERT(added_indexes.size() == n_added_keys); DBUG_ASSERT(new_n_keys == (old_n_keys - n_dropped_keys + n_added_keys)); ha_alter_info->handler_ctx = new Rdb_inplace_alter_ctx( new_tdef, old_key_descr, new_key_descr, old_n_keys, new_n_keys, added_indexes, dropped_index_ids, n_added_keys, n_dropped_keys); DBUG_RETURN(HA_EXIT_SUCCESS); } /** Alter the table structure in-place with operations specified using HA_ALTER_FLAGS and Alter_inplace_info. The level of concurrency allowed during this operation depends on the return value from check_if_supported_inplace_alter(). @note Storage engines are responsible for reporting any errors by calling my_error()/print_error() @note If this function reports error, commit_inplace_alter_table() will be called with commit= false. @param altered_table TABLE object for new version of table. @param ha_alter_info Structure describing changes to be done by ALTER TABLE and holding data used during in-place alter. 
  @retval   true              Error
  @retval   false             Success
*/
bool ha_rocksdb::inplace_alter_table(
    TABLE *const altered_table,
    my_core::Alter_inplace_info *const ha_alter_info) {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(altered_table != nullptr);
  DBUG_ASSERT(ha_alter_info != nullptr);
  DBUG_ASSERT(ha_alter_info->handler_ctx != nullptr);

  /* The context was allocated by prepare_inplace_alter_table(). */
  Rdb_inplace_alter_ctx *const ctx =
      static_cast<Rdb_inplace_alter_ctx *>(ha_alter_info->handler_ctx);

  /*
    Only index addition needs work during this phase; index drops are
    recorded in the context and applied later (at commit time).
  */
  if (ha_alter_info->handler_flags &
      (my_core::Alter_inplace_info::ADD_INDEX |
       my_core::Alter_inplace_info::ADD_UNIQUE_INDEX)) {
    /*
      Buffers need to be set up again to account for new, possibly longer
      secondary keys.
    */
    free_key_buffers();

    DBUG_ASSERT(ctx != nullptr);

    /*
      If adding unique index, allocate special buffers for duplicate
      checking (the third argument enables the duplicate-check buffers).
    */
    int err;
    if ((err = alloc_key_buffers(
             altered_table, ctx->m_new_tdef,
             ha_alter_info->handler_flags &
                 my_core::Alter_inplace_info::ADD_UNIQUE_INDEX))) {
      my_error(ER_OUT_OF_RESOURCES, MYF(0));
      DBUG_RETURN(err);
    }

    /* Populate all new secondary keys by scanning the primary key. */
    if ((err = inplace_populate_sk(altered_table, ctx->m_added_indexes))) {
      my_error(ER_SK_POPULATE_DURING_ALTER, MYF(0));
      DBUG_RETURN(HA_EXIT_FAILURE);
    }
  }

  /* Debug-only hook to simulate a failure and exercise the rollback path. */
  DBUG_EXECUTE_IF("myrocks_simulate_index_create_rollback", {
    dbug_create_err_inplace_alter();
    DBUG_RETURN(HA_EXIT_FAILURE);
  };);

  DBUG_RETURN(HA_EXIT_SUCCESS);
}

/**
  Scan the Primary Key index entries and populate the new secondary keys.
*/ int ha_rocksdb::inplace_populate_sk( TABLE *const new_table_arg, const std::unordered_set<std::shared_ptr<Rdb_key_def>> &indexes) { DBUG_ENTER_FUNC(); int res = HA_EXIT_SUCCESS; const std::unique_ptr<rocksdb::WriteBatch> wb = dict_manager.begin(); rocksdb::WriteBatch *const batch = wb.get(); /* Update the data dictionary */ std::unordered_set<GL_INDEX_ID> create_index_ids; for (const auto &index : indexes) { create_index_ids.insert(index->get_gl_index_id()); } dict_manager.add_create_index(create_index_ids, batch); res = dict_manager.commit(batch); if (res != HA_EXIT_SUCCESS) { return res; } /* Add uncommitted key definitons to ddl_manager. We need to do this so that the property collector can find this keydef when it needs to update stats. The property collector looks for the keydef in the data dictionary, but it won't be there yet since this key definition is still in the creation process. */ ddl_manager.add_uncommitted_keydefs(indexes); const bool hidden_pk_exists = has_hidden_pk(table); Rdb_transaction *tx = get_or_create_tx(table->in_use); /* There is one specific scenario where m_sst_info may not be nullptr. This happens if the handler we're using happens to be the handler where the PK bulk load was done on. The sequence of events that lead to this is as follows (T1 is PK bulk load, T2 is SK alter table): T1: Execute last INSERT statement T1: Return TABLE and handler object back to Table_cache_manager T1: Close connection T2: Execute ALTER statement T2: Take same TABLE/handler from Table_cache_manager T2: Call closefrm which will call finalize_bulk_load on every other open table/handler *except* the one it's on. T2: Acquire stale snapshot of PK T1: Call finalize_bulk_load This is rare because usually, closefrm will call the destructor (and thus finalize_bulk_load) on the handler where PK bulk load is done. 
However, if the thread ids of the bulk load thread and the alter thread differ by a multiple of table_cache_instances (8 by default), then they hash to the same bucket in Table_cache_manager and the alter thread will not not call the destructor on the handler it is holding. Thus, its m_sst_info will not be nullptr. At this point, it is safe to refresh the snapshot because we know all other open handlers have been closed at this point, and the one we're on is the only one left. */ if (m_sst_info) { if ((res = finalize_bulk_load())) { DBUG_RETURN(res); } tx->commit(); } const ulonglong rdb_merge_buf_size = THDVAR(ha_thd(), merge_buf_size); const ulonglong rdb_merge_combine_read_size = THDVAR(ha_thd(), merge_combine_read_size); const ulonglong rdb_merge_tmp_file_removal_delay = THDVAR(ha_thd(), merge_tmp_file_removal_delay_ms); for (const auto &index : indexes) { bool is_unique_index = new_table_arg->key_info[index->get_keyno()].flags & HA_NOSAME; Rdb_index_merge rdb_merge( thd_rocksdb_tmpdir(), rdb_merge_buf_size, rdb_merge_combine_read_size, rdb_merge_tmp_file_removal_delay, index->get_cf()); if ((res = rdb_merge.init())) { DBUG_RETURN(res); } /* Note: We pass in the currently existing table + tbl_def object here, as the pk index position may have changed in the case of hidden primary keys. 
*/ const uint pk = pk_index(table, m_tbl_def); ha_index_init(pk, true); /* Scan each record in the primary key in order */ for (res = index_first(table->record[0]); res == 0; res = index_next(table->record[0])) { longlong hidden_pk_id = 0; if (hidden_pk_exists && (res = read_hidden_pk_id_from_rowkey(&hidden_pk_id))) { // NO_LINT_DEBUG sql_print_error("Error retrieving hidden pk id."); ha_index_end(); DBUG_RETURN(res); } /* Create new secondary index entry */ const int new_packed_size = index->pack_record( new_table_arg, m_pack_buffer, table->record[0], m_sk_packed_tuple, &m_sk_tails, should_store_row_debug_checksums(), hidden_pk_id, 0, nullptr, nullptr, m_ttl_bytes); const rocksdb::Slice key = rocksdb::Slice( reinterpret_cast<const char *>(m_sk_packed_tuple), new_packed_size); const rocksdb::Slice val = rocksdb::Slice(reinterpret_cast<const char *>(m_sk_tails.ptr()), m_sk_tails.get_current_pos()); /* Add record to offset tree in preparation for writing out to disk in sorted chunks. */ if ((res = rdb_merge.add(key, val))) { ha_index_end(); DBUG_RETURN(res); } } if (res != HA_ERR_END_OF_FILE) { // NO_LINT_DEBUG sql_print_error("Error retrieving index entry from primary key."); ha_index_end(); DBUG_RETURN(res); } ha_index_end(); /* Perform an n-way merge of n sorted buffers on disk, then writes all results to RocksDB via SSTFileWriter API. */ rocksdb::Slice merge_key; rocksdb::Slice merge_val; struct unique_sk_buf_info sk_info; sk_info.dup_sk_buf = m_dup_sk_packed_tuple; sk_info.dup_sk_buf_old = m_dup_sk_packed_tuple_old; while ((res = rdb_merge.next(&merge_key, &merge_val)) == 0) { /* Perform uniqueness check if needed */ if (is_unique_index) { if (check_duplicate_sk(new_table_arg, *index, &merge_key, &sk_info)) { /* Duplicate entry found when trying to create unique secondary key. We need to unpack the record into new_table_arg->record[0] as it is used inside print_keydup_error so that the error message shows the duplicate record. 
*/ if (index->unpack_record(new_table_arg, new_table_arg->record[0], &merge_key, nullptr, m_verify_row_debug_checksums)) { /* Should never reach here */ DBUG_ASSERT(0); } print_keydup_error(new_table_arg, &new_table_arg->key_info[index->get_keyno()], MYF(0)); DBUG_RETURN(ER_DUP_ENTRY); } } /* Insert key and slice to SST via SSTFileWriter API. */ if ((res = bulk_load_key(tx, *index, merge_key, merge_val, false))) { break; } } /* Here, res == -1 means that we are finished, while > 0 means an error occurred. */ if (res > 0) { // NO_LINT_DEBUG sql_print_error("Error while bulk loading keys in external merge sort."); DBUG_RETURN(res); } if ((res = tx->finish_bulk_load())) { // NO_LINT_DEBUG sql_print_error("Error finishing bulk load."); DBUG_RETURN(res); } } /* Explicitly tell jemalloc to clean up any unused dirty pages at this point. See https://reviews.facebook.net/D63723 for more details. */ purge_all_jemalloc_arenas(); DBUG_EXECUTE_IF("crash_during_online_index_creation", DBUG_SUICIDE();); DBUG_RETURN(res); } /** Commit or rollback the changes made during prepare_inplace_alter_table() and inplace_alter_table() inside the storage engine. Note that in case of rollback the allowed level of concurrency during this operation will be the same as for inplace_alter_table() and thus might be higher than during prepare_inplace_alter_table(). (For example, concurrent writes were blocked during prepare, but might not be during rollback). @note Storage engines are responsible for reporting any errors by calling my_error()/print_error() @note If this function with commit= true reports error, it will be called again with commit= false. @note In case of partitioning, this function might be called for rollback without prepare_inplace_alter_table() having been called first. 
  Also partitioned tables sets ha_alter_info->group_commit_ctx to a NULL
  terminated array of the partitions handlers and if all of them are
  committed as one, then group_commit_ctx should be set to NULL to indicate
  to the partitioning handler that all partitions handlers are committed.

  @see prepare_inplace_alter_table().

  @param    altered_table     TABLE object for new version of table.
  @param    ha_alter_info     Structure describing changes to be done
                              by ALTER TABLE and holding data used
                              during in-place alter.
  @param    commit            True => Commit, False => Rollback.

  @retval   true              Error
  @retval   false             Success
*/
bool ha_rocksdb::commit_inplace_alter_table(
    my_core::TABLE *const altered_table,
    my_core::Alter_inplace_info *const ha_alter_info, bool commit) {
  DBUG_ENTER_FUNC();

  DBUG_ASSERT(altered_table != nullptr);
  DBUG_ASSERT(ha_alter_info != nullptr);

  Rdb_inplace_alter_ctx *const ctx0 =
      static_cast<Rdb_inplace_alter_ctx *>(ha_alter_info->handler_ctx);

  DEBUG_SYNC(ha_thd(), "rocksdb.commit_in_place_alter_table");

  /*
    IMPORTANT: When rollback is requested, mysql will abort with an
    assertion failure. That means every failed commit during inplace alter
    table will result in a fatal error on the server. Indexes ongoing
    creation will be detected when the server restarts, and dropped.

    For partitioned tables, a rollback call to this function (commit ==
    false) is done for each partition.  A successful commit call only
    executes once for all partitions.
  */
  if (!commit) {
    /* If ctx has not been created yet, nothing to do here */
    if (!ctx0) {
      DBUG_RETURN(HA_EXIT_SUCCESS);
    }

    /*
      Cannot call destructor for Rdb_tbl_def directly because we don't want
      to erase the mappings inside the ddl_manager, as the old_key_descr is
      still using them.
    */
    if (ctx0->m_new_key_descr) {
      /* Delete the new key descriptors */
      for (uint i = 0; i < ctx0->m_new_tdef->m_key_count; i++) {
        ctx0->m_new_key_descr[i] = nullptr;
      }

      delete[] ctx0->m_new_key_descr;
      ctx0->m_new_key_descr = nullptr;
      /* Detach the array before deleting so ~Rdb_tbl_def cannot touch it. */
      ctx0->m_new_tdef->m_key_descr_arr = nullptr;

      delete ctx0->m_new_tdef;
    }

    /* Remove uncommitted key definitons from ddl_manager */
    ddl_manager.remove_uncommitted_keydefs(ctx0->m_added_indexes);

    /* Rollback any partially created indexes */
    dict_manager.rollback_ongoing_index_creation();

    DBUG_RETURN(HA_EXIT_SUCCESS);
  }

  DBUG_ASSERT(ctx0);

  /*
    For partitioned tables, we need to commit all changes to all tables at
    once, unlike in the other inplace alter API methods.
  */
  inplace_alter_handler_ctx **ctx_array;
  inplace_alter_handler_ctx *ctx_single[2];

  if (ha_alter_info->group_commit_ctx) {
    DBUG_EXECUTE_IF("crash_during_index_creation_partition", DBUG_SUICIDE(););
    ctx_array = ha_alter_info->group_commit_ctx;
  } else {
    ctx_single[0] = ctx0;
    ctx_single[1] = nullptr;
    ctx_array = ctx_single;
  }

  DBUG_ASSERT(ctx0 == ctx_array[0]);
  /* Signal the partitioning layer that all partitions commit together. */
  ha_alter_info->group_commit_ctx = nullptr;

  if (ha_alter_info->handler_flags &
      (my_core::Alter_inplace_info::DROP_INDEX |
       my_core::Alter_inplace_info::DROP_UNIQUE_INDEX |
       my_core::Alter_inplace_info::ADD_INDEX |
       my_core::Alter_inplace_info::ADD_UNIQUE_INDEX)) {
    const std::unique_ptr<rocksdb::WriteBatch> wb = dict_manager.begin();
    rocksdb::WriteBatch *const batch = wb.get();
    std::unordered_set<GL_INDEX_ID> create_index_ids;

    /* Switch this handler over to the new table definition. */
    m_tbl_def = ctx0->m_new_tdef;
    m_key_descr_arr = m_tbl_def->m_key_descr_arr;
    m_pk_descr = m_key_descr_arr[pk_index(altered_table, m_tbl_def)];

    /* All dictionary updates below happen under the dict_manager lock. */
    dict_manager.lock();
    for (inplace_alter_handler_ctx **pctx = ctx_array; *pctx; pctx++) {
      Rdb_inplace_alter_ctx *const ctx =
          static_cast<Rdb_inplace_alter_ctx *>(*pctx);

      /* Mark indexes to be dropped */
      dict_manager.add_drop_index(ctx->m_dropped_index_ids, batch);

      for (const auto &index : ctx->m_added_indexes) {
        create_index_ids.insert(index->get_gl_index_id());
      }

      if (ddl_manager.put_and_write(ctx->m_new_tdef, batch)) {
        /*
          Failed to write new entry into data dictionary, this should never
          happen.
        */
        DBUG_ASSERT(0);
      }

      /*
        Remove uncommitted key definitons from ddl_manager, as they are now
        committed into the data dictionary.
      */
      ddl_manager.remove_uncommitted_keydefs(ctx->m_added_indexes);
    }

    if (dict_manager.commit(batch)) {
      /*
        Should never reach here. We assume MyRocks will abort if commit
        fails.
      */
      DBUG_ASSERT(0);
    }

    dict_manager.unlock();

    /* Mark ongoing create indexes as finished/remove from data dictionary */
    dict_manager.finish_indexes_operation(
        create_index_ids, Rdb_key_def::DDL_CREATE_INDEX_ONGOING);

    /*
      We need to recalculate the index stats here manually.  The reason is
      that the secondary index does not exist inside m_index_num_to_keydef
      until it is committed to the data dictionary, which prevents us from
      updating the stats normally as the ddl_manager cannot find the proper
      gl_index_ids yet during adjust_stats calls.
    */
    if (calculate_stats(altered_table, nullptr, nullptr)) {
      /* Failed to update index statistics, should never happen */
      DBUG_ASSERT(0);
    }

    /* Wake the background thread that physically drops the old indexes. */
    rdb_drop_idx_thread.signal();
  }

  DBUG_RETURN(HA_EXIT_SUCCESS);
}

/* Helpers for exporting RocksDB ticker counters as MySQL status variables. */
#define SHOW_FNAME(name) rocksdb_show_##name

/* Defines a SHOW function that refreshes one ticker and exposes it. */
#define DEF_SHOW_FUNC(name, key)                                           \
  static int SHOW_FNAME(name)(MYSQL_THD thd, SHOW_VAR * var, char *buff) { \
    rocksdb_status_counters.name =                                         \
        rocksdb_stats->getTickerCount(rocksdb::key);                       \
    var->type = SHOW_LONGLONG;                                             \
    var->value = (char *)&rocksdb_status_counters.name;                    \
    return HA_EXIT_SUCCESS;                                                \
  }

#define DEF_STATUS_VAR(name) \
  { "rocksdb_" #name, (char *)&SHOW_FNAME(name), SHOW_FUNC }

#define DEF_STATUS_VAR_PTR(name, ptr, option) \
  { "rocksdb_" name, (char *)ptr, option }

#define DEF_STATUS_VAR_FUNC(name, ptr, option) \
  { name, reinterpret_cast<char *>(ptr), option }

/* Cached copies of RocksDB ticker values, filled by the SHOW functions. */
struct rocksdb_status_counters_t {
  uint64_t block_cache_miss;
  uint64_t block_cache_hit;
  uint64_t block_cache_add;
  uint64_t block_cache_index_miss;
  uint64_t block_cache_index_hit;
  uint64_t
block_cache_filter_miss;
  uint64_t block_cache_filter_hit;
  uint64_t block_cache_data_miss;
  uint64_t block_cache_data_hit;
  uint64_t bloom_filter_useful;
  uint64_t memtable_hit;
  uint64_t memtable_miss;
  uint64_t compaction_key_drop_new;
  uint64_t compaction_key_drop_obsolete;
  uint64_t compaction_key_drop_user;
  uint64_t number_keys_written;
  uint64_t number_keys_read;
  uint64_t number_keys_updated;
  uint64_t bytes_written;
  uint64_t bytes_read;
  uint64_t no_file_closes;
  uint64_t no_file_opens;
  uint64_t no_file_errors;
  uint64_t stall_micros;
  uint64_t rate_limit_delay_millis;
  uint64_t num_iterators;
  uint64_t number_multiget_get;
  uint64_t number_multiget_keys_read;
  uint64_t number_multiget_bytes_read;
  uint64_t number_deletes_filtered;
  uint64_t number_merge_failures;
  uint64_t bloom_filter_prefix_checked;
  uint64_t bloom_filter_prefix_useful;
  uint64_t number_reseeks_iteration;
  uint64_t getupdatessince_calls;
  uint64_t block_cachecompressed_miss;
  uint64_t block_cachecompressed_hit;
  uint64_t wal_synced;
  uint64_t wal_bytes;
  uint64_t write_self;
  uint64_t write_other;
  uint64_t write_timedout;
  uint64_t write_wal;
  uint64_t flush_write_bytes;
  uint64_t compact_read_bytes;
  uint64_t compact_write_bytes;
  uint64_t number_superversion_acquires;
  uint64_t number_superversion_releases;
  uint64_t number_superversion_cleanups;
  uint64_t number_block_not_compressed;
};

static rocksdb_status_counters_t rocksdb_status_counters;

/*
  One SHOW function per RocksDB ticker; each maps a rocksdb_status_counters_t
  member to the corresponding rocksdb::Tickers enum value.
*/
DEF_SHOW_FUNC(block_cache_miss, BLOCK_CACHE_MISS)
DEF_SHOW_FUNC(block_cache_hit, BLOCK_CACHE_HIT)
DEF_SHOW_FUNC(block_cache_add, BLOCK_CACHE_ADD)
DEF_SHOW_FUNC(block_cache_index_miss, BLOCK_CACHE_INDEX_MISS)
DEF_SHOW_FUNC(block_cache_index_hit, BLOCK_CACHE_INDEX_HIT)
DEF_SHOW_FUNC(block_cache_filter_miss, BLOCK_CACHE_FILTER_MISS)
DEF_SHOW_FUNC(block_cache_filter_hit, BLOCK_CACHE_FILTER_HIT)
DEF_SHOW_FUNC(block_cache_data_miss, BLOCK_CACHE_DATA_MISS)
DEF_SHOW_FUNC(block_cache_data_hit, BLOCK_CACHE_DATA_HIT)
DEF_SHOW_FUNC(bloom_filter_useful, BLOOM_FILTER_USEFUL)
DEF_SHOW_FUNC(memtable_hit, MEMTABLE_HIT)
DEF_SHOW_FUNC(memtable_miss, MEMTABLE_MISS)
DEF_SHOW_FUNC(compaction_key_drop_new, COMPACTION_KEY_DROP_NEWER_ENTRY)
DEF_SHOW_FUNC(compaction_key_drop_obsolete, COMPACTION_KEY_DROP_OBSOLETE)
DEF_SHOW_FUNC(compaction_key_drop_user, COMPACTION_KEY_DROP_USER)
DEF_SHOW_FUNC(number_keys_written, NUMBER_KEYS_WRITTEN)
DEF_SHOW_FUNC(number_keys_read, NUMBER_KEYS_READ)
DEF_SHOW_FUNC(number_keys_updated, NUMBER_KEYS_UPDATED)
DEF_SHOW_FUNC(bytes_written, BYTES_WRITTEN)
DEF_SHOW_FUNC(bytes_read, BYTES_READ)
DEF_SHOW_FUNC(no_file_closes, NO_FILE_CLOSES)
DEF_SHOW_FUNC(no_file_opens, NO_FILE_OPENS)
DEF_SHOW_FUNC(no_file_errors, NO_FILE_ERRORS)
DEF_SHOW_FUNC(stall_micros, STALL_MICROS)
DEF_SHOW_FUNC(rate_limit_delay_millis, RATE_LIMIT_DELAY_MILLIS)
DEF_SHOW_FUNC(num_iterators, NO_ITERATORS)
DEF_SHOW_FUNC(number_multiget_get, NUMBER_MULTIGET_CALLS)
DEF_SHOW_FUNC(number_multiget_keys_read, NUMBER_MULTIGET_KEYS_READ)
DEF_SHOW_FUNC(number_multiget_bytes_read, NUMBER_MULTIGET_BYTES_READ)
DEF_SHOW_FUNC(number_deletes_filtered, NUMBER_FILTERED_DELETES)
DEF_SHOW_FUNC(number_merge_failures, NUMBER_MERGE_FAILURES)
DEF_SHOW_FUNC(bloom_filter_prefix_checked, BLOOM_FILTER_PREFIX_CHECKED)
DEF_SHOW_FUNC(bloom_filter_prefix_useful, BLOOM_FILTER_PREFIX_USEFUL)
DEF_SHOW_FUNC(number_reseeks_iteration, NUMBER_OF_RESEEKS_IN_ITERATION)
DEF_SHOW_FUNC(getupdatessince_calls, GET_UPDATES_SINCE_CALLS)
DEF_SHOW_FUNC(block_cachecompressed_miss, BLOCK_CACHE_COMPRESSED_MISS)
DEF_SHOW_FUNC(block_cachecompressed_hit, BLOCK_CACHE_COMPRESSED_HIT)
DEF_SHOW_FUNC(wal_synced, WAL_FILE_SYNCED)
DEF_SHOW_FUNC(wal_bytes, WAL_FILE_BYTES)
DEF_SHOW_FUNC(write_self, WRITE_DONE_BY_SELF)
DEF_SHOW_FUNC(write_other, WRITE_DONE_BY_OTHER)
DEF_SHOW_FUNC(write_timedout, WRITE_TIMEDOUT)
DEF_SHOW_FUNC(write_wal, WRITE_WITH_WAL)
DEF_SHOW_FUNC(flush_write_bytes, FLUSH_WRITE_BYTES)
DEF_SHOW_FUNC(compact_read_bytes, COMPACT_READ_BYTES)
DEF_SHOW_FUNC(compact_write_bytes, COMPACT_WRITE_BYTES)
DEF_SHOW_FUNC(number_superversion_acquires, NUMBER_SUPERVERSION_ACQUIRES)
DEF_SHOW_FUNC(number_superversion_releases, NUMBER_SUPERVERSION_RELEASES)
DEF_SHOW_FUNC(number_superversion_cleanups, NUMBER_SUPERVERSION_CLEANUPS)
DEF_SHOW_FUNC(number_block_not_compressed, NUMBER_BLOCK_NOT_COMPRESSED)

/* Copy the global MyRocks row/query counters into the exported snapshot. */
static void myrocks_update_status() {
  export_stats.rows_deleted = global_stats.rows[ROWS_DELETED];
  export_stats.rows_inserted = global_stats.rows[ROWS_INSERTED];
  export_stats.rows_read = global_stats.rows[ROWS_READ];
  export_stats.rows_updated = global_stats.rows[ROWS_UPDATED];
  export_stats.rows_deleted_blind = global_stats.rows[ROWS_DELETED_BLIND];
  export_stats.rows_expired = global_stats.rows[ROWS_EXPIRED];

  export_stats.system_rows_deleted = global_stats.system_rows[ROWS_DELETED];
  export_stats.system_rows_inserted = global_stats.system_rows[ROWS_INSERTED];
  export_stats.system_rows_read = global_stats.system_rows[ROWS_READ];
  export_stats.system_rows_updated = global_stats.system_rows[ROWS_UPDATED];

  export_stats.queries_point = global_stats.queries[QUERIES_POINT];
  export_stats.queries_range = global_stats.queries[QUERIES_RANGE];

  export_stats.covered_secondary_key_lookups =
      global_stats.covered_secondary_key_lookups;
}

/* Refresh the exported memtable memory-usage figures from RocksDB. */
static void myrocks_update_memory_status() {
  std::vector<rocksdb::DB *> dbs;
  std::unordered_set<const rocksdb::Cache *> cache_set;
  dbs.push_back(rdb);
  std::map<rocksdb::MemoryUtil::UsageType, uint64_t> temp_usage_by_type;
  rocksdb::MemoryUtil::GetApproximateMemoryUsageByType(dbs, cache_set,
                                                       &temp_usage_by_type);
  memory_stats.memtable_total =
      temp_usage_by_type[rocksdb::MemoryUtil::kMemTableTotal];
  memory_stats.memtable_unflushed =
      temp_usage_by_type[rocksdb::MemoryUtil::kMemTableUnFlushed];
}

/* SHOW STATUS entries backed by the export_stats/memory_stats snapshots. */
static SHOW_VAR myrocks_status_variables[] = {
    DEF_STATUS_VAR_FUNC("rows_deleted", &export_stats.rows_deleted,
                        SHOW_LONGLONG),
    DEF_STATUS_VAR_FUNC("rows_inserted", &export_stats.rows_inserted,
                        SHOW_LONGLONG),
    DEF_STATUS_VAR_FUNC("rows_read", &export_stats.rows_read,
SHOW_LONGLONG),
    DEF_STATUS_VAR_FUNC("rows_updated", &export_stats.rows_updated,
                        SHOW_LONGLONG),
    DEF_STATUS_VAR_FUNC("rows_deleted_blind", &export_stats.rows_deleted_blind,
                        SHOW_LONGLONG),
    DEF_STATUS_VAR_FUNC("rows_expired", &export_stats.rows_expired,
                        SHOW_LONGLONG),
    DEF_STATUS_VAR_FUNC("system_rows_deleted",
                        &export_stats.system_rows_deleted, SHOW_LONGLONG),
    DEF_STATUS_VAR_FUNC("system_rows_inserted",
                        &export_stats.system_rows_inserted, SHOW_LONGLONG),
    DEF_STATUS_VAR_FUNC("system_rows_read", &export_stats.system_rows_read,
                        SHOW_LONGLONG),
    DEF_STATUS_VAR_FUNC("system_rows_updated",
                        &export_stats.system_rows_updated, SHOW_LONGLONG),
    DEF_STATUS_VAR_FUNC("memtable_total", &memory_stats.memtable_total,
                        SHOW_LONGLONG),
    DEF_STATUS_VAR_FUNC("memtable_unflushed", &memory_stats.memtable_unflushed,
                        SHOW_LONGLONG),
    DEF_STATUS_VAR_FUNC("queries_point", &export_stats.queries_point,
                        SHOW_LONGLONG),
    DEF_STATUS_VAR_FUNC("queries_range", &export_stats.queries_range,
                        SHOW_LONGLONG),
    DEF_STATUS_VAR_FUNC("covered_secondary_key_lookups",
                        &export_stats.covered_secondary_key_lookups,
                        SHOW_LONGLONG),
    /* end-of-array marker */
    {NullS, NullS, SHOW_LONG}};

/* SHOW callback: refresh the snapshots, then expose the array above. */
static void show_myrocks_vars(THD *thd, SHOW_VAR *var, char *buff) {
  myrocks_update_status();
  myrocks_update_memory_status();
  var->type = SHOW_ARRAY;
  var->value = reinterpret_cast<char *>(&myrocks_status_variables);
}

/*
  Look up one "io_stalls.<key>" entry in a map returned by
  rocksdb.cfstats GetMapProperty and parse it as an unsigned integer.
  Returns 0 (and asserts in debug builds) if the key is missing.
*/
static ulonglong
io_stall_prop_value(const std::map<std::string, std::string> &props,
                    const std::string &key) {
  std::map<std::string, std::string>::const_iterator iter =
      props.find("io_stalls." + key);
  if (iter != props.end()) {
    return std::stoull(iter->second);
  } else {
    // NOTE(review): "GetMapPropery" typo below is preserved — it is a
    // runtime debug string and must stay byte-identical here.
    DBUG_PRINT("warning",
               ("RocksDB GetMapPropery hasn't returned key=%s", key.c_str()));
    DBUG_ASSERT(0);
    return 0;
  }
}

/* Aggregate per-column-family stall counters into the global snapshot. */
static void update_rocksdb_stall_status() {
  st_io_stall_stats local_io_stall_stats;
  for (const auto &cf_name : cf_manager.get_cf_names()) {
    rocksdb::ColumnFamilyHandle *cfh = cf_manager.get_cf(cf_name);
    if (cfh == nullptr) {
      continue;
    }

    std::map<std::string, std::string> props;
    if (!rdb->GetMapProperty(cfh, "rocksdb.cfstats", &props)) {
      continue;
    }

    local_io_stall_stats.level0_slowdown +=
        io_stall_prop_value(props, "level0_slowdown");
    local_io_stall_stats.level0_slowdown_with_compaction +=
        io_stall_prop_value(props, "level0_slowdown_with_compaction");
    local_io_stall_stats.level0_numfiles +=
        io_stall_prop_value(props, "level0_numfiles");
    local_io_stall_stats.level0_numfiles_with_compaction +=
        io_stall_prop_value(props, "level0_numfiles_with_compaction");
    local_io_stall_stats.stop_for_pending_compaction_bytes +=
        io_stall_prop_value(props, "stop_for_pending_compaction_bytes");
    local_io_stall_stats.slowdown_for_pending_compaction_bytes +=
        io_stall_prop_value(props, "slowdown_for_pending_compaction_bytes");
    local_io_stall_stats.memtable_compaction +=
        io_stall_prop_value(props, "memtable_compaction");
    local_io_stall_stats.memtable_slowdown +=
        io_stall_prop_value(props, "memtable_slowdown");
    local_io_stall_stats.total_stop += io_stall_prop_value(props, "total_stop");
    local_io_stall_stats.total_slowdown +=
        io_stall_prop_value(props, "total_slowdown");
  }
  io_stall_stats = local_io_stall_stats;
}

/* SHOW STATUS entries backed by the io_stall_stats snapshot. */
static SHOW_VAR rocksdb_stall_status_variables[] = {
    DEF_STATUS_VAR_FUNC("l0_file_count_limit_slowdowns",
                        &io_stall_stats.level0_slowdown, SHOW_LONGLONG),
    DEF_STATUS_VAR_FUNC("locked_l0_file_count_limit_slowdowns",
                        &io_stall_stats.level0_slowdown_with_compaction,
                        SHOW_LONGLONG),
    DEF_STATUS_VAR_FUNC("l0_file_count_limit_stops",
                        &io_stall_stats.level0_numfiles, SHOW_LONGLONG),
DEF_STATUS_VAR_FUNC("locked_l0_file_count_limit_stops",
                        &io_stall_stats.level0_numfiles_with_compaction,
                        SHOW_LONGLONG),
    DEF_STATUS_VAR_FUNC("pending_compaction_limit_stops",
                        &io_stall_stats.stop_for_pending_compaction_bytes,
                        SHOW_LONGLONG),
    DEF_STATUS_VAR_FUNC("pending_compaction_limit_slowdowns",
                        &io_stall_stats.slowdown_for_pending_compaction_bytes,
                        SHOW_LONGLONG),
    DEF_STATUS_VAR_FUNC("memtable_limit_stops",
                        &io_stall_stats.memtable_compaction, SHOW_LONGLONG),
    DEF_STATUS_VAR_FUNC("memtable_limit_slowdowns",
                        &io_stall_stats.memtable_slowdown, SHOW_LONGLONG),
    DEF_STATUS_VAR_FUNC("total_stops", &io_stall_stats.total_stop,
                        SHOW_LONGLONG),
    DEF_STATUS_VAR_FUNC("total_slowdowns", &io_stall_stats.total_slowdown,
                        SHOW_LONGLONG),
    // end of the array marker
    {NullS, NullS, SHOW_LONG}};

/* SHOW callback: refresh the stall snapshot, then expose the array above. */
static void show_rocksdb_stall_vars(THD *thd, SHOW_VAR *var, char *buff) {
  update_rocksdb_stall_status();
  var->type = SHOW_ARRAY;
  var->value = reinterpret_cast<char *>(&rocksdb_stall_status_variables);
}

/* Top-level SHOW STATUS registration table for the rocksdb plugin. */
static SHOW_VAR rocksdb_status_vars[] = {
    DEF_STATUS_VAR(block_cache_miss),
    DEF_STATUS_VAR(block_cache_hit),
    DEF_STATUS_VAR(block_cache_add),
    DEF_STATUS_VAR(block_cache_index_miss),
    DEF_STATUS_VAR(block_cache_index_hit),
    DEF_STATUS_VAR(block_cache_filter_miss),
    DEF_STATUS_VAR(block_cache_filter_hit),
    DEF_STATUS_VAR(block_cache_data_miss),
    DEF_STATUS_VAR(block_cache_data_hit),
    DEF_STATUS_VAR(bloom_filter_useful),
    DEF_STATUS_VAR(memtable_hit),
    DEF_STATUS_VAR(memtable_miss),
    DEF_STATUS_VAR(compaction_key_drop_new),
    DEF_STATUS_VAR(compaction_key_drop_obsolete),
    DEF_STATUS_VAR(compaction_key_drop_user),
    DEF_STATUS_VAR(number_keys_written),
    DEF_STATUS_VAR(number_keys_read),
    DEF_STATUS_VAR(number_keys_updated),
    DEF_STATUS_VAR(bytes_written),
    DEF_STATUS_VAR(bytes_read),
    DEF_STATUS_VAR(no_file_closes),
    DEF_STATUS_VAR(no_file_opens),
    DEF_STATUS_VAR(no_file_errors),
    DEF_STATUS_VAR(stall_micros),
    DEF_STATUS_VAR(rate_limit_delay_millis),
    DEF_STATUS_VAR(num_iterators),
    DEF_STATUS_VAR(number_multiget_get),
    DEF_STATUS_VAR(number_multiget_keys_read),
    DEF_STATUS_VAR(number_multiget_bytes_read),
    DEF_STATUS_VAR(number_deletes_filtered),
    DEF_STATUS_VAR(number_merge_failures),
    DEF_STATUS_VAR(bloom_filter_prefix_checked),
    DEF_STATUS_VAR(bloom_filter_prefix_useful),
    DEF_STATUS_VAR(number_reseeks_iteration),
    DEF_STATUS_VAR(getupdatessince_calls),
    DEF_STATUS_VAR(block_cachecompressed_miss),
    DEF_STATUS_VAR(block_cachecompressed_hit),
    DEF_STATUS_VAR(wal_synced),
    DEF_STATUS_VAR(wal_bytes),
    DEF_STATUS_VAR(write_self),
    DEF_STATUS_VAR(write_other),
    DEF_STATUS_VAR(write_timedout),
    DEF_STATUS_VAR(write_wal),
    DEF_STATUS_VAR(flush_write_bytes),
    DEF_STATUS_VAR(compact_read_bytes),
    DEF_STATUS_VAR(compact_write_bytes),
    DEF_STATUS_VAR(number_superversion_acquires),
    DEF_STATUS_VAR(number_superversion_releases),
    DEF_STATUS_VAR(number_superversion_cleanups),
    DEF_STATUS_VAR(number_block_not_compressed),
    DEF_STATUS_VAR_PTR("snapshot_conflict_errors",
                       &rocksdb_snapshot_conflict_errors, SHOW_LONGLONG),
    DEF_STATUS_VAR_PTR("wal_group_syncs", &rocksdb_wal_group_syncs,
                       SHOW_LONGLONG),
    DEF_STATUS_VAR_PTR("number_stat_computes", &rocksdb_number_stat_computes,
                       SHOW_LONGLONG),
    DEF_STATUS_VAR_PTR("number_sst_entry_put", &rocksdb_num_sst_entry_put,
                       SHOW_LONGLONG),
    DEF_STATUS_VAR_PTR("number_sst_entry_delete",
                       &rocksdb_num_sst_entry_delete, SHOW_LONGLONG),
    DEF_STATUS_VAR_PTR("number_sst_entry_singledelete",
                       &rocksdb_num_sst_entry_singledelete, SHOW_LONGLONG),
    DEF_STATUS_VAR_PTR("number_sst_entry_merge", &rocksdb_num_sst_entry_merge,
                       SHOW_LONGLONG),
    DEF_STATUS_VAR_PTR("number_sst_entry_other", &rocksdb_num_sst_entry_other,
                       SHOW_LONGLONG),
    // the variables generated by SHOW_FUNC are sorted only by prefix (first
    // arg in the tuple below), so make sure it is unique to make sorting
    // deterministic as quick sort is not stable
    {"rocksdb", reinterpret_cast<char *>(&show_myrocks_vars), SHOW_FUNC},
    {"rocksdb_stall", reinterpret_cast<char *>(&show_rocksdb_stall_vars),
     SHOW_FUNC},
    {NullS, NullS, SHOW_LONG}};

/*
  Background thread's main logic: periodically wake up to persist index
  statistics and flush/sync the RocksDB WAL, until signalled to stop.
*/

void Rdb_background_thread::run() {
  // How many seconds to wait till flushing the WAL next time.
  const int WAKE_UP_INTERVAL = 1;

  timespec ts_next_sync;
  set_timespec(ts_next_sync, WAKE_UP_INTERVAL);

  for (;;) {
    // Wait until the next timeout or until we receive a signal to stop the
    // thread. Request to stop the thread should only be triggered when the
    // storage engine is being unloaded.
    RDB_MUTEX_LOCK_CHECK(m_signal_mutex);
    const auto ret MY_ATTRIBUTE((__unused__)) =
        mysql_cond_timedwait(&m_signal_cond, &m_signal_mutex, &ts_next_sync);

    // Check that we receive only the expected error codes.
    DBUG_ASSERT(ret == 0 || ret == ETIMEDOUT);
    // Snapshot the flags under the mutex, then release it before acting.
    const bool local_stop = m_stop;
    const bool local_save_stats = m_save_stats;
    reset();
    RDB_MUTEX_UNLOCK_CHECK(m_signal_mutex);

    if (local_stop) {
      // If we're here then that's because condition variable was signaled by
      // another thread and we're shutting down. Break out the loop to make
      // sure that shutdown thread can proceed.
      break;
    }

    // This path should be taken only when the timer expired.
    DBUG_ASSERT(ret == ETIMEDOUT);

    if (local_save_stats) {
      ddl_manager.persist_stats();
    }

    // Set the next timestamp for mysql_cond_timedwait() (which ends up
    // calling pthread_cond_timedwait()) to wait on.
    set_timespec(ts_next_sync, WAKE_UP_INTERVAL);

    // Flush the WAL. Sync it for both background and never modes to copy
    // InnoDB's behavior. For mode never, the wal file isn't even written,
    // whereas background writes to the wal file, but issues the syncs in a
    // background thread.
    if (rdb && (rocksdb_flush_log_at_trx_commit != FLUSH_LOG_SYNC)) {
      DBUG_ASSERT(!rocksdb_db_options->allow_mmap_writes);
      const rocksdb::Status s = rdb->FlushWAL(true);
      if (!s.ok()) {
        rdb_handle_io_error(s, RDB_IO_ERROR_BG_THREAD);
      }
    }
  }

  // save remaining stats which might've left unsaved
  ddl_manager.persist_stats();
}

/**
  Deciding if it is possible to use bloom filter or not.
  @detail
   Even if bloom filter exists, it is not always possible to use bloom
   filter. If using bloom filter when you shouldn't, false negative may
   happen -- fewer rows than expected may be returned. It is users'
   responsibility to use bloom filter correctly.

   If bloom filter does not exist, return value does not matter because
   RocksDB does not use bloom filter internally.

  @param kd             Key definition being scanned.
  @param eq_cond        Equal condition part of the key. This always includes
                        system index id (4 bytes).
  @param use_all_keys   True if all key parts are set with equal conditions.
                        This is aware of extended keys.
*/
bool can_use_bloom_filter(THD *thd, const Rdb_key_def &kd,
                          const rocksdb::Slice &eq_cond,
                          const bool use_all_keys) {
  bool can_use = false;

  /* Session-level override disables bloom filter usage entirely. */
  if (THDVAR(thd, skip_bloom_filter_on_read)) {
    return can_use;
  }

  const rocksdb::SliceTransform *prefix_extractor = kd.get_extractor();
  if (prefix_extractor) {
    /*
      This is an optimized use case for CappedPrefixTransform.
      If eq_cond length >= prefix extractor length and if all keys are used
      for equal lookup, it is always possible to use bloom filter.

      Prefix bloom filter can't be used on descending scan with prefix
      lookup (i.e. WHERE id1=1 ORDER BY id2 DESC), because of RocksDB's
      limitation. On ascending (or not sorting) scan, keys longer than the
      capped prefix length will be truncated down to the capped length and
      the resulting key is added to the bloom filter.

      Keys shorter than the capped prefix length will be added to the bloom
      filter. When keys are looked up, key conditionals longer than the
      capped length can be used; key conditionals shorter require all parts
      of the key to be available for the short key match.
    */
    if ((use_all_keys && prefix_extractor->InRange(eq_cond)) ||
        prefix_extractor->SameResultWhenAppended(eq_cond))
      can_use = true;
    else
      can_use = false;
  } else {
    /*
      if prefix extractor is not defined, all key parts have to be
      used by eq_cond.
    */
    if (use_all_keys)
      can_use = true;
    else
      can_use = false;
  }

  return can_use;
}

/* For modules that need access to the global data structures */
rocksdb::TransactionDB *rdb_get_rocksdb_db() { return rdb; }

Rdb_cf_manager &rdb_get_cf_manager() { return cf_manager; }

const rocksdb::BlockBasedTableOptions &rdb_get_table_options() {
  return *rocksdb_tbl_options;
}

bool rdb_is_ttl_enabled() { return rocksdb_enable_ttl; }
bool rdb_is_ttl_read_filtering_enabled() {
  return rocksdb_enable_ttl_read_filtering;
}

/* Debug-build-only accessors for the TTL testing knobs. */
#ifndef NDEBUG
int rdb_dbug_set_ttl_rec_ts() { return rocksdb_debug_ttl_rec_ts; }
int rdb_dbug_set_ttl_snapshot_ts() { return rocksdb_debug_ttl_snapshot_ts; }
int rdb_dbug_set_ttl_read_filter_ts() {
  return rocksdb_debug_ttl_read_filter_ts;
}
bool rdb_dbug_set_ttl_ignore_pk() { return rocksdb_debug_ttl_ignore_pk; }
#endif

/*
  Bump a global row counter by `count`; system tables are tracked in a
  separate set of counters from user tables.
*/
void rdb_update_global_stats(const operation_type &type, uint count,
                             bool is_system_table) {
  DBUG_ASSERT(type < ROWS_MAX);

  if (count == 0) {
    return;
  }

  if (is_system_table) {
    global_stats.system_rows[type].add(count);
  } else {
    global_stats.rows[type].add(count);
  }
}

/*
  Load the accumulated perf-context counters for `tablename` into
  `counters`.  Returns HA_ERR_ROCKSDB_INVALID_TABLE if the table is
  unknown to the open-tables cache.
*/
int rdb_get_table_perf_counters(const char *const tablename,
                                Rdb_perf_counters *const counters) {
  DBUG_ASSERT(counters != nullptr);
  DBUG_ASSERT(tablename != nullptr);

  Rdb_table_handler *table_handler;
  table_handler = rdb_open_tables.get_table_handler(tablename);
  if (table_handler == nullptr) {
    return HA_ERR_ROCKSDB_INVALID_TABLE;
  }

  counters->load(table_handler->m_table_perf_context);

  rdb_open_tables.release_table_handler(table_handler);
  return HA_EXIT_SUCCESS;
}

const char *get_rdb_io_error_string(const RDB_IO_ERROR_TYPE err_type) {
  // If this assertion fails then this means that a member has been either
  // added to or removed from RDB_IO_ERROR_TYPE enum and this function needs
  // to be changed to return the appropriate value.
  static_assert(RDB_IO_ERROR_LAST == 4, "Please handle all the error types.");
  switch (err_type) {
  case RDB_IO_ERROR_TYPE::RDB_IO_ERROR_TX_COMMIT:
    return "RDB_IO_ERROR_TX_COMMIT";
  case RDB_IO_ERROR_TYPE::RDB_IO_ERROR_DICT_COMMIT:
    return "RDB_IO_ERROR_DICT_COMMIT";
  case RDB_IO_ERROR_TYPE::RDB_IO_ERROR_BG_THREAD:
    return "RDB_IO_ERROR_BG_THREAD";
  case RDB_IO_ERROR_TYPE::RDB_IO_ERROR_GENERAL:
    return "RDB_IO_ERROR_GENERAL";
  default:
    DBUG_ASSERT(false);
    return "(unknown)";
  }
}

// In case of core dump generation we want this function NOT to be optimized
// so that we can capture as much data as possible to debug the root cause
// more efficiently.
#ifdef __GNUC__
#pragma GCC push_options
#pragma GCC optimize("O0")
#endif

/*
  Central I/O error handler.  I/O errors and corruption on critical paths
  (WAL/dictionary commits, general I/O) abort the server; background-thread
  failures are only logged.
*/
void rdb_handle_io_error(const rocksdb::Status status,
                         const RDB_IO_ERROR_TYPE err_type) {
  if (status.IsIOError()) {
    switch (err_type) {
    case RDB_IO_ERROR_TX_COMMIT:
    case RDB_IO_ERROR_DICT_COMMIT: {
      rdb_log_status_error(status, "failed to write to WAL");
      /* NO_LINT_DEBUG */
      sql_print_error("MyRocks: aborting on WAL write error.");
      abort_with_stack_traces();
      break;
    }
    case RDB_IO_ERROR_BG_THREAD: {
      rdb_log_status_error(status, "BG thread failed to write to RocksDB");
      break;
    }
    case RDB_IO_ERROR_GENERAL: {
      rdb_log_status_error(status, "failed on I/O");
      /* NO_LINT_DEBUG */
      sql_print_error("MyRocks: aborting on I/O error.");
      abort_with_stack_traces();
      break;
    }
    default:
      DBUG_ASSERT(0);
      break;
    }
  } else if (status.IsCorruption()) {
    rdb_log_status_error(status, "data corruption detected!");
    /* NO_LINT_DEBUG */
    sql_print_error("MyRocks: aborting because of data corruption.");
    abort_with_stack_traces();
  } else if (!status.ok()) {
    switch (err_type) {
    case RDB_IO_ERROR_DICT_COMMIT: {
      rdb_log_status_error(status, "Failed to write to WAL (dictionary)");
      /* NO_LINT_DEBUG */
      sql_print_error("MyRocks: aborting on WAL write error.");
      abort_with_stack_traces();
      break;
    }
    default:
      rdb_log_status_error(status, "Failed to read/write in RocksDB");
      break;
    }
  }
}
#ifdef __GNUC__
#pragma GCC pop_options #endif Rdb_dict_manager *rdb_get_dict_manager(void) { return &dict_manager; } Rdb_ddl_manager *rdb_get_ddl_manager(void) { return &ddl_manager; } Rdb_binlog_manager *rdb_get_binlog_manager(void) { return &binlog_manager; } void rocksdb_set_compaction_options( my_core::THD *const thd MY_ATTRIBUTE((__unused__)), my_core::st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), void *const var_ptr, const void *const save) { if (var_ptr && save) { *(uint64_t *)var_ptr = *(const uint64_t *)save; } const Rdb_compact_params params = { (uint64_t)rocksdb_compaction_sequential_deletes, (uint64_t)rocksdb_compaction_sequential_deletes_window, (uint64_t)rocksdb_compaction_sequential_deletes_file_size}; if (properties_collector_factory) { properties_collector_factory->SetCompactionParams(params); } } void rocksdb_set_table_stats_sampling_pct( my_core::THD *const thd MY_ATTRIBUTE((__unused__)), my_core::st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save) { RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); const uint32_t new_val = *static_cast<const uint32_t *>(save); if (new_val != rocksdb_table_stats_sampling_pct) { rocksdb_table_stats_sampling_pct = new_val; if (properties_collector_factory) { properties_collector_factory->SetTableStatsSamplingPct( rocksdb_table_stats_sampling_pct); } } RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); } /* This function allows setting the rate limiter's bytes per second value but only if the rate limiter is turned on which has to be done at startup. If the rate is already 0 (turned off) or we are changing it to 0 (trying to turn it off) this function will push a warning to the client and do nothing. This is similar to the code in innodb_doublewrite_update (found in storage/innobase/handler/ha_innodb.cc). 
*/ void rocksdb_set_rate_limiter_bytes_per_sec( my_core::THD *const thd, my_core::st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save) { const uint64_t new_val = *static_cast<const uint64_t *>(save); if (new_val == 0 || rocksdb_rate_limiter_bytes_per_sec == 0) { /* If a rate_limiter was not enabled at startup we can't change it nor can we disable it if one was created at startup */ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WRONG_ARGUMENTS, "RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot " "be dynamically changed to or from 0. Do a clean " "shutdown if you want to change it from or to 0."); } else if (new_val != rocksdb_rate_limiter_bytes_per_sec) { /* Apply the new value to the rate limiter and store it locally */ DBUG_ASSERT(rocksdb_rate_limiter != nullptr); rocksdb_rate_limiter_bytes_per_sec = new_val; rocksdb_rate_limiter->SetBytesPerSecond(new_val); } } void rocksdb_set_sst_mgr_rate_bytes_per_sec( my_core::THD *const thd, my_core::st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save) { RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); const uint64_t new_val = *static_cast<const uint64_t *>(save); if (new_val != rocksdb_sst_mgr_rate_bytes_per_sec) { rocksdb_sst_mgr_rate_bytes_per_sec = new_val; rocksdb_db_options->sst_file_manager->SetDeleteRateBytesPerSecond( rocksdb_sst_mgr_rate_bytes_per_sec); } RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); } void rocksdb_set_delayed_write_rate(THD *thd, struct st_mysql_sys_var *var, void *var_ptr, const void *save) { RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); const uint64_t new_val = *static_cast<const uint64_t *>(save); if (rocksdb_delayed_write_rate != new_val) { rocksdb_delayed_write_rate = new_val; rocksdb::Status s = rdb->SetDBOptions({{"delayed_write_rate", std::to_string(new_val)}}); if (!s.ok()) { /* NO_LINT_DEBUG */ sql_print_warning("MyRocks: failed to update 
delayed_write_rate. " "status code = %d, status = %s", s.code(), s.ToString().c_str()); } } RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); } void rocksdb_set_max_latest_deadlocks(THD *thd, struct st_mysql_sys_var *var, void *var_ptr, const void *save) { RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); const uint32_t new_val = *static_cast<const uint32_t *>(save); if (rocksdb_max_latest_deadlocks != new_val) { rocksdb_max_latest_deadlocks = new_val; rdb->SetDeadlockInfoBufferSize(rocksdb_max_latest_deadlocks); } RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); } void rdb_set_collation_exception_list(const char *const exception_list) { DBUG_ASSERT(rdb_collation_exceptions != nullptr); if (!rdb_collation_exceptions->set_patterns(exception_list)) { my_core::warn_about_bad_patterns(rdb_collation_exceptions, "strict_collation_exceptions"); } } void rocksdb_set_collation_exception_list(THD *const thd, struct st_mysql_sys_var *const var, void *const var_ptr, const void *const save) { const char *const val = *static_cast<const char *const *>(save); rdb_set_collation_exception_list(val == nullptr ? "" : val); //psergey-todo: what is the purpose of the below?? const char *val_copy= val? my_strdup(val, MYF(0)): nullptr; my_free(*static_cast<char**>(var_ptr)); *static_cast<const char**>(var_ptr) = val_copy; } void rocksdb_set_bulk_load(THD *const thd, struct st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), void *const var_ptr, const void *const save) { Rdb_transaction *&tx = get_tx_from_thd(thd); if (tx != nullptr) { const int rc = tx->finish_bulk_load(); if (rc != 0) { // NO_LINT_DEBUG sql_print_error("RocksDB: Error %d finalizing last SST file while " "setting bulk loading variable", rc); /* MariaDB doesn't do the following: abort_with_stack_traces(); because it doesn't seem a good idea to crash a server when a user makes a mistake. Instead, we return an error to the user. The error has already been produced inside ha_rocksdb::finalize_bulk_load(). 
*/ } } *static_cast<bool *>(var_ptr) = *static_cast<const bool *>(save); } void rocksdb_set_bulk_load_allow_unsorted( THD *const thd, struct st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), void *const var_ptr, const void *const save) { if (THDVAR(thd, bulk_load)) { my_error(ER_ERROR_WHEN_EXECUTING_COMMAND, MYF(0), "SET", "Cannot change this setting while bulk load is enabled"); } else { *static_cast<bool *>(var_ptr) = *static_cast<const bool *>(save); } } static void rocksdb_set_max_background_jobs(THD *thd, struct st_mysql_sys_var *const var, void *const var_ptr, const void *const save) { DBUG_ASSERT(save != nullptr); DBUG_ASSERT(rocksdb_db_options != nullptr); DBUG_ASSERT(rocksdb_db_options->env != nullptr); RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); const int new_val = *static_cast<const int *>(save); if (rocksdb_db_options->max_background_jobs != new_val) { rocksdb_db_options->max_background_jobs = new_val; rocksdb::Status s = rdb->SetDBOptions({{"max_background_jobs", std::to_string(new_val)}}); if (!s.ok()) { /* NO_LINT_DEBUG */ sql_print_warning("MyRocks: failed to update max_background_jobs. " "Status code = %d, status = %s.", s.code(), s.ToString().c_str()); } } RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); } void rocksdb_set_update_cf_options(THD *const /* unused */, struct st_mysql_sys_var *const /* unused */, void *const var_ptr, const void *const save) { const char *const val = *static_cast<const char *const *>(save); if (!val) { // NO_LINT_DEBUG sql_print_warning("MyRocks: NULL is not a valid option for updates to " "column family settings."); return; } RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); DBUG_ASSERT(val != nullptr); // Do the real work of applying the changes. Rdb_cf_options::Name_to_config_t option_map; // Basic sanity checking and parsing the options into a map. If this fails // then there's no point to proceed. 
if (!Rdb_cf_options::parse_cf_options(val, &option_map)) { *reinterpret_cast<char**>(var_ptr) = nullptr; // NO_LINT_DEBUG sql_print_warning("MyRocks: failed to parse the updated column family " "options = '%s'.", val); RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); return; } // For each CF we have, see if we need to update any settings. for (const auto &cf_name : cf_manager.get_cf_names()) { DBUG_ASSERT(!cf_name.empty()); rocksdb::ColumnFamilyHandle *cfh = cf_manager.get_cf(cf_name); DBUG_ASSERT(cfh != nullptr); const auto it = option_map.find(cf_name); std::string per_cf_options = (it != option_map.end()) ? it->second : ""; if (!per_cf_options.empty()) { Rdb_cf_options::Name_to_config_t opt_map; rocksdb::Status s = rocksdb::StringToMap(per_cf_options, &opt_map); if (s != rocksdb::Status::OK()) { // NO_LINT_DEBUG sql_print_warning("MyRocks: failed to convert the options for column " "family '%s' to a map. %s", cf_name.c_str(), s.ToString().c_str()); } else { DBUG_ASSERT(rdb != nullptr); // Finally we can apply the options. s = rdb->SetOptions(cfh, opt_map); if (s != rocksdb::Status::OK()) { // NO_LINT_DEBUG sql_print_warning("MyRocks: failed to apply the options for column " "family '%s'. %s", cf_name.c_str(), s.ToString().c_str()); } else { // NO_LINT_DEBUG sql_print_information("MyRocks: options for column family '%s' " "have been successfully updated.", cf_name.c_str()); // Make sure that data is internally consistent as well and update // the CF options. This is necessary also to make sure that the CF // options will be correctly reflected in the relevant table: // ROCKSDB_CF_OPTIONS in INFORMATION_SCHEMA. 
rocksdb::ColumnFamilyOptions cf_options = rdb->GetOptions(cfh); std::string updated_options; s = rocksdb::GetStringFromColumnFamilyOptions(&updated_options, cf_options); DBUG_ASSERT(s == rocksdb::Status::OK()); DBUG_ASSERT(!updated_options.empty()); cf_manager.update_options_map(cf_name, updated_options); } } } } // Reset the pointers regardless of how much success we had with updating // the CF options. This will results in consistent behavior and avoids // dealing with cases when only a subset of CF-s was successfully updated. if (val) { *reinterpret_cast<char**>(var_ptr) = my_strdup(val, MYF(0)); } else { *reinterpret_cast<char**>(var_ptr) = nullptr; } // Our caller (`plugin_var_memalloc_global_update`) will call `my_free` to // free up resources used before. RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); } void rdb_queue_save_stats_request() { rdb_bg_thread.request_save_stats(); } #ifdef MARIAROCKS_NOT_YET // MDEV-10976 void ha_rocksdb::rpl_before_delete_rows() { DBUG_ENTER_FUNC(); m_in_rpl_delete_rows = true; DBUG_VOID_RETURN; } void ha_rocksdb::rpl_after_delete_rows() { DBUG_ENTER_FUNC(); m_in_rpl_delete_rows = false; DBUG_VOID_RETURN; } void ha_rocksdb::rpl_before_update_rows() { DBUG_ENTER_FUNC(); m_in_rpl_update_rows = true; DBUG_VOID_RETURN; } void ha_rocksdb::rpl_after_update_rows() { DBUG_ENTER_FUNC(); m_in_rpl_update_rows = false; DBUG_VOID_RETURN; } /** @brief Read Free Replication can be used or not. Returning False means Read Free Replication can be used. Read Free Replication can be used on UPDATE or DELETE row events, and table must have user defined primary key. 
*/ bool ha_rocksdb::use_read_free_rpl() { DBUG_ENTER_FUNC(); DBUG_RETURN((m_in_rpl_delete_rows || m_in_rpl_update_rows) && !has_hidden_pk(table) && m_use_read_free_rpl); } #endif // MARIAROCKS_NOT_YET double ha_rocksdb::read_time(uint index, uint ranges, ha_rows rows) { DBUG_ENTER_FUNC(); if (index != table->s->primary_key) { /* Non covering index range scan */ DBUG_RETURN(handler::read_time(index, ranges, rows)); } DBUG_RETURN((rows / 20.0) + 1); } } // namespace myrocks /** Construct and emit duplicate key error message using information from table's record buffer. @sa print_keydup_error(table, key, msg, errflag, thd, org_table_name). */ void print_keydup_error(TABLE *table, KEY *key, myf errflag, const THD *thd, const char *org_table_name) { print_keydup_error(table, key, ER(ER_DUP_ENTRY_WITH_KEY_NAME), errflag); } /* Register the storage engine plugin outside of myrocks namespace so that mysql_declare_plugin does not get confused when it does its name generation. */ struct st_mysql_storage_engine rocksdb_storage_engine = { MYSQL_HANDLERTON_INTERFACE_VERSION}; maria_declare_plugin(rocksdb_se){ MYSQL_STORAGE_ENGINE_PLUGIN, /* Plugin Type */ &rocksdb_storage_engine, /* Plugin Descriptor */ "ROCKSDB", /* Plugin Name */ "Monty Program Ab", /* Plugin Author */ "RocksDB storage engine", /* Plugin Description */ PLUGIN_LICENSE_GPL, /* Plugin Licence */ myrocks::rocksdb_init_func, /* Plugin Entry Point */ myrocks::rocksdb_done_func, /* Plugin Deinitializer */ 0x0001, /* version number (0.1) */ myrocks::rocksdb_status_vars, /* status variables */ myrocks::rocksdb_system_variables, /* system variables */ "1.0", /* string version */ MariaDB_PLUGIN_MATURITY_ALPHA /* maturity */ }, myrocks::rdb_i_s_cfstats, myrocks::rdb_i_s_dbstats, myrocks::rdb_i_s_perf_context, myrocks::rdb_i_s_perf_context_global, myrocks::rdb_i_s_cfoptions, myrocks::rdb_i_s_compact_stats, myrocks::rdb_i_s_global_info, myrocks::rdb_i_s_ddl, myrocks::rdb_i_s_index_file_map, myrocks::rdb_i_s_lock_info, 
myrocks::rdb_i_s_trx_info maria_declare_plugin_end;
34.513572
94
0.66999
[ "object", "vector" ]
592ff7c6465acf6b154e6071a880eed8553ec3e3
16,735
cc
C++
node_modules/react-native/third-party/double-conversion-1.1.6/test/cctest/test-fixed-dtoa.cc
MarcelWepper/PAXETApp
a66a82e4fa38fb7f9f02d34344a9a01fccda4119
[ "CC-BY-3.0", "Apache-2.0" ]
1,510
2019-04-11T07:36:35.000Z
2022-03-31T03:47:40.000Z
src/double-conversion/test/cctest/test-fixed-dtoa.cc
kattkieru/libpypa
5e7a4833da515d0cd2d850d51f082000c9e9f651
[ "Apache-2.0" ]
64
2019-04-11T13:49:55.000Z
2020-12-25T02:18:19.000Z
src/double-conversion/test/cctest/test-fixed-dtoa.cc
kattkieru/libpypa
5e7a4833da515d0cd2d850d51f082000c9e9f651
[ "Apache-2.0" ]
214
2019-04-11T09:36:41.000Z
2022-02-19T08:10:31.000Z
// Copyright 2010 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include <stdlib.h> #include "cctest.h" #include "fixed-dtoa.h" #include "gay-fixed.h" #include "ieee.h" #include "utils.h" using namespace double_conversion; static const int kBufferSize = 500; TEST(FastFixedVariousDoubles) { char buffer_container[kBufferSize]; Vector<char> buffer(buffer_container, kBufferSize); int length; int point; CHECK(FastFixedDtoa(1.0, 1, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(1, point); CHECK(FastFixedDtoa(1.0, 15, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(1, point); CHECK(FastFixedDtoa(1.0, 0, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(1, point); CHECK(FastFixedDtoa(0xFFFFFFFF, 5, buffer, &length, &point)); CHECK_EQ("4294967295", buffer.start()); CHECK_EQ(10, point); CHECK(FastFixedDtoa(4294967296.0, 5, buffer, &length, &point)); CHECK_EQ("4294967296", buffer.start()); CHECK_EQ(10, point); CHECK(FastFixedDtoa(1e21, 5, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); // CHECK_EQ(22, point); CHECK_EQ(22, point); CHECK(FastFixedDtoa(999999999999999868928.00, 2, buffer, &length, &point)); CHECK_EQ("999999999999999868928", buffer.start()); CHECK_EQ(21, point); CHECK(FastFixedDtoa(6.9999999999999989514240000e+21, 5, buffer, &length, &point)); CHECK_EQ("6999999999999998951424", buffer.start()); CHECK_EQ(22, point); CHECK(FastFixedDtoa(1.5, 5, buffer, &length, &point)); CHECK_EQ("15", buffer.start()); CHECK_EQ(1, point); CHECK(FastFixedDtoa(1.55, 5, buffer, &length, &point)); CHECK_EQ("155", buffer.start()); CHECK_EQ(1, point); CHECK(FastFixedDtoa(1.55, 1, buffer, &length, &point)); CHECK_EQ("16", buffer.start()); CHECK_EQ(1, point); CHECK(FastFixedDtoa(1.00000001, 15, buffer, &length, &point)); CHECK_EQ("100000001", buffer.start()); CHECK_EQ(1, point); CHECK(FastFixedDtoa(0.1, 10, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(0, point); CHECK(FastFixedDtoa(0.01, 10, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-1, 
point); CHECK(FastFixedDtoa(0.001, 10, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-2, point); CHECK(FastFixedDtoa(0.0001, 10, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-3, point); CHECK(FastFixedDtoa(0.00001, 10, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-4, point); CHECK(FastFixedDtoa(0.000001, 10, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-5, point); CHECK(FastFixedDtoa(0.0000001, 10, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-6, point); CHECK(FastFixedDtoa(0.00000001, 10, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-7, point); CHECK(FastFixedDtoa(0.000000001, 10, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-8, point); CHECK(FastFixedDtoa(0.0000000001, 15, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-9, point); CHECK(FastFixedDtoa(0.00000000001, 15, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-10, point); CHECK(FastFixedDtoa(0.000000000001, 15, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-11, point); CHECK(FastFixedDtoa(0.0000000000001, 15, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-12, point); CHECK(FastFixedDtoa(0.00000000000001, 15, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-13, point); CHECK(FastFixedDtoa(0.000000000000001, 20, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-14, point); CHECK(FastFixedDtoa(0.0000000000000001, 20, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-15, point); CHECK(FastFixedDtoa(0.00000000000000001, 20, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-16, point); CHECK(FastFixedDtoa(0.000000000000000001, 20, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-17, point); CHECK(FastFixedDtoa(0.0000000000000000001, 20, buffer, &length, &point)); CHECK_EQ("1", 
buffer.start()); CHECK_EQ(-18, point); CHECK(FastFixedDtoa(0.00000000000000000001, 20, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-19, point); CHECK(FastFixedDtoa(0.10000000004, 10, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(0, point); CHECK(FastFixedDtoa(0.01000000004, 10, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-1, point); CHECK(FastFixedDtoa(0.00100000004, 10, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-2, point); CHECK(FastFixedDtoa(0.00010000004, 10, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-3, point); CHECK(FastFixedDtoa(0.00001000004, 10, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-4, point); CHECK(FastFixedDtoa(0.00000100004, 10, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-5, point); CHECK(FastFixedDtoa(0.00000010004, 10, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-6, point); CHECK(FastFixedDtoa(0.00000001004, 10, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-7, point); CHECK(FastFixedDtoa(0.00000000104, 10, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-8, point); CHECK(FastFixedDtoa(0.0000000001000004, 15, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-9, point); CHECK(FastFixedDtoa(0.0000000000100004, 15, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-10, point); CHECK(FastFixedDtoa(0.0000000000010004, 15, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-11, point); CHECK(FastFixedDtoa(0.0000000000001004, 15, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-12, point); CHECK(FastFixedDtoa(0.0000000000000104, 15, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-13, point); CHECK(FastFixedDtoa(0.000000000000001000004, 20, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-14, point); 
CHECK(FastFixedDtoa(0.000000000000000100004, 20, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-15, point); CHECK(FastFixedDtoa(0.000000000000000010004, 20, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-16, point); CHECK(FastFixedDtoa(0.000000000000000001004, 20, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-17, point); CHECK(FastFixedDtoa(0.000000000000000000104, 20, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-18, point); CHECK(FastFixedDtoa(0.000000000000000000014, 20, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-19, point); CHECK(FastFixedDtoa(0.10000000006, 10, buffer, &length, &point)); CHECK_EQ("1000000001", buffer.start()); CHECK_EQ(0, point); CHECK(FastFixedDtoa(0.01000000006, 10, buffer, &length, &point)); CHECK_EQ("100000001", buffer.start()); CHECK_EQ(-1, point); CHECK(FastFixedDtoa(0.00100000006, 10, buffer, &length, &point)); CHECK_EQ("10000001", buffer.start()); CHECK_EQ(-2, point); CHECK(FastFixedDtoa(0.00010000006, 10, buffer, &length, &point)); CHECK_EQ("1000001", buffer.start()); CHECK_EQ(-3, point); CHECK(FastFixedDtoa(0.00001000006, 10, buffer, &length, &point)); CHECK_EQ("100001", buffer.start()); CHECK_EQ(-4, point); CHECK(FastFixedDtoa(0.00000100006, 10, buffer, &length, &point)); CHECK_EQ("10001", buffer.start()); CHECK_EQ(-5, point); CHECK(FastFixedDtoa(0.00000010006, 10, buffer, &length, &point)); CHECK_EQ("1001", buffer.start()); CHECK_EQ(-6, point); CHECK(FastFixedDtoa(0.00000001006, 10, buffer, &length, &point)); CHECK_EQ("101", buffer.start()); CHECK_EQ(-7, point); CHECK(FastFixedDtoa(0.00000000106, 10, buffer, &length, &point)); CHECK_EQ("11", buffer.start()); CHECK_EQ(-8, point); CHECK(FastFixedDtoa(0.0000000001000006, 15, buffer, &length, &point)); CHECK_EQ("100001", buffer.start()); CHECK_EQ(-9, point); CHECK(FastFixedDtoa(0.0000000000100006, 15, buffer, &length, &point)); CHECK_EQ("10001", buffer.start()); CHECK_EQ(-10, 
point); CHECK(FastFixedDtoa(0.0000000000010006, 15, buffer, &length, &point)); CHECK_EQ("1001", buffer.start()); CHECK_EQ(-11, point); CHECK(FastFixedDtoa(0.0000000000001006, 15, buffer, &length, &point)); CHECK_EQ("101", buffer.start()); CHECK_EQ(-12, point); CHECK(FastFixedDtoa(0.0000000000000106, 15, buffer, &length, &point)); CHECK_EQ("11", buffer.start()); CHECK_EQ(-13, point); CHECK(FastFixedDtoa(0.000000000000001000006, 20, buffer, &length, &point)); CHECK_EQ("100001", buffer.start()); CHECK_EQ(-14, point); CHECK(FastFixedDtoa(0.000000000000000100006, 20, buffer, &length, &point)); CHECK_EQ("10001", buffer.start()); CHECK_EQ(-15, point); CHECK(FastFixedDtoa(0.000000000000000010006, 20, buffer, &length, &point)); CHECK_EQ("1001", buffer.start()); CHECK_EQ(-16, point); CHECK(FastFixedDtoa(0.000000000000000001006, 20, buffer, &length, &point)); CHECK_EQ("101", buffer.start()); CHECK_EQ(-17, point); CHECK(FastFixedDtoa(0.000000000000000000106, 20, buffer, &length, &point)); CHECK_EQ("11", buffer.start()); CHECK_EQ(-18, point); CHECK(FastFixedDtoa(0.000000000000000000016, 20, buffer, &length, &point)); CHECK_EQ("2", buffer.start()); CHECK_EQ(-19, point); CHECK(FastFixedDtoa(0.6, 0, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(1, point); CHECK(FastFixedDtoa(0.96, 1, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(1, point); CHECK(FastFixedDtoa(0.996, 2, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(1, point); CHECK(FastFixedDtoa(0.9996, 3, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(1, point); CHECK(FastFixedDtoa(0.99996, 4, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(1, point); CHECK(FastFixedDtoa(0.999996, 5, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(1, point); CHECK(FastFixedDtoa(0.9999996, 6, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(1, point); CHECK(FastFixedDtoa(0.99999996, 7, buffer, &length, &point)); 
CHECK_EQ("1", buffer.start()); CHECK_EQ(1, point); CHECK(FastFixedDtoa(0.999999996, 8, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(1, point); CHECK(FastFixedDtoa(0.9999999996, 9, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(1, point); CHECK(FastFixedDtoa(0.99999999996, 10, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(1, point); CHECK(FastFixedDtoa(0.999999999996, 11, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(1, point); CHECK(FastFixedDtoa(0.9999999999996, 12, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(1, point); CHECK(FastFixedDtoa(0.99999999999996, 13, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(1, point); CHECK(FastFixedDtoa(0.999999999999996, 14, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(1, point); CHECK(FastFixedDtoa(0.9999999999999996, 15, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(1, point); CHECK(FastFixedDtoa(0.00999999999999996, 16, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-1, point); CHECK(FastFixedDtoa(0.000999999999999996, 17, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-2, point); CHECK(FastFixedDtoa(0.0000999999999999996, 18, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-3, point); CHECK(FastFixedDtoa(0.00000999999999999996, 19, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-4, point); CHECK(FastFixedDtoa(0.000000999999999999996, 20, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-5, point); CHECK(FastFixedDtoa(323423.234234, 10, buffer, &length, &point)); CHECK_EQ("323423234234", buffer.start()); CHECK_EQ(6, point); CHECK(FastFixedDtoa(12345678.901234, 4, buffer, &length, &point)); CHECK_EQ("123456789012", buffer.start()); CHECK_EQ(8, point); CHECK(FastFixedDtoa(98765.432109, 5, buffer, &length, &point)); CHECK_EQ("9876543211", buffer.start()); CHECK_EQ(5, 
point); CHECK(FastFixedDtoa(42, 20, buffer, &length, &point)); CHECK_EQ("42", buffer.start()); CHECK_EQ(2, point); CHECK(FastFixedDtoa(0.5, 0, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(1, point); CHECK(FastFixedDtoa(1e-23, 10, buffer, &length, &point)); CHECK_EQ("", buffer.start()); CHECK_EQ(-10, point); CHECK(FastFixedDtoa(1e-123, 2, buffer, &length, &point)); CHECK_EQ("", buffer.start()); CHECK_EQ(-2, point); CHECK(FastFixedDtoa(1e-123, 0, buffer, &length, &point)); CHECK_EQ("", buffer.start()); CHECK_EQ(0, point); CHECK(FastFixedDtoa(1e-23, 20, buffer, &length, &point)); CHECK_EQ("", buffer.start()); CHECK_EQ(-20, point); CHECK(FastFixedDtoa(1e-21, 20, buffer, &length, &point)); CHECK_EQ("", buffer.start()); CHECK_EQ(-20, point); CHECK(FastFixedDtoa(1e-22, 20, buffer, &length, &point)); CHECK_EQ("", buffer.start()); CHECK_EQ(-20, point); CHECK(FastFixedDtoa(6e-21, 20, buffer, &length, &point)); CHECK_EQ("1", buffer.start()); CHECK_EQ(-19, point); CHECK(FastFixedDtoa(9.1193616301674545152000000e+19, 0, buffer, &length, &point)); CHECK_EQ("91193616301674545152", buffer.start()); CHECK_EQ(20, point); CHECK(FastFixedDtoa(4.8184662102767651659096515e-04, 19, buffer, &length, &point)); CHECK_EQ("4818466210276765", buffer.start()); CHECK_EQ(-3, point); CHECK(FastFixedDtoa(1.9023164229540652612705182e-23, 8, buffer, &length, &point)); CHECK_EQ("", buffer.start()); CHECK_EQ(-8, point); CHECK(FastFixedDtoa(1000000000000000128.0, 0, buffer, &length, &point)); CHECK_EQ("1000000000000000128", buffer.start()); CHECK_EQ(19, point); } TEST(FastFixedDtoaGayFixed) { char buffer_container[kBufferSize]; Vector<char> buffer(buffer_container, kBufferSize); bool status; int length; int point; Vector<const PrecomputedFixed> precomputed = PrecomputedFixedRepresentations(); for (int i = 0; i < precomputed.length(); ++i) { const PrecomputedFixed current_test = precomputed[i]; double v = current_test.v; int number_digits = current_test.number_digits; status = 
FastFixedDtoa(v, number_digits, buffer, &length, &point); CHECK(status); CHECK_EQ(current_test.decimal_point, point); CHECK(number_digits >= length - point); CHECK_EQ(current_test.representation, buffer.start()); } }
32.685547
77
0.679474
[ "vector" ]
5931d5800d02cd2c2e027fddd0f7b1582e9e7b6a
1,891
cpp
C++
src/storage/serializer/CategorySerializer.cpp
mbassale/ownpass
a84e0cd3933ec8c3febf0e09647990baf3c2d506
[ "MIT" ]
null
null
null
src/storage/serializer/CategorySerializer.cpp
mbassale/ownpass
a84e0cd3933ec8c3febf0e09647990baf3c2d506
[ "MIT" ]
null
null
null
src/storage/serializer/CategorySerializer.cpp
mbassale/ownpass
a84e0cd3933ec8c3febf0e09647990baf3c2d506
[ "MIT" ]
null
null
null
// // Created by Marco Bassaletti on 07-03-21. // #include <boost/uuid/uuid.hpp> #include <boost/uuid/uuid_io.hpp> #include <boost/uuid/string_generator.hpp> #include "GroupSerializer.h" #include "CategorySerializer.h" namespace NSPass::Storage::Serializer { boost::json::object CategorySerializer::serialize(const CategoryPtr& obj) { GroupSerializer group_serializer; return { { "id", boost::uuids::to_string(obj->get_id()) }, { "name", obj->get_name() }, { "groups", group_serializer.serialize(obj->get_groups()) } }; } boost::json::array CategorySerializer::serialize(const std::vector<CategoryPtr>& objs) { boost::json::array category_data; for (auto& category : objs) { auto category_datum = serialize(category); category_data.push_back(category_datum); } return category_data; } CategoryPtr CategorySerializer::deserialize(boost::json::object& obj) { auto& id_str = obj["id"].as_string(); boost::uuids::string_generator gen; boost::uuids::uuid category_id = gen(id_str.c_str()); auto& category_name = obj["name"].as_string(); auto& groups_data = obj["groups"].as_array(); std::vector<GroupPtr> groups; if (!groups_data.empty()) { GroupSerializer group_serializer; groups = group_serializer.deserialize(groups_data); } return std::make_shared<Category>(category_id, category_name.c_str(), groups); } std::vector<CategoryPtr> CategorySerializer::deserialize(boost::json::array& objs) { std::vector<CategoryPtr> categories; categories.reserve(objs.size()); for (auto category_datum : objs) { auto category_obj = category_datum.as_object(); auto category = deserialize(category_obj); categories.push_back(category); } if (categories.empty()) return make_default(); return categories; } std::vector<CategoryPtr> CategorySerializer::make_default() { return std::vector<CategoryPtr>(); } }
28.223881
87
0.718667
[ "object", "vector" ]
59353d650fbda82c29e859265fd032e38f2123bb
3,198
cxx
C++
ThirdParty/QtTesting/vtkqttesting/pqAbstractStringEventPlayer.cxx
brown-ccv/paraview-scalable
64b221a540737d2ac94a120039bd8d1e661bdc8f
[ "Apache-2.0", "BSD-3-Clause" ]
2
2021-07-07T22:53:19.000Z
2021-07-31T19:29:35.000Z
ThirdParty/QtTesting/vtkqttesting/pqAbstractStringEventPlayer.cxx
brown-ccv/paraview-scalable
64b221a540737d2ac94a120039bd8d1e661bdc8f
[ "Apache-2.0", "BSD-3-Clause" ]
2
2020-11-18T16:50:34.000Z
2022-01-21T13:31:47.000Z
ThirdParty/QtTesting/vtkqttesting/pqAbstractStringEventPlayer.cxx
brown-ccv/paraview-scalable
64b221a540737d2ac94a120039bd8d1e661bdc8f
[ "Apache-2.0", "BSD-3-Clause" ]
5
2020-10-02T10:14:35.000Z
2022-03-10T07:50:22.000Z
/*========================================================================= Program: ParaView Module: pqAbstractStringEventPlayer.cxx Copyright (c) 2005-2008 Sandia Corporation, Kitware Inc. All rights reserved. ParaView is a free software; you can redistribute it and/or modify it under the terms of the ParaView license version 1.2. See License_v1.2.txt for the full ParaView license. A copy of this license can be obtained by contacting Kitware Inc. 28 Corporate Drive Clifton Park, NY 12065 USA THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=========================================================================*/ #include "pqAbstractStringEventPlayer.h" #include <QComboBox> #include <QLineEdit> #include <QPlainTextEdit> #include <QTextDocument> #include <QTextEdit> #include <QtDebug> #include "pqObjectNaming.h" pqAbstractStringEventPlayer::pqAbstractStringEventPlayer(QObject* p) : pqWidgetEventPlayer(p) { } bool pqAbstractStringEventPlayer::playEvent( QObject* Object, const QString& Command, const QString& Arguments, bool& Error) { if (Command != "set_string") return false; const QString value = Arguments; if (QComboBox* const comboBox = qobject_cast<QComboBox*>(Object)) { int index = comboBox->findText(value); if (index != -1) { comboBox->setCurrentIndex(index); } else { QString possibles; for (int i = 0; i < comboBox->count(); i++) { possibles += QString("\t") + comboBox->itemText(i) + QString("\n"); } qCritical() << "Unable to find " << value << " in combo box: " << pqObjectNaming::GetName(*Object) << "\nPossible values are:\n" << possibles; Error = true; } return true; } if (QLineEdit* const lineEdit = qobject_cast<QLineEdit*>(Object)) { lineEdit->setText(value); return true; } if (QTextEdit* const textEdit = qobject_cast<QTextEdit*>(Object)) { textEdit->setFocus(Qt::OtherFocusReason); textEdit->document()->setPlainText(value); textEdit->clearFocus(); return true; } if (QPlainTextEdit* const plainTextEdit = qobject_cast<QPlainTextEdit*>(Object)) { plainTextEdit->setFocus(Qt::OtherFocusReason); plainTextEdit->document()->setPlainText(value); plainTextEdit->clearFocus(); return true; } qCritical() << "calling set_string on unhandled type " << Object; Error = true; return true; }
29.88785
82
0.665729
[ "object" ]
593952dfaeccdced6e6da0f86b42dc44fbee8b11
1,750
hpp
C++
include/crocoddyl/core/mathbase.hpp
spykspeigel/crocoddyl
0500e398861564b6986d99206a1e0ccec0d66a33
[ "BSD-3-Clause" ]
322
2019-06-04T12:04:00.000Z
2022-03-28T14:37:44.000Z
include/crocoddyl/core/mathbase.hpp
spykspeigel/crocoddyl
0500e398861564b6986d99206a1e0ccec0d66a33
[ "BSD-3-Clause" ]
954
2019-09-02T10:07:27.000Z
2022-03-31T16:14:25.000Z
include/crocoddyl/core/mathbase.hpp
spykspeigel/crocoddyl
0500e398861564b6986d99206a1e0ccec0d66a33
[ "BSD-3-Clause" ]
89
2019-08-13T13:37:52.000Z
2022-03-31T15:55:07.000Z
/////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // // Copyright (C) 2020-2021, LAAS-CNRS, University of Edinburgh // Copyright note valid unless otherwise stated in individual files. // All rights reserved. /////////////////////////////////////////////////////////////////////////////// #ifndef CROCODDYL_CORE_MATHBASE_HPP_ #define CROCODDYL_CORE_MATHBASE_HPP_ #include <Eigen/Core> #include <Eigen/Geometry> namespace crocoddyl { template <typename _Scalar> struct MathBaseTpl { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW typedef _Scalar Scalar; typedef Eigen::Matrix<Scalar, 2, 1> Vector2s; typedef Eigen::Matrix<Scalar, 3, 1> Vector3s; typedef Eigen::Matrix<Scalar, 4, 1> Vector4s; typedef Eigen::Matrix<Scalar, 6, 1> Vector6s; typedef Eigen::Matrix<Scalar, 2, 2> Matrix2s; typedef Eigen::Matrix<Scalar, 3, 3> Matrix3s; typedef Eigen::Matrix<Scalar, 4, 6> Matrix46s; typedef Eigen::Matrix<Scalar, 6, 6> Matrix6s; typedef Eigen::Matrix<Scalar, 1, 2> RowVector2s; typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 3> MatrixX3s; typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 6> MatrixX6s; typedef Eigen::Matrix<Scalar, 3, Eigen::Dynamic> Matrix3xs; typedef Eigen::Matrix<Scalar, 6, Eigen::Dynamic> Matrix6xs; typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> VectorXs; typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> MatrixXs; typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> MatrixXsRowMajor; typedef Eigen::Array<Scalar, Eigen::Dynamic, 1> ArrayXs; typedef Eigen::Quaternion<Scalar> Quaternions; typedef Eigen::DiagonalMatrix<Scalar, Eigen::Dynamic> DiagonalMatrixXs; }; } // namespace crocoddyl #endif
35.714286
98
0.681714
[ "geometry" ]
593b2afee55c2837b7499e2357b6522109bfa3e6
28,875
cpp
C++
sources/modules/ocl/src/color.cpp
ovb197310/opencv_2.4.13.2
940159dab8ea8f5ee019d2038b59e1daf4119d1c
[ "BSD-3-Clause" ]
null
null
null
sources/modules/ocl/src/color.cpp
ovb197310/opencv_2.4.13.2
940159dab8ea8f5ee019d2038b59e1daf4119d1c
[ "BSD-3-Clause" ]
null
null
null
sources/modules/ocl/src/color.cpp
ovb197310/opencv_2.4.13.2
940159dab8ea8f5ee019d2038b59e1daf4119d1c
[ "BSD-3-Clause" ]
null
null
null
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2010-2012, Institute Of Software Chinese Academy Of Science, all rights reserved. // Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // @Authors // Wang Weiyan, wangweiyanster@gmail.com // Peng Xiao, pengxiao@multicorewareinc.com // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #include "precomp.hpp" #include "opencl_kernels.hpp" using namespace cv; using namespace cv::ocl; static void fromRGB_caller(const oclMat &src, oclMat &dst, int bidx, const std::string & kernelName, const std::string & additionalOptions = std::string(), const oclMat & data1 = oclMat(), const oclMat & data2 = oclMat()) { int src_offset = src.offset / src.elemSize1(), src_step = src.step1(); int dst_offset = dst.offset / dst.elemSize1(), dst_step = dst.step1(); int pixels_per_work_item = 1; if (Context::getContext()->supportsFeature(FEATURE_CL_INTEL_DEVICE)) { if ((src.cols % 4 == 0) && (src.depth() == CV_8U)) pixels_per_work_item = 4; else if (src.cols % 2 == 0) pixels_per_work_item = 2; else pixels_per_work_item = 1; } std::string build_options = format("-D DEPTH_%d -D scn=%d -D bidx=%d -D pixels_per_work_item=%d", src.depth(), src.oclchannels(), bidx, pixels_per_work_item); if (!additionalOptions.empty()) build_options += additionalOptions; vector<pair<size_t , const void *> > args; args.push_back( make_pair( sizeof(cl_int) , (void *)&dst.cols)); args.push_back( make_pair( sizeof(cl_int) , (void *)&dst.rows)); args.push_back( make_pair( sizeof(cl_int) , (void *)&src_step)); args.push_back( make_pair( sizeof(cl_int) , (void *)&dst_step)); args.push_back( make_pair( sizeof(cl_mem) , (void *)&src.data)); args.push_back( make_pair( sizeof(cl_mem) , (void *)&dst.data)); args.push_back( make_pair( sizeof(cl_int) , (void *)&src_offset 
)); args.push_back( make_pair( sizeof(cl_int) , (void *)&dst_offset )); if (!data1.empty()) args.push_back( make_pair( sizeof(cl_mem) , (void *)&data1.data )); if (!data2.empty()) args.push_back( make_pair( sizeof(cl_mem) , (void *)&data2.data )); size_t gt[3] = { (size_t)dst.cols/pixels_per_work_item, (size_t)dst.rows, 1 }; #ifdef ANDROID size_t lt[3] = { 16, 10, 1 }; #else size_t lt[3] = { 16, 16, 1 }; #endif openCLExecuteKernel(src.clCxt, &cvt_color, kernelName.c_str(), gt, lt, args, -1, -1, build_options.c_str()); } static void toHSV_caller(const oclMat &src, oclMat &dst, int bidx, const std::string & kernelName, const std::string & additionalOptions = std::string(), const oclMat & data1 = oclMat(), const oclMat & data2 = oclMat()) { int src_offset = src.offset / src.elemSize1(), src_step = src.step1(); int dst_offset = dst.offset / dst.elemSize1(), dst_step = dst.step1(); std::string build_options = format("-D DEPTH_%d -D scn=%d -D bidx=%d", src.depth(), src.oclchannels(), bidx); if (!additionalOptions.empty()) build_options += additionalOptions; vector<pair<size_t , const void *> > args; args.push_back( make_pair( sizeof(cl_int) , (void *)&dst.cols)); args.push_back( make_pair( sizeof(cl_int) , (void *)&dst.rows)); args.push_back( make_pair( sizeof(cl_int) , (void *)&src_step)); args.push_back( make_pair( sizeof(cl_int) , (void *)&dst_step)); args.push_back( make_pair( sizeof(cl_mem) , (void *)&src.data)); args.push_back( make_pair( sizeof(cl_mem) , (void *)&dst.data)); args.push_back( make_pair( sizeof(cl_int) , (void *)&src_offset )); args.push_back( make_pair( sizeof(cl_int) , (void *)&dst_offset )); if (!data1.empty()) args.push_back( make_pair( sizeof(cl_mem) , (void *)&data1.data )); if (!data2.empty()) args.push_back( make_pair( sizeof(cl_mem) , (void *)&data2.data )); size_t gt[3] = { (size_t)dst.cols, (size_t)dst.rows, 1 }; #ifdef ANDROID size_t lt[3] = { 16, 10, 1 }; #else size_t lt[3] = { 16, 16, 1 }; #endif openCLExecuteKernel(src.clCxt, 
&cvt_color, kernelName.c_str(), gt, lt, args, -1, -1, build_options.c_str()); } static void fromGray_caller(const oclMat &src, oclMat &dst, int bidx, const std::string & kernelName, const std::string & additionalOptions = std::string(), const oclMat & data = oclMat()) { std::string build_options = format("-D DEPTH_%d -D dcn=%d -D bidx=%d", src.depth(), dst.channels(), bidx); if (!additionalOptions.empty()) build_options += additionalOptions; int src_offset = src.offset / src.elemSize1(), src_step = src.step1(); int dst_offset = dst.offset / dst.elemSize1(), dst_step = dst.step1(); vector<pair<size_t , const void *> > args; args.push_back( make_pair( sizeof(cl_int) , (void *)&dst.cols)); args.push_back( make_pair( sizeof(cl_int) , (void *)&dst.rows)); args.push_back( make_pair( sizeof(cl_int) , (void *)&src_step)); args.push_back( make_pair( sizeof(cl_int) , (void *)&dst_step)); args.push_back( make_pair( sizeof(cl_mem) , (void *)&src.data)); args.push_back( make_pair( sizeof(cl_mem) , (void *)&dst.data)); args.push_back( make_pair( sizeof(cl_int) , (void *)&src_offset )); args.push_back( make_pair( sizeof(cl_int) , (void *)&dst_offset )); if (!data.empty()) args.push_back( make_pair( sizeof(cl_mem) , (void *)&data.data )); size_t gt[3] = { (size_t)dst.cols, (size_t)dst.rows, 1 }; #ifdef ANDROID size_t lt[3] = { 16, 10, 1 }; #else size_t lt[3] = { 16, 16, 1 }; #endif openCLExecuteKernel(src.clCxt, &cvt_color, kernelName.c_str(), gt, lt, args, -1, -1, build_options.c_str()); } static void toRGB_caller(const oclMat &src, oclMat &dst, int bidx, const std::string & kernelName, const std::string & additionalOptions = std::string(), const oclMat & data = oclMat()) { int src_offset = src.offset / src.elemSize1(), src_step = src.step1(); int dst_offset = dst.offset / dst.elemSize1(), dst_step = dst.step1(); int pixels_per_work_item = 1; if (Context::getContext()->supportsFeature(FEATURE_CL_INTEL_DEVICE)) { if ((src.cols % 4 == 0) && (src.depth() == CV_8U)) 
pixels_per_work_item = 4; else if (src.cols % 2 == 0) pixels_per_work_item = 2; else pixels_per_work_item = 1; } std::string build_options = format("-D DEPTH_%d -D dcn=%d -D bidx=%d -D pixels_per_work_item=%d", src.depth(), dst.channels(), bidx, pixels_per_work_item); if (!additionalOptions.empty()) build_options += additionalOptions; vector<pair<size_t , const void *> > args; args.push_back( make_pair( sizeof(cl_int) , (void *)&dst.cols)); args.push_back( make_pair( sizeof(cl_int) , (void *)&dst.rows)); args.push_back( make_pair( sizeof(cl_int) , (void *)&src_step)); args.push_back( make_pair( sizeof(cl_int) , (void *)&dst_step)); args.push_back( make_pair( sizeof(cl_mem) , (void *)&src.data)); args.push_back( make_pair( sizeof(cl_mem) , (void *)&dst.data)); args.push_back( make_pair( sizeof(cl_int) , (void *)&src_offset )); args.push_back( make_pair( sizeof(cl_int) , (void *)&dst_offset )); if (!data.empty()) args.push_back( make_pair( sizeof(cl_mem) , (void *)&data.data )); size_t gt[3] = { (size_t)dst.cols/pixels_per_work_item, (size_t)dst.rows, 1 }; #ifdef ANDROID size_t lt[3] = { 16, 10, 1 }; #else size_t lt[3] = { 16, 16, 1 }; #endif openCLExecuteKernel(src.clCxt, &cvt_color, kernelName.c_str(), gt, lt, args, -1, -1, build_options.c_str()); } static void toRGB_NV12_caller(const oclMat &src, oclMat &dst, int bidx, const std::string & kernelName, const std::string & additionalOptions = std::string(), const oclMat & data = oclMat()) { std::string build_options = format("-D DEPTH_%d -D dcn=%d -D bidx=%d", src.depth(), dst.channels(), bidx); if (!additionalOptions.empty()) build_options += additionalOptions; int src_offset = src.offset / src.elemSize1(), src_step = src.step1(); int dst_offset = dst.offset / dst.elemSize1(), dst_step = dst.step1(); vector<pair<size_t , const void *> > args; args.push_back( make_pair( sizeof(cl_int) , (void *)&dst.cols)); args.push_back( make_pair( sizeof(cl_int) , (void *)&dst.rows)); args.push_back( make_pair( sizeof(cl_int) , 
(void *)&src_step)); args.push_back( make_pair( sizeof(cl_int) , (void *)&dst_step)); args.push_back( make_pair( sizeof(cl_mem) , (void *)&src.data)); args.push_back( make_pair( sizeof(cl_mem) , (void *)&dst.data)); args.push_back( make_pair( sizeof(cl_int) , (void *)&src_offset )); args.push_back( make_pair( sizeof(cl_int) , (void *)&dst_offset )); if (!data.empty()) args.push_back( make_pair( sizeof(cl_mem) , (void *)&data.data )); size_t gt[3] = {(size_t)src.cols, (size_t)src.rows, 1}; #ifdef ANDROID size_t lt[3] = {16, 10, 1}; #else size_t lt[3] = {16, 16, 1}; #endif openCLExecuteKernel(src.clCxt, &cvt_color, kernelName.c_str(), gt, lt, args, -1, -1, build_options.c_str()); } static void fromHSV_caller(const oclMat &src, oclMat &dst, int bidx, const std::string & kernelName, const std::string & additionalOptions = std::string(), const oclMat & data = oclMat()) { std::string build_options = format("-D DEPTH_%d -D dcn=%d -D bidx=%d", src.depth(), dst.channels(), bidx); if (!additionalOptions.empty()) build_options += additionalOptions; int src_offset = src.offset / src.elemSize1(), src_step = src.step1(); int dst_offset = dst.offset / dst.elemSize1(), dst_step = dst.step1(); vector<pair<size_t , const void *> > args; args.push_back( make_pair( sizeof(cl_int) , (void *)&dst.cols)); args.push_back( make_pair( sizeof(cl_int) , (void *)&dst.rows)); args.push_back( make_pair( sizeof(cl_int) , (void *)&src_step)); args.push_back( make_pair( sizeof(cl_int) , (void *)&dst_step)); args.push_back( make_pair( sizeof(cl_mem) , (void *)&src.data)); args.push_back( make_pair( sizeof(cl_mem) , (void *)&dst.data)); args.push_back( make_pair( sizeof(cl_int) , (void *)&src_offset )); args.push_back( make_pair( sizeof(cl_int) , (void *)&dst_offset )); if (!data.empty()) args.push_back( make_pair( sizeof(cl_mem) , (void *)&data.data )); size_t gt[3] = { (size_t)dst.cols, (size_t)dst.rows, 1 }; #ifdef ANDROID size_t lt[3] = { 16, 10, 1 }; #else size_t lt[3] = { 16, 16, 1 }; #endif 
openCLExecuteKernel(src.clCxt, &cvt_color, kernelName.c_str(), gt, lt, args, -1, -1, build_options.c_str()); } static void RGB_caller(const oclMat &src, oclMat &dst, bool reverse) { int src_offset = src.offset / src.elemSize1(), src_step = src.step1(); int dst_offset = dst.offset / dst.elemSize1(), dst_step = dst.step1(); std::string build_options = format("-D DEPTH_%d -D dcn=%d -D scn=%d -D %s", src.depth(), dst.channels(), src.channels(), reverse ? "REVERSE" : "ORDER"); vector<pair<size_t , const void *> > args; args.push_back( make_pair( sizeof(cl_int) , (void *)&dst.cols)); args.push_back( make_pair( sizeof(cl_int) , (void *)&dst.rows)); args.push_back( make_pair( sizeof(cl_int) , (void *)&src_step)); args.push_back( make_pair( sizeof(cl_int) , (void *)&dst_step)); args.push_back( make_pair( sizeof(cl_mem) , (void *)&src.data)); args.push_back( make_pair( sizeof(cl_mem) , (void *)&dst.data)); args.push_back( make_pair( sizeof(cl_int) , (void *)&src_offset )); args.push_back( make_pair( sizeof(cl_int) , (void *)&dst_offset )); size_t gt[3] = { (size_t)dst.cols, (size_t)dst.rows, 1 }; #ifdef ANDROID size_t lt[3] = { 16, 10, 1 }; #else size_t lt[3] = { 16, 16, 1 }; #endif openCLExecuteKernel(src.clCxt, &cvt_color, "RGB", gt, lt, args, -1, -1, build_options.c_str()); } static void fromRGB5x5_caller(const oclMat &src, oclMat &dst, int bidx, int greenbits, const std::string & kernelName) { std::string build_options = format("-D DEPTH_%d -D greenbits=%d -D dcn=%d -D bidx=%d", src.depth(), greenbits, dst.channels(), bidx); int src_offset = src.offset >> 1, src_step = src.step >> 1; int dst_offset = dst.offset / dst.elemSize1(), dst_step = dst.step / dst.elemSize1(); vector<pair<size_t , const void *> > args; args.push_back( make_pair( sizeof(cl_int) , (void *)&dst.cols)); args.push_back( make_pair( sizeof(cl_int) , (void *)&dst.rows)); args.push_back( make_pair( sizeof(cl_int) , (void *)&src_step)); args.push_back( make_pair( sizeof(cl_int) , (void *)&dst_step)); 
args.push_back( make_pair( sizeof(cl_mem) , (void *)&src.data)); args.push_back( make_pair( sizeof(cl_mem) , (void *)&dst.data)); args.push_back( make_pair( sizeof(cl_int) , (void *)&src_offset )); args.push_back( make_pair( sizeof(cl_int) , (void *)&dst_offset )); size_t gt[3] = { (size_t)dst.cols, (size_t)dst.rows, 1 }; #ifdef ANDROID size_t lt[3] = { 16, 10, 1 }; #else size_t lt[3] = { 16, 16, 1 }; #endif openCLExecuteKernel(src.clCxt, &cvt_color, kernelName.c_str(), gt, lt, args, -1, -1, build_options.c_str()); } static void toRGB5x5_caller(const oclMat &src, oclMat &dst, int bidx, int greenbits, const std::string & kernelName) { std::string build_options = format("-D DEPTH_%d -D greenbits=%d -D scn=%d -D bidx=%d", src.depth(), greenbits, src.channels(), bidx); int src_offset = (int)src.offset, src_step = (int)src.step; int dst_offset = dst.offset >> 1, dst_step = dst.step >> 1; vector<pair<size_t , const void *> > args; args.push_back( make_pair( sizeof(cl_int) , (void *)&dst.cols)); args.push_back( make_pair( sizeof(cl_int) , (void *)&dst.rows)); args.push_back( make_pair( sizeof(cl_int) , (void *)&src_step)); args.push_back( make_pair( sizeof(cl_int) , (void *)&dst_step)); args.push_back( make_pair( sizeof(cl_mem) , (void *)&src.data)); args.push_back( make_pair( sizeof(cl_mem) , (void *)&dst.data)); args.push_back( make_pair( sizeof(cl_int) , (void *)&src_offset )); args.push_back( make_pair( sizeof(cl_int) , (void *)&dst_offset )); size_t gt[3] = { (size_t)dst.cols, (size_t)dst.rows, 1 }; #ifdef ANDROID size_t lt[3] = { 16, 10, 1 }; #else size_t lt[3] = { 16, 16, 1 }; #endif openCLExecuteKernel(src.clCxt, &cvt_color, kernelName.c_str(), gt, lt, args, -1, -1, build_options.c_str()); } static void cvtColor_caller(const oclMat &src, oclMat &dst, int code, int dcn) { Size sz = src.size(); int scn = src.channels(), depth = src.depth(), bidx; CV_Assert(depth == CV_8U || depth == CV_16U || depth == CV_32F); switch (code) { case CV_BGR2BGRA: case CV_RGB2BGRA: case 
CV_BGRA2BGR: case CV_RGBA2BGR: case CV_RGB2BGR: case CV_BGRA2RGBA: { CV_Assert(scn == 3 || scn == 4); dcn = code == CV_BGR2BGRA || code == CV_RGB2BGRA || code == CV_BGRA2RGBA ? 4 : 3; bool reverse = !(code == CV_BGR2BGRA || code == CV_BGRA2BGR); dst.create(sz, CV_MAKE_TYPE(depth, dcn)); RGB_caller(src, dst, reverse); break; } case CV_BGR2BGR565: case CV_BGR2BGR555: case CV_RGB2BGR565: case CV_RGB2BGR555: case CV_BGRA2BGR565: case CV_BGRA2BGR555: case CV_RGBA2BGR565: case CV_RGBA2BGR555: { CV_Assert((scn == 3 || scn == 4) && depth == CV_8U ); bidx = code == CV_BGR2BGR565 || code == CV_BGR2BGR555 || code == CV_BGRA2BGR565 || code == CV_BGRA2BGR555 ? 0 : 2; int greenbits = code == CV_BGR2BGR565 || code == CV_RGB2BGR565 || code == CV_BGRA2BGR565 || code == CV_RGBA2BGR565 ? 6 : 5; dst.create(sz, CV_8UC2); toRGB5x5_caller(src, dst, bidx, greenbits, "RGB2RGB5x5"); break; } case CV_BGR5652BGR: case CV_BGR5552BGR: case CV_BGR5652RGB: case CV_BGR5552RGB: case CV_BGR5652BGRA: case CV_BGR5552BGRA: case CV_BGR5652RGBA: case CV_BGR5552RGBA: { dcn = code == CV_BGR5652BGRA || code == CV_BGR5552BGRA || code == CV_BGR5652RGBA || code == CV_BGR5552RGBA ? 4 : 3; CV_Assert((dcn == 3 || dcn == 4) && scn == 2 && depth == CV_8U); bidx = code == CV_BGR5652BGR || code == CV_BGR5552BGR || code == CV_BGR5652BGRA || code == CV_BGR5552BGRA ? 0 : 2; int greenbits = code == CV_BGR5652BGR || code == CV_BGR5652RGB || code == CV_BGR5652BGRA || code == CV_BGR5652RGBA ? 6 : 5; dst.create(sz, CV_MAKETYPE(depth, dcn)); fromRGB5x5_caller(src, dst, bidx, greenbits, "RGB5x52RGB"); break; } case CV_BGR5652GRAY: case CV_BGR5552GRAY: { CV_Assert(scn == 2 && depth == CV_8U); dst.create(sz, CV_8UC1); int greenbits = code == CV_BGR5652GRAY ? 6 : 5; fromRGB5x5_caller(src, dst, -1, greenbits, "BGR5x52Gray"); break; } case CV_GRAY2BGR565: case CV_GRAY2BGR555: { CV_Assert(scn == 1 && depth == CV_8U); dst.create(sz, CV_8UC2); int greenbits = code == CV_GRAY2BGR565 ? 
6 : 5; toRGB5x5_caller(src, dst, -1, greenbits, "Gray2BGR5x5"); break; } case CV_RGB2GRAY: case CV_BGR2GRAY: case CV_RGBA2GRAY: case CV_BGRA2GRAY: { CV_Assert(scn == 3 || scn == 4); bidx = code == CV_BGR2GRAY || code == CV_BGRA2GRAY ? 0 : 2; dst.create(sz, CV_MAKETYPE(depth, 1)); fromRGB_caller(src, dst, bidx, "RGB2Gray"); break; } case CV_GRAY2BGR: case CV_GRAY2BGRA: { CV_Assert(scn == 1); dcn = code == CV_GRAY2BGRA ? 4 : 3; dst.create(sz, CV_MAKETYPE(depth, dcn)); fromGray_caller(src, dst, 0, "Gray2RGB"); break; } case CV_BGR2YUV: case CV_RGB2YUV: { CV_Assert(scn == 3 || scn == 4); bidx = code == CV_BGR2YUV ? 0 : 2; dst.create(sz, CV_MAKETYPE(depth, 3)); fromRGB_caller(src, dst, bidx, "RGB2YUV"); break; } case CV_YUV2BGR: case CV_YUV2RGB: { if( dcn <= 0 ) dcn = 3; CV_Assert(scn == 3 && (dcn == 3 || dcn == 4)); bidx = code == CV_YUV2BGR ? 0 : 2; dst.create(sz, CV_MAKETYPE(depth, dcn)); toRGB_caller(src, dst, bidx, "YUV2RGB"); break; } case CV_YUV2RGB_NV12: case CV_YUV2BGR_NV12: case CV_YUV2RGBA_NV12: case CV_YUV2BGRA_NV12: { CV_Assert(scn == 1); CV_Assert( sz.width % 2 == 0 && sz.height % 3 == 0 && depth == CV_8U ); dcn = code == CV_YUV2BGRA_NV12 || code == CV_YUV2RGBA_NV12 ? 4 : 3; bidx = code == CV_YUV2BGRA_NV12 || code == CV_YUV2BGR_NV12 ? 0 : 2; Size dstSz(sz.width, sz.height * 2 / 3); dst.create(dstSz, CV_MAKETYPE(depth, dcn)); toRGB_NV12_caller(src, dst, bidx, "YUV2RGBA_NV12"); break; } case CV_BGR2YCrCb: case CV_RGB2YCrCb: { CV_Assert(scn == 3 || scn == 4); bidx = code == CV_BGR2YCrCb ? 0 : 2; dst.create(sz, CV_MAKETYPE(depth, 3)); fromRGB_caller(src, dst, bidx, "RGB2YCrCb"); break; } case CV_YCrCb2BGR: case CV_YCrCb2RGB: { if( dcn <= 0 ) dcn = 3; CV_Assert(scn == 3 && (dcn == 3 || dcn == 4)); bidx = code == CV_YCrCb2BGR ? 0 : 2; dst.create(sz, CV_MAKETYPE(depth, dcn)); toRGB_caller(src, dst, bidx, "YCrCb2RGB"); break; } case CV_BGR2XYZ: case CV_RGB2XYZ: { CV_Assert(scn == 3 || scn == 4); bidx = code == CV_BGR2XYZ ? 
0 : 2; dst.create(sz, CV_MAKE_TYPE(depth, 3)); Mat c; if (depth == CV_32F) { float coeffs[] = { 0.412453f, 0.357580f, 0.180423f, 0.212671f, 0.715160f, 0.072169f, 0.019334f, 0.119193f, 0.950227f }; if (bidx == 0) { std::swap(coeffs[0], coeffs[2]); std::swap(coeffs[3], coeffs[5]); std::swap(coeffs[6], coeffs[8]); } Mat(1, 9, CV_32FC1, &coeffs[0]).copyTo(c); } else { int coeffs[] = { 1689, 1465, 739, 871, 2929, 296, 79, 488, 3892 }; if (bidx == 0) { std::swap(coeffs[0], coeffs[2]); std::swap(coeffs[3], coeffs[5]); std::swap(coeffs[6], coeffs[8]); } Mat(1, 9, CV_32SC1, &coeffs[0]).copyTo(c); } oclMat oclCoeffs(c); fromRGB_caller(src, dst, bidx, "RGB2XYZ", "", oclCoeffs); break; } case CV_XYZ2BGR: case CV_XYZ2RGB: { if (dcn <= 0) dcn = 3; CV_Assert(scn == 3 && (dcn == 3 || dcn == 4)); bidx = code == CV_XYZ2BGR ? 0 : 2; dst.create(sz, CV_MAKE_TYPE(depth, dcn)); Mat c; if (depth == CV_32F) { float coeffs[] = { 3.240479f, -1.53715f, -0.498535f, -0.969256f, 1.875991f, 0.041556f, 0.055648f, -0.204043f, 1.057311f }; if (bidx == 0) { std::swap(coeffs[0], coeffs[6]); std::swap(coeffs[1], coeffs[7]); std::swap(coeffs[2], coeffs[8]); } Mat(1, 9, CV_32FC1, &coeffs[0]).copyTo(c); } else { int coeffs[] = { 13273, -6296, -2042, -3970, 7684, 170, 228, -836, 4331 }; if (bidx == 0) { std::swap(coeffs[0], coeffs[6]); std::swap(coeffs[1], coeffs[7]); std::swap(coeffs[2], coeffs[8]); } Mat(1, 9, CV_32SC1, &coeffs[0]).copyTo(c); } oclMat oclCoeffs(c); toRGB_caller(src, dst, bidx, "XYZ2RGB", "", oclCoeffs); break; } case CV_BGR2HSV: case CV_RGB2HSV: case CV_BGR2HSV_FULL: case CV_RGB2HSV_FULL: case CV_BGR2HLS: case CV_RGB2HLS: case CV_BGR2HLS_FULL: case CV_RGB2HLS_FULL: { CV_Assert((scn == 3 || scn == 4) && (depth == CV_8U || depth == CV_32F)); bidx = code == CV_BGR2HSV || code == CV_BGR2HLS || code == CV_BGR2HSV_FULL || code == CV_BGR2HLS_FULL ? 0 : 2; int hrange = depth == CV_32F ? 360 : code == CV_BGR2HSV || code == CV_RGB2HSV || code == CV_BGR2HLS || code == CV_RGB2HLS ? 
180 : 256; bool is_hsv = code == CV_BGR2HSV || code == CV_RGB2HSV || code == CV_BGR2HSV_FULL || code == CV_RGB2HSV_FULL; dst.create(sz, CV_MAKETYPE(depth, 3)); std::string kernelName = std::string("RGB2") + (is_hsv ? "HSV" : "HLS"); if (is_hsv && depth == CV_8U) { static oclMat sdiv_data; static oclMat hdiv_data180; static oclMat hdiv_data256; static int sdiv_table[256]; static int hdiv_table180[256]; static int hdiv_table256[256]; static volatile bool initialized180 = false, initialized256 = false; volatile bool & initialized = hrange == 180 ? initialized180 : initialized256; if (!initialized) { int * const hdiv_table = hrange == 180 ? hdiv_table180 : hdiv_table256, hsv_shift = 12; oclMat & hdiv_data = hrange == 180 ? hdiv_data180 : hdiv_data256; sdiv_table[0] = hdiv_table180[0] = hdiv_table256[0] = 0; int v = 255 << hsv_shift; if (!initialized180 && !initialized256) { for(int i = 1; i < 256; i++ ) sdiv_table[i] = saturate_cast<int>(v/(1.*i)); sdiv_data.upload(Mat(1, 256, CV_32SC1, sdiv_table)); } v = hrange << hsv_shift; for (int i = 1; i < 256; i++ ) hdiv_table[i] = saturate_cast<int>(v/(6.*i)); hdiv_data.upload(Mat(1, 256, CV_32SC1, hdiv_table)); initialized = true; } toHSV_caller(src, dst, bidx, kernelName, format(" -D hrange=%d", hrange), sdiv_data, hrange == 256 ? hdiv_data256 : hdiv_data180); return; } toHSV_caller(src, dst, bidx, kernelName, format(" -D hscale=%f", hrange*(1.f/360.f))); break; } case CV_HSV2BGR: case CV_HSV2RGB: case CV_HSV2BGR_FULL: case CV_HSV2RGB_FULL: case CV_HLS2BGR: case CV_HLS2RGB: case CV_HLS2BGR_FULL: case CV_HLS2RGB_FULL: { if (dcn <= 0) dcn = 3; CV_Assert(scn == 3 && (dcn == 3 || dcn == 4) && (depth == CV_8U || depth == CV_32F)); bidx = code == CV_HSV2BGR || code == CV_HLS2BGR || code == CV_HSV2BGR_FULL || code == CV_HLS2BGR_FULL ? 0 : 2; int hrange = depth == CV_32F ? 360 : code == CV_HSV2BGR || code == CV_HSV2RGB || code == CV_HLS2BGR || code == CV_HLS2RGB ? 
180 : 255; bool is_hsv = code == CV_HSV2BGR || code == CV_HSV2RGB || code == CV_HSV2BGR_FULL || code == CV_HSV2RGB_FULL; dst.create(sz, CV_MAKETYPE(depth, dcn)); std::string kernelName = std::string(is_hsv ? "HSV" : "HLS") + "2RGB"; fromHSV_caller(src, dst, bidx, kernelName, format(" -D hrange=%d -D hscale=%f", hrange, 6.f/hrange)); break; } case CV_RGBA2mRGBA: case CV_mRGBA2RGBA: { CV_Assert(scn == 4 && depth == CV_8U); dst.create(sz, CV_MAKETYPE(depth, 4)); std::string kernelName = code == CV_RGBA2mRGBA ? "RGBA2mRGBA" : "mRGBA2RGBA"; fromRGB_caller(src, dst, 0, kernelName); break; } default: CV_Error( CV_StsBadFlag, "Unknown/unsupported color conversion code" ); } } void cv::ocl::cvtColor(const oclMat &src, oclMat &dst, int code, int dcn) { cvtColor_caller(src, dst, code, dcn); }
43.949772
163
0.580571
[ "vector" ]
593def80f0329f8c91b442d6508e4ab9513ff70f
1,413
cpp
C++
Problem801-900/p830_1.cpp
dingqunfei/LeetCode
c74a21ea56ee7b35308d2f387ef24ab29b031e24
[ "Apache-2.0" ]
null
null
null
Problem801-900/p830_1.cpp
dingqunfei/LeetCode
c74a21ea56ee7b35308d2f387ef24ab29b031e24
[ "Apache-2.0" ]
null
null
null
Problem801-900/p830_1.cpp
dingqunfei/LeetCode
c74a21ea56ee7b35308d2f387ef24ab29b031e24
[ "Apache-2.0" ]
null
null
null
/** * @file p830_1.cpp * @brief * @author dingqunfei (dqflying@gmail.com) * @version 1.0 * @date 2021-04-07 * * @copyright Copyright (c) 2021 DQFLYING * * @par : * * * Date : 2021-04-07 * Version : 1.0 * Author : dqflying * Lisence : * Description : * * * * */ class Solution { public: vector<vector<int>> largeGroupPositions(string s) { vector<vector<int>> res; if(s.empty()) { return res; } int start_index = 0; char last_ch = s[0]; int curr_index = 0; char curr_ch = s[0]; for(auto it = s.cbegin(); it != s.cend(); ++it) { curr_ch = *it; if(last_ch != curr_ch) { if(curr_index - start_index >= 3) { vector<int> vec; vec.push_back(start_index); vec.push_back(curr_index-1); res.push_back(vec); } start_index = curr_index; } last_ch = curr_ch; ++curr_index; } if(curr_index - start_index >= 3) { vector<int> vec; vec.push_back(start_index); vec.push_back(curr_index-1); res.push_back(vec); } return res; } };
21.738462
55
0.428875
[ "vector" ]
86d0a25f512d22157cdf7ca1d10b220512f6cd7c
1,689
cpp
C++
Summer Graph Training 2013/A.cpp
michaelarakel/local-trainings-and-upsolvings
7ec663fd80e6a9f7c9ffa37bd97b5197f1e4a73c
[ "Unlicense" ]
null
null
null
Summer Graph Training 2013/A.cpp
michaelarakel/local-trainings-and-upsolvings
7ec663fd80e6a9f7c9ffa37bd97b5197f1e4a73c
[ "Unlicense" ]
null
null
null
Summer Graph Training 2013/A.cpp
michaelarakel/local-trainings-and-upsolvings
7ec663fd80e6a9f7c9ffa37bd97b5197f1e4a73c
[ "Unlicense" ]
null
null
null
#include <iostream> #include <algorithm> #include <vector> #define edge pair <int, pair <int, int> > const int INF = 1000 * 1000 * 1000; using namespace std; void dfs (const vector <vector <int> >& g, vector <char>& used, const int node, vector <int>& connected) { used[node] = true; connected.push_back(node); for (int i = 0; i < g[node].size(); ++i) if (!used[g[node][i]]) dfs(g, used, g[node][i], connected); } int main () { int n, m; cin >> n >> m; vector <edge> g; vector <vector <int> > graph(n); for (int i = 0; i < m; ++i) { int a, b, c; cin >> a >> b >> c; graph[b - 1].push_back(a - 1); g.push_back(make_pair(-c, make_pair(a - 1, b - 1))); } vector <int> d(n, INF); d[0] = 0; vector <int> changed; for (int i = 0; i < n; ++i) { bool any_change = false; for (int j = 0; j < m; ++j) { if (d[g[j].second.first] < INF) { if (d[g[j].second.second] > (d[g[j].second.first] + g[j].first)) { d[g[j].second.second] = (d[g[j].second.first] + g[j].first); any_change = true; if (i == n - 1) changed.push_back(g[j].second.second); } } } if (!any_change) break; if (i == n - 1) { vector <char> used(n); vector <int> connected; dfs(graph, used, n - 1, connected); vector <int> end; sort(changed.begin(), changed.end()); sort(connected.begin(), connected.end()); set_intersection(changed.begin(), changed.end(), connected.begin(), connected.end(), back_inserter(end)); if (end.size() != 0) { cout << ":)"; return 0; } } } if (d[n - 1] == INF) cout << ":("; else cout << -d[n - 1]; }
22.824324
109
0.521018
[ "vector" ]
86d4f3bcf38ffec7c2166c6eddefb0cb5c002306
26,778
cpp
C++
neo/renderer/Interaction.cpp
vic3t3chn0/OpenKrown
201c8fb6895cb0439e39c984d2fbc2c2eaf185b4
[ "MIT" ]
1
2018-11-07T22:44:23.000Z
2018-11-07T22:44:23.000Z
neo/renderer/Interaction.cpp
vic3t3chn0/OpenKrown
201c8fb6895cb0439e39c984d2fbc2c2eaf185b4
[ "MIT" ]
null
null
null
neo/renderer/Interaction.cpp
vic3t3chn0/OpenKrown
201c8fb6895cb0439e39c984d2fbc2c2eaf185b4
[ "MIT" ]
null
null
null
/* =========================================================================== Doom 3 BFG Edition GPL Source Code Copyright (C) 1993-2012 id Software LLC, a ZeniMax Media company. This file is part of the Doom 3 BFG Edition GPL Source Code ("Doom 3 BFG Edition Source Code"). Doom 3 BFG Edition Source Code is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Doom 3 BFG Edition Source Code is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Doom 3 BFG Edition Source Code. If not, see <http://www.gnu.org/licenses/>. In addition, the Doom 3 BFG Edition Source Code is also subject to certain additional terms. You should have received a copy of these additional terms immediately following the terms and conditions of the GNU General Public License which accompanied the Doom 3 BFG Edition Source Code. If not, please request a copy in writing from id Software at the address below. If you have questions concerning this license or the applicable additional terms, you may contact in writing id Software LLC, c/o ZeniMax Media Inc., Suite 120, Rockville, Maryland 20850 USA. =========================================================================== */ #pragma hdrstop #include "precompiled.h" #include "tr_local.h" /* =========================================================================== idInteraction implementation =========================================================================== */ /* ================ R_CalcInteractionFacing Determines which triangles of the surface are facing towards the light origin. 
The facing array should be allocated with one extra index than the number of surface triangles, which will be used to handle dangling edge silhouettes. ================ */ void R_CalcInteractionFacing( const idRenderEntityLocal* ent, const srfTriangles_t* tri, const idRenderLightLocal* light, srfCullInfo_t& cullInfo ) { SCOPED_PROFILE_EVENT( "R_CalcInteractionFacing" ); if( cullInfo.facing != NULL ) { return; } idVec3 localLightOrigin; R_GlobalPointToLocal( ent->modelMatrix, light->globalLightOrigin, localLightOrigin ); const int numFaces = tri->numIndexes / 3; cullInfo.facing = ( byte* ) R_StaticAlloc( ( numFaces + 1 ) * sizeof( cullInfo.facing[0] ), TAG_RENDER_INTERACTION ); // exact geometric cull against face for( int i = 0, face = 0; i < tri->numIndexes; i += 3, face++ ) { const idDrawVert& v0 = tri->verts[tri->indexes[i + 0]]; const idDrawVert& v1 = tri->verts[tri->indexes[i + 1]]; const idDrawVert& v2 = tri->verts[tri->indexes[i + 2]]; const idPlane plane( v0.xyz, v1.xyz, v2.xyz ); const float d = plane.Distance( localLightOrigin ); cullInfo.facing[face] = ( d >= 0.0f ); } cullInfo.facing[numFaces] = 1; // for dangling edges to reference } /* ===================== R_CalcInteractionCullBits We want to cull a little on the sloppy side, because the pre-clipping of geometry to the lights in dmap will give many cases that are right at the border. We throw things out on the border, because if any one vertex is clearly inside, the entire triangle will be accepted. 
===================== */ void R_CalcInteractionCullBits( const idRenderEntityLocal* ent, const srfTriangles_t* tri, const idRenderLightLocal* light, srfCullInfo_t& cullInfo ) { SCOPED_PROFILE_EVENT( "R_CalcInteractionCullBits" ); if( cullInfo.cullBits != NULL ) { return; } idPlane frustumPlanes[6]; idRenderMatrix::GetFrustumPlanes( frustumPlanes, light->baseLightProject, true, true ); int frontBits = 0; // cull the triangle surface bounding box for( int i = 0; i < 6; i++ ) { R_GlobalPlaneToLocal( ent->modelMatrix, frustumPlanes[i], cullInfo.localClipPlanes[i] ); // get front bits for the whole surface if( tri->bounds.PlaneDistance( cullInfo.localClipPlanes[i] ) >= LIGHT_CLIP_EPSILON ) { frontBits |= 1 << i; } } // if the surface is completely inside the light frustum if( frontBits == ( ( 1 << 6 ) - 1 ) ) { cullInfo.cullBits = LIGHT_CULL_ALL_FRONT; return; } cullInfo.cullBits = ( byte* ) R_StaticAlloc( tri->numVerts * sizeof( cullInfo.cullBits[0] ), TAG_RENDER_INTERACTION ); memset( cullInfo.cullBits, 0, tri->numVerts * sizeof( cullInfo.cullBits[0] ) ); for( int i = 0; i < 6; i++ ) { // if completely infront of this clipping plane if( frontBits & ( 1 << i ) ) { continue; } for( int j = 0; j < tri->numVerts; j++ ) { float d = cullInfo.localClipPlanes[i].Distance( tri->verts[j].xyz ); cullInfo.cullBits[j] |= ( d < LIGHT_CLIP_EPSILON ) << i; } } } /* ================ R_FreeInteractionCullInfo ================ */ void R_FreeInteractionCullInfo( srfCullInfo_t& cullInfo ) { if( cullInfo.facing != NULL ) { R_StaticFree( cullInfo.facing ); cullInfo.facing = NULL; } if( cullInfo.cullBits != NULL ) { if( cullInfo.cullBits != LIGHT_CULL_ALL_FRONT ) { R_StaticFree( cullInfo.cullBits ); } cullInfo.cullBits = NULL; } } /* ==================== R_CreateInteractionLightTris This is only used for the static interaction case, dynamic interactions just draw everything and let the GPU deal with it. 
The resulting surface will be a subset of the original triangles, it will never clip triangles, but it may cull on a per-triangle basis. ==================== */ static srfTriangles_t* R_CreateInteractionLightTris( const idRenderEntityLocal* ent, const srfTriangles_t* tri, const idRenderLightLocal* light, const idMaterial* shader ) { SCOPED_PROFILE_EVENT( "R_CreateInteractionLightTris" ); int i; int numIndexes; triIndex_t* indexes; srfTriangles_t* newTri; int c_backfaced; int c_distance; idBounds bounds; bool includeBackFaces; int faceNum; c_backfaced = 0; c_distance = 0; numIndexes = 0; indexes = NULL; // it is debatable if non-shadowing lights should light back faces. we aren't at the moment // RB: now we do with r_useHalfLambert, so don't cull back faces if we have smooth shadowing enabled if( r_lightAllBackFaces.GetBool() || light->lightShader->LightEffectsBackSides() || shader->ReceivesLightingOnBackSides() || ent->parms.noSelfShadow || ent->parms.noShadow || ( r_useHalfLambertLighting.GetInteger() && r_useShadowMapping.GetBool() ) ) { includeBackFaces = true; } else { includeBackFaces = false; } // allocate a new surface for the lit triangles newTri = R_AllocStaticTriSurf(); // save a reference to the original surface newTri->ambientSurface = const_cast<srfTriangles_t*>( tri ); // the light surface references the verts of the ambient surface newTri->numVerts = tri->numVerts; R_ReferenceStaticTriSurfVerts( newTri, tri ); // calculate cull information srfCullInfo_t cullInfo = {}; if( !includeBackFaces ) { R_CalcInteractionFacing( ent, tri, light, cullInfo ); } R_CalcInteractionCullBits( ent, tri, light, cullInfo ); // if the surface is completely inside the light frustum if( cullInfo.cullBits == LIGHT_CULL_ALL_FRONT ) { // if we aren't self shadowing, let back facing triangles get // through so the smooth shaded bump maps light all the way around if( includeBackFaces ) { // the whole surface is lit so the light surface just references the indexes of the ambient 
surface newTri->indexes = tri->indexes; newTri->indexCache = tri->indexCache; // R_ReferenceStaticTriSurfIndexes( newTri, tri ); numIndexes = tri->numIndexes; bounds = tri->bounds; } else { // the light tris indexes are going to be a subset of the original indexes so we generally // allocate too much memory here but we decrease the memory block when the number of indexes is known R_AllocStaticTriSurfIndexes( newTri, tri->numIndexes ); // back face cull the individual triangles indexes = newTri->indexes; const byte* facing = cullInfo.facing; for( faceNum = i = 0; i < tri->numIndexes; i += 3, faceNum++ ) { if( !facing[ faceNum ] ) { c_backfaced++; continue; } indexes[numIndexes + 0] = tri->indexes[i + 0]; indexes[numIndexes + 1] = tri->indexes[i + 1]; indexes[numIndexes + 2] = tri->indexes[i + 2]; numIndexes += 3; } // get bounds for the surface SIMDProcessor->MinMax( bounds[0], bounds[1], tri->verts, indexes, numIndexes ); // decrease the size of the memory block to the size of the number of used indexes newTri->numIndexes = numIndexes; R_ResizeStaticTriSurfIndexes( newTri, numIndexes ); } } else { // the light tris indexes are going to be a subset of the original indexes so we generally // allocate too much memory here but we decrease the memory block when the number of indexes is known R_AllocStaticTriSurfIndexes( newTri, tri->numIndexes ); // cull individual triangles indexes = newTri->indexes; const byte* facing = cullInfo.facing; const byte* cullBits = cullInfo.cullBits; for( faceNum = i = 0; i < tri->numIndexes; i += 3, faceNum++ ) { int i1, i2, i3; // if we aren't self shadowing, let back facing triangles get // through so the smooth shaded bump maps light all the way around if( !includeBackFaces ) { // back face cull if( !facing[ faceNum ] ) { c_backfaced++; continue; } } i1 = tri->indexes[i + 0]; i2 = tri->indexes[i + 1]; i3 = tri->indexes[i + 2]; // fast cull outside the frustum // if all three points are off one plane side, it definately isn't visible if( 
cullBits[i1] & cullBits[i2] & cullBits[i3] ) { c_distance++; continue; } // add to the list indexes[numIndexes + 0] = i1; indexes[numIndexes + 1] = i2; indexes[numIndexes + 2] = i3; numIndexes += 3; } // get bounds for the surface SIMDProcessor->MinMax( bounds[0], bounds[1], tri->verts, indexes, numIndexes ); // decrease the size of the memory block to the size of the number of used indexes newTri->numIndexes = numIndexes; R_ResizeStaticTriSurfIndexes( newTri, numIndexes ); } // free the cull information when it's no longer needed R_FreeInteractionCullInfo( cullInfo ); if( !numIndexes ) { R_FreeStaticTriSurf( newTri ); return NULL; } newTri->numIndexes = numIndexes; newTri->bounds = bounds; return newTri; } /* ===================== R_CreateInteractionShadowVolume Note that dangling edges outside the light frustum don't make silhouette planes because a triangle outside the light frustum is considered facing and the "fake triangle" on the outside of the dangling edge is also set to facing: cullInfo.facing[numFaces] = 1; ===================== */ static srfTriangles_t* R_CreateInteractionShadowVolume( const idRenderEntityLocal* ent, const srfTriangles_t* tri, const idRenderLightLocal* light ) { SCOPED_PROFILE_EVENT( "R_CreateInteractionShadowVolume" ); srfCullInfo_t cullInfo = {}; R_CalcInteractionFacing( ent, tri, light, cullInfo ); R_CalcInteractionCullBits( ent, tri, light, cullInfo ); int numFaces = tri->numIndexes / 3; int numShadowingFaces = 0; const byte* facing = cullInfo.facing; // if all the triangles are inside the light frustum if( cullInfo.cullBits == LIGHT_CULL_ALL_FRONT ) { // count the number of shadowing faces for( int i = 0; i < numFaces; i++ ) { numShadowingFaces += facing[i]; } numShadowingFaces = numFaces - numShadowingFaces; } else { // make all triangles that are outside the light frustum "facing", so they won't cast shadows const triIndex_t* indexes = tri->indexes; byte* modifyFacing = cullInfo.facing; const byte* cullBits = cullInfo.cullBits; 
for( int i = 0, j = 0; i < tri->numIndexes; i += 3, j++ ) { if( !modifyFacing[j] ) { int i1 = indexes[i + 0]; int i2 = indexes[i + 1]; int i3 = indexes[i + 2]; if( cullBits[i1] & cullBits[i2] & cullBits[i3] ) { modifyFacing[j] = 1; } else { numShadowingFaces++; } } } } if( !numShadowingFaces ) { // no faces are inside the light frustum and still facing the right way R_FreeInteractionCullInfo( cullInfo ); return NULL; } // shadowVerts will be NULL on these surfaces, so the shadowVerts will be taken from the ambient surface srfTriangles_t* newTri = R_AllocStaticTriSurf(); newTri->numVerts = tri->numVerts * 2; // alloc the max possible size R_AllocStaticTriSurfIndexes( newTri, ( numShadowingFaces + tri->numSilEdges ) * 6 ); triIndex_t* tempIndexes = newTri->indexes; triIndex_t* shadowIndexes = newTri->indexes; // create new triangles along sil planes const silEdge_t* sil = tri->silEdges; for( int i = tri->numSilEdges; i > 0; i--, sil++ ) { int f1 = facing[sil->p1]; int f2 = facing[sil->p2]; if( !( f1 ^ f2 ) ) { continue; } int v1 = sil->v1 << 1; int v2 = sil->v2 << 1; // set the two triangle winding orders based on facing // without using a poorly-predictable branch shadowIndexes[0] = v1; shadowIndexes[1] = v2 ^ f1; shadowIndexes[2] = v2 ^ f2; shadowIndexes[3] = v1 ^ f2; shadowIndexes[4] = v1 ^ f1; shadowIndexes[5] = v2 ^ 1; shadowIndexes += 6; } int numShadowIndexes = shadowIndexes - tempIndexes; // we aren't bothering to separate front and back caps on these newTri->numIndexes = newTri->numShadowIndexesNoFrontCaps = numShadowIndexes + numShadowingFaces * 6; newTri->numShadowIndexesNoCaps = numShadowIndexes; newTri->shadowCapPlaneBits = SHADOW_CAP_INFINITE; // decrease the size of the memory block to only store the used indexes // R_ResizeStaticTriSurfIndexes( newTri, newTri->numIndexes ); // these have no effect, because they extend to infinity newTri->bounds.Clear(); // put some faces on the model and some on the distant projection const triIndex_t* indexes = 
tri->indexes; shadowIndexes = newTri->indexes + numShadowIndexes; for( int i = 0, j = 0; i < tri->numIndexes; i += 3, j++ ) { if( facing[j] ) { continue; } int i0 = indexes[i + 0] << 1; int i1 = indexes[i + 1] << 1; int i2 = indexes[i + 2] << 1; shadowIndexes[0] = i2; shadowIndexes[1] = i1; shadowIndexes[2] = i0; shadowIndexes[3] = i0 ^ 1; shadowIndexes[4] = i1 ^ 1; shadowIndexes[5] = i2 ^ 1; shadowIndexes += 6; } R_FreeInteractionCullInfo( cullInfo ); return newTri; } /* =============== idInteraction::idInteraction =============== */ idInteraction::idInteraction() { numSurfaces = 0; surfaces = NULL; entityDef = NULL; lightDef = NULL; lightNext = NULL; lightPrev = NULL; entityNext = NULL; entityPrev = NULL; staticInteraction = false; } /* =============== idInteraction::AllocAndLink =============== */ idInteraction* idInteraction::AllocAndLink( idRenderEntityLocal* edef, idRenderLightLocal* ldef ) { if( edef == NULL || ldef == NULL ) { common->Error( "idInteraction::AllocAndLink: NULL parm" ); return NULL; } idRenderWorldLocal* renderWorld = edef->world; idInteraction* interaction = renderWorld->interactionAllocator.Alloc(); // link and initialize interaction->lightDef = ldef; interaction->entityDef = edef; interaction->numSurfaces = -1; // not checked yet interaction->surfaces = NULL; // link at the start of the entity's list interaction->lightNext = ldef->firstInteraction; interaction->lightPrev = NULL; ldef->firstInteraction = interaction; if( interaction->lightNext != NULL ) { interaction->lightNext->lightPrev = interaction; } else { ldef->lastInteraction = interaction; } // link at the start of the light's list interaction->entityNext = edef->firstInteraction; interaction->entityPrev = NULL; edef->firstInteraction = interaction; if( interaction->entityNext != NULL ) { interaction->entityNext->entityPrev = interaction; } else { edef->lastInteraction = interaction; } // update the interaction table if( renderWorld->interactionTable != NULL ) { int index = 
ldef->index * renderWorld->interactionTableWidth + edef->index; if( renderWorld->interactionTable[index] != NULL ) { common->Error( "idInteraction::AllocAndLink: non NULL table entry" ); } renderWorld->interactionTable[ index ] = interaction; } return interaction; } /* =============== idInteraction::FreeSurfaces Frees the surfaces, but leaves the interaction linked in, so it will be regenerated automatically =============== */ void idInteraction::FreeSurfaces() { // anything regenerated is no longer an optimized static version this->staticInteraction = false; if( this->surfaces != NULL ) { for( int i = 0; i < this->numSurfaces; i++ ) { surfaceInteraction_t& srf = this->surfaces[i]; Mem_Free( srf.shadowIndexes ); srf.shadowIndexes = NULL; } R_StaticFree( this->surfaces ); this->surfaces = NULL; } this->numSurfaces = -1; } /* =============== idInteraction::Unlink =============== */ void idInteraction::Unlink() { // unlink from the entity's list if( this->entityPrev ) { this->entityPrev->entityNext = this->entityNext; } else { this->entityDef->firstInteraction = this->entityNext; } if( this->entityNext ) { this->entityNext->entityPrev = this->entityPrev; } else { this->entityDef->lastInteraction = this->entityPrev; } this->entityNext = this->entityPrev = NULL; // unlink from the light's list if( this->lightPrev ) { this->lightPrev->lightNext = this->lightNext; } else { this->lightDef->firstInteraction = this->lightNext; } if( this->lightNext ) { this->lightNext->lightPrev = this->lightPrev; } else { this->lightDef->lastInteraction = this->lightPrev; } this->lightNext = this->lightPrev = NULL; } /* =============== idInteraction::UnlinkAndFree Removes links and puts it back on the free list. 
=============== */ void idInteraction::UnlinkAndFree() { // clear the table pointer idRenderWorldLocal* renderWorld = this->lightDef->world; // RB: added check for NULL if( renderWorld->interactionTable != NULL ) { int index = this->lightDef->index * renderWorld->interactionTableWidth + this->entityDef->index; if( renderWorld->interactionTable[index] != this && renderWorld->interactionTable[index] != INTERACTION_EMPTY ) { common->Error( "idInteraction::UnlinkAndFree: interactionTable wasn't set" ); } renderWorld->interactionTable[index] = NULL; } // RB end Unlink(); FreeSurfaces(); // put it back on the free list renderWorld->interactionAllocator.Free( this ); } /* =============== idInteraction::MakeEmpty Relinks the interaction at the end of both the light and entity chains and adds the INTERACTION_EMPTY marker to the interactionTable. It is necessary to keep the empty interaction so when entities or lights move they can set all the interactionTable values to NULL. =============== */ void idInteraction::MakeEmpty() { // an empty interaction has no surfaces numSurfaces = 0; Unlink(); // relink at the end of the entity's list this->entityNext = NULL; this->entityPrev = this->entityDef->lastInteraction; this->entityDef->lastInteraction = this; if( this->entityPrev ) { this->entityPrev->entityNext = this; } else { this->entityDef->firstInteraction = this; } // relink at the end of the light's list this->lightNext = NULL; this->lightPrev = this->lightDef->lastInteraction; this->lightDef->lastInteraction = this; if( this->lightPrev ) { this->lightPrev->lightNext = this; } else { this->lightDef->firstInteraction = this; } // store the special marker in the interaction table const int interactionIndex = lightDef->index * entityDef->world->interactionTableWidth + entityDef->index; assert( entityDef->world->interactionTable[ interactionIndex ] == this ); entityDef->world->interactionTable[ interactionIndex ] = INTERACTION_EMPTY; } /* =============== 
idInteraction::HasShadows =============== */ bool idInteraction::HasShadows() const { return !entityDef->parms.noShadow && lightDef->LightCastsShadows(); } /* ====================== CreateStaticInteraction Called by idRenderWorldLocal::GenerateAllInteractions ====================== */ void idInteraction::CreateStaticInteraction() { // note that it is a static interaction staticInteraction = true; const idRenderModel* model = entityDef->parms.hModel; if( model == NULL || model->NumSurfaces() <= 0 || model->IsDynamicModel() != DM_STATIC ) { MakeEmpty(); return; } const idBounds bounds = model->Bounds( &entityDef->parms ); // if it doesn't contact the light frustum, none of the surfaces will if( R_CullModelBoundsToLight( lightDef, bounds, entityDef->modelRenderMatrix ) ) { MakeEmpty(); return; } // // create slots for each of the model's surfaces // numSurfaces = model->NumSurfaces(); surfaces = ( surfaceInteraction_t* )R_ClearedStaticAlloc( sizeof( *surfaces ) * numSurfaces ); bool interactionGenerated = false; // check each surface in the model for( int c = 0 ; c < model->NumSurfaces() ; c++ ) { const modelSurface_t* surf = model->Surface( c ); const srfTriangles_t* tri = surf->geometry; if( tri == NULL ) { continue; } // determine the shader for this surface, possibly by skinning // Note that this will be wrong if customSkin/customShader are // changed after map load time without invalidating the interaction! 
const idMaterial* const shader = R_RemapShaderBySkin( surf->shader, entityDef->parms.customSkin, entityDef->parms.customShader ); if( shader == NULL ) { continue; } // try to cull each surface if( R_CullModelBoundsToLight( lightDef, tri->bounds, entityDef->modelRenderMatrix ) ) { continue; } surfaceInteraction_t* sint = &surfaces[c]; // generate a set of indexes for the lit surfaces, culling away triangles that are // not at least partially inside the light if( shader->ReceivesLighting() ) { srfTriangles_t* lightTris = R_CreateInteractionLightTris( entityDef, tri, lightDef, shader ); if( lightTris != NULL ) { // make a static index cache sint->numLightTrisIndexes = lightTris->numIndexes; sint->lightTrisIndexCache = vertexCache.AllocStaticIndex( lightTris->indexes, ALIGN( lightTris->numIndexes * sizeof( lightTris->indexes[0] ), INDEX_CACHE_ALIGN ) ); interactionGenerated = true; R_FreeStaticTriSurf( lightTris ); } } // if the interaction has shadows and this surface casts a shadow if( HasShadows() && shader->SurfaceCastsShadow() && tri->silEdges != NULL ) { // if the light has an optimized shadow volume, don't create shadows for any models that are part of the base areas if( lightDef->parms.prelightModel == NULL || !model->IsStaticWorldModel() || r_skipPrelightShadows.GetBool() ) { srfTriangles_t* shadowTris = R_CreateInteractionShadowVolume( entityDef, tri, lightDef ); if( shadowTris != NULL ) { // make a static index cache sint->shadowIndexCache = vertexCache.AllocStaticIndex( shadowTris->indexes, ALIGN( shadowTris->numIndexes * sizeof( shadowTris->indexes[0] ), INDEX_CACHE_ALIGN ) ); sint->numShadowIndexes = shadowTris->numIndexes; #if defined( KEEP_INTERACTION_CPU_DATA ) sint->shadowIndexes = shadowTris->indexes; shadowTris->indexes = NULL; #endif if( shader->Coverage() != MC_OPAQUE ) { // if any surface is a shadow-casting perforated or translucent surface, or the // base surface is suppressed in the view (world weapon shadows) we can't use // the external 
shadow optimizations because we can see through some of the faces sint->numShadowIndexesNoCaps = shadowTris->numIndexes; } else { sint->numShadowIndexesNoCaps = shadowTris->numShadowIndexesNoCaps; } R_FreeStaticTriSurf( shadowTris ); } interactionGenerated = true; } } } // if none of the surfaces generated anything, don't even bother checking? if( !interactionGenerated ) { MakeEmpty(); } } /* =================== R_ShowInteractionMemory_f =================== */ void R_ShowInteractionMemory_f( const idCmdArgs& args ) { int entities = 0; int interactions = 0; int deferredInteractions = 0; int emptyInteractions = 0; int lightTris = 0; int lightTriIndexes = 0; int shadowTris = 0; int shadowTriIndexes = 0; int maxInteractionsForEntity = 0; int maxInteractionsForLight = 0; for( int i = 0; i < tr.primaryWorld->lightDefs.Num(); i++ ) { idRenderLightLocal* light = tr.primaryWorld->lightDefs[i]; if( light == NULL ) { continue; } int numInteractionsForLight = 0; for( idInteraction* inter = light->firstInteraction; inter != NULL; inter = inter->lightNext ) { if( !inter->IsEmpty() ) { numInteractionsForLight++; } } if( numInteractionsForLight > maxInteractionsForLight ) { maxInteractionsForLight = numInteractionsForLight; } } for( int i = 0; i < tr.primaryWorld->entityDefs.Num(); i++ ) { idRenderEntityLocal* def = tr.primaryWorld->entityDefs[i]; if( def == NULL ) { continue; } if( def->firstInteraction == NULL ) { continue; } entities++; int numInteractionsForEntity = 0; for( idInteraction* inter = def->firstInteraction; inter != NULL; inter = inter->entityNext ) { interactions++; if( !inter->IsEmpty() ) { numInteractionsForEntity++; } if( inter->IsDeferred() ) { deferredInteractions++; continue; } if( inter->IsEmpty() ) { emptyInteractions++; continue; } for( int j = 0; j < inter->numSurfaces; j++ ) { surfaceInteraction_t* srf = &inter->surfaces[j]; if( srf->numLightTrisIndexes ) { lightTris++; lightTriIndexes += srf->numLightTrisIndexes; } if( srf->numShadowIndexes ) { 
shadowTris++; shadowTriIndexes += srf->numShadowIndexes; } } } if( numInteractionsForEntity > maxInteractionsForEntity ) { maxInteractionsForEntity = numInteractionsForEntity; } } common->Printf( "%i entities with %i total interactions\n", entities, interactions ); common->Printf( "%i deferred interactions, %i empty interactions\n", deferredInteractions, emptyInteractions ); common->Printf( "%5i indexes in %5i light tris\n", lightTriIndexes, lightTris ); common->Printf( "%5i indexes in %5i shadow tris\n", shadowTriIndexes, shadowTris ); common->Printf( "%i maxInteractionsForEntity\n", maxInteractionsForEntity ); common->Printf( "%i maxInteractionsForLight\n", maxInteractionsForLight ); }
27.577755
366
0.678953
[ "geometry", "model" ]
86d5df37582156ac8e42f88fa1315b8261ff8190
312,923
cpp
C++
core/conn/odbc/src/odbc/nsksrvr/SrvrConnect.cpp
selvaganesang/trafodion
627370c61974be023e48e0ebf0d7ff9d5d711d49
[ "Apache-2.0" ]
null
null
null
core/conn/odbc/src/odbc/nsksrvr/SrvrConnect.cpp
selvaganesang/trafodion
627370c61974be023e48e0ebf0d7ff9d5d711d49
[ "Apache-2.0" ]
null
null
null
core/conn/odbc/src/odbc/nsksrvr/SrvrConnect.cpp
selvaganesang/trafodion
627370c61974be023e48e0ebf0d7ff9d5d711d49
[ "Apache-2.0" ]
null
null
null
/********************************************************************** // @@@ START COPYRIGHT @@@ // // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // // @@@ END COPYRIGHT @@@ ********************************************************************/ // // MODULE: SrvrConnect.cpp // // PURPOSE: Implements the following methods // odbc_SQLSvc_InitializeDialogue_sme_ // odbc_SQLSvc_SetConnectionOption_sme_ // odbc_SQLSvc_TerminateDialogue_sme_ // ImplInit // odbc_SQLSvc_Prepare_ame_ // odbc_SQLSvc_ExecuteN_ame_ // odbc_SQLSvc_Close_ame_ // odbc_SQLSvc_FetchN_ame_ // odbc_SQLSvc_EndTransaction_ame_ // odbc_SQLSvc_SetDiagInfo_ame_ // odbc_SQLSvc_ExecuteCall_ame_ // // odbc_SQLSrvr_Prepare_ame_ // odbc_SQLSrvr_Fetch_ame_ // odbc_SQLSrvr_Close_ame_ // // HISTORY: // 98/04/15 made changes to initializeDialogue for generation of // and use of the user identity at logon time. // changes primarily to allow for anonymous logon // // MODIFICATION: Add trace messages -- 4/29/98 // // 00/10/23 - change this to fix MS Access problem. // Since MS Access does insert, update // and delete while SQL_ACCESS_MODE is SQL_MODE_READ_ONLY. 
#include <platform_ndcs.h> #include <platform_utils.h> #include <stdio.h> #include <sql.h> #include <sqlext.h> #include <dlfcn.h> #include <tal.h> #include <string.h> #include "DrvrSrvr.h" #include "Global.h" #include "QSGlobal.h" #include "QSData.h" #include "TransportBase.h" #include "odbcCommon.h" #include "odbc_sv.h" #include "odbcas_cl.h" #include "srvrcommon.h" #include "sqlinterface.h" #include "SQLWrapper.h" #include "odbcMxSecurity.h" #include "RegValues.h" #include "CommonDiags.h" #include "tdm_odbcSrvrMsg.h" #include "RegValues.h" #include "tmf_tipapi/ntiperror.h" #include "ODBCMXTraceMsgs.h" #include "SrvrConnect.h" #include "commonFunctions.h" #include <dlfcn.h> #include "ResStatisticsSession.h" #include "ResStatisticsStatement.h" #include "ComDllload.h" //#include "QSExceptions.h" #include <dlfcn.h> #include "secsrvrmxo.h" #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #ifdef PERF_TEST // Added for performance testing #include "PerformanceMeasure.h" PerformanceMeasure *perf = 0; #endif #include <arpa/inet.h> #include <netinet/in.h> #include "zookeeper/zookeeper.h" //extern ZK_GLOBAL_Def zkGlobals; #include <tr1/memory> #include <pthread.h> #include <queue> #include <fstream> using namespace std; extern zhandle_t *zh; extern stringstream availSrvrNode; extern string regSrvrData; extern string dcsRegisteredNode; extern string availSrvrData; extern int shutdownThisThing; extern char instanceId[8]; extern char zkRootNode[256]; extern int sdconn; extern int clientConnTimeOut; extern short stopOnDisconnect; extern int aggrInterval; extern int queryPubThreshold; extern statistics_type statisticsPubType; extern bool bStatisticsEnabled; extern int myNid; extern int myPid; extern string myProcName; extern bool bPlanEnabled; extern long maxHeapPctExit; extern long initSessMemSize ; int fd = -1; bool heapSizeExit = false; int interval_count=0; int interval_max=1; int limit_count=0; int limit_max=-1; bool updateZKState(DCS_SERVER_STATE currState, 
DCS_SERVER_STATE newState); static void free_String_vector(struct String_vector *v) { if (v->data) { for (int32_t i=0; i < v->count; i++) { free(v->data[i]); } free(v->data); v->data = NULL; v->count = 0; } } void sync_string_completion(int rc, const char *name, const void *data) { if( rc != ZOK ) { char tmpString[1024]; sprintf(tmpString, "sync_string_completion...Error %d calling zoo_async() for %s. Server exiting.", rc, (char *)data); SendEventMsg(MSG_PROGRAMMING_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, tmpString); SRVR::exitServerProcess(); } // delete [] (char *)data; } short DO_WouldLikeToExecute( IDL_char *stmtLabel , Long stmtHandle , IDL_long* returnCode , IDL_long* sqlWarningOrErrorLength , BYTE*& sqlWarningOrError ); short qrysrvc_ExecuteFinished( const IDL_char *stmtLabel , const Long stmtHandle , const bool bCheckSqlQueryType , const short error_code , const bool bFetch , const bool bException = false , const bool bErase = true ); extern char zkHost[256]; //extern void sendAggrStats(pub_struct_type pub_type, pSESSION_AGGREGATION pAggr_info); extern void sendAggrStats(pub_struct_type pub_type, std::tr1::shared_ptr<SESSION_AGGREGATION> pAggr_info); //extern void sendSessionEnd(pSESSION_END pSession_info); extern void sendSessionEnd(std::tr1::shared_ptr<SESSION_END> pSession_info); extern void sendQueryStats(pub_struct_type pub_type, std::tr1::shared_ptr<STATEMENT_QUERYEXECUTION> pQuery_info); CEE_handle_def StatisticsTimerHandle; SRVR_STMT_HDL * pQueryStmt = NULL; typedef struct _REPOS_STATS { std::tr1::shared_ptr<SESSION_END> m_pSessionStats; std::tr1::shared_ptr<STATEMENT_QUERYEXECUTION> m_pQuery_stats; std::tr1::shared_ptr<SESSION_AGGREGATION> m_pAggr_stats; pub_struct_type m_pub_type; }REPOS_STATS, *pREPOS_STATS; #include "dbUserAuth.h" // to get the dbUser ID after authorization #include "ndcsversion.h" //LCOV_EXCL_START // Needed for bypassing checks in compiler once component 
privileges have been tested // Internal calls - Defined in libcli.so void SQL_EXEC_SetParserFlagsForExSqlComp_Internal( /*IN*/ unsigned int flagbits); void SQL_EXEC_ResetParserFlagsForExSqlComp_Internal( /*IN*/ unsigned int flagbits); //LCOV_EXCL_STOP #define SKIP_COMPRIV_CHECK 0x100000 #define MAX_EVAR_VALUE_LENGTH 3900 + 1 #define START_CONN_IDLE_TIMER \ if (srvrGlobal != NULL && \ srvrGlobal->srvrState == SRVR_CONNECTED && \ ((srvrGlobal->javaConnIdleTimeout > JDBC_INFINITE_CONN_IDLE_TIMEOUT) || \ (srvrGlobal->srvrContext.connIdleTimeout != INFINITE_CONN_IDLE_TIMEOUT))) \ startConnIdleTimer(); #define DESTROY_CONN_IDLE_TIMER \ if (srvrGlobal != NULL && \ srvrGlobal->srvrState == SRVR_CONNECTED && \ ((srvrGlobal->javaConnIdleTimeout > JDBC_INFINITE_CONN_IDLE_TIMEOUT) || \ (srvrGlobal->srvrContext.connIdleTimeout != INFINITE_CONN_IDLE_TIMEOUT))) \ destroyConnIdleTimer(); #define CHECK_QUERYTYPE(y) \ (( y == SQL_SELECT_NON_UNIQUE || y == SQL_INSERT_NON_UNIQUE || \ y == SQL_UPDATE_NON_UNIQUE || y == SQL_DELETE_NON_UNIQUE || y == 10000) ? 
TRUE : FALSE) using namespace SRVR; #include "ceercv.h" // #ifndef NSK_CLPS_LIB #include "Transport.h" #include "FileSystemSrvr.h" #include "TCPIPSystemSrvr.h" #include "odbcs_srvr_res.h" #include "QSData.h" #include "commonFunctions.h" // #endif #include "NskUtil.h" //LCOV_EXCL_START extern void logError( short Code, short Severity, short Operation ); extern char errStrBuf1[], errStrBuf2[], errStrBuf3[], errStrBuf4[], errStrBuf5[]; //LCOV_EXCL_STOP extern ResStatisticsSession *resStatSession; extern ResStatisticsStatement *resStatStatement; extern struct collect_info setinit; extern IDL_short tempSqlStmtType; extern bool informas; extern bool sqlflag; extern bool securitySetup; // Component privileges bitmask_type wmsPrivMask; bitmask_type hpdcsPrivMask; extern void ClearAdaptiveSegment(short adapiveSeg = -1); extern "C" void releaseGlobalBuffer(); /////////////////////////////////////////////////////////////// MonitorCallContext *monitorCallContext = NULL; CEE_handle_def callIdStopServer; char srvrSessionId[SESSION_ID_LEN]; char savedDefaultSchema[MAX_SQL_IDENTIFIER_LEN+3]; // this is to allow double quotes around the schema name bool InsertControls(char* sqlString, odbc_SQLSvc_ExecDirect_exc_ *exception_); bool LoadControls(char* sqlString, bool genOrexc, char* genRequestError, odbc_SQLSvc_PrepareRowset_exc_ *exception_, SRVR_STMT_HDL **stmtHandle); //3155 bool ResetControls(char* genRequestError); bool GetHashInfo(char* sqlString, char* genRequestError, char *HashTableInfo); bool getSQLInfo(E_GetSQLInfoType option, long stmtHandle=NULL, char *stmtLabel=NULL ); bool loadPrivileges( char *component, bitmask_type mask); void setPrivMask( short priv, bitmask_type bitMask ); // QSSYNC registered processes info REG_PROC_INFO regProcInfo[256]; pthread_mutex_t Thread_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP; template<class T> class Repos_Queue { private: std::queue<T> my_queue; //boost::condition_variable_any cond; pthread_cond_t cond; pthread_mutex_t my_mutex; 
public: Repos_Queue(){} ~Repos_Queue() { pthread_cond_destroy(&cond); } private: Repos_Queue(const Repos_Queue&); const Repos_Queue& operator=(const Repos_Queue&); public: void push_task(const T & repos_stats) { //mutex lock //boost::unique_lock<pthread_mutex_t> lock(my_mutex); pthread_mutex_lock(&my_mutex); my_queue.push(repos_stats); //Notify other threads //cond.notify_one(); pthread_cond_signal(&cond); pthread_mutex_unlock(&my_mutex); } T get_task() { //mutex lock //boost::unique_lock<pthread_mutex_t> lock(my_mutex); pthread_mutex_lock(&my_mutex); if(my_queue.size()==0) { //if no task in the queue, waite for mutex //cond.wait(lock); pthread_cond_wait(&cond,&my_mutex); } //point to head of the queue T repos_stats(my_queue.front()); //dequeue my_queue.pop(); pthread_mutex_unlock(&my_mutex); return repos_stats; } int get_size() { return my_queue.size(); } }; static Repos_Queue<REPOS_STATS> repos_queue; static bool record_session_done = true; static void* SessionWatchDog(void* arg) { record_session_done = false; SRVR_STMT_HDL *pSrvrStmt = NULL; SQLCTX_HANDLE thread_context_handle = 0; char tmpString[128]; int rc = pthread_mutex_lock(&Thread_mutex); if (rc != 0) { sprintf(tmpString, "Failed to acquire mutex lock for repository session: error code %d", rc); SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, tmpString); record_session_done = true; return 0; } try { if (WSQL_EXEC_CreateContext(&thread_context_handle, NULL, 0) < 0) { SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, "Failed to create new SQL context"); record_session_done = true; pthread_mutex_unlock(&Thread_mutex); return 0; } SendEventMsg(MSG_SERVER_TRACE_INFO, EVENTLOG_INFORMATION_TYPE, srvrGlobal->nskASProcessInfo.processId, ODBCMX_SERVICE, srvrGlobal->srvrObjRef, 3, srvrGlobal->sessionId, "Created new SQL context", "0"); stringstream errMsg; string errStr; ERROR_DESC_def *p_buffer = NULL; int retcode; 
bool okToGo = true; stringstream ss; string execStr; retcode = WSQL_EXEC_SwitchContext(thread_context_handle, NULL); if (retcode < 0) { SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, "Failed to switch to new SQL context"); okToGo = false; } if (okToGo) { SQL_EXEC_SetParserFlagsForExSqlComp_Internal(0x20000); pSrvrStmt = getSrvrStmt("STMT_PUBLICATION", FALSE); if (pSrvrStmt != NULL) { pSrvrStmt->cleanupAll(); pSrvrStmt->Close(SQL_DROP); pSrvrStmt = NULL; } if ((pSrvrStmt = getSrvrStmt("STMT_PUBLICATION", TRUE)) == NULL) { SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, "Failed to allocate statement for repository publications" ); okToGo = false; } } if (okToGo) { retcode = pSrvrStmt->ExecDirect(NULL, "CONTROL QUERY DEFAULT traf_no_dtm_xn 'ON'", INTERNAL_STMT, TYPE_UNKNOWN, SQL_ASYNC_ENABLE_OFF, 0); if (retcode < 0) { errMsg.str(""); if(pSrvrStmt->sqlError.errorList._length > 0) p_buffer = pSrvrStmt->sqlError.errorList._buffer; else if(pSrvrStmt->sqlWarning._length > 0) p_buffer = pSrvrStmt->sqlWarning._buffer; if(p_buffer != NULL && p_buffer->errorText) errMsg << "Failed to skip transaction - " << p_buffer->errorText; else errMsg << "Failed to skip transaction - " << " no additional information"; errStr = errMsg.str(); SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, errStr.c_str()); okToGo = false; } } if (okToGo) { retcode = pSrvrStmt->ExecDirect(NULL, "CONTROL QUERY DEFAULT attempt_esp_parallelism 'OFF'", INTERNAL_STMT, TYPE_UNKNOWN, SQL_ASYNC_ENABLE_OFF, 0); if (retcode < 0) { errMsg.str(""); if(pSrvrStmt->sqlError.errorList._length > 0) p_buffer = pSrvrStmt->sqlError.errorList._buffer; else if(pSrvrStmt->sqlWarning._length > 0) p_buffer = pSrvrStmt->sqlWarning._buffer; if(p_buffer != NULL && p_buffer->errorText) errMsg << "Failed to disable ESP startup - " << p_buffer->errorText; else errMsg << "Failed to 
disable ESP startup - " << " no additional information"; errStr = errMsg.str(); SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, errStr.c_str() ); okToGo = false; } } if (okToGo) { retcode = pSrvrStmt->ExecDirect(NULL, "CQD DETAILED_STATISTICS 'OFF'", INTERNAL_STMT, TYPE_UNKNOWN, SQL_ASYNC_ENABLE_OFF, 0); if (retcode < 0) { errMsg.str(""); if(pSrvrStmt->sqlError.errorList._length > 0) p_buffer = pSrvrStmt->sqlError.errorList._buffer; else if(pSrvrStmt->sqlWarning._length > 0) p_buffer = pSrvrStmt->sqlWarning._buffer; if(p_buffer != NULL && p_buffer->errorText) errMsg << "Failed to turn off statistics - " << p_buffer->errorText; else errMsg << "Failed to turn off statistics - " << " no additional information"; errStr = errMsg.str(); SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, errStr.c_str()); okToGo = false; } } while(!record_session_done && okToGo) { REPOS_STATS repos_stats = repos_queue.get_task(); ss.str(""); ss.clear(); if (repos_stats.m_pub_type == PUB_TYPE_SESSION_END) { std::tr1::shared_ptr<SESSION_END> pSessionEnd = repos_stats.m_pSessionStats; if(NULL == pSessionEnd) { SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, "Invalid data pointer founded in SessionWatchDog()"); break; } ss << "insert into Trafodion.\"_REPOS_\".metric_session_table values("; ss << pSessionEnd->m_instance_id << ","; ss << pSessionEnd->m_tenant_id << ","; ss << pSessionEnd->m_component_id << ","; ss << pSessionEnd->m_process_id << ","; ss << pSessionEnd->m_thread_id << ","; ss << pSessionEnd->m_node_id << ","; ss << pSessionEnd->m_pnid_id << ","; ss << pSessionEnd->m_host_id << ",'"; ss << pSessionEnd->m_ip_address_id.c_str() << "',"; ss << pSessionEnd->m_sequence_number << ",'"; ss << pSessionEnd->m_process_name.c_str() << "','"; ss << pSessionEnd->m_sessionId.c_str() << "','"; ss << pSessionEnd->m_session_status.c_str() << 
"',CONVERTTIMESTAMP("; ss << pSessionEnd->m_session_start_utc_ts << "),CONVERTTIMESTAMP("; ss << pSessionEnd->m_session_end_utc_ts << "),"; ss << pSessionEnd->m_user_id << ",'"; ss << pSessionEnd->m_user_name.c_str() << "','"; ss << pSessionEnd->m_role_name.c_str() << "','"; ss << pSessionEnd->m_client_name.c_str() << "','"; ss << pSessionEnd->m_client_user_name.c_str() << "','"; ss << pSessionEnd->m_application_name.c_str() << "',"; ss << pSessionEnd->m_total_odbc_exection_time << ","; ss << pSessionEnd->m_total_odbc_elapsed_time << ","; ss << pSessionEnd->m_total_insert_stmts_executed << ","; ss << pSessionEnd->m_total_delete_stmts_executed << ","; ss << pSessionEnd->m_total_update_stmts_executed << ","; ss << pSessionEnd->m_total_select_stmts_executed << ","; ss << pSessionEnd->m_total_catalog_stmts << ","; ss << pSessionEnd->m_total_prepares << ","; ss << pSessionEnd->m_total_executes << ","; ss << pSessionEnd->m_total_fetches << ","; ss << pSessionEnd->m_total_closes << ","; ss << pSessionEnd->m_total_execdirects << ","; ss << pSessionEnd->m_total_errors << ","; ss << pSessionEnd->m_total_warnings << ","; ss << pSessionEnd->m_total_login_elapsed_time_mcsec << ","; ss << pSessionEnd->m_ldap_login_elapsed_time_mcsec << ","; ss << pSessionEnd->m_sql_user_elapsed_time_mcsec << ","; ss << pSessionEnd->m_search_connection_elapsed_time_mcsec << ","; ss << pSessionEnd->m_search_elapsed_time_mcsec << ","; ss << pSessionEnd->m_authentication_connection_elapsed_time_mcsec << ","; ss << pSessionEnd->m_authentication_elapsed_time_mcsec << ")"; } else if (repos_stats.m_pub_type == PUB_TYPE_STATEMENT_NEW_QUERYEXECUTION) { std::tr1::shared_ptr<STATEMENT_QUERYEXECUTION> pQueryAdd = repos_stats.m_pQuery_stats; if(NULL == pQueryAdd) { SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, "Invalid data pointer founded in SessionWatchDog()"); break; } ss << "insert into Trafodion.\"_REPOS_\".metric_query_table values("; ss << 
pQueryAdd->m_instance_id << ","; ss << pQueryAdd->m_tenant_id << ","; ss << pQueryAdd->m_component_id << ","; ss << pQueryAdd->m_process_id << ","; ss << pQueryAdd->m_thread_id << ","; ss << pQueryAdd->m_node_id << ","; ss << pQueryAdd->m_pnid_id << ","; ss << pQueryAdd->m_host_id << ",'"; ss << pQueryAdd->m_ip_address_id.c_str() << "',"; ss << pQueryAdd->m_sequence_number << ",'"; ss << pQueryAdd->m_process_name.c_str() << "',CONVERTTIMESTAMP("; ss << pQueryAdd->m_exec_start_utc_ts << "),'"; ss << pQueryAdd->m_query_id.c_str() << "','"; ss << pQueryAdd->m_user_name.c_str() << "','"; ss << pQueryAdd->m_role_name.c_str() << "',"; ss << pQueryAdd->m_start_priority << ",'"; ss << pQueryAdd->m_master_process_id.c_str() << "','"; ss << pQueryAdd->m_session_id.c_str() << "','"; ss << pQueryAdd->m_client_name.c_str() << "','"; ss << pQueryAdd->m_application_name.c_str() << "','"; ss << pQueryAdd->m_statement_id.c_str() << "','"; ss << pQueryAdd->m_statement_type.c_str() << "','"; ss << pQueryAdd->m_statement_subtype.c_str() << "',"; if (pQueryAdd->m_submit_utc_ts > 0) ss << "CONVERTTIMESTAMP(" << pQueryAdd->m_submit_utc_ts << "),"; else ss << "NULL,"; if (pQueryAdd->m_compile_start_utc_ts > 0) ss << "CONVERTTIMESTAMP(" << pQueryAdd->m_compile_start_utc_ts << "),"; else ss << "NULL,"; if (pQueryAdd->m_compile_end_utc_ts > 0) ss << "CONVERTTIMESTAMP(" << pQueryAdd->m_compile_end_utc_ts << "),"; else ss << "NULL,"; ss << pQueryAdd->m_compile_elapsed_time << ","; ss << pQueryAdd->m_cmp_affinity_num << ","; ss << pQueryAdd->m_cmp_dop << ","; ss << pQueryAdd->m_cmp_txn_needed << ","; ss << pQueryAdd->m_cmp_mandatory_x_prod << ","; ss << pQueryAdd->m_cmp_missing_stats << ","; ss << pQueryAdd->m_cmp_num_joins << ","; ss << pQueryAdd->m_cmp_full_scan_on_table << ","; ss << pQueryAdd->m_cmp_rows_accessed_full_scan << ","; ss << pQueryAdd->m_est_accessed_rows << ","; ss << pQueryAdd->m_est_used_rows << ",'"; ss << pQueryAdd->m_cmp_compiler_id.c_str() << "',"; ss << 
pQueryAdd->m_cmp_cpu_path_length << ","; ss << pQueryAdd->m_cmp_cpu_binder << ","; ss << pQueryAdd->m_cmp_cpu_normalizer << ","; ss << pQueryAdd->m_cmp_cpu_analyzer << ","; ss << pQueryAdd->m_cmp_cpu_optimizer << ","; ss << pQueryAdd->m_cmp_cpu_generator << ","; ss << pQueryAdd->m_cmp_metadata_cache_hits << ","; ss << pQueryAdd->m_cmp_metadata_cache_lookups << ","; ss << pQueryAdd->m_cmp_query_cache_status << ","; ss << pQueryAdd->m_cmp_histogram_cache_hits << ","; ss << pQueryAdd->m_cmp_histogram_cache_lookups << ","; ss << pQueryAdd->m_cmp_stmt_heap_size << ","; ss << pQueryAdd->m_cmp_context_heap_size << ","; ss << pQueryAdd->m_cmp_optimization_tasks << ","; ss << pQueryAdd->m_cmp_optimization_contexts << ","; ss << pQueryAdd->m_cmp_is_recompile << ","; ss << pQueryAdd->m_est_num_seq_ios << ","; ss << pQueryAdd->m_est_num_rand_ios << ","; ss << pQueryAdd->m_est_cost << ","; ss << pQueryAdd->m_est_cardinality << ","; ss << pQueryAdd->m_est_io_time << ","; ss << pQueryAdd->m_est_msg_time << ","; ss << pQueryAdd->m_est_idle_time << ","; ss << pQueryAdd->m_est_cpu_time << ","; ss << pQueryAdd->m_est_total_time << ","; ss << pQueryAdd->m_est_total_mem << ","; ss << pQueryAdd->m_est_resource_usage << ",'"; ss << pQueryAdd->m_aggregation_option.c_str() << "',"; ss << pQueryAdd->m_cmp_number_of_bmos << ",'"; ss << pQueryAdd->m_cmp_overflow_mode.c_str() << "',"; ss << pQueryAdd->m_cmp_overflow_size << ","; ss << pQueryAdd->m_aggregate_total << ","; ss << pQueryAdd->m_stats_error_code << ","; ss << pQueryAdd->m_query_elapsed_time << ","; ss << pQueryAdd->m_sql_process_busy_time << ","; ss << pQueryAdd->m_disk_process_busy_time << ","; ss << pQueryAdd->m_disk_ios << ","; ss << pQueryAdd->m_num_sql_processes << ","; ss << pQueryAdd->m_sql_space_allocated << ","; ss << pQueryAdd->m_sql_space_used << ","; ss << pQueryAdd->m_sql_heap_allocated << ","; ss << pQueryAdd->m_sql_heap_used << ","; ss << pQueryAdd->m_total_mem_alloc << ","; ss << pQueryAdd->m_max_mem_used << ",'"; ss 
<< pQueryAdd->m_transaction_id.c_str() << "',"; ss << pQueryAdd->m_num_request_msgs << ","; ss << pQueryAdd->m_num_request_msg_bytes << ","; ss << pQueryAdd->m_num_reply_msgs << ","; ss << pQueryAdd->m_num_reply_msg_bytes << ","; if (pQueryAdd->m_first_result_return_utc_ts > 0) ss << "CONVERTTIMESTAMP(" << pQueryAdd->m_first_result_return_utc_ts << "),"; else ss << "NULL,"; ss << pQueryAdd->m_rows_returned_to_master << ",'"; ss << pQueryAdd->m_parent_query_id.c_str() << "','"; ss << pQueryAdd->m_parent_system_name.c_str() << "',"; if (pQueryAdd->m_exec_end_utc_ts > 0) ss << "CONVERTTIMESTAMP(" << pQueryAdd->m_exec_end_utc_ts << "),"; else ss << "NULL,"; ss << pQueryAdd->m_master_execution_time << ","; ss << pQueryAdd->m_master_elapse_time << ",'"; ss << pQueryAdd->m_query_status << "','"; ss << pQueryAdd->m_query_sub_status << "',"; ss << pQueryAdd->m_error_code << ","; ss << pQueryAdd->m_sql_error_code << ",'"; ss << pQueryAdd->m_error_text.c_str() << "','"; ss << pQueryAdd->m_query_text.c_str() << "','"; ss << "',"; // Explain plan. 
Updated later below using a CLI call ss << pQueryAdd->m_last_error_before_aqr << ","; ss << pQueryAdd->m_delay_time_before_aqr_sec << ","; ss << pQueryAdd->m_total_num_aqr_retries << ","; ss << pQueryAdd->m_msg_bytes_to_disk << ","; ss << pQueryAdd->m_msgs_to_disk << ","; ss << pQueryAdd->m_rows_accessed << ","; ss << pQueryAdd->m_rows_retrieved << ","; ss << pQueryAdd->m_num_rows_iud << ","; ss << pQueryAdd->m_processes_created << ","; ss << pQueryAdd->m_process_create_busy_time << ","; ss << pQueryAdd->m_ovf_file_count << ","; ss << pQueryAdd->m_ovf_space_allocated << ","; ss << pQueryAdd->m_ovf_space_used << ","; ss << pQueryAdd->m_ovf_block_size << ","; ss << pQueryAdd->m_ovf_write_read_count << ","; ss << pQueryAdd->m_ovf_write_count << ","; ss << pQueryAdd->m_ovf_buffer_blocks_written << ","; ss << pQueryAdd->m_ovf_buffer_bytes_written << ","; ss << pQueryAdd->m_ovf_read_count << ","; ss << pQueryAdd->m_ovf_buffer_blocks_read << ","; ss << pQueryAdd->m_ovf_buffer_bytes_read << ","; ss << pQueryAdd->m_num_nodes << ","; ss << pQueryAdd->m_udr_process_busy_time << ","; ss << pQueryAdd->m_pertable_stats << ")"; } else if (repos_stats.m_pub_type == PUB_TYPE_STATEMENT_UPDATE_QUERYEXECUTION) { std::tr1::shared_ptr<STATEMENT_QUERYEXECUTION> pQueryUpdate = repos_stats.m_pQuery_stats; if(NULL == pQueryUpdate) { SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, "Invalid data pointer founded in SessionWatchDog()"); break; } ss << "update Trafodion.\"_REPOS_\".metric_query_table "; ss << "set STATEMENT_TYPE= '" << pQueryUpdate->m_statement_type.c_str() << "',"; ss << "STATEMENT_SUBTYPE= '" << pQueryUpdate->m_statement_subtype.c_str() << "',"; ss << "AGGREGATE_TOTAL= " << pQueryUpdate->m_aggregate_total << ","; ss << "STATS_ERROR_CODE= " << pQueryUpdate->m_stats_error_code << ","; ss << "QUERY_ELAPSED_TIME= " << pQueryUpdate->m_query_elapsed_time << ","; ss << "SQL_PROCESS_BUSY_TIME= " << 
pQueryUpdate->m_sql_process_busy_time << ","; ss << "DISK_PROCESS_BUSY_TIME= " << pQueryUpdate->m_disk_process_busy_time << ","; ss << "DISK_IOS= " << pQueryUpdate->m_disk_ios << ","; ss << "NUM_SQL_PROCESSES= " << pQueryUpdate->m_num_sql_processes << ","; ss << "SQL_SPACE_ALLOCATED= " << pQueryUpdate->m_sql_space_allocated << ","; ss << "SQL_SPACE_USED= " << pQueryUpdate->m_sql_space_used << ","; ss << "SQL_HEAP_ALLOCATED= " << pQueryUpdate->m_sql_heap_allocated << ","; ss << "SQL_HEAP_USED= " << pQueryUpdate->m_sql_heap_used << ","; ss << "TOTAL_MEM_ALLOC= " << pQueryUpdate->m_total_mem_alloc << ","; ss << "MAX_MEM_USED= " << pQueryUpdate->m_max_mem_used << ","; ss << "NUM_REQUEST_MSGS= " << pQueryUpdate->m_num_request_msgs << ","; ss << "NUM_REQUEST_MSG_BYTES= " << pQueryUpdate->m_num_request_msg_bytes << ","; ss << "NUM_REPLY_MSGS= " << pQueryUpdate->m_num_reply_msgs << ","; ss << "NUM_REPLY_MSG_BYTES= " << pQueryUpdate->m_num_reply_msg_bytes << ","; if (pQueryUpdate->m_first_result_return_utc_ts > 0) ss << "FIRST_RESULT_RETURN_UTC_TS= CONVERTTIMESTAMP(" << pQueryUpdate->m_first_result_return_utc_ts << "),"; ss << "ROWS_RETURNED_TO_MASTER= " << pQueryUpdate->m_rows_returned_to_master << ","; if (pQueryUpdate->m_exec_end_utc_ts > 0) ss << "EXEC_END_UTC_TS= CONVERTTIMESTAMP(" << pQueryUpdate->m_exec_end_utc_ts << "),"; ss << "MASTER_EXECUTION_TIME= " << pQueryUpdate->m_master_execution_time << ","; ss << "MASTER_ELAPSED_TIME= " << pQueryUpdate->m_master_elapse_time << ","; ss << "QUERY_STATUS= '" << pQueryUpdate->m_query_status << "',"; ss << "QUERY_SUB_STATUS= '" << pQueryUpdate->m_query_sub_status << "',"; ss << "ERROR_CODE= " << pQueryUpdate->m_error_code << ","; ss << "SQL_ERROR_CODE= " << pQueryUpdate->m_sql_error_code << ","; ss << "ERROR_TEXT= '" << pQueryUpdate->m_error_text.c_str()<< "',"; ss << "LAST_ERROR_BEFORE_AQR= " << pQueryUpdate->m_last_error_before_aqr << ","; ss << "DELAY_TIME_BEFORE_AQR_SEC= " << pQueryUpdate->m_delay_time_before_aqr_sec << 
","; ss << "TOTAL_NUM_AQR_RETRIES= " << pQueryUpdate->m_total_num_aqr_retries << ","; ss << "MSG_BYTES_TO_DISK= " << pQueryUpdate->m_msg_bytes_to_disk << ","; ss << "MSGS_TO_DISK= " << pQueryUpdate->m_msgs_to_disk << ","; ss << "ROWS_ACCESSED= " << pQueryUpdate->m_rows_accessed << ","; ss << "ROWS_RETRIEVED= " << pQueryUpdate->m_rows_retrieved << ","; ss << "NUM_ROWS_IUD= " << pQueryUpdate->m_num_rows_iud << ","; ss << "PROCESSES_CREATED= " << pQueryUpdate->m_processes_created << ","; ss << "PROCESS_CREATE_BUSY_TIME= " << pQueryUpdate->m_process_create_busy_time << ","; ss << "OVF_FILE_COUNT= " << pQueryUpdate->m_ovf_file_count << ","; ss << "OVF_SPACE_ALLOCATED= " << pQueryUpdate->m_ovf_space_allocated << ","; ss << "OVF_SPACE_USED= " << pQueryUpdate->m_ovf_space_used << ","; ss << "OVF_BLOCK_SIZE= " << pQueryUpdate->m_ovf_block_size << ","; ss << "OVF_WRITE_READ_COUNT= " << pQueryUpdate->m_ovf_write_read_count << ","; ss << "OVF_WRITE_COUNT= " << pQueryUpdate->m_ovf_write_count << ","; ss << "OVF_BUFFER_BLOCKS_WRITTEN= " << pQueryUpdate->m_ovf_buffer_blocks_written << ","; ss << "OVF_BUFFER_BYTES_WRITTEN= " << pQueryUpdate->m_ovf_buffer_bytes_written << ","; ss << "OVF_READ_COUNT= " << pQueryUpdate->m_ovf_read_count << ","; ss << "OVF_BUFFER_BLOCKS_READ= " << pQueryUpdate->m_ovf_buffer_blocks_read << ","; ss << "OVF_BUFFER_BYTES_READ= " << pQueryUpdate->m_ovf_buffer_bytes_read << ","; ss << "NUM_NODES= " << pQueryUpdate->m_num_nodes << ","; ss << "UDR_PROCESS_BUSY_TIME= " << pQueryUpdate->m_udr_process_busy_time << ","; ss << "PERTABLE_STATS= " << pQueryUpdate->m_pertable_stats; ss << " where QUERY_ID = '" << pQueryUpdate->m_query_id.c_str() << "'"; ss << " and EXEC_START_UTC_TS = CONVERTTIMESTAMP(" << pQueryUpdate->m_exec_start_utc_ts << ")"; } else if (repos_stats.m_pub_type == PUB_TYPE_SESSION_START_AGGREGATION) { std::tr1::shared_ptr<SESSION_AGGREGATION> pAggrStat = repos_stats.m_pAggr_stats; if(NULL == pAggrStat) { SendEventMsg(MSG_ODBC_NSK_ERROR, 
EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, "Invalid data pointer founded in SessionWatchDog()"); break; } ss << "insert into Trafodion.\"_REPOS_\".metric_query_aggr_table values("; ss << pAggrStat->m_instance_id << ","; ss << pAggrStat->m_tenant_id << ","; ss << pAggrStat->m_component_id << ","; ss << pAggrStat->m_process_id << ","; ss << pAggrStat->m_thread_id << ","; ss << pAggrStat->m_node_id << ","; ss << pAggrStat->m_pnid_id << ","; ss << pAggrStat->m_host_id << ",'"; ss << pAggrStat->m_ip_address_id.c_str() << "',"; ss << pAggrStat->m_sequence_number << ",'"; ss << pAggrStat->m_process_name.c_str() << "','"; ss << pAggrStat->m_sessionId.c_str() << "',CONVERTTIMESTAMP("; ss << pAggrStat->m_session_start_utc_ts << "),CONVERTTIMESTAMP("; ss << pAggrStat->m_aggregation_last_update_utc_ts << "),"; ss << pAggrStat->m_aggregation_last_elapsed_time << ","; ss << pAggrStat->m_user_id << ",'"; ss << pAggrStat->m_user_name.c_str() << "','"; ss << pAggrStat->m_role_name.c_str() << "','"; ss << pAggrStat->m_client_name.c_str() << "','"; ss << pAggrStat->m_client_user_name.c_str() << "','"; ss << pAggrStat->m_application_name.c_str() << "',"; ss << pAggrStat->m_total_est_rows_accessed << ","; ss << pAggrStat->m_total_est_rows_used << ","; ss << pAggrStat->m_total_rows_retrieved << ","; ss << pAggrStat->m_total_num_rows_iud << ","; ss << pAggrStat->m_total_selects << ","; ss << pAggrStat->m_total_inserts << ","; ss << pAggrStat->m_total_updates << ","; ss << pAggrStat->m_total_deletes << ","; ss << pAggrStat->m_total_ddl_stmts << ","; ss << pAggrStat->m_total_util_stmts << ","; ss << pAggrStat->m_total_catalog_stmts << ","; ss << pAggrStat->m_total_other_stmts << ","; ss << pAggrStat->m_total_insert_errors << ","; ss << pAggrStat->m_total_delete_errors << ","; ss << pAggrStat->m_total_update_errors << ","; ss << pAggrStat->m_total_select_errors << ","; ss << pAggrStat->m_total_ddl_errors << ","; ss << pAggrStat->m_total_util_errors << ","; ss << 
pAggrStat->m_total_catalog_errors << ","; ss << pAggrStat->m_total_other_errors << ","; ss << pAggrStat->m_delta_estimated_rows_accessed << ","; ss << pAggrStat->m_delta_estimated_rows_used << ","; ss << pAggrStat->m_delta_rows_accessed << ","; ss << pAggrStat->m_delta_rows_retrieved << ","; ss << pAggrStat->m_delta_num_rows_iud << ","; ss << pAggrStat->m_delta_total_selects << ","; ss << pAggrStat->m_delta_total_inserts << ","; ss << pAggrStat->m_delta_total_updates << ","; ss << pAggrStat->m_delta_total_deletes << ","; ss << pAggrStat->m_delta_total_ddl_stmts << ","; ss << pAggrStat->m_delta_total_util_stmts << ","; ss << pAggrStat->m_delta_total_catalog_stmts << ","; ss << pAggrStat->m_delta_total_other_stmts << ","; ss << pAggrStat->m_delta_insert_errors << ","; ss << pAggrStat->m_delta_delete_errors << ","; ss << pAggrStat->m_delta_update_errors << ","; ss << pAggrStat->m_delta_select_errors << ","; ss << pAggrStat->m_delta_ddl_errors << ","; ss << pAggrStat->m_delta_util_errors << ","; ss << pAggrStat->m_delta_catalog_errors << ","; ss << pAggrStat->m_delta_other_errors << ")"; } else if (repos_stats.m_pub_type == PUB_TYPE_SESSION_UPDATE_AGGREGATION || repos_stats.m_pub_type == PUB_TYPE_SESSION_END_AGGREGATION) { std::tr1::shared_ptr<SESSION_AGGREGATION> pAggrStat = repos_stats.m_pAggr_stats; if(NULL == pAggrStat) { SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, "Invalid data pointer founded in SessionWatchDog()"); break; } ss << "update Trafodion.\"_REPOS_\".metric_query_aggr_table "; ss << "set AGGREGATION_LAST_UPDATE_UTC_TS = CONVERTTIMESTAMP(" << pAggrStat->m_aggregation_last_update_utc_ts << "),"; ss << "AGGREGATION_LAST_ELAPSED_TIME = " << pAggrStat->m_aggregation_last_elapsed_time << ","; ss << "TOTAL_EST_ROWS_ACCESSED = " << pAggrStat->m_total_est_rows_accessed << ","; ss << "TOTAL_EST_ROWS_USED = " << pAggrStat->m_total_est_rows_used << ","; ss << "TOTAL_ROWS_RETRIEVED = " << 
pAggrStat->m_total_rows_retrieved << ","; ss << "TOTAL_NUM_ROWS_IUD = " << pAggrStat->m_total_num_rows_iud << ","; ss << "TOTAL_SELECTS = " << pAggrStat->m_total_selects << ","; ss << "TOTAL_INSERTS = " << pAggrStat->m_total_inserts << ","; ss << "TOTAL_UPDATES = " << pAggrStat->m_total_updates << ","; ss << "TOTAL_DELETES = " << pAggrStat->m_total_deletes << ","; ss << "TOTAL_DDL_STMTS = " << pAggrStat->m_total_ddl_stmts << ","; ss << "TOTAL_UTIL_STMTS = " << pAggrStat->m_total_util_stmts << ","; ss << "TOTAL_CATALOG_STMTS = " << pAggrStat->m_total_catalog_stmts << ","; ss << "TOTAL_OTHER_STMTS = " << pAggrStat->m_total_other_stmts << ","; ss << "TOTAL_INSERT_ERRORS = " << pAggrStat->m_total_insert_errors << ","; ss << "TOTAL_DELETE_ERRORS = " << pAggrStat->m_total_delete_errors << ","; ss << "TOTAL_UPDATE_ERRORS = " << pAggrStat->m_total_update_errors << ","; ss << "TOTAL_SELECT_ERRORS = " << pAggrStat->m_total_select_errors << ","; ss << "TOTAL_DDL_ERRORS = " << pAggrStat->m_total_ddl_errors << ","; ss << "TOTAL_UTIL_ERRORS = " << pAggrStat->m_total_util_errors << ","; ss << "TOTAL_CATALOG_ERRORS = " << pAggrStat->m_total_catalog_errors << ","; ss << "TOTAL_OTHER_ERRORS = " << pAggrStat->m_total_other_errors << ","; ss << "DELTA_ESTIMATED_ROWS_ACCESSED = " << pAggrStat->m_delta_estimated_rows_accessed << ","; ss << "DELTA_ESTIMATED_ROWS_USED = " << pAggrStat->m_delta_estimated_rows_used << ","; ss << "DELTA_ROWS_ACCESSED = " << pAggrStat->m_delta_rows_accessed << ","; ss << "DELTA_ROWS_RETRIEVED = " << pAggrStat->m_delta_rows_retrieved << ","; ss << "DELTA_NUM_ROWS_IUD = " << pAggrStat->m_delta_num_rows_iud << ","; ss << "DELTA_SELECTS = " << pAggrStat->m_delta_total_selects << ","; ss << "DELTA_INSERTS = " << pAggrStat->m_delta_total_inserts << ","; ss << "DELTA_UPDATES = " << pAggrStat->m_delta_total_updates << ","; ss << "DELTA_DELETES = " << pAggrStat->m_delta_total_deletes << ","; ss << "DELTA_DDL_STMTS = " << pAggrStat->m_delta_total_ddl_stmts << ","; ss 
<< "DELTA_UTIL_STMTS = " << pAggrStat->m_delta_total_util_stmts << ","; ss << "DELTA_CATALOG_STMTS = " << pAggrStat->m_delta_total_catalog_stmts << ","; ss << "DELTA_OTHER_STMTS = " << pAggrStat->m_delta_total_other_stmts << ","; ss << "DELTA_INSERT_ERRORS = " << pAggrStat->m_delta_insert_errors << ","; ss << "DELTA_DELETE_ERRORS = " << pAggrStat->m_delta_delete_errors << ","; ss << "DELTA_UPDATE_ERRORS = " << pAggrStat->m_delta_update_errors << ","; ss << "DELTA_SELECT_ERRORS = " << pAggrStat->m_delta_select_errors << ","; ss << "DELTA_DDL_ERRORS = " << pAggrStat->m_delta_ddl_errors << ","; ss << "DELTA_UTIL_ERRORS = " << pAggrStat->m_delta_util_errors << ","; ss << "DELTA_CATALOG_ERRORS = " << pAggrStat->m_delta_catalog_errors << ","; ss << "DELTA_OTHER_ERRORS = " << pAggrStat->m_delta_other_errors; ss << " where SESSION_START_UTC_TS = CONVERTTIMESTAMP(" << pAggrStat->m_session_start_utc_ts << ")"; ss << " and SESSION_ID = '" << pAggrStat->m_sessionId.c_str() << "'"; } else { break; } execStr = ss.str(); retcode = pSrvrStmt->ExecDirect(NULL, execStr.c_str(), INTERNAL_STMT, TYPE_UNKNOWN, SQL_ASYNC_ENABLE_OFF, 0); if (retcode < 0) { errMsg.str(""); if(pSrvrStmt->sqlError.errorList._length > 0) p_buffer = pSrvrStmt->sqlError.errorList._buffer; else if(pSrvrStmt->sqlWarning._length > 0) p_buffer = pSrvrStmt->sqlWarning._buffer; if(p_buffer != NULL && p_buffer->errorText) errMsg << "Failed to write statistics: " << execStr.c_str() << "----Error detail - " << p_buffer->errorText; else errMsg << "Failed to write statistics: " << execStr.c_str() << "----Error detail - " << " no additional information"; errStr = errMsg.str(); SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, errStr.c_str()); } else { // Update QUERY_TABLE with explain plan if needed if (repos_stats.m_pub_type == PUB_TYPE_STATEMENT_NEW_QUERYEXECUTION && TRUE == srvrGlobal->sqlPlan) { std::tr1::shared_ptr<STATEMENT_QUERYEXECUTION> pQueryAdd = 
repos_stats.m_pQuery_stats; if(NULL == pQueryAdd) { SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, "Invalid data pointer found in SessionWatchDog(). Cannot write explain plan."); break; } if (pQueryAdd->m_explain_plan && (pQueryAdd->m_explain_plan_len > 0)) { retcode = SQL_EXEC_StoreExplainData( &(pQueryAdd->m_exec_start_utc_ts), (char *)(pQueryAdd->m_query_id.c_str()), pQueryAdd->m_explain_plan, pQueryAdd->m_explain_plan_len ); if (retcode == -EXE_EXPLAIN_PLAN_TOO_LARGE) { // explain info is too big to be stored in repository. // ignore this error and continue with query execution. retcode = 0; } else if (retcode < 0) { char errStr[256]; sprintf( errStr, "Error updating explain data. SQL_EXEC_StoreExplainData() returned: %d", retcode ); SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, errStr); } } } } }//End while } catch(...) { //Write to Log4cxx the error message.. } if (pSrvrStmt != NULL) pSrvrStmt->cleanupAll(); // Statements allocated earlier will get cleaned up // during stop processing WSQL_EXEC_DeleteContext(thread_context_handle); SendEventMsg(MSG_SERVER_TRACE_INFO, EVENTLOG_INFORMATION_TYPE, srvrGlobal->nskASProcessInfo.processId, ODBCMX_SERVICE, srvrGlobal->srvrObjRef, 3, srvrGlobal->sessionId, "Deleted new SQL context", "0"); record_session_done = true; rc = pthread_mutex_unlock(&Thread_mutex); if (rc != 0) { sprintf(tmpString, "Failed to release mutex lock for repository session: error code %d", rc); SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, tmpString); } } #ifdef __TIME_LOGGER void createTimeLoggerFile() { MS_Mon_Process_Info_Type proc_info; char myProcname[128]; short procname_len; int error =0; char tmpString[1024]; if ((error = PROCESSHANDLE_DECOMPOSE_ ( TPT_REF(srvrGlobal->nskProcessInfo.pHandle) ,OMITREF //[ short *cpu ] ,OMITREF //[ short *pin ] ,OMITREF //[ long *nodenumber ] ,OMITREF 
//[ char *nodename ] ,OMITSHORT //[ short maxlen ] ,OMITREF //[ short *nodename-length ] ,myProcname //[ char *procname ] ,sizeof(myProcname) //[ short maxlen ] ,&procname_len //[ short *procname-length ] ,OMITREF //[ long long *sequence-number ] )) != 0) { //LCOV_EXCL_START tmpString[0]='\0'; sprintf(tmpString, "%d", error); SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, tmpString); //LCOV_EXCL_STOP } myProcname[procname_len] = 0; error = msg_mon_get_process_info_detail(myProcname, &proc_info); if (error != XZFIL_ERR_OK ) { //LCOV_EXCL_START tmpString[0]='\0'; sprintf(tmpString, "%d", error); SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, tmpString); //LCOV_EXCL_STOP } srvrGlobal->process_id = proc_info.pid; srvrGlobal->cpu = proc_info.nid; srvrGlobal->timeLogger.createLogFile(proc_info.nid, proc_info.pid,myProcname); } #endif // "ImplInit" is the 'object'initialization function for // the odbc_SQLSvc object . // extern "C" void ImplInit ( /* In */ const CEE_handle_def *objectHandle, /* In */ const char *initParam, /* In */ long initParamLen, /* Out */ CEE_status *returnSts, /* Out */ CEE_tag_def *objTag, /* Out */ CEE_handle_def *implementationHandle) { CEE_handle_def intf; char tmpString[100]; // Added for exit on SQL un-recoverable errors // Initialize sqlErrorExit and errorIndex. 
errorIndex = 0;
// Clear the un-recoverable SQL error bookkeeping for this process.
for( int i = 0; i < 8; i++ )
	sqlErrorExit[i] = 0;

SRVR_INIT_PARAM_Def *srvrInitParam;
srvrInitParam = (SRVR_INIT_PARAM_Def *)initParam;
*returnSts = CEE_SUCCESS;

// Allocate the process-wide global state exactly once.
if (srvrGlobal == NULL)
{
	srvrGlobal = new SRVR_GLOBAL_Def;
	if (srvrGlobal == NULL)
	{
//LCOV_EXCL_START
		// NOTE(review): srvrGlobal is dereferenced in this call even though it is
		// NULL in this branch — defensive path only; confirm before relying on it.
		SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE,
			srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
			srvrGlobal->srvrObjRef, 1, "srvrGlobal");
		exitServerProcess();
//LCOV_EXCL_STOP
	}
}
srvrGlobal->dialogueId = -1;
srvrGlobal->receiveThrId = getpid();
timer_register();
CEE_HANDLE_SET_NIL(&srvrGlobal->connIdleTimerHandle);
CEE_HANDLE_SET_NIL(&srvrGlobal->srvrIdleTimerHandle);
CEE_HANDLE_SET_NIL(&StatisticsTimerHandle);
srvrGlobal->srvrVersion.componentId = 0;	// Unknown
if (srvrGlobal->srvrVersion.componentId == 0)
{
	srvrGlobal->srvrVersion.componentId = NSK_ENDIAN + ODBC_SRVR_COMPONENT; // done to support backward compatibility
	if (WSQL_EXEC_CLI_VERSION() >= 91) // Roadrunner and above
	{
		srvrGlobal->srvrVersion.majorVersion = MXOSRVR_VERSION_MAJOR;
		srvrGlobal->srvrVersion.minorVersion = MXOSRVR_VERSION_MINOR;
		srvrGlobal->srvrVersion.buildId = MXOSRVR_VERSION_BUILD;
	}
	else
	{
//LCOV_EXCL_START
		srvrGlobal->srvrVersion.majorVersion = NSK_VERSION_MAJOR_1;
		srvrGlobal->srvrVersion.minorVersion = NSK_VERSION_MAJOR_1; // Confusing! don't. Legacy code carried forward ....
		srvrGlobal->srvrVersion.buildId = NSK_BUILD_1;
//LCOV_EXCL_STOP
	}
}
short error;
short errorDetail;
// SQL/MX component version reported back to clients.
srvrGlobal->sqlVersion.majorVersion = 0;
srvrGlobal->sqlVersion.minorVersion = 0;
srvrGlobal->sqlVersion.buildId = 0;
srvrGlobal->sqlVersion.majorVersion = VERS_PV_MAJ;
srvrGlobal->sqlVersion.minorVersion = VERS_PV_MIN;
srvrGlobal->sqlVersion.buildId = VERS_PV_UPD;

// NOTE(review): srvrInitParam (cast from initParam) is written here BEFORE the
// initParam NULL check below — confirm initParam can never be NULL in practice.
strcpy(srvrInitParam->asSrvrObjRef, "DUMMY"); // To let it work in RegisterSrvr()

// Copy the caller-supplied initialization parameters into srvrGlobal.
if (initParam != NULL && initParamLen != 0)
{
	short error;
	srvrGlobal->debugFlag = srvrInitParam->debugFlag;
	strcpy(srvrGlobal->asSrvrObjRef, srvrInitParam->asSrvrObjRef);
	srvrGlobal->srvrType = srvrInitParam->srvrType;
	srvrGlobal->DSId = srvrInitParam->DSId;
	strcpy(srvrGlobal->DSName, srvrInitParam->DSName);
	srvrGlobal->eventFlag = srvrInitParam->eventFlag;
	srvrGlobal->stopTypeFlag=STOP_UNKNOWN;
	srvrGlobal->timeLoggerFlag=srvrInitParam->timeLogger;
	BUILD_OBJECTREF(srvrInitParam->asSrvrObjRef, srvrGlobal->srvrObjRef, "NonStopODBC", srvrInitParam->portNumber);
	strcpy(srvrGlobal->ASProcessName, srvrInitParam->ASProcessName);
	// Resolve the association-server process handle; abort init if it cannot be found.
	if ((error = getProcessHandle(srvrGlobal->ASProcessName,
		TPT_REF(srvrGlobal->nskASProcessInfo.pHandle))) != 0)
	{
//LCOV_EXCL_START
		*returnSts = 9999;
		SET_ERROR((long)0, NSK, UNKNOWN_TRANSPORT, SRVR_API_INIT, E_SERVER, "ImplInit",
			O_INIT_PROCESS, F_FILENAME_TO_PROCESSHANDLE_, *returnSts, error);
		return;
//LCOV_EXCL_STOP
	}
	MS_Mon_Process_Info_Type proc_info;
	//Get my node id then get registered processes info from QSSYNC
	//If QSSYNC not found then append my node id to init param -QS name
	//otherwise use QSMGR name for my node id returned from QSSYNC
	msg_mon_get_process_info_detail(NULL, &proc_info);
	int myNid = proc_info.nid;
	strcpy(srvrGlobal->QSProcessName, srvrInitParam->QSProcessName);
	// NOTE: the QSSYNC lookup below is intentionally disabled (kept for reference).
/*
	error = msg_mon_get_process_info_detail(srvrInitParam->QSsyncProcessName, &proc_info);
	if (error == XZFIL_ERR_OK )
	{
		//QSSYNC process is running
		char segmentArray[256];
		if(true ==
loadSyncValues(segmentArray,srvrInitParam->QSsyncProcessName))
		{
			if(regProcInfo[myNid].qsname)
				sprintf(srvrGlobal->QSProcessName,"%s",regProcInfo[myNid].qsname);
			else
			{
				//QSSYNC returned info but no QSMGR running on my node...use default QSMGR
//LCOV_EXCL_START
				sprintf(tmpString, "WMS manager process not registered on node %d", myNid);
				SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_WARNING_TYPE, 0, ODBCMX_SERVER,
					srvrGlobal->srvrObjRef, 1, tmpString);
				sprintf(srvrGlobal->QSProcessName,"%s%02d",srvrInitParam->QSProcessName,myNid);
//LCOV_EXCL_STOP
			}
		}
		else //Error getting data from QSSYNC...use default QSMGR
			sprintf(srvrGlobal->QSProcessName,"%s%02d",srvrInitParam->QSProcessName,myNid);
	}
	else
	{
		//QSSYNC process not running...use default QSMGR
		sprintf(tmpString, "WMS process %s does not exist", srvrInitParam->QSsyncProcessName);
		SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_WARNING_TYPE, 0, ODBCMX_SERVER,
			srvrGlobal->srvrObjRef, 1, tmpString);
		sprintf(srvrGlobal->QSProcessName,"%s%02d",srvrInitParam->QSProcessName,myNid);
	}
*/
	// End of disabled QSSYNC lookup.
	// Added for performance improvement to avoid msg_mon calls in qsmgr. The info collected
	// here will be passed to WMS.
MS_Mon_Node_Info_Type node_info;
	// Record the local segment (node) name for later use by WMS; blank on failure.
	error = msg_mon_get_node_info_detail(proc_info.nid,&node_info);
	if (error == XZFIL_ERR_OK )
		strcpy(srvrGlobal->segmentname, node_info.node[0].node_name);
//LCOV_EXCL_START
	else
		bzero(srvrGlobal->segmentname, sizeof(srvrGlobal->segmentname));
//LCOV_EXCL_STOP
	srvrGlobal->QSProcessLen = strlen(srvrGlobal->QSProcessName);
	srvrGlobal->mute = srvrInitParam->mute;//Dashboard testing - no 21036 message
	srvrGlobal->ext_21036 = srvrInitParam->ext_21036;//extended 21036 message - for SRPQ
	strcpy(srvrGlobal->TraceCollector, srvrInitParam->TraceCollector);
	// Qualify the RS collector with the system name unless it is the EMS collector itself.
	if (stricmp(srvrInitParam->RSCollector,srvrInitParam->EmsName)==0)
	{
		strcpy(srvrGlobal->RSCollector, srvrInitParam->RSCollector);
	}
	else
	{
		GetSystemNm(tmpString);
		sprintf(srvrGlobal->RSCollector, "%s.%s", tmpString, srvrInitParam->RSCollector);
	}
	srvrGlobal->portNumber = srvrInitParam->portNumber;
}	// end of "if (initParam != NULL && initParamLen != 0)"

//LCOV_EXCL_START
// Load resource-governing configuration; a failure is fatal to initialization.
if ((*returnSts = InstantiateRGObject()) != ERROR_SUCCESS)
{
	sprintf(tmpString, "%ld", *returnSts);
	SendEventMsg(MSG_ODBC_NT_ERROR, EVENTLOG_ERROR_TYPE, _getpid(), ODBCMX_SERVER,
		srvrGlobal->srvrObjRef, 1, tmpString);
	SendEventMsg(MSG_READ_REGISTRY_ERROR, EVENTLOG_ERROR_TYPE, _getpid(), ODBCMX_SERVER,
		srvrGlobal->srvrObjRef, 0);
	SET_ERROR((long)0, NSK, UNKNOWN_TRANSPORT, SRVR_API_INIT, E_SERVER, "ImplInit",
		O_INIT_PROCESS, F_INSTANTIATE_RG_OBJECT, *returnSts, 0);
	return;
}
//LCOV_EXCL_STOP

// Per-connection defaults; reset again on each new dialogue.
srvrGlobal->bAutoCommitOn = FALSE;
srvrGlobal->resGovernOn = FALSE;
srvrGlobal->envVariableOn = FALSE;
srvrGlobal->EnvironmentType = MXO_ODBC_35;
srvrGlobal->clientLCID = LANG_NEUTRAL;
srvrGlobal->clientErrorLCID = LANG_NEUTRAL;
srvrGlobal->clientACP = GetACP();
strcpy(srvrGlobal->userSID, "Invalid User");
srvrGlobal->WMSSrvrType = WMS_SRVR_SRVR;
// Added for replacing USER_GETINFO_() with PROCESS_GETINFO().
srvrGlobal->userID = 0;
srvrGlobal->resourceStatistics = 0;
TCPU_DECL(processId);
int iprocessId; // for OSS and NT process Ids
// Obtain this process's own handle, then decompose it into node id + process id.
if ((error = PROCESSHANDLE_GETMINE_(TPT_REF(srvrGlobal->nskProcessInfo.pHandle))) != 0)
{
//LCOV_EXCL_START
	sprintf(tmpString, "%d", error);
	SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER,
		srvrGlobal->srvrObjRef, 1, tmpString);
	*returnSts = 9998;
	SET_ERROR((long)0, NSK, UNKNOWN_TRANSPORT, SRVR_API_INIT, E_SERVER, "ImplInit",
		O_INIT_PROCESS, F_PROCESSHANDLE_GETMINE_, *returnSts, error);
	return;
//LCOV_EXCL_STOP
}
if ((error = PROCESSHANDLE_DECOMPOSE_(TPT_REF(srvrGlobal->nskProcessInfo.pHandle),
	&srvrGlobal->nskProcessInfo.nodeId,
	&processId,
	OMITREF, OMITREF, OMITSHORT, OMITREF, OMITREF, OMITSHORT, OMITREF, OMITREF)) != 0)
//LCOV_EXCL_START
{
	sprintf(tmpString, "%d", error);
	SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER,
		srvrGlobal->srvrObjRef, 1, tmpString);
	*returnSts = 9997;
	SET_ERROR((long)0, NSK, UNKNOWN_TRANSPORT, SRVR_API_INIT, E_SERVER, "ImplInit",
		O_INIT_PROCESS, F_PROCESSHANDLE_DECOMPOSE_, *returnSts, error);
	return;
}
//LCOV_EXCL_STOP
srvrGlobal->nskProcessInfo.processId = processId;

// Create Session ID
char tmpsrvrSessionId[SESSION_ID_LEN];
getSessionId(tmpsrvrSessionId);
strcpy(srvrSessionId, tmpsrvrSessionId);
strcpy(srvrGlobal->sessionId,srvrSessionId);
srvrGlobal->numConnection = 0;
srvrGlobal->lastCleanupTime = time(NULL);
srvrGlobal->cleanupByConnection = 0;
srvrGlobal->cleanupByTime = 0;

//LCOV_EXCL_START
// srvrInitParam->srvrTrace = true;
// Server Trace Class initialization — only when tracing was requested in the init params.
if (srvrInitParam != NULL && srvrInitParam->srvrTrace == true)
{
	sprintf(tmpString, "Server Trace Enabled.");
	SendEventMsg(MSG_SERVER_TRACE_INFO,
		EVENTLOG_INFORMATION_TYPE,
		srvrGlobal->nskASProcessInfo.processId,
		ODBCMX_SERVICE,
		srvrGlobal->srvrObjRef,
		4,
		srvrGlobal->sessionId,
		"EnableServerTrace",
		"0",
		tmpString);
	srvrGlobal->traceLogger = new ODBCMXTraceMsg(srvrGlobal->nskProcessInfo.processId,
srvrGlobal->srvrObjRef);
	if (srvrGlobal->traceLogger == NULL)
	{
		SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE,
			srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
			srvrGlobal->srvrObjRef, 1, "srvrGlobal->traceLogger");
		exitServerProcess();
	}
	srvrGlobal->traceLogger->OpenTraceCollector(srvrGlobal->TraceCollector);
}
else
//LCOV_EXCL_STOP
	srvrGlobal->traceLogger = NULL;

if (srvrGlobal->traceLogger != NULL)
{
	srvrGlobal->traceLogger->TraceImplInitEnter(srvrInitParam, initParamLen);
}

// No AS object reference supplied: run with infinite idle timeouts and empty
// resource/environment descriptor lists.
if (srvrGlobal->asSrvrObjRef[0] == '\0')
{
//LCOV_EXCL_START
	srvrGlobal->srvrContext.srvrIdleTimeout = INFINITE_SRVR_IDLE_TIMEOUT;
	srvrGlobal->srvrContext.connIdleTimeout = INFINITE_CONN_IDLE_TIMEOUT;
	srvrGlobal->srvrContext.resDescList._buffer = NULL;
	srvrGlobal->srvrContext.resDescList._length = 0;
	srvrGlobal->srvrContext.envDescList._buffer = NULL;
	srvrGlobal->srvrContext.envDescList._length = 0;
//LCOV_EXCL_STOP
}
// TIP (transaction) gateway state starts out unset.
srvrGlobal->tip_gateway = (tip_handle_t)NULL;
srvrGlobal->pxid_url = (char *)NULL;
srvrGlobal->local_xid = NULL;
srvrGlobal->xid_length = 0;
srvrGlobal->DefaultCatalog[0] = '\0';
srvrGlobal->DefaultSchema[0] = '\0';
// srvrGlobal->ext_21036 = false;
srvrGlobal->CSObject = new CRITICAL_SECTION;
InitializeCriticalSection(srvrGlobal->CSObject);
//Call SQLInitialization function here
initSqlCore();
// checkIfRowsetSupported();
srvrGlobal->bRowsetSupported = TRUE;
srvrGlobal->SystemCatalog[0] = '\0';
if (envGetMXSystemCatalogName (&srvrGlobal->SystemCatalog[0]) != TRUE)
{
	*returnSts = 9997;
	SET_ERROR((long)0, NSK, UNKNOWN_TRANSPORT, SRVR_API_INIT, E_SERVER, "ImplInit",
		O_INIT_PROCESS, F_ENV_GET_MX_SYSTEM_CATALOG_NAME, *returnSts, 0);
	return;
}
srvrGlobal->bSkipASTimer = false;
srvrGlobal->m_NodeId = myNid;
strncpy(srvrGlobal->m_ProcName, myProcName.c_str(), MS_MON_MAX_PROCESS_NAME);
// Statistics-publication settings come from file-scope configuration variables.
srvrGlobal->m_statisticsPubType = statisticsPubType;
srvrGlobal->m_bStatisticsEnabled = bStatisticsEnabled;
srvrGlobal->m_iAggrInterval = aggrInterval;
// Round the aggregation interval and publication threshold up to whole
// MIN_INTERVAL ticks.
interval_max=aggrInterval/MIN_INTERVAL;
if(aggrInterval%MIN_INTERVAL)
	interval_max+=1;
srvrGlobal->m_iQueryPubThreshold = queryPubThreshold;
if(queryPubThreshold>=0)
{
	limit_max=queryPubThreshold/MIN_INTERVAL;
	if(queryPubThreshold%MIN_INTERVAL)
		limit_max+=1;
}
// Explain-plan publication requires statistics to be enabled.
if (!srvrGlobal->m_bStatisticsEnabled)
	bPlanEnabled = false;
srvrGlobal->sqlPlan = bPlanEnabled;

// Start the periodic association-server polling timer (fires ASTimerExpired).
CEE_TIMER_CREATE2(DEFAULT_AS_POLLING,0,ASTimerExpired,(CEE_tag_def)NULL,
	&srvrGlobal->ASTimerHandle,srvrGlobal->receiveThrId);

resStatSession = NULL;
resStatStatement = NULL;
if (srvrGlobal->m_bStatisticsEnabled)
{
	resStatSession = new ResStatisticsSession();
//LCOV_EXCL_START
	if (resStatSession == NULL)
	{
		SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE,
			srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
			srvrGlobal->srvrObjRef, 1, "resStatSession");
		exitServerProcess();
	}
//LCOV_EXCL_STOP
	resStatStatement = new ResStatisticsStatement();
	if (resStatStatement == NULL)
	{
//LCOV_EXCL_START
		SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE,
			srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
			srvrGlobal->srvrObjRef, 1, "resStatStatement");
		exitServerProcess();
//LCOV_EXCL_STOP
	}
}
if (srvrGlobal->traceLogger != NULL)
{
	srvrGlobal->traceLogger->TraceImplInitExit(*returnSts);
}

#ifdef PERF_TEST // Added for performance testing
// Derive a filesystem-safe file name from the session id ('$', ':' -> '_', '?' -> 'X').
char fName[256], tmpStr[SESSION_ID_LEN+1];
strcpy( tmpStr, srvrSessionId );
short slen = strlen(tmpStr);
for( int i =0; i < slen; i++ )
{
	if( tmpStr[i] == '$' )
		tmpStr[i] = '_';
	else if( tmpStr[i] == ':' )
		tmpStr[i] = '_';
	else if( tmpStr[i] == '?'
) tmpStr[i] = 'X';
}
sprintf( fName, "/home/sqperf1/wms_perf/Srvr%s.xml", tmpStr );
perf = new PerformanceMeasure( fName ); // , pToken );
#endif

#ifdef __TIME_LOGGER
if(srvrGlobal->timeLoggerFlag)
{
	createTimeLoggerFile();
}
#endif
}

// Registers this server process with the association server (AS), but only when
// an AS object reference was supplied at init time. Inserts the given IP
// address/host name into the object reference before sending, and records this
// process's node id. Allocation or registration failure terminates the process.
extern void SRVR::RegisterSrvr(char* IpAddress, char* HostName)
{
	SRVRTRACE_ENTER(FILE_AME+1);
	CEE_status retcode = CEE_SUCCESS;
	IDL_OBJECT_def srvrObjRef;

	if (srvrGlobal->asSrvrObjRef[0] != '\0')
	{
		AS_CALL_CONTEXT* asCallContext;
		MS_Mon_Process_Info_Type process_info;
		msg_mon_get_process_info_detail(NULL,&process_info);
		srvrGlobal->nskProcessInfo.nodeId = process_info.nid;
		srvrGlobal->nskQSProcessInfo.nodeId = process_info.nid;
		// Call context is freed by the completion function (or on error below).
		asCallContext = new AS_CALL_CONTEXT;
		if (asCallContext == NULL)
		{
//LCOV_EXCL_START
			SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE,
				srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
				srvrGlobal->srvrObjRef, 1, "asCallContext");
			exitServerProcess();
//LCOV_EXCL_STOP
		}
		strcpy(srvrObjRef, srvrGlobal->srvrObjRef);
		insertIpAddressAndHostNameInObjRef(srvrObjRef, IpAddress, HostName);
		if(retcode = odbcas_ASSvc_RegProcess_pst_(
			&(asCallContext->ASSvc_proxy),
			asCallContext,
			odbcas_ASSvc_RegProcess_ccf_,
			&srvrGlobal->srvrVersion,
			CORE_SRVR,
			srvrObjRef,
			&srvrGlobal->nskProcessInfo) != CEE_SUCCESS)
		{
//LCOV_EXCL_START
			delete asCallContext;
			SendEventMsg(MSG_SRVR_REGISTER_ERROR, EVENTLOG_ERROR_TYPE,
				srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
				srvrGlobal->srvrObjRef, 0);
			exitServerProcess();
//LCOV_EXCL_STOP
		}
	}
	SRVRTRACE_EXIT(FILE_AME+1);
}

// Periodic AS-polling timer callback (armed in ImplInit via CEE_TIMER_CREATE2).
// If a previous tick still holds the mutex, this tick is simply skipped.
// While the server is AVAILABLE/UNINITIALIZED it checks ZooKeeper to decide
// whether DCS has stopped (then shuts this server down) and whether a stuck
// CONNECTING/CONNECT_* state should be reset back to AVAILABLE.
void __cdecl SRVR::ASTimerExpired(CEE_tag_def timer_tag)
{
	SRVRTRACE_ENTER(FILE_AME+2);
	if(srvrGlobal->mutex->locked())
		return;
	srvrGlobal->mutex->lock();

	if(srvrGlobal->bSkipASTimer == false &&
		(srvrGlobal->srvrState == SRVR_AVAILABLE || srvrGlobal->srvrState == SRVR_UNINITIALIZED))
	{
		// Shutdown if DCS stops.
// Look for nodes under master and server and determine if DCS has stopped int rc = ZOK; Stat stat; struct String_vector children; children.count = 0; children.data = NULL; stringstream ss; string pathStr; ss.str(""); ss << zkRootNode << "/dcs/master"; pathStr = ss.str(); rc = zoo_get_children(zh, pathStr.c_str(), 0, &children); if( rc != ZOK || !(children.count > 0) ) { free_String_vector(&children); ss.str(""); ss << zkRootNode << "/dcs/servers/running"; pathStr = ss.str(); rc = zoo_get_children(zh, pathStr.c_str(), 0, &children); if( rc != ZOK || !(children.count > 0) ) shutdownThisThing = 1; } free_String_vector(&children); if( !shutdownThisThing ) { static long long timeout = JULIANTIMESTAMP(); static long prevDialogueId = 0; static short clientConnErrorTimeOut = 10; // secs long currDialogueId = 0; char zkData[256]; int zkDataLen = sizeof(zkData); rc = zoo_get(zh, dcsRegisteredNode.c_str(), false, zkData, &zkDataLen, &stat); if( rc == ZOK ) { // The first token should be state char *tkn = NULL; char state[32]; bool zkStatus = true; tkn = strtok(zkData, ":"); if( tkn == NULL ) goto HandleNoTokens; if( stricmp(tkn, "CONNECTING") && stricmp(tkn, "CONNECT_FAILED") && stricmp(tkn, "CONNECT_REJECTED") ) // Not in CONNECTING state { timeout = JULIANTIMESTAMP(); prevDialogueId = 0; } else { strcpy( state, tkn ); // Skip second token - Timestamp tkn = strtok(NULL, ":"); if( tkn == NULL ) goto HandleNoTokens; // Third token is dialogue ID tkn = strtok(NULL, ":"); if( tkn == NULL ) goto HandleNoTokens; currDialogueId = atoi(tkn); if( prevDialogueId == 0 || prevDialogueId != currDialogueId ) { prevDialogueId = currDialogueId; timeout = JULIANTIMESTAMP(); } if ( prevDialogueId == currDialogueId ) { // In CONNECTING state and timeout > clientConnTimeOut if( ((JULIANTIMESTAMP() - timeout) > (clientConnTimeOut * 1000000)) && stricmp(state, "CONNECTING") == 0 ) zkStatus = updateZKState(CONNECTING, AVAILABLE); else if( (JULIANTIMESTAMP() - timeout) > (clientConnErrorTimeOut * 
1000000))
						{
							// In CONNECT_FAILED or CONNECT_REJECTED state and timeout > clientConnErrorTimeOut
							if (stricmp(state, "CONNECT_FAILED") == 0)
								zkStatus = updateZKState(CONNECT_FAILED, AVAILABLE);
							else if (stricmp(state, "CONNECT_REJECTED") == 0)
								zkStatus = updateZKState(CONNECT_REJECTED, AVAILABLE);
						}
					}
					// Failure to update the ZooKeeper state is fatal: release the
					// mutex, close the ZK session, and exit the process.
					if( !zkStatus )
					{
						srvrGlobal->mutex->unlock();
						zookeeper_close(zh);
						exitServerProcess();
					}
				}
HandleNoTokens:
				; // Cannot retrieve tokens from ZK entry at this time.
			}
			else
				shutdownThisThing = 1;
		}

		if( shutdownThisThing )
		{
			srvrGlobal->mutex->unlock();
			zookeeper_close(zh);
			exitServerProcess();
		}
	}
	srvrGlobal->mutex->unlock();
	SRVRTRACE_EXIT(FILE_AME+2);
}

// Returns TRUE if the association-server process (handle cached at init) still exists.
BOOL SRVR::checkIfASSvcLives( void )
{
	return processExists(NULL, TPT_REF(srvrGlobal->nskASProcessInfo.pHandle) );
}

/*
 * Call Completion function for
 * operation 'odbcas_ASSvc_RegProcess'.
 * Resets the server context to its defaults, marks the server AVAILABLE,
 * and frees the call context allocated by RegisterSrvr.
 */
extern "C" void
odbcas_ASSvc_RegProcess_ccf_(
	/* In */ CEE_tag_def cmptag_
  , /* In */ const odbcas_ASSvc_RegProcess_exc_ *exception_
  , /* In */ const SRVR_CONTEXT_def *srvrContext
  )
{
	SRVRTRACE_ENTER(FILE_AME+4);
	int i;
	short wms_nid;
	RES_DESC_def *pResValuesIn;
	RES_DESC_def *pResValues;
	ENV_DESC_def *pEnvValuesIn;
	ENV_DESC_def *pEnvValues;
	char *saveptr;
	CEE_status sts;
	char tmpString[25];
	AS_CALL_CONTEXT *asCallContext = (AS_CALL_CONTEXT *)cmptag_;

	// Empty resource/environment descriptor lists and default timeouts.
	srvrGlobal->srvrContext.resDescList._buffer = NULL;
	srvrGlobal->srvrContext.resDescList._length = 0;
	srvrGlobal->resGovernOn = FALSE;
	srvrGlobal->srvrContext.envDescList._buffer = NULL;
	srvrGlobal->srvrContext.envDescList._length = 0;
	srvrGlobal->envVariableOn = FALSE;
	srvrGlobal->srvrContext.connIdleTimeout = DEFAULT_CONN_IDLE_TIMEOUT_MINS;
	srvrGlobal->srvrContext.srvrIdleTimeout = INFINITE_SRVR_IDLE_TIMEOUT;
	srvrGlobal->srvrState = SRVR_AVAILABLE;
	delete asCallContext;
	SRVRTRACE_EXIT(FILE_AME+4);
}

// Reads this process's memory size from /proc/<pid>/statm and returns the
// second field (presumably resident pages — TODO confirm units). sessionPhase
// ("Initial"/"Terminate"/"Break") is only used to label error events. The file
// descriptor 'fd' is opened once and kept open across calls (note the
// commented-out local declaration below); returns 0 when the read fails.
long getMemSize(char *sessionPhase)
{
	//int fd = -1;
	char tmpString[128];
	int local_n;
	char nameBuf[1000];
	char dataBuf[1000];
	char sessionPhaseStr[40];
	long memSize =0 ;
memset(sessionPhaseStr,'\0', 40);
	// Map the short phase tag to the full routine name used in error events.
	if(!memcmp(sessionPhase,"Initial",7))
		memcpy(sessionPhaseStr,"odbc_SQLSvc_InitializeDialogue_ame_",35);
	else if(!memcmp(sessionPhase,"Terminate",9))
		memcpy(sessionPhaseStr,"odbc_SQLSvc_TerminateDialogue_ame_",34);
	else if(!memcmp(sessionPhase,"Break",5))
		memcpy(sessionPhaseStr,"BreakDialogue",13);

	memset(nameBuf,'\0',1000);
	memset(dataBuf,'\0',1000);
	sprintf(nameBuf, "/proc/%d/statm", srvrGlobal->nskProcessInfo.processId);
	// First call: open the statm file and keep the descriptor for later calls.
	if(fd == -1){
		if ((fd = open(nameBuf, O_RDONLY)) == -1)
		{
			memset(tmpString,'\0',128);
			sprintf(tmpString, "open %s error in %s", nameBuf,sessionPhaseStr);
			SendEventMsg(MSG_PROGRAMMING_ERROR, EVENTLOG_ERROR_TYPE,
				srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
				srvrGlobal->srvrObjRef, 1, tmpString);
		}
		else
		{
			if ((local_n = read(fd, dataBuf, sizeof dataBuf - 1)) < 0)
			{
				memset(tmpString,'\0',128);
				sprintf(tmpString, "read %s error in %s", nameBuf,sessionPhaseStr);
				SendEventMsg(MSG_PROGRAMMING_ERROR, EVENTLOG_ERROR_TYPE,
					srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
					srvrGlobal->srvrObjRef, 1, tmpString);
			}
			else
				// Skip the first statm field, capture the second.
				sscanf(dataBuf, "%*ld %ld ", &memSize);
		}
	}
	else
	{
		// Subsequent calls: rewind the already-open descriptor and re-read.
		lseek(fd, 0L, SEEK_SET);
		if ((local_n = read(fd, dataBuf, sizeof dataBuf - 1)) < 0)
		{
			memset(tmpString,'\0',128);
			sprintf(tmpString, "read %s error %s", nameBuf,sessionPhaseStr);
			SendEventMsg(MSG_PROGRAMMING_ERROR, EVENTLOG_ERROR_TYPE,
				srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
				srvrGlobal->srvrObjRef, 1, tmpString);
		}
		else
			sscanf(dataBuf, "%*ld %ld ", &memSize);
	}
	return memSize;
}

/*
 * Asynchronous method function prototype for
 * operation 'odbc_SQLSvc_InitializeDialogue'
 */
extern "C" void
odbc_SQLSvc_InitializeDialogue_ame_(
	/* In */ CEE_tag_def objtag_
  , /* In */ const CEE_handle_def *call_id_
  , /* In */ const USER_DESC_def *userDesc
  , /* In */ const CONNECTION_CONTEXT_def *inContext
  , /* In */ DIALOGUE_ID_def dialogueId
  )
{
	SRVRTRACE_ENTER(FILE_AME+5);
	OUT_CONNECTION_CONTEXT_def outContext;
	odbc_SQLSvc_MonitorCall_exc_
monitorException_={0,0}; odbc_SQLSvc_InitializeDialogue_exc_ exception_={0,0,0}; odbc_SQLSvc_SetConnectionOption_exc_ setConnectException={0,0,0}; char tmpString[100]; ERROR_DESC_LIST_def sqlWarning = {0,0}; unsigned long curRowNo; ENV_DESC_def *pEnvDesc; char VariableValue[3900]; char seps[] = " \t\n."; char *EnvTypes[] = {"SET"}; char *SetTypes[] = {"CATALOG", "SCHEMA"}; char *token; char *saveptr; unsigned long computerNameLen = MAX_COMPUTERNAME_LENGTH*4 + 1; VERSION_def *versionPtr; VERSION_def *clientVersionPtr; CEE_status sts; short retCode = 0; char TmpstrRole[MAX_ROLENAME_LEN + 1]; __int64 prevRedefTime = 0; //Initialize the isShapeLoaded srvrGlobal->isShapeLoaded = false; // cleanupJniDLL(); strcpy(TmpstrRole, srvrGlobal->QSRoleName); prevRedefTime = srvrGlobal->redefTime; srvrGlobal->QSServiceId = 0; bzero(srvrGlobal->QSServiceName, sizeof(srvrGlobal->QSServiceName)); bzero(srvrGlobal->QSRoleName, sizeof(srvrGlobal->QSRoleName)); bzero(srvrGlobal->QSRuleName, sizeof(srvrGlobal->QSRuleName)); bzero(srvrGlobal->QSUserName, sizeof(srvrGlobal->QSUserName)); bzero(srvrGlobal->QSDBUserName, sizeof(srvrGlobal->QSDBUserName)); bzero(srvrGlobal->ApplicationName, sizeof(srvrGlobal->ApplicationName)); bzero(&outContext, sizeof(outContext)); srvrGlobal->bSpjEnableProxy = FALSE; srvrGlobal->bspjTxnJoined = FALSE; srvrGlobal->spjTxnId = 0; srvrGlobal->enableLongVarchar = false; if (inContext->inContextOptions1 & INCONTEXT_OPT1_FETCHAHEAD) srvrGlobal->fetchAhead = TRUE; else srvrGlobal->fetchAhead = FALSE; srvrGlobal->defaultSchemaAccessOnly = false; // Added for 64bit work srvrGlobal->stmtHandleMap.clear(); SRVR_STMT_HDL::globalKey = 0; int catLen; char TmpstrCat[MAX_SQL_IDENTIFIER_LEN+3]; char TmpstrSch[MAX_SQL_IDENTIFIER_LEN+3]; char *tmpPtr, *tmpPtr2; bool delimit = true; char *UTF8ErrorText; long UTF8ErrorTextLen; DBUserAuth *userSession = DBUserAuth::GetInstance(); TmpstrCat[0]='\0'; TmpstrSch[0]='\0'; diagnostic_flags=inContext->diagnosticFlag; 
strncpy(srvrGlobal->ClientComputerName, inContext->computerName, sizeof(srvrGlobal->ClientComputerName) - 1); srvrGlobal->ClientComputerName[sizeof(srvrGlobal->ClientComputerName) -1] = '\0'; strncpy(srvrGlobal->ApplicationName, inContext->windowText,sizeof(srvrGlobal->ApplicationName) - 1); srvrGlobal->ApplicationName[sizeof(srvrGlobal->ApplicationName) -1] = '\0'; strcpy(srvrGlobal->QSRoleName, inContext->userRole); strcpy(srvrGlobal->QSUserName, userDesc->userName != NULL ? userDesc->userName: ""); char tmpsrvrSessionId[SESSION_ID_LEN]; getSessionId(tmpsrvrSessionId); strcpy(srvrGlobal->sessionId,tmpsrvrSessionId); srvrGlobal->m_bNewConnection = true; char schemaValueStr[MAX_SQL_IDENTIFIER_LEN+MAX_SQL_IDENTIFIER_LEN+5+1]; // 5 for quotes + dot char catTempStr[MAX_SQL_IDENTIFIER_LEN+1]; char schTempStr[MAX_SQL_IDENTIFIER_LEN+1]; catTempStr[0] = '\0'; schTempStr[0] = '\0'; BOOL InternalUse = TRUE; srvrGlobal->bWMS_AdaptiveSegment = false; srvrGlobal->EnvironmentType = MXO_ODBC_35; if (srvrGlobal->traceLogger != NULL) { srvrGlobal->traceLogger->TraceConnectEnter(userDesc, inContext, dialogueId); } // This check is not possible, since we can update the srvr state from client also // Client updates the state as SRVR_CLIENT_DISAPPEARED in case of login timeout and // the srvr state may not be in sync with the corresponding srvr state in AS // Hence commenting // ---- we removed update the state from the client // if (srvrGlobal->srvrState != SRVR_AVAILABLE) // exception_.exception_nr = odbc_SQLSvc_InitializeDialogue_InvalidConnection_exn_; // else sts = CEE_TMP_ALLOCATE(call_id_, sizeof(VERSION_def)*2, (void **)&versionPtr); if( sts != CEE_SUCCESS) { //LCOV_EXCL_START strcpy( errStrBuf2, "SrvrConnect.cpp"); strcpy( errStrBuf3, "odbc_SQLSvc_InitializeDialogue_sme_"); strcpy( errStrBuf4, "CEE_TMP_ALLOCATE"); sprintf( errStrBuf5, "Failed to get <%d> bytes", sizeof(VERSION_def)*2); logError( NO_MEMORY, SEVERITY_MAJOR, CAPTURE_ALL + PROCESS_STOP ); //LCOV_EXCL_STOP } int 
len_length = inContext->clientVersionList._length; VERSION_def *p_buffer = inContext->clientVersionList._buffer; for(int i=0; i < len_length; i++) { clientVersionPtr = p_buffer + i; switch( clientVersionPtr->componentId ) { case DRVR_COMPONENT: case WIN_UNICODE_DRVR_COMPONENT: case LINUX_UNICODE_DRVR_COMPONENT: case HPUX_UNICODE_DRVR_COMPONENT: case OLEDB_DRVR_COMPONENT: case DOT_NET_DRVR_COMPONENT: case JDBC_DRVR_COMPONENT: case LINUX_DRVR_COMPONENT: case HPUX_DRVR_COMPONENT: case AIX_DRVR_COMPONENT: case SUNSPARC32_DRVR_COMPONENT: case SUNSPARC64_DRVR_COMPONENT: srvrGlobal->drvrVersion.componentId = clientVersionPtr->componentId; srvrGlobal->drvrVersion.majorVersion = clientVersionPtr->majorVersion; srvrGlobal->drvrVersion.minorVersion = clientVersionPtr->minorVersion; srvrGlobal->drvrVersion.buildId = clientVersionPtr->buildId; break; case APP_COMPONENT: srvrGlobal->appVersion.componentId = clientVersionPtr->componentId; srvrGlobal->appVersion.majorVersion = clientVersionPtr->majorVersion; srvrGlobal->appVersion.minorVersion = clientVersionPtr->minorVersion; srvrGlobal->appVersion.buildId = clientVersionPtr->buildId; break; } } outContext.versionList._length = 2; outContext.versionList._buffer = versionPtr; versionPtr = outContext.versionList._buffer + 0; // First element versionPtr->componentId = srvrGlobal->srvrVersion.componentId; versionPtr->majorVersion = srvrGlobal->srvrVersion.majorVersion; versionPtr->minorVersion = srvrGlobal->srvrVersion.minorVersion; versionPtr->buildId = srvrGlobal->srvrVersion.buildId; versionPtr = outContext.versionList._buffer + 1; // Second element versionPtr->componentId = SQL_COMPONENT; versionPtr->majorVersion = srvrGlobal->sqlVersion.majorVersion; versionPtr->minorVersion = srvrGlobal->sqlVersion.minorVersion; versionPtr->buildId = srvrGlobal->sqlVersion.buildId; outContext.nodeId = srvrGlobal->nskProcessInfo.nodeId; outContext.processId = srvrGlobal->nskProcessInfo.processId; if (!GetComputerName(outContext.computerName, 
&computerNameLen)) outContext.computerName[0] = '\0'; outContext.outContextOptions1 = 0; outContext.outContextOptions2 = 0; char zkData[256]; char state[32]; int zkDataLen = sizeof(zkData); Stat stat; bool zk_error = false; char zkErrStr[128]; char *data = NULL; int rc = zoo_exists(zh, dcsRegisteredNode.c_str(), 0, &stat); if( rc == ZOK ) { bool stateOk = false; short cnt = 6; char *tkn = NULL; while(!stateOk && cnt) { // call sync to get up to date data data = strdup(dcsRegisteredNode.c_str()); rc = zoo_async(zh, dcsRegisteredNode.c_str(), sync_string_completion, data); if ( data != NULL ) free(data); if( rc != ZOK ) { sprintf(tmpString, "odbc_SQLSvc_InitializeDialogue_ame_...Error %d calling zoo_async() for %s. Server exiting.", rc, dcsRegisteredNode.c_str()); SendEventMsg(MSG_PROGRAMMING_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, tmpString); exitServerProcess(); } cnt--; // Get the dialogue ID from the data part of connecting znode rc = zoo_get(zh, dcsRegisteredNode.c_str(), false, zkData, &zkDataLen, &stat); if( rc == ZOK ) { // The first token should be CONNECTING state tkn = strtok(zkData, ":"); if( tkn == NULL || stricmp(tkn, "CONNECTING") ) { // If last try then return error if(!cnt) break; else { // Wait a short while for the state update sleep(10); continue; } } else stateOk = true; } else break; } if( rc == ZOK ) { if( tkn == NULL || stricmp(tkn, "CONNECTING") ) { char errMsg[512]; if( tkn == NULL ) sprintf( errMsg, "Trafodion Internal error: Zookeeper entry not in connecting state for %ld. Current state is NULL", srvrGlobal->portNumber); else sprintf( errMsg, "Trafodion Internal error: Zookeeper entry not in connecting state for %ld. 
Current state is %s", srvrGlobal->portNumber, tkn ); exception_.exception_detail = -1; //exception_.exception_nr = odbc_SQLSvc_InitializeDialogue_InvalidConnection_exn_; exception_.exception_nr = odbc_SQLSvc_InitializeDialogue_SQLError_exn_; SETSRVRERROR(SQLERRWARN, -1, "HY000", errMsg, &exception_.u.SQLError.errorList); //SETSRVRERROR(SQLERRWARN, -1, "HY000", "Trafodion Internal error: Zookeeper entry not in connecting state", &exception_.u.SQLError.errorList); odbc_SQLSvc_InitializeDialogue_ts_res_(objtag_, call_id_, &exception_, &outContext); return; } // Skip second token - Timestamp tkn = strtok(NULL, ":"); // Third token in data is dialogue ID srvrGlobal->dialogueId = -1; tkn = strtok(NULL, ":"); if( tkn != NULL ) srvrGlobal->dialogueId = atoi(tkn); if( tkn == NULL || srvrGlobal->dialogueId == -1 ) { SendEventMsg(MSG_PROGRAMMING_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, "No dialogue ID in registered node. Server exiting."); exitServerProcess(); } // Return error if dialogue ID does not match. if (srvrGlobal->dialogueId != dialogueId) { exception_.exception_detail = -1; //exception_.exception_nr = odbc_SQLSvc_InitializeDialogue_InvalidConnection_exn_; //SETSRVRERROR(SECURITYERR, -1, "HY000", "Dialogue ID does not match", &exception_.u.SQLError.errorList); exception_.exception_nr = odbc_SQLSvc_InitializeDialogue_SQLError_exn_; SETSRVRERROR(SQLERRWARN, -1, "HY000", "Trafodion: Dialogue ID does not match", &exception_.u.SQLError.errorList); odbc_SQLSvc_InitializeDialogue_ts_res_(objtag_, call_id_, &exception_, &outContext); return; } } } if( rc != ZOK ) { sprintf(tmpString, "Error %d getting registered node data from Zookeeper. 
Server exiting.", rc); SendEventMsg(MSG_PROGRAMMING_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, tmpString); exitServerProcess(); } sdconn = ((CTCPIPSystemSrvr* )objtag_)->m_nSocketFnum; // If the server state is connecting then Initialize_Dialogue // is called second time with or without changing the password // Update the SrvrState as Connecting if (srvrGlobal->srvrState == SRVR_AVAILABLE ) srvrGlobal->srvrState = SRVR_CONNECTING; if( TestPointArray != NULL ) { //LCOV_EXCL_START delete[] TestPointArray; TestPointArray = NULL; //LCOV_EXCL_STOP } setConnectException.exception_nr = 0; WSQL_EXEC_ClearDiagnostics(NULL); // volatile int done = 0; // while (!done) { // sleep(10); // } // Security Initialization if (!securitySetup) { retCode = SECURITY_SETUP_(); if (retCode == SECMXO_NO_ERROR) securitySetup = true; else { //LCOV_EXCL_START exception_.exception_detail = retCode; exception_.exception_nr = odbc_SQLSvc_InitializeDialogue_SQLError_exn_; SETSECURITYERROR(retCode, &exception_.u.SQLError.errorList); odbc_SQLSvc_InitializeDialogue_ts_res_(objtag_, call_id_, &exception_, &outContext); updateSrvrState(SRVR_CONNECT_REJECTED); if (retCode == SECMXO_INTERNAL_ERROR_FATAL) { SendEventMsg(MSG_PROGRAMMING_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, "Security layer returned fatal error. 
Server exiting."); exitServerProcess(); } return; //LCOV_EXCL_STOP } } // R2.93 - Check if password security is required and reject old driver // R2.5 - Allow old driver connection without password encryption if security policy allows it if ( !(srvrGlobal->drvrVersion.buildId & PASSWORD_SECURITY)) { //LCOV_EXCL_START exception_.exception_nr = odbc_SQLSvc_InitializeDialogue_SQLError_exn_; SETSRVRERROR(SECURITYERR, -8837, "HY000", SQLSVC_EXCEPTION_PASSWORD_ENCRYPTION_REQUIRED, &exception_.u.SQLError.errorList); odbc_SQLSvc_InitializeDialogue_ts_res_(objtag_, call_id_, &exception_, &outContext); updateSrvrState(SRVR_CONNECT_REJECTED); return; //LCOV_EXCL_STOP } if (srvrGlobal->drvrVersion.buildId & PASSWORD_SECURITY) { if (inContext->inContextOptions1 & INCONTEXT_OPT1_CERTIFICATE_TIMESTAMP) { short certificateLen = 0; char* certificatePtr = NULL; char* certificateTS = inContext->connectOptions; if (userDesc->password._buffer == NULL || IS_CERTIFICATE_NEEDED_((char *)(userDesc->password._buffer))) retCode = VALIDATE_CERTIFICATE_TS(certificateTS); switch(retCode) { case SECMXO_CERTIFICATE_UPDATED: // certificates don't match, but policy permits downloading // start an autodownload of the certificate retCode = GET_CERTIFICATE(NULL, &certificateLen); if (retCode == SECMXO_NO_ERROR) { //LCOV_EXCL_START sts = CEE_TMP_ALLOCATE(call_id_, certificateLen+1, (void **)&certificatePtr); if( sts != CEE_SUCCESS) { strcpy( errStrBuf2, "SrvrConnect.cpp"); strcpy( errStrBuf3, "odbc_SQLSvc_InitializeDialogue_ame_"); strcpy( errStrBuf4, "CEE_TMP_ALLOCATE"); sprintf( errStrBuf5, "Failed to get <%d> bytes", certificateLen); logError( NO_MEMORY, SEVERITY_MAJOR, CAPTURE_ALL + PROCESS_STOP ); //LCOV_EXCL_STOP } retCode = GET_CERTIFICATE(certificatePtr, &certificateLen); } if (retCode == SECMXO_NO_ERROR) { outContext.outContextOptions1 = outContext.outContextOptions1 | OUTCONTEXT_OPT1_DOWNLOAD_CERTIFICATE; outContext.outContextOptionString = certificatePtr; outContext.outContextOptionStringLen 
= certificateLen; exception_.exception_nr = odbc_SQLSvc_InitializeDialogue_InvalidUser_exn_; odbc_SQLSvc_InitializeDialogue_ts_res_(objtag_, call_id_, &exception_, &outContext); return; } else { //LCOV_EXCL_START exception_.exception_detail = retCode; exception_.exception_nr = odbc_SQLSvc_InitializeDialogue_SQLError_exn_; SETSECURITYERROR(retCode, &exception_.u.SQLError.errorList); odbc_SQLSvc_InitializeDialogue_ts_res_(objtag_, call_id_, &exception_, &outContext); updateSrvrState(SRVR_CONNECT_REJECTED); if (retCode == SECMXO_INTERNAL_ERROR_FATAL) { SendEventMsg(MSG_PROGRAMMING_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, "Security layer returned fatal error. Server exiting."); exitServerProcess(); } return; //LCOV_EXCL_STOP } break; case SECMXO_NO_CERTIFICATE: // certificates don't match, and policy prohibits downloading // report error to user, no autodownload exception_.exception_detail = retCode; exception_.exception_nr = odbc_SQLSvc_InitializeDialogue_SQLError_exn_; SETSECURITYERROR(retCode, &exception_.u.SQLError.errorList); odbc_SQLSvc_InitializeDialogue_ts_res_(objtag_, call_id_, &exception_, &outContext); return; break; //LCOV_EXCL_START case SECMXO_CERTIFICATE_EXPIRED: // certificates match, but they are expired, and policy enforces certificate expiration // report error to user, no autodownload case SECMXO_INTERNAL_ERROR: case SECMXO_INTERNAL_ERROR_FATAL: // unexpected error, an EMS log entry was made // report error to user exception_.exception_detail = retCode; exception_.exception_nr = odbc_SQLSvc_InitializeDialogue_SQLError_exn_; SETSECURITYERROR(retCode, &exception_.u.SQLError.errorList); odbc_SQLSvc_InitializeDialogue_ts_res_(objtag_, call_id_, &exception_, &outContext); updateSrvrState(SRVR_CONNECT_REJECTED); if (retCode == SECMXO_INTERNAL_ERROR_FATAL) { SendEventMsg(MSG_PROGRAMMING_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 
1, "Security layer returned fatal error. Server exiting."); exitServerProcess(); //LCOV_EXCL_STOP } return; break; case SECMXO_NO_ERROR: // certificates match // continue, certificate is good default: break; } } } if (userDesc->userName == NULL || userDesc->password._buffer == NULL) { //LCOV_EXCL_START exception_.exception_nr = odbc_SQLSvc_InitializeDialogue_InvalidUser_exn_; SETSRVRERROR(SQLERRWARN, -8837, "28000", "Invalid authorization specification", &exception_.u.SQLError.errorList); odbc_SQLSvc_InitializeDialogue_ts_res_(objtag_, call_id_, &exception_, &outContext); updateSrvrState(SRVR_CONNECT_REJECTED); return; //LCOV_EXCL_STOP } if (strlen(inContext->catalog) > MAX_SQL_IDENTIFIER_LEN || strlen(inContext->schema) > MAX_SQL_IDENTIFIER_LEN) { exception_.exception_nr = odbc_SQLSvc_InitializeDialogue_ParamError_exn_; exception_.u.ParamError.ParamDesc = SQLSVC_EXCEPTION_INVALID_OPTION_VALUE_STR; updateSrvrState(SRVR_CONNECT_REJECTED); } // RAJANI KANTH /* * Read the set values as it has SQL_ATTR_WARNING has to be * set before the connection is established * SET SQL_ATTR_WARNING 0 - Supress the connection time warnings * SET SQL_ATTR_WARNING 1 - Not to Supress the connection time warnings */ if (srvrGlobal->envVariableOn) { int len_length = srvrGlobal->srvrContext.envDescList._length; ENV_DESC_def *p_buffer = (ENV_DESC_def *)srvrGlobal->srvrContext.envDescList._buffer; char *saveptr; for (curRowNo = 0; curRowNo < len_length; curRowNo ++) { pEnvDesc = p_buffer + curRowNo; if ( pEnvDesc->VarType == ENV_SET ) { strcpy(VariableValue, pEnvDesc->VarVal); token = strtok_r(VariableValue, seps, &saveptr); if (_stricmp(token, EnvTypes[0]) == 0) { token = strtok_r(NULL, seps, &saveptr ); if (_stricmp(token,ATTR_TYPE7) == 0) { token = strtok_r(NULL,seps, &saveptr); if(_stricmp(token,ATTR_TYPE7_VALUE2) == 0) // Not to Supress the connection time warnings srvrGlobal->EnvironmentType = srvrGlobal->EnvironmentType | MXO_PASSWORD_EXPIRY; } else if(_stricmp(token,ATTR_TYPE15) == 
0) // "SQL_ATTR_IGNORE_CANCEL" { token = strtok_r(NULL,seps, &saveptr); if(_stricmp(token,ATTR_TYPE15_VALUE2) == 0) // Force SQLCancel to be ignored outContext.outContextOptions1 = outContext.outContextOptions1 | OUTCONTEXT_OPT1_IGNORE_SQLCANCEL; } } } } } // Rajani End /* We should get rid of this, but we cant right now - if we remove it, because of a bug in SQL that does not reset transactions, an MXOSRVR could get into an unusable state for ex: an application sets txn isolation level to read uncommited. All subseq connection will also get this. - ideally we need to have a reset txn cqd (which will only be available in 2.5) - now setting this isolation to read committed is also a problem. If the system defaults has read uncommitted, then any scenario which will cause a new compiler to be created (ex: a diff userid logging on) will cause it to get a read committed isolation level so potential error 73s can happen. The workaround for this ofcourse is to set it at the datasource level */ odbc_SQLSvc_SetConnectionOption_sme_(objtag_, call_id_, &setConnectException, dialogueId, SQL_TXN_ISOLATION, SQL_TXN_READ_COMMITTED, NULL , &sqlWarning ); if (setConnectException.exception_nr != CEE_SUCCESS) { //LCOV_EXCL_START sprintf(tmpString, "%ld", inContext->txnIsolationLevel); SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "SQL_TXN_ISOLATION", tmpString); goto MapException; //LCOV_EXCL_STOP } odbc_SQLSvc_SetConnectionOption_sme_(objtag_, call_id_, &setConnectException, dialogueId, RESET_DEFAULTS, 0, NULL, &sqlWarning ); if (setConnectException.exception_nr != CEE_SUCCESS) { //LCOV_EXCL_START SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "RESET_DEFAULTS", ""); goto MapException; //LCOV_EXCL_STOP } odbc_SQLSvc_SetConnectionOption_sme_(objtag_, call_id_, &setConnectException, dialogueId, 
CUT_CONTROLQUERYSHAPE, 0, NULL, &sqlWarning ); if (setConnectException.exception_nr != CEE_SUCCESS) { //LCOV_EXCL_START SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "CUT_CONTROLQUERYSHAPE", ""); goto MapException; //LCOV_EXCL_STOP } // collect information for auditing and repository memset(setinit.clientId,'\0',MAX_COMPUTERNAME_LENGTH*4 + 1); memset(setinit.applicationId,'\0',APPLICATIONID_LENGTH*4 + 1); memset(setinit.clientUserName,'\0',MAX_SQL_IDENTIFIER_LEN + 1); strcpyUTF8(setinit.clientId,inContext->computerName, sizeof(setinit.clientId)); strcpyUTF8(setinit.applicationId,inContext->windowText,sizeof(setinit.applicationId)); if (inContext->clientUserName != NULL) strcpyUTF8(setinit.clientUserName,inContext->clientUserName, sizeof(setinit.clientUserName)); else strcpy(setinit.clientUserName, "<N/A>"); odbc_SQLSvc_InitializeDialogue_sme_(objtag_, call_id_, &exception_, userDesc, inContext, dialogueId, &outContext); // If there is an exception, do not proceed to set the server initial context if (exception_.exception_nr != 0) { odbc_SQLSvc_InitializeDialogue_ts_res_(objtag_, call_id_, &exception_, &outContext); updateSrvrState(SRVR_CONNECT_REJECTED); if (outContext.outContextOptionStringLen > 0) delete [] outContext.outContextOptionString; if (srvrGlobal->traceLogger != NULL) { srvrGlobal->traceLogger->TraceConnectExit(exception_, outContext); } return; } else { // Get Default Catalog Schema getCurrentCatalogSchema(); if ( srvrGlobal->DefaultSchema[0] == '\0' || srvrGlobal->DefaultCatalog[0] == '\0' ) { strcpy(srvrGlobal->DefaultCatalog, ODBCMX_DEFAULT_CATALOG); strcpy(srvrGlobal->DefaultSchema, ODBCMX_DEFAULT_SCHEMA); } if (inContext->catalog[0] != NULL) { // Temporary - till drivers get fixed // if (stricmp(inContext->catalog, ODBCMX_PREV_DEFAULT_CATALOG) != 0) { strcpy(srvrGlobal->DefaultCatalog, """"); strcat(srvrGlobal->DefaultCatalog, inContext->catalog); 
strcat(srvrGlobal->DefaultCatalog, """"); } else // Convert the default catalog set by old drivers to the current strcpy(srvrGlobal->DefaultCatalog, ODBCMX_DEFAULT_CATALOG); } // inContext->catalog[0] != NULL static bool defaultSchemaSaved = false; if (stricmp(TmpstrRole, srvrGlobal->QSRoleName) != 0) // it means user role has been updated - default schema also needs to be updated defaultSchemaSaved = false; if (!defaultSchemaSaved) { if(!getSQLInfo( SCHEMA_DEFAULT )) // populate savedDefaultSchema { //this should not happen - but let's put defensive code to set it to "USR" strcpy(savedDefaultSchema,ODBCMX_DEFAULT_SCHEMA); } defaultSchemaSaved = true; } if (inContext->schema[0] == NULL) { strcpy(srvrGlobal->DefaultSchema, savedDefaultSchema); strcpy(schemaValueStr, """"); strcat(schemaValueStr, srvrGlobal->DefaultCatalog); strcat(schemaValueStr, """"); strcat(schemaValueStr, "."); strcat(schemaValueStr, savedDefaultSchema); odbc_SQLSvc_SetConnectionOption_sme_(objtag_, call_id_, &setConnectException, dialogueId, SET_SCHEMA, 0, (IDL_string)schemaValueStr ,&sqlWarning ); if (setConnectException.exception_nr != CEE_SUCCESS) { //LCOV_EXCL_START SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "RESET_SCHEMA", schemaValueStr); goto MapException; //LCOV_EXCL_STOP } } if (inContext->schema[0] != NULL) { // Fix: If the catalog or schema name itself has a dot within it like in below // "a&new*.cat"."a&new*.sch" then we have to handle that. tmpPtr = tmpPtr2 = NULL; if( tmpPtr = (char *) strrchr(inContext->schema,'.') ) { // Search backwards for a double quotes if it exists then check if there is // any dot before that and pick that position. 
char TmpstrCatSch[257]; strcpy( TmpstrCatSch, inContext->schema ); TmpstrCatSch[tmpPtr - inContext->schema] = '\x0'; tmpPtr2 = strrchr(TmpstrCatSch,'"'); if( tmpPtr2 != NULL ) { TmpstrCatSch[tmpPtr2 - TmpstrCatSch] = '\x0'; if( tmpPtr2 = strrchr(TmpstrCatSch,'.') ) tmpPtr = (char *)(inContext->schema + (tmpPtr2 - TmpstrCatSch)); else tmpPtr = NULL; } } if( tmpPtr != NULL ) { catLen = strlen(inContext->schema) - strlen(tmpPtr); //copying the Catalog strncpy(TmpstrCat,inContext->schema,catLen); TmpstrCat[catLen] = '\0'; *tmpPtr++; //copying the Schema strcpy(TmpstrSch, tmpPtr); strcpy(srvrGlobal->DefaultCatalog, """"); strcat(srvrGlobal->DefaultCatalog, TmpstrCat); strcat(srvrGlobal->DefaultCatalog, """"); strcpy(srvrGlobal->DefaultSchema, """"); strcat(srvrGlobal->DefaultSchema, TmpstrSch); strcat(srvrGlobal->DefaultSchema, """"); if ( srvrGlobal->DefaultSchema[0] == '\0' || srvrGlobal->DefaultCatalog[0] == '\0' ) { exception_.exception_nr = odbc_SQLSvc_InitializeDialogue_ParamError_exn_; exception_.u.ParamError.ParamDesc = SQLSVC_EXCEPTION_INVALID_SCHEMA_CATALOG_OPTION; updateSrvrState(SRVR_CONNECT_REJECTED); } } else { strcpy(srvrGlobal->DefaultSchema, """"); strcat(srvrGlobal->DefaultSchema, inContext->schema); strcat(srvrGlobal->DefaultSchema, """"); } } strcpy(outContext.catalog, srvrGlobal->DefaultCatalog); // The size of srvrGlobal->DefaultSchema is increased to 131 // to allow double-quotes around the schema name // we need to be careful not to overrun outContext.schema strncpy(outContext.schema, srvrGlobal->DefaultSchema, sizeof(outContext.schema)); outContext.schema[sizeof(outContext.schema)-1] = '\0'; } // Added to detect MODE_SPECIAL_1 CQD static bool firstTime = true; if ( firstTime ) { srvrGlobal->modeSpecial_1 = false; if( getSQLInfo( MODE_SPECIAL_1 )) srvrGlobal->modeSpecial_1 = true; firstTime = false; } if( srvrGlobal->modeSpecial_1 ) outContext.versionList._buffer->buildId = outContext.versionList._buffer->buildId | MXO_SPECIAL_1_MODE; // assign 
client locale information to srvrGlobal srvrGlobal->clientLCID = inContext->ctxDataLang; srvrGlobal->clientErrorLCID = inContext->ctxErrorLang; srvrGlobal->clientACP = inContext->ctxACP; srvrGlobal->useCtrlInferNCHAR = inContext->ctxCtrlInferNCHAR; if (srvrGlobal->tip_gateway != NULL) { #ifdef TIP_DEFINED tip_close(srvrGlobal->tip_gateway); #endif srvrGlobal->tip_gateway = NULL; } //getSQLInfo(USER_ROLE); // srvrGlobal->RoleName and srvrGlobal->QSRoleName is set here if (srvrGlobal->QSRoleName[0] != '\0') { outContext.outContextOptions1 = outContext.outContextOptions1 | OUTCONTEXT_OPT1_ROLENAME; outContext.outContextOptionStringLen = strlen(srvrGlobal->QSRoleName)+5; outContext.outContextOptionString = new char[outContext.outContextOptionStringLen]; sprintf(outContext.outContextOptionString, "RN=%s;", srvrGlobal->QSRoleName); } else outContext.outContextOptionStringLen = 0; // +++ Fix for update stats problem on volatile table. This code was earlier // just before SET_ODBC_PROCESS connection attr above. // Have moved the BEGIN_SESSION here to fix an issue with AQR. 
// // Session ID: // =========== // MXID<version><segment><cpu><pin><processStartTS><sessNum><unLen><userName><snLen><sessionName> // <version>: version number of ID : 2 digits // <segment>: segment number : 3 digits // <cpu>: cpu number : 2 digits // <pin>: pin : 4 digits // <processStartTS>: time when master exe process started : 18 digits // <sessNum>: sequentially increasing session number : 10 digits // <unLen>: length of user ALIAS name : 2 digits // <userName>: actual user name : unLen bytes(max 32) // <snLen>: length of user specified session name : 2 digits // <sessionName>: actual session name : snLen bytes(max 24) // Query ID: // ========= // <Session ID>_<queryNum>_<userStmtName> // <queryNum>: unique query number : max 18 digits // <userStmtName>: odbc generated stmt name : max 32 bytes // Max Query ID Len: 160 bytes odbc_SQLSvc_SetConnectionOption_sme_(objtag_, call_id_, &setConnectException, dialogueId, BEGIN_SESSION, 0, (IDL_string)inContext->sessionName, &sqlWarning ); if (setConnectException.exception_nr != CEE_SUCCESS) { SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "BEGIN_SESSION", ""); goto MapException; } else { enum SESSIONATTR_TYPE {SESSION_ATTR_ID = 1}; char tmpsrvrSessionId[SESSION_ID_LEN]; Int32 tmpsrvrSessionIdLen=0; if (WSQL_EXEC_GetSessionAttr(1, 0, tmpsrvrSessionId, SESSION_ID_LEN, &tmpsrvrSessionIdLen) == 0) { tmpsrvrSessionId[tmpsrvrSessionIdLen] = '\0'; strcpy(srvrGlobal->sessionId,tmpsrvrSessionId); } else { getSessionId(tmpsrvrSessionId); strcpy(srvrGlobal->sessionId,tmpsrvrSessionId); } } if (srvrGlobal->srvrState == SRVR_CONNECTING) { updateSrvrState(SRVR_CONNECTED); } // For performance reasons, SQL statements to setup the initial context // are executed after responding back to client // odbc_SQLSvc_InitializeDialogue_ts_res_(objtag_, call_id_, &exception_, &outContext); if (outContext.outContextOptionStringLen > 0) delete [] 
outContext.outContextOptionString; if (srvrGlobal->traceLogger != NULL) { srvrGlobal->traceLogger->TraceConnectExit(exception_, outContext); } odbc_SQLSvc_SetConnectionOption_sme_(objtag_, call_id_, &setConnectException, dialogueId, SET_ODBC_PROCESS, 0, NULL, &sqlWarning ); if (setConnectException.exception_nr != CEE_SUCCESS) { //LCOV_EXCL_START SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "SET_ODBC_PROCESS", ""); goto MapException; //LCOV_EXCL_STOP } odbc_SQLSvc_SetConnectionOption_sme_(objtag_, call_id_, &setConnectException, dialogueId, WMS_QUERY_MONITORING, 0, NULL, &sqlWarning ); if (setConnectException.exception_nr != CEE_SUCCESS) { //LCOV_EXCL_START SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "WMS_QUERY_MONITORING", ""); goto MapException; //LCOV_EXCL_STOP } // Need to enable this for JDBC driver odbc_SQLSvc_SetConnectionOption_sme_(objtag_, call_id_, &setConnectException, dialogueId, SET_JDBC_PROCESS, 0, NULL, &sqlWarning ); if (setConnectException.exception_nr != CEE_SUCCESS) { //LCOV_EXCL_START SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "SET_JDBC_PROCESS", ""); goto MapException; //LCOV_EXCL_STOP } // Need to enable this for NCI if (strcmp(srvrGlobal->ApplicationName, HPDCI_APPLICATION) == 0) { odbc_SQLSvc_SetConnectionOption_sme_(objtag_, call_id_, &setConnectException, dialogueId, SET_NVCI_PROCESS, 0, NULL, &sqlWarning ); if (setConnectException.exception_nr != CEE_SUCCESS) { //LCOV_EXCL_START SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "SET_NVCI_PROCESS", ""); goto MapException; //LCOV_EXCL_STOP } } // Need to enable this for to generate explain plans by default. 
odbc_SQLSvc_SetConnectionOption_sme_(objtag_, call_id_, &setConnectException, dialogueId, SET_EXPLAIN_PLAN, 0, NULL, &sqlWarning ); if (setConnectException.exception_nr != CEE_SUCCESS) { //LCOV_EXCL_START SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "SET_EXPLAIN_PLAN", ""); goto MapException; //LCOV_EXCL_STOP } // This is added for dynamic reconfiguration. To reset the nametype back to ANSI. // Then is set according to Data Source configured. odbc_SQLSvc_SetConnectionOption_sme_(objtag_, call_id_, &setConnectException, dialogueId, SET_CATALOGNAMETYPE, 0, NULL, &sqlWarning ); if (setConnectException.exception_nr != CEE_SUCCESS) { //LCOV_EXCL_START SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "SET_CATALOGNAMETYPE", ""); goto MapException; //LCOV_EXCL_STOP } odbc_SQLSvc_SetConnectionOption_sme_(objtag_, call_id_, &setConnectException, dialogueId, SET_AUTOBEGIN, 0, NULL, &sqlWarning ); if (setConnectException.exception_nr != CEE_SUCCESS) { //LCOV_EXCL_START SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "SET_AUTOBEGIN", ""); goto MapException; //LCOV_EXCL_STOP } odbc_SQLSvc_SetConnectionOption_sme_(objtag_, call_id_, &setConnectException, dialogueId, SQL_AUTOCOMMIT, inContext->autoCommit, NULL , &sqlWarning ); if (setConnectException.exception_nr != CEE_SUCCESS) { //LCOV_EXCL_START sprintf(tmpString, "%ld", inContext->autoCommit); SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "SQL_AUTOCOMMIT", tmpString); goto MapException; //LCOV_EXCL_STOP } srvrGlobal->estCardinality = srvrGlobal->estCost = -1; if (srvrGlobal->envVariableOn) { int len_length = 
srvrGlobal->srvrContext.envDescList._length; ENV_DESC_def *p_buffer = (ENV_DESC_def *)srvrGlobal->srvrContext.envDescList._buffer; char* saveptr; for (curRowNo = 0; curRowNo < len_length; curRowNo ++) // scan through each RG policy { pEnvDesc = p_buffer + curRowNo; // VarType if ((pEnvDesc->VarType == ENV_SET) || (pEnvDesc->VarType == ENV_CONTROL))// Set & Control statements { strncpy(VariableValue, pEnvDesc->VarVal, sizeof(VariableValue)); VariableValue[sizeof(VariableValue)-1] = 0; token = strtok_r(VariableValue, seps, &saveptr ); if (_stricmp(token, EnvTypes[0]) == 0) { token = strtok_r(NULL, seps, &saveptr); if (_stricmp(token, ATTR_TYPE1) == 0) { token = strtok_r(NULL, seps, &saveptr); if (_stricmp(token, ATTR_TYPE1_VALUE1) == 0) { odbc_SQLSvc_SetConnectionOption_sme_(objtag_, call_id_, &setConnectException, dialogueId, SQL_ACCESS_MODE, SQL_MODE_READ_WRITE, NULL , &sqlWarning ); } else if (_stricmp(token, ATTR_TYPE1_VALUE2) == 0) { odbc_SQLSvc_SetConnectionOption_sme_(objtag_, call_id_, &setConnectException, dialogueId, SQL_ACCESS_MODE, SQL_MODE_READ_ONLY, NULL , &sqlWarning ); } else InternalUse = FALSE; token = strtok_r(NULL, seps, &saveptr); // Check for forth token should be NULL else error. 
if (token != NULL) InternalUse = FALSE; } else if (_stricmp(token, ATTR_TYPE2) == 0) { token = strtok_r(NULL, seps, &saveptr); if (_stricmp(token, ATTR_TYPE2_VALUE1) == 0) { odbc_SQLSvc_SetConnectionOption_sme_(objtag_, call_id_, &setConnectException, dialogueId, SQL_TXN_ISOLATION, SQL_TXN_READ_UNCOMMITTED, NULL , &sqlWarning ); } else if (_stricmp(token, ATTR_TYPE2_VALUE2) == 0) { odbc_SQLSvc_SetConnectionOption_sme_(objtag_, call_id_, &setConnectException, dialogueId, SQL_TXN_ISOLATION, SQL_TXN_READ_COMMITTED, NULL , &sqlWarning ); } else if (_stricmp(token, ATTR_TYPE2_VALUE3) == 0) { odbc_SQLSvc_SetConnectionOption_sme_(objtag_, call_id_, &setConnectException, dialogueId, SQL_TXN_ISOLATION, SQL_TXN_REPEATABLE_READ, NULL , &sqlWarning ); } else if (_stricmp(token, ATTR_TYPE2_VALUE4) == 0) { odbc_SQLSvc_SetConnectionOption_sme_(objtag_, call_id_, &setConnectException, dialogueId, SQL_TXN_ISOLATION, SQL_TXN_SERIALIZABLE, NULL , &sqlWarning ); } else InternalUse = FALSE; token = strtok_r(NULL, seps, &saveptr); // Check for forth token should be NULL else error. if (token != NULL) InternalUse = FALSE; } else if (_stricmp(token, ATTR_TYPE3) == 0) { token = strtok_r(NULL, seps, &saveptr); if (_stricmp(token, ATTR_TYPE3_VALUE1) == 0) { srvrGlobal->EnvironmentType = srvrGlobal->EnvironmentType | MXO_MSACCESS_1997; odbc_SQLSvc_SetConnectionOption_sme_(objtag_, call_id_, &setConnectException, dialogueId, SQL_ACCESS_MODE, SQL_MODE_READ_WRITE, NULL , &sqlWarning ); } else if (_stricmp(token, ATTR_TYPE3_VALUE2) == 0) { srvrGlobal->EnvironmentType = srvrGlobal->EnvironmentType | MXO_MSACCESS_2000; odbc_SQLSvc_SetConnectionOption_sme_(objtag_, call_id_, &setConnectException, dialogueId, SQL_ACCESS_MODE, SQL_MODE_READ_WRITE, NULL , &sqlWarning ); } else InternalUse = FALSE; token = strtok_r(NULL, seps, &saveptr); // Check for forth token should be NULL else error. 
if (token != NULL) InternalUse = FALSE; } else if (_stricmp(token, ATTR_TYPE4) == 0) { token = strtok_r(NULL, seps, &saveptr); if (_stricmp(token, ATTR_TYPE4_VALUE1) == 0) { srvrGlobal->EnvironmentType = srvrGlobal->EnvironmentType | MXO_BIGINT_NUMERIC; } else InternalUse = FALSE; token = strtok_r(NULL, seps, &saveptr); // Check for forth token should be NULL else error. if (token != NULL) InternalUse = FALSE; } else if (_stricmp(token, ATTR_TYPE5) == 0)//for error recovery { token = strtok_r(NULL, seps, &saveptr); if (_stricmp(token, ATTR_TYPE5_VALUE1) == 0) { srvrGlobal->EnvironmentType = srvrGlobal->EnvironmentType | MXO_ROWSET_ERROR_RECOVERY; } else InternalUse = FALSE; token = strtok_r(NULL, seps, &saveptr); // Check for forth token should be NULL else error. if (token != NULL) InternalUse = FALSE; } else if (_stricmp(token, ATTR_TYPE6) == 0) //for metadata id { token = strtok_r(NULL, seps, &saveptr); if (_stricmp(token, ATTR_TYPE6_VALUE1) == 0) { srvrGlobal->EnvironmentType = srvrGlobal->EnvironmentType | MXO_METADATA_ID; } else InternalUse = FALSE; token = strtok_r(NULL, seps, &saveptr); // Check for forth token should be NULL else error. if (token != NULL) InternalUse = FALSE; } else if (_stricmp(token, ATTR_TYPE7) == 0)// Rajani - Check for password expiry { token = strtok_r(NULL, seps, &saveptr); if ((_stricmp(token, ATTR_TYPE7_VALUE1) == 0) || (_stricmp(token, ATTR_TYPE7_VALUE2) == 0)) { // DO Nothing since we already tookcare of it before calling odbc_SQLSvc_InitializeDialogue_sme_ } else InternalUse = FALSE; token = strtok_r(NULL, seps, &saveptr); // Check for forth token should be NULL else error. 
if (token != NULL) InternalUse = FALSE; } else if (_stricmp(token, ATTR_TYPE8) == 0)//for microsec option { token = strtok_r(NULL, seps, &saveptr); if (_stricmp(token, ATTR_TYPE8_VALUE1) == 0) //for the microsecs option { srvrGlobal->EnvironmentType = srvrGlobal->EnvironmentType | MXO_FRACTION_IN_MICROSECS; } else if(_stricmp(token, ATTR_TYPE8_VALUE2) == 0) { srvrGlobal->EnvironmentType = srvrGlobal->EnvironmentType | MXO_FRACTION_IN_NANOSECS; } else InternalUse = FALSE; token = strtok_r(NULL, seps, &saveptr); // Check for forth token should be NULL else error. if (token != NULL) InternalUse = FALSE; } // generic SET attributes else if (_stricmp(token, ATTR_TYPE9) == 0) { long temp_val = 0; double est_val = 0.0; char seps2[] = " \t\n,:"; token = strtok_r(NULL, seps2, &saveptr); // CLEANUP_CONNECTION | CLEANUP_TIME | FATAL_ERROR if (_stricmp(token, ATTR_TYPE9_VALUE1) == 0) // CLEANUP_CONNECTION { token = strtok_r(NULL,seps2, &saveptr); if (token != NULL) { temp_val = atol(token); if (temp_val < 1) InternalUse = FALSE; else srvrGlobal->cleanupByConnection = temp_val; } else InternalUse = FALSE; } else if (_stricmp(token, ATTR_TYPE9_VALUE2) == 0) // CLEANUP_TIME { token = strtok_r(NULL,seps2, &saveptr); if (token != NULL) { temp_val = atol(token); if (temp_val < 1) InternalUse = FALSE; else srvrGlobal->cleanupByTime = temp_val; } else InternalUse = FALSE; } // Added for workaround for cases where a large number of short // running queries can make the QSMGR very busy and impact the // overall system performance. Compiler estimates can now be // entered in the DSN and MXOSRVR will bypass WMS for queries with // estimates lower than those entered. // This is currently an undocumented feature and will be exposed // only as need basis. 
else if (_stricmp(token, ATTR_TYPE9_VALUE3) == 0) // EST_CARDINALITY { token = strtok_r(NULL,seps2, &saveptr); if (token != NULL) { est_val = atof(token); if (est_val < 0) InternalUse = FALSE; else srvrGlobal->estCardinality = est_val; } else InternalUse = FALSE; } else if (_stricmp(token, ATTR_TYPE9_VALUE4) == 0) // EST_COST { token = strtok_r(NULL,seps2, &saveptr); if (token != NULL) { est_val = atof(token); if (est_val < 0) InternalUse = FALSE; else srvrGlobal->estCost = est_val; } else InternalUse = FALSE; } else InternalUse = FALSE; token = strtok_r(NULL, seps, &saveptr); if (token != NULL) InternalUse = FALSE; } else if (_stricmp(token, ATTR_TYPE10) == 0)//for WMS Adaptive Segmentation { if (srvrGlobal->fnumAS != -1) FILE_CLOSE_(srvrGlobal->fnumAS); srvrGlobal->fnumAS == -1; token = strtok_r(NULL, seps, &saveptr); if (_stricmp(token, ATTR_TYPE10_VALUE1) == 0) //for ON { srvrGlobal->bWMS_AdaptiveSegment = true; _cc_status cc; long timeout = AS_TIMEOUT; // bits <1> ON - nowait short option = 0x4000; short error = FILE_OPEN_(srvrGlobal->QSProcessName , strlen(srvrGlobal->QSProcessName) , &srvrGlobal->fnumAS , 0 //access , 0 //exclusion , 1 //nowait_depth , 0 //sync-or-receive-depth , option //options ); if (error == 0) { if (_status_lt(cc)) FILE_GETINFO_ (srvrGlobal->fnumAS, &error); else error = 0; } if (error == 0) { if (! processExists(srvrGlobal->QSProcessName, TPT_REF(srvrGlobal->pASHandle))) error = 1; } if (error) { if (srvrGlobal->fnumAS != -1) //timeout FILE_CLOSE_(srvrGlobal->fnumAS); srvrGlobal->fnumAS = -1; } } else if(_stricmp(token, ATTR_TYPE10_VALUE2) == 0) //for OFF { srvrGlobal->bWMS_AdaptiveSegment = false; } else InternalUse = FALSE; token = strtok_r(NULL, seps, &saveptr); // Check for forth token should be NULL else error. 
if (token != NULL) InternalUse = FALSE; } else if (_stricmp(token, ATTR_TYPE11) == 0) // To turn the 21036 EMS messages ON/OFF { token = strtok_r(NULL, seps, &saveptr); if (_stricmp(token, ATTR_TYPE11_VALUE1) == 0) //for ON { srvrGlobal->mute = false; } else if(_stricmp(token, ATTR_TYPE11_VALUE2) == 0) //for OFF { srvrGlobal->mute = true; } else InternalUse = FALSE; token = strtok_r(NULL, seps, &saveptr); // Check for forth token should be NULL else error. if (token != NULL) InternalUse = FALSE; } else if (_stricmp(token, ATTR_TYPE14) == 0)//for SQLTABLES MV option { token = strtok_r(NULL, seps, &saveptr); if (_stricmp(token, ATTR_TYPE14_VALUE1) == 0) //for SQLTABLES TABLE TYPE as TABLE { srvrGlobal->EnvironmentType = srvrGlobal->EnvironmentType | MXO_SQLTABLES_MV_TABLE; } else if(_stricmp(token, ATTR_TYPE14_VALUE2) == 0) //for SQLTABLES TABLE TYPE as VIEW { srvrGlobal->EnvironmentType = srvrGlobal->EnvironmentType | MXO_SQLTABLES_MV_VIEW; } else InternalUse = FALSE; token = strtok_r(NULL, seps, &saveptr); // Check for forth token should be NULL else error. if (token != NULL) InternalUse = FALSE; } else if (_stricmp(token, ATTR_TYPE15) == 0) // SQL_ATTR_IGNORE_CANCEL { token = strtok_r(NULL, seps, &saveptr); if ((_stricmp(token, ATTR_TYPE15_VALUE1) == 0) || (_stricmp(token, ATTR_TYPE15_VALUE2) == 0)) { // DO Nothing since we already tookcare of it } else InternalUse = FALSE; token = strtok_r(NULL, seps, &saveptr); // Check for forth token should be NULL else error. 
if (token != NULL) InternalUse = FALSE; } else if (_stricmp(token, ATTR_TYPE16) == 0) // SQL_ATTR_FETCH_AHEAD { token = strtok_r(NULL, seps, &saveptr); if (_stricmp(token, ATTR_TYPE16_VALUE1) == 0) // SQL_ATTR_FETCH_AHEAD 'ON' { srvrGlobal->fetchAhead = TRUE; } else if (_stricmp(token, ATTR_TYPE16_VALUE2) == 0) // SQL_ATTR_FETCH_AHEAD 'OFF' { // SQL_ATTR_FETCH_AHEAD 'OFF' by default srvrGlobal->fetchAhead = FALSE; } else InternalUse = FALSE; token = strtok_r(NULL, seps, &saveptr); // Check for forth token should be NULL else error. if (token != NULL) InternalUse = FALSE; } else if (_stricmp(token, ATTR_TYPE17) == 0) // To turn the extended 21036 EMS messages ON/OFF { token = strtok_r(NULL, seps, &saveptr); if (_stricmp(token, ATTR_TYPE17_VALUE1) == 0) //for ON { srvrGlobal->ext_21036 = true; } else if(_stricmp(token, ATTR_TYPE17_VALUE2) == 0) //for OFF { srvrGlobal->ext_21036 = false; } else InternalUse = FALSE; token = strtok_r(NULL, seps, &saveptr); // Check for forth token should be NULL else error. if (token != NULL) InternalUse = FALSE; } else if (_stricmp(token, ATTR_TYPE18) == 0) // SQL_ATTR_ENABLE_LONGVARCHAR { token = strtok_r(NULL, seps, &saveptr); if (_stricmp(token, ATTR_TYPE18_VALUE1) == 0) // SQL_ATTR_ENABLE_LONGVARCHAR 'ON' { srvrGlobal->enableLongVarchar = true; } else //LONGVARCHAR is disabled (OFF) by default. { srvrGlobal->enableLongVarchar = false; } token = strtok_r(NULL, seps, &saveptr); // Check for forth token should be NULL else error. 
if (token != NULL) InternalUse = FALSE; } else InternalUse = FALSE; } else InternalUse = FALSE; if (!InternalUse) { odbc_SQLSvc_SetConnectionOption_sme_(objtag_, call_id_, &setConnectException, dialogueId, SET_SETANDCONTROLSTMTS, 0, (IDL_string)pEnvDesc->VarVal, &sqlWarning ); InternalUse = TRUE; } if (setConnectException.exception_nr != CEE_SUCCESS) { SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "SET_SETANDCONTROLSTMTS", pEnvDesc->VarVal); goto MapException; } } } // for loop } srvrGlobal->EnvironmentType |= MXO_ROWSET_ERROR_RECOVERY; if(inContext->schema[0] != '\0') { // With character-set changes we need to take care of four scenarios // 1. schema names in ascci with case sensitive behavior (means with and without double quotes) // 2. schema names in UTF-8 characters with and without double quotes // To make logic simple and to take care of unsymmetric double quotes in // schema names (for eg ""abc", ""def, edfg" ..etc) the code // removes all quotes if present and re-introduce them as needed. bool delimit = true; if((srvrGlobal->DefaultSchema[0] == '"') || (srvrGlobal->DefaultSchema[strlen(srvrGlobal->DefaultSchema)-1] == '"')) { char tmpDefaultcatalog[MAX_SQL_IDENTIFIER_LEN+3]; //remove multiple double quotes, if any char* startPtr = srvrGlobal->DefaultSchema; while (*startPtr == '"') ++startPtr; char* endPtr = NULL; if(startPtr < (srvrGlobal->DefaultSchema + strlen(srvrGlobal->DefaultSchema))) { endPtr = strchr(startPtr,'"'); if(endPtr == NULL) // We have a string with no ending quotes! 
{ strcpy(tmpDefaultcatalog, startPtr); } else { int length = endPtr-startPtr; strncpy(tmpDefaultcatalog,startPtr, length); tmpDefaultcatalog[length]='\0'; } strcpy(srvrGlobal->DefaultSchema, tmpDefaultcatalog); } } else //we have a schema name with no " around, check whether it is ascii, then // we don't need to delimit the schema name { delimit = false; for (int i=0; i < strlen(srvrGlobal->DefaultSchema); i++) if (!isalnum(srvrGlobal->DefaultSchema[i])) { delimit = true; break; } } strcpy(schemaValueStr, """"); strcat(schemaValueStr, srvrGlobal->DefaultCatalog); strcat(schemaValueStr, """"); strcat(schemaValueStr, "."); if (delimit) strcat(schemaValueStr, "\""); strcat(schemaValueStr, srvrGlobal->DefaultSchema); if (delimit) strcat(schemaValueStr, "\""); odbc_SQLSvc_SetConnectionOption_sme_(objtag_, call_id_, &setConnectException, dialogueId, SET_SCHEMA, 0, (IDL_string)schemaValueStr ,&sqlWarning ); if (setConnectException.exception_nr != CEE_SUCCESS) { //LCOV_EXCL_START SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "SET_SCHEMA", schemaValueStr); goto MapException; //LCOV_EXCL_STOP } } odbc_SQLSvc_SetConnectionOption_sme_(objtag_, call_id_, &setConnectException, dialogueId, RESET_RESET_DEFAULTS, 0, NULL, &sqlWarning ); if (setConnectException.exception_nr != CEE_SUCCESS) { //LCOV_EXCL_START SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "RESET_RESET_DEFAULTS", ""); goto MapException; //LCOV_EXCL_STOP } SRVR_STMT_HDL *RbwSrvrStmt; SRVR_STMT_HDL *CmwSrvrStmt; if ((RbwSrvrStmt = getSrvrStmt("STMT_ROLLBACK_1", FALSE)) != NULL) RbwSrvrStmt->Close(SQL_DROP); if ((RbwSrvrStmt = getSrvrStmt("STMT_ROLLBACK_1", TRUE)) == NULL) { //LCOV_EXCL_START setConnectException.exception_nr = 99; sprintf(tmpString, "%s", "Unable to allocate statement to Rollback."); 
SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "STMT_ROLLBACK_1", tmpString); goto MapException; //LCOV_EXCL_STOP } retCode = RbwSrvrStmt->Prepare("ROLLBACK WORK",INTERNAL_STMT,SQL_ASYNC_ENABLE_OFF, 0); if (retCode == SQL_ERROR) { //LCOV_EXCL_START setConnectException.exception_nr = 99; sprintf(tmpString, "%s", "Error in Preparing Query for Rollback."); SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "STMT_ROLLBACK_1", tmpString); goto MapException; //LCOV_EXCL_STOP } if ((CmwSrvrStmt = getSrvrStmt("STMT_COMMIT_1", FALSE)) != NULL) CmwSrvrStmt->Close(SQL_DROP); if ((CmwSrvrStmt = getSrvrStmt("STMT_COMMIT_1", TRUE)) == NULL) { //LCOV_EXCL_START setConnectException.exception_nr = 99; sprintf(tmpString, "%s", "Unable to allocate statement for Commit."); SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "STMT_ROLLBACK_1", tmpString); goto MapException; //LCOV_EXCL_STOP } retCode = CmwSrvrStmt->Prepare("COMMIT WORK",INTERNAL_STMT,SQL_ASYNC_ENABLE_OFF, 0); if (retCode == SQL_ERROR) { //LCOV_EXCL_START setConnectException.exception_nr = 99; sprintf(tmpString, "%s", "Error in Preparing Query for Commit."); SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "STMT_ROLLBACK_1", tmpString); goto MapException; //LCOV_EXCL_STOP } // batch job support for T4 SRVR_STMT_HDL *TranOnSrvrStmt; SRVR_STMT_HDL *TranOffSrvrStmt; if ((TranOnSrvrStmt = getSrvrStmt("STMT_TRANS_ON_1", FALSE)) != NULL) TranOnSrvrStmt->Close(SQL_DROP); if ((TranOnSrvrStmt = getSrvrStmt("STMT_TRANS_ON_1", TRUE)) == NULL) { //LCOV_EXCL_START setConnectException.exception_nr = 99; sprintf(tmpString, "%s", "Unable to allocate statement to set 
transaction on."); SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "STMT_TRANS_ON_1", tmpString); goto MapException; //LCOV_EXCL_STOP } retCode = TranOnSrvrStmt->Prepare("SET TRANSACTION AUTOCOMMIT ON",INTERNAL_STMT,SQL_ASYNC_ENABLE_OFF, 0); if (retCode == SQL_ERROR) { //LCOV_EXCL_START setConnectException.exception_nr = 99; sprintf(tmpString, "%s", "Error in Preparing Query for set transaction on."); SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "STMT_TRANS_ON_1", tmpString); goto MapException; //LCOV_EXCL_STOP } if ((TranOffSrvrStmt = getSrvrStmt("STMT_TRANS_OFF_1", FALSE)) != NULL) TranOffSrvrStmt->Close(SQL_DROP); if ((TranOffSrvrStmt = getSrvrStmt("STMT_TRANS_OFF_1", TRUE)) == NULL) { //LCOV_EXCL_START setConnectException.exception_nr = 99; sprintf(tmpString, "%s", "Unable to allocate statement to set transaction off."); SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "STMT_TRANS_OFF_1", tmpString); goto MapException; //LCOV_EXCL_STOP } retCode = TranOffSrvrStmt->Prepare("SET TRANSACTION AUTOCOMMIT OFF",INTERNAL_STMT,SQL_ASYNC_ENABLE_OFF, 0); if (retCode == SQL_ERROR) { //LCOV_EXCL_START setConnectException.exception_nr = 99; sprintf(tmpString, "%s", "Error in Preparing Query for set transaction off."); SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "STMT_TRANS_OFF_1", tmpString); goto MapException; //LCOV_EXCL_STOP } srvrGlobal->javaConnIdleTimeout = JDBC_DATASOURCE_CONN_IDLE_TIMEOUT; if ((srvrGlobal->drvrVersion.componentId == JDBC_DRVR_COMPONENT) && ((long) (inContext->idleTimeoutSec) > JDBC_DATASOURCE_CONN_IDLE_TIMEOUT)) srvrGlobal->javaConnIdleTimeout = inContext->idleTimeoutSec; 
START_CONN_IDLE_TIMER // collect information for resource statistics char nodename[100]; short error; char cpuPin[20]; char systemNm[10]; short priority; char procName[MAX_PROCESS_NAME_LEN]; char userName[UNLEN + 1 + UNLEN + 1]; short username_len; memset(setinit.cpuPin,'\0',20); memset(setinit.nodeName,'\0',10); memset(setinit.DSName,'\0',MAX_DSOURCE_NAME + 1); memset(setinit.userName,'\0',USERNAME_LENGTH + 1); setinit.userId = 0; setinit.startPriority = 0; // The following two are directly setup in srvrothers.cpp // setinit.totalLoginTime = 0; // setinit.ldapLoginTime = 0; sprintf(cpuPin,"%d,%d",srvrGlobal->nskProcessInfo.nodeId,srvrGlobal->nskProcessInfo.processId); strcpy(setinit.cpuPin,cpuPin); strcpyUTF8(setinit.userName,userDesc->userName, sizeof(setinit.userName)); strcpyUTF8(setinit.DSName,srvrGlobal->DSName, sizeof(setinit.DSName)); GetSystemNm(systemNm); strcpy(setinit.nodeName,systemNm); // Modified below code for replacing the expensive USER_GETINFO_() call with // PROCESS_GETINFO() call for better performance. int crID; crID = userSession->getUserID(); userSession->getDBUserName(srvrGlobal->QSDBUserName, sizeof(srvrGlobal->QSDBUserName)); // Get the current external name of the user. 
userSession->getExternalUsername(srvrGlobal->QSUserName, sizeof(srvrGlobal->QSUserName)); strcpyUTF8(setinit.userName,srvrGlobal->QSUserName, sizeof(setinit.userName)); // For component privileges bzero(hpdcsPrivMask, sizeof(hpdcsPrivMask)); #ifdef NSK_PLATFORM if ((error = PROCESS_GETINFO_(TPT_REF(srvrGlobal->nskProcessInfo.pHandle), OMITREF, OMITSHORT,OMITREF, // proc string,max buf len,act len &priority, // priority OMITREF, // Mom's proc handle OMITREF, OMITSHORT,OMITREF, // home term,max buf len,act len OMITREF, // Process execution time &crID, // Creator Access Id OMITREF, // Process Access Id OMITREF, // Grand Mom's proc handle OMITREF, // Job Id OMITREF, OMITSHORT,OMITREF, // Program file,max buf len,act len OMITREF, OMITSHORT,OMITREF, // Swap file,max buf len,act len OMITREF, OMITREF, // Process type OMITREF) ) != 0) // OSS or NT process Id { sprintf(tmpString, "%d", error); SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, tmpString); } setinit.startPriority = priority; #else MS_Mon_Process_Info_Type proc_info; char myProcname[128]; short procname_len; if ((error = PROCESSHANDLE_DECOMPOSE_ ( TPT_REF(srvrGlobal->nskProcessInfo.pHandle) ,OMITREF //[ short *cpu ] ,OMITREF //[ short *pin ] ,OMITREF //[ long *nodenumber ] ,OMITREF //[ char *nodename ] ,OMITSHORT //[ short maxlen ] ,OMITREF //[ short *nodename-length ] ,myProcname //[ char *procname ] ,sizeof(myProcname) //[ short maxlen ] ,&procname_len //[ short *procname-length ] ,OMITREF //[ long long *sequence-number ] )) != 0) { //LCOV_EXCL_START sprintf(tmpString, "%d", error); SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, tmpString); //LCOV_EXCL_STOP } myProcname[procname_len] = 0; error = msg_mon_get_process_info_detail(myProcname, &proc_info); if (error != XZFIL_ERR_OK ) { //LCOV_EXCL_START sprintf(tmpString, "%d", error); SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, 
srvrGlobal->srvrObjRef, 1, tmpString); //LCOV_EXCL_STOP } setinit.startPriority = priority = (short)proc_info.priority; srvrGlobal->process_id = proc_info.pid; srvrGlobal->cpu = proc_info.nid; #endif srvrGlobal->ProcessAccessId = setinit.userId = crID; // Set the userSID for WMS. // strcpy(srvrGlobal->userSID, userDesc->userName); strcpy(srvrGlobal->userSID, srvrGlobal->QSUserName); // srvrGlobal->srvrPriority = priority; // Fix for WMS SQL_DEFAULTS not resetting back to original // process priority. When the priority is changed by one service // and a new set service does NOT have a process priority // set then it should default to the priority that the process // started off with. // ++++ Note: When service-level default process priority feature is enabled // ++++ then this code will not be relevant and should be removed at that time. static bool prtySet = false; if ( !prtySet ) { srvrGlobal->srvrPriority = priority; prtySet = true; } srvrGlobal->prtyChanged = false; // If the client sets the fetchbuffer size to zero then we don't allocate memory // the output buffer in the case of unique selects and can corrupt memory in // SRVR::BuildSQLDesc2(). // Defaulting to 512K if the fetch buffer size is set as zero. 
if (inContext->rowSetSize <= 0) srvrGlobal->m_FetchBufferSize = 524288; else srvrGlobal->m_FetchBufferSize = inContext->rowSetSize*1024; // Moved watch dog thread creation from ImplInit() to below to avoid some initialization issues static bool sessionWatchDogStarted = false; if (srvrGlobal->m_bStatisticsEnabled && !sessionWatchDogStarted) { //boost::thread thrd(&SessionWatchDog); pthread_t thrd; pthread_create(&thrd, NULL, SessionWatchDog, NULL); sessionWatchDogStarted = true; } if (resStatSession != NULL) { resStatSession->init(); resStatSession->start(&setinit); if ((srvrGlobal->m_bStatisticsEnabled)&&(srvrGlobal->m_statisticsPubType==STATISTICS_AGGREGATED)) { if (CEE_HANDLE_IS_NIL(&StatisticsTimerHandle) == IDL_FALSE) { CEE_TIMER_DESTROY(&StatisticsTimerHandle); CEE_HANDLE_SET_NIL(&StatisticsTimerHandle); } interval_count=0; CEE_TIMER_CREATE2(MIN_INTERVAL, 0, StatisticsTimerExpired, (CEE_tag_def)NULL, &StatisticsTimerHandle, srvrGlobal->receiveThrId); } } if (resStatStatement != NULL) { // if statement is on resStatStatement->prepareQuery(&setinit); } // Trying to preserve the cached SQL objects in case of invalid user and hence doing this // after WSQL_EXEC_Set_AuthID call. However I am not sure of the effects in releasing the SQL objects // when effective user ID is changed. if( crID != srvrGlobal->userID ) { releaseCachedObject(TRUE, NDCS_DLG_INIT); srvrGlobal->userID = crID; } else { // If the ID is the same, then check if the compiler cache related to roles needs // to be cleared. // // Do note in the case when the crID is not the same as userID // new compilers get started - hence there is no need to clear compiler cache if ( prevRedefTime != 0 && prevRedefTime != srvrGlobal->redefTime ) { char errorMsg[100] = {}; if (! 
SRVR::CompilerCacheReset(errorMsg)) { //LCOV_EXCL_START SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "COMPILER_CACHE_RESET", errorMsg); SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "COMPILER_CACHE_RESET", "Fatal Error - Server exiting"); exitServerProcess(); //LCOV_EXCL_STOP } } } // if( (maxHeapPctExit != 0) && (initSessMemSize == 0)) initSessMemSize = getMemSize("Initial"); return; MapException: // Write to event log and update to SRVR CONNECT FAILED // IDL_unsigned_long curErrorNo; ERROR_DESC_def *error_desc_def; odbc_SQLSvc_SQLError *SQLError; switch (setConnectException.exception_nr) { case CEE_SUCCESS: break; case odbc_SQLSvc_SetConnectionOption_SQLError_exn_ : { //LCOV_EXCL_START SQLError = &setConnectException.u.SQLError; int len_length = SQLError->errorList._length; ERROR_DESC_def *p_buffer = SQLError->errorList._buffer; char *UTF8ErrorText = NULL; long UTF8ErrorTextLen = 0; for (curErrorNo = 0;curErrorNo < len_length ; curErrorNo++) { error_desc_def = p_buffer + curErrorNo; if( error_desc_def->sqlcode == 0 && error_desc_def->errorText == NULL ) continue; // Check for error -8841. This error happens if transaction is aborted externally. // User process is expected to clear the transaction state by calling ROLLBACK or COMMIT WORK. // // Since during connection time a user initiated ROLLBACK is not possible, // we report this as fatal error and exit. if( error_desc_def->sqlcode == -8841 ) sprintf(tmpString, "%ld returned during connection (Fatal error). 
Server exiting", error_desc_def->sqlcode); else sprintf(tmpString, "%ld", error_desc_def->sqlcode); UTF8ErrorTextLen = strlen(error_desc_def->errorText)*4; markNewOperator,UTF8ErrorText = new char[UTF8ErrorTextLen]; translateToUTF8(srvrGlobal->isoMapping, error_desc_def->errorText, strlen(error_desc_def->errorText), UTF8ErrorText, UTF8ErrorTextLen); SendEventMsg(MSG_SQL_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 3, ODBCMX_SERVER, tmpString, UTF8ErrorText); delete [] UTF8ErrorText; if( error_desc_def->sqlcode == -8841 ) exitServerProcess(); } } break; case odbc_SQLSvc_SetConnectionOption_ParamError_exn_: SendEventMsg(MSG_PROGRAMMING_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, setConnectException.u.ParamError.ParamDesc); break; case odbc_SQLSvc_SetConnectionOption_InvalidConnection_exn_: case odbc_SQLSvc_SetConnectionOption_SQLInvalidHandle_exn_: break; default: sprintf(tmpString, "%ld", setConnectException.exception_nr); SendEventMsg(MSG_KRYPTON_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, tmpString, FORMAT_LAST_ERROR()); break; //LCOV_EXCL_STOP } if (! 
updateSrvrState(SRVR_CONNECT_FAILED))
		return;
	SRVRTRACE_EXIT(FILE_AME+5);
	return;
}

/*
 * Asynchronous method function prototype for
 * operation 'odbc_SQLSvc_TerminateDialogue'
 *
 * Ends a client dialogue: suspends any joined SPJ transaction (or rejects the
 * terminate with SQLSTATE 25000 if autocommit is off and a transaction is still
 * open), runs session cleanup, and reports the new server state to ZooKeeper.
 * If the session heap grew beyond maxHeapPctExit the whole process exits
 * instead of returning to the AVAILABLE pool.
 */
extern "C" void
odbc_SQLSvc_TerminateDialogue_ame_(
    /* In    */ CEE_tag_def objtag_
  , /* In    */ const CEE_handle_def *call_id_
  , /* In    */ DIALOGUE_ID_def dialogueId
  )
{
	SRVRTRACE_ENTER(FILE_AME+6);
	long status = 0;
	odbc_SQLSvc_TerminateDialogue_exc_ exception_={0,0,0};
	// NOTE(review): monitorException_ is declared but never used in this function.
	odbc_SQLSvc_MonitorCall_exc_ monitorException_={0,0};
	exception_.exception_nr = CEE_SUCCESS;
	long exitSesMemSize = 0;

	char tmpStringEnv[1024];
	sprintf(tmpStringEnv, "Client %s Disconnecting: Data Source: %s, Application: %s, Server Reference: %s",
		srvrGlobal->ClientComputerName, srvrGlobal->DSName, srvrGlobal->ApplicationName, srvrGlobal->srvrObjRef);

	if (srvrGlobal->traceLogger != NULL)
	{
		//LCOV_EXCL_START
		SendEventMsg(MSG_SERVER_TRACE_INFO
		   , EVENTLOG_INFORMATION_TYPE
		   , srvrGlobal->nskProcessInfo.processId
		   , ODBCMX_SERVER
		   , srvrGlobal->srvrObjRef
		   , 4
		   , srvrGlobal->sessionId
		   , "TerminateDialog"
		   , "0"
		   , tmpStringEnv);
		srvrGlobal->traceLogger->TraceDisconnectEnter(dialogueId);
		//LCOV_EXCL_STOP
	}

	// Suspend any joined txn if they are still active.
	if( srvrGlobal->bspjTxnJoined && srvrGlobal->spjTxnId != 0)
	{
		status = SUSPENDTRANSACTION( (short*)&(srvrGlobal->spjTxnId) ); // ??? Int32 enough
		if(status != 0)
		{
			// Suspend failed: answer the client with SQLSTATE 25000 and die.
			exception_.exception_nr = odbc_SQLSvc_TerminateDialogue_SQLError_exn_;
			exception_.exception_detail = 25000;
			odbc_SQLSvc_TerminateDialogue_ts_res_(objtag_, call_id_, &exception_);
			exitServerProcess();
		}
		srvrGlobal->bspjTxnJoined = FALSE;
		srvrGlobal->spjTxnId = 0;
	}
	else
	{
		status = WSQL_EXEC_Xact(SQLTRANS_STATUS,NULL);
		if (srvrGlobal->bAutoCommitOn == FALSE)
		{
			if (status == 0)
			{
				// Transaction is running while autocommit is off: the client
				// must COMMIT/ROLLBACK first, so reject the terminate (25000).
				exception_.exception_nr = odbc_SQLSvc_TerminateDialogue_SQLError_exn_;
				exception_.exception_detail = 25000;
				odbc_SQLSvc_TerminateDialogue_ts_res_(objtag_, call_id_, &exception_);
				goto bailout;
			}
		}
	}

	// The dialogue id must match the one recorded at connect time.
	if (srvrGlobal->srvrState == SRVR_CONNECTED)
	{
		if (dialogueId != srvrGlobal->dialogueId)
			exception_.exception_nr = odbc_SQLSvc_TerminateDialogue_InvalidConnection_exn_;
	}
	else
		exception_.exception_nr = odbc_SQLSvc_TerminateDialogue_InvalidConnection_exn_;

	if (exception_.exception_nr == CEE_SUCCESS)
	{
		if (srvrGlobal->tip_gateway != NULL)
		{
#ifdef TIP_DEFINED
			tip_close(srvrGlobal->tip_gateway);
#endif
			srvrGlobal->tip_gateway = NULL;
		}
		odbc_SQLSvc_TerminateDialogue_sme_(objtag_, call_id_, &exception_, dialogueId);
		releaseCachedObject(FALSE, NDCS_DLG_TERM);
		// Ignore any error, since we need to cleanup anyway.
		diagnostic_flags = 0;
		srvrGlobal->bAutoCommitOn = FALSE;
		SRVR::SrvrSessionCleanup();
		srvrGlobal->dialogueId = -1;
	}

	// Decide whether the session grew the heap beyond the allowed percentage;
	// if so the process exits after answering the client instead of being reused.
	exitSesMemSize = 0;
	if( maxHeapPctExit != 0 && initSessMemSize != 0 )
		exitSesMemSize = getMemSize("Terminate");
	if((exitSesMemSize - initSessMemSize) > initSessMemSize*maxHeapPctExit/100 )
		heapSizeExit = true;
	else
		heapSizeExit = false;

	if( heapSizeExit == false ){
		if( !updateZKState(CONNECTED, AVAILABLE) )
		{
			exception_.exception_nr = odbc_SQLSvc_TerminateDialogue_SQLError_exn_;
			exception_.exception_detail = 25000;
			odbc_SQLSvc_TerminateDialogue_ts_res_(objtag_, call_id_, &exception_);
			exitServerProcess();
		}
	}

	odbc_SQLSvc_TerminateDialogue_ts_res_(objtag_, call_id_, &exception_);

	if( heapSizeExit == true )
	{
		odbc_SQLSvc_StopServer_exc_ StopException;
		StopException.exception_nr=0;
		if (srvrGlobal->traceLogger != NULL)
		{
			srvrGlobal->traceLogger->TraceStopServerExit(StopException);
		}
		exitServerProcess();
	}

bailout:
	if (srvrGlobal->traceLogger != NULL)
	{
		srvrGlobal->traceLogger->TraceDisconnectExit(exception_);
	}
	SRVRTRACE_EXIT(FILE_AME+6);
	return;
}

/*
 * End-of-session housekeeping shared by terminate/break/idle-timeout paths:
 * suspends or rolls back any open transaction, releases cached objects, stops
 * the statistics timer, restores the session id, optionally resets the master
 * priority, and issues the SQL_SESSION 'END' (or 'END:CLEANUP_ESPS') directive.
 */
void __cdecl SRVR::SrvrSessionCleanup(void)
{
	double t = 0;

	// Rollback the transaction. Don't bother to check if autocommit is on or
	// off, since SQL doesn't check for it. When there is no transaction
	// outstanding, SQL would give an error and ignore this error.

	// Suspend any joined txn if they are still active.
	short status;
	if( srvrGlobal->bspjTxnJoined && srvrGlobal->spjTxnId != 0)
	{
		status = SUSPENDTRANSACTION( (short*)&(srvrGlobal->spjTxnId) );
		if(status != 0)
			exitServerProcess();
		srvrGlobal->bspjTxnJoined = FALSE;
		srvrGlobal->spjTxnId = 0;
	}
	else
	{
		if (WSQL_EXEC_Xact(SQLTRANS_STATUS,NULL) == 0)
			if (EXECDIRECT("ROLLBACK WORK") == ODBC_SERVER_ERROR)
				exitServerProcess();
	}

	releaseCachedObject(FALSE, NDCS_DLG_BREAK);

	// resource statistics
	if (resStatSession != NULL)
	{
		resStatSession->end();
		if (CEE_HANDLE_IS_NIL(&StatisticsTimerHandle) == IDL_FALSE)
		{
			CEE_TIMER_DESTROY(&StatisticsTimerHandle);
			CEE_HANDLE_SET_NIL(&StatisticsTimerHandle);
		}
	}
	//end rs

	strcpy(srvrGlobal->sessionId, srvrSessionId);

	if (srvrGlobal->cleanupByTime > 0)
		t = difftime(time(NULL), srvrGlobal->lastCleanupTime); // seconds
	else
		t = 0;

	// Fix for - The below code has been moved to before the
	// end of session call since this was causing issues with RMS.
	// If the service context does not have a priority set then have to
	// default to the priority what the process was started with.
	// We set only the master priority. SQL will adjust the compiler and
	// ESP priorities accordingly.
	// ++++ Note: When service-level default process priority feature is enabled
	// ++++ then this code will not be relevant and should be removed at that time.
	if( srvrGlobal->prtyChanged )
	{
		char sqlcmd[64];
		sprintf(sqlcmd, "SET SESSION DEFAULT MASTER_PRIORITY '%d'", srvrGlobal->srvrPriority );
		EXECDIRECT(sqlcmd);
		srvrGlobal->prtyChanged = false;
	}

	// Fixed a problem when AutoCommit is OFF and SQL starts a transaction
	// during session end (for e.g. dropping of any volatile tables). In this
	// case the transaction does not get commited and new connections to the
	// server fails and the server process becomes useless. The fix
	// is to set AutoCommit to ON so that SQL can commit the transaction.
	// The fix is marked as "AutoCommit OFF fix" below.

	// AutoCommit OFF fix
	EXECDIRECT("SET TRANSACTION AUTOCOMMIT ON");

	// Every cleanupByConnection-th session, or after cleanupByTime minutes,
	// end the session with ESP cleanup; otherwise a plain END.
	if ((srvrGlobal->numConnection+1 == srvrGlobal->cleanupByConnection) ||
		(t > (double)srvrGlobal->cleanupByTime * 60))
	{
		srvrGlobal->numConnection = 0;
		srvrGlobal->lastCleanupTime = time(NULL);
		EXECDIRECT("SET SESSION DEFAULT SQL_SESSION 'END:CLEANUP_ESPS'");
		// all ESPs are stopped
		srvrGlobal->allocatedResources = 0;
	}
	else
	{
		EXECDIRECT("SET SESSION DEFAULT SQL_SESSION 'END'");
		if (srvrGlobal->numConnection == 2147483647)
			srvrGlobal->numConnection = 0; // reset to prevent overflow
		else
			srvrGlobal->numConnection++;
	}

	ClearAdaptiveSegment();

	srvrGlobal->bConfigurationChanged = false;
	releaseGlobalBuffer();

} /* SRVR::SrvrSessionCleanUp() */

/*
 * Clears the SQL compiler caches via internal DELETE statements.
 * Returns true on success; on failure fills errorMsg and returns false.
 */
bool __cdecl SRVR::CompilerCacheReset(char *errorMsg)
{
	// Clear compiler cache by executing the following DELETE statements
	//
	// DELETE ALL FROM TABLE(QUERYCACHE())
	// DELETE ALL FROM TABLE(NATABLECACHE())
	// DELETE ALL FROM TABLE(NAROUTINECACHE())
	//
	// NOTE(review): the code below only executes the QUERYCACHE and
	// NATABLECACHE deletes; NAROUTINECACHE is listed above but never cleared —
	// confirm whether that is intentional.

	SRVR_STMT_HDL *CmpStmt = NULL;
	SQLRETURN retcode = SQL_SUCCESS;
	char CmpQuery[100] = {0};

	if ((CmpStmt = getSrvrStmt("STMT_CMP_CACHE_RESET_1", TRUE)) == NULL)
	{
		//LCOV_EXCL_START
		strcpy (errorMsg, "Allocate Statement");
		return false;
		//LCOV_EXCL_STOP
	}

	strcpy(CmpQuery,"DELETE ALL FROM TABLE(QUERYCACHE())");
	retcode = CmpStmt->ExecDirect(NULL, CmpQuery, INTERNAL_STMT, TYPE_UNKNOWN, SQL_ASYNC_ENABLE_OFF, 0);

	if (retcode != SQL_ERROR) {
		strcpy(CmpQuery,"DELETE ALL FROM TABLE(NATABLECACHE())");
		retcode = CmpStmt->ExecDirect(NULL, CmpQuery, INTERNAL_STMT, TYPE_UNKNOWN, SQL_ASYNC_ENABLE_OFF, 0);
	}

	CmpStmt->InternalStmtClose(SQL_DROP);

	//LCOV_EXCL_START
	if (retcode == SQL_ERROR)
	{
		strcpy (errorMsg, "Error while clearing internal compiler cache");
		return false;
	}
	//LCOV_EXCL_STOP

	return true;

} /* SRVR::CompilerCacheReset() */

/*
 * Invoked when the client connection breaks (monitor callback): cleans up any
 * active session, exits the process if configured to stop on disconnect or if
 * the heap grew past maxHeapPctExit, and otherwise returns the server to the
 * AVAILABLE pool.
 */
void __cdecl SRVR::BreakDialogue(CEE_tag_def monitor_tag)
{
	long exitSesMemSize = 0;

	SRVRTRACE_ENTER(FILE_AME+7);

	if (srvrGlobal->srvrState == SRVR_AVAILABLE)
	{
		updateSrvrState(SRVR_CLIENT_DISAPPEARED);
		return;
	}

	if(srvrGlobal->dialogueId != -1)
	{
		SRVR::SrvrSessionCleanup();
		srvrGlobal->dialogueId = -1;
	}

	if(srvrGlobal->stopTypeFlag == STOP_WHEN_DISCONNECTED)
	{
		odbc_SQLSvc_StopServer_exc_ StopException;
		StopException.exception_nr=0;
		if (srvrGlobal->traceLogger != NULL)
		{
			srvrGlobal->traceLogger->TraceStopServerExit(StopException);
		}
		exitServerProcess();
	}

	// Heap growth check: exit the process rather than reuse it if the session
	// left the heap more than maxHeapPctExit percent above its initial size.
	exitSesMemSize = 0;
	if( maxHeapPctExit != 0 && initSessMemSize != 0 )
		exitSesMemSize = getMemSize("Break");
	if((exitSesMemSize - initSessMemSize) > initSessMemSize*maxHeapPctExit/100 )
	{
		odbc_SQLSvc_StopServer_exc_ StopException;
		StopException.exception_nr=0;
		if (srvrGlobal->traceLogger != NULL)
		{
			srvrGlobal->traceLogger->TraceStopServerExit(StopException);
		}
		exitServerProcess();
	}

	if (srvrGlobal->srvrState == SRVR_CONNECTED)
	{
		updateSrvrState(SRVR_DISCONNECTED);
	}
	else
		updateSrvrState(SRVR_CLIENT_DISAPPEARED);

	SRVRTRACE_EXIT(FILE_AME+7);
}

// Timer Expiration routine, when connIdleTimeout expires.
// Disconnects an idle client session and either exits the process
// (STOP_WHEN_DISCONNECTED) or cleans up transport lists and returns the
// server to the pool.
void __cdecl SRVR::connIdleTimerExpired(CEE_tag_def timer_tag)
{
	SRVRTRACE_ENTER(FILE_AME+8);

	if(srvrGlobal->mutex->locked()) // a tcp/ip request was received just in time, ignore this timeout
		return;
	srvrGlobal->mutex->lock();

	char tmpStringEnv[1024];
	sprintf(tmpStringEnv, "Idle Connection Timer Expired. Client %s Disconnecting: Data Source: %s, Application: %s, Server Reference: %s",
		srvrGlobal->ClientComputerName, srvrGlobal->DSName, srvrGlobal->ApplicationName, srvrGlobal->srvrObjRef);

	if (srvrGlobal->traceLogger != NULL)
	{
		//LCOV_EXCL_START
		SendEventMsg(MSG_SERVER_TRACE_INFO
		   , EVENTLOG_INFORMATION_TYPE
		   , srvrGlobal->nskProcessInfo.processId
		   , ODBCMX_SERVER
		   , srvrGlobal->srvrObjRef
		   , 4
		   , srvrGlobal->sessionId
		   , "connIdleTimerExpired"
		   , "0"
		   , tmpStringEnv);
		//LCOV_EXCL_STOP
	}

	releaseCachedObject(FALSE, NDCS_CONN_IDLE);

	SRVR::SrvrSessionCleanup();
	srvrGlobal->dialogueId = -1;

	if (srvrGlobal->stopTypeFlag == STOP_WHEN_DISCONNECTED)
		updateSrvrState(SRVR_STOP_WHEN_DISCONNECTED);
	else
		updateSrvrState(SRVR_DISCONNECTED);

	// NOTE(review): in the STOP_WHEN_DISCONNECTED branch exitServerProcess()
	// does not return, so the trailing unlock below only runs on the else path.
	if (srvrGlobal->stopTypeFlag == STOP_WHEN_DISCONNECTED)
	{
		srvrGlobal->mutex->unlock();
		exitServerProcess();
	}
	else
	{
		GTransport.m_TCPIPSystemSrvr_list->cleanup();
		GTransport.m_FSystemSrvr_list->cleanup();
	}
	srvrGlobal->mutex->unlock();

	SRVRTRACE_EXIT(FILE_AME+8);
	return;
}

// Timer Expiration routine, when srvrIdleTimeout expires.
// Asynchronously asks the association server whether this idle server should
// keep living; the answer arrives in odbcas_ASSvc_WouldLikeToLive_ccf_.
void __cdecl SRVR::srvrIdleTimerExpired(CEE_tag_def timer_tag)
{
	SRVRTRACE_ENTER(FILE_AME+9);
	CEE_status sts;
	char tmpString[128];

	if(srvrGlobal->mutex->locked())
		return;

	// Post a message to Association to check if the srvr should live only if the srvr state is
	// SRVR_AVAILABLE. Normally you should reach this function only when the server state is SRVR_AVAILABLE.
	// The timer is destroyed when the server is connected and made active only when the srvr is disconnected.
	// We should rather post a message, than make a synchronous call to association server, since the server
	// can be ready to take a connection message from the client.
	if ( checkIfASSvcLives() == TRUE && srvrGlobal->srvrState == SRVR_AVAILABLE )
	{
		AS_CALL_CONTEXT* asCallContext;
		asCallContext = new AS_CALL_CONTEXT;
		if (asCallContext == NULL)
		{
			//LCOV_EXCL_START
			SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE,
					srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
					srvrGlobal->srvrObjRef, 1, "asCallContext");
			exitServerProcess();
			//LCOV_EXCL_STOP
		}
		if((sts = odbcas_ASSvc_WouldLikeToLive_pst_(
				&(asCallContext->ASSvc_proxy),
				asCallContext,
				odbcas_ASSvc_WouldLikeToLive_ccf_,
				srvrGlobal->srvrType,
				srvrGlobal->srvrObjRef)) != CEE_SUCCESS)
		{
			//LCOV_EXCL_START
			delete asCallContext;
			sprintf(tmpString, "%ld", sts);
			SendEventMsg(MSG_KRYPTON_ERROR, EVENTLOG_ERROR_TYPE,
					srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
					srvrGlobal->srvrObjRef, 2, tmpString, FORMAT_LAST_ERROR());
			exitServerProcess();
			//LCOV_EXCL_STOP
		}
	}
	SRVRTRACE_EXIT(FILE_AME+9);
}

/*
 * Call Completion function prototype for
 * operation 'odbcas_ASSvc_WouldLikeToLive'
 *
 * Completion of the idle-server "may I live?" request: exits the process when
 * the association server answers DIE (or on error), otherwise re-arms the
 * server idle timer. Also ends the SQL session and stops all ESPs first.
 */
extern "C" void
odbcas_ASSvc_WouldLikeToLive_ccf_(
    /* In    */ CEE_tag_def cmptag_
  , /* In    */ const odbcas_ASSvc_WouldLikeToLive_exc_ *exception_
  , /* In    */ IDL_long lifePermit
  )
{
	SRVRTRACE_ENTER(FILE_AME+10);
	BOOL createTimer = FALSE;
	CEE_status sts;
	char tmpString[25];
	AS_CALL_CONTEXT *asCallContext = (AS_CALL_CONTEXT *)cmptag_;

	if (EXECDIRECT("SET SESSION DEFAULT SQL_SESSION 'BEGIN';") == ODBC_SERVER_ERROR)
		exitServerProcess();
	if (EXECDIRECT("SET SESSION DEFAULT SQL_SESSION 'END:CLEANUP_ESPS'") == ODBC_SERVER_ERROR)
		exitServerProcess();
	// all ESPs are stopped
	srvrGlobal->allocatedResources = 0;

	if (exception_->exception_nr == CEE_SUCCESS)
	{
		// Check if server is SRVR_AVAILABLE state
		if (srvrGlobal->srvrState == SRVR_AVAILABLE)
		{
			// add the following lines of code
			if (lifePermit == DIE)
				exitServerProcess();
			else
				createTimer = TRUE;
		}
		else
			createTimer = FALSE;
	}
	else
	{
		//LCOV_EXCL_START
		sprintf(tmpString, "%ld", exception_->exception_nr);
		SendEventMsg(MSG_KRYPTON_ERROR, EVENTLOG_ERROR_TYPE,
				srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
				srvrGlobal->srvrObjRef, 2, tmpString, FORMAT_LAST_ERROR());
		SendEventMsg(MSG_SRVR_IDLE_TIMEOUT_ERROR, EVENTLOG_ERROR_TYPE,
				srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
				srvrGlobal->srvrObjRef, 0);
		// Better, this server process die, since it is idle
		exitServerProcess();
		//LCOV_EXCL_STOP
	}

	if (createTimer)
	{
		if (srvrGlobal->srvrContext.srvrIdleTimeout != INFINITE_SRVR_IDLE_TIMEOUT)
		{
			// Destroy any stale timer before re-arming (timeout is in minutes,
			// CEE_TIMER_CREATE2 takes seconds).
			if (CEE_HANDLE_IS_NIL(&srvrGlobal->srvrIdleTimerHandle) == IDL_FALSE)
			{
				CEE_TIMER_DESTROY(&srvrGlobal->srvrIdleTimerHandle);
				CEE_HANDLE_SET_NIL(&srvrGlobal->srvrIdleTimerHandle);
			}
			sts = CEE_TIMER_CREATE2((long)srvrGlobal->srvrContext.srvrIdleTimeout * 60, 0,
					srvrIdleTimerExpired, NULL, &srvrGlobal->srvrIdleTimerHandle,srvrGlobal->receiveThrId);
			if (sts != CEE_SUCCESS)
			{
				//LCOV_EXCL_START
				CEE_HANDLE_SET_NIL(&srvrGlobal->srvrIdleTimerHandle);
				sprintf(tmpString, "%ld", sts);
				SendEventMsg(MSG_KRYPTON_ERROR, EVENTLOG_ERROR_TYPE,
						srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
						srvrGlobal->srvrObjRef, 2, tmpString, FORMAT_LAST_ERROR());
				SendEventMsg(MSG_SRVR_IDLE_TIMEOUT_ERROR, EVENTLOG_ERROR_TYPE,
						srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
						srvrGlobal->srvrObjRef, 0);
				//LCOV_EXCL_STOP
			}
		}
	}
	delete asCallContext;
	SRVRTRACE_EXIT(FILE_AME+10);
}

// Destroys the connection-idle timer while a dialogue is active
// (no-op unless the server is currently CONNECTED).
void SRVR::destroyConnIdleTimer()
{
	SRVRTRACE_ENTER(FILE_AME+11);
	if (srvrGlobal != NULL && srvrGlobal->srvrState == SRVR_CONNECTED)
	{
		if (CEE_HANDLE_IS_NIL(&srvrGlobal->connIdleTimerHandle) == IDL_FALSE)
		{
			CEE_TIMER_DESTROY(&srvrGlobal->connIdleTimerHandle);
			CEE_HANDLE_SET_NIL(&srvrGlobal->connIdleTimerHandle);
		}
	}
	SRVRTRACE_EXIT(FILE_AME+11);
}

// (Re)arms the connection-idle timer for a CONNECTED server. JDBC clients may
// carry their own idle timeout (already in seconds); the data-source timeout
// is configured in minutes and converted to seconds here.
void SRVR::startConnIdleTimer()
{
	SRVRTRACE_ENTER(FILE_AME+12);
	CEE_status sts;
	long connIdleTimeout = INFINITE_CONN_IDLE_TIMEOUT;

	if (srvrGlobal != NULL && srvrGlobal->srvrState == SRVR_CONNECTED)
	{
		if (CEE_HANDLE_IS_NIL(&srvrGlobal->connIdleTimerHandle) == IDL_FALSE)
		{
			CEE_TIMER_DESTROY(&srvrGlobal->connIdleTimerHandle);
			CEE_HANDLE_SET_NIL(&srvrGlobal->connIdleTimerHandle);
		}
		if ((srvrGlobal->drvrVersion.componentId == JDBC_DRVR_COMPONENT) &&
			(srvrGlobal->javaConnIdleTimeout > JDBC_DATASOURCE_CONN_IDLE_TIMEOUT))
		{
			if (srvrGlobal->javaConnIdleTimeout != JDBC_INFINITE_CONN_IDLE_TIMEOUT)
				connIdleTimeout = (long)srvrGlobal->javaConnIdleTimeout;
		}
		else if (srvrGlobal->srvrContext.connIdleTimeout != INFINITE_CONN_IDLE_TIMEOUT)
		{
			connIdleTimeout = (long)srvrGlobal->srvrContext.connIdleTimeout * 60;
		}
		if (connIdleTimeout != INFINITE_CONN_IDLE_TIMEOUT)
		{
			sts = CEE_TIMER_CREATE2((long)connIdleTimeout, 0, connIdleTimerExpired, NULL,
					&srvrGlobal->connIdleTimerHandle,srvrGlobal->receiveThrId);
			if (sts != CEE_SUCCESS)
			{
				//LCOV_EXCL_START
				char tmpString[32];
				CEE_HANDLE_SET_NIL(&srvrGlobal->connIdleTimerHandle);
				sprintf(tmpString, "%ld", sts);
				SendEventMsg(MSG_KRYPTON_ERROR, EVENTLOG_ERROR_TYPE,
						srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
						srvrGlobal->srvrObjRef, 2, tmpString, FORMAT_LAST_ERROR());
				SendEventMsg(MSG_SRVR_IDLE_TIMEOUT_ERROR, EVENTLOG_ERROR_TYPE,
						srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
						srvrGlobal->srvrObjRef, 0);
				//LCOV_EXCL_STOP
			}
		}
	}
	SRVRTRACE_EXIT(FILE_AME+12);
}

/*
 * Central server state transition routine. Updates srvrGlobal->srvrState,
 * mirrors the transition to ZooKeeper, and manages the AS-polling,
 * server-idle and connection-idle timers for the new state.
 * Always returns TRUE.
 */
BOOL SRVR::updateSrvrState(SRVR_STATE srvrState)
{
	SRVRTRACE_ENTER(FILE_AME+13);
	CEE_status sts;
	char tmpString[32];
	double t = 0;
	bool result = TRUE;

	// Fix for - clients hang on a connect because the server does not cleanup after an exception
	if(srvrState == SRVR_DISCONNECTED ||
	   srvrState == SRVR_CLIENT_DISAPPEARED ||
	   srvrState == SRVR_CONNECT_REJECTED ||
	   srvrState == SRVR_CONNECT_FAILED)
	{
		GTransport.m_TCPIPSystemSrvr_list->cleanup();
		GTransport.m_FSystemSrvr_list->cleanup();
	}

	switch (srvrState)
	{
	case SRVR_DISCONNECTED:
	case SRVR_CLIENT_DISAPPEARED:
	case SRVR_CONNECT_REJECTED:
	case SRVR_CONNECT_FAILED:
		// This fix is for db Transporter. Since dbt makes TMF calls for some reason SQL_EXEC_Xact
		// is not returning there is a active transaction in this case. So work around is before putting
		// server in availabe state check from active transaction and rollback. This is safe any way since
		// server can be continue to run.
		ABORTTRANSACTION();

		srvrGlobal->srvrState = SRVR_AVAILABLE;

		// NOTE(review): this declaration shadows the function-scope 'result'
		// initialized to TRUE above; only the inner one is used below.
		bool result;
		if( srvrState == SRVR_CONNECT_REJECTED || srvrState == SRVR_CONNECT_FAILED )
		{
			// Commenting the following code. MXOSRVR remains in this state
			// for timeout duration (clientConnErrorTimeOut).
			// Changing the state to AVAILABLE immediately.
			// Leaving the related code in place (ASTimerExpired)
			// if this gets revisited.
			//
			// result = updateZKState(CONNECTING, srvrState == SRVR_CONNECT_REJECTED? CONNECT_REJECTED : CONNECT_FAILED);
			result = updateZKState(CONNECTING, AVAILABLE);
		}
		else
			result = updateZKState(CONNECTED, AVAILABLE);

		if( !result )
			exitServerProcess();

		// May be this TimerHandle is OLD Timer, Destroy it
		if (CEE_HANDLE_IS_NIL(&srvrGlobal->connIdleTimerHandle) == IDL_FALSE)
		{
			//LCOV_EXCL_START
			CEE_TIMER_DESTROY(&srvrGlobal->connIdleTimerHandle);
			CEE_HANDLE_SET_NIL(&srvrGlobal->connIdleTimerHandle);
			//LCOV_EXCL_STOP
		}
		// The server need to die, when disconnected, hence don't start any timer
		if (srvrGlobal->stopTypeFlag == STOP_WHEN_DISCONNECTED)
			break;
		// If server is available, restart timer
		if (CEE_HANDLE_IS_NIL(&srvrGlobal->ASTimerHandle) == IDL_FALSE)
		{
			//LCOV_EXCL_START
			CEE_TIMER_DESTROY(&srvrGlobal->ASTimerHandle);
			CEE_HANDLE_SET_NIL(&srvrGlobal->ASTimerHandle);
			//LCOV_EXCL_STOP
		}
		srvrGlobal->bSkipASTimer = false;

		CEE_TIMER_CREATE2(DEFAULT_AS_POLLING,0,ASTimerExpired,(CEE_tag_def)NULL,
				&srvrGlobal->ASTimerHandle,srvrGlobal->receiveThrId);

		// Create SrvrIdleTimeout timer
		if (srvrGlobal->srvrContext.srvrIdleTimeout != INFINITE_SRVR_IDLE_TIMEOUT)
		{
			if (CEE_HANDLE_IS_NIL(&srvrGlobal->srvrIdleTimerHandle) == IDL_FALSE)
			{
				CEE_TIMER_DESTROY(&srvrGlobal->srvrIdleTimerHandle);
				CEE_HANDLE_SET_NIL(&srvrGlobal->srvrIdleTimerHandle);
			}
			sts = CEE_TIMER_CREATE2((long)srvrGlobal->srvrContext.srvrIdleTimeout * 60, 0,
					srvrIdleTimerExpired, NULL, &srvrGlobal->srvrIdleTimerHandle,srvrGlobal->receiveThrId);
			if (sts != CEE_SUCCESS)
			{
				//LCOV_EXCL_START
				CEE_HANDLE_SET_NIL(&srvrGlobal->srvrIdleTimerHandle);
				sprintf(tmpString, "%ld", sts);
				SendEventMsg(MSG_KRYPTON_ERROR, EVENTLOG_ERROR_TYPE,
						srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
						srvrGlobal->srvrObjRef, 2, tmpString, FORMAT_LAST_ERROR());
				SendEventMsg(MSG_SRVR_IDLE_TIMEOUT_ERROR, EVENTLOG_ERROR_TYPE,
						srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
						srvrGlobal->srvrObjRef, 0);
				//LCOV_EXCL_STOP
			}
		}
		break;
	case SRVR_CONNECTED:
		srvrGlobal->srvrState = srvrState;
		// If server is connected, stop checking AS
		if (CEE_HANDLE_IS_NIL(&srvrGlobal->ASTimerHandle) == IDL_FALSE)
		{
			srvrGlobal->bSkipASTimer = true;
		}
		// Destroy the srvrIdleTimeout timer
		if (CEE_HANDLE_IS_NIL(&srvrGlobal->srvrIdleTimerHandle) == IDL_FALSE)
		{
			CEE_TIMER_DESTROY(&srvrGlobal->srvrIdleTimerHandle);
			CEE_HANDLE_SET_NIL(&srvrGlobal->srvrIdleTimerHandle);
		}
		if( !updateZKState(CONNECTING, CONNECTED) )
			exitServerProcess();
		break;
	case SRVR_STOP_WHEN_DISCONNECTED:
		if (srvrGlobal->cleanupByTime > 0)
			t = difftime(time(NULL), srvrGlobal->lastCleanupTime); // seconds
		else
			t = 0;

		// AutoCommit OFF fix
		EXECDIRECT("SET TRANSACTION AUTOCOMMIT ON");

		if ((srvrGlobal->numConnection+1 == srvrGlobal->cleanupByConnection) ||
			(t > (double)srvrGlobal->cleanupByTime * 60))
		{
			srvrGlobal->numConnection = 0;
			srvrGlobal->lastCleanupTime = time(NULL);
			EXECDIRECT("SET SESSION DEFAULT SQL_SESSION 'END:CLEANUP_ESPS'");
			// all ESPs are stopped
			srvrGlobal->allocatedResources = 0;
		}
		else
		{
			EXECDIRECT("SET SESSION DEFAULT SQL_SESSION 'END'");
			if (srvrGlobal->numConnection == 2147483647)
				srvrGlobal->numConnection = 0; // reset to prevent overflow
			else
				srvrGlobal->numConnection++;
		}
		break;
	default:
		break;
	}
	SRVRTRACE_EXIT(FILE_AME+13);
	return TRUE;
}

// Applies trace/statistics/context settings delivered by the association
// server. The high bit (0x80000000) of TraceType/StatisticsType means
// "disable"; any other non-zero value means "enable with that type".
void SRVR::UPDATE_SERVER_WAITED(IDL_long TraceType, IDL_long StatisticsType, IDL_long ContextType,const SRVR_CONTEXT_def *srvrContext)
{
	SRVRTRACE_ENTER(FILE_AME+14);
	if (TraceType & 0x80000000)
		DISABLE_SERVER_TRACE(TraceType & ~0x80000000);
	else if (TraceType != 0)
		ENABLE_SERVER_TRACE(TraceType);
	if (StatisticsType & 0x80000000)
		DISABLE_STATISTICS();
	else if (StatisticsType != 0)
		ENABLE_STATISTICS(StatisticsType);
	if (ContextType)
		UPDATE_SERVER_CONTEXT(srvrContext);
	SRVRTRACE_EXIT(FILE_AME+14);
}

/*
 * Call Completion function pointer type for
 * operation 'odbcas_ASSvc_UpdateSrvrState'
 *
 * Completion of the state-update call to the association server: on error it
 * either exits the process or (when still CONNECTED) flags the server to stop
 * on disconnect; on success it applies any trace/statistics/context changes.
 */
extern "C" void
odbcas_ASSvc_UpdateSrvrState_ccf_(
    /* In    */ CEE_tag_def cmptag_
  , /* In    */ const odbcas_ASSvc_UpdateSrvrState_exc_ *exception_
  , /* In    */ IDL_long TraceType
  , /* In    */ IDL_long StatisticsType
  , /* In    */ IDL_long ContextType
  , /* In    */ const SRVR_CONTEXT_def *srvrContext
  )
{
	SRVRTRACE_ENTER(FILE_AME+15);
	char errorMessage[100];
	AS_CALL_CONTEXT *asCallContext = (AS_CALL_CONTEXT *)cmptag_;
	SRVR_STATE srvrState = asCallContext->srvrState;
	delete asCallContext;
	switch (exception_->exception_nr)
	{
	case CEE_SUCCESS:
		break;
	case odbcas_ASSvc_UpdateSrvrState_ASTimeout_exn_ :
		// We use this exception to signal server to stop
		exitServerProcess();
		break;
	case odbcas_ASSvc_UpdateSrvrState_ASParamError_exn_ :
		break;
	// This exception is not raised by AS Now
	//LCOV_EXCL_START
	case odbcas_ASSvc_UpdateSrvrState_ASStateChangeError_exn_:
		SendEventMsg(MSG_UPDATE_SRVR_STATE_FAILED, EVENTLOG_ERROR_TYPE,
				srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
				srvrGlobal->srvrObjRef, 0);
		break;
	default:
		SendEventMsg(MSG_UPDATE_SRVR_STATE_FAILED, EVENTLOG_ERROR_TYPE,
				srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
				srvrGlobal->srvrObjRef, 0);
	//LCOV_EXCL_STOP
	}
	if (exception_->exception_nr != 0)
	{
		if (srvrGlobal->srvrState == srvrState) // check if the current state is what we tried to update
		{
			if (srvrGlobal->srvrState != SRVR_CONNECTED)
			{
				//LCOV_EXCL_START
				sprintf(errorMessage, "Exception=%ld CurrentSrvrState=%d SrvrState=%d",
						exception_->exception_nr, srvrGlobal->srvrState, srvrState);
				SendEventMsg(MSG_KRYPTON_ERROR, EVENTLOG_ERROR_TYPE,
						srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
						srvrGlobal->srvrObjRef, 2, errorMessage, FORMAT_LAST_ERROR());
				exitServerProcess();
				//LCOV_EXCL_STOP
			}
			else // trigger stopping when disconnected
			{
				CEE_HANDLE_SET_NIL(&(callIdStopServer));
				srvrGlobal->stopTypeFlag = STOP_WHEN_DISCONNECTED;
			}
		}
		else
		{
			//LCOV_EXCL_START
			// late response from AS and hence it is better for the server to die
			sprintf(errorMessage, "Exception=%ld CurrentSrvrState=%d SrvrState=%d",
					exception_->exception_nr, srvrGlobal->srvrState, srvrState);
			SendEventMsg(MSG_KRYPTON_ERROR, EVENTLOG_ERROR_TYPE,
					srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
					srvrGlobal->srvrObjRef, 2, errorMessage, FORMAT_LAST_ERROR());
			exitServerProcess();
			//LCOV_EXCL_STOP
		}
	}
	else
	{
		switch (srvrState)
		{
		case SRVR_DISCONNECTED:
		case SRVR_CLIENT_DISAPPEARED:
		case SRVR_CONNECT_REJECTED:
		case SRVR_CONNECT_FAILED:
			UPDATE_SERVER_WAITED(TraceType, StatisticsType, ContextType, srvrContext);
			break;
		}
	}
	SRVRTRACE_EXIT(FILE_AME+15);
	return ;
}

// Logs an exit event, terminates worker threads and ends the process with
// status 1. Never returns.
void SRVR::exitServerProcess()
{
	SRVRTRACE_ENTER(FILE_AME+16);
	short nskError;
	char tmpString[32];

	SendEventMsg(MSG_SERVER_TRACE_INFO,
				EVENTLOG_INFORMATION_TYPE,
				srvrGlobal->nskASProcessInfo.processId,
				ODBCMX_SERVICE,
				srvrGlobal->srvrObjRef,
				3,
				srvrGlobal->sessionId,
				"exitServerProcess()",
				"0");
	terminateThreads(1);
	exit(1);

	// NOTE(review): unreachable — exit(1) above does not return.
	SRVRTRACE_EXIT(FILE_AME+16);
}

/*
 * Asynchronous method function prototype for
 * operation 'odbc_SQLSvc_StopServer'
 *
 * Stops the server, either immediately (STOP_SRVR: rollback, release cached
 * objects, reply, exit) or deferred until the current client disconnects.
 */
extern "C" void
odbc_SQLSvc_StopServer_ame_(
    /* In    */ CEE_tag_def objtag_
  , /* In    */ const CEE_handle_def *call_id_
  , /* In    */ DIALOGUE_ID_def dialogueId
  , /* In    */ IDL_long StopType
  , /* In    */ IDL_string ReasonText
  )
{
	SRVRTRACE_ENTER(FILE_AME+17);
	odbc_SQLSvc_StopServer_exc_ StopException={0,0};
	if (srvrGlobal->traceLogger != NULL)
	{
		srvrGlobal->traceLogger->TraceStopServerEnter(dialogueId, StopType, ReasonText);
	}
	StopException.exception_nr = 0;
	if (srvrGlobal->srvrState == SRVR_STOPPING)
	{
		StopException.exception_nr = odbc_SQLSvc_StopServer_ProcessStopError_exn_;
		StopException.u.ProcessStopError.ErrorText = "Already Stopped";
		odbc_SQLSvc_StopServer_ts_res_(objtag_, call_id_, &StopException);
		if (srvrGlobal->traceLogger != NULL)
		{
			srvrGlobal->traceLogger->TraceStopServerExit(StopException);
		}
	}
	if (srvrGlobal->srvrState == SRVR_CONNECTED)
	{
		if (dialogueId == srvrGlobal->dialogueId)
		{
			//Susan , changed STOP to STOP_SRVR in Global.H
			if(StopType == STOP_SRVR)
			{
				if (WSQL_EXEC_Xact(SQLTRANS_STATUS,NULL) == 0)
					EXECDIRECT("ROLLBACK WORK");
				releaseCachedObject(FALSE, NDCS_STOP_SRVR);
				odbc_SQLSvc_StopServer_ts_res_(objtag_, call_id_, &StopException);
				if (srvrGlobal->traceLogger != NULL)
				{
					srvrGlobal->traceLogger->TraceStopServerExit(StopException);
				}
				exitServerProcess();
			}
			else
			{
				// Defer the stop until the client disconnects; remember the
				// call id so the deferred stop can be completed later.
				srvrGlobal->stopTypeFlag = STOP_WHEN_DISCONNECTED;
				memcpy(&(callIdStopServer), call_id_, sizeof(CEE_handle_def));
				// ASSOC server knows when server exists from filesystem.
				odbc_SQLSvc_StopServer_ts_res_(objtag_, call_id_, &StopException);
			}
		}
	}
	else
	{
		if (WSQL_EXEC_Xact(SQLTRANS_STATUS,NULL) == 0)
			EXECDIRECT("ROLLBACK WORK");
		releaseCachedObject(FALSE, NDCS_STOP_SRVR);
		odbc_SQLSvc_StopServer_ts_res_(objtag_, call_id_, &StopException);
		if (srvrGlobal->traceLogger != NULL)
		{
			srvrGlobal->traceLogger->TraceStopServerExit(StopException);
		}
		exitServerProcess();
	}
	SRVRTRACE_EXIT(FILE_AME+17);
}

/*
 * Asynchronous method implementation for
 * operation 'odbc_SQLSrvr_Close'
 *
 * Closes a statement: validates the dialogue, delegates to the _sme_ worker,
 * finishes query-service accounting, flushes statement statistics to the
 * repository, and sends the response. Restarts the connection-idle timer.
 */
extern "C" void
odbc_SQLSrvr_Close_ame_(
    /* In    */ CEE_tag_def objtag_
  , /* In    */ const CEE_handle_def *call_id_
  , /* In    */ DIALOGUE_ID_def dialogueId
  , /* In    */ const IDL_char *stmtLabel
  , /* In    */ IDL_unsigned_short freeResourceOpt
  )
{
	SRVRTRACE_ENTER(FILE_AME+18);

	odbc_SQLSvc_Close_exc_ exception_={0,0,0};
	IDL_long rowsAffected = -1;
	IDL_long returnCode = SQL_SUCCESS;
	IDL_long sqlWarningOrErrorLength = 0;
	BYTE *sqlWarningOrError = NULL;
	RETCODE rc = SQL_SUCCESS;

	if (srvrGlobal->traceLogger != NULL)
	{
		srvrGlobal->traceLogger->TraceCloseEnter(dialogueId, stmtLabel, freeResourceOpt);
	}

	DESTROY_CONN_IDLE_TIMER

	if (srvrGlobal != NULL && srvrGlobal->srvrType == CORE_SRVR)
	{
		if (srvrGlobal->srvrState == SRVR_CONNECTED)
		{
			if (dialogueId != srvrGlobal->dialogueId)
			{
				//LCOV_EXCL_START
				returnCode = SQL_ERROR;
				GETMXCSWARNINGORERROR(-1, "HY000", "Invalid Connection.", &sqlWarningOrErrorLength, sqlWarningOrError);
				//LCOV_EXCL_STOP
			}
			else
			{
				odbc_SQLSrvr_Close_sme_( objtag_, call_id_, dialogueId, stmtLabel, freeResourceOpt,
						&rowsAffected, &returnCode, &sqlWarningOrErrorLength, sqlWarningOrError);
			}
		}
		else
		{
			returnCode = SQL_ERROR;
			GETMXCSWARNINGORERROR(-1, "HY000", "Invalid Connection.", &sqlWarningOrErrorLength, sqlWarningOrError);
		}
	}
	else
	{
		odbc_SQLSrvr_Close_sme_( objtag_, call_id_, dialogueId, stmtLabel, freeResourceOpt,
				&rowsAffected, &returnCode, &sqlWarningOrErrorLength, sqlWarningOrError);
	}

	qrysrvc_ExecuteFinished(stmtLabel, NULL, false, returnCode, false);

	// Flush per-statement statistics for external statements to the repository.
	SRVR_STMT_HDL *pSrvrStmt = SRVR::getSrvrStmt(stmtLabel, FALSE);
	if (pSrvrStmt != NULL)
	{
		if ((resStatStatement != NULL) && (pSrvrStmt->stmtType == EXTERNAL_STMT))
			resStatStatement->endRepository(pSrvrStmt, sqlWarningOrErrorLength, sqlWarningOrError, true);
	}

	odbc_SQLSrvr_Close_ts_res_(objtag_, call_id_, returnCode, rowsAffected, sqlWarningOrErrorLength, sqlWarningOrError);

	//LCOV_EXCL_START
	if (srvrGlobal->traceLogger != NULL)
	{
		srvrGlobal->traceLogger->TraceClose2Exit(returnCode, rowsAffected, sqlWarningOrErrorLength, sqlWarningOrError);
	}
	//LCOV_EXCL_STOP

	START_CONN_IDLE_TIMER

	SRVRTRACE_EXIT(FILE_AME+18);

	return;
} /* odbc_SQLSrvr_Close_ame_() */

// Creates the trace logger (if not already active), opens the trace collector
// and logs that tracing is enabled. Exits the process on allocation failure.
void SRVR::ENABLE_SERVER_TRACE(IDL_long TraceType)
{
	char tmpString[50];

	if(srvrGlobal->traceLogger == NULL) // check if trace is enabled or not
	{
		// trace class here
		srvrGlobal->traceLogger = new ODBCMXTraceMsg(srvrGlobal->nskProcessInfo.processId, srvrGlobal->srvrObjRef);
		if (srvrGlobal->traceLogger == NULL)
		{
			//LCOV_EXCL_START
			SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE,
					srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
					srvrGlobal->srvrObjRef, 1, "srvrGlobal->traceLogger");
			exitServerProcess();
			//LCOV_EXCL_STOP
		}
		srvrGlobal->traceLogger->OpenTraceCollector(srvrGlobal->TraceCollector);
		sprintf(tmpString, "Server Trace Enabled.");
		SendEventMsg(MSG_SERVER_TRACE_INFO,
					EVENTLOG_INFORMATION_TYPE,
					srvrGlobal->nskASProcessInfo.processId,
					ODBCMX_SERVICE,
					srvrGlobal->srvrObjRef,
					4,
					srvrGlobal->sessionId,
					"EnableServerTrace",
					"0",
					tmpString);
	}
}

/*
 * Asynchronous method function for
 * operation 'odbc_SQLSvc_EnableServerTrace'
 *
 * Enables tracing when the server is CONNECTED or AVAILABLE and the dialogue
 * id matches (or is 0); otherwise reports a TraceError exception.
 */
extern "C" void
odbc_SQLSvc_EnableServerTrace_ame_(
    /* In    */ CEE_tag_def objtag_
  , /* In    */ const CEE_handle_def *call_id_
  , /* In    */ DIALOGUE_ID_def dialogueId
  , /* In    */ IDL_long TraceType
  )
{
	SRVRTRACE_ENTER(FILE_AME+29);
	char tmpString[50];
	odbc_SQLSvc_EnableServerTrace_exc_ exception={0,0,0};

	if (srvrGlobal->srvrState == SRVR_STOPPING)
	{
		exception.exception_nr = odbc_SQLSvc_EnableServerTrace_TraceError_exn_;
	}
	else if ((srvrGlobal->srvrState == SRVR_CONNECTED) || (srvrGlobal->srvrState == SRVR_AVAILABLE))
	{
		if ((dialogueId == srvrGlobal->dialogueId) || (dialogueId == 0))
		{
			ENABLE_SERVER_TRACE(TraceType);
		}
		else
			exception.exception_nr = odbc_SQLSvc_EnableServerTrace_TraceError_exn_;
	}
	else
		exception.exception_nr = odbc_SQLSvc_EnableServerTrace_TraceError_exn_;

	if (exception.exception_nr != 0)
	{
		//LCOV_EXCL_START
		sprintf(tmpString, "Server Trace Enable failed.");
		SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0,
				ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, tmpString);
		//LCOV_EXCL_STOP
	}
	odbc_SQLSvc_EnableServerTrace_ts_res_(objtag_, call_id_, &exception);
	SRVRTRACE_EXIT(FILE_AME+29);
}

// Tears down the trace logger (if active) and logs that tracing is disabled.
void SRVR::DISABLE_SERVER_TRACE(IDL_long TraceType)
{
	char tmpString[50];

	if(srvrGlobal->traceLogger != NULL) // check if trace is enabled or not
	{
		// trace class here
		delete srvrGlobal->traceLogger;
		srvrGlobal->traceLogger = NULL;
		sprintf(tmpString, "Server Trace Disabled.");
		SendEventMsg(MSG_SERVER_TRACE_INFO,
					EVENTLOG_INFORMATION_TYPE,
					srvrGlobal->nskASProcessInfo.processId,
					ODBCMX_SERVICE,
					srvrGlobal->srvrObjRef,
					4,
					srvrGlobal->sessionId,
					"DisableServerTrace",
					"0",
					tmpString);
	}
}

/*
 * Asynchronous method function for
 * operation 'odbc_SQLSvc_DisableServerTrace'
 *
 * Disables tracing when the server is AVAILABLE and the dialogue id matches
 * (or is 0); otherwise reports a TraceError exception.
 */
extern "C" void
odbc_SQLSvc_DisableServerTrace_ame_(
    /* In    */ CEE_tag_def objtag_
  , /* In    */ const CEE_handle_def *call_id_
  , /* In    */ DIALOGUE_ID_def dialogueId
  , /* In    */ IDL_long TraceType
  )
{
	SRVRTRACE_ENTER(FILE_AME+30);
	char tmpString[50];
	odbc_SQLSvc_DisableServerTrace_exc_ exception={0,0,0};

	if (srvrGlobal->srvrState == SRVR_STOPPING)
	{
		exception.exception_nr = odbc_SQLSvc_DisableServerTrace_TraceError_exn_;
	}
	else if (srvrGlobal->srvrState == SRVR_AVAILABLE)
	{
		if ((dialogueId == srvrGlobal->dialogueId) || (dialogueId == 0))
			DISABLE_SERVER_TRACE(TraceType);
		else
			exception.exception_nr = odbc_SQLSvc_DisableServerTrace_TraceError_exn_;
	}
	else
		exception.exception_nr = odbc_SQLSvc_DisableServerTrace_TraceError_exn_;

	if (exception.exception_nr != 0)
	{
		sprintf(tmpString, "Server Trace Disable failed.");
		SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0,
				ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, tmpString);
	}
	odbc_SQLSvc_DisableServerTrace_ts_res_(objtag_, call_id_, &exception);
	SRVRTRACE_EXIT(FILE_AME+30);
}

// Records the requested statistics type in srvrGlobal->resourceStatistics,
// logging an informational event on the off-to-on transition
// (RES_STATS_EVENT builds only).
void SRVR::ENABLE_STATISTICS(IDL_long StatisticsType)
{
	char tmpString[50];

	// Statistics class here
	if (srvrGlobal->resourceStatistics == 0 && StatisticsType > 0)
	{
#ifdef RES_STATS_EVENT
		stringstream ss;
		ss << "Server Statistics Enabled." << " (DSName=" << srvrGlobal->DSName << " resourceStatistics=0x" << hex << StatisticsType << ")";
		//sprintf(tmpString, "Server Statistics Enabled.");
		SendEventMsg(	MSG_RES_STAT_INFO,
						EVENTLOG_INFORMATION_TYPE,
						srvrGlobal->nskProcessInfo.processId,
						ODBCMX_SERVER,
						srvrGlobal->srvrObjRef,
						4,
						srvrGlobal->sessionId,
						"STATISTICS INFORMATION",
						"0",
						//tmpString);
						ss.str().c_str());
#endif
	}
	srvrGlobal->resourceStatistics = StatisticsType;
}

/*
 * Asynchronous method function for
 * operation 'odbc_SQLSvc_EnableStatistics'
 *
 * Enables resource statistics when the server is AVAILABLE and the dialogue
 * id matches (or is 0); otherwise reports a StatisticsError exception.
 */
extern "C" void
odbc_SQLSvc_EnableServerStatistics_ame_(
    /* In    */ CEE_tag_def objtag_
  , /* In    */ const CEE_handle_def *call_id_
  , /* In    */ DIALOGUE_ID_def dialogueId
  , /* In    */ IDL_long StatisticsType
  )
{
	SRVRTRACE_ENTER(FILE_AME+31);
	char tmpString[50];
	odbc_SQLSvc_EnableServerStatistics_exc_ exception={0,0,0};

	if (srvrGlobal->srvrState == SRVR_STOPPING)
	{
		exception.exception_nr = odbc_SQLSvc_EnableServerStatistics_StatisticsError_exn_;
	}
	else if (srvrGlobal->srvrState == SRVR_AVAILABLE)
	{
		if ((dialogueId == srvrGlobal->dialogueId) || (dialogueId == 0))
			ENABLE_STATISTICS(StatisticsType);
		else
			exception.exception_nr = odbc_SQLSvc_EnableServerStatistics_StatisticsError_exn_;
	}
	else
		exception.exception_nr = odbc_SQLSvc_EnableServerStatistics_StatisticsError_exn_;

	if (exception.exception_nr != 0)
	{
		sprintf(tmpString, "Server Statistics Enable failed.");
		SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0,
				ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, tmpString);
	}
	odbc_SQLSvc_EnableServerStatistics_ts_res_(objtag_, call_id_, &exception);
	SRVRTRACE_EXIT(FILE_AME+31);
}

// Resets resourceStatistics to 0, logging an informational event on the
// on-to-off transition (RES_STATS_EVENT builds only).
void SRVR::DISABLE_STATISTICS()
{
	char tmpString[50];

	if(srvrGlobal->resourceStatistics > 0) // check if Statistics is disabled or not
	{
#ifdef RES_STATS_EVENT
		stringstream ss;
		ss << "Server Statistics Disabled." << " (DSName=" << srvrGlobal->DSName << " resourceStatistics=0x" << hex << srvrGlobal->resourceStatistics << ")";
		// Statistics class here
		//sprintf(tmpString, "Server Statistics Disabled.");
		SendEventMsg(	MSG_RES_STAT_INFO,
						EVENTLOG_INFORMATION_TYPE,
						srvrGlobal->nskProcessInfo.processId,
						ODBCMX_SERVER,
						srvrGlobal->srvrObjRef,
						4,
						srvrGlobal->sessionId,
						"STATISTICS INFORMATION",
						"0",
						//tmpString);
						ss.str().c_str());
#endif
		srvrGlobal->resourceStatistics = 0;
	}
}

/*
 * Asynchronous method function for
 * operation 'odbc_SQLSvc_DisableServerStatistics'
 *
 * Disables resource statistics when the server is AVAILABLE and the dialogue
 * id matches (or is 0); otherwise reports a StatisticsError exception.
 * (Function continues beyond this chunk.)
 */
extern "C" void
odbc_SQLSvc_DisableServerStatistics_ame_(
    /* In    */ CEE_tag_def objtag_
  , /* In    */ const CEE_handle_def *call_id_
  , /* In    */ DIALOGUE_ID_def dialogueId
  )
{
	SRVRTRACE_ENTER(FILE_AME+32);
	char tmpString[50];
	odbc_SQLSvc_DisableServerStatistics_exc_ exception={0,0,0};

	if (srvrGlobal->srvrState == SRVR_STOPPING)
	{
		exception.exception_nr = odbc_SQLSvc_DisableServerStatistics_StatisticsError_exn_;
	}
	else if (srvrGlobal->srvrState == SRVR_AVAILABLE)
	{
		if ((dialogueId == srvrGlobal->dialogueId) || (dialogueId == 0))
			DISABLE_STATISTICS();
		else
			exception.exception_nr = odbc_SQLSvc_DisableServerStatistics_StatisticsError_exn_;
	}
	else
		exception.exception_nr = odbc_SQLSvc_DisableServerStatistics_StatisticsError_exn_;

	if (exception.exception_nr != 0)
	{
		sprintf(tmpString, "Server Statistics Disable failed.");
		SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0,
				ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, tmpString);
	}
	odbc_SQLSvc_DisableServerStatistics_ts_res_(objtag_, call_id_, &exception);
SRVRTRACE_EXIT(FILE_AME+32); } void SRVR::UPDATE_SERVER_CONTEXT(const SRVR_CONTEXT_def *srvrContext) { SRVRTRACE_ENTER(FILE_AME+33); int i; CEE_status sts; RES_DESC_def *pResValuesIn; RES_DESC_def *pResValues; ENV_DESC_def *pEnvValuesIn; ENV_DESC_def *pEnvValues; srvrGlobal->srvrContext.srvrIdleTimeout = srvrContext->srvrIdleTimeout; srvrGlobal->srvrContext.connIdleTimeout = srvrContext->connIdleTimeout; if (srvrGlobal->srvrContext.srvrIdleTimeout == DEFAULT_SRVR_IDLE_TIMEOUT) srvrGlobal->srvrContext.srvrIdleTimeout = DEFAULT_SRVR_IDLE_TIMEOUT_MINS; if (srvrGlobal->srvrContext.connIdleTimeout == DEFAULT_CONN_IDLE_TIMEOUT) srvrGlobal->srvrContext.connIdleTimeout = DEFAULT_CONN_IDLE_TIMEOUT_MINS; if (srvrGlobal->srvrContext.resDescList._length > 0) { int len_length = srvrGlobal->srvrContext.resDescList._length; RES_DESC_def *p_buffer = (RES_DESC_def *)srvrGlobal->srvrContext.resDescList._buffer; for( i=0; i < len_length; i++) { pResValues = p_buffer + i; if (pResValues->Action != NULL) delete pResValues->Action; } delete srvrGlobal->srvrContext.resDescList._buffer; } srvrGlobal->resGovernOn = FALSE; srvrGlobal->srvrContext.resDescList._buffer = NULL; srvrGlobal->srvrContext.resDescList._length = 0; if (srvrGlobal->srvrContext.envDescList._length > 0) { int len_length = srvrGlobal->srvrContext.envDescList._length; ENV_DESC_def *p_buffer = (ENV_DESC_def *)srvrGlobal->srvrContext.envDescList._buffer; for( i=0; i < len_length; i++) { pEnvValues = p_buffer + i; if (pEnvValues->VarVal != NULL) delete pEnvValues->VarVal; } delete srvrGlobal->srvrContext.envDescList._buffer; } srvrGlobal->envVariableOn = FALSE; srvrGlobal->srvrContext.envDescList._buffer = NULL; srvrGlobal->srvrContext.envDescList._length = 0; // Copy the srvr Context to Global Structure if(srvrContext->resDescList._length > 0) { srvrGlobal->resGovernOn = TRUE; srvrGlobal->srvrContext.resDescList._length=srvrContext->resDescList._length; srvrGlobal->srvrContext.resDescList._buffer = new 
RES_DESC_def[srvrContext->resDescList._length];
		if (srvrGlobal->srvrContext.resDescList._buffer == NULL)
		{
//LCOV_EXCL_START
			SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE,
					srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
					srvrGlobal->srvrObjRef, 1, "resDescList._buffer");
			exitServerProcess();
//LCOV_EXCL_STOP
		}
		// Shallow-copy the descriptor array first; the Action pointers it now
		// holds are replaced below with privately-owned copies.
		memcpy((void *)(srvrGlobal->srvrContext.resDescList._buffer),
				srvrContext->resDescList._buffer,
				(sizeof(RES_DESC_def)*srvrGlobal->srvrContext.resDescList._length));
		int len_length = srvrContext->resDescList._length;
		RES_DESC_def *p_Inbuffer = (RES_DESC_def *)srvrContext->resDescList._buffer;
		RES_DESC_def *p_buffer = (RES_DESC_def *)srvrGlobal->srvrContext.resDescList._buffer;
		for( i=0; i < len_length; i++)
		{
			pResValuesIn = p_Inbuffer + i;
			pResValues = p_buffer + i;
			// Deep-copy each Action string so the global context does not alias
			// the caller's buffers.
			pResValues->Action = new char[strlen(pResValuesIn->Action) + 1];
			if (pResValues->Action == NULL)
			{
//LCOV_EXCL_START
				SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE,
						srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
						srvrGlobal->srvrObjRef, 1, "Action");
				exitServerProcess();
//LCOV_EXCL_STOP
			}
			strcpy(pResValues->Action,pResValuesIn->Action);
		}
	}
	if(srvrContext->envDescList._length > 0)
	{
		srvrGlobal->envVariableOn = TRUE;
		srvrGlobal->srvrContext.envDescList._length=srvrContext->envDescList._length;
		srvrGlobal->srvrContext.envDescList._buffer = new ENV_DESC_def[srvrContext->envDescList._length];
		if (srvrGlobal->srvrContext.envDescList._buffer == NULL)
		{
//LCOV_EXCL_START
			SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE,
					srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
					srvrGlobal->srvrObjRef, 1, "envDescList._buffer");
			exitServerProcess();
//LCOV_EXCL_STOP
		}
		// Same pattern as the resource list: shallow copy, then deep-copy the
		// VarVal strings.
		memcpy((void *)(srvrGlobal->srvrContext.envDescList._buffer),
				srvrContext->envDescList._buffer,
				(sizeof(ENV_DESC_def)*srvrGlobal->srvrContext.envDescList._length));
		int len_length = srvrContext->envDescList._length;
		ENV_DESC_def *p_Inbuffer = (ENV_DESC_def *)srvrContext->envDescList._buffer;
		ENV_DESC_def *p_buffer = srvrGlobal->srvrContext.envDescList._buffer;
		for( i=0; i < len_length; i++)
		{
			pEnvValuesIn = p_Inbuffer + i;
			pEnvValues = p_buffer + i;
			pEnvValues->VarVal = new char[strlen(pEnvValuesIn->VarVal) + 1];
			if (pEnvValues->VarVal == NULL)
			{
//LCOV_EXCL_START
				SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE,
						srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
						srvrGlobal->srvrObjRef, 1, "VarVal");
				exitServerProcess();
//LCOV_EXCL_STOP
			}
			strcpy(pEnvValues->VarVal,pEnvValuesIn->VarVal);
		}
	}
	SRVRTRACE_EXIT(FILE_AME+33);
}

/*
 * Asynchronous method function for
 * operation 'odbc_SQLSvc_UpdateServerContext'
 *
 * Accepts a new server context while AVAILABLE, then re-arms the server idle
 * timer from the (possibly defaulted) srvrIdleTimeout.
 */
extern "C" void
odbc_SQLSvc_UpdateServerContext_ame_(
	/* In    */ CEE_tag_def objtag_
  , /* In    */ const CEE_handle_def *call_id_
  , /* In    */ const SRVR_CONTEXT_def *srvrContext
  )
{
	SRVRTRACE_ENTER(FILE_AME+34);
	CEE_status sts;
	char tmpString[100];
	odbc_SQLSvc_UpdateServerContext_exc_ exception={0,0,0};

	if (srvrGlobal->srvrState == SRVR_STOPPING || srvrGlobal->stopTypeFlag == STOP_WHEN_DISCONNECTED)
	{
		exception.exception_nr = odbc_SQLSvc_UpdateServerContext_SQLError_exn_;
	}
	else if (srvrGlobal->srvrState == SRVR_AVAILABLE)
	{
		UPDATE_SERVER_CONTEXT(srvrContext);
		// Cancel any existing idle timer before creating a new one.
		if (CEE_HANDLE_IS_NIL(&srvrGlobal->srvrIdleTimerHandle) == IDL_FALSE)
		{
			CEE_TIMER_DESTROY(&srvrGlobal->srvrIdleTimerHandle);
			CEE_HANDLE_SET_NIL(&srvrGlobal->srvrIdleTimerHandle);
		}
		if (srvrGlobal->srvrContext.srvrIdleTimeout != INFINITE_SRVR_IDLE_TIMEOUT)
		{
			// Timeout is stored in minutes; the timer wants seconds.
			sts = CEE_TIMER_CREATE2((long)srvrGlobal->srvrContext.srvrIdleTimeout * 60, 0,
					srvrIdleTimerExpired, NULL,
					&srvrGlobal->srvrIdleTimerHandle,srvrGlobal->receiveThrId);
			if (sts != CEE_SUCCESS)
			{
//LCOV_EXCL_START
				char tmpString[30];
				CEE_HANDLE_SET_NIL(&srvrGlobal->srvrIdleTimerHandle);
				sprintf(tmpString, "%ld", sts);
				SendEventMsg(MSG_KRYPTON_ERROR, EVENTLOG_ERROR_TYPE,
						srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
						srvrGlobal->srvrObjRef, 2, tmpString, FORMAT_LAST_ERROR());
				SendEventMsg(MSG_SRVR_IDLE_TIMEOUT_ERROR,
EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 0); //LCOV_EXCL_STOP } } if (CEE_HANDLE_IS_NIL(&srvrGlobal->connIdleTimerHandle) == IDL_FALSE) { destroyConnIdleTimer(); } } else exception.exception_nr = odbc_SQLSvc_UpdateServerContext_SQLError_exn_; if (exception.exception_nr != 0) { //LCOV_EXCL_START sprintf(tmpString, "Update Server Context failed due to server state not Available."); SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, tmpString); //LCOV_EXCL_STOP } odbc_SQLSvc_UpdateServerContext_ts_res_(objtag_, call_id_, &exception); SRVRTRACE_EXIT(FILE_AME+34); } //LCOV_EXCL_START void SQL_EXECDIRECT(SRVR_INIT_PARAM_Def* initParam) { char RequestError[200]; SQLRETURN rc = SQL_SUCCESS; SRVR_STMT_HDL *CleanupStmt = NULL; RequestError[0] = '\0'; srvrGlobal = new SRVR_GLOBAL_Def; if (srvrGlobal == NULL) { SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, "N/A", 1, "srvrGlobal"); return; } initSqlCore(); if ((CleanupStmt = getSrvrStmt("STMT_CLEANUP_ON_1", TRUE)) == NULL) { SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, "N/A", 1, SQLSVC_EXCEPTION_UNABLE_TO_ALLOCATE_SQL_STMT); } else { rc = CleanupStmt->ExecDirect(NULL, "SET TRANSACTION AUTOCOMMIT ON", INTERNAL_STMT, TYPE_UNKNOWN, SQL_ASYNC_ENABLE_OFF, 0); if (rc == SQL_ERROR) { ERROR_DESC_def *p_buffer = CleanupStmt->sqlError.errorList._buffer; strncpy(RequestError, p_buffer->errorText,200); RequestError[199] = '\0'; SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, "N/A", 1, RequestError); } else { CleanupStmt->ExecDirect(NULL, initParam->sql, INTERNAL_STMT, TYPE_UNKNOWN, SQL_ASYNC_ENABLE_OFF, 0); if (rc == SQL_ERROR) { ERROR_DESC_def *p_buffer = CleanupStmt->sqlError.errorList._buffer; strncpy(RequestError, p_buffer->errorText,200); RequestError[199] = '\0'; SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, 
"N/A", 1, RequestError); } else { sprintf(RequestError, "Executed %s command successfully", initParam->sql); SendEventMsg(MSG_SRVR_ENV, EVENTLOG_INFORMATION_TYPE, 0, ODBCMX_SERVER, "N/A", 1, RequestError); } } CleanupStmt->cleanupAll(); CleanupStmt->currentMethod = odbc_SQLSvc_Close_ldx_; CleanupStmt->freeResourceOpt = SQL_DROP; FREESTATEMENT(CleanupStmt); } return; } short SQL_EXECDIRECT_FETCH(SRVR_INIT_PARAM_Def* initParam) { SRVR_STMT_HDL *pSrvrStmt; SQLRETURN rc = SQL_SUCCESS; short SQLDataInd=0; short SQLDataValueLen; unsigned long Index=0; long cnt = -1; char RequestError[200]; RequestError[0] = '\0'; srvrGlobal = new SRVR_GLOBAL_Def; if (srvrGlobal == NULL) { SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, "N/A", 1, "srvrGlobal"); return -1; } srvrGlobal->srvrType = initParam->srvrType; initSqlCore(); if ((pSrvrStmt = getSrvrStmt("STMT_SYSCATCNT_ON_1", TRUE)) == NULL) { SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, "N/A", 1, SQLSVC_EXCEPTION_UNABLE_TO_ALLOCATE_SQL_STMT); } else { rc = pSrvrStmt->ExecDirect(NULL, initParam->sql, EXTERNAL_STMT, TYPE_SELECT, SQL_ASYNC_ENABLE_OFF, 0); if (rc == SQL_ERROR) { ERROR_DESC_def *p_buffer = pSrvrStmt->sqlError.errorList._buffer; strncpy(RequestError, p_buffer->errorText,200); RequestError[199] = '\0'; SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, "N/A", 1, RequestError); } else { rc = pSrvrStmt->FetchPerf(100, 0, SQL_ASYNC_ENABLE_OFF, 0); if (rc == SQL_ERROR) { ERROR_DESC_def *p_buffer = pSrvrStmt->sqlError.errorList._buffer; strncpy(RequestError, p_buffer->errorText,200); RequestError[199] = '\0'; SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, "N/A", 1, RequestError); } else { if (rc != SQL_NO_DATA_FOUND) { Index = 0; while (Index < pSrvrStmt->outputDataValue._length - 1) { SQLDataInd = (short)*(unsigned char*)(pSrvrStmt->outputDataValue._buffer + Index); Index = Index + 1; if (SQLDataInd == 0) { cnt 
= *(long*)(pSrvrStmt->outputDataValue._buffer + Index); Index = Index + sizeof(SQLDataValueLen); Index = Index + 1; } } } else { sprintf(RequestError, "Get registered system catalog count failed, no data found."); SendEventMsg(MSG_SRVR_ENV, EVENTLOG_INFORMATION_TYPE, 0, ODBCMX_SERVER, "N/A", 1, RequestError); } } } pSrvrStmt->cleanupAll(); pSrvrStmt->currentMethod = odbc_SQLSvc_Close_ldx_; pSrvrStmt->freeResourceOpt = SQL_DROP; FREESTATEMENT(pSrvrStmt); } return (short)cnt; } //LCOV_EXCL_STOP //LCOV_EXCL_START /* * Asynchronous method function for * operation 'odbcas_ASSvc_SrvrMonitorCall' */ #define odbc_SQLSvc_MonitorCall_InvalidConnection_exn_ 2 extern "C" void odbc_SQLSvc_MonitorCall_ame_( /* In */ CEE_tag_def objtag_ , /* In */ const CEE_handle_def *call_id_ , /* In */ DIALOGUE_ID_def dialogueId ) { SRVRTRACE_ENTER(FILE_AME+36); odbc_SQLSvc_MonitorCall_exc_ exception_; if (srvrGlobal != NULL && srvrGlobal->srvrType == CORE_SRVR) { if (srvrGlobal->srvrState == SRVR_CONNECTED) { if (dialogueId != srvrGlobal->dialogueId) exception_.exception_nr = odbc_SQLSvc_MonitorCall_InvalidConnection_exn_; else odbc_SQLSvc_MonitorCall_sme_(objtag_, call_id_, &exception_, dialogueId); } else exception_.exception_nr = odbc_SQLSvc_MonitorCall_InvalidConnection_exn_; } else exception_.exception_nr = odbc_SQLSvc_MonitorCall_InvalidConnection_exn_; odbc_SQLSvc_MonitorCall_ts_res_(objtag_, call_id_, &exception_); SRVRTRACE_EXIT(FILE_AME+36); } extern "C" void odbc_SQLSvc_MonitorCall_sme_( /* In */ CEE_tag_def objtag_ , /* In */ const CEE_handle_def *call_id_ , /* Out */ odbc_SQLSvc_MonitorCall_exc_ *exception_ , /* In */ DIALOGUE_ID_def dialogueId ) { } bool InsertControls(char* sqlString, odbc_SQLSvc_ExecDirect_exc_ *exception_) { SRVR_STMT_HDL *pSrvrStmt; char ControlType[32]; char StatementName[128 + 1]; char RequestError[200]; // // skip white spaces and check first parameter // char VariableValue[200]; char seps[] = " \t\n"; char *token; char *saveptr; char ControlQuery[5000]; 
SQLRETURN iqqcode = SQL_SUCCESS; SRVR_STMT_HDL *QryControlSrvrStmt = NULL; long i = 0; long j = 0; long index = 0; char *sqlStringIndex; char ResetQuery[200]; if (sqlString[0] == 0) return false; VariableValue[0] = '\0'; ControlType[0] = '\0'; StatementName[0] = '\0'; strncpy(VariableValue, sqlString, 200); VariableValue[200 - 1]='\0'; token = strtok_r(VariableValue, seps, &saveptr); if (token == NULL) return false; if (strnicmp(token,"PLAN",4) != 0) return false; if ((strnicmp(token,"PLANINSCQD",10) == 0) || (strnicmp(token,"PLANINSCQS",10) == 0)) { strcpy(ControlType, token); token = strtok_r(NULL, seps, &saveptr); if (token == NULL) { exception_->exception_nr = odbc_SQLSvc_ExecDirect_ParamError_exn_; exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_INVALID_PLAN_STMT_LABEL; return true; } else { strcpy(StatementName, token); strupr(StatementName); token = strtok_r(NULL, seps, &saveptr); if (token == NULL) { exception_->exception_nr = odbc_SQLSvc_ExecDirect_ParamError_exn_; exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_INVALID_PLAN_STMT_LABEL; return true; } index = token - VariableValue; sqlStringIndex = sqlString+index; } } else if ((strnicmp(token,"PLANDELCQD",10) == 0) || (strnicmp(token,"PLANDELCQS",10) == 0)) { strcpy(ControlType, token); token = strtok_r(NULL, seps, &saveptr); if (token == NULL) { exception_->exception_nr = odbc_SQLSvc_ExecDirect_ParamError_exn_; exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_INVALID_PLAN_STMT_LABEL; return true; } else { strcpy(StatementName, token); strupr(StatementName); token = strtok_r(NULL, seps, &saveptr); if (token != NULL) { exception_->exception_nr = odbc_SQLSvc_ExecDirect_ParamError_exn_; exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_INVALID_PLAN_STMT_LABEL; return true; } } } else return false; if ((QryControlSrvrStmt = getSrvrStmt("STMT_QRYSTS_ON_1", TRUE)) == NULL) { SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, 
srvrGlobal->srvrObjRef, 2, "LOCK_PLAN", "Allocate Statement"); exception_->exception_nr = odbc_SQLSvc_ExecDirect_ParamError_exn_; exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_UNABLE_TO_ALLOCATE_SQL_STMT; return true; } ResetQuery[0] = '\0'; ResetControls(ResetQuery); if ((strnicmp(ControlType,"PLANINSCQD",10) == 0) || (strnicmp(ControlType,"PLANINSCQS",10) == 0)) { iqqcode = QryControlSrvrStmt->Prepare(sqlStringIndex, TYPE_UNKNOWN, SQL_ASYNC_ENABLE_OFF, 0); if (iqqcode == SQL_ERROR) { ERROR_DESC_def *p_buffer = QryControlSrvrStmt->sqlError.errorList._buffer; strncpy(RequestError, p_buffer->errorText,200); RequestError[200] = '\0'; SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "LOCK_PLAN", RequestError); exception_->exception_nr = odbc_SQLSvc_ExecDirect_SQLError_exn_; exception_->u.SQLError.errorList._length = QryControlSrvrStmt->sqlError.errorList._length; exception_->u.SQLError.errorList._buffer = QryControlSrvrStmt->sqlError.errorList._buffer; return true; } ResetQuery[0] = '\0'; ResetControls(ResetQuery); char *in = (char *)sqlStringIndex; short singlequote=0; char *sqlStringOut; while (*in != '\0') { if (*in == '\'') singlequote++; in++; } if(singlequote>1) { sqlStringOut = new char[(strlen(sqlStringIndex)+singlequote)+1]; sqlStringOut[0] = '\0'; char *out = (char *)sqlStringOut; in = (char *)sqlStringIndex; while (*in != '\0') { if (*in == '\'') *out++ = '\''; *out++ = *in++; } *out++ = '\0'; } else { sqlStringOut = new char[strlen(sqlStringIndex)]; sqlStringOut[0] = '\0'; strcpy(sqlStringOut,sqlStringIndex); } if (strnicmp(ControlType,"PLANINSCQD",10) == 0) { ControlQuery[0] = '\0'; sprintf(ControlQuery,"INSERT INTO NEO.PUBLIC_ACCESS_SCHEMA.MXCS_STATEMENT_CONTROLS VALUES (UPSHIFT('%s'), 1, 1, '%s', CURRENT_TIMESTAMP(6))", StatementName, sqlStringOut); iqqcode = QryControlSrvrStmt->ExecDirect(NULL, ControlQuery, EXTERNAL_STMT, TYPE_INSERT, 
SQL_ASYNC_ENABLE_OFF, 0); if (iqqcode == SQL_ERROR) { ERROR_DESC_def *p_buffer = QryControlSrvrStmt->sqlError.errorList._buffer; strncpy(RequestError, p_buffer->errorText,200); RequestError[200] = '\0'; SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "LOCK_PLAN", RequestError); exception_->exception_nr = odbc_SQLSvc_ExecDirect_SQLError_exn_; exception_->u.SQLError.errorList._length = QryControlSrvrStmt->sqlError.errorList._length; exception_->u.SQLError.errorList._buffer = QryControlSrvrStmt->sqlError.errorList._buffer; delete sqlStringOut; return true; } } else if (strnicmp(ControlType,"PLANINSCQS",10) == 0) { ControlQuery[0] = '\0'; sprintf(ControlQuery,"SELECT STATEMENT_NAME FROM NEO.PUBLIC_ACCESS_SCHEMA.MXCS_STATEMENT_CONTROLS WHERE STATEMENT_NAME = UPSHIFT('%s') AND CONTROL_TYPE = 2 FOR BROWSE ACCESS", StatementName); iqqcode = QryControlSrvrStmt->ExecDirect(NULL, ControlQuery, EXTERNAL_STMT, TYPE_SELECT, SQL_ASYNC_ENABLE_OFF, 0); if (iqqcode == SQL_ERROR) { ERROR_DESC_def *p_buffer = QryControlSrvrStmt->sqlError.errorList._buffer; strncpy(RequestError, p_buffer->errorText,200); RequestError[200] = '\0'; SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "LOCK_PLAN", RequestError); exception_->exception_nr = odbc_SQLSvc_ExecDirect_SQLError_exn_; exception_->u.SQLError.errorList._length = QryControlSrvrStmt->sqlError.errorList._length; exception_->u.SQLError.errorList._buffer = QryControlSrvrStmt->sqlError.errorList._buffer; return true; } else { iqqcode = QryControlSrvrStmt->FetchPerf(100, 0, SQL_ASYNC_ENABLE_OFF, 0); if (iqqcode == SQL_ERROR) { ERROR_DESC_def *p_buffer = QryControlSrvrStmt->sqlError.errorList._buffer; strncpy(RequestError, p_buffer->errorText,200); RequestError[200] = '\0'; SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, 
srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "LOCK_PLAN", RequestError); exception_->exception_nr = odbc_SQLSvc_ExecDirect_SQLError_exn_; exception_->u.SQLError.errorList._length = QryControlSrvrStmt->sqlError.errorList._length; exception_->u.SQLError.errorList._buffer = QryControlSrvrStmt->sqlError.errorList._buffer; return true; } else if (iqqcode != SQL_NO_DATA_FOUND) { exception_->exception_nr = odbc_SQLSvc_ExecDirect_ParamError_exn_; exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_PLAN_STMT_LABEL_ALREADY_EXISTS; return true; } } i = 0; short k = 1; j = 0; while (i < strlen(sqlStringOut)) { ControlQuery[0] = '\0'; sprintf(ControlQuery,"INSERT INTO NEO.PUBLIC_ACCESS_SCHEMA.MXCS_STATEMENT_CONTROLS VALUES (UPSHIFT('%s'), 2, %d, '%.3800s', CURRENT_TIMESTAMP(6))", StatementName, k, sqlStringOut+i); iqqcode = QryControlSrvrStmt->ExecDirect(NULL, ControlQuery, EXTERNAL_STMT, TYPE_INSERT, SQL_ASYNC_ENABLE_OFF, 0); if (iqqcode == SQL_ERROR) { ERROR_DESC_def *p_buffer = QryControlSrvrStmt->sqlError.errorList._buffer; strncpy(RequestError, p_buffer->errorText,200); RequestError[200] = '\0'; SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "LOCK_PLAN", RequestError); exception_->exception_nr = odbc_SQLSvc_ExecDirect_SQLError_exn_; exception_->u.SQLError.errorList._length = QryControlSrvrStmt->sqlError.errorList._length; exception_->u.SQLError.errorList._buffer = QryControlSrvrStmt->sqlError.errorList._buffer; delete sqlStringOut; return true; } i = i + 3800; k++; } } delete sqlStringOut; return true; } else if (strnicmp(ControlType,"PLANDELCQD",10) == 0) { ControlQuery[0] = '\0'; sprintf(ControlQuery,"DELETE FROM NEO.PUBLIC_ACCESS_SCHEMA.MXCS_STATEMENT_CONTROLS WHERE STATEMENT_NAME = UPSHIFT('%s') and CONTROL_TYPE = 1", StatementName); iqqcode = QryControlSrvrStmt->ExecDirect(NULL, ControlQuery, EXTERNAL_STMT, TYPE_DELETE, 
SQL_ASYNC_ENABLE_OFF, 0); if (iqqcode == SQL_ERROR) { ERROR_DESC_def *p_buffer = QryControlSrvrStmt->sqlError.errorList._buffer; strncpy(RequestError, p_buffer->errorText,200); RequestError[200] = '\0'; SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "LOCK_PLAN", RequestError); exception_->exception_nr = odbc_SQLSvc_ExecDirect_SQLError_exn_; exception_->u.SQLError.errorList._length = QryControlSrvrStmt->sqlError.errorList._length; exception_->u.SQLError.errorList._buffer = QryControlSrvrStmt->sqlError.errorList._buffer; return true; } } else if (strnicmp(ControlType,"PLANDELCQS",10) == 0) { ControlQuery[0] = '\0'; sprintf(ControlQuery,"DELETE FROM NEO.PUBLIC_ACCESS_SCHEMA.MXCS_STATEMENT_CONTROLS WHERE STATEMENT_NAME = UPSHIFT('%s') and CONTROL_TYPE = 2", StatementName); iqqcode = QryControlSrvrStmt->ExecDirect(NULL, ControlQuery, EXTERNAL_STMT, TYPE_DELETE, SQL_ASYNC_ENABLE_OFF, 0); if (iqqcode == SQL_ERROR) { ERROR_DESC_def *p_buffer = QryControlSrvrStmt->sqlError.errorList._buffer; strncpy(RequestError, p_buffer->errorText,200); RequestError[200] = '\0'; SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "LOCK_PLAN", RequestError); exception_->exception_nr = odbc_SQLSvc_ExecDirect_SQLError_exn_; exception_->u.SQLError.errorList._length = QryControlSrvrStmt->sqlError.errorList._length; exception_->u.SQLError.errorList._buffer = QryControlSrvrStmt->sqlError.errorList._buffer; return true; } } else return false; return true; } //3155 bool LoadControls(char* sqlString, bool genOrexc, char* genRequestError, odbc_SQLSvc_PrepareRowset_exc_ *exception_, SRVR_STMT_HDL **stmtHandle) { SRVR_STMT_HDL *pSrvrStmt; char ControlType[32]; char StatementName[128 + 1]; // // skip white spaces and check first parameter // char VariableValue[200]; char seps[] = " \t\n"; char *token; char *saveptr; 
char ControlQuery[5000]; SQLRETURN iqqcode = SQL_SUCCESS; SRVR_STMT_HDL *QryControlSrvrStmt = NULL; long i = 0; long j = 0; long Index = 0; char ResetQuery[200]; bool isControlNotPresent = false; if (sqlString[0] == 0) return false; VariableValue[0] = '\0'; ControlType[0] = '\0'; StatementName[0] = '\0'; strncpy(VariableValue, sqlString, 200); VariableValue[200]='\0'; token = strtok_r(VariableValue, seps, &saveptr); if (token == NULL) return false; if (strnicmp(token,"PLANLOADTABLE",13) == 0) return false; if (strnicmp(token,"PLAN",4) != 0) return false; else { strcpy(ControlType, token); token = strtok_r(NULL, seps, &saveptr); if (token == NULL) { if (genOrexc) { exception_->exception_nr = odbc_SQLSvc_PrepareRowset_ParamError_exn_; exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_INVALID_PLAN_STMT_LABEL; } strcpy (genRequestError, "Invalid Statement Name."); return true; } else { strcpy(StatementName, token); strupr(StatementName); if (StatementName[strlen(StatementName)-1] == ';') StatementName[strlen(StatementName)-1] = '\0'; token = strtok_r(NULL, seps, &saveptr); if (token != NULL) { if (genOrexc) { exception_->exception_nr = odbc_SQLSvc_PrepareRowset_ParamError_exn_; exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_INVALID_PLAN_STMT_LABEL; } else strcpy (genRequestError, "Invalid Statement, Format: PLANLOADSHAPE <stmt name>."); return true; } } } if ((QryControlSrvrStmt = getSrvrStmt("STMT_QRYSTS_ON_1", TRUE)) == NULL) { SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "LOCK_PLAN", "Allocate Statement"); if (genOrexc) { exception_->exception_nr = odbc_SQLSvc_PrepareRowset_ParamError_exn_; exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_UNABLE_TO_ALLOCATE_SQL_STMT; } sprintf(genRequestError, "Allocate Statement STMT_QRYSTS_ON_1 failed."); return true; } //3155 *stmtHandle = QryControlSrvrStmt; if (strnicmp(ControlType,"PLANLOADSHAPE",13) == 0) { 
unsigned long Index=0; short SQLDataInd=0; short SQLDataValueLen; unsigned long ControlQueryLen; unsigned long TotalSQLDataValueLen=0; ResetQuery[0] = '\0'; ResetControls(ResetQuery); ControlQuery[0] = '\0'; sprintf(ControlQuery,"SELECT CONTROL_TEXT FROM NEO.PUBLIC_ACCESS_SCHEMA.MXCS_STATEMENT_CONTROLS where STATEMENT_NAME = UPSHIFT('%s') and CONTROL_TYPE = 1 FOR BROWSE ACCESS", StatementName); iqqcode = QryControlSrvrStmt->ExecDirect(NULL, ControlQuery, EXTERNAL_STMT, TYPE_SELECT, SQL_ASYNC_ENABLE_OFF, 0); if (iqqcode != SQL_SUCCESS) { ERROR_DESC_def *p_buffer = QryControlSrvrStmt->sqlError.errorList._buffer; SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "LOCK_PLAN", genRequestError); if (genOrexc) { exception_->exception_nr = odbc_SQLSvc_PrepareRowset_SQLError_exn_; exception_->u.SQLError.errorList._length = QryControlSrvrStmt->sqlError.errorList._length; exception_->u.SQLError.errorList._buffer = QryControlSrvrStmt->sqlError.errorList._buffer; } strncpy(genRequestError, p_buffer->errorText,200); genRequestError[199] = '\0'; ResetQuery[0] = '\0'; ResetControls(ResetQuery); return true; } else { iqqcode = QryControlSrvrStmt->FetchPerf(100, 0, SQL_ASYNC_ENABLE_OFF, 0); if (iqqcode == SQL_ERROR) { ERROR_DESC_def *p_buffer = QryControlSrvrStmt->sqlError.errorList._buffer; SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "LOCK_PLAN", genRequestError); if (genOrexc) { exception_->exception_nr = odbc_SQLSvc_PrepareRowset_SQLError_exn_; exception_->u.SQLError.errorList._length = QryControlSrvrStmt->sqlError.errorList._length; exception_->u.SQLError.errorList._buffer = QryControlSrvrStmt->sqlError.errorList._buffer; } strncpy(genRequestError, p_buffer->errorText,200); genRequestError[199] = '\0'; ResetQuery[0] = '\0'; ResetControls(ResetQuery); return true; } else if (iqqcode != 
SQL_NO_DATA_FOUND) { ControlQueryLen=QryControlSrvrStmt->outputDataValue._length; Index = 0; while (Index < QryControlSrvrStmt->outputDataValue._length - 1) { ControlQuery[0] = '\0'; SQLDataInd = (short)*(unsigned char*)(QryControlSrvrStmt->outputDataValue._buffer + Index); Index = Index + 1; if (SQLDataInd == 0) { SQLDataValueLen = 0; SQLDataValueLen = *(short*)(QryControlSrvrStmt->outputDataValue._buffer + Index); Index = Index + sizeof(SQLDataValueLen); strncat(ControlQuery, (char *)QryControlSrvrStmt->outputDataValue._buffer + Index, SQLDataValueLen); ControlQuery[SQLDataValueLen] = '\0'; Index = Index + SQLDataValueLen + 1; } else { if (genOrexc) { exception_->exception_nr = odbc_SQLSvc_PrepareRowset_ParamError_exn_; exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_UNABLE_TO_ALLOCATE_SQL_STMT; } strcpy(genRequestError, "Invalid CQD entered."); ResetQuery[0] = '\0'; ResetControls(ResetQuery); return true; } iqqcode = QryControlSrvrStmt->ExecDirect(NULL, ControlQuery, INTERNAL_STMT, TYPE_UNKNOWN, SQL_ASYNC_ENABLE_OFF, 0); if (iqqcode == SQL_ERROR) { ERROR_DESC_def *p_buffer = QryControlSrvrStmt->sqlError.errorList._buffer; SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "LOCK_PLAN", genRequestError); if (genOrexc) { exception_->exception_nr = odbc_SQLSvc_PrepareRowset_SQLError_exn_; exception_->u.SQLError.errorList._length = QryControlSrvrStmt->sqlError.errorList._length; exception_->u.SQLError.errorList._buffer = QryControlSrvrStmt->sqlError.errorList._buffer; } strncpy(genRequestError, p_buffer->errorText,200); genRequestError[199] = '\0'; ResetQuery[0] = '\0'; ResetControls(ResetQuery); return true; } } } else { isControlNotPresent = true; } } ControlQuery[0] = '\0'; sprintf(ControlQuery,"SELECT CONTROL_TEXT FROM NEO.PUBLIC_ACCESS_SCHEMA.MXCS_STATEMENT_CONTROLS where STATEMENT_NAME = UPSHIFT('%s') and CONTROL_TYPE = 2 ORDER BY CONTROL_SEQUENCE FOR BROWSE 
ACCESS", StatementName); iqqcode = QryControlSrvrStmt->ExecDirect(NULL, ControlQuery, EXTERNAL_STMT, TYPE_SELECT, SQL_ASYNC_ENABLE_OFF, 0); if (iqqcode != SQL_SUCCESS) { ERROR_DESC_def *p_buffer = QryControlSrvrStmt->sqlError.errorList._buffer; SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "LOCK_PLAN", genRequestError); if (genOrexc) { exception_->exception_nr = odbc_SQLSvc_PrepareRowset_SQLError_exn_; exception_->u.SQLError.errorList._length = QryControlSrvrStmt->sqlError.errorList._length; exception_->u.SQLError.errorList._buffer = QryControlSrvrStmt->sqlError.errorList._buffer; } strncpy(genRequestError, p_buffer->errorText,200); genRequestError[199] = '\0'; ResetQuery[0] = '\0'; ResetControls(ResetQuery); return true; } else { iqqcode = QryControlSrvrStmt->FetchPerf(100, 0, SQL_ASYNC_ENABLE_OFF, 0); if (iqqcode == SQL_ERROR) { ERROR_DESC_def *p_buffer = QryControlSrvrStmt->sqlError.errorList._buffer; SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "LOCK_PLAN", genRequestError); if (genOrexc) { exception_->exception_nr = odbc_SQLSvc_PrepareRowset_SQLError_exn_; exception_->u.SQLError.errorList._length = QryControlSrvrStmt->sqlError.errorList._length; exception_->u.SQLError.errorList._buffer = QryControlSrvrStmt->sqlError.errorList._buffer; } strncpy(genRequestError, p_buffer->errorText,200); genRequestError[199] = '\0'; ResetQuery[0] = '\0'; ResetControls(ResetQuery); return true; } else if (iqqcode != SQL_NO_DATA_FOUND) { ControlQueryLen=QryControlSrvrStmt->outputDataValue._length; char *ControlQueryShape; ControlQueryShape = new char[ControlQueryLen]; ControlQueryShape[0] = '\0'; Index = 0; while (Index < QryControlSrvrStmt->outputDataValue._length - 1) { SQLDataInd = (short)*(unsigned char*)(QryControlSrvrStmt->outputDataValue._buffer + Index); Index = Index + 1; if 
(SQLDataInd == 0) { SQLDataValueLen = 0; SQLDataValueLen = *(short*)(QryControlSrvrStmt->outputDataValue._buffer + Index); Index = Index + sizeof(SQLDataValueLen); strncat(ControlQueryShape+TotalSQLDataValueLen, (char *)QryControlSrvrStmt->outputDataValue._buffer + Index, SQLDataValueLen); Index = Index + SQLDataValueLen + 1; TotalSQLDataValueLen = TotalSQLDataValueLen + SQLDataValueLen; } else { if (genOrexc) { exception_->exception_nr = odbc_SQLSvc_PrepareRowset_ParamError_exn_; exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_UNABLE_TO_ALLOCATE_SQL_STMT; } strcpy(genRequestError, "Invalid CQS entered."); ResetQuery[0] = '\0'; ResetControls(ResetQuery); if (ControlQueryShape != NULL) delete ControlQueryShape; return true; } } ControlQueryShape[TotalSQLDataValueLen] = '\0'; iqqcode = QryControlSrvrStmt->ExecDirect(NULL, ControlQueryShape, INTERNAL_STMT, TYPE_UNKNOWN, SQL_ASYNC_ENABLE_OFF, 0); if (iqqcode == SQL_ERROR) { ERROR_DESC_def *p_buffer = QryControlSrvrStmt->sqlError.errorList._buffer; SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "LOCK_PLAN", "Setting CQDs failed"); if (genOrexc) { exception_->exception_nr = odbc_SQLSvc_PrepareRowset_SQLError_exn_; exception_->u.SQLError.errorList._length = QryControlSrvrStmt->sqlError.errorList._length; exception_->u.SQLError.errorList._buffer = QryControlSrvrStmt->sqlError.errorList._buffer; } strncpy(genRequestError, p_buffer->errorText,200); genRequestError[199] = '\0'; ResetQuery[0] = '\0'; ResetControls(ResetQuery); if (ControlQueryShape != NULL) delete ControlQueryShape; return true; } if (ControlQueryShape != NULL) delete ControlQueryShape; } else { if(isControlNotPresent) { if (genOrexc) { exception_->exception_nr = odbc_SQLSvc_PrepareRowset_ParamError_exn_; exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_INVALID_PLAN_STMT_LABEL; } strcpy (genRequestError, "Invalid Statement Name."); ResetQuery[0] = '\0'; 
ResetControls(ResetQuery); return true; } } } } else return false; return true; } bool ResetControls(char* genRequestError) { SRVR_STMT_HDL *QryControlSrvrStmt = NULL; char ControlQuery[200]; SQLRETURN iqqcode = SQL_SUCCESS; if ((QryControlSrvrStmt = getSrvrStmt("STMT_QRYRES_ON_1", TRUE)) == NULL) { SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "LOCK_PLAN", "Allocate Statement"); sprintf(genRequestError, "Allocate Statement STMT_QRYRES_ON_1 failed."); return true; } ControlQuery[0] = '\0'; sprintf(ControlQuery,"control query shape cut"); iqqcode = QryControlSrvrStmt->ExecDirect(NULL, ControlQuery, INTERNAL_STMT, TYPE_UNKNOWN, SQL_ASYNC_ENABLE_OFF, 0); if (iqqcode == SQL_ERROR) { ERROR_DESC_def *p_buffer = QryControlSrvrStmt->sqlError.errorList._buffer; SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "LOCK_PLAN", genRequestError); strncpy(genRequestError, p_buffer->errorText,200); genRequestError[199] = '\0'; return true; } ControlQuery[0] = '\0'; sprintf(ControlQuery,"control query default * reset"); iqqcode = QryControlSrvrStmt->ExecDirect(NULL, ControlQuery, INTERNAL_STMT, TYPE_UNKNOWN, SQL_ASYNC_ENABLE_OFF, 0); if (iqqcode == SQL_ERROR) { ERROR_DESC_def *p_buffer = QryControlSrvrStmt->sqlError.errorList._buffer; SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "LOCK_PLAN", genRequestError); strncpy(genRequestError, p_buffer->errorText,200); genRequestError[199] = '\0'; return true; } return true; } bool GetHashInfo(char* sqlString, char* genRequestError, char* HashTableInfo) { SRVR_STMT_HDL *pSrvrStmt; char ControlType[32]; char StatementName[128 + 1]; // // skip white spaces and check first parameter // char VariableValue[200]; char seps[] = " \t\n"; char seps2[] = " \t\n.;"; char 
*token; char ControlQuery[1024]; SQLRETURN iqqcode = SQL_SUCCESS; SRVR_STMT_HDL *QryControlSrvrStmt = NULL; long i = 0; long Index = 0; char *catToken, *schemaToken, *tablenameToken; SQL_IDENTIFIER_def tableName; bool syskeysPresent = false; bool resetNestedJoinsCQD = false; if (sqlString[0] == 0) return false; trim(sqlString); VariableValue[0] = '\0'; ControlType[0] = '\0'; StatementName[0] = '\0'; strncpy(VariableValue, sqlString, 200); VariableValue[200] = '\0'; char* saveptr; token = strtok_r(VariableValue, seps, &saveptr); if (token == NULL) return false; if (strnicmp(token,"PLANLOADTABLE",13) != 0) return false; else strcpy(ControlType, token); catToken = strtok_r(NULL, seps2, &saveptr); if (catToken == NULL) { strcpy (genRequestError, "Invalid Table Name."); return true; } else { schemaToken = strtok_r(NULL, seps2, &saveptr); if(schemaToken == NULL) { // format is PLANLOADTABLE <tableName> // Use default catalog and schema tablenameToken = catToken; catToken = srvrGlobal->DefaultCatalog; schemaToken = srvrGlobal->DefaultSchema; } else { tablenameToken = strtok_r(NULL, seps2, &saveptr); if(tablenameToken == NULL) { // format is PLANLOADTABLE <schema.tableName> // Use default catalog tablenameToken = schemaToken; schemaToken = catToken; catToken = srvrGlobal->DefaultCatalog; } } } if((tablenameToken[0] == '\"') && (tablenameToken[strlen(tablenameToken) -1] == '\"')) { strncpy(tableName,tablenameToken +1,strlen(tablenameToken) -2); tableName[strlen(tablenameToken) -2] = '\0'; } else strcpy(tableName,tablenameToken); if (strnicmp(ControlType,"PLANLOADTABLE",13) == 0) { unsigned long Index=0; short SQLDataInd=0; short SQLDataValueLen; unsigned long ControlQueryLen = 0; HashTableInfo[0] = '\0'; if ((QryControlSrvrStmt = getSrvrStmt("STMT_QRYSTS_ON_1", TRUE)) == NULL) { SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "TABLE_INFO", "Allocate Statement"); 
sprintf(genRequestError, "Allocate Statement STMT_QRYSTS_ON_1 failed."); return true; } // Some data sources may have the nested_joins CQD off. This can lead to poor // performance of the meta data queries that we're going to make here. So we'll // check if this CQD is OFF, and if it is, we'll turn it ON temporarily if( getSQLInfo(NESTED_JOINS) == false ) { strcpy(ControlQuery,"CONTROL QUERY DEFAULT nested_joins 'ON';"); iqqcode = QryControlSrvrStmt->ExecDirect(NULL, ControlQuery, INTERNAL_STMT, TYPE_UNKNOWN, SQL_ASYNC_ENABLE_OFF, 0); if (iqqcode != SQL_SUCCESS) { ERROR_DESC_def *p_buffer = QryControlSrvrStmt->sqlError.errorList._buffer; if(p_buffer != NULL) { strncpy(genRequestError, p_buffer->errorText,199); genRequestError[199] = '\0'; } else strcpy(genRequestError,"Unable to set nested_join CQD"); SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "TABLE_INFO", genRequestError); return true; } resetNestedJoinsCQD = true; } //For getting the reference of schema version char verBuffer[10]; verBuffer[0] = '\0'; for(i = 0; i < 7; i++) { Index = 0; SQLDataInd = 0; SQLDataValueLen = 0; ControlQuery[0] = '\0'; switch(i) { case 0: sprintf(ControlQuery,"select cast(max(sc.schema_version) as varchar(10) character set ISO88591) from %s.SYSTEM_SCHEMA.CATSYS cs, %s.SYSTEM_SCHEMA.SCHEMATA sc where cs.cat_name = '%s' and (sc.schema_name = '%s' or sc.schema_name = 'SYSTEM_SCHEMA') and cs.cat_uid = sc.cat_uid FOR READ UNCOMMITTED ACCESS", srvrGlobal->SystemCatalog, srvrGlobal->SystemCatalog, catToken,schemaToken); break; case 1: HashTableInfo[0] = '\0'; sprintf(ControlQuery,"select cast((case ac.PARTITIONING_SCHEME when 'HA' then '1' when 'H2' then '2' else '0' end) as varchar(10) character set ISO88591) from %s.SYSTEM_SCHEMA.SCHEMATA sc, NEO.HP_DEFINITION_SCHEMA.OBJECTS ob, NEO.HP_DEFINITION_SCHEMA.ACCESS_PATHS ac where sc.SCHEMA_VERSION = %d and sc.SCHEMA_NAME = '%s' and ob.OBJECT_NAME 
= '%s' and sc.SCHEMA_UID = ob.SCHEMA_UID and ob.OBJECT_UID = ac.ACCESS_PATH_UID and ob.OBJECT_TYPE = 'BT' FOR READ UNCOMMITTED ACCESS", srvrGlobal->SystemCatalog, atol(verBuffer), schemaToken, tableName); strcpy(HashTableInfo+ControlQueryLen, "HT="); // HT means PARTITION TYPE HASH, HASH2 or RANGE. ControlQueryLen = ControlQueryLen + 3; break; case 2: sprintf(ControlQuery,"select cast(count(*) as varchar(10) character set ISO88591) from %s.SYSTEM_SCHEMA.SCHEMATA sc, NEO.HP_DEFINITION_SCHEMA.OBJECTS ob, NEO.HP_DEFINITION_SCHEMA.PARTITIONS pt where sc.SCHEMA_VERSION = %d and sc.SCHEMA_NAME = '%s' and ob.OBJECT_NAME = '%s' and sc.SCHEMA_UID = ob.SCHEMA_UID and ob.OBJECT_UID = pt.OBJECT_UID and ob.OBJECT_TYPE = 'BT' FOR READ UNCOMMITTED ACCESS", srvrGlobal->SystemCatalog, atol(verBuffer), schemaToken, tableName); strcpy(HashTableInfo+ControlQueryLen, ";HP="); // HP means Number of PARTITION in the TABLE. ControlQueryLen = ControlQueryLen + 4; break; //case 3: // sprintf(ControlQuery,"select cast(concat(concat(concat(SUBSTRING(pt.SYSTEM_NAME FROM 7 FOR 2), ','), pt.FIRST_KEY),',') as varchar(10)) from %s.SYSTEM_SCHEMA.SCHEMATA sc, NEO.DEFINITION_SCHEMA_VERSION_%s.OBJECTS ob, NEO.DEFINITION_SCHEMA_VERSION_%s.PARTITIONS pt, NEO.DEFINITION_SCHEMA_VERSION_%s.ACCESS_PATHS ac where sc.SCHEMA_VERSION = %d and sc.SCHEMA_NAME = '%s' and ob.OBJECT_NAME = '%s' and sc.SCHEMA_UID = ob.SCHEMA_UID and ob.OBJECT_UID = pt.OBJECT_UID and ob.OBJECT_UID = ac.ACCESS_PATH_UID and pt.OBJECT_UID = ac.ACCESS_PATH_UID and ob.OBJECT_TYPE = 'BT' ORDER BY pt.SYSTEM_NAME, pt.DATA_SOURCE, pt.FIRST_KEY FOR READ UNCOMMITTED ACCESS", srvrGlobal->SystemCatalog, verBuffer, verBuffer, verBuffer, atol(verBuffer), schemaToken, tableName); // strcpy(HashTableInfo+ControlQueryLen, ";HI="); // HI means combination of Segment Number, PARTITION Number in the TABLE. 
// ControlQueryLen = ControlQueryLen + 4; // break; case 3: sprintf(ControlQuery,"select cast(count(*) as varchar(10) character set ISO88591) from %s.SYSTEM_SCHEMA.SCHEMATA sc, NEO.HP_DEFINITION_SCHEMA.OBJECTS ob, NEO.HP_DEFINITION_SCHEMA.ACCESS_PATH_COLS ac where sc.SCHEMA_NAME = '%s' and ob.OBJECT_NAME = '%s' and ac.PART_KEY_SEQ_NUM > 0 and sc.SCHEMA_UID = ob.SCHEMA_UID and ob.OBJECT_UID = ac.ACCESS_PATH_UID FOR READ UNCOMMITTED ACCESS", srvrGlobal->SystemCatalog, schemaToken, tableName); strcpy(HashTableInfo+ControlQueryLen, ";HN="); // HN means HASH COLUMNS in the TABLE. ControlQueryLen = ControlQueryLen + 4; break; case 4: // to check if there are any syskeys sprintf(ControlQuery,"select ac.SYSTEM_ADDED_COLUMN from %s.SYSTEM_SCHEMA.SCHEMATA sc,NEO.HP_DEFINITION_SCHEMA.OBJECTS ob, NEO.HP_DEFINITION_SCHEMA.ACCESS_PATH_COLS ac where sc.SCHEMA_NAME = '%s' and ob.OBJECT_NAME = '%s' and ac.SYSTEM_ADDED_COLUMN = 'Y' and sc.SCHEMA_UID = ob.SCHEMA_UID and ob.OBJECT_UID = ac.ACCESS_PATH_UID FOR READ UNCOMMITTED ACCESS", srvrGlobal->SystemCatalog, schemaToken, tableName); break; case 5: if(syskeysPresent) sprintf(ControlQuery,"select concat(cast(ac.COLUMN_NUMBER as varchar(10) character set ISO88591),',') from %s.SYSTEM_SCHEMA.SCHEMATA sc, NEO.HP_DEFINITION_SCHEMA.OBJECTS ob, NEO.HP_DEFINITION_SCHEMA.ACCESS_PATH_COLS ac where sc.SCHEMA_NAME = '%s' and ob.OBJECT_NAME = '%s' and ac.PART_KEY_SEQ_NUM > 0 and sc.SCHEMA_UID = ob.SCHEMA_UID and ob.OBJECT_UID = ac.ACCESS_PATH_UID ORDER BY ac.POSITION_IN_ROW FOR READ UNCOMMITTED ACCESS", srvrGlobal->SystemCatalog, schemaToken, tableName); else sprintf(ControlQuery,"select concat(cast(ac.COLUMN_NUMBER+1 as varchar(10) character set ISO88591),',') from %s.SYSTEM_SCHEMA.SCHEMATA sc, NEO.HP_DEFINITION_SCHEMA.OBJECTS ob, NEO.HP_DEFINITION_SCHEMA.ACCESS_PATH_COLS ac where sc.SCHEMA_NAME = '%s' and ob.OBJECT_NAME = '%s' and ac.PART_KEY_SEQ_NUM > 0 and sc.SCHEMA_UID = ob.SCHEMA_UID and ob.OBJECT_UID = ac.ACCESS_PATH_UID ORDER BY 
ac.POSITION_IN_ROW FOR READ UNCOMMITTED ACCESS", srvrGlobal->SystemCatalog, schemaToken, tableName); strcpy(HashTableInfo+ControlQueryLen, ";HC="); // HC means HASH COLUMNS in the TABLE. ControlQueryLen = ControlQueryLen + 4; break; case 6: sprintf(ControlQuery,"select cast(cast((52 * 1024 * 128) / (sum(co.column_size)) as integer) as varchar(10) character set ISO88591) from %s.SYSTEM_SCHEMA.SCHEMATA sc, NEO.HP_DEFINITION_SCHEMA.OBJECTS ob, NEO.HP_DEFINITION_SCHEMA.COLS co where sc.SCHEMA_NAME = '%s' and ob.OBJECT_NAME = '%s' and sc.SCHEMA_UID = ob.SCHEMA_UID and ob.OBJECT_UID = co.OBJECT_UID and ob.OBJECT_TYPE = 'BT' FOR READ UNCOMMITTED ACCESS", srvrGlobal->SystemCatalog, verBuffer, verBuffer, atol(verBuffer), schemaToken, tableName); strcpy(HashTableInfo+ControlQueryLen, ";HE="); // HE means Guesstimated rowset size. Change 128 to HP soon. ControlQueryLen = ControlQueryLen + 4; break; default: break; } iqqcode = QryControlSrvrStmt->ExecDirect(NULL, ControlQuery, EXTERNAL_STMT, TYPE_SELECT, SQL_ASYNC_ENABLE_OFF, 0); if (iqqcode != SQL_SUCCESS) { ERROR_DESC_def *p_buffer = QryControlSrvrStmt->sqlError.errorList._buffer; SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "TABLE_INFO", genRequestError); strncpy(genRequestError, p_buffer->errorText,200); genRequestError[199] = '\0'; break; //return true; } else { iqqcode = QryControlSrvrStmt->FetchPerf(512, 0, SQL_ASYNC_ENABLE_OFF, 0); if (iqqcode == SQL_ERROR) { ERROR_DESC_def *p_buffer = QryControlSrvrStmt->sqlError.errorList._buffer; SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "TABLE_INFO", genRequestError); strncpy(genRequestError, p_buffer->errorText,200); genRequestError[199] = '\0'; break; //return true; } else if (iqqcode != SQL_NO_DATA_FOUND) { if(i == 4) { syskeysPresent = true; continue; } Index = 0; while (Index < 
QryControlSrvrStmt->outputDataValue._length - 1) { SQLDataInd = (short)*(unsigned char*)(QryControlSrvrStmt->outputDataValue._buffer + Index); Index = Index + 1; if (SQLDataInd == 0) { SQLDataValueLen = 0; SQLDataValueLen = *(short*)(QryControlSrvrStmt->outputDataValue._buffer + Index); Index = Index + sizeof(SQLDataValueLen); if ( i == 0 ) { strncat(HashTableInfo, (char *)QryControlSrvrStmt->outputDataValue._buffer + Index, SQLDataValueLen); strcpy(verBuffer,HashTableInfo); Index = Index + SQLDataValueLen + 1; //For getting the reference of schema version } else { strncat(HashTableInfo+ControlQueryLen, (char *)QryControlSrvrStmt->outputDataValue._buffer + Index, SQLDataValueLen); Index = Index + SQLDataValueLen + 1; ControlQueryLen = ControlQueryLen + SQLDataValueLen; } } else { strcpy(genRequestError, "Invalid Table Info."); break; //return true; } } } //For getting the reference of schema version if ((i == 0) && ( (atol(verBuffer) != 1200) && (atol(verBuffer) != 2000) && (atol(verBuffer) != 2300) && (atol(verBuffer) != 2400) )) {strcpy(verBuffer,"2400");} } } HashTableInfo[ControlQueryLen] = '\0'; } else return false; if( resetNestedJoinsCQD ) { strcpy(ControlQuery,"CONTROL QUERY DEFAULT nested_joins 'OFF';"); iqqcode = QryControlSrvrStmt->ExecDirect(NULL, ControlQuery, INTERNAL_STMT, TYPE_UNKNOWN, SQL_ASYNC_ENABLE_OFF, 0); if (iqqcode != SQL_SUCCESS) { ERROR_DESC_def *p_buffer = QryControlSrvrStmt->sqlError.errorList._buffer; SendEventMsg(MSG_SRVR_POST_CONNECT_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 2, "TABLE_INFO", genRequestError); strncpy(genRequestError, p_buffer->errorText,200); genRequestError[199] = '\0'; } } return true; } // GetHashInfo() //LCOV_EXCL_STOP // Function to get specific information from SQL // The supported options right now are: // 1 - Explain plan for a query. Also writes it to a QS config table. // 2 - Gets the mode value set in the system defaults table. 
// Returns TRUE if mode is set FALSE otherwise bool getSQLInfo(E_GetSQLInfoType option, long stmtHandle, char *stmtLabel ) { #ifdef PERF_TEST perf->clockIt("getSQLInfo_START", true); #endif SRVR_STMT_HDL *pSrvrStmt = NULL; char sqlQuery[300]; SQLRETURN iqqcode = SQL_SUCCESS; SRVR_STMT_HDL *QrySrvrStmt = NULL; long i = 0; long j = 0; long Index = 0; long sqlStrLen = 0; short SQLDataInd=0; short SQLDataValueLen; unsigned long TotalSQLDataValueLen=0; bool returnVal = false, freeMem = true; char *QueryOutput = NULL; int explainDataLen = 50000; // start with 50K bytes int retExplainLen = 0; char *explainData = NULL; if ((QrySrvrStmt = getSrvrStmt("STMT_QRYSTS_ON_1", TRUE)) == NULL) return false; sqlQuery[0] = '\0'; bool testNewExplain = false; if(option == EXPLAIN_PLAN && testNewExplain) { if (stmtHandle != NULL) pSrvrStmt = (SRVR_STMT_HDL *)stmtHandle; else if (stmtLabel != NULL) pSrvrStmt = getSrvrStmt(stmtLabel, FALSE); if (pSrvrStmt == NULL || pSrvrStmt->sqlUniqueQueryID == NULL || pSrvrStmt->sqlUniqueQueryID[0] == '\0' ) return false; // If the current WMS service context does not need plan then // don't collect it. 
if (FALSE == srvrGlobal->sqlPlan) return true; if (pSrvrStmt->exPlan == SRVR_STMT_HDL::COLLECTED) return true; if (QrySrvrStmt->sqlString != NULL) { delete QrySrvrStmt->sqlString; QrySrvrStmt->sqlString = new char[256]; } if (QrySrvrStmt->sqlString == NULL) return false; sprintf(QrySrvrStmt->sqlString, "EXPLAIN %s", pSrvrStmt->stmtName); QrySrvrStmt->sqlStringLen = strlen(QrySrvrStmt->sqlString); QrySrvrStmt->sqlStmtType = (short)TYPE_SELECT; QrySrvrStmt->maxRowsetSize = 1; QrySrvrStmt->inputRowCnt = 1; QrySrvrStmt->currentMethod = odbc_SQLSvc_PrepareRowset_ldx_; iqqcode = PREPARE2(QrySrvrStmt); if (iqqcode != SQL_ERROR) { QrySrvrStmt->cursorNameLen = 0; QrySrvrStmt->cursorName[0] = '\0'; pSrvrStmt->currentMethod = odbc_SQLSvc_ExecuteN_ldx_; iqqcode = EXECUTE2(QrySrvrStmt); if (iqqcode != SQL_ERROR) { QrySrvrStmt->maxRowCnt = srvrGlobal->m_FetchBufferSize/QrySrvrStmt->outputDescVarBufferLen; QrySrvrStmt->maxRowLen = QrySrvrStmt->outputDescVarBufferLen; if (QrySrvrStmt->outputDataValue._length > 0) { if (QrySrvrStmt->outputDataValue._buffer != NULL) delete QrySrvrStmt->outputDataValue._buffer; QrySrvrStmt->outputDataValue._buffer = NULL; QrySrvrStmt->outputDataValue._length = 0; } QrySrvrStmt->currentMethod = odbc_SQLSvc_FetchPerf_ldx_; iqqcode = FETCH2bulk(QrySrvrStmt); if (iqqcode != SQL_ERROR) { returnVal = true; freeMem = false; pSrvrStmt->exPlan = SRVR_STMT_HDL::COLLECTED; } } } if (QrySrvrStmt->sqlWarningOrErrorLength > 0) { if (QrySrvrStmt->sqlWarningOrError != NULL) delete QrySrvrStmt->sqlWarningOrError; QrySrvrStmt->sqlWarningOrErrorLength = 0; QrySrvrStmt->sqlWarningOrError = NULL; } } else { switch( option ) { case EXPLAIN_PLAN: // Explain if (FALSE == srvrGlobal->sqlPlan) return true; if (stmtHandle != NULL) pSrvrStmt = (SRVR_STMT_HDL *)stmtHandle; else if (stmtLabel != NULL) pSrvrStmt = getSrvrStmt(stmtLabel, FALSE); if (pSrvrStmt == NULL || pSrvrStmt->sqlUniqueQueryID == NULL || pSrvrStmt->sqlUniqueQueryID[0] == '\0' ) return false; if 
(pSrvrStmt->exPlan == SRVR_STMT_HDL::COLLECTED) return true; // Ignore plan collection of unique queries and ones with no stats for performance reasons if (pSrvrStmt->sqlNewQueryType == SQL_SELECT_UNIQUE || pSrvrStmt->sqlNewQueryType == SQL_INSERT_UNIQUE || pSrvrStmt->sqlNewQueryType == SQL_UPDATE_UNIQUE || pSrvrStmt->sqlNewQueryType == SQL_DELETE_UNIQUE || (pSrvrStmt->comp_stats_info.statsCollectionType == SQLCLI_NO_STATS && pSrvrStmt->comp_stats_info.compilationStats.compilerId[0] != 0)) return true; // allocate explainDataLen bytes of explainData space explainData = new char[explainDataLen]; if (explainData == NULL) { char errStr[128]; sprintf( errStr, "Packed explain for %d bytes", explainDataLen ); SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, errStr); return false; } iqqcode = SQL_EXEC_GetExplainData(&(pSrvrStmt->stmt), explainData, explainDataLen, &retExplainLen); if (iqqcode == -CLI_GENCODE_BUFFER_TOO_SMALL) { explainDataLen = retExplainLen; // allocate explainDataLen bytes of explainData space if (explainData) delete explainData; explainData = new char[explainDataLen]; if (explainData == NULL) { char errStr[128]; sprintf( errStr, "Packed explain for %d bytes", explainDataLen ); SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, errStr); return false; } iqqcode = SQL_EXEC_GetExplainData(&(pSrvrStmt->stmt), explainData, explainDataLen, &retExplainLen); } else if (iqqcode == -EXE_NO_EXPLAIN_INFO) { retExplainLen = 0; if (explainData) delete explainData; explainData = 0; } else if (iqqcode < 0) { char errStr[256]; sprintf( errStr, "Error retrieving packed explain. 
SQL_EXEC_GetExplainData() returned: %d", iqqcode ); SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, errStr); delete explainData; return false; } if (pSrvrStmt->sqlPlan != NULL) { delete pSrvrStmt->sqlPlan; pSrvrStmt->sqlPlan = NULL; } pSrvrStmt->sqlPlan = explainData; pSrvrStmt->sqlPlanLen = retExplainLen; pSrvrStmt->exPlan = SRVR_STMT_HDL::COLLECTED; return true; break; case MODE_SPECIAL_1: sprintf(sqlQuery,"control query default showcontrol_show_all 'ON'"); iqqcode = QrySrvrStmt->ExecDirect(NULL, sqlQuery, INTERNAL_STMT, TYPE_UNKNOWN, SQL_ASYNC_ENABLE_OFF, 0); if (iqqcode != SQL_SUCCESS) return false; sprintf(sqlQuery,"showcontrol default mode_special_1, match full, no header"); sqlStrLen = 0; break; case NESTED_JOINS: sprintf(sqlQuery,"control query default showcontrol_show_all 'ON'"); iqqcode = QrySrvrStmt->ExecDirect(NULL, sqlQuery, INTERNAL_STMT, TYPE_UNKNOWN, SQL_ASYNC_ENABLE_OFF, 0); if (iqqcode != SQL_SUCCESS) return false; sprintf(sqlQuery,"showcontrol default NESTED_JOINS, match full, no header"); sqlStrLen = 0; break; case USER_ROLE: sprintf(sqlQuery,"values(current_role)"); sqlStrLen = 0; break; case SCHEMA_DEFAULT: sprintf(sqlQuery,"SHOWCONTROL DEFAULT SCHEMA, match full, no header"); sqlStrLen = 0; break; case DEFAULT_SCHEMA_ACCESS_ONLY: sprintf(sqlQuery,"control query default showcontrol_show_all 'ON'"); iqqcode = QrySrvrStmt->ExecDirect(NULL, sqlQuery, INTERNAL_STMT, TYPE_UNKNOWN, SQL_ASYNC_ENABLE_OFF, 0); if (iqqcode != SQL_SUCCESS) return false; sprintf(sqlQuery,"showcontrol default DEFAULT_SCHEMA_ACCESS_ONLY, match full, no header"); sqlStrLen = 0; break; default: return false; } iqqcode = QrySrvrStmt->ExecDirect(NULL, sqlQuery, EXTERNAL_STMT, TYPE_SELECT, SQL_ASYNC_ENABLE_OFF, 0); if (iqqcode != SQL_ERROR) { iqqcode = QrySrvrStmt->FetchPerf(100, 0, SQL_ASYNC_ENABLE_OFF, 0); if (iqqcode != SQL_NO_DATA_FOUND && iqqcode != SQL_ERROR) { unsigned long QueryLen; 
QueryLen = QrySrvrStmt->outputDataValue._length + 512; QueryOutput = new char[QueryLen]; if( !QueryOutput ) { returnVal = false; goto Handle_Return; } QueryOutput[0] = '\0'; Index = 0; while (Index < QrySrvrStmt->outputDataValue._length - 1) { SQLDataInd = (short)*(unsigned char*)(QrySrvrStmt->outputDataValue._buffer + Index); Index = Index + 1; if (SQLDataInd == 0) { SQLDataValueLen = 0; SQLDataValueLen = *(short*)(QrySrvrStmt->outputDataValue._buffer + Index); Index = Index + sizeof(SQLDataValueLen); if( TotalSQLDataValueLen+SQLDataValueLen+1 > QueryLen ) break; strncat(QueryOutput+TotalSQLDataValueLen, (const char *)QrySrvrStmt->outputDataValue._buffer + Index, SQLDataValueLen); Index = Index + SQLDataValueLen + 1; TotalSQLDataValueLen = TotalSQLDataValueLen + SQLDataValueLen; strncat(QueryOutput+TotalSQLDataValueLen, "\n", 1); TotalSQLDataValueLen = TotalSQLDataValueLen + 1; } else break; } if( SQLDataInd == 0 ) { QueryOutput[TotalSQLDataValueLen] = '\0'; if( option == MODE_SPECIAL_1 && strnicmp( QueryOutput, "ON", 2 ) == 0 ) { returnVal = true; } else if( option == NESTED_JOINS && strnicmp( QueryOutput, "ON", 2 ) == 0 ) { returnVal = true; } else if ( option == USER_ROLE ) { //LCOV_EXCL_START char *rolename; bzero(srvrGlobal->QSRoleName, sizeof(srvrGlobal->QSRoleName)); bzero(srvrGlobal->RoleName, sizeof(srvrGlobal->RoleName)); QueryOutput[SQLDataValueLen] = '\0'; strcpy(srvrGlobal->QSRoleName, QueryOutput); // Output is always in the form <max 8 chars>.<max 8 chars> rolename = (char *)memchr(QueryOutput, '.', 8 + 1); // Extract the role name if output starts with "ROLE" if (rolename != NULL && (memcmp(QueryOutput, ROLE_PREFIX, ROLE_PREFIX_LEN) == 0)) { strcpy(srvrGlobal->RoleName, rolename + 1); } else { strcpy(srvrGlobal->RoleName, QueryOutput); } // SQ TBD: Roles are strcpy(srvrGlobal->QSRoleName,"SUPER.SERVICES"); strcpy(srvrGlobal->RoleName,"SUPER.SERVICES"); returnVal = true; //LCOV_EXCL_STOP } else if ( option == SCHEMA_DEFAULT ) { if (SQLDataValueLen > 
0) { QueryOutput[SQLDataValueLen] = '\0'; strncpy(savedDefaultSchema,QueryOutput,sizeof(savedDefaultSchema)); savedDefaultSchema[sizeof(savedDefaultSchema) -1] = '\0'; returnVal = true; } else // Default Schema is empty { strcpy(savedDefaultSchema,ODBCMX_DEFAULT_SCHEMA); SendEventMsg(MSG_SQL_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 3, ODBCMX_SERVER, "HY000", "Default Schema is empty."); } } else if( option == DEFAULT_SCHEMA_ACCESS_ONLY ){ if( strnicmp( QueryOutput, "ON", 2 ) == 0 ) srvrGlobal->defaultSchemaAccessOnly = true; else srvrGlobal->defaultSchemaAccessOnly = false; returnVal = true; } } } else // FetchPerf returned error { //LCOV_EXCL_START if(iqqcode == SQL_ERROR) { GETSQLERROR(QrySrvrStmt->bSQLMessageSet, &QrySrvrStmt->sqlError); ERROR_DESC_def *sqlError = QrySrvrStmt->sqlError.errorList._buffer; SendEventMsg(MSG_SQL_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 3, ODBCMX_SERVER, sqlError->sqlstate, sqlError->errorText); } //LCOV_EXCL_STOP } } else // ExecDirect returned error { //LCOV_EXCL_START char errorBuf[512]; int numSQLErrors = QrySrvrStmt->sqlError.errorList._length; ERROR_DESC_def *sqlError = QrySrvrStmt->sqlError.errorList._buffer; if(numSQLErrors > 0) { if(sqlError->errorText) snprintf(errorBuf,sizeof(errorBuf),"Error executing %s : sqlcode=%d,sqlerror=%s",QrySrvrStmt->sqlString,sqlError->sqlcode,sqlError->errorText); else snprintf(errorBuf,sizeof(errorBuf),"Error executing %s : sqlcode=%d",QrySrvrStmt->sqlString,sqlError->sqlcode); } else snprintf(errorBuf,sizeof(errorBuf),"Error executing %s, no SQL diagnostics available ",QrySrvrStmt->sqlString); SendEventMsg(MSG_SQL_ERROR, EVENTLOG_ERROR_TYPE, srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 3, ODBCMX_SERVER, sqlError->sqlstate,errorBuf); //LCOV_EXCL_STOP } } Handle_Return: if (option == MODE_SPECIAL_1 || option == NESTED_JOINS || option == 
DEFAULT_SCHEMA_ACCESS_ONLY) { sprintf(sqlQuery,"control query default showcontrol_show_all 'OFF'"); QrySrvrStmt->ExecDirect(NULL, sqlQuery, INTERNAL_STMT, TYPE_UNKNOWN, SQL_ASYNC_ENABLE_OFF, 0); } if (QueryOutput != NULL && freeMem) { delete [] QueryOutput; QueryOutput = NULL; } #ifdef PERF_TEST perf->clockIt("getSQLInfo_END", true); #endif return returnVal; } bool ChkWSvcCommands(char* wsname, int& retcode, long type) { SRVR_STMT_HDL *QryControlSrvrStmt; char ControlQuery[200]; SQLRETURN iqqcode = SQL_SUCCESS; retcode = -1; short length; char buffer[10]; char* service_id = wsname; unsigned long Index=0; short SQLDataInd=0; short SQLDataValueLen; unsigned long ConfigQueryLen; unsigned long TotalSQLDataValueLen=0; if ((QryControlSrvrStmt = getSrvrStmt("STMT_QRYSTS_ON_1", FALSE)) != NULL) QryControlSrvrStmt->Close(SQL_DROP); if ((QryControlSrvrStmt = getSrvrStmt("STMT_QRYSTS_ON_1", TRUE)) == NULL) return false; ControlQuery[0] = '\0'; switch (type) { case CHECK_SERVICE: sprintf(ControlQuery,"select service_id from NEO.NWMS_SCHEMA.SERVICES where service_name = \'%s\' for browse access", wsname); break; case CHECK_SERVICEMAX: sprintf(ControlQuery,"select MAX(service_id) from NEO.NWMS_SCHEMA.SERVICES for browse access"); break; case CHECK_SERVICEPRTY: sprintf(ControlQuery,"select service_priority from NEO.NWMS_SCHEMA.SERVICES where service_name = \'%s\' for browse access", wsname); break; // case CHECK_MAXQUERIES_TOTAL: // sprintf(ControlQuery,"select cast(sum(cast(limit_value as integer)) as integer) from NEO.NWMS_SCHEMA.THRESHOLDS where threshold_type in (0,1) for browse access"); // break; case CHECK_MAXQUERIES_OTHERS: sprintf(ControlQuery,"select cast(sum(cast(limit_value as integer)) as integer) from NEO.NWMS_SCHEMA.THRESHOLDS where threshold_type in (0,1) and service_id <> %s for browse access", service_id); break; case CHECK_QUERIES_WAITING: sprintf(ControlQuery,"select limit_value from NEO.NWMS_SCHEMA.THRESHOLDS where threshold_type = 1 and service_id = %s for 
browse access", service_id); break; case CHECK_QUERIES_EXECUTING: sprintf(ControlQuery,"select limit_value from NEO.NWMS_SCHEMA.THRESHOLDS where threshold_type = 0 and service_id = %s for browse access", service_id); break; default: return false; } iqqcode = QryControlSrvrStmt->ExecDirect(NULL, ControlQuery, EXTERNAL_STMT, TYPE_SELECT, SQL_ASYNC_ENABLE_OFF, 0); if (iqqcode != SQL_SUCCESS) { return false; } else { iqqcode = QryControlSrvrStmt->FetchPerf(100, 0, SQL_ASYNC_ENABLE_OFF, 0); if (iqqcode == SQL_ERROR) { return false; } else if (iqqcode == SQL_NO_DATA_FOUND) { retcode = -1; return true; } else { switch (type) { case CHECK_SERVICE: Index = 0; if (Index < QryControlSrvrStmt->outputDataValue._length - 1) { ControlQuery[0] = '\0'; SQLDataInd = (short)*(unsigned char*)(QryControlSrvrStmt->outputDataValue._buffer + Index); Index = Index + 1; if (SQLDataInd == 0) { retcode = *(long*)(QryControlSrvrStmt->outputDataValue._buffer + Index); Index = Index + sizeof(SQLDataValueLen); Index = Index + 1; } } return true; case CHECK_SERVICEMAX: Index = 0; if (Index < QryControlSrvrStmt->outputDataValue._length - 1) { ControlQuery[0] = '\0'; SQLDataInd = (short)*(unsigned char*)(QryControlSrvrStmt->outputDataValue._buffer + Index); Index = Index + 1; if (SQLDataInd == 0) { retcode = *(long*)(QryControlSrvrStmt->outputDataValue._buffer + Index); Index = Index + sizeof(SQLDataValueLen); Index = Index + 1; } } return true; case CHECK_SERVICEPRTY: Index = 0; if (Index < QryControlSrvrStmt->outputDataValue._length - 1) { ControlQuery[0] = '\0'; SQLDataInd = (short)*(unsigned char*)(QryControlSrvrStmt->outputDataValue._buffer + Index); Index = Index + 1; if (SQLDataInd == 0) { retcode = (long)*(short*)(QryControlSrvrStmt->outputDataValue._buffer + Index); Index = Index + sizeof(SQLDataValueLen); Index = Index + 1; } } return true; case CHECK_MAXQUERIES_OTHERS: Index = 0; if (Index < QryControlSrvrStmt->outputDataValue._length - 1) { ControlQuery[0] = '\0'; SQLDataInd = 
(short)*(unsigned char*)(QryControlSrvrStmt->outputDataValue._buffer + Index); Index = Index + 1; if (SQLDataInd == 0) { retcode = *(long*)(QryControlSrvrStmt->outputDataValue._buffer + Index); Index = Index + sizeof(SQLDataValueLen); Index = Index + 1; } } return true; case CHECK_QUERIES_WAITING: case CHECK_QUERIES_EXECUTING: Index = 0; if (Index < QryControlSrvrStmt->outputDataValue._length - 1) { ControlQuery[0] = '\0'; SQLDataInd = (short)*(unsigned char*)(QryControlSrvrStmt->outputDataValue._buffer + Index); Index = Index + 1; if (SQLDataInd == 0) { memcpy(&length, (unsigned char*)(QryControlSrvrStmt->outputDataValue._buffer + Index), 2); Index += 2; memcpy(buffer, (unsigned char*)(QryControlSrvrStmt->outputDataValue._buffer + Index), length); buffer[Index] = 0; retcode = atol(buffer); } } return true; default: return false; } } } } /* * New wire protocol method for Prepare */ extern "C" void odbc_SQLSrvr_Prepare_ame_( /* In */ CEE_tag_def objtag_ , /* In */ const CEE_handle_def *call_id_ , /* In */ DIALOGUE_ID_def dialogueId , /* In */ IDL_long sqlAsyncEnable , /* In */ IDL_long queryTimeout , /* In */ IDL_short stmtType , /* In */ IDL_long sqlStmtType , /* In */ IDL_long stmtLength , /* In */ const IDL_char *stmtLabel , /* In */ IDL_long stmtLabelCharset , /* In */ IDL_long cursorLength , /* In */ IDL_string cursorName , /* In */ IDL_long cursorCharset , /* In */ IDL_long moduleNameLength , /* In */ const IDL_char *moduleName , /* In */ IDL_long moduleCharset , /* In */ IDL_long_long moduleTimestamp , /* In */ IDL_long sqlStringLength , /* In */ IDL_string sqlString , /* In */ IDL_long sqlStringCharset , /* In */ IDL_long setStmtOptionsLength , /* In */ IDL_string setStmtOptions , /* In */ IDL_long stmtExplainLabelLength , /* In */ IDL_string stmtExplainLabel , /* In */ IDL_long maxRowsetSize , /* In */ IDL_long_long txnID // T4 driver sends a transaction ID which we need to join , /* In */ IDL_short *extTransId // T4 driver sends a transaction ID which we 
need to join , /* In */ IDL_long holdableCursor ) { #ifdef PERF_TEST perf->init(); perf->clockIt("SQLSrvr_Prepare_ame_START", true); #endif SRVRTRACE_ENTER(FILE_AME+19); IDL_long returnCode = SQL_SUCCESS; IDL_long sqlWarningOrErrorLength = 0; BYTE *sqlWarningOrError = NULL; IDL_long sqlQueryType = 0; Long stmtHandle = 0; IDL_long estimatedCost = 0; IDL_long inputParamsLength = 0; IDL_long inputDescLength = 0; BYTE *inputDesc = NULL; IDL_long outputColumnsLength = 0; IDL_long outputDescLength = 0; BYTE *outputDesc = NULL; RETCODE rc = 0; char errorBuffer[512]; // a buffer for formatting error messages IDL_boolean bPrepareWithRowsets = IDL_FALSE; if (maxRowsetSize > 1) { bPrepareWithRowsets = IDL_TRUE; } if (srvrGlobal->traceLogger != NULL) { srvrGlobal->traceLogger->TracePrepare2Enter(dialogueId, sqlAsyncEnable, queryTimeout, maxRowsetSize, sqlStmtType, stmtLength, stmtLabel, stmtLabelCharset, cursorLength, cursorName, cursorCharset, moduleNameLength, moduleName, moduleCharset, moduleTimestamp, sqlStringLength, sqlString, sqlStringCharset, setStmtOptionsLength, setStmtOptions, txnID, holdableCursor); } DESTROY_CONN_IDLE_TIMER if (srvrGlobal != NULL && srvrGlobal->srvrType == CORE_SRVR) { if (srvrGlobal->srvrState == SRVR_CONNECTED) { if (dialogueId != srvrGlobal->dialogueId) { //LCOV_EXCL_START returnCode = SQL_ERROR; GETMXCSWARNINGORERROR(-1, "HY000", "Invalid Connection.", &sqlWarningOrErrorLength, sqlWarningOrError); //LCOV_EXCL_STOP } else { bool rcPlan = false; char genRequestError[200] = {0}; odbc_SQLSvc_PrepareRowset_exc_ exception_={0,0,0}; if (sqlStmtType == TYPE_UNKNOWN) { rcPlan = LoadControls(sqlString, false, genRequestError, &exception_, (SRVR_STMT_HDL **)&stmtHandle); //3155 if (rcPlan) { if (genRequestError[0] != 0) { returnCode = SQL_ERROR; GETMXCSWARNINGORERROR(-1, "HY000", genRequestError, &sqlWarningOrErrorLength, sqlWarningOrError); } else srvrGlobal->isShapeLoaded = true; } if (rcPlan == false) { char HashTableInfo[1024] = {0}; rcPlan = 
GetHashInfo(sqlString, genRequestError, HashTableInfo); //LCOV_EXCL_START if (rcPlan) { if (genRequestError[0] != 0) { returnCode = SQL_ERROR; GETMXCSWARNINGORERROR(-1, "HY000", genRequestError, &sqlWarningOrErrorLength, sqlWarningOrError); } if (HashTableInfo[0] != 0) { returnCode = SQL_SUCCESS_WITH_INFO; GETMXCSWARNINGORERROR(-1, "01000", HashTableInfo, &sqlWarningOrErrorLength, sqlWarningOrError); sqlQueryType = 10001; } } //LCOV_EXCL_STOP } } if (rcPlan == false) { if(bPrepareWithRowsets) { odbc_SQLSvc_Prepare2withRowsets_sme_(objtag_, call_id_, dialogueId, sqlAsyncEnable, queryTimeout, maxRowsetSize, sqlStmtType, stmtLength, stmtLabel, stmtLabelCharset, cursorLength, cursorName, cursorCharset, moduleNameLength, moduleName, moduleCharset, moduleTimestamp, sqlStringLength, sqlString, sqlStringCharset, setStmtOptionsLength, setStmtOptions, holdableCursor, &returnCode, &sqlWarningOrErrorLength, sqlWarningOrError, &sqlQueryType, &stmtHandle, &estimatedCost, &inputDescLength, inputDesc, &outputDescLength, outputDesc); } else { odbc_SQLSvc_Prepare2_sme_(maxRowsetSize, sqlStmtType, stmtLabel, sqlString, holdableCursor, &returnCode, &sqlWarningOrErrorLength, sqlWarningOrError, &sqlQueryType, &stmtHandle, &estimatedCost, &inputDescLength, inputDesc, &outputDescLength, outputDesc); //odbc_SQLSvc_Prepare2_sme_(objtag_, call_id_, dialogueId, sqlAsyncEnable, queryTimeout, // maxRowsetSize, sqlStmtType, stmtLength, stmtLabel, stmtLabelCharset, cursorLength, cursorName, // cursorCharset, moduleNameLength, moduleName, moduleCharset, moduleTimestamp, // sqlStringLength, sqlString, sqlStringCharset, setStmtOptionsLength, setStmtOptions, // &returnCode, &sqlWarningOrErrorLength, sqlWarningOrError, &sqlQueryType, // &stmtHandle, &estimatedCost, &inputDescLength, inputDesc, // &outputDescLength, outputDesc); } if (srvrGlobal->isShapeLoaded == true) { //LCOV_EXCL_START rcPlan = ResetControls(genRequestError); if (rcPlan) { if (genRequestError[0] != 0) { returnCode = SQL_ERROR; 
GETMXCSWARNINGORERROR(-1, "HY000", genRequestError, &sqlWarningOrErrorLength, sqlWarningOrError); } else srvrGlobal->isShapeLoaded = false; } //LCOV_EXCL_STOP } } } } else { //LCOV_EXCL_START returnCode = SQL_ERROR; GETMXCSWARNINGORERROR(-1, "HY000", "Invalid Connection.", &sqlWarningOrErrorLength, sqlWarningOrError); //LCOV_EXCL_STOP } } else { if(bPrepareWithRowsets) { odbc_SQLSvc_Prepare2withRowsets_sme_(objtag_, call_id_, dialogueId, sqlAsyncEnable, queryTimeout, maxRowsetSize, sqlStmtType, stmtLength, stmtLabel, stmtLabelCharset, cursorLength, cursorName, cursorCharset, moduleNameLength, moduleName, moduleCharset, moduleTimestamp, sqlStringLength, sqlString, sqlStringCharset, setStmtOptionsLength, setStmtOptions, holdableCursor, &returnCode, &sqlWarningOrErrorLength, sqlWarningOrError, &sqlQueryType, &stmtHandle, &estimatedCost, &inputDescLength, inputDesc, &outputDescLength, outputDesc); } else { odbc_SQLSvc_Prepare2_sme_(maxRowsetSize, sqlStmtType, stmtLabel, sqlString, holdableCursor, &returnCode, &sqlWarningOrErrorLength, sqlWarningOrError, &sqlQueryType, &stmtHandle, &estimatedCost, &inputDescLength, inputDesc, &outputDescLength, outputDesc); } } // For 64bit since the wire protocol has the stmtHandle defined as a Int32 we can no longer pass // the srvrStmt address to the client. Instead we'll now pass a key value, which will be used to // retrieve the stmtHandle from std::map definition defined in SRVR_GLOBAL_Def. The SRVR_STMT_HDL // constructor and destructor will be responsible to add and delete entries from the above map. 
SRVR_STMT_HDL *pSrvrStmt=NULL; pSrvrStmt = (SRVR_STMT_HDL *)stmtHandle; //publishing compile error if((returnCode != SQL_SUCCESS) && (returnCode != SQL_SUCCESS_WITH_INFO) && pSrvrStmt) { pSrvrStmt->m_need_21036_end_msg = true; pSrvrStmt->inState = STMTSTAT_CLOSE; pSrvrStmt->m_bqueryFinish = true; if(pSrvrStmt->queryStartTime <= 0) pSrvrStmt->queryStartTime = JULIANTIMESTAMP(); if ((resStatStatement != NULL) && (pSrvrStmt->stmtType == EXTERNAL_STMT)) // if statement is on resStatStatement->endRepository(pSrvrStmt, sqlWarningOrErrorLength, sqlWarningOrError, false); } odbc_SQLSrvr_Prepare_ts_res_(objtag_, call_id_, returnCode, sqlWarningOrErrorLength, sqlWarningOrError, sqlQueryType, (pSrvrStmt !=NULL) ? pSrvrStmt->myKey : 0, estimatedCost, inputDescLength, inputDesc, outputDescLength, outputDesc); if (srvrGlobal->traceLogger != NULL) { srvrGlobal->traceLogger->TracePrepare2Exit(returnCode, sqlWarningOrErrorLength, sqlWarningOrError, sqlQueryType, stmtHandle, estimatedCost, inputDescLength, inputDesc, outputDescLength, outputDesc); } START_CONN_IDLE_TIMER SRVRTRACE_EXIT(FILE_AME+19); #ifdef PERF_TEST perf->clockIt("SQLSrvr_Prepare_ame_END", true); #endif return; } /* odbc_SQLSrvr_Prepare_ame_() */ extern "C" void odbc_SQLSrvr_Fetch_ame_( /* In */ CEE_tag_def objtag_ , /* In */ const CEE_handle_def *call_id_ , /* In */ DIALOGUE_ID_def dialogueId , /* In */ IDL_short operation_id , /* In */ IDL_long sqlAsyncEnable , /* In */ IDL_long queryTimeout , /* In */ Long stmtHandle , /* In */ const IDL_string stmtLabel , /* In */ IDL_unsigned_long_long maxRowCnt , /* In */ IDL_unsigned_long_long maxRowLen ) { SRVRTRACE_ENTER(FILE_AME+37); IDL_long returnCode = SQL_SUCCESS; IDL_long rowsAffected = 0; IDL_long outValuesFormat = UNKNOWN_DATA_FORMAT; IDL_long outValuesLength = 0; BYTE *outValues = NULL; IDL_long sqlWarningOrErrorLength = 0; BYTE *sqlWarningOrError = NULL; RETCODE rc = 0; SQL_DataValue_def outputDataValue = {0,0}; char errorBuffer[512]; // a buffer for formatting 
error messages if (srvrGlobal->traceLogger != NULL) { srvrGlobal->traceLogger->TraceSrvrFetchEnter( dialogueId , sqlAsyncEnable , queryTimeout , stmtHandle , maxRowCnt , maxRowLen , (long)srvrGlobal->fetchAhead); } DESTROY_CONN_IDLE_TIMER bool firstFetch = false; SRVR_STMT_HDL *pSrvrStmt = (SRVR_STMT_HDL *)stmtHandle; if(pSrvrStmt == NULL) { pSrvrStmt = SRVR::getSrvrStmt(stmtLabel, FALSE); if(pSrvrStmt == NULL) { returnCode = SQL_ERROR; GETMXCSWARNINGORERROR(-1, "HY000", "Statement Label not found.", &sqlWarningOrErrorLength, sqlWarningOrError); odbc_SQLSrvr_Fetch_ts_res_( objtag_ , call_id_ , returnCode , sqlWarningOrErrorLength , sqlWarningOrError , rowsAffected , outValuesFormat , outputDataValue._length , outputDataValue._buffer); goto FETCH_EXIT; } } if (srvrGlobal->fetchAhead && pSrvrStmt->sqlStmtType != TYPE_SELECT_CATALOG) { // set firstFetch here if (pSrvrStmt->rowsAffected == 0 && pSrvrStmt->m_curRowsFetched == 0 && !pSrvrStmt->sqlWarningOrError) firstFetch = true; if (!firstFetch) { // need to send response to the client returnCode = pSrvrStmt->returnCodeForDelayedError; sqlWarningOrErrorLength = pSrvrStmt->delayedSqlWarningOrErrorLength; sqlWarningOrError = pSrvrStmt->delayedSqlWarningOrError; rowsAffected = pSrvrStmt->delayedRowsAffected; if (srvrGlobal->drvrVersion.buildId & ROWWISE_ROWSET) outValuesFormat = ROWWISE_ROWSETS; else outValuesFormat = COLUMNWISE_ROWSETS; outputDataValue._buffer = pSrvrStmt->delayedOutputDataValue._buffer; outputDataValue._length = pSrvrStmt->delayedOutputDataValue._length; odbc_SQLSrvr_Fetch_ts_res_( objtag_ , call_id_ , returnCode , sqlWarningOrErrorLength , sqlWarningOrError , rowsAffected , outValuesFormat , outputDataValue._length , outputDataValue._buffer); if (returnCode == SQL_NO_DATA_FOUND || returnCode == SQL_ERROR || returnCode == SQL_INVALID_HANDLE || returnCode == SQL_STILL_EXECUTING) { pSrvrStmt->returnCodeForDelayedError = SQL_SUCCESS; // reset returnCodeForDelayedError goto FETCH_EXIT; // fetch ahead is 
stopped } } if (srvrGlobal != NULL && srvrGlobal->srvrType == CORE_SRVR) { if (srvrGlobal->srvrState == SRVR_CONNECTED) { if (dialogueId != srvrGlobal->dialogueId) { //LCOV_EXCL_START returnCode = SQL_ERROR; GETMXCSWARNINGORERROR(-1, "HY000", "Invalid Connection.", &sqlWarningOrErrorLength, sqlWarningOrError); //LCOV_EXCL_STOP } else { odbc_SQLSrvr_FetchPerf_sme_(objtag_, call_id_, &returnCode, dialogueId, stmtLabel, maxRowCnt, maxRowLen, sqlAsyncEnable, queryTimeout, &rowsAffected, &outValuesFormat, &outputDataValue, &sqlWarningOrErrorLength, sqlWarningOrError); } /* dialogueId == srvrGlobal->dialogueId */ } else { returnCode = SQL_ERROR; GETMXCSWARNINGORERROR(-1, "HY000", "Invalid Connection.", &sqlWarningOrErrorLength, sqlWarningOrError); } /* srvrGlobal->srvrState != SRVR_CONNECTED */ } else { odbc_SQLSrvr_FetchPerf_sme_(objtag_, call_id_, &returnCode, dialogueId, stmtLabel, maxRowCnt, maxRowLen, sqlAsyncEnable, queryTimeout, &rowsAffected, &outValuesFormat, &outputDataValue, &sqlWarningOrErrorLength, sqlWarningOrError); } /* srvrGlobal->srvrType != CORE_SRVR */ qrysrvc_ExecuteFinished(stmtLabel, NULL, false, returnCode, true); if ((resStatStatement != NULL) && (pSrvrStmt->stmtType == EXTERNAL_STMT)) // if statement is on resStatStatement->endRepository(pSrvrStmt, sqlWarningOrErrorLength, sqlWarningOrError, true); //LCOV_EXCL_START if (firstFetch) { odbc_SQLSrvr_Fetch_ts_res_( objtag_ , call_id_ , returnCode , sqlWarningOrErrorLength , sqlWarningOrError , rowsAffected , outValuesFormat , outputDataValue._length , outputDataValue._buffer); if (returnCode == SQL_SUCCESS || returnCode == SQL_SUCCESS_WITH_INFO) { odbc_SQLSrvr_FetchPerf_sme_(objtag_, call_id_, &returnCode, dialogueId, stmtLabel, maxRowCnt, maxRowLen, sqlAsyncEnable, queryTimeout, &rowsAffected, &outValuesFormat, &outputDataValue, &sqlWarningOrErrorLength, sqlWarningOrError); } } //LCOV_EXCL_STOP if (pSrvrStmt != NULL) { pSrvrStmt->returnCodeForDelayedError = returnCode; 
pSrvrStmt->delayedRowsAffected = rowsAffected; // Daniel - if ahead fetch got no data found return code, do not use old data buffer for sending useless data. if(returnCode==SQL_NO_DATA_FOUND){ pSrvrStmt->delayedOutputDataValue._buffer=NULL; pSrvrStmt->delayedOutputDataValue._length=0; } else{ pSrvrStmt->delayedOutputDataValue._buffer = outputDataValue._buffer; pSrvrStmt->delayedOutputDataValue._length = outputDataValue._length; } pSrvrStmt->delayedSqlWarningOrErrorLength = sqlWarningOrErrorLength; pSrvrStmt->delayedSqlWarningOrError = sqlWarningOrError; } } else { // if (!srvrGlobal->fetchAhead) - keep original code to prevent regression if (srvrGlobal != NULL && srvrGlobal->srvrType == CORE_SRVR) { if (srvrGlobal->srvrState == SRVR_CONNECTED) { if (dialogueId != srvrGlobal->dialogueId) { //LCOV_EXCL_START returnCode = SQL_ERROR; GETMXCSWARNINGORERROR(-1, "HY000", "Invalid Connection.", &sqlWarningOrErrorLength, sqlWarningOrError); //LCOV_EXCL_STOP } else { odbc_SQLSrvr_FetchPerf_sme_(objtag_, call_id_, &returnCode, dialogueId, stmtLabel, maxRowCnt, maxRowLen, sqlAsyncEnable, queryTimeout, &rowsAffected, &outValuesFormat, &outputDataValue, &sqlWarningOrErrorLength, sqlWarningOrError); } /* dialogueId == srvrGlobal->dialogueId */ } else { //LCOV_EXCL_START returnCode = SQL_ERROR; GETMXCSWARNINGORERROR(-1, "HY000", "Invalid Connection.", &sqlWarningOrErrorLength, sqlWarningOrError); //LCOV_EXCL_STOP } /* srvrGlobal->srvrState != SRVR_CONNECTED */ } else { odbc_SQLSrvr_FetchPerf_sme_(objtag_, call_id_, &returnCode, dialogueId, stmtLabel, maxRowCnt, maxRowLen, sqlAsyncEnable, queryTimeout, &rowsAffected, &outValuesFormat, &outputDataValue, &sqlWarningOrErrorLength, sqlWarningOrError); } /* srvrGlobal->srvrType != CORE_SRVR */ if (pSrvrStmt->sqlNewQueryType == SQL_SP_RESULT_SET) { if (pSrvrStmt->callStmtHandle->inState == STMTSTAT_CLOSE) { if (returnCode == SQL_ERROR && sqlWarningOrError != NULL && resStatStatement != NULL) resStatStatement->setSqlErrorCode(*(Int32 
*)(sqlWarningOrError+8)); else { resStatStatement->setSqlErrorCode(returnCode); } pSrvrStmt = pSrvrStmt->callStmtHandle; } } qrysrvc_ExecuteFinished(NULL, (long)pSrvrStmt, false, returnCode, true); if (pSrvrStmt != NULL) { if ((resStatStatement != NULL) && (pSrvrStmt->stmtType == EXTERNAL_STMT)) // if statement is on resStatStatement->endRepository(pSrvrStmt, sqlWarningOrErrorLength, sqlWarningOrError, true); } odbc_SQLSrvr_Fetch_ts_res_( objtag_ , call_id_ , returnCode , sqlWarningOrErrorLength , sqlWarningOrError , rowsAffected , outValuesFormat , outputDataValue._length , outputDataValue._buffer); } FETCH_EXIT: if (srvrGlobal->traceLogger != NULL) { srvrGlobal->traceLogger->TraceSrvrFetchExit(returnCode, sqlWarningOrErrorLength, sqlWarningOrError, rowsAffected, outValuesLength, outValues); } START_CONN_IDLE_TIMER SRVRTRACE_EXIT(FILE_AME+37); return; } /* end odbc_SQLSrvr_Fetch_ame_() */ extern "C" void odbc_SQLSrvr_ExecDirect_ame_( /* In */ CEE_tag_def objtag_ , /* In */ const CEE_handle_def *call_id_ , /* In */ DIALOGUE_ID_def dialogueId , /* In */ const IDL_char *stmtLabel , /* In */ IDL_string cursorName , /* In */ const IDL_char *stmtExplainLabel , /* In */ IDL_short stmtType , /* In */ IDL_short sqlStmtType , /* In */ IDL_string sqlString , /* In */ IDL_short sqlAsyncEnable , /* In */ IDL_long queryTimeout , /* In */ IDL_long inputRowCnt , /* In */ IDL_long_long txnID // T4 driver sends a transaction ID which we need to join , /* In */ IDL_long holdableCursor ) { #ifdef PERF_TEST perf->init(); perf->clockIt("SQLSrvr_ExecDirect_ame_START", true); #endif SRVRTRACE_ENTER(FILE_AME+23); IDL_long returnCode = SQL_SUCCESS; IDL_long sqlWarningOrErrorLength = 0; BYTE *sqlWarningOrError = NULL; IDL_long rowsAffected = 0; IDL_long sqlQueryType = SQL_UNKNOWN; IDL_long estimatedCost = 0; IDL_long outValuesLength = 0; BYTE *outValues = NULL; SQLItemDescList_def outputItemDescList = {0,0}; SQLValueList_def outputValueList = {0,0}; char errorBuffer[512]; // a buffer for 
/* formatting error messages */
    IDL_long  inputDescLength  = 0;    // Output from Prepare
    BYTE     *inputDesc        = NULL; // Output from Prepare
    IDL_long  outputDescLength = 0;    // Output from Prepare
    BYTE     *outputDesc       = NULL; // Output from Prepare
    Long      stmtHandle       = 0;    // Output from Prepare
    IDL_long  cursorCharset    = 1;    // Input for Execute
    IDL_long  inValuesLength   = 0;    // Input for Execute
    BYTE     *inValues         = NULL; // Input for Execute
    IDL_long  rowLength        = 0;
    IDL_long  cursorLength     = 0;
    char     *outparams[20];
    bool      CmdOpenOrClose   = false;

    ERROR_DESC_LIST_def sqlWarning = {0,0};
    odbc_SQLSvc_ExecDirect_exc_  ExecDirect_exception_={0,0,0};
    odbc_SQLSvc_ExecuteCall_exc_ ExecCall_exception_={0,0,0};

    RETCODE   rc           = 0;
    bool      executed     = false;
    bool      noRepository = true; // flips to false when repository end-of-query reporting is owed
    IDL_long  tmpPrepareRC = SQL_SUCCESS;

    SRVR_STMT_HDL *pSrvrStmt = NULL;

    if(cursorName != NULL)
        cursorLength = strlen(cursorName);

    if (srvrGlobal->traceLogger != NULL)
    {
        srvrGlobal->traceLogger->TraceExecDirectEnter(dialogueId, stmtLabel, cursorName,
            stmtExplainLabel, stmtType, sqlStmtType, sqlString, sqlAsyncEnable, queryTimeout);
    }

    DESTROY_CONN_IDLE_TIMER

    if (srvrGlobal != NULL && srvrGlobal->srvrType == CORE_SRVR)
    {
        if (srvrGlobal->srvrState == SRVR_CONNECTED)
        {
            if (dialogueId != srvrGlobal->dialogueId)
            {
//LCOV_EXCL_START
                returnCode = SQL_ERROR;
                GETMXCSWARNINGORERROR(-1, "HY000", "Invalid Connection.", &sqlWarningOrErrorLength, sqlWarningOrError);
//LCOV_EXCL_STOP
            }
            //Aruna
            else
            {
                bool rcPlan = false;
                short error;
                struct qrysrvc_exc_ wms_exception_ = {0};
                char serviceName[MAX_SERVICE_NAME_LEN + 1];

                /* Special manageability commands are intercepted here; each
                   isXxx() helper rewrites sqlString in place when it matches. */
                //
                // check if command format is INFO SYSTEM
                // -- Added for manageability requirement.
                //
                if (isInfoSystem(sqlString, stmtLabel, error))
                {
                    if (error != 0)
                    {
//LCOV_EXCL_START
                        returnCode = SQL_ERROR;
                        sprintf(errorBuffer, "Operation Failed");
                        GETMXCSWARNINGORERROR(-1, "S1008", errorBuffer, &sqlWarningOrErrorLength, sqlWarningOrError);
                        goto cfgerrexit;
//LCOV_EXCL_STOP
                    }
                }
                //
                // check if command format is INFO OBJECT
                // -- Added for manageability requirement.
                //
                else if (isInfoObject(sqlString, stmtLabel, error))
                {
                    if (error != 0)
                    {
//LCOV_EXCL_START
                        returnCode = SQL_ERROR;
                        sprintf(errorBuffer, "Operation Failed");
                        GETMXCSWARNINGORERROR(-1, "S1008", errorBuffer, &sqlWarningOrErrorLength, sqlWarningOrError);
                        goto cfgerrexit;
//LCOV_EXCL_STOP
                    }
                }
                //
                // check if command format is INFO DISKS
                // -- Added for HPDM.
                //
                else if ((strcmp(srvrGlobal->ApplicationName, "HPDM") == 0) && isInfoDisk(sqlString, stmtLabel, error, errorBuffer))
                {
                    if (error != 0)
                    {
//LCOV_EXCL_START
                        returnCode = SQL_ERROR;
                        GETMXCSWARNINGORERROR(-1, "S1008", errorBuffer, &sqlWarningOrErrorLength, sqlWarningOrError);
                        // Clear diagnostics
                        WSQL_EXEC_ClearDiagnostics(NULL);
                        goto cfgerrexit;
//LCOV_EXCL_STOP
                    }
                }
                //
                // check if command format is HPDM_GETPRIVILEGES
                // Internal command added for HPDM
                //
                else if ((strcmp(srvrGlobal->ApplicationName, "HPDM") == 0) && isGetPrivileges(sqlString, stmtLabel, error))
                {
                    if (error != 0)
                    {
//LCOV_EXCL_START
                        returnCode = SQL_ERROR;
                        sprintf(errorBuffer, "Operation Failed");
                        GETMXCSWARNINGORERROR(-1, "S1008", errorBuffer, &sqlWarningOrErrorLength, sqlWarningOrError);
                        goto cfgerrexit;
//LCOV_EXCL_STOP
                    }
                }

                if(!executed)
                {
                    if (rcPlan == false)
                    {
                        odbc_SQLSvc_Prepare2_sme_(inputRowCnt, sqlStmtType, stmtLabel, sqlString, holdableCursor,
                            &returnCode, &sqlWarningOrErrorLength, sqlWarningOrError,
                            &sqlQueryType, &stmtHandle, &estimatedCost,
                            &inputDescLength, inputDesc, &outputDescLength, outputDesc, true); // prepare is called from ExecDirect
                        if(returnCode == SQL_SUCCESS || returnCode == SQL_SUCCESS_WITH_INFO)
                        {
                            tmpPrepareRC = returnCode;
                            pSrvrStmt = (SRVR_STMT_HDL
*)stmtHandle; if (srvrGlobal->isShapeLoaded == false) { DO_WouldLikeToExecute(NULL, stmtHandle, &returnCode, &sqlWarningOrErrorLength, sqlWarningOrError); if (returnCode == SQL_SUCCESS || returnCode == SQL_SUCCESS_WITH_INFO) { if(((inputRowCnt > 1) || ((inputRowCnt==1) && (pSrvrStmt->preparedWithRowsets))) && (pSrvrStmt->sqlQueryType != SQL_RWRS_SPECIAL_INSERT))//&& (paramCount > 0)) { odbc_SQLSvc_Execute2withRowsets_sme_(objtag_, call_id_, dialogueId, sqlAsyncEnable, queryTimeout, inputRowCnt, sqlStmtType, stmtHandle, cursorLength, cursorName, cursorCharset, holdableCursor, inValuesLength, inValues, &returnCode, &sqlWarningOrErrorLength, sqlWarningOrError, &rowsAffected, &outValuesLength, outValues); } else { if((inputRowCnt > 0) || sqlQueryType != SQL_UNKNOWN || (inputRowCnt == 0 && pSrvrStmt != NULL && (pSrvrStmt->sqlQueryType != SQL_INSERT_UNIQUE && pSrvrStmt->sqlQueryType != SQL_INSERT_NON_UNIQUE))) odbc_SQLSvc_Execute2_sme_(objtag_, call_id_, dialogueId, sqlAsyncEnable, queryTimeout, inputRowCnt, sqlStmtType, stmtHandle, cursorLength, cursorName, cursorCharset, holdableCursor, inValuesLength, inValues, &returnCode, &sqlWarningOrErrorLength, sqlWarningOrError, &rowsAffected, &outValuesLength, outValues); } estimatedCost = pSrvrStmt->rowsAffectedHigherBytes; // combine both rowsAffected and rowsAffectedHigherBytes as __int64 when interface between drvr/srvr changes if((tmpPrepareRC == SQL_SUCCESS_WITH_INFO) && (returnCode == SQL_SUCCESS)) returnCode = SQL_SUCCESS_WITH_INFO; if (pSrvrStmt->m_need_21036_end_msg == true) noRepository = false; } } // srvrGlobal->isShapeLoaded == false goto cfgerrexit;; } else { //publishing compile error. 
pSrvrStmt = (SRVR_STMT_HDL *)stmtHandle;
                            /* Prepare failed: close out the statement's state so the
                               repository still records the failed query. */
                            pSrvrStmt->inState = STMTSTAT_CLOSE;
                            pSrvrStmt->m_need_21036_end_msg = true;
                            pSrvrStmt->m_bqueryFinish = true;
                            if(pSrvrStmt->queryStartTime <= 0)
                                pSrvrStmt->queryStartTime = JULIANTIMESTAMP();
                            noRepository = false;
                        }
                    } /* if(rcPlan == false) */
                } /* if(!executed) */
            } /* else if (dialogueId == srvrGlobal->dialogueId) */
        } /* if (srvrGlobal->srvrState == SRVR_CONNECTED) */
        else
        {
            returnCode = SQL_ERROR;
            GETMXCSWARNINGORERROR(-1, "HY000", "Invalid Connection.", &sqlWarningOrErrorLength, sqlWarningOrError);
        }
    }
    else
    {
        /* Non-CORE server type: prepare/execute without the connection-state checks. */
        odbc_SQLSvc_Prepare2_sme_(inputRowCnt, sqlStmtType, stmtLabel, sqlString, holdableCursor,
            &returnCode, &sqlWarningOrErrorLength, sqlWarningOrError, &sqlQueryType, &stmtHandle,
            &estimatedCost, &inputDescLength, inputDesc, &outputDescLength, outputDesc);

        if(returnCode == SQL_SUCCESS || returnCode == SQL_SUCCESS_WITH_INFO)
        {
            pSrvrStmt = (SRVR_STMT_HDL *)stmtHandle;
            if (srvrGlobal->isShapeLoaded == false)
            {
                DO_WouldLikeToExecute(NULL, stmtHandle, &returnCode, &sqlWarningOrErrorLength, sqlWarningOrError);
                if (returnCode == SQL_SUCCESS)
                {
                    if(((inputRowCnt > 1) || ((inputRowCnt==1) && (pSrvrStmt->preparedWithRowsets))) && (pSrvrStmt->sqlQueryType != SQL_RWRS_SPECIAL_INSERT))//&& (paramCount > 0))
                    {
                        odbc_SQLSvc_Execute2withRowsets_sme_(objtag_, call_id_, dialogueId, sqlAsyncEnable, queryTimeout,
                            inputRowCnt, sqlStmtType, stmtHandle, cursorLength, cursorName, cursorCharset, holdableCursor,
                            inValuesLength, inValues, &returnCode, &sqlWarningOrErrorLength, sqlWarningOrError,
                            &rowsAffected, &outValuesLength, outValues);
                    }
                    else
                    {
                        if((inputRowCnt > 0) || sqlQueryType != SQL_UNKNOWN || (inputRowCnt == 0 && pSrvrStmt != NULL && (pSrvrStmt->sqlQueryType != SQL_INSERT_UNIQUE && pSrvrStmt->sqlQueryType != SQL_INSERT_NON_UNIQUE)))
                            odbc_SQLSvc_Execute2_sme_(objtag_, call_id_, dialogueId, sqlAsyncEnable, queryTimeout,
                                inputRowCnt, sqlStmtType, stmtHandle, cursorLength, cursorName, cursorCharset, holdableCursor,
                                inValuesLength, inValues,
                                &returnCode, &sqlWarningOrErrorLength, sqlWarningOrError, &rowsAffected, &outValuesLength, outValues);
                    }
                    estimatedCost = pSrvrStmt->rowsAffectedHigherBytes; // combine both rowsAffected and rowsAffectedHigherBytes as __int64 when interface between drvr/srvr changes
                    if (pSrvrStmt->m_need_21036_end_msg == true)
                        noRepository = false;
                }
            } // srvrGlobal->isShapeLoaded == false
        } // returnCode == SQL_SUCCESS || returnCode == SQL_SUCCESS_WITH_INFO
        else
        {
            //publishing compile error
            pSrvrStmt = (SRVR_STMT_HDL *)stmtHandle;
            pSrvrStmt->inState = STMTSTAT_CLOSE;
            pSrvrStmt->m_need_21036_end_msg = true;
            pSrvrStmt->m_bqueryFinish = true;
            if(pSrvrStmt->queryStartTime <= 0)
                pSrvrStmt->queryStartTime = JULIANTIMESTAMP();
            noRepository = false;
        }
    }

cfgerrexit:

    qrysrvc_ExecuteFinished(NULL,stmtHandle, true, returnCode, false);

    if (noRepository == false )
    {
        if ((resStatStatement != NULL) && (pSrvrStmt->stmtType == EXTERNAL_STMT)) // if statement is on
/*
            resStatStatement->endRepository(pSrvrStmt->inState,
                pSrvrStmt->sqlQueryType,
                pSrvrStmt->sqlString,
                pSrvrStmt->isClosed,
                pSrvrStmt->cost_info,
                pSrvrStmt->comp_stats_info,
                &pSrvrStmt->m_need_21036_end_msg,
                sqlWarningOrErrorLength,
                sqlWarningOrError);
*/
            resStatStatement->endRepository(pSrvrStmt,
                sqlWarningOrErrorLength,
                sqlWarningOrError,
                false);
    }

    /* Send the combined prepare/execute reply back to the driver. */
    odbc_SQLSrvr_Execute_ts_res_(objtag_,
                                 call_id_,
                                 returnCode,
                                 sqlWarningOrErrorLength,
                                 sqlWarningOrError,
                                 rowsAffected,
                                 sqlQueryType,
                                 estimatedCost,
                                 outValuesLength,   // for exec2
                                 outValues,         // for exec2
                                 outputDescLength,  // for execdirect calls
                                 outputDesc,        // for execdirect calls
                                 stmtHandle,        // for SPJ result sets
                                 (pSrvrStmt != NULL) ?
pSrvrStmt->myKey : 0
                                 );

    if (srvrGlobal->traceLogger != NULL)
    {
        srvrGlobal->traceLogger->TraceExecDirectExit(ExecDirect_exception_, estimatedCost, outputItemDescList, rowsAffected, sqlWarning);
    }

    START_CONN_IDLE_TIMER

    SRVRTRACE_EXIT(FILE_AME+23);

#ifdef PERF_TEST
    perf->clockIt("SQLSrvr_ExecDirect_ame_END", true);
#endif
} /* odbc_SQLSrvr_ExecDirect_ame_() */

/* AME entry point for Execute2: executes an already-prepared statement
   (identified by stmtHandle) and replies to the driver. */
extern "C" void
odbc_SQLSrvr_Execute2_ame_(
    /* In */ CEE_tag_def objtag_
  , /* In */ const CEE_handle_def *call_id_
  , /* In */ DIALOGUE_ID_def dialogueId
  , /* In */ IDL_long sqlAsyncEnable
  , /* In */ IDL_long queryTimeout
  , /* In */ IDL_long inputRowCnt
  , /* In */ IDL_long sqlStmtType
  , /* In */ Long stmtHandle
  , /* In */ IDL_string cursorName
  , /* In */ IDL_long cursorCharset
  , /* In */ IDL_long inValuesLength
  , /* In */ BYTE *inValues
  , /* In */ IDL_long sqlQueryType     // Used with execdirect. Execdirect will call prepare/execute. This is one of the output params from prepare
  , /* In */ IDL_long outputDescLength // Used with execdirect. Execdirect will call prepare/execute. This is one of the output params from prepare
  , /* In */ BYTE *outputDesc          // Used with execdirect. Execdirect will call prepare/execute. This is one of the output params from prepare
  , /* In */ IDL_long rowLength        // For DBT to obtain the Rowlength
  , /* In */ IDL_long_long txnID       // T4 driver sends a transaction ID which we need to join
  , /* In */ IDL_long holdableCursor
  )
{
#ifdef PERF_TEST
    perf->clockIt("SQLSrvr_Execute2_ame_START", true);
#endif
    SRVRTRACE_ENTER(FILE_AME+19);

    IDL_long  returnCode              = SQL_SUCCESS;
    IDL_long  sqlWarningOrErrorLength = 0;
    BYTE     *sqlWarningOrError       = NULL;
    IDL_long  rowsAffected            = 0;
    IDL_long  estimatedCost           = 0;
    IDL_long  outValuesLength         = 0;
    BYTE     *outValues               = NULL;

    SQLItemDescList_def outputItemDescList = {0,0};
    SQLValueList_def    outputValueList    = {0,0};
    ERROR_DESC_LIST_def sqlWarning         = {0,0};

    char errorText[512];
    IDL_long cursorLength = (cursorName != NULL) ?
strlen(cursorName) : 0;

    odbc_SQLSvc_ExecDirect_exc_  ExecDirect_exception_={0,0,0};
    odbc_SQLSvc_ExecuteCall_exc_ ExecCall_exception_={0,0,0};

    IDL_long paramCount = 0;
    RETCODE  rc = 0;
    bool isStatusRowsetDelayed = false;
    bool noRepository = true;

    SRVR_STMT_HDL *pSrvrStmt = (SRVR_STMT_HDL *)stmtHandle;

    if (srvrGlobal->traceLogger != NULL)
    {
        srvrGlobal->traceLogger->TraceExecute2Enter(dialogueId, sqlAsyncEnable, queryTimeout, inputRowCnt,
            sqlStmtType, stmtHandle, cursorLength, cursorName, cursorCharset, inValuesLength, inValues);
    }

    if(pSrvrStmt == NULL)
    {
        returnCode = SQL_ERROR;
        GETMXCSWARNINGORERROR(-1, "HY000", "Invalid Statement Handle.", &sqlWarningOrErrorLength, sqlWarningOrError);
    }

    DESTROY_CONN_IDLE_TIMER

    if (pSrvrStmt != NULL)
    {
        paramCount = pSrvrStmt->paramCount;
        if ( pSrvrStmt->sqlQueryType == SQL_RWRS_SPECIAL_INSERT)
            pSrvrStmt->maxRowLen = rowLength;
    }

//LCOV_EXCL_START
    // To improve the throughput as the server gets the first rowset, it returns back a success
    // code back to the application before it processes the rowset. The application can then send the
    // second rowset to the driver. By doing this, both the server and driver are always busy by
    // piggybacking the messages back and forth. Because the server and driver are always busy, the
    // application will always get the status error delayed by one rowset. For example, the application
    // sends 4 rowset of 10 rows. The first rowset will get all success back, then the second rowset
    // will get the status array for the first rowset. The second status array may have success,
    // warning, and errors for first rowset. Then the third rowset will have status array for second
    // rowset and so on. The last rowset will be a dummy to get the last status error for the previous
    // rowset which is the rowset with the valid data.
if (srvrGlobal->drvrVersion.buildId & STREAMING_MODE || srvrGlobal->drvrVersion.buildId & STREAMING_DELAYEDERROR_MODE)
    {
        if(pSrvrStmt == NULL)
        {
            returnCode = SQL_ERROR;
            GETMXCSWARNINGORERROR(-1, "HY000", "Statement Label not found.", &sqlWarningOrErrorLength, sqlWarningOrError);
        }
        else
        {
            if(srvrGlobal->drvrVersion.buildId & STREAMING_DELAYEDERROR_MODE)
            {
                /* Delayed-error protocol: reply with the status saved on the
                   statement from the PREVIOUS rowset (see comment above). */
                sqlWarningOrErrorLength = pSrvrStmt->sqlWarningOrErrorLength;
                sqlWarningOrError = pSrvrStmt->sqlWarningOrError;
                rowsAffected = pSrvrStmt->rowsAffected;
                estimatedCost = pSrvrStmt->rowsAffectedHigherBytes; // combine both rowsAffected and rowsAffectedHigherBytes as __int64 when interface between drvr/srvr changes
                outValuesLength = pSrvrStmt->outputDescVarBufferLen;
                outValues = pSrvrStmt->outputDescVarBuffer;
                returnCode = pSrvrStmt->returnCodeForDelayedError;
            }
            paramCount = pSrvrStmt->paramCount;
        }

        if(srvrGlobal->drvrVersion.buildId & STREAMING_DELAYEDERROR_MODE)
        {
            /* Reply immediately so the driver can pipeline the next rowset. */
            odbc_SQLSrvr_Execute_ts_res_(objtag_,
                                         call_id_,
                                         returnCode,
                                         sqlWarningOrErrorLength,
                                         sqlWarningOrError,
                                         rowsAffected,
                                         sqlQueryType,
                                         estimatedCost,
                                         outValuesLength,   // for exec2
                                         outValues,         // for exec2
                                         outputDescLength,  // for execdirect calls
                                         outputDesc,        // for execdirect calls
                                         stmtHandle,        // for SPJ result sets
                                         (pSrvrStmt != NULL) ?
pSrvrStmt->myKey : 0
                                         );
            isStatusRowsetDelayed = true;
            returnCode = SQL_SUCCESS;
        }
    }
//LCOV_EXCL_STOP

    if(returnCode != SQL_SUCCESS)
    {
        // do nothing
    }
    else if (srvrGlobal != NULL && srvrGlobal->srvrType == CORE_SRVR)
    {
        if (srvrGlobal->srvrState == SRVR_CONNECTED)
        {
            if (dialogueId != srvrGlobal->dialogueId)
            {
//LCOV_EXCL_START
                returnCode = SQL_ERROR;
                GETMXCSWARNINGORERROR(-1, "HY000", "Invalid Connection.", &sqlWarningOrErrorLength, sqlWarningOrError);
//LCOV_EXCL_STOP
            }
            else
            {
                if (srvrGlobal->isShapeLoaded == false)
                {
                    bool bExecute2withRowsets = true;
                    bool bExecute2 = true;

                    /* Same rowset-vs-plain selection as in ExecDirect above. */
                    bExecute2withRowsets = ((inputRowCnt > 1) || ((inputRowCnt==1) && (pSrvrStmt->preparedWithRowsets))) && (pSrvrStmt->sqlQueryType != SQL_RWRS_SPECIAL_INSERT);

                    bExecute2 = (inputRowCnt > 0) || sqlQueryType != SQL_UNKNOWN || ( inputRowCnt == 0 && pSrvrStmt != NULL && (pSrvrStmt->sqlQueryType != SQL_INSERT_UNIQUE && pSrvrStmt->sqlQueryType != SQL_INSERT_NON_UNIQUE));

                    DO_WouldLikeToExecute(NULL, stmtHandle, &returnCode, &sqlWarningOrErrorLength, sqlWarningOrError);
                    if (returnCode == SQL_SUCCESS && pSrvrStmt != NULL)
                    {
                        if(bExecute2withRowsets)
                        {
                            odbc_SQLSvc_Execute2withRowsets_sme_(objtag_, call_id_, dialogueId, sqlAsyncEnable, queryTimeout,
                                inputRowCnt, sqlStmtType, stmtHandle, cursorLength, cursorName, cursorCharset, holdableCursor,
                                inValuesLength, inValues, &returnCode, &sqlWarningOrErrorLength, sqlWarningOrError,
                                &rowsAffected, &outValuesLength, outValues);
                        }
                        else
                        {
                            if(bExecute2)
                                odbc_SQLSvc_Execute2_sme_(objtag_, call_id_, dialogueId, sqlAsyncEnable, queryTimeout,
                                    inputRowCnt, sqlStmtType, stmtHandle, cursorLength, cursorName, cursorCharset, holdableCursor,
                                    inValuesLength, inValues, &returnCode, &sqlWarningOrErrorLength, sqlWarningOrError,
                                    &rowsAffected, &outValuesLength, outValues);
                        }
                        if (pSrvrStmt->m_need_21036_end_msg == true)
                            noRepository = false;
                    }
                } // srvrGlobal->isShapeLoaded == false
            }
        }
        else
        {
            returnCode = SQL_ERROR;
            GETMXCSWARNINGORERROR(-1, "HY000", "Invalid Connection.",
&sqlWarningOrErrorLength, sqlWarningOrError);
        }
    }
    else
    {
        odbc_SQLSvc_Execute2_sme_(objtag_, call_id_, dialogueId, sqlAsyncEnable, queryTimeout,
            inputRowCnt, sqlStmtType, stmtHandle, cursorLength, cursorName, cursorCharset, holdableCursor,
            inValuesLength, inValues, &returnCode, &sqlWarningOrErrorLength, sqlWarningOrError,
            &rowsAffected, &outValuesLength, outValues);
        if (pSrvrStmt->m_need_21036_end_msg == true)
            noRepository = false;
    }

    qrysrvc_ExecuteFinished(NULL,stmtHandle, true, returnCode, false);

    if (noRepository == false)
    {
        if ((resStatStatement != NULL) && (pSrvrStmt->stmtType == EXTERNAL_STMT)) // if statement is on
/*
            resStatStatement->endRepository(pSrvrStmt->inState,
                pSrvrStmt->sqlQueryType,
                pSrvrStmt->sqlString,
                pSrvrStmt->isClosed,
                pSrvrStmt->cost_info,
                pSrvrStmt->comp_stats_info,
                &pSrvrStmt->m_need_21036_end_msg,
                sqlWarningOrErrorLength,
                sqlWarningOrError);
*/
            resStatStatement->endRepository(pSrvrStmt,
                sqlWarningOrErrorLength,
                sqlWarningOrError,
                false);
    }

//LCOV_EXCL_START
    // To improve the throughput as the server gets the first rowset, it returns back a success
    // code back to the application before it processes the rowset. The application can then send the
    // second rowset to the driver. By doing this, both the server and driver are always busy by
    // piggybacking the messages back and forth. Because the server and driver are always busy, the
    // application will always get the status error delayed by one rowset. For example, the application
    // sends 4 rowset of 10 rows. The first rowset will get all success back, then the second rowset
    // will get the status array for the first rowset. The second status array may have success,
    // warning, and errors for first rowset. Then the third rowset will have status array for second
    // rowset and so on. The last rowset will be a dummy to get the last status error for the previous
    // rowset which is the rowset with the valid data.
if (!isStatusRowsetDelayed)
    {
        if (pSrvrStmt != NULL)
            estimatedCost = pSrvrStmt->rowsAffectedHigherBytes; // combine both rowsAffected and rowsAffectedHigherBytes as __int64 when interface between drvr/srvr changes

        /* Normal (non-delayed) reply for this execute. */
        odbc_SQLSrvr_Execute_ts_res_(objtag_,
                                     call_id_,
                                     returnCode,
                                     sqlWarningOrErrorLength,
                                     sqlWarningOrError,
                                     rowsAffected,
                                     sqlQueryType,
                                     estimatedCost,
                                     outValuesLength,   // for exec2
                                     outValues,         // for exec2
                                     outputDescLength,  // for execdirect calls
                                     outputDesc,        // for execdirect calls
                                     stmtHandle,        // for SPJ result sets
                                     (pSrvrStmt != NULL) ? pSrvrStmt->myKey : 0
                                     );
    }
//LCOV_EXCL_STOP

    if (srvrGlobal->traceLogger != NULL)
    {
        srvrGlobal->traceLogger->TraceExecute2Exit(returnCode, sqlWarningOrErrorLength, sqlWarningOrError, rowsAffected, outValuesLength, outValues);
    }

    START_CONN_IDLE_TIMER

    SRVRTRACE_EXIT(FILE_AME+19);
#ifdef PERF_TEST
    perf->clockIt("SQLSrvr_Execute2_ame_END", true);
#endif
    return;
} /* odbc_SQLSrvr_Execute2_ame_() */

/*
 * Asynchronous method function prototype for
 * operation 'odbc_SQLSvc_SetConnectionOption'
 */
extern "C" void
odbc_SQLSrvr_SetConnectionOption_ame_(
    /* In */ CEE_tag_def objtag_
  , /* In */ const CEE_handle_def *call_id_
  , /* In */ DIALOGUE_ID_def dialogueId
  , /* In */ IDL_short connectionOption
  , /* In */ IDL_long optionValueNum
  , /* In */ IDL_string optionValueStr)
{
    SRVRTRACE_ENTER(FILE_AME+21);

    odbc_SQLSvc_SetConnectionOption_exc_ exception_={0,0,0};
    ERROR_DESC_LIST_def sqlWarning = {0,0};

    if (srvrGlobal->traceLogger != NULL)
    {
        srvrGlobal->traceLogger->TraceConnectOptionEnter(dialogueId, connectionOption, optionValueNum, optionValueStr);
    }

    DESTROY_CONN_IDLE_TIMER

    if (srvrGlobal != NULL && srvrGlobal->srvrType == CORE_SRVR)
    {
        if (srvrGlobal->srvrState == SRVR_CONNECTED)
        {
            if (dialogueId != srvrGlobal->dialogueId)
                exception_.exception_nr = odbc_SQLSvc_SetConnectionOption_InvalidConnection_exn_;
            else
                odbc_SQLSvc_SetConnectionOption_sme_(objtag_, call_id_, &exception_, dialogueId, connectionOption, optionValueNum, optionValueStr,
&sqlWarning);
        }
        else
            exception_.exception_nr = odbc_SQLSvc_SetConnectionOption_InvalidConnection_exn_;
    }
    else
        odbc_SQLSvc_SetConnectionOption_sme_(objtag_, call_id_, &exception_, dialogueId, connectionOption, optionValueNum, optionValueStr, &sqlWarning);

    odbc_SQLSrvr_SetConnectionOption_ts_res_(objtag_, call_id_, &exception_, &sqlWarning);

    if (srvrGlobal->traceLogger != NULL)
    {
        srvrGlobal->traceLogger->TraceConnectOptionExit(exception_, sqlWarning);
    }

    START_CONN_IDLE_TIMER

    SRVRTRACE_EXIT(FILE_AME+21);
} // odbc_SQLSrvr_SetConnectionOption_ame_()

/* AME entry point for EndTransaction (commit/rollback per transactionOpt). */
extern "C" void
odbc_SQLSrvr_EndTransaction_ame_(
    /* In */ CEE_tag_def objtag_
  , /* In */ const CEE_handle_def *call_id_
  , /* In */ DIALOGUE_ID_def dialogueId
  , /* In */ IDL_unsigned_short transactionOpt
  )
{
    SRVRTRACE_ENTER(FILE_AME+20);

    odbc_SQLSvc_EndTransaction_exc_ exception_={0,0,0};
    ERROR_DESC_LIST_def sqlWarning = {0,0};
    RETCODE rc = 0;

    if (srvrGlobal->traceLogger != NULL)
    {
        srvrGlobal->traceLogger->TraceEndTransactEnter(dialogueId, transactionOpt);
    }

    DESTROY_CONN_IDLE_TIMER

    if (srvrGlobal != NULL && srvrGlobal->srvrType == CORE_SRVR)
    {
        if (srvrGlobal->srvrState == SRVR_CONNECTED)
        {
            if (dialogueId != srvrGlobal->dialogueId)
                exception_.exception_nr = odbc_SQLSvc_EndTransaction_InvalidConnection_exn_;
            else
                odbc_SQLSvc_EndTransaction_sme_(objtag_, call_id_, &exception_, dialogueId, transactionOpt, &sqlWarning);
        }
        else
            exception_.exception_nr = odbc_SQLSvc_EndTransaction_InvalidConnection_exn_;
    }
    else
        odbc_SQLSvc_EndTransaction_sme_(objtag_, call_id_, &exception_, dialogueId, transactionOpt, &sqlWarning);

    odbc_SQLSrvr_EndTransaction_ts_res_(objtag_, call_id_, &exception_, &sqlWarning);

    if (srvrGlobal->traceLogger != NULL)
    {
        srvrGlobal->traceLogger->TraceEndTransactExit(exception_, sqlWarning);
    }

    START_CONN_IDLE_TIMER

    SRVRTRACE_EXIT(FILE_AME+20);
    return;
} // odbc_SQLSrvr_EndTransaction_ame_()

//LCOV_EXCL_START
//#endif /* NSK_CLPS_LIB */
//LCOV_EXCL_STOP

/* Runs "showcontrol default schema" and parses its output to capture the
   session's default catalog and schema into srvrGlobal. */
void getCurrentCatalogSchema()
{
    short Index;
    char sqlQuery[] =
"showcontrol default schema";
    SQLRETURN iqqcode = SQL_SUCCESS;
    SRVR_STMT_HDL *QrySrvrStmt = NULL;

    /* Expected output shape:
       "Current DEFAULTS"
       "  CATALOG NEO"
       "  SCHEMA USR" */
    //"Current DEFAULTS"
    //" CATALOG NEO"
    //" SCHEMA USR"
    //char Defaults[] = "Current DEFAULTS ";
    char Defaults[] = "Current DEFAULTS";
    char CatPatern[] = "CATALOG";
    char SchPatern[] = "SCHEMA";
    short lenDef = sizeof(Defaults) - 1;
    short lenCat = sizeof(CatPatern) - 1;
    short lenSch = sizeof(SchPatern) - 1;
    char* ptr;
    unsigned long len;
    char seps[] = " \t\n";
    char *token;
    char *saveptr;
    char temp[300];
    unsigned long templen = 0;
    ;
    // showcontrol default schema;
    if ((QrySrvrStmt = getSrvrStmt("STMT_QRYSTS_ON_1", TRUE)) == NULL)
        return;
    Index = 0;
    iqqcode = QrySrvrStmt->ExecDirect(NULL, sqlQuery, EXTERNAL_STMT, TYPE_SELECT, SQL_ASYNC_ENABLE_OFF, 0);
    if (iqqcode != SQL_ERROR)
    {
        while((iqqcode = QrySrvrStmt->FetchPerf(1, 0, SQL_ASYNC_ENABLE_OFF, 0)) != SQL_NO_DATA_FOUND && iqqcode != SQL_ERROR)
        {
            /* Row format appears to carry a length in bytes 1..2 of the value
               header, followed by the text at offset 3 -- TODO confirm against
               outputDataValue layout. */
            ptr = (char*)QrySrvrStmt->outputDataValue._buffer;
            len = *(unsigned long*)ptr;
            len &= 0x00FFFF00;
            len >>= 8;
            ptr += 3;
            if (len == 0) continue;
            switch(Index)
            {
            case 0:
                // Skip rows until the "Current DEFAULTS" header is seen.
                if (memcmp(ptr, Defaults, lenDef) == 0)
                    Index = 1;
                break;
            case 1:
                /* NOTE(review): len can be up to 0xFFFF but temp is only 300
                   bytes -- memcpy is unbounded here; verify maximum row width. */
                memcpy(temp,ptr,len);
                temp[len] = '\0';
                token = strtok_r(temp, seps, &saveptr);
                if (token != NULL)
                {
                    if (memcmp(token, CatPatern, lenCat) == 0)
                    {
                        token = strtok_r(NULL, seps, &saveptr);
                        if (token != NULL)
                        {
                            bzero(srvrGlobal->DefaultCatalog, sizeof(srvrGlobal->DefaultCatalog));
                            templen = token - temp;
                            len = _min((temp+len)-token, sizeof(srvrGlobal->DefaultCatalog)-1);
                            memcpy(srvrGlobal->DefaultCatalog, ptr+templen, len);
                        }
                    }
                    else if (memcmp(token, SchPatern, lenSch) == 0)
                    {
                        token = strtok_r(NULL, seps, &saveptr);
                        if (token != NULL)
                        {
                            bzero(srvrGlobal->DefaultSchema, sizeof(srvrGlobal->DefaultSchema));
                            templen = token - temp;
                            len = _min((temp+len)-token, sizeof(srvrGlobal->DefaultSchema)-1);
                            memcpy(srvrGlobal->DefaultSchema, ptr+templen, len);
                        }
                    }
                }
                break;
            }
        }
    }
    QrySrvrStmt->Close(SQL_DROP);
}

void flushCollectors()
{
}

/* Case-insensitive prefix compare of the first ilen characters (ASCII-letter
   folding via the 0x20 bit; adequate for the keyword matching done here). */
static bool strincmp(char* in, char*
out, short ilen)
{
    short i = 0;
    char* iin = in;
    char* oout = out;
    char ich;
    char och;
    while (*iin != '\0' && i++ < ilen)
    {
        ich = *iin++;
        och = *oout++;
        /* NOTE(review): OR-ing 0x20 folds ASCII letter case but also equates
           some punctuation pairs (e.g. '@'/'`'); fine for the fixed keywords
           compared in this file. */
        if ((ich | 0x20) != (och | 0x20))
            return false;
    }
    return true;
}

/* Returns true when sqlString is exactly "INFO SYSTEM" (optionally followed
   by ';' or whitespace). */
bool checkSyntaxInfoSystem(char* sqlString)
{
    char* in = sqlString;
    while (*in != '\0' && isspace(*in)) in++; // Skip the leading blanks
    if (strincmp(in,"INFO",4) == false)
        return false;
    in += 4;
    if (*in == '\0' || false == isspace(*in))
        return false;
    while (*in != '\0' && isspace(*in)) in++; // Skip the leading blanks
    if (strincmp(in,"SYSTEM",6) == false)
        return false;
    in += 6;
    if (*in != '\0' && *in != ';' && false == isspace(*in))
        return false;
    return true;
}

/* If sqlString is "INFO SYSTEM", rewrites it to a SELECT returning server
   time, NDCS version and timezone info; error is set to 1 when the statement
   handle cannot be found. NOTE(review): the rewritten text lives in a static
   buffer, so this is not reentrant. */
bool isInfoSystem(char*& sqlString, const IDL_char *stmtLabel, short& error)
{
    if (false == checkSyntaxInfoSystem(sqlString))
        return false;

    error = 0;
    static char buffer[4000];
    char* in = sqlString;
    SRVR_STMT_HDL *pSrvrStmt = NULL;

    // get Timezone and GMT offset
    time_t tim = time(NULL);
    struct tm *now = localtime(&tim);

    char pattern[] = "SELECT [first 1]"
                     "current_timestamp as \"CURRENT_TIME\","
                     "'%s' as \"NDCS_VERSION\","
                     "'%s' as \"TM_ZONE\","
                     "'%d' as \"TM_GMTOFF_SEC\""
//                   "FROM hp_system_catalog.mxcs_schema.datasources FOR READ UNCOMMITTED ACCESS;";
                     "FROM (values(1)) X(A);";

    sprintf (buffer, pattern, ndcs_vers_str(), now->tm_zone, now->tm_gmtoff);

    // other comments:
    // the repository view does not exist - maybe a M6 item
    // platform version: SCM have anything to return just the platform version.
    // its tagged on at the end of the version string for each component (the bits after 'Release'
    // ex: mxosrvr Version 1.0.1 Release 5.0.0 (Build release [5939], date 03Apr11)

    if (stmtLabel != NULL && stmtLabel[0] != 0)
        pSrvrStmt = SRVR::getSrvrStmt(stmtLabel, TRUE);
    if (pSrvrStmt == NULL)
    {
        error = 1;
        return true;
    }
    pSrvrStmt->m_bSkipWouldLikeToExecute = true;

    sqlString = buffer;

    return true;
}

/* Returns true when sqlString starts with "INFO OBJECT <name>"; idx receives
   the offset of the object name within sqlString. */
bool checkSyntaxInfoObject(char* sqlString, short &idx)
{
    char* in = sqlString;
    while (*in != '\0' && isspace(*in)) in++; // Skip the leading blanks
    if (strincmp(in,"INFO",4) == false)
        return false;
    in += 4;
    if (*in == '\0' || false == isspace(*in))
        return false;
    while (*in != '\0' && isspace(*in)) in++; // Skip the leading blanks
    if (strincmp(in,"OBJECT",6) == false)
        return false;
    in += 6;
    while (*in != '\0' && isspace(*in)) in++; // Skip the leading blanks
    if (*in == '\0' || *in == ';')
        return false;
    idx = in - sqlString;
    if(idx <= 0 || idx >= strlen(sqlString))
        return false;
    return true;
}

// This method checks if sqlString starts with "INFO OBJECT <object_name>" and does a SQL
// invoke on the SQL object passed with the command.
bool isInfoObject(char*& sqlString, const IDL_char *stmtLabel, short& error ) { short idx = 0; // idx will contain the index in the sqlString where the table name will start if (false == checkSyntaxInfoObject(sqlString, idx)) return false; static char buffer[512]; char pattern[] = "INVOKE %s;"; char* in = sqlString; SRVR_STMT_HDL *pSrvrStmt = NULL; char *pStr = sqlString+idx; sprintf (buffer, pattern, pStr); if (stmtLabel != NULL && stmtLabel[0] != 0) pSrvrStmt = SRVR::getSrvrStmt(stmtLabel, TRUE); if (pSrvrStmt == NULL) { error = 1; return true; } pSrvrStmt->m_bSkipWouldLikeToExecute = true; sqlString = buffer; return true; } bool checkSyntaxGetPrivileges(char* sqlString, short& idx) { char* in = sqlString; while (*in != '\0' && isspace(*in)) in++; // Skip the leading blanks if (strincmp(in,"HPDM_GETPRIVILEGES",18) == false) return false; in += 18; if (*in == '\0' || false == isspace(*in)) return false; while (*in != '\0' && isspace(*in)) in++; // Skip the leading blanks idx = (short)(in - sqlString); if(idx >= strlen(sqlString)) return false; return true; } bool isGetPrivileges(char*& sqlString, const IDL_char *stmtLabel, short& error ) { static char buffer[1000]; char* in = sqlString; SRVR_STMT_HDL *pSrvrStmt = NULL; char temp[4]; short idx; if (false == checkSyntaxGetPrivileges(sqlString, idx)) return false; strcpy(buffer, &sqlString[idx]); if (stmtLabel != NULL && stmtLabel[0] != 0) pSrvrStmt = SRVR::getSrvrStmt(stmtLabel, TRUE); if (pSrvrStmt == NULL) { error = 1; return true; } //pSrvrStmt->m_bSkipWouldLikeToExecute = true; pSrvrStmt->querySpl = SPEC_QUERY_IMMEDIATE; sqlString = buffer; return true; } // Valid Syntax: // INFO DISK [ALL | <disk_name>] bool checkSyntaxInfoDisk(char* sqlString, char *diskName) { char* in = sqlString; while (*in != '\0' && isspace(*in)) in++; // Skip the leading blanks if (strincmp(in,"INFO",4) == false) return false; in += 4; if (*in == '\0' || false == isspace(*in)) return false; while (*in != '\0' && isspace(*in)) in++; // 
Skip the leading blanks if (strincmp(in,"DISK",4) == false) return false; in += 4; if (*in == '\0') return true; if (false == isspace(*in)) return false; while (*in != '\0' && isspace(*in)) in++; // Skip the leading blanks if (*in == '\0') return true; if (strincmp(in,"ALL",3) == true) { strcpy( diskName, "ALL" ); in += 3; } else // If a disk name is provided then use that { short i=0; while (*in != '\0' && !isspace(*in)) { diskName[i] = *in; in++; i++; } } if (*in != '\0' && *in != ';' && false == isspace(*in)) return false; return true; } bool isInfoDisk(char*& sqlString, const IDL_char *stmtLabel, short& error, char *errBuf ) { static char buffer[1000]; static char * str = NULL; static int strSize = 0; int newSize = 0; char diskName[MS_MON_MAX_PROCESS_NAME +1]; memset(diskName, '\x0', sizeof(diskName)); if (false == checkSyntaxInfoDisk(sqlString, diskName)) return false; static char pattern[] = "SELECT " "DISK_NAME," "CAPACITY," "FREESPACE " "FROM(VALUES(" "CAST('%s' as VARCHAR(50) CHARACTER SET UTF8)," "CAST(%lld AS LARGEINT)," "CAST(%lld AS LARGEINT)" ")) " "QTABLE (" "\"DISK_NAME\"," "\"CAPACITY\"," "\"FREESPACE\"" ")"; Int64 capacity, freeSpace; int retCode; SRVR_STMT_HDL *pSrvrStmt = NULL; char *diskBuf = NULL; int numTSE = 0, maxTSELen = 0, diskBufLen = 0; short retryCnt; char volume[MS_MON_MAX_PROCESS_NAME +1]; stringstream ss; stringstream ss1; stringstream ss2; stringstream ss3; // If no disk name specified then default to $SYSTEM if(strlen(diskName) == 0) strcpy(diskName, "$SYSTEM"); if (strincmp(diskName,"ALL",3) == true) { diskBufLen = 16384; // If SQL_EXEC_GetListOfDisks() returns with an error for insufficient diskBuf space // then we'll need to retry with the corrected size returned back in diskBufLen. retryCnt = 3; while( retryCnt-- ) { diskBuf = new (nothrow) char[diskBufLen]; if (diskBuf == NULL) { error = -1; sprintf(errBuf, "Operation Failed. 
isInfoDisk:new diskBuf failed"); goto out; } retCode = SQL_EXEC_GetListOfDisks( diskBuf, &numTSE, &maxTSELen, &diskBufLen); // If error then numTSE and maxTSELen will be populated to calculate // correct diskBuf size if(retCode == -8879) // Error CLI_BUFFER_TOO_SMALL - retry { if(diskBuf != NULL ) { delete [] diskBuf; diskBuf = NULL; } if( retryCnt > 0 ) // Clear diagnostics in case we had got a -8879 error SRVR::WSQL_EXEC_ClearDiagnostics(NULL); } else break; } // end while if( retCode != 0 ) { error = retCode; sprintf(errBuf, "Operation Failed. SQL_EXEC_GetListOfDisks failed with error %d", retCode); goto out; } bool isFirst=true; ss1 << "DISK_NAME,"; ss1 << "CAPACITY,"; ss1 << "FREESPACE "; for (int i= 0; i<numTSE; i++) { strcpy(volume, &diskBuf[i*maxTSELen]); // each dik name will be NULL terminated retCode = SQL_EXEC_GetDiskMaxSize(volume, &capacity, &freeSpace); if( retCode != 0 ) { error = retCode; sprintf(errBuf, "Operation Failed. SQL_EXEC_GetDiskMaxSize failed with error %d", retCode); goto out; } if(isFirst == false) ss2 << ","; ss2 << "("; ss2 << "CAST('"; ss2 << volume; ss2 << "' as VARCHAR(50) CHARACTER SET UTF8),"; ss2 << "CAST("; ss2 << capacity; ss2 << " AS LARGEINT),"; ss2 << "CAST("; ss2 << freeSpace; ss2 << " AS LARGEINT)"; ss2 << ")"; isFirst = false; } ss3 << "\"DISK_NAME\","; ss3 << "\"CAPACITY\","; ss3 << "\"FREESPACE\""; ss << "SELECT "; ss << ss1.str().c_str(); ss << " FROM(VALUES"; ss << ss2.str().c_str(); ss << ") "; ss << "QTABLE("; ss << ss3.str().c_str(); ss << ")"; // str will not be deleted but instead reused for subsequent calls newSize = ss.str().length() + 1; if( strSize < newSize ) { if( str != NULL ) { delete [] str; str = NULL; } strSize = newSize; str = new char[strSize]; } if (str == NULL) { error = 1; sprintf(errBuf, "Operation Failed. 
isInfoDisk:new str failed"); goto out; } strcpy(str, ss.str().c_str()); sqlString = str; } else { retCode = SQL_EXEC_GetDiskMaxSize(diskName, &capacity, &freeSpace); if( retCode == 0 ) { sprintf (buffer, pattern, diskName, capacity, freeSpace ); sqlString = buffer; } else { error = retCode; sprintf(errBuf, "Operation Failed. SQL_EXEC_GetDiskMaxSize failed with error %d", retCode); goto out; } } if (stmtLabel != NULL && stmtLabel[0] != 0) pSrvrStmt = SRVR::getSrvrStmt(stmtLabel, TRUE); if (pSrvrStmt == NULL) { error = -1; sprintf(errBuf, "Operation Failed. SRVR::getSrvrStmt failed"); goto out; } pSrvrStmt->m_bSkipWouldLikeToExecute = true; out: if(diskBuf != NULL ) { delete [] diskBuf; diskBuf = NULL; } return true; } bool updateZKState(DCS_SERVER_STATE currState, DCS_SERVER_STATE newState) { int rc = ZOK; stringstream ss; Stat stat; bool zk_error = false; string nodeToCheck; char realpath[1024]; char zkErrStr[2048]; char zkData[256]; char state[32]; int zkDataLen = sizeof(zkData); if( currState == CONNECTING && newState == CONNECTED ) { struct sockaddr_in clientaddr; socklen_t addrlen = sizeof(clientaddr); char str[INET6_ADDRSTRLEN]; char s_port[10]; rc = zoo_exists(zh, dcsRegisteredNode.c_str(), 0, &stat); if( rc == ZOK ) { /* // Get the dialogue ID from the data part of connecting znode rc = zoo_get(zh, dcsRegisteredNode.c_str(), false, zkData, &zkDataLen, &stat); if( rc != ZOK ) { zk_error = true; sprintf(zkErrStr, "***** zoo_get() for %s failed with error %d",dcsRegisteredNode.c_str(), rc); goto bailout; } // The first token should be CONNECTING state char *tkn = NULL; tkn = strtok(zkData, ":"); if( tkn == NULL || stricmp(tkn, "CONNECTING") ) { zk_error = true; sprintf(zkErrStr, "***** State not in CONNECTING. 
State: %s", tkn); goto bailout; } // Skip second token - Timestamp tkn = strtok(NULL, ":"); // Third token in data is dialogue ID srvrGlobal->dialogueId = -1; tkn = strtok(NULL, ":"); if( tkn != NULL ) srvrGlobal->dialogueId = atoi(tkn); if( tkn == NULL || srvrGlobal->dialogueId == -1 ) { zk_error = true; sprintf(zkErrStr, "***** Connecting state dialogue ID not found"); goto bailout; } */ string connectedSrvrData; getpeername (sdconn, (struct sockaddr *) &clientaddr, &addrlen); ss.str(""); if (inet_ntop(AF_INET, &clientaddr.sin_addr, str, sizeof(str))){ sprintf(s_port, "%d", ntohs(clientaddr.sin_port)); ss << "CONNECTED" << ":" << JULIANTIMESTAMP() << ":" << srvrGlobal->dialogueId << ":" << regSrvrData << ":" << srvrGlobal->ClientComputerName << ":" << str << ":" << s_port << ":" << srvrGlobal->ApplicationName << ":"; } else { ss << "CONNECTED" << ":" << JULIANTIMESTAMP() << ":" << srvrGlobal->dialogueId << ":" << regSrvrData << ":" << srvrGlobal->ClientComputerName << ":" << strerror(errno) << ":" << errno << ":" << srvrGlobal->ApplicationName << ":"; } string data(ss.str()); rc = zoo_set(zh, dcsRegisteredNode.c_str(), data.c_str(), data.length(), -1); if( rc != ZOK ) { zk_error = true; sprintf(zkErrStr, "***** zoo_set() failed for %s with error %d", dcsRegisteredNode.c_str(), rc); goto bailout; } else srvrGlobal->dcsCurrState = CONNECTED; } else { zk_error = true; sprintf(zkErrStr, "***** zoo_exists() for %s failed with error %d",dcsRegisteredNode.c_str(), rc); goto bailout; } } else if( currState == CONNECTING && newState == AVAILABLE ) // A Connection failure { rc = zoo_exists(zh, dcsRegisteredNode.c_str(), 0, &stat); if( rc == ZOK ) { ss.str(""); ss << "AVAILABLE" << ":" << JULIANTIMESTAMP() << ":" // Dialogue ID << ":" << regSrvrData << ":" // Client computer name << ":" // Client address << ":" // Client port << ":" // Client Appl name << ":"; string data(ss.str()); rc = zoo_set(zh, dcsRegisteredNode.c_str(), data.c_str(), data.length(), -1); if( rc != ZOK 
) { zk_error = true; sprintf(zkErrStr, "***** zoo_set() failed for %s with error %d", dcsRegisteredNode.c_str(), rc); goto bailout; } else srvrGlobal->dcsCurrState = AVAILABLE; } else { zk_error = true; sprintf(zkErrStr, "***** zoo_exists() for %s failed with error %d",dcsRegisteredNode.c_str(), rc); goto bailout; } } else if( currState == CONNECTED && newState == AVAILABLE) // Move from connected to available { // Fix for bug #1315537 - ZK dialogue ID mismatch. // Added check to not set to AVAILABLE if already in that state in case a break dialogue is called after a terminate dialogue. if( srvrGlobal->dcsCurrState == AVAILABLE ) return true; rc = zoo_exists(zh, dcsRegisteredNode.c_str(), 0, &stat); if( rc == ZOK ) { ss.str(""); ss << "AVAILABLE" << ":" << JULIANTIMESTAMP() << ":" // Dialogue ID << ":" << regSrvrData << ":" // Client computer name << ":" // Client address << ":" // Client port << ":" // Client Appl name << ":"; string data(ss.str()); rc = zoo_set(zh, dcsRegisteredNode.c_str(), data.c_str(), data.length(), -1); if( rc != ZOK ) { zk_error = true; sprintf(zkErrStr, "***** zoo_set() failed for %s with error %d", dcsRegisteredNode.c_str(), rc); goto bailout; } else srvrGlobal->dcsCurrState = AVAILABLE; } else { zk_error = true; sprintf(zkErrStr, "***** zoo_exists() for %s failed with error %d",dcsRegisteredNode.c_str(), rc); goto bailout; } } else if( currState == CONNECTING && (newState == CONNECT_FAILED || newState == CONNECT_REJECTED) ) // A Connection failure { rc = zoo_exists(zh, dcsRegisteredNode.c_str(), 0, &stat); if( rc == ZOK ) { ss.str(""); if (newState == CONNECT_FAILED) ss << "CONNECT_FAILED" << ":" << JULIANTIMESTAMP() << ":" // Dialogue ID << srvrGlobal->dialogueId << ":" << regSrvrData << ":" // Client computer name << ":" // Client address << ":" // Client port << ":" // Client Appl name << ":"; else ss << "CONNECT_REJECTED" << ":" << JULIANTIMESTAMP() << ":" // Dialogue ID << srvrGlobal->dialogueId << ":" << regSrvrData << ":" // 
Client computer name << ":" // Client address << ":" // Client port << ":" // Client Appl name << ":"; string data(ss.str()); rc = zoo_set(zh, dcsRegisteredNode.c_str(), data.c_str(), data.length(), -1); if( rc != ZOK ) { zk_error = true; sprintf(zkErrStr, "***** zoo_set() failed for %s with error %d", dcsRegisteredNode.c_str(), rc); goto bailout; } else { if (newState == CONNECT_FAILED) srvrGlobal->dcsCurrState = CONNECT_FAILED; else if (newState == CONNECT_REJECTED) srvrGlobal->dcsCurrState = CONNECT_REJECTED; } } else { zk_error = true; sprintf(zkErrStr, "***** zoo_exists() for %s failed with error %d",dcsRegisteredNode.c_str(), rc); goto bailout; } } else if( (currState == CONNECT_FAILED || currState == CONNECT_REJECTED) && newState == AVAILABLE) // Move from connected to available { rc = zoo_exists(zh, dcsRegisteredNode.c_str(), 0, &stat); if( rc == ZOK ) { ss.str(""); ss << "AVAILABLE" << ":" << JULIANTIMESTAMP() << ":" // Dialogue ID << ":" << regSrvrData << ":" // Client computer name << ":" // Client address << ":" // Client port << ":" // Client Appl name << ":"; string data(ss.str()); rc = zoo_set(zh, dcsRegisteredNode.c_str(), data.c_str(), data.length(), -1); if( rc != ZOK ) { zk_error = true; sprintf(zkErrStr, "***** zoo_set() failed for %s with error %d", dcsRegisteredNode.c_str(), rc); goto bailout; } else srvrGlobal->dcsCurrState = AVAILABLE; } else { zk_error = true; sprintf(zkErrStr, "***** zoo_exists() for %s failed with error %d",dcsRegisteredNode.c_str(), rc); goto bailout; } } bailout: if(zk_error) SendEventMsg(MSG_ODBC_NSK_ERROR, EVENTLOG_ERROR_TYPE, 0, ODBCMX_SERVER, srvrGlobal->srvrObjRef, 1, zkErrStr); return (zk_error == ZOK) ? 
true : false; } short DO_WouldLikeToExecute( IDL_char *stmtLabel , Long stmtHandle , IDL_long* returnCode , IDL_long* sqlWarningOrErrorLength , BYTE*& sqlWarningOrError ) { SRVR_STMT_HDL *pSrvrStmt = NULL; if (stmtLabel != NULL && stmtLabel[0] != 0) pSrvrStmt = SRVR::getSrvrStmt(stmtLabel, FALSE); else pSrvrStmt = (SRVR_STMT_HDL *)stmtHandle; if (pSrvrStmt == NULL) return 0; if (srvrGlobal->sqlPlan) getSQLInfo( EXPLAIN_PLAN, stmtHandle, stmtLabel ); if(resStatStatement != NULL) { resStatStatement->wouldLikeToStart_ts = JULIANTIMESTAMP(); resStatStatement->pubStarted = false; resStatStatement->queryFinished = false; } // Update the query status pSrvrStmt->m_state = QUERY_EXECUTING; pQueryStmt = pSrvrStmt; pSrvrStmt->m_bDoneWouldLikeToExecute = true; if ((srvrGlobal->m_bStatisticsEnabled)&&(srvrGlobal->m_statisticsPubType==STATISTICS_AGGREGATED)&&(srvrGlobal->m_iQueryPubThreshold>=0)) { limit_count=0; } return 0; } short qrysrvc_ExecuteFinished( const IDL_char *stmtLabel , const Long stmtHandle , const bool bCheckSqlQueryType , const short error_code , const bool bFetch , const bool bException , const bool bErase ) { SRVR_STMT_HDL *pSrvrStmt = NULL; #define RC_SUCCESS(retcode) \ ((!bException && (retcode == SQL_SUCCESS || retcode == SQL_SUCCESS_WITH_INFO)) || \ ( bException && (retcode == CEE_SUCCESS)) ? 
TRUE : FALSE) if (stmtLabel != NULL && stmtLabel[0] != 0) pSrvrStmt = SRVR::getSrvrStmt(stmtLabel, FALSE); else pSrvrStmt = (SRVR_STMT_HDL *)stmtHandle; if (pSrvrStmt == NULL) return 0; if (pSrvrStmt->m_bDoneWouldLikeToExecute == false) return 0; pSrvrStmt->m_bqueryFinish = true; if (bCheckSqlQueryType) { if (RC_SUCCESS(error_code) && (pSrvrStmt->sqlQueryType == SQL_SELECT_NON_UNIQUE || pSrvrStmt->sqlQueryType == SQL_SELECT_UNIQUE || pSrvrStmt->sqlQueryType == SQL_CALL_WITH_RESULT_SETS || pSrvrStmt->sqlQueryType == SQL_SP_RESULT_SET)) { pSrvrStmt->m_bqueryFinish = false; return 0; } } else if (bFetch) { if (RC_SUCCESS(error_code)) { pSrvrStmt->m_bqueryFinish = false; return 0; } } // Update the query status if (pSrvrStmt->m_bqueryFinish) { pSrvrStmt->m_state = QUERY_COMPLETED; if (pSrvrStmt->sqlWarningOrError) { Int32 sqlError = *(Int32 *)(pSrvrStmt->sqlWarningOrError+8); if (sqlError == -8007) { pSrvrStmt->m_state = QUERY_COMPLETED_BY_ADMIN_SERVER; } } else if (STMTSTAT_CLOSE == pSrvrStmt->inState) { pSrvrStmt->m_state = QUERY_COMPLETED_BY_CLIENT; } } if (resStatStatement != NULL) { resStatStatement->queryFinished = true; resStatStatement->wouldLikeToStart_ts = 0; } pQueryStmt = NULL; return 0; } void sendSessionEnd(std::tr1::shared_ptr<SESSION_END> pSession_info) { REPOS_STATS session_stats; session_stats.m_pSessionStats = pSession_info; session_stats.m_pub_type = PUB_TYPE_SESSION_END; if (record_session_done) { //boost::thread thrd(&SessionWatchDog); pthread_t thrd; pthread_create(&thrd, NULL, SessionWatchDog, NULL); } repos_queue.push_task(session_stats); } void sendAggrStats(pub_struct_type pub_type, std::tr1::shared_ptr<SESSION_AGGREGATION> pAggr_info) { REPOS_STATS aggr_stats; aggr_stats.m_pAggr_stats = pAggr_info; aggr_stats.m_pub_type = pub_type; if (record_session_done) { //boost::thread thrd(&SessionWatchDog); pthread_t thrd; pthread_create(&thrd, NULL, SessionWatchDog, NULL); } repos_queue.push_task(aggr_stats); } void sendQueryStats(pub_struct_type 
pub_type, std::tr1::shared_ptr<STATEMENT_QUERYEXECUTION> pQuery_info) { REPOS_STATS query_stats; query_stats.m_pQuery_stats = pQuery_info; query_stats.m_pub_type = pub_type; if (record_session_done) { //boost::thread thrd(&SessionWatchDog); pthread_t thrd; pthread_create(&thrd, NULL, SessionWatchDog, NULL); } repos_queue.push_task(query_stats); } void __cdecl StatisticsTimerExpired(CEE_tag_def timer_tag) { if (!(srvrGlobal->m_bStatisticsEnabled && srvrGlobal->m_statisticsPubType==STATISTICS_AGGREGATED)) return; //update aggregation stats per interval if(++interval_count >=interval_max) { if(resStatSession != NULL) resStatSession->update(); interval_count=0; } //update query stats once longer than limit if(limit_max>=0 && resStatStatement != NULL && !resStatStatement->queryFinished && !resStatStatement->pubStarted && resStatStatement->wouldLikeToStart_ts > 0 && pQueryStmt!= NULL) { if(limit_count++ >=limit_max) { resStatStatement->SendQueryStats(true, pQueryStmt); limit_count=0; } } } void SyncPublicationThread() { if (!record_session_done) { REPOS_STATS exit_stats; exit_stats.m_pub_type = PUB_TYPE_INIT; // Fix for bug 1404108 where mxosrvr processes do not stop when sqstop is called // Will loop until the SessionWatchDog thread exits, which is holding on to Thread_mutex bool mDone = false; int mReturn = 0; while(!mDone) { repos_queue.push_task(exit_stats); mReturn = pthread_mutex_trylock(&Thread_mutex); char tmpstr[256]; sprintf( tmpstr, "pthread_mutex_trylock()...returned...%d", mReturn ); SendEventMsg(MSG_SERVER_TRACE_INFO, EVENTLOG_INFORMATION_TYPE, srvrGlobal->nskASProcessInfo.processId, ODBCMX_SERVICE, srvrGlobal->srvrObjRef, 3, srvrGlobal->sessionId, tmpstr, "0"); if( mReturn == 0 ) mDone = true; else sleep(1); } pthread_mutex_unlock(&Thread_mutex); } }
32.725685
758
0.679595
[ "object", "shape" ]
86d60cb802510612618c93bbeef51c56ee9e02be
5,675
cpp
C++
src/collision/area/EventsToCollisions.cpp
Kaosumaru/libmx
7f412a7b845a315da02deb279fb208c739fb2f30
[ "MIT" ]
null
null
null
src/collision/area/EventsToCollisions.cpp
Kaosumaru/libmx
7f412a7b845a315da02deb279fb208c739fb2f30
[ "MIT" ]
null
null
null
src/collision/area/EventsToCollisions.cpp
Kaosumaru/libmx
7f412a7b845a315da02deb279fb208c739fb2f30
[ "MIT" ]
null
null
null
#include "EventsToCollisions.h" #include "collision/area/Area.h" using namespace MX; using namespace Collision; TouchShape::TouchShape(const Touch::pointer& touch, ClassID<>::type id) : touch(touch) { SetClassID(id); _trackCollisions = true; } TouchShape::~TouchShape() { } MouseTouchShape::MouseTouchShape(const std::shared_ptr<MouseTouch>& touch, ClassID<>::type id) : TouchShape(touch, id) , mouse_touch(touch) { } MouseShape::MouseShape() { SetClassID(ClassID<MouseShape>::id()); _trackCollisions = true; } DragShape::DragShape() { SetClassID(ClassID<DragShape>::id()); } DropShape::DropShape() { SetClassID(ClassID<DropShape>::id()); } EventsToCollisions::EventsToCollisions(const std::shared_ptr<Collision::LayeredArea>& area, const Mouse::pointer& mouse, const Touches::pointer& touches, const MouseTouches::pointer& mouseTouches, const std::shared_ptr<Widgets::DragSystem>& dragSystem) { using namespace std::placeholders; _area = area; mouse->on_mouse_enter.connect(std::bind(&EventsToCollisions::OnMouseEnter, this, _1), this); mouse->on_mouse_leave.connect(std::bind(&EventsToCollisions::OnMouseLeave, this, _1), this); if (mouse) { mouse->on_mouse_move.connect(std::bind(&EventsToCollisions::OnMouseMove, this, _1), this); mouse->on_mouse_button_up.connect(std::bind(&EventsToCollisions::OnMouseUp, this, _1, _2), this); mouse->on_mouse_button_down.connect(std::bind(&EventsToCollisions::OnMouseDown, this, _1, _2), this); } if (touches) touches->on_touch_begin.connect(std::bind(&EventsToCollisions::OnTouchBegin, this, _1, false), this); if (mouseTouches) mouseTouches->on_touch_begin.connect(std::bind(&EventsToCollisions::OnTouchBegin, this, _1, true), this); if (dragSystem) { dragSystem->on_started_drag.connect(std::bind(&EventsToCollisions::OnDragBegin, this, _1), this); dragSystem->on_moved_drag.connect(std::bind(&EventsToCollisions::OnDragMove, this, _1), this); dragSystem->on_ended_drag.connect(std::bind(&EventsToCollisions::OnDragEnd, this, _1), this); } _mouse = 
std::make_shared<MouseShape>(); _drag = std::make_shared<DragShape>(); _drag->SetRadius(10.0f); //TODO _drop = std::make_shared<DropShape>(); _drop->SetRadius(10.0f); //TODO } EventsToCollisions::~EventsToCollisions() { } void EventsToCollisions::OnTouchBegin(const Touch::pointer& touch, bool mouseTouch) { if (!mouseTouch) { auto touchShape = std::make_shared<TouchShape>(touch, ClassID<TouchShape::TypeBegin>::id()); touchShape->SetPoint(touch->point()); _area->TestForeignShape(ClassID<Collision::EventsToCollisions>::id(), touchShape); } else { auto touchShape = std::make_shared<MouseTouchShape>(std::static_pointer_cast<MouseTouch>(touch), ClassID<MouseTouchShape::TypeBegin>::id()); touchShape->SetPoint(touch->point()); _area->TestForeignShape(ClassID<Collision::EventsToCollisions>::id(), touchShape); } std::shared_ptr<TouchShape> touchShape = !mouseTouch ? std::make_shared<TouchShape>(touch) : std::make_shared<MouseTouchShape>(std::static_pointer_cast<MouseTouch>(touch)); using namespace std::placeholders; touch->on_move.connect_front(std::bind(&EventsToCollisions::OnTouchMove, this, _1, touchShape, mouseTouch), this); touch->on_end.connect_front(std::bind(&EventsToCollisions::OnTouchEnd, this, _1, touchShape, mouseTouch), this); touchShape->SetPoint(touch->point()); _area->AddShape(ClassID<Collision::EventsToCollisions>::id(), touchShape); } void EventsToCollisions::OnTouchMove(const Touch::pointer& touch, const std::shared_ptr<TouchShape>& shape, bool mouseTouch) { shape->SetPoint(touch->point()); } void EventsToCollisions::OnTouchEnd(const Touch::pointer& touch, const std::shared_ptr<TouchShape>& shape, bool mouseTouch) { shape->Unlink(); if (!mouseTouch) { auto touchShape = std::make_shared<TouchShape>(touch, ClassID<TouchShape::TypeEnd>::id()); touchShape->SetPoint(touch->point()); _area->TestForeignShape(ClassID<Collision::EventsToCollisions>::id(), touchShape); } else { auto touchShape = 
std::make_shared<MouseTouchShape>(std::static_pointer_cast<MouseTouch>(touch), ClassID<MouseTouchShape::TypeEnd>::id()); touchShape->SetPoint(touch->point()); _area->TestForeignShape(ClassID<Collision::EventsToCollisions>::id(), touchShape); } } void EventsToCollisions::OnMouseEnter(const glm::vec2& position) { _mouse->SetPoint(position); if (!_mouse->linked()) _area->AddShape(ClassID<Collision::EventsToCollisions>::id(), _mouse); } void EventsToCollisions::OnMouseMove(const glm::vec2& position) { _mouse->SetPoint(position); } void EventsToCollisions::OnMouseLeave(const glm::vec2& position) { _mouse->SetPoint(position); _mouse->SetPoint({ -1.0f, -1.0f }); _mouse->Unlink(); } void EventsToCollisions::OnMouseUp(const glm::vec2& position, int button) { } void EventsToCollisions::OnMouseDown(const glm::vec2& position, int button) { } void EventsToCollisions::OnDragBegin(const glm::vec2& position) { _drag->SetPosition(position); _area->AddShape(ClassID<Collision::EventsToCollisions>::id(), _drag); } void EventsToCollisions::OnDragMove(const glm::vec2& position) { _drag->SetPosition(position); } void EventsToCollisions::OnDragEnd(const glm::vec2& position) { _drag->SetPosition(position); _drag->Unlink(); _drop->SetPosition(position); _area->TestForeignShape(ClassID<Collision::EventsToCollisions>::id(), _drop); }
34.393939
252
0.711894
[ "shape" ]
86e20509eef83d791bd3819e1c8933b2243be79c
29,659
hpp
C++
framework/areg/component/Event.hpp
Ali-Nasrolahi/areg-sdk
4fbc2f2644220196004a31672a697a864755f0b6
[ "Apache-2.0" ]
70
2021-07-20T11:26:16.000Z
2022-03-27T11:17:43.000Z
framework/areg/component/Event.hpp
Ali-Nasrolahi/areg-sdk
4fbc2f2644220196004a31672a697a864755f0b6
[ "Apache-2.0" ]
32
2021-07-31T05:20:44.000Z
2022-03-20T10:11:52.000Z
framework/areg/component/Event.hpp
Ali-Nasrolahi/areg-sdk
4fbc2f2644220196004a31672a697a864755f0b6
[ "Apache-2.0" ]
40
2021-11-02T09:45:38.000Z
2022-03-27T11:17:46.000Z
#pragma once /************************************************************************ * This file is part of the AREG SDK core engine. * AREG SDK is dual-licensed under Free open source (Apache version 2.0 * License) and Commercial (with various pricing models) licenses, depending * on the nature of the project (commercial, research, academic or free). * You should have received a copy of the AREG SDK license description in LICENSE.txt. * If not, please contact to info[at]aregtech.com * * \copyright (c) 2017-2021 Aregtech UG. All rights reserved. * \file areg/component/Event.hpp * \ingroup AREG SDK, Asynchronous Event Generator Software Development Kit * \author Artak Avetyan * \brief AREG Platform, Event Base class declaration. * This event class is generic and base class for all kind * of events in system. This should be used for communication, * but can be extended for custom events. * ************************************************************************/ /************************************************************************ * Include files. ************************************************************************/ #include "areg/base/GEGlobal.h" #include "areg/base/RuntimeObject.hpp" #include "areg/base/IEIOStream.hpp" /************************************************************************ * \brief Predefined MACRO to use for event declaration and implementation ************************************************************************/ /** * \brief MACRO, declares static functions to add and remove * event consumer, which should be available in every Event class. * Do not use them directly, instead use DECLARE_RUNTIME_EVENT **/ #define DECLARE_EVENT_STATIC_REGISTRATION(EventClass) \ public: \ /* Declare static function to add/register event consumer to start processing event. 
*/ \ static bool addListener(IEEventConsumer& eventConsumer, const char* whichThread = nullptr); \ /* Declare static function to add/register event consumer to start processing event. */ \ static bool addListener(IEEventConsumer& eventConsumer, id_type whichThread); \ /* Declare static function to add/register event consumer to start processing event. */ \ static bool addListener(IEEventConsumer& eventConsumer, DispatcherThread & dispThread); \ /* Declare static function to remove/unregister event consumer to stop processing event. */ \ static bool removeListener(IEEventConsumer& eventConsumer, const char* whichThread = nullptr); \ /* Declare static function to remove/unregister event consumer to stop processing event. */ \ static bool removeListener(IEEventConsumer& eventConsumer, id_type whichThread); \ /* Declare static function to remove/unregister event consumer to stop processing event. */ \ static bool removeListener(IEEventConsumer& eventConsumer, DispatcherThread & dispThread); /** * \brief MACRO, implements static functions to add and remove * event consumer, which should be available in every Event class. * Do not use them directly, instead use IMPLEMENT_RUNTIME_EVENT **/ #define IMPLEMENT_EVENT_STATIC_REGISTRATION(EventClass) \ /* Implementation of adding / registering event consumer. */ \ bool EventClass::addListener(IEEventConsumer& eventConsumer, const char* whichThread /*= nullptr*/) \ { return Event::addListener(EventClass::_getClassId(), eventConsumer, whichThread); } \ /* Implementation of adding / registering event consumer. */ \ bool EventClass::addListener(IEEventConsumer& eventConsumer, id_type whichThread) \ { return Event::addListener(EventClass::_getClassId(), eventConsumer, whichThread); } \ /* Implementation of adding / registering event consumer. 
*/ \ bool EventClass::addListener(IEEventConsumer& eventConsumer, DispatcherThread & dispThread) \ { return Event::addListener(EventClass::_getClassId(), eventConsumer, dispThread); } \ /* Implementation of removing / unregistering event consumer. */ \ bool EventClass::removeListener(IEEventConsumer& eventConsumer, const char* whichThread /*= nullptr*/) \ { return Event::removeListener(EventClass::_getClassId(), eventConsumer, whichThread); } \ /* Implementation of removing / unregistering event consumer. */ \ bool EventClass::removeListener(IEEventConsumer& eventConsumer, id_type whichThread) \ { return Event::removeListener(EventClass::_getClassId(), eventConsumer, whichThread); } \ /* Implementation of removing / unregistering event consumer. */ \ bool EventClass::removeListener(IEEventConsumer& eventConsumer, DispatcherThread & dispThread) \ { return Event::removeListener(EventClass::_getClassId(), eventConsumer, dispThread); } /** * \brief MACRO, to declare Event object as a runtime object * to be able to check the type of event, find right consumer * and trigger right event processing function call. * Use this MACRO in every declared event class. * * \param EventClass Event class name. **/ #define DECLARE_RUNTIME_EVENT(EventClass) \ /* Declare runtime functions and objects. */ \ DECLARE_RUNTIME(EventClass) \ /* Declare static functions to add and remove event consumer. */ \ DECLARE_EVENT_STATIC_REGISTRATION(EventClass) /** * \brief MACRO, to implement appropriate runtime and event functions * Use this MACRO in every event class source code. * * \param EventClass Event class name. * \param EventBaseClass The base (parent) class of Event * At least it should be Event. **/ #define IMPLEMENT_RUNTIME_EVENT(EventClass, EventBaseClass) \ /* Implement event runtime functions. */ \ IMPLEMENT_RUNTIME(EventClass, EventBaseClass) \ /* Implement event static functions. 
*/ \ IMPLEMENT_EVENT_STATIC_REGISTRATION(EventClass) /************************************************************************ * Dependencies ************************************************************************/ class IEEventConsumer; class EventDispatcher; class DispatcherThread; class Thread; ////////////////////////////////////////////////////////////////////////// // Event class declaration ////////////////////////////////////////////////////////////////////////// /** * \brief A generic Base Event class. All events are instances of Event * class. Use unique custom or specific Event classes. * * The events are forwarded to be processed in dispatcher thread * and depending on even type, they are queued either in internal * or external queue. The events are created dynamically in the heap * to remain valid as long until they are processed. After processing * the events are automatically destroied. * * Before sending Event to the target dispatcher thread, the thread * should have registered event consumer object which processes * the events. The event consumers are registered and unregistered in * the dispatcher thread by calling addListener() and removeListener() * methods. User DECLARE_RUNTIME_EVENT() and IMPLEMENT_RUNTIME_EVENT() * macros to have appropriate method definition in the event object. * * In addition, the system contains several predefined event objects, * which are used by the system. * **/ class AREG_API Event : public RuntimeObject { ////////////////////////////////////////////////////////////////////////// // Defines and constants. ////////////////////////////////////////////////////////////////////////// public: /** * \brief Event::eEventType * Event types. **/ typedef enum class E_EventType : unsigned int { EventUnknown = 0 /*0x0000*/ //!< Unknown event type. Bit set: 0000 0000 0000 0000 , EventInternal = 1 /*0x0001*/ //!< Internal event. Bit set: 0000 0000 0000 0001 , EventExternal = 2 /*0x0002*/ //!< External event. 
Bit set: 0000 0000 0000 0010 , EventLocal = 16 /*0x0010*/ //!< Local event. Bit set: 0000 0000 0001 0000 , EventRemote = 32 /*0x0020*/ //!< Remote event. Bit set: 0000 0000 0010 0000 , EventNotify = 256 /*0x0100*/ //!< Notification event. Bit set: 0000 0001 0000 0000 , EventToStub = 512 /*0x0200*/ //!< Stub event. Bit set: 0000 0010 0000 0000 , EventToProxy = 1024 /*0x0400*/ //!< Proxy event. Bit set: 0000 0100 0000 0000 , EventConnect = 2048 /*0x0800*/ //!< Connection event. Bit set: 0000 1000 0000 0000 , EventNotifyClient = 273 /*0x0111*/ //!< Client Notification. Bit set: 0000 0001 0001 0001 , EventLocalServiceRequest = 530 /*0x0212*/ //!< Local Service Request. Bit set: 0000 0010 0001 0010 , EventRemoteServiceRequest = 546 /*0x0222*/ //!< Remote Service Request. Bit set: 0000 0010 0010 0010 , EventLocalNotifyRequest = 786 /*0x0312*/ //!< Local Request notify. Bit set: 0000 0011 0001 0010 , EventRemoteNotifyRequest = 802 /*0x0322*/ //!< Remote Request notify. Bit set: 0000 0011 0010 0010 , EventLocalServiceResponse = 1042 /*0x0412*/ //!< Local Service Response. Bit set: 0000 0100 0001 0010 , EventRemoteServiceResponse= 1058 /*0x0422*/ //!< Remote Service Response. Bit set: 0000 0100 0010 0010 , EventLocalStubConnect = 2834 /*0x0B12*/ //!< Local Stub Connection. Bit set: 0000 1011 0001 0010 , EventRemoteStubConnect = 2850 /*0x0B22*/ //!< Remote Stub Connection. Bit set: 0000 1011 0010 0010 , EventLocalProxyConnect = 3346 /*0x0D12*/ //!< Local Proxy Connection. Bit set: 0000 1101 0001 0010 , EventRemoteProxyConnect = 3362 /*0x0D22*/ //!< Remote Proxy Connection. Bit set: 0000 1101 0010 0010 , EventCustom =32784 /*0x8010*/ //!< Custom event. Bit set: 1000 0000 0001 0000 , EventCustomInternal =32785 /*0x8011*/ //!< Custom Notification. Bit set: 1000 0000 0001 0001 , EventCustomExternal =32786 /*0x8012*/ //!< Custom Thread event. 
Bit set: 1000 0000 0001 0010 } eEventType; /** * \return Returns string value of Event::eEventType **/ static inline const char* getString( Event::eEventType eventType ); /** * \brief Predefined invalid event object. It has Unknown type with ID 0. **/ static const Event BAD_EVENT; ////////////////////////////////////////////////////////////////////////// // Event class ////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////// // Declare Event runtime information. ////////////////////////////////////////////////////////////////////////// DECLARE_RUNTIME_EVENT(Event) ////////////////////////////////////////////////////////////////////////// // Event class statics ////////////////////////////////////////////////////////////////////////// public: /** * \brief Static method to add the listener to specified thread, * i.e. registers consumer for specified event class. * \param classId Runtime ID of event class. * \param eventConsumer Consumer to register. * \param whichThread The registered thread name to add the * event consumer. If this is nullptr or empty, * it will use current thread for registering consumer. * \return Returns true if successfully registered. * Returns false, if failed or specified thread already had specified * consumer registered for specified event class type. **/ static bool addListener(const RuntimeClassID & classId, IEEventConsumer & eventConsumer, const char* whichThread); /** * \brief Static method to add the listener to specified thread, * i.e. registers consumer for specified event class. * \param classId Runtime ID of event class. * \param eventConsumer Consumer to register. * \param whichThread The valid registered thread ID to add listener. * \return Returns true if successfully registered. * Returns false, if failed or specified thread already had specified * consumer registered for specified event class type. 
**/ static bool addListener( const RuntimeClassID & classId, IEEventConsumer & eventConsumer, id_type whichThread ); /** * \brief Static method to add the listener to specified thread, * i.e. registers consumer for specified event class. * \param classId Runtime ID of event class. * \param eventConsumer Consumer to register. * \param dispThread The dispatcher thread, which dispatches messages * \return Returns true if successfully registered. * Returns false, if failed or specified thread already had specified * consumer registered for specified event class type. **/ static bool addListener(const RuntimeClassID & classId, IEEventConsumer & eventConsumer, DispatcherThread & dispThread); /** * \brief Static method to remove listener from specified thread, * i.e. unregister consumer in specified thread. * \param classId Runtime ID of event class. * \param eventConsumer Consumer to unregister. * \param whichThread Thread name to unregister. If this is nullptr or empty, * it will use current thread to unregister consumer. * \return Returns true if successfully unregistered. **/ static bool removeListener(const RuntimeClassID & classId, IEEventConsumer & eventConsumer, const char* whichThread); /** * \brief Static method to remove listener from specified thread, * i.e. unregister consumer in specified thread. * \param classId Runtime ID of event class. * \param eventConsumer Consumer to unregister. * \param whichThread The valid registered thread ID to remove listener. * \return Returns true if successfully unregistered. **/ static bool removeListener( const RuntimeClassID & classId, IEEventConsumer & eventConsumer, id_type whichThread ); /** * \brief Static method to remove listener from specified thread, * i.e. unregister consumer in specified thread. * \param classId Runtime ID of event class. * \param eventConsumer Consumer to unregister. * \param dispThread The dispatcher thread, which dispatches messages * \return Returns true if successfully unregistered. 
**/ static bool removeListener(const RuntimeClassID & classId, IEEventConsumer & eventConsumer, DispatcherThread & dispThread); ////////////////////////////////////////////////////////////////////////// // Constructor / Destructor. Protected ////////////////////////////////////////////////////////////////////////// protected: /** * \brief Default constructor. **/ Event( void ); /** * \brief Initialization constructor. * Creates event object of specified type. * \param eventType The type of Event. **/ Event( Event::eEventType eventType ); /** * \brief Destructor. **/ virtual ~Event( void ); ////////////////////////////////////////////////////////////////////////// // Overrides ////////////////////////////////////////////////////////////////////////// public: /************************************************************************/ // Event class overrides /************************************************************************/ /** * \brief Call to destroy Event object. * Overwrite if there is any special action should be performed * before destroying event object. **/ virtual void destroy( void ) override; /** * \brief Dispatch event itself. Overwrite function if needed. * \param consumer Registered event consumer **/ virtual void dispatchSelf( IEEventConsumer * consumer ); /** * \brief Delivers the event to target thread. If target thread * is nullptr, it deliveres to current thread. **/ virtual void deliverEvent( void ); /** * \brief Adds the listener to target thread, i.e. registers * consumer for the event. It uses runtime information * as an event identifier. If target thread is not * specified, it will register consumer in current thread. * \param eventConsumer Event consumer to add * \return Returns true if could register consumer in target thread. * Returns false if failed or target thread already had consumer * registered for current event class. 
**/ virtual bool addEventListener( IEEventConsumer & eventConsumer ); /** * \brief Removes listener from target thread, i.e. unregisters consumer * from target thread. If target thread is nullptr it will unregister * consumer in current thread. * \param eventConsumer The consumer object to unregister for current * event class. * \return Returns true if successfully unregistered consumer. **/ virtual bool removeEventListener( IEEventConsumer & eventConsumer ); ////////////////////////////////////////////////////////////////////////// // Operations ////////////////////////////////////////////////////////////////////////// public: /** * \brief Registers event for specified thread, i.e. sets target thread. * By specified thread ID it will search dispatching thread and * will set target thread object. * \param whichThread The ID of target thread. If this parameter is null * it will get current thread as an event target. * \return Returns true if dispatching thread found in system, the thread is * running and ready to dispatch events. **/ bool registerForThread( id_type whichThread = 0); /** * \brief Searches dispatcher thread by given name and sets as an event * target thread. * \param whichThread The unique name of event dispatcher thread. * \return Returns true if dispatching thread found in system, the thread is * running and ready to dispatch events. **/ bool registerForThread( const char* whichThread ); /** * \brief Set or re-set target thread. If target thread is nullptr, * all events will be forwarded to current thread. * \param dispatchThread Target Dispatcher thread * \return Returns true if target thread is not nullptr and ready * to dispatch events. **/ bool registerForThread( DispatcherThread * dispatchThread ); /** * \brief Returns true if the target thread has a consumer * registered for event. It uses runtime information * as an event identifier. 
**/ bool isEventRegistered( void ) const; ////////////////////////////////////////////////////////////////////////// // Attributes ////////////////////////////////////////////////////////////////////////// /** * \brief Returns the type of Event. * For more information see description of Event::eEventType * \see Event::eEventType **/ inline Event::eEventType getEventType( void ) const; /** * \brief Sets the type of Event. * For more information see description of Event::eEventType * \param eventType The type of Event. * \see Event::eEventType **/ inline void setEventType( Event::eEventType eventType ); /** * \brief Returns pointer of Event Consumer object. * If nullptr, no Event Consumer is set and the Event cannot be processed. **/ inline IEEventConsumer * getEventConsumer( void ); /** * \brief Sets the Event Consumer object. * \param consumer The Event Consumer object, which should process event **/ inline void setEventConsumer( IEEventConsumer * consumer ); /** * \brief Checks whether the given event type is internal or not. * \param eventType The event type to check. **/ inline static bool isInternal( Event::eEventType eventType ); /** * \brief Checks whether the given event type is external or not. * \param eventType The event type to check. **/ inline static bool isExternal( Event::eEventType eventType ); /** * \brief Checks whether the given event type is local or not. * \param eventType The event type to check. **/ inline static bool isLocal( Event::eEventType eventType ); /** * \brief Checks whether the given event type is remote or not. * \param eventType The event type to check. **/ inline static bool isRemote( Event::eEventType eventType ); /** * \brief Checks whether the given event type is developer custom or system predefined. * \param eventType The event type to check. **/ inline static bool isCustom( Event::eEventType eventType ); /** * \brief Returns true, if event is internal, i.e. 
should be queued in internal event queue **/ inline bool isInternal( void ) const; /** * \brief Returns true, if event is external, i.e. should be queued in external event queue **/ inline bool isExternal( void ) const; /** * \brief Returns true, if event is local, i.e. cannot be processed in other process **/ inline bool isLocal( void ) const; /** * \brief Returns true, if event is remote, i.e. can be processed local and in other process **/ inline bool isRemote( void ) const; /** * \brief Returns true, if event is developer custom to communicate with worker thread or system predefined. **/ inline bool isCustom( void ) const; ////////////////////////////////////////////////////////////////////////// // Protected members ////////////////////////////////////////////////////////////////////////// protected: /** * \brief Returns reference to dispatcher of target or current thread. * If target thread is unknown, this will return dispatcher of * current thread. **/ EventDispatcher & getDispatcher( void ) const; ////////////////////////////////////////////////////////////////////////// // Hidden methods. ////////////////////////////////////////////////////////////////////////// private: /** * \brief Returns reference to event object **/ inline Event & self( void ); ////////////////////////////////////////////////////////////////////////// // Member variables. ////////////////////////////////////////////////////////////////////////// protected: /** * \brief The event type **/ eEventType mEventType; /** * \brief Event consumer. **/ IEEventConsumer* mConsumer; /** * \brief Target thread. **/ DispatcherThread* mTargetThread; ////////////////////////////////////////////////////////////////////////// // Forbidden method calls. 
////////////////////////////////////////////////////////////////////////// private: DECLARE_NOCOPY_NOMOVE( Event ); }; IMPLEMENT_STREAMABLE(Event::eEventType) ////////////////////////////////////////////////////////////////////////// // Event class inline function implementation ////////////////////////////////////////////////////////////////////////// inline Event::eEventType Event::getEventType( void ) const { return mEventType; } inline void Event::setEventType( Event::eEventType eventType ) { mEventType = eventType; } inline IEEventConsumer * Event::getEventConsumer( void ) { return mConsumer; } inline void Event::setEventConsumer( IEEventConsumer * consumer ) { mConsumer = consumer; } inline bool Event::isInternal( Event::eEventType eventType ) { return (static_cast<unsigned int>(eventType) & static_cast<unsigned int>(Event::eEventType::EventInternal)) != 0; } inline bool Event::isExternal( Event::eEventType eventType ) { return (static_cast<unsigned int>(eventType) & static_cast<unsigned int>(Event::eEventType::EventExternal)) != 0; } inline bool Event::isLocal( Event::eEventType eventType ) { return (static_cast<unsigned int>(eventType) & static_cast<unsigned int>(Event::eEventType::EventLocal)) != 0; } inline bool Event::isRemote( Event::eEventType eventType ) { return (static_cast<unsigned int>(eventType) & static_cast<unsigned int>(Event::eEventType::EventRemote)) != 0; } inline bool Event::isCustom( Event::eEventType eventType ) { return (static_cast<unsigned int>(eventType) & static_cast<unsigned int>(Event::eEventType::EventCustom)) != 0; } inline bool Event::isInternal(void) const { return Event::isInternal(mEventType); } inline bool Event::isExternal(void) const { return Event::isExternal( mEventType ); } inline bool Event::isLocal(void) const { return Event::isLocal( mEventType ); } inline bool Event::isRemote(void) const { return Event::isRemote( mEventType ); } inline bool Event::isCustom( void ) const { return Event::isCustom( mEventType ); } 
inline const char* Event::getString(Event::eEventType eventType) { switch ( eventType ) { case Event::eEventType::EventUnknown: return "Event::eEventType::EventUnknown"; case Event::eEventType::EventInternal: return "Event::eEventType::EventInternal"; case Event::eEventType::EventExternal: return "Event::eEventType::EventExternal"; case Event::eEventType::EventLocal: return "Event::eEventType::EventLocal"; case Event::eEventType::EventRemote: return "Event::eEventType::EventRemote"; case Event::eEventType::EventNotify: return "Event::eEventType::EventNotify"; case Event::eEventType::EventToStub: return "Event::eEventType::EventToStub"; case Event::eEventType::EventToProxy: return "Event::eEventType::EventToProxy"; case Event::eEventType::EventConnect: return "Event::eEventType::EventConnect"; case Event::eEventType::EventNotifyClient: return "Event::eEventType::EventNotifyClient"; case Event::eEventType::EventLocalServiceRequest: return "Event::eEventType::EventLocalServiceRequest"; case Event::eEventType::EventRemoteServiceRequest: return "Event::eEventType::EventRemoteServiceRequest"; case Event::eEventType::EventLocalNotifyRequest: return "Event::eEventType::EventLocalNotifyRequest"; case Event::eEventType::EventRemoteNotifyRequest: return "Event::eEventType::EventRemoteNotifyRequest"; case Event::eEventType::EventLocalServiceResponse: return "Event::eEventType::EventLocalServiceResponse"; case Event::eEventType::EventRemoteServiceResponse: return "Event::eEventType::EventRemoteServiceResponse"; case Event::eEventType::EventLocalStubConnect: return "Event::eEventType::EventLocalStubConnect"; case Event::eEventType::EventRemoteStubConnect: return "Event::eEventType::EventRemoteStubConnect"; case Event::eEventType::EventLocalProxyConnect: return "Event::eEventType::EventLocalProxyConnect"; case Event::eEventType::EventRemoteProxyConnect: return "Event::eEventType::EventRemoteProxyConnect"; case Event::eEventType::EventCustom: return 
"Event::eEventType::EventCustom"; case Event::eEventType::EventCustomInternal: return "Event::eEventType::EventCustomInternal"; case Event::eEventType::EventCustomExternal: return "Event::eEventType::EventCustomExternal"; default: return "ERR: Undefined Event::eEventType value!"; } }
44.937879
127
0.575171
[ "object" ]
86e2df13509d1b58134eba81505176b2d6249370
7,113
cpp
C++
OpenXLSX/sources/XLDateTime.cpp
martbelko/OpenXLSX
87d1c8c4c5f71aeb4ce0214b20babffaf3f17f4c
[ "BSD-3-Clause" ]
3
2021-06-29T08:27:17.000Z
2021-09-09T16:01:35.000Z
OpenXLSX/sources/XLDateTime.cpp
martbelko/OpenXLSX
87d1c8c4c5f71aeb4ce0214b20babffaf3f17f4c
[ "BSD-3-Clause" ]
null
null
null
OpenXLSX/sources/XLDateTime.cpp
martbelko/OpenXLSX
87d1c8c4c5f71aeb4ce0214b20babffaf3f17f4c
[ "BSD-3-Clause" ]
1
2022-02-24T14:35:00.000Z
2022-02-24T14:35:00.000Z
// // Created by Kenneth Balslev on 28/08/2021. // #include "XLDateTime.hpp" #include "XLException.hpp" #include <string> #include <cmath> namespace { /** * @brief * @param year * @return */ bool isLeapYear(int year) { if (year == 1900) return true; if (year % 400 == 0 || (year % 4 == 0 && year % 100 != 0)) return true; return false; } /** * @brief * @param month * @param year * @return */ int daysInMonth(int month, int year) { switch (month) { case 1: return 31; case 2: return (isLeapYear(year) ? 29 : 28); case 3: return 31; case 4: return 30; case 5: return 31; case 6: return 30; case 7: return 31; case 8: return 31; case 9: return 30; case 10: return 31; case 11: return 30; case 12: return 31; default: return 0; } } /** * @brief * @param serial * @return */ int dayOfWeek(double serial) { auto day = static_cast<int32_t>(serial) % 7; return (day == 0 ? 6 : day - 1); } } // namespace namespace OpenXLSX { /** * @details Conctructor. Default implementation. */ XLDateTime::XLDateTime() = default; /** * @details Constructor taking an Excel date/time serial number as an argument. */ XLDateTime::XLDateTime(double serial) : m_serial(serial) { if (serial < 1.0) throw XLDateTimeError("Excel date/time serial number is invalid (must be >= 1.0.)"); } /** * @details Constructor taking a std::tm object as an argument. */ XLDateTime::XLDateTime(const std::tm& timepoint) { // ===== Check validity of tm struct. // ===== Only year, month and day of the month are checked. Other variables are ignored. if (timepoint.tm_year < 0) throw XLDateTimeError("Invalid year. Must be >= 0."); if (timepoint.tm_mon < 0 || timepoint.tm_mon > 11) throw XLDateTimeError("Invalid month. Must be >= 0 or <= 11."); if (timepoint.tm_mday <= 0 || timepoint.tm_mday > daysInMonth(timepoint.tm_mon + 1, timepoint.tm_year + 1900)) throw XLDateTimeError("Invalid day. 
Must be >= 1 or <= total days in the month."); // ===== Count the number of days for full years past 1900 for (int i = 0; i < timepoint.tm_year; ++i) { m_serial += (isLeapYear(1900 + i) ? 366 : 365); } // ===== Count the number of days for full months of the last year for (int i = 0; i < timepoint.tm_mon; ++i) { m_serial += daysInMonth(i + 1, timepoint.tm_year + 1900); } // ===== Add the number of days of the month, minus one. // ===== (The reason for the 'minus one' is that unlike the other fields in the struct, // ===== tm_day represents the date of a month, whereas the other fields typically // ===== represents the number of whole units since the start). m_serial += timepoint.tm_mday - 1; // ===== Convert hour, minute and second to fraction of a full day. int32_t seconds = timepoint.tm_hour * 3600 + timepoint.tm_min * 60 + timepoint.tm_sec; m_serial += seconds / 86400.0; } /** * @details Constructor taking a unixtime format (seconds since 1/1/1970) as an argument. */ XLDateTime::XLDateTime(time_t unixtime) { // There are 86400 seconds in a day // There are 25569 days between 1/1/1970 and 30/12/1899 (the epoch used by Excel) m_serial = static_cast<double>(unixtime) / 86400 + 25569; } /** * @details Copy constructor. Default implementation. */ XLDateTime::XLDateTime(const XLDateTime& other) = default; /** * @details Move constructor. Default implementation. */ XLDateTime::XLDateTime(XLDateTime&& other) noexcept = default; /** * @details Destructor. Default implementation. */ XLDateTime::~XLDateTime() = default; /** * @details Copy assignment operator. Default implementation. */ XLDateTime& XLDateTime::operator=(const XLDateTime& other) = default; /** * @details Move assignment operator. Default implementation. 
*/ XLDateTime& XLDateTime::operator=(XLDateTime&& other) noexcept = default; /** * @details */ XLDateTime& XLDateTime::operator=(double serial) { XLDateTime temp(serial); std::swap(*this, temp); return *this; } /** * @details */ XLDateTime& XLDateTime::operator=(const std::tm& timepoint) { XLDateTime temp(timepoint); std::swap(*this, temp); return *this; } /** * @details */ XLDateTime::operator std::tm() const { return tm(); } /** * @details Get the time point as an Excel date/time serial number. */ double XLDateTime::serial() const { return m_serial; } /** * @details Get the time point as a std::tm object. */ std::tm XLDateTime::tm() const { // ===== Create and initialize the resulting object. std::tm result {}; result.tm_year = 0; result.tm_mon = 0; result.tm_mday = 0; result.tm_wday = 0; result.tm_yday = 0; result.tm_hour = 0; result.tm_min = 0; result.tm_sec = 0; result.tm_isdst = -1; double serial = m_serial; // ===== Count the number of whole years since 1900. while (true) { auto days = (isLeapYear(result.tm_year + 1900) ? 366 : 365); if (days > serial) break; serial -= days; ++result.tm_year; } // ===== Calculate the day of the year, and the day of the week result.tm_yday = static_cast<int>(serial) - 1; result.tm_wday = dayOfWeek(m_serial); // ===== Count the number of whole months in the year. while (true) { auto days = daysInMonth(result.tm_mon + 1, 1900 + result.tm_year); if (days > serial) break; serial -= days; ++result.tm_mon; } // ===== Calculate the number of days. result.tm_mday = static_cast<int>(serial); serial -= result.tm_mday; // ===== Calculate the number of hours. result.tm_hour = static_cast<int>(serial * 24); serial -= (result.tm_hour / 24.0); // ===== Calculate the number of minutes. result.tm_min = static_cast<int>(serial * 24 * 60); serial -= (result.tm_min / (24.0 * 60.0)); // ===== Calculate the number of seconds. result.tm_sec = static_cast<int>(lround(serial * 24 * 60 * 60)); return result; } } // namespace OpenXLSX
28.452
118
0.539013
[ "object" ]
86e87788b487060c1c6469dd167fa1676b34cfe0
890
cpp
C++
chap09/Exer09_26.cpp
sjbarigye/CPP_Primer
d9d31a73a45ca46909bae104804fc9503ab242f2
[ "Apache-2.0" ]
50
2016-01-08T14:28:53.000Z
2022-01-21T12:55:00.000Z
chap09/Exer09_26.cpp
sjbarigye/CPP_Primer
d9d31a73a45ca46909bae104804fc9503ab242f2
[ "Apache-2.0" ]
2
2017-06-05T16:45:20.000Z
2021-04-17T13:39:24.000Z
chap09/Exer09_26.cpp
sjbarigye/CPP_Primer
d9d31a73a45ca46909bae104804fc9503ab242f2
[ "Apache-2.0" ]
18
2016-08-17T15:23:51.000Z
2022-03-26T18:08:43.000Z
#include <iostream> #include <list> #include <vector> #include <iterator> using std::cout; using std::endl; using std::list; using std::vector; using std::begin; using std::end; int main() { int ia[] = { 0, 1, 1, 2, 3, 5, 8, 13, 21, 55, 89 }; list<int> il(begin(ia), end(ia)); vector<int> iv(begin(ia), end(ia)); auto iter = il.begin(); while(iter != il.end()) { if(*iter % 2) iter = il.erase(iter); else ++iter; } for(auto it = iv.begin(); it != iv.end();) { if(!(*it % 2)) it = iv.erase(it); else ++it; } cout << "list after removing odd elements: "; for(const auto &i : il) cout << i << " "; cout << endl; cout << "vector after removing even elements: "; for(const auto &i : iv) cout << i << " "; cout << endl; return 0; }
21.707317
55
0.486517
[ "vector" ]
86ea33a0897f5e9d8b383c72d98b77ed94b4bcde
5,405
cpp
C++
frameworks/lua/libs/kernel/dmzLuaKernelSphere.cpp
tongli/dmz
f2242027a17ea804259f9412b07d69f719a527c5
[ "MIT" ]
1
2016-05-08T22:02:35.000Z
2016-05-08T22:02:35.000Z
frameworks/lua/libs/kernel/dmzLuaKernelSphere.cpp
tongli/dmz
f2242027a17ea804259f9412b07d69f719a527c5
[ "MIT" ]
null
null
null
frameworks/lua/libs/kernel/dmzLuaKernelSphere.cpp
tongli/dmz
f2242027a17ea804259f9412b07d69f719a527c5
[ "MIT" ]
null
null
null
#include <dmzLuaKernel.h> #include "dmzLuaKernelPrivate.h" #include <dmzLuaKernelValidate.h> #include <dmzSystem.h> #include <dmzTypesString.h> #include <dmzTypesSphere.h> #include <dmzTypesVector.h> #include <luacpp.h> using namespace dmz; namespace { static const char SphereName[] = "dmz.types.sphere"; inline Sphere* sphere_check (lua_State *L, int index) { if (index < 0) { index = lua_gettop (L) + index + 1; } Sphere *result (0); Sphere **ptr = (Sphere **)luaL_checkudata (L, index, SphereName); if (ptr) { result = *ptr; } return result; } static int sphere_new (lua_State *L) { int result (0); const int StackCount (lua_gettop (L)); if (!StackCount) { Sphere value; result = (lua_create_sphere (L, value) ? 1 : 0); } else if (StackCount == 2) { Vector *origin = lua_check_vector (L, 1); lua_Number radius = luaL_checknumber (L, 2); if (origin) { Sphere value (*origin, radius); result = (lua_create_sphere (L, value) ? 1 : 0); } } else { luaL_error (L, "Unsupported parameters."); } return result; } static int sphere_is_a (lua_State *L) { if (lua_to_sphere (L, 1)) { lua_pushvalue (L, 1); } else { lua_pushnil (L); } return 1; } static const luaL_Reg arrayFunc [] = { {"new", sphere_new}, {"is_a", sphere_is_a}, {NULL, NULL}, }; static int sphere_to_string (lua_State *L) { int result (0); Sphere *sphere = sphere_check (L, 1); if (sphere) { String str; str << "origin: " << sphere->get_origin () << " radius: " << sphere->get_radius (); lua_pushstring (L, str.get_buffer ()); result = 1; } return result; } static int sphere_equal (lua_State *L) { int result (0); Sphere *sphere1 = sphere_check (L, 1); Sphere *sphere2 = sphere_check (L, 2); if (sphere1 && sphere2) { lua_pushboolean (L, (sphere1 == sphere2 ? 
1 : 0)); result = 1; } return result; } static int sphere_set_origin (lua_State *L) { Sphere *sphere = sphere_check (L, 1); Vector *origin = lua_check_vector (L, 2); if (sphere && origin) { sphere->set_origin (*origin); } return 0; } static int sphere_get_origin (lua_State *L) { int result = 0; Sphere *sphere = sphere_check (L, 1); if (sphere) { const Vector Origin = sphere->get_origin (); lua_create_vector (L, &Origin); result = 1; } return result; } static int sphere_contains_point (lua_State *L) { int result = 0; Sphere *sphere = sphere_check (L, 1); Vector *point = lua_check_vector (L, 2); if (sphere && point) { lua_pushboolean (L, sphere->contains_point (*point) ? 1 : 0); result = 1; } return result; } static int sphere_get_extents (lua_State *L) { int result = 0; Sphere *sphere = sphere_check (L, 1); if (sphere) { Vector origin, min, max; sphere->get_extents (origin, min, max); lua_create_vector (L, &origin); lua_create_vector (L, &min); lua_create_vector (L, &max); result = 3; } return result; } static int sphere_set_radius (lua_State *L) { Sphere *sphere = sphere_check (L, 1); lua_Number radius = luaL_checknumber (L, 2); if (sphere) { sphere->set_radius (radius); } return 0; } static int sphere_get_radius (lua_State *L) { int result = 0; Sphere *sphere = sphere_check (L, 1); if (sphere) { lua_pushnumber (L, sphere->get_radius ()); result = 1; } return result; } static int sphere_delete (lua_State *L) { LUA_START_VALIDATE (L); Sphere **sphere = (Sphere **)luaL_checkudata (L, 1, SphereName); if (sphere) { delete (*sphere); *sphere = 0; } LUA_END_VALIDATE (L, 0); return 0; } static const luaL_Reg arrayMembers[] = { {"__tostring", sphere_to_string}, {"__eq", sphere_equal}, {"set_origin", sphere_set_origin}, {"get_origin", sphere_get_origin}, {"contains_point", sphere_contains_point}, {"get_extents", sphere_get_extents}, {"set_radius", sphere_set_radius}, {"get_radius", sphere_get_radius}, {"__gc", sphere_delete}, {NULL, NULL}, }; }; void 
dmz::open_lua_kernel_sphere_lib (lua_State *L) { LUA_START_VALIDATE (L); luaL_newmetatable (L, SphereName); luaL_register (L, NULL, arrayMembers); lua_pushvalue (L, -1); lua_setfield (L, -2, "__index"); lua_create_dmz_namespace (L, "sphere"); luaL_register (L, NULL, arrayFunc); lua_make_readonly (L, -1); // make sphere read only. lua_pop (L, 2); // pops meta table and dmz.sphere table. LUA_END_VALIDATE (L, 0); } dmz::Sphere * dmz::lua_create_sphere (lua_State *L, const Sphere &Value) { LUA_START_VALIDATE (L); Sphere *result (0); Sphere **ptr = (Sphere **)lua_newuserdata (L, sizeof (Sphere *)); if (ptr) { *ptr = new Sphere; if (*ptr) { (**ptr) = Value; } luaL_getmetatable (L, SphereName); lua_setmetatable (L, -2); result = *ptr; } LUA_END_VALIDATE (L, (result ? 1 : 0)); return result; } dmz::Sphere * dmz::lua_to_sphere (lua_State *L, int narg) { Sphere *result (0); Sphere **value = (Sphere **) lua_is_object (L, narg, SphereName); if (value) { result = *value; } return result; } dmz::Sphere * dmz::lua_check_sphere (lua_State *L, int narg) { return sphere_check (L, narg); }
17.268371
89
0.614801
[ "vector" ]
86f4c66b170a91d12afaed70c4a5b378ddc4baf8
16,312
cc
C++
Dragon/src/operators/vision/conv_op_base.cc
neopenx/Dragon
0e639a7319035ddc81918bd3df059230436ee0a1
[ "BSD-2-Clause" ]
212
2015-07-05T07:57:17.000Z
2022-02-27T01:55:35.000Z
Dragon/src/operators/vision/conv_op_base.cc
neopenx/Dragon
0e639a7319035ddc81918bd3df059230436ee0a1
[ "BSD-2-Clause" ]
6
2016-07-07T14:31:56.000Z
2017-12-12T02:21:15.000Z
Dragon/src/operators/vision/conv_op_base.cc
neopenx/Dragon
0e639a7319035ddc81918bd3df059230436ee0a1
[ "BSD-2-Clause" ]
71
2016-03-24T09:02:41.000Z
2021-06-03T01:52:41.000Z
#include "operators/vision/conv_op_base.h" #include "core/workspace.h" #include "utils/filler.h" namespace dragon { template <class Context> void ConvOpBase<Context>::ComputeOutputShape() { output_shape.clear(); for (int i = 0; i < num_spatial_axes; i++) { if (!ReverseDimensions()) { const TIndex input_dim = bottom_shape[spatial_axis + i]; const TIndex dilated_kernel = dilation[i] * (kernel_size[i] - 1) + 1; if (padding != "SAME") { const TIndex output_dim = (input_dim + 2 * pad[i] - dilated_kernel) / stride[i] + 1; output_shape.push_back(output_dim); } else { TIndex output_dim = (input_dim + stride[i] - 1) / (float)stride[i]; TIndex padding_needed = std::max(TIndex(0), (output_dim - 1) * stride[i] + dilated_kernel - input_dim); TIndex pad_l = padding_needed / 2; TIndex pad_r = padding_needed - pad_l; pad[i] = pad_l; output_shape.push_back(output_dim); } } else { const TIndex input_dim = bottom_shape[spatial_axis + i]; const TIndex dilated_kernel = dilation[i] * (kernel_size[i] - 1) + 1; if (padding != "SAME") { const TIndex output_dim = stride[i] * (input_dim - 1) + dilated_kernel - 2 * pad[i]; output_shape.push_back(output_dim); } else { CHECK(output_dims_desc.size() > 0 || output_dims_value.size() > 0) << "\nThe output shape must be specified if using SAME padding algorithm."; int given_ndim = (int)std::max(output_dims_desc.size(), output_dims_value.size()); CHECK_EQ(given_ndim, num_spatial_axes + 2) << "\nThe len of output shape should be " << num_spatial_axes + 2 << ", but got " << output_dims_desc.size() << "."; TIndex output_dim = output_dims(spatial_axis + i); TIndex padding_needed = stride[i] * (input_dim - 1) + dilated_kernel - output_dim; CHECK_GE(padding_needed, 0) << "\nThe output shape is incorrect." 
<< "\nWith the given stride and kernel, dimension of axis " << spatial_axis + i << " can be at most " << stride[i] * (input_dim - 1) + dilated_kernel << "."; TIndex pad_l = padding_needed / 2; TIndex pad_r = padding_needed - pad_l; pad[i] = pad_l; output_shape.push_back(output_dim); } } } } template <class Context> template <typename T> void ConvOpBase<Context>::Wx(const T* x, const T* weights, T* y, bool skip_im2col) { const T* col_buff_ = x; if (!is_1x1) { if (!skip_im2col) Im2Col(x, col_buffer->template mutable_data<T, Context>()); col_buff_ = col_buffer->data<T, Context>(); } for (int g = 0; g < group; g++) { if (data_format == "NCHW") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, conv_out_channels / group, conv_out_spatial_dim, kernel_dim, 1.0, weights + weight_offset * g, col_buff_ + col_offset * g, 0.0, y + output_offset * g); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, conv_out_spatial_dim, conv_out_channels / group, kernel_dim, 1.0, col_buff_ + col_offset * g, weights + weight_offset * g, 0.0, y + output_offset * g); } } } template <class Context> template <typename T> void ConvOpBase<Context>::Pb(const T* bias, T* y) { if (data_format == "NCHW") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, num_output, out_spatial_dim, 1, 1.0, bias, bias_multiplier->template data<T, Context>(), 1.0, y); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasNoTrans, CblasNoTrans, out_spatial_dim, num_output, 1, 1.0, bias_multiplier->template data<T, Context>(), bias, 1.0, y); } } template <class Context> template <typename T> void ConvOpBase<Context>::Dx(const T* dy, const T* weights, T* dx) { T* col_buff_ = col_buffer->template mutable_data<T, Context>(); if (is_1x1) col_buff_ = dx; for (int g = 0; g < group; g++) { if (data_format == "NCHW") { math::Gemm<T, Context>(CblasTrans, CblasNoTrans, kernel_dim, conv_out_spatial_dim, conv_out_channels / group, 1.0, weights + weight_offset * g, dy + output_offset * 
g, 0.0, col_buff_ + col_offset * g); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasNoTrans, CblasTrans, conv_out_spatial_dim, kernel_dim, conv_out_channels / group, 1.0, dy + output_offset * g, weights + weight_offset * g, 0.0, col_buff_ + col_offset * g); } } if (!is_1x1) Col2Im(col_buff_, dx); } template <class Context> template <typename T> void ConvOpBase<Context>::Dw(const T* dy, const T* x, T *dw) { const T *col_buff_ = x; if (!is_1x1) { Im2Col(x, col_buffer->template mutable_data<T, Context>()); col_buff_ = col_buffer->template data<T, Context>(); } for (int g = 0; g < group; g++) { if (data_format == "NCHW") { math::Gemm<T, Context>(CblasNoTrans, CblasTrans, conv_out_channels / group, kernel_dim, conv_out_spatial_dim, 1.0, dy + output_offset * g, col_buff_ + col_offset * g, 1.0, dw + weight_offset * g); } else if (data_format == "NHWC") { math::Gemm<T, Context>(CblasTrans, CblasNoTrans, kernel_dim, conv_out_channels / group, conv_out_spatial_dim, 1.0, col_buff_ + col_offset * g, dy + output_offset * g, 1.0, dw + weight_offset * g); } } } template <class Context> template <typename T> void ConvOpBase<Context>::Db(const T* dy, T* db) { if (data_format == "NCHW") { math::Gemv<T, Context>(CblasNoTrans, num_output, out_spatial_dim, 1.0, dy, bias_multiplier->template data<T, Context>(), 1.0, db); } else if (data_format == "NHWC") { math::Gemv<T, Context>(CblasTrans, out_spatial_dim, num_output, 1.0, dy, bias_multiplier->template data<T, Context>(), 1.0, db); } } template <class Context> void ConvOpBase<Context>::Setup() { vector<int> ks = OperatorBase::GetRepeatedArg<int>("kernel_size"); for (int i = 0; i < num_spatial_axes; i++) kernel_size.push_back(i < ks.size() ? ks[i] : ks[0]); vector<int> s = OperatorBase::GetRepeatedArg<int>("stride"); for (int i = 0; i < num_spatial_axes; i++) stride.push_back(i < s.size() ? 
s[i] : s[0]); vector<int> p = OperatorBase::GetRepeatedArg<int>("pad"); for (int i = 0; i < num_spatial_axes; i++) pad.push_back(i < p.size() ? p[i] : p[0]); vector<int> d = OperatorBase::GetRepeatedArg<int>("dilation"); for (int i = 0; i < num_spatial_axes; i++) dilation.push_back(i < d.size() ? d[i] : d[0]); is_1x1 = true; for (int i = 0; i < num_spatial_axes; i++) { is_1x1 &= (kernel_size[i] == 1 && stride[i] == 1 && pad[i] == 0); if (!is_1x1) break; } } template <class Context> void ConvOpBase<Context>::Reshape() { channels = data_format == "NCHW" ? input(0).dim(1) : input(0).dim(-1); if (ReverseDimensions()) { conv_out_channels = channels; conv_in_channels = num_output; } else { conv_out_channels = num_output; conv_in_channels = channels; } // determine the weight and bias shape if (data_format == "NCHW") { weight_shape.assign({ conv_out_channels, conv_in_channels / group }); for (int i = 0; i < num_spatial_axes; i++) weight_shape.push_back(kernel_size[i]); } else if (data_format == "NHWC") { weight_shape.clear(); for (int i = 0; i < num_spatial_axes; i++) weight_shape.push_back(kernel_size[i]); weight_shape.push_back(conv_in_channels / group); weight_shape.push_back(conv_out_channels); } bias_shape.assign(1, num_output); // determine the bottom and top shape bottom_shape = input(0).dims(); ComputeOutputShape(); if (data_format == "NCHW") { top_shape.assign({ input(0).dim(0), num_output }); for (int i = 0; i < num_spatial_axes; i++) top_shape.push_back(output_shape[i]); } else if (data_format == "NHWC") { top_shape.assign({ input(0).dim(0) }); for (int i = 0; i < num_spatial_axes; i++) top_shape.push_back(output_shape[i]); top_shape.push_back(num_output); } output(0)->Reshape(top_shape); // determine the input shape for im2col/col2im input_shape.clear(); for (int i = 0; i < num_spatial_axes; i++) { if (ReverseDimensions()) { input_shape.push_back(output(0)->dim(spatial_axis + i)); } else { input_shape.push_back(input(0).dim(spatial_axis + i)); } } // determine 
the out spatial dim if (data_format == "NCHW") { if (ReverseDimensions()) { conv_out_spatial_dim = input(0).count(spatial_axis); } else { conv_out_spatial_dim = output(0)->count(spatial_axis); } out_spatial_dim = output(0)->count(spatial_axis); } else if (data_format == "NHWC") { if (ReverseDimensions()) { conv_out_spatial_dim = input(0).count(spatial_axis, (int)input(0).ndim() - 1); } else { conv_out_spatial_dim = output(0)->count(spatial_axis, (int)output(0)->ndim() - 1); } out_spatial_dim = output(0)->count(spatial_axis, (int)output(0)->ndim() - 1); } // determine the misc x_offset = input(0).count(1); y_offset = output(0)->count(1); kernel_dim = conv_in_channels / group * kernel_size[0] * kernel_size[1]; weight_offset = conv_out_channels * kernel_dim / group; col_offset = kernel_dim * conv_out_spatial_dim; output_offset = conv_out_channels * conv_out_spatial_dim / group; // determine the col shape col_shape.clear(); if (data_format == "NCHW") { col_shape.push_back(kernel_dim * group); for (int i = 0; i < num_spatial_axes; i++) { if (ReverseDimensions()) col_shape.push_back(bottom_shape[spatial_axis + i]); else col_shape.push_back(output_shape[i]); } } else if (data_format == "NHWC") { for (int i = 0; i < num_spatial_axes; i++) { if (ReverseDimensions()) col_shape.push_back(bottom_shape[spatial_axis + i]); else col_shape.push_back(output_shape[i]); } col_shape.push_back(kernel_dim * group); } } template <class Context> void ConvOpBase<Context>::GradientReshape() { channels = data_format == "NCHW" ? 
input(0).dim(1) : input(0).dim(-1); if (ReverseDimensions()) { conv_out_channels = channels; conv_in_channels = num_output; } else{ conv_out_channels = num_output; conv_in_channels = channels; } // determine the bottom and top shape bottom_shape = input(0).dims(); ComputeOutputShape(); output(0)->Reshape(bottom_shape); output(1)->ReshapeLike(input(1)); output(2)->Reshape(vector<TIndex>(1, num_output)); // determine the input shape for im2col/col2im input_shape.clear(); for (int i = 0; i < num_spatial_axes; i++) { if (ReverseDimensions()) { input_shape.push_back(input(-1).dim(spatial_axis + i)); } else { input_shape.push_back(input(0).dim(spatial_axis + i)); } } // determine the out spatial dim if (data_format == "NCHW") { if (ReverseDimensions()) { conv_out_spatial_dim = input(0).count(spatial_axis); } else { conv_out_spatial_dim = input(-1).count(spatial_axis); } out_spatial_dim = input(-1).count(spatial_axis); } else if (data_format == "NHWC") { if (ReverseDimensions()) { conv_out_spatial_dim = input(0).count(spatial_axis, (int)input(0).ndim() - 1); } else { conv_out_spatial_dim = input(-1).count(spatial_axis, (int)input(-1).ndim() - 1); } out_spatial_dim = input(-1).count(spatial_axis, (int)input(-1).ndim() - 1); } // determine the misc x_offset = input(0).count(1); y_offset = input(-1).count(1); kernel_dim = conv_in_channels / group * kernel_size[0] * kernel_size[1]; weight_offset = conv_out_channels * kernel_dim / group; col_offset = kernel_dim * conv_out_spatial_dim; output_offset = conv_out_channels * conv_out_spatial_dim / group; // determine the col shape col_shape.clear(); if (data_format == "NCHW") { col_shape.push_back(kernel_dim * group); for (int i = 0; i < num_spatial_axes; i++) { if (ReverseDimensions()) col_shape.push_back(bottom_shape[spatial_axis + i]); else col_shape.push_back(output_shape[i]); } } else if (data_format == "NHWC") { for (int i = 0; i < num_spatial_axes; i++) { if (ReverseDimensions()) col_shape.push_back(bottom_shape[spatial_axis 
+ i]); else col_shape.push_back(output_shape[i]); } col_shape.push_back(kernel_dim * group); } } template class ConvOpBase<CPUContext>;; template void ConvOpBase<CPUContext>::Wx(const float*, const float*, float*, bool); template void ConvOpBase<CPUContext>::Pb(const float*, float*); template void ConvOpBase<CPUContext>::Dx(const float*, const float*, float*); template void ConvOpBase<CPUContext>::Dw(const float*, const float*, float*); template void ConvOpBase<CPUContext>::Db(const float*, float*); #ifdef WITH_CUDA template class ConvOpBase<CUDAContext>; template void ConvOpBase<CUDAContext>::Wx(const float*, const float*, float*, bool); template void ConvOpBase<CUDAContext>::Pb(const float*, float*); template void ConvOpBase<CUDAContext>::Dx(const float*, const float*, float*); template void ConvOpBase<CUDAContext>::Dw(const float*, const float*, float*); template void ConvOpBase<CUDAContext>::Db(const float*, float*); #endif } // namespace dragon
43.498667
119
0.52912
[ "shape", "vector" ]
86f50a73511b65b417c3840648c6a0ccb0779dd6
3,203
hpp
C++
Utils/Hash/Hash.hpp
lydiaxing/fprime
f6b3e03f89e9aca1614243c9896d4a72aa0cc726
[ "Apache-2.0" ]
2
2020-09-08T05:39:05.000Z
2021-05-04T14:58:51.000Z
Utils/Hash/Hash.hpp
JPLOpenSource/fprime-sw-Rel1.0
18364596c24fa369c938ef8758e5aa945ecc6a9b
[ "Apache-2.0" ]
2
2019-02-27T03:17:15.000Z
2019-03-01T22:34:30.000Z
Utils/Hash/Hash.hpp
JPLOpenSource/fprime-sw-Rel1.0
18364596c24fa369c938ef8758e5aa945ecc6a9b
[ "Apache-2.0" ]
3
2019-02-17T20:41:15.000Z
2019-02-26T21:06:50.000Z
// ====================================================================== // \title Hash.hpp // \author dinkel // \brief hpp file for Hash class // // \copyright // Copyright 2009-2015, by the California Institute of Technology. // ALL RIGHTS RESERVED. United States Government Sponsorship // acknowledged. Any commercial use must be negotiated with the Office // of Technology Transfer at the California Institute of Technology. // // This software may be subject to U.S. export control laws and // regulations. By accepting this document, the user agrees to comply // with all U.S. export laws and regulations. User has the // responsibility to obtain export licenses, or other export authority // as may be required before exporting such information to foreign // countries or providing access to foreign persons. // ====================================================================== #ifndef UTILS_HASH_HPP #define UTILS_HASH_HPP #include <Utils/Hash/HashBuffer.hpp> namespace Utils { //! \class Hash //! \brief A generic interface for creating and comparing hash values //! class Hash { public: // ---------------------------------------------------------------------- // Construction and destruction // ---------------------------------------------------------------------- //! Construct a Hash object //! Hash(); //! Destroy a Hash object //! ~Hash(); public: // ---------------------------------------------------------------------- // Public static methods // ---------------------------------------------------------------------- //! Create a hash value all at once from raw data //! static void hash( const void *data, //! Pointer to start of data const NATIVE_INT_TYPE len, //! Length of the data HashBuffer& buffer //! Resulting hash value ); public: // ---------------------------------------------------------------------- // Public instance methods // ---------------------------------------------------------------------- //! 
Initialize a Hash object for incremental hash computation //! void init(void); //! Update an incremental computation with new data //! void update( const void *const data, //! Pointer to start of data const NATIVE_INT_TYPE len //! Length of the data ); //! Finalize an incremental computation and return the result //! void final( HashBuffer& buffer //! The result ); //! Get the file extension for the supported hash type //! E.g., could return "SHA256" //! static const char* getFileExtensionString(void); //! Get the length of the file extension string //! static NATIVE_UINT_TYPE getFileExtensionLength(void); private: // ---------------------------------------------------------------------- // Private member variables // ---------------------------------------------------------------------- //! The hash handle //! HASH_HANDLE_TYPE hash_handle; }; } #endif
29.934579
80
0.482985
[ "object" ]
86fb21edd43bb2e49657ce7b44a7134e594bfaa4
7,104
cpp
C++
modules/Alexa/APLClientLibrary/APLClient/src/Extensions/Backstack/AplBackstackExtension.cpp
germanviscuso/alexa-smart-screen-sdk
9878cafa35df05d862b3bfc027aa6b1b463ef9c1
[ "Apache-2.0" ]
null
null
null
modules/Alexa/APLClientLibrary/APLClient/src/Extensions/Backstack/AplBackstackExtension.cpp
germanviscuso/alexa-smart-screen-sdk
9878cafa35df05d862b3bfc027aa6b1b463ef9c1
[ "Apache-2.0" ]
null
null
null
modules/Alexa/APLClientLibrary/APLClient/src/Extensions/Backstack/AplBackstackExtension.cpp
germanviscuso/alexa-smart-screen-sdk
9878cafa35df05d862b3bfc027aa6b1b463ef9c1
[ "Apache-2.0" ]
null
null
null
/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0/ * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include <string> #include "APLClient/Extensions/Backstack/AplBackstackExtension.h" namespace APLClient { namespace Extensions { namespace Backstack { /// String to identify log entries originating from this file. static const std::string TAG("AplBackstackExtension"); static const std::string URI = "aplext:backstack:10"; static const std::string ENVIRONMENT_RESPONSIBLE_FOR_BACK_BUTTON = "responsibleForBackButton"; static const std::string ENVIRONMENT_BACKSTACK = "backstack"; static const std::string SETTING_PROPERTY_BACKSTACK_ID = "backstackId"; static const std::string SETTING_PROPERTY_BACKSTACK_ARRAY_NAME = "backstackArrayName"; static const std::string COMMAND_GO_BACK_NAME = "GoBack"; static const std::string COMMAND_CLEAR_NAME = "Clear"; static const std::string PROPERTY_BACK_TYPE = "backType"; static const std::string PROPERTY_BACK_VALUE = "backValue"; AplBackstackExtension::AplBackstackExtension(std::shared_ptr<AplBackstackExtensionObserverInterface> observer) : m_observer{observer} { m_backstackArrayName = ""; } void AplBackstackExtension::setResponsibleForBackButton(bool isResponsibleForBackButton) { m_responsibleForBackButton = isResponsibleForBackButton; } bool AplBackstackExtension::shouldCacheActiveDocument() { return !m_activeDocumentId.empty(); } void AplBackstackExtension::addDocumentStateToBackstack(const AplDocumentStatePtr& documentState) { documentState->id = m_activeDocumentId; 
m_backstack.addDocumentState(documentState); clearActiveDocumentId(); } void AplBackstackExtension::clearActiveDocumentId() { m_activeDocumentId = ""; } void AplBackstackExtension::reset() { clearActiveDocumentId(); m_backstack.clear(); } bool AplBackstackExtension::handleBack() { if (!m_responsibleForBackButton) { return goBackCount(1); } return false; } std::string AplBackstackExtension::getUri() { return URI; } apl::Object AplBackstackExtension::getEnvironment() { auto env = std::make_shared<apl::ObjectMap>(); env->emplace(ENVIRONMENT_RESPONSIBLE_FOR_BACK_BUTTON, m_responsibleForBackButton); env->emplace(ENVIRONMENT_BACKSTACK, m_backstack.getBackstackIdsArray()); return apl::Object(env); } std::list<apl::ExtensionCommandDefinition> AplBackstackExtension::getCommandDefinitions() { std::list<apl::ExtensionCommandDefinition> extCmdDefs( {apl::ExtensionCommandDefinition(URI, COMMAND_GO_BACK_NAME) .allowFastMode(true) .property(PROPERTY_BACK_TYPE, PROPERTY_BACK_TYPE_COUNT, false) .property(PROPERTY_BACK_VALUE, 1, false), apl::ExtensionCommandDefinition(URI, COMMAND_CLEAR_NAME).allowFastMode(true)}); return extCmdDefs; } std::list<apl::ExtensionEventHandler> AplBackstackExtension::getEventHandlers() { std::list<apl::ExtensionEventHandler> extensionEventHandlers({}); return extensionEventHandlers; } std::unordered_map<std::string, apl::LiveObjectPtr> AplBackstackExtension::getLiveDataObjects() { auto liveObjects = std::unordered_map<std::string, apl::LiveObjectPtr>(); if (!m_backstackArrayName.empty()) { liveObjects.emplace(m_backstackArrayName, m_backstack.getBackstackIds()); } return liveObjects; } void AplBackstackExtension::applySettings(const apl::Object& settings) { // Reset to defaults clearActiveDocumentId(); m_backstackArrayName = ""; logMessage(LOGLEVEL_DEBUG, TAG, "backstack_settings", settings.toDebugString()); /// Apply @c apl::Content defined settings if (settings.isMap()) { if (settings.has(SETTING_PROPERTY_BACKSTACK_ID)) { m_activeDocumentId = 
settings.get(SETTING_PROPERTY_BACKSTACK_ID).getString(); } if (settings.has(SETTING_PROPERTY_BACKSTACK_ARRAY_NAME)) { m_backstackArrayName = settings.get(SETTING_PROPERTY_BACKSTACK_ARRAY_NAME).getString(); } } } bool AplBackstackExtension::restoreDocumentState(const AplDocumentStatePtr& documentState) { if (documentState) { clearActiveDocumentId(); m_observer->onRestoreDocumentState(documentState); return true; } return false; } bool AplBackstackExtension::goBackId(const std::string& id) { return restoreDocumentState(m_backstack.popDocuments(id)); } bool AplBackstackExtension::goBackIndex(unsigned int index) { return restoreDocumentState(m_backstack.popDocumentsAtIndex(index)); } bool AplBackstackExtension::goBackCount(unsigned int count) { return restoreDocumentState(m_backstack.popDocuments(count)); } bool AplBackstackExtension::handleGoBack(const apl::Object& params) { if (confirmEventParams(TAG, {PROPERTY_BACK_TYPE, PROPERTY_BACK_VALUE}, params)) { auto backType = backTypeFromString(params.get(PROPERTY_BACK_TYPE).getString()); auto backValue = params.get(PROPERTY_BACK_VALUE); switch (backType) { case AplBackType::COUNT: if (backValue.isNumber()) { return goBackCount(backValue.getUnsigned()); } case AplBackType::INDEX: if (backValue.isNumber()) { return goBackIndex(backValue.getUnsigned()); } case AplBackType::ID: if (backValue.isString()) { return goBackId(backValue.getString()); } } } return false; } void AplBackstackExtension::onExtensionEvent( const std::string& uri, const std::string& name, const apl::Object& source, const apl::Object& params, unsigned int event, std::shared_ptr<AplCoreExtensionEventCallbackResultInterface> resultCallback) { auto eventDebugString = getEventDebugString(uri, name, params); logMessage(LOGLEVEL_DEBUG, TAG, __func__, eventDebugString); bool succeeded = true; if (m_observer) { if (COMMAND_GO_BACK_NAME == name) { succeeded = handleGoBack(params); } else if (COMMAND_CLEAR_NAME == name) { m_backstack.clear(); } else { 
logMessage(apl::LogLevel::ERROR, TAG, __func__, "Invalid Command: " + eventDebugString); succeeded = false; } } else { logMessage(apl::LogLevel::ERROR, TAG, __func__, "No Event Observer: " + eventDebugString); succeeded = false; } if (resultCallback) { resultCallback->onExtensionEventResult(event, succeeded); } } } // namespace Backstack } // namespace Extensions } // namespace APLClient
35.878788
112
0.716216
[ "object" ]
8104d41dbe45ae5265e76380014b0539d1b2c9cf
4,222
cpp
C++
src/animations/animationsblinking.cpp
mamontov-cpp/saddy
f20a0030e18af9e0714fe56c19407fbeacc529a7
[ "BSD-2-Clause" ]
58
2015-08-09T14:56:35.000Z
2022-01-15T22:06:58.000Z
src/animations/animationsblinking.cpp
mamontov-cpp/saddy-graphics-engine-2d
e25a6637fcc49cb26614bf03b70e5d03a3a436c7
[ "BSD-2-Clause" ]
245
2015-08-08T08:44:22.000Z
2022-01-04T09:18:08.000Z
src/animations/animationsblinking.cpp
mamontov-cpp/saddy
f20a0030e18af9e0714fe56c19407fbeacc529a7
[ "BSD-2-Clause" ]
23
2015-12-06T03:57:49.000Z
2020-10-12T14:15:50.000Z
#include "animations/animationsblinking.h" #include "animations/animationsinstance.h" #include "animations/setstate/methodcall.h" #include "animations/setstate/setproperty.h" #include "animations/setstate/dummycommand.h" #include "animations/easing/easingfunction.h" #include "label.h" #include "sprite2d.h" #include "db/custom/customobject.h" #include "db/schema/schema.h" #include "db/dbproperty.h" #include "db/save.h" #include "db/load.h" #include "db/dbproperty.h" #include "db/dbfield.h" #include "db/dbmethodpair.h" #include "db/dbtable.h" #include <util/fs.h> #include <resource/resourcefile.h> #include <3rdparty/picojson/valuetotype.h> #include "sadmutex.h" #include <fstream> DECLARE_SOBJ_INHERITANCE(sad::animations::Blinking, sad::animations::Animation); // =============================== PUBLIC METHODS ========================== sad::animations::Blinking::Blinking() : m_frequency(1) { m_creators.pushProperty<bool>("visible", "visible"); } sad::animations::Blinking::~Blinking() { } static sad::db::schema::Schema* AnimationBlinkingSchema = nullptr; static sad::Mutex AnimationsBlinkingSchemaInit; sad::db::schema::Schema* sad::animations::Blinking::basicSchema() { if (AnimationBlinkingSchema == nullptr) { AnimationsBlinkingSchemaInit.lock(); if (AnimationBlinkingSchema == nullptr) { AnimationBlinkingSchema = new sad::db::schema::Schema(); AnimationBlinkingSchema->addParent(sad::animations::Animation::basicSchema()); AnimationBlinkingSchema->add( "frequency", new sad::db::MethodPair<sad::animations::Blinking, unsigned int>( &sad::animations::Blinking::frequency, &sad::animations::Blinking::setFrequency ) ); sad::ClassMetaDataContainer::ref()->pushGlobalSchema(AnimationBlinkingSchema); } AnimationsBlinkingSchemaInit.unlock(); } return AnimationBlinkingSchema; } sad::db::schema::Schema* sad::animations::Blinking::schema() const { return sad::animations::Blinking::basicSchema(); } bool sad::animations::Blinking::loadFromValue(const picojson::value& v) { bool flag = 
this->sad::animations::Animation::loadFromValue(v); if (flag) { sad::Maybe<unsigned int> frequency = picojson::to_type<unsigned int>( picojson::get_property(v, "frequency") ); bool result = frequency.exists(); if (result) { setFrequency(frequency.value()); } flag = flag && result; } return flag; } void sad::animations::Blinking::setFrequency(unsigned int freq) { m_frequency = freq; m_inner_valid = m_frequency != 0; this->updateValidFlag(); } unsigned int sad::animations::Blinking::frequency() const { return m_frequency; } void sad::animations::Blinking::setState(sad::animations::Instance* i, double time) { double timeposition = m_easing->eval(time, m_time); unsigned int pos = static_cast<unsigned int>(timeposition * m_frequency); i->stateCommandAs<bool>()->call((pos % 2) != 0); } sad::animations::setstate::AbstractSetStateCommand* sad::animations::Blinking::stateCommand(sad::db::Object* o) { if (this->applicableTo(o)) { sad::animations::setstate::AbstractSetStateCommand* c; if (o->isInstanceOf("sad::SceneNode")) { c = sad::animations::setstate::make( o, &sad::SceneNode::setVisible ); } else { c = new sad::animations::setstate::SetProperty<bool>(o, "visible"); } return c; } return new sad::animations::setstate::DummyCommand<bool>(); } bool sad::animations::Blinking::applicableTo(sad::db::Object* o) { bool result = false; if (o && m_valid) { result = o->getProperty<bool>("visible").exists(); } return result; }
27.23871
112
0.599953
[ "object" ]
8107b27720e79f7cbb3d18b69efb2e5b6b2bdd64
1,878
cc
C++
companyreg/src/model/AcceptPartnerNotificationRequest.cc
iamzken/aliyun-openapi-cpp-sdk
3c991c9ca949b6003c8f498ce7a672ea88162bf1
[ "Apache-2.0" ]
null
null
null
companyreg/src/model/AcceptPartnerNotificationRequest.cc
iamzken/aliyun-openapi-cpp-sdk
3c991c9ca949b6003c8f498ce7a672ea88162bf1
[ "Apache-2.0" ]
null
null
null
companyreg/src/model/AcceptPartnerNotificationRequest.cc
iamzken/aliyun-openapi-cpp-sdk
3c991c9ca949b6003c8f498ce7a672ea88162bf1
[ "Apache-2.0" ]
1
2020-11-27T09:13:12.000Z
2020-11-27T09:13:12.000Z
/* * Copyright 2009-2017 Alibaba Cloud All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <alibabacloud/companyreg/model/AcceptPartnerNotificationRequest.h> using AlibabaCloud::Companyreg::Model::AcceptPartnerNotificationRequest; AcceptPartnerNotificationRequest::AcceptPartnerNotificationRequest() : RpcServiceRequest("companyreg", "2019-05-08", "AcceptPartnerNotification") { setMethod(HttpRequest::Method::Post); } AcceptPartnerNotificationRequest::~AcceptPartnerNotificationRequest() {} std::string AcceptPartnerNotificationRequest::getOfficialFileURL()const { return officialFileURL_; } void AcceptPartnerNotificationRequest::setOfficialFileURL(const std::string& officialFileURL) { officialFileURL_ = officialFileURL; setParameter("OfficialFileURL", officialFileURL); } std::string AcceptPartnerNotificationRequest::getBizId()const { return bizId_; } void AcceptPartnerNotificationRequest::setBizId(const std::string& bizId) { bizId_ = bizId; setParameter("BizId", bizId); } int AcceptPartnerNotificationRequest::getApplicationStatus()const { return applicationStatus_; } void AcceptPartnerNotificationRequest::setApplicationStatus(int applicationStatus) { applicationStatus_ = applicationStatus; setParameter("ApplicationStatus", std::to_string(applicationStatus)); }
29.809524
94
0.781683
[ "model" ]
8109999fd15f1d0e343fd122b61679e537dc38ab
4,934
cc
C++
lib/ts/test_Vec.cc
equalitie/trafficserver
42b09742c7e09e142081b31001e80852ed9e7a09
[ "Apache-2.0" ]
null
null
null
lib/ts/test_Vec.cc
equalitie/trafficserver
42b09742c7e09e142081b31001e80852ed9e7a09
[ "Apache-2.0" ]
null
null
null
lib/ts/test_Vec.cc
equalitie/trafficserver
42b09742c7e09e142081b31001e80852ed9e7a09
[ "Apache-2.0" ]
null
null
null
/* -*-Mode: c++;-*- Various vector related code. @section license License Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* UnionFind after Tarjan */ #include <cstdint> #include <cstdio> #include "ts/ink_assert.h" #include "ts/Vec.h" static void test_append() { static const char value[] = "this is a string"; unsigned int len = (int)sizeof(value) - 1; Vec<char> str; str.append(value, 0); ink_assert(str.length() == 0); str.append(value, len); ink_assert(memcmp(&str[0], value, len) == 0); ink_assert(str.length() == len); str.clear(); ink_assert(str.length() == 0); for (unsigned i = 0; i < 1000; ++i) { str.append(value, len); ink_assert(memcmp(&str[i * len], value, len) == 0); } ink_assert(str.length() == 1000 * len); } static void test_basic() { Vec<void *> v, vv, vvv; int tt = 99 * 50, t = 0; for (size_t i = 0; i < 100; i++) v.add((void *)(intptr_t)i); for (size_t i = 0; i < 100; i++) t += (int)(intptr_t)v.v[i]; ink_assert(t == tt); t = 0; for (size_t i = 1; i < 100; i++) vv.set_add((void *)(intptr_t)i); for (size_t i = 1; i < 100; i++) vvv.set_add((void *)(intptr_t)i); for (size_t i = 1; i < 100; i++) vvv.set_add((void *)(intptr_t)(i * 1000)); vv.set_union(vvv); for (size_t i = 0; i < vv.n; i++) if (vv.v[i]) t += (int)(intptr_t)vv.v[i]; ink_assert(t == tt + 1000 * tt); 
v.clear(); v.reserve(1000); t = 0; for (size_t i = 0; i < 1000; i++) v.add((void *)(intptr_t)i); for (size_t i = 0; i < 1000; i++) t += (int)(intptr_t)v.v[i]; ink_assert(t == 999 * 500); printf("%zu %zu\n", v.n, v.i); Intervals in; in.insert(1); ink_assert(in.n == 2); in.insert(2); ink_assert(in.n == 2); in.insert(6); ink_assert(in.n == 4); in.insert(7); ink_assert(in.n == 4); in.insert(9); ink_assert(in.n == 6); in.insert(4); ink_assert(in.n == 8); in.insert(5); ink_assert(in.n == 6); in.insert(3); ink_assert(in.n == 4); in.insert(8); ink_assert(in.n == 2); UnionFind uf; uf.size(4); uf.unify(0, 1); uf.unify(2, 3); ink_assert(uf.find(2) == uf.find(3)); ink_assert(uf.find(0) == uf.find(1)); ink_assert(uf.find(0) != uf.find(3)); ink_assert(uf.find(1) != uf.find(3)); ink_assert(uf.find(1) != uf.find(2)); ink_assert(uf.find(0) != uf.find(2)); uf.unify(1, 2); ink_assert(uf.find(0) == uf.find(3)); ink_assert(uf.find(1) == uf.find(3)); } static bool compare(void *a, void *b) { return a < b; } static void test_sort() { Vec<void *> v; for (long i = 1; i <= 1000; ++i) v.add(reinterpret_cast<void *>(static_cast<intptr_t>(((i * 149) % 1000) + 1))); v.qsort(&compare); for (int i = 0; i < 1000; ++i) ink_assert(reinterpret_cast<void *>(static_cast<intptr_t>(i + 1)) == v[i]); v.clear(); for (long i = 1; i <= 1000000; ++i) { v.add(reinterpret_cast<void *>(static_cast<intptr_t>(((i * 51511) % 1000000) + 1))); } v.qsort(&compare); for (long i = 0; i < 1000000; ++i) { ink_assert(reinterpret_cast<void *>(static_cast<intptr_t>(i + 1)) == v[i]); } v.clear(); for (long i = 1; i <= 1000000; ++i) { // This should be every number 1..500000 twice. v.add(reinterpret_cast<void *>(static_cast<intptr_t>(((i * 199999) % 500000) + 1))); } v.qsort(&compare); for (long i = 0; i < 1000000; ++i) { ink_assert(reinterpret_cast<void *>(static_cast<intptr_t>((i / 2) + 1)) == v[i]); } // Very long array, already sorted. This is what broke before. 
v.clear(); for (long i = 1; i <= 10000000; ++i) v.add(reinterpret_cast<void *>(static_cast<intptr_t>(i))); v.qsort(&compare); for (long i = 0; i < 10000000; ++i) ink_assert(reinterpret_cast<void *>(static_cast<intptr_t>(i + 1)) == v[i]); // very long, reverse sorted. v.clear(); for (long i = 10000000; i >= 1; --i) v.add(reinterpret_cast<void *>(static_cast<intptr_t>(i))); v.qsort(&compare); for (long i = 0; i < 10000000; ++i) ink_assert(reinterpret_cast<void *>(static_cast<intptr_t>(i + 1)) == v[i]); } int main(int /* argc ATS_UNUSED */, char ** /* argv ATS_UNUSED */) { test_append(); test_basic(); test_sort(); printf("test_Vec PASSED\n"); }
26.244681
88
0.605188
[ "vector" ]
810aedd335a418ac10399e5c88c0e07b0a961d71
945
hpp
C++
libraries/plugins/block_info/include/bmchain/plugins/block_info/block_info_api.hpp
igorsoldatov/BuisnessWiki
ccafad446c2d5db8b13499b587fc75d71ffeebc8
[ "MIT" ]
4
2018-02-02T12:56:53.000Z
2018-02-17T23:07:39.000Z
libraries/plugins/block_info/include/bmchain/plugins/block_info/block_info_api.hpp
igorsoldatov/BuisnessWiki
ccafad446c2d5db8b13499b587fc75d71ffeebc8
[ "MIT" ]
null
null
null
libraries/plugins/block_info/include/bmchain/plugins/block_info/block_info_api.hpp
igorsoldatov/BuisnessWiki
ccafad446c2d5db8b13499b587fc75d71ffeebc8
[ "MIT" ]
4
2018-02-02T12:50:24.000Z
2019-03-26T11:32:21.000Z
#pragma once #include <fc/api.hpp> #include <bmchain/plugins/block_info/block_info.hpp> namespace bmchain { namespace app { struct api_context; } } namespace bmchain { namespace plugin { namespace block_info { namespace detail { class block_info_api_impl; } struct get_block_info_args { uint32_t start_block_num = 0; uint32_t count = 1000; }; class block_info_api { public: block_info_api( const bmchain::app::api_context& ctx ); void on_api_startup(); std::vector< block_info > get_block_info( get_block_info_args args ); std::vector< block_with_info > get_blocks_with_info( get_block_info_args args ); private: std::shared_ptr< detail::block_info_api_impl > my; }; } } } FC_REFLECT( bmchain::plugin::block_info::get_block_info_args, (start_block_num) (count) ) FC_API( bmchain::plugin::block_info::block_info_api, (get_block_info) (get_blocks_with_info) )
19.285714
86
0.713228
[ "vector" ]
810af531bbdcfc30db06c5fb642206e061c09d9b
18,223
cpp
C++
ReactAndroid/third-party-ndk/boost/boost_1_66_0/libs/geometry/test/algorithms/overlay/overlay.cpp
yinhangfeng/react-native
35e88f14195aa7a75ace8881956a0eb4bdadea62
[ "CC-BY-4.0", "BSD-3-Clause" ]
null
null
null
ReactAndroid/third-party-ndk/boost/boost_1_66_0/libs/geometry/test/algorithms/overlay/overlay.cpp
yinhangfeng/react-native
35e88f14195aa7a75ace8881956a0eb4bdadea62
[ "CC-BY-4.0", "BSD-3-Clause" ]
null
null
null
ReactAndroid/third-party-ndk/boost/boost_1_66_0/libs/geometry/test/algorithms/overlay/overlay.cpp
yinhangfeng/react-native
35e88f14195aa7a75ace8881956a0eb4bdadea62
[ "CC-BY-4.0", "BSD-3-Clause" ]
1
2019-03-08T11:06:22.000Z
2019-03-08T11:06:22.000Z
// Boost.Geometry (aka GGL, Generic Geometry Library) // Unit Test // Copyright (c) 2015 Barend Gehrels, Amsterdam, the Netherlands. // This file was modified by Oracle on 2017. // Modifications copyright (c) 2017, Oracle and/or its affiliates. // Contributed and/or modified by Adam Wulkiewicz, on behalf of Oracle // Use, modification and distribution is subject to the Boost Software License, // Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) #include <iostream> #include <iomanip> #include <fstream> #include <sstream> #include <string> #include <boost/type_traits/is_same.hpp> #if defined(TEST_WITH_SVG) # include <boost/geometry/io/svg/svg_mapper.hpp> #endif #include <geometry_test_common.hpp> #include <boost/geometry.hpp> #include <boost/geometry/algorithms/detail/overlay/debug_turn_info.hpp> #include <boost/geometry/geometries/geometries.hpp> //#include <boost/geometry/extensions/algorithms/inverse.hpp> #if defined(TEST_WITH_SVG) # include <boost/geometry/io/svg/svg_mapper.hpp> #endif #include "multi_overlay_cases.hpp" #if defined(TEST_WITH_SVG) template <typename Mapper> struct map_visitor { map_visitor(Mapper& mapper) : m_mapper(mapper) , m_traverse_seq(0) , m_do_output(true) {} void print(char const* header) {} template <typename Turns> void print(char const* header, Turns const& turns, int turn_index) { std::string style = "fill:rgb(0,0,0);font-family:Arial;font-size:6px"; stream(turns, turns[turn_index], turns[turn_index].operations[0], header, style); } template <typename Turns> void print(char const* header, Turns const& turns, int turn_index, int op_index) { std::string style = "fill:rgb(0,0,0);font-family:Arial;font-size:6px"; stream(turns, turns[turn_index], turns[turn_index].operations[op_index], header, style); } template <typename Turns> void visit_turns(int phase, Turns const& turns) { typedef typename boost::range_value<Turns>::type turn_type; int index = 0; BOOST_FOREACH(turn_type const& turn, 
turns) { switch (phase) { case 1 : m_mapper.map(turn.point, "fill:rgb(255,128,0);" "stroke:rgb(0,0,0);stroke-width:1", 3); break; case 11 : m_mapper.map(turn.point, "fill:rgb(92,255,0);" // Greenish "stroke:rgb(0,0,0);stroke-width:1", 3); break; case 21 : m_mapper.map(turn.point, "fill:rgb(0,128,255);" // Blueish "stroke:rgb(0,0,0);stroke-width:1", 3); break; case 3 : label_turn(index, turn); break; } index++; } } template <typename Turns, typename Turn, typename Operation> std::string stream_turn_index(Turns const& turns, Turn const& turn, Operation const& op) { std::ostringstream out; if (turn.cluster_id >= 0) { out << "cl=" << turn.cluster_id << " "; } // Because turn index is unknown here, and still useful for debugging, std::size_t index = 0; for (typename Turns::const_iterator it = turns.begin(); it != turns.end(); ++it, ++index) { Turn const& t = *it; if (&t == &turn) { out << index; break; } } if (&op == &turn.operations[0]) { out << "[0]"; } if (&op == &turn.operations[1]) { out << "[1]"; } return out.str(); } template <typename Clusters, typename Turns> void visit_clusters(Clusters const& clusters, Turns const& turns) { typedef typename boost::range_value<Turns>::type turn_type; int index = 0; BOOST_FOREACH(turn_type const& turn, turns) { if (turn.cluster_id >= 0) { std::cout << " TURN: " << index << " part of cluster " << turn.cluster_id << std::endl; } index++; } for (typename Clusters::const_iterator it = clusters.begin(); it != clusters.end(); ++it) { std::cout << " CLUSTER " << it->first << ": "; for (typename std::set<bg::signed_size_type>::const_iterator sit = it->second.turn_indices.begin(); sit != it->second.turn_indices.end(); ++sit) { std::cout << " " << *sit; } std::cout << std::endl; } std::cout << std::endl; } template <typename Turns, typename Turn, typename Operation> void visit_traverse(Turns const& turns, Turn const& turn, Operation const& op, const std::string& header) { typedef typename boost::range_value<Turns>::type turn_type; if (! 
m_do_output) { return; } std::cout << "Visit turn " << stream_turn_index(turns, turn, op) << " " << bg::operation_char(turn.operations[0].operation) << bg::operation_char(turn.operations[1].operation) << " (" << bg::operation_char(op.operation) << ")" << " " << header << std::endl; // Uncomment for more detailed debug info in SVG on traversal std::string style = header == "Visit" ? "fill:rgb(80,80,80)" : "fill:rgb(0,0,0)"; style += ";font-family:Arial;font-size:6px"; stream(turns, turn, op, header.substr(0, 1), style); } template <typename Turns, typename Turn, typename Operation> void visit_traverse_reject(Turns const& turns, Turn const& turn, Operation const& op, bg::detail::overlay::traverse_error_type error) { if (! m_do_output) { return; } std::cout << "Reject turn " << stream_turn_index(turns, turn, op) << bg::operation_char(turn.operations[0].operation) << bg::operation_char(turn.operations[1].operation) << " (" << bg::operation_char(op.operation) << ")" << " " << bg::detail::overlay::traverse_error_string(error) << std::endl; //return; std::string style = "fill:rgb(255,0,0);font-family:Arial;font-size:7px"; stream(turns, turn, op, bg::detail::overlay::traverse_error_string(error), style); m_do_output = false; } template <typename Turns, typename Turn, typename Operation> void visit_traverse_select_turn_from_cluster(Turns const& turns, Turn const& turn, Operation const& op) { std::cout << "Visit turn from cluster " << stream_turn_index(turns, turn, op) << " " << bg::operation_char(turn.operations[0].operation) << bg::operation_char(turn.operations[1].operation) << " (" << bg::operation_char(op.operation) << ")" << std::endl; return; } template <typename Turns, typename Turn, typename Operation> void stream(Turns const& turns, Turn const& turn, Operation const& op, const std::string& header, const std::string& style) { std::ostringstream out; out << m_traverse_seq++ << " " << header << " " << stream_turn_index(turns, turn, op); out << " " << 
bg::visited_char(op.visited); add_text(turn, out.str(), style); } template <typename Turn> bool label_operation(Turn const& turn, int index, std::ostream& os) { os << bg::operation_char(turn.operations[index].operation); bool result = false; if (! turn.discarded) { if (turn.operations[index].enriched.next_ip_index != -1) { os << "->" << turn.operations[index].enriched.next_ip_index; if (turn.operations[index].enriched.next_ip_index != -1) { result = true; } } else { os << "->" << turn.operations[index].enriched.travels_to_ip_index; if (turn.operations[index].enriched.travels_to_ip_index != -1) { result = true; } } os << " {" << turn.operations[index].enriched.region_id << (turn.operations[index].enriched.isolated ? " ISO" : "") << "}"; if (! turn.operations[index].enriched.startable) { os << "$"; } } return result; } template <typename Turn> void label_turn(int index, Turn const& turn) { std::ostringstream out; out << index << " "; if (turn.cluster_id != -1) { out << " c=" << turn.cluster_id << " "; } bool lab1 = label_operation(turn, 0, out); out << " / "; bool lab2 = label_operation(turn, 1, out); if (turn.switch_source) { out << "#"; } if (turn.discarded) { out << "!"; } if (turn.has_colocated_both) { out << "+"; } bool const self_turn = bg::detail::overlay::is_self_turn<bg::overlay_union>(turn); if (self_turn) { out << "@"; } std::string font8 = "font-family:Arial;font-size:6px"; std::string font6 = "font-family:Arial;font-size:4px"; std::string style = "fill:rgb(0,0,255);" + font8; if (self_turn) { if (turn.discarded) { style = "fill:rgb(128,28,128);" + font6; } else { style = "fill:rgb(255,0,255);" + font8; } } else if (turn.discarded) { style = "fill:rgb(92,92,92);" + font6; } else if (turn.cluster_id != -1) { style = "fill:rgb(0,0,255);" + font8; } else if (! lab1 || ! 
lab2) { style = "fill:rgb(0,0,255);" + font6; } add_text(turn, out.str(), style); } template <typename Turn> void add_text(Turn const& turn, std::string const& text, std::string const& style) { int const margin = 5; int const lineheight = 6; double const half = 0.5; double const ten = 10; // Map characteristics // Create a rounded off point std::pair<int, int> p = std::make_pair( boost::numeric_cast<int>(half + ten * bg::get<0>(turn.point)), boost::numeric_cast<int>(half + ten * bg::get<1>(turn.point)) ); m_mapper.text(turn.point, text, style, margin, m_offsets[p], lineheight); m_offsets[p] += lineheight; } Mapper& m_mapper; std::map<std::pair<int, int>, int> m_offsets; int m_traverse_seq; bool m_do_output; }; #endif template <typename Geometry, bg::overlay_type OverlayType> void test_overlay(std::string const& caseid, std::string const& wkt1, std::string const& wkt2, double expected_area, std::size_t expected_clip_count, std::size_t expected_hole_count) { Geometry g1; bg::read_wkt(wkt1, g1); Geometry g2; bg::read_wkt(wkt2, g2); // Reverse if necessary bg::correct(g1); bg::correct(g2); #if defined(TEST_WITH_SVG) bool const ccw = bg::point_order<Geometry>::value == bg::counterclockwise; bool const open = bg::closure<Geometry>::value == bg::open; std::ostringstream filename; filename << "overlay" << "_" << caseid << "_" << string_from_type<typename bg::coordinate_type<Geometry>::type>::name() << (ccw ? "_ccw" : "") << (open ? 
"_open" : "") #ifdef BOOST_GEOMETRY_INCLUDE_SELF_TURNS << "_self" #endif #if defined(BOOST_GEOMETRY_NO_ROBUSTNESS) << "_no_rob" #endif << ".svg"; std::ofstream svg(filename.str().c_str()); typedef bg::svg_mapper<typename bg::point_type<Geometry>::type> svg_mapper; svg_mapper mapper(svg, 500, 500); mapper.add(g1); mapper.add(g2); // Input shapes in green (src=0) / blue (src=1) mapper.map(g1, "fill-opacity:0.5;fill:rgb(153,204,0);" "stroke:rgb(153,204,0);stroke-width:3"); mapper.map(g2, "fill-opacity:0.3;fill:rgb(51,51,153);" "stroke:rgb(51,51,153);stroke-width:3"); #endif typedef typename boost::range_value<Geometry>::type geometry_out; typedef bg::detail::overlay::overlay < Geometry, Geometry, bg::detail::overlay::do_reverse<bg::point_order<Geometry>::value>::value, OverlayType == bg::overlay_difference ? ! bg::detail::overlay::do_reverse<bg::point_order<Geometry>::value>::value : bg::detail::overlay::do_reverse<bg::point_order<Geometry>::value>::value, bg::detail::overlay::do_reverse<bg::point_order<Geometry>::value>::value, geometry_out, OverlayType > overlay; typedef typename bg::strategy::intersection::services::default_strategy < typename bg::cs_tag<Geometry>::type >::type strategy_type; strategy_type strategy; typedef typename bg::rescale_overlay_policy_type < Geometry, Geometry >::type rescale_policy_type; rescale_policy_type robust_policy = bg::get_rescale_policy<rescale_policy_type>(g1, g2); #if defined(TEST_WITH_SVG) map_visitor<svg_mapper> visitor(mapper); #else bg::detail::overlay::overlay_null_visitor visitor; #endif Geometry result; overlay::apply(g1, g2, robust_policy, std::back_inserter(result), strategy, visitor); BOOST_CHECK_CLOSE(bg::area(result), expected_area, 0.001); BOOST_CHECK_MESSAGE((bg::num_interior_rings(result) == expected_hole_count), caseid << " hole count: detected: " << bg::num_interior_rings(result) << " expected: " << expected_hole_count); BOOST_CHECK_MESSAGE((result.size() == expected_clip_count), caseid << " clip count: 
detected: " << result.size() << " expected: " << expected_clip_count); #if defined(TEST_WITH_SVG) mapper.map(result, "fill-opacity:0.2;stroke-opacity:0.4;fill:rgb(255,0,0);" "stroke:rgb(255,0,255);stroke-width:8"); #endif } #define TEST_INTERSECTION(caseid, area, clips, holes) (test_overlay<multi_polygon, bg::overlay_intersection>) \ ( #caseid "_int", caseid[0], caseid[1], area, clips, holes) #define TEST_UNION(caseid, area, clips, holes) (test_overlay<multi_polygon, bg::overlay_union>) \ ( #caseid "_union", caseid[0], caseid[1], area, clips, holes) #define TEST_DIFFERENCE_A(caseid, area, clips, holes) (test_overlay<multi_polygon, bg::overlay_difference>) \ ( #caseid "_diff_a", caseid[0], caseid[1], area, clips, holes) #define TEST_DIFFERENCE_B(caseid, area, clips, holes) (test_overlay<multi_polygon, bg::overlay_difference>) \ ( #caseid "_diff_b", caseid[1], caseid[0], area, clips, holes) #define TEST_INTERSECTION_WITH(caseid, index1, index2, area, clips, holes) (test_overlay<multi_polygon, bg::overlay_intersection>) \ ( #caseid "_int_" #index1 "_" #index2, caseid[index1], caseid[index2], area, clips, holes) #define TEST_UNION_WITH(caseid, index1, index2, area, clips, holes) (test_overlay<multi_polygon, bg::overlay_union>) \ ( #caseid "_union" #index1 "_" #index2, caseid[index1], caseid[index2], area, clips, holes) template <typename T, bool Clockwise> void test_all() { typedef bg::model::point<T, 2, bg::cs::cartesian> point_type; typedef bg::model::polygon<point_type, Clockwise> polygon; typedef bg::model::multi_polygon<polygon> multi_polygon; TEST_UNION(case_multi_simplex, 14.58, 1, 0); TEST_INTERSECTION(case_multi_simplex, 6.42, 2, 0); TEST_DIFFERENCE_A(case_multi_simplex, 5.58, 5, 0); TEST_DIFFERENCE_B(case_multi_simplex, 2.58, 4, 0); // Contains 5 clusters, needing immediate selection of next turn TEST_UNION_WITH(case_58_multi, 0, 3, 19.8333333, 2, 0); // Contains many clusters, needing to exclude u/u turns TEST_UNION(case_recursive_boxes_3, 56.5, 17, 6); // 
Contains 4 clusters, one of which having 4 turns TEST_UNION(case_recursive_boxes_7, 7.0, 2, 0); // Contains 5 clusters, needing immediate selection of next turn TEST_UNION(case_89_multi, 6.0, 1, 0); // Needs ux/next_turn_index==-1 to be filtered out TEST_INTERSECTION(case_77_multi, 9.0, 5, 0); TEST_UNION(case_101_multi, 22.25, 1, 3); TEST_INTERSECTION(case_101_multi, 4.75, 4, 0); TEST_INTERSECTION(case_recursive_boxes_11, 1.0, 2, 0); TEST_INTERSECTION(case_recursive_boxes_30, 6.0, 4, 0); TEST_UNION(case_recursive_boxes_4, 96.75, 1, 2); TEST_INTERSECTION_WITH(case_58_multi, 2, 6, 13.25, 1, 1); TEST_INTERSECTION_WITH(case_72_multi, 1, 2, 6.15, 3, 1); TEST_UNION(case_recursive_boxes_12, 6.0, 6, 0); TEST_UNION(case_recursive_boxes_13, 10.25, 3, 0); // std::cout // << " \"" // << bg::inverse<multi_polygon>(case_65_multi[0], 1.0) // << "\"" << std::endl; } int test_main(int, char* []) { test_all<double, true>(); // test_all<double, false>(); return 0; }
34.644487
133
0.558305
[ "geometry", "model" ]
8113213abf69ef85a009a24ff4dfe5a94e245e89
32,011
cxx
C++
sources/etx/rt/integrators/bidirectional.cxx
sergeyreznik/etx-tracer
e9941102daa5693af2a975e561022920a85826ef
[ "MIT" ]
180
2022-01-18T18:55:06.000Z
2022-03-21T14:24:39.000Z
sources/etx/rt/integrators/bidirectional.cxx
sergeyreznik/etx-tracer
e9941102daa5693af2a975e561022920a85826ef
[ "MIT" ]
1
2022-02-25T14:05:49.000Z
2022-02-25T14:05:49.000Z
sources/etx/rt/integrators/bidirectional.cxx
sergeyreznik/etx-tracer
e9941102daa5693af2a975e561022920a85826ef
[ "MIT" ]
7
2022-02-02T00:57:57.000Z
2022-02-22T15:53:15.000Z
#include <etx/core/core.hxx> #include <etx/log/log.hxx> #include <etx/render/host/film.hxx> #include <etx/rt/integrators/bidirectional.hxx> #include <atomic> namespace etx { struct CPUBidirectionalImpl : public Task { struct PathVertex : public Intersection { enum class Class : uint32_t { Invalid, Camera, Emitter, Surface, Medium, }; Class cls = Class::Invalid; uint32_t emitter_index = kInvalidIndex; uint32_t medium_index = kInvalidIndex; SpectralResponse throughput = {}; struct { float forward = 0.0f; float backward = 0.0f; } pdf; bool delta = false; PathVertex() = default; PathVertex(Class c, const Intersection& i) : Intersection(i) , cls(c) { } PathVertex(const Medium::Sample& i, const float3& a_w_i) : cls(Class::Medium) { pos = i.pos; w_i = a_w_i; } PathVertex(Class c) : cls(c) { } bool is_specific_emitter() const { return (emitter_index != kInvalidIndex); } bool is_environment_emitter() const { return (cls == Class::Emitter) && (triangle_index == kInvalidIndex); } bool is_emitter() const { return is_specific_emitter() || is_environment_emitter(); } bool is_surface_interaction() const { return (triangle_index != kInvalidIndex); } bool is_medium_interaction() const { return (cls == Class::Medium) && (medium_index != kInvalidIndex); } SpectralResponse bsdf_in_direction(SpectralQuery spect, PathSource mode, const float3& w_o, const Scene& scene, Sampler& smp) const; float pdf_area(SpectralQuery spect, PathSource mode, const PathVertex* prev, const PathVertex* next, const Scene& scene, Sampler& smp) const; float pdf_to_light_out(SpectralQuery spect, const PathVertex* next, const Scene& scene) const; float pdf_to_light_in(SpectralQuery spect, const PathVertex* next, const Scene& scene) const; float pdf_solid_angle_to_area(float pdf_dir, const PathVertex& to_vertex) const; }; template <class T> struct ReplaceInScope { ReplaceInScope(const ReplaceInScope&) = delete; ReplaceInScope& operator=(const ReplaceInScope&) = delete; ReplaceInScope() { } ReplaceInScope(T* 
destination, const T& new_value) : ptr(destination) , old_value(*destination) { *destination = new_value; } ReplaceInScope& operator=(ReplaceInScope&& r) noexcept { ptr = r.ptr; old_value = r.old_value; r.ptr = nullptr; return *this; } ~ReplaceInScope() { if (ptr != nullptr) { *ptr = old_value; } } T* ptr = nullptr; T old_value = {}; }; struct PathData { std::vector<PathVertex> camera_path; std::vector<PathVertex> emitter_path; }; char status[2048] = {}; Raytracing& rt; std::vector<RNDSampler> samplers; std::vector<PathData> per_thread_path_data; std::atomic<Integrator::State>* state = {}; Film camera_image; Film light_image; Film iteration_light_image; TimeMeasure total_time = {}; TimeMeasure iteration_time = {}; Handle current_task = {}; uint32_t iteration = 0; uint32_t preview_frames = 3; uint32_t opt_max_iterations = 0x7fffffff; uint32_t opt_max_depth = 0x7fffffff; uint32_t opt_rr_start = 0x5; CPUBidirectionalImpl(Raytracing& r, std::atomic<Integrator::State>* st) : rt(r) , state(st) , samplers(rt.scheduler().max_thread_count()) , per_thread_path_data(rt.scheduler().max_thread_count()) { } void execute_range(uint32_t begin, uint32_t end, uint32_t thread_id) { auto& smp = samplers[thread_id]; for (uint32_t i = begin; (state->load() != Integrator::State::Stopped) && (i < end); ++i) { uint32_t x = i % camera_image.dimensions().x; uint32_t y = i / camera_image.dimensions().x; float2 uv = get_jittered_uv(smp, {x, y}, camera_image.dimensions()); float3 xyz = trace_pixel(smp, uv, thread_id); camera_image.accumulate({xyz.x, xyz.y, xyz.z, 1.0f}, uv, float(iteration) / float(iteration + 1)); } } bool running() const { return state->load() != Integrator::State::Stopped; } constexpr static bool _direct_hit = true; constexpr static bool _connect_to_camera = true; constexpr static bool _connect_to_light = true; constexpr static bool _connect_vertices = true; constexpr static bool _enable_mis = true; float3 trace_pixel(RNDSampler& smp, const float2& uv, uint32_t thread_id) { 
auto& path_data = per_thread_path_data[thread_id]; auto spect = spectrum::sample(smp.next()); auto ray = generate_ray(smp, rt.scene(), uv); build_camera_path(smp, spect, ray, path_data.camera_path); build_emitter_path(smp, spect, path_data.emitter_path); uint64_t path_size = path_data.camera_path.size() + path_data.emitter_path.size() - 2llu; SpectralResponse result = {spect.wavelength, 0.0f}; for (uint64_t eye_t = 1, eye_t_e = path_data.camera_path.size(); running() && (eye_t < eye_t_e); ++eye_t) { for (uint64_t light_s = 0, light_s_e = path_data.emitter_path.size(); running() && (light_s < light_s_e); ++light_s) { auto depth = eye_t + light_s; if (((eye_t == 1) && (light_s == 1)) || (depth < 2) || (depth > 2llu + opt_max_depth)) { continue; } if ((eye_t > 1) && (light_s != 0) && (path_data.camera_path[eye_t].cls == PathVertex::Class::Emitter)) { continue; } if (light_s == 0) { if (_direct_hit) { result += direct_hit(path_data, spect, eye_t, light_s, smp); } } else if (eye_t == 1) { if (_connect_to_camera) { CameraSample camera_sample = {}; auto splat = connect_to_camera(smp, path_data, spect, eye_t, light_s, camera_sample); auto xyz = splat.to_xyz(); iteration_light_image.atomic_add({xyz.x, xyz.y, xyz.z, 1.0f}, camera_sample.uv, thread_id); } } else if (light_s == 1) { if (_connect_to_light) { result += connect_to_light(smp, path_data, spect, eye_t, light_s); } } else if (_connect_vertices) { result += connect_vertices(smp, path_data, spect, eye_t, light_s); } ETX_VALIDATE(result); } } return (result / spectrum::sample_pdf()).to_xyz(); } void build_path(Sampler& smp, SpectralQuery spect, Ray ray, std::vector<PathVertex>& path, PathSource mode, SpectralResponse throughput, float pdf_dir, uint32_t medium_index) { ETX_VALIDATE(throughput); float eta = 1.0f; for (uint32_t path_length = 0; path_length <= opt_max_depth;) { Intersection intersection = {}; bool found_intersection = rt.trace(ray, intersection, smp); Medium::Sample medium_sample = {}; if (medium_index != 
kInvalidIndex) { medium_sample = rt.scene().mediums[medium_index].sample(spect, smp, ray.o, ray.d, found_intersection ? intersection.t : kMaxFloat); throughput *= medium_sample.weight; ETX_VALIDATE(throughput); } if (medium_sample.sampled_medium()) { const auto& medium = rt.scene().mediums[medium_index]; float3 w_i = ray.d; float3 w_o = medium.sample_phase_function(spect, smp, medium_sample.pos, w_i); auto& v = path.emplace_back(medium_sample, w_i); auto& w = path[path.size() - 2]; v.medium_index = medium_index; v.throughput = throughput; v.delta = false; v.pdf.forward = w.pdf_solid_angle_to_area(pdf_dir, v); float rev_pdf = medium.phase_function(spect, medium_sample.pos, w_o, w_i); w.pdf.backward = v.pdf_solid_angle_to_area(rev_pdf, w); pdf_dir = medium.phase_function(spect, medium_sample.pos, w_i, w_o); ray.o = medium_sample.pos; ray.d = w_o; } else if (found_intersection) { const auto& tri = rt.scene().triangles[intersection.triangle_index]; const auto& mat = rt.scene().materials[tri.material_index]; if (mat.cls == Material::Class::Boundary) { auto bsdf_sample = bsdf::sample({spect, medium_index, mode, intersection, intersection.w_i, {}}, mat, rt.scene(), smp); if (bsdf_sample.properties & BSDFSample::MediumChanged) { medium_index = bsdf_sample.medium_index; } ray.o = intersection.pos; ray.d = bsdf_sample.w_o; continue; } auto& v = path.emplace_back(PathVertex::Class::Surface, intersection); auto& w = path[path.size() - 2]; v.medium_index = medium_index; v.emitter_index = tri.emitter_index; v.throughput = throughput; v.pdf.forward = w.pdf_solid_angle_to_area(pdf_dir, v); ETX_VALIDATE(v.pdf.forward); auto bsdf_data = BSDFData(spect, medium_index, mode, v, v.w_i, {}); auto bsdf_sample = bsdf::sample(bsdf_data, mat, rt.scene(), smp); ETX_VALIDATE(bsdf_sample.weight); v.delta = bsdf_sample.is_delta(); if (bsdf_sample.properties & BSDFSample::MediumChanged) { medium_index = bsdf_sample.medium_index; } bsdf_data.w_o = bsdf_sample.w_o; if (bsdf_sample.valid() == false) 
{ break; } auto rev_bsdf_pdf = bsdf::pdf(bsdf_data.swap_directions(), mat, rt.scene(), smp); ETX_VALIDATE(rev_bsdf_pdf); w.pdf.backward = v.pdf_solid_angle_to_area(rev_bsdf_pdf, w); ETX_VALIDATE(w.pdf.backward); if (mode == PathSource::Camera) { eta *= bsdf_sample.eta; } pdf_dir = v.delta ? 0.0f : bsdf_sample.pdf; ETX_VALIDATE(pdf_dir); throughput *= bsdf_sample.weight; ETX_VALIDATE(throughput); if (mode == PathSource::Light) { throughput *= fix_shading_normal(tri.geo_n, bsdf_data.nrm, bsdf_data.w_i, bsdf_data.w_o); ETX_VALIDATE(throughput); } ray.o = shading_pos(rt.scene().vertices, tri, intersection.barycentric, bsdf_data.w_o); ray.d = bsdf_data.w_o; } else if (mode == PathSource::Camera) { auto& v = path.emplace_back(PathVertex::Class::Emitter); v.medium_index = medium_index; v.throughput = throughput; v.pdf.forward = pdf_dir; v.w_i = ray.d; v.pos = ray.o + rt.scene().bounding_sphere_radius * v.w_i; v.nrm = -v.w_i; break; } else { break; } if ((path_length > opt_rr_start) && (apply_rr(eta, smp.next(), throughput) == false)) { break; } path_length += 1; } } void build_camera_path(Sampler& smp, SpectralQuery spect, Ray ray, std::vector<PathVertex>& path) { path.clear(); auto& z0 = path.emplace_back(PathVertex::Class::Camera); z0.throughput = {spect.wavelength, 1.0f}; auto eval = film_evaluate_out(spect, rt.scene().camera, ray); auto& z1 = path.emplace_back(PathVertex::Class::Camera); z1.medium_index = rt.scene().camera_medium_index; z1.throughput = {spect.wavelength, 1.0f}; z1.pos = ray.o; z1.nrm = eval.normal; z1.w_i = ray.d; z1.pdf.forward = 1.0f; build_path(smp, spect, ray, path, PathSource::Camera, z1.throughput, eval.pdf_dir, z1.medium_index); } void build_emitter_path(Sampler& smp, SpectralQuery spect, std::vector<PathVertex>& path) { path.clear(); const auto& emitter_sample = sample_emission(rt.scene(), spect, smp); if ((emitter_sample.pdf_area == 0.0f) || (emitter_sample.pdf_dir == 0.0f) || (emitter_sample.value.is_zero())) { return; } auto& y0 = 
path.emplace_back(PathVertex::Class::Emitter); y0.throughput = {spect.wavelength, 1.0f}; y0.delta = emitter_sample.is_delta; auto& y1 = path.emplace_back(PathVertex::Class::Emitter); y1.triangle_index = emitter_sample.triangle_index; y1.medium_index = emitter_sample.medium_index; y1.emitter_index = emitter_sample.emitter_index; y1.throughput = emitter_sample.value; y1.barycentric = emitter_sample.barycentric; y1.pos = emitter_sample.origin; y1.nrm = emitter_sample.normal; y1.pdf.forward = emitter_sample.pdf_area * emitter_sample.pdf_sample; y1.w_i = emitter_sample.direction; y1.delta = emitter_sample.is_delta; float3 o = offset_ray(emitter_sample.origin, y1.nrm); SpectralResponse throughput = y1.throughput * dot(emitter_sample.direction, y1.nrm) / (emitter_sample.pdf_dir * emitter_sample.pdf_area * emitter_sample.pdf_sample); build_path(smp, spect, {o, emitter_sample.direction}, path, PathSource::Light, throughput, emitter_sample.pdf_dir, y1.medium_index); if ((path.size() > 2) && emitter_sample.is_distant) { path[1].pdf.forward = emitter_pdf_in_dist(rt.scene().emitters[emitter_sample.emitter_index], emitter_sample.direction, rt.scene()); ETX_VALIDATE(path[1].pdf.forward); path[2].pdf.forward = emitter_sample.pdf_area; if (path[2].cls == PathVertex::Class::Surface) { const auto& tri = rt.scene().triangles[path[2].triangle_index]; path[2].pdf.forward *= fabsf(dot(emitter_sample.direction, tri.geo_n)); } ETX_VALIDATE(path[2].pdf.forward); } } float mis_weight(PathData& c, SpectralQuery spect, uint64_t eye_t, uint64_t light_s, const PathVertex& sampled, Sampler& smp) { if (_enable_mis == false) { return 1.0f; } if (eye_t + light_s == 2) { return 1.0f; } PathVertex* z_curr = (eye_t > 0) ? c.camera_path.data() + eye_t : nullptr; PathVertex* z_prev = (eye_t > 1) ? c.camera_path.data() + eye_t - 1 : nullptr; PathVertex* y_curr = (light_s > 0) ? c.emitter_path.data() + light_s : nullptr; PathVertex* y_prev = (light_s > 1) ? 
c.emitter_path.data() + light_s - 1 : nullptr; ReplaceInScope<PathVertex> sampled_vertex; if (light_s == 1) { sampled_vertex = {y_curr, sampled}; } else if (eye_t == 1) { sampled_vertex = {z_curr, sampled}; } ReplaceInScope<bool> z_delta_new; ReplaceInScope<float> z_curr_new; ReplaceInScope<float> z_prev_new; ReplaceInScope<bool> y_delta_new; ReplaceInScope<float> y_curr_new; ReplaceInScope<float> y_prev_new; if (z_curr) { z_delta_new = {&z_curr->delta, false}; float z_curr_pdf = 0.0f; if (light_s > 0) { z_curr_pdf = y_curr->pdf_area(spect, PathSource::Light, y_prev, z_curr, rt.scene(), smp); } else { z_curr_pdf = z_curr->pdf_to_light_in(spect, z_prev, rt.scene()); } ETX_VALIDATE(z_curr_pdf); z_curr_new = {&z_curr->pdf.backward, z_curr_pdf}; ETX_VALIDATE(z_curr->pdf.backward); } if (z_prev) { float z_prev_pdf = 0.0f; if (light_s > 0) { z_prev_pdf = z_curr->pdf_area(spect, PathSource::Camera, y_curr, z_prev, rt.scene(), smp); } else { z_prev_pdf = z_curr->pdf_to_light_out(spect, z_prev, rt.scene()); } ETX_VALIDATE(z_prev_pdf); z_prev_new = {&z_prev->pdf.backward, z_prev_pdf}; ETX_VALIDATE(z_prev->pdf.backward); } if (y_curr) { y_delta_new = {&y_curr->delta, false}; float y_curr_pdf = 0.0f; if (eye_t > 1) { y_curr_pdf = z_curr->pdf_area(spect, PathSource::Camera, z_prev, y_curr, rt.scene(), smp); } else if (z_curr != nullptr) { ETX_ASSERT(z_curr->cls == PathVertex::Class::Camera); float pdf_dir = film_pdf_out(rt.scene().camera, y_curr->pos); y_curr_pdf = z_curr->pdf_solid_angle_to_area(pdf_dir, *y_curr); } else { ETX_FAIL("Invalid case"); } ETX_VALIDATE(y_curr_pdf); y_curr_new = {&y_curr->pdf.backward, y_curr_pdf}; ETX_VALIDATE(y_curr->pdf.backward); } if (y_prev) { ETX_ASSERT(z_curr != nullptr); float y_prev_pdf = y_curr->pdf_area(spect, PathSource::Light, z_curr, y_prev, rt.scene(), smp); ETX_VALIDATE(y_prev_pdf); y_prev_new = {&y_prev->pdf.backward, y_prev_pdf}; ETX_VALIDATE(y_prev->pdf.backward); } float result = 0.0f; #define MAP(A) (((A) == 0.0f) ? 
1.0f : (A)) float r = 1.0f; for (uint64_t ti = eye_t; ti > 1; --ti) { r *= MAP(c.camera_path[ti].pdf.backward) / MAP(c.camera_path[ti].pdf.forward); ETX_VALIDATE(r); if ((c.camera_path[ti].delta == false) && (c.camera_path[ti - 1].delta == false)) { result += r; ETX_VALIDATE(result); } } r = 1.0f; for (uint64_t si = light_s; si > 0; --si) { r *= MAP(c.emitter_path[si].pdf.backward) / MAP(c.emitter_path[si].pdf.forward); ETX_VALIDATE(r); if ((c.emitter_path[si].delta == false) && (c.emitter_path[si - 1].delta == false)) { result += r; ETX_VALIDATE(result); } } return 1.0f / (1.0f + result); } SpectralResponse direct_hit(PathData& c, SpectralQuery spect, uint64_t eye_t, uint64_t light_s, Sampler& smp) { const auto& z_i = c.camera_path[eye_t]; if (z_i.is_emitter() == false) { return {spect.wavelength, 0.0f}; } const auto& z_prev = c.camera_path[eye_t - 1]; float pdf_area = 0.0f; float pdf_dir = 0.0f; float pdf_dir_out = 0.0f; SpectralResponse emitter_value = {spect.wavelength, 0.0f}; if (z_i.is_specific_emitter()) { const auto& emitter = rt.scene().emitters[z_i.emitter_index]; ETX_ASSERT(emitter.is_local()); emitter_value = emitter_get_radiance(emitter, spect, z_i.tex, z_prev.pos, z_i.pos, pdf_area, pdf_dir, pdf_dir_out, rt.scene(), (eye_t <= 2)); } else if (rt.scene().environment_emitters.count > 0) { auto w_o = normalize(z_i.pos - z_prev.pos); for (uint32_t ie = 0; ie < rt.scene().environment_emitters.count; ++ie) { const auto& emitter = rt.scene().emitters[rt.scene().environment_emitters.emitters[ie]]; float local_pdf_dir = 0.0f; float local_pdf_dir_out = 0.0f; emitter_value += emitter_get_radiance(emitter, spect, w_o, pdf_area, local_pdf_dir, local_pdf_dir_out, rt.scene()); pdf_dir += local_pdf_dir; } pdf_dir = pdf_dir / float(rt.scene().environment_emitters.count); } if (pdf_dir == 0.0f) { return {spect.wavelength, 0.0f}; } ETX_VALIDATE(emitter_value); float weight = mis_weight(c, spect, eye_t, light_s, {}, smp); return emitter_value * z_i.throughput * weight; } 
SpectralResponse connect_to_light(Sampler& smp, PathData& c, SpectralQuery spect, uint64_t eye_t, uint64_t light_s) { PathVertex sampled_vertex = {PathVertex::Class::Emitter}; const auto& z_i = c.camera_path[eye_t]; auto emitter_sample = sample_emitter(spect, smp, z_i.pos, rt.scene()); if (emitter_sample.value.is_zero() || (emitter_sample.pdf_dir == 0.0f)) { return {spect.wavelength, 0.0f}; } auto dp = emitter_sample.origin - z_i.pos; if (dot(dp, dp) <= kEpsilon) { return {spect.wavelength, 0.0f}; } sampled_vertex.w_i = normalize(dp); sampled_vertex.triangle_index = emitter_sample.triangle_index; sampled_vertex.emitter_index = emitter_sample.emitter_index; sampled_vertex.pos = emitter_sample.origin; sampled_vertex.nrm = emitter_sample.normal; sampled_vertex.pdf.forward = sampled_vertex.pdf_to_light_in(spect, &z_i, rt.scene()); sampled_vertex.delta = emitter_sample.is_delta; SpectralResponse emitter_throughput = emitter_sample.value / (emitter_sample.pdf_dir * emitter_sample.pdf_sample); ETX_VALIDATE(emitter_throughput); SpectralResponse bsdf = z_i.bsdf_in_direction(spect, PathSource::Camera, emitter_sample.direction, rt.scene(), smp); SpectralResponse tr = local_transmittance(spect, smp, z_i, sampled_vertex); float weight = mis_weight(c, spect, eye_t, light_s, sampled_vertex, smp); return z_i.throughput * bsdf * emitter_throughput * tr * weight; } SpectralResponse connect_to_camera(Sampler& smp, PathData& c, SpectralQuery spect, uint64_t eye_t, uint64_t light_s, CameraSample& camera_sample) { const auto& y_i = c.emitter_path[light_s]; camera_sample = sample_film(smp, rt.scene(), y_i.pos); if (camera_sample.valid() == false) { return {spect.wavelength, 0.0f}; } ETX_VALIDATE(camera_sample.weight); PathVertex sampled_vertex = {PathVertex::Class::Camera}; sampled_vertex.pos = camera_sample.position; sampled_vertex.nrm = camera_sample.normal; sampled_vertex.w_i = camera_sample.direction; SpectralResponse bsdf = y_i.bsdf_in_direction(spect, PathSource::Light, 
camera_sample.direction, rt.scene(), smp); float weight = mis_weight(c, spect, eye_t, light_s, sampled_vertex, smp); SpectralResponse splat = y_i.throughput * bsdf * camera_sample.weight * (weight / spectrum::sample_pdf()); ETX_VALIDATE(splat); if (splat.is_zero() == false) { splat *= local_transmittance(spect, smp, y_i, sampled_vertex); } return splat; } SpectralResponse connect_vertices(Sampler& smp, PathData& c, SpectralQuery spect, uint64_t eye_t, uint64_t light_s) { const auto& y_i = c.emitter_path[light_s]; const auto& z_i = c.camera_path[eye_t]; auto dw = z_i.pos - y_i.pos; float dwl = dot(dw, dw); dw *= 1.0f / std::sqrt(dwl); SpectralResponse result = y_i.throughput * y_i.bsdf_in_direction(spect, PathSource::Light, dw, rt.scene(), smp) * // z_i.throughput * z_i.bsdf_in_direction(spect, PathSource::Camera, -dw, rt.scene(), smp) * // (1.0f / dwl); // G term = abs(cos(dw, y_i.nrm) * cos(dw, z_i.nrm)) / dwl; cosines already accounted in "bsdf" ETX_VALIDATE(result); if (result.is_zero()) { return {spect.wavelength, 0.0f}; } SpectralResponse tr = local_transmittance(spect, smp, y_i, z_i); ETX_VALIDATE(result); float weight = mis_weight(c, spect, eye_t, light_s, {}, smp); ETX_VALIDATE(weight); return result * tr * weight; } SpectralResponse local_transmittance(SpectralQuery spect, Sampler& smp, const PathVertex& p0, const PathVertex& p1) { auto& scene = rt.scene(); float3 origin = p0.pos; if (p0.is_surface_interaction()) { const auto& tri = scene.triangles[p0.triangle_index]; origin = shading_pos(scene.vertices, tri, p0.barycentric, normalize(p1.pos - p0.pos)); } return transmittance(spect, smp, origin, p1.pos, p0.medium_index, scene, rt); } void start(const Options& opt) { opt_max_iterations = opt.get("spp", opt_max_iterations).to_integer(); opt_max_depth = opt.get("pathlen", opt_max_depth).to_integer(); opt_rr_start = opt.get("rrstart", opt_rr_start).to_integer(); iteration_light_image.clear(); uint32_t dim = camera_image.dimensions().x * 
camera_image.dimensions().y; for (auto& path_data : per_thread_path_data) { path_data.camera_path.reserve(2llu + opt_max_depth); path_data.emitter_path.reserve(2llu + opt_max_depth); } iteration = 0; snprintf(status, sizeof(status), "[%u] %s ...", iteration, (state->load() == Integrator::State::Running ? "Running" : "Preview")); total_time = {}; iteration_time = {}; current_task = rt.scheduler().schedule(this, dim); } }; CPUBidirectional::CPUBidirectional(Raytracing& rt) : Integrator(rt) { ETX_PIMPL_INIT(CPUBidirectional, rt, &current_state); } CPUBidirectional::~CPUBidirectional() { if (current_state != State::Stopped) { stop(Stop::Immediate); } ETX_PIMPL_CLEANUP(CPUBidirectional); } void CPUBidirectional::preview(const Options& opt) { stop(Stop::Immediate); if (rt.has_scene()) { current_state = State::Preview; _private->start(opt); } } void CPUBidirectional::run(const Options& opt) { stop(Stop::Immediate); if (rt.has_scene()) { current_state = State::Running; _private->start(opt); } } void CPUBidirectional::update() { if ((current_state == State::Stopped) || (rt.scheduler().completed(_private->current_task) == false)) { return; } _private->iteration_light_image.flush_to(_private->light_image, float(_private->iteration) / float(_private->iteration + 1)); if (current_state == State::WaitingForCompletion) { _private->iteration_light_image.clear(); rt.scheduler().wait(_private->current_task); _private->current_task = {}; snprintf(_private->status, sizeof(_private->status), "[%u] Completed in %.2f seconds", _private->iteration, _private->total_time.measure()); current_state = Integrator::State::Stopped; } else if (_private->iteration + 1 < _private->opt_max_iterations) { _private->iteration_light_image.clear(); snprintf(_private->status, sizeof(_private->status), "[%u] %s... (%.3fms per iteration)", _private->iteration, (current_state == Integrator::State::Running ? 
"Running" : "Preview"), _private->iteration_time.measure_ms()); _private->iteration_time = {}; _private->iteration += 1; rt.scheduler().restart(_private->current_task, _private->camera_image.dimensions().x * _private->camera_image.dimensions().y); } else { snprintf(_private->status, sizeof(_private->status), "[%u] Completed in %.2f seconds", _private->iteration, _private->total_time.measure()); current_state = Integrator::State::Stopped; } } void CPUBidirectional::stop(Stop st) { if (current_state == State::Stopped) { return; } if (st == Stop::Immediate) { current_state = State::Stopped; rt.scheduler().wait(_private->current_task); _private->current_task = {}; } else { current_state = State::WaitingForCompletion; snprintf(_private->status, sizeof(_private->status), "[%u] Waiting for completion", _private->iteration); } } Options CPUBidirectional::options() const { Options result = {}; result.add(1u, _private->opt_max_iterations, 0xffffu, "spp", "Max Iterations"); result.add(1u, _private->opt_max_depth, 65536u, "pathlen", "Maximal Path Length"); result.add(1u, _private->opt_rr_start, 65536u, "rrstart", "Start Russian Roulette at"); return result; } void CPUBidirectional::update_options(const Options&) { } void CPUBidirectional::set_output_size(const uint2& dim) { if (current_state != State::Stopped) { stop(Stop::Immediate); } _private->camera_image.resize(dim, 1); _private->light_image.resize(dim, 1); _private->iteration_light_image.resize(dim, rt.scheduler().max_thread_count()); } const float4* CPUBidirectional::get_camera_image(bool) { return _private->camera_image.data(); } const float4* CPUBidirectional::get_light_image(bool) { return _private->light_image.data(); } const char* CPUBidirectional::status() const { return _private->status; } float CPUBidirectionalImpl::PathVertex::pdf_area(SpectralQuery spect, PathSource mode, const PathVertex* prev, const PathVertex* next, const Scene& scene, Sampler& smp) const { if (cls == Class::Emitter) { return 
pdf_to_light_out(spect, next, scene); } ETX_ASSERT(prev != nullptr); ETX_ASSERT(next != nullptr); ETX_ASSERT(is_surface_interaction() || is_medium_interaction()); auto w_i = (pos - prev->pos); { float w_i_len = length(w_i); if (w_i_len == 0.0f) { return 0.0f; } w_i *= 1.0f / w_i_len; } auto w_o = (next->pos - pos); { float w_o_len = length(w_o); if (w_o_len == 0.0f) { return 0.0f; } w_o *= 1.0f / w_o_len; } float eval_pdf = 0.0f; if (is_surface_interaction()) { const auto& tri = scene.triangles[triangle_index]; const auto& mat = scene.materials[tri.material_index]; eval_pdf = bsdf::pdf({spect, medium_index, mode, *this, w_i, w_o}, mat, scene, smp); } else if (is_medium_interaction()) { eval_pdf = scene.mediums[medium_index].phase_function(spect, pos, w_i, w_o); } else { ETX_FAIL("Invalid vertex class"); } ETX_VALIDATE(eval_pdf); if (next->is_environment_emitter()) { return eval_pdf; } return pdf_solid_angle_to_area(eval_pdf, *next); } float CPUBidirectionalImpl::PathVertex::pdf_to_light_out(SpectralQuery spect, const PathVertex* next, const Scene& scene) const { ETX_ASSERT(next != nullptr); ETX_ASSERT(is_emitter()); float pdf_area = 0.0f; float pdf_dir = 0.0f; float pdf_dir_out = 0.0f; if (is_specific_emitter()) { const auto& emitter = scene.emitters[emitter_index]; if (emitter.is_local()) { auto w_o = normalize(next->pos - pos); emitter_evaluate_out_local(emitter, spect, tex, nrm, w_o, pdf_area, pdf_dir, pdf_dir_out, scene); pdf_area = pdf_solid_angle_to_area(pdf_dir, *next); } else if (emitter.is_distant()) { auto w_o = normalize(pos - next->pos); emitter_evaluate_out_dist(emitter, spect, w_o, pdf_area, pdf_dir, pdf_dir_out, scene); if (next->is_surface_interaction()) { pdf_area *= fabsf(dot(scene.triangles[next->triangle_index].geo_n, w_o)); } } } else if (scene.environment_emitters.count > 0) { auto w_o = normalize(pos - next->pos); float w_o_dot_n = next->is_surface_interaction() ? 
fabsf(dot(scene.triangles[next->triangle_index].geo_n, w_o)) : 1.0f; for (uint32_t ie = 0; ie < scene.environment_emitters.count; ++ie) { const auto& emitter = scene.emitters[scene.environment_emitters.emitters[ie]]; float local_pdf_area = 0.0f; emitter_evaluate_out_dist(emitter, spect, w_o, local_pdf_area, pdf_dir, pdf_dir_out, scene); pdf_area += local_pdf_area * w_o_dot_n; } pdf_area = pdf_area / float(scene.environment_emitters.count); } return pdf_area; } float CPUBidirectionalImpl::PathVertex::pdf_to_light_in(SpectralQuery spect, const PathVertex* next, const Scene& scene) const { ETX_ASSERT(is_emitter()); float result = 0.0f; if (is_specific_emitter()) { const auto& emitter = scene.emitters[emitter_index]; float pdf_discrete = emitter_discrete_pdf(emitter, scene.emitters_distribution); result = pdf_discrete * (emitter.is_local() ? emitter_pdf_area_local(emitter, scene) : emitter_pdf_in_dist(emitter, normalize(pos - next->pos), scene)); } else if (scene.environment_emitters.count > 0) { for (uint32_t ie = 0; ie < scene.environment_emitters.count; ++ie) { const auto& emitter = scene.emitters[scene.environment_emitters.emitters[ie]]; float pdf_discrete = emitter_discrete_pdf(emitter, scene.emitters_distribution); result += pdf_discrete * emitter_pdf_in_dist(emitter, normalize(pos - next->pos), scene); } result = result / float(scene.environment_emitters.count); } return result; } float CPUBidirectionalImpl::PathVertex::pdf_solid_angle_to_area(float pdf_dir, const PathVertex& to_vertex) const { if ((pdf_dir == 0.0f) || to_vertex.is_environment_emitter()) { return pdf_dir; } auto w_o = to_vertex.pos - pos; float d_squared = dot(w_o, w_o); if (d_squared == 0.0f) { return 0.0f; } float inv_d_squared = 1.0f / d_squared; w_o *= std::sqrt(inv_d_squared); float cos_t = (to_vertex.is_surface_interaction() ? 
fabsf(dot(w_o, to_vertex.nrm)) : 1.0f); float result = cos_t * pdf_dir * inv_d_squared; ETX_VALIDATE(result); return result; } SpectralResponse CPUBidirectionalImpl::PathVertex::bsdf_in_direction(SpectralQuery spect, PathSource mode, const float3& w_o, const Scene& scene, Sampler& smp) const { ETX_ASSERT(is_surface_interaction() || is_medium_interaction()); if (is_surface_interaction()) { const auto& tri = scene.triangles[triangle_index]; const auto& mat = scene.materials[tri.material_index]; BSDFEval eval = bsdf::evaluate({spect, medium_index, mode, *this, w_i, w_o}, mat, scene, smp); ETX_VALIDATE(eval.bsdf); if (mode == PathSource::Light) { eval.bsdf *= fix_shading_normal(tri.geo_n, nrm, w_i, w_o); ETX_VALIDATE(eval.bsdf); } ETX_VALIDATE(eval.bsdf); return eval.bsdf; } if (is_medium_interaction()) { return {spect.wavelength, scene.mediums[medium_index].phase_function(spect, pos, w_i, w_o)}; } ETX_FAIL("Invalid vertex class"); return {spect.wavelength, 0.0f}; } } // namespace etx
35.528302
178
0.663303
[ "render", "vector" ]
811a1e03f0993ef5d7eed94490fc4c86d2acaf1a
76,621
cpp
C++
src/library/commissioner_impl.cpp
xwang146/ot-commissioner
ce6b1180459d2533f730b3effee626efee16b9a3
[ "BSD-3-Clause" ]
null
null
null
src/library/commissioner_impl.cpp
xwang146/ot-commissioner
ce6b1180459d2533f730b3effee626efee16b9a3
[ "BSD-3-Clause" ]
16
2020-02-19T03:16:12.000Z
2020-12-08T07:43:20.000Z
src/library/commissioner_impl.cpp
xwang146/ot-commissioner
ce6b1180459d2533f730b3effee626efee16b9a3
[ "BSD-3-Clause" ]
1
2021-06-09T03:39:12.000Z
2021-06-09T03:39:12.000Z
/* * Copyright (c) 2019, The OpenThread Commissioner Authors. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the copyright holder nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /** * @file * The file implements the Commissioner interface. 
*/ #include "library/commissioner_impl.hpp" #include "library/coap.hpp" #include "library/cose.hpp" #include "library/dtls.hpp" #include "library/logging.hpp" #include "library/openthread/bloom_filter.hpp" #include "library/openthread/pbkdf2_cmac.hpp" #include "library/openthread/sha256.hpp" #include "library/uri.hpp" #define CCM_NOT_IMPLEMENTED "CCM features not implemented" namespace ot { namespace commissioner { static constexpr uint16_t kDefaultMmPort = 61631; static constexpr uint32_t kMinKeepAliveInterval = 30; static constexpr uint32_t kMaxKeepAliveInterval = 45; Error Commissioner::GeneratePSKc(ByteArray & aPSKc, const std::string &aPassphrase, const std::string &aNetworkName, const ByteArray & aExtendedPanId) { Error error; const std::string saltPrefix = "Thread"; ByteArray salt; VerifyOrExit((aPassphrase.size() >= kMinCommissionerCredentialLength) && (aPassphrase.size() <= kMaxCommissionerCredentialLength), error = ERROR_INVALID_ARGS("passphrase length={} exceeds range [{}, {}]", aPassphrase.size(), kMinCommissionerCredentialLength, kMaxCommissionerCredentialLength)); VerifyOrExit(aNetworkName.size() <= kMaxNetworkNameLength, error = ERROR_INVALID_ARGS("network name length={} > {}", aNetworkName.size(), kMaxNetworkNameLength)); VerifyOrExit( aExtendedPanId.size() == kExtendedPanIdLength, error = ERROR_INVALID_ARGS("extended PAN ID length={} != {}", aExtendedPanId.size(), kExtendedPanIdLength)); salt.insert(salt.end(), saltPrefix.begin(), saltPrefix.end()); salt.insert(salt.end(), aExtendedPanId.begin(), aExtendedPanId.end()); salt.insert(salt.end(), aNetworkName.begin(), aNetworkName.end()); aPSKc.resize(kMaxPSKcLength); otPbkdf2Cmac(reinterpret_cast<const uint8_t *>(aPassphrase.data()), static_cast<uint16_t>(aPassphrase.size()), salt.data(), static_cast<uint16_t>(salt.size()), 16384, static_cast<uint16_t>(aPSKc.size()), aPSKc.data()); exit: return error; } ByteArray Commissioner::ComputeJoinerId(uint64_t aEui64) { Sha256 sha256; uint8_t 
hash[Sha256::kHashSize]; ByteArray eui64; utils::Encode(eui64, aEui64); sha256.Start(); sha256.Update(&eui64[0], eui64.size()); sha256.Finish(hash); static_assert(sizeof(hash) >= kJoinerIdLength, "wrong crypto::Sha256::kHashSize value"); ByteArray joinerId{hash, hash + kJoinerIdLength}; joinerId[0] |= kLocalExternalAddrMask; return joinerId; } void Commissioner::AddJoiner(ByteArray &aSteeringData, const ByteArray &aJoinerId) { if (aSteeringData.size() != kMaxSteeringDataLength) { aSteeringData.resize(kMaxSteeringDataLength); std::fill(aSteeringData.begin(), aSteeringData.end(), 0); } ComputeBloomFilter(aSteeringData, aJoinerId); } Error Commissioner::GetMeshLocalAddr(std::string & aMeshLocalAddr, const std::string &aMeshLocalPrefix, uint16_t aLocator16) { static const size_t kThreadMeshLocalPrefixLength = 8; Error error; ByteArray rawAddr; Address addr; SuccessOrExit(error = Ipv6PrefixFromString(rawAddr, aMeshLocalPrefix)); VerifyOrExit(rawAddr.size() == kThreadMeshLocalPrefixLength, error = ERROR_INVALID_ARGS("Thread Mesh local prefix length={} != {}", rawAddr.size(), kThreadMeshLocalPrefixLength)); utils::Encode<uint16_t>(rawAddr, 0x0000); utils::Encode<uint16_t>(rawAddr, 0x00FF); utils::Encode<uint16_t>(rawAddr, 0xFE00); utils::Encode<uint16_t>(rawAddr, aLocator16); SuccessOrExit(addr.Set(rawAddr)); aMeshLocalAddr = addr.ToString(); exit: return error; } CommissionerImpl::CommissionerImpl(CommissionerHandler &aHandler, struct event_base *aEventBase) : mState(State::kDisabled) , mSessionId(0) , mCommissionerHandler(aHandler) , mEventBase(aEventBase) , mKeepAliveTimer(mEventBase, [this](Timer &aTimer) { SendKeepAlive(aTimer); }) , mBrClient(mEventBase) , mJoinerSessionTimer(mEventBase, [this](Timer &aTimer) { HandleJoinerSessionTimer(aTimer); }) , mResourceUdpRx(uri::kUdpRx, [this](const coap::Request &aRequest) { mProxyClient.HandleUdpRx(aRequest); }) , mResourceRlyRx(uri::kRelayRx, [this](const coap::Request &aRequest) { HandleRlyRx(aRequest); }) , 
mProxyClient(mEventBase, mBrClient) #if OT_COMM_CONFIG_CCM_ENABLE , mTokenManager(mEventBase) #endif , mResourceDatasetChanged(uri::kMgmtDatasetChanged, [this](const coap::Request &aRequest) { HandleDatasetChanged(aRequest); }) , mResourcePanIdConflict(uri::kMgmtPanidConflict, [this](const coap::Request &aRequest) { HandlePanIdConflict(aRequest); }) , mResourceEnergyReport(uri::kMgmtEdReport, [this](const coap::Request &aRequest) { HandleEnergyReport(aRequest); }) { SuccessOrDie(mBrClient.AddResource(mResourceUdpRx)); SuccessOrDie(mBrClient.AddResource(mResourceRlyRx)); SuccessOrDie(mProxyClient.AddResource(mResourceDatasetChanged)); SuccessOrDie(mProxyClient.AddResource(mResourcePanIdConflict)); SuccessOrDie(mProxyClient.AddResource(mResourceEnergyReport)); } Error CommissionerImpl::Init(const Config &aConfig) { Error error; SuccessOrExit(error = ValidateConfig(aConfig)); mConfig = aConfig; InitLogger(aConfig.mLogger); LoggingConfig(); SuccessOrExit(error = mBrClient.Init(GetDtlsConfig(mConfig))); #if OT_COMM_CONFIG_CCM_ENABLE if (IsCcmMode()) { // It is not good to leave the token manager uninitialized in non-CCM mode. // TODO(wgtdkp): create TokenManager only in CCM Mode. 
SuccessOrExit(error = mTokenManager.Init(mConfig)); } #endif exit: return error; } Error CommissionerImpl::ValidateConfig(const Config &aConfig) { Error error; { tlv::Tlv commissionerIdTlv{tlv::Type::kCommissionerId, aConfig.mId}; VerifyOrExit(!aConfig.mId.empty(), error = ERROR_INVALID_ARGS("commissioner ID is mandatory")); VerifyOrExit(commissionerIdTlv.IsValid(), error = ERROR_INVALID_ARGS("{} is not a valid Commissioner ID", aConfig.mId)); } VerifyOrExit( (aConfig.mKeepAliveInterval >= kMinKeepAliveInterval && aConfig.mKeepAliveInterval <= kMaxKeepAliveInterval), error = ERROR_INVALID_ARGS("keep-alive internal {} exceeds range [{}, {}]", aConfig.mKeepAliveInterval, kMinKeepAliveInterval, kMaxKeepAliveInterval)); if (aConfig.mEnableCcm) { tlv::Tlv domainNameTlv{tlv::Type::kDomainName, aConfig.mDomainName}; #if !OT_COMM_CONFIG_CCM_ENABLE ExitNow(error = ERROR_INVALID_ARGS(CCM_NOT_IMPLEMENTED)); #endif VerifyOrExit(!aConfig.mDomainName.empty(), error = ERROR_INVALID_ARGS("missing Domain Name for CCM network")); VerifyOrExit(domainNameTlv.IsValid(), error = ERROR_INVALID_ARGS("Domain Name is too long (length={})", aConfig.mDomainName.size())); VerifyOrExit(!aConfig.mPrivateKey.empty(), error = ERROR_INVALID_ARGS("missing Private Key file for CCM network")); VerifyOrExit(!aConfig.mCertificate.empty(), error = ERROR_INVALID_ARGS("missing Certificate file for CCM network")); VerifyOrExit(!aConfig.mTrustAnchor.empty(), error = ERROR_INVALID_ARGS("missing Trust Anchor file for CCM network")); } else { // Should we also enable setting PSKc from passphrase? 
VerifyOrExit(!aConfig.mPSKc.empty(), error = ERROR_INVALID_ARGS("missing PSKc for non-CCM network")); VerifyOrExit(aConfig.mPSKc.size() <= kMaxPSKcLength, error = ERROR_INVALID_ARGS("PSKc is too long (length={})", aConfig.mPSKc.size())); } exit: return error; } void CommissionerImpl::LoggingConfig() { LOG_INFO(LOG_REGION_CONFIG, "Id = {}", mConfig.mId); LOG_INFO(LOG_REGION_CONFIG, "enable CCM = {}", mConfig.mEnableCcm); LOG_INFO(LOG_REGION_CONFIG, "domain name = {}", mConfig.mDomainName); LOG_INFO(LOG_REGION_CONFIG, "keep alive interval = {}", mConfig.mKeepAliveInterval); LOG_INFO(LOG_REGION_CONFIG, "enable DTLS debug logging = {}", mConfig.mEnableDtlsDebugLogging); LOG_INFO(LOG_REGION_CONFIG, "maximum connection number = {}", mConfig.mMaxConnectionNum); // Do not logging credentials } const Config &CommissionerImpl::GetConfig() const { return mConfig; } void CommissionerImpl::Petition(PetitionHandler aHandler, const std::string &aAddr, uint16_t aPort) { Error error; auto onConnected = [this, aHandler](Error aError) { if (aError != ErrorCode::kNone) { aHandler(nullptr, aError); } else { LOG_DEBUG(LOG_REGION_MESHCOP, "DTLS connection to border agent succeed"); SendPetition(aHandler); } }; LOG_DEBUG(LOG_REGION_MESHCOP, "starting petition: border agent = ({}, {})", aAddr, aPort); VerifyOrExit(!IsActive(), error = ERROR_INVALID_STATE("cannot petition when the commissioner is running")); LOG_DEBUG(LOG_REGION_MESHCOP, "starting petition: border agent = ({}, {})", aAddr, aPort); if (mBrClient.IsConnected()) { SendPetition(aHandler); } else { Connect(onConnected, aAddr, aPort); } exit: if (error != ErrorCode::kNone) { aHandler(nullptr, error); } } void CommissionerImpl::Resign(ErrorHandler aHandler) { if (IsActive()) { SendKeepAlive(mKeepAliveTimer, false); } if (mKeepAliveTimer.IsRunning()) { mKeepAliveTimer.Stop(); } Disconnect(); aHandler(ERROR_NONE); } void CommissionerImpl::Connect(ErrorHandler aHandler, const std::string &aAddr, uint16_t aPort) { auto onConnected = 
[aHandler](const DtlsSession &, Error aError) { aHandler(aError); }; mBrClient.Connect(onConnected, aAddr, aPort); } void CommissionerImpl::Disconnect() { mBrClient.Disconnect(ERROR_CANCELLED("the CoAPs client was disconnected")); mState = State::kDisabled; } uint16_t CommissionerImpl::GetSessionId() const { return mSessionId; } State CommissionerImpl::GetState() const { return mState; } bool CommissionerImpl::IsActive() const { return GetState() == State::kActive; } bool CommissionerImpl::IsCcmMode() const { return mConfig.mEnableCcm; } const std::string &CommissionerImpl::GetDomainName() const { return mConfig.mDomainName; } void CommissionerImpl::CancelRequests() { mProxyClient.CancelRequests(); mBrClient.CancelRequests(); #if OT_COMM_CONFIG_CCM_ENABLE if (IsCcmMode()) { mTokenManager.CancelRequests(); } #endif } void CommissionerImpl::GetCommissionerDataset(Handler<CommissionerDataset> aHandler, uint16_t aDatasetFlags) { Error error; coap::Request request{coap::Type::kConfirmable, coap::Code::kPost}; ByteArray tlvTypes = GetCommissionerDatasetTlvs(aDatasetFlags); auto onResponse = [aHandler](const coap::Response *aResponse, Error aError) { Error error; CommissionerDataset dataset; SuccessOrExit(error = aError); ASSERT(aResponse != nullptr); VerifyOrExit(aResponse->GetCode() == coap::Code::kChanged, error = ERROR_BAD_FORMAT("expect CoAP::CHANGED for MGMT_COMM_GET.rsp message")); SuccessOrExit(error = DecodeCommissionerDataset(dataset, *aResponse)); aHandler(&dataset, error); exit: if (error != ErrorCode::kNone) { aHandler(nullptr, error); } }; SuccessOrExit(error = request.SetUriPath(uri::kMgmtCommissionerGet)); // If Get TLV is not present, get all Commissioner Dataset TLVs. 
if (!tlvTypes.empty()) { SuccessOrExit(AppendTlv(request, {tlv::Type::kGet, tlvTypes})); } mBrClient.SendRequest(request, onResponse); LOG_DEBUG(LOG_REGION_MGMT, "sent MGMT_COMMISSIONER_GET.req"); exit: if (error != ErrorCode::kNone) { aHandler(nullptr, error); } } void CommissionerImpl::SetCommissionerDataset(ErrorHandler aHandler, const CommissionerDataset &aDataset) { Error error; coap::Request request{coap::Type::kConfirmable, coap::Code::kPost}; auto onResponse = [aHandler](const coap::Response *aResponse, Error aError) { aHandler(HandleStateResponse(aResponse, aError)); }; VerifyOrExit(aDataset.mPresentFlags != 0, error = ERROR_INVALID_ARGS("empty Commissioner Dataset")); VerifyOrExit((aDataset.mPresentFlags & CommissionerDataset::kSessionIdBit) == 0, error = ERROR_INVALID_ARGS("trying to set Commissioner Session ID which is read-only")); VerifyOrExit((aDataset.mPresentFlags & CommissionerDataset::kBorderAgentLocatorBit) == 0, error = ERROR_INVALID_ARGS("trying to set Border Agent Locator which is read-only")); // TODO(wgtdkp): verify if every joiner UDP port differs from each other (required by Thread). // Otherwise, this request may fail. 
    SuccessOrExit(error = request.SetUriPath(uri::kMgmtCommissionerSet));
    SuccessOrExit(error = AppendTlv(request, {tlv::Type::kCommissionerSessionId, GetSessionId()}));
    SuccessOrExit(error = EncodeCommissionerDataset(request, aDataset));

#if OT_COMM_CONFIG_CCM_ENABLE
    // In CCM mode every MGMT_* request carries a Commissioner Token and signature.
    if (IsCcmMode())
    {
        SuccessOrExit(error = SignRequest(request));
    }
#endif

    mBrClient.SendRequest(request, onResponse);

    LOG_DEBUG(LOG_REGION_MGMT, "sent MGMT_COMMISSIONER_SET.req");

exit:
    if (error != ErrorCode::kNone)
    {
        aHandler(error);
    }
}

// Sends MGMT_ACTIVE_GET.req and decodes the raw payload into an
// ActiveOperationalDataset; the Active Timestamp TLV is mandatory in the
// response.
void CommissionerImpl::GetActiveDataset(Handler<ActiveOperationalDataset> aHandler, uint16_t aDatasetFlags)
{
    auto rawDatasetHandler = [aHandler](const ByteArray *aRawDataset, Error aError) {
        Error                    error;
        ActiveOperationalDataset dataset;

        SuccessOrExit(error = aError);
        SuccessOrExit(error = DecodeActiveOperationalDataset(dataset, *aRawDataset));
        VerifyOrExit(dataset.mPresentFlags & ActiveOperationalDataset::kActiveTimestampBit,
                     error = ERROR_BAD_FORMAT("Active Timestamp is not included in MGMT_ACTIVE_GET.rsp"));

        aHandler(&dataset, error);

    exit:
        if (error != ErrorCode::kNone)
        {
            aHandler(nullptr, error);
        }
    };

    // Delegate the network round-trip; only decoding happens here.
    GetRawActiveDataset(rawDatasetHandler, aDatasetFlags);
}

// Sends MGMT_ACTIVE_GET.req and hands the raw (undecoded) response payload to
// the caller; @p aDatasetFlags selects the requested TLVs (all when empty).
void CommissionerImpl::GetRawActiveDataset(Handler<ByteArray> aHandler, uint16_t aDatasetFlags)
{
    Error         error;
    coap::Request request{coap::Type::kConfirmable, coap::Code::kPost};
    ByteArray     datasetList = GetActiveOperationalDatasetTlvs(aDatasetFlags);

    auto onResponse = [aHandler](const coap::Response *aResponse, Error aError) {
        Error error;

        // aResponse is non-null whenever aError is kNone.
        SuccessOrExit(error = aError);
        VerifyOrExit(aResponse->GetCode() == coap::Code::kChanged,
                     error = ERROR_BAD_FORMAT("expect CoAP::CHANGED for MGMT_ACTIVE_GET.rsp message"));

        aHandler(&aResponse->GetPayload(), error);

    exit:
        if (error != ErrorCode::kNone)
        {
            aHandler(nullptr, error);
        }
    };

    SuccessOrExit(error = request.SetUriPath(uri::kMgmtActiveGet));

    // If Get TLV is not present, all Active Operational Dataset TLVs are returned.
    if (!datasetList.empty())
    {
        SuccessOrExit(error = AppendTlv(request, {tlv::Type::kGet, datasetList}));
    }

#if OT_COMM_CONFIG_CCM_ENABLE
    if (IsCcmMode())
    {
        SuccessOrExit(error = SignRequest(request));
    }
#endif

    mBrClient.SendRequest(request, onResponse);

    LOG_DEBUG(LOG_REGION_MGMT, "sent MGMT_ACTIVE_GET.req");

exit:
    if (error != ErrorCode::kNone)
    {
        aHandler(nullptr, error);
    }
}

// Sends MGMT_ACTIVE_SET.req. Connectivity-affecting TLVs (Channel, PAN ID,
// Mesh-local Prefix, Network Master Key) are rejected here and must be set
// through the Pending Operational Dataset instead.
void CommissionerImpl::SetActiveDataset(ErrorHandler aHandler, const ActiveOperationalDataset &aDataset)
{
    Error         error;
    coap::Request request{coap::Type::kConfirmable, coap::Code::kPost};

    auto onResponse = [aHandler](const coap::Response *aResponse, Error aError) {
        aHandler(HandleStateResponse(aResponse, aError));
    };

    VerifyOrExit(aDataset.mPresentFlags & ActiveOperationalDataset::kActiveTimestampBit,
                 error = ERROR_INVALID_ARGS("Active Timestamp is mandatory for an Active Operational Dataset"));

    // TLVs affect connectivity are not allowed.
    VerifyOrExit((aDataset.mPresentFlags & ActiveOperationalDataset::kChannelBit) == 0,
                 error = ERROR_INVALID_ARGS("Channel cannot be set with Active Operational Dataset, "
                                            "try setting with Pending Operational Dataset instead"));
    VerifyOrExit((aDataset.mPresentFlags & ActiveOperationalDataset::kPanIdBit) == 0,
                 error = ERROR_INVALID_ARGS("PAN ID cannot be set with Active Operational Dataset, "
                                            "try setting with Pending Operational Dataset instead"));
    VerifyOrExit((aDataset.mPresentFlags & ActiveOperationalDataset::kMeshLocalPrefixBit) == 0,
                 error = ERROR_INVALID_ARGS("Mesh-local Prefix cannot be set with Active Operational Dataset, "
                                            "try setting with Pending Operational Dataset instead"));
    VerifyOrExit((aDataset.mPresentFlags & ActiveOperationalDataset::kNetworkMasterKeyBit) == 0,
                 error = ERROR_INVALID_ARGS("Network Master Key cannot be set with Active Operational Dataset, "
                                            "try setting with Pending Operational Dataset instead"));

    SuccessOrExit(error = request.SetUriPath(uri::kMgmtActiveSet));
    SuccessOrExit(error = AppendTlv(request, {tlv::Type::kCommissionerSessionId, GetSessionId()}));
    SuccessOrExit(error = EncodeActiveOperationalDataset(request, aDataset));

#if 
OT_COMM_CONFIG_CCM_ENABLE if (IsCcmMode()) { SuccessOrExit(error = SignRequest(request)); } #endif mBrClient.SendRequest(request, onResponse); LOG_DEBUG(LOG_REGION_MGMT, "sent MGMT_ACTIVE_SET.req"); exit: if (error != ErrorCode::kNone) { aHandler(error); } } void CommissionerImpl::GetPendingDataset(Handler<PendingOperationalDataset> aHandler, uint16_t aDatasetFlags) { Error error; coap::Request request{coap::Type::kConfirmable, coap::Code::kPost}; ByteArray datasetList = GetPendingOperationalDatasetTlvs(aDatasetFlags); auto onResponse = [aHandler](const coap::Response *aResponse, Error aError) { Error error; PendingOperationalDataset dataset; SuccessOrExit(error = aError); VerifyOrExit(aResponse->GetCode() == coap::Code::kChanged, error = ERROR_BAD_FORMAT("expect CoAP::CHANGED for MGMT_PENDING_GET.rsp message")); SuccessOrExit(error = DecodePendingOperationalDataset(dataset, *aResponse)); VerifyOrExit(dataset.mPresentFlags | PendingOperationalDataset::kActiveTimestampBit, error = ERROR_BAD_FORMAT("Active Timestamp is not included in MGMT_PENDING_GET.rsp")); VerifyOrExit(dataset.mPresentFlags | PendingOperationalDataset::kPendingTimestampBit, error = ERROR_BAD_FORMAT("Pending Timestamp is not included in MGMT_PENDING_GET.rsp")); VerifyOrExit(dataset.mPresentFlags | PendingOperationalDataset::kDelayTimerBit, error = ERROR_BAD_FORMAT("Delay Timer is not included in MGMT_PENDING_GET.rsp")); aHandler(&dataset, error); exit: if (error != ErrorCode::kNone) { aHandler(nullptr, error); } }; SuccessOrExit(error = request.SetUriPath(uri::kMgmtPendingGet)); if (!datasetList.empty()) { SuccessOrExit(error = AppendTlv(request, {tlv::Type::kGet, datasetList})); } #if OT_COMM_CONFIG_CCM_ENABLE if (IsCcmMode()) { SuccessOrExit(error = SignRequest(request)); } #endif mBrClient.SendRequest(request, onResponse); LOG_DEBUG(LOG_REGION_MGMT, "sent MGMT_PENDING_GET.req"); exit: if (error != ErrorCode::kNone) { aHandler(nullptr, error); } } void 
CommissionerImpl::SetPendingDataset(ErrorHandler aHandler, const PendingOperationalDataset &aDataset) { Error error; coap::Request request{coap::Type::kConfirmable, coap::Code::kPost}; auto onResponse = [aHandler](const coap::Response *aResponse, Error aError) { aHandler(HandleStateResponse(aResponse, aError)); }; VerifyOrExit(aDataset.mPresentFlags & PendingOperationalDataset::kActiveTimestampBit, error = ERROR_INVALID_ARGS("Active Timestamp is mandatory for a Pending Operational Dataset")); VerifyOrExit(aDataset.mPresentFlags & PendingOperationalDataset::kPendingTimestampBit, error = ERROR_INVALID_ARGS("Pending Timestamp is mandatory for a Pending Operational Dataset")); VerifyOrExit(aDataset.mPresentFlags & PendingOperationalDataset::kDelayTimerBit, error = ERROR_INVALID_ARGS("Delay Timer is mandatory for a Pending Operational Dataset")); SuccessOrExit(error = request.SetUriPath(uri::kMgmtPendingSet)); SuccessOrExit(error = AppendTlv(request, {tlv::Type::kCommissionerSessionId, GetSessionId()})); SuccessOrExit(error = EncodePendingOperationalDataset(request, aDataset)); #if OT_COMM_CONFIG_CCM_ENABLE if (IsCcmMode()) { SuccessOrExit(error = SignRequest(request)); } #endif mBrClient.SendRequest(request, onResponse); LOG_DEBUG(LOG_REGION_MGMT, "sent MGMT_PENDING_SET.req"); exit: if (error != ErrorCode::kNone) { aHandler(error); } } #if OT_COMM_CONFIG_CCM_ENABLE void CommissionerImpl::SetBbrDataset(ErrorHandler aHandler, const BbrDataset &aDataset) { Error error; coap::Request request{coap::Type::kConfirmable, coap::Code::kPost}; auto onResponse = [aHandler](const coap::Response *aResponse, Error aError) { aHandler(HandleStateResponse(aResponse, aError)); }; VerifyOrExit(IsActive(), error = ERROR_INVALID_STATE("the commissioner is not active")); VerifyOrExit(IsCcmMode(), error = ERROR_INVALID_STATE("sending MGMT_BBR_SET.req is only valid in CCM mode")); VerifyOrExit((aDataset.mPresentFlags & BbrDataset::kRegistrarIpv6AddrBit) == 0, error = ERROR_INVALID_ARGS("trying 
to set Registrar IPv6 Address which is read-only")); SuccessOrExit(error = request.SetUriPath(uri::kMgmtBbrSet)); SuccessOrExit(error = AppendTlv(request, {tlv::Type::kCommissionerSessionId, GetSessionId()})); SuccessOrExit(error = EncodeBbrDataset(request, aDataset)); if (IsCcmMode()) { SuccessOrExit(error = SignRequest(request)); } mBrClient.SendRequest(request, onResponse); LOG_DEBUG(LOG_REGION_MGMT, "sent MGMT_BBR_SET.req"); exit: if (error != ErrorCode::kNone) { aHandler(error); } } void CommissionerImpl::GetBbrDataset(Handler<BbrDataset> aHandler, uint16_t aDatasetFlags) { Error error; coap::Request request{coap::Type::kConfirmable, coap::Code::kPost}; ByteArray datasetList = GetBbrDatasetTlvs(aDatasetFlags); auto onResponse = [aHandler](const coap::Response *aResponse, Error aError) { Error error; BbrDataset dataset; SuccessOrExit(error = aError); VerifyOrExit(aResponse->GetCode() == coap::Code::kChanged, error = ERROR_BAD_FORMAT("expect CoAP::CHANGED for MGMT_BBR_GET.rsp message")); SuccessOrExit(error = DecodeBbrDataset(dataset, *aResponse)); aHandler(&dataset, error); exit: if (error != ErrorCode::kNone) { aHandler(nullptr, error); } }; VerifyOrExit(IsActive(), error = ERROR_INVALID_STATE("the commissioner is not active")); VerifyOrExit(IsCcmMode(), error = ERROR_INVALID_STATE("sending MGMT_BBR_GET.req is only valid in CCM mode")); SuccessOrExit(error = request.SetUriPath(uri::kMgmtBbrGet)); if (!datasetList.empty()) { SuccessOrExit(error = AppendTlv(request, {tlv::Type::kGet, datasetList})); } mBrClient.SendRequest(request, onResponse); LOG_DEBUG(LOG_REGION_MGMT, "sent MGMT_BBR_GET.req"); exit: if (error != ErrorCode::kNone) { aHandler(nullptr, error); } } void CommissionerImpl::SetSecurePendingDataset(ErrorHandler aHandler, const std::string & aPbbrAddr, uint32_t aMaxRetrievalTimer, const PendingOperationalDataset &aDataset) { Error error; Address dstAddr; ByteArray secureDissemination; std::string uri = "coaps://[" + aPbbrAddr + "]" + 
uri::kMgmtPendingGet; coap::Request request{coap::Type::kConfirmable, coap::Code::kPost}; auto onResponse = [aHandler](const coap::Response *aResponse, Error aError) { aHandler(HandleStateResponse(aResponse, aError)); }; VerifyOrExit(IsCcmMode(), error = ERROR_INVALID_STATE("sending MGMT_SEC_PENDING_SET.req is only valid in CCM mode")); // Delay timer is mandatory. VerifyOrExit(aDataset.mPresentFlags & PendingOperationalDataset::kDelayTimerBit, error = ERROR_INVALID_ARGS("Delay Timer is mandatory for a Secure Pending Operational Dataset")); SuccessOrExit(error = dstAddr.Set(aPbbrAddr)); SuccessOrExit(error = request.SetUriPath(uri::kMgmtSecPendingSet)); SuccessOrExit(error = AppendTlv(request, {tlv::Type::kCommissionerSessionId, GetSessionId()})); utils::Encode(secureDissemination, aDataset.mPendingTimestamp.Encode()); utils::Encode(secureDissemination, aMaxRetrievalTimer); secureDissemination.insert(secureDissemination.end(), uri.begin(), uri.end()); SuccessOrExit(error = AppendTlv(request, {tlv::Type::kSecureDissemination, secureDissemination})); SuccessOrExit(error = EncodePendingOperationalDataset(request, aDataset)); if (IsCcmMode()) { SuccessOrExit(error = SignRequest(request)); } mProxyClient.SendRequest(request, onResponse, dstAddr, kDefaultMmPort); LOG_DEBUG(LOG_REGION_MGMT, "sent MGMT_SEC_PENDING_SET.req"); exit: if (error != ErrorCode::kNone) { aHandler(error); } } void CommissionerImpl::CommandReenroll(ErrorHandler aHandler, const std::string &aDstAddr) { Error error; VerifyOrExit(IsActive(), error = ERROR_INVALID_STATE("the commissioner is not active")); VerifyOrExit(IsCcmMode(), error = ERROR_INVALID_STATE("en-enroll a device is not in CCM Mode")); SendProxyMessage(aHandler, aDstAddr, uri::kMgmtReenroll); exit: if (error != ERROR_NONE) { aHandler(error); } } void CommissionerImpl::CommandDomainReset(ErrorHandler aHandler, const std::string &aDstAddr) { Error error; VerifyOrExit(IsActive(), error = ERROR_INVALID_STATE("the commissioner is not 
active")); VerifyOrExit(IsCcmMode(), error = ERROR_INVALID_STATE("resetting a device is not in CCM Mode")); SendProxyMessage(aHandler, aDstAddr, uri::kMgmtDomainReset); exit: if (error != ERROR_NONE) { aHandler(error); } } void CommissionerImpl::CommandMigrate(ErrorHandler aHandler, const std::string &aDstAddr, const std::string &aDstNetworkName) { Error error; Address dstAddr; coap::Request request{coap::Type::kConfirmable, coap::Code::kPost}; auto onResponse = [aHandler](const coap::Response *aResponse, Error aError) { Error error; SuccessOrExit(error = aError); VerifyOrExit(aResponse->GetCode() != coap::Code::kUnauthorized, error = ERROR_SECURITY("response code is CoAP::UNAUTHORIZED")); VerifyOrExit(aResponse->GetCode() == coap::Code::kChanged, error = ERROR_BAD_FORMAT("expect response code as CoAP::CHANGED")); if (!aResponse->GetPayload().empty()) { auto stateTlv = GetTlv(tlv::Type::kState, *aResponse); VerifyOrExit(stateTlv != nullptr, error = ERROR_BAD_FORMAT("no valid State TLV found in response")); VerifyOrExit(stateTlv->GetValueAsInt8() == tlv::kStateAccept, error = ERROR_REJECTED("request was rejected by peer")); } exit: aHandler(error); }; VerifyOrExit(IsActive(), error = ERROR_INVALID_STATE("the commissioner is not active")); VerifyOrExit(IsCcmMode(), error = ERROR_INVALID_STATE("Migrating a Device is only valid in CCM Mode")); SuccessOrExit(error = dstAddr.Set(aDstAddr)); VerifyOrExit(aDstNetworkName.size() <= kMaxNetworkNameLength, error = ERROR_INVALID_ARGS("Network Name length={} > {}", aDstNetworkName.size(), kMaxNetworkNameLength)); SuccessOrExit(error = request.SetUriPath(uri::kMgmtNetMigrate)); SuccessOrExit(error = AppendTlv(request, {tlv::Type::kCommissionerSessionId, GetSessionId()})); SuccessOrExit(error = AppendTlv(request, {tlv::Type::kNetworkName, aDstNetworkName})); if (IsCcmMode()) { SuccessOrExit(error = SignRequest(request)); } mProxyClient.SendRequest(request, onResponse, dstAddr, kDefaultMmPort); LOG_DEBUG(LOG_REGION_MGMT, "sent 
MGMT_NET_MIGRATE.req"); exit: if (error != ErrorCode::kNone) { aHandler(error); } } void CommissionerImpl::RequestToken(Handler<ByteArray> aHandler, const std::string &aAddr, uint16_t aPort) { if (!IsCcmMode()) { aHandler(nullptr, ERROR_INVALID_STATE("requesting COM_TOK is only valid in CCM Mode")); } else { mTokenManager.RequestToken(aHandler, aAddr, aPort); } } Error CommissionerImpl::SetToken(const ByteArray &aSignedToken, const ByteArray &aSignerCert) { Error error; VerifyOrExit(IsCcmMode(), error = ERROR_INVALID_STATE("setting COM_TOK in only valid in CCM Mode")); error = mTokenManager.SetToken(aSignedToken, aSignerCert); exit: return error; } #else void CommissionerImpl::SetBbrDataset(ErrorHandler aHandler, const BbrDataset &aDataset) { (void)aDataset; aHandler(ERROR_UNIMPLEMENTED(CCM_NOT_IMPLEMENTED)); } void CommissionerImpl::GetBbrDataset(Handler<BbrDataset> aHandler, uint16_t aDatasetFlags) { (void)aDatasetFlags; aHandler(nullptr, ERROR_UNIMPLEMENTED(CCM_NOT_IMPLEMENTED)); } void CommissionerImpl::SetSecurePendingDataset(ErrorHandler aHandler, const std::string & aPbbrAddr, uint32_t aMaxRetrievalTimer, const PendingOperationalDataset &aDataset) { (void)aPbbrAddr; (void)aMaxRetrievalTimer; (void)aDataset; aHandler(ERROR_UNIMPLEMENTED(CCM_NOT_IMPLEMENTED)); } void CommissionerImpl::CommandReenroll(ErrorHandler aHandler, const std::string &aDstAddr) { (void)aDstAddr; aHandler(ERROR_UNIMPLEMENTED(CCM_NOT_IMPLEMENTED)); } void CommissionerImpl::CommandDomainReset(ErrorHandler aHandler, const std::string &aDstAddr) { (void)aDstAddr; aHandler(ERROR_UNIMPLEMENTED(CCM_NOT_IMPLEMENTED)); } void CommissionerImpl::CommandMigrate(ErrorHandler aHandler, const std::string &aDstAddr, const std::string &aDstNetworkName) { (void)aDstAddr; (void)aDstNetworkName; aHandler(ERROR_UNIMPLEMENTED(CCM_NOT_IMPLEMENTED)); } void CommissionerImpl::RequestToken(Handler<ByteArray> aHandler, const std::string &aAddr, uint16_t aPort) { (void)aAddr; (void)aPort; aHandler(nullptr, 
ERROR_UNIMPLEMENTED(CCM_NOT_IMPLEMENTED)); } Error CommissionerImpl::SetToken(const ByteArray &aSignedToken, const ByteArray &aSignerCert) { (void)aSignedToken; (void)aSignerCert; return ERROR_UNIMPLEMENTED(CCM_NOT_IMPLEMENTED); } #endif // OT_COMM_CONFIG_CCM_ENABLE void CommissionerImpl::RegisterMulticastListener(Handler<uint8_t> aHandler, const std::string & aPbbrAddr, const std::vector<std::string> &aMulticastAddrList, uint32_t aTimeout) { Error error; Address dstAddr; Address multicastAddr; ByteArray rawAddresses; coap::Request request{coap::Type::kConfirmable, coap::Code::kPost}; auto onResponse = [aHandler](const coap::Response *aResponse, Error aError) { Error error; tlv::TlvPtr statusTlv = nullptr; uint8_t status; SuccessOrExit(error = aError); VerifyOrExit(aResponse->GetCode() != coap::Code::kUnauthorized, error = ERROR_SECURITY("response code is CoAP::UNAUTHORIZED")); VerifyOrExit(aResponse->GetCode() == coap::Code::kChanged, error = ERROR_BAD_FORMAT("expect response code as CoAP::CHANGED")); statusTlv = GetTlv(tlv::Type::kThreadStatus, *aResponse, tlv::Scope::kThread); VerifyOrExit(statusTlv != nullptr, error = ERROR_BAD_FORMAT("no valid State TLV found in response")); status = statusTlv->GetValueAsUint8(); aHandler(&status, error); exit: if (error != ErrorCode::kNone) { aHandler(nullptr, error); } }; SuccessOrExit(error = dstAddr.Set(aPbbrAddr)); VerifyOrExit(!aMulticastAddrList.empty(), error = ERROR_INVALID_ARGS("Multicast Address List cannot be empty")); for (const auto &addr : aMulticastAddrList) { SuccessOrExit(error = multicastAddr.Set(addr)); VerifyOrExit(multicastAddr.IsIpv6() && multicastAddr.IsMulticast(), error = ERROR_INVALID_ARGS("{} is not a valid IPv6 multicast address", multicastAddr.ToString())); rawAddresses.insert(rawAddresses.end(), multicastAddr.GetRaw().begin(), multicastAddr.GetRaw().end()); } SuccessOrExit(error = request.SetUriPath(uri::kMlr)); SuccessOrExit( error = AppendTlv(request, {tlv::Type::kThreadCommissionerSessionId, 
GetSessionId(), tlv::Scope::kThread})); SuccessOrExit(error = AppendTlv(request, {tlv::Type::kThreadTimeout, aTimeout, tlv::Scope::kThread})); SuccessOrExit(error = AppendTlv(request, {tlv::Type::kThreadIpv6Addresses, rawAddresses, tlv::Scope::kThread})); #if OT_COMM_CONFIG_CCM_ENABLE if (IsCcmMode()) { SuccessOrExit(error = SignRequest(request)); } #endif mProxyClient.SendRequest(request, onResponse, dstAddr, kDefaultMmPort); LOG_DEBUG(LOG_REGION_MGMT, "sent MLR.req"); exit: if (error != ErrorCode::kNone) { aHandler(nullptr, error); } } void CommissionerImpl::AnnounceBegin(ErrorHandler aHandler, uint32_t aChannelMask, uint8_t aCount, uint16_t aPeriod, const std::string &aDstAddr) { Error error; Address dstAddr; ByteArray channelMask; coap::Request request{coap::Type::kConfirmable, coap::Code::kPost}; auto onResponse = [aHandler](const coap::Response *aResponse, Error aError) { Error error; SuccessOrExit(error = aError); VerifyOrExit(aResponse->GetCode() != coap::Code::kUnauthorized, error = ERROR_SECURITY("response code is CoAP::UNAUTHORIZED")); VerifyOrExit(aResponse->GetCode() == coap::Code::kChanged, error = ERROR_BAD_FORMAT("expect response code as CoAP::CHANGED")); exit: aHandler(error); }; SuccessOrExit(error = dstAddr.Set(aDstAddr)); if (dstAddr.IsMulticast()) { request.SetType(coap::Type::kNonConfirmable); } SuccessOrExit(error = request.SetUriPath(uri::kMgmtAnnounceBegin)); SuccessOrExit(error = AppendTlv(request, {tlv::Type::kCommissionerSessionId, GetSessionId()})); SuccessOrExit(error = MakeChannelMask(channelMask, aChannelMask)); SuccessOrExit(error = AppendTlv(request, {tlv::Type::kChannelMask, channelMask})); SuccessOrExit(error = AppendTlv(request, {tlv::Type::kCount, aCount})); SuccessOrExit(error = AppendTlv(request, {tlv::Type::kPeriod, aPeriod})); #if OT_COMM_CONFIG_CCM_ENABLE if (IsCcmMode()) { SuccessOrExit(error = SignRequest(request)); } #endif mProxyClient.SendRequest(request, onResponse, dstAddr, kDefaultMmPort); exit: if (error != 
ErrorCode::kNone || request.IsNonConfirmable()) { aHandler(error); } } void CommissionerImpl::PanIdQuery(ErrorHandler aHandler, uint32_t aChannelMask, uint16_t aPanId, const std::string &aDstAddr) { Error error; Address dstAddr; ByteArray channelMask; coap::Request request{coap::Type::kConfirmable, coap::Code::kPost}; auto onResponse = [aHandler](const coap::Response *aResponse, Error aError) { Error error; SuccessOrExit(error = aError); VerifyOrExit(aResponse->GetCode() != coap::Code::kUnauthorized, error = ERROR_SECURITY("response code is CoAP::UNAUTHORIZED")); VerifyOrExit(aResponse->GetCode() == coap::Code::kChanged, error = ERROR_BAD_FORMAT("expect response code as CoAP::CHANGED")); exit: aHandler(error); }; SuccessOrExit(error = dstAddr.Set(aDstAddr)); if (dstAddr.IsMulticast()) { request.SetType(coap::Type::kNonConfirmable); } SuccessOrExit(error = request.SetUriPath(uri::kMgmtPanidQuery)); SuccessOrExit(error = AppendTlv(request, {tlv::Type::kCommissionerSessionId, GetSessionId()})); SuccessOrExit(error = MakeChannelMask(channelMask, aChannelMask)); SuccessOrExit(error = AppendTlv(request, {tlv::Type::kChannelMask, channelMask})); SuccessOrExit(error = AppendTlv(request, {tlv::Type::kPanId, aPanId})); #if OT_COMM_CONFIG_CCM_ENABLE if (IsCcmMode()) { SuccessOrExit(error = SignRequest(request)); } #endif mProxyClient.SendRequest(request, onResponse, dstAddr, kDefaultMmPort); exit: if (error != ErrorCode::kNone || request.IsNonConfirmable()) { aHandler(error); } } void CommissionerImpl::EnergyScan(ErrorHandler aHandler, uint32_t aChannelMask, uint8_t aCount, uint16_t aPeriod, uint16_t aScanDuration, const std::string &aDstAddr) { Error error; Address dstAddr; ByteArray channelMask; coap::Request request{coap::Type::kConfirmable, coap::Code::kPost}; auto onResponse = [aHandler](const coap::Response *aResponse, Error aError) { Error error; SuccessOrExit(error = aError); VerifyOrExit(aResponse->GetCode() != coap::Code::kUnauthorized, error = 
ERROR_SECURITY("response code is CoAP::UNAUTHORIZED")); VerifyOrExit(aResponse->GetCode() == coap::Code::kChanged, error = ERROR_BAD_FORMAT("expect response code as CoAP::CHANGED")); exit: aHandler(error); }; SuccessOrExit(error = dstAddr.Set(aDstAddr)); if (dstAddr.IsMulticast()) { request.SetType(coap::Type::kNonConfirmable); } SuccessOrExit(error = request.SetUriPath(uri::kMgmtEdScan)); SuccessOrExit(error = AppendTlv(request, {tlv::Type::kCommissionerSessionId, GetSessionId()})); SuccessOrExit(error = MakeChannelMask(channelMask, aChannelMask)); SuccessOrExit(error = AppendTlv(request, {tlv::Type::kChannelMask, channelMask})); SuccessOrExit(error = AppendTlv(request, {tlv::Type::kCount, aCount})); SuccessOrExit(error = AppendTlv(request, {tlv::Type::kPeriod, aPeriod})); SuccessOrExit(error = AppendTlv(request, {tlv::Type::kScanDuration, aScanDuration})); #if OT_COMM_CONFIG_CCM_ENABLE if (IsCcmMode()) { SuccessOrExit(error = SignRequest(request)); } #endif mProxyClient.SendRequest(request, onResponse, dstAddr, kDefaultMmPort); exit: if (error != ErrorCode::kNone || request.IsNonConfirmable()) { aHandler(error); } } void CommissionerImpl::SendPetition(PetitionHandler aHandler) { Error error; coap::Request request{coap::Type::kConfirmable, coap::Code::kPost}; auto onResponse = [this, aHandler](const coap::Response *aResponse, Error aError) { Error error; tlv::TlvSet tlvSet; tlv::TlvPtr stateTlv = nullptr; tlv::TlvPtr sessionIdTlv = nullptr; tlv::TlvPtr commissionerIdTlv = nullptr; std::string existingCommissionerId; SuccessOrExit(error = aError); VerifyOrExit(aResponse->GetCode() == coap::Code::kChanged, error = ERROR_BAD_FORMAT("expect response code as CoAP::CHANGED")); SuccessOrExit(error = GetTlvSet(tlvSet, *aResponse)); stateTlv = tlvSet[tlv::Type::kState]; VerifyOrExit(stateTlv != nullptr, error = ERROR_BAD_FORMAT("no valid State TLV found in response")); if (stateTlv->GetValueAsInt8() != tlv::kStateAccept) { commissionerIdTlv = 
tlvSet[tlv::Type::kCommissionerId]; if (commissionerIdTlv != nullptr && commissionerIdTlv->IsValid()) { existingCommissionerId = commissionerIdTlv->GetValueAsString(); } ExitNow(error = ERROR_REJECTED("petition was rejected")); } sessionIdTlv = tlvSet[tlv::Type::kCommissionerSessionId]; VerifyOrExit(sessionIdTlv != nullptr, error = ERROR_BAD_FORMAT("no valid Commissioner Session TLV found in response")); mSessionId = sessionIdTlv->GetValueAsUint16(); mState = State::kActive; mKeepAliveTimer.Start(GetKeepAliveInterval()); LOG_INFO(LOG_REGION_MESHCOP, "petition succeed, start keep-alive timer with {} seconds", GetKeepAliveInterval().count() / 1000); exit: if (error != ErrorCode::kNone) { mState = State::kDisabled; } aHandler(existingCommissionerId.empty() ? nullptr : &existingCommissionerId, error); }; VerifyOrExit(mState == State::kDisabled, error = ERROR_INVALID_STATE("cannot petition when the commissioner is running")); SuccessOrExit(error = request.SetUriPath(uri::kPetitioning)); SuccessOrExit(error = AppendTlv(request, {tlv::Type::kCommissionerId, mConfig.mId})); #if OT_COMM_CONFIG_CCM_ENABLE if (IsCcmMode()) { SuccessOrExit(error = SignRequest(request)); } #endif mState = State::kPetitioning; mBrClient.SendRequest(request, onResponse); LOG_DEBUG(LOG_REGION_MESHCOP, "sent petition request"); exit: if (error != ErrorCode::kNone) { aHandler(nullptr, error); } } void CommissionerImpl::SendKeepAlive(Timer &, bool aKeepAlive) { Error error; coap::Request request{coap::Type::kConfirmable, coap::Code::kPost}; auto state = (aKeepAlive ? 
tlv::kStateAccept : tlv::kStateReject); auto onResponse = [this](const coap::Response *aResponse, Error aError) { Error error = HandleStateResponse(aResponse, aError); if (error == ErrorCode::kNone) { mKeepAliveTimer.Start(GetKeepAliveInterval()); LOG_INFO(LOG_REGION_MESHCOP, "keep alive message accepted, keep-alive timer restarted"); } else { mState = State::kDisabled; Resign([](Error) {}); LOG_WARN(LOG_REGION_MESHCOP, "keep alive message rejected: {}", error.ToString()); } mCommissionerHandler.OnKeepAliveResponse(error); }; VerifyOrExit(IsActive(), error = ERROR_INVALID_STATE("cannot send keep-alive message the commissioner is not active")); SuccessOrExit(error = request.SetUriPath(uri::kKeepAlive)); SuccessOrExit(error = AppendTlv(request, {tlv::Type::kState, state})); SuccessOrExit(error = AppendTlv(request, {tlv::Type::kCommissionerSessionId, GetSessionId()})); #if OT_COMM_CONFIG_CCM_ENABLE if (IsCcmMode()) { SuccessOrExit(error = SignRequest(request)); } #endif mKeepAliveTimer.Start(GetKeepAliveInterval()); mBrClient.SendRequest(request, onResponse); LOG_DEBUG(LOG_REGION_MESHCOP, "sent keep alive message: keepAlive={}", aKeepAlive); exit: if (error != ErrorCode::kNone) { LOG_WARN(LOG_REGION_MESHCOP, "sending keep alive message failed: {}", error.ToString()); Disconnect(); } } #if OT_COMM_CONFIG_CCM_ENABLE Error CommissionerImpl::SignRequest(coap::Request &aRequest, tlv::Scope aScope) { Error error; ByteArray signature; ASSERT(IsCcmMode()); SuccessOrExit(error = mTokenManager.SignMessage(signature, aRequest)); SuccessOrExit(error = AppendTlv(aRequest, {tlv::Type::kCommissionerToken, mTokenManager.GetToken(), aScope})); SuccessOrExit(error = AppendTlv(aRequest, {tlv::Type::kCommissionerSignature, signature, aScope})); exit: return error; } #endif // OT_COMM_CONFIG_CCM_ENABLE Error AppendTlv(coap::Message &aMessage, const tlv::Tlv &aTlv) { Error error; ByteArray buf; VerifyOrExit(aTlv.IsValid(), error = ERROR_INVALID_ARGS("the tlv(type={}) is in bad format", 
utils::to_underlying(aTlv.GetType()))); aTlv.Serialize(buf); aMessage.Append(buf); exit: return error; } Error GetTlvSet(tlv::TlvSet &aTlvSet, const coap::Message &aMessage, tlv::Scope aScope) { return tlv::GetTlvSet(aTlvSet, aMessage.GetPayload(), aScope); } tlv::TlvPtr GetTlv(tlv::Type aTlvType, const coap::Message &aMessage, tlv::Scope aScope) { return tlv::GetTlv(aTlvType, aMessage.GetPayload(), aScope); } Error CommissionerImpl::HandleStateResponse(const coap::Response *aResponse, Error aError) { Error error; tlv::TlvPtr stateTlv = nullptr; SuccessOrExit(error = aError); VerifyOrExit(aResponse->GetCode() != coap::Code::kUnauthorized, error = ERROR_SECURITY("response code is CoAP::UNAUTHORIZED")); VerifyOrExit(aResponse->GetCode() == coap::Code::kChanged, error = ERROR_BAD_FORMAT("expect response code as CoAP::CHANGED")); VerifyOrExit((stateTlv = GetTlv(tlv::Type::kState, *aResponse)) != nullptr, error = ERROR_BAD_FORMAT("no valid State TLV found in response")); VerifyOrExit(stateTlv->GetValueAsInt8() == tlv::kStateAccept, error = ERROR_REJECTED("the request was rejected by peer")); exit: return error; } static void inline EncodeTlvType(ByteArray &aBuf, tlv::Type aTlvType) { aBuf.emplace_back(utils::to_underlying(aTlvType)); } ByteArray CommissionerImpl::GetActiveOperationalDatasetTlvs(uint16_t aDatasetFlags) { ByteArray tlvTypes; if (aDatasetFlags & ActiveOperationalDataset::kActiveTimestampBit) { EncodeTlvType(tlvTypes, tlv::Type::kActiveTimestamp); } if (aDatasetFlags & ActiveOperationalDataset::kChannelBit) { EncodeTlvType(tlvTypes, tlv::Type::kChannel); } if (aDatasetFlags & ActiveOperationalDataset::kChannelMaskBit) { EncodeTlvType(tlvTypes, tlv::Type::kChannelMask); } if (aDatasetFlags & ActiveOperationalDataset::kExtendedPanIdBit) { EncodeTlvType(tlvTypes, tlv::Type::kExtendedPanId); } if (aDatasetFlags & ActiveOperationalDataset::kMeshLocalPrefixBit) { EncodeTlvType(tlvTypes, tlv::Type::kNetworkMeshLocalPrefix); } if (aDatasetFlags & 
ActiveOperationalDataset::kNetworkMasterKeyBit) { EncodeTlvType(tlvTypes, tlv::Type::kNetworkMasterKey); } if (aDatasetFlags & ActiveOperationalDataset::kNetworkNameBit) { EncodeTlvType(tlvTypes, tlv::Type::kNetworkName); } if (aDatasetFlags & ActiveOperationalDataset::kPanIdBit) { EncodeTlvType(tlvTypes, tlv::Type::kPanId); } if (aDatasetFlags & ActiveOperationalDataset::kPSKcBit) { EncodeTlvType(tlvTypes, tlv::Type::kPSKc); } if (aDatasetFlags & ActiveOperationalDataset::kSecurityPolicyBit) { EncodeTlvType(tlvTypes, tlv::Type::kSecurityPolicy); } return tlvTypes; } ByteArray CommissionerImpl::GetPendingOperationalDatasetTlvs(uint16_t aDatasetFlags) { auto tlvTypes = GetActiveOperationalDatasetTlvs(aDatasetFlags); if (aDatasetFlags & PendingOperationalDataset::kDelayTimerBit) { EncodeTlvType(tlvTypes, tlv::Type::kDelayTimer); } if (aDatasetFlags & PendingOperationalDataset::kPendingTimestampBit) { EncodeTlvType(tlvTypes, tlv::Type::kPendingTimestamp); } return tlvTypes; } Error CommissionerImpl::DecodeActiveOperationalDataset(ActiveOperationalDataset &aDataset, const ByteArray &aPayload) { Error error; tlv::TlvSet tlvSet; ActiveOperationalDataset dataset; // Clear all data fields dataset.mPresentFlags = 0; SuccessOrExit(error = tlv::GetTlvSet(tlvSet, aPayload)); if (auto activeTimeStamp = tlvSet[tlv::Type::kActiveTimestamp]) { uint64_t value; value = utils::Decode<uint64_t>(activeTimeStamp->GetValue()); dataset.mActiveTimestamp = Timestamp::Decode(value); dataset.mPresentFlags |= ActiveOperationalDataset::kActiveTimestampBit; } if (auto channel = tlvSet[tlv::Type::kChannel]) { const ByteArray &value = channel->GetValue(); dataset.mChannel.mPage = value[0]; dataset.mChannel.mNumber = utils::Decode<uint16_t>(value.data() + 1, value.size() - 1); dataset.mPresentFlags |= ActiveOperationalDataset::kChannelBit; } if (auto channelMask = tlvSet[tlv::Type::kChannelMask]) { SuccessOrExit(DecodeChannelMask(dataset.mChannelMask, channelMask->GetValue())); 
dataset.mPresentFlags |= ActiveOperationalDataset::kChannelMaskBit; } if (auto extendedPanId = tlvSet[tlv::Type::kExtendedPanId]) { dataset.mExtendedPanId = extendedPanId->GetValue(); dataset.mPresentFlags |= ActiveOperationalDataset::kExtendedPanIdBit; } if (auto meshLocalPrefix = tlvSet[tlv::Type::kNetworkMeshLocalPrefix]) { dataset.mMeshLocalPrefix = meshLocalPrefix->GetValue(); dataset.mPresentFlags |= ActiveOperationalDataset::kMeshLocalPrefixBit; } if (auto networkMasterKey = tlvSet[tlv::Type::kNetworkMasterKey]) { dataset.mNetworkMasterKey = networkMasterKey->GetValue(); dataset.mPresentFlags |= ActiveOperationalDataset::kNetworkMasterKeyBit; } if (auto networkName = tlvSet[tlv::Type::kNetworkName]) { dataset.mNetworkName = networkName->GetValueAsString(); dataset.mPresentFlags |= ActiveOperationalDataset::kNetworkNameBit; } if (auto panId = tlvSet[tlv::Type::kPanId]) { dataset.mPanId = utils::Decode<uint16_t>(panId->GetValue()); dataset.mPresentFlags |= ActiveOperationalDataset::kPanIdBit; } if (auto pskc = tlvSet[tlv::Type::kPSKc]) { dataset.mPSKc = pskc->GetValue(); dataset.mPresentFlags |= ActiveOperationalDataset::kPSKcBit; } if (auto securityPolicy = tlvSet[tlv::Type::kSecurityPolicy]) { auto &value = securityPolicy->GetValue(); dataset.mSecurityPolicy.mRotationTime = utils::Decode<uint16_t>(value); dataset.mSecurityPolicy.mFlags = {value.begin() + sizeof(uint16_t), value.end()}; dataset.mPresentFlags |= ActiveOperationalDataset::kSecurityPolicyBit; } aDataset = dataset; exit: return error; } Error CommissionerImpl::DecodePendingOperationalDataset(PendingOperationalDataset &aDataset, const coap::Response & aResponse) { Error error; tlv::TlvSet tlvSet; PendingOperationalDataset dataset; // Clear all data fields dataset.mPresentFlags = 0; SuccessOrExit(error = DecodeActiveOperationalDataset(dataset, aResponse.GetPayload())); SuccessOrExit(error = GetTlvSet(tlvSet, aResponse)); if (auto delayTimer = tlvSet[tlv::Type::kDelayTimer]) { dataset.mDelayTimer = 
utils::Decode<uint32_t>(delayTimer->GetValue()); dataset.mPresentFlags |= PendingOperationalDataset::kDelayTimerBit; } if (auto pendingTimestamp = tlvSet[tlv::Type::kPendingTimestamp]) { uint64_t value; value = utils::Decode<uint64_t>(pendingTimestamp->GetValue()); dataset.mPendingTimestamp = Timestamp::Decode(value); dataset.mPresentFlags |= PendingOperationalDataset::kPendingTimestampBit; } aDataset = dataset; exit: return error; } Error CommissionerImpl::DecodeChannelMask(ChannelMask &aChannelMask, const ByteArray &aBuf) { Error error; ChannelMask channelMask; size_t offset = 0; size_t length = aBuf.size(); while (offset < length) { ChannelMaskEntry entry; uint8_t entryLength; VerifyOrExit(offset + 2 <= length, error = ERROR_BAD_FORMAT("premature end of Channel Mask Entry")); entry.mPage = aBuf[offset++]; entryLength = aBuf[offset++]; VerifyOrExit(offset + entryLength <= length, error = ERROR_BAD_FORMAT("premature end of Channel Mask Entry")); entry.mMasks = {aBuf.begin() + offset, aBuf.begin() + offset + entryLength}; channelMask.emplace_back(entry); offset += entryLength; } ASSERT(offset == length); aChannelMask = channelMask; exit: return error; } Error CommissionerImpl::EncodeActiveOperationalDataset(coap::Request & aRequest, const ActiveOperationalDataset &aDataset) { Error error; if (aDataset.mPresentFlags & ActiveOperationalDataset::kActiveTimestampBit) { SuccessOrExit(error = AppendTlv(aRequest, {tlv::Type::kActiveTimestamp, aDataset.mActiveTimestamp.Encode()})); } if (aDataset.mPresentFlags & ActiveOperationalDataset::kChannelBit) { ByteArray value; utils::Encode(value, aDataset.mChannel.mPage); utils::Encode(value, aDataset.mChannel.mNumber); SuccessOrExit(error = AppendTlv(aRequest, {tlv::Type::kChannel, value})); } if (aDataset.mPresentFlags & ActiveOperationalDataset::kChannelMaskBit) { ByteArray value; SuccessOrExit(error = EncodeChannelMask(value, aDataset.mChannelMask)); SuccessOrExit(error = AppendTlv(aRequest, {tlv::Type::kChannelMask, 
value})); } if (aDataset.mPresentFlags & ActiveOperationalDataset::kExtendedPanIdBit) { SuccessOrExit(error = AppendTlv(aRequest, {tlv::Type::kExtendedPanId, aDataset.mExtendedPanId})); } if (aDataset.mPresentFlags & ActiveOperationalDataset::kMeshLocalPrefixBit) { SuccessOrExit(error = AppendTlv(aRequest, {tlv::Type::kNetworkMeshLocalPrefix, aDataset.mMeshLocalPrefix})); } if (aDataset.mPresentFlags & ActiveOperationalDataset::kNetworkMasterKeyBit) { SuccessOrExit(error = AppendTlv(aRequest, {tlv::Type::kNetworkMasterKey, aDataset.mNetworkMasterKey})); } if (aDataset.mPresentFlags & ActiveOperationalDataset::kNetworkNameBit) { SuccessOrExit(error = AppendTlv(aRequest, {tlv::Type::kNetworkName, aDataset.mNetworkName})); } if (aDataset.mPresentFlags & ActiveOperationalDataset::kPanIdBit) { SuccessOrExit(error = AppendTlv(aRequest, {tlv::Type::kPanId, aDataset.mPanId})); } if (aDataset.mPresentFlags & ActiveOperationalDataset::kPSKcBit) { SuccessOrExit(error = AppendTlv(aRequest, {tlv::Type::kPSKc, aDataset.mPSKc})); } if (aDataset.mPresentFlags & ActiveOperationalDataset::kSecurityPolicyBit) { ByteArray value; utils::Encode(value, aDataset.mSecurityPolicy.mRotationTime); value.insert(value.end(), aDataset.mSecurityPolicy.mFlags.begin(), aDataset.mSecurityPolicy.mFlags.end()); SuccessOrExit(error = AppendTlv(aRequest, {tlv::Type::kSecurityPolicy, value})); } exit: return error; } Error CommissionerImpl::EncodePendingOperationalDataset(coap::Request & aRequest, const PendingOperationalDataset &aDataset) { Error error; SuccessOrExit(error = EncodeActiveOperationalDataset(aRequest, aDataset)); if (aDataset.mPresentFlags & PendingOperationalDataset::kDelayTimerBit) { SuccessOrExit(error = AppendTlv(aRequest, {tlv::Type::kDelayTimer, aDataset.mDelayTimer})); } if (aDataset.mPresentFlags & PendingOperationalDataset::kPendingTimestampBit) { SuccessOrExit(error = AppendTlv(aRequest, {tlv::Type::kPendingTimestamp, aDataset.mPendingTimestamp.Encode()})); } exit: return error; 
} Error CommissionerImpl::EncodeChannelMask(ByteArray &aBuf, const ChannelMask &aChannelMask) { Error error; for (const auto &entry : aChannelMask) { VerifyOrExit(entry.mMasks.size() < tlv::kEscapeLength, error = ERROR_INVALID_ARGS("Channel Mask list is tool long (>={})", tlv::kEscapeLength)); utils::Encode(aBuf, entry.mPage); utils::Encode(aBuf, static_cast<uint8_t>(entry.mMasks.size())); aBuf.insert(aBuf.end(), entry.mMasks.begin(), entry.mMasks.end()); } exit: return error; } #if OT_COMM_CONFIG_CCM_ENABLE Error CommissionerImpl::DecodeBbrDataset(BbrDataset &aDataset, const coap::Response &aResponse) { Error error; tlv::TlvSet tlvSet; BbrDataset dataset; SuccessOrExit(error = GetTlvSet(tlvSet, aResponse)); if (auto triHostname = tlvSet[tlv::Type::kTriHostname]) { dataset.mTriHostname = triHostname->GetValueAsString(); dataset.mPresentFlags |= BbrDataset::kTriHostnameBit; } if (auto registrarHostname = tlvSet[tlv::Type::kRegistrarHostname]) { dataset.mRegistrarHostname = registrarHostname->GetValueAsString(); dataset.mPresentFlags |= BbrDataset::kRegistrarHostnameBit; } if (auto registrarIpv6Addr = tlvSet[tlv::Type::kRegistrarIpv6Address]) { Address addr; SuccessOrExit(error = addr.Set(registrarIpv6Addr->GetValue())); dataset.mRegistrarIpv6Addr = addr.ToString(); dataset.mPresentFlags |= BbrDataset::kRegistrarIpv6AddrBit; } aDataset = dataset; exit: return error; } Error CommissionerImpl::EncodeBbrDataset(coap::Request &aRequest, const BbrDataset &aDataset) { Error error; if (aDataset.mPresentFlags & BbrDataset::kTriHostnameBit) { SuccessOrExit(error = AppendTlv(aRequest, {tlv::Type::kTriHostname, aDataset.mTriHostname})); } if (aDataset.mPresentFlags & BbrDataset::kRegistrarHostnameBit) { SuccessOrExit(error = AppendTlv(aRequest, {tlv::Type::kRegistrarHostname, aDataset.mRegistrarHostname})); } if (aDataset.mPresentFlags & BbrDataset::kRegistrarIpv6AddrBit) { Address addr; SuccessOrExit(error = addr.Set(aDataset.mRegistrarIpv6Addr)); SuccessOrExit(error = 
AppendTlv(aRequest, {tlv::Type::kRegistrarIpv6Address, addr.GetRaw()})); } exit: return error; } ByteArray CommissionerImpl::GetBbrDatasetTlvs(uint16_t aDatasetFlags) { ByteArray tlvTypes; if (aDatasetFlags & BbrDataset::kTriHostnameBit) { tlvTypes.emplace_back(utils::to_underlying(tlv::Type::kTriHostname)); } if (aDatasetFlags & BbrDataset::kRegistrarHostnameBit) { tlvTypes.emplace_back(utils::to_underlying(tlv::Type::kRegistrarHostname)); } if (aDatasetFlags & BbrDataset::kRegistrarIpv6AddrBit) { tlvTypes.emplace_back(utils::to_underlying(tlv::Type::kRegistrarIpv6Address)); } return tlvTypes; } #endif // OT_COMM_CONFIG_CCM_ENABLE Error CommissionerImpl::DecodeCommissionerDataset(CommissionerDataset &aDataset, const coap::Response &aResponse) { Error error; tlv::TlvSet tlvSet; CommissionerDataset dataset; SuccessOrExit(error = GetTlvSet(tlvSet, aResponse)); if (auto sessionId = tlvSet[tlv::Type::kCommissionerSessionId]) { dataset.mSessionId = sessionId->GetValueAsUint16(); dataset.mPresentFlags |= CommissionerDataset::kSessionIdBit; } if (auto borderAgentLocator = tlvSet[tlv::Type::kBorderAgentLocator]) { dataset.mBorderAgentLocator = borderAgentLocator->GetValueAsUint16(); dataset.mPresentFlags |= CommissionerDataset::kBorderAgentLocatorBit; } if (auto steeringData = tlvSet[tlv::Type::kSteeringData]) { dataset.mSteeringData = steeringData->GetValue(); dataset.mPresentFlags |= CommissionerDataset::kSteeringDataBit; } if (auto aeSteeringData = tlvSet[tlv::Type::kAeSteeringData]) { dataset.mAeSteeringData = aeSteeringData->GetValue(); dataset.mPresentFlags |= CommissionerDataset::kAeSteeringDataBit; } if (auto nmkpSteeringData = tlvSet[tlv::Type::kNmkpSteeringData]) { dataset.mNmkpSteeringData = nmkpSteeringData->GetValue(); dataset.mPresentFlags |= CommissionerDataset::kNmkpSteeringDataBit; } if (auto joinerUdpPort = tlvSet[tlv::Type::kJoinerUdpPort]) { dataset.mJoinerUdpPort = joinerUdpPort->GetValueAsUint16(); dataset.mPresentFlags |= 
CommissionerDataset::kJoinerUdpPortBit; } if (auto aeUdpPort = tlvSet[tlv::Type::kAeUdpPort]) { dataset.mAeUdpPort = aeUdpPort->GetValueAsUint16(); dataset.mPresentFlags |= CommissionerDataset::kAeUdpPortBit; } if (auto nmkpUdpPort = tlvSet[tlv::Type::kNmkpUdpPort]) { dataset.mNmkpUdpPort = nmkpUdpPort->GetValueAsUint16(); dataset.mPresentFlags |= CommissionerDataset::kNmkpUdpPortBit; } aDataset = dataset; exit: return error; } Error CommissionerImpl::EncodeCommissionerDataset(coap::Request &aRequest, const CommissionerDataset &aDataset) { Error error; if (aDataset.mPresentFlags & CommissionerDataset::kSessionIdBit) { SuccessOrExit(error = AppendTlv(aRequest, {tlv::Type::kCommissionerSessionId, aDataset.mSessionId})); } if (aDataset.mPresentFlags & CommissionerDataset::kBorderAgentLocatorBit) { SuccessOrExit(error = AppendTlv(aRequest, {tlv::Type::kBorderAgentLocator, aDataset.mBorderAgentLocator})); } if (aDataset.mPresentFlags & CommissionerDataset::kSteeringDataBit) { SuccessOrExit(error = AppendTlv(aRequest, {tlv::Type::kSteeringData, aDataset.mSteeringData})); } if (aDataset.mPresentFlags & CommissionerDataset::kAeSteeringDataBit) { SuccessOrExit(error = AppendTlv(aRequest, {tlv::Type::kAeSteeringData, aDataset.mAeSteeringData})); } if (aDataset.mPresentFlags & CommissionerDataset::kNmkpSteeringDataBit) { SuccessOrExit(error = AppendTlv(aRequest, {tlv::Type::kNmkpSteeringData, aDataset.mNmkpSteeringData})); } if (aDataset.mPresentFlags & CommissionerDataset::kJoinerUdpPortBit) { SuccessOrExit(error = AppendTlv(aRequest, {tlv::Type::kJoinerUdpPort, aDataset.mJoinerUdpPort})); } if (aDataset.mPresentFlags & CommissionerDataset::kAeUdpPortBit) { SuccessOrExit(error = AppendTlv(aRequest, {tlv::Type::kAeUdpPort, aDataset.mAeUdpPort})); } if (aDataset.mPresentFlags & CommissionerDataset::kNmkpUdpPortBit) { SuccessOrExit(error = AppendTlv(aRequest, {tlv::Type::kNmkpUdpPort, aDataset.mNmkpUdpPort})); } exit: return error; } ByteArray 
CommissionerImpl::GetCommissionerDatasetTlvs(uint16_t aDatasetFlags) { ByteArray tlvTypes; if (aDatasetFlags & CommissionerDataset::kSessionIdBit) { tlvTypes.emplace_back(utils::to_underlying(tlv::Type::kCommissionerSessionId)); } if (aDatasetFlags & CommissionerDataset::kBorderAgentLocatorBit) { tlvTypes.emplace_back(utils::to_underlying(tlv::Type::kBorderAgentLocator)); } if (aDatasetFlags & CommissionerDataset::kSteeringDataBit) { tlvTypes.emplace_back(utils::to_underlying(tlv::Type::kSteeringData)); } if (aDatasetFlags & CommissionerDataset::kAeSteeringDataBit) { tlvTypes.emplace_back(utils::to_underlying(tlv::Type::kAeSteeringData)); } if (aDatasetFlags & CommissionerDataset::kNmkpSteeringDataBit) { tlvTypes.emplace_back(utils::to_underlying(tlv::Type::kNmkpSteeringData)); } if (aDatasetFlags & CommissionerDataset::kJoinerUdpPortBit) { tlvTypes.emplace_back(utils::to_underlying(tlv::Type::kJoinerUdpPort)); } if (aDatasetFlags & CommissionerDataset::kAeUdpPortBit) { tlvTypes.emplace_back(utils::to_underlying(tlv::Type::kAeUdpPort)); } if (aDatasetFlags & CommissionerDataset::kNmkpUdpPortBit) { tlvTypes.emplace_back(utils::to_underlying(tlv::Type::kNmkpUdpPort)); } return tlvTypes; } void CommissionerImpl::SendProxyMessage(ErrorHandler aHandler, const std::string &aDstAddr, const std::string &aUriPath) { Error error; Address dstAddr; coap::Request request{coap::Type::kConfirmable, coap::Code::kPost}; auto onResponse = [aHandler](const coap::Response *aResponse, Error aError) { aHandler(HandleStateResponse(aResponse, aError)); }; SuccessOrExit(error = dstAddr.Set(aDstAddr)); SuccessOrExit(error = request.SetUriPath(aUriPath)); SuccessOrExit(error = AppendTlv(request, {tlv::Type::kCommissionerSessionId, GetSessionId()})); #if OT_COMM_CONFIG_CCM_ENABLE if (IsCcmMode()) { SuccessOrExit(error = SignRequest(request)); } #endif mProxyClient.SendRequest(request, onResponse, dstAddr, kDefaultMmPort); exit: if (error != ErrorCode::kNone) { aHandler(error); } } void 
CommissionerImpl::HandleDatasetChanged(const coap::Request &aRequest) { LOG_INFO(LOG_REGION_MGMT, "received MGMT_DATASET_CHANGED.ntf from {}", aRequest.GetEndpoint()->GetPeerAddr().ToString()); mProxyClient.SendEmptyChanged(aRequest); mCommissionerHandler.OnDatasetChanged(); } void CommissionerImpl::HandlePanIdConflict(const coap::Request &aRequest) { Error error; tlv::TlvSet tlvSet; tlv::TlvPtr channelMaskTlv; tlv::TlvPtr panIdTlv; ChannelMask channelMask; uint16_t panId; std::string peerAddr = aRequest.GetEndpoint()->GetPeerAddr().ToString(); LOG_INFO(LOG_REGION_MGMT, "received MGMT_PANID_CONFLICT.ans from {}", peerAddr); mProxyClient.SendEmptyChanged(aRequest); SuccessOrExit(error = GetTlvSet(tlvSet, aRequest)); VerifyOrExit((channelMaskTlv = tlvSet[tlv::Type::kChannelMask]) != nullptr, error = ERROR_BAD_FORMAT("no valid Channel Mask TLV in MGMT_PANID_CONFLICT.ans")); VerifyOrExit((panIdTlv = tlvSet[tlv::Type::kPanId]) != nullptr, error = ERROR_BAD_FORMAT("no valid PAN ID TLV in MGMT_PANID_CONFLICT.ans")); SuccessOrExit(error = DecodeChannelMask(channelMask, channelMaskTlv->GetValue())); panId = panIdTlv->GetValueAsUint16(); mCommissionerHandler.OnPanIdConflict(peerAddr, channelMask, panId); exit: if (error != ErrorCode::kNone) { LOG_WARN(LOG_REGION_MGMT, "handle MGMT_PANID_CONFLICT.ans from {} failed: {}", peerAddr, error.ToString()); } } void CommissionerImpl::HandleEnergyReport(const coap::Request &aRequest) { Error error; tlv::TlvSet tlvSet; ChannelMask channelMask; ByteArray energyList; std::string peerAddr = aRequest.GetEndpoint()->GetPeerAddr().ToString(); LOG_INFO(LOG_REGION_MGMT, "received MGMT_ED_REPORT.ans from {}", peerAddr); mProxyClient.SendEmptyChanged(aRequest); SuccessOrExit(error = GetTlvSet(tlvSet, aRequest)); if (auto channelMaskTlv = tlvSet[tlv::Type::kChannelMask]) { SuccessOrExit(error = DecodeChannelMask(channelMask, channelMaskTlv->GetValue())); } if (auto eneryListTlv = tlvSet[tlv::Type::kEnergyList]) { energyList = 
eneryListTlv->GetValue(); } mCommissionerHandler.OnEnergyReport(peerAddr, channelMask, energyList); exit: if (error != ErrorCode::kNone) { LOG_WARN(LOG_REGION_MGMT, "handle MGMT_ED_REPORT.ans from {} failed: {}", peerAddr, error.ToString()); } } Error CommissionerImpl::MakeChannelMask(ByteArray &aBuf, uint32_t aChannelMask) { Error error; ChannelMaskEntry entry; if (kRadio915Mhz) { if (aChannelMask & kRadio915MhzOqpskChannelMask) { entry.mPage = kRadioChannelPage2; utils::Encode(entry.mMasks, kRadio915MhzOqpskChannelMask); } } else { if (aChannelMask & kRadio2P4GhzOqpskChannelMask) { entry.mPage = kRadioChannelPage0; utils::Encode(entry.mMasks, kRadio2P4GhzOqpskChannelMask); } } VerifyOrExit(!entry.mMasks.empty(), error = ERROR_INVALID_ARGS("no valid Channel Masks provided")); SuccessOrDie(EncodeChannelMask(aBuf, {entry})); exit: return error; } void CommissionerImpl::HandleRlyRx(const coap::Request &aRlyRx) { Error error; tlv::TlvSet tlvSet; tlv::TlvPtr tlv; std::string joinerPSKd; uint16_t joinerUdpPort; uint16_t joinerRouterLocator; ByteArray joinerIid; ByteArray joinerId; ByteArray dtlsRecords; SuccessOrExit(error = GetTlvSet(tlvSet, aRlyRx)); VerifyOrExit((tlv = tlvSet[tlv::Type::kJoinerUdpPort]) != nullptr, error = ERROR_BAD_FORMAT("no valid Joiner UDP Port TLV found")); joinerUdpPort = tlv->GetValueAsUint16(); VerifyOrExit((tlv = tlvSet[tlv::Type::kJoinerRouterLocator]) != nullptr, error = ERROR_BAD_FORMAT("no valid Joiner Router Locator TLV found")); joinerRouterLocator = tlv->GetValueAsUint16(); VerifyOrExit((tlv = tlvSet[tlv::Type::kJoinerIID]) != nullptr, error = ERROR_BAD_FORMAT("no valid Joiner IID TLV found")); joinerIid = tlv->GetValue(); VerifyOrExit((tlv = tlvSet[tlv::Type::kJoinerDtlsEncapsulation]) != nullptr, error = ERROR_BAD_FORMAT("no valid Joiner DTLS Encapsulation TLV found")); dtlsRecords = tlv->GetValue(); joinerId = joinerIid; joinerId[0] ^= kLocalExternalAddrMask; LOG_DEBUG(LOG_REGION_JOINER_SESSION, "received RLY_RX.ntf: joinerID={}, 
joinerRouterLocator={}, length={}", utils::Hex(joinerId), joinerRouterLocator, dtlsRecords.size()); joinerPSKd = mCommissionerHandler.OnJoinerRequest(joinerId); if (joinerPSKd.empty()) { LOG_INFO(LOG_REGION_JOINER_SESSION, "joiner(ID={}) is disabled", utils::Hex(joinerId)); ExitNow(error = ERROR_REJECTED("joiner(ID={}) is disabled", utils::Hex(joinerId))); } { auto it = mJoinerSessions.find(joinerId); if (it != mJoinerSessions.end() && it->second.Disabled()) { mJoinerSessions.erase(it); it = mJoinerSessions.end(); } if (it == mJoinerSessions.end()) { Address localAddr; SuccessOrExit(error = mBrClient.GetLocalAddr(localAddr)); it = mJoinerSessions .emplace(std::piecewise_construct, std::forward_as_tuple(joinerId), std::forward_as_tuple(*this, joinerId, joinerPSKd, joinerUdpPort, joinerRouterLocator, aRlyRx.GetEndpoint()->GetPeerAddr(), aRlyRx.GetEndpoint()->GetPeerPort(), localAddr, kListeningJoinerPort)) .first; auto &session = it->second; std::string peerAddr = session.GetPeerAddr().ToString(); LOG_DEBUG(LOG_REGION_JOINER_SESSION, "received a new joiner(ID={}) DTLS connection from [{}]:{}", utils::Hex(joinerId), peerAddr, session.GetPeerPort()); session.Connect(); LOG_INFO(LOG_REGION_JOINER_SESSION, "joiner session timer started, expiration-time={}", TimePointToString(session.GetExpirationTime())); mJoinerSessionTimer.Start(session.GetExpirationTime()); } ASSERT(it != mJoinerSessions.end()); auto &session = it->second; session.RecvJoinerDtlsRecords(dtlsRecords); } exit: if (error != ErrorCode::kNone) { LOG_ERROR(LOG_REGION_JOINER_SESSION, "failed to handle RLY_RX.ntf message: {}", error.ToString()); } } void CommissionerImpl::HandleJoinerSessionTimer(Timer &aTimer) { TimePoint nextShot; bool hasNextShot = false; auto now = Clock::now(); LOG_DEBUG(LOG_REGION_JOINER_SESSION, "joiner session timer triggered"); auto it = mJoinerSessions.begin(); while (it != mJoinerSessions.end()) { auto &session = it->second; if (now >= session.GetExpirationTime()) { it = 
mJoinerSessions.erase(it); LOG_INFO(LOG_REGION_JOINER_SESSION, "joiner session (joiner ID={}) removed", utils::Hex(session.GetJoinerId())); } else { if (!hasNextShot || session.GetExpirationTime() < nextShot) { nextShot = session.GetExpirationTime(); } hasNextShot = true; it++; } } if (hasNextShot) { aTimer.Start(nextShot); } } } // namespace commissioner } // namespace ot
34.654455
120
0.66564
[ "mesh", "vector" ]
811dd6d1e3a0e3a878c2d26c4399230551f6e189
11,389
hpp
C++
kernel/src/simulationTools/OSNSMultipleImpact.hpp
bremond/siconos
8deea56ff6779379f4f69e0376d24a81562a42d4
[ "Apache-2.0" ]
null
null
null
kernel/src/simulationTools/OSNSMultipleImpact.hpp
bremond/siconos
8deea56ff6779379f4f69e0376d24a81562a42d4
[ "Apache-2.0" ]
null
null
null
kernel/src/simulationTools/OSNSMultipleImpact.hpp
bremond/siconos
8deea56ff6779379f4f69e0376d24a81562a42d4
[ "Apache-2.0" ]
null
null
null
/* Siconos is a program dedicated to modeling, simulation and control * of non smooth dynamical systems. * * Copyright 2016 INRIA. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /*! \file OSNSMultipleImpact.hpp * \brief Linear Complementarity Problem formulation and solving */ #ifndef _OSNSMULTIPLEIMPACT_ #define _OSNSMULTIPLEIMPACT_ #include "LinearOSNS.hpp" #include <string> #define DEFAULT__tolImpact MACHINE_PREC #define DEFAULT_TOL_VEL MACHINE_PREC #define DEFAULT_TOL_ENER MACHINE_PREC /** Formalization and Resolution of a Multiple Impact Non-Smooth problem. \todo write a short introduction about OSNSMultipleImpact ... */ class OSNSMultipleImpact : public LinearOSNS { private: /** serialization hooks */ ACCEPT_SERIALIZATION(OSNSMultipleImpact); //! Time-like variable (Impulse) double _impulseVariable; //! Time variable double _timeVariable; //! Number of contacts (only the active contacts) unsigned int _nContact; //! Maximal number of steps for each computation unsigned int _nStepMax; //! Tolerance to define zero double _tolImpact; //! Type of the compliance model std::string _typeCompLaw; //Velocity of bodies during impact //SP::SiconosVector VelAllBody; // Relative velocity at all Interactions (with or without contact) //SP::SiconosVector VelAllIteractions; //! Relative velocity during impact (at the end of each calculation step) SP::SiconosVector _velocityContact; //! 
Relative velocity during impact (at the beginning of each calculation step) SP::SiconosVector _oldVelocityContact; //! Potential energy during impact (at the end of each calculation step) SP::SiconosVector _energyContact; //! Work done during the last compression phase at contact SP::SiconosVector _WorkcContact; //! Distribution vector to distribute the incremental impulse at contact SP::SiconosVector _distributionVector; /** State of contacts at the beginning of impact if *_stateContact[i] = 0 => no impact at this contact (at contact with positive relative velocity and no potential energy, may be the impact has been terminated at this contact) if *_stateContact[i] = 1 => impact takes place at this contact without potential energy (beginning of impact or repeating impact) if *_stateContact[i] = 2 => impact takes place with not-zero potential energy */ SP::IndexInt _stateContact; //!Stiffness at contacts SP::SiconosVector _Kcontact; //! Restitution coefficient of contacts SP::SiconosVector _restitutionContact; //! Elasticity coefficient of contacts SP::SiconosVector _elasticyCoefficientcontact; //! Incremental impulse at contacts SP::SiconosVector _deltaImpulseContact; //! Total impulse at contacts SP::SiconosVector _tolImpulseContact; //! Impulse at contacts for each update time SP::SiconosVector _impulseContactUpdate; //! Force at contacts SP::SiconosVector _forceContact; /** Flag to select the primary contact based on the relative velocity or on the potential energy at contacts if _selectPrimaConInVel = true => select the primary contact according to the relative velocity at contact if _selectPrimaConInVel = false => select the primary contact according to the potential energy at contact */ bool _selectPrimaConInVel; //! 
ID of the primary contact unsigned int _primaryContactId; /** Indicator about the selection of the primary contact true if primary contact is selected according to the potential energy false if primary contact is selected according to the relative velocity */ bool _isPrimaryContactEnergy; //! Relative velocity at primary contact double _relativeVelocityPrimaryContact; //! Potential energy at primary contact double _energyPrimaryContact; //! Step size for the iterative calculation double _deltaP; //! Name of file into which the datat is writen std::string _namefile; /** YesWriteData = true ==>save the data during impact YesWriteData = false ==> not save the data during impact */ bool _saveData; /** bool variable to set the step size for multiple impact computation If IsNumberOfStepsEst = true ==> estimate the step size from the state of the dynamic system before impact and the number of step needed Number of steps after which the data is saved */ unsigned int _nStepSave; //! If IsNumberOfStepsEst = false ==> user choose the step size //! Matrix on which the data during impact is saved SP::SiconosMatrix _DataMatrix; //! Number of points to be save during impacts unsigned int _sizeDataSave; /** indicator on the termination of the multiple impact process _IsImpactEnd = true: impact is terminated _IsImpactEnd = false: otherwise */ bool _IsImpactEnd; //! Tolerance to define a negligeble value for a velocity grandeur double _Tol_Vel; //! Tolerance to define a negligeable value for a potential energy grandeur double _Tol_Ener; //! Epsilon to define a zero value for relative velocity in termination condition double _ZeroVel_EndIm; //! Epsilon to define a zero value for potential energy in termination condition double _ZeroEner_EndIm; //! 
we start to save data from _stepMinSave to _stepMaxSave unsigned int _stepMinSave, _stepMaxSave; public: //!Default constructor OSNSMultipleImpact(); /** Constructor from data (step size is required here) * \param type the type of the compliance law * \param step step size estimated */ OSNSMultipleImpact(std::string type, double step); //!Destructor ~OSNSMultipleImpact(); /* To get the type of the compliance law at contact * \return std::string */ inline std::string get_typeCompLaw() const { return _typeCompLaw; }; /** To set the type of the compliance law * \param newTypeLaw */ void set_typeCompLaw(std::string newTypeLaw); /** To set the tolerance to define zero * \param newTolZero */ void setTolImpact(double newTolZero); /** To get the tolerance to define zero * \return double */ inline double getTolImpact() { return _tolImpact; }; /** To set the flag to save the data during impact or not * \param var */ void SetSaveData(bool var); /** To set the name for the output file * \param file_name */ void SetNameOutput(std::string file_name); /** To get step size * \return double */ inline double GetStepSize() { return _deltaP; }; /* To get the duration of multiple impacts process * \return double */ inline double DurationImpact() { return _timeVariable; }; /** To set the variable _nStepSave * \param var */ void SetNstepSave(unsigned int var); /** To set the maximal number of steps allowed for each computation * \param var */ void SetNstepMax(unsigned int var); /** Set number of points to be saved during impact * \param var */ void SetSizeDataSave(unsigned int var); /** Set tolerence to define whether or not a velocity is zero * \param var */ void SetTolVel(double var); /** Set tolerence to define whether or not a potential energy is zero * \param var */ void SetTolEner(double var); /** Set epsilon _ZeroVel_EndIm * \param var */ void SetZeroVelEndImp(double var); /** Set epsilon _ZeroEner_EndIm * \param var */ void SetZeroEnerEndImp(double var); /** Set the step number 
to start the data save and step number to stop save * \param min * \param max */ void SetStepMinMaxSave(unsigned int min, unsigned int max); /** To compare a double number with zero * \param var * \return bool */ bool isZero(double var); /** To compare a velocity value with zero * \param var * \return bool */ bool isVelNegative(double var); /** To compare an energy value with zero * \param var * \return bool */ bool isEnerZero(double var); /** To select the pramary contact */ void SelectPrimaContact(); /** Calculate the vector of distributing rule */ void Compute_distributionVector(); /** Compute the normal imulse at contacts */ void ComputeImpulseContact(); /** Compute the relative velocity at contacts */ void Compute_velocityContact(); /** Compute the potential energy at contacts during each computation step */ void Compute_energyContact(); /** Compute the velocity of the bodies during impact */ void UpdateDuringImpact(); /** Run the iterative procedure to solve the multiple impact problem */ void ComputeImpact(); /** Post-compute for multiple impacts */ void PostComputeImpact(); /** Check if the multiple impacts process is terminated or not * \return bool */ bool IsMulImpactTerminate(); /** To allocate the memory */ void AllocateMemory(); /** To build the vector of stiffnesses and restitution coefficient at contacts */ void BuildParaContact(); /** To get the velocity of bodies, relative velocity and potential energy at the beginning of impact */ void InitializeInput(); /** To check the state of contacts during impact */ void Check_stateContact(); /** Pre-compute for multiple impacs */ void PreComputeImpact(); /** To get the primary contact according to the relative velocity In this case, the primary contact correspond to the contact at which the relative velocity is minimum (the relative velocity for two approching bodies is negative so the magnitude of the relative velocity at the primary contact is maximum) */ void PrimConVelocity(); /** To get the primary 
contact according to the potential energy. In this case, the primary contact corresponds to the one at which the potential energy is maximum */ void PrimConEnergy(); /** To decide if the primary contact is selected according to the relative velocity or to the * potential energy. The first case happens when there is no potential energy at any contact * \return bool */ bool IsEnermaxZero(); /** Verify if the minimum relative velocity at contacts is negative or not * \return bool */ bool IsVcminNegative(); /** compute the unknown post-impact relative velocity and post-impact impulse * \param time * \return int */ int compute(double time); /**initialize * \param sim */ void initialize(SP::Simulation sim); /** print the data to the screen */ void display() const; /** To write a SiconosVector into a matrix * \param v * \param row position starting to write * \param col position starting to write */ void WriteVectorIntoMatrix(const SiconosVector& v, const unsigned int row, const unsigned int col); /** Save data for each step * \param i pointer to be save */ void SaveDataOneStep(unsigned int i); /** Estimate size of data matrix * \return unsigned int */ unsigned int EstimateNdataCols(); ACCEPT_STD_VISITORS(); }; #endif
31.724234
180
0.722539
[ "vector", "model" ]
812333564e6642d3ded769683e63a91e371d211b
761
hpp
C++
maxon_epos_driver/include/maxon_epos_driver/EposManager.hpp
swankun/maxon_epos_ros
30b0df5af7e28b63b74008c9ad2d27e912cb6045
[ "MIT" ]
12
2019-06-10T20:21:53.000Z
2022-03-12T14:44:42.000Z
maxon_epos_driver/include/maxon_epos_driver/EposManager.hpp
swankun/maxon_epos_ros
30b0df5af7e28b63b74008c9ad2d27e912cb6045
[ "MIT" ]
5
2019-08-09T11:29:46.000Z
2022-01-31T07:28:57.000Z
maxon_epos_driver/include/maxon_epos_driver/EposManager.hpp
swankun/maxon_epos_ros
30b0df5af7e28b63b74008c9ad2d27e912cb6045
[ "MIT" ]
6
2020-05-28T06:48:11.000Z
2022-02-25T11:12:03.000Z
/** * @file EposManager * @brief * @author arwtyxouymz * @date 2019-06-03 16:08:23 */ #ifndef _EposManager_HPP #define _EposManager_HPP #include <string> #include <vector> #include <ros/ros.h> #include "maxon_epos_driver/EposMotor.hpp" #include "maxon_epos_msgs/MotorStates.h" class EposManager { public: EposManager(); virtual ~EposManager(); bool init(ros::NodeHandle &root_nh, ros::NodeHandle &motors_nh, const std::vector<std::string> &motor_names); void read(); void write(const maxon_epos_msgs::MotorStates::ConstPtr& msg); private: std::vector<std::shared_ptr<EposMotor>> m_motors; ros::Publisher m_all_motor_publisher; ros::Subscriber m_all_motor_subscriber; }; #endif // _EposManager_HPP
20.567568
67
0.706965
[ "vector" ]
8125349eedf6092a333395a038eba74b7929692e
3,195
cpp
C++
pid/thermalcontroller.cpp
openbmc/phosphor-pid-control
457993f836338aa0c13a32af803fcbc5227c81f3
[ "Apache-2.0" ]
9
2018-09-19T10:26:53.000Z
2020-11-09T23:02:16.000Z
pid/thermalcontroller.cpp
openbmc/phosphor-pid-control
457993f836338aa0c13a32af803fcbc5227c81f3
[ "Apache-2.0" ]
17
2018-08-13T10:34:26.000Z
2022-02-08T02:24:12.000Z
pid/thermalcontroller.cpp
openbmc/phosphor-pid-control
457993f836338aa0c13a32af803fcbc5227c81f3
[ "Apache-2.0" ]
9
2019-03-23T03:08:32.000Z
2021-04-25T03:39:34.000Z
/** * Copyright 2017 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "thermalcontroller.hpp" #include "errors/exception.hpp" #include "util.hpp" #include "zone.hpp" #include <algorithm> #include <cmath> #include <iostream> namespace pid_control { ThermalType getThermalType(const std::string& typeString) { /* Currently it only supports the two types. */ return (typeString == "temp") ? ThermalType::absolute : ThermalType::margin; } bool isThermalType(const std::string& typeString) { static const std::vector<std::string> thermalTypes = {"temp", "margin"}; return std::count(thermalTypes.begin(), thermalTypes.end(), typeString); } std::unique_ptr<PIDController> ThermalController::createThermalPid( ZoneInterface* owner, const std::string& id, const std::vector<std::string>& inputs, double setpoint, const ec::pidinfo& initial, const ThermalType& type) { // ThermalController requires at least 1 input if (inputs.empty()) { throw ControllerBuildException("Thermal controller missing inputs"); return nullptr; } auto thermal = std::make_unique<ThermalController>(id, inputs, type, owner); ec::pid_info_t* info = thermal->getPIDInfo(); thermal->setSetpoint(setpoint); initializePIDStruct(info, initial); return thermal; } // bmc_host_sensor_value_double double ThermalController::inputProc(void) { double value; const double& (*compare)(const double&, const double&); if (type == ThermalType::margin) { value = std::numeric_limits<double>::max(); compare = std::min<double>; } 
else { value = std::numeric_limits<double>::lowest(); compare = std::max<double>; } bool acceptable = false; for (const auto& in : _inputs) { double cachedValue = _owner->getCachedValue(in); // Less than 0 is perfectly OK for temperature, but must not be NAN if (!(std::isfinite(cachedValue))) { continue; } value = compare(value, cachedValue); acceptable = true; } if (!acceptable) { // While not optimal, zero is better than garbage value = 0; } return value; } // bmc_get_setpt double ThermalController::setptProc(void) { double setpoint = getSetpoint(); /* TODO(venture): Thermal setpoint invalid? */ #if 0 if (-1 == setpoint) { return 0.0f; } else { return setpoint; } #endif return setpoint; } // bmc_set_pid_output void ThermalController::outputProc(double value) { _owner->addSetPoint(value); return; } } // namespace pid_control
24.204545
80
0.663537
[ "vector" ]
81258bf593034c12f5aa33ccda9f7aca874b07b4
3,266
cpp
C++
tags/20100603_v0.2.1/src/singlePlayer/db/sqlite/dao/CPfConfederationsDAOSQLite.cpp
dividio/projectfootball
3c0b94937de2e3cd6e7daf9d3b4942fda974f20c
[ "Zlib" ]
null
null
null
tags/20100603_v0.2.1/src/singlePlayer/db/sqlite/dao/CPfConfederationsDAOSQLite.cpp
dividio/projectfootball
3c0b94937de2e3cd6e7daf9d3b4942fda974f20c
[ "Zlib" ]
null
null
null
tags/20100603_v0.2.1/src/singlePlayer/db/sqlite/dao/CPfConfederationsDAOSQLite.cpp
dividio/projectfootball
3c0b94937de2e3cd6e7daf9d3b4942fda974f20c
[ "Zlib" ]
null
null
null
/****************************************************************************** * Copyright (C) 2008 - Ikaro Games www.ikarogames.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of the GNU General Public License * * as published by the Free Software Foundation; either version 2 * * of the License, or (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the Free Software * * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * * * * * generated by dia/DAOcodegen.py * * Version: 1.20 * ******************************************************************************/ #include <iostream> #include <sstream> #include "CPfConfederationsDAOSQLite.h" CPfConfederationsDAOSQLite::CPfConfederationsDAOSQLite(sqlite3 *database) : CPfConfederationsDAOSQLiteEntity(database) { } CPfConfederationsDAOSQLite::~CPfConfederationsDAOSQLite() { } CPfConfederations* CPfConfederationsDAOSQLite::findByXConfederation(int XConfederation) { std::ostringstream stream; stream << XConfederation; return findByXConfederation(stream.str()); } CPfConfederations* CPfConfederationsDAOSQLite::findByXConfederation(const std::string &XConfederation) { std::string sql( "SELECT * " "FROM PF_CONFEDERATIONS "); sql += "WHERE X_CONFEDERATION='"+XConfederation+"'"; return loadRegister(sql); } std::vector<CPfConfederations*>* CPfConfederationsDAOSQLite::findAll() { return loadVector("SELECT * FROM PF_CONFEDERATIONS"); } std::vector<CPfConfederations*>* CPfConfederationsDAOSQLite::findByXFKSeasonWithLeague(int XFKSeason) { std::ostringstream stream; stream << XFKSeason; return 
findByXFKSeasonWithLeague(stream.str()); } std::vector<CPfConfederations*>* CPfConfederationsDAOSQLite::findByXFKSeasonWithLeague(const std::string &XFKSeason) { std::string sql( "SELECT DISTINCT CF.* " "FROM PF_CONFEDERATIONS CF " " JOIN PF_COUNTRIES CU ON CU.X_FK_CONFEDERATION=CF.X_CONFEDERATION " " JOIN PF_COMPETITIONS CO ON CO.X_FK_COUNTRY=CU.X_COUNTRY " " JOIN PF_COMPETITIONS_BY_SEASON CBS ON CBS.X_FK_COMPETITION=CO.X_COMPETITION "); sql += "WHERE CBS.X_FK_SEASON='"+XFKSeason+"'"; return loadVector(sql); }
42.973684
116
0.558175
[ "vector" ]
81259aed5655250b03e71f3806e26e1f1e2494a5
2,996
cc
C++
src/Vehicle/Actuators/ActuatorActions.cc
farhangnaderi/qgroundcontrol
fcf6f4ec1043a632072e979e3708857c706ac171
[ "Apache-2.0" ]
2,133
2015-01-04T03:10:22.000Z
2022-03-31T01:51:07.000Z
src/Vehicle/Actuators/ActuatorActions.cc
farhangnaderi/qgroundcontrol
fcf6f4ec1043a632072e979e3708857c706ac171
[ "Apache-2.0" ]
6,166
2015-01-02T18:47:42.000Z
2022-03-31T03:44:10.000Z
src/Vehicle/Actuators/ActuatorActions.cc
farhangnaderi/qgroundcontrol
fcf6f4ec1043a632072e979e3708857c706ac171
[ "Apache-2.0" ]
2,980
2015-01-01T03:09:18.000Z
2022-03-31T04:13:55.000Z
/**************************************************************************** * * (c) 2021 QGROUNDCONTROL PROJECT <http://www.qgroundcontrol.org> * * QGroundControl is licensed according to the terms in the file * COPYING.md in the root of the source code directory. * ****************************************************************************/ #include "ActuatorActions.h" #include "QGCApplication.h" using namespace ActuatorActions; QString Config::typeToLabel() const { switch (type) { case Type::beep: return QCoreApplication::translate("ActuatorAction", "Beep"); case Type::set3DModeOn: return QCoreApplication::translate("ActuatorAction", "3D mode: On"); case Type::set3DModeOff: return QCoreApplication::translate("ActuatorAction", "3D mode: Off"); case Type::setSpinDirection1: return QCoreApplication::translate("ActuatorAction", "Set Spin Direction 1"); case Type::setSpinDirection2: return QCoreApplication::translate("ActuatorAction", "Set Spin Direction 2"); } return ""; } Action::Action(QObject *parent, const Config &action, const QString &label, int outputFunction, Vehicle *vehicle) : _label(label), _outputFunction(outputFunction), _type(action.type), _vehicle(vehicle) { } void Action::trigger() { if (_commandInProgress) { return; } sendMavlinkRequest(); } void Action::ackHandlerEntry(void* resultHandlerData, int compId, MAV_RESULT commandResult, uint8_t progress, Vehicle::MavCmdResultFailureCode_t failureCode) { Action* action = (Action*)resultHandlerData; action->ackHandler(commandResult, failureCode); } void Action::ackHandler(MAV_RESULT commandResult, Vehicle::MavCmdResultFailureCode_t failureCode) { _commandInProgress = false; if (failureCode != Vehicle::MavCmdResultFailureNoResponseToCommand && commandResult != MAV_RESULT_ACCEPTED) { qgcApp()->showAppMessage(tr("Actuator action command failed")); } } void Action::sendMavlinkRequest() { qCDebug(ActuatorsConfigLog) << "Sending actuator action, function:" << _outputFunction << "type:" << (int)_type; 
_vehicle->sendMavCommandWithHandler( ackHandlerEntry, // Ack callback this, // Ack callback data MAV_COMP_ID_AUTOPILOT1, // the ID of the autopilot MAV_CMD_CONFIGURE_ACTUATOR, // the mavlink command (int)_type, // action type 0, // unused parameter 0, // unused parameter 0, // unused parameter 1000+_outputFunction, // function 0, // unused parameter 0); _commandInProgress = true; } ActionGroup::ActionGroup(QObject *parent, const QString &label, Config::Type type) : _label(label), _type(type) { }
37.45
116
0.60514
[ "3d" ]
812c91c67b2549fb77d25472c59140b1ace7da16
5,951
hpp
C++
homeKeeper/workFunctions.hpp
siweilxy/homeKeeper
4f87cf7d1e48caf6006b5a6dc9aa86e96b7fb061
[ "Apache-2.0" ]
null
null
null
homeKeeper/workFunctions.hpp
siweilxy/homeKeeper
4f87cf7d1e48caf6006b5a6dc9aa86e96b7fb061
[ "Apache-2.0" ]
null
null
null
homeKeeper/workFunctions.hpp
siweilxy/homeKeeper
4f87cf7d1e48caf6006b5a6dc9aa86e96b7fb061
[ "Apache-2.0" ]
null
null
null
/* * workFunctions.hpp * * Created on: 30 Mar 2020 * Author: siwei */ #ifndef HOMEKEEPER_WORKFUNCTIONS_HPP_ #define HOMEKEEPER_WORKFUNCTIONS_HPP_ #include "curl.hpp" #include "EmailSender.hpp" #include "tblEmailInfo.hpp" #include "file.hpp" #include "tblIpInfo.hpp" #include "downloadfile.hpp" #include <map> #include "hmLog.hpp" #define USERNAME "siweilxy@163.com" #define PASSWORD "HGIENKTKTIOBXVCV" #define SMTPSERVER "smtp.163.com" #define SMTPPORT ":25" #define RECIPIENT "<251826184@qq.com>" #define MAILFROM "<siweilxy@163.com>" std::string userName = USERNAME; std::string passowrd = PASSWORD; std::string smtpServer = SMTPSERVER; std::string recipient = RECIPIENT; std::string mailFrom = MAILFROM; void* getIp (void *para) { std::string resNew = "new"; int ret = 0; std::vector<ipInfo_t> infos; while (1) { tblIpInfo ipInfo; ret = ipInfo.init (); if (ret != SUCCESS) { ERROR("init failed"); continue; } while (1) { sleep (2); resNew = curlUtil ("icanhazip.com"); if (resNew == "failed") { ERROR("curlUtil failed"); continue; } DEBUG("ip[%s]",resNew.c_str()); infos.clear (); auto ret = ipInfo.getRes (infos); if (ret != SUCCESS) { ERROR("ipInfo.getRes failed"); continue; } if (infos.empty () || infos[0].ip != resNew) { ipInfo.insertToDb (resNew); ERROR("ip insert:%s", resNew.c_str()); } } } return nullptr; } void* sendEmail (void *para) { std::vector<ipInfo_t> infos; while (1) { tblEmailInfo emailInfo; int ret = emailInfo.init (); if (ret != SUCCESS) { ERROR("emailInfo.init () failed"); continue; } tblIpInfo ipInfo; ret = ipInfo.init (); if (ret != SUCCESS) { ERROR("ipInfo.init () failed"); continue; } while (1) { sleep (5); infos.clear (); auto ret = ipInfo.getRes (infos); if (ret == FAILED) { ERROR("ipInfo.getRes failed"); continue; } bool result = true; if (infos.empty ()) { continue; } else if (infos[0].send_flag == "0") { ERROR("ip is [%s]", infos[0].ip.c_str()); auto res = emailInfo.getRes (); for (auto iter : res) { EmailSender sendMail; sendMail.SetSmtpServer 
(iter.userName, iter.passwd, iter.smtpServer); sendMail.SetSendName (iter.mailFrom); sendMail.SetSendMail (iter.mailFrom); sendMail.AddRecvMail (iter.recipient); ERROR("[%s] sended", iter.recipient.c_str()); sendMail.SetSubject ("ip changed"); sendMail.SetBodyContent ( infos[0].ip + " url is " + infos[0].ip + ":19870"); //sendMail.AddAttachment("/home/siwei/github/homeKeeper/build/Makefile"); result = sendMail.SendMail () | result; } if (result) { ipInfo.updateToDb (infos[0].rec_id); WARN( "update [%s] send_flag to 1" , infos[0].ip.c_str()); } else { break; } } } } return nullptr; } void* sendFile (void *para) { std::vector<downloadFile_t> downloadFiles; while (1) { tblEmailInfo emailInfo; int ret = emailInfo.init (); if (ret != SUCCESS) { ERROR("emailInfo.init () failed"); continue; } downloadFile dlfile; ret = dlfile.init (); if (ret != SUCCESS) { ERROR("dlfile.init () failed"); continue; } while (1) { sleep (5); auto ret = dlfile.getRes (downloadFiles); if (ret == FAILED) { ERROR( "dlfile.getRes () failed"); continue; } for (auto dlFile : downloadFiles) { WARN ( "get [%s]" , dlFile.fileName.c_str()); } bool result = true; auto res = emailInfo.getRes (); for (auto iter : res) { EmailSender sendMail; sendMail.SetSmtpServer (iter.userName, iter.passwd, iter.smtpServer); sendMail.SetSendName (iter.mailFrom); sendMail.SetSendMail (iter.mailFrom); sendMail.AddRecvMail (iter.recipient); sendMail.SetSubject (downloadFiles[0].fileName); sendMail.AddAttachment (downloadFiles[0].path); result = sendMail.SendMail () & result; } if (result) { dlfile.updateToDb (downloadFiles[0].fileName); WARN ( "update [%s] send_flag to 1" , downloadFiles[0].fileName.c_str()); } else { break; } } sleep (3600 * 24); } return nullptr; } std::map<std::string, std::function<void* (void*)>> funcMap = { { "getIp", getIp }, { "sendEmail", sendEmail } //{ "sendFile", sendFile } }; #endif /* HOMEKEEPER_WORKFUNCTIONS_HPP_ */
26.448889
93
0.458242
[ "vector" ]
8133422ab6ed7414a00489812cf1c5df4b186f3a
3,960
cc
C++
tensorpipe/transport/shm/reactor.cc
heliarmk/tensorpipe
a58b5435d1b06a1dcc6ab584c41aa8d7d29470af
[ "BSD-3-Clause" ]
189
2020-02-05T23:45:38.000Z
2022-03-25T14:31:51.000Z
tensorpipe/transport/shm/reactor.cc
heliarmk/tensorpipe
a58b5435d1b06a1dcc6ab584c41aa8d7d29470af
[ "BSD-3-Clause" ]
372
2020-02-06T11:06:07.000Z
2022-03-30T06:07:00.000Z
tensorpipe/transport/shm/reactor.cc
heliarmk/tensorpipe
a58b5435d1b06a1dcc6ab584c41aa8d7d29470af
[ "BSD-3-Clause" ]
55
2020-02-06T15:01:32.000Z
2022-03-28T03:56:34.000Z
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <tensorpipe/transport/shm/reactor.h> #include <tensorpipe/common/shm_ringbuffer.h> #include <tensorpipe/common/system.h> namespace tensorpipe { namespace transport { namespace shm { namespace { void writeToken(Reactor::Producer& producer, Reactor::TToken token) { for (;;) { auto rv = producer.write(&token, sizeof(token)); if (rv == -EAGAIN) { // There's contention on the spin-lock, wait for it by retrying. std::this_thread::yield(); continue; } if (rv == -ENODATA) { // The ringbuffer is full. Retrying should typically work, but might lead // to a deadlock if, for example, a reactor thread is trying to write a // token to its own ringbuffer, as then it would be stuck here and never // proceed to consume data from the ringbuffer. This could also happen // across multiple processes. This case seems remote enough, and a proper // solution rather complicated, that we're going to take that risk... std::this_thread::yield(); continue; } TP_DCHECK_EQ(rv, sizeof(token)); break; } } } // namespace Reactor::Reactor() { Error error; std::tie(error, headerSegment_, dataSegment_, rb_) = createShmRingBuffer<kNumRingbufferRoles>(kSize); TP_THROW_ASSERT_IF(error) << "Couldn't allocate ringbuffer for reactor: " << error.what(); startThread("TP_SHM_reactor"); } void Reactor::close() { if (!closed_.exchange(true)) { stopBusyPolling(); } } void Reactor::join() { close(); if (!joined_.exchange(true)) { joinThread(); } } Reactor::~Reactor() { join(); } Reactor::TToken Reactor::add(TFunction fn) { std::unique_lock<std::mutex> lock(mutex_); TToken token; // Either reuse a token or generate a new one. 
auto it = reusableTokens_.begin(); if (it != reusableTokens_.end()) { token = *it; reusableTokens_.erase(it); } else { // If there are no reusable tokens, the next token is always equal // to the number of tokens in use + 1. token = functions_.size(); } // Ensure there is enough space in the functions vector. if (functions_.size() <= token) { functions_.resize(token + 1); } functions_[token] = std::move(fn); functionCount_++; return token; } void Reactor::remove(TToken token) { std::unique_lock<std::mutex> lock(mutex_); functions_[token] = nullptr; reusableTokens_.insert(token); functionCount_--; } std::tuple<int, int> Reactor::fds() const { return std::make_tuple(headerSegment_.getFd(), dataSegment_.getFd()); } bool Reactor::pollOnce() { Consumer reactorConsumer(rb_); uint32_t token; auto ret = reactorConsumer.read(&token, sizeof(token)); if (ret == -ENODATA) { return false; } TP_THROW_SYSTEM_IF(ret < 0, -ret); TFunction fn; // Make copy of std::function so we don't need // to hold the lock while executing it. { std::unique_lock<std::mutex> lock(mutex_); TP_DCHECK_LT(token, functions_.size()); fn = functions_[token]; } if (fn) { fn(); } return true; } bool Reactor::readyToClose() { return functionCount_ == 0; } Reactor::Trigger::Trigger(Fd headerFd, Fd dataFd) { // The header and data segment objects take over ownership // of file descriptors. Release them to avoid double close. Error error; std::tie(error, headerSegment_, dataSegment_, rb_) = loadShmRingBuffer<kNumRingbufferRoles>( std::move(headerFd), std::move(dataFd)); TP_THROW_ASSERT_IF(error) << "Couldn't access ringbuffer of remote reactor: " << error.what(); } void Reactor::Trigger::run(TToken token) { Producer producer(rb_); writeToken(producer, token); } } // namespace shm } // namespace transport } // namespace tensorpipe
24.75
79
0.67096
[ "vector" ]
81341c7cb320d889fe33394b81a0a722a8c96b54
21,596
cpp
C++
ODBCConnectorWin32/ODBCConnector.cpp
YuhichYOC/ODBCConnectorWin32
600b5245d9e39359ee9204e93073849b743e264c
[ "Apache-2.0" ]
null
null
null
ODBCConnectorWin32/ODBCConnector.cpp
YuhichYOC/ODBCConnectorWin32
600b5245d9e39359ee9204e93073849b743e264c
[ "Apache-2.0" ]
null
null
null
ODBCConnectorWin32/ODBCConnector.cpp
YuhichYOC/ODBCConnectorWin32
600b5245d9e39359ee9204e93073849b743e264c
[ "Apache-2.0" ]
null
null
null
/* * * ODBCConnector.cpp * * Copyright 2016 Yuichi Yoshii * 吉井雄一 @ 吉井産業 you.65535.kir@gmail.com * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "stdafx.h" #include "ODBCConnector.h" IBinder::ColumnType RNumberBinder::GetType() { return IBinder::ColumnType::NUMBER; } void RNumberBinder::SetIndex(int arg) { index = arg; } int RNumberBinder::GetIndex() { return index; } void RNumberBinder::SetSize(SQLLEN arg) { size = arg; } SQLLEN RNumberBinder::GetSize() { return size; } void RNumberBinder::SetScale(SQLSMALLINT arg) { scale = arg; } SQLSMALLINT RNumberBinder::GetScale() { return scale; } int RNumberBinder::GetValue() { return value; } void RNumberBinder::Bind(SQLHANDLE statement) { bindSuccess = false; nullIndicator = sizeof(value); returnCode = SQLBindCol( statement, index + 1, SQL_C_CHAR, (SQLPOINTER)&value, size, &nullIndicator); if (returnCode != SQL_SUCCESS && returnCode != SQL_SUCCESS_WITH_INFO) { WCharString wcs; errorMessage = wcs.SysErrMessage(); } else { bindSuccess = true; } } SQLRETURN RNumberBinder::GetReturnCode() { return returnCode; } string RNumberBinder::GetErrorMessage() { return errorMessage; } bool RNumberBinder::GetBindSuccess() { return bindSuccess; } RNumberBinder::RNumberBinder() { index = 0; size = 0; scale = 0; value = 0; nullIndicator = 0; returnCode = 0; bindSuccess = false; } RNumberBinder::~RNumberBinder() { } IBinder::ColumnType RStringBinder::GetType() { return IBinder::ColumnType::STRING; } void RStringBinder::SetIndex(int arg) { 
index = arg; } int RStringBinder::GetIndex() { return index; } void RStringBinder::SetSize(SQLLEN arg) { size = arg; } SQLLEN RStringBinder::GetSize() { return size; } void RStringBinder::SetScale(SQLSMALLINT arg) { scale = arg; } SQLSMALLINT RStringBinder::GetScale() { return scale; } unique_ptr<char> RStringBinder::GetValue() { return move(value); } void RStringBinder::Bind(SQLHANDLE statement) { bindSuccess = false; value = unique_ptr<char>(new char[size]); nullIndicator = sizeof(char) * size; returnCode = SQLBindCol( statement, index + 1, SQL_C_CHAR, (SQLPOINTER)value.get(), sizeof(char) * size, &nullIndicator); if (returnCode != SQL_SUCCESS && returnCode != SQL_SUCCESS_WITH_INFO) { WCharString wcs; errorMessage = wcs.SysErrMessage(); } else { bindSuccess = true; } } SQLRETURN RStringBinder::GetReturnCode() { return returnCode; } string RStringBinder::GetErrorMessage() { return errorMessage; } bool RStringBinder::GetBindSuccess() { return bindSuccess; } RStringBinder::RStringBinder() { index = 0; size = 0; scale = 0; value = nullptr; nullIndicator = 0; returnCode = 0; bindSuccess = false; } RStringBinder::~RStringBinder() { } void ReadingBinder::AddBinder(IBinder * arg) { columns.push_back(arg); } bool ReadingBinder::Bind(SQLHANDLE statement) { for (size_t i = 0; i < columns.size(); i++) { columns.at(i)->Bind(statement); if (!columns.at(i)->GetBindSuccess()) { returnCode = columns.at(i)->GetReturnCode(); errorMessage = columns.at(i)->GetErrorMessage(); return false; } } return true; } SQLRETURN ReadingBinder::GetReturnCode() { return returnCode; } string ReadingBinder::GetErrorMessage() { return errorMessage; } vector<IBinder *> ReadingBinder::Get() { return columns; } ReadingBinder::ReadingBinder() { returnCode = 0; } ReadingBinder::~ReadingBinder() { for (size_t i = 0; i < columns.size(); i++) { delete columns.at(i); } } void WNumberBinder::BindParam(SQLHANDLE statement) { bindSuccess = false; returnCode = SQLBindParameter( statement, index + 1, 
SQL_PARAM_INPUT, SQL_C_SHORT, SQL_INTEGER, size, scale, (SQLPOINTER)&value, 0, &size); if (returnCode != SQL_SUCCESS && returnCode != SQL_SUCCESS_WITH_INFO) { WCharString wcs; errorMessage = wcs.SysErrMessage(); } else { bindSuccess = true; } } IBinder::ColumnType WNumberBinder::GetType() { return IBinder::ColumnType::NUMBER; } void WNumberBinder::SetIndex(int arg) { index = arg; } int WNumberBinder::GetIndex() { return index; } void WNumberBinder::SetSize(SQLLEN arg) { size = arg; } SQLLEN WNumberBinder::GetSize() { return size; } void WNumberBinder::SetScale(SQLSMALLINT arg) { scale = arg; } SQLSMALLINT WNumberBinder::GetScale() { return scale; } void WNumberBinder::SetValue(int arg) { value = arg; } void WNumberBinder::Bind(SQLHANDLE statement) { BindParam(statement); } SQLRETURN WNumberBinder::GetReturnCode() { return returnCode; } string WNumberBinder::GetErrorMessage() { return errorMessage; } bool WNumberBinder::GetBindSuccess() { return bindSuccess; } WNumberBinder::WNumberBinder() { index = 0; size = 0; scale = 0; value = 0; nullIndicator = 0; returnCode = 0; bindSuccess = false; } WNumberBinder::~WNumberBinder() { } void WStringBinder::BindParam(SQLHANDLE statement) { bindSuccess = false; returnCode = SQLBindParameter( statement, index + 1, SQL_PARAM_INPUT, SQL_C_CHAR, SQL_VARCHAR, size, 0, (SQLPOINTER)value.get(), size, &size); if (returnCode != SQL_SUCCESS && returnCode != SQL_SUCCESS_WITH_INFO) { WCharString wcs; errorMessage = wcs.SysErrMessage(); } else { bindSuccess = true; } } IBinder::ColumnType WStringBinder::GetType() { return IBinder::ColumnType::STRING; } void WStringBinder::SetIndex(int arg) { index = arg; } int WStringBinder::GetIndex() { return index; } void WStringBinder::SetSize(SQLLEN arg) { size = arg; } SQLLEN WStringBinder::GetSize() { return size; } void WStringBinder::SetScale(SQLSMALLINT arg) { scale = arg; } SQLSMALLINT WStringBinder::GetScale() { return scale; } void WStringBinder::SetValue(char * arg) { WCharString wc; 
wc.Value(arg); value = wc.ToChar(); } void WStringBinder::SetValue(const char * arg) { WCharString wc; wc.Value(arg); value = wc.ToChar(); } void WStringBinder::SetValue(wchar_t * arg) { WCharString wc; wc.Value(arg); value = wc.ToChar(); } void WStringBinder::SetValue(const wchar_t * arg) { WCharString wc; wc.Value(arg); value = wc.ToChar(); } void WStringBinder::SetValue(string arg) { WCharString wc; wc.Value(arg); value = wc.ToChar(); } void WStringBinder::Bind(SQLHANDLE statement) { BindParam(statement); } SQLRETURN WStringBinder::GetReturnCode() { return returnCode; } string WStringBinder::GetErrorMessage() { return errorMessage; } bool WStringBinder::GetBindSuccess() { return bindSuccess; } WStringBinder::WStringBinder() { index = 0; size = 0; scale = 0; value = nullptr; nullIndicator = 0; returnCode = 0; bindSuccess = false; } WStringBinder::~WStringBinder() { } void WritingBinder::AddBinder(IBinder * arg) { columns.push_back(arg); } bool WritingBinder::Bind(SQLHANDLE statement) { for (size_t i = 0; i < columns.size(); i++) { columns.at(i)->Bind(statement); if (!columns.at(i)->GetBindSuccess()) { returnCode = columns.at(i)->GetReturnCode(); errorMessage = columns.at(i)->GetErrorMessage(); return false; } } return true; } SQLRETURN WritingBinder::GetReturnCode() { return returnCode; } string WritingBinder::GetErrorMessage() { return errorMessage; } vector<IBinder *> WritingBinder::Get() { return columns; } WritingBinder::WritingBinder() { returnCode = 0; } WritingBinder::~WritingBinder() { for (size_t i = 0; i < columns.size(); i++) { delete columns.at(i); } } IData::DataType NumberData::GetType() { return IData::DataType::NUMBER; } void NumberData::SetData(int arg) { value = arg; } int NumberData::GetData() { return value; } NumberData::NumberData() { value = 0; } NumberData::~NumberData() { } IData::DataType StringData::GetType() { return IData::DataType::STRING; } void StringData::SetData(char * arg) { WCharString wc; wc.Value(arg); value = wc.ToChar(); } 
void StringData::SetData(const char * arg) { WCharString wc; wc.Value(arg); value = wc.ToChar(); } void StringData::SetData(wchar_t * arg) { WCharString wc; wc.Value(arg); value = wc.ToChar(); } void StringData::SetData(const wchar_t * arg) { WCharString wc; wc.Value(arg); value = wc.ToChar(); } void StringData::SetData(string arg) { WCharString wc; wc.Value(arg); value = wc.ToChar(); } unique_ptr<char> StringData::GetData() { return move(value); } StringData::StringData() { } StringData::~StringData() { } bool ODBCConnector::DescribeTable() { SQLSMALLINT columnsCount; SQLNumResultCols(statement, &columnsCount); for (SQLSMALLINT i = 0; i < columnsCount; i++) { SQLWCHAR columnName[128] = { 0 }; SQLSMALLINT columnNameSize = 0; SQLSMALLINT columnType = 0; SQLULEN columnSize = 0; SQLSMALLINT scale = 0; SQLSMALLINT nullable = 0; rc = SQLDescribeCol( statement, (SQLUSMALLINT)(i + 1), columnName, (SQLSMALLINT) sizeof(columnName), &columnNameSize, &columnType, &columnSize, &scale, &nullable); if (rc != SQL_SUCCESS && rc != SQL_SUCCESS_WITH_INFO) { errorMessage = wc.SysErrMessage(); return false; } IBinder * addBinder; switch (columnType) { case SQL_CHAR: case SQL_VARCHAR: addBinder = new RStringBinder(); addBinder->SetIndex(i); addBinder->SetSize(columnSize); addBinder->SetScale(scale); break; case SQL_NUMERIC: addBinder = new RNumberBinder(); addBinder->SetIndex(i); addBinder->SetSize(columnSize); addBinder->SetScale(scale); break; case SQL_DECIMAL: case SQL_INTEGER: case SQL_SMALLINT: case SQL_FLOAT: case SQL_DOUBLE: addBinder = new RNumberBinder(); addBinder->SetIndex(i); addBinder->SetSize(columnSize); addBinder->SetScale(scale); break; default: break; } rb.AddBinder(addBinder); } return rb.Bind(statement); } bool ODBCConnector::ExecDML(string arg) { unique_ptr<wchar_t> query(wc.Value(arg).ToWChar()); if (table.size() == 0) { rc = SQLExecDirect(statement, query.get(), SQL_NTS); if (rc != SQL_SUCCESS && rc != SQL_SUCCESS_WITH_INFO) { HandleDiagnosticRecord(statement, 
SQL_HANDLE_STMT, rc); errorMessage = wc.SysErrMessage(); return false; } return true; } else { rc = SQLPrepare(statement, query.get(), SQL_NTS); if (rc != SQL_SUCCESS && rc != SQL_NEED_DATA && rc != SQL_SUCCESS_WITH_INFO) { HandleDiagnosticRecord(statement, SQL_HANDLE_STMT, rc); errorMessage = wc.SysErrMessage(); return false; } for (size_t i = 0; i < table.size(); i++) { for (size_t j = 0; j < table.at(i).size(); j++) { if (table.at(i).at(j)->GetType() == IData::DataType::NUMBER) { ((WNumberBinder *)wb.Get().at(j))->SetValue( ((NumberData *)(table.at(i).at(j)))->GetData() ); } else { ((WStringBinder *)wb.Get().at(j))->SetValue( wc.Value(((StringData *)(table.at(i).at(j)))->GetData()).ToString() ); } } if (!wb.Bind(statement)) { HandleDiagnosticRecord(statement, SQL_HANDLE_STMT, rc); errorMessage = wc.SysErrMessage(); return false; } rc = SQLExecute(statement); if (rc != SQL_SUCCESS && rc != SQL_NEED_DATA && rc != SQL_SUCCESS_WITH_INFO) { HandleDiagnosticRecord(statement, SQL_HANDLE_STMT, rc); errorMessage = wc.SysErrMessage(); return false; } } return true; } } void ODBCConnector::Prepare() { prepared = false; rc = SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &env); if (rc != SQL_SUCCESS && rc != SQL_SUCCESS_WITH_INFO) { HandleDiagnosticRecord(env, SQL_HANDLE_ENV, rc); errorMessage = wc.SysErrMessage(); } else { rc = SQLSetEnvAttr( env, SQL_ATTR_ODBC_VERSION, (SQLPOINTER)SQL_OV_ODBC3, 0); if (rc != SQL_SUCCESS && rc != SQL_SUCCESS_WITH_INFO) { HandleDiagnosticRecord(env, SQL_HANDLE_ENV, rc); errorMessage = wc.SysErrMessage(); } else { rc = SQLAllocHandle(SQL_HANDLE_DBC, env, &connection); if (rc != SQL_SUCCESS && rc != SQL_SUCCESS_WITH_INFO) { HandleDiagnosticRecord(connection, SQL_HANDLE_DBC, rc); errorMessage = wc.SysErrMessage(); } } } prepared = true; } bool ODBCConnector::GetPrepared() { return prepared; } void ODBCConnector::Connect(string arg) { connected = false; unique_ptr<wchar_t> cs(wc.Value(arg).ToWChar()); rc = SQLDriverConnect( connection, 
nullptr, cs.get(), SQL_NTS, connectionString, 1024, &bufSize, SQL_DRIVER_NOPROMPT); if (rc == SQL_SUCCESS || rc == SQL_SUCCESS_WITH_INFO) { connected = true; transactionBegun = false; } else { HandleDiagnosticRecord(connection, SQL_HANDLE_DBC, rc); errorMessage = wc.SysErrMessage(); } } bool ODBCConnector::GetConnected() { return connected; } void ODBCConnector::BeginTransaction() { transactionBegun = false; rc = SQLSetConnectAttr( connection, SQL_ATTR_AUTOCOMMIT, (SQLPOINTER)SQL_AUTOCOMMIT_OFF, SQL_NTS); if (rc == SQL_SUCCESS || rc == SQL_SUCCESS_WITH_INFO) { transactionBegun = true; } else { HandleDiagnosticRecord(connection, SQL_HANDLE_DBC, rc); errorMessage = wc.SysErrMessage(); } } bool ODBCConnector::GetTransactionBegun() { return transactionBegun; } void ODBCConnector::CommitTransaction() { rc = SQLEndTran( SQL_HANDLE_ENV, (SQLPOINTER)env, SQL_COMMIT); if (rc == SQL_SUCCESS || rc == SQL_SUCCESS_WITH_INFO) { transactionBegun = false; } else { HandleDiagnosticRecord(connection, SQL_HANDLE_DBC, rc); errorMessage = wc.SysErrMessage(); } } void ODBCConnector::RollbackTransaction() { rc = SQLEndTran( SQL_HANDLE_ENV, (SQLPOINTER)env, SQL_ROLLBACK); if (rc == SQL_SUCCESS || rc == SQL_SUCCESS_WITH_INFO) { transactionBegun = false; } else { HandleDiagnosticRecord(connection, SQL_HANDLE_DBC, rc); errorMessage = wc.SysErrMessage(); } } void ODBCConnector::SQLStatementPrepare() { statementPrepared = false; // In ODBC 3.x, the ODBC 2.x function SQLAllocStmt has been replaced by SQLAllocHandle. 
// https://msdn.microsoft.com/ja-jp/library/ms709370(v=vs.85).aspx // ftp://public.dhe.ibm.com/software/data/db2/everyplace/infocenters/jpn/dbeapr1006.htm // https://mariadb.com/kb/en/sql-99/sqlallocstmt/ // SQLAllocStmt(env, &statement); rc = SQLAllocHandle(SQL_HANDLE_STMT, connection, &statement); if (rc == SQL_SUCCESS || rc == SQL_SUCCESS_WITH_INFO) { statementPrepared = true; } else { HandleDiagnosticRecord(connection, SQL_HANDLE_STMT, rc); errorMessage = wc.SysErrMessage(); } } bool ODBCConnector::GetSQLStatementPrepared() { return statementPrepared; } void ODBCConnector::SQLSelect(string arg) { selQuerySuccess = false; unique_ptr<wchar_t> query(wc.Value(arg).ToWChar()); rc = SQLExecDirect(statement, query.get(), SQL_NTS); if (rc == SQL_SUCCESS || rc == SQL_SUCCESS_WITH_INFO) { if (DescribeTable()) { selQuerySuccess = true; } else { HandleDiagnosticRecord(statement, SQL_HANDLE_STMT, rc); errorMessage = rb.GetErrorMessage(); } } else { HandleDiagnosticRecord(statement, SQL_HANDLE_STMT, rc); errorMessage = wc.SysErrMessage(); } } bool ODBCConnector::GetSelQuerySuccess() { return selQuerySuccess; } void ODBCConnector::Fetch() { fetchCompleted = false; do { rc = SQLFetch(statement); if (rc == SQL_SUCCESS || rc == SQL_SUCCESS_WITH_INFO) { vector<IData *> row; for (size_t i = 0; i < rb.Get().size(); i++) { if (rb.Get().at(i)->GetType() == IBinder::ColumnType::NUMBER) { NumberData * add = new NumberData(); add->SetData( ((RNumberBinder *)rb.Get().at(i))->GetValue() ); row.push_back(add); } else { StringData * add = new StringData(); add->SetData( wc.Value( ((RStringBinder *)rb.Get().at(i))->GetValue() ).ToString() ); row.push_back(add); } } rb.Bind(statement); table.push_back(row); } else if (rc == SQL_NO_DATA_FOUND) { fetchCompleted = true; } else { HandleDiagnosticRecord(statement, SQL_HANDLE_STMT, rc); return; } cout << "\n"; } while (!fetchCompleted); } bool ODBCConnector::GetFetchCompleted() { return fetchCompleted; } void ODBCConnector::AddParamBindPos( 
IBinder::ColumnType type, SQLLEN size, SQLSMALLINT scale) { paramBindPosAdded = false; IBinder * addBinder; if (type == IBinder::ColumnType::NUMBER) { addBinder = new WNumberBinder(); addBinder->SetIndex(bindPos); addBinder->SetSize(size); addBinder->SetScale(scale); } else { addBinder = new WStringBinder(); addBinder->SetIndex(bindPos); addBinder->SetSize(size); addBinder->SetScale(scale); } wb.AddBinder(addBinder); bindPos++; paramBindPosAdded = true; } bool ODBCConnector::GetParamBindPosAdded() { return paramBindPosAdded; } void ODBCConnector::SetInsertData(vector<vector<IData *>> arg) { table = arg; } void ODBCConnector::SQLInsert(string arg) { insQuerySuccess = false; if (ExecDML(arg)) { insQuerySuccess = true; } } bool ODBCConnector::GetInsQuerySuccess() { return insQuerySuccess; } string ODBCConnector::GetErrorMessage() { return errorMessage; } void ODBCConnector::HandleDiagnosticRecord( SQLHANDLE handle, SQLSMALLINT handleType, SQLRETURN retCode) { if (retCode == SQL_INVALID_HANDLE) { cout << "Invalid handle" << endl; return; } SQLSMALLINT iRec = 0; SQLWCHAR szSQLState[SQL_SQLSTATE_SIZE + 1]; SQLINTEGER error; SQLWCHAR szErrorMessage[1000]; while (SQLGetDiagRec(handleType, handle, ++iRec, szSQLState, &error, szErrorMessage, (SQLSMALLINT)(sizeof(szErrorMessage) / sizeof(WCHAR)), (SQLSMALLINT *)NULL) == SQL_SUCCESS) { cout << wc.Value("Status = ").Append(szSQLState).Append("\n").ToString(); cout << wc.Value("Message = ").Append(szErrorMessage).Append("\n").ToString(); cout << endl; } } ODBCConnector::ODBCConnector() { env = nullptr; connection = nullptr; bufSize = 0; statement = nullptr; rc = 0; prepared = false; connected = false; transactionBegun = false; statementPrepared = false; selQuerySuccess = false; fetchCompleted = false; bindPos = 0; paramBindPosAdded = false; insQuerySuccess = false; disposed = false; } void ODBCConnector::Dispose() { SQLFreeHandle(SQL_HANDLE_STMT, statement); SQLFreeHandle(SQL_HANDLE_DBC, connection); 
SQLFreeHandle(SQL_HANDLE_ENV, env); disposed = true; } ODBCConnector::~ODBCConnector() { if (!disposed) { Dispose(); } }
23.246502
91
0.600528
[ "vector" ]
8135a06fa01adc3379bd1da0a7552f40773d824e
2,133
cpp
C++
dockerfiles/gaas_tutorial_2/GAAS/software/SLAM/ygz_slam_ros/Thirdparty/PCL/examples/surface/test_nurbs_fitting_surface.cpp
hddxds/scripts_from_gi
afb8977c001b860335f9062464e600d9115ea56e
[ "Apache-2.0" ]
2
2019-04-10T14:04:52.000Z
2019-05-29T03:41:58.000Z
software/SLAM/ygz_slam_ros/Thirdparty/PCL/examples/surface/test_nurbs_fitting_surface.cpp
glider54321/GAAS
5c3b8c684e72fdf7f62c5731a260021e741069e7
[ "BSD-3-Clause" ]
null
null
null
software/SLAM/ygz_slam_ros/Thirdparty/PCL/examples/surface/test_nurbs_fitting_surface.cpp
glider54321/GAAS
5c3b8c684e72fdf7f62c5731a260021e741069e7
[ "BSD-3-Clause" ]
1
2021-12-20T06:54:41.000Z
2021-12-20T06:54:41.000Z
// Example: fit a NURBS surface to a randomly sampled half-cylinder point
// cloud using PCL's on_nurbs module, then triangulate and display it.
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/common/io.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/surface/on_nurbs/fitting_surface_tdm.h>
#include <pcl/surface/on_nurbs/triangulation.h>

typedef pcl::PointXYZ Point;

// Sample `npoints` random points on a cylinder section of radius `r` and
// height `h`, sweeping the angle over [0, alpha).  Each point is appended
// both to `cloud` (for display) and to `data` (for the NURBS fitter).
// NOTE(review): uses rand() without seeding, so the sample is the same on
// every run — presumably intentional for a reproducible demo.
void
CreateCylinderPoints (pcl::PointCloud<Point>::Ptr cloud, pcl::on_nurbs::vector_vec3d &data, unsigned npoints,
                      double alpha, double h, double r)
{
  for (unsigned i = 0; i < npoints; i++)
  {
    double da = alpha * double (rand ()) / RAND_MAX;        // angle in [0, alpha)
    double dh = h * (double (rand ()) / RAND_MAX - 0.5);    // height in [-h/2, h/2)
    Point p;
    p.x = float (r * cos (da));
    p.y = float (r * sin (da));
    p.z = float (dh);
    data.push_back (Eigen::Vector3d (p.x, p.y, p.z));
    cloud->push_back (p);
  }
}

int
main ()
{
  unsigned npoints (200);      // number of sampled cloud points
  unsigned refinement (2);     // knot-insertion refinement passes per direction
  unsigned iterations (10);    // fitting (assemble/solve) iterations

  pcl::visualization::PCLVisualizer viewer ("Test: NURBS surface fitting");
  viewer.setSize (800, 600);

  // create point cloud: half cylinder (alpha = pi), height 1, radius 0.5
  pcl::PointCloud<Point>::Ptr cloud (new pcl::PointCloud<Point>);
  pcl::on_nurbs::NurbsDataSurface data;
  CreateCylinderPoints (cloud, data.interior, npoints, M_PI, 1.0, 0.5);
  viewer.addPointCloud<Point> (cloud, "cloud_cylinder");

  // fit NURBS surface: initialize a degree-3 patch from the PCA bounding box
  ON_NurbsSurface nurbs = pcl::on_nurbs::FittingSurface::initNurbsPCABoundingBox (3, &data);
  pcl::on_nurbs::FittingSurface fit (&data, nurbs);
  //  fit.setQuiet (false);

  pcl::on_nurbs::FittingSurface::Parameter params;
  params.interior_smoothness = 0.1;
  params.interior_weight = 1.0;
  params.boundary_smoothness = 0.1;
  params.boundary_weight = 0.0;    // boundary not constrained in this demo

  // NURBS refinement: insert knots in both parametric directions
  for (unsigned i = 0; i < refinement; i++)
  {
    fit.refine (0);
    fit.refine (1);
  }

  // fitting iterations
  for (unsigned i = 0; i < iterations; i++)
  {
    fit.assemble (params);
    fit.solve ();
  }

  // triangulate NURBS surface (128 segments per direction) and display
  nurbs = fit.m_nurbs;
  pcl::PolygonMesh mesh;
  std::string mesh_id = "mesh_nurbs";
  pcl::on_nurbs::Triangulation::convertSurface2PolygonMesh (nurbs, mesh, 128);
  viewer.addPolygonMesh (mesh, mesh_id);

  viewer.spin ();
  return 0;
}
26.333333
109
0.668073
[ "mesh" ]
813611601894091654b9977715f221aff10c7c32
952
cpp
C++
JuneChallenge/Week2/13:LargestDivisibleSubset.cpp
thepushkarp/leetcode
6812c68d7c49b9e5b0698feb3203346f5fe8adbf
[ "MIT" ]
1
2022-02-10T15:19:02.000Z
2022-02-10T15:19:02.000Z
JuneChallenge/Week2/13:LargestDivisibleSubset.cpp
thepushkarp/leetcode
6812c68d7c49b9e5b0698feb3203346f5fe8adbf
[ "MIT" ]
null
null
null
JuneChallenge/Week2/13:LargestDivisibleSubset.cpp
thepushkarp/leetcode
6812c68d7c49b9e5b0698feb3203346f5fe8adbf
[ "MIT" ]
null
null
null
class Solution {
public:
    // Returns the largest subset of nums in which every pair (a, b)
    // satisfies a % b == 0 or b % a == 0.
    //
    // Approach: sort ascending, then DP right-to-left over the array.
    // chainLen[i] is the length of the best divisibility chain starting at
    // index i; successor[i] is the index that chain continues with, using
    // n as the "no successor" sentinel.  Finally walk the successor links
    // from the best starting index to materialize the answer.
    vector<int> largestDivisibleSubset(vector<int>& nums) {
        const int n = static_cast<int>(nums.size());
        if (n == 0) {
            return {};
        }

        sort(nums.begin(), nums.end());

        vector<int> chainLen(n, 0);
        vector<int> successor(n, n);    // n == no successor
        chainLen[n - 1] = 1;

        int bestStart = n - 1;          // index where the longest chain begins
        for (int i = n - 2; i >= 0; --i) {
            int pick = i;               // best continuation seen so far
            for (int j = i + 1; j < n; ++j) {
                // nums is sorted, so nums[j] % nums[i] == 0 means nums[j]
                // extends any chain containing nums[i].
                if (nums[j] % nums[i] == 0 && chainLen[j] > chainLen[pick]) {
                    pick = j;
                    successor[i] = pick;
                }
            }
            chainLen[i] = 1 + chainLen[pick];    // pick == i means chain of just nums[i]
            if (chainLen[i] > chainLen[bestStart]) {
                bestStart = i;
            }
        }

        // Walk the successor links to collect the chain in ascending order.
        vector<int> result;
        for (int cur = bestStart; ; cur = successor[cur]) {
            result.push_back(nums[cur]);
            if (successor[cur] == n) {
                break;
            }
        }
        return result;
    }
};
28.848485
59
0.388655
[ "vector" ]
813f81d1f22dbb4e6540c4d2aac93c0d2cfc6894
8,414
hpp
C++
core/channel/push_combined_channel.hpp
OwenDeng1993/Husky
61ba0e8fc2a0ab025e89307de07a1c6833227857
[ "Apache-2.0" ]
117
2016-08-31T04:05:08.000Z
2021-12-18T15:05:38.000Z
core/channel/push_combined_channel.hpp
OwenDeng1993/Husky
61ba0e8fc2a0ab025e89307de07a1c6833227857
[ "Apache-2.0" ]
223
2016-09-12T05:32:44.000Z
2020-05-22T02:51:21.000Z
core/channel/push_combined_channel.hpp
OwenDeng1993/Husky
61ba0e8fc2a0ab025e89307de07a1c6833227857
[ "Apache-2.0" ]
77
2016-08-31T04:02:57.000Z
2020-04-08T09:23:46.000Z
// Copyright 2016 Husky Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <time.h>

#include <functional>
#include <utility>
#include <vector>

#include "base/serialization.hpp"
#include "core/channel/channel_base.hpp"
#include "core/channel/channel_impl.hpp"
#include "core/combiner.hpp"
#include "core/hash_ring.hpp"
#include "core/mailbox.hpp"
#include "core/objlist.hpp"
#include "core/shuffle_combiner_store.hpp"
#include "core/worker_info.hpp"

namespace husky {

using base::BinStream;

// Channel that pushes (key, msg) pairs from a source to objects in a
// destination ObjList, combining messages addressed to the same key with
// CombineT both locally (shuffle-combine across local workers) and on
// receipt (process_bin).  Messages for a key are routed to the worker
// chosen by the hash ring.
template <typename MsgT, typename DstObjT, typename CombineT>
class PushCombinedChannel : public Source2ObjListChannel<DstObjT> {
   public:
    // Registers this channel as an out-channel of `src` and an in-channel
    // of `dst`.
    PushCombinedChannel(ChannelSource* src, ObjList<DstObjT>* dst) : Source2ObjListChannel<DstObjT>(src, dst) {
        this->src_ptr_->register_outchannel(this->channel_id_, this);
        this->dst_ptr_->register_inchannel(this->channel_id_, this);
    }

    // Releases the shared shuffle-combiner state and deregisters from both
    // endpoints.
    ~PushCombinedChannel() override {
        ShuffleCombinerStore::remove_shuffle_combiner<typename DstObjT::KeyT, MsgT>(this->channel_id_);

        this->src_ptr_->deregister_outchannel(this->channel_id_);
        this->dst_ptr_->deregister_inchannel(this->channel_id_);
    }

    // Non-copyable (owns registrations), movable.
    PushCombinedChannel(const PushCombinedChannel&) = delete;
    PushCombinedChannel& operator=(const PushCombinedChannel&) = delete;

    PushCombinedChannel(PushCombinedChannel&&) = default;
    PushCombinedChannel& operator=(PushCombinedChannel&&) = default;

    void customized_setup() override {
        // Initialize send_buffer_.
        // Use get_largest_tid() instead of get_num_workers(), since we may
        // only use a subset of the workers (tids need not be contiguous).
        send_buffer_.resize(this->worker_info_->get_largest_tid() + 1);

        // Create shuffle_combiner_ (shared across local workers, keyed by
        // channel id).
        // TODO(yuzhen): Only support sortcombine, hashcombine can be added using enableif
        shuffle_combiner_ = ShuffleCombinerStore::create_shuffle_combiner<typename DstObjT::KeyT, MsgT>(
            this->channel_id_, this->local_id_, this->worker_info_->get_num_local_workers(),
            this->worker_info_->get_largest_tid() + 1);
    }

    // Buffer (key, msg) for the worker that owns `key`, combining with any
    // message already buffered for the same key.
    void push(const MsgT& msg, const typename DstObjT::KeyT& key) {
        // shuffle_combiner_.init();  // Already move init() to create_shuffle_combiner_()
        int dst_worker_id = this->worker_info_->get_hash_ring().hash_lookup(key);
        auto& buffer = (*shuffle_combiner_)[this->local_id_].storage(dst_worker_id);
        back_combine<CombineT>(buffer, key, msg);
    }

    // Combined message received for `obj` in the current round; returns a
    // default-constructed MsgT if nothing arrived for it.
    const MsgT& get(const DstObjT& obj) {
        auto idx = this->dst_ptr_->index_of(&obj);
        if (idx >= recv_buffer_.size()) {
            // resize recv_buffer_ and recv_flag_ if it is not large enough
            recv_buffer_.resize(this->dst_ptr_->get_size());
            recv_flag_.resize(this->dst_ptr_->get_size());
        }
        if (recv_flag_[idx] == false) {
            recv_buffer_[idx] = MsgT();
        }
        return recv_buffer_[idx];
    }

    // True iff a message arrived for `obj` in the current round.
    bool has_msgs(const DstObjT& obj) {
        auto idx = this->dst_ptr_->index_of(&obj);
        if (idx >= recv_buffer_.size()) {
            // resize recv_buffer_ and recv_flag_ if it is not large enough
            recv_buffer_.resize(this->dst_ptr_->get_size());
            recv_flag_.resize(this->dst_ptr_->get_size());
        }
        return recv_flag_[idx];
    }

    void prepare() override { clear_recv_buffer_(); }

    void in(BinStream& bin) override { process_bin(bin); }

    void out() override { flush(); }

    // Send each non-empty buffer to its destination worker; the start
    // offset staggers senders to avoid all workers targeting dst 0 first.
    void send() {
        int start = this->global_id_;
        for (int i = 0; i < send_buffer_.size(); ++i) {
            int dst = (start + i) % send_buffer_.size();
            if (send_buffer_[dst].size() == 0)
                continue;
            this->mailbox_->send(dst, this->channel_id_, this->progress_ + 1, send_buffer_[dst]);
            send_buffer_[dst].purge();
        }
    }

    // Advance the round counter and signal completion to all peers.
    void send_complete() {
        this->inc_progress();
        this->mailbox_->send_complete(this->channel_id_, this->progress_, this->worker_info_->get_local_tids(),
                                      this->worker_info_->get_pids());
    }

    /// This method is only useful without list_execute
    void flush() {
        shuffle_combine();
        send();
        send_complete();
    }

    /// This method is only useful without list_execute
    void prepare_messages() {
        if (!this->is_flushed())
            return;
        clear_recv_buffer_();
        while (this->mailbox_->poll(this->channel_id_, this->progress_)) {
            auto bin_push = this->mailbox_->recv(this->channel_id_, this->progress_);
            process_bin(bin_push);
        }
        this->reset_flushed();
    }

    ShuffleCombiner<std::pair<typename DstObjT::KeyT, MsgT>>& get_shuffle_combiner(int tid) {
        return (*shuffle_combiner_)[tid];
    }

    std::vector<BinStream>& get_send_buffer() { return send_buffer_; }

   protected:
    // Mark all per-object receive slots as empty for the next round.
    void clear_recv_buffer_() { std::fill(recv_flag_.begin(), recv_flag_.end(), false); }

    // Deserialize (key, msg) pairs, creating destination objects for unseen
    // keys and combining repeated messages for the same object.
    void process_bin(BinStream& bin_push) {
        while (bin_push.size() != 0) {
            typename DstObjT::KeyT key;
            bin_push >> key;
            MsgT msg;
            bin_push >> msg;

            DstObjT* recver_obj = this->dst_ptr_->find(key);
            size_t idx;
            if (recver_obj == nullptr) {
                DstObjT obj(key);  // Construct obj using key only
                idx = this->dst_ptr_->add_object(std::move(obj));
            } else {
                idx = this->dst_ptr_->index_of(recver_obj);
            }

            if (idx >= recv_buffer_.size()) {
                recv_buffer_.resize(idx + 1);
                recv_flag_.resize(idx + 1);
            }
            if (recv_flag_[idx] == true) {
                CombineT::combine(recv_buffer_[idx], msg);
            } else {
                recv_buffer_[idx] = std::move(msg);
                recv_flag_[idx] = true;
            }
        }
    }

    // Merge the per-destination buffers of all local workers (each worker
    // owns a strided subset of destinations), combine duplicates, then
    // serialize into send_buffer_.
    void shuffle_combine() {
        // step 1: shuffle combine
        auto& self_shuffle_combiner = (*shuffle_combiner_)[this->local_id_];
        self_shuffle_combiner.send_shuffler_buffer();
        for (int iter = 0; iter < this->worker_info_->get_num_local_workers() - 1; iter++) {
            int next_worker = self_shuffle_combiner.access_next();
            auto& peer_shuffle_combiner = (*shuffle_combiner_)[next_worker];
            for (int i = this->local_id_; i < this->worker_info_->get_largest_tid() + 1;
                 i += this->worker_info_->get_num_local_workers()) {
                // combining the i-th buffer
                auto& self_buffer = self_shuffle_combiner.storage(i);
                auto& peer_buffer = peer_shuffle_combiner.storage(i);
                self_buffer.insert(self_buffer.end(), peer_buffer.begin(), peer_buffer.end());
                peer_buffer.clear();
            }
        }
        for (int i = this->local_id_; i < this->worker_info_->get_largest_tid() + 1;
             i += this->worker_info_->get_num_local_workers()) {
            auto& self_buffer = self_shuffle_combiner.storage(i);
            combine_single<CombineT>(self_buffer);
        }

        // step 2: serialize combine buffer
        for (int i = this->local_id_; i < this->worker_info_->get_largest_tid() + 1;
             i += this->worker_info_->get_num_local_workers()) {
            auto& combine_buffer = self_shuffle_combiner.storage(i);
            for (int k = 0; k < combine_buffer.size(); k++) {
                send_buffer_[i] << combine_buffer[k].first;
                send_buffer_[i] << combine_buffer[k].second;
            }
            combine_buffer.clear();
        }
    }

    // Shared per-local-worker shuffle state (owned by ShuffleCombinerStore).
    std::vector<ShuffleCombiner<std::pair<typename DstObjT::KeyT, MsgT>>>* shuffle_combiner_;

    std::vector<BinStream> send_buffer_;  // one serialized buffer per destination tid
    std::vector<MsgT> recv_buffer_;       // combined message per destination object index
    std::vector<bool> recv_flag_;         // whether recv_buffer_[i] holds a real message

};

}  // namespace husky
8144e3cec1d62d0520391bfa4787fec7b8b4884d
7,142
cpp
C++
test/src/misc/dbn_sgd.cpp
ongbe/dll
1f451eae2c020dbd8ac17e76e7befc5079210051
[ "MIT" ]
1
2020-02-19T13:13:09.000Z
2020-02-19T13:13:09.000Z
test/src/misc/dbn_sgd.cpp
ongbe/dll
1f451eae2c020dbd8ac17e76e7befc5079210051
[ "MIT" ]
null
null
null
test/src/misc/dbn_sgd.cpp
ongbe/dll
1f451eae2c020dbd8ac17e76e7befc5079210051
[ "MIT" ]
1
2020-02-27T03:42:24.000Z
2020-02-27T03:42:24.000Z
//=======================================================================
// Copyright (c) 2014-2017 Baptiste Wicht
// Distributed under the terms of the MIT License.
// (See accompanying file LICENSE or copy at
//  http://opensource.org/licenses/MIT)
//=======================================================================

// Integration tests: DBN pretraining + SGD fine-tuning on small MNIST
// subsets, varying unit types, updaters, regularization and batch modes.
// Error thresholds are loose on purpose — these are smoke/regression
// tests, not accuracy benchmarks.

#include <deque>

#include "dll_test.hpp"

#include "dll/rbm/rbm.hpp"
#include "dll/dbn.hpp"

#include "mnist/mnist_reader.hpp"
#include "mnist/mnist_utils.hpp"

// Baseline: 3-layer binary DBN with softmax output, plain SGD fine-tuning.
TEST_CASE("dbn/sgd/1", "[dbn][mnist][sgd]") {
    typedef dll::dbn_desc<
        dll::dbn_layers<
            dll::rbm_desc<28 * 28, 100, dll::momentum, dll::batch_size<25>, dll::init_weights>::layer_t,
            dll::rbm_desc<100, 200, dll::momentum, dll::batch_size<25>>::layer_t,
            dll::rbm_desc<200, 10, dll::momentum, dll::batch_size<25>, dll::hidden<dll::unit_type::SOFTMAX>>::layer_t>,
        dll::trainer<dll::sgd_trainer>, dll::batch_size<10>>::dbn_t dbn_t;

    auto dataset = mnist::read_dataset_direct<std::vector, etl::dyn_matrix<float, 1>>(500);
    REQUIRE(!dataset.training_images.empty());

    mnist::binarize_dataset(dataset);

    auto dbn = std::make_unique<dbn_t>();

    dbn->pretrain(dataset.training_images, 50);

    auto ft_error = dbn->fine_tune(dataset.training_images, dataset.training_labels, 100);
    std::cout << "ft_error:" << ft_error << std::endl;
    CHECK(ft_error < 5e-2);

    TEST_CHECK(0.2);
}

// Same topology with an explicit momentum updater and per-epoch error
// reporting disabled.
TEST_CASE("dbn/sgd/2", "[dbn][mnist][sgd]") {
    typedef dll::dbn_desc<
        dll::dbn_layers<
            dll::rbm_desc<28 * 28, 100, dll::momentum, dll::batch_size<25>, dll::init_weights>::layer_t,
            dll::rbm_desc<100, 200, dll::momentum, dll::batch_size<25>>::layer_t,
            dll::rbm_desc<200, 10, dll::momentum, dll::batch_size<25>, dll::hidden<dll::unit_type::SOFTMAX>>::layer_t>,
        dll::trainer<dll::sgd_trainer>, dll::no_epoch_error, dll::updater<dll::updater_type::MOMENTUM>,
        dll::batch_size<10>>::dbn_t dbn_t;

    auto dataset = mnist::read_dataset_direct<std::vector, etl::dyn_matrix<float, 1>>(1000);
    REQUIRE(!dataset.training_images.empty());

    mnist::binarize_dataset(dataset);

    auto dbn = std::make_unique<dbn_t>();

    dbn->pretrain(dataset.training_images, 20);

    auto error = dbn->fine_tune(dataset.training_images, dataset.training_labels, 100);
    REQUIRE(error < 5e-2);

    TEST_CHECK(0.2);
}

// Gaussian visible units on normalized (not binarized) input.
TEST_CASE("dbn/sgd/3", "[dbn][mnist][sgd][gaussian]") {
    typedef dll::dbn_desc<
        dll::dbn_layers<
            dll::rbm_desc<28 * 28, 200, dll::momentum, dll::batch_size<25>, dll::visible<dll::unit_type::GAUSSIAN>>::layer_t,
            dll::rbm_desc<200, 500, dll::momentum, dll::batch_size<25>>::layer_t,
            dll::rbm_desc<500, 10, dll::momentum, dll::batch_size<25>, dll::hidden<dll::unit_type::SOFTMAX>>::layer_t>,
        dll::trainer<dll::sgd_trainer>, dll::batch_size<10>>::dbn_t dbn_t;

    auto dataset = mnist::read_dataset_direct<std::vector, etl::dyn_matrix<float, 1>>(1000);
    REQUIRE(!dataset.training_images.empty());

    mnist::normalize_dataset(dataset);

    auto dbn = std::make_unique<dbn_t>();

    dbn->pretrain(dataset.training_images, 20);

    auto error = dbn->fine_tune(dataset.training_images, dataset.training_labels, 100);
    REQUIRE(error < 5e-2);

    TEST_CHECK(0.2);
}

//This test should not perform well, but should not fail
//TODO This should be improved
TEST_CASE("dbn/sgd/4", "[dbn][mnist][sgd][relu]") {
    typedef dll::dbn_desc<
        dll::dbn_layers<
            dll::rbm_desc<28 * 28, 100, dll::momentum, dll::batch_size<25>, dll::hidden<dll::unit_type::RELU>, dll::init_weights>::layer_t,
            dll::rbm_desc<100, 200, dll::momentum, dll::batch_size<25>>::layer_t,
            dll::rbm_desc<200, 10, dll::momentum, dll::batch_size<25>, dll::hidden<dll::unit_type::SOFTMAX>>::layer_t>,
        dll::trainer<dll::sgd_trainer>, dll::batch_size<10>>::dbn_t dbn_t;

    auto dataset = mnist::read_dataset_direct<std::vector, etl::dyn_matrix<float, 1>>(200);
    REQUIRE(!dataset.training_images.empty());

    mnist::binarize_dataset(dataset);

    auto dbn = std::make_unique<dbn_t>();

    dbn->pretrain(dataset.training_images, 20);

    // Only require numerical stability, not accuracy (see TODO above).
    auto error = dbn->fine_tune(dataset.training_images, dataset.training_labels, 100);
    REQUIRE(std::isfinite(error));
}

// L2 weight decay during fine-tuning.
TEST_CASE("dbn/sgd/5", "[dbn][mnist][sgd]") {
    typedef dll::dbn_desc<
        dll::dbn_layers<
            dll::rbm_desc<28 * 28, 100, dll::momentum, dll::batch_size<25>, dll::init_weights>::layer_t,
            dll::rbm_desc<100, 200, dll::momentum, dll::batch_size<25>>::layer_t,
            dll::rbm_desc<200, 10, dll::momentum, dll::batch_size<25>, dll::hidden<dll::unit_type::SOFTMAX>>::layer_t>,
        dll::trainer<dll::sgd_trainer>, dll::weight_decay<dll::decay_type::L2>, dll::batch_size<10>>::dbn_t dbn_t;

    auto dataset = mnist::read_dataset_direct<std::vector, etl::dyn_matrix<float, 1>>(200);
    REQUIRE(!dataset.training_images.empty());

    mnist::binarize_dataset(dataset);

    auto dbn = std::make_unique<dbn_t>();

    dbn->pretrain(dataset.training_images, 20);

    auto error = dbn->fine_tune(dataset.training_images, dataset.training_labels, 200);
    REQUIRE(error < 1e-1);
}

//Here to test large batch size
TEST_CASE("dbn/sgd/6", "[dbn][mnist][sgd]") {
    typedef dll::dbn_desc<
        dll::dbn_layers<
            dll::rbm_desc<28 * 28, 100, dll::momentum, dll::batch_size<25>, dll::init_weights>::layer_t,
            dll::rbm_desc<100, 200, dll::momentum, dll::batch_size<25>>::layer_t,
            dll::rbm_desc<200, 10, dll::momentum, dll::batch_size<25>, dll::hidden<dll::unit_type::SOFTMAX>>::layer_t>,
        dll::trainer<dll::sgd_trainer>, dll::updater<dll::updater_type::MOMENTUM>,
        dll::weight_decay<dll::decay_type::L2>, dll::batch_size<100>>::dbn_t dbn_t;

    auto dataset = mnist::read_dataset_direct<std::vector, etl::dyn_matrix<float, 1>>(300);
    REQUIRE(!dataset.training_images.empty());

    mnist::binarize_dataset(dataset);

    auto dbn = std::make_unique<dbn_t>();

    dbn->pretrain(dataset.training_images, 10);

    auto error = dbn->fine_tune(dataset.training_images, dataset.training_labels, 100);
    REQUIRE(error < 1e-1);
}

// batch_mode: memory-conscious training path.
TEST_CASE("dbn/sgd/7", "[dbn][mnist][sgd][memory]") {
    typedef dll::dbn_desc<
        dll::dbn_layers<
            dll::rbm_desc<28 * 28, 100, dll::momentum, dll::batch_size<25>, dll::init_weights>::layer_t,
            dll::rbm_desc<100, 200, dll::momentum, dll::batch_size<25>>::layer_t,
            dll::rbm_desc<200, 10, dll::momentum, dll::batch_size<25>, dll::hidden<dll::unit_type::SOFTMAX>>::layer_t>,
        dll::trainer<dll::sgd_trainer>, dll::batch_mode, dll::batch_size<10>>::dbn_t dbn_t;

    auto dataset = mnist::read_dataset_direct<std::vector, etl::dyn_matrix<float, 1>>(500);
    REQUIRE(!dataset.training_images.empty());

    mnist::binarize_dataset(dataset);

    auto dbn = std::make_unique<dbn_t>();

    dbn->pretrain(dataset.training_images, 20);

    auto error = dbn->fine_tune(dataset.training_images, dataset.training_labels, 100);
    REQUIRE(error < 5e-2);

    TEST_CHECK(0.2);
}
158
0.651638
[ "vector" ]
81459c92fc25ae17d51501f563eee923a9d13042
17,422
cc
C++
src/frontend/builder.cc
hcho3/treelite
d73bed79a7fff7cc870881f59ebc45b13798d6fb
[ "Apache-2.0" ]
1
2020-12-28T06:19:45.000Z
2020-12-28T06:19:45.000Z
src/frontend/builder.cc
hcho3/treelite
d73bed79a7fff7cc870881f59ebc45b13798d6fb
[ "Apache-2.0" ]
1
2022-02-04T20:14:45.000Z
2022-02-04T20:14:45.000Z
src/frontend/builder.cc
hcho3/treelite
d73bed79a7fff7cc870881f59ebc45b13798d6fb
[ "Apache-2.0" ]
null
null
null
/*!
 * Copyright 2017 by Contributors
 * \file builder.cc
 * \brief model builder frontend
 * \author Philip Cho
 */

#include <treelite/frontend.h>
#include <treelite/tree.h>
#include <dmlc/registry.h>
#include <memory>
#include <queue>
#include "../c_api/c_api_error.h"

// On a failed precondition: record the message for the C API, log it, and
// make the enclosing function return false (so every builder method reports
// failure instead of crashing).
#define CHECK_EARLY_RETURN(x, msg)                              \
  if (!(x)) {                                                   \
    TreeliteAPISetLastError(msg);                               \
    dmlc::LogMessage(__FILE__, __LINE__).stream() << msg;       \
    return false;                                               \
  }

/* data structures with underscore prefixes are internal use only and
   do not have external linkage */
namespace {

// Mutable node of a tree under construction.  Starts kEmpty and is turned
// into a test node or a leaf by the TreeBuilder::Set* methods.
struct _Node {
  enum class _Status : int8_t {
    kEmpty, kNumericalTest, kCategoricalTest, kLeaf
  };
  union _Info {
    treelite::tl_float leaf_value;  // for leaf nodes
    treelite::tl_float threshold;   // for non-leaf nodes
  };
  /*
   * leaf vector: only used for random forests with
   * multi-class classification
   */
  std::vector<treelite::tl_float> leaf_vector;
  _Status status;
  /* pointers to parent, left and right children */
  _Node* parent;
  _Node* left_child;
  _Node* right_child;
  // split feature index
  unsigned feature_id;
  // default direction for missing values
  bool default_left;
  // extra info: leaf value or threshold
  _Info info;
  // (for numerical split)
  // operator to use for expression of form [fval] OP [threshold]
  // If the expression evaluates to true, take the left child;
  // otherwise, take the right child.
  treelite::Operator op;
  // (for categorical split)
  // list of all categories that belong to the left child node.
  // All others not in the list belong to the right child node.
  // Categories are integers ranging from 0 to (n-1), where n is the number of
  // categories in that particular feature. Let's assume n <= 64.
  std::vector<uint32_t> left_categories;

  inline _Node()
    : status(_Status::kEmpty),
      parent(nullptr), left_child(nullptr), right_child(nullptr) {}
};

// Tree under construction: keyed node storage plus a root designation.
// Nodes are owned by the `nodes` map (shared_ptr); `root` is non-owning.
struct _Tree {
  _Node* root;
  std::unordered_map<int, std::shared_ptr<_Node>> nodes;
  inline _Tree() : root(nullptr), nodes() {}
};

}  // anonymous namespace

namespace treelite {
namespace frontend {

DMLC_REGISTRY_FILE_TAG(builder);

// Pimpl payloads for the public TreeBuilder / ModelBuilder types.
struct TreeBuilderImpl {
  _Tree tree;
  inline TreeBuilderImpl() : tree() {}
};

struct ModelBuilderImpl {
  std::vector<TreeBuilder> trees;
  int num_feature;
  int num_output_group;
  bool random_forest_flag;
  std::vector<std::pair<std::string, std::string>> cfg;
  inline ModelBuilderImpl(int num_feature, int num_output_group,
                          bool random_forest_flag)
    : trees(), num_feature(num_feature),
      num_output_group(num_output_group),
      random_forest_flag(random_forest_flag), cfg() {
    CHECK_GT(num_feature, 0) << "ModelBuilder: num_feature must be positive";
    CHECK_GT(num_output_group, 0)
      << "ModelBuilder: num_output_group must be positive";
  }
};

TreeBuilder::TreeBuilder()
  : pimpl(common::make_unique<TreeBuilderImpl>()), ensemble_id(nullptr) {}
TreeBuilder::~TreeBuilder() {}

// Create an empty node under a caller-chosen key; fails on duplicate keys.
bool
TreeBuilder::CreateNode(int node_key) {
  auto& nodes = pimpl->tree.nodes;
  CHECK_EARLY_RETURN(nodes.count(node_key) == 0,
                     "CreateNode: nodes with duplicate keys are not allowed");
  nodes[node_key] = common::make_unique<_Node>();
  return true;
}

// Remove a node, detaching it from the root slot and from its children
// (children survive, orphaned).
bool
TreeBuilder::DeleteNode(int node_key) {
  auto& tree = pimpl->tree;
  auto& nodes = tree.nodes;
  CHECK_EARLY_RETURN(nodes.count(node_key) > 0,
                     "DeleteNode: no node found with node_key");
  _Node* node = nodes[node_key].get();
  if (tree.root == node) {  // deleting root
    tree.root = nullptr;
  }
  if (node->left_child != nullptr) {  // deleting a parent
    node->left_child->parent = nullptr;
  }
  if (node->right_child != nullptr) {  // deleting a parent
    node->right_child->parent = nullptr;
  }
  nodes.erase(node_key);
  return true;
}

// Designate an existing non-leaf, parentless node as the tree root.
bool
TreeBuilder::SetRootNode(int node_key) {
  auto& tree = pimpl->tree;
  auto& nodes = tree.nodes;
  CHECK_EARLY_RETURN(nodes.count(node_key) > 0,
                     "SetRootNode: no node found with node_key");
  _Node* node = nodes[node_key].get();
  CHECK_EARLY_RETURN(node->status != _Node::_Status::kLeaf,
                     "SetRootNode: cannot set a leaf node as root");
  CHECK_EARLY_RETURN(node->parent == nullptr,
                     "SetRootNode: a root node cannot have a parent");
  tree.root = node;
  return true;
}

// Turn an empty node into a numerical test node ([fval] op [threshold]),
// wiring up the given children; all three nodes must already exist and the
// children must be unattached non-roots.
bool
TreeBuilder::SetNumericalTestNode(int node_key, unsigned feature_id,
                                  Operator op, tl_float threshold,
                                  bool default_left, int left_child_key,
                                  int right_child_key) {
  auto& tree = pimpl->tree;
  auto& nodes = tree.nodes;
  CHECK_EARLY_RETURN(nodes.count(node_key) > 0,
                     "SetNumericalTestNode: no node found with node_key");
  CHECK_EARLY_RETURN(nodes.count(left_child_key) > 0,
                     "SetNumericalTestNode: no node found with left_child_key");
  CHECK_EARLY_RETURN(nodes.count(right_child_key) > 0,
                   "SetNumericalTestNode: no node found with right_child_key");
  _Node* node = nodes[node_key].get();
  _Node* left_child = nodes[left_child_key].get();
  _Node* right_child = nodes[right_child_key].get();
  CHECK_EARLY_RETURN(node->status == _Node::_Status::kEmpty,
                     "SetNumericalTestNode: cannot modify a non-empty node");
  CHECK_EARLY_RETURN(left_child->parent == nullptr,
                 "SetNumericalTestNode: node designated as left child already has "
                 "a parent");
  CHECK_EARLY_RETURN(right_child->parent == nullptr,
                "SetNumericalTestNode: node designated as right child already has "
                "a parent");
  CHECK_EARLY_RETURN(left_child != tree.root && right_child != tree.root,
                     "SetNumericalTestNode: the root node cannot be a child");
  node->status = _Node::_Status::kNumericalTest;
  node->left_child = nodes[left_child_key].get();
  node->left_child->parent = node;
  node->right_child = nodes[right_child_key].get();
  node->right_child->parent = node;
  node->feature_id = feature_id;
  node->default_left = default_left;
  node->info.threshold = threshold;
  node->op = op;
  return true;
}

bool
TreeBuilder::SetCategoricalTestNode(int node_key, unsigned feature_id, const std::vector<uint32_t>&
left_categories, bool default_left, int left_child_key, int right_child_key) { auto& tree = pimpl->tree; auto& nodes = tree.nodes; CHECK_EARLY_RETURN(nodes.count(node_key) > 0, "SetCategoricalTestNode: no node found with node_key"); CHECK_EARLY_RETURN(nodes.count(left_child_key) > 0, "SetCategoricalTestNode: no node found with left_child_key"); CHECK_EARLY_RETURN(nodes.count(right_child_key) > 0, "SetCategoricalTestNode: no node found with right_child_key"); _Node* node = nodes[node_key].get(); _Node* left_child = nodes[left_child_key].get(); _Node* right_child = nodes[right_child_key].get(); CHECK_EARLY_RETURN(node->status == _Node::_Status::kEmpty, "SetCategoricalTestNode: cannot modify a non-empty node"); CHECK_EARLY_RETURN(left_child->parent == nullptr, "SetCategoricalTestNode: node designated as left child already " "has a parent"); CHECK_EARLY_RETURN(right_child->parent == nullptr, "SetCategoricalTestNode: node designated as right child already " "has a parent"); CHECK_EARLY_RETURN(left_child != tree.root && right_child != tree.root, "SetCategoricalTestNode: the root node cannot be a child"); node->status = _Node::_Status::kCategoricalTest; node->left_child = nodes[left_child_key].get(); node->left_child->parent = node; node->right_child = nodes[right_child_key].get(); node->right_child->parent = node; node->feature_id = feature_id; node->default_left = default_left; node->left_categories = left_categories; return true; } bool TreeBuilder::SetLeafNode(int node_key, tl_float leaf_value) { auto& tree = pimpl->tree; auto& nodes = tree.nodes; CHECK_EARLY_RETURN(nodes.count(node_key) > 0, "SetLeafNode: no node found with node_key"); _Node* node = nodes[node_key].get(); CHECK_EARLY_RETURN(node->status == _Node::_Status::kEmpty, "SetLeafNode: cannot modify a non-empty node"); node->status = _Node::_Status::kLeaf; node->info.leaf_value = leaf_value; return true; } bool TreeBuilder::SetLeafVectorNode(int node_key, const std::vector<tl_float>& leaf_vector) { auto& tree 
= pimpl->tree; auto& nodes = tree.nodes; CHECK_EARLY_RETURN(nodes.count(node_key) > 0, "SetLeafVectorNode: no node found with node_key"); _Node* node = nodes[node_key].get(); CHECK_EARLY_RETURN(node->status == _Node::_Status::kEmpty, "SetLeafVectorNode: cannot modify a non-empty node"); node->status = _Node::_Status::kLeaf; node->leaf_vector = leaf_vector; return true; } ModelBuilder::ModelBuilder(int num_feature, int num_output_group, bool random_forest_flag) : pimpl(common::make_unique<ModelBuilderImpl>(num_feature, num_output_group, random_forest_flag)) {} ModelBuilder::~ModelBuilder() {} void ModelBuilder::SetModelParam(const char* name, const char* value) { pimpl->cfg.emplace_back(name, value); } int ModelBuilder::InsertTree(TreeBuilder* tree_builder, int index) { if (tree_builder == nullptr) { const char* msg = "InsertTree: not a valid tree builder"; LOG(INFO) << msg; TreeliteAPISetLastError(msg); return -1; // fail } if (tree_builder->ensemble_id != nullptr) { const char* msg = "InsertTree: tree is already part of another ensemble"; LOG(INFO) << msg; TreeliteAPISetLastError(msg); return -1; // fail } // check bounds for feature indices for (const auto& kv : tree_builder->pimpl->tree.nodes) { const _Node::_Status status = kv.second->status; if (status == _Node::_Status::kNumericalTest || status == _Node::_Status::kCategoricalTest) { const int fid = static_cast<int>(kv.second->feature_id); if (fid < 0 || fid >= pimpl->num_feature) { std::ostringstream oss; oss << "InsertTree: tree has an invalid split at node " << kv.first << ": feature id " << kv.second->feature_id << " is out of bound"; const std::string str = oss.str(); const char* msg = str.c_str(); LOG(INFO) << msg; TreeliteAPISetLastError(msg); return -1; // fail } } } // perform insertion auto& trees = pimpl->trees; if (index == -1) { trees.push_back(std::move(*tree_builder)); tree_builder->ensemble_id = static_cast<void*>(this); return static_cast<int>(trees.size()); } else { if 
(static_cast<size_t>(index) <= trees.size()) { trees.insert(trees.begin() + index, std::move(*tree_builder)); tree_builder->ensemble_id = static_cast<void*>(this); return index; } else { LOG(INFO) << "CreateTree: index out of bound"; return -1; // fail } } } TreeBuilder& ModelBuilder::GetTree(int index) { return pimpl->trees[index]; } const TreeBuilder& ModelBuilder::GetTree(int index) const { return pimpl->trees[index]; } bool ModelBuilder::DeleteTree(int index) { auto& trees = pimpl->trees; CHECK_EARLY_RETURN(static_cast<size_t>(index) < trees.size(), "DeleteTree: index out of bound"); trees.erase(trees.begin() + index); return true; } bool ModelBuilder::CommitModel(Model* out_model) { Model model; model.num_feature = pimpl->num_feature; model.num_output_group = pimpl->num_output_group; model.random_forest_flag = pimpl->random_forest_flag; // extra parameters InitParamAndCheck(&model.param, pimpl->cfg); // flag to check consistent use of leaf vector // 0: no leaf should use leaf vector // 1: every leaf should use leaf vector // -1: indeterminate int8_t flag_leaf_vector = -1; for (const auto& _tree_builder : pimpl->trees) { const auto& _tree = _tree_builder.pimpl->tree; CHECK_EARLY_RETURN(_tree.root != nullptr, "CommitModel: a tree has no root node"); model.trees.emplace_back(); Tree& tree = model.trees.back(); tree.Init(); // assign node ID's so that a breadth-wise traversal would yield // the monotonic sequence 0, 1, 2, ... 
std::queue<std::pair<const _Node*, int>> Q; // (internal pointer, ID) Q.push({_tree.root, 0}); // assign 0 to root while (!Q.empty()) { const _Node* node; int nid; std::tie(node, nid) = Q.front(); Q.pop(); CHECK_EARLY_RETURN(node->status != _Node::_Status::kEmpty, "CommitModel: encountered an empty node in the middle of a tree"); if (node->status == _Node::_Status::kNumericalTest) { CHECK_EARLY_RETURN(node->left_child != nullptr, "CommitModel: a test node lacks a left child"); CHECK_EARLY_RETURN(node->right_child != nullptr, "CommitModel: a test node lacks a right child"); CHECK_EARLY_RETURN(node->left_child->parent == node, "CommitModel: left child has wrong parent"); CHECK_EARLY_RETURN(node->right_child->parent == node, "CommitModel: right child has wrong parent"); tree.AddChilds(nid); tree[nid].set_numerical_split(node->feature_id, node->info.threshold, node->default_left, node->op); Q.push({node->left_child, tree[nid].cleft()}); Q.push({node->right_child, tree[nid].cright()}); } else if (node->status == _Node::_Status::kCategoricalTest) { CHECK_EARLY_RETURN(node->left_child != nullptr, "CommitModel: a test node lacks a left child"); CHECK_EARLY_RETURN(node->right_child != nullptr, "CommitModel: a test node lacks a right child"); CHECK_EARLY_RETURN(node->left_child->parent == node, "CommitModel: left child has wrong parent"); CHECK_EARLY_RETURN(node->right_child->parent == node, "CommitModel: right child has wrong parent"); tree.AddChilds(nid); tree[nid].set_categorical_split(node->feature_id, node->default_left, node->left_categories); Q.push({node->left_child, tree[nid].cleft()}); Q.push({node->right_child, tree[nid].cright()}); } else { // leaf node CHECK_EARLY_RETURN(node->left_child == nullptr && node->right_child == nullptr, "CommitModel: a leaf node cannot have children"); if (!node->leaf_vector.empty()) { // leaf vector exists CHECK_EARLY_RETURN(flag_leaf_vector != 0, "CommitModel: Inconsistent use of leaf vector: " "if one leaf node uses a leaf vector, 
" "*every* leaf node must use a leaf vector"); flag_leaf_vector = 1; // now every leaf must use leaf vector CHECK_EARLY_RETURN(node->leaf_vector.size() == model.num_output_group, "CommitModel: The length of leaf vector must be " "identical to the number of output groups"); tree[nid].set_leaf_vector(node->leaf_vector); } else { // ordinary leaf CHECK_EARLY_RETURN(flag_leaf_vector != 1, "CommitModel: Inconsistent use of leaf vector: " "if one leaf node does not use a leaf vector, " "*no other* leaf node can use a leaf vector"); flag_leaf_vector = 0; // now no leaf can use leaf vector tree[nid].set_leaf(node->info.leaf_value); } } } } if (flag_leaf_vector == 0) { if (model.num_output_group > 1) { // multiclass classification with gradient boosted trees CHECK_EARLY_RETURN(!model.random_forest_flag, "To use a random forest for multi-class classification, each leaf " "node must output a leaf vector specifying a probability " "distribution"); CHECK_EARLY_RETURN(pimpl->trees.size() % model.num_output_group == 0, "For multi-class classifiers with gradient boosted trees, the number " "of trees must be evenly divisible by the number of output groups"); } } else if (flag_leaf_vector == 1) { // multiclass classification with a random forest CHECK_EARLY_RETURN(model.random_forest_flag, "In multi-class classifiers with gradient boosted trees, each leaf " "node must output a single floating-point value."); } else { LOG(FATAL) << "Impossible thing happened: model has no leaf node!"; } *out_model = std::move(model); return true; } } // namespace frontend } // namespace treelite
38.715556
81
0.63328
[ "vector", "model" ]
8145d833692785119f1b14295ee5517d2eb0ef31
14,146
cpp
C++
scopeprotocols/EyeDecoder2.cpp
smunaut/scopehal
77af237e29f73da7b7b86fd21166bc1a221a1404
[ "BSD-3-Clause" ]
null
null
null
scopeprotocols/EyeDecoder2.cpp
smunaut/scopehal
77af237e29f73da7b7b86fd21166bc1a221a1404
[ "BSD-3-Clause" ]
null
null
null
scopeprotocols/EyeDecoder2.cpp
smunaut/scopehal
77af237e29f73da7b7b86fd21166bc1a221a1404
[ "BSD-3-Clause" ]
null
null
null
/*********************************************************************************************************************** * * * ANTIKERNEL v0.1 * * * * Copyright (c) 2012-2019 Andrew D. Zonenberg * * All rights reserved. * * * * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the * * following conditions are met: * * * * * Redistributions of source code must retain the above copyright notice, this list of conditions, and the * * following disclaimer. * * * * * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the * * following disclaimer in the documentation and/or other materials provided with the distribution. * * * * * Neither the name of the author nor the names of any contributors may be used to endorse or promote products * * derived from this software without specific prior written permission. * * * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * * THE AUTHORS BE HELD LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * * POSSIBILITY OF SUCH DAMAGE. 
* * * ***********************************************************************************************************************/ #include "../scopehal/scopehal.h" #include "EyeDecoder2.h" #include <algorithm> using namespace std; //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Construction / destruction EyeCapture2::EyeCapture2(size_t width, size_t height) : m_width(width) , m_height(height) { size_t npix = width*height; m_accumdata = new int64_t[npix]; m_outdata = new float[npix]; for(size_t i=0; i<npix; i++) { m_outdata[i] = 0; m_accumdata[i] = 0; } } EyeCapture2::~EyeCapture2() { delete[] m_accumdata; m_accumdata = NULL; delete[] m_outdata; m_outdata = NULL; } size_t EyeCapture2::GetDepth() const { return 0; } int64_t EyeCapture2::GetEndTime() const { return 0; } int64_t EyeCapture2::GetSampleStart(size_t /*i*/) const { return 0; } int64_t EyeCapture2::GetSampleLen(size_t /*i*/) const { return 0; } bool EyeCapture2::EqualityTest(size_t /*i*/, size_t /*j*/) const { return false; } bool EyeCapture2::SamplesAdjacent(size_t /*i*/, size_t /*j*/) const { return false; } void EyeCapture2::Normalize() { //Normalize it size_t len = m_width * m_height; int64_t nmax = 0; for(size_t i=0; i<len; i++) nmax = max(m_accumdata[i], nmax); if(nmax == 0) nmax = 1; float norm = 2.0f / nmax; for(size_t i=0; i<len; i++) m_outdata[i] = m_accumdata[i] * norm; //Once the output is normalized, check for any rows with no bin hits due to roundoff and interpolate into them. 
for(size_t y=1; y+1 < m_height; y++) { bool empty = true; for(size_t x=0; x<m_width; x++) { if(m_accumdata[y*m_width + x]) { empty = false; break; } } if(empty) { for(size_t x=0; x<m_width; x++) { float out1 = m_outdata[(y-1)*m_width + x]; float out2 = m_outdata[(y+1)*m_width + x]; m_outdata[y*m_width + x] = (out1 + out2) / 2; } } } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Construction / destruction EyeDecoder2::EyeDecoder2(string color) : ProtocolDecoder(OscilloscopeChannel::CHANNEL_TYPE_COMPLEX, color, CAT_ANALYSIS) { //Set up channels m_signalNames.push_back("din"); m_channels.push_back(NULL); m_signalNames.push_back("clk"); m_channels.push_back(NULL); m_uiWidth = 0; } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Factory methods ChannelRenderer* EyeDecoder2::CreateRenderer() { return NULL; } bool EyeDecoder2::ValidateChannel(size_t i, OscilloscopeChannel* channel) { if( (i == 0) && (channel->GetType() == OscilloscopeChannel::CHANNEL_TYPE_ANALOG) ) return true; if( (i == 1) && (channel->GetType() == OscilloscopeChannel::CHANNEL_TYPE_DIGITAL) ) return true; return false; } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Accessors string EyeDecoder2::GetProtocolName() { return "Eye pattern"; } void EyeDecoder2::SetDefaultName() { char hwname[256]; snprintf(hwname, sizeof(hwname), "Eye(%s, %s)", m_channels[0]->m_displayname.c_str(), m_channels[1]->m_displayname.c_str()); m_hwname = hwname; m_displayname = m_hwname; } bool EyeDecoder2::IsOverlay() { return false; } bool EyeDecoder2::NeedsConfig() { return true; } double EyeDecoder2::GetVoltageRange() { return m_channels[0]->GetVoltageRange(); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Actual decoder 
logic /* bool EyeDecoder::DetectModulationLevels(AnalogCapture* din, EyeCapture* cap) { LogDebug("Detecting modulation levels\n"); LogIndenter li; //Find the min/max voltage of the signal (used to set default bounds for the render). //Additionally, generate a histogram of voltages. We need this to configure the trigger(s) correctly //and do measurements on the eye opening(s) - since MLT-3, PAM-x, etc have multiple openings. cap->m_minVoltage = 999; cap->m_maxVoltage = -999; map<int, int64_t> vhist; //1 mV bins for(size_t i=0; i<din->m_samples.size(); i++) { AnalogSample sin = din->m_samples[i]; float f = sin; vhist[f * 1000] ++; if(f > cap->m_maxVoltage) cap->m_maxVoltage = f; if(f < cap->m_minVoltage) cap->m_minVoltage = f; } LogDebug("Voltage range is %.3f to %.3f V\n", cap->m_minVoltage, cap->m_maxVoltage); //Crunch the histogram to find the number of signal levels in use. //We're looking for peaks of significant height (25% of maximum or more) and not too close to another peak. float dv = cap->m_maxVoltage - cap->m_minVoltage; int neighborhood = floor(dv * 50); //dV/20 converted to mV LogDebug("Looking for levels at least %d mV apart\n", neighborhood); int64_t maxpeak = 0; for(auto it : vhist) { if(it.second > maxpeak) maxpeak = it.second; } LogDebug("Highest histogram peak is %ld points\n", maxpeak); int64_t peakthresh = maxpeak/8; int64_t second_peak = 0; double second_weighted = 0; for(auto it : vhist) { int64_t count = it.second; //If we're pretty close to a taller peak (within neighborhood mV) then don't do anything int mv = it.first; bool bigger = false; for(int v=mv-neighborhood; v<=mv+neighborhood; v++) { auto jt = vhist.find(v); if(jt == vhist.end()) continue; if(jt->second > count) { bigger = true; continue; } } if(bigger) continue; //Search the neighborhood around us and do a weighted average to find the center of the bin int64_t weighted = 0; int64_t wcount = 0; for(int v=mv-neighborhood; v<=mv+neighborhood; v++) { auto jt = vhist.find(v); if(jt == 
vhist.end()) continue; int64_t c = jt->second; wcount += c; weighted += c*v; } if(count < peakthresh) { //Skip peaks that aren't tall enough... but still save the second highest if(count > second_peak) { second_peak = count; second_weighted = weighted * 1e-3f / wcount; } continue; } cap->m_signalLevels.push_back(weighted * 1e-3f / wcount); } //Special case: if the signal has only one level it might be NRZ with a really low duty cycle //Add the second highest peak in this case if(cap->m_signalLevels.size() == 1) cap->m_signalLevels.push_back(second_weighted); sort(cap->m_signalLevels.begin(), cap->m_signalLevels.end()); LogDebug(" Signal appears to be using %d-level modulation\n", (int)cap->m_signalLevels.size()); for(auto v : cap->m_signalLevels) LogDebug(" %6.3f V\n", v); //Now that signal levels are sorted, make sure they're spaced well. //If we have levels that are too close to each other, skip them for(size_t i=0; i<cap->m_signalLevels.size()-1; i++) { float delta = fabs(cap->m_signalLevels[i] - cap->m_signalLevels[i+1]); LogDebug("Delta at i=%zu is %.3f\n", i, delta); //TODO: fine tune this threshold adaptively based on overall signal amplitude? if(delta < 0.175) { LogIndenter li; LogDebug("Too small\n"); //Remove the innermost point (closer to zero) //This is us if we're positive, but the next one if negative! if(cap->m_signalLevels[i] < 0) cap->m_signalLevels.erase(cap->m_signalLevels.begin() + (i+1) ); else cap->m_signalLevels.erase(cap->m_signalLevels.begin() + i); } } //Figure out decision points (eye centers) //FIXME: This doesn't work well for PAM! 
Only MLT* for(size_t i=0; i<cap->m_signalLevels.size()-1; i++) { float vlo = cap->m_signalLevels[i]; float vhi = cap->m_signalLevels[i+1]; cap->m_decisionPoints.push_back(vlo + (vhi-vlo)/2); } //LogDebug(" Decision points:\n"); //for(auto v : cap->m_decisionPoints) // LogDebug(" %6.3f V\n", v); //Sanity check if(cap->m_signalLevels.size() < 2) { LogDebug("Couldn't find at least two distinct symbol voltages\n"); delete cap; return false; } return true; } */ void EyeDecoder2::Refresh() { static double total_time = 0; static double total_frames = 0; LogIndenter li; //Get the input data if( (m_channels[0] == NULL) || (m_channels[1] == NULL) ) { SetData(NULL); return; } auto waveform = dynamic_cast<AnalogCapture*>(m_channels[0]->GetData()); auto clock = dynamic_cast<DigitalCapture*>(m_channels[1]->GetData()); if( (waveform == NULL) || (clock == NULL) ) { SetData(NULL); return; } //Can't do much if we have no samples to work with if( (waveform->GetDepth() == 0) || (clock->GetDepth() == 0) ) { SetData(NULL); return; } double start = GetTime(); //Initialize the capture //TODO: timestamps? do we need those? 
EyeCapture2* cap = dynamic_cast<EyeCapture2*>(m_data); if(cap == NULL) cap = new EyeCapture2(m_width, m_height); cap->m_timescale = 1; int64_t* data = cap->GetAccumData(); //Process the eye size_t iclock = 0; double awidth = 0; int64_t nwidth = 0; float yscale = m_height / m_channels[0]->GetVoltageRange(); float fwidth = m_width / 2.0f; float ymid = m_height / 2; for(auto& samp : waveform->m_samples) { //Stop when we get to the end if(iclock + 1 >= clock->GetDepth()) break; //Look up time of the starting and ending clock edges int64_t tclock = clock->GetSampleStart(iclock) * clock->m_timescale; //int64_t tend = clock->GetSampleStart(iclock+1) * clock->m_timescale; int64_t twidth = clock->GetSampleLen(iclock); awidth += twidth; nwidth ++; //Find time of this sample int64_t tstart = samp.m_offset * waveform->m_timescale + waveform->m_triggerPhase; //If it's past the end of the current UI, increment the clock int64_t offset = tstart - tclock; if(offset < 0) continue; if(offset > twidth) { iclock ++; offset -= twidth; } //LogDebug("offset = %ld, twidth = %ld\n",offset, twidth); //Find (and sanity check) the Y coordinate size_t pixel_y = round( (samp.m_sample * yscale) + ymid ); if(pixel_y >= m_height) continue; int64_t* row = data + pixel_y*m_width; //Sampling clock is the middle of the UI, not the start. //Anything more than half a UI right of the clock is negative. 
int64_t halfwidth = twidth/2; if(offset > halfwidth) offset = -twidth + offset; if(offset < -halfwidth) continue; //Plot each point 3 times for center/left/right portions of the eye //Map -twidth to +twidth to 0...m_width int64_t xpos[] = {offset, offset + twidth, -twidth + offset }; float scale = fwidth / twidth; for(auto x : xpos) { size_t pixel_x = round((x + twidth) * scale); if(pixel_x < m_width) row[pixel_x] ++; } } m_uiWidth = round(awidth / nwidth); cap->Normalize(); SetData(cap); double dt = GetTime() - start; total_frames ++; total_time += dt; LogTrace("Refresh took %.3f ms (avg %.3f)\n", dt * 1000, (total_time * 1000) / total_frames); }
31.02193
120
0.564753
[ "render" ]
814638043465736b9ddcb0b0f158e7d3f551d393
889
cpp
C++
Algorithms/Mathematical-Algorithms/Number-Theory/linear_sieve_algorithm.cpp
tensorush/Computer-Scientists-Toolkit
f48aadf6387b935ac593f6a5513352c3bf562cb0
[ "MIT" ]
9
2021-07-11T19:53:36.000Z
2022-03-28T15:04:38.000Z
Algorithms/Mathematical-Algorithms/Number-Theory/linear_sieve_algorithm.cpp
geotrush/Computer-Scientists-Toolkit
f48aadf6387b935ac593f6a5513352c3bf562cb0
[ "MIT" ]
1
2022-01-18T09:49:36.000Z
2022-01-18T17:50:12.000Z
Algorithms/Mathematical-Algorithms/Number-Theory/linear_sieve_algorithm.cpp
geotrush/Computer-Scientists-Toolkit
f48aadf6387b935ac593f6a5513352c3bf562cb0
[ "MIT" ]
2
2021-11-15T08:02:25.000Z
2022-03-21T14:29:15.000Z
/* Linear Sieve Algorithm ---------------------- Time: O(n) Space: O(n) */ #include <iostream> #include <vector> auto LinearSieveAlgorithm(const unsigned& n) { unsigned numPrimes; std::vector<unsigned> primes, primeDivs(n + 1); for (unsigned i = 2; i <= n; ++i) { if (primeDivs[i] == 0) { primeDivs[i] = i; primes.emplace_back(i); } numPrimes = primes.size(); for (unsigned j = 0, x = i * 2; j < numPrimes && primes[j] <= primeDivs[i] && x <= n; ++j, x = i * primes[j]) { primeDivs[x] = primes[j]; } } return primeDivs; } int main() { unsigned n; std::cin >> n; auto smallestPrimeDivisors = LinearSieveAlgorithm(n); for (unsigned i = 2; i <= n; ++i) { std::cout << i << " | " << smallestPrimeDivisors[i] << std::endl; } return EXIT_SUCCESS; }
25.4
119
0.511811
[ "vector" ]
8147ee8803cf1f8d7b6667ea645277eb32d23b85
635
cpp
C++
HDU/1257/12867082_AC_46ms_1716kB.cpp
BakaErii/ACM_Collection
d368b15c7f1c84472424d5e61e5ebc667f589025
[ "WTFPL" ]
null
null
null
HDU/1257/12867082_AC_46ms_1716kB.cpp
BakaErii/ACM_Collection
d368b15c7f1c84472424d5e61e5ebc667f589025
[ "WTFPL" ]
null
null
null
HDU/1257/12867082_AC_46ms_1716kB.cpp
BakaErii/ACM_Collection
d368b15c7f1c84472424d5e61e5ebc667f589025
[ "WTFPL" ]
null
null
null
/** * @author Moe_Sakiya sakiya@tun.moe * @date 2018-01-23 13:10:41 * */ #include <iostream> #include <string> #include <algorithm> #include <set> #include <map> #include <vector> #include <stack> #include <queue> #include <cstdio> #include <cstring> #include <cstdlib> #include <cmath> using namespace std; int dp[30001], arr[30001]; int main(void) { int ans, n, i, j; while (cin >> n) { ans = 1; for (i = 0; i < n; i++) { cin >> arr[i]; dp[i] = 1; for (j = 0; j < i; j++) if (arr[j] < arr[i]) dp[i] = max(dp[i], dp[j] + 1); ans = max(ans, dp[i]); } cout << ans << endl; } return 0; }
15.875
40
0.548031
[ "vector" ]
8148f7595f0e3e7f75a82f03ded75e77377dbaf8
3,082
cc
C++
tensorflow_serving/util/any_ptr_test.cc
mzhang-code/serving
527c6f2173eba584ebdca4f8b11ae3c0550ab1a9
[ "Apache-2.0" ]
5,791
2016-02-16T17:50:06.000Z
2022-03-31T11:53:10.000Z
tensorflow_serving/util/any_ptr_test.cc
mzhang-code/serving
527c6f2173eba584ebdca4f8b11ae3c0550ab1a9
[ "Apache-2.0" ]
1,618
2016-02-16T18:04:00.000Z
2022-03-30T07:24:28.000Z
tensorflow_serving/util/any_ptr_test.cc
mzhang-code/serving
527c6f2173eba584ebdca4f8b11ae3c0550ab1a9
[ "Apache-2.0" ]
2,501
2016-02-16T19:57:43.000Z
2022-03-27T02:43:49.000Z
/* Copyright 2016 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow_serving/util/any_ptr.h" #include <gtest/gtest.h> #include "absl/base/attributes.h" namespace tensorflow { namespace serving { namespace { TEST(AnyPtrTest, SetAndGet) { AnyPtr ptr; int object; // Implicitly settable/constructable from a raw pointer. ptr = &object; EXPECT_EQ(&object, ptr.get<int>()); EXPECT_EQ(nullptr, ptr.get<bool>()); // Implicitly settable/constructable from nullptr. ptr = nullptr; EXPECT_EQ(nullptr, ptr.get<int>()); EXPECT_EQ(nullptr, ptr.get<bool>()); } TEST(AnyPtrTest, ConstCorrect) { AnyPtr ptr; const int object = 0; ptr = &object; EXPECT_EQ(nullptr, ptr.get<int>()); EXPECT_EQ(&object, ptr.get<const int>()); } // Tests that a dynamic relationship between two classes doesn't cause any sort // of type-punning. TEST(AnyPtrTest, BaseClass) { class Base { public: virtual ~Base() {} private: int unused_base_var_ ABSL_ATTRIBUTE_UNUSED = 0; }; class Child : public Base { public: ~Child() override {} private: int unused_child_var_ ABSL_ATTRIBUTE_UNUSED = 0; }; AnyPtr ptr; Child c; ptr = &c; // Make sure casting to base returns null. This may work in some trivial // cases, but allowing down-casting in AnyPtr could break if, for example, // multiple inheretance is being used. EXPECT_EQ(nullptr, ptr.get<Base>()); // Getting the pointer as the child type should work. 
EXPECT_EQ(&c, ptr.get<Child>()); // Make sure accessing as base works if we store the pointer as the base // class. ptr = static_cast<Base*>(&c); EXPECT_EQ(&c, ptr.get<Base>()); EXPECT_EQ(nullptr, ptr.get<Child>()); } struct Destructable { ~Destructable() { *destroyed = true; } bool* const destroyed; }; TEST(UniqueAnyPtrTest, SetGetAndDestroy) { bool destroyed = false; UniqueAnyPtr ptr; // Move constructable. ptr = UniqueAnyPtr{std::unique_ptr<Destructable>{new Destructable{&destroyed}}}; EXPECT_EQ(&destroyed, ptr.get<Destructable>()->destroyed); EXPECT_EQ(nullptr, ptr.get<int>()); ASSERT_FALSE(destroyed); // Implicitly settable/constructable from nullptr. ptr = nullptr; EXPECT_TRUE(destroyed); } TEST(UniqueAnyPtrTest, MoveConstruction) { UniqueAnyPtr ptr1 = UniqueAnyPtr(std::unique_ptr<int>(new int(1))); UniqueAnyPtr ptr2(std::move(ptr1)); ASSERT_EQ(1, *ptr2.get<int>()); } } // namespace } // namespace serving } // namespace tensorflow
26.118644
80
0.692083
[ "object" ]
814c07b1ed3aa1ca29a1272279144f4934d3c626
4,134
cpp
C++
saber/lite/funcs/neon/saber_fc.cpp
vin-huang/Anakin
8fc4b82ebaf974a6e052fe3690e41d678de4aa03
[ "Apache-2.0" ]
null
null
null
saber/lite/funcs/neon/saber_fc.cpp
vin-huang/Anakin
8fc4b82ebaf974a6e052fe3690e41d678de4aa03
[ "Apache-2.0" ]
null
null
null
saber/lite/funcs/neon/saber_fc.cpp
vin-huang/Anakin
8fc4b82ebaf974a6e052fe3690e41d678de4aa03
[ "Apache-2.0" ]
null
null
null
#include "saber/lite/funcs/saber_fc.h"
#ifdef USE_ARM_PLACE
#include "saber/lite/funcs/neon/impl/sgemv_arm.h"
namespace anakin{
namespace saber{
namespace lite{

//! Adds the per-channel bias vector to every row of the FC output.
//! \param tensor  output buffer laid out as [num, channel], updated in place
//! \param bias    bias vector of length `channel`
//! \param num     number of rows (batch size)
//! \param channel number of output channels per row
template <typename Dtype>
void fill_bias_fc(Dtype* tensor, const Dtype* bias, const int num, const int channel);

template <>
void fill_bias_fc<float>(float* tensor, const float* bias, const int num, const int channel) {
    for (int j = 0; j < num; ++j) {
        //! BUGFIX: `cnt` and `remain` used to be computed once before this
        //! loop, but both are decremented to zero while processing the first
        //! row, so rows 1..num-1 never received the bias. They must be
        //! re-initialized for every row.
        int cnt = channel >> 2;   //! groups of 4 floats handled by NEON
        int remain = channel & 3; //! scalar tail elements
        const float* ptr_bias = bias;
        float* ptr_out = tensor + j * channel;
        if (cnt > 0) {
            //! vectorized body: out[0:4] += bias[0:4] per iteration.
            //! A numeric local label ("1:"/"1b") is used instead of a named
            //! label so the statement can be emitted more than once per TU
            //! without duplicate-symbol assembler errors. "cc" is clobbered
            //! by `subs`; "memory" tells the compiler the stores are visible.
            asm(
            "1: \n"
            "vld1.32 {d0-d1}, [%[ptr_out]] @ load data\n"
            "vld1.32 {d2-d3}, [%[ptr_bias]]! @ load data\n"
            "vadd.f32 q2, q0, q1 @ add bias\n"
            "vst1.32 {d4-d5}, [%[ptr_out]]! @ store result\n"
            "subs %[cnt], #1 @ loop count -1\n"
            "bne 1b @ jump to main loop\n"
            :[ptr_out] "+r"(ptr_out), [ptr_bias] "+r"(ptr_bias), \
                [cnt] "+r"(cnt)
            :
            :"q0", "q1", "q2", "cc", "memory"
            );
        }
        for (; remain > 0; remain--) {
            *(ptr_out++) += *(ptr_bias++);
        }
    }
}

//! Constructs the FC op and stores the layer parameters; weights/bias are
//! borrowed pointers owned by the caller.
SaberFc::SaberFc(int axis, int num_output, bool flag_trans, bool flag_bias, \
    const float* weights, const float* bias) {
    _axis = axis;
    _num_output = num_output;
    _flag_trans = flag_trans;
    _bias_term = flag_bias;
    _weights = weights;
    _bias = bias;
}

//! Re-loads the layer parameters (same semantics as the constructor).
SaberStatus SaberFc::load_param(int axis, int num_output, bool flag_trans, bool flag_bias, \
    const float *weights, const float *bias) {
    _axis = axis;
    _num_output = num_output;
    _flag_trans = flag_trans;
    _bias_term = flag_bias;
    _weights = weights;
    _bias = bias;
    return SaberSuccess;
}

//! Output shape: all dims before `_axis` are kept, everything from `_axis`
//! on collapses into a single dimension of size `_num_output`.
SaberStatus SaberFc::compute_output_shape(const std::vector<Tensor<CPU, AK_FLOAT> *> &inputs,
                                          std::vector<Tensor<CPU, AK_FLOAT> *> &outputs) {

    Shape shape_out = inputs[0]->valid_shape();
    int n = _num_output;

    shape_out.resize(_axis + 1);
    shape_out[_axis] = n;
    return outputs[0]->set_shape(shape_out);
}

//! Prepares the GEMM context: m = batch rows, k = input features,
//! n = output channels. The GEMM path is only initialized when it will be
//! used (batch > 1 or transposed weights); otherwise dispatch() uses sgemv.
SaberStatus SaberFc::init(const std::vector<Tensor<CPU, AK_FLOAT> *> &inputs, \
    std::vector<Tensor<CPU, AK_FLOAT> *> &outputs, Context &ctx) {

    _ctx = ctx;
    int threads = _ctx.get_act_ids().size();

    _m = inputs[0]->count_valid(0, _axis);
    _k = inputs[0]->count_valid(_axis, inputs[0]->dims());
    _n = _num_output;

    int l1_cache = Env::cur_env()._L1_cache;
    int l2_cache = Env::cur_env()._L2_cache;
    //! if L1 cache size is not provided, set to 31K
    l1_cache = l1_cache > 0? l1_cache : 31000;
    //! if L2 cache size is not provided, set to 2M
    l2_cache = l2_cache > 0? l2_cache : 2000000;

    printf("fc weights transpose: %s\n", _flag_trans? "true" : "false");
    if (_m > 1 || _flag_trans) {
        _gemmer.init(l1_cache, l2_cache, _m, _n, _k, false, !_flag_trans, threads);
    }
    return SaberSuccess;
}

//! Runs the FC layer: GEMM for batched/transposed cases (bias added
//! afterwards by fill_bias_fc), otherwise a single matrix-vector product
//! via sgemv / sgemv_bias.
SaberStatus SaberFc::dispatch(\
    const std::vector<Tensor<CPU, AK_FLOAT> *>& inputs, \
    std::vector<Tensor<CPU, AK_FLOAT> *>& outputs) {

    const float* din = inputs[0]->data();
    float* dout = outputs[0]->mutable_data();
    const float* weights = _weights;
    const float* bias = nullptr;
    if (_bias_term) {
        bias = _bias;
    }

    if (_m > 1 || _flag_trans) {
        _gemmer(din, _k, weights, (_flag_trans? _n : _k), dout, _n, 1.f, 0.f, false);
        if (_bias_term) {
            fill_bias_fc(dout, bias, _m, _n);
        }
    } else {
        if (_bias_term) {
            sgemv_bias(false, _n, _k, weights, din, dout, bias);
        } else {
            sgemv(false, _n, _k, weights, din, dout);
        }
    }
    return SaberSuccess;
}

} //namespace lite

} //namespace saber

} //namespace anakin

#endif
28.708333
94
0.570392
[ "shape", "vector" ]
814d2fc234b7508ba584d991446c181e359997fe
3,792
cpp
C++
ksn-2020-mencari-bola/communicator.cpp
ia-toki/ksn-2020
87cab7916383ae1e442ea991b512909c18036a3c
[ "CC-BY-4.0" ]
null
null
null
ksn-2020-mencari-bola/communicator.cpp
ia-toki/ksn-2020
87cab7916383ae1e442ea991b512909c18036a3c
[ "CC-BY-4.0" ]
null
null
null
ksn-2020-mencari-bola/communicator.cpp
ia-toki/ksn-2020
87cab7916383ae1e442ea991b512909c18036a3c
[ "CC-BY-4.0" ]
null
null
null
#include <cmath> #include <algorithm> #include <fstream> #include <iostream> #include <vector> // ******** Start of communicator utils ******** std::ifstream inp; void ac(std::string reason="") { std::cerr << "AC\n"; if (reason != "") { std::cerr << reason << '\n'; } exit(0); } void wa(std::string reason="") { std::cerr << "WA\n"; if (reason != "") { std::cerr << reason << '\n'; } exit(0); } void ok(double points, std::string reason="") { std::cerr << "OK\n"; std::cerr << points; if (reason != "") { std::cerr << " " << reason << '\n'; } else { std::cerr << '\n'; } exit(0); } void registerCommunicator(int argc, char* argv[]) { inp = std::ifstream(argv[1]); } template<class T> inline void readStream(std::istream &is, T &t) { if (!(is >> t)) wa(); } void eof(std::istream &is) { std::string dummy; if (is >> dummy) wa(); } // ******** End of communicator utils ******** const int kMaxN = 1024; const int kMaxSetQueried = 10; int N, K; std::vector<int> balls; int setsQueried; double totalCost; void init() { inp >> N >> K; balls.resize(K); for (int i = 0; i < K; ++i) { inp >> balls[i]; } std::sort(balls.begin(), balls.end()); setsQueried = 0; totalCost = 0; std::cout << N << " " << K << '\n'; fflush(stdout); } bool containBalls(const std::vector<int> &boxes) { for (int ball: balls) { if (std::binary_search(boxes.begin(), boxes.end(), ball)) return true; } return false; } void ask() { int numberOfSets; readStream(std::cin, numberOfSets); if (numberOfSets < 1 || numberOfSets > kMaxSetQueried) wa(); setsQueried += numberOfSets; if (setsQueried > N) wa(); std::vector<std::vector<int>> boxesSets; int minSize = N; for (int i = 0; i < numberOfSets; ++i) { int numberOfBoxes; readStream(std::cin, numberOfBoxes); if (numberOfBoxes < 1 || numberOfBoxes > N) wa(); minSize = (numberOfBoxes < minSize ? 
numberOfBoxes : minSize); std::vector<int> boxes(numberOfBoxes); for (int j = 0; j < numberOfBoxes; ++j) { readStream(std::cin, boxes[j]); if (boxes[j] < 1 || boxes[j] > N) wa(); } std::sort(boxes.begin(), boxes.end()); if (std::unique(boxes.begin(), boxes.end()) != boxes.end()) wa(); boxesSets.push_back(boxes); } totalCost += 1.0 / minSize; for (const std::vector<int> &boxes: boxesSets) { if (containBalls(boxes)) std::cout << "YA\n"; else std::cout << "TIDAK\n"; } fflush(stdout); } void guess() { std::vector<int> guessedBalls(K); for (int i = 0; i < K; ++i) { readStream(std::cin, guessedBalls[i]); } eof(std::cin); std::sort(guessedBalls.begin(), guessedBalls.end()); if (balls != guessedBalls) wa(); } void play() { while (true) { std::string instruction; readStream(std::cin, instruction); if (instruction.length() != 1) wa(); switch (instruction[0]) { case '?': ask(); break; case '!': guess(); return; default: wa(); } } } void finishWithScoring() { if (K == 1) { double judgeBest = (double) 2 / N; if (totalCost >= 1) { ok(std::max(0.0 , floor(10 - 10 * sqrt((totalCost - 1) / (N - 1))))); } else if (totalCost > judgeBest) { ok(floor(30 - 20 * pow(1 - judgeBest / totalCost, 0.8))); } else { ac(); } } else if (K == 2) { double judgeBest = (double) 4 / N; if (totalCost >= 1) { ok(std::max(0.0 , floor(20 - 20 * sqrt((totalCost - 1) / (N - 1))))); } else if (totalCost > judgeBest) { ok(floor(70 - 50 * pow(1 - judgeBest / totalCost, 0.8))); } else { ac(); } } exit(42); } int main(int argc, char* argv[]) { registerCommunicator(argc, argv); init(); play(); finishWithScoring(); }
21.423729
75
0.552215
[ "vector" ]
8157b07525e0df37822a31eb43838a7c5847db48
1,217
cpp
C++
SDLGameDev/SDLGameDev/src/GameObject.cpp
rem821/sdl2-opengl-gamedev
84cf4e3fbf92285ab2ab71dd8ba5b92341ea0f52
[ "MIT" ]
null
null
null
SDLGameDev/SDLGameDev/src/GameObject.cpp
rem821/sdl2-opengl-gamedev
84cf4e3fbf92285ab2ab71dd8ba5b92341ea0f52
[ "MIT" ]
null
null
null
SDLGameDev/SDLGameDev/src/GameObject.cpp
rem821/sdl2-opengl-gamedev
84cf4e3fbf92285ab2ab71dd8ba5b92341ea0f52
[ "MIT" ]
null
null
null
#include "GameObject.h"
#include "TextureManager.h"
#include "fmt/core.h"

// Sprite-backed entity: a screen position plus source/destination
// rectangles used for SDL blitting.

// Loads the sprite sheet via TextureManager and places the object.
GameObject::GameObject(SDL_Renderer* renderer, const char* texturesheet, int xpos, int ypos, int width, int height) {
    objTexture = TextureManager::loadTexture(renderer, texturesheet);
    this->xpos = xpos;
    this->ypos = ypos;

    srcRect.x = 0;
    srcRect.y = 0;
    srcRect.w = width;
    srcRect.h = height;
}

// Wraps an already-created texture; the caller keeps ownership of it.
GameObject::GameObject(SDL_Texture* texture, int xpos, int ypos, int width, int height) {
    objTexture = texture;
    this->xpos = xpos;
    this->ypos = ypos;

    srcRect.x = 0;
    srcRect.y = 0;
    srcRect.w = width;
    srcRect.h = height;
}

GameObject::~GameObject() {
    // Nothing is released here; texture lifetime is managed elsewhere.
}

int GameObject::getPosX() {
    return xpos;
}

int GameObject::getPosY() {
    return ypos;
}

// Teleports the object to the given screen coordinates.
void GameObject::move(int x, int y) {
    xpos = x;
    ypos = y;
}

// Clamps the position to the 1600x1200 playfield and syncs destRect so the
// next render() draws at the current position.
void GameObject::update() {
    if (xpos < 0) {
        xpos = 0;
    } else if (xpos > 1600) {
        xpos = 1600;
    }
    if (ypos < 0) {
        ypos = 0;
    } else if (ypos > 1200) {
        ypos = 1200;
    }

    destRect.x = xpos;
    destRect.y = ypos;
    destRect.w = srcRect.w;
    destRect.h = srcRect.h;
}

// Blits the current frame to the renderer.
void GameObject::render(SDL_Renderer* renderer) {
    SDL_RenderCopy(renderer, objTexture, &srcRect, &destRect);
}
20.982759
117
0.681183
[ "render" ]
815cb77ceb343e51c3717112214e980cec27aeda
415
hh
C++
include/counter_array.hh
martinsk/etally_core
b1fc30ca79e93bea5a3efc63d01ef9dd67c9c674
[ "MIT" ]
1
2015-12-26T18:57:52.000Z
2015-12-26T18:57:52.000Z
include/counter_array.hh
martinsk/etally_core
b1fc30ca79e93bea5a3efc63d01ef9dd67c9c674
[ "MIT" ]
null
null
null
include/counter_array.hh
martinsk/etally_core
b1fc30ca79e93bea5a3efc63d01ef9dd67c9c674
[ "MIT" ]
null
null
null
#ifndef COUNTER_ARRAY_HH #define COUNTER_ARRAY_HH #include "types.hh" template<typename Ty> class counter_array{ private: std::vector<Ty> data; public: Ty& operator[](idx_t idx) { if( data.size() <= idx) { data.resize(idx+1); } return data[idx]; } const Ty operator[](idx_t idx) const { return data.at(idx); } size_t length() const { return data.size(); } }; #endif
14.821429
40
0.624096
[ "vector" ]
815ceeb668d13ac77aa80073196c929be8eb29fc
2,292
hpp
C++
include/VideoSubtitle.hpp
norishigefukushima/OpenCP
63090131ec975e834f85b04e84ec29b2893845b2
[ "BSD-3-Clause" ]
137
2015-03-27T07:11:19.000Z
2022-03-30T05:58:22.000Z
include/VideoSubtitle.hpp
Pandinosaurus/OpenCP
a5234ed531c610d7944fa14d42f7320442ea34a1
[ "BSD-3-Clause" ]
2
2016-05-18T06:33:16.000Z
2016-07-11T17:39:17.000Z
include/VideoSubtitle.hpp
Pandinosaurus/OpenCP
a5234ed531c610d7944fa14d42f7320442ea34a1
[ "BSD-3-Clause" ]
43
2015-02-20T15:34:25.000Z
2022-01-27T14:59:37.000Z
#pragma once

#include "common.hpp"
#include "Timer.hpp"

namespace cp
{
	// Renders titles and subtitles over video frames, with a timed
	// dissolve (fade) controlled by an internal Timer. Typical use:
	// setTitle(...) once, then showTitleDissolve()/showScriptDissolve()
	// every frame.
	class CP_EXPORT VideoSubtitle
	{
	private:
		cp::Timer tscript;                       // measures elapsed time since restart() for the fade
		double time_dissolve_start = 500.0;      // fade begins at this elapsed time (msec)
		double time_dissolve_end = 1000.0;       // fully faded out at this elapsed time (msec)
		double time_dissolve = time_dissolve_end - time_dissolve_start;
		std::string font = "Segoe UI";
		//string font = "Consolas";
		int vspace = 20;                         // vertical gap between lines of multi-line text
		cv::Mat title;                           // pre-rendered title image
		cv::Mat show;                            // scratch buffer for composited output
		std::vector<std::string> text;           // current subtitle lines
		std::vector<int> fontSize;               // per-line font sizes, parallel to `text`
		cv::Rect textROI = cv::Rect(0, 0, 0, 0); // bounding box of the rendered text
		cv::Point textPoint = cv::Point(0, 0);   // top-left anchor for drawing
		// Current fade alpha derived from tscript and the dissolve window.
		int getAlpha();
		// Bounding rectangle of the given multi-line text block.
		cv::Rect getRectText(std::vector<std::string>& text, std::vector<int>& fontSize);
		// Draws the lines vertically stacked (vspace apart) onto `image`.
		void addVText(cv::Mat& image, std::vector<std::string>& text, cv::Point point, std::vector<int>& fontSize, cv::Scalar color);
	public:
		enum class POSITION
		{
			CENTER,
			TOP,
			BOTTOM
		};
		VideoSubtitle();
		// Resets the internal timer so a new dissolve cycle starts now.
		void restart();
		//from 0-start 100%, t/(start-end end-start)*100%
		void setDisolveTime(const double start_msec, const double end_msec);
		void setFontType(std::string font = "Segoe UI");
		void setVSpace(const int vspace);//vertical space for multi-line text

		//single-line text case
		void setTitle(const cv::Size size, std::string text, int fontSize, const cv::Scalar textcolor, const cv::Scalar backgroundcolor = cv::Scalar::all(0), POSITION pos = POSITION::CENTER);
		//multi-line text case
		void setTitle(const cv::Size size, std::vector<std::string>& text, std::vector<int>& fontSize, const cv::Scalar textcolor, const cv::Scalar backgroundcolor = cv::Scalar::all(0), POSITION pos = POSITION::CENTER);

		//alpha blending title and image
		void showTitleDissolve(std::string wname, const cv::Mat& image);
		//overlay subscript
		void showScriptDissolve(std::string wname, const cv::Mat& image, const cv::Scalar textColor = cv::Scalar(255, 255, 255));

		//setTitle and then imshow (multi-line)
		void showTitle(std::string wname, const cv::Size size, std::vector<std::string>& text, std::vector<int>& fontSize, const cv::Scalar textcolor, const cv::Scalar backgroundcolor = cv::Scalar::all(0));
		//setTitle and then imshow (single-line)
		void showTitle(std::string wname, const cv::Size size, std::string text, const int fontSize, const cv::Scalar textcolor, const cv::Scalar backgroundcolor = cv::Scalar::all(0));
	};
}
40.928571
213
0.708551
[ "vector" ]
815dd06e708cbb449014bf5705b2bfa658331db5
1,002
cc
C++
cpp/src/util/string_split.cc
parastoog/libaddressinput
4e0f792ff20d78c9fab7ab9d7533324f32efa6b3
[ "Apache-2.0" ]
2,151
2020-04-18T07:31:17.000Z
2022-03-31T08:39:18.000Z
cpp/src/util/string_split.cc
parastoog/libaddressinput
4e0f792ff20d78c9fab7ab9d7533324f32efa6b3
[ "Apache-2.0" ]
395
2020-04-18T08:22:18.000Z
2021-12-08T13:04:49.000Z
cpp/src/util/string_split.cc
parastoog/libaddressinput
4e0f792ff20d78c9fab7ab9d7533324f32efa6b3
[ "Apache-2.0" ]
338
2020-04-18T08:03:10.000Z
2022-03-29T12:33:22.000Z
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // // The original source code is from: // http://src.chromium.org/viewvc/chrome/trunk/src/base/strings/string_split.cc?revision=216633 #include "string_split.h" #include <cassert> #include <cstddef> #include <string> #include <vector> namespace i18n { namespace addressinput { void SplitString(const std::string& str, char s, std::vector<std::string>* r) { assert(r != nullptr); r->clear(); size_t last = 0; size_t c = str.size(); for (size_t i = 0; i <= c; ++i) { if (i == c || str[i] == s) { std::string tmp(str, last, i - last); // Avoid converting an empty or all-whitespace source string into a vector // of one empty string. if (i != c || !r->empty() || !tmp.empty()) { r->push_back(tmp); } last = i + 1; } } } } // namespace addressinput } // namespace i18n
26.368421
95
0.630739
[ "vector" ]
815e85d14959ed11c5766283160889f28350f022
12,965
cpp
C++
tms_rc/tms_rc_katana/KNI_4.3.0/src/InvKin/KatanaKinematics6M180.cpp
robotpilot/ros_tms
3d6b6579e89aa9cb216cd3cb6157fabc553c18f1
[ "BSD-3-Clause" ]
54
2015-01-06T06:58:28.000Z
2021-05-02T07:49:37.000Z
tms_rc/tms_rc_katana/KNI_4.3.0/src/InvKin/KatanaKinematics6M180.cpp
robotpilot/ros_tms
3d6b6579e89aa9cb216cd3cb6157fabc553c18f1
[ "BSD-3-Clause" ]
114
2015-01-07T06:42:21.000Z
2022-02-12T05:54:04.000Z
tms_rc/tms_rc_katana/KNI_4.3.0/src/InvKin/KatanaKinematics6M180.cpp
robotpilot/ros_tms
3d6b6579e89aa9cb216cd3cb6157fabc553c18f1
[ "BSD-3-Clause" ]
24
2015-03-27T08:35:59.000Z
2020-06-08T13:05:31.000Z
/***************************************************************************
 *   Copyright (C) 2006 by Tiziano Mueller                                 *
 *   tiziano.mueller@neuronics.ch                                          *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *   59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.              *
 ***************************************************************************/

// Forward and inverse kinematics for the Katana 6M180 arm.

#include "KNI_InvKin/KatanaKinematics6M180.h"
#include "common/MathHelperFunctions.h"
#include <algorithm>

namespace KNI
{

const double KatanaKinematics6M180::_tolerance = 0.001;
const int KatanaKinematics6M180::_nrOfPossibleSolutions = 8;

// Direct (forward) kinematics: converts the 6 encoder readings into a pose
// [x, y, z, phi, theta, psi], written into `solution`.
void KatanaKinematics6M180::DK(coordinates &solution, encoders const &current_encoders) const
{
  using namespace KNI_MHF;
  // numering the angles starting by 0-5

  double factor;
  double R13, R23, R31, R32;

  angles current_angles(6);
  // convert raw encoder ticks into joint angles (radians)
  for (int z = 0; z < 6; ++z)
  {
    current_angles[z] = enc2rad(current_encoders[z], _parameters[z].angleOffset, _parameters[z].epc,
                                _parameters[z].encOffset, _parameters[z].rotDir);
  }

  // mechanical zero offsets for this model; needs refactoring:
  current_angles[1] = current_angles[1] - M_PI / 2.0;
  current_angles[2] = current_angles[2] - M_PI;
  current_angles[3] = M_PI - current_angles[3];
  current_angles[4] = -current_angles[4];

  coordinates pose(6);

  angles cx(current_angles.size()), sx(current_angles.size());
  angles::iterator cx_iter, sx_iter;

  angles angle = current_angles;

  // accumulate joint angles 1..3 so sx[3]/cx[3] refer to theta2+theta3+theta4
  angle[2] = angle[1] + angle[2];
  angle[3] = angle[2] + angle[3];

  // precalculate the sin/cos of every (accumulated) angle
  cx_iter = cx.begin();
  sx_iter = sx.begin();
  std::transform(angle.begin(), angle.end(), sx_iter, unary_precalc_sin< double >());
  std::transform(angle.begin(), angle.end(), cx_iter, unary_precalc_cos< double >());

  factor = (_length[0] * sx[1] + _length[1] * sx[2] + (_length[2] + _length[3]) * sx[3]);
  // x = px (compare homogenous transformation matrix)
  pose[0] = cx[0] * factor;
  // y = pz (compare homogenous transformation matrix)
  pose[1] = sx[0] * factor;
  // z = pz (compare homogenous transformation matrix)
  pose[2] = _length[0] * cx[1] + _length[1] * cx[2] + (_length[2] + _length[3]) * cx[3];

  // phi = atan2(R13/-R23) (compare homogenous transformation matrix)
  R13 = cx[0] * sx[3];
  R23 = sx[0] * sx[3];
  pose[3] = atan2(R13, -R23);
  // theta = acos(R33) (compare homogenous transformation matrix)
  pose[4] = acos(cx[3]);
  // psi = atan2(R31/R32) (compare homogenous transformation matrix)
  R31 = sx[3] * sx[4];
  R32 = sx[3] * cx[4];
  pose[5] = atan2(R31, R32);

  std::swap(solution, pose);
}

// Stores the link lengths and motor parameters used by DK/IK.
// NOTE(review): the second assert checks size()==6 while its message says
// "exactly 5 motor parameters" — message and condition disagree; verify
// which is intended.
void KatanaKinematics6M180::init(metrics const &length, parameter_container const &parameters)
{
  assert((length.size() == 4) && "You have to provide the metrics for exactly 4 links");  // we have 4 links
  assert((parameters.size() == 6) &&
         "You have to provide exactly 5 motor parameters");  // 5 motors are used for IK calculations

  _setLength(length);
  _setParameters(parameters);
}

// Computes the in-plane wrist coordinates (b1, b2) and the cosine of the
// elbow angle theta3 (law of cosines) for a candidate solution.
void KatanaKinematics6M180::IK_b1b2costh3_6M180(angles_calc &angle, const position &p) const
{
  using namespace KNI_MHF;
  double d5 = _length[2] + _length[3];
  angle.b1 = p.x * cos(angle.theta1) + p.y * sin(angle.theta1) - d5 * sin(angle.theta234);
  angle.b2 = p.z - d5 * cos(angle.theta234);
  angle.costh3 =
      -(pow2(angle.b1) + pow2(angle.b2) - pow2(_length[0]) - pow2(_length[1])) / (2.0 * _length[0] * _length[1]);
}

// Derives theta2 and theta4 from the already-known theta1/theta3/theta234;
// if the resulting pose misses the target, the alternate branch
// (theta2 + PI) is used instead.
void KatanaKinematics6M180::thetacomp(angles_calc &angle, const position &p_m) const
{
  using namespace KNI_MHF;

  angle.theta2 = -M_PI / 2.0 - (atan0(angle.b1, angle.b2) +
                                atan0(_length[0] + _length[1] * cos(angle.theta3), _length[1] * sin(angle.theta3)));
  angle.theta4 = angle.theta234 - angle.theta2 - angle.theta3;

  if (!PositionTest6M180(angle, p_m))
  {
    angle.theta2 = angle.theta2 + M_PI;
    angle.theta4 = angle.theta234 - angle.theta2 - angle.theta3;
  }
}

// Recomputes the forward position from a candidate angle set and accepts it
// only if the squared distance to the target is below _tolerance.
bool KatanaKinematics6M180::PositionTest6M180(const angles_calc &a, const position &p) const
{
  using namespace KNI_MHF;
  double temp, xm2, ym2, zm2;

  temp = _length[0] * sin(a.theta2) + _length[1] * sin(a.theta2 + a.theta3) + (_length[2] + _length[3]) * sin(a.theta234);
  xm2 = cos(a.theta1) * temp;
  ym2 = sin(a.theta1) * temp;
  zm2 = _length[0] * cos(a.theta2) + _length[1] * cos(a.theta2 + a.theta3) + (_length[2] + _length[3]) * cos(a.theta234);

  if ((pow2(p.x - xm2) + pow2(p.y - ym2) + pow2(p.z - zm2)) >= _tolerance)
    return false;

  return true;
}

// Normalizes the candidate angles back into the motors' convention (undoing
// the offsets applied in DK) and range-checks them.
bool KatanaKinematics6M180::angledef(angles_calc &a) const
{
  using namespace KNI_MHF;
  // constants here. needs refactoring:
  a.theta2 = anglereduce(a.theta2 + M_PI / 2.0);
  a.theta3 = anglereduce(a.theta3 + M_PI);
  a.theta4 = anglereduce(M_PI - a.theta4);
  a.theta5 = anglereduce(a.theta5);

  if (a.theta1 > _parameters[0].angleStop)
  {
    a.theta1 = a.theta1 - 2.0 * M_PI;
  }
  if (a.theta2 > M_PI)
  {
    a.theta2 = a.theta2 - 2.0 * M_PI;
  }
  if (a.theta5 < _parameters[4].angleOffset)
  {
    a.theta5 = a.theta5 + 2.0 * M_PI;
  }

  return AnglePositionTest(a);
}

// True iff every joint angle lies within its configured [offset, stop]
// range (theta1/theta2 get a small ~0.5 degree slack of 0.0087 rad).
bool KatanaKinematics6M180::AnglePositionTest(const angles_calc &a) const
{
  if ((a.theta1 + 0.0087 < _parameters[0].angleOffset) || (a.theta1 > _parameters[0].angleStop))
    return false;
  if ((a.theta2 - 0.0087 > _parameters[1].angleOffset) || (a.theta2 < _parameters[1].angleStop))
    return false;
  if ((a.theta3 < _parameters[2].angleOffset) || (a.theta3 > _parameters[2].angleStop))
    return false;
  if ((a.theta4 < _parameters[3].angleOffset) || (a.theta4 > _parameters[3].angleStop))
    return false;
  if ((a.theta5 < _parameters[4].angleOffset) || (a.theta5 > _parameters[4].angleStop))
    return false;

  return true;
}

// Inverse kinematics: enumerates up to 8 closed-form candidate solutions for
// the target `pose`, discards unreachable/out-of-range ones, converts the
// rest to encoder values, and writes the candidate closest to
// `current_encoders` into `solution` (the gripper encoder is passed through
// unchanged). Throws NoSolutionException if nothing is reachable.
void KatanaKinematics6M180::IK(encoders::iterator solution, coordinates const &pose,
                               encoders const &current_encoders) const
{
  using namespace KNI_MHF;
  // pose: angles in radians ("Winkel Deg->Rad")

  // (translated) All 8 solutions are kept in the array `angle` of 8 structs:
  //   0-3 for theta1_1
  //   4-7 for theta1_2

  // Declarations
  position p_m;
  angles_container angle(_nrOfPossibleSolutions);
  double coeff1, coeff2, theta234;
  double costh5, sinth5, theta5[2];
  double R11, R21, R31, R32;
  double phi, theta, psi;

  // Initialization
  p_m.x = pose[0];
  p_m.y = pose[1];
  p_m.z = pose[2];

  // calculate theta1_1 and theta1_2 (the two base rotations reaching the target)
  angle[0].theta1 = atan1(pose[0], pose[1]);
  if (angle[0].theta1 > M_PI)
  {
    angle[0].theta1 = angle[0].theta1 - M_PI;
    if (angle[0].theta1 > (179.91 / 180 * M_PI))
    {
      angle[0].theta1 = angle[0].theta1 - M_PI;
    }
  }
  angle[4].theta1 = angle[0].theta1 + M_PI;

  theta = pose[4];
  psi = pose[5];
  phi = atan1(p_m.x, p_m.y) + M_PI / 2.0;
  theta234 = pose[4];

  // entries of the target rotation matrix needed for theta5
  R11 = cos(phi) * cos(psi) - sin(phi) * cos(theta) * sin(psi);
  R21 = sin(phi) * cos(psi) + cos(phi) * cos(theta) * sin(psi);
  R31 = sin(theta) * sin(psi);
  R32 = sin(theta) * cos(psi);

  // calculate theta5 (special-cased at the theta234 = 0 / PI singularities)
  if (theta234 == 0)
  {
    // std::cout << "Warning: Singularity theta234=0 !" << std::endl;
    for (int i = 0; i < 2; ++i)
    {
      coeff1 = -sin(angle[i * 4].theta1);
      coeff2 = -cos(angle[i * 4].theta1);
      costh5 = coeff1 * R11 - coeff2 * R21;
      sinth5 = coeff1 * R21 + coeff2 * R11;
      theta5[i] = -findFirstEqualAngle(costh5, sinth5, _tolerance);
    }
    for (int i = 0; i < _nrOfPossibleSolutions; ++i)
    {
      if (i < 4)
        angle[i].theta5 = theta5[0];
      else
        angle[i].theta5 = theta5[1];
    }
  }
  else if (theta234 == M_PI)
  {
    // std::cout << "Warning: Singularity theta234=PI !" << std::endl;
    for (int i = 0; i < 2; ++i)
    {
      coeff1 = -sin(angle[i * 4].theta1);
      coeff2 = cos(angle[i * 4].theta1);
      costh5 = coeff1 * R11 + coeff2 * R21;
      sinth5 = -coeff1 * R21 + coeff2 * R11;
      theta5[i] = -findFirstEqualAngle(costh5, sinth5, _tolerance);
    }
    for (int i = 0; i < _nrOfPossibleSolutions; ++i)
    {
      if (i < 4)
        angle[i].theta5 = theta5[0];
      else
        angle[i].theta5 = theta5[1];
    }
  }
  else
  {
    theta5[0] = -atan2(R31 / sin(theta234), R32 / sin(theta234));
    theta5[1] = -atan2(R31 / sin(-theta234), R32 / sin(-theta234));
    for (int i = 0; i < _nrOfPossibleSolutions; ++i)
    {
      if (i % 4 == 0 || i % 4 == 1)
        angle[i].theta5 = theta5[0];
      else
        angle[i].theta5 = theta5[1];
    }
  }

  // For each base rotation (theta1_1/theta1_2) and each sign of theta234,
  // both elbow-up and elbow-down (±acos) candidates are generated.
  //====THETA1_1==================
  //-------THETA234_1-------------
  angle[0].theta234 = pose[4];
  //    angle[0].theta5 = pose[5];
  IK_b1b2costh3_6M180(angle[0], p_m);

  angle[1] = angle[0];
  angle[0].theta3 = acos(angle[0].costh3) - M_PI;
  thetacomp(angle[0], p_m);
  angle[1].theta3 = -acos(angle[1].costh3) + M_PI;
  thetacomp(angle[1], p_m);

  //-------THETA234_2-------------
  angle[2].theta1 = angle[0].theta1;
  angle[2].theta234 = -angle[0].theta234;
  //    angle[2].theta5 = angle[0].theta5;
  IK_b1b2costh3_6M180(angle[2], p_m);

  angle[3] = angle[2];
  angle[2].theta3 = acos(angle[2].costh3) - M_PI;
  thetacomp(angle[2], p_m);
  angle[3].theta3 = -acos(angle[3].costh3) + M_PI;
  thetacomp(angle[3], p_m);

  //====THETA1_2==================
  //-------THETA234_1-------------
  angle[4].theta234 = pose[4];
  //    angle[4].theta5 = pose[5];
  IK_b1b2costh3_6M180(angle[4], p_m);

  angle[5] = angle[4];
  angle[4].theta3 = acos(angle[4].costh3) - M_PI;
  thetacomp(angle[4], p_m);
  angle[5].theta3 = -acos(angle[5].costh3) + M_PI;
  thetacomp(angle[5], p_m);

  //-------THETA234_2-------------
  angle[6].theta1 = angle[4].theta1;
  angle[6].theta234 = -angle[4].theta234;
  //    angle[6].theta5 = angle[4].theta5;
  IK_b1b2costh3_6M180(angle[6], p_m);

  angle[7] = angle[6];
  angle[6].theta3 = acos(angle[6].costh3) - M_PI;
  thetacomp(angle[6], p_m);
  angle[7].theta3 = -acos(angle[7].costh3) + M_PI;
  thetacomp(angle[7], p_m);

  // Drop candidates whose elbow cosine is out of [-1, 1] (unreachable) and
  // those failing the joint-range test in angledef().
  for (std::vector< angles_calc >::iterator iter = angle.begin(); iter != angle.end();
       /* do iter forward in body */)
  {
    if (pow2(iter->costh3) <= 1.0)
    {
      if (!angledef(*iter))
        iter = angle.erase(iter);
      else
        ++iter;
      continue;
    }
    iter = angle.erase(iter);
  }

  if (angle.size() == 0)
  {
    throw NoSolutionException();
  }

  // Convert each surviving candidate into encoder values.
  std::vector< std::vector< int > > PossibleTargetsInEncoders;
  for (std::vector< angles_calc >::iterator i = angle.begin(); i != angle.end(); ++i)
  {
    // NOTE(review): this local `solution` shadows the iterator parameter of
    // the same name; the parameter is only used again after this loop.
    std::vector< int > solution(5);

    solution[0] = rad2enc(i->theta1, _parameters[0].angleOffset, _parameters[0].epc, _parameters[0].encOffset,
                          _parameters[0].rotDir);
    solution[1] = rad2enc(i->theta2, _parameters[1].angleOffset, _parameters[1].epc, _parameters[1].encOffset,
                          _parameters[1].rotDir);
    solution[2] = rad2enc(i->theta3, _parameters[2].angleOffset, _parameters[2].epc, _parameters[2].encOffset,
                          _parameters[2].rotDir);
    solution[3] = rad2enc(i->theta4, _parameters[3].angleOffset, _parameters[3].epc, _parameters[3].encOffset,
                          _parameters[3].rotDir);
    solution[4] = rad2enc(i->theta5, _parameters[4].angleOffset, _parameters[4].epc, _parameters[4].encOffset,
                          _parameters[4].rotDir);

    PossibleTargetsInEncoders.push_back(solution);
  }

  // Pick the candidate closest to the current encoder configuration.
  std::vector< std::vector< int > >::const_iterator sol = KinematicsDefaultEncMinAlgorithm()(
      PossibleTargetsInEncoders.begin(), PossibleTargetsInEncoders.end(), current_encoders.begin(),
      current_encoders.end());

  assert(sol != PossibleTargetsInEncoders.end() && "All solutions are out of range");

  encoders::iterator gripper_encoder_iter = std::copy((*sol).begin(), (*sol).end(), solution);
  *gripper_encoder_iter = current_encoders[5];  // copy gripper-encoders from current
}

}  // NAMESPACE: KNI
33.675325
119
0.593984
[ "vector", "transform" ]
816089c3c452f06ffea63c5603f2b7afe263947a
873
cpp
C++
BashuOJ-Code/2581.cpp
magicgh/algorithm-contest-code
c21a90b11f73535c61e6363a4305b74cff24a85b
[ "MIT" ]
null
null
null
BashuOJ-Code/2581.cpp
magicgh/algorithm-contest-code
c21a90b11f73535c61e6363a4305b74cff24a85b
[ "MIT" ]
null
null
null
BashuOJ-Code/2581.cpp
magicgh/algorithm-contest-code
c21a90b11f73535c61e6363a4305b74cff24a85b
[ "MIT" ]
null
null
null
// Greedy solution: every strict local extremum is flattened to its nearest
// neighbor's value, paying the smaller of the two gaps; the answer is that
// cost plus the remaining total variation sum |a[i+1]-a[i]|.
#include<iostream>
#include<cstdio>
#include<cstring>
#include<cstdlib>
#include<cmath>
#include<iomanip>
#include<algorithm>
#include<queue>
#include<stack>
#include<vector>
#define ri register int
#define ll long long
using namespace std;
const int MAXN=100005;
int n,a[MAXN];
ll Ans;
// Fast integer reader from stdin (handles an optional leading '-').
inline const int getint()
{
    int num=0,bj=1;
    char c=getchar();
    while(!isdigit(c))bj=(c=='-'||bj==-1)?-1:1,c=getchar();
    while(isdigit(c))num=num*10+c-'0',c=getchar();
    return num*bj;
}
int main()
{
    n=getint();
    for(ri i=1;i<=n;i++)a[i]=getint();
    // Flatten interior extrema left-to-right, charging the cheaper move.
    for(ri i=2;i<n;i++)
    {
        // strict local maximum: lower it to the larger neighbor
        if(a[i]>a[i-1]&&a[i]>a[i+1])
        {
            Ans+=min(abs(a[i]-a[i-1]),abs(a[i+1]-a[i]));
            a[i]=max(a[i-1],a[i+1]);
        }
        // strict local minimum: raise it to the smaller neighbor
        if(a[i]<a[i-1]&&a[i]<a[i+1])
        {
            Ans+=min(abs(a[i]-a[i-1]),abs(a[i+1]-a[i]));
            a[i]=min(a[i-1],a[i+1]);
        }
    }
    // add the total variation of the now-monotone-by-pieces sequence
    for(ri i=1;i<n;i++)Ans+=abs(a[i+1]-a[i]);
    printf("%lld\n",Ans);
    return 0;
}
18.574468
56
0.578465
[ "vector" ]
8168514b78d9c2dd5a4a3ee973d60db0d3eb2491
5,911
cpp
C++
lj/main.cpp
ToruNiina/ljfluid
189a7ee6ffcb8c6445f8693b55dea6c096bd6e75
[ "MIT" ]
1
2020-12-15T14:05:13.000Z
2020-12-15T14:05:13.000Z
lj/main.cpp
ToruNiina/ljfluid
189a7ee6ffcb8c6445f8693b55dea6c096bd6e75
[ "MIT" ]
null
null
null
lj/main.cpp
ToruNiina/ljfluid
189a7ee6ffcb8c6445f8693b55dea6c096bd6e75
[ "MIT" ]
null
null
null
// Lennard-Jones fluid simulation: N^3 particles in a periodic box,
// integrated with half-kick / drift / force / half-kick velocity updates
// (velocity-Verlet style) and a cell list for neighbor search.
#include <lj/particle.hpp>
#include <lj/boundary_condition.hpp>
// #include <lj/verletlist.hpp>
#include <lj/cell_list.hpp>
#include <iterator>
#include <fstream>
#include <random>

namespace lj
{

// LJ parameters (reduced units) and the cutoff radius r_c = 2.5 sigma.
template<typename Real>
constexpr static Real sgm = 1.0;
template<typename Real>
constexpr static Real eps = 1.0;
template<typename Real>
constexpr static Real r_c = sgm<Real> * 2.5;
template<typename Real>
constexpr static Real inv_r_c = 1.0 / r_c<Real>;

// Accumulates pairwise LJ forces into ps[i].force for all neighbor pairs
// given by the cell list, applying the minimum-image convention via `pb`.
// Pairs beyond r_c are skipped (invr < inv_r_c).
template<typename Real>
void calc_force(std::vector<particle<Real>>& ps,
                const periodic_boundary<Real>& pb,
                const cell_list<Real>& ls)
//              const verlet_list<Real>& ls)
{
    for(std::size_t i=0; i<ps.size(); ++i)
    {
        const auto& pos1 = ps[i].position;
//      for(std::size_t j=i+1; j<ps.size(); ++j)
        for(auto j : ls.neighbors(i))
        {
            const auto& pos2 = ps[j].position;
            const auto  dpos = pb.adjust_direction(pos2 - pos1);
            const Real  invr = 1. / length(dpos);
            if(invr < inv_r_c<Real>) {continue;}
            const Real sgmr = sgm<Real> * invr;
            const Real sr6  = std::pow(sgmr, 6);
            // dU/dr of 4*eps*(sr^12 - sr^6), applied equal and opposite
            const auto f = dpos * (24 * eps<Real> * sr6 * (1 - 2 * sr6) * invr) * invr;
            ps[i].force += f;
            ps[j].force -= f;
        }
    }
    return;
}

// Sum of m*v^2/2 over all particles.
template<typename Real>
Real calc_kinetic_energy(const std::vector<particle<Real>>& ps)
{
    Real E = 0.0;
    for(const auto& p : ps)
    {
        E += length_sq(p.velocity) * p.mass / 2;
    }
    return E;
}

// Sum of 4*eps*(sr^12 - sr^6) over neighbor pairs within the cutoff.
template<typename Real>
Real calc_potential_energy(const std::vector<particle<Real>>& ps,
                           const periodic_boundary<Real>& pb,
                           const cell_list<Real>& ls)
//                         const verlet_list<Real>& ls)
{
    Real E = 0.0;
    for(std::size_t i=0; i<ps.size(); ++i)
    {
        const auto& pos1 = ps[i].position;
//      for(std::size_t j=i+1; j<ps.size(); ++j)
        for(auto j : ls.neighbors(i))
        {
            const auto& pos2 = ps[j].position;
            const Real invr = 1.0 / length(pb.adjust_direction(pos2 - pos1));
            if(invr < inv_r_c<Real>) {continue;}
            const Real sgmr = sgm<Real> * invr;
            const Real sr6  = std::pow(sgmr, 6);
            E += 4 * eps<Real> * sr6 * (sr6 - 1);
        }
    }
    return E;
}

} // lj

int main()
{
//  typedef double Real;
    typedef float  Real;
    // N = 2^log2N particles per axis; the box is [0, 2N)^3 so particles
    // start on a lattice with spacing 2 (offset 1 from the walls).
    const std::size_t log2N = 2;
    const std::size_t N     = std::pow(2, log2N);
    const lj::vector<Real> upper{N*2.0, N*2.0, N*2.0};
    const lj::vector<Real> lower{  0.0,   0.0,   0.0};
    const lj::periodic_boundary<Real> pb(lower, upper);

    const Real kB = 1.986231313e-3;  // Boltzmann constant in these units
    const Real T  = 300.0;
    const Real dt = 0.01;

//  lj::verlet_list<Real> ls(dt, lj::r_c<Real>, 0.25);
    lj::cell_list<Real> ls(dt, lj::r_c<Real>, 0.1, pb);

    std::vector<lj::particle<Real>> ps(N * N * N);
    {
        // Lattice positions decoded from the particle index bit fields;
        // Maxwell-Boltzmann velocities from a fixed seed (reproducible runs).
        std::mt19937 mt(123456789);
        std::normal_distribution<Real> boltz(0.0, std::sqrt(kB * T));
        for(std::size_t i=0; i<ps.size(); ++i)
        {
            ps[i].mass = 1.0;
            ps[i].position = lj::vector<Real>{
                Real(1) + Real(2) * ((i & (N-1) << log2N * 0) >> log2N * 0),
                Real(1) + Real(2) * ((i & (N-1) << log2N * 1) >> log2N * 1),
                Real(1) + Real(2) * ((i & (N-1) << log2N * 2) >> log2N * 2)};
            const Real vx = boltz(mt);
            const Real vy = boltz(mt);
            const Real vz = boltz(mt);
            ps[i].velocity = lj::vector<Real>{ vx,  vy,  vz};
            ps[i].force    = lj::vector<Real>{0.0, 0.0, 0.0};
        }
    }
    {
        // Open-and-close truncates any output left over from a previous run;
        // the loop below re-opens the files in append mode.
        std::ofstream neigh("neigh.xyz");
        std::ofstream velo("velo.xyz");
    }

    ls.make(ps, pb);
    lj::calc_force(ps, pb, ls);
    std::cerr << "time\tkinetic\tpotential\ttotal\n";
    for(std::size_t timestep=0; timestep < 100000; ++timestep)
    {
//      if(timestep % 100 == 0)  // (sampling throttle disabled: logs every step)
        {
            // Log energies to stderr, trajectory to stdout, velocities and
            // neighbor lists to their append-mode files.
            const Real Ek = lj::calc_kinetic_energy(ps);
            const Real Ep = lj::calc_potential_energy(ps, pb, ls);
            std::cerr << timestep * dt << '\t' << Ek << '\t' << Ep << '\t'
                      << Ek + Ep << '\n';
            std::cout << ps << std::flush;

            std::ofstream velo("velo.xyz",
                    std::ios_base::app | std::ios_base::out);
            velo << ps.size() << '\n';
            velo << "t = " << timestep * dt << '\n';
            for(const auto& p : ps)
            {
                velo << "H      " << std::fixed << std::setprecision(5)
                     << std::showpoint
                     << std::setw(10) << std::right << p.velocity.x
                     << std::setw(10) << std::right << p.velocity.y
                     << std::setw(10) << std::right << p.velocity.z << '\n';
            }

            std::ofstream neigh("neigh.xyz",
                    std::ios_base::app | std::ios_base::out);
            neigh << '\n';
            for(std::size_t i=0; i<ps.size(); ++i)
            {
                neigh << '{';
                for(std::size_t n : ls.neighbors(i))
                {
                    neigh << n << ", ";
                }
                neigh << "}\n";
            }
            neigh << std::endl;
        }

        // half-kick + drift; track the max speed so the neighbor list knows
        // how far particles may have moved since the last rebuild.
        Real max_vel2 = 0.0;
        for(auto& p : ps)
        {
            max_vel2 = std::max(max_vel2, length_sq(p.velocity));
            p.velocity = p.velocity + (dt / 2) * p.force / p.mass;
            p.position = pb.adjust_position(p.position + dt * p.velocity);
            p.force    = lj::vector<Real>{0.0, 0.0, 0.0};
        }
        ls.update(ps, pb, std::sqrt(max_vel2));
        lj::calc_force(ps, pb, ls);
        // second half-kick with the new forces
        for(auto& p : ps)
        {
            p.velocity = p.velocity + (dt / 2) * p.force / p.mass;
        }
    }
    std::cout << ps << std::flush;

    return 0;
}
31.441489
89
0.482998
[ "vector" ]
816a7ec66978bc632236c519c9db474aef8d888d
15,356
cpp
C++
sandbox/performance.cpp
VaderY/cereal
b03f237713a7e4aab18c7d9150fb3c9a5e92ea3a
[ "BSD-3-Clause" ]
null
null
null
sandbox/performance.cpp
VaderY/cereal
b03f237713a7e4aab18c7d9150fb3c9a5e92ea3a
[ "BSD-3-Clause" ]
null
null
null
sandbox/performance.cpp
VaderY/cereal
b03f237713a7e4aab18c7d9150fb3c9a5e92ea3a
[ "BSD-3-Clause" ]
null
null
null
/* Copyright (c) 2014, Randolph Voorhies, Shane Grant All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of cereal nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL RANDOLPH VOORHIES AND SHANE GRANT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef _MSC_VER # pragma warning(push) # pragma warning(disable : 4244 4267) #endif // fix for old versions of boost + deprecated auto_ptr #define BOOST_NO_AUTO_PTR #include <sstream> #include <iostream> #include <chrono> #include <random> #include <boost/format.hpp> #include <boost/serialization/serialization.hpp> #include <boost/archive/binary_oarchive.hpp> #include <boost/archive/binary_iarchive.hpp> #include <boost/serialization/vector.hpp> #include <boost/serialization/map.hpp> #include <boost/serialization/string.hpp> #include <boost/serialization/base_object.hpp> #include <cereal/archives/binary.hpp> #include <cereal/types/vector.hpp> #include <cereal/types/string.hpp> #include <cereal/types/map.hpp> //! Runs serialization to save data to an ostringstream /*! Used to time how long it takes to save data to an ostringstream. Everything that happens within the save function will be timed, including any set-up necessary to perform the serialization. @param data The data to save @param saveFunction A function taking in an ostringstream and the data and returning void @return The ostringstream and the time it took to save the data */ template <class T> std::chrono::nanoseconds saveData( T const & data, std::function<void(std::ostringstream &, const T&)> saveFunction, std::ostringstream & os ) { auto start = std::chrono::high_resolution_clock::now(); saveFunction( os, data ); return std::chrono::duration_cast<std::chrono::nanoseconds>( std::chrono::high_resolution_clock::now() - start ); } //! Runs serialization to load data to from an istringstream /*! Used to time how long it takes to load data from an istringstream. Everything that happens within the load function will be timed, including any set-up necessary to perform the serialization. 
@param dataStream The saved data stream @param loadFunction A function taking in an istringstream and a data reference and returning void @return The loaded data and the time it took to save the data */ template <class T> std::pair<T, std::chrono::nanoseconds> loadData( std::ostringstream const & dataStream, std::function<void(std::istringstream &, T &)> loadFunction ) { T data; std::istringstream os( dataStream.str() ); auto start = std::chrono::high_resolution_clock::now(); loadFunction( os, data ); return {data, std::chrono::duration_cast<std::chrono::nanoseconds>( std::chrono::high_resolution_clock::now() - start )}; } struct cerealBinary { //! Saves data to a cereal binary archive template <class T> static void save( std::ostringstream & os, T const & data ) { cereal::BinaryOutputArchive oar(os); oar(data); } //! Loads data to a cereal binary archive template <class T> static void load( std::istringstream & is, T & data ) { cereal::BinaryInputArchive iar(is); iar(data); } }; struct boostBinary { //! Saves data to a boost binary archive template <class T> static void save( std::ostringstream & os, T const & data ) { boost::archive::binary_oarchive oar(os); oar & data; } //! Loads data to a boost binary archive template <class T> static void load( std::istringstream & is, T & data ) { boost::archive::binary_iarchive iar(is); iar & data; } }; struct binary { typedef boostBinary boost; typedef cerealBinary cereal; }; //! Times how long it takes to serialize (load and store) some data /*! Times how long and the size of the serialization object used to serialize some data. Result is output to standard out. 
@tparam SerializationT The serialization struct that has all save and load functions @tparam DataTCereal The type of data to test for cereal @tparam DataTBoost The type of data to test for boost @param name The name for this test @param data The data to serialize for cereal @param data The data to serialize for boost @param numAverages The number of times to average @param validateData Whether data should be validated (input == output) */ template <class SerializationT, class DataTCereal, class DataTBoost> void test( std::string const & name, DataTCereal const & dataC, DataTBoost const & dataB, size_t numAverages = 100, bool validateData = false ); template <class SerializationT, class DataTCereal, class DataTBoost> void test( std::string const & name, DataTCereal const & dataC, DataTBoost const & dataB, size_t numAverages, bool /*validateData*/ ) { std::cout << "-----------------------------------" << std::endl; std::cout << "Running test: " << name << std::endl; std::chrono::nanoseconds totalBoostSave{0}; std::chrono::nanoseconds totalBoostLoad{0}; std::chrono::nanoseconds totalCerealSave{0}; std::chrono::nanoseconds totalCerealLoad{0}; size_t boostSize = 0; size_t cerealSize = 0; for(size_t i = 0; i < numAverages; ++i) { // Boost { std::ostringstream os; auto saveResult = saveData<DataTBoost>( dataB, {SerializationT::boost::template save<DataTBoost>}, os ); totalBoostSave += saveResult; if(!boostSize) boostSize = os.tellp(); auto loadResult = loadData<DataTBoost>( os, {SerializationT::boost::template load<DataTBoost>} ); totalBoostLoad += loadResult.second; } // Cereal { std::ostringstream os; auto saveResult = saveData<DataTCereal>( dataC, {SerializationT::cereal::template save<DataTCereal>}, os ); totalCerealSave += saveResult; if(!cerealSize) cerealSize = os.tellp(); auto loadResult = loadData<DataTCereal>( os, {SerializationT::cereal::template load<DataTCereal>} ); totalCerealLoad += loadResult.second; } } // Averages double averageBoostSave = 
std::chrono::duration_cast<std::chrono::milliseconds>(totalBoostSave).count() / static_cast<double>( numAverages ); double averageBoostLoad = std::chrono::duration_cast<std::chrono::milliseconds>(totalBoostLoad).count() / static_cast<double>( numAverages ); double averageCerealSave = std::chrono::duration_cast<std::chrono::milliseconds>(totalCerealSave).count() / static_cast<double>( numAverages ); double averageCerealLoad = std::chrono::duration_cast<std::chrono::milliseconds>(totalCerealLoad).count() / static_cast<double>( numAverages ); // Percentages relative to boost double cerealSaveP = averageCerealSave / averageBoostSave; double cerealLoadP = averageCerealLoad / averageBoostLoad; double cerealSizeP = cerealSize / static_cast<double>( boostSize ); std::cout << " Boost results:" << std::endl; std::cout << boost::format("\tsave | time: %06.4fms (%1.2f) size: %20.8fkb (%1.8f) total: %6.1fms") % averageBoostSave % 1.0 % (boostSize / 1024.0) % 1.0 % static_cast<double>( std::chrono::duration_cast<std::chrono::milliseconds>(totalBoostSave).count() ); std::cout << std::endl; std::cout << boost::format("\tload | time: %06.4fms (%1.2f) total: %6.1fms") % averageBoostLoad % 1.0 % static_cast<double>( std::chrono::duration_cast<std::chrono::milliseconds>(totalBoostLoad).count() ); std::cout << std::endl; std::cout << " Cereal results:" << std::endl; std::cout << boost::format("\tsave | time: %06.4fms (%1.2f) size: %20.8fkb (%1.8f) total: %6.1fms") % averageCerealSave % cerealSaveP % (cerealSize / 1024.0) % cerealSizeP % static_cast<double>( std::chrono::duration_cast<std::chrono::milliseconds>(totalCerealSave).count() ); std::cout << std::endl; std::cout << boost::format("\tload | time: %06.4fms (%1.2f) total: %6.1fms") % averageCerealLoad % cerealLoadP % static_cast<double>( std::chrono::duration_cast<std::chrono::milliseconds>(totalCerealLoad).count() ); std::cout << std::endl; } template <class SerializationT, class DataT> void test( std::string const & name, DataT 
const & data, size_t numAverages = 100, bool validateData = false ) { return test<SerializationT, DataT, DataT>( name, data, data, numAverages, validateData ); } template<class T> typename std::enable_if<std::is_floating_point<T>::value, T>::type random_value(std::mt19937 & gen) { return std::uniform_real_distribution<T>(-10000.0, 10000.0)(gen); } template<class T> typename std::enable_if<std::is_integral<T>::value && sizeof(T) != sizeof(char), T>::type random_value(std::mt19937 & gen) { return std::uniform_int_distribution<T>(std::numeric_limits<T>::lowest(), std::numeric_limits<T>::max())(gen); } template<class T> typename std::enable_if<std::is_integral<T>::value && sizeof(T) == sizeof(char), T>::type random_value(std::mt19937 & gen) { return static_cast<T>( std::uniform_int_distribution<int64_t>(std::numeric_limits<T>::lowest(), std::numeric_limits<T>::max())(gen) ); } template<class T> typename std::enable_if<std::is_same<T, std::string>::value, std::string>::type random_value(std::mt19937 & gen) { std::string s(std::uniform_int_distribution<int>(3, 30)(gen), ' '); for(char & c : s) c = std::uniform_int_distribution<char>(' ', '~')(gen); return s; } template<class C> std::basic_string<C> random_basic_string(std::mt19937 & gen, size_t maxSize = 30) { std::basic_string<C> s(std::uniform_int_distribution<int>(3, maxSize)(gen), ' '); for(C & c : s) c = static_cast<C>( std::uniform_int_distribution<int>( '~', '~' )(gen) ); return s; return s; } template <size_t N> std::string random_binary_string(std::mt19937 & gen) { std::string s(N, ' '); for(auto & c : s ) c = std::uniform_int_distribution<char>('0', '1')(gen); return s; } struct PoDStructCereal { int32_t a; int64_t b; float c; double d; template <class Archive> void serialize( Archive & ar ) { ar(a, b, c, d); } }; struct PoDStructBoost { int32_t a; int64_t b; float c; double d; template <class Archive> void serialize( Archive & ar, const unsigned int /*version*/ ) { ar & a & b & c & d; } }; struct 
PoDChildCereal : virtual PoDStructCereal { PoDChildCereal() : v(1024) { } std::vector<float> v; template <class Archive> void serialize( Archive & ar ) { ar( cereal::virtual_base_class<PoDStructCereal>(this), v ); } }; struct PoDChildBoost : virtual PoDStructBoost { PoDChildBoost() : v(1024) { } std::vector<float> v; template <class Archive> void serialize( Archive & ar, const unsigned int /*version*/ ) { ar & boost::serialization::base_object<PoDStructBoost>(*this); ar & v; } }; int main() { std::random_device rd; std::mt19937 gen(rd()); auto rngC = [&](){ return random_value<uint8_t>(gen); }; auto rngD = [&](){ return random_value<double>(gen); }; const bool randomize = false; //######################################## auto vectorDoubleTest = [&](size_t s, bool randomize_) { std::ostringstream name; name << "Vector(double) size " << s; std::vector<double> data(s); if(randomize_) for( auto & d : data ) d = rngD(); test<binary>( name.str(), data ); }; vectorDoubleTest(1, randomize); // 8B vectorDoubleTest(16, randomize); // 128B vectorDoubleTest(1024, randomize); // 8KB vectorDoubleTest(1024*1024, randomize); // 8MB //######################################## auto vectorCharTest = [&](size_t s, bool randomize_) { std::ostringstream name; name << "Vector(uint8_t) size " << s; std::vector<uint8_t> data(s); if(randomize_) for( auto & d : data ) d = rngC(); test<binary>( name.str(), data ); }; vectorCharTest(1024*1024*64, randomize); //######################################## auto vectorPoDStructTest = [&](size_t s) { std::ostringstream name; name << "Vector(PoDStruct) size " << s; std::vector<PoDStructCereal> dataC(s); std::vector<PoDStructBoost> dataB(s); test<binary>( name.str(), dataC, dataB ); }; vectorPoDStructTest(1); vectorPoDStructTest(64); vectorPoDStructTest(1024); vectorPoDStructTest(1024*1024); vectorPoDStructTest(1024*1024*2); //######################################## auto vectorPoDChildTest = [&](size_t s) { std::ostringstream name; name << 
"Vector(PoDChild) size " << s; std::vector<PoDChildCereal> dataC(s); std::vector<PoDChildBoost> dataB(s); test<binary>( name.str(), dataC, dataB ); }; vectorPoDChildTest(1024); vectorPoDChildTest(1024*32); //######################################## auto stringTest = [&](size_t s) { std::ostringstream name; name << "String size " << s; std::string data = random_basic_string<char>(gen, s); std::cout << "data.size " << data.size() << std::endl; test<binary>( name.str(), data ); }; stringTest(200000); stringTest(2000000); stringTest(20000000); //######################################## auto vectorStringTest = [&](size_t s) { std::ostringstream name; name << "Vector(String) size " << s; std::vector<std::string> data(s); for(size_t i=0; i<data.size(); ++i) data[i] = random_basic_string<char>(gen); test<binary>( name.str(), data ); }; vectorStringTest(512); vectorStringTest(1024); vectorStringTest(1024*64); vectorStringTest(1024*128); //######################################## auto mapPoDStructTest = [&](size_t s) { std::ostringstream name; name << "Map(PoDStruct) size " <<s; std::map<std::string, PoDStructCereal> mC; std::map<std::string, PoDStructBoost> mB; for(size_t i=0; i<s; ++i) { mC[std::to_string( i )] = PoDStructCereal(); mB[std::to_string( i )] = PoDStructBoost(); } test<binary>(name.str(), mC, mB); }; mapPoDStructTest(1024); mapPoDStructTest(1024*64); return 0; } #ifdef _MSC_VER #pragma warning(pop) #endif
32.465116
180
0.669771
[ "object", "vector" ]
816c107796b17d90d9b3619b271d4c633d6057f6
1,091
hpp
C++
csgo/sdk/sdk.hpp
laxodev/csgo_modest
17e59a31f062077626b4aa9810c6c0989606af3a
[ "MIT" ]
null
null
null
csgo/sdk/sdk.hpp
laxodev/csgo_modest
17e59a31f062077626b4aa9810c6c0989606af3a
[ "MIT" ]
null
null
null
csgo/sdk/sdk.hpp
laxodev/csgo_modest
17e59a31f062077626b4aa9810c6c0989606af3a
[ "MIT" ]
null
null
null
#pragma once /// Source engine #include "source_engine/datatypes/ConVar.hpp" #include "source_engine/datatypes/player_info_t.hpp" #include "source_engine/datatypes/ClientClass.hpp" #include "source_engine/datatypes/datamap.hpp" #include "source_engine/datatypes/CUserCmd.h" #include "source_engine/datatypes/ServerClass.hpp" #include "source_engine/datatypes/CCSWeaponData.hpp" #include "source_engine/ISurface.hpp" #include "source_engine/IVEngineClient.hpp" #include "source_engine/CFontManager.hpp" #include "source_engine/IVEngineVGui.hpp" #include "source_engine/ICVar.hpp" #include "source_engine/CHLClient.hpp" #include "source_engine/IClientEntity.hpp" #include "source_engine/IClientMode.hpp" #include "source_engine/IClientEntityList.h" #include "source_engine/CItemSystem.hpp" #include "source_engine/IServer.hpp" #include "source_engine/CGlobalVarsBase.hpp" /// Helper #include "helper/render.hpp" #include "helper/interfaces.hpp" #include "helper/netvars.hpp" #include "helper/classids.hpp" #include "helper/weaponids.hpp" #include "helper/game.hpp" #include "entities.hpp"
32.088235
52
0.811182
[ "render" ]