code stringlengths 3 1.05M | repo_name stringlengths 4 116 | path stringlengths 4 991 | language stringclasses 9 values | license stringclasses 15 values | size int32 3 1.05M |
|---|---|---|---|---|---|
//===--- SILModule.cpp - SILModule implementation -------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "sil-module"
#include "swift/SIL/SILModule.h"
#include "Linker.h"
#include "swift/AST/GenericEnvironment.h"
#include "swift/AST/ProtocolConformance.h"
#include "swift/ClangImporter/ClangModule.h"
#include "swift/SIL/FormalLinkage.h"
#include "swift/SIL/Notifications.h"
#include "swift/SIL/SILDebugScope.h"
#include "swift/SIL/SILValue.h"
#include "swift/SIL/SILVisitor.h"
#include "swift/Serialization/SerializedSILLoader.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/YAMLTraits.h"
#include <functional>
using namespace swift;
using namespace Lowering;
/// Notification handler installed on every SILModule's deserializer.
///
/// Entities deserialized from another module are not owned by this module,
/// so their linkage must be rewritten to the corresponding "external"
/// flavor before they are used here.
class SILModule::SerializationCallback final
    : public DeserializationNotificationHandler {
  void didDeserialize(ModuleDecl *M, SILFunction *fn) override {
    updateLinkage(fn);
  }

  void didDeserialize(ModuleDecl *M, SILGlobalVariable *var) override {
    updateLinkage(var);

    // For globals we currently do not support available_externally.
    // In the interpreter it would result in two instances for a single global:
    // one in the imported module and one in the main module.
    var->setDeclaration(true);
  }

  void didDeserialize(ModuleDecl *M, SILVTable *vtable) override {
    // TODO: should vtables get linkage?
    //updateLinkage(vtable);
  }

  void didDeserialize(ModuleDecl *M, SILWitnessTable *wt) override {
    updateLinkage(wt);
  }

  /// Rewrite a deserialized entity's linkage to its "external" counterpart.
  /// Entities that already carry an external linkage are left untouched.
  template <class T> void updateLinkage(T *decl) {
    switch (decl->getLinkage()) {
    case SILLinkage::Public:
      decl->setLinkage(SILLinkage::PublicExternal);
      return;
    case SILLinkage::PublicNonABI:
      // PublicNonABI functions receive SharedExternal linkage, so that
      // they have "link once" semantics when deserialized by multiple
      // translation units in the same Swift module.
      decl->setLinkage(SILLinkage::SharedExternal);
      return;
    case SILLinkage::Hidden:
      decl->setLinkage(SILLinkage::HiddenExternal);
      return;
    case SILLinkage::Shared:
      decl->setLinkage(SILLinkage::SharedExternal);
      return;
    case SILLinkage::Private:
      decl->setLinkage(SILLinkage::PrivateExternal);
      return;
    case SILLinkage::PublicExternal:
    case SILLinkage::HiddenExternal:
    case SILLinkage::SharedExternal:
    case SILLinkage::PrivateExternal:
      // Already external; nothing to do.
      return;
    }
  }

  StringRef getName() const override {
    return "SILModule::SerializationCallback";
  }
};
/// Construct a SILModule for \p SwiftModule; the module always starts in the
/// Raw SIL stage and has the default deserialization callback registered.
SILModule::SILModule(ModuleDecl *SwiftModule, TypeConverter &TC,
                     SILOptions &Options, const DeclContext *associatedDC,
                     bool wholeModule)
    : TheSwiftModule(SwiftModule),
      AssociatedDeclContext(associatedDC),
      Stage(SILStage::Raw), wholeModule(wholeModule), Options(Options),
      serialized(false), SerializeSILAction(), Types(TC) {
  // We always add the base SILModule serialization callback.
  std::unique_ptr<DeserializationNotificationHandler> callback(
      new SILModule::SerializationCallback());
  deserializationNotificationHandlers.add(std::move(callback));
}
SILModule::~SILModule() {
  // Decrement ref count for each SILGlobalVariable with static initializers.
  for (SILGlobalVariable &v : silGlobals)
    v.dropAllReferences();

  // Drop everything functions in this module reference.
  //
  // This is necessary since the functions may reference each other. We don't
  // need to worry about sil_witness_tables since witness tables reference each
  // other via protocol conformances and sil_vtables don't reference each other
  // at all.
  for (SILFunction &F : *this) {
    F.dropAllReferences();
    F.dropDynamicallyReplacedFunction();
  }
}
/// Create a SILModule with no entities. The Swift module itself doubles as
/// the associated decl context.
std::unique_ptr<SILModule>
SILModule::createEmptyModule(ModuleDecl *M, TypeConverter &TC, SILOptions &Options,
                             bool WholeModule) {
  return std::unique_ptr<SILModule>(
      new SILModule(M, TC, Options, M, WholeModule));
}

ASTContext &SILModule::getASTContext() const {
  return TheSwiftModule->getASTContext();
}

/// Allocate module-lifetime memory.
void *SILModule::allocate(unsigned Size, unsigned Align) const {
  // -use-malloc routes allocations through malloc (useful for memory tools);
  // otherwise use the module's bump-pointer allocator.
  if (getASTContext().LangOpts.UseMalloc)
    return AlignedAlloc(Size, Align);

  return BPA.Allocate(Size, Align);
}

/// Instructions are malloc'ed individually (not bump-allocated) so that
/// deallocateInst can free them one at a time.
void *SILModule::allocateInst(unsigned Size, unsigned Align) const {
  return AlignedAlloc(Size, Align);
}

void SILModule::deallocateInst(SILInstruction *I) {
  AlignedFree(I);
}
/// Look up the witness table for a conformance reference. Abstract
/// conformances have no witness table, so they resolve to nullptr.
SILWitnessTable *
SILModule::lookUpWitnessTable(ProtocolConformanceRef C,
                              bool deserializeLazily) {
  // If we have an abstract conformance passed in (a legal value), just return
  // nullptr.
  if (!C.isConcrete())
    return nullptr;

  return lookUpWitnessTable(C.getConcrete());
}
/// Look up the witness table for a concrete conformance, creating a
/// declaration and/or attempting lazy deserialization when permitted.
SILWitnessTable *
SILModule::lookUpWitnessTable(const ProtocolConformance *C,
                              bool deserializeLazily) {
  assert(C && "null conformance passed to lookUpWitnessTable");

  SILWitnessTable *wtable;

  // Witness tables are keyed by the root conformance.
  auto rootC = C->getRootConformance();
  // Attempt to lookup the witness table from the table.
  auto found = WitnessTableMap.find(rootC);
  if (found == WitnessTableMap.end()) {
#ifndef NDEBUG
    // Make sure that all witness tables are in the witness table lookup
    // cache.
    //
    // This code should not be hit normally since we add witness tables to the
    // lookup cache when we create them. We don't just assert here since there
    // is the potential for a conformance without a witness table to be passed
    // to this function.
    for (SILWitnessTable &WT : witnessTables)
      assert(WT.getConformance() != rootC &&
             "Found witness table that is not"
             " in the witness table lookup cache.");
#endif

    // If we don't have a witness table and we're not going to try
    // deserializing it, do not create a declaration.
    if (!deserializeLazily)
      return nullptr;

    // Create a forward declaration so the loader has something to fill in.
    auto linkage = getLinkageForProtocolConformance(rootC, NotForDefinition);
    wtable = SILWitnessTable::create(*this, linkage,
                                const_cast<RootProtocolConformance *>(rootC));
  } else {
    wtable = found->second;
    assert(wtable != nullptr && "Should never map a conformance to a null witness"
                                " table.");

    // If we have a definition, return it.
    if (wtable->isDefinition())
      return wtable;
  }

  // If the module is at or past the Lowered stage, then we can't do any
  // further deserialization, since pre-IRGen SIL lowering changes the types
  // of definitions to make them incompatible with canonical serialized SIL.
  switch (getStage()) {
  case SILStage::Canonical:
  case SILStage::Raw:
    break;
  case SILStage::Lowered:
    return wtable;
  }

  // Otherwise try to deserialize it. If we succeed return the deserialized
  // function.
  //
  // *NOTE* In practice, wtable will be deserializedTable, but I do not want to rely
  // on that behavior for now.
  if (deserializeLazily)
    if (auto deserialized = getSILLoader()->lookupWitnessTable(wtable))
      return deserialized;

  // If we fail, just return the declaration.
  return wtable;
}
/// Look up (and cache) the default witness table for \p Protocol, trying the
/// serialized SIL loader on a cache miss when \p deserializeLazily is set.
SILDefaultWitnessTable *
SILModule::lookUpDefaultWitnessTable(const ProtocolDecl *Protocol,
                                     bool deserializeLazily) {
  // Note: we only ever look up default witness tables in the translation unit
  // that is currently being compiled, since they SILGen generates them when it
  // visits the protocol declaration, and IRGen emits them when emitting the
  // protocol descriptor metadata for the protocol.
  auto found = DefaultWitnessTableMap.find(Protocol);
  if (found == DefaultWitnessTableMap.end()) {
    if (deserializeLazily) {
      // Create a scratch declaration for the loader to resolve; only cache
      // the result if deserialization actually succeeded.
      SILLinkage linkage =
          getSILLinkage(getDeclLinkage(Protocol), ForDefinition);
      SILDefaultWitnessTable *wtable =
          SILDefaultWitnessTable::create(*this, linkage, Protocol);
      wtable = getSILLoader()->lookupDefaultWitnessTable(wtable);
      if (wtable)
        DefaultWitnessTableMap[Protocol] = wtable;
      return wtable;
    }

    return nullptr;
  }

  return found->second;
}

/// Create a forward declaration of a default witness table; the result is
/// not registered in the lookup cache.
SILDefaultWitnessTable *
SILModule::createDefaultWitnessTableDeclaration(const ProtocolDecl *Protocol,
                                                SILLinkage Linkage) {
  return SILDefaultWitnessTable::create(*this, Linkage, Protocol);
}

/// Remove a witness table from both the lookup cache and the module's list.
void SILModule::deleteWitnessTable(SILWitnessTable *Wt) {
  auto Conf = Wt->getConformance();
  assert(lookUpWitnessTable(Conf, false) == Wt);
  WitnessTableMap.erase(Conf);
  witnessTables.erase(Wt);
}
/// Return (and memoize) the LLVM intrinsic info for the builtin named \p ID.
const IntrinsicInfo &SILModule::getIntrinsicInfo(Identifier ID) {
  unsigned OldSize = IntrinsicIDCache.size();
  IntrinsicInfo &Info = IntrinsicIDCache[ID];

  // If the element was already in the cache, return it.
  if (OldSize == IntrinsicIDCache.size())
    return Info;

  // Otherwise, lookup the ID and Type and store them in the map.
  StringRef NameRef = getBuiltinBaseName(getASTContext(), ID.str(), Info.Types);
  Info.ID = getLLVMIntrinsicID(NameRef);

  return Info;
}
/// Return (and memoize) the BuiltinValueKind info for the builtin named
/// \p ID.
const BuiltinInfo &SILModule::getBuiltinInfo(Identifier ID) {
  unsigned OldSize = BuiltinIDCache.size();
  BuiltinInfo &Info = BuiltinIDCache[ID];

  // If the element was already in the cache, return it.
  if (OldSize == BuiltinIDCache.size())
    return Info;

  // Otherwise, lookup the ID and Type and store them in the map.
  // Find the matching ID.
  StringRef OperationName =
      getBuiltinBaseName(getASTContext(), ID.str(), Info.Types);

  // Several operation names have suffixes and don't match the name from
  // Builtins.def, so handle those first.
  if (OperationName.startswith("fence_"))
    Info.ID = BuiltinValueKind::Fence;
  else if (OperationName.startswith("cmpxchg_"))
    Info.ID = BuiltinValueKind::CmpXChg;
  else if (OperationName.startswith("atomicrmw_"))
    Info.ID = BuiltinValueKind::AtomicRMW;
  else if (OperationName.startswith("atomicload_"))
    Info.ID = BuiltinValueKind::AtomicLoad;
  else if (OperationName.startswith("atomicstore_"))
    Info.ID = BuiltinValueKind::AtomicStore;
  else if (OperationName.startswith("allocWithTailElems_"))
    Info.ID = BuiltinValueKind::AllocWithTailElems;
  else
    // Exact-name builtins: generate one StringSwitch case per entry in
    // Builtins.def; unknown names map to BuiltinValueKind::None.
    Info.ID = llvm::StringSwitch<BuiltinValueKind>(OperationName)
#define BUILTIN(id, name, attrs) .Case(name, BuiltinValueKind::id)
#include "swift/AST/Builtins.def"
        .Default(BuiltinValueKind::None);

  return Info;
}
/// Look up the SILFunction for a SILDeclRef by its mangled name.
SILFunction *SILModule::lookUpFunction(SILDeclRef fnRef) {
  auto name = fnRef.mangle();
  return lookUpFunction(name);
}

/// Deserialize the body of \p F from the serialized SIL loader.
/// Returns true on success.
bool SILModule::loadFunction(SILFunction *F) {
  SILFunction *NewF =
      getSILLoader()->lookupSILFunction(F, /*onlyUpdateLinkage*/ false);
  if (!NewF)
    return false;

  // The loader fills in the existing function rather than creating a new one.
  assert(F == NewF);
  return true;
}

/// Refresh only the linkage of \p F from serialized SIL, without
/// deserializing its body.
void SILModule::updateFunctionLinkage(SILFunction *F) {
  getSILLoader()->lookupSILFunction(F, /*onlyUpdateLinkage*/ true);
}

/// Link (deserialize) everything \p F transitively references, according to
/// the given linking mode. Returns whether anything was linked.
bool SILModule::linkFunction(SILFunction *F, SILModule::LinkingMode Mode) {
  return SILLinkerVisitor(*this, Mode).processFunction(F);
}
/// Find a public(-external) function named \p Name, looking first in this
/// module and then in serialized modules, and normalize it into an external
/// declaration with the requested linkage for non-optimized builds.
SILFunction *SILModule::findFunction(StringRef Name, SILLinkage Linkage) {
  assert((Linkage == SILLinkage::Public ||
          Linkage == SILLinkage::PublicExternal) &&
         "Only a lookup of public functions is supported currently");

  SILFunction *F = nullptr;

  // First, check if there is a function with a required name in the
  // current module.
  SILFunction *CurF = lookUpFunction(Name);

  // Nothing to do if the current module has a required function
  // with a proper linkage already.
  if (CurF && CurF->getLinkage() == Linkage) {
    F = CurF;
  } else {
    assert((!CurF || CurF->getLinkage() != Linkage) &&
           "hasFunction should be only called for functions that are not "
           "contained in the SILModule yet or do not have a required linkage");
  }

  if (!F) {
    if (CurF) {
      // Perform this lookup only if a function with a given
      // name is present in the current module.
      // This is done to reduce the amount of IO from the
      // swift module file.
      if (!getSILLoader()->hasSILFunction(Name, Linkage))
        return nullptr;
      // The function in the current module will be changed.
      F = CurF;
    }

    // If function with a given name wasn't seen anywhere yet
    // or if it is known to exist, perform a lookup.
    if (!F) {
      // Try to load the function from other modules.
      F = getSILLoader()->lookupSILFunction(Name, /*declarationOnly*/ true,
                                            Linkage);
      // Bail if nothing was found and we are not sure if
      // this function exists elsewhere.
      if (!F)
        return nullptr;
      assert(F && "SILFunction should be present in one of the modules");
      assert(F->getLinkage() == Linkage && "SILFunction has a wrong linkage");
    }
  }

  // If a function exists already and it is a non-optimizing
  // compilation, simply convert it into an external declaration,
  // so that a compiled version from the shared library is used.
  if (F->isDefinition() &&
      !F->getModule().getOptions().shouldOptimize()) {
    F->convertToDeclaration();
  }
  // External declarations carry no body, so there is nothing serialized.
  if (F->isExternalDeclaration())
    F->setSerialized(IsSerialized_t::IsNotSerialized);
  F->setLinkage(Linkage);
  return F;
}
/// Return true if a function named \p Name exists in this module or in any
/// serialized module visible to the loader.
bool SILModule::hasFunction(StringRef Name) {
  if (lookUpFunction(Name))
    return true;
  return getSILLoader()->hasSILFunction(Name);
}

/// Deserialize all SIL belonging to the current Swift module.
void SILModule::linkAllFromCurrentModule() {
  getSILLoader()->getAllForModule(getSwiftModule()->getName(),
                                  /*PrimaryFile=*/nullptr);
}

void SILModule::invalidateSILLoaderCaches() {
  getSILLoader()->invalidateCaches();
}

/// Resurrect a function from the zombie list (both the name table and the
/// zombie function list) so its name can be reused.
void SILModule::removeFromZombieList(StringRef Name) {
  if (auto *Zombie = ZombieFunctionTable.lookup(Name)) {
    ZombieFunctionTable.erase(Name);
    zombieFunctions.remove(Zombie);
  }
}
/// Erase a function from the module.
///
/// The function is not destroyed; it is moved to the zombie list because
/// later stages (IRGen debug info, vtable stubs) may still need it.
void SILModule::eraseFunction(SILFunction *F) {
  assert(!F->isZombie() && "zombie function is in list of alive functions");

  // The owner of the function's Name is the FunctionTable key. As we remove
  // the function from the table we have to store the name string elsewhere:
  // in zombieFunctionNames.
  StringRef copiedName = F->getName().copy(zombieFunctionNames);
  FunctionTable.erase(F->getName());
  F->Name = copiedName;

  // The function is dead, but we need it later (at IRGen) for debug info
  // or vtable stub generation. So we move it into the zombie list.
  getFunctionList().remove(F);
  zombieFunctions.push_back(F);
  ZombieFunctionTable[copiedName] = F;
  F->setZombie();

  // This opens dead-function-removal opportunities for called functions.
  // (References are not needed anymore.)
  F->dropAllReferences();
  F->dropDynamicallyReplacedFunction();
}

/// Drop the loader's cached state for \p F so it can be re-deserialized.
void SILModule::invalidateFunctionInSILCache(SILFunction *F) {
  getSILLoader()->invalidateFunction(F);
}

/// Erase a global SIL variable from the module.
void SILModule::eraseGlobalVariable(SILGlobalVariable *G) {
  GlobalVariableMap.erase(G->getName());
  getSILGlobalList().erase(G);
}
/// Look up (and cache) the vtable for class \p C, deserializing it on a
/// cache miss. Returns nullptr if no vtable can be found.
SILVTable *SILModule::lookUpVTable(const ClassDecl *C) {
  if (!C)
    return nullptr;

  // First try to look up R from the lookup table.
  auto R = VTableMap.find(C);
  if (R != VTableMap.end())
    return R->second;

  // If that fails, try to deserialize it. If that fails, return nullptr.
  SILVTable *Vtbl = getSILLoader()->lookupVTable(C);
  if (!Vtbl)
    return nullptr;

  // If we succeeded, map C -> VTbl in the table and return VTbl.
  VTableMap[C] = Vtbl;
  return Vtbl;
}

/// Return the lazily-created serialized SIL loader for this module.
SerializedSILLoader *SILModule::getSILLoader() {
  // If the SILLoader is null, create it.
  if (!SILLoader)
    SILLoader = SerializedSILLoader::create(
        getASTContext(), this, &deserializationNotificationHandlers);
  // Return the SerializedSILLoader.
  return SILLoader.get();
}
/// Given a conformance \p C and a protocol requirement \p Requirement,
/// search the witness table for the conformance and return the witness thunk
/// for the requirement.
///
/// Returns {nullptr, nullptr} when no witness table, or no matching method
/// entry, can be found.
std::pair<SILFunction *, SILWitnessTable *>
SILModule::lookUpFunctionInWitnessTable(ProtocolConformanceRef C,
                                        SILDeclRef Requirement) {
  // Look up the witness table associated with our protocol conformance from the
  // SILModule.
  auto Ret = lookUpWitnessTable(C);

  // If no witness table was found, bail.
  if (!Ret) {
    LLVM_DEBUG(llvm::dbgs() << "        Failed speculative lookup of "
                               "witness for: ";
               C.dump(llvm::dbgs()); Requirement.dump());
    return std::make_pair(nullptr, nullptr);
  }

  // Okay, we found the correct witness table. Now look for the method.
  for (auto &Entry : Ret->getEntries()) {
    // Look at method entries only.
    if (Entry.getKind() != SILWitnessTable::WitnessKind::Method)
      continue;

    SILWitnessTable::MethodWitness MethodEntry = Entry.getMethodWitness();
    // Check if this is the member we were looking for.
    if (MethodEntry.Requirement != Requirement)
      continue;

    return std::make_pair(MethodEntry.Witness, Ret);
  }

  // No entry matched the requirement.
  return std::make_pair(nullptr, nullptr);
}
/// Given a protocol \p Protocol and a requirement \p Requirement,
/// search the protocol's default witness table and return the default
/// witness thunk for the requirement.
///
/// Returns {nullptr, nullptr} when the table is missing or the requirement
/// has no default implementation.
std::pair<SILFunction *, SILDefaultWitnessTable *>
SILModule::lookUpFunctionInDefaultWitnessTable(const ProtocolDecl *Protocol,
                                               SILDeclRef Requirement,
                                               bool deserializeLazily) {
  // Look up the default witness table associated with our protocol from the
  // SILModule.
  auto Ret = lookUpDefaultWitnessTable(Protocol, deserializeLazily);

  // If no default witness table was found, bail.
  //
  // FIXME: Could be an assert if we fix non-single-frontend mode to link
  // together serialized SIL emitted by each translation unit.
  if (!Ret) {
    LLVM_DEBUG(llvm::dbgs() << "        Failed speculative lookup of default "
                               "witness for " << Protocol->getName() << " ";
               Requirement.dump());
    return std::make_pair(nullptr, nullptr);
  }

  // Okay, we found the correct default witness table. Now look for the method.
  for (auto &Entry : Ret->getEntries()) {
    // Ignore dummy entries emitted for non-method requirements, as well as
    // requirements without default implementations.
    if (!Entry.isValid() || Entry.getKind() != SILWitnessTable::Method)
      continue;

    // Check if this is the member we were looking for.
    if (Entry.getMethodWitness().Requirement != Requirement)
      continue;

    return std::make_pair(Entry.getMethodWitness().Witness, Ret);
  }

  // This requirement doesn't have a default implementation.
  return std::make_pair(nullptr, nullptr);
}
/// Find the SILFunction implementing \p Member in the vtable of \p Class,
/// or nullptr if the vtable or the entry cannot be found.
SILFunction *
SILModule::
lookUpFunctionInVTable(ClassDecl *Class, SILDeclRef Member) {
  // Try to lookup a VTable for Class from the module...
  auto *Vtbl = lookUpVTable(Class);

  // Bail, if the lookup of VTable fails.
  if (!Vtbl) {
    return nullptr;
  }

  // Ok, we have a VTable. Try to lookup the SILFunction implementation from
  // the VTable.
  if (auto E = Vtbl->getEntry(*this, Member))
    return E->Implementation;

  return nullptr;
}
/// Register a handler to be notified whenever SIL entities are deserialized
/// into this module. The module takes ownership of the handler.
void SILModule::registerDeserializationNotificationHandler(
    std::unique_ptr<DeserializationNotificationHandler> &&handler) {
  deserializationNotificationHandlers.add(std::move(handler));
}

/// Register a handler (not owned) to be notified when SIL nodes are deleted.
void SILModule::registerDeleteNotificationHandler(
    DeleteNotificationHandler *handler) {
  // Ask the handler (that can be an analysis, a pass, or some other data
  // structure) if it wants to receive delete notifications.
  if (handler->needsNotifications()) {
    NotificationHandlers.insert(handler);
  }
}

void SILModule::
removeDeleteNotificationHandler(DeleteNotificationHandler* Handler) {
  NotificationHandlers.remove(Handler);
}

/// Broadcast the deletion of \p node to all registered handlers.
void SILModule::notifyDeleteHandlers(SILNode *node) {
  for (auto *Handler : NotificationHandlers) {
    Handler->handleDeleteNotification(node);
  }
}
// TODO: We should have an "isNoReturn" bit on Swift's BuiltinInfo, but for
// now, let's recognize noreturn intrinsics and builtins specially here.
bool SILModule::isNoReturnBuiltinOrIntrinsic(Identifier Name) {
  // LLVM intrinsics carry a NoReturn attribute we can consult directly.
  const auto &IntrinsicInfo = getIntrinsicInfo(Name);
  if (IntrinsicInfo.ID != llvm::Intrinsic::not_intrinsic) {
    return IntrinsicInfo.hasAttribute(llvm::Attribute::NoReturn);
  }
  // Otherwise, fall back to a hard-coded list of noreturn Swift builtins.
  const auto &BuiltinInfo = getBuiltinInfo(Name);
  switch (BuiltinInfo.ID) {
  default:
    return false;
  case BuiltinValueKind::Unreachable:
  case BuiltinValueKind::CondUnreachable:
  case BuiltinValueKind::UnexpectedError:
  case BuiltinValueKind::ErrorInMain:
    return true;
  }
}
/// Decide whether SIL entities associated with \p DC should be serialized
/// into this module's swiftmodule.
bool SILModule::
shouldSerializeEntitiesAssociatedWithDeclContext(const DeclContext *DC) const {
  // Serialize entities associated with this module's associated context.
  if (DC->isChildContextOf(getAssociatedContext())) {
    return true;
  }

  // Serialize entities associated with clang modules, since other entities
  // may depend on them, and someone who deserializes those entities may not
  // have their own copy.
  if (isa<ClangModuleUnit>(DC->getModuleScopeContext())) {
    return true;
  }

  return false;
}

/// Returns true if it is the optimized OnoneSupport module.
bool SILModule::isOptimizedOnoneSupportModule() const {
  return getOptions().shouldOptimize() &&
         getSwiftModule()->isOnoneSupportModule();
}
/// Install the callback that will serialize this module; it may be set only
/// once over the lifetime of the module.
void SILModule::setSerializeSILAction(SILModule::ActionCallback Action) {
  assert(!SerializeSILAction && "Serialization action can be set only once");
  // The parameter is already our own copy; move it into place instead of
  // copy-assigning (avoids a second copy of the callback's captured state).
  SerializeSILAction = std::move(Action);
}
SILModule::ActionCallback SILModule::getSerializeSILAction() const {
  return SerializeSILAction;
}

/// Run the registered serialization action and mark the module serialized.
void SILModule::serialize() {
  assert(SerializeSILAction && "Serialization action should be set");
  assert(!isSerialized() && "The module was serialized already");
  SerializeSILAction();
  setSerialized();
}

/// Attach the optimization-remark YAML output stream (and the raw stream
/// that backs it) to this module.
void SILModule::setOptRecordStream(
    std::unique_ptr<llvm::yaml::Output> &&Stream,
    std::unique_ptr<llvm::raw_ostream> &&RawStream) {
  OptRecordStream = std::move(Stream);
  OptRecordRawStream = std::move(RawStream);
}

bool SILModule::isStdlibModule() const {
  return TheSwiftModule->isStdlibModule();
}
/// Allocate a SILProperty in \p M and register it in the module's property
/// list.
SILProperty *SILProperty::create(SILModule &M,
                                 bool Serialized,
                                 AbstractStorageDecl *Decl,
                                 Optional<KeyPathPatternComponent> Component) {
  auto prop = new (M) SILProperty(Serialized, Decl, Component);
  M.properties.push_back(prop);
  return prop;
}
// Definition from SILLinkage.h.
/// Map a declaration's effective access level to the SIL linkage used for
/// entities generated on its behalf.
SILLinkage swift::getDeclSILLinkage(const ValueDecl *decl) {
  switch (decl->getEffectiveAccess()) {
  case AccessLevel::Private:
  case AccessLevel::FilePrivate:
    return SILLinkage::Private;
  case AccessLevel::Internal:
    return SILLinkage::Hidden;
  case AccessLevel::Public:
  case AccessLevel::Open:
    return SILLinkage::Public;
  }
  // Every access level is handled above; the covered switch lets the
  // compiler diagnose newly added cases.
  return SILLinkage::Public;
}
| karwa/swift | lib/SIL/SILModule.cpp | C++ | apache-2.0 | 23,696 |
/*
* Licensed to ElasticSearch and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. ElasticSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.unit;
import org.hamcrest.MatcherAssert;
import org.junit.Test;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
/**
 * Unit tests for {@code ByteSizeValue}: byte-count arithmetic, unit
 * conversion accessors, and the human-readable {@code toString()} rendering.
 */
public class ByteSizeValueTests {

    @Test
    public void testActual() {
        // 4 GB expressed in bytes: 4 * 1024^3 = 4294967296.
        MatcherAssert.assertThat(new ByteSizeValue(4, ByteSizeUnit.GB).bytes(), equalTo(4294967296l));
    }

    @Test
    public void testSimple() {
        // Round-tripping a value through a unit and back must be lossless.
        assertThat(ByteSizeUnit.BYTES.toBytes(10), is(new ByteSizeValue(10, ByteSizeUnit.BYTES).bytes()));
        assertThat(ByteSizeUnit.KB.toKB(10), is(new ByteSizeValue(10, ByteSizeUnit.KB).kb()));
        assertThat(ByteSizeUnit.MB.toMB(10), is(new ByteSizeValue(10, ByteSizeUnit.MB).mb()));
        assertThat(ByteSizeUnit.GB.toGB(10), is(new ByteSizeValue(10, ByteSizeUnit.GB).gb()));
    }

    @Test
    public void testToString() {
        // Rendering picks the next-larger unit once a value reaches 1024 of
        // the current one; note the last case shows there is no unit above
        // gb here — 1.5 * 1024 GB prints as "1536gb".
        assertThat("10b", is(new ByteSizeValue(10, ByteSizeUnit.BYTES).toString()));
        assertThat("1.5kb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.BYTES).toString()));
        assertThat("1.5mb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.KB).toString()));
        assertThat("1.5gb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.MB).toString()));
        assertThat("1536gb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.GB).toString()));
    }
}
| lmenezes/elasticsearch | src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java | Java | apache-2.0 | 2,226 |
"""
This file implements a brew resolver for Galaxy requirements. In order for Galaxy
to pick up on recursively defined and versioned brew dependencies recipes should
be installed using the experimental `brew-vinstall` external command.
More information here:
https://github.com/jmchilton/brew-tests
https://github.com/Homebrew/homebrew-science/issues/1191
This is still an experimental module and there will almost certainly be backward
incompatible changes coming.
"""
from .resolver_mixins import UsesHomebrewMixin
from ..resolvers import DependencyResolver, INDETERMINATE_DEPENDENCY
# TODO: Implement prefer version linked...
PREFER_VERSION_LINKED = 'linked'  # declared but not yet supported (see TODO above)
PREFER_VERSION_LATEST = 'latest'
UNKNOWN_PREFER_VERSION_MESSAGE_TEMPLATE = "HomebrewDependencyResolver prefer_version must be %s"
# Message currently lists only the 'latest' strategy, the sole supported value.
UNKNOWN_PREFER_VERSION_MESSAGE = UNKNOWN_PREFER_VERSION_MESSAGE_TEMPLATE % (PREFER_VERSION_LATEST)
DEFAULT_PREFER_VERSION = PREFER_VERSION_LATEST
class HomebrewDependencyResolver(DependencyResolver, UsesHomebrewMixin):
    """Resolve Galaxy tool requirements against locally installed brew recipes.

    Experimental resolver; see the module docstring for caveats.
    """
    resolver_type = "homebrew"

    def __init__(self, dependency_manager, **kwds):
        # When 'versionless' is set, requirement versions are ignored and the
        # preferred installed recipe version is used instead.
        self.versionless = _string_as_bool(kwds.get('versionless', 'false'))
        self.prefer_version = kwds.get('prefer_version', None)

        if self.prefer_version is None:
            self.prefer_version = DEFAULT_PREFER_VERSION

        # Only the 'latest' strategy is implemented for versionless mode;
        # reject anything else up front.
        if self.versionless and self.prefer_version not in [PREFER_VERSION_LATEST]:
            raise Exception(UNKNOWN_PREFER_VERSION_MESSAGE)

        self._init_homebrew(**kwds)

    def resolve(self, name, version, type, **kwds):
        """Resolve a single requirement; returns a dependency object or
        INDETERMINATE_DEPENDENCY when this resolver cannot handle it.
        """
        # Only plain "package" requirements can be satisfied via brew.
        if type != "package":
            return INDETERMINATE_DEPENDENCY

        if version is None or self.versionless:
            return self._find_dep_default(name, version)
        else:
            return self._find_dep_versioned(name, version)
def _string_as_bool( value ):
return str( value ).lower() == "true"
__all__ = ['HomebrewDependencyResolver']
| ssorgatem/pulsar | galaxy/tools/deps/resolvers/homebrew.py | Python | apache-2.0 | 1,947 |
/* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.flowable.engine.impl.cmd;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.flowable.common.engine.api.FlowableException;
import org.flowable.common.engine.api.FlowableObjectNotFoundException;
import org.flowable.common.engine.api.delegate.event.FlowableEngineEventType;
import org.flowable.common.engine.impl.interceptor.Command;
import org.flowable.common.engine.impl.interceptor.CommandContext;
import org.flowable.engine.compatibility.Flowable5CompatibilityHandler;
import org.flowable.engine.delegate.event.impl.FlowableEventBuilder;
import org.flowable.engine.impl.persistence.entity.EventSubscriptionEntityManager;
import org.flowable.engine.impl.persistence.entity.ExecutionEntity;
import org.flowable.engine.impl.persistence.entity.SignalEventSubscriptionEntity;
import org.flowable.engine.impl.util.CommandContextUtil;
import org.flowable.engine.impl.util.Flowable5Util;
import org.flowable.engine.runtime.Execution;
/**
* @author Joram Barrez
* @author Tijs Rademakers
*/
/**
 * Command that delivers a named signal event, either to a specific execution
 * or broadcast to all global signal subscriptions (optionally tenant-scoped).
 * Flowable 5 process definitions are delegated to the compatibility handler.
 */
public class SignalEventReceivedCmd implements Command<Void> {

    protected final String eventName;
    // Null for a broadcast; otherwise the single execution to notify.
    protected final String executionId;
    // Variables passed along with the signal; null when none were supplied.
    protected final Map<String, Object> payload;
    // Whether subscriptions are triggered asynchronously (via jobs).
    protected final boolean async;
    protected String tenantId;

    /**
     * Synchronous delivery with optional process variables as payload.
     * The variables map is defensively copied.
     */
    public SignalEventReceivedCmd(String eventName, String executionId, Map<String, Object> processVariables, String tenantId) {
        this.eventName = eventName;
        this.executionId = executionId;
        if (processVariables != null) {
            this.payload = new HashMap<>(processVariables);

        } else {
            this.payload = null;
        }
        this.async = false;
        this.tenantId = tenantId;
    }

    /** Delivery without payload, with an explicit sync/async choice. */
    public SignalEventReceivedCmd(String eventName, String executionId, boolean async, String tenantId) {
        this.eventName = eventName;
        this.executionId = executionId;
        this.async = async;
        this.payload = null;
        this.tenantId = tenantId;
    }

    @Override
    public Void execute(CommandContext commandContext) {
        List<SignalEventSubscriptionEntity> signalEvents = null;

        EventSubscriptionEntityManager eventSubscriptionEntityManager = CommandContextUtil.getEventSubscriptionEntityManager(commandContext);
        if (executionId == null) {
            // Broadcast: find every subscription for this event name/tenant.
            signalEvents = eventSubscriptionEntityManager.findSignalEventSubscriptionsByEventName(eventName, tenantId);
        } else {
            // Targeted delivery: the execution must exist, be active, and
            // actually be subscribed to this signal.
            ExecutionEntity execution = CommandContextUtil.getExecutionEntityManager(commandContext).findById(executionId);

            if (execution == null) {
                throw new FlowableObjectNotFoundException("Cannot find execution with id '" + executionId + "'", Execution.class);
            }

            if (execution.isSuspended()) {
                throw new FlowableException("Cannot throw signal event '" + eventName + "' because execution '" + executionId + "' is suspended");
            }

            if (Flowable5Util.isFlowable5ProcessDefinitionId(commandContext, execution.getProcessDefinitionId())) {
                // Flowable 5 executions are handled entirely by the
                // compatibility layer.
                Flowable5CompatibilityHandler compatibilityHandler = Flowable5Util.getFlowable5CompatibilityHandler();
                compatibilityHandler.signalEventReceived(eventName, executionId, payload, async, tenantId);
                return null;
            }

            signalEvents = eventSubscriptionEntityManager.findSignalEventSubscriptionsByNameAndExecution(eventName, executionId);

            if (signalEvents.isEmpty()) {
                throw new FlowableException("Execution '" + executionId + "' has not subscribed to a signal event with name '" + eventName + "'.");
            }
        }

        for (SignalEventSubscriptionEntity signalEventSubscriptionEntity : signalEvents) {
            // We only throw the event to globally scoped signals.
            // Process instance scoped signals must be thrown within the process itself
            if (signalEventSubscriptionEntity.isGlobalScoped()) {

                if (executionId == null && Flowable5Util.isFlowable5ProcessDefinitionId(commandContext, signalEventSubscriptionEntity.getProcessDefinitionId())) {
                    Flowable5CompatibilityHandler compatibilityHandler = Flowable5Util.getFlowable5CompatibilityHandler();
                    compatibilityHandler.signalEventReceived(signalEventSubscriptionEntity, payload, async);

                } else {
                    // Fire the engine-level ACTIVITY_SIGNALED event before
                    // actually triggering the subscription.
                    CommandContextUtil.getProcessEngineConfiguration().getEventDispatcher().dispatchEvent(
                            FlowableEventBuilder.createSignalEvent(FlowableEngineEventType.ACTIVITY_SIGNALED, signalEventSubscriptionEntity.getActivityId(), eventName,
                                    payload, signalEventSubscriptionEntity.getExecutionId(), signalEventSubscriptionEntity.getProcessInstanceId(),
                                    signalEventSubscriptionEntity.getProcessDefinitionId()));

                    eventSubscriptionEntityManager.eventReceived(signalEventSubscriptionEntity, payload, async);
                }
            }
        }
        return null;
    }
}
| lsmall/flowable-engine | modules/flowable-engine/src/main/java/org/flowable/engine/impl/cmd/SignalEventReceivedCmd.java | Java | apache-2.0 | 5,828 |
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Generated code. DO NOT EDIT!
namespace Google.Cloud.Logging.V2.Snippets
{
// [START logging_v2_generated_ConfigServiceV2_UpdateView_sync]
using Google.Cloud.Logging.V2;
using Google.Protobuf.WellKnownTypes;
public sealed partial class GeneratedConfigServiceV2ClientSnippets
{
    /// <summary>Snippet for UpdateView</summary>
    /// <remarks>
    /// This snippet has been automatically generated for illustrative purposes only.
    /// It may require modifications to work in your environment.
    /// </remarks>
    public void UpdateViewRequestObject()
    {
        // Create client
        ConfigServiceV2Client configServiceV2Client = ConfigServiceV2Client.Create();
        // Initialize request argument(s)
        // NOTE(review): these are placeholder values — supply a real view
        // resource name, view body and field mask before running.
        UpdateViewRequest request = new UpdateViewRequest
        {
            Name = "",
            View = new LogView(),
            UpdateMask = new FieldMask(),
        };
        // Make the request
        LogView response = configServiceV2Client.UpdateView(request);
    }
}
// [END logging_v2_generated_ConfigServiceV2_UpdateView_sync]
}
| jskeet/google-cloud-dotnet | apis/Google.Cloud.Logging.V2/Google.Cloud.Logging.V2.GeneratedSnippets/ConfigServiceV2Client.UpdateViewRequestObjectSnippet.g.cs | C# | apache-2.0 | 1,762 |
//
// This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, v2.2.4-2
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2014.08.10 at 09:54:32 AM IST
//
package com.pacificmetrics.ims.apip.qti.section;
import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlSchemaType;
import javax.xml.bind.annotation.XmlType;
import javax.xml.bind.annotation.adapters.NormalizedStringAdapter;
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
/**
*
* This is the container for referential link to the externally stored AssessmentItem (each such Item is stored in its own XML instance file). The identifier attribute should be used to identify the actual Item. The details for the Item provided at this point enable the associated Test/Section to determine if the Item fulfills any prerequisites.
*
*
* <p>Java class for AssessmentItemRef.Type complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType name="AssessmentItemRef.Type">
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <element ref="{http://www.imsglobal.org/xsd/apip/apipv1p0/qtisection/imsqti_v2p2}timeLimits" minOccurs="0"/>
* <element ref="{http://www.imsglobal.org/xsd/apip/apipv1p0/qtisection/imsqti_v2p2}variableMapping" maxOccurs="unbounded" minOccurs="0"/>
* <element ref="{http://www.imsglobal.org/xsd/apip/apipv1p0/qtisection/imsqti_v2p2}weight" maxOccurs="unbounded" minOccurs="0"/>
* <element ref="{http://www.imsglobal.org/xsd/apip/apipv1p0/qtisection/imsqti_v2p2}templateDefault" maxOccurs="unbounded" minOccurs="0"/>
* </sequence>
* <attGroup ref="{http://www.imsglobal.org/xsd/apip/apipv1p0/qtisection/imsqti_v2p2}identifier.AssessmentItemRef.Attr"/>
* <attGroup ref="{http://www.imsglobal.org/xsd/apip/apipv1p0/qtisection/imsqti_v2p2}fixed.AssessmentItemRef.Attr"/>
* <attGroup ref="{http://www.imsglobal.org/xsd/apip/apipv1p0/qtisection/imsqti_v2p2}category.AssessmentItemRef.Attr"/>
* <attGroup ref="{http://www.imsglobal.org/xsd/apip/apipv1p0/qtisection/imsqti_v2p2}href.AssessmentItemRef.Attr"/>
* <attGroup ref="{http://www.imsglobal.org/xsd/apip/apipv1p0/qtisection/imsqti_v2p2}required.AssessmentItemRef.Attr"/>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "AssessmentItemRef.Type", propOrder = {
    "timeLimits",
    "variableMappings",
    "weights",
    "templateDefaults"
})
@XmlRootElement(name = "assessmentItemRef")
public class AssessmentItemRef {

    // Optional delivery time limits for the referenced item.
    protected TimeLimits timeLimits;
    @XmlElement(name = "variableMapping")
    protected List<VariableMapping> variableMappings;
    @XmlElement(name = "weight")
    protected List<Weight> weights;
    @XmlElement(name = "templateDefault")
    protected List<TemplateDefault> templateDefaults;
    @XmlAttribute(name = "identifier", required = true)
    @XmlJavaTypeAdapter(NormalizedStringAdapter.class)
    @XmlSchemaType(name = "normalizedString")
    protected String identifier;
    @XmlAttribute(name = "fixed")
    protected Boolean fixed;
    @XmlAttribute(name = "category")
    protected List<String> categories;
    @XmlAttribute(name = "href", required = true)
    @XmlSchemaType(name = "anyURI")
    protected String href;
    @XmlAttribute(name = "required")
    protected Boolean required;

    /**
     * Gets the optional time limits for the referenced item.
     *
     * @return the {@link TimeLimits}, or {@code null} if none are set
     */
    public TimeLimits getTimeLimits() {
        return timeLimits;
    }

    /**
     * Sets the time limits for the referenced item.
     *
     * @param value the {@link TimeLimits} to apply, may be {@code null}
     */
    public void setTimeLimits(TimeLimits value) {
        timeLimits = value;
    }

    /**
     * Gets the live, lazily created list of variable mappings.
     * Mutations of the returned list are reflected in this object, which is
     * why no corresponding setter exists; add elements via
     * {@code getVariableMappings().add(newItem)}.
     *
     * @return the live list of {@link VariableMapping} elements, never {@code null}
     */
    public List<VariableMapping> getVariableMappings() {
        if (variableMappings == null) {
            variableMappings = new ArrayList<VariableMapping>();
        }
        return variableMappings;
    }

    /**
     * Gets the live, lazily created list of weights.
     * Mutations of the returned list are reflected in this object, which is
     * why no corresponding setter exists; add elements via
     * {@code getWeights().add(newItem)}.
     *
     * @return the live list of {@link Weight} elements, never {@code null}
     */
    public List<Weight> getWeights() {
        if (weights == null) {
            weights = new ArrayList<Weight>();
        }
        return weights;
    }

    /**
     * Gets the live, lazily created list of template defaults.
     * Mutations of the returned list are reflected in this object, which is
     * why no corresponding setter exists; add elements via
     * {@code getTemplateDefaults().add(newItem)}.
     *
     * @return the live list of {@link TemplateDefault} elements, never {@code null}
     */
    public List<TemplateDefault> getTemplateDefaults() {
        if (templateDefaults == null) {
            templateDefaults = new ArrayList<TemplateDefault>();
        }
        return templateDefaults;
    }

    /**
     * Gets the identifier of the referenced assessment item.
     *
     * @return the identifier attribute, or {@code null} if unset
     */
    public String getIdentifier() {
        return identifier;
    }

    /**
     * Sets the identifier of the referenced assessment item.
     *
     * @param value the identifier to use
     */
    public void setIdentifier(String value) {
        identifier = value;
    }

    /**
     * Gets the fixed flag, defaulting to {@code false} when the attribute is absent.
     *
     * @return {@code true} only if the attribute is present and true
     */
    public boolean isFixed() {
        return fixed != null && fixed.booleanValue();
    }

    /**
     * Sets the fixed flag.
     *
     * @param value the flag value, or {@code null} to clear the attribute
     */
    public void setFixed(Boolean value) {
        fixed = value;
    }

    /**
     * Gets the live, lazily created list of category strings.
     * Mutations of the returned list are reflected in this object, which is
     * why no corresponding setter exists; add elements via
     * {@code getCategories().add(newItem)}.
     *
     * @return the live list of category {@link String}s, never {@code null}
     */
    public List<String> getCategories() {
        if (categories == null) {
            categories = new ArrayList<String>();
        }
        return categories;
    }

    /**
     * Gets the href (URI) pointing at the externally stored item.
     *
     * @return the href attribute, or {@code null} if unset
     */
    public String getHref() {
        return href;
    }

    /**
     * Sets the href (URI) pointing at the externally stored item.
     *
     * @param value the href to use
     */
    public void setHref(String value) {
        href = value;
    }

    /**
     * Gets the required flag, defaulting to {@code false} when the attribute is absent.
     *
     * @return {@code true} only if the attribute is present and true
     */
    public boolean isRequired() {
        return required != null && required.booleanValue();
    }

    /**
     * Sets the required flag.
     *
     * @param value the flag value, or {@code null} to clear the attribute
     */
    public void setRequired(Boolean value) {
        required = value;
    }

}
| SmarterApp/ItemAuthoring | sbac-iaip/java/src/main/java/com/pacificmetrics/ims/apip/qti/section/AssessmentItemRef.java | Java | apache-2.0 | 9,877 |
// --- ping-content -------------------------------
import $ from 'jquery'
import {debugMsg, spawnVdsm} from './helpers'
export function renderPing () {
  // Buffer that accumulates stdout chunks from the `vdsm ping` subprocess.
  var vdsmPingResponse = ''
  spawnVdsm('ping', null,
    function (data) { vdsmPingResponse += data }, // stdout callback: append chunk
    function () { pingSuccessful(vdsmPingResponse) }, // success: hand full output over
    pingFailed)
  // The original trailing `vdsmPingResponse = ''` reset was removed: the buffer
  // is already empty at that point, and clearing it after spawnVdsm() has been
  // started could only race with the stdout callback above.
}
// Parse the raw ping output and render either a success or a failure message.
function pingSuccessful (vdsmPingResponse) {
  var json = vdsmPingResponse
  var resp = $.parseJSON(json)
  // A well-formed response carries status.code and status.message.
  var wellFormed = resp.hasOwnProperty('status') &&
    resp.status.hasOwnProperty('code') &&
    resp.status.hasOwnProperty('message')
  if (!wellFormed) {
    pingFailed(null, 'Ping failed with malformed error message returned: ' + json)
    return
  }
  if (resp.status.code === 0) {
    // Code 0: connection works (the well-formatted response describes details).
    printPingContent('Ping succeeded.<br/>The cockpit-ovirt plugin is installed and VDSM connection can be established.', json)
  } else {
    // Non-zero code: well-formatted error response with a description.
    printPingContent('Ping failed: ' + resp.status.message, json)
  }
}
// Failure callback: renders an error message and a synthetic JSON payload.
// `stderr` may be null when invoked from pingSuccessful() on a malformed response.
function pingFailed (stderr, detail) {
  if (!detail) {
    detail = 'Ping execution failed.'
  }
  // BUG FIX: only prepend stderr when it is actually present. The previous
  // unconditional concatenation produced a literal "null\n" prefix whenever
  // this function was called as pingFailed(null, ...).
  if (stderr) {
    detail = stderr + '\n' + detail
  }
  printPingContent(detail, '{"status": {"message": "' + detail + '", "code": 1}}')
}
// Writes both representations into the #ping-content element: a visible
// human-readable message and a hidden machine-readable payload for the parser.
function printPingContent (humanText, parserText) {
  var content = "<div id='ping-content-human'>" + humanText + '</div>' +
    "<div id='ping-content-parser' hidden>" + parserText + '</div>'
  $('#ping-content').html(content)
  debugMsg('Ping content: ' + content)
}
| matobet/cockpit-ovirt | src/ping.js | JavaScript | apache-2.0 | 1,604 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache license, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the license for the specific language governing permissions and
* limitations under the license.
*/
package org.apache.logging.log4j.core.appender.mom;
import java.io.Serializable;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import javax.jms.Connection;
import javax.jms.ConnectionFactory;
import javax.jms.Destination;
import javax.jms.JMSException;
import javax.jms.MapMessage;
import javax.jms.Message;
import javax.jms.MessageConsumer;
import javax.jms.MessageProducer;
import javax.jms.Session;
import javax.naming.NamingException;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.appender.AbstractManager;
import org.apache.logging.log4j.core.appender.AppenderLoggingException;
import org.apache.logging.log4j.core.appender.ManagerFactory;
import org.apache.logging.log4j.core.net.JndiManager;
import org.apache.logging.log4j.core.util.Log4jThread;
import org.apache.logging.log4j.status.StatusLogger;
import org.apache.logging.log4j.util.BiConsumer;
/**
* Consider this class <b>private</b>; it is only <b>public</b> for access by integration tests.
*
* <p>
* JMS connection and session manager. Can be used to access MessageProducer, MessageConsumer, and Message objects
* involving a configured ConnectionFactory and Destination.
* </p>
*/
public class JmsManager extends AbstractManager {

    /**
     * Immutable holder for everything needed to establish (and re-establish) the JMS
     * connection: JNDI properties, factory/destination binding names, credentials,
     * and the reconnect policy.
     */
    public static class JmsManagerConfiguration {
        private final Properties jndiProperties;
        private final String connectionFactoryName;
        private final String destinationName;
        private final String userName;
        private final char[] password;
        private final boolean immediateFail;
        private final boolean retry;
        private final long reconnectIntervalMillis;

        JmsManagerConfiguration(final Properties jndiProperties, final String connectionFactoryName,
                final String destinationName, final String userName, final char[] password, final boolean immediateFail,
                final long reconnectIntervalMillis) {
            this.jndiProperties = jndiProperties;
            this.connectionFactoryName = connectionFactoryName;
            this.destinationName = destinationName;
            this.userName = userName;
            this.password = password;
            this.immediateFail = immediateFail;
            this.reconnectIntervalMillis = reconnectIntervalMillis;
            // A positive reconnect interval implies that failed sends should be retried.
            this.retry = reconnectIntervalMillis > 0;
        }

        public String getConnectionFactoryName() {
            return connectionFactoryName;
        }

        public String getDestinationName() {
            return destinationName;
        }

        public JndiManager getJndiManager() {
            return JndiManager.getJndiManager(getJndiProperties());
        }

        public Properties getJndiProperties() {
            return jndiProperties;
        }

        public char[] getPassword() {
            return password;
        }

        public long getReconnectIntervalMillis() {
            return reconnectIntervalMillis;
        }

        public String getUserName() {
            return userName;
        }

        public boolean isImmediateFail() {
            return immediateFail;
        }

        public boolean isRetry() {
            return retry;
        }

        @Override
        public String toString() {
            return "JmsManagerConfiguration [jndiProperties=" + jndiProperties + ", connectionFactoryName="
                    + connectionFactoryName + ", destinationName=" + destinationName + ", userName=" + userName
                    + ", immediateFail=" + immediateFail + ", retry=" + retry + ", reconnectIntervalMillis="
                    + reconnectIntervalMillis + "]";
        }
    }

    /**
     * Factory used by {@link AbstractManager#getManager}; returns {@code null} (rather
     * than throwing) when the manager cannot be created.
     */
    private static class JmsManagerFactory implements ManagerFactory<JmsManager, JmsManagerConfiguration> {

        @Override
        public JmsManager createManager(final String name, final JmsManagerConfiguration data) {
            try {
                return new JmsManager(name, data);
            } catch (final Exception e) {
                LOGGER.error("Error creating JmsManager using JmsManagerConfiguration [{}]", data, e);
                return null;
            }
        }
    }

    /**
     * Handles reconnecting to JMS on a background thread. The thread loops, sleeping
     * for the configured interval between attempts, until a reconnect succeeds or
     * {@link #shutdown()} is called.
     */
    private class Reconnector extends Log4jThread {

        private final CountDownLatch latch = new CountDownLatch(1);

        private volatile boolean shutdown = false;

        private final Object owner;

        public Reconnector(final Object owner) {
            super("JmsManager-Reconnector");
            this.owner = owner;
        }

        /** Blocks the caller until the first reconnect attempt has completed. */
        public void latch() {
            try {
                latch.await();
            } catch (final InterruptedException ex) {
                // Ignore the exception.
            }
        }

        void reconnect() throws NamingException, JMSException {
            // Build the complete replacement pipeline first; only swap it in (under the
            // owner's lock) once every piece has been created successfully.
            final JndiManager jndiManager2 = getJndiManager();
            final Connection connection2 = createConnection(jndiManager2);
            final Session session2 = createSession(connection2);
            final Destination destination2 = createDestination(jndiManager2);
            final MessageProducer messageProducer2 = createMessageProducer(session2, destination2);
            connection2.start();
            synchronized (owner) {
                jndiManager = jndiManager2;
                connection = connection2;
                session = session2;
                destination = destination2;
                messageProducer = messageProducer2;
                reconnector = null;
                // Successful reconnect: stop this thread's retry loop.
                shutdown = true;
            }
            LOGGER.debug("Connection reestablished to {}", configuration);
        }

        @Override
        public void run() {
            while (!shutdown) {
                try {
                    sleep(configuration.getReconnectIntervalMillis());
                    reconnect();
                } catch (final InterruptedException | JMSException | NamingException e) {
                    LOGGER.debug("Cannot reestablish JMS connection to {}: {}", configuration, e.getLocalizedMessage(),
                            e);
                } finally {
                    // Release any senders blocked in latch() after each attempt.
                    latch.countDown();
                }
            }
        }

        public void shutdown() {
            shutdown = true;
        }

    }

    private static final Logger LOGGER = StatusLogger.getLogger();

    static final JmsManagerFactory FACTORY = new JmsManagerFactory();

    /**
     * Gets a JmsManager using the specified configuration parameters.
     *
     * @param name
     *            The name to use for this JmsManager.
     * @param connectionFactoryName
     *            The binding name for the {@link javax.jms.ConnectionFactory}.
     * @param destinationName
     *            The binding name for the {@link javax.jms.Destination}.
     * @param userName
     *            The userName to connect with or {@code null} for no authentication.
     * @param password
     *            The password to use with the given userName or {@code null} for no authentication.
     * @param immediateFail
     *            Whether or not to fail immediately with a {@link AppenderLoggingException} when connecting to JMS
     *            fails.
     * @param reconnectIntervalMillis
     *            How to log sleep in milliseconds before trying to reconnect to JMS.
     * @return The JmsManager as configured.
     */
    public static JmsManager getJmsManager(final String name, final Properties jndiProperties,
            final String connectionFactoryName, final String destinationName, final String userName,
            final char[] password, final boolean immediateFail, final long reconnectIntervalMillis) {
        final JmsManagerConfiguration configuration = new JmsManagerConfiguration(jndiProperties, connectionFactoryName,
                destinationName, userName, password, immediateFail, reconnectIntervalMillis);
        return getManager(name, FACTORY, configuration);
    }

    private final JmsManagerConfiguration configuration;

    // All connection state is volatile: it is swapped wholesale by the Reconnector
    // thread while sender threads read it.
    private volatile Reconnector reconnector;
    private volatile JndiManager jndiManager;
    private volatile Connection connection;
    private volatile Session session;
    private volatile Destination destination;
    private volatile MessageProducer messageProducer;

    private JmsManager(final String name, final JmsManagerConfiguration configuration) {
        super(null, name);
        this.configuration = configuration;
        this.jndiManager = configuration.getJndiManager();
        try {
            this.connection = createConnection(this.jndiManager);
            this.session = createSession(this.connection);
            this.destination = createDestination(this.jndiManager);
            this.messageProducer = createMessageProducer(this.session, this.destination);
            this.connection.start();
        } catch (NamingException | JMSException e) {
            // Initial connect failed: start the background reconnector instead of
            // propagating, so the manager can come up once JMS is available.
            this.reconnector = createReconnector();
            this.reconnector.start();
        }
    }

    private boolean closeConnection() {
        if (connection == null) {
            return true;
        }
        final Connection temp = connection;
        connection = null;
        try {
            temp.close();
            return true;
        } catch (final JMSException e) {
            StatusLogger.getLogger().debug(
                    "Caught exception closing JMS Connection: {} ({}); continuing JMS manager shutdown",
                    e.getLocalizedMessage(), temp, e);
            return false;
        }
    }

    private boolean closeJndiManager() {
        if (jndiManager == null) {
            return true;
        }
        final JndiManager tmp = jndiManager;
        jndiManager = null;
        tmp.close();
        return true;
    }

    private boolean closeMessageProducer() {
        if (messageProducer == null) {
            return true;
        }
        final MessageProducer temp = messageProducer;
        messageProducer = null;
        try {
            temp.close();
            return true;
        } catch (final JMSException e) {
            StatusLogger.getLogger().debug(
                    "Caught exception closing JMS MessageProducer: {} ({}); continuing JMS manager shutdown",
                    e.getLocalizedMessage(), temp, e);
            return false;
        }
    }

    private boolean closeSession() {
        if (session == null) {
            return true;
        }
        final Session temp = session;
        session = null;
        try {
            temp.close();
            return true;
        } catch (final JMSException e) {
            StatusLogger.getLogger().debug(
                    "Caught exception closing JMS Session: {} ({}); continuing JMS manager shutdown",
                    e.getLocalizedMessage(), temp, e);
            return false;
        }
    }

    private Connection createConnection(final JndiManager jndiManager) throws NamingException, JMSException {
        final ConnectionFactory connectionFactory = jndiManager.lookup(configuration.getConnectionFactoryName());
        if (configuration.getUserName() != null && configuration.getPassword() != null) {
            // The password is known to be non-null here; the former inline null check
            // was dead code.
            return connectionFactory.createConnection(configuration.getUserName(),
                    String.valueOf(configuration.getPassword()));
        }
        return connectionFactory.createConnection();
    }

    private Destination createDestination(final JndiManager jndiManager) throws NamingException {
        return jndiManager.lookup(configuration.getDestinationName());
    }

    /**
     * Creates a TextMessage, MapMessage, or ObjectMessage from a Serializable object.
     * <p>
     * For instance, when using a text-based {@link org.apache.logging.log4j.core.Layout} such as
     * {@link org.apache.logging.log4j.core.layout.PatternLayout}, the {@link org.apache.logging.log4j.core.LogEvent}
     * message will be serialized to a String.
     * </p>
     * <p>
     * When using a layout such as {@link org.apache.logging.log4j.core.layout.SerializedLayout}, the LogEvent message
     * will be serialized as a Java object.
     * </p>
     * <p>
     * When using a layout such as {@link org.apache.logging.log4j.core.layout.MessageLayout} and the LogEvent message
     * is a Log4j MapMessage, the message will be serialized as a JMS MapMessage.
     * </p>
     *
     * @param object
     *            The LogEvent or String message to wrap.
     * @return A new JMS message containing the provided object.
     * @throws JMSException
     */
    public Message createMessage(final Serializable object) throws JMSException {
        if (object instanceof String) {
            return this.session.createTextMessage((String) object);
        } else if (object instanceof org.apache.logging.log4j.message.MapMessage) {
            return map((org.apache.logging.log4j.message.MapMessage<?, ?>) object, this.session.createMapMessage());
        }
        return this.session.createObjectMessage(object);
    }

    private void createMessageAndSend(final LogEvent event, final Serializable serializable) throws JMSException {
        final Message message = createMessage(serializable);
        // Stamp the JMS message with the log event's time rather than "now".
        message.setJMSTimestamp(event.getTimeMillis());
        messageProducer.send(message);
    }

    /**
     * Creates a MessageConsumer on this Destination using the current Session.
     *
     * @return A MessageConsumer on this Destination.
     * @throws JMSException
     */
    public MessageConsumer createMessageConsumer() throws JMSException {
        return this.session.createConsumer(this.destination);
    }

    /**
     * Creates a MessageProducer on this Destination using the current Session.
     *
     * @param session
     *            The JMS Session to use to create the MessageProducer
     * @param destination
     *            The JMS Destination for the MessageProducer
     * @return A MessageProducer on this Destination.
     * @throws JMSException
     */
    public MessageProducer createMessageProducer(final Session session, final Destination destination)
            throws JMSException {
        return session.createProducer(destination);
    }

    private Reconnector createReconnector() {
        final Reconnector recon = new Reconnector(this);
        recon.setDaemon(true);
        recon.setPriority(Thread.MIN_PRIORITY);
        return recon;
    }

    private Session createSession(final Connection connection) throws JMSException {
        return connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
    }

    public JmsManagerConfiguration getJmsManagerConfiguration() {
        return configuration;
    }

    JndiManager getJndiManager() {
        return configuration.getJndiManager();
    }

    <T> T lookup(final String destinationName) throws NamingException {
        return this.jndiManager.lookup(destinationName);
    }

    private MapMessage map(final org.apache.logging.log4j.message.MapMessage<?, ?> log4jMapMessage,
            final MapMessage jmsMapMessage) {
        // Map without calling org.apache.logging.log4j.message.MapMessage#getData() which makes a copy of the map.
        log4jMapMessage.forEach(new BiConsumer<String, Object>() {
            @Override
            public void accept(final String key, final Object value) {
                try {
                    jmsMapMessage.setObject(key, value);
                } catch (final JMSException e) {
                    throw new IllegalArgumentException(String.format("%s mapping key '%s' to value '%s': %s",
                            e.getClass(), key, value, e.getLocalizedMessage()), e);
                }
            }
        });
        return jmsMapMessage;
    }

    @Override
    protected boolean releaseSub(final long timeout, final TimeUnit timeUnit) {
        if (reconnector != null) {
            reconnector.shutdown();
            reconnector.interrupt();
            reconnector = null;
        }
        // BUG FIX: the accumulator must start at true; seeding it with false meant
        // `closed &= ...` could never report a successful shutdown. That bug also
        // masked an NPE: the old trailing `this.jndiManager.stop(timeout, timeUnit)`
        // ran after closeJndiManager() had already nulled the field, so the JNDI
        // manager is now closed last instead of being stopped twice.
        boolean closed = true;
        closed &= closeMessageProducer();
        closed &= closeSession();
        closed &= closeConnection();
        closed &= closeJndiManager();
        return closed;
    }

    void send(final LogEvent event, final Serializable serializable) {
        if (messageProducer == null) {
            // No producer yet: optionally wait for the reconnector's first attempt
            // before giving up (unless configured to fail immediately).
            if (reconnector != null && !configuration.isImmediateFail()) {
                reconnector.latch();
            }
            if (messageProducer == null) {
                throw new AppenderLoggingException(
                        "Error sending to JMS Manager '" + getName() + "': JMS message producer not available");
            }
        }
        synchronized (this) {
            try {
                createMessageAndSend(event, serializable);
            } catch (final JMSException causeEx) {
                // NOTE(review): when retry is disabled (or a reconnector is already
                // running) the send failure is swallowed here — confirm this
                // best-effort behavior is intended.
                if (configuration.isRetry() && reconnector == null) {
                    reconnector = createReconnector();
                    try {
                        closeJndiManager();
                        reconnector.reconnect();
                    } catch (NamingException | JMSException reconnEx) {
                        LOGGER.debug("Cannot reestablish JMS connection to {}: {}; starting reconnector thread {}",
                                configuration, reconnEx.getLocalizedMessage(), reconnector.getName(), reconnEx);
                        reconnector.start();
                        throw new AppenderLoggingException(
                                String.format("Error sending to %s for %s", getName(), configuration), causeEx);
                    }
                    try {
                        // One immediate retry on the freshly reconnected producer; the
                        // original failure remains the reported cause.
                        createMessageAndSend(event, serializable);
                    } catch (final JMSException e) {
                        throw new AppenderLoggingException(
                                String.format("Error sending to %s after reestablishing connection for %s", getName(),
                                        configuration),
                                causeEx);
                    }
                }
            }
        }
    }

}
| codescale/logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/appender/mom/JmsManager.java | Java | apache-2.0 | 19,167 |
package org.gwtbootstrap3.extras.datetimepicker.client;
/*
* #%L
* GwtBootstrap3
* %%
* Copyright (C) 2013 - 2016 GwtBootstrap3
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import com.google.gwt.core.client.GWT;
import com.google.gwt.resources.client.ClientBundle;
import com.google.gwt.resources.client.TextResource;
/**
* @author Sven Jacobs
*/
public interface DateTimePickerClientBundle extends ClientBundle {

    // Interface fields are implicitly public static final, so the redundant
    // modifiers have been dropped.
    DateTimePickerClientBundle INSTANCE = GWT.create(DateTimePickerClientBundle.class);

    // Bundled bootstrap-datetimepicker version and the directory that holds its
    // locale scripts; both are compile-time constants used in the @Source paths.
    String VERSION = "2.4.4";
    String I18N_DIR = "resource/js/locales-" + VERSION + "/";

    /** The minified bootstrap-datetimepicker widget script. */
    @Source("resource/js/bootstrap-datetimepicker-" + VERSION + ".min.cache.js")
    TextResource dateTimePicker();

    // One accessor per bundled locale script; each method name mirrors the
    // locale code embedded in the file name.

    @Source(I18N_DIR + "bootstrap-datetimepicker.ar.js")
    TextResource ar();

    @Source(I18N_DIR + "bootstrap-datetimepicker.az.js")
    TextResource az();

    @Source(I18N_DIR + "bootstrap-datetimepicker.bg.js")
    TextResource bg();

    @Source(I18N_DIR + "bootstrap-datetimepicker.bn.js")
    TextResource bn();

    @Source(I18N_DIR + "bootstrap-datetimepicker.ca.js")
    TextResource ca();

    @Source(I18N_DIR + "bootstrap-datetimepicker.cs.js")
    TextResource cs();

    @Source(I18N_DIR + "bootstrap-datetimepicker.da.js")
    TextResource da();

    @Source(I18N_DIR + "bootstrap-datetimepicker.de.js")
    TextResource de();

    @Source(I18N_DIR + "bootstrap-datetimepicker.ee.js")
    TextResource ee();

    @Source(I18N_DIR + "bootstrap-datetimepicker.el.js")
    TextResource el();

    @Source(I18N_DIR + "bootstrap-datetimepicker.es.js")
    TextResource es();

    @Source(I18N_DIR + "bootstrap-datetimepicker.fi.js")
    TextResource fi();

    @Source(I18N_DIR + "bootstrap-datetimepicker.fr.js")
    TextResource fr();

    @Source(I18N_DIR + "bootstrap-datetimepicker.he.js")
    TextResource he();

    @Source(I18N_DIR + "bootstrap-datetimepicker.hr.js")
    TextResource hr();

    @Source(I18N_DIR + "bootstrap-datetimepicker.hu.js")
    TextResource hu();

    @Source(I18N_DIR + "bootstrap-datetimepicker.id.js")
    TextResource id();

    @Source(I18N_DIR + "bootstrap-datetimepicker.is.js")
    TextResource is();

    @Source(I18N_DIR + "bootstrap-datetimepicker.it.js")
    TextResource it();

    @Source(I18N_DIR + "bootstrap-datetimepicker.ja.js")
    TextResource ja();

    @Source(I18N_DIR + "bootstrap-datetimepicker.ka.js")
    TextResource ka();

    @Source(I18N_DIR + "bootstrap-datetimepicker.ko.js")
    TextResource ko();

    @Source(I18N_DIR + "bootstrap-datetimepicker.lt.js")
    TextResource lt();

    @Source(I18N_DIR + "bootstrap-datetimepicker.lv.js")
    TextResource lv();

    @Source(I18N_DIR + "bootstrap-datetimepicker.ms.js")
    TextResource ms();

    @Source(I18N_DIR + "bootstrap-datetimepicker.nb.js")
    TextResource nb();

    @Source(I18N_DIR + "bootstrap-datetimepicker.nl.js")
    TextResource nl();

    @Source(I18N_DIR + "bootstrap-datetimepicker.no.js")
    TextResource no();

    @Source(I18N_DIR + "bootstrap-datetimepicker.pl.js")
    TextResource pl();

    @Source(I18N_DIR + "bootstrap-datetimepicker.pt-BR.js")
    TextResource pt_BR();

    @Source(I18N_DIR + "bootstrap-datetimepicker.pt.js")
    TextResource pt();

    @Source(I18N_DIR + "bootstrap-datetimepicker.ro.js")
    TextResource ro();

    @Source(I18N_DIR + "bootstrap-datetimepicker.rs-latin.js")
    TextResource rs_latin();

    @Source(I18N_DIR + "bootstrap-datetimepicker.rs.js")
    TextResource rs();

    @Source(I18N_DIR + "bootstrap-datetimepicker.ru.js")
    TextResource ru();

    @Source(I18N_DIR + "bootstrap-datetimepicker.sk.js")
    TextResource sk();

    @Source(I18N_DIR + "bootstrap-datetimepicker.sl.js")
    TextResource sl();

    @Source(I18N_DIR + "bootstrap-datetimepicker.sv.js")
    TextResource sv();

    @Source(I18N_DIR + "bootstrap-datetimepicker.sw.js")
    TextResource sw();

    @Source(I18N_DIR + "bootstrap-datetimepicker.th.js")
    TextResource th();

    @Source(I18N_DIR + "bootstrap-datetimepicker.tr.js")
    TextResource tr();

    @Source(I18N_DIR + "bootstrap-datetimepicker.ua.js")
    TextResource ua();

    @Source(I18N_DIR + "bootstrap-datetimepicker.uk.js")
    TextResource uk();

    @Source(I18N_DIR + "bootstrap-datetimepicker.zh-CN.js")
    TextResource zh_CN();

    @Source(I18N_DIR + "bootstrap-datetimepicker.zh-TW.js")
    TextResource zh_TW();

}
| gwtbootstrap3/gwtbootstrap3-extras | src/main/java/org/gwtbootstrap3/extras/datetimepicker/client/DateTimePickerClientBundle.java | Java | apache-2.0 | 4,968 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.sun.jini.test.spec.config.abstractconfiguration;
import java.util.logging.Level;
import com.sun.jini.qa.harness.QATest;
import com.sun.jini.qa.harness.QAConfig;
import com.sun.jini.qa.harness.TestException;
import com.sun.jini.qa.harness.QAConfig;
import java.util.logging.Logger;
import java.util.logging.Level;
import java.util.Random;
import java.util.Arrays;
/**
 * <pre>
 * Common parts for Valid*_Test.
 * </pre>
 */
public abstract class ValidCheck_Test extends QATest {

    /**
     * Keyword list, BooleanLiteral and NullLiteral from the Java spec.
     * The array preserves the spec's column grouping and is therefore
     * NOT sorted; membership must not be tested with Arrays.binarySearch
     * (its result is undefined on unsorted input).
     */
    final static String [] keywords = {
        "abstract", "default", "if", "private", "this",
        "boolean", "do", "implements", "protected", "throw",
        "break", "double", "import", "public", "throws",
        "byte", "else", "instanceof", "return", "transient",
        "case", "extends", "int", "short", "try",
        "catch", "final", "interface", "static", "void",
        "char", "finally", "long", "strictfp", "volatile",
        "class", "float", "native", "super", "while",
        "const", "for", "new", "switch",
        "continue", "goto", "package", "synchronized",
        "true", "false", "null"
    };

    /**
     * Random generator for chars; the fixed seed keeps runs reproducible.
     */
    final static Random random = new Random( 0xc76098a6 );

    /**
     * Returns a uniformly random UTF-16 code unit.
     */
    protected char nextRandomChar() {
        return (char)(random.nextInt(0x10000));
    }

    /**
     * Returns a random char valid as the first char of a Java identifier.
     */
    protected char nextJavaIdentifierStartChar() {
        char nextChar = nextRandomChar();
        while (!(Character.isJavaIdentifierStart(nextChar))) {
            nextChar = nextRandomChar();
        }
        return nextChar;
    }

    /**
     * Returns a random char valid inside a Java identifier.
     */
    protected char nextJavaIdentifierPartChar() {
        char nextChar = nextRandomChar();
        while (!(Character.isJavaIdentifierPart(nextChar))) {
            nextChar = nextRandomChar();
        }
        return nextChar;
    }

    /**
     * Returns a random char that may NOT appear inside a Java identifier.
     */
    protected char nextNonJavaIdentifierPartChar() {
        char nextChar = nextRandomChar();
        while (Character.isJavaIdentifierPart(nextChar)) {
            nextChar = nextRandomChar();
        }
        return nextChar;
    }

    /**
     * Returns a random valid Java identifier of (idLength + 1) characters
     * that is not a keyword, boolean literal or null literal.
     *
     * Fix: the previous implementation called Arrays.binarySearch on the
     * unsorted keywords array, whose result is undefined, so keywords could
     * occasionally slip through unrejected; a linear membership test is used
     * instead. In-loop String concatenation was replaced with StringBuilder.
     */
    protected String nextRandomId(int idLength) {
        StringBuilder sb = new StringBuilder();
        sb.append(nextJavaIdentifierStartChar());
        for (int i = 0; i < idLength; ++i) {
            sb.append(nextJavaIdentifierPartChar());
        }
        String result = sb.toString();
        if (Arrays.asList(keywords).contains(result)) {
            result = nextRandomId(idLength);
        }
        return result;
    }
}
| cdegroot/river | qa/src/com/sun/jini/test/spec/config/abstractconfiguration/ValidCheck_Test.java | Java | apache-2.0 | 3,558 |
// Recompute the frame layout whenever the window is resized.
window.onresize=function(){
    setFrame();
}
// Initial layout once the DOM is ready.
$(document).ready(function(){
    setFrame()
});
/**
 * Size the page frame to the viewport: #container gets the full viewport
 * height; #sidebar and #content share the height remaining below #header.
 */
function setFrame(){
    // Fix: the last fallback read `docuemnt.body.clientHeight` (typo),
    // which would throw a ReferenceError whenever the first two sources
    // were falsy.
    var totalHeight = document.documentElement.clientHeight
        || window.innerHeight || document.body.clientHeight;
    $('#container').css('height', totalHeight);
    var headerHeight = $('#header').height();
    var mainHeight = totalHeight - headerHeight;
    $('#sidebar').css('height', mainHeight);
    $('#content').css('height', mainHeight);
}
// Remove an input row, unless it is the last remaining row in its list.
$(document).on({
    click:function(){
        if( $(this).parent().parent().children('li.input-row').length != 1 ){
            $(this).parent().remove();
        }
    }
},'span.remove-input-row');
// Append a reaction input row (rendered from the jQuery template) to the
// preceding input list.
$(document).on({
    click:function(){
        var ulElem = $(this).prev('ul.input-table');
        var liElem = $('#reaction-input-row').tmpl({});
        ulElem.append( liElem );
    }
},'button.new-input-row');
// Append a parent-material input row to the preceding input list.
$(document).on({
    click:function(){
        var ulElem = $(this).prev('ul.input-table');
        var liElem = $('#parent-input-row').tmpl({});
        ulElem.append( liElem );
    }
},'button.new-parent-input-row');
// Insert a fresh reaction-equation block before the "add" button.
$(document).on({
    click:function(){
        var insertElems=$('#reaction-equation-area').tmpl({});
        insertElems.insertBefore( $(this) );
    }
},'button#add');
// Remove a reaction-equation block, unless it is the last one.
$(document).on({
    click:function(){
        if( $('div.reaction-equation-area').length != 1 ){
            $(this).parent().remove();
        }
    }
},'div.reaction-equation-area button.remove-reaction-equation');
// Sidebar "back": navigate to the project page.
$(document).on({
    click:function(){
        window.location = '/home/project';
    }
},'div#sidebar span#back');
// Sidebar "reset": collapse extra equations/rows and clear all inputs.
$(document).on({
    click:function(){
        $('button.remove-reaction-equation').trigger('click');
        $('span.remove-input-row').trigger('click');
        $('input').val('');
    }
},'div#sidebar span#reset');
/**
 * Collect the parent materials entered by the user.
 * @return array of [name, amount] pairs, one per input row.
 */
function get_materials(){
    var rows = $('#parent-material-area ul').find('li.input-row');
    var materials = [];
    rows.each(function(){
        var row = $(this);
        var name = row.find('input.material').val();
        var amount = row.find('input.amount').val();
        materials.push([name, amount]);
    });
    return materials;
}
/**
 * Collect the reactant names entered in the given reactant area.
 * @param reactant_area element/selector containing input.material fields
 * @return array of reactant name strings, one per input
 */
function get_reactants(reactant_area){
    var reactant_inputs = $(reactant_area).find('input.material');
    // Fix: declare result_list locally (it leaked to the global scope).
    var result_list = [];
    for (var i = 0; i < reactant_inputs.length; i++){
        // Fix: read each input individually; the original pushed
        // $(reactant_inputs).val(), i.e. the FIRST input's value, on
        // every iteration.
        result_list.push($(reactant_inputs[i]).val());
    }
    return result_list;
}
/**
 * Collect the product names entered in the given resultant area.
 * @param reactant_area element/selector containing input.material fields
 * @return array of product name strings, one per input
 */
function get_products(reactant_area){
    var reactant_inputs = $(reactant_area).find('input.material');
    // Fix: declare result_list locally (it leaked to the global scope).
    var result_list = [];
    for (var i = 0; i < reactant_inputs.length; i++){
        // Fix: read each input individually; the original pushed
        // $(reactant_inputs).val(), i.e. the FIRST input's value, on
        // every iteration.
        result_list.push($(reactant_inputs[i]).val());
    }
    return result_list;
}
/**
 * Collect every reaction equation on the page.
 * @return array of {reactants, products, k} objects, one per
 *         div.reaction-equation-area block.
 */
function get_reactions(){
    var reactions = [];
    $('div.reaction-equation-area').each(function(){
        var area = $(this);
        reactions.push({
            'reactants' : get_reactants(area.find('ul.reactant')),
            'products' : get_products(area.find('ul.resultant')),
            'k': area.find('input#rate').val()
        });
    });
    return reactions;
}
// "Run" button: gather the materials and reaction equations, POST them to
// the simulation endpoint, then plot the returned time series.
$(document).on({
    click:function(){
        var matetial_list = get_materials(),
            reaction_list = get_reactions();
        // NOTE(review): `postData` is an implicit global, and the payload
        // key is spelled 'martials' (sic) — the backend presumably expects
        // these exact bytes; confirm server-side before changing either.
        postData = {
            "reactions" : reaction_list,
            'martials' : matetial_list,
            'reaction_time' : 100
        }
        $.ajax({
            url:'/home/simulate',
            type:'POST',
            contentType: 'application/json; charset=utf-8',
            processData: false,
            data:JSON.stringify(postData),
            dataType:'JSON',
            success:function(result){
                drawResult(result);
            }
        });
    }
},'button#run');
/**
 * Build a DOM element of the given tag with the given inner HTML.
 * Any extra arguments are consumed pairwise as attribute name/value pairs.
 * @return the newly created (detached) element
 */
function getElement(tag, inner){
    var node = document.createElement(tag);
    node.innerHTML = inner;
    for (var idx = 2; idx < arguments.length - 1; idx += 2){
        node.setAttribute(arguments[idx], arguments[idx + 1]);
    }
    return node;
}
/**
 * Render the simulation result as a multi-series D3 line chart.
 *
 * `result` is a flat array of samples {name, order, date, pv} where
 * `order` indexes the series, `date` is the x axis (seconds) and `pv`
 * is the concentration. Also builds the per-series checkbox list in the
 * header, a "Show all" reset button, a hover tooltip, and a two-click
 * zoom (click two points of one series to restrict the date range).
 */
function drawResult(result){
    'use strict';
    var list = new Array();      // 1 = series visible, 0 = hidden
    var path = new Array();      // drawn <path> selections, one per visible series
    var svg;
    var circle = new Array();    // per-series data-point circle selections
    var tips;
    var ddata;                   // working copy of data (filter/zoom target)
    var tag1;                    // first clicked date of a zoom selection
    var tag2;                    // second clicked date of a zoom selection
    var type;                    // series order of the armed zoom selection
    var typedate = new Array();  // samples grouped by series order
    var xAxis;
    var yAxis;
    var x;
    var y;
    var c;                       // fill color saved across hover transitions
    var colors = new Array('Blue', 'BlueViolet', 'DeepSkyBlue', 'ForestGreen');
    var data = result;
    ddata = data.concat();
    // circle radii: r0 at rest, r1*2 while hovered
    var r0 = 3,
        r1 = 5;
    // transition duration in milliseconds
    var duration = 500;
    var margin = {top: 20, right: 20, bottom: 30, left: 50},
        width = document.body.clientWidth - margin.left - margin.right-120-250,
        height = 500 - margin.top - margin.bottom -20;
    // Clear any chart and controls left from a previous run.
    d3.select('.canvs').remove();
    d3.select('.list').remove();
    d3.select('div#header button').remove();
    var container = d3.select('div#content')
        .append('svg')
        .attr('class', 'canvs')
        .attr('width', width + margin.left + margin.right+120)
        .attr('height', height + margin.top + margin.bottom+20);
    typedate = new Array();
    var ts = d3.max(data, function(d) { return d.order; });
    for(var i = 0; i <= ts; i++){
        typedate[i] = new Array();
        list.push(1);
    }
    svg = container.append('g')
        .attr('class', 'content')
        .attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');
    data.forEach(function(d) {
        d.dayText = d.date;
        d.pv = d.pv;
        typedate[d.order].push(d);
    });
    // One checkbox + colored label per series in the header.
    var div = d3.select('div#header').append('div').attr('class', 'list');
    for (var i = 0; i < typedate.length; i++){
        d3.select('.list')
            .append('input')
            .attr('class', 'ckbox')
            .attr('type', 'checkbox')
            .attr('value', i)
            .attr('name', 'ck')
            .attr('checked', 'checked');
        d3.select('.list')
            .append('font')
            .attr('class', 'ftext')
            .attr('color', colors[i])
            .text(typedate[i][0].name);
    }
    show();
    // Recompute visibility flags from ddata, then redraw the chart.
    function show() {
        list = new Array();
        ddata.forEach(function(d){
            list[d.order] = 1;
        });
        function draw() {
            d3.select('.content').remove();
            svg = container.append('g')
                .attr('class', 'content')
                .attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');
            x = d3.scale.linear().range([0, width]);
            y = d3.scale.linear().range([height, 0]);
            xAxis = d3.svg.axis()
                .scale(x)
                .orient('bottom')
                .ticks(30);
            yAxis = d3.svg.axis()
                .scale(y)
                .orient('left')
                .ticks(10);
            //x.domain(d3.extent(ddata, function(d) { return d.date;}));
            x.domain([d3.min(ddata, function(d) { return d.date; }),
                d3.max(ddata, function(d) { return d.date; })]);
            y.domain([d3.min(ddata, function(d) { return d.pv; }),
                d3.max(ddata, function(d) { return d.pv; })]);
            svg.append('text')
                .attr('class', 'title')
                .text('Simulation Result')
                .attr('x', width/2)
                .attr('y', 0);
            svg.append('g')
                .attr('class', 'x axis')
                .attr('transform', 'translate(0,' + height + ')')
                .call(xAxis)
                .append('text')
                .text('seconds')
                .attr('transform', 'translate(' + (width - 20) + ', 0)');
            svg.append('g')
                .attr('class', 'y axis')
                .call(yAxis)
                .append('text')
                .text('Concentration');
            var line = d3.svg.line()
                .x(function(d) { return x(d.date); })
                .y(function(d) { return y(d.pv); })
                .interpolate('monotone');
            var ps = -1;
            var tag = false;   // true while the first zoom endpoint is armed
            typedate = new Array();
            var ts = d3.max(data, function(d) { return d.order; });
            for(var i = 0; i <= ts; i++){
                typedate[i] = new Array();
                list.push(1);
            }
            ddata.forEach(function(d) {
                d.dayText = d.date;
                d.pv = d.pv;
                typedate[d.order].push(d);
            });
            // One path per visible series.
            for (var i=0; i< typedate.length;i++){
                if (list[i]==0) {
                    continue;
                }
                ps++;
                path[ps] = svg.append('path')
                    .attr('class', 'line')
                    .attr('stroke', colors[ps])
                    .attr('fill', 'none')
                    .attr('stroke-width', '2px')
                    .attr('d', line(typedate[i]));
            }
            var cs = -1;
            // Data-point markers. Clicking two points of the same series
            // restricts ddata to that [tag1, tag2] date range and redraws.
            for (var i=0; i< typedate.length;i++){
                if (list[i]==0){
                    continue;
                }
                cs++;
                circle[cs] = new Array();
                circle[cs] = svg.selectAll('cirlce')
                    .data(typedate[i])
                    .enter()
                    .append('g')
                    .append('circle')
                    .attr('class', 'linecircle')
                    .attr('cx', line.x())
                    .attr('cy', line.y())
                    .attr('r', r0)
                    .attr('fill', 'green')
                    .attr('order', function(d){return d.order;})
                    .attr('date', function(d){return d.date;})
                    .attr('py', function(d){return d.py;})
                    .attr('clock', false)
                    .on('mouseover', function() {
                        // Remember the resting color so mouseout can restore it.
                        c = d3.select(this).attr('fill');
                        if (d3.select(this).attr('clock') == 'true'){
                            c = '#FF0000';
                        }
                        d3.select(this).transition().duration(duration).attr('r', r1*2)
                            .attr('fill', 'steelblue');
                    })
                    .on('mouseout', function() {
                        // Points armed for zoom ('clock' == true) stay enlarged/red.
                        if (d3.select(this).attr('clock') == 'false'){
                            d3.select(this).transition().duration(duration).attr('r', r0);
                            d3.select(this).attr('fill', c);
                        }
                    })
                    .on('click', function(d, i) {
                        // Second click on the same series completes the zoom.
                        if (tag==true && d.order == type && tag1 != d.date){
                            d3.select(this).attr('fill', '#FF0000');
                            tag = false;
                            for(var i = 0; i < list.length; i++){
                                if (i != type) list[i] = 0;
                            }
                            tag2 = d.date;
                            if (tag2 < tag1){
                                var temp = tag2;
                                tag2 = tag1;
                                tag1 = temp;
                            }
                            ddata = new Array();
                            data.forEach(function(d){
                                if (d.order == type && d.date >= tag1 && d.date <= tag2){
                                    ddata.push(d);
                                }
                            });
                            for (var i = 0; i < path.length; i++){
                                path[i].remove();
                            }
                            for (var i = 0; i < circle.length; i++){
                                circle[i].remove();
                            }
                            show();
                        }
                        // First click arms the zoom selection.
                        if (tag == false){
                            d3.select(this).attr('fill', '#FF0000');
                            d3.select(this).attr('clock', true);
                            tag = true;
                            type = d3.select(this).attr('order');
                            tag1 = d3.select(this).attr('date');
                        }
                    });
            }
            // Tooltip box following the mouse, showing the nearest sample.
            var tips = svg.append('g').attr('class', 'tips');
            tips.append('rect')
                .attr('class', 'tips-border')
                .attr('width', 200)
                .attr('height', 50)
                .attr('rx', 10)
                .attr('ry', 10);
            var wording1 = tips.append('text')
                .attr('class', 'tips-text')
                .attr('x', 10)
                .attr('y', 20)
                .text('');
            var wording2 = tips.append('text')
                .attr('class', 'tips-text')
                .attr('x', 10)
                .attr('y', 40)
                .text('');
            container
                .on('mousemove', function() {
                    var m = d3.mouse(this),
                        cx = m[0] - margin.left,
                        cy = m[1] - margin.top;
                    showWording(cx,cy);
                    d3.select('.tips').style('display', 'block');
                })
                .on('mouseout', function() {
                    d3.select('.tips').style('display', 'none');
                });
            // Find the sample closest to the cursor (L1 distance in pixel
            // space) and move the tooltip next to it.
            function showWording(cx,cy) {
                var min;
                var d;
                var xlen = d3.extent(ddata, function(d) { return d.date;});
                var ylen = d3.extent(ddata, function(d) { return d.pv;});
                for (var i = 0; i < ddata.length; i++){
                    var xp = width / (xlen[1]-xlen[0]) * (ddata[i].date-xlen[0])-cx;
                    var yp = height / (ylen[1]-ylen[0]) * (ylen[1] - ddata[i].pv)-cy;
                    if (xp < 0) xp = -xp;
                    if (yp < 0) yp = -yp;
                    if (i == 0){
                        d = ddata[i];
                        min = xp + yp;
                    }else{
                        if (xp + yp < min){
                            min = xp+yp;
                            d = ddata[i];
                        }
                    }
                }
                function formatWording(d) {
                    return 'seconds:' + (d.date) + 's';
                }
                wording1.text(formatWording(d));
                wording2.text('Concentration:' + d.pv);
                var x1 = x(d.date),
                    y1 = y(d.pv);
                // clamp so the tooltip stays inside the chart bounds
                var dx = x1 > width ? x1 - width + 2 : x1 + 2 > width ? 2 : 0;
                var dy = y1 > height ? y1 - height + 2 : y1 + 2 > height ? 2 : 0;
                x1 -= dx;
                y1 -= dy;
                d3.select('.tips')
                    .attr('transform', 'translate(' + x1 + ',' + y1 + ')');
            }
        }
        draw();
    }
    // "Show all" button: reset ddata to the full data set and redraw.
    d3.select("div#header")
        .append("button")
        .text("Show all")
        .on('click', function(){
            for(var i = 0; i < typedate.length; i++)
                list[i] = 1;
            ddata = new Array();
            data.forEach(function(d){
                ddata.push(d);
            });
            for (var i = 0; i < path.length; i++){
                path[i].remove();
            }
            for (var i = 0; i < circle.length; i++){
                circle[i].remove();
            }
            show();
        });
    // Rebuild ddata from the checked series checkboxes and redraw.
    function doit(){
        var sum = 0;
        var a = document.getElementsByName("ck");
        ddata = new Array();
        for(var i=0;i<a.length;i++){
            if(!a[i].checked){
                list[i]= 0;
            }else{
                list[i] = 1;
                data.forEach(function(d){
                    if (d.order == i){
                        ddata.push(d);
                    }
                });
            }
        }
        for (var i = 0; i < path.length; i++){
            path[i].remove();
        }
        for (var i = 0; i < circle.length; i++){
            circle[i].remove();
        }
        path = new Array();
        circle = new Array();
        show();
    }
    $(document).on({
        click:function(){
            doit();
        }
    }, '.ckbox');
    window.onload = function(){
        var a = document.getElementsByName("ck");
        for(var i=0;i<a.length;i++){
            a[i].onclick = doit;
        }
    }
}
| igemsoftware/HFUT-China_2015 | static/js/simulation.js | JavaScript | apache-2.0 | 14,759 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var App = require('app');
// Ember controller backing the main "Admin" section.
// `category` holds the currently selected admin tab; defaults to 'user'.
App.MainAdminController = Em.Controller.extend({
  name: 'mainAdminController',
  category: 'user'
});
package org.wildfly.swarm.weld;
import org.wildfly.swarm.container.Fraction;
/**
 * Fraction enabling Weld (CDI) support in the container.
 *
 * Stateless marker fraction; the implicit default constructor suffices.
 *
 * @author Bob McWhirter
 */
public class WeldFraction implements Fraction {
}
| bbrowning/wildfly-swarm | weld/api/src/main/java/org/wildfly/swarm/weld/WeldFraction.java | Java | apache-2.0 | 198 |
/*
* Copyright (C) 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package com.android.systemui.classifier;
/**
 * Scores an angle-percentage value for the falsing classifier: one point
 * for each threshold (1.00, 0.90, 0.70) the value falls below, giving a
 * result in the range 0..3 (higher = more suspicious).
 */
public class AnglesPercentageEvaluator {
    // Thresholds kept as doubles to preserve the original float-vs-double
    // comparison semantics exactly.
    private static final double[] THRESHOLDS = {1.00, 0.90, 0.70};

    public static float evaluate(float value) {
        float evaluation = 0.0f;
        for (double threshold : THRESHOLDS) {
            if (value < threshold) {
                evaluation++;
            }
        }
        return evaluation;
    }
}
| xorware/android_frameworks_base | packages/SystemUI/src/com/android/systemui/classifier/AnglesPercentageEvaluator.java | Java | apache-2.0 | 939 |
<?php
/*
* Copyright 2014 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
namespace Google\Service\ShoppingContent;
/**
 * Request body for the `orderreturns.process` Content API method.
 *
 * Generated-style model class. NOTE(review): the `refundShippingFee`
 * property is not declared; its setter/getter rely on dynamic properties
 * plus the *Type/*DataType metadata handled by the \Google\Model base
 * class — confirm against the generator's conventions before declaring
 * it explicitly.
 */
class OrderreturnsProcessRequest extends \Google\Collection
{
  // Name of the repeated (collection) field on this model.
  protected $collection_key = 'returnItems';
  /**
   * @var bool
   */
  public $fullChargeReturnShippingCost;
  /**
   * @var string
   */
  public $operationId;
  // Metadata describing the type of the dynamic refundShippingFee field.
  protected $refundShippingFeeType = OrderreturnsRefundOperation::class;
  protected $refundShippingFeeDataType = '';
  protected $returnItemsType = OrderreturnsReturnItem::class;
  protected $returnItemsDataType = 'array';
  /**
   * @param bool
   */
  public function setFullChargeReturnShippingCost($fullChargeReturnShippingCost)
  {
    $this->fullChargeReturnShippingCost = $fullChargeReturnShippingCost;
  }
  /**
   * @return bool
   */
  public function getFullChargeReturnShippingCost()
  {
    return $this->fullChargeReturnShippingCost;
  }
  /**
   * @param string
   */
  public function setOperationId($operationId)
  {
    $this->operationId = $operationId;
  }
  /**
   * @return string
   */
  public function getOperationId()
  {
    return $this->operationId;
  }
  /**
   * @param OrderreturnsRefundOperation
   */
  public function setRefundShippingFee(OrderreturnsRefundOperation $refundShippingFee)
  {
    $this->refundShippingFee = $refundShippingFee;
  }
  /**
   * @return OrderreturnsRefundOperation
   */
  public function getRefundShippingFee()
  {
    return $this->refundShippingFee;
  }
  /**
   * @param OrderreturnsReturnItem[]
   */
  public function setReturnItems($returnItems)
  {
    $this->returnItems = $returnItems;
  }
  /**
   * @return OrderreturnsReturnItem[]
   */
  public function getReturnItems()
  {
    return $this->returnItems;
  }
}
// Adding a class alias for backwards compatibility with the previous class name.
class_alias(OrderreturnsProcessRequest::class, 'Google_Service_ShoppingContent_OrderreturnsProcessRequest');
| googleapis/google-api-php-client-services | src/ShoppingContent/OrderreturnsProcessRequest.php | PHP | apache-2.0 | 2,450 |
// Copyright 2014 The Sporting Exchange Limited. All rights reserved.
// Use of this source code is governed by a free license that can be
// found in the LICENSE file.
package aggregator
import (
"log"
"reflect"
"strings"
"testing"
"time"
"opentsp.org/cmd/collect-statse/statse"
"opentsp.org/internal/tsdb"
)
// point is a test helper that parses a "metric tag=value ..." series
// description into a tsdb.Point, panicking on invalid input.
func point(time time.Time, value interface{}, series string) *tsdb.Point {
	fields := strings.Fields(strings.Replace(series, "=", " ", -1))
	p, err := tsdb.NewPoint(time, value, fields[0], fields[1:]...)
	if err != nil {
		log.Panicf("point: %v, time=%v value=%v series=%q id=%q", err, time, value, series, fields)
	}
	return p
}
// testSnapshot drives TestSnapshot. Each case feeds the given events into
// a fresh store and lists the exact points a snapshot must emit: per-host
// series (*.byhost.*) followed by cluster-wide series (host=NA).
var testSnapshot = []struct {
	in []*statse.Event
	out []*tsdb.Point
}{
	{ // 0 events
		in: []*statse.Event{},
		out: []*tsdb.Point(nil),
	},
	{ // 1 event
		in: []*statse.Event{
			{
				Metric: "foo",
				Tags: "host=a",
				Statistics: []statse.Statistic{
					{Key: statse.Time, Value: 1},
				},
			},
		},
		out: []*tsdb.Point{
			// Per-host.
			point(time.Unix(0, 0), uint64(1), "foo.byhost.count error=false host=a"),
			point(time.Unix(0, 0), uint64(0), "foo.byhost.count error=true host=a"),
			point(time.Unix(0, 0), float32(1), "foo.byhost.time.min host=a"),
			point(time.Unix(0, 0), float32(1), "foo.byhost.time.avg host=a"),
			point(time.Unix(0, 0), float32(1), "foo.byhost.time.p95 host=a"),
			point(time.Unix(0, 0), float32(1), "foo.byhost.time.p99 host=a"),
			point(time.Unix(0, 0), float32(1), "foo.byhost.time.max host=a"),
			// Cluster-wide.
			point(time.Unix(0, 0), uint64(1), "foo.count error=false host=NA"),
			point(time.Unix(0, 0), uint64(0), "foo.count error=true host=NA"),
			point(time.Unix(0, 0), float32(1), "foo.time.min host=NA"),
			point(time.Unix(0, 0), float32(1), "foo.time.avg host=NA"),
			point(time.Unix(0, 0), float32(1), "foo.time.p95 host=NA"),
			point(time.Unix(0, 0), float32(1), "foo.time.p99 host=NA"),
			point(time.Unix(0, 0), float32(1), "foo.time.max host=NA"),
		},
	},
	{ // 2 events, 1 host
		in: []*statse.Event{
			{
				Metric: "foo",
				Tags: "host=a",
				Statistics: []statse.Statistic{
					{Key: statse.Time, Value: 1},
				},
			},
			{
				Metric: "foo",
				Tags: "host=a",
				Statistics: []statse.Statistic{
					{Key: statse.Time, Value: 2},
				},
			},
		},
		out: []*tsdb.Point{
			// Per-host.
			point(time.Unix(0, 0), uint64(2), "foo.byhost.count error=false host=a"),
			point(time.Unix(0, 0), uint64(0), "foo.byhost.count error=true host=a"),
			point(time.Unix(0, 0), float32(1), "foo.byhost.time.min host=a"),
			point(time.Unix(0, 0), float32(1.5), "foo.byhost.time.avg host=a"),
			point(time.Unix(0, 0), float32(2), "foo.byhost.time.p95 host=a"),
			point(time.Unix(0, 0), float32(2), "foo.byhost.time.p99 host=a"),
			point(time.Unix(0, 0), float32(2), "foo.byhost.time.max host=a"),
			// Cluster-wide.
			point(time.Unix(0, 0), uint64(2), "foo.count error=false host=NA"),
			point(time.Unix(0, 0), uint64(0), "foo.count error=true host=NA"),
			point(time.Unix(0, 0), float32(1), "foo.time.min host=NA"),
			point(time.Unix(0, 0), float32(1.5), "foo.time.avg host=NA"),
			point(time.Unix(0, 0), float32(2), "foo.time.p95 host=NA"),
			point(time.Unix(0, 0), float32(2), "foo.time.p99 host=NA"),
			point(time.Unix(0, 0), float32(2), "foo.time.max host=NA"),
		},
	},
	/*
		BUG: depends on map iteration order.
			{ // 2 events, 2 hosts
				in: []*statse.Event{
					{
						Metric: "foo",
						Tags:   "host=a",
						Statistics: []statse.Statistic{
							{Key: statse.Time, Value: 1},
						},
					},
					{
						Metric: "foo",
						Tags:   "host=b",
						Statistics: []statse.Statistic{
							{Key: statse.Time, Value: 2},
						},
					},
				},
				out: []*tsdb.Point{
					// host=a
					point(time.Unix(0, 0), uint64(1), "foo.byhost.count error=false host=a"),
					point(time.Unix(0, 0), uint64(0), "foo.byhost.count error=true host=a"),
					point(time.Unix(0, 0), float32(1), "foo.byhost.time.min host=a"),
					point(time.Unix(0, 0), float32(1), "foo.byhost.time.avg host=a"),
					point(time.Unix(0, 0), float32(1), "foo.byhost.time.p95 host=a"),
					point(time.Unix(0, 0), float32(1), "foo.byhost.time.p99 host=a"),
					point(time.Unix(0, 0), float32(1), "foo.byhost.time.max host=a"),
					// host=b
					point(time.Unix(0, 0), uint64(1), "foo.byhost.count error=false host=b"),
					point(time.Unix(0, 0), uint64(0), "foo.byhost.count error=true host=b"),
					point(time.Unix(0, 0), float32(2), "foo.byhost.time.min host=b"),
					point(time.Unix(0, 0), float32(2), "foo.byhost.time.avg host=b"),
					point(time.Unix(0, 0), float32(2), "foo.byhost.time.p95 host=b"),
					point(time.Unix(0, 0), float32(2), "foo.byhost.time.p99 host=b"),
					point(time.Unix(0, 0), float32(2), "foo.byhost.time.max host=b"),
					// Cluster-wide.
					point(time.Unix(0, 0), uint64(2), "foo.count error=false host=NA"),
					point(time.Unix(0, 0), uint64(0), "foo.count error=true host=NA"),
					point(time.Unix(0, 0), float32(1), "foo.time.min host=NA"),
					point(time.Unix(0, 0), float32(1.5), "foo.time.avg host=NA"),
					point(time.Unix(0, 0), float32(2), "foo.time.p95 host=NA"),
					point(time.Unix(0, 0), float32(2), "foo.time.p99 host=NA"),
					point(time.Unix(0, 0), float32(2), "foo.time.max host=NA"),
				},
			},
	*/
	{ // 1 event, all statistic types
		in: []*statse.Event{
			{
				Metric: "foo",
				Tags: "host=a",
				Statistics: []statse.Statistic{
					{Key: statse.Time, Value: 1},
					{Key: statse.TTFB, Value: 2},
					{Key: statse.Size, Value: 3},
				},
			},
		},
		out: []*tsdb.Point{
			// byhost count
			point(time.Unix(0, 0), uint64(1), "foo.byhost.count error=false host=a"),
			point(time.Unix(0, 0), uint64(0), "foo.byhost.count error=true host=a"),
			// byhost time
			point(time.Unix(0, 0), float32(1), "foo.byhost.time.min host=a"),
			point(time.Unix(0, 0), float32(1), "foo.byhost.time.avg host=a"),
			point(time.Unix(0, 0), float32(1), "foo.byhost.time.p95 host=a"),
			point(time.Unix(0, 0), float32(1), "foo.byhost.time.p99 host=a"),
			point(time.Unix(0, 0), float32(1), "foo.byhost.time.max host=a"),
			// byhost ttfb
			point(time.Unix(0, 0), float32(2), "foo.byhost.ttfb.min host=a"),
			point(time.Unix(0, 0), float32(2), "foo.byhost.ttfb.avg host=a"),
			point(time.Unix(0, 0), float32(2), "foo.byhost.ttfb.p95 host=a"),
			point(time.Unix(0, 0), float32(2), "foo.byhost.ttfb.p99 host=a"),
			point(time.Unix(0, 0), float32(2), "foo.byhost.ttfb.max host=a"),
			// byhost size
			point(time.Unix(0, 0), float32(3), "foo.byhost.size.min host=a"),
			point(time.Unix(0, 0), float32(3), "foo.byhost.size.avg host=a"),
			point(time.Unix(0, 0), float32(3), "foo.byhost.size.p95 host=a"),
			point(time.Unix(0, 0), float32(3), "foo.byhost.size.p99 host=a"),
			point(time.Unix(0, 0), float32(3), "foo.byhost.size.max host=a"),
			// cluster count
			point(time.Unix(0, 0), uint64(1), "foo.count error=false host=NA"),
			point(time.Unix(0, 0), uint64(0), "foo.count error=true host=NA"),
			// cluster time
			point(time.Unix(0, 0), float32(1), "foo.time.min host=NA"),
			point(time.Unix(0, 0), float32(1), "foo.time.avg host=NA"),
			point(time.Unix(0, 0), float32(1), "foo.time.p95 host=NA"),
			point(time.Unix(0, 0), float32(1), "foo.time.p99 host=NA"),
			point(time.Unix(0, 0), float32(1), "foo.time.max host=NA"),
			// cluster ttfb
			point(time.Unix(0, 0), float32(2), "foo.ttfb.min host=NA"),
			point(time.Unix(0, 0), float32(2), "foo.ttfb.avg host=NA"),
			point(time.Unix(0, 0), float32(2), "foo.ttfb.p95 host=NA"),
			point(time.Unix(0, 0), float32(2), "foo.ttfb.p99 host=NA"),
			point(time.Unix(0, 0), float32(2), "foo.ttfb.max host=NA"),
			// cluster size
			point(time.Unix(0, 0), float32(3), "foo.size.min host=NA"),
			point(time.Unix(0, 0), float32(3), "foo.size.avg host=NA"),
			point(time.Unix(0, 0), float32(3), "foo.size.p95 host=NA"),
			point(time.Unix(0, 0), float32(3), "foo.size.p99 host=NA"),
			point(time.Unix(0, 0), float32(3), "foo.size.max host=NA"),
		},
	},
	{ // an event without statistics
		in: []*statse.Event{
			{
				Metric: "foo",
				Tags: "host=a",
				Statistics: []statse.Statistic{},
			},
		},
		out: []*tsdb.Point{
			// byhost count
			point(time.Unix(0, 0), uint64(1), "foo.byhost.count error=false host=a"),
			point(time.Unix(0, 0), uint64(0), "foo.byhost.count error=true host=a"),
			// cluster count
			point(time.Unix(0, 0), uint64(1), "foo.count error=false host=NA"),
			point(time.Unix(0, 0), uint64(0), "foo.count error=true host=NA"),
		},
	},
	{ // an error event with statistics
		in: []*statse.Event{
			{
				Metric: "foo",
				Tags: "host=a",
				Error: true,
				Statistics: []statse.Statistic{
					{Key: statse.Time, Value: 1},
					{Key: statse.TTFB, Value: 2},
					{Key: statse.Size, Value: 3},
				},
			},
		},
		out: []*tsdb.Point{
			// byhost count
			point(time.Unix(0, 0), uint64(0), "foo.byhost.count error=false host=a"),
			point(time.Unix(0, 0), uint64(1), "foo.byhost.count error=true host=a"),
			// cluster count
			point(time.Unix(0, 0), uint64(0), "foo.count error=false host=NA"),
			point(time.Unix(0, 0), uint64(1), "foo.count error=true host=NA"),
		},
	},
}
// TestSnapshot replays every testSnapshot case and verifies both the
// points the snapshot emits and that it fully drains the store's buffers.
func TestSnapshot(t *testing.T) {
	for i, tt := range testSnapshot {
		store := newStore()
		store.Write(tt.in...)
		job := snapshotJob{
			Time: time.Unix(0, 0),
			Store: store,
		}
		job.do()
		// On mismatch, dump input events plus actual and expected points.
		if !reflect.DeepEqual(job.Output, tt.out) {
			t.Errorf("#%d. invalid snapshot", i)
			t.Errorf("in:\n")
			for _, event := range tt.in {
				t.Errorf(" %v\n", event)
			}
			t.Errorf("got:\n")
			for _, point := range job.Output {
				t.Errorf(" %v\n", point)
			}
			t.Errorf("want:\n")
			for _, point := range tt.out {
				t.Errorf(" %v\n", point)
			}
		}
		// A snapshot must leave no events buffered in the store.
		for key, entry := range store.m {
			for _, buf := range entry.Buffer {
				if len(buf) > 0 {
					t.Errorf("events still buffered following a snapshot, key=%+v", key)
				}
			}
		}
	}
}
// TestSnapshotP95 feeds 100 evenly spaced time samples (1..100) into the
// store and checks that the snapshot reports 96 as the 95th percentile.
func TestSnapshotP95(t *testing.T) {
	s := newStore()
	for v := 1; v <= 100; v++ {
		s.Write(&statse.Event{
			Metric: "foo",
			Tags: "host=a",
			Statistics: []statse.Statistic{
				{Key: statse.Time, Value: float32(v)},
			},
		})
	}
	job := snapshotJob{
		Time: time.Unix(0, 0),
		Store: s,
	}
	job.do()
	// Locate the cluster-wide p95 point and check its value.
	for _, p := range job.Output {
		if string(p.Metric()) == "foo.time.p95" {
			if got := p.Value().(float32); got != 96 {
				t.Errorf("invalid p95: got %v, want 96, point=%v", got, p)
			}
			return
		}
	}
	t.Errorf("foo.time.p95 not found")
}
// TestSnapshotNoop checks that taking a second snapshot over an already
// drained store is safe (no panic, nothing to emit).
func TestSnapshotNoop(t *testing.T) {
	s := newStore()
	// Record a single event.
	s.Write(&statse.Event{
		Metric: "foo",
		Tags: "host=a",
		Statistics: []statse.Statistic{{Key: statse.Time, Value: 0}},
	})
	// First snapshot drains the buffered event.
	first := snapshotJob{
		Time: time.Unix(0, 0),
		Store: s,
	}
	first.do()
	// Second snapshot has nothing left; it must be a harmless no-op.
	second := snapshotJob{
		Time: time.Unix(1, 0),
		Store: s,
	}
	second.do()
	// ok - no crash.
}
| The-Cloud-Source/opentsp | cmd/collect-statse/aggregator/snapshot_test.go | GO | apache-2.0 | 11,151 |
package net.stickycode.bootstrap.tck.plugin;
import static org.assertj.core.api.Assertions.assertThat;
import java.util.Set;
import javax.inject.Inject;
import org.junit.Before;
import org.junit.Test;
import net.stickycode.bootstrap.StickyBootstrap;
/**
 * Bootstrap TCK check: the Sticky bootstrap container must discover all
 * plugin implementations on the classpath and inject them as sets.
 */
public class PluggableTest {
  // All direct Pluggable implementations.
  @Inject
  Set<Pluggable> plugged;
  // Implementations reached through an abstract class in the middle of the
  // hierarchy. NOTE(review): injected but never asserted in verify() —
  // confirm whether a size check is missing.
  @Inject
  Set<PluggableContract> abstractInTheMiddle;
  // Generic plugin contract injected with a wildcard type argument.
  @Inject
  Set<GenericPluggable<?>> genericPlugged;
  // Same contract injected with the raw type.
  @SuppressWarnings("rawtypes")
  @Inject
  Set<GenericPluggable> genericPluggedNoWildcard;
  // Expected discovery counts for each injection point.
  @Test
  public void verify() {
    assertThat(plugged).hasSize(2);
    assertThat(genericPluggedNoWildcard).hasSize(3);
    assertThat(genericPlugged).hasSize(3);
  }
  // Boot the container and inject this test instance before each test.
  @Before
  public void setup() {
    StickyBootstrap.crank(this, getClass());
  }
}
| walterDurin/stickycode | net.stickycode.bootstrap/sticky-bootstrap-tck/src/main/java/net/stickycode/bootstrap/tck/plugin/PluggableTest.java | Java | apache-2.0 | 782 |
# EVM user account. Authentication uses has_secure_password (bcrypt);
# authorization flows through the user's MiqGroups and their roles.
class User < ApplicationRecord
  include RelationshipMixin
  acts_as_miq_taggable
  has_secure_password
  include CustomAttributeMixin
  include ActiveVmAggregationMixin
  include TimezoneMixin
  include CustomActionsMixin
  include ExternalUrlMixin
  # Refuse destruction while the user is still referenced (runs first).
  before_destroy :check_reference, :prepend => true
  # Ownership / approval / notification associations.
  has_many :miq_approvals, :as => :approver
  has_many :miq_approval_stamps, :class_name => "MiqApproval", :foreign_key => :stamper_id
  has_many :miq_requests, :foreign_key => :requester_id
  has_many :vms, :foreign_key => :evm_owner_id
  has_many :miq_templates, :foreign_key => :evm_owner_id
  has_many :miq_widgets
  has_many :miq_widget_contents, :dependent => :destroy
  has_many :miq_widget_sets, :as => :owner, :dependent => :destroy
  has_many :miq_reports, :dependent => :nullify
  has_many :service_orders, :dependent => :nullify
  has_many :owned_shares, :class_name => "Share"
  has_many :notification_recipients, :dependent => :delete_all
  has_many :notifications, :through => :notification_recipients
  has_many :unseen_notification_recipients, -> { unseen }, :class_name => 'NotificationRecipient'
  has_many :unseen_notifications, :through => :unseen_notification_recipients, :source => :notification
  has_many :authentications, :foreign_key => :evm_owner_id, :dependent => :nullify, :inverse_of => :evm_owner
  has_many :sessions, :dependent => :destroy
  # current_group selects which of the user's groups is active.
  belongs_to :current_group, :class_name => "MiqGroup"
  has_and_belongs_to_many :miq_groups
  # Users belonging to any group whose role grants the super-admin feature.
  scope :superadmins, lambda {
    joins(:miq_groups => {:miq_user_role => :miq_product_features})
      .where(:miq_product_features => {:identifier => MiqProductFeature::SUPER_ADMIN_FEATURE })
  }
  virtual_has_many :active_vms, :class_name => "VmOrTemplate"
  # Role/filter helpers are delegated to the active group and its role.
  delegate :miq_user_role, :current_tenant, :get_filters, :has_filters?, :get_managed_filters, :get_belongsto_filters,
           :to => :current_group, :allow_nil => true
  delegate :super_admin_user?, :request_admin_user?, :self_service?, :limited_self_service?, :report_admin_user?, :only_my_user_tasks?,
           :to => :miq_user_role, :allow_nil => true
  validates :name, :presence => true, :length => {:maximum => 100}
  validates :first_name, :length => {:maximum => 100}
  validates :last_name, :length => {:maximum => 100}
  validates :userid, :presence => true, :unique_within_region => {:match_case => false}, :length => {:maximum => 255}
  validates :email, :format => {:with => MoreCoreExtensions::StringFormats::RE_EMAIL,
                                :allow_nil => true, :message => "must be a valid email address"},
                    :length => {:maximum => 255}
  # The active group must be one of the user's groups (when set).
  validates :current_group, :inclusion => {:in => proc { |u| u.miq_groups }, :allow_nil => true, :if => :current_group_id_changed?}
  # use authenticate_bcrypt rather than .authenticate to avoid confusion
  # with the class method of the same name (User.authenticate)
  alias_method :authenticate_bcrypt, :authenticate
  serialize :settings, Hash # Implement settings column as a hash
  default_value_for(:settings) { Hash.new }
  default_value_for :failed_login_attempts, 0
  # All user rows sharing the userid of the given user id.
  scope :with_same_userid, ->(id) { where(:userid => User.where(:id => id).pluck(:userid)) }
# Users that do NOT hold the given product feature via any of their groups'
# roles. `unscope(:select)` keeps the subquery to a bare id list regardless
# of any outer select customization.
def self.with_roles_excluding(identifier)
  where.not(:id => User.unscope(:select).joins(:miq_groups => :miq_product_features)
                       .where(:miq_product_features => {:identifier => identifier})
                       .select(:id))
end

# Rbac hook: user queries are always scoped by tenant.
def self.scope_by_tenant?
  true
end
# Maps an Rbac tenant-id strategy (which normally yields id lists) to the
# association returning the tenant objects themselves.
ACCESSIBLE_STRATEGY_WITHOUT_IDS = {:descendant_ids => :descendants, :ancestor_ids => :ancestors}.freeze

# Rbac hook: restrict visible users to those belonging to tenants the given
# user/group may access.
# Returns [] (no filtering) for the root tenant, nil when no user ids are
# accessible, otherwise a condition hash for the users table.
# NOTE(review): assumes user_or_group responds with a non-nil current_tenant —
# tenant.root? would raise otherwise; confirm callers guarantee this.
def self.tenant_id_clause(user_or_group)
  strategy = Rbac.accessible_tenant_ids_strategy(self)
  tenant = user_or_group.try(:current_tenant)
  return [] if tenant.root?
  accessible_tenants = tenant.send(ACCESSIBLE_STRATEGY_WITHOUT_IDS[strategy])
  users_ids = accessible_tenants.collect(&:user_ids).flatten + tenant.user_ids
  return if users_ids.empty?
  {table_name => {:id => users_ids}}
end
# Region-scoped lookup by userid; returns nil when not found.
def self.lookup_by_userid(userid)
  in_my_region.find_by(:userid => userid)
end
singleton_class.send(:alias_method, :find_by_userid, :lookup_by_userid)
# Deprecate on singleton_class (not self): find_by_userid is a class method,
# so the deprecation must target the singleton class — consistent with the
# find_by_userid!/find_by_email/find_by_lower_email deprecations below.
Vmdb::Deprecation.deprecate_methods(singleton_class, :find_by_userid => :lookup_by_userid)
# Region-scoped lookup by userid; raises ActiveRecord::RecordNotFound when missing.
def self.lookup_by_userid!(userid)
  in_my_region.find_by!(:userid => userid)
end
singleton_class.send(:alias_method, :find_by_userid!, :lookup_by_userid!)
Vmdb::Deprecation.deprecate_methods(singleton_class, :find_by_userid! => :lookup_by_userid!)

# Region-scoped lookup by exact (case-sensitive) email.
def self.lookup_by_email(email)
  in_my_region.find_by(:email => email)
end
singleton_class.send(:alias_method, :find_by_email, :lookup_by_email)
Vmdb::Deprecation.deprecate_methods(singleton_class, :find_by_email => :lookup_by_email)

# find a user by lowercase email
# often we already have the most probable user object on hand, so check the
# cache of candidate users before hitting the database
def self.lookup_by_lower_email(email, cache = [])
  email = email.downcase
  Array.wrap(cache).detect { |u| u.lower_email == email } || find_by(:lower_email => email)
end
singleton_class.send(:alias_method, :find_by_lower_email, :lookup_by_lower_email)
Vmdb::Deprecation.deprecate_methods(singleton_class, :find_by_lower_email => :lookup_by_lower_email)
# Case-normalized email; the virtual attribute below exposes the same value
# to SQL (LOWER(email)) so it can be used in queries.
def lower_email
  email&.downcase
end
virtual_attribute :lower_email, :string, :arel => ->(t) { t.grouping(t[:email].lower) }
hide_attribute :lower_email

# Case-normalized userid; mirrored into SQL like lower_email above.
def lower_userid
  userid&.downcase
end
virtual_attribute :lower_userid, :string, :arel => ->(t) { t.grouping(t[:userid].lower) }
hide_attribute :lower_userid

# Reporting columns derived from the user's current group.
virtual_column :ldap_group, :type => :string, :uses => :current_group
# FIXME: amazon_group too?
virtual_column :miq_group_description, :type => :string, :uses => :current_group
virtual_column :miq_user_role_name, :type => :string, :uses => {:current_group => :miq_user_role}
# Reject the reserved userid "system" (any capitalization), which is used
# for internal EVM operations.
def validate
  errors.add(:userid, "'system' is reserved for EVM internal operations") if userid =~ /^system$/i
end
before_validation :nil_email_field_if_blank
# External authenticators keep no local password; fill a placeholder so the
# bcrypt presence requirement is satisfied.
before_validation :dummy_password_for_external_auth
before_destroy :destroy_subscribed_widget_sets
# Abort destruction when other models still reference this user.
# Collects the referencing model names into the error message.
def check_reference
  referencing = %w[miq_requests vms miq_widgets miq_templates].reject do |association|
    public_send(association).first.nil?
  end.map(&:classify)
  return if referencing.empty?
  errors.add(:base, "user '#{userid}' with id [#{id}] has references to other models: #{referencing.join(" ")}")
  throw :abort
end
# Switch the current group by its description. Regular users may only pick
# from their own groups; super admins may switch to any group in the region.
# Silently keeps the existing group when no match is found or when the
# description is nil/false.
def current_group_by_description=(group_description)
  if group_description
    desired_group = miq_groups.detect { |g| g.description == group_description }
    desired_group ||= MiqGroup.in_region(region_id).find_by(:description => group_description) if super_admin_user?
    self.current_group = desired_group if desired_group
  end
end
# Normalize blank emails to NULL before validation so the email format
# validator's :allow_nil option applies.
def nil_email_field_if_blank
  self.email = nil unless email.present?
end
# When the configured authenticator does not store passwords locally
# (e.g. LDAP), satisfy the password presence requirement with a placeholder.
def dummy_password_for_external_auth
  if password.blank? && password_digest.blank? &&
     !self.class.authenticator(userid).uses_stored_password?
    self.password = "dummy"
  end
end
# Change the password after verifying the old one with the authenticator.
# Raises MiqException::MiqEVMLoginError when passwords are not stored
# locally (external authentication mode). Silently does nothing when the
# old password fails verification.
def change_password(oldpwd, newpwd)
  auth = self.class.authenticator(userid)
  unless auth.uses_stored_password?
    raise MiqException::MiqEVMLoginError,
          _("password change not allowed when authentication mode is %{name}") % {:name => auth.class.proper_name}
  end
  if auth.authenticate(userid, oldpwd)
    self.password = newpwd
    self.save!
  end
end
# Account is locked once the failed-login counter reaches the configured
# maximum; a maximum of 0 disables locking entirely.
def locked?
  ::Settings.authentication.max_failed_login_attempts.positive? && failed_login_attempts >= ::Settings.authentication.max_failed_login_attempts
end

# Reset the failed-login counter, unlocking the account.
def unlock!
  update!(:failed_login_attempts => 0)
end

# Record a failed login; when this locks the account, queue a delayed
# automatic unlock (see #unlock_queue).
def fail_login!
  update!(:failed_login_attempts => failed_login_attempts + 1)
  unlock_queue if locked?
end
# Description of the current group (historical name; not LDAP specific).
def ldap_group
  current_group.try(:description)
end
alias_method :miq_group_description, :ldap_group

# True when the user's role grants the requested feature(s).
def role_allows?(**options)
  Rbac.role_allows?(:user => self, **options)
end

# True when the user's role grants any one of the requested features.
def role_allows_any?(**options)
  Rbac.role_allows?(:user => self, :any => true, **options)
end

# Name of the role attached to the current group, nil when absent.
def miq_user_role_name
  miq_user_role.try(:name)
end
# The authenticator (database, LDAP, httpd, ...) configured for the given
# username.
def self.authenticator(username = nil)
  Authenticator.for(::Settings.authentication.to_hash, username)
end

# Authenticate and, on success, link the user to the HTTP session.
# Returns the User on success, nil otherwise.
def self.authenticate(username, password, request = nil, options = {})
  user = authenticator(username).authenticate(username, password, request, options)
  user.try(:link_to_session, request)
  user
end
# Associate this user with the rack session carried by the request, so the
# session can later be revoked. No-op without a request or session id.
def link_to_session(request)
  return unless request
  return unless (session_id = request.session_options[:id])
  # dalli 3.1 switched to Abstract::PersistedStore from Abstract::Persisted and the resulting session id
  # changed from a string to a SessionID object that can't be coerced in finders. Convert this object to string via
  # the private_id method, see: https://github.com/rack/rack/issues/1432#issuecomment-571688819
  session_id = session_id.private_id if session_id.respond_to?(:private_id)
  sessions << Session.find_or_create_by(:session_id => session_id)
end
# Revoke this user's sessions on every server. With the "cache" session
# store each server holds its own session data, so the revocation has to be
# broadcast through the queue.
def broadcast_revoke_sessions
  if Settings.server.session_store == "cache"
    MiqQueue.broadcast(
      :class_name => self.class.name,
      :instance_id => id,
      :method_name => :revoke_sessions
    )
  else
    # If using SQL or Memory, the sessions don't need to (or can't) be
    # revoked via a broadcast since the session/token stores are not server
    # specific, so execute it inline.
    revoke_sessions
  end
end

# Destroy all of this user's sessions and drop their API tokens from every
# token cache.
def revoke_sessions
  current_sessions = Session.where(:user_id => id)
  ManageIQ::Session.revoke(current_sessions.map(&:session_id))
  current_sessions.destroy_all
  TokenStore.token_caches.each do |_, token_store|
    token_store.delete_all_for_user(userid)
  end
end
# Thin delegation to the configured authenticator (HTTP basic flow).
def self.authenticate_with_http_basic(username, password, request = nil, options = {})
  authenticator(username).authenticate_with_http_basic(username, password, request, options)
end

# Resolve the external identity for a username via the authenticator.
def self.lookup_by_identity(username)
  authenticator(username).lookup_by_identity(username)
end

# Create/update a user record from the external identity. The reserved
# "admin" account is never externally authorized.
def self.authorize_user(userid)
  return if userid.blank? || admin?(userid)
  authenticator(userid).authorize_user(userid)
end

# Same as authorize_user, but driven by metadata carried in a system token.
def self.authorize_user_with_system_token(userid, user_metadata = {})
  return if userid.blank? || user_metadata.blank? || admin?(userid)
  authenticator(userid).authorize_user_with_system_token(userid, user_metadata)
end
# Stamp the logoff time and write an audit record. Uses save (not save!)
# so an invalid record does not abort the logoff.
def logoff
  self.lastlogoff = Time.now.utc
  save
  AuditEvent.success(:event => "logoff", :message => "User #{userid} has logged off", :userid => userid)
end
# MiqSearch expressions visible to this user: the global/default searches
# plus the user's own, optionally restricted to a single database (model).
#
# db - optional model name used to filter the searches
def get_expressions(db = nil)
  sql = ["((search_type=? and search_key is null) or (search_type=? and search_key is null) or (search_type=? and search_key=?))",
         'default', 'global', 'user', userid]
  unless db.nil?
    # Append with an explicit leading space; the previous "and db=?" only
    # parsed because the closing ")" happened to act as a token boundary.
    sql[0] += " and db=?"
    sql << db.to_s
  end
  MiqSearch.get_expressions(sql)
end
# Run the block with Time.zone set to this user's timezone.
def with_my_timezone(&block)
  with_a_timezone(get_timezone, &block)
end

# The user's configured timezone, falling back to the server's.
def get_timezone
  settings.fetch_path(:display, :timezone) || self.class.server_timezone
end
# Assign group membership while keeping the invariant that current_group is
# always one of the user's own groups.
def miq_groups=(groups)
  super
  self.current_group = groups.first if current_group.nil? || !groups.include?(current_group)
end

# Move the user onto any other of their groups (used before removing the
# current one). Raises when the user has no alternative group.
def change_current_group
  user_groups = miq_group_ids
  user_groups.delete(current_group_id)
  raise _("The user's current group cannot be changed because the user does not belong to any other group") if user_groups.empty?
  self.current_group = MiqGroup.find_by(:id => user_groups.first)
  save!
end
# The reserved local "admin" account (matched by userid, not by role).
def admin?
  self.class.admin?(userid)
end

def self.admin?(userid)
  userid == "admin"
end
# Widget sets (dashboards) this user subscribed to; destroyed alongside the
# user via the before_destroy callback.
def subscribed_widget_sets
  MiqWidgetSet.subscribed_for_user(self)
end

def destroy_subscribed_widget_sets
  subscribed_widget_sets.destroy_all
end
# VMs this user may see:
# - limited self service: only VMs owned by the user
# - self service:         the user's VMs plus those of all their groups
# - otherwise:            every VM
def accessible_vms
  if limited_self_service?
    vms
  elsif self_service?
    (vms + miq_groups.includes(:vms).collect(&:vms).flatten).uniq
  else
    Vm.all
  end
end
# All user records (across regions) sharing this user's userid, compared
# case-insensitively via the lower_userid virtual attribute.
def regional_users
  self.class.regional_users(self)
end

def self.regional_users(user)
  where(:lower_userid => user.userid.downcase)
end
# The local "admin" account in the current region. Uses lookup_by_userid
# (which is already region scoped) instead of the deprecated
# find_by_userid alias declared above.
def self.super_admin
  lookup_by_userid("admin")
end
# Tenant of the thread-local current user, nil when nobody is logged in.
def self.current_tenant
  current_user.try(:current_tenant)
end
# Save the current user from the session object as a thread variable to allow lookup from other areas of the code
# The previous thread-local user/userid are restored even when the block raises.
def self.with_user(user, userid = nil)
  saved_user = Thread.current[:user]
  saved_userid = Thread.current[:userid]
  self.current_user = user
  # current_user= already syncs :userid from the user; only override when an
  # explicit userid was requested.
  Thread.current[:userid] = userid if userid
  yield
ensure
  Thread.current[:user] = saved_user
  Thread.current[:userid] = saved_userid
end
# Like with_user, but first switches the user onto the given group
# (a MiqGroup instance or a group id). Accepts a User or a user id; a nil
# user simply yields. The group change is only in memory, not persisted.
def self.with_user_group(user, group, &block)
  return yield if user.nil?
  user = User.find(user) unless user.kind_of?(User)
  if group && group.kind_of?(MiqGroup)
    user.current_group = group
  elsif group != user.current_group_id
    # group is (presumably) an id here; ignore unknown ids silently.
    group = MiqGroup.find_by(:id => group)
    user.current_group = group if group
  end
  User.with_user(user, &block)
end
# Thread-local current user; the userid is kept in sync for cheap lookups.
def self.current_user=(user)
  Thread.current[:userid] = user.try(:userid)
  Thread.current[:user] = user
end

# avoid using this. pass current_user where possible
def self.current_userid
  Thread.current[:userid]
end

# Thread-local user, lazily loaded from the stored userid when only the
# userid was set.
def self.current_user
  Thread.current[:user] ||= lookup_by_userid(current_userid)
end
# parallel to MiqGroup.with_groups - only show users with these groups
def self.with_groups(miq_group_ids)
  includes(:miq_groups).where(:miq_groups => {:id => miq_group_ids})
end
# Name of the first missing prerequisite for a usable user record:
# "User", "Group", or "Role"; nil when nothing is missing.
def self.missing_user_features(db_user)
  return "User" unless db_user
  return "Group" unless db_user.current_group
  return "Role" unless db_user.current_group.miq_user_role
end
# Identity payload handed out for system-token authorization, or nil when
# the authenticator does not support it or the user is unknown in this
# region.
def self.metadata_for_system_token(userid)
  return unless authenticator(userid).user_authorizable_with_system_token?
  user = in_my_region.find_by(:userid => userid)
  return if user.blank?
  {
    :userid => user.userid,
    :name => user.name,
    :email => user.email,
    :first_name => user.first_name,
    :last_name => user.last_name,
    :group_names => user.miq_groups.try(:collect, &:description)
  }
end
# Create the fixture-defined users (db/fixtures/users.yml) that do not yet
# exist in this region. The optional :group attribute is matched against a
# MiqGroup description within the region.
def self.seed
  seed_data.each do |user_attributes|
    user_id = user_attributes[:userid]
    # lookup_by_userid is region scoped; avoids the deprecated
    # find_by_userid alias declared earlier in this class.
    next if lookup_by_userid(user_id)
    log_attrs = user_attributes.slice(:name, :userid, :group)
    _log.info("Creating user with parameters #{log_attrs.inspect}")
    group_description = user_attributes.delete(:group)
    group = MiqGroup.in_my_region.find_by(:description => group_description)
    _log.info("Creating #{user_id} user...")
    user = create(user_attributes)
    user.miq_groups = [group] if group
    # NOTE(review): save (not save!) — seeding silently tolerates an invalid
    # record; confirm this is intentional.
    user.save
    _log.info("Creating #{user_id} user... Complete")
  end
end
# Path to the user fixture file, e.g. db/fixtures/users.yml.
def self.seed_file_name
  @seed_file_name ||= Rails.root.join("db", "fixtures", "#{table_name}.yml")
end
private_class_method :seed_file_name

# Parsed fixture records; [] when the fixture file is absent.
# The fixture ships with the product, so YAML.load_file on it is trusted.
def self.seed_data
  File.exist?(seed_file_name) ? YAML.load_file(seed_file_name) : []
end
private_class_method :seed_data
private

# Queue a delayed unlock! for a locked account. put_or_update ensures only
# one pending unlock message exists per user; the block pushes the delivery
# time out by the configured lockout duration.
def unlock_queue
  MiqQueue.put_or_update(
    :class_name => self.class.name,
    :instance_id => id,
    :method_name => 'unlock!',
    :priority => MiqQueue::MAX_PRIORITY
  ) do |_msg, queue_options|
    queue_options.merge(:deliver_on => Time.now.utc + ::Settings.authentication.locked_account_timeout.to_i)
  end
end
end
| kbrock/manageiq | app/models/user.rb | Ruby | apache-2.0 | 15,860 |
/*
* Copyright 2017 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/io/async/ssl/SSLErrors.h>
#include <folly/Range.h>
#include <openssl/err.h>
#include <openssl/ssl.h>
using namespace folly;
namespace {
/**
 * Render a failed OpenSSL operation as a human readable message.
 *
 * @param sslError                 result of SSL_get_error()
 * @param errError                 head of the OpenSSL error queue
 * @param sslOperationReturnValue  return value of the failed SSL_* call
 */
std::string decodeOpenSSLError(
    int sslError,
    unsigned long errError,
    int sslOperationReturnValue) {
  if (sslError == SSL_ERROR_SYSCALL && errError == 0) {
    // Syscall failure with an empty error queue: a return value of 0 means
    // the peer closed the transport. Otherwise errno carries the details,
    // and AsyncSocketException will append it.
    return sslOperationReturnValue == 0 ? "Connection EOF" : "Network error";
  }
  if (sslError == SSL_ERROR_ZERO_RETURN) {
    // TLS close_notify alert: a clean shutdown.
    return "SSL connection closed normally";
  }
  // Anything else: let OpenSSL render the queued error code.
  // ERR_error_string_n always null-terminates the buffer.
  std::array<char, 256> buf;
  ERR_error_string_n(errError, buf.data(), buf.size());
  return std::string(buf.data());
}
// Human readable description for each high-level SSLError category.
// Returns a default-constructed StringPiece for out-of-range values; no
// default case so the compiler flags newly added enumerators.
const StringPiece getSSLErrorString(SSLError error) {
  switch (error) {
    case SSLError::CLIENT_RENEGOTIATION:
      return "Client tried to renegotiate with server";
    case SSLError::INVALID_RENEGOTIATION:
      return "Attempt to start renegotiation, but unsupported";
    case SSLError::EARLY_WRITE:
      return "Attempt to write before SSL connection established";
    case SSLError::SSL_ERROR:
      return "SSL error";
    case SSLError::NETWORK_ERROR:
      return "Network error";
    case SSLError::EOF_ERROR:
      return "SSL connection closed normally";
  }
  return StringPiece();
}
// Classify a raw SSL_get_error() result into an AsyncSocketException type.
// Mirrors the logic of decodeOpenSSLError above.
AsyncSocketException::AsyncSocketExceptionType exTypefromSSLErrInfo(
    int sslErr,
    unsigned long errError,
    int sslOperationReturnValue) {
  if (sslErr == SSL_ERROR_ZERO_RETURN) {
    // Clean TLS shutdown.
    return AsyncSocketException::END_OF_FILE;
  }
  if (sslErr == SSL_ERROR_SYSCALL) {
    // Empty error queue + zero return means the peer closed the transport.
    return (errError == 0 && sslOperationReturnValue == 0)
        ? AsyncSocketException::END_OF_FILE
        : AsyncSocketException::NETWORK_ERROR;
  }
  // Assume an actual SSL error
  return AsyncSocketException::SSL_ERROR;
}
// Map a high-level SSLError category onto the AsyncSocketException type.
AsyncSocketException::AsyncSocketExceptionType exTypefromSSLErr(SSLError err) {
  switch (err) {
    case SSLError::EOF_ERROR:
      return AsyncSocketException::END_OF_FILE;
    case SSLError::NETWORK_ERROR:
      return AsyncSocketException::NETWORK_ERROR;
    default:
      // everything else is a SSL_ERROR
      return AsyncSocketException::SSL_ERROR;
  }
}
}
namespace folly {
/**
 * Build an SSLException from the result of a failed OpenSSL operation.
 *
 * @param sslErr                  result of SSL_get_error()
 * @param errError                head of the OpenSSL error queue
 * @param sslOperationReturnValue return value of the failed SSL_* call
 * @param errno_copy              errno captured right after the call; only
 *                                forwarded for syscall-level failures
 */
SSLException::SSLException(
    int sslErr,
    unsigned long errError,
    int sslOperationReturnValue,
    int errno_copy)
    : AsyncSocketException(
          exTypefromSSLErrInfo(sslErr, errError, sslOperationReturnValue),
          decodeOpenSSLError(sslErr, errError, sslOperationReturnValue),
          sslErr == SSL_ERROR_SYSCALL ? errno_copy : 0) {
  // Record the coarse-grained category alongside the base-class fields.
  if (sslErr == SSL_ERROR_ZERO_RETURN) {
    sslError = SSLError::EOF_ERROR;
  } else if (sslErr == SSL_ERROR_SYSCALL) {
    sslError = SSLError::NETWORK_ERROR;
  } else {
    // Conservatively assume that this is an SSL error
    sslError = SSLError::SSL_ERROR;
  }
}
/// Build an SSLException directly from a high-level SSLError category,
/// deriving the exception type and message from that category.
SSLException::SSLException(SSLError error)
    : AsyncSocketException(
          exTypefromSSLErr(error),
          getSSLErrorString(error).str(),
          0),
      sslError(error) {}
}
| charsyam/folly | folly/io/async/ssl/SSLErrors.cpp | C++ | apache-2.0 | 3,876 |
/*
* Copyright 2010-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.opsworks.model;
import java.io.Serializable;
import com.amazonaws.AmazonWebServiceRequest;
/**
*
*/
/**
 * Request object for detaching an Elastic Load Balancing instance from an
 * OpsWorks layer. Generated-SDK style: bean accessors, fluent "with"
 * variants, and value-based equals/hashCode/toString.
 */
public class DetachElasticLoadBalancerRequest extends AmazonWebServiceRequest
        implements Serializable, Cloneable {

    /**
     * <p>
     * The Elastic Load Balancing instance's name.
     * </p>
     */
    private String elasticLoadBalancerName;
    /**
     * <p>
     * The ID of the layer that the Elastic Load Balancing instance is attached
     * to.
     * </p>
     */
    private String layerId;

    /**
     * <p>
     * The Elastic Load Balancing instance's name.
     * </p>
     *
     * @param elasticLoadBalancerName
     *        The Elastic Load Balancing instance's name.
     */
    public void setElasticLoadBalancerName(String elasticLoadBalancerName) {
        this.elasticLoadBalancerName = elasticLoadBalancerName;
    }

    /**
     * <p>
     * The Elastic Load Balancing instance's name.
     * </p>
     *
     * @return The Elastic Load Balancing instance's name.
     */
    public String getElasticLoadBalancerName() {
        return this.elasticLoadBalancerName;
    }

    /**
     * <p>
     * The Elastic Load Balancing instance's name.
     * </p>
     *
     * @param elasticLoadBalancerName
     *        The Elastic Load Balancing instance's name.
     * @return Returns a reference to this object so that method calls can be
     *         chained together.
     */
    public DetachElasticLoadBalancerRequest withElasticLoadBalancerName(
            String elasticLoadBalancerName) {
        setElasticLoadBalancerName(elasticLoadBalancerName);
        return this;
    }

    /**
     * <p>
     * The ID of the layer that the Elastic Load Balancing instance is attached
     * to.
     * </p>
     *
     * @param layerId
     *        The ID of the layer that the Elastic Load Balancing instance is
     *        attached to.
     */
    public void setLayerId(String layerId) {
        this.layerId = layerId;
    }

    /**
     * <p>
     * The ID of the layer that the Elastic Load Balancing instance is attached
     * to.
     * </p>
     *
     * @return The ID of the layer that the Elastic Load Balancing instance is
     *         attached to.
     */
    public String getLayerId() {
        return this.layerId;
    }

    /**
     * <p>
     * The ID of the layer that the Elastic Load Balancing instance is attached
     * to.
     * </p>
     *
     * @param layerId
     *        The ID of the layer that the Elastic Load Balancing instance is
     *        attached to.
     * @return Returns a reference to this object so that method calls can be
     *         chained together.
     */
    public DetachElasticLoadBalancerRequest withLayerId(String layerId) {
        setLayerId(layerId);
        return this;
    }

    /**
     * Returns a string representation of this object; useful for testing and
     * debugging. Only non-null fields are included.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        if (getElasticLoadBalancerName() != null)
            sb.append("ElasticLoadBalancerName: "
                    + getElasticLoadBalancerName() + ",");
        if (getLayerId() != null)
            sb.append("LayerId: " + getLayerId());
        sb.append("}");
        return sb.toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;

        if (obj instanceof DetachElasticLoadBalancerRequest == false)
            return false;
        DetachElasticLoadBalancerRequest other = (DetachElasticLoadBalancerRequest) obj;
        // For each field: XOR catches "exactly one side null", the second
        // check compares values when both are non-null.
        if (other.getElasticLoadBalancerName() == null
                ^ this.getElasticLoadBalancerName() == null)
            return false;
        if (other.getElasticLoadBalancerName() != null
                && other.getElasticLoadBalancerName().equals(
                        this.getElasticLoadBalancerName()) == false)
            return false;
        if (other.getLayerId() == null ^ this.getLayerId() == null)
            return false;
        if (other.getLayerId() != null
                && other.getLayerId().equals(this.getLayerId()) == false)
            return false;
        return true;
    }

    @Override
    public int hashCode() {
        // Standard 31-based accumulation over both fields (null hashes to 0),
        // consistent with equals above.
        final int prime = 31;
        int hashCode = 1;

        hashCode = prime
                * hashCode
                + ((getElasticLoadBalancerName() == null) ? 0
                        : getElasticLoadBalancerName().hashCode());
        hashCode = prime * hashCode
                + ((getLayerId() == null) ? 0 : getLayerId().hashCode());
        return hashCode;
    }

    @Override
    public DetachElasticLoadBalancerRequest clone() {
        // Shallow clone is sufficient: both fields are immutable Strings.
        return (DetachElasticLoadBalancerRequest) super.clone();
    }
} | sdole/aws-sdk-java | aws-java-sdk-opsworks/src/main/java/com/amazonaws/services/opsworks/model/DetachElasticLoadBalancerRequest.java | Java | apache-2.0 | 5,584 |
/*
* Copyright 2015-2016 USEF Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package energy.usef.agr.workflow.operate.recreate.prognoses;
import static energy.usef.agr.workflow.AgrWorkflowStep.AGR_RECREATE_PROGNOSES;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import energy.usef.agr.dto.ConnectionPortfolioDto;
import energy.usef.agr.dto.ForecastPowerDataDto;
import energy.usef.agr.dto.PowerContainerDto;
import energy.usef.agr.service.business.AgrPlanboardBusinessService;
import energy.usef.agr.service.business.AgrPortfolioBusinessService;
import energy.usef.agr.workflow.operate.recreate.prognoses.ReCreatePrognosesWorkflowParameter.OUT;
import energy.usef.agr.workflow.plan.create.aplan.CreateAPlanEvent;
import energy.usef.agr.workflow.plan.recreate.aplan.ReCreateAPlanEvent;
import energy.usef.agr.workflow.validate.create.dprognosis.ReCreateDPrognosisEvent;
import energy.usef.core.config.Config;
import energy.usef.core.event.RequestMoveToValidateEvent;
import energy.usef.core.event.validation.EventValidationService;
import energy.usef.core.exception.BusinessValidationException;
import energy.usef.core.model.BrpConnectionGroup;
import energy.usef.core.model.CongestionPointConnectionGroup;
import energy.usef.core.model.Connection;
import energy.usef.core.model.ConnectionGroup;
import energy.usef.core.model.DocumentType;
import energy.usef.core.model.PrognosisType;
import energy.usef.core.service.business.CorePlanboardBusinessService;
import energy.usef.core.util.DateTimeUtil;
import energy.usef.core.workflow.DefaultWorkflowContext;
import energy.usef.core.workflow.WorkflowContext;
import energy.usef.core.workflow.dto.PrognosisDto;
import energy.usef.core.workflow.dto.PrognosisTypeDto;
import energy.usef.core.workflow.dto.PtuPrognosisDto;
import energy.usef.core.workflow.step.WorkflowStepExecuter;
import java.math.BigInteger;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import javax.enterprise.event.Event;
import org.joda.time.LocalDate;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Matchers;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.modules.junit4.PowerMockRunner;
import org.powermock.reflect.Whitebox;
/**
* Test class in charge of the unit tests related to the {@link AgrReCreatePrognosesCoordinator} class.
*/
@RunWith(PowerMockRunner.class)
public class AgrReCreatePrognosesCoordinatorTest {

    private AgrReCreatePrognosesCoordinator coordinator;
    @Mock
    private AgrPlanboardBusinessService agrPlanboardBusinessService;
    @Mock
    private CorePlanboardBusinessService corePlanboardBusinessService;
    @Mock
    private AgrPortfolioBusinessService agrPortfolioBusinessService;
    @Mock
    private WorkflowStepExecuter workflowStepExecuter;
    @Mock
    private Event<ReCreateAPlanEvent> reCreateAPlanEventManager;
    @Mock
    private Event<CreateAPlanEvent> createAPlanEventManager;
    @Mock
    private Event<ReCreateDPrognosisEvent> reCreateDPrognosisEventManager;
    @Mock
    private Event<RequestMoveToValidateEvent> moveToValidateEventManager;
    @Mock
    private EventValidationService eventValidationService;
    @Mock
    private Config config;

    /**
     * Builds the coordinator under test by hand and injects every mocked
     * collaborator via Whitebox (no CDI container in unit tests).
     */
    @Before
    public void setUp() {
        coordinator = new AgrReCreatePrognosesCoordinator();
        Whitebox.setInternalState(coordinator, agrPlanboardBusinessService);
        Whitebox.setInternalState(coordinator, corePlanboardBusinessService);
        Whitebox.setInternalState(coordinator, agrPortfolioBusinessService);
        Whitebox.setInternalState(coordinator, workflowStepExecuter);
        Whitebox.setInternalState(coordinator, config);
        Whitebox.setInternalState(coordinator, eventValidationService);
        // Event fields share types with each other, so they are injected by name.
        Whitebox.setInternalState(coordinator, "reCreateAPlanEventManager", reCreateAPlanEventManager);
        Whitebox.setInternalState(coordinator, "createAPlanEventManager", createAPlanEventManager);
        Whitebox.setInternalState(coordinator, "reCreateDPrognosisEventManager", reCreateDPrognosisEventManager);
        Whitebox.setInternalState(coordinator, "moveToValidateEventManager", moveToValidateEventManager);
    }

    /**
     * Operate phase (period == today): the PBC flags A-plans and D-prognoses
     * for re-creation; verifies a ReCreateDPrognosisEvent is fired for the period.
     */
    @Test
    public void testHandleEventIsSuccessful() throws BusinessValidationException {
        Long[] dprognosisSequences = new Long[] { 3l, 4l, 5l };
        List<PrognosisDto> latestDPrognoses = buildLatestDPrognoses(dprognosisSequences);
        PowerMockito.when(workflowStepExecuter.invoke(Mockito.eq(AGR_RECREATE_PROGNOSES.name()), Mockito.any()))
                .thenReturn(buildContextAfterPBC());
        PowerMockito.when(
                agrPlanboardBusinessService.findLastPrognoses(Matchers.any(LocalDate.class), Matchers.eq(PrognosisType.A_PLAN),
                        Matchers.eq(Optional.empty()))).thenReturn(buildLatestAPlans(1l, 2l));
        PowerMockito.when(
                agrPlanboardBusinessService.findLastPrognoses(Matchers.any(LocalDate.class), Matchers.eq(PrognosisType.D_PROGNOSIS),
                        Matchers.eq(Optional.empty()))).thenReturn(latestDPrognoses);
        PowerMockito.when(corePlanboardBusinessService.findActiveConnectionGroupsWithConnections(Matchers.any(LocalDate.class)))
                .thenReturn(buildConnectionGroupToConnections());
        PowerMockito.when(agrPortfolioBusinessService.findConnectionPortfolioDto(Matchers.any(LocalDate.class)))
                .thenReturn(buildConnectionPortfolio());

        coordinator.handleEvent(buildReCreatePrognosesEvent(DateTimeUtil.getCurrentDate()));

        ArgumentCaptor<ReCreateDPrognosisEvent> reCreateDPrognosisEventCaptor = ArgumentCaptor.forClass(
                ReCreateDPrognosisEvent.class);
        verify(agrPlanboardBusinessService, times(1)).findLastPrognoses(Matchers.any(LocalDate.class),
                Matchers.eq(PrognosisType.A_PLAN),
                Matchers.any(Optional.class));
        verify(agrPlanboardBusinessService, times(1)).findLastPrognoses(Matchers.any(LocalDate.class),
                Matchers.eq(PrognosisType.D_PROGNOSIS), Matchers.any(Optional.class));
        verify(reCreateDPrognosisEventManager, times(1)).fire(reCreateDPrognosisEventCaptor.capture());
        ReCreateDPrognosisEvent capturedReCreateDPrognosisEvent = reCreateDPrognosisEventCaptor.getValue();
        Assert.assertNotNull(capturedReCreateDPrognosisEvent);
        Assert.assertEquals(DateTimeUtil.getCurrentDate(), capturedReCreateDPrognosisEvent.getPeriod());
    }

    /**
     * Plan phase (period == tomorrow): both re-creation events fire and each
     * flagged prognosis message is looked up on the planboard.
     */
    @Test
    public void testHandleEventIsSuccessfulForPlanPhase() throws BusinessValidationException {
        ArgumentCaptor<WorkflowContext> contextCaptor = ArgumentCaptor.forClass(WorkflowContext.class);
        Long[] dprognosisSequences = new Long[] { 3l, 4l, 5l };
        Long[] aplansSequences = new Long[] { 1l, 2l };
        List<PrognosisDto> latestDPrognoses = buildLatestDPrognoses(dprognosisSequences);
        PowerMockito
                .when(workflowStepExecuter.invoke(Mockito.eq(AGR_RECREATE_PROGNOSES.name()), contextCaptor.capture()))
                .thenReturn(buildContextAfterPBC());
        PowerMockito.when(
                agrPlanboardBusinessService.findLastPrognoses(Matchers.any(LocalDate.class),
                        Matchers.eq(PrognosisType.A_PLAN),
                        Matchers.any(Optional.class))).thenReturn(buildLatestAPlans(aplansSequences));
        PowerMockito.when(
                agrPlanboardBusinessService.findLastPrognoses(Matchers.any(LocalDate.class),
                        Matchers.eq(PrognosisType.D_PROGNOSIS),
                        Matchers.any(Optional.class))).thenReturn(latestDPrognoses);

        coordinator.handleEvent(buildReCreatePrognosesEvent(DateTimeUtil.getCurrentDate().plusDays(1)));

        verify(agrPlanboardBusinessService, times(1)).findLastPrognoses(Matchers.any(LocalDate.class),
                Matchers.eq(PrognosisType.A_PLAN),
                Matchers.any(Optional.class));
        verify(agrPlanboardBusinessService, times(1)).findLastPrognoses(Matchers.any(LocalDate.class),
                Matchers.eq(PrognosisType.D_PROGNOSIS), Matchers.any(Optional.class));
        verify(reCreateDPrognosisEventManager, times(1)).fire(Matchers.any(ReCreateDPrognosisEvent.class));
        verify(reCreateAPlanEventManager, times(1)).fire(Matchers.any(ReCreateAPlanEvent.class));
        // One planboard-message lookup per flagged sequence, per document type.
        verify(corePlanboardBusinessService, times(aplansSequences.length)).findSinglePlanboardMessage(Matchers.any(Long.class),
                Matchers.eq(DocumentType.A_PLAN), Matchers.eq("brp.usef-example.com"));
        verify(corePlanboardBusinessService, times(dprognosisSequences.length)).findSinglePlanboardMessage(Matchers.any(Long.class),
                Matchers.eq(DocumentType.D_PROGNOSIS), Matchers.eq("dso.usef-example.com"));
    }

    // Context the mocked PBC returns: sequences 1-2 need a new A-plan,
    // sequences 3-5 need a new D-prognosis.
    private WorkflowContext buildContextAfterPBC() {
        WorkflowContext context = new DefaultWorkflowContext();
        context.setValue(OUT.REQUIRES_NEW_A_PLAN_SEQUENCES_LIST.name(), Arrays.asList(1l, 2l));
        context.setValue(OUT.REQUIRES_NEW_D_PROGNOSIS_SEQUENCES_LIST.name(), Arrays.asList(3l, 4l, 5l));
        return context;
    }

    // Two overlapping connection groups (a congestion point and a BRP group)
    // sharing connection ean.000000000002.
    private Map<ConnectionGroup, List<Connection>> buildConnectionGroupToConnections() {
        final ConnectionGroup congestionPoint = new CongestionPointConnectionGroup("ean.123456789012345678");
        final ConnectionGroup brpConnectionGroup = new BrpConnectionGroup("brp.usef-example.com");
        final Connection connection1 = new Connection("ean.000000000001");
        final Connection connection2 = new Connection("ean.000000000002");
        final Connection connection3 = new Connection("ean.000000000003");
        Map<ConnectionGroup, List<Connection>> result = new HashMap<>();
        result.put(congestionPoint, Arrays.asList(connection1, connection2));
        result.put(brpConnectionGroup, Arrays.asList(connection2, connection3));
        return result;
    }

    // Minimal portfolio: three connections, each with one forecast power
    // container for PTU 1.
    private List<ConnectionPortfolioDto> buildConnectionPortfolio() {
        ConnectionPortfolioDto connectionDto1 = new ConnectionPortfolioDto("ean.000000000001");
        ConnectionPortfolioDto connectionDto2 = new ConnectionPortfolioDto("ean.000000000002");
        ConnectionPortfolioDto connectionDto3 = new ConnectionPortfolioDto("ean.000000000003");

        // PowerData
        ForecastPowerDataDto powerData1 = new ForecastPowerDataDto();
        powerData1.setUncontrolledLoad(BigInteger.TEN);
        ForecastPowerDataDto powerData2 = new ForecastPowerDataDto();
        powerData2.setAverageConsumption(BigInteger.TEN);
        ForecastPowerDataDto powerData3 = new ForecastPowerDataDto();
        powerData3.setAverageConsumption(BigInteger.TEN);

        // uncontrolled load
        PowerContainerDto pDto1 = new PowerContainerDto(new LocalDate(), 1);
        pDto1.setForecast(powerData1);
        PowerContainerDto pDto2 = new PowerContainerDto(new LocalDate(), 1);
        pDto2.setForecast(powerData2);
        PowerContainerDto pDto3 = new PowerContainerDto(new LocalDate(), 1);
        pDto3.setForecast(powerData3);

        connectionDto1.getConnectionPowerPerPTU().put(1, pDto1);
        connectionDto2.getConnectionPowerPerPTU().put(1, pDto2);
        connectionDto3.getConnectionPowerPerPTU().put(1, pDto3);
        return Stream.of(connectionDto1, connectionDto2, connectionDto3).collect(Collectors.toList());
    }

    private ReCreatePrognosesEvent buildReCreatePrognosesEvent(LocalDate period) {
        return new ReCreatePrognosesEvent(period);
    }

    // One A-plan per sequence, 96 PTUs each, power = 10 * sequence.
    private List<PrognosisDto> buildLatestAPlans(Long... sequences) {
        return Stream.of(sequences)
                .map(sequence -> {
                    PrognosisDto prognosisDto = new PrognosisDto();
                    prognosisDto.setParticipantDomain("brp.usef-example.com");
                    prognosisDto.setType(PrognosisTypeDto.A_PLAN);
                    prognosisDto.setPeriod(DateTimeUtil.parseDate("2015-03-03"));
                    prognosisDto.setConnectionGroupEntityAddress("brp.usef-example.com");
                    prognosisDto.setSequenceNumber(sequence);
                    prognosisDto.getPtus().addAll(
                            IntStream.rangeClosed(1, 96).mapToObj(index -> {
                                PtuPrognosisDto ptuPrognosis = new PtuPrognosisDto();
                                ptuPrognosis.setPtuIndex(BigInteger.valueOf(index));
                                ptuPrognosis.setPower(BigInteger.TEN.multiply(BigInteger.valueOf(sequence)));
                                return ptuPrognosis;
                            }).collect(Collectors.toList()));
                    return prognosisDto;
                }).collect(Collectors.toList());
    }

    // One D-prognosis per sequence, 96 PTUs each, power = 10 * sequence.
    private List<PrognosisDto> buildLatestDPrognoses(Long... sequences) {
        return Stream.of(sequences)
                .map(sequence -> {
                    PrognosisDto prognosisDto = new PrognosisDto();
                    prognosisDto.setParticipantDomain("dso.usef-example.com");
                    prognosisDto.setType(PrognosisTypeDto.D_PROGNOSIS);
                    prognosisDto.setPeriod(DateTimeUtil.parseDate("2015-03-03"));
                    prognosisDto.setConnectionGroupEntityAddress("ean.123456789012345678");
                    prognosisDto.setSequenceNumber(sequence);
                    prognosisDto.getPtus().addAll(
                            IntStream.rangeClosed(1, 96).mapToObj(index -> {
                                PtuPrognosisDto ptuPrognosis = new PtuPrognosisDto();
                                ptuPrognosis.setPtuIndex(BigInteger.valueOf(index));
                                ptuPrognosis.setPower(BigInteger.TEN.multiply(BigInteger.valueOf(sequence)));
                                return ptuPrognosis;
                            }).collect(Collectors.toList()));
                    return prognosisDto;
                }).collect(Collectors.toList());
    }
}
| USEF-Foundation/ri.usef.energy | usef-build/usef-workflow/usef-agr/src/test/java/energy/usef/agr/workflow/operate/recreate/prognoses/AgrReCreatePrognosesCoordinatorTest.java | Java | apache-2.0 | 14,800 |
package org.zstack.test.storage.primary.local;
import junit.framework.Assert;
import org.junit.Before;
import org.junit.Test;
import org.zstack.core.cloudbus.CloudBus;
import org.zstack.core.componentloader.ComponentLoader;
import org.zstack.core.db.DatabaseFacade;
import org.zstack.header.configuration.InstanceOfferingInventory;
import org.zstack.header.host.HostInventory;
import org.zstack.header.identity.SessionInventory;
import org.zstack.header.image.ImageInventory;
import org.zstack.header.network.l3.L3NetworkInventory;
import org.zstack.header.vm.VmInstanceInventory;
import org.zstack.storage.primary.local.LocalStorageSimulatorConfig;
import org.zstack.storage.primary.local.LocalStorageSimulatorConfig.Capacity;
import org.zstack.test.*;
import org.zstack.test.deployer.Deployer;
import org.zstack.utils.CollectionUtils;
import org.zstack.utils.data.SizeUnit;
import org.zstack.utils.function.Function;
/**
 * Scenario:
 * 1. two hosts ("host1", "host2"), each backed by local storage of equal capacity
 * 2. a vm ("TestVm") is deployed on one of them
 * 3. the vm is migrated to the other host
 *
 * confirm the migration API call succeeds
 */
public class TestLocalStorage15 {
    Deployer deployer;
    Api api;
    ComponentLoader loader;
    CloudBus bus;
    DatabaseFacade dbf;
    SessionInventory session;
    LocalStorageSimulatorConfig config;
    long totalSize = SizeUnit.GIGABYTE.toByte(100);

    @Before
    public void setUp() throws Exception {
        DBUtil.reDeployDB();
        WebBeanConstructor webBeanConstructor = new WebBeanConstructor();
        // NOTE(review): reuses the TestLocalStorage14 deployment descriptor -- presumably
        // the same two-host topology applies; confirm against the xml.
        deployer = new Deployer("deployerXml/localStorage/TestLocalStorage14.xml", webBeanConstructor);
        deployer.addSpringConfig("KVMRelated.xml");
        deployer.addSpringConfig("localStorageSimulator.xml");
        deployer.addSpringConfig("localStorage.xml");
        deployer.load();
        loader = deployer.getComponentLoader();
        bus = loader.getComponent(CloudBus.class);
        dbf = loader.getComponent(DatabaseFacade.class);
        config = loader.getComponent(LocalStorageSimulatorConfig.class);
        // both hosts report identical local-storage capacity
        Capacity capacity = new Capacity();
        capacity.total = totalSize;
        capacity.avail = totalSize;
        config.capacityMap.put("host1", capacity);
        config.capacityMap.put("host2", capacity);
        deployer.build();
        api = deployer.getApi();
        session = api.loginAsAdmin();
    }

    @Test
    public void test() throws ApiSenderException {
        final VmInstanceInventory vm = deployer.vms.get("TestVm");
        // pick any host other than the one currently running the vm
        String destinationHostUuid = CollectionUtils.find(deployer.hosts.values(), new Function<String, HostInventory>() {
            @Override
            public String call(HostInventory arg) {
                return !arg.getUuid().equals(vm.getHostUuid()) ? arg.getUuid() : null;
            }
        });
        api.migrateVmInstance(vm.getUuid(), destinationHostUuid);
    }
}
| newbiet/zstack | test/src/test/java/org/zstack/test/storage/primary/local/TestLocalStorage15.java | Java | apache-2.0 | 2,846 |
require File.expand_path(File.dirname(__FILE__) + '/../spec_helper')
describe ProjectCategoriesHelper do
  # TODO(review): auto-generated placeholder spec -- replace with real examples
  # for the helper's methods, or delete this file.
  it "should be included in the object returned by #helper" do
    # Inspect the helper object's singleton class to confirm the module is mixed in.
    included_modules = (class << helper; self; end).send :included_modules
    included_modules.should include(ProjectCategoriesHelper)
  end
end
| fusesource/fuseforge | spec/helpers/project_categories_helper_spec.rb | Ruby | apache-2.0 | 387 |
/*
* Copyright 2001-2007 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jetbrains.java.generate.test;
import org.jetbrains.java.generate.config.Config;
import java.io.Serializable;
import java.util.Date;
/**
 * Dummy fixture bean used to exercise the toString() generator plugin.
 * Carries a mix of constant, transient, and instance fields; the large
 * commented-out field list documents shapes the generator has been tested
 * against in the past.
 */
public class DummyTestBean extends Config implements Serializable {

    public static final String CONST_FIELD = "XXX_XXX";
    private static final String CONST_FIELD_PRIV = "XXX_XXX";

    @SuppressWarnings("UnnecessaryModifier")
    private static final transient String tran = "xxx";

//    private static String myStaticString;
//    private String[] nameStrings = new String[] { "Claus", "Ibsen" };
//    private String otherStrs[];
//    public int[] ipAdr = new int[] { 127, 92 };
//    private List arrList = new ArrayList();
//
//    private Calendar cal = Calendar.getInstance();
    private final Date bday = new java.util.Date();
//
//    public String pubString;
//    private String firstName;
//    private java.sql.Date sqlBirthDay = new java.sql.Date(new java.util.Date().getTime());
//    private List children;
//    public Object someObject;
//    public Object[] moreObjects;
//    public Map cityMap;
//    public Set courses;
//    private byte smallNumber;
    private float salary;
//    protected String procString;
//    String defaultPackageString;
//    private java.util.Date utilDateTime = new java.util.Date();

    private final DummyTestBean singleton = null;

    public DummyTestBean getSingleton() {
        return singleton;
    }

    @Override
    public String toString() {
        // Built with a StringBuilder; output is identical to plain concatenation.
        final StringBuilder sb = new StringBuilder("DummyTestBean{");
        sb.append("tran='").append(tran).append('\'');
        sb.append(", bday=").append(bday);
        sb.append(", salary=").append(salary);
        sb.append(", singleton=").append(singleton);
        sb.append('}');
        return sb.toString();
    }
}
| jwren/intellij-community | plugins/generate-tostring/testSrc/org/jetbrains/java/generate/test/DummyTestBean.java | Java | apache-2.0 | 2,376 |
/*
* Copyright 2014-present Open Networking Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.atomix.cluster;
import io.atomix.utils.event.EventListener;
/**
 * Entity capable of receiving device cluster-related events.
 * <p>
 * Marker interface specializing {@link EventListener} to
 * {@link ClusterMembershipEvent}; implementers register for notifications
 * about cluster membership changes.
 */
public interface ClusterMembershipEventListener extends EventListener<ClusterMembershipEvent> {
}
| kuujo/copycat | cluster/src/main/java/io/atomix/cluster/ClusterMembershipEventListener.java | Java | apache-2.0 | 855 |
package com.frameworkium.integration.restfulbooker.api.dto.booking;
import com.frameworkium.core.api.dto.AbstractDTO;
import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
public class BookingDates extends AbstractDTO<BookingDates> {

    /** Formatter for check-in/check-out dates: ISO-8601 local date (yyyy-MM-dd). */
    public static final DateTimeFormatter FORMAT =
            DateTimeFormatter.ISO_LOCAL_DATE;

    public String checkin;
    public String checkout;

    /** Creates a booking window starting tomorrow and ending ten days from now. */
    public static BookingDates newInstance() {
        final BookingDates bookingDates = new BookingDates();
        bookingDates.checkin = FORMAT.format(LocalDate.now().plusDays(1));
        bookingDates.checkout = FORMAT.format(LocalDate.now().plusDays(10));
        return bookingDates;
    }
}
| Frameworkium/frameworkium-core | src/test/java/com/frameworkium/integration/restfulbooker/api/dto/booking/BookingDates.java | Java | apache-2.0 | 673 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Package to test JOrphanUtils methods
*/
package org.apache.jorphan.util;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import java.nio.charset.StandardCharsets;
import org.hamcrest.CoreMatchers;
import org.junit.Assert;
import org.junit.Test;
/**
 * Unit tests for {@code JOrphanUtils} string utilities: replaceFirst, split
 * (two- and three-argument forms), replaceAllChars, trim, hex conversion,
 * isBlank, rightAlign, replaceAllWithRegex and replaceValue.
 */
public class TestJorphanUtils {

    // --- replaceFirst ---
    @Test
    public void testReplace1() {
        assertEquals("xyzdef", JOrphanUtils.replaceFirst("abcdef", "abc", "xyz"));
    }
    @Test
    public void testReplace2() {
        assertEquals("axyzdef", JOrphanUtils.replaceFirst("abcdef", "bc", "xyz"));
    }
    @Test
    public void testReplace3() {
        assertEquals("abcxyz", JOrphanUtils.replaceFirst("abcdef", "def", "xyz"));
    }
    @Test
    public void testReplace4() {
        // search string not present: input returned unchanged
        assertEquals("abcdef", JOrphanUtils.replaceFirst("abcdef", "bce", "xyz"));
    }
    @Test
    public void testReplace5() {
        assertEquals("abcdef", JOrphanUtils.replaceFirst("abcdef", "alt=\"\" ", ""));
    }
    // NOTE(review): testReplace6 duplicates testReplace5 exactly -- presumably one
    // was meant to cover a different case; confirm intent before removing.
    @Test
    public void testReplace6() {
        assertEquals("abcdef", JOrphanUtils.replaceFirst("abcdef", "alt=\"\" ", ""));
    }
    @Test
    public void testReplace7() {
        assertEquals("alt=\"\"", JOrphanUtils.replaceFirst("alt=\"\"", "alt=\"\" ", ""));
    }
    @Test
    public void testReplace8() {
        assertEquals("img src=xyz ", JOrphanUtils.replaceFirst("img src=xyz alt=\"\" ", "alt=\"\" ", ""));
    }

    // Note: the split tests should agree as far as possible with CSVSaveService.csvSplitString()

    // Tests for split(String,String,boolean)
    @Test
    public void testSplitStringStringTrueWithTrailingSplitChars() {
        // Test ignore trailing split characters
        // Ignore adjacent delimiters
        assertThat("Ignore trailing split chars", JOrphanUtils.split("a,bc,,", ",", true),
                CoreMatchers.equalTo(new String[] { "a", "bc" }));
    }
    @Test
    public void testSplitStringStringFalseWithTrailingSplitChars() {
        // Test ignore trailing split characters
        assertThat("Include the trailing split chars", JOrphanUtils.split("a,bc,,", ",", false),
                CoreMatchers.equalTo(new String[] { "a", "bc", "", "" }));
    }
    @Test
    public void testSplitStringStringTrueWithLeadingSplitChars() {
        // Test leading split characters
        assertThat("Ignore leading split chars", JOrphanUtils.split(",,a,bc", ",", true),
                CoreMatchers.equalTo(new String[] { "a", "bc" }));
    }
    @Test
    public void testSplitStringStringFalseWithLeadingSplitChars() {
        // Test leading split characters
        assertThat("Include leading split chars", JOrphanUtils.split(",,a,bc", ",", false),
                CoreMatchers.equalTo(new String[] { "", "", "a", "bc" }));
    }
    @Test
    public void testSplit3() {
        String in = "a,bc,,"; // Test ignore trailing split characters
        String[] out = JOrphanUtils.split(in, ",",true);// Ignore adjacent delimiters
        assertThat(out, CoreMatchers.equalTo(new String[] { "a", "bc" }));
        out = JOrphanUtils.split(in, ",",false);
        assertThat(out, CoreMatchers.equalTo(new String[] { "a", "bc", "", "" }));
    }
    @Test
    public void testSplitStringStringTrueWithLeadingComplexSplitCharacters() {
        // Test leading split characters
        assertThat(JOrphanUtils.split(" , ,a ,bc", " ,", true), CoreMatchers.equalTo(new String[] { "a", "bc" }));
    }
    @Test
    public void testSplitStringStringFalseWithLeadingComplexSplitCharacters() {
        // Test leading split characters
        assertThat(JOrphanUtils.split(" , ,a ,bc", " ,", false),
                CoreMatchers.equalTo(new String[] { "", "", "a", "bc" }));
    }
    @Test
    public void testSplitStringStringTrueTruncate() throws Exception
    {
        assertThat(JOrphanUtils.split("a;,b;,;,;,d;,e;,;,f", ";,", true),
                CoreMatchers.equalTo(new String[] { "a", "b", "d", "e", "f" }));
    }
    @Test
    public void testSplitStringStringFalseTruncate() throws Exception
    {
        assertThat(JOrphanUtils.split("a;,b;,;,;,d;,e;,;,f", ";,", false),
                CoreMatchers.equalTo(new String[] { "a", "b", "", "", "d", "e", "", "f" }));
    }
    @Test
    public void testSplitStringStringTrueDoubledSplitChar() throws Exception
    {
        assertThat(JOrphanUtils.split("a;;b;;;;;;d;;e;;;;f", ";;", true),
                CoreMatchers.equalTo(new String[] { "a", "b", "d", "e", "f" }));
    }
    @Test
    public void testSplitStringStringFalseDoubledSplitChar() throws Exception
    {
        assertThat(JOrphanUtils.split("a;;b;;;;;;d;;e;;;;f", ";;", false),
                CoreMatchers.equalTo(new String[] { "a", "b", "", "", "d", "e", "", "f" }));
    }

    // Empty string
    @Test
    public void testEmpty(){
        String[] out = JOrphanUtils.split("", ",", false);
        assertEquals(0, out.length);
    }

    // Tests for split(String,String,String) -- third argument is the default
    // value substituted for empty fields (null means drop them).
    @Test
    public void testSplitSSSSingleDelimiterWithDefaultValue() {
        // Test non-empty parameters
        assertThat(JOrphanUtils.split("a,bc,,", ",", "?"), CoreMatchers.equalTo(new String[] { "a", "bc", "?", "?" }));
    }
    @Test
    public void testSplitSSSSingleDelimiterWithEmptyValue() {
        // Empty default
        assertThat(JOrphanUtils.split("a,bc,,", ",", ""), CoreMatchers.equalTo(new String[] { "a", "bc", "", "" }));
    }
    @Test
    public void testSplitSSSEmptyDelimiter() {
        String in = "a,bc,,"; // Empty delimiter
        assertThat(JOrphanUtils.split(in, "", "?"), CoreMatchers.equalTo(new String[] { in }));
    }
    @Test
    public void testSplitSSSMultipleDelimCharsWithDefaultValue() {
        // Multiple delimiters
        assertThat(JOrphanUtils.split("a,b;c,,", ",;", "?"),
                CoreMatchers.equalTo(new String [] { "a", "b", "c", "?", "?" }));
    }
    @Test
    public void testSplitSSSMultipleDelimCharsWithEmptyValue() {
        // Multiple delimiters
        assertThat(JOrphanUtils.split("a,b;c,,", ",;", ""), CoreMatchers.equalTo(new String[] { "a", "b", "c", "", "" }));
    }
    @Test
    public void testSplitSSSSameDelimiterAsDefaultValue() {
        assertThat(JOrphanUtils.split("a,bc,,", ",", ","), CoreMatchers.equalTo(new String[] { "a", "bc", ",", "," }));
    }
    @Test(expected=NullPointerException.class)
    public void testSplitNullStringString() {
        JOrphanUtils.split(null, ",","?");
    }
    @Test(expected=NullPointerException.class)
    public void testSplitStringNullString() {
        JOrphanUtils.split("a,bc,,", null, "?");
    }
    @Test
    public void testSplitStringStringNullWithSingleDelimiter() {
        assertThat(JOrphanUtils.split("a,bc,,", ",", null), CoreMatchers.equalTo(new String[] { "a", "bc" }));
    }
    @Test
    public void testSplitStringStringNullWithMultipleDelimiter() {
        assertThat(JOrphanUtils.split("a,;bc,;,", ",;", null), CoreMatchers.equalTo(new String[] { "a", "bc" }));
    }
    @Test
    public void testSplitSSSWithEmptyInput() {
        String[] out = JOrphanUtils.split("", "," ,"x");
        assertEquals(0, out.length);
    }
    @Test
    public void testSplitSSSWithEmptyDelimiter() {
        final String in = "a,;bc,;,";
        assertThat(JOrphanUtils.split(in, "", "x"), CoreMatchers.equalTo(new String[] { in }));
    }

    // --- replaceAllChars: replace every occurrence of a char with a string ---
    @Test
    public void testreplaceAllChars(){
        assertEquals("", JOrphanUtils.replaceAllChars("",' ', "+"));
        assertEquals("source", JOrphanUtils.replaceAllChars("source",' ', "+"));
        assertEquals("so+rce", JOrphanUtils.replaceAllChars("source",'u', "+"));
        assertEquals("+so+urc+", JOrphanUtils.replaceAllChars("esoeurce",'e', "+"));
        assertEquals("AZAZsoAZurcAZ", JOrphanUtils.replaceAllChars("eesoeurce",'e', "AZ"));
        assertEquals("A+B++C+", JOrphanUtils.replaceAllChars("A B C ",' ', "+"));
        assertEquals("A%20B%20%20C%20", JOrphanUtils.replaceAllChars("A B C ",' ', "%20"));
    }

    // --- trim: strip leading/trailing characters from the given set ---
    @Test
    public void testTrim(){
        assertEquals("",JOrphanUtils.trim("", " ;"));
        assertEquals("",JOrphanUtils.trim(" ", " ;"));
        assertEquals("",JOrphanUtils.trim("; ", " ;"));
        assertEquals("",JOrphanUtils.trim(";;", " ;"));
        assertEquals("",JOrphanUtils.trim(" ", " ;"));
        assertEquals("abc",JOrphanUtils.trim("abc ;", " ;"));
    }

    // --- hex conversions ---
    @Test
    public void testbaToHexString(){
        assertEquals("",JOrphanUtils.baToHexString(new byte[]{}));
        assertEquals("00",JOrphanUtils.baToHexString(new byte[]{0}));
        assertEquals("0f107f8081ff",JOrphanUtils.baToHexString(new byte[]{15,16,127,-128,-127,-1}));
    }
    @Test
    public void testbaToByte() throws Exception{
        assertEqualsArray(new byte[]{},JOrphanUtils.baToHexBytes(new byte[]{}));
        assertEqualsArray(new byte[]{'0','0'},JOrphanUtils.baToHexBytes(new byte[]{0}));
        assertEqualsArray("0f107f8081ff".getBytes(StandardCharsets.UTF_8),
                JOrphanUtils.baToHexBytes(new byte[] { 15, 16, 127, -128, -127, -1 }));
    }

    // Element-wise byte-array comparison with an index in the failure message.
    private void assertEqualsArray(byte[] expected, byte[] actual){
        assertEquals("arrays must be same length",expected.length, actual.length);
        for(int i=0; i < expected.length; i++){
            assertEquals("values must be the same for index: "+i,expected[i],actual[i]);
        }
    }

    @Test
    public void testIsBlank() {
        assertTrue(JOrphanUtils.isBlank(""));
        assertTrue(JOrphanUtils.isBlank(null));
        assertTrue(JOrphanUtils.isBlank("    "));
        assertFalse(JOrphanUtils.isBlank(" zdazd dzd "));
    }

    // rightAlign pads on the left up to the requested width; longer input is untouched.
    @Test
    public void testRightAlign() {
        StringBuilder in = new StringBuilder("AZE");
        assertEquals("   AZE", JOrphanUtils.rightAlign(in, 6).toString());
        in = new StringBuilder("AZERTY");
        assertEquals("AZERTY", JOrphanUtils.rightAlign(in, 6).toString());
        in = new StringBuilder("baulpismuth");
        assertEquals("baulpismuth", JOrphanUtils.rightAlign(in, 6).toString());
        in = new StringBuilder("A");
        assertEquals("       A", JOrphanUtils.rightAlign(in, 8).toString());
    }

    // --- replaceAllWithRegex: returns {result, replacement count} ---
    @Test
    public void testReplaceAllWithRegexWithSearchValueContainedInReplaceValue() {
        // Bug 61054
        Assert.assertArrayEquals(new Object[] { "abcd", 1 },
                JOrphanUtils.replaceAllWithRegex("abc", "abc", "abcd", true));
    }
    @Test
    public void testReplaceAllWithRegex() {
        Assert.assertArrayEquals(new Object[] {"toto", 0},
                JOrphanUtils.replaceAllWithRegex("toto","ti", "ta", true));
        Assert.assertArrayEquals(new Object[] {"toto", 0},
                JOrphanUtils.replaceAllWithRegex("toto","TO", "TI", true));
        Assert.assertArrayEquals(new Object[] {"TITI", 2},
                JOrphanUtils.replaceAllWithRegex("toto","TO", "TI", false));
        Assert.assertArrayEquals(new Object[] {"TITI", 2},
                JOrphanUtils.replaceAllWithRegex("toto","to", "TI", true));
        Assert.assertArrayEquals(new Object[] {"TITIti", 2},
                JOrphanUtils.replaceAllWithRegex("tototi","to", "TI", true));
        Assert.assertArrayEquals(new Object[] {"TOTIti", 1},
                JOrphanUtils.replaceAllWithRegex("TOtoti","to", "TI", true));
        Assert.assertArrayEquals(new Object[] {"TOTI", 1},
                JOrphanUtils.replaceAllWithRegex("TOtoti","to.*", "TI", true));
        Assert.assertArrayEquals(new Object[] {"TOTI", 1},
                JOrphanUtils.replaceAllWithRegex("TOtoti","to.*ti", "TI", true));
        Assert.assertArrayEquals(new Object[] {"TOTITITITIaTITITIti", 7},
                JOrphanUtils.replaceAllWithRegex("TO1232a123ti","[0-9]", "TI", true));
        Assert.assertArrayEquals(new Object[] {"TOTIaTIti", 2},
                JOrphanUtils.replaceAllWithRegex("TO1232a123ti","[0-9]+", "TI", true));
        Assert.assertArrayEquals(new Object[] {"TO${var}2a${var}ti", 2},
                JOrphanUtils.replaceAllWithRegex("TO1232a123ti","123", "${var}", true));
        Assert.assertArrayEquals(new Object[] {"TO${var}2a${var}ti${var2}", 2},
                JOrphanUtils.replaceAllWithRegex("TO1232a123ti${var2}","123", "${var}", true));
    }

    // --- replaceValue: returns the replacement count, pushes the result via the setter ---
    @Test
    public void testReplaceValueWithNullValue() {
        Assert.assertThat(Integer.valueOf(JOrphanUtils.replaceValue(null, null, false, null, null)),
                CoreMatchers.is(Integer.valueOf(0)));
    }
    @Test
    public void testReplaceValueWithValidValueAndValidSetter() {
        Holder h = new Holder();
        Assert.assertThat(Integer.valueOf(JOrphanUtils.replaceValue("\\d+", "${port}", true, "80", s -> h.value = s)),
                CoreMatchers.is(Integer.valueOf(1)));
        Assert.assertThat(h.value, CoreMatchers.is("${port}"));
    }
    // Simple mutable capture target for the setter callback above.
    private static class Holder {
        String value;
    }
    @Test(expected = NullPointerException.class)
    public void testReplaceValueWithNullSetterThatGetsCalled() {
        JOrphanUtils.replaceValue("\\d+", "${port}", true, "80", null);
    }
}
| ufctester/apache-jmeter | test/src/org/apache/jorphan/util/TestJorphanUtils.java | Java | apache-2.0 | 14,035 |
package com.alipay.api.request;
import java.util.Map;
import com.alipay.api.AlipayRequest;
import com.alipay.api.internal.util.AlipayHashMap;
import com.alipay.api.response.AlipayMicropayOrderConfirmpayurlGetResponse;
import com.alipay.api.AlipayObject;
/**
* ALIPAY API: alipay.micropay.order.confirmpayurl.get request
*
* @author auto create
* @since 1.0, 2016-06-06 17:53:18
*/
public class AlipayMicropayOrderConfirmpayurlGetRequest implements AlipayRequest<AlipayMicropayOrderConfirmpayurlGetResponse> {
    private AlipayHashMap udfParams; // add user-defined text parameters
    private String apiVersion="1.0";

    /**
     * Alipay order number (freeze transaction serial number); returned by
     * Alipay when the freeze order was created.
     */
    private String alipayOrderNo;

    /**
     * Payment amount; must be within [0.01, 30] with at most two decimal places.
     */
    private String amount;

    /**
     * Payment memo.
     */
    private String memo;

    /**
     * Alipay user ID of the payee.
     */
    private String receiveUserId;

    /**
     * External order number for this transfer (letters and digits only, max length 32).
     */
    private String transferOutOrderNo;

    public void setAlipayOrderNo(String alipayOrderNo) {
        this.alipayOrderNo = alipayOrderNo;
    }
    public String getAlipayOrderNo() {
        return this.alipayOrderNo;
    }

    public void setAmount(String amount) {
        this.amount = amount;
    }
    public String getAmount() {
        return this.amount;
    }

    public void setMemo(String memo) {
        this.memo = memo;
    }
    public String getMemo() {
        return this.memo;
    }

    public void setReceiveUserId(String receiveUserId) {
        this.receiveUserId = receiveUserId;
    }
    public String getReceiveUserId() {
        return this.receiveUserId;
    }

    public void setTransferOutOrderNo(String transferOutOrderNo) {
        this.transferOutOrderNo = transferOutOrderNo;
    }
    public String getTransferOutOrderNo() {
        return this.transferOutOrderNo;
    }

    // Common request plumbing shared by all Alipay API request classes.
    private String terminalType;
    private String terminalInfo;
    private String prodCode;
    private String notifyUrl;
    private String returnUrl;
    private boolean needEncrypt=false;
    private AlipayObject bizModel=null;

    public String getNotifyUrl() {
        return this.notifyUrl;
    }

    public void setNotifyUrl(String notifyUrl) {
        this.notifyUrl = notifyUrl;
    }

    public String getReturnUrl() {
        return this.returnUrl;
    }

    public void setReturnUrl(String returnUrl) {
        this.returnUrl = returnUrl;
    }

    public String getApiVersion() {
        return this.apiVersion;
    }

    public void setApiVersion(String apiVersion) {
        this.apiVersion = apiVersion;
    }

    public void setTerminalType(String terminalType){
        this.terminalType=terminalType;
    }

    public String getTerminalType(){
        return this.terminalType;
    }

    public void setTerminalInfo(String terminalInfo){
        this.terminalInfo=terminalInfo;
    }

    public String getTerminalInfo(){
        return this.terminalInfo;
    }

    public void setProdCode(String prodCode) {
        this.prodCode=prodCode;
    }

    public String getProdCode() {
        return this.prodCode;
    }

    // Gateway method name identifying this API call.
    public String getApiMethodName() {
        return "alipay.micropay.order.confirmpayurl.get";
    }

    // Flattens the request fields (plus any user-defined parameters) into the
    // key/value map sent to the gateway.
    public Map<String, String> getTextParams() {
        AlipayHashMap txtParams = new AlipayHashMap();
        txtParams.put("alipay_order_no", this.alipayOrderNo);
        txtParams.put("amount", this.amount);
        txtParams.put("memo", this.memo);
        txtParams.put("receive_user_id", this.receiveUserId);
        txtParams.put("transfer_out_order_no", this.transferOutOrderNo);
        if(udfParams != null) {
            txtParams.putAll(this.udfParams);
        }
        return txtParams;
    }

    // Adds an extra text parameter not covered by the declared fields.
    public void putOtherTextParam(String key, String value) {
        if(this.udfParams == null) {
            this.udfParams = new AlipayHashMap();
        }
        this.udfParams.put(key, value);
    }

    public Class<AlipayMicropayOrderConfirmpayurlGetResponse> getResponseClass() {
        return AlipayMicropayOrderConfirmpayurlGetResponse.class;
    }

    public boolean isNeedEncrypt() {
        return this.needEncrypt;
    }

    public void setNeedEncrypt(boolean needEncrypt) {
        this.needEncrypt=needEncrypt;
    }

    public AlipayObject getBizModel() {
        return this.bizModel;
    }

    public void setBizModel(AlipayObject bizModel) {
        this.bizModel=bizModel;
    }
}
| wendal/alipay-sdk | src/main/java/com/alipay/api/request/AlipayMicropayOrderConfirmpayurlGetRequest.java | Java | apache-2.0 | 4,211 |
class ServiceTemplate < ApplicationRecord
  # Seconds to wait between provisioning groups of resources.
  DEFAULT_PROCESS_DELAY_BETWEEN_GROUPS = 120

  # Display names for the subtypes of a "generic" catalog item.
  GENERIC_ITEM_SUBTYPES = {
    "custom"          => _("Custom"),
    "vm"              => _("VM"),
    "playbook"        => _("Playbook"),
    "hosted_database" => _("Hosted Database"),
    "load_balancer"   => _("Load Balancer"),
    "storage"         => _("Storage")
  }.freeze

  # Display names for every supported catalog item type.
  CATALOG_ITEM_TYPES = {
    "amazon"                     => _("Amazon"),
    "azure"                      => _("Azure"),
    "generic"                    => _("Generic"),
    "generic_orchestration"      => _("Orchestration"),
    "generic_ansible_playbook"   => _("Ansible Playbook"),
    "generic_ansible_tower"      => _("AnsibleTower"),
    "generic_container_template" => _("OpenShift Template"),
    "google"                     => _("Google"),
    "microsoft"                  => _("SCVMM"),
    "openstack"                  => _("OpenStack"),
    "redhat"                     => _("RHEV"),
    "vmware"                     => _("VMware")
  }.freeze

  # ResourceAction attributes that may be changed via update_resource_actions.
  RESOURCE_ACTION_UPDATE_ATTRS = [:dialog,
                                  :dialog_id,
                                  :fqname,
                                  :configuration_template,
                                  :configuration_template_id,
                                  :configuration_template_type].freeze
  include CustomActionsMixin
  include ServiceMixin
  include OwnershipMixin
  include NewWithTypeStiMixin
  include TenancyMixin
  include_concern 'Filter'
  belongs_to :tenant
  # # These relationships are used to specify children spawned from a parent service
  # has_many :child_services, :class_name => "ServiceTemplate", :foreign_key => :service_template_id
  # belongs_to :parent_service, :class_name => "ServiceTemplate", :foreign_key => :service_template_id
  # # These relationships are used for resources that are processed as part of the service
  # has_many :vms_and_templates, :through => :service_resources, :source => :resource, :source_type => 'VmOrTemplate'
  has_many :service_templates, :through => :service_resources, :source => :resource, :source_type => 'ServiceTemplate'
  has_many :services
  has_one :picture, :dependent => :destroy, :as => :resource, :autosave => true
  belongs_to :service_template_catalog
  has_many :dialogs, -> { distinct }, :through => :resource_actions
  virtual_column :type_display, :type => :string
  virtual_column :template_valid, :type => :boolean
  virtual_column :template_valid_error_message, :type => :string
  default_value_for :service_type, 'unknown'
  default_value_for(:generic_subtype) { |st| 'custom' if st.prov_type == 'generic' }
  virtual_has_one :config_info, :class_name => "Hash"
  scope :with_service_template_catalog_id, ->(cat_id) { where(:service_template_catalog_id => cat_id) }
  scope :without_service_template_catalog_id, -> { where(:service_template_catalog_id => nil) }
  scope :with_existent_service_template_catalog_id, -> { where.not(:service_template_catalog_id => nil) }
  scope :displayed, -> { where(:display => true) }
def self.catalog_item_types
ci_types = Set.new(Rbac.filtered(ExtManagementSystem.all).flat_map(&:supported_catalog_types))
ci_types.add('generic_orchestration') if Rbac.filtered(OrchestrationTemplate).exists?
ci_types.add('generic')
CATALOG_ITEM_TYPES.each.with_object({}) do |(key, description), hash|
hash[key] = { :description => description, :display => ci_types.include?(key) }
end
end
def self.create_catalog_item(options, auth_user)
transaction do
create_from_options(options).tap do |service_template|
config_info = options[:config_info].except(:provision, :retirement, :reconfigure)
workflow_class = MiqProvisionWorkflow.class_for_source(config_info[:src_vm_id])
if workflow_class
request = workflow_class.new(config_info, auth_user).make_request(nil, config_info)
service_template.add_resource(request)
end
service_template.create_resource_actions(options[:config_info])
end
end
end
def self.class_from_request_data(data)
request_type = data['prov_type']
if request_type.include?('generic_')
generic_type = request_type.split('generic_').last
"ServiceTemplate#{generic_type.camelize}".constantize
else
ServiceTemplate
end
end
def update_catalog_item(options, auth_user = nil)
config_info = validate_update_config_info(options)
unless config_info
update_attributes!(options)
return reload
end
transaction do
update_from_options(options)
update_service_resources(config_info, auth_user)
update_resource_actions(config_info)
save!
end
reload
end
def children
service_templates
end
def descendants
children.flat_map { |child| [child] + child.descendants }
end
def subtree
[self] + descendants
end
def vms_and_templates
[]
end
def destroy
parent_svcs = parent_services
unless parent_svcs.blank?
raise MiqException::MiqServiceError, _("Cannot delete a service that is the child of another service.")
end
service_resources.each do |sr|
rsc = sr.resource
rsc.destroy if rsc.kind_of?(MiqProvisionRequestTemplate)
end
super
end
def request_class
ServiceTemplateProvisionRequest
end
def request_type
"clone_to_service"
end
def config_info
options[:config_info] || construct_config_info
end
def create_service(service_task, parent_svc = nil)
nh = attributes.dup
nh['options'][:dialog] = service_task.options[:dialog]
(nh.keys - Service.column_names + %w(created_at guid service_template_id updated_at id type prov_type)).each { |key| nh.delete(key) }
# Hide child services by default
nh['display'] = false if parent_svc
# If display is nil, set it to false
nh['display'] ||= false
# convert template class name to service class name by naming convention
nh['type'] = self.class.name.sub('Template', '')
nh['initiator'] = service_task.options[:initiator] if service_task.options[:initiator]
svc = Service.create(nh)
svc.service_template = self
service_resources.each do |sr|
nh = sr.attributes.dup
%w(id created_at updated_at service_template_id).each { |key| nh.delete(key) }
svc.add_resource(sr.resource, nh) unless sr.resource.nil?
end
if parent_svc
service_resource = ServiceResource.find_by(:id => service_task.options[:service_resource_id])
parent_svc.add_resource!(svc, service_resource)
end
svc.save
svc
end
def set_service_type
svc_type = nil
if service_resources.size.zero?
svc_type = 'unknown'
else
service_resources.each do |sr|
if sr.resource_type == 'Service' || sr.resource_type == 'ServiceTemplate'
svc_type = 'composite'
break
end
end
svc_type = 'atomic' if svc_type.blank?
end
self.service_type = svc_type
end
def composite?
service_type.to_s.include?('composite')
end
def atomic?
service_type.to_s.include?('atomic')
end
def type_display
case service_type
when "atomic" then "Item"
when "composite" then "Bundle"
when nil then "Unknown"
else
service_type.to_s.capitalize
end
end
def create_tasks_for_service(service_task, parent_svc)
unless parent_svc
return [] unless self.class.include_service_template?(service_task,
service_task.source_id,
parent_svc)
end
svc = create_service(service_task, parent_svc)
set_ownership(svc, service_task.get_user)
service_task.destination = svc
create_subtasks(service_task, svc)
end
# default implementation to create subtasks from service resources
def create_subtasks(parent_service_task, parent_service)
tasks = []
service_resources.each do |child_svc_rsc|
scaling_min = child_svc_rsc.scaling_min
1.upto(scaling_min).each do |scaling_idx|
nh = parent_service_task.attributes.dup
%w(id created_on updated_on type state status message).each { |key| nh.delete(key) }
nh['options'] = parent_service_task.options.dup
nh['options'].delete(:child_tasks)
# Initial Options[:dialog] to an empty hash so we do not pass down dialog values to child services tasks
nh['options'][:dialog] = {}
next if child_svc_rsc.resource_type == "ServiceTemplate" &&
!self.class.include_service_template?(parent_service_task,
child_svc_rsc.resource.id,
parent_service)
new_task = parent_service_task.class.new(nh)
new_task.options.merge!(
:src_id => child_svc_rsc.resource.id,
:scaling_idx => scaling_idx,
:scaling_min => scaling_min,
:service_resource_id => child_svc_rsc.id,
:parent_service_id => parent_service.id,
:parent_task_id => parent_service_task.id,
)
new_task.state = 'pending'
new_task.status = 'Ok'
new_task.source = child_svc_rsc.resource
new_task.save!
new_task.after_request_task_create
parent_service_task.miq_request.miq_request_tasks << new_task
tasks << new_task
end
end
tasks
end
def set_ownership(service, user)
return if user.nil?
service.evm_owner = user
if user.current_group
$log.info("Setting Service Owning User to Name=#{user.name}, ID=#{user.id}, Group to Name=#{user.current_group.name}, ID=#{user.current_group.id}")
service.miq_group = user.current_group
else
$log.info("Setting Service Owning User to Name=#{user.name}, ID=#{user.id}")
end
service.save
end
def self.default_provisioning_entry_point(service_type)
if service_type == 'atomic'
'/Service/Provisioning/StateMachines/ServiceProvision_Template/CatalogItemInitialization'
else
'/Service/Provisioning/StateMachines/ServiceProvision_Template/CatalogBundleInitialization'
end
end
def self.default_retirement_entry_point
'/Service/Retirement/StateMachines/ServiceRetirement/Default'
end
def self.default_reconfiguration_entry_point
nil
end
def template_valid?
validate_template[:valid]
end
alias template_valid template_valid?
def template_valid_error_message
validate_template[:message]
end
def validate_template
missing_resources = service_resources.select { |sr| sr.resource.nil? }
if missing_resources.present?
missing_list = missing_resources.collect { |sr| "#{sr.resource_type}:#{sr.resource_id}" }.join(", ")
return {:valid => false,
:message => "Missing Service Resource(s): #{missing_list}"}
end
service_resources.detect do |s|
r = s.resource
r.respond_to?(:template_valid?) && !r.template_valid?
end.try(:resource).try(:validate_template) || {:valid => true, :message => nil}
end
def validate_order
service_template_catalog && display
end
alias orderable? validate_order
def provision_action
resource_actions.find_by(:action => "Provision")
end
def update_resource_actions(ae_endpoints)
resource_action_list.each do |action|
resource_params = ae_endpoints[action[:param_key]]
resource_action = resource_actions.find_by(:action => action[:name])
# If the action exists in updated parameters
if resource_params
# And the resource action exists on the template already, update it
if resource_action
resource_action.update_attributes!(resource_params.slice(*RESOURCE_ACTION_UPDATE_ATTRS))
# If the resource action does not exist, create it
else
build_resource_action(resource_params, action)
end
elsif resource_action
# If the endpoint does not exist in updated parameters, but exists on the template, delete it
resource_action.destroy
end
end
end
def create_resource_actions(ae_endpoints)
ae_endpoints ||= {}
resource_action_list.each do |action|
ae_endpoint = ae_endpoints[action[:param_key]]
next unless ae_endpoint
build_resource_action(ae_endpoint, action)
end
save!
end
def self.create_from_options(options)
create(options.except(:config_info).merge(:options => { :config_info => options[:config_info] }))
end
private_class_method :create_from_options
def provision_request(user, options = nil, request_options = nil)
result = provision_workflow(user, options, request_options).submit_request
raise result[:errors].join(", ") if result[:errors].any?
result[:request]
end
def provision_workflow(user, dialog_options = nil, request_options = nil)
dialog_options ||= {}
request_options ||= {}
ra_options = { :target => self, :initiator => request_options[:initiator] }
ResourceActionWorkflow.new({}, user,
provision_action, ra_options).tap do |wf|
wf.request_options = request_options
dialog_options.each { |key, value| wf.set_value(key, value) }
end
end
def add_resource(rsc, options = {})
super
set_service_type
end
private
def update_service_resources(config_info, auth_user = nil)
config_info = config_info.except(:provision, :retirement, :reconfigure)
workflow_class = MiqProvisionWorkflow.class_for_source(config_info[:src_vm_id])
if workflow_class
service_resources.find_by(:resource_type => 'MiqRequest').try(:destroy)
new_request = workflow_class.new(config_info, auth_user).make_request(nil, config_info)
add_resource!(new_request)
end
end
def build_resource_action(ae_endpoint, action)
fqname = if ae_endpoint.empty?
self.class.send(action[:method], *action[:args]) || ""
else
ae_endpoint[:fqname]
end
build_options = {:action => action[:name],
:fqname => fqname,
:ae_attributes => {:service_action => action[:name]}}
build_options.merge!(ae_endpoint.slice(*RESOURCE_ACTION_UPDATE_ATTRS))
resource_actions.build(build_options)
end
def validate_update_config_info(options)
if options[:service_type] && options[:service_type] != service_type
raise _('service_type cannot be changed')
end
if options[:prov_type] && options[:prov_type] != prov_type
raise _('prov_type cannot be changed')
end
options[:config_info]
end
def resource_action_list
[
{:name => ResourceAction::PROVISION,
:param_key => :provision,
:method => 'default_provisioning_entry_point',
:args => [service_type]},
{:name => ResourceAction::RECONFIGURE,
:param_key => :reconfigure,
:method => 'default_reconfiguration_entry_point',
:args => []},
{:name => ResourceAction::RETIREMENT,
:param_key => :retirement,
:method => 'default_retirement_entry_point',
:args => []}
]
end
def update_from_options(params)
options[:config_info] = params[:config_info]
update_attributes!(params.except(:config_info))
end
def construct_config_info
config_info = {}
if service_resources.where(:resource_type => 'MiqRequest').exists?
config_info.merge!(service_resources.find_by(:resource_type => 'MiqRequest').resource.options.compact)
end
config_info.merge!(resource_actions_info)
end
def resource_actions_info
config_info = {}
resource_actions.each do |resource_action|
resource_options = resource_action.slice(:dialog_id,
:configuration_template_type,
:configuration_template_id).compact
resource_options[:fqname] = resource_action.fqname
config_info[resource_action.action.downcase.to_sym] = resource_options.symbolize_keys
end
config_info
end
def generic_custom_buttons
CustomButton.buttons_for("Service")
end
end
| israel-hdez/manageiq | app/models/service_template.rb | Ruby | apache-2.0 | 16,451 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.shardingsphere.data.pipeline.scenario.rulealtered.spi;
import lombok.ToString;
import org.apache.shardingsphere.data.pipeline.spi.lock.RuleBasedJobLockAlgorithm;
/**
 * Default metadata checkout lock algorithm.
 *
 * <p>Placeholder implementation: every lock operation is a no-op, so jobs
 * configured with the {@code DEFAULT} type currently run without any
 * metadata checkout locking.</p>
 */
@ToString
public final class DefaultMetadataCheckoutLockAlgorithm implements RuleBasedJobLockAlgorithm {
    
    @Override
    public void init() {
    }
    
    // TODO impl default checkoutLockAlgorithm
    @Override
    public void lock(final String schemaName, final String jobId) {
    }
    
    @Override
    public void releaseLock(final String schemaName, final String jobId) {
    }
    
    @Override
    public String getType() {
        return "DEFAULT";
    }
}
| apache/incubator-shardingsphere | shardingsphere-kernel/shardingsphere-data-pipeline/shardingsphere-data-pipeline-core/src/main/java/org/apache/shardingsphere/data/pipeline/scenario/rulealtered/spi/DefaultMetadataCheckoutLockAlgorithm.java | Java | apache-2.0 | 1,517 |
package interfaces;// interfaces/AdaptedRandomDoubles.java
// (c)2017 MindView LLC: see Copyright.txt
// We make no guarantees that this code is fit for any purpose.
// Visit http://OnJava8.com for more book information.
// Creating an adapter with inheritance
import java.nio.*;
import java.util.*;
// Adapter that lets a RandomDoubles generator act as a Readable for
// Scanner: each read() appends one random double followed by a space, and
// the stream reports end-of-input after the configured number of values.
public class AdaptedRandomDoubles
implements RandomDoubles, Readable {
  private int remaining;
  public AdaptedRandomDoubles(int count) {
    remaining = count;
  }
  @Override
  public int read(CharBuffer cb) {
    if(remaining-- == 0)
      return -1; // signal end of stream to the Scanner
    // One value plus a whitespace separator so Scanner can tokenize.
    String chunk = Double.toString(next()) + " ";
    cb.append(chunk);
    return chunk.length();
  }
  public static void main(String[] args) {
    Scanner in = new Scanner(new AdaptedRandomDoubles(7));
    while(in.hasNextDouble())
      System.out.print(in.nextDouble() + " ");
  }
}
/* Output:
0.7271157860730044 0.5309454508634242
0.16020656493302599 0.18847866977771732
0.5166020801268457 0.2678662084200585
0.2613610344283964
*/
| mayonghui2112/helloWorld | sourceCode/testMaven/onjava8/src/main/java/interfaces/AdaptedRandomDoubles.java | Java | apache-2.0 | 992 |
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package shim
import (
"context"
"flag"
"fmt"
"io"
"os"
"runtime"
"runtime/debug"
"strings"
"time"
"github.com/containerd/containerd/events"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/namespaces"
shimapi "github.com/containerd/containerd/runtime/v2/task"
"github.com/containerd/containerd/version"
"github.com/containerd/ttrpc"
"github.com/gogo/protobuf/proto"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// Client for a shim server
type Client struct {
	service shimapi.TaskService
	context context.Context
	signals chan os.Signal
}
// Publisher for events. Close releases the connection back to containerd.
type Publisher interface {
	events.Publisher
	io.Closer
}
// Init func for the creation of a shim server; the func() argument is a
// cancel callback that tears down the shim's context when invoked.
type Init func(context.Context, string, Publisher, func()) (Shim, error)
// Shim server interface
type Shim interface {
	shimapi.TaskService
	Cleanup(ctx context.Context) (*shimapi.DeleteResponse, error)
	StartShim(ctx context.Context, id, containerdBinary, containerdAddress, containerdTTRPCAddress string) (string, error)
}
// OptsKey is the context key for the Opts value.
type OptsKey struct{}
// Opts are context options associated with the shim invocation.
type Opts struct {
	BundlePath string
	Debug      bool
}
// BinaryOpts allows the configuration of a shims binary setup
type BinaryOpts func(*Config)
// Config of shim binary options provided by shim implementations
type Config struct {
	// NoSubreaper disables setting the shim as a child subreaper
	NoSubreaper bool
	// NoReaper disables the shim binary from reaping any child process implicitly
	NoReaper bool
	// NoSetupLogger disables automatic configuration of logrus to use the shim FIFO
	NoSetupLogger bool
}
// Command-line state shared by every shim invocation; populated once by
// parseFlags before any other setup runs.
var (
	debugFlag            bool
	versionFlag          bool
	idFlag               string
	namespaceFlag        string
	socketFlag           string
	bundlePath           string
	addressFlag          string
	containerdBinaryFlag string
	action               string
)
const (
	// ttrpcAddressEnv names the env var carrying containerd's ttrpc endpoint.
	ttrpcAddressEnv = "TTRPC_ADDRESS"
)
// parseFlags registers and parses the standard shim command-line flags,
// then records the positional action verb ("start", "delete", or empty
// for normal serving) from the first non-flag argument.
func parseFlags() {
	flag.BoolVar(&debugFlag, "debug", false, "enable debug output in logs")
	flag.BoolVar(&versionFlag, "v", false, "show the shim version and exit")
	flag.StringVar(&namespaceFlag, "namespace", "", "namespace that owns the shim")
	flag.StringVar(&idFlag, "id", "", "id of the task")
	flag.StringVar(&socketFlag, "socket", "", "abstract socket path to serve")
	flag.StringVar(&bundlePath, "bundle", "", "path to the bundle if not workdir")
	flag.StringVar(&addressFlag, "address", "", "grpc address back to main containerd")
	flag.StringVar(&containerdBinaryFlag, "publish-binary", "containerd", "path to publish binary (used for publishing events)")
	flag.Parse()
	action = flag.Arg(0)
}
// setRuntime tunes the Go runtime for a long-lived, low-footprint shim:
// aggressive GC, periodic release of free memory back to the OS, and a
// small GOMAXPROCS unless the operator overrode it via the environment.
func setRuntime() {
	debug.SetGCPercent(40)
	go func() {
		for range time.Tick(30 * time.Second) {
			debug.FreeOSMemory()
		}
	}()
	if os.Getenv("GOMAXPROCS") == "" {
		// If GOMAXPROCS hasn't been set, we default to a value of 2 to reduce
		// the number of Go stacks present in the shim.
		runtime.GOMAXPROCS(2)
	}
}
// setLogger configures the process-wide logrus logger: fixed-width
// RFC3339Nano timestamps, debug level when -debug was passed, and output
// redirected to the shim log destination opened by openLog for id.
func setLogger(ctx context.Context, id string) error {
	logrus.SetFormatter(&logrus.TextFormatter{
		TimestampFormat: log.RFC3339NanoFixed,
		FullTimestamp:   true,
	})
	if debugFlag {
		logrus.SetLevel(logrus.DebugLevel)
	}
	f, err := openLog(ctx, id)
	if err != nil {
		return err
	}
	logrus.SetOutput(f)
	return nil
}
// Run initializes and runs a shim server.
// On failure the error is printed to stderr prefixed with the runtime id
// and the process exits with status 1.
func Run(id string, initFunc Init, opts ...BinaryOpts) {
	var config Config
	for _, o := range opts {
		o(&config)
	}
	if err := run(id, initFunc, config); err != nil {
		fmt.Fprintf(os.Stderr, "%s: %s\n", id, err)
		os.Exit(1)
	}
}
// run is the real entry point behind Run: parse flags, prepare the
// runtime/signal handling/event publisher, build the shim service via
// initFunc, then dispatch on the action verb:
//
//	"delete": clean up the bundle and write the DeleteResponse to stdout
//	"start":  start a new shim process and write its address to stdout
//	default:  serve the task API until the shim context is cancelled
func run(id string, initFunc Init, config Config) error {
	parseFlags()
	if versionFlag {
		fmt.Printf("%s:\n", os.Args[0])
		fmt.Println("  Version: ", version.Version)
		fmt.Println("  Revision:", version.Revision)
		fmt.Println("  Go version:", version.GoVersion)
		fmt.Println("")
		return nil
	}
	setRuntime()
	signals, err := setupSignals(config)
	if err != nil {
		return err
	}
	if !config.NoSubreaper {
		if err := subreaper(); err != nil {
			return err
		}
	}
	// Events are published back to containerd over its ttrpc endpoint.
	ttrpcAddress := os.Getenv(ttrpcAddressEnv)
	publisher, err := NewPublisher(ttrpcAddress)
	if err != nil {
		return err
	}
	defer publisher.Close()
	if namespaceFlag == "" {
		return fmt.Errorf("shim namespace cannot be empty")
	}
	// Build the shim context: namespace, invocation options and logger.
	ctx := namespaces.WithNamespace(context.Background(), namespaceFlag)
	ctx = context.WithValue(ctx, OptsKey{}, Opts{BundlePath: bundlePath, Debug: debugFlag})
	ctx = log.WithLogger(ctx, log.G(ctx).WithField("runtime", id))
	// cancel is handed to the service so it can stop the shim itself.
	ctx, cancel := context.WithCancel(ctx)
	service, err := initFunc(ctx, idFlag, publisher, cancel)
	if err != nil {
		return err
	}
	switch action {
	case "delete":
		logger := logrus.WithFields(logrus.Fields{
			"pid":       os.Getpid(),
			"namespace": namespaceFlag,
		})
		go handleSignals(ctx, logger, signals)
		response, err := service.Cleanup(ctx)
		if err != nil {
			return err
		}
		data, err := proto.Marshal(response)
		if err != nil {
			return err
		}
		// The caller (containerd) reads the marshalled response from stdout.
		if _, err := os.Stdout.Write(data); err != nil {
			return err
		}
		return nil
	case "start":
		address, err := service.StartShim(ctx, idFlag, containerdBinaryFlag, addressFlag, ttrpcAddress)
		if err != nil {
			return err
		}
		if _, err := os.Stdout.WriteString(address); err != nil {
			return err
		}
		return nil
	default:
		if !config.NoSetupLogger {
			if err := setLogger(ctx, idFlag); err != nil {
				return err
			}
		}
		client := NewShimClient(ctx, service, signals)
		if err := client.Serve(); err != nil {
			// Cancellation is the normal shutdown path, not an error.
			if err != context.Canceled {
				return err
			}
		}
		// Give the publisher a bounded window to flush pending events.
		select {
		case <-publisher.Done():
			return nil
		case <-time.After(5 * time.Second):
			return errors.New("publisher not closed")
		}
	}
}
// NewShimClient wraps a task service, context and signal channel in a
// Client that is ready to be served.
func NewShimClient(ctx context.Context, svc shimapi.TaskService, signals chan os.Signal) *Client {
	return &Client{
		service: svc,
		context: ctx,
		signals: signals,
	}
}
// Serve the shim server: register the task service on a new ttrpc server,
// serve it on the configured socket, wire up stack-dump signals, and then
// block handling process signals until the context ends.
func (s *Client) Serve() error {
	dump := make(chan os.Signal, 32)
	setupDumpStacks(dump)
	path, err := os.Getwd()
	if err != nil {
		return err
	}
	server, err := newServer()
	if err != nil {
		return errors.Wrap(err, "failed creating server")
	}
	logrus.Debug("registering ttrpc server")
	shimapi.RegisterTaskService(server, s.service)
	if err := serve(s.context, server, socketFlag); err != nil {
		return err
	}
	logger := logrus.WithFields(logrus.Fields{
		"pid":       os.Getpid(),
		"path":      path,
		"namespace": namespaceFlag,
	})
	// Dump all goroutine stacks whenever a dump signal arrives.
	go func() {
		for range dump {
			dumpStacks(logger)
		}
	}()
	return handleSignals(s.context, logger, s.signals)
}
// serve serves the ttrpc API over a unix socket at the provided path
// this function does not block; serving happens on a background goroutine
// that owns (and closes) the listener. A "closed network connection"
// error is the expected shutdown signal and is not treated as fatal.
func serve(ctx context.Context, server *ttrpc.Server, path string) error {
	l, err := serveListener(path)
	if err != nil {
		return err
	}
	go func() {
		defer l.Close()
		if err := server.Serve(ctx, l); err != nil &&
			!strings.Contains(err.Error(), "use of closed network connection") {
			logrus.WithError(err).Fatal("containerd-shim: ttrpc server failure")
		}
	}()
	return nil
}
// dumpStacks logs the stacks of every goroutine. The buffer is grown
// (doubling from 16KB) until runtime.Stack no longer fills it completely,
// so the dump is never truncated.
func dumpStacks(logger *logrus.Entry) {
	var (
		buf       []byte
		stackSize int
	)
	bufferLen := 16384
	for stackSize == len(buf) {
		buf = make([]byte, bufferLen)
		stackSize = runtime.Stack(buf, true)
		bufferLen *= 2
	}
	buf = buf[:stackSize]
	logger.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf)
}
| rancher/k3s | vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go | GO | apache-2.0 | 8,126 |
/**
* $RCSfile$
* $Revision: 7071 $
* $Date: 2007-02-11 18:59:05 -0600 (Sun, 11 Feb 2007) $
*
* Copyright 2003-2007 Jive Software.
*
* All rights reserved. Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jivesoftware.smack;
import org.jivesoftware.smack.packet.StreamError;
import org.jivesoftware.smack.packet.XMPPError;
import java.io.PrintStream;
import java.io.PrintWriter;
/**
* A generic exception that is thrown when an error occurs performing an
* XMPP operation. XMPP servers can respond to error conditions with an error code
* and textual description of the problem, which are encapsulated in the XMPPError
* class. When appropriate, an XMPPError instance is attached instances of this exception.<p>
*
 * When a stream error occurs, the server will send a stream error to the client before
* closing the connection. Stream errors are unrecoverable errors. When a stream error
* is sent to the client an XMPPException will be thrown containing the StreamError sent
* by the server.
*
* @see XMPPError
* @author Matt Tucker
*/
public class XMPPException extends Exception {
    // At most one of these is typically set, depending on which constructor
    // was used; accessors return null for the ones that were not supplied.
    private StreamError streamError = null;
    private XMPPError error = null;
    private Throwable wrappedThrowable = null;
    /**
     * Creates a new XMPPException.
     */
    public XMPPException() {
        super();
    }
    /**
     * Creates a new XMPPException with a description of the exception.
     *
     * @param message description of the exception.
     */
    public XMPPException(String message) {
        super(message);
    }
    /**
     * Creates a new XMPPException with the Throwable that was the root cause of the
     * exception.
     *
     * @param wrappedThrowable the root cause of the exception.
     */
    public XMPPException(Throwable wrappedThrowable) {
        super();
        this.wrappedThrowable = wrappedThrowable;
    }
    /**
     * Creates a new XMPPException with the stream error that was the root cause of the
     * exception. When a stream error is received from the server then the underlying
     * TCP connection will be closed by the server.
     *
     * @param streamError the root cause of the exception.
     */
    public XMPPException(StreamError streamError) {
        super();
        this.streamError = streamError;
    }
    /**
     * Creates a new XMPPException with the XMPPError that was the root cause of the
     * exception.
     *
     * @param error the root cause of the exception.
     */
    public XMPPException(XMPPError error) {
        super();
        this.error = error;
    }
    /**
     * Creates a new XMPPException with a description of the exception and the
     * Throwable that was the root cause of the exception.
     *
     * @param message a description of the exception.
     * @param wrappedThrowable the root cause of the exception.
     */
    public XMPPException(String message, Throwable wrappedThrowable) {
        super(message);
        this.wrappedThrowable = wrappedThrowable;
    }
    /**
     * Creates a new XMPPException with a description of the exception, an XMPPError,
     * and the Throwable that was the root cause of the exception.
     *
     * @param message a description of the exception.
     * @param error the root cause of the exception.
     * @param wrappedThrowable the root cause of the exception.
     */
    public XMPPException(String message, XMPPError error, Throwable wrappedThrowable) {
        super(message);
        this.error = error;
        this.wrappedThrowable = wrappedThrowable;
    }
    /**
     * Creates a new XMPPException with a description of the exception and the
     * XMPPError that was the root cause of the exception.
     *
     * @param message a description of the exception.
     * @param error the root cause of the exception.
     */
    public XMPPException(String message, XMPPError error) {
        super(message);
        this.error = error;
    }
    /**
     * Returns the XMPPError associated with this exception, or <tt>null</tt> if there
     * isn't one.
     *
     * @return the XMPPError associated with this exception.
     */
    public XMPPError getXMPPError() {
        return error;
    }
    /**
     * Returns the StreamError associated with this exception, or <tt>null</tt> if there
     * isn't one. The underlying TCP connection is closed by the server after sending the
     * stream error to the client.
     *
     * @return the StreamError associated with this exception.
     */
    public StreamError getStreamError() {
        return streamError;
    }
    /**
     * Returns the Throwable associated with this exception, or <tt>null</tt> if there
     * isn't one.
     *
     * @return the Throwable associated with this exception.
     */
    public Throwable getWrappedThrowable() {
        return wrappedThrowable;
    }
    // Prints this exception's stack trace to System.err, followed by the
    // wrapped (nested) exception's trace when one is present.
    public void printStackTrace() {
        printStackTrace(System.err);
    }
    public void printStackTrace(PrintStream out) {
        super.printStackTrace(out);
        if (wrappedThrowable != null) {
            out.println("Nested Exception: ");
            wrappedThrowable.printStackTrace(out);
        }
    }
    public void printStackTrace(PrintWriter out) {
        super.printStackTrace(out);
        if (wrappedThrowable != null) {
            out.println("Nested Exception: ");
            wrappedThrowable.printStackTrace(out);
        }
    }
    public String getMessage() {
        String msg = super.getMessage();
        // If the message was not set, but there is an XMPPError, return the
        // XMPPError as the message.
        if (msg == null && error != null) {
            return error.toString();
        }
        else if (msg == null && streamError != null) {
            return streamError.toString();
        }
        return msg;
    }
    // Human-readable summary: message, then any XMPP/stream error, then the
    // nested cause, in that order.
    public String toString() {
        StringBuilder buf = new StringBuilder();
        String message = super.getMessage();
        if (message != null) {
            buf.append(message).append(": ");
        }
        if (error != null) {
            buf.append(error);
        }
        if (streamError != null) {
            buf.append(streamError);
        }
        if (wrappedThrowable != null) {
            buf.append("\n  -- caused by: ").append(wrappedThrowable);
        }
        return buf.toString();
    }
} | ice-coffee/EIM | src/org/jivesoftware/smack/XMPPException.java | Java | apache-2.0 | 6,879 |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/ssm-incidents/model/IncidentRecordSource.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <utility>
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
namespace Aws
{
namespace SSMIncidents
{
namespace Model
{

// Default construction leaves every field unset, so Jsonize() emits an
// empty payload until setters or JSON deserialization populate members.
IncidentRecordSource::IncidentRecordSource() :
    m_createdByHasBeenSet(false),
    m_invokedByHasBeenSet(false),
    m_resourceArnHasBeenSet(false),
    m_sourceHasBeenSet(false)
{
}

// Delegate to the default constructor so the "nothing set" initial state
// is defined in exactly one place, then populate from the JSON document.
IncidentRecordSource::IncidentRecordSource(JsonView jsonValue) :
    IncidentRecordSource()
{
  *this = jsonValue;
}

// Copy each known key present in jsonValue into the corresponding member
// and mark it as set; keys absent from the document are left untouched.
IncidentRecordSource& IncidentRecordSource::operator =(JsonView jsonValue)
{
  if(jsonValue.ValueExists("createdBy"))
  {
    m_createdBy = jsonValue.GetString("createdBy");
    m_createdByHasBeenSet = true;
  }
  if(jsonValue.ValueExists("invokedBy"))
  {
    m_invokedBy = jsonValue.GetString("invokedBy");
    m_invokedByHasBeenSet = true;
  }
  if(jsonValue.ValueExists("resourceArn"))
  {
    m_resourceArn = jsonValue.GetString("resourceArn");
    m_resourceArnHasBeenSet = true;
  }
  if(jsonValue.ValueExists("source"))
  {
    m_source = jsonValue.GetString("source");
    m_sourceHasBeenSet = true;
  }
  return *this;
}

// Serialize only the fields that have been set.
JsonValue IncidentRecordSource::Jsonize() const
{
  JsonValue payload;
  if(m_createdByHasBeenSet)
  {
   payload.WithString("createdBy", m_createdBy);
  }
  if(m_invokedByHasBeenSet)
  {
   payload.WithString("invokedBy", m_invokedBy);
  }
  if(m_resourceArnHasBeenSet)
  {
   payload.WithString("resourceArn", m_resourceArn);
  }
  if(m_sourceHasBeenSet)
  {
   payload.WithString("source", m_source);
  }
  return payload;
}

} // namespace Model
} // namespace SSMIncidents
} // namespace Aws
| awslabs/aws-sdk-cpp | aws-cpp-sdk-ssm-incidents/source/model/IncidentRecordSource.cpp | C++ | apache-2.0 | 1,931 |
/*
Copyright 2008-2014 CNR-ISTI, http://isti.cnr.it
Institute of Information Science and Technologies
of the Italian National Research Council
See the NOTICE file distributed with this work for additional
information regarding copyright ownership
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package it.cnr.isti.zigbee.zcl.library.api.core;
/**
 *
 * Helper interface for serializing a ZCL frame onto an array of bytes.
 *
 * @author <a href="mailto:stefano.lenzi@isti.cnr.it">Stefano "Kismet" Lenzi</a>
 * @author <a href="mailto:francesco.furfari@isti.cnr.it">Francesco Furfari</a>
 * @version $LastChangedRevision$ ($LastChangedDate: 2013-08-06 18:00:05
 *          +0200 (Tue, 06 Aug 2013) $)
 * @since 0.1.0
 *
 */
public interface ZBSerializer {
    /**
     * Append a value to the stream, encoded according to the given ZigBee type.
     *
     * @param data
     *            {@link Object} containing the value to append
     * @param type
     *            {@link ZigBeeType} to select how data has to be appended
     * @since 0.4.0
     */
    public void appendZigBeeType(Object data, ZigBeeType type);
    /**
     * Append a {@link String} to the stream by prefixing it with the length of
     * the String itself <br>
     * as specified by the <b>ZigBee Cluster Library</b> (<i>Document
     * 075123r01ZB</i>)
     *
     * @param str
     *            the {@link String} to append
     * @since 0.4.0
     * @throws IllegalArgumentException
     *             if the length of the {@link String} is greater than 255
     */
    public void appendString(String str);
    /**
     * Since version <b>0.4.0</b> the method must not be used, use
     * {@link #appendZigBeeType(Object, ZigBeeType)} instead.<br>
     * This method has a conceptual bug with respect to appending 8, 16, or 24
     * bit long data; in fact<br>
     * the method can only fail in such cases.
     *
     * @param data
     *            {@link Object} to serialize as Java type
     * @deprecated Use {@link #appendZigBeeType(Object, ZigBeeType)} instead
     */
    public void appendObject(Object data);
    public void appendBoolean(Boolean data);
    public void appendByte(Byte data);
    public void appendShort(Short data);
    public void appendInteger(Integer data);
    public void appendLong(Long data);
    /**
     *
     * @param data
     *            the data to be streamed
     * @since 0.9.0
     */
    public void appendFloat(Float data);
    public void append_boolean(boolean data);
    public void append_byte(byte data);
    public void append_short(short data);
    public void append_int(int data);
    /**
     * Append an int as a 24-bit value.
     *
     * @param data
     *            int value to append
     * @since 0.4.0
     */
    public void append_int24bit(int data);
    public void append_long(long data);
    /**
     *
     * @param data
     *            the value to serialize
     * @param size
     *            the number of bytes used on the stream (i.e.
     *            UnsignedInteger48bit will result in 6 bytes)
     * @since 0.9.0
     */
    public void append_long(long data, int size);
    /**
     *
     * @return a copy of the payload
     * @since 0.8.0
     */
    public byte[] getPayload();
}
| smulikHakipod/zb4osgi | zb4o-zcl-library/src/main/java/it/cnr/isti/zigbee/zcl/library/api/core/ZBSerializer.java | Java | apache-2.0 | 3,673 |
define(function (require) {
  'use strict';
  // require jquery and load plugins from the server
  var plugins = require('plugins');
  var alien4cloud = require('alien4cloud');
  return {
    // Entry point: resolve the plugin file and module lists, load them via
    // require (files first, then modules), then boot the main application.
    startup: function() {
      // NOTE(review): this then() callback declares two parameters
      // (files, modules), which only works if plugins.init() resolves a
      // jQuery-style deferred with multiple arguments — a standard Promise
      // resolves with a single value, leaving `modules` undefined. Confirm
      // against the plugins module implementation.
      plugins.init().then(function(files, modules) {
        require(files, function() {
          require(modules, function() {
            alien4cloud.startup();
          });
        });
      });
    }
  };
});
| xdegenne/alien4cloud | alien4cloud-ui/src/main/webapp/scripts/alien4cloud-bootstrap.js | JavaScript | apache-2.0 | 431 |
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* Copyright 2012-2019 the original author or authors.
*/
package org.assertj.core.internal.paths;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.fail;
import static org.assertj.core.test.TestData.someInfo;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.BDDMockito.given;
import static org.mockito.BDDMockito.willAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.mockingDetails;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.DirectoryStream;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.List;
import java.util.Spliterators;
import java.util.function.Predicate;
import org.assertj.core.api.AssertionInfo;
import org.assertj.core.internal.PathsBaseTest;
import org.assertj.core.util.Strings;
import org.junit.jupiter.api.BeforeEach;
import com.google.common.collect.Iterators;
/**
 * Base class for Paths tests that work against fully mocked {@link Path}s
 * and a mocked nio files wrapper, so no real filesystem is touched.
 */
public class MockPathsBaseTest extends PathsBaseTest {
  static final AssertionInfo INFO = someInfo();
  Path actual;
  Path other;
  @BeforeEach
  public void init() {
    actual = mock(Path.class);
    other = mock(Path.class);
  }
  // Asserts that the given stream has already been closed: a read must
  // either return -1 or fail with the JDK's "Stream closed" IOException.
  static void failIfStreamIsOpen(InputStream stream) {
    try {
      assertThat(stream.read()).as("Stream should be closed").isNegative();
    } catch (IOException e) {
      assertThat(e).hasNoCause().hasMessage("Stream closed");
    }
  }
  // Asserts that close() was invoked once for every iterator() call made
  // on the mocked directory stream (i.e. every traversal was closed).
  static <T> void failIfStreamIsOpen(DirectoryStream<T> stream) {
    try {
      long openCount = mockingDetails(stream).getInvocations().stream()
                                             .filter(inv -> inv.getMethod().getName().equals("iterator"))
                                             .count();
      verify(stream, times((int) openCount)).close();
    } catch (IOException e) {
      fail("Should not happen");
    }
  }
  // Builds a re-iterable mocked DirectoryStream over the given items.
  static DirectoryStream<Path> directoryStream(List<Path> directoryItems) {
    DirectoryStream<Path> stream = mock(DirectoryStream.class);
    given(stream.iterator()).will(inv -> directoryItems.iterator());
    given(stream.spliterator()).will(inv -> Spliterators.spliteratorUnknownSize(directoryItems.iterator(), 0));
    return stream;
  }
  // Wraps a mocked DirectoryStream with a filtering view; closing the
  // wrapper also closes the underlying source stream.
  private DirectoryStream<Path> filterStream(Predicate<Path> filter, DirectoryStream<Path> source) throws IOException {
    DirectoryStream<Path> stream = mock(DirectoryStream.class);
    given(stream.iterator()).will(inv -> Iterators.filter(source.iterator(), filter::test));
    given(stream.spliterator()).will(inv -> Spliterators.spliteratorUnknownSize(Iterators.filter(source.iterator(), filter::test), 0));
    willAnswer(inv -> {
      source.close();
      return null;
    }).given(stream).close();
    return stream;
  }
  // Builds a mocked Path from name segments: toString joins them with the
  // platform separator, getFileName/getParent/getName(Count) behave
  // consistently with the segment list.
  static Path mockPath(String... names) {
    Path path = mock(Path.class);
    given(path.toString()).willReturn(Strings.join(names).with(File.separator));
    if (names.length > 1) {
      Path filename = mockPath(names[names.length - 1]);
      given(path.getFileName()).willReturn(filename);
      given(path.getParent()).will(inv -> mockPath(Arrays.copyOf(names, names.length - 1)));
    } else {
      given(path.getFileName()).willReturn(path);
      given(path.getParent()).willReturn(null);
    }
    given(path.getNameCount()).willReturn(names.length);
    given(path.getName(anyInt())).will(inv -> names[(int) inv.getArgument(0)]);
    return path;
  }
  // Mocks an existing regular file whose content is empty.
  Path mockRegularFile(String... names) {
    Path path = mockPath(names);
    given(nioFilesWrapper.exists(path)).willReturn(true);
    given(nioFilesWrapper.isRegularFile(path)).willReturn(true);
    try {
      given(nioFilesWrapper.newInputStream(path)).willReturn(new ByteArrayInputStream(new byte[0]));
    } catch (IOException e) {
      fail("Should not happen");
    }
    return path;
  }
  // Mocks an existing directory backed by the given (mocked) stream.
  Path mockDirectory(String name, DirectoryStream<Path> directoryItems) {
    Path path = mockPath(name);
    given(nioFilesWrapper.exists(path)).willReturn(true);
    given(nioFilesWrapper.isDirectory(path)).willReturn(true);
    try {
      given(nioFilesWrapper.newDirectoryStream(eq(path), any())).will(inv -> filterStream(inv.getArgument(1), directoryItems));
    } catch (IOException e) {
      fail("Should not happen");
    }
    return path;
  }
  // Convenience overload: mocks an existing directory containing the
  // given list of (mocked) child paths.
  Path mockDirectory(String name, List<Path> paths) {
    DirectoryStream<Path> directoryItems = directoryStream(paths);
    Path path = mockPath(name);
    given(nioFilesWrapper.exists(path)).willReturn(true);
    given(nioFilesWrapper.isDirectory(path)).willReturn(true);
    try {
      given(nioFilesWrapper.newDirectoryStream(eq(path), any())).will(inv -> filterStream(inv.getArgument(1), directoryItems));
    } catch (IOException e) {
      fail("Should not happen");
    }
    return path;
  }
}
| xasx/assertj-core | src/test/java/org/assertj/core/internal/paths/MockPathsBaseTest.java | Java | apache-2.0 | 5,560 |
import networkx as nx
import re
import json
from learning.PageManager import PageManager
class TreeListLearner(object):
    """Learns list/row extractors from HTML pages by building a prefix tree
    over the invisible HTML tokens that precede each visible text chunk."""

    def __init__(self):
        # An edge in the prefix-tree graph must repeat at least this many times
        # before it is treated as part of a repeating (list) structure.
        self.__minEdgeWeight = 2
        # When True, emits verbose trace output (Python 2 print statements).
        self.__DEBUG = False
"""
pageRepresentation is the invisible/visible data structure
only_consider_tag lets you filter to just one tag type, like DIV
"""
def prefix_tree(self, pageRepresentation, only_consider_tag=None):
ptree = {}
path_to_visible_texts = {}
path_to_first_invis_tokens = {}
for tupl in pageRepresentation:
invisible_token_string = tupl['invisible_token_buffer_before'].replace("> <", "><")
invisible_tokens = re.findall("(<.+?>)", invisible_token_string)
if only_consider_tag is not None:
invisible_tokens = [a for a in invisible_tokens if a.startswith("<" + only_consider_tag)]
path_string = ''.join(invisible_tokens)
if path_string not in path_to_visible_texts:
path_to_visible_texts[path_string] = []
path_to_visible_texts[path_string].append(tupl['visible_token_buffer'])
if path_string not in path_to_first_invis_tokens:
path_to_first_invis_tokens[path_string] = []
path_to_first_invis_tokens[path_string].append(tupl['first_invis_token'])
invisible_tokens.append('VISIBLE') # BC we are going to reverse and make this root
# first, we want to process right to left...
invisible_tokens.reverse()
for depth in range(len(invisible_tokens)):
if depth not in ptree:
ptree[depth] = {}
if depth == 0:
if 'VISIBLE' not in ptree[depth]:
ptree[depth]['VISIBLE'] = {'count': 9999999, 'parent': ''}
else:
node = invisible_tokens[depth]
if node not in ptree[depth]:
ptree[depth][node] = {}
ptree[depth][node] = {'count': 1, 'parent': invisible_tokens[depth - 1]}
else:
ptree[depth][node]['count'] += 1
return ptree, path_to_visible_texts, path_to_first_invis_tokens
def prefix_tree_to_paths(self, prefix_tree):
    """Turn the depth-indexed prefix tree into candidate list-row tag paths.

    Builds a directed graph from the tree (node ids are "<depth>||<tag>" so
    identical tags at different depths stay distinct), walks root-to-leaf
    paths, and keeps only tokens whose incoming edge repeats at least
    self.__minEdgeWeight times. Returns a list of tag paths, each a list of
    bare tag strings with the depth prefixes stripped.
    """
    # basically go through the prefix tree, turn each path into a rule and see the visible text that follows it
    # turn paths in the tree into results by looking at the visible text that follows each path
    # go from leaf to root
    G = nx.DiGraph()
    for i in prefix_tree.keys():
        if i == 0:
            continue
        else:
            if i == 1:
                # Depth-1 nodes hang directly off the synthetic 'VISIBLE' root.
                for node in prefix_tree[i]:
                    G.add_edge('VISIBLE', str(i) + "||" + node, weight=prefix_tree[i][node]['count'], label=prefix_tree[i][node]['count'])
            else:
                for node in prefix_tree[i]:
                    G.add_edge(str(i - 1) + "||" + prefix_tree[i][node]['parent'], str(i) + "||" + node,
                               weight=prefix_tree[i][node]['count'], label=prefix_tree[i][node]['count'])
    # NOTE: nodes_iter() is the networkx 1.x API (removed in 2.x).
    leaves = [x for x in G.nodes_iter() if G.out_degree(x) == 0]  # nodes with no out degree are leaves
    # note we have some disconnected trees, so there might not be a path... but...
    paths = []
    for leaf in leaves:
        has_path = nx.has_path(G, 'VISIBLE', leaf)
        if has_path:
            short_path = nx.shortest_path(G, 'VISIBLE', leaf)
            # leading divs
            leading_tags = [a for a in short_path if a != 'VISIBLE']
            leading_tags.reverse()
            paths.append(leading_tags)
    # first, create the path sets... note, any path set would share hte same first token
    path_sets = {}  # key: first token of path, value: list of members
    for pth in paths:
        pth.reverse()
        first_tok = pth[0]
        if first_tok not in path_sets:
            path_sets[first_tok] = []
        path_sets[first_tok].append(pth)
    # now, see if the path set is "valid." A valid pathset is a pathset where at least one member is
    # valid (e.g., path from root to leaf has all edges occur at least once
    paths_to_keep = []
    for path_set_identifier in path_sets.keys():
        good_path_parts = []  # for holding paths where edges occur at least number of times we want
        for p in path_sets[path_set_identifier]:
            # Pair every token with the edge leading to its successor.
            edge_data = [G.get_edge_data(p[i], p[i+1]) for i in range(len(p)) if i < len(p) - 1]
            tok_with_edge_data = zip(p, edge_data)
            keepers = [tupl[0] for tupl in tok_with_edge_data if tupl[1]['weight'] >= self.__minEdgeWeight]
            # TODO: If you are missing the first (last?) token, then it means you are breaking from VISIBLE...
            # why you end up with lists that are just one node and don't actually extract anything
            good_path_parts.append(keepers)
        # now, find the intersection of the guys in good path parts, this will be our final path
        final_keeper = []
        for i in range(len(good_path_parts)):
            if i == 0:
                final_keeper = good_path_parts[i]
            else:
                final_keeper = [z for z in good_path_parts[i] if z in final_keeper]
        final_keeper.reverse()  # reverse it back to what it looked like before
        if len(final_keeper) > 0:
            paths_to_keep.append(final_keeper)
    # finally, clean the tags
    cleaned_tags = []
    for pth in paths_to_keep:
        cleaned_tags.append([a.split("||")[-1] for a in pth])
    #nx.drawing.nx_pydot.write_dot(G, 'test.dot')
    return cleaned_tags
"""
Given the rows we extract, separate them into clusters where you have overlapping rows or not.
This is the first step to finding interleaving...
Once we find the interleaving, we merge them in (via common parts of the paths), and create
the lists.
From that, we make markup and that's what we give back
Note: we need the page_manager only to find the end token of the last row's Row HTML
"""
def creat_row_markup(self, row_json, all_page_tokens, page_manager):
markup = {}
earliest_latest_row_locations = {}
for path in row_json: # the path defines the row...
earliest = -1
latest = -1
for i in range(len(row_json[path]['rows'])):
row = row_json[path]['rows'][i]
loc = row['starting_token_location']
if earliest == -1: # first run through
earliest = loc
latest = loc
continue
if loc < earliest:
earliest = loc
if loc > latest:
latest = loc
earliest_latest_row_locations[path] = (earliest, latest)
overlaps = []
for pth in earliest_latest_row_locations:
begin = earliest_latest_row_locations[pth][0]
end = earliest_latest_row_locations[pth][1]
if begin == -1 or end == -1: # ill defined locations
continue
if len(overlaps) == 0: # first guy...
overlaps.append([pth])
continue
overlap_clust = -1
for clust_id in range(len(overlaps)):
cluster = overlaps[clust_id]
for cpath in cluster: # could probably just find min and max of cluster and check w/ that, but easier for now...
p_begin = earliest_latest_row_locations[cpath][0]
p_end = earliest_latest_row_locations[cpath][1]
# now, see if there is not overlap...
if p_end < begin or p_begin > end:
continue
overlap_clust = clust_id
if overlap_clust == -1:
overlaps.append([pth])
else:
overlaps[overlap_clust].append(pth)
table_paths = []
for clust in overlaps:
if self.__DEBUG:
print "===oo00 CLUSTER 00oo==="
print clust
path_for_start = ""
#left most, largest row is the beginning, so use that one as A's'
row_start_location = 99999999999
# first, find the member with the most rows
max_rows = max([len(row_json[member]['rows']) for member in clust])
# Ok, so the HTML between rows could have been messed up before bc we didn't know that these were
# overlapping lists. For instance, the first row could be alone and now it's merged, so let's remake
# the html between...
for member in clust:
num_rows = len(row_json[member]['rows'])
if self.__DEBUG:
print "\t--> (%d, %d): %d" % (earliest_latest_row_locations[member][0], earliest_latest_row_locations[member][1], num_rows)
print "\t\t PATH: "+member
print '\n'.join(["\t\t\t"+str(b['starting_token_location'])+" "+b['visible_text']+": "+b['html_between_row'] for b in row_json[member]['rows']])
if num_rows == max_rows:
if earliest_latest_row_locations[member][0] < row_start_location:
row_start_location = earliest_latest_row_locations[member][0]
path_for_start = member
if self.__DEBUG:
print ">> Row starts at: %d (%s) " % (row_start_location, path_for_start)
table_paths.append(path_for_start)
if self.__DEBUG:
print '== TABLE PATHS =='
print '\n'.join(table_paths)
# for each table path, we need to sort the members, and then assign their inner HTML values. Note that
# these might be empty (for first row, etc.) in which case we fill it in. But if it's there, then keep it...
# so we turn each table path into a little regex, and starting from each token, find the next one, and use the
# stuff between as the
# they also need to be sorted bc we need to assign teh correct number to each
for table_path in table_paths:
# make the structure that we want...
by_location = {} # makes it easy to sort by location, etc.
for row in row_json[table_path]['rows']:
by_location[row['starting_token_location']] = row
if len(by_location) < 2:
continue
ordered_row_indexes = sorted(by_location.keys())
extract_sequences = []
ending_row_locations = [] # the token location for the end of each row...
table_path_regex = '+?'.join([tp for tp in table_path])
# Three cases for what your extracted value could be:
# 1 - Normal case: it's the html_between_row value
# 2 - You are a first or optional row, so your html_between_row is empty (bc you might have been
# on a path by yourself). So, we find it as the html between you and the next guy in this combined list
# 3 - The last row. For this, we guess what the end looks like by looking at all of the HTML tags
# for the html_between_row for the guy preceding it, and then find those tags from the start of the
# last row, to the end of the HTML page
for idx in range(len(ordered_row_indexes)):
ordered_row_idx = ordered_row_indexes[idx]
ext_seq = ''
if by_location[ordered_row_idx]['html_between_row'] == '' and idx < len(ordered_row_indexes) - 1:
# can get the HTML as the text between this guy and the next
next_start_token = ordered_row_indexes[idx+1] - 1
sub_page = all_page_tokens.getTokensAsString(ordered_row_idx, next_start_token,
whitespace=True)
ext_seq = sub_page
else:
ext_seq = by_location[ordered_row_idx]['html_between_row']
if idx < len(ordered_row_indexes) - 1: # We don't know where the last guy ends, so we don't have this.
extract_sequences.append(ext_seq)
ending_row_locations.append(ordered_row_indexes[idx+1] - 1)
if idx == len(ordered_row_indexes) - 1: # last guy, so use the end_it regex and find from this guy
# initially was doing longest common substring for all prev rows, but you really just need
# # the last one, I think. Otherwise if you are mixing in optional/first-row you get weirdness...
found_end_loc = self.slot_to_end_token_loc(''.join(extract_sequences[-1]), all_page_tokens,
ordered_row_idx,
page_manager)
seen_etags = [s for s in re.findall("<[a-z]+", ''.join(extract_sequences[-1]))]
# now, jump to the next HTML token we see, after the occurrence of these guys...
rest_of_page = all_page_tokens.getTokensAsString(ordered_row_idx, len(all_page_tokens) - 1,
whitespace=True)
found_match = re.search('.+?'.join(seen_etags), rest_of_page)
if found_match:
found = found_match.end()
else:
found = len(all_page_tokens) - 1
# now, find the next HTML tag from this point, and add that into the extract
# TODO: get this last token in there...
slot = rest_of_page[0:found]
extract_sequences.append(slot)
ending_row_locations.append(found_end_loc)
# now, add this markup in
markup[table_path] = {'sequence': []}
for i in range(len(extract_sequences)):
extract = extract_sequences[i]
seq_number = i+1
#start_tok_loc = by_location[ordered_row_indexes[i]]['starting_token_location']
start_tok_loc = self.slot_to_start_loc(table_path, extract, page_manager)
end_tok_loc = ending_row_locations[i]
if start_tok_loc and end_tok_loc:
markup_value = all_page_tokens.getTokensAsString(start_tok_loc, end_tok_loc, whitespace=True)
markup[table_path]['sequence'].append({'extract': markup_value, 'sequence_number': seq_number,
'starting_token_location': start_tok_loc,
'ending_token_location': end_tok_loc})
return markup
# TODO: This could have errors bc of reliance on regex
def slot_to_start_loc(self, rule, row_html, page_manager):
    """Locate the starting token of a row slot.

    Turns the tag path `rule` into a permissive regex (arbitrary content
    between consecutive tags), finds where that prefix ends inside
    `row_html`, and asks the page manager where the remaining HTML occurs.

    Returns the starting token location, or None when the rule does not
    match or no candidate location exists for the slot.
    """
    rule_regex = rule.replace("><", ">.*?<")
    found_match = re.search(rule_regex, row_html)
    if not found_match:
        return None
    found = found_match.end()
    possible_locs = page_manager.getPossibleLocations(page_manager.getPageIds()[0], row_html[found:])
    if not possible_locs:
        # Previously this raised IndexError on an empty candidate list; an
        # unlocatable slot now degrades to None, which callers already handle.
        return None
    best_loc = possible_locs[0]  # now we've turned this slot into a location
    return best_loc[0]
# TODO: This could have errors... lots of regex stuff...
def slot_to_end_token_loc(self, extraction, all_page_tokens, starting_token_location, page_manager):
    """Find the ending token location of the row whose extracted HTML is
    `extraction`, by replaying its HTML-tag sequence against the rest of the
    page starting at `starting_token_location`.

    Returns the end token location, None if the tag sequence never matches,
    and raises if the located slot does not start where expected.
    """
    seen_etags = [s for s in re.findall("<[a-z]+", extraction)]
    # now, jump to the next HTML token we see, after the occurrence of these guys...
    rest_of_page = all_page_tokens.getTokensAsString(starting_token_location, len(all_page_tokens) - 1,
                                                     whitespace=True)
    found_match = re.search('.*?'.join(seen_etags), rest_of_page)
    if found_match:
        found = found_match.end()
    else:
        return None
    # now, find the next HTML tag from this point, and add that into the extract
    # TODO: get this last token in there...
    slot = rest_of_page[0:found]
    # we know this is the slot for the only page in the page manager passed in...
    possible_locs = page_manager.getPossibleLocations(page_manager.getPageIds()[0], slot)
    best_loc = possible_locs[0]  # now we've turned this slot into a location
    # Sanity check: the located slot must begin at the row's start token.
    if best_loc[0] == starting_token_location:
        return best_loc[-1]
    else:
        raise Exception("Could not locate the correct end token")
# def remove_html(self, value):
# processor = RemoveHtml(value)
# value = processor.post_process()
# processor = RemoveExtraSpaces(value)
# value = processor.post_process()
# return value
# def longest_common_substring(self, s1, s2):
# m = [[0] * (1 + len(s2)) for i in xrange(1 + len(s1))]
# longest, x_longest = 0, 0
# for x in xrange(1, 1 + len(s1)):
# for y in xrange(1, 1 + len(s2)):
# if s1[x - 1] == s2[y - 1]:
# m[x][y] = m[x - 1][y - 1] + 1
# if m[x][y] > longest:
# longest = m[x][y]
# x_longest = x
# else:
# m[x][y] = 0
# return s1[x_longest - longest: x_longest]
"""
@param pages: A hash where key is hte page name, and value is the raw page content
"""
def learn_list_extractors(self, pages):
page_mgr = PageManager() #write_debug_files=True)
markup = {}
for page in pages:
page_content = pages[page]
page_mgr.addPage(page, page_content)
content_list_markup = self.lists_on_single_page(page_content)
markup[page] = content_list_markup
# print '--- MARKUP ---'
# print json.dumps(markup)
page_mgr.learnStripes(markups=markup)
rules = page_mgr.learnRulesFromMarkup(markup)
# now, for each markup rule, learn a little page manager
sublist_page_managers = {}
for page in markup:
for rule_name in markup[page]:
if rule_name not in sublist_page_managers:
sublist_page_managers[rule_name] = PageManager()
for rid in range(len(markup[page][rule_name]['sequence'])):
row = markup[page][rule_name]['sequence'][rid]
sublist_page_managers[rule_name].addPage(page+"html%d" % rid, row['extract'])
sublist_sub_rules = {}
for sublist in sublist_page_managers:
sublist_page_managers[sublist].learnStripes()
sub_rules = sublist_page_managers[sublist].learnAllRules(in_list = True)
sublist_sub_rules[sublist] = sub_rules # This should match a rule name in the rules...
count = 1
for rule in rules.rules:
# print "== RULE INFO =="
# print str(rule.name)
rule.set_sub_rules(sublist_sub_rules[rule.name])
list_name = '_div_list'+format(count, '04')
for page_id in markup:
if rule.name in markup[page_id]:
markup[page_id][list_name] = markup[page_id].pop(rule.name)
rule.name = list_name
# print str(json.dumps(rule.toJson()))
# print "==============="
#
# print rules.toJson()
return rules, markup
def lists_on_single_page(self, content):
    """Discover repeating DIV-path lists on one page and return row markup.

    Builds the invisible-token prefix tree for `content` (DIV tags only),
    extracts frequently repeating paths as candidate lists, collects each
    list's rows (visible text + start location + inter-row HTML), and turns
    them into markup via creat_row_markup().
    """
    pg = PageManager()
    pg.addPage("zzz", content)  # single-page manager; "zzz" is an arbitrary id
    triples = pg.getVisibleTokenStructure()
    (ptree, paths_to_vis_text, path_to_invis_toks) = self.prefix_tree(triples, only_consider_tag='div')
    potential_lists = self.prefix_tree_to_paths(ptree)
    if self.__DEBUG:
        print '.... POTENTIAL LISTS ARE ....'
        print '\n'.join([''.join(p) for p in potential_lists])
        print '.... OK!....'
    all_tokens_list = pg.getPage("zzz").tokens
    # Now, let's get our lists
    lists = {}
    for i in range(len(potential_lists)):
        pot_list = potential_lists[i]
        as_path = ''.join(pot_list)
        if self.__DEBUG:
            print "PATH: %s" % as_path
        lists[as_path] = {
            'rows': []
        }
        # if as_path in paths_to_vis_text:
        # Substring match: any observed path containing this list path contributes rows.
        for path_to_vis in paths_to_vis_text:
            if path_to_vis.find(as_path) > -1:
                vis_texts = [a for a in paths_to_vis_text[path_to_vis]]
                invis_toks = [t for t in path_to_invis_toks[path_to_vis]]
                for idx in range(len(vis_texts)):
                    if self.__DEBUG:
                        print "%s ==> %s" % (vis_texts[idx], str(invis_toks[idx].token_location))
                    # HTML between this row's start and the next row's start;
                    # empty for the final row (its end is unknown here).
                    html_between_row = ''
                    if (idx+1) < len(vis_texts):
                        begin = invis_toks[idx].token_location
                        end = invis_toks[idx+1].token_location - 1
                        html_between_row = all_tokens_list.getTokensAsString(begin, end, whitespace=True)
                    lists[as_path]['rows'].append({
                        'visible_text': vis_texts[idx],
                        'starting_token_location': invis_toks[idx].token_location,
                        'html_between_row': html_between_row
                    })
    as_json_str = json.dumps(lists)
    if self.__DEBUG:
        print "--------"
        print as_json_str
        print "--------"
    # # do it as an extraction instead?
    # item_rule_begin = Landmark.escape_regex_string('<html')
    # item_rule_end = Landmark.escape_regex_string('/html>')
    #
    # begin_iter_rule = '.+?'.join([Landmark.escape_regex_string(a) for a in pot_list])
    #
    # # figure out: for each tag in the rule, add it's end tag (keep track of tag type)
    # # NOTE: for now, this assumes that the HTML is well formed
    # end_it = '.+?'.join(['</div>' for i in range(len(pot_list))])
    #
    # end_iter_rule = end_it
    #
    # # include end-regex: included in the stuff that's extracted.
    # # Solve for the case where you only see part of the stuff
    # rule = IterationRule(str(i) + "_pathListRule", item_rule_begin, item_rule_end,
    #                      begin_iter_rule, end_iter_rule, removehtml=True)
    # extraction = rule.apply(content)
    #
    # print "**PATH: "+''.join(pot_list)
    # as_json_str = json.dumps(extraction)
    #
    # for seq in extraction['sequence']:
    #     print "\t"+seq['extract']
    # TODO: do this here????
    # TODO: big drop down the path should be considered... not just if hte path occurs twice
    # TODO: fix bugs
    markup = self.creat_row_markup(lists, all_tokens_list, pg)
    if self.__DEBUG:
        print "list markup"
        json.dumps(markup)
    return markup
| usc-isi-i2/landmark-extraction | src/learning/TreeListLearner.py | Python | apache-2.0 | 23,788 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras text vectorization preprocessing layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import operator
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine.base_preprocessing_layer import Combiner
from tensorflow.python.keras.engine.base_preprocessing_layer import CombinerPreprocessingLayer
from tensorflow.python.keras.layers.preprocessing import categorical_encoding
from tensorflow.python.keras.layers.preprocessing import index_lookup
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_string_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import keras_export
# Canonical string names for the built-in standardize / split modes.
LOWER_AND_STRIP_PUNCTUATION = "lower_and_strip_punctuation"
SPLIT_ON_WHITESPACE = "whitespace"

# Output-mode constants re-exported from categorical_encoding so callers can
# use these module-level names and the encoding layer stays the single source.
TFIDF = categorical_encoding.TFIDF
INT = categorical_encoding.INT
BINARY = categorical_encoding.BINARY
COUNT = categorical_encoding.COUNT

# This is an explicit regex of all the tokens that will be stripped if
# LOWER_AND_STRIP_PUNCTUATION is set. If an application requires other
# stripping, a Callable should be passed into the 'standardize' arg.
DEFAULT_STRIP_REGEX = r'[!"#$%&()\*\+,-\./:;<=>?@\[\\\]^_`{|}~\']'

# Weight-name keys used when serializing the layer's learned state.
# The string tokens in the extracted vocabulary
_VOCAB_NAME = "vocab"
# The inverse-document-frequency weights
_IDF_NAME = "idf"
# The IDF data for the OOV token
_OOV_IDF_NAME = "oov_idf"

# Keys into the combiner accumulator used during adapt().
# The string tokens in the full vocabulary
_ACCUMULATOR_VOCAB_NAME = "vocab"
# The total counts of each token in the vocabulary
_ACCUMULATOR_COUNTS_NAME = "counts"
# The number of documents / examples that each token appears in.
_ACCUMULATOR_DOCUMENT_COUNTS = "document_counts"
# The total number of documents / examples in the dataset.
_ACCUMULATOR_NUM_DOCUMENTS = "num_documents"
@keras_export(
"keras.layers.experimental.preprocessing.TextVectorization", v1=[])
class TextVectorization(CombinerPreprocessingLayer):
"""Text vectorization layer.
This layer has basic options for managing text in a Keras model. It
transforms a batch of strings (one sample = one string) into either a list of
token indices (one sample = 1D tensor of integer token indices) or a dense
representation (one sample = 1D tensor of float values representing data about
the sample's tokens).
If desired, the user can call this layer's adapt() method on a dataset.
When this layer is adapted, it will analyze the dataset, determine the
frequency of individual string values, and create a 'vocabulary' from them.
This vocabulary can have unlimited size or be capped, depending on the
configuration options for this layer; if there are more unique values in the
input than the maximum vocabulary size, the most frequent terms will be used
to create the vocabulary.
The processing of each sample contains the following steps:
1) standardize each sample (usually lowercasing + punctuation stripping)
2) split each sample into substrings (usually words)
3) recombine substrings into tokens (usually ngrams)
4) index tokens (associate a unique int value with each token)
5) transform each sample using this index, either into a vector of ints or
a dense float vector.
Some notes on passing Callables to customize splitting and normalization for
this layer:
1) Any callable can be passed to this Layer, but if you want to serialize
this object you should only pass functions that are registered Keras
serializables (see `tf.keras.utils.register_keras_serializable` for more
details).
2) When using a custom callable for `standardize`, the data received
by the callable will be exactly as passed to this layer. The callable
should return a tensor of the same shape as the input.
3) When using a custom callable for `split`, the data received by the
callable will have the 1st dimension squeezed out - instead of
`[["string to split"], ["another string to split"]]`, the Callable will
see `["string to split", "another string to split"]`. The callable should
return a Tensor with the first dimension containing the split tokens -
in this example, we should see something like `[["string", "to", "split],
["another", "string", "to", "split"]]`. This makes the callable site
natively compatible with `tf.strings.split()`.
Attributes:
max_tokens: The maximum size of the vocabulary for this layer. If None,
there is no cap on the size of the vocabulary.
standardize: Optional specification for standardization to apply to the
input text. Values can be None (no standardization),
'lower_and_strip_punctuation' (lowercase and remove punctuation) or a
Callable. Default is 'lower_and_strip_punctuation'.
split: Optional specification for splitting the input text. Values can be
None (no splitting), 'whitespace' (split on ASCII whitespace), or a
Callable. The default is 'whitespace'.
ngrams: Optional specification for ngrams to create from the possibly-split
input text. Values can be None, an integer or tuple of integers; passing
an integer will create ngrams up to that integer, and passing a tuple of
integers will create ngrams for the specified values in the tuple. Passing
None means that no ngrams will be created.
output_mode: Optional specification for the output of the layer. Values can
be "int", "binary", "count" or "tf-idf", configuring the layer as follows:
"int": Outputs integer indices, one integer index per split string
token.
"binary": Outputs a single int array per batch, of either vocab_size or
max_tokens size, containing 1s in all elements where the token mapped
to that index exists at least once in the batch item.
"count": As "binary", but the int array contains a count of the number
of times the token at that index appeared in the batch item.
"tf-idf": As "binary", but the TF-IDF algorithm is applied to find the
value in each token slot.
output_sequence_length: Only valid in INT mode. If set, the output will have
its time dimension padded or truncated to exactly `output_sequence_length`
values, resulting in a tensor of shape [batch_size,
output_sequence_length] regardless of how many tokens resulted from the
splitting step. Defaults to None.
pad_to_max_tokens: Only valid in "binary", "count", and "tf-idf" modes. If
True, the output will have its feature axis padded to `max_tokens` even if
the number of unique tokens in the vocabulary is less than max_tokens,
resulting in a tensor of shape [batch_size, max_tokens] regardless of
vocabulary size. Defaults to True.
Example:
This example instantiates a TextVectorization layer that lowercases text,
splits on whitespace, strips punctuation, and outputs integer vocab indices.
```
max_features = 5000 # Maximum vocab size.
max_len = 40 # Sequence length to pad the outputs to.
# Create the layer.
vectorize_layer = text_vectorization.TextVectorization(
max_tokens=max_features,
output_mode='int',
output_sequence_length=max_len)
# Now that the vocab layer has been created, call `adapt` on the text-only
# dataset to create the vocabulary. You don't have to batch, but for large
# datasets this means we're not keeping spare copies of the dataset in memory.
vectorize_layer.adapt(text_dataset.batch(64))
# Create the model that uses the vectorize text layer
model = tf.keras.models.Sequential()
# Start by creating an explicit input layer. It needs to have a shape of (1,)
# (because we need to guarantee that there is exactly one string input per
# batch), and the dtype needs to be 'string'.
model.add(tf.keras.Input(shape=(1,), dtype=tf.string))
# The first layer in our model is the vectorization layer. After this layer,
# we have a tensor of shape (batch_size, max_len) containing vocab indices.
model.add(vectorize_layer)
# Next, we add a layer to map those vocab indices into a space of
# dimensionality 'embedding_dims'. Note that we're using max_features+1 here,
# since there's an OOV token that gets added to the vocabulary in
# vectorize_layer.
model.add(tf.keras.layers.Embedding(max_features+1, embedding_dims))
# At this point, you have embedded float data representing your tokens, and
# can add whatever other layers you need to create your model.
```
"""
# TODO(momernick): Add an examples section to the docstring.
def __init__(self,
             max_tokens=None,
             standardize=LOWER_AND_STRIP_PUNCTUATION,
             split=SPLIT_ON_WHITESPACE,
             ngrams=None,
             output_mode=INT,
             output_sequence_length=None,
             pad_to_max_tokens=True,
             **kwargs):
  """Validates configuration and builds the sub-layers.

  Raises:
    ValueError: if `standardize`, `split`, `output_mode`, `ngrams`,
      `output_sequence_length` or `max_tokens` is invalid, or if the
      combination of options is inconsistent.
  """
  # This layer only applies to string processing, and so should only have
  # a dtype of 'string'.
  if "dtype" in kwargs and kwargs["dtype"] != dtypes.string:
    raise ValueError("TextVectorization may only have a dtype of string.")
  elif "dtype" not in kwargs:
    kwargs["dtype"] = dtypes.string

  # 'standardize' must be one of (None, LOWER_AND_STRIP_PUNCTUATION, callable)
  # BUGFIX: `allowable_strings` must be a real one-element tuple. A bare
  # parenthesized string is just a string, which turned the membership check
  # in validate_string_arg into a substring test that silently accepted
  # invalid values such as "lower".
  layer_utils.validate_string_arg(
      standardize,
      allowable_strings=(LOWER_AND_STRIP_PUNCTUATION,),
      layer_name="TextVectorization",
      arg_name="standardize",
      allow_none=True,
      allow_callables=True)

  # 'split' must be one of (None, SPLIT_ON_WHITESPACE, callable)
  # BUGFIX: same one-element-tuple fix as for 'standardize' above.
  layer_utils.validate_string_arg(
      split,
      allowable_strings=(SPLIT_ON_WHITESPACE,),
      layer_name="TextVectorization",
      arg_name="split",
      allow_none=True,
      allow_callables=True)

  # 'output_mode' must be one of (None, INT, COUNT, BINARY, TFIDF)
  layer_utils.validate_string_arg(
      output_mode,
      allowable_strings=(INT, COUNT, BINARY, TFIDF),
      layer_name="TextVectorization",
      arg_name="output_mode",
      allow_none=True)

  # 'ngrams' must be one of (None, int, tuple(int))
  if not (ngrams is None or
          isinstance(ngrams, int) or
          isinstance(ngrams, tuple) and
          all(isinstance(item, int) for item in ngrams)):
    raise ValueError(("`ngrams` must be None, an integer, or a tuple of "
                      "integers. Got %s") % (ngrams,))

  # 'output_sequence_length' must be one of (None, int) and is only
  # set if output_mode is INT.
  if (output_mode == INT and not (isinstance(output_sequence_length, int) or
                                  (output_sequence_length is None))):
    raise ValueError("`output_sequence_length` must be either None or an "
                     "integer when `output_mode` is 'int'. "
                     "Got %s" % output_sequence_length)

  if output_mode != INT and output_sequence_length is not None:
    raise ValueError("`output_sequence_length` must not be set if "
                     "`output_mode` is not 'int'.")

  # If max_tokens is set, the value must be greater than 1 - otherwise we
  # are creating a 0-element vocab, which doesn't make sense.
  # NOTE(review): the guard only rejects values < 1, so max_tokens == 1 is
  # still accepted despite the message; confirm the intended bound before
  # tightening the condition.
  if max_tokens is not None and max_tokens < 1:
    raise ValueError("max_tokens must be > 1.")

  self._max_tokens = max_tokens

  # In INT mode, we have two reserved values (PAD and OOV). However, non-INT
  # modes don't have a PAD value, so we only need to reserve one value.
  self._reserved_values = 2 if output_mode == INT else 1

  # In INT mode, the zero value is reserved for padding (per Keras standard
  # padding approaches). In non-INT modes, there is no padding so we can set
  # the OOV value to zero instead of one.
  self._oov_value = 1 if output_mode == INT else 0

  # We always reduce the max token number by 1 to account for the OOV token
  # if it is set. Keras' use of the reserved number 0 for padding tokens,
  # if the output is in INT mode, does not really count as a 'token' for
  # vocabulary purposes, so we only reduce vocab size by 1 here.
  self._max_vocab_size = max_tokens - 1 if max_tokens is not None else None

  self._standardize = standardize
  self._split = split
  self._ngrams_arg = ngrams
  # An int N means "all ngram sizes 1..N"; a tuple is taken verbatim.
  if isinstance(ngrams, int):
    self._ngrams = tuple(range(1, ngrams + 1))
  else:
    self._ngrams = ngrams

  self._output_mode = output_mode
  self._output_sequence_length = output_sequence_length
  self._pad_to_max = pad_to_max_tokens
  self._vocab_size = 0
  self._called = False

  super(TextVectorization, self).__init__(
      combiner=_TextVectorizationCombiner(
          self._max_vocab_size, compute_idf=output_mode == TFIDF),
      **kwargs)

  self._supports_ragged_inputs = True
  reserve_zero = output_mode in [None, INT]
  self._index_lookup_layer = self._get_index_lookup_class()(
      max_tokens=max_tokens, reserve_zero=reserve_zero, dtype=dtypes.string)

  # If this layer is configured for string or integer output, we do not
  # create a vectorization layer (as the output is not vectorized).
  if self._output_mode in [None, INT]:
    return

  if max_tokens is not None and self._pad_to_max:
    max_elements = max_tokens
  else:
    max_elements = None
  self._vectorize_layer = self._get_vectorization_class()(
      max_tokens=max_elements, output_mode=self._output_mode)
# These are V1/V2 shim points. There are V1 implementations in the V1 class.
def _get_vectorization_class(self):
  # Shim: returns the layer class used for non-INT output encodings;
  # V1 subclasses override this to supply the V1 implementation.
  return categorical_encoding.CategoricalEncoding
def _get_table_data(self):
  # V1/V2 shim: eagerly materialize the lookup table's contents as numpy arrays.
  exported_keys, exported_values = self._table.export()
  return exported_keys.numpy(), exported_values.numpy()
def _get_index_lookup_class(self):
  # Shim: returns the string->index lookup layer class; V1 subclasses override.
  return index_lookup.IndexLookup
def _to_numpy(self, preprocessed_data):
  """Converts preprocessed inputs into numpy arrays."""
  already_numpy = isinstance(preprocessed_data, np.ndarray)
  # Anything else is expected to expose to_list() (e.g. a ragged tensor) —
  # materialize it before wrapping.
  return preprocessed_data if already_numpy else np.array(preprocessed_data.to_list())
# End of V1/V2 shim points.
def _assert_same_type(self, expected_type, values, value_name):
  """Raises RuntimeError when `values` does not carry `expected_type`'s dtype."""
  expected = dtypes.as_dtype(expected_type)
  actual = dtypes.as_dtype(values.dtype)
  if expected != actual:
    raise RuntimeError("Expected %s type %s, got %s" %
                       (value_name, expected_type, values.dtype))
def _convert_to_ndarray(self, x):
return np.array(x) if isinstance(x, (list, tuple)) else x
def compute_output_shape(self, input_shape):
  """Computes the output shape for the configured output mode."""
  if self._output_mode != INT:
    # Vectorized modes (binary/count/tfidf) emit a (batch, max_tokens) matrix.
    return tensor_shape.TensorShape([input_shape[0], self._max_tokens])
  if self._split is None:
    # INT mode without splitting passes the input shape through untouched.
    return input_shape
  # INT mode with splitting: the time axis becomes the output sequence length.
  new_shape = list(input_shape)
  new_shape[1] = self._output_sequence_length
  return tensor_shape.TensorShape(new_shape)
def compute_output_signature(self, input_spec):
  """Builds a TensorSpec for the layer's output from an input spec."""
  # TFIDF produces floating-point weights; every other mode produces indices.
  if self._output_mode == TFIDF:
    output_dtype = K.floatx()
  else:
    output_dtype = dtypes.int64
  output_shape = self.compute_output_shape(input_spec.shape.as_list())
  return tensor_spec.TensorSpec(shape=output_shape, dtype=output_dtype)
def adapt(self, data, reset_state=True):
  """Fits the state of the preprocessing layer to the dataset.

  Overrides the default adapt method to apply relevant preprocessing to the
  inputs before passing to the combiner.

  Arguments:
    data: The data to train on. It can be passed either as a tf.data Dataset,
      or as a numpy array.
    reset_state: Optional argument specifying whether to clear the state of
      the layer at the start of the call to `adapt`. This must be True for
      this layer, which does not support repeated calls to `adapt`.

  Raises:
    ValueError: If `reset_state` is False, if the Dataset does not yield a
      single tensor, or if `data` is neither a numpy array nor a Dataset.
  """
  if not reset_state:
    raise ValueError("TextVectorization does not support streaming adapts.")

  # Build the layer explicitly with the original data shape instead of relying
  # on an implicit call to `build` in the base layer's `adapt`, since
  # preprocessing changes the input shape.
  if isinstance(data, np.ndarray):
    if data.ndim == 1:
      # Promote rank-1 input to a (batch, 1) column so splitting can later
      # squeeze axis 1 (see _preprocess).
      data = np.expand_dims(data, axis=-1)
    self.build(data.shape)
    preprocessed_inputs = self._to_numpy(self._preprocess(data))
  elif isinstance(data, dataset_ops.DatasetV2):
    # TODO(momernick): Replace this with a more V2-friendly API.
    shape = dataset_ops.get_legacy_output_shapes(data)
    if not isinstance(shape, tensor_shape.TensorShape):
      raise ValueError("The dataset passed to 'adapt' must contain a single "
                       "tensor value.")
    if shape.rank == 1:
      # Same rank-1 promotion as the numpy path, applied lazily per element.
      data = data.map(lambda tensor: array_ops.expand_dims(tensor, -1))
    self.build(dataset_ops.get_legacy_output_shapes(data))
    preprocessed_inputs = data.map(self._preprocess)
  else:
    raise ValueError(
        "adapt() requires a Dataset or a Numpy array as input, got {}".format(
            type(data)))
  super(TextVectorization, self).adapt(preprocessed_inputs, reset_state)
def get_vocabulary(self):
  """Returns the current vocabulary, as held by the inner index-lookup layer."""
  return self._index_lookup_layer.get_vocabulary()
def get_config(self):
  """Returns the serialization config, layered on top of the base config."""
  base_config = super(TextVectorization, self).get_config()
  config = dict(base_config)
  # Layer-specific keys take precedence over any base-config duplicates.
  config.update({
      "max_tokens": self._max_tokens,
      "standardize": self._standardize,
      "split": self._split,
      "ngrams": self._ngrams_arg,
      "output_mode": self._output_mode,
      "output_sequence_length": self._output_sequence_length,
      "pad_to_max_tokens": self._pad_to_max,
  })
  return config
def count_params(self):
  """Returns 0, as this layer has no trainable scalar weights."""
  # This method counts the number of scalars in the weights of this layer.
  # Since this layer doesn't have any /actual/ weights (in that there's
  # nothing in this layer that can be trained - we only use the weight
  # abstraction for ease of saving!) we return 0.
  return 0
def set_vocabulary(self,
                   vocab,
                   df_data=None,
                   oov_df_value=None,
                   append=False):
  """Sets vocabulary (and optionally document frequency) data for this layer.

  This method sets the vocabulary and DF data for this layer directly, instead
  of analyzing a dataset through 'adapt'. It should be used whenever the vocab
  (and optionally document frequency) information is already known. If
  vocabulary data is already present in the layer, this method will either
  replace it, if 'append' is set to False, or append to it (if 'append' is set
  to True).

  Arguments:
    vocab: An array of string tokens.
    df_data: An array of document frequency data. Only necessary if the layer
      output_mode is TFIDF.
    oov_df_value: The document frequency of the OOV token. Only necessary if
      output_mode is TFIDF. OOV data is optional when appending additional
      data in TFIDF mode; if an OOV value is supplied it will overwrite the
      existing OOV value.
    append: Whether to overwrite or append any existing vocabulary data.

  Raises:
    ValueError: If there are too many inputs, the inputs do not match, or
      input data is missing.
    RuntimeError: If the vocabulary cannot be set when this function is
      called. This happens when the output_mode is "binary", "count", or
      "tfidf", "pad_to_max_tokens" is False, and the layer has already
      been called.
  """
  if self._output_mode != TFIDF and df_data is not None:
    raise ValueError("df_data should only be set if output_mode is TFIDF. "
                     "output_mode is %s." % self._output_mode)

  # Once the layer has produced output of a fixed (non-padded) width,
  # changing the vocabulary would change that width and break the model.
  if (self._output_mode in [BINARY, COUNT, TFIDF] and self._called and
      not self._pad_to_max):
    raise RuntimeError(("When using TextVectorization in {mode} mode and "
                        "pad_to_max_tokens is False, the vocabulary cannot "
                        "be changed after the layer is "
                        "called.").format(mode=self._output_mode))

  current_table_size = self._index_lookup_layer.vocab_size()
  self._index_lookup_layer.set_vocabulary(vocab, append)

  # When doing raw or integer output, we don't have a Vectorize layer to
  # manage. In this case, we can return directly.
  if self._output_mode in [None, INT]:
    return

  if not self._pad_to_max or self._max_tokens is None:
    # NOTE(review): self._reserved_values is not assigned anywhere in this
    # chunk -- presumably set in __init__ or a shim subclass; confirm.
    num_tokens = self._index_lookup_layer.vocab_size() + self._reserved_values
    self._vectorize_layer.set_num_elements(num_tokens)

  # We're only _really_ appending if the table_size is nonzero. This is
  # important for some sanity checks in tfidf mode (specifically, checking if
  # oov_df_value is set or not) and handling existing tfidf weight data.
  append = append if current_table_size > 0 else False

  if self._output_mode == TFIDF:
    if df_data is None:
      raise ValueError("df_data must be set if output_mode is TFIDF")
    if len(vocab) != len(df_data):
      # BUGFIX: the format arguments were previously passed in the order
      # (len(vocab), len(df_data)), reporting each length under the wrong
      # label. They now match the message text.
      raise ValueError("df_data must be the same length as vocab. "
                       "len(df_data) is %s, len(vocab) is %s" %
                       (len(df_data), len(vocab)))
    if not append and oov_df_value is None:
      raise ValueError("You must pass an oov_df_value the first time "
                       "'set_vocabulary' is called when output_mode is "
                       "TFIDF.")

    df_data = self._convert_to_ndarray(df_data)
    if append:
      # The existing IDF data is stored in a Keras weight, so we can get it
      # by calling K.get_value() on the weight object. Take the first
      # table_size+1 values in case we're padding the weight with zeros
      existing_df_data = K.get_value(
          self._vectorize_layer.tf_idf_weights)[:current_table_size + 1]
      df_data = np.append(existing_df_data, df_data, axis=0)
      # If we are appending and need to replace the OOV DF value, we can
      # assign it over the existing OOV DF value at index 0 of the (already-
      # concatenated) DF value array.
      if oov_df_value is not None:
        df_data[0] = oov_df_value
    else:
      # If we are not appending (that is, we have only new data) we need to
      # insert the OOV value at the front of the array. (This is an append
      # to the head, not a replacement of the zeroth value.)
      if not isinstance(oov_df_value, np.ndarray):
        oov_df_value = np.array([oov_df_value])
      df_data = np.insert(df_data, 0, oov_df_value)
    self._vectorize_layer.set_tfidf_data(df_data)
def build(self, input_shape):
  """Validates the input shape; when splitting, axis 1 must be exactly 1."""
  # We have to use 'and not ==' here, because input_shape[1] !/== 1 can result
  # in None for undefined shape axes. If using 'and !=', this causes the
  # expression to evaluate to False instead of True if the shape is undefined;
  # the expression needs to evaluate to True in that case.
  if self._split is not None and not input_shape[1] == 1:  # pylint: disable=g-comparison-negation
    # NOTE(review): the message says "first dimension" but the check reads
    # input_shape[1] (the second axis) -- confirm which is intended.
    raise RuntimeError(
        "When using TextVectorization to tokenize strings, the first "
        "dimension of the input array must be 1, got shape "
        "{}".format(input_shape))

  super(TextVectorization, self).build(input_shape)
def _set_state_variables(self, updates):
  """Pushes combiner results (vocab plus optional IDF data) into the layer."""
  if not self.built:
    raise RuntimeError("_set_state_variables() must be called after build().")
  if self._output_mode != TFIDF:
    self.set_vocabulary(updates[_VOCAB_NAME])
  else:
    self.set_vocabulary(updates[_VOCAB_NAME], updates[_IDF_NAME],
                        updates[_OOV_IDF_NAME])
def _preprocess(self, inputs):
  """Applies standardization, splitting, and ngram generation to `inputs`."""
  if self._standardize == LOWER_AND_STRIP_PUNCTUATION:
    if ragged_tensor.is_ragged(inputs):
      lowercase_inputs = ragged_functional_ops.map_flat_values(
          gen_string_ops.string_lower, inputs)
      # Depending on configuration, we may never touch the non-data tensor
      # in the ragged inputs tensor. If that is the case, and this is the
      # only layer in the keras model, running it will throw an error.
      # To get around this, we wrap the result in an identity.
      lowercase_inputs = array_ops.identity(lowercase_inputs)
    else:
      lowercase_inputs = gen_string_ops.string_lower(inputs)
    inputs = string_ops.regex_replace(lowercase_inputs, DEFAULT_STRIP_REGEX,
                                      "")
  elif callable(self._standardize):
    inputs = self._standardize(inputs)
  elif self._standardize is not None:
    raise ValueError(("%s is not a supported standardization. "
                      "TextVectorization supports the following options "
                      "for `standardize`: None, "
                      "'lower_and_strip_punctuation', or a "
                      "Callable.") % self._standardize)

  if self._split is not None:
    # If we are splitting, we validate that the 1st axis is of dimension 1 and
    # so can be squeezed out. We do this here instead of after splitting for
    # performance reasons - it's more expensive to squeeze a ragged tensor.
    inputs = array_ops.squeeze(inputs, axis=1)
    if self._split == SPLIT_ON_WHITESPACE:
      # This treats multiple whitespaces as one whitespace, and strips leading
      # and trailing whitespace.
      inputs = ragged_string_ops.string_split_v2(inputs)
    elif callable(self._split):
      inputs = self._split(inputs)
    else:
      # NOTE(review): the message below is missing a space after
      # "splitting." -- the two literals concatenate without a separator.
      raise ValueError(
          ("%s is not a supported splitting."
           "TextVectorization supports the following options "
           "for `split`: None, 'whitespace', or a Callable.") % self._split)

  # Note that 'inputs' here can be either ragged or dense depending on the
  # configuration choices for this Layer. The strings.ngrams op, however, does
  # support both ragged and dense inputs.
  if self._ngrams is not None:
    inputs = ragged_string_ops.ngrams(
        inputs, ngram_width=self._ngrams, separator=" ")

  return inputs
def call(self, inputs):
  """Transforms input text into the configured output representation."""
  # Record that the layer has been called; some modes restrict vocabulary
  # changes afterwards (see set_vocabulary).
  self._called = True
  inputs = self._preprocess(inputs)

  # If we're not doing any output processing, return right away.
  if self._output_mode is None:
    return inputs

  indexed_data = self._index_lookup_layer(inputs)

  if self._output_mode == INT:
    # Once we have the dense tensor, we can return it if we weren't given a
    # fixed output sequence length. If we were, though, we have to dynamically
    # choose whether to pad or trim it based on each tensor.

    # We need to convert to dense if we have a ragged tensor.
    if ragged_tensor.is_ragged(indexed_data):
      dense_data = indexed_data.to_tensor(default_value=0)
    else:
      dense_data = indexed_data

    if self._output_sequence_length is None:
      dense_data.set_shape(tensor_shape.TensorShape((None, None)))
      return dense_data
    else:
      # Pad with zeros when shorter than the target length; slice when longer.
      sequence_len = K.shape(dense_data)[1]
      pad_amt = self._output_sequence_length - sequence_len
      pad_fn = lambda: array_ops.pad(dense_data, [[0, 0], [0, pad_amt]])
      slice_fn = lambda: dense_data[:, :self._output_sequence_length]
      output_tensor = control_flow_ops.cond(
          sequence_len < self._output_sequence_length,
          true_fn=pad_fn,
          false_fn=slice_fn)
      output_tensor.set_shape(
          tensor_shape.TensorShape((None, self._output_sequence_length)))
      return output_tensor

  # If we're not returning integers here, we rely on the vectorization layer
  # to create the output.
  return self._vectorize_layer(indexed_data)
class _TextVectorizationAccumulator(
    collections.namedtuple("_TextVectorizationAccumulator",
                           ["count_dict", "per_doc_count_dict", "metadata"])):
  # count_dict: token -> total occurrence count across all documents.
  # per_doc_count_dict: token -> {"count", "last_doc_id"} dict used for IDF.
  # metadata: single-element list holding the number of documents seen.
  pass
# A note on this combiner: This contains functionality that will be extracted
# into the Vectorization and IndexLookup combiner objects. At that point,
# TextVectorization can become a PreprocessingStage instead of a Layer and
# this combiner can be retired. Until then, we leave this as is instead of
# attempting a refactor of what will soon be deleted.
class _TextVectorizationCombiner(Combiner):
  """Combiner for the TextVectorization preprocessing layer.

  This class encapsulates the logic for computing a vocabulary based on the
  frequency of each token.

  Attributes:
    vocab_size: (Optional) If set, only the top `vocab_size` tokens (based on
      frequency across the dataset) are retained in the vocabulary. If None, or
      set to a value greater than the total number of distinct tokens in the
      dataset, all tokens are retained.
    compute_idf: (Optional) If set, the inverse document frequency will be
      computed for each value.
  """

  def __init__(self, vocab_size=None, compute_idf=False):
    self._vocab_size = vocab_size
    self._compute_idf = compute_idf
    # Tokens are always expected as strings; compute() enforces this dtype.
    self._input_dtype = dtypes.string

  def compute(self, values, accumulator=None):
    """Compute a step in this computation, returning a new accumulator."""
    if dtypes.as_dtype(self._input_dtype) != dtypes.as_dtype(values.dtype):
      raise RuntimeError("Expected input type %s, got %s" %
                         (self._input_dtype, values.dtype))
    # Normalize ragged/eager/numpy inputs down to nested Python lists.
    if ragged_tensor.is_ragged(values):
      values = values.to_list()
    if isinstance(values, ops.EagerTensor):
      values = values.numpy()
    if isinstance(values, np.ndarray):
      values = values.tolist()

    if accumulator is None:
      accumulator = self._create_accumulator()

    # TODO(momernick): Benchmark improvements to this algorithm.
    for document in values:
      current_doc_id = accumulator.metadata[0]
      for token in document:
        accumulator.count_dict[token] += 1
        if self._compute_idf:
          doc_count = accumulator.per_doc_count_dict[token]
          # Bump the per-document count at most once per document, no matter
          # how many times the token occurs within it.
          if doc_count["last_doc_id"] != current_doc_id:
            doc_count["count"] += 1
            doc_count["last_doc_id"] = current_doc_id
      # metadata[0] tracks the total number of documents seen so far.
      accumulator.metadata[0] += 1

    return accumulator

  def merge(self, accumulators):
    """Merge several accumulators to a single accumulator."""
    if not accumulators:
      # NOTE(review): this returns the falsy input itself rather than a fresh
      # accumulator; callers appear to always pass at least one accumulator.
      return accumulators

    base_accumulator = accumulators[0]

    for accumulator in accumulators[1:]:
      # Document totals and per-token counts are additive across shards.
      base_accumulator.metadata[0] += accumulator.metadata[0]
      for token, value in accumulator.count_dict.items():
        base_accumulator.count_dict[token] += value
      if self._compute_idf:
        for token, value in accumulator.per_doc_count_dict.items():
          # Any newly created token counts in 'base_accumulator''s
          # per_doc_count_dict will have a last_doc_id of -1. This is always
          # less than the next doc id (which are strictly positive), so any
          # future occurrences are guaranteed to be counted.
          base_accumulator.per_doc_count_dict[token]["count"] += value["count"]

    return base_accumulator

  def _inverse_document_frequency(self, document_counts, num_documents):
    """Compute the inverse-document-frequency (IDF) component of TFIDF.

    Uses the default weighting scheme described in
    https://en.wikipedia.org/wiki/Tf%E2%80%93idf.

    Args:
      document_counts: An array of the # of documents each token appears in.
      num_documents: An int representing the total number of documents

    Returns:
      An array of "inverse document frequency" weights.
    """
    return np.log(1 + num_documents / (1 + np.array(document_counts)))

  def extract(self, accumulator):
    """Convert an accumulator into a dict of output values.

    Args:
      accumulator: An accumulator aggregating over the full dataset.

    Returns:
      A dict of:
        "vocab": A list of the retained items in the vocabulary.
        "idf": The inverse-document-frequency for each item in vocab.
          idf[vocab_idx] is the IDF value for the corresponding vocab item.
        "oov_idf": The inverse-document-frequency for the OOV token.
    """
    if self._compute_idf:
      vocab_counts, document_counts, num_documents = accumulator
    else:
      vocab_counts, _, _ = accumulator

    # Sort by (count, token) descending so the ordering is deterministic.
    sorted_counts = sorted(
        vocab_counts.items(), key=operator.itemgetter(1, 0), reverse=True)
    vocab_data = (
        sorted_counts[:self._vocab_size] if self._vocab_size else sorted_counts)
    vocab = [data[0] for data in vocab_data]

    if self._compute_idf:
      doc_counts = [document_counts[token]["count"] for token in vocab]
      idf = self._inverse_document_frequency(doc_counts, num_documents[0])
      # The OOV token's IDF matches a document count of zero: log(1 + N).
      oov_idf = np.array([np.log(1 + num_documents[0])])
      return {_VOCAB_NAME: vocab, _IDF_NAME: idf, _OOV_IDF_NAME: oov_idf}
    else:
      return {_VOCAB_NAME: vocab}

  def restore(self, output):
    """Create an accumulator based on 'output'."""
    raise NotImplementedError(
        "TextVectorization does not restore or support streaming updates.")

  def serialize(self, accumulator):
    """Serialize an accumulator for a remote call."""
    output_dict = {}
    output_dict["metadata"] = accumulator.metadata
    output_dict["vocab"] = list(accumulator.count_dict.keys())
    output_dict["vocab_counts"] = list(accumulator.count_dict.values())
    if self._compute_idf:
      output_dict["idf_vocab"] = list(accumulator.per_doc_count_dict.keys())
      # Only the counts survive serialization; last_doc_id is reset to -1 on
      # deserialize (see below).
      output_dict["idf_counts"] = [
          counter["count"]
          for counter in accumulator.per_doc_count_dict.values()
      ]
    return compat.as_bytes(json.dumps(output_dict))

  def deserialize(self, encoded_accumulator):
    """Deserialize an accumulator received from 'serialize()'."""
    accumulator_dict = json.loads(compat.as_text(encoded_accumulator))

    accumulator = self._create_accumulator()
    accumulator.metadata[0] = accumulator_dict["metadata"][0]

    count_dict = dict(
        zip(accumulator_dict["vocab"], accumulator_dict["vocab_counts"]))
    accumulator.count_dict.update(count_dict)

    if self._compute_idf:
      create_dict = lambda x: {"count": x, "last_doc_id": -1}
      idf_count_dicts = [
          create_dict(count) for count in accumulator_dict["idf_counts"]
      ]
      idf_dict = dict(zip(accumulator_dict["idf_vocab"], idf_count_dicts))
      accumulator.per_doc_count_dict.update(idf_dict)

    return accumulator

  def _create_accumulator(self):
    """Creates an empty accumulator (token counts, per-doc counts, doc total)."""
    count_dict = collections.defaultdict(int)
    if self._compute_idf:
      create_default_dict = lambda: {"count": 0, "last_doc_id": -1}
      per_doc_count_dict = collections.defaultdict(create_default_dict)
    else:
      per_doc_count_dict = None
    metadata = [0]
    return _TextVectorizationAccumulator(count_dict, per_doc_count_dict,
                                         metadata)
| xzturn/tensorflow | tensorflow/python/keras/layers/preprocessing/text_vectorization.py | Python | apache-2.0 | 36,215 |
/*
Copyright IBM Corp. 2016 All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package multichain
import (
"github.com/hyperledger/fabric/common/config"
"github.com/hyperledger/fabric/common/crypto"
"github.com/hyperledger/fabric/common/policies"
"github.com/hyperledger/fabric/common/util"
"github.com/hyperledger/fabric/orderer/common/blockcutter"
"github.com/hyperledger/fabric/orderer/common/broadcast"
"github.com/hyperledger/fabric/orderer/common/configtxfilter"
"github.com/hyperledger/fabric/orderer/common/filter"
"github.com/hyperledger/fabric/orderer/common/sigfilter"
"github.com/hyperledger/fabric/orderer/common/sizefilter"
"github.com/hyperledger/fabric/orderer/ledger"
cb "github.com/hyperledger/fabric/protos/common"
"github.com/hyperledger/fabric/protos/utils"
)
// Consenter defines the backing ordering mechanism.
type Consenter interface {
	// HandleChain should create and return a reference to a Chain for the given set of resources.
	// It will only be invoked for a given chain once per process. In general, errors will be treated
	// as irrecoverable and cause system shutdown. See the description of Chain for more details
	// The second argument to HandleChain is a pointer to the metadata stored on the `ORDERER` slot of
	// the last block committed to the ledger of this Chain. For a new chain, this metadata will be
	// nil, as this field is not set on the genesis block
	HandleChain(support ConsenterSupport, metadata *cb.Metadata) (Chain, error)
}
// Chain defines a way to inject messages for ordering.
// Note, that in order to allow flexibility in the implementation, it is the responsibility of the implementer
// to take the ordered messages, send them through the blockcutter.Receiver supplied via HandleChain to cut blocks,
// and ultimately write the ledger also supplied via HandleChain. This flow allows for two primary flows
// 1. Messages are ordered into a stream, the stream is cut into blocks, the blocks are committed (solo, kafka)
// 2. Messages are cut into blocks, the blocks are ordered, then the blocks are committed (sbft)
type Chain interface {
	// Enqueue accepts a message and returns true on acceptance, or false on shutdown
	Enqueue(env *cb.Envelope) bool

	// Start should allocate whatever resources are needed for staying up to date with the chain.
	// Typically, this involves creating a thread which reads from the ordering source, passes those
	// messages to a block cutter, and writes the resulting blocks to the ledger
	Start()

	// Halt frees the resources which were allocated for this Chain.
	Halt()
}
// ConsenterSupport provides the resources available to a Consenter implementation.
type ConsenterSupport interface {
	crypto.LocalSigner

	// BlockCutter returns the block-cutting helper used to batch messages into blocks.
	BlockCutter() blockcutter.Receiver

	// SharedConfig returns the orderer configuration shared across this channel.
	SharedConfig() config.Orderer

	// CreateNextBlock creates the next block of the chain from the given messages.
	CreateNextBlock(messages []*cb.Envelope) *cb.Block

	// WriteBlock commits the given block to the ledger; when non-nil, encodedMetadataValue
	// is stored in the block's ORDERER metadata slot.
	WriteBlock(block *cb.Block, committers []filter.Committer, encodedMetadataValue []byte) *cb.Block

	// ChainID returns the chain ID this specific consenter instance is associated with
	ChainID() string

	// Height returns the number of blocks on the chain this specific consenter instance is associated with
	Height() uint64
}
// ChainSupport provides a wrapper for the resources backing a chain.
type ChainSupport interface {
	// This interface is actually the union with the deliver.Support but because of a golang
	// limitation https://github.com/golang/go/issues/6977 the methods must be explicitly declared

	// PolicyManager returns the current policy manager as specified by the chain config
	PolicyManager() policies.Manager

	// Reader returns the chain Reader for the chain
	Reader() ledger.Reader

	broadcast.Support
	ConsenterSupport

	// ProposeConfigUpdate applies a CONFIG_UPDATE to an existing config to produce a *cb.ConfigEnvelope
	ProposeConfigUpdate(env *cb.Envelope) (*cb.ConfigEnvelope, error)
}
// chainSupport bundles all per-channel resources needed by the ordering path.
type chainSupport struct {
	*ledgerResources

	// chain is the consensus implementation backing this channel.
	chain Chain
	// cutter batches ordered messages into blocks.
	cutter blockcutter.Receiver
	// filters is the rule set applied to messages for this channel.
	filters *filter.RuleSet
	// signer produces signature headers and signatures for block metadata.
	signer crypto.LocalSigner
	// lastConfig is the number of the block which contained the most recent config.
	lastConfig uint64
	// lastConfigSeq is the config sequence at which lastConfig was recorded.
	lastConfigSeq uint64
}
// newChainSupport assembles the chainSupport for one channel: it creates the
// block cutter, looks up the consenter registered for the channel's consensus
// type, recovers the consenter metadata from the tip of the ledger, and asks
// the consenter to construct the backing Chain. Any failure is fatal.
func newChainSupport(
	filters *filter.RuleSet,
	ledgerResources *ledgerResources,
	consenters map[string]Consenter,
	signer crypto.LocalSigner,
) *chainSupport {

	cutter := blockcutter.NewReceiverImpl(ledgerResources.SharedConfig(), filters)
	consenterType := ledgerResources.SharedConfig().ConsensusType()
	consenter, ok := consenters[consenterType]
	if !ok {
		logger.Fatalf("Error retrieving consenter of type: %s", consenterType)
	}

	cs := &chainSupport{
		ledgerResources: ledgerResources,
		cutter:          cutter,
		filters:         filters,
		signer:          signer,
	}

	var err error

	lastBlock := ledger.GetBlock(cs.Reader(), cs.Reader().Height()-1)

	// Assuming a block created with cb.NewBlock(), this should not
	// error even if the orderer metadata is an empty byte slice
	metadata, err := utils.GetMetadataFromBlock(lastBlock, cb.BlockMetadataIndex_ORDERER)
	if err != nil {
		logger.Fatalf("[channel: %s] Error extracting orderer metadata: %s", cs.ChainID(), err)
	}
	logger.Debugf("[channel: %s] Retrieved metadata for tip of chain (block #%d): %+v", cs.ChainID(), cs.Reader().Height()-1, metadata)

	cs.chain, err = consenter.HandleChain(cs, metadata)
	if err != nil {
		logger.Fatalf("[channel: %s] Error creating consenter: %s", cs.ChainID(), err)
	}

	return cs
}
// createStandardFilters creates the set of filters for a normal (non-system) chain:
// empty-message rejection, max-size enforcement, writer-policy signature checks,
// and config transaction validation, ending with an unconditional accept.
func createStandardFilters(ledgerResources *ledgerResources) *filter.RuleSet {
	return filter.NewRuleSet([]filter.Rule{
		filter.EmptyRejectRule,
		sizefilter.MaxBytesRule(ledgerResources.SharedConfig().BatchSize().AbsoluteMaxBytes),
		sigfilter.New(policies.ChannelWriters, ledgerResources.PolicyManager()),
		configtxfilter.NewFilter(ledgerResources),
		filter.AcceptRule,
	})

}
// createSystemChainFilters creates the set of filters for the ordering system chain.
// It is identical to the standard set except that a system-chain filter is
// inserted before the configtx filter.
func createSystemChainFilters(ml *multiLedger, ledgerResources *ledgerResources) *filter.RuleSet {
	return filter.NewRuleSet([]filter.Rule{
		filter.EmptyRejectRule,
		sizefilter.MaxBytesRule(ledgerResources.SharedConfig().BatchSize().AbsoluteMaxBytes),
		sigfilter.New(policies.ChannelWriters, ledgerResources.PolicyManager()),
		newSystemChainFilter(ledgerResources, ml),
		configtxfilter.NewFilter(ledgerResources),
		filter.AcceptRule,
	})
}
// start launches the backing consensus Chain for this channel.
func (cs *chainSupport) start() {
	cs.chain.Start()
}
// NewSignatureHeader delegates to the local signer to produce a signature header.
func (cs *chainSupport) NewSignatureHeader() (*cb.SignatureHeader, error) {
	return cs.signer.NewSignatureHeader()
}
// Sign delegates to the local signer to sign the given message.
func (cs *chainSupport) Sign(message []byte) ([]byte, error) {
	return cs.signer.Sign(message)
}
// Filters returns the rule set applied to messages for this channel.
func (cs *chainSupport) Filters() *filter.RuleSet {
	return cs.filters
}
// BlockCutter returns the receiver which batches messages into blocks.
func (cs *chainSupport) BlockCutter() blockcutter.Receiver {
	return cs.cutter
}
// Reader returns the ledger reader for this channel.
func (cs *chainSupport) Reader() ledger.Reader {
	return cs.ledger
}
// Enqueue hands the envelope to the backing consensus Chain for ordering.
// It returns true on acceptance, or false on shutdown (see Chain.Enqueue).
func (cs *chainSupport) Enqueue(env *cb.Envelope) bool {
	return cs.chain.Enqueue(env)
}
// CreateNextBlock builds the next block of this channel's ledger from the given messages.
func (cs *chainSupport) CreateNextBlock(messages []*cb.Envelope) *cb.Block {
	return ledger.CreateNextBlock(cs.ledger, messages)
}
// addBlockSignature signs the block with the local signer and stores the
// signature in the BlockMetadataIndex_SIGNATURES slot of the block metadata.
// NOTE: two leftover development logger.Debugf("%+v", ...) statements that
// dumped the entire chainSupport struct on every invocation were removed.
func (cs *chainSupport) addBlockSignature(block *cb.Block) {
	blockSignature := &cb.MetadataSignature{
		SignatureHeader: utils.MarshalOrPanic(utils.NewSignatureHeaderOrPanic(cs.signer)),
	}

	// Note, this value is intentionally nil, as this metadata is only about the signature, there is no additional metadata
	// information required beyond the fact that the metadata item is signed.
	blockSignatureValue := []byte(nil)

	// The signature covers the (nil) value, the signature header, and the block header.
	blockSignature.Signature = utils.SignOrPanic(cs.signer, util.ConcatenateBytes(blockSignatureValue, blockSignature.SignatureHeader, block.Header.Bytes()))

	block.Metadata.Metadata[cb.BlockMetadataIndex_SIGNATURES] = utils.MarshalOrPanic(&cb.Metadata{
		Value: blockSignatureValue,
		Signatures: []*cb.MetadataSignature{
			blockSignature,
		},
	})
}
// addLastConfigSignature records the block number of the most recent config
// block, signed by the local signer, into the BlockMetadataIndex_LAST_CONFIG
// slot of the block metadata.
func (cs *chainSupport) addLastConfigSignature(block *cb.Block) {
	configSeq := cs.Sequence()
	// If the config sequence advanced since the last write, record this
	// block's number as the latest config block.
	if configSeq > cs.lastConfigSeq {
		cs.lastConfig = block.Header.Number
		cs.lastConfigSeq = configSeq
	}

	lastConfigSignature := &cb.MetadataSignature{
		SignatureHeader: utils.MarshalOrPanic(utils.NewSignatureHeaderOrPanic(cs.signer)),
	}

	lastConfigValue := utils.MarshalOrPanic(&cb.LastConfig{Index: cs.lastConfig})

	// The signature covers the value, the signature header, and the block header.
	lastConfigSignature.Signature = utils.SignOrPanic(cs.signer, util.ConcatenateBytes(lastConfigValue, lastConfigSignature.SignatureHeader, block.Header.Bytes()))

	block.Metadata.Metadata[cb.BlockMetadataIndex_LAST_CONFIG] = utils.MarshalOrPanic(&cb.Metadata{
		Value: lastConfigValue,
		Signatures: []*cb.MetadataSignature{
			lastConfigSignature,
		},
	})
}
// WriteBlock first commits all pending filter committers, stamps the orderer
// metadata slot (when encodedMetadataValue is non-nil), signs the block and
// its last-config metadata, and appends the block to the ledger. It panics if
// the append fails.
func (cs *chainSupport) WriteBlock(block *cb.Block, committers []filter.Committer, encodedMetadataValue []byte) *cb.Block {
	for _, committer := range committers {
		committer.Commit()
	}
	// Set the orderer-related metadata field
	if encodedMetadataValue != nil {
		block.Metadata.Metadata[cb.BlockMetadataIndex_ORDERER] = utils.MarshalOrPanic(&cb.Metadata{Value: encodedMetadataValue})
	}
	cs.addBlockSignature(block)
	cs.addLastConfigSignature(block)

	err := cs.ledger.Append(block)
	if err != nil {
		logger.Panicf("[channel: %s] Could not append block: %s", cs.ChainID(), err)
	}
	return block
}
// Height returns the number of blocks in this channel's ledger.
func (cs *chainSupport) Height() uint64 {
	return cs.Reader().Height()
}
| king3000/fabric | orderer/multichain/chainsupport.go | GO | apache-2.0 | 9,884 |
# Shared UI helpers that map model types to the icons used when rendering
# them (e.g. in topology views).
module UiServiceMixin
  # Returns a hash mapping entity-type symbols to icon descriptors.
  # Glyph entries carry a unicode codepoint plus the font family defining it;
  # image entries carry the asset path of a vendor SVG.
  def icons
    {
      :ContainerReplicator => {:type => "glyph", :icon => "\uE624", :fontfamily => "PatternFlyIcons-webfont"}, # pficon-replicator
      :ContainerGroup => {:type => "glyph", :icon => "\uF1B3", :fontfamily => "FontAwesome"}, # fa-cubes
      :ContainerNode => {:type => "glyph", :icon => "\uE621", :fontfamily => "PatternFlyIcons-webfont"}, # pficon-container-node
      :ContainerService => {:type => "glyph", :icon => "\uE61E", :fontfamily => "PatternFlyIcons-webfont"}, # pficon-service
      :ContainerRoute => {:type => "glyph", :icon => "\uE625", :fontfamily => "PatternFlyIcons-webfont"}, # pficon-route
      :Container => {:type => "glyph", :icon => "\uF1B2", :fontfamily => "FontAwesome"}, # fa-cube
      :Host => {:type => "glyph", :icon => "\uE600", :fontfamily => "PatternFlyIcons-webfont"}, # pficon-screen
      :Vm => {:type => "glyph", :icon => "\uE90f", :fontfamily => "PatternFlyIcons-webfont"}, # pficon-virtual-machine
      :MiddlewareDatasource => {:type => "glyph", :icon => "\uF1C0", :fontfamily => "FontAwesome"}, # fa-database
      :MiddlewareDeployment => {:type => "glyph", :icon => "\uE603", :fontfamily => "icomoon"}, # product-report
      :MiddlewareDeploymentEar => {:type => "glyph", :icon => "\uE626", :fontfamily => "icomoon"}, # product-file-ear-o
      :MiddlewareDeploymentWar => {:type => "glyph", :icon => "\uE627", :fontfamily => "icomoon"}, # product-file-war-o
      :MiddlewareDomain => {:type => "glyph", :icon => "\uF0E8", :fontfamily => "FontAwesome"}, # fa-sitemap
      :MiddlewareServerGroup => {:type => "glyph", :icon => "\uF00A", :fontfamily => "FontAwesome"}, # fa-th

      :Kubernetes => {:type => "image", :icon => provider_icon(:Kubernetes)},
      :Openshift => {:type => "image", :icon => provider_icon(:Openshift)},
      :OpenshiftEnterprise => {:type => "image", :icon => provider_icon(:OpenshiftEnterprise)},
      :Atomic => {:type => "image", :icon => provider_icon(:Atomic)},
      :AtomicEnterprise => {:type => "image", :icon => provider_icon(:AtomicEnterprise)},
    }
  end

  # Resolves the asset path of the vendor SVG for the given provider type,
  # e.g. :OpenshiftEnterprise -> "svg/vendor-openshift_enterprise.svg".
  def provider_icon(provider_type)
    file_name = "svg/vendor-#{provider_type.to_s.underscore.downcase}.svg"
    ActionController::Base.helpers.image_path(file_name)
  end
end
| KevinLoiseau/manageiq | app/models/mixins/ui_service_mixin.rb | Ruby | apache-2.0 | 2,537 |
// HTMLParser Library - A java-based parser for HTML
// http://htmlparser.org
// Copyright (C) 2006 Somik Raha
//
// Revision Control Information
//
// $URL: https://svn.sourceforge.net/svnroot/htmlparser/trunk/parser/src/main/java/org/htmlparser/tags/HeadingTag.java $
// $Author: derrickoswald $
// $Date: 2006-09-16 10:44:17 -0400 (Sat, 16 Sep 2006) $
// $Revision: 4 $
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the Common Public License; either
// version 1.0 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// Common Public License for more details.
//
// You should have received a copy of the Common Public License
// along with this library; if not, the license is available from
// the Open Source Initiative (OSI) website:
// http://opensource.org/licenses/cpl1.0.php
package org.htmlparser.tags;
/**
 * Represents an HTML heading tag (h1 through h6).
 */
public class HeadingTag extends CompositeTag
{
    /**
     * Tag names recognized as headings by this class.
     */
    private static final String[] HEADING_NAMES = {"H1", "H2", "H3", "H4", "H5", "H6"};

    /**
     * Start-tag names whose appearance terminates an open heading.
     */
    private static final String[] ENDER_NAMES = {"H1", "H2", "H3", "H4", "H5", "H6", "PARAM"};

    /**
     * End-tag names whose appearance terminates an open heading.
     */
    private static final String[] END_TAG_ENDER_NAMES = {"BODY", "HTML"};

    /**
     * Creates a new heading tag.
     */
    public HeadingTag ()
    {
    }

    /**
     * Returns the set of names handled by this tag.
     * @return The names to be matched that create tags of this type.
     */
    public String[] getIds ()
    {
        return HEADING_NAMES;
    }

    /**
     * Returns the set of tag names that cause this tag to finish.
     * @return The names of following tags that stop further scanning.
     */
    public String[] getEnders ()
    {
        return ENDER_NAMES;
    }

    /**
     * Returns the set of end tag names that cause this tag to finish.
     * @return The names of following end tags that stop further scanning.
     */
    public String[] getEndTagEnders ()
    {
        return END_TAG_ENDER_NAMES;
    }
}
| patrickfav/tuwien | master/swt workspace/HTMLParser/src/org/htmlparser/tags/HeadingTag.java | Java | apache-2.0 | 2,431 |
using System;
namespace JetBrains.ReSharper.Koans.Refactoring
{
namespace ExtractClass
{
// Extract Class
//
// Creates a new class based on members in the existing class.
// Updates references to the extracted members to refer to
// an instance of the new class
//
// No keyboard shortcut. Invoke via Refactor This menu
// <shortcut id="Refactor This...">Ctrl+Shift+R</shortcut>
public class Person
{
    // Simple personal details; the address-related properties below are the
    // extraction target for this exercise.
    public string Forename { get; set; }
    public string Surname { get; set; }
    public int Age { get; set; }
    // 1. Extract address to new class
    //    Select the members of the class to extract (the 5 properties below)
    //    Invoke Refactor This -> Extract Class
    //    ReSharper shows dialog with properties already selected to move to extracted class
    //    Give name to extracted class ("Address" - ReSharper then populates reference to be extracted "address")
    //    By default, the original properties are removed
    //    Select from the drop down for "Source class member":
    //      None - original property is removed
    //      Create copy - a copy of the original property is left
    //      Create delegating wrapper - the original property calls into the new instance of the extracted class
    //    Any usages are updated to use the new Address property
    public string HouseNumber { get; set; }
    public string Street { get; set; }
    public string County { get; set; }
    public string PostCode { get; set; }
    public string Country { get; set; }
}
public class PersonConsumer
{
    // Exercises every address property so the Extract Class refactoring has
    // usages to rewrite (they become person.Address.* after extraction).
    public void Method()
    {
        var person = new Person();
        // 2. Ensure all properties still work
        Console.WriteLine(person.HouseNumber);
        Console.WriteLine(person.Street);
        Console.WriteLine(person.County);
        Console.WriteLine(person.PostCode);
        Console.WriteLine(person.Country);
    }
}
}
} | JetBrains/resharper-workshop | localized/ja/04-Refactoring/Refactoring/19-Extract_class.cs | C# | apache-2.0 | 2,245 |
package batch
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/date"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/satori/go.uuid"
"net/http"
)
// FileClient is a client for issuing file-related REST requests to the Azure
// Batch service. It embeds BaseClient for the base URI and retry settings.
type FileClient struct {
	BaseClient
}
// NewFileClient creates an instance of the FileClient client using the
// default Batch service endpoint.
func NewFileClient() FileClient {
	client := NewFileClientWithBaseURI(DefaultBaseURI)
	return client
}
// NewFileClientWithBaseURI creates an instance of the FileClient client
// against the supplied endpoint.
func NewFileClientWithBaseURI(baseURI string) FileClient {
	base := NewWithBaseURI(baseURI)
	return FileClient{BaseClient: base}
}
// DeleteFromComputeNode deletes the specified file (or directory) from a compute node.
// Parameters:
// poolID - the ID of the pool that contains the compute node.
// nodeID - the ID of the compute node from which you want to delete the file.
// filePath - the path to the file or directory that you want to delete.
// recursive - whether to delete children of a directory. If the filePath parameter represents a directory
// instead of a file, you can set recursive to true to delete the directory and all of the files and
// subdirectories in it. If recursive is false then the directory must be empty or deletion will fail.
// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
// seconds.
// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
// returnClientRequestID - whether the server should return the client-request-id in the response.
// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
// time; set it explicitly if you are calling the REST API directly.
func (client FileClient) DeleteFromComputeNode(ctx context.Context, poolID string, nodeID string, filePath string, recursive *bool, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) {
	// Prepare -> send -> respond; each stage wraps its failure with context.
	request, prepErr := client.DeleteFromComputeNodePreparer(ctx, poolID, nodeID, filePath, recursive, timeout, clientRequestID, returnClientRequestID, ocpDate)
	if prepErr != nil {
		return result, autorest.NewErrorWithError(prepErr, "batch.FileClient", "DeleteFromComputeNode", nil, "Failure preparing request")
	}
	response, sendErr := client.DeleteFromComputeNodeSender(request)
	if sendErr != nil {
		result.Response = response
		return result, autorest.NewErrorWithError(sendErr, "batch.FileClient", "DeleteFromComputeNode", response, "Failure sending request")
	}
	result, err = client.DeleteFromComputeNodeResponder(response)
	if err != nil {
		err = autorest.NewErrorWithError(err, "batch.FileClient", "DeleteFromComputeNode", response, "Failure responding to request")
	}
	return result, err
}
// DeleteFromComputeNodePreparer prepares the DeleteFromComputeNode request.
// It builds a DELETE against /pools/{poolId}/nodes/{nodeId}/files/{filePath},
// attaching optional query parameters and Batch-specific headers only when
// the corresponding pointer arguments are non-nil.
func (client FileClient) DeleteFromComputeNodePreparer(ctx context.Context, poolID string, nodeID string, filePath string, recursive *bool, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"filePath": autorest.Encode("path", filePath),
		"nodeId":   autorest.Encode("path", nodeID),
		"poolId":   autorest.Encode("path", poolID),
	}
	const APIVersion = "2018-08-01.7.0"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}
	if recursive != nil {
		queryParameters["recursive"] = autorest.Encode("query", *recursive)
	}
	// The service default of 30 seconds is sent explicitly when the caller
	// does not supply a timeout.
	if timeout != nil {
		queryParameters["timeout"] = autorest.Encode("query", *timeout)
	} else {
		queryParameters["timeout"] = autorest.Encode("query", 30)
	}
	preparer := autorest.CreatePreparer(
		autorest.AsDelete(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/files/{filePath}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	if clientRequestID != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
	}
	// return-client-request-id defaults to false when unspecified.
	if returnClientRequestID != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
	} else {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("return-client-request-id", autorest.String(false)))
	}
	if ocpDate != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
	}
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteFromComputeNodeSender sends the DeleteFromComputeNode request. The
// method will close the http.Response Body if it receives an error.
func (client FileClient) DeleteFromComputeNodeSender(req *http.Request) (*http.Response, error) {
	retryPolicy := autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)
	return autorest.SendWithSender(client, req, retryPolicy)
}
// DeleteFromComputeNodeResponder handles the response to the
// DeleteFromComputeNode request. The method always closes the
// http.Response Body.
func (client FileClient) DeleteFromComputeNodeResponder(resp *http.Response) (result autorest.Response, err error) {
	result.Response = resp
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByClosing())
	return result, err
}
// DeleteFromTask deletes the specified task file (or directory).
// Parameters:
// jobID - the ID of the job that contains the task.
// taskID - the ID of the task whose file you want to delete.
// filePath - the path to the task file or directory that you want to delete.
// recursive - whether to delete children of a directory. If the filePath parameter represents a directory
// instead of a file, you can set recursive to true to delete the directory and all of the files and
// subdirectories in it. If recursive is false then the directory must be empty or deletion will fail.
// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
// seconds.
// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
// returnClientRequestID - whether the server should return the client-request-id in the response.
// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
// time; set it explicitly if you are calling the REST API directly.
func (client FileClient) DeleteFromTask(ctx context.Context, jobID string, taskID string, filePath string, recursive *bool, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) {
	// Prepare -> send -> respond; each stage wraps its failure with context.
	request, prepErr := client.DeleteFromTaskPreparer(ctx, jobID, taskID, filePath, recursive, timeout, clientRequestID, returnClientRequestID, ocpDate)
	if prepErr != nil {
		return result, autorest.NewErrorWithError(prepErr, "batch.FileClient", "DeleteFromTask", nil, "Failure preparing request")
	}
	response, sendErr := client.DeleteFromTaskSender(request)
	if sendErr != nil {
		result.Response = response
		return result, autorest.NewErrorWithError(sendErr, "batch.FileClient", "DeleteFromTask", response, "Failure sending request")
	}
	result, err = client.DeleteFromTaskResponder(response)
	if err != nil {
		err = autorest.NewErrorWithError(err, "batch.FileClient", "DeleteFromTask", response, "Failure responding to request")
	}
	return result, err
}
// DeleteFromTaskPreparer prepares the DeleteFromTask request.
// It builds a DELETE against /jobs/{jobId}/tasks/{taskId}/files/{filePath},
// attaching optional query parameters and Batch-specific headers only when
// the corresponding pointer arguments are non-nil.
func (client FileClient) DeleteFromTaskPreparer(ctx context.Context, jobID string, taskID string, filePath string, recursive *bool, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"filePath": autorest.Encode("path", filePath),
		"jobId":    autorest.Encode("path", jobID),
		"taskId":   autorest.Encode("path", taskID),
	}
	const APIVersion = "2018-08-01.7.0"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}
	if recursive != nil {
		queryParameters["recursive"] = autorest.Encode("query", *recursive)
	}
	// The service default of 30 seconds is sent explicitly when the caller
	// does not supply a timeout.
	if timeout != nil {
		queryParameters["timeout"] = autorest.Encode("query", *timeout)
	} else {
		queryParameters["timeout"] = autorest.Encode("query", 30)
	}
	preparer := autorest.CreatePreparer(
		autorest.AsDelete(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/jobs/{jobId}/tasks/{taskId}/files/{filePath}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	if clientRequestID != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
	}
	// return-client-request-id defaults to false when unspecified.
	if returnClientRequestID != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
	} else {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("return-client-request-id", autorest.String(false)))
	}
	if ocpDate != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
	}
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteFromTaskSender sends the DeleteFromTask request. The method will
// close the http.Response Body if it receives an error.
func (client FileClient) DeleteFromTaskSender(req *http.Request) (*http.Response, error) {
	retryPolicy := autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)
	return autorest.SendWithSender(client, req, retryPolicy)
}
// DeleteFromTaskResponder handles the response to the DeleteFromTask
// request. The method always closes the http.Response Body.
func (client FileClient) DeleteFromTaskResponder(resp *http.Response) (result autorest.Response, err error) {
	result.Response = resp
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByClosing())
	return result, err
}
// GetFromComputeNode returns the content of the specified compute node file.
// Parameters:
// poolID - the ID of the pool that contains the compute node.
// nodeID - the ID of the compute node that contains the file.
// filePath - the path to the compute node file that you want to get the content of.
// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
// seconds.
// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
// returnClientRequestID - whether the server should return the client-request-id in the response.
// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
// time; set it explicitly if you are calling the REST API directly.
// ocpRange - the byte range to be retrieved. The default is to retrieve the entire file. The format is
// bytes=startRange-endRange.
// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
// operation will be performed only if the resource on the service has been modified since the specified time.
// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
// operation will be performed only if the resource on the service has not been modified since the specified
// time.
func (client FileClient) GetFromComputeNode(ctx context.Context, poolID string, nodeID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ocpRange string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result ReadCloser, err error) {
	// Prepare -> send -> respond; each stage wraps its failure with context.
	request, prepErr := client.GetFromComputeNodePreparer(ctx, poolID, nodeID, filePath, timeout, clientRequestID, returnClientRequestID, ocpDate, ocpRange, ifModifiedSince, ifUnmodifiedSince)
	if prepErr != nil {
		return result, autorest.NewErrorWithError(prepErr, "batch.FileClient", "GetFromComputeNode", nil, "Failure preparing request")
	}
	response, sendErr := client.GetFromComputeNodeSender(request)
	if sendErr != nil {
		result.Response = autorest.Response{Response: response}
		return result, autorest.NewErrorWithError(sendErr, "batch.FileClient", "GetFromComputeNode", response, "Failure sending request")
	}
	result, err = client.GetFromComputeNodeResponder(response)
	if err != nil {
		err = autorest.NewErrorWithError(err, "batch.FileClient", "GetFromComputeNode", response, "Failure responding to request")
	}
	return result, err
}
// GetFromComputeNodePreparer prepares the GetFromComputeNode request.
// It builds a GET against /pools/{poolId}/nodes/{nodeId}/files/{filePath},
// attaching optional query parameters and Batch/conditional headers only
// when the corresponding arguments are set.
func (client FileClient) GetFromComputeNodePreparer(ctx context.Context, poolID string, nodeID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ocpRange string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"filePath": autorest.Encode("path", filePath),
		"nodeId":   autorest.Encode("path", nodeID),
		"poolId":   autorest.Encode("path", poolID),
	}
	const APIVersion = "2018-08-01.7.0"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}
	// The service default of 30 seconds is sent explicitly when the caller
	// does not supply a timeout.
	if timeout != nil {
		queryParameters["timeout"] = autorest.Encode("query", *timeout)
	} else {
		queryParameters["timeout"] = autorest.Encode("query", 30)
	}
	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/files/{filePath}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	if clientRequestID != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
	}
	// return-client-request-id defaults to false when unspecified.
	if returnClientRequestID != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
	} else {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("return-client-request-id", autorest.String(false)))
	}
	if ocpDate != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
	}
	// ocp-range selects a byte range of the file; empty means the whole file.
	if len(ocpRange) > 0 {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("ocp-range", autorest.String(ocpRange)))
	}
	if ifModifiedSince != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
	}
	if ifUnmodifiedSince != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
	}
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetFromComputeNodeSender sends the GetFromComputeNode request. The method
// will close the http.Response Body if it receives an error.
func (client FileClient) GetFromComputeNodeSender(req *http.Request) (*http.Response, error) {
	retryPolicy := autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)
	return autorest.SendWithSender(client, req, retryPolicy)
}
// GetFromComputeNodeResponder handles the response to the GetFromComputeNode
// request. Unlike the other responders in this file it does NOT close the
// response body (there is no autorest.ByClosing in the chain): the body is
// handed back to the caller via result.Value, and the caller must close it.
func (client FileClient) GetFromComputeNodeResponder(resp *http.Response) (result ReadCloser, err error) {
	// Expose the raw body stream to the caller.
	result.Value = &resp.Body
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK))
	result.Response = autorest.Response{Response: resp}
	return
}
// GetFromTask returns the content of the specified task file.
// Parameters:
// jobID - the ID of the job that contains the task.
// taskID - the ID of the task whose file you want to retrieve.
// filePath - the path to the task file that you want to get the content of.
// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
// seconds.
// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
// returnClientRequestID - whether the server should return the client-request-id in the response.
// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
// time; set it explicitly if you are calling the REST API directly.
// ocpRange - the byte range to be retrieved. The default is to retrieve the entire file. The format is
// bytes=startRange-endRange.
// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
// operation will be performed only if the resource on the service has been modified since the specified time.
// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
// operation will be performed only if the resource on the service has not been modified since the specified
// time.
func (client FileClient) GetFromTask(ctx context.Context, jobID string, taskID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ocpRange string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result ReadCloser, err error) {
	// Prepare -> send -> respond; each stage wraps its failure with context.
	request, prepErr := client.GetFromTaskPreparer(ctx, jobID, taskID, filePath, timeout, clientRequestID, returnClientRequestID, ocpDate, ocpRange, ifModifiedSince, ifUnmodifiedSince)
	if prepErr != nil {
		return result, autorest.NewErrorWithError(prepErr, "batch.FileClient", "GetFromTask", nil, "Failure preparing request")
	}
	response, sendErr := client.GetFromTaskSender(request)
	if sendErr != nil {
		result.Response = autorest.Response{Response: response}
		return result, autorest.NewErrorWithError(sendErr, "batch.FileClient", "GetFromTask", response, "Failure sending request")
	}
	result, err = client.GetFromTaskResponder(response)
	if err != nil {
		err = autorest.NewErrorWithError(err, "batch.FileClient", "GetFromTask", response, "Failure responding to request")
	}
	return result, err
}
// GetFromTaskPreparer prepares the GetFromTask request.
// It builds a GET against /jobs/{jobId}/tasks/{taskId}/files/{filePath},
// attaching optional query parameters and Batch/conditional headers only
// when the corresponding arguments are set.
func (client FileClient) GetFromTaskPreparer(ctx context.Context, jobID string, taskID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ocpRange string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"filePath": autorest.Encode("path", filePath),
		"jobId":    autorest.Encode("path", jobID),
		"taskId":   autorest.Encode("path", taskID),
	}
	const APIVersion = "2018-08-01.7.0"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}
	// The service default of 30 seconds is sent explicitly when the caller
	// does not supply a timeout.
	if timeout != nil {
		queryParameters["timeout"] = autorest.Encode("query", *timeout)
	} else {
		queryParameters["timeout"] = autorest.Encode("query", 30)
	}
	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/jobs/{jobId}/tasks/{taskId}/files/{filePath}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	if clientRequestID != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
	}
	// return-client-request-id defaults to false when unspecified.
	if returnClientRequestID != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
	} else {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("return-client-request-id", autorest.String(false)))
	}
	if ocpDate != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
	}
	// ocp-range selects a byte range of the file; empty means the whole file.
	if len(ocpRange) > 0 {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("ocp-range", autorest.String(ocpRange)))
	}
	if ifModifiedSince != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
	}
	if ifUnmodifiedSince != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
	}
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetFromTaskSender sends the GetFromTask request. The method will close the
// http.Response Body if it receives an error.
func (client FileClient) GetFromTaskSender(req *http.Request) (*http.Response, error) {
	retryPolicy := autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)
	return autorest.SendWithSender(client, req, retryPolicy)
}
// GetFromTaskResponder handles the response to the GetFromTask request.
// Unlike the other responders in this file it does NOT close the response
// body (there is no autorest.ByClosing in the chain): the body is handed
// back to the caller via result.Value, and the caller must close it.
func (client FileClient) GetFromTaskResponder(resp *http.Response) (result ReadCloser, err error) {
	// Expose the raw body stream to the caller.
	result.Value = &resp.Body
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK))
	result.Response = autorest.Response{Response: resp}
	return
}
// GetPropertiesFromComputeNode gets the properties of the specified compute node file.
// Parameters:
// poolID - the ID of the pool that contains the compute node.
// nodeID - the ID of the compute node that contains the file.
// filePath - the path to the compute node file that you want to get the properties of.
// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
// seconds.
// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
// returnClientRequestID - whether the server should return the client-request-id in the response.
// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
// time; set it explicitly if you are calling the REST API directly.
// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
// operation will be performed only if the resource on the service has been modified since the specified time.
// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
// operation will be performed only if the resource on the service has not been modified since the specified
// time.
func (client FileClient) GetPropertiesFromComputeNode(ctx context.Context, poolID string, nodeID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
	// Prepare -> send -> respond; each stage wraps its failure with context.
	request, prepErr := client.GetPropertiesFromComputeNodePreparer(ctx, poolID, nodeID, filePath, timeout, clientRequestID, returnClientRequestID, ocpDate, ifModifiedSince, ifUnmodifiedSince)
	if prepErr != nil {
		return result, autorest.NewErrorWithError(prepErr, "batch.FileClient", "GetPropertiesFromComputeNode", nil, "Failure preparing request")
	}
	response, sendErr := client.GetPropertiesFromComputeNodeSender(request)
	if sendErr != nil {
		result.Response = response
		return result, autorest.NewErrorWithError(sendErr, "batch.FileClient", "GetPropertiesFromComputeNode", response, "Failure sending request")
	}
	result, err = client.GetPropertiesFromComputeNodeResponder(response)
	if err != nil {
		err = autorest.NewErrorWithError(err, "batch.FileClient", "GetPropertiesFromComputeNode", response, "Failure responding to request")
	}
	return result, err
}
// GetPropertiesFromComputeNodePreparer prepares the GetPropertiesFromComputeNode request.
// It builds a HEAD against /pools/{poolId}/nodes/{nodeId}/files/{filePath},
// attaching optional query parameters and Batch/conditional headers only
// when the corresponding pointer arguments are non-nil.
func (client FileClient) GetPropertiesFromComputeNodePreparer(ctx context.Context, poolID string, nodeID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"filePath": autorest.Encode("path", filePath),
		"nodeId":   autorest.Encode("path", nodeID),
		"poolId":   autorest.Encode("path", poolID),
	}
	const APIVersion = "2018-08-01.7.0"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}
	// The service default of 30 seconds is sent explicitly when the caller
	// does not supply a timeout.
	if timeout != nil {
		queryParameters["timeout"] = autorest.Encode("query", *timeout)
	} else {
		queryParameters["timeout"] = autorest.Encode("query", 30)
	}
	preparer := autorest.CreatePreparer(
		autorest.AsHead(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/files/{filePath}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	if clientRequestID != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
	}
	// return-client-request-id defaults to false when unspecified.
	if returnClientRequestID != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
	} else {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("return-client-request-id", autorest.String(false)))
	}
	if ocpDate != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
	}
	if ifModifiedSince != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
	}
	if ifUnmodifiedSince != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
	}
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetPropertiesFromComputeNodeSender sends the GetPropertiesFromComputeNode
// request. The method will close the http.Response Body if it receives an
// error.
func (client FileClient) GetPropertiesFromComputeNodeSender(req *http.Request) (*http.Response, error) {
	retryPolicy := autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)
	return autorest.SendWithSender(client, req, retryPolicy)
}
// GetPropertiesFromComputeNodeResponder handles the response to the
// GetPropertiesFromComputeNode request. The method always closes the
// http.Response Body.
func (client FileClient) GetPropertiesFromComputeNodeResponder(resp *http.Response) (result autorest.Response, err error) {
	result.Response = resp
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByClosing())
	return result, err
}
// GetPropertiesFromTask gets the properties of the specified task file.
// Parameters:
// jobID - the ID of the job that contains the task.
// taskID - the ID of the task whose file you want to get the properties of.
// filePath - the path to the task file that you want to get the properties of.
// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
// seconds.
// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
// returnClientRequestID - whether the server should return the client-request-id in the response.
// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
// time; set it explicitly if you are calling the REST API directly.
// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
// operation will be performed only if the resource on the service has been modified since the specified time.
// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
// operation will be performed only if the resource on the service has not been modified since the specified
// time.
func (client FileClient) GetPropertiesFromTask(ctx context.Context, jobID string, taskID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
	// Prepare -> send -> respond; each stage wraps its failure with context.
	request, prepErr := client.GetPropertiesFromTaskPreparer(ctx, jobID, taskID, filePath, timeout, clientRequestID, returnClientRequestID, ocpDate, ifModifiedSince, ifUnmodifiedSince)
	if prepErr != nil {
		return result, autorest.NewErrorWithError(prepErr, "batch.FileClient", "GetPropertiesFromTask", nil, "Failure preparing request")
	}
	response, sendErr := client.GetPropertiesFromTaskSender(request)
	if sendErr != nil {
		result.Response = response
		return result, autorest.NewErrorWithError(sendErr, "batch.FileClient", "GetPropertiesFromTask", response, "Failure sending request")
	}
	result, err = client.GetPropertiesFromTaskResponder(response)
	if err != nil {
		err = autorest.NewErrorWithError(err, "batch.FileClient", "GetPropertiesFromTask", response, "Failure responding to request")
	}
	return result, err
}
// GetPropertiesFromTaskPreparer prepares the GetPropertiesFromTask request.
// It builds a HEAD against /jobs/{jobId}/tasks/{taskId}/files/{filePath},
// attaching optional query parameters and Batch/conditional headers only
// when the corresponding pointer arguments are non-nil.
func (client FileClient) GetPropertiesFromTaskPreparer(ctx context.Context, jobID string, taskID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"filePath": autorest.Encode("path", filePath),
		"jobId":    autorest.Encode("path", jobID),
		"taskId":   autorest.Encode("path", taskID),
	}
	const APIVersion = "2018-08-01.7.0"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}
	// The service default of 30 seconds is sent explicitly when the caller
	// does not supply a timeout.
	if timeout != nil {
		queryParameters["timeout"] = autorest.Encode("query", *timeout)
	} else {
		queryParameters["timeout"] = autorest.Encode("query", 30)
	}
	preparer := autorest.CreatePreparer(
		autorest.AsHead(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/jobs/{jobId}/tasks/{taskId}/files/{filePath}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	if clientRequestID != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
	}
	// return-client-request-id defaults to false when unspecified.
	if returnClientRequestID != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
	} else {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("return-client-request-id", autorest.String(false)))
	}
	if ocpDate != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
	}
	if ifModifiedSince != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
	}
	if ifUnmodifiedSince != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
	}
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetPropertiesFromTaskSender sends the GetPropertiesFromTask request. The method will close the
// http.Response Body if it receives an error.
// GetPropertiesFromTaskSender sends the GetPropertiesFromTask request. The
// method will close the http.Response Body if it receives an error.
func (client FileClient) GetPropertiesFromTaskSender(req *http.Request) (*http.Response, error) {
	// Retry transient status codes using the client-configured attempt
	// count and backoff duration.
	retry := autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)
	return autorest.SendWithSender(client, req, retry)
}
// GetPropertiesFromTaskResponder handles the response to the GetPropertiesFromTask request. The method always
// closes the http.Response Body.
// GetPropertiesFromTaskResponder handles the response to the
// GetPropertiesFromTask request; the http.Response Body is always closed.
func (client FileClient) GetPropertiesFromTaskResponder(resp *http.Response) (result autorest.Response, err error) {
	// A 200 OK is the only success status for this operation.
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByClosing())
	result.Response = resp
	return result, err
}
// ListFromComputeNode sends the list from compute node request.
// Parameters:
// poolID - the ID of the pool that contains the compute node.
// nodeID - the ID of the compute node whose files you want to list.
// filter - an OData $filter clause. For more information on constructing this filter, see
// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files.
// recursive - whether to list children of a directory.
// maxResults - the maximum number of items to return in the response. A maximum of 1000 files can be returned.
// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
// seconds.
// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
// returnClientRequestID - whether the server should return the client-request-id in the response.
// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
// time; set it explicitly if you are calling the REST API directly.
func (client FileClient) ListFromComputeNode(ctx context.Context, poolID string, nodeID string, filter string, recursive *bool, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result NodeFileListResultPage, err error) {
	// maxResults, when supplied, must lie in [1, 1000]; reject bad values
	// client-side before any network traffic.
	if err := validation.Validate([]validation.Validation{
		{TargetValue: maxResults,
			Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false,
				Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil},
					{Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
				}}}}}); err != nil {
		return result, validation.NewError("batch.FileClient", "ListFromComputeNode", err.Error())
	}
	// Install the pager callback used by the page type to fetch the
	// next page of results.
	result.fn = client.listFromComputeNodeNextResults
	req, err := client.ListFromComputeNodePreparer(ctx, poolID, nodeID, filter, recursive, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
	if err != nil {
		err = autorest.NewErrorWithError(err, "batch.FileClient", "ListFromComputeNode", nil, "Failure preparing request")
		return
	}
	resp, err := client.ListFromComputeNodeSender(req)
	if err != nil {
		// Surface the raw response on the first page even on failure.
		result.nflr.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "batch.FileClient", "ListFromComputeNode", resp, "Failure sending request")
		return
	}
	result.nflr, err = client.ListFromComputeNodeResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "batch.FileClient", "ListFromComputeNode", resp, "Failure responding to request")
	}
	return
}
// ListFromComputeNodePreparer prepares the ListFromComputeNode request.
func (client FileClient) ListFromComputeNodePreparer(ctx context.Context, poolID string, nodeID string, filter string, recursive *bool, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
	// URL path segments; values are escaped by autorest.Encode.
	pathParameters := map[string]interface{}{
		"nodeId": autorest.Encode("path", nodeID),
		"poolId": autorest.Encode("path", poolID),
	}
	const APIVersion = "2018-08-01.7.0"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}
	// Optional OData filter; omitted entirely when empty.
	if len(filter) > 0 {
		queryParameters["$filter"] = autorest.Encode("query", filter)
	}
	if recursive != nil {
		queryParameters["recursive"] = autorest.Encode("query", *recursive)
	}
	// When the caller supplies no value, the generated client sends the
	// defaults explicitly: 1000 results per page, 30 second timeout.
	if maxResults != nil {
		queryParameters["maxresults"] = autorest.Encode("query", *maxResults)
	} else {
		queryParameters["maxresults"] = autorest.Encode("query", 1000)
	}
	if timeout != nil {
		queryParameters["timeout"] = autorest.Encode("query", *timeout)
	} else {
		queryParameters["timeout"] = autorest.Encode("query", 30)
	}
	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/files", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	// Optional client-tracing headers.
	if clientRequestID != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
	}
	// return-client-request-id defaults to false when not supplied.
	if returnClientRequestID != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
	} else {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("return-client-request-id", autorest.String(false)))
	}
	if ocpDate != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
	}
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListFromComputeNodeSender sends the ListFromComputeNode request. The method will close the
// http.Response Body if it receives an error.
// ListFromComputeNodeSender sends the ListFromComputeNode request. The
// method will close the http.Response Body if it receives an error.
func (client FileClient) ListFromComputeNodeSender(req *http.Request) (*http.Response, error) {
	// Retry transient status codes using the client-configured attempt
	// count and backoff duration.
	retry := autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)
	return autorest.SendWithSender(client, req, retry)
}
// ListFromComputeNodeResponder handles the response to the ListFromComputeNode request. The method always
// closes the http.Response Body.
// ListFromComputeNodeResponder handles the response to the
// ListFromComputeNode request; the http.Response Body is always closed.
func (client FileClient) ListFromComputeNodeResponder(resp *http.Response) (result NodeFileListResult, err error) {
	// Decode one JSON page of file entries into result.
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return result, err
}
// listFromComputeNodeNextResults retrieves the next set of results, if any.
// listFromComputeNodeNextResults retrieves the next page of results, if any.
func (client FileClient) listFromComputeNodeNextResults(lastResults NodeFileListResult) (result NodeFileListResult, err error) {
	// Build the request for the next page from the previous page's link.
	req, prepErr := lastResults.nodeFileListResultPreparer()
	if prepErr != nil {
		return result, autorest.NewErrorWithError(prepErr, "batch.FileClient", "listFromComputeNodeNextResults", nil, "Failure preparing next results request")
	}
	// A nil request means there are no further pages.
	if req == nil {
		return
	}
	resp, sendErr := client.ListFromComputeNodeSender(req)
	if sendErr != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(sendErr, "batch.FileClient", "listFromComputeNodeNextResults", resp, "Failure sending next results request")
	}
	result, err = client.ListFromComputeNodeResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "batch.FileClient", "listFromComputeNodeNextResults", resp, "Failure responding to next results request")
	}
	return
}
// ListFromComputeNodeComplete enumerates all values, automatically crossing page boundaries as required.
// ListFromComputeNodeComplete enumerates all values, automatically crossing
// page boundaries as required.
func (client FileClient) ListFromComputeNodeComplete(ctx context.Context, poolID string, nodeID string, filter string, recursive *bool, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result NodeFileListResultIterator, err error) {
	// Fetch the first page and seed the iterator with it.
	var first NodeFileListResultPage
	first, err = client.ListFromComputeNode(ctx, poolID, nodeID, filter, recursive, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
	result.page = first
	return
}
// ListFromTask sends the list from task request.
// Parameters:
// jobID - the ID of the job that contains the task.
// taskID - the ID of the task whose files you want to list.
// filter - an OData $filter clause. For more information on constructing this filter, see
// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files.
// recursive - whether to list children of the task directory. This parameter can be used in combination with
// the filter parameter to list specific type of files.
// maxResults - the maximum number of items to return in the response. A maximum of 1000 files can be returned.
// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
// seconds.
// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
// returnClientRequestID - whether the server should return the client-request-id in the response.
// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
// time; set it explicitly if you are calling the REST API directly.
func (client FileClient) ListFromTask(ctx context.Context, jobID string, taskID string, filter string, recursive *bool, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result NodeFileListResultPage, err error) {
	// maxResults, when supplied, must lie in [1, 1000]; reject bad values
	// client-side before any network traffic.
	if err := validation.Validate([]validation.Validation{
		{TargetValue: maxResults,
			Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false,
				Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil},
					{Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
				}}}}}); err != nil {
		return result, validation.NewError("batch.FileClient", "ListFromTask", err.Error())
	}
	// Install the pager callback used by the page type to fetch the
	// next page of results.
	result.fn = client.listFromTaskNextResults
	req, err := client.ListFromTaskPreparer(ctx, jobID, taskID, filter, recursive, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
	if err != nil {
		err = autorest.NewErrorWithError(err, "batch.FileClient", "ListFromTask", nil, "Failure preparing request")
		return
	}
	resp, err := client.ListFromTaskSender(req)
	if err != nil {
		// Surface the raw response on the first page even on failure.
		result.nflr.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "batch.FileClient", "ListFromTask", resp, "Failure sending request")
		return
	}
	result.nflr, err = client.ListFromTaskResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "batch.FileClient", "ListFromTask", resp, "Failure responding to request")
	}
	return
}
// ListFromTaskPreparer prepares the ListFromTask request.
func (client FileClient) ListFromTaskPreparer(ctx context.Context, jobID string, taskID string, filter string, recursive *bool, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
	// URL path segments; values are escaped by autorest.Encode.
	pathParameters := map[string]interface{}{
		"jobId":  autorest.Encode("path", jobID),
		"taskId": autorest.Encode("path", taskID),
	}
	const APIVersion = "2018-08-01.7.0"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}
	// Optional OData filter; omitted entirely when empty.
	if len(filter) > 0 {
		queryParameters["$filter"] = autorest.Encode("query", filter)
	}
	if recursive != nil {
		queryParameters["recursive"] = autorest.Encode("query", *recursive)
	}
	// When the caller supplies no value, the generated client sends the
	// defaults explicitly: 1000 results per page, 30 second timeout.
	if maxResults != nil {
		queryParameters["maxresults"] = autorest.Encode("query", *maxResults)
	} else {
		queryParameters["maxresults"] = autorest.Encode("query", 1000)
	}
	if timeout != nil {
		queryParameters["timeout"] = autorest.Encode("query", *timeout)
	} else {
		queryParameters["timeout"] = autorest.Encode("query", 30)
	}
	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/jobs/{jobId}/tasks/{taskId}/files", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	// Optional client-tracing headers.
	if clientRequestID != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
	}
	// return-client-request-id defaults to false when not supplied.
	if returnClientRequestID != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
	} else {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("return-client-request-id", autorest.String(false)))
	}
	if ocpDate != nil {
		preparer = autorest.DecoratePreparer(preparer,
			autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
	}
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListFromTaskSender sends the ListFromTask request. The method will close the
// http.Response Body if it receives an error.
// ListFromTaskSender sends the ListFromTask request. The method will close
// the http.Response Body if it receives an error.
func (client FileClient) ListFromTaskSender(req *http.Request) (*http.Response, error) {
	// Retry transient status codes using the client-configured attempt
	// count and backoff duration.
	retry := autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)
	return autorest.SendWithSender(client, req, retry)
}
// ListFromTaskResponder handles the response to the ListFromTask request. The method always
// closes the http.Response Body.
// ListFromTaskResponder handles the response to the ListFromTask request;
// the http.Response Body is always closed.
func (client FileClient) ListFromTaskResponder(resp *http.Response) (result NodeFileListResult, err error) {
	// Decode one JSON page of file entries into result.
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return result, err
}
// listFromTaskNextResults retrieves the next set of results, if any.
// listFromTaskNextResults retrieves the next page of results, if any.
func (client FileClient) listFromTaskNextResults(lastResults NodeFileListResult) (result NodeFileListResult, err error) {
	// Build the request for the next page from the previous page's link.
	req, prepErr := lastResults.nodeFileListResultPreparer()
	if prepErr != nil {
		return result, autorest.NewErrorWithError(prepErr, "batch.FileClient", "listFromTaskNextResults", nil, "Failure preparing next results request")
	}
	// A nil request means there are no further pages.
	if req == nil {
		return
	}
	resp, sendErr := client.ListFromTaskSender(req)
	if sendErr != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(sendErr, "batch.FileClient", "listFromTaskNextResults", resp, "Failure sending next results request")
	}
	result, err = client.ListFromTaskResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "batch.FileClient", "listFromTaskNextResults", resp, "Failure responding to next results request")
	}
	return
}
// ListFromTaskComplete enumerates all values, automatically crossing page boundaries as required.
// ListFromTaskComplete enumerates all values, automatically crossing page
// boundaries as required.
func (client FileClient) ListFromTaskComplete(ctx context.Context, jobID string, taskID string, filter string, recursive *bool, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result NodeFileListResultIterator, err error) {
	// Fetch the first page and seed the iterator with it.
	var first NodeFileListResultPage
	first, err = client.ListFromTask(ctx, jobID, taskID, filter, recursive, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
	result.page = first
	return
}
| anpingli/origin | vendor/github.com/Azure/azure-sdk-for-go/services/batch/2018-08-01.7.0/batch/file.go | GO | apache-2.0 | 47,380 |
/******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
/*
* @file
*/
#include "modules/planning/open_space/coarse_trajectory_generator/hybrid_a_star.h"

#include <memory>

#include "cyber/common/file.h"
#include "gtest/gtest.h"
#include "modules/common/math/box2d.h"
#include "modules/common/math/vec2d.h"
#include "modules/planning/common/obstacle.h"
#include "modules/planning/common/planning_gflags.h"
namespace apollo {
namespace planning {
// Test fixture that loads the open-space planner configuration from the
// test data directory and constructs the HybridAStar planner under test.
class HybridATest : public ::testing::Test {
 public:
  // `override` replaces the redundant `virtual`, and std::make_unique
  // replaces the raw `new` inside the unique_ptr constructor.
  void SetUp() override {
    FLAGS_planner_open_space_config_filename =
        "/apollo/modules/planning/testdata/conf/"
        "open_space_standard_parking_lot.pb.txt";
    CHECK(apollo::cyber::common::GetProtoFromFile(
        FLAGS_planner_open_space_config_filename, &planner_open_space_config_))
        << "Failed to load open space config file "
        << FLAGS_planner_open_space_config_filename;
    hybrid_test = std::make_unique<HybridAStar>(planner_open_space_config_);
  }

 protected:
  // Planner under test; rebuilt by SetUp() before every test case.
  std::unique_ptr<HybridAStar> hybrid_test;
  PlannerOpenSpaceConfig planner_open_space_config_;
};
TEST_F(HybridATest, test1) {
  // Start and goal poses: from (-15, 0) to (15, 0), both headings
  // aligned with the x-axis.
  const double start_x = -15.0;
  const double start_y = 0.0;
  const double start_phi = 0.0;
  const double end_x = 15.0;
  const double end_y = 0.0;
  const double end_phi = 0.0;

  // One line-segment obstacle on the x-axis between (-1, 0) and (1, 0),
  // i.e. lying across the straight line from start to goal.
  Vec2d obstacle_vertice_a(1.0, 0.0);
  Vec2d obstacle_vertice_b(-1.0, 0.0);
  std::vector<std::vector<common::math::Vec2d>> obstacles_list;
  obstacles_list.emplace_back(
      std::vector<Vec2d>{obstacle_vertice_a, obstacle_vertice_b});

  // load xy boundary into the Plan() from configuration (independent from
  // frame): [x_min, x_max, y_min, y_max].
  std::vector<double> XYbounds_{-50.0, 50.0, -50.0, 50.0};

  HybridAStartResult result;
  ASSERT_TRUE(hybrid_test->Plan(start_x, start_y, start_phi, end_x, end_y,
                                end_phi, XYbounds_, obstacles_list, &result));
}
} // namespace planning
} // namespace apollo
| wanglei828/apollo | modules/planning/open_space/coarse_trajectory_generator/hybrid_a_star_test.cc | C++ | apache-2.0 | 2,682 |
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.people.v1.model;
/**
* A person's photo. A picture shown next to the person's name to help others recognize the person.
*
* <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is
* transmitted over HTTP when working with the People API. For a detailed explanation see:
* <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a>
* </p>
*
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public final class Photo extends com.google.api.client.json.GenericJson {

  /**
   * True if the photo is a default photo; false if the photo is a user-provided photo.
   * The value may be {@code null}.
   */
  // The JSON property is named "default", a reserved word in Java, so the
  // field is escaped as "default__" and mapped back via @Key("default").
  @com.google.api.client.util.Key("default")
  private java.lang.Boolean default__;

  /**
   * Metadata about the photo.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private FieldMetadata metadata;

  /**
   * The URL of the photo. You can change the desired size by appending a query parameter
   * `sz={size}` at the end of the url, where {size} is the size in pixels. Example: https://lh3.goo
   * gleusercontent.com/-T_wVWLlmg7w/AAAAAAAAAAI/AAAAAAAABa8/00gzXvDBYqw/s100/photo.jpg?sz=50
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.lang.String url;

  /**
   * True if the photo is a default photo; false if the photo is a user-provided photo.
   * @return value or {@code null} for none
   */
  public java.lang.Boolean getDefault() {
    return default__;
  }

  /**
   * True if the photo is a default photo; false if the photo is a user-provided photo.
   * @param default__ default__ or {@code null} for none
   */
  public Photo setDefault(java.lang.Boolean default__) {
    this.default__ = default__;
    return this;
  }

  /**
   * Metadata about the photo.
   * @return value or {@code null} for none
   */
  public FieldMetadata getMetadata() {
    return metadata;
  }

  /**
   * Metadata about the photo.
   * @param metadata metadata or {@code null} for none
   */
  public Photo setMetadata(FieldMetadata metadata) {
    this.metadata = metadata;
    return this;
  }

  /**
   * The URL of the photo. You can change the desired size by appending a query parameter
   * `sz={size}` at the end of the url, where {size} is the size in pixels. Example: https://lh3.goo
   * gleusercontent.com/-T_wVWLlmg7w/AAAAAAAAAAI/AAAAAAAABa8/00gzXvDBYqw/s100/photo.jpg?sz=50
   * @return value or {@code null} for none
   */
  public java.lang.String getUrl() {
    return url;
  }

  /**
   * The URL of the photo. You can change the desired size by appending a query parameter
   * `sz={size}` at the end of the url, where {size} is the size in pixels. Example: https://lh3.goo
   * gleusercontent.com/-T_wVWLlmg7w/AAAAAAAAAAI/AAAAAAAABa8/00gzXvDBYqw/s100/photo.jpg?sz=50
   * @param url url or {@code null} for none
   */
  public Photo setUrl(java.lang.String url) {
    this.url = url;
    return this;
  }

  @Override
  public Photo set(String fieldName, Object value) {
    return (Photo) super.set(fieldName, value);
  }

  @Override
  public Photo clone() {
    return (Photo) super.clone();
  }

}
| googleapis/google-api-java-client-services | clients/google-api-services-people/v1/1.31.0/com/google/api/services/people/v1/model/Photo.java | Java | apache-2.0 | 3,986 |
from django.conf.urls import url,include
from . import views
urlpatterns = [
    # Plain orders: address page, then order confirmation.
    url(r'^order/address', views.addressPage, name='addressPage1'),
    # Prescription orders: the specific sub-paths are listed before the
    # bare ^order_with_prescription pattern so they are matched first.
    url(r'^order_with_prescription/address', views.addressPage, name='addressPage2'),
    url(r'^order_with_prescription/specify', views.choicePage, name='choicePage'),
    url(r'^order_with_prescription/placed', views.orderPlaced, name='orderPlaced1'),
    url(r'^order_with_prescription', views.prescriptionPage, name='prescriptionPage'),
    url(r'^order/placed', views.orderPlaced, name='orderPlaced2'),
]
#include "beaker/builder.hpp"
#include "beaker/length.hpp"
#include "beaker/offset.hpp"
#include "beaker/decl.hpp"
// Returns an expression which computes the byte offset of the member
// 'mem' within the record declaration 'layout'.
//
// The byte offset of a member within a record type is the sum of the
// lengths of all fields preceding it within the record.
Expr*
get_offset(Decl const* layout, Decl const* mem)
{
  assert(is<Layout_decl>(layout));
  // Accumulate the lengths of all fields preceding 'mem' in a single
  // pass (the original staged them in an intermediate vector first).
  // The accumulator is deliberately not named 'offsetof': that name is
  // a standard macro (<cstddef>) and must not be used as an identifier.
  Expr* offset = zero();
  for (auto decl : as<Layout_decl>(layout)->fields()) {
    if (decl == mem)
      break;
    offset = add(offset, get_length(decl->type()));
  }
  return offset;
}
// Calculate the offset of a field within a layout using a field
// name expression.
//
// FIXME: This currently does not handle nested layouts such
// as h1::h2::h3.
// This only works with one layout decl and a field
Expr*
get_offset(Field_name_expr const* e)
{
  // The first element in the field name should always be a
  // layout declaration.
  // This should be a guarantee provided by elaboration.
  Decl_seq const& decls = e->declarations();
  Layout_decl* layout = as<Layout_decl>(decls.front());
  assert(layout);
  // Offset of the second path element within the outermost layout.
  // get the second element in the declaration sequence
  // this should be guaranteed to exist
  Expr* offset = get_offset(layout, decls.at(1));
  // For deeper paths, walk consecutive (field, member) pairs: at each
  // step 'curr' is a field of layout type and 'next' is a member inside
  // that layout; the member's offset is added to the running total.
  if (decls.size() > 2) {
    auto curr = decls.begin() + 1;
    auto next = decls.begin() + 2;
    // while the next element isn't the end
    while (next != decls.end()) {
      Field_decl* field = as<Field_decl>(*curr);
      assert(field);
      // this has to be a field with layout type
      // it cannot be anything else and this should
      // be a guarantee from elaboration
      Layout_type const* type = as<Layout_type>(field->type());
      assert(type);
      layout = type->declaration();
      assert(layout);
      offset = add(offset, get_offset(layout, *next));
      ++curr;
      ++next;
    }
  }
  return offset;
}
| thehexia/steve | beaker/offset.cpp | C++ | apache-2.0 | 2,155 |
import sys
from setuptools import setup
from setuptools import find_packages
# Package version; the core letsencrypt packages below are pinned to it.
version = '0.5.0.dev0'

# Runtime dependencies.  letsencrypt and letsencrypt-apache are pinned to
# this exact version so the compatibility tests run against matching code.
install_requires = [
    'letsencrypt=={0}'.format(version),
    'letsencrypt-apache=={0}'.format(version),
    'docker-py',
    'requests',
    'zope.interface',
]

# Python 2.6 needs a mock release older than 1.1.0.
if sys.version_info < (2, 7):
    install_requires.append('mock<1.1.0')
else:
    install_requires.append('mock')

if sys.version_info < (2, 7, 9):
    # For secure SSL connexion with Python 2.7 (InsecurePlatformWarning)
    install_requires.append('ndg-httpsclient')
    install_requires.append('pyasn1')

# Extra dependencies for building the documentation ("docs" extra).
docs_extras = [
    'repoze.sphinx.autointerface',
    'Sphinx>=1.0',  # autodoc_member_order = 'bysource', autodoc_default_flags
    'sphinx_rtd_theme',
]

setup(
    name='letsencrypt-compatibility-test',
    version=version,
    description="Compatibility tests for Let's Encrypt client",
    url='https://github.com/letsencrypt/letsencrypt',
    author="Let's Encrypt Project",
    author_email='client-dev@letsencrypt.org',
    license='Apache License 2.0',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Security',
    ],
    packages=find_packages(),
    include_package_data=True,
    install_requires=install_requires,
    extras_require={
        'docs': docs_extras,
    },
    # Installs the CLI entry point for the test driver.
    entry_points={
        'console_scripts': [
            'letsencrypt-compatibility-test = letsencrypt_compatibility_test.test_driver:main',
        ],
    },
)
/*
* Open Source Software published under the Apache Licence, Version 2.0.
*/
package io.github.vocabhunter.gui.i18n;
/**
 * Keys for externalised user-interface strings.  Each constant wraps the
 * property-style lookup name used to resolve the localised text.
 */
public enum I18nKey {
    MAIN_WINDOW_UNSAVED("main.window.unsaved"),
    MAIN_WINDOW_UNTITLED("main.window.untitled"),
    SESSION_WORD_USES("session.word.uses"),
    SESSION_TAB_ANALYSIS("session.tab.analysis"),
    SESSION_TAB_PROGRESS("session.tab.progress"),
    SEARCH_MATCH_NONE("search.match.none"),
    SEARCH_MATCH_SELECTION_ON("search.match.selection.on"),
    SEARCH_MATCH_SELECTION_OFF("search.match.selection.off"),
    ABOUT_VERSION("about.version"),
    FILTER_WINDOW_TITLE("filter.window.title"),
    FILTER_WORDS_COUNT("filter.words.count"),
    FILTER_COLUMN("filter.column"),
    FILTER_ERROR_TITLE("filter.error.title"),
    FILTER_ERROR_ALL("filter.error.all"),
    FILTER_ERROR_DISABLED("filter.error.disabled"),
    FILTER_MAIN_LISTS_BUTTON_EDIT("filter.main.lists.button.edit"),
    FILTER_MAIN_LISTS_BUTTON_DELETE("filter.main.lists.button.delete"),
    FILTER_MAIN_LISTS_TYPE_KNOWN("filter.main.lists.type.known"),
    FILTER_MAIN_LISTS_TYPE_BOTH("filter.main.lists.type.both"),
    FILTER_MAIN_LISTS_TYPE_LIST("filter.main.lists.type.list"),
    FILTER_GRID_WINDOW_TITLE("filter.grid.window.title"),
    FILTER_SESSION_WINDOW_TITLE("filter.session.window.title"),
    FILTER_SESSION_TYPE_KNOWN_TIP("filter.session.type.known.tip"),
    FILTER_SESSION_TYPE_UNKNOWN_TIP("filter.session.type.unknown.tip"),
    PROGRESS_SLICE_MARKED("progress.slice.marked"),
    PROGRESS_SLICE_UNMARKED("progress.slice.unmarked"),
    PROGRESS_SLICE_KNOWN("progress.slice.known"),
    PROGRESS_SLICE_UNKNOWN("progress.slice.unknown"),
    PROGRESS_SLICE_FILTERED("progress.slice.filtered"),
    PROGRESS_WORD_COUNT("progress.word.count"),
    PROGRESS_WORD_PERCENTAGE("progress.word.percentage"),
    NOTE_WINDOW_TITLE("note.window.title"),
    NOTE_TITLE("note.title"),
    ERROR_DETAILS("error.details"),
    ERROR_SESSION_OPEN_DETAILS("error.session.open.details"),
    ERROR_SESSION_OPEN_TITLE("error.session.open.title"),
    ERROR_SESSION_EXPORT_DETAILS("error.session.export.details"),
    ERROR_SESSION_EXPORT_TITLE("error.session.export.title"),
    ERROR_SESSION_SAVE_DETAILS("error.session.save.details"),
    ERROR_SESSION_SAVE_TITLE("error.session.save.title"),
    FILE_NEW("file.new"),
    FILE_OPEN("file.open"),
    FILE_SAVE("file.save"),
    FILE_EXPORT("file.export"),
    FILE_EXCLUDE("file.exclude"),
    FILE_MODIFIED("file.modified"),
    FILE_UNSAVED("file.unsaved"),
    FILE_BUTTON_SAVE("file.button.save"),
    FILE_BUTTON_DISCARD("file.button.discard"),
    FILE_BUTTON_CANCEL("file.button.cancel"),
    FILE_TYPE_ALL("file.type.all"),
    FILE_TYPE_TEXT("file.type.text"),
    FILE_TYPE_DOCUMENT("file.type.document"),
    FILE_TYPE_ANY_TEXT("file.type.any_text"),
    FILE_TYPE_PDF("file.type.pdf"),
    FILE_TYPE_OFFICE("file.type.office"),
    FILE_TYPE_EBOOK("file.type.ebook"),
    FILE_TYPE_SESSION("file.type.session"),
    FILE_TYPE_SPREADSHEET("file.type.spreadsheet"),
    STATUS_ACTION_NEW("status.action.new"),
    STATUS_ACTION_OPEN("status.action.open"),
    STATUS_ACTION_SAVE("status.action.save"),
    STATUS_ACTION_EXPORT("status.action.export"),
    STATUS_ACTION_EXIT("status.action.exit"),
    STATUS_ACTION_ABOUT("status.action.about"),
    STATUS_MARKED_PERCENTAGE("status.marked.percentage"),
    STATUS_POSITION_EDIT_ON("status.position.edit.on"),
    STATUS_POSITION_EDIT_OFF("status.position.edit.off"),
    LANGUAGE_NAME("language.name"),
    LANGUAGE_TITLE("language.title"),
    LINK_MAIN("link.main"),
    LINK_HELP("link.help"),
    LINK_ISSUE("link.issue");

    /** The lookup name backing this key. */
    private final String key;

    I18nKey(final String key) {
        this.key = key;
    }

    /**
     * @return the lookup name for this key
     */
    public String getKey() {
        return key;
    }
}
| AdamCarroll/VocabHunter | gui/src/main/java/io/github/vocabhunter/gui/i18n/I18nKey.java | Java | apache-2.0 | 3,821 |
/**
* redpen: a text inspection tool
* Copyright (c) 2014-2015 Recruit Technologies Co., Ltd. and contributors
* (see CONTRIBUTORS.md)
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cc.redpen.validator.sentence;
import cc.redpen.RedPenException;
import cc.redpen.config.Configuration;
import cc.redpen.config.Symbol;
import cc.redpen.config.SymbolType;
import cc.redpen.config.ValidatorConfiguration;
import cc.redpen.model.Sentence;
import cc.redpen.validator.ValidationError;
import cc.redpen.validator.Validator;
import cc.redpen.validator.ValidatorFactory;
import org.junit.Test;
import java.util.ArrayList;
import java.util.List;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
public class QuotationValidatorTest {

    /** Builds the Quotation validator with its default configuration. */
    private static Validator defaultValidator() throws RedPenException {
        return ValidatorFactory.getInstance("Quotation");
    }

    /**
     * Builds a Quotation validator whose {@code use_ascii} property is set to
     * the given value.
     */
    private static Validator asciiValidator(final boolean useAscii) throws RedPenException {
        Configuration conf = Configuration.builder()
                .addValidatorConfig(new ValidatorConfiguration("Quotation").addProperty("use_ascii", useAscii))
                .build();
        return ValidatorFactory.getInstance(conf.getValidatorConfigs().get(0), conf);
    }

    /**
     * Validates a single sentence with the given validator and returns the
     * list of reported errors.
     */
    private static List<ValidationError> validate(final Validator validator, final String text) {
        List<ValidationError> errors = new ArrayList<>();
        validator.setErrorList(errors);
        validator.validate(new Sentence(text, 0));
        assertNotNull(errors);
        return errors;
    }

    /** Asserts that validating {@code text} produces exactly {@code expected} errors. */
    private static void assertErrorCount(final Validator validator, final String text, final int expected) {
        assertEquals(expected, validate(validator, text).size());
    }

    @Test
    public void testDoubleQuotationMakrs() throws RedPenException {
        assertErrorCount(defaultValidator(), "I said “That is true”.", 0);
    }

    @Test
    public void testSingleQuotationMakrs() throws RedPenException {
        assertErrorCount(defaultValidator(), "I said ‘that is true’.", 0);
    }

    @Test
    public void testDoubleQuotationMakrWithoutRight() throws RedPenException {
        assertErrorCount(defaultValidator(), "I said “That is true.", 1);
    }

    @Test
    public void testSingleQuotationMakrWithoutRight() throws RedPenException {
        assertErrorCount(defaultValidator(), "I said ‘that is true.", 1);
    }

    @Test
    public void testDoubleQuotationMakrWithoutLeft() throws RedPenException {
        assertErrorCount(defaultValidator(), "I said That is true”.", 1);
    }

    @Test
    public void testSingleQuotationMakrkWithoutLeft() throws RedPenException {
        assertErrorCount(defaultValidator(), "I said that is true’.", 1);
    }

    @Test
    public void testExceptionCase() throws RedPenException {
        // An apostrophe inside a word must not be treated as a quotation mark.
        assertErrorCount(defaultValidator(), "I’m a jedi knight.", 0);
    }

    @Test
    public void testQuotedExceptionCase() throws RedPenException {
        // Apostrophe inside an already-quoted phrase.
        assertErrorCount(defaultValidator(), "he said ‘I’m a jedi knight’.", 0);
    }

    @Test
    public void testDoubleLeftSingleQuotationMakrk() throws RedPenException {
        assertErrorCount(defaultValidator(), "I said ‘that is true‘.", 1);
    }

    @Test
    public void testDoubleLeftDoubleQuotationMakrk() throws RedPenException {
        assertErrorCount(defaultValidator(), "I said “that is true.“", 1);
    }

    @Test
    public void testDoubleRightSingleQuotationMakrk() throws RedPenException {
        assertErrorCount(defaultValidator(), "I said ’that is true’.", 1);
    }

    @Test
    public void testDoubleRightDoubleQuotationMakrk() throws RedPenException {
        assertErrorCount(defaultValidator(), "I said ”that is true”.", 1);
    }

    @Test
    public void testAsciiExceptionCase() throws RedPenException {
        assertErrorCount(asciiValidator(false), "I'm a jedi knight.", 0);
    }

    @Test
    public void testAsciiDoubleQuotationMakrk() throws RedPenException {
        assertErrorCount(asciiValidator(false), "I said \"that is true\".", 0);
    }

    @Test
    public void testNoQuotationMakrk() throws RedPenException {
        assertErrorCount(asciiValidator(true), "I said that is true.", 0);
    }

    @Test
    public void testNoInput() throws RedPenException {
        // An empty sentence must not raise errors (or exceptions).
        assertErrorCount(asciiValidator(true), "", 0);
    }

    @Test
    public void testTwiceQuotations() throws RedPenException {
        assertErrorCount(defaultValidator(), "I said ‘that is true’ and not said ‘that is false’", 0);
    }

    @Test
    public void testOneOfFailureInTwiceQuotations() throws RedPenException {
        assertErrorCount(defaultValidator(), "I said ‘that is true and not said ‘that is false’", 1);
    }

    @Test
    public void testLeftDoubleQuotationsWihtoutSpace() throws RedPenException {
        assertErrorCount(defaultValidator(), "I said“that is true”.", 1);
    }

    @Test
    public void testLeftAsciiDoubleQuotationsWihtoutSpace() throws RedPenException {
        assertErrorCount(asciiValidator(true), "I said\"that is true\".", 1);
    }

    @Test
    public void testRightDoubleQuotationsWihtoutSpace() throws RedPenException {
        assertErrorCount(defaultValidator(), "I said “that is true”is true.", 1);
    }

    @Test
    public void testRightAsciiDoubleQuotationsWihtoutSpace() throws RedPenException {
        assertErrorCount(asciiValidator(true), "I said \"that is true\"is true.", 1);
    }

    @Test
    public void testDoubleQuotationsWithNonAsciiPeriod() throws RedPenException {
        // Custom full-stop symbol: the ideographic period must still close the
        // quotation correctly.
        Configuration conf = Configuration.builder()
                .addValidatorConfig(new ValidatorConfiguration("Quotation").addProperty("use_ascii", true))
                .addSymbol(new Symbol(SymbolType.FULL_STOP, '。'))
                .build();
        Validator validator = ValidatorFactory.getInstance(conf.getValidatorConfigs().get(0), conf);
        assertErrorCount(validator, "I said \"that is true\"。", 0);
    }
}
| kenhys/redpen | redpen-core/src/test/java/cc/redpen/validator/sentence/QuotationValidatorTest.java | Java | apache-2.0 | 13,023 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.druid.segment.column;
import javax.annotation.Nullable;
import java.nio.ByteBuffer;
import java.util.Comparator;
/**
 * Naming is hard. This is the core interface extracted from another interface called ObjectStrategy that lives in
 * 'druid-processing'. It provides basic methods for handling converting some type of object to a binary form, reading
 * the binary form back into an object from a {@link ByteBuffer}, and mechanism to perform comparisons between objects.
 *
 * Complex types register one of these in {@link Types#registerStrategy}, which can be retrieved by the complex
 * type name to convert values to and from binary format, and compare them.
 *
 * This could be recombined with 'ObjectStrategy' should these two modules be combined.
 */
public interface ObjectByteStrategy<T> extends Comparator<T>
{
  /**
   * @return the concrete Java class handled by this strategy
   */
  Class<? extends T> getClazz();

  /**
   * Convert values from their underlying byte representation.
   *
   * Implementations of this method <i>may</i> change the given buffer's mark, or limit, and position.
   *
   * Implementations of this method <i>may not</i> store the given buffer in a field of the "deserialized" object,
   * need to use {@link ByteBuffer#slice()}, {@link ByteBuffer#asReadOnlyBuffer()} or {@link ByteBuffer#duplicate()} in
   * this case.
   *
   * @param buffer buffer to read value from
   * @param numBytes number of bytes used to store the value, starting at buffer.position()
   * @return an object created from the given byte buffer representation
   */
  @Nullable
  T fromByteBuffer(ByteBuffer buffer, int numBytes);

  /**
   * Convert a value to its binary representation.
   *
   * @param val value to serialize; may be null
   * @return the serialized bytes, or null when {@code val} is null
   */
  @Nullable
  byte[] toBytes(@Nullable T val);
}
| nishantmonu51/druid | core/src/main/java/org/apache/druid/segment/column/ObjectByteStrategy.java | Java | apache-2.0 | 2,453 |
/*
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for
* license information.
*
* Code generated by Microsoft (R) AutoRest Code Generator.
* Changes may cause incorrect behavior and will be lost if the code is
* regenerated.
*/
'use strict';
const models = require('./index');
/**
* An on-premises file system dataset.
*
* @extends models['Dataset']
*/
class FileShareDataset extends models['Dataset'] {
  /**
   * Create a FileShareDataset.
   * @member {object} [folderPath] The path of the on-premises file system.
   * Type: string (or Expression with resultType string).
   * @member {object} [fileName] The name of the on-premises file system. Type:
   * string (or Expression with resultType string).
   * @member {object} [format] The format of the files.
   * @member {object} [format.serializer] Serializer. Type: string (or
   * Expression with resultType string).
   * @member {object} [format.deserializer] Deserializer. Type: string (or
   * Expression with resultType string).
   * @member {string} [format.type] Polymorphic Discriminator
   * @member {object} [fileFilter] Specify a filter to be used to select a
   * subset of files in the folderPath rather than all files. Type: string (or
   * Expression with resultType string).
   * @member {object} [compression] The data compression method used for the
   * file system.
   * @member {string} [compression.type] Polymorphic Discriminator
   */
  constructor() {
    super();
  }

  /**
   * Defines the metadata of FileShareDataset
   *
   * @returns {object} metadata of FileShareDataset
   *
   */
  mapper() {
    // Serialization metadata consumed by the ms-rest runtime when
    // (de)serializing FileShareDataset payloads.
    // NOTE: this file is generated by AutoRest; manual edits will be lost
    // if the code is regenerated.
    return {
      required: false,
      serializedName: 'FileShare',
      type: {
        name: 'Composite',
        polymorphicDiscriminator: {
          serializedName: 'type',
          clientName: 'type'
        },
        uberParent: 'Dataset',
        className: 'FileShareDataset',
        modelProperties: {
          // Properties inherited from the Dataset base model.
          description: {
            required: false,
            serializedName: 'description',
            type: {
              name: 'String'
            }
          },
          structure: {
            required: false,
            serializedName: 'structure',
            type: {
              name: 'Object'
            }
          },
          linkedServiceName: {
            required: true,
            serializedName: 'linkedServiceName',
            defaultValue: {},
            type: {
              name: 'Composite',
              className: 'LinkedServiceReference'
            }
          },
          parameters: {
            required: false,
            serializedName: 'parameters',
            type: {
              name: 'Dictionary',
              value: {
                required: false,
                serializedName: 'ParameterSpecificationElementType',
                type: {
                  name: 'Composite',
                  className: 'ParameterSpecification'
                }
              }
            }
          },
          annotations: {
            required: false,
            serializedName: 'annotations',
            type: {
              name: 'Sequence',
              element: {
                required: false,
                serializedName: 'ObjectElementType',
                type: {
                  name: 'Object'
                }
              }
            }
          },
          folder: {
            required: false,
            serializedName: 'folder',
            type: {
              name: 'Composite',
              className: 'DatasetFolder'
            }
          },
          type: {
            required: true,
            serializedName: 'type',
            isPolymorphicDiscriminator: true,
            type: {
              name: 'String'
            }
          },
          // FileShare-specific properties, flattened from the wire-format
          // 'typeProperties' envelope (see serializedName prefixes).
          folderPath: {
            required: false,
            serializedName: 'typeProperties.folderPath',
            type: {
              name: 'Object'
            }
          },
          fileName: {
            required: false,
            serializedName: 'typeProperties.fileName',
            type: {
              name: 'Object'
            }
          },
          format: {
            required: false,
            serializedName: 'typeProperties.format',
            type: {
              name: 'Composite',
              additionalProperties: {
                type: {
                  name: 'Dictionary',
                  value: {
                    required: false,
                    serializedName: 'ObjectElementType',
                    type: {
                      name: 'Object'
                    }
                  }
                }
              },
              polymorphicDiscriminator: {
                serializedName: 'type',
                clientName: 'type'
              },
              uberParent: 'DatasetStorageFormat',
              className: 'DatasetStorageFormat'
            }
          },
          fileFilter: {
            required: false,
            serializedName: 'typeProperties.fileFilter',
            type: {
              name: 'Object'
            }
          },
          compression: {
            required: false,
            serializedName: 'typeProperties.compression',
            type: {
              name: 'Composite',
              additionalProperties: {
                type: {
                  name: 'Dictionary',
                  value: {
                    required: false,
                    serializedName: 'ObjectElementType',
                    type: {
                      name: 'Object'
                    }
                  }
                }
              },
              polymorphicDiscriminator: {
                serializedName: 'type',
                clientName: 'type'
              },
              uberParent: 'DatasetCompression',
              className: 'DatasetCompression'
            }
          }
        }
      }
    };
  }
}

module.exports = FileShareDataset;
| xingwu1/azure-sdk-for-node | lib/services/datafactoryManagement/lib/models/fileShareDataset.js | JavaScript | apache-2.0 | 6,074 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.pulsar.io.kafka;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.concurrent.ExecutionException;
import lombok.extern.slf4j.Slf4j;
import org.apache.pulsar.client.api.Schema;
import org.apache.pulsar.common.schema.SchemaInfo;
import org.apache.pulsar.common.schema.SchemaType;
/**
 * Small bounded cache that maps Confluent schema-registry schema ids to
 * Pulsar {@link Schema} instances wrapping the corresponding Avro schema
 * definition. Entries are fetched lazily from the registry on first access.
 */
@Slf4j
final class AvroSchemaCache {

    /** Upper bound on the number of schemas kept in memory. */
    private static final int MAX_CACHED_SCHEMAS = 100;

    private final SchemaRegistryClient schemaRegistryClient;

    // Loads entries on demand via fetchSchema(); the loader runs lazily, so
    // referencing the instance method from a field initializer is safe.
    private final LoadingCache<Integer, Schema<ByteBuffer>> schemaById = CacheBuilder
            .newBuilder()
            .maximumSize(MAX_CACHED_SCHEMAS)
            .build(new CacheLoader<Integer, Schema<ByteBuffer>>() {
                @Override
                public Schema<ByteBuffer> load(Integer id) throws Exception {
                    return fetchSchema(id);
                }
            });

    public AvroSchemaCache(SchemaRegistryClient schemaRegistryClient) {
        this.schemaRegistryClient = schemaRegistryClient;
    }

    /**
     * Returns the cached Pulsar schema for the given registry schema id,
     * fetching it from the registry on a cache miss.
     *
     * @throws RuntimeException wrapping the underlying failure cause
     */
    public Schema<ByteBuffer> get(int schemaId) {
        try {
            return schemaById.get(schemaId);
        } catch (ExecutionException ex) {
            throw new RuntimeException(ex.getCause());
        }
    }

    /** Fetches the Avro schema from the registry and wraps it as a Pulsar schema. */
    private Schema<ByteBuffer> fetchSchema(int schemaId) {
        try {
            org.apache.avro.Schema avroSchema = schemaRegistryClient.getById(schemaId);
            String schemaDefinition = avroSchema.toString(false);
            log.info("Schema {} definition {}", schemaId, schemaDefinition);
            SchemaInfo info = SchemaInfo.builder()
                    .type(SchemaType.AVRO)
                    .name(avroSchema.getName())
                    .properties(Collections.emptyMap())
                    .schema(schemaDefinition.getBytes(StandardCharsets.UTF_8)
                    ).build();
            return new ByteBufferSchemaWrapper(info);
        } catch (IOException | RestClientException ex) {
            throw new RuntimeException(ex);
        }
    }
}
| massakam/pulsar | pulsar-io/kafka/src/main/java/org/apache/pulsar/io/kafka/AvroSchemaCache.java | Java | apache-2.0 | 3,114 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.jackrabbit.oak.plugins.blob.datastore;
import java.util.Map;
import java.util.Properties;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.ConfigurationPolicy;
import org.apache.jackrabbit.core.data.DataStore;
import org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3DataStore;
import org.osgi.service.component.ComponentContext;
/**
 * OSGi component that exposes an Amazon S3 backed {@link DataStore}. The
 * component only activates when an explicit configuration exists
 * (ConfigurationPolicy.REQUIRE); the configuration map is handed to the
 * underlying {@code S3DataStore} as plain {@link Properties}.
 */
@Component(policy = ConfigurationPolicy.REQUIRE, name = S3DataStoreService.NAME)
public class S3DataStoreService extends AbstractDataStoreService{
    public static final String NAME = "org.apache.jackrabbit.oak.plugins.blob.datastore.S3DataStore";

    @Override
    protected DataStore createDataStore(ComponentContext context, Map<String, Object> config) {
        // Copy the OSGi configuration into Properties, the format the
        // S3DataStore consumes.
        Properties props = new Properties();
        props.putAll(config);

        S3DataStore store = new S3DataStore();
        store.setProperties(props);
        return store;
    }

    @Override
    protected String[] getDescription() {
        // Advertised in the service registration so consumers can select the
        // S3 flavour of the data store.
        return new String[] {"type=S3"};
    }
}
| tripodsan/jackrabbit-oak | oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/S3DataStoreService.java | Java | apache-2.0 | 1,880 |
# -*- coding: utf-8 -*-
'''
Management of iptables
======================
This is an iptables-specific module designed to manage Linux firewalls. It is
expected that this state module, and other system-specific firewall states, may
at some point be deprecated in favor of a more generic ``firewall`` state.
.. code-block:: yaml
httpd:
iptables.append:
- table: filter
- chain: INPUT
- jump: ACCEPT
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.append:
- table: filter
- chain: INPUT
- jump: ACCEPT
- match:
- state
- comment
- comment: "Allow HTTP"
- connstate: NEW
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.append:
- table: filter
- chain: INPUT
- jump: ACCEPT
- match:
- state
- comment
- comment: "Allow HTTP"
- connstate: NEW
- source: '127.0.0.1'
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
.. Invert Rule
httpd:
iptables.append:
- table: filter
- chain: INPUT
- jump: ACCEPT
- match:
- state
- comment
- comment: "Allow HTTP"
- connstate: NEW
- source: '! 127.0.0.1'
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.append:
- table: filter
- chain: INPUT
- jump: ACCEPT
- match:
- state
- comment
- comment: "Allow HTTP"
- connstate: NEW
- source: 'not 127.0.0.1'
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.append:
- table: filter
- family: ipv6
- chain: INPUT
- jump: ACCEPT
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.append:
- table: filter
- family: ipv4
- chain: INPUT
- jump: ACCEPT
- match: state
- connstate: NEW
- dports:
- 80
- 443
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.insert:
- position: 1
- table: filter
- chain: INPUT
- jump: ACCEPT
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.insert:
- position: 1
- table: filter
- family: ipv6
- chain: INPUT
- jump: ACCEPT
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.delete:
- table: filter
- chain: INPUT
- jump: ACCEPT
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.delete:
- position: 1
- table: filter
- chain: INPUT
- jump: ACCEPT
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.delete:
- table: filter
- family: ipv6
- chain: INPUT
- jump: ACCEPT
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
default to accept:
iptables.set_policy:
- chain: INPUT
- policy: ACCEPT
.. note::
Various functions of the ``iptables`` module use the ``--check`` option. If
the version of ``iptables`` on the target system does not include this
option, an alternate version of this check will be performed using the
output of iptables-save. This may have unintended consequences on legacy
releases of ``iptables``.
'''
from __future__ import absolute_import
# Import salt libs
import salt.utils
from salt.state import STATE_INTERNAL_KEYWORDS as _STATE_INTERNAL_KEYWORDS
def __virtual__():
    '''
    Only load this state module if the iptables execution module is available
    in ``__salt__``.
    '''
    # ``iptables.version`` is only registered when the iptables execution
    # module loaded successfully on this minion.
    return 'iptables.version' in __salt__
def chain_present(name, table='filter', family='ipv4'):
    '''
    .. versionadded:: 2014.1.0

    Ensure that a user-defined chain exists.

    name
        A user-defined chain name.

    table
        The table to own the chain.

    family
        Networking family, either ipv4 or ipv6
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    # Nothing to do when the chain is already there.
    if __salt__['iptables.check_chain'](table, name, family) is True:
        ret['result'] = True
        ret['comment'] = 'iptables {0} chain is already exist in {1} table for {2}'.format(
            name, table, family)
        return ret

    # Dry run: report what would change without touching the firewall.
    if __opts__['test']:
        ret['comment'] = 'iptables {0} chain in {1} table needs to be set for {2}'.format(
            name, table, family)
        return ret

    created = __salt__['iptables.new_chain'](table, name, family)
    if created is True:
        ret['result'] = True
        ret['changes'] = {'locale': name}
        ret['comment'] = 'iptables {0} chain in {1} table create success for {2}'.format(
            name, table, family)
    else:
        # On failure the execution module returns the iptables error output.
        ret['result'] = False
        ret['comment'] = 'Failed to create {0} chain in {1} table: {2} for {3}'.format(
            name, table, created.strip(), family)
    return ret
def chain_absent(name, table='filter', family='ipv4'):
    '''
    .. versionadded:: 2014.1.0

    Ensure that a user-defined chain is not present.

    table
        The table to remove the chain from

    family
        Networking family, either ipv4 or ipv6
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    # Already gone: report success without changes.
    if not __salt__['iptables.check_chain'](table, name, family):
        ret['result'] = True
        ret['comment'] = 'iptables {0} chain is already absent in {1} table for {2}'.format(
            name, table, family)
        return ret

    # Dry run: report what would change without touching the firewall.
    if __opts__['test']:
        ret['comment'] = 'iptables {0} chain in {1} table needs to be removed {2}'.format(
            name, table, family)
        return ret

    # ``iptables.flush`` returns a falsey value on success; any truthy value
    # is the iptables error output and means the chain could not be emptied.
    flush_result = __salt__['iptables.flush'](table, name, family)
    if flush_result:
        ret['result'] = False
        ret['comment'] = 'Failed to flush {0} chain in {1} table: {2} for {3}'.format(
            name, table, flush_result.strip(), family)
        return ret

    delete_result = __salt__['iptables.delete_chain'](table, name, family)
    if delete_result is True:
        ret['result'] = True
        ret['changes'] = {'locale': name}
        ret['comment'] = 'iptables {0} chain in {1} table delete success for {2}'.format(
            name, table, family)
    else:
        ret['result'] = False
        ret['comment'] = 'Failed to delete {0} chain in {1} table: {2} for {3}'.format(
            name, table, delete_result.strip(), family)
    return ret
def append(name, table='filter', family='ipv4', **kwargs):
    '''
    .. versionadded:: 0.17.0

    Add a rule to the end of the specified chain.

    name
        A user-defined name to call this rule by in another part of a state or
        formula. This should not be an actual rule.

    table
        The table that owns the chain which should be modified

    family
        Network family, ipv4 or ipv6.

    All other arguments are passed in with the same name as the long option
    that would normally be used for iptables, with one exception: ``--state`` is
    specified as `connstate` instead of `state` (not to be confused with
    `ctstate`).

    Jump options that doesn't take arguments should be passed in with an empty
    string.
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    # Aggregated mode: when state aggregation merged several append calls into
    # one, ``rules`` holds the individual rule dicts. Recurse once per rule,
    # collect each sub-result, and defer the save to a single call at the end.
    if 'rules' in kwargs:
        ret['changes']['locale'] = []
        comments = []
        save = False
        for rule in kwargs['rules']:
            # Strip aggregation bookkeeping keys before recursing.
            if 'rules' in rule:
                del rule['rules']
            if '__agg__' in rule:
                del rule['__agg__']
            # Remember the save request (and optional target file) but disable
            # it on the individual rule so we only save once, below.
            if 'save' in rule and rule['save']:
                save = True
                if rule['save'] is not True:
                    save_file = rule['save']
                else:
                    save_file = True
                rule['save'] = False
            _ret = append(**rule)
            if 'locale' in _ret['changes']:
                ret['changes']['locale'].append(_ret['changes']['locale'])
            comments.append(_ret['comment'])
            # NOTE(review): the aggregate result reflects only the LAST rule's
            # result — earlier failures are visible only in the comments.
            ret['result'] = _ret['result']
        if save:
            if save_file is True:
                save_file = None
            __salt__['iptables.save'](save_file, family=family)
        if not ret['changes']['locale']:
            del ret['changes']['locale']
        ret['comment'] = '\n'.join(comments)
        return ret
    # Single-rule mode: drop Salt-internal keywords so only iptables options
    # reach the rule builder.
    for ignore in _STATE_INTERNAL_KEYWORDS:
        if ignore in kwargs:
            del kwargs[ignore]
    kwargs['name'] = name
    kwargs['table'] = table
    # ``rule`` is the bare match/jump spec; ``command`` is the full iptables
    # command line (used only for human-readable comments).
    rule = __salt__['iptables.build_rule'](family=family, **kwargs)
    command = __salt__['iptables.build_rule'](full='True', family=family, command='A', **kwargs)
    if __salt__['iptables.check'](table,
                                  kwargs['chain'],
                                  rule,
                                  family) is True:
        ret['result'] = True
        ret['comment'] = 'iptables rule for {0} already set ({1}) for {2}'.format(
            name,
            command.strip(),
            family)
        # The rule exists in memory, but it may not be persisted yet: compare
        # the in-memory rules against the saved rules and save on mismatch.
        if 'save' in kwargs and kwargs['save']:
            if kwargs['save'] is not True:
                filename = kwargs['save']
            else:
                filename = None
            saved_rules = __salt__['iptables.get_saved_rules'](family=family)
            _rules = __salt__['iptables.get_rules'](family=family)
            __rules = []
            for table in _rules:
                for chain in _rules[table]:
                    __rules.append(_rules[table][chain].get('rules'))
            __saved_rules = []
            for table in saved_rules:
                for chain in saved_rules[table]:
                    __saved_rules.append(saved_rules[table][chain].get('rules'))
            # Only save if rules in memory are different than saved rules
            if __rules != __saved_rules:
                out = __salt__['iptables.save'](filename, family=family)
                ret['comment'] += ('\nSaved iptables rule {0} for {1}\n'
                                   '{2}\n{3}').format(name, family, command.strip(), out)
        return ret
    # Dry run: report the rule that would be appended.
    if __opts__['test']:
        ret['comment'] = 'iptables rule for {0} needs to be set ({1}) for {2}'.format(
            name,
            command.strip(),
            family)
        return ret
    if __salt__['iptables.append'](table, kwargs['chain'], rule, family):
        ret['changes'] = {'locale': name}
        ret['result'] = True
        ret['comment'] = 'Set iptables rule for {0} to: {1} for {2}'.format(
            name,
            command.strip(),
            family)
        # Persist the freshly appended rule when requested, optionally to a
        # caller-supplied file.
        if 'save' in kwargs:
            if kwargs['save']:
                if kwargs['save'] is not True:
                    filename = kwargs['save']
                else:
                    filename = None
                out = __salt__['iptables.save'](filename, family=family)
                ret['comment'] = ('Set and saved iptables rule {0} for {1}\n'
                                  '{2}\n{3}').format(name, family, command.strip(), out)
        return ret
    else:
        ret['result'] = False
        ret['comment'] = ('Failed to set iptables rule for {0}.\n'
                         'Attempted rule was {1} for {2}').format(
            name,
            command.strip(), family)
        return ret
def insert(name, table='filter', family='ipv4', **kwargs):
    '''
    .. versionadded:: 2014.1.0

    Insert a rule into a chain

    name
        A user-defined name to call this rule by in another part of a state or
        formula. This should not be an actual rule.

    table
        The table that owns the chain that should be modified

    family
        Networking family, either ipv4 or ipv6

    position
        The numerical representation of where the rule should be inserted into
        the chain. Note that ``-1`` is not a supported position value.

    All other arguments are passed in with the same name as the long option
    that would normally be used for iptables, with one exception: ``--state`` is
    specified as `connstate` instead of `state` (not to be confused with
    `ctstate`).

    Jump options that do not take arguments should be passed in with an empty
    string.
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # Aggregated form: kwargs['rules'] holds a list of individual rule dicts
    # built by mod_aggregate. Apply each one via a recursive call, defer any
    # requested save until the whole batch is applied, and merge the results.
    if 'rules' in kwargs:
        ret['changes']['locale'] = []
        comments = []
        save = False
        for rule in kwargs['rules']:
            if 'rules' in rule:
                del rule['rules']
            if '__agg__' in rule:
                del rule['__agg__']
            if 'save' in rule and rule['save']:
                save = True
                if rule['save'] is not True:
                    save_file = rule['save']
                else:
                    save_file = True
                # Defer saving until after the whole batch has been inserted
                rule['save'] = False
            _ret = insert(**rule)
            if 'locale' in _ret['changes']:
                ret['changes']['locale'].append(_ret['changes']['locale'])
            comments.append(_ret['comment'])
            ret['result'] = _ret['result']
        if save:
            if save_file is True:
                save_file = None
            __salt__['iptables.save'](save_file, family=family)
        if not ret['changes']['locale']:
            del ret['changes']['locale']
        ret['comment'] = '\n'.join(comments)
        return ret

    # Strip salt-internal keywords so only iptables options remain.
    for ignore in _STATE_INTERNAL_KEYWORDS:
        if ignore in kwargs:
            del kwargs[ignore]
    kwargs['name'] = name
    kwargs['table'] = table
    rule = __salt__['iptables.build_rule'](family=family, **kwargs)
    command = __salt__['iptables.build_rule'](full=True, family=family, command='I', **kwargs)
    if __salt__['iptables.check'](table,
                                  kwargs['chain'],
                                  rule,
                                  family) is True:
        ret['result'] = True
        ret['comment'] = 'iptables rule for {0} already set for {1} ({2})'.format(
            name,
            family,
            command.strip())
        if 'save' in kwargs and kwargs['save']:
            if kwargs['save'] is not True:
                filename = kwargs['save']
            else:
                filename = None
            saved_rules = __salt__['iptables.get_saved_rules'](family=family)
            _rules = __salt__['iptables.get_rules'](family=family)
            __rules = []
            for table in _rules:
                for chain in _rules[table]:
                    __rules.append(_rules[table][chain].get('rules'))
            __saved_rules = []
            for table in saved_rules:
                for chain in saved_rules[table]:
                    __saved_rules.append(saved_rules[table][chain].get('rules'))
            # Only save if rules in memory are different than saved rules
            if __rules != __saved_rules:
                out = __salt__['iptables.save'](filename, family=family)
                ret['comment'] += ('\nSaved iptables rule {0} for {1}\n'
                                   '{2}\n{3}').format(name, family, command.strip(), out)
        return ret
    if __opts__['test']:
        ret['comment'] = 'iptables rule for {0} needs to be set for {1} ({2})'.format(
            name,
            family,
            command.strip())
        return ret
    # iptables.insert returns the iptables CLI output, which is empty on
    # success, hence the inverted truth test.
    if not __salt__['iptables.insert'](table, kwargs['chain'], kwargs['position'], rule, family):
        ret['changes'] = {'locale': name}
        ret['result'] = True
        ret['comment'] = 'Set iptables rule for {0} to: {1} for {2}'.format(
            name,
            command.strip(),
            family)
        if 'save' in kwargs:
            if kwargs['save']:
                # Honor a filename supplied via ``save`` (consistent with the
                # ``append`` state); ``save: True`` keeps the default location.
                if kwargs['save'] is not True:
                    filename = kwargs['save']
                else:
                    filename = None
                out = __salt__['iptables.save'](filename, family=family)
                ret['comment'] = ('Set and saved iptables rule {0} for {1}\n'
                                  '{2}\n{3}').format(name, family, command.strip(), out)
        return ret
    else:
        ret['result'] = False
        ret['comment'] = ('Failed to set iptables rule for {0}.\n'
                          'Attempted rule was {1}').format(
                              name,
                              command.strip())
        return ret
def delete(name, table='filter', family='ipv4', **kwargs):
    '''
    .. versionadded:: 2014.1.0

    Delete a rule to a chain

    name
        A user-defined name to call this rule by in another part of a state or
        formula. This should not be an actual rule.

    table
        The table that owns the chain that should be modified

    family
        Networking family, either ipv4 or ipv6

    All other arguments are passed in with the same name as the long option
    that would normally be used for iptables, with one exception: ``--state`` is
    specified as `connstate` instead of `state` (not to be confused with
    `ctstate`).

    Jump options that do not take arguments should be passed in with an empty
    string.
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # Aggregated form: kwargs['rules'] holds the individual rule dicts built by
    # mod_aggregate. Delete each one via a recursive call and defer any
    # requested save until the whole batch has been processed.
    if 'rules' in kwargs:
        ret['changes']['locale'] = []
        comments = []
        save = False
        for rule in kwargs['rules']:
            if 'rules' in rule:
                del rule['rules']
            if '__agg__' in rule:
                del rule['__agg__']
            if 'save' in rule and rule['save']:
                # BUGFIX: ``save`` was never set here (compare ``insert``),
                # so the deferred save below could never run for aggregated
                # deletes.
                save = True
                if rule['save'] is not True:
                    save_file = rule['save']
                else:
                    save_file = True
                rule['save'] = False
            _ret = delete(**rule)
            if 'locale' in _ret['changes']:
                ret['changes']['locale'].append(_ret['changes']['locale'])
            comments.append(_ret['comment'])
            ret['result'] = _ret['result']
        if save:
            if save_file is True:
                save_file = None
            __salt__['iptables.save'](save_file, family=family)
        if not ret['changes']['locale']:
            del ret['changes']['locale']
        ret['comment'] = '\n'.join(comments)
        return ret

    # Strip salt-internal keywords so only iptables options remain.
    for ignore in _STATE_INTERNAL_KEYWORDS:
        if ignore in kwargs:
            del kwargs[ignore]
    kwargs['name'] = name
    kwargs['table'] = table
    rule = __salt__['iptables.build_rule'](family=family, **kwargs)
    command = __salt__['iptables.build_rule'](full=True, family=family, command='D', **kwargs)
    if not __salt__['iptables.check'](table,
                                      kwargs['chain'],
                                      rule,
                                      family) is True:
        # When deleting by position the rule text cannot be matched, so fall
        # through to the delete attempt below instead of declaring success.
        if 'position' not in kwargs:
            ret['result'] = True
            ret['comment'] = 'iptables rule for {0} already absent for {1} ({2})'.format(
                name,
                family,
                command.strip())
            return ret
    if __opts__['test']:
        ret['comment'] = 'iptables rule for {0} needs to be deleted for {1} ({2})'.format(
            name,
            family,
            command.strip())
        return ret

    if 'position' in kwargs:
        result = __salt__['iptables.delete'](
            table,
            kwargs['chain'],
            family=family,
            position=kwargs['position'])
    else:
        result = __salt__['iptables.delete'](
            table,
            kwargs['chain'],
            family=family,
            rule=rule)

    # iptables.delete returns the iptables CLI output, which is empty on
    # success, hence the inverted truth test.
    if not result:
        ret['changes'] = {'locale': name}
        ret['result'] = True
        ret['comment'] = 'Delete iptables rule for {0} {1}'.format(
            name,
            command.strip())
        if 'save' in kwargs:
            if kwargs['save']:
                # Honor a filename supplied via ``save`` (consistent with the
                # ``append`` state); ``save: True`` keeps the default location.
                if kwargs['save'] is not True:
                    filename = kwargs['save']
                else:
                    filename = None
                out = __salt__['iptables.save'](filename, family=family)
                ret['comment'] = ('Deleted and saved iptables rule {0} for {1}\n'
                                  '{2}\n{3}').format(name, family, command.strip(), out)
        return ret
    else:
        ret['result'] = False
        ret['comment'] = ('Failed to delete iptables rule for {0}.\n'
                          'Attempted rule was {1}').format(
                              name,
                              command.strip())
        return ret
def set_policy(name, table='filter', family='ipv4', **kwargs):
    '''
    .. versionadded:: 2014.1.0

    Sets the default policy for iptables firewall tables

    table
        The table that owns the chain that should be modified

    family
        Networking family, either ipv4 or ipv6

    policy
        The requested table policy
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # Discard salt-internal keywords; everything left is an iptables option.
    for internal in _STATE_INTERNAL_KEYWORDS:
        kwargs.pop(internal, None)

    chain = kwargs['chain']
    policy = kwargs['policy']

    # Nothing to do when the chain already carries the requested policy.
    if __salt__['iptables.get_policy'](table, chain, family) == policy:
        ret['result'] = True
        ret['comment'] = ('iptables default policy for chain {0} on table {1} for {2} already set to {3}'
                          .format(chain, table, family, policy))
        return ret

    if __opts__['test']:
        ret['comment'] = 'iptables default policy for chain {0} on table {1} for {2} needs to be set to {3}'.format(
            chain,
            table,
            family,
            policy
        )
        return ret

    # iptables.set_policy returns the command output, which is empty on
    # success — a truthy result therefore signals failure.
    failed = __salt__['iptables.set_policy'](table, chain, policy, family)
    if failed:
        ret['result'] = False
        ret['comment'] = 'Failed to set iptables default policy'
        return ret

    ret['changes'] = {'locale': name}
    ret['result'] = True
    ret['comment'] = 'Set default policy for {0} to {1} family {2}'.format(
        chain,
        policy,
        family
    )
    if kwargs.get('save'):
        __salt__['iptables.save'](filename=None, family=family)
        ret['comment'] = 'Set and saved default policy for {0} to {1} family {2}'.format(
            chain,
            policy,
            family
        )
    return ret
def flush(name, table='filter', family='ipv4', **kwargs):
    '''
    .. versionadded:: 2014.1.0

    Flush current iptables state

    table
        The table that owns the chain that should be modified

    family
        Networking family, either ipv4 or ipv6
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # Discard salt-internal keywords; everything left is an iptables option.
    for internal in _STATE_INTERNAL_KEYWORDS:
        kwargs.pop(internal, None)

    # An absent chain means "flush the entire table".
    kwargs.setdefault('chain', '')

    if __opts__['test']:
        ret['comment'] = 'iptables rules in {0} table {1} chain {2} family needs to be flushed'.format(
            name,
            table,
            family)
        return ret

    # iptables.flush returns the command output, which is empty on success —
    # a truthy result therefore signals failure.
    failed = __salt__['iptables.flush'](table, kwargs['chain'], family)
    if failed:
        ret['result'] = False
        ret['comment'] = 'Failed to flush iptables rules'
        return ret

    ret['changes'] = {'locale': name}
    ret['result'] = True
    ret['comment'] = 'Flush iptables rules in {0} table {1} chain {2} family'.format(
        table,
        kwargs['chain'],
        family
    )
    return ret
def mod_aggregate(low, chunks, running):
    '''
    The mod_aggregate function which looks up all rules in the available
    low chunks and merges them into a single rules ref in the present low data
    '''
    # Only append/insert states can safely be folded together.
    if low.get('fun') not in ('append', 'insert'):
        return low

    gathered = []
    for chunk in chunks:
        tag = salt.utils.gen_state_tag(chunk)
        if tag in running:
            # This iptables chunk already executed; nothing to fold in.
            continue
        if chunk.get('state') != 'iptables':
            continue
        if '__agg__' in chunk:
            continue
        # Only aggregate chunks that call the very same state function.
        if chunk.get('fun') != low.get('fun'):
            continue
        if chunk not in gathered:
            gathered.append(chunk)
            chunk['__agg__'] = True

    if gathered:
        if 'rules' in low:
            low['rules'].extend(gathered)
        else:
            low['rules'] = gathered
    return low
| stephane-martin/salt-debian-packaging | salt-2016.3.3/salt/states/iptables.py | Python | apache-2.0 | 25,947 |
<?php
/**
* LiquidCacheFile class file
*
* @package Liquid
* @copyright Copyright (c) 2011-2012 Harald Hanek
* @license http://harrydeluxe.mit-license.org
*/
class LiquidCacheFile extends LiquidCache
{
    /**
     * Initializes this component.
     *
     * Validates that a writable cache directory was configured and stores its
     * absolute path (with trailing separator) for later reads and writes.
     * (Previous doc wrongly claimed this checks apc cache availability.)
     *
     * @param array $options expects a writable 'cache_dir' entry
     * @throws LiquidException if the cache dir does not exist or is not writable.
     */
    public function __construct($options = array())
    {
        parent::__construct($options);
        if (isset($options['cache_dir']) && is_writable($options['cache_dir']))
            $this->_path = realpath($options['cache_dir']) . DIRECTORY_SEPARATOR;
        else
            throw new LiquidException('Cachedir not exists or not writable');
    }

    /**
     * Retrieves a value from cache with a specified key.
     *
     * @param string $key a unique key identifying the cached value
     * @param boolean $unserialize whether to unserialize the stored payload (default true)
     * @return mixed the value stored in cache, false if the value is not in the cache or expired.
     */
    public function read($key, $unserialize = true)
    {
        if (!$this->exists($key))
            return false;
        if ($unserialize)
            return unserialize(file_get_contents($this->_path . $this->_prefix . $key));
        return file_get_contents($this->_path . $this->_prefix . $key);
    }

    /**
     * Check if specified key exists in cache.
     *
     * A cache file counts as missing once its mtime plus the configured
     * expiry window (_expire, inherited from LiquidCache) lies in the past.
     *
     * @param string $key a unique key identifying the cached value
     * @return boolean true if the key is in cache, false otherwise
     */
    public function exists($key)
    {
        $cacheFile = $this->_path . $this->_prefix . $key;
        if (!file_exists($cacheFile) || @filemtime($cacheFile) + $this->_expire < time())
            return false;
        return true;
    }

    /**
     * Stores a value identified by a key in cache.
     *
     * Also triggers garbage collection of expired entries on every
     * successful write.
     *
     * @param string $key the key identifying the value to be cached
     * @param mixed $value the value to be cached (taken by reference to avoid copying)
     * @param boolean $serialize whether to serialize the value before writing (default true)
     * @return boolean true if the value is successfully stored into cache
     * @throws LiquidException if the cache file cannot be written (it never returns false).
     */
    public function write($key, &$value, $serialize = true)
    {
        if (@file_put_contents($this->_path . $this->_prefix . $key, $serialize ? serialize($value) : $value) !== false)
        {
            $this->gc();
            return true;
        }
        throw new LiquidException('Can not write cache file');
    }

    /**
     * Deletes values from cache.
     *
     * Errors from unlink are deliberately suppressed: best-effort cleanup.
     *
     * @param boolean $expiredOnly when true only entries past their expiry are removed
     * @return void (despite earlier docs, no success flag is returned)
     */
    public function flush($expiredOnly = false)
    {
        foreach(glob($this->_path . $this->_prefix . '*') as $file)
        {
            if ($expiredOnly)
            {
                if (@filemtime($file) + $this->_expire < time())
                    @unlink($file);
            }
            else
                @unlink($file);
        }
    }

    /**
     * Removes expired cache files (garbage collection).
     */
    protected function gc()
    {
        $this->flush(true);
    }
}
| leehanse/zzyyzz | wp-content/themes/myoto/lib/liquid-template/lib/Cache/LiquidCacheFile.class.php | PHP | apache-2.0 | 2,984 |
/**
 * Define public API for Angular here.
 *
 * Each line re-exports an entire sub-module so consumers can import
 * everything from this single top-level entry point.
 */
export * from './change_detection';
export * from './core';
export * from './directives';
export * from './forms';
| lgalfaso/angular | modules/angular2/angular2.js | JavaScript | apache-2.0 | 162 |
/*
* Copyright (c) 2010-2013 Evolveum
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.evolveum.midpoint.wf.impl.processes.itemApproval;
import com.evolveum.midpoint.util.logging.Trace;
import com.evolveum.midpoint.util.logging.TraceManager;
import com.evolveum.midpoint.wf.impl.processes.BaseProcessMidPointInterface;
import com.evolveum.midpoint.wf.impl.processes.common.SpringApplicationContextHolder;
import com.evolveum.midpoint.wf.util.ApprovalUtils;
import org.activiti.engine.delegate.DelegateExecution;
import org.activiti.engine.delegate.JavaDelegate;
import org.apache.commons.lang.Validate;
public class PrepareResult implements JavaDelegate {
private static final Trace LOGGER = TraceManager.getTrace(PrepareResult.class);
public void execute(DelegateExecution execution) {
Boolean loopLevelsStop = (Boolean) execution.getVariable(ProcessVariableNames.LOOP_LEVELS_STOP);
Validate.notNull(loopLevelsStop, "loopLevels_stop is undefined");
boolean approved = !loopLevelsStop;
execution.setVariable(BaseProcessMidPointInterface.VARIABLE_WF_ANSWER, ApprovalUtils.approvalStringValue(approved));
execution.setVariable(BaseProcessMidPointInterface.VARIABLE_WF_STATE, "Final decision is " + (approved ? "APPROVED" : "REFUSED"));
SpringApplicationContextHolder.getActivitiInterface().notifyMidpointAboutProcessFinishedEvent(execution);
}
}
| sabriarabacioglu/engerek | model/workflow-impl/src/main/java/com/evolveum/midpoint/wf/impl/processes/itemApproval/PrepareResult.java | Java | apache-2.0 | 1,940 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.shardingsphere.shardingjdbc.jdbc.core.statement;
import org.apache.shardingsphere.shardingjdbc.common.base.AbstractShadowJDBCDatabaseAndTableTest;
import org.apache.shardingsphere.underlying.common.database.type.DatabaseTypes;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Map;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
/**
 * Integration tests for prepared statements under a shadow rule: statements
 * carrying shadow=true must be routed to the shadow data source (jdbc_1),
 * all other statements to the production data source (jdbc_0).
 */
public final class ShadowPreparedStatementTest extends AbstractShadowJDBCDatabaseAndTableTest {
    
    private static final String INIT_INSERT_SQL = "INSERT INTO t_encrypt (id, cipher_pwd, plain_pwd) VALUES (99, 'cipher', 'plain')";
    
    private static final String INSERT_SQL = "INSERT INTO t_encrypt (id, cipher_pwd, plain_pwd) VALUES (?, ?, ?)";
    
    private static final String SHADOW_INSERT_SQL = "INSERT INTO t_encrypt (id, cipher_pwd, plain_pwd, shadow) VALUES (?, ?, ?, ?)";
    
    private static final String DELETE_SQL = "DELETE FROM t_encrypt WHERE plain_pwd = ?";
    
    private static final String SHADOW_DELETE_SQL = "DELETE FROM t_encrypt WHERE plain_pwd = ? AND shadow = ?";
    
    private static final String SELECT_SQL = "SELECT id, cipher_pwd, plain_pwd FROM t_encrypt";
    
    private static final String SELECT_SQL_BY_ID = "SELECT id, cipher_pwd, plain_pwd FROM t_encrypt WHERE id = ?";
    
    private static final String CLEAN_SQL = "DELETE FROM t_encrypt";
    
    private static final String UPDATE_SQL = "UPDATE t_encrypt SET cipher_pwd = ? WHERE id = ?";
    
    private static final String SHADOW_UPDATE_SQL = "UPDATE t_encrypt SET cipher_pwd = ? WHERE id = ? AND shadow = ?";
    
    @Test
    public void assertInsertWithExecute() throws SQLException {
        try (PreparedStatement statement = getConnection().prepareStatement(INSERT_SQL)) {
            statement.setObject(1, 2);
            statement.setString(2, "cipher");
            statement.setString(3, "plain");
            statement.execute();
        }
        assertResultSet(false, 2, "cipher");
        assertResultSet(true, 1, "cipher");
    }
    
    @Test
    public void assertShadowInsertWithExecute() throws SQLException {
        try (PreparedStatement statement = getConnection().prepareStatement(SHADOW_INSERT_SQL)) {
            statement.setObject(1, 1);
            statement.setString(2, "cipher");
            statement.setString(3, "plain");
            statement.setBoolean(4, true);
            statement.execute();
        }
        assertResultSet(false, 1, "cipher");
        assertResultSet(true, 2, "cipher");
    }
    
    @Test
    public void assertDeleteWithExecute() throws SQLException {
        try (PreparedStatement statement = getConnection().prepareStatement(DELETE_SQL)) {
            statement.setObject(1, "plain");
            statement.executeUpdate();
        }
        assertResultSet(false, 0, "cipher");
    }
    
    @Test
    public void assertShadowDeleteWithExecute() throws SQLException {
        try (PreparedStatement statement = getConnection().prepareStatement(SHADOW_DELETE_SQL)) {
            statement.setObject(1, "plain");
            statement.setBoolean(2, true);
            statement.executeUpdate();
        }
        assertResultSet(true, 0, "cipher");
    }
    
    @Test
    public void assertUpdateWithExecuteUpdate() throws SQLException {
        int result;
        try (PreparedStatement statement = getConnection().prepareStatement(UPDATE_SQL)) {
            statement.setString(1, "cipher_pwd");
            statement.setInt(2, 99);
            result = statement.executeUpdate();
        }
        assertThat(result, is(1));
        assertResultSet(false, 99, 1, "cipher_pwd");
    }
    
    @Test
    public void assertShadowUpdateWithExecuteUpdate() throws SQLException {
        int result;
        try (PreparedStatement statement = getConnection().prepareStatement(SHADOW_UPDATE_SQL)) {
            statement.setString(1, "cipher_pwd");
            statement.setInt(2, 99);
            statement.setBoolean(3, true);
            result = statement.executeUpdate();
        }
        assertThat(result, is(1));
        assertResultSet(true, 99, 1, "cipher_pwd");
    }
    
    /**
     * Picks the raw H2 data source to verify against: jdbc_1 (shadow) or jdbc_0.
     */
    private DataSource getVerificationDataSource(final boolean isShadow) {
        Map<String, DataSource> dataMaps = getDatabaseTypeMap().get(DatabaseTypes.getActualDatabaseType("H2"));
        return isShadow ? dataMaps.get("jdbc_1") : dataMaps.get("jdbc_0");
    }
    
    /**
     * Asserts that the chosen data source holds exactly {@code resultSetCount} rows,
     * each with the expected cipher_pwd value.
     * Connection, Statement and ResultSet are closed via try-with-resources
     * (the previous version leaked the Connection).
     */
    private void assertResultSet(final boolean isShadow, final int resultSetCount, final Object cipherPwd) throws SQLException {
        DataSource dataSource = getVerificationDataSource(isShadow);
        try (Connection connection = dataSource.getConnection();
             Statement statement = connection.createStatement();
             ResultSet resultSet = statement.executeQuery(SELECT_SQL)) {
            int rowCount = 0;
            while (resultSet.next()) {
                assertThat(resultSet.getObject("cipher_pwd"), is(cipherPwd));
                rowCount++;
            }
            assertThat(rowCount, is(resultSetCount));
        }
    }
    
    /**
     * Same as above but filters by id.
     */
    private void assertResultSet(final boolean isShadow, final int id, final int resultSetCount, final Object cipherPwd) throws SQLException {
        DataSource dataSource = getVerificationDataSource(isShadow);
        try (Connection connection = dataSource.getConnection();
             PreparedStatement statement = connection.prepareStatement(SELECT_SQL_BY_ID)) {
            statement.setObject(1, id);
            try (ResultSet resultSet = statement.executeQuery()) {
                int rowCount = 0;
                while (resultSet.next()) {
                    assertThat(resultSet.getObject("cipher_pwd"), is(cipherPwd));
                    rowCount++;
                }
                assertThat(rowCount, is(resultSetCount));
            }
        }
    }
    
    @Before
    public void init() throws SQLException {
        // Seed one identical row in both data sources. The connections come
        // from the base class and are presumably managed there — not closed
        // here (TODO confirm ownership in AbstractShadowJDBCDatabaseAndTableTest).
        try (Statement statement = getActualConnection().createStatement()) {
            statement.execute(INIT_INSERT_SQL);
        }
        try (Statement statement = getShadowConnection().createStatement()) {
            statement.execute(INIT_INSERT_SQL);
        }
    }
    
    @After
    public void clean() throws SQLException {
        try (Statement statement = getActualConnection().createStatement()) {
            statement.execute(CLEAN_SQL);
        }
        try (Statement statement = getShadowConnection().createStatement()) {
            statement.execute(CLEAN_SQL);
        }
    }
}
| shardingjdbc/sharding-jdbc | sharding-jdbc/sharding-jdbc-core/src/test/java/org/apache/shardingsphere/shardingjdbc/jdbc/core/statement/ShadowPreparedStatementTest.java | Java | apache-2.0 | 7,428 |
import {Component} from '@angular/core';
import {Home} from '../home/home';
import {Conferences} from '../conferences/conferences';
import {Agenda} from '../agenda/agenda';
import {Accessmap} from '../accessmap/accessmap';
@Component({
templateUrl: 'build/pages/tabs/tabs.html'
})
export class TabsPage {
constructor() {
// this tells the tabs component which Pages
// should be each tab's root Page
this.tab1Root = Home;
this.tab2Root = Conferences;
this.tab3Root = Agenda;
this.tab4Root=Accessmap;
}
}
| worldline/TechForum2016 | app/pages/tabs/tabs.js | JavaScript | apache-2.0 | 535 |
/**
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* Copyright 2012-2015 the original author or authors.
*/
package org.assertj.db.navigation;
import org.assertj.core.api.Assertions;
import org.assertj.db.api.ChangeAssert;
import org.assertj.db.api.ChangeColumnAssert;
import org.assertj.db.api.ChangesAssert;
import org.assertj.db.common.AbstractTest;
import org.assertj.db.common.NeedReload;
import org.assertj.db.exception.AssertJDBException;
import org.assertj.db.type.Changes;
import org.assertj.db.type.Value;
import org.junit.Test;
import java.lang.reflect.Field;
import java.math.BigDecimal;
import java.sql.Date;
import java.util.UUID;
import static org.assertj.db.api.Assertions.assertThat;
import static org.junit.Assert.fail;
/**
* Tests on {@link org.assertj.db.navigation.ToColumnFromChange} class :
* {@link org.assertj.db.navigation.ToColumnFromChange#columnAmongTheModifiedOnes()} method.
*
* @author Régis Pouiller
*
*/
public class ToColumnFromChange_ColumnAmongTheModifiedOnes_Integer_Test extends AbstractTest {

  /**
   * This method tests the {@code columnAmongTheModifiedOnes} navigation method.
   */
  @Test
  @NeedReload
  public void test_column_among_the_modified_ones_with_index() throws Exception {
    // Record the changes produced by the standard test data updates.
    Changes changes = new Changes(source).setStartPointNow();
    updateChangesForTests();
    changes.setEndPointNow();

    // Reflection handles onto package-private internals, used below to verify
    // how navigation advances the column cursor and what each column holds.
    Field fieldIndex = ChangeAssert.class.getDeclaredField("indexNextColumn");
    fieldIndex.setAccessible(true);
    Field fieldColumnName = ChangeColumnAssert.class.getDeclaredField("columnName");
    fieldColumnName.setAccessible(true);
    Field fieldValueAtStartPoint = ChangeColumnAssert.class.getDeclaredField("valueAtStartPoint");
    fieldValueAtStartPoint.setAccessible(true);
    Field fieldValueAtEndPoint = ChangeColumnAssert.class.getDeclaredField("valueAtEndPoint");
    fieldValueAtEndPoint.setAccessible(true);

    // First navigation chain: call columnAmongTheModifiedOnes(i) on the change
    // assert itself; the internal cursor must advance by one on each call and
    // out-of-range indexes must fail.
    ChangesAssert changesAssert = assertThat(changes);
    ChangeAssert changeAssert = changesAssert.change(6);
    Assertions.assertThat(fieldIndex.get(changeAssert)).isEqualTo(0);
    ChangeColumnAssert changeColumnAssert0 = changeAssert.columnAmongTheModifiedOnes(0);
    Assertions.assertThat(fieldIndex.get(changeAssert)).isEqualTo(1);
    ChangeColumnAssert changeColumnAssert1 = changeAssert.columnAmongTheModifiedOnes(1);
    Assertions.assertThat(fieldIndex.get(changeAssert)).isEqualTo(2);
    ChangeColumnAssert changeColumnAssert2 = changeAssert.columnAmongTheModifiedOnes(2);
    Assertions.assertThat(fieldIndex.get(changeAssert)).isEqualTo(3);
    ChangeColumnAssert changeColumnAssert3 = changeAssert.columnAmongTheModifiedOnes(3);
    Assertions.assertThat(fieldIndex.get(changeAssert)).isEqualTo(4);
    ChangeColumnAssert changeColumnAssert4 = changeAssert.columnAmongTheModifiedOnes(4);
    Assertions.assertThat(fieldIndex.get(changeAssert)).isEqualTo(5);
    try {
      changeAssert.columnAmongTheModifiedOnes(5);
      fail("An exception must be raised");
    } catch (AssertJDBException e) {
      Assertions.assertThat(e.getMessage()).isEqualTo("Index 5 out of the limits of the modified columns [0, 5[");
    }
    try {
      changeAssert.columnAmongTheModifiedOnes(-1);
      fail("An exception must be raised");
    } catch (AssertJDBException e) {
      Assertions.assertThat(e.getMessage()).isEqualTo("Index -1 out of the limits of the modified columns [0, 5[");
    }
    // Navigating back to column 0 must return the cached assert instance.
    ChangeColumnAssert changeColumnAssertAgain0 = changeAssert.column(0);
    Assertions.assertThat(changeColumnAssert0).isSameAs(changeColumnAssertAgain0);

    // Second ("Bis") navigation chain: same walk, but each call is made on the
    // previously returned column assert, proving navigation is chainable.
    ChangesAssert changesAssertBis = assertThat(changes);
    ChangeAssert changeAssertBis = changesAssertBis.change(6);
    Assertions.assertThat(fieldIndex.get(changeAssertBis)).isEqualTo(0);
    ChangeColumnAssert changeColumnAssertBis0 = changeAssertBis.columnAmongTheModifiedOnes(0);
    Assertions.assertThat(fieldIndex.get(changeAssertBis)).isEqualTo(1);
    ChangeColumnAssert changeColumnAssertBis1 = changeColumnAssertBis0.columnAmongTheModifiedOnes(1);
    Assertions.assertThat(fieldIndex.get(changeAssertBis)).isEqualTo(2);
    ChangeColumnAssert changeColumnAssertBis2 = changeColumnAssertBis1.columnAmongTheModifiedOnes(2);
    Assertions.assertThat(fieldIndex.get(changeAssertBis)).isEqualTo(3);
    ChangeColumnAssert changeColumnAssertBis3 = changeColumnAssertBis2.columnAmongTheModifiedOnes(3);
    Assertions.assertThat(fieldIndex.get(changeAssertBis)).isEqualTo(4);
    ChangeColumnAssert changeColumnAssertBis4 = changeColumnAssertBis3.columnAmongTheModifiedOnes(4);
    Assertions.assertThat(fieldIndex.get(changeAssertBis)).isEqualTo(5);
    try {
      changeColumnAssertBis4.columnAmongTheModifiedOnes(5);
      fail("An exception must be raised");
    } catch (AssertJDBException e) {
      Assertions.assertThat(e.getMessage()).isEqualTo("Index 5 out of the limits of the modified columns [0, 5[");
    }
    try {
      changeColumnAssertBis4.columnAmongTheModifiedOnes(-1);
      fail("An exception must be raised");
    } catch (AssertJDBException e) {
      Assertions.assertThat(e.getMessage()).isEqualTo("Index -1 out of the limits of the modified columns [0, 5[");
    }
    ChangeColumnAssert changeColumnAssertBisAgain0 = changeColumnAssertBis4.column(0);
    Assertions.assertThat(changeColumnAssertBis0).isSameAs(changeColumnAssertBisAgain0);

    // Both chains must have visited the same columns, in declaration order.
    Assertions.assertThat(fieldColumnName.get(changeColumnAssert0)).isEqualTo(fieldColumnName.get(changeColumnAssertBis0)).isEqualTo(
            "ID");
    Assertions.assertThat(fieldColumnName.get(changeColumnAssert1)).isEqualTo(fieldColumnName.get(changeColumnAssertBis1)).isEqualTo(
            "NAME");
    Assertions.assertThat(fieldColumnName.get(changeColumnAssert2)).isEqualTo(fieldColumnName.get(changeColumnAssertBis2)).isEqualTo(
            "FIRSTNAME");
    Assertions.assertThat(fieldColumnName.get(changeColumnAssert3)).isEqualTo(
            fieldColumnName.get(changeColumnAssertBis3)).isEqualTo(
            "BIRTH");
    Assertions.assertThat(fieldColumnName.get(changeColumnAssert4)).isEqualTo(
            fieldColumnName.get(changeColumnAssertBis4)).isEqualTo(
            "ACTOR_IMDB");

    // Change 6 is a deletion, so every column has a start-point value…
    Assertions.assertThat(((Value) fieldValueAtStartPoint.get(changeColumnAssert0)).getValue()).isEqualTo(
            ((Value) fieldValueAtStartPoint.get(changeColumnAssertBis0)).getValue()).isEqualTo(
            new BigDecimal("3"));
    Assertions.assertThat(((Value) fieldValueAtStartPoint.get(changeColumnAssert1)).getValue()).
            isEqualTo(((Value) fieldValueAtStartPoint.get(changeColumnAssertBis1)).getValue()).
            isEqualTo("Worthington");
    Assertions.assertThat(((Value) fieldValueAtStartPoint.get(changeColumnAssert2)).getValue()).
            isEqualTo(((Value) fieldValueAtStartPoint.get(changeColumnAssertBis2)).getValue()).
            isEqualTo("Sam");
    Assertions.assertThat(((Value) fieldValueAtStartPoint.get(changeColumnAssert3)).getValue()).
            isEqualTo(((Value) fieldValueAtStartPoint.get(changeColumnAssertBis3)).getValue()).
            isEqualTo(
                    Date.valueOf("1976-08-02"));
    Assertions.assertThat(((Value) fieldValueAtStartPoint.get(changeColumnAssert4)).getValue()).
            isEqualTo(((Value) fieldValueAtStartPoint.get(changeColumnAssertBis4)).getValue()).
            isEqualTo(
                    UUID.fromString("D735221B-5DE5-4112-AA1E-49090CB75ADA"));

    // …and no end-point value.
    Assertions.assertThat(((Value) fieldValueAtEndPoint.get(changeColumnAssert0)).getValue()).isNull();
    Assertions.assertThat(((Value) fieldValueAtEndPoint.get(changeColumnAssert1)).getValue()).isNull();
    Assertions.assertThat(((Value) fieldValueAtEndPoint.get(changeColumnAssert2)).getValue()).isNull();
    Assertions.assertThat(((Value) fieldValueAtEndPoint.get(changeColumnAssert3)).getValue()).isNull();
    Assertions.assertThat(((Value) fieldValueAtEndPoint.get(changeColumnAssert4)).getValue()).isNull();
    Assertions.assertThat(((Value) fieldValueAtEndPoint.get(changeColumnAssertBis0)).getValue()).isNull();
    Assertions.assertThat(((Value) fieldValueAtEndPoint.get(changeColumnAssertBis1)).getValue()).isNull();
    Assertions.assertThat(((Value) fieldValueAtEndPoint.get(changeColumnAssertBis2)).getValue()).isNull();
    Assertions.assertThat(((Value) fieldValueAtEndPoint.get(changeColumnAssertBis3)).getValue()).isNull();
    Assertions.assertThat(((Value) fieldValueAtEndPoint.get(changeColumnAssertBis4)).getValue()).isNull();

    // Change 3 is a modification with a single modified column: index 1 must
    // be rejected and the column must expose both start and end values.
    ChangeColumnAssert changeColumnAssert = assertThat(changes).change(3).columnAmongTheModifiedOnes(0);
    try {
      changeColumnAssert.columnAmongTheModifiedOnes(1);
      fail("An exception must be raised");
    } catch (AssertJDBException e) {
      Assertions.assertThat(e.getMessage()).isEqualTo("Index 1 out of the limits of the modified columns [0, 1[");
    }
    Assertions.assertThat(((Value) fieldValueAtStartPoint.get(changeColumnAssert)).getValue()).isEqualTo("Sigourney");
    Assertions.assertThat(((Value) fieldValueAtEndPoint.get(changeColumnAssert)).getValue()).isEqualTo(
            "Susan Alexandra");
  }
}
| otoniel-isidoro/assertj-db | src/test/java/org/assertj/db/navigation/ToColumnFromChange_ColumnAmongTheModifiedOnes_Integer_Test.java | Java | apache-2.0 | 9,509 |
/* @flow */
import { PropTypes } from 'react';
import { Map } from 'immutable';
import isString from 'lodash/isString';
import includes from 'lodash/includes';
import isUndefined from 'lodash/isUndefined';
import isEmpty from 'lodash/isEmpty';
import map from 'lodash/map';
import mapValues from 'lodash/mapValues';
import isArrayLikeObject from 'lodash/isArrayLikeObject';
import isPlainObject from 'lodash/isPlainObject';
// Lifecycle states of an async value. Frozen so the set is fixed; Symbols
// guarantee each state is unique and cannot collide with plain strings.
export const STATE = Object.freeze({
  EMPTY_REFERENCE: Symbol('empty reference'),
  LOADING: Symbol('loading'),
  COMPLETE: Symbol('complete'),
  ERROR: Symbol('error'),
  NOT_FOUND: Symbol('404'),
  ACCESS_DENIED: Symbol('403')
});
/* Validation */

// A pointer into async content: (namespace, id) addresses one stored AsyncValue.
export type AsyncReference = {
  namespace :string,
  id :string
};

// React prop-type matching the AsyncReference shape above.
export const AsyncReferencePropType = PropTypes.shape({
  namespace: PropTypes.string.isRequired,
  id: PropTypes.string.isRequired
});

// A payload together with its loading state; `value` carries the data when
// COMPLETE and the error when ERROR.
export type AsyncValue = {
  state :Symbol,
  value :any
};
// Builds a prop-type that accepts either the concrete value or an
// AsyncReference pointing at it.
export function referenceOrValuePropType(propType) {
  const accepted = [propType, AsyncReferencePropType];
  return PropTypes.oneOfType(accepted);
}
export function isReference(reference :any) :boolean {
return !!reference && isString(reference.namespace) && isString(reference.id);
}
export function isValue(value :any) :boolean {
return (!!value &&
includes(STATE, value.state) &&
!isUndefined(value.value));
}
export function isEmptyValue(value :any) :boolean {
return isValue(value) && value.state === STATE.EMPTY_REFERENCE;
}
export function isLoadingValue(value :any) :boolean {
return isValue(value) && value.state === STATE.LOADING;
}
export function isCompleteValue(value :any) :boolean {
return isValue(value) && value.state === STATE.COMPLETE;
}
export function isErrorValue(value :any) :boolean {
return isValue(value) && value.state === STATE.ERROR;
}
/*
* Async Reference
*/
export function createReference(namespace :string, id :string) {
if (!isString(namespace) && !isEmpty(namespace)) {
throw Error(`'namespace' must be non-empty string, received: "${namespace}"`);
}
if (!isString(id) && !isEmpty(id)) {
throw Error(`'id' must be non-empty string, received: "${id}"`);
}
return {
namespace,
id
};
}
/*
* Async Values
*/
export function createEmptyValue() :AsyncValue {
return {
state: STATE.EMPTY_REFERENCE,
value: null
};
}
export function createLoadingValue() :AsyncValue {
return {
state: STATE.LOADING,
value: null
};
}
export function createCompleteValue(value :any) :AsyncValue {
return {
state: STATE.COMPLETE,
value
};
}
/** AsyncValue carrying a failed load; the payload slot holds the error. */
export function createErrorValue(error :any) :AsyncValue {
  const errorValue = {
    state: STATE.ERROR,
    value: error
  };
  return errorValue;
}
/*
* Referencing and dereferencing
*/
function getReferencePath(reference :AsyncReference) {
return [reference.namespace, reference.id];
}
// Two-level immutable map: namespace -> id -> AsyncValue.
export type AsyncContent = Map<String, Map<String, AsyncValue>>
export function resolveReference(
asyncContent :AsyncContent,
reference :AsyncReference,
value :AsyncValue) :AsyncContent {
if (!asyncContent) {
throw new Error('"asyncContent" can\'t be null');
}
if (!isReference(reference)) {
throw new Error(`"reference" must be valid AsyncReference, recieved ${reference}`);
}
if (!isValue(value)) {
throw new Error(`"value" must be valid AsyncValue, received ${value}`);
}
const path = getReferencePath(reference);
return asyncContent.setIn(path, value);
}
export function dereference(asyncContent :AsyncContent, reference :AsyncReference) :AsyncValue[] {
if (!asyncContent) {
throw new Error('"asyncContent" can\'t be null');
}
if (!isReference(reference)) {
throw new Error(`"reference" must be valid reference, recieved ${reference}`);
}
const path = getReferencePath(reference);
if (!asyncContent.hasIn(path)) {
return createEmptyValue();
}
return asyncContent.getIn(path);
}
/**
* If value is a reference, dereference.
* If value is array like object, run smartDereference on each item.
* If value is a plain object, run smartDereference on each value
* @param asyncContent
* @param valueOrReference
* @return {*}
*/
export function smartDereference(asyncContent :AsyncContent, valueOrReference :any) :any {
if (isReference(valueOrReference)) {
return dereference(asyncContent, valueOrReference);
}
else if (isArrayLikeObject(valueOrReference)) {
return map(valueOrReference, (vor) => {
return smartDereference(asyncContent, vor);
});
}
else if (isPlainObject(valueOrReference)) {
return mapValues(valueOrReference, (value) => {
return smartDereference(asyncContent, value);
});
}
return valueOrReference;
}
| kryptnostic/gallery | src/containers/async/AsyncStorage.js | JavaScript | apache-2.0 | 4,676 |
<?php
/**
*
* Copyright (c) 2011, Dan Myers.
* Parts copyright (c) 2008, Donovan Schonknecht.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* This is a modified BSD license (the third clause has been removed).
* The BSD license may be found here:
* http://www.opensource.org/licenses/bsd-license.php
*
* Amazon Simple Email Service is a trademark of Amazon.com, Inc. or its affiliates.
*
* SimpleEmailService is based on Donovan Schonknecht's Amazon S3 PHP class, found here:
* http://undesigned.org.za/2007/10/22/amazon-s3-php-class
*
*/
/**
* Amazon SimpleEmailService PHP class
*
* @link http://sourceforge.net/projects/php-aws-ses/
* version 0.8.1
*
*/
/**
 * Thin client for the Amazon Simple Email Service (SES) query API.
 *
 * Holds the AWS credentials and endpoint, exposes one method per SES action,
 * and delegates HTTP transport and request signing to
 * SimpleEmailServiceRequest. Errors are reported through trigger_error() by
 * default, or thrown as SimpleEmailServiceException after
 * enableUseExceptions(true).
 *
 * Fixes versus the original:
 *  - __triggerError() could leave $message undefined when the error array
 *    matched none of its branches; a fallback branch now always builds one.
 *  - $__serviceUnavailableRetryDelayCallback was read but never declared.
 *  - $response is initialized before use in every action method.
 */
class SimpleEmailService
{
    protected $__accessKey; // AWS Access key
    protected $__secretKey; // AWS Secret key
    protected $__host;      // SES endpoint host name

    public function getAccessKey() { return $this->__accessKey; }
    public function getSecretKey() { return $this->__secretKey; }
    public function getHost() { return $this->__host; }

    protected $__verifyHost = 1;
    protected $__verifyPeer = 1;

    // verifyHost and verifyPeer determine whether curl verifies ssl certificates.
    // It may be necessary to disable these checks on certain systems.
    // These only have an effect if SSL is enabled.
    public function verifyHost() { return $this->__verifyHost; }
    public function enableVerifyHost($enable = true) { $this->__verifyHost = $enable; }
    public function verifyPeer() { return $this->__verifyPeer; }
    public function enableVerifyPeer($enable = true) { $this->__verifyPeer = $enable; }

    // If you use exceptions, errors will be communicated by throwing a
    // SimpleEmailServiceException. By default, they will be trigger_error()'d.
    protected $__useExceptions = 0;
    public function useExceptions() { return $this->__useExceptions; }
    public function enableUseExceptions($enable = true) { $this->__useExceptions = $enable; }

    // Optional user callback consulted on 503 retries. Declared here so
    // __executeServiceTemporarilyUnavailableRetryDelay() does not read an
    // undefined property (the original class never declared it).
    protected $__serviceUnavailableRetryDelayCallback = null;

    /**
     * Constructor
     *
     * @param string $accessKey Access key
     * @param string $secretKey Secret key
     * @param string $host      SES endpoint host to send requests to
     * @return void
     */
    public function __construct($accessKey = null, $secretKey = null, $host = 'email.us-east-1.amazonaws.com') {
        if ($accessKey !== null && $secretKey !== null) {
            $this->setAuth($accessKey, $secretKey);
        }
        $this->__host = $host;
    }

    /**
     * Set AWS access key and secret key
     *
     * @param string $accessKey Access key
     * @param string $secretKey Secret key
     * @return void
     */
    public function setAuth($accessKey, $secretKey) {
        $this->__accessKey = $accessKey;
        $this->__secretKey = $secretKey;
    }

    /**
     * Lists the email addresses that have been verified and can be used as the 'From' address
     *
     * @return An array containing two items: a list of verified email addresses, and the request id.
     */
    public function listVerifiedEmailAddresses() {
        $rest = new SimpleEmailServiceRequest($this, 'GET');
        $rest->setParameter('Action', 'ListVerifiedEmailAddresses');

        $rest = $rest->getResponse();
        // A non-200 response without a transport error still counts as failure.
        if($rest->error === false && $rest->code !== 200) {
            $rest->error = array('code' => $rest->code, 'message' => 'Unexpected HTTP status');
        }
        if($rest->error !== false) {
            $this->__triggerError('listVerifiedEmailAddresses', $rest->error);
            return false;
        }

        $response = array();
        if(!isset($rest->body)) {
            return $response;
        }

        $addresses = array();
        foreach($rest->body->ListVerifiedEmailAddressesResult->VerifiedEmailAddresses->member as $address) {
            $addresses[] = (string)$address;
        }
        $response['Addresses'] = $addresses;
        $response['RequestId'] = (string)$rest->body->ResponseMetadata->RequestId;

        return $response;
    }

    /**
     * Requests verification of the provided email address, so it can be used
     * as the 'From' address when sending emails through SimpleEmailService.
     *
     * After submitting this request, you should receive a verification email
     * from Amazon at the specified address containing instructions to follow.
     *
     * @param string email The email address to get verified
     * @return The request id for this request.
     */
    public function verifyEmailAddress($email) {
        $rest = new SimpleEmailServiceRequest($this, 'POST');
        $rest->setParameter('Action', 'VerifyEmailAddress');
        $rest->setParameter('EmailAddress', $email);

        $rest = $rest->getResponse();
        if($rest->error === false && $rest->code !== 200) {
            $rest->error = array('code' => $rest->code, 'message' => 'Unexpected HTTP status');
        }
        if($rest->error !== false) {
            $this->__triggerError('verifyEmailAddress', $rest->error);
            return false;
        }

        $response = array();
        $response['RequestId'] = (string)$rest->body->ResponseMetadata->RequestId;
        return $response;
    }

    /**
     * Removes the specified email address from the list of verified addresses.
     *
     * @param string email The email address to remove
     * @return The request id for this request.
     */
    public function deleteVerifiedEmailAddress($email) {
        $rest = new SimpleEmailServiceRequest($this, 'DELETE');
        $rest->setParameter('Action', 'DeleteVerifiedEmailAddress');
        $rest->setParameter('EmailAddress', $email);

        $rest = $rest->getResponse();
        if($rest->error === false && $rest->code !== 200) {
            $rest->error = array('code' => $rest->code, 'message' => 'Unexpected HTTP status');
        }
        if($rest->error !== false) {
            $this->__triggerError('deleteVerifiedEmailAddress', $rest->error);
            return false;
        }

        $response = array();
        $response['RequestId'] = (string)$rest->body->ResponseMetadata->RequestId;
        return $response;
    }

    /**
     * Retrieves information on the current activity limits for this account.
     * See http://docs.amazonwebservices.com/ses/latest/APIReference/API_GetSendQuota.html
     *
     * @return An array containing information on this account's activity limits.
     */
    public function getSendQuota() {
        $rest = new SimpleEmailServiceRequest($this, 'GET');
        $rest->setParameter('Action', 'GetSendQuota');

        $rest = $rest->getResponse();
        if($rest->error === false && $rest->code !== 200) {
            $rest->error = array('code' => $rest->code, 'message' => 'Unexpected HTTP status');
        }
        if($rest->error !== false) {
            $this->__triggerError('getSendQuota', $rest->error);
            return false;
        }

        $response = array();
        if(!isset($rest->body)) {
            return $response;
        }

        $response['Max24HourSend'] = (string)$rest->body->GetSendQuotaResult->Max24HourSend;
        $response['MaxSendRate'] = (string)$rest->body->GetSendQuotaResult->MaxSendRate;
        $response['SentLast24Hours'] = (string)$rest->body->GetSendQuotaResult->SentLast24Hours;
        $response['RequestId'] = (string)$rest->body->ResponseMetadata->RequestId;

        return $response;
    }

    /**
     * Retrieves statistics for the last two weeks of activity on this account.
     * See http://docs.amazonwebservices.com/ses/latest/APIReference/API_GetSendStatistics.html
     *
     * @return An array of activity statistics. Each array item covers a 15-minute period.
     */
    public function getSendStatistics() {
        $rest = new SimpleEmailServiceRequest($this, 'GET');
        $rest->setParameter('Action', 'GetSendStatistics');

        $rest = $rest->getResponse();
        if($rest->error === false && $rest->code !== 200) {
            $rest->error = array('code' => $rest->code, 'message' => 'Unexpected HTTP status');
        }
        if($rest->error !== false) {
            $this->__triggerError('getSendStatistics', $rest->error);
            return false;
        }

        $response = array();
        if(!isset($rest->body)) {
            return $response;
        }

        $datapoints = array();
        foreach($rest->body->GetSendStatisticsResult->SendDataPoints->member as $datapoint) {
            $p = array();
            $p['Bounces'] = (string)$datapoint->Bounces;
            $p['Complaints'] = (string)$datapoint->Complaints;
            $p['DeliveryAttempts'] = (string)$datapoint->DeliveryAttempts;
            $p['Rejects'] = (string)$datapoint->Rejects;
            $p['Timestamp'] = (string)$datapoint->Timestamp;

            $datapoints[] = $p;
        }

        $response['SendDataPoints'] = $datapoints;
        $response['RequestId'] = (string)$rest->body->ResponseMetadata->RequestId;

        return $response;
    }

    /**
     * Sends a pre-built raw message (headers + body) via the SendRawEmail action.
     * The payload is base64-encoded as the API requires.
     *
     * @param string $raw The full raw message, including headers
     * @return An array with the message id and request id, or false on error.
     */
    public function sendRawEmail($raw) {
        $rest = new SimpleEmailServiceRequest($this, 'POST');
        $rest->setParameter('Action', 'SendRawEmail');
        $rest->setParameter('RawMessage.Data', base64_encode($raw));

        $rest = $rest->getResponse();
        if($rest->error === false && $rest->code !== 200) {
            $rest->error = array('code' => $rest->code, 'message' => 'Unexpected HTTP status');
        }
        if($rest->error !== false) {
            $this->__triggerError('sendRawEmail', $rest->error);
            return false;
        }

        $response = array();
        $response['MessageId'] = (string)$rest->body->SendEmailResult->MessageId;
        $response['RequestId'] = (string)$rest->body->ResponseMetadata->RequestId;
        return $response;
    }

    /**
     * Given a SimpleEmailServiceMessage object, submits the message to the service for sending.
     *
     * @return An array containing the unique identifier for this message and a separate request id.
     *         Returns false if the provided message is missing any required fields.
     */
    public function sendEmail($sesMessage) {
        if(!$sesMessage->validate()) {
            return false;
        }

        $rest = new SimpleEmailServiceRequest($this, 'POST');
        $rest->setParameter('Action', 'SendEmail');

        // The query API addresses list members with 1-based indices.
        $i = 1;
        foreach($sesMessage->to as $to) {
            $rest->setParameter('Destination.ToAddresses.member.'.$i, $to);
            $i++;
        }

        if(is_array($sesMessage->cc)) {
            $i = 1;
            foreach($sesMessage->cc as $cc) {
                $rest->setParameter('Destination.CcAddresses.member.'.$i, $cc);
                $i++;
            }
        }

        if(is_array($sesMessage->bcc)) {
            $i = 1;
            foreach($sesMessage->bcc as $bcc) {
                $rest->setParameter('Destination.BccAddresses.member.'.$i, $bcc);
                $i++;
            }
        }

        if(is_array($sesMessage->replyto)) {
            $i = 1;
            foreach($sesMessage->replyto as $replyto) {
                $rest->setParameter('ReplyToAddresses.member.'.$i, $replyto);
                $i++;
            }
        }

        $rest->setParameter('Source', $sesMessage->from);

        if($sesMessage->returnpath != null) {
            $rest->setParameter('ReturnPath', $sesMessage->returnpath);
        }

        // Subject and both body variants are optional; charsets are only sent
        // when the corresponding part is present.
        if($sesMessage->subject != null && strlen($sesMessage->subject) > 0) {
            $rest->setParameter('Message.Subject.Data', $sesMessage->subject);
            if($sesMessage->subjectCharset != null && strlen($sesMessage->subjectCharset) > 0) {
                $rest->setParameter('Message.Subject.Charset', $sesMessage->subjectCharset);
            }
        }

        if($sesMessage->messagetext != null && strlen($sesMessage->messagetext) > 0) {
            $rest->setParameter('Message.Body.Text.Data', $sesMessage->messagetext);
            if($sesMessage->messageTextCharset != null && strlen($sesMessage->messageTextCharset) > 0) {
                $rest->setParameter('Message.Body.Text.Charset', $sesMessage->messageTextCharset);
            }
        }

        if($sesMessage->messagehtml != null && strlen($sesMessage->messagehtml) > 0) {
            $rest->setParameter('Message.Body.Html.Data', $sesMessage->messagehtml);
            if($sesMessage->messageHtmlCharset != null && strlen($sesMessage->messageHtmlCharset) > 0) {
                $rest->setParameter('Message.Body.Html.Charset', $sesMessage->messageHtmlCharset);
            }
        }

        $rest = $rest->getResponse();
        if($rest->error === false && $rest->code !== 200) {
            $rest->error = array('code' => $rest->code, 'message' => 'Unexpected HTTP status');
        }
        if($rest->error !== false) {
            $this->__triggerError('sendEmail', $rest->error);
            return false;
        }

        $response = array();
        $response['MessageId'] = (string)$rest->body->SendEmailResult->MessageId;
        $response['RequestId'] = (string)$rest->body->ResponseMetadata->RequestId;
        return $response;
    }

    /**
     * Trigger an error message
     *
     * @internal Used by member functions to output errors
     * @param string $functionname The name of the method that failed
     * @param array $error Array containing error information
     * @return string
     */
    public function __triggerError($functionname, $error)
    {
        if($error == false) {
            $message = sprintf("SimpleEmailService::%s(): Encountered an error, but no description given", $functionname);
        }
        else if(isset($error['curl']) && $error['curl'])
        {
            // Transport-level (curl) failure.
            $message = sprintf("SimpleEmailService::%s(): %s %s", $functionname, $error['code'], $error['message']);
        }
        else if(isset($error['Error']))
        {
            // Structured SES error response.
            $e = $error['Error'];
            $message = sprintf("SimpleEmailService::%s(): %s - %s: %s\nRequest Id: %s\n", $functionname, $e['Type'], $e['Code'], $e['Message'], $error['RequestId']);
        }
        else
        {
            // Fallback for unrecognized error shapes. Previously $message was
            // left undefined here, yielding a notice and an empty report.
            $message = sprintf("SimpleEmailService::%s(): Encountered an unrecognized error: %s", $functionname, print_r($error, true));
        }
        if ($this->useExceptions()) {
            throw new SimpleEmailServiceException($message);
        } else {
            trigger_error($message, E_USER_WARNING);
        }
    }

    /**
     * Callback handler for 503 retries.
     *
     * @internal Used by SimpleDBRequest to call the user-specified callback, if set
     * @param $attempt The number of failed attempts so far
     * @return The retry delay in microseconds, or 0 to stop retrying.
     */
    public function __executeServiceTemporarilyUnavailableRetryDelay($attempt)
    {
        if(is_callable($this->__serviceUnavailableRetryDelayCallback)) {
            $callback = $this->__serviceUnavailableRetryDelayCallback;
            return $callback($attempt);
        }
        return 0;
    }
}
/**
 * Builds, signs, and executes a single HTTPS request against the SES query
 * API, parsing the XML response into $this->response.
 *
 * Not reusable: getResponse() mutates $this->response, so create a fresh
 * instance per request.
 */
final class SimpleEmailServiceRequest
{
    // $ses: the owning SimpleEmailService (supplies credentials/host/SSL flags).
    // $verb: HTTP method ('GET', 'POST', or 'DELETE').
    // $parameters: query-string parameters, name => value (or array of values).
    private $ses, $verb, $parameters = array();
    // Response object: ->error (false or array), ->code, ->body (SimpleXML).
    public $response;

    /**
     * Constructor
     *
     * @param string $ses The SimpleEmailService object making this request
     * @param string $action action
     * @param string $verb HTTP verb
     * @return mixed
     */
    function __construct($ses, $verb) {
        $this->ses = $ses;
        $this->verb = $verb;
        $this->response = new STDClass;
        $this->response->error = false;
    }

    /**
     * Set request parameter
     *
     * @param string $key Key
     * @param string $value Value
     * @param boolean $replace Whether to replace the key if it already exists (default true)
     * @return void
     */
    public function setParameter($key, $value, $replace = true) {
        if(!$replace && isset($this->parameters[$key]))
        {
            // Accumulate repeated keys into an array instead of overwriting.
            $temp = (array)($this->parameters[$key]);
            $temp[] = $value;
            $this->parameters[$key] = $temp;
        }
        else
        {
            $this->parameters[$key] = $value;
        }
    }

    /**
     * Get the response
     *
     * @return object | false
     */
    public function getResponse() {

        // Flatten parameters into key=value pairs (repeated keys expand to
        // multiple pairs), then sort — AWS signature v3 requires a canonical order.
        $params = array();
        foreach ($this->parameters as $var => $value)
        {
            if(is_array($value))
            {
                foreach($value as $v)
                {
                    $params[] = $var.'='.$this->__customUrlEncode($v);
                }
            }
            else
            {
                $params[] = $var.'='.$this->__customUrlEncode($value);
            }
        }

        sort($params, SORT_STRING);

        // must be in format 'Sun, 06 Nov 1994 08:49:37 GMT'
        // NOTE(review): gmdate's 'e' prints a timezone identifier ("UTC"), not
        // the literal "GMT" the comment above describes — confirm SES accepts it.
        $date = gmdate('D, d M Y H:i:s e');

        $query = implode('&', $params);

        // The date header is both sent and signed (AWS3-HTTPS scheme signs the
        // Date string rather than the full request).
        $headers = array();
        $headers[] = 'Date: '.$date;
        $headers[] = 'Host: '.$this->ses->getHost();

        $auth = 'AWS3-HTTPS AWSAccessKeyId='.$this->ses->getAccessKey();
        $auth .= ',Algorithm=HmacSHA256,Signature='.$this->__getSignature($date);
        $headers[] = 'X-Amzn-Authorization: '.$auth;

        $url = 'https://'.$this->ses->getHost().'/';

        // Basic setup
        $curl = curl_init();
        curl_setopt($curl, CURLOPT_USERAGENT, 'SimpleEmailService/php');

        curl_setopt($curl, CURLOPT_SSL_VERIFYHOST, ($this->ses->verifyHost() ? 2 : 0));
        curl_setopt($curl, CURLOPT_SSL_VERIFYPEER, ($this->ses->verifyPeer() ? 1 : 0));

        // Request types: GET/DELETE put the query in the URL; POST sends it
        // as a form-encoded body.
        switch ($this->verb) {
            case 'GET':
                $url .= '?'.$query;
                break;
            case 'POST':
                curl_setopt($curl, CURLOPT_CUSTOMREQUEST, $this->verb);
                curl_setopt($curl, CURLOPT_POSTFIELDS, $query);
                $headers[] = 'Content-Type: application/x-www-form-urlencoded';
                break;
            case 'DELETE':
                $url .= '?'.$query;
                curl_setopt($curl, CURLOPT_CUSTOMREQUEST, 'DELETE');
                break;
            default: break;
        }
        curl_setopt($curl, CURLOPT_HTTPHEADER, $headers);
        curl_setopt($curl, CURLOPT_HEADER, false);

        // Body bytes are streamed into $this->response->body by the write
        // callback below (RETURNTRANSFER is intentionally off).
        curl_setopt($curl, CURLOPT_URL, $url);
        curl_setopt($curl, CURLOPT_RETURNTRANSFER, false);
        curl_setopt($curl, CURLOPT_WRITEFUNCTION, array(&$this, '__responseWriteCallback'));
        curl_setopt($curl, CURLOPT_FOLLOWLOCATION, true);

        // Execute, grab errors
        if (curl_exec($curl)) {
            $this->response->code = curl_getinfo($curl, CURLINFO_HTTP_CODE);
        } else {
            // NOTE(review): $this->resource is never declared or assigned on
            // this class, so 'resource' is always null here — looks like a
            // leftover from the S3 class this was derived from; confirm.
            $this->response->error = array(
                'curl' => true,
                'code' => curl_errno($curl),
                'message' => curl_error($curl),
                'resource' => $this->resource
            );
        }

        @curl_close($curl);

        // Parse body into XML
        if ($this->response->error === false && isset($this->response->body)) {
            $this->response->body = simplexml_load_string($this->response->body);

            // Grab SES errors
            if (!in_array($this->response->code, array(200, 201, 202, 204))
                && isset($this->response->body->Error)) {
                $error = $this->response->body->Error;
                $output = array();
                $output['curl'] = false;
                $output['Error'] = array();
                $output['Error']['Type'] = (string)$error->Type;
                $output['Error']['Code'] = (string)$error->Code;
                $output['Error']['Message'] = (string)$error->Message;
                $output['RequestId'] = (string)$this->response->body->RequestId;

                $this->response->error = $output;
                unset($this->response->body);
            }
        }

        return $this->response;
    }

    /**
     * CURL write callback
     *
     * @param resource &$curl CURL resource
     * @param string &$data Data
     * @return integer
     */
    private function __responseWriteCallback(&$curl, &$data) {
        $this->response->body .= $data;
        return strlen($data);
    }

    /**
     * Contributed by afx114
     * URL encode the parameters as per http://docs.amazonwebservices.com/AWSECommerceService/latest/DG/index.html?Query_QueryAuth.html
     * PHP's rawurlencode() follows RFC 1738, not RFC 3986 as required by Amazon. The only difference is the tilde (~), so convert it back after rawurlencode
     * See: http://www.morganney.com/blog/API/AWS-Product-Advertising-API-Requires-a-Signed-Request.php
     *
     * @param string $var String to encode
     * @return string
     */
    private function __customUrlEncode($var) {
        return str_replace('%7E', '~', rawurlencode($var));
    }

    /**
     * Generate the auth string using Hmac-SHA256
     *
     * @internal Used by SimpleDBRequest::getResponse()
     * @param string $string String to sign
     * @return string
     */
    private function __getSignature($string) {
        return base64_encode(hash_hmac('sha256', $string, $this->ses->getSecretKey(), true));
    }
}
/**
 * Mutable container for one outgoing SES email: recipients, sender, subject,
 * and text/html bodies with optional charsets. Consumed by
 * SimpleEmailService::sendEmail().
 *
 * Fix: the original constructor assigned LOCAL variables ($to = array(); ...)
 * instead of object properties, leaving every property null — so e.g.
 * addTo(array(...)) ran array_merge(null, ...) and validate() counted null.
 * All initializers now target $this->.
 */
final class SimpleEmailServiceMessage {

    // these are public for convenience only
    // these are not to be used outside of the SimpleEmailService class!
    public $to, $cc, $bcc, $replyto;
    public $from, $returnpath;
    public $subject, $messagetext, $messagehtml;
    public $subjectCharset, $messageTextCharset, $messageHtmlCharset;

    function __construct() {
        $this->to = array();
        $this->cc = array();
        $this->bcc = array();
        $this->replyto = array();

        $this->from = null;
        $this->returnpath = null;

        $this->subject = null;
        $this->messagetext = null;
        $this->messagehtml = null;

        $this->subjectCharset = null;
        $this->messageTextCharset = null;
        $this->messageHtmlCharset = null;
    }

    /**
     * addTo, addCC, addBCC, and addReplyTo have the following behavior:
     * If a single address is passed, it is appended to the current list of addresses.
     * If an array of addresses is passed, that array is merged into the current list.
     */
    function addTo($to) {
        if(!is_array($to)) {
            $this->to[] = $to;
        }
        else {
            $this->to = array_merge($this->to, $to);
        }
    }

    function addCC($cc) {
        if(!is_array($cc)) {
            $this->cc[] = $cc;
        }
        else {
            $this->cc = array_merge($this->cc, $cc);
        }
    }

    function addBCC($bcc) {
        if(!is_array($bcc)) {
            $this->bcc[] = $bcc;
        }
        else {
            $this->bcc = array_merge($this->bcc, $bcc);
        }
    }

    function addReplyTo($replyto) {
        if(!is_array($replyto)) {
            $this->replyto[] = $replyto;
        }
        else {
            $this->replyto = array_merge($this->replyto, $replyto);
        }
    }

    // Sender address; must be a verified SES address for sending to succeed.
    function setFrom($from) {
        $this->from = $from;
    }

    // Bounce/return-path address (optional).
    function setReturnPath($returnpath) {
        $this->returnpath = $returnpath;
    }

    function setSubject($subject) {
        $this->subject = $subject;
    }

    function setSubjectCharset($charset) {
        $this->subjectCharset = $charset;
    }

    // Sets the text body, and optionally an HTML alternative.
    function setMessageFromString($text, $html = null) {
        $this->messagetext = $text;
        $this->messagehtml = $html;
    }

    // Loads bodies from local files; unreadable/missing files are skipped silently.
    function setMessageFromFile($textfile, $htmlfile = null) {
        if(file_exists($textfile) && is_file($textfile) && is_readable($textfile)) {
            $this->messagetext = file_get_contents($textfile);
        }
        if(file_exists($htmlfile) && is_file($htmlfile) && is_readable($htmlfile)) {
            $this->messagehtml = file_get_contents($htmlfile);
        }
    }

    // Loads bodies from URLs via file_get_contents.
    function setMessageFromURL($texturl, $htmlurl = null) {
        $this->messagetext = file_get_contents($texturl);
        if($htmlurl !== null) {
            $this->messagehtml = file_get_contents($htmlurl);
        }
    }

    function setMessageCharset($textCharset, $htmlCharset = null) {
        $this->messageTextCharset = $textCharset;
        $this->messageHtmlCharset = $htmlCharset;
    }

    /**
     * Validates whether the message object has sufficient information to submit a request to SES.
     * This does not guarantee the message will arrive, nor that the request will succeed;
     * instead, it makes sure that no required fields are missing.
     *
     * This is used internally before attempting a SendEmail or SendRawEmail request,
     * but it can be used outside of this file if verification is desired.
     * May be useful if e.g. the data is being populated from a form; developers can generally
     * use this function to verify completeness instead of writing custom logic.
     *
     * @return boolean
     */
    public function validate() {
        // Requires at least one recipient, a sender, and a text body.
        if(count($this->to) == 0)
            return false;
        if($this->from == null || strlen($this->from) == 0)
            return false;
        if($this->messagetext == null)
            return false;

        return true;
    }
}
/**
* Thrown by SimpleEmailService when errors occur if you call
* enableUseExceptions(true).
*/
final class SimpleEmailServiceException extends Exception {
} | apexstudios/phabricator | externals/amazon-ses/ses.php | PHP | apache-2.0 | 23,676 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api.records;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.util.Records;
/**
* <p>
* <code>ContainerReport</code> is a report of an container.
* </p>
*
* <p>
* It includes details such as:
* <ul>
* <li>{@link ContainerId} of the container.</li>
* <li>Allocated Resources to the container.</li>
* <li>Assigned Node id.</li>
* <li>Assigned Priority.</li>
* <li>Creation Time.</li>
* <li>Finish Time.</li>
* <li>Container Exit Status.</li>
* <li>{@link ContainerState} of the container.</li>
* <li>Diagnostic information in case of errors.</li>
* <li>Log URL.</li>
* <li>nodeHttpAddress</li>
* </ul>
* </p>
*
*/
@Public
@Unstable
@Public
@Unstable
public abstract class ContainerReport {

  /**
   * Builds a fully-populated report in one call. Marked {@code @Private}:
   * callers obtain instances from the YARN client/history APIs rather than
   * constructing them directly.
   */
  @Private
  @Unstable
  public static ContainerReport newInstance(ContainerId containerId,
      Resource allocatedResource, NodeId assignedNode, Priority priority,
      long creationTime, long finishTime, String diagnosticInfo, String logUrl,
      int containerExitStatus, ContainerState containerState,
      String nodeHttpAddress) {
    // Records.newRecord supplies the concrete (protobuf-backed) implementation.
    ContainerReport report = Records.newRecord(ContainerReport.class);
    report.setContainerId(containerId);
    report.setAllocatedResource(allocatedResource);
    report.setAssignedNode(assignedNode);
    report.setPriority(priority);
    report.setCreationTime(creationTime);
    report.setFinishTime(finishTime);
    report.setDiagnosticsInfo(diagnosticInfo);
    report.setLogUrl(logUrl);
    report.setContainerExitStatus(containerExitStatus);
    report.setContainerState(containerState);
    report.setNodeHttpAddress(nodeHttpAddress);
    return report;
  }

  /**
   * Get the <code>ContainerId</code> of the container.
   *
   * @return <code>ContainerId</code> of the container.
   */
  @Public
  @Unstable
  public abstract ContainerId getContainerId();

  @Public
  @Unstable
  public abstract void setContainerId(ContainerId containerId);

  /**
   * Get the allocated <code>Resource</code> of the container.
   *
   * @return allocated <code>Resource</code> of the container.
   */
  @Public
  @Unstable
  public abstract Resource getAllocatedResource();

  @Public
  @Unstable
  public abstract void setAllocatedResource(Resource resource);

  /**
   * Get the allocated <code>NodeId</code> where container is running.
   *
   * @return allocated <code>NodeId</code> where container is running.
   */
  @Public
  @Unstable
  public abstract NodeId getAssignedNode();

  @Public
  @Unstable
  public abstract void setAssignedNode(NodeId nodeId);

  /**
   * Get the allocated <code>Priority</code> of the container.
   *
   * @return allocated <code>Priority</code> of the container.
   */
  @Public
  @Unstable
  public abstract Priority getPriority();

  @Public
  @Unstable
  public abstract void setPriority(Priority priority);

  /**
   * Get the creation time of the container.
   *
   * @return creation time of the container
   */
  @Public
  @Unstable
  public abstract long getCreationTime();

  @Public
  @Unstable
  public abstract void setCreationTime(long creationTime);

  /**
   * Get the Finish time of the container.
   *
   * @return Finish time of the container
   */
  @Public
  @Unstable
  public abstract long getFinishTime();

  @Public
  @Unstable
  public abstract void setFinishTime(long finishTime);

  /**
   * Get the DiagnosticsInfo of the container.
   *
   * @return DiagnosticsInfo of the container
   */
  @Public
  @Unstable
  public abstract String getDiagnosticsInfo();

  @Public
  @Unstable
  public abstract void setDiagnosticsInfo(String diagnosticsInfo);

  /**
   * Get the LogURL of the container.
   *
   * @return LogURL of the container
   */
  @Public
  @Unstable
  public abstract String getLogUrl();

  @Public
  @Unstable
  public abstract void setLogUrl(String logUrl);

  /**
   * Get the final <code>ContainerState</code> of the container.
   *
   * @return final <code>ContainerState</code> of the container.
   */
  @Public
  @Unstable
  public abstract ContainerState getContainerState();

  @Public
  @Unstable
  public abstract void setContainerState(ContainerState containerState);

  /**
   * Get the final <code>exit status</code> of the container.
   *
   * @return final <code>exit status</code> of the container.
   */
  @Public
  @Unstable
  public abstract int getContainerExitStatus();

  @Public
  @Unstable
  public abstract void setContainerExitStatus(int containerExitStatus);

  /**
   * Get the Node Http address of the container
   *
   * @return the node http address of the container
   */
  @Public
  @Unstable
  public abstract String getNodeHttpAddress();

  // Setter is private to the framework, unlike the other setters above.
  @Private
  @Unstable
  public abstract void setNodeHttpAddress(String nodeHttpAddress);
}
| tecknowledgeable/hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java | Java | apache-2.0 | 5,718 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Removes the slide-show models from the pages app. The 'slide_show'
    # fields on Slide and Panel are dropped first, so that Slide and
    # SlideShow can then be deleted; the operation order below matters.

    dependencies = [
        ('pages', '0009_auto_20160704_0023'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='slide',
            name='slide_show',
        ),
        migrations.RemoveField(
            model_name='panel',
            name='slide_show',
        ),
        migrations.DeleteModel(
            name='Slide',
        ),
        migrations.DeleteModel(
            name='SlideShow',
        ),
    ]
| thanos/mykonosbiennale.org | pages/migrations/0010_auto_20160704_0030.py | Python | apache-2.0 | 601 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.rpc.protocol.thrift;
import org.apache.dubbo.common.URL;
import org.apache.dubbo.common.utils.ReflectUtils;
import org.apache.dubbo.rpc.gen.dubbo.$__DemoStub;
import org.apache.dubbo.rpc.gen.dubbo.Demo;
import org.apache.dubbo.rpc.protocol.thrift.ext.MultiServiceProcessor;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.server.TThreadPoolServer;
import org.apache.thrift.transport.TServerSocket;
import org.apache.thrift.transport.TServerTransport;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.lang.reflect.Field;
import java.util.Map;
public class ServiceMethodNotFoundTest extends AbstractTest {

    private URL url;

    /**
     * Starts a thrift server whose processor map has had the "echoString"
     * entry removed, so that invoking that method exercises the
     * "service method not found" handling path.
     */
    protected void init() throws Exception {
        TServerTransport serverTransport = new TServerSocket(PORT);
        DubboDemoImpl impl = new DubboDemoImpl();
        $__DemoStub.Processor processor = new $__DemoStub.Processor(impl);
        // Reflectively remove the "echoString" handler from the generated
        // processor so the server no longer knows the method.
        Field field = processor.getClass().getSuperclass().getDeclaredField("processMap");
        ReflectUtils.makeAccessible(field);
        Object obj = field.get(processor);
        if (obj instanceof Map) {
            ((Map<?, ?>) obj).remove("echoString");
        }
        TBinaryProtocol.Factory bFactory = new TBinaryProtocol.Factory();
        MultiServiceProcessor wrapper = new MultiServiceProcessor();
        wrapper.addProcessor(Demo.class, processor);
        server = new TThreadPoolServer(
                new TThreadPoolServer.Args(serverTransport)
                        .inputProtocolFactory(bFactory)
                        .outputProtocolFactory(bFactory)
                        .inputTransportFactory(getTransportFactory())
                        .outputTransportFactory(getTransportFactory())
                        .processor(wrapper));
        // Serve on a daemon thread so a failing test cannot keep the JVM
        // alive after the test run finishes.
        Thread startThread = new Thread() {
            @Override
            public void run() {
                server.serve();
            }
        };
        startThread.setDaemon(true);
        startThread.start();
        // Block until the server is actually accepting connections.
        while (!server.isServing()) {
            Thread.sleep(100);
        }
    }

    @BeforeEach
    public void setUp() throws Exception {
        init();
        protocol = new ThriftProtocol();
        url = URL.valueOf(ThriftProtocol.NAME + "://127.0.0.1:" + PORT + "/" + Demo.class.getName());
    }

    @AfterEach
    public void tearDown() throws Exception {
        destroy();
        if (protocol != null) {
            protocol.destroy();
            protocol = null;
        }
        if (invoker != null) {
            invoker.destroy();
            invoker = null;
        }
    }

    @Test
    public void testServiceMethodNotFound() throws Exception {
        // FIXME
        /*url = url.addParameter( "echoString." + Constants.TIMEOUT_KEY, Integer.MAX_VALUE );
        invoker = protocol.refer( Demo.class, url );
        org.junit.jupiter.api.Assertions.assertNotNull( invoker );
        RpcInvocation invocation = new RpcInvocation();
        invocation.setMethodName( "echoString" );
        invocation.setParameterTypes( new Class<?>[]{ String.class } );
        String arg = "Hello, World!";
        invocation.setArguments( new Object[] { arg } );
        invocation.setAttachment(Constants.INTERFACE_KEY, DemoImpl.class.getName());
        Result result = invoker.invoke( invocation );
        Assertions.assertNull( result.getResult() );
        Assertions.assertTrue( result.getException() instanceof RpcException );*/
    }
}
| lovepoem/dubbo | dubbo-rpc/dubbo-rpc-thrift/src/test/java/org/apache/dubbo/rpc/protocol/thrift/ServiceMethodNotFoundTest.java | Java | apache-2.0 | 4,379 |
using Clients;
using IdentityModel;
using IdentityModel.Client;
using Newtonsoft.Json.Linq;
using System;
using System.Diagnostics;
using System.Net.Http;
using System.Threading;
using System.Threading.Tasks;
namespace ConsoleDeviceFlow
{
public class Program
{
static IDiscoveryCache _cache = new DiscoveryCache(Constants.Authority);
public static async Task Main()
{
Console.Title = "Console Device Flow";
var authorizeResponse = await RequestAuthorizationAsync();
var tokenResponse = await RequestTokenAsync(authorizeResponse);
tokenResponse.Show();
Console.ReadLine();
await CallServiceAsync(tokenResponse.AccessToken);
}
static async Task<DeviceAuthorizationResponse> RequestAuthorizationAsync()
{
var disco = await _cache.GetAsync();
if (disco.IsError) throw new Exception(disco.Error);
var client = new HttpClient();
var response = await client.RequestDeviceAuthorizationAsync(new DeviceAuthorizationRequest
{
Address = disco.DeviceAuthorizationEndpoint,
ClientId = "device"
});
if (response.IsError) throw new Exception(response.Error);
Console.WriteLine($"user code : {response.UserCode}");
Console.WriteLine($"device code : {response.DeviceCode}");
Console.WriteLine($"URL : {response.VerificationUri}");
Console.WriteLine($"Complete URL: {response.VerificationUriComplete}");
Console.WriteLine($"\nPress enter to launch browser ({response.VerificationUri})");
Console.ReadLine();
Process.Start(new ProcessStartInfo(response.VerificationUri) { UseShellExecute = true });
return response;
}
private static async Task<TokenResponse> RequestTokenAsync(DeviceAuthorizationResponse authorizeResponse)
{
var disco = await _cache.GetAsync();
if (disco.IsError) throw new Exception(disco.Error);
var client = new HttpClient();
while (true)
{
var response = await client.RequestDeviceTokenAsync(new DeviceTokenRequest
{
Address = disco.TokenEndpoint,
ClientId = "device",
DeviceCode = authorizeResponse.DeviceCode
});
if (response.IsError)
{
if (response.Error == OidcConstants.TokenErrors.AuthorizationPending || response.Error == OidcConstants.TokenErrors.SlowDown)
{
Console.WriteLine($"{response.Error}...waiting.");
Thread.Sleep(authorizeResponse.Interval * 1000);
}
else
{
throw new Exception(response.Error);
}
}
else
{
return response;
}
}
}
static async Task CallServiceAsync(string token)
{
var baseAddress = Constants.SampleApi;
var client = new HttpClient
{
BaseAddress = new Uri(baseAddress)
};
client.SetBearerToken(token);
var response = await client.GetStringAsync("identity");
"\n\nService claims:".ConsoleGreen();
Console.WriteLine(JArray.Parse(response));
}
}
} | MienDev/IdentityServer4 | samples/Clients/src/ConsoleDeviceFlow/Program.cs | C# | apache-2.0 | 3,613 |
"""s6 services management.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import errno
import os
import logging
import six
from treadmill import fs
from .. import _utils
from .. import _service_base
_LOGGER = logging.getLogger(__name__)
class BundleService(_service_base.Service):
    """Bundle service for the s6-rc service manager.

    A bundle is a named set of other services; its only payload is the
    ``contents`` list, persisted in the ``contents`` file of the service
    directory.
    """

    __slots__ = (
        '_contents',
    )

    _TYPE = _service_base.ServiceType.Bundle

    def __init__(self, directory, name, contents=None):
        super(BundleService, self).__init__(directory, name)
        self._contents = contents

    @property
    def type(self):
        """Service type (always ``Bundle``)."""
        return self._TYPE

    @property
    def _contents_file(self):
        """Path of the file storing the bundle contents."""
        return os.path.join(self._dir, 'contents')

    @property
    def contents(self):
        """List of services contained in this bundle (lazily loaded)."""
        if self._contents is not None:
            return self._contents
        self._contents = _utils.set_list_read(self._contents_file)
        return self._contents

    def write(self):
        """Persist the bundle definition to disk.

        :raises ValueError:
            If the bundle has no contents, neither in memory nor on disk.
        """
        super(BundleService, self).write()
        contents = self._contents
        # A bundle without any contents is invalid.
        if not contents and not os.path.exists(self._contents_file):
            raise ValueError('Invalid Bundle: No content')
        if contents is None:
            return
        if not contents:
            raise ValueError('Invalid Bundle: empty')
        _utils.set_list_write(self._contents_file, contents)
@six.add_metaclass(abc.ABCMeta)
class _AtomicService(_service_base.Service):
    """Abstract base class for all atomic services (per s6-rc definition).
    """
    __slots__ = (
        '_dependencies',
        '_timeout_up',
        '_timeout_down',
        '_env',
    )
    # Any constructor argument left as None is lazily read back from the
    # on-disk service definition on first access of the matching property.
    def __init__(self, directory, name,
                 timeout_up=None, timeout_down=None,
                 dependencies=None, environ=None):
        super(_AtomicService, self).__init__(directory, name)
        self._dependencies = dependencies
        self._timeout_up = timeout_up
        self._timeout_down = timeout_down
        self._env = environ
    @property
    def data_dir(self):
        """Returns the data directory for the services.
        :returns ``str``:
            Full path to the service data directory.
        """
        return os.path.join(self._dir, 'data')
    @property
    def env_dir(self):
        """Returns the environ directory for the services.
        :returns ``str``:
            Full path to the service environ directory.
        """
        return os.path.join(self._dir, 'env')
    @property
    def environ(self):
        """Returns the environ dictionary for the services.
        :returns ``dict``:
            Service environ dictionary.
        """
        # Lazily read from the env/ directory on first access.
        if self._env is None:
            self._env = _utils.environ_dir_read(self.env_dir)
        return self._env
    @environ.setter
    def environ(self, new_environ):
        self._env = new_environ
    @property
    def _dependencies_file(self):
        # File listing the names of services this one depends on.
        return os.path.join(self._dir, 'dependencies')
    @property
    def dependencies(self):
        """Returns the dependencies set for the services.
        :returns ``set``:
            Service dependencies set.
        """
        if self._dependencies is None:
            self._dependencies = _utils.set_list_read(self._dependencies_file)
        return self._dependencies
    @dependencies.setter
    def dependencies(self, new_deps):
        self._dependencies = set(new_deps)
    @property
    def timeout_up(self):
        """Returns amount of milliseconds to wait for the service to come up.
        :returns ``int``:
            Amount of milliseconds to wait. 0 means infinitely.
        """
        if self._timeout_up is None:
            self._timeout_up = _utils.value_read(
                os.path.join(self._dir, 'timeout-up'),
                default=0
            )
        return self._timeout_up
    @property
    def timeout_down(self):
        """Returns amount of milliseconds to wait for the service to come down.
        :returns ``int``:
            Amount of milliseconds to wait. 0 means infinitely.
        """
        if self._timeout_down is None:
            self._timeout_down = _utils.value_read(
                os.path.join(self._dir, 'timeout-down'),
                default=0
            )
        return self._timeout_down
    @abc.abstractmethod
    def write(self):
        """Write down the service definition.
        """
        # Subclasses must invoke this via super() before writing their own
        # settings; it creates the base layout shared by all atomic services.
        super(_AtomicService, self).write()
        # We only write dependencies/environ if we have new ones.
        fs.mkdir_safe(self.env_dir)
        fs.mkdir_safe(self.data_dir)
        if self._dependencies is not None:
            _utils.set_list_write(self._dependencies_file, self._dependencies)
        if self._env is not None:
            _utils.environ_dir_write(self.env_dir, self._env)
        if self._timeout_up is not None:
            _utils.value_write(
                os.path.join(self._dir, 'timeout-up'),
                self._timeout_up
            )
        if self._timeout_down is not None:
            _utils.value_write(
                os.path.join(self._dir, 'timeout-down'),
                self._timeout_down
            )
class LongrunService(_AtomicService):
    """s6 long running service.
    """
    __slots__ = (
        '_consumer_for',
        '_default_down',
        '_finish_script',
        '_log_run_script',
        '_notification_fd',
        '_pipeline_name',
        '_producer_for',
        '_run_script',
        '_timeout_finish',
    )
    _TYPE = _service_base.ServiceType.LongRun
    def __init__(self, directory, name,
                 run_script=None, finish_script=None, notification_fd=None,
                 log_run_script=None, timeout_finish=None, default_down=None,
                 pipeline_name=None, producer_for=None, consumer_for=None,
                 dependencies=None, environ=None):
        """Initialize the long-running service definition.

        Any argument left as ``None`` is lazily read back from the on-disk
        service directory on first access of the matching property.
        """
        super(LongrunService, self).__init__(
            directory,
            name,
            dependencies=dependencies,
            environ=environ
        )
        # A service cannot both be a pipeline producer and have its own
        # logger; the two options are mutually exclusive.
        if producer_for and log_run_script:
            raise ValueError('Invalid LongRun service options: producer/log')
        self._consumer_for = consumer_for
        self._default_down = default_down
        self._finish_script = finish_script
        self._log_run_script = log_run_script
        self._notification_fd = notification_fd
        self._pipeline_name = pipeline_name
        self._producer_for = producer_for
        self._run_script = run_script
        self._timeout_finish = timeout_finish
    @property
    def type(self):
        """Service type (always ``LongRun``)."""
        return self._TYPE
    @property
    def logger_dir(self):
        """Returns the logger directory for the services.
        :returns ``str``:
            Full path to the service log directory.
        """
        return os.path.join(self._dir, 'log')
    @property
    def notification_fd(self):
        """s6 "really up" notification fd.
        """
        if self._notification_fd is None:
            self._notification_fd = _utils.value_read(
                os.path.join(self._dir, 'notification-fd'),
                default=-1
            )
        return self._notification_fd
    @notification_fd.setter
    def notification_fd(self, new_notification_fd):
        self._notification_fd = new_notification_fd
    @property
    def default_down(self):
        """Is the default service state set to down?
        """
        # Presence of the 'down' file means the service is not started by
        # default.
        if self._default_down is None:
            self._default_down = os.path.exists(
                os.path.join(self._dir, 'down')
            )
        return self._default_down
    @default_down.setter
    def default_down(self, default_down):
        self._default_down = bool(default_down)
    @property
    def _run_file(self):
        return os.path.join(self._dir, 'run')
    @property
    def _finish_file(self):
        return os.path.join(self._dir, 'finish')
    @property
    def _log_run_file(self):
        return os.path.join(self.logger_dir, 'run')
    @property
    def run_script(self):
        """Service run script.
        """
        if self._run_script is None:
            self._run_script = _utils.script_read(self._run_file)
        return self._run_script
    @run_script.setter
    def run_script(self, new_script):
        self._run_script = new_script
    @property
    def finish_script(self):
        """Service finish script.
        """
        if self._finish_script is None:
            try:
                self._finish_script = _utils.script_read(self._finish_file)
            except IOError as err:
                # A missing finish script is fine; re-raise anything else.
                # NOTE: compare errno values with '!=', not 'is not' --
                # identity comparison of integers is an implementation detail
                # of CPython's small-int caching and must not be relied upon.
                if err.errno != errno.ENOENT:
                    raise
        return self._finish_script
    @finish_script.setter
    def finish_script(self, new_script):
        self._finish_script = new_script
    @property
    def log_run_script(self):
        """Service log run script.
        """
        if self._log_run_script is None:
            try:
                self._log_run_script = _utils.script_read(self._log_run_file)
            except IOError as err:
                # A missing log run script is fine; re-raise anything else.
                if err.errno != errno.ENOENT:
                    raise
        return self._log_run_script
    @log_run_script.setter
    def log_run_script(self, new_script):
        self._log_run_script = new_script
    @property
    def timeout_finish(self):
        """Returns amount of milliseconds to wait for the finish script to
        complete.
        :returns ``int``:
            Amount of milliseconds to wait. 0 means infinitely. Default 5000.
        """
        if self._timeout_finish is None:
            self._timeout_finish = _utils.value_read(
                os.path.join(self._dir, 'timeout-finish'),
                default=5000
            )
        return self._timeout_finish
    @timeout_finish.setter
    def timeout_finish(self, timeout_finish):
        """Service finish script timeout.
        """
        if timeout_finish is not None:
            if isinstance(timeout_finish, six.integer_types):
                self._timeout_finish = timeout_finish
            else:
                self._timeout_finish = int(timeout_finish, 10)
    @property
    def _pipeline_name_file(self):
        return os.path.join(self._dir, 'pipeline-name')
    @property
    def pipeline_name(self):
        """Gets the name of the pipeline.
        """
        if self._pipeline_name is None:
            self._pipeline_name = _utils.data_read(self._pipeline_name_file)
        return self._pipeline_name
    @pipeline_name.setter
    def pipeline_name(self, new_name):
        self._pipeline_name = new_name
    @property
    def _producer_for_file(self):
        return os.path.join(self._dir, 'producer-for')
    @property
    def producer_for(self):
        """Gets which services this service is a producer for.
        """
        if self._producer_for is None:
            self._producer_for = _utils.data_read(self._producer_for_file)
        return self._producer_for
    @producer_for.setter
    def producer_for(self, new_name):
        """Sets the producer for another service.
        """
        self._producer_for = new_name
    @property
    def _consumer_for_file(self):
        return os.path.join(self._dir, 'consumer-for')
    @property
    def consumer_for(self):
        """Gets which services this service is a consumer for.
        """
        if self._consumer_for is None:
            self._consumer_for = _utils.data_read(self._consumer_for_file)
        return self._consumer_for
    @consumer_for.setter
    def consumer_for(self, new_name):
        """Sets which services this service is a consumer for.
        """
        self._consumer_for = new_name
    def write(self):
        """Write down the service definition.

        :raises ValueError:
            If the service has no run script, neither in memory nor on disk.
        """
        # Disable R0912: Too many branches
        # pylint: disable=R0912
        super(LongrunService, self).write()
        # Mandatory settings
        if self._run_script is None and not os.path.exists(self._run_file):
            raise ValueError('Invalid LongRun service: not run script')
        if self._run_script is not None:
            _utils.script_write(self._run_file, self._run_script)
            # Handle the case where the run script is a generator
            if not isinstance(self._run_script, six.string_types):
                self._run_script = None
        # Optional settings
        if self._finish_script is not None:
            _utils.script_write(self._finish_file, self._finish_script)
            # Handle the case where the finish script is a generator
            if not isinstance(self._finish_script, six.string_types):
                self._finish_script = None
        if self._log_run_script is not None:
            # Create the log dir on the spot
            fs.mkdir_safe(os.path.dirname(self._log_run_file))
            _utils.script_write(self._log_run_file, self._log_run_script)
            # Handle the case where the run script is a generator
            if not isinstance(self._log_run_script, six.string_types):
                self._log_run_script = None
        if self._default_down:
            _utils.data_write(
                os.path.join(self._dir, 'down'),
                None
            )
        else:
            fs.rm_safe(os.path.join(self._dir, 'down'))
        if self._timeout_finish is not None:
            _utils.value_write(
                os.path.join(self._dir, 'timeout-finish'),
                self._timeout_finish
            )
        if self._notification_fd is not None:
            _utils.value_write(
                os.path.join(self._dir, 'notification-fd'),
                self._notification_fd
            )
        if self._pipeline_name is not None:
            _utils.data_write(self._pipeline_name_file, self._pipeline_name)
        if self._producer_for is not None:
            _utils.data_write(self._producer_for_file, self._producer_for)
        if self._consumer_for is not None:
            _utils.data_write(self._consumer_for_file, self._consumer_for)
class OneshotService(_AtomicService):
    """Represents a s6 rc one-shot service which is only ever executed once.
    """
    __slots__ = (
        '_up',
        '_down',
    )
    # TODO: timeout-up/timeout-down
    _TYPE = _service_base.ServiceType.Oneshot
    def __init__(self, directory, name=None,
                 up_script=None, down_script=None,
                 dependencies=None, environ=None):
        """Initialize the one-shot service definition.

        Scripts left as ``None`` are lazily read back from disk on first
        property access.
        """
        super(OneshotService, self).__init__(
            directory,
            name,
            dependencies=dependencies,
            environ=environ
        )
        self._up = up_script
        self._down = down_script
    @property
    def type(self):
        """Service type (always ``Oneshot``)."""
        return self._TYPE
    @property
    def _up_file(self):
        return os.path.join(self._dir, 'up')
    @property
    def _down_file(self):
        return os.path.join(self._dir, 'down')
    @property
    def up(self):
        """Gets the one shot service up file.
        """
        if self._up is None:
            self._up = _utils.script_read(self._up_file)
        return self._up
    @up.setter
    def up(self, new_script):
        """Sets the one-shot service up file.
        """
        self._up = new_script
    @property
    def down(self):
        """Gets the one-shot service down file.
        """
        if self._down is None:
            self._down = _utils.script_read(self._down_file)
        return self._down
    @down.setter
    def down(self, new_script):
        """Sets the one-shot service down file.
        """
        self._down = new_script
    def write(self):
        """Write down the service definition.

        :raises ValueError:
            If the service has no up script, neither in memory nor on disk.
        """
        super(OneshotService, self).write()
        # Mandatory settings
        if not self._up and not os.path.exists(self._up_file):
            raise ValueError('Invalid Oneshot service: not up script')
        if self._up is not None:
            _utils.script_write(self._up_file, self._up)
            # Script generators can only be consumed once; clear the cached
            # value so the next access re-reads the script from disk.
            # (BUGFIX: this used to test/assign ``self._up_file``, which is a
            # read-only property holding the file path, so generator scripts
            # were never cleared -- mirror LongrunService.write instead.)
            if not isinstance(self._up, six.string_types):
                self._up = None
        # Optional settings
        if self._down is not None:
            _utils.script_write(self._down_file, self._down)
            # Same generator handling as for the up script above.
            if not isinstance(self._down, six.string_types):
                self._down = None
def create_service(svc_basedir, svc_name, svc_type, **kwargs):
    """Instantiate a new service object of the requested type.

    :param ``str`` svc_basedir:
        Base directory where to create the service.
    :param ``str`` svc_name:
        Name of the new service.
    :param ``_service_base.ServiceType`` svc_type:
        Type for the new service.
    :param ``dict`` kw_args:
        Additional argument passed to the constructor of the new service.
    :returns ``Service``:
        New instance of the service
    """
    type_to_class = {
        _service_base.ServiceType.Bundle: BundleService,
        _service_base.ServiceType.LongRun: LongrunService,
        _service_base.ServiceType.Oneshot: OneshotService,
    }
    try:
        cls = type_to_class[svc_type]
    except KeyError:
        # Unknown type: log loudly and fall back to a long-running service.
        _LOGGER.critical('No implementation for service type %r', svc_type)
        cls = LongrunService
    return cls(svc_basedir, svc_name, **kwargs)
# Public API of this module.
__all__ = (
    'BundleService',
    'LongrunService',
    'OneshotService',
    'create_service',
)
| Morgan-Stanley/treadmill | lib/python/treadmill/supervisor/s6/services.py | Python | apache-2.0 | 17,491 |
"""Implement test server."""
import logging
import socket
from time import sleep
from leicacam.cam import tuples_as_bytes
# Canned CAM scan replies.  image_event() below pops one entry from the END
# of this list for each "startcamscan" command, so clients receive them in
# reverse declaration order and the list is consumed (mutated) as replies
# are served.
CAM_REPLY = [
    [
        (
            "relpath",
            "subfolder/exp1/CAM1/slide--S00/chamber--U00--V00/field--X01--Y01"
            "/image--L0000--S00--U00--V00--J15--E04--O01"
            "--X01--Y01--T0000--Z00--C00.ome",
        )
    ],
    [
        (
            "relpath",
            "subfolder/exp1/CAM1/slide--S00/chamber--U00--V00/field--X02--Y02"
            "/image--L0000--S00--U00--V00--J15--E02--O01"
            "--X02--Y02--T0000--Z00--C31.ome",
        )
    ],
]
def image_event(data):
    """Send a reply about a saved image.

    :param bytes data: raw command received from the client.
    :returns: an encoded CAM reply when the command contains
        ``startcamscan`` and a canned reply is still available,
        ``None`` otherwise.
    """
    # Guard against an exhausted CAM_REPLY list: without the check, a third
    # "startcamscan" command would raise IndexError from pop() and kill the
    # connection handler.
    if "startcamscan" in data.decode() and CAM_REPLY:
        return tuples_as_bytes(CAM_REPLY.pop())
    return None
class EchoServer:
    """Test server."""
    def __init__(self, server_address):
        """Set up server.

        :param tuple server_address: (host, port) pair to bind to.
        """
        self.logger = logging.getLogger("EchoServer")
        self.logger.debug("Setting up server")
        self.server_address = server_address
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.setup()
    def setup(self):
        """Bind and listen to incoming connections."""
        # Backlog of 1: this server handles a single client at a time.
        self.sock.bind(self.server_address)
        self.sock.listen(1)
    def handle(self):
        """Handle incoming connections."""
        # pylint: disable=no-member
        # Accepts exactly one connection, echoes each received chunk back
        # (newline-terminated), and emits a delayed CAM reply for
        # "startcamscan" commands.  The listening socket is torn down once
        # the client disconnects.
        self.logger.debug("Serve incoming connections")
        conn, addr = self.sock.accept()
        self.logger.debug("Connected by %s", addr)
        try:
            self.logger.debug("Send welcome")
            conn.sendall("Welcome...".encode("utf-8"))
            while True:
                data = conn.recv(1024)
                if not data:
                    self.logger.debug("No data, closing")
                    break
                # Echo the raw command back first...
                self.send(conn, data)
                reply = image_event(data)
                if not reply:
                    continue
                # ...then simulate microscope latency before the CAM reply.
                sleep(0.2)
                self.send(conn, reply)
        except OSError as exc:
            self.logger.error(exc)
        finally:
            self.logger.debug("Closing connection to %s", addr)
            conn.close()
            # NOTE(review): shutdown() on a listening (never-connected)
            # socket may raise OSError on some platforms; run() catches
            # OSError so this appears intentional -- confirm.
            self.sock.shutdown(socket.SHUT_WR)
            self.sock.close()
    def send(self, conn, data):
        """Send data.

        :param conn: connected socket to write to.
        :param bytes data: payload; a trailing newline is appended.
        """
        self.logger.debug("Sending: %s", data)
        conn.sendall(data + b"\n")
    def run(self):
        """Run server."""
        # Serves a single connection, then closes the listening socket
        # (close() on an already-closed socket is a no-op).
        try:
            self.handle()
        except OSError as exc:
            self.logger.error("Error on socket: %s", exc)
        self.logger.debug("Server close")
        self.sock.close()
    def stop(self):
        """Stop server."""
        try:
            self.logger.debug("Server shutdown")
            self.sock.shutdown(socket.SHUT_WR)
            self.logger.debug("Server close")
            self.sock.close()
        except OSError:
            self.logger.error("Error shutting down server socket")
if __name__ == "__main__":
    # Standalone mode: serve one client on localhost:8895 with debug logging.
    logging.basicConfig(level=logging.DEBUG, format="%(name)s: %(message)s")
    ADDRESS = ("localhost", 8895)
    SERVER = EchoServer(ADDRESS)
    try:
        SERVER.run()
    except KeyboardInterrupt:
        # Ctrl-C: close the listening socket cleanly.
        SERVER.stop()
/*
* Copyright 2000-2013 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.psi.impl.java.stubs;
import com.intellij.lang.ASTNode;
import com.intellij.lang.LighterAST;
import com.intellij.lang.LighterASTNode;
import com.intellij.psi.JavaTokenType;
import com.intellij.psi.PsiMethod;
import com.intellij.psi.impl.cache.RecordUtil;
import com.intellij.psi.impl.cache.TypeInfo;
import com.intellij.psi.impl.java.stubs.impl.PsiMethodStubImpl;
import com.intellij.psi.impl.java.stubs.index.JavaStubIndexKeys;
import com.intellij.psi.impl.source.PsiAnnotationMethodImpl;
import com.intellij.psi.impl.source.PsiMethodImpl;
import com.intellij.psi.impl.source.tree.ElementType;
import com.intellij.psi.impl.source.tree.JavaDocElementType;
import com.intellij.psi.impl.source.tree.JavaElementType;
import com.intellij.psi.impl.source.tree.LightTreeUtil;
import com.intellij.psi.impl.source.tree.java.AnnotationMethodElement;
import com.intellij.psi.stubs.IndexSink;
import com.intellij.psi.stubs.StubElement;
import com.intellij.psi.stubs.StubInputStream;
import com.intellij.psi.stubs.StubOutputStream;
import com.intellij.psi.tree.IElementType;
import com.intellij.util.io.StringRef;
import org.jetbrains.annotations.NonNls;
import org.jetbrains.annotations.NotNull;
import java.io.IOException;
import java.util.List;
/**
* @author max
*/
public abstract class JavaMethodElementType extends JavaStubElementType<PsiMethodStub, PsiMethod> {
  public JavaMethodElementType(@NonNls final String name) {
    super(name);
  }

  @Override
  public PsiMethod createPsi(@NotNull final PsiMethodStub stub) {
    return getPsiFactory(stub).createMethod(stub);
  }

  @Override
  public PsiMethod createPsi(@NotNull final ASTNode node) {
    // Annotation methods get a dedicated PSI implementation.
    if (node instanceof AnnotationMethodElement) {
      return new PsiAnnotationMethodImpl(node);
    }
    else {
      return new PsiMethodImpl(node);
    }
  }

  @Override
  public PsiMethodStub createStub(final LighterAST tree, final LighterASTNode node, final StubElement parentStub) {
    // Builds a method stub by a single pass over the method's children,
    // collecting the name, modifiers, varargs-ness, deprecation and (for
    // annotation methods) the default value text.
    String name = null;
    boolean isConstructor = true;
    boolean isVarArgs = false;
    boolean isDeprecatedByComment = false;
    boolean hasDeprecatedAnnotation = false;
    String defValueText = null;

    boolean expectingDef = false;
    for (final LighterASTNode child : tree.getChildren(node)) {
      final IElementType type = child.getTokenType();
      if (type == JavaDocElementType.DOC_COMMENT) {
        isDeprecatedByComment = RecordUtil.isDeprecatedByDocComment(tree, child);
      }
      else if (type == JavaElementType.MODIFIER_LIST) {
        hasDeprecatedAnnotation = RecordUtil.isDeprecatedByAnnotation(tree, child);
      }
      else if (type == JavaElementType.TYPE) {
        // A return type element means this is a regular method, not a ctor.
        isConstructor = false;
      }
      else if (type == JavaTokenType.IDENTIFIER) {
        name = RecordUtil.intern(tree.getCharTable(), child);
      }
      else if (type == JavaElementType.PARAMETER_LIST) {
        // Varargs is determined by an ellipsis on the last parameter's type.
        final List<LighterASTNode> params = LightTreeUtil.getChildrenOfType(tree, child, JavaElementType.PARAMETER);
        if (!params.isEmpty()) {
          final LighterASTNode pType = LightTreeUtil.firstChildOfType(tree, params.get(params.size() - 1), JavaElementType.TYPE);
          if (pType != null) {
            isVarArgs = (LightTreeUtil.firstChildOfType(tree, pType, JavaTokenType.ELLIPSIS) != null);
          }
        }
      }
      else if (type == JavaTokenType.DEFAULT_KEYWORD) {
        // Annotation method: the next meaningful child is the default value.
        expectingDef = true;
      }
      else if (expectingDef && !ElementType.JAVA_COMMENT_OR_WHITESPACE_BIT_SET.contains(type) &&
               type != JavaTokenType.SEMICOLON && type != JavaElementType.CODE_BLOCK) {
        defValueText = LightTreeUtil.toFilteredString(tree, child, null);
        break;
      }
    }

    TypeInfo typeInfo = isConstructor ? TypeInfo.createConstructorType() : TypeInfo.create(tree, node, parentStub);
    boolean isAnno = (node.getTokenType() == JavaElementType.ANNOTATION_METHOD);
    byte flags = PsiMethodStubImpl.packFlags(isConstructor, isAnno, isVarArgs, isDeprecatedByComment, hasDeprecatedAnnotation);

    return new PsiMethodStubImpl(parentStub, StringRef.fromString(name), typeInfo, flags, StringRef.fromString(defValueText));
  }

  @Override
  public void serialize(@NotNull final PsiMethodStub stub, @NotNull final StubOutputStream dataStream) throws IOException {
    // Serialization layout: name, return type, packed flags, and (for
    // annotation methods only) the default value text.  deserialize() below
    // must read fields in exactly this order.
    dataStream.writeName(stub.getName());
    TypeInfo.writeTYPE(dataStream, stub.getReturnTypeText(false));
    dataStream.writeByte(((PsiMethodStubImpl)stub).getFlags());
    if (stub.isAnnotationMethod()) {
      dataStream.writeName(stub.getDefaultValueText());
    }
  }

  @NotNull
  @Override
  public PsiMethodStub deserialize(@NotNull final StubInputStream dataStream, final StubElement parentStub) throws IOException {
    StringRef name = dataStream.readName();
    final TypeInfo type = TypeInfo.readTYPE(dataStream);
    byte flags = dataStream.readByte();
    // The default value is only present in the stream for annotation methods.
    final StringRef defaultMethodValue = PsiMethodStubImpl.isAnnotationMethod(flags) ? dataStream.readName() : null;
    return new PsiMethodStubImpl(parentStub, name, type, flags, defaultMethodValue);
  }

  @Override
  public void indexStub(@NotNull final PsiMethodStub stub, @NotNull final IndexSink sink) {
    final String name = stub.getName();
    if (name != null) {
      sink.occurrence(JavaStubIndexKeys.METHODS, name);
      // Static non-private members are additionally indexed for static
      // import completion/resolve.
      if (RecordUtil.isStaticNonPrivateMember(stub)) {
        sink.occurrence(JavaStubIndexKeys.JVM_STATIC_MEMBERS_NAMES, name);
        sink.occurrence(JavaStubIndexKeys.JVM_STATIC_MEMBERS_TYPES, stub.getReturnTypeText(false).getShortTypeText());
      }
    }
  }
}
| IllusionRom-deprecated/android_platform_tools_idea | java/java-psi-impl/src/com/intellij/psi/impl/java/stubs/JavaMethodElementType.java | Java | apache-2.0 | 6,170 |
/**
* JBoss, Home of Professional Open Source
* Copyright 2012, Red Hat Middleware LLC, and individual contributors
* by the @authors tag. See the copyright.txt in the distribution for a
* full listing of individual contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jboss.arquillian.warp.impl.client.verification;
import org.jboss.arquillian.warp.exception.ClientWarpExecutionException;
import org.jboss.arquillian.warp.impl.shared.ExecutedMethod;
/**
 * Thrown on the client side when a declared lifecycle inspection method was
 * never executed on the server.
 */
public class InspectionMethodWasNotInvokedException extends ClientWarpExecutionException {

    private static final long serialVersionUID = -2562948787400815278L;

    public InspectionMethodWasNotInvokedException(ExecutedMethod method) {
        super(String.format("Lifecycle test declared on %s with qualifiers %s was not executed",
                method.getMethod(), method.getQualifiers()));
    }
}
| CSchulz/arquillian-extension-warp | impl/src/main/java/org/jboss/arquillian/warp/impl/client/verification/InspectionMethodWasNotInvokedException.java | Java | apache-2.0 | 1,382 |
/**
* Copyright 2011-2019 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.integration;
import static org.hamcrest.Matchers.*;
import static org.junit.Assert.*;
import java.io.IOException;
import java.nio.charset.Charset;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.stream.Collectors;
import org.junit.Assume;
import org.junit.Rule;
import org.junit.Test;
import com.asakusafw.utils.gradle.Bundle;
import com.asakusafw.utils.gradle.ContentsConfigurator;
import com.asakusafw.utils.gradle.EnvironmentConfigurator;
import com.asakusafw.utils.gradle.PropertyConfigurator;
/**
* Test for {@link AsakusaProjectProvider}.
*/
public class AsakusaProjectProviderTest {

    /**
     * The project provider under test.
     */
    @Rule
    public final AsakusaProjectProvider provider = new AsakusaProjectProvider();

    /**
     * simple case.
     */
    @Test
    public void simple() {
        AsakusaProject prj = provider.newInstance("testing");
        assertThat(String.valueOf(prj.getContents().getDirectory().getFileName()), is("testing"));
    }

    /**
     * w/ provider conf.
     */
    @Test
    public void with_provider_conf() {
        provider.withProvider(it -> it.withProject(p -> p.withProperty("action.provider", "true")));
        AsakusaProject prj = provider.newInstance("testing");
        assertThat(prj.property("action.provider"), is("true"));
    }

    /**
     * w/ project conf.
     */
    @Test
    public void with_project_conf() {
        provider.withProject(it -> it.withProperty("action.project", "true"));
        AsakusaProject prj = provider.newInstance("testing");
        assertThat(prj.property("action.project"), is("true"));
    }

    /**
     * check environment variables.
     */
    @Test
    public void environment() {
        String pathValue = System.getenv("PATH");
        Assume.assumeNotNull(pathValue);
        AsakusaProject prj = provider.newInstance("testing");
        assertThat(prj.environment("PATH"), is(pathValue));
    }

    /**
     * check system properties.
     */
    @Test
    public void properties() {
        String javaVersion = System.getProperty("java.version");
        Assume.assumeNotNull(javaVersion);
        AsakusaProject prj = provider.newInstance("testing");
        assertThat(prj.property("java.version"), is(javaVersion));
    }

    /**
     * check loading {@code META-INF/asakusa-integration/system.properties}.
     */
    @Test
    public void load_embed_properties() {
        AsakusaProject prj = provider.newInstance("testing");
        assertThat(prj.property("asakusafw.version"), is(notNullValue()));
    }

    /**
     * can overwrite {@code META-INF/asakusa-integration/system.properties}.
     */
    @Test
    public void asakusafw_version_override() {
        provider.withProject(PropertyConfigurator.of("asakusafw.version", "TESTING"));
        AsakusaProject prj = provider.newInstance("testing");
        assertThat(prj.property("asakusafw.version"), is("TESTING"));
    }

    /**
     * ASAKUSA_HOME must be overwritten by the provider.
     */
    @Test
    public void asakusa_home_temporary() {
        provider.withProject(EnvironmentConfigurator.of("ASAKUSA_HOME", "N/A"));
        AsakusaProject prj = provider.newInstance("testing");
        assertThat(prj.environment("ASAKUSA_HOME"), is(not("N/A")));
    }

    /**
     * w/ ASAKUSA_HOME.
     */
    @Test
    public void asakusa_home() {
        AsakusaProject prj = provider.newInstance("testing")
                .with(ContentsConfigurator.copy("src/test/data/home"));
        assertThat(prj.environment("ASAKUSA_HOME"), is(notNullValue()));
        prj.gradle("putHome");
        prj.withFramework(fw -> {
            assertThat(lines(fw.get("output.txt")), contains("OK"));
        });
    }

    /**
     * w/ ASAKUSA_HOME - not prepared.
     */
    @Test(expected = RuntimeException.class)
    public void asakusa_home_nothing() {
        AsakusaProject prj = provider.newInstance("testing");
        prj.getFramework();
    }

    /**
     * w/ bundle.
     */
    @Test
    public void add_bundle() {
        AsakusaProject prj = provider.newInstance("testing")
                .with(ContentsConfigurator.copy("src/test/data/bundle"));
        Bundle bundle = prj.addBundle("testing");
        prj.gradle("putBundle");
        assertThat(lines(bundle.get("output.txt")), contains("OK"));
    }

    /**
     * w/ bundle.
     */
    @Test
    public void add_bundle_existing() {
        AsakusaProject prj = provider.newInstance("testing")
                .with(ContentsConfigurator.copy("src/test/data/bundle"));
        Bundle bundle = prj.addBundle("origin");
        prj.addBundle("testing", bundle.getDirectory());
        prj.gradle("putBundle");
        assertThat(lines(bundle.get("output.txt")), contains("OK"));
    }

    // Reads the given file and returns its non-blank lines, trimmed.
    private static List<String> lines(Path file) {
        try {
            return Files.readAllLines(file, Charset.defaultCharset()).stream()
                    .map(String::trim)
                    .filter(line -> !line.isEmpty())
                    .collect(Collectors.toList());
        } catch (IOException e) {
            throw new AssertionError(e);
        }
    }
}
| akirakw/asakusafw | integration/src/test/java/com/asakusafw/integration/AsakusaProjectProviderTest.java | Java | apache-2.0 | 5,895 |
package manifest_test
import (
"runtime"
"strings"
"code.cloudfoundry.org/cli/cf/manifest"
"code.cloudfoundry.org/cli/utils/generic"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "code.cloudfoundry.org/cli/testhelpers/matchers"
)
func NewManifest(path string, data generic.Map) (m *manifest.Manifest) {
return &manifest.Manifest{Path: path, Data: data}
}
var _ = Describe("Manifests", func() {
It("merges global properties into each app's properties", func() {
m := NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"instances": "3",
"memory": "512M",
"applications": []interface{}{
map[interface{}]interface{}{
"name": "bitcoin-miner",
"no-route": true,
},
},
}))
apps, err := m.Applications()
Expect(err).NotTo(HaveOccurred())
Expect(*apps[0].InstanceCount).To(Equal(3))
Expect(*apps[0].Memory).To(Equal(int64(512)))
Expect(apps[0].NoRoute).To(BeTrue())
})
Context("when there is no applications block", func() {
It("returns a single application with the global properties", func() {
m := NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"instances": "3",
"memory": "512M",
}))
apps, err := m.Applications()
Expect(err).NotTo(HaveOccurred())
Expect(len(apps)).To(Equal(1))
Expect(*apps[0].InstanceCount).To(Equal(3))
Expect(*apps[0].Memory).To(Equal(int64(512)))
})
})
It("returns an error when the memory limit doesn't have a unit", func() {
m := NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"instances": "3",
"memory": "512",
"applications": []interface{}{
map[interface{}]interface{}{
"name": "bitcoin-miner",
},
},
}))
_, err := m.Applications()
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("Invalid value for 'memory': 512"))
})
It("returns an error when the memory limit is a non-string", func() {
m := NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"instances": "3",
"memory": 128,
"applications": []interface{}{
map[interface{}]interface{}{
"name": "bitcoin-miner",
},
},
}))
_, err := m.Applications()
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("Invalid value for 'memory': 128"))
})
It("sets applications' health check timeouts", func() {
m := NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"applications": []interface{}{
map[interface{}]interface{}{
"name": "bitcoin-miner",
"timeout": "360",
},
},
}))
apps, err := m.Applications()
Expect(err).NotTo(HaveOccurred())
Expect(*apps[0].HealthCheckTimeout).To(Equal(360))
})
It("allows boolean env var values", func() {
m := NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"env": generic.NewMap(map[interface{}]interface{}{
"bar": true,
}),
}))
_, err := m.Applications()
Expect(err).ToNot(HaveOccurred())
})
It("allows nil value for global env if env is present in the app", func() {
m := NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"env": nil,
"applications": []interface{}{
map[interface{}]interface{}{
"name": "bad app",
"env": map[interface{}]interface{}{
"foo": "bar",
},
},
},
}))
apps, err := m.Applications()
Expect(err).NotTo(HaveOccurred())
Expect(*apps[0].EnvironmentVars).To(Equal(map[string]interface{}{"foo": "bar"}))
})
It("does not allow nil value for env in application", func() {
m := NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"env": generic.NewMap(map[interface{}]interface{}{
"foo": "bar",
}),
"applications": []interface{}{
map[interface{}]interface{}{
"name": "bad app",
"env": nil,
},
},
}))
_, err := m.Applications()
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("env should not be null"))
})
It("does not allow nil values for environment variables", func() {
m := NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"env": generic.NewMap(map[interface{}]interface{}{
"bar": nil,
}),
"applications": []interface{}{
map[interface{}]interface{}{
"name": "bad app",
},
},
}))
_, err := m.Applications()
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("env var 'bar' should not be null"))
})
It("returns an empty map when no env was present in the manifest", func() {
m := NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"applications": []interface{}{
map[interface{}]interface{}{"name": "no-env-vars"},
},
}))
apps, err := m.Applications()
Expect(err).NotTo(HaveOccurred())
Expect(*apps[0].EnvironmentVars).NotTo(BeNil())
})
It("allows applications to have absolute paths", func() {
if runtime.GOOS == "windows" {
m := NewManifest(`C:\some\path\manifest.yml`, generic.NewMap(map[interface{}]interface{}{
"applications": []interface{}{
map[interface{}]interface{}{
"path": `C:\another\path`,
},
},
}))
apps, err := m.Applications()
Expect(err).NotTo(HaveOccurred())
Expect(*apps[0].Path).To(Equal(`C:\another\path`))
} else {
m := NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"applications": []interface{}{
map[interface{}]interface{}{
"path": "/another/path-segment",
},
},
}))
apps, err := m.Applications()
Expect(err).NotTo(HaveOccurred())
Expect(*apps[0].Path).To(Equal("/another/path-segment"))
}
})
It("expands relative app paths based on the manifest's path", func() {
m := NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"applications": []interface{}{
map[interface{}]interface{}{
"path": "../another/path-segment",
},
},
}))
apps, err := m.Applications()
Expect(err).NotTo(HaveOccurred())
if runtime.GOOS == "windows" {
Expect(*apps[0].Path).To(Equal("\\some\\another\\path-segment"))
} else {
Expect(*apps[0].Path).To(Equal("/some/another/path-segment"))
}
})
It("returns errors when there are null values", func() {
m := NewManifest("/some/path", generic.NewMap(map[interface{}]interface{}{
"applications": []interface{}{
map[interface{}]interface{}{
"disk_quota": nil,
"domain": nil,
"host": nil,
"name": nil,
"path": nil,
"stack": nil,
"memory": nil,
"instances": nil,
"timeout": nil,
"no-route": nil,
"no-hostname": nil,
"services": nil,
"env": nil,
"random-route": nil,
},
},
}))
_, err := m.Applications()
Expect(err).To(HaveOccurred())
errorSlice := strings.Split(err.Error(), "\n")
manifestKeys := []string{"disk_quota", "domain", "host", "name", "path", "stack",
"memory", "instances", "timeout", "no-route", "no-hostname", "services", "env", "random-route"}
for _, key := range manifestKeys {
Expect(errorSlice).To(ContainSubstrings([]string{key, "not be null"}))
}
})
It("returns errors when hosts/domains is not valid slice", func() {
m := NewManifest("/some/path", generic.NewMap(map[interface{}]interface{}{
"applications": []interface{}{
map[interface{}]interface{}{
"hosts": "bad-value",
"domains": []interface{}{"val1", "val2", false, true},
},
},
}))
_, err := m.Applications()
Expect(err).To(HaveOccurred())
errorSlice := strings.Split(err.Error(), "\n")
Expect(errorSlice).To(ContainSubstrings([]string{"hosts", "to be a list of strings"}))
Expect(errorSlice).To(ContainSubstrings([]string{"domains", "to be a list of strings"}))
})
It("parses known manifest keys", func() {
m := NewManifest("/some/path", generic.NewMap(map[interface{}]interface{}{
"applications": []interface{}{
map[interface{}]interface{}{
"buildpack": "my-buildpack",
"disk_quota": "512M",
"domain": "my-domain",
"domains": []interface{}{"domain1.test", "domain2.test"},
"host": "my-hostname",
"hosts": []interface{}{"host-1", "host-2"},
"name": "my-app-name",
"stack": "my-stack",
"memory": "256M",
"health-check-type": "none",
"instances": 1,
"timeout": 11,
"no-route": true,
"no-hostname": true,
"random-route": true,
},
},
}))
apps, err := m.Applications()
Expect(err).NotTo(HaveOccurred())
Expect(len(apps)).To(Equal(1))
Expect(*apps[0].BuildpackURL).To(Equal("my-buildpack"))
Expect(*apps[0].DiskQuota).To(Equal(int64(512)))
Expect(apps[0].Domains).To(ConsistOf([]string{"domain1.test", "domain2.test", "my-domain"}))
Expect(apps[0].Hosts).To(ConsistOf([]string{"host-1", "host-2", "my-hostname"}))
Expect(*apps[0].Name).To(Equal("my-app-name"))
Expect(*apps[0].StackName).To(Equal("my-stack"))
Expect(*apps[0].HealthCheckType).To(Equal("none"))
Expect(*apps[0].Memory).To(Equal(int64(256)))
Expect(*apps[0].InstanceCount).To(Equal(1))
Expect(*apps[0].HealthCheckTimeout).To(Equal(11))
Expect(apps[0].NoRoute).To(BeTrue())
Expect(*apps[0].NoHostname).To(BeTrue())
Expect(apps[0].UseRandomRoute).To(BeTrue())
})
It("removes duplicated values in 'hosts' and 'domains'", func() {
m := NewManifest("/some/path", generic.NewMap(map[interface{}]interface{}{
"applications": []interface{}{
map[interface{}]interface{}{
"domain": "my-domain",
"domains": []interface{}{"my-domain", "domain1.test", "domain1.test", "domain2.test"},
"host": "my-hostname",
"hosts": []interface{}{"my-hostname", "host-1", "host-1", "host-2"},
"name": "my-app-name",
},
},
}))
apps, err := m.Applications()
Expect(err).NotTo(HaveOccurred())
Expect(len(apps)).To(Equal(1))
Expect(len(apps[0].Domains)).To(Equal(3))
Expect(apps[0].Domains).To(ConsistOf([]string{"my-domain", "domain1.test", "domain2.test"}))
Expect(len(apps[0].Hosts)).To(Equal(3))
Expect(apps[0].Hosts).To(ConsistOf([]string{"my-hostname", "host-1", "host-2"}))
})
Context("old-style property syntax", func() {
It("returns an error when the manifest contains non-whitelist properties", func() {
m := NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"applications": []interface{}{
generic.NewMap(map[interface{}]interface{}{
"env": generic.NewMap(map[interface{}]interface{}{
"bar": "many-${some_property-name}-are-cool",
}),
}),
},
}))
_, err := m.Applications()
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("'${some_property-name}'"))
})
It("replaces the '${random-word} with a combination of 2 random words", func() {
m := NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"applications": []interface{}{
generic.NewMap(map[interface{}]interface{}{
"env": generic.NewMap(map[interface{}]interface{}{
"bar": "prefix_${random-word}_suffix",
"foo": "some-value",
}),
}),
},
}))
apps, err := m.Applications()
Expect(err).NotTo(HaveOccurred())
Expect((*apps[0].EnvironmentVars)["bar"]).To(MatchRegexp(`prefix_\w+-\w+_suffix`))
Expect((*apps[0].EnvironmentVars)["foo"]).To(Equal("some-value"))
apps2, _ := m.Applications()
Expect((*apps2[0].EnvironmentVars)["bar"]).To(MatchRegexp(`prefix_\w+-\w+_suffix`))
Expect((*apps2[0].EnvironmentVars)["bar"]).NotTo(Equal((*apps[0].EnvironmentVars)["bar"]))
})
})
It("sets the command and buildpack to blank when their values are null in the manifest", func() {
m := NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"applications": []interface{}{
generic.NewMap(map[interface{}]interface{}{
"buildpack": nil,
"command": nil,
}),
},
}))
apps, err := m.Applications()
Expect(err).NotTo(HaveOccurred())
Expect(*apps[0].Command).To(Equal(""))
Expect(*apps[0].BuildpackURL).To(Equal(""))
})
It("sets the command and buildpack to blank when their values are 'default' in the manifest", func() {
m := NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"applications": []interface{}{
generic.NewMap(map[interface{}]interface{}{
"command": "default",
"buildpack": "default",
}),
},
}))
apps, err := m.Applications()
Expect(err).NotTo(HaveOccurred())
Expect(*apps[0].Command).To(Equal(""))
Expect(*apps[0].BuildpackURL).To(Equal(""))
})
It("does not set the start command when the manifest doesn't have the 'command' key", func() {
m := NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"applications": []interface{}{
map[interface{}]interface{}{},
},
}))
apps, err := m.Applications()
Expect(err).NotTo(HaveOccurred())
Expect(apps[0].Command).To(BeNil())
})
It("can build the applications multiple times", func() {
m := NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"memory": "254m",
"applications": []interface{}{
map[interface{}]interface{}{
"name": "bitcoin-miner",
},
map[interface{}]interface{}{
"name": "bitcoin-miner",
},
},
}))
apps1, err := m.Applications()
Expect(err).NotTo(HaveOccurred())
apps2, err := m.Applications()
Expect(err).NotTo(HaveOccurred())
Expect(apps1).To(Equal(apps2))
})
Context("parsing app ports", func() {
It("parses app ports", func() {
m := NewManifest("/some/path", generic.NewMap(map[interface{}]interface{}{
"applications": []interface{}{
map[interface{}]interface{}{
"app-ports": []interface{}{
8080,
9090,
},
},
},
}))
apps, err := m.Applications()
Expect(err).NotTo(HaveOccurred())
Expect(apps[0].AppPorts).NotTo(BeNil())
Expect(*(apps[0].AppPorts)).To(Equal([]int{8080, 9090}))
})
It("handles omitted field", func() {
m := NewManifest("/some/path", generic.NewMap(map[interface{}]interface{}{
"applications": []interface{}{
map[interface{}]interface{}{},
},
}))
apps, err := m.Applications()
Expect(err).NotTo(HaveOccurred())
Expect(apps[0].AppPorts).To(BeNil())
})
It("handles mixed arrays", func() {
m := NewManifest("/some/path", generic.NewMap(map[interface{}]interface{}{
"applications": []interface{}{
map[interface{}]interface{}{
"app-ports": []interface{}{
8080,
"potato",
},
},
},
}))
_, err := m.Applications()
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("Expected app-ports to be a list of integers."))
})
It("handles non-array values", func() {
m := NewManifest("/some/path", generic.NewMap(map[interface{}]interface{}{
"applications": []interface{}{
map[interface{}]interface{}{
"app-ports": "potato",
},
},
}))
_, err := m.Applications()
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("Expected app-ports to be a list of integers."))
})
})
Context("parsing env vars", func() {
It("handles values that are not strings", func() {
m := NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"applications": []interface{}{
generic.NewMap(map[interface{}]interface{}{
"env": map[interface{}]interface{}{
"string-key": "value",
"int-key": 1,
"float-key": 11.1,
},
}),
},
}))
app, err := m.Applications()
Expect(err).NotTo(HaveOccurred())
Expect((*app[0].EnvironmentVars)["string-key"]).To(Equal("value"))
Expect((*app[0].EnvironmentVars)["int-key"]).To(Equal(1))
Expect((*app[0].EnvironmentVars)["float-key"]).To(Equal(11.1))
})
})
Context("parsing services", func() {
It("can read a list of service instance names", func() {
m := NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"services": []interface{}{"service-1", "service-2"},
}))
app, err := m.Applications()
Expect(err).NotTo(HaveOccurred())
Expect(app[0].ServicesToBind).To(Equal([]string{"service-1", "service-2"}))
})
})
Context("when routes are provided", func() {
var manifest *manifest.Manifest
Context("when passed 'routes'", func() {
Context("valid 'routes'", func() {
BeforeEach(func() {
manifest = NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"applications": []interface{}{
generic.NewMap(map[interface{}]interface{}{
"routes": []interface{}{
map[interface{}]interface{}{"route": "route1.example.com"},
map[interface{}]interface{}{"route": "route2.example.com"},
},
}),
},
}))
})
It("parses routes into app params", func() {
apps, err := manifest.Applications()
Expect(err).NotTo(HaveOccurred())
Expect(apps).To(HaveLen(1))
routes := apps[0].Routes
Expect(routes).To(HaveLen(2))
Expect(routes[0].Route).To(Equal("route1.example.com"))
Expect(routes[1].Route).To(Equal("route2.example.com"))
})
})
Context("invalid 'routes'", func() {
Context("'routes' is formatted incorrectly", func() {
BeforeEach(func() {
manifest = NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"applications": []interface{}{
generic.NewMap(map[interface{}]interface{}{
"routes": []string{},
}),
},
}))
})
It("errors out", func() {
_, err := manifest.Applications()
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(MatchRegexp("should be a list"))
})
})
Context("an individual 'route' is formatted incorrectly", func() {
BeforeEach(func() {
manifest = NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"applications": []interface{}{
generic.NewMap(map[interface{}]interface{}{
"routes": []interface{}{
map[interface{}]interface{}{"routef": "route1.example.com"},
},
}),
},
}))
})
It("parses routes into app params", func() {
_, err := manifest.Applications()
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(MatchRegexp("each route in 'routes' must have a 'route' property"))
})
})
})
})
Context("when there are no routes", func() {
BeforeEach(func() {
manifest = NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"applications": []interface{}{
generic.NewMap(map[interface{}]interface{}{
"buildpack": nil,
"command": "echo banana",
}),
},
}))
})
It("sets routes to be nil", func() {
apps, err := manifest.Applications()
Expect(err).NotTo(HaveOccurred())
Expect(apps).To(HaveLen(1))
Expect(apps[0].Routes).To(BeNil())
})
})
Context("when no-hostname is not specified in the manifest", func() {
BeforeEach(func() {
manifest = NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"applications": []interface{}{
generic.NewMap(map[interface{}]interface{}{
"buildpack": nil,
"command": "echo banana",
}),
},
}))
})
It("sets no-hostname to be nil", func() {
apps, err := manifest.Applications()
Expect(err).NotTo(HaveOccurred())
Expect(apps).To(HaveLen(1))
Expect(apps[0].NoHostname).To(BeNil())
})
})
Context("when no-hostname is specified in the manifest", func() {
Context("and it is set to true", func() {
Context("and the value is a boolean", func() {
BeforeEach(func() {
manifest = NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"applications": []interface{}{
generic.NewMap(map[interface{}]interface{}{
"buildpack": nil,
"command": "echo banana",
"no-hostname": true,
}),
},
}))
})
It("sets no-hostname to be true", func() {
apps, err := manifest.Applications()
Expect(err).NotTo(HaveOccurred())
Expect(apps).To(HaveLen(1))
Expect(*apps[0].NoHostname).To(BeTrue())
})
})
Context("and the value is a string", func() {
BeforeEach(func() {
manifest = NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"applications": []interface{}{
generic.NewMap(map[interface{}]interface{}{
"buildpack": nil,
"command": "echo banana",
"no-hostname": "true",
}),
},
}))
})
It("sets no-hostname to be true", func() {
apps, err := manifest.Applications()
Expect(err).NotTo(HaveOccurred())
Expect(apps).To(HaveLen(1))
Expect(*apps[0].NoHostname).To(BeTrue())
})
})
})
Context("and it is set to false", func() {
BeforeEach(func() {
manifest = NewManifest("/some/path/manifest.yml", generic.NewMap(map[interface{}]interface{}{
"applications": []interface{}{
generic.NewMap(map[interface{}]interface{}{
"buildpack": nil,
"command": "echo banana",
"no-hostname": false,
}),
},
}))
})
It("sets no-hostname to be false", func() {
apps, err := manifest.Applications()
Expect(err).NotTo(HaveOccurred())
Expect(apps).To(HaveLen(1))
Expect(*apps[0].NoHostname).To(BeFalse())
})
})
})
})
})
| cloudfoundry/v3-cli-plugin | vendor/code.cloudfoundry.org/cli/cf/manifest/manifest_test.go | GO | apache-2.0 | 21,977 |
default[:teamcity][:agent][:path] = "#{node[:teamcity][:path]}/buildAgent"
default[:teamcity][:agent][:pid_file] = "#{node[:teamcity][:agent][:path]}/logs/buildAgent.pid"
default[:teamcity][:agent][:name] = "Default agent"
| andreychernih/chef-teamcity | attributes/agent.rb | Ruby | apache-2.0 | 234 |
// Code generated by Microsoft (R) AutoRest Code Generator 0.9.7.0
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Net;
using System.Net.Http;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Rest;
using Newtonsoft.Json.Linq;
using RMV.Awesome.Api;
using RMV.Awesome.Api.Models;
namespace RMV.Awesome.Api
{
internal partial class BranchOperations : IServiceOperations<ApiClient>, IBranchOperations
{
/// <summary>
/// Initializes a new instance of the BranchOperations class.
/// </summary>
/// <param name='client'>
/// Reference to the service client.
/// </param>
internal BranchOperations(ApiClient client)
{
this._client = client;
}
private ApiClient _client;
/// <summary>
/// Gets a reference to the RMV.Awesome.Api.ApiClient.
/// </summary>
public ApiClient Client
{
get { return this._client; }
}
/// <param name='cancellationToken'>
/// Cancellation token.
/// </param>
public async Task<HttpOperationResponse<IList<Branch>>> GetBranchListWithOperationResponseAsync(CancellationToken cancellationToken = default(System.Threading.CancellationToken))
{
// Tracing
bool shouldTrace = ServiceClientTracing.IsEnabled;
string invocationId = null;
if (shouldTrace)
{
invocationId = ServiceClientTracing.NextInvocationId.ToString();
Dictionary<string, object> tracingParameters = new Dictionary<string, object>();
ServiceClientTracing.Enter(invocationId, this, "GetBranchListAsync", tracingParameters);
}
// Construct URL
string url = "";
url = url + "/api/branch";
string baseUrl = this.Client.BaseUri.AbsoluteUri;
// Trim '/' character from the end of baseUrl and beginning of url.
if (baseUrl[baseUrl.Length - 1] == '/')
{
baseUrl = baseUrl.Substring(0, baseUrl.Length - 1);
}
if (url[0] == '/')
{
url = url.Substring(1);
}
url = baseUrl + "/" + url;
url = url.Replace(" ", "%20");
// Create HTTP transport objects
HttpRequestMessage httpRequest = new HttpRequestMessage();
httpRequest.Method = HttpMethod.Get;
httpRequest.RequestUri = new Uri(url);
// Set Credentials
if (this.Client.Credentials != null)
{
cancellationToken.ThrowIfCancellationRequested();
await this.Client.Credentials.ProcessHttpRequestAsync(httpRequest, cancellationToken).ConfigureAwait(false);
}
// Send Request
if (shouldTrace)
{
ServiceClientTracing.SendRequest(invocationId, httpRequest);
}
cancellationToken.ThrowIfCancellationRequested();
HttpResponseMessage httpResponse = await this.Client.HttpClient.SendAsync(httpRequest, cancellationToken).ConfigureAwait(false);
if (shouldTrace)
{
ServiceClientTracing.ReceiveResponse(invocationId, httpResponse);
}
HttpStatusCode statusCode = httpResponse.StatusCode;
cancellationToken.ThrowIfCancellationRequested();
string responseContent = await httpResponse.Content.ReadAsStringAsync().ConfigureAwait(false);
if (statusCode != HttpStatusCode.OK)
{
HttpOperationException<object> ex = new HttpOperationException<object>();
ex.Request = httpRequest;
ex.Response = httpResponse;
ex.Body = null;
if (shouldTrace)
{
ServiceClientTracing.Error(invocationId, ex);
}
throw ex;
}
// Create Result
HttpOperationResponse<IList<Branch>> result = new HttpOperationResponse<IList<Branch>>();
result.Request = httpRequest;
result.Response = httpResponse;
// Deserialize Response
if (statusCode == HttpStatusCode.OK)
{
IList<Branch> resultModel = new List<Branch>();
JToken responseDoc = null;
if (string.IsNullOrEmpty(responseContent) == false)
{
responseDoc = JToken.Parse(responseContent);
}
if (responseDoc != null)
{
resultModel = BranchCollection.DeserializeJson(responseDoc);
}
result.Body = resultModel;
}
if (shouldTrace)
{
ServiceClientTracing.Exit(invocationId, result);
}
return result;
}
/// <param name='lat'>
/// Required.
/// </param>
/// <param name='lng'>
/// Required.
/// </param>
/// <param name='cancellationToken'>
/// Cancellation token.
/// </param>
public async Task<HttpOperationResponse<IList<Branch>>> GetBranchListDistanceWithOperationResponseAsync(double lat, double lng, CancellationToken cancellationToken = default(System.Threading.CancellationToken))
{
// Tracing
bool shouldTrace = ServiceClientTracing.IsEnabled;
string invocationId = null;
if (shouldTrace)
{
invocationId = ServiceClientTracing.NextInvocationId.ToString();
Dictionary<string, object> tracingParameters = new Dictionary<string, object>();
tracingParameters.Add("lat", lat);
tracingParameters.Add("lng", lng);
ServiceClientTracing.Enter(invocationId, this, "GetBranchListDistanceAsync", tracingParameters);
}
// Construct URL
string url = "";
url = url + "/api/branch/loc";
List<string> queryParameters = new List<string>();
queryParameters.Add("lat=" + Uri.EscapeDataString(lat.ToString()));
queryParameters.Add("lng=" + Uri.EscapeDataString(lng.ToString()));
if (queryParameters.Count > 0)
{
url = url + "?" + string.Join("&", queryParameters);
}
string baseUrl = this.Client.BaseUri.AbsoluteUri;
// Trim '/' character from the end of baseUrl and beginning of url.
if (baseUrl[baseUrl.Length - 1] == '/')
{
baseUrl = baseUrl.Substring(0, baseUrl.Length - 1);
}
if (url[0] == '/')
{
url = url.Substring(1);
}
url = baseUrl + "/" + url;
url = url.Replace(" ", "%20");
// Create HTTP transport objects
HttpRequestMessage httpRequest = new HttpRequestMessage();
httpRequest.Method = HttpMethod.Get;
httpRequest.RequestUri = new Uri(url);
// Set Credentials
if (this.Client.Credentials != null)
{
cancellationToken.ThrowIfCancellationRequested();
await this.Client.Credentials.ProcessHttpRequestAsync(httpRequest, cancellationToken).ConfigureAwait(false);
}
// Send Request
if (shouldTrace)
{
ServiceClientTracing.SendRequest(invocationId, httpRequest);
}
cancellationToken.ThrowIfCancellationRequested();
HttpResponseMessage httpResponse = await this.Client.HttpClient.SendAsync(httpRequest, cancellationToken).ConfigureAwait(false);
if (shouldTrace)
{
ServiceClientTracing.ReceiveResponse(invocationId, httpResponse);
}
HttpStatusCode statusCode = httpResponse.StatusCode;
cancellationToken.ThrowIfCancellationRequested();
string responseContent = await httpResponse.Content.ReadAsStringAsync().ConfigureAwait(false);
if (statusCode != HttpStatusCode.OK)
{
HttpOperationException<object> ex = new HttpOperationException<object>();
ex.Request = httpRequest;
ex.Response = httpResponse;
ex.Body = null;
if (shouldTrace)
{
ServiceClientTracing.Error(invocationId, ex);
}
throw ex;
}
// Create Result
HttpOperationResponse<IList<Branch>> result = new HttpOperationResponse<IList<Branch>>();
result.Request = httpRequest;
result.Response = httpResponse;
// Deserialize Response
if (statusCode == HttpStatusCode.OK)
{
IList<Branch> resultModel = new List<Branch>();
JToken responseDoc = null;
if (string.IsNullOrEmpty(responseContent) == false)
{
responseDoc = JToken.Parse(responseContent);
}
if (responseDoc != null)
{
resultModel = BranchCollection.DeserializeJson(responseDoc);
}
result.Body = resultModel;
}
if (shouldTrace)
{
ServiceClientTracing.Exit(invocationId, result);
}
return result;
}
}
}
| mlafleur/RMVAwsome | RMV.Awesome/RMV.Awesome.WindowsPhone/ApiClient/BranchOperations.cs | C# | apache-2.0 | 9,980 |
/*
* Copyright 2020 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.pmml.models.drools.ast.factories;
import java.util.List;
import java.util.stream.Collectors;
import org.dmg.pmml.CompoundPredicate;
import org.dmg.pmml.Predicate;
import org.dmg.pmml.SimplePredicate;
import org.kie.pmml.commons.enums.ResultCode;
import org.kie.pmml.models.drools.ast.KiePMMLDroolsRule;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Class used to generate <code>KiePMMLDroolsRule</code>s out of a <code>CompoundPredicate</code>
*/
public class KiePMMLCompoundPredicateWithResultASTFactory {
private static final Logger logger = LoggerFactory.getLogger(KiePMMLCompoundPredicateWithResultASTFactory.class.getName());
/**
* Method to be invoked when <b>compoundPredicate.getBooleanOperator()</b> is <code>SURROGATE</code>.
* Throws exception otherwise
* @param predicateASTFactoryData
* @param agendaActivationGroup
* @param result
* @param isFinalLeaf
*/
public static void declareRuleFromCompoundPredicateSurrogate(final PredicateASTFactoryData predicateASTFactoryData,
final String agendaActivationGroup,
final Object result,
boolean isFinalLeaf) {
logger.trace("declareRuleFromCompoundPredicateSurrogate {} {} {} {}", predicateASTFactoryData, agendaActivationGroup, result, isFinalLeaf);
// Managing only SimplePredicates for the moment being
CompoundPredicate compoundPredicate = (CompoundPredicate) predicateASTFactoryData.getPredicate();
final List<Predicate> simplePredicates = compoundPredicate.getPredicates().stream().filter(predicate -> predicate instanceof SimplePredicate).collect(Collectors.toList());
simplePredicates.forEach(predicate -> {
SimplePredicate simplePredicate = (SimplePredicate) predicate;
PredicateASTFactoryData newPredicateASTFactoryData = predicateASTFactoryData.cloneWithPredicate(simplePredicate);
KiePMMLSimplePredicateASTFactory.factory(newPredicateASTFactoryData).declareRuleFromSimplePredicateSurrogate(agendaActivationGroup, result, isFinalLeaf);
});
}
/**
* Method to be invoked when <b>compoundPredicate.getBooleanOperator()</b> is <code>AND</code>, <code>OR</code> or
* <XOR>XOR</XOR>. Throws exception otherwise
* @param builder
* @param rules
* @param result
* @param isFinalLeaf
*/
public static void declareRuleFromCompoundPredicateAndOrXor(KiePMMLDroolsRule.Builder builder,
final List<KiePMMLDroolsRule> rules,
final Object result,
boolean isFinalLeaf) {
logger.trace("declareRuleFromCompoundPredicateAndOrXor {} {} {} {}", builder, rules, result, isFinalLeaf);
if (isFinalLeaf) {
builder = builder.withResult(result)
.withResultCode(ResultCode.OK);
}
rules.add(builder.build());
}
}
| lanceleverich/drools | kie-pmml-trusty/kie-pmml-models/kie-pmml-models-drools/kie-pmml-models-drools-common/src/main/java/org/kie/pmml/models/drools/ast/factories/KiePMMLCompoundPredicateWithResultASTFactory.java | Java | apache-2.0 | 3,841 |
package org.ovirt.engine.core.common.businessentities;
import java.util.HashMap;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlType;
@XmlAccessorType(XmlAccessType.NONE)
@XmlType(name = "VdsSpmStatus")
public enum VdsSpmStatus {
    None(0),
    Contending(1),
    SPM(2);

    // Numeric wire/persistence value backing this constant.
    private int intValue;

    // Reverse lookup table: numeric value -> enum constant.
    private static java.util.HashMap<Integer, VdsSpmStatus> mappings = new HashMap<Integer, VdsSpmStatus>();

    static {
        for (VdsSpmStatus status : VdsSpmStatus.values()) {
            mappings.put(status.getValue(), status);
        }
    }

    private VdsSpmStatus(int value) {
        this.intValue = value;
    }

    /**
     * @return the numeric value associated with this status
     */
    public int getValue() {
        return this.intValue;
    }

    /**
     * @return the status matching the given numeric value, or {@code null} when unknown
     */
    public static VdsSpmStatus forValue(int value) {
        return mappings.get(value);
    }
}
| raksha-rao/gluster-ovirt | backend/manager/modules/common/src/main/java/org/ovirt/engine/core/common/businessentities/VdsSpmStatus.java | Java | apache-2.0 | 877 |
package com.frameworkset.common;
public class Test_f {

    // Primary name property.
    private String name;
    // Secondary name property.
    private String name1;
    // Result marker property.
    private String ret;

    public String getName() {
        return this.name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getName1() {
        return this.name1;
    }

    public void setName1(String name1) {
        this.name1 = name1;
    }

    public String getRet() {
        return this.ret;
    }

    public void setRet(String ret) {
        this.ret = ret;
    }

    /**
     * @return a "ret=...,name=...,name1=..." rendering of the three properties
     */
    @Override
    public String toString() {
        return "ret=" + this.ret + ",name=" + this.name + ",name1=" + this.name1;
    }
}
| bbossgroups/bbossgroups-3.5 | bboss-persistent/test/com/frameworkset/common/Test_f.java | Java | apache-2.0 | 621 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.rest.handler.legacy;
import org.apache.flink.runtime.concurrent.Executors;
import org.apache.flink.runtime.executiongraph.ExecutionJobVertex;
import org.apache.flink.runtime.rest.handler.legacy.backpressure.BackPressureStatsTracker;
import org.apache.flink.runtime.rest.handler.legacy.backpressure.OperatorBackPressureStats;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.junit.Assert;
import org.junit.Test;
import java.util.Collections;
import java.util.Optional;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
 * Tests for back pressure handler responses.
 */
public class JobVertexBackPressureHandlerTest {

	/** Refresh interval high enough that stats never expire within a test run. */
	private static final int NEVER_REFRESH = 9999;

	@Test
	public void testGetPaths() {
		JobVertexBackPressureHandler handler = newHandler(mock(BackPressureStatsTracker.class), 0);
		String[] paths = handler.getPaths();
		Assert.assertEquals(1, paths.length);
		Assert.assertEquals("/jobs/:jobid/vertices/:vertexid/backpressure", paths[0]);
	}

	/** Tests the response when no stats are available. */
	@Test
	public void testResponseNoStatsAvailable() throws Exception {
		ExecutionJobVertex jobVertex = mock(ExecutionJobVertex.class);
		BackPressureStatsTracker statsTracker = trackerReturning(Optional.<OperatorBackPressureStats>empty());

		JsonNode rootNode = requestJson(newHandler(statsTracker, NEVER_REFRESH), jobVertex);

		// Single element: only the status is reported when no stats exist.
		assertEquals(1, rootNode.size());

		JsonNode status = rootNode.get("status");
		assertNotNull(status);
		assertEquals("deprecated", status.textValue());

		// Missing stats must trigger a new stack trace sample.
		verify(statsTracker).triggerStackTraceSample(any(ExecutionJobVertex.class));
	}

	/** Tests the response when stats are available. */
	@Test
	public void testResponseStatsAvailable() throws Exception {
		ExecutionJobVertex jobVertex = mock(ExecutionJobVertex.class);
		OperatorBackPressureStats stats = sampleStats();
		BackPressureStatsTracker statsTracker = trackerReturning(Optional.of(stats));

		JsonNode rootNode = requestJson(newHandler(statsTracker, NEVER_REFRESH), jobVertex);

		// Stats are still fresh, hence status "ok".
		assertStatsResponse(rootNode, stats, "ok");

		// Verify not triggered.
		verify(statsTracker, never()).triggerStackTraceSample(any(ExecutionJobVertex.class));
	}

	/** Tests that after the refresh interval another sample is triggered. */
	@Test
	public void testResponsePassedRefreshInterval() throws Exception {
		ExecutionJobVertex jobVertex = mock(ExecutionJobVertex.class);
		OperatorBackPressureStats stats = sampleStats();
		BackPressureStatsTracker statsTracker = trackerReturning(Optional.of(stats));

		// Refresh interval of 0 fires immediately, so the payload is marked deprecated.
		JsonNode rootNode = requestJson(newHandler(statsTracker, 0), jobVertex);

		assertStatsResponse(rootNode, stats, "deprecated");

		// Verify triggered.
		verify(statsTracker).triggerStackTraceSample(any(ExecutionJobVertex.class));
	}

	/** Creates a handler with a mocked graph cache and a direct executor. */
	private static JobVertexBackPressureHandler newHandler(BackPressureStatsTracker statsTracker, int refreshInterval) {
		return new JobVertexBackPressureHandler(
			mock(ExecutionGraphCache.class),
			Executors.directExecutor(),
			statsTracker,
			refreshInterval);
	}

	/** Creates a tracker mock that always reports the given stats for any vertex. */
	private static BackPressureStatsTracker trackerReturning(Optional<OperatorBackPressureStats> stats) {
		BackPressureStatsTracker statsTracker = mock(BackPressureStatsTracker.class);
		when(statsTracker.getOperatorBackPressureStats(any(ExecutionJobVertex.class)))
			.thenReturn(stats);
		return statsTracker;
	}

	/** Sample stats containing one fully back-pressured subtask (ratio 1.0). */
	private static OperatorBackPressureStats sampleStats() {
		return new OperatorBackPressureStats(
			0, System.currentTimeMillis(), new double[] { 0.31, 0.48, 1.0, 0.0 });
	}

	/** Issues the request against the handler and parses the JSON response. */
	private static JsonNode requestJson(JobVertexBackPressureHandler handler, ExecutionJobVertex jobVertex) throws Exception {
		String response = handler.handleRequest(jobVertex, Collections.<String, String>emptyMap()).get();
		return new ObjectMapper().readTree(response);
	}

	/**
	 * Asserts the common shape of a response that carries stats: status,
	 * back pressure level ("high" because {@link #sampleStats()} contains a
	 * 1.0 ratio), end timestamp and one entry per subtask.
	 */
	private static void assertStatsResponse(JsonNode rootNode, OperatorBackPressureStats stats, String expectedStatus) {
		// Four elements: status, backpressure-level, end-timestamp, subtasks.
		assertEquals(4, rootNode.size());

		// Status
		JsonNode status = rootNode.get("status");
		assertNotNull(status);
		assertEquals(expectedStatus, status.textValue());

		// Back pressure level
		JsonNode backPressureLevel = rootNode.get("backpressure-level");
		assertNotNull(backPressureLevel);
		assertEquals("high", backPressureLevel.textValue());

		// End time stamp
		JsonNode endTimeStamp = rootNode.get("end-timestamp");
		assertNotNull(endTimeStamp);
		assertEquals(stats.getEndTimestamp(), endTimeStamp.longValue());

		// Subtasks
		JsonNode subTasks = rootNode.get("subtasks");
		assertEquals(stats.getNumberOfSubTasks(), subTasks.size());
		for (int i = 0; i < subTasks.size(); i++) {
			JsonNode subTask = subTasks.get(i);
			assertEquals(i, subTask.get("subtask").intValue());
			assertEquals(
				JobVertexBackPressureHandler.getBackPressureLevel(stats.getBackPressureRatio(i)),
				subTask.get("backpressure-level").textValue());
			assertEquals(stats.getBackPressureRatio(i), subTask.get("ratio").doubleValue(), 0.0);
		}
	}
}
| PangZhi/flink | flink-runtime/src/test/java/org/apache/flink/runtime/rest/handler/legacy/JobVertexBackPressureHandlerTest.java | Java | apache-2.0 | 7,593 |
<?php
/**
* This file is automatically @generated by {@link BuildMetadataPHPFromXml}.
* Please don't modify it directly.
*/
// Region metadata for AU (Australia, country calling code 61), in the
// var_export format consumed by the libphonenumber loader. Generated file —
// regenerate rather than hand-editing patterns.
return array (
  'generalDesc' =>
  array (
    'NationalNumberPattern' => '[1-578]\\d{5,9}',
    'PossibleNumberPattern' => '\\d{6,10}',
  ),
  'fixedLine' =>
  array (
    'NationalNumberPattern' => '
          [237]\\d{8}|
          8(?:
            [68]\\d{3}|
            7[0-69]\\d{2}|
            9(?:
              [02-9]\\d{2}|
              1(?:
                [0-57-9]\\d|
                6[0135-9]
              )
            )
          )\\d{4}
        ',
    'PossibleNumberPattern' => '\\d{8,9}',
    'ExampleNumber' => '212345678',
  ),
  'mobile' =>
  array (
    'NationalNumberPattern' => '
          14(?:
            5\\d|
            71
          )\\d{5}|
          4(?:
            [0-2]\\d|
            3[0-57-9]|
            4[47-9]|
            5[0-25-9]|
            6[6-9]|
            7[03-9]|
            8[147-9]|
            9[017-9]
          )\\d{6}
        ',
    'PossibleNumberPattern' => '\\d{9}',
    'ExampleNumber' => '412345678',
  ),
  'tollFree' =>
  array (
    'NationalNumberPattern' => '
          180(?:
            0\\d{3}|
            2
          )\\d{3}
        ',
    'PossibleNumberPattern' => '\\d{7,10}',
    'ExampleNumber' => '1800123456',
  ),
  'premiumRate' =>
  array (
    'NationalNumberPattern' => '190[0126]\\d{6}',
    'PossibleNumberPattern' => '\\d{10}',
    'ExampleNumber' => '1900123456',
  ),
  'sharedCost' =>
  array (
    'NationalNumberPattern' => '
          13(?:
            00\\d{2}
          )?\\d{4}
        ',
    'PossibleNumberPattern' => '\\d{6,10}',
    'ExampleNumber' => '1300123456',
  ),
  'personalNumber' =>
  array (
    'NationalNumberPattern' => '500\\d{6}',
    'PossibleNumberPattern' => '\\d{9}',
    'ExampleNumber' => '500123456',
  ),
  'voip' =>
  array (
    'NationalNumberPattern' => '550\\d{6}',
    'PossibleNumberPattern' => '\\d{9}',
    'ExampleNumber' => '550123456',
  ),
  'pager' =>
  array (
    'NationalNumberPattern' => '16\\d{3,7}',
    'PossibleNumberPattern' => '\\d{5,9}',
    'ExampleNumber' => '1612345',
  ),
  'uan' =>
  array (
    'NationalNumberPattern' => 'NA',
    'PossibleNumberPattern' => 'NA',
  ),
  'emergency' =>
  array (
    'NationalNumberPattern' => 'NA',
    'PossibleNumberPattern' => 'NA',
  ),
  'voicemail' =>
  array (
    'NationalNumberPattern' => 'NA',
    'PossibleNumberPattern' => 'NA',
  ),
  'shortCode' =>
  array (
    'NationalNumberPattern' => 'NA',
    'PossibleNumberPattern' => 'NA',
  ),
  'standardRate' =>
  array (
    'NationalNumberPattern' => 'NA',
    'PossibleNumberPattern' => 'NA',
  ),
  'carrierSpecific' =>
  array (
    'NationalNumberPattern' => 'NA',
    'PossibleNumberPattern' => 'NA',
  ),
  'noInternationalDialling' =>
  array (
    'NationalNumberPattern' => '
          1(?:
            3(?:
              \\d{4}|
              00\\d{6}
            )|
            80(?:
              0\\d{6}|
              2\\d{3}
            )
          )
        ',
    'PossibleNumberPattern' => '\\d{6,10}',
    'ExampleNumber' => '1300123456',
  ),
  'id' => 'AU',
  'countryCode' => 61,
  'internationalPrefix' => '(?:14(?:1[14]|34|4[17]|[56]6|7[47]|88))?001[14-689]',
  'preferredInternationalPrefix' => '0011',
  'nationalPrefix' => '0',
  'nationalPrefixForParsing' => '0',
  'sameMobileAndFixedLinePattern' => false,
  'numberFormat' =>
  array (
    0 =>
    array (
      'pattern' => '([2378])(\\d{4})(\\d{4})',
      'format' => '$1 $2 $3',
      'leadingDigitsPatterns' =>
      array (
        0 => '[2378]',
      ),
      'nationalPrefixFormattingRule' => '(0$1)',
      'domesticCarrierCodeFormattingRule' => '',
    ),
    1 =>
    array (
      'pattern' => '(\\d{3})(\\d{3})(\\d{3})',
      'format' => '$1 $2 $3',
      'leadingDigitsPatterns' =>
      array (
        0 => '
          [45]|
          14
        ',
      ),
      'nationalPrefixFormattingRule' => '0$1',
      'domesticCarrierCodeFormattingRule' => '',
    ),
    2 =>
    array (
      'pattern' => '(16)(\\d{3})(\\d{2,4})',
      'format' => '$1 $2 $3',
      'leadingDigitsPatterns' =>
      array (
        0 => '16',
      ),
      'nationalPrefixFormattingRule' => '0$1',
      'domesticCarrierCodeFormattingRule' => '',
    ),
    3 =>
    array (
      'pattern' => '(1[389]\\d{2})(\\d{3})(\\d{3})',
      'format' => '$1 $2 $3',
      'leadingDigitsPatterns' =>
      array (
        0 => '
          1(?:
            [38]0|
            90
          )
        ',
        1 => '
          1(?:
            [38]00|
            90
          )
        ',
      ),
      'nationalPrefixFormattingRule' => '$1',
      'domesticCarrierCodeFormattingRule' => '',
    ),
    4 =>
    array (
      'pattern' => '(180)(2\\d{3})',
      'format' => '$1 $2',
      'leadingDigitsPatterns' =>
      array (
        0 => '180',
        1 => '1802',
      ),
      'nationalPrefixFormattingRule' => '$1',
      'domesticCarrierCodeFormattingRule' => '',
    ),
    5 =>
    array (
      'pattern' => '(19\\d)(\\d{3})',
      'format' => '$1 $2',
      'leadingDigitsPatterns' =>
      array (
        0 => '19[13]',
      ),
      'nationalPrefixFormattingRule' => '$1',
      'domesticCarrierCodeFormattingRule' => '',
    ),
    6 =>
    array (
      'pattern' => '(19\\d{2})(\\d{4})',
      'format' => '$1 $2',
      'leadingDigitsPatterns' =>
      array (
        0 => '19[67]',
      ),
      'nationalPrefixFormattingRule' => '$1',
      'domesticCarrierCodeFormattingRule' => '',
    ),
    7 =>
    array (
      'pattern' => '(13)(\\d{2})(\\d{2})',
      'format' => '$1 $2 $3',
      'leadingDigitsPatterns' =>
      array (
        0 => '13[1-9]',
      ),
      'nationalPrefixFormattingRule' => '$1',
      'domesticCarrierCodeFormattingRule' => '',
    ),
  ),
  'intlNumberFormat' =>
  array (
  ),
  'mainCountryForCode' => true,
  'leadingZeroPossible' => false,
  'mobileNumberPortableRegion' => true,
);
| sudiptpa/libphonenumber-for-php | src/libphonenumber/data/PhoneNumberMetadata_AU.php | PHP | apache-2.0 | 6,075 |
package com.netflix.zuul.proxy;
import com.yammer.metrics.Metrics;
import com.yammer.metrics.reporting.JmxReporter;
import org.jboss.netty.bootstrap.ServerBootstrap;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
import org.jboss.netty.logging.InternalLoggerFactory;
import org.jboss.netty.logging.Slf4JLoggerFactory;
import org.jboss.netty.util.HashedWheelTimer;
import org.jboss.netty.util.Timer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.InetSocketAddress;
import java.util.concurrent.Executors;
public class HttpServer {

    private static final Timer TIMER = new HashedWheelTimer();
    private static final Logger LOG = LoggerFactory.getLogger(HttpServer.class);

    /** Port used when none is supplied on the command line. */
    private static final int DEFAULT_PORT = 80;

    private final int port;
    private Channel channel;

    /**
     * @param port TCP port the server binds to when {@link #run()} is called
     */
    public HttpServer(int port) {
        this.port = port;
    }

    /**
     * Configures the Netty bootstrap and binds the server socket.
     * The bound channel is retained so it could be closed later.
     */
    public synchronized void run() {
        // Configure the server.
        ServerBootstrap b = new ServerBootstrap(new NioServerSocketChannelFactory(Executors.newCachedThreadPool(), Executors.newCachedThreadPool()));
        b.setPipelineFactory(new CommonHttpPipeline(TIMER));
        b.setOption("child.tcpNoDelay", true);
        channel = b.bind(new InetSocketAddress(port));
        LOG.info("server bound to port {}", port);
    }

    /**
     * Entry point. Accepts an optional listen port as the first argument;
     * defaults to port 80 for backward compatibility.
     */
    public static void main(String[] args) {
        InternalLoggerFactory.setDefaultFactory(new Slf4JLoggerFactory());
        LOG.info("Starting server...");
        // Generalized: the previously hard-coded port 80 can now be overridden from the CLI.
        int port = args.length > 0 ? Integer.parseInt(args[0]) : DEFAULT_PORT;
        new HttpServer(port).run();
        JmxReporter.startDefault(Metrics.defaultRegistry());
        //ConsoleReporter.enable(1, TimeUnit.SECONDS);
    }
}
| jhulick/zuul-netty | netty-server/src/main/java/com/netflix/zuul/proxy/HttpServer.java | Java | apache-2.0 | 1,659 |
// Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Immutable;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.CodeAnalysis.Shared.Options;
using Roslyn.Utilities;
namespace Microsoft.CodeAnalysis.Diagnostics.EngineV2
{
internal partial class DiagnosticIncrementalAnalyzer
{
        /// <summary>
        /// State for diagnostics that belong to a project at given time.
        /// Diagnostics are persisted per (document, state-name) pair; when persistence
        /// fails they are held in <see cref="InMemoryStorage"/> instead.
        /// </summary>
        private class ProjectState
        {
            // project id of this state
            private readonly StateSet _owner;

            // last aggregated analysis result for this project saved
            private AnalysisResult _lastResult;

            public ProjectState(StateSet owner, ProjectId projectId)
            {
                _owner = owner;
                // start with an empty, default-version result; LoadInitial*Async repopulates it lazily
                _lastResult = new AnalysisResult(projectId, VersionStamp.Default, documentIds: null, isEmpty: true, fromBuild: false);
            }

            /// <summary>Whether the last saved result originated from a build rather than live analysis.</summary>
            public bool FromBuild => _lastResult.FromBuild;

            /// <summary>Ids of all documents that currently have diagnostics recorded.</summary>
            public ImmutableHashSet<DocumentId> GetDocumentsWithDiagnostics()
            {
                return _lastResult.DocumentIdsOrEmpty;
            }

            /// <summary>True when no diagnostics are recorded for the whole project.</summary>
            public bool IsEmpty()
            {
                return _lastResult.IsEmpty;
            }

            /// <summary>True when no diagnostics are recorded for the given document.</summary>
            public bool IsEmpty(DocumentId documentId)
            {
                return IsEmpty(_lastResult, documentId);
            }

            /// <summary>
            /// Return all diagnostics for the given project stored in this state
            /// </summary>
            public async Task<AnalysisResult> GetAnalysisDataAsync(Project project, bool avoidLoadingData, CancellationToken cancellationToken)
            {
                // make a copy of last result.
                var lastResult = _lastResult;
                Contract.ThrowIfFalse(lastResult.ProjectId == project.Id);

                if (lastResult.IsDefault)
                {
                    return await LoadInitialAnalysisDataAsync(project, cancellationToken).ConfigureAwait(false);
                }

                // PERF: avoid loading data if version is not right one.
                // avoid loading data flag is there as a strictly perf optimization.
                var version = await GetDiagnosticVersionAsync(project, cancellationToken).ConfigureAwait(false);
                if (avoidLoadingData && lastResult.Version != version)
                {
                    return lastResult;
                }

                // if given project doesnt have any diagnostics, return empty.
                if (lastResult.IsEmpty)
                {
                    return new AnalysisResult(lastResult.ProjectId, lastResult.Version);
                }

                // loading data can be cancelled any time.
                var serializer = new DiagnosticDataSerializer(_owner.AnalyzerVersion, lastResult.Version);
                var builder = new Builder(project.Id, lastResult.Version, lastResult.DocumentIds);

                foreach (var documentId in lastResult.DocumentIds)
                {
                    cancellationToken.ThrowIfCancellationRequested();

                    var document = project.GetDocument(documentId);
                    if (document == null)
                    {
                        // document was removed from the project since the result was saved
                        continue;
                    }

                    if (!await TryDeserializeDocumentAsync(serializer, document, builder, cancellationToken).ConfigureAwait(false))
                    {
                        Contract.Requires(lastResult.Version == VersionStamp.Default);

                        // this can happen if we merged back active file diagnostics back to project state but
                        // project state didn't have diagnostics for the file yet. (since project state was staled)
                        continue;
                    }
                }

                if (!await TryDeserializeAsync(serializer, project, project.Id, _owner.NonLocalStateName, builder.AddOthers, cancellationToken).ConfigureAwait(false))
                {
                    Contract.Requires(false, "How this can happen?");
                }

                return builder.ToResult();
            }

            /// <summary>
            /// Return all diagnostics for the given document stored in this state including non local diagnostics for this document
            /// </summary>
            public async Task<AnalysisResult> GetAnalysisDataAsync(Document document, bool avoidLoadingData, CancellationToken cancellationToken)
            {
                // make a copy of last result.
                var lastResult = _lastResult;
                Contract.ThrowIfFalse(lastResult.ProjectId == document.Project.Id);

                if (lastResult.IsDefault)
                {
                    return await LoadInitialAnalysisDataAsync(document, cancellationToken).ConfigureAwait(false);
                }

                var version = await GetDiagnosticVersionAsync(document.Project, cancellationToken).ConfigureAwait(false);
                if (avoidLoadingData && lastResult.Version != version)
                {
                    return lastResult;
                }

                // if given document doesnt have any diagnostics, return empty.
                if (IsEmpty(lastResult, document.Id))
                {
                    return new AnalysisResult(lastResult.ProjectId, lastResult.Version);
                }

                // loading data can be cancelled any time.
                var serializer = new DiagnosticDataSerializer(_owner.AnalyzerVersion, lastResult.Version);
                var builder = new Builder(document.Project.Id, lastResult.Version);

                if (!await TryDeserializeDocumentAsync(serializer, document, builder, cancellationToken).ConfigureAwait(false))
                {
                    Contract.Requires(lastResult.Version == VersionStamp.Default);

                    // this can happen if we merged back active file diagnostics back to project state but
                    // project state didn't have diagnostics for the file yet. (since project state was staled)
                }

                return builder.ToResult();
            }

            /// <summary>
            /// Return all no location diagnostics for the given project stored in this state
            /// </summary>
            public async Task<AnalysisResult> GetProjectAnalysisDataAsync(Project project, bool avoidLoadingData, CancellationToken cancellationToken)
            {
                // make a copy of last result.
                var lastResult = _lastResult;
                Contract.ThrowIfFalse(lastResult.ProjectId == project.Id);

                if (lastResult.IsDefault)
                {
                    return await LoadInitialProjectAnalysisDataAsync(project, cancellationToken).ConfigureAwait(false);
                }

                var version = await GetDiagnosticVersionAsync(project, cancellationToken).ConfigureAwait(false);
                if (avoidLoadingData && lastResult.Version != version)
                {
                    return lastResult;
                }

                // if given document doesnt have any diagnostics, return empty.
                if (lastResult.IsEmpty)
                {
                    return new AnalysisResult(lastResult.ProjectId, lastResult.Version);
                }

                // loading data can be cancelled any time.
                var serializer = new DiagnosticDataSerializer(_owner.AnalyzerVersion, lastResult.Version);
                var builder = new Builder(project.Id, lastResult.Version);

                if (!await TryDeserializeAsync(serializer, project, project.Id, _owner.NonLocalStateName, builder.AddOthers, cancellationToken).ConfigureAwait(false))
                {
                    Contract.Requires(false, "How this can happen?");
                }

                return builder.ToResult();
            }

            /// <summary>
            /// Persists the given (non-aggregated) result and remembers its aggregated form
            /// as the new <see cref="_lastResult"/>.
            /// </summary>
            public async Task SaveAsync(Project project, AnalysisResult result)
            {
                Contract.ThrowIfTrue(result.IsAggregatedForm);

                RemoveInMemoryCache(_lastResult);

                // save last aggregated form of analysis result
                _lastResult = result.ToAggregatedForm();

                // serialization can't be cancelled.
                var serializer = new DiagnosticDataSerializer(_owner.AnalyzerVersion, result.Version);
                foreach (var documentId in result.DocumentIds)
                {
                    var document = project.GetDocument(documentId);
                    Contract.ThrowIfNull(document);

                    await SerializeAsync(serializer, document, document.Id, _owner.SyntaxStateName, GetResult(result, AnalysisKind.Syntax, document.Id)).ConfigureAwait(false);
                    await SerializeAsync(serializer, document, document.Id, _owner.SemanticStateName, GetResult(result, AnalysisKind.Semantic, document.Id)).ConfigureAwait(false);
                    await SerializeAsync(serializer, document, document.Id, _owner.NonLocalStateName, GetResult(result, AnalysisKind.NonLocal, document.Id)).ConfigureAwait(false);
                }

                await SerializeAsync(serializer, project, result.ProjectId, _owner.NonLocalStateName, result.Others).ConfigureAwait(false);
            }

            /// <summary>Invalidates the cached version while keeping the diagnostic payload.</summary>
            public void ResetVersion()
            {
                // reset version of cached data so that we can recalculate new data (ex, OnDocumentReset)
                _lastResult = new AnalysisResult(_lastResult.ProjectId, VersionStamp.Default, _lastResult.DocumentIds, _lastResult.IsEmpty, _lastResult.FromBuild);
            }

            /// <summary>
            /// Folds the diagnostics of an active (open) file back into this project state.
            /// </summary>
            public async Task MergeAsync(ActiveFileState state, Document document)
            {
                Contract.ThrowIfFalse(state.DocumentId == document.Id);

                // merge active file state to project state
                var lastResult = _lastResult;

                var syntax = state.GetAnalysisData(AnalysisKind.Syntax);
                var semantic = state.GetAnalysisData(AnalysisKind.Semantic);

                var project = document.Project;
                var fullAnalysis = ServiceFeatureOnOffOptions.IsClosedFileDiagnosticsEnabled(project.Solution.Workspace, project.Language);

                // keep from build flag if full analysis is off
                var fromBuild = fullAnalysis ? false : lastResult.FromBuild;

                // if it is allowed to keep project state, check versions and if they are same, bail out
                // if full solution analysis is off or we are asked to reset document state, we always merge.
                if (fullAnalysis &&
                    syntax.Version != VersionStamp.Default &&
                    syntax.Version == semantic.Version &&
                    syntax.Version == lastResult.Version)
                {
                    // all data is in sync already.
                    return;
                }

                // we have mixed versions or full analysis is off, set it to default so that it can be re-calculated next time so data can be in sync.
                var version = VersionStamp.Default;

                // serialization can't be cancelled.
                var serializer = new DiagnosticDataSerializer(_owner.AnalyzerVersion, version);

                // save active file diagnostics back to project state
                await SerializeAsync(serializer, document, document.Id, _owner.SyntaxStateName, syntax.Items).ConfigureAwait(false);
                await SerializeAsync(serializer, document, document.Id, _owner.SemanticStateName, semantic.Items).ConfigureAwait(false);

                // save last aggregated form of analysis result
                _lastResult = new AnalysisResult(_lastResult.ProjectId, version, _lastResult.DocumentIdsOrEmpty.Add(state.DocumentId), isEmpty: false, fromBuild: fromBuild);
            }

            /// <summary>Drops cached entries for a removed document; returns true if any diagnostics remain for it.</summary>
            public bool OnDocumentRemoved(DocumentId id)
            {
                RemoveInMemoryCacheEntries(id);
                return !IsEmpty(id);
            }

            /// <summary>Drops cached project-level entries for a removed project; returns true if any diagnostics remain.</summary>
            public bool OnProjectRemoved(ProjectId id)
            {
                RemoveInMemoryCacheEntry(id, _owner.NonLocalStateName);
                return !IsEmpty();
            }

            // Loads persisted diagnostics for every document of the project plus project-level ones.
            private async Task<AnalysisResult> LoadInitialAnalysisDataAsync(Project project, CancellationToken cancellationToken)
            {
                // loading data can be cancelled any time.
                var version = await GetDiagnosticVersionAsync(project, cancellationToken).ConfigureAwait(false);
                var serializer = new DiagnosticDataSerializer(_owner.AnalyzerVersion, version);
                var builder = new Builder(project.Id, version);

                foreach (var document in project.Documents)
                {
                    cancellationToken.ThrowIfCancellationRequested();

                    if (!await TryDeserializeDocumentAsync(serializer, document, builder, cancellationToken).ConfigureAwait(false))
                    {
                        continue;
                    }
                }

                if (!await TryDeserializeAsync(serializer, project, project.Id, _owner.NonLocalStateName, builder.AddOthers, cancellationToken).ConfigureAwait(false))
                {
                    return new AnalysisResult(project.Id, VersionStamp.Default, ImmutableHashSet<DocumentId>.Empty, isEmpty: true, fromBuild: false);
                }

                return builder.ToResult();
            }

            // Loads persisted diagnostics for a single document.
            private async Task<AnalysisResult> LoadInitialAnalysisDataAsync(Document document, CancellationToken cancellationToken)
            {
                // loading data can be cancelled any time.
                var project = document.Project;

                var version = await GetDiagnosticVersionAsync(project, cancellationToken).ConfigureAwait(false);
                var serializer = new DiagnosticDataSerializer(_owner.AnalyzerVersion, version);
                var builder = new Builder(project.Id, version);

                if (!await TryDeserializeDocumentAsync(serializer, document, builder, cancellationToken).ConfigureAwait(false))
                {
                    return new AnalysisResult(project.Id, VersionStamp.Default, ImmutableHashSet<DocumentId>.Empty, isEmpty: true, fromBuild: false);
                }

                return builder.ToResult();
            }

            // Loads persisted project-level (no-location) diagnostics only.
            private async Task<AnalysisResult> LoadInitialProjectAnalysisDataAsync(Project project, CancellationToken cancellationToken)
            {
                // loading data can be cancelled any time.
                var version = await GetDiagnosticVersionAsync(project, cancellationToken).ConfigureAwait(false);
                var serializer = new DiagnosticDataSerializer(_owner.AnalyzerVersion, version);
                var builder = new Builder(project.Id, version);

                if (!await TryDeserializeAsync(serializer, project, project.Id, _owner.NonLocalStateName, builder.AddOthers, cancellationToken).ConfigureAwait(false))
                {
                    return new AnalysisResult(project.Id, VersionStamp.Default, ImmutableHashSet<DocumentId>.Empty, isEmpty: true, fromBuild: false);
                }

                return builder.ToResult();
            }

            // Persists diagnostics; on failure keeps them alive in the in-memory cache instead.
            private async Task SerializeAsync(DiagnosticDataSerializer serializer, object documentOrProject, object key, string stateKey, ImmutableArray<DiagnosticData> diagnostics)
            {
                // try to serialize it
                if (await serializer.SerializeAsync(documentOrProject, stateKey, diagnostics, CancellationToken.None).ConfigureAwait(false))
                {
                    // we succeeded saving it to persistent storage. remove it from in memory cache if it exists
                    RemoveInMemoryCacheEntry(key, stateKey);
                    return;
                }

                // if serialization fail, hold it in the memory
                InMemoryStorage.Cache(_owner.Analyzer, ValueTuple.Create(key, stateKey), new CacheEntry(serializer.Version, diagnostics));
            }

            // Restores all three diagnostic kinds (syntax/semantic/non-local) for one document.
            private async Task<bool> TryDeserializeDocumentAsync(DiagnosticDataSerializer serializer, Document document, Builder builder, CancellationToken cancellationToken)
            {
                var result = true;

                result &= await TryDeserializeAsync(serializer, document, document.Id, _owner.SyntaxStateName, builder.AddSyntaxLocals, cancellationToken).ConfigureAwait(false);
                result &= await TryDeserializeAsync(serializer, document, document.Id, _owner.SemanticStateName, builder.AddSemanticLocals, cancellationToken).ConfigureAwait(false);
                result &= await TryDeserializeAsync(serializer, document, document.Id, _owner.NonLocalStateName, builder.AddNonLocals, cancellationToken).ConfigureAwait(false);

                return result;
            }

            // Restores one diagnostics set and feeds it to the builder callback; false when nothing was stored.
            private async Task<bool> TryDeserializeAsync<T>(
                DiagnosticDataSerializer serializer,
                object documentOrProject, T key, string stateKey,
                Action<T, ImmutableArray<DiagnosticData>> add,
                CancellationToken cancellationToken) where T : class
            {
                var diagnostics = await DeserializeAsync(serializer, documentOrProject, key, stateKey, cancellationToken).ConfigureAwait(false);
                if (diagnostics.IsDefault)
                {
                    return false;
                }

                add(key, diagnostics);
                return true;
            }

            // Reads diagnostics from the in-memory cache first, then from persistent storage.
            private async Task<ImmutableArray<DiagnosticData>> DeserializeAsync(DiagnosticDataSerializer serializer, object documentOrProject, object key, string stateKey, CancellationToken cancellationToken)
            {
                // check cache first
                CacheEntry entry;
                if (InMemoryStorage.TryGetValue(_owner.Analyzer, ValueTuple.Create(key, stateKey), out entry) && serializer.Version == entry.Version)
                {
                    return entry.Diagnostics;
                }

                // try to deserialize it
                return await serializer.DeserializeAsync(documentOrProject, stateKey, cancellationToken).ConfigureAwait(false);
            }

            private void RemoveInMemoryCache(AnalysisResult lastResult)
            {
                // remove old cache
                foreach (var documentId in lastResult.DocumentIdsOrEmpty)
                {
                    RemoveInMemoryCacheEntries(documentId);
                }
            }

            private void RemoveInMemoryCacheEntries(DocumentId id)
            {
                RemoveInMemoryCacheEntry(id, _owner.SyntaxStateName);
                RemoveInMemoryCacheEntry(id, _owner.SemanticStateName);
                RemoveInMemoryCacheEntry(id, _owner.NonLocalStateName);
            }

            private void RemoveInMemoryCacheEntry(object key, string stateKey)
            {
                // remove in memory cache if entry exist
                InMemoryStorage.Remove(_owner.Analyzer, ValueTuple.Create(key, stateKey));
            }

            private bool IsEmpty(AnalysisResult result, DocumentId documentId)
            {
                return !result.DocumentIdsOrEmpty.Contains(documentId);
            }

            // we have this builder to avoid allocating collections unnecessarily.
            private class Builder
            {
                private readonly ProjectId _projectId;
                private readonly VersionStamp _version;
                private readonly ImmutableHashSet<DocumentId> _documentIds;

                // per-document maps are created lazily on first Add* call
                private ImmutableDictionary<DocumentId, ImmutableArray<DiagnosticData>>.Builder _syntaxLocals;
                private ImmutableDictionary<DocumentId, ImmutableArray<DiagnosticData>>.Builder _semanticLocals;
                private ImmutableDictionary<DocumentId, ImmutableArray<DiagnosticData>>.Builder _nonLocals;
                private ImmutableArray<DiagnosticData> _others;

                public Builder(ProjectId projectId, VersionStamp version, ImmutableHashSet<DocumentId> documentIds = null)
                {
                    _projectId = projectId;
                    _version = version;
                    _documentIds = documentIds;
                }

                public void AddSyntaxLocals(DocumentId documentId, ImmutableArray<DiagnosticData> diagnostics)
                {
                    Add(ref _syntaxLocals, documentId, diagnostics);
                }

                public void AddSemanticLocals(DocumentId documentId, ImmutableArray<DiagnosticData> diagnostics)
                {
                    Add(ref _semanticLocals, documentId, diagnostics);
                }

                public void AddNonLocals(DocumentId documentId, ImmutableArray<DiagnosticData> diagnostics)
                {
                    Add(ref _nonLocals, documentId, diagnostics);
                }

                public void AddOthers(ProjectId unused, ImmutableArray<DiagnosticData> diagnostics)
                {
                    _others = diagnostics;
                }

                private void Add(ref ImmutableDictionary<DocumentId, ImmutableArray<DiagnosticData>>.Builder locals, DocumentId documentId, ImmutableArray<DiagnosticData> diagnostics)
                {
                    locals = locals ?? ImmutableDictionary.CreateBuilder<DocumentId, ImmutableArray<DiagnosticData>>();
                    locals.Add(documentId, diagnostics);
                }

                public AnalysisResult ToResult()
                {
                    return new AnalysisResult(_projectId, _version,
                        _syntaxLocals?.ToImmutable() ?? ImmutableDictionary<DocumentId, ImmutableArray<DiagnosticData>>.Empty,
                        _semanticLocals?.ToImmutable() ?? ImmutableDictionary<DocumentId, ImmutableArray<DiagnosticData>>.Empty,
                        _nonLocals?.ToImmutable() ?? ImmutableDictionary<DocumentId, ImmutableArray<DiagnosticData>>.Empty,
                        _others.IsDefault ? ImmutableArray<DiagnosticData>.Empty : _others,
                        _documentIds,
                        fromBuild: false);
                }
            }
        }
}
| basoundr/roslyn | src/Features/Core/Portable/Diagnostics/EngineV2/DiagnosticIncrementalAnalyzer.ProjectState.cs | C# | apache-2.0 | 22,614 |
package com.nianticproject.ingress.common.ui.widget;
import broot.ingress.mod.Entry;
public enum MenuTabId {
    MOD_ABOUT, MOD_ITEMS, ITEMS, INTEL, MISSION, RECRUIT, DEVICE;

    /** Lets the mod supply a custom label; falls back to the enum constant's name. */
    @Override
    public String toString() {
        final String override = Entry.MenuTabId_onToString(this);
        if (override != null) {
            return override;
        }
        return super.toString();
    }

    /** Secondary text shown on the tab; only RECRUIT currently has one. */
    public String getText2() {
        if (this == RECRUIT) {
            return "[5]";
        }
        return "";
    }
}
| zimler/ingress-apk-mod | sim/com/nianticproject/ingress/common/ui/widget/MenuTabId.java | Java | apache-2.0 | 473 |
package org.hl7.fhir.dstu2016may.hapi.validation;
import java.util.List;
import org.hl7.fhir.dstu2016may.validation.ValidationMessage;
import org.hl7.fhir.instance.model.api.IBaseResource;
import ca.uhn.fhir.model.api.Bundle;
import ca.uhn.fhir.validation.IValidationContext;
import ca.uhn.fhir.validation.IValidatorModule;
import ca.uhn.fhir.validation.ResultSeverityEnum;
import ca.uhn.fhir.validation.SingleValidationMessage;
/**
* Base class for a bridge between the RI validation tools and HAPI
*/
abstract class BaseValidatorBridge implements IValidatorModule {
public BaseValidatorBridge() {
super();
}
private void doValidate(IValidationContext<?> theCtx) {
List<ValidationMessage> messages = validate(theCtx);
for (ValidationMessage riMessage : messages) {
SingleValidationMessage hapiMessage = new SingleValidationMessage();
if (riMessage.getCol() != -1) {
hapiMessage.setLocationCol(riMessage.getCol());
}
if (riMessage.getLine() != -1) {
hapiMessage.setLocationLine(riMessage.getLine());
}
hapiMessage.setLocationString(riMessage.getLocation());
hapiMessage.setMessage(riMessage.getMessage());
if (riMessage.getLevel() != null) {
hapiMessage.setSeverity(ResultSeverityEnum.fromCode(riMessage.getLevel().toCode()));
}
theCtx.addValidationMessage(hapiMessage);
}
}
protected abstract List<ValidationMessage> validate(IValidationContext<?> theCtx);
@Override
public void validateBundle(IValidationContext<Bundle> theCtx) {
doValidate(theCtx);
}
@Override
public void validateResource(IValidationContext<IBaseResource> theCtx) {
doValidate(theCtx);
}
} | Gaduo/hapi-fhir | hapi-fhir-structures-dstu2.1/src/main/java/org/hl7/fhir/dstu2016may/hapi/validation/BaseValidatorBridge.java | Java | apache-2.0 | 1,638 |
using System;
namespace Foundatio.Logging {
public static class BuilderExtensions {
    /// <summary>
    /// Starts a fluent log entry at the given level, or returns a no-op builder
    /// when that level is disabled on the logger.
    /// </summary>
    public static ILogBuilder Level(this ILogger logger, LogLevel logLevel) {
        if (!logger.IsEnabled(logLevel))
            return NullLogBuilder.Instance;

        return new LogBuilder(logLevel, logger);
    }

    // Each level-specific helper delegates to Level() so the enabled-check and
    // builder construction live in exactly one place.

    /// <summary>Starts a Trace-level log entry.</summary>
    public static ILogBuilder Trace(this ILogger logger) {
        return logger.Level(LogLevel.Trace);
    }

    /// <summary>Starts a Debug-level log entry.</summary>
    public static ILogBuilder Debug(this ILogger logger) {
        return logger.Level(LogLevel.Debug);
    }

    /// <summary>Starts an Information-level log entry.</summary>
    public static ILogBuilder Info(this ILogger logger) {
        return logger.Level(LogLevel.Information);
    }

    /// <summary>Starts a Warning-level log entry.</summary>
    public static ILogBuilder Warn(this ILogger logger) {
        return logger.Level(LogLevel.Warning);
    }

    /// <summary>Starts an Error-level log entry.</summary>
    public static ILogBuilder Error(this ILogger logger) {
        return logger.Level(LogLevel.Error);
    }

    /// <summary>Starts a Critical-level log entry.</summary>
    public static ILogBuilder Critical(this ILogger logger) {
        return logger.Level(LogLevel.Critical);
    }
}
| wgraham17/Foundatio | src/Foundatio/Logging/Fluent/BuilderExtensions.cs | C# | apache-2.0 | 1,760 |
/**
* Copyright (c) 2014 Netflix, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package mslcli.common.keyx;
import java.security.KeyPair;
import java.util.HashMap;
import java.util.Map;
import javax.crypto.interfaces.DHPrivateKey;
import javax.crypto.interfaces.DHPublicKey;
import com.netflix.msl.MslKeyExchangeException;
import com.netflix.msl.keyx.JsonWebEncryptionLadderExchange;
import com.netflix.msl.keyx.KeyExchangeFactory;
import com.netflix.msl.keyx.KeyExchangeScheme;
import com.netflix.msl.keyx.KeyRequestData;
import com.netflix.msl.keyx.WrapCryptoContextRepository;
import com.netflix.msl.util.AuthenticationUtils;
import mslcli.common.CmdArguments;
import mslcli.common.IllegalCmdArgumentException;
import mslcli.common.util.AppContext;
import mslcli.common.util.ConfigurationException;
import mslcli.common.util.WrapCryptoContextRepositoryHandle;
/**
* <p>
* Json Web Encryption Ladder Key Exchange Handle class
* </p>
*
* @author Vadim Spector <vspector@netflix.com>
*/
public class JsonWebEncryptionLadderExchangeHandle extends KeyExchangeHandle {
    /**
     * default constructor
     */
    public JsonWebEncryptionLadderExchangeHandle() {
        super(KeyExchangeScheme.JWE_LADDER);
    }

    @Override
    public KeyRequestData getKeyRequestData(final AppContext appCtx, final CmdArguments args)
        throws ConfigurationException, IllegalCmdArgumentException, MslKeyExchangeException
    {
        final JsonWebEncryptionLadderExchange.Mechanism mechanism = getKeyExchangeMechanism(
            JsonWebEncryptionLadderExchange.Mechanism.class, args.getKeyExchangeMechanism());

        // The WRAP mechanism re-uses wrapping key data persisted from a previous
        // exchange; every other mechanism starts without wrap data.
        byte[] wrapdata = null;
        if (mechanism == JsonWebEncryptionLadderExchange.Mechanism.WRAP) {
            wrapdata = getRepo(appCtx, args).getLastWrapdata();
            if (wrapdata == null) {
                throw new IllegalCmdArgumentException(String.format("No Key Wrapping Data Found for {%s %s}", getScheme().name(), mechanism));
            }
        }
        return new JsonWebEncryptionLadderExchange.RequestData(mechanism, wrapdata);
    }

    @Override
    public KeyExchangeFactory getKeyExchangeFactory(final AppContext appCtx, final CmdArguments args, final AuthenticationUtils authutils)
        throws ConfigurationException, IllegalCmdArgumentException
    {
        return new JsonWebEncryptionLadderExchange(getRepo(appCtx, args), authutils);
    }
}
| rspieldenner/msl | examples/mslcli/src/main/java/mslcli/common/keyx/JsonWebEncryptionLadderExchangeHandle.java | Java | apache-2.0 | 2,946 |
require 'spec_helper'
require 'shared-examples'
manifest = 'openstack-network/keystone.pp'
describe manifest do
  shared_examples 'catalog' do
    # Defaults: internal and admin endpoints use plain HTTP against the management VIP;
    # admin starts as an alias of internal and is only split apart under TLS-everywhere.
    internal_protocol = 'http'
    internal_address = Noop.hiera('management_vip')
    admin_protocol = internal_protocol
    admin_address = internal_address
    # TLS-everywhere (use_ssl) overrides all three endpoints with per-role hostnames;
    # public-only TLS (public_ssl/services) overrides just the public endpoint.
    if Noop.hiera_structure('use_ssl', false)
      public_protocol = 'https'
      public_address = Noop.hiera_structure('use_ssl/neutron_public_hostname')
      internal_protocol = 'https'
      internal_address = Noop.hiera_structure('use_ssl/neutron_internal_hostname')
      admin_protocol = 'https'
      admin_address = Noop.hiera_structure('use_ssl/neutron_admin_hostname')
    elsif Noop.hiera_structure('public_ssl/services', false)
      public_address = Noop.hiera_structure('public_ssl/hostname')
      public_protocol = 'https'
    else
      public_protocol = 'http'
      public_address = Noop.hiera('public_vip')
    end
    # Keystone registration settings, with the upstream defaults as fallbacks.
    region = Noop.hiera_structure('quantum_settings/region', 'RegionOne')
    password = Noop.hiera_structure('quantum_settings/keystone/admin_password')
    auth_name = Noop.hiera_structure('quantum_settings/auth_name', 'neutron')
    configure_endpoint = Noop.hiera_structure('quantum_settings/configure_endpoint', true)
    configure_user = Noop.hiera_structure('quantum_settings/configure_user', true)
    configure_user_role = Noop.hiera_structure('quantum_settings/configure_user_role', true)
    service_name = Noop.hiera_structure('quantum_settings/service_name', 'neutron')
    tenant = Noop.hiera_structure('quantum_settings/tenant', 'services')
    # Neutron API always listens on 9696.
    port ='9696'
    public_url = "#{public_protocol}://#{public_address}:#{port}"
    internal_url = "#{internal_protocol}://#{internal_address}:#{port}"
    admin_url = "#{admin_protocol}://#{admin_address}:#{port}"
    # The auth class is only declared when Neutron networking is enabled.
    use_neutron = Noop.hiera('use_neutron', false)
    if use_neutron
      it 'should declare neutron::keystone::auth class' do
        should contain_class('neutron::keystone::auth').with(
          'password' => password,
          'auth_name' => auth_name,
          'configure_endpoint' => configure_endpoint,
          'configure_user' => configure_user,
          'configure_user_role' => configure_user_role,
          'service_name' => service_name,
          'public_url' => public_url,
          'internal_url' => internal_url,
          'admin_url' => admin_url,
          'region' => region,
        )
      end
    end
  end
  test_ubuntu_and_centos manifest
end
| eayunstack/fuel-library | tests/noop/spec/hosts/openstack-network/keystone_spec.rb | Ruby | apache-2.0 | 2,683 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# Source: google/logging/v2/logging_config.proto for package 'google.logging.v2'
# Original file comments:
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'grpc'
require 'google/logging/v2/logging_config_pb'
module Google
  module Logging
    module V2
      module ConfigServiceV2
        # Service for configuring sinks used to export log entries outside of
        # Stackdriver Logging.
        #
        # NOTE: this file is generated by the protocol buffer compiler (see the
        # header above) -- regenerate from the .proto instead of editing by hand.
        class Service

          include GRPC::GenericService

          self.marshal_class_method = :encode
          self.unmarshal_class_method = :decode
          self.service_name = 'google.logging.v2.ConfigServiceV2'

          # Lists sinks.
          rpc :ListSinks, ListSinksRequest, ListSinksResponse
          # Gets a sink.
          rpc :GetSink, GetSinkRequest, LogSink
          # Creates a sink that exports specified log entries to a destination. The
          # export of newly-ingested log entries begins immediately, unless the sink's
          # `writer_identity` is not permitted to write to the destination. A sink can
          # export log entries only from the resource owning the sink.
          rpc :CreateSink, CreateSinkRequest, LogSink
          # Updates a sink. This method replaces the following fields in the existing
          # sink with values from the new sink: `destination`, and `filter`.
          # The updated sink might also have a new `writer_identity`; see the
          # `unique_writer_identity` field.
          rpc :UpdateSink, UpdateSinkRequest, LogSink
          # Deletes a sink. If the sink has a unique `writer_identity`, then that
          # service account is also deleted.
          rpc :DeleteSink, DeleteSinkRequest, Google::Protobuf::Empty
          # Lists all the exclusions in a parent resource.
          rpc :ListExclusions, ListExclusionsRequest, ListExclusionsResponse
          # Gets the description of an exclusion.
          rpc :GetExclusion, GetExclusionRequest, LogExclusion
          # Creates a new exclusion in a specified parent resource.
          # Only log entries belonging to that resource can be excluded.
          # You can have up to 10 exclusions in a resource.
          rpc :CreateExclusion, CreateExclusionRequest, LogExclusion
          # Changes one or more properties of an existing exclusion.
          rpc :UpdateExclusion, UpdateExclusionRequest, LogExclusion
          # Deletes an exclusion.
          rpc :DeleteExclusion, DeleteExclusionRequest, Google::Protobuf::Empty
        end

        # Concrete client stub class derived from the service definition above.
        Stub = Service.rpc_stub_class
      end
    end
  end
end
| CloudVLab/google-cloud-ruby | google-cloud-logging/lib/google/logging/v2/logging_config_services_pb.rb | Ruby | apache-2.0 | 3,146 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.f1x.log;
import java.io.IOException;
import java.io.OutputStream;
/**
* Component that adapts FIX messages to format appropriate for log file. For example, timestamp and message direction.
*
* Implementations are not required to be thread safe. Caller must organize critical section for output stream.
*/
public interface LogFormatter {

    /**
     * Formats a single FIX message and writes the resulting log record to the given stream.
     *
     * @param isInbound message direction (true if message is inbound)
     * @param buffer Buffer containing FIX message to log
     * @param offset message offset in the buffer
     * @param length message length in bytes
     * @param os destination stream for the formatted record; caller owns synchronization (see interface note above)
     * @return number of bytes written into output stream
     * @throws IOException if writing to the output stream fails
     */
    int log(boolean isInbound, byte[] buffer, int offset, int length, OutputStream os) throws IOException;
}
| andymalakov/f1x | src/main/java/org/f1x/log/LogFormatter.java | Java | apache-2.0 | 2,999 |
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Reflection;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.CodeAnalysis;
using Microsoft.CodeAnalysis.Host.Mef;
using Microsoft.CodeAnalysis.Text;
using Microsoft.SourceBrowser.Common;
namespace Microsoft.SourceBrowser.HtmlGenerator
{
public class MetadataAsSource
{
    // Cached delegate for IMetadataAsSourceService.AddSourceToAsync; bound lazily via
    // reflection because the interface is internal to Microsoft.CodeAnalysis.Features.
    private static Func<Document, ISymbol, CancellationToken, Task<Document>> addSourceToAsync = null;

    // Creates a delegate bound to AddSourceToAsync on the given service instance through reflection.
    private static Func<Document, ISymbol, CancellationToken, Task<Document>> ReflectAddSourceToAsync(object service)
    {
        var assembly = Assembly.Load("Microsoft.CodeAnalysis.Features");
        var type = assembly.GetType("Microsoft.CodeAnalysis.MetadataAsSource.IMetadataAsSourceService");
        var method = type.GetMethod("AddSourceToAsync");
        return (Func<Document, ISymbol, CancellationToken, Task<Document>>)
            Delegate.CreateDelegate(typeof(Func<Document, ISymbol, CancellationToken, Task<Document>>), service, method);
    }

    /// <summary>Creates a metadata reference for an assembly file, attaching its XML doc file when one is registered.</summary>
    public static MetadataReference CreateReferenceFromFilePath(string assemblyFilePath)
    {
        var documentationProvider = GetDocumentationProvider(
            assemblyFilePath,
            Path.GetFileNameWithoutExtension(assemblyFilePath));
        return MetadataReference.CreateFromFile(assemblyFilePath, documentation: documentationProvider);
    }

    /// <summary>
    /// Builds an ad-hoc Solution containing "metadata as source" documents for every
    /// reference-able named type in the given assembly. Returns null if generation fails.
    /// </summary>
    public static Solution LoadMetadataAsSourceSolution(string assemblyFilePath)
    {
        try
        {
            using (Disposable.Timing("Metadata as source: " + assemblyFilePath))
            {
                var assemblyName = Path.GetFileNameWithoutExtension(assemblyFilePath);
                var solution = new AdhocWorkspace(MefHostServices.DefaultHost).CurrentSolution;
                var workspace = solution.Workspace;
                var project = solution.AddProject(assemblyName, assemblyName, LanguageNames.CSharp);
                var metadataReference = CreateReferenceFromFilePath(assemblyFilePath);
                // Add the target assembly's own references so symbol resolution succeeds.
                var referencePaths = MetadataReading.GetReferencePaths(metadataReference);
                foreach (var referencePath in referencePaths)
                {
                    project = project.AddMetadataReference(CreateReferenceFromFilePath(referencePath));
                }

                var projectWithReference = project.AddMetadataReference(metadataReference);
                var compilation = projectWithReference.GetCompilationAsync().ConfigureAwait(false).GetAwaiter().GetResult();
                var assemblyOrModuleSymbol = compilation.GetAssemblyOrModuleSymbol(metadataReference);
                IAssemblySymbol assemblySymbol = assemblyOrModuleSymbol as IAssemblySymbol;
                IModuleSymbol moduleSymbol = assemblyOrModuleSymbol as IModuleSymbol;
                // A netmodule has no assembly symbol of its own; fall back to its containing assembly.
                if (moduleSymbol != null && assemblySymbol == null)
                {
                    assemblySymbol = moduleSymbol.ContainingAssembly;
                }

                var assemblyAttributes = MetadataReading.GetAssemblyAttributes(assemblySymbol);
                // NOTE(review): only the first three characters of the path (e.g. a drive root
                // such as "C:\") are passed here -- confirm this is intentional.
                var assemblyAttributesFileText = MetadataReading.GetAssemblyAttributesFileText(
                    LanguageNames.CSharp,
                    assemblyFilePath.Substring(0, 3),
                    assemblyAttributes);

                INamespaceSymbol namespaceSymbol = null;
                if (assemblySymbol != null)
                {
                    namespaceSymbol = assemblySymbol.GlobalNamespace;
                }
                else if (moduleSymbol != null)
                {
                    namespaceSymbol = moduleSymbol.GlobalNamespace;
                }

                var types = GetTypes(namespaceSymbol)
                    .OfType<INamedTypeSymbol>()
                    .Where(t => t.CanBeReferencedByName);

                var tempDocument = projectWithReference.AddDocument("temp", SourceText.From(""), null);
                var metadataAsSourceService = WorkspaceHacks.GetMetadataAsSourceService(tempDocument);
                if (addSourceToAsync == null)
                {
                    addSourceToAsync = ReflectAddSourceToAsync(metadataAsSourceService);
                }

                // Decompile each type in parallel; "texts" is the only shared state and is guarded by a lock.
                var texts = new Dictionary<INamedTypeSymbol, string>();
                Parallel.ForEach(
                    types,
                    new ParallelOptions
                    {
                        MaxDegreeOfParallelism = Environment.ProcessorCount
                    },
                    type =>
                    {
                        try
                        {
                            string text = "";
                            if (Configuration.GenerateMetadataAsSourceBodies)
                            {
                                var document = addSourceToAsync(
                                    tempDocument,
                                    type,
                                    CancellationToken.None).Result;
                                text = document.GetTextAsync().Result.ToString();
                            }

                            lock (texts)
                            {
                                texts.Add(type, text);
                            }
                        }
                        catch (Exception ex)
                        {
                            Log.Exception(ex, "Error when adding a MAS document to texts: " + assemblyFilePath);
                        }
                    });

                // Document file names must be unique within the project (case-insensitively).
                HashSet<string> existingFileNames = new HashSet<string>(StringComparer.OrdinalIgnoreCase);
                foreach (var kvp in texts)
                {
                    var tempProject = AddDocument(project, kvp, existingFileNames);

                    // tempProject can be null if the document was in an unutterable namespace
                    // we want to skip such documents
                    if (tempProject != null)
                    {
                        project = tempProject;
                    }
                }

                const string assemblyAttributesFileName = "AssemblyAttributes.cs";
                project = project.AddDocument(
                    assemblyAttributesFileName,
                    assemblyAttributesFileText,
                    filePath: assemblyAttributesFileName).Project;

                solution = project.Solution;
                return solution;
            }
        }
        catch (Exception ex)
        {
            Log.Exception(ex, "Failed to run metadata as source for: " + assemblyFilePath);
            return null;
        }
    }

    // Lazily-created map from assembly simple name to the path of its XML documentation file.
    private static Dictionary<string, string> assemblyNameToXmlDocFileMap = null;

    /// <summary>
    /// This has to be unique, there shouldn't be a project with this name ever
    /// </summary>
    public const string GeneratedAssemblyAttributesFileName = "GeneratedAssemblyAttributes0e71257b769ef";

    private static Dictionary<string, string> AssemblyNameToXmlDocFileMap
        => assemblyNameToXmlDocFileMap ?? (assemblyNameToXmlDocFileMap = new Dictionary<string, string>(StringComparer.OrdinalIgnoreCase));

    // Returns an XML-file-backed provider when one is registered for the assembly name,
    // otherwise the default (no-documentation) provider.
    private static DocumentationProvider GetDocumentationProvider(string assemblyFilePath, string assemblyName)
    {
        var result = DocumentationProvider.Default;
        if (AssemblyNameToXmlDocFileMap.TryGetValue(assemblyName, out string xmlFile))
        {
            result = new XmlDocumentationProvider(xmlFile);
        }

        return result;
    }

    // Adds one type's decompiled text as a project document, disambiguating duplicate
    // file names with a numeric suffix. Returns null for unutterable namespaces.
    private static Project AddDocument(
        Project project,
        KeyValuePair<INamedTypeSymbol, string> symbolAndText,
        HashSet<string> existingFileNames)
    {
        var symbol = symbolAndText.Key;
        var text = symbolAndText.Value;
        var sanitizedTypeName = Paths.SanitizeFileName(symbol.Name);
        if (symbol.IsGenericType)
        {
            // Disambiguate generic types by arity, mirroring metadata naming (e.g. List`1).
            sanitizedTypeName = sanitizedTypeName + "`" + symbol.TypeParameters.Length;
        }

        var fileName = sanitizedTypeName + ".cs";
        var folders = GetFolderChain(symbol);
        if (folders == null)
        {
            // There was an unutterable namespace name - abort the entire document
            return null;
        }

        var foldersString = string.Join(".", folders ?? Enumerable.Empty<string>());
        var fileNameAndFolders = foldersString + fileName;
        int index = 1;
        while (!existingFileNames.Add(fileNameAndFolders))
        {
            fileName = sanitizedTypeName + index + ".cs";
            fileNameAndFolders = foldersString + fileName;
            index++;
        }

        project = project.AddDocument(fileName, text, folders, fileName).Project;
        return project;
    }

    // Builds the sanitized namespace folder chain (outermost namespace first);
    // returns null when any segment cannot be referenced by name.
    private static string[] GetFolderChain(INamedTypeSymbol symbol)
    {
        var containingNamespace = symbol.ContainingNamespace;
        var folders = new List<string>();
        while (containingNamespace != null && !containingNamespace.IsGlobalNamespace)
        {
            if (!containingNamespace.CanBeReferencedByName)
            {
                // namespace name is mangled - we don't want it
                return null;
            }

            var sanitizedNamespaceName = Paths.SanitizeFolder(containingNamespace.Name);
            folders.Add(sanitizedNamespaceName);
            containingNamespace = containingNamespace.ContainingNamespace;
        }

        folders.Reverse();
        return folders.ToArray();
    }

    // Collects every type symbol reachable from the given namespace, recursively.
    private static IEnumerable<ISymbol> GetTypes(INamespaceSymbol namespaceSymbol)
    {
        var results = new List<ISymbol>();
        EnumSymbols(namespaceSymbol, results.Add);
        return results;
    }

    // Depth-first traversal over nested namespaces, invoking the action for each contained type.
    private static void EnumSymbols(INamespaceSymbol namespaceSymbol, Action<ISymbol> action)
    {
        foreach (var subNamespace in namespaceSymbol.GetNamespaceMembers())
        {
            EnumSymbols(subNamespace, action);
        }

        foreach (var topLevelType in namespaceSymbol.GetTypeMembers())
        {
            action(topLevelType);
        }
    }
}
| KirillOsenkov/SourceBrowser | src/HtmlGenerator/Pass1-Generation/MetadataAsSource.cs | C# | apache-2.0 | 11,271 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package test.types;
//
// IDL:TestStruct120Seq:1.0
//
final public class TestStruct120SeqHolder implements org.omg.CORBA.portable.Streamable
{
    /** The held sequence value. */
    public TestStruct1[] value;

    public TestStruct120SeqHolder()
    {
    }

    public TestStruct120SeqHolder(TestStruct1[] initial)
    {
        value = initial;
    }

    /** Reads the sequence from the stream via the generated helper. */
    public void _read(org.omg.CORBA.portable.InputStream in)
    {
        value = TestStruct120SeqHelper.read(in);
    }

    /** Writes the held sequence to the stream via the generated helper. */
    public void _write(org.omg.CORBA.portable.OutputStream out)
    {
        TestStruct120SeqHelper.write(out, value);
    }

    /** Returns the TypeCode describing the sequence. */
    public org.omg.CORBA.TypeCode _type()
    {
        return TestStruct120SeqHelper.type();
    }
}
| apache/geronimo-yoko | yoko-core/src/test/java/test/types/TestStruct120SeqHolder.java | Java | apache-2.0 | 1,502 |
package cn.xaut.shop.pojo;
/**
* DisputeFile entity. @author MyEclipse Persistence Tools
*/
public class DisputeFile implements java.io.Serializable {
// Fields
/**
*
*/
private static final long serialVersionUID = -1700722068784179239L;
private Integer fileid;
//private Integer disputeid;
private String name;
private String url;
// Constructors
private Dispute dispute;
public Dispute getDispute() {
return dispute;
}
public void setDispute(Dispute dispute) {
this.dispute = dispute;
}
public Integer getFileid() {
return this.fileid;
}
public void setFileid(Integer fileid) {
this.fileid = fileid;
}
public String getName() {
return this.name;
}
public void setName(String name) {
this.name = name;
}
public String getUrl() {
return this.url;
}
public void setUrl(String url) {
this.url = url;
}
} | wanliyang10010/Shop | src/cn/xaut/shop/pojo/DisputeFile.java | Java | apache-2.0 | 867 |
using EPiServer.Commerce.Catalog.ContentTypes;
using EPiServer.Commerce.Catalog.DataAnnotations;
using EPiServer.Core;
using EPiServer.DataAnnotations;
using System.ComponentModel.DataAnnotations;
using EPiServer.Reference.Commerce.Site.Features.Product.Models;
namespace Episerver.DataImporter.Models
{
[CatalogContentType(DisplayName = "GenericVariation", GUID = "ba18ca7a-e74b-46fa-91ed-1957253ab81f", Description = "")]
public class GenericVariation : BaseVariant
{
    /// <summary>Brand name; searchable, tokenized, and included in default search.</summary>
    [Searchable]
    [Tokenize]
    [CultureSpecific]
    [IncludeInDefaultSearch]
    [BackingType(typeof(PropertyString))]
    [Display(Name = "Brand", Order = 1)]
    public virtual string Brand { get; set; }

    /// <summary>Short rich-text description shown for the variation.</summary>
    [Searchable]
    [CultureSpecific]
    [Tokenize]
    [IncludeInDefaultSearch]
    [Display(Name = "Description", Order = 2)]
    public virtual XhtmlString Description { get; set; }

    /// <summary>Extended rich-text description.</summary>
    [Searchable]
    [CultureSpecific]
    [Tokenize]
    [IncludeInDefaultSearch]
    [Display(Name = "Long Description", Order = 3)]
    public virtual XhtmlString LongDescription { get; set; }

    /// <summary>Teaser text; culture-specific but not indexed for search.</summary>
    [CultureSpecific]
    [Display(Name = "Product Teaser", Order = 4)]
    public virtual XhtmlString ProductTeaser { get; set; }

    /// <summary>Manufacturer model number.</summary>
    [BackingType(typeof(PropertyString))]
    [Display(Name = "ModelNumber", Order = 5)]
    public virtual string ModelNumber { get; set; }

    /// <summary>Universal Product Code.</summary>
    [BackingType(typeof(PropertyString))]
    [Display(Name = "UPC", Order = 6)]
    public virtual string Upc { get; set; }
}
} | vnbaaij/QuicksilverB2B | Sources/EPiServer.Reference.Commerce.Site/Features/Product/Models/GenericVariation.cs | C# | apache-2.0 | 1,602 |
#
# Author:: AJ Christensen (<aj@chef.io>)
# Copyright:: Copyright 2008-2016, Chef Software Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require "chef/provider/group/groupadd"
require "etc"
class Chef
class Provider
class Group
class Suse < Chef::Provider::Group::Groupadd
  # Applies only to older SUSE/openSUSE releases whose groupmod supports -A/-R.
  provides :group, platform: "opensuse", platform_version: "< 12.3"
  provides :group, platform: "suse", platform_version: "< 12.0"

  def load_current_resource
    super
  end

  # Preconditions checked before actions run (supports why-run mode).
  def define_resource_requirements
    super
    requirements.assert(:all_actions) do |a|
      a.assertion { ::File.exist?("/usr/sbin/groupmod") }
      a.failure_message Chef::Exceptions::Group, "Could not find binary /usr/sbin/groupmod for #{new_resource.name}"
      # No whyrun alternative: this component should be available in the base install of any given system that uses it
    end

    requirements.assert(:create, :manage, :modify) do |a|
      # Every user being added must already exist in the passwd database;
      # Etc.getpwnam raises for unknown users, which we map to a failed assertion.
      a.assertion do
        begin
          to_add(new_resource.members).all? { |member| Etc.getpwnam(member) }
        rescue
          false
        end
      end
      a.failure_message Chef::Exceptions::Group, "Could not add users #{to_add(new_resource.members).join(', ')} to #{new_resource.group_name}: one of these users does not exist"
      a.whyrun "Could not find one of these users: #{to_add(new_resource.members).join(', ')}. Assuming it will be created by a prior step"
    end
  end

  # Reconciles membership: first removes members not in the desired list, then adds missing ones.
  def set_members(members)
    to_remove(members).each do |member|
      remove_member(member)
    end

    to_add(members).each do |member|
      add_member(member)
    end
  end

  # Desired members not yet present in the group.
  def to_add(members)
    members - current_resource.members
  end

  # groupmod -A adds a single user to the group on SUSE.
  def add_member(member)
    shell_out!("groupmod", "-A", member, new_resource.group_name)
  end

  # Current members that should no longer belong to the group.
  def to_remove(members)
    current_resource.members - members
  end

  # groupmod -R removes a single user from the group on SUSE.
  def remove_member(member)
    shell_out!("groupmod", "-R", member, new_resource.group_name)
  end
end
end
end
end
| tomdoherty/chef | lib/chef/provider/group/suse.rb | Ruby | apache-2.0 | 2,775 |
package org.epnoi.storage.document;
import org.epnoi.storage.document.domain.ItemDocument;
import org.epnoi.storage.document.repository.BaseDocumentRepository;
import org.epnoi.storage.document.repository.ItemDocumentRepository;
import org.springframework.beans.factory.annotation.Autowired;
/**
* Created by cbadenes on 22/12/15.
*/
public class ItemDocumentRepositoryTest extends BaseDocumentRepositoryTest<ItemDocument> {

    @Autowired
    ItemDocumentRepository repository;

    // Supplies the concrete repository under test to the shared base-class test suite.
    @Override
    public BaseDocumentRepository<ItemDocument> getRepository() {
        return repository;
    }

    // Builds the fixture document persisted and verified by the base-class tests.
    @Override
    public ItemDocument getEntity() {
        ItemDocument document = new ItemDocument();
        document.setUri("items/72ce5395-6268-439a-947e-802229e7f022");
        document.setCreationTime("2015-12-21T16:18:59Z");
        document.setFormat("pdf");
        document.setLanguage("en");
        document.setTitle("This is an example");
        document.setSubject("semantic web, e-science");
        document.setDescription("for testing purposes");
        document.setUrl("file:://opt/drinventor/example.pdf");
        document.setContent("Miniopterus aelleni is a bat in the genus Miniopterus found in the Comoro Islands and Madagascar. It is a small, brown bat, with a forearm length of 35 to 41 mm (1.4 to 1.6 in). The long tragus (a projection in the outer ear) has a broad base and a blunt or rounded tip. The uropatagium (tail membrane) is sparsely haired. The palate is flat and there are distinct diastemata (gaps) between the upper canines and premolars. Populations of this species were previously included in Miniopterus manavi, but recent molecular studies revealed that M");
        document.setTokens("Miniopterus aelleni be a bat in the genus Miniopterus find in the Comoro Islands and Madagascar . It be a small , brown bat , with a forearm length of 35 to 41 mm ( 1.4 to 1.6 in ) . The long tragus ( a projection in the outer ear ) have a broad base and a blunt or round tip . The uropatagium ( tail membrane ) be sparsely haired . The palate be flat and there be distinct diastema ( gap ) between the upper canine and premolar . Populations of this specie be previously include in Miniopterus manavi , but recent molecular study reveal that M ");
        return document;
    }
}
| fitash/epnoi | storage/src/test/java/org/epnoi/storage/document/ItemDocumentRepositoryTest.java | Java | apache-2.0 | 2,315 |
/*
* Copyright 2002-2013 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.web.bind;
import java.beans.PropertyEditorSupport;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import org.junit.Test;
import org.springframework.beans.PropertyValue;
import org.springframework.beans.PropertyValues;
import org.springframework.mock.web.test.MockHttpServletRequest;
import org.springframework.tests.sample.beans.ITestBean;
import org.springframework.tests.sample.beans.TestBean;
import static org.junit.Assert.*;
/**
* @author Rod Johnson
* @author Juergen Hoeller
* @author Chris Beams
* @author Scott Andrews
*/
public class ServletRequestDataBinderTests {
@Test
public void testBindingWithNestedObjectCreation() throws Exception {
	TestBean target = new TestBean();
	ServletRequestDataBinder dataBinder = new ServletRequestDataBinder(target, "person");
	// Create the nested TestBean on demand whenever any value is bound to "spouse".
	dataBinder.registerCustomEditor(ITestBean.class, new PropertyEditorSupport() {
		@Override
		public void setAsText(String text) throws IllegalArgumentException {
			setValue(new TestBean());
		}
	});

	MockHttpServletRequest servletRequest = new MockHttpServletRequest();
	servletRequest.addParameter("spouse", "someValue");
	servletRequest.addParameter("spouse.name", "test");
	dataBinder.bind(servletRequest);

	assertNotNull(target.getSpouse());
	assertEquals("test", target.getSpouse().getName());
}
@Test
public void testFieldPrefixCausesFieldReset() throws Exception {
TestBean target = new TestBean();
ServletRequestDataBinder binder = new ServletRequestDataBinder(target);
MockHttpServletRequest request = new MockHttpServletRequest();
request.addParameter("_postProcessed", "visible");
request.addParameter("postProcessed", "on");
binder.bind(request);
assertTrue(target.isPostProcessed());
request.removeParameter("postProcessed");
binder.bind(request);
assertFalse(target.isPostProcessed());
}
@Test
public void testFieldPrefixCausesFieldResetWithIgnoreUnknownFields() throws Exception {
TestBean target = new TestBean();
ServletRequestDataBinder binder = new ServletRequestDataBinder(target);
binder.setIgnoreUnknownFields(false);
MockHttpServletRequest request = new MockHttpServletRequest();
request.addParameter("_postProcessed", "visible");
request.addParameter("postProcessed", "on");
binder.bind(request);
assertTrue(target.isPostProcessed());
request.removeParameter("postProcessed");
binder.bind(request);
assertFalse(target.isPostProcessed());
}
@Test
public void testFieldDefault() throws Exception {
TestBean target = new TestBean();
ServletRequestDataBinder binder = new ServletRequestDataBinder(target);
MockHttpServletRequest request = new MockHttpServletRequest();
request.addParameter("!postProcessed", "off");
request.addParameter("postProcessed", "on");
binder.bind(request);
assertTrue(target.isPostProcessed());
request.removeParameter("postProcessed");
binder.bind(request);
assertFalse(target.isPostProcessed());
}
@Test
public void testFieldDefaultPreemptsFieldMarker() throws Exception {
TestBean target = new TestBean();
ServletRequestDataBinder binder = new ServletRequestDataBinder(target);
MockHttpServletRequest request = new MockHttpServletRequest();
request.addParameter("!postProcessed", "on");
request.addParameter("_postProcessed", "visible");
request.addParameter("postProcessed", "on");
binder.bind(request);
assertTrue(target.isPostProcessed());
request.removeParameter("postProcessed");
binder.bind(request);
assertTrue(target.isPostProcessed());
request.removeParameter("!postProcessed");
binder.bind(request);
assertFalse(target.isPostProcessed());
}
@Test
public void testFieldDefaultNonBoolean() throws Exception {
TestBean target = new TestBean();
ServletRequestDataBinder binder = new ServletRequestDataBinder(target);
MockHttpServletRequest request = new MockHttpServletRequest();
request.addParameter("!name", "anonymous");
request.addParameter("name", "Scott");
binder.bind(request);
assertEquals("Scott", target.getName());
request.removeParameter("name");
binder.bind(request);
assertEquals("anonymous", target.getName());
}
@Test
public void testWithCommaSeparatedStringArray() throws Exception {
TestBean target = new TestBean();
ServletRequestDataBinder binder = new ServletRequestDataBinder(target);
MockHttpServletRequest request = new MockHttpServletRequest();
request.addParameter("stringArray", "bar");
request.addParameter("stringArray", "abc");
request.addParameter("stringArray", "123,def");
binder.bind(request);
assertEquals("Expected all three items to be bound", 3, target.getStringArray().length);
request.removeParameter("stringArray");
request.addParameter("stringArray", "123,def");
binder.bind(request);
assertEquals("Expected only 1 item to be bound", 1, target.getStringArray().length);
}
@Test
public void testBindingWithNestedObjectCreationAndWrongOrder() throws Exception {
TestBean tb = new TestBean();
ServletRequestDataBinder binder = new ServletRequestDataBinder(tb, "person");
binder.registerCustomEditor(ITestBean.class, new PropertyEditorSupport() {
@Override
public void setAsText(String text) throws IllegalArgumentException {
setValue(new TestBean());
}
});
MockHttpServletRequest request = new MockHttpServletRequest();
request.addParameter("spouse.name", "test");
request.addParameter("spouse", "someValue");
binder.bind(request);
assertNotNull(tb.getSpouse());
assertEquals("test", tb.getSpouse().getName());
}
@Test
public void testNoPrefix() throws Exception {
MockHttpServletRequest request = new MockHttpServletRequest();
request.addParameter("forname", "Tony");
request.addParameter("surname", "Blair");
request.addParameter("age", "" + 50);
ServletRequestParameterPropertyValues pvs = new ServletRequestParameterPropertyValues(request);
doTestTony(pvs);
}
@Test
public void testPrefix() throws Exception {
MockHttpServletRequest request = new MockHttpServletRequest();
request.addParameter("test_forname", "Tony");
request.addParameter("test_surname", "Blair");
request.addParameter("test_age", "" + 50);
ServletRequestParameterPropertyValues pvs = new ServletRequestParameterPropertyValues(request);
assertTrue("Didn't fidn normal when given prefix", !pvs.contains("forname"));
assertTrue("Did treat prefix as normal when not given prefix", pvs.contains("test_forname"));
pvs = new ServletRequestParameterPropertyValues(request, "test");
doTestTony(pvs);
}
@Test
public void testNoParameters() throws Exception {
MockHttpServletRequest request = new MockHttpServletRequest();
ServletRequestParameterPropertyValues pvs = new ServletRequestParameterPropertyValues(request);
assertTrue("Found no parameters", pvs.getPropertyValues().length == 0);
}
@Test
public void testMultipleValuesForParameter() throws Exception {
MockHttpServletRequest request = new MockHttpServletRequest();
String[] original = new String[] {"Tony", "Rod"};
request.addParameter("forname", original);
ServletRequestParameterPropertyValues pvs = new ServletRequestParameterPropertyValues(request);
assertTrue("Found 1 parameter", pvs.getPropertyValues().length == 1);
assertTrue("Found array value", pvs.getPropertyValue("forname").getValue() instanceof String[]);
String[] values = (String[]) pvs.getPropertyValue("forname").getValue();
assertEquals("Correct values", Arrays.asList(values), Arrays.asList(original));
}
/**
* Must contain: forname=Tony surname=Blair age=50
*/
protected void doTestTony(PropertyValues pvs) throws Exception {
assertTrue("Contains 3", pvs.getPropertyValues().length == 3);
assertTrue("Contains forname", pvs.contains("forname"));
assertTrue("Contains surname", pvs.contains("surname"));
assertTrue("Contains age", pvs.contains("age"));
assertTrue("Doesn't contain tory", !pvs.contains("tory"));
PropertyValue[] ps = pvs.getPropertyValues();
Map<String, String> m = new HashMap<String, String>();
m.put("forname", "Tony");
m.put("surname", "Blair");
m.put("age", "50");
for (int i = 0; i < ps.length; i++) {
Object val = m.get(ps[i].getName());
assertTrue("Can't have unexpected value", val != null);
assertTrue("Val i string", val instanceof String);
assertTrue("val matches expected", val.equals(ps[i].getValue()));
m.remove(ps[i].getName());
}
assertTrue("Map size is 0", m.size() == 0);
}
}
| shivpun/spring-framework | spring-web/src/test/java/org/springframework/web/bind/ServletRequestDataBinderTests.java | Java | apache-2.0 | 9,059 |
/*
* Licensed to Metamarkets Group Inc. (Metamarkets) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Metamarkets licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package io.druid.server.lookup.namespace;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;
import com.google.common.io.Files;
import io.druid.data.SearchableVersionedDataFinder;
import io.druid.jackson.DefaultObjectMapper;
import io.druid.java.util.common.lifecycle.Lifecycle;
import io.druid.query.lookup.namespace.CacheGenerator;
import io.druid.query.lookup.namespace.ExtractionNamespace;
import io.druid.query.lookup.namespace.JdbcExtractionNamespace;
import io.druid.query.lookup.namespace.UriExtractionNamespace;
import io.druid.query.lookup.namespace.UriExtractionNamespaceTest;
import io.druid.segment.loading.LocalFileTimestampVersionFinder;
import io.druid.server.lookup.namespace.cache.CacheScheduler;
import io.druid.server.lookup.namespace.cache.OnHeapNamespaceExtractionCacheManager;
import io.druid.server.metrics.NoopServiceEmitter;
import org.joda.time.Period;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.io.File;
import java.io.Writer;
import java.nio.charset.StandardCharsets;
import java.util.Map;
/**
*
*/
/**
 * Tests the namespace-extraction cache scheduler against URI-backed
 * extraction namespaces loaded from temporary JSON files.
 */
public class NamespacedExtractorModuleTest
{
  private static final ObjectMapper mapper = UriExtractionNamespaceTest.registerTypes(new DefaultObjectMapper());
  // Recreated per test in setUp(); torn down via the lifecycle.
  private CacheScheduler scheduler;
  private Lifecycle lifecycle;

  @Rule
  public final TemporaryFolder temporaryFolder = new TemporaryFolder();

  @Before
  public void setUp() throws Exception
  {
    // Wire up cache generators for the two supported namespace types; the
    // URI generator only knows how to resolve "file" URIs in these tests.
    final Map<Class<? extends ExtractionNamespace>, CacheGenerator<?>> factoryMap =
        ImmutableMap.<Class<? extends ExtractionNamespace>, CacheGenerator<?>>of(
            UriExtractionNamespace.class,
            new UriCacheGenerator(
                ImmutableMap.<String, SearchableVersionedDataFinder>of(
                    "file",
                    new LocalFileTimestampVersionFinder()
                )
            ),
            JdbcExtractionNamespace.class, new JdbcCacheGenerator()
        );
    lifecycle = new Lifecycle();
    lifecycle.start();
    NoopServiceEmitter noopServiceEmitter = new NoopServiceEmitter();
    scheduler = new CacheScheduler(
        noopServiceEmitter,
        factoryMap,
        new OnHeapNamespaceExtractionCacheManager(lifecycle, noopServiceEmitter, new NamespaceExtractionConfig())
    );
  }

  @After
  public void tearDown()
  {
    lifecycle.stop();
  }

  @Test
  public void testNewTask() throws Exception
  {
    // Write a one-entry JSON map and generate a cache from it directly.
    final File tmpFile = temporaryFolder.newFile();
    try (Writer out = Files.newWriter(tmpFile, StandardCharsets.UTF_8)) {
      out.write(mapper.writeValueAsString(ImmutableMap.<String, String>of("foo", "bar")));
    }
    final UriCacheGenerator factory = new UriCacheGenerator(
        ImmutableMap.<String, SearchableVersionedDataFinder>of("file", new LocalFileTimestampVersionFinder())
    );
    final UriExtractionNamespace namespace = new UriExtractionNamespace(
        tmpFile.toURI(),
        null, null,
        new UriExtractionNamespace.ObjectMapperFlatDataParser(
            UriExtractionNamespaceTest.registerTypes(new DefaultObjectMapper())
        ),
        new Period(0),
        null
    );
    CacheScheduler.VersionedCache versionedCache = factory.generateCache(namespace, null, null, scheduler);
    Assert.assertNotNull(versionedCache);
    Map<String, String> map = versionedCache.getCache();
    Assert.assertEquals("bar", map.get("foo"));
    Assert.assertNull(map.get("baz"));
  }

  @Test
  public void testListNamespaces() throws Exception
  {
    // Scheduling a namespace should register exactly one active entry.
    final File tmpFile = temporaryFolder.newFile();
    try (Writer out = Files.newWriter(tmpFile, StandardCharsets.UTF_8)) {
      out.write(mapper.writeValueAsString(ImmutableMap.<String, String>of("foo", "bar")));
    }
    final UriExtractionNamespace namespace = new UriExtractionNamespace(
        tmpFile.toURI(),
        null, null,
        new UriExtractionNamespace.ObjectMapperFlatDataParser(UriExtractionNamespaceTest.registerTypes(new DefaultObjectMapper())),
        new Period(0),
        null
    );
    try (CacheScheduler.Entry entry = scheduler.scheduleAndWait(namespace, 1_000)) {
      Assert.assertNotNull(entry);
      entry.awaitTotalUpdates(1);
      Assert.assertEquals(1, scheduler.getActiveEntries());
    }
  }

  @Test//(timeout = 10_000)
  public void testDeleteNamespaces() throws Exception
  {
    // Closing the entry (try-with-resources) deletes the namespace.
    final File tmpFile = temporaryFolder.newFile();
    try (Writer out = Files.newWriter(tmpFile, StandardCharsets.UTF_8)) {
      out.write(mapper.writeValueAsString(ImmutableMap.<String, String>of("foo", "bar")));
    }
    final UriExtractionNamespace namespace = new UriExtractionNamespace(
        tmpFile.toURI(),
        null, null,
        new UriExtractionNamespace.ObjectMapperFlatDataParser(
            UriExtractionNamespaceTest.registerTypes(new DefaultObjectMapper())
        ),
        new Period(0),
        null
    );
    try (CacheScheduler.Entry entry = scheduler.scheduleAndWait(namespace, 1_000)) {
      Assert.assertNotNull(entry);
    }
  }

  @Test
  public void testNewUpdate() throws Exception
  {
    final File tmpFile = temporaryFolder.newFile();
    try (Writer out = Files.newWriter(tmpFile, StandardCharsets.UTF_8)) {
      out.write(mapper.writeValueAsString(ImmutableMap.<String, String>of("foo", "bar")));
    }
    final UriExtractionNamespace namespace = new UriExtractionNamespace(
        tmpFile.toURI(),
        null, null,
        new UriExtractionNamespace.ObjectMapperFlatDataParser(
            UriExtractionNamespaceTest.registerTypes(new DefaultObjectMapper())
        ),
        new Period(0),
        null
    );
    Assert.assertEquals(0, scheduler.getActiveEntries());
    try (CacheScheduler.Entry entry = scheduler.scheduleAndWait(namespace, 10_000)) {
      Assert.assertNotNull(entry);
      entry.awaitTotalUpdates(1);
      Assert.assertEquals(1, scheduler.getActiveEntries());
    }
  }
}
| taochaoqiang/druid | extensions-core/lookups-cached-global/src/test/java/io/druid/server/lookup/namespace/NamespacedExtractorModuleTest.java | Java | apache-2.0 | 6,814 |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/appflow/model/SourceFieldProperties.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <utility>
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
namespace Aws
{
namespace Appflow
{
namespace Model
{

SourceFieldProperties::SourceFieldProperties() :
    m_isRetrievable(false),
    m_isRetrievableHasBeenSet(false),
    m_isQueryable(false),
    m_isQueryableHasBeenSet(false)
{
}

// Delegate to the default constructor so the member-initialization list is
// written exactly once, then parse the supplied JSON via operator=.
SourceFieldProperties::SourceFieldProperties(JsonView jsonValue) :
    SourceFieldProperties()
{
  *this = jsonValue;
}

// Populate fields from JSON; each "HasBeenSet" flag records whether the
// corresponding key was present so Jsonize() can round-trip only set fields.
SourceFieldProperties& SourceFieldProperties::operator =(JsonView jsonValue)
{
  if(jsonValue.ValueExists("isRetrievable"))
  {
    m_isRetrievable = jsonValue.GetBool("isRetrievable");

    m_isRetrievableHasBeenSet = true;
  }

  if(jsonValue.ValueExists("isQueryable"))
  {
    m_isQueryable = jsonValue.GetBool("isQueryable");

    m_isQueryableHasBeenSet = true;
  }

  return *this;
}

// Serialize back to JSON, emitting only the fields that were explicitly set.
JsonValue SourceFieldProperties::Jsonize() const
{
  JsonValue payload;

  if(m_isRetrievableHasBeenSet)
  {
   payload.WithBool("isRetrievable", m_isRetrievable);
  }

  if(m_isQueryableHasBeenSet)
  {
   payload.WithBool("isQueryable", m_isQueryable);
  }

  return payload;
}

} // namespace Model
} // namespace Appflow
} // namespace Aws
| awslabs/aws-sdk-cpp | aws-cpp-sdk-appflow/source/model/SourceFieldProperties.cpp | C++ | apache-2.0 | 1,502 |
/*
* C++ sockets on Unix and Windows
* Copyright (C) 2002
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "PracticalSocket.h"

#ifdef WIN32
  #include <winsock.h>         // For socket(), connect(), send(), and recv()
  typedef int socklen_t;
  typedef char raw_type;       // Type used for raw data on this platform
#else
  #include <sys/types.h>       // For data types
  #include <sys/socket.h>      // For socket(), connect(), send(), and recv()
  #include <netdb.h>           // For gethostbyname()
  #include <arpa/inet.h>       // For inet_addr()
  #include <unistd.h>          // For close()
  #include <netinet/in.h>      // For sockaddr_in
  #include <sys/time.h>        // For struct timeval (SO_RCVTIMEO)
  typedef void raw_type;       // Type used for raw data on this platform
#endif

#include <errno.h>             // For errno
#include <cstring>             // For memset(), strerror()
using namespace std;
#ifdef WIN32
static bool initialized = false;
#endif
// SocketException Code

// Build the exception message; when inclSysMsg is true, append the system
// error text for the current errno.
SocketException::SocketException(const string &message, bool inclSysMsg)
  throw() : userMessage(message) {
  if (inclSysMsg) {
    userMessage.append(": ");
#ifdef WIN32
    // strerror_s() is the bounds-checked MSVC variant; most POSIX
    // toolchains do not provide it, so only use it on Windows.
    char errMsg[256];
    strerror_s(errMsg, sizeof(errMsg) / sizeof(char), errno);
    userMessage.append(errMsg);
#else
    userMessage.append(strerror(errno));
#endif
  }
}

// No resources to release.
SocketException::~SocketException() throw() {
}

// Human-readable description (std::exception interface).
const char *SocketException::what() const throw() {
  return userMessage.c_str();
}
// Resolve `address` (hostname or dotted-quad string) and fill `addr` with
// the resulting IPv4 address and `port`, both in network byte order.
// Throws SocketException if the name cannot be resolved.
static void fillAddr(const string &address, unsigned short port,
                     sockaddr_in &addr) {
  memset(&addr, 0, sizeof(addr));  // Zero out address structure
  addr.sin_family = AF_INET;       // Internet address

  // NOTE(review): gethostbyname() is IPv4-only, not thread-safe, and
  // obsolete in favor of getaddrinfo(); kept here to preserve behavior.
  hostent *host;  // Resolve name
  if ((host = gethostbyname(address.c_str())) == NULL) {
    // strerror() will not work for gethostbyname() and hstrerror()
    // is supposedly obsolete
    throw SocketException("Failed to resolve name (gethostbyname())");
  }
  // Use the first address in the resolved list.
  addr.sin_addr.s_addr = *((unsigned long *) host->h_addr_list[0]);

  addr.sin_port = htons(port);     // Assign port in network byte order
}

// Socket Code

// Create a new socket of the given type/protocol. On Windows, the WinSock
// DLL is lazily initialized the first time any Socket is constructed.
Socket::Socket(int type, int protocol) throw(SocketException) {
#ifdef WIN32
  if (!initialized) {
    WORD wVersionRequested;
    WSADATA wsaData;
    wVersionRequested = MAKEWORD(2, 0);              // Request WinSock v2.0
    if (WSAStartup(wVersionRequested, &wsaData) != 0) {  // Load WinSock DLL
      throw SocketException("Unable to load WinSock DLL");
    }
    initialized = true;
  }
#endif

  // Make a new socket
  if ((sockDesc = static_cast<int>(socket(PF_INET, type, protocol))) < 0) {
    throw SocketException("Socket creation failed (socket())", true);
  }
}

// Wrap an already-created descriptor (used by TCPServerSocket::accept()).
Socket::Socket(int sockDesc) {
  this->sockDesc = sockDesc;
}

// Close the underlying descriptor (RAII cleanup).
Socket::~Socket() {
#ifdef WIN32
  ::closesocket(sockDesc);
#else
  ::close(sockDesc);
#endif
  sockDesc = -1;
}

// Return the locally bound IPv4 address as a dotted-quad string.
string Socket::getLocalAddress() throw(SocketException) {
  sockaddr_in addr;
  unsigned int addr_len = sizeof(addr);

  if (getsockname(sockDesc, (sockaddr *)&addr, (socklen_t *)&addr_len) < 0) {
    throw SocketException("Fetch of local address failed (getsockname())", true);
  }
  return inet_ntoa(addr.sin_addr);
}

// Return the locally bound port in host byte order.
unsigned short Socket::getLocalPort() throw(SocketException) {
  sockaddr_in addr;
  unsigned int addr_len = sizeof(addr);

  if (getsockname(sockDesc, (sockaddr *)&addr, (socklen_t *)&addr_len) < 0) {
    throw SocketException("Fetch of local port failed (getsockname())", true);
  }
  return ntohs(addr.sin_port);
}

// Bind the socket to the given local port on all interfaces (INADDR_ANY).
void Socket::setLocalPort(unsigned short localPort) throw(SocketException) {
  // Bind the socket to its port
  sockaddr_in localAddr;
  memset(&localAddr, 0, sizeof(localAddr));
  localAddr.sin_family = AF_INET;
  localAddr.sin_addr.s_addr = htonl(INADDR_ANY);
  localAddr.sin_port = htons(localPort);

  if (bind(sockDesc, (sockaddr *)&localAddr, sizeof(sockaddr_in)) < 0) {
    throw SocketException("Set of local port failed (bind())", true);
  }
}

// Bind the socket to a specific local address and port.
void Socket::setLocalAddressAndPort(const string &localAddress,
    unsigned short localPort) throw(SocketException) {
  // Get the address of the requested host
  sockaddr_in localAddr;
  fillAddr(localAddress, localPort, localAddr);

  if (bind(sockDesc, (sockaddr *)&localAddr, sizeof(sockaddr_in)) < 0) {
    throw SocketException("Set of local address and port failed (bind())", true);
  }
}

// Release WinSock library resources on Windows; no-op elsewhere.
void Socket::cleanUp() throw(SocketException) {
#ifdef WIN32
  if (WSACleanup() != 0) {
    throw SocketException("WSACleanup() failed");
  }
#endif
}

// Map a service name (e.g. "http") to a port number for the given protocol;
// a numeric string is converted directly.
unsigned short Socket::resolveService(const string &service,
                                      const string &protocol) {
  struct servent *serv;        /* Structure containing service information */

  if ((serv = getservbyname(service.c_str(), protocol.c_str())) == NULL)
    return (unsigned short) atoi(service.c_str());  /* Service is port number */
  else
    return ntohs(serv->s_port);    /* Found port (network byte order) by name */
}
// CommunicatingSocket Code

// Construct a fresh socket of the given type/protocol.
CommunicatingSocket::CommunicatingSocket(int type, int protocol)
    throw(SocketException) : Socket(type, protocol) {
}

// Wrap an existing connected descriptor.
CommunicatingSocket::CommunicatingSocket(int newConnSD) : Socket(newConnSD) {
}

// Connect to the given foreign host/port (establishes the TCP connection,
// or sets the default peer for a datagram socket).
void CommunicatingSocket::connect(const string &foreignAddress,
    unsigned short foreignPort) throw(SocketException) {
  // Get the address of the requested host
  sockaddr_in destAddr;
  fillAddr(foreignAddress, foreignPort, destAddr);

  // Try to connect to the given port
  if (::connect(sockDesc, (sockaddr *)&destAddr, sizeof(destAddr)) < 0) {
    throw SocketException("Connect failed (connect())", true);
  }
}

// Send bufferLen bytes; throws on error.
// NOTE(review): a short send (return < bufferLen) is not detected here --
// only a negative return is treated as failure.
void CommunicatingSocket::send(const void *buffer, int bufferLen)
    throw(SocketException) {
  if (::send(sockDesc, (raw_type *)buffer, bufferLen, 0) < 0) {
    throw SocketException("Send failed (send())", true);
  }
}

// Receive up to bufferLen bytes; returns the number of bytes read
// (0 indicates end of stream on a connected socket).
int CommunicatingSocket::recv(void *buffer, int bufferLen)
    throw(SocketException) {
  int rtn;
  if ((rtn = ::recv(sockDesc, (raw_type *)buffer, bufferLen, 0)) < 0) {
    throw SocketException("Received failed (recv())", true);
  }

  return rtn;
}

// Return the peer's IPv4 address as a dotted-quad string.
string CommunicatingSocket::getForeignAddress()
    throw(SocketException) {
  sockaddr_in addr;
  unsigned int addr_len = sizeof(addr);

  if (getpeername(sockDesc, (sockaddr *)&addr, (socklen_t *)&addr_len) < 0) {
    throw SocketException("Fetch of foreign address failed (getpeername())", true);
  }
  return inet_ntoa(addr.sin_addr);
}

// Return the peer's port in host byte order.
unsigned short CommunicatingSocket::getForeignPort() throw(SocketException) {
  sockaddr_in addr;
  unsigned int addr_len = sizeof(addr);

  if (getpeername(sockDesc, (sockaddr *)&addr, (socklen_t *)&addr_len) < 0) {
    throw SocketException("Fetch of foreign port failed (getpeername())", true);
  }
  return ntohs(addr.sin_port);
}

// TCPSocket Code

// Unconnected TCP socket.
TCPSocket::TCPSocket()
    throw(SocketException) : CommunicatingSocket(SOCK_STREAM,
    IPPROTO_TCP) {
}

// TCP socket connected immediately to the given host/port.
TCPSocket::TCPSocket(const string &foreignAddress, unsigned short foreignPort)
    throw(SocketException) : CommunicatingSocket(SOCK_STREAM, IPPROTO_TCP) {
  connect(foreignAddress, foreignPort);
}

// Wrap a descriptor returned by accept().
TCPSocket::TCPSocket(int newConnSD) : CommunicatingSocket(newConnSD) {
}

// TCPServerSocket Code

// Listening socket bound to the given port on all interfaces.
TCPServerSocket::TCPServerSocket(unsigned short localPort, int queueLen)
    throw(SocketException) : Socket(SOCK_STREAM, IPPROTO_TCP) {
  setLocalPort(localPort);
  setListen(queueLen);
}

// Listening socket bound to a specific local address and port.
TCPServerSocket::TCPServerSocket(const string &localAddress,
    unsigned short localPort, int queueLen)
    throw(SocketException) : Socket(SOCK_STREAM, IPPROTO_TCP) {
  setLocalAddressAndPort(localAddress, localPort);
  setListen(queueLen);
}

// Block until a client connects; the caller owns (and must delete) the
// returned TCPSocket.
TCPSocket *TCPServerSocket::accept() throw(SocketException) {
  int newConnSD;
  if ((newConnSD = static_cast<int>(::accept(sockDesc, NULL, 0))) < 0) {
    throw SocketException("Accept failed (accept())", true);
  }

  return new TCPSocket(newConnSD);
}

// Put the socket into listening state with the given backlog.
void TCPServerSocket::setListen(int queueLen) throw(SocketException) {
  if (listen(sockDesc, queueLen) < 0) {
    throw SocketException("Set listening socket failed (listen())", true);
  }
}
// UDPSocket Code

// Unbound UDP socket; broadcast permission is requested up front
// (see setBroadcast()).
UDPSocket::UDPSocket() throw(SocketException) : CommunicatingSocket(SOCK_DGRAM,
    IPPROTO_UDP) {
  setBroadcast();
}

// UDP socket bound to the given local port on all interfaces.
UDPSocket::UDPSocket(unsigned short localPort) throw(SocketException) :
    CommunicatingSocket(SOCK_DGRAM, IPPROTO_UDP) {
  setLocalPort(localPort);
  setBroadcast();
}

// UDP socket bound to a specific local address and port.
UDPSocket::UDPSocket(const string &localAddress, unsigned short localPort)
    throw(SocketException) : CommunicatingSocket(SOCK_DGRAM, IPPROTO_UDP) {
  setLocalAddressAndPort(localAddress, localPort);
  setBroadcast();
}
void UDPSocket::setReceiveTimeout(int timeout_msec) {
setsockopt(sockDesc, SOL_SOCKET, SO_RCVTIMEO,
(char*)&timeout_msec, sizeof(int));
}
// Request SO_BROADCAST; failures are deliberately ignored (see below).
void UDPSocket::setBroadcast() {
  // If this fails, we'll hear about it when we try to send.  This will allow
  // systems that cannot broadcast to continue if they don't plan to broadcast
  int broadcastPermission = 1;
  setsockopt(sockDesc, SOL_SOCKET, SO_BROADCAST,
             (raw_type *)&broadcastPermission, sizeof(broadcastPermission));
}

// Dissolve the association set by connect(). The kernel may report
// "address family not supported" for the AF_UNSPEC address -- that is the
// expected success response and is ignored.
void UDPSocket::disconnect() throw(SocketException) {
  sockaddr_in nullAddr;
  memset(&nullAddr, 0, sizeof(nullAddr));
  nullAddr.sin_family = AF_UNSPEC;

  // Try to disconnect
  if (::connect(sockDesc, (sockaddr *)&nullAddr, sizeof(nullAddr)) < 0) {
#ifdef WIN32
    if (errno != WSAEAFNOSUPPORT) {
#else
    if (errno != EAFNOSUPPORT) {
#endif
      throw SocketException("Disconnect failed (connect())", true);
    }
  }
}

// Send a single datagram of bufferLen bytes to the given host/port; throws
// unless the whole buffer was sent as one message.
void UDPSocket::sendTo(const void *buffer, int bufferLen,
    const string &foreignAddress, unsigned short foreignPort)
    throw(SocketException) {
  sockaddr_in destAddr;
  fillAddr(foreignAddress, foreignPort, destAddr);

  // Write out the whole buffer as a single message.
  if (sendto(sockDesc, (raw_type *)buffer, bufferLen, 0,
             (sockaddr *)&destAddr, sizeof(destAddr)) != bufferLen) {
    throw SocketException("Send failed (sendto())", true);
  }
}

// Receive one datagram (up to bufferLen bytes); on return, sourceAddress
// and sourcePort identify the sender. Returns the number of bytes read.
int UDPSocket::recvFrom(void *buffer, int bufferLen, string &sourceAddress,
    unsigned short &sourcePort) throw(SocketException) {
  sockaddr_in clntAddr;
  socklen_t addrLen = sizeof(clntAddr);
  int rtn;
  if ((rtn = recvfrom(sockDesc, (raw_type *)buffer, bufferLen, 0,
                      (sockaddr *)&clntAddr, (socklen_t *)&addrLen)) < 0) {
    throw SocketException("Receive failed (recvfrom())", true);
  }
  sourceAddress = inet_ntoa(clntAddr.sin_addr);
  sourcePort = ntohs(clntAddr.sin_port);

  return rtn;
}

// Set the time-to-live (hop limit) for outgoing multicast packets.
void UDPSocket::setMulticastTTL(unsigned char multicastTTL) throw(SocketException) {
  if (setsockopt(sockDesc, IPPROTO_IP, IP_MULTICAST_TTL,
                 (raw_type *)&multicastTTL, sizeof(multicastTTL)) < 0) {
    throw SocketException("Multicast TTL set failed (setsockopt())", true);
  }
}

// Join the given multicast group on the default interface (INADDR_ANY).
void UDPSocket::joinGroup(const string &multicastGroup) throw(SocketException) {
  struct ip_mreq multicastRequest;

  multicastRequest.imr_multiaddr.s_addr = inet_addr(multicastGroup.c_str());
  multicastRequest.imr_interface.s_addr = htonl(INADDR_ANY);
  if (setsockopt(sockDesc, IPPROTO_IP, IP_ADD_MEMBERSHIP,
                 (raw_type *)&multicastRequest,
                 sizeof(multicastRequest)) < 0) {
    throw SocketException("Multicast group join failed (setsockopt())", true);
  }
}

// Leave the given multicast group on the default interface (INADDR_ANY).
void UDPSocket::leaveGroup(const string &multicastGroup) throw(SocketException) {
  struct ip_mreq multicastRequest;

  multicastRequest.imr_multiaddr.s_addr = inet_addr(multicastGroup.c_str());
  multicastRequest.imr_interface.s_addr = htonl(INADDR_ANY);
  if (setsockopt(sockDesc, IPPROTO_IP, IP_DROP_MEMBERSHIP,
                 (raw_type *)&multicastRequest,
                 sizeof(multicastRequest)) < 0) {
    throw SocketException("Multicast group leave failed (setsockopt())", true);
  }
}