code stringlengths 3 1.05M | repo_name stringlengths 4 116 | path stringlengths 4 991 | language stringclasses 9
values | license stringclasses 15
values | size int32 3 1.05M |
|---|---|---|---|---|---|
package main
import (
"fmt"
"io/ioutil"
"log"
"mime"
"net/http"
"path/filepath"
"github.com/gorilla/mux"
)
// Index serves the single-camera page.
//
// The page content is written with fmt.Fprint rather than fmt.Fprintf:
// the page bytes are data, not a format string, so a literal '%' in the
// template must not be interpreted as a formatting verb.
func Index(w http.ResponseWriter, r *http.Request) {
	p, err := loadPage("single")
	if err != nil {
		// A missing page is a deployment error; net/http recovers the
		// panic per-request, returning a 500 to the client.
		panic(err)
	}
	fmt.Fprint(w, string(p))
}
// Monitor serves the multi-camera monitor page.
//
// As in Index, fmt.Fprint is used instead of fmt.Fprintf so that '%'
// characters inside the page are written verbatim rather than being
// treated as format verbs.
func Monitor(w http.ResponseWriter, r *http.Request) {
	p, err := loadPage("monitor")
	if err != nil {
		// A missing page is a deployment error; net/http recovers the
		// panic per-request, returning a 500 to the client.
		panic(err)
	}
	fmt.Fprint(w, string(p))
}
// Static serves files from the ./static directory. The file name comes
// from the {filename} route variable registered with gorilla/mux.
func Static(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)

	// filepath.Base strips any directory components from the requested
	// name, preventing path traversal (e.g. "../../etc/passwd") out of
	// the static directory. The mux route likely matches only a single
	// path segment, but this keeps the handler safe regardless of the
	// route pattern.
	name := filepath.Base(vars["filename"])
	filename := fmt.Sprintf("./static/%s", name)

	file, err := ioutil.ReadFile(filename)
	if err != nil {
		w.Header().Set("Content-Type", "text/html; charset=utf-8")
		w.WriteHeader(http.StatusNotFound)
		log.Printf("Error while serving static files: %#v", err)
		return
	}

	// Derive the Content-Type from the file extension.
	ext := filepath.Ext(filename)
	w.Header().Set("Content-Type", mime.TypeByExtension(ext))
	w.Write(file)
}
| Matius87/turbocam | frontend/handlers.go | GO | apache-2.0 | 890 |
package ch.uzh.ddis.katts.monitoring;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.lang.SerializationUtils;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import ch.uzh.ddis.katts.utils.Cluster;
import ch.uzh.ddis.katts.utils.EvalInfo;
import com.netflix.curator.framework.CuratorFramework;
/**
 * This monitor checks when the query is terminated and informs external processes about query termination by writing
 * the termination time of this worker into Zookeeper. The process is as follows: Each worker (each vm) writes its
 * system time into a zookeeper node as soon as it has "started" processing. When it has processed all its input it
 * writes its current system time into a corresponding end time node.
 *
 *
 * @see Termination
 *
 * @author Thomas Hunziker
 * @author "Lorenz Fischer" <lfischer@ifi.uzh.ch>
 *
 */
public class TerminationMonitor {

    public static final String CONF_TERMINATION_CHECK_INTERVAL = "katts.terminationCheckInterval";

    /** Each storm worker (vm) writes its system time into this path as soon as it has started processing input. */
    public static final String HOST_START_TIMES_ZK_PATH = "/katts/start_times";

    /**
     * Each storm worker (vm) writes its system time into this path as a childnode, as soon as it has finished
     * processing all its input. When all workers that have "registered" themselves by setting their start time in
     * {@link #HOST_START_TIMES_ZK_PATH} have a corresponding entry in {@link #HOST_END_TIMES_ZK_PATH} the computation
     * has finished and the runtime can be computed by taking the maximum end_time minus the minimum start_time.
     */
    public static final String HOST_END_TIMES_ZK_PATH = "/katts/end_times";

    /** The minimum start time of all hosts will be written into the node at this path. */
    public static final String START_TIME_ZK_PATH = "/katts/start_time";

    /** The maximum end time of all hosts will be written into the node at this path. */
    public static final String END_TIME_ZK_PATH = "/katts/end_time";

    // public static final String KATTS_TUPLES_OUTPUTTED_ZK_PATH = "/katts_number_of_tuples_outputed";

    /** The singleton instance. Each worker can only have one termination monitor. */
    private static TerminationMonitor instance;

    /**
     * This method returns the singleton object if it already exists and creates it, in the latter case. This method is
     * synchronized and can be called from multiple threads at the same time.
     *
     * @param stormConf
     *            the storm configuration object containing the connection information for Zookeeper.
     * @return the monitor singleton for this VM.
     */
    public static synchronized TerminationMonitor getInstance(Map<?, ?> stormConf) {
        if (instance == null) {
            instance = new TerminationMonitor(stormConf);
        }
        return instance;
    }

    /**
     * A reference to the storm configuration object. The information in this object is necessary to start a connection
     * to Zookeeper.
     */
    private Map<?, ?> stormConfiguration;

    /**
     * We use this curator instance to talk to ZK. According to https://github.com/Netflix/curator/wiki/Framework we
     * should only have one curator instance and reuse it throughout the VM.
     */
    private CuratorFramework curator;

    /** A flag to remember if this monitor has "told" the Zookeeper that all its sources have finished processing. */
    private boolean endTimeSet = false;

    /**
     * This list contains a reference to all objects that need to be informed when this worker has finished processing
     * input.
     */
    private List<TerminationCallback> terminationCallbacks = new ArrayList<TerminationCallback>();

    private Logger logger = LoggerFactory.getLogger(TerminationMonitor.class);

    /**
     * We keep track of all sources that have not yet processed all of their content using this set. Whenever a new
     * source registers itself using the {@link #registerSource(String)} method, the id of the source gets added to
     * this set. Whenever a source tells the monitor that it is done processing using the {@link #terminate(String)} we
     * remove this source from this set.
     * <p/>
     * The worker instance this monitor runs on is thought to have fully finished processing when this set is empty
     * after its {@link #terminate(String)} has been called.
     */
    private Set<String> unfinishedSources = Collections.synchronizedSet(new HashSet<String>());

    /**
     * This flag guarantees that the start time can only be set once and that it is set only by the first call of
     * {@link #start()}.
     */
    private boolean startTimeSet = false;

    /**
     * This string uniquely identifies this worker node inside the storm instance and it is used to monitor the start
     * and the stop time of the system.
     */
    private String stormWorkerIdentifier;

    /**
     * Singleton classes need private constructors.
     *
     * @param stormConf
     *            the map containing the storm configuration, which is necessary to create the connection to Zookeeper.
     */
    private TerminationMonitor(Map<?, ?> stormConf) {
        this.stormConfiguration = stormConf;

        /*
         * TODO: this only works for as long as we have ALWAYS only one storm worker per host.
         */
        this.stormWorkerIdentifier = Cluster.getHostIdentifier();

        try {
            this.curator = Cluster.getCuratorClient(this.stormConfiguration);
        } catch (IOException e) {
            // we should stop everything right here, since this is not going to end up well
            throw new IllegalStateException("Could not create the Zookeeper connection using the Curator.", e);
        }

        // write evaluation info from storm configuration into zookeeper
        try {
            EvalInfo.persistInfoToZookeeper(stormConf, this.curator);
        } catch (Exception e) {
            this.logger.warn("Could not store configuration information to zookeeper. "
                    + "Probably it already existed. Exception was: " + e.getMessage());
        }

        // create root node for the end times
        try {
            if (this.curator.checkExists().forPath(HOST_END_TIMES_ZK_PATH) == null) {
                this.curator.create().creatingParentsIfNeeded().forPath(HOST_END_TIMES_ZK_PATH);
            }
        } catch (Exception e) {
            throw new RuntimeException("Can't create node for path '" + HOST_END_TIMES_ZK_PATH + "' because: "
                    + e.getMessage(), e);
        }

        /*
         * Register a watcher to the end times path, so we will be informed as soon as all workers have finished
         * processing their input.
         */
        Watcher terminationWatcher = new Watcher() {

            @Override
            public void process(WatchedEvent event) {
                CuratorFramework ctr;
                int startTimeCount;
                int endTimeCount;

                try {
                    List<String> startTimePaths;
                    List<String> endTimePaths;

                    ctr = TerminationMonitor.this.curator;
                    startTimePaths = ctr.getChildren().forPath(HOST_START_TIMES_ZK_PATH);
                    endTimePaths = ctr.getChildren().forPath(HOST_END_TIMES_ZK_PATH);
                    startTimeCount = startTimePaths.size();
                    endTimeCount = endTimePaths.size();

                    if (startTimeCount > 0 && startTimeCount == endTimeCount) {
                        // First: inform all TerminationCallbacks in local VM
                        synchronized (TerminationMonitor.this.terminationCallbacks) { // no new callbacks can be added
                            for (TerminationCallback callback : TerminationMonitor.this.terminationCallbacks) {
                                callback.workerTerminated();
                            }
                        }

                        // Second: Compute global start and end time
                        Long minimumStartTime = Long.MAX_VALUE;
                        Long maximumEndTime = Long.MIN_VALUE;

                        // find minimum start time
                        for (String startTimePath : startTimePaths) {
                            byte[] data = ctr.getData().forPath(HOST_START_TIMES_ZK_PATH + "/" + startTimePath);
                            Long currentStartTime = (Long) SerializationUtils.deserialize(data);
                            if (currentStartTime < minimumStartTime) {
                                minimumStartTime = currentStartTime;
                            }
                        }

                        // find maximum end time
                        for (String endTimePath : endTimePaths) {
                            byte[] data = ctr.getData().forPath(HOST_END_TIMES_ZK_PATH + "/" + endTimePath);
                            Long currentEndTime = (Long) SerializationUtils.deserialize(data);
                            if (currentEndTime > maximumEndTime) {
                                maximumEndTime = currentEndTime;
                            }
                        }

                        /*
                         * write start and end time back into ZK, this call will be tried by all worker nodes. therefore
                         * we will need to check if the node already exists.
                         */
                        if (ctr.checkExists().forPath(START_TIME_ZK_PATH) == null) {
                            ctr.create().forPath(START_TIME_ZK_PATH, SerializationUtils.serialize(minimumStartTime));
                        }
                        if (ctr.checkExists().forPath(END_TIME_ZK_PATH) == null) {
                            ctr.create().forPath(END_TIME_ZK_PATH, SerializationUtils.serialize(maximumEndTime));
                        }
                    }

                    // Re-register this watcher: Zookeeper watches are one-shot and are consumed by delivery.
                    ctr.getChildren().usingWatcher(this).forPath(HOST_END_TIMES_ZK_PATH);
                } catch (Exception e) {
                    throw new IllegalStateException("Error while checking for query completion", e);
                }
            }
        };

        // add the watcher to the path of the end time
        try {
            this.curator.getChildren().usingWatcher(terminationWatcher).forPath(HOST_END_TIMES_ZK_PATH);
        } catch (Exception e) {
            throw new IllegalStateException("Could not add watcher the Zookeeper connection using the Curator.", e);
        }
    }

    /**
     * Registers a callback object with this monitor. As soon as the last source has finished processing its input all
     * registered callbacks will be informed about the fact that this worker instance has finished processing and will
     * now terminate.
     *
     * @param callback
     *            the callback object that should be informed when the processing has finished.
     */
    public synchronized void registerTerminationCallback(TerminationCallback callback) {
        this.terminationCallbacks.add(callback);
    }

    /**
     * Tells the termination monitor that the source with the given id has processed all its input.
     *
     * @param sourceId
     *            the source which has processed all its input.
     */
    public synchronized void terminate(String sourceId) {
        // Although unfinishedSources is a synchronized set, this method must stay synchronized: the
        // check-then-act on endTimeSet below would otherwise race between two sources terminating concurrently.
        this.unfinishedSources.remove(sourceId);

        if (this.unfinishedSources.isEmpty()) {
            logger.info("Source with id " + sourceId + " is done processing. Run complete!");
        } else {
            logger.info("Source with id " + sourceId + " is done processing. Still waiting for others: "
                    + this.unfinishedSources);
        }

        /*
         * Inform interested parties that this worker is done processing data as soon as we have no unfinished sources
         * left.
         */
        if (!this.endTimeSet && this.unfinishedSources.isEmpty()) {
            String endTimePath = HOST_END_TIMES_ZK_PATH + "/" + this.stormWorkerIdentifier;
            byte[] serializedTime = SerializationUtils.serialize(Long.valueOf(System.currentTimeMillis()));

            try {
                this.curator.create().creatingParentsIfNeeded().forPath(endTimePath, serializedTime);
            } catch (Exception e) {
                throw new RuntimeException("Can't write end time to ZK at path '" + endTimePath + "' because: "
                        + e.getMessage(), e);
            }

            this.endTimeSet = true;
        }
    }

    /**
     * This method will set the start time for this worker in Zookeeper to the current system time to signal that the
     * storm worker this VM is running in has started processing input. We only register the first call to this method.
     * All following calls have no impact.
     */
    public synchronized void start() {
        if (!this.startTimeSet) { // only the very first call registers the start time
            String startTimePath = HOST_START_TIMES_ZK_PATH + "/" + this.stormWorkerIdentifier;
            byte[] serializedTime = SerializationUtils.serialize(Long.valueOf(System.currentTimeMillis()));

            try {
                this.curator.create().creatingParentsIfNeeded().forPath(startTimePath, serializedTime);
            } catch (Exception e) {
                throw new RuntimeException("Can't write start time to ZK at path '" + startTimePath + "' because: "
                        + e.getMessage(), e);
            }

            this.startTimeSet = true;
        }
    }

    /**
     * Tell this termination monitor that there is a source running in the same VM that we have to wait for. The
     * termination monitor will only set its state to "terminated" in Zookeeper, when all sources reported that all of
     * their content has been processed by the system.
     *
     * @param sourceId
     *            the identifier for the source, we should wait for termination on.
     */
    public void registerSource(String sourceId) {
        this.unfinishedSources.add(sourceId);
    }

    /**
     * Classes that implement this interface can register themselves with the {@link TerminationMonitor} using the
     * {@link TerminationMonitor#registerTerminationCallback(TerminationCallback)} method. When this monitor has
     * been informed by all registered sources that they have exhausted their input using the
     * {@link TerminationMonitor#terminate(String)} method, all registered {@link TerminationCallback} objects will have
     * their {@link #workerTerminated()} method called before this monitor informs the other workers by writing the
     * finished flag into zookeeper.
     *
     * The {@link #workerTerminated()} method can be used to do cleanup work such as writing performance statistics to
     * the filesystem or into zookeeper.
     *
     * @author "Lorenz Fischer" <lfischer@ifi.uzh.ch>
     *
     */
    public interface TerminationCallback {

        /**
         * This method will be called when all sources that have registered themselves with the
         * {@link TerminationMonitor} have signalled that they have finished processing input.
         */
        public void workerTerminated();
    }
}
| uzh/katts | src/main/java/ch/uzh/ddis/katts/monitoring/TerminationMonitor.java | Java | apache-2.0 | 13,693 |
/*
* Copyright © 2018 Mercateo AG (http://www.mercateo.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.factcast.store.pgsql.rds;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.env.Environment;
/**
 * Spring configuration that enables RDS-aware data source creation.
 */
@Configuration
public class RdsConfiguration {

    /**
     * Registers the bean post processor that swaps in RDS data source
     * factories, configured from the given Spring environment.
     *
     * <p>NOTE: the method name keeps its historical spelling ("Factor")
     * because the Spring bean name is derived from it; renaming would
     * break any by-name references to the bean.
     *
     * @param environment the Spring environment supplying RDS settings
     * @return the configured post processor
     */
    @Bean
    public RdsDataSourceFactoryBeanPostProcessor rdsDataSourceFactorBeanPostProcessor(
            Environment environment) {
        return new RdsDataSourceFactoryBeanPostProcessor(environment);
    }
}
| uweschaefer/factcast | factcast-store-pgsql-rds/src/main/java/org/factcast/store/pgsql/rds/RdsConfiguration.java | Java | apache-2.0 | 1,073 |
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdint.h>
#include <ostream>
#include <set>
#include <string>
#include <vector>
#include <glog/logging.h>
#include <google/protobuf/repeated_field.h>
#include <mesos/roles.hpp>
#include <mesos/v1/mesos.hpp>
#include <mesos/v1/resources.hpp>
#include <mesos/v1/values.hpp>
#include <stout/foreach.hpp>
#include <stout/hashmap.hpp>
#include <stout/json.hpp>
#include <stout/lambda.hpp>
#include <stout/protobuf.hpp>
#include <stout/strings.hpp>
#include <stout/unreachable.hpp>
#include "common/resource_quantities.hpp"
#include "common/resources_utils.hpp"
using std::make_shared;
using std::map;
using std::ostream;
using std::pair;
using std::set;
using std::shared_ptr;
using std::string;
using std::vector;
using google::protobuf::RepeatedPtrField;
using mesos::internal::ResourceQuantities;
namespace mesos {
namespace v1 {
/////////////////////////////////////////////////
// Helper functions.
/////////////////////////////////////////////////
// Two AllocationInfo messages compare equal iff they agree on the
// presence of 'role' and, when both carry one, on its value.
bool operator==(
    const Resource::AllocationInfo& left,
    const Resource::AllocationInfo& right)
{
  return left.has_role() == right.has_role() &&
         (!left.has_role() || left.role() == right.role());
}


bool operator!=(
    const Resource::AllocationInfo& left,
    const Resource::AllocationInfo& right)
{
  // Defined as the negation of the equality operator above.
  return !(left == right);
}
// Two ReservationInfo messages compare equal iff type and role match
// exactly, and the optional 'principal' and 'labels' fields agree both
// on presence and, when present, on value.
bool operator==(
    const Resource::ReservationInfo& left,
    const Resource::ReservationInfo& right)
{
  if (left.type() != right.type() || left.role() != right.role()) {
    return false;
  }

  if (left.has_principal() != right.has_principal() ||
      (left.has_principal() && left.principal() != right.principal())) {
    return false;
  }

  if (left.has_labels() != right.has_labels() ||
      (left.has_labels() && left.labels() != right.labels())) {
    return false;
  }

  return true;
}


bool operator!=(
    const Resource::ReservationInfo& left,
    const Resource::ReservationInfo& right)
{
  // Defined as the negation of the equality operator above.
  return !(left == right);
}
// Two Path sources compare equal iff they agree on the presence of
// 'root' and, when both carry one, on its value.
bool operator==(
    const Resource::DiskInfo::Source::Path& left,
    const Resource::DiskInfo::Source::Path& right)
{
  return left.has_root() == right.has_root() &&
         (!left.has_root() || left.root() == right.root());
}
// Two Mount sources compare equal iff they agree on the presence of
// 'root' and, when both carry one, on its value. This mirrors the
// Path comparison above.
bool operator==(
    const Resource::DiskInfo::Source::Mount& left,
    const Resource::DiskInfo::Source::Mount& right)
{
  if (left.has_root() != right.has_root()) {
    return false;
  }

  if (left.has_root() && left.root() != right.root()) {
    return false;
  }

  // Presence matches and, if present, the values match, so the two
  // messages are equal. (The previous trailing
  // `return left.root() == right.root();` was redundant here: it only
  // held because unset proto string fields both default to "".)
  return true;
}
bool operator!=(
    const Resource::DiskInfo::Source::Path& left,
    const Resource::DiskInfo::Source::Path& right)
{
  // Defined as the negation of the equality operator above.
  return !(left == right);
}


bool operator!=(
    const Resource::DiskInfo::Source::Mount& left,
    const Resource::DiskInfo::Source::Mount& right)
{
  // Defined as the negation of the equality operator above.
  return !(left == right);
}
// Two disk Sources compare equal iff their type matches exactly and
// every optional field (path, mount, vendor, id, metadata, profile)
// agrees on presence and, when present, on value.
bool operator==(
    const Resource::DiskInfo::Source& left,
    const Resource::DiskInfo::Source& right)
{
  if (left.type() != right.type()) {
    return false;
  }

  if (left.has_path() != right.has_path() ||
      (left.has_path() && left.path() != right.path())) {
    return false;
  }

  if (left.has_mount() != right.has_mount() ||
      (left.has_mount() && left.mount() != right.mount())) {
    return false;
  }

  if (left.has_vendor() != right.has_vendor() ||
      (left.has_vendor() && left.vendor() != right.vendor())) {
    return false;
  }

  if (left.has_id() != right.has_id() ||
      (left.has_id() && left.id() != right.id())) {
    return false;
  }

  if (left.has_metadata() != right.has_metadata() ||
      (left.has_metadata() && left.metadata() != right.metadata())) {
    return false;
  }

  if (left.has_profile() != right.has_profile() ||
      (left.has_profile() && left.profile() != right.profile())) {
    return false;
  }

  return true;
}


bool operator!=(
    const Resource::DiskInfo::Source& left,
    const Resource::DiskInfo::Source& right)
{
  // Defined as the negation of the equality operator above.
  return !(left == right);
}
// Two DiskInfo messages compare equal iff their sources match and
// their persistence IDs match.
bool operator==(const Resource::DiskInfo& left, const Resource::DiskInfo& right)
{
  if (left.has_source() != right.has_source() ||
      (left.has_source() && left.source() != right.source())) {
    return false;
  }

  // NOTE: We ignore 'volume' inside DiskInfo when doing comparison
  // because it describes how this resource will be used which has
  // nothing to do with the Resource object itself. A framework can
  // use this resource and specify different 'volume' every time it
  // uses it.
  if (left.has_persistence() != right.has_persistence()) {
    return false;
  }

  return !left.has_persistence() ||
         left.persistence().id() == right.persistence().id();
}


bool operator!=(const Resource::DiskInfo& left, const Resource::DiskInfo& right)
{
  // Defined as the negation of the equality operator above.
  return !(left == right);
}
// Full structural equality for Resource: name, type, allocation,
// the ordered reservation stack, disk, revocable/shared presence,
// provider id, and finally the typed value payload must all match.
bool operator==(const Resource& left, const Resource& right)
{
  if (left.name() != right.name() || left.type() != right.type()) {
    return false;
  }

  // AllocationInfo: presence and, when present, value must agree.
  if (left.has_allocation_info() != right.has_allocation_info() ||
      (left.has_allocation_info() &&
       left.allocation_info() != right.allocation_info())) {
    return false;
  }

  // The reservation stacks must match element by element, in order.
  if (left.reservations_size() != right.reservations_size()) {
    return false;
  }

  for (int i = 0; i < left.reservations_size(); ++i) {
    if (left.reservations(i) != right.reservations(i)) {
      return false;
    }
  }

  // DiskInfo: presence and, when present, value must agree.
  if (left.has_disk() != right.has_disk() ||
      (left.has_disk() && left.disk() != right.disk())) {
    return false;
  }

  // RevocableInfo: only presence matters.
  if (left.has_revocable() != right.has_revocable()) {
    return false;
  }

  // ResourceProviderID: presence and, when present, value must agree.
  if (left.has_provider_id() != right.has_provider_id() ||
      (left.has_provider_id() && left.provider_id() != right.provider_id())) {
    return false;
  }

  // SharedInfo: only presence matters.
  if (left.has_shared() != right.has_shared()) {
    return false;
  }

  // Finally compare the value payload according to the declared type.
  switch (left.type()) {
    case Value::SCALAR: return left.scalar() == right.scalar();
    case Value::RANGES: return left.ranges() == right.ranges();
    case Value::SET:    return left.set() == right.set();
    default:            return false;
  }
}


bool operator!=(const Resource& left, const Resource& right)
{
  // Defined as the negation of the equality operator above.
  return !(left == right);
}
namespace internal {
// Tests if we can add two Resource objects together resulting in one
// valid Resource object. For example, two Resource objects with
// different name, type or role are not addable.
static bool addable(const Resource& left, const Resource& right)
{
  // Check SharedInfo.
  if (left.has_shared() != right.has_shared()) {
    return false;
  }

  // For shared resources, they can be added only if left == right.
  if (left.has_shared()) {
    return left == right;
  }

  // Now, we verify if the two non-shared resources can be added.
  if (left.name() != right.name() || left.type() != right.type()) {
    return false;
  }

  // Check AllocationInfo.
  if (left.has_allocation_info() != right.has_allocation_info()) {
    return false;
  }

  if (left.has_allocation_info() &&
      left.allocation_info() != right.allocation_info()) {
    return false;
  }

  // Check the stack of ReservationInfo.
  // NOTE: the stacks are compared element by element, in order, so two
  // resources with the same reservations in a different order are NOT
  // considered addable.
  if (left.reservations_size() != right.reservations_size()) {
    return false;
  }

  for (int i = 0; i < left.reservations_size(); ++i) {
    if (left.reservations(i) != right.reservations(i)) {
      return false;
    }
  }

  // Check DiskInfo.
  if (left.has_disk() != right.has_disk()) { return false; }

  if (left.has_disk()) {
    if (left.disk() != right.disk()) { return false; }

    if (left.disk().has_source()) {
      switch (left.disk().source().type()) {
        case Resource::DiskInfo::Source::PATH: {
          // Two PATH resources can be added if their disks are identical.
          break;
        }
        case Resource::DiskInfo::Source::BLOCK:
        case Resource::DiskInfo::Source::MOUNT: {
          // Two resources that represent exclusive 'MOUNT' or 'BLOCK' disks
          // cannot be added together; this would defeat the exclusivity.
          return false;
        }
        case Resource::DiskInfo::Source::RAW: {
          // We can only add resources representing 'RAW' disks if
          // they have no identity or are identical.
          // (Identical RAW disks already passed the disk() equality
          // check above; here we only reject identified ones.)
          if (left.disk().source().has_id()) {
            return false;
          }
          break;
        }
        case Resource::DiskInfo::Source::UNKNOWN:
          UNREACHABLE();
      }
    }

    // TODO(jieyu): Even if two Resource objects with DiskInfo have
    // the same persistence ID, they cannot be added together if they
    // are non-shared. In fact, this shouldn't happen if we do not
    // add resources from different namespaces (e.g., across slave).
    // Consider adding a warning.
    if (left.disk().has_persistence()) {
      return false;
    }
  }

  // Check RevocableInfo.
  if (left.has_revocable() != right.has_revocable()) {
    return false;
  }

  // Check ResourceProviderID.
  if (left.has_provider_id() != right.has_provider_id()) {
    return false;
  }

  if (left.has_provider_id() && left.provider_id() != right.provider_id()) {
    return false;
  }

  // All metadata is compatible; the value payloads can be summed.
  return true;
}
// Tests if we can subtract "right" from "left" resulting in one valid
// Resource object. For example, two Resource objects with different
// name, type or role are not subtractable.
// NOTE: Set subtraction is always well defined, it does not require
// 'right' to be contained within 'left'. For example, assuming that
// "left = {1, 2}" and "right = {2, 3}", "left" and "right" are
// subtractable because "left - right = {1}". However, "left" does not
// contain "right".
static bool subtractable(const Resource& left, const Resource& right)
{
  // Check SharedInfo.
  if (left.has_shared() != right.has_shared()) {
    return false;
  }

  // For shared resources, they can be subtracted only if left == right.
  if (left.has_shared()) {
    return left == right;
  }

  // Now, we verify if the two non-shared resources can be subtracted.
  if (left.name() != right.name() || left.type() != right.type()) {
    return false;
  }

  // Check AllocationInfo.
  if (left.has_allocation_info() != right.has_allocation_info()) {
    return false;
  }

  if (left.has_allocation_info() &&
      left.allocation_info() != right.allocation_info()) {
    return false;
  }

  // Check the stack of ReservationInfo.
  // NOTE: as in `addable`, the stacks are compared element by element,
  // in order.
  if (left.reservations_size() != right.reservations_size()) {
    return false;
  }

  for (int i = 0; i < left.reservations_size(); ++i) {
    if (left.reservations(i) != right.reservations(i)) {
      return false;
    }
  }

  // Check DiskInfo.
  if (left.has_disk() != right.has_disk()) { return false; }

  if (left.has_disk()) {
    if (left.disk() != right.disk()) { return false; }

    if (left.disk().has_source()) {
      switch (left.disk().source().type()) {
        case Resource::DiskInfo::Source::PATH: {
          // Two PATH resources can be subtracted if their disks are identical.
          break;
        }
        case Resource::DiskInfo::Source::BLOCK:
        case Resource::DiskInfo::Source::MOUNT: {
          // Two resources that represent exclusive 'MOUNT' or 'BLOCK' disks
          // cannot be subtracted from each other if they are not the exact same
          // mount; this would defeat the exclusivity.
          if (left != right) {
            return false;
          }
          break;
        }
        case Resource::DiskInfo::Source::RAW: {
          // We can only subtract resources representing 'RAW' disks
          // if they have no identity.
          if (left.disk().source().has_id() && left != right) {
            return false;
          }
          break;
        }
        case Resource::DiskInfo::Source::UNKNOWN:
          UNREACHABLE();
      }
    }

    // NOTE: For Resource objects that have DiskInfo, we can only subtract
    // if they are equal.
    if (left.disk().has_persistence() && left != right) {
      return false;
    }
  }

  // Check RevocableInfo.
  if (left.has_revocable() != right.has_revocable()) {
    return false;
  }

  // Check ResourceProviderID.
  if (left.has_provider_id() != right.has_provider_id()) {
    return false;
  }

  if (left.has_provider_id() && left.provider_id() != right.provider_id()) {
    return false;
  }

  // All metadata is compatible; the value payloads can be subtracted.
  return true;
}
// Tests if "right" is contained in "left".
static bool contains(const Resource& left, const Resource& right)
{
  // Subtractability is a necessary precondition for containment: it
  // verifies name, role, type, ReservationInfo, DiskInfo, SharedInfo,
  // RevocableInfo, and ResourceProviderID compatibility.
  if (!subtractable(left, right)) {
    return false;
  }

  // With compatible metadata, containment reduces to a value comparison.
  switch (left.type()) {
    case Value::SCALAR: return right.scalar() <= left.scalar();
    case Value::RANGES: return right.ranges() <= left.ranges();
    case Value::SET:    return right.set() <= left.set();
    default:            return false;
  }
}
/**
 * Checks that a Resources object is valid for command line specification.
 *
 * Resources given at the command line must not contain persistent
 * volumes, revocable resources, or dynamic reservations (those may only
 * be created programmatically), and must not use the same resource name
 * with two different value types.
 *
 * @param resources The input Resources.
 * @return An `Option` containing None() if validation was successful, or an
 *     Error otherwise.
 */
static Option<Error> validateCommandLineResources(const Resources& resources)
{
  // Tracks the value type first seen for each resource name.
  hashmap<string, Value::Type> seenTypes;

  foreach (const Resource& resource, resources) {
    // These fields should only be provided programmatically,
    // not at the command line.
    if (Resources::isPersistentVolume(resource)) {
      return Error(
          "Persistent volumes cannot be specified at the command line");
    }

    if (Resources::isRevocable(resource)) {
      return Error(
          "Revocable resources cannot be specified at the command line; do"
          " not include a 'revocable' key in the resources JSON");
    }

    if (Resources::isDynamicallyReserved(resource)) {
      return Error(
          "Dynamic reservations cannot be specified at the command line; do"
          " not include a reservation with DYNAMIC type in the resources JSON");
    }

    const string& name = resource.name();

    if (seenTypes.contains(name)) {
      if (seenTypes[name] != resource.type()) {
        return Error(
            "Resources with the same name ('" + name + "') but"
            " different types are not allowed");
      }
    } else {
      seenTypes[name] = resource.type();
    }
  }

  return None();
}
} // namespace internal {
// In-place addition of the value payload; the caller is responsible for
// checking `addable` first.
Resource& operator+=(Resource& left, const Resource& right)
{
  switch (left.type()) {
    case Value::SCALAR:
      *left.mutable_scalar() += right.scalar();
      break;
    case Value::RANGES:
      *left.mutable_ranges() += right.ranges();
      break;
    case Value::SET:
      *left.mutable_set() += right.set();
      break;
    default:
      break; // Other value types are left untouched, as before.
  }

  return left;
}


Resource operator+(const Resource& left, const Resource& right)
{
  // Implemented in terms of the compound assignment above.
  Resource sum = left;
  sum += right;
  return sum;
}
// In-place subtraction of the value payload; the caller is responsible
// for checking `subtractable` first.
Resource& operator-=(Resource& left, const Resource& right)
{
  switch (left.type()) {
    case Value::SCALAR:
      *left.mutable_scalar() -= right.scalar();
      break;
    case Value::RANGES:
      *left.mutable_ranges() -= right.ranges();
      break;
    case Value::SET:
      *left.mutable_set() -= right.set();
      break;
    default:
      break; // Other value types are left untouched, as before.
  }

  return left;
}


Resource operator-(const Resource& left, const Resource& right)
{
  // Implemented in terms of the compound assignment above.
  Resource difference = left;
  difference -= right;
  return difference;
}
/////////////////////////////////////////////////
// Public static functions.
/////////////////////////////////////////////////
// Parses a single named resource from its textual value, optionally
// attaching a static reservation for a non-default role.
Try<Resource> Resources::parse(
    const string& name,
    const string& value,
    const string& role)
{
  // First turn the textual value into a typed Value protobuf.
  Try<Value> parsed = internal::values::parse(value);
  if (parsed.isError()) {
    return Error(
        "Failed to parse resource " + name +
        " value " + value + " error " + parsed.error());
  }

  Value parsedValue = parsed.get();

  Resource resource;
  resource.set_name(name);

  // A non-default role is modeled as a static reservation.
  if (role != "*") {
    Resource::ReservationInfo* reservation = resource.add_reservations();
    reservation->set_type(Resource::ReservationInfo::STATIC);
    reservation->set_role(role);
  }

  // Copy the typed payload into the resource.
  switch (parsedValue.type()) {
    case Value::SCALAR:
      resource.set_type(Value::SCALAR);
      resource.mutable_scalar()->CopyFrom(parsedValue.scalar());
      break;
    case Value::RANGES:
      resource.set_type(Value::RANGES);
      resource.mutable_ranges()->CopyFrom(parsedValue.ranges());
      break;
    case Value::SET:
      resource.set_type(Value::SET);
      resource.mutable_set()->CopyFrom(parsedValue.set());
      break;
    default:
      return Error(
          "Bad type for resource " + name + " value " + value +
          " type " + Value::Type_Name(parsedValue.type()));
  }

  return resource;
}
// TODO(wickman) It is possible for Resources::ostream<< to produce
// unparseable resources, i.e. those with
// ReservationInfo/DiskInfo/RevocableInfo.
Try<Resources> Resources::parse(
    const string& text,
    const string& defaultRole)
{
  Try<vector<Resource>> resources = Resources::fromString(text, defaultRole);

  if (resources.isError()) {
    return Error(resources.error());
  }

  Resources result;

  // Validate the Resource objects and convert them
  // to the "post-reservation-refinement" format.
  foreach (Resource& resource, CHECK_NOTERROR(resources)) {
    // If invalid, propagate error instead of skipping the resource.
    Option<Error> error = Resources::validate(resource);
    if (error.isSome()) {
      return error.get();
    }

    // Convert the resource to the "post-reservation-refinement" format.
    if (resource.reservations_size() > 0) {
      // In this case, we're either already in
      // the "post-reservation-refinement" format,
      // or we're in the "endpoint" format.
      // We clear out the "pre-reservation-refinement" fields
      // in case the resources are in the "endpoint" format.
      resource.clear_role();
      resource.clear_reservation();
    } else if (resource.role() == "*") {
      CHECK(!resource.has_reservation()) << resource;

      // Unreserved resources.
      resource.clear_role();
    } else {
      // Resource with a single reservation.
      Resource::ReservationInfo* reservation = resource.add_reservations();

      // Check the `Resource.reservation` to determine whether
      // we have a static or dynamic reservation.
      if (!resource.has_reservation()) {
        reservation->set_type(Resource::ReservationInfo::STATIC);
      } else {
        // Move the legacy reservation into the stack and tag it dynamic.
        reservation->CopyFrom(resource.reservation());
        resource.clear_reservation();
        reservation->set_type(Resource::ReservationInfo::DYNAMIC);
      }

      reservation->set_role(resource.role());
      resource.clear_role();
    }

    // Add the validated and converted resource to the result.
    result.add(std::move(resource));
  }

  // TODO(jmlvanre): Move this up into `Containerizer::resources`.
  Option<Error> error = internal::validateCommandLineResources(result);
  if (error.isSome()) {
    return error.get();
  }

  return result;
}
// Builds a vector of Resource objects from a JSON array, filling in
// `defaultRole` for entries that specify neither a role nor any
// reservations. No validation is performed; empty or invalid
// resources are passed through to the caller.
Try<vector<Resource>> Resources::fromJSON(
    const JSON::Array& resourcesJSON,
    const string& defaultRole)
{
  // Convert the JSON array into the equivalent repeated protobuf
  // field, from which the individual Resource objects are taken.
  Try<RepeatedPtrField<Resource>> parsed =
    protobuf::parse<RepeatedPtrField<Resource>>(resourcesJSON);

  if (parsed.isError()) {
    return Error(
        "Some JSON resources were not formatted properly: " + parsed.error());
  }

  vector<Resource> result;

  for (Resource& resource : parsed.get()) {
    // Set the default role if none was specified.
    //
    // NOTE: We rely on the fact that the result of this function is
    // converted to the "post-reservation-refinement" format.
    if (!resource.has_role() && resource.reservations_size() == 0) {
      resource.set_role(defaultRole);
    }

    // We add the Resource object even if it is empty or invalid.
    result.push_back(resource);
  }

  return result;
}
// Parses a semicolon-delimited list of "name:value" or
// "name(role):value" tokens into (unvalidated) Resource objects;
// tokens without an explicit "(role)" suffix get `defaultRole`.
Try<vector<Resource>> Resources::fromSimpleString(
    const string& text,
    const string& defaultRole)
{
  vector<Resource> resources;

  foreach (const string& token, strings::tokenize(text, ";")) {
    // TODO(anindya_sinha): Allow text based representation of resources
    // to specify PATH or MOUNT type disks along with its root.
    //
    // Each token must contain exactly one ':' separating the name
    // (plus optional role) from the value.
    vector<string> pair = strings::tokenize(token, ":");
    if (pair.size() != 2) {
      return Error(
          "Bad value for resources, missing or extra ':' in " + token);
    }

    string name;
    string role;
    size_t openParen = pair[0].find('(');
    if (openParen == string::npos) {
      // No "(role)" suffix; fall back to the default role.
      name = strings::trim(pair[0]);
      role = defaultRole;
    } else {
      size_t closeParen = pair[0].find(')');
      if (closeParen == string::npos || closeParen < openParen) {
        return Error(
            "Bad value for resources, mismatched parentheses in " + token);
      }

      name = strings::trim(pair[0].substr(0, openParen));

      // The role is the text between the parentheses.
      role = strings::trim(pair[0].substr(
          openParen + 1,
          closeParen - openParen - 1));
    }

    Try<Resource> resource = Resources::parse(name, pair[1], role);
    if (resource.isError()) {
      return Error(resource.error());
    }

    // We add the Resource object even if it is empty or invalid.
    resources.push_back(resource.get());
  }

  return resources;
}
// Parses a resource specification in either JSON-array form or the
// semicolon-delimited "name(role):value" text form, dispatching on
// whether the input parses as a JSON array.
Try<vector<Resource>> Resources::fromString(
    const string& text,
    const string& defaultRole)
{
  Try<JSON::Array> json = JSON::parse<JSON::Array>(text);

  if (json.isSome()) {
    return Resources::fromJSON(json.get(), defaultRole);
  }

  return Resources::fromSimpleString(text, defaultRole);
}
// Validates a single Resource object: name, value type/content,
// DiskInfo, the reservation format (both the deprecated
// "pre-reservation-refinement" fields and the `reservations` list),
// and shareability. Returns None if the resource is valid.
Option<Error> Resources::validate(const Resource& resource)
{
  if (resource.name().empty()) {
    return Error("Empty resource name");
  }

  if (!Value::Type_IsValid(resource.type())) {
    return Error("Invalid resource type");
  }

  // Exactly one of scalar/ranges/set must be present, matching `type`.
  if (resource.type() == Value::SCALAR) {
    if (!resource.has_scalar() ||
        resource.has_ranges() ||
        resource.has_set()) {
      return Error("Invalid scalar resource");
    }

    if (resource.scalar().value() < 0) {
      return Error("Invalid scalar resource: value < 0");
    }
  } else if (resource.type() == Value::RANGES) {
    if (resource.has_scalar() ||
        !resource.has_ranges() ||
        resource.has_set()) {
      return Error("Invalid ranges resource");
    }

    for (int i = 0; i < resource.ranges().range_size(); i++) {
      const Value::Range& range = resource.ranges().range(i);

      // Ensure the range make sense (isn't inverted).
      if (range.begin() > range.end()) {
        return Error("Invalid ranges resource: begin > end");
      }

      // Ensure ranges don't overlap (but not necessarily coalesced).
      for (int j = i + 1; j < resource.ranges().range_size(); j++) {
        if (range.begin() <= resource.ranges().range(j).begin() &&
            resource.ranges().range(j).begin() <= range.end()) {
          return Error("Invalid ranges resource: overlapping ranges");
        }
      }
    }
  } else if (resource.type() == Value::SET) {
    if (resource.has_scalar() ||
        resource.has_ranges() ||
        !resource.has_set()) {
      return Error("Invalid set resource");
    }

    for (int i = 0; i < resource.set().item_size(); i++) {
      const string& item = resource.set().item(i);

      // Ensure no duplicates.
      for (int j = i + 1; j < resource.set().item_size(); j++) {
        if (item == resource.set().item(j)) {
          return Error("Invalid set resource: duplicated elements");
        }
      }
    }
  } else {
    // Resource doesn't support TEXT or other value types.
    return Error("Unsupported resource type");
  }

  // Checks for 'disk' resource.
  if (resource.has_disk()) {
    if (resource.name() != "disk") {
      return Error(
          "DiskInfo should not be set for " + resource.name() + " resource");
    }

    const Resource::DiskInfo& disk = resource.disk();

    if (disk.has_source()) {
      const Resource::DiskInfo::Source& source = disk.source();

      switch (source.type()) {
        case Resource::DiskInfo::Source::PATH:
        case Resource::DiskInfo::Source::MOUNT:
          // `PATH` and `MOUNT` contain only `optional` members.
          break;
        case Resource::DiskInfo::Source::BLOCK:
        case Resource::DiskInfo::Source::RAW:
          if (source.has_mount()) {
            return Error(
                "Mount should not be set for " +
                Resource::DiskInfo::Source::Type_Name(source.type()) +
                " disk source");
          }

          if (source.has_path()) {
            return Error(
                "Path should not be set for " +
                Resource::DiskInfo::Source::Type_Name(source.type()) +
                " disk source");
          }
          break;
        case Resource::DiskInfo::Source::UNKNOWN:
          return Error(
              "Unsupported 'DiskInfo.Source.Type' in "
              "'" + stringify(source) + "'");
      }
    }
  }

  // Validate the reservation format.
  if (resource.reservations_size() == 0) {
    // Check for the "pre-reservation-refinement" format.

    // Check role name.
    Option<Error> error = roles::validate(resource.role());
    if (error.isSome()) {
      return error;
    }

    // Check reservation.
    if (resource.has_reservation()) {
      // In this legacy format, `type` and `role` live on the Resource
      // itself, not inside `Resource.reservation`.
      if (resource.reservation().has_type()) {
        return Error(
            "'Resource.ReservationInfo.type' must not be set for"
            " the 'Resource.reservation' field");
      }

      if (resource.reservation().has_role()) {
        return Error(
            "'Resource.ReservationInfo.role' must not be set for"
            " the 'Resource.reservation' field");
      }

      // Checks for the invalid state of (role, reservation) pair.
      if (resource.role() == "*") {
        return Error(
            "Invalid reservation: role \"*\" cannot be dynamically reserved");
      }
    }
  } else {
    // Check for the "post-reservation-refinement" format.
    CHECK_GT(resource.reservations_size(), 0);

    // Validate all of the roles in `reservations`.
    foreach (
        const Resource::ReservationInfo& reservation, resource.reservations()) {
      if (!reservation.has_type()) {
        return Error(
            "Invalid reservation: 'Resource.ReservationInfo.type'"
            " field must be set.");
      }

      if (!reservation.has_role()) {
        return Error(
            "Invalid reservation: 'Resource.ReservationInfo.role'"
            " field must be set.");
      }

      Option<Error> error = roles::validate(reservation.role());
      if (error.isSome()) {
        return error;
      }

      if (reservation.role() == "*") {
        return Error("Invalid reservation: role \"*\" cannot be reserved");
      }
    }

    // Check that the reservations are correctly refined: each role must
    // be a strict subrole of the previous one, and only the first
    // reservation may be STATIC.
    string ancestor = resource.reservations(0).role();
    for (int i = 1; i < resource.reservations_size(); ++i) {
      const Resource::ReservationInfo& reservation = resource.reservations(i);

      if (reservation.type() == Resource::ReservationInfo::STATIC) {
        return Error(
            "Invalid refined reservation: A refined reservation"
            " cannot be STATIC");
      }

      const string& descendant = reservation.role();

      if (!roles::isStrictSubroleOf(descendant, ancestor)) {
        return Error(
            "Invalid refined reservation: role '" + descendant + "'" +
            " is not a refinement of '" + ancestor + "'");
      }

      ancestor = descendant;
    }

    // Additionally, we allow the "pre-reservation-refinement" format to be set
    // as long as there is only one reservation, and the `Resource.role` and
    // `Resource.reservation` fields are consistent with the reservation.
    if (resource.reservations_size() == 1) {
      const Resource::ReservationInfo& reservation = resource.reservations(0);
      if (resource.has_role() && resource.role() != reservation.role()) {
        return Error(
            "Invalid resource format: 'Resource.role' field with"
            " '" + resource.role() + "' does not match the role"
            " '" + reservation.role() + "' in 'Resource.reservations'");
      }

      switch (reservation.type()) {
        case Resource::ReservationInfo::STATIC: {
          if (resource.has_reservation()) {
            return Error(
                "Invalid resource format: 'Resource.reservation' must not be"
                " set if the single reservation in 'Resource.reservations' is"
                " STATIC");
          }
          break;
        }
        case Resource::ReservationInfo::DYNAMIC: {
          if (resource.has_role() != resource.has_reservation()) {
            return Error(
                "Invalid resource format: 'Resource.role' and"
                " 'Resource.reservation' must either be both set or both not"
                " set if the single reservation in 'Resource.reservations' is"
                " DYNAMIC");
          }

          if (resource.has_reservation() &&
              resource.reservation().principal() != reservation.principal()) {
            return Error(
                "Invalid resource format: 'Resource.reservation.principal'"
                " field with '" + resource.reservation().principal() + "' does"
                " not match the principal '" + reservation.principal() + "'"
                " in 'Resource.reservations'");
          }

          if (resource.has_reservation() &&
              resource.reservation().labels() != reservation.labels()) {
            return Error(
                "Invalid resource format: 'Resource.reservation.labels' field"
                " with '" + stringify(resource.reservation().labels()) + "'"
                " does not match the labels"
                " '" + stringify(reservation.labels()) + "'"
                " in 'Resource.reservations'");
          }
          break;
        }
        case Resource::ReservationInfo::UNKNOWN: {
          return Error("Unsupported 'Resource.ReservationInfo.Type'");
        }
      }
    } else {
      CHECK_GT(resource.reservations_size(), 1);
      // With multiple (refined) reservations the legacy fields must be
      // absent entirely.
      if (resource.has_role()) {
        return Error(
            "Invalid resource format: 'Resource.role' must not be set if"
            " there is more than one reservation in 'Resource.reservations'");
      }

      if (resource.has_reservation()) {
        return Error(
            "Invalid resource format: 'Resource.reservation' must not be set if"
            " there is more than one reservation in 'Resource.reservations'");
      }
    }
  }

  // Check that shareability is enabled for supported resource types.
  // For now, it is for persistent volumes only.
  //
  // NOTE: We need to modify this once we extend shareability to other
  // resource types.
  if (resource.has_shared()) {
    if (resource.name() != "disk") {
      return Error("Resource " + resource.name() + " cannot be shared");
    }

    if (!resource.has_disk() || !resource.disk().has_persistence()) {
      return Error("Only persistent volumes can be shared");
    }
  }

  return None();
}
// Validates each Resource in the collection, returning the first
// error found (annotated with the offending resource), or None if
// every resource is valid.
Option<Error> Resources::validate(const RepeatedPtrField<Resource>& resources)
{
  for (const Resource& resource : resources) {
    Option<Error> error = validate(resource);

    if (error.isSome()) {
      return Error(
          "Resource '" + stringify(resource) +
          "' is invalid: " + error->message);
    }
  }

  return None();
}
// Returns true if the resource carries no value: a zero scalar, an
// empty range list, or an empty set. Any other value type is never
// considered empty. The CHECKs assert the resource is in the
// "post-reservation-refinement" format.
bool Resources::isEmpty(const Resource& resource)
{
  CHECK(!resource.has_role()) << resource;
  CHECK(!resource.has_reservation()) << resource;

  switch (resource.type()) {
    case Value::SCALAR: {
      Value::Scalar zero;
      zero.set_value(0);
      return resource.scalar() == zero;
    }
    case Value::RANGES:
      return resource.ranges().range_size() == 0;
    case Value::SET:
      return resource.set().item_size() == 0;
    default:
      return false;
  }
}
// NOTE: The `CHECK(!resource.has_role())` / `CHECK(!resource.has_reservation())`
// pairs in the predicates below assert that the resource is in the
// "post-reservation-refinement" format (the deprecated `role` and
// `reservation` fields are cleared by `Resources::parse` above).

// Returns true if the resource is a persistent volume (a 'disk'
// resource with `DiskInfo.persistence` set).
bool Resources::isPersistentVolume(const Resource& resource)
{
  CHECK(!resource.has_role()) << resource;
  CHECK(!resource.has_reservation()) << resource;

  return resource.has_disk() && resource.disk().has_persistence();
}


// Returns true if the resource is a disk with a source of the given
// type (PATH, MOUNT, BLOCK, or RAW).
bool Resources::isDisk(
    const Resource& resource,
    const Resource::DiskInfo::Source::Type& type)
{
  CHECK(!resource.has_role()) << resource;
  CHECK(!resource.has_reservation()) << resource;

  return resource.has_disk() &&
         resource.disk().has_source() &&
         resource.disk().source().type() == type;
}


// Returns true if the resource is reserved; when `role` is given,
// only reservations for exactly that role match.
bool Resources::isReserved(
    const Resource& resource,
    const Option<string>& role)
{
  CHECK(!resource.has_role()) << resource;
  CHECK(!resource.has_reservation()) << resource;

  return !isUnreserved(resource) &&
         (role.isNone() || role.get() == reservationRole(resource));
}


// Returns true if the resource can be allocated to `role`: it is
// unreserved, reserved to `role` itself, or reserved to an ancestor
// role of `role` in the hierarchy.
bool Resources::isAllocatableTo(
    const Resource& resource,
    const std::string& role)
{
  CHECK(!resource.has_role()) << resource;
  CHECK(!resource.has_reservation()) << resource;

  return isUnreserved(resource) ||
         role == reservationRole(resource) ||
         roles::isStrictSubroleOf(role, reservationRole(resource));
}


// Returns true if the resource carries no reservations at all.
bool Resources::isUnreserved(const Resource& resource)
{
  CHECK(!resource.has_role()) << resource;
  CHECK(!resource.has_reservation()) << resource;

  return resource.reservations_size() == 0;
}


// Returns true if the resource's most refined (i.e., last)
// reservation is DYNAMIC.
bool Resources::isDynamicallyReserved(const Resource& resource)
{
  CHECK(!resource.has_role()) << resource;
  CHECK(!resource.has_reservation()) << resource;

  return isReserved(resource) && (resource.reservations().rbegin()->type() ==
                                  Resource::ReservationInfo::DYNAMIC);
}


// Returns true if the resource is revocable (has `RevocableInfo` set).
bool Resources::isRevocable(const Resource& resource)
{
  CHECK(!resource.has_role()) << resource;
  CHECK(!resource.has_reservation()) << resource;

  return resource.has_revocable();
}


// Returns true if the resource is shareable (has `SharedInfo` set).
bool Resources::isShared(const Resource& resource)
{
  CHECK(!resource.has_role()) << resource;
  CHECK(!resource.has_reservation()) << resource;

  return resource.has_shared();
}


// Returns true if the resource has more than one reservation, i.e.,
// it carries refined reservations.
bool Resources::hasRefinedReservations(const Resource& resource)
{
  CHECK(!resource.has_role()) << resource;
  CHECK(!resource.has_reservation()) << resource;

  return resource.reservations_size() > 1;
}


// Returns true if the resource is provided by a resource provider
// (has `provider_id` set).
bool Resources::hasResourceProvider(const Resource& resource)
{
  CHECK(!resource.has_role()) << resource;
  CHECK(!resource.has_reservation()) << resource;

  return resource.has_provider_id();
}


// Returns the role of the most refined (i.e., last) reservation.
// The resource must have at least one reservation.
const string& Resources::reservationRole(const Resource& resource)
{
  CHECK_GT(resource.reservations_size(), 0);
  return resource.reservations().rbegin()->role();
}
// Attempts to shrink a scalar resource down to `target`. Returns true
// (mutating `resource`) on success; returns false and leaves
// `resource` untouched if the resource is indivisible at that size.
bool Resources::shrink(Resource* resource, const Value::Scalar& target)
{
  if (resource->scalar() <= target) {
    return true; // Already within target.
  }

  Resource copy = *resource;
  copy.mutable_scalar()->CopyFrom(target);

  // Some resources (e.g. MOUNT disk) are indivisible. We use a
  // containment check to verify this: only if the resource contains
  // a smaller version of itself can it safely be chopped into a
  // smaller amount.
  if (Resources(*resource).contains(copy)) {
    resource->CopyFrom(copy);
    return true;
  }

  return false;
}
/////////////////////////////////////////////////
// Public member functions.
/////////////////////////////////////////////////
// Validates the wrapped resource; additionally rejects a negative
// shared count for shared resources.
Option<Error> Resources::Resource_::validate() const
{
  if (isShared() && sharedCount.get() < 0) {
    return Error("Invalid shared resource: count < 0");
  }

  return Resources::validate(resource);
}


// A shared resource with a zero count is considered empty even if the
// wrapped Resource itself carries a value.
bool Resources::Resource_::isEmpty() const
{
  if (isShared() && sharedCount.get() == 0) {
    return true;
  }

  return Resources::isEmpty(resource);
}


// Containment between two Resource_ wrappers. Sharedness must match;
// shared resources compare counters (given equal protobufs), while
// non-shared resources delegate to the protobuf-level check.
bool Resources::Resource_::contains(const Resource_& that) const
{
  // Both Resource_ objects should have the same sharedness.
  if (isShared() != that.isShared()) {
    return false;
  }

  // Assuming the wrapped Resource objects are equal, the 'contains'
  // relationship is determined by the relationship of the counters
  // for shared resources.
  if (isShared()) {
    return sharedCount.get() >= that.sharedCount.get() &&
           resource == that.resource;
  }

  // For non-shared resources just compare the protobufs.
  return internal::contains(resource, that.resource);
}


// Adds `that` into this wrapper: non-shared resources add values,
// shared resources add counters only.
Resources::Resource_& Resources::Resource_::operator+=(const Resource_& that)
{
  // This function assumes that the 'resource' fields are addable.
  if (!isShared()) {
    resource += that.resource;
  } else {
    // 'addable' makes sure both 'resource' fields are shared and
    // equal, so we just need to sum up the counters here.
    CHECK_SOME(sharedCount);
    CHECK_SOME(that.sharedCount);

    sharedCount = sharedCount.get() + that.sharedCount.get();
  }

  return *this;
}


// Subtracts `that` from this wrapper: non-shared resources subtract
// values, shared resources subtract counters only.
Resources::Resource_& Resources::Resource_::operator-=(const Resource_& that)
{
  // This function assumes that the 'resource' fields are subtractable.
  if (!isShared()) {
    resource -= that.resource;
  } else {
    // 'subtractable' makes sure both 'resource' fields are shared and
    // equal, so we just need to subtract the counters here.
    CHECK_SOME(sharedCount);
    CHECK_SOME(that.sharedCount);

    sharedCount = sharedCount.get() - that.sharedCount.get();
  }

  return *this;
}


// Equality requires matching sharedness, matching counters (for
// shared resources), and equal wrapped protobufs.
bool Resources::Resource_::operator==(const Resource_& that) const
{
  // Both Resource_ objects should have the same sharedness.
  if (isShared() != that.isShared()) {
    return false;
  }

  // For shared resources to be equal, the shared counts need to match.
  if (isShared() && (sharedCount.get() != that.sharedCount.get())) {
    return false;
  }

  return resource == that.resource;
}


bool Resources::Resource_::operator!=(const Resource_& that) const
{
  return !(*this == that);
}
// Each constructor funnels through operator+=, which silently drops
// invalid and zero-valued Resource objects.
Resources::Resources(const Resource& resource)
{
  *this += resource;
}


Resources::Resources(Resource&& resource)
{
  *this += std::move(resource);
}


Resources::Resources(const vector<Resource>& _resources)
{
  for (const Resource& resource : _resources) {
    *this += resource;
  }
}


Resources::Resources(vector<Resource>&& _resources)
{
  // Reserve up front; some slots may go unused if entries are dropped.
  resourcesNoMutationWithoutExclusiveOwnership.reserve(_resources.size());

  for (Resource& resource : _resources) {
    *this += std::move(resource);
  }
}


Resources::Resources(const RepeatedPtrField<Resource>& _resources)
{
  for (const Resource& resource : _resources) {
    *this += resource;
  }
}


Resources::Resources(RepeatedPtrField<Resource>&& _resources)
{
  // Reserve up front; some slots may go unused if entries are dropped.
  resourcesNoMutationWithoutExclusiveOwnership.reserve(_resources.size());

  for (Resource& resource : _resources) {
    *this += std::move(resource);
  }
}
// Checks whether this collection is a superset of `that`. Matched
// persistent volumes are subtracted from the working copy as we go,
// so the same volume in `that` cannot be satisfied twice.
bool Resources::contains(const Resources& that) const
{
  Resources remaining = *this;

  foreach (
      const Resource_Unsafe& resource_,
      that.resourcesNoMutationWithoutExclusiveOwnership) {
    // NOTE: We use _contains because Resources only contain valid
    // Resource objects, and we don't want the performance hit of the
    // validity check.
    if (!remaining._contains(*resource_)) {
      return false;
    }

    if (isPersistentVolume(resource_->resource)) {
      remaining.subtract(*resource_);
    }
  }

  return true;
}


// Checks whether this collection contains a single (possibly invalid)
// Resource object.
bool Resources::contains(const Resource& that) const
{
  // NOTE: We must validate 'that' because invalid resources can lead
  // to false positives here (e.g., "cpus:-1" will return true). This
  // is because 'contains' assumes resources are valid.
  return validate(that).isNone() && _contains(Resource_(that));
}
// Checks whether this collection covers the given resource quantities,
// name by name. For each named quantity we subtract the amount carried
// by every same-named resource (scalar value, set item count, or total
// integers across ranges) until the requirement is met.
//
// This function assumes all quantities with the same name are merged
// in the input `quantities` which is a guaranteed property of
// `ResourceQuantities`.
bool Resources::contains(const ResourceQuantities& quantities) const
{
  foreach (auto& quantity, quantities){
    double remaining = quantity.second.value();

    foreach (const Resource& r, get(quantity.first)) {
      switch (r.type()) {
        case Value::SCALAR: remaining -= r.scalar().value(); break;
        case Value::SET: remaining -= r.set().item_size(); break;
        case Value::RANGES:
          foreach (const Value::Range& range, r.ranges().range()) {
            // Ranges are inclusive on both ends.
            remaining -= range.end() - range.begin() + 1;
            if (remaining <= 0) {
              break;
            }
          }
          break;
        case Value::TEXT:
          // TEXT resources are rejected by `Resources::validate` and
          // should never appear in a `Resources` collection.
          LOG(FATAL) << "Unexpected TEXT type resource " << r << " in "
                     << *this;
          break;
      }

      if (remaining <= 0) {
        break;
      }
    }

    if (remaining > 0) {
      return false;
    }
  }

  return true;
}
// Returns how many instances of `that` this collection holds: the
// shared count for a shared resource, 1 for a matching non-shared
// resource, and 0 if no match is found.
size_t Resources::count(const Resource& that) const
{
  foreach (
      const Resource_Unsafe& resource_,
      resourcesNoMutationWithoutExclusiveOwnership) {
    if (resource_->resource == that) {
      // Return 1 for non-shared resources because non-shared
      // Resource objects in Resources are unique.
      return resource_->isShared() ? CHECK_NOTNONE(resource_->sharedCount) : 1;
    }
  }

  return 0;
}


// Marks every resource in this collection as allocated to `role` by
// setting its `AllocationInfo.role`.
void Resources::allocate(const string& role)
{
  foreach (
      Resource_Unsafe& resource_,
      resourcesNoMutationWithoutExclusiveOwnership) {
    // Copy-on-write (if more than 1 reference): the underlying
    // Resource_ objects may be shared with other Resources instances,
    // so clone before mutating.
    if (resource_.use_count() > 1) {
      resource_ = make_shared<Resource_>(*resource_);
    }
    resource_->resource.mutable_allocation_info()->set_role(role);
  }
}


// Clears the allocation of every resource in this collection.
void Resources::unallocate()
{
  foreach (
      Resource_Unsafe& resource_,
      resourcesNoMutationWithoutExclusiveOwnership) {
    if (resource_->resource.has_allocation_info()) {
      // Copy-on-write (if more than 1 reference).
      if (resource_.use_count() > 1) {
        resource_ = make_shared<Resource_>(*resource_);
      }
      resource_->resource.clear_allocation_info();
    }
  }
}
// Returns the subset of resources satisfying `predicate`. The result
// shares the underlying Resource_ objects with this collection (they
// are reference-counted; mutation elsewhere is copy-on-write).
Resources Resources::filter(
    const lambda::function<bool(const Resource&)>& predicate) const
{
  Resources result;

  result.resourcesNoMutationWithoutExclusiveOwnership.reserve(this->size());

  foreach (
      const Resource_Unsafe& resource_,
      resourcesNoMutationWithoutExclusiveOwnership) {
    if (predicate(resource_->resource)) {
      // We `push_back()` here instead of `add()` (which is O(n)). `add()` is
      // not necessary because we assume all Resource objects are already
      // combined in `Resources` and `filter()` should only take away
      // resource objects.
      result.resourcesNoMutationWithoutExclusiveOwnership.push_back(resource_);
    }
  }

  return result;
}
// Groups the reserved resources by the role of their most refined
// reservation; unreserved resources are excluded.
hashmap<string, Resources> Resources::reservations() const
{
  hashmap<string, Resources> result;

  foreach (
      const Resource_Unsafe& resource_,
      resourcesNoMutationWithoutExclusiveOwnership) {
    if (isReserved(resource_->resource)) {
      result[reservationRole(resource_->resource)].add(resource_);
    }
  }

  return result;
}
// The accessors below are thin wrappers over filter(); each returns
// the subset of resources satisfying one static predicate.

Resources Resources::reserved(const Option<string>& role) const
{
  // NOTE: By-reference capture is safe; filter() invokes the
  // predicate synchronously.
  return filter([&](const Resource& resource) {
    return isReserved(resource, role);
  });
}


Resources Resources::allocatableTo(const string& role) const
{
  return filter([&](const Resource& resource) {
    return isAllocatableTo(resource, role);
  });
}


Resources Resources::unreserved() const
{
  return filter([](const Resource& resource) {
    return isUnreserved(resource);
  });
}


Resources Resources::persistentVolumes() const
{
  return filter([](const Resource& resource) {
    return isPersistentVolume(resource);
  });
}


Resources Resources::revocable() const
{
  return filter([](const Resource& resource) {
    return isRevocable(resource);
  });
}


Resources Resources::nonRevocable() const
{
  return filter([](const Resource& resource) {
    return !isRevocable(resource);
  });
}


Resources Resources::shared() const
{
  return filter([](const Resource& resource) {
    return isShared(resource);
  });
}


Resources Resources::nonShared() const
{
  return filter([](const Resource& resource) {
    return !isShared(resource);
  });
}
// Groups the resources by the role they are allocated to. Every
// resource must already be allocated (see `allocate()`).
hashmap<string, Resources> Resources::allocations() const
{
  hashmap<string, Resources> result;

  foreach (
      const Resource_Unsafe& resource_,
      resourcesNoMutationWithoutExclusiveOwnership) {
    // We require that this is called only when
    // the resources are allocated.
    CHECK(resource_->resource.has_allocation_info());
    CHECK(resource_->resource.allocation_info().has_role());

    result[resource_->resource.allocation_info().role()].add(resource_);
  }

  return result;
}
// Returns a copy of this collection with `reservation` appended to
// every resource's reservation stack. The result is CHECK-validated,
// so the caller must ensure the refinement is legal.
Resources Resources::pushReservation(
    const Resource::ReservationInfo& reservation) const
{
  Resources result;

  foreach (
      const Resource_Unsafe& resource_,
      resourcesNoMutationWithoutExclusiveOwnership) {
    Resource_ r_ = *resource_;
    r_.resource.add_reservations()->CopyFrom(reservation);

    Option<Error> validationError = Resources::validate(r_.resource);
    CHECK_NONE(validationError)
      << "Invalid resource " << r_ << ": " << validationError.get();

    result.add(std::move(r_));
  }

  return result;
}


// Returns a copy of this collection with the most refined (last)
// reservation removed from every resource. Every resource must have
// at least one reservation.
Resources Resources::popReservation() const
{
  Resources result;

  foreach (
      const Resource_Unsafe& resource_,
      resourcesNoMutationWithoutExclusiveOwnership) {
    CHECK_GT(resource_->resource.reservations_size(), 0);

    Resource_ r_ = *resource_;
    r_.resource.mutable_reservations()->RemoveLast();

    result.add(std::move(r_));
  }

  return result;
}


// Returns a copy of this collection with all reservations cleared.
// Already-unreserved resources are shared with the result rather
// than copied.
Resources Resources::toUnreserved() const
{
  Resources result;

  foreach (
      const Resource_Unsafe& resource_,
      resourcesNoMutationWithoutExclusiveOwnership) {
    if (isReserved(resource_->resource)) {
      Resource_ r_ = *resource_;
      r_.resource.clear_reservations();
      result.add(std::move(r_));
    } else {
      result.add(resource_);
    }
  }

  return result;
}
// Returns only the scalar resources of this collection, stripped of
// all metadata: each result carries just name, type, and scalar value.
Resources Resources::createStrippedScalarQuantity() const
{
  Resources stripped;

  for (const Resource_Unsafe& resource_ :
       resourcesNoMutationWithoutExclusiveOwnership) {
    const Resource& original = resource_->resource;

    if (original.type() != Value::SCALAR) {
      continue;
    }

    Resource scalar;
    scalar.set_name(original.name());
    scalar.set_type(original.type());
    scalar.mutable_scalar()->CopyFrom(original.scalar());

    stripped.add(std::move(scalar));
  }

  return stripped;
}
// Locates every target within this collection; returns None as soon
// as any single target cannot be found. Per-target lookup is
// delegated to find(const Resource&).
Option<Resources> Resources::find(const Resources& targets) const
{
  Resources total;

  for (const Resource& target : targets) {
    Option<Resources> found = find(target);

    if (found.isNone()) {
      // Each target needs to be found!
      return None();
    }

    total += found.get();
  }

  return total;
}


// Applies a single resource conversion; delegates to the conversion
// object itself.
Try<Resources> Resources::apply(const ResourceConversion& conversion) const
{
  return conversion.apply(*this);
}
// Applies an offer operation by converting it to a list of resource
// conversions and applying those. Sanity-checks afterwards that the
// operation did not change the total amount of each known resource
// kind.
Try<Resources> Resources::apply(const Offer::Operation& operation) const
{
  Try<vector<ResourceConversion>> conversions =
    getResourceConversions(operation);

  if (conversions.isError()) {
    return Error("Cannot get conversions: " + conversions.error());
  }

  Try<Resources> result = apply(conversions.get());
  if (result.isError()) {
    return Error(result.error());
  }

  // The following are sanity checks to ensure the amount of each type
  // of resource does not change.
  // TODO(jieyu): Currently, we only check known resource types like
  // cpus, gpus, mem, disk, ports, etc. We should generalize this.
  CHECK(result->cpus() == cpus());
  CHECK(result->gpus() == gpus());
  CHECK(result->mem() == mem());
  CHECK(result->disk() == disk());
  CHECK(result->ports() == ports());

  return result;
}
// The three specializations below sum up all resources with the given
// name and matching value type, returning None if no such resource
// exists.

template <>
Option<Value::Scalar> Resources::get(const string& name) const
{
  Value::Scalar total;
  bool found = false;

  foreach (
      const Resource_Unsafe& resource_,
      resourcesNoMutationWithoutExclusiveOwnership) {
    if (resource_->resource.name() == name &&
        resource_->resource.type() == Value::SCALAR) {
      total += resource_->resource.scalar();
      found = true;
    }
  }

  if (found) {
    return total;
  }

  return None();
}


template <>
Option<Value::Set> Resources::get(const string& name) const
{
  Value::Set total;
  bool found = false;

  foreach (
      const Resource_Unsafe& resource_,
      resourcesNoMutationWithoutExclusiveOwnership) {
    if (resource_->resource.name() == name &&
        resource_->resource.type() == Value::SET) {
      total += resource_->resource.set();
      found = true;
    }
  }

  if (found) {
    return total;
  }

  return None();
}


template <>
Option<Value::Ranges> Resources::get(const string& name) const
{
  Value::Ranges total;
  bool found = false;

  foreach (
      const Resource_Unsafe& resource_,
      resourcesNoMutationWithoutExclusiveOwnership) {
    if (resource_->resource.name() == name &&
        resource_->resource.type() == Value::RANGES) {
      total += resource_->resource.ranges();
      found = true;
    }
  }

  if (found) {
    return total;
  }

  return None();
}
// Returns all resources (of any value type) with the given name.
Resources Resources::get(const string& name) const
{
  // NOTE: By-reference capture is safe; filter() invokes the
  // predicate synchronously.
  return filter([&name](const Resource& resource) {
    return resource.name() == name;
  });
}


// Returns all scalar-typed resources, irrespective of name.
Resources Resources::scalars() const
{
  return filter([](const Resource& resource) {
    return resource.type() == Value::SCALAR;
  });
}


// Returns the distinct resource names present in this collection.
set<string> Resources::names() const
{
  set<string> result;

  for (const Resource_Unsafe& resource_ :
       resourcesNoMutationWithoutExclusiveOwnership) {
    result.insert(resource_->resource.name());
  }

  return result;
}


// Returns a mapping from resource name to its value type.
map<string, Value_Type> Resources::types() const
{
  map<string, Value_Type> result;

  for (const Resource_Unsafe& resource_ :
       resourcesNoMutationWithoutExclusiveOwnership) {
    result[resource_->resource.name()] = resource_->resource.type();
  }

  return result;
}
// Convenience accessors for the well-known resource kinds; each
// returns None when the resource is entirely absent.

Option<double> Resources::cpus() const
{
  Option<Value::Scalar> value = get<Value::Scalar>("cpus");

  if (value.isNone()) {
    return None();
  }

  return value->value();
}


Option<double> Resources::gpus() const
{
  Option<Value::Scalar> value = get<Value::Scalar>("gpus");

  if (value.isNone()) {
    return None();
  }

  return value->value();
}


Option<Bytes> Resources::mem() const
{
  Option<Value::Scalar> value = get<Value::Scalar>("mem");

  if (value.isNone()) {
    return None();
  }

  // Scalar memory values are interpreted as megabytes.
  return Megabytes(static_cast<uint64_t>(value->value()));
}


Option<Bytes> Resources::disk() const
{
  Option<Value::Scalar> value = get<Value::Scalar>("disk");

  if (value.isNone()) {
    return None();
  }

  // Scalar disk values are interpreted as megabytes.
  return Megabytes(static_cast<uint64_t>(value->value()));
}


Option<Value::Ranges> Resources::ports() const
{
  Option<Value::Ranges> value = get<Value::Ranges>("ports");

  if (value.isNone()) {
    return None();
  }

  return value.get();
}


Option<Value::Ranges> Resources::ephemeral_ports() const
{
  Option<Value::Ranges> value = get<Value::Ranges>("ephemeral_ports");

  if (value.isNone()) {
    return None();
  }

  return value.get();
}
/////////////////////////////////////////////////
// Private member functions.
/////////////////////////////////////////////////
bool Resources::_contains(const Resource_& that) const
{
foreach (
const Resource_Unsafe& resource_,
resourcesNoMutationWithoutExclusiveOwnership) {
if (resource_->contains(that)) {
return true;
}
}
return false;
}
// Tries to assemble `target` (value-wise, ignoring reservations) out
// of this collection, preferring resources reserved to the target's
// role, then unreserved resources, then anything else. On success the
// found resources carry their own reservations, not the target's.
Option<Resources> Resources::find(const Resource& target) const
{
  Resources found;
  Resources total = *this;
  Resources remaining = Resources(target).toUnreserved();

  // First look in the target role, then unreserved, then any remaining role.
  vector<lambda::function<bool(const Resource&)>> predicates;

  if (isReserved(target)) {
    predicates.push_back(
        lambda::bind(isReserved, lambda::_1, reservationRole(target)));
  }

  predicates.push_back(isUnreserved);
  predicates.push_back([](const Resource&) { return true; });

  foreach (const auto& predicate, predicates) {
    foreach (
        const Resource_Unsafe& resource_,
        total.filter(predicate).resourcesNoMutationWithoutExclusiveOwnership) {
      // Need to `toUnreserved` to ignore the roles in contains().
      Resources unreserved;
      unreserved.add(resource_);
      unreserved = unreserved.toUnreserved();

      if (unreserved.contains(remaining)) {
        // The target has been found, return the result. Each found
        // piece keeps the reservations of the resource it came from.
        foreach (Resource r, remaining) {
          r.mutable_reservations()->CopyFrom(
              resource_->resource.reservations());

          found.add(std::move(r));
        }

        return found;
      } else if (remaining.contains(unreserved)) {
        // This resource covers only part of what is still needed:
        // take all of it and keep searching for the rest.
        found.add(resource_);
        total.subtract(*resource_);
        remaining -= unreserved;
        break;
      }
    }
  }

  return None();
}
/////////////////////////////////////////////////
// Overloaded operators.
/////////////////////////////////////////////////
// Implicit conversion to the protobuf representation; copies every
// underlying Resource object.
Resources::operator RepeatedPtrField<Resource>() const
{
  RepeatedPtrField<Resource> all;

  for (const Resource_Unsafe& resource_ :
       resourcesNoMutationWithoutExclusiveOwnership) {
    all.Add()->CopyFrom(resource_->resource);
  }

  return all;
}


// Two Resources collections are equal iff each contains the other;
// the internal ordering of the underlying objects is irrelevant.
bool Resources::operator==(const Resources& that) const
{
  return contains(that) && that.contains(*this);
}


bool Resources::operator!=(const Resources& that) const
{
  return !(*this == that);
}
// The operator+ overloads below are ref-qualified (`const &` vs `&&`) so
// that rvalue operands can be moved from instead of copied, avoiding
// needless copy-on-write churn on the shared internal storage.

Resources Resources::operator+(const Resource& that) const &
{
  Resources result = *this;
  result += that;
  return result;
}


Resources Resources::operator+(const Resource& that) &&
{
  Resources result = std::move(*this);
  result += that;
  return result;
}


Resources Resources::operator+(Resource&& that) const &
{
  Resources result = *this;
  result += std::move(that);
  return result;
}


Resources Resources::operator+(Resource&& that) &&
{
  Resources result = std::move(*this);
  result += std::move(that);
  return result;
}


Resources Resources::operator+(const Resources& that) const &
{
  Resources result = *this;
  result += that;
  return result;
}


Resources Resources::operator+(const Resources& that) &&
{
  Resources result = std::move(*this);
  result += that;
  return result;
}


Resources Resources::operator+(Resources&& that) const &
{
  // Addition is commutative, so reuse `that`'s storage and add *this into
  // it rather than copying *this.
  Resources result = std::move(that);
  result += *this;
  return result;
}


Resources Resources::operator+(Resources&& that) &&
{
  Resources result = std::move(*this);
  result += std::move(that);
  return result;
}
// Merges `that` into the first existing element it is addable with; if no
// such element exists, appends it as a new element. Empty resources are
// silently dropped.
void Resources::add(const Resource_& that)
{
  if (that.isEmpty()) {
    return;
  }

  bool found = false;
  foreach (
      Resource_Unsafe& resource_,
      resourcesNoMutationWithoutExclusiveOwnership) {
    if (internal::addable(resource_->resource, that.resource)) {
      // Copy-on-write (if more than 1 reference): never mutate an element
      // whose shared_ptr is also held by another Resources collection.
      if (resource_.use_count() > 1) {
        resource_ = make_shared<Resource_>(*resource_);
      }
      *resource_ += that;
      found = true;
      break;
    }
  }

  // Cannot be combined with any existing Resource object.
  if (!found) {
    resourcesNoMutationWithoutExclusiveOwnership.push_back(
        make_shared<Resource_>(that));
  }
}
// Move-accepting variant of add(). Same merging semantics as the copying
// overload, but when a copy-on-write would be needed the addition is done
// into `that` (addition is commutative) so that `that`'s storage can be
// moved into place instead of copying the shared element.
void Resources::add(Resource_&& that)
{
  if (that.isEmpty()) {
    return;
  }

  bool found = false;
  foreach (
      Resource_Unsafe& resource_,
      resourcesNoMutationWithoutExclusiveOwnership) {
    if (internal::addable(resource_->resource, that.resource)) {
      // Copy-on-write (if more than 1 reference).
      if (resource_.use_count() > 1) {
        that += *resource_;
        resource_ = make_shared<Resource_>(std::move(that));
      } else {
        *resource_ += that;
      }
      found = true;
      break;
    }
  }

  // Cannot be combined with any existing Resource object.
  if (!found) {
    resourcesNoMutationWithoutExclusiveOwnership.push_back(
        make_shared<Resource_>(std::move(that)));
  }
}
// shared_ptr-accepting variant of add(). If `that` cannot be merged into
// an existing element, the shared_ptr itself is stored, so ownership of
// the underlying Resource_ becomes shared across collections (deferring
// any copy to a later copy-on-write).
void Resources::add(const Resource_Unsafe& that)
{
  if (that->isEmpty()) {
    return;
  }

  bool found = false;
  foreach (
      Resource_Unsafe& resource_,
      resourcesNoMutationWithoutExclusiveOwnership) {
    if (internal::addable(resource_->resource, that->resource)) {
      // Copy-on-write (if more than 1 reference).
      if (resource_.use_count() > 1) {
        resource_ = make_shared<Resource_>(*resource_);
      }
      *resource_ += *that;
      found = true;
      break;
    }
  }

  // Cannot be combined with any existing Resource object.
  if (!found) {
    resourcesNoMutationWithoutExclusiveOwnership.push_back(that);
  }
}
// NOTE: the Resource_/Resource overloads below silently drop resources
// that fail validate() -- invalid input is ignored rather than reported.

Resources& Resources::operator+=(const Resource_& that)
{
  if (that.validate().isNone()) {
    add(that);
  }

  return *this;
}


Resources& Resources::operator+=(Resource_&& that)
{
  if (that.validate().isNone()) {
    add(std::move(that));
  }

  return *this;
}


Resources& Resources::operator+=(const Resource& that)
{
  *this += Resource_(that);

  return *this;
}


Resources& Resources::operator+=(Resource&& that)
{
  *this += Resource_(std::move(that));

  return *this;
}


Resources& Resources::operator+=(const Resources& that)
{
  foreach (
      const Resource_Unsafe& resource_,
      that.resourcesNoMutationWithoutExclusiveOwnership) {
    add(resource_);
  }

  return *this;
}


Resources& Resources::operator+=(Resources&& that)
{
  foreach (
      const Resource_Unsafe& resource_,
      that.resourcesNoMutationWithoutExclusiveOwnership) {
    // NOTE(review): `resource_` is a const reference, so this std::move
    // yields `const Resource_Unsafe&&`, which still binds to the
    // add(const Resource_Unsafe&) overload -- the move appears to be a
    // no-op and the shared_ptr is shared, not stolen. Harmless, but worth
    // confirming against the add() overload set.
    add(std::move(resource_));
  }

  return *this;
}
// Subtraction returns a copy; the actual work happens in operator-=/
// subtract() below.
Resources Resources::operator-(const Resource& that) const
{
  Resources result = *this;
  result -= that;
  return result;
}


Resources Resources::operator-(const Resources& that) const
{
  Resources result = *this;
  result -= that;
  return result;
}
// Subtracts `that` from the first subtractable element, applying
// copy-on-write if the element is shared. An element that becomes empty
// (or negative -- i.e. the caller over-subtracted) is removed.
void Resources::subtract(const Resource_& that)
{
  if (that.isEmpty()) {
    return;
  }

  for (size_t i = 0; i < resourcesNoMutationWithoutExclusiveOwnership.size();
       i++) {
    Resource_Unsafe& resource_ =
        resourcesNoMutationWithoutExclusiveOwnership[i];

    if (internal::subtractable(resource_->resource, that)) {
      // Copy-on-write (if more than 1 reference).
      if (resource_.use_count() > 1) {
        resource_ = make_shared<Resource_>(*resource_);
      }
      *resource_ -= that;

      // Remove the resource if it has become negative or empty.
      // Note that a negative resource means the caller is
      // subtracting more than they should!
      //
      // TODO(gyliu513): Provide a stronger interface to avoid
      // silently allowing this to occur.

      // A "negative" Resource_ either has a negative sharedCount or
      // a negative scalar value.
      bool negative =
        (resource_->isShared() && resource_->sharedCount.get() < 0) ||
        (resource_->resource.type() == Value::SCALAR &&
         resource_->resource.scalar().value() < 0);

      if (negative || resource_->isEmpty()) {
        // As `resources` is not ordered, and erasing an element
        // from the middle is expensive, we swap with the last element
        // and then shrink the vector by one.
        resourcesNoMutationWithoutExclusiveOwnership[i] =
          resourcesNoMutationWithoutExclusiveOwnership.back();
        resourcesNoMutationWithoutExclusiveOwnership.pop_back();
      }

      break;
    }
  }
}
// As with operator+=, resources that fail validate() are silently ignored.
Resources& Resources::operator-=(const Resource_& that)
{
  if (that.validate().isNone()) {
    subtract(that);
  }

  return *this;
}


Resources& Resources::operator-=(const Resource& that)
{
  *this -= Resource_(that);

  return *this;
}


Resources& Resources::operator-=(const Resources& that)
{
  foreach (
      const Resource_Unsafe& resource_,
      that.resourcesNoMutationWithoutExclusiveOwnership) {
    subtract(*resource_);
  }

  return *this;
}
// Formats a disk source as "<TYPE><details>". CSI-backed sources (those
// with an id or profile) render their details as "(vendor,id,profile)";
// otherwise MOUNT/PATH sources append ":<root>" when a root is set.
ostream& operator<<(ostream& stream, const Resource::DiskInfo::Source& source)
{
  const Option<string> csiSource = source.has_id() || source.has_profile()
    ? "(" + source.vendor() + "," + source.id() + "," + source.profile() + ")"
    : Option<string>::none();

  switch (source.type()) {
    case Resource::DiskInfo::Source::MOUNT:
      return stream << "MOUNT" << csiSource.getOrElse(
          source.mount().has_root() ? ":" + source.mount().root() : "");
    case Resource::DiskInfo::Source::PATH:
      return stream << "PATH" << csiSource.getOrElse(
          source.path().has_root() ? ":" + source.path().root() : "");
    case Resource::DiskInfo::Source::BLOCK:
      return stream << "BLOCK" << csiSource.getOrElse("");
    case Resource::DiskInfo::Source::RAW:
      return stream << "RAW" << csiSource.getOrElse("");
    case Resource::DiskInfo::Source::UNKNOWN:
      return stream << "UNKNOWN";
  }

  UNREACHABLE();
}
// Formats a volume as "[hostPath:]containerPath[:rw|:ro]"; the mode suffix
// is only printed when a host path is present. An unrecognized mode value
// aborts the process via LOG(FATAL).
ostream& operator<<(ostream& stream, const Volume& volume)
{
  string volumeConfig = volume.container_path();

  if (volume.has_host_path()) {
    volumeConfig = volume.host_path() + ":" + volumeConfig;

    if (volume.has_mode()) {
      switch (volume.mode()) {
        case Volume::RW: volumeConfig += ":rw"; break;
        case Volume::RO: volumeConfig += ":ro"; break;
        default:
          LOG(FATAL) << "Unknown Volume mode: " << volume.mode();
          break;
      }
    }
  }

  stream << volumeConfig;

  return stream;
}
// Formats labels as "{key[: value], key[: value], ...}"; the value part is
// omitted for labels that carry no value.
ostream& operator<<(ostream& stream, const Labels& labels)
{
  stream << "{";

  const int count = labels.labels().size();
  for (int i = 0; i < count; ++i) {
    if (i > 0) {
      stream << ", ";
    }

    const Label& label = labels.labels().Get(i);

    stream << label.key();
    if (label.has_value()) {
      stream << ": " << label.value();
    }
  }

  stream << "}";

  return stream;
}
// Formats a reservation as "TYPE,role[,principal][,labels]"; principal and
// labels are only printed when present.
ostream& operator<<(
    ostream& stream,
    const Resource::ReservationInfo& reservation)
{
  stream << Resource::ReservationInfo::Type_Name(reservation.type()) << ","
         << reservation.role();

  if (reservation.has_principal()) {
    stream << "," << reservation.principal();
  }

  if (reservation.has_labels()) {
    stream << "," << reservation.labels();
  }

  return stream;
}
// Formats disk info as "[source][,persistenceId][:volume]"; each piece is
// optional and only the comma between source and persistence is emitted
// conditionally.
ostream& operator<<(ostream& stream, const Resource::DiskInfo& disk)
{
  if (disk.has_source()) {
    stream << disk.source();
  }

  if (disk.has_persistence()) {
    if (disk.has_source()) {
      stream << ",";
    }
    stream << disk.persistence().id();
  }

  if (disk.has_volume()) {
    stream << ":" << disk.volume();
  }

  return stream;
}
// Formats a resource as
// "name[(allocated: role)][(reservations: [...])][disk]{REV}<SHARED>:value".
// An unexpected Value type aborts via LOG(FATAL).
ostream& operator<<(ostream& stream, const Resource& resource)
{
  stream << resource.name();

  if (resource.has_allocation_info()) {
    stream << "(allocated: " << resource.allocation_info().role() << ")";
  }

  if (resource.reservations_size() > 0) {
    stream << "(reservations: [";

    for (int i = 0; i < resource.reservations_size(); ++i) {
      if (i > 0) {
        stream << ", ";
      }
      stream << "(" << resource.reservations(i) << ")";
    }

    stream << "])";
  }

  if (resource.has_disk()) {
    stream << "[" << resource.disk() << "]";
  }

  // Once extended revocable attributes are available, change this to a more
  // meaningful value.
  if (resource.has_revocable()) {
    stream << "{REV}";
  }

  if (resource.has_shared()) {
    stream << "<SHARED>";
  }

  stream << ":";

  switch (resource.type()) {
    case Value::SCALAR: stream << resource.scalar(); break;
    case Value::RANGES: stream << resource.ranges(); break;
    case Value::SET: stream << resource.set(); break;
    default:
      LOG(FATAL) << "Unexpected Value type: " << resource.type();
      break;
  }

  return stream;
}
// Formats a Resource_ like its underlying Resource, with the share count
// appended as "<N>" for shared resources.
ostream& operator<<(ostream& stream, const Resources::Resource_& resource_)
{
  stream << resource_.resource;
  if (resource_.isShared()) {
    stream << "<" << resource_.sharedCount.get() << ">";
  }

  return stream;
}
// Formats a collection as "{}" when empty, otherwise as the elements
// joined with "; ".
ostream& operator<<(ostream& stream, const Resources& resources)
{
  if (resources.empty()) {
    stream << "{}";
    return stream;
  }

  bool first = true;
  for (Resources::const_iterator it = resources.begin();
       it != resources.end();
       ++it) {
    if (!first) {
      stream << "; ";
    }
    stream << *it;
    first = false;
  }

  return stream;
}
// We use `JSON::protobuf` to print the resources here because these
// resources may not have been validated, or not converted to
// "post-reservation-refinement" format at this point, so the richer
// Resource printer above cannot be assumed safe.
ostream& operator<<(
    ostream& stream,
    const google::protobuf::RepeatedPtrField<Resource>& resources)
{
  return stream << JSON::protobuf(resources);
}
// Applies this conversion to `resources`: removes `consumed` and adds
// `converted` in its place. Fails with an Error if `resources` does not
// contain `consumed`, or if the optional post-validation hook rejects the
// converted result.
Try<Resources> ResourceConversion::apply(const Resources& resources) const
{
  Resources result = resources;

  if (!result.contains(consumed)) {
    return Error(
        stringify(result) + " does not contain " +
        stringify(consumed));
  }

  result -= consumed;
  result += converted;

  if (postValidation.isSome()) {
    Try<Nothing> validation = postValidation.get()(result);
    if (validation.isError()) {
      return Error(validation.error());
    }
  }

  return result;
}
} // namespace v1 {
} // namespace mesos {
| jpeach/mesos | src/v1/resources.cpp | C++ | apache-2.0 | 66,806 |
/*
* Copyright (c) 2005-2010 Grameen Foundation USA
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*
* See also http://www.apache.org/licenses/LICENSE-2.0.html for an
* explanation of the license and how it is applied.
*/
package org.mifos.customers.center.business.service;
import java.util.Date;
import junit.framework.Assert;
import org.mifos.accounts.productdefinition.business.SavingsOfferingBO;
import org.mifos.accounts.savings.business.SavingsBO;
import org.mifos.accounts.savings.util.helpers.SavingsTestHelper;
import org.mifos.accounts.util.helpers.AccountStates;
import org.mifos.application.meeting.business.MeetingBO;
import org.mifos.customers.business.CustomerBO;
import org.mifos.customers.center.business.CenterBO;
import org.mifos.customers.client.business.ClientBO;
import org.mifos.customers.group.business.GroupBO;
import org.mifos.customers.util.helpers.CustomerStatus;
import org.mifos.framework.MifosIntegrationTestCase;
import org.mifos.framework.business.service.ServiceFactory;
import org.mifos.framework.exceptions.ServiceException;
import org.mifos.framework.hibernate.helper.QueryResult;
import org.mifos.framework.hibernate.helper.StaticHibernateUtil;
import org.mifos.framework.persistence.TestDatabase;
import org.mifos.framework.util.helpers.BusinessServiceName;
import org.mifos.framework.util.helpers.TestObjectFactory;
/**
 * Integration tests for {@link CenterBusinessService}: retrieval by id and
 * system id, search, and behavior when the database connection is broken.
 */
public class CenterBusinessServiceIntegrationTest extends MifosIntegrationTestCase {

    public CenterBusinessServiceIntegrationTest() throws Exception {
        super();
    }

    private CustomerBO center;
    private CustomerBO group;
    private CustomerBO client;
    private SavingsTestHelper helper = new SavingsTestHelper();
    private SavingsOfferingBO savingsOffering;
    private SavingsBO savingsBO;
    private CenterBusinessService service;

    @Override
    protected void setUp() throws Exception {
        super.setUp();
        service = (CenterBusinessService) ServiceFactory.getInstance().getBusinessService(BusinessServiceName.Center);
    }

    @Override
    public void tearDown() throws Exception {
        try {
            TestObjectFactory.cleanUp(savingsBO);
            TestObjectFactory.cleanUp(client);
            TestObjectFactory.cleanUp(group);
            TestObjectFactory.cleanUp(center);
        } catch (Exception e) {
            // Cleanup failed (e.g. a test left the session in a bad state);
            // reset the database so subsequent tests start from a clean slate.
            TestDatabase.resetMySQLDatabase();
        }
        StaticHibernateUtil.closeSession();
        super.tearDown();
    }

    public void testGetCenter() throws Exception {
        verifyActiveCenterRetrieval("center1");
    }

    public void testSuccessfulGet() throws Exception {
        verifyActiveCenterRetrieval("Center2");
    }

    public void testFailureGet() throws Exception {
        center = createCenter("Center1");
        StaticHibernateUtil.closeSession();
        TestObjectFactory.simulateInvalidConnection();
        try {
            service.getCenter(center.getCustomerId());
            Assert.assertTrue(false);
        } catch (ServiceException e) {
            Assert.assertTrue(true);
        } finally {
            StaticHibernateUtil.closeSession();
        }
    }

    public void testFailureFindBySystemId() throws Exception {
        center = createCenter("Center1");
        StaticHibernateUtil.closeSession();
        TestObjectFactory.simulateInvalidConnection();
        try {
            service.findBySystemId(center.getGlobalCustNum());
            Assert.assertTrue(false);
        } catch (ServiceException e) {
            Assert.assertTrue(true);
        } finally {
            StaticHibernateUtil.closeSession();
        }
    }

    public void testSearch() throws Exception {
        center = createCenter("center1");
        QueryResult queryResult = service.search("center1", Short.valueOf("1"));
        Assert.assertEquals(1, queryResult.getSize());
        Assert.assertEquals(1, queryResult.get(0, 10).size());
    }

    public void testFailureSearch() throws Exception {
        center = createCenter("Center1");
        TestObjectFactory.simulateInvalidConnection();
        try {
            service.search("center1", Short.valueOf("1"));
            Assert.assertTrue(false);
        } catch (ServiceException e) {
            Assert.assertTrue(true);
        } finally {
            StaticHibernateUtil.closeSession();
        }
    }

    /**
     * Creates a center named {@code centerName} with a group, a client and a
     * savings account, then retrieves it through the service and verifies its
     * name, account counts and active status. Shared by testGetCenter and
     * testSuccessfulGet, which previously duplicated this logic line-for-line.
     */
    private void verifyActiveCenterRetrieval(String centerName) throws Exception {
        center = createCenter(centerName);
        createAccountsForCenter();
        savingsBO = getSavingsAccount(center, "fsaf6", "ads6");
        StaticHibernateUtil.closeSession();
        center = service.getCenter(center.getCustomerId());
        Assert.assertNotNull(center);
        Assert.assertEquals(centerName, center.getDisplayName());
        Assert.assertEquals(2, center.getAccounts().size());
        Assert.assertEquals(0, center.getOpenLoanAccounts().size());
        Assert.assertEquals(1, center.getOpenSavingAccounts().size());
        Assert.assertEquals(CustomerStatus.CENTER_ACTIVE.getValue(), center.getCustomerStatus().getId());
        StaticHibernateUtil.closeSession();
        retrieveAccountsToDelete();
    }

    /** Creates an approved savings account for {@code customerBO} under a fresh offering. */
    private SavingsBO getSavingsAccount(CustomerBO customerBO, String offeringName, String shortName) throws Exception {
        savingsOffering = helper.createSavingsOffering(offeringName, shortName);
        return TestObjectFactory.createSavingsAccount("000100000000017", customerBO,
                AccountStates.SAVINGS_ACC_APPROVED, new Date(System.currentTimeMillis()), savingsOffering);
    }

    /** Creates a weekly-fee center with a typical meeting. */
    private CenterBO createCenter(String name) {
        MeetingBO meeting = TestObjectFactory.createMeeting(TestObjectFactory.getTypicalMeeting());
        return TestObjectFactory.createWeeklyFeeCenter(name, meeting);
    }

    private GroupBO createGroup(String groupName) {
        return TestObjectFactory.createWeeklyFeeGroupUnderCenter(groupName, CustomerStatus.GROUP_ACTIVE, center);
    }

    private ClientBO createClient(String clientName) {
        return TestObjectFactory.createClient(clientName, CustomerStatus.CLIENT_ACTIVE, group);
    }

    /** Attaches an active group and client to the current center. */
    private void createAccountsForCenter() throws Exception {
        String groupName = "Group_Active_test";
        group = createGroup(groupName);
        client = createClient("Client_Active_test");
    }

    /** Re-fetches the created entities so tearDown() can clean them up. */
    private void retrieveAccountsToDelete() {
        savingsBO = TestObjectFactory.getObject(SavingsBO.class, savingsBO.getAccountId());
        center = TestObjectFactory.getCenter(center.getCustomerId());
        group = TestObjectFactory.getGroup(group.getCustomerId());
        client = TestObjectFactory.getClient(client.getCustomerId());
    }
}
| mifos/1.5.x | application/src/test/java/org/mifos/customers/center/business/service/CenterBusinessServiceIntegrationTest.java | Java | apache-2.0 | 7,818 |
/*
* Copyright 2013~2014 Dan Haywood
*
* Licensed under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.isisaddons.wicket.summernote.fixture.dom;
import org.apache.isis.applib.DomainObjectContainer;
import org.apache.isis.applib.annotation.*;
import org.apache.isis.applib.query.QueryDefault;
import org.apache.isis.applib.services.clock.ClockService;
import org.joda.time.LocalDate;
import java.math.BigDecimal;
import java.util.Collections;
import java.util.List;
/**
 * Apache Isis domain service exposing to-do item actions (list not-yet-complete,
 * list complete, create, list all, auto-complete) under the "ToDos" tertiary
 * menu. Queries are scoped to the currently logged-in user.
 */
@DomainService(menuOrder = "10")
@DomainServiceLayout(
        menuBar= DomainServiceLayout.MenuBar.TERTIARY,
        named = "ToDos"
)
public class SummernoteEditorToDoItems {

    public SummernoteEditorToDoItems() {
    }

    // //////////////////////////////////////
    // Identification in the UI
    // //////////////////////////////////////

    /** Stable identifier used by the UI for this service. */
    public String getId() {
        return "toDoItems";
    }

    /** Icon shown for this service in the UI. */
    public String iconName() {
        return "ToDoItem";
    }

    // //////////////////////////////////////
    // NotYetComplete (action)
    // //////////////////////////////////////

    /**
     * Lists the current user's not-yet-complete items; informs the user
     * (rather than returning silently) when there are none.
     */
    @Action(
            semantics = SemanticsOf.SAFE
    )
    @ActionLayout(
            bookmarking = BookmarkPolicy.AS_ROOT
    )
    @MemberOrder(sequence = "1")
    public List<SummernoteEditorToDoItem> notYetComplete() {
        final List<SummernoteEditorToDoItem> items = notYetCompleteNoUi();
        if(items.isEmpty()) {
            container.informUser("All to-do items have been completed :-)");
        }
        return items;
    }

    /** Query-only variant for programmatic callers (no UI feedback). */
    @Programmatic
    public List<SummernoteEditorToDoItem> notYetCompleteNoUi() {
        return container.allMatches(
                new QueryDefault<>(SummernoteEditorToDoItem.class,
                        "todo_notYetComplete",
                        "ownedBy", currentUserName()));
    }

    // //////////////////////////////////////
    // Complete (action)
    // //////////////////////////////////////

    /**
     * Lists the current user's completed items; informs the user when there
     * are none yet.
     */
    @Action(
            semantics = SemanticsOf.SAFE
    )
    @MemberOrder(sequence = "3")
    public List<SummernoteEditorToDoItem> complete() {
        final List<SummernoteEditorToDoItem> items = completeNoUi();
        if(items.isEmpty()) {
            container.informUser("No to-do items have yet been completed :-(");
        }
        return items;
    }

    /** Query-only variant for programmatic callers (no UI feedback). */
    @Programmatic
    public List<SummernoteEditorToDoItem> completeNoUi() {
        return container.allMatches(
                new QueryDefault<>(SummernoteEditorToDoItem.class,
                        "todo_complete",
                        "ownedBy", currentUserName()));
    }

    // //////////////////////////////////////
    // NewToDo (action)
    // //////////////////////////////////////

    /**
     * Creates a new to-do item owned by the current user. Delegates to the
     * programmatic {@code newToDo(...)} overload below.
     */
    @MemberOrder(sequence = "40")
    public SummernoteEditorToDoItem newToDo(
            @ParameterLayout(named="Description") @Parameter(regexPattern = "\\w[@&:\\-\\,\\.\\+ \\w]*")
            final String description,
            @ParameterLayout(named="Category")
            final SummernoteEditorToDoItem.Category category,
            @ParameterLayout(named="Subcategory")
            final SummernoteEditorToDoItem.Subcategory subcategory,
            @ParameterLayout(named="Due by") @Parameter(optionality = Optionality.OPTIONAL)
            final LocalDate dueBy,
            @ParameterLayout(named="Cost") @Parameter(optionality = Optionality.OPTIONAL)
            final BigDecimal cost,
            @ParameterLayout(named="Previous cost") @Parameter(optionality = Optionality.OPTIONAL)
            final BigDecimal previousCost) {
        final String ownedBy = currentUserName();
        return newToDo(description, category, subcategory, ownedBy, dueBy, cost, previousCost);
    }

    /** Default for the "Category" parameter of newToDo. */
    public SummernoteEditorToDoItem.Category default1NewToDo() {
        return SummernoteEditorToDoItem.Category.Professional;
    }

    /** Default for the "Subcategory" parameter of newToDo. */
    public SummernoteEditorToDoItem.Subcategory default2NewToDo() {
        return SummernoteEditorToDoItem.Category.Professional.subcategories().get(0);
    }

    /** Default for the "Due by" parameter: two weeks from today. */
    public LocalDate default3NewToDo() {
        return clockService.now().plusDays(14);
    }

    /** Restricts the subcategory choices to those of the selected category. */
    public List<SummernoteEditorToDoItem.Subcategory> choices2NewToDo(
            final String description, final SummernoteEditorToDoItem.Category category) {
        return SummernoteEditorToDoItem.Subcategory.listFor(category);
    }

    /** Validates that the chosen subcategory belongs to the chosen category. */
    public String validateNewToDo(
            final String description,
            final SummernoteEditorToDoItem.Category category,
            final SummernoteEditorToDoItem.Subcategory subcategory,
            final LocalDate dueBy,
            final BigDecimal cost,
            final BigDecimal previousCost) {
        return SummernoteEditorToDoItem.Subcategory.validate(category, subcategory);
    }

    // //////////////////////////////////////
    // AllToDos (action)
    // //////////////////////////////////////

    /**
     * Lists all of the current user's items, sorted; warns the user when
     * none are found.
     */
    @Action(
            semantics = SemanticsOf.SAFE
    )
    @MemberOrder(sequence = "50")
    public List<SummernoteEditorToDoItem> allToDos() {
        final String currentUser = currentUserName();
        final List<SummernoteEditorToDoItem> items = container.allMatches(SummernoteEditorToDoItem.class, SummernoteEditorToDoItem.Predicates.thoseOwnedBy(currentUser));
        Collections.sort(items);
        if(items.isEmpty()) {
            container.warnUser("No to-do items found.");
        }
        return items;
    }

    // //////////////////////////////////////
    // AutoComplete
    // //////////////////////////////////////

    /** Matches the current user's items by description for auto-complete. */
    @Programmatic // not part of metamodel
    public List<SummernoteEditorToDoItem> autoComplete(final String description) {
        return container.allMatches(
                new QueryDefault<>(SummernoteEditorToDoItem.class,
                        "todo_autoComplete",
                        "ownedBy", currentUserName(),
                        "description", description));
    }

    // //////////////////////////////////////
    // Programmatic Helpers
    // //////////////////////////////////////

    /**
     * Creates and persists a to-do item for an explicit owner; used by the
     * UI action above and by fixtures.
     */
    @Programmatic // for use by fixtures
    public SummernoteEditorToDoItem newToDo(
            final String description,
            final SummernoteEditorToDoItem.Category category,
            final SummernoteEditorToDoItem.Subcategory subcategory,
            final String userName,
            final LocalDate dueBy,
            final BigDecimal cost,
            final BigDecimal previousCost) {
        final SummernoteEditorToDoItem toDoItem = container.newTransientInstance(SummernoteEditorToDoItem.class);
        toDoItem.setDescription(description);
        toDoItem.setCategory(category);
        toDoItem.setSubcategory(subcategory);
        toDoItem.setOwnedBy(userName);
        toDoItem.setDueBy(dueBy);
        toDoItem.setCost(cost);
        toDoItem.setPreviousCost(previousCost);

        container.persist(toDoItem);
        // Flush so the new item is immediately visible to subsequent queries.
        container.flush();

        return toDoItem;
    }

    /** Name of the currently logged-in user; used to scope all queries. */
    private String currentUserName() {
        return container.getUser().getName();
    }

    // //////////////////////////////////////
    // Injected Services
    // //////////////////////////////////////

    @javax.inject.Inject
    private DomainObjectContainer container;

    @javax.inject.Inject
    private ClockService clockService;
}
| mariannehagaseth/CreationToolEcompliance | fixture/src/main/java/org/isisaddons/wicket/summernote/fixture/dom/SummernoteEditorToDoItems.java | Java | apache-2.0 | 7,779 |
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from six.moves import range
from kafka_utils.kafka_cluster_manager.cluster_info.broker import Broker
from kafka_utils.kafka_cluster_manager.cluster_info.partition import Partition
def create_broker(broker_id, partitions):
    """Create a Broker owning *partitions* and register it as a replica
    on every one of them.
    """
    broker = Broker(broker_id, partitions=set(partitions))
    for partition in partitions:
        partition.add_replica(broker)
    return broker
def create_and_attach_partition(topic, partition_id):
    """Build a Partition for *topic*, attach it to the topic and return it."""
    new_partition = Partition(topic, partition_id)
    topic.add_partition(new_partition)
    return new_partition
def broker_range(n):
    """Return a dict mapping broker ids "0" .. str(n - 1) to host metadata.

    Note: despite what the old docstring said, this returns a dict (keyed by
    stringified broker id), not a list; callers rely on the mapping shape.
    """
    return {str(x): {"host": "host%s" % x} for x in range(n)}
| Yelp/kafka-utils | tests/kafka_cluster_manager/helper.py | Python | apache-2.0 | 1,295 |
/*
* Copyright (c) 2008-2017, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.client.spi.impl.discovery;
import com.hazelcast.client.HazelcastClient;
import com.hazelcast.client.config.ClientClasspathXmlConfig;
import com.hazelcast.client.config.ClientConfig;
import com.hazelcast.client.config.ClientNetworkConfig;
import com.hazelcast.client.config.XmlClientConfigBuilder;
import com.hazelcast.client.connection.AddressTranslator;
import com.hazelcast.config.AwsConfig;
import com.hazelcast.config.Config;
import com.hazelcast.config.DiscoveryConfig;
import com.hazelcast.config.DiscoveryStrategyConfig;
import com.hazelcast.config.InterfacesConfig;
import com.hazelcast.config.JoinConfig;
import com.hazelcast.config.properties.PropertyDefinition;
import com.hazelcast.config.properties.PropertyTypeConverter;
import com.hazelcast.config.properties.SimplePropertyDefinition;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.logging.ILogger;
import com.hazelcast.logging.Logger;
import com.hazelcast.nio.Address;
import com.hazelcast.spi.discovery.AbstractDiscoveryStrategy;
import com.hazelcast.spi.discovery.DiscoveryNode;
import com.hazelcast.spi.discovery.DiscoveryStrategy;
import com.hazelcast.spi.discovery.DiscoveryStrategyFactory;
import com.hazelcast.spi.discovery.NodeFilter;
import com.hazelcast.spi.discovery.SimpleDiscoveryNode;
import com.hazelcast.spi.discovery.impl.DefaultDiscoveryService;
import com.hazelcast.spi.discovery.impl.DefaultDiscoveryServiceProvider;
import com.hazelcast.spi.discovery.integration.DiscoveryMode;
import com.hazelcast.spi.discovery.integration.DiscoveryService;
import com.hazelcast.spi.discovery.integration.DiscoveryServiceProvider;
import com.hazelcast.spi.discovery.integration.DiscoveryServiceSettings;
import com.hazelcast.spi.partitiongroup.PartitionGroupStrategy;
import com.hazelcast.spi.properties.GroupProperty;
import com.hazelcast.test.HazelcastSerialClassRunner;
import com.hazelcast.test.HazelcastTestSupport;
import com.hazelcast.test.annotation.QuickTest;
import org.junit.After;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import javax.xml.XMLConstants;
import javax.xml.transform.Source;
import javax.xml.transform.stream.StreamSource;
import javax.xml.validation.Schema;
import javax.xml.validation.SchemaFactory;
import javax.xml.validation.Validator;
import java.io.InputStream;
import java.lang.reflect.Field;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@RunWith(HazelcastSerialClassRunner.class)
@Category(QuickTest.class)
public class ClientDiscoverySpiTest extends HazelcastTestSupport {
private static final ILogger LOGGER = Logger.getLogger(ClientDiscoverySpiTest.class);
    /**
     * Shuts down every client and member instance after each test so the
     * next test starts from a clean slate.
     */
    @After
    public void cleanup() {
        HazelcastClient.shutdownAll();
        Hazelcast.shutdownAll();
    }
    /**
     * Validates the discovery-SPI test client XML against the 3.9 client
     * XSD, so the configuration used by the other tests is known to be
     * schema-valid. Fails by exception from the validator.
     */
    @Test
    public void testSchema() throws Exception {
        String xmlFileName = "hazelcast-client-discovery-spi-test.xml";

        SchemaFactory factory = SchemaFactory.newInstance(XMLConstants.W3C_XML_SCHEMA_NS_URI);
        URL schemaResource = ClientDiscoverySpiTest.class.getClassLoader().getResource("hazelcast-client-config-3.9.xsd");
        Schema schema = factory.newSchema(schemaResource);

        InputStream xmlResource = ClientDiscoverySpiTest.class.getClassLoader().getResourceAsStream(xmlFileName);
        Source source = new StreamSource(xmlResource);

        Validator validator = schema.newValidator();
        validator.validate(source);
    }
    /**
     * Parses the discovery-SPI test client XML and verifies the resulting
     * ClientConfig: no AWS config, discovery enabled, and exactly one
     * discovery strategy carrying the three expected string properties.
     */
    @Test
    public void testParsing() throws Exception {
        String xmlFileName = "hazelcast-client-discovery-spi-test.xml";
        InputStream xmlResource = ClientDiscoverySpiTest.class.getClassLoader().getResourceAsStream(xmlFileName);
        ClientConfig clientConfig = new XmlClientConfigBuilder(xmlResource).build();

        ClientNetworkConfig networkConfig = clientConfig.getNetworkConfig();

        AwsConfig awsConfig = networkConfig.getAwsConfig();
        assertNull(awsConfig);

        DiscoveryConfig discoveryConfig = networkConfig.getDiscoveryConfig();
        assertTrue(discoveryConfig.isEnabled());

        assertEquals(1, discoveryConfig.getDiscoveryStrategyConfigs().size());

        DiscoveryStrategyConfig providerConfig = discoveryConfig.getDiscoveryStrategyConfigs().iterator().next();

        // Note: XML property values surface as strings here ("123", "true"),
        // not as their converted types.
        assertEquals(3, providerConfig.getProperties().size());
        assertEquals("foo", providerConfig.getProperties().get("key-string"));
        assertEquals("123", providerConfig.getProperties().get("key-int"));
        assertEquals("true", providerConfig.getProperties().get("key-boolean"));
    }
    /**
     * Starts three members that discover each other exclusively via a
     * custom (collecting) discovery strategy -- multicast and TCP/IP join
     * are disabled -- then connects a client through the same strategy and
     * asserts the cluster reaches size 3 from every instance's view.
     */
    @Test
    public void testNodeStartup() {
        Config config = new Config();
        config.setProperty("hazelcast.discovery.enabled", "true");

        config.getNetworkConfig().setPort(50001);
        InterfacesConfig interfaces = config.getNetworkConfig().getInterfaces();
        interfaces.clear();
        interfaces.setEnabled(true);
        interfaces.addInterface("127.0.0.1");

        // The strategy records the DiscoveryNodes it is given, so members
        // can only find each other through it.
        List<DiscoveryNode> discoveryNodes = new CopyOnWriteArrayList<DiscoveryNode>();
        DiscoveryStrategyFactory factory = new CollectingDiscoveryStrategyFactory(discoveryNodes);

        JoinConfig join = config.getNetworkConfig().getJoin();
        join.getTcpIpConfig().setEnabled(false);
        join.getMulticastConfig().setEnabled(false);

        DiscoveryConfig discoveryConfig = join.getDiscoveryConfig();
        discoveryConfig.getDiscoveryStrategyConfigs().clear();

        DiscoveryStrategyConfig strategyConfig = new DiscoveryStrategyConfig(factory, Collections.<String, Comparable>emptyMap());
        discoveryConfig.addDiscoveryStrategyConfig(strategyConfig);

        final HazelcastInstance hazelcastInstance1 = Hazelcast.newHazelcastInstance(config);
        final HazelcastInstance hazelcastInstance2 = Hazelcast.newHazelcastInstance(config);
        final HazelcastInstance hazelcastInstance3 = Hazelcast.newHazelcastInstance(config);

        try {
            // The client reuses the same collecting strategy for discovery.
            ClientConfig clientConfig = new ClientConfig();
            clientConfig.setProperty("hazelcast.discovery.enabled", "true");

            discoveryConfig = clientConfig.getNetworkConfig().getDiscoveryConfig();
            discoveryConfig.getDiscoveryStrategyConfigs().clear();

            strategyConfig = new DiscoveryStrategyConfig(factory, Collections.<String, Comparable>emptyMap());
            discoveryConfig.addDiscoveryStrategyConfig(strategyConfig);

            final HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);

            assertNotNull(hazelcastInstance1);
            assertNotNull(hazelcastInstance2);
            assertNotNull(hazelcastInstance3);
            assertNotNull(client);

            assertClusterSizeEventually(3, hazelcastInstance1, hazelcastInstance2, hazelcastInstance3, client);
        } finally {
            // Always tear instances down, even on assertion failure.
            HazelcastClient.shutdownAll();
            Hazelcast.shutdownAll();
        }
    }
@Test
// Verifies that DiscoveryStrategy.start()/destroy() are invoked exactly once
// for the member-side and once for the client-side strategy instance.
public void testDiscoveryServiceLifecycleMethodsCalledWhenClientAndServerStartAndShutdown() {
//Given
Config config = new Config();
config.setProperty("hazelcast.discovery.enabled", "true");
config.getNetworkConfig().setPort(50001);
InterfacesConfig interfaces = config.getNetworkConfig().getInterfaces();
interfaces.clear();
interfaces.setEnabled(true);
interfaces.addInterface("127.0.0.1");
//Both server and client are using the same LifecycleDiscoveryStrategyFactory so latch count is set to 2.
CountDownLatch startLatch = new CountDownLatch(2);
CountDownLatch stopLatch = new CountDownLatch(2);
List<DiscoveryNode> discoveryNodes = new CopyOnWriteArrayList<DiscoveryNode>();
DiscoveryStrategyFactory factory = new LifecycleDiscoveryStrategyFactory(startLatch, stopLatch, discoveryNodes);
JoinConfig join = config.getNetworkConfig().getJoin();
join.getTcpIpConfig().setEnabled(false);
join.getMulticastConfig().setEnabled(false);
DiscoveryConfig discoveryConfig = join.getDiscoveryConfig();
discoveryConfig.getDiscoveryStrategyConfigs().clear();
DiscoveryStrategyConfig strategyConfig = new DiscoveryStrategyConfig(factory, Collections.<String, Comparable>emptyMap());
discoveryConfig.addDiscoveryStrategyConfig(strategyConfig);
final HazelcastInstance hazelcastInstance = Hazelcast.newHazelcastInstance(config);
ClientConfig clientConfig = new ClientConfig();
clientConfig.setProperty("hazelcast.discovery.enabled", "true");
discoveryConfig = clientConfig.getNetworkConfig().getDiscoveryConfig();
discoveryConfig.getDiscoveryStrategyConfigs().clear();
strategyConfig = new DiscoveryStrategyConfig(factory, Collections.<String, Comparable>emptyMap());
discoveryConfig.addDiscoveryStrategyConfig(strategyConfig);
final HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);
assertNotNull(hazelcastInstance);
assertNotNull(client);
//When
HazelcastClient.shutdownAll();
Hazelcast.shutdownAll();
//Then
// Both strategy instances must have seen start() and destroy() by now.
assertOpenEventually(startLatch);
assertOpenEventually(stopLatch);
}
@Test
public void testNodeFilter_from_xml() throws Exception {
    // The sample XML registers TestNodeFilter; after one discovery round the
    // filter must have been offered all four nodes that TestDiscoveryStrategy publishes.
    String xmlFileName = "hazelcast-client-discovery-spi-test.xml";
    ClientConfig clientConfig;
    // try-with-resources: the stream was previously never closed
    try (InputStream xmlResource = ClientDiscoverySpiTest.class.getClassLoader().getResourceAsStream(xmlFileName)) {
        clientConfig = new XmlClientConfigBuilder(xmlResource).build();
    }
    ClientNetworkConfig networkConfig = clientConfig.getNetworkConfig();
    DiscoveryConfig discoveryConfig = networkConfig.getDiscoveryConfig();
    DiscoveryServiceProvider provider = new DefaultDiscoveryServiceProvider();
    DiscoveryService discoveryService = provider.newDiscoveryService(buildDiscoveryServiceSettings(discoveryConfig));
    discoveryService.start();
    discoveryService.discoverNodes();
    discoveryService.destroy();
    // Reflectively peek at the private node filter to verify it was invoked.
    Field nodeFilterField = DefaultDiscoveryService.class.getDeclaredField("nodeFilter");
    nodeFilterField.setAccessible(true);
    TestNodeFilter nodeFilter = (TestNodeFilter) nodeFilterField.get(discoveryService);
    assertEquals(4, nodeFilter.getNodes().size());
}
@Test
public void test_discovery_address_translator() throws Exception {
    // Without public-IP translation the translator must return the input
    // address unchanged (and null for null).
    String xmlFileName = "hazelcast-client-discovery-spi-test.xml";
    ClientConfig clientConfig;
    // try-with-resources: the stream was previously never closed
    try (InputStream xmlResource = ClientDiscoverySpiTest.class.getClassLoader().getResourceAsStream(xmlFileName)) {
        clientConfig = new XmlClientConfigBuilder(xmlResource).build();
    }
    ClientNetworkConfig networkConfig = clientConfig.getNetworkConfig();
    DiscoveryConfig discoveryConfig = networkConfig.getDiscoveryConfig();
    DiscoveryServiceProvider provider = new DefaultDiscoveryServiceProvider();
    DiscoveryService discoveryService = provider.newDiscoveryService(buildDiscoveryServiceSettings(discoveryConfig));
    AddressTranslator translator = new DiscoveryAddressTranslator(discoveryService, false);
    Address address = new Address("127.0.0.1", 50001);
    assertNull(translator.translate(null));
    assertEquals(address, translator.translate(address));
    // Enforce refresh of the internal mapping
    assertEquals(address, translator.translate(address));
}
@Test
public void test_discovery_address_translator_with_public_ip() throws Exception {
    // With public-IP translation enabled, the private address published by
    // TestDiscoveryStrategy (port 1) must map to its public counterpart (port 50001).
    String xmlFileName = "hazelcast-client-discovery-spi-test.xml";
    ClientConfig clientConfig;
    // try-with-resources: the stream was previously never closed
    try (InputStream xmlResource = ClientDiscoverySpiTest.class.getClassLoader().getResourceAsStream(xmlFileName)) {
        clientConfig = new XmlClientConfigBuilder(xmlResource).build();
    }
    ClientNetworkConfig networkConfig = clientConfig.getNetworkConfig();
    DiscoveryConfig discoveryConfig = networkConfig.getDiscoveryConfig();
    DiscoveryServiceProvider provider = new DefaultDiscoveryServiceProvider();
    DiscoveryService discoveryService = provider.newDiscoveryService(buildDiscoveryServiceSettings(discoveryConfig));
    AddressTranslator translator = new DiscoveryAddressTranslator(discoveryService, true);
    Address publicAddress = new Address("127.0.0.1", 50001);
    Address privateAddress = new Address("127.0.0.1", 1);
    // Enforce refresh of the internal mapping
    assertEquals(publicAddress, translator.translate(privateAddress));
}
@Test(expected = IllegalArgumentException.class)
// Setting a null DiscoveryConfig must be rejected eagerly by the setter.
public void test_enabled_whenDiscoveryConfigIsNull() {
ClientConfig config = new ClientConfig();
config.setProperty(GroupProperty.DISCOVERY_SPI_ENABLED.getName(), "true");
ClientNetworkConfig networkConfig = config.getNetworkConfig();
networkConfig.setDiscoveryConfig(null);
}
@Test
// Discovery enabled but no strategies configured: the client must simply fail
// to connect with IllegalStateException rather than any other error.
public void test_enabled_whenDiscoveryConfigIsEmpty() {
ClientConfig config = new ClientConfig();
config.setProperty(GroupProperty.DISCOVERY_SPI_ENABLED.getName(), "true");
ClientNetworkConfig networkConfig = config.getNetworkConfig();
// Fail fast: a single, short connection attempt keeps the test quick.
networkConfig.setConnectionAttemptLimit(1);
networkConfig.setConnectionAttemptPeriod(1);
try {
HazelcastClient.newHazelcastClient(config);
} catch (IllegalStateException expected) {
// no server available
}
}
@Test
// A custom DiscoveryService whose discoverNodes() returns null (the mock's
// default) must make client startup fail with a NullPointerException.
public void test_CustomDiscoveryService_whenDiscoveredNodes_isNull() {
ClientConfig config = new ClientConfig();
config.setProperty(GroupProperty.DISCOVERY_SPI_ENABLED.getName(), "true");
final DiscoveryService discoveryService = mock(DiscoveryService.class);
DiscoveryServiceProvider discoveryServiceProvider = new DiscoveryServiceProvider() {
public DiscoveryService newDiscoveryService(DiscoveryServiceSettings arg0) {
return discoveryService;
}
};
ClientNetworkConfig networkConfig = config.getNetworkConfig();
networkConfig.setConnectionAttemptLimit(1);
networkConfig.setConnectionAttemptPeriod(1);
networkConfig.getDiscoveryConfig().setDiscoveryServiceProvider(discoveryServiceProvider);
try {
HazelcastClient.newHazelcastClient(config);
fail("Client cannot start, discovery nodes is null!");
} catch (NullPointerException expected) {
// discovered nodes is null
}
// The failure must have come from an actual discovery attempt.
verify(discoveryService).discoverNodes();
}
@Test
// An empty (but non-null) discovery result means "no members": the client
// must fail to connect with IllegalStateException, not NPE.
public void test_CustomDiscoveryService_whenDiscoveredNodes_isEmpty() {
ClientConfig config = new ClientConfig();
config.setProperty(GroupProperty.DISCOVERY_SPI_ENABLED.getName(), "true");
final DiscoveryService discoveryService = mock(DiscoveryService.class);
DiscoveryServiceProvider discoveryServiceProvider = new DiscoveryServiceProvider() {
public DiscoveryService newDiscoveryService(DiscoveryServiceSettings arg0) {
when(discoveryService.discoverNodes()).thenReturn(Collections.<DiscoveryNode>emptyList());
return discoveryService;
}
};
ClientNetworkConfig networkConfig = config.getNetworkConfig();
networkConfig.setConnectionAttemptLimit(1);
networkConfig.setConnectionAttemptPeriod(1);
networkConfig.getDiscoveryConfig().setDiscoveryServiceProvider(discoveryServiceProvider);
try {
HazelcastClient.newHazelcastClient(config);
} catch (IllegalStateException expected) {
// no server available
}
verify(discoveryService).discoverNodes();
}
@Test (expected = IllegalStateException.class)
// With the discovery SPI enabled, localhost is NOT implicitly added to the
// address list; a strategy that finds no members leaves the client unable to connect.
public void testDiscoveryEnabledNoLocalhost() {
Hazelcast.newHazelcastInstance();
ClientConfig clientConfig = new ClientConfig();
clientConfig.setProperty(GroupProperty.DISCOVERY_SPI_ENABLED.getName(), "true");
ClientNetworkConfig networkConfig = clientConfig.getNetworkConfig();
networkConfig.setConnectionAttemptLimit(1);
networkConfig.setConnectionAttemptPeriod(1);
networkConfig.getDiscoveryConfig().addDiscoveryStrategyConfig(
new DiscoveryStrategyConfig(new NoMemberDiscoveryStrategyFactory(), Collections.<String, Comparable>emptyMap()));
HazelcastClient.newHazelcastClient(clientConfig);
}
@Test
// Counterpart to testDiscoveryEnabledNoLocalhost: with discovery disabled the
// default client config falls back to localhost and connects to the member.
public void testDiscoveryDisabledLocalhost() {
Hazelcast.newHazelcastInstance();
// should not throw any exception, localhost is added into the list of addresses
HazelcastClient.newHazelcastClient();
}
@Test (expected = IllegalStateException.class)
// A dummy multicast-discovery client config must not silently fall back to
// localhost either; connection must fail when nothing is discovered.
public void testMulticastDiscoveryEnabledNoLocalhost() {
Hazelcast.newHazelcastInstance();
ClientClasspathXmlConfig clientConfig = new ClientClasspathXmlConfig(
"hazelcast-client-dummy-multicast-discovery-test.xml");
HazelcastClient.newHazelcastClient(clientConfig);
}
// Assembles client-mode discovery settings bound to this test's class loader
// and shared logger.
private DiscoveryServiceSettings buildDiscoveryServiceSettings(DiscoveryConfig config) {
    DiscoveryServiceSettings settings = new DiscoveryServiceSettings();
    settings.setConfigClassLoader(ClientDiscoverySpiTest.class.getClassLoader());
    settings.setDiscoveryConfig(config);
    settings.setDiscoveryMode(DiscoveryMode.Client);
    settings.setLogger(LOGGER);
    return settings;
}
/**
 * Strategy used by the XML-driven tests: publishes four fixed nodes whose
 * private port {@code i} maps to public port {@code 50000 + i} on loopback.
 */
private static class TestDiscoveryStrategy implements DiscoveryStrategy {

    @Override
    public void start() {
    }

    @Override
    public Collection<DiscoveryNode> discoverNodes() {
        try {
            // DRY: replaces four copy-pasted constructions with a loop that
            // produces exactly the same nodes in the same order.
            List<DiscoveryNode> discoveryNodes = new ArrayList<DiscoveryNode>(4);
            for (int i = 1; i <= 4; i++) {
                Address privateAddress = new Address("127.0.0.1", i);
                Address publicAddress = new Address("127.0.0.1", 50000 + i);
                discoveryNodes.add(new SimpleDiscoveryNode(privateAddress, publicAddress));
            }
            return discoveryNodes;
        } catch (Exception e) {
            // Address construction declares a checked exception; rethrow unchecked.
            throw new RuntimeException(e);
        }
    }

    @Override
    public void destroy() {
    }

    @Override
    public PartitionGroupStrategy getPartitionGroupStrategy() {
        return null;
    }

    @Override
    public Map<String, Object> discoverLocalMetadata() {
        return Collections.emptyMap();
    }
}
// Factory referenced from the sample XML; its property definitions must match
// the keys used there ("key-string", "key-int", "key-boolean"), plus one
// optional property ("key-something").
public static class TestDiscoveryStrategyFactory implements DiscoveryStrategyFactory {
private final Collection<PropertyDefinition> propertyDefinitions;
public TestDiscoveryStrategyFactory() {
List<PropertyDefinition> propertyDefinitions = new ArrayList<PropertyDefinition>();
propertyDefinitions.add(new SimplePropertyDefinition("key-string", PropertyTypeConverter.STRING));
propertyDefinitions.add(new SimplePropertyDefinition("key-int", PropertyTypeConverter.INTEGER));
propertyDefinitions.add(new SimplePropertyDefinition("key-boolean", PropertyTypeConverter.BOOLEAN));
propertyDefinitions.add(new SimplePropertyDefinition("key-something", true, PropertyTypeConverter.STRING));
this.propertyDefinitions = Collections.unmodifiableCollection(propertyDefinitions);
}
@Override
public Class<? extends DiscoveryStrategy> getDiscoveryStrategyType() {
return TestDiscoveryStrategy.class;
}
@Override
public DiscoveryStrategy newDiscoveryStrategy(DiscoveryNode discoveryNode, ILogger logger,
Map<String, Comparable> properties) {
return new TestDiscoveryStrategy();
}
@Override
public Collection<PropertyDefinition> getConfigurationProperties() {
return propertyDefinitions;
}
}
// Factory for CollectingDiscoveryStrategy: all created strategies share one
// list so members can register themselves and be found by the others.
public static class CollectingDiscoveryStrategyFactory implements DiscoveryStrategyFactory {
private final List<DiscoveryNode> discoveryNodes;
private CollectingDiscoveryStrategyFactory(List<DiscoveryNode> discoveryNodes) {
this.discoveryNodes = discoveryNodes;
}
@Override
public Class<? extends DiscoveryStrategy> getDiscoveryStrategyType() {
return CollectingDiscoveryStrategy.class;
}
@Override
public DiscoveryStrategy newDiscoveryStrategy(DiscoveryNode discoveryNode, ILogger logger,
Map<String, Comparable> properties) {
return new CollectingDiscoveryStrategy(discoveryNode, discoveryNodes, logger, properties);
}
@Override
// No configurable properties for this test double.
public Collection<PropertyDefinition> getConfigurationProperties() {
return null;
}
}
// Strategy that adds its own local node to a shared list on start() and
// removes it on destroy(); discoverNodes() returns a snapshot of that list.
private static class CollectingDiscoveryStrategy extends AbstractDiscoveryStrategy {
private final List<DiscoveryNode> discoveryNodes;
private final DiscoveryNode discoveryNode;
public CollectingDiscoveryStrategy(DiscoveryNode discoveryNode, List<DiscoveryNode> discoveryNodes, ILogger logger,
Map<String, Comparable> properties) {
super(logger, properties);
this.discoveryNodes = discoveryNodes;
this.discoveryNode = discoveryNode;
}
@Override
public void start() {
super.start();
// Client-side strategies get a null local node and register nothing.
if (discoveryNode != null) {
discoveryNodes.add(discoveryNode);
}
// Exercise the inherited accessors; return values deliberately ignored.
getLogger();
getProperties();
}
@Override
public Iterable<DiscoveryNode> discoverNodes() {
// Defensive copy so callers never see concurrent registrations mid-iteration.
return new ArrayList<DiscoveryNode>(discoveryNodes);
}
@Override
public void destroy() {
super.destroy();
discoveryNodes.remove(discoveryNode);
}
}
// Factory for LifecycleDiscoveryStrategy: threads a pair of latches through
// every created strategy so tests can count start()/destroy() calls.
public static class LifecycleDiscoveryStrategyFactory implements DiscoveryStrategyFactory {
private final CountDownLatch startLatch;
private final CountDownLatch stopLatch;
private final List<DiscoveryNode> discoveryNodes;
private LifecycleDiscoveryStrategyFactory(CountDownLatch startLatch, CountDownLatch stopLatch,
List<DiscoveryNode> discoveryNodes) {
this.startLatch = startLatch;
this.stopLatch = stopLatch;
this.discoveryNodes = discoveryNodes;
}
@Override
public Class<? extends DiscoveryStrategy> getDiscoveryStrategyType() {
return LifecycleDiscoveryStrategy.class;
}
@Override
public DiscoveryStrategy newDiscoveryStrategy(DiscoveryNode discoveryNode, ILogger logger,
Map<String, Comparable> properties) {
return new LifecycleDiscoveryStrategy(startLatch, stopLatch, discoveryNode, discoveryNodes, logger, properties);
}
@Override
public Collection<PropertyDefinition> getConfigurationProperties() {
return null;
}
}
// Like CollectingDiscoveryStrategy, but additionally counts down startLatch in
// start() and stopLatch in destroy() so tests can assert lifecycle calls.
private static class LifecycleDiscoveryStrategy extends AbstractDiscoveryStrategy {
private final CountDownLatch startLatch;
private final CountDownLatch stopLatch;
private final List<DiscoveryNode> discoveryNodes;
private final DiscoveryNode discoveryNode;
public LifecycleDiscoveryStrategy(CountDownLatch startLatch, CountDownLatch stopLatch,
DiscoveryNode discoveryNode, List<DiscoveryNode> discoveryNodes,
ILogger logger, Map<String, Comparable> properties) {
super(logger, properties);
this.startLatch = startLatch;
this.stopLatch = stopLatch;
this.discoveryNodes = discoveryNodes;
this.discoveryNode = discoveryNode;
}
@Override
public void start() {
super.start();
startLatch.countDown();
// Client-side strategies get a null local node and register nothing.
if (discoveryNode != null) {
discoveryNodes.add(discoveryNode);
}
}
@Override
public Iterable<DiscoveryNode> discoverNodes() {
return new ArrayList<DiscoveryNode>(discoveryNodes);
}
@Override
public void destroy() {
super.destroy();
stopLatch.countDown();
discoveryNodes.remove(discoveryNode);
}
}
// Node filter referenced from the sample XML: accepts every candidate while
// recording it, so tests can assert how many nodes the filter was offered.
public static class TestNodeFilter implements NodeFilter {
private final List<DiscoveryNode> nodes = new ArrayList<DiscoveryNode>();
@Override
public boolean test(DiscoveryNode candidate) {
nodes.add(candidate);
return true;
}
private List<DiscoveryNode> getNodes() {
return nodes;
}
}
// Deliberately returns null from discoverNodes() to simulate a strategy that
// finds no members (used by testDiscoveryEnabledNoLocalhost).
private static class NoMemberDiscoveryStrategy extends AbstractDiscoveryStrategy {
public NoMemberDiscoveryStrategy(ILogger logger, Map<String, Comparable> properties) {
super(logger, properties);
}
@Override
public Iterable<DiscoveryNode> discoverNodes() {
return null;
}
}
// Factory counterpart of NoMemberDiscoveryStrategy; exposes no properties.
public static class NoMemberDiscoveryStrategyFactory implements DiscoveryStrategyFactory {
@Override
public Class<? extends DiscoveryStrategy> getDiscoveryStrategyType() {
return NoMemberDiscoveryStrategy.class;
}
@Override
public DiscoveryStrategy newDiscoveryStrategy(DiscoveryNode discoveryNode, ILogger logger,
Map<String, Comparable> properties) {
return new NoMemberDiscoveryStrategy(logger, properties);
}
@Override
public Collection<PropertyDefinition> getConfigurationProperties() {
return null;
}
}
}
| juanavelez/hazelcast | hazelcast-client/src/test/java/com/hazelcast/client/spi/impl/discovery/ClientDiscoverySpiTest.java | Java | apache-2.0 | 27,829 |
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "leveldb/db.h"
#include "leveldb/filter_policy.h"
#include "db/db_impl.h"
#include "db/filename.h"
#include "db/version_set.h"
#include "db/write_batch_internal.h"
#include "leveldb/cache.h"
#include "leveldb/env.h"
#include "leveldb/table.h"
#include "util/hash.h"
#include "util/logging.h"
#include "util/mutexlock.h"
#include "util/testharness.h"
#include "util/testutil.h"
namespace leveldb {
// Returns a random string of exactly |len| bytes drawn from |rnd|.
static std::string RandomString(Random* rnd, int len) {
  std::string result;
  test::RandomString(rnd, len, &result);
  return result;
}
namespace {

// Mutex-protected integer counter used by tests to observe background
// activity (reads, sleeps) from multiple threads.
class AtomicCounter {
 public:
  AtomicCounter() : count_(0) { }

  void Increment() {
    MutexLock l(&mu_);
    ++count_;
  }

  int Read() {
    MutexLock l(&mu_);
    return count_;
  }

  void Reset() {
    MutexLock l(&mu_);
    count_ = 0;
  }

 private:
  port::Mutex mu_;
  int count_;
};

}  // anonymous namespace
// Special Env used to delay background operations
// Wraps the real Env so tests can inject failures and delays: drop sstable
// writes (no_space_), stall sstable Sync() (delay_sstable_sync_), refuse new
// writable files (non_writable_), and count random reads and sleeps.
class SpecialEnv : public EnvWrapper {
public:
// sstable Sync() calls are blocked while this pointer is non-NULL.
port::AtomicPointer delay_sstable_sync_;
// Simulate no-space errors while this pointer is non-NULL.
port::AtomicPointer no_space_;
// Simulate non-writable file system while this pointer is non-NULL
port::AtomicPointer non_writable_;
bool count_random_reads_;
AtomicCounter random_read_counter_;
AtomicCounter sleep_counter_;
explicit SpecialEnv(Env* base) : EnvWrapper(base) {
delay_sstable_sync_.Release_Store(NULL);
no_space_.Release_Store(NULL);
non_writable_.Release_Store(NULL);
count_random_reads_ = false;
}
// Only files named "*.sst" are wrapped; other files pass straight through.
// NOTE(review): the map_size argument is ignored and 2<<20 is always
// forwarded to the target Env — confirm that is intended.
Status NewWritableFile(const std::string& f, WritableFile** r, size_t map_size) {
class SSTableFile : public WritableFile {
private:
SpecialEnv* env_;
WritableFile* base_;
public:
SSTableFile(SpecialEnv* env, WritableFile* base)
: env_(env),
base_(base) {
}
~SSTableFile() { delete base_; }
Status Append(const Slice& data) {
if (env_->no_space_.Acquire_Load() != NULL) {
// Drop writes on the floor
return Status::OK();
} else {
return base_->Append(data);
}
}
Status Close() { return base_->Close(); }
Status Flush() { return base_->Flush(); }
Status Sync() {
// Spin in 100ms sleeps until the test releases delay_sstable_sync_.
while (env_->delay_sstable_sync_.Acquire_Load() != NULL) {
env_->SleepForMicroseconds(100000);
}
return base_->Sync();
}
};
if (non_writable_.Acquire_Load() != NULL) {
return Status::IOError("simulated write error");
}
Status s = target()->NewWritableFile(f, r, 2<<20);
if (s.ok()) {
if (strstr(f.c_str(), ".sst") != NULL) {
*r = new SSTableFile(this, *r);
}
}
return s;
}
// When count_random_reads_ is set, wraps the file so every Read() bumps
// random_read_counter_ (used to measure read amplification).
Status NewRandomAccessFile(const std::string& f, RandomAccessFile** r) {
class CountingFile : public RandomAccessFile {
private:
RandomAccessFile* target_;
AtomicCounter* counter_;
public:
CountingFile(RandomAccessFile* target, AtomicCounter* counter)
: target_(target), counter_(counter) {
}
virtual ~CountingFile() { delete target_; }
virtual Status Read(uint64_t offset, size_t n, Slice* result,
char* scratch) const {
counter_->Increment();
return target_->Read(offset, n, result, scratch);
}
};
Status s = target()->NewRandomAccessFile(f, r);
if (s.ok() && count_random_reads_) {
*r = new CountingFile(*r, &random_read_counter_);
}
return s;
}
// Counts sleeps so tests can detect write throttling / compaction back-off.
virtual void SleepForMicroseconds(int micros) {
sleep_counter_.Increment();
target()->SleepForMicroseconds(micros);
}
};
// Test fixture: owns a scratch database under test::TmpDir()/db_test wrapped
// in a SpecialEnv, and cycles through option configurations (default, bloom
// filter, no compression) via ChangeOptions().
class DBTest {
private:
const FilterPolicy* filter_policy_;
// Sequence of option configurations to try
enum OptionConfig {
kDefault,
kFilter,
kUncompressed,
kEnd
};
int option_config_;
public:
std::string dbname_;
SpecialEnv* env_;
DB* db_;
Options last_options_;
DBTest() : option_config_(kDefault),
env_(new SpecialEnv(Env::Default())) {
filter_policy_ = NewBloomFilterPolicy2(16);
dbname_ = test::TmpDir() + "/db_test";
// Start from a clean slate in case a previous run left files behind.
DestroyDB(dbname_, Options());
db_ = NULL;
Reopen();
}
~DBTest() {
delete db_;
DestroyDB(dbname_, Options());
delete env_;
delete filter_policy_;
}
// Switch to a fresh database with the next option configuration to
// test. Return false if there are no more configurations to test.
bool ChangeOptions() {
option_config_++;
if (option_config_ >= kEnd) {
return false;
} else {
DestroyAndReopen();
return true;
}
}
// Return the current option configuration.
Options CurrentOptions() {
Options options;
switch (option_config_) {
case kFilter:
options.filter_policy = filter_policy_;
break;
case kUncompressed:
options.compression = kNoCompression;
break;
default:
break;
}
return options;
}
// Downcast to the implementation type to reach TEST_* hooks.
DBImpl* dbfull() {
return reinterpret_cast<DBImpl*>(db_);
}
void Reopen(Options* options = NULL) {
ASSERT_OK(TryReopen(options));
}
void Close() {
delete db_;
db_ = NULL;
}
void DestroyAndReopen(Options* options = NULL) {
delete db_;
db_ = NULL;
DestroyDB(dbname_, Options());
ASSERT_OK(TryReopen(options));
}
// Reopen the database; NULL options means CurrentOptions() + create_if_missing.
Status TryReopen(Options* options) {
delete db_;
db_ = NULL;
Options opts;
if (options != NULL) {
opts = *options;
} else {
opts = CurrentOptions();
opts.create_if_missing = true;
}
last_options_ = opts;
return DB::Open(opts, dbname_, &db_);
}
// Opens the database twice; returns the status of the second Open, which is
// expected to fail because the first holds the lock.
// NOTE(review): the first Open's status is ignored and db_fail is never
// deleted if the second Open unexpectedly succeeds — presumed acceptable
// for a test that expects failure; confirm.
Status DoubleOpen(Options* options = NULL) {
DB * db_fail;
delete db_;
db_ = NULL;
Options opts, opts2;
if (options != NULL) {
opts = *options;
} else {
opts = CurrentOptions();
opts.create_if_missing = true;
}
last_options_ = opts;
DB::Open(opts, dbname_, &db_);
return DB::Open(opts2, dbname_, &db_fail);
}
Status Put(const std::string& k, const std::string& v) {
return db_->Put(WriteOptions(), k, v);
}
Status Delete(const std::string& k) {
return db_->Delete(WriteOptions(), k);
}
// Lookup helper: maps NotFound to "NOT_FOUND" and other errors to their text.
std::string Get(const std::string& k, const Snapshot* snapshot = NULL) {
ReadOptions options;
options.snapshot = snapshot;
std::string result;
Status s = db_->Get(options, k, &result);
if (s.IsNotFound()) {
result = "NOT_FOUND";
} else if (!s.ok()) {
result = s.ToString();
}
return result;
}
// Same as Get() but bypasses the block cache (fill_cache=false).
std::string GetNoCache(const std::string& k, const Snapshot* snapshot = NULL) {
ReadOptions options;
options.snapshot = snapshot;
options.fill_cache=false;
std::string result;
Status s = db_->Get(options, k, &result);
if (s.IsNotFound()) {
result = "NOT_FOUND";
} else if (!s.ok()) {
result = s.ToString();
}
return result;
}
// Return a string that contains all key,value pairs in order,
// formatted like "(k1->v1)(k2->v2)".
std::string Contents() {
std::vector<std::string> forward;
std::string result;
Iterator* iter = db_->NewIterator(ReadOptions());
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
std::string s = IterStatus(iter);
result.push_back('(');
result.append(s);
result.push_back(')');
forward.push_back(s);
}
// Check reverse iteration results are the reverse of forward results
int matched = 0;
for (iter->SeekToLast(); iter->Valid(); iter->Prev()) {
ASSERT_LT(matched, forward.size());
ASSERT_EQ(IterStatus(iter), forward[forward.size() - matched - 1]);
matched++;
}
ASSERT_EQ(matched, forward.size());
delete iter;
return result;
}
// Dumps every internal entry (value or DEL) for user_key across all levels,
// e.g. "[ v2, DEL, v1 ]" — used to inspect compaction behavior.
std::string AllEntriesFor(const Slice& user_key) {
Iterator* iter = dbfull()->TEST_NewInternalIterator();
InternalKey target(user_key, kMaxSequenceNumber, kTypeValue);
iter->Seek(target.Encode());
std::string result;
if (!iter->status().ok()) {
result = iter->status().ToString();
} else {
result = "[ ";
bool first = true;
while (iter->Valid()) {
ParsedInternalKey ikey;
if (!ParseInternalKey(iter->key(), &ikey)) {
result += "CORRUPTED";
} else {
if (last_options_.comparator->Compare(ikey.user_key, user_key) != 0) {
break;
}
if (!first) {
result += ", ";
}
first = false;
switch (ikey.type) {
case kTypeValue:
result += iter->value().ToString();
break;
case kTypeDeletion:
result += "DEL";
break;
}
}
iter->Next();
}
if (!first) {
result += " ";
}
result += "]";
}
delete iter;
return result;
}
int NumTableFilesAtLevel(int level) {
std::string property;
ASSERT_TRUE(
db_->GetProperty("leveldb.num-files-at-level" + NumberToString(level),
&property));
return atoi(property.c_str());
}
int TotalTableFiles() {
int result = 0;
for (int level = 0; level < config::kNumLevels; level++) {
result += NumTableFilesAtLevel(level);
}
return result;
}
// Return spread of files per level
std::string FilesPerLevel() {
std::string result;
int last_non_zero_offset = 0;
for (int level = 0; level < config::kNumLevels; level++) {
int f = NumTableFilesAtLevel(level);
char buf[100];
snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f);
result += buf;
if (f > 0) {
last_non_zero_offset = result.size();
}
}
// Trim trailing levels that contain no files.
result.resize(last_non_zero_offset);
return result;
}
int CountFiles() {
std::vector<std::string> files;
env_->GetChildren(dbname_, &files);
return static_cast<int>(files.size());
}
// Approximate on-disk size of the key range [start,limit).
uint64_t Size(const Slice& start, const Slice& limit) {
Range r(start, limit);
uint64_t size;
db_->GetApproximateSizes(&r, 1, &size);
return size;
}
void Compact(const Slice& start, const Slice& limit) {
db_->CompactRange(&start, &limit);
}
// Do n memtable compactions, each of which produces an sstable
// covering the range [small,large].
void MakeTables(int n, const std::string& small, const std::string& large) {
for (int i = 0; i < n; i++) {
Put(small, "begin");
Put(large, "end");
dbfull()->TEST_CompactMemTable();
}
}
// Prevent pushing of new sstables into deeper levels by adding
// tables that cover a specified range to all levels.
void FillLevels(const std::string& smallest, const std::string& largest) {
MakeTables(config::kNumLevels, smallest, largest);
}
// Debugging aid: prints per-level file counts and max overlap to stderr.
void DumpFileCounts(const char* label) {
fprintf(stderr, "---\n%s:\n", label);
fprintf(stderr, "maxoverlap: %lld\n",
static_cast<long long>(
dbfull()->TEST_MaxNextLevelOverlappingBytes()));
for (int level = 0; level < config::kNumLevels; level++) {
int num = NumTableFilesAtLevel(level);
if (num > 0) {
fprintf(stderr, " level %3d : %d files\n", level, num);
}
}
}
std::string DumpSSTableList() {
std::string property;
db_->GetProperty("leveldb.sstables", &property);
return property;
}
// Renders the iterator's current entry as "key->value", or "(invalid)".
std::string IterStatus(Iterator* iter) {
std::string result;
if (iter->Valid()) {
result = iter->key().ToString() + "->" + iter->value().ToString();
} else {
result = "(invalid)";
}
return result;
}
};
TEST(DBTest, Empty) {
// A freshly created database contains no keys, under every option config.
do {
ASSERT_TRUE(db_ != NULL);
ASSERT_EQ("NOT_FOUND", Get("foo"));
} while (ChangeOptions());
}
TEST(DBTest, DoubleOpen)
{
// Opening the same database a second time must fail (lock held by first open).
ASSERT_NOTOK(DoubleOpen());
}
TEST(DBTest, ReadWrite) {
// Basic put/get: later puts overwrite, unrelated keys are unaffected.
do {
ASSERT_OK(Put("foo", "v1"));
ASSERT_EQ("v1", Get("foo"));
ASSERT_OK(Put("bar", "v2"));
ASSERT_OK(Put("foo", "v3"));
ASSERT_EQ("v3", Get("foo"));
ASSERT_EQ("v2", Get("bar"));
} while (ChangeOptions());
}
TEST(DBTest, PutDeleteGet) {
// Overwrite then delete: the deleted key must report NOT_FOUND.
do {
ASSERT_OK(db_->Put(WriteOptions(), "foo", "v1"));
ASSERT_EQ("v1", Get("foo"));
ASSERT_OK(db_->Put(WriteOptions(), "foo", "v2"));
ASSERT_EQ("v2", Get("foo"));
ASSERT_OK(db_->Delete(WriteOptions(), "foo"));
ASSERT_EQ("NOT_FOUND", Get("foo"));
} while (ChangeOptions());
}
TEST(DBTest, GetFromImmutableLayer) {
// A key must remain readable from the immutable memtable while the sstable
// flush is stalled by the blocked Sync().
do {
Options options = CurrentOptions();
options.env = env_;
options.write_buffer_size = 100000; // Small write buffer
Reopen(&options);
ASSERT_OK(Put("foo", "v1"));
ASSERT_EQ("v1", Get("foo"));
env_->delay_sstable_sync_.Release_Store(env_); // Block sync calls
Put("k1", std::string(100000, 'x')); // Fill memtable
Put("k2", std::string(100000, 'y')); // Trigger compaction
ASSERT_EQ("v1", Get("foo"));
env_->delay_sstable_sync_.Release_Store(NULL); // Release sync calls
} while (ChangeOptions());
}
TEST(DBTest, GetFromVersions) {
// A key must remain readable after it has been flushed into an sstable.
do {
ASSERT_OK(Put("foo", "v1"));
dbfull()->TEST_CompactMemTable();
ASSERT_EQ("v1", Get("foo"));
} while (ChangeOptions());
}
TEST(DBTest, GetSnapshot) {
// A snapshot must keep returning the old value across overwrites and
// memtable compaction.
do {
// Try with both a short key and a long key
for (int i = 0; i < 2; i++) {
std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x');
ASSERT_OK(Put(key, "v1"));
const Snapshot* s1 = db_->GetSnapshot();
ASSERT_OK(Put(key, "v2"));
ASSERT_EQ("v2", Get(key));
ASSERT_EQ("v1", Get(key, s1));
dbfull()->TEST_CompactMemTable();
ASSERT_EQ("v2", Get(key));
ASSERT_EQ("v1", Get(key, s1));
db_->ReleaseSnapshot(s1);
}
} while (ChangeOptions());
}
TEST(DBTest, GetLevel0Ordering) {
do {
// Check that we process level-0 files in correct order. The code
// below generates two level-0 files where the earlier one comes
// before the later one in the level-0 file list since the earlier
// one has a smaller "smallest" key.
ASSERT_OK(Put("bar", "b"));
ASSERT_OK(Put("foo", "v1"));
dbfull()->TEST_CompactMemTable();
ASSERT_OK(Put("foo", "v2"));
dbfull()->TEST_CompactMemTable();
// The newer file's value must win despite file-list ordering.
ASSERT_EQ("v2", Get("foo"));
} while (ChangeOptions());
}
TEST(DBTest, GetOrderedByLevels) {
// A newer value in a shallower level must shadow an older value that was
// compacted into a deeper level.
do {
ASSERT_OK(Put("foo", "v1"));
Compact("a", "z");
ASSERT_EQ("v1", Get("foo"));
ASSERT_OK(Put("foo", "v2"));
ASSERT_EQ("v2", Get("foo"));
dbfull()->TEST_CompactMemTable();
ASSERT_EQ("v2", Get("foo"));
} while (ChangeOptions());
}
TEST(DBTest, GetPicksCorrectFile) {
do {
// Arrange to have multiple files in a non-level-0 level.
ASSERT_OK(Put("a", "va"));
Compact("a", "b");
ASSERT_OK(Put("x", "vx"));
Compact("x", "y");
ASSERT_OK(Put("f", "vf"));
Compact("f", "g");
// Each lookup must be routed to the file whose range covers its key.
ASSERT_EQ("va", Get("a"));
ASSERT_EQ("vf", Get("f"));
ASSERT_EQ("vx", Get("x"));
} while (ChangeOptions());
}
#if 0
// riak does not execute compaction due to reads
TEST(DBTest, GetEncountersEmptyLevel) {
do {
// Arrange for the following to happen:
// * sstable A in level 0
// * nothing in level 1
// * sstable B in level 2
// Then do enough Get() calls to arrange for an automatic compaction
// of sstable A. A bug would cause the compaction to be marked as
// occuring at level 1 (instead of the correct level 0).
// Step 1: First place sstables in levels 0 and 2
int compaction_count = 0;
while (NumTableFilesAtLevel(0) == 0 ||
NumTableFilesAtLevel(2) == 0) {
ASSERT_LE(compaction_count, 100) << "could not fill levels 0 and 2";
compaction_count++;
Put("a", "begin");
Put("z", "end");
dbfull()->TEST_CompactMemTable();
}
// Step 2: clear level 1 if necessary.
dbfull()->TEST_CompactRange(1, NULL, NULL);
ASSERT_EQ(NumTableFilesAtLevel(0), 1);
ASSERT_EQ(NumTableFilesAtLevel(1), 0);
ASSERT_EQ(NumTableFilesAtLevel(2), 1);
// Step 3: read a bunch of times
for (int i = 0; i < 1000; i++) {
ASSERT_EQ("NOT_FOUND", Get("missing"));
}
// Step 4: Wait for compaction to finish
env_->SleepForMicroseconds(1000000);
ASSERT_EQ(NumTableFilesAtLevel(0), 0);
} while (ChangeOptions());
}
#endif
// An iterator over an empty DB must be invalid after every positioning call.
TEST(DBTest, IterEmpty) {
  Iterator* iter = db_->NewIterator(ReadOptions());

  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->Seek("foo");
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  delete iter;
}
// Exhaustively exercises iterator positioning over a DB with one entry:
// every Seek* landing on the entry, and every step off either end going
// invalid.
TEST(DBTest, IterSingle) {
  ASSERT_OK(Put("a", "va"));
  Iterator* iter = db_->NewIterator(ReadOptions());

  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->Seek("");           // seeks to first key >= target
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->Seek("a");
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->Seek("b");          // past the only key: invalid
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  delete iter;
}
// Iterator behavior over three entries: full forward/backward scans, seeks
// between keys, direction switches, and snapshot isolation against
// concurrent writes/deletes made after the iterator was created.
TEST(DBTest, IterMulti) {
  ASSERT_OK(Put("a", "va"));
  ASSERT_OK(Put("b", "vb"));
  ASSERT_OK(Put("c", "vc"));
  Iterator* iter = db_->NewIterator(ReadOptions());

  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->Seek("");
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Seek("a");
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Seek("ax");               // between keys: lands on next key
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Seek("b");
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Seek("z");                // past all keys
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  // Switch from reverse to forward
  iter->SeekToLast();
  iter->Prev();
  iter->Prev();
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "b->vb");

  // Switch from forward to reverse
  iter->SeekToFirst();
  iter->Next();
  iter->Next();
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "b->vb");

  // Make sure iter stays at snapshot: none of these later mutations may
  // be visible through the already-created iterator.
  ASSERT_OK(Put("a",  "va2"));
  ASSERT_OK(Put("a2", "va3"));
  ASSERT_OK(Put("b",  "vb2"));
  ASSERT_OK(Put("c",  "vc2"));
  ASSERT_OK(Delete("b"));
  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  delete iter;
}
// Iterates forward and backward over a mix of tiny and ~100KB values to
// exercise iteration across entries that span block boundaries.
TEST(DBTest, IterSmallAndLargeMix) {
  ASSERT_OK(Put("a", "va"));
  ASSERT_OK(Put("b", std::string(100000, 'b')));
  ASSERT_OK(Put("c", "vc"));
  ASSERT_OK(Put("d", std::string(100000, 'd')));
  ASSERT_OK(Put("e", std::string(100000, 'e')));

  Iterator* iter = db_->NewIterator(ReadOptions());

  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "b->" + std::string(100000, 'b'));
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "d->" + std::string(100000, 'd'));
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "e->" + std::string(100000, 'e'));
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "e->" + std::string(100000, 'e'));
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "d->" + std::string(100000, 'd'));
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "b->" + std::string(100000, 'b'));
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  delete iter;
}
// A deleted key must be skipped both by Get() and by reverse iteration
// (Prev() from "c" must land on "a", not on the deletion marker for "b").
TEST(DBTest, IterMultiWithDelete) {
  do {
    ASSERT_OK(Put("a", "va"));
    ASSERT_OK(Put("b", "vb"));
    ASSERT_OK(Put("c", "vc"));
    ASSERT_OK(Delete("b"));
    ASSERT_EQ("NOT_FOUND", Get("b"));

    Iterator* iter = db_->NewIterator(ReadOptions());
    iter->Seek("c");
    ASSERT_EQ(IterStatus(iter), "c->vc");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "a->va");  // "b" deletion marker skipped
    delete iter;
  } while (ChangeOptions());
}
// Basic crash/recovery: values written before a Reopen() must survive it,
// including values from multiple generations of log files.
TEST(DBTest, Recover) {
  do {
    ASSERT_OK(Put("foo", "v1"));
    ASSERT_OK(Put("baz", "v5"));

    Reopen();
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_EQ("v5", Get("baz"));
    ASSERT_OK(Put("bar", "v2"));
    ASSERT_OK(Put("foo", "v3"));

    Reopen();
    ASSERT_EQ("v3", Get("foo"));   // latest overwrite survives recovery
    ASSERT_OK(Put("foo", "v4"));
    ASSERT_EQ("v4", Get("foo"));
    ASSERT_EQ("v2", Get("bar"));
    ASSERT_EQ("v5", Get("baz"));
  } while (ChangeOptions());
}
// Recovery must cope with an empty log file: the second Reopen() leaves an
// empty log behind, and the DB must still recover cleanly after it.
TEST(DBTest, RecoveryWithEmptyLog) {
  do {
    ASSERT_OK(Put("foo", "v1"));
    ASSERT_OK(Put("foo", "v2"));
    Reopen();
    Reopen();                      // produces/recovers an empty log
    ASSERT_OK(Put("foo", "v3"));
    Reopen();
    ASSERT_EQ("v3", Get("foo"));
  } while (ChangeOptions());
}
// Check that writes done during a memtable compaction are recovered
// if the database is shutdown during the memtable compaction.
TEST(DBTest, RecoverDuringMemtableCompaction) {
  do {
    Options options = CurrentOptions();
    options.env = env_;
    options.write_buffer_size = 1000000;
    Reopen(&options);

    // Trigger a long memtable compaction and reopen the database during it
    ASSERT_OK(Put("foo", "v1"));                         // Goes to 1st log file
    ASSERT_OK(Put("big1", std::string(10000000, 'x')));  // Fills memtable
    ASSERT_OK(Put("big2", std::string(1000, 'y')));      // Triggers compaction
    ASSERT_OK(Put("bar", "v2"));                         // Goes to new log file

    Reopen(&options);
    // All four writes must be present regardless of which log they landed in.
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_EQ("v2", Get("bar"));
    ASSERT_EQ(std::string(10000000, 'x'), Get("big1"));
    ASSERT_EQ(std::string(1000, 'y'), Get("big2"));
  } while (ChangeOptions());
}
// Builds the canonical fixed-width test key for index i: "key" followed by
// the index zero-padded to six digits (e.g. Key(7) == "key000007").
static std::string Key(int i) {
  char name[100];
  snprintf(name, sizeof(name), "key%06d", i);
  return name;  // implicit conversion to std::string
}
// With a tiny write buffer, inserting N ~1KB values must force memtable
// flushes (minor compactions), producing new table files; all values must
// remain readable before and after recovery.
TEST(DBTest, MinorCompactionsHappen) {
  Options options = CurrentOptions();
  options.write_buffer_size = 10000;   // tiny buffer forces frequent flushes
  Reopen(&options);

  const int N = 500;

  int starting_num_tables = TotalTableFiles();
  for (int i = 0; i < N; i++) {
    ASSERT_OK(Put(Key(i), Key(i) + std::string(1000, 'v')));
  }
  int ending_num_tables = TotalTableFiles();
  ASSERT_GT(ending_num_tables, starting_num_tables);  // flushes happened

  for (int i = 0; i < N; i++) {
    ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(Key(i)));
  }

  Reopen();

  for (int i = 0; i < N; i++) {
    ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(Key(i)));
  }
}
// Recovery of a log file much larger than the configured write buffer must
// flush multiple table files mid-replay rather than buffering the whole log.
TEST(DBTest, RecoverWithLargeLog) {
  {
    Options options = CurrentOptions();
    Reopen(&options);
    ASSERT_OK(Put("big1", std::string(200000, '1')));
    ASSERT_OK(Put("big2", std::string(200000, '2')));
    ASSERT_OK(Put("small3", std::string(10, '3')));
    ASSERT_OK(Put("small4", std::string(10, '4')));
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);  // everything still in log/memtable
  }

  // Make sure that if we re-open with a small write buffer size that
  // we flush table files in the middle of a large log file.
  Options options = CurrentOptions();
  options.write_buffer_size = 100000;
  Reopen(&options);
  ASSERT_EQ(NumTableFilesAtLevel(0), 3);  // replay produced multiple tables
  ASSERT_EQ(std::string(200000, '1'), Get("big1"));
  ASSERT_EQ(std::string(200000, '2'), Get("big2"));
  ASSERT_EQ(std::string(10, '3'), Get("small3"));
  ASSERT_EQ(std::string(10, '4'), Get("small4"));
  ASSERT_GT(NumTableFilesAtLevel(0), 1);
}
// Compacting 8MB of level-0 data into level 1 must not lose any values.
// (Upstream leveldb also asserts the output is split into multiple files;
// Riak's larger file sizes produce a single level-1 file instead.)
TEST(DBTest, CompactionsGenerateMultipleFiles) {
  Options options = CurrentOptions();
  options.write_buffer_size = 100000000;        // Large write buffer
  Reopen(&options);

  Random rnd(301);

  // Write 8MB (80 values, each 100K)
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  std::vector<std::string> values;
  for (int i = 0; i < 80; i++) {
    values.push_back(RandomString(&rnd, 100000));
    ASSERT_OK(Put(Key(i), values[i]));
  }

  // Reopening moves updates to level-0
  Reopen(&options);
  dbfull()->TEST_CompactRange(0, NULL, NULL);

  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  // not riak  ASSERT_GT(NumTableFilesAtLevel(1), 1);
  ASSERT_EQ(NumTableFilesAtLevel(1), 1);  // yes riak
  for (int i = 0; i < 80; i++) {
    ASSERT_EQ(Get(Key(i)), values[i]);    // no data lost by the compaction
  }
}
// Repeatedly overwriting one key must not grow the file count without
// bound: obsolete versions must be compacted away, keeping the total file
// count below one-file-per-level plus the level-0 stop-writes allowance.
TEST(DBTest, RepeatedWritesToSameKey) {
  Options options = CurrentOptions();
  options.env = env_;
  options.write_buffer_size = 100000;  // Small write buffer

  Reopen(&options);

  // We must have at most one file per level except for level-0,
  // which may have up to kL0_StopWritesTrigger files.
  const int kMaxFiles = config::kNumLevels + config::kL0_StopWritesTrigger;

  Random rnd(301);
  std::string value = RandomString(&rnd, 2 * options.write_buffer_size);
  for (int i = 0; i < 5 * kMaxFiles; i++) {
    Put("key", value);
    ASSERT_LE(TotalTableFiles(), kMaxFiles);
    fprintf(stderr, "after %d: %d files\n", int(i+1), TotalTableFiles());
  }
}
// Exercises compaction behavior when a small sparse update touches a key
// range dominated by one huge prefix. Upstream asserts bounded next-level
// overlap; this fork only runs the compactions (see 07/10/14 note below).
TEST(DBTest, SparseMerge) {
  Options options = CurrentOptions();
  options.compression = kNoCompression;
  Reopen(&options);

  FillLevels("A", "Z");

  // Suppose there is:
  //    small amount of data with prefix A
  //    large amount of data with prefix B
  //    small amount of data with prefix C
  // and that recent updates have made small changes to all three prefixes.
  // Check that we do not do a compaction that merges all of B in one shot.
  const std::string value(1000, 'x');
  Put("A", "va");
  // Write approximately 100MB of "B" values
  for (int i = 0; i < 100000; i++) {
    char key[100];
    snprintf(key, sizeof(key), "B%010d", i);
    Put(key, value);
  }
  Put("C", "vc");
  dbfull()->TEST_CompactMemTable();
  dbfull()->TEST_CompactRange(0, NULL, NULL);

  // Make sparse update
  Put("A",    "va2");
  Put("B100", "bvalue2");
  Put("C",    "vc2");
  dbfull()->TEST_CompactMemTable();

  // Compactions should not cause us to create a situation where
  // a file overlaps too much data at the next level.
  // 07/10/14 matthewv - we overlap first two levels. sparse test not appropriate there,
  //  and we set overlaps into 100s of megabytes as "normal"
  // ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
  dbfull()->TEST_CompactRange(0, NULL, NULL);
  // ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
  dbfull()->TEST_CompactRange(1, NULL, NULL);
  // ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
}
// Returns true iff low <= val <= high (inclusive on both ends).
// On failure, logs the offending value and bounds to stderr so the calling
// ASSERT_TRUE(Between(...)) produces a diagnosable message.
static bool Between(uint64_t val, uint64_t low, uint64_t high) {
  if (low <= val && val <= high) {
    return true;
  }
  fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
          (unsigned long long)(val),
          (unsigned long long)(low),
          (unsigned long long)(high));
  return false;
}
// GetApproximateSizes() must report sizes proportional to the on-disk data:
// zero for an empty DB and for data still in the memtable, and roughly
// S1..S2 per 100K value once flushed, stable across reopens and compactions.
TEST(DBTest, ApproximateSizes) {
  do {
    Options options = CurrentOptions();
    options.write_buffer_size = 100000000;        // Large write buffer
    options.compression = kNoCompression;
    DestroyAndReopen();

    ASSERT_TRUE(Between(Size("", "xyz"), 0, 0));
    Reopen(&options);
    ASSERT_TRUE(Between(Size("", "xyz"), 0, 0));

    // Write 8MB (80 values, each 100K)
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
    const int N = 80;
    static const int S1 = 100000;
    static const int S2 = 105000;  // Allow some expansion from metadata
    Random rnd(301);
    for (int i = 0; i < N; i++) {
      ASSERT_OK(Put(Key(i), RandomString(&rnd, S1)));
    }

    // 0 because GetApproximateSizes() does not account for memtable space
    ASSERT_TRUE(Between(Size("", Key(50)), 0, 0));

    // Check sizes across recovery by reopening a few times
    for (int run = 0; run < 3; run++) {
      Reopen(&options);

      for (int compact_start = 0; compact_start < N; compact_start += 10) {
        for (int i = 0; i < N; i += 10) {
          ASSERT_TRUE(Between(Size("", Key(i)), S1*i, S2*i));
          ASSERT_TRUE(Between(Size("", Key(i)+".suffix"), S1*(i+1), S2*(i+1)));
          ASSERT_TRUE(Between(Size(Key(i), Key(i+10)), S1*10, S2*10));
        }
        ASSERT_TRUE(Between(Size("", Key(50)), S1*50, S2*50));
        ASSERT_TRUE(Between(Size("", Key(50)+".suffix"), S1*50, S2*50));

        // Compact a 10-key slice each pass; sizes above must stay stable.
        std::string cstart_str = Key(compact_start);
        std::string cend_str = Key(compact_start + 9);
        Slice cstart = cstart_str;
        Slice cend = cend_str;
        dbfull()->TEST_CompactRange(0, &cstart, &cend);
      }

      ASSERT_EQ(NumTableFilesAtLevel(0), 0);
      ASSERT_GT(NumTableFilesAtLevel(1), 0);
    }
  } while (ChangeOptions());
}
// GetApproximateSizes() with a mix of 10K, 100K, and 300K values: the
// cumulative size up to each key must fall in a tight expected window,
// stable across reopens and a full level-0 compaction each round.
TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
  do {
    Options options = CurrentOptions();
    options.compression = kNoCompression;
    Reopen();

    Random rnd(301);
    std::string big1 = RandomString(&rnd, 100000);
    ASSERT_OK(Put(Key(0), RandomString(&rnd, 10000)));
    ASSERT_OK(Put(Key(1), RandomString(&rnd, 10000)));
    ASSERT_OK(Put(Key(2), big1));
    ASSERT_OK(Put(Key(3), RandomString(&rnd, 10000)));
    ASSERT_OK(Put(Key(4), big1))
;    ASSERT_OK(Put(Key(5), RandomString(&rnd, 10000)));
    ASSERT_OK(Put(Key(6), RandomString(&rnd, 300000)));
    ASSERT_OK(Put(Key(7), RandomString(&rnd, 10000)));

    // Check sizes across recovery by reopening a few times
    for (int run = 0; run < 3; run++) {
      Reopen(&options);

      // Each window is the running total of the value sizes written above,
      // with ~1000 bytes of slack for per-key metadata.
      ASSERT_TRUE(Between(Size("", Key(0)), 0, 0));
      ASSERT_TRUE(Between(Size("", Key(1)), 10000, 11000));
      ASSERT_TRUE(Between(Size("", Key(2)), 20000, 21000));
      ASSERT_TRUE(Between(Size("", Key(3)), 120000, 121000));
      ASSERT_TRUE(Between(Size("", Key(4)), 130000, 131000));
      ASSERT_TRUE(Between(Size("", Key(5)), 230000, 231000));
      ASSERT_TRUE(Between(Size("", Key(6)), 240000, 241000));
      ASSERT_TRUE(Between(Size("", Key(7)), 540000, 541000));
      ASSERT_TRUE(Between(Size("", Key(8)), 550000, 560000));

      ASSERT_TRUE(Between(Size(Key(3), Key(5)), 110000, 111000));

      dbfull()->TEST_CompactRange(0, NULL, NULL);
    }
  } while (ChangeOptions());
}
// An iterator must pin the DB state it was created against: heavy writes
// and the compactions they trigger afterwards must not change what the
// iterator sees ("foo"->"hello" and nothing else).
TEST(DBTest, IteratorPinsRef) {
  Put("foo", "hello");

  // Get iterator that will yield the current contents of the DB.
  Iterator* iter = db_->NewIterator(ReadOptions());

  // Write to force compactions
  Put("foo", "newvalue1");
  for (int i = 0; i < 100; i++) {
    ASSERT_OK(Put(Key(i), Key(i) + std::string(100000, 'v'))); // 100K values
  }
  Put("foo", "newvalue2");

  iter->SeekToFirst();
  ASSERT_TRUE(iter->Valid());
  ASSERT_EQ("foo", iter->key().ToString());
  ASSERT_EQ("hello", iter->value().ToString());
  iter->Next();
  ASSERT_TRUE(!iter->Valid());   // the 100 later keys are invisible
  delete iter;
}
// Explicit snapshots: each snapshot keeps seeing the value current at its
// creation, releasing one snapshot must not disturb the others, and the
// live view always returns the newest value.
TEST(DBTest, Snapshot) {
  do {
    Put("foo", "v1");
    const Snapshot* s1 = db_->GetSnapshot();
    Put("foo", "v2");
    const Snapshot* s2 = db_->GetSnapshot();
    Put("foo", "v3");
    const Snapshot* s3 = db_->GetSnapshot();

    Put("foo", "v4");
    ASSERT_EQ("v1", Get("foo", s1));
    ASSERT_EQ("v2", Get("foo", s2));
    ASSERT_EQ("v3", Get("foo", s3));
    ASSERT_EQ("v4", Get("foo"));

    // Release snapshots out of order; the survivors stay intact.
    db_->ReleaseSnapshot(s3);
    ASSERT_EQ("v1", Get("foo", s1));
    ASSERT_EQ("v2", Get("foo", s2));
    ASSERT_EQ("v4", Get("foo"));

    db_->ReleaseSnapshot(s1);
    ASSERT_EQ("v2", Get("foo", s2));
    ASSERT_EQ("v4", Get("foo"));

    db_->ReleaseSnapshot(s2);
    ASSERT_EQ("v4", Get("foo"));
  } while (ChangeOptions());
}
#if 0  // trouble under Riak due to assumed file sizes
// (Disabled in this fork: the test's size/level assertions assume upstream
// leveldb's file-size and level-placement behavior.)
TEST(DBTest, HiddenValuesAreRemoved) {
  do {
    Random rnd(301);
    FillLevels("a", "z");

    std::string big = RandomString(&rnd, 50000);
    Put("foo", big);
    Put("pastfoo", "v");
    const Snapshot* snapshot = db_->GetSnapshot();
    Put("foo", "tiny");
    Put("pastfoo2", "v2");        // Advance sequence number one more

    ASSERT_OK(dbfull()->TEST_CompactMemTable());
    ASSERT_GT(NumTableFilesAtLevel(0), 0);

    ASSERT_EQ(big, Get("foo", snapshot));
    ASSERT_TRUE(Between(Size("", "pastfoo"), 50000, 60000));
    db_->ReleaseSnapshot(snapshot);
    ASSERT_EQ(AllEntriesFor("foo"), "[ tiny, " + big + " ]");
    Slice x("x");
    dbfull()->TEST_CompactRange(0, NULL, &x);
    // Once the snapshot is gone, compaction may drop the hidden big value.
    ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
    ASSERT_GE(NumTableFilesAtLevel(1), 1);
    dbfull()->TEST_CompactRange(1, NULL, &x);
    ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");

    ASSERT_TRUE(Between(Size("", "pastfoo"), 0, 1000));
  } while (ChangeOptions());
}
#endif
// Tracks how a deletion marker for an overwritten key is eliminated as the
// entries move down the levels. Several upstream assertions are commented
// out because this fork merges the DEL earlier (see riak 1.3/1.4 notes).
TEST(DBTest, DeletionMarkers1) {
  Put("foo", "v1");
  ASSERT_OK(dbfull()->TEST_CompactMemTable());
  const int last = config::kMaxMemCompactLevel;
  //ASSERT_EQ(NumTableFilesAtLevel(last), 1);   // foo => v1 is now in last level

  // Place a table at level last-1 to prevent merging with preceding mutation
  Put("a", "begin");
  Put("z", "end");
  dbfull()->TEST_CompactMemTable();
  //ASSERT_EQ(NumTableFilesAtLevel(last), 1);
  //ASSERT_EQ(NumTableFilesAtLevel(last-1), 1);

  Delete("foo");
  Put("foo", "v2");
  ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
  ASSERT_OK(dbfull()->TEST_CompactMemTable());  // Moves to level last-2
  ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]");  // riak 1.3, DEL merged out by BuildTable
  Slice z("z");
  dbfull()->TEST_CompactRange(last-2, NULL, &z);
  // DEL eliminated, but v1 remains because we aren't compacting that level
  // (DEL can be eliminated because v2 hides v1).
  //ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]"); Riak 1.4 has merged to level 1
  //dbfull()->TEST_CompactRange(last-1, NULL, NULL);
  // Merging last-1 w/ last, so we are the base level for "foo", so
  // DEL is removed.  (as is v1).
  ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]");
}
// Tracks a deletion marker with NO newer value behind it: the DEL must be
// kept while any lower level could still hold the key, and only disappears
// once compacted into the base level for "foo".
TEST(DBTest, DeletionMarkers2) {
  Put("foo", "v1");
  ASSERT_OK(dbfull()->TEST_CompactMemTable());
  const int last = config::kMaxMemCompactLevel;
  ASSERT_EQ(NumTableFilesAtLevel(0), 1);   // foo => v1 is now in last level
  dbfull()->TEST_CompactRange(0, NULL, NULL);
  ASSERT_EQ(NumTableFilesAtLevel(1), 1);   // foo => v1 is now in last level
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);

  // Place a table at level last-1 to prevent merging with preceding mutation
  Put("a", "begin");
  Put("z", "end");
  dbfull()->TEST_CompactMemTable();
  ASSERT_EQ(NumTableFilesAtLevel(0), 1);

  Delete("foo");
  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
  ASSERT_OK(dbfull()->TEST_CompactMemTable());  // Moves to level last-2
  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
  dbfull()->TEST_CompactRange(0, NULL, NULL);   // Riak overlaps level 1
  // DEL kept: "last" file overlaps
  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
  // Merging last-1 w/ last, so we are the base level for "foo", so
  // DEL is removed.  (as is v1).
  dbfull()->TEST_CompactRange(1, NULL, NULL);
  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL ]");
  dbfull()->TEST_CompactRange(2, NULL, NULL);
  ASSERT_EQ(AllEntriesFor("foo"), "[ ]");  // fully gone at the bottom
}
// Regression test: a memtable compaction must notice overlapping level-0
// files and keep a deletion at level 0 rather than pushing it to a deeper
// level where it would fail to shadow the level-0 value.
TEST(DBTest, OverlapInLevel0) {
  do {
    ASSERT_EQ(config::kMaxMemCompactLevel, 2) << "Fix test to match config";

    // Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0.
    ASSERT_OK(Put("100", "v100"));
    ASSERT_OK(Put("999", "v999"));
    dbfull()->TEST_CompactMemTable();
    dbfull()->TEST_CompactRange(0, NULL, NULL);
    dbfull()->TEST_CompactRange(1, NULL, NULL);
    ASSERT_OK(Delete("100"));
    ASSERT_OK(Delete("999"));
    dbfull()->TEST_CompactMemTable();
    dbfull()->TEST_CompactRange(0, NULL, NULL);
    ASSERT_EQ("0,1,1", FilesPerLevel());

    // Make files spanning the following ranges in level-0:
    //  files[0]  200 .. 900
    //  files[1]  300 .. 500
    // Note that files are sorted by smallest key.
    ASSERT_OK(Put("300", "v300"));
    ASSERT_OK(Put("500", "v500"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_OK(Put("200", "v200"));
    ASSERT_OK(Put("600", "v600"));
    ASSERT_OK(Put("900", "v900"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("2,1,1", FilesPerLevel());

    // Compact away the placeholder files we created initially
    dbfull()->TEST_CompactRange(1, NULL, NULL);
    dbfull()->TEST_CompactRange(2, NULL, NULL);
    ASSERT_EQ("2", FilesPerLevel());

    // Do a memtable compaction.  Before bug-fix, the compaction would
    // not detect the overlap with level-0 files and would incorrectly place
    // the deletion in a deeper level.
    ASSERT_OK(Delete("600"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("3", FilesPerLevel());
    ASSERT_EQ("NOT_FOUND", Get("600"));  // deletion must shadow v600
  } while (ChangeOptions());
}
// Regression for leveldb issue 44 (case a): a specific interleaving of
// puts/deletes across reopens must not resurrect deleted keys, even after
// background compaction runs.
TEST(DBTest, L0_CompactionBug_Issue44_a) {
  Reopen();
  ASSERT_OK(Put("b", "v"));
  Reopen();
  ASSERT_OK(Delete("b"));
  ASSERT_OK(Delete("a"));
  Reopen();
  ASSERT_OK(Delete("a"));
  Reopen();
  ASSERT_OK(Put("a", "v"));
  Reopen();
  Reopen();
  ASSERT_EQ("(a->v)", Contents());
  env_->SleepForMicroseconds(1000000);  // Wait for compaction to finish
  ASSERT_EQ("(a->v)", Contents());      // compaction must not change contents
}
// Regression for leveldb issue 44 (case b): a longer interleaving, including
// the empty key, exercising the same level-0 compaction ordering bug.
TEST(DBTest, L0_CompactionBug_Issue44_b) {
  Reopen();
  Put("","");
  Reopen();
  Delete("e");
  Put("","");
  Reopen();
  Put("c", "cv");
  Reopen();
  Put("","");
  Reopen();
  Put("","");
  env_->SleepForMicroseconds(1000000);  // Wait for compaction to finish
  Reopen();
  Put("d","dv");
  Reopen();
  Put("","");
  Reopen();
  Delete("d");
  Delete("b");
  Reopen();
  ASSERT_EQ("(->)(c->cv)", Contents());  // only ""->"" and "c"->"cv" remain
  env_->SleepForMicroseconds(1000000);  // Wait for compaction to finish
  ASSERT_EQ("(->)(c->cv)", Contents());
}
// Reopening an existing DB with a comparator whose Name() differs from the
// one the DB was created with must fail, and the error must mention the
// comparator mismatch.
TEST(DBTest, ComparatorCheck) {
  // Same ordering as BytewiseComparator, but a different Name() — the
  // name mismatch alone must be enough to reject the reopen.
  class NewComparator : public Comparator {
   public:
    virtual const char* Name() const { return "leveldb.NewComparator"; }
    virtual int Compare(const Slice& a, const Slice& b) const {
      return BytewiseComparator()->Compare(a, b);
    }
    virtual void FindShortestSeparator(std::string* s, const Slice& l) const {
      BytewiseComparator()->FindShortestSeparator(s, l);
    }
    virtual void FindShortSuccessor(std::string* key) const {
      BytewiseComparator()->FindShortSuccessor(key);
    }
  };
  NewComparator cmp;
  Options new_options = CurrentOptions();
  new_options.comparator = &cmp;
  Status s = TryReopen(&new_options);
  ASSERT_TRUE(!s.ok());
  ASSERT_TRUE(s.ToString().find("comparator") != std::string::npos)
      << s.ToString();
}
// A fully custom comparator (numeric ordering over "[n]"-formatted keys)
// must work end-to-end: equal-valued keys in different bases ("[20]" vs
// "[0x14]") alias, and heavy write/compact cycles preserve ordering.
TEST(DBTest, CustomComparator) {
  class NumberComparator : public Comparator {
   public:
    virtual const char* Name() const { return "test.NumberComparator"; }
    virtual int Compare(const Slice& a, const Slice& b) const {
      return ToNumber(a) - ToNumber(b);
    }
    virtual void FindShortestSeparator(std::string* s, const Slice& l) const {
      ToNumber(*s);     // Check format
      ToNumber(l);      // Check format
    }
    virtual void FindShortSuccessor(std::string* key) const {
      ToNumber(*key);   // Check format
    }
   private:
    static int ToNumber(const Slice& x) {
      // Check that there are no extra characters.
      ASSERT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size()-1] == ']')
          << EscapeString(x);
      int val;
      char ignored;
      // Note: "%i" accepts decimal, octal, and hex, so "[0x14]" parses as 20.
      ASSERT_TRUE(sscanf(x.ToString().c_str(), "[%i]%c", &val, &ignored) == 1)
          << EscapeString(x);
      return val;
    }
  };
  NumberComparator cmp;
  Options new_options = CurrentOptions();
  new_options.create_if_missing = true;
  new_options.comparator = &cmp;
  new_options.filter_policy = NULL;     // Cannot use bloom filters
  new_options.write_buffer_size = 1000; // Compact more often
  DestroyAndReopen(&new_options);
  ASSERT_OK(Put("[10]", "ten"));
  ASSERT_OK(Put("[0x14]", "twenty"));
  for (int i = 0; i < 2; i++) {
    ASSERT_EQ("ten", Get("[10]"));
    ASSERT_EQ("ten", Get("[0xa]"));       // same numeric key as "[10]"
    ASSERT_EQ("twenty", Get("[20]"));     // same numeric key as "[0x14]"
    ASSERT_EQ("twenty", Get("[0x14]"));
    ASSERT_EQ("NOT_FOUND", Get("[15]"));
    ASSERT_EQ("NOT_FOUND", Get("[0xf]"));
    Compact("[0]", "[9999]");
  }

  for (int run = 0; run < 2; run++) {
    for (int i = 0; i < 1000; i++) {
      char buf[100];
      snprintf(buf, sizeof(buf), "[%d]", i*10);
      ASSERT_OK(Put(buf, buf));
    }
    Compact("[0]", "[1000000]");
  }
}
// Manual CompactRange(): ranges that fall entirely before or after the
// existing files are no-ops, overlapping ranges compact only the touched
// files, and a NULL,NULL range compacts everything.
TEST(DBTest, ManualCompaction) {
  ASSERT_EQ(config::kMaxMemCompactLevel, 2)
      << "Need to update this test to match kMaxMemCompactLevel";

  MakeTables(3, "p", "q");
  ASSERT_EQ("3", FilesPerLevel());

  // Compaction range falls before files
  Compact("", "c");
  ASSERT_EQ("3", FilesPerLevel());

  // Compaction range falls after files
  Compact("r", "z");
  ASSERT_EQ("3", FilesPerLevel());

  // Compaction range overlaps files
  Compact("p1", "p9");
  ASSERT_EQ("0,1", FilesPerLevel());

  // Populate a different range
  MakeTables(3, "c", "e");
  ASSERT_EQ("3,1", FilesPerLevel());

  // Compact just the new range
  Compact("b", "f");
  ASSERT_EQ("0,2", FilesPerLevel());

  // Compact all
  MakeTables(1, "a", "z");
  ASSERT_EQ("1,2", FilesPerLevel());
  db_->CompactRange(NULL, NULL);
  ASSERT_EQ("0,3", FilesPerLevel());
}
// DB::Open must honor create_if_missing and error_if_exists in all four
// combinations against a missing vs. existing database directory.
TEST(DBTest, DBOpen_Options) {
  std::string dbname = test::TmpDir() + "/db_options_test";
  DestroyDB(dbname, Options());

  // Does not exist, and create_if_missing == false: error
  DB* db = NULL;
  Options opts;
  opts.create_if_missing = false;
  Status s = DB::Open(opts, dbname, &db);
  ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != NULL);
  ASSERT_TRUE(db == NULL);

  // Does not exist, and create_if_missing == true: OK
  opts.create_if_missing = true;
  s = DB::Open(opts, dbname, &db);
  ASSERT_OK(s);
  ASSERT_TRUE(db != NULL);

  delete db;
  db = NULL;

  // Does exist, and error_if_exists == true: error
  opts.create_if_missing = false;
  opts.error_if_exists = true;
  s = DB::Open(opts, dbname, &db);
  ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != NULL);
  ASSERT_TRUE(db == NULL);

  // Does exist, and error_if_exists == false: OK
  opts.create_if_missing = true;
  opts.error_if_exists = false;
  s = DB::Open(opts, dbname, &db);
  ASSERT_OK(s);
  ASSERT_TRUE(db != NULL);

  delete db;
  db = NULL;
}
// Check that number of files does not grow when we are out of space
TEST(DBTest, NoSpace) {
  Options options = CurrentOptions();
  options.env = env_;
  Reopen(&options);

  ASSERT_OK(Put("foo", "v1"));
  ASSERT_EQ("v1", Get("foo"));
  Compact("a", "z");
  const int num_files = CountFiles();
  env_->no_space_.Release_Store(env_);   // Force out-of-space errors
  env_->sleep_counter_.Reset();
  // Repeated compaction attempts under ENOSPC must not leak files.
  for (int i = 0; i < 5; i++) {
    for (int level = 0; level < config::kNumLevels-1; level++) {
      dbfull()->TEST_CompactRange(level, NULL, NULL);
    }
  }
  env_->no_space_.Release_Store(NULL);   // restore normal writes
  ASSERT_LT(CountFiles(), num_files + 3);

  // Check that compaction attempts slept after errors
  ASSERT_GE(env_->sleep_counter_.Read(), 5);
}
#if 0
// (Disabled in this fork.) Upstream test: with new-file creation failing,
// writes must eventually surface errors rather than hang or crash, and must
// recover once the filesystem is writable again.
TEST(DBTest, NonWritableFileSystem) {
  Options options = CurrentOptions();
  options.write_buffer_size = 1000;
  options.env = env_;
  Reopen(&options);
  ASSERT_OK(Put("foo", "v1"));
  env_->non_writable_.Release_Store(env_);  // Force errors for new files
  std::string big(100000, 'x');
  int errors = 0;
  for (int i = 0; i < 20; i++) {
    fprintf(stderr, "iter %d; errors %d\n", i, errors);
    if (!Put("foo", big).ok()) {
      errors++;
      env_->SleepForMicroseconds(100000);
    }
  }
  ASSERT_GT(errors, 0);
  env_->non_writable_.Release_Store(NULL);
}
#endif
// Obsolete table files must be deleted after compaction: repeated
// write+compact cycles must leave the total file count unchanged.
TEST(DBTest, FilesDeletedAfterCompaction) {
  ASSERT_OK(Put("foo", "v2"));
  Compact("a", "z");
  const int num_files = CountFiles();
  for (int i = 0; i < 10; i++) {
    ASSERT_OK(Put("foo", "v2"));
    Compact("a", "z");
  }
  ASSERT_EQ(CountFiles(), num_files);  // no leaked obsolete files
}
// With a bloom filter policy installed, lookups of present keys should need
// roughly one disk read each, and lookups of missing keys should almost
// never touch disk (bounded by the filter's false-positive rate).
TEST(DBTest, BloomFilter) {
  env_->count_random_reads_ = true;
  Options options = CurrentOptions();
  options.env = env_;
  options.block_cache = NewLRUCache(0);  // Prevent cache hits
  options.filter_policy = NewBloomFilterPolicy2(16);
  Reopen(&options);

  // Populate multiple layers
  const int N = 10000;
  for (int i = 0; i < N; i++) {
    ASSERT_OK(Put(Key(i), Key(i)));
  }
  Compact("a", "z");
  for (int i = 0; i < N; i += 100) {
    ASSERT_OK(Put(Key(i), Key(i)));
  }
  dbfull()->TEST_CompactMemTable();

  // Prevent auto compactions triggered by seeks
  env_->delay_sstable_sync_.Release_Store(env_);

  // Lookup present keys.  Should rarely read from small sstable.
  env_->random_read_counter_.Reset();
  for (int i = 0; i < N; i++) {
    ASSERT_EQ(Key(i), GetNoCache(Key(i)));
  }
  int reads = env_->random_read_counter_.Read();
  fprintf(stderr, "%d present => %d reads\n", N, reads);
  ASSERT_GE(reads, N);
  ASSERT_LE(reads, N + 2*N/100);  // at most ~2% extra reads

  // Lookup missing keys.  Should rarely read from either sstable.
  env_->random_read_counter_.Reset();
  for (int i = 0; i < N; i++) {
    ASSERT_EQ("NOT_FOUND", GetNoCache(Key(i) + ".missing"));
  }
  reads = env_->random_read_counter_.Read();
  fprintf(stderr, "%d missing => %d reads\n", N, reads);
  ASSERT_LE(reads, 3*N/100);      // bloom filter screens out most lookups

  env_->delay_sstable_sync_.Release_Store(NULL);
  Close();
  delete options.block_cache;
  delete options.filter_policy;
}
// Multi-threaded test:
namespace {

static const int kNumThreads = 4;   // worker threads per run
static const int kTestSeconds = 10; // wall-clock duration of each run
static const int kNumKeys = 1000;   // key space shared by all threads

// Shared state between the driver test and the worker threads. All
// cross-thread fields use port::AtomicPointer for release/acquire semantics.
struct MTState {
  DBTest* test;                              // owning fixture (provides db_)
  port::AtomicPointer stop;                  // non-NULL => workers should exit
  port::AtomicPointer counter[kNumThreads];  // per-thread op count (as pointer)
  port::AtomicPointer thread_done[kNumThreads];  // non-NULL => thread finished
};

// Per-thread argument: the shared state plus this thread's slot index.
struct MTThread {
  MTState* state;
  int id;
};

// Worker loop: randomly writes "<key>.<thread>.<counter>" values or reads
// and validates them, until state->stop is set.
static void MTThreadBody(void* arg) {
  MTThread* t = reinterpret_cast<MTThread*>(arg);
  int id = t->id;
  DB* db = t->state->test->db_;
  uintptr_t counter = 0;
  fprintf(stderr, "... starting thread %d\n", id);
  Random rnd(1000 + id);
  std::string value;
  char valbuf[1500];
  while (t->state->stop.Acquire_Load() == NULL) {
    // Publish our progress so readers can bound the counters they observe.
    t->state->counter[id].Release_Store(reinterpret_cast<void*>(counter));

    int key = rnd.Uniform(kNumKeys);
    char keybuf[20];
    snprintf(keybuf, sizeof(keybuf), "%016d", key);

    if (rnd.OneIn(2)) {
      // Write values of the form <key, my id, counter>.
      // We add some padding to force compactions.
      snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d",
               key, id, static_cast<int>(counter));
      ASSERT_OK(db->Put(WriteOptions(), Slice(keybuf), Slice(valbuf)));
    } else {
      // Read a value and verify that it matches the pattern written above.
      Status s = db->Get(ReadOptions(), Slice(keybuf), &value);
      if (s.IsNotFound()) {
        // Key has not yet been written
      } else {
        // Check that the writer thread counter is >= the counter in the value
        ASSERT_OK(s);
        int k, w, c;
        ASSERT_EQ(3, sscanf(value.c_str(), "%d.%d.%d", &k, &w, &c)) << value;
        ASSERT_EQ(k, key);
        ASSERT_GE(w, 0);
        ASSERT_LT(w, kNumThreads);
        ASSERT_LE(c, reinterpret_cast<uintptr_t>(
            t->state->counter[w].Acquire_Load()));
      }
    }
    counter++;
  }
  t->state->thread_done[id].Release_Store(t);
  fprintf(stderr, "... stopping thread %d after %d ops\n", id, int(counter));
}

}  // namespace
// Drives kNumThreads MTThreadBody workers against the shared DB for
// kTestSeconds, then signals stop and waits for each worker to report done.
// Correctness checks live in the worker body; this test passes if no worker
// assertion fires.
TEST(DBTest, MultiThreaded) {
  do {
    // Initialize state
    MTState mt;
    mt.test = this;
    mt.stop.Release_Store(0);
    for (int id = 0; id < kNumThreads; id++) {
      mt.counter[id].Release_Store(0);
      mt.thread_done[id].Release_Store(0);
    }

    // Start threads
    MTThread thread[kNumThreads];
    pthread_t tid;
    for (int id = 0; id < kNumThreads; id++) {
      thread[id].state = &mt;
      thread[id].id = id;
      tid=env_->StartThread(MTThreadBody, &thread[id]);
      pthread_detach(tid);  // detached; completion tracked via thread_done[]
    }

    // Let them run for a while
    env_->SleepForMicroseconds(kTestSeconds * 1000000);

    // Stop the threads and wait for them to finish
    mt.stop.Release_Store(&mt);
    for (int id = 0; id < kNumThreads; id++) {
      while (mt.thread_done[id].Acquire_Load() == NULL) {
        env_->SleepForMicroseconds(100000);
      }
    }
  } while (ChangeOptions());
}
namespace {
// Ordered key/value store used by ModelDB as the ground-truth state.
typedef std::map<std::string, std::string> KVMap;
}
// A trivially-correct in-memory DB implementation backed by a std::map.
// Used as the reference model in the randomized comparison test: every
// operation applied to the real DB is also applied to a ModelDB, and
// iterators over both are compared. Snapshots are full copies of the map.
class ModelDB: public DB {
 public:
  class ModelSnapshot : public Snapshot {
   public:
    KVMap map_;  // full copy of the DB state at snapshot time
  };

  explicit ModelDB(const Options& options): options_(options) { }
  ~ModelDB() { }
  // Put/Delete route through the base-class WriteBatch helpers, so all
  // mutations funnel into Write() below.
  virtual Status Put(const WriteOptions& o, const Slice& k, const Slice& v) {
    return DB::Put(o, k, v);
  }
  virtual Status Delete(const WriteOptions& o, const Slice& key) {
    return DB::Delete(o, key);
  }
  // Point lookups are never used by the comparison test; only iterators are.
  virtual Status Get(const ReadOptions& options,
                     const Slice& key, std::string* value) {
    assert(false);      // Not implemented
    return Status::NotFound(key);
  }
  virtual Status Get(const ReadOptions& options,
                     const Slice& key, Value* value) {
    assert(false);      // Not implemented
    return Status::NotFound(key);
  }
  virtual Iterator* NewIterator(const ReadOptions& options) {
    if (options.snapshot == NULL) {
      // No snapshot: iterate over a private copy of the current state,
      // owned (and freed) by the iterator.
      KVMap* saved = new KVMap;
      *saved = map_;
      return new ModelIter(saved, true);
    } else {
      // Snapshot iterators borrow the snapshot's map; the snapshot owns it.
      const KVMap* snapshot_state =
          &(reinterpret_cast<const ModelSnapshot*>(options.snapshot)->map_);
      return new ModelIter(snapshot_state, false);
    }
  }

  virtual const Snapshot* GetSnapshot() {
    ModelSnapshot* snapshot = new ModelSnapshot;
    snapshot->map_ = map_;  // deep copy of the entire state
    return snapshot;
  }

  virtual void ReleaseSnapshot(const Snapshot* snapshot) {
    delete reinterpret_cast<const ModelSnapshot*>(snapshot);
  }
  // Applies a batch to map_ by replaying it through a WriteBatch::Handler.
  virtual Status Write(const WriteOptions& options, WriteBatch* batch) {
    class Handler : public WriteBatch::Handler {
     public:
      KVMap* map_;
      virtual void Put(const Slice& key, const Slice& value) {
        (*map_)[key.ToString()] = value.ToString();
      }
      virtual void Delete(const Slice& key) {
        map_->erase(key.ToString());
      }
    };
    Handler handler;
    handler.map_ = &map_;
    return batch->Iterate(&handler);
  }

  virtual bool GetProperty(const Slice& property, std::string* value) {
    return false;  // no properties in the model
  }
  virtual void GetApproximateSizes(const Range* r, int n, uint64_t* sizes) {
    for (int i = 0; i < n; i++) {
      sizes[i] = 0;  // model has no on-disk footprint
    }
  }
  virtual void CompactRange(const Slice* start, const Slice* end) {
    // no-op: the model has nothing to compact
  }

 private:
  // Iterator over a KVMap; optionally owns (and deletes) the map.
  class ModelIter: public Iterator {
   public:
    ModelIter(const KVMap* map, bool owned)
        : map_(map), owned_(owned), iter_(map_->end()) {
    }
    ~ModelIter() {
      if (owned_) delete map_;
    }
    virtual bool Valid() const { return iter_ != map_->end(); }
    virtual void SeekToFirst() { iter_ = map_->begin(); }
    virtual void SeekToLast() {
      if (map_->empty()) {
        iter_ = map_->end();
      } else {
        // Position on the greatest key (rbegin's key looked up forward).
        iter_ = map_->find(map_->rbegin()->first);
      }
    }
    virtual void Seek(const Slice& k) {
      iter_ = map_->lower_bound(k.ToString());
    }
    virtual void Next() { ++iter_; }
    // NOTE(review): Prev() on the first entry decrements begin(), which is
    // undefined behavior for std::map iterators; callers must not do that.
    virtual void Prev() { --iter_; }
    virtual Slice key() const { return iter_->first; }
    virtual Slice value() const { return iter_->second; }
    virtual Status status() const { return Status::OK(); }
   private:
    const KVMap* const map_;
    const bool owned_;  // Do we own map_
    KVMap::const_iterator iter_;
  };
  const Options options_;
  KVMap map_;  // current live state
};
// Draw a random key: usually short, occasionally length-skewed, and very
// short one time in three so distinct draws are likely to collide.
static std::string RandomKey(Random* rnd) {
  int len;
  if (rnd->OneIn(3)) {
    len = 1;  // Short sometimes to encourage collisions
  } else if (rnd->OneIn(100)) {
    len = rnd->Skewed(10);
  } else {
    len = rnd->Uniform(10);
  }
  return test::RandomKey(rnd, len);
}
// Compare the full ordered contents of `model` and `db`, optionally as of
// the given snapshots.  Returns true when every key/value pair matches and
// both iterators end together; logs the first mismatch to stderr otherwise.
static bool CompareIterators(int step,
                             DB* model,
                             DB* db,
                             const Snapshot* model_snap,
                             const Snapshot* db_snap) {
  ReadOptions options;
  options.snapshot = model_snap;
  Iterator* miter = model->NewIterator(options);
  options.snapshot = db_snap;
  Iterator* dbiter = db->NewIterator(options);
  bool ok = true;
  int count = 0;
  for (miter->SeekToFirst(), dbiter->SeekToFirst();
       ok && miter->Valid() && dbiter->Valid();
       miter->Next(), dbiter->Next()) {
    count++;
    if (miter->key().compare(dbiter->key()) != 0) {
      fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n",
              step,
              EscapeString(miter->key()).c_str(),
              EscapeString(dbiter->key()).c_str());
      ok = false;
      break;
    }

    if (miter->value().compare(dbiter->value()) != 0) {
      // Bug fix: this message used to print miter->value() twice, so the
      // real DB value never appeared in the mismatch report.
      fprintf(stderr, "step %d: Value mismatch for key '%s': '%s' vs. '%s'\n",
              step,
              EscapeString(miter->key()).c_str(),
              EscapeString(miter->value()).c_str(),
              EscapeString(dbiter->value()).c_str());
      ok = false;
    }
  }

  if (ok) {
    // Both iterators must be exhausted at the same time.
    if (miter->Valid() != dbiter->Valid()) {
      fprintf(stderr, "step %d: Mismatch at end of iterators: %d vs. %d\n",
              step, miter->Valid(), dbiter->Valid());
      ok = false;
    }
  }
  fprintf(stderr, "%d entries compared: ok=%d\n", count, ok);
  delete miter;
  delete dbiter;
  return ok;
}
// Model-based randomized test: applies an identical random sequence of
// Put / Delete / WriteBatch operations to the real DB and to the in-memory
// ModelDB, periodically comparing their full iterator contents — with and
// without snapshots, and across a Reopen().
TEST(DBTest, Randomized) {
  Random rnd(test::RandomSeed());
  do {
    ModelDB model(CurrentOptions());
    const int N = 10000;
    const Snapshot* model_snap = NULL;
    const Snapshot* db_snap = NULL;
    std::string k, v;
    for (int step = 0; step < N; step++) {
      if (step % 100 == 0) {
        fprintf(stderr, "Step %d of %d\n", step, N);
      }
      // TODO(sanjay): Test Get() works
      int p = rnd.Uniform(100);
      if (p < 45) {  // Put (45% of steps); value is occasionally large.
        k = RandomKey(&rnd);
        v = RandomString(&rnd,
                         rnd.OneIn(20)
                         ? 100 + rnd.Uniform(100)
                         : rnd.Uniform(8));
        ASSERT_OK(model.Put(WriteOptions(), k, v));
        ASSERT_OK(db_->Put(WriteOptions(), k, v));

      } else if (p < 90) {  // Delete (45% of steps)
        k = RandomKey(&rnd);
        ASSERT_OK(model.Delete(WriteOptions(), k));
        ASSERT_OK(db_->Delete(WriteOptions(), k));

      } else {  // Multi-element batch (10% of steps)
        WriteBatch b;
        const int num = rnd.Uniform(8);
        for (int i = 0; i < num; i++) {
          if (i == 0 || !rnd.OneIn(10)) {
            k = RandomKey(&rnd);
          } else {
            // Periodically re-use the same key from the previous iter, so
            // we have multiple entries in the write batch for the same key
          }
          if (rnd.OneIn(2)) {
            v = RandomString(&rnd, rnd.Uniform(10));
            b.Put(k, v);
          } else {
            b.Delete(k);
          }
        }
        ASSERT_OK(model.Write(WriteOptions(), &b));
        ASSERT_OK(db_->Write(WriteOptions(), &b));
      }

      // Every 100 steps: verify live state, then state as of the previous
      // snapshots, then live state again after a Reopen().
      if ((step % 100) == 0) {
        ASSERT_TRUE(CompareIterators(step, &model, db_, NULL, NULL));
        ASSERT_TRUE(CompareIterators(step, &model, db_, model_snap, db_snap));
        // Save a snapshot from each DB this time that we'll use next
        // time we compare things, to make sure the current state is
        // preserved with the snapshot
        if (model_snap != NULL) model.ReleaseSnapshot(model_snap);
        if (db_snap != NULL) db_->ReleaseSnapshot(db_snap);

        Reopen();
        ASSERT_TRUE(CompareIterators(step, &model, db_, NULL, NULL));

        model_snap = model.GetSnapshot();
        db_snap = db_->GetSnapshot();
      }
    }
    if (model_snap != NULL) model.ReleaseSnapshot(model_snap);
    if (db_snap != NULL) db_->ReleaseSnapshot(db_snap);
  } while (ChangeOptions());
}
// Render `num` as a zero-padded, fixed-width 16-digit decimal key so that
// lexicographic key order matches numeric order.
std::string MakeKey(unsigned int num) {
  char key_buf[30];
  snprintf(key_buf, sizeof(key_buf), "%016u", num);
  return std::string(key_buf);
}
// Micro-benchmark: measures VersionSet::LogAndApply() latency when the
// current version already contains `num_base_files` files.  Invoked from
// main() with --benchmark; prints mean microseconds per iteration.
void BM_LogAndApply(int iters, int num_base_files) {
  std::string dbname = test::TmpDir() + "/leveldb_test_benchmark";
  DestroyDB(dbname, Options());

  // Create a fresh database so VersionSet::Recover() has a manifest.
  DB* db = NULL;
  Options opts;
  opts.create_if_missing = true;
  Status s = DB::Open(opts, dbname, &db);
  ASSERT_OK(s);
  ASSERT_TRUE(db != NULL);

  delete db;
  db = NULL;

  Env* env = Env::Default();

  // LogAndApply() requires the DB mutex to be held.
  port::Mutex mu;
  MutexLock l(&mu);

  InternalKeyComparator cmp(BytewiseComparator());
  Options options;
  VersionSet vset(dbname, &options, NULL, &cmp);
  ASSERT_OK(vset.Recover());

  // Seed the base version with num_base_files dummy level-2 files.
  VersionEdit vbase;
  uint64_t fnum = 1;
  for (int i = 0; i < num_base_files; i++) {
    InternalKey start(MakeKey(2*fnum), 1, kTypeValue);
    InternalKey limit(MakeKey(2*fnum+1), 1, kTypeDeletion);
    vbase.AddFile(2, fnum++, 1 /* file size */, start, limit);
  }
  ASSERT_OK(vset.LogAndApply(&vbase, &mu));

  uint64_t start_micros = env->NowMicros();

  // Timed loop: each iteration swaps one level-2 file for a new one.
  for (int i = 0; i < iters; i++) {
    VersionEdit vedit;
    vedit.DeleteFile(2, fnum);
    InternalKey start(MakeKey(2*fnum), 1, kTypeValue);
    InternalKey limit(MakeKey(2*fnum+1), 1, kTypeDeletion);
    vedit.AddFile(2, fnum++, 1 /* file size */, start, limit);
    vset.LogAndApply(&vedit, &mu);
  }
  uint64_t stop_micros = env->NowMicros();
  unsigned int us = stop_micros - start_micros;
  char buf[16];
  snprintf(buf, sizeof(buf), "%d", num_base_files);
  fprintf(stderr,
          "BM_LogAndApply/%-6s %8d iters : %9u us (%7.0f us / iter)\n",
          buf, iters, us, ((float)us) / iters);
}
} // namespace leveldb
// Entry point: with "--benchmark" run the LogAndApply micro-benchmark
// suite; otherwise run the full unit-test suite.
int main(int argc, char** argv) {
  const bool run_benchmark =
      (argc > 1 && std::string(argv[1]) == "--benchmark");
  if (run_benchmark) {
    leveldb::BM_LogAndApply(1000, 1);
    leveldb::BM_LogAndApply(1000, 100);
    leveldb::BM_LogAndApply(1000, 10000);
    leveldb::BM_LogAndApply(100, 100000);
    return 0;
  }
  return leveldb::test::RunAllTests();
}
| DavidAlphaFox/riak | deps/eleveldb/c_src/leveldb/db/db_test.cc | C++ | apache-2.0 | 58,689 |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Face functions for image classification.
"""
from . import _local
from . import _cloud
def preprocess_async(train_dataset, output_dir, eval_dataset=None, checkpoint=None, cloud=None):
  """Kick off preprocessing of training (and optionally evaluation) data.

  Args:
    train_dataset: CsvDataSet or BigQueryDataSet with the training data. When
        eval_dataset is omitted, it is split randomly 7:3 into train/eval.
    output_dir: base directory (local or GCS) for output; each run writes a
        new sub directory and updates the "latest" pointer file. Callers are
        responsible for cleanup.
    eval_dataset: optional evaluation data source (same kinds as above).
    checkpoint: Inception checkpoint to use; a default is used when None.
    cloud: DataFlow pipeline option dict such as {'num_workers': 3}. Any
        non-None value selects a cloud run; None runs locally.

  Returns:
    A google.datalab.utils.Job that can be polled or waited on.
  """
  if cloud is not None:
    pipeline_options = cloud if isinstance(cloud, dict) else {}
    return _cloud.Cloud.preprocess(train_dataset, output_dir, eval_dataset,
                                   checkpoint, pipeline_options)
  return _local.Local.preprocess(train_dataset, output_dir, eval_dataset, checkpoint)
def preprocess(train_dataset, output_dir, eval_dataset=None, checkpoint=None, cloud=None):
  """Blocking preprocess(): waits for the job and prints its final state."""
  pending = preprocess_async(train_dataset, output_dir, eval_dataset, checkpoint, cloud)
  pending.wait()
  print(pending.state)
def train_async(input_dir, batch_size, max_steps, output_dir, checkpoint=None, cloud=None):
  """Kick off model training on preprocessed data.

  Args:
    input_dir: directory (local or GCS) containing preprocessing results.
    batch_size: training batch size.
    max_steps: number of training steps.
    output_dir: output directory (local or GCS) for the trained model.
    checkpoint: Inception checkpoint to use; a default is used when None.
    cloud: google.datalab.ml.CloudTrainingConfig for a cloud run; None runs
        locally.

  Returns:
    A google.datalab.utils.Job that can be polled or waited on.
  """
  if cloud is not None:
    return _cloud.Cloud.train(input_dir, batch_size, max_steps, output_dir,
                              checkpoint, cloud)
  return _local.Local.train(input_dir, batch_size, max_steps, output_dir, checkpoint)
def train(input_dir, batch_size, max_steps, output_dir, checkpoint=None, cloud=None):
  """Blocking train(): waits for the job and prints its final state."""
  pending = train_async(input_dir, batch_size, max_steps, output_dir, checkpoint, cloud)
  pending.wait()
  print(pending.state)
def predict(model, image_files, resize=False, show_image=True, cloud=None):
  """Predict image labels with an offline model or a deployed online model.

  Args:
    model: when cloud is None, a local or GCS directory of a trained model;
        otherwise a deployed model id such as "imagemodel.v1".
    image_files: local or GCS paths of the images to classify.
    resize: resize images to a reasonable size (300x300) before prediction.
    show_image: include the images themselves in the results.
    cloud: None for offline prediction; anything else uses the online model.

  Returns:
    A pandas DataFrame with the prediction results.
  """
  print('Predicting...')
  backend = _local.Local if cloud is None else _cloud.Cloud
  return backend.predict(model, image_files, resize, show_image)
def batch_predict_async(dataset, model_dir, output_csv=None, output_bq_table=None, cloud=None):
  """Kick off batch prediction with an offline model.

  Args:
    dataset: CsvDataSet or BigQueryDataSet input. Either a single
        'image_url' column, or 'image_url' plus 'label'.
    model_dir: local or GCS directory of a trained inception model.
    output_csv: optional csv output path; a schema file named
        output_csv + '.schema.json' is written alongside it.
    output_bq_table: optional BigQuery output table. Both outputs may be
        set at once.
    cloud: DataFlow pipeline option dict such as {'num_workers': 3}. Any
        non-None value selects a cloud run (which requires a
        'temp_location' GCS staging path); None runs locally.

  Raises:
    ValueError if neither output target is given, or if a cloud run lacks
    'temp_location'.

  Returns:
    A google.datalab.utils.Job that can be polled or waited on.
  """
  if cloud is not None:
    pipeline_options = cloud if isinstance(cloud, dict) else {}
    return _cloud.Cloud.batch_predict(dataset, model_dir, output_csv,
                                      output_bq_table, pipeline_options)
  return _local.Local.batch_predict(dataset, model_dir, output_csv, output_bq_table)
def batch_predict(dataset, model_dir, output_csv=None, output_bq_table=None, cloud=None):
  """Blocking batch_predict(): waits for the job and prints its final state."""
  pending = batch_predict_async(dataset, model_dir, output_csv, output_bq_table, cloud)
  pending.wait()
  print(pending.state)
| jdanbrown/pydatalab | solutionbox/image_classification/mltoolbox/image/classification/_api.py | Python | apache-2.0 | 6,780 |
# Copyright 2011, OpenStack Foundation
# Copyright 2012, Red Hat, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import glance_store
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import encodeutils
from oslo_utils import excutils
import six
import webob
from glance.common import exception
from glance.common import timeutils
from glance.domain import proxy as domain_proxy
from glance.i18n import _, _LE
notifier_opts = [
cfg.StrOpt('default_publisher_id',
default="image.localhost",
help=_("""
Default publisher_id for outgoing Glance notifications.
This is the value that the notification driver will use to identify
messages for events originating from the Glance service. Typically,
this is the hostname of the instance that generated the message.
Possible values:
* Any reasonable instance identifier, for example: image.host1
Related options:
* None
""")),
cfg.ListOpt('disabled_notifications',
default=[],
help=_("""
List of notifications to be disabled.
Specify a list of notifications that should not be emitted.
A notification can be given either as a notification type to
disable a single event notification, or as a notification group
prefix to disable all event notifications within a group.
Possible values:
A comma-separated list of individual notification types or
notification groups to be disabled. Currently supported groups:
* image
* image.member
* task
* metadef_namespace
* metadef_object
* metadef_property
* metadef_resource_type
* metadef_tag
For a complete listing and description of each event refer to:
http://docs.openstack.org/developer/glance/notifications.html
The values must be specified as: <group_name>.<event_name>
For example: image.create,task.success,metadef_tag
Related options:
* None
""")),
]
CONF = cfg.CONF
CONF.register_opts(notifier_opts)
LOG = logging.getLogger(__name__)
def set_defaults(control_exchange='glance'):
    """Set the default oslo.messaging control exchange for Glance."""
    oslo_messaging.set_transport_defaults(control_exchange)
def get_transport():
    """Return a notification transport built from the current configuration."""
    return oslo_messaging.get_notification_transport(CONF)
class Notifier(object):
    """Dispatches event notifications through oslo.messaging."""

    def __init__(self):
        self._transport = get_transport()
        self._notifier = oslo_messaging.Notifier(
            self._transport, publisher_id=CONF.default_publisher_id)

    def warn(self, event_type, payload):
        """Emit a warning-level notification."""
        self._notifier.warn({}, event_type, payload)

    def info(self, event_type, payload):
        """Emit an info-level notification."""
        self._notifier.info({}, event_type, payload)

    def error(self, event_type, payload):
        """Emit an error-level notification."""
        self._notifier.error({}, event_type, payload)
def _get_notification_group(notification):
return notification.split('.', 1)[0]
def _is_notification_enabled(notification):
    """Return False when the event, or its whole group, is disabled in config."""
    group = _get_notification_group(notification)
    targets = (notification, group)
    return not any(entry in targets
                   for entry in CONF.disabled_notifications)
def _send_notification(notify, notification_type, payload):
    """Invoke `notify` for the event unless it is disabled in configuration."""
    if not _is_notification_enabled(notification_type):
        return
    notify(notification_type, payload)
def format_image_notification(image):
    """Build the notification payload for a glance.domain.Image.

    'location' is deliberately excluded because it may contain credentials.
    """
    return dict(
        id=image.image_id,
        name=image.name,
        status=image.status,
        created_at=timeutils.isotime(image.created_at),
        updated_at=timeutils.isotime(image.updated_at),
        min_disk=image.min_disk,
        min_ram=image.min_ram,
        protected=image.protected,
        checksum=image.checksum,
        owner=image.owner,
        disk_format=image.disk_format,
        container_format=image.container_format,
        size=image.size,
        virtual_size=image.virtual_size,
        is_public=image.visibility == 'public',
        properties=dict(image.extra_properties),
        tags=list(image.tags),
        deleted=False,
        deleted_at=None,
    )
def format_image_member_notification(image_member):
    """Build the notification payload for a glance.domain.ImageMember."""
    return dict(
        image_id=image_member.image_id,
        member_id=image_member.member_id,
        status=image_member.status,
        created_at=timeutils.isotime(image_member.created_at),
        updated_at=timeutils.isotime(image_member.updated_at),
        deleted=False,
        deleted_at=None,
    )
def format_task_notification(task):
    """Build the notification payload for a glance.domain.Task.

    The task input is deliberately omitted as it may contain sensitive
    information.
    """
    return dict(
        id=task.task_id,
        type=task.type,
        status=task.status,
        result=None,
        owner=task.owner,
        message=None,
        expires_at=timeutils.isotime(task.expires_at),
        created_at=timeutils.isotime(task.created_at),
        updated_at=timeutils.isotime(task.updated_at),
        deleted=False,
        deleted_at=None,
    )
def format_metadef_namespace_notification(metadef_namespace):
    """Build the notification payload for a metadef namespace."""
    return dict(
        namespace=metadef_namespace.namespace,
        namespace_old=metadef_namespace.namespace,
        display_name=metadef_namespace.display_name,
        protected=metadef_namespace.protected,
        visibility=metadef_namespace.visibility,
        owner=metadef_namespace.owner,
        description=metadef_namespace.description,
        created_at=timeutils.isotime(metadef_namespace.created_at),
        updated_at=timeutils.isotime(metadef_namespace.updated_at),
        deleted=False,
        deleted_at=None,
    )
def format_metadef_object_notification(metadef_object):
    """Build the notification payload for a metadef object."""
    object_properties = metadef_object.properties or {}
    formatted_properties = [
        _format_metadef_object_property(prop_name, prop)
        for prop_name, prop in six.iteritems(object_properties)]
    return dict(
        namespace=metadef_object.namespace,
        name=metadef_object.name,
        name_old=metadef_object.name,
        properties=formatted_properties,
        required=metadef_object.required,
        description=metadef_object.description,
        created_at=timeutils.isotime(metadef_object.created_at),
        updated_at=timeutils.isotime(metadef_object.updated_at),
        deleted=False,
        deleted_at=None,
    )
def _format_metadef_object_property(name, metadef_property):
return {
'name': name,
'type': metadef_property.type or None,
'title': metadef_property.title or None,
'description': metadef_property.description or None,
'default': metadef_property.default or None,
'minimum': metadef_property.minimum or None,
'maximum': metadef_property.maximum or None,
'enum': metadef_property.enum or None,
'pattern': metadef_property.pattern or None,
'minLength': metadef_property.minLength or None,
'maxLength': metadef_property.maxLength or None,
'confidential': metadef_property.confidential or None,
'items': metadef_property.items or None,
'uniqueItems': metadef_property.uniqueItems or None,
'minItems': metadef_property.minItems or None,
'maxItems': metadef_property.maxItems or None,
'additionalItems': metadef_property.additionalItems or None,
}
def format_metadef_property_notification(metadef_property):
    """Build the notification payload for a metadef namespace property."""
    schema = metadef_property.schema
    payload = {
        'namespace': metadef_property.namespace,
        'name': metadef_property.name,
        'name_old': metadef_property.name,
    }
    # Schema-derived fields: absent keys become None, exactly as
    # dict.get() yields.
    for field in ('type', 'title', 'description', 'default', 'minimum',
                  'maximum', 'enum', 'pattern', 'minLength', 'maxLength',
                  'confidential', 'items', 'uniqueItems', 'minItems',
                  'maxItems', 'additionalItems'):
        payload[field] = schema.get(field)
    payload['deleted'] = False
    payload['deleted_at'] = None
    return payload
def format_metadef_resource_type_notification(metadef_resource_type):
    """Build the notification payload for a metadef resource type."""
    return dict(
        namespace=metadef_resource_type.namespace,
        name=metadef_resource_type.name,
        name_old=metadef_resource_type.name,
        prefix=metadef_resource_type.prefix,
        properties_target=metadef_resource_type.properties_target,
        created_at=timeutils.isotime(metadef_resource_type.created_at),
        updated_at=timeutils.isotime(metadef_resource_type.updated_at),
        deleted=False,
        deleted_at=None,
    )
def format_metadef_tag_notification(metadef_tag):
    """Build the notification payload for a metadef tag."""
    return dict(
        namespace=metadef_tag.namespace,
        name=metadef_tag.name,
        name_old=metadef_tag.name,
        created_at=timeutils.isotime(metadef_tag.created_at),
        updated_at=timeutils.isotime(metadef_tag.updated_at),
        deleted=False,
        deleted_at=None,
    )
class NotificationBase(object):
    """Mixin providing payload construction and notification emission."""

    def get_payload(self, obj):
        """Return the notification payload for obj; subclasses override."""
        return {}

    def send_notification(self, notification_id, obj, extra_payload=None):
        """Emit an info-level notification for obj, merging extra_payload."""
        body = self.get_payload(obj)
        if extra_payload is not None:
            body.update(extra_payload)
        _send_notification(self.notifier.info, notification_id, body)
@six.add_metaclass(abc.ABCMeta)
class NotificationProxy(NotificationBase):
    """Abstract base for proxies wrapping a single domain object.

    Subclasses declare the wrapped domain proxy class via get_super_class();
    the request context and notifier are kept so notifications can be
    emitted for operations on the wrapped object.
    """

    def __init__(self, repo, context, notifier):
        self.repo = repo
        self.context = context
        self.notifier = notifier
        # The domain base class is resolved at runtime through the subclass
        # hook and initialized explicitly.
        super_class = self.get_super_class()
        super_class.__init__(self, repo)

    @abc.abstractmethod
    def get_super_class(self):
        # Subclasses return the domain_proxy class they extend.
        pass
@six.add_metaclass(abc.ABCMeta)
class NotificationRepoProxy(NotificationBase):
    """Abstract base for proxies wrapping a repository.

    Items retrieved from the repo are themselves wrapped in the proxy class
    returned by get_proxy_class(), with the context and notifier passed
    through as keyword arguments.
    """

    def __init__(self, repo, context, notifier):
        self.repo = repo
        self.context = context
        self.notifier = notifier
        proxy_kwargs = {'context': self.context, 'notifier': self.notifier}
        proxy_class = self.get_proxy_class()
        super_class = self.get_super_class()
        super_class.__init__(self, repo, proxy_class, proxy_kwargs)

    @abc.abstractmethod
    def get_super_class(self):
        # Subclasses return the domain_proxy repo class they extend.
        pass

    @abc.abstractmethod
    def get_proxy_class(self):
        # Subclasses return the per-item proxy class to wrap results in.
        pass
@six.add_metaclass(abc.ABCMeta)
class NotificationFactoryProxy(object):
    """Abstract base for proxies wrapping a domain object factory.

    Objects produced by the factory are wrapped in the proxy class returned
    by get_proxy_class(), with the context and notifier passed through.
    """

    def __init__(self, factory, context, notifier):
        kwargs = {'context': context, 'notifier': notifier}
        proxy_class = self.get_proxy_class()
        super_class = self.get_super_class()
        super_class.__init__(self, factory, proxy_class, kwargs)

    @abc.abstractmethod
    def get_super_class(self):
        # Subclasses return the domain_proxy factory class they extend.
        pass

    @abc.abstractmethod
    def get_proxy_class(self):
        # Subclasses return the per-item proxy class to wrap products in.
        pass
class ImageProxy(NotificationProxy, domain_proxy.Image):
    """Image wrapper that emits notifications for image data events.

    Sends 'image.send' when data is streamed out, and 'image.prepare',
    'image.upload' and 'image.activate' around data upload.
    """

    def get_super_class(self):
        return domain_proxy.Image

    def get_payload(self, obj):
        return format_image_notification(obj)

    def _format_image_send(self, bytes_sent):
        # Payload for 'image.send': who received how many bytes of which
        # image.
        return {
            'bytes_sent': bytes_sent,
            'image_id': self.repo.image_id,
            'owner_id': self.repo.owner,
            'receiver_tenant_id': self.context.tenant,
            'receiver_user_id': self.context.user,
        }

    def _get_chunk_data_iterator(self, data, chunk_size=None):
        sent = 0
        for chunk in data:
            yield chunk
            sent += len(chunk)

        # Emit at error level when fewer/more bytes than expected were
        # streamed (e.g. client disconnect); info level otherwise.
        if sent != (chunk_size or self.repo.size):
            notify = self.notifier.error
        else:
            notify = self.notifier.info

        try:
            _send_notification(notify, 'image.send',
                               self._format_image_send(sent))
        except Exception as err:
            msg = (_LE("An error occurred during image.send"
                       " notification: %(err)s") % {'err': err})
            LOG.error(msg)

    def get_data(self, offset=0, chunk_size=None):
        # Due to the need of evaluating subsequent proxies, this one
        # should return a generator, the call should be done before
        # generator creation
        data = self.repo.get_data(offset=offset, chunk_size=chunk_size)
        return self._get_chunk_data_iterator(data, chunk_size=chunk_size)

    def set_data(self, data, size=None):
        self.send_notification('image.prepare', self.repo)

        notify_error = self.notifier.error
        try:
            self.repo.set_data(data, size)
        except glance_store.StorageFull as e:
            msg = (_("Image storage media is full: %s") %
                   encodeutils.exception_to_unicode(e))
            _send_notification(notify_error, 'image.upload', msg)
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
        except glance_store.StorageWriteDenied as e:
            msg = (_("Insufficient permissions on image storage media: %s")
                   % encodeutils.exception_to_unicode(e))
            _send_notification(notify_error, 'image.upload', msg)
            raise webob.exc.HTTPServiceUnavailable(explanation=msg)
        except ValueError as e:
            msg = (_("Cannot save data for image %(image_id)s: %(error)s") %
                   {'image_id': self.repo.image_id,
                    'error': encodeutils.exception_to_unicode(e)})
            _send_notification(notify_error, 'image.upload', msg)
            raise webob.exc.HTTPBadRequest(
                explanation=encodeutils.exception_to_unicode(e))
        except exception.Duplicate as e:
            # Bug fix: the two adjacent literals previously concatenated to
            # "...for image%(image_id)s" (missing space before the id).
            msg = (_("Unable to upload duplicate image data for image "
                     "%(image_id)s: %(error)s") %
                   {'image_id': self.repo.image_id,
                    'error': encodeutils.exception_to_unicode(e)})
            _send_notification(notify_error, 'image.upload', msg)
            raise webob.exc.HTTPConflict(explanation=msg)
        except exception.Forbidden as e:
            msg = (_("Not allowed to upload image data for image %(image_id)s:"
                     " %(error)s")
                   % {'image_id': self.repo.image_id,
                      'error': encodeutils.exception_to_unicode(e)})
            _send_notification(notify_error, 'image.upload', msg)
            raise webob.exc.HTTPForbidden(explanation=msg)
        except exception.NotFound as e:
            exc_str = encodeutils.exception_to_unicode(e)
            msg = (_("Image %(image_id)s could not be found after upload."
                     " The image may have been deleted during the upload:"
                     " %(error)s") % {'image_id': self.repo.image_id,
                                      'error': exc_str})
            _send_notification(notify_error, 'image.upload', msg)
            raise webob.exc.HTTPNotFound(explanation=exc_str)
        except webob.exc.HTTPError as e:
            with excutils.save_and_reraise_exception():
                msg = (_("Failed to upload image data for image %(image_id)s"
                         " due to HTTP error: %(error)s") %
                       {'image_id': self.repo.image_id,
                        'error': encodeutils.exception_to_unicode(e)})
                _send_notification(notify_error, 'image.upload', msg)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                msg = (_("Failed to upload image data for image %(image_id)s "
                         "due to internal error: %(error)s") %
                       {'image_id': self.repo.image_id,
                        'error': encodeutils.exception_to_unicode(e)})
                _send_notification(notify_error, 'image.upload', msg)
        else:
            self.send_notification('image.upload', self.repo)
            self.send_notification('image.activate', self.repo)
class ImageMemberProxy(NotificationProxy, domain_proxy.ImageMember):
    """Notification proxy around a domain ImageMember."""

    def get_super_class(self):
        return domain_proxy.ImageMember
class ImageFactoryProxy(NotificationFactoryProxy, domain_proxy.ImageFactory):
    """Factory proxy producing notification-aware ImageProxy objects."""

    def get_super_class(self):
        return domain_proxy.ImageFactory

    def get_proxy_class(self):
        return ImageProxy
class ImageRepoProxy(NotificationRepoProxy, domain_proxy.Repo):
    """Image repo wrapper emitting image.create/update/delete notifications."""

    def get_super_class(self):
        return domain_proxy.Repo

    def get_proxy_class(self):
        return ImageProxy

    def get_payload(self, obj):
        return format_image_notification(obj)

    def save(self, image, from_state=None):
        super(ImageRepoProxy, self).save(image, from_state=from_state)
        self.send_notification('image.update', image)

    def add(self, image):
        super(ImageRepoProxy, self).add(image)
        self.send_notification('image.create', image)

    def remove(self, image):
        super(ImageRepoProxy, self).remove(image)
        deletion_info = {'deleted': True, 'deleted_at': timeutils.isotime()}
        self.send_notification('image.delete', image,
                               extra_payload=deletion_info)
class ImageMemberRepoProxy(NotificationBase, domain_proxy.MemberRepo):
    """Member repo wrapper emitting image.member.* notifications.

    Unlike NotificationRepoProxy, this also keeps the owning image, which
    the MemberRepo base class requires at construction time.
    """

    def __init__(self, repo, image, context, notifier):
        self.repo = repo
        self.image = image
        self.context = context
        self.notifier = notifier
        proxy_kwargs = {'context': self.context, 'notifier': self.notifier}
        proxy_class = self.get_proxy_class()
        super_class = self.get_super_class()
        super_class.__init__(self, image, repo, proxy_class, proxy_kwargs)

    def get_super_class(self):
        return domain_proxy.MemberRepo

    def get_proxy_class(self):
        return ImageMemberProxy

    def get_payload(self, obj):
        return format_image_member_notification(obj)

    def save(self, member, from_state=None):
        super(ImageMemberRepoProxy, self).save(member, from_state=from_state)
        self.send_notification('image.member.update', member)

    def add(self, member):
        super(ImageMemberRepoProxy, self).add(member)
        self.send_notification('image.member.create', member)

    def remove(self, member):
        super(ImageMemberRepoProxy, self).remove(member)
        self.send_notification('image.member.delete', member, extra_payload={
            'deleted': True, 'deleted_at': timeutils.isotime()
        })
class TaskProxy(NotificationProxy, domain_proxy.Task):
    """Task wrapper emitting task lifecycle notifications."""

    def get_super_class(self):
        return domain_proxy.Task

    def get_payload(self, obj):
        return format_task_notification(obj)

    def begin_processing(self):
        super(TaskProxy, self).begin_processing()
        self.send_notification('task.processing', self.repo)

    def succeed(self, result):
        super(TaskProxy, self).succeed(result)
        self.send_notification('task.success', self.repo)

    def fail(self, message):
        super(TaskProxy, self).fail(message)
        self.send_notification('task.failure', self.repo)

    def run(self, executor):
        super(TaskProxy, self).run(executor)
        self.send_notification('task.run', self.repo)
class TaskFactoryProxy(NotificationFactoryProxy, domain_proxy.TaskFactory):
    """Factory proxy producing notification-aware TaskProxy objects."""

    def get_super_class(self):
        return domain_proxy.TaskFactory

    def get_proxy_class(self):
        return TaskProxy
class TaskRepoProxy(NotificationRepoProxy, domain_proxy.TaskRepo):
    """Task repo wrapper emitting task.create/task.delete notifications."""

    def get_super_class(self):
        return domain_proxy.TaskRepo

    def get_proxy_class(self):
        return TaskProxy

    def get_payload(self, obj):
        return format_task_notification(obj)

    def add(self, task):
        outcome = super(TaskRepoProxy, self).add(task)
        self.send_notification('task.create', task)
        return outcome

    def remove(self, task):
        outcome = super(TaskRepoProxy, self).remove(task)
        self.send_notification(
            'task.delete', task,
            extra_payload={'deleted': True,
                           'deleted_at': timeutils.isotime()})
        return outcome
class TaskStubProxy(NotificationProxy, domain_proxy.TaskStub):
    """Notification proxy around a domain TaskStub."""

    def get_super_class(self):
        return domain_proxy.TaskStub
class TaskStubRepoProxy(NotificationRepoProxy, domain_proxy.TaskStubRepo):
    """Task-stub repo wrapper; wraps results without extra notifications."""

    def get_super_class(self):
        return domain_proxy.TaskStubRepo

    def get_proxy_class(self):
        return TaskStubProxy
class MetadefNamespaceProxy(NotificationProxy, domain_proxy.MetadefNamespace):
    """Notification proxy around a domain MetadefNamespace."""

    def get_super_class(self):
        return domain_proxy.MetadefNamespace
class MetadefNamespaceFactoryProxy(NotificationFactoryProxy,
                                   domain_proxy.MetadefNamespaceFactory):
    """Factory proxy producing notification-aware namespace proxies."""

    def get_super_class(self):
        return domain_proxy.MetadefNamespaceFactory

    def get_proxy_class(self):
        return MetadefNamespaceProxy
class MetadefNamespaceRepoProxy(NotificationRepoProxy,
                                domain_proxy.MetadefNamespaceRepo):
    """Namespace repo wrapper emitting metadef_namespace.* notifications."""

    def get_super_class(self):
        return domain_proxy.MetadefNamespaceRepo

    def get_proxy_class(self):
        return MetadefNamespaceProxy

    def get_payload(self, obj):
        return format_metadef_namespace_notification(obj)

    def save(self, metadef_namespace):
        # Renames stash the previous name on '_old_namespace'; fall back to
        # the current name when no rename happened.
        previous_name = getattr(metadef_namespace, '_old_namespace',
                                metadef_namespace.namespace)
        outcome = super(MetadefNamespaceRepoProxy, self).save(
            metadef_namespace)
        self.send_notification(
            'metadef_namespace.update', metadef_namespace,
            extra_payload={'namespace_old': previous_name})
        return outcome

    def add(self, metadef_namespace):
        outcome = super(MetadefNamespaceRepoProxy, self).add(
            metadef_namespace)
        self.send_notification('metadef_namespace.create', metadef_namespace)
        return outcome

    def remove(self, metadef_namespace):
        outcome = super(MetadefNamespaceRepoProxy, self).remove(
            metadef_namespace)
        self.send_notification(
            'metadef_namespace.delete', metadef_namespace,
            extra_payload={'deleted': True,
                           'deleted_at': timeutils.isotime()})
        return outcome

    def remove_objects(self, metadef_namespace):
        outcome = super(MetadefNamespaceRepoProxy, self).remove_objects(
            metadef_namespace)
        self.send_notification('metadef_namespace.delete_objects',
                               metadef_namespace)
        return outcome

    def remove_properties(self, metadef_namespace):
        outcome = super(MetadefNamespaceRepoProxy, self).remove_properties(
            metadef_namespace)
        self.send_notification('metadef_namespace.delete_properties',
                               metadef_namespace)
        return outcome

    def remove_tags(self, metadef_namespace):
        outcome = super(MetadefNamespaceRepoProxy, self).remove_tags(
            metadef_namespace)
        self.send_notification('metadef_namespace.delete_tags',
                               metadef_namespace)
        return outcome
class MetadefObjectProxy(NotificationProxy, domain_proxy.MetadefObject):
    # Notification-aware wrapper around a metadef object.
    def get_super_class(self):
        return domain_proxy.MetadefObject
class MetadefObjectFactoryProxy(NotificationFactoryProxy,
                                domain_proxy.MetadefObjectFactory):
    # Factory proxy so newly built objects come back as MetadefObjectProxy.
    def get_super_class(self):
        return domain_proxy.MetadefObjectFactory
    def get_proxy_class(self):
        return MetadefObjectProxy
class MetadefObjectRepoProxy(NotificationRepoProxy,
                             domain_proxy.MetadefObjectRepo):
    """Repo proxy that emits ``metadef_object.*`` notifications."""

    def get_super_class(self):
        return domain_proxy.MetadefObjectRepo

    def get_proxy_class(self):
        return MetadefObjectProxy

    def get_payload(self, obj):
        return format_metadef_object_notification(obj)

    def save(self, metadef_object):
        # Capture the pre-rename object name for the update payload.
        old_name = getattr(metadef_object, '_old_name', metadef_object.name)
        saved = super(MetadefObjectRepoProxy, self).save(metadef_object)
        self.send_notification(
            'metadef_object.update', metadef_object,
            extra_payload={
                'namespace': metadef_object.namespace.namespace,
                'name_old': old_name,
            })
        return saved

    def add(self, metadef_object):
        added = super(MetadefObjectRepoProxy, self).add(metadef_object)
        self.send_notification('metadef_object.create', metadef_object)
        return added

    def remove(self, metadef_object):
        removed = super(MetadefObjectRepoProxy, self).remove(metadef_object)
        payload = {
            'deleted': True,
            'deleted_at': timeutils.isotime(),
            'namespace': metadef_object.namespace.namespace,
        }
        self.send_notification('metadef_object.delete', metadef_object,
                               extra_payload=payload)
        return removed
class MetadefPropertyProxy(NotificationProxy, domain_proxy.MetadefProperty):
    # Notification-aware wrapper around a metadef property.
    def get_super_class(self):
        return domain_proxy.MetadefProperty
class MetadefPropertyFactoryProxy(NotificationFactoryProxy,
                                  domain_proxy.MetadefPropertyFactory):
    # Factory proxy so newly built properties come back as
    # MetadefPropertyProxy.
    def get_super_class(self):
        return domain_proxy.MetadefPropertyFactory
    def get_proxy_class(self):
        return MetadefPropertyProxy
class MetadefPropertyRepoProxy(NotificationRepoProxy,
                               domain_proxy.MetadefPropertyRepo):
    """Repo proxy that emits ``metadef_property.*`` notifications."""

    def get_super_class(self):
        return domain_proxy.MetadefPropertyRepo

    def get_proxy_class(self):
        return MetadefPropertyProxy

    def get_payload(self, obj):
        return format_metadef_property_notification(obj)

    def save(self, metadef_property):
        # Capture the pre-rename property name for the update payload.
        old_name = getattr(metadef_property, '_old_name',
                           metadef_property.name)
        saved = super(MetadefPropertyRepoProxy, self).save(metadef_property)
        self.send_notification(
            'metadef_property.update', metadef_property,
            extra_payload={
                'namespace': metadef_property.namespace.namespace,
                'name_old': old_name,
            })
        return saved

    def add(self, metadef_property):
        added = super(MetadefPropertyRepoProxy, self).add(metadef_property)
        self.send_notification('metadef_property.create', metadef_property)
        return added

    def remove(self, metadef_property):
        removed = super(MetadefPropertyRepoProxy, self).remove(
            metadef_property)
        payload = {
            'deleted': True,
            'deleted_at': timeutils.isotime(),
            'namespace': metadef_property.namespace.namespace,
        }
        self.send_notification('metadef_property.delete', metadef_property,
                               extra_payload=payload)
        return removed
class MetadefResourceTypeProxy(NotificationProxy,
                               domain_proxy.MetadefResourceType):
    # Notification-aware wrapper around a metadef resource type.
    def get_super_class(self):
        return domain_proxy.MetadefResourceType
class MetadefResourceTypeFactoryProxy(NotificationFactoryProxy,
                                      domain_proxy.MetadefResourceTypeFactory):
    # Factory proxy so newly built resource types come back as
    # MetadefResourceTypeProxy.
    def get_super_class(self):
        return domain_proxy.MetadefResourceTypeFactory
    def get_proxy_class(self):
        return MetadefResourceTypeProxy
class MetadefResourceTypeRepoProxy(NotificationRepoProxy,
                                   domain_proxy.MetadefResourceTypeRepo):
    """Repo proxy that emits ``metadef_resource_type.*`` notifications."""

    def get_super_class(self):
        return domain_proxy.MetadefResourceTypeRepo

    def get_proxy_class(self):
        return MetadefResourceTypeProxy

    def get_payload(self, obj):
        return format_metadef_resource_type_notification(obj)

    def add(self, md_resource_type):
        added = super(MetadefResourceTypeRepoProxy, self).add(
            md_resource_type)
        self.send_notification('metadef_resource_type.create',
                               md_resource_type)
        return added

    def remove(self, md_resource_type):
        removed = super(MetadefResourceTypeRepoProxy, self).remove(
            md_resource_type)
        payload = {
            'deleted': True,
            'deleted_at': timeutils.isotime(),
            'namespace': md_resource_type.namespace.namespace,
        }
        self.send_notification('metadef_resource_type.delete',
                               md_resource_type, extra_payload=payload)
        return removed
class MetadefTagProxy(NotificationProxy, domain_proxy.MetadefTag):
    # Notification-aware wrapper around a metadef tag.
    def get_super_class(self):
        return domain_proxy.MetadefTag
class MetadefTagFactoryProxy(NotificationFactoryProxy,
                             domain_proxy.MetadefTagFactory):
    # Factory proxy so newly built tags come back as MetadefTagProxy.
    def get_super_class(self):
        return domain_proxy.MetadefTagFactory
    def get_proxy_class(self):
        return MetadefTagProxy
class MetadefTagRepoProxy(NotificationRepoProxy, domain_proxy.MetadefTagRepo):
    """Repo proxy that emits ``metadef_tag.*`` notifications."""

    def get_super_class(self):
        return domain_proxy.MetadefTagRepo

    def get_proxy_class(self):
        return MetadefTagProxy

    def get_payload(self, obj):
        return format_metadef_tag_notification(obj)

    def save(self, metadef_tag):
        # Capture the pre-rename tag name for the update payload.
        old_name = getattr(metadef_tag, '_old_name', metadef_tag.name)
        saved = super(MetadefTagRepoProxy, self).save(metadef_tag)
        self.send_notification(
            'metadef_tag.update', metadef_tag,
            extra_payload={
                'namespace': metadef_tag.namespace.namespace,
                'name_old': old_name,
            })
        return saved

    def add(self, metadef_tag):
        added = super(MetadefTagRepoProxy, self).add(metadef_tag)
        self.send_notification('metadef_tag.create', metadef_tag)
        return added

    def add_tags(self, metadef_tags):
        # Batch insert: one create notification is emitted per tag.
        added = super(MetadefTagRepoProxy, self).add_tags(metadef_tags)
        for tag in metadef_tags:
            self.send_notification('metadef_tag.create', tag)
        return added

    def remove(self, metadef_tag):
        removed = super(MetadefTagRepoProxy, self).remove(metadef_tag)
        payload = {
            'deleted': True,
            'deleted_at': timeutils.isotime(),
            'namespace': metadef_tag.namespace.namespace,
        }
        self.send_notification('metadef_tag.delete', metadef_tag,
                               extra_payload=payload)
        return removed
| stevelle/glance | glance/notifier.py | Python | apache-2.0 | 31,619 |
package weixin.popular.bean.paymch;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
/**
 * Request model for the WeChat Pay merchant ("mch") order-query API.
 * <p>
 * Instances are marshalled by JAXB into an XML document rooted at
 * {@code <xml>}; every field annotated with {@code @XmlElement} becomes a
 * child element. NOTE(review): presumably exactly one of
 * {@code transaction_id} / {@code out_trade_no} identifies the order —
 * confirm against the WeChat Pay API documentation.
 */
@XmlRootElement(name = "xml")
@XmlAccessorType(XmlAccessType.FIELD)
public class MchOrderquery extends MchVersion{
	@XmlElement
	private String appid;
	@XmlElement
	private String mch_id;
	@XmlElement
	private String transaction_id;
	@XmlElement
	private String out_trade_no;
	@XmlElement
	private String nonce_str;
	@XmlElement
	private String sign;
	@XmlElement
	private String sign_type;
	/**
	 * @since 2.8.5
	 */
	@XmlElement
	private String sub_appid;
	/**
	 * @since 2.8.5
	 */
	@XmlElement
	private String sub_mch_id;
	public String getAppid() {
		return appid;
	}
	public void setAppid(String appid) {
		this.appid = appid;
	}
	public String getMch_id() {
		return mch_id;
	}
	public void setMch_id(String mch_id) {
		this.mch_id = mch_id;
	}
	public String getOut_trade_no() {
		return out_trade_no;
	}
	public void setOut_trade_no(String out_trade_no) {
		this.out_trade_no = out_trade_no;
	}
	public String getNonce_str() {
		return nonce_str;
	}
	public void setNonce_str(String nonce_str) {
		this.nonce_str = nonce_str;
	}
	public String getSign() {
		return sign;
	}
	public void setSign(String sign) {
		this.sign = sign;
	}
	public String getTransaction_id() {
		return transaction_id;
	}
	public void setTransaction_id(String transaction_id) {
		this.transaction_id = transaction_id;
	}
	public String getSub_appid() {
		return sub_appid;
	}
	public void setSub_appid(String sub_appid) {
		this.sub_appid = sub_appid;
	}
	public String getSub_mch_id() {
		return sub_mch_id;
	}
	public void setSub_mch_id(String sub_mch_id) {
		this.sub_mch_id = sub_mch_id;
	}
	public String getSign_type() {
		return sign_type;
	}
	/**
	 * Signature type.
	 * @since 2.8.5
	 * @param sign_type either HMAC-SHA256 or MD5
	 */
	public void setSign_type(String sign_type) {
		this.sign_type = sign_type;
	}
}
| liyiorg/weixin-popular | src/main/java/weixin/popular/bean/paymch/MchOrderquery.java | Java | apache-2.0 | 2,181 |
/*
* Copyright 2017 Niklas Persson
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.equadon.intellij.mips.lang.psi.impl;
import com.equadon.intellij.mips.lang.psi.MipsElement;
import com.intellij.extapi.psi.ASTWrapperPsiElement;
import com.intellij.lang.ASTNode;
import org.jetbrains.annotations.NotNull;
/**
 * Base PSI element implementation for the MIPS language plugin; it simply
 * wraps the underlying {@link ASTNode} via {@link ASTWrapperPsiElement}.
 */
public class MipsElementImpl extends ASTWrapperPsiElement implements MipsElement {
  public MipsElementImpl(@NotNull ASTNode node) {
    super(node);
  }
}
| equadon/intellij-mips | src/com/equadon/intellij/mips/lang/psi/impl/MipsElementImpl.java | Java | apache-2.0 | 987 |
package ru.greg3d.model;
/**
 * Mutable value object describing one film record. Every attribute is
 * stored as a {@code String} (numeric overloads convert on the way in) and
 * defaults to the empty string. All setters return {@code this} so calls
 * can be chained fluently.
 */
public class Film {

	private String id = "";
	private String imdb = "";
	private String title = "";
	private String year = "";
	private String notes = "";
	private String duration = "";
	private String rating = "";

	public String getId() {
		return this.id;
	}

	public Film setId(String id) {
		this.id = id;
		return this;
	}

	public String getImdb() {
		return this.imdb;
	}

	public Film setImdb(String imdb) {
		this.imdb = imdb;
		return this;
	}

	public String getTitle() {
		return this.title;
	}

	public Film setTitle(String title) {
		this.title = title;
		return this;
	}

	public String getYear() {
		return this.year;
	}

	public Film setYear(String year) {
		this.year = year;
		return this;
	}

	/** Convenience overload: stores the numeric year as a string. */
	public Film setYear(int year) {
		return setYear(String.valueOf(year));
	}

	public String getNotes() {
		return this.notes;
	}

	public Film setNotes(String notes) {
		this.notes = notes;
		return this;
	}

	public String getDuration() {
		return this.duration;
	}

	public Film setDuration(String duration) {
		this.duration = duration;
		return this;
	}

	/** Convenience overload: stores the numeric duration as a string. */
	public Film setDuration(int duration) {
		return setDuration(String.valueOf(duration));
	}

	public String getRating() {
		return this.rating;
	}

	public Film setRating(String rating) {
		this.rating = rating;
		return this;
	}

	/** Convenience overload: stores the numeric rating as a string. */
	public Film setRating(int rating) {
		return setRating(String.valueOf(rating));
	}

	/**
	 * Renders title and year in a fixed report format. The {@code %2s}
	 * conversions pad values shorter than two characters with spaces.
	 */
	public String getFilmFieldsValues() {
		return String.format("Title = '%2s' Year ='%2s'", this.title, this.year);
	}
}
| Greg3dot14D/PageObjectWithLocatorCorrector | src/main/java/ru/greg3d/model/Film.java | Java | apache-2.0 | 1,537 |
package uk.ac.ebi.subs.api.processors;
import lombok.Data;
import lombok.NonNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.data.rest.webmvc.support.RepositoryEntityLinks;
import org.springframework.hateoas.Link;
import org.springframework.hateoas.Resource;
import org.springframework.hateoas.ResourceProcessor;
import org.springframework.stereotype.Component;
import uk.ac.ebi.subs.api.controllers.SubmissionContentsController;
import uk.ac.ebi.subs.api.model.SubmissionContents;
import uk.ac.ebi.subs.api.services.OperationControlService;
import uk.ac.ebi.subs.repository.model.DataType;
import uk.ac.ebi.subs.repository.model.Project;
import uk.ac.ebi.subs.repository.model.StoredSubmittable;
import uk.ac.ebi.subs.repository.model.Submission;
import uk.ac.ebi.subs.repository.model.SubmissionPlan;
import uk.ac.ebi.subs.repository.model.fileupload.File;
import uk.ac.ebi.subs.repository.repos.DataTypeRepository;
import uk.ac.ebi.subs.repository.repos.submittables.ProjectRepository;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import static org.springframework.hateoas.mvc.ControllerLinkBuilder.linkTo;
import static org.springframework.hateoas.mvc.ControllerLinkBuilder.methodOn;
/**
* Resource processor for {@link SubmissionContents} entity used by Spring MVC controller.
*/
@Component
@Data
public class SubmissionContentsProcessor implements ResourceProcessor<Resource<SubmissionContents>> {
    @NonNull
    private LinkHelper linkHelper;
    @NonNull
    private ProjectRepository projectRepository;
    @NonNull
    private RepositoryEntityLinks repositoryEntityLinks;
    @NonNull
    private DataTypeRepository dataTypeRepository;
    @NonNull
    private OperationControlService operationControlService;
    @NonNull
    private List<Class<? extends StoredSubmittable>> submittablesClassList;
    private static final Logger logger = LoggerFactory.getLogger(SubmissionContentsProcessor.class);
    // Data type id that has no submittable collection of its own and is
    // therefore excluded from the per-data-type links.
    private static final String DATA_TYPE_FILES = "files";
    /**
     * Decorates the resource with per-data-type, files and project links,
     * then clears the embedded submission so it is not serialized.
     */
    public Resource<SubmissionContents> process(Resource<SubmissionContents> resource) {
        String subId = resource.getContent().getSubmission().getId();
        List<DataType> dataTypesInSubmission = dataTypesInSubmission(resource.getContent().getSubmission());
        resource.getContent().setDataTypes(dataTypesInSubmission);
        addSubmittablesInSubmission(dataTypesInSubmission, resource);
        addFilesLink(resource, subId);
        addProjectLink(resource, subId);
        resource.getContent().setSubmission(null);
        return resource;
    }
    /**
     * Resolves the data types applicable to the submission: all known data
     * types when the submission has no plan, otherwise only those whose ids
     * the plan whitelists.
     */
    private List<DataType> dataTypesInSubmission(Submission submission) {
        SubmissionPlan submissionPlan = submission.getSubmissionPlan();
        List<DataType> dataTypesInSubmission;
        List<DataType> allDataTypes = dataTypeRepository.findAll();
        if (submissionPlan == null) {
            dataTypesInSubmission = allDataTypes;
        } else {
            Set<String> dataTypeIds = new HashSet<>(submissionPlan.getDataTypeIds());
            dataTypesInSubmission = allDataTypes.stream()
                    .filter(dt -> dataTypeIds.contains(dt.getId()))
                    .collect(Collectors.toList());
        }
        return dataTypesInSubmission;
    }
    /**
     * Adds one collection link per non-file data type; when the submission
     * is still updateable, also adds a create link for each type.
     */
    private void addSubmittablesInSubmission(List<DataType> dataTypesInSubmission, Resource<SubmissionContents> resource) {
        boolean updateable = operationControlService.isUpdateable(resource.getContent().getSubmission());
        for (DataType dataType : dataTypesInSubmission) {
            // we can not have these type of links as it is not a submittable data type
            if (dataType.getId().equals(DATA_TYPE_FILES)) {
                continue;
            }
            Link collectionLink = linkTo(
                    methodOn(SubmissionContentsController.class)
                            .getSubmissionContentsForDataType(
                                    resource.getContent().getSubmission().getId(),
                                    dataType.getId(),
                                    null
                            )
            ).withRel(dataType.getId());
            resource.add(collectionLink);
            if (updateable) {
                Link createLink = linkHelper.submittableCreateLink(dataType,resource.getContent().getSubmission());
                resource.add(createLink);
            }
        }
    }
    // Links the submission's project, but only if one actually exists.
    private void addProjectLink(Resource<SubmissionContents> resource, String submissionId) {
        if (projectRepository.findOneBySubmissionId(submissionId) != null) {
            resource.add(createResourceLink(Project.class, "project-by-submission",
                    paramWithSubmissionID(submissionId), "project"));
        }
    }
    // Links the files uploaded for this submission.
    private void addFilesLink(Resource<SubmissionContents> resource, String submissionId) {
        resource.add(createResourceLink(File.class, "by-submission",
                paramWithSubmissionID(submissionId), "files"));
    }
    // Builds the single-entry parameter map used to expand search links.
    private Map<String, String> paramWithSubmissionID(String submissionId) {
        Map<String, String> params = new HashMap<>();
        params.put("submissionId", submissionId);
        return params;
    }
    // Expands a repository search link for the given resource class/relation.
    private Link createResourceLink(Class clazzResource, String rel, Map<String, String> params, String withRel) {
        return repositoryEntityLinks
                .linkToSearchResource(clazzResource, rel)
                .expand(params)
                .withRel(withRel);
    }
}
| EMBL-EBI-SUBS/subs-api | src/main/java/uk/ac/ebi/subs/api/processors/SubmissionContentsProcessor.java | Java | apache-2.0 | 5,617 |
package com.way.util;
import java.net.URLEncoder;
import android.os.AsyncTask;
import android.os.Handler;
import android.text.TextUtils;
import com.way.app.Application;
import com.way.bean.City;
import com.way.bean.WeatherInfo;
import com.way.weather.MainActivity;
/**
 * Background task that resolves weather data for one city. Lookup order:
 * (currently disabled) in-memory copy, then the on-disk cache, then the
 * remote SixWeather HTTP service. The parsed result is stored on the
 * {@link Application} singleton and the outcome is posted to the handler.
 * NOTE(review): "SCUESS" is presumably a misspelling of "SUCCESS"; renaming
 * would also touch MainActivity, so it is left untouched here.
 */
public class GetWeatherTask extends AsyncTask<Void, Void, Integer> {
	private static final String BASE_URL = "http://sixweather.3gpk.net/SixWeather.aspx?city=%s";
	private static final int SCUESS = 0;
	// Success, and the response contains a non-empty weather warning.
	private static final int SCUESS_YUJING = 1;
	private static final int FAIL = -1;
	// Receives the result message on the UI side.
	private Handler mHandler;
	// City whose weather is being fetched.
	private City mCity;
	private Application mApplication;
	public GetWeatherTask(Handler handler, City city) {
		this.mHandler = handler;
		this.mCity = city;
		mApplication = Application.getInstance();
	}
	@Override
	protected Integer doInBackground(Void... params) {
		try {
			String url = String.format(BASE_URL,
					URLEncoder.encode(mCity.getName(), "utf-8"));
			// To avoid wasting bandwidth on frequent refreshes, the
			// in-memory copy used to be consulted first (disabled below).
			// if (mApplication.getAllWeather() != null
			// && mApplication.getAllWeather().getCity()
			// .equals(mCity.getName())) {
			// L.i("lwp", "get the weather info from memory");
			// return SCUESS;// 直接返回,不继续执行
			// }
			// Next, try the file cache.
			String fileResult = ConfigCache.getUrlCache(mCity.getPinyin());// read the cached response from disk
			if (!TextUtils.isEmpty(fileResult)) {
				WeatherInfo allWeather = XmlPullParseUtil
						.parseWeatherInfo(fileResult);
				if (allWeather != null) {
					mApplication.SetAllWeather(allWeather);
					L.i("lwp", "get the weather info from file");
					return SCUESS;
				}
			}
			// Only fall back to a network request as the last resort.
			String netResult = ApiClient.connServerForResult(url);
			if (!TextUtils.isEmpty(netResult)) {
				WeatherInfo allWeather = XmlPullParseUtil
						.parseWeatherInfo(netResult);
				if (allWeather != null) {
					mApplication.SetAllWeather(allWeather);
					ConfigCache.setUrlCache(netResult, mCity.getPinyin());
					L.i("lwp", "get the weather info from network");
					String yujin = allWeather.getYujing();
					if (!TextUtils.isEmpty(yujin) && !yujin.contains("暂无预警"))
						return SCUESS_YUJING;
					return SCUESS;
				}
			}
		} catch (Exception e) {
			e.printStackTrace();
		}
		return FAIL;
	}
	@Override
	protected void onPostExecute(Integer result) {
		super.onPostExecute(result);
		if(result < 0 ){
			mHandler.sendEmptyMessage(MainActivity.GET_WEATHER_FAIL);// fetching weather info failed
			L.i("lwp", "get weather fail");
		}else{
			mHandler.sendEmptyMessage(MainActivity.GET_WEATHER_SCUESS);// success: notify the main thread to refresh the UI
			L.i("lwp", "get weather scuess");
			L.i("lwp", mApplication.getAllWeather().toString());
			if(result == SCUESS_YUJING){
				// Show a notification when a weather warning is present.
				mApplication.showNotification();
			}
		}
	}
}
| Wodner/weather | src/com/way/util/GetWeatherTask.java | Java | apache-2.0 | 2,870 |
package com.simukappu.coherence.cachestore.spring.mybatis;
/**
* CacheStore implementation class integrated with Spring and MyBatis framework.<br>
* This CacheStore is able to only write (insert or update) to database through
* Coherence cache, not load or delete.<br>
*
* @author Shota Yamazaki
*/
public class SpringMyBatisCacheStoreWriteOnly extends SpringMyBatisCacheStore {
	/** Loading from the database is not supported; always returns null. */
	@Override
	public final Object load(Object oKey) {
		return null;
	}
	/** Deleting from the database is not supported; intentionally a no-op. */
	@Override
	protected final void delete(Object oKey) {
	}
}
| simukappu/Coherence-tools | spring-mybatis-cachestore/src/main/java/com/simukappu/coherence/cachestore/spring/mybatis/SpringMyBatisCacheStoreWriteOnly.java | Java | apache-2.0 | 541 |
/**
* Copyright (C) 2015 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.fabric8.openshift.client.dsl;
import io.fabric8.kubernetes.client.dsl.Instantiateable;
import io.fabric8.kubernetes.client.dsl.Resource;
import io.fabric8.kubernetes.client.dsl.Secretable;
import io.fabric8.kubernetes.client.dsl.Triggerable;
import io.fabric8.kubernetes.client.dsl.Typeable;
import io.fabric8.openshift.api.model.BuildRequest;
import io.fabric8.openshift.api.model.WebHookTrigger;
/**
 * Composite DSL contract for operating on a single OpenShift BuildConfig:
 * generic resource operations plus instantiating builds from a
 * {@link BuildRequest} and triggering builds via {@link WebHookTrigger}s,
 * optionally typed and/or secured with a secret.
 */
public interface BuildConfigResource<I, T, D, B, S, N> extends Resource<I, T, D, B>,
  Instantiateable<BuildRequest, N>,
  Typeable<Triggerable<WebHookTrigger, S>>,
  Triggerable<WebHookTrigger, S>,
  Secretable<Typeable<Triggerable<WebHookTrigger, S>>> {
}
}
| alesj/kubernetes-client | openshift-client/src/main/java/io/fabric8/openshift/client/dsl/BuildConfigResource.java | Java | apache-2.0 | 1,276 |
/*!
* SAP UI development toolkit for HTML5 (SAPUI5/OpenUI5)
* (c) Copyright 2009-2015 SAP SE or an SAP affiliate company.
* Licensed under the Apache License, Version 2.0 - see LICENSE.txt.
*/
// Provides control sap.m.PagingButton.
sap.ui.define(['jquery.sap.global', './Button', 'sap/ui/core/Control'],
	function (jQuery, Button, Control) {
		"use strict";

		/**
		 * Constructor for a new PagingButton.
		 *
		 * @param {string} [sId] id for the new control, generated automatically if no id is given
		 * @param {object} [mSettings] initial settings for the new control
		 *
		 * @class
		 * Enables users to navigate between items/entities.
		 * @extends sap.ui.core.Control
		 *
		 * @author SAP SE
		 * @version 1.30.4-SNAPSHOT
		 *
		 * @constructor
		 * @public
		 * @since 1.30
		 * @alias sap.m.PagingButton
		 * @ui5-metamodel This control/element also will be described in the UI5 (legacy) designtime metamodel
		 */
		var PagingButton = Control.extend("sap.m.PagingButton", {
			metadata: {
				library: "sap.m",
				properties: {
					/**
					 * The total count of items/entities that the control navigates through.
					 * Minimum number of items/entities is 1.
					 */
					count: {type: "int", group: "Data", defaultValue: 1},
					/**
					 * The current position in the items/entities that the control navigates through. One-based.
					 * Minimum position number is 1.
					 */
					position: {type: "int", group: "Data", defaultValue: 1}
				},
				aggregations: {
					previousButton: {type: "sap.m.Button", multiple: false, visibility: "hidden"},
					nextButton: {type: "sap.m.Button", multiple: false, visibility: "hidden"}
				},
				events: {
					/**
					 * This event is fired when the current position is changed
					 */
					positionChange: {
						parameters: {
							/**
							 * The number of the new position. One-based.
							 */
							newPosition: {type: "int"},
							/**
							 * The number of the old position. One-based.
							 */
							oldPosition: {type: "int"}
						}
					}
				}
			}
		});

		PagingButton.prototype.init = function () {
			this._attachPressEvents();
		};

		PagingButton.prototype.onBeforeRendering = function () {
			this._enforceValidPosition(this.getPosition());
			this._updateButtonState();
		};

		/**
		 * This function lazily retrieves the nextButton.
		 * Uses the injected './Button' module dependency instead of the
		 * global "sap.m.Button", which is not guaranteed to exist when the
		 * library is loaded asynchronously (AMD).
		 * @returns {sap.m.Button}
		 */
		PagingButton.prototype._getNextButton = function () {
			if (!this.getAggregation("nextButton")) {
				this.setAggregation("nextButton", new Button({
					icon: "sap-icon://slim-arrow-down",
					enabled: false,
					id: this.getId() + "-nextButton"
				}));
			}
			return this.getAggregation("nextButton");
		};

		/**
		 * This function lazily retrieves the previousButton.
		 * @returns {sap.m.Button}
		 */
		PagingButton.prototype._getPreviousButton = function () {
			if (!this.getAggregation("previousButton")) {
				this.setAggregation("previousButton", new Button({
					icon: "sap-icon://slim-arrow-up",
					enabled: false,
					id: this.getId() + "-previousButton"
				}));
			}
			return this.getAggregation("previousButton");
		};

		PagingButton.prototype._attachPressEvents = function () {
			this._getPreviousButton().attachPress(this._handlePositionChange.bind(this, false));
			this._getNextButton().attachPress(this._handlePositionChange.bind(this, true));
		};

		/**
		 * This function handles the position change
		 * @params {boolean} bIncrease - Indicates the direction of the change of position
		 * @returns {sap.m.PagingButton} Reference to the control instance for chaining
		 */
		PagingButton.prototype._handlePositionChange = function (bIncrease) {
			var iOldPosition = this.getPosition(),
				iNewPosition = bIncrease ? iOldPosition + 1 : iOldPosition - 1;
			this.setPosition(iNewPosition);
			this.firePositionChange({newPosition: iNewPosition, oldPosition: iOldPosition});
			this._updateButtonState();
			return this;
		};

		/**
		 * Sets the appropriate state (enabled/disabled) for the buttons based on the total count / position
		 * @returns {sap.m.PagingButton} Reference to the control instance for chaining
		 */
		PagingButton.prototype._updateButtonState = function () {
			var iTotalCount = this.getCount(),
				iCurrentPosition = this.getPosition();
			this._getPreviousButton().setEnabled(iCurrentPosition > 1);
			this._getNextButton().setEnabled(iCurrentPosition < iTotalCount);
			return this;
		};

		PagingButton.prototype.setPosition = function (iPosition) {
			return this._validateProperty("position", iPosition);
		};

		PagingButton.prototype.setCount = function (iCount) {
			return this._validateProperty("count", iCount);
		};

		/**
		 * Validate both the count/position properties and ensure they are correct
		 * (the minimum allowed value for either property is 1)
		 * @params {string} sProperty - The property to be checked, {number} iValue - The value to be checked
		 * @returns {sap.m.PagingButton} Reference to the control instance for chaining
		 */
		PagingButton.prototype._validateProperty = function (sProperty, iValue) {
			if (iValue < 1) {
				jQuery.sap.log.warning("Property '" + sProperty + "' must be greater or equal to 1", this);
				return this;
			}
			return this.setProperty(sProperty, iValue);
		};

		/**
		 * Validates the position property to ensure that it's not set higher than the total count
		 * @params {number} iPosition
		 * @returns {sap.m.PagingButton} Reference to the control instance for chaining
		 */
		PagingButton.prototype._enforceValidPosition = function (iPosition) {
			var iCount = this.getCount();
			if (iPosition > iCount) {
				jQuery.sap.log.warning("Property position must be less or equal to the total count");
				this.setPosition(iCount);
			}
			return this;
		};

		return PagingButton;
	}, /* bExport= */ true);
// Copyright (C) 2003, Fernando Luis Cacciola Carballal.
//
// Use, modification, and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/lib/optional for documentation.
//
// You are welcome to contact the author at:
// fernando_cacciola@hotmail.com
//
// NOTE: taken from boost 1.55.0 and adapted for cppdevtk
#include <cppdevtk/base/optional.hpp>
#include<string>
//
// THIS TEST SHOULD FAIL TO COMPILE
//
#if 0 // cppdevtk test passed
// Intentionally ill-formed: Optional<int> must not be convertible to
// Optional<std::string>. Kept under '#if 0' because the expected
// compile failure has already been verified for cppdevtk.
void test_no_unsupported_conversion()
{
::cppdevtk::base::Optional<int> opt1(1) ;
::cppdevtk::base::Optional< std::string > opt2( opt1 ) ; // Cannot convert from "int" to "std::string"
}
#endif
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.servicecomb.serviceregistry.api.request;
import org.apache.servicecomb.serviceregistry.api.registry.Microservice;
/**
* Created by on 2016/12/5.
*/
/**
 * Request body wrapping the {@link Microservice} definition to be
 * registered with the service center.
 */
public class CreateServiceRequest {
  private Microservice service;
  public Microservice getService() {
    return service;
  }
  public void setService(Microservice service) {
    this.service = service;
  }
}
| acsukesh/java-chassis | service-registry/src/main/java/org/apache/servicecomb/serviceregistry/api/request/CreateServiceRequest.java | Java | apache-2.0 | 1,190 |
package pl.dzielins42.dmtools.calculator.building.model;
/** Distinguishes interior from exterior building styles. */
public enum StyleType {
    INTERIOR, EXTERIOR;
}
| dzielins42/urban-bear | src/main/java/pl/dzielins42/dmtools/calculator/building/model/StyleType.java | Java | apache-2.0 | 108 |
require 'beaker-rspec'
# Provision every beaker test host: install helper packages, generate an
# en_US.UTF-8 locale, export the locale variables and install puppet.
hosts.each do |host|
  install_package(host, 'wget')
  install_package(host, 'rsync')
  install_package(host, 'locales')
  # Make sure a UTF-8 locale exists before exporting LANG/LC_ALL below.
  create_remote_file host, '/etc/locale.gen', 'en_US.UTF-8 UTF-8'
  shell 'locale-gen'
  host.add_env_var('LANG', 'en_US.UTF-8')
  host.add_env_var('LANGUAGE', 'en_US.UTF-8')
  host.add_env_var('LC_ALL', 'en_US.UTF-8')
  on host, install_puppet
end
# Minimal manifest that stands up a local zookeeper instance (bound to
# loopback) for the acceptance tests.
zookeeper_pp = <<-EOS
  class { 'zookeeper':
    client_ip => $::ipaddress_lo
  }
EOS
RSpec.configure do |c|
  # Install the module under test from the repository checkout.
  module_root = File.expand_path(File.join(File.dirname(__FILE__), '..'))
  module_name = module_root.split(File::SEPARATOR).last
  # Readable test descriptions
  c.formatter = :documentation
  # Configure all nodes in nodeset
  c.before :suite do
    puppet_module_install(:source => module_root, :module_name => module_name)
    hosts.each do |host|
      # Dependencies are pinned to known-good versions.
      on host, puppet('module install puppetlabs-stdlib -v 4.9.0')
      on host, puppet('module install deric-zookeeper -v 0.3.9')
      # Make sure a working instance of zookeeper is running
      apply_manifest(zookeeper_pp)
    end
  end
end
# Run a Druid CLI command (io.druid.cli.Main) on the test host via the
# beaker `shell` helper. `opts` are extra JVM flags; `exit_codes` lists
# the acceptable exit statuses; an optional block receives the result.
def druid_cli(command, opts = '', exit_codes = [0], &block)
  druid_classpath = ':/etc/druid/:/usr/local/lib/druid/lib/*'
  full_command = [
    '/usr/bin/java', opts,
    '-classpath', druid_classpath,
    'io.druid.cli.Main', command
  ].join(' ')
  shell(full_command, :acceptable_exit_codes => exit_codes, &block)
end
| MrAlias/druid | spec/spec_helper_acceptance.rb | Ruby | apache-2.0 | 1,368 |
/*
* Copyright 2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.data.solr.repository.config;
import org.springframework.beans.factory.xml.NamespaceHandler;
import org.springframework.beans.factory.xml.NamespaceHandlerSupport;
import org.springframework.data.repository.config.RepositoryBeanDefinitionParser;
import org.springframework.data.repository.config.RepositoryConfigurationExtension;
import org.springframework.data.solr.config.HttpSolrServerBeanDefinitionParser;
import org.springframework.data.solr.embedded.config.EmbeddedSolrServerBeanDefinitionParser;
/**
* {@link NamespaceHandler} implementation to register parser for {@code <solr:repositories />},
* {@code <solr:embedded-solr-server solrHome="path/to/solr/home/directory" />} elements.
*
* @author Oliver Gierke
* @author Christoph Strobl
*/
class SolrRepositoryNamespaceHandler extends NamespaceHandlerSupport {
	/*
	 * (non-Javadoc)
	 * @see org.springframework.beans.factory.xml.NamespaceHandler#init()
	 */
	@Override
	public void init() {
		// <solr:repositories /> — Solr repository scanning/registration.
		RepositoryConfigurationExtension extension = new SolrRepositoryConfigExtension();
		RepositoryBeanDefinitionParser parser = new RepositoryBeanDefinitionParser(extension);
		registerBeanDefinitionParser("repositories", parser);
		// <solr:embedded-solr-server /> — in-process server for tests/dev.
		registerBeanDefinitionParser("embedded-solr-server", new EmbeddedSolrServerBeanDefinitionParser());
		// <solr:solr-server /> — HTTP client to an external Solr instance.
		registerBeanDefinitionParser("solr-server", new HttpSolrServerBeanDefinitionParser());
	}
}
| dynamicguy/spring-data-solr | src/main/java/org/springframework/data/solr/repository/config/SolrRepositoryNamespaceHandler.java | Java | apache-2.0 | 2,030 |
// Copyright 2017 Foxysoft GmbH
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* global fx_trace */
/**
* <div>Returns the main (SAP Master) Identity Store's ID.</div>
* <div><strong>SAP IDM 8.0:</strong> If $IDSID is non-empty and not -1,
* returns that. Otherwise, obtains the minimum Identity Store ID
* from the database and returns that.</div>
* <div><strong>SAP IDM 7.2:</strong> Returns the value of global
* constant SAP_MASTER_IDS_ID.</div>
* @return {string} IDSID
* @requires fx_trace
* @since 1.1.0
*/
function fx_IDSID() {
    // Delegate to the trace-compatible implementation; the contract is
    // documented in the JSDoc header above.
    var traceApi = fx_trace({ compat: 1.0 });
    return traceApi.fx_IDSID();
}
| foxysoft/idm-connector-bobj | src/main/javascript/fx_IDSID.js | JavaScript | apache-2.0 | 1,139 |
/*
Copyright (C) 2015 Preet Desai (preet.desai@gmail.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <ks/gl/KsGLTexture2D.hpp>
#include <ks/gl/KsGLStateSet.hpp>
#include <ks/gl/KsGLImplementation.hpp>
#include <ks/shared/KsImage.hpp>
#include <algorithm>
namespace ks
{
namespace gl
{
// Constructs a texture descriptor for the given storage format.
// No GL calls are issued here; the actual upload happens in GLSync().
// Default size is 16x16 until UpdateTexture() resizes it via a ReUpload.
Texture2D::Texture2D(Format format) :
    m_width(16),
    m_height(16),
    m_format(format),
    m_filter_min(Filter::Nearest),
    m_filter_mag(Filter::Nearest),
    m_wrap_s(Wrap::ClampToEdge),
    m_wrap_t(Wrap::ClampToEdge),
    m_upd_params(true)
{
    // save params
    // Map the requested storage format onto the (GL format, GL datatype)
    // pair later passed to glTexImage2D / glTexSubImage2D in GLSync().
    if(m_format == Format::RGB8) {
        m_gl_format = GL_RGB;
        m_gl_datatype = GL_UNSIGNED_BYTE;
    }
    else if(m_format == Format::RGBA8) {
        m_gl_format = GL_RGBA;
        m_gl_datatype = GL_UNSIGNED_BYTE;
    }
    else if(m_format == Format::LUMINANCE8) {
        m_gl_format = GL_LUMINANCE;
        m_gl_datatype = GL_UNSIGNED_BYTE;
    }
    else if(m_format == Format::RGBA4) {
        m_gl_format = GL_RGBA;
        m_gl_datatype = GL_UNSIGNED_SHORT_4_4_4_4;
    }
    else if(m_format == Format::RGB5_A1) {
        m_gl_format = GL_RGBA;
        m_gl_datatype = GL_UNSIGNED_SHORT_5_5_5_1;
    }
    else if(m_format == Format::RGB565) {
        m_gl_format = GL_RGB;
        m_gl_datatype = GL_UNSIGNED_SHORT_5_6_5;
    }
    else if(m_format == Format::DEPTH_COMPONENT16) {
#ifdef KS_ENV_GL_ES
        // Depth textures are an extension on GLES 2; warn (but proceed) if absent.
        if(!Implementation::GetGLExtensionExists("GL_OES_depth_texture")) {
            LOG.Error()<< m_log_prefix
                       << "Depth texture requested but "
                          "GL_OES_depth_texture N/A";
        }
#endif
        m_gl_format = GL_DEPTH_COMPONENT;
        m_gl_datatype = GL_UNSIGNED_SHORT;
    }
    else if(m_format == Format::DEPTH_COMPONENT32) {
#ifdef KS_ENV_GL_ES
        if(!Implementation::GetGLExtensionExists("GL_OES_depth_texture")) {
            LOG.Error()<< m_log_prefix
                       << "Depth texture requested but "
                          "GL_OES_depth_texture N/A";
        }
#endif
        m_gl_format = GL_DEPTH_COMPONENT;
        m_gl_datatype = GL_UNSIGNED_INT;
    }
    else if(m_format == Format::DEPTH24_STENCIL8) {
#if defined(KS_ENV_GL_ES)
        // Packed depth+stencil requires its own extension on GLES.
        if(!Implementation::GetGLExtensionExists("GL_OES_packed_depth_stencil")) {
            LOG.Error()<< m_log_prefix
                       << "Packed depth+stencil texture requested but "
                          "GL_OES_packed_depth_stencil N/A";
        }
        m_gl_format = GL_DEPTH_STENCIL_OES;
        m_gl_datatype = GL_UNSIGNED_INT_24_8_OES;
#elif defined(KS_ENV_GL_DESKTOP)
        if(!Implementation::GetGLExtensionExists("GL_ARB_framebuffer_object")) {
            // GL_ARB_framebuffer_object combines:
            // EXT_framebuffer_object
            // EXT_framebuffer_blit
            // EXT_framebuffer_multisample
            // EXT_packed_depth_stencil <---
            LOG.Error()<< m_log_prefix
                       << "Packed depth+stencil texture requested but "
                          "GL_ARB_framebuffer_object N/A";
        }
        m_gl_format = GL_DEPTH_STENCIL;
        m_gl_datatype = GL_UNSIGNED_INT_24_8;
#endif
    }

    // We explicitly set the filtering and wrap modes
    // in the constructor because texturing might not
    // work if these aren't set at least once.
    // Texture parameters (wrap,filter) are owned/
    // saved to the texture object so we shouldn't
    // need to set them per frame.
}
Texture2D::~Texture2D()
{
    // empty — no cleanup is performed at this level
}
// Binds this texture to the given texture unit through the state set.
// Returns false (after logging) when no GL texture handle exists yet.
bool Texture2D::GLBind(StateSet* state_set,GLuint tex_unit)
{
    if(m_texture_handle == 0)
    {
        LOG.Error() << m_log_prefix
                    << "tried to bind with texture 0";
        return false;
    }

    // set the active texture unit and bind our handle to it
    state_set->SetActiveTexUnitAndBind(tex_unit,
                                       m_texture_handle,
                                       GL_TEXTURE_2D,
                                       m_id);
    return true;
}
void Texture2D::GLUnbind()
{
    // intentionally a no-op; see GLBind
}
// Flushes all queued image updates and (if dirty) texture parameters to GL.
// Must be called with this texture already bound (see GLBind); all gl* calls
// here operate on the currently bound GL_TEXTURE_2D target.
void Texture2D::GLSync()
{
    // Replay queued updates in order: full re-uploads use glTexImage2D,
    // partial updates use glTexSubImage2D.
    for(Update& update : m_list_updates)
    {
        if((update.options & Update::ReUpload) == Update::ReUpload)
        {
            if(update.src_data->data_ptr)
            {
                // A re-upload must cover the full current texture size
                // starting at the origin.
                assert(m_width == update.src_data->width);
                assert(m_height == update.src_data->height);
                assert(0 == update.src_offset.x);
                assert(0 == update.src_offset.y);

                glTexImage2D(GL_TEXTURE_2D,
                             0, // mipmap level
                             m_gl_format,
                             m_width,
                             m_height,
                             0, // border, not used for GLES
                             m_gl_format,
                             m_gl_datatype,
                             update.src_data->data_ptr);
            }
            else
            {
                // Create the texture with @data set to 0;
                // This is well defined behavior; the texture will
                // be created but image data is unspecified (see
                // ES 2 spec, 3.7.1 p 69)
                glTexImage2D(GL_TEXTURE_2D,
                             0, // mipmap level
                             m_gl_format,
                             m_width,
                             m_height,
                             0, // border, not used for GLES
                             m_gl_format,
                             m_gl_datatype,
                             0); // data
            }
            KS_CHECK_GL_ERROR(m_log_prefix+"upload texture");
        }
        else
        {
            // Partial update of an already-allocated texture.
            glTexSubImage2D(GL_TEXTURE_2D,
                            0, // mipmap level
                            update.src_offset.x,
                            update.src_offset.y,
                            update.src_data->width,
                            update.src_data->height,
                            m_gl_format,
                            m_gl_datatype,
                            update.src_data->data_ptr);
            KS_CHECK_GL_ERROR(m_log_prefix+"upload subimage");
        }
    }

    m_list_updates.clear();

    // Filter/wrap parameters are stored in the texture object itself,
    // so they only need to be re-sent when changed (m_upd_params).
    if(m_upd_params)
    {
        // set filter
        glTexParameteri(GL_TEXTURE_2D,
                        GL_TEXTURE_MIN_FILTER,
                        static_cast<GLint>(m_filter_min));

        glTexParameteri(GL_TEXTURE_2D,
                        GL_TEXTURE_MAG_FILTER,
                        static_cast<GLint>(m_filter_mag));

        KS_CHECK_GL_ERROR(m_log_prefix+"texture filter params");

        // set wrap
        glTexParameteri(GL_TEXTURE_2D,
                        GL_TEXTURE_WRAP_S,
                        static_cast<GLint>(m_wrap_s));

        glTexParameteri(GL_TEXTURE_2D,
                        GL_TEXTURE_WRAP_T,
                        static_cast<GLint>(m_wrap_t));

        KS_CHECK_GL_ERROR(m_log_prefix+"texture wrap params");

        m_upd_params = false;
    }
}
// Queues an image update to be applied on the next GLSync().
// A full ReUpload supersedes (and discards) all previously queued
// updates and resizes the texture to the new source dimensions.
void Texture2D::UpdateTexture(Update update)
{
    if((update.options & Update::ReUpload) == Update::ReUpload)
    {
        // Erase all updates before this one
        m_list_updates.clear();

        // Resize the image
        m_width = update.src_data->width;
        m_height = update.src_data->height;
    }

    m_list_updates.push_back(update);
}
// Sets min/mag filtering; takes effect on the next GLSync().
void Texture2D::SetFilterModes(Filter filter_min,Filter filter_mag)
{
    m_upd_params = true; // mark params dirty so GLSync re-sends them
    m_filter_min = filter_min;
    m_filter_mag = filter_mag;
}
// Sets S/T wrap modes; takes effect on the next GLSync().
void Texture2D::SetWrapModes(Wrap wrap_s,Wrap wrap_t)
{
    m_upd_params = true; // mark params dirty so GLSync re-sends them
    m_wrap_s = wrap_s;
    m_wrap_t = wrap_t;
}
// Returns the number of image updates queued for the next GLSync().
uint Texture2D::GetUpdateCount() const
{
    // Explicit cast: size() returns std::size_t, which may be wider than
    // uint; avoids an implicit-narrowing warning. The queue is tiny in
    // practice, so the conversion is safe.
    return static_cast<uint>(m_list_updates.size());
}
// Returns true if filter/wrap parameters are dirty and will be
// re-sent to GL on the next GLSync().
bool Texture2D::GetParamsUpdated() const
{
    return m_upd_params;
}
// Estimates the GPU memory footprint of this texture in bytes
// (width * height * bytes-per-pixel for the current format).
u32 Texture2D::calcNumBytes() const
{
    double bytes_per_pixel = 0;
    switch(m_format)
    {
        case Format::RGBA8:             bytes_per_pixel = 4; break;
        case Format::RGB8:              bytes_per_pixel = 3; break;
        case Format::LUMINANCE8:        bytes_per_pixel = 1; break;
        case Format::RGBA4:             bytes_per_pixel = 2; break;
        case Format::RGB5_A1:           bytes_per_pixel = 2; break;
        case Format::RGB565:            bytes_per_pixel = 2; break;
        case Format::DEPTH_COMPONENT16: bytes_per_pixel = 2; break;
        case Format::DEPTH_COMPONENT32: bytes_per_pixel = 4; break;
        case Format::DEPTH24_STENCIL8:  bytes_per_pixel = 4; break;
        default:                        bytes_per_pixel = 0; break;
    }

    double pixel_count = m_width*m_height;
    return static_cast<u32>(pixel_count*bytes_per_pixel);
}
} // gl
} // ks
| preet/ks_gl | ks/gl/KsGLTexture2D.cpp | C++ | apache-2.0 | 10,908 |
/**
* Copyright (C) 2013
* by 52 North Initiative for Geospatial Open Source Software GmbH
*
* Contact: Andreas Wytzisk
* 52 North Initiative for Geospatial Open Source Software GmbH
* Martin-Luther-King-Weg 24
* 48155 Muenster, Germany
* info@52north.org
*
* This program is free software; you can redistribute and/or modify it under
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
*
* This program is distributed WITHOUT ANY WARRANTY; even without the implied
* WARRANTY OF MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with
* this program (see gnu-gpl v2.txt). If not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA or
* visit the Free Software Foundation web page, http://www.fsf.org.
*/
package org.n52.sos.ogc.om;
import java.util.Set;
import javax.xml.namespace.QName;
import org.n52.sos.ogc.OGCConstants;
import org.n52.sos.util.http.MediaType;
import org.n52.sos.w3c.SchemaLocation;
import com.google.common.collect.ImmutableSet;
/**
* Class contains element names and namespaces used to encode the O&M responses.
*
* @since 4.0.0
*/
public interface OmConstants {

    /** Marker string indicating that a parameter value has not been set. */
    String PARAMETER_NOT_SET = "PARAMETER_NOT_SET";

    // //////////////////////////////
    // namespaces and schema locations
    String NS_OM = "http://www.opengis.net/om/1.0";

    String NS_OM_2 = "http://www.opengis.net/om/2.0";

    String NS_OM_PREFIX = "om";

    String NS_GMD = "http://www.isotc211.org/2005/gmd";

    String NS_GMD_PREFIX = "gmd";

    String NS_WV = "http://www.n52.org/wv";

    String SCHEMA_LOCATION_URL_OM = "http://schemas.opengis.net/om/1.0.0/om.xsd";

    String SCHEMA_LOCATION_URL_OM_CONSTRAINT =
            "http://schemas.opengis.net/om/1.0.0/extensions/observationSpecialization_constraint.xsd";

    String SCHEMA_LOCATION_URL_OM_20 = "http://schemas.opengis.net/om/2.0/observation.xsd";

    String SCHEMA_LOCATION_URL_OM_20_OM_OBSERVATION = SCHEMA_LOCATION_URL_OM_20 + "#OM_Observation";

    SchemaLocation OM_100_SCHEMA_LOCATION = new SchemaLocation(NS_OM, SCHEMA_LOCATION_URL_OM);

    SchemaLocation OM_20_SCHEMA_LOCATION = new SchemaLocation(NS_OM_2, SCHEMA_LOCATION_URL_OM_20);

    // //////////////////////////////////////////////////////////////////////
    // other
    String AN_ID = "id";

    MediaType CONTENT_TYPE_OM = new MediaType("text", "xml", "subtype", "om/1.0.0");

    MediaType CONTENT_TYPE_OM_2 = new MediaType("text", "xml", "subtype", "om/2.0.0");

    String RESPONSE_FORMAT_OM = "http://www.opengis.net/om/1.0.0";

    String RESPONSE_FORMAT_OM_2 = "http://www.opengis.net/om/2.0";

    // ///////////////////////////////////////////////////////////////////
    // names of elements in O&M documents
    String EN_ASCII_BLOCK = "AsciiBlock";

    String EN_ABSTRACT_DATA_GROUP = "_DataGroup";

    String EN_ABSTRACT_DATA_QUALITY = "AbstractDQ_Element";

    String EN_BOUNDED_BY = "boundedBy";

    String EN_CATEGORY_OBSERVATION = "CategoryObservation";

    String EN_COUNT_OBSERVATION = "CountObservation";

    String EN_TEXT_OBSERVATION = "TextObservation";

    String EN_TRUTH_OBSERVATION = "TruthObservation";

    String EN_GEOMETRY_OBSERVATION = "GeometryObservation";

    String EN_COMMON_OBSERVATION = "CommonObservation";

    String EN_COMPOSITE_PHENOMENON = "CompositePhenomenon";

    String EN_DATA_GROUP = "DataGroup";

    String EN_DQ_QUAN_ATTR_ACC = "DQ_QuantitativeAttributeAccuracy";

    String EN_DQ_NON_QUAN_ATTR_ACC = "DQ_NonQuantitativeAttributeAccuracy";

    String EN_DQ_COMPL_COMM = "DQ_CompletenessCommission";

    String EN_DQ_COMPL_OM = "DQ_CompletenessOmission";

    String EN_FEATURE = "Feature";

    String EN_FEATURE_COLLECTION = "FeatureCollection";

    String EN_GEOREF_FEATURE = "GeoReferenceableFeature";

    String EN_MEMBER = "member";

    String EN_MEASUREMENT = "Measurement";

    String EN_OBSERVED_PROPERTY = "observedProperty";

    String EN_OBSERVATION_COLLECTION = "ObservationCollection";

    String EN_OBSERVATION = "Observation";

    String EN_PHENOMENON = "Phenomenon";

    String EN_COMPOSITE_SURFACE = "CompositeSurface";

    String EN_RESULT = "result";

    String EN_WV_STATION = "WVStation";

    String EN_TEMPORAL_OPS = "temporalOps";

    String EN_PROCEDURE = "procedure";

    String EN_PHENOMENON_TIME = "phenomenonTime";

    String EN_FEATURE_OF_INTEREST = "featureOfInterest";

    String EN_PROCESS = "Process";

    // /////////////////////////////////////////////////////////////////////////////////
    // other constants
    String PHEN_SAMPLING_TIME = "http://www.opengis.net/def/property/OGC/0/SamplingTime";

    String PHENOMENON_TIME = "http://www.opengis.net/def/property/OGC/0/PhenomenonTime";

    String PHENOMENON_TIME_NAME = "phenomenonTime";

    String SAMPLING_TIME_NAME = "samplingTime";

    String PHEN_UOM_ISO8601 = "http://www.opengis.net/def/uom/ISO-8601/0/Gregorian";

    String PHEN_FEATURE_OF_INTEREST = "http://www.opengis.net/def/property/OGC/0/FeatureOfInterest";

    String ATTR_SRS_NAME = "srsName";

    String PARAM_NAME_SAMPLING_GEOMETRY = "http://www.opengis.net/def/param-name/OGC-OM/2.0/samplingGeometry";

    // observation types
    String OBS_TYPE_MEASUREMENT = "http://www.opengis.net/def/observationType/OGC-OM/2.0/OM_Measurement";

    String OBS_TYPE_CATEGORY_OBSERVATION =
            "http://www.opengis.net/def/observationType/OGC-OM/2.0/OM_CategoryObservation";

    String OBS_TYPE_COMPLEX_OBSERVATION =
            "http://www.opengis.net/def/observationType/OGC-OM/2.0/OM_ComplexObservation";

    String OBS_TYPE_COUNT_OBSERVATION = "http://www.opengis.net/def/observationType/OGC-OM/2.0/OM_CountObservation";

    String OBS_TYPE_GEOMETRY_OBSERVATION =
            "http://www.opengis.net/def/observationType/OGC-OM/2.0/OM_GeometryObservation";

    // no Definition in O&M and not in Lightweight Profile
    String OBS_TYPE_TEXT_OBSERVATION = "http://www.opengis.net/def/observationType/OGC-OM/2.0/OM_TextObservation";

    String OBS_TYPE_TRUTH_OBSERVATION = "http://www.opengis.net/def/observationType/OGC-OM/2.0/OM_TruthObservation";

    String OBS_TYPE_OBSERVATION = "http://www.opengis.net/def/observationType/OGC-OM/2.0/OM_Observation";

    String OBS_TYPE_UNKNOWN = OGCConstants.UNKNOWN;

    String OBS_TYPE_SWE_ARRAY_OBSERVATION =
            "http://www.opengis.net/def/observationType/OGC-OM/2.0/OM_SWEArrayObservation";

    String OBS_RESULT_TYPE_OBSERVATION = "http://www.opengis.net/sensorML/2.0/DataArray";

    String SAMPLING_FEAT_TYPE_UNKNOWN = "http://www.opengis.net/def/samplingFeatureType/unknown";

    // ////////////////////////////////////////////////////////
    // resultModel constants; not possible to use enum because the
    // values must be QName instances
    QName RESULT_MODEL_MEASUREMENT = new QName(NS_OM, EN_MEASUREMENT, NS_OM_PREFIX);

    QName RESULT_MODEL_GEOMETRY_OBSERVATION = new QName(NS_OM, EN_GEOMETRY_OBSERVATION, NS_OM_PREFIX);

    QName RESULT_MODEL_CATEGORY_OBSERVATION = new QName(NS_OM, EN_CATEGORY_OBSERVATION, NS_OM_PREFIX);

    QName RESULT_MODEL_OBSERVATION = new QName(NS_OM, EN_OBSERVATION, NS_OM_PREFIX);

    QName RESULT_MODEL_COUNT_OBSERVATION = new QName(NS_OM, EN_COUNT_OBSERVATION, NS_OM_PREFIX);

    QName RESULT_MODEL_TRUTH_OBSERVATION = new QName(NS_OM, EN_TRUTH_OBSERVATION, NS_OM_PREFIX);

    QName RESULT_MODEL_TEXT_OBSERVATION = new QName(NS_OM, EN_TEXT_OBSERVATION, NS_OM_PREFIX);

    /**
     * Array of constants for result models.
     * NOTE(review): count/truth/text result models are declared above but not
     * included here — confirm whether that is intentional.
     */
    Set<QName> RESULT_MODELS = ImmutableSet.of(RESULT_MODEL_OBSERVATION, RESULT_MODEL_MEASUREMENT,
            RESULT_MODEL_CATEGORY_OBSERVATION, RESULT_MODEL_GEOMETRY_OBSERVATION);
}
| sauloperez/sos | src/core/api/src/main/java/org/n52/sos/ogc/om/OmConstants.java | Java | apache-2.0 | 8,069 |
package org.openntf.domino.graph2.impl;
import java.util.Iterator;
import org.openntf.domino.big.NoteCoordinate;
import com.tinkerpop.blueprints.Vertex;
public class DVertexIterable implements Iterable<Vertex> {

	/**
	 * Iterator that lazily resolves {@link NoteCoordinate}s from an index
	 * into {@link Vertex} instances via a {@link DElementStore}.
	 */
	public static class DVertexIterator implements Iterator<Vertex> {
		private final DElementStore store_;
		private final Iterable<NoteCoordinate> coordinates_;
		private Iterator<NoteCoordinate> delegate_;

		public DVertexIterator(final DElementStore store, final Iterable<NoteCoordinate> index) {
			store_ = store;
			coordinates_ = index;
		}

		// The coordinate iterator is created on first use.
		private Iterator<NoteCoordinate> delegate() {
			if (delegate_ == null) {
				delegate_ = coordinates_.iterator();
			}
			return delegate_;
		}

		@Override
		public boolean hasNext() {
			return delegate().hasNext();
		}

		@Override
		public Vertex next() {
			final NoteCoordinate coord = delegate().next();
			return coord == null ? null : store_.getVertex(coord);
		}

		@Override
		public void remove() {
			delegate().remove();
		}
	}

	private final Iterable<NoteCoordinate> index_;
	private final DElementStore store_;

	public DVertexIterable(final DElementStore store, final Iterable<NoteCoordinate> index) {
		store_ = store;
		index_ = index;
	}

	@Override
	public Iterator<Vertex> iterator() {
		return new DVertexIterator(store_, index_);
	}
}
| rPraml/org.openntf.domino | domino/graph/src/main/java/org/openntf/domino/graph2/impl/DVertexIterable.java | Java | apache-2.0 | 1,380 |
/*
* Copyright 1999-2011 Alibaba Group Holding Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.druid.bvt.pool;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import org.junit.Assert;
import junit.framework.TestCase;
import com.alibaba.druid.pool.DruidDataSource;
import com.alibaba.druid.stat.DruidDataSourceStatManager;
public class TestClose_0 extends TestCase {

    // Reset the global data source statistics before each test.
    protected void setUp() throws Exception {
        DruidDataSourceStatManager.clear();
    }

    // Verify that every data source created during the test was closed
    // and deregistered from the global stat manager.
    protected void tearDown() throws Exception {
        Assert.assertEquals(0, DruidDataSourceStatManager.getInstance().getDataSourceList().size());
    }

    /**
     * Closing a pooled connection must cascade-close the statement and
     * result set it produced, and closing them again afterwards must be
     * a harmless no-op.
     */
    public void test_close() throws Exception {
        DruidDataSource dataSource = new DruidDataSource();
        dataSource.setUrl("jdbc:mock:xxx"); // mock driver — no real database needed

        String sql = "SELECT 1";
        Connection conn = dataSource.getConnection();
        PreparedStatement stmt = conn.prepareStatement(sql);
        ResultSet rs = stmt.executeQuery();
        rs.next();
        conn.close();

        // the connection close must have closed its children
        Assert.assertEquals(true, stmt.isClosed());
        Assert.assertEquals(true, rs.isClosed());

        // double-close must not throw
        rs.close();
        stmt.close();

        dataSource.close();
    }
}
| xiaomozhang/druid | druid-1.0.9/src/test/java/com/alibaba/druid/bvt/pool/TestClose_0.java | Java | apache-2.0 | 1,779 |
package uz.greenwhite.slidingmenu.support.v10.service;
import android.os.AsyncTask;
import org.json.JSONArray;
import org.json.JSONObject;
import uz.greenwhite.slidingmenu.support.v10.error.RequestException;
import uz.greenwhite.slidingmenu.support.v10.service.http_request.HttpRequest;
import uz.greenwhite.slidingmenu.support.v10.service.http_request.Request;
import uz.greenwhite.slidingmenu.support.v10.service.request.TaskRequest;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.util.List;
public abstract class TaskService<R> extends AsyncTask<TaskRequest<R>, Void, Void> implements Request {

    /** Called on the UI thread before any request is sent. */
    public abstract void onStart();

    /** Called on the UI thread after all requests have been processed. */
    public abstract void onStop();

    /**
     * Delivers the outcome of a single task.
     *
     * @param id     identifier of the task the result belongs to
     * @param result the server result payload ("r") or error text ("e")
     */
    public abstract void onResult(long id, String result);

    /** Tasks handled by this service; also the payload source for {@link #send}. */
    protected final List<TaskRequest<R>> taskRequest;

    protected TaskService(List<TaskRequest<R>> taskRequest) {
        this.taskRequest = taskRequest;
    }

    @Override
    protected void onPreExecute() {
        onStart();
    }

    /**
     * Posts each task to its URL on the background thread. A failure of
     * one task is logged and does not abort the remaining tasks.
     */
    @Override
    protected Void doInBackground(TaskRequest<R>... params) {
        // Fixed raw-type usage: iterate with the parameterized element type.
        for (TaskRequest<R> t : params) {
            try {
                HttpRequest.post(this, t.url);
            } catch (Exception e) {
                // best-effort: keep processing the remaining tasks
                e.printStackTrace();
            }
        }
        return null;
    }

    @Override
    protected void onPostExecute(Void result) {
        onStop();
    }

    /**
     * Serializes all queued tasks as a JSON array of {@code {"d": <body>}}
     * objects and writes it UTF-8 encoded to the request stream.
     */
    @Override
    public void send(OutputStream os) throws Exception {
        JSONArray arr = new JSONArray();
        for (TaskRequest<R> t : taskRequest) {
            JSONObject obj = new JSONObject();
            obj.put("d", t.getBody());
            arr.put(obj);
        }
        PrintWriter writer = new PrintWriter(new OutputStreamWriter(os, "UTF8"), true);
        writer.print(arr.toString());
        writer.flush();
    }

    /**
     * Parses the response JSON array and dispatches each entry to
     * {@link #onResult(long, String)}. An entry must carry either a
     * result ("r") or an error ("e"); anything else is a protocol error.
     */
    @Override
    public void receive(InputStream is) throws Exception {
        String s = HttpRequest.makeString(is);
        JSONArray arr = new JSONArray(s);
        for (int i = 0; i < arr.length(); i++) {
            JSONObject obj = arr.getJSONObject(i);
            long id = obj.getLong("i");
            String result;
            if (obj.has("r")) {
                result = obj.getString("r");
            } else if (obj.has("e")) {
                result = obj.getString("e");
            } else {
                throw new RequestException("no result found");
            }
            onResult(id, result);
        }
    }

    /**
     * Linear scan for the queued task with the given id.
     *
     * @return the matching task, or {@code null} if none is queued
     */
    public TaskRequest<R> findTask(long id) {
        for (TaskRequest<R> c : taskRequest) {
            if (c.id == id) {
                return c;
            }
        }
        return null;
    }
}
| axmadjon/AndroidSlidingMenu | app/src/main/java/uz/greenwhite/slidingmenu/support/v10/service/TaskService.java | Java | apache-2.0 | 2,722 |
package cr.ac.tec.appsmoviles.jnicalculator.gui;
import javax.swing.*;
import java.awt.*;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
/**
* Main view of the application
*/
public class MainView extends JFrame {

    /**
     * The vertical weight for the display
     */
    public static final double DISPLAY_VERTICAL_WEIGHT = 0.15;

    /**
     * The vertical weight for the buttons panel
     */
    private static final double BUTTONS_PANEL_VERTICAL_WEIGHT = 0.75;

    /**
     * The background color for the display
     */
    private static final String DISPLAY_BACKGROUND_COLOR = "#FFFFFF";

    /**
     * The color for the border of the display
     */
    private static final String DISPLAY_BORDER_COLOR = "#CCCCCC";

    /**
     * The font used to inside the display
     */
    private static final String DISPLAY_FONT = "Monospaced";

    /**
     * The font size for the display
     */
    private static final int DISPLAY_FONT_SIZE = 36;

    /**
     * The color of the text in the display
     */
    private static final String DISPLAY_FOREGROUND_COLOR = "#000000";

    /**
     * The maximum number of digits that can appear in the display
     */
    private static final int MAX_DIGITS = 11;

    /**
     * The initial height of the window
     */
    private static final int WINDOW_HEIGHT = 400;

    /**
     * Whether the window is resizable or not
     */
    private static final boolean WINDOW_RESIZABLE = false;

    /**
     * The text that will appear in the window's title bar
     */
    private static final String WINDOW_TITLE = "JNI Calculator";

    /**
     * The initial width of the window
     */
    private static final int WINDOW_WIDTH = 300;

    /**
     * The button that sends the clear command
     */
    private JButton clearButton;

    /**
     * The underlying controller for the calculator
     */
    private MainViewController controller;

    /**
     * The display for the output
     */
    private JLabel display = new JLabel();

    /**
     * Creates a window frame and initializes its components
     */
    public MainView() {
        super();
        setDefaultCloseOperation(javax.swing.WindowConstants.EXIT_ON_CLOSE);
        setTitle(WINDOW_TITLE);
        setLocationByPlatform(true);
        setSize(new java.awt.Dimension(WINDOW_WIDTH, WINDOW_HEIGHT));
        setResizable(WINDOW_RESIZABLE);
        // The clear button is created before the controller because the
        // controller's constructor needs a reference to it.
        clearButton = createButton("AC", new ActionListener() {
            /**
             * Invoked when an action occurs.
             */
            @Override
            public void actionPerformed(ActionEvent event) {
                controller.handleClear();
            }
        });
        controller = new MainViewController(clearButton, display, MAX_DIGITS);
        initComponents();
    }

    /**
     * Creates a button
     *
     * @param text the text for the button
     * @param listener the ActionListener for the button
     * @return a button with the specified properties
     */
    private JButton createButton(String text, ActionListener listener) {
        JButton button = new JButton(text);
        button.addActionListener(listener);
        return button;
    }

    /**
     * Creates the components needed for the calculator and connects them to the
     * controller
     */
    private void initComponents() {
        // Root
        JPanel root = new JPanel();
        root.setSize(new Dimension(this.getWidth(), this.getHeight()));
        // Display
        display.setFont(new java.awt.Font(DISPLAY_FONT, 0, DISPLAY_FONT_SIZE));
        display.setBorder(javax.swing.BorderFactory.createLineBorder(Color.decode(DISPLAY_BORDER_COLOR)));
        display.setForeground(Color.decode(DISPLAY_FOREGROUND_COLOR));
        display.setBackground(Color.decode(DISPLAY_BACKGROUND_COLOR));
        display.setOpaque(true);
        display.setHorizontalAlignment(JLabel.RIGHT);
        // Buttons Panel: 5 rows x 4 columns, filled row by row in the
        // order the buttons are added below.
        JPanel buttonsPanel = new JPanel(new GridLayout(5, 4, 0, 0));
        // Buttons — one shared listener serves all digit/decimal-point
        // buttons (it reads the pressed button's text); each operator
        // button gets its own listener calling the matching controller method.
        ActionListener digitListener = new DigitActionListener();
        buttonsPanel.add(clearButton);
        buttonsPanel.add(createButton("√", new ActionListener() {
            /**
             * Invoked when an action occurs.
             */
            @Override
            public void actionPerformed(ActionEvent event) {
                controller.handleSquareRoot();
            }
        }));
        buttonsPanel.add(createButton("%", new ActionListener() {
            /**
             * Invoked when an action occurs.
             */
            @Override
            public void actionPerformed(ActionEvent event) {
                controller.handlePercentage();
            }
        }));
        buttonsPanel.add(createButton("÷", new ActionListener() {
            /**
             * Invoked when an action occurs.
             */
            @Override
            public void actionPerformed(ActionEvent event) {
                controller.handleDivision();
            }
        }));
        buttonsPanel.add(createButton("7", digitListener));
        buttonsPanel.add(createButton("8", digitListener));
        buttonsPanel.add(createButton("9", digitListener));
        buttonsPanel.add(createButton("×", new ActionListener() {
            /**
             * Invoked when an action occurs.
             */
            @Override
            public void actionPerformed(ActionEvent event) {
                controller.handleTimes();
            }
        }));
        buttonsPanel.add(createButton("4", digitListener));
        buttonsPanel.add(createButton("5", digitListener));
        buttonsPanel.add(createButton("6", digitListener));
        buttonsPanel.add(createButton("-", new ActionListener() {
            /**
             * Invoked when an action occurs.
             */
            @Override
            public void actionPerformed(ActionEvent event) {
                controller.handleMinus();
            }
        }));
        buttonsPanel.add(createButton("1", digitListener));
        buttonsPanel.add(createButton("2", digitListener));
        buttonsPanel.add(createButton("3", digitListener));
        buttonsPanel.add(createButton("+", new ActionListener() {
            /**
             * Invoked when an action occurs.
             */
            @Override
            public void actionPerformed(ActionEvent event) {
                controller.handlePlus();
            }
        }));
        buttonsPanel.add(createButton("±", new ActionListener() {
            /**
             * Invoked when an action occurs.
             */
            @Override
            public void actionPerformed(ActionEvent event) {
                controller.handleSignChange();
            }
        }));
        buttonsPanel.add(createButton("0", digitListener));
        buttonsPanel.add(createButton(".", digitListener));
        buttonsPanel.add(createButton("=", new ActionListener() {
            /**
             * Invoked when an action occurs.
             */
            @Override
            public void actionPerformed(ActionEvent event) {
                controller.handleEquals();
            }
        }));
        // Add Components: display on top (small vertical weight), buttons
        // panel below taking the remaining vertical space.
        root.setLayout(new GridBagLayout());
        GridBagConstraints constraints = new GridBagConstraints();
        constraints.weightx = 1;
        constraints.weighty = DISPLAY_VERTICAL_WEIGHT;
        constraints.gridx = 0;
        constraints.gridy = 0;
        constraints.insets = new Insets(2, 5, 2, 5);
        constraints.fill = GridBagConstraints.HORIZONTAL;
        root.add(display, constraints);
        constraints.insets = new Insets(2, 0, 2, 0);
        constraints.weighty = BUTTONS_PANEL_VERTICAL_WEIGHT;
        constraints.gridx = 0;
        constraints.gridy = 1;
        constraints.fill = GridBagConstraints.VERTICAL;
        root.add(buttonsPanel, constraints);
        add(root);
    }

    /**
     * An ActionListener that handles digits and sends them to the controller
     */
    private class DigitActionListener implements ActionListener {
        /**
         * Invoked when an action occurs. The digit is taken from the text
         * of the button that fired the event, so one listener instance can
         * be shared by all digit buttons.
         */
        @Override
        public void actionPerformed(ActionEvent event) {
            String digit = ((JButton) event.getSource()).getText();
            controller.handleDigit(digit);
        }
    }
}
| fjsalas/JNI-Calculator | src/main/java/cr/ac/tec/appsmoviles/jnicalculator/gui/MainView.java | Java | apache-2.0 | 7,475 |
<?php
defined('BASEPATH') or exit('No direct script access allowed');
/*
| -------------------------------------------------------------------
| DATABASE CONNECTIVITY SETTINGS
| -------------------------------------------------------------------
| This file will contain the settings needed to access your database.
|
| For complete instructions please consult the 'Database Connection'
| page of the User Guide.
|
| -------------------------------------------------------------------
| EXPLANATION OF VARIABLES
| -------------------------------------------------------------------
|
| ['dsn'] The full DSN string describe a connection to the database.
| ['hostname'] The hostname of your database server.
| ['username'] The username used to connect to the database
| ['password'] The password used to connect to the database
| ['database'] The name of the database you want to connect to
| ['dbdriver'] The database driver. e.g.: mysqli.
| Currently supported:
| cubrid, ibase, mssql, mysql, mysqli, oci8,
| odbc, pdo, postgre, sqlite, sqlite3, sqlsrv
| ['dbprefix'] You can add an optional prefix, which will be added
| to the table name when using the Query Builder class
| ['pconnect'] TRUE/FALSE - Whether to use a persistent connection
| ['db_debug'] TRUE/FALSE - Whether database errors should be displayed.
| ['cache_on'] TRUE/FALSE - Enables/disables query caching
| ['cachedir'] The path to the folder where cache files should be stored
| ['char_set'] The character set used in communicating with the database
| ['dbcollat'] The character collation used in communicating with the database
| NOTE: For MySQL and MySQLi databases, this setting is only used
| as a backup if your server is running PHP < 5.2.3 or MySQL < 5.0.7
| (and in table creation queries made with DB Forge).
| There is an incompatibility in PHP with mysql_real_escape_string() which
| can make your site vulnerable to SQL injection if you are using a
| multi-byte character set and are running versions lower than these.
| Sites using Latin-1 or UTF-8 database character set and collation are unaffected.
| ['swap_pre'] A default table prefix that should be swapped with the dbprefix
| ['encrypt'] Whether or not to use an encrypted connection.
|
| 'mysql' (deprecated), 'sqlsrv' and 'pdo/sqlsrv' drivers accept TRUE/FALSE
| 'mysqli' and 'pdo/mysql' drivers accept an array with the following options:
|
| 'ssl_key' - Path to the private key file
| 'ssl_cert' - Path to the public key certificate file
| 'ssl_ca' - Path to the certificate authority file
| 'ssl_capath' - Path to a directory containing trusted CA certificats in PEM format
| 'ssl_cipher' - List of *allowed* ciphers to be used for the encryption, separated by colons (':')
| 'ssl_verify' - TRUE/FALSE; Whether verify the server certificate or not ('mysqli' only)
|
| ['compress'] Whether or not to use client compression (MySQL only)
| ['stricton'] TRUE/FALSE - forces 'Strict Mode' connections
| - good for ensuring strict SQL while developing
| ['ssl_options'] Used to set various SSL options that can be used when making SSL connections.
| ['failover'] array - A array with 0 or more data for connections if the main should fail.
| ['save_queries'] TRUE/FALSE - Whether to "save" all executed queries.
| NOTE: Disabling this will also effectively disable both
| $this->db->last_query() and profiling of DB queries.
| When you run a query, with this setting set to TRUE (default),
| CodeIgniter will store the SQL statement for debugging purposes.
| However, this may cause high memory usage, especially if you run
| a lot of SQL queries ... disable this to avoid that problem.
|
| The $active_group variable lets you choose which connection group to
| make active. By default there is only one group (the 'default' group).
|
| The $query_builder variables lets you determine whether or not to load
| the query builder class.
*/
$active_group = 'default';
$query_builder = true;

// NOTE(review): live database credentials are committed here in plain text.
// Consider loading hostname/username/password from environment variables or
// an untracked local config file instead of version control.
$db['default'] = array(
    'dsn' => '',
    'hostname' => 'infizi.com',
    'username' => 'infizime_product',
    'password' => '4WJ8fPs{T-Q0',
    'database' => 'infizime_product',
    'dbdriver' => 'mysqli',
    'dbprefix' => '',
    'pconnect' => false,
    'db_debug' => true, // NOTE(review): shows DB errors — typically disabled in production
    'cache_on' => true,
    'cachedir' => 'mysql_cache',
    'char_set' => 'utf8',
    'dbcollat' => 'utf8_general_ci',
    'swap_pre' => '',
    'encrypt' => false,
    'compress' => true,
    'stricton' => true,
    'failover' => array(),
    'save_queries' => true
);
| Swift-Jr/product | source/ci/config/database.php | PHP | apache-2.0 | 4,601 |
package apple.metalperformanceshaders;
import apple.NSObject;
import apple.foundation.NSArray;
import apple.foundation.NSCoder;
import apple.foundation.NSMethodSignature;
import apple.foundation.NSSet;
import apple.metal.protocol.MTLBuffer;
import apple.metal.protocol.MTLCommandBuffer;
import apple.metal.protocol.MTLDevice;
import org.moe.natj.c.ann.FunctionPtr;
import org.moe.natj.general.NatJ;
import org.moe.natj.general.Pointer;
import org.moe.natj.general.ann.Generated;
import org.moe.natj.general.ann.Library;
import org.moe.natj.general.ann.Mapped;
import org.moe.natj.general.ann.NInt;
import org.moe.natj.general.ann.NUInt;
import org.moe.natj.general.ann.Owned;
import org.moe.natj.general.ann.Runtime;
import org.moe.natj.general.ptr.VoidPtr;
import org.moe.natj.objc.Class;
import org.moe.natj.objc.ObjCRuntime;
import org.moe.natj.objc.SEL;
import org.moe.natj.objc.ann.ObjCClassBinding;
import org.moe.natj.objc.ann.ProtocolClassMethod;
import org.moe.natj.objc.ann.Selector;
import org.moe.natj.objc.map.ObjCObjectMapper;
/**
 * MPSMatrixDecompositionCholesky
 * <p>
 * [@dependency] This depends on Metal.framework.
 * <p>
 * A kernel for computing the Cholesky factorization of a matrix.
 * <p>
 * A MPSMatrixDecompositionCholesky object computes one of the following
 * factorizations of a matrix A:
 * <p>
 * A = L * L**T
 * A = U**T * U
 * <p>
 * A is a symmetric positive-definite matrix for which the
 * factorization is to be computed. L and U are lower and upper
 * triangular matrices respectively.
 * <p>
 * NOTE(review): every member of this class carries {@code @Generated}
 * (MOE Nat/J binding stubs); the file is presumably regenerated from the
 * native headers, so avoid editing it by hand.
 */
@Generated
@Library("MetalPerformanceShaders")
@Runtime(ObjCRuntime.class)
@ObjCClassBinding
public class MPSMatrixDecompositionCholesky extends MPSMatrixUnaryKernel {
    static {
        NatJ.register();
    }

    @Generated
    protected MPSMatrixDecompositionCholesky(Pointer peer) {
        super(peer);
    }

    @Generated
    @Selector("accessInstanceVariablesDirectly")
    public static native boolean accessInstanceVariablesDirectly();

    @Generated
    @Owned
    @Selector("alloc")
    public static native MPSMatrixDecompositionCholesky alloc();

    @Owned
    @Generated
    @Selector("allocWithZone:")
    public static native MPSMatrixDecompositionCholesky allocWithZone(VoidPtr zone);

    @Generated
    @Selector("automaticallyNotifiesObserversForKey:")
    public static native boolean automaticallyNotifiesObserversForKey(String key);

    @Generated
    @Selector("cancelPreviousPerformRequestsWithTarget:")
    public static native void cancelPreviousPerformRequestsWithTarget(@Mapped(ObjCObjectMapper.class) Object aTarget);

    @Generated
    @Selector("cancelPreviousPerformRequestsWithTarget:selector:object:")
    public static native void cancelPreviousPerformRequestsWithTargetSelectorObject(
            @Mapped(ObjCObjectMapper.class) Object aTarget, SEL aSelector,
            @Mapped(ObjCObjectMapper.class) Object anArgument);

    @Generated
    @Selector("classFallbacksForKeyedArchiver")
    public static native NSArray<String> classFallbacksForKeyedArchiver();

    @Generated
    @Selector("classForKeyedUnarchiver")
    public static native Class classForKeyedUnarchiver();

    @Generated
    @Selector("debugDescription")
    public static native String debugDescription_static();

    @Generated
    @Selector("description")
    public static native String description_static();

    /**
     * Encode a MPSMatrixDecompositionCholesky kernel into a command Buffer.
     * <p>
     * This function encodes the MPSMatrixDecompositionCholesky object to a valid
     * command buffer.
     * <p>
     * If during the factorization a leading minor of the matrix is found to be
     * not positive definite, MPSMatrixDecompositionNonPositiveDefinite will be returned
     * in the provided status buffer. Previously computed pivots and the non positive
     * pivot are written to the result, but the factorization does not complete.
     * The data referenced by the MTLBuffer is not valid until the command buffer has completed
     * execution. If the matrix return status is not desired NULL may be provided.
     * <p>
     * If the return status is MPSMatrixDecompositionStatusSuccess, resultMatrix
     * contains the resulting factors in its lower or upper triangular regions
     * respectively.
     * <p>
     * This kernel functions either in-place, if the result matrix
     * completely aliases the source matrix, or out-of-place. If there
     * is any partial overlap between input and output data the results
     * are undefined.
     *
     * @param commandBuffer A valid MTLCommandBuffer to receive the encoded filter
     * @param sourceMatrix  A valid MPSMatrix containing the source data.  Must have
     *                      enough space to hold a order x order matrix.
     * @param resultMatrix  A valid MPSMatrix to contain the result.  Must have enough
     *                      space to hold a order x order matrix.
     * @param status        A MTLBuffer which indicates the resulting MPSMatrixDecompositionStatus
     *                      value.
     */
    @Generated
    @Selector("encodeToCommandBuffer:sourceMatrix:resultMatrix:status:")
    public native void encodeToCommandBufferSourceMatrixResultMatrixStatus(
            @Mapped(ObjCObjectMapper.class) MTLCommandBuffer commandBuffer, MPSMatrix sourceMatrix,
            MPSMatrix resultMatrix, @Mapped(ObjCObjectMapper.class) MTLBuffer status);

    @Generated
    @Selector("hash")
    @NUInt
    public static native long hash_static();

    @Generated
    @Selector("init")
    public native MPSMatrixDecompositionCholesky init();

    @Generated
    @Selector("initWithCoder:")
    public native MPSMatrixDecompositionCholesky initWithCoder(NSCoder aDecoder);

    @Generated
    @Selector("initWithCoder:device:")
    public native MPSMatrixDecompositionCholesky initWithCoderDevice(NSCoder aDecoder,
            @Mapped(ObjCObjectMapper.class) Object device);

    @Generated
    @Selector("initWithDevice:")
    public native MPSMatrixDecompositionCholesky initWithDevice(@Mapped(ObjCObjectMapper.class) Object device);

    /**
     * Initialize an MPSMatrixDecompositionCholesky object on a device
     *
     * @param device The device on which the kernel will execute.
     * @param lower  A boolean value indicating if the lower triangular
     *               part of the source matrix is stored.  If lower = YES
     *               the lower triangular part will be used and the factor
     *               will be written to the lower triangular part of the
     *               result, otherwise the upper triangular part will be used
     *               and the factor will be written to the upper triangular
     *               part.
     * @param order  The number of rows and columns in the source matrix.
     * @return A valid MPSMatrixDecompositionCholesky object or nil, if failure.
     */
    @Generated
    @Selector("initWithDevice:lower:order:")
    public native MPSMatrixDecompositionCholesky initWithDeviceLowerOrder(
            @Mapped(ObjCObjectMapper.class) MTLDevice device, boolean lower, @NUInt long order);

    @Generated
    @Selector("instanceMethodForSelector:")
    @FunctionPtr(name = "call_instanceMethodForSelector_ret")
    public static native NSObject.Function_instanceMethodForSelector_ret instanceMethodForSelector(SEL aSelector);

    @Generated
    @Selector("instanceMethodSignatureForSelector:")
    public static native NSMethodSignature instanceMethodSignatureForSelector(SEL aSelector);

    @Generated
    @Selector("instancesRespondToSelector:")
    public static native boolean instancesRespondToSelector(SEL aSelector);

    @Generated
    @Selector("isSubclassOfClass:")
    public static native boolean isSubclassOfClass(Class aClass);

    @Generated
    @Selector("keyPathsForValuesAffectingValueForKey:")
    public static native NSSet<String> keyPathsForValuesAffectingValueForKey(String key);

    @Generated
    @Owned
    @Selector("new")
    public static native MPSMatrixDecompositionCholesky new_objc();

    @Generated
    @Selector("resolveClassMethod:")
    public static native boolean resolveClassMethod(SEL sel);

    @Generated
    @Selector("resolveInstanceMethod:")
    public static native boolean resolveInstanceMethod(SEL sel);

    @Generated
    @Selector("setVersion:")
    public static native void setVersion_static(@NInt long aVersion);

    @Generated
    @Selector("superclass")
    public static native Class superclass_static();

    @Generated
    @Selector("supportsSecureCoding")
    public static native boolean supportsSecureCoding();

    @Generated
    @ProtocolClassMethod("supportsSecureCoding")
    public boolean _supportsSecureCoding() {
        return supportsSecureCoding();
    }

    @Generated
    @Selector("version")
    @NInt
    public static native long version_static();
}
| multi-os-engine/moe-core | moe.apple/moe.platform.ios/src/main/java/apple/metalperformanceshaders/MPSMatrixDecompositionCholesky.java | Java | apache-2.0 | 8,871 |
#!/usr/bin/env python
#===============================================================================
# Copyright 2015 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
"""
IngestDBWrapper: provides low-level database commands for the ingest process.
This class (based on ConnectionWrapper) provides low-level database
commands used by the ingest process. This is where the SQL queries go.
The methods in this class should be context free, so all context information
should be passed in as parameters and passed out as return values. To put
it another way, the database connection should be the *only* data attribute.
If you feel you need to cache the result of database queries or track context,
please do it in the calling class, not here. This is intended as a very clean
and simple interface to the database, to replace big chunks of SQL with
meaningfully named method calls.
"""
from __future__ import absolute_import
import logging
import datetime
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from psycopg2.extensions import ISOLATION_LEVEL_READ_COMMITTED
import agdc.dbutil as dbutil
import pytz
from EOtools.utils import log_multiline
# Set up logger.
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)

#
# Module level constants
#

# Default time tolerance used when matching overlapping datasets/tiles.
ONE_HOUR = datetime.timedelta(0, 3600)

#
# Symbolic names for tile classes
#
# These mirror the tile.tile_class_id values stored in the database.
TC_PENDING = 0
TC_SINGLE_SCENE = 1
TC_DELETED = 2
TC_SUPERSEDED = 3
TC_MOSAIC = 4

# pylint: disable=too-many-public-methods
class IngestDBWrapper(dbutil.ConnectionWrapper):
    """IngestDBWrapper: low-level database commands for the ingest process.
    """

    #
    # Constants
    #

    # This is the +- percentage to match within for fuzzy datetime matches.
    # NOTE(review): currently 0, which makes get_acquisition_id_fuzzy an
    # exact match on start/end datetimes in practice -- confirm intended.
    FUZZY_MATCH_PERCENTAGE = 0

    #
    # Utility Functions
    #
def execute_sql_single(self, sql, params):
"""Executes an sql query returning (at most) a single row.
This creates a cursor, executes the sql query or command specified
by the operation string 'sql' and parameters 'params', and returns
the first row of the result, or None if there is no result."""
cur = self.conn.cursor()
self.log_sql(cur.mogrify(sql, params))
cur.execute(sql, params)
result = cur.fetchone()
return result
def execute_sql_multi(self, sql, params):
"""Executes an sql query returning multiple rows.
This creates a cursor, executes the sql query or command specified
by the operation string 'sql' and parameters 'params', and returns
a list of results, or an empty list if there are no results."""
cur = self.conn.cursor()
self.log_sql(cur.mogrify(sql, params))
cur.execute(sql, params)
result = cur.fetchall()
return result
    @staticmethod
    def log_sql(sql_query_string):
        """Logs an sql query to the logger at debug level.

        This uses the log_multiline utility function from EOtools.utils.
        sql_query_string is as returned from cursor.mogrify."""
        # log_multiline emits each line of the query separately with the
        # given title/prefix so multi-line SQL stays readable in the log.
        log_multiline(LOGGER.debug, sql_query_string,
                      title='SQL', prefix='\t')
#
# Queries and Commands
#
    def turn_off_autocommit(self):
        """Turns autocommit off for the database connection.

        Returns the old commit mode in a form suitable for passing to
        the restore_commit_mode method. Note that changing commit mode
        must be done outside a transaction."""
        # Capture (autocommit, isolation_level) so the caller can restore
        # the connection afterwards via restore_commit_mode.
        old_commit_mode = (self.conn.autocommit, self.conn.isolation_level)

        self.conn.autocommit = False
        self.conn.set_isolation_level(ISOLATION_LEVEL_READ_COMMITTED)

        return old_commit_mode
    def turn_on_autocommit(self):
        """Turns autocommit on for the database connection.

        Returns the old commit mode in a form suitable for passing to
        the restore_commit_mode method. Note that changing commit mode
        must be done outside a transaction."""
        # Capture (autocommit, isolation_level) so the caller can restore
        # the connection afterwards via restore_commit_mode.
        old_commit_mode = (self.conn.autocommit, self.conn.isolation_level)

        self.conn.autocommit = True
        self.conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)

        return old_commit_mode
def restore_commit_mode(self, commit_mode):
"""Restores the commit mode of the database connection.
The commit mode passed in should have come from either
the turn_off_autocommit or turn_on_autocommit method.
This method will then restore the connection commit\
mode to what is was before."""
(autocommit, isolation_level) = commit_mode
self.conn.autocommit = autocommit
self.conn.set_isolation_level(isolation_level)
def get_satellite_id(self, satellite_tag):
"""Finds a satellite_id in the database.
This method returns a satellite_id found by matching the
satellite_tag in the database, or None if it cannot be
found."""
sql = ("SELECT satellite_id FROM satellite\n" +
"WHERE satellite_tag = %s;")
params = (satellite_tag,)
result = self.execute_sql_single(sql, params)
satellite_id = result[0] if result else None
return satellite_id
    def get_sensor_id(self, satellite_id, sensor_name):
        """Finds a sensor_id in the database.

        This method returns a sensor_id found by matching the
        satellite_id, sensor_name pair in the database, or None if such
        a pair cannot be found."""
        sql = ("SELECT sensor_id FROM sensor\n" +
               "WHERE satellite_id = %s AND\n" +
               "    sensor_name = %s;")
        params = (satellite_id, sensor_name)
        result = self.execute_sql_single(sql, params)
        # execute_sql_single returns None when there is no matching row.
        sensor_id = result[0] if result else None

        return sensor_id
def get_level_id(self, level_name):
"""Finds a (processing) level_id in the database.
This method returns a level_id found by matching the level_name
in the database, or None if it cannot be found."""
sql = ("SELECT level_id FROM processing_level\n" +
"WHERE level_name = %s;")
params = (level_name,)
result = self.execute_sql_single(sql, params)
level_id = result[0] if result else None
return level_id
    def get_acquisition_id_exact(self, acquisition_dict):
        """Finds the id of an acquisition record in the database.

        Returns an acquisition_id if a record matching the key fields in
        acquisition_dict is found, None otherwise. The key fields are:
            satellite_id, sensor_id, x_ref, y_ref, start_datetime,
            and end_datetime.
        The acquisition_dict must contain values for all of these.

        This query requires an exact match for the start and end datetimes.
        """
        # x_ref/y_ref may legitimately be None, so the SQL is assembled
        # with "IS NULL" clauses in that case ("= NULL" never matches).
        sql = ("SELECT acquisition_id FROM acquisition\n" +
               "WHERE satellite_id = %(satellite_id)s AND\n" +
               "    sensor_id = %(sensor_id)s AND\n" +
               ("    x_ref = %(x_ref)s AND\n" if acquisition_dict['x_ref'] is not None else "    x_ref is null AND\n") +
               ("    y_ref = %(y_ref)s AND\n" if acquisition_dict['y_ref'] is not None else "    y_ref is null AND\n") +
               "    start_datetime = %(start_datetime)s AND\n" +
               "    end_datetime = %(end_datetime)s;")
        result = self.execute_sql_single(sql, acquisition_dict)
        acquisition_id = result[0] if result else None

        return acquisition_id
    def get_acquisition_id_fuzzy(self, acquisition_dict):
        """Finds the id of an acquisition record in the database.

        Returns an acquisition_id if a record matching the key fields in
        acquisition_dict is found, None otherwise. The key fields are:
            satellite_id, sensor_id, x_ref, y_ref, start_datetime,
            and end_datetime.
        The acquisition_dict must contain values for all of these.

        This query uses an approximate match for the start and end datetimes,
        allowing them to differ by +- FUZZY_MATCH_PERCENTAGE of the
        acquisition length.
        """
        # Tolerance is a fraction of the acquisition duration.
        # NOTE(review): FUZZY_MATCH_PERCENTAGE is currently 0, so delta is
        # zero and this degenerates to an exact datetime match -- confirm.
        aq_length = (acquisition_dict['end_datetime'] -
                     acquisition_dict['start_datetime'])
        delta = (aq_length*self.FUZZY_MATCH_PERCENTAGE)/100
        params = dict(acquisition_dict)
        params['delta'] = delta
        # x_ref/y_ref may legitimately be None; use "IS NULL" in that case.
        sql = ("SELECT acquisition_id FROM acquisition\n" +
               "WHERE satellite_id = %(satellite_id)s AND\n" +
               "    sensor_id = %(sensor_id)s AND\n" +
               ("    x_ref = %(x_ref)s AND\n" if params['x_ref'] is not None else "    x_ref is null AND\n") +
               ("    y_ref = %(y_ref)s AND\n" if params['y_ref'] is not None else "    y_ref is null AND\n") +
               "    start_datetime BETWEEN\n" +
               "        %(start_datetime)s - %(delta)s AND\n" +
               "        %(start_datetime)s + %(delta)s AND\n" +
               "    end_datetime BETWEEN\n" +
               "        %(end_datetime)s - %(delta)s AND\n" +
               "        %(end_datetime)s + %(delta)s;")
        result = self.execute_sql_single(sql, params)
        acquisition_id = result[0] if result else None

        return acquisition_id
    def insert_acquisition_record(self, acquisition_dict):
        """Creates a new acquisition record in the database.

        The values of the fields in the new record are taken from
        acquisition_dict (keyed by column name). Returns the
        acquisition_id of the new record, which is drawn from the
        acquisition_id_seq sequence rather than from the dictionary.
        """
        # Columns to be inserted. If gcp_count or mtl_text are empty, we
        # exclude them from the list, so they pick up the defaults instead.
        column_list = ['acquisition_id',
                       'satellite_id',
                       'sensor_id',
                       'x_ref',
                       'y_ref',
                       'start_datetime',
                       'end_datetime',
                       'll_lon',
                       'll_lat',
                       'lr_lon',
                       'lr_lat',
                       'ul_lon',
                       'ul_lat',
                       'ur_lon',
                       'ur_lat'
                       ]
        if acquisition_dict['gcp_count'] is not None:
            column_list.append('gcp_count')
        if acquisition_dict['mtl_text'] is not None:
            column_list.append('mtl_text')
        columns = "(" + ",\n".join(column_list) + ")"

        # Values are taken from the acquisition_dict, with keys the same
        # as the column name, except for acquisition_id, which is the next
        # value in the acquisition_id_seq sequence.
        value_list = []
        for column in column_list:
            if column == 'acquisition_id':
                value_list.append("nextval('acquisition_id_seq')")
            else:
                value_list.append("%(" + column + ")s")
        values = "(" + ",\n".join(value_list) + ")"

        sql = ("INSERT INTO acquisition " + columns + "\n" +
               "VALUES " + values + "\n" +
               "RETURNING acquisition_id;")

        result = self.execute_sql_single(sql, acquisition_dict)
        acquisition_id = result[0]

        return acquisition_id
def get_dataset_id(self, dataset_dict):
"""Finds the id of a dataset record in the database.
Returns a dataset_id if a record metching the key fields in
dataset_dict is found, None otherwise. The key fields are:
aquisition_id and level_id.
The dataset_dict must contain values for both of these."""
sql = ("SELECT dataset_id FROM dataset\n" +
"WHERE acquisition_id = %(acquisition_id)s AND\n" +
" level_id = %(level_id)s;")
result = self.execute_sql_single(sql, dataset_dict)
dataset_id = result[0] if result else None
return dataset_id
def dataset_older_than_database(self, dataset_id,
disk_datetime_processed,
tile_class_filter=None):
"""Compares the datetime_processed of the dataset on disk with that on
the database. The database time is the earliest of either the
datetime_processed field from the dataset table or the earliest
tile.ctime field for the dataset's tiles. Tiles considered are
restricted to those with tile_class_ids listed in tile_class_filter
if it is non-empty.
Returns tuple
(disk_datetime_processed, database_datetime_processed, tile_ingested_datetime)
if no ingestion required
or None if ingestion is required
"""
sql_dtp = ("SELECT datetime_processed FROM dataset\n" +
"WHERE dataset_id = %s;")
result = self.execute_sql_single(sql_dtp, (dataset_id,))
database_datetime_processed = result[0]
if database_datetime_processed < disk_datetime_processed:
return None
# The database's dataset record is newer that what is on disk.
# Consider whether the tile record's are older than dataset on disk.
# Make the dataset's datetime_processed timezone-aware.
utc = pytz.timezone("UTC")
disk_datetime_processed = utc.localize(disk_datetime_processed)
sql_ctime = ("SELECT MIN(ctime) FROM tile\n" +
"WHERE dataset_id = %(dataset_id)s\n" +
("AND tile_class_id IN %(tile_class_filter)s\n" if
tile_class_filter else "") +
";"
)
params = {'dataset_id': dataset_id,
'tile_class_filter': tuple(tile_class_filter)
}
result = self.execute_sql_single(sql_ctime, params)
min_ctime = result[0]
if min_ctime is None:
return None
if min_ctime < disk_datetime_processed:
return None
# The dataset on disk is more recent than the database records and
# should be re-ingested. Return tuple containing relevant times
return (disk_datetime_processed, utc.localize(database_datetime_processed), min_ctime)
    def insert_dataset_record(self, dataset_dict):
        """Creates a new dataset record in the database.

        The values of the fields in the new record are taken from
        dataset_dict (keyed by column name). Returns the dataset_id of
        the new record, which is drawn from the dataset_id_seq sequence
        rather than from the dictionary.
        """
        # Columns to be inserted.
        column_list = ['dataset_id',
                       'acquisition_id',
                       'dataset_path',
                       'level_id',
                       'datetime_processed',
                       'dataset_size',
                       'crs',
                       'll_x',
                       'll_y',
                       'lr_x',
                       'lr_y',
                       'ul_x',
                       'ul_y',
                       'ur_x',
                       'ur_y',
                       'x_pixels',
                       'y_pixels',
                       'xml_text']
        columns = "(" + ",\n".join(column_list) + ")"

        # Values are taken from the dataset_dict, with keys the same
        # as the column name, except for dataset_id, which is the next
        # value in the dataset_id_seq sequence.
        value_list = []
        for column in column_list:
            if column == 'dataset_id':
                value_list.append("nextval('dataset_id_seq')")
            else:
                value_list.append("%(" + column + ")s")
        values = "(" + ",\n".join(value_list) + ")"

        sql = ("INSERT INTO dataset " + columns + "\n" +
               "VALUES " + values + "\n" +
               "RETURNING dataset_id;")

        result = self.execute_sql_single(sql, dataset_dict)
        dataset_id = result[0]

        return dataset_id
    def update_dataset_record(self, dataset_dict):
        """Updates an existing dataset record in the database.

        The record to update is identified by dataset_id, which must be
        present in dataset_dict. Its non-key fields are updated to match
        the values in dataset_dict.
        """
        # Columns to be updated
        column_list = ['dataset_path',
                       'datetime_processed',
                       'dataset_size',
                       'crs',
                       'll_x',
                       'll_y',
                       'lr_x',
                       'lr_y',
                       'ul_x',
                       'ul_y',
                       'ur_x',
                       'ur_y',
                       'x_pixels',
                       'y_pixels',
                       'xml_text']
        assign_list = [(col + " = %(" + col + ")s") for col in column_list]
        assignments = ",\n".join(assign_list)

        # RETURNING makes execute_sql_single yield a row so the statement
        # is verified to have matched a record.
        sql = ("UPDATE dataset\n" +
               "SET " + assignments + "\n" +
               "WHERE dataset_id = %(dataset_id)s" + "\n" +
               "RETURNING dataset_id;")

        self.execute_sql_single(sql, dataset_dict)
    def get_dataset_tile_ids(self, dataset_id, tile_class_filter=()):
        """Returns a list of tile_ids associated with a dataset.

        If tile_class_filter is not an empty tuple then the tile_ids returned
        are restricted to those with tile_class_ids that match the
        tile_class_filter. Otherwise all tile_ids for the dataset are
        returned."""
        sql = ("SELECT tile_id FROM tile\n" +
               "WHERE dataset_id = %(dataset_id)s\n" +
               ("AND tile_class_id IN %(tile_class_filter)s\n" if
                tile_class_filter else "") +
               "ORDER By tile_id;"
               )
        # tuple() of the default () is harmless; the parameter is only
        # referenced by the SQL when the filter is non-empty.
        params = {'dataset_id': dataset_id,
                  'tile_class_filter': tuple(tile_class_filter)
                  }
        result = self.execute_sql_multi(sql, params)
        tile_id_list = [tup[0] for tup in result]

        return tile_id_list
def get_tile_pathname(self, tile_id):
"""Returns the pathname for a tile."""
sql = ("SELECT tile_pathname FROM tile\n" +
"WHERE tile_id = %s;")
result = self.execute_sql_single(sql, (tile_id,))
tile_pathname = result[0]
return tile_pathname
def remove_tile_record(self, tile_id):
"""Removes a tile record from the database."""
sql = "DELETE FROM tile WHERE tile_id = %s RETURNING tile_id;"
self.execute_sql_single(sql, (tile_id,))
    def get_tile_id(self, tile_dict):
        """Finds the id of a tile record in the database.

        Returns a tile_id if a record matching the key fields in
        tile_dict is found, None otherwise. The key fields are:
            dataset_id, x_index, y_index, and tile_type_id.
        The tile_dict must contain values for all of these."""
        sql = ("SELECT tile_id FROM tile\n" +
               "WHERE dataset_id = %(dataset_id)s AND\n" +
               "    x_index = %(x_index)s AND\n" +
               "    y_index = %(y_index)s AND\n" +
               "    tile_type_id = %(tile_type_id)s;")
        result = self.execute_sql_single(sql, tile_dict)
        # execute_sql_single returns None when there is no matching row.
        tile_id = result[0] if result else None

        return tile_id
def tile_footprint_exists(self, tile_dict):
"""Check the tile footprint table for an existing entry.
The table is checked for existing entry with combination
(x_index, y_index, tile_type_id). Returns True if such an entry
exists and False otherwise.
"""
sql = ("SELECT 1 FROM tile_footprint\n" +
"WHERE x_index = %(x_index)s AND\n" +
" y_index = %(y_index)s AND\n" +
" tile_type_id = %(tile_type_id)s;")
result = self.execute_sql_single(sql, tile_dict)
footprint_exists = True if result else False
return footprint_exists
    def insert_tile_footprint(self, footprint_dict):
        """Inserts an entry into the tile_footprint table of the database.

        footprint_dict must provide values keyed by column name for all
        columns in column_list below; the 'bbox' column is inserted as
        NULL for now (see TODO).
        """
        # TODO Use Alex's code in email to generate bbox
        # Columns to be updated
        column_list = ['x_index',
                       'y_index',
                       'tile_type_id',
                       'x_min',
                       'y_min',
                       'x_max',
                       'y_max',
                       'bbox']
        columns = "(" + ",\n".join(column_list) + ")"

        value_list = []
        for column in column_list:
            if column == 'bbox':
                value_list.append('NULL')
            else:
                value_list.append("%(" + column + ")s")
        values = "(" + ",\n".join(value_list) + ")"

        sql = ("INSERT INTO tile_footprint " + columns + "\n" +
               "VALUES " + values + "\n" +
               "RETURNING x_index;")

        self.execute_sql_single(sql, footprint_dict)
    def insert_tile_record(self, tile_dict):
        """Creates a new tile record in the database.

        The values of the fields in the new record are taken from
        tile_dict (keyed by column name), except tile_id (drawn from the
        tile_id_seq sequence) and ctime (set to now() by the database).
        Returns the tile_id of the new record."""
        column_list = ['tile_id',
                       'x_index',
                       'y_index',
                       'tile_type_id',
                       'dataset_id',
                       'tile_pathname',
                       'tile_class_id',
                       'tile_size',
                       'ctime']
        columns = "(" + ",\n".join(column_list) + ")"

        # Values are taken from the tile_dict, with keys the same
        # as the column name, except for tile_id, which is the next
        # value in the dataset_id_seq sequence.
        value_list = []
        for column in column_list:
            if column == 'tile_id':
                value_list.append("nextval('tile_id_seq')")
            elif column == 'ctime':
                value_list.append('now()')
            else:
                value_list.append("%(" + column + ")s")
        values = "(" + ",\n".join(value_list) + ")"

        sql = ("INSERT INTO tile " + columns + "\n" +
               "VALUES " + values + "\n" +
               "RETURNING tile_id;")

        result = self.execute_sql_single(sql, tile_dict)
        tile_id = result[0]

        return tile_id
    def get_overlapping_dataset_ids(self,
                                    dataset_id,
                                    delta_t=ONE_HOUR,
                                    tile_class_filter=(1, 3)):
        """Return dataset ids for overlapping datasets (incuding this dataset)

        Given an original dataset specified by 'dataset_id', return the list
        of dataset_ids for datasets that overlap this one. An overlap occurs
        when a tile belonging to a target dataset overlaps in space and
        time with one from the orignal dataset. 'delta_t' sets the tolerance
        for detecting time overlaps. It should be a python datetime.timedelta
        object (obtainable by constructor or by subtracting two datetimes).
        Only tiles of a class present in the tuple 'tile_class_filter' are
        considered. Note that if the original dataset has no tiles of the
        relevent types an empty list will be returned. Otherwise the list
        will contain at least the original dataset id.

        NOTE(review): pass an empty tuple, not None, to disable filtering --
        tuple(None) below would raise TypeError.
        """
        # Overlap = same footprint (x_index, y_index, tile_type), same
        # processing level and satellite, and acquisition start or end
        # within delta_t of the original acquisition's time span.
        sql = ("SELECT DISTINCT od.dataset_id\n" +
               "FROM dataset d\n" +
               "INNER JOIN tile t USING (dataset_id)\n" +
               "INNER JOIN acquisition a USING (acquisition_id)\n" +
               "INNER JOIN tile o ON\n" +
               "    o.x_index = t.x_index AND\n" +
               "    o.y_index = t.y_index AND\n" +
               "    o.tile_type_id = t.tile_type_id\n" +
               "INNER JOIN dataset od ON\n" +
               "    od.dataset_id = o.dataset_id AND\n" +
               "    od.level_id = d.level_id\n" +
               "INNER JOIN acquisition oa ON\n" +
               "    oa.acquisition_id = od.acquisition_id AND\n" +
               "    oa.satellite_id = a.satellite_id\n" +
               "WHERE\n" +
               "    d.dataset_id = %(dataset_id)s\n" +
               ("    AND t.tile_class_id IN %(tile_class_filter)s\n" if
                tile_class_filter else "") +
               ("    AND o.tile_class_id IN %(tile_class_filter)s\n" if
                tile_class_filter else "") +
               "    AND (\n" +
               "        (oa.start_datetime BETWEEN\n" +
               "         a.start_datetime - %(delta_t)s AND\n" +
               "         a.end_datetime + %(delta_t)s)\n" +
               "        OR\n" +
               "        (oa.end_datetime BETWEEN\n" +
               "         a.start_datetime - %(delta_t)s AND\n" +
               "         a.end_datetime + %(delta_t)s)\n" +
               "        )\n" +
               "ORDER BY od.dataset_id;")
        params = {'dataset_id': dataset_id,
                  'delta_t': delta_t,
                  'tile_class_filter': tuple(tile_class_filter)
                  }
        result = self.execute_sql_multi(sql, params)
        dataset_id_list = [tup[0] for tup in result]

        return dataset_id_list
def get_overlapping_tiles_for_dataset(self,
dataset_id,
delta_t=ONE_HOUR,
input_tile_class_filter=None,
output_tile_class_filter=None,
dataset_filter=None):
"""Return a nested dictonary for the tiles overlapping a dataset.
The top level dictonary is keyed by tile footprint (x_index, y_index,
tile_type_id). Each entry is a list of tile records. Each tile record
is a dictonary with entries for tile_id, dataset_id, tile_class,
tile_pathname, and ctime.
Arguments:
dataset_id: id of the dataset to act as the base for the query.
The input tiles are the ones associated with this dataset.
delta_t: The tolerance used to detect overlaps in time. This
should be a python timedelta object (from the datatime module).
input_tile_class_filter: A tuple of tile_class_ids to restrict
the input tiles. If non-empty, input tiles not matching these
will be ignored.
output_tile_class_filter: A tuple of tile_class_ids to restrict
the output tiles. If non-empty, output tiles not matching these
will be ignored.
dataset_filter: A tuple of dataset_ids to restrict the datasets
that the output tiles belong to. If non-empty, output tiles
not from these datasets will be ignored. Used to avoid
operating on tiles belonging to non-locked datasets.
"""
sql = ("SELECT DISTINCT o.tile_id, o.x_index, o.y_index,\n" +
" o.tile_type_id, o.dataset_id, o.tile_pathname,\n" +
" o.tile_class_id, o.tile_size, o.ctime,\n" +
" oa.start_datetime\n" +
"FROM tile t\n" +
"INNER JOIN dataset d USING (dataset_id)\n" +
"INNER JOIN acquisition a USING (acquisition_id)\n" +
"INNER JOIN tile o ON\n" +
" o.x_index = t.x_index AND\n" +
" o.y_index = t.y_index AND\n" +
" o.tile_type_id = t.tile_type_id\n" +
"INNER JOIN dataset od ON\n" +
" od.dataset_id = o.dataset_id AND\n" +
" od.level_id = d.level_id\n" +
"INNER JOIN acquisition oa ON\n" +
" oa.acquisition_id = od.acquisition_id AND\n" +
" oa.satellite_id = a.satellite_id\n" +
"WHERE\n" +
" d.dataset_id = %(dataset_id)s\n" +
(" AND od.dataset_id IN %(dataset_filter)s\n" if
dataset_filter else "") +
(" AND t.tile_class_id IN %(input_tile_class_filter)s\n" if
input_tile_class_filter else "") +
(" AND o.tile_class_id IN %(output_tile_class_filter)s\n" if
output_tile_class_filter else "") +
" AND (\n" +
" (oa.start_datetime BETWEEN\n" +
" a.start_datetime - %(delta_t)s AND\n" +
" a.end_datetime + %(delta_t)s)\n" +
" OR\n" +
" (oa.end_datetime BETWEEN\n" +
" a.start_datetime - %(delta_t)s AND\n" +
" a.end_datetime + %(delta_t)s)\n" +
" )\n" +
"ORDER BY oa.start_datetime;"
)
params = {'dataset_id': dataset_id,
'delta_t': delta_t,
'input_tile_class_filter': tuple(input_tile_class_filter),
'output_tile_class_filter': tuple(output_tile_class_filter),
'dataset_filter': tuple(dataset_filter)
}
result = self.execute_sql_multi(sql, params)
overlap_dict = {}
for record in result:
tile_footprint = tuple(record[1:4])
tile_record = {'tile_id': record[0],
'x_index': record[1],
'y_index': record[2],
'tile_type_id': record[3],
'dataset_id': record[4],
'tile_pathname': record[5],
'tile_class_id': record[6],
'tile_size': record[7],
'ctime': record[8]
}
if tile_footprint not in overlap_dict:
overlap_dict[tile_footprint] = []
overlap_dict[tile_footprint].append(tile_record)
return overlap_dict
def update_tile_class(self, tile_id, new_tile_class_id):
"""Update the tile_class_id of a tile to a new value."""
sql = ("UPDATE tile\n" +
"SET tile_class_id = %(new_tile_class_id)s\n" +
"WHERE tile_id = %(tile_id)s\n" +
"RETURNING tile_id;"
)
params = {'tile_id': tile_id,
'new_tile_class_id': new_tile_class_id
}
self.execute_sql_single(sql, params)
| ama-jharrison/agdc | agdc/agdc/abstract_ingester/ingest_db_wrapper.py | Python | apache-2.0 | 30,969 |
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally import exceptions
from rally.plugins.openstack.context.keystone import roles
from tests.unit import fakes
from tests.unit import test
CTX = "rally.plugins.openstack.context.keystone.roles"
class RoleGeneratorTestCase(test.TestCase):
    # Tests for roles.RoleGenerator: the context that grants the configured
    # Keystone roles to every tenant user on setup() and revokes them again
    # on cleanup.
    def create_default_roles_and_patch_add_remove_functions(self, fc):
        """Pre-create the two test roles and mock out the grant/revoke calls.

        :param fc: fakes.FakeClients instance backing the mocked osclients
        """
        fc.keystone().roles.add_user_role = mock.MagicMock()
        fc.keystone().roles.remove_user_role = mock.MagicMock()
        fc.keystone().roles.create("r1", "test_role1")
        fc.keystone().roles.create("r2", "test_role2")
        self.assertEqual(2, len(fc.keystone().roles.list()))
    @property
    def context(self):
        # Minimal context dict: two role names to assign, plus a mocked
        # admin credential and task.
        return {
            "config": {
                "roles": [
                    "test_role1",
                    "test_role2"
                ]
            },
            "admin": {"credential": mock.MagicMock()},
            "task": mock.MagicMock()
        }
    @mock.patch("%s.osclients" % CTX)
    def test_add_role(self, mock_osclients):
        # _add_role resolves a role name to its {"id", "name"} dict.
        fc = fakes.FakeClients()
        mock_osclients.Clients.return_value = fc
        self.create_default_roles_and_patch_add_remove_functions(fc)
        ctx = roles.RoleGenerator(self.context)
        ctx.context["users"] = [{"id": "u1", "tenant_id": "t1"},
                                {"id": "u2", "tenant_id": "t2"}]
        result = ctx._add_role(mock.MagicMock(),
                               self.context["config"]["roles"][0])
        expected = {"id": "r1", "name": "test_role1"}
        self.assertEqual(expected, result)
    @mock.patch("%s.osclients" % CTX)
    def test_add_role_which_does_not_exist(self, mock_osclients):
        # An unknown role name must raise NoSuchRole with a clear message.
        fc = fakes.FakeClients()
        mock_osclients.Clients.return_value = fc
        self.create_default_roles_and_patch_add_remove_functions(fc)
        ctx = roles.RoleGenerator(self.context)
        ctx.context["users"] = [{"id": "u1", "tenant_id": "t1"},
                                {"id": "u2", "tenant_id": "t2"}]
        ex = self.assertRaises(exceptions.NoSuchRole, ctx._add_role,
                               mock.MagicMock(), "unknown_role")
        expected = "There is no role with name `unknown_role`."
        self.assertEqual(expected, str(ex))
    @mock.patch("%s.osclients" % CTX)
    def test_remove_role(self, mock_osclients):
        # _remove_role must revoke the role from every user in its tenant.
        role = mock.MagicMock()
        fc = fakes.FakeClients()
        mock_osclients.Clients.return_value = fc
        self.create_default_roles_and_patch_add_remove_functions(fc)
        ctx = roles.RoleGenerator(self.context)
        ctx.context["users"] = [{"id": "u1", "tenant_id": "t1"},
                                {"id": "u2", "tenant_id": "t2"}]
        ctx._remove_role(mock.MagicMock(), role)
        calls = [
            mock.call("u1", role["id"], tenant="t1"),
            mock.call("u2", role["id"], tenant="t2"),
        ]
        mock_keystone = mock_osclients.Clients().keystone()
        mock_keystone.roles.remove_user_role.assert_has_calls(calls)
    @mock.patch("%s.osclients" % CTX)
    def test_setup_and_cleanup(self, mock_osclients):
        fc = fakes.FakeClients()
        mock_osclients.Clients.return_value = fc
        self.create_default_roles_and_patch_add_remove_functions(fc)
        with roles.RoleGenerator(self.context) as ctx:
            ctx.context["users"] = [{"id": "u1", "tenant_id": "t1"},
                                    {"id": "u2", "tenant_id": "t2"}]
            # setup() grants each configured role to every user (2 x 2 calls)
            # and records the resolved roles in ctx.context["roles"].
            ctx.setup()
            calls = [
                mock.call("u1", "r1", tenant="t1"),
                mock.call("u2", "r1", tenant="t2"),
                mock.call("u1", "r2", tenant="t1"),
                mock.call("u2", "r2", tenant="t2")
            ]
            fc.keystone().roles.add_user_role.assert_has_calls(calls)
            self.assertEqual(
                4, fc.keystone().roles.add_user_role.call_count)
            self.assertEqual(
                0, fc.keystone().roles.remove_user_role.call_count)
            self.assertEqual(2, len(ctx.context["roles"]))
            self.assertEqual(2, len(fc.keystone().roles.list()))
        # Cleanup (called on context manager exit) revokes the same grants;
        # the roles themselves are left in place.
        self.assertEqual(2, len(fc.keystone().roles.list()))
        self.assertEqual(4, fc.keystone().roles.add_user_role.call_count)
        self.assertEqual(4, fc.keystone().roles.remove_user_role.call_count)
        calls = [
            mock.call("u1", "r1", tenant="t1"),
            mock.call("u2", "r1", tenant="t2"),
            mock.call("u1", "r2", tenant="t1"),
            mock.call("u2", "r2", tenant="t2")
        ]
        fc.keystone().roles.remove_user_role.assert_has_calls(calls)
| amit0701/rally | tests/unit/plugins/openstack/context/keystone/test_roles.py | Python | apache-2.0 | 5,275 |
/*
* Copyright (C) 2014-2016 LinkedIn Corp. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use
* this file except in compliance with the License. You may obtain a copy of the
* License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed
* under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied.
*/
package gobblin.runtime.cli;
/**
 * An application that can be called by {@link GobblinCli}.
 */
public interface CliApplication {
  /**
   * Entry point invoked by the CLI driver.
   *
   * @param args command line arguments for this application; the exact
   *             contents are determined by the caller (see {@link GobblinCli})
   */
  void run(String[] args);
}
| yukuai518/gobblin | gobblin-runtime/src/main/java/gobblin/runtime/cli/CliApplication.java | Java | apache-2.0 | 678 |
package utils;
import com.leapmotion.leap.Vector;
/**
*
* @author Jose Pereda - August 2014 - @JPeredaDnr
*/
public class Pair {

    // A pair of joints (Leap Motion vectors) used to join the proximal
    // ends of two bones.
    private Vector v0;
    private Vector v1;

    /** Creates a pair from the two joint positions. */
    public Pair(Vector v0, Vector v1) {
        this.v0 = v0;
        this.v1 = v1;
    }

    public Vector getV0() {
        return v0;
    }

    public void setV0(Vector v0) {
        this.v0 = v0;
    }

    public Vector getV1() {
        return v1;
    }

    public void setV1(Vector v1) {
        this.v1 = v1;
    }

    /** Midpoint between the two joints. */
    public Vector getCenter() {
        float cx = (v0.getX() + v1.getX()) / 2f;
        float cy = (v0.getY() + v1.getY()) / 2f;
        float cz = (v0.getZ() + v1.getZ()) / 2f;
        return new Vector(cx, cy, cz);
    }

    /** Unit vector pointing from v0 towards v1. */
    public Vector getDirection() {
        float dx = v1.getX() - v0.getX();
        float dy = v1.getY() - v0.getY();
        float dz = v1.getZ() - v0.getZ();
        return new Vector(dx, dy, dz).normalized();
    }

    @Override
    public String toString() {
        return "Pair{" + "v0=" + v0 + ", v1=" + v1 + '}';
    }
}
| jperedadnr/RiggedHand | src/utils/Pair.java | Java | apache-2.0 | 1,045 |
using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
// General information about an assembly is controlled through the following
// set of attributes. Change these attribute values to modify the information
// associated with an assembly.
[assembly: AssemblyTitle("SmsPushTester")]
[assembly: AssemblyDescription("")]
[assembly: AssemblyConfiguration("")]
[assembly: AssemblyCompany("Microsoft")]
[assembly: AssemblyProduct("SmsPushTester")]
[assembly: AssemblyCopyright("Copyright © Microsoft 2016")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
// Setting ComVisible to false makes the types in this assembly not visible
// to COM components. If you need to access a type in this assembly from COM,
// set the ComVisible attribute to true on that type.
[assembly: ComVisible(false)]
// The following GUID is for the ID of the typelib if this project is exposed to COM
[assembly: Guid("de910b03-d928-4caf-95ee-b865332729fb")]
// Version information for an assembly consists of the following four values:
//
//      Major Version
//      Minor Version
//      Build Number
//      Revision
//
// You can specify all the values, or you can default the Build and Revision
// numbers by using the '*' as shown below:
// [assembly: AssemblyVersion("1.0.*")]
[assembly: AssemblyVersion("1.0.0.0")]
[assembly: AssemblyFileVersion("1.0.0.0")]
| chenxihoho/FirstFrame | 008.PushMessage/Sms/SmsPushTester/SmsPushTester/Properties/AssemblyInfo.cs | C# | apache-2.0 | 1,368 |
/*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.uiDesigner.inspections;
import com.intellij.openapi.module.Module;
import com.intellij.uiDesigner.lw.IComponent;
import com.intellij.uiDesigner.lw.IRootContainer;
import com.intellij.uiDesigner.UIDesignerBundle;
import com.intellij.uiDesigner.FormEditingUtil;
import javax.annotation.Nonnull;
/**
* @author yole
*/
public class OneButtonGroupInspection extends BaseFormInspection {
  public OneButtonGroupInspection() {
    // Inspection ID used for registration and in suppression comments.
    super("OneButtonGroup");
  }
  @Nonnull
  @Override public String getDisplayName() {
    return UIDesignerBundle.message("inspection.one.button.group");
  }
  // Reports a button group that contains only a single component: grouping
  // one button has no effect, so it is almost certainly a form-design mistake.
  protected void checkComponentProperties(Module module, IComponent component, FormErrorCollector collector) {
    final IRootContainer root = FormEditingUtil.getRoot(component);
    if (root == null) return;
    String groupName = root.getButtonGroupName(component);
    if (groupName != null) {
      final String[] sameGroupComponents = root.getButtonGroupComponentIds(groupName);
      for(String id: sameGroupComponents) {
        final IComponent otherComponent = FormEditingUtil.findComponent(root, id);
        if (otherComponent != null && otherComponent != component) {
          // Another live member exists in the same group -> group is fine.
          return;
        }
      }
      // No other component shares the group: flag this component.
      collector.addError(getID(), component, null, UIDesignerBundle.message("inspection.one.button.group.error"));
    }
  }
}
| consulo/consulo-ui-designer | src/main/java/com/intellij/uiDesigner/inspections/OneButtonGroupInspection.java | Java | apache-2.0 | 1,953 |
import { Context, LegacyProcessor, ValidatedJobConfig } from '../../interfaces';
import ConvictSchema from '../convict-schema';
import { SchemaModule } from '../interfaces';
export default function schemaShim<S = any>(legacy: LegacyProcessor): SchemaModule {
    /**
     * Bridges a legacy processor's schema/validation functions onto the
     * newer ConvictSchema class API.
     */
    class LegacySchemaShim extends ConvictSchema<S> {
        // @ts-ignore
        validate(inputConfig: any) {
            const validated = super.validate(inputConfig);
            // Legacy processors may define an optional self-validation hook.
            if (legacy.selfValidation) {
                // @ts-ignore
                legacy.selfValidation(validated);
            }
            return validated;
        }

        validateJob(job: ValidatedJobConfig): void {
            // Cross-validation against the whole job config is also optional.
            if (legacy.crossValidation) {
                legacy.crossValidation(job, this.context.sysconfig);
            }
        }

        build(context?: Context) {
            return legacy.schema(context);
        }
    }

    return { Schema: LegacySchemaShim };
}
| jsnoble/teraslice | packages/job-components/src/operations/shims/schema-shim.ts | TypeScript | apache-2.0 | 994 |
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.storagegateway.model;
import java.io.Serializable;
import javax.annotation.Generated;
/**
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/AssignTapePool" target="_top">AWS API
* Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class AssignTapePoolResult extends com.amazonaws.AmazonWebServiceResult<com.amazonaws.ResponseMetadata> implements Serializable, Cloneable {

    /**
     * The unique Amazon Resource Name (ARN) of the virtual tape that was added to the tape pool.
     */
    private String tapeARN;

    /**
     * Sets the ARN of the virtual tape that was added to the tape pool.
     *
     * @param tapeARN
     *        the unique Amazon Resource Name (ARN) of the virtual tape
     */
    public void setTapeARN(String tapeARN) {
        this.tapeARN = tapeARN;
    }

    /**
     * @return the unique Amazon Resource Name (ARN) of the virtual tape that was added to the tape pool
     */
    public String getTapeARN() {
        return this.tapeARN;
    }

    /**
     * Fluent variant of {@link #setTapeARN(String)}.
     *
     * @param tapeARN
     *        the unique Amazon Resource Name (ARN) of the virtual tape
     * @return this object, so that method calls can be chained together
     */
    public AssignTapePoolResult withTapeARN(String tapeARN) {
        setTapeARN(tapeARN);
        return this;
    }

    /**
     * Returns a string representation of this object, useful for testing and
     * debugging.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder buf = new StringBuilder("{");
        if (getTapeARN() != null) {
            buf.append("TapeARN: ").append(getTapeARN());
        }
        return buf.append("}").toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (!(obj instanceof AssignTapePoolResult)) {
            return false;
        }
        AssignTapePoolResult other = (AssignTapePoolResult) obj;
        String mine = getTapeARN();
        String theirs = other.getTapeARN();
        // Null-safe comparison of the single significant field.
        return mine == null ? theirs == null : mine.equals(theirs);
    }

    @Override
    public int hashCode() {
        // Same value as the generated 31 * 1 + fieldHash scheme.
        String arn = getTapeARN();
        return 31 + (arn == null ? 0 : arn.hashCode());
    }

    @Override
    public AssignTapePoolResult clone() {
        try {
            return (AssignTapePoolResult) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() even though we're Cloneable!", e);
        }
    }
}
| aws/aws-sdk-java | aws-java-sdk-storagegateway/src/main/java/com/amazonaws/services/storagegateway/model/AssignTapePoolResult.java | Java | apache-2.0 | 4,084 |
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
<TS version="2.0" language="nl_NL">
<context>
<name>UDMX</name>
<message>
<location filename="udmx.cpp" line="101"/>
<source>This plugin provides DMX output support for Anyma uDMX devices.</source>
<translation>Deze plugin verzorgt DMX output voor Anyma uDMX apparaten.</translation>
</message>
<message>
<location filename="udmx.cpp" line="195"/>
<source>Do you wish to re-scan your hardware?</source>
<translation>Hardware opnieuw scannen?</translation>
</message>
</context>
<context>
<name>UDMXDevice</name>
<message>
<location filename="udmxdevice.cpp" line="114"/>
<source>Unknown</source>
<translation>Onbekend</translation>
</message>
<message>
<location filename="udmxdevice.cpp" line="135"/>
<source>DMX Frame Frequency</source>
<translation>DMX Frame frequentie</translation>
</message>
<message>
<location filename="udmxdevice.cpp" line="138"/>
<source>Bad</source>
<translation>Foutief</translation>
</message>
<message>
<location filename="udmxdevice.cpp" line="140"/>
<source>Good</source>
<translation>Goed</translation>
</message>
<message>
<location filename="udmxdevice.cpp" line="142"/>
<source>Patch this device to a universe to find out.</source>
<translation type="unfinished">Patch dit apparaat aan een universe.</translation>
</message>
<message>
<location filename="udmxdevice.cpp" line="143"/>
<source>System Timer Accuracy</source>
<translation>Systeemklok precisie</translation>
</message>
<message>
<location filename="udmxdevice.cpp" line="148"/>
<source>Unknown device</source>
<translation>Onbekend apparaat</translation>
</message>
<message>
<location filename="udmxdevice.cpp" line="150"/>
<source>Cannot connect to USB device.</source>
<translation>Verbinden met USB apparaat mislukt.</translation>
</message>
</context>
</TS>
| joepadmiraal/qlcplus | plugins/udmx/src/uDMX_nl_NL.ts | TypeScript | apache-2.0 | 2,144 |
/*
* Copyright 2012 Devoteam http://www.devoteam.com
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
*
* This file is part of Multi-Protocol Test Suite (MTS).
*
* Multi-Protocol Test Suite (MTS) is free software: you can redistribute
* it and/or modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, either version 3 of the
* License.
*
* Multi-Protocol Test Suite (MTS) is distributed in the hope that it will
* be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Multi-Protocol Test Suite (MTS).
* If not, see <http://www.gnu.org/licenses/>.
*
*/
package kafka.security;
import java.io.IOException;
import java.nio.channels.Channel;
/**
 * An interface to secure channels. It is usually recommended to perform the
 * initial SSL handshake with the separate handshake method, although the
 * implementation may support embedded handshaking within the read and write
 * methods. The shutdown process may also be started separately but should be
 * called automatically by the close method of the implementation.
 *
 * @author Ilkka Priha
 */
public interface SecureChannel extends Channel
{
    /**
     * Returns true if this channel is finished with handshaking.
     *
     * @return true if finished, false otherwise.
     */
    public boolean finished();

    /**
     * Returns the number of encrypted bytes not yet flushed.
     *
     * @return the number of encrypted bytes.
     */
    public int encrypted();

    /**
     * Returns the number of decrypted bytes not yet read.
     *
     * @return the number of decrypted bytes.
     */
    public int decrypted();

    /**
     * Starts or continues handshaking with the specified operations.
     *
     * @param ops the current ready operations set.
     * @return the interest set to continue or 0 if finished.
     * @throws IOException on I/O errors.
     */
    public int handshake(int ops) throws IOException;

    /**
     * Starts the shutdown sequence but does not close the channel.
     *
     * @return true if finished, false otherwise.
     * @throws IOException on I/O errors.
     */
    public boolean shutdown() throws IOException;

    /**
     * Flushes remaining encrypted bytes if any.
     *
     * @throws IOException on I/O errors.
     */
    public void flush() throws IOException;
}
| darrendao/kafka.play.time | security/src/main/java/kafka/security/SecureChannel.java | Java | apache-2.0 | 2,604 |
using System;
using System.Xml.Serialization;
namespace Aop.Api.Domain
{
    /// <summary>
    /// ZolozIdentificationCustomerCertifyQueryModel Data Structure.
    /// </summary>
    [Serializable]
    public class ZolozIdentificationCustomerCertifyQueryModel : AopObject
    {
        /// <summary>
        /// Business transaction number, used for reconciliation and troubleshooting.
        /// </summary>
        [XmlElement("biz_id")]
        public string BizId { get; set; }
        /// <summary>
        /// The zimId, used to query the identity-verification (certify) result.
        /// </summary>
        [XmlElement("zim_id")]
        public string ZimId { get; set; }
    }
}
| 329277920/Snail | Snail.Pay.Ali.Sdk/Domain/ZolozIdentificationCustomerCertifyQueryModel.cs | C# | apache-2.0 | 621 |
/*
* Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH
* under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. Camunda licenses this file to you under the Apache License,
* Version 2.0; you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.bpm.engine.test.api.task;
import static org.junit.Assert.assertEquals;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.camunda.bpm.engine.CaseService;
import org.camunda.bpm.engine.FilterService;
import org.camunda.bpm.engine.ProcessEngineException;
import org.camunda.bpm.engine.RepositoryService;
import org.camunda.bpm.engine.RuntimeService;
import org.camunda.bpm.engine.TaskService;
import org.camunda.bpm.engine.impl.TaskQueryImpl;
import org.camunda.bpm.engine.runtime.CaseInstance;
import org.camunda.bpm.engine.runtime.ProcessInstance;
import org.camunda.bpm.engine.task.Task;
import org.camunda.bpm.engine.task.TaskQuery;
import org.camunda.bpm.engine.test.Deployment;
import org.camunda.bpm.engine.test.ProcessEngineRule;
import org.camunda.bpm.model.bpmn.Bpmn;
import org.camunda.bpm.model.bpmn.BpmnModelInstance;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
/**
* @author Tassilo Weidner
*/
public class TaskQueryOrTest {
  // Bootstraps a full in-memory process engine for every test method.
  @Rule
  public ProcessEngineRule processEngineRule = new ProcessEngineRule(true);
  @Rule
  public ExpectedException thrown = ExpectedException.none();
  protected RuntimeService runtimeService;
  protected TaskService taskService;
  protected CaseService caseService;
  protected RepositoryService repositoryService;
  protected FilterService filterService;
  // Cache the engine services before each test.
  @Before
  public void init() {
    runtimeService = processEngineRule.getRuntimeService();
    taskService = processEngineRule.getTaskService();
    caseService = processEngineRule.getCaseService();
    repositoryService = processEngineRule.getRepositoryService();
    filterService = processEngineRule.getFilterService();
  }
  // Delete all deployments (cascading) and standalone tasks created by a
  // test so that subsequent tests start from a clean engine.
  @After
  public void tearDown() {
    for (org.camunda.bpm.engine.repository.Deployment deployment:
      repositoryService.createDeploymentQuery().list()) {
      repositoryService.deleteDeployment(deployment.getId(), true);
    }
    for (Task task: taskService.createTaskQuery().list()) {
      taskService.deleteTask(task.getId(), true);
    }
  }
  // --- Invalid or()/endOr() usage: each illegal combination must fail fast
  // --- with a descriptive ProcessEngineException.
  @Test
  public void shouldThrowExceptionByMissingStartOr() {
    thrown.expect(ProcessEngineException.class);
    thrown.expectMessage("Invalid query usage: cannot set endOr() before or()");
    // The second endOr() has no matching or().
    taskService.createTaskQuery()
      .or()
      .endOr()
      .endOr();
  }
  @Test
  public void shouldThrowExceptionByNesting() {
    thrown.expect(ProcessEngineException.class);
    thrown.expectMessage("Invalid query usage: cannot set or() within 'or' query");
    // or() inside an open or() block is rejected.
    taskService.createTaskQuery()
      .or()
      .or()
      .endOr()
      .endOr()
      .or()
      .endOr();
  }
  @Test
  public void shouldThrowExceptionByWithCandidateGroupsApplied() {
    thrown.expect(ProcessEngineException.class);
    thrown.expectMessage("Invalid query usage: cannot set withCandidateGroups() within 'or' query");
    taskService.createTaskQuery()
      .or()
      .withCandidateGroups()
      .endOr();
  }
  @Test
  public void shouldThrowExceptionByWithoutCandidateGroupsApplied() {
    thrown.expect(ProcessEngineException.class);
    thrown.expectMessage("Invalid query usage: cannot set withoutCandidateGroups() within 'or' query");
    taskService.createTaskQuery()
      .or()
      .withoutCandidateGroups()
      .endOr();
  }
  @Test
  public void shouldThrowExceptionByWithCandidateUsersApplied() {
    thrown.expect(ProcessEngineException.class);
    thrown.expectMessage("Invalid query usage: cannot set withCandidateUsers() within 'or' query");
    taskService.createTaskQuery()
      .or()
      .withCandidateUsers()
      .endOr();
  }
  @Test
  public void shouldThrowExceptionByWithoutCandidateUsersApplied() {
    thrown.expect(ProcessEngineException.class);
    thrown.expectMessage("Invalid query usage: cannot set withoutCandidateUsers() within 'or' query");
    taskService.createTaskQuery()
      .or()
      .withoutCandidateUsers()
      .endOr();
  }
  @Test
  public void shouldThrowExceptionByOrderingApplied() {
    thrown.expect(ProcessEngineException.class);
    thrown.expectMessage("Invalid query usage: cannot set orderByCaseExecutionId() within 'or' query");
    // Ordering applies to the whole query, never to one or() block.
    taskService.createTaskQuery()
      .or()
      .orderByCaseExecutionId()
      .endOr();
  }
  @Test
  public void shouldThrowExceptionByInitializeFormKeysInOrQuery() {
    thrown.expect(ProcessEngineException.class);
    thrown.expectMessage("Invalid query usage: cannot set initializeFormKeys() within 'or' query");
    taskService.createTaskQuery()
      .or()
      .initializeFormKeys()
      .endOr();
  }
  @Test
  public void shouldReturnNoTasksWithTaskCandidateUserAndOrTaskCandidateGroup() {
    // given
    Task task1 = taskService.newTask();
    taskService.saveTask(task1);
    taskService.addCandidateUser(task1.getId(), "aCandidateUser");
    Task task2 = taskService.newTask();
    taskService.saveTask(task2);
    taskService.addCandidateGroup(task2.getId(), "aCandidateGroup");
    // when: criteria outside the or() are ANDed with it; no task has both
    // the candidate user and the candidate group.
    List<Task> tasks = taskService.createTaskQuery()
      .taskCandidateUser("aCandidateUser")
      .or()
      .taskCandidateGroup("aCandidateGroup")
      .endOr()
      .list();
    // then
    assertEquals(0, tasks.size());
  }
  @Test
  public void shouldReturnTasksWithEmptyOrQuery() {
    // given
    taskService.saveTask(taskService.newTask());
    taskService.saveTask(taskService.newTask());
    // when: an empty or() block imposes no restriction at all
    List<Task> tasks = taskService.createTaskQuery()
      .or()
      .endOr()
      .list();
    // then
    assertEquals(2, tasks.size());
  }
  @Test
  public void shouldReturnTasksWithTaskCandidateUserOrTaskCandidateGroup() {
    // given
    Task task1 = taskService.newTask();
    taskService.saveTask(task1);
    taskService.addCandidateUser(task1.getId(), "John Doe");
    Task task2 = taskService.newTask();
    taskService.saveTask(task2);
    taskService.addCandidateGroup(task2.getId(), "Controlling");
    // when: inside or(), criteria are disjunctive — either match suffices
    List<Task> tasks = taskService.createTaskQuery()
      .or()
      .taskCandidateUser("John Doe")
      .taskCandidateGroup("Controlling")
      .endOr()
      .list();
    // then
    assertEquals(2, tasks.size());
  }
  @Test
  public void shouldReturnTasksWithTaskCandidateUserOrTaskCandidateGroupWithIncludeAssignedTasks() {
    // given: task1 is already assigned, which would normally exclude it
    // from candidate queries
    Task task1 = taskService.newTask();
    taskService.saveTask(task1);
    taskService.addCandidateUser(task1.getId(), "John Doe");
    taskService.setAssignee(task1.getId(), "John Doe");
    Task task2 = taskService.newTask();
    taskService.saveTask(task2);
    taskService.addCandidateGroup(task2.getId(), "Controlling");
    // when: includeAssignedTasks() applies to the candidate criteria in the or()
    List<Task> tasks = taskService.createTaskQuery()
      .or()
      .taskCandidateUser("John Doe")
      .taskCandidateGroup("Controlling")
      .includeAssignedTasks()
      .endOr()
      .list();
    // then
    assertEquals(2, tasks.size());
  }
  @Test
  public void shouldReturnTasksWithTaskCandidateUserOrAssignee() {
    // given
    Task task1 = taskService.newTask();
    taskService.saveTask(task1);
    taskService.setAssignee(task1.getId(), "John Doe");
    Task task2 = taskService.newTask();
    taskService.saveTask(task2);
    taskService.addCandidateUser(task2.getId(), "John Doe");
    // when: candidate and assignee criteria can be combined disjunctively
    List<Task> tasks = taskService.createTaskQuery()
      .or()
      .taskCandidateUser("John Doe")
      .taskAssignee("John Doe")
      .endOr()
      .list();
    // then
    assertEquals(2, tasks.size());
  }
  @Test
  public void shouldReturnTasksWithTaskCandidateUserOrTaskCandidateGroupIn() {
    // given
    Task task1 = taskService.newTask();
    taskService.saveTask(task1);
    taskService.addCandidateUser(task1.getId(), "John Doe");
    Task task2 = taskService.newTask();
    taskService.saveTask(task2);
    taskService.addCandidateGroup(task2.getId(), "Controlling");
    Task task3 = taskService.newTask();
    taskService.saveTask(task3);
    taskService.addCandidateGroup(task3.getId(), "Sales");
    // when: candidateUser OR membership in any of the listed groups
    List<Task> tasks = taskService.createTaskQuery()
      .or()
      .taskCandidateUser("John Doe")
      .taskCandidateGroupIn(Arrays.asList("Controlling", "Sales"))
      .endOr()
      .list();
    // then
    assertEquals(3, tasks.size());
  }
  @Test
  public void shouldReturnTasksWithTaskCandidateGroupOrTaskCandidateGroupIn() {
    // given
    Task task1 = taskService.newTask();
    taskService.saveTask(task1);
    taskService.addCandidateGroup(task1.getId(), "Accounting");
    Task task2 = taskService.newTask();
    taskService.saveTask(task2);
    taskService.addCandidateGroup(task2.getId(), "Controlling");
    Task task3 = taskService.newTask();
    taskService.saveTask(task3);
    taskService.addCandidateGroup(task3.getId(), "Sales");
    // when: single-group and group-list criteria combine disjunctively
    List<Task> tasks = taskService.createTaskQuery()
      .or()
      .taskCandidateGroup("Accounting")
      .taskCandidateGroupIn(Arrays.asList("Controlling", "Sales"))
      .endOr()
      .list();
    // then
    assertEquals(3, tasks.size());
  }
  @Test
  public void shouldReturnTasksWithTaskNameOrTaskDescription() {
    // given
    Task task1 = taskService.newTask();
    task1.setName("aTaskName");
    taskService.saveTask(task1);
    Task task2 = taskService.newTask();
    task2.setDescription("aTaskDescription");
    taskService.saveTask(task2);
    // when
    List<Task> tasks = taskService.createTaskQuery()
      .or()
      .taskName("aTaskName")
      .taskDescription("aTaskDescription")
      .endOr()
      .list();
    // then
    assertEquals(2, tasks.size());
  }
  @Test
  public void shouldReturnTasksWithMultipleOrCriteria() {
    // given: five tasks, each matching exactly one of the five criteria
    Task task1 = taskService.newTask();
    task1.setName("aTaskName");
    taskService.saveTask(task1);
    Task task2 = taskService.newTask();
    task2.setDescription("aTaskDescription");
    taskService.saveTask(task2);
    Task task3 = taskService.newTask();
    taskService.saveTask(task3);
    Task task4 = taskService.newTask();
    task4.setPriority(5);
    taskService.saveTask(task4);
    Task task5 = taskService.newTask();
    task5.setOwner("aTaskOwner");
    taskService.saveTask(task5);
    // when
    List<Task> tasks = taskService.createTaskQuery()
      .or()
      .taskName("aTaskName")
      .taskDescription("aTaskDescription")
      .taskId(task3.getId())
      .taskPriority(5)
      .taskOwner("aTaskOwner")
      .endOr()
      .list();
    // then
    assertEquals(5, tasks.size());
  }
  @Test
  public void shouldReturnTasksFilteredByMultipleOrAndCriteria() {
    // given
    Task task1 = taskService.newTask();
    task1.setPriority(4);
    taskService.saveTask(task1);
    Task task2 = taskService.newTask();
    task2.setName("aTaskName");
    task2.setOwner("aTaskOwner");
    task2.setAssignee("aTaskAssignee");
    task2.setPriority(4);
    taskService.saveTask(task2);
    Task task3 = taskService.newTask();
    task3.setName("aTaskName");
    task3.setOwner("aTaskOwner");
    task3.setAssignee("aTaskAssignee");
    task3.setPriority(4);
    task3.setDescription("aTaskDescription");
    taskService.saveTask(task3);
    Task task4 = taskService.newTask();
    task4.setOwner("aTaskOwner");
    task4.setAssignee("aTaskAssignee");
    task4.setPriority(4);
    task4.setDescription("aTaskDescription");
    taskService.saveTask(task4);
    Task task5 = taskService.newTask();
    task5.setDescription("aTaskDescription");
    task5.setOwner("aTaskOwner");
    taskService.saveTask(task5);
    // when: (name OR description OR id) AND owner AND priority AND assignee
    // -> only tasks 2, 3 and 4 satisfy all of the ANDed criteria
    List<Task> tasks = taskService.createTaskQuery()
      .or()
      .taskName("aTaskName")
      .taskDescription("aTaskDescription")
      .taskId(task3.getId())
      .endOr()
      .taskOwner("aTaskOwner")
      .taskPriority(4)
      .taskAssignee("aTaskAssignee")
      .list();
    // then
    assertEquals(3, tasks.size());
  }
  @Test
  public void shouldReturnTasksFilteredByMultipleOrQueries() {
    // given: six tasks with a growing set of matching attributes
    Task task1 = taskService.newTask();
    task1.setName("aTaskName");
    taskService.saveTask(task1);
    Task task2 = taskService.newTask();
    task2.setName("aTaskName");
    task2.setDescription("aTaskDescription");
    taskService.saveTask(task2);
    Task task3 = taskService.newTask();
    task3.setName("aTaskName");
    task3.setDescription("aTaskDescription");
    task3.setOwner("aTaskOwner");
    taskService.saveTask(task3);
    Task task4 = taskService.newTask();
    task4.setName("aTaskName");
    task4.setDescription("aTaskDescription");
    task4.setOwner("aTaskOwner");
    task4.setAssignee("aTaskAssignee");
    taskService.saveTask(task4);
    Task task5 = taskService.newTask();
    task5.setName("aTaskName");
    task5.setDescription("aTaskDescription");
    task5.setOwner("aTaskOwner");
    task5.setAssignee("aTaskAssignee");
    task5.setPriority(4);
    taskService.saveTask(task5);
    Task task6 = taskService.newTask();
    task6.setName("aTaskName");
    task6.setDescription("aTaskDescription");
    task6.setOwner("aTaskOwner");
    task6.setAssignee("aTaskAssignee");
    task6.setPriority(4);
    taskService.saveTask(task6);
    // when: multiple or() blocks are ANDed with each other, so a task has
    // to satisfy at least one criterion from every block (tasks 4, 5, 6)
    List<Task> tasks = taskService.createTaskQuery()
      .or()
      .taskName("aTaskName")
      .taskDescription("aTaskDescription")
      .endOr()
      .or()
      .taskName("aTaskName")
      .taskDescription("aTaskDescription")
      .taskAssignee("aTaskAssignee")
      .endOr()
      .or()
      .taskName("aTaskName")
      .taskDescription("aTaskDescription")
      .taskOwner("aTaskOwner")
      .taskAssignee("aTaskAssignee")
      .endOr()
      .or()
      .taskAssignee("aTaskAssignee")
      .taskPriority(4)
      .endOr()
      .list();
    // then
    assertEquals(3, tasks.size());
  }
  @Test
  public void shouldReturnTasksWhereSameCriterionWasAppliedThreeTimesInOneQuery() {
    // given
    Task task1 = taskService.newTask();
    taskService.saveTask(task1);
    taskService.addCandidateGroup(task1.getId(), "Accounting");
    Task task2 = taskService.newTask();
    taskService.saveTask(task2);
    taskService.addCandidateGroup(task2.getId(), "Controlling");
    Task task3 = taskService.newTask();
    taskService.saveTask(task3);
    taskService.addCandidateGroup(task3.getId(), "Sales");
    // when: the same criterion repeated within one or() does not accumulate —
    // only one value survives (presumably the last one set; confirm in
    // TaskQueryImpl), so a single task matches
    List<Task> tasks = taskService.createTaskQuery()
      .or()
      .taskCandidateGroup("Accounting")
      .taskCandidateGroup("Controlling")
      .taskCandidateGroup("Sales")
      .endOr()
      .list();
    // then
    assertEquals(1, tasks.size());
  }
  @Test
  public void shouldReturnTasksWithTaskVariableValueEqualsOrTaskVariableValueGreaterThan() {
    // given
    Task task1 = taskService.newTask();
    taskService.saveTask(task1);
    taskService.setVariable(task1.getId(),"aLongValue", 789L);
    Task task2 = taskService.newTask();
    taskService.saveTask(task2);
    taskService.setVariable(task2.getId(),"anEvenLongerValue", 1000L);
    // when: variable criteria are also disjunctive inside or()
    TaskQuery query = taskService.createTaskQuery()
      .or()
      .taskVariableValueEquals("aLongValue", 789L)
      .taskVariableValueGreaterThan("anEvenLongerValue", 999L)
      .endOr();
    // then
    assertEquals(2, query.count());
  }
  @Test
  public void shouldInitializeFormKeys() {
    // given: two deployed process definitions with distinct form keys
    BpmnModelInstance aProcessDefinition = Bpmn.createExecutableProcess("aProcessDefinition")
      .startEvent()
      .userTask()
      .camundaFormKey("aFormKey")
      .endEvent()
      .done();
    repositoryService
      .createDeployment()
      .addModelInstance("foo.bpmn", aProcessDefinition)
      .deploy();
    ProcessInstance processInstance1 = runtimeService
      .startProcessInstanceByKey("aProcessDefinition");
    BpmnModelInstance anotherProcessDefinition = Bpmn.createExecutableProcess("anotherProcessDefinition")
      .startEvent()
      .userTask()
      .camundaFormKey("anotherFormKey")
      .endEvent()
      .done();
    repositoryService
      .createDeployment()
      .addModelInstance("foo.bpmn", anotherProcessDefinition)
      .deploy();
    ProcessInstance processInstance2 = runtimeService
      .startProcessInstanceByKey("anotherProcessDefinition");
    // when: initializeFormKeys() outside the or() block is allowed
    List<Task> tasks = taskService.createTaskQuery()
      .or()
      .processDefinitionId(processInstance1.getProcessDefinitionId())
      .processInstanceId(processInstance2.getId())
      .endOr()
      .initializeFormKeys()
      .list();
    // then
    // NOTE(review): asserts on list positions without an orderBy clause —
    // relies on the engine's default task ordering; confirm it is stable.
    assertEquals(2, tasks.size());
    assertEquals("aFormKey", tasks.get(0).getFormKey());
    assertEquals("anotherFormKey", tasks.get(1).getFormKey());
  }
/**
 * An or() combining processDefinitionName with processDefinitionKey matches
 * one task per deployed process: the first via its name ("process1"), the
 * second via its key ("anotherProcessDefinition").
 */
@Test
public void shouldReturnTasksWithProcessDefinitionNameOrProcessDefinitionKey() {
// given: two deployed processes, one carrying an explicit name
BpmnModelInstance aProcessDefinition = Bpmn.createExecutableProcess("aProcessDefinition")
.name("process1")
.startEvent()
.userTask()
.endEvent()
.done();
repositoryService
.createDeployment()
.addModelInstance("foo.bpmn", aProcessDefinition)
.deploy();
runtimeService.startProcessInstanceByKey("aProcessDefinition");
BpmnModelInstance anotherProcessDefinition = Bpmn.createExecutableProcess("anotherProcessDefinition")
.startEvent()
.userTask()
.endEvent()
.done();
repositoryService
.createDeployment()
.addModelInstance("foo.bpmn", anotherProcessDefinition)
.deploy()
.deploy();
runtimeService.startProcessInstanceByKey("anotherProcessDefinition");
// when
List<Task> tasks = taskService.createTaskQuery()
.or()
.processDefinitionName("process1")
.processDefinitionKey("anotherProcessDefinition")
.endOr()
.list();
// then: one task per branch of the disjunction
assertEquals(2, tasks.size());
}
/**
 * An or() combining processInstanceBusinessKey (exact) with
 * processInstanceBusinessKeyLike matches one task per started process
 * instance, each instance carrying a distinct business key.
 */
@Test
public void shouldReturnTasksWithProcessInstanceBusinessKeyOrProcessInstanceBusinessKeyLike() {
// given: two deployed processes started with different business keys
BpmnModelInstance aProcessDefinition = Bpmn.createExecutableProcess("aProcessDefinition")
.startEvent()
.userTask()
.endEvent()
.done();
repositoryService
.createDeployment()
.addModelInstance("foo.bpmn", aProcessDefinition)
.deploy()
runtimeService
.startProcessInstanceByKey("aProcessDefinition", "aBusinessKey");
BpmnModelInstance anotherProcessDefinition = Bpmn.createExecutableProcess("anotherProcessDefinition")
.startEvent()
.userTask()
.endEvent()
.done();
repositoryService
.createDeployment()
.addModelInstance("foo.bpmn", anotherProcessDefinition)
.deploy();
runtimeService
.startProcessInstanceByKey("anotherProcessDefinition", "anotherBusinessKey");
// when
List<Task> tasks = taskService.createTaskQuery()
.or()
.processInstanceBusinessKey("aBusinessKey")
.processInstanceBusinessKeyLike("anotherBusinessKey")
.endOr()
.list();
// then: each business-key criterion matches one task
assertEquals(2, tasks.size());
}
/**
 * Combines a business-key or() block with a conjunctive taskAssignee filter
 * outside the block: three matching instances exist by business key, but only
 * the two tasks that also carry the assignee are returned.
 */
@Test
public void shouldReturnTasksWithProcessInstanceBusinessKeyOrProcessInstanceBusinessKeyLikeAndAssignee() {
// given: two instances of the first process and one of the second
BpmnModelInstance aProcessDefinition = Bpmn.createExecutableProcess("aProcessDefinition")
.startEvent()
.userTask()
.endEvent()
.done();
repositoryService
.createDeployment()
.addModelInstance("foo.bpmn", aProcessDefinition)
.deploy();
ProcessInstance processInstance = runtimeService
.startProcessInstanceByKey("aProcessDefinition", "aBusinessKey");
runtimeService
.startProcessInstanceByKey("aProcessDefinition", "aBusinessKey");
BpmnModelInstance anotherProcessDefinition = Bpmn.createExecutableProcess("anotherProcessDefinition")
.startEvent()
.userTask()
.endEvent()
.done();
repositoryService
.createDeployment()
.addModelInstance("foo.bpmn", anotherProcessDefinition)
.deploy();
ProcessInstance processInstanceAnotherDefinition = runtimeService
.startProcessInstanceByKey("anotherProcessDefinition", "anotherBusinessKey");
// set the assignee for one task of each process definition
String assignee = "testUser4";
String taskId = taskService.createTaskQuery().processInstanceId(processInstance.getId()).singleResult().getId();
taskService.setAssignee(taskId, assignee);
taskId = taskService.createTaskQuery().processInstanceId(processInstanceAnotherDefinition.getId()).singleResult().getId();
taskService.setAssignee(taskId, assignee);
// when: the or() block is ANDed with the assignee criterion
List<Task> tasks = taskService.createTaskQuery()
.or()
.processInstanceBusinessKey("aBusinessKey")
.processInstanceBusinessKeyLike("anotherBusinessKey")
.endOr()
.taskAssignee(assignee)
.list();
// then: only the two assigned tasks survive the conjunction
assertEquals(2, tasks.size());
}
/**
 * Places the assignee criterion INSIDE the or() block: two tasks match by
 * business key and a third, standalone task matches by assignee, so all three
 * are returned (contrast with the conjunctive variant above).
 */
@Test
public void shouldReturnTasksWithProcessInstanceBusinessKeyOrProcessInstanceBusinessKeyLikeOrStandaloneAssignee() {
// given: two started process instances with distinct business keys
BpmnModelInstance aProcessDefinition = Bpmn.createExecutableProcess("aProcessDefinition")
.startEvent()
.userTask()
.endEvent()
.done();
repositoryService
.createDeployment()
.addModelInstance("foo.bpmn", aProcessDefinition)
.deploy();
runtimeService
.startProcessInstanceByKey("aProcessDefinition", "aBusinessKey");
BpmnModelInstance anotherProcessDefinition = Bpmn.createExecutableProcess("anotherProcessDefinition")
.startEvent()
.userTask()
.endEvent()
.done();
repositoryService
.createDeployment()
.addModelInstance("foo.bpmn", anotherProcessDefinition)
.deploy();
runtimeService
.startProcessInstanceByKey("anotherProcessDefinition", "anotherBusinessKey");
// create a standalone task with assignee
String assignee = "testUser4";
Task newTask = taskService.newTask();
newTask.setAssignee(assignee);
taskService.saveTask(newTask);
// when: assignee participates in the disjunction itself
List<Task> tasks = taskService.createTaskQuery()
.or()
.processInstanceBusinessKey("aBusinessKey")
.processInstanceBusinessKeyLike("anotherBusinessKey")
.taskAssignee(assignee)
.endOr()
.list();
// then: two business-key matches plus the standalone assignee match
assertEquals(3, tasks.size());
}
/**
 * CMMN variant: an or() combining caseDefinitionKey with caseDefinitionName
 * matches one task from each of the two deployed case definitions.
 */
@Test
@Deployment(resources={
"org/camunda/bpm/engine/test/api/cmmn/oneTaskCase.cmmn",
"org/camunda/bpm/engine/test/api/cmmn/oneTaskCase2.cmmn"})
public void shouldReturnTasksWithCaseDefinitionKeyCaseDefinitionName() {
// given: one instance of each deployed case definition
String caseDefinitionId1 = repositoryService
.createCaseDefinitionQuery()
.caseDefinitionKey("oneTaskCase")
.singleResult()
.getId();
caseService
.withCaseDefinition(caseDefinitionId1)
.create();
String caseDefinitionId2 = repositoryService
.createCaseDefinitionQuery()
.caseDefinitionKey("oneTaskCase2")
.singleResult()
.getId();
caseService
.withCaseDefinition(caseDefinitionId2)
.create();
// when: key matches the first case, name ("One") the second
List<Task> tasks = taskService.createTaskQuery()
.or()
.caseDefinitionKey("oneTaskCase")
.caseDefinitionName("One")
.endOr()
.list();
// then
assertEquals(2, tasks.size());
}
/**
 * CMMN variant: an or() combining caseInstanceBusinessKey (exact) with
 * caseInstanceBusinessKeyLike matches one task per case instance.
 */
@Test
@Deployment(resources={
"org/camunda/bpm/engine/test/api/cmmn/oneTaskCase.cmmn",
"org/camunda/bpm/engine/test/api/cmmn/oneTaskCase2.cmmn"})
public void shouldReturnTasksWithCaseInstanceBusinessKeyOrCaseInstanceBusinessKeyLike() {
// given: two case instances with distinct business keys
String caseDefinitionId1 = repositoryService
.createCaseDefinitionQuery()
.caseDefinitionKey("oneTaskCase")
.singleResult()
.getId();
CaseInstance caseInstance1 = caseService
.withCaseDefinition(caseDefinitionId1)
.businessKey("aBusinessKey")
.create();
String caseDefinitionId2 = repositoryService
.createCaseDefinitionQuery()
.caseDefinitionKey("oneTaskCase2")
.singleResult()
.getId();
CaseInstance caseInstance2 = caseService
.withCaseDefinition(caseDefinitionId2)
.businessKey("anotherBusinessKey")
.create();
// when
List<Task> tasks = taskService.createTaskQuery()
.or()
.caseInstanceBusinessKey(caseInstance1.getBusinessKey())
.caseInstanceBusinessKeyLike(caseInstance2.getBusinessKey())
.endOr()
.list();
// then: each criterion matches one task
assertEquals(2, tasks.size());
}
/**
 * CMMN variant with a third disjunct: two tasks match by case business key,
 * and a standalone task matches by assignee inside the same or() block.
 */
@Test
@Deployment(resources={
"org/camunda/bpm/engine/test/api/cmmn/oneTaskCase.cmmn",
"org/camunda/bpm/engine/test/api/cmmn/oneTaskCase2.cmmn"})
public void shouldReturnTasksWithCaseInstanceBusinessKeyOrCaseInstanceBusinessKeyLikeOrStandaloneAssignee() {
// given: two case instances with distinct business keys
String caseDefinitionId1 = repositoryService
.createCaseDefinitionQuery()
.caseDefinitionKey("oneTaskCase")
.singleResult()
.getId();
CaseInstance caseInstance1 = caseService
.withCaseDefinition(caseDefinitionId1)
.businessKey("aBusinessKey")
.create();
String caseDefinitionId2 = repositoryService
.createCaseDefinitionQuery()
.caseDefinitionKey("oneTaskCase2")
.singleResult()
.getId();
CaseInstance caseInstance2 = caseService
.withCaseDefinition(caseDefinitionId2)
.businessKey("anotherBusinessKey")
.create();
// create a standalone task with assignee
String assignee = "testUser4";
Task newTask = taskService.newTask();
newTask.setAssignee(assignee);
taskService.saveTask(newTask);
// when
List<Task> tasks = taskService.createTaskQuery()
.or()
.caseInstanceBusinessKey(caseInstance1.getBusinessKey())
.caseInstanceBusinessKeyLike(caseInstance2.getBusinessKey())
.taskAssignee(assignee)
.endOr()
.list();
// then: two business-key matches plus the standalone assignee match
assertEquals(3, tasks.size());
}
/**
 * Mixes BPMN and CMMN in one or(): the same business key is used both for a
 * process instance and for a case instance, and the disjunction of
 * caseInstanceBusinessKey with processInstanceBusinessKey returns both tasks.
 */
@Test
@Deployment(resources={"org/camunda/bpm/engine/test/api/cmmn/oneTaskCase.cmmn"})
public void shouldReturnTasksWithCaseInstanceBusinessKeyOrProcessInstanceBusinessKey() {
String businessKey = "aBusinessKey";
// given: a process instance and a case instance sharing the business key
BpmnModelInstance aProcessDefinition = Bpmn.createExecutableProcess("aProcessDefinition")
.startEvent()
.userTask()
.endEvent()
.done();
repositoryService
.createDeployment()
.addModelInstance("foo.bpmn", aProcessDefinition)
.deploy();
runtimeService.startProcessInstanceByKey("aProcessDefinition", businessKey);
String caseDefinitionId = repositoryService
.createCaseDefinitionQuery()
.caseDefinitionKey("oneTaskCase")
.singleResult()
.getId();
caseService
.withCaseDefinition(caseDefinitionId)
.businessKey(businessKey)
.create();
// when/then: one task from each engine type matches
TaskQuery query = taskService.createTaskQuery();
query
.or()
.caseInstanceBusinessKey(businessKey)
.processInstanceBusinessKey(businessKey)
.endOr();
assertEquals(2, query.list().size());
}
/**
 * An or() combining activityInstanceIdIn (matching the running user task of a
 * process instance) with taskId (matching a standalone task) returns both.
 */
@Test
public void shouldReturnTasksWithActivityInstanceIdInOrTaskId() {
// given: one process-bound task and one standalone task
BpmnModelInstance aProcessDefinition = Bpmn.createExecutableProcess("aProcessDefinition")
.startEvent()
.userTask()
.endEvent()
.done();
repositoryService
.createDeployment()
.addModelInstance("foo.bpmn", aProcessDefinition)
.deploy();
ProcessInstance processInstance1 = runtimeService
.startProcessInstanceByKey("aProcessDefinition");
// id of the (single) child activity instance, i.e. the active user task
String activityInstanceId = runtimeService.getActivityInstance(processInstance1.getId())
.getChildActivityInstances()[0].getId();
Task task2 = taskService.newTask();
taskService.saveTask(task2);
// when
List<Task> tasks = taskService.createTaskQuery()
.or()
.activityInstanceIdIn(activityInstanceId)
.taskId(task2.getId())
.endOr()
.list();
// then: one match per disjunct
assertEquals(2, tasks.size());
}
/**
 * Extending a plain query with a query that contains two or() blocks: the
 * result keeps the base candidate-group filter at index 0 and appends the two
 * or()-sub-queries at indices 1 and 2 of getQueries().
 */
@Test
public void shouldReturnTasksByExtendingQuery_OrInExtendingQuery() {
// given: a plain extended query and an extending query with two or() blocks
TaskQuery extendedQuery = taskService.createTaskQuery()
.taskCandidateGroup("sales");
TaskQuery extendingQuery = taskService.createTaskQuery()
.or()
.taskName("aTaskName")
.endOr()
.or()
.taskNameLike("anotherTaskName")
.endOr();
// when: the extended query is merged with the extending one
TaskQueryImpl result = (TaskQueryImpl)((TaskQueryImpl)extendedQuery).extend(extendingQuery);
// then: base filter preserved, or() sub-queries appended in order
assertEquals("sales", result.getCandidateGroup());
assertEquals("aTaskName", result.getQueries().get(1).getName());
assertEquals("anotherTaskName", result.getQueries().get(2).getNameLike());
}
/**
 * Mirror of the previous test: the or() blocks live in the EXTENDED query and
 * the extending query contributes a plain candidate-group filter; both are
 * preserved in the merged result.
 */
@Test
public void shouldReturnTasksByExtendingQuery_OrInExtendedQuery() {
// given: an extended query with two or() blocks, extended by a plain filter
TaskQuery extendedQuery = taskService.createTaskQuery()
.or()
.taskName("aTaskName")
.endOr()
.or()
.taskNameLike("anotherTaskName")
.endOr();
TaskQuery extendingQuery = taskService.createTaskQuery()
.taskCandidateGroup("aCandidateGroup");
// when
TaskQueryImpl result = (TaskQueryImpl)((TaskQueryImpl)extendedQuery).extend(extendingQuery);
// then: or() sub-queries kept at indices 1 and 2, plain filter merged in
assertEquals("aTaskName", result.getQueries().get(1).getName());
assertEquals("anotherTaskName", result.getQueries().get(2).getNameLike());
assertEquals("aCandidateGroup", result.getCandidateGroup());
}
/**
 * Both the extended and the extending query contain or() blocks; the merged
 * result concatenates all four or()-sub-queries (extended first, then
 * extending) at getQueries() indices 1..4.
 */
@Test
public void shouldReturnTasksByExtendingQuery_OrInBothExtendedAndExtendingQuery() {
// given: two or() blocks on each side of the extension
TaskQuery extendedQuery = taskService.createTaskQuery()
.or()
.taskName("aTaskName")
.endOr()
.or()
.taskNameLike("anotherTaskName")
.endOr();
TaskQuery extendingQuery = taskService.createTaskQuery()
.or()
.taskCandidateGroup("aCandidateGroup")
.endOr()
.or()
.taskCandidateUser("aCandidateUser")
.endOr();
// when
TaskQueryImpl result = (TaskQueryImpl)((TaskQueryImpl)extendedQuery).extend(extendingQuery);
// then: sub-queries appear in definition order after the merge
assertEquals("aTaskName", result.getQueries().get(1).getName());
assertEquals("anotherTaskName", result.getQueries().get(2).getNameLike());
assertEquals("aCandidateGroup", result.getQueries().get(3).getCandidateGroup());
assertEquals("aCandidateUser", result.getQueries().get(4).getCandidateUser());
}
/**
 * Exercises disjunctive combinations of dueDate/dueBefore/dueAfter against
 * the three fixture tasks from createFollowUpAndDueDateTasks() (one due
 * before "oneHourAgo", one due exactly at "date", one due after
 * "oneHourLater"). Expected counts follow from which fixtures each
 * disjunct covers.
 */
@Test
public void shouldTestDueDateCombinations() throws ParseException {
HashMap<String, Date> dates = createFollowUpAndDueDateTasks();
// exact date OR strictly before oneHourAgo -> middle + early task
assertEquals(2, taskService.createTaskQuery()
.or()
.dueDate(dates.get("date"))
.dueBefore(dates.get("oneHourAgo"))
.endOr()
.count());
// exact date OR strictly after oneHourLater -> middle + late task
assertEquals(2, taskService.createTaskQuery()
.or()
.dueDate(dates.get("date"))
.dueAfter(dates.get("oneHourLater"))
.endOr()
.count());
// before oneHourAgo OR after oneHourLater -> early + late task
assertEquals(2, taskService.createTaskQuery()
.or()
.dueBefore(dates.get("oneHourAgo"))
.dueAfter(dates.get("oneHourLater"))
.endOr()
.count());
// overlapping ranges cover all three tasks
assertEquals(3, taskService.createTaskQuery()
.or()
.dueBefore(dates.get("oneHourLater"))
.dueAfter(dates.get("oneHourAgo"))
.endOr()
.count());
// all three criteria together also cover all three tasks
assertEquals(3, taskService.createTaskQuery()
.or()
.dueDate(dates.get("date"))
.dueBefore(dates.get("oneHourAgo"))
.dueAfter(dates.get("oneHourLater"))
.endOr()
.count());
}
/**
 * Same combination matrix as the due-date test, but for
 * followUpDate/followUpBefore/followUpAfter, plus the
 * followUpBeforeOrNotExistent variant after a fourth task without any
 * follow-up date has been added.
 */
@Test
public void shouldTestFollowUpDateCombinations() throws ParseException {
HashMap<String, Date> dates = createFollowUpAndDueDateTasks();
// exact date OR strictly before oneHourAgo -> middle + early task
assertEquals(2, taskService.createTaskQuery()
.or()
.followUpDate(dates.get("date"))
.followUpBefore(dates.get("oneHourAgo"))
.endOr()
.count());
// exact date OR strictly after oneHourLater -> middle + late task
assertEquals(2, taskService.createTaskQuery()
.or()
.followUpDate(dates.get("date"))
.followUpAfter(dates.get("oneHourLater"))
.endOr()
.count());
// before oneHourAgo OR after oneHourLater -> early + late task
assertEquals(2, taskService.createTaskQuery()
.or()
.followUpBefore(dates.get("oneHourAgo"))
.followUpAfter(dates.get("oneHourLater"))
.endOr()
.count());
// overlapping ranges cover all three tasks
assertEquals(3, taskService.createTaskQuery()
.or()
.followUpBefore(dates.get("oneHourLater"))
.followUpAfter(dates.get("oneHourAgo"))
.endOr()
.count());
assertEquals(3, taskService.createTaskQuery()
.or()
.followUpDate(dates.get("date"))
.followUpBefore(dates.get("oneHourAgo"))
.followUpAfter(dates.get("oneHourLater"))
.endOr()
.count());
// followUp before or null
// a fourth task with no follow-up date at all
taskService.saveTask(taskService.newTask());
assertEquals(4, taskService.createTaskQuery().count());
// "...OrNotExistent" additionally matches the task without a follow-up date
assertEquals(3, taskService.createTaskQuery()
.or()
.followUpDate(dates.get("date"))
.followUpBeforeOrNotExistent(dates.get("oneHourAgo"))
.endOr()
.count());
assertEquals(3, taskService.createTaskQuery()
.or()
.followUpBeforeOrNotExistent(dates.get("oneHourAgo"))
.followUpAfter(dates.get("oneHourLater"))
.endOr()
.count());
assertEquals(4, taskService.createTaskQuery()
.or()
.followUpBeforeOrNotExistent(dates.get("oneHourLater"))
.followUpAfter(dates.get("oneHourAgo"))
.endOr()
.count());
assertEquals(4, taskService.createTaskQuery()
.or()
.followUpDate(dates.get("date"))
.followUpBeforeOrNotExistent(dates.get("oneHourAgo"))
.followUpAfter(dates.get("oneHourLater"))
.endOr()
.count());
}
/**
 * An or() combining the active() suspension-state filter with a
 * process-variable criterion: two active instances match via active(), and
 * the suspended instance matches via its "foo" variable, giving three tasks.
 */
@Test
public void shouldReturnTasksByVariableAndActiveProcesses() throws Exception {
// given: a one-task process definition
BpmnModelInstance aProcessDefinition = Bpmn.createExecutableProcess("oneTaskProcess")
.startEvent()
.userTask("testQuerySuspensionStateTask")
.endEvent()
.done();
repositoryService
.createDeployment()
.addModelInstance("foo.bpmn", aProcessDefinition)
.deploy();
// start two process instance and leave them active
runtimeService.startProcessInstanceByKey("oneTaskProcess");
runtimeService.startProcessInstanceByKey("oneTaskProcess");
// start one process instance and suspend it
Map<String, Object> variables = new HashMap<String, Object>();
variables.put("foo", 0);
ProcessInstance suspendedProcessInstance = runtimeService.startProcessInstanceByKey("oneTaskProcess", variables);
runtimeService.suspendProcessInstanceById(suspendedProcessInstance.getProcessInstanceId());
// assume: two active tasks, one suspended task
assertEquals(2, taskService.createTaskQuery().taskDefinitionKey("testQuerySuspensionStateTask").active().count());
assertEquals(1, taskService.createTaskQuery().taskDefinitionKey("testQuerySuspensionStateTask").suspended().count());
// then: active() OR variable-equals covers all three tasks
assertEquals(3, taskService.createTaskQuery().or().active().processVariableValueEquals("foo", 0).endOr().list().size());
}
/**
 * Creates three standalone tasks whose due and follow-up dates lie strictly
 * before "oneHourAgo", exactly at "date", and strictly after "oneHourLater"
 * respectively.
 *
 * @return map with keys "date", "oneHourAgo" and "oneHourLater" holding the
 *         reference timestamps used by the date-combination tests
 * @throws ParseException if the fixed reference date cannot be parsed
 */
protected HashMap<String, Date> createFollowUpAndDueDateTasks() throws ParseException {
// fixed reference date; the other two timestamps bracket it by +/- one hour
final Date date = new SimpleDateFormat("dd/MM/yyyy hh:mm:ss").parse("27/07/2017 01:12:13");
final Date oneHourAgo = new Date(date.getTime() - 60 * 60 * 1000);
final Date oneHourLater = new Date(date.getTime() + 60 * 60 * 1000);
// task strictly before the "oneHourAgo" boundary
Task taskDueBefore = taskService.newTask();
taskDueBefore.setFollowUpDate(new Date(oneHourAgo.getTime() - 1000));
taskDueBefore.setDueDate(new Date(oneHourAgo.getTime() - 1000));
taskService.saveTask(taskDueBefore);
// task exactly at the reference date
Task taskDueDate = taskService.newTask();
taskDueDate.setFollowUpDate(date);
taskDueDate.setDueDate(date);
taskService.saveTask(taskDueDate);
// task strictly after the "oneHourLater" boundary
Task taskDueAfter = taskService.newTask();
taskDueAfter.setFollowUpDate(new Date(oneHourLater.getTime() + 1000));
taskDueAfter.setDueDate(new Date(oneHourLater.getTime() + 1000));
taskService.saveTask(taskDueAfter);
assertEquals(3, taskService.createTaskQuery().count());
// plain map instead of double-brace initialization: the anonymous HashMap
// subclass created by {{ ... }} pins a reference to the enclosing test
// instance and creates a needless extra class.
HashMap<String, Date> dates = new HashMap<String, Date>();
dates.put("date", date);
dates.put("oneHourAgo", oneHourAgo);
dates.put("oneHourLater", oneHourLater);
return dates;
}
}
| falko/camunda-bpm-platform | engine/src/test/java/org/camunda/bpm/engine/test/api/task/TaskQueryOrTest.java | Java | apache-2.0 | 37,054 |
<?php
/**
 * Stage API endpoint: loads the JSON stage snapshot for a room, optionally
 * dispatches an "act" command, and echoes the stage data as JSON.
 */
require_once("ShingekiAPIBaseClass.php");
$err_msg = ""; // kept for backwards compatibility; the class itself uses $this->err_msg
class ShingekiAPI_Stage extends ShingekiAPIBaseClass {
    protected $shingeki_dir = "/tmp/shingeki";
    protected $stage_dir = "/tmp/shingeki/stage/";
    protected $stageData;

    /**
     * Returns the key of $userlist whose value equals $username, or -1 if
     * no entry matches.
     */
    function UserName2Id($userlist, $username) {
        foreach ($userlist as $key => $val) {
            if ($username == $val) {
                return $key;
            }
        }
        return -1;
    }

    /**
     * Loads the stage file for the posted roomId (if any) and dispatches on
     * the posted 'act'. Returns false and sets HTTP 400 for an unknown act.
     */
    function parseInputData() {
        // Guard against a missing roomId, an unreadable stage file, and a
        // crafted roomId containing path components (basename() confines the
        // lookup to the stage directory).
        if (isset($this->pPost['roomId'])) {
            $stage_file = $this->stage_dir . basename($this->pPost['roomId']);
            $raw_data = is_file($stage_file) ? file_get_contents($stage_file) : false;
            $this->stageData = ($raw_data === false) ? null : json_decode($raw_data, true);
        }
        if (isset($this->pPost['act'])) {
            switch ($this->pPost['act']) {
                case "login":
                    $this->login();
                    break;
                case "room_list":
                    $this->roomList();
                    // NOTE(review): $this->filename and $this->data are
                    // presumably provided by the base class — confirm.
                    file_put_contents($this->filename, json_encode($this->data));
                    break;
                default:
                    $this->err_msg = "act not found.";
                    header('HTTP', true, 400);
                    return false;
            }
        }
        return true;
    }

    /**
     * Entry point: parse the request and emit the stage data as JSON, or an
     * error response if parsing failed.
     */
    public function execute() {
        $this->convertInputJson();
        if (!$this->parseInputData()) {
            $this->outputError();
            return;
        }
        print json_encode($this->stageData);
    }
}
$shingekiAPI_Stage = new ShingekiAPI_Stage();
$shingekiAPI_Stage->execute();
package com.qxg.study.studyandroid.view.RecyclerView;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.Paint;
import android.graphics.Rect;
import android.os.Bundle;
import android.support.v7.app.AppCompatActivity;
import android.support.v7.widget.DividerItemDecoration;
import android.support.v7.widget.GridLayoutManager;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.support.v7.widget.StaggeredGridLayoutManager;
import android.view.View;
import com.qxg.study.studyandroid.R;
import com.qxg.study.studyandroid.adapter.HeaderRVAdapter;
import com.qxg.study.studyandroid.adapter.RVAdapter;
import java.util.ArrayList;
import butterknife.BindView;
import butterknife.ButterKnife;
public class SimpleRVActivity extends AppCompatActivity {
@BindView(R.id.simple_rv)
RecyclerView simpleRv;
private ArrayList<String> lists;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_simple_rv);
ButterKnife.bind(this);
initList();
int flag = getIntent().getIntExtra("flag",0);
solveFlag(flag); // 针对不同的flag做不同的RV的操作
}
private void initList(){
lists = new ArrayList<String>();
for (int i = 0; i < 100; i++) {
lists.add(i+"");
}
}
private void solveFlag(int flag){
switch(flag){
case 0:
//简单用法
simpleRv.setAdapter(new RVAdapter(this,lists));
simpleRv.setLayoutManager(new LinearLayoutManager(this));
break;
case 1:
//横向滚动
simpleRv.setAdapter(new RVAdapter(this,lists));
LinearLayoutManager layoutManager = new LinearLayoutManager(this);
layoutManager.setOrientation(LinearLayoutManager.HORIZONTAL);
simpleRv.setLayoutManager(layoutManager);
break;
case 2:
simpleRv.setAdapter(new RVAdapter(this,lists));
StaggeredGridLayoutManager sLayoutManager =
new StaggeredGridLayoutManager(3,StaggeredGridLayoutManager.VERTICAL);
simpleRv.setLayoutManager(sLayoutManager);
break;
case 3:
simpleRv.setAdapter(new HeaderRVAdapter(this,lists));
simpleRv.setLayoutManager(new LinearLayoutManager(this));
break;
case 4:
simpleRv.setLayoutManager(new GridLayoutManager(this,4));
simpleRv.setAdapter(new HeaderRVAdapter(this,lists));
break;
case 5:
simpleRv.setAdapter(new HeaderRVAdapter(this,lists));
simpleRv.setLayoutManager(new StaggeredGridLayoutManager(3,StaggeredGridLayoutManager.VERTICAL));
break;
case 6:
//设置分割线
simpleRv.setAdapter(new RVAdapter(this,lists));
simpleRv.addItemDecoration(new DividerItemDecoration(this,DividerItemDecoration.HORIZONTAL));
simpleRv.setLayoutManager(new LinearLayoutManager(this));
break;
case 7:
//自定义分割线
simpleRv.setAdapter(new RVAdapter(this,lists));
simpleRv.addItemDecoration(new MyDecoration());
simpleRv.setLayoutManager(new LinearLayoutManager(this));
break;
}
}
class MyDecoration extends RecyclerView.ItemDecoration{
int height = 8;
@Override
public void onDraw(Canvas c, RecyclerView parent, RecyclerView.State state) {
super.onDraw(c, parent, state);
//画线
int left = parent.getPaddingLeft();
int right = parent.getWidth() - parent.getPaddingRight();
final int childCount = parent.getChildCount();
for (int i = 0; i < childCount; i++) {
View now = parent.getChildAt(i);
//获得child的布局信息
RecyclerView.LayoutParams params = (RecyclerView.LayoutParams) now.getLayoutParams();
int top = now.getBottom() + params.bottomMargin;
int bottom = top + height; //假设是2dp
//开始画线
Paint mPaint = new Paint();
mPaint.setColor(Color.parseColor("#cccccc"));
c.drawRect(left,top,right,bottom,mPaint);
//如果要画drawable,可以通过drawable的setBounds来设置所画的位置,再通过drawable的draw方法,将drawable画出来就行
}
}
@Override
public void getItemOffsets(Rect outRect, View view, RecyclerView parent, RecyclerView.State state) {
//画横线,就是往下偏移一个分割线的高度,而如果是画竖线,就是将宽度传给第三个参数,即:outRect.set(0,0,width,0);
outRect.set(0, 0, 0, height);
}
}
}
| 583462423/StudyAndroid | app/src/main/java/com/qxg/study/studyandroid/view/RecyclerView/SimpleRVActivity.java | Java | apache-2.0 | 5,179 |
<?php
// WordPress single-post template: sets up the post and renders header,
// breadcrumbs, article, share tools, sidebars and footer.
the_post();
get_header();
?>
<div class="row">
<div class="main">
<!-- briciole di pane -->
<?php if(is_single()) { ?>
<div class="row">
<div class="breadcrumbs block1"><div id="left">
<a href="<?php bloginfo('siteurl'); ?>">HOMEPAGE</a> >
<?php
// array_shift() takes its argument by reference, so the return value of
// get_the_category() must be assigned to a variable first (passing it
// directly raises a notice and is a fatal error on PHP 8).
$post_categories = get_the_category();
$cat = array_shift($post_categories);
echo get_category_parents($cat, true, ' > ');
the_title();
?>
</div></div>
</div>
<?php } ?>
<!-- //briciole di pane -->
<?php
// hide the social-share block when the post meta flag is set to 'SI'
$hide_socialshare = @get_post_meta($post->ID, '_qn_post_hide_socialshare', true) == 'SI';
qn2011_sidebar('Article Top Widgets');
echo '<article class="' . ($hide_socialshare ? 'noshare' : '') . '">';
display_post($post, "single");
echo '</article>';
if(!$hide_socialshare) {
?>
<div class="row hidden-mobile">
<div class="block1 share">
<h4 class="tab-title"><i class="icon-blank"></i>Strumenti</h4>
<ul>
<li class="box email">
<a href="javascript:void(0);" onclick="location.assign('mailto:?body='+encodeURIComponent(location.href.replace(/[#?].+$/,''))+'&subject='+encodeURIComponent(document.title))"><i class="icon-mail"></i>INVIA</a>
</li>
<li class="box print"><a href="javascript:window.print()"><i class="icon-print"></i>STAMPA</a></li>
</ul>
</div>
</div>
<?php
}
qn2011_sidebar('Article Bottom Widgets');
// render comments only for non-page content with comments open
if(!is_page() && /*get_option('default_comment_status') == "open" &&*/ $post->comment_status == "open") {
echo '<div id="commenti">' . PHP_EOL;
comments_template( '', true );
echo '</div>' . PHP_EOL;
}
?>
</div> <!-- //main -->
<aside class="side">
<?php
// article sidebar placement is configurable: action unset -> default first,
// action == 1 -> default after the article-specific sidebar
$opt_artsidebar = theme_fp_get_saved_data('artsidebar');
$artsidebar_action = @$opt_artsidebar['action'];
if(!$artsidebar_action) qn2011_sidebar('ColonnaDestra');
qn2011_sidebar('ColonnaDestra ARTICOLO');
if($artsidebar_action == 1) qn2011_sidebar('ColonnaDestra');
?>
</aside>
</div> <!-- //row -->
<div class="row">
<?php qn2011_sidebar('Midline Widgets'); ?>
</div>
<?php get_footer(); ?>
package dex
import (
"io/ioutil"
"os"
)
// do runs fn from inside a freshly created temporary directory under
// target/test (optionally rooted at $BASEDIR), restoring the original
// working directory afterwards. Any setup failure panics.
func do(fn func()) {
	retreat, err := os.Getwd()
	if err != nil {
		panic(err)
	}
	defer os.Chdir(retreat)

	if basedir := os.Getenv("BASEDIR"); basedir != "" {
		if err := os.Chdir(basedir); err != nil {
			panic(err)
		}
	}
	if err := os.MkdirAll("target/test", 0755); err != nil {
		panic(err)
	}
	tmpdir, err := ioutil.TempDir("target/test", "")
	if err != nil {
		panic(err)
	}
	if err := os.Chdir(tmpdir); err != nil {
		panic(err)
	}
	fn()
}
| polydawn/hroot | dex/utils_test.go | GO | apache-2.0 | 485 |
// Navigation data for the CordVtnNodeListCommand documentation page:
// [member name, HTML anchor, children]. Presumably generated by Doxygen —
// regenerate rather than hand-editing.
var classorg_1_1onosproject_1_1cordvtn_1_1cli_1_1CordVtnNodeListCommand =
[
[ "execute", "classorg_1_1onosproject_1_1cordvtn_1_1cli_1_1CordVtnNodeListCommand.html#a8795ed63dd8a80eb314b7c3ea072f75a", null ]
];
/**
 Apache License
 Version 2.0, January 2004
 http://www.apache.org/licenses/
 **/
package org.person.sfgower.rabbitutil;
import java.util.HashMap;
/**
 * Consumer configuration expressed as a plain String-to-String map.
 *
 * <p>This is a marker subtype of {@link HashMap} that adds no behaviour of
 * its own. NOTE(review): it inherits HashMap's serializability but declares
 * no serialVersionUID, and extending a collection directly (rather than
 * composing one) exposes the full map API — confirm this is intentional.
 *
 * <p>Created by TRINITY on 4/17/16.
 */
public class ConsumerConfiguration extends HashMap<String,String> {
}
// Search-index data mapping lowercase file names to [display name,
// [target page, isPrimary]] entries. Presumably generated by Doxygen —
// regenerate rather than hand-editing.
var searchData=
[
['leaderboard_2ecpp',['Leaderboard.cpp',['../a00044.html',1,'']]],
['leaderboard_2eh',['Leaderboard.h',['../a00047.html',1,'']]],
['leaderboardview_2ecpp',['LeaderboardView.cpp',['../a00134.html',1,'']]],
['leaderboardview_2eh',['LeaderboardView.h',['../a00137.html',1,'']]]
];
package com.sargasso;

import java.util.Arrays;
import java.util.List;

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;

import com.sargasso.models.Item;

/**
 * Root resource (exposed at "myresource" path).
 */
@Path("myresource")
public class MyResource {

    /**
     * Handles HTTP GET requests on this resource.
     *
     * @return a one-element list containing a sample {@link Item}, serialized
     *         to the client as JSON
     */
    @GET
    @Produces(MediaType.APPLICATION_JSON)
    public List<Item> getIt() {
        final Item sample = new Item();
        sample.setName("Andrew's Item");
        return Arrays.asList(sample);
    }
}
# python3
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Analysis for catch."""
from typing import Optional, Sequence
from bsuite.experiments.catch import sweep
from bsuite.utils import plotting
import pandas as pd
import plotnine as gg
# Analysis constants for the catch experiment.
NUM_EPISODES = sweep.NUM_EPISODES  # episode budget used when scoring/plotting
BASE_REGRET = 1.6  # baseline regret used for the score and the dashed reference line
TAGS = sweep.TAGS  # experiment tags, copied from the sweep module
def score(df: pd.DataFrame) -> float:
  """Summarize catch performance as a single scalar score.

  Args:
    df: bsuite results dataframe for catch runs.

  Returns:
    Average-regret score relative to the BASE_REGRET baseline, evaluated at
    the final episode of the sweep.
  """
  final_episode = sweep.NUM_EPISODES
  return plotting.ave_regret_score(
      df, baseline_regret=BASE_REGRET, episode=final_episode)
def plot_learning(df: pd.DataFrame,
                  sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
  """Plot regret learning curves for catch, with a baseline reference line.

  Args:
    df: bsuite results dataframe for catch runs.
    sweep_vars: optional hyperparameter columns to facet the plot by.

  Returns:
    A plotnine ggplot of regret through training, with BASE_REGRET drawn as a
    dashed horizontal reference.
  """
  curves = plotting.plot_regret_learning(
      df, sweep_vars=sweep_vars, max_episode=sweep.NUM_EPISODES)
  baseline = gg.geom_hline(
      gg.aes(yintercept=BASE_REGRET), linetype='dashed', alpha=0.4, size=1.75)
  return curves + baseline
def plot_seeds(df_in: pd.DataFrame,
               sweep_vars: Optional[Sequence[str]] = None,
               colour_var: Optional[str] = None) -> gg.ggplot:
  """Plot per-run episodic returns through time.

  Args:
    df_in: bsuite results dataframe; not modified (a copy is taken).
    sweep_vars: optional hyperparameter columns to facet the plot by.
    colour_var: optional column used to colour individual runs.

  Returns:
    A plotnine ggplot of average episodic return per run.
  """
  df = df_in.copy()
  # Average per-episode return, recovered from the cumulative regret diff.
  regret_per_episode = df.total_regret.diff() / df.episode.diff()
  df['average_return'] = 1.0 - regret_per_episode
  plot = plotting.plot_individual_returns(
      df_in=df,
      max_episode=NUM_EPISODES,
      return_column='average_return',
      colour_var=colour_var,
      yintercept=1.,
      sweep_vars=sweep_vars,
  )
  return plot + gg.ylab('average episodic return')
| deepmind/bsuite | bsuite/experiments/catch/analysis.py | Python | apache-2.0 | 2,122 |
/*
 * Copyright 2014 Guidewire Software, Inc.
 */
package gw.internal.xml.xsd.typeprovider.schema;
import gw.internal.xml.xsd.typeprovider.XmlSchemaIndex;
import gw.lang.reflect.LocationInfo;
/**
 * Represents the XML Schema {@code fractionDigits} facet, which constrains
 * the number of digits allowed after the decimal point of a decimal type.
 * All state lives in the {@link XmlSchemaFacet} base class.
 */
public final class XmlSchemaFractionDigitsFacet extends XmlSchemaFacet<XmlSchemaFractionDigitsFacet> {
/**
 * @param schemaIndex  owning schema index
 * @param locationInfo source location of the facet declaration
 * @param value        the facet's lexical value
 */
public XmlSchemaFractionDigitsFacet( XmlSchemaIndex schemaIndex, LocationInfo locationInfo, String value ) {
super( schemaIndex, locationInfo, value );
}
/**
 * Creates a copy of this facet bound to the given schema index, preserving
 * location and value.
 */
@Override
protected XmlSchemaFractionDigitsFacet copy( XmlSchemaIndex schemaIndex ) throws UnsupportedOperationException {
return new XmlSchemaFractionDigitsFacet( schemaIndex, getLocationInfo(), getValue() );
}
}
/*
 Copyright (c) 2017 Ahome' Innovation Technologies. All rights reserved.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 */
package com.ait.tooling.common.api.types;
/**
 * Callback contract for asynchronous operations: exactly one of the two
 * methods is expected to be invoked when the operation completes.
 *
 * @param <T> the type of the successful result
 */
public interface IAsyncCallback<T>
{
/**
 * Invoked when the asynchronous operation fails.
 *
 * @param throwable the failure cause
 */
public void onFailure(Throwable throwable);
/**
 * Invoked when the asynchronous operation completes successfully.
 *
 * @param result the operation's result
 */
public void onSuccess(T result);
}
package com.briangriffey.glass.example;
import android.app.PendingIntent;
import android.app.Service;
import android.content.Intent;
import android.os.IBinder;
import android.widget.RemoteViews;
import com.google.android.glass.timeline.LiveCard;
import com.google.android.glass.timeline.TimelineManager;
/**
 * Background {@link Service} that publishes a Glass {@link LiveCard} showing a
 * simple text layout; tapping the card launches {@link CardActivity}.
 *
 * Created by briangriffey on 1/3/14.
 */
public class LaunchActivityService extends Service {

    /** Card published by this service; retained so it can be unpublished on destroy. */
    private LiveCard liveCard;

    @Override
    public int onStartCommand(Intent intent, int flags, int startId) {
        // Guard against repeated start commands publishing duplicate cards
        // (the original created and published a new card on every call).
        if (liveCard == null) {
            TimelineManager manager = TimelineManager.from(this);
            liveCard = manager.createLiveCard("awesome");

            RemoteViews view = new RemoteViews(this.getPackageName(), R.layout.live_card_layout);
            view.setTextViewText(android.R.id.text1, "Amazing");
            liveCard.setViews(view);

            // Launch CardActivity in a fresh task when the user taps the card.
            Intent menuIntent = new Intent(this, CardActivity.class);
            menuIntent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK | Intent.FLAG_ACTIVITY_CLEAR_TASK);
            liveCard.setAction(PendingIntent.getActivity(this, 0, menuIntent, 0));

            liveCard.publish(LiveCard.PublishMode.REVEAL);
        }
        return START_STICKY;
    }

    @Override
    public void onDestroy() {
        // Unpublish the card so it does not outlive the service
        // (the original leaked the published card on destroy).
        if (liveCard != null && liveCard.isPublished()) {
            liveCard.unpublish();
        }
        liveCard = null;
        super.onDestroy();
    }

    @Override
    public IBinder onBind(Intent intent) {
        // Started service only; binding is not supported.
        return null;
    }
}
| briangriffey/glass-full-activity | src/com/briangriffey/glass/example/LaunchActivityService.java | Java | apache-2.0 | 1,288 |
package view;
import java.awt.Dimension;
import java.awt.Graphics;
import java.awt.Image;
import java.awt.Toolkit;
import java.awt.event.MouseAdapter;
import java.awt.event.MouseEvent;
import java.net.URL;
import java.util.ArrayList;
import javax.swing.ImageIcon;
import javax.swing.JFrame;
import javax.swing.JLabel;
import javax.swing.JOptionPane;
import javax.swing.JPanel;
import models.MazeModel;
import models.MazePoint;
public class RandomModel {
private JFrame randomPage;
private MazeModel mazeModel;
private MazePoint mazePoint;
private ArrayList<MazePoint> maze;
static Thread myThread;
private MazePoint point_now;
private MazePoint step_point;
private MazePoint test_point;
JPanel panel;
CanvasPanel mazePanel;
// Ëĸö°´Å¥
private JLabel bt_create;
private JLabel bt_solveAuto;
private JLabel bt_exit1;
private JLabel bt_again;
private JLabel bt_prompt;
// µ÷½Ú³ß´çµÄ°´Å¥
JLabel rowLeft;
JLabel rowRight;
JLabel colLeft;
JLabel colRight;
JLabel bt_up;
JLabel bt_down;
JLabel bt_left;
JLabel bt_right;
ImageIcon image_create, image_create1;
ImageIcon image_solveAuto, image_solveAuto1;
ImageIcon image_exit, image_exit1;
ImageIcon image_again, image_again1;
ImageIcon image_prompt, image_prompt1;
ImageIcon leftLight;
ImageIcon leftDark;
ImageIcon rightLight;
ImageIcon rightDark;
ImageIcon up, up1;
ImageIcon down, down1;
ImageIcon left, left1;
ImageIcon right, right1;
final int RANDOM_DRAW = 0;
final int STEP_DRAW = 1;// ³õÊ¼Ëæ»úÃÔ¹¬
final int ALL_DRAW = 2; // Ö´Ðе½×îºóʱ½«Í¨Â·»³ö
int step_number = 0;// Íê³ÉÃÔ¹¬µÄ²½Êý
// ÏÔʾÐÐÁÐÊý×Ö
JLabel rowNum;
JLabel colNum;
ArrayList<ImageIcon> imagesList;
private static int mazeRow, mazeCol;// ÃÔ¹¬µÄÐÐÁÐ
private int realx = 0, realy = 0;// ÃÔ¹¬µÄÕæÊµ´óС
private int sizex = 0, sizey = 0;// ¶ÔÓ¦ÃÔ¹¬µÄ·½¸ñ´óС
private int stepx = 0, stepy = 0;// ÊÖ¶¯×ßλÖÃ
int choose = -1;
final ImageIcon image_wall;// ǽµÄͼƬ
ImageIcon image_robot, image_deImageIcon;
Image wallImage, robotImage, destinationImage;
URL robot, destination;
boolean haveCreat = true;// ±êʶÃÔ¹¬ÊÇ·ñÒÑ´´½¨
boolean ifEnd = true;// ÊÇ·ñ½áÊøÑ°ÕÒ
boolean enThread = true;// ¿ØÖÆÏß³ÌÑ»·
int runNumber = 0, firstTime = 0;
MazePoint[] path;//·¾¶´¢´æÊý×é
/** ÃÔ¹¬ËùÔÚλÖà */
final int MAZE_WIDTH = 500;
final int MAZE_HEIGHT = 500;
final int MAZE_X = 80;
final int MAZE_Y = 100;
/** °´Å¥ËùÔÚλÖà */
final int BTNS_OFFSET_X = 600;
final int BTNS_OFFSET_Y = 205;
// µ÷½ÚÃÔ¹¬´óСµÄËĸö°´Å¥µÄµÄ×ø±ê
final int SIZE_OFFSET_X = 680;
final int SIZE_OFFSET_Y = 130;
final int ROW_GAP = 80;
final int COL_GAP = 36;
final int bt_distance = 50;// °´Å¥¼ä¾àÀë
public RandomModel() {
mazeRow = 5;
mazeCol = 5;
mazePanel = new CanvasPanel();
step_point = new MazePoint(0, 0);
// System.out.println("x:"+step_point.getX()+" y:"+step_point.getY());
// ¼ÓÔØÍ¼Æ¬
URL wall = this.getClass().getResource("/images/image_wall.jpg");
image_wall = new ImageIcon(wall);
wallImage = image_wall.getImage();
robot = this.getClass().getResource("/images/image_robot.png");
image_robot = new ImageIcon(robot);
destination = this.getClass().getResource(
"/images/image_destination.png");// Öյ㴦
image_deImageIcon = new ImageIcon(destination);
destinationImage = image_deImageIcon.getImage();
initialzeThread();
initialize();// ³õʼ»¯ÃÔ¹¬½çÃæ
}
	/**
	 * Initializes the background thread that animates the automatic solve:
	 * every 100 ms it requests one solver step (choose = 1 makes the next
	 * repaint call CanvasPanel.paintOneStep) until the current position
	 * reaches the bottom-right goal cell, at which point it reports the
	 * step count, switches to drawing the full path (choose = 2) and
	 * pauses itself.
	 *
	 * NOTE(review): this relies on the deprecated Thread.suspend()/resume()
	 * API (resume is called from SolveMouseAdapter), which is inherently
	 * deadlock-prone; a wait()/notify() or flag-based pause would be safer.
	 */
	public void initialzeThread() {
		myThread = new Thread(new Runnable() {
			@Override
			public void run() {
				while (enThread) {
					try {
						// Animation delay between automatic solve steps.
						Thread.sleep(100);
					} catch (Exception e) {
						e.printStackTrace();
					}
					choose = 1; // next repaint performs one solver step
					mazePanel.repaint();
					// Goal reached: bottom-right cell (realx-1, realy-1).
					if (point_now.equal(realx - 1, realy - 1)) {
						enThread = false;
						JOptionPane.showMessageDialog(null, "µ½´ïÄ¿µÄµØ,¹²×ßÁË "
								+ step_number + " ²½");
						runNumber = 2;
						choose = 2; // next repaint also draws the solution path
						mazePanel.repaint();
						ifEnd = true;
						myThread.suspend(); // deprecated; see note above
					}
				}
			}
		});
	}
	/**
	 * Generates a new random maze of the given dimensions and resets the
	 * solver state to the top-left starting cell.
	 *
	 * @param x number of columns in the maze
	 * @param y number of rows in the maze
	 */
	private void GetMaze(int x, int y) {
		mazeModel = new MazeModel(x, y);
		maze = mazeModel.getRandomMaze();
		point_now = new MazePoint(0, 0);
		enThread = true; // allow the auto-solve thread loop to run again
		ifEnd = false; // search is not finished for the new maze
	}
public void initialize() {
randomPage = new JFrame();
randomPage.setResizable(false);
randomPage.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
Toolkit tool = Toolkit.getDefaultToolkit();// ¶¨Ò幤¾ß°ü
Dimension screenSize = tool.getScreenSize();
int screenWidth = screenSize.width / 2; // »ñÈ¡ÆÁÄ»µÄ¿í
int screenHeight = screenSize.height / 2; // »ñÈ¡ÆÁÄ»µÄ¸ß
URL creat = this.getClass().getResource("/images/image_create.png");
image_create = new ImageIcon(creat);
creat = this.getClass().getResource("/images/image_create1.png");
image_create1 = new ImageIcon(creat);
URL sovelAuto = this.getClass().getResource(
"/images/image_sovelAuto.png");
image_solveAuto = new ImageIcon(sovelAuto);
sovelAuto = this.getClass().getResource("/images/image_sovelAuto1.png");
image_solveAuto1 = new ImageIcon(sovelAuto);
URL again = this.getClass().getResource("/images/image_again.png");
image_again = new ImageIcon(again);
again = this.getClass().getResource("/images/image_again1.png");
image_again1 = new ImageIcon(again);
URL exit = this.getClass().getResource("/images/random_exit.png");
image_exit = new ImageIcon(exit);
exit = this.getClass().getResource("/images/random_exit1.png");
image_exit1 = new ImageIcon(exit);
URL button;
button = this.getClass().getResource("/images/image_tishi.png");
image_prompt = new ImageIcon(button);
button = this.getClass().getResource("/images/image_tishi1.png");
image_prompt1 = new ImageIcon(button);
button = this.getClass().getResource("/images/left_dark.png");
leftDark = new ImageIcon(button);
button = this.getClass().getResource("/images/left_light.png");
leftLight = new ImageIcon(button);
button = this.getClass().getResource("/images/right_dark.png");
rightDark = new ImageIcon(button);
button = this.getClass().getResource("/images/right_light.png");
rightLight = new ImageIcon(button);
// ÉÏÏÂ×óÓÒ°´Å¥
button = this.getClass().getResource("/images/up.png");
up = new ImageIcon(button);
button = this.getClass().getResource("/images/up1.png");
up1 = new ImageIcon(button);
button = this.getClass().getResource("/images/down.png");
down = new ImageIcon(button);
button = this.getClass().getResource("/images/down1.png");
down1 = new ImageIcon(button);
button = this.getClass().getResource("/images/left.png");
left = new ImageIcon(button);
button = this.getClass().getResource("/images/left1.png");
left1 = new ImageIcon(button);
button = this.getClass().getResource("/images/right.png");
right = new ImageIcon(button);
button = this.getClass().getResource("/images/right1.png");
right1 = new ImageIcon(button);
imagesList = new ArrayList<ImageIcon>();
for (int i = 1; i <= 25; i++) {
String name = "/images/" + i + ".png";
button = this.getClass().getResource(name);
imagesList.add(new ImageIcon(button));
}
rowNum = new JLabel();
rowNum.setIcon(imagesList.get(mazeRow - 1));
rowNum.setMaximumSize(new Dimension(imagesList.get(0).getIconWidth(),
imagesList.get(0).getIconHeight()));
rowNum.setBounds(SIZE_OFFSET_X + ROW_GAP / 2, SIZE_OFFSET_Y, imagesList
.get(0).getIconWidth(), imagesList.get(0).getIconHeight());
colNum = new JLabel();
colNum.setIcon(imagesList.get(mazeRow - 1));
colNum.setMaximumSize(new Dimension(imagesList.get(0).getIconWidth(),
imagesList.get(0).getIconHeight()));
colNum.setBounds(SIZE_OFFSET_X + ROW_GAP / 2, SIZE_OFFSET_Y + COL_GAP,
imagesList.get(0).getIconWidth(), imagesList.get(0)
.getIconHeight());
/** ------------------ÉÏÏÂ×óÓÒ°´Å¥---------------------- */
bt_up = new JLabel();
bt_up.setIcon(up);
bt_up.setMaximumSize(new Dimension(down.getIconWidth(), down
.getIconHeight()));
bt_up.setBounds(SIZE_OFFSET_X, 425, down.getIconWidth() + 50,
down.getIconHeight() + 50);
bt_down = new JLabel();
bt_down.setIcon(down);
bt_down.setMaximumSize(new Dimension(down.getIconWidth(), down
.getIconHeight()));
bt_down.setBounds(SIZE_OFFSET_X, 525, down.getIconWidth(),
down.getIconHeight());
bt_left = new JLabel();
bt_left.setIcon(left);
bt_left.setMaximumSize(new Dimension(left.getIconWidth(), left
.getIconHeight()));
bt_left.setBounds(632, 490, left.getIconWidth(), left.getIconHeight());
bt_right = new JLabel();
bt_right.setIcon(right);
bt_right.setMaximumSize(new Dimension(right.getIconWidth(), right
.getIconHeight()));
bt_right.setBounds(730, 490, right.getIconWidth(),
right.getIconHeight());
/************* ÉèÖÃËĸö°´Å¥ **********************/
rowLeft = new JLabel();
rowLeft.setIcon(leftDark);
rowLeft.setMaximumSize(new Dimension(leftDark.getIconWidth(), leftDark
.getIconHeight()));
rowLeft.setBounds(SIZE_OFFSET_X, SIZE_OFFSET_Y,
leftDark.getIconWidth(), leftDark.getIconHeight());
rowRight = new JLabel();
rowRight.setIcon(rightDark);
rowRight.setMaximumSize(new Dimension(rightDark.getIconWidth(),
rightDark.getIconHeight()));
rowRight.setBounds(SIZE_OFFSET_X + ROW_GAP, SIZE_OFFSET_Y,
rightDark.getIconWidth(), rightDark.getIconHeight());
colLeft = new JLabel();
colLeft.setIcon(leftDark);
colLeft.setMaximumSize(new Dimension(leftDark.getIconWidth(), leftDark
.getIconHeight()));
colLeft.setBounds(SIZE_OFFSET_X, SIZE_OFFSET_Y + COL_GAP,
leftDark.getIconWidth(), leftDark.getIconHeight());
colRight = new JLabel();
colRight.setIcon(rightDark);
colRight.setMaximumSize(new Dimension(rightDark.getIconWidth(),
rightDark.getIconHeight()));
colRight.setBounds(SIZE_OFFSET_X + ROW_GAP, SIZE_OFFSET_Y + COL_GAP,
rightDark.getIconWidth(), rightDark.getIconHeight());
/************* ÉèÖÃÎå¸ö°´Å¥ **********************/
bt_create = new JLabel();
bt_create.setIcon(image_create);
bt_create.setMaximumSize(new Dimension(image_create.getIconWidth(),
image_create.getIconHeight()));
bt_create.setBounds(BTNS_OFFSET_X, BTNS_OFFSET_Y,
image_create.getIconWidth(), image_create.getIconHeight());
bt_solveAuto = new JLabel();
bt_solveAuto.setIcon(image_solveAuto);
bt_solveAuto.setMaximumSize(new Dimension(image_solveAuto
.getIconWidth(), image_solveAuto.getIconHeight()));
bt_solveAuto
.setBounds(BTNS_OFFSET_X, BTNS_OFFSET_Y + bt_distance,
image_solveAuto.getIconWidth(),
image_solveAuto.getIconHeight());
bt_again = new JLabel();
bt_again.setIcon(image_again);
bt_again.setMaximumSize(new Dimension(image_again.getIconWidth(),
image_again.getIconHeight()));
bt_again.setBounds(BTNS_OFFSET_X, BTNS_OFFSET_Y + bt_distance * 2,
image_again.getIconWidth(), image_again.getIconHeight());
bt_prompt = new JLabel();
bt_prompt.setIcon(image_prompt);
bt_prompt.setMaximumSize(new Dimension(image_prompt.getIconWidth(),
image_prompt.getIconHeight()));
bt_prompt.setBounds(BTNS_OFFSET_X, BTNS_OFFSET_Y + bt_distance * 3,
image_prompt.getIconWidth(), image_prompt.getIconHeight());
bt_exit1 = new JLabel();
bt_exit1.setIcon(image_exit);
bt_exit1.setMaximumSize(new Dimension(image_exit.getIconWidth(),
image_exit.getIconHeight()));
bt_exit1.setBounds(BTNS_OFFSET_X, BTNS_OFFSET_Y + bt_distance * 4,
image_exit.getIconWidth(), image_exit.getIconHeight());
URL back = this.getClass().getResource("/images/random_maze.png");
ImageIcon backgrouondIcon = new ImageIcon(back);
panel = new JPanel() {
protected void paintComponent(Graphics g) {
g.drawImage(backgrouondIcon.getImage(), 0, 0, this);
super.paintComponent(g);
}
};
panel.setLayout(null);
panel.add(bt_create);
panel.add(bt_solveAuto);
panel.add(bt_again);
panel.add(bt_prompt);
panel.add(bt_exit1);
panel.add(rowLeft);
panel.add(rowRight);
panel.add(colLeft);
panel.add(colRight);
panel.add(rowNum);
panel.add(colNum);
panel.add(bt_up);
panel.add(bt_down);
panel.add(bt_left);
panel.add(bt_right);
panel.setBounds(0, 0, 800, 600);
panel.setOpaque(false);
randomPage.setUndecorated(true);// È¥³ý´°¿ÚµÄ±ß¿ò
randomPage.setBounds(screenWidth - MainView.GAME_WIDTH / 2,
screenHeight - MainView.GAME_HEIGHT / 2, MainView.GAME_WIDTH,
MainView.GAME_HEIGHT);
randomPage.setLayout(null);
randomPage.getContentPane().add(panel);
randomPage.setVisible(true);
rowLeft.addMouseListener(new RowLeftMouseAdapter());
rowRight.addMouseListener(new RowRightMouseAdapter());
colLeft.addMouseListener(new ColLeftMouseAdapter());
colRight.addMouseListener(new ColRightMouseAdapter());
bt_up.addMouseListener(new UpMouseAdapter());
bt_down.addMouseListener(new DownMouseAdapter());
bt_left.addMouseListener(new LeftMouseAdapter());
bt_right.addMouseListener(new RightMouseAdapter());
bt_create.addMouseListener(new CreateMouseAdapter());
bt_solveAuto.addMouseListener(new SolveMouseAdapter());
bt_again.addMouseListener(new AgainMouseAdapter());
bt_prompt.addMouseListener(new PromptMouseAdapter());
bt_exit1.addMouseListener(new ExitMouseAdapter());
}
class CanvasPanel extends JPanel {
int WALL_HEIGHT = 5;
private static final long serialVersionUID = 6386167515595185903L;
/** ÖØÐ´paint()·½·¨ */
@Override
public void paint(Graphics arg0) {
// TODO Auto-generated method stub
super.paint(arg0);
/** ÉèÖñ³¾°Í¼Æ¬ */
URL back = this.getClass().getResource(
"/images/random_maze_back.png");
final ImageIcon backgroundIcon = new ImageIcon(back);
arg0.drawImage(backgroundIcon.getImage(), 0, 0, null);
if (choose == RANDOM_DRAW) {
paintStart(arg0);// ³õÊ¼Ëæ»úÃÔ¹¬
} else if (choose == STEP_DRAW) {
paintOneStep(arg0);// Ö´ÐÐÒ»´ÎºóÃÔ¹¬µÄͼÏñ
} else if (choose == ALL_DRAW) {// Ö´Ðе½×îºóʱ½«Í¨Â·»³ö
paintRoad(arg0);
paintOneStep(arg0);
} else if (choose == 4) {
paintUp(arg0);
} else if (choose == 5) {
paintDown(arg0);
} else if (choose == 6) {
paintLeft(arg0);
} else if (choose == 7) {
paintRight(arg0);
}
}
public void paintUp(Graphics g) {
drawMaze(g);
// if(maze.get(now_x+mazeCol*(now_y-1)).getState()==1){
g.drawImage(robotImage, step_point.getX() * sizex + sizex / 6,
step_point.getY() * sizey + sizey / 6, sizex - sizex / 6,
sizey - sizey / 6, null);
if (step_point.getX() != (realx - 1)
|| step_point.getY() != (realy - 1))
g.drawImage(destinationImage, maze.get(maze.size() - 1).getX()
* sizex + sizex / 6, maze.get(maze.size() - 1).getY()
* sizey + sizey / 6, sizex - sizex / 6, sizey - sizey
/ 6, null);
step_number++;
// }
}
public void paintDown(Graphics g) {
drawMaze(g);
g.drawImage(robotImage, step_point.getX() * sizex + sizex / 6,
step_point.getY() * sizey + sizey / 6, sizex - sizex / 6,
sizey - sizey / 6, null);
if (step_point.getX() != (realx - 1)
|| step_point.getY() != (realy - 1))
g.drawImage(destinationImage, maze.get(maze.size() - 1).getX()
* sizex + sizex / 6, maze.get(maze.size() - 1).getY()
* sizey + sizey / 6, sizex - sizex / 6, sizey - sizey
/ 6, null);
step_number++;
}
public void paintLeft(Graphics g) {
drawMaze(g);
// if(mazePoint.getAhead(point_now,0)){
g.drawImage(robotImage, step_point.getX() * sizex + sizex / 6,
step_point.getY() * sizey + sizey / 6, sizex - sizex / 6,
sizey - sizey / 6, null);
if (step_point.getX() != (realx - 1)
|| step_point.getY() != (realy - 1))
g.drawImage(destinationImage, maze.get(maze.size() - 1).getX()
* sizex + sizex / 6, maze.get(maze.size() - 1).getY()
* sizey + sizey / 6, sizex - sizex / 6, sizey - sizey
/ 6, null);
step_number++;
// }
}
public void paintRight(Graphics g) {
drawMaze(g);
// if(mazePoint.getAhead(point_now,2)){
g.drawImage(robotImage, step_point.getX() * sizex + sizex / 6,
step_point.getY() * sizey + sizey / 6, sizex - sizex / 6,
sizey - sizey / 6, null);
if (step_point.getX() != (realx - 1)
|| step_point.getY() != (realy - 1))
g.drawImage(destinationImage, maze.get(maze.size() - 1).getX()
* sizex + sizex / 6, maze.get(maze.size() - 1).getY()
* sizey + sizey / 6, sizex - sizex / 6, sizey - sizey
/ 6, null);
step_number++;
// }
}
		/**
		 * Draws the maze walls. Each cell carries four flags
		 * (up/down/left/right); a value of 0 means a wall is present on that
		 * side, so a scaled wall image is drawn along that edge of the cell.
		 */
		public void drawMaze(Graphics g) {
			MazePoint temp;
			int x, y, up, down, left, right;
			for (int i = 0; i < maze.size(); i++) {
				temp = maze.get(i);
				// Pixel coordinates of the cell's bottom-right corner
				// (grid position scaled by cell size, offset by one cell).
				x = temp.getX() * sizex + sizex;
				y = temp.getY() * sizey + sizey;
				up = temp.getUp();
				down = temp.getDown();
				left = temp.getLeft();
				right = temp.getRight();
				// Wall thickness: a fraction (1/WALL_HEIGHT) of the smaller
				// cell dimension.
				int sizeMin = Math
						.min(sizex / WALL_HEIGHT, sizey / WALL_HEIGHT);
				// Draw the wall image, scaled to fit, on each closed side.
				if (up == 0)
					g.drawImage(wallImage, x - sizex, y - sizey, sizex
							+ sizeMin, sizeMin, null);
				if (down == 0)
					g.drawImage(wallImage, x - sizex, y, sizex + sizeMin,
							sizeMin, null);
				if (left == 0)
					g.drawImage(wallImage, x - sizex, y - sizey, sizeMin, sizey
							+ sizeMin, null);
				if (right == 0)
					g.drawImage(wallImage, x, y - sizey, sizeMin, sizey
							+ sizeMin, null);
			}
		}
/** »æÖÆ»úÆ÷È˼°ÖÕµãͼƬµÄ·½·¨ */
public void paintStart(Graphics g) {
drawMaze(g);
robotImage = image_robot.getImage();
g.drawImage(robotImage, sizex / 6, sizey / 6, sizex - sizex / 6,
sizey - sizey / 6, null);
g.drawImage(destinationImage, maze.get(maze.size() - 1).getX()
* sizex + sizex / 6, maze.get(maze.size() - 1).getY()
* sizey + sizey / 6, sizex - sizex / 6, sizey - sizey / 6,
null);
}
public void paintOneStep(Graphics g) {// ǰ½øÒ»²½
drawMaze(g);
point_now = mazeModel.StepSolve();
g.drawImage(robotImage, point_now.getX() * sizex + sizex / 6,
point_now.getY() * sizey + sizey / 6, sizex - sizex / 6,
sizey - sizey / 6, null);
if (point_now.getX() != (realx - 1)
|| point_now.getY() != (realy - 1))
g.drawImage(destinationImage, maze.get(maze.size() - 1).getX()
* sizex + sizex / 6, maze.get(maze.size() - 1).getY()
* sizey + sizey / 6, sizex - sizex / 6, sizey - sizey
/ 6, null);
step_number++;
}
public void paintRoad(Graphics g) {
path = mazeModel.getStack().getStack();
int count = mazeModel.getStack().getTop();
for (int i = 0; i < count; i++) {
g.drawLine(path[i].getX() * sizex + sizex / 2, path[i].getY()
* sizey + sizey / 2, path[i + 1].getX() * sizex + sizex
/ 2, path[i + 1].getY() * sizey + sizey / 2);
}
g.drawLine(path[count].getX() * sizex + sizex / 2,
path[count].getY() * sizey + sizey / 2,
maze.get(maze.size() - 1 - mazeModel.getWidth()).getX()
* sizex + sizex / 2,
maze.get(maze.size() - 1 - mazeModel.getWidth()).getY()
* sizey + 3 * sizey / 2);
}
}
/** ´´½¨°´Å¥¼àÌýʼþ */
class CreateMouseAdapter extends MouseAdapter {
@Override
public void mouseClicked(MouseEvent e) {
// TODO Auto-generated method stub
super.mouseClicked(e);
step_number = 0;
haveCreat = true;
realx = RandomModel.mazeCol;
realy = RandomModel.mazeRow;
sizex = MAZE_WIDTH / (realx + 1);
sizey = MAZE_HEIGHT / (realy + 1);
GetMaze(realx, realy);
mazePanel.setLayout(null);
mazePanel.setBounds(MAZE_X, MAZE_Y, MAZE_WIDTH, MAZE_HEIGHT);
randomPage.getContentPane().add(mazePanel, 0);
choose = 0;
mazePanel.repaint();
}
@Override
public void mouseEntered(MouseEvent e) {
// TODO Auto-generated method stub
super.mouseEntered(e);
bt_create.setIcon(image_create1);
}
@Override
public void mouseExited(MouseEvent e) {
// TODO Auto-generated method stub
super.mouseExited(e);
bt_create.setIcon(image_create);
}
}
class SolveMouseAdapter extends MouseAdapter {
@Override
public void mouseClicked(MouseEvent e) {
// TODO Auto-generated method stub
super.mouseClicked(e);
if(step_point.getX()==0&&step_point.getY()==0){
step_number = 0;
}
point_now=step_point;
if (!haveCreat) {
JOptionPane.showMessageDialog(null, "ÇëÏÈÉú³ÉÒ»¸öÃÔ¹¬");
} else if (!ifEnd) {
runNumber++;
if (runNumber == 1) {
myThread.start();
try {
Thread.sleep(2000);
} catch (InterruptedException e1) {
// TODO Auto-generated catch block
e1.printStackTrace();
}
} else if (runNumber % 2 == 1) {
myThread.resume();
} else {
myThread.suspend();
}
}
}
@Override
public void mouseEntered(MouseEvent e) {
// TODO Auto-generated method stub
super.mouseEntered(e);
bt_solveAuto.setIcon(image_solveAuto1);
}
@Override
public void mouseExited(MouseEvent e) {
// TODO Auto-generated method stub
super.mouseExited(e);
bt_solveAuto.setIcon(image_solveAuto);
}
}
class AgainMouseAdapter extends MouseAdapter {
@Override
public void mouseClicked(MouseEvent e) {
// TODO Auto-generated method stub
super.mouseClicked(e);
step_number=0;
point_now = maze.get(0);
mazeModel.setNowPoint(point_now);
choose = 0;
mazePanel.repaint();
enThread = true;
ifEnd = false;
if (runNumber != 0)
runNumber = 2;
}
@Override
public void mouseEntered(MouseEvent e) {
// TODO Auto-generated method stub
super.mouseEntered(e);
bt_again.setIcon(image_again1);
}
@Override
public void mouseExited(MouseEvent e) {
// TODO Auto-generated method stub
super.mouseExited(e);
bt_again.setIcon(image_again);
}
}
class PromptMouseAdapter extends MouseAdapter {
@Override
public void mouseClicked(MouseEvent e) {
// TODO Auto-generated method stub
super.mouseClicked(e);
if (!haveCreat) {
JOptionPane.showMessageDialog(null, "ÇëÏÈÉú³ÉÒ»¸öÃÔ¹¬");
} else {
if (point_now.getX() != (realx - 1)
|| point_now.getY() != (realy - 1)) {
choose = 1;
mazePanel.repaint();
} else {
choose = 2;
JOptionPane.showMessageDialog(null, "µ½´ïÄ¿µÄµØ£¡");
mazePanel.repaint();
}
}
}
@Override
public void mouseEntered(MouseEvent e) {
// TODO Auto-generated method stub
super.mouseEntered(e);
bt_prompt.setIcon(image_prompt1);
}
@Override
public void mouseExited(MouseEvent e) {
// TODO Auto-generated method stub
super.mouseExited(e);
bt_prompt.setIcon(image_prompt);
}
}
class ExitMouseAdapter extends MouseAdapter {
@Override
public void mouseClicked(MouseEvent e) {
// TODO Auto-generated method stub
super.mouseClicked(e);
randomPage.setVisible(false);
MainView.mainPage.setVisible(true);
}
@Override
public void mouseEntered(MouseEvent e) {
// TODO Auto-generated method stub
super.mouseEntered(e);
bt_exit1.setIcon(image_exit1);
}
@Override
public void mouseExited(MouseEvent e) {
// TODO Auto-generated method stub
super.mouseExited(e);
bt_exit1.setIcon(image_exit);
}
}
// µ÷½ÚÃÔ¹¬´óСµÄËĸö¼àÌýÆ÷
class RowLeftMouseAdapter extends MouseAdapter {
public void mouseClicked(MouseEvent e) {
mazeRow--;
rowNum.setIcon(imagesList.get(mazeRow - 1));
}
public void mouseEntered(MouseEvent e) {
rowLeft.setIcon(leftLight);
}
public void mouseExited(MouseEvent e) {
rowLeft.setIcon(leftDark);
}
}
class RowRightMouseAdapter extends MouseAdapter {
public void mouseClicked(MouseEvent e) {
mazeRow++;
rowNum.setIcon(imagesList.get(mazeRow - 1));
}
public void mouseEntered(MouseEvent e) {
rowRight.setIcon(rightLight);
}
public void mouseExited(MouseEvent e) {
rowRight.setIcon(rightDark);
}
}
class ColLeftMouseAdapter extends MouseAdapter {
public void mouseClicked(MouseEvent e) {
mazeCol--;
colNum.setIcon(imagesList.get(mazeCol - 1));
}
public void mouseEntered(MouseEvent e) {
colLeft.setIcon(leftLight);
}
public void mouseExited(MouseEvent e) {
colLeft.setIcon(leftDark);
}
}
class ColRightMouseAdapter extends MouseAdapter {
public void mouseClicked(MouseEvent e) {
mazeCol++;
colNum.setIcon(imagesList.get(mazeCol - 1));
}
public void mouseEntered(MouseEvent e) {
colRight.setIcon(rightLight);
}
public void mouseExited(MouseEvent e) {
colRight.setIcon(rightDark);
}
}
// ÉÏÏÂ×óÓÒ¼ü
	/** Moves the manually-controlled player one cell up when the "up" button is clicked. */
	class UpMouseAdapter extends MouseAdapter {
		public void mouseClicked(MouseEvent e) {
			// Look up the cell the player currently occupies in the linear list.
			// NOTE(review): the stride used here is mazeRow, but for a row-major
			// list indexed as (x + width*y) the stride should be the column
			// count (mazeCol); this looks wrong for non-square mazes — confirm
			// against the ordering produced by MazeModel.getRandomMaze().
			test_point= maze.get(step_point.getX()+mazeRow*step_point.getY());
			if(test_point.getUp()==1){ // 1 = no wall above, so the move is legal
				step_point.setY(step_point.getY() - 1);
				choose = 4; // next repaint draws the manual "moved up" state
				panel.repaint();
				System.out.println("x:" + step_point.getX() + " y:"
						+ step_point.getY() + " step:" + step_number);
				// Report success when the bottom-right goal cell is reached.
				if (step_point.getX() == (mazeCol - 1)
						&& step_point.getY() == (mazeRow - 1)) {
					JOptionPane.showMessageDialog(null, "µ½´ïÖÕµã,¹²×ßÁË "+step_number+" ²½");
				}
			}
		}
		public void mouseEntered(MouseEvent e) {
			bt_up.setIcon(up1);
		}
		public void mouseExited(MouseEvent e) {
			bt_up.setIcon(up);
		}
	}
class DownMouseAdapter extends MouseAdapter {
public void mouseClicked(MouseEvent e) {
test_point= maze.get(step_point.getX()+mazeRow*step_point.getY());
if(test_point.getDown()==1){
step_point.setY(step_point.getY() + 1);
choose = 5;
panel.repaint();
System.out.println("x:" + step_point.getX() + " y:"
+ step_point.getY() + " step:" + step_number);
if (step_point.getX() == ((mazeCol - 1))
&& step_point.getY() == (mazeRow - 1)) {
JOptionPane.showMessageDialog(null, "µ½´ïÖÕµã,¹²×ßÁË "+step_number+" ²½");
}
}
}
public void mouseEntered(MouseEvent e) {
bt_down.setIcon(down1);
}
public void mouseExited(MouseEvent e) {
bt_down.setIcon(down);
}
}
class LeftMouseAdapter extends MouseAdapter {
public void mouseClicked(MouseEvent e) {
test_point= maze.get(step_point.getX()+mazeRow*step_point.getY());
if(test_point.getLeft()==1){
step_point.setX(step_point.getX() - 1);
choose = 6;
panel.repaint();
System.out.println("x:" + step_point.getX() + " y:"
+ step_point.getY() + " step:" + step_number);
if (step_point.getX() == (mazeCol - 1)
&& step_point.getY() == (mazeRow - 1)) {
JOptionPane.showMessageDialog(null, "µ½´ïÖÕµã,¹²×ßÁË "+step_number+" ²½");
}
}
}
public void mouseEntered(MouseEvent e) {
bt_left.setIcon(left1);
}
public void mouseExited(MouseEvent e) {
bt_left.setIcon(left);
}
}
class RightMouseAdapter extends MouseAdapter {
public void mouseClicked(MouseEvent e) {
test_point= maze.get(step_point.getX()+mazeRow*step_point.getY());
if(test_point.getRight()==1){
step_point.setX(step_point.getX() + 1);
choose = 7;
panel.repaint();
System.out.println("x:" + step_point.getX() + " y:"
+ step_point.getY() + " step:" + step_number);
if (step_point.getX() == (mazeCol - 1)
&& step_point.getY() == (mazeRow - 1)) {
JOptionPane.showMessageDialog(null, "µ½´ïÖÕµã,¹²×ßÁË "+step_number+" ²½");
}
}
}
public void mouseEntered(MouseEvent e) {
bt_right.setIcon(right1);
}
public void mouseExited(MouseEvent e) {
bt_right.setIcon(right);
}
}
}
| xcg-code/RoobotMaze-java | src/view/RandomModel.java | Java | apache-2.0 | 27,250 |
#!/usr/bin/env python3
import argparse
import logging
import os
import sys
from pylib.common.utils.misc_utils import MiscUtils
from pylib.pygitlab import Gitlab
########################################
# add script folder to python path
# allow running script from any folder location.
# without need to mess with PYTHONPATH env variable
script_path = os.path.realpath(__file__)
sys.path.insert(0, script_path)
# print('\n'.join(sys.path))
#########################################
# Author: Adam Richards
# find group id using: bin/gitlab_groups.py --list
# export call: bin/gitlab_group_vars.py --id 793 --export --file ~/domino_vars.json
# import call: bin/gitlab_group_vars.py --id 1659 --import --file ~/domino_vars.json
# Construct the shared Gitlab client and utility helpers. If construction
# fails (e.g. missing configuration) report the error and stop: every command
# below depends on these objects, so continuing (as the original did) only
# produced a confusing NameError later.
try:
    gl = Gitlab()
    utils = MiscUtils()
except Exception as e:
    print(e, file=sys.stderr)
    sys.exit(1)
if __name__ == "__main__":
    logging.basicConfig(
        level=logging.ERROR, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    parser = argparse.ArgumentParser(
        description="Program: {0}".format(sys.argv[0]))
    # The three commands are mutually exclusive; all store into 'command'.
    parser.add_argument('--list', dest='command', action='store_const',
                        const='LIST', default=None,
                        help='list all groups variables ')
    parser.add_argument('--export', dest='command', action='store_const',
                        const='EXPORT', default=None,
                        help='export all group variables to a file')
    parser.add_argument('--import', dest='command', action='store_const',
                        const='IMPORT', default=None,
                        help='import all group variables from a file')
    parser.add_argument('--file', dest='filename',
                        action='store', help='file to be used for import or export')
    parser.add_argument('--id', dest='group_id',
                        action='store', help='gitlab group id')
    parser.add_argument('--debug', dest='debug_enabled', action='store_const',
                        const=True, default=False, help='enable debug logging')
    parser.add_argument('--json', dest='json_output', action='store_const',
                        const=True, default=False, help='output json results')

    args = parser.parse_args()

    # Zero real arguments (argv[0] is always the script name): show usage.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    debug_enabled = args.debug_enabled
    if debug_enabled:
        gl.setLogLevel(logging.DEBUG)
    json_output = args.json_output
    filename = args.filename

    if debug_enabled:
        print("Command line arguments object: \n{0}".format(
            utils.object_to_json(args)))

    # Fail fast with a usage error on missing required options instead of
    # crashing later with a traceback (or silently doing nothing, as the
    # original did when no command was given).
    if args.command is None:
        parser.error('one of --list, --export or --import is required')
    if args.group_id is None:
        parser.error('--id is required')
    if args.command in ('EXPORT', 'IMPORT') and filename is None:
        parser.error('--file is required with --export/--import')

    gl.setup()

    if args.command == 'EXPORT':
        # Dump every variable of the group to the given JSON file.
        group_vars_obj = gl.group_vars_get(args.group_id, page_size=20)
        utils.file_write_as_json(filename, group_vars_obj)
        sys.exit(0)

    if args.command == 'IMPORT':
        # Load variables from the JSON file and push them into the group.
        group_vars_obj = utils.file_read_as_json(filename)
        result_ar = gl.group_vars_put(args.group_id, group_vars_obj)
        if json_output:
            print(utils.object_to_json_pretty(result_ar))
        else:
            for item in result_ar:
                print("key: {0}, status: {1}".format(
                    item['key'], item['status']))
        sys.exit(0)

    if args.command == 'LIST':
        group_vars_obj = gl.group_vars_get(args.group_id, page_size=20)
        if json_output:
            print(utils.object_to_json_pretty(group_vars_obj))
        else:
            for item in group_vars_obj:
                print("{0}".format(item['key']))
        sys.exit(0)
| abrichards5/ABRUnixScripts | bin/gitlab_group_vars.py | Python | apache-2.0 | 4,029 |
package esiv2
import "time"
// GetCharactersCharacterIdOnlineOk models the 200 OK response body of the
// ESI endpoint GET /characters/{character_id}/online/.
type GetCharactersCharacterIdOnlineOk struct {

	// LastLogin is the timestamp of the character's last login.
	LastLogin time.Time `json:"last_login,omitempty"`

	// LastLogout is the timestamp of the character's last logout.
	LastLogout time.Time `json:"last_logout,omitempty"`

	// Logins is the total number of times the character has logged in.
	Logins int32 `json:"logins,omitempty"`

	// Online reports whether the character is currently online.
	Online bool `json:"online,omitempty"`
}
| antihax/mock-esi | v2/go/model_get_characters_character_id_online_ok.go | GO | apache-2.0 | 452 |
package org.darcstarsolutions.finance.common.calculators;
import org.darcstarsolutions.finance.common.Asset;
/**
 * Defines basic arithmetic operations over assets of a common type.
 *
 * @param <T> the asset family the calculator operates on
 * @param <U> the concrete asset type accepted and returned by the operations
 *
 * Created by mharris021 on 10/16/14.
 */
public interface ArithmeticCalculator<T extends Asset, U extends T> {

    /** Returns the sum of the two assets. */
    U add(U asset1, U asset2);

    /** Returns the result of subtracting {@code asset2} from {@code asset1}. */
    U subtract(U asset1, U asset2);

    /** Returns the arithmetic negation of the given asset. */
    U negate(U asset);
}
| DarcStarSolutions/common-finance | src/main/java/org/darcstarsolutions/finance/common/calculators/ArithmeticCalculator.java | Java | apache-2.0 | 321 |
package com.guardanis.imageloader.filters;
import android.content.Context;
import android.graphics.Bitmap;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * {@link ImageFilter} that replaces specific ARGB color values in a
 * {@link Bitmap} with configured substitute colors.
 */
public class BitmapColorReplacementFilter extends ImageFilter<Bitmap> {

    /** Maps each color value to be replaced to its substitute color. */
    private Map<Integer, Integer> replacements;

    /**
     * @param context
     * @param replace The singular color value to be replaced
     * @param with The value to replace with
     */
    public BitmapColorReplacementFilter(Context context, int replace, int with) {
        super(context);
        this.replacements = new HashMap<Integer, Integer>();
        this.replacements.put(replace, with);
    }

    /**
     * @param context
     * @param replacements A Map of integers where the key is the value to be replaced and the value is what is should be replaced with
     */
    public BitmapColorReplacementFilter(Context context, Map<Integer, Integer> replacements) {
        super(context);
        this.replacements = replacements;
    }

    @Override
    public Bitmap filter(Bitmap unedited) {
        if(unedited != null){
            // setPixels requires a mutable bitmap; copy if necessary.
            if(!unedited.isMutable())
                unedited = mutate(unedited);

            unedited.setPixels(getReplacementPixels(unedited), 0, unedited.getWidth(),
                    0, 0, unedited.getWidth(), unedited.getHeight());
        }

        return unedited;
    }

    /**
     * Returns the bitmap's pixel array with every configured color replaced.
     * Uses a single map lookup per pixel; the original scanned the whole
     * replacement key set for each pixel (O(pixels * colors)).
     */
    private int[] getReplacementPixels(Bitmap copy){
        int[] pixels = new int [copy.getHeight() * copy.getWidth()];
        copy.getPixels(pixels, 0, copy.getWidth(),
                0, 0, copy.getWidth(), copy.getHeight());

        for(int i = 0; i < pixels.length; i++){
            Integer replacement = replacements.get(pixels[i]);
            if(replacement != null)
                pixels[i] = replacement;
        }

        return pixels;
    }

    @Override
    public String getAdjustmentInfo(){
        // Sort the keys so the adjustment id is deterministic: the original
        // iterated a HashMap's keySet, whose order can differ between runs,
        // producing different ids for the same configuration and defeating
        // cache identity for multi-color replacements.
        StringBuilder values = new StringBuilder();
        List<Integer> keys = new ArrayList<Integer>(replacements.keySet());
        Collections.sort(keys);
        for(Integer key : keys)
            values.append(key).append("-").append(replacements.get(key));

        return getClass().getSimpleName() + "_" + values;
    }
}
| hgl888/Android-Universal-Image-Loader | imageloader/src/main/java/com/guardanis/imageloader/filters/BitmapColorReplacementFilter.java | Java | apache-2.0 | 2,070 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.isis.core.metamodel.specloader.specimpl;
import java.util.List;
import com.google.common.base.Objects;
import com.google.common.collect.Lists;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.isis.applib.RecoverableException;
import org.apache.isis.applib.annotation.ActionSemantics;
import org.apache.isis.applib.annotation.Where;
import org.apache.isis.applib.filter.Filter;
import org.apache.isis.core.commons.debug.DebugString;
import org.apache.isis.core.commons.exceptions.UnknownTypeException;
import org.apache.isis.core.metamodel.adapter.ObjectAdapter;
import org.apache.isis.core.metamodel.consent.Consent;
import org.apache.isis.core.metamodel.consent.InteractionInitiatedBy;
import org.apache.isis.core.metamodel.consent.InteractionResultSet;
import org.apache.isis.core.metamodel.facetapi.Facet;
import org.apache.isis.core.metamodel.facetapi.FacetHolder;
import org.apache.isis.core.metamodel.facetapi.FeatureType;
import org.apache.isis.core.metamodel.facets.FacetedMethod;
import org.apache.isis.core.metamodel.facets.FacetedMethodParameter;
import org.apache.isis.core.metamodel.facets.TypedHolder;
import org.apache.isis.core.metamodel.facets.param.choices.ActionChoicesFacet;
import org.apache.isis.core.metamodel.facets.actions.debug.DebugFacet;
import org.apache.isis.core.metamodel.facets.actions.defaults.ActionDefaultsFacet;
import org.apache.isis.core.metamodel.facets.actions.exploration.ExplorationFacet;
import org.apache.isis.core.metamodel.facets.actions.action.invocation.ActionInvocationFacet;
import org.apache.isis.core.metamodel.facets.actions.prototype.PrototypeFacet;
import org.apache.isis.core.metamodel.facets.actions.semantics.ActionSemanticsFacet;
import org.apache.isis.core.metamodel.facets.param.choices.ActionParameterChoicesFacet;
import org.apache.isis.core.metamodel.facets.param.defaults.ActionParameterDefaultsFacet;
import org.apache.isis.core.metamodel.interactions.ActionInvocationContext;
import org.apache.isis.core.metamodel.interactions.ActionUsabilityContext;
import org.apache.isis.core.metamodel.interactions.ActionVisibilityContext;
import org.apache.isis.core.metamodel.interactions.InteractionUtils;
import org.apache.isis.core.metamodel.interactions.UsabilityContext;
import org.apache.isis.core.metamodel.interactions.ValidityContext;
import org.apache.isis.core.metamodel.interactions.VisibilityContext;
import org.apache.isis.core.metamodel.spec.ActionType;
import org.apache.isis.core.metamodel.spec.DomainModelException;
import org.apache.isis.core.metamodel.spec.Instance;
import org.apache.isis.core.metamodel.spec.ObjectSpecification;
import org.apache.isis.core.metamodel.spec.feature.ObjectAction;
import org.apache.isis.core.metamodel.spec.feature.ObjectActionParameter;
import org.apache.isis.core.metamodel.spec.feature.ObjectMemberDependencies;
/**
 * Metamodel representation of a domain object action: wraps a
 * {@link FacetedMethod} and exposes the action's parameters, semantics,
 * defaults/choices, and its visibility/usability/validity rules.
 */
public class ObjectActionImpl extends ObjectMemberAbstract implements ObjectAction {
    private final static Logger LOG = LoggerFactory.getLogger(ObjectActionImpl.class);
    /**
     * Parses an {@link ActionType} from its enum constant name.
     *
     * <p>
     * NOTE(review): {@code ActionType.valueOf(String)} never returns
     * {@code null} — it throws {@link IllegalArgumentException} for unknown
     * names — so the null guard below appears to be purely defensive.
     */
    public static ActionType getType(final String typeStr) {
        final ActionType type = ActionType.valueOf(typeStr);
        if (type == null) {
            throw new IllegalArgumentException();
        }
        return type;
    }
    /**
     * Lazily initialized by {@link #getParameters()} (so don't use directly!)
     */
    private List<ObjectActionParameter> parameters;
    // //////////////////////////////////////////////////////////////////
    // Constructors
    // //////////////////////////////////////////////////////////////////
    public ObjectActionImpl(final FacetedMethod facetedMethod, final ObjectMemberDependencies objectMemberDependencies) {
        super(facetedMethod, FeatureType.ACTION, objectMemberDependencies);
    }
    // //////////////////////////////////////////////////////////////////
    // ReturnType, OnType, Actions (set)
    // //////////////////////////////////////////////////////////////////
    /**
     * Always returns <tt>null</tt>.
     */
    @Override
    public ObjectSpecification getSpecification() {
        return null;
    }
    /** Return type of the action, as declared by its invocation facet. */
    @Override
    public ObjectSpecification getReturnType() {
        final ActionInvocationFacet facet = getActionInvocationFacet();
        return facet.getReturnType();
    }
    /**
     * Returns true if the represented action returns something, else returns
     * false.
     */
    @Override
    public boolean hasReturn() {
        if(getReturnType() == null) {
            // this shouldn't happen; return Type always defined, even if represents void.class
            return false;
        }
        return getReturnType() != getSpecificationLoader().loadSpecification(void.class);
    }
    @Override
    public ObjectSpecification getOnType() {
        final ActionInvocationFacet facet = getActionInvocationFacet();
        return facet.getOnType();
    }
    /**
     * Declared action semantics; defaults to {@code NON_IDEMPOTENT} when no
     * {@link ActionSemanticsFacet} is present.
     */
    @Override
    public ActionSemantics.Of getSemantics() {
        final ActionSemanticsFacet facet = getFacet(ActionSemanticsFacet.class);
        return facet != null? facet.value(): ActionSemantics.Of.NON_IDEMPOTENT;
    }
    // /////////////////////////////////////////////////////////////
    // getInstance
    // /////////////////////////////////////////////////////////////
    @Override
    public Instance getInstance(final ObjectAdapter adapter) {
        final ObjectAction specification = this;
        return adapter.getInstance(specification);
    }
    // /////////////////////////////////////////////////////////////
    // Type, IsContributed
    // /////////////////////////////////////////////////////////////
    @Override
    public ActionType getType() {
        return getType(this);
    }
    // Facet checks are ordered by priority: DEBUG, then EXPLORATION, then
    // PROTOTYPE; anything without one of those facets is a plain USER action.
    private static ActionType getType(final FacetHolder facetHolder) {
        Facet facet = facetHolder.getFacet(DebugFacet.class);
        if (facet != null) {
            return ActionType.DEBUG;
        }
        facet = facetHolder.getFacet(ExplorationFacet.class);
        if (facet != null) {
            return ActionType.EXPLORATION;
        }
        facet = facetHolder.getFacet(PrototypeFacet.class);
        if (facet != null) {
            return ActionType.PROTOTYPE;
        }
        return ActionType.USER;
    }
    // /////////////////////////////////////////////////////////////
    // Parameters
    // /////////////////////////////////////////////////////////////
    @Override
    public int getParameterCount() {
        return getFacetedMethod().getParameters().size();
    }
    /** An action prompts for parameters whenever it takes at least one. */
    @Override
    public boolean promptForParameters(final ObjectAdapter target) {
        return getParameterCount() != 0;
    }
    /**
     * Build lazily by {@link #getParameters()}.
     *
     * <p>
     * Although this is lazily loaded, the method is also <tt>synchronized</tt>
     * so there shouldn't be any thread race conditions.
     */
    @Override
    public synchronized List<ObjectActionParameter> getParameters() {
        if (this.parameters == null) {
            final int parameterCount = getParameterCount();
            final List<ObjectActionParameter> parameters = Lists.newArrayList();
            final List<FacetedMethodParameter> paramPeers = getFacetedMethod().getParameters();
            for (int i = 0; i < parameterCount; i++) {
                final TypedHolder paramPeer = paramPeers.get(i);
                final ObjectSpecification specification = ObjectMemberAbstract.getSpecification(getSpecificationLoader(), paramPeer.getType());
                // Pick the parameter implementation from the parameter's type:
                // parseable (value) types, scalar references; collections are
                // rejected outright.
                final ObjectActionParameter parameter;
                if (specification.isParseable()) {
                    parameter = new ObjectActionParameterParseable(i, this, paramPeer);
                } else if (specification.isNotCollection()) {
                    parameter = new OneToOneActionParameterImpl(i, this, paramPeer);
                } else if (specification.isParentedOrFreeCollection()) {
                    throw new UnknownTypeException("collections not supported as parameters: " + getIdentifier());
                } else {
                    throw new UnknownTypeException(specification);
                }
                parameters.add(parameter);
            }
            this.parameters = parameters;
        }
        return parameters;
    }
    /** Specifications of each parameter, in declaration order. */
    @Override
    public synchronized List<ObjectSpecification> getParameterTypes() {
        final List<ObjectSpecification> parameterTypes = Lists.newArrayList();
        final List<ObjectActionParameter> parameters = getParameters();
        for (final ObjectActionParameter parameter : parameters) {
            parameterTypes.add(parameter.getSpecification());
        }
        return parameterTypes;
    }
    /** Parameter whose id equals {@code paramId}, or {@code null} if none. */
    @Override
    public ObjectActionParameter getParameterById(final String paramId) {
        final List<ObjectActionParameter> allParameters = getParameters();
        for (int i = 0; i < allParameters.size(); i++) {
            final ObjectActionParameter param = allParameters.get(i);
            if (Objects.equal(paramId, param.getId())) {
                return param;
            }
        }
        return null;
    }
    /** Parameter whose (friendly) name equals {@code paramName}, or {@code null}. */
    @Override
    public ObjectActionParameter getParameterByName(final String paramName) {
        final List<ObjectActionParameter> allParameters = getParameters();
        for (int i = 0; i < allParameters.size(); i++) {
            final ObjectActionParameter param = allParameters.get(i);
            if (Objects.equal(paramName, param.getName())) {
                return param;
            }
        }
        return null;
    }
    /** Parameters accepted by the supplied filter, in declaration order. */
    @Override
    public List<ObjectActionParameter> getParameters(final Filter<ObjectActionParameter> filter) {
        final List<ObjectActionParameter> allParameters = getParameters();
        final List<ObjectActionParameter> selectedParameters = Lists.newArrayList();
        for (int i = 0; i < allParameters.size(); i++) {
            if (filter.accept(allParameters.get(i))) {
                selectedParameters.add(allParameters.get(i));
            }
        }
        return selectedParameters;
    }
    private ObjectActionParameter getParameter(final int position) {
        final List<ObjectActionParameter> parameters = getParameters();
        if (position >= parameters.size()) {
            throw new IllegalArgumentException("getParameter(int): only " + parameters.size() + " parameters, position=" + position);
        }
        return parameters.get(position);
    }
    // /////////////////////////////////////////////////////////////
    // Visible (or hidden)
    // /////////////////////////////////////////////////////////////
    @Override
    public VisibilityContext<?> createVisibleInteractionContext(
            final ObjectAdapter targetObjectAdapter, final InteractionInitiatedBy interactionInitiatedBy,
            Where where) {
        return new ActionVisibilityContext(targetObjectAdapter, getIdentifier(), interactionInitiatedBy, where);
    }
    // /////////////////////////////////////////////////////////////
    // Usable (or disabled)
    // /////////////////////////////////////////////////////////////
    @Override
    public UsabilityContext<?> createUsableInteractionContext(
            final ObjectAdapter targetObjectAdapter, final InteractionInitiatedBy interactionInitiatedBy,
            Where where) {
        return new ActionUsabilityContext(targetObjectAdapter, getIdentifier(), interactionInitiatedBy, where);
    }
    // //////////////////////////////////////////////////////////////////
    // validate
    // //////////////////////////////////////////////////////////////////
    /** Consent (allow/veto) for invoking the action with the proposed arguments. */
    @Override
    public Consent isProposedArgumentSetValid(
            final ObjectAdapter target,
            final ObjectAdapter[] proposedArguments,
            final InteractionInitiatedBy interactionInitiatedBy) {
        return isProposedArgumentSetValidResultSet(target, proposedArguments, interactionInitiatedBy).createConsent();
    }
    // Validates each argument individually first; the action-level validation
    // only runs if every per-argument check passed.
    private InteractionResultSet isProposedArgumentSetValidResultSet(
            final ObjectAdapter objectAdapter,
            final ObjectAdapter[] proposedArguments,
            final InteractionInitiatedBy interactionInitiatedBy) {
        final InteractionResultSet resultSet = new InteractionResultSet();
        final List<ObjectActionParameter> actionParameters = getParameters();
        if (proposedArguments != null) {
            for (int i = 0; i < proposedArguments.length; i++) {
                final ValidityContext<?> ic =
                        actionParameters.get(i).createProposedArgumentInteractionContext(
                                objectAdapter, proposedArguments, i, interactionInitiatedBy
                        );
                InteractionUtils.isValidResultSet(getParameter(i), ic, resultSet);
            }
        }
        // only check the action's own validity if all the arguments are OK.
        if (resultSet.isAllowed()) {
            final ValidityContext<?> ic = createActionInvocationInteractionContext(
                    objectAdapter, proposedArguments, interactionInitiatedBy);
            InteractionUtils.isValidResultSet(this, ic, resultSet);
        }
        return resultSet;
    }
    private ActionInvocationContext createActionInvocationInteractionContext(
            final ObjectAdapter targetObject,
            final ObjectAdapter[] proposedArguments,
            final InteractionInitiatedBy interactionInitiatedBy) {
        return new ActionInvocationContext(targetObject, getIdentifier(), proposedArguments,
                interactionInitiatedBy);
    }
    // //////////////////////////////////////////////////////////////////
    // executeWithRuleChecking, execute
    // //////////////////////////////////////////////////////////////////
    /**
     * Executes the action after enforcing the full "see it / use it / do it"
     * rule chain; visibility/usability vetoes raise
     * {@code AuthorizationException}, argument-validation vetoes raise
     * {@link RecoverableException}.
     */
    @Override
    public ObjectAdapter executeWithRuleChecking(
            final ObjectAdapter target,
            final ObjectAdapter[] arguments,
            final InteractionInitiatedBy interactionInitiatedBy,
            final Where where) {
        // see it?
        final Consent visibility = isVisible(target, interactionInitiatedBy, where);
        if (visibility.isVetoed()) {
            throw new AuthorizationException();
        }
        // use it?
        final Consent usability = isUsable(target, interactionInitiatedBy, where);
        if(usability.isVetoed()) {
            throw new AuthorizationException();
        }
        // do it?
        final Consent validity = isProposedArgumentSetValid(target, arguments, interactionInitiatedBy);
        if(validity.isVetoed()) {
            throw new RecoverableException(validity.getReason());
        }
        return execute(target, arguments, interactionInitiatedBy);
    }
    /** Executes the action directly (no rule checking); delegates to the invocation facet. */
    @Override
    public ObjectAdapter execute(
            final ObjectAdapter target,
            final ObjectAdapter[] arguments,
            final InteractionInitiatedBy interactionInitiatedBy) {
        if(LOG.isDebugEnabled()) {
            LOG.debug("execute action " + target + "." + getId());
        }
        final ActionInvocationFacet facet = getFacet(ActionInvocationFacet.class);
        return facet.invoke(this, target, arguments,
                interactionInitiatedBy);
    }
    protected ActionInvocationFacet getActionInvocationFacet() {
        return getFacetedMethod().getFacet(ActionInvocationFacet.class);
    }
    // //////////////////////////////////////////////////////////////////
    // defaults
    // //////////////////////////////////////////////////////////////////
    /**
     * Default argument values for the action, one adapter per parameter
     * ({@code null} entries where no default applies). Supports both the
     * action-level defaultXxx() style and the per-parameter defaultNXxx()
     * style.
     */
    @Override
    public ObjectAdapter[] getDefaults(final ObjectAdapter target) {
        final int parameterCount = getParameterCount();
        final List<ObjectActionParameter> parameters = getParameters();
        final Object[] parameterDefaultPojos;
        final ActionDefaultsFacet facet = getFacet(ActionDefaultsFacet.class);
        if (!facet.isNoop()) {
            // use the old defaultXxx approach
            parameterDefaultPojos = facet.getDefaults(target);
            if (parameterDefaultPojos.length != parameterCount) {
                throw new DomainModelException("Defaults array of incompatible size; expected " + parameterCount + " elements, but was " + parameterDefaultPojos.length + " for " + facet);
            }
            for (int i = 0; i < parameterCount; i++) {
                if (parameterDefaultPojos[i] != null) {
                    final ObjectSpecification componentSpec = getSpecificationLoader().loadSpecification(parameterDefaultPojos[i].getClass());
                    final ObjectSpecification parameterSpec = parameters.get(i).getSpecification();
                    if (!componentSpec.isOfType(parameterSpec)) {
                        throw new DomainModelException("Defaults type incompatible with parameter " + (i + 1) + " type; expected " + parameterSpec.getFullIdentifier() + ", but was " + componentSpec.getFullIdentifier());
                    }
                }
            }
        } else {
            // use the new defaultNXxx approach for each param in turn
            // (the reflector will have made sure both aren't installed).
            parameterDefaultPojos = new Object[parameterCount];
            for (int i = 0; i < parameterCount; i++) {
                final ActionParameterDefaultsFacet paramFacet = parameters.get(i).getFacet(ActionParameterDefaultsFacet.class);
                if (paramFacet != null && !paramFacet.isNoop()) {
                    parameterDefaultPojos[i] = paramFacet.getDefault(target, null);
                } else {
                    parameterDefaultPojos[i] = null;
                }
            }
        }
        final ObjectAdapter[] parameterDefaultAdapters = new ObjectAdapter[parameterCount];
        if (parameterDefaultPojos != null) {
            for (int i = 0; i < parameterCount; i++) {
                parameterDefaultAdapters[i] = adapterFor(parameterDefaultPojos[i]);
            }
        }
        return parameterDefaultAdapters;
    }
    private ObjectAdapter adapterFor(final Object pojo) {
        return pojo == null ? null : getPersistenceSessionService().adapterFor(pojo);
    }
    // /////////////////////////////////////////////////////////////
    // options (choices)
    // /////////////////////////////////////////////////////////////
    /**
     * Choices (drop-down options) for each parameter. As with
     * {@link #getDefaults(ObjectAdapter)}, supports both the action-level
     * choicesXxx() style and the per-parameter choicesNXxx() style; an empty
     * choice array for a parameter is normalised to {@code null}.
     */
    @Override
    public ObjectAdapter[][] getChoices(
            final ObjectAdapter target,
            final InteractionInitiatedBy interactionInitiatedBy) {
        final int parameterCount = getParameterCount();
        Object[][] parameterChoicesPojos;
        final ActionChoicesFacet facet = getFacet(ActionChoicesFacet.class);
        final List<ObjectActionParameter> parameters = getParameters();
        if (!facet.isNoop()) {
            // using the old choicesXxx() approach
            parameterChoicesPojos = facet.getChoices(target,
                    interactionInitiatedBy);
            // if no options, or not the right number of pojos, then default
            if (parameterChoicesPojos == null) {
                parameterChoicesPojos = new Object[parameterCount][];
            } else if (parameterChoicesPojos.length != parameterCount) {
                throw new DomainModelException(
                        String.format("Choices array of incompatible size; expected %d elements, but was %d for %s",
                                parameterCount, parameterChoicesPojos.length, facet));
            }
        } else {
            // use the new choicesNXxx approach for each param in turn
            // (the reflector will have made sure both aren't installed).
            parameterChoicesPojos = new Object[parameterCount][];
            for (int i = 0; i < parameterCount; i++) {
                final ActionParameterChoicesFacet paramFacet = parameters.get(i).getFacet(ActionParameterChoicesFacet.class);
                if (paramFacet != null && !paramFacet.isNoop()) {
                    parameterChoicesPojos[i] = paramFacet.getChoices(target, null,
                            interactionInitiatedBy);
                } else {
                    parameterChoicesPojos[i] = new Object[0];
                }
            }
        }
        final ObjectAdapter[][] parameterChoicesAdapters = new ObjectAdapter[parameterCount][];
        for (int i = 0; i < parameterCount; i++) {
            final ObjectSpecification paramSpec = parameters.get(i).getSpecification();
            if (parameterChoicesPojos[i] != null && parameterChoicesPojos[i].length > 0) {
                ObjectActionParameterAbstract.checkChoicesOrAutoCompleteType(
                        getSpecificationLoader(), parameterChoicesPojos[i], paramSpec);
                parameterChoicesAdapters[i] = new ObjectAdapter[parameterChoicesPojos[i].length];
                for (int j = 0; j < parameterChoicesPojos[i].length; j++) {
                    parameterChoicesAdapters[i][j] = adapterFor(parameterChoicesPojos[i][j]);
                }
            } else if (paramSpec.isNotCollection()) {
                parameterChoicesAdapters[i] = new ObjectAdapter[0];
            } else {
                throw new UnknownTypeException(paramSpec);
            }
            if (parameterChoicesAdapters[i].length == 0) {
                parameterChoicesAdapters[i] = null;
            }
        }
        return parameterChoicesAdapters;
    }
    // //////////////////////////////////////////////////////////////////
    // debug, toString
    // //////////////////////////////////////////////////////////////////
    @Override
    public String debugData() {
        final DebugString debugString = new DebugString();
        getFacetedMethod().debugData(debugString);
        return debugString.toString();
    }
    @Override
    public String toString() {
        final StringBuffer sb = new StringBuffer();
        sb.append("Action [");
        sb.append(super.toString());
        sb.append(",type=");
        sb.append(getType());
        sb.append(",returns=");
        sb.append(getReturnType());
        sb.append(",parameters={");
        for (int i = 0; i < getParameterCount(); i++) {
            if (i > 0) {
                sb.append(",");
            }
            sb.append(getParameters().get(i).getSpecification().getShortIdentifier());
        }
        sb.append("}]");
        return sb.toString();
    }
}
| howepeng/isis | core/metamodel/src/main/java/org/apache/isis/core/metamodel/specloader/specimpl/ObjectActionImpl.java | Java | apache-2.0 | 23,245 |
package com.home.teamnotifier.db;
import com.google.inject.Inject;
import com.home.teamnotifier.core.ServerAvailabilityChecker;
import com.home.teamnotifier.core.responses.status.EnvironmentInfo;
import com.home.teamnotifier.core.responses.status.EnvironmentsInfo;
import com.home.teamnotifier.gateways.EnvironmentGateway;
import javax.persistence.TypedQuery;
import javax.persistence.criteria.CriteriaBuilder;
import javax.persistence.criteria.CriteriaQuery;
import javax.persistence.criteria.Root;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import static com.home.teamnotifier.db.DbGatewayCommons.toServerInfo;
import static java.util.stream.Collectors.toList;
public class DbEnvironmentGateway implements EnvironmentGateway {
private final TransactionHelper transactionHelper;
private final ServerAvailabilityChecker serverAvailabilityChecker;
@Inject
public DbEnvironmentGateway(
final TransactionHelper transactionHelper,
ServerAvailabilityChecker serverAvailabilityChecker
) {
this.transactionHelper = transactionHelper;
this.serverAvailabilityChecker = serverAvailabilityChecker;
}
@Override
public EnvironmentsInfo status() {
final Map<ServerEntity, Boolean> availabilityMap = serverAvailabilityChecker.getAvailability();
return new EnvironmentsInfo(
loadListFromDb().stream()
.map(e -> toEnvironment(e, availabilityMap))
.collect(Collectors.toList())
);
}
private List<EnvironmentEntity> loadListFromDb() {
return transactionHelper.transaction(em -> {
final CriteriaBuilder cb = em.getCriteriaBuilder();
final CriteriaQuery<EnvironmentEntity> cq = cb.createQuery(EnvironmentEntity.class);
final Root<EnvironmentEntity> rootEntry = cq.from(EnvironmentEntity.class);
final CriteriaQuery<EnvironmentEntity> all = cq.select(rootEntry);
final TypedQuery<EnvironmentEntity> allQuery = em.createQuery(all);
return allQuery.getResultList();
});
}
private EnvironmentInfo toEnvironment(
final EnvironmentEntity entity,
final Map<ServerEntity, Boolean> availabilityMap
) {
return new EnvironmentInfo(
entity.getName(),
entity.getImmutableSetOfServers().stream()
.map(e -> toServerInfo(e, availabilityMap))
.collect(toList())
);
}
}
| Salamahin/teamnotifier | src/main/java/com/home/teamnotifier/db/DbEnvironmentGateway.java | Java | apache-2.0 | 2,638 |
#include "Explosion.h"
Explosion::Explosion(float x, float y, ALLEGRO_BITMAP *image) {
	// superclass init: position (x, y), zero velocity/bounds
	GameObject::Init(x, y, 0, 0, 0, 0, 0, 0);
	// set id to an explosion
	SetID(EXPLOSION);
	// make explosions not collidable
	SetCollidable(false);
	// Animation bookkeeping: the sheet is laid out in 8 columns of
	// 128x128 cells; the frame index advances one step every
	// `frameDelay` updates until it reaches `maxFrame`.
	maxFrame = 31;
	curFrame = 0;
	frameCount = 0;
	frameDelay = 2;
	frameWidth = 128;
	frameHeight = 128;
	animationColumns = 8;
	animationDirection = 1;
	// our sprite sheet for the explosion (supplied by the caller; the
	// member shadows the parameter name, hence the qualified assignment)
	Explosion::image = image;
}
void Explosion::Destroy() {
	// Nothing owned here to free (the sprite-sheet bitmap is supplied
	// externally and is not destroyed by this class); just delegate to
	// the superclass teardown.
	GameObject::Destroy();
}
void Explosion::Update() {
	// superclass update
	GameObject::Update();
	// Advance the animation one frame every `frameDelay` update ticks.
	if(++frameCount >= frameDelay) {
		curFrame += animationDirection;
		// When the index reaches maxFrame the explosion is over and the
		// object is flagged for removal.
		// NOTE(review): with maxFrame == 31 and `>=`, cell index 31 is
		// never rendered (the object dies on the tick that reaches it) --
		// confirm whether skipping the last sheet cell is intentional.
		if(curFrame >= maxFrame)
			SetAlive(false);
		frameCount = 0;
	}
}
void Explosion::Render() {
	// superclass render
	GameObject::Render();
	// Map the current frame index to its cell in the sprite sheet:
	// column = frame % columns, row = frame / columns (in pixels).
	int fx = (curFrame % animationColumns) * frameWidth;
	int fy = (curFrame / animationColumns) * frameHeight;
	// Blit that cell so it is centered on the object's (x, y) position.
	al_draw_bitmap_region(image, fx, fy, frameWidth, frameHeight, x - frameWidth / 2, y - frameHeight / 2, 0);
} | MrBotox/SpaceGame | SpaceGame/Explosion.cpp | C++ | apache-2.0 | 1317 |
/*
** Copyright 2011-2014 Centreon
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
**
** For more information : contact@centreon.com
*/
#include "com/centreon/broker/notification/builders/command_by_id_builder.hh"
#include "com/centreon/broker/notification/utilities/qhash_func.hh"
using namespace com::centreon::broker::notification;
using namespace com::centreon::broker::notification::objects;
/**
* Construct the object.
*
* @param[in,out] table The table to fill.
*/
command_by_id_builder::command_by_id_builder(
    QHash<uint32_t, command::ptr>& table)
  // Stores a reference, not a copy: the caller's table must outlive
  // this builder.
  : _table(table) {}
/**
* Add a command to the builder.
*
* @param[in] id The id of the command.
* @param[in] com The command.
*/
void command_by_id_builder::add_command(uint32_t id, command::ptr com) {
  // Register the command under the given id; QHash::insert replaces any
  // value already stored for that key, matching operator[] assignment.
  _table.insert(id, com);
}
| centreon/centreon-broker | notification/src/builders/command_by_id_builder.cc | C++ | apache-2.0 | 1,317 |
/*
*
* Copyright (C) 2000 Silicon Graphics, Inc. All Rights Reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
* Mountain View, CA 94043, or:
*
* http://www.sgi.com
*
* For further information regarding this notice, see:
*
* http://oss.sgi.com/projects/GenInfo/NoticeExplan/
*
*/
/*----------------------------------------------------------------
* This is an example from the Inventor Mentor.
* chapter 15, example 2.
*
* Uses 3 translate1Draggers to change the x, y, and z
* components of a translation. A calculator engine assembles
* the components.
* Arranges these draggers along edges of a box containing the
* 3D text to be moved.
* The 3D text and the box are made with SoShapeKits
*----------------------------------------------------------------*/
#include <stdlib.h>
#include <Inventor/Xt/SoXt.h>
#include <Inventor/Xt/viewers/SoXtExaminerViewer.h>
#include <Inventor/draggers/SoTranslate1Dragger.h>
#include <Inventor/engines/SoCalculator.h>
#include <Inventor/nodekits/SoShapeKit.h>
#include <Inventor/nodes/SoCube.h>
#include <Inventor/nodes/SoSeparator.h>
#include <Inventor/nodes/SoText3.h>
#include <Inventor/nodes/SoTransform.h>
int
main(int , char **argv)
{
   Widget myWindow = SoXt::init(argv[0]);
   if (myWindow == NULL) exit(1);
   // Scene root; ref'd for the lifetime of the program (never unref'd --
   // SoXt::mainLoop() below does not return).
   SoSeparator *root = new SoSeparator;
   root->ref();
   // Create 3 translate1Draggers and place them in space.
   SoSeparator *xDragSep = new SoSeparator;
   SoSeparator *yDragSep = new SoSeparator;
   SoSeparator *zDragSep = new SoSeparator;
   root->addChild(xDragSep);
   root->addChild(yDragSep);
   root->addChild(zDragSep);
   // Separators will each hold a different transform; the y and z draggers
   // are rotated so each one slides along its own axis.
   SoTransform *xDragXf = new SoTransform;
   SoTransform *yDragXf = new SoTransform;
   SoTransform *zDragXf = new SoTransform;
   xDragXf->set("translation 0 -4 8");
   yDragXf->set("translation -8 0 8 rotation 0 0 1 1.57");
   zDragXf->set("translation -8 -4 0 rotation 0 1 0 -1.57");
   xDragSep->addChild(xDragXf);
   yDragSep->addChild(yDragXf);
   zDragSep->addChild(zDragXf);
   // Add the draggers under the separators, after transforms
   SoTranslate1Dragger *xDragger = new SoTranslate1Dragger;
   SoTranslate1Dragger *yDragger = new SoTranslate1Dragger;
   SoTranslate1Dragger *zDragger = new SoTranslate1Dragger;
   xDragSep->addChild(xDragger);
   yDragSep->addChild(yDragger);
   zDragSep->addChild(zDragger);
   // Create shape kit for the 3D text
   // The text says 'Slide Arrows To Move Me'
   SoShapeKit *textKit = new SoShapeKit;
   root->addChild(textKit);
   SoText3 *myText3 = new SoText3;
   textKit->setPart("shape", myText3);
   myText3->justification = SoText3::CENTER;
   myText3->string.set1Value(0,"Slide Arrows");
   myText3->string.set1Value(1,"To");
   myText3->string.set1Value(2,"Move Me");
   textKit->set("font { size 2}");
   textKit->set("material { diffuseColor 1 1 0}");
   // Create shape kit for surrounding box.
   // It's an unpickable cube, sized as (16,8,16)
   SoShapeKit *boxKit = new SoShapeKit;
   root->addChild(boxKit);
   boxKit->setPart("shape", new SoCube);
   boxKit->set("drawStyle { style LINES }");
   boxKit->set("pickStyle { style UNPICKABLE }");
   boxKit->set("material { emissiveColor 1 0 1 }");
   boxKit->set("shape { width 16 height 8 depth 16 }");
   // Create the calculator to make a translation
   // for the text. The x component of a translate1Dragger's
   // translation field shows how far it moved in that
   // direction. So our text's translation is:
   // (xDragTranslate[0],yDragTranslate[0],zDragTranslate[0])
   SoCalculator *myCalc = new SoCalculator;
   myCalc->ref();
   myCalc->A.connectFrom(&xDragger->translation);
   myCalc->B.connectFrom(&yDragger->translation);
   myCalc->C.connectFrom(&zDragger->translation);
   myCalc->expression = "oA = vec3f(A[0],B[0],C[0])";
   // Connect the translation in textKit from myCalc
   SoTransform *textXf
      = (SoTransform *) textKit->getPart("transform",TRUE);
   textXf->translation.connectFrom(&myCalc->oA);
   // Standard examiner viewer setup; mainLoop() runs the Xt event loop
   // and does not return.
   SoXtExaminerViewer *myViewer =
      new SoXtExaminerViewer(myWindow);
   myViewer->setSceneGraph(root);
   myViewer->setTitle("Slider Box");
   myViewer->viewAll();
   myViewer->show();
   SoXt::show(myWindow);
   SoXt::mainLoop();
}
| OpenXIP/xip-libraries | src/extern/inventor/apps/examples/Mentor/CXX/15.2.SliderBox.c++ | C++ | apache-2.0 | 5,500 |
from utils.header import MagicField, Field
from load_command import LoadCommandCommand, LoadCommandHeader
class SourceVersionField(Field):
    """Renders the packed 64-bit source version as ``A.B.C.D.E``.

    The value is packed (high to low) as a 24-bit major component followed
    by four 10-bit components, matching the shifts and masks below.
    """
    def display(self, header):
        if not self.mnemonic:
            return super(SourceVersionField, self).display(header)
        packed = self._get_value(header)
        # Top component is 24 bits wide; the remaining four are 10 bits each.
        components = [(packed >> 40) & 0xffffff]
        for shift in (30, 20, 10, 0):
            components.append((packed >> shift) & 0x3ff)
        return '.'.join(str(part) for part in components)
class SourceVersionCommand(LoadCommandHeader):
    """Mach-O LC_SOURCE_VERSION load command.

    Records the version of the source used to build the binary; the packed
    64-bit ``version`` value is rendered as A.B.C.D.E by SourceVersionField.
    """
    # No fixed byte order declared here -- presumably inherited/derived from
    # the parsed file; TODO confirm against LoadCommandHeader.
    ENDIAN = None
    FIELDS = (
        MagicField('cmd', 'I', {LoadCommandCommand.COMMANDS['LC_SOURCE_VERSION']: 'LC_SOURCE_VERSION'}),
        Field('cmdsize', 'I'),
        SourceVersionField('version', 'Q'),
    )
    def __init__(self, bytes_=None, **kwargs):
        # Declared before the base-class parse runs so the attribute always
        # exists even when no bytes are supplied.
        self.version = None
        super(SourceVersionCommand, self).__init__('source_version_command', bytes_, **kwargs)
| hkkwok/MachOTool | mach_o/headers/source_version_command.py | Python | apache-2.0 | 983 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.myfaces.ov2021.view.facelets.compiler;
import org.apache.myfaces.ov2021.config.ConfigFilesXmlValidationUtils;
import org.apache.myfaces.shared_ext202patch.config.MyfacesConfig;
import org.apache.myfaces.shared_ext202patch.util.ClassUtils;
import org.apache.myfaces.ov2021.spi.FaceletConfigResourceProvider;
import org.apache.myfaces.ov2021.spi.FaceletConfigResourceProviderFactory;
import org.apache.myfaces.ov2021.view.facelets.tag.AbstractTagLibrary;
import org.apache.myfaces.ov2021.view.facelets.tag.TagLibrary;
import org.apache.myfaces.ov2021.view.facelets.tag.composite.CompositeComponentResourceTagHandler;
import org.apache.myfaces.ov2021.view.facelets.tag.composite.CompositeResouceWrapper;
import org.apache.myfaces.ov2021.view.facelets.util.ParameterCheck;
import org.apache.myfaces.ov2021.view.facelets.util.ReflectionUtil;
import org.xml.sax.Attributes;
import org.xml.sax.InputSource;
import org.xml.sax.Locator;
import org.xml.sax.SAXException;
import org.xml.sax.SAXParseException;
import org.xml.sax.XMLReader;
import org.xml.sax.helpers.DefaultHandler;
import javax.faces.FacesException;
import javax.faces.application.Resource;
import javax.faces.application.ResourceHandler;
import javax.faces.context.ExternalContext;
import javax.faces.context.FacesContext;
import javax.faces.view.facelets.ComponentConfig;
import javax.faces.view.facelets.FaceletHandler;
import javax.faces.view.facelets.Tag;
import javax.faces.view.facelets.TagConfig;
import javax.faces.view.facelets.TagHandler;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Method;
import java.net.URL;
import java.net.URLConnection;
import java.util.Collection;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Handles creating a {@link org.apache.myfaces.ov2021.view.facelets.tag.TagLibrary TagLibrary} from a {@link java.net.URL URL} source.
*
* @author Jacob Hookom
* @version $Id: TagLibraryConfig.java 1449028 2013-02-22 13:30:05Z lofwyr $
*/
public final class TagLibraryConfig
{
//private final static String SUFFIX = ".taglib.xml";
//protected final static Logger log = Logger.getLogger("facelets.compiler");
protected final static Logger log = Logger.getLogger(TagLibraryConfig.class.getName());
    /**
     * {@link TagLibrary} implementation populated from a parsed .taglib.xml
     * document.  Besides the explicitly registered artifacts it can resolve
     * tags as composite components when a &lt;composite-library-name&gt; was
     * declared for the namespace.
     */
    private static class TagLibraryImpl extends AbstractTagLibrary
    {
        // Resource library containing composite components, or null when the
        // taglib did not declare <composite-library-name>.
        private String _compositeLibraryName;
        private final ResourceHandler _resourceHandler;
        public TagLibraryImpl(FacesContext facesContext, String namespace)
        {
            super(namespace);
            _compositeLibraryName = null;
            _resourceHandler = facesContext.getApplication().getResourceHandler();
        }
        @Override
        public boolean containsTagHandler(String ns, String localName)
        {
            boolean result = super.containsTagHandler(ns, localName);
            // Fall back to a composite component lookup: the tag exists when a
            // <localName>.xhtml resource is present in the composite library.
            if (!result && _compositeLibraryName != null && containsNamespace(ns))
            {
                Resource compositeComponentResource = _resourceHandler.createResource(
                    localName +".xhtml", _compositeLibraryName);
                if (compositeComponentResource != null)
                {
                    URL url = compositeComponentResource.getURL();
                    return (url != null);
                }
            }
            return result;
        }
        @Override
        public TagHandler createTagHandler(String ns, String localName,
            TagConfig tag) throws FacesException
        {
            TagHandler tagHandler = super.createTagHandler(ns, localName, tag);
            if (tagHandler == null && _compositeLibraryName != null && containsNamespace(ns))
            {
                String resourceName = localName + ".xhtml";
                // MYFACES-3308 If a composite component exists, it requires to
                // be always resolved. In other words, it should always exists a default.
                // The call here for resourceHandler.createResource, just try to get
                // the Resource and if it does not exists, it just returns null.
                // The intention of this code is just create an instance and pass to
                // CompositeComponentResourceTagHandler. Then, its values
                // (resourceName, libraryName) will be used to derive the real instance
                // to use in a view, based on the locale used.
                Resource compositeComponentResource = new CompositeResouceWrapper(
                    _resourceHandler.createResource(resourceName, _compositeLibraryName));
                // NOTE(review): this null check is always true -- the wrapper itself
                // is never null, only the wrapped resource may be.  Kept as-is to
                // preserve behavior.
                if (compositeComponentResource != null)
                {
                    ComponentConfig componentConfig = new ComponentConfigWrapper(tag,
                        "javax.faces.NamingContainer", null);
                    return new CompositeComponentResourceTagHandler(componentConfig, compositeComponentResource);
                }
            }
            return tagHandler;
        }
        public void setCompositeLibrary(String compositeLibraryName)
        {
            _compositeLibraryName = compositeLibraryName;
        }
        // The put* methods below validate their arguments and delegate to the
        // corresponding add* registration methods of AbstractTagLibrary.
        public void putConverter(String name, String id)
        {
            ParameterCheck.notNull("name", name);
            ParameterCheck.notNull("id", id);
            this.addConverter(name, id);
        }
        public void putConverter(String name, String id, Class<? extends TagHandler> handlerClass)
        {
            ParameterCheck.notNull("name", name);
            ParameterCheck.notNull("id", id);
            ParameterCheck.notNull("handlerClass", handlerClass);
            this.addConverter(name, id, handlerClass);
        }
        public void putValidator(String name, String id)
        {
            ParameterCheck.notNull("name", name);
            ParameterCheck.notNull("id", id);
            this.addValidator(name, id);
        }
        public void putValidator(String name, String id, Class<? extends TagHandler> handlerClass)
        {
            ParameterCheck.notNull("name", name);
            ParameterCheck.notNull("id", id);
            ParameterCheck.notNull("handlerClass", handlerClass);
            this.addValidator(name, id, handlerClass);
        }
        public void putTagHandler(String name, Class<? extends TagHandler> type)
        {
            ParameterCheck.notNull("name", name);
            ParameterCheck.notNull("type", type);
            this.addTagHandler(name, type);
        }
        public void putComponent(String name, String componentType, String rendererType)
        {
            ParameterCheck.notNull("name", name);
            ParameterCheck.notNull("componentType", componentType);
            this.addComponent(name, componentType, rendererType);
        }
        public void putComponent(String name, String componentType, String rendererType,
                                 Class<? extends TagHandler> handlerClass)
        {
            ParameterCheck.notNull("name", name);
            ParameterCheck.notNull("componentType", componentType);
            ParameterCheck.notNull("handlerClass", handlerClass);
            this.addComponent(name, componentType, rendererType, handlerClass);
        }
        public void putUserTag(String name, URL source)
        {
            ParameterCheck.notNull("name", name);
            ParameterCheck.notNull("source", source);
            this.addUserTag(name, source);
        }
        public void putFunction(String name, Method method)
        {
            ParameterCheck.notNull("name", name);
            ParameterCheck.notNull("method", method);
            this.addFunction(name, method);
        }
        public void putBehavior(String name, String id)
        {
            ParameterCheck.notNull("name", name);
            ParameterCheck.notNull("id", id);
            this.addBehavior(name, id);
        }
        public void putBehavior(String name, String id, Class<? extends TagHandler> handlerClass)
        {
            ParameterCheck.notNull("name", name);
            ParameterCheck.notNull("id", id);
            ParameterCheck.notNull("handlerClass", handlerClass);
            this.addBehavior(name, id, handlerClass);
        }
    }
private static class ComponentConfigWrapper implements ComponentConfig
{
protected final TagConfig parent;
protected final String componentType;
protected final String rendererType;
public ComponentConfigWrapper(TagConfig parent, String componentType,
String rendererType)
{
this.parent = parent;
this.componentType = componentType;
this.rendererType = rendererType;
}
public String getComponentType()
{
return this.componentType;
}
public String getRendererType()
{
return this.rendererType;
}
public FaceletHandler getNextHandler()
{
return this.parent.getNextHandler();
}
public Tag getTag()
{
return this.parent.getTag();
}
public String getTagId()
{
return this.parent.getTagId();
}
}
    /**
     * SAX {@link DefaultHandler} that incrementally builds a {@link TagLibrary}
     * while a .taglib.xml document is parsed.  Element text is accumulated in
     * {@code buffer} via {@link #characters} and consumed in {@link #endElement}
     * through {@link #captureBuffer()}.
     */
    private static class LibraryHandler extends DefaultHandler
    {
        private final URL source;
        private final FacesContext facesContext;
        // The library under construction; created when <namespace> or
        // <library-class> is seen.
        private TagLibrary library;
        // Accumulates character data of the element currently being parsed.
        private final StringBuffer buffer;
        private Locator locator;
        // Parser state carried between start/end element callbacks for the
        // artifact currently being declared.
        private String tagName;
        private String converterId;
        private String validatorId;
        private String behaviorId;
        private String componentType;
        private String rendererType;
        private String functionName;
        private Class<? extends TagHandler> handlerClass;
        private Class<?> functionClass;
        private String functionSignature;
        private String compositeLibraryName;
        public LibraryHandler(FacesContext facesContext, URL source)
        {
            this.source = source;
            this.buffer = new StringBuffer(64);
            this.facesContext = facesContext;
        }
        public TagLibrary getLibrary()
        {
            return this.library;
        }
        // Dispatches on the closing element name and registers the collected
        // state with the library being built.
        public void endElement(String uri, String localName, String qName) throws SAXException
        {
            try
            {
                if ("facelet-taglib".equals(qName))
                {
                    // Nothing to do
                }
                else if ("library-class".equals(qName))
                {
                    this.processLibraryClass();
                }
                else if ("namespace".equals(qName))
                {
                    this.library = new TagLibraryImpl(facesContext, this.captureBuffer());
                    if (this.compositeLibraryName != null)
                    {
                        ((TagLibraryImpl)this.library).setCompositeLibrary(compositeLibraryName);
                    }
                }
                else if ("composite-library-name".equals(qName))
                {
                    // May appear before or after <namespace>; handle both orders.
                    this.compositeLibraryName = this.captureBuffer();
                    if (this.library != null)
                    {
                        ((TagLibraryImpl)this.library).setCompositeLibrary(compositeLibraryName);
                    }
                }
                else if ("component-type".equals(qName))
                {
                    this.componentType = this.captureBuffer();
                }
                else if ("renderer-type".equals(qName))
                {
                    this.rendererType = this.captureBuffer();
                }
                else if ("tag-name".equals(qName))
                {
                    this.tagName = this.captureBuffer();
                }
                else if ("function-name".equals(qName))
                {
                    this.functionName = this.captureBuffer();
                }
                else if ("function-class".equals(qName))
                {
                    String className = this.captureBuffer();
                    this.functionClass = createClass(Object.class, className);
                }
                else if ("description".equals(qName))
                {
                    //Not used
                }
                else if ("display-name".equals(qName))
                {
                    //Not used
                }
                else if ("icon".equals(qName))
                {
                    //Not used
                }
                else
                {
                    // Make sure there we've seen a namespace element
                    // before trying any of the following elements to avoid
                    // obscure NPEs
                    if (this.library == null)
                    {
                        throw new IllegalStateException("No <namespace> element");
                    }
                    TagLibraryImpl impl = (TagLibraryImpl) this.library;
                    if ("tag".equals(qName))
                    {
                        if (this.handlerClass != null)
                        {
                            impl.putTagHandler(this.tagName, this.handlerClass);
                        }
                    }
                    else if ("handler-class".equals(qName))
                    {
                        String cName = this.captureBuffer();
                        this.handlerClass = createClass(TagHandler.class, cName);
                    }
                    else if ("component".equals(qName))
                    {
                        if (this.handlerClass != null)
                        {
                            impl.putComponent(this.tagName, this.componentType, this.rendererType, this.handlerClass);
                            this.handlerClass = null;
                        }
                        else
                        {
                            impl.putComponent(this.tagName, this.componentType, this.rendererType);
                        }
                    }
                    else if ("converter-id".equals(qName))
                    {
                        this.converterId = this.captureBuffer();
                    }
                    else if ("converter".equals(qName))
                    {
                        if (this.handlerClass != null)
                        {
                            impl.putConverter(this.tagName, this.converterId, handlerClass);
                            this.handlerClass = null;
                        }
                        else
                        {
                            impl.putConverter(this.tagName, this.converterId);
                        }
                        this.converterId = null;
                    }
                    else if ("validator-id".equals(qName))
                    {
                        this.validatorId = this.captureBuffer();
                    }
                    else if ("validator".equals(qName))
                    {
                        if (this.handlerClass != null)
                        {
                            impl.putValidator(this.tagName, this.validatorId, handlerClass);
                            this.handlerClass = null;
                        }
                        else
                        {
                            impl.putValidator(this.tagName, this.validatorId);
                        }
                        this.validatorId = null;
                    }
                    else if ("behavior-id".equals(qName))
                    {
                        this.behaviorId = this.captureBuffer();
                    }
                    else if ("behavior".equals(qName))
                    {
                        if (this.handlerClass != null)
                        {
                            impl.putBehavior(this.tagName, this.behaviorId, handlerClass);
                            this.handlerClass = null;
                        }
                        else
                        {
                            impl.putBehavior(this.tagName, this.behaviorId);
                        }
                        this.behaviorId = null;
                    }
                    else if ("source".equals(qName))
                    {
                        // User tag sources are resolved relative to the taglib file.
                        String path = this.captureBuffer();
                        URL url = new URL(this.source, path);
                        impl.putUserTag(this.tagName, url);
                    }
                    else if ("function-signature".equals(qName))
                    {
                        this.functionSignature = this.captureBuffer();
                        Method m = createMethod(this.functionClass, this.functionSignature);
                        impl.putFunction(this.functionName, m);
                    }
                }
            }
            catch (Exception e)
            {
                throw new SAXParseException("Error Handling [" + this.source + "@" + this.locator.getLineNumber()
                    + "," + this.locator.getColumnNumber() + "] <" + qName + ">", locator, e);
            }
        }
        // Returns the trimmed character data collected since the last element
        // start and resets the accumulator; rejects empty values.
        private String captureBuffer() throws Exception
        {
            String s = this.buffer.toString().trim();
            if (s.length() == 0)
            {
                throw new Exception("Value Cannot be Empty");
            }
            this.buffer.setLength(0);
            return s;
        }
        @SuppressWarnings("unchecked")
        private static <T> Class<? extends T> createClass(Class<T> type, String name) throws Exception
        {
            Class<? extends T> factory = (Class<? extends T>)ReflectionUtil.forName(name);
            if (!type.isAssignableFrom(factory))
            {
                throw new Exception(name + " must be an instance of " + type.getName());
            }
            return factory;
        }
        // Parses an EL function signature of the form
        // "returnType methodName(paramType1, paramType2, ...)" and resolves it
        // reflectively against the declared function class.
        private static Method createMethod(Class<?> type, String s) throws Exception
        {
            int pos = s.indexOf(' ');
            if (pos == -1)
            {
                throw new Exception("Must Provide Return Type: " + s);
            }
            else
            {
                int pos2 = s.indexOf('(', pos + 1);
                if (pos2 == -1)
                {
                    throw new Exception("Must provide a method name, followed by '(': " + s);
                }
                else
                {
                    String mn = s.substring(pos + 1, pos2).trim();
                    pos = s.indexOf(')', pos2 + 1);
                    if (pos == -1)
                    {
                        throw new Exception("Must close parentheses, ')' missing: " + s);
                    }
                    else
                    {
                        String[] ps = s.substring(pos2 + 1, pos).trim().split(",");
                        Class<?>[] pc;
                        if (ps.length == 1 && "".equals(ps[0]))
                        {
                            pc = new Class[0];
                        }
                        else
                        {
                            pc = new Class[ps.length];
                            for (int i = 0; i < pc.length; i++)
                            {
                                pc[i] = ReflectionUtil.forName(ps[i].trim());
                            }
                        }
                        try
                        {
                            return type.getMethod(mn, pc);
                        }
                        catch (NoSuchMethodException e)
                        {
                            throw new Exception("No Function Found on type: " + type.getName() + " with signature: "
                                + s);
                        }
                    }
                }
            }
        }
        // <library-class>: the taglib supplies its own TagLibrary implementation.
        private void processLibraryClass() throws Exception
        {
            String name = this.captureBuffer();
            Class<?> type = createClass(TagLibrary.class, name);
            this.library = (TagLibrary) type.newInstance();
        }
        // Resolves the 1.0 facelet-taglib DTD from the classpath so parsing
        // does not hit the network.
        public InputSource resolveEntity(String publicId, String systemId) throws SAXException
        {
            if ("-//Sun Microsystems, Inc.//DTD Facelet Taglib 1.0//EN".equals(publicId))
            {
                URL url = ClassUtils.getResource("org/apache/myfaces/resource/facelet-taglib_1_0.dtd");
                return new InputSource(url.toExternalForm());
            }
            return null;
        }
        public void characters(char[] ch, int start, int length) throws SAXException
        {
            this.buffer.append(ch, start, length);
        }
        // Resets per-artifact state when a new <tag> or <function> begins.
        public void startElement(String uri, String localName, String qName, Attributes attributes) throws SAXException
        {
            this.buffer.setLength(0);
            if ("tag".equals(qName))
            {
                this.handlerClass = null;
                this.componentType = null;
                this.rendererType = null;
                this.tagName = null;
            }
            else if ("function".equals(qName))
            {
                this.functionName = null;
                this.functionClass = null;
                this.functionSignature = null;
            }
        }
        public void error(SAXParseException e) throws SAXException
        {
            throw new SAXException(
                "Error Handling [" + this.source + "@" + e.getLineNumber() + "," + e.getColumnNumber() + "]", e);
        }
        public void setDocumentLocator(Locator locator)
        {
            this.locator = locator;
        }
        public void fatalError(SAXParseException e) throws SAXException
        {
            throw e;
        }
        public void warning(SAXParseException e) throws SAXException
        {
            throw e;
        }
    }
public TagLibraryConfig()
{
super();
}
    /**
     * Parses the facelet tag library file at the given URL and returns the
     * resulting {@link TagLibrary}, optionally validating the document first
     * (schema validation for 2.0 taglibs, DTD validation otherwise when
     * enabled via MyfacesConfig).
     *
     * @param facesContext the current faces context
     * @param url location of the .taglib.xml document
     * @return the parsed library, or null if the document declared none
     * @throws IOException if the document cannot be read or parsed
     */
    public static TagLibrary create(FacesContext facesContext, URL url) throws IOException
    {
        InputStream is = null;
        TagLibrary t = null;
        URLConnection conn = null;
        try
        {
            ExternalContext externalContext = facesContext.getExternalContext();
            boolean schemaValidating = false;
            // validate XML
            if (MyfacesConfig.getCurrentInstance(externalContext).isValidateXML())
            {
                String version = ConfigFilesXmlValidationUtils.getFaceletTagLibVersion(url);
                // NOTE: the assignment inside the condition is intentional --
                // it both records whether schema validation applies and guards
                // the validation call.
                if (schemaValidating = "2.0".equals(version))
                {
                    ConfigFilesXmlValidationUtils.validateFaceletTagLibFile(url, externalContext, version);
                }
            }
            // parse file
            LibraryHandler handler = new LibraryHandler(facesContext, url);
            SAXParser parser = createSAXParser(handler, externalContext, schemaValidating);
            conn = url.openConnection();
            // Bypass URL caches so redeployed taglib files are re-read.
            conn.setUseCaches(false);
            is = conn.getInputStream();
            parser.parse(is, handler);
            t = handler.getLibrary();
        }
        catch (SAXException e)
        {
            IOException ioe = new IOException("Error parsing [" + url + "]: ");
            ioe.initCause(e);
            throw ioe;
        }
        catch (ParserConfigurationException e)
        {
            IOException ioe = new IOException("Error parsing [" + url + "]: ");
            ioe.initCause(e);
            throw ioe;
        }
        finally
        {
            if (is != null)
            {
                is.close();
            }
        }
        return t;
    }
public void loadImplicit(FacesContext facesContext, Compiler compiler) throws IOException
{
//URL[] urls = Classpath.search(cl, "META-INF/", SUFFIX);
//for (int i = 0; i < urls.length; i++)
ExternalContext externalContext = facesContext.getExternalContext();
FaceletConfigResourceProvider provider = FaceletConfigResourceProviderFactory.
getFacesConfigResourceProviderFactory(externalContext).
createFaceletConfigResourceProvider(externalContext);
Collection<URL> urls = provider.getFaceletTagLibConfigurationResources(externalContext);
for (URL url : urls)
{
try
{
//TagLibrary tl = create(urls[i]);
TagLibrary tl = create(facesContext, url);
if (tl != null)
{
compiler.addTagLibrary(tl);
}
if (log.isLoggable(Level.FINE))
{
//log.fine("Added Library from: " + urls[i]);
log.fine("Added Library from: " + url);
}
}
catch (Exception e)
{
//log.log(Level.SEVERE, "Error Loading Library: " + urls[i], e);
log.log(Level.SEVERE, "Error Loading Library: " + url, e);
}
}
}
private static final SAXParser createSAXParser(LibraryHandler handler, ExternalContext externalContext,
boolean schemaValidating)
throws SAXException, ParserConfigurationException
{
SAXParserFactory factory = SAXParserFactory.newInstance();
if (MyfacesConfig.getCurrentInstance(externalContext).isValidateXML() && !schemaValidating)
{
// DTD validating
factory.setNamespaceAware(false);
factory.setFeature("http://xml.org/sax/features/validation", true);
factory.setValidating(true);
}
else
{
//Just parse it and do not validate, because it is not necessary.
factory.setNamespaceAware(true);
factory.setFeature("http://xml.org/sax/features/validation", false);
factory.setValidating(false);
}
SAXParser parser = factory.newSAXParser();
XMLReader reader = parser.getXMLReader();
reader.setErrorHandler(handler);
reader.setEntityResolver(handler);
return parser;
}
}
| lu4242/ext-myfaces-2.0.2-patch | trunk/myfaces-impl-2021override/src/main/java/org/apache/myfaces/ov2021/view/facelets/compiler/TagLibraryConfig.java | Java | apache-2.0 | 27,496 |
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by cloud.google.com/go/internal/gapicgen/gensnippets. DO NOT EDIT.
// [START bigqueryreservation_v1_generated_ReservationService_UpdateCapacityCommitment_sync]
package main
import (
"context"
reservation "cloud.google.com/go/bigquery/reservation/apiv1"
reservationpb "google.golang.org/genproto/googleapis/cloud/bigquery/reservation/v1"
)
func main() {
	// Create a BigQuery Reservation API client.
	ctx := context.Background()
	c, err := reservation.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// Release client resources when done.
	defer c.Close()
	// Build the request; fields are left for the caller to fill in.
	req := &reservationpb.UpdateCapacityCommitmentRequest{
		// TODO: Fill request struct fields.
		// See https://pkg.go.dev/google.golang.org/genproto/googleapis/cloud/bigquery/reservation/v1#UpdateCapacityCommitmentRequest.
	}
	resp, err := c.UpdateCapacityCommitment(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// [END bigqueryreservation_v1_generated_ReservationService_UpdateCapacityCommitment_sync]
| googleapis/google-cloud-go | internal/generated/snippets/bigquery/reservation/apiv1/Client/UpdateCapacityCommitment/main.go | GO | apache-2.0 | 1,545 |
# frozen_string_literal: true
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Auto-generated by gapic-generator-ruby. DO NOT EDIT!
require "google/cloud/orchestration/airflow/service/v1/environments"
require "google/cloud/orchestration/airflow/service/v1/image_versions"
require "google/cloud/orchestration/airflow/service/v1/version"
module Google
  module Cloud
    module Orchestration
      module Airflow
        module Service
          ##
          # To load this package, including all its services, and instantiate a client:
          #
          # @example
          #
          #     require "google/cloud/orchestration/airflow/service/v1"
          #     client = ::Google::Cloud::Orchestration::Airflow::Service::V1::Environments::Client.new
          #
          # Namespace for the v1 generated clients (Environments, ImageVersions);
          # see the requires at the top of this file.
          module V1
          end
        end
      end
    end
  end
end
# Pull in hand-written helper methods for V1, when the optional file exists.
v1_helpers = ::File.join __dir__, "v1", "_helpers.rb"
require "google/cloud/orchestration/airflow/service/v1/_helpers" if ::File.file?(v1_helpers)
| googleapis/google-cloud-ruby | google-cloud-orchestration-airflow-service-v1/lib/google/cloud/orchestration/airflow/service/v1.rb | Ruby | apache-2.0 | 1,523 |
import async from 'async';
import { RateVideoResponse } from './protos';
import { UserRatedVideo } from './events';
import { toCassandraUuid, toProtobufTimestamp } from '../common/protobuf-conversions';
import { getCassandraClient } from '../../common/cassandra';
import { publish } from '../../common/message-bus';
// CQL: bump the aggregate rating counters for a video.
// video_ratings is a counter table, so its columns can only be incremented.
const updateRatingsCql = `
  UPDATE video_ratings
  SET rating_counter = rating_counter + 1, rating_total = rating_total + ?
  WHERE videoid = ?`;
// CQL: record an individual user's rating of a specific video.
const insertUserRatingCql = `
  INSERT INTO video_ratings_by_user (
    videoid, userid, rating)
  VALUES (?, ?, ?)`;
/**
 * Adds a user's rating of a video.
 *
 * Updates both the aggregate counter table and the per-user ratings table,
 * then publishes a UserRatedVideo event before responding.
 *
 * @param {object} call - gRPC call; `call.request` carries videoId, userId and rating.
 * @param {function} cb - Node-style callback invoked with (err, RateVideoResponse).
 */
export function rateVideo(call, cb) {
  let { request } = call;
  async.waterfall([
    // Get client
    async.asyncify(getCassandraClient),
    // Execute CQL
    (client, next) => {
      // Get some bind variable values for the CQL we're going to run
      let videoId = toCassandraUuid(request.videoId);
      let userId = toCassandraUuid(request.userId);
      let { rating } = request;
      // We can't use a batch to do inserts to multiple tables here because the video_ratings table
      // is a counter table (and Cassandra doesn't let us mix counter DML with regular DML in a batch),
      // but we can execute the inserts in parallel
      async.parallel([
        execCb => client.execute(updateRatingsCql, [ rating, videoId ], execCb),
        execCb => client.execute(insertUserRatingCql, [ videoId, userId, rating ], execCb)
      ], next);
    },
    // If successful with inserts, publish an event
    (resultSets, next) => {
      // Tell the world about the user rating the video
      let event = new UserRatedVideo({
        videoId: request.videoId,
        userId: request.userId,
        rating: request.rating,
        ratingTimestamp: toProtobufTimestamp(new Date(Date.now()))
      });
      publish(event, next);
    },
    // Finally, return a response object
    next => {
      next(null, new RateVideoResponse());
    }
  ], cb);
}; | KillrVideo/killrvideo-nodejs | src/services/ratings/rate-video.js | JavaScript | apache-2.0 | 2086 |
/*
* Copyright 2012 <a href="mailto:lincolnbaxter@gmail.com">Lincoln Baxter, III</a>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.ocpsoft.prettytime.units;
import org.ocpsoft.prettytime.impl.ResourcesTimeUnit;
/**
 * {@link ResourcesTimeUnit} representing a millennium (one thousand years).
 */
public class Millennium extends ResourcesTimeUnit
{
   public Millennium()
   {
      // 1000 years in milliseconds, using ~31,556,926 seconds per year.
      setMillisPerUnit(31556926000000L);
   }
   @Override
   protected String getResourceKeyPrefix()
   {
      // Key prefix used to look up i18n strings for this unit.
      return "Millennium";
   }
}
| ocpsoft/prettytime | core/src/main/java/org/ocpsoft/prettytime/units/Millennium.java | Java | apache-2.0 | 969 |
package org.robbins.flashcards.cassandra.repository.domain;
import org.springframework.data.cassandra.mapping.Column;
import org.springframework.data.cassandra.mapping.PrimaryKey;
import org.springframework.data.cassandra.mapping.Table;
import java.io.Serializable;
import java.util.UUID;
@Table(value = "tag_flashcard")
public class TagFlashCardCassandraEntity implements Serializable {

    // Explicit version id so serialized instances remain compatible across
    // recompilations (class previously relied on the compiler-generated id).
    private static final long serialVersionUID = 1L;

    // Compound primary key for the tag/flashcard association; structure is
    // defined by TagFlashCardKey.
    @PrimaryKey
    private TagFlashCardKey id;

    // Flashcard question text, denormalized into this lookup table.
    @Column
    private String question;

    // Flashcard answer text, denormalized into this lookup table.
    @Column
    private String answer;

    public TagFlashCardKey getId() {
        return id;
    }

    public void setId(TagFlashCardKey id) {
        this.id = id;
    }

    public String getQuestion() {
        return question;
    }

    public void setQuestion(String question) {
        this.question = question;
    }

    public String getAnswer() {
        return answer;
    }

    public void setAnswer(String answer) {
        this.answer = answer;
    }
}
| justinhrobbins/FlashCards_App | FlashCards_Repository/FlashCards_Repository_Cassandra/src/main/java/org/robbins/flashcards/cassandra/repository/domain/TagFlashCardCassandraEntity.java | Java | apache-2.0 | 954 |
using System.IO;
namespace Chinchilla.Topologies.Model
{
public class TopologyWriter : ITopologyVisitor
{
private readonly TextWriter textWriter;
public TopologyWriter(TextWriter textWriter)
{
this.textWriter = textWriter;
}
public void Visit(IQueue queue)
{
textWriter.WriteLine(queue);
}
public void Visit(IExchange exchange)
{
textWriter.WriteLine(exchange);
}
public void Visit(IBinding binding)
{
textWriter.WriteLine(binding);
}
}
}
| jonnii/chinchilla | src/Chinchilla/Topologies/Model/TopologyWriter.cs | C# | apache-2.0 | 611 |
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.memorydb.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;
/**
* <p>
* The status of the ACL update
* </p>
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/memorydb-2021-01-01/ACLsUpdateStatus" target="_top">AWS API
* Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class ACLsUpdateStatus implements Serializable, Cloneable, StructuredPojo {

    /**
     * <p>
     * A list of ACLs pending to be applied.
     * </p>
     */
    private String aCLToApply;

    /**
     * <p>
     * A list of ACLs pending to be applied.
     * </p>
     * 
     * @param aCLToApply
     *        A list of ACLs pending to be applied.
     */
    public void setACLToApply(String aCLToApply) {
        this.aCLToApply = aCLToApply;
    }

    /**
     * <p>
     * A list of ACLs pending to be applied.
     * </p>
     * 
     * @return A list of ACLs pending to be applied.
     */
    public String getACLToApply() {
        return this.aCLToApply;
    }

    /**
     * <p>
     * A list of ACLs pending to be applied.
     * </p>
     * 
     * @param aCLToApply
     *        A list of ACLs pending to be applied.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public ACLsUpdateStatus withACLToApply(String aCLToApply) {
        setACLToApply(aCLToApply);
        return this;
    }

    /**
     * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
     * redacted from this string using a placeholder value.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        if (getACLToApply() != null)
            sb.append("ACLToApply: ").append(getACLToApply());
        sb.append("}");
        return sb.toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;

        if (obj instanceof ACLsUpdateStatus == false)
            return false;
        ACLsUpdateStatus other = (ACLsUpdateStatus) obj;
        // XOR: objects differ when exactly one side has a null value.
        if (other.getACLToApply() == null ^ this.getACLToApply() == null)
            return false;
        if (other.getACLToApply() != null && other.getACLToApply().equals(this.getACLToApply()) == false)
            return false;
        return true;
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int hashCode = 1;
        hashCode = prime * hashCode + ((getACLToApply() == null) ? 0 : getACLToApply().hashCode());
        return hashCode;
    }

    @Override
    public ACLsUpdateStatus clone() {
        try {
            return (ACLsUpdateStatus) super.clone();
        } catch (CloneNotSupportedException e) {
            // Unreachable: the class implements Cloneable.
            throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
        }
    }

    @com.amazonaws.annotation.SdkInternalApi
    @Override
    public void marshall(ProtocolMarshaller protocolMarshaller) {
        // Delegates wire-format marshalling to the generated marshaller.
        com.amazonaws.services.memorydb.model.transform.ACLsUpdateStatusMarshaller.getInstance().marshall(this, protocolMarshaller);
    }
}
| aws/aws-sdk-java | aws-java-sdk-memorydb/src/main/java/com/amazonaws/services/memorydb/model/ACLsUpdateStatus.java | Java | apache-2.0 | 4,083 |
package org.ret.core.entity;
import java.io.Serializable;
import javax.persistence.Cacheable;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.Id;
import javax.persistence.JoinColumn;
import javax.persistence.ManyToOne;
import javax.persistence.Table;
import org.hibernate.annotations.Cache;
import org.hibernate.annotations.CacheConcurrencyStrategy;
@Entity
@Table(name="city")
@Cacheable
// Read-only second-level cache entry; city reference data is never modified
// through this entity (it exposes getters only).
@Cache(usage = CacheConcurrencyStrategy.READ_ONLY)
public class City implements Serializable{

	private static final long serialVersionUID = 8224159162653666213L;

	// Primary key of the city table.
	@Id
	@Column(name="CITY_ID")
	private Integer cityId;

	// Human-readable city name (maps to the NAME column).
	@Column(name="NAME")
	private String city;

	// Owning county of this city.
	@ManyToOne
	@JoinColumn(name="COUNTY_ID")
	private County county;

	public Integer getCityId() {
		return cityId;
	}

	public String getCity() {
		return city;
	}

	public County getCounty() {
		return county;
	}
}
| hongyuanChrisLi/RealEstateTrends | src/main/java/org/ret/core/entity/City.java | Java | apache-2.0 | 994 |
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spinnaker.clouddriver.aws.lifecycle;
import com.amazonaws.auth.policy.*;
import com.amazonaws.auth.policy.Statement.Effect;
import com.amazonaws.auth.policy.actions.SNSActions;
import com.amazonaws.auth.policy.actions.SQSActions;
import com.amazonaws.services.sns.AmazonSNS;
import com.amazonaws.services.sns.model.SetTopicAttributesRequest;
import com.amazonaws.services.sqs.AmazonSQS;
import com.amazonaws.services.sqs.model.Message;
import com.amazonaws.services.sqs.model.ReceiptHandleIsInvalidException;
import com.amazonaws.services.sqs.model.ReceiveMessageRequest;
import com.amazonaws.services.sqs.model.ReceiveMessageResult;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.frigga.Names;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import com.netflix.spinnaker.clouddriver.aws.deploy.ops.discovery.AwsEurekaSupport;
import com.netflix.spinnaker.clouddriver.aws.security.AmazonClientProvider;
import com.netflix.spinnaker.clouddriver.aws.security.AmazonCredentials.LifecycleHook;
import com.netflix.spinnaker.clouddriver.aws.security.NetflixAmazonCredentials;
import com.netflix.spinnaker.clouddriver.eureka.api.Eureka;
import com.netflix.spinnaker.clouddriver.eureka.deploy.ops.AbstractEurekaSupport.DiscoveryStatus;
import com.netflix.spinnaker.clouddriver.security.AccountCredentials;
import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider;
import java.io.IOException;
import java.time.Duration;
import java.util.*;
import java.util.stream.Collectors;
import javax.inject.Provider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.http.HttpStatus;
import retrofit.RetrofitError;
/**
 * Worker that polls an SQS queue for EC2 auto-scaling lifecycle notifications and marks
 * terminating instances as out-of-service in Eureka so traffic stops being routed to them.
 *
 * <p>On each {@link #listenForMessages()} pass the worker ensures the configured SQS queue and
 * SNS topic exist (creating and wiring them together when missing), then long-polls for
 * messages. Only {@code autoscaling:EC2_INSTANCE_TERMINATING} transitions are acted upon;
 * everything else is acknowledged and dropped.
 */
public class InstanceTerminationLifecycleWorker implements Runnable {

  private static final Logger log =
      LoggerFactory.getLogger(InstanceTerminationLifecycleWorker.class);

  // SQS hard limit on the number of messages a single ReceiveMessage call may return.
  private static final int AWS_MAX_NUMBER_OF_MESSAGES = 10;

  private static final String SUPPORTED_LIFECYCLE_TRANSITION =
      "autoscaling:EC2_INSTANCE_TERMINATING";

  ObjectMapper objectMapper;
  AmazonClientProvider amazonClientProvider;
  AccountCredentialsProvider accountCredentialsProvider;
  InstanceTerminationConfigurationProperties properties;
  Provider<AwsEurekaSupport> discoverySupport;
  Registry registry;

  private final ARN queueARN;
  private final ARN topicARN;

  // Resolved SQS queue URL; assigned once the queue has been ensured in listenForMessages().
  private String queueId = null;

  public InstanceTerminationLifecycleWorker(
      ObjectMapper objectMapper,
      AmazonClientProvider amazonClientProvider,
      AccountCredentialsProvider accountCredentialsProvider,
      InstanceTerminationConfigurationProperties properties,
      Provider<AwsEurekaSupport> discoverySupport,
      Registry registry) {
    this.objectMapper = objectMapper;
    this.amazonClientProvider = amazonClientProvider;
    this.accountCredentialsProvider = accountCredentialsProvider;
    this.properties = properties;
    this.discoverySupport = discoverySupport;
    this.registry = registry;

    Set<? extends AccountCredentials> accountCredentials = accountCredentialsProvider.getAll();
    this.queueARN = new ARN(accountCredentials, properties.getQueueARN());
    this.topicARN = new ARN(accountCredentials, properties.getTopicARN());
  }

  /** Human-readable worker identity used in log messages: {@code account/region/ClassName}. */
  public String getWorkerName() {
    return queueARN.account.getName()
        + "/"
        + queueARN.region
        + "/"
        + InstanceTerminationLifecycleWorker.class.getSimpleName();
  }

  @Override
  public void run() {
    log.info("Starting " + getWorkerName());

    // Restart the listen loop on any failure; this worker is expected to run forever.
    while (true) {
      try {
        listenForMessages();
      } catch (Throwable e) {
        log.error("Unexpected error running " + getWorkerName() + ", restarting", e);
      }
    }
  }

  /**
   * Ensures queue/topic wiring exists, then long-polls SQS, handling and deleting each
   * received lifecycle message. Never returns normally.
   */
  private void listenForMessages() {
    AmazonSQS amazonSQS = amazonClientProvider.getAmazonSQS(queueARN.account, queueARN.region);
    AmazonSNS amazonSNS = amazonClientProvider.getAmazonSNS(topicARN.account, topicARN.region);

    Set<? extends AccountCredentials> accountCredentials = accountCredentialsProvider.getAll();
    List<String> allAccountIds = getAllAccountIds(accountCredentials);

    this.queueId =
        ensureQueueExists(
            amazonSQS,
            queueARN,
            topicARN,
            getSourceRoleArns(accountCredentials),
            properties.getSqsMessageRetentionPeriodSeconds());
    ensureTopicExists(amazonSNS, topicARN, allAccountIds, queueARN);

    while (true) {
      ReceiveMessageResult receiveMessageResult =
          amazonSQS.receiveMessage(
              new ReceiveMessageRequest(queueId)
                  .withMaxNumberOfMessages(AWS_MAX_NUMBER_OF_MESSAGES)
                  .withVisibilityTimeout(properties.getVisibilityTimeout())
                  .withWaitTimeSeconds(properties.getWaitTimeSeconds()));

      if (receiveMessageResult.getMessages().isEmpty()) {
        // No messages
        continue;
      }

      receiveMessageResult
          .getMessages()
          .forEach(
              message -> {
                LifecycleMessage lifecycleMessage = unmarshalLifecycleMessage(message.getBody());

                if (lifecycleMessage != null) {
                  if (!SUPPORTED_LIFECYCLE_TRANSITION.equalsIgnoreCase(
                      lifecycleMessage.lifecycleTransition)) {
                    log.info(
                        "Ignoring unsupported lifecycle transition: "
                            + lifecycleMessage.lifecycleTransition);
                    deleteMessage(amazonSQS, queueId, message);
                    return;
                  }
                  handleMessage(lifecycleMessage);
                }

                // Delete even unparseable messages so they do not get redelivered forever.
                deleteMessage(amazonSQS, queueId, message);
                registry.counter(getProcessedMetricId(queueARN.region)).increment();
              });
    }
  }

  /**
   * Parses a message body into a {@link LifecycleMessage}, first attempting to unwrap an SNS
   * notification envelope; falls back to treating the body as a raw SQS message.
   *
   * @return the parsed message, or {@code null} if the body could not be unmarshalled
   */
  private LifecycleMessage unmarshalLifecycleMessage(String messageBody) {
    String body = messageBody;
    try {
      NotificationMessageWrapper wrapper =
          objectMapper.readValue(messageBody, NotificationMessageWrapper.class);
      if (wrapper != null && wrapper.message != null) {
        body = wrapper.message;
      }
    } catch (IOException e) {
      // Try to unwrap a notification message; if that doesn't work,
      // assume that we're dealing with a message directly from SQS.
      log.debug(
          "Unable unmarshal NotificationMessageWrapper. Assuming SQS message. (body: {})",
          messageBody,
          e);
    }

    LifecycleMessage lifecycleMessage = null;
    try {
      lifecycleMessage = objectMapper.readValue(body, LifecycleMessage.class);
    } catch (IOException e) {
      log.error("Unable to unmarshal LifecycleMessage (body: {})", body, e);
    }

    return lifecycleMessage;
  }

  /**
   * Resolves the message's account, then disables the terminating instance in Eureka and
   * records processing metrics.
   */
  private void handleMessage(LifecycleMessage message) {
    NetflixAmazonCredentials credentials = getAccountCredentialsById(message.accountId);
    if (credentials == null) {
      log.error("Unable to find credentials for account id: {}", message.accountId);
      return;
    }

    Names names = Names.parseName(message.autoScalingGroupName);
    Eureka eureka = discoverySupport.get().getEureka(credentials, queueARN.region);

    if (!updateInstanceStatus(eureka, names.getApp(), message.ec2InstanceId)) {
      registry.counter(getFailedMetricId(queueARN.region)).increment();
    }
    recordLag(
        message.time,
        queueARN.region,
        message.accountId,
        message.autoScalingGroupName,
        message.ec2InstanceId);
  }

  /**
   * Marks {@code instanceId} of application {@code app} as {@code Disable} in Eureka, retrying
   * recoverable failures up to {@code eurekaUpdateStatusRetryMax} times.
   *
   * @return {@code true} if the status update succeeded, {@code false} if retries were
   *     exhausted or an irrecoverable error occurred
   */
  private boolean updateInstanceStatus(Eureka eureka, String app, String instanceId) {
    int retry = 0;
    while (retry < properties.getEurekaUpdateStatusRetryMax()) {
      retry++;
      try {
        eureka.updateInstanceStatus(app, instanceId, DiscoveryStatus.Disable.getValue());
        return true;
      } catch (RetrofitError e) {
        final String recoverableMessage =
            "Failed marking app out of service (status: {}, app: {}, instance: {}, retry: {})";
        // BUGFIX: for RetrofitError.Kind.NETWORK, e.getResponse() is null, so the network
        // case must be checked before any branch that dereferences the response. The
        // previous ordering NPE'd on network failures instead of retrying them.
        if (e.getKind() == RetrofitError.Kind.NETWORK) {
          log.error(recoverableMessage, "network-error", app, instanceId, retry, e);
        } else if (e.getResponse() != null
            && HttpStatus.NOT_FOUND.value() == e.getResponse().getStatus()) {
          log.warn(recoverableMessage, e.getResponse().getStatus(), app, instanceId, retry);
        } else {
          log.error(
              "Irrecoverable error while marking app out of service (app: {}, instance: {}, retry: {})",
              app,
              instanceId,
              retry,
              e);
          break;
        }
      }
    }

    return false;
  }

  /** Deletes a processed message from the queue, tolerating expired receipt handles. */
  private static void deleteMessage(AmazonSQS amazonSQS, String queueUrl, Message message) {
    try {
      amazonSQS.deleteMessage(queueUrl, message.getReceiptHandle());
    } catch (ReceiptHandleIsInvalidException e) {
      log.warn(
          "Error deleting lifecycle message, reason: {} (receiptHandle: {})",
          e.getMessage(),
          message.getReceiptHandle());
    }
  }

  /**
   * @return the credentials whose AWS account id matches {@code accountId}, or {@code null} if
   *     no such account is registered
   */
  private NetflixAmazonCredentials getAccountCredentialsById(String accountId) {
    for (AccountCredentials credentials : accountCredentialsProvider.getAll()) {
      if (credentials.getAccountId() != null && credentials.getAccountId().equals(accountId)) {
        return (NetflixAmazonCredentials) credentials;
      }
    }
    return null;
  }

  /**
   * Creates (or re-affirms) the SNS topic, attaches a publish policy for all known accounts and
   * subscribes the SQS queue to it.
   *
   * @return the topic ARN
   */
  private static String ensureTopicExists(
      AmazonSNS amazonSNS, ARN topicARN, List<String> allAccountIds, ARN queueARN) {
    topicARN.arn = amazonSNS.createTopic(topicARN.name).getTopicArn();

    amazonSNS.setTopicAttributes(
        new SetTopicAttributesRequest()
            .withTopicArn(topicARN.arn)
            .withAttributeName("Policy")
            .withAttributeValue(buildSNSPolicy(topicARN, allAccountIds).toJson()));

    amazonSNS.subscribe(topicARN.arn, "sqs", queueARN.arn);

    return topicARN.arn;
  }

  /** Policy allowing every known account to publish lifecycle notifications to the topic. */
  private static Policy buildSNSPolicy(ARN topicARN, List<String> allAccountIds) {
    Statement statement = new Statement(Statement.Effect.Allow).withActions(SNSActions.Publish);
    statement.setPrincipals(
        allAccountIds.stream().map(Principal::new).collect(Collectors.toList()));
    statement.setResources(Collections.singletonList(new Resource(topicARN.arn)));

    return new Policy("allow-remote-account-send", Collections.singletonList(statement));
  }

  /**
   * Creates (or re-affirms) the SQS queue and applies its access policy and message retention
   * period.
   *
   * @return the queue URL
   */
  private static String ensureQueueExists(
      AmazonSQS amazonSQS,
      ARN queueARN,
      ARN topicARN,
      Set<String> terminatingRoleArns,
      int sqsMessageRetentionPeriodSeconds) {
    String queueUrl = amazonSQS.createQueue(queueARN.name).getQueueUrl();

    HashMap<String, String> attributes = new HashMap<>();
    attributes.put("Policy", buildSQSPolicy(queueARN, topicARN, terminatingRoleArns).toJson());
    attributes.put("MessageRetentionPeriod", Integer.toString(sqsMessageRetentionPeriodSeconds));
    amazonSQS.setQueueAttributes(queueUrl, attributes);

    return queueUrl;
  }

  /**
   * This policy allows operators to choose whether or not to have lifecycle hooks to be sent via
   * SNS for fanout, or be sent directly to an SQS queue from the autoscaling group.
   */
  private static Policy buildSQSPolicy(ARN queue, ARN topic, Set<String> terminatingRoleArns) {
    Statement snsStatement = new Statement(Effect.Allow).withActions(SQSActions.SendMessage);
    snsStatement.setPrincipals(Principal.All);
    snsStatement.setResources(Collections.singletonList(new Resource(queue.arn)));
    // Restrict the wide-open principal to messages fanned out from our own topic.
    snsStatement.setConditions(
        Collections.singletonList(
            new Condition()
                .withType("ArnEquals")
                .withConditionKey("aws:SourceArn")
                .withValues(topic.arn)));

    Statement sqsStatement =
        new Statement(Effect.Allow).withActions(SQSActions.SendMessage, SQSActions.GetQueueUrl);
    sqsStatement.setPrincipals(
        terminatingRoleArns.stream().map(Principal::new).collect(Collectors.toList()));
    sqsStatement.setResources(Collections.singletonList(new Resource(queue.arn)));

    return new Policy("allow-sns-or-sqs-send", Arrays.asList(snsStatement, sqsStatement));
  }

  Id getLagMetricId(String region) {
    return registry.createId("terminationLifecycle.lag", "region", region);
  }

  /** Records the wall-clock delay between message emission and processing, when known. */
  void recordLag(Date start, String region, String account, String serverGroup, String instanceId) {
    if (start != null) {
      Long lag = registry.clock().wallTime() - start.getTime();
      log.info(
          "Lifecycle message processed (account: {}, serverGroup: {}, instance: {}, lagSeconds: {})",
          account,
          serverGroup,
          instanceId,
          Duration.ofMillis(lag).getSeconds());
      registry.gauge(getLagMetricId(region), lag);
    }
  }

  Id getProcessedMetricId(String region) {
    return registry.createId("terminationLifecycle.totalProcessed", "region", region);
  }

  Id getFailedMetricId(String region) {
    return registry.createId("terminationLifecycle.totalFailed", "region", region);
  }

  /** All non-null AWS account ids known to the credentials provider. */
  private static List<String> getAllAccountIds(
      Set<? extends AccountCredentials> accountCredentials) {
    return accountCredentials.stream()
        .map(AccountCredentials::getAccountId)
        .filter(a -> a != null)
        .collect(Collectors.toList());
  }

  /**
   * Role ARNs configured on terminating lifecycle hooks across all accounts; these are the
   * principals allowed to send directly to the SQS queue.
   */
  private static <T extends AccountCredentials> Set<String> getSourceRoleArns(
      Set<T> allCredentials) {
    Set<String> sourceRoleArns = new HashSet<>();
    for (T credentials : allCredentials) {
      if (credentials instanceof NetflixAmazonCredentials) {
        NetflixAmazonCredentials c = (NetflixAmazonCredentials) credentials;
        if (c.getLifecycleHooks() != null) {
          sourceRoleArns.addAll(
              c.getLifecycleHooks().stream()
                  .filter(
                      h ->
                          "autoscaling:EC2_INSTANCE_TERMINATING".equals(h.getLifecycleTransition()))
                  .map(LifecycleHook::getRoleARN)
                  .collect(Collectors.toSet()));
        }
      }
    }
    return sourceRoleArns;
  }
}
| duftler/clouddriver | clouddriver-aws/src/main/groovy/com/netflix/spinnaker/clouddriver/aws/lifecycle/InstanceTerminationLifecycleWorker.java | Java | apache-2.0 | 14,654 |
//============================================================================//
// File: qcan_config.cpp //
// Description: Configure CAN interface //
// //
// Copyright (C) MicroControl GmbH & Co. KG //
// 53844 Troisdorf - Germany //
// www.microcontrol.net //
// //
//----------------------------------------------------------------------------//
// Redistribution and use in source and binary forms, with or without //
// modification, are permitted provided that the following conditions //
// are met: //
// 1. Redistributions of source code must retain the above copyright //
// notice, this list of conditions, the following disclaimer and //
// the referenced file 'LICENSE'. //
// 2. Redistributions in binary form must reproduce the above copyright //
// notice, this list of conditions and the following disclaimer in the //
// documentation and/or other materials provided with the distribution. //
// 3. Neither the name of MicroControl nor the names of its contributors //
// may be used to endorse or promote products derived from this software //
// without specific prior written permission. //
// //
// Provided that this notice is retained in full, this software may be //
// distributed under the terms of the GNU Lesser General Public License //
// ("LGPL") version 3 as distributed in the 'LICENSE' file. //
// //
//============================================================================//
#include "qcan_config.hpp"
#include <QTime>
#include <QTimer>
#include <QDebug>
//----------------------------------------------------------------------------//
// main()                                                                     //
// Application entry point: wires the QCanConfig worker into the Qt event     //
// loop and returns the event loop's exit code to the shell.                  //
//----------------------------------------------------------------------------//
int main(int argc, char *argv[])
{
   QCoreApplication clAppT(argc, argv);
   QCoreApplication::setApplicationName("can-config");

   //----------------------------------------------------------------
   // get application version (defined in .pro file)
   //
   QString clVersionT;
   clVersionT += QString("%1.%2.").arg(VERSION_MAJOR).arg(VERSION_MINOR);
   clVersionT += QString("%1").arg(VERSION_BUILD);
   QCoreApplication::setApplicationVersion(clVersionT);

   //----------------------------------------------------------------
   // create the main class
   //
   QCanConfig clMainT;

   //----------------------------------------------------------------
   // connect the signals
   //
   QObject::connect(&clMainT, SIGNAL(finished()),
                    &clAppT, SLOT(quit()));

   QObject::connect(&clAppT, SIGNAL(aboutToQuit()),
                    &clMainT, SLOT(aboutToQuitApp()));

   //----------------------------------------------------------------
   // This code will start the messaging engine in QT and in 10 ms
   // it will start the execution in the clMainT.runCmdParser()
   // routine.
   //
   QTimer::singleShot(10, &clMainT, SLOT(runCmdParser()));

   // BUGFIX: propagate the event loop's exit code; previously the result of
   // exec() was discarded and the process always exited with status 0.
   return clAppT.exec();
}
//----------------------------------------------------------------------------//
// QCanConfig()                                                               //
// constructor: caches the application instance and wires the CAN socket      //
// state-change signals to the corresponding private slots                    //
//----------------------------------------------------------------------------//
QCanConfig::QCanConfig(QObject *parent) :
    QObject(parent)
{
   //----------------------------------------------------------------
   // get the instance of the main application
   //
   pclAppP = QCoreApplication::instance();

   //----------------------------------------------------------------
   // connect signals for socket operations: connection established,
   // connection closed and socket failure
   //
   QObject::connect(&clCanSocketP, SIGNAL(connected()),
                    this, SLOT(socketConnected()));

   QObject::connect(&clCanSocketP, SIGNAL(disconnected()),
                    this, SLOT(socketDisconnected()));

   QObject::connect(&clCanSocketP, SIGNAL(error(QAbstractSocket::SocketError)),
                    this, SLOT(socketError(QAbstractSocket::SocketError)));
}
// Slot invoked by QCoreApplication::aboutToQuit() shortly after quit() is
// called. This is the designated place to delete any objects created in the
// constructor and/or to stop any threads; currently there is nothing to
// clean up, so the body is intentionally empty.
void QCanConfig::aboutToQuitApp()
{
   // stop threads
   // sleep(1);   // wait for threads to stop.
   // delete any objects
}
//----------------------------------------------------------------------------//
// quit()                                                                     //
// disconnects from the CAN network and emits finished(), which is wired to   //
// QCoreApplication::quit() in main() and thus ends the application           //
//----------------------------------------------------------------------------//
void QCanConfig::quit()
{
   //qDebug() << "I will quit soon";
   clCanSocketP.disconnectNetwork();

   emit finished();
}
//----------------------------------------------------------------------------//
// runCmdParser()                                                             //
// 10 ms after the application starts this method parses the command line,    //
// validates the <interface> argument and the bit-rate options, and finally   //
// initiates the connection to the CAN network. Note that showHelp() prints   //
// the usage text and terminates the process immediately.                     //
//----------------------------------------------------------------------------//
void QCanConfig::runCmdParser(void)
{
   //----------------------------------------------------------------
   // setup command line parser
   //
   clCmdParserP.setApplicationDescription(tr("Configure CAN interface"));
   clCmdParserP.addHelpOption();

   //----------------------------------------------------------------
   // argument <interface> is required
   //
   clCmdParserP.addPositionalArgument("interface",
                                      tr("CAN interface, e.g. can1"));

   //-----------------------------------------------------------
   // command line option: -a
   //
   QCommandLineOption clOptAllT(QStringList() << "a" << "all",
         tr("Show all CAN interfaces"));
   clCmdParserP.addOption(clOptAllT);

   //-----------------------------------------------------------
   // command line option: -H <host>
   //
   QCommandLineOption clOptHostT("H",
         tr("Connect to <host>"),
         tr("host"));
   clCmdParserP.addOption(clOptHostT);

   //-----------------------------------------------------------
   // command line option: -m <mode>
   //
   QCommandLineOption clOptModeT(QStringList() << "m" << "mode",
         tr("Set mode of CAN interface"),
         tr("start|stop|listen-only"));
   clCmdParserP.addOption(clOptModeT);

   //-----------------------------------------------------------
   // command line option: -nbtr <value>
   //
   QCommandLineOption clOptNomBtrT("nbtr",
         tr("Set nominal bit-rate"),
         tr("value"));
   clCmdParserP.addOption(clOptNomBtrT);

   //-----------------------------------------------------------
   // command line option: -dbtr <value>
   //
   QCommandLineOption clOptDatBtrT("dbtr",
         tr("Set data bit-rate"),
         tr("value"));
   clCmdParserP.addOption(clOptDatBtrT);

   clCmdParserP.addVersionOption();

   //----------------------------------------------------------------
   // Process the actual command line arguments given by the user
   //
   clCmdParserP.process(*pclAppP);

   //----------------------------------------------------------------
   // Test for --all option
   // NOTE(review): despite the option text "Show all CAN interfaces",
   // this branch only quits without listing anything -- the feature
   // appears to be unimplemented; confirm intended behaviour.
   //
   if(clCmdParserP.isSet(clOptAllT))
   {
      quit();
   }

   const QStringList clArgsT = clCmdParserP.positionalArguments();
   if (clArgsT.size() != 1)
   {
      fprintf(stderr, "%s\n",
              qPrintable(tr("Error: Must specify CAN interface.\n")));
      clCmdParserP.showHelp(0);
   }

   //----------------------------------------------------------------
   // test format of argument <interface>: must be "can<N>"
   //
   QString clInterfaceT = clArgsT.at(0);
   if(!clInterfaceT.startsWith("can"))
   {
      fprintf(stderr, "%s %s\n",
              qPrintable(tr("Error: Unknown CAN interface ")),
              qPrintable(clInterfaceT));
      clCmdParserP.showHelp(0);
   }

   //-----------------------------------------------------------
   // convert CAN channel to uint8_t value; channel numbering
   // starts at 1, so 0 and non-numeric suffixes are rejected
   //
   QString clIfNumT = clInterfaceT.right(clInterfaceT.size() - 3);
   bool btConversionSuccessT;
   int32_t slChannelT = clIfNumT.toInt(&btConversionSuccessT, 10);
   if((btConversionSuccessT == false) ||
      (slChannelT == 0) )
   {
      fprintf(stderr, "%s \n\n",
              qPrintable(tr("Error: CAN interface out of range")));
      clCmdParserP.showHelp(0);
   }

   //-----------------------------------------------------------
   // store CAN interface channel (CAN_Channel_e)
   //
   ubChannelP = (uint8_t) (slChannelT);

   //----------------------------------------------------------------
   // set bit-rate: a data bit-rate (CAN FD) is only accepted when a
   // nominal bit-rate has been supplied as well
   //
   btConfigBitrateP = false;
   slNomBitRateP = eCAN_BITRATE_NONE;
   slDatBitRateP = eCAN_BITRATE_NONE;
   if (clCmdParserP.isSet(clOptNomBtrT))
   {
      slNomBitRateP  = clCmdParserP.value(clOptNomBtrT).toInt(Q_NULLPTR, 10);
      btConfigBitrateP = true;
   }
   if (clCmdParserP.isSet(clOptDatBtrT))
   {
      if (slNomBitRateP == eCAN_BITRATE_NONE)
      {
         fprintf(stderr, "%s \n\n",
                 qPrintable(tr("Error: Must set nominal bit-rate also")));
         clCmdParserP.showHelp(0);
      }
      else
      {
         slDatBitRateP = clCmdParserP.value(clOptDatBtrT).toInt(Q_NULLPTR, 10);
      }
   }

   //----------------------------------------------------------------
   // set host address for socket (default is localhost otherwise)
   //
   if(clCmdParserP.isSet(clOptHostT))
   {
      QHostAddress clAddressT = QHostAddress(clCmdParserP.value(clOptHostT));
      clCanSocketP.setHostAddress(clAddressT);
   }

   //----------------------------------------------------------------
   // connect to CAN interface; on success socketConnected() fires
   // and triggers sendCommand()
   //
   clCanSocketP.connectNetwork((CAN_Channel_e) ubChannelP);
}
//----------------------------------------------------------------------------//
// sendCommand()                                                              //
// writes the bit-rate configuration frame (if requested on the command       //
// line) to the CAN socket, then schedules quit() so the write can drain      //
//----------------------------------------------------------------------------//
void QCanConfig::sendCommand(void)
{
   if (btConfigBitrateP)
   {
      clCanApiP.setBitrate(slNomBitRateP, slDatBitRateP);
      clCanSocketP.writeFrame(clCanApiP);
   }

   // allow 50 ms for the frame to be written before shutting down
   QTimer::singleShot(50, this, SLOT(quit()));
}
//----------------------------------------------------------------------------//
// socketConnected()                                                          //
// slot fired once the CAN socket connection is established; defers the       //
// actual command transmission to sendCommand() via the event loop            //
//----------------------------------------------------------------------------//
void QCanConfig::socketConnected()
{
   //----------------------------------------------------------------
   // initial setup of CAN frame
   //
   QTimer::singleShot(10, this, SLOT(sendCommand()));
}
//----------------------------------------------------------------------------//
// socketDisconnected()                                                       //
// slot fired when the CAN socket closes; only logs a debug message           //
// (the application shutdown itself is driven by quit())                      //
//----------------------------------------------------------------------------//
void QCanConfig::socketDisconnected()
{
   qDebug() << "Disconnected from CAN " << ubChannelP;
}
//----------------------------------------------------------------------------//
// socketError()                                                              //
// slot fired when the CAN socket reports a failure: prints the socket's      //
// error string to stderr and shuts the application down                      //
//----------------------------------------------------------------------------//
void QCanConfig::socketError(QAbstractSocket::SocketError teSocketErrorV)
{
   Q_UNUSED(teSocketErrorV);  // parameter not used

   //----------------------------------------------------------------
   // show error message in case the connection to the network fails
   //
   fprintf(stderr, "%s %s\n",
           qPrintable(tr("Failed to connect to CAN interface:")),
           qPrintable(clCanSocketP.errorString()));

   quit();
}
| JoTid/CANpie | source/qcan/applications/can-config/qcan_config.cpp | C++ | apache-2.0 | 12,949 |
/*
* Copyright (c) 2017 VMware Inc. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.hillview.maps;
import org.hillview.dataset.api.IMap;
import org.hillview.table.FalseTableFilter;
import org.hillview.table.TableFilter;
import org.hillview.table.api.IMembershipSet;
import org.hillview.table.api.ITable;
/**
* A Map which implements table filtering: given a row index it returns true if the
* row is in the resulting table.
*/
public class FilterMap implements IMap<ITable, ITable> {
    /**
     * Row-level predicate: given a row index, decides whether that row
     * survives the filter and appears in the resulting table.
     */
    private final TableFilter rowFilterPredicate;

    /** Creates a filter that rejects every row. */
    public FilterMap() {
        this(new FalseTableFilter());
    }

    /** Creates a filter driven by the supplied row predicate. */
    public FilterMap(TableFilter rowFilterPredicate) {
        this.rowFilterPredicate = rowFilterPredicate;
    }

    @Override
    public ITable apply(ITable data) {
        // Bind the predicate to the incoming table, then keep only the
        // member rows it accepts.
        rowFilterPredicate.setTable(data);
        IMembershipSet keptRows =
                data.getMembershipSet().filter(row -> rowFilterPredicate.test(row));
        return data.selectRowsFromFullTable(keptRows);
    }
}
| lalithsuresh/hiero | platform/src/main/java/org/hillview/maps/FilterMap.java | Java | apache-2.0 | 1,686 |
package eu.giuseppeurso.activemq.springbasic;
import org.springframework.context.support.FileSystemXmlApplicationContext;
public class App
{
public static void main(String[] args) throws Exception {
FileSystemXmlApplicationContext context = new FileSystemXmlApplicationContext("src/main/resources/SpringBeans.xml");
Producer producer = (Producer)context.getBean("producer");
producer.start();
}
} | sensaid/activemq | spring-basic/src/main/java/eu/giuseppeurso/activemq/springbasic/App.java | Java | apache-2.0 | 425 |
// Copyright 2016 Yahoo Inc.
// Licensed under the terms of the Apache license. Please see LICENSE.md file distributed with this work for terms.
package com.yahoo.bard.webservice.metadata;
import com.yahoo.bard.webservice.druid.model.query.DruidAggregationQuery;
import com.yahoo.bard.webservice.util.SimplifiedIntervalList;
import java.util.function.Function;
/**
 * A {@code Function} that maps a {@link DruidAggregationQuery} to the
 * {@link SimplifiedIntervalList} of intervals requested by that query.
 * <p>
 * This otherwise-empty interface exists purely as a short alias so that the long
 * generic {@code Function} signature does not have to be repeated at every use site.
 */
public interface RequestedIntervalsFunction extends Function<DruidAggregationQuery<?>, SimplifiedIntervalList> {
}
| yahoo/fili | fili-core/src/main/java/com/yahoo/bard/webservice/metadata/RequestedIntervalsFunction.java | Java | apache-2.0 | 736 |
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://code.google.com/p/google-apis-client-generator/
* (build: 2015-01-14 17:53:03 UTC)
* on 2015-03-07 at 18:23:38 UTC
* Modify at your own risk.
*/
package assesortron.assesortronTaskerAPI.model;
/**
* Model definition for StringWrapper.
*
* <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is
* transmitted over HTTP when working with the assesortronTaskerAPI. For a detailed explanation see:
* <a href="http://code.google.com/p/google-http-java-client/wiki/JSON">http://code.google.com/p/google-http-java-client/wiki/JSON</a>
* </p>
*
* @author Google, Inc.
*/
// NOTE: generated code (see file header) -- manual edits are overwritten on regeneration.
@SuppressWarnings("javadoc")
public final class StringWrapper extends com.google.api.client.json.GenericJson {

  /**
   * The wrapped string payload, serialized under the JSON key "string".
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key("string")
  private java.lang.String string__;

  /**
   * @return value or {@code null} for none
   */
  public java.lang.String getString() {
    return string__;
  }

  /**
   * @param string__ string__ or {@code null} for none
   * @return this wrapper, for call chaining
   */
  public StringWrapper setString(java.lang.String string__) {
    this.string__ = string__;
    return this;
  }

  @Override
  public StringWrapper set(String fieldName, Object value) {
    return (StringWrapper) super.set(fieldName, value);
  }

  @Override
  public StringWrapper clone() {
    return (StringWrapper) super.clone();
  }

}
| willpassidomo/AssessortronServer | target/endpoints-client-libs/assesortronTaskerAPI/src/main/java/assesortron/assesortronTaskerAPI/model/StringWrapper.java | Java | apache-2.0 | 1,988 |
//
// Copyright 2016 R. Stanley Hum <r.stanley.hum@gmail.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
using System.Collections.Immutable;
using Redux;
using HandbookApp.States;
using HandbookApp.Actions;
using System;
using System.Collections.Generic;
using System.Linq;
using Splat;
using Newtonsoft.Json;
namespace HandbookApp.Reducers
{
    public static class BookReducers
    {
        /// <summary>
        /// Root reducer for the book map: dispatches on the concrete action type and
        /// returns a new immutable dictionary, or the previous state unchanged when
        /// the action is not book-related. Pure apart from logging.
        /// </summary>
        public static ImmutableDictionary<string, Book> BookReducer(ImmutableDictionary<string, Book> previousState, IAction action)
        {
            if (action is AddBookRangeAction)
            {
                return addBookRangeReducer(previousState, (AddBookRangeAction)action);
            }

            if (action is DeleteBookRangeAction)
            {
                return deleteBookRangeReducer(previousState, (DeleteBookRangeAction) action);
            }

            return previousState;
        }

        /// <summary>
        /// Removes every book whose id appears in the action. Ids that are absent
        /// from the dictionary are ignored by RemoveRange.
        /// </summary>
        private static ImmutableDictionary<string, Book> deleteBookRangeReducer(ImmutableDictionary<string, Book> previousState, DeleteBookRangeAction action)
        {
            LogHost.Default.Info("DeleteBookRangeAction: {0}", JsonConvert.SerializeObject(action.BookIds));
            return previousState.RemoveRange(action.BookIds);
        }

        /// <summary>
        /// Adds or replaces the given books, keyed by their Id. An empty action is a
        /// no-op that returns the previous state instance unchanged.
        /// </summary>
        private static ImmutableDictionary<string, Book> addBookRangeReducer(ImmutableDictionary<string, Book> previousState, AddBookRangeAction action)
        {
            if (action.Books.Count != 0)
            {
                var itemlist = action.Books
                    .Select(x => new KeyValuePair<string, Book>(x.Id, x));

                LogHost.Default.Info("AddBookRangeAction: {0}", JsonConvert.SerializeObject(action.Books.Select(x => x.Id).ToList()));
                return previousState.SetItems(itemlist);
            }

            return previousState;
        }
    }
}
| humrs/HandbookApp | HandbookApp/HandbookApp/Reducers/BookReducers.cs | C# | apache-2.0 | 2,407 |
/*
* Copyright 2000-2017 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.openapi.roots.ui.configuration;
import com.intellij.ide.JavaUiBundle;
import com.intellij.openapi.application.ApplicationBundle;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.module.Module;
import com.intellij.openapi.options.UnnamedConfigurable;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.projectRoots.Sdk;
import com.intellij.openapi.projectRoots.SdkModel;
import com.intellij.openapi.projectRoots.SimpleJavaSdkType;
import com.intellij.openapi.roots.ProjectRootManager;
import com.intellij.openapi.roots.ui.configuration.projectRoot.ModuleStructureConfigurable;
import com.intellij.openapi.roots.ui.configuration.projectRoot.ProjectSdksModel;
import com.intellij.openapi.roots.ui.configuration.projectRoot.StructureConfigurableContext;
import com.intellij.openapi.roots.ui.configuration.projectRoot.daemon.ModuleProjectStructureElement;
import com.intellij.openapi.util.Comparing;
import com.intellij.openapi.util.text.HtmlBuilder;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.util.ui.JBUI;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import javax.swing.*;
import java.awt.*;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import static java.awt.GridBagConstraints.*;
public class ProjectJdkConfigurable implements UnnamedConfigurable {
private static final Logger LOG = Logger.getInstance(ProjectJdkConfigurable.class);
private JdkComboBox myCbProjectJdk;
private JPanel myJdkPanel;
private final Project myProject;
private final ProjectStructureConfigurable myProjectStructureConfigurable;
private final ProjectSdksModel myJdksModel;
private final SdkModel.Listener myListener = new SdkModel.Listener() {
@Override
public void sdkAdded(@NotNull Sdk sdk) {
reloadModel();
}
@Override
public void beforeSdkRemove(@NotNull Sdk sdk) {
reloadModel();
}
@Override
public void sdkChanged(@NotNull Sdk sdk, String previousName) {
reloadModel();
}
@Override
public void sdkHomeSelected(@NotNull Sdk sdk, @NotNull String newSdkHome) {
reloadModel();
}
};
private boolean myFreeze = false;
public ProjectJdkConfigurable(ProjectStructureConfigurable projectStructureConfigurable, final ProjectSdksModel jdksModel) {
myProject = projectStructureConfigurable.getProject();
myProjectStructureConfigurable = projectStructureConfigurable;
myJdksModel = jdksModel;
myJdksModel.addListener(myListener);
}
@Nullable
public Sdk getSelectedProjectJdk() {
return myCbProjectJdk != null ? myJdksModel.findSdk(myCbProjectJdk.getSelectedJdk()) : null;
}
@NotNull
@Override
public JComponent createComponent() {
if (myJdkPanel == null) {
myJdkPanel = new JPanel(new GridBagLayout());
myCbProjectJdk = new JdkComboBox(myProject, myJdksModel, SimpleJavaSdkType.notSimpleJavaSdkType(), null, null, null);
myCbProjectJdk.showNoneSdkItem();
myCbProjectJdk.addActionListener(new ActionListener() {
@Override
public void actionPerformed(ActionEvent e) {
if (myFreeze) return;
myJdksModel.setProjectSdk(myCbProjectJdk.getSelectedJdk());
clearCaches();
}
});
String accessibleName = StringUtil.removeHtmlTags(JavaUiBundle.message("module.libraries.target.jdk.project.radio.name"));
String accessibleDescription = StringUtil.removeHtmlTags(JavaUiBundle.message("module.libraries.target.jdk.project.radio.description"));
myCbProjectJdk.getAccessibleContext().setAccessibleName(accessibleName);
myCbProjectJdk.getAccessibleContext().setAccessibleDescription(
accessibleDescription);
String labelString = new HtmlBuilder()
.appendRaw(JavaUiBundle.message("module.libraries.target.jdk.project.radio.name")).br()
.appendRaw(JavaUiBundle.message("module.libraries.target.jdk.project.radio.description")).wrapWith("html").toString();
myJdkPanel.add(new JLabel(labelString), new GridBagConstraints(0, 0, 3, 1, 0, 0, NORTHWEST, NONE, JBUI.insetsBottom(4), 0, 0));
myJdkPanel.add(myCbProjectJdk, new GridBagConstraints(0, 1, 1, 1, 0, 1.0, NORTHWEST, NONE, JBUI.insetsLeft(4), 0, 0));
final JButton editButton = new JButton(ApplicationBundle.message("button.edit"));
myCbProjectJdk.setEditButton(editButton, myProject, myJdksModel::getProjectSdk);
myJdkPanel.add(editButton, new GridBagConstraints(RELATIVE, 1, 1, 1, 1.0, 0, NORTHWEST, NONE, JBUI.insetsLeft(4), 0, 0));
}
return myJdkPanel;
}
  /**
   * Re-populates the combo box from the model and re-selects the project SDK.
   * The selection is done under myFreeze so the combo's action listener does
   * not write the programmatic selection back into the model.
   */
  private void reloadModel() {
    myFreeze = true;
    final Sdk projectJdk = myJdksModel.getProjectSdk();
    if (myCbProjectJdk != null) {
      myCbProjectJdk.reloadModel();
      // Prefer the model's SDK name; fall back to the name persisted on the project.
      final String sdkName = projectJdk == null ? ProjectRootManager.getInstance(myProject).getProjectSdkName() : projectJdk.getName();
      if (sdkName != null) {
        final Sdk jdk = myJdksModel.findSdk(sdkName);
        if (jdk != null) {
          myCbProjectJdk.setSelectedJdk(jdk);
        } else {
          // Name refers to an SDK that no longer exists: show it as invalid
          // and re-validate the modules that may depend on it.
          myCbProjectJdk.setInvalidJdk(sdkName);
          clearCaches();
        }
      } else {
        myCbProjectJdk.setSelectedJdk(null);
      }
    }
    else {
      LOG.error("'createComponent' wasn't called before 'reset' for " + toString());
    }
    myFreeze = false;
  }
private void clearCaches() {
final ModuleStructureConfigurable rootConfigurable = myProjectStructureConfigurable.getModulesConfig();
Module[] modules = rootConfigurable.getModules();
for (Module module : modules) {
final StructureConfigurableContext context = rootConfigurable.getContext();
context.getDaemonAnalyzer().queueUpdate(new ModuleProjectStructureElement(context, module));
}
}
@Override
public boolean isModified() {
final Sdk projectJdk = ProjectRootManager.getInstance(myProject).getProjectSdk();
return !Comparing.equal(projectJdk, getSelectedProjectJdk());
}
@Override
public void apply() {
ProjectRootManager.getInstance(myProject).setProjectSdk(getSelectedProjectJdk());
}
  /**
   * Resets the UI to the project's persisted SDK.
   * NOTE(review): after reloadModel() this re-runs nearly the same selection
   * logic, but reads the SDK name from ProjectRootManager only (ignoring the
   * model) and without clearCaches() on the invalid branch — confirm whether
   * the duplication is intentional before consolidating.
   */
  @Override
  public void reset() {
    reloadModel();
    if (myCbProjectJdk != null) {
      final String sdkName = ProjectRootManager.getInstance(myProject).getProjectSdkName();
      if (sdkName != null) {
        final Sdk jdk = myJdksModel.findSdk(sdkName);
        if (jdk != null) {
          myCbProjectJdk.setSelectedJdk(jdk);
        } else {
          // Persisted name does not match any known SDK: show it as invalid.
          myCbProjectJdk.setInvalidJdk(sdkName);
        }
      } else {
        myCbProjectJdk.setSelectedJdk(null);
      }
    }
  }
  /**
   * Detaches the model listener and drops the UI components so they can be
   * garbage-collected; createComponent() will rebuild them on demand.
   */
  @Override
  public void disposeUIResources() {
    myJdksModel.removeListener(myListener);
    myJdkPanel = null;
    myCbProjectJdk = null;
  }
  /**
   * Registers a listener for SDK selection changes.
   * NOTE(review): throws NPE if called before createComponent() (or after
   * disposeUIResources()), since myCbProjectJdk is null then — confirm callers
   * respect that ordering.
   */
  void addChangeListener(ActionListener listener) {
    myCbProjectJdk.addActionListener(listener);
  }
}
| dahlstrom-g/intellij-community | java/idea-ui/src/com/intellij/openapi/roots/ui/configuration/ProjectJdkConfigurable.java | Java | apache-2.0 | 7,522 |
require 'rqrcode'
# Presentation helpers for rendering link rows in the views.
#
# NOTE(review): the attribute is spelled :experies_on (sic) throughout the
# code base; the misspelled key is preserved here because it is the actual
# hash key the records carry.
module LinksHelper
  # HTML badge describing when +link+ expires: "Never" when it does not
  # expire, "Today" when it expires today, otherwise a momentjs-formatted
  # date cell with the human-readable date in a tooltip.
  def display_expire_flag(link)
    return "<div class=\"grey text\">Never</div>".html_safe unless link[:does_expire]

    if link[:experies_on] == Date.today
      "<div class=\"red text\">Today</div>".html_safe
    else
      "<div class=\"ui momentjs\" data-tooltip=\"#{link[:experies_on].strftime("%d.%m.%Y")}\" data-position=\"top left\">#{link[:experies_on].strftime("%Y%m%d")}</div>".html_safe
    end
  end

  # Primary "Visit" button pointing at +link+, with the raw URL as tooltip.
  def display_link_button(link)
    "<a href=\"#{link}\" class=\"ui primary button load\" data-tooltip=\"#{link}\" data-position=\"top right\">Visit</a>".html_safe
  end

  # Builds a PNG QR code for +link+ and scales it to 150x150 pixels.
  def generate_qrcode(link)
    png = RQRCode::QRCode.new(link).as_png(
      resize_gte_to: false,
      resize_exactly_to: false,
      fill: 'white',
      color: 'black',
      size: 120,
      border_modules: 4,
      module_px_size: 6,
      file: nil # path to write
    )
    png.resize(150, 150)
  end
end
| Baschtie/LinkList | app/helpers/links_helper.rb | Ruby | apache-2.0 | 1,045 |
#!/usr/bin/python2.7
# Entry-point script: puts the bundled "python-modules" directory on sys.path
# and then starts the ACEStream engine console.
import os
import sys
# Directory of this file with symlinks resolved; the bundled dependencies
# (including the ACEStream package imported below) live in its
# "python-modules" subdirectory.
base_dir = os.path.dirname(os.path.realpath(__file__))
python_modules_dir = os.path.join(base_dir,"python-modules")
# Must run before the ACEStream import below — the package is only reachable
# through this path entry.
sys.path.append(python_modules_dir)
# NOTE(review): curdir is derived from sys.argv[0] while base_dir uses
# __file__; the two can differ when the script is invoked via a symlink.
# Confirm which one EngineConsole.start actually expects.
curdir = os.path.abspath(os.path.dirname(sys.argv[0]))
from ACEStream.Plugin.EngineConsole import start
# Plugin flavour identifier handed through to the engine's start() entry point.
apptype = 'acestream'
start(apptype, curdir)
| aplicatii-romanesti/allinclusive-kodi-pi | .kodi/userdata/addon_data/plugin.video.p2p-streams/acestream/ace/start.py | Python | apache-2.0 | 345 |
package io.quarkus.it.panache;
import java.util.Optional;
import javax.enterprise.context.ApplicationScoped;
import javax.transaction.Transactional;
import io.quarkus.hibernate.orm.panache.PanacheRepositoryBase;
@ApplicationScoped
@Transactional
public class UserRepository implements PanacheRepositoryBase<User, String> {

    /**
     * Looks up a {@link User} by primary key.
     *
     * @param id the user's identifier (primary key)
     * @return the matching user, or {@link Optional#empty()} if none exists
     */
    public Optional<User> find(final String id) {
        // Panache already provides an Optional-returning lookup; use it
        // instead of hand-wrapping the nullable findById(id).
        return findByIdOptional(id);
    }
}
| quarkusio/quarkus | integration-tests/hibernate-orm-panache/src/main/java/io/quarkus/it/panache/UserRepository.java | Java | apache-2.0 | 436 |
package antw.logger.model;
import java.util.Collection;
import antw.common.model.Name;
/**
 * A build project observed by the Ant logger. Identity (equals/hashCode) is
 * based solely on the project name.
 */
public class Project extends Name {

    // True when this project runs as a sub-build of another project.
    private boolean _subProject;
    // Targets executed within this project.
    private Targets _targets = new Targets();

    public Project() {
    }

    public Project(String name) {
        super(name);
    }

    /**
     * Returns the target with the given name, wiring its back-reference to
     * this project before handing it out.
     */
    public Target getTarget(String name) {
        Target target = _targets.get(name);
        target.setProject(this);
        return target;
    }

    /** Delegates to the targets to compute build times relative to {@code duration}. */
    public Collection<Target> computeRelativeBuildTime(long duration) {
        return _targets.computeRelativeBuildTime(duration);
    }

    /** Fluent setter for the sub-project flag. */
    public Project setSubProject(boolean subProject) {
        _subProject = subProject;
        return this;
    }

    public boolean isSubProject() {
        return _subProject;
    }

    @Override
    public int hashCode() {
        return getName().hashCode();
    }

    /**
     * Name-based equality, consistent with {@link #hashCode()}.
     * FIX: the previous implementation cast unconditionally and threw
     * ClassCastException for non-Project arguments; the equals contract
     * requires returning {@code false} instead.
     */
    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (!(obj instanceof Project)) {
            return false;
        }
        Project other = (Project) obj;
        return this.getName().equals(other.getName());
    }
}
| mbauhardt/antw | modules/logger/src/main/java/antw/logger/model/Project.java | Java | apache-2.0 | 1,082 |
/**
* Copyright 2017-2019 The GreyCat Authors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package greycat.struct;
/**
 * An ordered, mutable collection of {@link EStruct} end-points.
 * The mutator methods return an {@code ERelation}, allowing fluent chaining.
 */
public interface ERelation {

    /** All member nodes of this relation. */
    EStruct[] nodes();

    /** The node at position {@code index}. */
    EStruct node(int index);

    /** Number of nodes currently in the relation. */
    int size();

    /** Appends {@code eStruct} to the relation. */
    ERelation add(EStruct eStruct);

    /** Appends every element of {@code eStructs} to the relation. */
    ERelation addAll(EStruct[] eStructs);

    /** Removes all nodes from the relation. */
    ERelation clear();
}
| datathings/greycat | greycat/src/main/java/greycat/struct/ERelation.java | Java | apache-2.0 | 872 |
/**
* Copyright 2004-2014 The Kuali Foundation
*
* Licensed under the Educational Community License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.opensource.org/licenses/ecl2.php
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kuali.kpme.core.salarygroup.web;
import org.apache.commons.lang.StringUtils;
import org.kuali.kpme.core.api.salarygroup.SalaryGroup;
import org.kuali.kpme.core.lookup.KPMELookupableHelperServiceImpl;
import org.kuali.kpme.core.salarygroup.SalaryGroupBo;
import org.kuali.kpme.core.service.HrServiceLocator;
import org.kuali.kpme.core.util.TKUtils;
import org.kuali.rice.core.api.mo.ModelObjectUtils;
import org.kuali.rice.krad.bo.BusinessObject;
import java.util.List;
import java.util.Map;
public class SalaryGroupLookupableHelper extends KPMELookupableHelperServiceImpl {

    /** Converts immutable {@link SalaryGroup} API objects into {@link SalaryGroupBo} business objects. */
    private static final ModelObjectUtils.Transformer<SalaryGroup, SalaryGroupBo> toSalaryGroupBo =
            new ModelObjectUtils.Transformer<SalaryGroup, SalaryGroupBo>() {
                public SalaryGroupBo transform(SalaryGroup input) {
                    return SalaryGroupBo.from(input);
                }
            };

    /**
     * Executes a salary-group lookup using the criteria captured in
     * {@code fieldValues} and returns the matches as business objects.
     */
    public List<? extends BusinessObject> getSearchResults(Map<String, String> fieldValues) {
        // A lone "%" wildcard means "no filter" for the salary group code.
        String hrSalGroup = fieldValues.get("hrSalGroup");
        if (StringUtils.equals(hrSalGroup, "%")) {
            hrSalGroup = "";
        }

        final String effectiveDate = fieldValues.get("effectiveDate");
        final String fromEffdt = TKUtils.getFromDateString(effectiveDate);
        final String toEffdt = TKUtils.getToDateString(effectiveDate);

        final List<SalaryGroup> matches = HrServiceLocator.getSalaryGroupService().getSalaryGroups(
                hrSalGroup,
                fieldValues.get("institution"),
                fieldValues.get("location"),
                fieldValues.get("leavePlan"),
                TKUtils.formatDateString(fromEffdt),
                TKUtils.formatDateString(toEffdt),
                fieldValues.get("active"),
                fieldValues.get("history"),
                fieldValues.get("benefitsEligible"),
                fieldValues.get("leaveEligible"),
                fieldValues.get("percentTime"));
        return ModelObjectUtils.transform(matches, toSalaryGroupBo);
    }
}
| kuali/kpme | core/impl/src/main/java/org/kuali/kpme/core/salarygroup/web/SalaryGroupLookupableHelper.java | Java | apache-2.0 | 2,777 |
package org.whale.system.domain;
import org.whale.system.annotation.jdbc.Column;
import org.whale.system.annotation.jdbc.Id;
import org.whale.system.annotation.jdbc.Order;
import org.whale.system.annotation.jdbc.Table;
import org.whale.system.annotation.jdbc.Validate;
import org.whale.system.base.BaseEntry;
import org.whale.system.domain.Dept;
/**
 * Department entity (root department requires initialization via SQL script).
 *
 * @author wjs
 * @Date 2014-9-16
 */
@Table(value="sys_dept", cnName="部门")
public class Dept extends BaseEntry {

    private static final long serialVersionUID = -1410859166676l;

    /** Field-name constant: department name. */
    public static final String F_deptName = "deptName";
    /** Field-name constant: department code. */
    public static final String F_deptCode = "deptCode";
    /** Field-name constant: parent department. */
    public static final String F_pid = "pid";
    /** Field-name constant: sort order. */
    public static final String F_orderNo = "orderNo";

    // Primary key.
    @Id
    @Column(name="id", cnName="id")
    private Long id;

    // Department name (required).
    @Validate(required=true)
    @Column(name="deptName", cnName="部门名称")
    private String deptName;

    // Department code (required, unique).
    @Validate(required=true)
    @Column(name="deptCode", cnName="部门编码", unique=true)
    private String deptCode;

    // Sort order.
    @Order
    @Column(name="orderNo", cnName="排序")
    private Integer orderNo;

    // Remarks.
    @Column(name="remark", cnName="备注")
    private String remark;

    // Parent department id.
    @Column(name="pid", cnName="父部门")
    private Long pid;

    // Contact telephone number.
    @Column(name="deptTel", cnName="联系电话")
    private String deptTel;

    // Contact address.
    @Column(name="deptAddr", cnName="联系地址")
    private String deptAddr;

    // Department type.
    @Column(cnName="部门类型")
    private String deptType;

    /** Primary key. */
    public Long getId(){
        return id;
    }
    /** Primary key. */
    public void setId(Long id){
        this.id = id;
    }
    /** Department name. */
    public String getDeptName(){
        return deptName;
    }
    /** Department name. */
    public void setDeptName(String deptName){
        this.deptName = deptName;
    }
    /** Department code. */
    public String getDeptCode(){
        return deptCode;
    }
    /** Department code. */
    public void setDeptCode(String deptCode){
        this.deptCode = deptCode;
    }
    /** Sort order. */
    public Integer getOrderNo(){
        return orderNo;
    }
    /** Sort order. */
    public void setOrderNo(Integer orderNo){
        this.orderNo = orderNo;
    }
    /** Remarks. */
    public String getRemark(){
        return remark;
    }
    /** Remarks. */
    public void setRemark(String remark){
        this.remark = remark;
    }
    /** Parent department id. */
    public Long getPid(){
        return pid;
    }
    /** Parent department id. */
    public void setPid(Long pid){
        this.pid = pid;
    }
    /** Contact telephone number. */
    public String getDeptTel(){
        return deptTel;
    }
    /** Contact telephone number. */
    public void setDeptTel(String deptTel){
        this.deptTel = deptTel;
    }
    /** Contact address. */
    public String getDeptAddr(){
        return deptAddr;
    }
    /** Contact address. */
    public void setDeptAddr(String deptAddr){
        this.deptAddr = deptAddr;
    }
    /** Department type. */
    public String getDeptType() {
        return deptType;
    }
    /** Department type. */
    public void setDeptType(String deptType) {
        this.deptType = deptType;
    }
} | fywxin/base | system-parent/system-dao/src/main/java/org/whale/system/domain/Dept.java | Java | apache-2.0 | 2,911 |
// Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
package com.twitter.intellij.pants.model;
import com.intellij.testFramework.LightPlatformTestCase;
/**
 * Tests parsing of Pants "export" JSON output across format versions, plus
 * the caching behavior of {@code SimpleExportResult.getExportResult}.
 */
public class SimpleExportResultTest extends LightPlatformTestCase {
  // FIX: made final — it is a constant used to make getJdkHome(...) calls readable.
  private static final boolean STRICT = true;

  /** Version 1.0.7 carries "preferred_jvm_distributions"; both strict and non-strict homes resolve. */
  public void testParseExport_1_0_7() throws Exception {
    final String exportOutput =
      "{\n" +
      " \"libraries\": {},\n" +
      " \"version\": \"1.0.7\",\n" +
      " \"targets\": {},\n" +
      " \"preferred_jvm_distributions\": {\n" +
      " \"java7\": {\n" +
      " \"strict\": \"/Library/Java/JavaVirtualMachines/jdk1.7.0_72.jdk/Contents/Home\",\n" +
      " \"non_strict\": \"/Library/Java/JavaVirtualMachines/jdk1.7.0_72.jdk/Contents/Home\"\n" +
      " },\n" +
      " \"java6\": {\n" +
      " \"strict\": \"/System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Home\",\n" +
      " \"non_strict\": \"/Library/Java/JavaVirtualMachines/jdk1.7.0_72.jdk/Contents/Home\"\n" +
      " },\n" +
      " \"java8\": {\n" +
      " \"strict\": \"/Library/Java/JavaVirtualMachines/jdk1.8.0_65.jdk/Contents/Home\",\n" +
      " \"non_strict\": \"/Library/Java/JavaVirtualMachines/jdk1.8.0_65.jdk/Contents/Home\"\n" +
      " }\n" +
      " },\n" +
      " \"jvm_platforms\": {\n" +
      " \"platforms\": {\n" +
      " \"java7\": {\n" +
      " \"source_level\": \"1.7\",\n" +
      " \"args\": [],\n" +
      " \"target_level\": \"1.7\"\n" +
      " },\n" +
      " \"java6\": {\n" +
      " \"source_level\": \"1.6\",\n" +
      " \"args\": [],\n" +
      " \"target_level\": \"1.6\"\n" +
      " },\n" +
      " \"java8\": {\n" +
      " \"source_level\": \"1.8\",\n" +
      " \"args\": [],\n" +
      " \"target_level\": \"1.8\"\n" +
      " }\n" +
      " },\n" +
      " \"default_platform\": \"java6\"\n" +
      " }\n" +
      "}";
    SimpleExportResult exportResult = SimpleExportResult.parse(exportOutput);
    assertEquals("1.0.7", exportResult.getVersion());
    assertEquals("java6", exportResult.getJvmPlatforms().getDefaultPlatform());
    assertEquals(
      "/Library/Java/JavaVirtualMachines/jdk1.7.0_72.jdk/Contents/Home",
      exportResult.getJdkHome(!STRICT).get()
    );
    assertEquals(
      "/System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Home",
      exportResult.getJdkHome(STRICT).get()
    );
    assertTrue(exportResult.getJdkHome(STRICT).isPresent());
  }

  /** Version 1.0.6 predates "preferred_jvm_distributions"; the accessor must return null. */
  public void testParseExport_1_0_6() throws Exception {
    final String exportOutput =
      "{\n" +
      " \"libraries\": {},\n" +
      " \"version\": \"1.0.6\",\n" +
      " \"targets\": {},\n" +
      " \"jvm_platforms\": {\n" +
      " \"platforms\": {\n" +
      " \"java7\": {\n" +
      " \"source_level\": \"1.7\",\n" +
      " \"args\": [],\n" +
      " \"target_level\": \"1.7\"\n" +
      " },\n" +
      " \"java6\": {\n" +
      " \"source_level\": \"1.6\",\n" +
      " \"args\": [],\n" +
      " \"target_level\": \"1.6\"\n" +
      " },\n" +
      " \"java8\": {\n" +
      " \"source_level\": \"1.8\",\n" +
      " \"args\": [],\n" +
      " \"target_level\": \"1.8\"\n" +
      " }\n" +
      " },\n" +
      " \"default_platform\": \"java6\"\n" +
      " }\n" +
      "}";
    SimpleExportResult exportResult = SimpleExportResult.parse(exportOutput);
    assertEquals("1.0.6", exportResult.getVersion());
    assertEquals("java6", exportResult.getJvmPlatforms().getDefaultPlatform());
    assertNull(exportResult.getPreferredJvmDistributions());
  }

  /** Repeated lookups return the cached instance until clearCache() is called. */
  public void testExportCache() {
    SimpleExportResult export_a = SimpleExportResult.getExportResult("./.cache/pants/pants");
    SimpleExportResult export_b = SimpleExportResult.getExportResult("./.cache/pants/pants");
    // export_b should be the cached result, i.e. the same instance as export_a.
    // FIX: use identity assertions instead of assertTrue(a == b) for clearer failures.
    assertSame(export_a, export_b);
    SimpleExportResult.clearCache();
    SimpleExportResult export_c = SimpleExportResult.getExportResult("./.cache/pants/pants");
    assertNotSame(export_a, export_c);
  }

  /** A platform without a "strict" entry yields an absent strict JDK home. */
  public void testMissingStrict() {
    final String exportOutput =
      "{\n" +
      " \"preferred_jvm_distributions\": {\n" +
      " \"java7\": {\n" +
      " \"non_strict\": \"/Library/Java/JavaVirtualMachines/jdk1.8.0_102.jdk/Contents/Home\"\n" +
      " },\n" +
      " \"java7-jdk8\": {\n" +
      " \"strict\": \"/Library/Java/JavaVirtualMachines/jdk1.8.0_102.jdk/Contents/Home\",\n" +
      " \"non_strict\": \"/Library/Java/JavaVirtualMachines/jdk1.8.0_102.jdk/Contents/Home\"\n" +
      " },\n" +
      " \"java8\": {\n" +
      " \"strict\": \"/Library/Java/JavaVirtualMachines/jdk1.8.0_102.jdk/Contents/Home\",\n" +
      " \"non_strict\": \"/Library/Java/JavaVirtualMachines/jdk1.8.0_102.jdk/Contents/Home\"\n" +
      " }\n" +
      " },\n" +
      " \"libraries\": {},\n" +
      " \"version\": \"1.0.7\",\n" +
      " \"targets\": {},\n" +
      " \"jvm_platforms\": {\n" +
      " \"platforms\": {\n" +
      " \"java7\": {\n" +
      " \"source_level\": \"1.7\",\n" +
      " \"args\": [],\n" +
      " \"target_level\": \"1.7\"\n" +
      " },\n" +
      " \"java7-jdk8\": {\n" +
      " \"source_level\": \"1.7\",\n" +
      " \"args\": [],\n" +
      " \"target_level\": \"1.8\"\n" +
      " },\n" +
      " \"java8\": {\n" +
      " \"source_level\": \"1.8\",\n" +
      " \"args\": [],\n" +
      " \"target_level\": \"1.8\"\n" +
      " }\n" +
      " },\n" +
      " \"default_platform\": \"java7\"\n" +
      " }\n" +
      "}";
    SimpleExportResult exportResult = SimpleExportResult.parse(exportOutput);
    // java7 has no strict jdk home path.
    assertFalse(exportResult.getJdkHome(STRICT).isPresent());
  }

  /** A default_platform that names no known platform yields an absent JDK home. */
  public void testNoDefaultPlatform() {
    final String exportOutput =
      "{\n" +
      " \"preferred_jvm_distributions\": {\n" +
      " \"java7\": {\n" +
      " \"strict\": \"/Library/Java/JavaVirtualMachines/jdk1.7.0_80.jdk/Contents/Home\",\n" +
      " \"non_strict\": \"/Library/Java/JavaVirtualMachines/jdk1.7.0_80.jdk/Contents/Home\"\n" +
      " },\n" +
      " \"java6\": {\n" +
      " \"non_strict\": \"/Library/Java/JavaVirtualMachines/jdk1.7.0_80.jdk/Contents/Home\"\n" +
      " },\n" +
      " \"java8\": {\n" +
      " \"strict\": \"/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home\",\n" +
      " \"non_strict\": \"/Library/Java/JavaVirtualMachines/jdk1.8.0_60.jdk/Contents/Home\"\n" +
      " }\n" +
      " },\n" +
      " \"libraries\": {},\n" +
      " \"version\": \"1.0.9\",\n" +
      " \"targets\": {},\n" +
      " \"jvm_platforms\": {\n" +
      " \"platforms\": {\n" +
      " \"java7\": {\n" +
      " \"source_level\": \"1.7\",\n" +
      " \"args\": [],\n" +
      " \"target_level\": \"1.7\"\n" +
      " },\n" +
      " \"java6\": {\n" +
      " \"source_level\": \"1.6\",\n" +
      " \"args\": [],\n" +
      " \"target_level\": \"1.6\"\n" +
      " },\n" +
      " \"java8\": {\n" +
      " \"source_level\": \"1.8\",\n" +
      " \"args\": [],\n" +
      " \"target_level\": \"1.8\"\n" +
      " }\n" +
      " },\n" +
      " \"default_platform\": \"(DistributionLocator.cached().version 1.8)\"\n" +
      " }\n" +
      "}";
    SimpleExportResult exportResult = SimpleExportResult.parse(exportOutput);
    // (DistributionLocator.cached().version 1.8) does not correspond to any platform
    // as far as this plugin is concerned.
    assertFalse(exportResult.getJdkHome(STRICT).isPresent());
  }
}
| pantsbuild/intellij-pants-plugin | src/test/scala/com/twitter/intellij/pants/model/SimpleExportResultTest.java | Java | apache-2.0 | 8,715 |
/*
* Copyright 2012-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.identitymanagement.waiters;
import javax.annotation.Generated;
import com.amazonaws.annotation.SdkInternalApi;
import com.amazonaws.waiters.SdkFunction;
import com.amazonaws.services.identitymanagement.model.GetUserRequest;
import com.amazonaws.services.identitymanagement.model.GetUserResult;
import com.amazonaws.services.identitymanagement.AmazonIdentityManagement;
/**
 * {@link SdkFunction} adapter that forwards a {@link GetUserRequest} to
 * {@link AmazonIdentityManagement#getUser}; used internally by the IAM waiters.
 * Code-generated — do not edit by hand.
 */
@SdkInternalApi
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class GetUserFunction implements SdkFunction<GetUserRequest, GetUserResult> {

    /**
     * Represents the service client
     */
    private final AmazonIdentityManagement client;

    /**
     * Constructs a new GetUserFunction with the given client
     * 
     * @param client
     *        Service client
     */
    public GetUserFunction(AmazonIdentityManagement client) {
        this.client = client;
    }

    /**
     * Makes a call to the operation specified by the waiter by taking the corresponding request and returns the
     * corresponding result
     * 
     * @param getUserRequest
     *        Corresponding request for the operation
     * @return Corresponding result of the operation
     */
    @Override
    public GetUserResult apply(GetUserRequest getUserRequest) {
        return client.getUser(getUserRequest);
    }
}
| dagnir/aws-sdk-java | aws-java-sdk-iam/src/main/java/com/amazonaws/services/identitymanagement/waiters/GetUserFunction.java | Java | apache-2.0 | 1,917 |
@extends('layouts.club')
@section('content')
<div class="container">
<div class="row">
<div class="col-md-10 col-md-offset-1">
<div class="row">
<div class="col-sm-5">
<h3>Confirmation, are you sure?</h3>
                    <p>We will remove this information permanently; no refunds will be made at this time.</p>
<br />
{{Form::open(array('action' => array('MemberController@destroy',$member->team->id, $member->id), 'class'=>'form-horizontal', 'method' => 'delete')) }}
<button type="submit" class="btn btn-danger btn-outline">Remove Member</button>
<a href="{{URL::action('TeamController@index')}}" class="btn btn-primary btn-outline">Cancel</a>
{{Form::close()}}
</div>
<div class="col-sm-7">
</div><!-- end of col-sm-7 row -->
</div><!-- end of first row -->
<br>
<div class="row">
<div class="col-md-12">
</div>
</div>
</div>
</div>
</div>
@stop | PlusTechnologies/league-production | app/views/app/club/member/delete.blade.php | PHP | apache-2.0 | 991 |
package org.gradle.test.performance.mediummonolithicjavaproject.p423;
import org.junit.Test;
import static org.junit.Assert.*;
/**
 * Generated test for the bean {@code Production8472}: each test assigns a
 * property and asserts the getter returns the same value (part of Gradle's
 * performance-test corpus, per the package name).
 */
public class Test8472 {

    Production8472 objectUnderTest = new Production8472();

    @Test
    public void testProperty0() {
        Production8469 value = new Production8469();
        objectUnderTest.setProperty0(value);
        assertEquals(value, objectUnderTest.getProperty0());
    }

    @Test
    public void testProperty1() {
        Production8470 value = new Production8470();
        objectUnderTest.setProperty1(value);
        assertEquals(value, objectUnderTest.getProperty1());
    }

    @Test
    public void testProperty2() {
        Production8471 value = new Production8471();
        objectUnderTest.setProperty2(value);
        assertEquals(value, objectUnderTest.getProperty2());
    }

    @Test
    public void testProperty3() {
        String value = "value";
        objectUnderTest.setProperty3(value);
        assertEquals(value, objectUnderTest.getProperty3());
    }

    @Test
    public void testProperty4() {
        String value = "value";
        objectUnderTest.setProperty4(value);
        assertEquals(value, objectUnderTest.getProperty4());
    }

    @Test
    public void testProperty5() {
        String value = "value";
        objectUnderTest.setProperty5(value);
        assertEquals(value, objectUnderTest.getProperty5());
    }

    @Test
    public void testProperty6() {
        String value = "value";
        objectUnderTest.setProperty6(value);
        assertEquals(value, objectUnderTest.getProperty6());
    }

    @Test
    public void testProperty7() {
        String value = "value";
        objectUnderTest.setProperty7(value);
        assertEquals(value, objectUnderTest.getProperty7());
    }

    @Test
    public void testProperty8() {
        String value = "value";
        objectUnderTest.setProperty8(value);
        assertEquals(value, objectUnderTest.getProperty8());
    }

    @Test
    public void testProperty9() {
        String value = "value";
        objectUnderTest.setProperty9(value);
        assertEquals(value, objectUnderTest.getProperty9());
    }
} | oehme/analysing-gradle-performance | my-app/src/test/java/org/gradle/test/performance/mediummonolithicjavaproject/p423/Test8472.java | Java | apache-2.0 | 2,174 |
<?php
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v9/enums/quality_score_bucket.proto
namespace Google\Ads\GoogleAds\V9\Enums\QualityScoreBucketEnum;
use UnexpectedValueException;
/**
* Enum listing the possible quality score buckets.
*
* Protobuf type <code>google.ads.googleads.v9.enums.QualityScoreBucketEnum.QualityScoreBucket</code>
*/
/**
 * Enum listing the possible quality score buckets.
 *
 * Protobuf type <code>google.ads.googleads.v9.enums.QualityScoreBucketEnum.QualityScoreBucket</code>
 */
class QualityScoreBucket
{
    /**
     * Not specified.
     *
     * Generated from protobuf enum <code>UNSPECIFIED = 0;</code>
     */
    const UNSPECIFIED = 0;
    /**
     * Used for return value only. Represents value unknown in this version.
     *
     * Generated from protobuf enum <code>UNKNOWN = 1;</code>
     */
    const UNKNOWN = 1;
    /**
     * Quality of the creative is below average.
     *
     * Generated from protobuf enum <code>BELOW_AVERAGE = 2;</code>
     */
    const BELOW_AVERAGE = 2;
    /**
     * Quality of the creative is average.
     *
     * Generated from protobuf enum <code>AVERAGE = 3;</code>
     */
    const AVERAGE = 3;
    /**
     * Quality of the creative is above average.
     *
     * Generated from protobuf enum <code>ABOVE_AVERAGE = 4;</code>
     */
    const ABOVE_AVERAGE = 4;

    /** Maps each enum value to its canonical protobuf constant name. */
    private static $valueToName = [
        self::UNSPECIFIED => 'UNSPECIFIED',
        self::UNKNOWN => 'UNKNOWN',
        self::BELOW_AVERAGE => 'BELOW_AVERAGE',
        self::AVERAGE => 'AVERAGE',
        self::ABOVE_AVERAGE => 'ABOVE_AVERAGE',
    ];

    /**
     * Returns the protobuf constant name for the given enum value.
     *
     * @param int $value one of the class constants
     * @return string the constant's name
     * @throws UnexpectedValueException if $value is not a known constant
     */
    public static function name($value)
    {
        if (!isset(self::$valueToName[$value])) {
            throw new UnexpectedValueException(sprintf(
                    'Enum %s has no name defined for value %s', __CLASS__, $value));
        }
        return self::$valueToName[$value];
    }

    /**
     * Returns the enum value for the given constant name (the lookup
     * upper-cases $name, so lower-case input is accepted).
     *
     * @param string $name a constant name such as "AVERAGE"
     * @return int the corresponding constant value
     * @throws UnexpectedValueException if $name is not a known constant
     */
    public static function value($name)
    {
        $const = __CLASS__ . '::' . strtoupper($name);
        if (!defined($const)) {
            throw new UnexpectedValueException(sprintf(
                    'Enum %s has no value defined for name %s', __CLASS__, $name));
        }
        return constant($const);
    }
}

// Adding a class alias for backwards compatibility with the previous class name.
class_alias(QualityScoreBucket::class, \Google\Ads\GoogleAds\V9\Enums\QualityScoreBucketEnum_QualityScoreBucket::class);
| googleads/google-ads-php | src/Google/Ads/GoogleAds/V9/Enums/QualityScoreBucketEnum/QualityScoreBucket.php | PHP | apache-2.0 | 2,317 |
package com.wayneleo.quickstart.framework.core.cache;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Double-buffered static cache: readers use the "current" generation while
 * {@code refresh} rebuilds the other generation and then flips the flag.
 */
public class CacheManager {

    // The two alternating cache generations (namespace -> key -> value).
    private static final Map<String, Map<String, Object>> REPOSITORIES_0 = new HashMap<>();
    private static final Map<String, Map<String, Object>> REPOSITORIES_1 = new HashMap<>();
    // Index (0 or 1) of the generation readers should use.
    // NOTE(review): not volatile — other threads may not observe a flip
    // promptly; confirm whether that is acceptable.
    private static int currentRepositoriesFlag = 0;
    // Pins each thread to the generation it saw on first access.
    // NOTE(review): the cached value is never invalidated, so a long-lived
    // thread keeps reading its original generation even after refresh() flips
    // the flag — verify this is intended.
    private static ThreadLocal<Integer> localRepositoriesFlag = new ThreadLocal<>();

    /**
     * Rebuilds the inactive generation from the given initializers, then makes
     * it the current one.
     * NOTE(review): the target generation is not cleared first, so entries
     * written by earlier refreshes that are no longer produced by any
     * initializer will survive — confirm whether stale keys are acceptable.
     */
    public static void refresh( List<CacheInitializer> initializers ) {
        List<CacheContent> cacheContents = null;
        // Write into the generation readers are NOT currently using.
        Map<String, Map<String, Object>> repositories = getRepositores( noCurrentFlag() );
        Map<String, Object> repository = null;
        for ( CacheInitializer initializer : initializers ) {
            cacheContents = initializer.doInit();
            if ( ( null == cacheContents ) || ( cacheContents.size() < 1 ) ) {
                continue;
            }
            for ( CacheContent cacheContent : cacheContents ) {
                repository = repositories.get( cacheContent.namespace );
                if ( null == repository ) {
                    repository = new HashMap<>();
                    repositories.put( cacheContent.namespace, repository );
                }
                repository.put( cacheContent.key, cacheContent.value );
            }
        }
        // Flip: the freshly filled generation becomes current.
        currentRepositoriesFlag = noCurrentFlag();
    }

    /** Returns true if the current generation holds an entry for the key in the namespace. */
    public static boolean contains( String namespace, String key ) {
        Map<String, Object> repository = getRepositores( currentFlag() ).get( namespace );
        if ( null == repository ) {
            return false;
        }
        return repository.containsKey( key );
    }

    /** Returns the cached value for the key in the namespace, or null if absent. */
    @SuppressWarnings( "unchecked" )
    public static <T> T get( String namespace, String key ) {
        Map<String, Object> repository = getRepositores( currentFlag() ).get( namespace );
        if ( null != repository ) {
            return ( T ) repository.get( key );
        }
        return null;
    }

    // Maps a flag (0/1) to its backing generation.
    // NOTE(review): method name is a typo for "getRepositories" (private, so
    // renaming would be a safe cleanup).
    private static Map<String, Map<String, Object>> getRepositores( int flag ) {
        if ( 0 == flag ) {
            return REPOSITORIES_0;
        }
        else {
            return REPOSITORIES_1;
        }
    }

    // Flag of the generation this thread reads (cached per thread on first use).
    private static int currentFlag() {
        if ( null == localRepositoriesFlag.get() ) {
            localRepositoriesFlag.set( currentRepositoriesFlag );
        }
        return localRepositoriesFlag.get();
    }

    // Flag of the generation that is NOT current (the refresh target).
    private static int noCurrentFlag() {
        if ( 0 == currentFlag() ) {
            return 1;
        }
        else {
            return 0;
        }
    }
}
| cnwayne/QuickStartForJava | framework/framework-core/src/main/java/com/wayneleo/quickstart/framework/core/cache/CacheManager.java | Java | apache-2.0 | 2,719 |
package test.java.com.iceteaviet.chess.network;
import main.java.com.iceteaviet.chess.network.ChessServer;
import javax.swing.*;
import java.awt.*;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.awt.event.WindowAdapter;
import java.awt.event.WindowEvent;
import java.io.IOException;
/**
* Created by Genius Doan on 6/14/2017.
*/
/**
 * Manual test harness: a minimal Swing chat window backed by {@link ChessServer}.
 */
public class ServerTest extends JFrame implements ActionListener {
    Container cp;
    private JEditorPane htmlPane;     // read-only HTML chat transcript
    private JScrollPane scrollPane;
    private JTextField textField;     // message input
    private ChessServer server;

    public ServerTest() {
        setSize(300, 400);
        setTitle("Server Chat");
        server = ChessServer.getInstance();
        // FIX: the method must be named "windowClosing" (lower-case 'w').
        // The previous "WindowClosing" spelling did not override
        // WindowAdapter.windowClosing, so this listener never fired and the
        // server connection was never closed. @Override now enforces it.
        this.addWindowListener(new WindowAdapter() {
            @Override
            public void windowClosing(WindowEvent e) {
                server.closeConnection();
            }
        });
        htmlPane = new JEditorPane();
        htmlPane.setContentType("text/html");
        scrollPane = new JScrollPane(htmlPane);
        textField = new JTextField(20);
        cp = getContentPane();
        cp.add(textField, BorderLayout.NORTH);
        textField.addActionListener(this);
        cp.add(scrollPane, BorderLayout.CENTER);
        setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);
    }

    public static void main(String[] args) throws IOException {
        ServerTest t = new ServerTest();
        t.server.listen();
        t.setVisible(true);
        t.server.startChat(t.htmlPane, t.textField, "Server");
    }

    @Override
    public void actionPerformed(ActionEvent e) {
        // Intentionally empty: required by ActionListener; input handling is
        // presumably wired up inside ChessServer.startChat — confirm.
    }
} | USAssignmentWarehouse/Chess | src/test/java/com/iceteaviet/chess/network/ServerTest.java | Java | apache-2.0 | 1,607 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.processor.annotation;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Inherited;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Marker annotation a processor implementation can use to indicate that the
 * processor is not thread safe for concurrent execution of its onTrigger()
 * method. By default processors are assumed to be thread safe for concurrent
 * execution.
 *
 * <p>Type-level, runtime-retained, documented and inherited by subclasses.</p>
 */
@Documented
@Target({ElementType.TYPE})
@Retention(RetentionPolicy.RUNTIME)
@Inherited
public @interface TriggerSerially {
}
| rdblue/incubator-nifi | nifi-api/src/main/java/org/apache/nifi/processor/annotation/TriggerSerially.java | Java | apache-2.0 | 1,487 |