code stringlengths 3 1.01M | repo_name stringlengths 5 116 | path stringlengths 3 311 | language stringclasses 30
values | license stringclasses 15
values | size int64 3 1.01M |
|---|---|---|---|---|---|
/*
socket.c
Created: Feb 2001 by Philip Homburg <philip@f-mnx.phicoh.com>
Open a TCP connection
*/
#define _POSIX_C_SOURCE 2
#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/wait.h>
#include <net/hton.h>
#include <net/netlib.h>
#include <net/gen/in.h>
#include <net/gen/inet.h>
#include <netdb.h>
#include <net/gen/socket.h>
#include <net/gen/tcp.h>
#include <net/gen/tcp_io.h>
#define BUF_SIZE 10240
char *progname;
int tcpfd= -1;
char buf[BUF_SIZE];
static int bulk= 0;
static int push= 0;
static int stdout_issocket= 0;
static int timeout;
static void do_conn(char *hostname, char *portname);
static void alrm_conn(int sig);
static void alrm_io(int sig);
static void fullduplex(void);
static void fatal(char *msg, ...);
static void usage(void);
/*
 * Parse options, connect to hostname:portname over TCP and relay data
 * between stdin/stdout and the connection in both directions.
 *
 * Options:
 *   -B          bulk transfer mode (sets NWTO_BULK on the connection)
 *   -P          push the data after every write (NWIOTCPPUSH)
 *   -s          stdout is itself a TCP socket; shut it down on EOF
 *   -t timeout  abort the connect and each I/O phase after 'timeout' seconds
 */
int main(int argc, char *argv[])
{
	int c;
	char *hostname;
	char *portname;
	char *check;
	int B_flag, P_flag, s_flag;
	char *t_arg;

	/* basename(argv[0]) without modifying the string */
	(progname=strrchr(argv[0],'/')) ? progname++ : (progname=argv[0]);

	B_flag= 0;
	P_flag= 0;
	s_flag= 0;
	t_arg= NULL;
	while (c= getopt(argc, argv, "BPst:?"), c != -1)
	{
		switch(c)
		{
		case 'B': B_flag= 1; break;
		case 'P': P_flag= 1; break;
		case 's': s_flag= 1; break;
		case 't': t_arg= optarg; break;
		case '?': usage();
		default:
			fatal("getopt failed: '%c'", c);
		}
	}
	if (t_arg)
	{
		timeout= strtol(t_arg, &check, 0);
		/* fatal() appends its own newline; the old "\n" in these
		 * format strings produced a blank line after the message.
		 */
		if (check[0] != '\0')
			fatal("unable to parse timeout '%s'", t_arg);
		if (timeout <= 0)
			fatal("bad timeout '%d'", timeout);
	}
	else
		timeout= 0;

	/* Exactly two positional arguments are required. */
	if (optind+2 != argc)
		usage();
	hostname= argv[optind++];
	portname= argv[optind++];

	bulk= B_flag;
	push= P_flag;
	stdout_issocket= s_flag;

	do_conn(hostname, portname);

	/* Re-arm the alarm for the I/O phase; the connect-phase timeout was
	 * handled (and cleared) inside do_conn().
	 */
	if (timeout)
	{
		signal(SIGALRM, alrm_io);
		alarm(timeout);
	}

	fullduplex();
	exit(0);
}
/*
 * Resolve hostname and portname (numeric or via the resolver / services
 * database), open the MINIX TCP device and establish the connection.
 * The connected file descriptor is left in the global tcpfd.
 * Honors the global 'timeout' for the connect phase and the global
 * 'bulk' flag for NWTO_BULK.  Exits via fatal() on any error.
 */
static void do_conn(char *hostname, char *portname)
{
	ipaddr_t addr;
	tcpport_t port;
	struct hostent *he;
	struct servent *se;
	char *tcp_device, *check;
	nwio_tcpconf_t tcpconf;
	nwio_tcpcl_t tcpcl;
	nwio_tcpopt_t tcpopt;

	/* Try a dotted-quad address first, then the resolver. */
	if (!inet_aton(hostname, &addr))
	{
		he= gethostbyname(hostname);
		if (he == NULL)
			fatal("unknown hostname '%s'", hostname);
		if (he->h_addrtype != AF_INET || he->h_length != sizeof(addr))
			fatal("bad address for '%s'", hostname);
		memcpy(&addr, he->h_addr, sizeof(addr));
	}

	/* Try a numeric port first, then the services database. */
	port= strtol(portname, &check, 0);
	if (check[0] != 0)
	{
		se= getservbyname(portname, "tcp");
		if (se == NULL)
			fatal("unknown port '%s'", portname);
		port= ntohs(se->s_port);
	}

	/* Allow the TCP device to be overridden from the environment. */
	tcp_device= getenv("TCP_DEVICE");
	if (tcp_device == NULL) tcp_device= TCP_DEVICE;
	tcpfd= open(tcp_device, O_RDWR);
	if (tcpfd == -1)
		fatal("unable to open '%s': %s", tcp_device, strerror(errno));

	tcpconf.nwtc_flags= NWTC_EXCL | NWTC_LP_SEL | NWTC_SET_RA |
		NWTC_SET_RP;
	tcpconf.nwtc_remaddr= addr;
	tcpconf.nwtc_remport= htons(port);
	if (ioctl(tcpfd, NWIOSTCPCONF, &tcpconf) == -1)
		fatal("NWIOSTCPCONF failed: %s", strerror(errno));

	/* Bound the blocking connect with an alarm if requested. */
	if (timeout)
	{
		signal(SIGALRM, alrm_conn);
		alarm(timeout);
	}
	tcpcl.nwtcl_flags= 0;
	if (ioctl(tcpfd, NWIOTCPCONN, &tcpcl) == -1)
	{
		fatal("unable to connect to %s:%u: %s", inet_ntoa(addr),
			ntohs(tcpconf.nwtc_remport), strerror(errno));
	}
	alarm(0);

	if (bulk)
	{
		tcpopt.nwto_flags= NWTO_BULK;
		if (ioctl(tcpfd, NWIOSTCPOPT, &tcpopt) == -1)
			fatal("NWIOSTCPOPT failed: %s", strerror(errno));
	}
}
/* SIGALRM handler armed while connecting: report the timeout and exit.
 * NOTE(review): fatal() calls stdio functions, which are not
 * async-signal-safe; acceptable here because the process exits anyway.
 */
static void alrm_conn(int sig)
{
	fatal("timeout during connect");
}
/* SIGALRM handler armed for the data-transfer phase: report and exit. */
static void alrm_io(int sig)
{
	fatal("timeout during io");
}
/*
 * Run the bidirectional relay.  Forks: the child copies TCP -> stdout,
 * the parent copies stdin -> TCP.  On stdin EOF the parent shuts down
 * its half of the connection and waits for the child, propagating the
 * child's exit status (or re-raising its terminating signal).
 */
static void fullduplex(void)
{
	pid_t cpid;
	int o, r, s, s_errno, loc;

	cpid= fork();
	switch(cpid)
	{
	case -1: fatal("fork failed: %s", strerror(errno));
	case 0:
		/* Child: read from TCP, write to stdout. */
		for (;;)
		{
			r= read(tcpfd, buf, BUF_SIZE);
			if (r == 0)
				break;
			if (r == -1)
			{
				/* Save errno before the ioctl below can
				 * clobber it; the original code saved it but
				 * then printed strerror(errno) anyway.
				 */
				s_errno= errno;
				if (stdout_issocket)
					ioctl(1, NWIOTCPSHUTDOWN, NULL);
				fatal("error reading from TCP conn.: %s",
					strerror(s_errno));
			}
			/* Loop until the whole chunk is written (short
			 * writes are possible).
			 */
			s= r;
			for (o= 0; o<s; o += r)
			{
				r= write(1, buf+o, s-o);
				if (r <= 0)
				{
					fatal("error writing to stdout: %s",
						r == 0 ? "EOF" :
						strerror(errno));
				}
			}
		}
		/* TCP EOF: propagate the shutdown to stdout if it is a
		 * socket too.
		 */
		if (stdout_issocket)
		{
			r= ioctl(1, NWIOTCPSHUTDOWN, NULL);
			if (r == -1)
			{
				fatal("NWIOTCPSHUTDOWN failed on stdout: %s",
					strerror(errno));
			}
		}
		exit(0);
	default:
		break;
	}

	/* Parent: read from stdin, write to TCP. */
	for (;;)
	{
		r= read(0, buf, BUF_SIZE);
		if (r == 0)
			break;
		if (r == -1)
		{
			s_errno= errno;
			kill(cpid, SIGTERM);
			fatal("error reading from stdin: %s",
				strerror(s_errno));
		}
		s= r;
		for (o= 0; o<s; o += r)
		{
			r= write(tcpfd, buf+o, s-o);
			if (r <= 0)
			{
				s_errno= errno;
				kill(cpid, SIGTERM);
				fatal("error writing to TCP conn.: %s",
					r == 0 ? "EOF" :
					strerror(s_errno));
			}
		}
		/* -P: force the data out after every chunk. */
		if (push)
			ioctl(tcpfd, NWIOTCPPUSH, NULL);
	}

	/* stdin EOF: half-close our sending direction. */
	if (ioctl(tcpfd, NWIOTCPSHUTDOWN, NULL) == -1)
	{
		s_errno= errno;
		kill(cpid, SIGTERM);
		fatal("unable to shut down TCP conn.: %s", strerror(s_errno));
	}

	r= waitpid(cpid, &loc, 0);
	if (r == -1)
	{
		s_errno= errno;
		kill(cpid, SIGTERM);
		fatal("waitpid failed: %s", strerror(s_errno));
	}
	if (WIFEXITED(loc))
		exit(WEXITSTATUS(loc));
	/* Child died from a signal: re-raise it on ourselves so the caller
	 * sees the same termination reason.
	 */
	kill(getpid(), WTERMSIG(loc));
	exit(1);
}
/* Print "progname: <formatted message>\n" to stderr and exit(1).
 * Callers must NOT include a trailing newline in fmt.
 */
static void fatal(char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	fprintf(stderr, "%s: ", progname);
	vfprintf(stderr, fmt, ap);
	fprintf(stderr, "\n");
	va_end(ap);
	exit(1);
}
/* Print the usage synopsis to stderr and exit(1). */
static void usage(void)
{
	fprintf(stderr, "Usage: %s [-BPs] [-t timeout] hostname portname\n",
		progname);
	exit(1);
}
/*
* $PchId: socket.c,v 1.3 2005/01/31 22:33:20 philip Exp $
*/
| veritas-shine/minix3-rpi | minix/commands/swifi/socket.c | C | apache-2.0 | 5,758 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration relu tests"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, dtype, a_min, a_max):
    """Build a Relay clip (relu) expression over a single input.

    Parameters: `shape`/`dtype` describe the input var `a`; `a_min`/`a_max`
    are the clip bounds and must lie within the dtype's integer range
    (enforced by the assert below).
    """
    assert a_min >= np.iinfo(dtype).min and a_max <= np.iinfo(dtype).max
    a = relay.var("a", shape=shape, dtype=dtype)
    relu = relay.clip(a, a_min=a_min, a_max=a_max)
    return relu
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
def test_relu(dtype):
    """Compare NPU-offloaded vs host execution of clip/relu.

    Each trial is (shape, a_min, a_max, dtype); only the trials whose dtype
    matches the parametrized dtype are run.  The same random input is fed
    through the model with npu=False and npu=True and the outputs verified
    against each other.
    """
    trials = [
        ((1, 4, 4, 4), 65, 178, "uint8"),
        ((1, 8, 4, 2), 1, 254, "uint8"),
        ((1, 16), 12, 76, "uint8"),
        ((1, 4, 4, 4), 65, 125, "int8"),
        ((1, 8, 4, 2), -100, 100, "int8"),
        ((1, 16), -120, -20, "int8"),
    ]
    # Fixed seed so failures are reproducible.
    np.random.seed(0)
    for shape, a_min, a_max, trial_dtype in trials:
        if trial_dtype == dtype:
            inputs = {
                "a": tvm.nd.array(
                    np.random.randint(
                        low=np.iinfo(dtype).min,
                        high=np.iinfo(dtype).max + 1,
                        size=shape,
                        dtype=dtype,
                    )
                ),
            }
            outputs = []
            for npu in [False, True]:
                model = _get_model(inputs["a"].shape, dtype, a_min, a_max)
                mod = tei.make_module(model, {})
                outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))
            tei.verify(outputs, dtype, 1)
@requires_ethosn
def test_relu_failure():
    """Check that unsupported clip configurations are rejected.

    Each trial is (shape, dtype, a_min, a_max, expected_error_message);
    the partitioned module is expected to fail with that message.
    """
    trials = [
        ((1, 4, 4, 4, 4), "uint8", 65, 78, "dimensions=5, dimensions must be <= 4"),
        ((1, 8, 4, 2), "int16", 1, 254, "dtype='int16', dtype must be either uint8, int8 or int32"),
        ((1, 8, 4, 2), "uint8", 254, 1, "Relu has lower bound > upper bound"),
        ((2, 2, 2, 2), "uint8", 1, 63, "batch size=2, batch size must = 1; "),
    ]
    for shape, dtype, a_min, a_max, err_msg in trials:
        model = _get_model(shape, dtype, a_min, a_max)
        mod = tei.make_ethosn_partition(model)
        tei.test_error(mod, {}, err_msg)
| dmlc/tvm | tests/python/contrib/test_ethosn/test_relu.py | Python | apache-2.0 | 2,921 |
/*
* Copyright (c) 2013 Houbrechts IT
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.houbie.lesscss.engine;
import com.github.houbie.lesscss.LessParseException;
import com.github.houbie.lesscss.resourcereader.ResourceReader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.script.Invocable;
import javax.script.ScriptEngine;
import javax.script.ScriptEngineManager;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.SequenceInputStream;
import java.util.Map;
import static com.github.houbie.lesscss.LessCompiler.CompilationDetails;
/**
 * LessCompilationEngine implementation that uses a standard {@link javax.script.ScriptEngine} implementation.
 */
public class ScriptEngineLessCompilationEngine implements LessCompilationEngine {
    private static Logger logger = LoggerFactory.getLogger(ScriptEngineLessCompilationEngine.class);

    // Pre-minified bundle of the three scripts below.
    private static final String JS_ALL_MIN_JS = "js/all-min.js";
    private static final String LESS_SCRIPT = "js/less-rhino-1.7.0-mod.js";
    private static final String MINIFY_SCRIPT = "js/cssmin.js";
    private static final String COMPILE_SCRIPT = "js/compile.js";
    // Compile-time switch between the minified bundle and the individual scripts.
    private static final boolean MINIFIED = true;

    private ScriptEngine scriptEngine;

    /**
     * @param scriptEngineName the name of the underlying ScriptEngine (e.g. "nashorn", "rhino", ...)
     */
    public ScriptEngineLessCompilationEngine(String scriptEngineName) {
        logger.info("creating new NashornEngine");
        ScriptEngineManager factory = new ScriptEngineManager();
        scriptEngine = factory.getEngineByName(scriptEngineName);
        if (scriptEngine == null) {
            throw new RuntimeException("The ScriptEngine " + scriptEngineName + " could not be loaded");
        }
    }

    /**
     * @param scriptEngine the underlying ScriptEngine
     */
    public ScriptEngineLessCompilationEngine(ScriptEngine scriptEngine) {
        logger.info("creating new engine with {}", scriptEngine.getClass());
        this.scriptEngine = scriptEngine;
    }

    /**
     * Evaluates the optional custom JavaScript first (so it can hook into the
     * compiler), then the bundled less/cssmin/compile scripts.
     */
    @Override
    public void initialize(Reader customJavaScriptReader) {
        try {
            if (customJavaScriptReader != null) {
                scriptEngine.eval(customJavaScriptReader);
            }
            scriptEngine.eval(getLessScriptReader());
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    // Returns a reader over the compiler scripts, either the minified bundle
    // or the three sources chained together.
    // NOTE(review): the InputStreamReader uses the platform default charset
    // and the reader is not closed here — presumably the scripts are ASCII
    // and the stream is consumed/released by eval(); verify before changing.
    private Reader getLessScriptReader() {
        ClassLoader cl = getClass().getClassLoader();
        InputStream concatenatedScripts;
        if (MINIFIED) {
            concatenatedScripts = cl.getResourceAsStream(JS_ALL_MIN_JS);
        } else {
            concatenatedScripts = new SequenceInputStream(cl.getResourceAsStream(LESS_SCRIPT), new SequenceInputStream(cl.getResourceAsStream(MINIFY_SCRIPT), cl.getResourceAsStream(COMPILE_SCRIPT)));
        }
        return new InputStreamReader(concatenatedScripts);
    }

    /**
     * Invokes the JavaScript "compile" function and converts its result map
     * into a CompilationDetails, translating a reported parse error into a
     * LessParseException.
     */
    @Override
    public CompilationDetails compile(String less, CompilationOptions compilationOptions, ResourceReader resourceReader) {
        Map result;
        try {
            result = (Map) ((Invocable) scriptEngine).invokeFunction("compile", less, compilationOptions, resourceReader);
        } catch (Exception e) {
            throw new RuntimeException("Exception while compiling less", e);
        }
        if (result.get("parseException") != null) {
            throw new LessParseException((String) result.get("parseException"));
        }
        return new CompilationDetails((String) result.get("css"), (String) result.get("sourceMapContent"));
    }

    public ScriptEngine getScriptEngine() {
        return scriptEngine;
    }
}
| houbie/lesscss | src/main/java/com/github/houbie/lesscss/engine/ScriptEngineLessCompilationEngine.java | Java | apache-2.0 | 4,246 |
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt
package internal
package parser
import java.io.File
import scala.io.Source
/**
 * Checks that every sample build file under /new-format splits into a
 * non-empty list of statements.
 */
object NewFormatSpec extends AbstractSpec {

  implicit val splitter: SplitExpressions.SplitExpression = EvaluateConfigurations.splitExpressions

  test("New Format should handle lines") {
    val rootPath = getClass.getResource("/new-format").getPath
    println(s"Reading files from: $rootPath")
    val allFiles = new File(rootPath).listFiles.toList
    allFiles foreach { path =>
      println(s"$path")
      // Close the Source once read: the original leaked one file handle
      // per sample file.
      val source = Source.fromFile(path)
      val lines =
        try source.getLines().toList
        finally source.close()
      val (_, statements) = splitter(path, lines)
      assert(statements.nonEmpty, s"""
          |***should contains statements***
          |$lines """.stripMargin)
    }
  }
}
| sbt/sbt | main/src/test/scala/sbt/internal/parser/NewFormatSpec.scala | Scala | apache-2.0 | 883 |
package issues.issue130;
// Minimal fixture class for issue 130: one public field and one
// protected printing helper.
public class Impl_0 {
    // Public counter/field exercised by the issue's test code.
    public int a = 0;

    // Prints the given string to stdout.
    protected void printMe(String s) {
        System.out.println(s);
    }
}
| intrigus/jtransc | jtransc-main/test/issues/issue130/Impl_0.java | Java | apache-2.0 | 134 |
/**
* Wraps the
*
* @param text
* {string} haystack to search through
* @param search
* {string} needle to search for
* @param [caseSensitive]
* {boolean} optional boolean to use case-sensitive searching
*/
/**
 * AngularJS filter that wraps every occurrence of `search` inside `text`
 * in a <span class="ui-match"> element.
 *
 * @param text {string} haystack to search through
 * @param search {string} needle to search for
 * @param [caseSensitive] {boolean} optional boolean to use case-sensitive searching
 */
angular.module('ui.highlight', []).filter('highlight', function(highlight) {
  return function(text, search, caseSensitive) {
    if (search || angular.isNumber(search)) {
      var ltext = text.toString();
      var lsearch = search.toString();
      if (caseSensitive) {
        // Literal split/join — no regex involved in this branch.
        return ltext.split(lsearch).join('<span class="ui-match">' + lsearch + '</span>');
      } else {
        // Escape regex metacharacters so a search term such as "1+1" or
        // "(a)" matches literally; previously the raw term was passed to
        // new RegExp and could throw a SyntaxError or match wrongly.
        var escaped = lsearch.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
        return ltext.replace(new RegExp(escaped, 'gi'), '<span class="ui-match">$&</span>');
      }
    } else {
      return text;
    }
  };
});
| WestCoastInformatics/OTF-Mapping-Service | webapp/src/main/webapp/js/highlight.js | JavaScript | apache-2.0 | 771 |
# Build image for annotatorlib (CPU-only) on Ubuntu 18.04.
FROM ubuntu:18.04
LABEL maintainer="chriamue@gmail.com"

# Toolchain, Qt5, Boost (incl. Python bindings), OpenCV, Poco and dlib.
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y build-essential clang git cmake \
  qt5-default qttools5-dev qttools5-dev-tools libboost-system-dev libboost-filesystem-dev libboost-python-dev python-numpy \
  libopencv-dev libpoco-dev libdlib-dev

# Fetch the library sources.
RUN cd / && git clone https://github.com/annotatorproject/annotatorlib

# Build and install; expose the Python bindings to the system Python 2.7.
RUN cd /annotatorlib && mkdir build && cd build \
  && cmake -DCMAKE_BUILD_TYPE=Release .. \
  && make -j3 && make install && ldconfig \
  && ln -s /usr/local/lib/pyannotatorlib.so /usr/lib/python2.7/pyannotatorlib.so

# Add the Flickr image-set plugin and rebuild/install with it included.
RUN cd /annotatorlib/source/imagesets && git clone https://github.com/annotatorproject/annotatorimageset_flickr \
  && cd ../../build && cmake .. && make -j3 && make install && ldconfig
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.handler.export;
import java.io.IOException;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.MultiDocValues;
import org.apache.lucene.index.OrdinalMap;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LongValues;
/**
 * SortValue over a SortedDocValues field.  Comparison is done on ordinals
 * rather than on the term bytes; per-segment ordinals are mapped to global
 * ordinals via the OrdinalMap when one exists (multi-segment index).
 */
class StringValue implements SortValue {

  private final SortedDocValues globalDocValues;

  private final OrdinalMap ordinalMap;
  private final String field;
  private final IntComp comp;

  // Maps this segment's ordinals to global ordinals; IDENTITY for a
  // single-segment index.
  protected LongValues toGlobal = LongValues.IDENTITY; // this segment to global ordinal. NN;
  protected SortedDocValues docValues;

  public int currentOrd;
  protected int lastDocID;
  private boolean present;

  private BytesRef lastBytes;
  private String lastString;
  // Cache of the last ordinal whose term bytes were looked up (-1 = none).
  private int lastOrd = -1;
  // Ordinal of the leaf reader currently positioned on.
  private int leafOrd = -1;

  public StringValue(SortedDocValues globalDocValues, String field, IntComp comp) {
    this.globalDocValues = globalDocValues;
    this.docValues = globalDocValues;
    if (globalDocValues instanceof MultiDocValues.MultiSortedDocValues) {
      this.ordinalMap = ((MultiDocValues.MultiSortedDocValues) globalDocValues).mapping;
    } else {
      this.ordinalMap = null;
    }
    this.field = field;
    this.comp = comp;
    // Start at the comparator's sentinel so any real value replaces it.
    this.currentOrd = comp.resetValue();
    this.present = false;
  }

  public String getLastString() {
    return this.lastString;
  }

  public void setLastString(String lastString) {
    this.lastString = lastString;
  }

  /** Fresh copy sharing the same global doc values, field and comparator. */
  public StringValue copy() {
    StringValue copy = new StringValue(globalDocValues, field, comp);
    return copy;
  }

  /**
   * Positions on docId and records whether the field is present and, if so,
   * its (segment-local) ordinal.
   */
  public void setCurrentValue(int docId) throws IOException {
    // System.out.println(docId +":"+lastDocID);
    /*
    if (docId < lastDocID) {
      throw new AssertionError("docs were sent out-of-order: lastDocID=" + lastDocID + " vs doc=" + docId);
    }
    lastDocID = docId;
    */
    if (docId > docValues.docID()) {
      docValues.advance(docId);
    }
    if (docId == docValues.docID()) {
      present = true;
      currentOrd = docValues.ordValue();
    } else {
      present = false;
      currentOrd = -1;
    }
  }

  @Override
  public boolean isPresent() {
    return present;
  }

  /** Copies the full comparison state from another StringValue. */
  public void setCurrentValue(SortValue sv) {
    StringValue v = (StringValue) sv;
    this.currentOrd = v.currentOrd;
    this.present = v.present;
    this.leafOrd = v.leafOrd;
    this.lastOrd = v.lastOrd;
    this.toGlobal = v.toGlobal;
  }

  /** Returns the term bytes for the current ordinal, caching the lookup. */
  public Object getCurrentValue() throws IOException {
    assert present == true;
    if (currentOrd != lastOrd) {
      lastBytes = docValues.lookupOrd(currentOrd);
      lastOrd = currentOrd;
      lastString = null;
    }
    return lastBytes;
  }

  /**
   * Converts the segment-local currentOrd into a global ordinal, reusing
   * previousValue's already-mapped ordinal when it refers to the same
   * segment ordinal (-1 "missing" is identical in both spaces).
   */
  public void toGlobalValue(SortValue previousValue) {
    lastOrd = currentOrd;
    StringValue sv = (StringValue) previousValue;
    if (sv.lastOrd == currentOrd) {
      // Take the global ord from the previousValue unless we are a -1 which is the same in both
      // global and leaf ordinal
      if (this.currentOrd != -1) {
        this.currentOrd = sv.currentOrd;
      }
    } else {
      if (this.currentOrd > -1) {
        this.currentOrd = (int) toGlobal.get(this.currentOrd);
      }
    }
  }

  public String getField() {
    return field;
  }

  /** Switches to a new leaf: per-segment doc values and ord mapping. */
  public void setNextReader(LeafReaderContext context) throws IOException {
    leafOrd = context.ord;
    if (ordinalMap != null) {
      toGlobal = ordinalMap.getGlobalOrds(context.ord);
    }
    docValues = DocValues.getSorted(context.reader(), field);
    lastDocID = 0;
  }

  /** Resets to the comparator's sentinel state. */
  public void reset() {
    this.currentOrd = comp.resetValue();
    this.present = false;
    lastDocID = 0;
  }

  /** Ordinal comparison; both sides must be in the same ordinal space. */
  public int compareTo(SortValue o) {
    StringValue sv = (StringValue) o;
    return comp.compare(currentOrd, sv.currentOrd);
  }

  public String toString() {
    return Integer.toString(this.currentOrd);
  }
}
| apache/solr | solr/core/src/java/org/apache/solr/handler/export/StringValue.java | Java | apache-2.0 | 4,760 |
package at.jku.sea.cloud.rest.pojo.stream.provider;
import com.fasterxml.jackson.annotation.JsonTypeInfo;
import com.fasterxml.jackson.annotation.JsonTypeInfo.Id;
import com.fasterxml.jackson.annotation.JsonTypeName;
import at.jku.sea.cloud.rest.pojo.PojoCollectionArtifact;
// JSON-polymorphic provider POJO: serialized with a "__type" discriminator
// set to "CollectionArtifactProvider".
@JsonTypeInfo(use = Id.NAME, property = "__type")
@JsonTypeName(value = "CollectionArtifactProvider")
public class PojoCollectionArtifactProvider extends PojoProvider {
  // The collection artifact this provider streams from.
  private PojoCollectionArtifact collectionArtifact;

  // No-arg constructor required by Jackson deserialization.
  public PojoCollectionArtifactProvider() {
  }

  public PojoCollectionArtifactProvider(PojoCollectionArtifact collectionArtifact) {
    this.collectionArtifact = collectionArtifact;
  }

  public PojoCollectionArtifact getCollectionArtifact() {
    return collectionArtifact;
  }

  public void setCollectionArtifact(PojoCollectionArtifact collectionArtifact) {
    this.collectionArtifact = collectionArtifact;
  }
}
| OnurKirkizoglu/master_thesis | at.jku.sea.cloud.rest/src/main/java/at/jku/sea/cloud/rest/pojo/stream/provider/PojoCollectionArtifactProvider.java | Java | apache-2.0 | 932 |
package com.sebastian_daschner.scalable_coffee_shop.beans.boundary;
import javax.inject.Inject;
import javax.json.Json;
import javax.json.JsonObject;
import javax.json.JsonObjectBuilder;
import javax.ws.rs.BadRequestException;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
/** REST endpoint for coffee-bean stock, mounted at /beans. */
@Path("beans")
public class BeansResource {

    @Inject
    BeanCommandService commandService;

    @Inject
    BeanQueryService queryService;

    /** Returns the stored beans as a JSON object of origin -> amount. */
    @GET
    public JsonObject getBeans() {
        final JsonObjectBuilder builder = Json.createObjectBuilder();
        queryService.getStoredBeans()
                .entrySet().forEach(e -> builder.add(e.getKey(), e.getValue()));
        return builder.build();
    }

    /**
     * Stores beans from a JSON body {"beanOrigin": ..., "amount": ...}.
     * Responds 400 when beanOrigin is missing or amount is absent/zero.
     */
    @POST
    public void storeBeans(JsonObject object) {
        final String beanOrigin = object.getString("beanOrigin", null);
        final int amount = object.getInt("amount", 0);

        if (beanOrigin == null || amount == 0)
            throw new BadRequestException();

        commandService.storeBeans(beanOrigin, amount);
    }
}
| sdaschner/scalable-coffee-shop | beans/src/main/java/com/sebastian_daschner/scalable_coffee_shop/beans/boundary/BeansResource.java | Java | apache-2.0 | 1,063 |
#!/bin/bash -e
#
#
# Shutdown script for Datafari (development mode): stops Solr via its
# control script and Cassandra via its recorded PID.
#
#

# Must run as root (matches the corresponding start script).
if (( EUID != 0 )); then
    echo "You need to be root to run this script." 1>&2
    exit 100
fi

DIR=../../../macosx/bin

source "set-datafari-env-devmode.sh"
source "${DIR}/utils.sh"

# Stop Solr through its own control script.
# Expansions are quoted so paths containing spaces work.
if is_running "$SOLR_PID_FILE"; then
    SOLR_INCLUDE=$SOLR_ENV "$SOLR_INSTALL_DIR/bin/solr" stop
else
    echo "Warn : Solr does not seem to be running."
fi

# Stop Cassandra by PID and remove the stale PID file.
if is_running "$CASSANDRA_PID_FILE"; then
    kill "$(cat "$CASSANDRA_PID_FILE")"
    rm -f "$CASSANDRA_PID_FILE"
else
    echo "Warn : Cassandra does not seem to be running."
fi
| svanschalkwyk/datafari | dev-tools/script/macosx/stop-datafari-devmode.sh | Shell | apache-2.0 | 558 |
// Copyright 2000-2022 JetBrains s.r.o. and contributors. Use of this source code is governed by the Apache 2.0 license.
package com.intellij.execution.configurations;

import com.intellij.execution.ExecutionBundle;
import com.intellij.openapi.options.ConfigurationException;
import com.intellij.openapi.progress.ProcessCanceledException;
import com.intellij.openapi.ui.ValidationInfo;
import com.intellij.openapi.util.NlsContexts.DialogMessage;
import com.intellij.util.ThrowableRunnable;

import javax.swing.*;

import static com.intellij.openapi.util.NlsContexts.DialogTitle;

/**
 * Configuration error raised while validating or preparing a run
 * configuration.  When no title is given, the standard
 * "run configuration error" dialog title is used.
 */
public class RuntimeConfigurationException extends ConfigurationException {
  public RuntimeConfigurationException(@DialogMessage String message, @DialogTitle String title) {
    super(message, title);
  }

  public RuntimeConfigurationException(@DialogMessage String message) {
    super(message, ExecutionBundle.message("run.configuration.error.dialog.title"));
  }

  public RuntimeConfigurationException(@DialogMessage String message, Throwable cause) {
    super(message, cause, ExecutionBundle.message("run.configuration.error.dialog.title"));
  }

  /**
   * Runs {@code runnable} and converts its outcome into a ValidationInfo
   * attached to {@code component}: empty message on success, the thrown
   * message on failure.  ProcessCanceledException is rethrown untouched.
   */
  public static <T extends Throwable> ValidationInfo validate(JComponent component, ThrowableRunnable<T> runnable) {
    try {
      runnable.run();
      return new ValidationInfo("", component);
    }
    catch (ProcessCanceledException e) {
      throw e;
    }
    catch (Throwable t) {
      return new ValidationInfo(t.getMessage(), component);
    }
  }
}
package com.inmobi.messaging;
/*
* #%L
* messaging-client-core
* %%
* Copyright (C) 2012 - 2014 InMobi
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import java.nio.ByteBuffer;

/**
 * Message class holding the data.
 *
 */
public final class Message implements MessageBase {

  // Payload; limit() is treated as the message size.
  private ByteBuffer data;

  public Message() {
  }

  /**
   * Create new message with {@link ByteBuffer}
   *
   * @param data The {@link ByteBuffer}
   */
  public Message(ByteBuffer data) {
    this.data = data;
  }

  /**
   * Create new message with byte array
   *
   * @param data The byte array.
   */
  public Message(byte[] data) {
    this.data = ByteBuffer.wrap(data);
  }

  /**
   * Get the data associated with message.
   *
   * @return {@link ByteBuffer} holding the data.
   */
  public ByteBuffer getData() {
    return data;
  }

  // NOTE(review): set/clear are synchronized but getData/getSize/equals
  // are not, so reads are not protected against concurrent mutation —
  // confirm intended thread-safety contract before relying on it.
  public synchronized void set(ByteBuffer data) {
    this.data = data;
  }

  /** Clears the underlying buffer (position=0, limit=capacity). */
  public synchronized void clear() {
    data.clear();
  }

  /** @return the payload size in bytes (the buffer's limit). */
  public long getSize() {
    return data.limit();
  }

  @Override
  public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + ((data == null) ? 0 : data.hashCode());
    return result;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null) {
      return false;
    }
    if (getClass() != obj.getClass()) {
      return false;
    }
    Message other = (Message) obj;
    if (data == null) {
      if (other.data != null) {
        return false;
      }
    } else if (!data.equals(other.data)) {
      return false;
    }
    return true;
  }

  // Shallow copy: duplicate() shares the backing array but has
  // independent position/limit.
  @Override
  public Message clone() {
    Message m = new Message(data.duplicate());
    return m;
  }
}
| sreedishps/pintail | messaging-client-core/src/main/java/com/inmobi/messaging/Message.java | Java | apache-2.0 | 2,267 |
package org.jboss.resteasy.client.core;
import org.jboss.resteasy.client.ClientExecutor;
import org.jboss.resteasy.client.ClientRequest;
import org.jboss.resteasy.client.ClientResponse;
import org.jboss.resteasy.client.ProxyConfig;
import org.jboss.resteasy.client.core.extractors.ClientErrorHandler;
import org.jboss.resteasy.client.core.extractors.ClientRequestContext;
import org.jboss.resteasy.client.core.extractors.EntityExtractor;
import org.jboss.resteasy.client.core.extractors.EntityExtractorFactory;
import org.jboss.resteasy.client.core.marshallers.ClientMarshallerFactory;
import org.jboss.resteasy.client.core.marshallers.Marshaller;
import org.jboss.resteasy.client.exception.mapper.ClientExceptionMapper;
import org.jboss.resteasy.resteasy_jaxrs.i18n.Messages;
import org.jboss.resteasy.specimpl.ResteasyUriBuilder;
import org.jboss.resteasy.spi.ResteasyProviderFactory;
import org.jboss.resteasy.util.MediaTypeHelper;
import javax.ws.rs.Path;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.ext.Providers;
import java.lang.reflect.Method;
import java.net.URI;
import java.util.HashMap;
import java.util.Map;
/**
* @author <a href="mailto:bill@burkecentral.com">Bill Burke</a>
* @version $Revision: 1 $
*/
@SuppressWarnings("unchecked")
public class ClientInvoker extends ClientInterceptorRepositoryImpl implements MethodInvoker
{
protected ResteasyProviderFactory providerFactory;
protected String httpMethod;
protected ResteasyUriBuilder uri;
protected Method method;
protected Class declaring;
protected MediaType accepts;
protected Marshaller[] marshallers;
protected ClientExecutor executor;
protected boolean followRedirects;
protected EntityExtractor extractor;
protected EntityExtractorFactory extractorFactory;
protected URI baseUri;
protected Map<String, Object> attributes = new HashMap<String, Object>();
public ClientInvoker(URI baseUri, Class declaring, Method method, ResteasyProviderFactory providerFactory, ClientExecutor executor, EntityExtractorFactory extractorFactory)
{
this(baseUri, declaring, method, new ProxyConfig(null, executor, providerFactory, extractorFactory, null, null, null));
}
public ClientInvoker(URI baseUri, Class declaring, Method method, ProxyConfig config)
{
this.declaring = declaring;
this.method = method;
this.marshallers = ClientMarshallerFactory.createMarshallers(declaring, method, providerFactory, config.getServerConsumes());
this.providerFactory = config.getProviderFactory();
this.executor = config.getExecutor();
accepts = MediaTypeHelper.getProduces(declaring, method, config.getServerProduces());
this.uri = new ResteasyUriBuilder();
this.baseUri = baseUri;
uri.uri(baseUri);
if (declaring.isAnnotationPresent(Path.class)) uri.path(declaring);
if (method.isAnnotationPresent(Path.class)) uri.path(method);
this.extractorFactory = config.getExtractorFactory();
this.extractor = extractorFactory.createExtractor(method);
}
public Map<String, Object> getAttributes()
{
return attributes;
}
public MediaType getAccepts()
{
return accepts;
}
public Method getMethod()
{
return method;
}
public Class getDeclaring()
{
return declaring;
}
public ResteasyProviderFactory getProviderFactory()
{
return providerFactory;
}
public Object invoke(Object[] args)
{
boolean isProvidersSet = ResteasyProviderFactory.getContextData(Providers.class) != null;
if (!isProvidersSet) ResteasyProviderFactory.pushContext(Providers.class, providerFactory);
try
{
if (uri == null) throw new RuntimeException(Messages.MESSAGES.baseURINotSetForClientProxy());
ClientRequest request = createRequest(args);
BaseClientResponse clientResponse = null;
try
{
clientResponse = (BaseClientResponse) request.httpMethod(httpMethod);
}
catch (Exception e)
{
ClientExceptionMapper<Exception> mapper = providerFactory.getClientExceptionMapper(Exception.class);
if (mapper != null)
{
throw mapper.toException(e);
}
throw new RuntimeException(e);
}
ClientErrorHandler errorHandler = new ClientErrorHandler(providerFactory.getClientErrorInterceptors());
clientResponse.setAttributeExceptionsTo(method.toString());
clientResponse.setAnnotations(method.getAnnotations());
ClientRequestContext clientRequestContext = new ClientRequestContext(request, clientResponse, errorHandler, extractorFactory, baseUri);
return extractor.extractEntity(clientRequestContext);
}
finally
{
if (!isProvidersSet) ResteasyProviderFactory.popContextData(Providers.class);
}
}
protected ClientRequest createRequest(Object[] args)
{
ClientRequest request = new ClientRequest(uri, executor, providerFactory);
request.getAttributes().putAll(attributes);
if (accepts != null) request.header(HttpHeaders.ACCEPT, accepts.toString());
this.copyClientInterceptorsTo(request);
boolean isClientResponseResult = ClientResponse.class.isAssignableFrom(method.getReturnType());
request.followRedirects(!isClientResponseResult || this.followRedirects);
for (int i = 0; i < marshallers.length; i++)
{
marshallers[i].build(request, args[i]);
}
return request;
}
public String getHttpMethod()
{
return httpMethod;
}
public void setHttpMethod(String httpMethod)
{
this.httpMethod = httpMethod;
}
public boolean isFollowRedirects()
{
return followRedirects;
}
public void setFollowRedirects(boolean followRedirects)
{
this.followRedirects = followRedirects;
}
public void followRedirects()
{
setFollowRedirects(true);
}
} | psakar/Resteasy | resteasy-jaxrs/src/main/java/org/jboss/resteasy/client/core/ClientInvoker.java | Java | apache-2.0 | 6,212 |
// ESLint configuration: ES6 + Node, React/JSX via babel-eslint,
// built on eslint:recommended.
module.exports = {
  "env": {
    "es6": true,
    "node": true
  },
  // Test globals (mocha/jest style) so they are not flagged as undefined.
  "globals": {
    "expect": true,
    "it": true,
    "describe": true,
  },
  "extends": "eslint:recommended",
  "parser": "babel-eslint",
  "parserOptions": {
    "ecmaFeatures": {
      "experimentalObjectRestSpread": true,
      "jsx": true
    },
    "sourceType": "module"
  },
  "plugins": [
    "react"
  ],
  "rules": {
    "no-unused-vars": 2,
    // Keep JSX usage from tripping no-unused-vars on components/React.
    "react/jsx-uses-vars": 2,
    "react/jsx-uses-react": 2,
    "indent": [
      "error",
      2
    ],
    "linebreak-style": [
      "error",
      "unix"
    ],
    "quotes": [
      "error",
      "single"
    ],
    "semi": [
      "error",
      "always"
    ]
  }
};
| airingursb/two-life | src/components/react-native-calendars/.eslintrc.js | JavaScript | apache-2.0 | 859 |
/* Public domain */
/* A point node in the vector-graphics scene graph; inherits VG_Node. */
typedef struct vg_point {
	struct vg_node _inherit;
	float size;			/* Size in pixels (0.0 = invisible) */
} VG_Point;
/* Begin generated block */
__BEGIN_DECLS
extern DECLSPEC VG_NodeOps vgPointOps;
/*
 * Allocate a new point node, initialize it with the point node operations,
 * translate it to pos and attach it under the parent node pNode.
 * Returns the newly created VG_Point.
 */
static __inline__ VG_Point *
VG_PointNew(void *pNode, VG_Vector pos)
{
	VG_Point *pt = (VG_Point *)AG_Malloc(sizeof(VG_Point));

	VG_NodeInit(pt, &vgPointOps);
	VG_Translate(pt, pos);
	VG_NodeAttach(pNode, pt);
	return (pt);
}
/*
 * Set the rendered size of the point, in pixels (0.0 hides it).
 * The owning VG is locked around the write to serialize with other
 * accesses to the graph.
 */
static __inline__ void
VG_PointSize(VG_Point *vp, float r)
{
	VG_Lock(VGNODE(vp)->vg);
	vp->size = r;
	VG_Unlock(VGNODE(vp)->vg);
}
__END_DECLS
/* Close generated block */
| LiberatorUSA/GUCEF | dependencies/agar/include/agar/vg/vg_point.h | C | apache-2.0 | 665 |
// Decompiled by Jad v1.5.8e. Copyright 2001 Pavel Kouznetsov.
// Jad home page: http://www.geocities.com/kpdus/jad.html
// Decompiler options: braces fieldsfirst space lnc
package cn.com.smartdevices.bracelet.view;
import android.animation.Animator;
// Referenced classes of package cn.com.smartdevices.bracelet.view:
// RoundProgressBar
/*
 * Decompiled Animator listener attached to RoundProgressBar. The obfuscated
 * static accessors RoundProgressBar.a/b/c read and write progress-related
 * fields; their exact meaning is unknown without the original sources.
 * On animation end, if the current value (a) and the previous value (c)
 * are both below the target (b), the target is applied and a follow-up
 * update is triggered — i.e. the bar is topped up when the animation
 * fell short. TODO(review): confirm against RoundProgressBar.
 */
class s
    implements android.animation.Animator.AnimatorListener
{

    final RoundProgressBar a;

    s(RoundProgressBar roundprogressbar)
    {
        // Decompiler artifact: field assignment before super() is not valid
        // Java source order; kept verbatim from the decompilation.
        a = roundprogressbar;
        super();
    }

    public void onAnimationCancel(Animator animator)
    {
    }

    public void onAnimationEnd(Animator animator)
    {
        if (RoundProgressBar.a(a) < RoundProgressBar.b(a) && RoundProgressBar.c(a) < RoundProgressBar.b(a))
        {
            RoundProgressBar.a(a, RoundProgressBar.b(a));
            RoundProgressBar.a(a, RoundProgressBar.a(a) - RoundProgressBar.c(a), RoundProgressBar.c(a), RoundProgressBar.a(a));
        }
    }

    public void onAnimationRepeat(Animator animator)
    {
    }

    public void onAnimationStart(Animator animator)
    {
    }
}
| vishnudevk/MiBandDecompiled | Original Files/source/src/cn/com/smartdevices/bracelet/view/s.java | Java | apache-2.0 | 1,132 |
(function(window) {
  var DEFAULT_ERROR_ID = 'error-default';

  /**
   * Very simple base class for views.
   * Provides functionality for active/inactive.
   *
   * The first time the view is activated
   * the onactive function/event will fire.
   *
   * The .seen property is added to each object
   * with view in its prototype. .seen can be used
   * to detect if the view has ever been activated.
   *
   * @param {String|Object} options options or a selector for element.
   */
  function View(options) {
    if (typeof(options) === 'undefined') {
      options = {};
    }

    // A bare string is shorthand for { selectors: { element: <string> } };
    // otherwise own properties of options are copied onto the instance.
    if (typeof(options) === 'string') {
      this.selectors = { element: options };
    } else {
      var key;

      if (typeof(options) === 'undefined') {
        options = {};
      }

      for (key in options) {
        if (options.hasOwnProperty(key)) {
          this[key] = options[key];
        }
      }
    }

    // Bound once so the same function reference can be both added and
    // removed as an 'animationend' listener (see show/hideErrors).
    this.hideErrors = this.hideErrors.bind(this);
  }

  // Any character that is not legal in a CSS identifier.
  const INVALID_CSS = /([^a-zA-Z\-\_0-9])/g;

  View.ACTIVE = 'active';

  View.prototype = {
    seen: false,
    activeClass: View.ACTIVE,
    errorVisible: false,

    // Lazily resolved DOM elements declared in this.selectors
    // (cached after first lookup — see _findElement).
    get element() {
      return this._findElement('element');
    },

    get status() {
      return this._findElement('status');
    },

    get errors() {
      return this._findElement('errors');
    },

    /**
     * Creates a string id for a given model.
     *
     *    view.idForModel('foo-', { _id: 1 }); // => foo-1
     *    view.idForModel('foo-', '2'); // => foo-2
     *
     * @param {String} prefix of string.
     * @param {Object|String|Numeric} objectOrString representation of model.
     */
    idForModel: function(prefix, objectOrString) {
      prefix += (typeof(objectOrString) === 'object') ?
        objectOrString._id :
        objectOrString;

      return prefix;
    },

    // Builds a CSS-safe element id ('calendar-id-<id>') for a calendar
    // model or raw id string.
    calendarId: function(input) {
      if (typeof(input) !== 'string') {
        input = input.calendarId;
      }

      input = this.cssClean(input);
      return 'calendar-id-' + input;
    },

    /**
     * Delegate pattern event listener.
     *
     * @param {HTMLElement} element parent element.
     * @param {String} type type of dom event.
     * @param {String} selector css selector element should match
     *                          _note_ there is no magic here this
     *                          is determined from the root of the document.
     * @param {Function|Object} handler event handler.
     *                                  first argument is the raw
     *                                  event second is the element
     *                                  matching the pattern.
     */
    delegate: function(element, type, selector, handler) {
      if (typeof(handler) === 'object') {
        var context = handler;
        handler = function() {
          context.handleEvent.apply(context, arguments);
        };
      }

      element.addEventListener(type, function(e) {
        // Walk up from the event target until a node matching the
        // selector is found or the delegation root is reached.
        var target = e.target;
        while (target !== element) {
          if ('mozMatchesSelector' in target &&
              target.mozMatchesSelector(selector)) {
            return handler(e, target);
          }
          target = target.parentNode;
        }
      });
    },

    /**
     * Clean a string for use with css.
     * Converts illegal chars to legal ones.
     */
    cssClean: function(string) {
      if (typeof(string) !== 'string')
        return string;

      //TODO: I am worried about the performance
      //of using this all over the place =/
      //consider sanitizing all keys to ensure
      //they don't blow up when used as a selector?
      return string.replace(INVALID_CSS, '-');
    },

    /**
     * Finds a caches a element defined
     * by selectors
     *
     * @param {String} selector name as defined in selectors.
     * @param {Boolean} all true when to find all elements. (default false).
     */
    _findElement: function(name, all, element) {
      // Two-argument form: _findElement(name, element).
      if (typeof(all) === 'object') {
        element = all;
        all = false;
      }

      element = element || document;

      var cacheName;
      var selector;

      if (typeof(all) === 'undefined') {
        all = false;
      }

      if (name in this.selectors) {
        cacheName = '_' + name + 'Element';
        selector = this.selectors[name];

        // NOTE(review): the lookup result is cached on the instance
        // forever; it goes stale if the DOM node is later replaced.
        if (!this[cacheName]) {
          if (all) {
            this[cacheName] = element.querySelectorAll(selector);
          } else {
            this[cacheName] = element.querySelector(selector);
          }
        }

        return this[cacheName];
      }

      return null;
    },

    /**
     * Displays a list of errors
     *
     * @param {Array} list error list
     *  (see Event.validaitonErrors) or Error object.
     */
    showErrors: function(list) {
      var _ = navigator.mozL10n.get;
      var errors = '';

      // We can pass Error objects or
      // Array of {name: foo} objects
      if (!Array.isArray(list)) {
        list = [list];
      }

      var i = 0;
      var len = list.length;

      for (; i < len; i++) {
        var name = list[i].l10nID || list[i].name;
        // Fall back to the generic error string when no specific
        // localization exists for this error name.
        errors += _('error-' + name) || _(DEFAULT_ERROR_ID);
      }

      // populate error and display it.
      this.errors.textContent = errors;
      this.errorVisible = true;
      this.status.classList.add(this.activeClass);

      // Auto-hide once the status animation completes.
      this.status.addEventListener('animationend', this.hideErrors);
    },

    hideErrors: function() {
      this.status.classList.remove(this.activeClass);
      this.status.removeEventListener('animationend', this.hideErrors);
      this.errorVisible = false;
    },

    // Activation hook: clears any visible error, fires onfirstseen once,
    // forwards arguments to an optional dispatch() and marks the element
    // active.
    onactive: function() {
      if (this.errorVisible) {
        this.hideErrors();
      }

      //seen can be set to anything other
      //then false to override this behaviour
      if (this.seen === false) {
        this.onfirstseen();
      }

      // intentionally using 'in'
      if ('dispatch' in this) {
        this.dispatch.apply(this, arguments);
      }

      this.seen = true;
      if (this.element) {
        this.element.classList.add(this.activeClass);
      }
    },

    oninactive: function() {
      if (this.element) {
        this.element.classList.remove(this.activeClass);
      }
    },

    onfirstseen: function() {}
  };

  Calendar.View = View;

}(this));
| wilebeast/FireFox-OS | B2G/gaia/apps/calendar/js/view.js | JavaScript | apache-2.0 | 6,314 |
/* FCVT.D.L: convert the signed 64-bit integer in rs1 to a double-precision
 * float in frd. Requires RV64, the D extension and an enabled FPU. This
 * snippet is textually included into the simulator's instruction template,
 * so it is a bare statement list rather than a function. */
require_extension('D');
require_rv64;
require_fp;
/* Honor the rounding mode encoded in the instruction (RM field). */
softfloat_roundingMode = RM;
WRITE_FRD(i64_to_f64(RS1));
/* Propagate softfloat exception flags into the FP status register. */
set_fp_exceptions;
| SI-RISCV/e200_opensource | riscv-tools/riscv-isa-sim/riscv/insns/fcvt_d_l.h | C | apache-2.0 | 126 |
/*
* Copyright (C) 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.support.design.widget;
import android.graphics.drawable.Drawable;
import android.graphics.drawable.DrawableContainer;
import android.util.Log;
import java.lang.reflect.Method;
/**
 * Caution. Gross hacks ahead.
 *
 * Uses reflection to call the hidden
 * {@code DrawableContainer.setConstantState(DrawableContainerState)} method,
 * since it is not part of the public Android API. The Method object is
 * looked up at most once (success or failure) and cached in static state.
 */
class DrawableUtils {

  private static final String LOG_TAG = "DrawableUtils";

  // Cached reflective handle; null if the lookup failed.
  private static Method sSetConstantStateMethod;
  // True once the lookup has been attempted, so it is never retried.
  private static boolean sSetConstantStateMethodFetched;

  private DrawableUtils() {}

  /**
   * Sets the constant state on the given container via reflection.
   *
   * @return true if the hidden setter was found and invoked successfully.
   */
  static boolean setContainerConstantState(
      DrawableContainer drawable, Drawable.ConstantState constantState) {
    // We can use getDeclaredMethod() on v9+
    return setContainerConstantStateV9(drawable, constantState);
  }

  private static boolean setContainerConstantStateV9(
      DrawableContainer drawable, Drawable.ConstantState constantState) {
    if (!sSetConstantStateMethodFetched) {
      try {
        sSetConstantStateMethod =
            DrawableContainer.class.getDeclaredMethod(
                "setConstantState", DrawableContainer.DrawableContainerState.class);
        sSetConstantStateMethod.setAccessible(true);
      } catch (NoSuchMethodException e) {
        Log.e(LOG_TAG, "Could not fetch setConstantState(). Oh well.");
      }
      sSetConstantStateMethodFetched = true;
    }

    if (sSetConstantStateMethod != null) {
      try {
        sSetConstantStateMethod.invoke(drawable, constantState);
        return true;
      } catch (Exception e) {
        // Invocation can fail with IllegalAccess/InvocationTarget etc.;
        // treat any failure as "could not set" and fall through.
        Log.e(LOG_TAG, "Could not invoke setConstantState(). Oh well.");
      }
    }
    return false;
  }
}
| WeRockStar/iosched | third_party/material-components-android/lib/src/android/support/design/widget/DrawableUtils.java | Java | apache-2.0 | 2,166 |
# This code was automatically generated using xdrgen
# DO NOT EDIT or your changes may be overwritten
require 'xdr'
# === xdr source ============================================================
#
# enum ManageOfferEffect
# {
# MANAGE_OFFER_CREATED = 0,
# MANAGE_OFFER_UPDATED = 1,
# MANAGE_OFFER_DELETED = 2
# };
#
# ===========================================================================
module Stellar
  # XDR enum mirroring the ManageOfferEffect wire values above.
  class ManageOfferEffect < XDR::Enum
    member :manage_offer_created, 0
    member :manage_offer_updated, 1
    member :manage_offer_deleted, 2

    # seal: presumably prevents further members from being defined
    # (XDR::Enum DSL) — generated code, do not edit by hand.
    seal
  end
end
| nullstyle/ruby-stellar-base | generated/stellar/manage_offer_effect.rb | Ruby | apache-2.0 | 600 |
package com.artemis;
import static org.junit.Assert.assertEquals;
import java.util.NoSuchElementException;
import com.artemis.systems.EntityProcessingSystem;
import com.artemis.utils.IntBag;
import org.junit.Test;
import com.artemis.utils.ImmutableBag;
/**
 * Tests for EntitySystem subscription behavior: disabled entities are not
 * iterated, active entities are, and exclude-only aspects still match
 * component-less entities.
 *
 * Created by obartley on 6/9/14.
 */
public class EntitySystemTest {

	@SuppressWarnings("static-method")
	@Test(expected = NoSuchElementException.class)
	public void test_process_one_inactive() {
		World w = new World(new WorldConfiguration()
			.setSystem(new IteratorTestSystem(0)));
		Entity e = w.createEntity();
		e.edit().add(new C());
		e.disable();

		// The system expects zero actives, so iterating its (empty)
		// entity set must throw NoSuchElementException.
		w.process();
	}

	@SuppressWarnings("static-method")
	@Test
	public void test_process_one_active() {
		World w = new World(new WorldConfiguration()
			.setSystem(new IteratorTestSystem(1)));
		Entity e = w.createEntity();
		e.edit().add(new C());
		w.process();
	}

	@Test
	public void aspect_exclude_only() {
		ExcludingSystem es1 = new ExcludingSystem();
		EmptySystem es2 = new EmptySystem();
		World w = new World(new WorldConfiguration()
			.setSystem(es1)
			.setSystem(es2));

		Entity e = w.createEntity();
		w.process();

		// An entity with no components matches both an exclude-only
		// aspect and an empty (match-all) aspect.
		assertEquals(1, es1.getActives().size());
		assertEquals(1, es2.getActives().size());
	}

	// Marker components used by the systems below.
	public static class C extends Component {}
	public static class C2 extends Component {}

	// Asserts the subscription size and pokes the actives iterator
	// (throws NoSuchElementException when empty).
	public static class IteratorTestSystem extends EntitySystem {
		public int expectedSize;

		@SuppressWarnings("unchecked")
		public IteratorTestSystem(int expectedSize) {
			super(Aspect.all(C.class));
			this.expectedSize = expectedSize;
		}

		@Override
		protected void processSystem() {
			assertEquals(expectedSize, subscription.getEntities().size());
			getActives().iterator().next();
		}

		@Override
		protected boolean checkProcessing() {
			return true;
		}
	}

	// Matches entities that do NOT have component C.
	public static class ExcludingSystem extends EntityProcessingSystem {
		public ExcludingSystem() {
			super(Aspect.exclude(C.class));
		}

		@Override
		protected void process(Entity e) {}
	}

	// Matches every entity (empty aspect).
	public static class EmptySystem extends EntityProcessingSystem {
		public EmptySystem() {
			super(Aspect.all());
		}

		@Override
		protected void process(Entity e) {}
	}
}
| antag99/artemis-odb | artemis/src/test/java/com/artemis/EntitySystemTest.java | Java | apache-2.0 | 2,187 |
package com.sqisland.gce2retrofit;
import com.google.gson.Gson;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.stream.JsonReader;
import com.squareup.javawriter.JavaWriter;
import org.apache.commons.cli.BasicParser;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.lang3.text.WordUtils;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.io.Writer;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import static javax.lang.model.element.Modifier.PUBLIC;
/**
 * Generates Retrofit 1.x service interfaces and Gson-annotated model
 * classes from a Google API discovery document (JSON). Schemas become
 * classes/enums under {@code <package>.model}; resources and methods
 * become annotated interface methods (sync, async callback and/or
 * RxJava Observable variants).
 */
public class Generator {
  private static final String OPTION_CLASS_MAP = "classmap";
  private static final String OPTION_METHODS = "methods";

  private static Gson gson = new Gson();

  // Flavors of generated interface methods.
  public enum MethodType {
    SYNC, ASYNC, REACTIVE
  }

  /**
   * CLI entry point: {@code java -jar gce2retrofit.jar discovery.json output_dir}
   * with optional -classmap and -methods options.
   */
  public static void main(String... args)
      throws IOException, URISyntaxException {
    Options options = getOptions();

    CommandLine cmd = getCommandLine(options, args);
    if (cmd == null) {
      return;
    }

    String[] arguments = cmd.getArgs();
    if (arguments.length != 2) {
      HelpFormatter formatter = new HelpFormatter();
      formatter.printHelp("java -jar gce2retrofit.jar discovery.json output_dir", options);
      System.exit(1);
    }

    String discoveryFile = arguments[0];
    String outputDir = arguments[1];

    Map<String, String> classMap = cmd.hasOption(OPTION_CLASS_MAP)?
        readClassMap(new FileReader(cmd.getOptionValue(OPTION_CLASS_MAP))) : null;

    EnumSet<MethodType> methodTypes = getMethods(cmd.getOptionValue(OPTION_METHODS));

    generate(new FileReader(discoveryFile), new FileWriterFactory(new File(outputDir)),
        classMap, methodTypes);
  }

  /** Builds the supported command-line options. */
  private static Options getOptions() {
    Options options = new Options();
    options.addOption(
        OPTION_CLASS_MAP, true, "Map fields to classes. Format: field_name\\tclass_name");
    options.addOption(
        OPTION_METHODS, true,
        "Methods to generate, either sync, async or reactive. Default is to generate sync & async.");
    return options;
  }

  /** Parses args against options; returns null on a parse error. */
  private static CommandLine getCommandLine(Options options, String... args) {
    CommandLineParser parser = new BasicParser();
    try {
      CommandLine cmd = parser.parse(options, args);
      return cmd;
    } catch (ParseException e) {
      System.out.println("Unexpected exception:" + e.getMessage());
    }
    return null;
  }

  /**
   * Core generation: parses the discovery document, derives the package
   * name from baseUrl (falling back to rootUrl), emits one model class per
   * schema and one interface per resource/method group.
   *
   * @param discoveryReader discovery JSON
   * @param writerFactory   sink for generated files
   * @param classMap        optional field-name -> Java-type overrides
   * @param methodTypes     which method flavors to emit
   */
  public static void generate(
      Reader discoveryReader, WriterFactory writerFactory,
      Map<String, String> classMap, EnumSet<MethodType> methodTypes)
      throws IOException, URISyntaxException {
    JsonReader jsonReader = new JsonReader(discoveryReader);

    Discovery discovery = gson.fromJson(jsonReader, Discovery.class);

    String packageName = StringUtil.getPackageName(discovery.baseUrl);
    if (packageName == null || packageName.isEmpty()) {
      packageName = StringUtil.getPackageName(discovery.rootUrl);
    }

    String modelPackageName = packageName + ".model";

    for (Entry<String, JsonElement> entry : discovery.schemas.entrySet()) {
      generateModel(
          writerFactory, modelPackageName, entry.getValue().getAsJsonObject(), classMap);
    }

    if (discovery.resources != null) {
      generateInterfaceFromResources(
          writerFactory, packageName, "", discovery.resources, methodTypes);
    }

    if (discovery.name != null && discovery.methods != null) {
      generateInterface(
          writerFactory, packageName, discovery.name, discovery.methods, methodTypes);
    }
  }

  /**
   * Reads tab-separated "field_name\tclass_name" lines into a map;
   * malformed lines are silently skipped.
   */
  public static Map<String, String> readClassMap(Reader reader) throws IOException {
    Map<String, String> classMap = new HashMap<String, String>();

    String line;
    BufferedReader bufferedReader = new BufferedReader(reader);
    while ((line = bufferedReader.readLine()) != null) {
      String[] fields = line.split("\t");
      if (fields.length == 2) {
        classMap.put(fields[0], fields[1]);
      }
    }

    return classMap;
  }

  /**
   * Parses the -methods option (comma-separated: sync, async, both,
   * reactive). Defaults to SYNC+ASYNC when empty or unrecognized.
   */
  public static EnumSet<MethodType> getMethods(String input) {
    EnumSet<MethodType> methodTypes = EnumSet.noneOf(MethodType.class);
    if (input != null) {
      String[] parts = input.split(",");
      for (String part : parts) {
        if ("sync".equals(part) || "both".equals(part)) {
          methodTypes.add(MethodType.SYNC);
        }
        if ("async".equals(part) || "both".equals(part)) {
          methodTypes.add(MethodType.ASYNC);
        }
        if ("reactive".equals(part)) {
          methodTypes.add(MethodType.REACTIVE);
        }
      }
    }
    if (methodTypes.isEmpty()) {
      methodTypes = EnumSet.of(Generator.MethodType.ASYNC, Generator.MethodType.SYNC);
    }
    return methodTypes;
  }

  /**
   * Emits one model file for a schema: a class for "object" schemas,
   * an enum for "string" schemas.
   * NOTE(review): writer is not closed if an exception is thrown before
   * the final close() — consider try/finally.
   */
  private static void generateModel(
      WriterFactory writerFactory, String modelPackageName,
      JsonObject schema, Map<String, String> classMap)
      throws IOException {
    String id = schema.get("id").getAsString();

    String path = StringUtil.getPath(modelPackageName, id + ".java");
    Writer writer = writerFactory.getWriter(path);
    JavaWriter javaWriter = new JavaWriter(writer);

    javaWriter.emitPackage(modelPackageName)
        .emitImports("com.google.gson.annotations.SerializedName")
        .emitEmptyLine()
        .emitImports("java.util.List")
        .emitEmptyLine();

    String type = schema.get("type").getAsString();
    if (type.equals("object")) {
      javaWriter.beginType(modelPackageName + "." + id, "class", EnumSet.of(PUBLIC));
      generateObject(javaWriter, schema, classMap);
      javaWriter.endType();
    } else if (type.equals("string")) {
      javaWriter.beginType(modelPackageName + "." + id, "enum", EnumSet.of(PUBLIC));
      generateEnum(javaWriter, schema);
      javaWriter.endType();
    }

    writer.close();
  }

  /**
   * Emits one public field per schema property. Java reserved words get a
   * trailing underscore plus a @SerializedName annotation; classMap entries
   * override the inferred Java type.
   */
  private static void generateObject(
      JavaWriter javaWriter, JsonObject schema, Map<String, String> classMap)
      throws IOException {
    JsonElement element = schema.get("properties");
    if (element == null) {
      return;
    }

    JsonObject properties = element.getAsJsonObject();
    for (Entry<String, JsonElement> entry : properties.entrySet()) {
      String key = entry.getKey();
      String variableName = key;
      if (StringUtil.isReservedWord(key)) {
        javaWriter.emitAnnotation("SerializedName(\"" + key + "\")");
        variableName += "_";
      }
      PropertyType propertyType = gson.fromJson(
          entry.getValue(), PropertyType.class);
      String javaType = propertyType.toJavaType();
      if (classMap != null && classMap.containsKey(key)) {
        javaType = classMap.get(key);
      }
      javaWriter.emitField(javaType, variableName, EnumSet.of(PUBLIC));
    }
  }

  /** Emits one enum constant per "enum" entry of the schema. */
  private static void generateEnum(JavaWriter javaWriter, JsonObject schema) throws IOException {
    JsonArray enums = schema.get("enum").getAsJsonArray();
    for (int i = 0; i < enums.size(); ++i) {
      javaWriter.emitEnumValue(enums.get(i).getAsString());
    }
  }

  /**
   * Walks the (possibly nested) resource tree, generating one interface
   * per resource that declares methods; nested resource names are joined
   * with underscores.
   */
  private static void generateInterfaceFromResources(
      WriterFactory writerFactory, String packageName,
      String resourceName, JsonObject resources,
      EnumSet<MethodType> methodTypes)
      throws IOException {
    for (Entry<String, JsonElement> entry : resources.entrySet()) {
      JsonObject entryValue = entry.getValue().getAsJsonObject();

      if (entryValue.has("methods")) {
        generateInterface(writerFactory, packageName,
            resourceName + "_" + entry.getKey(),
            entryValue.get("methods").getAsJsonObject(),
            methodTypes);
      }

      if (entryValue.has("resources")) {
        generateInterfaceFromResources(writerFactory, packageName,
            resourceName + "_" + entry.getKey(),
            entryValue.get("resources").getAsJsonObject(),
            methodTypes);
      }
    }
  }

  /**
   * Emits a Retrofit interface for one resource: camel-cases the
   * underscore-joined resource name into the type name, then writes one
   * annotated method per discovery method and requested MethodType.
   * NOTE(review): fileWriter is not closed on exception — consider
   * try/finally.
   */
  private static void generateInterface(
      WriterFactory writerFactory, String packageName,
      String resourceName, JsonObject methods,
      EnumSet<MethodType> methodTypes)
      throws IOException {
    String capitalizedName = WordUtils.capitalizeFully(resourceName, '_');
    String className = capitalizedName.replaceAll("_", "");

    String path = StringUtil.getPath(packageName, className + ".java");
    Writer fileWriter = writerFactory.getWriter(path);
    JavaWriter javaWriter = new JavaWriter(fileWriter);

    javaWriter.emitPackage(packageName)
        .emitImports(packageName + ".model.*")
        .emitEmptyLine()
        .emitImports(
            "retrofit.Callback",
            "retrofit.client.Response",
            "retrofit.http.GET",
            "retrofit.http.POST",
            "retrofit.http.PATCH",
            "retrofit.http.DELETE",
            "retrofit.http.Body",
            "retrofit.http.Path",
            "retrofit.http.Query");

    if (methodTypes.contains(MethodType.REACTIVE)) {
      javaWriter.emitImports("rx.Observable");
    }

    javaWriter.emitEmptyLine();

    javaWriter.beginType(
        packageName + "." + className, "interface", EnumSet.of(PUBLIC));

    for (Entry<String, JsonElement> entry : methods.entrySet()) {
      String methodName = entry.getKey();
      Method method = gson.fromJson(entry.getValue(), Method.class);
      for (MethodType methodType : methodTypes) {
        javaWriter.emitAnnotation(method.httpMethod, "\"/" + method.path + "\"");
        emitMethodSignature(fileWriter, methodName, method, methodType);
      }
    }

    javaWriter.endType();
    fileWriter.close();
  }

  // TODO: Use JavaWriter to emit method signature
  /**
   * Writes one raw method signature: @Body/@Path/@Query parameters, a
   * return type that depends on the MethodType (sync return value,
   * async Callback parameter, or reactive Observable with an "Rx"
   * method-name suffix).
   */
  private static void emitMethodSignature(
      Writer writer, String methodName, Method method, MethodType methodType) throws IOException {
    ArrayList<String> params = new ArrayList<String>();

    if (method.request != null) {
      params.add("@Body " + method.request.$ref + " " +
          (method.request.parameterName != null ? method.request.parameterName : "resource"));
    }
    for (Entry<String, JsonElement> param : getParams(method)) {
      params.add(param2String(param));
    }

    String returnValue = "void";

    if (methodType == MethodType.SYNC && "POST".equals(method.httpMethod)) {
      returnValue = "Response";
    }

    if (method.response != null) {
      if (methodType == MethodType.SYNC) {
        returnValue = method.response.$ref;
      } else if (methodType == MethodType.REACTIVE) {
        returnValue = "Observable<" + method.response.$ref + ">";
      }
    }

    if (methodType == MethodType.ASYNC) {
      if (method.response == null) {
        params.add("Callback<Void> cb");
      } else {
        params.add("Callback<" + method.response.$ref + "> cb");
      }
    }

    writer.append("  " + returnValue + " " + methodName + (methodType == MethodType.REACTIVE ? "Rx" : "") + "(");
    for (int i = 0; i < params.size(); ++i) {
      if (i != 0) {
        writer.append(", ");
      }
      writer.append(params.get(i));
    }
    writer.append(");\n");
  }

  /**
   * Assemble a list of parameters, with the first entries matching the ones
   * listed in parameterOrder
   *
   * @param method The method containing parameters and parameterOrder
   * @return Ordered parameters
   */
  private static List<Entry<String, JsonElement>> getParams(Method method) {
    List<Entry<String, JsonElement>> params
        = new ArrayList<Entry<String, JsonElement>>();
    if (method.parameters == null) {
      return params;
    }

    // Convert the entry set into a map, and extract the keys not listed in
    // parameterOrder
    HashMap<String, Entry<String, JsonElement>> map
        = new HashMap<String, Entry<String, JsonElement>>();
    List<String> remaining = new ArrayList<String>();
    for (Entry<String, JsonElement> entry : method.parameters.entrySet()) {
      String key = entry.getKey();
      map.put(key, entry);
      if (method.parameterOrder == null ||
          !method.parameterOrder.contains(key)) {
        remaining.add(key);
      }
    }

    // Add the keys in parameterOrder
    if (method.parameterOrder != null) {
      for (String key : method.parameterOrder) {
        params.add(map.get(key));
      }
    }

    // Then add the keys not in parameterOrder
    for (String key : remaining) {
      params.add(map.get(key));
    }

    return params;
  }

  /**
   * Renders one parameter as "@Path/@Query(...) Type name"; optional
   * parameters get the boxed type so null can mean "absent".
   */
  private static String param2String(Entry<String, JsonElement> param) {
    StringBuffer buf = new StringBuffer();
    String paramName = param.getKey();
    ParameterType paramType = gson.fromJson(
        param.getValue(), ParameterType.class);
    if ("path".equals(paramType.location)) {
      buf.append("@Path(\"" + paramName + "\") ");
    }
    if ("query".equals(paramType.location)) {
      buf.append("@Query(\"" + paramName + "\") ");
    }

    String type = paramType.toJavaType();
    if (!paramType.required) {
      type = StringUtil.primitiveToObject(type);
    }
    buf.append(type + " " + paramName);

    return buf.toString();
  }
}
| MaTriXy/gce2retrofit | gce2retrofit/src/main/java/com/sqisland/gce2retrofit/Generator.java | Java | apache-2.0 | 13,413 |
/*
* Copyright 2017 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <type_traits>
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <boost/noncopyable.hpp>
#include <folly/AtomicStruct.h>
#include <folly/detail/CacheLocality.h>
#include <folly/portability/SysMman.h>
#include <folly/portability/Unistd.h>
// Ignore shadowing warnings within this file, so includers can use -Wshadow.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
namespace folly {
namespace detail {
template <typename Pool>
struct IndexedMemPoolRecycler;
}
/// Instances of IndexedMemPool dynamically allocate and then pool their
/// element type (T), returning 4-byte integer indices that can be passed
/// to the pool's operator[] method to access or obtain pointers to the
/// actual elements. The memory backing items returned from the pool
/// will always be readable, even if items have been returned to the pool.
/// These two features are useful for lock-free algorithms. The indexing
/// behavior makes it easy to build tagged pointer-like-things, since
/// a large number of elements can be managed using fewer bits than a
/// full pointer. The access-after-free behavior makes it safe to read
/// from T-s even after they have been recycled, since it is guaranteed
/// that the memory won't have been returned to the OS and unmapped
/// (the algorithm must still use a mechanism to validate that the read
/// was correct, but it doesn't have to worry about page faults), and if
/// the elements use internal sequence numbers it can be guaranteed that
/// there won't be an ABA match due to the element being overwritten with
/// a different type that has the same bit pattern.
///
/// IndexedMemPool has two object lifecycle strategies. The first
/// is to construct objects when they are allocated from the pool and
/// destroy them when they are recycled. In this mode allocIndex and
/// allocElem have emplace-like semantics. In the second mode, objects
/// are default-constructed the first time they are removed from the pool,
/// and deleted when the pool itself is deleted. By default the first
/// mode is used for non-trivial T, and the second is used for trivial T.
///
/// IMPORTANT: Space for extra elements is allocated to account for those
/// that are inaccessible because they are in other local lists, so the
/// actual number of items that can be allocated ranges from capacity to
/// capacity + (NumLocalLists_-1)*LocalListLimit_. This is important if
/// you are trying to maximize the capacity of the pool while constraining
/// the bit size of the resulting pointers, because the pointers will
/// actually range up to the boosted capacity. See maxIndexForCapacity
/// and capacityForMaxIndex.
///
/// To avoid contention, NumLocalLists_ free lists of limited (less than
/// or equal to LocalListLimit_) size are maintained, and each thread
/// retrieves and returns entries from its associated local list. If the
/// local list becomes too large then elements are placed in bulk in a
/// global free list. This allows items to be efficiently recirculated
/// from consumers to producers. AccessSpreader is used to access the
/// local lists, so there is no performance advantage to having more
/// local lists than L1 caches.
///
/// The pool mmap-s the entire necessary address space when the pool is
/// constructed, but delays element construction. This means that only
/// elements that are actually returned to the caller get paged into the
/// process's resident set (RSS).
template <
typename T,
uint32_t NumLocalLists_ = 32,
uint32_t LocalListLimit_ = 200,
template <typename> class Atom = std::atomic,
bool EagerRecycleWhenTrivial = false,
bool EagerRecycleWhenNotTrivial = true>
struct IndexedMemPool : boost::noncopyable {
typedef T value_type;
typedef std::unique_ptr<T, detail::IndexedMemPoolRecycler<IndexedMemPool>>
UniquePtr;
static_assert(LocalListLimit_ <= 255, "LocalListLimit must fit in 8 bits");
enum {
NumLocalLists = NumLocalLists_,
LocalListLimit = LocalListLimit_
};
static constexpr bool eagerRecycle() {
return std::is_trivial<T>::value
? EagerRecycleWhenTrivial : EagerRecycleWhenNotTrivial;
}
// these are public because clients may need to reason about the number
// of bits required to hold indices from a pool, given its capacity
static constexpr uint32_t maxIndexForCapacity(uint32_t capacity) {
// index of std::numeric_limits<uint32_t>::max() is reserved for isAllocated
// tracking
return uint32_t(std::min(
uint64_t(capacity) + (NumLocalLists - 1) * LocalListLimit,
uint64_t(std::numeric_limits<uint32_t>::max() - 1)));
}
static constexpr uint32_t capacityForMaxIndex(uint32_t maxIndex) {
return maxIndex - (NumLocalLists - 1) * LocalListLimit;
}
/// Constructs a pool that can allocate at least _capacity_ elements,
/// even if all the local lists are full
explicit IndexedMemPool(uint32_t capacity)
: actualCapacity_(maxIndexForCapacity(capacity))
, size_(0)
, globalHead_(TaggedPtr{})
{
const size_t needed = sizeof(Slot) * (actualCapacity_ + 1);
size_t pagesize = size_t(sysconf(_SC_PAGESIZE));
mmapLength_ = ((needed - 1) & ~(pagesize - 1)) + pagesize;
assert(needed <= mmapLength_ && mmapLength_ < needed + pagesize);
assert((mmapLength_ % pagesize) == 0);
slots_ = static_cast<Slot*>(mmap(nullptr, mmapLength_,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
if (slots_ == MAP_FAILED) {
assert(errno == ENOMEM);
throw std::bad_alloc();
}
}
/// Destroys all of the contained elements
~IndexedMemPool() {
if (!eagerRecycle()) {
for (uint32_t i = size_; i > 0; --i) {
slots_[i].~Slot();
}
}
munmap(slots_, mmapLength_);
}
/// Returns a lower bound on the number of elements that may be
/// simultaneously allocated and not yet recycled. Because of the
/// local lists it is possible that more elements than this are returned
/// successfully
uint32_t capacity() {
return capacityForMaxIndex(actualCapacity_);
}
/// Finds a slot with a non-zero index, emplaces a T there if we're
/// using the eager recycle lifecycle mode, and returns the index,
/// or returns 0 if no elements are available.
template <typename ...Args>
uint32_t allocIndex(Args&&... args) {
static_assert(sizeof...(Args) == 0 || eagerRecycle(),
"emplace-style allocation requires eager recycle, "
"which is defaulted only for non-trivial types");
auto idx = localPop(localHead());
if (idx != 0 && eagerRecycle()) {
T* ptr = &slot(idx).elem;
new (ptr) T(std::forward<Args>(args)...);
}
return idx;
}
/// If an element is available, returns a std::unique_ptr to it that will
/// recycle the element to the pool when it is reclaimed, otherwise returns
/// a null (falsy) std::unique_ptr
template <typename ...Args>
UniquePtr allocElem(Args&&... args) {
auto idx = allocIndex(std::forward<Args>(args)...);
T* ptr = idx == 0 ? nullptr : &slot(idx).elem;
return UniquePtr(ptr, typename UniquePtr::deleter_type(this));
}
/// Gives up ownership previously granted by alloc()
void recycleIndex(uint32_t idx) {
assert(isAllocated(idx));
if (eagerRecycle()) {
slot(idx).elem.~T();
}
localPush(localHead(), idx);
}
/// Provides access to the pooled element referenced by idx
T& operator[](uint32_t idx) {
return slot(idx).elem;
}
/// Provides access to the pooled element referenced by idx
const T& operator[](uint32_t idx) const {
return slot(idx).elem;
}
/// If elem == &pool[idx], then pool.locateElem(elem) == idx. Also,
/// pool.locateElem(nullptr) == 0
uint32_t locateElem(const T* elem) const {
  if (!elem) {
    return 0;
  }
  static_assert(std::is_standard_layout<Slot>::value, "offsetof needs POD");
  // Recover the enclosing Slot from the address of its elem member, then
  // turn the Slot pointer into a 1-based index by pointer subtraction
  // against the base of the slot array.
  auto slot = reinterpret_cast<const Slot*>(
      reinterpret_cast<const char*>(elem) - offsetof(Slot, elem));
  auto rv = uint32_t(slot - slots_);

  // this assert also tests that rv is in range
  assert(elem == &(*this)[rv]);
  return rv;
}
/// Returns true iff idx has been alloc()ed and not recycleIndex()ed
bool isAllocated(uint32_t idx) const {
  // localNext is overwritten with the sentinel uint32_t(-1) when a slot is
  // handed out (see localPop); otherwise it links a free list.
  return slot(idx).localNext == uint32_t(-1);
}
private:
///////////// types
struct Slot {
  T elem;              // storage for the pooled element
  uint32_t localNext;  // next index in a local free list; uint32_t(-1) marks "allocated"
  uint32_t globalNext; // next local-list head in the global list (meaningful for list heads)
  Slot() : localNext{}, globalNext{} {}
};
/// A slot index combined with an ABA-prevention tag and the length of the
/// local list hanging off that index, packed so it fits in an AtomicStruct.
struct TaggedPtr {
  uint32_t idx;

  // size is bottom 8 bits, tag in top 24. g++'s code generation for
  // bitfields seems to depend on the phase of the moon, plus we can
  // do better because we can rely on other checks to avoid masking
  uint32_t tagAndSize;

  enum : uint32_t {
    SizeBits = 8,
    SizeMask = (1U << SizeBits) - 1,
    TagIncr = 1U << SizeBits,
  };

  uint32_t size() const {
    return tagAndSize & SizeMask;
  }

  // Copy with the size field replaced; the tag is left unchanged.
  TaggedPtr withSize(uint32_t repl) const {
    assert(repl <= LocalListLimit);
    return TaggedPtr{ idx, (tagAndSize & ~SizeMask) | repl };
  }

  TaggedPtr withSizeIncr() const {
    assert(size() < LocalListLimit);
    return TaggedPtr{ idx, tagAndSize + 1 };
  }

  TaggedPtr withSizeDecr() const {
    assert(size() > 0);
    return TaggedPtr{ idx, tagAndSize - 1 };
  }

  // Replacing the index also bumps the tag (TagIncr), which is what lets the
  // CAS loops on the list heads detect ABA.
  TaggedPtr withIdx(uint32_t repl) const {
    return TaggedPtr{ repl, tagAndSize + TagIncr };
  }

  TaggedPtr withEmpty() const {
    return withIdx(0).withSize(0);
  }
};
/// Head of one stripe's local free list, aligned (per the macro) so that
/// different stripes don't share a cache line.
struct FOLLY_ALIGN_TO_AVOID_FALSE_SHARING LocalList {
  AtomicStruct<TaggedPtr,Atom> head;

  LocalList() : head(TaggedPtr{}) {}
};
////////// fields
/// the number of bytes allocated from mmap, which is a multiple of
/// the page size of the machine
size_t mmapLength_;
/// the actual number of slots that we will allocate, to guarantee
/// that we will satisfy the capacity requested at construction time.
/// They will be numbered 1..actualCapacity_ (note the 1-based counting),
/// and occupy slots_[1..actualCapacity_].
uint32_t actualCapacity_;
/// this records the number of slots that have actually been constructed.
/// To allow use of atomic ++ instead of CAS, we let this overflow.
/// The actual number of constructed elements is min(actualCapacity_,
/// size_)
Atom<uint32_t> size_;
/// raw storage, only 1..min(size_,actualCapacity_) (inclusive) are
/// actually constructed. Note that slots_[0] is not constructed or used
FOLLY_ALIGN_TO_AVOID_FALSE_SHARING Slot* slots_;
/// use AccessSpreader to find your list. We use stripes instead of
/// thread-local to avoid the need to grow or shrink on thread start
/// or join. These are heads of lists chained with localNext
LocalList local_[NumLocalLists];
/// this is the head of a list of node chained by globalNext, that are
/// themselves each the head of a list chained by localNext
FOLLY_ALIGN_TO_AVOID_FALSE_SHARING AtomicStruct<TaggedPtr,Atom> globalHead_;
///////////// private methods
// Debug-checks that idx is 1-based, within capacity, and refers to a slot
// that has already been handed out at least once (idx <= size_), then
// returns it unchanged.
uint32_t slotIndex(uint32_t idx) const {
  assert(0 < idx &&
         idx <= actualCapacity_ &&
         idx <= size_.load(std::memory_order_acquire));
  return idx;
}

Slot& slot(uint32_t idx) {
  return slots_[slotIndex(idx)];
}

const Slot& slot(uint32_t idx) const {
  return slots_[slotIndex(idx)];
}
// localHead references a full list chained by localNext. s should
// reference slot(localHead), it is passed as a micro-optimization
void globalPush(Slot& s, uint32_t localHead) {
  while (true) {
    TaggedPtr gh = globalHead_.load(std::memory_order_acquire);
    // Chain the entire local list onto the current global head, then try to
    // swing the head to it.  withIdx bumps the tag, so a concurrent pop that
    // reinstalls the same index cannot cause ABA.  On CAS failure, reload
    // and retry.
    s.globalNext = gh.idx;
    if (globalHead_.compare_exchange_strong(gh, gh.withIdx(localHead))) {
      // success
      return;
    }
  }
}
// idx references a single node
void localPush(AtomicStruct<TaggedPtr,Atom>& head, uint32_t idx) {
  Slot& s = slot(idx);
  TaggedPtr h = head.load(std::memory_order_acquire);
  while (true) {
    // Tentatively link the node in front of the current local head; only
    // published if one of the CASes below succeeds.
    s.localNext = h.idx;

    if (h.size() == LocalListLimit) {
      // push will overflow local list, steal it instead
      if (head.compare_exchange_strong(h, h.withEmpty())) {
        // steal was successful, put everything in the global list.  The
        // pushed node (already linked above) becomes the stolen list's head.
        globalPush(s, idx);
        return;
      }
    } else {
      // local list has space
      if (head.compare_exchange_strong(h, h.withIdx(idx).withSizeIncr())) {
        // success
        return;
      }
    }
    // h was updated by failing CAS
  }
}
// returns 0 if empty
uint32_t globalPop() {
  while (true) {
    TaggedPtr gh = globalHead_.load(std::memory_order_acquire);
    // Either the global list is empty (idx == 0), or we try to swing the
    // head to the next local-list head.  withIdx's tag bump makes the CAS
    // fail if another thread popped and re-pushed this node in between.
    if (gh.idx == 0 || globalHead_.compare_exchange_strong(
            gh, gh.withIdx(slot(gh.idx).globalNext))) {
      // global list is empty, or pop was successful
      return gh.idx;
    }
  }
}
// returns 0 if allocation failed
uint32_t localPop(AtomicStruct<TaggedPtr,Atom>& head) {
  while (true) {
    TaggedPtr h = head.load(std::memory_order_acquire);
    if (h.idx != 0) {
      // local list is non-empty, try to pop
      Slot& s = slot(h.idx);
      if (head.compare_exchange_strong(
              h, h.withIdx(s.localNext).withSizeDecr())) {
        // success
        s.localNext = uint32_t(-1);  // sentinel: marks the slot as allocated
        return h.idx;
      }
      continue;
    }

    // Local list empty: try to refill from the global list.
    uint32_t idx = globalPop();
    if (idx == 0) {
      // global list is empty, allocate and construct new slot.  The relaxed
      // pre-check avoids unbounded growth of size_ once the pool is full;
      // the post-increment check makes the claim race-free.
      if (size_.load(std::memory_order_relaxed) >= actualCapacity_ ||
          (idx = ++size_) > actualCapacity_) {
        // allocation failed
        return 0;
      }

      // default-construct it now if we aren't going to construct and
      // destroy on each allocation
      if (!eagerRecycle()) {
        T* ptr = &slot(idx).elem;
        new (ptr) T();
      }
      slot(idx).localNext = uint32_t(-1);
      return idx;
    }

    // idx heads a full local list pulled from the global list; try to
    // install its tail as our local list and keep the head for ourselves.
    Slot& s = slot(idx);
    if (head.compare_exchange_strong(
            h, h.withIdx(s.localNext).withSize(LocalListLimit))) {
      // global list moved to local list, keep head for us
      s.localNext = uint32_t(-1);
      return idx;
    }
    // local bulk push failed, return idx to the global list and try again
    globalPush(s, idx);
  }
}
AtomicStruct<TaggedPtr,Atom>& localHead() {
  // AccessSpreader maps the current execution context onto one of
  // NumLocalLists stripes (see the local_ field's rationale for using
  // stripes instead of thread-locals).
  auto stripe = detail::AccessSpreader<Atom>::current(NumLocalLists);
  return local_[stripe].head;
}
};
namespace detail {
/// This is a stateful Deleter functor, which allows std::unique_ptr
/// to track elements allocated from an IndexedMemPool by tracking the
/// associated pool. See IndexedMemPool::allocElem.
template <typename Pool>
struct IndexedMemPoolRecycler {
  Pool* pool;  // non-owning; the pool must outlive any unique_ptr using this deleter

  explicit IndexedMemPoolRecycler(Pool* pool) : pool(pool) {}

  IndexedMemPoolRecycler(const IndexedMemPoolRecycler<Pool>& rhs)
      = default;
  IndexedMemPoolRecycler& operator= (const IndexedMemPoolRecycler<Pool>& rhs)
      = default;

  // Invoked by std::unique_ptr in place of delete: translate the element
  // pointer back to its pool index and return it to the pool.
  void operator()(typename Pool::value_type* elem) const {
    pool->recycleIndex(pool->locateElem(elem));
  }
};
}
} // namespace folly
# pragma GCC diagnostic pop
| charsyam/folly | folly/IndexedMemPool.h | C | apache-2.0 | 15,973 |
#!/usr/bin/env ruby
#--
# set.rb - defines the Set class
#++
# Copyright (c) 2002 Akinori MUSHA <knu@iDaemons.org>
#
# Documentation by Akinori MUSHA and Gavin Sinclair.
#
# All rights reserved. You can redistribute and/or modify it under the same
# terms as Ruby.
#
# $Id: set.rb 8696 2009-01-10 21:17:58Z headius $
#
# == Overview
#
# This library provides the Set class, which deals with a collection
# of unordered values with no duplicates. It is a hybrid of Array's
# intuitive inter-operation facilities and Hash's fast lookup. If you
# need to keep values ordered, use the SortedSet class.
#
# The method +to_set+ is added to Enumerable for convenience.
#
# See the Set class for an example of usage.
#
# Set implements a collection of unordered values with no duplicates.
# This is a hybrid of Array's intuitive inter-operation facilities and
# Hash's fast lookup.
#
# Several methods accept any Enumerable object (implementing +each+)
# for greater flexibility: new, replace, merge, subtract, |, &, -, ^.
#
# The equality of each couple of elements is determined according to
# Object#eql? and Object#hash, since Set uses Hash as storage.
#
# Finally, if you are using class Set, you can also use Enumerable#to_set
# for convenience.
#
# == Example
#
# require 'set'
# s1 = Set.new [1, 2] # -> #<Set: {1, 2}>
# s2 = [1, 2].to_set # -> #<Set: {1, 2}>
# s1 == s2 # -> true
# s1.add("foo") # -> #<Set: {1, 2, "foo"}>
# s1.merge([2, 6]) # -> #<Set: {6, 1, 2, "foo"}>
# s1.subset? s2 # -> false
# s2.subset? s1 # -> true
#
class Set
include Enumerable
# Creates a new set containing the given objects.
def self.[](*ary)
new(ary)
end
# Creates a new set containing the elements of the given enumerable
# object.
#
# If a block is given, the elements of enum are preprocessed by the
# given block.
def initialize(enum = nil, &block) # :yields: o
  # ||= rather than plain assignment so a subclass (see SortedSet's
  # rbtree-backed variant) may install a different storage object before
  # calling super.
  @hash ||= Hash.new
  enum.nil? and return

  if block
    # Preprocess each element through the block before insertion.
    enum.each { |o| add(block[o]) }
  else
    merge(enum)
  end
end
# Copy internal hash.
def initialize_copy(orig)
@hash = orig.instance_eval{@hash}.dup
end
# Returns the number of elements.
def size
@hash.size
end
alias length size
# Returns true if the set contains no elements.
def empty?
@hash.empty?
end
# Removes all elements and returns self.
def clear
@hash.clear
self
end
# Replaces the contents of the set with the contents of the given
# enumerable object and returns self.
def replace(enum)
if enum.class == self.class
@hash.replace(enum.instance_eval { @hash })
else
enum.is_a?(Enumerable) or raise ArgumentError, "value must be enumerable"
clear
enum.each { |o| add(o) }
end
self
end
# Converts the set to an array. The order of elements is uncertain.
def to_a
@hash.keys
end
# Merges the elements of +set+ into self, recursively flattening any
# nested Sets.  +seen+ holds the object_ids of sets currently on the
# recursion stack so that a self-referential Set raises ArgumentError
# instead of looping forever.
def flatten_merge(set, seen = Set.new)
  set.each { |e|
    if e.is_a?(Set)
      if seen.include?(e_id = e.object_id)
        raise ArgumentError, "tried to flatten recursive Set"
      end

      seen.add(e_id)
      flatten_merge(e, seen)
      # Popped from the "stack" on the way out: the same sub-set may
      # legitimately appear again in a sibling position.
      seen.delete(e_id)
    else
      add(e)
    end
  }

  self
end
protected :flatten_merge
# Returns a new set that is a copy of the set, flattening each
# containing set recursively.
def flatten
self.class.new.flatten_merge(self)
end
# Equivalent to Set#flatten, but replaces the receiver with the
# result in place. Returns nil if no modifications were made.
def flatten!
if detect { |e| e.is_a?(Set) }
replace(flatten())
else
nil
end
end
# Returns true if the set contains the given object.
def include?(o)
@hash.include?(o)
end
alias member? include?
# Returns true if the set is a superset of the given set.
def superset?(set)
set.is_a?(Set) or raise ArgumentError, "value must be a set"
return false if size < set.size
set.all? { |o| include?(o) }
end
# Returns true if the set is a proper superset of the given set.
def proper_superset?(set)
set.is_a?(Set) or raise ArgumentError, "value must be a set"
return false if size <= set.size
set.all? { |o| include?(o) }
end
# Returns true if the set is a subset of the given set.
def subset?(set)
set.is_a?(Set) or raise ArgumentError, "value must be a set"
return false if set.size < size
all? { |o| set.include?(o) }
end
# Returns true if the set is a proper subset of the given set.
def proper_subset?(set)
set.is_a?(Set) or raise ArgumentError, "value must be a set"
return false if set.size <= size
all? { |o| set.include?(o) }
end
# Calls the given block once for each element in the set, passing
# the element as parameter.
def each
@hash.each_key { |o| yield(o) }
self
end
# Adds the given object to the set and returns self. Use +merge+ to
# add several elements at once.
def add(o)
@hash[o] = true
self
end
alias << add
# Adds the given object to the set and returns self. If the
# object is already in the set, returns nil.
def add?(o)
if include?(o)
nil
else
add(o)
end
end
# Deletes the given object from the set and returns self. Use +subtract+ to
# delete several items at once.
def delete(o)
@hash.delete(o)
self
end
# Deletes the given object from the set and returns self. If the
# object is not in the set, returns nil.
def delete?(o)
if include?(o)
delete(o)
else
nil
end
end
# Deletes every element of the set for which block evaluates to
# true, and returns self.
def delete_if
to_a.each { |o| @hash.delete(o) if yield(o) }
self
end
# Do collect() destructively.
def collect!
set = self.class.new
each { |o| set << yield(o) }
replace(set)
end
alias map! collect!
# Equivalent to Set#delete_if, but returns nil if no changes were
# made.
def reject!
n = size
delete_if { |o| yield(o) }
size == n ? nil : self
end
# Merges the elements of the given enumerable object to the set and
# returns self.
def merge(enum)
if enum.is_a?(Set)
@hash.update(enum.instance_eval { @hash })
else
enum.is_a?(Enumerable) or raise ArgumentError, "value must be enumerable"
enum.each { |o| add(o) }
end
self
end
# Deletes every element that appears in the given enumerable object
# and returns self.
def subtract(enum)
enum.is_a?(Enumerable) or raise ArgumentError, "value must be enumerable"
enum.each { |o| delete(o) }
self
end
# Returns a new set built by merging the set and the elements of the
# given enumerable object.
def |(enum)
enum.is_a?(Enumerable) or raise ArgumentError, "value must be enumerable"
dup.merge(enum)
end
alias + | ##
alias union | ##
# Returns a new set built by duplicating the set, removing every
# element that appears in the given enumerable object.
def -(enum)
enum.is_a?(Enumerable) or raise ArgumentError, "value must be enumerable"
dup.subtract(enum)
end
alias difference - ##
# Returns a new set containing elements common to the set and the
# given enumerable object.
def &(enum)
enum.is_a?(Enumerable) or raise ArgumentError, "value must be enumerable"
n = self.class.new
enum.each { |o| n.add(o) if include?(o) }
n
end
alias intersection & ##
# Returns a new set containing elements exclusive between the set
# and the given enumerable object. (set ^ enum) is equivalent to
# ((set | enum) - (set & enum)).
def ^(enum)
enum.is_a?(Enumerable) or raise ArgumentError, "value must be enumerable"
n = Set.new(enum)
each { |o| if n.include?(o) then n.delete(o) else n.add(o) end }
n
end
# Returns true if two sets are equal. The equality of each couple
# of elements is defined according to Object#eql?.
def ==(set)
  # Identity short-circuit, then cheap type/size screen before the O(n) scan.
  equal?(set) and return true
  set.is_a?(Set) && size == set.size or return false

  # NOTE(review): the dup looks like a guard against @hash being mutated
  # while iterating the other set (e.g. via re-entrant #hash/#eql? calls) --
  # confirm before simplifying it away.
  hash = @hash.dup
  set.all? { |o| hash.include?(o) }
end
def hash # :nodoc:
@hash.hash
end
def eql?(o) # :nodoc:
return false unless o.is_a?(Set)
@hash.eql?(o.instance_eval{@hash})
end
# Classifies the set by the return value of the given block and
# returns a hash of {value => set of elements} pairs. The block is
# called once for each element of the set, passing the element as
# parameter.
#
# e.g.:
#
# require 'set'
# files = Set.new(Dir.glob("*.rb"))
# hash = files.classify { |f| File.mtime(f).year }
# p hash # => {2000=>#<Set: {"a.rb", "b.rb"}>,
# # 2001=>#<Set: {"c.rb", "d.rb", "e.rb"}>,
# # 2002=>#<Set: {"f.rb"}>}
def classify # :yields: o
h = {}
each { |i|
x = yield(i)
(h[x] ||= self.class.new).add(i)
}
h
end
# Divides the set into a set of subsets according to the commonality
# defined by the given block.
#
# If the arity of the block is 2, elements o1 and o2 are in common
# if block.call(o1, o2) is true. Otherwise, elements o1 and o2 are
# in common if block.call(o1) == block.call(o2).
#
# e.g.:
#
# require 'set'
# numbers = Set[1, 3, 4, 6, 9, 10, 11]
# set = numbers.divide { |i,j| (i - j).abs == 1 }
# p set # => #<Set: {#<Set: {1}>,
# # #<Set: {11, 9, 10}>,
# # #<Set: {3, 4}>,
# # #<Set: {6}>}>
def divide(&func)
  if func.arity == 2
    require 'tsort'

    # Build a digraph in +dig+ where u -> v iff func.call(u, v); its
    # strongly connected components are exactly the requested subsets.
    # The singleton-class trick makes the Hash itself TSort-capable.
    class << dig = {} # :nodoc:
      include TSort

      alias tsort_each_node each_key
      def tsort_each_child(node, &block)
        fetch(node).each(&block)
      end
    end

    # O(n^2) pairwise evaluation of the commonality predicate.
    each { |u|
      dig[u] = a = []
      each{ |v| func.call(u, v) and a << v }
    }

    set = Set.new()
    dig.each_strongly_connected_component { |css|
      set.add(self.class.new(css))
    }
    set
  else
    # Unary block: elements with equal block results share a subset.
    Set.new(classify(&func).values)
  end
end
InspectKey = :__inspect_key__ # :nodoc:
# Returns a string containing a human-readable representation of the
# set. ("#<Set: {element1, element2, ...}>")
def inspect
ids = (Thread.current[InspectKey] ||= [])
if ids.include?(object_id)
return sprintf('#<%s: {...}>', self.class.name)
end
begin
ids << object_id
return sprintf('#<%s: {%s}>', self.class, to_a.inspect[1..-2])
ensure
ids.pop
end
end
def pretty_print(pp) # :nodoc:
pp.text sprintf('#<%s: {', self.class.name)
pp.nest(1) {
pp.seplist(self) { |o|
pp.pp o
}
}
pp.text "}>"
end
def pretty_print_cycle(pp) # :nodoc:
pp.text sprintf('#<%s: {%s}>', self.class.name, empty? ? '' : '...')
end
end
# SortedSet implements a set which elements are sorted in order. See Set.
class SortedSet < Set
  @@setup = false  # whether a concrete implementation has been installed yet

  class << self
    def [](*ary) # :nodoc:
      new(ary)
    end

    # Lazily installs one of two implementations the first time a SortedSet
    # is created: an RBTree-backed one when the rbtree extension is
    # available, otherwise a Hash-backed one that sorts (and caches in
    # @keys) the key list on demand, invalidating the cache on mutation.
    def setup # :nodoc:
      @@setup and return

      module_eval {
        # a hack to shut up warning
        alias old_init initialize
        remove_method :old_init
      }
      begin
        require 'rbtree'

        module_eval %{
          def initialize(*args, &block)
            @hash = RBTree.new
            super
          end
        }
      rescue LoadError
        module_eval %{
          def initialize(*args, &block)
            @keys = nil
            super
          end

          def clear
            @keys = nil
            super
          end

          def replace(enum)
            @keys = nil
            super
          end

          def add(o)
            @keys = nil
            @hash[o] = true
            self
          end
          alias << add

          def delete(o)
            @keys = nil
            @hash.delete(o)
            self
          end

          def delete_if
            n = @hash.size
            super
            @keys = nil if @hash.size != n
            self
          end

          def merge(enum)
            @keys = nil
            super
          end

          def each
            to_a.each { |o| yield(o) }
            self
          end

          def to_a
            (@keys = @hash.keys).sort! unless @keys
            @keys
          end
        }
      end

      @@setup = true
    end
  end

  # First call installs the real #initialize via setup, then re-invokes
  # initialize -- the recursive call resolves to the freshly defined
  # method, not this bootstrap one.
  def initialize(*args, &block) # :nodoc:
    SortedSet.setup
    initialize(*args, &block)
  end
end
module Enumerable
  # Makes a set from the enumerable object with given arguments.
  # Needs to +require "set"+ to use this method.
  #
  # +klass+ may be any Set-compatible class (e.g. SortedSet); extra
  # arguments and the block are forwarded to its constructor.
  def to_set(klass = Set, *args, &block)
    klass.new(self, *args, &block)
  end
end
# =begin
# == RestricedSet class
# RestricedSet implements a set with restrictions defined by a given
# block.
#
# === Super class
# Set
#
# === Class Methods
# --- RestricedSet::new(enum = nil) { |o| ... }
# --- RestricedSet::new(enum = nil) { |rset, o| ... }
# Creates a new restricted set containing the elements of the given
# enumerable object. Restrictions are defined by the given block.
#
# If the block's arity is 2, it is called with the RestrictedSet
# itself and an object to see if the object is allowed to be put in
# the set.
#
# Otherwise, the block is called with an object to see if the object
# is allowed to be put in the set.
#
# === Instance Methods
# --- restriction_proc
# Returns the restriction procedure of the set.
#
# =end
#
# class RestricedSet < Set
# def initialize(*args, &block)
# @proc = block or raise ArgumentError, "missing a block"
#
# if @proc.arity == 2
# instance_eval %{
# def add(o)
# @hash[o] = true if @proc.call(self, o)
# self
# end
# alias << add
#
# def add?(o)
# if include?(o) || !@proc.call(self, o)
# nil
# else
# @hash[o] = true
# self
# end
# end
#
# def replace(enum)
# enum.is_a?(Enumerable) or raise ArgumentError, "value must be enumerable"
# clear
# enum.each { |o| add(o) }
#
# self
# end
#
# def merge(enum)
# enum.is_a?(Enumerable) or raise ArgumentError, "value must be enumerable"
# enum.each { |o| add(o) }
#
# self
# end
# }
# else
# instance_eval %{
# def add(o)
# if @proc.call(o)
# @hash[o] = true
# end
# self
# end
# alias << add
#
# def add?(o)
# if include?(o) || !@proc.call(o)
# nil
# else
# @hash[o] = true
# self
# end
# end
# }
# end
#
# super(*args)
# end
#
# def restriction_proc
# @proc
# end
# end
# When run directly (not require'd), execute the embedded Test::Unit suite
# stored after __END__, available through the DATA stream.  The line offset
# keeps backtraces pointing at the right lines of this file.
if $0 == __FILE__
  eval DATA.read, nil, $0, __LINE__+4
end
__END__
require 'test/unit'
class TC_Set < Test::Unit::TestCase
def test_aref
assert_nothing_raised {
Set[]
Set[nil]
Set[1,2,3]
}
assert_equal(0, Set[].size)
assert_equal(1, Set[nil].size)
assert_equal(1, Set[[]].size)
assert_equal(1, Set[[nil]].size)
set = Set[2,4,6,4]
assert_equal(Set.new([2,4,6]), set)
end
def test_s_new
assert_nothing_raised {
Set.new()
Set.new(nil)
Set.new([])
Set.new([1,2])
Set.new('a'..'c')
Set.new('XYZ')
}
assert_raises(ArgumentError) {
Set.new(false)
}
assert_raises(ArgumentError) {
Set.new(1)
}
assert_raises(ArgumentError) {
Set.new(1,2)
}
assert_equal(0, Set.new().size)
assert_equal(0, Set.new(nil).size)
assert_equal(0, Set.new([]).size)
assert_equal(1, Set.new([nil]).size)
ary = [2,4,6,4]
set = Set.new(ary)
ary.clear
assert_equal(false, set.empty?)
assert_equal(3, set.size)
ary = [1,2,3]
s = Set.new(ary) { |o| o * 2 }
assert_equal([2,4,6], s.sort)
end
def test_clone
set1 = Set.new
set2 = set1.clone
set1 << 'abc'
assert_equal(Set.new, set2)
end
def test_dup
set1 = Set[1,2]
set2 = set1.dup
assert_not_same(set1, set2)
assert_equal(set1, set2)
set1.add(3)
assert_not_equal(set1, set2)
end
def test_size
assert_equal(0, Set[].size)
assert_equal(2, Set[1,2].size)
assert_equal(2, Set[1,2,1].size)
end
def test_empty?
assert_equal(true, Set[].empty?)
assert_equal(false, Set[1, 2].empty?)
end
def test_clear
set = Set[1,2]
ret = set.clear
assert_same(set, ret)
assert_equal(true, set.empty?)
end
def test_replace
set = Set[1,2]
ret = set.replace('a'..'c')
assert_same(set, ret)
assert_equal(Set['a','b','c'], set)
end
def test_to_a
set = Set[1,2,3,2]
ary = set.to_a
assert_equal([1,2,3], ary.sort)
end
def test_flatten
# test1
set1 = Set[
1,
Set[
5,
Set[7,
Set[0]
],
Set[6,2],
1
],
3,
Set[3,4]
]
set2 = set1.flatten
set3 = Set.new(0..7)
assert_not_same(set2, set1)
assert_equal(set3, set2)
# test2; destructive
orig_set1 = set1
set1.flatten!
assert_same(orig_set1, set1)
assert_equal(set3, set1)
# test3; multiple occurrences of a set in an set
set1 = Set[1, 2]
set2 = Set[set1, Set[set1, 4], 3]
assert_nothing_raised {
set2.flatten!
}
assert_equal(Set.new(1..4), set2)
# test4; recursion
set2 = Set[]
set1 = Set[1, set2]
set2.add(set1)
assert_raises(ArgumentError) {
set1.flatten!
}
# test5; miscellaneous
empty = Set[]
set = Set[Set[empty, "a"],Set[empty, "b"]]
assert_nothing_raised {
set.flatten
}
set1 = empty.merge(Set["no_more", set])
assert_nil(Set.new(0..31).flatten!)
x = Set[Set[],Set[1,2]].flatten!
y = Set[1,2]
assert_equal(x, y)
end
def test_include?
set = Set[1,2,3]
assert_equal(true, set.include?(1))
assert_equal(true, set.include?(2))
assert_equal(true, set.include?(3))
assert_equal(false, set.include?(0))
assert_equal(false, set.include?(nil))
set = Set["1",nil,"2",nil,"0","1",false]
assert_equal(true, set.include?(nil))
assert_equal(true, set.include?(false))
assert_equal(true, set.include?("1"))
assert_equal(false, set.include?(0))
assert_equal(false, set.include?(true))
end
def test_superset?
set = Set[1,2,3]
assert_raises(ArgumentError) {
set.superset?()
}
assert_raises(ArgumentError) {
set.superset?(2)
}
assert_raises(ArgumentError) {
set.superset?([2])
}
assert_equal(true, set.superset?(Set[]))
assert_equal(true, set.superset?(Set[1,2]))
assert_equal(true, set.superset?(Set[1,2,3]))
assert_equal(false, set.superset?(Set[1,2,3,4]))
assert_equal(false, set.superset?(Set[1,4]))
assert_equal(true, Set[].superset?(Set[]))
end
def test_proper_superset?
set = Set[1,2,3]
assert_raises(ArgumentError) {
set.proper_superset?()
}
assert_raises(ArgumentError) {
set.proper_superset?(2)
}
assert_raises(ArgumentError) {
set.proper_superset?([2])
}
assert_equal(true, set.proper_superset?(Set[]))
assert_equal(true, set.proper_superset?(Set[1,2]))
assert_equal(false, set.proper_superset?(Set[1,2,3]))
assert_equal(false, set.proper_superset?(Set[1,2,3,4]))
assert_equal(false, set.proper_superset?(Set[1,4]))
assert_equal(false, Set[].proper_superset?(Set[]))
end
def test_subset?
set = Set[1,2,3]
assert_raises(ArgumentError) {
set.subset?()
}
assert_raises(ArgumentError) {
set.subset?(2)
}
assert_raises(ArgumentError) {
set.subset?([2])
}
assert_equal(true, set.subset?(Set[1,2,3,4]))
assert_equal(true, set.subset?(Set[1,2,3]))
assert_equal(false, set.subset?(Set[1,2]))
assert_equal(false, set.subset?(Set[]))
assert_equal(true, Set[].subset?(Set[1]))
assert_equal(true, Set[].subset?(Set[]))
end
def test_proper_subset?
set = Set[1,2,3]
assert_raises(ArgumentError) {
set.proper_subset?()
}
assert_raises(ArgumentError) {
set.proper_subset?(2)
}
assert_raises(ArgumentError) {
set.proper_subset?([2])
}
assert_equal(true, set.proper_subset?(Set[1,2,3,4]))
assert_equal(false, set.proper_subset?(Set[1,2,3]))
assert_equal(false, set.proper_subset?(Set[1,2]))
assert_equal(false, set.proper_subset?(Set[]))
assert_equal(false, Set[].proper_subset?(Set[]))
end
def test_each
ary = [1,3,5,7,10,20]
set = Set.new(ary)
assert_raises(LocalJumpError) {
set.each
}
assert_nothing_raised {
set.each { |o|
ary.delete(o) or raise "unexpected element: #{o}"
}
ary.empty? or raise "forgotten elements: #{ary.join(', ')}"
}
end
def test_add
set = Set[1,2,3]
ret = set.add(2)
assert_same(set, ret)
assert_equal(Set[1,2,3], set)
ret = set.add?(2)
assert_nil(ret)
assert_equal(Set[1,2,3], set)
ret = set.add(4)
assert_same(set, ret)
assert_equal(Set[1,2,3,4], set)
ret = set.add?(5)
assert_same(set, ret)
assert_equal(Set[1,2,3,4,5], set)
end
def test_delete
set = Set[1,2,3]
ret = set.delete(4)
assert_same(set, ret)
assert_equal(Set[1,2,3], set)
ret = set.delete?(4)
assert_nil(ret)
assert_equal(Set[1,2,3], set)
ret = set.delete(2)
assert_equal(set, ret)
assert_equal(Set[1,3], set)
ret = set.delete?(1)
assert_equal(set, ret)
assert_equal(Set[3], set)
end
def test_delete_if
set = Set.new(1..10)
ret = set.delete_if { |i| i > 10 }
assert_same(set, ret)
assert_equal(Set.new(1..10), set)
set = Set.new(1..10)
ret = set.delete_if { |i| i % 3 == 0 }
assert_same(set, ret)
assert_equal(Set[1,2,4,5,7,8,10], set)
end
def test_collect!
set = Set[1,2,3,'a','b','c',-1..1,2..4]
ret = set.collect! { |i|
case i
when Numeric
i * 2
when String
i.upcase
else
nil
end
}
assert_same(set, ret)
assert_equal(Set[2,4,6,'A','B','C',nil], set)
end
def test_reject!
set = Set.new(1..10)
ret = set.reject! { |i| i > 10 }
assert_nil(ret)
assert_equal(Set.new(1..10), set)
ret = set.reject! { |i| i % 3 == 0 }
assert_same(set, ret)
assert_equal(Set[1,2,4,5,7,8,10], set)
end
def test_merge
set = Set[1,2,3]
ret = set.merge([2,4,6])
assert_same(set, ret)
assert_equal(Set[1,2,3,4,6], set)
end
def test_subtract
set = Set[1,2,3]
ret = set.subtract([2,4,6])
assert_same(set, ret)
assert_equal(Set[1,3], set)
end
def test_plus
set = Set[1,2,3]
ret = set + [2,4,6]
assert_not_same(set, ret)
assert_equal(Set[1,2,3,4,6], ret)
end
def test_minus
set = Set[1,2,3]
ret = set - [2,4,6]
assert_not_same(set, ret)
assert_equal(Set[1,3], ret)
end
def test_and
set = Set[1,2,3,4]
ret = set & [2,4,6]
assert_not_same(set, ret)
assert_equal(Set[2,4], ret)
end
def test_xor
set = Set[1,2,3,4]
ret = set ^ [2,4,5,5]
assert_not_same(set, ret)
assert_equal(Set[1,3,5], ret)
end
def test_eq
set1 = Set[2,3,1]
set2 = Set[1,2,3]
assert_equal(set1, set1)
assert_equal(set1, set2)
assert_not_equal(Set[1], [1])
set1 = Class.new(Set)["a", "b"]
set2 = Set["a", "b", set1]
set1 = set1.add(set1.clone)
# assert_equal(set1, set2)
# assert_equal(set2, set1)
assert_equal(set2, set2.clone)
assert_equal(set1.clone, set1)
assert_not_equal(Set[Exception.new,nil], Set[Exception.new,Exception.new], "[ruby-dev:26127]")
end
# def test_hash
# end
# def test_eql?
# end
def test_classify
set = Set.new(1..10)
ret = set.classify { |i| i % 3 }
assert_equal(3, ret.size)
assert_instance_of(Hash, ret)
ret.each_value { |value| assert_instance_of(Set, value) }
assert_equal(Set[3,6,9], ret[0])
assert_equal(Set[1,4,7,10], ret[1])
assert_equal(Set[2,5,8], ret[2])
end
def test_divide
set = Set.new(1..10)
ret = set.divide { |i| i % 3 }
assert_equal(3, ret.size)
n = 0
ret.each { |s| n += s.size }
assert_equal(set.size, n)
assert_equal(set, ret.flatten)
set = Set[7,10,5,11,1,3,4,9,0]
ret = set.divide { |a,b| (a - b).abs == 1 }
assert_equal(4, ret.size)
n = 0
ret.each { |s| n += s.size }
assert_equal(set.size, n)
assert_equal(set, ret.flatten)
ret.each { |s|
if s.include?(0)
assert_equal(Set[0,1], s)
elsif s.include?(3)
assert_equal(Set[3,4,5], s)
elsif s.include?(7)
assert_equal(Set[7], s)
elsif s.include?(9)
assert_equal(Set[9,10,11], s)
else
raise "unexpected group: #{s.inspect}"
end
}
end
def test_inspect
set1 = Set[1]
assert_equal('#<Set: {1}>', set1.inspect)
set2 = Set[Set[0], 1, 2, set1]
assert_equal(false, set2.inspect.include?('#<Set: {...}>'))
set1.add(set2)
assert_equal(true, set1.inspect.include?('#<Set: {...}>'))
end
# def test_pretty_print
# end
# def test_pretty_print_cycle
# end
end
class TC_SortedSet < Test::Unit::TestCase
def test_sortedset
s = SortedSet[4,5,3,1,2]
assert_equal([1,2,3,4,5], s.to_a)
prev = nil
s.each { |o| assert(prev < o) if prev; prev = o }
assert_not_nil(prev)
s.map! { |o| -2 * o }
assert_equal([-10,-8,-6,-4,-2], s.to_a)
prev = nil
ret = s.each { |o| assert(prev < o) if prev; prev = o }
assert_not_nil(prev)
assert_same(s, ret)
s = SortedSet.new([2,1,3]) { |o| o * -2 }
assert_equal([-6,-4,-2], s.to_a)
s = SortedSet.new(['one', 'two', 'three', 'four'])
a = []
ret = s.delete_if { |o| a << o; o[0] == ?t }
assert_same(s, ret)
assert_equal(['four', 'one'], s.to_a)
assert_equal(['four', 'one', 'three', 'two'], a)
s = SortedSet.new(['one', 'two', 'three', 'four'])
a = []
ret = s.reject! { |o| a << o; o[0] == ?t }
assert_same(s, ret)
assert_equal(['four', 'one'], s.to_a)
assert_equal(['four', 'one', 'three', 'two'], a)
s = SortedSet.new(['one', 'two', 'three', 'four'])
a = []
ret = s.reject! { |o| a << o; false }
assert_same(nil, ret)
assert_equal(['four', 'one', 'three', 'two'], s.to_a)
assert_equal(['four', 'one', 'three', 'two'], a)
end
end
class TC_Enumerable < Test::Unit::TestCase
def test_to_set
ary = [2,5,4,3,2,1,3]
set = ary.to_set
assert_instance_of(Set, set)
assert_equal([1,2,3,4,5], set.sort)
set = ary.to_set { |o| o * -2 }
assert_instance_of(Set, set)
assert_equal([-10,-8,-6,-4,-2], set.sort)
set = ary.to_set(SortedSet)
assert_instance_of(SortedSet, set)
assert_equal([1,2,3,4,5], set.to_a)
set = ary.to_set(SortedSet) { |o| o * -2 }
assert_instance_of(SortedSet, set)
assert_equal([-10,-8,-6,-4,-2], set.sort)
end
end
# class TC_RestricedSet < Test::Unit::TestCase
# def test_s_new
# assert_raises(ArgumentError) { RestricedSet.new }
#
# s = RestricedSet.new([-1,2,3]) { |o| o > 0 }
# assert_equal([2,3], s.sort)
# end
#
# def test_restriction_proc
# s = RestricedSet.new([-1,2,3]) { |o| o > 0 }
#
# f = s.restriction_proc
# assert_instance_of(Proc, f)
# assert(f[1])
# assert(!f[0])
# end
#
# def test_replace
# s = RestricedSet.new(-3..3) { |o| o > 0 }
# assert_equal([1,2,3], s.sort)
#
# s.replace([-2,0,3,4,5])
# assert_equal([3,4,5], s.sort)
# end
#
# def test_merge
# s = RestricedSet.new { |o| o > 0 }
# s.merge(-5..5)
# assert_equal([1,2,3,4,5], s.sort)
#
# s.merge([10,-10,-8,8])
# assert_equal([1,2,3,4,5,8,10], s.sort)
# end
# end
| google-code/android-scripting | jruby/src/lib/ruby/1.8/set.rb | Ruby | apache-2.0 | 27,421 |
# AUTOGENERATED FILE
FROM balenalib/revpi-core-3-ubuntu:bionic-run
# A few reasons for installing distribution-provided OpenJDK:
#
# 1. Oracle. Licensing prevents us from redistributing the official JDK.
#
# 2. Compiling OpenJDK also requires the JDK to be installed, and it gets
# really hairy.
#
# For some sample build times, see Debian's buildd logs:
# https://buildd.debian.org/status/logs.php?pkg=openjdk-7
# Default to UTF-8 file.encoding
ENV LANG C.UTF-8
# add a simple script that can auto-detect the appropriate JAVA_HOME value
# based on whether the JDK or only the JRE is installed
RUN { \
echo '#!/bin/sh'; \
echo 'set -e'; \
echo; \
echo 'dirname "$(dirname "$(readlink -f "$(which javac || which java)")")"'; \
} > /usr/local/bin/docker-java-home \
&& chmod +x /usr/local/bin/docker-java-home
# do some fancy footwork to create a JAVA_HOME that's cross-architecture-safe
RUN ln -svT "/usr/lib/jvm/java-8-openjdk-$(dpkg --print-architecture)" /docker-java-home
ENV JAVA_HOME /docker-java-home
RUN set -ex; \
\
# deal with slim variants not having man page directories (which causes "update-alternatives" to fail)
if [ ! -d /usr/share/man/man1 ]; then \
mkdir -p /usr/share/man/man1; \
fi; \
\
apt-get update; \
apt-get install -y --no-install-recommends \
software-properties-common \
; \
add-apt-repository ppa:openjdk-r/ppa; \
apt-get update; \
apt-get install -y --no-install-recommends \
openjdk-8-jre \
; \
rm -rf /var/lib/apt/lists/*; \
\
# verify that "docker-java-home" returns what we expect
[ "$(readlink -f "$JAVA_HOME")" = "$(docker-java-home)" ]; \
\
# update-alternatives so that future installs of other OpenJDK versions don't change /usr/bin/java
update-alternatives --get-selections | awk -v home="$(readlink -f "$JAVA_HOME")" 'index($3, home) == 1 { $2 = "manual"; print | "update-alternatives --set-selections" }'; \
# ... and verify that it actually worked for one of the alternatives we care about
update-alternatives --query java | grep -q 'Status: manual'
CMD ["echo","'No CMD command was set in Dockerfile! Details about CMD command could be found in Dockerfile Guide section in our Docs. Here's the link: https://balena.io/docs"]
RUN [ ! -d /.balena/messages ] && mkdir -p /.balena/messages; echo 'Here are a few details about this Docker image (For more information please visit https://www.balena.io/docs/reference/base-images/base-images/): \nArchitecture: ARM v7 \nOS: Ubuntu bionic \nVariant: run variant \nDefault variable(s): UDEV=off \nThe following software stack is preinstalled: \nOpenJDK v8-jre \nExtra features: \n- Easy way to install packages with `install_packages <package-name>` command \n- Run anywhere with cross-build feature (for ARM only) \n- Keep the container idling with `balena-idle` command \n- Show base image details with `balena-info` command' > /.balena/messages/image-info
RUN echo '#!/bin/sh.real\nbalena-info\nrm -f /bin/sh\ncp /bin/sh.real /bin/sh\n/bin/sh "$@"' > /bin/sh-shim \
&& chmod +x /bin/sh-shim \
&& cp /bin/sh /bin/sh.real \
&& mv /bin/sh-shim /bin/sh | nghiant2710/base-images | balena-base-images/openjdk/revpi-core-3/ubuntu/bionic/8-jre/run/Dockerfile | Dockerfile | apache-2.0 | 3,099 |
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.airavata.registry.core.app.catalog.model;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.Id;
import javax.persistence.Table;
import java.io.Serializable;
import java.sql.Timestamp;
@Entity
@Table(name = "APPLICATION_INTERFACE")
public class ApplicationInterface implements Serializable {

    /** Primary key of the APPLICATION_INTERFACE table. */
    @Id
    @Column(name = "INTERFACE_ID")
    private String interfaceID;

    @Column(name = "APPLICATION_NAME")
    private String appName;

    @Column(name = "APPLICATION_DESCRIPTION")
    private String appDescription;

    @Column(name = "CREATION_TIME")
    private Timestamp creationTime;

    @Column(name = "UPDATE_TIME")
    private Timestamp updateTime;

    @Column(name = "GATEWAY_ID")
    private String gatewayId;

    @Column(name = "ARCHIVE_WORKING_DIRECTORY")
    private boolean archiveWorkingDirectory;

    @Column(name = "HAS_OPTIONAL_FILE_INPUTS")
    private boolean hasOptionalFileInputs;

    // Plain bean accessors, grouped per property in field order.

    public String getInterfaceID() {
        return interfaceID;
    }

    public void setInterfaceID(String interfaceID) {
        this.interfaceID = interfaceID;
    }

    public String getAppName() {
        return appName;
    }

    public void setAppName(String appName) {
        this.appName = appName;
    }

    public String getAppDescription() {
        return appDescription;
    }

    public void setAppDescription(String appDescription) {
        this.appDescription = appDescription;
    }

    public Timestamp getCreationTime() {
        return creationTime;
    }

    public void setCreationTime(Timestamp creationTime) {
        this.creationTime = creationTime;
    }

    public Timestamp getUpdateTime() {
        return updateTime;
    }

    public void setUpdateTime(Timestamp updateTime) {
        this.updateTime = updateTime;
    }

    public String getGatewayId() {
        return gatewayId;
    }

    public void setGatewayId(String gatewayId) {
        this.gatewayId = gatewayId;
    }

    public boolean isArchiveWorkingDirectory() {
        return archiveWorkingDirectory;
    }

    public void setArchiveWorkingDirectory(boolean archiveWorkingDirectory) {
        this.archiveWorkingDirectory = archiveWorkingDirectory;
    }

    public boolean isHasOptionalFileInputs() {
        return hasOptionalFileInputs;
    }

    public void setHasOptionalFileInputs(boolean hasOptionalFileInputs) {
        this.hasOptionalFileInputs = hasOptionalFileInputs;
    }
}
| machristie/airavata | modules/registry/registry-core/src/main/java/org/apache/airavata/registry/core/app/catalog/model/ApplicationInterface.java | Java | apache-2.0 | 3,284 |
#!/bin/bash
# This shell scripts generates the top-level Markdown structure of the
# Snabb lwAFTR manual.
#
# The authors list is automatically generated from Git history,
# ordered from most to least commits.
# Script based on src/doc/genbook.sh

lwaftr_app=../../../apps/lwaftr/

# Author list from Git history: drop the generic "root" author, count
# commits per author, sort by commit count descending, and join the
# names with "; " on a single line.
authors=$(git log --pretty="%an" $lwaftr_app | \
   grep -v -e '^root$' | \
   sort | uniq -c | sort -nr | sed 's/^[0-9 ]*//' | \
   awk 'BEGIN { first=1; }
     (NF >= 2) { if (first) { first=0 } else { printf("; ") };
                 printf("%s", $0) }
     END { print("") }')

# Abbreviated hash and date of the most recent commit.
version=$(git log -n1 --format="format:%h, %ad%n")

cat <<EOF
% Snabb lwAFTR Manual
% ${authors}
% Version ${version}
$(cat README.welcome.md)
$(cat README.build.md)
$(cat README.running.md)
$(cat README.testing.md)
$(cat README.troubleshooting.md)
$(cat README.bindingtable.md)
$(cat README.configuration.md)
$(cat README.rfccompliance.md)
$(cat README.benchmarking.md)
$(cat README.performance.md)
$(cat README.virtualization.md)
EOF
| wingo/snabbswitch | src/program/lwaftr/doc/genbook.sh | Shell | apache-2.0 | 1,017 |
from hazelcast.serialization.bits import *
from hazelcast.protocol.client_message import ClientMessage
from hazelcast.protocol.custom_codec import *
from hazelcast.util import ImmutableLazyDataList
from hazelcast.protocol.codec.map_message_type import *
# Protocol message type for the map.removeEntryListener request.
REQUEST_TYPE = MAP_REMOVEENTRYLISTENER
# Expected response frame type code; presumably a boolean frame given
# that decode_response reads a single bool — TODO confirm against the
# Hazelcast client protocol spec.
RESPONSE_TYPE = 101
# Removing a listener is safe to retry, so the invocation is retryable.
RETRYABLE = True
def calculate_size(name, registration_id):
    """Calculates the request payload size"""
    # Payload is just the two encoded strings back to back.
    return calculate_size_str(name) + calculate_size_str(registration_id)
def encode_request(name, registration_id):
    """Encode request into client_message"""
    message = ClientMessage(payload_size=calculate_size(name, registration_id))
    message.set_message_type(REQUEST_TYPE)
    message.set_retryable(RETRYABLE)
    # Field order matters on the wire: map name first, then listener id.
    for field in (name, registration_id):
        message.append_str(field)
    message.update_frame_length()
    return message
def decode_response(client_message, to_object=None):
    """Decode response from client message"""
    # The response payload is a single boolean; to_object is accepted for
    # interface uniformity with other codecs but unused here.
    return {'response': client_message.read_bool()}
| cangencer/hazelcast-python-client | hazelcast/protocol/codec/map_remove_entry_listener_codec.py | Python | apache-2.0 | 1,199 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for checks."""
from grr.lib import flags
from grr.lib import test_lib
from grr.lib.checks import hints
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import config_file as rdf_config_file
from grr.lib.rdfvalues import protodict as rdf_protodict
class HintsTests(test_lib.GRRBaseTest):
  """Test hint operations.

  Exercises hints.Overlay (merging of hint dictionaries) and hints.Hinter
  (template rendering of RDF values), using small literal fixtures.
  """
  def testCheckOverlay(self):
    """Overlay(hint1, hint2) should populate hint2 with the values of hint1."""
    # Fully populated hint.
    full = {
        "problem": "Terminator needs trousers.\n",
        "fix": "Give me your clothes.\n",
        "format": "{mission}, {target}\n",
        "summary": "I'll be back."
    }
    # Partial hint
    partial = {
        "problem": "Terminator needs to go shopping.",
        "fix": "Phased plasma rifle in the 40-watt range.",
        "format": "",
        "summary": ""
    }
    # Partial overlaid with full.
    overlay = {
        "problem": "Terminator needs to go shopping.",
        "fix": "Phased plasma rifle in the 40-watt range.",
        "format": "{mission}, {target}",
        "summary": "I'll be back."
    }
    # Empty hint.
    empty = {"problem": "", "fix": "", "format": "", "summary": ""}
    # Empty hint should not clobber populated hint.
    starts_full = full.copy()
    starts_empty = empty.copy()
    hints.Overlay(starts_full, starts_empty)
    self.assertDictEqual(full, starts_full)
    self.assertDictEqual(empty, starts_empty)
    # Populate empty hint from partially populated hint.
    starts_partial = partial.copy()
    starts_empty = empty.copy()
    hints.Overlay(starts_empty, starts_partial)
    self.assertDictEqual(partial, starts_partial)
    self.assertDictEqual(partial, starts_empty)
    # Overlay the full and partial hints to get the hybrid.
    starts_full = full.copy()
    starts_partial = partial.copy()
    hints.Overlay(starts_partial, starts_full)
    self.assertDictEqual(full, starts_full)
    self.assertDictEqual(overlay, starts_partial)
  def testRdfFormatter(self):
    """Hints format RDF values with arbitrary values and attributes."""
    # Create a complex RDF value
    rdf = rdf_client.ClientSummary()
    rdf.system_info.system = "Linux"
    rdf.system_info.node = "coreai.skynet.com"
    # Users (repeated)
    rdf.users = [rdf_client.User(username=u) for u in ("root", "jconnor")]
    # Interface (nested, repeated)
    addresses = [
        rdf_client.NetworkAddress(human_readable=a)
        for a in ("1.1.1.1", "2.2.2.2", "3.3.3.3")
    ]
    eth0 = rdf_client.Interface(ifname="eth0", addresses=addresses[:2])
    ppp0 = rdf_client.Interface(ifname="ppp0", addresses=addresses[2])
    rdf.interfaces = [eth0, ppp0]
    # Repeated fields are expected to render comma-joined.
    template = ("{system_info.system} {users.username} {interfaces.ifname} "
                "{interfaces.addresses.human_readable}\n")
    hinter = hints.Hinter(template=template)
    expected = "Linux root,jconnor eth0,ppp0 1.1.1.1,2.2.2.2,3.3.3.3"
    result = hinter.Render(rdf)
    self.assertEqual(expected, result)
  def testRdfFormatterHandlesKeyValuePair(self):
    """rdfvalue.KeyValue items need special handling to expand k and v."""
    key = rdf_protodict.DataBlob().SetValue("skynet")
    value = rdf_protodict.DataBlob().SetValue([1997])
    rdf = rdf_protodict.KeyValue(k=key, v=value)
    template = "{k}: {v}"
    hinter = hints.Hinter(template=template)
    expected = "skynet: 1997"
    result = hinter.Render(rdf)
    self.assertEqual(expected, result)
  def testRdfFormatterAttributedDict(self):
    # Dict-like RDF values should expand attribute access in templates.
    sshd = rdf_config_file.SshdConfig()
    sshd.config = rdf_protodict.AttributedDict(skynet="operational")
    template = "{config.skynet}"
    hinter = hints.Hinter(template=template)
    expected = "operational"
    result = hinter.Render(sshd)
    self.assertEqual(expected, result)
  def testRdfFormatterFanOut(self):
    # Nested lists/dicts should flatten into comma- and colon-joined text.
    rdf = rdf_protodict.Dict()
    user1 = rdf_client.User(username="drexler")
    user2 = rdf_client.User(username="joy")
    rdf["cataclysm"] = "GreyGoo"
    rdf["thinkers"] = [user1, user2]
    rdf["reference"] = {
        "ecophage": ["bots", ["nanobots", ["picobots"]]],
        "doomsday": {
            "books": ["cats cradle", "prey"]
        }
    }
    template = ("{cataclysm}; {thinkers.username}; {reference.ecophage}; "
                "{reference.doomsday}\n")
    hinter = hints.Hinter(template=template)
    expected = ("GreyGoo; drexler,joy; bots,nanobots,picobots; "
                "books:cats cradle,prey")
    result = hinter.Render(rdf)
    self.assertEqual(expected, result)
  def testStatModeFormat(self):
    # st_mode 33204 (octal 0100664) should render as an ls-style string.
    rdf = rdf_client.StatEntry(st_mode=33204)
    expected = "-rw-rw-r--"
    template = "{st_mode}"
    hinter = hints.Hinter(template=template)
    result = hinter.Render(rdf)
    self.assertEqual(expected, result)
def main(argv):
  """Entry point invoked by flags.StartMain; delegates to the test runner."""
  test_lib.main(argv)
if __name__ == "__main__":
  flags.StartMain(main)
| destijl/grr | grr/lib/checks/hints_test.py | Python | apache-2.0 | 4,932 |
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/**
 * Extension ID of Files.app.
 * Used as the destination of chrome.runtime.sendMessage calls below;
 * must match the installed Files.app extension under test.
 * @type {string}
 * @const
 */
var FILE_MANAGER_EXTENSIONS_ID = 'hhaomjibdihmijegdhdafkllkbggdgoj';
/**
 * Invokes a remote test util exported by Files.app's extension over the
 * cross-extension message channel. See: test_util.js.
 *
 * @param {string} func Function name.
 * @param {?string} appId Target window's App ID or null for functions
 *     not requiring a window.
 * @param {Array.<*>} args Array of arguments.
 * @param {function(*)} callback Callback handling the function's result.
 */
function callRemoteTestUtil(func, appId, args, callback) {
  var request = {
    func: func,
    appId: appId,
    args: args
  };
  chrome.runtime.sendMessage(FILE_MANAGER_EXTENSIONS_ID, request, callback);
}
chrome.test.runTests([
  // Waits for the C++ code to send a string identifying a test, then runs that
  // test.
  function testRunner() {
    // Incognito context selects the "guest" variant of the test set.
    var command = chrome.extension.inIncognitoContext ? 'which test guest' :
                                                        'which test non-guest';
    chrome.test.sendMessage(command, function(testCaseName) {
      // Run one of the test cases defined in the testcase namespace, in
      // test_cases.js. The test case name is passed via StartTest call in
      // file_manager_browsertest.cc.
      if (testcase[testCaseName])
        testcase[testCaseName]();
      else
        chrome.test.fail('Bogus test name passed to testRunner()');
    });
  }
]);
| plxaye/chromium | src/chrome/test/data/extensions/api_test/file_manager_browsertest/background.js | JavaScript | apache-2.0 | 1,527 |
/*
Copyright 2007-2009 WebDriver committers
Copyright 2007-2009 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "stdafx.h"
#include "webdriver.h"
#include "finder.h"
#include "interactions.h"
#include "InternetExplorerDriver.h"
#include "logging.h"
#include "jsxpath.h"
#include "cookies.h"
#include "sizzle.h"
#include "utils.h"
#include "atoms.h"
#include "IEReturnTypes.h"
#include "windowHandling.h"
#include <stdio.h>
#include <iostream>
#include <string>
#include <vector>
/*
 * END_TRY: shared exception tail for the exported wd* entry points.
 * Maps a thrown std::wstring containing "TIME OUT" to ETIMEOUT, logs any
 * other wstring and returns EEXPECTEDERROR, and converts every remaining
 * exception type to EUNHANDLEDERROR. Must follow an opening `try {`.
 */
#define END_TRY catch(std::wstring& m) \
	{ \
		if (m.find(L"TIME OUT") != std::wstring::npos) { return ETIMEOUT; } \
		wcerr << m.c_str() << endl; \
		LOG(WARN) << "Last error: " << GetLastError(); \
		return EEXPECTEDERROR; \
	} \
	catch (...) \
	{ \
	safeIO::CoutA("CException caught in dll", true); \
	return EUNHANDLEDERROR; }
/* Opaque handle types passed across the flat extern "C" API boundary. */

struct WebDriver {
	InternetExplorerDriver *ie;   // wrapped browser instance (owned)
	long implicitWaitTimeout;     // ms; polling budget used by endAt()
};

struct WebElement {
	ElementWrapper *element;      // wrapped DOM element (owned)
};

struct ScriptArgs {
	LONG currentIndex;            // next free slot in args
	int maxLength;                // capacity requested at creation
	SAFEARRAY* args;
};

struct ScriptResult {
	CComVariant result;           // raw COM variant produced by the script
};

struct StringWrapper {
	wchar_t *text;                // heap wide string (freed by wdFreeString)
};

struct ElementCollection {
	std::vector<ElementWrapper*>* elements;
};

struct StringCollection {
	std::vector<std::wstring>* strings;
};

// Most recently created driver instance (set in wdNewDriverInstance).
// NOTE(review): not read anywhere in this chunk — presumably consumed by
// other translation units; confirm before removing.
InternetExplorerDriver* openIeInstance = NULL;
/*
 * endAt: compute the absolute clock() deadline for the implicit-wait
 * polling loops (wdFindElement* and friends).
 *
 * driver->implicitWaitTimeout is in milliseconds. The previous code
 * divided by 1000 in integer arithmetic first, which truncated the
 * sub-second part (e.g. 1500ms became 1s) and then special-cased
 * timeouts below one second by adding a whole extra second. Doing the
 * conversion in floating point preserves the requested resolution for
 * all values; a timeout of 0 still yields a deadline of "now", so each
 * polling loop runs exactly once.
 */
clock_t endAt(WebDriver* driver) {
	double seconds = driver->implicitWaitTimeout / 1000.0;
	return clock() + (clock_t)(seconds * CLOCKS_PER_SEC);
}
/*
 * terminateIe: close every top-level IE window politely (WM_CLOSE), then
 * force-kill any surviving iexplore.exe processes via taskkill.
 * Returns SUCCESS, or EUNHANDLEDERROR if taskkill cannot be located or
 * spawned.
 */
int terminateIe()
{
	std::vector<HWND> allWindows;
	getTopLevelWindows(&allWindows);

	// Wait until all open windows are gone. Common case, no worries
	while (allWindows.size() > 0) {
		allWindows.clear();
		getTopLevelWindows(&allWindows);
		for (vector<HWND>::iterator curr = allWindows.begin();
			 curr != allWindows.end();
			 curr++) {
			SendMessage(*curr, WM_CLOSE, NULL, NULL);
		}

		// Pause to allow IE to process the message. If we don't do this and
		// we're using IE 8, and "Restore previous session" is enabled (an
		// increasingly common state) then a modal system dialog will be
		// displayed to the user. Not what we want.
		wait(500);
	}

	// If it's longer than this, we're on a very strange system
	wchar_t taskkillPath[256];
	if (!ExpandEnvironmentStrings(L"%SystemRoot%\\system32\\taskkill.exe", taskkillPath, 256))
	{
		cerr << "Unable to find taskkill application" << endl;
		return EUNHANDLEDERROR;
	}

	std::wstring args = L" /f /im iexplore.exe";
	STARTUPINFO startup_info;
	memset(&startup_info, 0, sizeof(startup_info));
	startup_info.cb = sizeof(startup_info);

	PROCESS_INFORMATION process_info;
	if (!CreateProcessW(taskkillPath, &args[0], NULL, NULL, false, DETACHED_PROCESS, NULL, NULL, &startup_info, &process_info))
	{
		cerr << "Could not execute taskkill. Bailing: " << GetLastError() << endl;
		return EUNHANDLEDERROR;
	}
	// Block until taskkill finishes, then release both process handles.
	WaitForSingleObject(process_info.hProcess, INFINITE);
	CloseHandle(process_info.hThread);
	CloseHandle(process_info.hProcess);

	return SUCCESS;
}
extern "C"
{
// String manipulation functions

/*
 * wdStringLength: store in *length the wrapped string's length INCLUDING
 * the NUL terminator (i.e. the buffer size needed for wdCopyString).
 * NOTE(review): the failure paths return raw -1/-2 instead of the
 * ENOSTRING-style codes used elsewhere — confirm callers before changing.
 */
int wdStringLength(StringWrapper* string, int* length)
{
	if (!string) {
		cerr << "No string to get length of" << endl;
		*length = -1;
		return -1;
	}
	if (!string->text) {
		cerr << "No underlying string to get length of" << endl;
		*length = -1;
		return -2;
	}
	size_t len = wcslen(string->text);
	*length = (int) len + 1;
	return SUCCESS;
}

/* wdFreeString: release a StringWrapper and its character buffer. */
int wdFreeString(StringWrapper* string)
{
	if (!string) {
		return  ENOSTRING;
	}
	if (string->text) delete[] string->text;
	delete string;
	return SUCCESS;
}

/*
 * wdCopyString: copy the wrapped text into a caller-supplied buffer of
 * `size` wide characters (size should come from wdStringLength).
 */
int wdCopyString(StringWrapper* source, int size, wchar_t* dest)
{
	if (!source) {
		cerr << "No source wrapper" << endl;
		return ENOSTRING;
	}
	if (!source->text) {
		cerr << "No source text" << endl;
		return ENOSTRING;
	}
	wcscpy_s(dest, size, source->text);
	return SUCCESS;
}
// Collection manipulation functions

/* Number of elements held by an ElementCollection. */
int wdcGetElementCollectionLength(ElementCollection* collection, int* length)
{
	if (!collection || !collection->elements) return ENOCOLLECTION;

	*length = (int) collection->elements->size();

	return SUCCESS;
}

/*
 * Fetch element `index` as a new WebElement handle (caller frees with
 * wdeFreeElement). NOTE(review): `index` is not bounds-checked; an
 * out-of-range value walks past the vector — callers must stay within
 * wdcGetElementCollectionLength.
 */
int wdcGetElementAtIndex(ElementCollection* collection, int index, WebElement** result)
{
	*result = NULL;
	if (!collection || !collection->elements) return ENOCOLLECTION;

	std::vector<ElementWrapper*>::const_iterator cur = collection->elements->begin();
	cur += index;

	WebElement* element = new WebElement();
	element->element = *cur;
	*result = element;

	return SUCCESS;
}

/* Number of strings held by a StringCollection. */
int wdcGetStringCollectionLength(StringCollection* collection, int* length)
{
	if (!collection) return ENOCOLLECTION;

	*length = (int) collection->strings->size();

	return SUCCESS;
}

/*
 * Fetch string `index` as a new StringWrapper (caller frees with
 * wdFreeString). NOTE(review): same missing bounds check as above.
 */
int wdcGetStringAtIndex(StringCollection* collection, int index, StringWrapper** result)
{
	*result = NULL;
	if (!collection) return ENOCOLLECTION;

	std::vector<std::wstring>::const_iterator cur = collection->strings->begin();
	cur += index;

	StringWrapper* wrapper = new StringWrapper();
	size_t size = (*cur).length() + 1;
	wrapper->text = new wchar_t[size];
	wcscpy_s(wrapper->text, size, (*cur).c_str());
	*result = wrapper;

	return SUCCESS;
}
// Element manipulation functions

/* Release a WebElement handle and its wrapped element. */
int wdeFreeElement(WebElement* element)
{
	if (!element)
		return ENOSUCHDRIVER;

	if (element->element) delete element->element;
	delete element;

	return SUCCESS;
}

/*
 * Release an ElementCollection. When alsoFreeElements is non-zero the
 * contained ElementWrappers are deleted too; otherwise ownership of the
 * elements must already have moved to WebElement handles.
 */
int wdFreeElementCollection(ElementCollection* collection, int alsoFreeElements)
{
	if (!collection || !collection->elements)
		return ENOSUCHCOLLECTION;

	if (alsoFreeElements) {
		std::vector<ElementWrapper*>::const_iterator cur = collection->elements->begin();
		std::vector<ElementWrapper*>::const_iterator end = collection->elements->end();

		while (cur != end) {
			delete *cur;
			cur++;
		}
	}

	delete collection->elements;
	delete collection;

	return SUCCESS;
}

/* Release a StringCollection and its strings. */
int wdFreeStringCollection(StringCollection* collection)
{
	if (!collection || !collection->strings)
		return ENOSUCHCOLLECTION;

	delete collection->strings;
	delete collection;

	return SUCCESS;
}

/* Release a ScriptArgs handle and the SAFEARRAY it owns. */
int wdFreeScriptArgs(ScriptArgs* scriptArgs)
{
	if (!scriptArgs || !scriptArgs->args)
		return ENOSUCHCOLLECTION;

	SafeArrayDestroy(scriptArgs->args);
	delete scriptArgs;

	return SUCCESS;
}

/* Release a ScriptResult, clearing the contained COM variant. */
int wdFreeScriptResult(ScriptResult* scriptResult)
{
	if (!scriptResult)
		return ENOCOLLECTION;

	VariantClear(&scriptResult->result);

	delete scriptResult;

	return SUCCESS;
}
// Driver manipulation functions

/*
 * wdFreeDriver: kill all IE instances (best-effort) and release the
 * driver handle. Always returns SUCCESS for a valid handle.
 */
int wdFreeDriver(WebDriver* driver)
{
	if (!driver || !driver->ie) return ENOSUCHDRIVER;
	try {
		terminateIe();
	} catch (...) {
		// Fine. We're quitting anyway.
	}
	delete driver->ie;
	delete driver;
	driver = NULL;

	// Let the IE COM instance fade away
	wait(4000);

	return SUCCESS;
}

/*
 * wdNewDriverInstance: kill any existing IE processes, then create a new
 * visible InternetExplorerDriver wrapped in a WebDriver handle with a
 * zero implicit-wait timeout. The new driver is also recorded in the
 * openIeInstance global.
 */
int wdNewDriverInstance(WebDriver** result)
{
	*result = NULL;
	TRY
	{
		terminateIe();

/*
		wchar_t iePath[256];
		if (!ExpandEnvironmentStrings(L"%ProgramFiles%\\Internet Explorer\\iexplore.exe", iePath, 256))
		{
			cerr << "Unable to find IE" << endl;
			return EUNHANDLEDERROR;
		}

		memset(&startup_info, 0, sizeof(startup_info));
		startup_info.cb = sizeof(startup_info);

		args = L"about:blank";
		if (!CreateProcessW(iePath, &args[0], NULL, NULL, false, 0, NULL, NULL, &startup_info, &process_info))
		{
			cerr << "Could not execute IE. Bailing: " << GetLastError() << endl;
			return EUNHANDLEDERROR;
		}
*/
		WebDriver *driver = new WebDriver();
		driver->ie = new InternetExplorerDriver();
		driver->ie->setVisible(true);
		driver->implicitWaitTimeout = 0;

		openIeInstance = driver->ie;

		*result = driver;

		return SUCCESS;
	}
	END_TRY

	return ENOSUCHDRIVER;
}
/* Navigate to `url` and block until the page load completes. */
int wdGet(WebDriver* driver, const wchar_t* url)
{
	if (!driver || !driver->ie) return ENOSUCHDRIVER;

	try {
		driver->ie->get(url);
		driver->ie->waitForNavigateToFinish();

		return SUCCESS;
	} END_TRY;
}

/* History back. */
int wdGoBack(WebDriver* driver)
{
	if (!driver || !driver->ie) return ENOSUCHDRIVER;

	try {
		driver->ie->goBack();
		return SUCCESS;
	} END_TRY;
}

/* History forward. */
int wdGoForward(WebDriver* driver)
{
	if (!driver || !driver->ie) return ENOSUCHDRIVER;

	try {
		driver->ie->goForward();
		return SUCCESS;
	} END_TRY;
}

/* Reload by re-navigating to the current URL. */
int wdRefresh(WebDriver* driver)
{
	if (!driver || !driver->ie) return ENOSUCHDRIVER;

	StringWrapper* wrapper;
	int result = wdGetCurrentUrl(driver, &wrapper);
	if (result != SUCCESS) {
		return result;
	}

	result = wdGet(driver, wrapper->text);
	wdFreeString(wrapper);
	return result;
}

/* Close the current browser window (does not free the driver handle). */
int wdClose(WebDriver* driver)
{
	if (!driver || !driver->ie) return ENOSUCHDRIVER;

	try {
		driver->ie->close();
		return SUCCESS;
	} END_TRY
}

/* *value receives 1 if the browser window is visible, else 0. */
int wdGetVisible(WebDriver* driver, int* value)
{
	if (!driver || !driver->ie) return ENOSUCHDRIVER;

	try {
		*value = driver->ie->getVisible() ? 1 : 0;
		return SUCCESS;
	} END_TRY;
}

/* Show (non-zero) or hide (zero) the browser window. */
int wdSetVisible(WebDriver* driver, int value)
{
	if (!driver || !driver->ie) return ENOSUCHDRIVER;

	try {
		driver->ie->setVisible(value != 0);
	} END_TRY;

	return SUCCESS;
}
int wdGetCurrentUrl(WebDriver* driver, StringWrapper** result)
{
*result = NULL;
if (!driver || !driver->ie) return ENOSUCHDRIVER;
try {
const std::wstring originalString(driver->ie->getCurrentUrl());
size_t length = originalString.length() + 1;
wchar_t* toReturn = new wchar_t[length];
wcscpy_s(toReturn, length, originalString.c_str());
StringWrapper* res = new StringWrapper();
res->text = toReturn;
*result = res;
return SUCCESS;
} END_TRY;
}
int wdGetTitle(WebDriver* driver, StringWrapper** result)
{
*result = NULL;
if (!driver || !driver->ie) return ENOSUCHDRIVER;
try {
const std::wstring originalString(driver->ie->getTitle());
size_t length = originalString.length() + 1;
wchar_t* toReturn = new wchar_t[length];
wcscpy_s(toReturn, length, originalString.c_str());
StringWrapper* res = new StringWrapper();
res->text = toReturn;
*result = res;
return SUCCESS;
} END_TRY;
}
int wdGetPageSource(WebDriver* driver, StringWrapper** result)
{
*result = NULL;
if (!driver || !driver->ie) return ENOSUCHDRIVER;
try {
const std::wstring originalString(driver->ie->getPageSource());
size_t length = originalString.length() + 1;
wchar_t* toReturn = new wchar_t[length];
wcscpy_s(toReturn, length, originalString.c_str());
StringWrapper* res = new StringWrapper();
res->text = toReturn;
*result = res;
return SUCCESS;
} END_TRY;
}
int wdGetCookies(WebDriver* driver, StringWrapper** result)
{
*result = NULL;
if (!driver || !driver->ie) return ENOSUCHDRIVER;
try {
const std::wstring originalString(driver->ie->getCookies());
size_t length = originalString.length() + 1;
wchar_t* toReturn = new wchar_t[length];
wcscpy_s(toReturn, length, originalString.c_str());
StringWrapper* res = new StringWrapper();
res->text = toReturn;
*result = res;
return SUCCESS;
} END_TRY;
}
/* Add a cookie (string format defined by InternetExplorerDriver::addCookie). */
int wdAddCookie(WebDriver* driver, const wchar_t* cookie)
{
	if (!driver || !driver->ie) return ENOSUCHDRIVER;

	try {
		return driver->ie->addCookie(cookie);
	} END_TRY;
}

/*
 * Delete the named cookie by running the DELETECOOKIES script atom in the
 * page with the cookie name as its single argument.
 */
int wdDeleteCookie(WebDriver* driver, const wchar_t* cookieName)
{
	if (!driver || !driver->ie) return ENOSUCHDRIVER;

	// Inject the XPath engine
	std::wstring script;
	for (int i = 0; DELETECOOKIES[i]; i++) {
		script += DELETECOOKIES[i];
	}

	ScriptArgs* args;
	int result = wdNewScriptArgs(&args, 1);
	if (result != SUCCESS) {
		return result;
	}
	wdAddStringScriptArg(args, cookieName);

	ScriptResult* scriptResult = NULL;
	result = wdExecuteScript(driver, script.c_str(), args, &scriptResult);
	wdFreeScriptArgs(args);
	if (scriptResult) delete scriptResult;

	return result;
}
/*
 * Return the element that currently has focus, wrapped as a new
 * WebElement handle (caller frees with wdeFreeElement).
 */
int wdSwitchToActiveElement(WebDriver* driver, WebElement** result)
{
	*result = NULL;
	if (!driver || !driver->ie) return ENOSUCHDRIVER;

	try {
		ElementWrapper* element = driver->ie->getActiveElement();
		if (!element)
			return ENOSUCHELEMENT;

		WebElement* toReturn = new WebElement();
		toReturn->element = element;
		*result = toReturn;

		return SUCCESS;
	} END_TRY;
}
/*
 * wdSwitchToWindow: focus the window identified by `name`. The target
 * window may not exist yet, so the lookup is retried up to 8 times with
 * a 500ms pause between attempts. Returns the status of the last
 * attempt.
 *
 * Changes vs. the previous version: `result` is now always initialized
 * at its declaration (the old `int result;` relied on the loop body
 * running to assign it), and we no longer sleep 500ms after the final
 * failed attempt.
 */
int wdSwitchToWindow(WebDriver* driver, const wchar_t* name)
{
	if (!driver || !driver->ie) return ENOSUCHDRIVER;

	try {
		// TODO(simon): Make this configurable
		int result = driver->ie->switchToWindow(name);
		for (int i = 1; result != SUCCESS && i < 8; i++) {
			wait(500);
			result = driver->ie->switchToWindow(name);
		}
		return result;
	} END_TRY;
}
/*
 * Focus the frame identified by `path`, retrying up to 8 times (the
 * frame may not have been created yet). Returns ENOSUCHFRAME if it never
 * appears.
 */
int wdSwitchToFrame(WebDriver* driver, const wchar_t* path)
{
	if (!driver || !driver->ie) return ENOSUCHDRIVER;

	try {
		// TODO(simon): Make this configurable
		for (int i = 0; i < 8; i++) {
			bool result = driver->ie->switchToFrame(path);
			if (result) { return SUCCESS; }
			wait(500);
		}
		return ENOSUCHFRAME;
	} END_TRY;
}

/* Block until the current navigation finishes. */
int wdWaitForLoadToComplete(WebDriver* driver)
{
	if (!driver || !driver->ie) return ENOSUCHDRIVER;

	try {
		driver->ie->waitForNavigateToFinish();
		return SUCCESS;
	} END_TRY;
}
/* Opaque handle string identifying the focused browser window. */
int wdGetCurrentWindowHandle(WebDriver* driver, StringWrapper** handle)
{
	if (!driver || !driver->ie) return ENOSUCHDRIVER;

	try {
		const std::wstring originalString(driver->ie->getHandle());

		// TODO(simon): Check that the handle is in the map of known driver instances

		size_t length = originalString.length() + 1;
		wchar_t* toReturn = new wchar_t[length];

		wcscpy_s(toReturn, length, originalString.c_str());

		StringWrapper* res = new StringWrapper();
		res->text = toReturn;

		*handle = res;

		return SUCCESS;
	} END_TRY;
}

/*
 * Handles of every top-level browser window, as a new StringCollection
 * (caller frees with wdFreeStringCollection).
 */
int wdGetAllWindowHandles(WebDriver* driver, StringCollection** handles)
{
	if (!driver || !driver->ie) return ENOSUCHDRIVER;

	*handles = NULL;

	try {
		std::vector<std::wstring> rawHandles = driver->ie->getAllHandles();
		StringCollection* collection = new StringCollection();
		collection->strings = new std::vector<std::wstring>();
		for (std::vector<std::wstring>::iterator curr = rawHandles.begin();
			 curr != rawHandles.end();
			 curr++) {
				 collection->strings->push_back(std::wstring(*curr));
		}
		*handles = collection;

		return SUCCESS;
	} END_TRY;
}
/*
 * verifyFresh: SUCCESS if the element handle is valid and still attached
 * to the live DOM; ENOSUCHELEMENT for a null handle, EOBSOLETEELEMENT
 * for a stale one. Called at the top of every wde* entry point.
 */
int verifyFresh(WebElement* element)
{
	if (!element || !element->element) { return ENOSUCHELEMENT; }

	try {
		if (!element->element->isFresh())
		{
			return EOBSOLETEELEMENT;
		}
	} END_TRY;
	return SUCCESS;
}

/* Click the element. */
int wdeClick(WebElement* element)
{
	int res = verifyFresh(element);	if (res != SUCCESS) { return res; }

	try {
		res = element->element->click();

		return res;
	} END_TRY;
}
/*
 * wdeGetAttribute: read attribute `name` of `element` by executing the
 * GET_ATTRIBUTE script atom in the element's owning browser. On success
 * *result holds a new StringWrapper (caller frees with wdFreeString), or
 * stays NULL when the attribute is absent/null.
 *
 * BUG FIX: the previous version deleted the temporary WebDriver wrapper
 * and then passed the dangling pointer to wdGetScriptResultType — a
 * use-after-free. The type is now queried while the wrapper is alive.
 */
int wdeGetAttribute(WebDriver* driver, WebElement* element, const wchar_t* name, StringWrapper** result)
{
	*result = NULL;
	int res = verifyFresh(element);	if (res != SUCCESS) { return res; }

	try {
		// Assemble the getAttribute atom into an anonymous function taking
		// (element, attributeName).
		std::wstring script(L"(function() { return function(){ ");
		for (int j = 0; GET_ATTRIBUTE[j]; j++) {
			script += GET_ATTRIBUTE[j];
			script += L"\n";
		}
		script += L"var element = arguments[0];\n";
		script += L"var attributeName = arguments[1];\n";
		script += L"return getAttribute(element, attributeName);\n";
		script += L"};})();";

		ScriptArgs* args;
		res = wdNewScriptArgs(&args, 2);
		if (res != SUCCESS) {
			return res;
		}
		wdAddElementScriptArg(args, element);
		wdAddStringScriptArg(args, name);

		// Temporary wrapper around the element's parent browser; its `ie`
		// pointer is borrowed, so it is nulled out before deletion.
		WebDriver* tempDriver = new WebDriver();
		tempDriver->ie = element->element->getParent();

		ScriptResult* scriptResult = NULL;
		res = wdExecuteScript(tempDriver, script.c_str(), args, &scriptResult);
		wdFreeScriptArgs(args);

		if (res != SUCCESS)
		{
			tempDriver->ie = NULL;
			delete tempDriver;
			wdFreeScriptResult(scriptResult);
			return res;
		}

		int type;
		wdGetScriptResultType(tempDriver, scriptResult, &type);
		tempDriver->ie = NULL;
		delete tempDriver;

		if (type != TYPE_EMPTY && scriptResult->result.vt != VT_NULL) {
			const std::wstring originalString(comvariant2cw(scriptResult->result));
			size_t length = originalString.length() + 1;
			wchar_t* toReturn = new wchar_t[length];

			wcscpy_s(toReturn, length, originalString.c_str());

			*result = new StringWrapper();
			(*result)->text = toReturn;
		}

		wdFreeScriptResult(scriptResult);
		return SUCCESS;
	} END_TRY;
}
/* Computed value of the CSS property `name`, as a new StringWrapper. */
int wdeGetValueOfCssProperty(WebElement* element, const wchar_t* name, StringWrapper** result)
{
	*result = NULL;
	int res = verifyFresh(element);	if (res != SUCCESS) { return res; }

	try {
		const std::wstring originalString(element->element->getValueOfCssProperty(name));
		size_t length = originalString.length() + 1;
		wchar_t* toReturn = new wchar_t[length];

		wcscpy_s(toReturn, length, originalString.c_str());

		// Note: this `res` intentionally shadows the outer int res.
		StringWrapper* res = new StringWrapper();
		res->text = toReturn;

		*result = res;

		return SUCCESS;
	} END_TRY;
}

/* Visible text of the element, as a new StringWrapper. */
int wdeGetText(WebElement* element, StringWrapper** result)
{
	*result = NULL;
	int res = verifyFresh(element);	if (res != SUCCESS) { return res; }

	try {
		const std::wstring originalString(element->element->getText());
		size_t length = originalString.length() + 1;
		wchar_t* toReturn = new wchar_t[length];

		wcscpy_s(toReturn, length, originalString.c_str());

		StringWrapper* res = new StringWrapper();
		res->text = toReturn;

		*result = res;

		return SUCCESS;
	} END_TRY;
}

/* Tag name of the element, as a new StringWrapper. */
int wdeGetTagName(WebElement* element, StringWrapper** result)
{
	*result = NULL;
	int res = verifyFresh(element);	if (res != SUCCESS) { return res; }

	try {
		const std::wstring originalString(element->element->getTagName());
		size_t length = originalString.length() + 1;
		wchar_t* toReturn = new wchar_t[length];

		wcscpy_s(toReturn, length, originalString.c_str());

		StringWrapper* res = new StringWrapper();
		res->text = toReturn;

		*result = res;

		return SUCCESS;
	} END_TRY;
}
/*
 * *result receives 1 when the element's "selected" attribute reads as
 * the string "true", else 0. Implemented on top of wdeGetAttribute with
 * a temporary WebDriver wrapper around the element's parent browser.
 */
int wdeIsSelected(WebElement* element, int* result)
{
	*result = 0;
	try {
		StringWrapper* wrapper;
		WebDriver* driver = new WebDriver();
		driver->ie = element->element->getParent();
		int res = wdeGetAttribute(driver, element, L"selected", &wrapper);
		// `ie` is borrowed from the element; detach before deleting.
		driver->ie = NULL;
		delete driver;

		if (res != SUCCESS)
		{
			return res;
		}

		*result = wrapper && wrapper->text && wcscmp(L"true", wrapper->text) == 0 ? 1 : 0;
		wdFreeString(wrapper);

		return SUCCESS;
	} END_TRY;
}

/* Select the element (checkbox/radio/option). */
int wdeSetSelected(WebElement* element)
{
	int res = verifyFresh(element);	if (res != SUCCESS) { return res; }

	try {
		return element->element->setSelected();
	} END_TRY;
}

/* Toggle selection state; on success *result receives the new state. */
int wdeToggle(WebElement* element, int* result)
{
	*result = 0;
	int res = verifyFresh(element);	if (res != SUCCESS) { return res; }

	try {
		// Inner `res` intentionally shadows the freshness-check result.
		int res = element->element->toggle();

		if (res == SUCCESS) {
			return wdeIsSelected(element, result);
		}

		return res;
	} END_TRY;
}
/* *result receives 1 if the element is enabled, else 0. */
int wdeIsEnabled(WebElement* element, int* result)
{
	int res = verifyFresh(element);	if (res != SUCCESS) { return res; }

	try {
		*result = element->element->isEnabled() ? 1 : 0;

		return SUCCESS;
	} END_TRY;
}

/* *result receives 1 if the element is displayed, else 0. */
int wdeIsDisplayed(WebElement* element, int* result)
{
	int res = verifyFresh(element);	if (res != SUCCESS) { return res; }

	try {
		*result = element->element->isDisplayed() ? 1 : 0;

		return SUCCESS;
	} END_TRY;
}

/* Type `text` into the element (full key press+release sequence). */
int wdeSendKeys(WebElement* element, const wchar_t* text)
{
	int res = verifyFresh(element);	if (res != SUCCESS) { return res; }

	try {
		return element->element->sendKeys(text);
	} END_TRY;
}

/* Send key-down events only. */
int wdeSendKeyPress(WebElement* element, const wchar_t* text)
{
	int res = verifyFresh(element);	if (res != SUCCESS) { return res; }

	try {
		return element->element->sendKeyPress(text);
	} END_TRY;
}

/* Send key-up events only. */
int wdeSendKeyRelease(WebElement* element, const wchar_t* text)
{
	int res = verifyFresh(element);	if (res != SUCCESS) { return res; }

	try {
		return element->element->sendKeyRelease(text);
	} END_TRY;
}

/* Clear the element's value (text inputs / textareas). */
int wdeClear(WebElement* element)
{
	int res = verifyFresh(element);	if (res != SUCCESS) { return res; }

	try {
		element->element->clear();
		return SUCCESS;
	} END_TRY;
}

/* Submit the form this element belongs to. */
int wdeSubmit(WebElement* element)
{
	int res = verifyFresh(element);	if (res != SUCCESS) { return res; }

	try {
		element->element->submit();
		return SUCCESS;
	} END_TRY;
}
/*
 * Scroll the element into view, then report its owning window handle and
 * on-screen rectangle.
 */
int wdeGetDetailsOnceScrolledOnToScreen(WebElement* element, HWND* hwnd, long* x, long* y, long* width, long* height)
{
	int res = verifyFresh(element);	if (res != SUCCESS) { return res; }

	try {
		element->element->getLocationWhenScrolledIntoView(hwnd, x, y, width, height);
		return SUCCESS;
	} END_TRY;
}

/* Element position without scrolling. */
int wdeGetLocation(WebElement* element, long* x, long* y)
{
	int res = verifyFresh(element);	if (res != SUCCESS) { return res; }

	try {
		element->element->getLocation(x, y);

		return SUCCESS;
	} END_TRY;
}

/* Element width and height. */
int wdeGetSize(WebElement* element, long* width, long* height)
{
	int res = verifyFresh(element);	if (res != SUCCESS) { return res; }

	try {
		int result = element->element->getWidth(width);
		if (result != SUCCESS) {
			return result;
		}
		result = element->element->getHeight(height);

		return result;
	} END_TRY;
}
/*
 * Find the first element with DOM id `id`, searching under `element`
 * when given (NULL element = whole document). Polls until the implicit
 * wait deadline (endAt) elapses; returns the last lookup status on
 * timeout. This function is the reference polling pattern — note that
 * the loop ASSIGNS `res` rather than redeclaring it.
 */
int wdFindElementById(WebDriver* driver, WebElement* element, const wchar_t* id, WebElement** result)
{
	*result = NULL;
	if (!driver || !driver->ie) { return ENOSUCHDRIVER; }

	InternetExplorerDriver* ie = driver->ie;
	CComPtr<IHTMLElement> elem;
	if (element && element->element) {
		elem = element->element->getWrappedElement();
	}

	try {
		clock_t end = endAt(driver);
		int res = ENOSUCHELEMENT;
		do {
			ElementWrapper* wrapper;
			res = ie->selectElementById(elem, id, &wrapper);

			if (res != SUCCESS) {
				continue;
			}

			WebElement* toReturn = new WebElement();
			toReturn->element = wrapper;

			*result = toReturn;
			return SUCCESS;
		} while (clock() < end);

		return res;
	} END_TRY;
}
/*
 * wdFindElementsById: collect every element with DOM id `id` under
 * `element` (NULL = whole document), polling until the implicit-wait
 * deadline. Always returns SUCCESS with a (possibly empty) collection.
 *
 * BUG FIX: the previous version reassigned collection->elements on every
 * polling iteration without freeing the prior vector, leaking one
 * std::vector per retry. The loop only repeats while the result set is
 * empty, so freeing just the vector (no contained elements) suffices.
 */
int wdFindElementsById(WebDriver* driver, WebElement* element, const wchar_t* id, ElementCollection** result)
{
	*result = NULL;
	if (!driver || !driver->ie) { return ENOSUCHDRIVER; }

	try {
		InternetExplorerDriver* ie = driver->ie;
		CComPtr<IHTMLElement> elem;
		if (element && element->element) {
			elem = element->element->getWrappedElement();
		}

		clock_t end = endAt(driver);
		ElementCollection* collection = new ElementCollection();
		collection->elements = NULL;
		*result = collection;

		do {
			std::vector<ElementWrapper*>* found = ie->selectElementsById(elem, id);
			if (collection->elements) {
				// Previous attempt yielded an empty vector; release it.
				delete collection->elements;
			}
			collection->elements = found;
			if (found->size() > 0) {
				return SUCCESS;
			}
		} while (clock() < end);

		return SUCCESS;
	} END_TRY;
}
// Finds the first element whose name attribute matches 'name', searching
// under 'element' when supplied, otherwise the whole document. Polls until
// the implicit-wait deadline computed by endAt(driver). On success the
// caller owns *result; on failure the last selector error code is returned.
int wdFindElementByName(WebDriver* driver, WebElement* element, const wchar_t* name, WebElement** result)
{
    *result = NULL;
    if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
    InternetExplorerDriver* ie = driver->ie;
    CComPtr<IHTMLElement> elem;
    if (element && element->element) {
        elem = element->element->getWrappedElement();
    }
    try {
        clock_t end = endAt(driver);
        int res = ENOSUCHELEMENT;
        do {
            ElementWrapper* wrapper;
            // Assign the OUTER 'res' (the original declared a shadowing
            // 'int res' here, so the code after the loop always returned
            // ENOSUCHELEMENT regardless of the real failure). This matches
            // the pattern used by wdFindElementById.
            res = ie->selectElementByName(elem, name, &wrapper);
            if (res != SUCCESS) {
                continue;
            }
            WebElement* toReturn = new WebElement();
            toReturn->element = wrapper;
            *result = toReturn;
            return SUCCESS;
        } while (clock() < end);
        return res;
    } END_TRY;
}
// Finds all elements whose name attribute matches 'name' under the optional
// context element. Polls until the implicit-wait deadline; returns SUCCESS
// even when the final collection is empty. The caller owns *result.
int wdFindElementsByName(WebDriver* driver, WebElement* element, const wchar_t* name, ElementCollection** result)
{
    *result = NULL;
    // Guard against a missing driver, matching every sibling finder; the
    // original dereferenced driver->ie without this check.
    if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
    try {
        InternetExplorerDriver* ie = driver->ie;
        CComPtr<IHTMLElement> elem;
        if (element && element->element) {
            elem = element->element->getWrappedElement();
        }
        ElementCollection* collection = new ElementCollection();
        *result = collection;
        clock_t end = endAt(driver);
        do {
            collection->elements = driver->ie->selectElementsByName(elem, name);
            if (collection->elements->size() > 0) {
                return SUCCESS;
            }
        } while (clock() < end);
        return SUCCESS;
    } END_TRY;
}
// Finds the first element carrying the given CSS class, searching under
// 'element' when supplied, otherwise the whole document. Polls until the
// implicit-wait deadline. On success the caller owns *result; on failure the
// last selector error code is returned.
int wdFindElementByClassName(WebDriver* driver, WebElement* element, const wchar_t* className, WebElement** result)
{
    *result = NULL;
    if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
    InternetExplorerDriver* ie = driver->ie;
    CComPtr<IHTMLElement> elem;
    if (element && element->element) {
        elem = element->element->getWrappedElement();
    }
    try {
        clock_t end = endAt(driver);
        int res = ENOSUCHELEMENT;
        do {
            ElementWrapper* wrapper;
            // Assign the OUTER 'res': the original's shadowing 'int res'
            // meant the post-loop return always reported ENOSUCHELEMENT,
            // discarding the real failure code.
            res = ie->selectElementByClassName(elem, className, &wrapper);
            if (res != SUCCESS) {
                continue;
            }
            WebElement* toReturn = new WebElement();
            toReturn->element = wrapper;
            *result = toReturn;
            return SUCCESS;
        } while (clock() < end);
        return res;
    } END_TRY;
}
// Finds all elements carrying the given CSS class under the optional context
// element. Polls until the implicit-wait deadline; returns SUCCESS even when
// the final collection is empty. The caller owns *result.
int wdFindElementsByClassName(WebDriver* driver, WebElement* element, const wchar_t* className, ElementCollection** result)
{
    *result = NULL;
    if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
    try {
        InternetExplorerDriver* ie = driver->ie;
        CComPtr<IHTMLElement> elem;
        if (element && element->element) {
            elem = element->element->getWrappedElement();
        }
        clock_t end = endAt(driver);
        ElementCollection* collection = new ElementCollection();
        *result = collection;
        do {
            collection->elements = driver->ie->selectElementsByClassName(elem, className);
            if (collection->elements->size() > 0) {
                return SUCCESS;
            }
        } while (clock() < end);
        return SUCCESS;
    } END_TRY;
}
// Finds the first element matching a CSS selector by injecting the bundled
// Sizzle engine (used only when the document lacks querySelector) and running
// it via wdExecuteScript. Polls until the implicit-wait deadline.
int wdFindElementByCss(WebDriver* driver, WebElement* element, const wchar_t* selector, WebElement** out)
{
    *out = NULL;
    if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
    try {
        clock_t end = endAt(driver);
        int result = ENOSUCHELEMENT;
        do {
            // Build the lookup function: prefer native querySelector, fall
            // back to Sizzle with arguments[1] (the context element, if any)
            // as the search root.
            std::wstring script(L"(function() { return function(){");
            for (int i = 0; SIZZLE[i]; i++) {
                script += SIZZLE[i];
                script += L"\n";
            }
            script += L"var root = arguments[1] ? arguments[1] : document.documentElement;";
            script += L"if (root['querySelector']) { return root.querySelector(arguments[0]); } ";
            script += L"var results = []; Sizzle(arguments[0], root, results);";
            script += L"return results.length > 0 ? results[0] : null;";
            script += L"};})();";
            // Call it
            ScriptArgs* args;
            result = wdNewScriptArgs(&args, 2);
            if (result != SUCCESS) {
                wdFreeScriptArgs(args);
                continue;
            }
            result = wdAddStringScriptArg(args, selector);
            if (result != SUCCESS) {
                wdFreeScriptArgs(args);
                continue;
            }
            if (element) {
                result = wdAddElementScriptArg(args, element);
            }
            if (result != SUCCESS) {
                wdFreeScriptArgs(args);
                continue;
            }
            ScriptResult* queryResult;
            result = wdExecuteScript(driver, script.c_str(), args, &queryResult);
            wdFreeScriptArgs(args);
            // And be done. On failure queryResult was set to NULL by
            // wdExecuteScript; presumably wdFreeScriptResult tolerates
            // NULL — confirm against its implementation.
            if (result == SUCCESS) {
                int type = 0;
                result = wdGetScriptResultType(driver, queryResult, &type);
                if (type != TYPE_EMPTY) {
                    result = wdGetElementScriptResult(queryResult, driver, out);
                } else {
                    // Nothing matched yet: free and retry until the deadline.
                    result = ENOSUCHELEMENT;
                    wdFreeScriptResult(queryResult);
                    continue;
                }
            }
            wdFreeScriptResult(queryResult);
            return result;
        } while (clock() < end);
        return result;
    } END_TRY;
}
// Finds all elements matching a CSS selector, using native querySelectorAll
// when available and the bundled Sizzle engine otherwise. The returned array
// is unpacked item-by-item into an ElementCollection owned by the caller.
int wdFindElementsByCss(WebDriver* driver, WebElement* element, const wchar_t* selector, ElementCollection** out)
{
    *out = NULL;
    if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
    try {
        clock_t end = endAt(driver);
        int result = EUNHANDLEDERROR;
        do {
            // Call it
            std::wstring script(L"(function() { return function(){");
            for (int i = 0; SIZZLE[i]; i++) {
                script += SIZZLE[i];
                script += L"\n";
            }
            script += L"var root = arguments[1] ? arguments[1] : document.documentElement;";
            script += L"if (root['querySelectorAll']) { return root.querySelectorAll(arguments[0]); } ";
            script += L"var results = []; Sizzle(arguments[0], root, results);";
            script += L"return results;";
            script += L"};})();";
            // Call it
            ScriptArgs* args;
            result = wdNewScriptArgs(&args, 2);
            if (result != SUCCESS) {
                wdFreeScriptArgs(args);
                continue;
            }
            result = wdAddStringScriptArg(args, selector);
            if (result != SUCCESS) {
                wdFreeScriptArgs(args);
                continue;
            }
            // Unlike the single-element variant, the element arg is added even
            // when 'element' is NULL (wdAddElementScriptArg stores VT_EMPTY).
            result = wdAddElementScriptArg(args, element);
            if (result != SUCCESS) {
                wdFreeScriptArgs(args);
                continue;
            }
            ScriptResult* queryResult;
            result = wdExecuteScript(driver, script.c_str(), args, &queryResult);
            wdFreeScriptArgs(args);
            // And be done
            if (result != SUCCESS) {
                wdFreeScriptResult(queryResult);
                return result;
            }
            ElementCollection* elements = new ElementCollection();
            elements->elements = new std::vector<ElementWrapper*>();
            int length;
            result = wdGetArrayLengthScriptResult(driver, queryResult, &length);
            if (result != SUCCESS) {
                wdFreeScriptResult(queryResult);
                return result;
            }
            for (long i = 0; i < length; i++) {
                ScriptResult* getElemRes;
                wdGetArrayItemFromScriptResult(driver, queryResult, i, &getElemRes);
                WebElement* e;
                wdGetElementScriptResult(getElemRes, driver, &e);
                // Transfer the wrapper into the collection, then discard the
                // temporary WebElement shell.
                elements->elements->push_back(e->element);
                e->element = NULL;
                delete e;
            }
            wdFreeScriptResult(queryResult);
            *out = elements;
            return SUCCESS;
        } while (clock() < end);
        return result;
    } END_TRY;
}
// Finds the first anchor whose visible text equals 'linkText', searching
// under 'element' when supplied, otherwise the whole document. Polls until
// the implicit-wait deadline. On success the caller owns *result; on failure
// the last selector error code is returned.
int wdFindElementByLinkText(WebDriver* driver, WebElement* element, const wchar_t* linkText, WebElement** result)
{
    *result = NULL;
    if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
    InternetExplorerDriver* ie = driver->ie;
    CComPtr<IHTMLElement> elem;
    if (element && element->element) {
        elem = element->element->getWrappedElement();
    }
    try {
        clock_t end = endAt(driver);
        int res = ENOSUCHELEMENT;
        do {
            ElementWrapper* wrapper;
            // Assign the OUTER 'res': the original's shadowing 'int res'
            // made the post-loop return always ENOSUCHELEMENT, hiding the
            // real failure code.
            res = ie->selectElementByLink(elem, linkText, &wrapper);
            if (res != SUCCESS) {
                continue;
            }
            WebElement* toReturn = new WebElement();
            toReturn->element = wrapper;
            *result = toReturn;
            return SUCCESS;
        } while (clock() < end);
        return res;
    } END_TRY;
}
// Finds all anchors whose visible text equals 'linkText' under the optional
// context element. Polls until the implicit-wait deadline; returns SUCCESS
// even when the final collection is empty. The caller owns *result.
int wdFindElementsByLinkText(WebDriver* driver, WebElement* element, const wchar_t* linkText, ElementCollection** result)
{
    *result = NULL;
    if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
    try {
        InternetExplorerDriver* ie = driver->ie;
        CComPtr<IHTMLElement> elem;
        if (element && element->element) {
            elem = element->element->getWrappedElement();
        }
        ElementCollection* collection = new ElementCollection();
        *result = collection;
        clock_t end = endAt(driver);
        do {
            collection->elements = driver->ie->selectElementsByLink(elem, linkText);
            if (collection->elements->size() > 0) {
                return SUCCESS;
            }
        } while (clock() < end);
        return SUCCESS;
    } END_TRY;
}
// Finds the first anchor whose visible text contains 'linkText', searching
// under 'element' when supplied, otherwise the whole document. Polls until
// the implicit-wait deadline. On success the caller owns *result; on failure
// the last selector error code is returned.
int wdFindElementByPartialLinkText(WebDriver* driver, WebElement* element, const wchar_t* linkText, WebElement** result)
{
    *result = NULL;
    if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
    InternetExplorerDriver* ie = driver->ie;
    CComPtr<IHTMLElement> elem;
    if (element && element->element) {
        elem = element->element->getWrappedElement();
    }
    try {
        clock_t end = endAt(driver);
        int res = ENOSUCHELEMENT;
        do {
            ElementWrapper* wrapper;
            // Assign the OUTER 'res': the original declared a shadowing
            // 'int res' here, so the real failure code never reached the
            // post-loop return.
            res = ie->selectElementByPartialLink(elem, linkText, &wrapper);
            if (res != SUCCESS) {
                continue;
            }
            WebElement* toReturn = new WebElement();
            toReturn->element = wrapper;
            *result = toReturn;
            return SUCCESS;
        } while (clock() < end);
        return res;
    } END_TRY;
}
// Finds all anchors whose visible text contains 'linkText' under the optional
// context element. Polls until the implicit-wait deadline; returns SUCCESS
// even when the final collection is empty. The caller owns *result.
int wdFindElementsByPartialLinkText(WebDriver* driver, WebElement* element, const wchar_t* linkText, ElementCollection** result)
{
    *result = NULL;
    if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
    try {
        InternetExplorerDriver* ie = driver->ie;
        CComPtr<IHTMLElement> elem;
        if (element && element->element) {
            elem = element->element->getWrappedElement();
        }
        ElementCollection* collection = new ElementCollection();
        *result = collection;
        clock_t end = endAt(driver);
        do {
            collection->elements = driver->ie->selectElementsByPartialLink(elem, linkText);
            if (collection->elements->size() > 0) {
                return SUCCESS;
            }
        } while (clock() < end);
        return SUCCESS;
    } END_TRY;
}
// Finds the first element with the given tag name, searching under 'element'
// when supplied, otherwise the whole document. Polls until the implicit-wait
// deadline. On success the caller owns *result; on failure the last selector
// error code is returned.
int wdFindElementByTagName(WebDriver* driver, WebElement* element, const wchar_t* name, WebElement** result)
{
    *result = NULL;
    if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
    InternetExplorerDriver* ie = driver->ie;
    CComPtr<IHTMLElement> elem;
    if (element && element->element) {
        elem = element->element->getWrappedElement();
    }
    try {
        clock_t end = endAt(driver);
        int res = ENOSUCHELEMENT;
        do {
            ElementWrapper* wrapper;
            // Assign the OUTER 'res': the original's shadowing 'int res'
            // made the post-loop return always ENOSUCHELEMENT, discarding
            // the real failure code.
            res = ie->selectElementByTagName(elem, name, &wrapper);
            if (res != SUCCESS) {
                continue;
            }
            WebElement* toReturn = new WebElement();
            toReturn->element = wrapper;
            *result = toReturn;
            return SUCCESS;
        } while (clock() < end);
        return res;
    } END_TRY;
}
// Finds all elements with the given tag name under the optional context
// element. Polls until the implicit-wait deadline; returns SUCCESS even when
// the final collection is empty. The caller owns *result.
int wdFindElementsByTagName(WebDriver* driver, WebElement* element, const wchar_t* name, ElementCollection** result)
{
    *result = NULL;
    if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
    try {
        InternetExplorerDriver* ie = driver->ie;
        CComPtr<IHTMLElement> elem;
        if (element && element->element) {
            elem = element->element->getWrappedElement();
        }
        ElementCollection* collection = new ElementCollection();
        *result = collection;
        clock_t end = endAt(driver);
        do {
            collection->elements = driver->ie->selectElementsByTagName(elem, name);
            if (collection->elements->size() > 0) {
                return SUCCESS;
            }
        } while (clock() < end);
        return SUCCESS;
    } END_TRY;
}
// Injects the bundled XPath engine (the XPATHJS fragments) into the current
// page so that document.__webdriver_evaluate becomes callable by the XPath
// finders below. Returns the script-execution result code.
int injectXPathEngine(WebDriver* driver)
{
    // Inject the XPath engine
    std::wstring script;
    for (int i = 0; XPATHJS[i]; i++) {
        script += XPATHJS[i];
    }
    ScriptArgs* args;
    int result = wdNewScriptArgs(&args, 0);
    if (result != SUCCESS) {
        return result;
    }
    ScriptResult* scriptResult = NULL;
    result = wdExecuteScript(driver, script.c_str(), args, &scriptResult);
    wdFreeScriptArgs(args);
    // Release through the public API rather than a raw 'delete' so the
    // VARIANT held by the ScriptResult is cleared like everywhere else in
    // this file (a bare delete leaked any BSTR/IDispatch it contained).
    if (scriptResult) {
        wdFreeScriptResult(scriptResult);
    }
    return result;
}
// Finds the first element matching an XPath expression by injecting the
// bundled XPath engine and evaluating it with snapshot semantics (type 7).
// Polls until the implicit-wait deadline. On success the caller owns *out.
int wdFindElementByXPath(WebDriver* driver, WebElement* element, const wchar_t* xpath, WebElement** out)
{
    *out = NULL;
    if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
    try {
        clock_t end = endAt(driver);
        int result = ENOSUCHELEMENT;
        do {
            result = injectXPathEngine(driver);
            // TODO(simon): Why does the injecting sometimes fail?
            /*
            if (result != SUCCESS) {
                return result;
            }
            */
            // Call it. With a context element the expression is evaluated
            // relative to arguments[1]; otherwise relative to the document.
            std::wstring query;
            if (element) {
                query += L"(function() { return function(){var res = document.__webdriver_evaluate(arguments[0], arguments[1], null, 7, null); return res.snapshotItem(0) ;};})();";
            } else {
                query += L"(function() { return function(){var res = document.__webdriver_evaluate(arguments[0], document, null, 7, null); return res.snapshotLength != 0 ? res.snapshotItem(0) : undefined ;};})();";
            }
            ScriptArgs* queryArgs;
            result = wdNewScriptArgs(&queryArgs, 2);
            if (result != SUCCESS) {
                wdFreeScriptArgs(queryArgs);
                continue;
            }
            result = wdAddStringScriptArg(queryArgs, xpath);
            if (result != SUCCESS) {
                wdFreeScriptArgs(queryArgs);
                continue;
            }
            if (element) {
                result = wdAddElementScriptArg(queryArgs, element);
            }
            if (result != SUCCESS) {
                wdFreeScriptArgs(queryArgs);
                continue;
            }
            ScriptResult* queryResult;
            result = wdExecuteScript(driver, query.c_str(), queryArgs, &queryResult);
            wdFreeScriptArgs(queryArgs);
            // And be done
            if (result == SUCCESS) {
                int type = 0;
                result = wdGetScriptResultType(driver, queryResult, &type);
                if (type != TYPE_EMPTY) {
                    result = wdGetElementScriptResult(queryResult, driver, out);
                } else {
                    // No match yet: free the result and retry until deadline.
                    result = ENOSUCHELEMENT;
                    wdFreeScriptResult(queryResult);
                    continue;
                }
            }
            wdFreeScriptResult(queryResult);
            return result;
        } while (clock() < end);
        return result;
    } END_TRY;
}
// Finds all elements matching an XPath expression. The injected engine is
// invoked through the raw executeScript interface because the snapshot object
// it returns is not a type the normal wdExecuteScript result path allows.
// The snapshot is then walked with iterateNext() to build the collection.
// Polls until the implicit-wait deadline; on success the caller owns *out.
int wdFindElementsByXPath(WebDriver* driver, WebElement* element, const wchar_t* xpath, ElementCollection** out)
{
    *out = NULL;
    if (!driver || !driver->ie) { return ENOSUCHDRIVER; }
    try {
        clock_t end = endAt(driver);
        int result = EUNHANDLEDERROR;
        do {
            result = injectXPathEngine(driver);
            if (result != SUCCESS) {
                continue;
            }
            // Call it
            std::wstring query;
            if (element)
                query += L"(function() { return function() {var res = document.__webdriver_evaluate(arguments[0], arguments[1], null, 7, null); return res;};})();";
            else
                query += L"(function() { return function() {var res = document.__webdriver_evaluate(arguments[0], document, null, 7, null); return res;};})();";
            // We need to use the raw functions because we don't allow random objects
            // to be returned from the executeScript method normally
            SAFEARRAYBOUND bounds;
            bounds.cElements = 2;
            bounds.lLbound = 0;
            SAFEARRAY* queryArgs = SafeArrayCreate(VT_VARIANT, 1, &bounds);
            CComVariant queryArg(xpath);
            LONG index = 0;
            SafeArrayPutElement(queryArgs, &index, &queryArg);
            if (element) {
                CComVariant elementArg(element->element->getWrappedElement());
                LONG index = 1;
                SafeArrayPutElement(queryArgs, &index, &elementArg);
            }
            CComVariant snapshot;
            result = driver->ie->executeScript(query.c_str(), queryArgs, &snapshot);
            SafeArrayDestroy(queryArgs);
            if (result != SUCCESS) {
                continue;
            }
            // Ask the snapshot for its length.
            bounds.cElements = 1;
            SAFEARRAY* lengthArgs = SafeArrayCreate(VT_VARIANT, 1, &bounds);
            index = 0;
            SafeArrayPutElement(lengthArgs, &index, &snapshot);
            CComVariant lengthVar;
            result = driver->ie->executeScript(L"(function(){return function() {return arguments[0].snapshotLength;}})();", lengthArgs, &lengthVar);
            SafeArrayDestroy(lengthArgs);
            if (result != SUCCESS) {
                continue;
            }
            if (lengthVar.vt != VT_I4) {
                result = EUNEXPECTEDJSERROR;
                continue;
            }
            long length = lengthVar.lVal;
            // (The original built an extra 'snapshotArgs' SAFEARRAY here that
            // was never passed to anything and never destroyed; it has been
            // removed. The original also called SafeArrayDestroy(queryArgs) a
            // second time before returning — a double free — which is gone.)
            ElementCollection* elements = new ElementCollection();
            elements->elements = new std::vector<ElementWrapper*>();
            for (long i = 0; i < length; i++) {
                ScriptArgs* getElemArgs;
                wdNewScriptArgs(&getElemArgs, 2);
                // Cheat: put the snapshot in slot 0 directly, then add the
                // (unused by iterateNext, but expected) index argument.
                index = 0;
                SafeArrayPutElement(getElemArgs->args, &index, &snapshot);
                getElemArgs->currentIndex++;
                wdAddNumberScriptArg(getElemArgs, i);
                ScriptResult* getElemRes;
                wdExecuteScript(driver, L"(function(){return function() {return arguments[0].iterateNext();}})();", getElemArgs, &getElemRes);
                WebElement* e;
                wdGetElementScriptResult(getElemRes, driver, &e);
                // Transfer the wrapper into the collection and discard the
                // temporary shell, matching wdFindElementsByCss (the original
                // leaked one WebElement per item).
                elements->elements->push_back(e->element);
                e->element = NULL;
                delete e;
                wdFreeScriptArgs(getElemArgs);
            }
            *out = elements;
            return SUCCESS;
        } while (clock() < end);
        return result;
    } END_TRY;
}
// Allocates a ScriptArgs holder backed by a SAFEARRAY of VARIANTs with room
// for 'maxLength' arguments. The caller owns *scriptArgs and must release it
// with wdFreeScriptArgs.
int wdNewScriptArgs(ScriptArgs** scriptArgs, int maxLength)
{
    *scriptArgs = NULL;
    ScriptArgs* args = new ScriptArgs();
    args->currentIndex = 0;
    args->maxLength = maxLength;
    SAFEARRAYBOUND bounds;
    bounds.cElements = maxLength;
    bounds.lLbound = 0;
    // NOTE(review): SafeArrayCreate can return NULL on allocation failure;
    // that case is not checked here — confirm callers tolerate args->args
    // being NULL.
    args->args = SafeArrayCreate(VT_VARIANT, 1, &bounds);
    *scriptArgs = args;
    return SUCCESS;
}
// Appends a string argument at the next free slot. SafeArrayPutElement copies
// the VARIANT, so the stack-local CComVariant is safe to let go out of scope.
int wdAddStringScriptArg(ScriptArgs* scriptArgs, const wchar_t* arg)
{
    // CComVariant makes its own BSTR copy of 'arg'; the original's unused
    // intermediate std::wstring has been removed.
    CComVariant dest(arg);
    LONG index = scriptArgs->currentIndex;
    SafeArrayPutElement(scriptArgs->args, &index, &dest);
    scriptArgs->currentIndex++;
    return SUCCESS;
}
// Appends a boolean argument at the next free slot. 'trueOrFalse' follows the
// C convention used across this API: 1 means true, anything else false.
int wdAddBooleanScriptArg(ScriptArgs* scriptArgs, int trueOrFalse)
{
    VARIANT dest;
    VariantInit(&dest);
    dest.vt = VT_BOOL;
    // VARIANT_BOOL truth is VARIANT_TRUE (-1), not 1. The original stored the
    // raw comparison result (0 or 1); some COM consumers only treat -1 as
    // true, so map explicitly.
    dest.boolVal = (trueOrFalse == 1) ? VARIANT_TRUE : VARIANT_FALSE;
    LONG index = scriptArgs->currentIndex;
    SafeArrayPutElement(scriptArgs->args, &index, &dest);
    scriptArgs->currentIndex++;
    return SUCCESS;
}
// Appends a 32-bit integer argument (VT_I4) at the next free slot.
// SafeArrayPutElement copies the VARIANT.
int wdAddNumberScriptArg(ScriptArgs* scriptArgs, long number)
{
    VARIANT dest;
    dest.vt = VT_I4;
    dest.lVal = (LONG) number;
    LONG index = scriptArgs->currentIndex;
    SafeArrayPutElement(scriptArgs->args, &index, &dest);
    scriptArgs->currentIndex++;
    return SUCCESS;
}
// Appends a double argument (VT_R8) at the next free slot.
// SafeArrayPutElement copies the VARIANT.
int wdAddDoubleScriptArg(ScriptArgs* scriptArgs, double number)
{
    VARIANT dest;
    dest.vt = VT_R8;
    dest.dblVal = (DOUBLE) number;
    LONG index = scriptArgs->currentIndex;
    SafeArrayPutElement(scriptArgs->args, &index, &dest);
    scriptArgs->currentIndex++;
    return SUCCESS;
}
// Appends an element argument at the next free slot: the element's wrapped
// IDispatch when present, VT_EMPTY otherwise (letting scripts detect the
// missing context via 'arguments[n] ? ... : ...').
int wdAddElementScriptArg(ScriptArgs* scriptArgs, WebElement* element)
{
    VARIANT dest;
    // VariantInit, not VariantClear: the original cleared an *uninitialized*
    // VARIANT, which reads indeterminate vt/pointer fields and is undefined
    // behavior (it may try to Release a garbage interface pointer).
    VariantInit(&dest);
    if (!element || !element->element) {
        dest.vt = VT_EMPTY;
    } else {
        dest.vt = VT_DISPATCH;
        dest.pdispVal = element->element->getWrappedElement();
    }
    LONG index = scriptArgs->currentIndex;
    SafeArrayPutElement(scriptArgs->args, &index, &dest);
    scriptArgs->currentIndex++;
    return SUCCESS;
}
// Executes a script in the page and wraps the returned VARIANT in a heap
// ScriptResult the caller must release with wdFreeScriptResult. On failure
// *scriptResultRef is left NULL and the driver's error code is returned.
int wdExecuteScript(WebDriver* driver, const wchar_t* script, ScriptArgs* scriptArgs, ScriptResult** scriptResultRef)
{
    try {
        *scriptResultRef = NULL;
        CComVariant result;
        int res = driver->ie->executeScript(script, scriptArgs->args, &result);
        if (res != SUCCESS) {
            return res;
        }
        ScriptResult* toReturn = new ScriptResult();
        HRESULT hr = VariantCopy(&(toReturn->result), &result);
        if (FAILED(hr) && result.vt == VT_USERDEFINED) {
            // Special handling of the user defined path *sigh*: VariantCopy
            // rejects VT_USERDEFINED, so carry the BSTR payload over by hand.
            // SysAllocString makes an owned copy — the original assigned a
            // temporary CComBSTR, whose destructor freed the BSTR at the end
            // of the statement and left bstrVal dangling.
            toReturn->result.vt = VT_USERDEFINED;
            toReturn->result.bstrVal = SysAllocString(result.bstrVal);
        }
        *scriptResultRef = toReturn;
        return SUCCESS;
    } END_TRY;
}
// Maps the VARIANT type of a script result onto the driver's TYPE_* codes.
// VT_DISPATCH is further classified via the page: JS arrays and HTML
// collections report TYPE_ARRAY, any other object TYPE_ELEMENT.
int wdGetScriptResultType(WebDriver* driver, ScriptResult* result, int* type)
{
    if (!result) { return ENOSCRIPTRESULT; }
    switch (result->result.vt) {
        case VT_BSTR:
            *type = TYPE_STRING;
            break;
        case VT_I4:
        case VT_I8:
            *type = TYPE_LONG;
            break;
        case VT_BOOL:
            *type = TYPE_BOOLEAN;
            break;
        case VT_DISPATCH:
            {
                LPCWSTR itemType = driver->ie->getScriptResultType(&(result->result));
                std::string itemTypeStr;
                cw2string(itemType, itemTypeStr);
                LOG(DEBUG) << "Got type: " << itemTypeStr;
                // If it's a Javascript array or an HTML Collection - type 8 will
                // indicate the driver that this is ultimately an array.
                if ((itemTypeStr == "JavascriptArray") ||
                    (itemTypeStr == "HtmlCollection")) {
                    *type = TYPE_ARRAY;
                } else {
                    *type = TYPE_ELEMENT;
                }
            }
            break;
        case VT_EMPTY:
            *type = TYPE_EMPTY;
            break;
        case VT_USERDEFINED:
            // VT_USERDEFINED is how wdExecuteScript smuggles script exceptions.
            *type = TYPE_EXCEPTION;
            break;
        case VT_R4:
        case VT_R8:
            *type = TYPE_DOUBLE;
            break;
        default:
            return EUNKNOWNSCRIPTRESULT;
    }
    return SUCCESS;
}
// Copies a BSTR script result into a caller-owned StringWrapper. A NULL BSTR
// (empty string in COM) becomes an empty wide string.
// NOTE(review): assumes result->result.vt is VT_BSTR — callers are expected
// to have checked wdGetScriptResultType first; confirm no caller skips that.
int wdGetStringScriptResult(ScriptResult* result, StringWrapper** wrapper)
{
    *wrapper = NULL;
    if (!result) { return ENOSCRIPTRESULT; }
    StringWrapper* toReturn = new StringWrapper();
    BSTR val = result->result.bstrVal;
    if (!val) {
        toReturn->text = new wchar_t[1];
        wcscpy_s(toReturn->text, 1, L"");
    } else {
        UINT length = SysStringLen(val);
        toReturn->text = new wchar_t[length + 1];
        wcscpy_s(toReturn->text, length + 1, val);
    }
    *wrapper = toReturn;
    return SUCCESS;
}
// Extracts a 32-bit integer script result. Assumes vt is VT_I4 (no check).
int wdGetNumberScriptResult(ScriptResult* result, long* value)
{
    if (!result) { return ENOSCRIPTRESULT; }
    *value = result->result.lVal;
    return SUCCESS;
}
// Extracts a double script result. Assumes vt is VT_R8 (no check).
int wdGetDoubleScriptResult(ScriptResult* result, double* value)
{
    if (!result) { return ENOSCRIPTRESULT; }
    *value = result->result.dblVal;
    return SUCCESS;
}
// Extracts a boolean script result as 1/0, treating only VARIANT_TRUE (-1)
// as true per COM convention.
int wdGetBooleanScriptResult(ScriptResult* result, int* value)
{
    if (!result) { return ENOSCRIPTRESULT; }
    *value = result->result.boolVal == VARIANT_TRUE ? 1 : 0;
    return SUCCESS;
}
// Wraps a VT_DISPATCH script result as a caller-owned WebElement.
// NOTE(review): the pdispVal is cast to IHTMLElement* without a
// QueryInterface or vt check — confirm callers only pass TYPE_ELEMENT
// results here.
int wdGetElementScriptResult(ScriptResult* result, WebDriver* driver, WebElement** element)
{
    *element = NULL;
    if (!result) { return ENOSCRIPTRESULT; }
    IHTMLElement *node = (IHTMLElement*) result->result.pdispVal;
    WebElement* toReturn = new WebElement();
    toReturn->element = new ElementWrapper(driver->ie, node);
    *element = toReturn;
    return SUCCESS;
}
// Determines the length of an array-like script result by evaluating
// 'arguments[0].length' against it in the page. Returns EUNEXPECTEDJSERROR
// when the evaluation does not yield an integer (i.e. it was not an array).
int wdGetArrayLengthScriptResult(WebDriver* driver, ScriptResult* result,
                                 int* length)
{
    // Prepare an array for the Javascript execution, containing only one
    // element - the original returned array from a JS execution.
    SAFEARRAYBOUND lengthQuery;
    lengthQuery.cElements = 1;
    lengthQuery.lLbound = 0;
    SAFEARRAY* lengthArgs = SafeArrayCreate(VT_VARIANT, 1, &lengthQuery);
    LONG index = 0;
    SafeArrayPutElement(lengthArgs, &index, &(result->result));
    CComVariant lengthVar;
    int lengthResult = driver->ie->executeScript(
        L"(function(){return function() {return arguments[0].length;}})();",
        lengthArgs, &lengthVar);
    SafeArrayDestroy(lengthArgs);
    if (lengthResult != SUCCESS) {
        return lengthResult;
    }
    // Expect the return type to be an integer. A non-integer means this was
    // not an array after all.
    if (lengthVar.vt != VT_I4) {
        return EUNEXPECTEDJSERROR;
    }
    *length = lengthVar.lVal;
    return SUCCESS;
}
// Fetches element 'index' from an array-like script result by evaluating
// 'arguments[0][arguments[1]]' in the page. On success the caller owns
// *arrayItem and must release it with wdFreeScriptResult.
int wdGetArrayItemFromScriptResult(WebDriver* driver, ScriptResult* result,
                                   int index, ScriptResult** arrayItem)
{
    // Prepare an array for Javascript execution. The array contains the original
    // array returned from a previous execution and the index of the item required
    // from that array.
    ScriptArgs* getItemArgs;
    wdNewScriptArgs(&getItemArgs, 2);
    LONG argIndex = 0;
    // Original array.
    SafeArrayPutElement(getItemArgs->args, &argIndex, &(result->result));
    getItemArgs->currentIndex++;
    // Item index
    wdAddNumberScriptArg(getItemArgs, index);
    int execRes = wdExecuteScript(
        driver,
        L"(function(){return function() {return arguments[0][arguments[1]];}})();",
        getItemArgs, arrayItem);
    wdFreeScriptArgs(getItemArgs);
    getItemArgs = NULL;
    return execRes;
}
// Presses the left mouse button at window-relative coordinates.
// (MOUSEBUTTON_LFET is the constant's spelling as declared elsewhere.)
int wdeMouseDownAt(HWND hwnd, long windowX, long windowY)
{
    mouseDownAt(hwnd, windowX, windowY, MOUSEBUTTON_LFET);
    return SUCCESS;
}
// Releases the left mouse button at window-relative coordinates.
int wdeMouseUpAt(HWND hwnd, long windowX, long windowY)
{
    mouseUpAt(hwnd, windowX, windowY, MOUSEBUTTON_LFET);
    return SUCCESS;
}
// Moves the mouse from (fromX, fromY) to (toX, toY) over 'duration',
// delegating entirely to the shared mouseMoveTo helper.
int wdeMouseMoveTo(HWND hwnd, long duration, long fromX, long fromY, long toX, long toY)
{
    mouseMoveTo(hwnd, duration, fromX, fromY, toX, toY);
    return SUCCESS;
}
// Captures a screenshot and returns it as a base64 string in a caller-owned
// StringWrapper.
int wdCaptureScreenshotAsBase64(WebDriver* driver, StringWrapper** result) {
    *result = NULL;
    if (!driver || !driver->ie) return ENOSUCHDRIVER;
    try {
        const std::wstring originalString(driver->ie->captureScreenshotAsBase64());
        // +1 for the terminating NUL.
        size_t length = originalString.length() + 1;
        wchar_t* toReturn = new wchar_t[length];
        wcscpy_s(toReturn, length, originalString.c_str());
        StringWrapper* res = new StringWrapper();
        res->text = toReturn;
        *result = res;
        return SUCCESS;
    } END_TRY;
}
// Sets the implicit-wait timeout (milliseconds) consumed by endAt() in the
// polling find-element loops above.
int wdSetImplicitWaitTimeout(WebDriver* driver, long timeoutInMillis)
{
    if (!driver || !driver->ie) return ENOSUCHDRIVER;
    driver->implicitWaitTimeout = timeoutInMillis;
    return SUCCESS;
}
}
| zerodiv/CTM-Windows-Agent | Continuum_Windows_Testing_Agent/Vendor/jobbie/src/cpp/InternetExplorerDriver/webdriver.cpp | C++ | apache-2.0 | 47,123 |
/**
* Copyright 2014 Nortal AS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.nortal.petit.orm.statement;
import java.util.List;
import org.springframework.util.CollectionUtils;
/**
* @author Lauri Lättemäe (lauri.lattemae@nortal.com)
* @created 29.04.2013
*/
public abstract class ExecutableStatement<B> extends SimpleStatement<B> {
    /**
     * Returns the statement's SQL with parameter values substituted.
     * When the statement targets multiple beans, the statement is prepared
     * once per bean and one newline-terminated line is emitted per bean;
     * otherwise a single newline-terminated line is returned.
     *
     * @return the rendered SQL string(s)
     */
    @Override
    public String getSqlWithParams() {
        prepare();
        // StringBuilder instead of StringBuffer: the buffer is method-local,
        // so the synchronization StringBuffer pays for is pure overhead.
        StringBuilder sb = new StringBuilder();
        if (!CollectionUtils.isEmpty(getBeans())) {
            for (B bean : getBeans()) {
                prepare(bean);
                sb.append(super.getSqlWithParams()).append("\n");
            }
        } else {
            sb.append(super.getSqlWithParams()).append("\n");
        }
        return sb.toString();
    }

    /** @return the beans this statement operates on; may be null or empty */
    protected abstract List<B> getBeans();

    /** Binds the given bean's values into the statement before rendering. */
    protected abstract void prepare(B bean);

    /** Executes the statement against the database. */
    public abstract void exec();
}
| jimmytheneutrino/petit | modules/orm/src/main/java/com/nortal/petit/orm/statement/ExecutableStatement.java | Java | apache-2.0 | 1,555 |
/*
* Copyright 2008 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.javascript.jscomp;
import com.google.common.base.Preconditions;
import com.google.javascript.jscomp.ControlFlowGraph.Branch;
import com.google.javascript.jscomp.NodeTraversal.AbstractPostOrderCallback;
import com.google.javascript.jscomp.NodeTraversal.ScopedCallback;
import com.google.javascript.jscomp.graph.GraphReachability;
import com.google.javascript.jscomp.graph.DiGraph.DiGraphEdge;
import com.google.javascript.jscomp.graph.DiGraph.DiGraphNode;
import com.google.javascript.rhino.Node;
import com.google.javascript.rhino.Token;
import java.util.Deque;
import java.util.LinkedList;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Removes dead code from a parse tree. The kinds of dead code that this pass
* removes are:
* - Any code following a return statement, such as the <code>alert</code>
* call in: <code>if (x) { return; alert('unreachable'); }</code>.
* - Statements that have no side effects, such as:
* <code>a.b.MyClass.prototype.propertyName;</code> or <code>true;</code>.
* That first kind of statement sometimes appears intentionally, so that
* prototype properties can be annotated using JSDoc without actually
* being initialized.
*
*/
class UnreachableCodeElimination extends AbstractPostOrderCallback
    implements CompilerPass, ScopedCallback {
  private static final Logger logger =
      Logger.getLogger(UnreachableCodeElimination.class.getName());

  private final AbstractCompiler compiler;
  // When true, statements with no side effects are removed even if reachable.
  private final boolean removeNoOpStatements;

  // One control flow graph per scope; the stack mirrors scope nesting as the
  // traversal enters and exits function scopes.
  Deque<ControlFlowGraph<Node>> cfgStack =
      new LinkedList<ControlFlowGraph<Node>>();
  // CFG of the scope currently being traversed.
  ControlFlowGraph<Node> curCfg = null;

  UnreachableCodeElimination(AbstractCompiler compiler,
      boolean removeNoOpStatements) {
    this.compiler = compiler;
    this.removeNoOpStatements = removeNoOpStatements;
  }

  /** Builds the CFG for the entered scope and marks reachable nodes. */
  @Override
  public void enterScope(NodeTraversal t) {
    Scope scope = t.getScope();

    // Computes the control flow graph.
    ControlFlowAnalysis cfa = new ControlFlowAnalysis(compiler, false);
    cfa.process(null, scope.getRootNode());
    cfgStack.push(curCfg);
    curCfg = cfa.getCfg();

    new GraphReachability<Node, ControlFlowGraph.Branch>(curCfg)
        .compute(curCfg.getEntry().getValue());
  }

  /** Restores the enclosing scope's CFG. */
  @Override
  public void exitScope(NodeTraversal t) {
    curCfg = cfgStack.pop();
  }

  @Override
  public void process(Node externs, Node root) {
    NodeTraversal.traverse(compiler, root, this);
  }

  /**
   * Removes the visited node if it is unreachable (or side-effect free when
   * removeNoOpStatements is set), after first simplifying empty TRY forms.
   */
  @Override
  public void visit(NodeTraversal t, Node n, Node parent) {
    if (parent == null) {
      return;
    }
    if (n.getType() == Token.FUNCTION || n.getType() == Token.SCRIPT) {
      return;
    }

    // Removes TRYs that had its CATCH removed and/or empty FINALLY.
    // TODO(dcc): Move the parts of this that don't require a control flow
    // graph to PeepholeRemoveDeadCode
    if (n.getType() == Token.TRY) {
      Node body = n.getFirstChild();
      Node catchOrFinallyBlock = body.getNext();
      Node finallyBlock = catchOrFinallyBlock.getNext();

      if (!catchOrFinallyBlock.hasChildren() &&
          (finallyBlock == null || !finallyBlock.hasChildren())) {
        // Replace the TRY with its own body; continue processing the body
        // node in place of the TRY.
        n.removeChild(body);
        parent.replaceChild(n, body);
        compiler.reportCodeChange();
        n = body;
      }
    }

    DiGraphNode<Node, Branch> gNode = curCfg.getDirectedGraphNode(n);
    if (gNode == null) { // Not in CFG.
      return;
    }
    if (gNode.getAnnotation() != GraphReachability.REACHABLE ||
        (removeNoOpStatements && !NodeUtil.mayHaveSideEffects(n))) {
      removeDeadExprStatementSafely(n);
      return;
    }

    tryRemoveUnconditionalBranching(n);
  }

  /**
   * Tries to remove n if an unconditional branch node (break, continue or
   * return) if the target of n is the same as the the follow of n. That is, if
   * we remove n, the control flow remains the same. Also if n targets to
   * another unconditional branch, this function will recursively try to remove
   * the target branch as well. The reason why we want to cascade this removal
   * is because we only run this pass once. If we have code such as
   *
   * break -> break -> break
   *
   * where all 3 break's are useless. The order of removal matters. When we
   * first look at the first break, we see that it branches to the 2nd break.
   * However, if we remove the last break, the 2nd break becomes useless and
   * finally the first break becomes useless as well.
   *
   * @return The target of this jump. If the target is also useless jump,
   *     the target of that useless jump recursively.
   */
  @SuppressWarnings("fallthrough")
  private Node tryRemoveUnconditionalBranching(Node n) {
    /*
     * For each of the unconditional branching control flow node, check to see
     * if the ControlFlowAnalysis.computeFollowNode of that node is same as
     * the branching target. If it is, the branch node is safe to be removed.
     *
     * This is not as clever as MinimizeExitPoints because it doesn't do any
     * if-else conversion but it handles more complicated switch statements
     * much nicer.
     */

    // If n is null the target is the end of the function, nothing to do.
    if (n == null) {
      return n;
    }

    DiGraphNode<Node, Branch> gNode = curCfg.getDirectedGraphNode(n);

    if (gNode == null) {
      return n;
    }

    // If the parent is null, this mean whatever node it was there is now
    // useless and it has been removed by other logics in this pass. That node
    // while no longer exists in the AST, is still in the CFG because we
    // never update the graph as nodes are removed.
    if (n.getParent() == null) {
      List<DiGraphEdge<Node,Branch>> outEdges = gNode.getOutEdges();
      if (outEdges.size() == 1) {
        return tryRemoveUnconditionalBranching(
            outEdges.get(0).getDestination().getValue());
      }
    }

    switch (n.getType()) {
      case Token.BLOCK:
        if (n.hasChildren()) {
          Node first = n.getFirstChild();
          return tryRemoveUnconditionalBranching(first);
        } else {
          return tryRemoveUnconditionalBranching(
              ControlFlowAnalysis.computeFollowNode(n));
        }

      case Token.RETURN:
        if (n.hasChildren()) {
          break;
        }
        // Intentional fallthrough: a bare 'return' is handled like
        // break/continue below.
      case Token.BREAK:
      case Token.CONTINUE:

        // We are looking for a control flow changing statement that always
        // branches to the same node. If removing it the control flow still
        // branches to that same node. It is safe to remove it.
        List<DiGraphEdge<Node,Branch>> outEdges = gNode.getOutEdges();
        if (outEdges.size() == 1 &&

            // If there is a next node, there is no chance this jump is useless.
            (n.getNext() == null || n.getNext().getType() == Token.FUNCTION)) {

          Preconditions.checkState(outEdges.get(0).getValue() == Branch.UNCOND);
          Node fallThrough = tryRemoveUnconditionalBranching(
              ControlFlowAnalysis.computeFollowNode(n));
          Node nextCfgNode = outEdges.get(0).getDestination().getValue();
          if (nextCfgNode == fallThrough) {
            removeDeadExprStatementSafely(n);
            return fallThrough;
          }
        }
    }
    return n;
  }

  /**
   * Removes n from the AST unless doing so is messy (empty nodes are left to
   * FoldConstants; DO/TRY/CATCH/FINALLY carry execution semantics that make
   * removal non-trivial). Variable declarations inside n are re-declared in
   * the enclosing branch first so they stay in scope.
   */
  private void removeDeadExprStatementSafely(Node n) {
    if (n.getType() == Token.EMPTY ||
        (n.getType() == Token.BLOCK && !n.hasChildren())) {
      // Not always trivial to remove, let FoldContants work its magic later.
      return;
    }

    // Removing an unreachable DO node is messy because it means we still have
    // to execute one iteration. If the DO's body has breaks in the middle, it
    // can get even more trickier and code size might actually increase.
    switch (n.getType()) {
      case Token.DO:
      case Token.TRY:
      case Token.CATCH:
      case Token.FINALLY:
        return;
    }

    NodeUtil.redeclareVarsInsideBranch(n);
    compiler.reportCodeChange();
    if (logger.isLoggable(Level.FINE)) {
      logger.fine("Removing " + n.toString());
    }
    NodeUtil.removeChild(n.getParent(), n);
  }
}
| antz29/closure-compiler | src/com/google/javascript/jscomp/UnreachableCodeElimination.java | Java | apache-2.0 | 8,731 |
package theinternet.pages;
import com.frameworkium.core.ui.annotations.Visible;
import com.frameworkium.core.ui.pages.BasePage;
import com.frameworkium.core.ui.pages.PageFactory;
import io.qameta.allure.Step;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.support.FindBy;
import ru.yandex.qatools.htmlelements.annotations.Name;
import ru.yandex.qatools.htmlelements.element.FileInput;
/**
 * Page object for the-internet's "File Upload" page.
 * Exposes the file chooser input and the upload button, and a single step
 * that performs the full upload flow.
 */
public class FileUploadPage extends BasePage<FileUploadPage> {

    // File chooser <input>; setting a path here stages the file for upload.
    @Visible
    @Name("Choose Files button")
    @FindBy(css = "input#file-upload")
    private FileInput chooseFileInput;

    // Submits the staged file.
    @Visible
    @Name("Upload button")
    @FindBy(css = "input#file-submit")
    private WebElement uploadButton;

    /**
     * Uploads the file at {@code filePath} and navigates to the success page.
     *
     * @param filePath absolute path of the file to upload
     * @return the page object for the post-upload success page
     */
    @Step("Upload a file by choosing file and then clicking upload")
    public FileUploadSuccessPage uploadFile(String filePath) {
        chooseFileInput.setFileToUpload(filePath);
        uploadButton.click();
        return PageFactory.newInstance(FileUploadSuccessPage.class);
    }
}
| Frameworkium/frameworkium | src/test/java/theinternet/pages/FileUploadPage.java | Java | apache-2.0 | 1,003 |
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "OpenGLRenderer"
#include "jni.h"
#include "GraphicsJNI.h"
#include <nativehelper/JNIHelp.h>
#include "core_jni_helpers.h"
#include <android_runtime/android_graphics_SurfaceTexture.h>
#include <gui/GLConsumer.h>
#include <Paint.h>
#include <SkBitmap.h>
#include <SkCanvas.h>
#include <SkMatrix.h>
#include <SkXfermode.h>
#include <DeferredLayerUpdater.h>
#include <LayerRenderer.h>
#include <SkiaShader.h>
#include <Rect.h>
#include <RenderNode.h>
namespace android {
using namespace uirenderer;
// Applies size and blending state from the Java HardwareLayer to the native
// DeferredLayerUpdater. Returns whether either property actually changed.
static jboolean android_view_HardwareLayer_prepare(JNIEnv* env, jobject clazz,
        jlong layerUpdaterPtr, jint width, jint height, jboolean isOpaque) {
    DeferredLayerUpdater* layer = reinterpret_cast<DeferredLayerUpdater*>(layerUpdaterPtr);
    bool changed = false;
    changed |= layer->setSize(width, height);
    // Blending is enabled exactly when the layer is NOT opaque.
    changed |= layer->setBlend(!isOpaque);
    return changed;
}
// Forwards the Java-side Paint (may be null) to the native layer updater.
// Tolerates a null layer pointer, unlike the other setters in this file.
static void android_view_HardwareLayer_setLayerPaint(JNIEnv* env, jobject clazz,
        jlong layerUpdaterPtr, jlong paintPtr) {
    DeferredLayerUpdater* layer = reinterpret_cast<DeferredLayerUpdater*>(layerUpdaterPtr);
    if (layer) {
        Paint* paint = reinterpret_cast<Paint*>(paintPtr);
        layer->setPaint(paint);
    }
}
// Applies a transform matrix to the layer.
// NOTE(review): no null check on 'layer' here, although setLayerPaint above
// guards against one — confirm callers always pass a live layer pointer.
static void android_view_HardwareLayer_setTransform(JNIEnv* env, jobject clazz,
        jlong layerUpdaterPtr, jlong matrixPtr) {
    DeferredLayerUpdater* layer = reinterpret_cast<DeferredLayerUpdater*>(layerUpdaterPtr);
    SkMatrix* matrix = reinterpret_cast<SkMatrix*>(matrixPtr);
    layer->setTransform(matrix);
}
// Binds a SurfaceTexture to the layer. The 'needsAttach' argument to the
// native setter is the inverse of isAlreadyAttached.
static void android_view_HardwareLayer_setSurfaceTexture(JNIEnv* env, jobject clazz,
        jlong layerUpdaterPtr, jobject surface, jboolean isAlreadyAttached) {
    DeferredLayerUpdater* layer = reinterpret_cast<DeferredLayerUpdater*>(layerUpdaterPtr);
    sp<GLConsumer> surfaceTexture(SurfaceTexture_getSurfaceTexture(env, surface));
    layer->setSurfaceTexture(surfaceTexture, !isAlreadyAttached);
}
// Asks the layer to latch the most recent frame from its SurfaceTexture.
static void android_view_HardwareLayer_updateSurfaceTexture(JNIEnv* env, jobject clazz,
        jlong layerUpdaterPtr) {
    DeferredLayerUpdater* layer = reinterpret_cast<DeferredLayerUpdater*>(layerUpdaterPtr);
    layer->updateTexImage();
}
// Returns the GL texture id of the layer's backing texture.
static jint android_view_HardwareLayer_getTexName(JNIEnv* env, jobject clazz,
        jlong layerUpdaterPtr) {
    DeferredLayerUpdater* layer = reinterpret_cast<DeferredLayerUpdater*>(layerUpdaterPtr);
    return layer->backingLayer()->getTextureId();
}
// ----------------------------------------------------------------------------
// JNI Glue
// ----------------------------------------------------------------------------
// Java peer class whose native methods are registered below.
const char* const kClassPathName = "android/view/HardwareLayer";

// Mapping of Java native method names + JNI signatures to the C++
// implementations above. Signature strings must stay in sync with the
// Java declarations in android.view.HardwareLayer.
static JNINativeMethod gMethods[] = {
    { "nPrepare", "(JIIZ)Z", (void*) android_view_HardwareLayer_prepare },
    { "nSetLayerPaint", "(JJ)V", (void*) android_view_HardwareLayer_setLayerPaint },
    { "nSetTransform", "(JJ)V", (void*) android_view_HardwareLayer_setTransform },
    { "nSetSurfaceTexture", "(JLandroid/graphics/SurfaceTexture;Z)V",
            (void*) android_view_HardwareLayer_setSurfaceTexture },
    { "nUpdateSurfaceTexture", "(J)V", (void*) android_view_HardwareLayer_updateSurfaceTexture },
    { "nGetTexName", "(J)I", (void*) android_view_HardwareLayer_getTexName },
};
// Registers the native methods; aborts the runtime on failure
// (RegisterMethodsOrDie semantics).
int register_android_view_HardwareLayer(JNIEnv* env) {
    return RegisterMethodsOrDie(env, kClassPathName, gMethods, NELEM(gMethods));
}
};
| Ant-Droid/android_frameworks_base_OLD | core/jni/android_view_HardwareLayer.cpp | C++ | apache-2.0 | 4,154 |
import { Type } from 'angular2/src/facade/lang';
import { CanActivate } from './lifecycle_annotations_impl';
import { reflector } from 'angular2/src/core/reflection/reflection';
/**
 * Returns true when `type` is a class (a `Type`) whose prototype declares a
 * member named after the lifecycle event `e`; false for anything else.
 */
export function hasLifecycleHook(e, type) {
    if (type instanceof Type) {
        return e.name in type.prototype;
    }
    return false;
}
/**
 * Scans the reflected annotations of `type` and returns the hook function of
 * the first `CanActivate` annotation found, or null when there is none.
 */
export function getCanActivateHook(type) {
    const annotations = reflector.annotations(type);
    for (const annotation of annotations) {
        if (annotation instanceof CanActivate) {
            return annotation.fn;
        }
    }
    return null;
}
//# sourceMappingURL=route_lifecycle_reflector.js.map
| aayushkapoor206/whatshot | node_modules/angular2/es6/prod/src/router/route_lifecycle_reflector.js | JavaScript | apache-2.0 | 677 |
/**
* Copyright (c) 2016-present, RxJava Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is
* distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See
* the License for the specific language governing permissions and limitations under the License.
*/
package io.reactivex.internal.operators.flowable;
import java.util.concurrent.atomic.AtomicReference;
import org.reactivestreams.*;
import io.reactivex.*;
import io.reactivex.annotations.Nullable;
import io.reactivex.disposables.*;
import io.reactivex.exceptions.Exceptions;
import io.reactivex.functions.Function;
import io.reactivex.internal.disposables.DisposableHelper;
import io.reactivex.internal.functions.ObjectHelper;
import io.reactivex.internal.subscriptions.*;
import io.reactivex.internal.util.AtomicThrowable;
import io.reactivex.plugins.RxJavaPlugins;
/**
* Maps a sequence of values into CompletableSources and awaits their termination.
* @param <T> the value type
*/
public final class FlowableFlatMapCompletable<T> extends AbstractFlowableWithUpstream<T, T> {

    /** Maps each upstream value to the CompletableSource that gets subscribed to. */
    final Function<? super T, ? extends CompletableSource> mapper;

    /** Maximum number of concurrently active inner sources; Integer.MAX_VALUE means unbounded. */
    final int maxConcurrency;

    /** When true, errors are accumulated and signaled only after all sources terminate. */
    final boolean delayErrors;

    public FlowableFlatMapCompletable(Flowable<T> source,
            Function<? super T, ? extends CompletableSource> mapper, boolean delayErrors,
            int maxConcurrency) {
        super(source);
        this.mapper = mapper;
        this.delayErrors = delayErrors;
        this.maxConcurrency = maxConcurrency;
    }

    @Override
    protected void subscribeActual(Subscriber<? super T> subscriber) {
        source.subscribe(new FlatMapCompletableMainSubscriber<T>(subscriber, mapper, delayErrors, maxConcurrency));
    }

    static final class FlatMapCompletableMainSubscriber<T> extends BasicIntQueueSubscription<T>
    implements FlowableSubscriber<T> {
        private static final long serialVersionUID = 8443155186132538303L;

        /** Downstream consumer; only receives terminal events (no onNext values are emitted). */
        final Subscriber<? super T> downstream;

        /** Collects errors from upstream and inner sources. */
        final AtomicThrowable errors;

        final Function<? super T, ? extends CompletableSource> mapper;

        final boolean delayErrors;

        /** Tracks active inner observers so cancel() can dispose them all. */
        final CompositeDisposable set;

        final int maxConcurrency;

        Subscription upstream;

        volatile boolean cancelled;

        FlatMapCompletableMainSubscriber(Subscriber<? super T> subscriber,
                Function<? super T, ? extends CompletableSource> mapper, boolean delayErrors,
                int maxConcurrency) {
            this.downstream = subscriber;
            this.mapper = mapper;
            this.delayErrors = delayErrors;
            this.errors = new AtomicThrowable();
            this.set = new CompositeDisposable();
            this.maxConcurrency = maxConcurrency;
            // The inherited integer state counts outstanding sources:
            // 1 for the upstream itself, plus one per active inner Completable
            // (incremented in onNext, decremented in onComplete).
            this.lazySet(1);
        }

        @Override
        public void onSubscribe(Subscription s) {
            if (SubscriptionHelper.validate(this.upstream, s)) {
                this.upstream = s;

                downstream.onSubscribe(this);

                // Request either everything (unbounded) or a window of
                // maxConcurrency items; one more is requested each time an
                // inner source terminates (see onError/onComplete).
                int m = maxConcurrency;
                if (m == Integer.MAX_VALUE) {
                    s.request(Long.MAX_VALUE);
                } else {
                    s.request(m);
                }
            }
        }

        @Override
        public void onNext(T value) {
            CompletableSource cs;

            try {
                cs = ObjectHelper.requireNonNull(mapper.apply(value), "The mapper returned a null CompletableSource");
            } catch (Throwable ex) {
                // A crashing mapper cancels the upstream and is routed through
                // the normal error path.
                Exceptions.throwIfFatal(ex);
                upstream.cancel();
                onError(ex);
                return;
            }

            // Account for the new inner source before subscribing to it.
            getAndIncrement();

            InnerConsumer inner = new InnerConsumer();

            if (!cancelled && set.add(inner)) {
                cs.subscribe(inner);
            }
        }

        @Override
        public void onError(Throwable e) {
            if (errors.addThrowable(e)) {
                if (delayErrors) {
                    // Keep running; emit the aggregated error once the last
                    // source terminates, otherwise replenish the window.
                    if (decrementAndGet() == 0) {
                        Throwable ex = errors.terminate();
                        downstream.onError(ex);
                    } else {
                        if (maxConcurrency != Integer.MAX_VALUE) {
                            upstream.request(1);
                        }
                    }
                } else {
                    // Eager mode: tear everything down; getAndSet(0) ensures
                    // only one terminal event reaches downstream.
                    cancel();

                    if (getAndSet(0) > 0) {
                        Throwable ex = errors.terminate();
                        downstream.onError(ex);
                    }
                }
            } else {
                RxJavaPlugins.onError(e);
            }
        }

        @Override
        public void onComplete() {
            if (decrementAndGet() == 0) {
                // Last outstanding source finished: surface any delayed error,
                // otherwise complete normally.
                Throwable ex = errors.terminate();
                if (ex != null) {
                    downstream.onError(ex);
                } else {
                    downstream.onComplete();
                }
            } else {
                if (maxConcurrency != Integer.MAX_VALUE) {
                    upstream.request(1);
                }
            }
        }

        @Override
        public void cancel() {
            cancelled = true;
            upstream.cancel();
            set.dispose();
        }

        @Override
        public void request(long n) {
            // ignored, no values emitted
        }

        @Nullable
        @Override
        public T poll() throws Exception {
            return null; // always empty
        }

        @Override
        public boolean isEmpty() {
            return true; // always empty
        }

        @Override
        public void clear() {
            // nothing to clear
        }

        @Override
        public int requestFusion(int mode) {
            // Only ASYNC fusion is offered; the queue is permanently empty.
            return mode & ASYNC;
        }

        void innerComplete(InnerConsumer inner) {
            set.delete(inner);
            onComplete();
        }

        void innerError(InnerConsumer inner, Throwable e) {
            set.delete(inner);
            onError(e);
        }

        /** Observer for one inner CompletableSource; funnels terminal events back to the parent. */
        final class InnerConsumer extends AtomicReference<Disposable> implements CompletableObserver, Disposable {
            private static final long serialVersionUID = 8606673141535671828L;

            @Override
            public void onSubscribe(Disposable d) {
                DisposableHelper.setOnce(this, d);
            }

            @Override
            public void onComplete() {
                innerComplete(this);
            }

            @Override
            public void onError(Throwable e) {
                innerError(this, e);
            }

            @Override
            public void dispose() {
                DisposableHelper.dispose(this);
            }

            @Override
            public boolean isDisposed() {
                return DisposableHelper.isDisposed(get());
            }
        }
    }
}
| NiteshKant/RxJava | src/main/java/io/reactivex/internal/operators/flowable/FlowableFlatMapCompletable.java | Java | apache-2.0 | 7,266 |
/*
* Copyright 2017 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.dmn.core.compiler;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import org.kie.dmn.api.core.DMNType;
import org.kie.dmn.api.core.ast.BusinessKnowledgeModelNode;
import org.kie.dmn.api.core.ast.DMNNode;
import org.kie.dmn.api.core.ast.DecisionNode;
import org.kie.dmn.api.core.ast.DecisionServiceNode;
import org.kie.dmn.api.core.ast.InputDataNode;
import org.kie.dmn.core.api.DMNExpressionEvaluator;
import org.kie.dmn.core.ast.DecisionNodeImpl;
import org.kie.dmn.core.impl.CompositeTypeImpl;
import org.kie.dmn.core.impl.DMNModelImpl;
import org.kie.dmn.core.util.Msg;
import org.kie.dmn.model.api.DRGElement;
import org.kie.dmn.model.api.Decision;
public class DecisionCompiler implements DRGElementCompiler {
/** This compiler handles only {@link Decision} DRG elements. */
@Override
public boolean accept(DRGElement de) {
    return de instanceof Decision;
}
/**
 * Creates the AST node for a {@link Decision}, resolves its result type from
 * the declared variable (or the decision itself), and registers it on the model.
 * Reports an error and bails out when the decision has no variable.
 */
@Override
public void compileNode(DRGElement de, DMNCompilerImpl compiler, DMNModelImpl model) {
    Decision decision = (Decision) de;
    DecisionNodeImpl dn = new DecisionNodeImpl( decision );
    if ( decision.getVariable() == null ) {
        // Without a variable the decision cannot be typed or referenced.
        DMNCompilerHelper.reportMissingVariable( model, de, decision, Msg.MISSING_VARIABLE_FOR_DECISION );
        return;
    }
    DMNCompilerHelper.checkVariableName( model, decision, decision.getName() );
    // The early return above guarantees the variable is non-null, so the
    // previously duplicated null check has been dropped.
    DMNType type;
    if ( decision.getVariable().getTypeRef() != null ) {
        type = compiler.resolveTypeRef(model, decision, decision.getVariable(), decision.getVariable().getTypeRef());
    } else {
        type = compiler.resolveTypeRef(model, decision, decision, null);
    }
    dn.setResultType( type );
    model.addDecision( dn );
}
/** This compiler produces evaluators only for {@link DecisionNodeImpl} AST nodes. */
@Override
public boolean accept(DMNNode node) {
    return node instanceof DecisionNodeImpl;
}
/**
 * Resolves this decision's dependencies into the compile context and compiles
 * its expression into a {@link DMNExpressionEvaluator}.
 * <p>
 * Local dependencies are bound directly by name; dependencies coming from an
 * imported model are grouped into a composite type keyed by the import alias
 * so they can be referenced as {@code alias.name}. The four near-identical
 * instanceof branches of the original were factored into two helpers.
 */
@Override
public void compileEvaluator(DMNNode node, DMNCompilerImpl compiler, DMNCompilerContext ctx, DMNModelImpl model) {
    DecisionNodeImpl di = (DecisionNodeImpl) node;
    compiler.linkRequirements( model, di );

    ctx.enterFrame();
    try {
        Map<String, DMNType> importedTypes = new HashMap<>();
        for( DMNNode dep : di.getDependencies().values() ) {
            DMNType depType = typeOfDependency(dep);
            if (depType != null) {
                registerDependency(ctx, model, importedTypes, dep, depType);
            }
        }
        for (Entry<String, DMNType> importedType : importedTypes.entrySet()) {
            ctx.setVariable(importedType.getKey(), importedType.getValue());
        }
        DMNExpressionEvaluator evaluator = compiler.getEvaluatorCompiler().compileExpression( ctx, model, di, di.getName(), di.getDecision().getExpression() );
        di.setEvaluator( evaluator );
    } finally {
        ctx.exitFrame();
    }
}

/**
 * Returns the result/variable type of a dependency node, or null for node
 * kinds this compiler does not bind (matching the original's silent skip).
 */
private static DMNType typeOfDependency(DMNNode dep) {
    if (dep instanceof DecisionNode) {
        return ((DecisionNode) dep).getResultType();
    } else if (dep instanceof InputDataNode) {
        return ((InputDataNode) dep).getType();
    } else if (dep instanceof BusinessKnowledgeModelNode) {
        // might need to create a DMNType for "functions" and use that here
        return ((BusinessKnowledgeModelNode) dep).getResultType();
    } else if (dep instanceof DecisionServiceNode) {
        // might need to create a DMNType for "functions" and use that here
        return ((DecisionServiceNode) dep).getResultType();
    }
    return null;
}

/**
 * Makes one dependency visible to the expression compiler: same-namespace
 * dependencies are bound directly; imported ones become fields of a
 * per-alias composite type (created on demand).
 */
private static void registerDependency(DMNCompilerContext ctx, DMNModelImpl model,
        Map<String, DMNType> importedTypes, DMNNode dep, DMNType depType) {
    if (dep.getModelNamespace().equals(model.getNamespace())) {
        ctx.setVariable(dep.getName(), depType);
    } else {
        // Dependencies without a resolvable import alias are skipped,
        // preserving the original behavior.
        Optional<String> alias = model.getImportAliasFor(dep.getModelNamespace(), dep.getModelName());
        if (alias.isPresent()) {
            CompositeTypeImpl importedComposite = (CompositeTypeImpl) importedTypes.computeIfAbsent(alias.get(), a -> new CompositeTypeImpl());
            importedComposite.addField(dep.getName(), depType);
        }
    }
}
} | romartin/drools | kie-dmn/kie-dmn-core/src/main/java/org/kie/dmn/core/compiler/DecisionCompiler.java | Java | apache-2.0 | 6,980 |
var msg = require('./locale');
var api = require('./apiJavascript.js');
var paramLists = require('./paramLists.js');
// Droplet block palette for the hoc2015 Star Wars Play Lab level.
// Each entry describes one toolbox block: `func` is the generated call name,
// `parent` the receiver object, `params` the default argument literals, and
// `dropdown` maps a parameter index to its picker choices. `when*` entries are
// event-handler stubs with an `expansion` template inserted on drop.
module.exports.blocks = [
  {func: 'setDroid', parent: api, category: '', params: ['"R2-D2"'], dropdown: { 0: ['"random"', '"R2-D2"', '"C-3PO"'] } },
  {func: 'setDroidSpeed', parent: api, category: '', params: ['"fast"'], dropdown: { 0: ['"random"', '"slow"', '"normal"', '"fast"'] } },
  {func: 'setBackground', parent: api, category: '', params: ['"Hoth"'], dropdown: { 0: ['"random"', '"Endor"', '"Hoth"', '"Starship"'] } },
  {func: 'setMap', parent: api, category: '', params: ['"blank"'], dropdown: { 0: ['"random"', '"blank"', '"circle"', '"horizontal"', '"grid"', '"blobs"'] } },
  {func: 'moveRight', parent: api, category: '', },
  {func: 'moveLeft', parent: api, category: '', },
  {func: 'moveUp', parent: api, category: '', },
  {func: 'moveDown', parent: api, category: '', },
  {func: 'goRight', parent: api, category: '', },
  {func: 'goLeft', parent: api, category: '', },
  {func: 'goUp', parent: api, category: '', },
  {func: 'goDown', parent: api, category: '', },
  {func: 'playSound', parent: api, category: '', params: ['"R2-D2sound1"'], dropdown: { 0: paramLists.playSoundDropdown } },
  {func: 'endGame', parent: api, category: '', params: ['"win"'], dropdown: { 0: ['"win"', '"lose"'] } },
  {func: 'addPoints', parent: api, category: '', params: ["100"] },
  {func: 'removePoints', parent: api, category: '', params: ["100"] },
  {func: 'addCharacter', parent: api, category: '', params: ['"PufferPig"'], dropdown: { 0: ['"random"', '"Stormtrooper"', '"RebelPilot"', '"PufferPig"', '"Mynock"', '"MouseDroid"', '"Tauntaun"', '"Probot"'] } },
  {func: 'moveFast', parent: api, category: '', params: ['"PufferPig"'], dropdown: { 0: ['"random"', '"Stormtrooper"', '"RebelPilot"', '"PufferPig"', '"Mynock"', '"MouseDroid"', '"Tauntaun"', '"Probot"'] } },
  {func: 'moveNormal', parent: api, category: '', params: ['"PufferPig"'], dropdown: { 0: ['"random"', '"Stormtrooper"', '"RebelPilot"', '"PufferPig"', '"Mynock"', '"MouseDroid"', '"Tauntaun"', '"Probot"'] } },
  {func: 'moveSlow', parent: api, category: '', params: ['"PufferPig"'], dropdown: { 0: ['"random"', '"Stormtrooper"', '"RebelPilot"', '"PufferPig"', '"Mynock"', '"MouseDroid"', '"Tauntaun"', '"Probot"'] } },
  // Event-handler stubs exposed in the palette.
  {func: 'whenLeft', block: 'function whenLeft() {}', expansion: 'function whenLeft() {\n  __;\n}', category: '' },
  {func: 'whenRight', block: 'function whenRight() {}', expansion: 'function whenRight() {\n  __;\n}', category: '' },
  {func: 'whenUp', block: 'function whenUp() {}', expansion: 'function whenUp() {\n  __;\n}', category: '' },
  {func: 'whenDown', block: 'function whenDown() {}', expansion: 'function whenDown() {\n  __;\n}', category: '' },
  {func: 'whenTouchObstacle', block: 'function whenTouchObstacle() {}', expansion: 'function whenTouchObstacle() {\n  __;\n}', category: '' },
  {func: 'whenGetCharacter', block: 'function whenGetCharacter() {}', expansion: 'function whenGetCharacter() {\n  __;\n}', category: '' },
  {func: 'whenGetStormtrooper', block: 'function whenGetStormtrooper() {}', expansion: 'function whenGetStormtrooper() {\n  __;\n}', category: '' },
  {func: 'whenGetRebelPilot', block: 'function whenGetRebelPilot() {}', expansion: 'function whenGetRebelPilot() {\n  __;\n}', category: '' },
  {func: 'whenGetPufferPig', block: 'function whenGetPufferPig() {}', expansion: 'function whenGetPufferPig() {\n  __;\n}', category: '' },
  {func: 'whenGetMynock', block: 'function whenGetMynock() {}', expansion: 'function whenGetMynock() {\n  __;\n}', category: '' },
  {func: 'whenGetMouseDroid', block: 'function whenGetMouseDroid() {}', expansion: 'function whenGetMouseDroid() {\n  __;\n}', category: '' },
  {func: 'whenGetTauntaun', block: 'function whenGetTauntaun() {}', expansion: 'function whenGetTauntaun() {\n  __;\n}', category: '' },
  {func: 'whenGetProbot', block: 'function whenGetProbot() {}', expansion: 'function whenGetProbot() {\n  __;\n}', category: '' },
  {func: 'whenGetAllCharacters', block: 'function whenGetAllCharacters() {}', expansion: 'function whenGetAllCharacters() {\n  __;\n}', category: '' },
  {func: 'whenGetAllStormtroopers', block: 'function whenGetAllStormtroopers() {}', expansion: 'function whenGetAllStormtroopers() {\n  __;\n}', category: '' },
  {func: 'whenGetAllRebelPilots', block: 'function whenGetAllRebelPilots() {}', expansion: 'function whenGetAllRebelPilots() {\n  __;\n}', category: '' },
  {func: 'whenGetAllPufferPigs', block: 'function whenGetAllPufferPigs() {}', expansion: 'function whenGetAllPufferPigs() {\n  __;\n}', category: '' },
  {func: 'whenGetAllMynocks', block: 'function whenGetAllMynocks() {}', expansion: 'function whenGetAllMynocks() {\n  __;\n}', category: '' },
  {func: 'whenGetAllMouseDroids', block: 'function whenGetAllMouseDroids() {}', expansion: 'function whenGetAllMouseDroids() {\n  __;\n}', category: '' },
  {func: 'whenGetAllTauntauns', block: 'function whenGetAllTauntauns() {}', expansion: 'function whenGetAllTauntauns() {\n  __;\n}', category: '' },
  {func: 'whenGetAllProbots', block: 'function whenGetAllProbots() {}', expansion: 'function whenGetAllProbots() {\n  __;\n}', category: '' },
  // Functions hidden from autocomplete - not used in hoc2015:
  {func: 'whenTouchStormtrooper', block: 'function whenTouchStormtrooper() {}', expansion: 'function whenTouchStormtrooper() {\n  __;\n}', category: '', noAutocomplete: true },
  {func: 'whenTouchRebelPilot', block: 'function whenTouchRebelPilot() {}', expansion: 'function whenTouchRebelPilot() {\n  __;\n}', category: '', noAutocomplete: true },
  {func: 'whenTouchPufferPig', block: 'function whenTouchPufferPig() {}', expansion: 'function whenTouchPufferPig() {\n  __;\n}', category: '', noAutocomplete: true },
  {func: 'whenTouchMynock', block: 'function whenTouchMynock() {}', expansion: 'function whenTouchMynock() {\n  __;\n}', category: '', noAutocomplete: true },
  {func: 'whenTouchMouseDroid', block: 'function whenTouchMouseDroid() {}', expansion: 'function whenTouchMouseDroid() {\n  __;\n}', category: '', noAutocomplete: true },
  {func: 'whenTouchTauntaun', block: 'function whenTouchTauntaun() {}', expansion: 'function whenTouchTauntaun() {\n  __;\n}', category: '', noAutocomplete: true },
  {func: 'whenTouchProbot', block: 'function whenTouchProbot() {}', expansion: 'function whenTouchProbot() {\n  __;\n}', category: '', noAutocomplete: true },
  {func: 'whenTouchCharacter', block: 'function whenTouchCharacter() {}', expansion: 'function whenTouchCharacter() {\n  __;\n}', category: '', noAutocomplete: true },
  {func: 'changeScore', parent: api, category: '', params: ["1"], noAutocomplete: true },
  {func: 'whenTouchGoal', block: 'function whenTouchGoal() {}', expansion: 'function whenTouchGoal() {\n  __;\n}', category: '', noAutocomplete: true },
  {func: 'whenTouchAllGoals', block: 'function whenTouchAllGoals() {}', expansion: 'function whenTouchAllGoals() {\n  __;\n}', category: '', noAutocomplete: true },
  {func: 'whenScore1000', block: 'function whenScore1000() {}', expansion: 'function whenScore1000() {\n  __;\n}', category: '', noAutocomplete: true },
  {func: 'setToChase', parent: api, category: '', params: ['"PufferPig"'], dropdown: { 0: ['"random"', '"Stormtrooper"', '"RebelPilot"', '"PufferPig"', '"Mynock"', '"MouseDroid"', '"Tauntaun"', '"Probot"'] }, noAutocomplete: true },
  {func: 'setToFlee', parent: api, category: '', params: ['"PufferPig"'], dropdown: { 0: ['"random"', '"Stormtrooper"', '"RebelPilot"', '"PufferPig"', '"Mynock"', '"MouseDroid"', '"Tauntaun"', '"Probot"'] }, noAutocomplete: true },
  {func: 'setToRoam', parent: api, category: '', params: ['"PufferPig"'], dropdown: { 0: ['"random"', '"Stormtrooper"', '"RebelPilot"', '"PufferPig"', '"Mynock"', '"MouseDroid"', '"Tauntaun"', '"Probot"'] }, noAutocomplete: true },
  {func: 'setToStop', parent: api, category: '', params: ['"PufferPig"'], dropdown: { 0: ['"random"', '"Stormtrooper"', '"RebelPilot"', '"PufferPig"', '"Mynock"', '"MouseDroid"', '"Tauntaun"', '"Probot"'] }, noAutocomplete: true },
  {func: 'setSprite', parent: api, category: '', params: ['0', '"R2-D2"'], dropdown: { 1: ['"random"', '"R2-D2"', '"C-3PO"'] }, noAutocomplete: true },
  {func: 'setSpritePosition', parent: api, category: '', params: ["0", "7"], noAutocomplete: true },
  {func: 'setSpriteSpeed', parent: api, category: '', params: ["0", "8"], noAutocomplete: true },
  {func: 'setSpriteEmotion', parent: api, category: '', params: ["0", "1"], noAutocomplete: true },
  {func: 'setSpriteSize', parent: api, category: '', params: ["0", "1.0"], noAutocomplete: true },
  {func: 'throwProjectile', parent: api, category: '', params: ["0", "1", '"blue_fireball"'], noAutocomplete: true },
  {func: 'vanish', parent: api, category: '', params: ["0"], noAutocomplete: true },
  {func: 'move', parent: api, category: '', params: ["0", "1"], noAutocomplete: true },
  {func: 'showDebugInfo', parent: api, category: '', params: ["false"], noAutocomplete: true },
  {func: 'onEvent', parent: api, category: '', params: ["'when-left'", "function() {\n  \n}"], noAutocomplete: true },
];
// Toolbox category definitions (all empty here: blocks are assigned to
// categories elsewhere / at runtime) plus autocomplete behavior flags.
module.exports.categories = {
  '': {
    color: 'red',
    blocks: []
  },
  'Play Lab': {
    color: 'red',
    blocks: []
  },
  Commands: {
    color: 'red',
    blocks: []
  },
  Events: {
    color: 'green',
    blocks: []
  },
};

// Show parentheses in autocomplete suggestions and enable per-parameter
// dropdown pickers in the editor.
module.exports.autocompleteFunctionsWithParens = true;
module.exports.showParamDropdowns = true;
| pickettd/code-dot-org | apps/src/studio/dropletConfig.js | JavaScript | apache-2.0 | 9,539 |
<?php
/*
* Copyright 2014 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
namespace Google\Service\Classroom;
/**
 * Model for a Cloud Pub/Sub topic reference as used by the Classroom API.
 * A plain data holder with a single topic-name property.
 */
class CloudPubsubTopic extends \Google\Model
{
  /**
   * The Pub/Sub topic name.
   * NOTE(review): presumably in `projects/{project}/topics/{topic}` form —
   * confirm against the Classroom API reference.
   * @var string
   */
  public $topicName;

  /**
   * @param string $topicName
   */
  public function setTopicName($topicName)
  {
    $this->topicName = $topicName;
  }
  /**
   * @return string
   */
  public function getTopicName()
  {
    return $this->topicName;
  }
}

// Adding a class alias for backwards compatibility with the previous class name.
class_alias(CloudPubsubTopic::class, 'Google_Service_Classroom_CloudPubsubTopic');
| googleapis/google-api-php-client-services | src/Classroom/CloudPubsubTopic.php | PHP | apache-2.0 | 1,120 |
/**
* libjass
*
* https://github.com/Arnavion/libjass
*
* Copyright 2013 Arnav Singh
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Functional test for subtitle outline rendering: loads the browser test page
// with outlines.ass at 1280x720, seeks to the middle of each cue, and compares
// a screenshot against the corresponding reference PNG.
define(["intern!tdd", "require", "tests/support/test-page"], function (tdd, require, TestPage) {
    tdd.suite("Outlines", function () {
        tdd.test("Basic", function () {
            var testPage = new TestPage(this.remote, require.toUrl("tests/support/browser-test-page.html"), "/tests/functional/outlines/outlines.ass", 1280, 720);
            return testPage
                .prepare()
                // One screenshot comparison per second of the script.
                .then(function (testPage) { return testPage.seekAndCompareScreenshot(0.5, require.toUrl("./outlines-1.png")); })
                .then(function (testPage) { return testPage.seekAndCompareScreenshot(1.5, require.toUrl("./outlines-2.png")); })
                .then(function (testPage) { return testPage.seekAndCompareScreenshot(2.5, require.toUrl("./outlines-3.png")); })
                .then(function (testPage) { return testPage.seekAndCompareScreenshot(3.5, require.toUrl("./outlines-4.png")); })
                .then(function (testPage) { return testPage.done(); });
        });
    });
});
| joshuabrown-ellation/libjass | tests/functional/outlines/outlines.js | JavaScript | apache-2.0 | 1,551 |
<?php
/*
* Copyright 2014 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
namespace Google\Service\ToolResults;
/**
 * Model describing a non-SDK (hidden/restricted) Android API usage violation
 * reported by a Tool Results test: the offending API signatures and how many
 * unique APIs were touched.
 */
class NonSdkApiUsageViolation extends \Google\Collection
{
  // Name of the repeated field used by \Google\Collection for iteration.
  protected $collection_key = 'apiSignatures';
  /**
   * Signatures of the non-SDK APIs that were accessed.
   * @var string[]
   */
  public $apiSignatures;
  /**
   * Count of distinct non-SDK APIs accessed.
   * @var int
   */
  public $uniqueApis;

  /**
   * @param string[] $apiSignatures
   */
  public function setApiSignatures($apiSignatures)
  {
    $this->apiSignatures = $apiSignatures;
  }
  /**
   * @return string[]
   */
  public function getApiSignatures()
  {
    return $this->apiSignatures;
  }
  /**
   * @param int $uniqueApis
   */
  public function setUniqueApis($uniqueApis)
  {
    $this->uniqueApis = $uniqueApis;
  }
  /**
   * @return int
   */
  public function getUniqueApis()
  {
    return $this->uniqueApis;
  }
}

// Adding a class alias for backwards compatibility with the previous class name.
class_alias(NonSdkApiUsageViolation::class, 'Google_Service_ToolResults_NonSdkApiUsageViolation');
| googleapis/google-api-php-client-services | src/ToolResults/NonSdkApiUsageViolation.php | PHP | apache-2.0 | 1,498 |
#!/usr/bin/env python
# encoding=utf-8
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pexpect
import pytest
import shlex
import shutil
import socket
import signal
from impala_shell_results import get_shell_cmd_result, cancellation_helper
from subprocess import Popen, PIPE
from tests.common.impala_service import ImpaladService
from tests.verifiers.metric_verifier import MetricVerifier
from time import sleep
# Launcher script for impala-shell inside the Impala source tree
# (requires IMPALA_HOME to be set in the environment).
SHELL_CMD = "%s/bin/impala-shell.sh" % os.environ['IMPALA_HOME']
# The shell's readline history file; it is moved aside for the duration of
# these tests (see setup_class/teardown_class) so the developer's real
# history is not clobbered.
SHELL_HISTORY_FILE = os.path.expanduser("~/.impalahistory")
TMP_HISTORY_FILE = os.path.expanduser("~/.impalahistorytmp")
class TestImpalaShellInteractive(object):
"""Test the impala shell interactively"""
def _send_cmd_to_shell(self, p, cmd):
    """Given an open shell process, write a cmd to stdin.

    This method takes care of adding the delimiter and EOL; callers should
    send the raw command (no trailing ';').
    """
    p.stdin.write("%s;\n" % cmd)
    # Flush so the interactive shell sees the command immediately.
    p.stdin.flush()
def _start_new_shell_process(self, args=None):
    """Starts a new impala-shell process and returns its Popen handle.

    Any extra command-line arguments in `args` are appended to the shell
    command. Bug fix: the computed `cmd` was previously ignored and the
    process was always launched with the bare SHELL_CMD, silently dropping
    `args`. `shell=True` was also removed: combining an argument list with
    shell=True would pass the extra items to /bin/sh rather than to
    impala-shell.
    """
    cmd = "%s %s" % (SHELL_CMD, args) if args else SHELL_CMD
    return Popen(shlex.split(cmd), stdout=PIPE, stdin=PIPE, stderr=PIPE)
@classmethod
def setup_class(cls):
if os.path.exists(SHELL_HISTORY_FILE):
shutil.move(SHELL_HISTORY_FILE, TMP_HISTORY_FILE)
@classmethod
def teardown_class(cls):
if os.path.exists(TMP_HISTORY_FILE): shutil.move(TMP_HISTORY_FILE, SHELL_HISTORY_FILE)
@pytest.mark.execute_serially
def test_escaped_quotes(self):
"""Test escaping quotes"""
# test escaped quotes outside of quotes
result = run_impala_shell_interactive("select \\'bc';")
assert "could not match input" in result.stderr
result = run_impala_shell_interactive("select \\\"bc\";")
assert "could not match input" in result.stderr
# test escaped quotes within quotes
result = run_impala_shell_interactive("select 'ab\\'c';")
assert "Fetched 1 row(s)" in result.stderr
result = run_impala_shell_interactive("select \"ab\\\"c\";")
assert "Fetched 1 row(s)" in result.stderr
@pytest.mark.execute_serially
def test_cancellation(self):
impalad = ImpaladService(socket.getfqdn())
impalad.wait_for_num_in_flight_queries(0)
command = "select sleep(10000);"
p = self._start_new_shell_process()
self._send_cmd_to_shell(p, command)
sleep(1)
# iterate through all processes with psutil
shell_pid = cancellation_helper()
sleep(2)
os.kill(shell_pid, signal.SIGINT)
result = get_shell_cmd_result(p)
assert impalad.wait_for_num_in_flight_queries(0)
@pytest.mark.execute_serially
def test_unicode_input(self):
"Test queries containing non-ascii input"
# test a unicode query spanning multiple lines
unicode_text = u'\ufffd'
args = "select '%s'\n;" % unicode_text.encode('utf-8')
result = run_impala_shell_interactive(args)
assert "Fetched 1 row(s)" in result.stderr
@pytest.mark.execute_serially
def test_welcome_string(self):
"""Test that the shell's welcome message is only printed once
when the shell is started. Ensure it is not reprinted on errors.
Regression test for IMPALA-1153
"""
result = run_impala_shell_interactive('asdf;')
assert result.stdout.count("Welcome to the Impala shell") == 1
result = run_impala_shell_interactive('select * from non_existent_table;')
assert result.stdout.count("Welcome to the Impala shell") == 1
@pytest.mark.execute_serially
def test_bash_cmd_timing(self):
"""Test existence of time output in bash commands run from shell"""
args = "! ls;"
result = run_impala_shell_interactive(args)
assert "Executed in" in result.stderr
@pytest.mark.execute_serially
def test_reconnect(self):
"""Regression Test for IMPALA-1235
Verifies that a connect command by the user is honoured.
"""
def get_num_open_sessions(impala_service):
"""Helper method to retrieve the number of open sessions"""
return impala_service.get_metric_value('impala-server.num-open-beeswax-sessions')
hostname = socket.getfqdn()
initial_impala_service = ImpaladService(hostname)
target_impala_service = ImpaladService(hostname, webserver_port=25001,
beeswax_port=21001, be_port=22001)
# Get the initial state for the number of sessions.
num_sessions_initial = get_num_open_sessions(initial_impala_service)
num_sessions_target = get_num_open_sessions(target_impala_service)
# Connect to localhost:21000 (default)
p = self._start_new_shell_process()
sleep(2)
# Make sure we're connected <hostname>:21000
assert get_num_open_sessions(initial_impala_service) == num_sessions_initial + 1, \
"Not connected to %s:21000" % hostname
self._send_cmd_to_shell(p, "connect %s:21001" % hostname)
# Wait for a little while
sleep(2)
# The number of sessions on the target impalad should have been incremented.
assert get_num_open_sessions(target_impala_service) == num_sessions_target + 1, \
"Not connected to %s:21001" % hostname
# The number of sessions on the initial impalad should have been decremented.
assert get_num_open_sessions(initial_impala_service) == num_sessions_initial, \
"Connection to %s:21000 should have been closed" % hostname
@pytest.mark.execute_serially
def test_ddl_queries_are_closed(self):
"""Regression test for IMPALA-1317
The shell does not call close() for alter, use and drop queries, leaving them in
flight. This test issues those queries in interactive mode, and checks the debug
webpage to confirm that they've been closed.
TODO: Add every statement type.
"""
TMP_DB = 'inflight_test_db'
TMP_TBL = 'tmp_tbl'
MSG = '%s query should be closed'
NUM_QUERIES = 'impala-server.num-queries'
impalad = ImpaladService(socket.getfqdn())
p = self._start_new_shell_process()
try:
start_num_queries = impalad.get_metric_value(NUM_QUERIES)
self._send_cmd_to_shell(p, 'create database if not exists %s' % TMP_DB)
self._send_cmd_to_shell(p, 'use %s' % TMP_DB)
impalad.wait_for_metric_value(NUM_QUERIES, start_num_queries + 2)
assert impalad.wait_for_num_in_flight_queries(0), MSG % 'use'
self._send_cmd_to_shell(p, 'create table %s(i int)' % TMP_TBL)
self._send_cmd_to_shell(p, 'alter table %s add columns (j int)' % TMP_TBL)
impalad.wait_for_metric_value(NUM_QUERIES, start_num_queries + 4)
assert impalad.wait_for_num_in_flight_queries(0), MSG % 'alter'
self._send_cmd_to_shell(p, 'drop table %s' % TMP_TBL)
impalad.wait_for_metric_value(NUM_QUERIES, start_num_queries + 5)
assert impalad.wait_for_num_in_flight_queries(0), MSG % 'drop'
finally:
run_impala_shell_interactive("drop table if exists %s.%s;" % (TMP_DB, TMP_TBL))
run_impala_shell_interactive("drop database if exists foo;")
@pytest.mark.execute_serially
def test_multiline_queries_in_history(self):
"""Test to ensure that multiline queries with comments are preserved in history
Ensure that multiline queries are preserved when they're read back from history.
Additionally, also test that comments are preserved.
"""
# regex for pexpect, a shell prompt is expected after each command..
prompt_regex = '.*%s:2100.*' % socket.getfqdn()
# readline gets its input from tty, so using stdin does not work.
child_proc = pexpect.spawn(SHELL_CMD)
queries = ["select\n1--comment;",
"select /*comment*/\n1;",
"select\n/*comm\nent*/\n1;"]
for query in queries:
child_proc.expect(prompt_regex)
child_proc.sendline(query)
child_proc.expect(prompt_regex)
child_proc.sendline('quit;')
p = self._start_new_shell_process()
self._send_cmd_to_shell(p, 'history')
result = get_shell_cmd_result(p)
for query in queries:
assert query in result.stderr, "'%s' not in '%s'" % (query, result.stderr)
def run_impala_shell_interactive(command, shell_args=''):
  """Runs a command in the Impala shell interactively.

  Writes `command` to the shell's stdin and returns the collected result from
  get_shell_cmd_result (which waits for the process to finish).
  """
  cmd = "%s %s" % (SHELL_CMD, shell_args)
  # workaround to make Popen environment 'utf-8' compatible
  # since piping defaults to ascii.
  # Bug fix: copy the environment instead of mutating os.environ directly --
  # assigning through os.environ changes the environment of this test process
  # (and everything it subsequently spawns), not just this one child.
  my_env = dict(os.environ)
  my_env['PYTHONIOENCODING'] = 'utf-8'
  # Bug fix: shell=True combined with an argument list is incorrect on POSIX:
  # the items after the first become positional parameters of /bin/sh, so
  # shell_args were effectively dropped. Execute the split command directly.
  p = Popen(shlex.split(cmd), stdout=PIPE,
            stdin=PIPE, stderr=PIPE, env=my_env)
  p.stdin.write(command + "\n")
  p.stdin.flush()
  return get_shell_cmd_result(p)
| andybab/Impala | tests/shell/test_shell_interactive.py | Python | apache-2.0 | 9,064 |
/*
* Copyright (c) 2001-2007, TIBCO Software Inc.
* Use, modification, and distribution subject to terms of license.
*/
// NOTE(review): machine-generated/minified build artifact (TIBCO General
// Interface, jsx3.chart.CategoryAxis). The obfuscated member names (Ho, pj,
// fl, Hf, se, Xj, ...) are produced by the GI build; do not hand-edit this
// file -- regenerate it from the original source instead.
jsx3.require("jsx3.chart.Axis");jsx3.Class.defineClass("jsx3.chart.CategoryAxis",jsx3.chart.Axis,null,function(c,p){var
ub={d:"h6",a:"aligned",c:"av",f:"gn",b:"between",e:"tickAlignment"};c.TICKS_ALIGNED=ub.a;c.TICKS_BETWEEN=ub.b;c.MAX_TICKS=200;c.BG={aligned:1,between:1};p.init=function(i,r,q){this.jsxsuper(i,r,q);this.tickAlignment=ub.b;this.categoryField=null;this.paddingLow=null;this.paddingHigh=null;this.Ho(ub.c,0);this.Ho(ub.d,0);};p.getTickAlignment=function(){return this.tickAlignment;};p.setTickAlignment=function(l){if(c.BG[l]){this.tickAlignment=l;}else throw new
jsx3.IllegalArgumentException(ub.e,l);};p.getCategoryField=function(){return this.categoryField;};p.setCategoryField=function(m){this.categoryField=m;};p.getPaddingLow=function(){return this.paddingLow!=null?this.paddingLow:0;};p.setPaddingLow=function(h){this.paddingLow=h;};p.getPaddingHigh=function(){return this.paddingHigh!=null?this.paddingHigh:0;};p.setPaddingHigh=function(r){this.paddingHigh=r;};p.fl=function(){this.Ll(ub.f);var
da=this.getChart();if(da==null){this.Ho(ub.c,0);this.Ho(ub.d,0);}else{var
tb=da.pe(this,true);var
ib=da.bh();this.Ho(ub.d,tb.length);this.Ho(ub.c,ib!=null?ib.length:0);}};p.Hf=function(){var
nb=this.pj(ub.f);if(nb!=null)return nb;var
B=this.pj(ub.c);nb=[];if(B<1)return nb;var
ga=this.getPaddingLow();var
Va=this.getPaddingHigh();var
x=this.tickAlignment==ub.b?B+1:B;var
La=x-1;var
fb=La+ga+Va;var
pb=this.length/fb;var
C=ga*pb;for(var
Qa=0;Qa<x&&Qa<c.MAX_TICKS;Qa++)nb.push(Math.round(C+Qa*pb));this.Ho(ub.f,nb);return nb;};p.se=function(){var
B=this.pj(ub.c);if(this.tickAlignment==ub.b){var
Ab=this.Hf();var
Xa=[];for(var
va=0;va<B;va++)Xa[va]=Math.round((Ab[va]+Ab[va+1])/2);return Xa;}else return this.Hf();};p.Xj=function(b){var
Pa=b;var
z=this.getChart();if(this.categoryField&&z!=null){var
ab=z.bh();if(ab!=null){var
ga=ab[b];if(ga!=null)Pa=ga.getAttribute([this.categoryField]);}}return Pa;};p.mo=function(){return false;};p.getRangeForCategory=function(j){var
_=this.Hf();if(this.tickAlignment==ub.b){if(j<0||j>=_.length-1)return null;else return [_[j],_[j+1]];}else{if(j<0||j>=_.length||_.length<2)return null;var
Xa=j==0?_[1]-_[0]:_[j]-_[j-1];return [Math.round(_[j]-Xa/2),Math.round(_[j]+Xa/2)];}};p.getPointForCategory=function(n){var
Aa=this.Hf();if(this.tickAlignment==ub.b){if(n<0||n>=Aa.length-1)return null;else return Math.round((Aa[n]+Aa[n+1])/2);}else return Aa[n];};c.getVersion=function(){return jsx3.chart.si;};});
| burris/dwr | ui/gi/demo/web/gi/JSX/addins/charting/classes/jsx3/chart/CategoryAxis.js | JavaScript | apache-2.0 | 2,582 |
/*
* Copyright 2012-2013 eBay Software Foundation and ios-driver committers
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.uiautomation.ios;
import com.google.common.collect.ImmutableList;
import org.libimobiledevice.ios.driver.binding.exceptions.SDKException;
import org.libimobiledevice.ios.driver.binding.model.ApplicationInfo;
import org.libimobiledevice.ios.driver.binding.model.DeviceInfo;
import org.libimobiledevice.ios.driver.binding.services.DeviceCallBack;
import org.libimobiledevice.ios.driver.binding.services.DeviceService;
import org.libimobiledevice.ios.driver.binding.services.IOSDevice;
import org.libimobiledevice.ios.driver.binding.services.ImageMountingService;
import org.libimobiledevice.ios.driver.binding.services.InformationService;
import org.libimobiledevice.ios.driver.binding.services.InstallerService;
import org.openqa.selenium.WebDriverException;
import org.uiautomation.ios.application.IPAShellApplication;
import org.uiautomation.ios.utils.DDILocator;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.logging.Logger;
/**
 * Registry of the iOS devices (real and simulator) currently available to the
 * server. Real devices are added and removed by libimobiledevice plug/unplug
 * callbacks; simulator devices are registered explicitly via {@link #add}.
 */
public class DeviceStore extends DeviceCallBack {

  private static final Logger log = Logger.getLogger(DeviceStore.class.getName());

  // CopyOnWriteArrayList: plug/unplug callbacks and readers may run on
  // different threads.
  private final List<RealDevice> reals = new CopyOnWriteArrayList<RealDevice>();
  private final List<SimulatorDevice> sims = new CopyOnWriteArrayList<SimulatorDevice>();
  private final ApplicationStore apps;
  // When non-empty, only devices whose UUID appears here are managed.
  private final Set<String> uuidWhitelist;

  public DeviceStore(ApplicationStore apps, Set<String> uuidWhitelist) {
    super();
    this.apps = apps;
    this.uuidWhitelist = uuidWhitelist;
  }

  /**
   * @return immutable copy of the currently available devices.
   */
  public List<Device> getDevices() {
    List<Device> all = new ArrayList<Device>();
    all.addAll(reals);
    all.addAll(sims);
    return ImmutableList.copyOf(all);
  }

  public List<RealDevice> getRealDevices() {
    return reals;
  }

  public List<SimulatorDevice> getSimulatorDevices() {
    return sims;
  }

  public void add(SimulatorDevice simulatorDevice) {
    sims.add(simulatorDevice);
  }

  /**
   * Registers a newly plugged real device: records it, publishes its
   * MobileSafari build to the application store, and mounts the developer
   * disk image if the device is not yet in dev mode.
   */
  @Override
  protected void onDeviceAdded(String uuid) {
    if (!uuidWhitelist.isEmpty() && !uuidWhitelist.contains(uuid)) {
      log.info("device detected but not whitelisted");
      return;
    }
    RealDevice d = null;
    try {
      IOSDevice device = DeviceService.get(uuid);
      DeviceInfo info = new DeviceInfo(uuid);
      d = new RealDevice(info);
      log.info("new device detected (" + uuid + ") " + info.getDeviceName());
      reals.add(d);
      InstallerService s = new InstallerService(device);
      String id = "com.apple.mobilesafari";
      ApplicationInfo safari = s.getApplication(id);
      String v = (String) safari.getProperty("CFBundleVersion");
      log.info("device " + info.getDeviceName() + " = safari " + v);
      IPAShellApplication ipa = new IPAShellApplication(id, v, safari);
      apps.add(ipa);
      InformationService i = new InformationService(device);
      if (!i.isDevModeEnabled()) {
        log.warning(
            "The device " + uuid + " is not set to dev mode. It can't be used for testing.");
        File ddi = DDILocator.locateDDI(device);
        mount(device, ddi);
        log.info("DDI mounted.Device now in dev mode.");
      }
    } catch (SDKException | WebDriverException e) {
      // Bug fix: this failure used to be swallowed silently, which made
      // devices that failed to register impossible to diagnose from the logs.
      log.warning("Failed to register device " + uuid + ": " + e);
      // Roll back the partial registration.
      if (d != null) {
        reals.remove(d);
      }
    }
  }

  /** Mounts the developer disk image, always freeing the mounting service. */
  private void mount(IOSDevice device, File ddi) throws SDKException {
    ImageMountingService service = null;
    try {
      service = new ImageMountingService(device);
      service.mount(ddi);
    } finally {
      if (service != null) {
        service.free();
      }
    }
  }

  /** Removes an unplugged (whitelisted) real device from the pool. */
  @Override
  protected void onDeviceRemoved(String uuid) {
    if (!uuidWhitelist.isEmpty() && !uuidWhitelist.contains(uuid)) {
      log.info("device removed but not whitelisted");
      return;
    }
    for (RealDevice d : reals) {
      if (d.getUuid().equals(uuid)) {
        log.info("Removing " + uuid + " for the devices pool");
        boolean ok = reals.remove(d);
        if (!ok) {
          log.warning("device " + uuid + " has been unplugged, but was never there ?");
        }
      }
    }
  }
}
| darraghgrace/ios-driver | server/src/main/java/org/uiautomation/ios/DeviceStore.java | Java | apache-2.0 | 4,836 |
/*
* Kendo UI Web v2014.1.318 (http://kendoui.com)
* Copyright 2014 Telerik AD. All rights reserved.
*
* Kendo UI Web commercial licenses may be obtained at
* http://www.telerik.com/purchase/license-agreement/kendo-ui-web
* If you do not own a commercial license, this file shall be governed by the
* GNU General Public License (GPL) version 3.
* For GPL requirements, please review: http://www.gnu.org/copyleft/gpl.html
*/
// Kendo UI culture definition for "nn" (Norwegian Nynorsk).
// Wrapped so it works both under an AMD loader and as a plain <script>
// (see the matching loader shim at the bottom of the file).
(function(f, define){
    define([], f);
})(function(){
(function( window, undefined ) {
    var kendo = window.kendo || (window.kendo = { cultures: {} });
    kendo.cultures["nn"] = {
        name: "nn",
        // Numbers: space as group separator, comma as decimal mark.
        numberFormat: {
            pattern: ["-n"],
            decimals: 2,
            ",": " ",
            ".": ",",
            groupSize: [3],
            percent: {
                pattern: ["-n %","n %"],
                decimals: 2,
                ",": " ",
                ".": ",",
                groupSize: [3],
                symbol: "%"
            },
            // Currency symbol precedes the value ("kr -n" / "kr n").
            currency: {
                pattern: ["$ -n","$ n"],
                decimals: 2,
                ",": " ",
                ".": ",",
                groupSize: [3],
                symbol: "kr"
            }
        },
        calendars: {
            standard: {
                days: {
                    names: ["søndag","måndag","tysdag","onsdag","torsdag","fredag","laurdag"],
                    namesAbbr: ["sø","må","ty","on","to","fr","la"],
                    namesShort: ["sø","må","ty","on","to","fr","la"]
                },
                months: {
                    names: ["januar","februar","mars","april","mai","juni","juli","august","september","oktober","november","desember",""],
                    namesAbbr: ["jan","feb","mar","apr","mai","jun","jul","aug","sep","okt","nov","des",""]
                },
                // No AM/PM designators; the culture uses 24-hour time.
                AM: [""],
                PM: [""],
                patterns: {
                    d: "dd.MM.yyyy",
                    D: "d. MMMM yyyy",
                    F: "d. MMMM yyyy HH:mm:ss",
                    g: "dd.MM.yyyy HH:mm",
                    G: "dd.MM.yyyy HH:mm:ss",
                    m: "d. MMMM",
                    M: "d. MMMM",
                    s: "yyyy'-'MM'-'dd'T'HH':'mm':'ss",
                    t: "HH:mm",
                    T: "HH:mm:ss",
                    u: "yyyy'-'MM'-'dd HH':'mm':'ss'Z'",
                    y: "MMMM yyyy",
                    Y: "MMMM yyyy"
                },
                "/": ".",
                ":": ":",
                // Week starts on Monday.
                firstDay: 1
            }
        }
    }
})(this);
    return window.kendo;
}, typeof define == 'function' && define.amd ? define : function(_, f){ f(); }); | facundolucas/eCuentas | src/main/webapp/resources/kendoui/src/js/cultures/kendo.culture.nn.js | JavaScript | apache-2.0 | 2,710 |
using System;
using System.Text;
using System.Threading.Tasks;
using Foundatio.Extensions;
namespace Foundatio.Serializer {
    /// <summary>
    /// Contract for pluggable serializers that convert objects to and from raw byte arrays.
    /// </summary>
    public interface ISerializer {
        /// <summary>Deserializes <paramref name="data"/> into an instance of <paramref name="objectType"/>.</summary>
        Task<object> DeserializeAsync(byte[] data, Type objectType);

        /// <summary>Serializes <paramref name="value"/> into a byte array.</summary>
        Task<byte[]> SerializeAsync(object value);
    }

    /// <summary>
    /// String and generic-type convenience overloads built on <see cref="ISerializer"/>.
    /// All string conversions use UTF-8; null strings are treated as empty.
    /// </summary>
    public static class SerializerExtensions {
        /// <summary>Deserializes a string (UTF-8 encoded) into an instance of <paramref name="objectType"/>.</summary>
        public static Task<object> DeserializeAsync(this ISerializer serializer, string data, Type objectType) {
            return serializer.DeserializeAsync(Encoding.UTF8.GetBytes(data ?? String.Empty), objectType);
        }

        /// <summary>Deserializes raw bytes into a <typeparamref name="T"/>.</summary>
        public static async Task<T> DeserializeAsync<T>(this ISerializer serializer, byte[] data) {
            return (T)await serializer.DeserializeAsync(data, typeof(T)).AnyContext();
        }

        /// <summary>Deserializes a string (UTF-8 encoded) into a <typeparamref name="T"/>.</summary>
        public static Task<T> DeserializeAsync<T>(this ISerializer serializer, string data) {
            return DeserializeAsync<T>(serializer, Encoding.UTF8.GetBytes(data ?? String.Empty));
        }

        /// <summary>
        /// Serializes <paramref name="value"/> and returns the result decoded as a UTF-8 string;
        /// returns null when <paramref name="value"/> is null.
        /// </summary>
        public static async Task<string> SerializeToStringAsync(this ISerializer serializer, object value) {
            if (value == null)
                return null;
            return Encoding.UTF8.GetString(await serializer.SerializeAsync(value).AnyContext());
        }
    }
}
| wgraham17/Foundatio | src/Foundatio/Serializer/ISerializer.cs | C# | apache-2.0 | 1,253 |
package ec2
import (
"github.com/crowdmob/goamz/aws"
"time"
)
// Sign exposes the package-private sign function so that external test
// packages can exercise EC2 request signing directly.
func Sign(auth aws.Auth, method, path string, params map[string]string, host string) {
	sign(auth, method, path, params, host)
}
// fixedTime returns a constant timestamp (2012-01-01T00:00:00Z) so that
// signature output is deterministic in tests.
func fixedTime() time.Time {
	return time.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC)
}
// FakeTime toggles the package clock used by the signing code: when fakeIt is
// true it is pinned to the fixed test timestamp, otherwise time.Now is
// restored.
func FakeTime(fakeIt bool) {
	if fakeIt {
		timeNow = fixedTime
	} else {
		timeNow = time.Now
	}
}
| coreos/aws-auth-proxy | vendor/github.com/crowdmob/goamz/ec2/export_test.go | GO | apache-2.0 | 380 |
\section{File List}
Here is a list of all files with brief descriptions\-:\begin{DoxyCompactList}
\item\contentsline{section}{build/catkin\-\_\-generated/\hyperlink{generate__cached__setup_8py}{generate\-\_\-cached\-\_\-setup.\-py} }{\pageref{generate__cached__setup_8py}}{}
\item\contentsline{section}{build/catkin\-\_\-generated/\hyperlink{order__packages_8py}{order\-\_\-packages.\-py} }{\pageref{order__packages_8py}}{}
\item\contentsline{section}{build/catkin\-\_\-generated/installspace/\hyperlink{build_2catkin__generated_2installspace_2__setup__util_8py}{\-\_\-setup\-\_\-util.\-py} }{\pageref{build_2catkin__generated_2installspace_2__setup__util_8py}}{}
\item\contentsline{section}{build/\-C\-Make\-Files/2.\-8.\-12.\-2/\-Compiler\-Id\-C/\hyperlink{CMakeCCompilerId_8c}{C\-Make\-C\-Compiler\-Id.\-c} }{\pageref{CMakeCCompilerId_8c}}{}
\item\contentsline{section}{build/\-C\-Make\-Files/2.\-8.\-12.\-2/\-Compiler\-Id\-C\-X\-X/\hyperlink{CMakeCXXCompilerId_8cpp}{C\-Make\-C\-X\-X\-Compiler\-Id.\-cpp} }{\pageref{CMakeCXXCompilerId_8cpp}}{}
\item\contentsline{section}{build/nubot\-\_\-common/catkin\-\_\-generated/\hyperlink{nubot__common_2catkin__generated_2pkg_8develspace_8context_8pc_8py}{pkg.\-develspace.\-context.\-pc.\-py} }{\pageref{nubot__common_2catkin__generated_2pkg_8develspace_8context_8pc_8py}}{}
\item\contentsline{section}{build/nubot\-\_\-common/catkin\-\_\-generated/\hyperlink{nubot__common_2catkin__generated_2pkg_8installspace_8context_8pc_8py}{pkg.\-installspace.\-context.\-pc.\-py} }{\pageref{nubot__common_2catkin__generated_2pkg_8installspace_8context_8pc_8py}}{}
\item\contentsline{section}{build/nubot\-\_\-common/cmake/\hyperlink{nubot__common-genmsg-context_8py}{nubot\-\_\-common-\/genmsg-\/context.\-py} }{\pageref{nubot__common-genmsg-context_8py}}{}
\item\contentsline{section}{build/nubot\-\_\-simulation/nubot\-\_\-description/catkin\-\_\-generated/\hyperlink{nubot__simulation_2nubot__description_2catkin__generated_2pkg_8develspace_8context_8pc_8py}{pkg.\-develspace.\-context.\-pc.\-py} }{\pageref{nubot__simulation_2nubot__description_2catkin__generated_2pkg_8develspace_8context_8pc_8py}}{}
\item\contentsline{section}{build/nubot\-\_\-simulation/nubot\-\_\-description/catkin\-\_\-generated/\hyperlink{nubot__simulation_2nubot__description_2catkin__generated_2pkg_8installspace_8context_8pc_8py}{pkg.\-installspace.\-context.\-pc.\-py} }{\pageref{nubot__simulation_2nubot__description_2catkin__generated_2pkg_8installspace_8context_8pc_8py}}{}
\item\contentsline{section}{build/nubot\-\_\-simulation/nubot\-\_\-gazebo/catkin\-\_\-generated/\hyperlink{nubot__simulation_2nubot__gazebo_2catkin__generated_2pkg_8develspace_8context_8pc_8py}{pkg.\-develspace.\-context.\-pc.\-py} }{\pageref{nubot__simulation_2nubot__gazebo_2catkin__generated_2pkg_8develspace_8context_8pc_8py}}{}
\item\contentsline{section}{build/nubot\-\_\-simulation/nubot\-\_\-gazebo/catkin\-\_\-generated/\hyperlink{nubot__simulation_2nubot__gazebo_2catkin__generated_2pkg_8installspace_8context_8pc_8py}{pkg.\-installspace.\-context.\-pc.\-py} }{\pageref{nubot__simulation_2nubot__gazebo_2catkin__generated_2pkg_8installspace_8context_8pc_8py}}{}
\item\contentsline{section}{devel/\hyperlink{devel_2__setup__util_8py}{\-\_\-setup\-\_\-util.\-py} }{\pageref{devel_2__setup__util_8py}}{}
\item\contentsline{section}{devel/include/nubot\-\_\-common/\hyperlink{BallHandle_8h}{Ball\-Handle.\-h} }{\pageref{BallHandle_8h}}{}
\item\contentsline{section}{devel/include/nubot\-\_\-common/\hyperlink{BallHandleRequest_8h}{Ball\-Handle\-Request.\-h} }{\pageref{BallHandleRequest_8h}}{}
\item\contentsline{section}{devel/include/nubot\-\_\-common/\hyperlink{BallHandleResponse_8h}{Ball\-Handle\-Response.\-h} }{\pageref{BallHandleResponse_8h}}{}
\item\contentsline{section}{devel/include/nubot\-\_\-common/\hyperlink{Shoot_8h}{Shoot.\-h} }{\pageref{Shoot_8h}}{}
\item\contentsline{section}{devel/include/nubot\-\_\-common/\hyperlink{ShootRequest_8h}{Shoot\-Request.\-h} }{\pageref{ShootRequest_8h}}{}
\item\contentsline{section}{devel/include/nubot\-\_\-common/\hyperlink{ShootResponse_8h}{Shoot\-Response.\-h} }{\pageref{ShootResponse_8h}}{}
\item\contentsline{section}{devel/include/nubot\-\_\-common/\hyperlink{VelCmd_8h}{Vel\-Cmd.\-h} }{\pageref{VelCmd_8h}}{}
\item\contentsline{section}{devel/include/nubot\-\_\-gazebo/\hyperlink{NubotGazeboConfig_8h}{Nubot\-Gazebo\-Config.\-h} }{\pageref{NubotGazeboConfig_8h}}{}
\item\contentsline{section}{devel/lib/python2.\-7/dist-\/packages/nubot\-\_\-common/\hyperlink{nubot__common_2____init_____8py}{\-\_\-\-\_\-init\-\_\-\-\_\-.\-py} }{\pageref{nubot__common_2____init_____8py}}{}
\item\contentsline{section}{devel/lib/python2.\-7/dist-\/packages/nubot\-\_\-common/msg/\hyperlink{nubot__common_2msg_2____init_____8py}{\-\_\-\-\_\-init\-\_\-\-\_\-.\-py} }{\pageref{nubot__common_2msg_2____init_____8py}}{}
\item\contentsline{section}{devel/lib/python2.\-7/dist-\/packages/nubot\-\_\-common/msg/\hyperlink{__VelCmd_8py}{\-\_\-\-Vel\-Cmd.\-py} }{\pageref{__VelCmd_8py}}{}
\item\contentsline{section}{devel/lib/python2.\-7/dist-\/packages/nubot\-\_\-common/srv/\hyperlink{nubot__common_2srv_2____init_____8py}{\-\_\-\-\_\-init\-\_\-\-\_\-.\-py} }{\pageref{nubot__common_2srv_2____init_____8py}}{}
\item\contentsline{section}{devel/lib/python2.\-7/dist-\/packages/nubot\-\_\-common/srv/\hyperlink{__BallHandle_8py}{\-\_\-\-Ball\-Handle.\-py} }{\pageref{__BallHandle_8py}}{}
\item\contentsline{section}{devel/lib/python2.\-7/dist-\/packages/nubot\-\_\-common/srv/\hyperlink{__Shoot_8py}{\-\_\-\-Shoot.\-py} }{\pageref{__Shoot_8py}}{}
\item\contentsline{section}{devel/lib/python2.\-7/dist-\/packages/nubot\-\_\-gazebo/\hyperlink{nubot__gazebo_2____init_____8py}{\-\_\-\-\_\-init\-\_\-\-\_\-.\-py} }{\pageref{nubot__gazebo_2____init_____8py}}{}
\item\contentsline{section}{devel/lib/python2.\-7/dist-\/packages/nubot\-\_\-gazebo/cfg/\hyperlink{nubot__gazebo_2cfg_2____init_____8py}{\-\_\-\-\_\-init\-\_\-\-\_\-.\-py} }{\pageref{nubot__gazebo_2cfg_2____init_____8py}}{}
\item\contentsline{section}{devel/lib/python2.\-7/dist-\/packages/nubot\-\_\-gazebo/cfg/\hyperlink{NubotGazeboConfig_8py}{Nubot\-Gazebo\-Config.\-py} }{\pageref{NubotGazeboConfig_8py}}{}
\item\contentsline{section}{src/nubot\-\_\-common/core/include/nubot/core/\hyperlink{Angle_8hpp}{Angle.\-hpp} }{\pageref{Angle_8hpp}}{}
\item\contentsline{section}{src/nubot\-\_\-common/core/include/nubot/core/\hyperlink{Circle_8hpp}{Circle.\-hpp} }{\pageref{Circle_8hpp}}{}
\item\contentsline{section}{src/nubot\-\_\-common/core/include/nubot/core/\hyperlink{core_8hpp}{core.\-hpp} }{\pageref{core_8hpp}}{}
\item\contentsline{section}{src/nubot\-\_\-common/core/include/nubot/core/\hyperlink{DPoint_8hpp}{D\-Point.\-hpp} }{\pageref{DPoint_8hpp}}{}
\item\contentsline{section}{src/nubot\-\_\-common/core/include/nubot/core/\hyperlink{Line_8hpp}{Line.\-hpp} }{\pageref{Line_8hpp}}{}
\item\contentsline{section}{src/nubot\-\_\-common/core/include/nubot/core/\hyperlink{PPoint_8hpp}{P\-Point.\-hpp} }{\pageref{PPoint_8hpp}}{}
\item\contentsline{section}{src/nubot\-\_\-common/core/include/nubot/core/\hyperlink{time_8hpp}{time.\-hpp} }{\pageref{time_8hpp}}{}
\item\contentsline{section}{src/nubot\-\_\-simulation/nubot\-\_\-gazebo/plugins/\hyperlink{nubot__gazebo_8cc}{nubot\-\_\-gazebo.\-cc} }{\pageref{nubot__gazebo_8cc}}{}
\item\contentsline{section}{src/nubot\-\_\-simulation/nubot\-\_\-gazebo/plugins/\hyperlink{nubot__gazebo_8hh}{nubot\-\_\-gazebo.\-hh} }{\pageref{nubot__gazebo_8hh}}{}
\item\contentsline{section}{src/nubot\-\_\-simulation/nubot\-\_\-gazebo/plugins/\hyperlink{nubot__PID_8cc}{nubot\-\_\-\-P\-I\-D.\-cc} }{\pageref{nubot__PID_8cc}}{}
\item\contentsline{section}{src/nubot\-\_\-simulation/nubot\-\_\-gazebo/plugins/\hyperlink{nubot__PID_8hh}{nubot\-\_\-\-P\-I\-D.\-hh} }{\pageref{nubot__PID_8hh}}{}
\item\contentsline{section}{src/nubot\-\_\-simulation/nubot\-\_\-gazebo/plugins/\hyperlink{nubot__teleop__keyboard_8cc}{nubot\-\_\-teleop\-\_\-keyboard.\-cc} }{\pageref{nubot__teleop__keyboard_8cc}}{}
\item\contentsline{section}{src/nubot\-\_\-simulation/nubot\-\_\-gazebo/plugins/\hyperlink{nubot__teleop__keyboard_8hh}{nubot\-\_\-teleop\-\_\-keyboard.\-hh} }{\pageref{nubot__teleop__keyboard_8hh}}{}
\item\contentsline{section}{src/nubot\-\_\-simulation/nubot\-\_\-gazebo/plugins/\hyperlink{parabolic__transition__planning_8cc}{parabolic\-\_\-transition\-\_\-planning.\-cc} }{\pageref{parabolic__transition__planning_8cc}}{}
\item\contentsline{section}{src/nubot\-\_\-simulation/nubot\-\_\-gazebo/plugins/\hyperlink{parabolic__transition__planning_8hh}{parabolic\-\_\-transition\-\_\-planning.\-hh} }{\pageref{parabolic__transition__planning_8hh}}{}
\item\contentsline{section}{src/nubot\-\_\-simulation/nubot\-\_\-gazebo/plugins/\hyperlink{vector__angle_8hh}{vector\-\_\-angle.\-hh} }{\pageref{vector__angle_8hh}}{}
\end{DoxyCompactList}
| nubot-nudt/single_nubot_gazebo | doc/latex/files.tex | TeX | apache-2.0 | 8,866 |
// Copyright 2017, Dell EMC, Inc.
/* jshint node:true */
'use strict';
// Spec for the dell-wsman-reset-components base task definition. The suite is
// named after this file, and the shared base spec supplies the common
// setup/teardown plus the standard task-data example assertions.
describe(require('path').basename(__filename), function () {
    var base = require('./base-task-data-spec');

    // Load the task definition under test into the shared spec context.
    // NOTE(review): 'helper' appears to be a global installed by the test
    // bootstrap -- confirm against the project's test setup.
    base.before(function (context) {
        context.taskdefinition = helper.require(
            '/lib/task-data/base-tasks/dell-wsman-reset-components.js'
        );
    });

    describe('task-data', function () {
        base.examples();
    });
});
| AlaricChan/on-tasks | spec/lib/task-data/base-tasks/dell-wsman-reset-components-spec.js | JavaScript | apache-2.0 | 439 |
/*
* Copyright (C) FuseSource, Inc.
* http://fusesource.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.fusesource.fabric.monitor.internal
import scala.collection.mutable.HashMap
import org.rrd4j.ConsolFun._
import org.rrd4j.core._
import org.rrd4j.DsType._
import java.util.concurrent.atomic.AtomicBoolean
import org.rrd4j.core.Util
import org.linkedin.util.clock.Timespan
import java.io.File
import java.{util => ju}
import FileSupport._
import collection.JavaConversions._
import org.fusesource.scalate.util.Log
import org.fusesource.fabric.monitor.api._
import scala.Some
object DefaultMonitor {
val log = Log(classOf[DefaultMonitor])
}
import DefaultMonitor._
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class DefaultMonitor (
val rrd_file_prefix:String="",
val rrd_backend:RrdBackendFactory = new RrdNioBackendFactory()
) extends Monitor {
var step_duration = 1000L
def path_to_rrd_file(name:String) = rrd_file_prefix + name +".rrd"
  /**
   * Runtime state for one configured set of data sources: owns the RRD file
   * definition derived from the DTO and a background thread that polls each
   * source once per sample step and records the values.
   */
  case class MonitoredSet(dto:MonitoredSetDTO) {

    import dto._
    import collection.JavaConversions._

    // How often a sample is taken, parsed from the DTO's `step` field.
    val sample_span = Timespan.parse(dto.step)

    // Map of short RRD datasource id -> source DTO. RRD datasource names are
    // limited to 20 characters, so a hex counter prefix plus the tail of the
    // source id is used to keep them unique yet recognizable.
    val sources = {
      var source_counter = 0 ;
      (dto.data_sources.map { source =>
        // rrd ids are limitied to 20 chars... lets do the best we can:
        var rrd_id = "%x:".format(source_counter)
        // Use the right most part of the id as it's usually the most specific
        rrd_id += source.id.takeRight(20-rrd_id.length)
        source_counter += 1
        (rrd_id, source)
      }).toMap
    }

    def file_base_name: String = {
      rrd_file_prefix + name
    }

    val rrd_file_name = path_to_rrd_file(name)

    // Distinct consolidation functions used across all configured archives.
    val rrd_archive_funcs = dto.archives.map(_.consolidation toUpperCase match {
      case "AVERAGE" => AVERAGE
      case "MIN" => MIN
      case "MAX" => MAX
      case "LAST" => LAST
      case "FIRST" => FIRST
      case "TOTAL" => TOTAL
    }).toSet

    // Builds the RRD definition: one datasource per entry in `sources`
    // (heartbeat defaults to twice the sample step) and one archive per
    // configured archive DTO.
    val rrd_def = {
      log.info("Creating RRD file to: " + rrd_file_name)
      val rc = new RrdDef(rrd_file_name, sample_span.getDurationInSeconds)
      sources.foreach { case (rrd_id, source) =>
        import source._
        val steps = Option(heartbeat).map( x =>
          Timespan.parse(x).getDuration(Timespan.TimeUnit.SECOND)
        ).getOrElse(2 * sample_span.getDuration(Timespan.TimeUnit.SECOND))
        rc.addDatasource(rrd_id, kind.toUpperCase match {
          case "GAUGE" => GAUGE
          case "COUNTER" => COUNTER
          case "DERIVE" => DERIVE
          case "ABSOLUTE" => ABSOLUTE
        }, steps, min, max);
      }
      archives.foreach { archive =>
        import archive._
        val archive_span = Option(step).map(Timespan.parse(_)).getOrElse(sample_span)
        val steps = (archive_span.getDurationInMilliseconds / sample_span.getDurationInMilliseconds).toInt
        val total_span = Timespan.parse(window)
        val rows = (total_span.getDurationInMilliseconds / archive_span.getDurationInMilliseconds).toInt
        val consolFun = consolidation.toUpperCase match {
          case "AVERAGE" => AVERAGE
          case "MIN" => MIN
          case "MAX" => MAX
          case "LAST" => LAST
          case "FIRST" => FIRST
          case "TOTAL" => TOTAL
        }
        rc.addArchive(consolFun, xff, steps, rows);
      }
      rc
    }

    var pollers = List[(String, Poller)]()
    var thread:Thread = _
    var active = new AtomicBoolean()

    // Starts the daemon polling thread (idempotent via the `active` flag).
    // Also writes the set's DTO next to the RRD file as JSON.
    def start = {
      if( active.compareAndSet(false, true) ) {
        new File(file_base_name+".json").write_bytes(JsonCodec.encode(dto))
        thread = new Thread("Monitoring: "+name) {
          setDaemon(true)
          override def run: Unit = {
            // Group sources by the first poller factory that accepts them,
            // then create one poller per (rrd_id, source) pair.
            val sources_by_factory = HashMap[PollerFactory, List[(String, DataSourceDTO)]]()
            sources.foreach { case (rrd_id, source) =>
              poller_factories.find(_.accepts(source)).foreach { factory =>
                val sources = sources_by_factory.getOrElseUpdate(factory, Nil)
                sources_by_factory.put(factory, (rrd_id, source)::sources)
              }
            }
            pollers = {
              sources_by_factory.flatMap{ case (factory, sources)=>
                sources.map{ case (rrd_id, source) => (rrd_id, factory.create(source)) }
              }
            }.toList
            val rrd_db = new RrdDb(rrd_def, rrd_backend);
            try {
              // Poll every source once per sample step until stop() clears
              // the active flag.
              while(active.get) {
                val sample = rrd_db.createSample()
                sample.setTime(Util.getTime())
                val start = System.currentTimeMillis()
                // log.info("Collecting samples from %d pollers.".format(pollers.size))
                pollers.foreach { case (rrd_id, poller) =>
                  val result = poller.poll
                  sample.setValue(rrd_id, result)
                }
                // log.info("Collected sample: "+sample.dump)
                sample.update();
                // Sleep until we need to poll again.
                def remaining = (start + (step_duration * sample_span.getDurationInSeconds)) - System.currentTimeMillis()
                var r = remaining
                while( r > 0 ) {
                  Thread.sleep( r )
                  r = remaining
                }
              }
            } finally {
              rrd_db.close
            }
          }
        }
        thread.start()
      }
    }

    // Signals the polling loop to finish and joins the thread.
    def stop = {
      if( active.compareAndSet(true, false) ) {
        thread.join
        thread = null
      }
    }
  }
// Active monitored sets, keyed by set name.
val current_monitored_sets = HashMap [String, MonitoredSet]()

// TODO: if the poller_factories gets changed, we should
// recreate the current_monitored_sets as there may be more or less
// data sources that we can gather data for.
var poller_factories:Seq[PollerFactory] = Nil
/**
 * Applies a new monitoring configuration: starts newly added sets, bounces
 * sets whose configuration changed, and stops sets that were removed.
 */
def configure(value: Traversable[MonitoredSetDTO]): Unit = this.synchronized {
  val next_services = Map[String, MonitoredSet](value.map { dto => dto.name -> MonitoredSet(dto) }.toSeq : _*)

  // Partition the set names into added / updated / removed.
  val existing_keys = current_monitored_sets.keys.toSet
  val next_service_keys = next_services.keys.toSet
  val updating = existing_keys intersect next_service_keys
  val adding = next_service_keys -- updating
  val removing = existing_keys -- next_service_keys

  for (id <- adding) {
    val next = next_services.get(id).get
    next.start
    current_monitored_sets += id -> next
  }

  for (id <- updating) {
    val next = next_services.get(id).get
    val prev = current_monitored_sets.get(id).get
    // Only restart when the service configuration actually changed.
    if (next != prev) {
      prev.stop
      next.start
      current_monitored_sets.put(id, next)
    }
  }

  for (id <- removing) {
    current_monitored_sets.remove(id).get.stop
  }
}
/** Intentionally empty: nothing is held at this level that needs releasing. */
def close: Unit = {
}
/**
 * Fetches consolidated time-series data for a monitored set.
 *
 * Resolves the monitored set named in the request, applies defaults for
 * step/start/end (1 second step, window ending "now" and spanning
 * step*60*5 seconds), then reads the matching archives from the RRD file,
 * optionally filtered and reordered by the requested data-source ids.
 *
 * @return Some(view) with the fetched data, or None when the monitored
 *         set is unknown.
 */
def fetch(fetch: FetchMonitoredViewDTO):Option[MonitoredViewDTO] = this.synchronized {
  val monitored_set_id = fetch.monitored_set
  val ids = fetch.data_sources
  val consolidations = fetch.consolidations
  val start = fetch.start
  val end = fetch.end
  val step = fetch.step

  val monitored_set = current_monitored_sets.get(monitored_set_id) match {
    case Some(x) => x
    case None => return None
  }

  // Open read-only; always closed in the finally block below.
  val rrd_db = new RrdDb(monitored_set.rrd_file_name, true, rrd_backend);
  try {
    val rc = new MonitoredViewDTO
    rc.start = start
    rc.end = end
    rc.step = step

    if( rc.step == 0 ) {
      rc.step = 1
    }
    if( rc.end == 0 ) {
      rc.end = Util.getTime-1
    }
    if( rc.start == 0 ) {
      rc.start = rc.end - (rc.step*60*5)
    }

    monitored_set.rrd_archive_funcs.foreach { consol_fun =>
      if( consolidations == null || consolidations.size == 0 || consolidations.contains(consol_fun) ) {
        val request = rrd_db.createFetchRequest(consol_fun, rc.start, rc.end, rc.step)

        if ( ids !=null && !ids.isEmpty ) {
          // Map DS ids to rrd_ids so that we only fetch the requested data...
          val filter: ju.Set[String] = setAsJavaSet(monitored_set.sources.flatMap { case (rrd_id, source) =>
            if (ids.contains(source.id)) {
              Some(rrd_id)
            } else {
              None
            }
          }.toSet)
          request.setFilter(filter)
        }

        val data = request.fetchData();

        for( rrd_id <- data.getDsNames ) {
          val t = new DataSourceViewDTO
          t.id = rrd_id
          t.label = rrd_id
          t.description = ""

          // we can probably get better values from
          // the data source dto
          for( dto <- monitored_set.sources.get(rrd_id) ) {
            t.id = dto.id
            // Use an explicit dotted call for consistency with the
            // description line below (was fragile juxtaposed infix syntax).
            t.label = Option(dto.name).getOrElse(t.id)
            t.description = Option(dto.description).getOrElse("")
          }

          rc.data_sources.add(t)
          t.consolidation = consol_fun.toString
          t.data = data.getValues(rrd_id)
        }

        // lets reorder the data so it matches the order it was
        // requested in..
        if ( ids !=null && !ids.isEmpty ) {
          val sources = rc.data_sources.map( x=> (x.id, x) ).toMap
          rc.data_sources = ids.flatMap(id => sources.get(id))
        }
      }
    }

    Some(rc)
  } finally {
    rrd_db.close()
  }
}
/** Snapshot of the DTOs of all currently monitored sets. */
def list: Array[MonitoredSetDTO] = this.synchronized {
  current_monitored_sets.values.map(_.dto).toArray
}
}
| janstey/fuse | sandbox/fabric-monitor/src/main/scala/org/fusesource/fabric/monitor/internal/DefaultMonitor.scala | Scala | apache-2.0 | 10,211 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_COPY_THUNK_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_COPY_THUNK_H_
#include "tensorflow/compiler/xla/service/buffer_assignment.h"
#include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
#include "tensorflow/compiler/xla/service/gpu/thunk.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
#include "tensorflow/core/platform/types.h"
namespace xla {
namespace gpu {
// A thunk that copies data from a host buffer to a device buffer.
class HostToDeviceCopyThunk : public Thunk {
 public:
  // Constructs a CopyThunk that copies host data from `source_address` to the
  // device buffer `destination_buffer`. `mem_size` is the size of the data in
  // bytes.
  HostToDeviceCopyThunk(ThunkInfo thunk_info, const void* source_address,
                        const BufferAllocation::Slice& destination_buffer,
                        uint64 mem_size);

  HostToDeviceCopyThunk(const HostToDeviceCopyThunk&) = delete;
  HostToDeviceCopyThunk& operator=(const HostToDeviceCopyThunk&) = delete;

  // Performs the copy using the stream provided in `params`.
  Status ExecuteOnStream(const ExecuteParams& params) override;

 private:
  // Host-side source of the copy.
  const void* source_address_;
  // Device-side destination buffer.
  const BufferAllocation::Slice destination_buffer_;
  // Number of bytes to copy.
  const uint64 mem_size_;
};
// A thunk that copies data from a device buffer to another device buffer.
class DeviceToDeviceCopyThunk : public Thunk {
 public:
  // Constructs a CopyThunk that copies data from the device buffer
  // `source_buffer` to the device buffer `destination_buffer`. `mem_size` is
  // the size of the data in bytes. (Comment fixed: this thunk copies device
  // data, not host data.)
  DeviceToDeviceCopyThunk(ThunkInfo thunk_info,
                          const BufferAllocation::Slice& source_buffer,
                          const BufferAllocation::Slice& destination_buffer,
                          uint64 mem_size);

  DeviceToDeviceCopyThunk(const DeviceToDeviceCopyThunk&) = delete;
  DeviceToDeviceCopyThunk& operator=(const DeviceToDeviceCopyThunk&) = delete;

  // Performs the copy using the stream provided in `params`.
  Status ExecuteOnStream(const ExecuteParams& params) override;

 private:
  // Device-side source buffer.
  const BufferAllocation::Slice source_buffer_;
  // Device-side destination buffer.
  const BufferAllocation::Slice destination_buffer_;
  // Number of bytes to copy.
  const uint64 mem_size_;
};
} // namespace gpu
} // namespace xla
#endif // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_COPY_THUNK_H_
| frreiss/tensorflow-fred | tensorflow/compiler/xla/service/gpu/copy_thunk.h | C | apache-2.0 | 2,984 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using RestSharp.Deserializers;
namespace Twilio
{
/// <summary>
/// REST resource representing a single IP address entry.
/// </summary>
public class IpAddress : TwilioBase
{
    /// <summary>Unique identifier (Sid) of this resource.</summary>
    public string Sid { get; set; }
    /// <summary>Human-readable name for this IP address entry.</summary>
    public string FriendlyName { get; set; }
    /// <summary>The IP address itself; deserialized from the "IpAddress" field.</summary>
    [DeserializeAs(Name="IpAddress")]
    public string Address { get; set; }
}
}
| IRlyDontKnow/twilio-csharp | src/Twilio.Api/Model/IpAddress.cs | C# | apache-2.0 | 378 |
package provision
import (
"bytes"
"fmt"
"io/ioutil"
"net/url"
"path"
"path/filepath"
"strconv"
"strings"
log "github.com/Sirupsen/logrus"
"github.com/docker/machine/libmachine/auth"
"github.com/docker/machine/libmachine/provision/pkgaction"
"github.com/docker/machine/libmachine/swarm"
"github.com/docker/machine/utils"
)
// DockerOptions carries the rendered docker engine flags and the path of the
// configuration file they should be appended to on the machine.
type DockerOptions struct {
	EngineOptions string
	EngineOptionsPath string
}
// installDockerGeneric installs docker on the machine via the upstream
// get.docker.com script, skipping the install when docker already exists.
func installDockerGeneric(p Provisioner) error {
	// install docker - until cloudinit we use ubuntu everywhere so we
	// just install it using the docker repos
	output, err := p.SSHCommand("if ! type docker; then curl -sSL https://get.docker.com | sh -; fi")
	if err == nil {
		return nil
	}

	// Surface the remote stderr in the returned error.
	var stderr bytes.Buffer
	if _, readErr := stderr.ReadFrom(output.Stderr); readErr != nil {
		return readErr
	}

	return fmt.Errorf("error installing docker: %s\n", stderr.String())
}
// ConfigureAuth sets up TLS authentication for the docker engine on the
// machine: it copies the client certs into the machine's local config dir,
// generates a server certificate for the machine's IP, uploads the CA cert,
// server cert and server key over SSH, rewrites the engine options so the
// daemon requires TLS, and restarts the docker service.
//
// Local certificate-copy failures are now returned as errors instead of
// terminating the whole process via log.Fatalf, so callers can handle them.
func ConfigureAuth(p Provisioner, authOptions auth.AuthOptions) error {
	machineName := p.GetDriver().GetMachineName()
	org := machineName
	bits := 2048

	ip, err := p.GetDriver().GetIP()
	if err != nil {
		return err
	}

	// copy certs to client dir for docker client
	machineDir := filepath.Join(utils.GetMachineDir(), machineName)
	if err := utils.CopyFile(authOptions.CaCertPath, filepath.Join(machineDir, "ca.pem")); err != nil {
		return fmt.Errorf("error copying ca.pem to machine dir: %s", err)
	}

	if err := utils.CopyFile(authOptions.ClientCertPath, filepath.Join(machineDir, "cert.pem")); err != nil {
		return fmt.Errorf("error copying cert.pem to machine dir: %s", err)
	}

	if err := utils.CopyFile(authOptions.ClientKeyPath, filepath.Join(machineDir, "key.pem")); err != nil {
		return fmt.Errorf("error copying key.pem to machine dir: %s", err)
	}

	log.Debugf("generating server cert: %s ca-key=%s private-key=%s org=%s",
		authOptions.ServerCertPath,
		authOptions.CaCertPath,
		authOptions.PrivateKeyPath,
		org,
	)

	// TODO: Switch to passing just authOptions to this func
	// instead of all these individual fields
	err = utils.GenerateCert(
		[]string{ip},
		authOptions.ServerCertPath,
		authOptions.ServerKeyPath,
		authOptions.CaCertPath,
		authOptions.PrivateKeyPath,
		org,
		bits,
	)
	if err != nil {
		return fmt.Errorf("error generating server cert: %s", err)
	}

	if err := p.Service("docker", pkgaction.Stop); err != nil {
		return err
	}

	dockerDir := p.GetDockerOptionsDir()
	if _, err := p.SSHCommand(fmt.Sprintf("sudo mkdir -p %s", dockerDir)); err != nil {
		return err
	}

	// upload certs and configure TLS auth
	caCert, err := ioutil.ReadFile(authOptions.CaCertPath)
	if err != nil {
		return err
	}

	// due to windows clients, we cannot use filepath.Join as the paths
	// will be mucked on the linux hosts
	machineCaCertPath := path.Join(dockerDir, "ca.pem")
	authOptions.CaCertRemotePath = machineCaCertPath

	serverCert, err := ioutil.ReadFile(authOptions.ServerCertPath)
	if err != nil {
		return err
	}
	machineServerCertPath := path.Join(dockerDir, "server.pem")
	authOptions.ServerCertRemotePath = machineServerCertPath

	serverKey, err := ioutil.ReadFile(authOptions.ServerKeyPath)
	if err != nil {
		return err
	}
	machineServerKeyPath := path.Join(dockerDir, "server-key.pem")
	authOptions.ServerKeyRemotePath = machineServerKeyPath

	if _, err = p.SSHCommand(fmt.Sprintf("echo \"%s\" | sudo tee %s", string(caCert), machineCaCertPath)); err != nil {
		return err
	}
	if _, err = p.SSHCommand(fmt.Sprintf("echo \"%s\" | sudo tee %s", string(serverKey), machineServerKeyPath)); err != nil {
		return err
	}
	if _, err = p.SSHCommand(fmt.Sprintf("echo \"%s\" | sudo tee %s", string(serverCert), machineServerCertPath)); err != nil {
		return err
	}

	dockerUrl, err := p.GetDriver().GetURL()
	if err != nil {
		return err
	}
	u, err := url.Parse(dockerUrl)
	if err != nil {
		return err
	}

	// Default the daemon port; override with the port from the driver URL.
	dockerPort := 2376
	parts := strings.Split(u.Host, ":")
	if len(parts) == 2 {
		dPort, err := strconv.Atoi(parts[1])
		if err != nil {
			return err
		}
		dockerPort = dPort
	}

	dkrcfg, err := p.GenerateDockerOptions(dockerPort, authOptions)
	if err != nil {
		return err
	}

	if _, err = p.SSHCommand(fmt.Sprintf("echo \"%s\" | sudo tee -a %s", dkrcfg.EngineOptions, dkrcfg.EngineOptionsPath)); err != nil {
		return err
	}

	if err := p.Service("docker", pkgaction.Start); err != nil {
		return err
	}

	return nil
}
// getDefaultDaemonOpts renders the TLS verification flags for the daemon,
// plus a provider label identifying the driver that created the machine.
func getDefaultDaemonOpts(driverName string, authOptions auth.AuthOptions) string {
	providerLabel := fmt.Sprintf("--label=provider=%s", driverName)
	return fmt.Sprintf(`--tlsverify --tlscacert=%s --tlskey=%s --tlscert=%s %s`,
		authOptions.CaCertRemotePath,
		authOptions.ServerKeyRemotePath,
		authOptions.ServerCertRemotePath,
		providerLabel,
	)
}
// configureSwarm launches the swarm agent container on the machine (and, for
// masters, the swarm manage container as well). It is a no-op unless
// swarmOptions.IsSwarm is set.
func configureSwarm(p Provisioner, swarmOptions swarm.SwarmOptions) error {
	if !swarmOptions.IsSwarm {
		return nil
	}

	basePath := p.GetDockerOptionsDir()
	ip, err := p.GetDriver().GetIP()
	if err != nil {
		return err
	}

	tlsCaCert := path.Join(basePath, "ca.pem")
	tlsCert := path.Join(basePath, "server.pem")
	tlsKey := path.Join(basePath, "server-key.pem")
	masterArgs := fmt.Sprintf("--tlsverify --tlscacert=%s --tlscert=%s --tlskey=%s -H %s %s",
		tlsCaCert, tlsCert, tlsKey, swarmOptions.Host, swarmOptions.Discovery)
	nodeArgs := fmt.Sprintf("--addr %s:2376 %s", ip, swarmOptions.Discovery)

	u, err := url.Parse(swarmOptions.Host)
	if err != nil {
		return err
	}

	// Guard against hosts without an explicit port: indexing parts[1]
	// unconditionally would panic on e.g. "tcp://example.com".
	parts := strings.Split(u.Host, ":")
	if len(parts) != 2 {
		return fmt.Errorf("invalid swarm host %q: expected <host>:<port>", swarmOptions.Host)
	}
	port := parts[1]

	// TODO: Do not hardcode daemon port, ask the driver
	if err := utils.WaitForDocker(ip, 2376); err != nil {
		return err
	}

	if _, err := p.SSHCommand(fmt.Sprintf("sudo docker pull %s", swarm.DockerImage)); err != nil {
		return err
	}

	dockerDir := p.GetDockerOptionsDir()

	// if master start master agent
	if swarmOptions.Master {
		log.Debug("launching swarm master")
		log.Debugf("master args: %s", masterArgs)
		if _, err = p.SSHCommand(fmt.Sprintf("sudo docker run -d -p %s:%s --restart=always --name swarm-agent-master -v %s:%s %s manage %s",
			port, port, dockerDir, dockerDir, swarm.DockerImage, masterArgs)); err != nil {
			return err
		}
	}

	// start node agent
	log.Debug("launching swarm node")
	log.Debugf("node args: %s", nodeArgs)
	if _, err = p.SSHCommand(fmt.Sprintf("sudo docker run -d --restart=always --name swarm-agent -v %s:%s %s join %s",
		dockerDir, dockerDir, swarm.DockerImage, nodeArgs)); err != nil {
		return err
	}

	return nil
}
| zehicle/machine | libmachine/provision/utils.go | GO | apache-2.0 | 6,377 |
/*
* Copyright (c) 2018 Makaio GmbH
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <shell/shell_rtt.h>
#include <init.h>
#include <SEGGER_RTT.h>
#include <logging/log.h>
/* The shell transport and the RTT log backend cannot both use RTT channel 0;
 * reject such configurations at build time.
 */
BUILD_ASSERT_MSG(!(IS_ENABLED(CONFIG_LOG_BACKEND_RTT) &&
		 COND_CODE_0(CONFIG_LOG_BACKEND_RTT_BUFFER, (1), (0))),
	"Conflicting log RTT backend enabled on the same channel");

/* RTT transport instance and the shell instance backed by it. */
SHELL_RTT_DEFINE(shell_transport_rtt);
SHELL_DEFINE(shell_rtt, CONFIG_SHELL_PROMPT_RTT, &shell_transport_rtt,
	     CONFIG_SHELL_BACKEND_RTT_LOG_MESSAGE_QUEUE_SIZE,
	     CONFIG_SHELL_BACKEND_RTT_LOG_MESSAGE_QUEUE_TIMEOUT,
	     SHELL_FLAG_OLF_CRLF);

LOG_MODULE_REGISTER(shell_rtt, CONFIG_SHELL_RTT_LOG_LEVEL);

/* When true, writes bypass locking and busy-wait until the host drains the
 * up-buffer; set in enable() when blocking mode is requested.
 */
static bool rtt_blocking;
/* Periodic RX poll callback: notifies the shell that receive data is ready
 * whenever RTT channel 0 has unread bytes.
 */
static void timer_handler(struct k_timer *timer)
{
	const struct shell_rtt *sh_rtt = k_timer_user_data_get(timer);

	if (SEGGER_RTT_HasData(0)) {
		sh_rtt->handler(SHELL_TRANSPORT_EVT_RX_RDY, sh_rtt->context);
	}
}
/* Transport init: stores the shell's event handler and context, then starts
 * a periodic timer that polls RTT channel 0 for received data.
 * The `config` parameter is unused by this transport.
 */
static int init(const struct shell_transport *transport,
		const void *config,
		shell_transport_handler_t evt_handler,
		void *context)
{
	struct shell_rtt *sh_rtt = (struct shell_rtt *)transport->ctx;

	sh_rtt->handler = evt_handler;
	sh_rtt->context = context;

	k_timer_init(&sh_rtt->timer, timer_handler, NULL);
	k_timer_user_data_set(&sh_rtt->timer, (void *)sh_rtt);
	k_timer_start(&sh_rtt->timer, CONFIG_SHELL_RTT_RX_POLL_PERIOD,
		      CONFIG_SHELL_RTT_RX_POLL_PERIOD);

	return 0;
}
/* Nothing to tear down for this transport; always succeeds. */
static int uninit(const struct shell_transport *transport)
{
	return 0;
}
/* Switches the transport into blocking mode when requested: the RX poll
 * timer is stopped and subsequent writes spin until the host consumes the
 * data (see write()). Non-blocking requests are a no-op.
 */
static int enable(const struct shell_transport *transport, bool blocking)
{
	struct shell_rtt *sh_rtt = (struct shell_rtt *)transport->ctx;

	if (blocking) {
		rtt_blocking = true;
		k_timer_stop(&sh_rtt->timer);
	}

	return 0;
}
/* Writes `length` bytes to RTT channel 0 and reports the count written in
 * `cnt`. In blocking mode the data is written without locking and the
 * function busy-waits until the host has read the up-buffer; otherwise a
 * regular (possibly partial) RTT write is done. The shell is notified that
 * TX is ready in both cases.
 */
static int write(const struct shell_transport *transport,
		 const void *data, size_t length, size_t *cnt)
{
	struct shell_rtt *sh_rtt = (struct shell_rtt *)transport->ctx;
	const u8_t *data8 = (const u8_t *)data;

	if (rtt_blocking) {
		*cnt = SEGGER_RTT_WriteNoLock(0, data8, length);
		while (SEGGER_RTT_HasDataUp(0)) {
			/* empty */
		}
	} else {
		*cnt = SEGGER_RTT_Write(0, data8, length);
	}

	sh_rtt->handler(SHELL_TRANSPORT_EVT_TX_RDY, sh_rtt->context);

	return 0;
}
/* Reads up to `length` pending bytes from RTT channel 0 into `data`;
 * the number of bytes actually read is stored in `cnt`.
 */
static int read(const struct shell_transport *transport,
		void *data, size_t length, size_t *cnt)
{
	*cnt = SEGGER_RTT_Read(0, data, length);

	return 0;
}
/* Shell transport API vtable bound to the RTT implementation above. */
const struct shell_transport_api shell_rtt_transport_api = {
	.init = init,
	.uninit = uninit,
	.enable = enable,
	.write = write,
	.read = read
};
/* SYS_INIT hook that brings the RTT shell up after the kernel starts.
 * The shell's log backend is enabled when CONFIG_SHELL_RTT_INIT_LOG_LEVEL
 * is non-zero, clamped to CONFIG_LOG_MAX_LEVEL.
 */
static int enable_shell_rtt(struct device *arg)
{
	ARG_UNUSED(arg);

	bool log_backend = CONFIG_SHELL_RTT_INIT_LOG_LEVEL > 0;
	u32_t level = (CONFIG_SHELL_RTT_INIT_LOG_LEVEL > LOG_LEVEL_DBG) ?
		      CONFIG_LOG_MAX_LEVEL : CONFIG_SHELL_RTT_INIT_LOG_LEVEL;

	shell_init(&shell_rtt, NULL, true, log_backend, level);

	return 0;
}
/* Function is used for testing purposes */
const struct shell *shell_backend_rtt_get_ptr(void)
{
	return &shell_rtt;
}

/* Register the backend for initialization at POST_KERNEL, priority 0. */
SYS_INIT(enable_shell_rtt, POST_KERNEL, 0);
| ldts/zephyr | subsys/shell/shell_rtt.c | C | apache-2.0 | 2,990 |
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/inverse_op.h"
#include <string>
#include <unordered_map>
namespace paddle {
namespace operators {
// Shape inference and validation for the `inverse` op: the input must be a
// (batch of) square matrices; the output has the same shape as the input.
class InverseOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "Inverse");
    OP_INOUT_CHECK(ctx->HasOutput("Output"), "Output", "Output", "Inverse");

    auto input_dims = ctx->GetInputDim("Input");
    int64_t input_rank = input_dims.size();
    PADDLE_ENFORCE_GE(
        input_rank, 2,
        platform::errors::InvalidArgument(
            "The dimension of Input(Input) is expected to be no less than 2. "
            "But received: Input(Input)'s dimension = %d, shape = [%s].",
            input_rank, input_dims));
    // -1 marks a dynamic dimension that is resolved at runtime.
    for (int64_t i = 0; i < input_rank; ++i) {
      PADDLE_ENFORCE_EQ(
          (input_dims[i] == -1) || (input_dims[i] > 0), true,
          platform::errors::InvalidArgument(
              "Each dimension of input tensor is expected to be -1 or a "
              "positive number, but received %d. Input's shape is [%s].",
              input_dims[i], input_dims));
    }
    // Only check squareness when both trailing dims are statically known.
    if (input_dims[input_rank - 2] > 0 && input_dims[input_rank - 1] > 0) {
      PADDLE_ENFORCE_EQ(input_dims[input_rank - 2], input_dims[input_rank - 1],
                        platform::errors::InvalidArgument(
                            "The last two dimensions are expected to be equal. "
                            "But received: %d and %d; "
                            "Input(Input)'s shape = [%s].",
                            input_dims[input_rank - 2],
                            input_dims[input_rank - 1], input_dims));
    }

    ctx->SetOutputDim("Output", input_dims);
    ctx->ShareLoD("Input", /*->*/ "Output");
  }
};
// Propagates the input's dtype and variable type to the output.
class InverseOpInferVarType : public framework::PassInDtypeAndVarTypeToOutput {
 protected:
  std::unordered_map<std::string, std::string>& GetInputOutputWithSameType()
      const override {
    // "Input" and "Output" share the same dtype/var type.
    static std::unordered_map<std::string, std::string> m{
        {"Input", /*->*/ "Output"}};
    return m;
  }
};
// Shape inference for the gradient op: the input gradient has the same
// shape as the output gradient.
class InverseGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    auto input_grad = framework::GradVarName("Input");
    auto output_grad = framework::GradVarName("Output");

    OP_INOUT_CHECK(ctx->HasInput("Output"), "Input", "Output", "InverseGrad");
    OP_INOUT_CHECK(ctx->HasInput(output_grad), "Input", output_grad,
                   "InverseGrad");
    // The input gradient output is optional; only set its dim when present.
    if (ctx->HasOutput(input_grad)) {
      ctx->SetOutputDim(input_grad, ctx->GetInputDim(output_grad));
    }
  }
};
// Declares the op's inputs, outputs and documentation string.
class InverseOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput(
        "Input",
        "(Tensor) A square matrix (2-D Tensor) or batches of square matrices"
        " to inverse.");
    AddOutput("Output", "(Tensor) The inverse of input matrix.");
    AddComment(R"DOC(
Inverse Operator

Takes the inverse of the square matrix.
)DOC");
  }
};
// Builds the grad op description: inverse_grad consumes the forward Output
// and its gradient, and produces the gradient of Input.
template <typename T>
class InverseGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> grad) const override {
    grad->SetType(this->ForwardOpType() + "_grad");
    grad->SetInput("Output", this->Output("Output"));
    grad->SetInput(framework::GradVarName("Output"),
                   this->OutputGrad("Output"));
    grad->SetOutput(framework::GradVarName("Input"), this->InputGrad("Input"));
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;

// Register the forward/backward operators and their CPU kernels
// (float and double).
REGISTER_OPERATOR(inverse, ops::InverseOp, ops::InverseOpMaker,
                  ops::InverseOpInferVarType,
                  ops::InverseGradOpMaker<paddle::framework::OpDesc>,
                  ops::InverseGradOpMaker<paddle::imperative::OpBase>);

REGISTER_OPERATOR(inverse_grad, ops::InverseGradOp);

REGISTER_OP_CPU_KERNEL(
    inverse, ops::InverseKernel<paddle::platform::CPUDeviceContext, float>,
    ops::InverseKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    inverse_grad,
    ops::InverseGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::InverseGradKernel<paddle::platform::CPUDeviceContext, double>);
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!--NewPage-->
<HTML>
<HEAD>
<!-- Generated by javadoc (build 1.6.0_45) on Fri Sep 05 23:35:02 UTC 2014 -->
<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
<TITLE>
Uses of Class org.apache.hadoop.fs.http.server.HttpFSParametersProvider.LenParam (Apache Hadoop HttpFS 2.5.1 API)
</TITLE>
<META NAME="date" CONTENT="2014-09-05">
<LINK REL ="stylesheet" TYPE="text/css" HREF="../../../../../../../stylesheet.css" TITLE="Style">
<SCRIPT type="text/javascript">
function windowTitle()
{
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Class org.apache.hadoop.fs.http.server.HttpFSParametersProvider.LenParam (Apache Hadoop HttpFS 2.5.1 API)";
}
}
</SCRIPT>
<NOSCRIPT>
</NOSCRIPT>
</HEAD>
<BODY BGCOLOR="white" onload="windowTitle();">
<HR>
<!-- ========= START OF TOP NAVBAR ======= -->
<A NAME="navbar_top"><!-- --></A>
<A HREF="#skip-navbar_top" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_top_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../org/apache/hadoop/fs/http/server/HttpFSParametersProvider.LenParam.html" title="class in org.apache.hadoop.fs.http.server"><FONT CLASS="NavBarFont1"><B>Class</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Use</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
PREV
NEXT</FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../../../index.html?org/apache/hadoop/fs/http/server//class-useHttpFSParametersProvider.LenParam.html" target="_top"><B>FRAMES</B></A>
<A HREF="HttpFSParametersProvider.LenParam.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_top"></A>
<!-- ========= END OF TOP NAVBAR ========= -->
<HR>
<CENTER>
<H2>
<B>Uses of Class<br>org.apache.hadoop.fs.http.server.HttpFSParametersProvider.LenParam</B></H2>
</CENTER>
No usage of org.apache.hadoop.fs.http.server.HttpFSParametersProvider.LenParam
<P>
<HR>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<A NAME="navbar_bottom"><!-- --></A>
<A HREF="#skip-navbar_bottom" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_bottom_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../org/apache/hadoop/fs/http/server/HttpFSParametersProvider.LenParam.html" title="class in org.apache.hadoop.fs.http.server"><FONT CLASS="NavBarFont1"><B>Class</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Use</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
PREV
NEXT</FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../../../index.html?org/apache/hadoop/fs/http/server//class-useHttpFSParametersProvider.LenParam.html" target="_top"><B>FRAMES</B></A>
<A HREF="HttpFSParametersProvider.LenParam.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_bottom"></A>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<HR>
Copyright © 2014 <a href="http://www.apache.org">Apache Software Foundation</a>. All Rights Reserved.
</BODY>
</HTML>
| jimmypw/hadoop | share/doc/hadoop/hadoop-hdfs-httpfs/apidocs/org/apache/hadoop/fs/http/server/class-use/HttpFSParametersProvider.LenParam.html | HTML | apache-2.0 | 6,526 |
<?php
/**
* @package php-font-lib
* @link https://github.com/PhenX/php-font-lib
* @author Fabien Ménager <fabien.menager@gmail.com>
* @license http://www.gnu.org/copyleft/lesser.html GNU Lesser General Public License
*/
namespace FontLib\WOFF;
use FontLib\Table\DirectoryEntry;
/**
* WOFF font file.
*
* @package php-font-lib
*
* @property TableDirectoryEntry[] $directory
*/
class File extends \FontLib\TrueType\File {
  /**
   * Lazily parses the WOFF header; subsequent calls are no-ops.
   */
  function parseHeader() {
    if (!empty($this->header)) {
      return;
    }

    $this->header = new Header($this);
    $this->header->parse();
  }

  /**
   * Loads a WOFF font file and converts it, table by table, into an
   * uncompressed TrueType layout written to a temporary file, which then
   * becomes the active file handle ($this->f).
   *
   * @param string $file Path to the WOFF file.
   */
  public function load($file) {
    parent::load($file);

    $this->parseTableEntries();
    // Table data starts after the header plus one 20-byte entry per table.
    $dataOffset = $this->pos() + count($this->directory) * 20;

    // $fr reads from the original WOFF; $fw receives the converted output.
    $fw = $this->getTempFile(false);
    $fr = $this->f;
    $this->f = $fw;
    $offset = $this->header->encode();

    foreach ($this->directory as $entry) {
      // Read ...
      $this->f = $fr;
      $this->seek($entry->offset);
      $data = $this->read($entry->length);

      // WOFF tables are zlib-compressed when stored length < original length.
      if ($entry->length < $entry->origLength) {
        $data = gzuncompress($data);
      }

      // Prepare data ...
      $length = strlen($data);
      $entry->length = $entry->origLength = $length;
      $entry->offset = $dataOffset;

      // Write ...
      $this->f = $fw;

      // Woff Entry
      $this->seek($offset);
      $offset += $this->write($entry->tag, 4); // tag
      $offset += $this->writeUInt32($dataOffset); // offset
      $offset += $this->writeUInt32($length); // length
      $offset += $this->writeUInt32($length); // origLength
      $offset += $this->writeUInt32(DirectoryEntry::computeChecksum($data)); // checksum

      // Data
      $this->seek($dataOffset);
      $dataOffset += $this->write($data, $length);
    }

    $this->f = $fw;
    $this->seek(0);

    // Need to re-parse this, don't know why
    $this->header = null;
    $this->directory = array();
    $this->parseTableEntries();
  }
}
| marc0l92/AmministratoreDiCondominio_webapp | application/libraries/dompdf/lib/php-font-lib/classes/WOFF/File.php | PHP | apache-2.0 | 2,056 |
using Newtonsoft.Json;
namespace Nest
{
/// <summary>Watcher action that writes an entry to the log.</summary>
[JsonObject]
public interface ILoggingAction : IAction
{
    /// <summary>Text of the log entry.</summary>
    [JsonProperty("text")]
    string Text { get; set; }

    /// <summary>Category under which the entry is logged.</summary>
    [JsonProperty("category")]
    string Category { get; set; }

    /// <summary>Optional log level for the entry.</summary>
    [JsonProperty("level")]
    LogLevel? Level { get; set; }
}
/// <summary>Concrete logging action; serialized as the "logging" action type.</summary>
public class LoggingAction : ActionBase, ILoggingAction
{
    public override ActionType ActionType => ActionType.Logging;

    /// <inheritdoc />
    public string Text { get; set; }

    /// <inheritdoc />
    public string Category { get; set; }

    /// <inheritdoc />
    public LogLevel? Level { get; set; }

    public LoggingAction(string name) : base(name) {}
}
/// <summary>Fluent descriptor for configuring a logging action.</summary>
public class LoggingActionDescriptor : ActionsDescriptorBase<LoggingActionDescriptor, ILoggingAction>, ILoggingAction
{
    protected override ActionType ActionType => ActionType.Logging;

    // Explicit interface implementations back the fluent setters below.
    LogLevel? ILoggingAction.Level { get; set; }
    string ILoggingAction.Text { get; set; }
    string ILoggingAction.Category { get; set; }

    public LoggingActionDescriptor(string name) : base(name) {}

    /// <summary>Sets the log level of the entry.</summary>
    public LoggingActionDescriptor Level(LogLevel level) => Assign(a => a.Level = level);

    /// <summary>Sets the text of the log entry.</summary>
    public LoggingActionDescriptor Text(string text) => Assign(a => a.Text = text);

    /// <summary>Sets the category to log under.</summary>
    public LoggingActionDescriptor Category(string category) => Assign(a => a.Category = category);
}
}
| CSGOpenSource/elasticsearch-net | src/Nest/XPack/Watcher/Action/Logging/LoggingAction.cs | C# | apache-2.0 | 1,240 |
/* (c) British Telecommunications plc, 2009, All Rights Reserved */
package com.bt.pi.sss;
import com.bt.pi.app.common.entities.User;
/**
 * Lookup of users by their access key.
 */
public interface UserManager {
    /** @return true when a user with the given access key exists */
    boolean userExists(String accessKey);

    /** @return the user registered under the given access key */
    User getUserByAccessKey(String accessKey);
}
| barnyard/pi-sss | src/main/java/com/bt/pi/sss/UserManager.java | Java | apache-2.0 | 259 |
#!/usr/bin/env python
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.enums import DBMS
from lib.core.settings import MAXDB_SYSTEM_DBS
from lib.core.unescaper import unescaper
from plugins.dbms.maxdb.enumeration import Enumeration
from plugins.dbms.maxdb.filesystem import Filesystem
from plugins.dbms.maxdb.fingerprint import Fingerprint
from plugins.dbms.maxdb.syntax import Syntax
from plugins.dbms.maxdb.takeover import Takeover
from plugins.generic.misc import Miscellaneous
class MaxDBMap(Syntax, Fingerprint, Enumeration, Filesystem, Miscellaneous, Takeover):
    """
    This class defines SAP MaxDB methods
    """

    def __init__(self):
        """
        Initializes every capability mixin and registers the MaxDB
        string escaper with the global unescaper registry.
        """
        # Hide MaxDB's built-in system schemas from enumeration results.
        self.excludeDbsList = MAXDB_SYSTEM_DBS

        # Invoke each mixin's initializer explicitly.
        Syntax.__init__(self)
        Fingerprint.__init__(self)
        Enumeration.__init__(self)
        Filesystem.__init__(self)
        Miscellaneous.__init__(self)
        Takeover.__init__(self)

        unescaper[DBMS.MAXDB] = Syntax.escape
| glaudsonml/kurgan-ai | tools/sqlmap/plugins/dbms/maxdb/__init__.py | Python | apache-2.0 | 1,033 |
/*
* Copyright 2014 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package pl.allegro.foggerexample.config;
import android.app.Application;
import android.app.Instrumentation;
import android.content.Context;
import android.content.SharedPreferences;
import android.preference.PreferenceManager;
import pl.allegro.foggerexample.config.application.ApplicationRunConfiguration;
import pl.allegro.foggerexample.config.dagger.Injector;
import pl.allegro.foggerexample.config.dagger.module.RootModule;
public class FoggerExampleApplication extends Application {

    // Process-wide handle to the application object.
    // NOTE(review): instance is assigned only by the Context-based
    // constructor below; neither onCreate() nor the other constructors call
    // setInstance(), so getInstance() can return null during a regular
    // (non-test) launch — confirm whether that is intended.
    private static FoggerExampleApplication instance;

    // Run configuration built from default shared preferences; populated in
    // initDaggerOnApplicationCreationStep() during onCreate().
    private ApplicationRunConfiguration applicationRunConfiguration;

    /** Default constructor used by the Android framework. */
    public FoggerExampleApplication() {
    }

    /**
     * Test-oriented constructor: attaches the supplied context as the base
     * context and records this object as the singleton instance.
     */
    public FoggerExampleApplication(final Context context) {
        super();
        attachBaseContext(context);
        setInstance(this);
    }

    /**
     * Normal application startup: builds the Dagger object graph, then
     * injects this application's own dependencies.
     */
    @Override
    public void onCreate() {
        super.onCreate();
        initDaggerOnApplicationCreationStep();
        Injector.inject(this);
    }

    // Creates the run configuration from shared preferences and initializes
    // the Dagger injector with the root module, including static injections.
    private void initDaggerOnApplicationCreationStep() {
        SharedPreferences sharedPreferences = PreferenceManager.getDefaultSharedPreferences(this);
        applicationRunConfiguration = ApplicationRunConfiguration.create(sharedPreferences);
        Object[] modules = new Object[]{new RootModule()};
        Injector.init(modules);
        Injector.injectStatics();
    }

    private static void setInstance(FoggerExampleApplication foggerExampleApplication) {
        instance = foggerExampleApplication;
    }

    /**
     * Instrumentation-test constructor: uses the instrumentation's target
     * context as the base context. Unlike the Context constructor, this one
     * does not call setInstance().
     */
    public FoggerExampleApplication(final Instrumentation instrumentation) {
        super();
        attachBaseContext(instrumentation.getTargetContext());
    }

    /** Returns the singleton instance, or null if it was never recorded. */
    public static FoggerExampleApplication getInstance() {
        return instance;
    }
}
| RyanTech/fogger | example/src/main/java/pl/allegro/foggerexample/config/FoggerExampleApplication.java | Java | apache-2.0 | 2,363 |
/*
* Copyright 2015 Alexey Andreev.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.teavm.jso.impl;
import java.io.IOException;
/**
 * Callback used to write a name into the generated JavaScript output.
 *
 * @author Alexey Andreev
 */
interface NameEmitter {
    /**
     * Emits the name to the underlying output.
     *
     * @param precedence presumably the operator precedence of the surrounding
     *        expression context, used to decide on parenthesization —
     *        TODO(review): confirm against callers
     * @throws IOException if writing to the output fails
     */
    void emit(int precedence) throws IOException;
}
| jtulach/teavm | jso/impl/src/main/java/org/teavm/jso/impl/NameEmitter.java | Java | apache-2.0 | 781 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <assert.h>
#include <stdarg.h>
#include <stdio.h>
#include <boost/lexical_cast.hpp>
#include "errmsgs.hpp"
namespace Drill{
    // Error-message catalog for client-side Drill errors.
    // getMessage() below indexes this table with (msgId - DRILL_ERR_START),
    // so entries must remain contiguous and listed in the same order as the
    // error-code constants declared in errmsgs.hpp.
    static Drill::ErrorMessages errorMessages[]={
        {ERR_NONE, 0, 0, "No Error."},
        {ERR_CONN_FAILURE, ERR_CATEGORY_CONN, 0, "Connection failure. Host:%s port:%s. Error: %s."},
        {ERR_CONN_EXCEPT, ERR_CATEGORY_CONN, 0, "Socket connection failure with the following exception: %s."},
        {ERR_CONN_UNKPROTO, ERR_CATEGORY_CONN, 0, "Unknown protocol: %s."},
        {ERR_CONN_RDFAIL, ERR_CATEGORY_CONN, 0, "Connection failed with error: %s."},
        {ERR_CONN_WFAIL, ERR_CATEGORY_CONN, 0, "Synchronous socket write failed with error: %s."},
        {ERR_CONN_ZOOKEEPER, ERR_CATEGORY_CONN, 0, "Zookeeper error. %s"},
        {ERR_CONN_NOHSHAKE, ERR_CATEGORY_CONN, 0, "Handshake failed because the server killed the connection. "
            "Expected RPC version %d."},
        {ERR_CONN_ZKFAIL, ERR_CATEGORY_CONN, 0, "Failed to connect to Zookeeper."},
        {ERR_CONN_ZKTIMOUT, ERR_CATEGORY_CONN, 0, "Timed out while waiting to connect."},
        {ERR_CONN_ZKERR, ERR_CATEGORY_CONN, 0, "Error in reading from Zookeeper (error code: %d)."},
        {ERR_CONN_ZKDBITERR, ERR_CATEGORY_CONN, 0, "Error in reading drillbit endpoint from Zookeeper (error code: %d)."},
        {ERR_CONN_ZKNODBIT, ERR_CATEGORY_CONN, 0, "No drillbit found with this Zookeeper."},
        {ERR_CONN_ZKNOAUTH, ERR_CATEGORY_CONN, 0, "Authentication failed."},
        {ERR_CONN_ZKEXP, ERR_CATEGORY_CONN, 0, "Session expired."},
        {ERR_CONN_HSHAKETIMOUT, ERR_CATEGORY_CONN, 0, "Handshake Timeout."},
        {ERR_CONN_BAD_RPC_VER, ERR_CATEGORY_CONN, 0, "Handshake failed because of a RPC version mismatch. "
            "Expected RPC version %d, got %d. [Server message was: (%s) %s]"},
        {ERR_CONN_AUTHFAIL, ERR_CATEGORY_CONN, 0, "User authentication failed (please check the username and password)."
            "[Server message was: (%s) %s]"},
        {ERR_CONN_UNKNOWN_ERR, ERR_CATEGORY_CONN, 0, "Handshake Failed due to an error on the server. [Server message was: (%s) %s]"},
        {ERR_CONN_NOCONN, ERR_CATEGORY_CONN, 0, "There is no connection to the server."},
        {ERR_CONN_ALREADYCONN, ERR_CATEGORY_CONN, 0, "This client is already connected to a server."},
        {ERR_CONN_NOCONNSTR, ERR_CATEGORY_CONN, 0, "Cannot connect if either host name or port number are empty."},
        {ERR_CONN_SSLCERTFAIL, ERR_CATEGORY_CONN, 0, "SSL certificate file %s could not be loaded (exception message: %s)."},
        {ERR_CONN_NOSOCKET, ERR_CATEGORY_CONN, 0, "Failed to open socket connection."},
        {ERR_CONN_NOSERVERAUTH, ERR_CATEGORY_CONN, 0, "Client needs a secure connection but server does not"
            " support any security mechanisms. Please contact an administrator. [Warn: This"
            " could be due to a bad configuration or a security attack is in progress.]"},
        {ERR_CONN_NOSERVERENC, ERR_CATEGORY_CONN, 0, "Client needs encryption but encryption is disabled on the server."
            " Please check connection parameters or contact administrator. [Warn: This"
            " could be due to a bad configuration or a security attack is in progress.]"},
        {ERR_QRY_OUTOFMEM, ERR_CATEGORY_QRY, 0, "Out of memory."},
        {ERR_QRY_COMMERR, ERR_CATEGORY_QRY, 0, "Communication error. %s"},
        {ERR_QRY_INVREADLEN, ERR_CATEGORY_QRY, 0, "Internal Error: Received a message with an invalid read length."},
        {ERR_QRY_INVQUERYID, ERR_CATEGORY_QRY, 0, "Internal Error: Cannot find query Id in internal structure."},
        {ERR_QRY_INVRPCTYPE, ERR_CATEGORY_QRY, 0, "Unknown rpc type received from server:%d."},
        {ERR_QRY_OUTOFORDER, ERR_CATEGORY_QRY, 0, "Internal Error: Query result received before query id. Aborting ..."},
        {ERR_QRY_INVRPC, ERR_CATEGORY_QRY, 0, "Rpc Error: %s."},
        {ERR_QRY_TIMOUT, ERR_CATEGORY_QRY, 0, "Timed out waiting for server to respond."},
        {ERR_QRY_FAILURE, ERR_CATEGORY_QRY, 0, "Query execution error. Details:[ \n%s\n]"},
        {ERR_QRY_SELVEC2, ERR_CATEGORY_QRY, 0, "Receiving a selection_vector_2 from the server came as a complete surprise at this point"},
        {ERR_QRY_RESPFAIL, ERR_CATEGORY_QRY, 0, "Received a RESPONSE_FAILURE from the server."},
        {ERR_QRY_UNKQRYSTATE, ERR_CATEGORY_QRY, 0, "Got an unknown query state message from the server."},
        {ERR_QRY_UNKQRY, ERR_CATEGORY_QRY, 0, "Query not found on server. It might have been terminated already."},
        {ERR_QRY_CANCELED, ERR_CATEGORY_QRY, 0, "Query has been cancelled"},
        {ERR_QRY_COMPLETED, ERR_CATEGORY_QRY, 0, "Query completed."},
        {ERR_QRY_16, ERR_CATEGORY_QRY, 0, "Query Failed."},
        {ERR_QRY_17, ERR_CATEGORY_QRY, 0, "Query Failed."},
        {ERR_QRY_18, ERR_CATEGORY_QRY, 0, "Query Failed."},
        {ERR_QRY_19, ERR_CATEGORY_QRY, 0, "Query Failed."},
        {ERR_QRY_20, ERR_CATEGORY_QRY, 0, "Query Failed."},
    };
std::string getMessage(uint32_t msgId, ...){
char str[10240];
std::string s;
assert((ERR_NONE <= msgId) && (msgId < ERR_QRY_MAX));
va_list args;
va_start (args, msgId);
vsnprintf (str, sizeof(str), errorMessages[msgId-DRILL_ERR_START].msgFormatStr, args);
va_end (args);
s=std::string("[")+boost::lexical_cast<std::string>(msgId)+std::string("]")+str;
return s;
}
}// namespace Drill
| KulykRoman/drill | contrib/native/client/src/clientlib/errmsgs.cpp | C++ | apache-2.0 | 6,003 |
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package adwords.axis.v201502.advancedoperations;
import com.google.api.ads.adwords.axis.factory.AdWordsServices;
import com.google.api.ads.adwords.axis.v201502.cm.AdvertisingChannelType;
import com.google.api.ads.adwords.axis.v201502.cm.ApiException;
import com.google.api.ads.adwords.axis.v201502.cm.BiddingStrategyConfiguration;
import com.google.api.ads.adwords.axis.v201502.cm.BiddingStrategyOperation;
import com.google.api.ads.adwords.axis.v201502.cm.BiddingStrategyReturnValue;
import com.google.api.ads.adwords.axis.v201502.cm.BiddingStrategyServiceInterface;
import com.google.api.ads.adwords.axis.v201502.cm.Budget;
import com.google.api.ads.adwords.axis.v201502.cm.BudgetBudgetDeliveryMethod;
import com.google.api.ads.adwords.axis.v201502.cm.BudgetBudgetPeriod;
import com.google.api.ads.adwords.axis.v201502.cm.BudgetOperation;
import com.google.api.ads.adwords.axis.v201502.cm.BudgetReturnValue;
import com.google.api.ads.adwords.axis.v201502.cm.BudgetServiceInterface;
import com.google.api.ads.adwords.axis.v201502.cm.Campaign;
import com.google.api.ads.adwords.axis.v201502.cm.CampaignOperation;
import com.google.api.ads.adwords.axis.v201502.cm.CampaignReturnValue;
import com.google.api.ads.adwords.axis.v201502.cm.CampaignServiceInterface;
import com.google.api.ads.adwords.axis.v201502.cm.Money;
import com.google.api.ads.adwords.axis.v201502.cm.NetworkSetting;
import com.google.api.ads.adwords.axis.v201502.cm.Operator;
import com.google.api.ads.adwords.axis.v201502.cm.SharedBiddingStrategy;
import com.google.api.ads.adwords.axis.v201502.cm.TargetSpendBiddingScheme;
import com.google.api.ads.adwords.lib.client.AdWordsSession;
import com.google.api.ads.common.lib.auth.OfflineCredentials;
import com.google.api.ads.common.lib.auth.OfflineCredentials.Api;
import com.google.api.client.auth.oauth2.Credential;
import java.rmi.RemoteException;
import javax.xml.rpc.ServiceException;
/**
 * This example adds a Shared Bidding Strategy and uses it to construct a campaign.
 */
public class UseSharedBiddingStrategy {

  // Optional: If you'd like to use an existing shared budget, assign a
  // shared budget ID here.
  private static final Long SHARED_BUDGET_ID = null;

  public static void main(String[] args) throws Exception {
    // Generate a refreshable OAuth2 credential from stored offline credentials.
    Credential oAuth2Credential = new OfflineCredentials.Builder()
        .forApi(Api.ADWORDS)
        .fromFile()
        .build()
        .generateCredential();

    // Construct an AdWordsSession from the properties file.
    AdWordsSession session = new AdWordsSession.Builder()
        .fromFile()
        .withOAuth2Credential(oAuth2Credential)
        .build();

    runExample(new AdWordsServices(), session, SHARED_BUDGET_ID);
  }

  /**
   * Runs the example: creates a shared bidding strategy, ensures a shared
   * budget is available, then builds a campaign that references both.
   *
   * @param adWordsServices the services factory to run the example with
   * @param session the AdWordsSession
   * @param sharedBudgetId ID of an existing shared budget, or {@code null} to
   *        create a new explicitly-shared one
   */
  public static void runExample(AdWordsServices adWordsServices, AdWordsSession session,
      Long sharedBudgetId) throws Exception {
    SharedBiddingStrategy strategy = createBiddingStrategy(adWordsServices, session);

    Long budgetId = sharedBudgetId;
    if (budgetId == null) {
      budgetId = createSharedBudget(adWordsServices, session).getBudgetId();
    }

    createCampaignWithBiddingStrategy(adWordsServices, session, strategy.getId(), budgetId);
  }

  /**
   * Creates a shared TARGET_SPEND ("Maximize Clicks") bidding strategy.
   *
   * @param adWordsServices the services factory to run the example with
   * @param session the AdWordsSession
   * @return the newly created bidding strategy
   */
  private static SharedBiddingStrategy createBiddingStrategy(AdWordsServices adWordsServices,
      AdWordsSession session)
      throws ApiException, RemoteException, ServiceException {
    BiddingStrategyServiceInterface strategyService =
        adWordsServices.get(session, BiddingStrategyServiceInterface.class);

    // Configure the bidding scheme; ceiling and target are optional.
    TargetSpendBiddingScheme scheme = new TargetSpendBiddingScheme();
    scheme.setBidCeiling(new Money(null, 2000000L));
    scheme.setSpendTarget(new Money(null, 20000000L));

    SharedBiddingStrategy strategy = new SharedBiddingStrategy();
    strategy.setName("Maximize Clicks" + System.currentTimeMillis());
    strategy.setBiddingScheme(scheme);

    BiddingStrategyOperation op = new BiddingStrategyOperation();
    op.setOperand(strategy);
    op.setOperator(Operator.ADD);

    BiddingStrategyReturnValue result =
        strategyService.mutate(new BiddingStrategyOperation[] {op});
    SharedBiddingStrategy created = result.getValue(0);
    System.out.printf(
        "Shared bidding strategy with name '%s' and ID %d of type %s was created.\n",
        created.getName(), created.getId(),
        created.getBiddingScheme().getBiddingSchemeType());
    return created;
  }

  /**
   * Creates an explicitly-shared daily budget for the campaign to reference.
   *
   * @param adWordsServices the services factory to run the example with
   * @param session the AdWordsSession
   * @return the newly created budget
   */
  private static Budget createSharedBudget(AdWordsServices adWordsServices,
      AdWordsSession session)
      throws ServiceException, ApiException, RemoteException {
    BudgetServiceInterface budgetService =
        adWordsServices.get(session, BudgetServiceInterface.class);

    Budget sharedBudget = new Budget();
    sharedBudget.setName("Shared Interplanetary Budget #" + System.currentTimeMillis());
    sharedBudget.setPeriod(BudgetBudgetPeriod.DAILY);
    sharedBudget.setAmount(new Money(null, 50000000L));
    sharedBudget.setDeliveryMethod(BudgetBudgetDeliveryMethod.STANDARD);
    sharedBudget.setIsExplicitlyShared(true);

    BudgetOperation op = new BudgetOperation();
    op.setOperand(sharedBudget);
    op.setOperator(Operator.ADD);

    BudgetReturnValue result = budgetService.mutate(new BudgetOperation[] {op});
    Budget created = result.getValue(0);
    System.out.printf("Budget with name '%s', ID %d was created.\n", created.getName(),
        created.getBudgetId());
    return created;
  }

  /**
   * Creates a search campaign that uses the given shared bidding strategy and
   * shared budget.
   *
   * @param adWordsServices the services factory to run the example with
   * @param session the AdWordsSession
   * @param biddingStrategyId the bidding strategy id to use
   * @param sharedBudgetId the shared budget id to use
   * @return the newly created campaign
   */
  private static Campaign createCampaignWithBiddingStrategy(
      AdWordsServices adWordsServices, AdWordsSession session, Long biddingStrategyId,
      Long sharedBudgetId) throws ApiException, RemoteException, ServiceException {
    CampaignServiceInterface campaignService =
        adWordsServices.get(session, CampaignServiceInterface.class);

    // Reference the shared budget by ID only.
    Budget budget = new Budget();
    budget.setBudgetId(sharedBudgetId);

    // Attach the shared bidding strategy (required).
    BiddingStrategyConfiguration biddingConfig = new BiddingStrategyConfiguration();
    biddingConfig.setBiddingStrategyId(biddingStrategyId);

    // Target Google Search plus the search and content networks (recommended).
    NetworkSetting networks = new NetworkSetting();
    networks.setTargetGoogleSearch(true);
    networks.setTargetSearchNetwork(true);
    networks.setTargetContentNetwork(true);

    Campaign campaign = new Campaign();
    campaign.setName("Interplanetary Cruise #" + System.currentTimeMillis());
    campaign.setBudget(budget);
    campaign.setBiddingStrategyConfiguration(biddingConfig);
    // Set advertising channel type (required).
    campaign.setAdvertisingChannelType(AdvertisingChannelType.SEARCH);
    campaign.setNetworkSetting(networks);

    CampaignOperation op = new CampaignOperation();
    op.setOperand(campaign);
    op.setOperator(Operator.ADD);

    CampaignReturnValue result = campaignService.mutate(new CampaignOperation[] {op});
    Campaign created = result.getValue(0);
    System.out.printf("Campaign with name '%s', ID %d and bidding scheme ID %d was created.\n",
        created.getName(), created.getId(),
        created.getBiddingStrategyConfiguration().getBiddingStrategyId());
    return created;
  }
}
| raja15792/googleads-java-lib | examples/adwords_axis/src/main/java/adwords/axis/v201502/advancedoperations/UseSharedBiddingStrategy.java | Java | apache-2.0 | 9,544 |
<?php
/**
* Copyright 2012-2015 ContactLab, Italy
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Response wrapper for the createCampaign SOAP operation (presumably
 * generated from the service WSDL — confirm before hand-editing).
 * The created campaign returned by the service is exposed via $return.
 */
class createCampaignResponse
{

  /**
   * The Campaign produced by the createCampaign call.
   *
   * @var Campaign $return
   * @access public
   */
  public $return = null;

}
| contactlab/soap-api-php-client | src/createCampaignResponse.php | PHP | apache-2.0 | 742 |
/*
* Copyright 2014 Space Dynamics Laboratory - Utah State University Research Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.usu.sdl.openstorefront.web.init;
import javax.servlet.ServletContextEvent;
import javax.servlet.ServletContextListener;
import javax.servlet.annotation.WebListener;
/**
 * Servlet-context lifecycle hook for the web application. Both callbacks are
 * currently no-ops; the class provides a registered home for future
 * startup/shutdown logic.
 *
 * @author dshurtleff
 */
@WebListener
public class ApplicationInit
		implements ServletContextListener
{
	@Override
	public void contextInitialized(ServletContextEvent sce)
	{
		// No startup work yet.
	}

	@Override
	public void contextDestroyed(ServletContextEvent sce)
	{
		// No shutdown work yet.
	}
}
| skycow/Open-Storefront | server/openstorefront/openstorefront-web/src/main/java/edu/usu/sdl/openstorefront/web/init/ApplicationInit.java | Java | apache-2.0 | 1,105 |
/*
* Copyright 2015 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.scanner;
import java.util.Collection;
import java.util.HashMap;
import org.drools.compiler.kie.builder.impl.MessageImpl;
import org.junit.Test;
import org.kie.api.KieServices;
import org.kie.api.builder.KieBuilder;
import org.kie.api.builder.KieFileSystem;
import org.kie.api.builder.ReleaseId;
import org.kie.api.builder.model.KieModuleModel;
import org.kie.api.conf.EqualityBehaviorOption;
import org.kie.api.conf.EventProcessingOption;
import org.kie.internal.builder.IncrementalResults;
import org.kie.internal.builder.InternalKieBuilder;
import static org.junit.Assert.*;
/**
 * Tests for incremental (re)compilation of KieModules: adding, deleting and
 * fixing resources after an initial full build, and verifying the resulting
 * build messages and module metadata.
 */
public class KieModuleIncrementalCompilationTest extends AbstractKieCiTest {

    @Test
    public void testCheckMetaDataAfterIncrementalDelete() throws Exception {
        String drl1 = "package org.kie.scanner\n" +
                "rule R1 when\n" +
                "   String()\n" +
                "then\n" +
                "end\n";

        String drl2 = "package org.kie.scanner\n" +
                "rule R2_2 when\n" +
                "   String( )\n" +
                "then\n" +
                "end\n";

        KieServices ks = KieServices.Factory.get();

        KieFileSystem kfs = ks.newKieFileSystem()
                .write( "src/main/resources/r1.drl", drl1 )
                .write( "src/main/resources/r2.drl", drl2 );

        KieBuilder kieBuilder = ks.newKieBuilder( kfs ).buildAll();
        assertEquals( 2, getRuleNames( kieBuilder ).get( "org.kie.scanner" ).size() );

        kfs.delete( "src/main/resources/r2.drl" );
        // Only the incremental build's side effect on the module matters here;
        // the returned IncrementalResults was previously assigned to an unused
        // local, which has been removed.
        ( (InternalKieBuilder) kieBuilder ).createFileSet( "src/main/resources/r2.drl" ).build();

        assertEquals( 1, getRuleNames( kieBuilder ).get( "org.kie.scanner" ).size() );
    }

    /**
     * Collects, per package, the names of all rules currently present in the
     * built module's metadata.
     */
    private HashMap<String, Collection<String>> getRuleNames( KieBuilder kieBuilder ) {
        KieModuleMetaData kieModuleMetaData = getKieModuleMetaData( kieBuilder );
        HashMap<String, Collection<String>> ruleNames = new HashMap<String, Collection<String>>();
        for ( String packageName : kieModuleMetaData.getPackages() ) {
            ruleNames.put( packageName, kieModuleMetaData.getRuleNamesInPackage( packageName ) );
        }
        return ruleNames;
    }

    // Extracts module metadata even when the module has build errors.
    private KieModuleMetaData getKieModuleMetaData( KieBuilder kieBuilder ) {
        return KieModuleMetaData.Factory.newKieModuleMetaData( ( (InternalKieBuilder) kieBuilder ).getKieModuleIgnoringErrors() );
    }

    @Test
    public void testIncrementalCompilationFirstBuildHasErrors() throws Exception {
        KieServices ks = KieServices.Factory.get();

        //Malformed POM - No Version information
        ReleaseId releaseId = ks.newReleaseId( "org.kie", "incremental-test-with-invalid pom", "" );

        KieFileSystem kfs = createKieFileSystemWithKProject( ks );
        kfs.writePomXML( getPom( releaseId ) );

        //Valid
        String drl1 =
                "rule R1 when\n" +
                "   $s : String()\n" +
                "then\n" +
                "end\n";

        //Invalid
        String drl2 =
                "rule R2 when\n" +
                "   $s : Strin( )\n" +
                "then\n" +
                "end\n";

        //Write Rule 1 - No DRL errors, but POM is in error
        kfs.write( "src/main/resources/KBase1/r1.drl", drl1 );
        KieBuilder kieBuilder = ks.newKieBuilder( kfs ).buildAll();
        assertEquals( 1,
                kieBuilder.getResults().getMessages( org.kie.api.builder.Message.Level.ERROR ).size() );

        //Add file with error - expect 1 "added" error message
        kfs.write( "src/main/resources/KBase1/r2.drl", drl2 );
        IncrementalResults addResults = ( (InternalKieBuilder) kieBuilder ).createFileSet( "src/main/resources/KBase1/r2.drl" ).build();

        assertEquals( 1, addResults.getAddedMessages().size() );
        assertEquals( 0, addResults.getRemovedMessages().size() );
    }

    @Test
    public void checkIncrementalCompilationWithRuleFunctionRule() throws Exception {
        String rule_1 = "package org.kie.scanner\n" +
                "rule R1 when\n" +
                "   String()\n" +
                "then\n" +
                "end\n";

        String rule_2 = "package org.kie.scanner\n" +
                "rule R1 when\n" +
                "   String()\n" +
                "then\n" +
                "   System.out.println(MyFunction());\n" +
                "end\n";

        String function = "package org.kie.scanner\n" +
                "function int MyFunction() {\n" +
                "   return 1;\n" +
                "}\n";

        KieServices ks = KieServices.Factory.get();
        KieFileSystem kfs = ks.newKieFileSystem();

        kfs.write( "src/main/resources/org/kie/scanner/rule.drl", rule_1 );
        KieBuilder kieBuilder = ks.newKieBuilder( kfs ).buildAll();
        assertEquals( 0,
                kieBuilder.getResults().getMessages( org.kie.api.builder.Message.Level.ERROR ).size() );

        // Adding the function alone must not introduce or remove messages.
        kfs.write( "src/main/resources/org/kie/scanner/function.drl", function );
        IncrementalResults addResults1 = ( (InternalKieBuilder) kieBuilder ).createFileSet( "src/main/resources/org/kie/scanner/function.drl" ).build();
        assertEquals( 0, addResults1.getAddedMessages().size() );
        assertEquals( 0, addResults1.getRemovedMessages().size() );

        // Updating the rule to call the existing function must also be clean.
        kfs.write( "src/main/resources/org/kie/scanner/rule.drl", rule_2 );
        IncrementalResults addResults2 = ( (InternalKieBuilder) kieBuilder ).createFileSet( "src/main/resources/org/kie/scanner/rule.drl" ).build();
        assertEquals( 0, addResults2.getAddedMessages().size() );
        assertEquals( 0, addResults2.getRemovedMessages().size() );
    }

    @Test
    public void checkIncrementalCompilationWithRuleThenFunction() throws Exception {
        String rule = "package org.kie.scanner\n" +
                "rule R1 when\n" +
                "   String()\n" +
                "then\n" +
                "   System.out.println(MyFunction());\n" +
                "end\n";

        String function = "package org.kie.scanner\n" +
                "function int MyFunction() {\n" +
                "   return 1;\n" +
                "}\n";

        KieServices ks = KieServices.Factory.get();
        KieFileSystem kfs = ks.newKieFileSystem();

        // First build fails: the rule references a function that does not
        // exist yet.
        kfs.write( "src/main/resources/org/kie/scanner/rule.drl", rule );
        KieBuilder kieBuilder = ks.newKieBuilder( kfs ).buildAll();
        assertEquals( 1,
                kieBuilder.getResults().getMessages( org.kie.api.builder.Message.Level.ERROR ).size() );

        // Adding the missing function incrementally must remove the error.
        kfs.write( "src/main/resources/org/kie/scanner/function.drl", function );
        IncrementalResults addResults1 = ( (InternalKieBuilder) kieBuilder ).createFileSet( "src/main/resources/org/kie/scanner/function.drl" ).build();
        assertEquals( 0, addResults1.getAddedMessages().size() );
        assertEquals( 1, addResults1.getRemovedMessages().size() );
    }

    @Test
    public void checkIncrementalCompilationWithFunctionThenRule() throws Exception {
        String rule = "package org.kie.scanner\n" +
                "rule R1 when\n" +
                "   String()\n" +
                "then\n" +
                "   System.out.println(MyFunction());\n" +
                "end\n";

        String function = "package org.kie.scanner\n" +
                "function int MyFunction() {\n" +
                "   return 1;\n" +
                "}\n";

        KieServices ks = KieServices.Factory.get();
        KieFileSystem kfs = ks.newKieFileSystem();

        kfs.write( "src/main/resources/org/kie/scanner/function.drl", function );
        KieBuilder kieBuilder = ks.newKieBuilder( kfs ).buildAll();
        assertEquals( 0,
                kieBuilder.getResults().getMessages( org.kie.api.builder.Message.Level.ERROR ).size() );

        // The rule can resolve the pre-existing function, so the incremental
        // build stays clean.
        kfs.write( "src/main/resources/org/kie/scanner/rule.drl", rule );
        IncrementalResults addResults = ( (InternalKieBuilder) kieBuilder ).createFileSet( "src/main/resources/org/kie/scanner/rule.drl" ).build();
        assertEquals( 0, addResults.getAddedMessages().size() );
        assertEquals( 0, addResults.getRemovedMessages().size() );
    }

    @Test
    public void checkIncrementalCompilationWithMultipleKieBases() throws Exception {
        String rule = "package org.kie.scanner\n" +
                "rule R1 when\n" +
                "then\n" +
                "end\n";

        String invalidRule = "package org.kie.scanner\n" +
                "rule R2 when\n" +
                "   Cheese()\n" + // missing import
                "then\n" +
                "end\n";

        KieServices ks = KieServices.Factory.get();
        KieFileSystem kfs = createKieFileSystemWithTwoKBases(ks);

        kfs.write("src/main/resources/org/kie/scanner/rule.drl",
                  rule);
        KieBuilder kieBuilder = ks.newKieBuilder(kfs).buildAll();
        assertEquals(0,
                     kieBuilder.getResults().getMessages().size());

        // The invalid rule is compiled into both KieBases, so one error is
        // reported per KieBase, and each carries its KieBase name.
        kfs.write("src/main/resources/org/kie/scanner/invalidRule.drl",
                  invalidRule);
        IncrementalResults addResults = ((InternalKieBuilder) kieBuilder).createFileSet("src/main/resources/org/kie/scanner/invalidRule.drl").build();
        assertEquals(2, addResults.getAddedMessages().size());
        addResults
                .getAddedMessages()
                .stream()
                .map(m -> (MessageImpl) m )
                .forEach(m -> assertNotNull(m.getKieBaseName()));
    }

    // Builds a KieFileSystem whose kmodule declares two KieBases: the default
    // one plus "kbase1", both with EQUALITY/STREAM settings.
    private KieFileSystem createKieFileSystemWithTwoKBases(final KieServices ks) {
        final KieModuleModel kproj = ks.newKieModuleModel();

        kproj.newKieBaseModel("default").setDefault(true)
             .setEqualsBehavior( EqualityBehaviorOption.EQUALITY )
             .setEventProcessingMode( EventProcessingOption.STREAM );

        kproj.newKieBaseModel("kbase1").setDefault(false)
             .setEqualsBehavior(EqualityBehaviorOption.EQUALITY)
             .setEventProcessingMode(EventProcessingOption.STREAM);

        final KieFileSystem kfs = ks.newKieFileSystem();
        kfs.writeKModuleXML(kproj.toXML());
        return kfs;
    }
}
| reynoldsm88/drools | kie-ci/src/test/java/org/kie/scanner/KieModuleIncrementalCompilationTest.java | Java | apache-2.0 | 10,918 |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/events/CloudWatchEvents_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/events/model/ConnectionState.h>
#include <aws/core/utils/DateTime.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace CloudWatchEvents
{
namespace Model
{
  // Value-holder for the UpdateConnection API response.
  // NOTE(review): the accessors follow the AWS SDK's generated pattern
  // (const-ref getter; copy/move/C-string setters; fluent With* setters
  // returning *this) — presumably machine-generated, so avoid hand edits.
  class AWS_CLOUDWATCHEVENTS_API UpdateConnectionResult
  {
  public:
    UpdateConnectionResult();
    UpdateConnectionResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
    UpdateConnectionResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);


    /**
     * <p>The ARN of the connection that was updated.</p>
     */
    inline const Aws::String& GetConnectionArn() const{ return m_connectionArn; }

    /**
     * <p>The ARN of the connection that was updated.</p>
     */
    inline void SetConnectionArn(const Aws::String& value) { m_connectionArn = value; }

    /**
     * <p>The ARN of the connection that was updated.</p>
     */
    inline void SetConnectionArn(Aws::String&& value) { m_connectionArn = std::move(value); }

    /**
     * <p>The ARN of the connection that was updated.</p>
     */
    inline void SetConnectionArn(const char* value) { m_connectionArn.assign(value); }

    /**
     * <p>The ARN of the connection that was updated.</p>
     */
    inline UpdateConnectionResult& WithConnectionArn(const Aws::String& value) { SetConnectionArn(value); return *this;}

    /**
     * <p>The ARN of the connection that was updated.</p>
     */
    inline UpdateConnectionResult& WithConnectionArn(Aws::String&& value) { SetConnectionArn(std::move(value)); return *this;}

    /**
     * <p>The ARN of the connection that was updated.</p>
     */
    inline UpdateConnectionResult& WithConnectionArn(const char* value) { SetConnectionArn(value); return *this;}


    /**
     * <p>The state of the connection that was updated.</p>
     */
    inline const ConnectionState& GetConnectionState() const{ return m_connectionState; }

    /**
     * <p>The state of the connection that was updated.</p>
     */
    inline void SetConnectionState(const ConnectionState& value) { m_connectionState = value; }

    /**
     * <p>The state of the connection that was updated.</p>
     */
    inline void SetConnectionState(ConnectionState&& value) { m_connectionState = std::move(value); }

    /**
     * <p>The state of the connection that was updated.</p>
     */
    inline UpdateConnectionResult& WithConnectionState(const ConnectionState& value) { SetConnectionState(value); return *this;}

    /**
     * <p>The state of the connection that was updated.</p>
     */
    inline UpdateConnectionResult& WithConnectionState(ConnectionState&& value) { SetConnectionState(std::move(value)); return *this;}


    /**
     * <p>A time stamp for the time that the connection was created.</p>
     */
    inline const Aws::Utils::DateTime& GetCreationTime() const{ return m_creationTime; }

    /**
     * <p>A time stamp for the time that the connection was created.</p>
     */
    inline void SetCreationTime(const Aws::Utils::DateTime& value) { m_creationTime = value; }

    /**
     * <p>A time stamp for the time that the connection was created.</p>
     */
    inline void SetCreationTime(Aws::Utils::DateTime&& value) { m_creationTime = std::move(value); }

    /**
     * <p>A time stamp for the time that the connection was created.</p>
     */
    inline UpdateConnectionResult& WithCreationTime(const Aws::Utils::DateTime& value) { SetCreationTime(value); return *this;}

    /**
     * <p>A time stamp for the time that the connection was created.</p>
     */
    inline UpdateConnectionResult& WithCreationTime(Aws::Utils::DateTime&& value) { SetCreationTime(std::move(value)); return *this;}


    /**
     * <p>A time stamp for the time that the connection was last modified.</p>
     */
    inline const Aws::Utils::DateTime& GetLastModifiedTime() const{ return m_lastModifiedTime; }

    /**
     * <p>A time stamp for the time that the connection was last modified.</p>
     */
    inline void SetLastModifiedTime(const Aws::Utils::DateTime& value) { m_lastModifiedTime = value; }

    /**
     * <p>A time stamp for the time that the connection was last modified.</p>
     */
    inline void SetLastModifiedTime(Aws::Utils::DateTime&& value) { m_lastModifiedTime = std::move(value); }

    /**
     * <p>A time stamp for the time that the connection was last modified.</p>
     */
    inline UpdateConnectionResult& WithLastModifiedTime(const Aws::Utils::DateTime& value) { SetLastModifiedTime(value); return *this;}

    /**
     * <p>A time stamp for the time that the connection was last modified.</p>
     */
    inline UpdateConnectionResult& WithLastModifiedTime(Aws::Utils::DateTime&& value) { SetLastModifiedTime(std::move(value)); return *this;}


    /**
     * <p>A time stamp for the time that the connection was last authorized.</p>
     */
    inline const Aws::Utils::DateTime& GetLastAuthorizedTime() const{ return m_lastAuthorizedTime; }

    /**
     * <p>A time stamp for the time that the connection was last authorized.</p>
     */
    inline void SetLastAuthorizedTime(const Aws::Utils::DateTime& value) { m_lastAuthorizedTime = value; }

    /**
     * <p>A time stamp for the time that the connection was last authorized.</p>
     */
    inline void SetLastAuthorizedTime(Aws::Utils::DateTime&& value) { m_lastAuthorizedTime = std::move(value); }

    /**
     * <p>A time stamp for the time that the connection was last authorized.</p>
     */
    inline UpdateConnectionResult& WithLastAuthorizedTime(const Aws::Utils::DateTime& value) { SetLastAuthorizedTime(value); return *this;}

    /**
     * <p>A time stamp for the time that the connection was last authorized.</p>
     */
    inline UpdateConnectionResult& WithLastAuthorizedTime(Aws::Utils::DateTime&& value) { SetLastAuthorizedTime(std::move(value)); return *this;}

  private:

    Aws::String m_connectionArn;

    ConnectionState m_connectionState;

    Aws::Utils::DateTime m_creationTime;

    Aws::Utils::DateTime m_lastModifiedTime;

    Aws::Utils::DateTime m_lastAuthorizedTime;
  };
} // namespace Model
} // namespace CloudWatchEvents
} // namespace Aws
| aws/aws-sdk-cpp | aws-cpp-sdk-events/include/aws/events/model/UpdateConnectionResult.h | C | apache-2.0 | 6,508 |
using System;
using BEPUphysics.BroadPhaseEntries;
using BEPUphysics.BroadPhaseSystems;
using BEPUphysics.BroadPhaseEntries.MobileCollidables;
using BEPUphysics.CollisionTests;
using BEPUphysics.CollisionTests.CollisionAlgorithms.GJK;
using BEPUphysics.CollisionTests.Manifolds;
using BEPUphysics.Constraints.Collision;
using BEPUphysics.PositionUpdating;
using BEPUphysics.Settings;
namespace BEPUphysics.NarrowPhaseSystems.Pairs
{
    ///<summary>
    /// Handles a convex-convex collision pair.
    ///</summary>
    public class GeneralConvexPairHandler : ConvexConstraintPairHandler
    {
        // Pair members; assigned in Initialize and released in CleanUp.
        ConvexCollidable convexA;
        ConvexCollidable convexB;
        // Incrementally maintained set of contact points between the two convex shapes.
        GeneralConvexContactManifold contactManifold = new GeneralConvexContactManifold();
        /// <summary>
        /// Gets the first collidable in the pair.
        /// </summary>
        public override Collidable CollidableA
        {
            get { return convexA; }
        }
        /// <summary>
        /// Gets the second collidable in the pair.
        /// </summary>
        public override Collidable CollidableB
        {
            get { return convexB; }
        }
        /// <summary>
        /// Gets the contact manifold used by the pair handler.
        /// </summary>
        public override ContactManifold ContactManifold
        {
            get { return contactManifold; }
        }
        /// <summary>
        /// Gets the entity owning the first collidable.
        /// </summary>
        public override Entities.Entity EntityA
        {
            get { return convexA.entity; }
        }
        /// <summary>
        /// Gets the entity owning the second collidable.
        /// </summary>
        public override Entities.Entity EntityB
        {
            get { return convexB.entity; }
        }
        ///<summary>
        /// Initializes the pair handler.
        ///</summary>
        ///<param name="entryA">First entry in the pair.</param>
        ///<param name="entryB">Second entry in the pair.</param>
        public override void Initialize(BroadPhaseEntry entryA, BroadPhaseEntry entryB)
        {
            // 'as' casts yield null for non-convex entries; such pairings are invalid here.
            convexA = entryA as ConvexCollidable;
            convexB = entryB as ConvexCollidable;
            if (convexA == null || convexB == null)
            {
                throw new ArgumentException("Inappropriate types used to initialize pair.");
            }
            base.Initialize(entryA, entryB);
        }
        ///<summary>
        /// Cleans up the pair handler.
        ///</summary>
        public override void CleanUp()
        {
            base.CleanUp();
            // Drop references so a pooled/reused handler does not pin collidables.
            convexA = null;
            convexB = null;
        }
    }
| Anomalous-Software/BEPUPhysics | BEPUphysics/NarrowPhaseSystems/Pairs/GeneralConvexPairHandler.cs | C# | apache-2.0 | 2,405 |
#
# Author:: Seth Chisamore <schisamo@opscode.com>
# Cookbook Name:: python
# Recipe:: source
#
# Copyright 2011, Opscode, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Flags passed to CPython's ./configure, joined into a single string.
configure_options = node['python']['configure_options'].join(" ")

# Build dependencies required to compile CPython from source, per platform family.
packages = value_for_platform_family(
  "rhel" => ["openssl-devel","bzip2-devel","zlib-devel","expat-devel","db4-devel","sqlite-devel","ncurses-devel","readline-devel"],
  "default" => ["libssl-dev","libbz2-dev","zlib1g-dev","libexpat1-dev","libdb-dev","libsqlite3-dev","libncursesw5-dev","libncurses5-dev","libreadline-dev","libsasl2-dev", "libgdbm-dev"]
)

packages.each do |dev_pkg|
  package dev_pkg
end

version = node['python']['version']
# e.g. version "2.7.3" installs ".../bin/python2.7" (major.minor captured by the regex split).
install_path = "#{node['python']['prefix_dir']}/bin/python#{version.split(/(^\d+\.\d+)/)[1]}"

remote_file "#{Chef::Config[:file_cache_path]}/Python-#{version}.tar.bz2" do
  source "#{node['python']['url']}/#{version}/Python-#{version}.tar.bz2"
  checksum node['python']['checksum']
  mode "0644"
  # Skip the download when this Python version is already installed.
  # NOTE: File.exists? is a deprecated alias; exist? is the supported form.
  not_if { ::File.exist?(install_path) }
end

bash "build-and-install-python" do
  cwd Chef::Config[:file_cache_path]
  code <<-EOF
  tar -jxvf Python-#{version}.tar.bz2
  (cd Python-#{version} && ./configure #{configure_options})
  (cd Python-#{version} && make && make install)
  EOF
  # Ubuntu 12.04+ moved libraries to multiarch paths; point the toolchain at them.
  environment({
    "LDFLAGS" => "-L#{node['python']['prefix_dir']} -L/usr/lib",
    "CPPFLAGS" => "-I#{node['python']['prefix_dir']} -I/usr/lib",
    "CXXFLAGS" => "-I#{node['python']['prefix_dir']} -I/usr/lib",
    "CFLAGS" => "-I#{node['python']['prefix_dir']} -I/usr/lib"
  }) if platform?("ubuntu") && node['platform_version'].to_f >= 12.04
  not_if { ::File.exist?(install_path) }
end
| zenoss/python | recipes/source.rb | Ruby | apache-2.0 | 2,207 |
<?php
final class PhabricatorDifferentialApplication extends PhabricatorApplication {

  /** Root URI prefix under which all Differential controllers are routed. */
  public function getBaseURI() {
    return '/differential/';
  }

  /** Application name shown in the applications directory. */
  public function getName() {
    return pht('Differential');
  }

  /** Label used in navigation menus. */
  public function getMenuName() {
    return pht('Code Review');
  }

  /** One-line description shown in application listings. */
  public function getShortDescription() {
    return pht('Pre-Commit Review');
  }

  /** FontAwesome icon identifier for this application. */
  public function getIcon() {
    return 'fa-cog';
  }

  /** Differential is pinned to users' home navigation by default. */
  public function isPinnedByDefault(PhabricatorUser $viewer) {
    return true;
  }

  /** Help articles linked from the application's help menu. */
  public function getHelpDocumentationArticles(PhabricatorUser $viewer) {
    return array(
      array(
        'name' => pht('Differential User Guide'),
        'href' => PhabricatorEnv::getDoclink('Differential User Guide'),
      ),
    );
  }

  /** Object types this application contributes to Facts analysis. */
  public function getFactObjectsForAnalysis() {
    return array(
      new DifferentialRevision(),
    );
  }

  /** Page-title glyph (UTF-8 gear character, U+2699). */
  public function getTitleGlyph() {
    return "\xE2\x9A\x99";
  }

  /** Long-form description shown on the application detail page. */
  public function getOverview() {
    return pht(
      'Differential is a **code review application** which allows '.
      'engineers to review, discuss and approve changes to software.');
  }

  /**
   * URI routing map: regex pattern => controller class.
   * "/D<id>" is the global short form for viewing a revision.
   */
  public function getRoutes() {
    return array(
      '/D(?P<id>[1-9]\d*)' => 'DifferentialRevisionViewController',
      '/differential/' => array(
        '(?:query/(?P<queryKey>[^/]+)/)?'
          => 'DifferentialRevisionListController',
        'diff/' => array(
          '(?P<id>[1-9]\d*)/' => 'DifferentialDiffViewController',
          'create/' => 'DifferentialDiffCreateController',
        ),
        'changeset/' => 'DifferentialChangesetViewController',
        'revision/' => array(
          $this->getEditRoutePattern('edit/')
            => 'DifferentialRevisionEditController',
          $this->getEditRoutePattern('attach/(?P<diffID>[^/]+)/to/')
            => 'DifferentialRevisionEditController',
          'closedetails/(?P<phid>[^/]+)/'
            => 'DifferentialRevisionCloseDetailsController',
          'update/(?P<revisionID>[1-9]\d*)/'
            => 'DifferentialDiffCreateController',
          'operation/(?P<id>[1-9]\d*)/'
            => 'DifferentialRevisionOperationController',
          'inlines/(?P<id>[1-9]\d*)/'
            => 'DifferentialRevisionInlinesController',
        ),
        'comment/' => array(
          'preview/(?P<id>[1-9]\d*)/' => 'DifferentialCommentPreviewController',
          'save/(?P<id>[1-9]\d*)/' => 'DifferentialCommentSaveController',
          'inline/' => array(
            'preview/(?P<id>[1-9]\d*)/'
              => 'DifferentialInlineCommentPreviewController',
            'edit/(?P<id>[1-9]\d*)/'
              => 'DifferentialInlineCommentEditController',
          ),
        ),
        'preview/' => 'PhabricatorMarkupPreviewController',
      ),
    );
  }

  /** Sort weight among applications; lower values sort first. */
  public function getApplicationOrder() {
    return 0.100;
  }

  /** Remarkup rules contributed by this application (e.g. "D123" links). */
  public function getRemarkupRules() {
    return array(
      new DifferentialRemarkupRule(),
    );
  }

  /** Revisions can be created and updated via inbound email. */
  public function supportsEmailIntegration() {
    return true;
  }

  /** Explanatory text shown next to the application's inbound email addresses. */
  public function getAppEmailBlurb() {
    return pht(
      'Send email to these addresses to create revisions. The body of the '.
      'message and / or one or more attachments should be the output of a '.
      '"diff" command. %s',
      phutil_tag(
        'a',
        array(
          'href' => $this->getInboundEmailSupportLink(),
        ),
        pht('Learn More')));
  }

  /** Custom policy capabilities introduced by Differential. */
  protected function getCustomCapabilities() {
    return array(
      DifferentialDefaultViewCapability::CAPABILITY => array(
        'caption' => pht('Default view policy for newly created revisions.'),
        'template' => DifferentialRevisionPHIDType::TYPECONST,
        'capability' => PhabricatorPolicyCapability::CAN_VIEW,
      ),
    );
  }

  /** Documentation metadata for email commands usable on revisions. */
  public function getMailCommandObjects() {
    return array(
      'revision' => array(
        'name' => pht('Email Commands: Revisions'),
        'header' => pht('Interacting with Differential Revisions'),
        'object' => new DifferentialRevision(),
        'summary' => pht(
          'This page documents the commands you can use to interact with '.
          'revisions in Differential.'),
      ),
    );
  }

  /** PHID types indexed for global application search. */
  public function getApplicationSearchDocumentTypes() {
    return array(
      DifferentialRevisionPHIDType::TYPECONST,
    );
  }
}
| r4nt/phabricator | src/applications/differential/application/PhabricatorDifferentialApplication.php | PHP | apache-2.0 | 4,319 |
/*
* Copyright 2019 NAVER Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.navercorp.pinpoint.profiler.sender.grpc;
import com.navercorp.pinpoint.common.util.Assert;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.Executor;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Schedules reconnect jobs on a shared {@link ScheduledExecutorService}, using each
 * job's own exponential-backoff delay. After {@link #close()} further submissions
 * are silently dropped.
 *
 * @author Woonduk Kang(emeroad)
 */
public class ReconnectExecutor {

    private final Logger logger = LoggerFactory.getLogger(this.getClass());

    private volatile boolean shutdown;

    private final ScheduledExecutorService scheduledExecutorService;
    private final AtomicLong rejectedCounter = new AtomicLong();

    public ReconnectExecutor(ScheduledExecutorService scheduledExecutorService) {
        this.scheduledExecutorService = Assert.requireNonNull(scheduledExecutorService, "scheduledExecutorService");
    }

    /**
     * Schedules the command with its backoff delay. Only {@link ReconnectJob}
     * instances are accepted: they carry the backoff state needed for scheduling.
     */
    private void execute0(Runnable command) {
        Assert.requireNonNull(command, "command");
        if (shutdown) {
            logger.debug("already shutdown");
            return;
        }
        if (!(command instanceof ReconnectJob)) {
            throw new IllegalArgumentException("unknown command type " + command);
        }
        final ReconnectJob job = (ReconnectJob) command;
        try {
            scheduledExecutorService.schedule(job, job.nextBackoffNanos(), TimeUnit.NANOSECONDS);
        } catch (RejectedExecutionException e) {
            // Executor saturated or terminated: count and log, do not propagate.
            final long rejected = rejectedCounter.incrementAndGet();
            logger.info("{} reconnectJob scheduled fail {}", command, rejected);
        }
    }

    /** Stops accepting new reconnect jobs; already-scheduled jobs are unaffected. */
    public void close() {
        shutdown = true;
    }

    /**
     * Creates a {@link Reconnector} that dispatches the given job through this
     * executor with exponential backoff.
     */
    public Reconnector newReconnector(Runnable reconnectJob) {
        Assert.requireNonNull(reconnectJob, "reconnectJob");
        if (logger.isInfoEnabled()) {
            logger.info("newReconnector(reconnectJob = [{}])", reconnectJob);
        }
        final Executor dispatch = new Executor() {
            @Override
            public void execute(Runnable command) {
                ReconnectExecutor.this.execute0(command);
            }
        };
        return new ReconnectAdaptor(dispatch, wrapReconnectJob(reconnectJob));
    }

    /** Wraps the raw job with exponential-backoff scheduling state. */
    private ReconnectJob wrapReconnectJob(Runnable runnable) {
        return new ExponentialBackoffReconnectJob(runnable);
    }
}
| suraj-raturi/pinpoint | profiler/src/main/java/com/navercorp/pinpoint/profiler/sender/grpc/ReconnectExecutor.java | Java | apache-2.0 | 3,111 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.internal.csv;
import java.io.IOException;
import java.io.StringReader;
import java.io.StringWriter;
/** Utility methods for dealing with CSV files */
public class CSVUtils {

    private static final String[] EMPTY_STRING_ARRAY = new String[0];
    private static final String[][] EMPTY_DOUBLE_STRING_ARRAY = new String[0][0];

    /**
     * <code>CSVUtils</code> instances should NOT be constructed in standard programming.
     *
     * <p>This constructor is public to permit tools that require a JavaBean instance to operate.
     */
    public CSVUtils() {}

    /**
     * Converts an array of string values into a single CSV line. All <code>null</code> values are
     * converted to the string <code>"null"</code>, all strings equal to <code>"null"</code> will
     * additionally get quotes around.
     *
     * @param values the value array
     * @return the CSV string, will be an empty string if the length of the value array is 0
     */
    public static String printLine(String[] values, CSVStrategy strategy) {
        StringWriter out = new StringWriter();
        CSVPrinter printer = new CSVPrinter(out, strategy);

        // Normalize in place: a real null becomes the text "null", while a
        // literal "null" string is quoted so the two stay distinguishable.
        for (int i = 0; i < values.length; i++) {
            if (values[i] == null) {
                values[i] = "null";
            } else if ("null".equals(values[i])) {
                values[i] = "\"null\"";
            }
        }

        try {
            printer.println(values);
        } catch (IOException e) {
            // Writing to a StringWriter cannot fail; nothing to do.
        }

        // println appends a line terminator; strip it before returning.
        return out.toString().trim();
    }

    // ======================================================
    // static parsers
    // ======================================================

    /**
     * Parses the given String according to the default {@link CSVStrategy}.
     *
     * @param s CSV String to be parsed.
     * @return parsed String matrix (which is never null)
     * @throws IOException in case of error
     */
    public static String[][] parse(String s) throws IOException {
        if (s == null) {
            throw new IllegalArgumentException("Null argument not allowed.");
        }
        String[][] rows = new CSVParser(new StringReader(s)).getAllValues();
        // CSVStrategy ignores empty lines, so empty input yields null here;
        // map that to an empty matrix rather than {{""}}.
        return (rows != null) ? rows : EMPTY_DOUBLE_STRING_ARRAY;
    }

    /**
     * Parses the first line only according to the default {@link CSVStrategy}.
     *
     * <p>Parsing empty string will be handled as valid records containing zero elements, so the
     * following property holds: parseLine("").length == 0.
     *
     * @param s CSV String to be parsed.
     * @return parsed String vector (which is never null)
     * @throws IOException in case of error
     */
    public static String[] parseLine(String s) throws IOException {
        if (s == null) {
            throw new IllegalArgumentException("Null argument not allowed.");
        }
        // Guarantee parseLine("").length == 0.
        if (s.length() == 0) {
            return EMPTY_STRING_ARRAY;
        }
        return new CSVParser(new StringReader(s)).getLine();
    }
}
| apache/solr | solr/core/src/java/org/apache/solr/internal/csv/CSVUtils.java | Java | apache-2.0 | 4,128 |
/*
* Autosleep
* Copyright (C) 2016 Orange
* Authors: Benjamin Einaudi benjamin.einaudi@orange.com
* Arnaud Ruffin arnaud.ruffin@orange.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.cloudfoundry.autosleep.ui.proxy;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.http.client.SimpleClientHttpRequestFactory;
import org.springframework.web.client.RestTemplate;
import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.HttpsURLConnection;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManager;
import javax.net.ssl.X509TrustManager;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;
import java.security.cert.CertificateException;
import java.security.cert.X509Certificate;
@Configuration
public class HttpClientConfiguration {

    // When true, the RestTemplate accepts any certificate and hostname.
    @Value("${autowakeup.skip.ssl.validation:false}")
    private boolean skipSslValidation;

    /**
     * Builds an SSLContext initialized with the given trust manager.
     * Uses the "TLS" protocol name; "SSL" is obsolete and disabled on modern JREs.
     *
     * @param trustManager trust manager installed into the context
     * @return initialized SSLContext
     */
    private SSLContext buildSslContext(TrustManager trustManager) {
        try {
            SSLContext sslContext = SSLContext.getInstance("TLS");
            sslContext.init(null, new TrustManager[]{trustManager}, null);
            return sslContext;
        } catch (KeyManagementException | NoSuchAlgorithmException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Trust manager that accepts every certificate chain. Only used when
     * autowakeup.skip.ssl.validation=true; never enable against untrusted hosts.
     */
    private TrustManager buildTrustAllCerts() {
        return new X509TrustManager() {
            @Override
            public void checkClientTrusted(X509Certificate[] certificates, String client) throws CertificateException {
            }

            @Override
            public void checkServerTrusted(X509Certificate[] certificates, String client) throws CertificateException {
            }

            @Override
            public X509Certificate[] getAcceptedIssuers() {
                // The X509TrustManager contract requires a non-null (possibly empty) array.
                return new X509Certificate[0];
            }
        };
    }

    /** Hostname verifier that accepts any hostname (skip-validation mode only). */
    private HostnameVerifier buildVerifyNoHostname() {
        return (hostname, session) -> true;
    }

    /**
     * RestTemplate bean. With validation enabled this is a plain RestTemplate;
     * otherwise every HTTPS connection is configured to trust all certificates
     * and hostnames before the request is prepared.
     */
    @Bean
    public RestTemplate restTemplate() {
        if (!skipSslValidation) {
            return new RestTemplate();
        } else {
            final HostnameVerifier hostnameVerifier = buildVerifyNoHostname();
            final SSLContext sslContext = buildSslContext(buildTrustAllCerts());
            return new RestTemplate(new SimpleClientHttpRequestFactory() {
                @Override
                protected void prepareConnection(HttpURLConnection connection, String httpMethod) throws IOException {
                    if (connection instanceof HttpsURLConnection) {
                        HttpsURLConnection secureConnection = (HttpsURLConnection) connection;
                        secureConnection.setHostnameVerifier(hostnameVerifier);
                        secureConnection.setSSLSocketFactory(sslContext.getSocketFactory());
                    }
                    super.prepareConnection(connection, httpMethod);
                }
            });
        }
    }
}
| pradyutsarma/autosleep | spring-apps/autowakeup-proxy/src/main/java/org/cloudfoundry/autosleep/ui/proxy/HttpClientConfiguration.java | Java | apache-2.0 | 3,689 |
// Copyright (c) Microsoft Open Technologies, Inc. All rights reserved. See License.txt in the project root for license information.
namespace System.Web.Mvc
{
public abstract class MvcFilter : IMvcFilter
{
protected MvcFilter()
{
}
protected MvcFilter(bool allowMultiple, int order)
{
AllowMultiple = allowMultiple;
Order = order;
}
public bool AllowMultiple { get; private set; }
public int Order { get; private set; }
}
}
| Terminator-Aaron/Katana | aspnetwebsrc/System.Web.Mvc/MvcFilter.cs | C# | apache-2.0 | 532 |
/*
* JBoss, Home of Professional Open Source
* Copyright 2010, Red Hat, Inc., and individual contributors
* by the @authors tag. See the copyright.txt in the distribution for a
* full listing of individual contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jboss.weld.tests.event.observer.transactional;
import static javax.ejb.TransactionManagementType.BEAN;
import static javax.enterprise.event.TransactionPhase.AFTER_COMPLETION;
import static javax.enterprise.event.TransactionPhase.AFTER_FAILURE;
import static javax.enterprise.event.TransactionPhase.AFTER_SUCCESS;
import static javax.enterprise.event.TransactionPhase.BEFORE_COMPLETION;
import static javax.enterprise.event.TransactionPhase.IN_PROGRESS;
import java.io.Serializable;
import javax.annotation.Priority;
import javax.ejb.Stateful;
import javax.ejb.TransactionManagement;
import javax.enterprise.context.SessionScoped;
import javax.enterprise.event.Observes;
/**
 * Stateful, session-scoped bean with bean-managed transactions (BMT) whose
 * observer methods record, via {@link Actions}, the transaction phase in which
 * each {@code Bark} event observer was invoked — one observer per
 * {@code TransactionPhase}, plus priority-ordered AFTER_SUCCESS observers.
 */
@Stateful
@TransactionManagement(BEAN)
@Tame
@SessionScoped
@SuppressWarnings("serial")
public class Pomeranian implements PomeranianInterface, Serializable {

    @Override
    public void observeInProgress(@Observes(during = IN_PROGRESS) Bark event) {
        Actions.add(IN_PROGRESS);
    }

    @Override
    public void observeAfterCompletion(@Observes(during = AFTER_COMPLETION) Bark someEvent) {
        Actions.add(AFTER_COMPLETION);
    }

    @Override
    public void observeAfterSuccess(@Observes(during = AFTER_SUCCESS) Bark event) {
        Actions.add(AFTER_SUCCESS);
    }

    // @Priority(1): highest priority; records "<AFTER_SUCCESS>1" so ordering is observable.
    @Override
    public void observeAfterSuccessWithHighPriority(@Priority(1) @Observes(during = AFTER_SUCCESS) Bark event) {
        Actions.add(AFTER_SUCCESS + "1");
    }

    // @Priority(100): lower priority; records "<AFTER_SUCCESS>100".
    @Override
    public void observeAfterSuccessWithLowPriority(@Priority(100) @Observes(during = AFTER_SUCCESS) Bark event) {
        Actions.add(AFTER_SUCCESS + "100");
    }

    @Override
    public void observeAfterFailure(@Observes(during = AFTER_FAILURE) Bark event) {
        Actions.add(AFTER_FAILURE);
    }

    @Override
    public void observeBeforeCompletion(@Observes(during = BEFORE_COMPLETION) Bark event) {
        Actions.add(BEFORE_COMPLETION);
    }

    // NOTE(review): throws from a BEFORE_COMPLETION observer of @Gnarly events —
    // presumably to exercise observer-failure handling during commit; confirm against the test.
    @Override
    public void observeAndFail(@Observes(during=BEFORE_COMPLETION) @Gnarly Bark event) throws FooException {
        Actions.add(BEFORE_COMPLETION);
        throw new FooException();
    }
}
| antoinesd/weld-core | tests-arquillian/src/test/java/org/jboss/weld/tests/event/observer/transactional/Pomeranian.java | Java | apache-2.0 | 2,906 |
// Licensed to the Apache Software Foundation (ASF) under one *
// or more contributor license agreements. See the NOTICE file *
// distributed with this work for additional information *
// regarding copyright ownership. The ASF licenses this file *
// to you under the Apache License, Version 2.0 (the *
// "License"); you may not use this file except in compliance *
// with the License. You may obtain a copy of the License at *
// *
// http://www.apache.org/licenses/LICENSE-2.0 *
// *
// Unless required by applicable law or agreed to in writing, *
// software distributed under the License is distributed on an *
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
// KIND, either express or implied. See the License for the *
// specific language governing permissions and limitations *
// under the License. *
using System.Reflection;
using System.Runtime.CompilerServices;
//
// General Information about an assembly is controlled through the following
// set of attributes. Change these attribute values to modify the information
// associated with an assembly.
//
[assembly: AssemblyTitle("")]
[assembly: AssemblyDescription("")]
[assembly: AssemblyConfiguration("")]
[assembly: AssemblyCompany("Apache Software Foundation")]
[assembly: AssemblyProduct("Apache Etch")]
[assembly: AssemblyCopyright("Copyright © Apache Software Foundation 2013")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
//
// Version information for an assembly consists of the following four values:
//
//      Major Version
//      Minor Version
//      Build Number
//      Revision
//
// You can specify all the values or you can default the Revision and Build Numbers
// by using the '*' as shown below:
// (The '*' below lets the compiler auto-generate the Build and Revision numbers.)
[assembly: AssemblyVersion("1.0.*")]
//
// In order to sign your assembly you must specify a key to use. Refer to the
// Microsoft .NET Framework documentation for more information on assembly signing.
//
// Use the attributes below to control which key is used for signing.
//
// Notes:
//   (*) If no key is specified, the assembly is not signed.
//   (*) KeyName refers to a key that has been installed in the Crypto Service
//       Provider (CSP) on your machine. KeyFile refers to a file which contains
//       a key.
//   (*) If the KeyFile and the KeyName values are both specified, the
//       following processing occurs:
//       (1) If the KeyName can be found in the CSP, that key is used.
//       (2) If the KeyName does not exist and the KeyFile does exist, the key
//           in the KeyFile is installed into the CSP and used.
//   (*) In order to create a KeyFile, you can use the sn.exe (Strong Name) utility.
//       When specifying the KeyFile, the location of the KeyFile should be
//       relative to the project output directory which is
//       %Project Directory%\obj\<configuration>. For example, if your KeyFile is
//       located in the project directory, you would specify the AssemblyKeyFile
//       attribute as [assembly: AssemblyKeyFile("..\\..\\mykey.snk")]
//   (*) Delay Signing is an advanced option - see the Microsoft .NET Framework
//       documentation for more information on this.
//
// Empty KeyFile/KeyName values below mean this assembly is left unsigned.
[assembly: AssemblyDelaySign(false)]
[assembly: AssemblyKeyFile("")]
[assembly: AssemblyKeyName("")]
| OBIGOGIT/etch | plugins/visualstudio-addin/postbuild/AssemblyInfo.cs | C# | apache-2.0 | 3,504 |
/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#pragma once
#include <aws/appstream/AppStream_EXPORTS.h>
namespace Aws
{
// Forward declaration of the generic SDK response wrapper.
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace AppStream
{
namespace Model
{
  /**
   * Result of the AppStream DisassociateFleet API call. The response carries
   * no data fields; only construction/assignment from the raw JSON service
   * response is provided.
   */
  class AWS_APPSTREAM_API DisassociateFleetResult
  {
  public:
    DisassociateFleetResult();
    // Deserializing constructor / assignment from the service's JSON response.
    DisassociateFleetResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
    DisassociateFleetResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
  };
} // namespace Model
} // namespace AppStream
} // namespace Aws
| JoyIfBam5/aws-sdk-cpp | aws-cpp-sdk-appstream/include/aws/appstream/model/DisassociateFleetResult.h | C | apache-2.0 | 1,228 |
/*
* Kendo UI Web v2014.1.318 (http://kendoui.com)
* Copyright 2014 Telerik AD. All rights reserved.
*
* Kendo UI Web commercial licenses may be obtained at
* http://www.telerik.com/purchase/license-agreement/kendo-ui-web
* If you do not own a commercial license, this file shall be governed by the
* GNU General Public License (GPL) version 3.
* For GPL requirements, please review: http://www.gnu.org/copyleft/gpl.html
*/
(function(f, define){
define([], f);
})(function(){
(function( window, undefined ) {
var kendo = window.kendo || (window.kendo = { cultures: {} });
kendo.cultures["he"] = {
name: "he",
numberFormat: {
pattern: ["-n"],
decimals: 2,
",": ",",
".": ".",
groupSize: [3],
percent: {
pattern: ["-n%","n%"],
decimals: 2,
",": ",",
".": ".",
groupSize: [3],
symbol: "%"
},
currency: {
pattern: ["$-n","$ n"],
decimals: 2,
",": ",",
".": ".",
groupSize: [3],
symbol: "₪"
}
},
calendars: {
standard: {
days: {
names: ["יום ראשון","יום שני","יום שלישי","יום רביעי","יום חמישי","יום שישי","שבת"],
namesAbbr: ["יום א","יום ב","יום ג","יום ד","יום ה","יום ו","שבת"],
namesShort: ["א","ב","ג","ד","ה","ו","ש"]
},
months: {
names: ["ינואר","פברואר","מרץ","אפריל","מאי","יוני","יולי","אוגוסט","ספטמבר","אוקטובר","נובמבר","דצמבר",""],
namesAbbr: ["ינו","פבר","מרץ","אפר","מאי","יונ","יול","אוג","ספט","אוק","נוב","דצמ",""]
},
AM: ["AM","am","AM"],
PM: ["PM","pm","PM"],
patterns: {
d: "dd/MM/yyyy",
D: "dddd dd MMMM yyyy",
F: "dddd dd MMMM yyyy HH:mm:ss",
g: "dd/MM/yyyy HH:mm",
G: "dd/MM/yyyy HH:mm:ss",
m: "dd MMMM",
M: "dd MMMM",
s: "yyyy'-'MM'-'dd'T'HH':'mm':'ss",
t: "HH:mm",
T: "HH:mm:ss",
u: "yyyy'-'MM'-'dd HH':'mm':'ss'Z'",
y: "MMMM yyyy",
Y: "MMMM yyyy"
},
"/": "/",
":": ":",
firstDay: 0
}
}
}
})(this);
return window.kendo;
}, typeof define == 'function' && define.amd ? define : function(_, f){ f(); }); | facundolucas/eCuentas | src/main/webapp/resources/kendoui/src/js/cultures/kendo.culture.he.js | JavaScript | apache-2.0 | 2,934 |
/**
* Copyright (c) Microsoft Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use
* this file except in compliance with the License. You may obtain a copy of the
* License at http://www.apache.org/licenses/LICENSE-2.0.
*
* THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
* OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION
* ANY IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
* MERCHANTABLITY OR NON-INFRINGEMENT.
*
* See the Apache Version 2.0 License for specific language governing
* permissions and limitations under the License.
*/
package com.interopbridges.scx.webservices;
/**
*
* <p>
* Concrete representation of an Endpoint to match what is described in the web
* service's WSDL.
* </p>
*
* <p>
*
* <pre>
* <service name="CalculatorService">
* <port name="CalculatorPort" binding="tns:CalculatorPortBinding">
* <soap:address location="http://scxom64-ws7-02:9080/WebServiceProject/CalculatorService" />
* </port>
* </service>
* </pre>
*
* </p>
*
* <p>
* Typically this might look like:
* <ol>
* <li><b>http://scxom64-ws7-02:9080/WebServiceProject/CalculatorService</b></li>
* <li><b>http://scxom-ws7-02:8080/axis2/services/DinnerFinderService</li>
* DinnerFinderServiceHttpSoap11Endpoint/</b>
* </ol>>
* </p>
*
* @author Christopher Crammond
*/
public class Endpoint implements EndpointMBean {

    /** JMX type discriminator reported for all endpoint MBeans. */
    private String _jmxType = "endpoint";

    /**
     * Full URL of the endpoint address; matches the soap:address location
     * attribute from the WSDL.
     */
    private String _url;

    /**
     * Default constructor: creates an endpoint with an empty URL. Kept as a
     * best practice rather than relying on a compiler-generated constructor.
     */
    public Endpoint() {
        this("");
    }

    /**
     * Preferred constructor.
     *
     * @param url full URL of the endpoint address
     */
    public Endpoint(String url) {
        this._url = url;
    }

    /*
     * (non-Javadoc)
     *
     * @see com.interopbridges.scx.webservices.EndpointMBean#getUrl()
     */
    public String getUrl() {
        return _url;
    }

    /*
     * (non-Javadoc)
     *
     * @see com.interopbridges.scx.webservices.IMBean#getJmxType()
     */
    public String getJmxType() {
        return _jmxType;
    }
}
| Microsoft/BeanSpy | test/code/JEE/Common/src/com/interopbridges/scx/webservices/Endpoint.java | Java | apache-2.0 | 2,617 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode.cache.snapshot;
import java.io.Serializable;
import org.apache.geode.internal.cache.snapshot.SnapshotFileMapper;
/**
 * Provides a way to configure the behavior of snapshot operations. The default options are:
 * <dl>
 * <dt>filter</dt>
 * <dd>null</dd>
 * </dl>
 *
 * @param <K> the cache entry key type
 * @param <V> the cache entry value type
 *
 * @since GemFire 7.0
 */
public interface SnapshotOptions<K, V> extends Serializable {
  /**
   * Defines the available snapshot file formats.
   *
   * @since GemFire 7.0
   */
  enum SnapshotFormat {
    /** an optimized binary format specific to GemFire */
    GEMFIRE
  }

  /**
   * Sets a filter to apply to snapshot entries. Entries that are accepted by the filter will be
   * included in import and export operations.
   *
   * @param filter the filter to apply, or null to remove the filter
   * @return the snapshot options
   */
  SnapshotOptions<K, V> setFilter(SnapshotFilter<K, V> filter);

  /**
   * Returns the filter to be applied to snapshot entries. Entries that are accepted by the filter
   * will be included in import and export operations.
   *
   * @return the filter, or null if the filter is not set
   */
  SnapshotFilter<K, V> getFilter();

  /**
   * Sets whether to invoke callbacks when loading a snapshot. The default is false.
   *
   * @param invokeCallbacks true to invoke callbacks when loading a snapshot
   *
   * @return the snapshot options
   */
  SnapshotOptions<K, V> invokeCallbacks(boolean invokeCallbacks);

  /**
   * Returns whether loading a snapshot causes callbacks to be invoked
   *
   * @return whether loading a snapshot causes callbacks to be invoked
   */
  boolean shouldInvokeCallbacks();

  /**
   * Returns true if the snapshot operation will proceed in parallel.
   *
   * @return true if the parallel mode has been enabled
   *
   * @since Geode 1.3
   */
  boolean isParallelMode();

  /**
   * Enables parallel mode for snapshot export, which will cause each member of a partitioned region
   * to save its local data set (ignoring redundant copies) to a separate snapshot file.
   *
   * <p>
   * Parallelizing snapshot operations may yield significant performance improvements for large data
   * sets. This is particularly true when each member is writing to separate physical disks.
   * <p>
   * This flag is ignored for replicated regions.
   *
   * @param parallel true if the snapshot operations will be performed in parallel
   * @return the snapshot options
   *
   * @see SnapshotFileMapper
   *
   * @since Geode 1.3
   */
  SnapshotOptions<K, V> setParallelMode(boolean parallel);
}
| charliemblack/geode | geode-core/src/main/java/org/apache/geode/cache/snapshot/SnapshotOptions.java | Java | apache-2.0 | 3,388 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
from op_test import OpTest
class ApiFMaxTest(unittest.TestCase):
    """End-to-end checks of paddle.fmax against numpy.fmax references."""
    def setUp(self):
        """Pick a device and build random/special-value inputs plus numpy references."""
        if core.is_compiled_with_cuda():
            self.place = core.CUDAPlace(0)
        else:
            self.place = core.CPUPlace()
        self.input_x = np.random.rand(10, 15).astype("float32")
        self.input_y = np.random.rand(10, 15).astype("float32")
        # 1-D input used to exercise broadcasting against the (10, 15) inputs.
        self.input_z = np.random.rand(15).astype("float32")
        # NOTE(review): casting nan/inf to int64 yields platform-dependent values,
        # so these arrays exercise plain integer fmax rather than real nan/inf
        # handling — confirm this is the intent.
        self.input_a = np.array([0, np.nan, np.nan]).astype('int64')
        self.input_b = np.array([2, np.inf, -np.inf]).astype('int64')
        self.input_c = np.array([4, 1, 3]).astype('int64')
        # Reference results computed with numpy.fmax.
        self.np_expected1 = np.fmax(self.input_x, self.input_y)
        self.np_expected2 = np.fmax(self.input_x, self.input_z)
        self.np_expected3 = np.fmax(self.input_a, self.input_c)
        self.np_expected4 = np.fmax(self.input_b, self.input_c)
    def test_static_api(self):
        """Run paddle.fmax through the static graph for all four input pairs."""
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data_x = paddle.static.data("x", shape=[10, 15], dtype="float32")
            data_y = paddle.static.data("y", shape=[10, 15], dtype="float32")
            result_fmax = paddle.fmax(data_x, data_y)
            exe = paddle.static.Executor(self.place)
            res, = exe.run(feed={"x": self.input_x,
                                 "y": self.input_y},
                           fetch_list=[result_fmax])
        self.assertTrue(np.allclose(res, self.np_expected1))
        # Broadcasting: (10, 15) against (15,).
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data_x = paddle.static.data("x", shape=[10, 15], dtype="float32")
            data_z = paddle.static.data("z", shape=[15], dtype="float32")
            result_fmax = paddle.fmax(data_x, data_z)
            exe = paddle.static.Executor(self.place)
            res, = exe.run(feed={"x": self.input_x,
                                 "z": self.input_z},
                           fetch_list=[result_fmax])
        self.assertTrue(np.allclose(res, self.np_expected2))
        # Integer inputs.
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data_a = paddle.static.data("a", shape=[3], dtype="int64")
            data_c = paddle.static.data("c", shape=[3], dtype="int64")
            result_fmax = paddle.fmax(data_a, data_c)
            exe = paddle.static.Executor(self.place)
            res, = exe.run(feed={"a": self.input_a,
                                 "c": self.input_c},
                           fetch_list=[result_fmax])
        self.assertTrue(np.allclose(res, self.np_expected3))
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data_b = paddle.static.data("b", shape=[3], dtype="int64")
            data_c = paddle.static.data("c", shape=[3], dtype="int64")
            result_fmax = paddle.fmax(data_b, data_c)
            exe = paddle.static.Executor(self.place)
            res, = exe.run(feed={"b": self.input_b,
                                 "c": self.input_c},
                           fetch_list=[result_fmax])
        self.assertTrue(np.allclose(res, self.np_expected4))
    def test_dynamic_api(self):
        """Run paddle.fmax eagerly on the same inputs and compare to numpy."""
        paddle.disable_static()
        x = paddle.to_tensor(self.input_x)
        y = paddle.to_tensor(self.input_y)
        z = paddle.to_tensor(self.input_z)
        a = paddle.to_tensor(self.input_a)
        b = paddle.to_tensor(self.input_b)
        c = paddle.to_tensor(self.input_c)
        res = paddle.fmax(x, y)
        res = res.numpy()
        self.assertTrue(np.allclose(res, self.np_expected1))
        # test broadcast
        res = paddle.fmax(x, z)
        res = res.numpy()
        self.assertTrue(np.allclose(res, self.np_expected2))
        res = paddle.fmax(a, c)
        res = res.numpy()
        self.assertTrue(np.allclose(res, self.np_expected3))
        res = paddle.fmax(b, c)
        res = res.numpy()
        self.assertTrue(np.allclose(res, self.np_expected4))
class TestElementwiseFmaxOp(OpTest):
    """Operator-level test of elementwise_fmax with well-separated inputs."""

    def setUp(self):
        """Build random inputs whose values are kept apart from each other."""
        self.op_type = "elementwise_fmax"
        # max() is not differentiable where x == y, so push the second input
        # away from the first by a random offset in [0.1, 1) with random sign.
        base = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
        sign = np.random.choice([-1, 1], [13, 17]).astype("float64")
        shifted = base + sign * np.random.uniform(0.1, 1, [13, 17]).astype("float64")
        self.inputs = {'X': base, 'Y': shifted}
        self.outputs = {'Out': np.fmax(base, shifted)}

    def test_check_output(self):
        """Forward result matches the numpy reference."""
        self.check_output()

    def test_check_grad_normal(self):
        """Gradient check with respect to both inputs."""
        self.check_grad(['X', 'Y'], 'Out')

    def test_check_grad_ingore_x(self):
        """Gradient check with respect to Y only (X excluded).  # note: 'ingore' typo kept — renaming would change test discovery"""
        self.check_grad(
            ['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X"))

    def test_check_grad_ingore_y(self):
        """Gradient check with respect to X only (Y excluded)."""
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y'))
class TestElementwiseFmax2Op(OpTest):
    """Operator-level test of elementwise_fmax with NaNs in the second input."""

    def setUp(self):
        """Build well-separated random inputs, then plant NaNs in one row of Y."""
        self.op_type = "elementwise_fmax"
        # Keep the inputs apart so max() stays differentiable where defined.
        base = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
        sign = np.random.choice([-1, 1], [13, 17]).astype("float64")
        shifted = base + sign * np.random.uniform(0.1, 1, [13, 17]).astype("float64")
        # fmax should ignore NaN operands and fall back to the other input.
        shifted[2, 10:] = np.nan
        self.inputs = {'X': base, 'Y': shifted}
        self.outputs = {'Out': np.fmax(base, shifted)}

    def test_check_output(self):
        """Forward result matches the numpy reference (NaNs ignored)."""
        self.check_output()

    def test_check_grad_normal(self):
        """Gradient check with respect to both inputs."""
        self.check_grad(['X', 'Y'], 'Out')

    def test_check_grad_ingore_x(self):
        """Gradient check with respect to Y only (X excluded)."""
        self.check_grad(
            ['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X"))

    def test_check_grad_ingore_y(self):
        """Gradient check with respect to X only (Y excluded)."""
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y'))
| luotao1/Paddle | python/paddle/fluid/tests/unittests/test_fmax_op.py | Python | apache-2.0 | 7,540 |
/*
* JBoss, Home of Professional Open Source
* Copyright 2012, Red Hat, Inc., and individual contributors
* by the @authors tag. See the copyright.txt in the distribution for a
* full listing of individual contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jboss.weld.util.bean;
import java.io.Serializable;
import javax.enterprise.inject.spi.InjectionPoint;
import org.jboss.weld.injection.ForwardingInjectionPoint;
import org.jboss.weld.serialization.InjectionPointHolder;
/**
 * A {@link Serializable} wrapper around an {@link InjectionPoint}: the delegate is stored in an
 * {@link InjectionPointHolder}, which presumably takes care of (de)serializing the underlying
 * injection point — verify against the holder implementation.
 */
public class SerializableForwardingInjectionPoint extends ForwardingInjectionPoint implements Serializable {
    private static final long serialVersionUID = 7803445899943317029L;
    // Serializable holder from which the wrapped injection point is recovered on demand.
    private final InjectionPointHolder ip;
    public SerializableForwardingInjectionPoint(String contextId, InjectionPoint ip) {
        this.ip = new InjectionPointHolder(contextId, ip);
    }
    @Override
    protected InjectionPoint delegate() {
        return ip.get();
    }
}
| antoinesd/weld-core | impl/src/main/java/org/jboss/weld/util/bean/SerializableForwardingInjectionPoint.java | Java | apache-2.0 | 1,478 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache license, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the license for the specific language governing permissions and
* limitations under the license.
*/
package org.apache.logging.log4j.flume.appender;
import java.util.Properties;
import org.apache.flume.Event;
import org.apache.flume.api.RpcClient;
import org.apache.flume.api.RpcClientFactory;
import org.apache.logging.log4j.core.appender.AppenderLoggingException;
import org.apache.logging.log4j.core.appender.ManagerFactory;
/**
* Manager for FlumeAvroAppenders.
*/
public class FlumeAvroManager extends AbstractFlumeManager {

    /** Upper bound on the reconnect attempts forwarded to the RPC client. */
    private static final int MAX_RECONNECTS = 3;

    /** Timeouts (ms) below this value are not forwarded to the RPC client. */
    private static final int MINIMUM_TIMEOUT = 1000;

    private static final AvroManagerFactory factory = new AvroManagerFactory();

    /** The agents this manager may write to, in preference order. */
    private final Agent[] agents;
    /** Number of events to include in one RPC batch. */
    private final int batchSize;
    /** Number of times to retry connecting before giving up. */
    private final int retries;
    /** Connection timeout in ms. */
    private final int connectTimeout;
    /** Request timeout in ms. */
    private final int requestTimeout;
    /** Index of the agent reported in error messages; failover is delegated to the RPC client. */
    private final int current = 0;

    /** The Avro RPC client; null whenever there is no usable connection. */
    private RpcClient rpcClient = null;

    /**
     * Constructor
     * @param name The unique name of this manager.
     * @param shortName The short name of the owning appender (currently unused).
     * @param agents An array of Agents.
     * @param batchSize The number of events to include in a batch.
     * @param retries The number of times to retry connecting before giving up.
     * @param connectTimeout The connection timeout in ms.
     * @param requestTimeout The request timeout in ms.
     */
    protected FlumeAvroManager(final String name, final String shortName, final Agent[] agents, final int batchSize,
                               final int retries, final int connectTimeout, final int requestTimeout) {
        super(name);
        this.agents = agents;
        this.batchSize = batchSize;
        this.retries = retries;
        this.connectTimeout = connectTimeout;
        this.requestTimeout = requestTimeout;
        this.rpcClient = connect(agents, retries, connectTimeout, requestTimeout);
    }

    /**
     * Returns a FlumeAvroManager.
     * @param name The name of the manager.
     * @param agents The agents to use.
     * @param batchSize The number of events to include in a batch; values &lt;= 0 are coerced to 1.
     * @param retries The number of times to retry connecting before giving up.
     * @param connectTimeout The connection timeout in ms.
     * @param requestTimeout The request timeout in ms.
     * @return A FlumeAvroManager.
     * @throws IllegalArgumentException if no agents are supplied.
     */
    public static FlumeAvroManager getManager(final String name, final Agent[] agents, int batchSize,
                                              final int retries, final int connectTimeout, final int requestTimeout) {
        if (agents == null || agents.length == 0) {
            throw new IllegalArgumentException("At least one agent is required");
        }
        if (batchSize <= 0) {
            batchSize = 1;
        }
        // The manager key encodes all agent endpoints so distinct agent sets get distinct managers.
        final StringBuilder sb = new StringBuilder("FlumeAvro[");
        boolean first = true;
        for (final Agent agent : agents) {
            if (!first) {
                sb.append(",");
            }
            sb.append(agent.getHost()).append(":").append(agent.getPort());
            first = false;
        }
        sb.append("]");
        return getManager(sb.toString(), factory,
                new FactoryData(name, agents, batchSize, retries, connectTimeout, requestTimeout));
    }

    /**
     * Returns the agents.
     * @return The agent array.
     */
    public Agent[] getAgents() {
        return agents;
    }

    /**
     * Returns the index of the current agent.
     * @return The index for the current agent.
     */
    public int getCurrent() {
        return current;
    }

    public int getRetries() {
        return retries;
    }

    public int getConnectTimeout() {
        return connectTimeout;
    }

    public int getRequestTimeout() {
        return requestTimeout;
    }

    public int getBatchSize() {
        return batchSize;
    }

    /**
     * Sends a batch of events, reconnecting first if necessary.
     * @param events the batch to send.
     * @throws AppenderLoggingException if no agent is reachable or the send fails.
     */
    public synchronized void send(final BatchEvent events) {
        ensureConnected();
        try {
            LOGGER.trace("Sending batch of {} events", events.getEvents().size());
            rpcClient.appendBatch(events.getEvents());
        } catch (final Exception ex) {
            throw disconnectAndFail(ex);
        }
    }

    /**
     * Sends a single event, reconnecting first if necessary.
     * @param event the event to send.
     * @throws AppenderLoggingException if no agent is reachable or the send fails.
     */
    @Override
    public synchronized void send(final Event event) {
        ensureConnected();
        try {
            rpcClient.append(event);
        } catch (final Exception ex) {
            throw disconnectAndFail(ex);
        }
    }

    /** Lazily (re)connects; throws AppenderLoggingException when no client can be obtained. */
    private void ensureConnected() {
        if (rpcClient == null) {
            rpcClient = connect(agents, retries, connectTimeout, requestTimeout);
        }
        if (rpcClient == null) {
            LOGGER.warn(unavailableMessage());
            throw new AppenderLoggingException("No Flume agents are available");
        }
    }

    /** Closes and discards the failed client, logs, and builds the exception to throw. */
    private AppenderLoggingException disconnectAndFail(final Exception ex) {
        rpcClient.close();
        rpcClient = null;
        LOGGER.warn(unavailableMessage(), ex);
        return new AppenderLoggingException("No Flume agents are available");
    }

    private String unavailableMessage() {
        return "Unable to write to " + getName() + " at " + agents[current].getHost() + ":" +
                agents[current].getPort();
    }

    /**
     * There is a very good chance that this will always return the first agent even if it isn't available.
     * @param agents The list of agents to choose from
     * @return The RpcClient, or null if the connection could not be established.
     */
    private RpcClient connect(final Agent[] agents, int retries, final int connectTimeout, final int requestTimeout) {
        try {
            final Properties props = new Properties();
            // More than one agent enables the client-side failover implementation.
            props.put("client.type", agents.length > 1 ? "default_failover" : "default");
            int count = 1;
            final StringBuilder sb = new StringBuilder();
            for (final Agent agent : agents) {
                if (sb.length() > 0) {
                    sb.append(" ");
                }
                final String hostName = "host" + count++;
                props.put("hosts." + hostName, agent.getHost() + ":" + agent.getPort());
                sb.append(hostName);
            }
            props.put("hosts", sb.toString());
            if (batchSize > 0) {
                props.put("batch-size", Integer.toString(batchSize));
            }
            if (retries > 1) {
                if (retries > MAX_RECONNECTS) {
                    retries = MAX_RECONNECTS;
                }
                props.put("max-attempts", Integer.toString(retries * agents.length));
            }
            if (requestTimeout >= MINIMUM_TIMEOUT) {
                props.put("request-timeout", Integer.toString(requestTimeout));
            }
            if (connectTimeout >= MINIMUM_TIMEOUT) {
                props.put("connect-timeout", Integer.toString(connectTimeout));
            }
            return RpcClientFactory.getInstance(props);
        } catch (final Exception ex) {
            LOGGER.error("Unable to create Flume RPCClient: {}", ex.getMessage());
            return null;
        }
    }

    @Override
    protected void releaseSub() {
        if (rpcClient != null) {
            try {
                rpcClient.close();
            } catch (final Exception ex) {
                LOGGER.error("Attempt to close RPC client failed", ex);
            }
        }
        rpcClient = null;
    }

    /**
     * Factory data.
     */
    private static class FactoryData {
        private final String name;
        private final Agent[] agents;
        private final int batchSize;
        private final int retries;
        private final int connectTimeout;
        private final int requestTimeout;

        /**
         * Constructor.
         * @param name The name of the Appender.
         * @param agents The agents.
         * @param batchSize The number of events to include in a batch.
         * @param retries The number of connection retries.
         * @param connectTimeout The connection timeout in ms.
         * @param requestTimeout The request timeout in ms.
         */
        public FactoryData(final String name, final Agent[] agents, final int batchSize, final int retries,
                           final int connectTimeout, final int requestTimeout) {
            this.name = name;
            this.agents = agents;
            this.batchSize = batchSize;
            this.retries = retries;
            this.connectTimeout = connectTimeout;
            this.requestTimeout = requestTimeout;
        }
    }

    /**
     * Avro Manager Factory.
     */
    private static class AvroManagerFactory implements ManagerFactory<FlumeAvroManager, FactoryData> {

        /**
         * Create the FlumeAvroManager.
         * @param name The name of the entity to manage.
         * @param data The data required to create the entity.
         * @return The FlumeAvroManager, or null on failure.
         */
        @Override
        public FlumeAvroManager createManager(final String name, final FactoryData data) {
            try {
                return new FlumeAvroManager(name, data.name, data.agents, data.batchSize, data.retries,
                        data.connectTimeout, data.requestTimeout);
            } catch (final Exception ex) {
                LOGGER.error("Could not create FlumeAvroManager", ex);
            }
            return null;
        }
    }
}
| ClarenceAu/log4j2 | log4j-flume-ng/src/main/java/org/apache/logging/log4j/flume/appender/FlumeAvroManager.java | Java | apache-2.0 | 10,591 |
package net.stickycode.configuration.value;
import net.stickycode.configuration.ConfigurationValue;
/**
 * A configuration value sourced from the system level. System values yield to
 * application values and to other system values, but take precedence over
 * everything else.
 */
public class SystemValue
    implements ConfigurationValue {
  private final String value;
  public SystemValue(String value) {
    this.value = value;
  }
  @Override
  public String get() {
    return value;
  }
  @Override
  public boolean hasPrecedence(ConfigurationValue contender) {
    Class<?> type = contender.getClass();
    if (ApplicationValue.class.isAssignableFrom(type)
        || SystemValue.class.isAssignableFrom(type))
      return false;
    return true;
  }
  @Override
  public String toString() {
    return getClass().getSimpleName() + "{" + value + "}";
  }
}
| walterDurin/stickycode | net.stickycode.configuration/sticky-configuration/src/main/java/net/stickycode/configuration/value/SystemValue.java | Java | apache-2.0 | 672 |
// Generated by xsd compiler for android/java
// DO NOT CHANGE!
package com.ebay.marketplace.search.v1.services;
import com.leansoft.nano.annotation.*;
/**
*
* Reserved for future use.
*
*/
@RootElement(name = "findItemsForFavoriteSearchResponse", namespace = "http://www.ebay.com/marketplace/search/v1/services")
public class FindItemsForFavoriteSearchResponse extends BaseFindingServiceResponse {
    @Element
    private CategoryHistogramContainer categoryHistogramContainer;
    @Element
    private AspectHistogramContainer aspectHistogramContainer;
    @Element
    private ConditionHistogramContainer conditionHistogramContainer;
    /**
     * Returns the category histogram container. Reserved for future use.
     *
     * @return the {@link CategoryHistogramContainer}, possibly null
     */
    public CategoryHistogramContainer getCategoryHistogramContainer() {
        return this.categoryHistogramContainer;
    }
    /**
     * Sets the category histogram container. Reserved for future use.
     *
     * @param categoryHistogramContainer the container to set
     */
    public void setCategoryHistogramContainer(CategoryHistogramContainer categoryHistogramContainer) {
        this.categoryHistogramContainer = categoryHistogramContainer;
    }
    /**
     * Returns the aspect histogram container. Reserved for future use.
     *
     * @return the {@link AspectHistogramContainer}, possibly null
     */
    public AspectHistogramContainer getAspectHistogramContainer() {
        return this.aspectHistogramContainer;
    }
    /**
     * Sets the aspect histogram container. Reserved for future use.
     *
     * @param aspectHistogramContainer the container to set
     */
    public void setAspectHistogramContainer(AspectHistogramContainer aspectHistogramContainer) {
        this.aspectHistogramContainer = aspectHistogramContainer;
    }
    /**
     * Returns the condition histogram container. Reserved for future use.
     *
     * @return the {@link ConditionHistogramContainer}, possibly null
     */
    public ConditionHistogramContainer getConditionHistogramContainer() {
        return this.conditionHistogramContainer;
    }
    /**
     * Sets the condition histogram container. Reserved for future use.
     *
     * @param conditionHistogramContainer the container to set
     */
    public void setConditionHistogramContainer(ConditionHistogramContainer conditionHistogramContainer) {
        this.conditionHistogramContainer = conditionHistogramContainer;
    }
} | bulldog2011/nano-rest | sample/EBaySearch/src/com/ebay/marketplace/search/v1/services/FindItemsForFavoriteSearchResponse.java | Java | apache-2.0 | 2,540 |
package transaction
import (
. "DNA/common"
"DNA/common/serialization"
"DNA/core/contract"
"DNA/core/contract/program"
sig "DNA/core/signature"
"DNA/core/transaction/payload"
. "DNA/errors"
"crypto/sha256"
"errors"
"fmt"
"io"
"sort"
)
//TransactionType distinguishes the different transaction kinds, each with its
//own payload format and processing rules.
type TransactionType byte

const (
	BookKeeping    TransactionType = 0x00
	IssueAsset     TransactionType = 0x01
	BookKeeper     TransactionType = 0x02
	PrivacyPayload TransactionType = 0x20
	RegisterAsset  TransactionType = 0x40
	TransferAsset  TransactionType = 0x80
	Record         TransactionType = 0x81
	DeployCode     TransactionType = 0xd0
	DataFile       TransactionType = 0x12 // NOTE: declared out of numeric order
)

//Payload define the func for loading the payload data
//base on payload type which have different struture
type Payload interface {
	// Get payload data
	Data(version byte) []byte
	//Serialize payload data
	Serialize(w io.Writer, version byte) error
	Deserialize(r io.Reader, version byte) error
}

//Transaction is used for carry information or action to Ledger
//validated transaction will be added to block and updates state correspondingly

//TxStore is the ledger store used to resolve referenced transactions.
var TxStore ILedgerStore

type Transaction struct {
	TxType         TransactionType
	PayloadVersion byte
	Payload        Payload
	Attributes     []*TxAttribute
	UTXOInputs     []*UTXOTxInput
	BalanceInputs  []*BalanceTxInput
	Outputs        []*TxOutput
	Programs       []*program.Program

	//Inputs/Outputs map base on Asset (needn't serialize)
	AssetOutputs      map[Uint256][]*TxOutput
	AssetInputAmount  map[Uint256]Fixed64
	AssetOutputAmount map[Uint256]Fixed64

	// cached double-SHA256 of the unsigned data; nil until Hash() is called
	hash *Uint256
}
//Serialize writes the complete transaction to w: the unsigned fields
//followed by the signature programs.
func (tx *Transaction) Serialize(w io.Writer) error {
	err := tx.SerializeUnsigned(w)
	if err != nil {
		return NewDetailErr(err, ErrNoCode, "Transaction txSerializeUnsigned Serialize failed.")
	}
	//Serialize Transaction's programs (count, then each program)
	lens := uint64(len(tx.Programs))
	err = serialization.WriteVarUint(w, lens)
	if err != nil {
		return NewDetailErr(err, ErrNoCode, "Transaction WriteVarUint failed.")
	}
	if lens > 0 {
		for _, p := range tx.Programs {
			err = p.Serialize(w)
			if err != nil {
				return NewDetailErr(err, ErrNoCode, "Transaction Programs Serialize failed.")
			}
		}
	}
	return nil
}
//SerializeUnsigned writes the signature-covered portion of the transaction
//to w: type, payload version, payload, attributes, UTXO inputs and outputs.
//Signature programs are excluded; this is the data that gets signed.
func (tx *Transaction) SerializeUnsigned(w io.Writer) error {
	//txType — the write errors were previously dropped
	if _, err := w.Write([]byte{byte(tx.TxType)}); err != nil {
		return err
	}
	//PayloadVersion
	if _, err := w.Write([]byte{tx.PayloadVersion}); err != nil {
		return err
	}
	//Payload
	if tx.Payload == nil {
		return errors.New("Transaction Payload is nil.")
	}
	//the payload serialization error was previously dropped
	if err := tx.Payload.Serialize(w, tx.PayloadVersion); err != nil {
		return NewDetailErr(err, ErrNoCode, "Transaction item Payload serialization failed.")
	}
	//[]*txAttribute
	err := serialization.WriteVarUint(w, uint64(len(tx.Attributes)))
	if err != nil {
		return NewDetailErr(err, ErrNoCode, "Transaction item txAttribute length serialization failed.")
	}
	if len(tx.Attributes) > 0 {
		for _, attr := range tx.Attributes {
			attr.Serialize(w)
		}
	}
	//[]*UTXOInputs
	err = serialization.WriteVarUint(w, uint64(len(tx.UTXOInputs)))
	if err != nil {
		return NewDetailErr(err, ErrNoCode, "Transaction item UTXOInputs length serialization failed.")
	}
	if len(tx.UTXOInputs) > 0 {
		for _, utxo := range tx.UTXOInputs {
			utxo.Serialize(w)
		}
	}
	// TODO BalanceInputs
	//[]*Outputs
	err = serialization.WriteVarUint(w, uint64(len(tx.Outputs)))
	if err != nil {
		return NewDetailErr(err, ErrNoCode, "Transaction item Outputs length serialization failed.")
	}
	if len(tx.Outputs) > 0 {
		for _, output := range tx.Outputs {
			output.Serialize(w)
		}
	}
	return nil
}
//Deserialize reads a complete transaction from r: the unsigned fields
//followed by the signature programs.
func (tx *Transaction) Deserialize(r io.Reader) error {
	// tx deserialize
	err := tx.DeserializeUnsigned(r)
	if err != nil {
		return NewDetailErr(err, ErrNoCode, "transaction Deserialize error")
	}
	// tx program count
	lens, err := serialization.ReadVarUint(r, 0)
	if err != nil {
		return NewDetailErr(err, ErrNoCode, "transaction tx program Deserialize error")
	}
	programHashes := []*program.Program{}
	if lens > 0 {
		for i := 0; i < int(lens); i++ {
			outputHashes := new(program.Program)
			// the deserialization error was previously dropped
			if err := outputHashes.Deserialize(r); err != nil {
				return NewDetailErr(err, ErrNoCode, "transaction program item Deserialize error")
			}
			programHashes = append(programHashes, outputHashes)
		}
		tx.Programs = programHashes
	}
	return nil
}
//DeserializeUnsigned reads the transaction type byte, then delegates the
//remainder of the unsigned data to DeserializeUnsignedWithoutType.
func (tx *Transaction) DeserializeUnsigned(r io.Reader) error {
	var txType [1]byte
	if _, err := io.ReadFull(r, txType[:]); err != nil {
		return err
	}
	tx.TxType = TransactionType(txType[0])
	return tx.DeserializeUnsignedWithoutType(r)
}
//DeserializeUnsignedWithoutType reads the unsigned transaction fields from r,
//assuming TxType has already been read into tx.TxType: payload version,
//type-specific payload, attributes, UTXO inputs and outputs.
func (tx *Transaction) DeserializeUnsignedWithoutType(r io.Reader) error {
	var payloadVersion [1]byte
	// check the read error before consuming the byte (was assigned first)
	if _, err := io.ReadFull(r, payloadVersion[:]); err != nil {
		return err
	}
	tx.PayloadVersion = payloadVersion[0]
	//allocate the concrete payload type matching the transaction type
	switch tx.TxType {
	case RegisterAsset:
		tx.Payload = new(payload.RegisterAsset)
	case IssueAsset:
		tx.Payload = new(payload.IssueAsset)
	case TransferAsset:
		tx.Payload = new(payload.TransferAsset)
	case BookKeeping:
		tx.Payload = new(payload.BookKeeping)
	case Record:
		tx.Payload = new(payload.Record)
	case BookKeeper:
		tx.Payload = new(payload.BookKeeper)
	case PrivacyPayload:
		tx.Payload = new(payload.PrivacyPayload)
	case DataFile:
		tx.Payload = new(payload.DataFile)
	default:
		return errors.New("[Transaction],invalid transaction type.")
	}
	if err := tx.Payload.Deserialize(r, tx.PayloadVersion); err != nil {
		return NewDetailErr(err, ErrNoCode, "Payload Parse error")
	}
	//attributes
	Len, err := serialization.ReadVarUint(r, 0)
	if err != nil {
		return err
	}
	for i := uint64(0); i < Len; i++ {
		attr := new(TxAttribute)
		if err := attr.Deserialize(r); err != nil {
			return err
		}
		tx.Attributes = append(tx.Attributes, attr)
	}
	//UTXOInputs
	Len, err = serialization.ReadVarUint(r, 0)
	if err != nil {
		return err
	}
	for i := uint64(0); i < Len; i++ {
		utxo := new(UTXOTxInput)
		if err := utxo.Deserialize(r); err != nil {
			return err
		}
		tx.UTXOInputs = append(tx.UTXOInputs, utxo)
	}
	//TODO balanceInputs
	//Outputs — the per-output deserialization error was previously dropped
	Len, err = serialization.ReadVarUint(r, 0)
	if err != nil {
		return err
	}
	for i := uint64(0); i < Len; i++ {
		output := new(TxOutput)
		if err := output.Deserialize(r); err != nil {
			return err
		}
		tx.Outputs = append(tx.Outputs, output)
	}
	return nil
}
//GetProgramHashes returns the sorted, de-duplicated set of program hashes
//that must provide signatures for this transaction: owners of the spent
//outputs, Script attributes, and type-specific signers (issuer/controller).
func (tx *Transaction) GetProgramHashes() ([]Uint160, error) {
	if tx == nil {
		return []Uint160{}, errors.New("[Transaction],GetProgramHashes transaction is nil.")
	}
	hashs := []Uint160{}
	uniqHashes := []Uint160{}
	// every spent output's owner must sign
	referenceWithUTXO_Output, err := tx.GetReference()
	if err != nil {
		return nil, NewDetailErr(err, ErrNoCode, "[Transaction], GetProgramHashes failed.")
	}
	for _, output := range referenceWithUTXO_Output {
		hashs = append(hashs, output.ProgramHash)
	}
	// a Script attribute names an extra required signer
	for _, attribute := range tx.Attributes {
		if attribute.Usage == Script {
			dataHash, err := Uint160ParseFromBytes(attribute.Data)
			if err != nil {
				return nil, NewDetailErr(errors.New("[Transaction], GetProgramHashes err."), ErrNoCode, "")
			}
			hashs = append(hashs, dataHash)
		}
	}
	switch tx.TxType {
	case RegisterAsset:
		issuer := tx.Payload.(*payload.RegisterAsset).Issuer
		signatureRedeemScript, err := contract.CreateSignatureRedeemScript(issuer)
		if err != nil {
			return nil, NewDetailErr(err, ErrNoCode, "[Transaction], GetProgramHashes CreateSignatureRedeemScript failed.")
		}
		astHash, err := ToCodeHash(signatureRedeemScript)
		if err != nil {
			return nil, NewDetailErr(err, ErrNoCode, "[Transaction], GetProgramHashes ToCodeHash failed.")
		}
		hashs = append(hashs, astHash)
	case IssueAsset:
		// the controller of every issued asset's register transaction must sign
		// (the previous stale 'if err != nil' check here was dead code)
		result := tx.GetMergedAssetIDValueFromOutputs()
		for k := range result {
			refTx, err := TxStore.GetTransaction(k)
			if err != nil {
				return nil, NewDetailErr(err, ErrNoCode, fmt.Sprintf("[Transaction], GetTransaction failed With AssetID:=%x", k))
			}
			if refTx.TxType != RegisterAsset {
				return nil, NewDetailErr(errors.New("[Transaction] error"), ErrNoCode, fmt.Sprintf("[Transaction], Transaction Type illegal With AssetID:=%x", k))
			}
			switch v1 := refTx.Payload.(type) {
			case *payload.RegisterAsset:
				hashs = append(hashs, v1.Controller)
			default:
				// previous Sprintf passed k without a matching verb
				return nil, NewDetailErr(errors.New("[Transaction] error"), ErrNoCode, fmt.Sprintf("[Transaction], payload is illegal With AssetID:=%x", k))
			}
		}
	case DataFile:
		issuer := tx.Payload.(*payload.DataFile).Issuer
		signatureRedeemScript, err := contract.CreateSignatureRedeemScript(issuer)
		if err != nil {
			return nil, NewDetailErr(err, ErrNoCode, "[Transaction], GetProgramHashes CreateSignatureRedeemScript failed.")
		}
		astHash, err := ToCodeHash(signatureRedeemScript)
		if err != nil {
			return nil, NewDetailErr(err, ErrNoCode, "[Transaction], GetProgramHashes ToCodeHash failed.")
		}
		hashs = append(hashs, astHash)
	case TransferAsset:
	case Record:
	case BookKeeper:
		issuer := tx.Payload.(*payload.BookKeeper).Issuer
		signatureRedeemScript, err := contract.CreateSignatureRedeemScript(issuer)
		if err != nil {
			return nil, NewDetailErr(err, ErrNoCode, "[Transaction - BookKeeper], GetProgramHashes CreateSignatureRedeemScript failed.")
		}
		astHash, err := ToCodeHash(signatureRedeemScript)
		if err != nil {
			return nil, NewDetailErr(err, ErrNoCode, "[Transaction - BookKeeper], GetProgramHashes ToCodeHash failed.")
		}
		hashs = append(hashs, astHash)
	case PrivacyPayload:
		issuer := tx.Payload.(*payload.PrivacyPayload).EncryptAttr.(*payload.EcdhAes256).FromPubkey
		signatureRedeemScript, err := contract.CreateSignatureRedeemScript(issuer)
		if err != nil {
			return nil, NewDetailErr(err, ErrNoCode, "[Transaction], GetProgramHashes CreateSignatureRedeemScript failed.")
		}
		astHash, err := ToCodeHash(signatureRedeemScript)
		if err != nil {
			return nil, NewDetailErr(err, ErrNoCode, "[Transaction], GetProgramHashes ToCodeHash failed.")
		}
		hashs = append(hashs, astHash)
	default:
	}
	//remove duplicated hashes and sort into canonical order
	uniq := make(map[Uint160]bool)
	for _, v := range hashs {
		uniq[v] = true
	}
	for k := range uniq {
		uniqHashes = append(uniqHashes, k)
	}
	sort.Sort(byProgramHashes(uniqHashes))
	return uniqHashes, nil
}
//SetPrograms replaces the transaction's signature programs.
func (tx *Transaction) SetPrograms(programs []*program.Program) {
	tx.Programs = programs
}

//GetPrograms returns the transaction's signature programs.
func (tx *Transaction) GetPrograms() []*program.Program {
	return tx.Programs
}

//GetOutputHashes is not implemented yet; it always returns an empty slice.
func (tx *Transaction) GetOutputHashes() ([]Uint160, error) {
	//TODO: implement Transaction.GetOutputHashes()
	return []Uint160{}, nil
}

//GenerateAssetMaps is not implemented yet.
func (tx *Transaction) GenerateAssetMaps() {
	//TODO: implement Transaction.GenerateAssetMaps()
}

//GetMessage returns the serialized unsigned transaction data, i.e. the bytes
//that signatures are computed over.
func (tx *Transaction) GetMessage() []byte {
	return sig.GetHashData(tx)
}
//Hash returns the transaction hash — the double SHA-256 of the unsigned
//data — computing and caching it on first use.
func (tx *Transaction) Hash() Uint256 {
	if tx.hash == nil {
		first := sha256.Sum256(sig.GetHashData(tx))
		second := Uint256(sha256.Sum256(first[:]))
		tx.hash = &second
	}
	return *tx.hash
}
//SetHash overrides the cached transaction hash with a precomputed value.
func (tx *Transaction) SetHash(hash Uint256) {
	tx.hash = &hash
}

//Type identifies this inventory item as a transaction.
func (tx *Transaction) Type() InventoryType {
	return TRANSACTION
}

//Verify is not implemented yet; it always reports success.
func (tx *Transaction) Verify() error {
	//TODO: Verify()
	return nil
}
// GetReference resolves every UTXO input of this transaction to the output it
// spends, by looking the referenced transaction up in TxStore. RegisterAsset
// transactions have no spendable inputs and yield (nil, nil). The result is
// keyed by the input pointer itself.
// NOTE(review): ReferTxOutputIndex is used without a bounds check against
// len(transaction.Outputs); a malformed input would panic here — confirm
// upstream validation guarantees the index is in range.
func (tx *Transaction) GetReference() (map[*UTXOTxInput]*TxOutput, error) {
	if tx.TxType == RegisterAsset {
		return nil, nil
	}
	//UTXO input / Outputs
	reference := make(map[*UTXOTxInput]*TxOutput)
	// Key index,v UTXOInput
	for _, utxo := range tx.UTXOInputs {
		transaction, err := TxStore.GetTransaction(utxo.ReferTxID)
		if err != nil {
			return nil, NewDetailErr(err, ErrNoCode, "[Transaction], GetReference failed.")
		}
		index := utxo.ReferTxOutputIndex
		reference[utxo] = transaction.Outputs[index]
	}
	return reference, nil
}
// GetTransactionResults computes, per asset ID, input value minus output
// value. Assets spent only in outputs end up negative; assets present only in
// the inputs end up positive.
func (tx *Transaction) GetTransactionResults() (TransactionResult, error) {
	result := make(map[Uint256]Fixed64)
	outputResult := tx.GetMergedAssetIDValueFromOutputs()
	InputResult, err := tx.GetMergedAssetIDValueFromReference()
	if err != nil {
		return nil, err
	}
	//calc the balance of input vs output
	for outputAssetid, outputValue := range outputResult {
		if inputValue, ok := InputResult[outputAssetid]; ok {
			result[outputAssetid] = inputValue - outputValue
		} else {
			// Missing key reads as the Fixed64 zero value, so this is
			// equivalent to result[outputAssetid] = -outputValue.
			result[outputAssetid] -= outputValue
		}
	}
	for inputAssetid, inputValue := range InputResult {
		if _, exist := result[inputAssetid]; !exist {
			// Same zero-value idiom: equivalent to = inputValue.
			result[inputAssetid] += inputValue
		}
	}
	return result, nil
}
// GetMergedAssetIDValueFromOutputs sums this transaction's output values
// grouped by asset ID.
func (tx *Transaction) GetMergedAssetIDValueFromOutputs() TransactionResult {
	totals := make(map[Uint256]Fixed64)
	for _, out := range tx.Outputs {
		// A missing key reads as the Fixed64 zero value, so a plain +=
		// both inserts and accumulates.
		totals[out.AssetID] += out.Value
	}
	return totals
}
// GetMergedAssetIDValueFromReference sums the values of the outputs spent by
// this transaction's inputs, grouped by asset ID. Fails if the referenced
// transactions cannot be resolved.
func (tx *Transaction) GetMergedAssetIDValueFromReference() (TransactionResult, error) {
	refs, err := tx.GetReference()
	if err != nil {
		return nil, err
	}
	totals := make(map[Uint256]Fixed64)
	for _, out := range refs {
		// Missing keys read as zero, so += inserts and accumulates.
		totals[out.AssetID] += out.Value
	}
	return totals, nil
}
// byProgramHashes sorts program hashes in ascending Uint160.CompareTo order.
type byProgramHashes []Uint160

func (a byProgramHashes) Len() int      { return len(a) }
func (a byProgramHashes) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// Less reports whether a[i] must sort before a[j]. The previous version
// returned true for equal elements (CompareTo == 0), which violates
// sort.Interface's requirement of a strict weak ordering; "< 0" preserves the
// same ascending order while being irreflexive.
func (a byProgramHashes) Less(i, j int) bool {
	return a[i].CompareTo(a[j]) < 0
}
| BooniesFX/DNA | core/transaction/transaction.go | GO | apache-2.0 | 13,443 |
/*
* Copyright 2010 JBoss Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.optaplanner.benchmark.impl.statistic.bestscore;
import java.util.List;
import org.optaplanner.benchmark.config.statistic.ProblemStatisticType;
import org.optaplanner.benchmark.impl.result.SubSingleBenchmarkResult;
import org.optaplanner.benchmark.impl.statistic.ProblemBasedSubSingleStatistic;
import org.optaplanner.core.api.domain.solution.Solution;
import org.optaplanner.core.api.solver.Solver;
import org.optaplanner.core.api.solver.event.BestSolutionChangedEvent;
import org.optaplanner.core.api.solver.event.SolverEventListener;
import org.optaplanner.core.impl.score.definition.ScoreDefinition;
/**
 * Collects the BEST_SCORE problem statistic for one sub-single benchmark run:
 * every best-solution change reported by the solver is recorded as a
 * (timeMillisSpent, score) point in {@code pointList}.
 */
public class BestScoreSubSingleStatistic extends ProblemBasedSubSingleStatistic<BestScoreStatisticPoint> {

    /** Listener registered on the solver for the lifetime of the benchmark run. */
    private final BestScoreSubSingleStatisticListener listener;

    public BestScoreSubSingleStatistic(SubSingleBenchmarkResult subSingleBenchmarkResult) {
        super(subSingleBenchmarkResult, ProblemStatisticType.BEST_SCORE);
        listener = new BestScoreSubSingleStatisticListener();
    }

    // ************************************************************************
    // Lifecycle methods
    // ************************************************************************

    /** Starts recording by subscribing the listener to the solver's events. */
    public void open(Solver solver) {
        solver.addEventListener(listener);
    }

    /** Stops recording by unsubscribing the listener. */
    public void close(Solver solver) {
        solver.removeEventListener(listener);
    }

    /** Appends one statistic point per best-solution-changed event. */
    private class BestScoreSubSingleStatisticListener implements SolverEventListener<Solution> {

        public void bestSolutionChanged(BestSolutionChangedEvent<Solution> event) {
            pointList.add(new BestScoreStatisticPoint(
                    event.getTimeMillisSpent(), event.getNewBestSolution().getScore()));
        }

    }

    // ************************************************************************
    // CSV methods
    // ************************************************************************

    @Override
    protected String getCsvHeader() {
        return BestScoreStatisticPoint.buildCsvLine("timeMillisSpent", "score");
    }

    @Override
    protected BestScoreStatisticPoint createPointFromCsvLine(ScoreDefinition scoreDefinition,
            List<String> csvLine) {
        // Column 0: elapsed millis; column 1: score parsed by the score definition.
        return new BestScoreStatisticPoint(Long.valueOf(csvLine.get(0)),
                scoreDefinition.parseScore(csvLine.get(1)));
    }

}
| eshen1991/optaplanner | optaplanner-benchmark/src/main/java/org/optaplanner/benchmark/impl/statistic/bestscore/BestScoreSubSingleStatistic.java | Java | apache-2.0 | 2,938 |
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import struct
import socket
import logging
import netaddr
from ryu.ofproto import ether
from ryu.ofproto import inet
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_2_parser
from ryu.lib import hub
from ryu.lib import mac
LOG = logging.getLogger('ryu.lib.ofctl_v1_2')
DEFAULT_TIMEOUT = 1.0
def str_to_int(src):
    """Convert *src* to an int, honoring a 0x/0X hex prefix; non-strings pass through unchanged."""
    if not isinstance(src, str):
        return src
    base = 16 if src[:2] in ('0x', '0X') else 10
    return int(src, base)
def to_action(dp, dic):
    """Build an OFPAction* object from a JSON-style action description.

    dp:  datapath whose ofproto/ofproto_parser modules are used.
    dic: dict with a 'type' key naming the OpenFlow 1.2 action plus the
         action-specific fields (e.g. 'port', 'ethertype', 'field'/'value').
    Returns the parser action instance, or None for an unknown type.
    """
    ofp = dp.ofproto
    parser = dp.ofproto_parser
    action_type = dic.get('type')
    if action_type == 'OUTPUT':
        # Defaults: flood to any port, send the full packet to the controller.
        out_port = int(dic.get('port', ofp.OFPP_ANY))
        max_len = int(dic.get('max_len', ofp.OFPCML_MAX))
        result = parser.OFPActionOutput(out_port, max_len)
    elif action_type == 'COPY_TTL_OUT':
        result = parser.OFPActionCopyTtlOut()
    elif action_type == 'COPY_TTL_IN':
        result = parser.OFPActionCopyTtlIn()
    elif action_type == 'SET_MPLS_TTL':
        mpls_ttl = int(dic.get('mpls_ttl'))
        result = parser.OFPActionSetMplsTtl(mpls_ttl)
    elif action_type == 'DEC_MPLS_TTL':
        result = parser.OFPActionDecMplsTtl()
    elif action_type == 'PUSH_VLAN':
        ethertype = int(dic.get('ethertype'))
        result = parser.OFPActionPushVlan(ethertype)
    elif action_type == 'POP_VLAN':
        result = parser.OFPActionPopVlan()
    elif action_type == 'PUSH_MPLS':
        ethertype = int(dic.get('ethertype'))
        result = parser.OFPActionPushMpls(ethertype)
    elif action_type == 'POP_MPLS':
        ethertype = int(dic.get('ethertype'))
        result = parser.OFPActionPopMpls(ethertype)
    elif action_type == 'SET_QUEUE':
        queue_id = int(dic.get('queue_id'))
        result = parser.OFPActionSetQueue(queue_id)
    elif action_type == 'GROUP':
        group_id = int(dic.get('group_id'))
        result = parser.OFPActionGroup(group_id)
    elif action_type == 'SET_NW_TTL':
        nw_ttl = int(dic.get('nw_ttl'))
        result = parser.OFPActionSetNwTtl(nw_ttl)
    elif action_type == 'DEC_NW_TTL':
        result = parser.OFPActionDecNwTtl()
    elif action_type == 'SET_FIELD':
        field = dic.get('field')
        value = dic.get('value')
        result = parser.OFPActionSetField(**{field: value})
    else:
        # Caller distinguishes plain actions from instructions via None.
        result = None
    return result
def to_actions(dp, acts):
    """Convert a list of action dicts into a list of OFP instructions.

    Plain actions become a single APPLY_ACTIONS instruction (appended even
    when empty); GOTO_TABLE and WRITE_METADATA entries become their own
    instruction objects. Unknown types are logged and skipped.
    """
    inst = []
    actions = []
    ofp = dp.ofproto
    parser = dp.ofproto_parser
    for a in acts:
        action = to_action(dp, a)
        if action is not None:
            actions.append(action)
        else:
            # Not a plain action: may be an instruction-level entry.
            action_type = a.get('type')
            if action_type == 'GOTO_TABLE':
                table_id = int(a.get('table_id'))
                inst.append(parser.OFPInstructionGotoTable(table_id))
            elif action_type == 'WRITE_METADATA':
                metadata = str_to_int(a.get('metadata'))
                # Default mask: all bits significant.
                metadata_mask = (str_to_int(a['metadata_mask'])
                                 if 'metadata_mask' in a
                                 else parser.UINT64_MAX)
                inst.append(
                    parser.OFPInstructionWriteMetadata(
                        metadata, metadata_mask))
            else:
                LOG.debug('Unknown action type: %s' % action_type)
    inst.append(parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
                                             actions))
    return inst
def action_to_str(act):
    """Render a parsed OFPAction as its flat string form (e.g. 'OUTPUT:1', 'UNKNOWN' as fallback)."""
    action_type = act.cls_action_type
    if action_type == ofproto_v1_2.OFPAT_OUTPUT:
        buf = 'OUTPUT:' + str(act.port)
    elif action_type == ofproto_v1_2.OFPAT_COPY_TTL_OUT:
        buf = 'COPY_TTL_OUT'
    elif action_type == ofproto_v1_2.OFPAT_COPY_TTL_IN:
        buf = 'COPY_TTL_IN'
    elif action_type == ofproto_v1_2.OFPAT_SET_MPLS_TTL:
        buf = 'SET_MPLS_TTL:' + str(act.mpls_ttl)
    elif action_type == ofproto_v1_2.OFPAT_DEC_MPLS_TTL:
        buf = 'DEC_MPLS_TTL'
    elif action_type == ofproto_v1_2.OFPAT_PUSH_VLAN:
        buf = 'PUSH_VLAN:' + str(act.ethertype)
    elif action_type == ofproto_v1_2.OFPAT_POP_VLAN:
        buf = 'POP_VLAN'
    elif action_type == ofproto_v1_2.OFPAT_PUSH_MPLS:
        buf = 'PUSH_MPLS:' + str(act.ethertype)
    elif action_type == ofproto_v1_2.OFPAT_POP_MPLS:
        buf = 'POP_MPLS:' + str(act.ethertype)
    elif action_type == ofproto_v1_2.OFPAT_SET_QUEUE:
        buf = 'SET_QUEUE:' + str(act.queue_id)
    elif action_type == ofproto_v1_2.OFPAT_GROUP:
        buf = 'GROUP:' + str(act.group_id)
    elif action_type == ofproto_v1_2.OFPAT_SET_NW_TTL:
        buf = 'SET_NW_TTL:' + str(act.nw_ttl)
    elif action_type == ofproto_v1_2.OFPAT_DEC_NW_TTL:
        buf = 'DEC_NW_TTL'
    elif action_type == ofproto_v1_2.OFPAT_SET_FIELD:
        buf = 'SET_FIELD: {%s:%s}' % (act.key, act.value)
    else:
        buf = 'UNKNOWN'
    return buf
def actions_to_str(instructions):
    """Flatten a list of OFP instructions into display strings.

    Actions inside APPLY/WRITE_ACTIONS are rendered individually;
    GOTO_TABLE and WRITE_METADATA get one string each; any other
    instruction type is silently skipped.
    """
    actions = []
    for instruction in instructions:
        if isinstance(instruction,
                      ofproto_v1_2_parser.OFPInstructionActions):
            for a in instruction.actions:
                actions.append(action_to_str(a))
        elif isinstance(instruction,
                        ofproto_v1_2_parser.OFPInstructionGotoTable):
            buf = 'GOTO_TABLE:' + str(instruction.table_id)
            actions.append(buf)
        elif isinstance(instruction,
                        ofproto_v1_2_parser.OFPInstructionWriteMetadata):
            # Mask of 0/None is omitted from the rendering.
            buf = ('WRITE_METADATA:0x%x/0x%x' % (instruction.metadata,
                                                 instruction.metadata_mask)
                   if instruction.metadata_mask
                   else 'WRITE_METADATA:0x%x' % instruction.metadata)
            actions.append(buf)
        else:
            continue
    return actions
def to_match(dp, attrs):
    """Build an OFPMatch from a dict of match-field names to raw values.

    Accepts both OF1.2 field names and legacy OF1.0-style aliases (dl_*,
    nw_*, tp_*). Values are converted per field: ints, 'addr/mask' strings,
    and the special VLAN-id encoding. For ARP packets the legacy
    nw_src/nw_dst keys are reinterpreted as arp_spa/arp_tpa (note: this
    mutates *attrs* in place). tp_src/tp_dst map to TCP or UDP port fields
    depending on ip_proto/nw_proto.
    """
    # Per-field value converters.
    convert = {'in_port': int,
               'in_phy_port': int,
               'metadata': to_match_metadata,
               'dl_dst': to_match_eth,
               'dl_src': to_match_eth,
               'eth_dst': to_match_eth,
               'eth_src': to_match_eth,
               'dl_type': int,
               'eth_type': int,
               'dl_vlan': to_match_vid,
               'vlan_vid': to_match_vid,
               'vlan_pcp': int,
               'ip_dscp': int,
               'ip_ecn': int,
               'nw_proto': int,
               'ip_proto': int,
               'nw_src': to_match_ip,
               'nw_dst': to_match_ip,
               'ipv4_src': to_match_ip,
               'ipv4_dst': to_match_ip,
               'tp_src': int,
               'tp_dst': int,
               'tcp_src': int,
               'tcp_dst': int,
               'udp_src': int,
               'udp_dst': int,
               'sctp_src': int,
               'sctp_dst': int,
               'icmpv4_type': int,
               'icmpv4_code': int,
               'arp_op': int,
               'arp_spa': to_match_ip,
               'arp_tpa': to_match_ip,
               'arp_sha': to_match_eth,
               'arp_tha': to_match_eth,
               'ipv6_src': to_match_ip,
               'ipv6_dst': to_match_ip,
               'ipv6_flabel': int,
               'icmpv6_type': int,
               'icmpv6_code': int,
               'ipv6_nd_target': to_match_ip,
               'ipv6_nd_sll': to_match_eth,
               'ipv6_nd_tll': to_match_eth,
               'mpls_label': int,
               'mpls_tc': int}

    # Legacy OF1.0 name -> OF1.2 oxm field name.
    keys = {'dl_dst': 'eth_dst',
            'dl_src': 'eth_src',
            'dl_type': 'eth_type',
            'dl_vlan': 'vlan_vid',
            'nw_src': 'ipv4_src',
            'nw_dst': 'ipv4_dst',
            'nw_proto': 'ip_proto'}

    if attrs.get('dl_type') == ether.ETH_TYPE_ARP or \
            attrs.get('eth_type') == ether.ETH_TYPE_ARP:
        # For ARP, the legacy "network" addresses are ARP protocol addresses.
        if 'nw_src' in attrs and 'arp_spa' not in attrs:
            attrs['arp_spa'] = attrs['nw_src']
            del attrs['nw_src']
        if 'nw_dst' in attrs and 'arp_tpa' not in attrs:
            attrs['arp_tpa'] = attrs['nw_dst']
            del attrs['nw_dst']

    kwargs = {}
    for key, value in attrs.items():
        if key in convert:
            value = convert[key](value)
        if key in keys:
            # For old field name
            key = keys[key]
        if key == 'tp_src' or key == 'tp_dst':
            # TCP/UDP port
            conv = {inet.IPPROTO_TCP: {'tp_src': 'tcp_src',
                                       'tp_dst': 'tcp_dst'},
                    inet.IPPROTO_UDP: {'tp_src': 'udp_src',
                                       'tp_dst': 'udp_dst'}}
            # KeyError for non-TCP/UDP protocols propagates to the caller.
            ip_proto = attrs.get('nw_proto', attrs.get('ip_proto', 0))
            key = conv[ip_proto][key]
            kwargs[key] = value
        else:
            # others
            kwargs[key] = value

    return dp.ofproto_parser.OFPMatch(**kwargs)
def to_match_eth(value):
    """Split an 'addr/mask' Ethernet match string into (addr, mask); bare addresses pass through unchanged."""
    if '/' not in value:
        return value
    parts = value.split('/')
    return parts[0], parts[1]
def to_match_ip(value):
    """Convert an 'ip/prefix' CIDR string into (address, netmask) strings via netaddr; plain addresses pass through."""
    if '/' not in value:
        return value
    network = netaddr.ip.IPNetwork(value)
    return str(network.ip), str(network.netmask)
def to_match_vid(value):
    """Normalize a VLAN-id match value.

    Decimal ints and decimal strings are VLAN tag numbers and get the
    OFPVID_PRESENT bit OR'd in automatically. Hex strings are raw
    oxm_values used as-is. 'value/mask' strings become an (int, int)
    tuple with auto-detected bases.
    """
    if isinstance(value, int):
        # Described as a decimal int value: add the presence bit.
        return value | ofproto_v1_2.OFPVID_PRESENT
    if '/' in value:
        parts = value.split('/')
        return int(parts[0], 0), int(parts[1], 0)
    if value.isdigit():
        # Decimal string: also a tag number.
        return int(value, 10) | ofproto_v1_2.OFPVID_PRESENT
    # Hex (or otherwise prefixed) string: raw oxm_value.
    return int(value, 0)
def to_match_metadata(value):
    """Parse a metadata match: 'value/mask' -> (int, int); otherwise a single int (hex or decimal)."""
    if '/' not in value:
        return str_to_int(value)
    fields = value.split('/')
    return str_to_int(fields[0]), str_to_int(fields[1])
def match_to_str(ofmatch):
    """Convert an OFPMatch back into a plain dict keyed by legacy field names.

    Masked fields render as 'value/mask'; VLAN ids and metadata have their
    own string encodings (see match_vid_to_str / match_metadata_to_str).
    """
    # OF1.2 oxm name -> legacy display name. Both tcp_* and udp_* map to
    # tp_*; setdefault below keeps the first occurrence.
    keys = {'eth_src': 'dl_src',
            'eth_dst': 'dl_dst',
            'eth_type': 'dl_type',
            'vlan_vid': 'dl_vlan',
            'ipv4_src': 'nw_src',
            'ipv4_dst': 'nw_dst',
            'ip_proto': 'nw_proto',
            'tcp_src': 'tp_src',
            'tcp_dst': 'tp_dst',
            'udp_src': 'tp_src',
            'udp_dst': 'tp_dst'
            }

    match = {}

    # Walk the raw oxm TLVs from the match's JSON form.
    ofmatch = ofmatch.to_jsondict()['OFPMatch']
    ofmatch = ofmatch['oxm_fields']

    for match_field in ofmatch:
        key = match_field['OXMTlv']['field']
        if key in keys:
            key = keys[key]
        mask = match_field['OXMTlv']['mask']
        value = match_field['OXMTlv']['value']
        if key == 'dl_vlan':
            value = match_vid_to_str(value, mask)
        elif key == 'metadata':
            value = match_metadata_to_str(value, mask)
        else:
            if mask is not None:
                value = value + '/' + mask
            else:
                value = value
        match.setdefault(key, value)

    return match
def match_metadata_to_str(value, mask):
    """Format metadata as 'value/mask' in decimal, omitting the mask when it is falsy."""
    if mask:
        return '%d/%d' % (value, mask)
    return '%d' % value
def match_vid_to_str(value, mask):
    """Format a VLAN id for display.

    With a mask: '0xVVVV/0xMMMM'. Without one: the decimal tag number when
    the OFPVID_PRESENT bit is set (bit stripped), else a raw hex oxm_value.
    """
    if mask is None:
        if value & ofproto_v1_2.OFPVID_PRESENT:
            return str(value & ~ofproto_v1_2.OFPVID_PRESENT)
        return '0x%04x' % value
    return '0x%04x/0x%04x' % (value, mask)
def send_stats_request(dp, stats, waiters, msgs):
    """Send a stats request and block until the replies land in *msgs*.

    Registers (event, msgs) under waiters[dp.id][xid] so the OFP reply
    handler can append messages and set the event. Waits at most
    DEFAULT_TIMEOUT seconds and deregisters the waiter on timeout.
    """
    dp.set_xid(stats)
    waiters_per_dp = waiters.setdefault(dp.id, {})
    lock = hub.Event()
    waiters_per_dp[stats.xid] = (lock, msgs)
    dp.send_msg(stats)

    lock.wait(timeout=DEFAULT_TIMEOUT)
    if not lock.is_set():
        # Timed out: the reply handler never fired; clean up our waiter.
        del waiters_per_dp[stats.xid]
def get_desc_stats(dp, waiters):
    """Query switch description stats; returns {str(dpid): {mfr_desc, hw_desc, ...}}."""
    stats = dp.ofproto_parser.OFPDescStatsRequest(dp)
    msgs = []
    send_stats_request(dp, stats, waiters, msgs)

    s = {}
    # At most one desc reply is expected; later replies would overwrite s.
    for msg in msgs:
        stats = msg.body
        s = {'mfr_desc': stats.mfr_desc,
             'hw_desc': stats.hw_desc,
             'sw_desc': stats.sw_desc,
             'serial_num': stats.serial_num,
             'dp_desc': stats.dp_desc}
    desc = {str(dp.id): s}
    return desc
def get_queue_stats(dp, waiters):
    """Query queue stats for all ports/queues; returns {str(dpid): [stat dicts]}."""
    ofp = dp.ofproto
    stats = dp.ofproto_parser.OFPQueueStatsRequest(dp, 0, ofp.OFPP_ANY,
                                                   ofp.OFPQ_ALL)
    msgs = []
    send_stats_request(dp, stats, waiters, msgs)

    s = []
    for msg in msgs:
        stats = msg.body
        for stat in stats:
            s.append({'port_no': stat.port_no,
                      'queue_id': stat.queue_id,
                      'tx_bytes': stat.tx_bytes,
                      'tx_errors': stat.tx_errors,
                      'tx_packets': stat.tx_packets})
    desc = {str(dp.id): s}
    return desc
def get_flow_stats(dp, waiters, flow={}):
    """Query flow stats, optionally filtered by the *flow* dict
    (table_id, out_port, out_group, cookie, cookie_mask, match).
    Returns {str(dpid): [flow dicts]}.

    Note: *flow* defaults to a shared mutable dict; it is only read here.
    """
    table_id = int(flow.get('table_id', dp.ofproto.OFPTT_ALL))
    out_port = int(flow.get('out_port', dp.ofproto.OFPP_ANY))
    out_group = int(flow.get('out_group', dp.ofproto.OFPG_ANY))
    cookie = int(flow.get('cookie', 0))
    cookie_mask = int(flow.get('cookie_mask', 0))
    match = to_match(dp, flow.get('match', {}))

    stats = dp.ofproto_parser.OFPFlowStatsRequest(
        dp, table_id, out_port, out_group, cookie, cookie_mask, match)

    msgs = []
    send_stats_request(dp, stats, waiters, msgs)

    flows = []
    for msg in msgs:
        for stats in msg.body:
            actions = actions_to_str(stats.instructions)
            match = match_to_str(stats.match)

            s = {'priority': stats.priority,
                 'cookie': stats.cookie,
                 'idle_timeout': stats.idle_timeout,
                 'hard_timeout': stats.hard_timeout,
                 'actions': actions,
                 'match': match,
                 'byte_count': stats.byte_count,
                 'duration_sec': stats.duration_sec,
                 'duration_nsec': stats.duration_nsec,
                 'packet_count': stats.packet_count,
                 'table_id': stats.table_id,
                 'length': stats.length}
            flows.append(s)
    flows = {str(dp.id): flows}
    return flows
def get_port_stats(dp, waiters):
    """Query per-port traffic/error counters for all ports; returns {str(dpid): [port dicts]}."""
    stats = dp.ofproto_parser.OFPPortStatsRequest(
        dp, dp.ofproto.OFPP_ANY, 0)
    msgs = []
    send_stats_request(dp, stats, waiters, msgs)

    ports = []
    for msg in msgs:
        for stats in msg.body:
            s = {'port_no': stats.port_no,
                 'rx_packets': stats.rx_packets,
                 'tx_packets': stats.tx_packets,
                 'rx_bytes': stats.rx_bytes,
                 'tx_bytes': stats.tx_bytes,
                 'rx_dropped': stats.rx_dropped,
                 'tx_dropped': stats.tx_dropped,
                 'rx_errors': stats.rx_errors,
                 'tx_errors': stats.tx_errors,
                 'rx_frame_err': stats.rx_frame_err,
                 'rx_over_err': stats.rx_over_err,
                 'rx_crc_err': stats.rx_crc_err,
                 'collisions': stats.collisions}
            ports.append(s)
    ports = {str(dp.id): ports}
    return ports
def get_group_stats(dp, waiters):
    """Query stats for every group (OFPG_ALL), including per-bucket counters; returns {str(dpid): [group dicts]}."""
    stats = dp.ofproto_parser.OFPGroupStatsRequest(
        dp, dp.ofproto.OFPG_ALL, 0)
    msgs = []
    send_stats_request(dp, stats, waiters, msgs)

    groups = []
    for msg in msgs:
        for stats in msg.body:
            bucket_counters = []
            for bucket_counter in stats.bucket_counters:
                c = {'packet_count': bucket_counter.packet_count,
                     'byte_count': bucket_counter.byte_count}
                bucket_counters.append(c)
            g = {'length': stats.length,
                 'group_id': stats.group_id,
                 'ref_count': stats.ref_count,
                 'packet_count': stats.packet_count,
                 'byte_count': stats.byte_count,
                 'bucket_stats': bucket_counters}
            groups.append(g)
    groups = {str(dp.id): groups}
    return groups
def get_group_features(dp, waiters):
    """Query group-feature capabilities and decode the bitmaps into names.

    Returns {str(dpid): [{'types': [...], 'capabilities': [...],
    'max_groups': [...], 'actions': [...]}]}.
    """
    ofp = dp.ofproto
    # Group type bit -> name.
    type_convert = {ofp.OFPGT_ALL: 'ALL',
                    ofp.OFPGT_SELECT: 'SELECT',
                    ofp.OFPGT_INDIRECT: 'INDIRECT',
                    ofp.OFPGT_FF: 'FF'}
    # Capability flag -> name.
    cap_convert = {ofp.OFPGFC_SELECT_WEIGHT: 'SELECT_WEIGHT',
                   ofp.OFPGFC_SELECT_LIVENESS: 'SELECT_LIVENESS',
                   ofp.OFPGFC_CHAINING: 'CHAINING',
                   ofp.OFPGFC_CHAINING_CHECKS: 'CHAINING_CHECKS'}
    # Action type bit -> name.
    act_convert = {ofp.OFPAT_OUTPUT: 'OUTPUT',
                   ofp.OFPAT_COPY_TTL_OUT: 'COPY_TTL_OUT',
                   ofp.OFPAT_COPY_TTL_IN: 'COPY_TTL_IN',
                   ofp.OFPAT_SET_MPLS_TTL: 'SET_MPLS_TTL',
                   ofp.OFPAT_DEC_MPLS_TTL: 'DEC_MPLS_TTL',
                   ofp.OFPAT_PUSH_VLAN: 'PUSH_VLAN',
                   ofp.OFPAT_POP_VLAN: 'POP_VLAN',
                   ofp.OFPAT_PUSH_MPLS: 'PUSH_MPLS',
                   ofp.OFPAT_POP_MPLS: 'POP_MPLS',
                   ofp.OFPAT_SET_QUEUE: 'SET_QUEUE',
                   ofp.OFPAT_GROUP: 'GROUP',
                   ofp.OFPAT_SET_NW_TTL: 'SET_NW_TTL',
                   ofp.OFPAT_DEC_NW_TTL: 'DEC_NW_TTL',
                   ofp.OFPAT_SET_FIELD: 'SET_FIELD'}

    stats = dp.ofproto_parser.OFPGroupFeaturesStatsRequest(dp, 0)
    msgs = []
    send_stats_request(dp, stats, waiters, msgs)

    features = []
    for msg in msgs:
        feature = msg.body
        # Supported group types: one bit per OFPGT_* value.
        types = []
        for k, v in type_convert.items():
            if (1 << k) & feature.types:
                types.append(v)
        # Capabilities are direct flag values, not bit positions.
        capabilities = []
        for k, v in cap_convert.items():
            if k & feature.capabilities:
                capabilities.append(v)
        # Max groups per group type.
        max_groups = []
        for k, v in type_convert.items():
            max_groups.append({v: feature.max_groups[k]})
        # Supported actions, decoded per group type.
        actions = []
        for k1, v1 in type_convert.items():
            acts = []
            for k2, v2 in act_convert.items():
                if (1 << k2) & feature.actions[k1]:
                    acts.append(v2)
            actions.append({v1: acts})
        f = {'types': types,
             'capabilities': capabilities,
             'max_groups': max_groups,
             'actions': actions}
        features.append(f)
    features = {str(dp.id): features}
    return features
def get_group_desc(dp, waiters):
    """Query group descriptions (type, id, buckets with stringified actions); returns {str(dpid): [desc dicts]}."""
    type_convert = {dp.ofproto.OFPGT_ALL: 'ALL',
                    dp.ofproto.OFPGT_SELECT: 'SELECT',
                    dp.ofproto.OFPGT_INDIRECT: 'INDIRECT',
                    dp.ofproto.OFPGT_FF: 'FF'}

    stats = dp.ofproto_parser.OFPGroupDescStatsRequest(dp, 0)
    msgs = []
    send_stats_request(dp, stats, waiters, msgs)

    descs = []
    for msg in msgs:
        for stats in msg.body:
            buckets = []
            for bucket in stats.buckets:
                actions = []
                for action in bucket.actions:
                    actions.append(action_to_str(action))
                b = {'weight': bucket.weight,
                     'watch_port': bucket.watch_port,
                     'watch_group': bucket.watch_group,
                     'actions': actions}
                buckets.append(b)
            d = {'type': type_convert.get(stats.type),
                 'group_id': stats.group_id,
                 'buckets': buckets}
            descs.append(d)
    descs = {str(dp.id): descs}
    return descs
def get_port_desc(dp, waiters):
    """Query port descriptions via a features request; returns {str(dpid): [port dicts]}.

    OF1.2 has no dedicated port-desc multipart, so this reads the ports
    attached to the OFPFeaturesReply instead.
    """
    stats = dp.ofproto_parser.OFPFeaturesRequest(dp)
    msgs = []
    send_stats_request(dp, stats, waiters, msgs)

    descs = []

    for msg in msgs:
        stats = msg.ports
        for stat in stats.values():
            d = {'port_no': stat.port_no,
                 'hw_addr': stat.hw_addr,
                 'name': stat.name,
                 'config': stat.config,
                 'state': stat.state,
                 'curr': stat.curr,
                 'advertised': stat.advertised,
                 'supported': stat.supported,
                 'peer': stat.peer,
                 'curr_speed': stat.curr_speed,
                 'max_speed': stat.max_speed}
            descs.append(d)
    descs = {str(dp.id): descs}
    return descs
def mod_flow_entry(dp, flow, cmd):
    """Build and send an OFPFlowMod from the *flow* dict.

    cmd is an OFPFC_* command (ADD/MODIFY/DELETE...). All numeric fields
    default sensibly when absent; 'match' and 'actions' are converted via
    to_match / to_actions.
    """
    cookie = int(flow.get('cookie', 0))
    cookie_mask = int(flow.get('cookie_mask', 0))
    table_id = int(flow.get('table_id', 0))
    idle_timeout = int(flow.get('idle_timeout', 0))
    hard_timeout = int(flow.get('hard_timeout', 0))
    priority = int(flow.get('priority', 0))
    buffer_id = int(flow.get('buffer_id', dp.ofproto.OFP_NO_BUFFER))
    out_port = int(flow.get('out_port', dp.ofproto.OFPP_ANY))
    out_group = int(flow.get('out_group', dp.ofproto.OFPG_ANY))
    flags = int(flow.get('flags', 0))
    match = to_match(dp, flow.get('match', {}))
    inst = to_actions(dp, flow.get('actions', []))

    flow_mod = dp.ofproto_parser.OFPFlowMod(
        dp, cookie, cookie_mask, table_id, cmd, idle_timeout,
        hard_timeout, priority, buffer_id, out_port, out_group,
        flags, match, inst)

    dp.send_msg(flow_mod)
def mod_group_entry(dp, group, cmd):
    """Build and send an OFPGroupMod from the *group* dict.

    cmd is an OFPGC_* command. Unknown group types are logged; note that
    type_ stays None in that case and is still passed to OFPGroupMod.
    """
    type_convert = {'ALL': dp.ofproto.OFPGT_ALL,
                    'SELECT': dp.ofproto.OFPGT_SELECT,
                    'INDIRECT': dp.ofproto.OFPGT_INDIRECT,
                    'FF': dp.ofproto.OFPGT_FF}

    type_ = type_convert.get(group.get('type', 'ALL'))
    if type_ is None:
        LOG.debug('Unknown type: %s', group.get('type'))

    group_id = int(group.get('group_id', 0))

    buckets = []
    for bucket in group.get('buckets', []):
        weight = int(bucket.get('weight', 0))
        watch_port = int(bucket.get('watch_port', dp.ofproto.OFPP_ANY))
        watch_group = int(bucket.get('watch_group', dp.ofproto.OFPG_ANY))
        actions = []
        for dic in bucket.get('actions', []):
            action = to_action(dp, dic)
            if action is not None:
                actions.append(action)
        buckets.append(dp.ofproto_parser.OFPBucket(
            weight, watch_port, watch_group, actions))

    group_mod = dp.ofproto_parser.OFPGroupMod(
        dp, cmd, type_, group_id, buckets)

    dp.send_msg(group_mod)
def mod_port_behavior(dp, port_config):
    """Build and send an OFPPortMod from the *port_config* dict.

    Expected keys: port_no, hw_addr, config, mask, advertise. Numeric
    fields default to 0 when absent.
    """
    port_no = int(port_config.get('port_no', 0))
    hw_addr = port_config.get('hw_addr')
    config = int(port_config.get('config', 0))
    mask = int(port_config.get('mask', 0))
    # Bug fix: the original did int(port_config.get('advertise')), which
    # raises TypeError (int(None)) when 'advertise' is omitted, unlike every
    # other numeric field here. Default to 0 for consistency.
    advertise = int(port_config.get('advertise', 0))

    port_mod = dp.ofproto_parser.OFPPortMod(
        dp, port_no, hw_addr, config, mask, advertise)

    dp.send_msg(port_mod)
def send_experimenter(dp, exp):
    """Build and send an OFPExperimenter message from the *exp* dict.

    'data' is used as-is for data_type 'ascii' (the default) or
    base64-decoded for 'base64'. An unknown data_type is logged but the
    message is still sent with the raw data.
    """
    experimenter = exp.get('experimenter', 0)
    exp_type = exp.get('exp_type', 0)
    data_type = exp.get('data_type', 'ascii')
    if data_type != 'ascii' and data_type != 'base64':
        LOG.debug('Unknown data type: %s', data_type)
    data = exp.get('data', '')
    if data_type == 'base64':
        data = base64.b64decode(data)

    expmsg = dp.ofproto_parser.OFPExperimenter(
        dp, experimenter, exp_type, data)

    dp.send_msg(expmsg)
| Neil741/ryu-master | ryu/lib/ofctl_v1_2.py | Python | apache-2.0 | 24,202 |
// Copyright (c) 2006-7 John Maddock
// Use, modification and distribution are subject to the
// Boost Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_MATH_TOOLS_CONFIG_HPP
#define BOOST_MATH_TOOLS_CONFIG_HPP
#ifdef _MSC_VER
#pragma once
#endif
#include <boost/config.hpp>
#include <boost/cstdint.hpp> // for geofeatures_boost::uintmax_t
#include <boost/detail/workaround.hpp>
#include <boost/type_traits/is_integral.hpp>
#include <algorithm> // for min and max
#include <boost/config/no_tr1/cmath.hpp>
#include <climits>
#include <cfloat>
#if (defined(macintosh) || defined(__APPLE__) || defined(__APPLE_CC__))
# include <math.h>
#endif
#ifndef BOOST_NO_LIMITS_COMPILE_TIME_CONSTANTS
# include <limits>
#endif
#include <boost/math/tools/user.hpp>
#if (defined(__CYGWIN__) || defined(__FreeBSD__) || defined(__NetBSD__) \
|| (defined(__hppa) && !defined(__OpenBSD__)) || (defined(__NO_LONG_DOUBLE_MATH) && (DBL_MANT_DIG != LDBL_MANT_DIG))) \
&& !defined(BOOST_MATH_NO_LONG_DOUBLE_MATH_FUNCTIONS)
# define BOOST_MATH_NO_LONG_DOUBLE_MATH_FUNCTIONS
#endif
#if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x582))
//
// Borland post 5.8.2 uses Dinkumware's std C lib which
// doesn't have true long double precision. Earlier
// versions are problematic too:
//
# define BOOST_MATH_NO_REAL_CONCEPT_TESTS
# define BOOST_MATH_NO_LONG_DOUBLE_MATH_FUNCTIONS
# define BOOST_MATH_CONTROL_FP _control87(MCW_EM,MCW_EM)
# include <float.h>
#endif
#ifdef __IBMCPP__
//
// For reasons I don't unserstand, the tests with IMB's compiler all
// pass at long double precision, but fail with real_concept, those tests
// are disabled for now. (JM 2012).
# define BOOST_MATH_NO_REAL_CONCEPT_TESTS
#endif
#ifdef sun
// Any use of __float128 in program startup code causes a segfault (tested JM 2015, Solaris 11).
# define BOOST_MATH_DISABLE_FLOAT128
#endif
#ifdef __HAIKU__
//
// Not sure what's up with the math detection on Haiku, but linking fails with
// float128 code enabled, and we don't have an implementation of __expl, so
// disabling long double functions for now as well.
# define BOOST_MATH_DISABLE_FLOAT128
# define BOOST_MATH_NO_LONG_DOUBLE_MATH_FUNCTIONS
#endif
#if (defined(macintosh) || defined(__APPLE__) || defined(__APPLE_CC__)) && ((LDBL_MANT_DIG == 106) || (__LDBL_MANT_DIG__ == 106)) && !defined(BOOST_MATH_NO_LONG_DOUBLE_MATH_FUNCTIONS)
//
// Darwin's rather strange "double double" is rather hard to
// support, it should be possible given enough effort though...
//
# define BOOST_MATH_NO_LONG_DOUBLE_MATH_FUNCTIONS
#endif
#if defined(unix) && defined(__INTEL_COMPILER) && (__INTEL_COMPILER <= 1000) && !defined(BOOST_MATH_NO_LONG_DOUBLE_MATH_FUNCTIONS)
//
// Intel compiler prior to version 10 has sporadic problems
// calling the long double overloads of the std lib math functions:
// calling ::powl is OK, but std::pow(long double, long double)
// may segfault depending upon the value of the arguments passed
// and the specific Linux distribution.
//
// We'll be conservative and disable long double support for this compiler.
//
// Comment out this #define and try building the tests to determine whether
// your Intel compiler version has this issue or not.
//
# define BOOST_MATH_NO_LONG_DOUBLE_MATH_FUNCTIONS
#endif
#if defined(unix) && defined(__INTEL_COMPILER)
//
// Intel compiler has sporadic issues compiling std::fpclassify depending on
// the exact OS version used. Use our own code for this as we know it works
// well on Intel processors:
//
#define BOOST_MATH_DISABLE_STD_FPCLASSIFY
#endif
#if defined(BOOST_MSVC) && !defined(_WIN32_WCE)
// Better safe than sorry, our tests don't support hardware exceptions:
# define BOOST_MATH_CONTROL_FP _control87(MCW_EM,MCW_EM)
#endif
#ifdef __IBMCPP__
# define BOOST_MATH_NO_DEDUCED_FUNCTION_POINTERS
#endif
#if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901))
# define BOOST_MATH_USE_C99
#endif
#if (defined(__hpux) && !defined(__hppa))
# define BOOST_MATH_USE_C99
#endif
#if defined(__GNUC__) && defined(_GLIBCXX_USE_C99)
# define BOOST_MATH_USE_C99
#endif
#if defined(_LIBCPP_VERSION) && !defined(_MSC_VER)
# define BOOST_MATH_USE_C99
#endif
#if defined(__CYGWIN__) || defined(__HP_aCC) || defined(BOOST_INTEL) \
|| defined(BOOST_NO_NATIVE_LONG_DOUBLE_FP_CLASSIFY) \
|| (defined(__GNUC__) && !defined(BOOST_MATH_USE_C99))\
|| defined(BOOST_MATH_NO_LONG_DOUBLE_MATH_FUNCTIONS)
# define BOOST_MATH_NO_NATIVE_LONG_DOUBLE_FP_CLASSIFY
#endif
#if BOOST_WORKAROUND(__SUNPRO_CC, <= 0x590)
# include "boost/type.hpp"
# include "boost/non_type.hpp"
# define BOOST_MATH_EXPLICIT_TEMPLATE_TYPE(t) geofeatures_boost::type<t>* = 0
# define BOOST_MATH_EXPLICIT_TEMPLATE_TYPE_SPEC(t) geofeatures_boost::type<t>*
# define BOOST_MATH_EXPLICIT_TEMPLATE_NON_TYPE(t, v) geofeatures_boost::non_type<t, v>* = 0
# define BOOST_MATH_EXPLICIT_TEMPLATE_NON_TYPE_SPEC(t, v) geofeatures_boost::non_type<t, v>*
# define BOOST_MATH_APPEND_EXPLICIT_TEMPLATE_TYPE(t) \
, BOOST_MATH_EXPLICIT_TEMPLATE_TYPE(t)
# define BOOST_MATH_APPEND_EXPLICIT_TEMPLATE_TYPE_SPEC(t) \
, BOOST_MATH_EXPLICIT_TEMPLATE_TYPE_SPEC(t)
# define BOOST_MATH_APPEND_EXPLICIT_TEMPLATE_NON_TYPE(t, v) \
, BOOST_MATH_EXPLICIT_TEMPLATE_NON_TYPE(t, v)
# define BOOST_MATH_APPEND_EXPLICIT_TEMPLATE_NON_TYPE_SPEC(t, v) \
, BOOST_MATH_EXPLICIT_TEMPLATE_NON_TYPE_SPEC(t, v)
#else
// no workaround needed: expand to nothing
# define BOOST_MATH_EXPLICIT_TEMPLATE_TYPE(t)
# define BOOST_MATH_EXPLICIT_TEMPLATE_TYPE_SPEC(t)
# define BOOST_MATH_EXPLICIT_TEMPLATE_NON_TYPE(t, v)
# define BOOST_MATH_EXPLICIT_TEMPLATE_NON_TYPE_SPEC(t, v)
# define BOOST_MATH_APPEND_EXPLICIT_TEMPLATE_TYPE(t)
# define BOOST_MATH_APPEND_EXPLICIT_TEMPLATE_TYPE_SPEC(t)
# define BOOST_MATH_APPEND_EXPLICIT_TEMPLATE_NON_TYPE(t, v)
# define BOOST_MATH_APPEND_EXPLICIT_TEMPLATE_NON_TYPE_SPEC(t, v)
#endif // __SUNPRO_CC
#if (defined(__SUNPRO_CC) || defined(__hppa) || defined(__GNUC__)) && !defined(BOOST_MATH_SMALL_CONSTANT)
// Sun's compiler emits a hard error if a constant underflows,
// as does aCC on PA-RISC, while gcc issues a large number of warnings:
# define BOOST_MATH_SMALL_CONSTANT(x) 0.0
#else
# define BOOST_MATH_SMALL_CONSTANT(x) x
#endif
#if BOOST_WORKAROUND(BOOST_MSVC, < 1400)
//
// Define if constants too large for a float cause "bad"
// values to be stored in the data, rather than infinity
// or a suitably large value.
//
# define BOOST_MATH_BUGGY_LARGE_FLOAT_CONSTANTS
#endif
//
// Tune performance options for specific compilers:
//
#ifdef BOOST_MSVC
# define BOOST_MATH_POLY_METHOD 2
#elif defined(BOOST_INTEL)
# define BOOST_MATH_POLY_METHOD 2
# define BOOST_MATH_RATIONAL_METHOD 2
#elif defined(__GNUC__)
# define BOOST_MATH_POLY_METHOD 3
# define BOOST_MATH_RATIONAL_METHOD 3
# define BOOST_MATH_INT_TABLE_TYPE(RT, IT) RT
# define BOOST_MATH_INT_VALUE_SUFFIX(RV, SUF) RV##.0L
#endif
#if defined(BOOST_NO_LONG_LONG) && !defined(BOOST_MATH_INT_TABLE_TYPE)
# define BOOST_MATH_INT_TABLE_TYPE(RT, IT) RT
# define BOOST_MATH_INT_VALUE_SUFFIX(RV, SUF) RV##.0L
#endif
//
// The maximum order of polynomial that will be evaluated
// via an unrolled specialisation:
//
#ifndef BOOST_MATH_MAX_POLY_ORDER
# define BOOST_MATH_MAX_POLY_ORDER 17
#endif
//
// Set the method used to evaluate polynomials and rationals:
//
#ifndef BOOST_MATH_POLY_METHOD
# define BOOST_MATH_POLY_METHOD 1
#endif
#ifndef BOOST_MATH_RATIONAL_METHOD
# define BOOST_MATH_RATIONAL_METHOD 0
#endif
//
// decide whether to store constants as integers or reals:
//
#ifndef BOOST_MATH_INT_TABLE_TYPE
# define BOOST_MATH_INT_TABLE_TYPE(RT, IT) IT
#endif
#ifndef BOOST_MATH_INT_VALUE_SUFFIX
# define BOOST_MATH_INT_VALUE_SUFFIX(RV, SUF) RV##SUF
#endif
//
// Test whether to support __float128:
//
#if defined(_GLIBCXX_USE_FLOAT128) && defined(BOOST_GCC) && !defined(__STRICT_ANSI__) \
&& !defined(BOOST_MATH_DISABLE_FLOAT128) || defined(BOOST_MATH_USE_FLOAT128)
//
// Only enable this when the compiler really is GCC as clang and probably
// intel too don't support __float128 yet :-(
//
#ifndef BOOST_MATH_USE_FLOAT128
# define BOOST_MATH_USE_FLOAT128
#endif
# if defined(BOOST_INTEL) && defined(BOOST_INTEL_CXX_VERSION) && (BOOST_INTEL_CXX_VERSION >= 1310) && defined(__GNUC__)
# if (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6))
# define BOOST_MATH_FLOAT128_TYPE __float128
# endif
# elif defined(__GNUC__)
# define BOOST_MATH_FLOAT128_TYPE __float128
# endif
# ifndef BOOST_MATH_FLOAT128_TYPE
# define BOOST_MATH_FLOAT128_TYPE _Quad
# endif
#endif
//
// Check for WinCE with no iostream support:
//
#if defined(_WIN32_WCE) && !defined(__SGI_STL_PORT)
# define BOOST_MATH_NO_LEXICAL_CAST
#endif
//
// Helper macro for controlling the FP behaviour:
//
#ifndef BOOST_MATH_CONTROL_FP
# define BOOST_MATH_CONTROL_FP
#endif
//
// Helper macro for using statements:
//
#define BOOST_MATH_STD_USING_CORE \
using std::abs;\
using std::acos;\
using std::cos;\
using std::fmod;\
using std::modf;\
using std::tan;\
using std::asin;\
using std::cosh;\
using std::frexp;\
using std::pow;\
using std::tanh;\
using std::atan;\
using std::exp;\
using std::ldexp;\
using std::sin;\
using std::atan2;\
using std::fabs;\
using std::log;\
using std::sinh;\
using std::ceil;\
using std::floor;\
using std::log10;\
using std::sqrt;
#define BOOST_MATH_STD_USING BOOST_MATH_STD_USING_CORE
// Namespace aliasing: the embedded Boost copy was renamed to
// 'geofeatures_boost'; alias 'boost' onto it so unqualified boost::
// references inside this header keep working.
namespace geofeatures_boost {} namespace boost = geofeatures_boost; namespace geofeatures_boost{ namespace math{
namespace tools
{
// Three-way maximum.  The BOOST_PREVENT_MACRO_SUBSTITUTION token guards
// against a max() function-like macro leaking in from platform headers
// (e.g. <windows.h>), as does the (std::max) parenthesization below.
template <class T>
inline T max BOOST_PREVENT_MACRO_SUBSTITUTION(T a, T b, T c)
{
   return (std::max)((std::max)(a, b), c);
}
// Four-way maximum, composed from pairwise std::max calls.
template <class T>
inline T max BOOST_PREVENT_MACRO_SUBSTITUTION(T a, T b, T c, T d)
{
   return (std::max)((std::max)(a, b), (std::max)(c, d));
}
} // namespace tools
// No-op helper: passing a variable through this silences "unused
// variable" warnings without generating any code.
template <class T>
void suppress_unused_variable_warning(const T&)
{
}
namespace detail{
// Trait: true when T should be rounded like an integer -- either a
// built-in integral type, or (when compile-time numeric_limits are
// available) any type whose numeric_limits reports is_integer.
template <class T>
struct is_integer_for_rounding
{
   static const bool value = geofeatures_boost::is_integral<T>::value
#ifndef BOOST_NO_LIMITS_COMPILE_TIME_CONSTANTS
      || (std::numeric_limits<T>::is_specialized && std::numeric_limits<T>::is_integer)
#endif
      ;
};
}
}} // namespace geofeatures_boost namespace math
#ifdef __GLIBC_PREREQ
# if __GLIBC_PREREQ(2,14)
# define BOOST_MATH_HAVE_FIXED_GLIBC
# endif
#endif
#if ((defined(__linux__) && !defined(__UCLIBC__) && !defined(BOOST_MATH_HAVE_FIXED_GLIBC)) || defined(__QNX__) || defined(__IBMCPP__)) && !defined(BOOST_NO_FENV_H)
//
// This code was introduced in response to this glibc bug: http://sourceware.org/bugzilla/show_bug.cgi?id=2445
// Basically powl and expl can return garbage when the result is small and certain exception flags are set
// on entrance to these functions. This appears to have been fixed in Glibc 2.14 (May 2011).
// Much more information in this message thread: https://groups.google.com/forum/#!topic/boost-list/ZT99wtIFlb4
//
#include <boost/detail/fenv.hpp>
# ifdef FE_ALL_EXCEPT
// Alias 'boost' onto the renamed namespace (see note earlier in file).
namespace geofeatures_boost {} namespace boost = geofeatures_boost; namespace geofeatures_boost{ namespace math{
namespace detail
{
// RAII guard: saves and clears the FPU exception flags on construction
// and restores them on destruction.  Works around the glibc bug cited
// in the comment above (powl/expl returning garbage when certain
// exception flags are set on entry; fixed in glibc 2.14).
struct fpu_guard
{
   fpu_guard()
   {
      // Save the caller's flags, then start from a clean slate.
      fegetexceptflag(&m_flags, FE_ALL_EXCEPT);
      feclearexcept(FE_ALL_EXCEPT);
   }
   ~fpu_guard()
   {
      // Restore the flags exactly as they were on entry.
      fesetexceptflag(&m_flags, FE_ALL_EXCEPT);
   }
private:
   fexcept_t m_flags;
};
} // namespace detail
}} // namespaces
# define BOOST_FPU_EXCEPTION_GUARD geofeatures_boost::math::detail::fpu_guard local_guard_object;
# define BOOST_MATH_INSTRUMENT_FPU do{ fexcept_t cpu_flags; fegetexceptflag(&cpu_flags, FE_ALL_EXCEPT); BOOST_MATH_INSTRUMENT_VARIABLE(cpu_flags); } while(0);
# else
# define BOOST_FPU_EXCEPTION_GUARD
# define BOOST_MATH_INSTRUMENT_FPU
# endif
#else // All other platforms.
# define BOOST_FPU_EXCEPTION_GUARD
# define BOOST_MATH_INSTRUMENT_FPU
#endif
#ifdef BOOST_MATH_INSTRUMENT
# include <iostream>
# include <iomanip>
# include <typeinfo>
# define BOOST_MATH_INSTRUMENT_CODE(x) \
std::cout << std::setprecision(35) << __FILE__ << ":" << __LINE__ << " " << x << std::endl;
# define BOOST_MATH_INSTRUMENT_VARIABLE(name) BOOST_MATH_INSTRUMENT_CODE(BOOST_STRINGIZE(name) << " = " << name)
#else
# define BOOST_MATH_INSTRUMENT_CODE(x)
# define BOOST_MATH_INSTRUMENT_VARIABLE(name)
#endif
#endif // BOOST_MATH_TOOLS_CONFIG_HPP
| sachindeorah/geofeatures | GeoFeatures/boost/math/tools/config.hpp | C++ | apache-2.0 | 12,678 |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles database requests from other nova services."""
import copy
from nova.api.ec2 import ec2utils
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import api as compute_api
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor.tasks import live_migrate
from nova.db import base
from nova import exception
from nova.image import glance
from nova import manager
from nova import network
from nova.network.security_group import openstack_driver
from nova import notifications
from nova.objects import base as nova_object
from nova.objects import instance as instance_obj
from nova.objects import migration as migration_obj
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
from nova import quota
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova.scheduler import utils as scheduler_utils
LOG = logging.getLogger(__name__)
# Instead of having a huge list of arguments to instance_update(), we just
# accept a dict of fields to update and use this whitelist to validate it.
allowed_updates = ['task_state', 'vm_state', 'expected_task_state',
'power_state', 'access_ip_v4', 'access_ip_v6',
'launched_at', 'terminated_at', 'host', 'node',
'memory_mb', 'vcpus', 'root_gb', 'ephemeral_gb',
'instance_type_id', 'root_device_name', 'launched_on',
'progress', 'vm_mode', 'default_ephemeral_device',
'default_swap_device', 'root_device_name',
'system_metadata', 'updated_at'
]
# Fields that we want to convert back into a datetime object.
datetime_fields = ['launched_at', 'terminated_at', 'updated_at']
class ConductorManager(manager.Manager):
"""Mission: Conduct things.
The methods in the base API for nova-conductor are various proxy operations
performed on behalf of the nova-compute service running on compute nodes.
Compute nodes are not allowed to directly access the database, so this set
of methods allows them to get specific work done without locally accessing
the database.
The nova-conductor service also exposes an API in the 'compute_task'
namespace. See the ComputeTaskManager class for details.
"""
RPC_API_VERSION = '1.58'
def __init__(self, *args, **kwargs):
super(ConductorManager, self).__init__(service_name='conductor',
*args, **kwargs)
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
self._network_api = None
self._compute_api = None
self.compute_task_mgr = ComputeTaskManager()
self.quotas = quota.QUOTAS
self.cells_rpcapi = cells_rpcapi.CellsAPI()
def create_rpc_dispatcher(self, *args, **kwargs):
kwargs['additional_apis'] = [self.compute_task_mgr]
return super(ConductorManager, self).create_rpc_dispatcher(*args,
**kwargs)
@property
def network_api(self):
# NOTE(danms): We need to instantiate our network_api on first use
# to avoid the circular dependency that exists between our init
# and network_api's
if self._network_api is None:
self._network_api = network.API()
return self._network_api
@property
def compute_api(self):
if self._compute_api is None:
self._compute_api = compute_api.API()
return self._compute_api
def ping(self, context, arg):
# NOTE(russellb) This method can be removed in 2.0 of this API. It is
# now a part of the base rpc API.
return jsonutils.to_primitive({'service': 'conductor', 'arg': arg})
    @rpc_common.client_exceptions(KeyError, ValueError,
                                  exception.InvalidUUID,
                                  exception.InstanceNotFound,
                                  exception.UnexpectedTaskStateError)
    def instance_update(self, context, instance_uuid,
                        updates, service=None):
        """Update an instance row and emit an update notification.

        :param context: request context
        :param instance_uuid: UUID of the instance to update
        :param updates: dict of field -> new value; every key must be in
            the module-level ``allowed_updates`` whitelist
        :param service: service name forwarded to the notification
        :returns: the updated instance record as a primitive (dict)
        :raises KeyError: when ``updates`` contains a non-whitelisted key
        """
        for key, value in updates.iteritems():
            # Reject any field not explicitly whitelisted at module scope.
            if key not in allowed_updates:
                LOG.error(_("Instance update attempted for "
                            "'%(key)s' on %(instance_uuid)s"),
                          {'key': key, 'instance_uuid': instance_uuid})
                raise KeyError("unexpected update keyword '%s'" % key)
            # Datetime fields arrive over RPC serialized as strings;
            # convert them back to datetime objects before the DB call.
            if key in datetime_fields and isinstance(value, basestring):
                updates[key] = timeutils.parse_strtime(value)
        old_ref, instance_ref = self.db.instance_update_and_get_original(
            context, instance_uuid, updates)
        # Publish the old/new pair so listeners can diff the change.
        notifications.send_update(context, old_ref, instance_ref, service)
        return jsonutils.to_primitive(instance_ref)
@rpc_common.client_exceptions(exception.InstanceNotFound)
def instance_get(self, context, instance_id):
return jsonutils.to_primitive(
self.db.instance_get(context, instance_id))
@rpc_common.client_exceptions(exception.InstanceNotFound)
def instance_get_by_uuid(self, context, instance_uuid,
columns_to_join=None):
return jsonutils.to_primitive(
self.db.instance_get_by_uuid(context, instance_uuid,
columns_to_join))
# NOTE(hanlind): This method can be removed in v2.0 of the RPC API.
def instance_get_all(self, context):
return jsonutils.to_primitive(self.db.instance_get_all(context))
def instance_get_all_by_host(self, context, host, node=None,
columns_to_join=None):
if node is not None:
result = self.db.instance_get_all_by_host_and_node(
context.elevated(), host, node)
else:
result = self.db.instance_get_all_by_host(context.elevated(), host,
columns_to_join)
return jsonutils.to_primitive(result)
# NOTE(comstud): This method is now deprecated and can be removed in
# version v2.0 of the RPC API
@rpc_common.client_exceptions(exception.MigrationNotFound)
def migration_get(self, context, migration_id):
migration_ref = self.db.migration_get(context.elevated(),
migration_id)
return jsonutils.to_primitive(migration_ref)
# NOTE(comstud): This method is now deprecated and can be removed in
# version v2.0 of the RPC API
def migration_get_unconfirmed_by_dest_compute(self, context,
confirm_window,
dest_compute):
migrations = self.db.migration_get_unconfirmed_by_dest_compute(
context, confirm_window, dest_compute)
return jsonutils.to_primitive(migrations)
def migration_get_in_progress_by_host_and_node(self, context,
host, node):
migrations = self.db.migration_get_in_progress_by_host_and_node(
context, host, node)
return jsonutils.to_primitive(migrations)
# NOTE(comstud): This method can be removed in v2.0 of the RPC API.
def migration_create(self, context, instance, values):
values.update({'instance_uuid': instance['uuid'],
'source_compute': instance['host'],
'source_node': instance['node']})
migration_ref = self.db.migration_create(context.elevated(), values)
return jsonutils.to_primitive(migration_ref)
@rpc_common.client_exceptions(exception.MigrationNotFound)
def migration_update(self, context, migration, status):
migration_ref = self.db.migration_update(context.elevated(),
migration['id'],
{'status': status})
return jsonutils.to_primitive(migration_ref)
@rpc_common.client_exceptions(exception.AggregateHostExists)
def aggregate_host_add(self, context, aggregate, host):
host_ref = self.db.aggregate_host_add(context.elevated(),
aggregate['id'], host)
return jsonutils.to_primitive(host_ref)
@rpc_common.client_exceptions(exception.AggregateHostNotFound)
def aggregate_host_delete(self, context, aggregate, host):
self.db.aggregate_host_delete(context.elevated(),
aggregate['id'], host)
@rpc_common.client_exceptions(exception.AggregateNotFound)
def aggregate_get(self, context, aggregate_id):
aggregate = self.db.aggregate_get(context.elevated(), aggregate_id)
return jsonutils.to_primitive(aggregate)
def aggregate_get_by_host(self, context, host, key=None):
aggregates = self.db.aggregate_get_by_host(context.elevated(),
host, key)
return jsonutils.to_primitive(aggregates)
def aggregate_metadata_add(self, context, aggregate, metadata,
set_delete=False):
new_metadata = self.db.aggregate_metadata_add(context.elevated(),
aggregate['id'],
metadata, set_delete)
return jsonutils.to_primitive(new_metadata)
@rpc_common.client_exceptions(exception.AggregateMetadataNotFound)
def aggregate_metadata_delete(self, context, aggregate, key):
self.db.aggregate_metadata_delete(context.elevated(),
aggregate['id'], key)
def aggregate_metadata_get_by_host(self, context, host,
key='availability_zone'):
result = self.db.aggregate_metadata_get_by_host(context, host, key)
return jsonutils.to_primitive(result)
def bw_usage_update(self, context, uuid, mac, start_period,
bw_in=None, bw_out=None,
last_ctr_in=None, last_ctr_out=None,
last_refreshed=None,
update_cells=True):
if [bw_in, bw_out, last_ctr_in, last_ctr_out].count(None) != 4:
self.db.bw_usage_update(context, uuid, mac, start_period,
bw_in, bw_out, last_ctr_in, last_ctr_out,
last_refreshed,
update_cells=update_cells)
usage = self.db.bw_usage_get(context, uuid, start_period, mac)
return jsonutils.to_primitive(usage)
# NOTE(russellb) This method can be removed in 2.0 of this API. It is
# deprecated in favor of the method in the base API.
def get_backdoor_port(self, context):
return self.backdoor_port
def security_group_get_by_instance(self, context, instance):
group = self.db.security_group_get_by_instance(context,
instance['uuid'])
return jsonutils.to_primitive(group)
def security_group_rule_get_by_security_group(self, context, secgroup):
rules = self.db.security_group_rule_get_by_security_group(
context, secgroup['id'])
return jsonutils.to_primitive(rules, max_depth=4)
def provider_fw_rule_get_all(self, context):
rules = self.db.provider_fw_rule_get_all(context)
return jsonutils.to_primitive(rules)
def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
info = self.db.agent_build_get_by_triple(context, hypervisor, os,
architecture)
return jsonutils.to_primitive(info)
def block_device_mapping_update_or_create(self, context, values,
create=None):
if create is None:
bdm = self.db.block_device_mapping_update_or_create(context,
values)
elif create is True:
bdm = self.db.block_device_mapping_create(context, values)
else:
bdm = self.db.block_device_mapping_update(context,
values['id'],
values)
# NOTE:comstud): 'bdm' is always in the new format, so we
# account for this in cells/messaging.py
self.cells_rpcapi.bdm_update_or_create_at_top(context, bdm,
create=create)
def block_device_mapping_get_all_by_instance(self, context, instance,
legacy=True):
bdms = self.db.block_device_mapping_get_all_by_instance(
context, instance['uuid'])
if legacy:
bdms = block_device.legacy_mapping(bdms)
return jsonutils.to_primitive(bdms)
def block_device_mapping_destroy(self, context, bdms=None,
instance=None, volume_id=None,
device_name=None):
if bdms is not None:
for bdm in bdms:
self.db.block_device_mapping_destroy(context, bdm['id'])
# NOTE(comstud): bdm['id'] will be different in API cell,
# so we must try to destroy by device_name or volume_id.
# We need an instance_uuid in order to do this properly,
# too.
# I hope to clean a lot of this up in the object
# implementation.
instance_uuid = (bdm['instance_uuid'] or
(instance and instance['uuid']))
if not instance_uuid:
continue
# Better to be safe than sorry. device_name is not
# NULLable, however it could be an empty string.
if bdm['device_name']:
self.cells_rpcapi.bdm_destroy_at_top(
context, instance_uuid,
device_name=bdm['device_name'])
elif bdm['volume_id']:
self.cells_rpcapi.bdm_destroy_at_top(
context, instance_uuid,
volume_id=bdm['volume_id'])
elif instance is not None and volume_id is not None:
self.db.block_device_mapping_destroy_by_instance_and_volume(
context, instance['uuid'], volume_id)
self.cells_rpcapi.bdm_destroy_at_top(
context, instance['uuid'], volume_id=volume_id)
elif instance is not None and device_name is not None:
self.db.block_device_mapping_destroy_by_instance_and_device(
context, instance['uuid'], device_name)
self.cells_rpcapi.bdm_destroy_at_top(
context, instance['uuid'], device_name=device_name)
else:
# NOTE(danms): This shouldn't happen
raise exception.Invalid(_("Invalid block_device_mapping_destroy"
" invocation"))
def instance_get_all_by_filters(self, context, filters, sort_key,
sort_dir, columns_to_join=None):
result = self.db.instance_get_all_by_filters(
context, filters, sort_key, sort_dir,
columns_to_join=columns_to_join)
return jsonutils.to_primitive(result)
# NOTE(hanlind): This method can be removed in v2.0 of the RPC API.
def instance_get_all_hung_in_rebooting(self, context, timeout):
result = self.db.instance_get_all_hung_in_rebooting(context, timeout)
return jsonutils.to_primitive(result)
def instance_get_active_by_window(self, context, begin, end=None,
project_id=None, host=None):
# Unused, but cannot remove until major RPC version bump
result = self.db.instance_get_active_by_window(context, begin, end,
project_id, host)
return jsonutils.to_primitive(result)
def instance_get_active_by_window_joined(self, context, begin, end=None,
project_id=None, host=None):
result = self.db.instance_get_active_by_window_joined(
context, begin, end, project_id, host)
return jsonutils.to_primitive(result)
def instance_destroy(self, context, instance):
self.db.instance_destroy(context, instance['uuid'])
def instance_info_cache_delete(self, context, instance):
self.db.instance_info_cache_delete(context, instance['uuid'])
def instance_info_cache_update(self, context, instance, values):
self.db.instance_info_cache_update(context, instance['uuid'],
values)
def instance_type_get(self, context, instance_type_id):
result = self.db.flavor_get(context, instance_type_id)
return jsonutils.to_primitive(result)
def instance_fault_create(self, context, values):
result = self.db.instance_fault_create(context, values)
return jsonutils.to_primitive(result)
# NOTE(kerrin): This method can be removed in v2.0 of the RPC API.
def vol_get_usage_by_time(self, context, start_time):
result = self.db.vol_get_usage_by_time(context, start_time)
return jsonutils.to_primitive(result)
# NOTE(kerrin): The last_refreshed argument is unused by this method
# and can be removed in v2.0 of the RPC API.
def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
wr_bytes, instance, last_refreshed=None,
update_totals=False):
vol_usage = self.db.vol_usage_update(context, vol_id,
rd_req, rd_bytes,
wr_req, wr_bytes,
instance['uuid'],
instance['project_id'],
instance['user_id'],
instance['availability_zone'],
update_totals)
# We have just updated the database, so send the notification now
self.notifier.info(context, 'volume.usage',
compute_utils.usage_volume_info(vol_usage))
@rpc_common.client_exceptions(exception.ComputeHostNotFound,
exception.HostBinaryNotFound)
def service_get_all_by(self, context, topic=None, host=None, binary=None):
if not any((topic, host, binary)):
result = self.db.service_get_all(context)
elif all((topic, host)):
if topic == 'compute':
result = self.db.service_get_by_compute_host(context, host)
# FIXME(comstud) Potentially remove this on bump to v2.0
result = [result]
else:
result = self.db.service_get_by_host_and_topic(context,
host, topic)
elif all((host, binary)):
result = self.db.service_get_by_args(context, host, binary)
elif topic:
result = self.db.service_get_all_by_topic(context, topic)
elif host:
result = self.db.service_get_all_by_host(context, host)
return jsonutils.to_primitive(result)
def action_event_start(self, context, values):
evt = self.db.action_event_start(context, values)
return jsonutils.to_primitive(evt)
def action_event_finish(self, context, values):
evt = self.db.action_event_finish(context, values)
return jsonutils.to_primitive(evt)
def service_create(self, context, values):
svc = self.db.service_create(context, values)
return jsonutils.to_primitive(svc)
@rpc_common.client_exceptions(exception.ServiceNotFound)
def service_destroy(self, context, service_id):
self.db.service_destroy(context, service_id)
def compute_node_create(self, context, values):
result = self.db.compute_node_create(context, values)
return jsonutils.to_primitive(result)
def compute_node_update(self, context, node, values, prune_stats=False):
result = self.db.compute_node_update(context, node['id'], values,
prune_stats)
return jsonutils.to_primitive(result)
def compute_node_delete(self, context, node):
result = self.db.compute_node_delete(context, node['id'])
return jsonutils.to_primitive(result)
@rpc_common.client_exceptions(exception.ServiceNotFound)
def service_update(self, context, service, values):
svc = self.db.service_update(context, service['id'], values)
return jsonutils.to_primitive(svc)
def task_log_get(self, context, task_name, begin, end, host, state=None):
result = self.db.task_log_get(context, task_name, begin, end, host,
state)
return jsonutils.to_primitive(result)
def task_log_begin_task(self, context, task_name, begin, end, host,
task_items=None, message=None):
result = self.db.task_log_begin_task(context.elevated(), task_name,
begin, end, host, task_items,
message)
return jsonutils.to_primitive(result)
def task_log_end_task(self, context, task_name, begin, end, host,
errors, message=None):
result = self.db.task_log_end_task(context.elevated(), task_name,
begin, end, host, errors, message)
return jsonutils.to_primitive(result)
def notify_usage_exists(self, context, instance, current_period=False,
ignore_missing_network_data=True,
system_metadata=None, extra_usage_info=None):
compute_utils.notify_usage_exists(self.notifier, context, instance,
current_period,
ignore_missing_network_data,
system_metadata, extra_usage_info)
def security_groups_trigger_handler(self, context, event, args):
self.security_group_api.trigger_handler(event, context, *args)
def security_groups_trigger_members_refresh(self, context, group_ids):
self.security_group_api.trigger_members_refresh(context, group_ids)
def network_migrate_instance_start(self, context, instance, migration):
self.network_api.migrate_instance_start(context, instance, migration)
def network_migrate_instance_finish(self, context, instance, migration):
self.network_api.migrate_instance_finish(context, instance, migration)
def quota_commit(self, context, reservations, project_id=None,
user_id=None):
quota.QUOTAS.commit(context, reservations, project_id=project_id,
user_id=user_id)
def quota_rollback(self, context, reservations, project_id=None,
user_id=None):
quota.QUOTAS.rollback(context, reservations, project_id=project_id,
user_id=user_id)
def get_ec2_ids(self, context, instance):
ec2_ids = {}
ec2_ids['instance-id'] = ec2utils.id_to_ec2_inst_id(instance['uuid'])
ec2_ids['ami-id'] = ec2utils.glance_id_to_ec2_id(context,
instance['image_ref'])
for image_type in ['kernel', 'ramdisk']:
if '%s_id' % image_type in instance:
image_id = instance['%s_id' % image_type]
ec2_image_type = ec2utils.image_type(image_type)
ec2_id = ec2utils.glance_id_to_ec2_id(context, image_id,
ec2_image_type)
ec2_ids['%s-id' % image_type] = ec2_id
return ec2_ids
# NOTE(danms): This method is now deprecated and can be removed in
# version v2.0 of the RPC API
def compute_stop(self, context, instance, do_cast=True):
# NOTE(mriedem): Clients using an interface before 1.43 will be sending
# dicts so we need to handle that here since compute/api::stop()
# requires an object.
if isinstance(instance, dict):
instance = instance_obj.Instance._from_db_object(
context, instance_obj.Instance(), instance)
self.compute_api.stop(context, instance, do_cast)
# NOTE(comstud): This method is now deprecated and can be removed in
# version v2.0 of the RPC API
def compute_confirm_resize(self, context, instance, migration_ref):
if isinstance(instance, dict):
attrs = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
instance = instance_obj.Instance._from_db_object(
context, instance_obj.Instance(), instance,
expected_attrs=attrs)
if isinstance(migration_ref, dict):
migration_ref = migration_obj.Migration._from_db_object(
context.elevated(), migration_ref)
self.compute_api.confirm_resize(context, instance,
migration=migration_ref)
def compute_unrescue(self, context, instance):
self.compute_api.unrescue(context, instance)
def _object_dispatch(self, target, method, context, args, kwargs):
"""Dispatch a call to an object method.
This ensures that object methods get called and any exception
that is raised gets wrapped in a ClientException for forwarding
back to the caller (without spamming the conductor logs).
"""
try:
# NOTE(danms): Keep the getattr inside the try block since
# a missing method is really a client problem
return getattr(target, method)(context, *args, **kwargs)
except Exception:
raise rpc_common.ClientException()
def object_class_action(self, context, objname, objmethod,
objver, args, kwargs):
"""Perform a classmethod action on an object."""
objclass = nova_object.NovaObject.obj_class_from_name(objname,
objver)
return self._object_dispatch(objclass, objmethod, context,
args, kwargs)
def object_action(self, context, objinst, objmethod, args, kwargs):
"""Perform an action on an object."""
oldobj = copy.copy(objinst)
result = self._object_dispatch(objinst, objmethod, context,
args, kwargs)
updates = dict()
# NOTE(danms): Diff the object with the one passed to us and
# generate a list of changes to forward back
for field in objinst.fields:
if not objinst.obj_attr_is_set(field):
# Avoid demand-loading anything
continue
if (not oldobj.obj_attr_is_set(field) or
oldobj[field] != objinst[field]):
updates[field] = objinst._attr_to_primitive(field)
# This is safe since a field named this would conflict with the
# method anyway
updates['obj_what_changed'] = objinst.obj_what_changed()
return updates, result
# NOTE(danms): This method is now deprecated and can be removed in
# v2.0 of the RPC API
def compute_reboot(self, context, instance, reboot_type):
self.compute_api.reboot(context, instance, reboot_type)
class ComputeTaskManager(base.Base):
"""Namespace for compute methods.
This class presents an rpc API for nova-conductor under the 'compute_task'
namespace. The methods here are compute operations that are invoked
by the API service. These methods see the operation to completion, which
may involve coordinating activities on multiple compute nodes.
"""
RPC_API_NAMESPACE = 'compute_task'
RPC_API_VERSION = '1.6'
def __init__(self):
super(ComputeTaskManager, self).__init__()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self.image_service = glance.get_default_image_service()
self.quotas = quota.QUOTAS
@rpc_common.client_exceptions(exception.NoValidHost,
exception.ComputeServiceUnavailable,
exception.InvalidHypervisorType,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.MigrationPreCheckError)
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
flavor, block_migration, disk_over_commit, reservations=None):
if instance and not isinstance(instance, instance_obj.Instance):
# NOTE(danms): Until v2 of the RPC API, we need to tolerate
# old-world instance objects here
attrs = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
instance = instance_obj.Instance._from_db_object(
context, instance_obj.Instance(), instance,
expected_attrs=attrs)
if live and not rebuild and not flavor:
self._live_migrate(context, instance, scheduler_hint,
block_migration, disk_over_commit)
elif not live and not rebuild and flavor:
instance_uuid = instance['uuid']
with compute_utils.EventReporter(context, ConductorManager(),
'cold_migrate', instance_uuid):
self._cold_migrate(context, instance, flavor,
scheduler_hint['filter_properties'],
reservations)
else:
raise NotImplementedError()
def _cold_migrate(self, context, instance, flavor, filter_properties,
reservations):
image_ref = instance.image_ref
image = compute_utils.get_image_metadata(
context, self.image_service, image_ref, instance)
request_spec = scheduler_utils.build_request_spec(
context, image, [instance], instance_type=flavor)
try:
hosts = self.scheduler_rpcapi.select_destinations(
context, request_spec, filter_properties)
host_state = hosts[0]
except exception.NoValidHost as ex:
vm_state = instance['vm_state']
if not vm_state:
vm_state = vm_states.ACTIVE
updates = {'vm_state': vm_state, 'task_state': None}
self._set_vm_state_and_notify(context, 'migrate_server',
updates, ex, request_spec)
if reservations:
self.quotas.rollback(context, reservations)
LOG.warning(_("No valid host found for cold migrate"))
return
try:
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
# context is not serializable
filter_properties.pop('context', None)
# TODO(timello): originally, instance_type in request_spec
# on compute.api.resize does not have 'extra_specs', so we
# remove it for now to keep tests backward compatibility.
request_spec['instance_type'].pop('extra_specs')
(host, node) = (host_state['host'], host_state['nodename'])
self.compute_rpcapi.prep_resize(
context, image, instance,
flavor, host,
reservations, request_spec=request_spec,
filter_properties=filter_properties, node=node)
except Exception as ex:
with excutils.save_and_reraise_exception():
updates = {'vm_state': vm_states.ERROR,
'task_state': None}
self._set_vm_state_and_notify(context, 'migrate_server',
updates, ex, request_spec)
if reservations:
self.quotas.rollback(context, reservations)
def _set_vm_state_and_notify(self, context, method, updates, ex,
request_spec):
scheduler_utils.set_vm_state_and_notify(
context, 'compute_task', method, updates,
ex, request_spec, self.db)
def _live_migrate(self, context, instance, scheduler_hint,
block_migration, disk_over_commit):
destination = scheduler_hint.get("host")
try:
live_migrate.execute(context, instance, destination,
block_migration, disk_over_commit)
except (exception.NoValidHost,
exception.ComputeServiceUnavailable,
exception.InvalidHypervisorType,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.MigrationPreCheckError) as ex:
with excutils.save_and_reraise_exception():
#TODO(johngarbutt) - eventually need instance actions here
request_spec = {'instance_properties': {
'uuid': instance['uuid'], },
}
scheduler_utils.set_vm_state_and_notify(context,
'compute_task', 'migrate_server',
dict(vm_state=instance['vm_state'],
task_state=None,
expected_task_state=task_states.MIGRATING,),
ex, request_spec, self.db)
except Exception as ex:
with excutils.save_and_reraise_exception():
request_spec = {'instance_properties': {
'uuid': instance['uuid'], },
}
scheduler_utils.set_vm_state_and_notify(context,
'compute_task', 'migrate_server',
{'vm_state': vm_states.ERROR},
ex, request_spec, self.db)
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
security_groups, block_device_mapping, legacy_bdm=True):
request_spec = scheduler_utils.build_request_spec(context, image,
instances)
# NOTE(alaski): For compatibility until a new scheduler method is used.
request_spec.update({'block_device_mapping': block_device_mapping,
'security_group': security_groups})
self.scheduler_rpcapi.run_instance(context, request_spec=request_spec,
admin_password=admin_password, injected_files=injected_files,
requested_networks=requested_networks, is_first_time=True,
filter_properties=filter_properties,
legacy_bdm_in_spec=legacy_bdm)
def _get_image(self, context, image_id):
if not image_id:
return None
return self.image_service.show(context, image_id)
def _delete_image(self, context, image_id):
(image_service, image_id) = glance.get_remote_image_service(context,
image_id)
return image_service.delete(context, image_id)
def _schedule_instances(self, context, image, filter_properties,
*instances):
request_spec = scheduler_utils.build_request_spec(context, image,
instances)
# dict(host='', nodename='', limits='')
hosts = self.scheduler_rpcapi.select_destinations(context,
request_spec, filter_properties)
return hosts
def unshelve_instance(self, context, instance):
    """Bring a shelved instance back into service.

    A SHELVED instance still lives on its compute host and is simply
    powered back on (and its shelve snapshot deleted).  A
    SHELVED_OFFLOADED instance must be rescheduled to a host and rebuilt
    from the shelved snapshot image.  Any other vm_state is an error and
    puts the instance into ERROR state.
    """
    sys_meta = instance.system_metadata

    if instance.vm_state == vm_states.SHELVED:
        # Instance is still on a host: just power it back on.
        instance.task_state = task_states.POWERING_ON
        instance.save(expected_task_state=task_states.UNSHELVING)
        self.compute_rpcapi.start_instance(context, instance)
        snapshot_id = sys_meta.get('shelved_image_id')
        if snapshot_id:
            self._delete_image(context, snapshot_id)
    elif instance.vm_state == vm_states.SHELVED_OFFLOADED:
        try:
            with compute_utils.EventReporter(context, self.db,
                                             'get_image_info',
                                             instance.uuid):
                image = self._get_image(context,
                                        sys_meta['shelved_image_id'])
        except exception.ImageNotFound:
            with excutils.save_and_reraise_exception():
                # BUG FIX: this handler previously logged the unrelated
                # "vm_state not SHELVED or SHELVED_OFFLOADED" message;
                # report the actual failure (missing shelved image).
                LOG.error(_('Unshelve attempted but the shelved image '
                            'could not be found'), instance=instance)
                instance.vm_state = vm_states.ERROR
                instance.save()
        hosts = self._schedule_instances(context, image, [], instance)
        host = hosts.pop(0)['host']
        self.compute_rpcapi.unshelve_instance(context, instance, host,
                                              image)
    else:
        LOG.error(_('Unshelve attempted but vm_state not SHELVED or '
                    'SHELVED_OFFLOADED'), instance=instance)
        instance.vm_state = vm_states.ERROR
        instance.save()
        return

    # Unshelve dispatched successfully: drop the shelve bookkeeping from
    # system_metadata.
    for key in ['shelved_at', 'shelved_image_id', 'shelved_host']:
        if key in sys_meta:
            del sys_meta[key]
    instance.system_metadata = sys_meta
    instance.save()
| pombredanne/MOG | nova/conductor/manager.py | Python | apache-2.0 | 39,345 |
' Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
Imports System.Threading
Imports System.Threading.Tasks
Imports System.Xml.Linq
Imports Microsoft.CodeAnalysis.Completion
Imports Microsoft.CodeAnalysis.Editor.Implementation.IntelliSense.Completion
Imports Microsoft.CodeAnalysis.Editor.UnitTests.Extensions
Imports Microsoft.CodeAnalysis.Editor.UnitTests.Workspaces
Imports Microsoft.CodeAnalysis.Options
Imports Microsoft.CodeAnalysis.VisualBasic.Completion.SuggestionMode
Namespace Microsoft.CodeAnalysis.Editor.VisualBasic.UnitTests.Completion.CompletionProviders
' Tests for the Visual Basic suggestion-mode ("builder") completion provider.
' At each $$ caret position, VerifyBuilderAsync asserts that completion enters
' suggestion mode (a soft-selected builder item), while VerifyNotBuilderAsync
' asserts normal completion with no builder item.
Public Class SuggestionModeCompletionProviderTests
Inherits AbstractVisualBasicCompletionProviderTests
Public Sub New(workspaceFixture As VisualBasicTestWorkspaceFixture)
MyBase.New(workspaceFixture)
End Sub
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function TestFieldDeclaration1() As Task
Dim markup = <a>Class C
$$
End Class</a>
Await VerifyNotBuilderAsync(markup)
End Function
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function TestFieldDeclaration2() As Task
Dim markup = <a>Class C
Public $$
End Class</a>
Await VerifyBuilderAsync(markup)
End Function
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function TestFieldDeclaration3() As Task
Dim markup = <a>Module M
Public $$
End Module</a>
Await VerifyBuilderAsync(markup)
End Function
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function TestFieldDeclaration4() As Task
Dim markup = <a>Structure S
Public $$
End Structure</a>
Await VerifyBuilderAsync(markup)
End Function
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function TestFieldDeclaration5() As Task
Dim markup = <a>Class C
WithEvents $$
End Class</a>
Await VerifyBuilderAsync(markup)
End Function
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function TestFieldDeclaration6() As Task
Dim markup = <a>Class C
Protected Friend $$
End Class</a>
Await VerifyBuilderAsync(markup)
End Function
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function TestParameterDeclaration1() As Task
Dim markup = <a>Class C
Public Sub Bar($$
End Sub
End Class</a>
Await VerifyBuilderAsync(markup)
End Function
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function TestParameterDeclaration2() As Task
Dim markup = <a>Class C
Public Sub Bar(Optional foo as Integer, $$
End Sub
End Class</a>
Await VerifyNotBuilderAsync(markup)
End Function
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function TestParameterDeclaration3() As Task
Dim markup = <a>Class C
Public Sub Bar(Optional $$
End Sub
End Class</a>
Await VerifyBuilderAsync(markup)
End Function
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function TestParameterDeclaration4() As Task
Dim markup = <a>Class C
Public Sub Bar(Optional x $$
End Sub
End Class</a>
Await VerifyNotBuilderAsync(markup)
End Function
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function TestParameterDeclaration5() As Task
Dim markup = <a>Class C
Public Sub Bar(Optional x As $$
End Sub
End Class</a>
Await VerifyNotBuilderAsync(markup)
End Function
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function TestParameterDeclaration6() As Task
Dim markup = <a>Class C
Public Sub Bar(Optional x As Integer $$
End Sub
End Class</a>
Await VerifyNotBuilderAsync(markup)
End Function
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function TestParameterDeclaration7() As Task
Dim markup = <a>Class C
Public Sub Bar(ByVal $$
End Sub
End Class</a>
Await VerifyBuilderAsync(markup)
End Function
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function TestParameterDeclaration8() As Task
Dim markup = <a>Class C
Public Sub Bar(ByVal x $$
End Sub
End Class</a>
Await VerifyNotBuilderAsync(markup)
End Function
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function TestParameterDeclaration9() As Task
Dim markup = <a>Class C
Sub Foo $$
End Class</a>
Await VerifyNotBuilderAsync(markup)
End Function
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function TestParameterDeclaration10() As Task
Dim markup = <a>Class C
Public Property SomeProp $$
End Class</a>
Await VerifyNotBuilderAsync(markup)
End Function
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function TestSelectClause1() As Task
Dim markup = <a>Class z
Sub bar()
Dim a = New Integer(1, 2, 3) {}
Dim foo = From z In a
Select $$
End Sub
End Class</a>
Await VerifyBuilderAsync(markup)
End Function
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function TestSelectClause2() As Task
Dim markup = <a>Class z
Sub bar()
Dim a = New Integer(1, 2, 3) {}
Dim foo = From z In a
Select 1, $$
End Sub
End Class</a>
Await VerifyBuilderAsync(markup)
End Function
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function TestForStatement1() As Task
Dim markup = <a>Class z
Sub bar()
For $$
End Sub
End Class</a>
Await VerifyBuilderAsync(markup)
End Function
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function TestForStatement2() As Task
Dim markup = <a>Class z
Sub bar()
For $$ = 1 To 10
Next
End Sub
End Class</a>
Await VerifyBuilderAsync(markup)
End Function
<WorkItem(545351, "http://vstfdevdiv:8080/DevDiv2/DevDiv/_workitems/edit/545351")>
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function TestBuilderWhenOptionExplicitOff() As Task
Dim markup = <a>Option Explicit Off
Class C1
Sub M()
Console.WriteLine($$
End Sub
End Class
</a>
Await VerifyBuilderAsync(markup)
End Function
<WorkItem(546659, "http://vstfdevdiv:8080/DevDiv2/DevDiv/_workitems/edit/546659")>
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function TestUsingStatement() As Task
Dim markup = <a>
Class C1
Sub M()
Using $$
End Sub
End Class
</a>
Await VerifyBuilderAsync(markup)
End Function
<WorkItem(734596, "http://vstfdevdiv:8080/DevDiv2/DevDiv/_workitems/edit/734596")>
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function TestOptionExplicitOffStatementLevel1() As Task
Dim markup = <a>
Option Explicit Off
Class C1
Sub M()
$$
End Sub
End Class
</a>
Await VerifyBuilderAsync(markup)
End Function
<WorkItem(734596, "http://vstfdevdiv:8080/DevDiv2/DevDiv/_workitems/edit/734596")>
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function TestOptionExplicitOffStatementLevel2() As Task
Dim markup = <a>
Option Explicit Off
Class C1
Sub M()
a = $$
End Sub
End Class
</a>
Await VerifyBuilderAsync(markup)
End Function
<WorkItem(960416, "http://vstfdevdiv:8080/DevDiv2/DevDiv/_workitems/edit/960416")>
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function TestReadonlyField() As Task
Dim markup = <a>
Class C1
Readonly $$
Sub M()
End Sub
End Class
</a>
Await VerifyBuilderAsync(markup)
End Function
<WorkItem(1044441, "http://vstfdevdiv:8080/DevDiv2/DevDiv/_workitems/edit/1044441")>
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function BuilderInDebugger() As Task
Dim markup = <a>
Class C1
Sub Foo()
Dim __o = $$
End Sub
End Class
</a>
Await VerifyBuilderAsync(markup, CompletionTrigger.Default, useDebuggerOptions:=True)
End Function
<WorkItem(7213, "https://github.com/dotnet/roslyn/issues/7213")>
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function NamespaceDeclarationName_Unqualified() As Task
Dim markup = <a>
Namespace $$
End Class
</a>
Await VerifyBuilderAsync(markup, CompletionTrigger.Default)
End Function
<WorkItem(7213, "https://github.com/dotnet/roslyn/issues/7213")>
<Fact, Trait(Traits.Feature, Traits.Features.Completion)>
Public Async Function NamespaceDeclarationName_Qualified() As Task
Dim markup = <a>
Namespace A.$$
End Class
</a>
Await VerifyBuilderAsync(markup, CompletionTrigger.Default)
End Function
' Helper: assert that no suggestion-mode (builder) item is produced at $$.
Private Function VerifyNotBuilderAsync(markup As XElement, Optional triggerInfo As CompletionTrigger? = Nothing, Optional useDebuggerOptions As Boolean = False) As Task
Return VerifySuggestionModeWorkerAsync(markup, isBuilder:=False, triggerInfo:=triggerInfo, useDebuggerOptions:=useDebuggerOptions)
End Function
' Helper: assert that a suggestion-mode (builder) item IS produced at $$.
Private Function VerifyBuilderAsync(markup As XElement, Optional triggerInfo As CompletionTrigger? = Nothing, Optional useDebuggerOptions As Boolean = False) As Task
Return VerifySuggestionModeWorkerAsync(markup, isBuilder:=True, triggerInfo:=triggerInfo, useDebuggerOptions:=useDebuggerOptions)
End Function
' Core worker: runs the provider against the markup (and again against a
' speculative semantic model when available) and checks the builder state.
Private Async Function VerifySuggestionModeWorkerAsync(markup As XElement, isBuilder As Boolean, triggerInfo As CompletionTrigger?, Optional useDebuggerOptions As Boolean = False) As Task
Dim code As String = Nothing
Dim position As Integer = 0
MarkupTestFile.GetPosition(markup.NormalizedValue, code, position)
Using workspaceFixture = New VisualBasicTestWorkspaceFixture()
Dim options = If(useDebuggerOptions,
(Await workspaceFixture.GetWorkspaceAsync()).Options.WithDebuggerCompletionOptions(),
(Await workspaceFixture.GetWorkspaceAsync()).Options)
Dim document1 = Await workspaceFixture.UpdateDocumentAsync(code, SourceCodeKind.Regular)
Await CheckResultsAsync(document1, position, isBuilder, triggerInfo, options)
If Await CanUseSpeculativeSemanticModelAsync(document1, position) Then
Dim document2 = Await workspaceFixture.UpdateDocumentAsync(code, SourceCodeKind.Regular, cleanBeforeUpdate:=False)
Await CheckResultsAsync(document2, position, isBuilder, triggerInfo, options)
End If
End Using
End Function
' Invokes the completion service and asserts presence/absence of the
' SuggestionModeItem according to isBuilder.
Private Async Function CheckResultsAsync(document As Document, position As Integer, isBuilder As Boolean, triggerInfo As CompletionTrigger?, options As OptionSet) As Task
triggerInfo = If(triggerInfo, CompletionTrigger.CreateInsertionTrigger("a"c))
Dim service = GetCompletionService(document.Project.Solution.Workspace)
Dim context = Await service.GetContextAsync(
service.ExclusiveProviders?(0), document, position, triggerInfo.Value, options, CancellationToken.None)
If isBuilder Then
Assert.NotNull(context)
Assert.NotNull(context.SuggestionModeItem)
Else
If context IsNot Nothing Then
Assert.True(context.SuggestionModeItem Is Nothing, "group.Builder = " & If(context.SuggestionModeItem IsNot Nothing, context.SuggestionModeItem.DisplayText, "null"))
End If
End If
End Function
' The provider under test.
Friend Overrides Function CreateCompletionProvider() As CompletionProvider
Return New VisualBasicSuggestionModeCompletionProvider()
End Function
End Class
End Namespace
| jaredpar/roslyn | src/EditorFeatures/VisualBasicTest/Completion/CompletionProviders/SuggestionModeCompletionProviderTests.vb | Visual Basic | apache-2.0 | 13,083 |
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Generated code. DO NOT EDIT!
namespace Google.Cloud.Orchestration.Airflow.Service.V1.Snippets
{
using Google.Api.Gax;
using System;
using System.Linq;
using System.Threading.Tasks;
/// <summary>
/// Generated snippets demonstrating <c>ImageVersionsClient.ListImageVersions</c>
/// in request-object, parameter, synchronous, and asynchronous forms.
/// This class is generated code; do not edit by hand.
/// </summary>
public sealed class AllGeneratedImageVersionsClientSnippets
{
    /// <summary>Snippet for ListImageVersions</summary>
    public void ListImageVersionsRequestObject()
    {
        // Snippet: ListImageVersions(ListImageVersionsRequest, CallSettings)
        // Create client
        ImageVersionsClient imageVersionsClient = ImageVersionsClient.Create();
        // Initialize request argument(s)
        ListImageVersionsRequest request = new ListImageVersionsRequest
        {
            Parent = "",
            IncludePastReleases = false,
        };
        // Make the request
        PagedEnumerable<ListImageVersionsResponse, ImageVersion> response = imageVersionsClient.ListImageVersions(request);

        // Iterate over all response items, lazily performing RPCs as required
        foreach (ImageVersion item in response)
        {
            // Do something with each item
            Console.WriteLine(item);
        }

        // Or iterate over pages (of server-defined size), performing one RPC per page
        foreach (ListImageVersionsResponse page in response.AsRawResponses())
        {
            // Do something with each page of items
            Console.WriteLine("A page of results:");
            foreach (ImageVersion item in page)
            {
                // Do something with each item
                Console.WriteLine(item);
            }
        }

        // Or retrieve a single page of known size (unless it's the final page), performing as many RPCs as required
        int pageSize = 10;
        Page<ImageVersion> singlePage = response.ReadPage(pageSize);
        // Do something with the page of items
        Console.WriteLine($"A page of {pageSize} results (unless it's the final page):");
        foreach (ImageVersion item in singlePage)
        {
            // Do something with each item
            Console.WriteLine(item);
        }
        // Store the pageToken, for when the next page is required.
        string nextPageToken = singlePage.NextPageToken;
        // End snippet
    }

    /// <summary>Snippet for ListImageVersionsAsync</summary>
    public async Task ListImageVersionsRequestObjectAsync()
    {
        // Snippet: ListImageVersionsAsync(ListImageVersionsRequest, CallSettings)
        // Create client
        ImageVersionsClient imageVersionsClient = await ImageVersionsClient.CreateAsync();
        // Initialize request argument(s)
        ListImageVersionsRequest request = new ListImageVersionsRequest
        {
            Parent = "",
            IncludePastReleases = false,
        };
        // Make the request
        PagedAsyncEnumerable<ListImageVersionsResponse, ImageVersion> response = imageVersionsClient.ListImageVersionsAsync(request);

        // Iterate over all response items, lazily performing RPCs as required
        await response.ForEachAsync((ImageVersion item) =>
        {
            // Do something with each item
            Console.WriteLine(item);
        });

        // Or iterate over pages (of server-defined size), performing one RPC per page
        await response.AsRawResponses().ForEachAsync((ListImageVersionsResponse page) =>
        {
            // Do something with each page of items
            Console.WriteLine("A page of results:");
            foreach (ImageVersion item in page)
            {
                // Do something with each item
                Console.WriteLine(item);
            }
        });

        // Or retrieve a single page of known size (unless it's the final page), performing as many RPCs as required
        int pageSize = 10;
        Page<ImageVersion> singlePage = await response.ReadPageAsync(pageSize);
        // Do something with the page of items
        Console.WriteLine($"A page of {pageSize} results (unless it's the final page):");
        foreach (ImageVersion item in singlePage)
        {
            // Do something with each item
            Console.WriteLine(item);
        }
        // Store the pageToken, for when the next page is required.
        string nextPageToken = singlePage.NextPageToken;
        // End snippet
    }

    /// <summary>Snippet for ListImageVersions</summary>
    public void ListImageVersions()
    {
        // Snippet: ListImageVersions(string, string, int?, CallSettings)
        // Create client
        ImageVersionsClient imageVersionsClient = ImageVersionsClient.Create();
        // Initialize request argument(s)
        string parent = "";
        // Make the request
        PagedEnumerable<ListImageVersionsResponse, ImageVersion> response = imageVersionsClient.ListImageVersions(parent);

        // Iterate over all response items, lazily performing RPCs as required
        foreach (ImageVersion item in response)
        {
            // Do something with each item
            Console.WriteLine(item);
        }

        // Or iterate over pages (of server-defined size), performing one RPC per page
        foreach (ListImageVersionsResponse page in response.AsRawResponses())
        {
            // Do something with each page of items
            Console.WriteLine("A page of results:");
            foreach (ImageVersion item in page)
            {
                // Do something with each item
                Console.WriteLine(item);
            }
        }

        // Or retrieve a single page of known size (unless it's the final page), performing as many RPCs as required
        int pageSize = 10;
        Page<ImageVersion> singlePage = response.ReadPage(pageSize);
        // Do something with the page of items
        Console.WriteLine($"A page of {pageSize} results (unless it's the final page):");
        foreach (ImageVersion item in singlePage)
        {
            // Do something with each item
            Console.WriteLine(item);
        }
        // Store the pageToken, for when the next page is required.
        string nextPageToken = singlePage.NextPageToken;
        // End snippet
    }

    /// <summary>Snippet for ListImageVersionsAsync</summary>
    public async Task ListImageVersionsAsync()
    {
        // Snippet: ListImageVersionsAsync(string, string, int?, CallSettings)
        // Create client
        ImageVersionsClient imageVersionsClient = await ImageVersionsClient.CreateAsync();
        // Initialize request argument(s)
        string parent = "";
        // Make the request
        PagedAsyncEnumerable<ListImageVersionsResponse, ImageVersion> response = imageVersionsClient.ListImageVersionsAsync(parent);

        // Iterate over all response items, lazily performing RPCs as required
        await response.ForEachAsync((ImageVersion item) =>
        {
            // Do something with each item
            Console.WriteLine(item);
        });

        // Or iterate over pages (of server-defined size), performing one RPC per page
        await response.AsRawResponses().ForEachAsync((ListImageVersionsResponse page) =>
        {
            // Do something with each page of items
            Console.WriteLine("A page of results:");
            foreach (ImageVersion item in page)
            {
                // Do something with each item
                Console.WriteLine(item);
            }
        });

        // Or retrieve a single page of known size (unless it's the final page), performing as many RPCs as required
        int pageSize = 10;
        Page<ImageVersion> singlePage = await response.ReadPageAsync(pageSize);
        // Do something with the page of items
        Console.WriteLine($"A page of {pageSize} results (unless it's the final page):");
        foreach (ImageVersion item in singlePage)
        {
            // Do something with each item
            Console.WriteLine(item);
        }
        // Store the pageToken, for when the next page is required.
        string nextPageToken = singlePage.NextPageToken;
        // End snippet
    }
}
}
| googleapis/google-cloud-dotnet | apis/Google.Cloud.Orchestration.Airflow.Service.V1/Google.Cloud.Orchestration.Airflow.Service.V1.Snippets/ImageVersionsClientSnippets.g.cs | C# | apache-2.0 | 9,411 |
/*
 * Rules for CKEditor content
 *
 * Scoped under .textEditorContent so that rich-text HTML produced by the
 * editor stays constrained to its container.
 */
.textEditorContent {
    font-size: 0.9em;
    /* break long words/URLs instead of overflowing the container */
    word-wrap: break-word;
    word-break: break-word;
}

/* scale embedded images down to fit the container */
.textEditorContent img {
    height: auto;
    max-width: 100%;
    max-height: 100%;
}

.textEditorContent ul {
    margin: inherit;
    padding: 10px 40px;
}

/* light dotted borders for tables inside descriptions */
.textEditorContent .description table,
.textEditorContent .description th,
.textEditorContent .description td {
    border: 1px dotted #d3d3d3;
}

/* suppress the default dotted underline/border on abbreviations */
.textEditorContent .description abbr[title] {
    border: none;
}

.textEditorContent .description select {
    font-family: unset;
    color: initial;
}

/* uniform vertical rhythm for headings, paragraphs and lists */
.textEditorContent .description h1,
.textEditorContent .description h2,
.textEditorContent .description h3,
.textEditorContent .description h4,
.textEditorContent .description h5,
.textEditorContent .description h6,
.textEditorContent .description p,
.textEditorContent .description ul {
    margin: 0.5em 0;
}

.textEditorContent .description hr {
    border-color: #ccc;
}
} | regan-sarwas/map-series-storytelling-template-js | src/app/storymaps/tpl/ui/Common.css | CSS | apache-2.0 | 940 |
/**
* Copyright (C) 2011-2012 Typesafe Inc. <http://typesafe.com>
*/
package com.typesafe.config.impl;
import java.io.ObjectStreamException;
import java.io.Serializable;
import java.math.BigInteger;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import com.typesafe.config.ConfigException;
import com.typesafe.config.ConfigObject;
import com.typesafe.config.ConfigOrigin;
import com.typesafe.config.ConfigRenderOptions;
import com.typesafe.config.ConfigValue;
final class SimpleConfigObject extends AbstractConfigObject implements Serializable {
private static final long serialVersionUID = 2L;
// this map should never be modified - assume immutable
final private Map<String, AbstractConfigValue> value;
final private boolean resolved;
final private boolean ignoresFallbacks;
// Primary constructor. The supplied map is captured as-is and must never be
// mutated afterwards (the class assumes it is immutable). The caller-supplied
// status is cross-checked against the values as a sanity check.
SimpleConfigObject(ConfigOrigin origin,
        Map<String, AbstractConfigValue> value, ResolveStatus status,
        boolean ignoresFallbacks) {
    super(origin);
    if (value == null)
        throw new ConfigException.BugOrBroken(
                "creating config object with null map");
    this.value = value;
    this.resolved = status == ResolveStatus.RESOLVED;
    this.ignoresFallbacks = ignoresFallbacks;

    // Kind of an expensive debug check. Comment out?
    if (status != ResolveStatus.fromValues(value.values()))
        throw new ConfigException.BugOrBroken("Wrong resolved status on " + this);
}
// Convenience constructor: derives the resolve status from the values and
// does not ignore fallbacks.
SimpleConfigObject(ConfigOrigin origin,
        Map<String, AbstractConfigValue> value) {
    this(origin, value, ResolveStatus.fromValues(value.values()), false /* ignoresFallbacks */);
}
// Public API: keep only the given top-level key (key is used literally,
// not parsed as a path expression).
@Override
public SimpleConfigObject withOnlyKey(String key) {
    return withOnlyPath(Path.newKey(key));
}
// Public API: remove the given top-level key (key is used literally,
// not parsed as a path expression).
@Override
public SimpleConfigObject withoutKey(String key) {
    return withoutPath(Path.newKey(key));
}
// gets the object with only the path if the path
// exists, otherwise null if it doesn't. this ensures
// that if we have { a : { b : 42 } } and do
// withOnlyPath("a.b.c") that we don't keep an empty
// "a" object.
@Override
protected SimpleConfigObject withOnlyPathOrNull(Path path) {
    String key = path.first();
    Path next = path.remainder();
    AbstractConfigValue v = value.get(key);

    if (next != null) {
        if (v != null && (v instanceof AbstractConfigObject)) {
            // recurse into the child object for the rest of the path
            v = ((AbstractConfigObject) v).withOnlyPathOrNull(next);
        } else {
            // if the path has more elements but we don't have an object,
            // then the rest of the path does not exist.
            v = null;
        }
    }

    if (v == null) {
        return null;
    } else {
        // single-entry object preserving the child's resolve status
        return new SimpleConfigObject(origin(), Collections.singletonMap(key, v),
                v.resolveStatus(), ignoresFallbacks);
    }
}
// Like withOnlyPathOrNull() but returns an empty (resolved) object instead
// of null when the path does not exist.
@Override
SimpleConfigObject withOnlyPath(Path path) {
    SimpleConfigObject o = withOnlyPathOrNull(path);
    if (o == null) {
        return new SimpleConfigObject(origin(),
                Collections.<String, AbstractConfigValue> emptyMap(), ResolveStatus.RESOLVED,
                ignoresFallbacks);
    } else {
        return o;
    }
}
// Remove the value at the given path, recursing into child objects as
// needed. Returns this object unchanged if the path does not exist.
@Override
SimpleConfigObject withoutPath(Path path) {
    String key = path.first();
    Path next = path.remainder();
    AbstractConfigValue v = value.get(key);

    if (v != null && next != null && v instanceof AbstractConfigObject) {
        // replace the child object with its trimmed version
        v = ((AbstractConfigObject) v).withoutPath(next);
        Map<String, AbstractConfigValue> updated = new HashMap<String, AbstractConfigValue>(
                value);
        updated.put(key, v);
        return new SimpleConfigObject(origin(), updated, ResolveStatus.fromValues(updated
                .values()), ignoresFallbacks);
    } else if (next != null || v == null) {
        // can't descend, nothing to remove
        return this;
    } else {
        // remove the top-level entry by copying every other entry
        Map<String, AbstractConfigValue> smaller = new HashMap<String, AbstractConfigValue>(
                value.size() - 1);
        for (Map.Entry<String, AbstractConfigValue> old : value.entrySet()) {
            if (!old.getKey().equals(key))
                smaller.put(old.getKey(), old.getValue());
        }
        return new SimpleConfigObject(origin(), smaller, ResolveStatus.fromValues(smaller
                .values()), ignoresFallbacks);
    }
}
// Return a copy of this object with the given top-level key set to v.
// Null values are rejected as a programmer error.
@Override
public SimpleConfigObject withValue(String key, ConfigValue v) {
    if (v == null)
        throw new ConfigException.BugOrBroken(
                "Trying to store null ConfigValue in a ConfigObject");

    Map<String, AbstractConfigValue> newMap;
    if (value.isEmpty()) {
        // cheap single-entry map when we had nothing before
        newMap = Collections.singletonMap(key, (AbstractConfigValue) v);
    } else {
        newMap = new HashMap<String, AbstractConfigValue>(value);
        newMap.put(key, (AbstractConfigValue) v);
    }

    return new SimpleConfigObject(origin(), newMap, ResolveStatus.fromValues(newMap.values()),
            ignoresFallbacks);
}
// Set a value at a (possibly nested) path, creating or replacing
// intermediate objects as needed.
@Override
SimpleConfigObject withValue(Path path, ConfigValue v) {
    String key = path.first();
    Path next = path.remainder();

    if (next == null) {
        // leaf of the path: plain top-level set
        return withValue(key, v);
    } else {
        AbstractConfigValue child = value.get(key);
        if (child != null && child instanceof AbstractConfigObject) {
            // if we have an object, add to it
            return withValue(key, ((AbstractConfigObject) child).withValue(next, v));
        } else {
            // as soon as we have a non-object, replace it entirely
            SimpleConfig subtree = ((AbstractConfigValue) v).atPath(
                    SimpleConfigOrigin.newSimple("withValue(" + next.render() + ")"), next);
            return withValue(key, subtree.root());
        }
    }
}
// Direct map lookup; no resolution is attempted here.
@Override
protected AbstractConfigValue attemptPeekWithPartialResolve(String key) {
    return value.get(key);
}
// Copy sharing the same (immutable) value map but with a new status,
// origin and fallback-ignoring flag.
private SimpleConfigObject newCopy(ResolveStatus newStatus, ConfigOrigin newOrigin,
        boolean newIgnoresFallbacks) {
    return new SimpleConfigObject(newOrigin, value, newStatus, newIgnoresFallbacks);
}
// Copy keeping the current ignoresFallbacks flag.
@Override
protected SimpleConfigObject newCopy(ResolveStatus newStatus, ConfigOrigin newOrigin) {
    return newCopy(newStatus, newOrigin, ignoresFallbacks);
}
// Return a copy marked to ignore fallbacks (no-op if already ignoring).
@Override
protected SimpleConfigObject withFallbacksIgnored() {
    if (ignoresFallbacks)
        return this;
    else
        return newCopy(resolveStatus(), origin(), true /* ignoresFallbacks */);
}
// Resolve status was precomputed in the constructor.
@Override
ResolveStatus resolveStatus() {
    return ResolveStatus.fromBoolean(resolved);
}
// Replace (or, when replacement is null, remove) the given direct child,
// matched by identity. Throws if child is not a direct member.
@Override
public SimpleConfigObject replaceChild(AbstractConfigValue child, AbstractConfigValue replacement) {
    HashMap<String, AbstractConfigValue> newChildren = new HashMap<String, AbstractConfigValue>(value);
    for (Map.Entry<String, AbstractConfigValue> old : newChildren.entrySet()) {
        if (old.getValue() == child) {
            if (replacement != null)
                old.setValue(replacement);
            else
                // removing while iterating is safe here because we return
                // immediately below without touching the iterator again
                newChildren.remove(old.getKey());

            return new SimpleConfigObject(origin(), newChildren, ResolveStatus.fromValues(newChildren.values()),
                    ignoresFallbacks);
        }
    }
    throw new ConfigException.BugOrBroken("SimpleConfigObject.replaceChild did not find " + child + " in " + this);
}
// True if descendant (compared by identity) appears anywhere beneath
// this object.
@Override
public boolean hasDescendant(AbstractConfigValue descendant) {
    // cheap pass over direct children first
    for (AbstractConfigValue child : value.values()) {
        if (child == descendant)
            return true;
    }
    // now do the expensive search
    for (AbstractConfigValue child : value.values()) {
        if (child instanceof Container && ((Container) child).hasDescendant(descendant))
            return true;
    }

    return false;
}
// Whether withFallback() merges should be skipped for this object.
@Override
protected boolean ignoresFallbacks() {
    return ignoresFallbacks;
}
// Convert to a plain java.util.Map, recursively unwrapping every child
// value to its plain-Java equivalent.
@Override
public Map<String, Object> unwrapped() {
    Map<String, Object> plain = new HashMap<String, Object>();
    for (String key : value.keySet()) {
        plain.put(key, value.get(key).unwrapped());
    }
    return plain;
}
// Merge "this with abstractFallback as fallback": for every key present in
// either object, keep this object's value, falling back to the other
// object's value (recursively, via withFallback) when both are present.
// Returns this unchanged when the merge produced no differences.
@Override
protected SimpleConfigObject mergedWithObject(AbstractConfigObject abstractFallback) {
    requireNotIgnoringFallbacks();

    if (!(abstractFallback instanceof SimpleConfigObject)) {
        throw new ConfigException.BugOrBroken(
                "should not be reached (merging non-SimpleConfigObject)");
    }

    SimpleConfigObject fallback = (SimpleConfigObject) abstractFallback;

    boolean changed = false;
    boolean allResolved = true;
    Map<String, AbstractConfigValue> merged = new HashMap<String, AbstractConfigValue>();
    Set<String> allKeys = new HashSet<String>();
    allKeys.addAll(this.keySet());
    allKeys.addAll(fallback.keySet());
    for (String key : allKeys) {
        AbstractConfigValue first = this.value.get(key);
        AbstractConfigValue second = fallback.value.get(key);
        AbstractConfigValue kept;
        if (first == null)
            kept = second;
        else if (second == null)
            kept = first;
        else
            kept = first.withFallback(second);
        merged.put(key, kept);
        if (first != kept)
            changed = true;
        if (kept.resolveStatus() == ResolveStatus.UNRESOLVED)
            allResolved = false;
    }

    ResolveStatus newResolveStatus = ResolveStatus.fromBoolean(allResolved);
    boolean newIgnoresFallbacks = fallback.ignoresFallbacks();

    if (changed)
        return new SimpleConfigObject(mergeOrigins(this, fallback), merged, newResolveStatus,
                newIgnoresFallbacks);
    else if (newResolveStatus != resolveStatus() || newIgnoresFallbacks != ignoresFallbacks())
        // same content, but status or fallback flag changed
        return newCopy(newResolveStatus, origin(), newIgnoresFallbacks);
    else
        return this;
}
// Apply a modifier that cannot throw checked exceptions; wraps the
// checked-exception variant and rethrows anything unexpected as a bug.
private SimpleConfigObject modify(NoExceptionsModifier modifier) {
    try {
        return modifyMayThrow(modifier);
    } catch (RuntimeException e) {
        throw e;
    } catch (Exception e) {
        throw new ConfigException.BugOrBroken("unexpected checked exception", e);
    }
}
// Apply the modifier to every child; a null result from the modifier
// removes that child. Two-pass: first collect the changes, then build the
// new map only if something actually changed (otherwise return this).
private SimpleConfigObject modifyMayThrow(Modifier modifier) throws Exception {
    Map<String, AbstractConfigValue> changes = null;
    for (String k : keySet()) {
        AbstractConfigValue v = value.get(k);
        // "modified" may be null, which means remove the child;
        // to do that we put null in the "changes" map.
        AbstractConfigValue modified = modifier.modifyChildMayThrow(k, v);
        if (modified != v) {
            if (changes == null)
                changes = new HashMap<String, AbstractConfigValue>();
            changes.put(k, modified);
        }
    }
    if (changes == null) {
        return this;
    } else {
        Map<String, AbstractConfigValue> modified = new HashMap<String, AbstractConfigValue>();
        boolean sawUnresolved = false;
        for (String k : keySet()) {
            if (changes.containsKey(k)) {
                AbstractConfigValue newValue = changes.get(k);
                if (newValue != null) {
                    modified.put(k, newValue);
                    if (newValue.resolveStatus() == ResolveStatus.UNRESOLVED)
                        sawUnresolved = true;
                } else {
                    // remove this child; don't put it in the new map.
                }
            } else {
                AbstractConfigValue newValue = value.get(k);
                modified.put(k, newValue);
                if (newValue.resolveStatus() == ResolveStatus.UNRESOLVED)
                    sawUnresolved = true;
            }
        }
        return new SimpleConfigObject(origin(), modified,
                sawUnresolved ? ResolveStatus.UNRESOLVED : ResolveStatus.RESOLVED,
                ignoresFallbacks());
    }
}
// Modifier that resolves each child, optionally restricted to a single
// child path; threads the mutable ResolveContext through successive
// children while restoring the original restriction after each step.
private static final class ResolveModifier implements Modifier {

    final Path originalRestrict;
    ResolveContext context;
    final ResolveSource source;

    ResolveModifier(ResolveContext context, ResolveSource source) {
        this.context = context;
        this.source = source;
        originalRestrict = context.restrictToChild();
    }

    @Override
    public AbstractConfigValue modifyChildMayThrow(String key, AbstractConfigValue v) throws NotPossibleToResolve {
        if (context.isRestrictedToChild()) {
            if (key.equals(context.restrictToChild().first())) {
                Path remainder = context.restrictToChild().remainder();
                if (remainder != null) {
                    // resolve only the remainder of the restricted path
                    ResolveResult<? extends AbstractConfigValue> result = context.restrict(remainder).resolve(v,
                            source);
                    context = result.context.unrestricted().restrict(originalRestrict);
                    return result.value;
                } else {
                    // we don't want to resolve the leaf child.
                    return v;
                }
            } else {
                // not in the restrictToChild path
                return v;
            }
        } else {
            // no restrictToChild, resolve everything
            ResolveResult<? extends AbstractConfigValue> result = context.unrestricted().resolve(v, source);
            context = result.context.unrestricted().restrict(originalRestrict);
            return result.value;
        }
    }
}
// Resolves substitutions in this object's children, threading the
// ResolveContext through a ResolveModifier so that state accumulated while
// resolving one child carries over to the next.
@Override
ResolveResult<? extends AbstractConfigObject> resolveSubstitutions(ResolveContext context, ResolveSource source)
        throws NotPossibleToResolve {
    // fast path: nothing left to resolve
    if (resolveStatus() == ResolveStatus.RESOLVED)
        return ResolveResult.make(context, this);

    // children resolve with this object pushed as their parent
    final ResolveSource sourceWithParent = source.pushParent(this);

    try {
        ResolveModifier modifier = new ResolveModifier(context, sourceWithParent);
        AbstractConfigValue value = modifyMayThrow(modifier);
        // modifier.context carries the state accumulated across all children
        return ResolveResult.make(modifier.context, value).asObjectResult();
    } catch (NotPossibleToResolve e) {
        // part of this method's declared contract; propagate unchanged
        throw e;
    } catch (RuntimeException e) {
        throw e;
    } catch (Exception e) {
        // modifyMayThrow declares a broad checked exception; anything other
        // than NotPossibleToResolve reaching here indicates a bug
        throw new ConfigException.BugOrBroken("unexpected checked exception", e);
    }
}
// Returns a copy of this object with relativized(prefix) applied to every
// child value (no child can throw, hence NoExceptionsModifier).
@Override
SimpleConfigObject relativized(final Path prefix) {
    return modify(new NoExceptionsModifier() {
        @Override
        public AbstractConfigValue modifyChild(String key, AbstractConfigValue v) {
            return v.relativized(prefix);
        }
    });
}
// Orders keys so that purely-numeric keys sort before all other keys and
// compare numerically (BigInteger handles any digit length), while all other
// keys compare lexicographically. The point is to make objects which are
// really list-like (numeric indices) render in index order.
// Serializable only to chill out a findbugs warning about comparators.
static final private class RenderComparator implements java.util.Comparator<String>, Serializable {
    private static final long serialVersionUID = 1L;

    // True if s is non-empty and consists solely of decimal digits.
    // (The old comment here claimed strings longer than a long's digit count
    // couldn't be handled; that was stale — compare() uses BigInteger.)
    private static boolean isAllDigits(String s) {
        int length = s.length();

        // empty string doesn't count as a number
        if (length == 0)
            return false;

        for (int i = 0; i < length; ++i) {
            char c = s.charAt(i);

            if (!Character.isDigit(c))
                return false;
        }
        return true;
    }

    @Override
    public int compare(String a, String b) {
        boolean aDigits = isAllDigits(a);
        boolean bDigits = isAllDigits(b);
        if (aDigits && bDigits) {
            // both numeric: compare by value, not lexicographically ("2" < "10")
            return new BigInteger(a).compareTo(new BigInteger(b));
        } else if (aDigits) {
            return -1; // numbers sort before strings
        } else if (bDigits) {
            return 1;
        } else {
            return a.compareTo(b);
        }
    }
}
// Renders this object into sb. For a non-JSON render at the root of the tree
// the outer braces are omitted and children stay at the current indent level.
@Override
protected void render(StringBuilder sb, int indent, boolean atRoot, ConfigRenderOptions options) {
    if (isEmpty()) {
        sb.append("{}");
    } else {
        boolean outerBraces = options.getJson() || !atRoot;

        int innerIndent;
        if (outerBraces) {
            innerIndent = indent + 1;
            sb.append("{");
            if (options.getFormatted())
                sb.append('\n');
        } else {
            innerIndent = indent;
        }

        int separatorCount = 0;
        String[] keys = keySet().toArray(new String[size()]);
        Arrays.sort(keys, new RenderComparator());
        for (String k : keys) {
            AbstractConfigValue v;
            v = value.get(k);

            if (options.getOriginComments()) {
                String[] lines = v.origin().description().split("\n");
                for (String l : lines) {
                    // bugfix: use innerIndent (not indent + 1) so origin
                    // comments line up with the value and ordinary comments
                    // even when outer braces are omitted at the root
                    indent(sb, innerIndent, options);
                    sb.append('#');
                    if (!l.isEmpty())
                        sb.append(' ');
                    sb.append(l);
                    sb.append("\n");
                }
            }
            if (options.getComments()) {
                for (String comment : v.origin().comments()) {
                    indent(sb, innerIndent, options);
                    sb.append("#");
                    if (!comment.startsWith(" "))
                        sb.append(' ');
                    sb.append(comment);
                    sb.append("\n");
                }
            }
            indent(sb, innerIndent, options);
            v.render(sb, innerIndent, false /* atRoot */, k, options);

            // track how many trailing separator chars the last entry leaves
            if (options.getFormatted()) {
                if (options.getJson()) {
                    sb.append(",");
                    separatorCount = 2; // comma + newline
                } else {
                    separatorCount = 1; // newline only
                }
                sb.append('\n');
            } else {
                sb.append(",");
                separatorCount = 1;
            }
        }
        // chop last commas/newlines
        sb.setLength(sb.length() - separatorCount);

        if (outerBraces) {
            if (options.getFormatted()) {
                sb.append('\n'); // put a newline back
                // closing brace sits at the outer indent level
                // (removed a redundant nested "if (outerBraces)" check here)
                indent(sb, indent, options);
            }
            sb.append("}");
        }
    }
    if (atRoot && options.getFormatted())
        sb.append('\n');
}
// Map.get: direct lookup in the backing map; null when the key is absent.
@Override
public AbstractConfigValue get(Object key) {
    return value.get(key);
}
// True if the two maps contain exactly the same keys, each mapped to an
// equal value.
private static boolean mapEquals(Map<String, ConfigValue> a, Map<String, ConfigValue> b) {
    if (a == b)
        return true;

    // must have identical key sets...
    if (!a.keySet().equals(b.keySet()))
        return false;

    // ...and equal values under every key
    for (String key : a.keySet()) {
        if (!a.get(key).equals(b.get(key)))
            return false;
    }
    return true;
}
// Content hash for a map. The keys are sorted first, otherwise two equal maps
// with different iteration orders could produce different hashcodes.
private static int mapHash(Map<String, ConfigValue> m) {
    List<String> keys = new ArrayList<String>(m.keySet());
    Collections.sort(keys);

    int valuesHash = 0;
    for (String k : keys)
        valuesHash += m.get(k).hashCode();

    // combine the sorted-key-list hash with the summed value hashes
    return 41 * (41 + keys.hashCode()) + valuesHash;
}
// canEqual/equals pattern: any ConfigObject is a candidate for equality with us.
@Override
protected boolean canEqual(Object other) {
    return other instanceof ConfigObject;
}
// Equality is by map content only. "origin" is deliberately NOT part of
// equality, and neither are other "extras" like ignoresFallbacks or resolve
// status.
@Override
public boolean equals(Object other) {
    // comparing as maps directly avoids unwrapped() for two ConfigObject,
    // which is what AbstractConfigValue's default equals would do
    if (!(other instanceof ConfigObject))
        return false;
    return canEqual(other) && mapEquals(this, (ConfigObject) other);
}
// Consistent with equals(): content-only hash. "origin" is deliberately NOT
// part of it, and neither are other "extras" like ignoresFallbacks or
// resolve status.
@Override
public int hashCode() {
    return mapHash(this);
}
// Map.containsKey: delegates to the backing map.
@Override
public boolean containsKey(Object key) {
    return value.containsKey(key);
}
// Map.keySet: returns the backing map's key set.
@Override
public Set<String> keySet() {
    return value.keySet();
}
// Map.containsValue: delegates to the backing map.
@Override
public boolean containsValue(Object v) {
    return value.containsValue(v);
}
// Map.entrySet: builds a fresh set of immutable entries. A copy (rather than
// a view) works around the lack of type variance between the backing map's
// AbstractConfigValue values and the declared ConfigValue entry type.
@Override
public Set<Map.Entry<String, ConfigValue>> entrySet() {
    HashSet<Map.Entry<String, ConfigValue>> result =
            new HashSet<Map.Entry<String, ConfigValue>>();
    for (Map.Entry<String, AbstractConfigValue> entry : value.entrySet()) {
        Map.Entry<String, ConfigValue> copied =
                new AbstractMap.SimpleImmutableEntry<String, ConfigValue>(
                        entry.getKey(), entry.getValue());
        result.add(copied);
    }
    return result;
}
// Map.isEmpty: delegates to the backing map.
@Override
public boolean isEmpty() {
    return value.isEmpty();
}
// Map.size: delegates to the backing map.
@Override
public int size() {
    return value.size();
}
// Map.values: returns a detached copy of the values. Uses an ArrayList rather
// than a HashSet because the Map contract requires values() to retain
// duplicates (a set would silently collapse equal values, making the
// collection's size disagree with size()).
@Override
public Collection<ConfigValue> values() {
    return new ArrayList<ConfigValue>(value.values());
}
// Origin description used by the shared empty object.
final private static String EMPTY_NAME = "empty config";
// Singleton empty object carrying the EMPTY_NAME origin.
final private static SimpleConfigObject emptyInstance = empty(SimpleConfigOrigin
        .newSimple(EMPTY_NAME));

// Returns the shared empty object singleton.
final static SimpleConfigObject empty() {
    return emptyInstance;
}
// Returns an empty object carrying the given origin, falling back to the
// shared empty instance when no origin is supplied.
final static SimpleConfigObject empty(ConfigOrigin origin) {
    if (origin == null)
        return empty();
    return new SimpleConfigObject(origin,
            Collections.<String, AbstractConfigValue> emptyMap());
}
// Returns an empty object whose origin marks it as "(not found)" relative to
// the given base origin.
final static SimpleConfigObject emptyMissing(ConfigOrigin baseOrigin) {
    return new SimpleConfigObject(SimpleConfigOrigin.newSimple(
            baseOrigin.description() + " (not found)"),
            Collections.<String, AbstractConfigValue> emptyMap());
}
// serialization all goes through SerializedConfigValue: this object is
// replaced by its serialization proxy when written to an object stream
private Object writeReplace() throws ObjectStreamException {
    return new SerializedConfigValue(this);
}
}
| typesafehub/config | config/src/main/java/com/typesafe/config/impl/SimpleConfigObject.java | Java | apache-2.0 | 24,048 |
<?php
// NOTE(review): fragment extracted from a property-path getValue() method and
// used as an analyzer fixture -- $this->length, $this->elements and
// readProperty() belong to the enclosing class, which is not part of this
// file; confirm against the original Symfony PropertyPath source.

// Walk the property path one element at a time, descending into
// $objectOrArray at each step.
for ($i = 0; $i < $this->length; ++$i) {
    if (is_object($objectOrArray)) {
        $value = $this->readProperty($objectOrArray, $i);
    // arrays need to be treated separately (due to PHP bug?)
    // http://bugs.php.net/bug.php?id=52133
    } elseif (is_array($objectOrArray)) {
        $property = $this->elements[$i];
        // auto-create missing intermediate arrays; the final element
        // defaults to null
        if (!array_key_exists($property, $objectOrArray)) {
            $objectOrArray[$property] = $i + 1 < $this->length ? array() : null;
        }
        // take a reference so writes deeper down propagate into the array
        $value =& $objectOrArray[$property];
    } else {
        throw new UnexpectedTypeException($objectOrArray, 'object or array');
    }
    // descend one level for the next iteration
    $objectOrArray =& $value;
}
// the value reached at the end of the path
return $value;
/*
* Copyright 2015 AppDynamics, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.appdynamicspilot.rest;
import java.util.ArrayList;
import java.util.List;
import org.apache.log4j.Logger;
/**
 * A serializable shopping cart: holds the items a user has selected and
 * computes their running total.
 */
public class ShoppingCart implements java.io.Serializable {

    private static final long serialVersionUID = 1L;

    // Bugfix: made static final. log4j's Logger is not Serializable, so a
    // plain instance field in this Serializable class would make every
    // serialization attempt throw NotSerializableException.
    private static final Logger log = Logger.getLogger(ShoppingCart.class);

    // items in the cart, in insertion order
    private List<ShoppingCartItem> items;

    public ShoppingCart() {
        items = new ArrayList<ShoppingCartItem>();
    }

    /** Adds one item to the cart. */
    public void addItem(ShoppingCartItem item) {
        items.add(item);
    }

    /** Removes the first occurrence of the given item, if present. */
    public void removeItem(ShoppingCartItem item) {
        items.remove(item);
    }

    /** Returns the live item list; mutations affect the cart directly. */
    public List<ShoppingCartItem> getAllItems() {
        return items;
    }

    /** Sums the prices of all items currently in the cart. */
    public double getCartTotal() {
        double total = 0.0;
        for (ShoppingCartItem item : items) {
            total += item.getPrice();
        }
        return total;
    }

    /** Empties the cart. */
    public void clear() {
        items.clear();
    }
}
| udayinfy/ECommerce-Java | ECommerce-Web/src/main/java/com/appdynamicspilot/rest/ShoppingCart.java | Java | apache-2.0 | 1,458 |
// Bootstraps the HTTP server: create the express app, register the
// application routes, then start listening on the configured port.
var express = require('express');
var router = require('./router');

var app = express();

// Port comes from the environment when deployed; defaults to 3000 locally.
app.set('port', process.env.PORT || 3000);

// Attach all application routes to the app.
router.define(app);

// Start listening
// ========================================================
app.listen(app.get('port'), function () {
    console.log("Node app is running at localhost:" + app.get('port'));
});
| OnWhoseBehalf/on-behalf-api | src/server.js | JavaScript | apache-2.0 | 353 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.