code stringlengths 3 1.01M | repo_name stringlengths 5 116 | path stringlengths 3 311 | language stringclasses 30
values | license stringclasses 15
values | size int64 3 1.01M |
|---|---|---|---|---|---|
+++
title = "John Behrens"
image = "john-behrens.jpg"
twitter = "webconsultseu"
website = "http://www.skills-for-teams.com"
type = "speaker"
linktitle = "john-behrens"
+++
John Behrens is a freelancer from Hamburg, Germany. He fulfils a range of roles — developer, tester, Scrum Master, engineer, architect, trainer, and agile coach — and is willing to use whatever skill is needed to help the team and take a project to success.
| gomex/devopsdays-web | content/events/2019-stockholm/speakers/john-behrens.md | Markdown | apache-2.0 | 440 |
require 'puppet/node/facts'
require 'puppet/indirector/rest'
require 'puppet/util/puppetdb'
# Facts terminus backed by PuppetDB: stores facts via the command API and
# reads them back through the v2 query endpoints.
class Puppet::Node::Facts::Puppetdb < Puppet::Indirector::REST
  include Puppet::Util::Puppetdb
  include Puppet::Util::Puppetdb::CommandNames

  # Persist a node's facts by submitting a "replace facts" command to
  # PuppetDB. The request key is the node's certname.
  def save(request)
    payload = request.instance.dup
    payload.values = payload.values.dup
    payload.stringify
    submit_command(request.key, payload.to_pson, CommandReplaceFacts, 1)
  end

  # Fetch the facts for a single node and return them as a
  # Puppet::Node::Facts instance, or nil when the node is unknown.
  def find(request)
    begin
      response = http_get(request, "/v2/nodes/#{CGI.escape(request.key)}/facts", headers)
      log_x_deprecation_header(response)
      unless response.is_a? Net::HTTPSuccess
        # Newline characters cause an HTTP error, so strip them
        raise "[#{response.code} #{response.message}] #{response.body.gsub(/[\r\n]/, '')}"
      end
      rows = PSON.parse(response.body)
      # Note: the Inventory Service API appears to expect us to return nil here
      # if the node isn't found. However, PuppetDB returns an empty array in
      # this case; for now we will just look for that condition and assume that
      # it means that the node wasn't found, so we will return nil. In the
      # future we may want to improve the logic such that we can distinguish
      # between the "node not found" and the "no facts for this node" cases.
      return nil if rows.empty?
      fact_map = rows.each_with_object({}) do |row, acc|
        acc[row['name']] = row['value']
      end
      Puppet::Node::Facts.new(request.key, fact_map)
    rescue => e
      raise Puppet::Error, "Failed to find facts from PuppetDB at #{self.class.server}:#{self.class.port}: #{e}"
    end
  end

  # Search for nodes matching a set of fact constraints. The constraints are
  # specified as a hash of the form:
  #
  #   `{type.name.operator => value}`
  #
  # The only accepted `type` is 'facts'.
  #
  # `name` must be the fact name to query against.
  #
  # `operator` may be one of {eq, ne, lt, gt, le, ge}, and will default to 'eq'
  # if unspecified.
  def search(request)
    return [] unless request.options
    # Maps the indirection's operator tokens onto PuppetDB query operators.
    operator_map = {
      'eq' => '=',
      'gt' => '>',
      'lt' => '<',
      'ge' => '>=',
      'le' => '<=',
    }
    filters = request.options.sort.map do |key, value|
      type, fact_name, operator = key.to_s.split('.')
      operator ||= 'eq'
      raise Puppet::Error, "Fact search against keys of type '#{type}' is unsupported" unless type == 'facts'
      if operator == 'ne'
        # PuppetDB has no '!=' operator; express "ne" as a negated equality.
        ['not', ['=', ['fact', fact_name], value]]
      else
        [operator_map[operator], ['fact', fact_name], value]
      end
    end
    query = ["and"] + filters
    query_param = CGI.escape(query.to_pson)
    begin
      response = http_get(request, "/v2/nodes?query=#{query_param}", headers)
      log_x_deprecation_header(response)
      if response.is_a? Net::HTTPSuccess
        PSON.parse(response.body).map { |node| node["name"] }
      else
        # Newline characters cause an HTTP error, so strip them
        raise "[#{response.code} #{response.message}] #{response.body.gsub(/[\r\n]/, '')}"
      end
    rescue => e
      raise Puppet::Error, "Could not perform inventory search from PuppetDB at #{self.class.server}:#{self.class.port}: #{e}"
    end
  end

  # HTTP headers sent with every PuppetDB query.
  def headers
    {
      "Accept" => "application/json",
      "Content-Type" => "application/x-www-form-urlencoded; charset=UTF-8",
    }
  end
end
| melissa/puppetdb | puppet/lib/puppet/indirector/facts/puppetdb.rb | Ruby | apache-2.0 | 3,402 |
/*
* Copyright 2021 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.quarkus.runtime.storage.database.liquibase;
import java.lang.reflect.Method;
import java.sql.Connection;
import javax.xml.parsers.SAXParserFactory;
import org.jboss.logging.Logger;
import org.keycloak.Config;
import org.keycloak.connections.jpa.updater.liquibase.conn.LiquibaseConnectionProvider;
import org.keycloak.connections.jpa.updater.liquibase.conn.LiquibaseConnectionProviderFactory;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.KeycloakSessionFactory;
import liquibase.Liquibase;
import liquibase.database.Database;
import liquibase.database.DatabaseFactory;
import liquibase.database.jvm.JdbcConnection;
import liquibase.exception.LiquibaseException;
import liquibase.parser.ChangeLogParser;
import liquibase.parser.ChangeLogParserFactory;
import liquibase.parser.core.xml.XMLChangeLogSAXParser;
import liquibase.resource.ClassLoaderResourceAccessor;
import liquibase.resource.ResourceAccessor;
/**
 * Liquibase connection provider for the Quarkus distribution of Keycloak.
 * Acts as its own factory (implements both the factory and provider SPIs) and
 * lazily performs one-time Liquibase initialization on first use.
 */
public class QuarkusLiquibaseConnectionProvider implements LiquibaseConnectionProviderFactory, LiquibaseConnectionProvider {
    private static final Logger logger = Logger.getLogger(QuarkusLiquibaseConnectionProvider.class);
    // Guarded by double-checked locking in create(); volatile makes the
    // initialized flag's publication safe across threads.
    private volatile boolean initialized = false;
    // Resolves changelog resources from this class's classloader; set once
    // during baseLiquibaseInitialization().
    private ClassLoaderResourceAccessor resourceAccessor;
    @Override
    public LiquibaseConnectionProvider create(KeycloakSession session) {
        // Double-checked locking: only the first caller pays the
        // initialization cost; subsequent callers see initialized == true.
        if (!initialized) {
            synchronized (this) {
                if (!initialized) {
                    baseLiquibaseInitialization(session);
                    initialized = true;
                }
            }
        }
        return this;
    }
    /**
     * One-time setup: creates the resource accessor and disables XML schema
     * validation on every registered XML changelog parser (validation is
     * unnecessary at runtime and slows startup).
     */
    protected void baseLiquibaseInitialization(KeycloakSession session) {
        resourceAccessor = new ClassLoaderResourceAccessor(getClass().getClassLoader());
        // disables XML validation
        for (ChangeLogParser parser : ChangeLogParserFactory.getInstance().getParsers()) {
            if (parser instanceof XMLChangeLogSAXParser) {
                Method getSaxParserFactory = null;
                try {
                    // getSaxParserFactory is private in Liquibase, so reach it
                    // reflectively to turn off validating/schema checks.
                    getSaxParserFactory = XMLChangeLogSAXParser.class.getDeclaredMethod("getSaxParserFactory");
                    getSaxParserFactory.setAccessible(true);
                    SAXParserFactory saxParserFactory = (SAXParserFactory) getSaxParserFactory.invoke(parser);
                    saxParserFactory.setValidating(false);
                    saxParserFactory.setSchema(null);
                } catch (Exception e) {
                    // Non-fatal: validation simply stays enabled.
                    logger.warnf("Failed to disable liquibase XML validations");
                } finally {
                    if (getSaxParserFactory != null) {
                        // Restore the original accessibility of the method.
                        getSaxParserFactory.setAccessible(false);
                    }
                }
            }
        }
    }
    @Override
    public void init(Config.Scope config) {
    }
    @Override
    public void postInit(KeycloakSessionFactory factory) {
    }
    @Override
    public void close() {
    }
    @Override
    public String getId() {
        return "quarkus";
    }
    /**
     * Builds a Liquibase instance for the standard Keycloak changelog,
     * bound to the given JDBC connection and optional default schema.
     */
    @Override
    public Liquibase getLiquibase(Connection connection, String defaultSchema) throws LiquibaseException {
        Database database = DatabaseFactory.getInstance().findCorrectDatabaseImplementation(new JdbcConnection(connection));
        if (defaultSchema != null) {
            database.setDefaultSchemaName(defaultSchema);
        }
        String changelog = QuarkusJpaUpdaterProvider.CHANGELOG;
        logger.debugf("Using changelog file %s and changelogTableName %s", changelog, database.getDatabaseChangeLogTableName());
        return new Liquibase(changelog, resourceAccessor, database);
    }
    /**
     * Builds a Liquibase instance for a custom (e.g. user-provided) changelog,
     * using the supplied classloader to resolve resources and a custom
     * changelog table name.
     */
    @Override
    public Liquibase getLiquibaseForCustomUpdate(Connection connection, String defaultSchema, String changelogLocation, ClassLoader classloader, String changelogTableName) throws LiquibaseException {
        Database database = DatabaseFactory.getInstance().findCorrectDatabaseImplementation(new JdbcConnection(connection));
        if (defaultSchema != null) {
            database.setDefaultSchemaName(defaultSchema);
        }
        ResourceAccessor resourceAccessor = new ClassLoaderResourceAccessor(classloader);
        database.setDatabaseChangeLogTableName(changelogTableName);
        logger.debugf("Using changelog file %s and changelogTableName %s", changelogLocation, database.getDatabaseChangeLogTableName());
        return new Liquibase(changelogLocation, resourceAccessor, database);
    }
    @Override
    public int order() {
        return 100;
    }
}
| stianst/keycloak | quarkus/runtime/src/main/java/org/keycloak/quarkus/runtime/storage/database/liquibase/QuarkusLiquibaseConnectionProvider.java | Java | apache-2.0 | 5,267 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (1.8.0_45) on Fri Aug 28 09:51:25 EDT 2015 -->
<title>Cassandra.AsyncClient.atomic_batch_mutate_call (apache-cassandra API)</title>
<meta name="date" content="2015-08-28">
<link rel="stylesheet" type="text/css" href="../../../../stylesheet.css" title="Style">
<script type="text/javascript" src="../../../../script.js"></script>
</head>
<body>
<script type="text/javascript"><!--
try {
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Cassandra.AsyncClient.atomic_batch_mutate_call (apache-cassandra API)";
}
}
catch(err) {
}
//-->
var methods = {"i0":10,"i1":10};
var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
var altColor = "altColor";
var rowColor = "rowColor";
var tableTab = "tableTab";
var activeTableTab = "activeTableTab";
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar.top">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.top.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="class-use/Cassandra.AsyncClient.atomic_batch_mutate_call.html">Use</a></li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../index-all.html">Index</a></li>
<li><a href="../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../org/apache/cassandra/thrift/Cassandra.AsyncClient.add_call.html" title="class in org.apache.cassandra.thrift"><span class="typeNameLink">Prev Class</span></a></li>
<li><a href="../../../../org/apache/cassandra/thrift/Cassandra.AsyncClient.batch_mutate_call.html" title="class in org.apache.cassandra.thrift"><span class="typeNameLink">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../../index.html?org/apache/cassandra/thrift/Cassandra.AsyncClient.atomic_batch_mutate_call.html" target="_top">Frames</a></li>
<li><a href="Cassandra.AsyncClient.atomic_batch_mutate_call.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li><a href="#nested.classes.inherited.from.class.org.apache.thrift.async.TAsyncMethodCall">Nested</a> | </li>
<li><a href="#fields.inherited.from.class.org.apache.thrift.async.TAsyncMethodCall">Field</a> | </li>
<li><a href="#constructor.summary">Constr</a> | </li>
<li><a href="#method.summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li><a href="#constructor.detail">Constr</a> | </li>
<li><a href="#method.detail">Method</a></li>
</ul>
</div>
<a name="skip.navbar.top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<!-- ======== START OF CLASS DATA ======== -->
<div class="header">
<div class="subTitle">org.apache.cassandra.thrift</div>
<h2 title="Class Cassandra.AsyncClient.atomic_batch_mutate_call" class="title">Class Cassandra.AsyncClient.atomic_batch_mutate_call</h2>
</div>
<div class="contentContainer">
<ul class="inheritance">
<li>java.lang.Object</li>
<li>
<ul class="inheritance">
<li>org.apache.thrift.async.TAsyncMethodCall</li>
<li>
<ul class="inheritance">
<li>org.apache.cassandra.thrift.Cassandra.AsyncClient.atomic_batch_mutate_call</li>
</ul>
</li>
</ul>
</li>
</ul>
<div class="description">
<ul class="blockList">
<li class="blockList">
<dl>
<dt>Enclosing class:</dt>
<dd><a href="../../../../org/apache/cassandra/thrift/Cassandra.AsyncClient.html" title="class in org.apache.cassandra.thrift">Cassandra.AsyncClient</a></dd>
</dl>
<hr>
<br>
<pre>public static class <span class="typeNameLabel">Cassandra.AsyncClient.atomic_batch_mutate_call</span>
extends org.apache.thrift.async.TAsyncMethodCall</pre>
</li>
</ul>
</div>
<div class="summary">
<ul class="blockList">
<li class="blockList">
<!-- ======== NESTED CLASS SUMMARY ======== -->
<ul class="blockList">
<li class="blockList"><a name="nested.class.summary">
<!-- -->
</a>
<h3>Nested Class Summary</h3>
<ul class="blockList">
<li class="blockList"><a name="nested.classes.inherited.from.class.org.apache.thrift.async.TAsyncMethodCall">
<!-- -->
</a>
<h3>Nested classes/interfaces inherited from class org.apache.thrift.async.TAsyncMethodCall</h3>
<code>org.apache.thrift.async.TAsyncMethodCall.State</code></li>
</ul>
</li>
</ul>
<!-- =========== FIELD SUMMARY =========== -->
<ul class="blockList">
<li class="blockList"><a name="field.summary">
<!-- -->
</a>
<h3>Field Summary</h3>
<ul class="blockList">
<li class="blockList"><a name="fields.inherited.from.class.org.apache.thrift.async.TAsyncMethodCall">
<!-- -->
</a>
<h3>Fields inherited from class org.apache.thrift.async.TAsyncMethodCall</h3>
<code>client, transport</code></li>
</ul>
</li>
</ul>
<!-- ======== CONSTRUCTOR SUMMARY ======== -->
<ul class="blockList">
<li class="blockList"><a name="constructor.summary">
<!-- -->
</a>
<h3>Constructor Summary</h3>
<table class="memberSummary" border="0" cellpadding="3" cellspacing="0" summary="Constructor Summary table, listing constructors, and an explanation">
<caption><span>Constructors</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colOne" scope="col">Constructor and Description</th>
</tr>
<tr class="altColor">
<td class="colOne"><code><span class="memberNameLink"><a href="../../../../org/apache/cassandra/thrift/Cassandra.AsyncClient.atomic_batch_mutate_call.html#atomic_batch_mutate_call-java.util.Map-org.apache.cassandra.thrift.ConsistencyLevel-org.apache.thrift.async.AsyncMethodCallback-org.apache.thrift.async.TAsyncClient-org.apache.thrift.protocol.TProtocolFactory-org.apache.thrift.transport.TNonblockingTransport-">atomic_batch_mutate_call</a></span>(java.util.Map<java.nio.ByteBuffer,java.util.Map<java.lang.String,java.util.List<<a href="../../../../org/apache/cassandra/thrift/Mutation.html" title="class in org.apache.cassandra.thrift">Mutation</a>>>> mutation_map,
<a href="../../../../org/apache/cassandra/thrift/ConsistencyLevel.html" title="enum in org.apache.cassandra.thrift">ConsistencyLevel</a> consistency_level,
org.apache.thrift.async.AsyncMethodCallback resultHandler,
org.apache.thrift.async.TAsyncClient client,
org.apache.thrift.protocol.TProtocolFactory protocolFactory,
org.apache.thrift.transport.TNonblockingTransport transport)</code> </td>
</tr>
</table>
</li>
</ul>
<!-- ========== METHOD SUMMARY =========== -->
<ul class="blockList">
<li class="blockList"><a name="method.summary">
<!-- -->
</a>
<h3>Method Summary</h3>
<table class="memberSummary" border="0" cellpadding="3" cellspacing="0" summary="Method Summary table, listing methods, and an explanation">
<caption><span id="t0" class="activeTableTab"><span>All Methods</span><span class="tabEnd"> </span></span><span id="t2" class="tableTab"><span><a href="javascript:show(2);">Instance Methods</a></span><span class="tabEnd"> </span></span><span id="t4" class="tableTab"><span><a href="javascript:show(8);">Concrete Methods</a></span><span class="tabEnd"> </span></span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Method and Description</th>
</tr>
<tr id="i0" class="altColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../org/apache/cassandra/thrift/Cassandra.AsyncClient.atomic_batch_mutate_call.html#getResult--">getResult</a></span>()</code> </td>
</tr>
<tr id="i1" class="rowColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../org/apache/cassandra/thrift/Cassandra.AsyncClient.atomic_batch_mutate_call.html#write_args-org.apache.thrift.protocol.TProtocol-">write_args</a></span>(org.apache.thrift.protocol.TProtocol prot)</code> </td>
</tr>
</table>
<ul class="blockList">
<li class="blockList"><a name="methods.inherited.from.class.org.apache.thrift.async.TAsyncMethodCall">
<!-- -->
</a>
<h3>Methods inherited from class org.apache.thrift.async.TAsyncMethodCall</h3>
<code>getClient, getFrameBuffer, getSequenceId, getStartTime, getState, getTimeoutTimestamp, hasTimeout, isFinished, onError, prepareMethodCall, transition</code></li>
</ul>
<ul class="blockList">
<li class="blockList"><a name="methods.inherited.from.class.java.lang.Object">
<!-- -->
</a>
<h3>Methods inherited from class java.lang.Object</h3>
<code>clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait</code></li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
<div class="details">
<ul class="blockList">
<li class="blockList">
<!-- ========= CONSTRUCTOR DETAIL ======== -->
<ul class="blockList">
<li class="blockList"><a name="constructor.detail">
<!-- -->
</a>
<h3>Constructor Detail</h3>
<a name="atomic_batch_mutate_call-java.util.Map-org.apache.cassandra.thrift.ConsistencyLevel-org.apache.thrift.async.AsyncMethodCallback-org.apache.thrift.async.TAsyncClient-org.apache.thrift.protocol.TProtocolFactory-org.apache.thrift.transport.TNonblockingTransport-">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>atomic_batch_mutate_call</h4>
<pre>public atomic_batch_mutate_call(java.util.Map<java.nio.ByteBuffer,java.util.Map<java.lang.String,java.util.List<<a href="../../../../org/apache/cassandra/thrift/Mutation.html" title="class in org.apache.cassandra.thrift">Mutation</a>>>> mutation_map,
<a href="../../../../org/apache/cassandra/thrift/ConsistencyLevel.html" title="enum in org.apache.cassandra.thrift">ConsistencyLevel</a> consistency_level,
org.apache.thrift.async.AsyncMethodCallback resultHandler,
org.apache.thrift.async.TAsyncClient client,
org.apache.thrift.protocol.TProtocolFactory protocolFactory,
org.apache.thrift.transport.TNonblockingTransport transport)
throws org.apache.thrift.TException</pre>
<dl>
<dt><span class="throwsLabel">Throws:</span></dt>
<dd><code>org.apache.thrift.TException</code></dd>
</dl>
</li>
</ul>
</li>
</ul>
<!-- ============ METHOD DETAIL ========== -->
<ul class="blockList">
<li class="blockList"><a name="method.detail">
<!-- -->
</a>
<h3>Method Detail</h3>
<a name="write_args-org.apache.thrift.protocol.TProtocol-">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>write_args</h4>
<pre>public void write_args(org.apache.thrift.protocol.TProtocol prot)
throws org.apache.thrift.TException</pre>
<dl>
<dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
<dd><code>write_args</code> in class <code>org.apache.thrift.async.TAsyncMethodCall</code></dd>
<dt><span class="throwsLabel">Throws:</span></dt>
<dd><code>org.apache.thrift.TException</code></dd>
</dl>
</li>
</ul>
<a name="getResult--">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>getResult</h4>
<pre>public void getResult()
throws <a href="../../../../org/apache/cassandra/thrift/InvalidRequestException.html" title="class in org.apache.cassandra.thrift">InvalidRequestException</a>,
<a href="../../../../org/apache/cassandra/thrift/UnavailableException.html" title="class in org.apache.cassandra.thrift">UnavailableException</a>,
<a href="../../../../org/apache/cassandra/thrift/TimedOutException.html" title="class in org.apache.cassandra.thrift">TimedOutException</a>,
org.apache.thrift.TException</pre>
<dl>
<dt><span class="throwsLabel">Throws:</span></dt>
<dd><code><a href="../../../../org/apache/cassandra/thrift/InvalidRequestException.html" title="class in org.apache.cassandra.thrift">InvalidRequestException</a></code></dd>
<dd><code><a href="../../../../org/apache/cassandra/thrift/UnavailableException.html" title="class in org.apache.cassandra.thrift">UnavailableException</a></code></dd>
<dd><code><a href="../../../../org/apache/cassandra/thrift/TimedOutException.html" title="class in org.apache.cassandra.thrift">TimedOutException</a></code></dd>
<dd><code>org.apache.thrift.TException</code></dd>
</dl>
</li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
</div>
<!-- ========= END OF CLASS DATA ========= -->
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar.bottom">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.bottom.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="class-use/Cassandra.AsyncClient.atomic_batch_mutate_call.html">Use</a></li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../index-all.html">Index</a></li>
<li><a href="../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../org/apache/cassandra/thrift/Cassandra.AsyncClient.add_call.html" title="class in org.apache.cassandra.thrift"><span class="typeNameLink">Prev Class</span></a></li>
<li><a href="../../../../org/apache/cassandra/thrift/Cassandra.AsyncClient.batch_mutate_call.html" title="class in org.apache.cassandra.thrift"><span class="typeNameLink">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../../index.html?org/apache/cassandra/thrift/Cassandra.AsyncClient.atomic_batch_mutate_call.html" target="_top">Frames</a></li>
<li><a href="Cassandra.AsyncClient.atomic_batch_mutate_call.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li><a href="#nested.classes.inherited.from.class.org.apache.thrift.async.TAsyncMethodCall">Nested</a> | </li>
<li><a href="#fields.inherited.from.class.org.apache.thrift.async.TAsyncMethodCall">Field</a> | </li>
<li><a href="#constructor.summary">Constr</a> | </li>
<li><a href="#method.summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li><a href="#constructor.detail">Constr</a> | </li>
<li><a href="#method.detail">Method</a></li>
</ul>
</div>
<a name="skip.navbar.bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>Copyright © 2015 The Apache Software Foundation</small></p>
</body>
</html>
| mitch-kyle/message-board | support/apache-cassandra-2.2.1/javadoc/org/apache/cassandra/thrift/Cassandra.AsyncClient.atomic_batch_mutate_call.html | HTML | apache-2.0 | 16,255 |
package org.apereo.cas.web.flow.actions;
import org.apereo.cas.authentication.CoreAuthenticationTestUtils;
import org.apereo.cas.authentication.principal.ResponseBuilderLocator;
import org.apereo.cas.authentication.principal.WebApplicationService;
import org.apereo.cas.authentication.principal.WebApplicationServiceResponseBuilder;
import org.apereo.cas.config.CasCoreServicesConfiguration;
import org.apereo.cas.config.CasCoreUtilConfiguration;
import org.apereo.cas.services.ServicesManager;
import org.apereo.cas.web.flow.CasWebflowConstants;
import org.apereo.cas.web.support.WebUtils;
import lombok.val;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.cloud.autoconfigure.RefreshAutoConfiguration;
import org.springframework.mock.web.MockHttpServletRequest;
import org.springframework.mock.web.MockHttpServletResponse;
import org.springframework.mock.web.MockServletContext;
import org.springframework.test.context.junit4.rules.SpringClassRule;
import org.springframework.test.context.junit4.rules.SpringMethodRule;
import org.springframework.webflow.context.servlet.ServletExternalContext;
import org.springframework.webflow.test.MockRequestContext;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
/**
* This is {@link RedirectToServiceActionTests}.
*
* @author Misagh Moayyed
* @since 5.3.0
*/
@SpringBootTest(classes = {
    RefreshAutoConfiguration.class,
    CasCoreServicesConfiguration.class,
    CasCoreUtilConfiguration.class
})
public class RedirectToServiceActionTests {
    @ClassRule
    public static final SpringClassRule SPRING_CLASS_RULE = new SpringClassRule();
    @Rule
    public final SpringMethodRule springMethodRule = new SpringMethodRule();
    @Autowired
    @Qualifier("servicesManager")
    private ServicesManager servicesManager;
    /**
     * Executes the action against a mock webflow context carrying an
     * authentication and a service, and expects a "redirect" transition.
     */
    @Test
    public void verifyAction() throws Exception {
        // Build a mock webflow request context backed by mock servlet objects.
        val requestContext = new MockRequestContext();
        val httpRequest = new MockHttpServletRequest();
        val externalContext = new ServletExternalContext(new MockServletContext(), httpRequest, new MockHttpServletResponse());
        requestContext.setExternalContext(externalContext);
        // Seed the flow scope with an authentication and a target service.
        WebUtils.putAuthentication(CoreAuthenticationTestUtils.getAuthentication(), requestContext);
        WebUtils.putService(requestContext, CoreAuthenticationTestUtils.getWebApplicationService());
        // The locator always resolves to the standard web-application response builder.
        val responseBuilderLocator = mock(ResponseBuilderLocator.class);
        when(responseBuilderLocator.locate(any(WebApplicationService.class)))
            .thenReturn(new WebApplicationServiceResponseBuilder(this.servicesManager));
        val action = new RedirectToServiceAction(responseBuilderLocator);
        val resultingEvent = action.execute(requestContext);
        assertEquals(CasWebflowConstants.TRANSITION_ID_REDIRECT, resultingEvent.getId());
    }
}
| robertoschwald/cas | core/cas-server-core-webflow/src/test/java/org/apereo/cas/web/flow/actions/RedirectToServiceActionTests.java | Java | apache-2.0 | 2,947 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8" >
<title>Berlin 2013
- Proposal</title>
<meta name="author" content="Tim Lossen" >
<link rel="alternate" type="application/rss+xml" title="devopsdays RSS Feed" href="http://www.devopsdays.org/feed/" >
<script type="text/javascript" src="https://www.google.com/jsapi"></script>
<script type="text/javascript">
google.load('jquery', '1.3.2');
</script>
<!---This is a combined jAmpersand, jqwindont , jPullquote -->
<script type="text/javascript" src="/js/devops.js"></script>
<!--- Blueprint CSS Framework Screen + Fancytype-Screen + jedi.css -->
<link rel="stylesheet" href="/css/devops.min.css" type="text/css" media="screen, projection">
<link rel="stylesheet" href="/css/blueprint/print.css" type="text/css" media="print">
<!--[if IE]>
<link rel="stylesheet" href="/css/blueprint/ie.css" type="text/css" media="screen, projection">
<![endif]-->
</head>
<body onload="initialize()">
<div class="container ">
<div class="span-24 last" id="header">
<div class="span-16 first">
<img src="/images/devopsdays-banner.png" title="devopsdays banner" width="801" height="115" alt="devopdays banner" ><br>
</div>
<div class="span-8 last">
</div>
</div>
<div class="span-24 last">
<div class="span-15 first">
<div id="headermenu">
<table >
<tr>
<td>
<a href="/"><img alt="home" title="home" src="/images/home.png"></a>
<a href="/">Home</a>
</td>
<td>
<a href="/contact/"><img alt="contact" title="contact" src="/images/contact.png"></a>
<a href="/contact/">Contact</a>
</td>
<td>
<a href="/events/"><img alt="events" title="events" src="/images/events.png"></a>
<a href="/events/">Events</a>
</td>
<td>
<a href="/presentations/"><img alt="presentations" title="presentations" src="/images/presentations.png"></a>
<a href="/presentations/">Presentations</a>
</td>
<td>
<a href="/blog/"><img alt="blog" title="blog" src="/images/blog.png"></a>
<a href="/blog/">Blog</a>
</td>
</tr>
</table>
</div>
</div>
<div class="span-8 last">
</div>
<div class="span-24 last" id="title">
<div class="span-15 first">
<h1>Berlin 2013
- Proposal </h1>
</div>
<div class="span-8 last">
</div>
<h1>Gold sponsors</h1>
</div>
<div class="span-15 ">
<div class="span-15 last ">
<div class="submenu">
<h3>
<a href="/events/2013-berlin/">welcome</a>
<a href="/events/2013-berlin/propose">propose</a>
<a href="/events/2013-berlin/program">program</a>
<a href="/events/2013-berlin/location">location</a>
<a href="/events/2013-berlin/registration">register</a>
<a href="/events/2013-berlin/sponsor">sponsor</a>
<a href="/events/2013-berlin/contact">contact</a>
</h3>
</div>
Back to <a href='..'>proposals overview</a> - <a href='../../program'>program</a>
<hr>
<h3>Podularity FTW!</h3>
<p><strong>Abstract:</strong></p>
<p>In "The Connected Company" (O'Reilly, 2012), Dave Gray describes how
pods -- "small, autonomous units" -- are the basic building blocks of
a flexible, scalable and resilient organisation. At Wooga, we have
used pods (though we call them "game teams") as the main
organizational unit from the beginning, and this has led quite
naturally to a strong devops culture.</p>
<p>In this talk, I will tell how this decision came about, how it has
played out, which practical benefits the approach provides -- but
also, what problems we have run into. As a bonus, I will report on a
recent attempt to use podularity as the technical system architecture,
where scalability and resilience are major concerns as well.</p>
<p><strong>Speaker:</strong></p>
<p>Tim Lossen</p>
</div>
<div class="span-15 first last">
<script type="text/javascript">
// var disqus_developer = 1;
</script>
<div id="disqus_thread"></div>
<script type="text/javascript">
  // Standard Disqus embed snippet: asynchronously injects the Disqus
  // comment widget for the "devopsdays" forum into #disqus_thread above.
  var disqus_shortname = 'devopsdays';
  (function() {
    // Create the loader <script> tag and mark it async so it does not
    // block page rendering.
    var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true;
    dsq.src = 'http://' + disqus_shortname + '.disqus.com/embed.js';
    // Attach to <head> if present, otherwise fall back to <body>.
    (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq);
  })();
</script>
<noscript>Please enable JavaScript to view the <a href="http://disqus.com/?ref_noscript">comments powered by Disqus.</a></noscript>
<a href="http://disqus.com" class="dsq-brlink">blog comments powered by <span class="logo-disqus">Disqus</span></a>
<hr>
</div>
</div>
<div class="span-8 last">
<div class="span-8 last">
<a href='http://www.zeroturnaround.com'><img border=0 alt='Zeroturnaround' title='Zeroturnaround' width=100px height=100px src='/events/2013-berlin/logos/zeroturnaround.png'></a>
<a href='http://www8.hp.com/de/de/software-solutions/software.html?compURI=1234839&jumpid=reg_r1002_dede_c-001_title_r0001'><img border=0 alt='HP' title='HP' width=100px height=100px src='/events/2013-berlin/logos/hp.png'></a>
<h1>Gold Sponsors</h1>
<a href='http://www.gutefrage.net/'><img border=0 alt='Gutefrage' title='Gutefrage' width=100px height=100px src='/events/2013-berlin/logos/gutefrage.png'></a>
<a href='http://developer.immobilienscout24.de/jobs/'><img border=0 alt='Immobilien Scout' title='Immobilien Scout' width=100px height=100px src='/events/2013-berlin/logos/immobilienscout.png'></a>
<a href='http://www.unbelievable-machine.com'><img border=0 alt='The unbelievable Machine Company' title='The unbelievable Machine Company' width=100px height=100px src='/events/2013-berlin/logos/unbelievablemachine.png'></a>
<a href='http://nokia.de'><img border=0 alt='Nokia Here' title='Nokia Here' width=100px height=100px src='/events/2013-berlin/logos/here.png'></a>
<a href='https://www.engineyard.com/'><img border=0 alt='Engine Yard' title='Engine Yard' width=100px height=100px src='/events/2013-berlin/logos/engineyard.png'></a>
<a href='http://www.idealo.de/'><img border=0 alt='Idealo' title='Idealo' width=100px height=100px src='/events/2013-berlin/logos/idealo.png'></a>
<a href='http://www.netways.de/'><img border=0 alt='Netways' title='Netways' width=100px height=100px src='/events/2013-berlin/logos/netways.png'></a>
<a href='http://www.axelspringer.de/karriere'><img border=0 alt='Axel Springer' title='Axel Springer' width=100px height=100px src='/events/2013-berlin/logos/axelspringer.png'></a>
<h1>Silver sponsors</h1>
<a href='http://www.innoq.com/'><img border=0 alt='InnoQ' title='InnoQ' width=100px height=100px src='/events/2013-berlin/logos/innoq.png'></a>
<a href='http://www.friendscout24.com/'><img border=0 alt='FriendScout 24' title='FriendScout 24' width=100px height=100px src='/events/2013-berlin/logos/friendscout24.png'></a>
<a href='http://www.serena.com/'><img border=0 alt='Serena' title='Serena' width=100px height=100px src='/events/2013-berlin/logos/serena.png'></a>
<a href='http://www.cassini.de/'><img border=0 alt='Cassini' title='Cassini' width=100px height=100px src='/events/2013-berlin/logos/cassini.png'></a>
<a href='http://www.leanovate.de/'><img border=0 alt='Leanovate' title='Leanovate' width=100px height=100px src='/events/2013-berlin/logos/leanovate.png'></a>
<a href='http://www.it-agile.de/'><img border=0 alt='IT-Agile' title='IT-Agile' width=100px height=100px src='/events/2013-berlin/logos/itagile.png'></a>
<a href='http://www.cloudbau.de/'><img border=0 alt='Cloudbau' title='Cloudbau' width=100px height=100px src='/events/2013-berlin/logos/cloudbau.png'></a>
<a href='http://www.gsb.stanford.edu/ignite/paris'><img border=0 alt='Stanford Ignite Paris' title='Stanford Ignite Paris' width=100px height=100px src='/events/2013-berlin/logos/stanford.png'></a>
<a href='http://www.tarent.com'><img border=0 alt='Tarent' title='Tarent' width=100px height=100px src='/events/2013-berlin/logos/tarent.png'></a>
<a href='http://aws.amazon.com/opsworks/'><img border=0 alt='OpsWorks' title='OpsWorks' width=100px height=100px src='/events/2013-berlin/logos/opsworks.png'></a>
<a href='http://www.epost.de/'><img border=0 alt='E-POST' title='E-POST' width=100px height=100px src='/events/2013-berlin/logos/epost.png'></a>
<h1>Evening sponsors</h1>
<a href='http://www.syseleven.com/'><img border=0 alt='SysEleven' title='SysEleven' width=100px height=100px src='/events/2013-berlin/logos/syseleven.png'></a>
<a href='http://www.github.com/'><img border=0 alt='github' title='github' width=100px height=100px src='/events/2013-berlin/logos/github.png'></a>
</div>
<div class="span-8 last">
</div>
</div>
</div>
</div>
<script type="text/javascript">
  // Legacy Google Analytics (ga.js) bootstrap for property UA-9713393-1.
  var _gaq = _gaq || [];
  _gaq.push(['_setAccount', 'UA-9713393-1']);
  _gaq.push(['_trackPageview']);
  (function() {
    // Asynchronously load ga.js, choosing the ssl. or www. host so the
    // script's protocol matches the page's protocol.
    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
    // Insert before the first existing <script> tag on the page.
    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
  })();
</script>
</body>
</html>
| benjohnson77/devopsdays-web | static/events/2013-berlin/proposals/Podularity FTW/index.html | HTML | apache-2.0 | 9,252 |
package org.camunda.bpm.engine.rest.wink;
import org.camunda.bpm.engine.rest.AbstractMessageRestServiceTest;
import org.camunda.bpm.engine.rest.util.WinkTomcatServerBootstrap;
import org.junit.AfterClass;
import org.junit.BeforeClass;
/**
 * Runs the shared message REST service test suite (inherited from
 * {@link AbstractMessageRestServiceTest}) against an embedded Tomcat server
 * using the Apache Wink JAX-RS runtime.
 */
public class MessageRestServiceTest extends AbstractMessageRestServiceTest {

  // Embedded Tomcat/Wink server shared by all tests in this class.
  protected static WinkTomcatServerBootstrap serverBootstrap;

  /** Boots the embedded runtime once before any test in this class runs. */
  @BeforeClass
  public static void setUpEmbeddedRuntime() {
    serverBootstrap = new WinkTomcatServerBootstrap();
    serverBootstrap.start();
  }

  /** Shuts the embedded runtime down after the whole class has finished. */
  @AfterClass
  public static void tearDownEmbeddedRuntime() {
    serverBootstrap.stop();
  }
}
| tkaefer/camunda-bpm-platform | engine-rest/src/test/java/org/camunda/bpm/engine/rest/wink/MessageRestServiceTest.java | Java | apache-2.0 | 629 |
// Monotonic counters: total connections and total messages this receiver
// page has seen (used only to build human-readable log lines).
let connectionIdx = 0;
let messageIdx = 0;
// Registers a newly received presentation connection: assigns it a serial
// id, logs every incoming message (echoing an acknowledgement back to the
// controller), and logs when the connection closes.
function addConnection(connection) {
  connectionIdx += 1;
  connection.connectionId = connectionIdx;
  addMessage(`New connection #${connectionIdx}`);

  connection.addEventListener('message', (event) => {
    messageIdx += 1;
    const data = JSON.parse(event.data);
    const logString =
        `Message ${messageIdx} from connection #${connection.connectionId}: ${data.message}`;
    addMessage(logString, data.lang);
    maybeSetFruit(data.message);
    connection.send(`Received message ${messageIdx}`);
  });

  connection.addEventListener('close', (event) => {
    addMessage(`Connection #${connection.connectionId} closed, reason = ${event.reason}, message = ${event.message}`);
  });
};
/* Utils */
// Lookup table from a lower-case fruit name to its emoji (as a Unicode
// escape). Keys must stay lower-case: incoming messages are lower-cased
// before the lookup in maybeSetFruit.
const fruitEmoji = {
  'grapes': '\u{1F347}',
  'watermelon': '\u{1F349}',
  'melon': '\u{1F348}',
  'tangerine': '\u{1F34A}',
  'lemon': '\u{1F34B}',
  'banana': '\u{1F34C}',
  'pineapple': '\u{1F34D}',
  'green apple': '\u{1F35F}',
  'apple': '\u{1F34E}',
  'pear': '\u{1F350}',
  'peach': '\u{1F351}',
  'cherries': '\u{1F352}',
  'strawberry': '\u{1F353}'
};
// Appends `content` as a new <li> entry to the #message-list element.
// If `language` is given, it is set as the entry's lang attribute.
function addMessage(content, language) {
  const entry = document.createElement("li");
  entry.textContent = content;
  if (language) {
    entry.lang = language;
  }
  document.querySelector("#message-list").appendChild(entry);
};
// If `message` names a known fruit (case-insensitive), shows that fruit's
// emoji in the #main element; otherwise leaves the page untouched.
function maybeSetFruit(message) {
  const lowered = message.toLowerCase();
  if (lowered in fruitEmoji) {
    document.querySelector('#main').textContent = fruitEmoji[lowered];
  }
};
document.addEventListener('DOMContentLoaded', function() {
  // `navigator.presentation.receiver` is only defined when this page was
  // launched as a Presentation API receiver.
  if (navigator.presentation.receiver) {
    navigator.presentation.receiver.connectionList.then(list => {
      // Wire up connections that were established before this handler ran...
      list.connections.map(connection => addConnection(connection));
      // ...and any connection that arrives later.
      list.addEventListener('connectionavailable', function(event) {
        addConnection(event.connection);
      });
    });
  }
});
| beaufortfrancois/samples | presentation-api/receiver/receiver.js | JavaScript | apache-2.0 | 1,980 |
/*
* Copyright 2013 Steve Vickers
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Created on: Jun 15, 2014
*/
package reactivemongo.extensions.dsl.criteria
import org.scalatest._
import org.scalatest.matchers._
import reactivemongo.bson._
/**
* The '''UntypedWhereSpec''' type verifies the behaviour expected of the
* `where` method in the [[reactivemongo.extensions.dsl.criteria.Untyped]]
* `type`.
*
* @author svickers
*
*/
class UntypedWhereSpec
  extends FlatSpec
  with Matchers {

  /// Class Imports
  import Untyped._

  // NOTE: each `_` inside a `where { ... }` block is a *distinct*
  // placeholder, so `_.a === 1 && _.b === 2` is the two-placeholder form.
  // With more than one placeholder the criteria are combined under a BSON
  // `$and` array; a single placeholder serializes to a plain document.

  "An Untyped where" should "support 1 placeholder" in {
    val q = where {
      _.a === 1
    }

    // One criterion: no `$and` wrapper expected.
    BSONDocument.pretty(q) shouldBe (
      BSONDocument.pretty(
        BSONDocument(
          "a" -> BSONInteger(1)
        )
      )
    );
  }

  it should "support 2 placeholders" in {
    val q = where {
      _.a === 1 && _.b === 2
    }

    // Two criteria combined under `$and`.
    BSONDocument.pretty(q) shouldBe (
      BSONDocument.pretty(
        BSONDocument(
          "$and" ->
            BSONArray(
              BSONDocument(
                "a" -> BSONInteger(1)
              ),
              BSONDocument(
                "b" -> BSONInteger(2)
              )
            )
        )
      )
    );
  }

  it should "support 3 placeholders" in {
    val q = where {
      _.a === 1 && _.b === 2 && _.c === 3
    }

    BSONDocument.pretty(q) shouldBe (
      BSONDocument.pretty(
        BSONDocument(
          "$and" ->
            BSONArray(
              BSONDocument(
                "a" -> BSONInteger(1)
              ),
              BSONDocument(
                "b" -> BSONInteger(2)
              ),
              BSONDocument(
                "c" -> BSONInteger(3)
              )
            )
        )
      )
    );
  }

  /// The library supports from 1 to 22 placeholders for the where method.
  it should "support 22 placeholders" in {
    val q = where {
      _.p === 0 &&
      _.p === 0 &&
      _.p === 0 &&
      _.p === 0 &&
      _.p === 0 &&
      _.p === 0 &&
      _.p === 0 &&
      _.p === 0 &&
      _.p === 0 &&
      _.p === 0 &&
      _.p === 0 &&
      _.p === 0 &&
      _.p === 0 &&
      _.p === 0 &&
      _.p === 0 &&
      _.p === 0 &&
      _.p === 0 &&
      _.p === 0 &&
      _.p === 0 &&
      _.p === 0 &&
      _.p === 0 &&
      _.p === 0
    }

    // All 22 identical criteria appear under a single `$and`.
    BSONDocument.pretty(q) shouldBe (
      BSONDocument.pretty(
        BSONDocument(
          "$and" ->
            BSONArray(List.fill(22)(BSONDocument("p" -> BSONInteger(0))))
        )
      )
    );
  }
}
| ReactiveMongo/ReactiveMongo-Extensions | bson/src/test/scala/dsl/criteria/UntypedWhereSpec.scala | Scala | apache-2.0 | 3,275 |
/*
* JBoss, Home of Professional Open Source
* Copyright 2014, Red Hat, Inc. and/or its affiliates, and individual
* contributors by the @authors tag. See the copyright.txt in the
* distribution for a full listing of individual contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jboss.as.quickstart.deltaspike.partialbean;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import javax.enterprise.context.ApplicationScoped;
/**
* This class implements a dynamic DeltaSpike Partial Bean. It is bound to
* one or more abstract classes or interfaces via the Binding Annotation
* (@ExamplePartialBeanBinding below).
*
* All abstract, unimplemented methods from those beans will be implemented
* via the invoke method.
*
*/
@ExamplePartialBeanBinding
@ApplicationScoped
public class ExamplePartialBeanImplementation implements InvocationHandler {

    /**
     * In our example, this method will be invoked when the "sayHello" method is called.
     *
     * @param proxy The object upon which the method is being invoked.
     * @param method The method being invoked (sayHello in this QuickStart)
     * @param args The arguments being passed in to the invoked method
     * @return the greeting string {@code "Hello " + args[0]} built from the
     *         first argument (assumes at least one argument is passed)
     * @throws Throwable declared by {@link InvocationHandler#invoke}; this
     *         implementation does not throw
     */
    public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
        return "Hello " + args[0];
    }
}
| jboss-developer/jboss-wfk-quickstarts | deltaspike-partialbean-basic/src/main/java/org/jboss/as/quickstart/deltaspike/partialbean/ExamplePartialBeanImplementation.java | Java | apache-2.0 | 1,876 |
/*
* JBoss, Home of Professional Open Source
* Copyright 2010, Red Hat, Inc. and/or its affiliates, and individual contributors
* by the @authors tag. See the copyright.txt in the distribution for a
* full listing of individual contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.hibernate.validator.cfg.defs;
import javax.validation.constraints.Pattern;
import org.hibernate.validator.cfg.ConstraintDef;
import org.hibernate.validator.constraints.Email;
/**
* @author Hardy Ferentschik
*/
/**
 * Programmatic-constraint definition for the {@code @Email} constraint,
 * used with Hibernate Validator's constraint-mapping API.
 */
public class EmailDef extends ConstraintDef<EmailDef, Email> {

	public EmailDef() {
		super( Email.class );
	}

	/**
	 * Sets the constraint's {@code regexp} attribute.
	 *
	 * @param regexp the regular expression to store as the "regexp" parameter
	 * @return {@code this} for method chaining
	 */
	public EmailDef regexp(String regexp) {
		addParameter( "regexp", regexp );
		return this;
	}

	/**
	 * Sets the constraint's {@code flags} attribute.
	 *
	 * @param flags the pattern flags to store as the "flags" parameter
	 * @return {@code this} for method chaining
	 */
	public EmailDef flags(Pattern.Flag... flags) {
		addParameter( "flags", flags );
		return this;
	}
}
| jmartisk/hibernate-validator | engine/src/main/java/org/hibernate/validator/cfg/defs/EmailDef.java | Java | apache-2.0 | 1,337 |
/*
Copyright IBM Corp. 2016 All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"github.com/golang/protobuf/proto"
"github.com/hyperledger/fabric/core/util"
)
// Hash returns the crypto hash of the protobuf-serialized block header.
func (b *BlockHeader) Hash() []byte {
	data, err := proto.Marshal(b) // XXX this is wrong, protobuf is not the right mechanism to serialize for a hash
	if err != nil {
		// Marshaling an in-memory header is not expected to fail; treat it as fatal.
		panic("This should never fail and is generally irrecoverable")
	}
	return util.ComputeCryptoHash(data)
}
// Hash returns the crypto hash of the protobuf-serialized block data.
func (b *BlockData) Hash() []byte {
	data, err := proto.Marshal(b) // XXX this is wrong, protobuf is not the right mechanism to serialize for a hash, AND, it is not a MerkleTree hash
	if err != nil {
		// Marshaling in-memory block data is not expected to fail; treat it as fatal.
		panic("This should never fail and is generally irrecoverable")
	}
	return util.ComputeCryptoHash(data)
}
| stonejiang208/fabric | protos/common/block.go | GO | apache-2.0 | 1,283 |
<?php
/*
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* GENERATED CODE WARNING
* Generated by gapic-generator-php from the file
* https://github.com/googleapis/googleapis/blob/master/google/cloud/accessapproval/v1/accessapproval.proto
* Updates to the above are reflected here through a refresh process.
*/
namespace Google\Cloud\AccessApproval\V1;
use Google\Cloud\AccessApproval\V1\Gapic\AccessApprovalGapicClient;
/**
 * {@inheritdoc}
 *
 * Hand-editable subclass of the generated GAPIC client: all functionality
 * lives in {@see AccessApprovalGapicClient}; this class exists only as a
 * place for manual additions that survive code regeneration.
 */
class AccessApprovalClient extends AccessApprovalGapicClient
{
    // This class is intentionally empty, and is intended to hold manual additions to
    // the generated {@see AccessApprovalGapicClient} class.
}
| googleapis/google-cloud-php-access-approval | src/V1/AccessApprovalClient.php | PHP | apache-2.0 | 1,209 |
/*
* Copyright (c) 2010. All rights reserved.
*/
package ro.isdc.wro.model.resource.processor;
import static org.junit.Assert.assertEquals;
import java.io.File;
import java.net.URL;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import ro.isdc.wro.config.Context;
import ro.isdc.wro.model.resource.ResourceType;
import ro.isdc.wro.model.resource.processor.impl.css.ConformColorsCssProcessor;
import ro.isdc.wro.util.WroTestUtils;
/**
* TestConformColorsCssProcessor.
*
* @author Alex Objelean
* @created Created on Aug 15, 2010
*/
public class TestConformColorsCssProcessor {
  // Processor under test; recreated before each test method.
  private ResourcePreProcessor processor;

  /** Guards against tests leaking an active wro4j Context before the class runs. */
  @BeforeClass
  public static void onBeforeClass() {
    assertEquals(0, Context.countActive());
  }

  /** Verifies no test in this class leaked an active wro4j Context. */
  @AfterClass
  public static void onAfterClass() {
    assertEquals(0, Context.countActive());
  }

  @Before
  public void setUp() {
    processor = new ConformColorsCssProcessor();
  }

  /**
   * Runs the processor over every CSS file in the "conformColors/test" resource
   * folder and compares each result against the same-named file under
   * "conformColors/expected".
   */
  @Test
  public void testFromFolder()
      throws Exception {
    final URL url = getClass().getResource("conformColors");
    final File testFolder = new File(url.getFile(), "test");
    final File expectedFolder = new File(url.getFile(), "expected");
    WroTestUtils.compareFromDifferentFoldersByExtension(testFolder, expectedFolder, "css", processor);
  }

  /** The processor must declare support for CSS resources. */
  @Test
  public void shouldSupportCorrectResourceTypes() {
    WroTestUtils.assertProcessorSupportResourceTypes(processor, ResourceType.CSS);
  }
}
| UAK-35/wro4j | wro4j-core/src/test/java/ro/isdc/wro/model/resource/processor/TestConformColorsCssProcessor.java | Java | apache-2.0 | 1,512 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/core/grappler/optimizers/arithmetic_optimizer.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/optimizers/constant_folding.h"
#include "tensorflow/core/grappler/optimizers/model_pruner.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
// Name prefixes that individual ArithmeticOptimizer stages prepend to the
// nodes they rewrite; the helpers below use them to reconstruct the names of
// optimized nodes in test expectations.
constexpr char kHoistFactorOptimizerDiv[] =
    "ArithmeticOptimizer/HoistCommonFactor_Div_";
constexpr char kHoistFactorOptimizerMul[] =
    "ArithmeticOptimizer/HoistCommonFactor_Mul_";
constexpr char kHoistFactorOptimizerAdd[] =
    "ArithmeticOptimizer/HoistCommonFactor_Add_";
constexpr char kSimplifyAggregationConst[] =
    "ArithmeticOptimizer/SimplifyAggregation_Const_";
constexpr char kSimplifyAggregationMul[] =
    "ArithmeticOptimizer/SimplifyAggregation_Mul_";

// Optimized name of outer Mul node by HoistCommonFactorOutOfAggregation.
string HoistMulName(const string& name) {
  return AddPrefixToNodeName(name, kHoistFactorOptimizerMul, "");
}

// Optimized name of outer Div node by HoistCommonFactorOutOfAggregation.
string HoistDivName(const string& name) {
  return AddPrefixToNodeName(name, kHoistFactorOptimizerDiv, "");
}

// Optimized name of inner Add node by HoistCommonFactorOutOfAggregation.
string HoistAddName(const string& name) {
  return AddPrefixToNodeName(name, kHoistFactorOptimizerAdd, "");
}

// Optimized name of Const node by SimplifyAggregation.
string AggregationConstName(const string& name) {
  return AddPrefixToNodeName(name, kSimplifyAggregationConst, "");
}

// Optimized name of Mul node by SimplifyAggregation.
string AggregationMulName(const string& name) {
  return AddPrefixToNodeName(name, kSimplifyAggregationMul, "");
}

// Generic optimized node name using the optimizer-wide prefix.
string OptimizedName(const string& name) {
  return AddPrefixToNodeName(name, kArithmeticOptimizer);
}
// Asserts that `optimized_graph` is node-for-node identical to
// `original_graph`: same node count, and same name, op, and inputs (in the
// same order) for every node. `line` is the caller's __LINE__, appended to
// each failure message to identify the call site.
void VerifyGraphsMatch(const GraphDef& original_graph,
                       const GraphDef& optimized_graph, int line) {
  EXPECT_EQ(original_graph.node_size(), optimized_graph.node_size()) << line;
  for (int i = 0; i < original_graph.node_size(); ++i) {
    const NodeDef& original = original_graph.node(i);
    const NodeDef& optimized = optimized_graph.node(i);
    EXPECT_EQ(original.name(), optimized.name()) << line;
    EXPECT_EQ(original.op(), optimized.op()) << line;
    EXPECT_EQ(original.input_size(), optimized.input_size()) << line;
    for (int j = 0; j < original.input_size(); ++j) {
      EXPECT_EQ(original.input(j), optimized.input(j)) << line;
    }
  }
}
} // namespace
// Test fixture: bundles helpers that run ArithmeticOptimizer (optionally
// followed by pruning and/or constant folding) and helpers that isolate a
// single rewrite stage by toggling the corresponding
// ArithmeticOptimizerOptions flag.
class ArithmeticOptimizerTest : public GrapplerTest {
 protected:
  // Optimize a graph using ArithmeticOptimizer and prune all the nodes that no
  // longer have any output consumers.
  void OptimizeAndPrune(ArithmeticOptimizer* optimizer, GrapplerItem* item,
                        GraphDef* output) {
    TF_EXPECT_OK(optimizer->Optimize(nullptr, *item, output));
    item->graph.Swap(output);
    output->Clear();
    TF_EXPECT_OK(ModelPruner().Optimize(nullptr, *item, output));
  }

  // Run ArithmeticOptimizer twice to make sure the rewrite is idempotent.
  void OptimizeTwice(ArithmeticOptimizer* optimizer, GrapplerItem* item,
                     GraphDef* output) {
    TF_EXPECT_OK(optimizer->Optimize(nullptr, *item, output));
    item->graph.Swap(output);
    output->Clear();
    TF_EXPECT_OK(optimizer->Optimize(nullptr, *item, output));
  }

  // Run ArithmeticOptimizer twice to make sure the rewrite is idempotent.
  // Optionally run a constant folding pass before pruning.
  void OptimizeTwiceAndPrune(ArithmeticOptimizer* optimizer, GrapplerItem* item,
                             GraphDef* output, bool const_folding = false) {
    TF_EXPECT_OK(optimizer->Optimize(nullptr, *item, output));
    item->graph.Swap(output);
    output->Clear();
    TF_EXPECT_OK(optimizer->Optimize(nullptr, *item, output));
    if (const_folding) {
      item->graph.Swap(output);
      output->Clear();
      TF_EXPECT_OK(ConstantFolding(/*cpu_device=*/nullptr)
                       .Optimize(nullptr, *item, output));
    }
    item->graph.Swap(output);
    output->Clear();
    TF_EXPECT_OK(ModelPruner().Optimize(nullptr, *item, output));
  }

  // Turns every rewrite stage off; the EnableOnly* helpers below start from
  // this state and switch exactly one stage back on.
  // TODO(ezhulenev): Make private. After migration to stages each test
  // should explicitly enable required optimization for tests isolation
  void DisableAllStages(ArithmeticOptimizer* optimizer) {
    ArithmeticOptimizer::ArithmeticOptimizerOptions options;
    options.dedup_computations = false;
    options.combine_add_to_addn = false;
    options.convert_sqrt_div_to_rsqrt_mul = false;
    options.convert_pow = false;
    options.convert_log1p = false;
    options.optimize_max_or_min_of_monotonic = false;
    options.fold_conjugate_into_transpose = false;
    options.fold_multiply_into_conv = false;
    options.fold_transpose_into_matmul = false;
    options.hoist_common_factor_out_of_aggregation = false;
    options.hoist_cwise_unary_chains = false;
    options.minimize_broadcasts = false;
    options.remove_identity_transpose = false;
    options.remove_involution = false;
    options.remove_idempotent = false;
    options.remove_redundant_bitcast = false;
    options.remove_redundant_cast = false;
    options.remove_redundant_reshape = false;
    options.remove_negation = false;
    options.remove_logical_not = false;
    options.reorder_cast_and_transpose = false;
    options.replace_mul_with_square = false;
    options.simplify_aggregation = false;
    options.unary_ops_composition = false;
    optimizer->options_ = options;
  }

  // Disables only the Add->AddN combining stage, leaving the rest untouched.
  void DisableAddToAddNCombining(ArithmeticOptimizer* optimizer) {
    optimizer->options_.combine_add_to_addn = false;
  }

  // --- Stage-isolation helpers: each disables all stages, then re-enables
  // --- exactly one rewrite so a test exercises that rewrite alone.
  void EnableOnlyAddToAddNCombining(ArithmeticOptimizer* optimizer) {
    DisableAllStages(optimizer);
    optimizer->options_.combine_add_to_addn = true;
  }

  void EnableOnlyFoldConjugateIntoTranspose(ArithmeticOptimizer* optimizer) {
    DisableAllStages(optimizer);
    optimizer->options_.fold_conjugate_into_transpose = true;
  }

  void EnableOnlyFoldMultipleIntoConv(ArithmeticOptimizer* optimizer) {
    DisableAllStages(optimizer);
    optimizer->options_.fold_multiply_into_conv = true;
  }

  void EnableOnlyFoldTransposeIntoMatMul(ArithmeticOptimizer* optimizer) {
    DisableAllStages(optimizer);
    optimizer->options_.fold_transpose_into_matmul = true;
  }

  void EnableOnlyHoistCommonFactor(ArithmeticOptimizer* optimizer) {
    DisableAllStages(optimizer);
    optimizer->options_.hoist_common_factor_out_of_aggregation = true;
  }

  void EnableOnlyMinimizeBroadcasts(ArithmeticOptimizer* optimizer) {
    DisableAllStages(optimizer);
    optimizer->options_.minimize_broadcasts = true;
  }

  void EnableOnlyRemoveIdentityTranspose(ArithmeticOptimizer* optimizer) {
    DisableAllStages(optimizer);
    optimizer->options_.remove_identity_transpose = true;
  }

  void EnableOnlyRemoveInvolution(ArithmeticOptimizer* optimizer) {
    DisableAllStages(optimizer);
    optimizer->options_.remove_involution = true;
  }

  void EnableOnlyRemoveRedundantBitcast(ArithmeticOptimizer* optimizer) {
    DisableAllStages(optimizer);
    optimizer->options_.remove_redundant_bitcast = true;
  }

  void EnableOnlyRemoveRedundantCast(ArithmeticOptimizer* optimizer) {
    DisableAllStages(optimizer);
    optimizer->options_.remove_redundant_cast = true;
  }

  void EnableOnlyRemoveRedundantReshape(ArithmeticOptimizer* optimizer) {
    DisableAllStages(optimizer);
    optimizer->options_.remove_redundant_reshape = true;
  }

  void EnableOnlyRemoveNegation(ArithmeticOptimizer* optimizer) {
    DisableAllStages(optimizer);
    optimizer->options_.remove_negation = true;
  }

  void EnableOnlyReorderCastAndTranspose(ArithmeticOptimizer* optimizer) {
    DisableAllStages(optimizer);
    optimizer->options_.reorder_cast_and_transpose = true;
  }

  void EnableOnlyReplaceMulWithSquare(ArithmeticOptimizer* optimizer) {
    DisableAllStages(optimizer);
    optimizer->options_.replace_mul_with_square = true;
  }

  void EnableOnlyHoistCWiseUnaryChains(ArithmeticOptimizer* optimizer) {
    DisableAllStages(optimizer);
    optimizer->options_.hoist_cwise_unary_chains = true;
  }

  void EnableOnlySqrtDivToRsqrtMul(ArithmeticOptimizer* optimizer) {
    DisableAllStages(optimizer);
    optimizer->options_.convert_sqrt_div_to_rsqrt_mul = true;
  }

  void EnableOnlyConvertPow(ArithmeticOptimizer* optimizer) {
    DisableAllStages(optimizer);
    optimizer->options_.convert_pow = true;
  }

  void EnableOnlyRemoveIdempotent(ArithmeticOptimizer* optimizer) {
    DisableAllStages(optimizer);
    optimizer->options_.remove_idempotent = true;
  }

  void EnableOnlyRemoveLogicalNot(ArithmeticOptimizer* optimizer) {
    DisableAllStages(optimizer);
    optimizer->options_.remove_logical_not = true;
  }

  void EnableOnlySimplifyAggregation(ArithmeticOptimizer* optimizer) {
    DisableAllStages(optimizer);
    optimizer->options_.simplify_aggregation = true;
  }

  void EnableOnlyLog1p(ArithmeticOptimizer* optimizer) {
    DisableAllStages(optimizer);
    optimizer->options_.convert_log1p = true;
  }

  void EnableOnlyOptimizeMaxOrMinOfMonotonic(ArithmeticOptimizer* optimizer) {
    DisableAllStages(optimizer);
    optimizer->options_.optimize_max_or_min_of_monotonic = true;
  }

  void EnableOnlyExpm1(ArithmeticOptimizer* optimizer) {
    DisableAllStages(optimizer);
    optimizer->options_.convert_expm1 = true;
  }

  void EnableOnlyUnaryOpsComposition(ArithmeticOptimizer* optimizer) {
    DisableAllStages(optimizer);
    optimizer->options_.unary_ops_composition = true;
  }
};
TEST_F(ArithmeticOptimizerTest, NoOp) {
  // This trivial graph is so basic there's nothing to optimize.
  TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {"CPU:0"});
  GrapplerItem item;
  CHECK(fake_input.NextItem(&item));

  ArithmeticOptimizer optimizer;
  GraphDef output;
  Status status = optimizer.Optimize(nullptr, item, &output);
  TF_EXPECT_OK(status);

  // The optimizer must leave the graph node-for-node unchanged.
  VerifyGraphsMatch(item.graph, output, __LINE__);
}
TEST_F(ArithmeticOptimizerTest, OpDedupping) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  // c1 and c2 are identical constants, so dedup should collapse them.
  Output c1 = ops::Const(s.WithOpName("c1"), {3.14, 2.7}, {1, 2});
  Output c2 = ops::Const(s.WithOpName("c2"), {3.14, 2.7}, {1, 2});
  Output div = ops::Div(s.WithOpName("div"), c1, c2);
  GrapplerItem item;
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  item.fetch = {"div"};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
  EXPECT_EQ(1, tensors_expected.size());

  ArithmeticOptimizer optimizer;
  GraphDef output;
  OptimizeTwice(&optimizer, &item, &output);
  NodeMap node_map(&output);
  // Only c1 and div should remain, with div reading c1 on both inputs.
  EXPECT_EQ(2, output.node_size());
  const NodeDef* new_c1 = node_map.GetNode("c1");
  ASSERT_NE(new_c1, nullptr);
  const NodeDef* new_div = node_map.GetNode("div");
  ASSERT_NE(new_div, nullptr);
  EXPECT_EQ(2, new_div->input_size());
  EXPECT_EQ("c1", new_div->input(0));
  EXPECT_EQ("c1", new_div->input(1));
  // The rewritten graph must still produce the same value.
  auto tensors = EvaluateNodes(output, item.fetch);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<double>(tensors_expected[0], tensors[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, OpDeduppingAssertAndCheckNumerics) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output p = ops::Placeholder(s, DT_BOOL, ops::Placeholder::Shape({}));
  Output c = ops::Const(s.WithOpName("c"), {3.14, 2.7}, {1, 2});
  // Duplicate CheckNumerics and Assert nodes with identical inputs; dedup
  // should keep one of each.
  auto check1 = ops::CheckNumerics(s.WithOpName("check1"), c, "foo");
  auto check2 = ops::CheckNumerics(s.WithOpName("check2"), c, "foo");
  auto assert1 = ops::Assert(s.WithOpName("assert1"), p, {c});
  auto assert2 = ops::Assert(s.WithOpName("assert2"), p, {c});
  Output div = ops::Div(s.WithOpName("div").WithControlDependencies(
                            {assert1.operation, assert2.operation}),
                        check1, check2);
  GrapplerItem item;
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  item.fetch = {"div"};
  Tensor bool_t(DT_BOOL, TensorShape({}));
  bool_t.scalar<bool>().setConstant(true);
  auto tensors_expected =
      EvaluateNodes(item.graph, item.fetch, {{"Placeholder", bool_t}});
  EXPECT_EQ(1, tensors_expected.size());

  ArithmeticOptimizer optimizer;
  GraphDef output;
  OptimizeTwice(&optimizer, &item, &output);
  NodeMap node_map(&output);

  // check2/assert2 are gone; div reads check1 twice and carries two control
  // dependencies on the surviving assert1.
  EXPECT_EQ(5, output.node_size());
  const NodeDef* new_div = node_map.GetNode("div");
  ASSERT_NE(new_div, nullptr);
  EXPECT_EQ(4, new_div->input_size());
  EXPECT_EQ("check1", new_div->input(0));
  EXPECT_EQ("check1", new_div->input(1));
  EXPECT_EQ("^assert1", new_div->input(2));
  EXPECT_EQ("^assert1", new_div->input(3));

  auto tensors = EvaluateNodes(output, item.fetch, {{"Placeholder", bool_t}});
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<double>(tensors_expected[0], tensors[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, OpDedupCommutative) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output c1 = ops::Const(s.WithOpName("c1"), {1.0f, 2.0f}, {1, 2});
  Output c2 = ops::Const(s.WithOpName("c2"), {3.0f, 4.0f}, {1, 2});
  // mul1 and mul2 take the same inputs in opposite order; since Mul is
  // commutative they should dedup to a single node.
  Output mul1 = ops::Mul(s.WithOpName("mul1"), c1, c2);
  Output mul2 = ops::Mul(s.WithOpName("mul2"), c2, c1);
  Output div1 = ops::Div(s.WithOpName("div1"), mul1, mul2);
  GrapplerItem item;
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  item.fetch = {"div1"};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
  EXPECT_EQ(1, tensors_expected.size());

  ArithmeticOptimizer optimizer;
  GraphDef output;
  OptimizeTwice(&optimizer, &item, &output);
  NodeMap node_map(&output);

  // mul2 is eliminated; div1 reads the surviving mul1 on both inputs.
  EXPECT_EQ(4, output.node_size());
  const NodeDef* new_c1 = node_map.GetNode("c1");
  ASSERT_NE(new_c1, nullptr);
  const NodeDef* new_c2 = node_map.GetNode("c2");
  ASSERT_NE(new_c2, nullptr);
  const NodeDef* new_mul1 = node_map.GetNode("mul1");
  ASSERT_NE(new_mul1, nullptr);
  EXPECT_EQ(2, new_mul1->input_size());
  EXPECT_EQ("c1", new_mul1->input(0));
  EXPECT_EQ("c2", new_mul1->input(1));
  const NodeDef* new_div1 = node_map.GetNode("div1");
  ASSERT_NE(new_div1, nullptr);
  EXPECT_EQ(2, new_div1->input_size());
  EXPECT_EQ("mul1", new_div1->input(0));
  EXPECT_EQ("mul1", new_div1->input(1));

  auto tensors = EvaluateNodes(output, item.fetch);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}
// Mul(c, c) -- both inputs identical -- should be rewritten to Square(c).
// The control dependency on "d" must survive as a control input ("^d").
TEST_F(ArithmeticOptimizerTest, ReplaceMulWithSquare) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output c = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2});
  Output d = ops::Const(s.WithOpName("d"), {3.0f, 4.0f}, {1, 2});
  Output mul = ops::Mul(s.WithControlDependencies(d).WithOpName("mul"), c, c);
  Output id = ops::Identity(s.WithOpName("id"), mul);
  GrapplerItem item;
  item.fetch = {"id"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
  EXPECT_EQ(1, tensors_expected.size());
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyReplaceMulWithSquare(&optimizer);
  OptimizeAndPrune(&optimizer, &item, &output);
  // Remaining nodes: c, d, the rewritten Square, and id.
  EXPECT_EQ(4, output.node_size());
  NodeMap node_map(&output);
  // The rewrite stage names the new node "<stage prefix>_<original name>".
  const string p = "ArithmeticOptimizer/ReplaceMulWithSquare";
  const NodeDef* square_node = node_map.GetNode(strings::StrCat(p, "_", "mul"));
  ASSERT_NE(square_node, nullptr);
  EXPECT_EQ("Square", square_node->op());
  EXPECT_EQ("c", square_node->input(0));
  EXPECT_EQ("^d", square_node->input(1));
  // The optimized graph must produce the same value as the original.
  auto tensors = EvaluateNodes(output, item.fetch);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}
// Directly nested involutions -- Neg(Neg(x)) and Reciprocal(Reciprocal(x)) --
// cancel out pairwise and should be removed entirely.
TEST_F(ArithmeticOptimizerTest, RemoveInvolution_AdjacentNodes) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  auto c = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2});
  auto neg1 = ops::Neg(s.WithOpName("neg1"), c);
  auto neg2 = ops::Neg(s.WithOpName("neg2"), neg1);
  auto recip1 = ops::Reciprocal(s.WithOpName("recip1"), neg2);
  auto recip2 = ops::Reciprocal(s.WithOpName("recip2"), recip1);
  auto id = ops::Identity(s.WithOpName("id"), recip2);
  GrapplerItem item;
  item.fetch = {"id"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
  EXPECT_EQ(1, tensors_expected.size());
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyRemoveInvolution(&optimizer);
  OptimizeAndPrune(&optimizer, &item, &output);
  // Negation and Reciprocal nodes cancelled each other.
  // Only "c" and "id" survive pruning; "id" now reads "c" directly.
  // NOTE(review): this assumes "id" lands at index 1 of the pruned graph.
  EXPECT_EQ(2, output.node_size());
  EXPECT_EQ("id", output.node(1).name());
  EXPECT_EQ("c", output.node(1).input(0));
  auto tensors = EvaluateNodes(output, item.fetch);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}
// An involution pair (Reciprocal ... Reciprocal) separated by a chain of
// value-preserving ops (Identity, Squeeze) should still be detected and
// removed, wiring the const directly into the chain.
TEST_F(ArithmeticOptimizerTest, RemoveInvolution_AroundValuePreservingChain) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  auto c = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2});
  auto recip1 = ops::Reciprocal(s.WithOpName("recip1"), c);
  auto id1 = ops::Identity(s.WithOpName("id1"), recip1);
  auto squeeze = ops::Squeeze(s.WithOpName("squeeze"), id1);
  auto recip2 = ops::Reciprocal(s.WithOpName("recip2"), squeeze);
  auto id2 = ops::Identity(s.WithOpName("id2"), recip2);
  std::vector<string> fetch = {"id2"};
  GrapplerItem item;
  item.fetch = fetch;
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto tensors_expected = EvaluateNodes(item.graph, fetch);
  EXPECT_EQ(1, tensors_expected.size());
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyRemoveInvolution(&optimizer);
  OptimizeTwiceAndPrune(&optimizer, &item, &output);
  // Check that Reciprocal nodes were removed from the graph.
  // Three nodes remain ("id1" is pruned along with the reciprocals).
  EXPECT_EQ(3, output.node_size());
  // And const directly flows into squeeze.
  int found = 0;
  for (const NodeDef& node : output.node()) {
    if (node.name() == "squeeze") {
      EXPECT_EQ("c", node.input(0));
      found++;
    } else if (node.name() == "id2") {
      EXPECT_EQ("squeeze", node.input(0));
      found++;
    }
  }
  EXPECT_EQ(2, found);  // both rewired nodes must be present
  auto tensors = EvaluateNodes(output, fetch);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}
// "recip2" depends on "squeeze" only through a control edge; its data input
// is "c", so there is no Reciprocal(Reciprocal(...)) data chain to remove
// and the optimizer must be a noop.
TEST_F(ArithmeticOptimizerTest, RemoveInvolution_SkipControlDependencies) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  auto c = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2});
  auto recip1 = ops::Reciprocal(s.WithOpName("recip1"), c);
  auto id1 = ops::Identity(s.WithOpName("id1"), recip1);
  auto squeeze = ops::Squeeze(s.WithOpName("squeeze"), id1);
  auto recip2 = ops::Reciprocal(
      s.WithOpName("recip2").WithControlDependencies(squeeze), c);
  auto id2 = ops::Identity(s.WithOpName("id2"), recip2);
  std::vector<string> fetch = {"id2"};
  GrapplerItem item;
  item.fetch = fetch;
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto tensors_expected = EvaluateNodes(item.graph, fetch);
  EXPECT_EQ(1, tensors_expected.size());
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyRemoveInvolution(&optimizer);
  OptimizeTwice(&optimizer, &item, &output);  // do not prune in this test
  // The optimizer should be a noop.
  VerifyGraphsMatch(item.graph, output, __LINE__);
  auto tensors = EvaluateNodes(output, fetch);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}
// add = x + x should be rewritten into Mul(Const(2), x).
TEST_F(ArithmeticOptimizerTest, TrivialSumsSimple) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
  Output add = ops::Add(s.WithOpName("add"), x, x);
  Output id = ops::Identity(s.WithOpName("id"), add);
  GrapplerItem item;
  item.fetch = {"id"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
  EXPECT_EQ(1, tensors_expected.size());
  ArithmeticOptimizer optimizer;
  GraphDef output;
  OptimizeTwice(&optimizer, &item, &output);
  NodeMap node_map(&output);
  EXPECT_EQ(5, output.node_size());
  const string optimized_const_name = AggregationConstName("add");
  const string optimized_mul_name = AggregationMulName("add");
  const NodeDef* new_const = node_map.GetNode(optimized_const_name);
  ASSERT_NE(new_const, nullptr);
  // The generated Const carries a control input from "x".
  EXPECT_EQ("^x", new_const->input(0));
  // "\0\0\0@" is the little-endian encoding of float 2.0f (0x40000000).
  EXPECT_EQ(string("\0\0\0@", 4),
            new_const->attr().at("value").tensor().tensor_content());
  const NodeDef* new_mul = node_map.GetNode(optimized_mul_name);
  ASSERT_NE(new_mul, nullptr);
  EXPECT_EQ(optimized_const_name, new_mul->input(0));
  EXPECT_EQ("x", new_mul->input(1));
  const NodeDef* new_id = node_map.GetNode("id");
  ASSERT_NE(new_id, nullptr);
  EXPECT_EQ(optimized_mul_name, new_id->input(0));
  auto tensors = EvaluateNodes(output, item.fetch);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}
// Same rewrite as TrivialSumsSimple (x + x -> 2 * x), but the original Add
// carries a control dependency on "y", which must be preserved ("^y") on
// the generated Mul.
TEST_F(ArithmeticOptimizerTest, TrivialSumsSimpleWithControlDep) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output y = ops::Const(s.WithOpName("y"), {1.0f, 2.0f}, {1, 2});
  Output x = ops::Const(s.WithOpName("x"), {3.0f, 4.0f}, {1, 2});
  Output add = ops::Add(s.WithOpName("add").WithControlDependencies(y), x, x);
  Output id = ops::Identity(s.WithOpName("id"), add);
  GrapplerItem item;
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  std::vector<string> fetch = {"id"};
  auto tensors_expected = EvaluateNodes(item.graph, fetch);
  EXPECT_EQ(1, tensors_expected.size());
  ArithmeticOptimizer optimizer;
  GraphDef output;
  OptimizeTwice(&optimizer, &item, &output);
  NodeMap node_map(&output);
  EXPECT_EQ(6, output.node_size());
  const string optimized_const_name = AggregationConstName("add");
  const string optimized_mul_name = AggregationMulName("add");
  const NodeDef* new_const = node_map.GetNode(optimized_const_name);
  ASSERT_NE(new_const, nullptr);
  EXPECT_EQ("^x", new_const->input(0));
  // "\0\0\0@" is the little-endian encoding of float 2.0f (0x40000000).
  EXPECT_EQ(string("\0\0\0@", 4),
            new_const->attr().at("value").tensor().tensor_content());
  const NodeDef* new_mul = node_map.GetNode(optimized_mul_name);
  ASSERT_NE(new_mul, nullptr);
  EXPECT_EQ(optimized_const_name, new_mul->input(0));
  EXPECT_EQ("x", new_mul->input(1));
  // The control dependency of the original Add is carried over.
  EXPECT_EQ("^y", new_mul->input(2));
  const NodeDef* new_id = node_map.GetNode("id");
  ASSERT_NE(new_id, nullptr);
  EXPECT_EQ(optimized_mul_name, new_id->input(0));
  auto tensors = EvaluateNodes(output, fetch);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}
// A tree of x+x additions spread across devices: each Add(p, p) collapses
// into Const(2) * p, and the multiplications are hoisted above the adds,
// leaving Mul(p, Add_6(Add_4(c, c), Add_5(c, c))).
TEST_F(ArithmeticOptimizerTest, TrivialSumsRepeatedAdd) {
  // Test case from b/69059093.
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output p = ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({10, 10}));
  Output add = ops::Add(s.WithOpName("Add"), p, p);
  Output add1 = ops::Add(s.WithOpName("Add_1"), p, p);
  Output add4 = ops::Add(s.WithOpName("Add_4"), add, add1);
  Output add5 = ops::Add(s.WithOpName("Add_5"), add, add1);
  Output add6 = ops::Add(s.WithOpName("Add_6"), add4, add5);
  Output id = ops::Identity(s.WithOpName("id"), add6);
  GrapplerItem item;
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  // NOTE(review): the device assignment below relies on the node order in
  // item.graph matching the order of this list -- keep construction order
  // above in sync with it.
  const std::vector<string> devices{
      "/device:CPU:0", "/device:GPU:0", "/device:CPU:0", "/device:GPU:1",
      "/device:CPU:0", "/device:CPU:0", "/device:CPU:0",
  };
  for (int i = 0; i < item.graph.node_size(); ++i) {
    item.graph.mutable_node(i)->set_device(devices[i]);
  }
  ArithmeticOptimizer optimizer;
  DisableAddToAddNCombining(&optimizer);
  GraphDef output;
  OptimizeTwice(&optimizer, &item, &output);
  // We expect the following rewrite(s) to occur:
  //
  // Mul(p,
  //     Add_6(Add_4(Const(2), Const(2)),
  //           Add_5(Const(2), Const(2))))
  NodeMap node_map(&output);
  EXPECT_EQ(17, output.node_size());
  const NodeDef* id_node = node_map.GetNode("id");
  ASSERT_NE(id_node, nullptr);
  EXPECT_EQ(1, id_node->input_size());
  EXPECT_EQ(HoistMulName("Add_6"), id_node->input(0));
  const NodeDef* mul_node = node_map.GetNode(HoistMulName("Add_6"));
  ASSERT_NE(mul_node, nullptr);
  EXPECT_EQ(2, mul_node->input_size());
  EXPECT_EQ("Placeholder", mul_node->input(0));
  EXPECT_EQ(HoistAddName("Add_6"), mul_node->input(1));
  const NodeDef* add_6_node = node_map.GetNode(HoistAddName("Add_6"));
  ASSERT_NE(add_6_node, nullptr);
  EXPECT_EQ(2, add_6_node->input_size());
  EXPECT_EQ(HoistAddName("Add_4"), add_6_node->input(0));
  EXPECT_EQ(HoistAddName("Add_5"), add_6_node->input(1));
  const NodeDef* add_4_node = node_map.GetNode(HoistAddName("Add_4"));
  ASSERT_NE(add_4_node, nullptr);
  EXPECT_EQ("Add", add_4_node->op());
  EXPECT_EQ(2, add_4_node->input_size());
  EXPECT_EQ(AggregationConstName("Add"), add_4_node->input(0));
  EXPECT_EQ(AggregationConstName("Add_1"), add_4_node->input(1));
  const NodeDef* add_5_node = node_map.GetNode(HoistAddName("Add_5"));
  ASSERT_NE(add_5_node, nullptr);
  EXPECT_EQ("Add", add_5_node->op());
  EXPECT_EQ(2, add_5_node->input_size());
  EXPECT_EQ(AggregationConstName("Add"), add_5_node->input(0));
  EXPECT_EQ(AggregationConstName("Add_1"), add_5_node->input(1));
  // The generated constants keep a control edge back to the placeholder.
  const NodeDef* add_const_node = node_map.GetNode(AggregationConstName("Add"));
  ASSERT_NE(add_const_node, nullptr);
  EXPECT_EQ("Const", add_const_node->op());
  EXPECT_EQ(1, add_const_node->input_size());
  EXPECT_EQ("^Placeholder", add_const_node->input(0));
  const NodeDef* add_1_const_node =
      node_map.GetNode(AggregationConstName("Add_1"));
  ASSERT_NE(add_1_const_node, nullptr);
  EXPECT_EQ("Const", add_1_const_node->op());
  EXPECT_EQ(1, add_1_const_node->input_size());
  EXPECT_EQ("^Placeholder", add_1_const_node->input(0));
}
// Hoisting a common factor: Add(x*y1, y2*x) becomes x * (y1 + y2).
// Exercises all combinations of matching/mismatching operand shapes and
// Add vs. AddN as the aggregation op.
TEST_F(ArithmeticOptimizerTest, HoistFactorMul) {
  for (bool matching_shapes : {true, false}) {
    for (bool use_addn : {true, false}) {
      tensorflow::Scope s = tensorflow::Scope::NewRootScope();
      Output x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
      Output y1 = ops::Const(s.WithOpName("y1"), {3.0f, 4.0f}, {1, 2});
      Output y2 = matching_shapes
                      ? ops::Const(s.WithOpName("y2"), {5.0f, 6.0f}, {1, 2})
                      : ops::Const(s.WithOpName("y2"), {5.0f}, {1, 1});
      Output mul1 = ops::Mul(s.WithOpName("mul1"), x, y1);
      Output mul2 = ops::Mul(s.WithOpName("mul2"), y2, x);
      Output id =
          use_addn ? ops::Identity(s.WithOpName("id"),
                                   ops::AddN(s.WithOpName("add"), {mul1, mul2}))
                   : ops::Identity(s.WithOpName("id"),
                                   ops::Add(s.WithOpName("add"), mul1, mul2));
      GrapplerItem item;
      item.fetch = {"id"};
      TF_CHECK_OK(s.ToGraphDef(&item.graph));
      auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
      EXPECT_EQ(1, tensors_expected.size());
      ArithmeticOptimizer optimizer;
      EnableOnlyHoistCommonFactor(&optimizer);
      GraphDef output;
      OptimizeTwice(&optimizer, &item, &output);
      // We expect the following rewrite(s) to occur:
      //   Add(Mul(x, y1), Mul(y2, x))  ->  Mul(x, Add(y1, y2))
      //
      // If "root" op is AddN and shapes do not match, this rewrite is not
      // possible and graph should stay intact.
      NodeMap node_map(&output);
      if (use_addn && !matching_shapes) {
        VerifyGraphsMatch(item.graph, output, __LINE__);
      } else {
        EXPECT_EQ(9, output.node_size());
        const NodeDef* new_add_node = node_map.GetNode(HoistAddName("add"));
        ASSERT_NE(new_add_node, nullptr) << "Hoisted Add node not found";
        EXPECT_EQ("y1", new_add_node->input(0));
        EXPECT_EQ("y2", new_add_node->input(1));
        const NodeDef* new_mul_node = node_map.GetNode(HoistMulName("add"));
        ASSERT_NE(new_mul_node, nullptr) << "Hoisted Mul node not found";
        EXPECT_EQ("x", new_mul_node->input(0));
        EXPECT_EQ(new_add_node->name(), new_mul_node->input(1));
        const NodeDef* id_node = node_map.GetNode("id");
        ASSERT_NE(id_node, nullptr) << "Id node not found";
        EXPECT_EQ("id", id_node->name());
        EXPECT_EQ(HoistMulName("add"), id_node->input(0));
      }
      auto tensors = EvaluateNodes(output, item.fetch);
      EXPECT_EQ(1, tensors.size());
      test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
    }
  }
}
// Hoisting a common denominator: Add(y1/x, y2/x) becomes (y1 + y2)/x.
// The rewrite is skipped entirely for integer inputs (graphs must match);
// presumably because truncating integer division would make the rewrite
// value-changing -- NOTE(review): confirm against the optimizer source.
TEST_F(ArithmeticOptimizerTest, HoistFactorDiv) {
  for (bool matching_shapes : {true, false}) {
    for (bool use_addn : {true, false}) {
      for (bool use_ints : {true, false}) {
        tensorflow::Scope s = tensorflow::Scope::NewRootScope();
        Output x = use_ints
                       ? ops::Const(s.WithOpName("x"), {1, 2}, {1, 2})
                       : ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
        Output y1 = use_ints
                        ? ops::Const(s.WithOpName("y1"), {3, 4}, {1, 2})
                        : ops::Const(s.WithOpName("y1"), {3.0f, 4.0f}, {1, 2});
        Output y2;
        if (matching_shapes) {
          y2 = use_ints ? ops::Const(s.WithOpName("y2"), {5, 6}, {1, 2})
                        : ops::Const(s.WithOpName("y2"), {5.0f, 6.0f}, {1, 2});
        } else {
          y2 = use_ints ? ops::Const(s.WithOpName("y2"), {5}, {1, 1})
                        : ops::Const(s.WithOpName("y2"), {5.0f}, {1, 1});
        }
        Output div1 = ops::Div(s.WithOpName("div1"), y1, x);
        Output div2 = ops::Div(s.WithOpName("div2"), y2, x);
        Output id =
            use_addn
                ? ops::Identity(s.WithOpName("id"),
                                ops::AddN(s.WithOpName("add"), {div1, div2}))
                : ops::Identity(s.WithOpName("id"),
                                ops::Add(s.WithOpName("add"), div1, div2));
        GrapplerItem item;
        item.fetch = {"id"};
        TF_CHECK_OK(s.ToGraphDef(&item.graph));
        auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
        EXPECT_EQ(1, tensors_expected.size());
        ArithmeticOptimizer optimizer;
        EnableOnlyHoistCommonFactor(&optimizer);
        GraphDef output;
        OptimizeTwice(&optimizer, &item, &output);
        // We expect the following rewrite(s) to occur:
        //   Add(Div(y1, x), Div(y2, x))  ->  Div(Add(y1, y2), x)
        //
        // If "root" op is AddN and shapes do not match, this rewrite is not
        // possible and graph should stay intact. The same holds for integer
        // inputs (see the use_ints condition below).
        NodeMap node_map(&output);
        if ((use_addn && !matching_shapes) || use_ints) {
          VerifyGraphsMatch(item.graph, output, __LINE__);
        } else {
          EXPECT_EQ(9, output.node_size());
          const NodeDef* new_add_node = node_map.GetNode(HoistAddName("add"));
          ASSERT_TRUE(new_add_node != nullptr) << "Hoisted Add node not found";
          EXPECT_EQ("y1", new_add_node->input(0));
          EXPECT_EQ("y2", new_add_node->input(1));
          const NodeDef* new_div_node = node_map.GetNode(HoistDivName("add"));
          ASSERT_TRUE(new_div_node != nullptr) << "Hoisted Div node not found";
          EXPECT_EQ(new_add_node->name(), new_div_node->input(0));
          EXPECT_EQ("x", new_div_node->input(1));
          const NodeDef* id_node = node_map.GetNode("id");
          ASSERT_TRUE(id_node != nullptr) << "Id node not found";
          EXPECT_EQ("id", id_node->name());
          EXPECT_EQ(HoistDivName("add"), id_node->input(0));
        }
        auto tensors = EvaluateNodes(output, item.fetch);
        EXPECT_EQ(1, tensors.size());
        if (use_ints) {
          test::ExpectTensorEqual<int32>(tensors_expected[0], tensors[0]);
        } else {
          test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
        }
      }
    }
  }
}
// Transpose(Conj(z)) should be fused into a single ConjugateTranspose op.
TEST_F(ArithmeticOptimizerTest, FuseConjAndTranspose) {
  tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
  Output real_part =
      ops::Const(scope.WithOpName("re"), {1.0f, 2.0f, 3.0f, 4.0f}, {2, 2});
  Output imag_part =
      ops::Const(scope.WithOpName("im"), {5.0f, 6.0f, 7.0f, 8.0f}, {2, 2});
  Output complex_in = ops::Complex(scope.WithOpName("z"), real_part, imag_part);
  Output permutation = ops::Const(scope.WithOpName("perm"), {1, 0}, {2});
  Output conjugated = ops::Conj(scope.WithOpName("conj"), complex_in);
  Output transposed =
      ops::Transpose(scope.WithOpName("trans"), conjugated, permutation);

  GrapplerItem item;
  item.fetch = {"trans"};
  TF_CHECK_OK(scope.ToGraphDef(&item.graph));
  auto expected = EvaluateNodes(item.graph, item.fetch);
  EXPECT_EQ(1, expected.size());

  ArithmeticOptimizer optimizer;
  GraphDef optimized;
  OptimizeTwice(&optimizer, &item, &optimized);
  NodeMap node_map(&optimized);

  EXPECT_EQ(7, optimized.node_size());
  // The fused node is named "<stage prefix>_<original transpose name>".
  const string prefix = "ArithmeticOptimizer/FoldConjugateIntoTranspose";
  const string fused_name = strings::StrCat(prefix, "_", "trans");
  const NodeDef* fused_node = node_map.GetNode(fused_name);
  ASSERT_NE(fused_node, nullptr);
  EXPECT_EQ("ConjugateTranspose", fused_node->op());
  EXPECT_EQ("z", fused_node->input(0));
  EXPECT_EQ("perm", fused_node->input(1));

  auto result = EvaluateNodes(optimized, item.fetch);
  EXPECT_EQ(1, result.size());
  test::ExpectTensorEqual<complex64>(expected[0], result[0]);
}
// ConjugateTranspose(Conj(z)) conjugates twice, so the pair collapses into
// a plain Transpose of z.
TEST_F(ArithmeticOptimizerTest, FuseConjAndConjugateTranspose) {
  tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
  Output real_part =
      ops::Const(scope.WithOpName("re"), {1.0f, 2.0f, 3.0f, 4.0f}, {2, 2});
  Output imag_part =
      ops::Const(scope.WithOpName("im"), {5.0f, 6.0f, 7.0f, 8.0f}, {2, 2});
  Output complex_in = ops::Complex(scope.WithOpName("z"), real_part, imag_part);
  Output permutation = ops::Const(scope.WithOpName("perm"), {1, 0}, {2});
  Output conjugated = ops::Conj(scope.WithOpName("conj"), complex_in);
  Output transposed = ops::ConjugateTranspose(
      scope.WithOpName("conjugate_trans"), conjugated, permutation);

  GrapplerItem item;
  item.fetch = {"conjugate_trans"};
  TF_CHECK_OK(scope.ToGraphDef(&item.graph));
  auto expected = EvaluateNodes(item.graph, item.fetch);
  EXPECT_EQ(1, expected.size());

  ArithmeticOptimizer optimizer;
  GraphDef optimized;
  OptimizeTwice(&optimizer, &item, &optimized);
  NodeMap node_map(&optimized);

  EXPECT_EQ(7, optimized.node_size());
  const string prefix = "ArithmeticOptimizer/FoldConjugateIntoTranspose";
  const string fused_name = strings::StrCat(prefix, "_", "conjugate_trans");
  const NodeDef* fused_node = node_map.GetNode(fused_name);
  ASSERT_NE(fused_node, nullptr);
  // The double conjugation cancels, leaving an ordinary Transpose.
  EXPECT_EQ("Transpose", fused_node->op());
  EXPECT_EQ("z", fused_node->input(0));
  EXPECT_EQ("perm", fused_node->input(1));

  auto result = EvaluateNodes(optimized, item.fetch);
  EXPECT_EQ(1, result.size());
  test::ExpectTensorEqual<complex64>(expected[0], result[0]);
}
// Conj(Transpose(z)) -- conjugation applied after the transpose -- also
// fuses into a single ConjugateTranspose node.
TEST_F(ArithmeticOptimizerTest, FuseTransposeAndConj) {
  tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
  Output real_part =
      ops::Const(scope.WithOpName("re"), {1.0f, 2.0f, 3.0f, 4.0f}, {2, 2});
  Output imag_part =
      ops::Const(scope.WithOpName("im"), {5.0f, 6.0f, 7.0f, 8.0f}, {2, 2});
  Output complex_in = ops::Complex(scope.WithOpName("z"), real_part, imag_part);
  Output permutation = ops::Const(scope.WithOpName("perm"), {1, 0}, {2});
  Output transposed =
      ops::Transpose(scope.WithOpName("trans"), complex_in, permutation);
  Output conjugated = ops::Conj(scope.WithOpName("conj"), transposed);

  GrapplerItem item;
  item.fetch = {"conj"};
  TF_CHECK_OK(scope.ToGraphDef(&item.graph));
  auto expected = EvaluateNodes(item.graph, item.fetch);
  EXPECT_EQ(1, expected.size());

  ArithmeticOptimizer optimizer;
  GraphDef optimized;
  OptimizeTwice(&optimizer, &item, &optimized);
  NodeMap node_map(&optimized);

  EXPECT_EQ(7, optimized.node_size());
  // Here the fused node inherits the name of the Conj (the chain's root).
  const string prefix = "ArithmeticOptimizer/FoldConjugateIntoTranspose";
  const string fused_name = strings::StrCat(prefix, "_", "conj");
  const NodeDef* fused_node = node_map.GetNode(fused_name);
  ASSERT_NE(fused_node, nullptr);
  EXPECT_EQ("ConjugateTranspose", fused_node->op());
  EXPECT_EQ("z", fused_node->input(0));
  EXPECT_EQ("perm", fused_node->input(1));

  auto result = EvaluateNodes(optimized, item.fetch);
  EXPECT_EQ(1, result.size());
  test::ExpectTensorEqual<complex64>(expected[0], result[0]);
}
// Transposed inputs to {MatMul, SparseMatMul, BatchMatMul} should be folded
// into the matmul itself via the transpose_a/transpose_b attributes
// (adj_x/adj_y for BatchMatMul).
TEST_F(ArithmeticOptimizerTest, FoldTransposeIntoMatMul) {
  for (const string matmul_type : {"MatMul", "SparseMatMul", "BatchMatMul"}) {
    tensorflow::Scope s = tensorflow::Scope::NewRootScope();
    Output a = ops::Const(s.WithOpName("a"), {1.0f, 2.0f, 3.0f, 4.0f}, {2, 2});
    Output b = ops::Const(s.WithOpName("b"), {5.0f, 6.0f, 7.0f, 8.0f}, {2, 2});
    Output perm = ops::Const(s.WithOpName("perm"), {1, 0}, {2});
    Output trans_a = ops::Transpose(s.WithOpName("trans_a"), a, perm);
    Output trans_b = ops::Transpose(s.WithOpName("trans_b"), b, perm);
    auto matmul_op = s.WithOpName("matmul");
    // The branch-local Output variables are intentionally unused:
    // constructing the op registers it in the scope's graph as "matmul".
    if (matmul_type == "MatMul") {
      Output matmul = ops::MatMul(matmul_op, trans_a, trans_b);
    } else if (matmul_type == "SparseMatMul") {
      Output matmul = ops::SparseMatMul(matmul_op, trans_a, trans_b);
    } else if (matmul_type == "BatchMatMul") {
      Output matmul = ops::BatchMatMul(matmul_op, trans_a, trans_b);
    }
    GrapplerItem item;
    item.fetch = {"matmul"};
    TF_CHECK_OK(s.ToGraphDef(&item.graph));
    auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
    EXPECT_EQ(1, tensors_expected.size());
    ArithmeticOptimizer optimizer;
    EnableOnlyFoldTransposeIntoMatMul(&optimizer);
    GraphDef output;
    OptimizeTwice(&optimizer, &item, &output);
    NodeMap node_map(&output);
    EXPECT_EQ(7, output.node_size());
    const string p = "ArithmeticOptimizer/FoldTransposeIntoMatMul";
    const string optimized_name = strings::StrCat(p, "_", "matmul");
    const NodeDef* matmul_fused_node = node_map.GetNode(optimized_name);
    ASSERT_NE(matmul_fused_node, nullptr);
    // The fused matmul reads "a" and "b" directly; the transposition now
    // lives in the op's attributes.
    EXPECT_EQ("a", matmul_fused_node->input(0));
    EXPECT_EQ("b", matmul_fused_node->input(1));
    if (matmul_type == "BatchMatMul") {
      EXPECT_TRUE(matmul_fused_node->attr().at("adj_x").b());
      EXPECT_TRUE(matmul_fused_node->attr().at("adj_y").b());
    } else {
      EXPECT_TRUE(matmul_fused_node->attr().at("transpose_a").b());
      EXPECT_TRUE(matmul_fused_node->attr().at("transpose_b").b());
    }
    auto tensors = EvaluateNodes(output, item.fetch);
    EXPECT_EQ(1, tensors.size());
    test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
  }
}
// ConjugateTranspose on both BatchMatMul inputs folds into the matmul's
// adj_x/adj_y attributes (the adjoint is exactly a conjugate transpose).
TEST_F(ArithmeticOptimizerTest, FoldConjugateTransposeIntoBatchMatMul) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output re_a =
      ops::Const(s.WithOpName("re_a"), {1.0f, 2.0f, 3.0f, 4.0f}, {2, 2});
  Output im_a =
      ops::Const(s.WithOpName("im_a"), {-1.0f, -2.0f, -3.0f, -4.0f}, {2, 2});
  Output re_b =
      ops::Const(s.WithOpName("re_b"), {5.0f, 6.0f, 7.0f, 8.0f}, {2, 2});
  Output im_b =
      ops::Const(s.WithOpName("im_b"), {-5.0f, -6.0f, -7.0f, -8.0f}, {2, 2});
  Output a = ops::Complex(s.WithOpName("a"), re_a, im_a);
  Output b = ops::Complex(s.WithOpName("b"), re_b, im_b);
  Output perm = ops::Const(s.WithOpName("perm"), {1, 0}, {2});
  Output trans_a = ops::ConjugateTranspose(s.WithOpName("trans_a"), a, perm);
  Output trans_b = ops::ConjugateTranspose(s.WithOpName("trans_b"), b, perm);
  Output matmul = ops::BatchMatMul(s.WithOpName("matmul"), trans_a, trans_b);
  GrapplerItem item;
  item.fetch = {"matmul"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
  EXPECT_EQ(1, tensors_expected.size());
  ArithmeticOptimizer optimizer;
  GraphDef output;
  OptimizeTwice(&optimizer, &item, &output);
  NodeMap node_map(&output);
  ASSERT_EQ(11, output.node_size());
  const string p = "ArithmeticOptimizer/FoldTransposeIntoMatMul";
  const string optimized_name = strings::StrCat(p, "_", "matmul");
  const NodeDef* optimized_matmul = node_map.GetNode(optimized_name);
  ASSERT_NE(optimized_matmul, nullptr);
  // Both inputs feed in directly; the adjoints moved to attributes.
  EXPECT_EQ("a", optimized_matmul->input(0));
  EXPECT_EQ("b", optimized_matmul->input(1));
  EXPECT_TRUE(optimized_matmul->attr().at("adj_x").b());
  EXPECT_TRUE(optimized_matmul->attr().at("adj_y").b());
  auto tensors = EvaluateNodes(output, item.fetch);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<complex64>(tensors_expected[0], tensors[0], 1e-6);
}
// The reshape target is the input's own shape reassembled (batch dim taken
// from Shape(inputs), concatenated with the static [3, 28, 28]), so the
// reshape is an identity and should be removed.
TEST_F(ArithmeticOptimizerTest, RemoveRedundantReshape_IdentityReshape) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output inputs =
      ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({-1, 3, 28, 28}));
  Output inputs_shape = ops::Shape(s, inputs);
  // The target shape of the reshape is the concatenation of `batch_size` and
  // [3,28,28].
  Output batch_size = ops::Slice(s, inputs_shape, ops::Const(s, {0}, {1}),
                                 ops::Const(s, {1}, {1}));
  Output target_shape = ops::Concat(
      s.WithOpName("target_shape"),
      {batch_size, ops::Const(s, {3, 28, 28}, {3})}, ops::Const(s, {0}, {}));
  Output reshape = ops::Reshape(s, inputs, target_shape);
  Output outputs = ops::Identity(s.WithOpName("outputs"), reshape);
  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3, 3, 28, 28}));
  auto tensors_expected =
      EvaluateNodes(item.graph, item.fetch, {{"Placeholder", x_t}});
  EXPECT_EQ(1, tensors_expected.size());
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyRemoveRedundantReshape(&optimizer);
  OptimizeTwiceAndPrune(&optimizer, &item, &output);
  // No Reshape should remain after the rewrite.
  EXPECT_EQ(0, CountOpNodes(output, "Reshape"));
  auto tensors = EvaluateNodes(output, item.fetch, {{"Placeholder", x_t}});
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}
// Even with several symbolic (-1) dimensions, the target shape is rebuilt
// from the input's own shape, so with AGGRESSIVE mode (feed shape assumed
// valid) the reshape is an identity and is removed.
TEST_F(ArithmeticOptimizerTest,
       RemoveRedundantReshape_IdentityReshapeBetweenSymbolicShapes) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output inputs =
      ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({-1, 3, -1, -1}));
  Output inputs_shape = ops::Shape(s, inputs);
  // The target shape of the reshape is the concatenation of `batch_size`, 3,
  // `height`, and `width`.
  Output batch_size = ops::Slice(s, inputs_shape, ops::Const(s, {0}, {1}),
                                 ops::Const(s, {1}, {1}));
  Output height = ops::Slice(s, inputs_shape, ops::Const(s, {2}, {1}),
                             ops::Const(s, {1}, {1}));
  Output width = ops::Slice(s, inputs_shape, ops::Const(s, {3}, {1}),
                            ops::Const(s, {1}, {1}));
  Output target_shape =
      ops::Concat(s.WithOpName("target_shape"),
                  {batch_size, ops::Const(s, {3}, {1}), height, width},
                  ops::Const(s, {0}, {}));
  Output reshape = ops::Reshape(s, inputs, target_shape);
  Output outputs = ops::Identity(s.WithOpName("outputs"), reshape);
  auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3, 3, 28, 28}));
  GrapplerItem item;
  item.fetch = {"outputs"};
  item.feed = {{"Placeholder", x_t}};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
  EXPECT_EQ(1, tensors_expected.size());
  GraphDef output;
  // Assume valid feed shape in aggressive mode.
  ArithmeticOptimizer optimizer(RewriterConfig::AGGRESSIVE);
  EnableOnlyRemoveRedundantReshape(&optimizer);
  OptimizeTwiceAndPrune(&optimizer, &item, &output);
  EXPECT_EQ(0, CountOpNodes(output, "Reshape"));
  auto tensors = EvaluateNodes(output, item.fetch, item.feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}
// Without AGGRESSIVE mode the optimizer must not trust feed shapes: the
// actual feed may differ from the placeholder's declared shape, so the
// reshape has to survive.
TEST_F(ArithmeticOptimizerTest, RemoveRedundantReshape_NotAssumeValidFeeds) {
  tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
  Output input = ops::Placeholder(scope, DT_FLOAT,
                                  ops::Placeholder::Shape({4, 3, 28, 28}));
  Output shape_const = ops::Const(scope, {4, 3, 28, 28}, {4});
  Output reshaped = ops::Reshape(scope, input, shape_const);
  Output out = ops::Identity(scope.WithOpName("outputs"), reshaped);

  auto feed_tensor =
      GenerateRandomTensor<DT_FLOAT>(TensorShape({4, 3, 28, 28}));
  GrapplerItem item;
  item.fetch = {"outputs"};
  item.feed = {{"Placeholder", feed_tensor}};
  TF_CHECK_OK(scope.ToGraphDef(&item.graph));
  auto expected = EvaluateNodes(item.graph, item.fetch, item.feed);
  EXPECT_EQ(1, expected.size());

  GraphDef optimized;
  ArithmeticOptimizer optimizer;
  EnableOnlyRemoveRedundantReshape(&optimizer);
  OptimizeTwiceAndPrune(&optimizer, &item, &optimized);

  // The reshape is preserved because the shape of the placeholder can be
  // different from the shape of the actual feed.
  EXPECT_EQ(1, CountOpNodes(optimized, "Reshape"));
  auto result = EvaluateNodes(optimized, item.fetch, item.feed);
  EXPECT_EQ(1, result.size());
  test::ExpectTensorNear<float>(expected[0], result[0], 1e-6);
}
// In AGGRESSIVE mode the optimizer trusts that feeds match the placeholder
// shape, so a reshape to the same static shape is removed.
TEST_F(ArithmeticOptimizerTest,
       RemoveRedundantReshape_AssumeValidFeedsInAggressiveMode) {
  tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
  Output input = ops::Placeholder(scope, DT_FLOAT,
                                  ops::Placeholder::Shape({4, 3, 28, 28}));
  Output shape_const = ops::Const(scope, {4, 3, 28, 28}, {4});
  Output reshaped = ops::Reshape(scope, input, shape_const);
  Output out = ops::Identity(scope.WithOpName("outputs"), reshaped);

  auto feed_tensor =
      GenerateRandomTensor<DT_FLOAT>(TensorShape({4, 3, 28, 28}));
  GrapplerItem item;
  item.fetch = {"outputs"};
  item.feed = {{"Placeholder", feed_tensor}};
  TF_CHECK_OK(scope.ToGraphDef(&item.graph));
  auto expected = EvaluateNodes(item.graph, item.fetch, item.feed);
  EXPECT_EQ(1, expected.size());

  GraphDef optimized;
  ArithmeticOptimizer optimizer(RewriterConfig::AGGRESSIVE);
  EnableOnlyRemoveRedundantReshape(&optimizer);
  OptimizeTwiceAndPrune(&optimizer, &item, &optimized);

  // The identity reshape is gone, and the result is unchanged.
  EXPECT_EQ(0, CountOpNodes(optimized, "Reshape"));
  auto result = EvaluateNodes(optimized, item.fetch, item.feed);
  EXPECT_EQ(1, result.size());
  test::ExpectTensorNear<float>(expected[0], result[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, RemoveRedundantReshape_NotIdentityReshape) {
  // Reshape from [-1,3,28,28] to [8,-1,28,28] is not identity, because it can
  // be from [4,3,28,28] to [8,6,28,28].
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output inputs =
      ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({-1, 3, 28, 28}));
  Output reshape = ops::Reshape(s, inputs, ops::Const(s, {8, -1, 28, 28}, {4}));
  Output outputs = ops::Identity(s.WithOpName("outputs"), reshape);
  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({8, 3, 28, 28}));
  item.feed = {{"Placeholder", x_t}};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
  EXPECT_EQ(1, tensors_expected.size());
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyRemoveRedundantReshape(&optimizer);
  OptimizeTwiceAndPrune(&optimizer, &item, &output);
  // The Reshape must be kept: it can genuinely change the shape.
  EXPECT_EQ(1, CountOpNodes(output, "Reshape"));
  auto tensors = EvaluateNodes(output, item.fetch, item.feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}
// A target shape with more than one unknown (-1) dimension cannot be proven
// to be an identity, so the reshape must be preserved.
TEST_F(ArithmeticOptimizerTest,
       RemoveRedundantReshape_NotIdentityReshapeTooManyUnknownDimSizes) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output inputs =
      ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({4, 3}));
  Output reshape = ops::Reshape(s, inputs, ops::Const(s, {-1, -1}, {2}));
  Output outputs = ops::Identity(s.WithOpName("outputs"), reshape);
  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyRemoveRedundantReshape(&optimizer);
  OptimizeTwiceAndPrune(&optimizer, &item, &output);
  // NOTE(review): unlike its sibling tests this one never evaluates the
  // graphs -- presumably because a reshape with two -1 dims cannot be
  // executed; confirm before adding an evaluation step.
  EXPECT_EQ(1, CountOpNodes(output, "Reshape"));
}
TEST_F(ArithmeticOptimizerTest, RemoveRedundantReshape_CombineReshapes) {
  // Converts an NCHW_VECT_C tensor to NHWC and then flattens it to 2D. The two
  // reshapes should be combined.
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output nchw_vect_c =
      ops::Placeholder(s.WithOpName("nchw_vect_c"), DT_INT8,
                       ops::Placeholder::Shape({8, 3, 28, 28, 4}));
  Output transpose =
      ops::Transpose(s.WithOpName("transpose"), nchw_vect_c,
                     ops::Const(s.WithOpName("perm"), {0, 2, 3, 1, 4}, {5}));
  Output nhwc = ops::Reshape(
      s.WithOpName("nhwc"), transpose,
      ops::Const(s.WithOpName("nhwc_shape"), {8, 28, 28, 12}, {4}));
  Output flatten = ops::Reshape(
      s.WithOpName("flatten"), nhwc,
      ops::Const(s.WithOpName("flatten_shape"), {8, 28 * 28 * 12}, {2}));
  Output outputs = ops::Identity(s.WithOpName("outputs"), flatten);
  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto x_t = GenerateRandomTensor<DT_INT8>(TensorShape({8, 3, 28, 28, 4}));
  item.feed = {{"nchw_vect_c", x_t}};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
  EXPECT_EQ(1, tensors_expected.size());
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyRemoveRedundantReshape(&optimizer);
  OptimizeTwiceAndPrune(&optimizer, &item, &output);
  // Only a single Reshape should remain after the two are combined.
  EXPECT_EQ(1, CountOpNodes(output, "Reshape"));
  auto tensors = EvaluateNodes(output, item.fetch, item.feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorEqual<int8>(tensors_expected[0], tensors[0]);
}
// Cast(uint8 -> float) followed by Transpose should be reordered so that the
// Transpose runs on the uint8 input and the Cast consumes the transposed
// result. The test checks the single remaining Transpose has T == DT_UINT8
// and that every Cast reads from it.
TEST_F(ArithmeticOptimizerTest, ReorderTransposeCast) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/gpu:0");
  Output nhwc_uint8 =
      ops::Placeholder(s, DT_UINT8, ops::Placeholder::Shape({8, 28, 28, 3}));
  Output nhwc_fp32 = ops::Cast(s, nhwc_uint8, DT_FLOAT);
  Output nchw_fp32 =
      ops::Transpose(s, nhwc_fp32, ops::Const(s, {0, 3, 1, 2}, {4}));
  Output outputs = ops::Identity(s.WithOpName("outputs"), nchw_fp32);
  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  GraphDef output;
  TF_EXPECT_OK(ArithmeticOptimizer().Optimize(nullptr, item, &output));
  item.graph.Swap(&output);
  TF_EXPECT_OK(ModelPruner().Optimize(nullptr, item, &output));
  const NodeDef* transpose_node = nullptr;
  for (const NodeDef& node : output.node()) {
    if (node.op() == "Transpose") {
      // Exactly one Transpose, and it now operates on the uint8 data.
      EXPECT_EQ(transpose_node, nullptr);
      EXPECT_EQ(DT_UINT8, node.attr().at("T").type());
      transpose_node = &node;
    }
  }
  EXPECT_NE(transpose_node, nullptr);
  for (const NodeDef& node : output.node()) {
    if (node.op() == "Cast") {
      // The Cast was moved after the Transpose.
      EXPECT_EQ(NodeName(node.input(0)), transpose_node->name());
    }
  }
}
// Here the Cast already shrinks the data (float -> uint8) before the
// Transpose, so no reordering should take place: the Transpose keeps reading
// from the Cast and keeps T == DT_UINT8.
TEST_F(ArithmeticOptimizerTest, NoReorderTransposeCast) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/gpu:0");
  Output nhwc_fp32 =
      ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({8, 28, 28, 3}));
  Output nhwc_uint8 = ops::Cast(s, nhwc_fp32, DT_UINT8);
  Output nchw_uint8 =
      ops::Transpose(s, nhwc_uint8, ops::Const(s, {0, 3, 1, 2}, {4}));
  Output outputs = ops::Identity(s.WithOpName("outputs"), nchw_uint8);
  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  GraphDef output;
  TF_EXPECT_OK(ArithmeticOptimizer().Optimize(nullptr, item, &output));
  item.graph.Swap(&output);
  TF_EXPECT_OK(ModelPruner().Optimize(nullptr, item, &output));
  int num_transposes = 0;
  for (const NodeDef& node : output.node()) {
    if (node.op() == "Transpose") {
      EXPECT_EQ(DT_UINT8, node.attr().at("T").type());
      EXPECT_EQ(node.input(0), "Cast");
      ++num_transposes;
    }
  }
  EXPECT_EQ(1, num_transposes);
}
// Transposes that compose to the identity must be removed: perm1 followed by
// perm2 cancels out, and perm3 ({0,1,2,3}) is itself an identity permutation.
// Only the inputs and the fetched Identity nodes should survive pruning.
TEST_F(ArithmeticOptimizerTest, RemoveIdentityTransposes) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output inputs_shape =
      ops::Const(s.WithOpName("inputs_shape"), {8, 3, 28, 28}, {4});
  Output inputs =
      ops::RandomUniform(s.WithOpName("inputs"), inputs_shape, DT_FLOAT);
  Output perm1 = ops::Const(s.WithOpName("perm1"), {0, 2, 3, 1}, {4});
  Output perm2 = ops::Const(s.WithOpName("perm2"), {0, 3, 1, 2}, {4});
  Output perm3 = ops::Const(s.WithOpName("perm3"), {0, 1, 2, 3}, {4});
  Output transpose1 = ops::Transpose(s.WithOpName("transpose1"), inputs, perm1);
  Output transpose2 =
      ops::Transpose(s.WithOpName("transpose2"), transpose1, perm2);
  Output transpose3 = ops::Transpose(s.WithOpName("transpose3"), inputs, perm3);
  Output id1 = ops::Identity(s.WithOpName("id1"), transpose2);
  Output id2 = ops::Identity(s.WithOpName("id2"), transpose3);
  GrapplerItem item;
  item.fetch = {"id1", "id2"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyRemoveIdentityTranspose(&optimizer);
  OptimizeAndPrune(&optimizer, &item, &output);
  std::set<string> nodes_after_optimization;
  for (const NodeDef& node : output.node()) {
    nodes_after_optimization.insert(node.name());
  }
  // All Transpose and perm nodes are gone.
  EXPECT_EQ(nodes_after_optimization,
            std::set<string>({"id1", "id2", "inputs_shape", "inputs"}));
}
// A pair of mutually-inverse Transposes applied to one output of a Split must
// be removed, rewiring the consumer (Concat) directly to the Split outputs.
//
// Note: the original test built an unused "inputs_shape" Const whose value
// ({8, 9, 28, 28}) contradicted the placeholder shape ({8, 12, 28, 28}); the
// dead, misleading node has been removed.
TEST_F(ArithmeticOptimizerTest, RemoveIdentityTransposesMultipleOutputs) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output inputs = ops::Placeholder(s.WithOpName("inputs"), DT_FLOAT,
                                   ops::Placeholder::Shape({8, 12, 28, 28}));
  // Split along axis 1; only the middle branch is transposed NCHW->NHWC and
  // back, which is an identity overall.
  OutputList split = ops::Split(s, ops::Const(s, 1), inputs, 3).output;
  Output perm1 = ops::Const(s, {0, 2, 3, 1}, {4});
  Output perm2 = ops::Const(s, {0, 3, 1, 2}, {4});
  Output branch0 = split[0];
  Output branch1 = ops::Transpose(s, ops::Transpose(s, split[1], perm1), perm2);
  Output branch2 = split[2];
  Output concat = ops::Concat(s, {branch0, branch1, branch2}, ops::Const(s, 1));
  Output outputs = ops::Identity(s.WithOpName("outputs"), concat);
  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({8, 12, 28, 28}));
  item.feed = {{"inputs", x_t}};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
  EXPECT_EQ(1, tensors_expected.size());
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyRemoveIdentityTranspose(&optimizer);
  OptimizeAndPrune(&optimizer, &item, &output);
  for (const NodeDef& node : output.node()) {
    if (node.op() == "Concat") {
      // All three Concat inputs now come straight from the Split outputs.
      EXPECT_EQ(node.input(0), "Split");
      EXPECT_EQ(node.input(1), "Split:1");
      EXPECT_EQ(node.input(2), "Split:2");
    }
  }
  // Numerical result must be unchanged.
  auto tensors = EvaluateNodes(output, item.fetch, item.feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}
// When a removed Transpose pair is the target of a control dependency, the
// dependency must be rewired to the Transposes' data source (the Placeholder)
// rather than dropped.
TEST_F(ArithmeticOptimizerTest, RemoveTransposesWithControlDependency) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output inputs =
      ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({2, 3}));
  Output transpose1 = ops::Transpose(s, inputs, ops::Const(s, {1, 0}));
  Output transpose2 = ops::Transpose(s, transpose1, ops::Const(s, {1, 0}));
  // "outputs" depends on transpose2 only through a control edge.
  Output outputs =
      ops::Identity(s.WithOpName("outputs").WithControlDependencies(transpose2),
                    ops::Const(s.WithOpName("outputs_const"), 1.0f));
  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 3}));
  item.feed = {{"Placeholder", x_t}};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
  EXPECT_EQ(1, tensors_expected.size());
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyRemoveIdentityTranspose(&optimizer);
  OptimizeAndPrune(&optimizer, &item, &output);
  NodeMap node_map(&output);
  const NodeDef* outputs_node = node_map.GetNode("outputs");
  EXPECT_EQ(2, outputs_node->input_size());
  EXPECT_EQ(outputs_node->input(0), "outputs_const");
  // The control edge now points at the Placeholder.
  EXPECT_EQ(outputs_node->input(1), "^Placeholder");
  auto tensors = EvaluateNodes(output, item.fetch, item.feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}
// Applying permutation {1, 2, 3, 0} twice does NOT yield the identity, so
// neither Transpose may be removed; the full graph (6 nodes) survives.
TEST_F(ArithmeticOptimizerTest, NotRemoveTransposes) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output inputs_shape =
      ops::Const(s.WithOpName("inputs_shape"), {8, 3, 28, 28}, {4});
  Output inputs =
      ops::RandomUniform(s.WithOpName("inputs"), inputs_shape, DT_FLOAT);
  Output perm = ops::Const(s.WithOpName("perm"), {1, 2, 3, 0}, {4});
  Output transpose1 = ops::Transpose(s.WithOpName("transpose1"), inputs, perm);
  Output transpose2 =
      ops::Transpose(s.WithOpName("transpose2"), transpose1, perm);
  Output outputs = ops::Identity(s.WithOpName("outputs"), transpose2);
  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyRemoveIdentityTranspose(&optimizer);
  OptimizeAndPrune(&optimizer, &item, &output);
  EXPECT_EQ(6, output.node_size());
}
// In AGGRESSIVE mode, a pair of cancelling Transposes is removed even when an
// Identity node sits between them. The control dependency carried by
// transpose1 (on perm2) must be preserved on the surviving "id" node.
TEST_F(ArithmeticOptimizerTest, RemoveIdentityTransposesThroughChain) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output inputs_shape =
      ops::Const(s.WithOpName("inputs_shape"), {8, 3, 28, 28}, {4});
  Output inputs =
      ops::RandomUniform(s.WithOpName("inputs"), inputs_shape, DT_FLOAT);
  Output perm1 = ops::Const(s.WithOpName("perm1"), {0, 2, 3, 1}, {4});
  Output perm2 = ops::Const(s.WithOpName("perm2"), {0, 3, 1, 2}, {4});
  Output transpose1 = ops::Transpose(
      s.WithOpName("transpose1").WithControlDependencies(perm2), inputs, perm1);
  Output identity = ops::Identity(s.WithOpName("id"), transpose1);
  Output transpose2 =
      ops::Transpose(s.WithOpName("transpose2"), identity, perm2);
  Output id1 = ops::Identity(s.WithOpName("id1"), transpose2);
  GrapplerItem item;
  item.fetch = {"id1"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  GraphDef output;
  ArithmeticOptimizer optimizer(RewriterConfig::AGGRESSIVE);
  EnableOnlyRemoveIdentityTranspose(&optimizer);
  OptimizeAndPrune(&optimizer, &item, &output);
  std::set<string> nodes_after_optimization;
  for (const NodeDef& node : output.node()) {
    nodes_after_optimization.insert(node.name());
    if (node.name() == "id") {
      // "id" now reads inputs directly and keeps the ^perm2 control edge.
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("inputs", node.input(0));
      EXPECT_EQ("^perm2", node.input(1));
    }
    if (node.name() == "id1") {
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("id", node.input(0));
    }
  }
  EXPECT_EQ(nodes_after_optimization,
            std::set<string>({"id", "id1", "inputs_shape", "inputs", "perm2"}));
}
// Mul(inputs, scale) feeding a Conv2D through a Transpose is folded into the
// convolution: the weights input becomes a Mul node and the data input reads
// the transpose of the unscaled inputs.
TEST_F(ArithmeticOptimizerTest, FoldMulToTransposeConv) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output inputs = ops::Placeholder(s.WithOpName("inputs"), DT_FLOAT,
                                   ops::Placeholder::Shape({8, 28, 28, 3}));
  Output scale = ops::Const(s.WithOpName("scale"), 1.0f / 255.0f, {});
  Output scaled_inputs =
      ops::Multiply(s.WithOpName("scaled_inputs"), inputs, scale);
  Output perm_nhwc_to_nchw =
      ops::Const(s.WithOpName("perm_nhwc_to_nchw"), {0, 3, 1, 2}, {4});
  Output inputs_nchw = ops::Transpose(s.WithOpName("inputs_nchw"),
                                      scaled_inputs, perm_nhwc_to_nchw);
  Output weights = ops::Const(s.WithOpName("weights"),
                              Input::Initializer(127.0f, {5, 5, 3, 16}));
  Output conv =
      ops::Conv2D(s.WithOpName("conv"), inputs_nchw, weights, {1, 1, 1, 1},
                  "VALID", ops::Conv2D::DataFormat("NCHW"));
  Output outputs = ops::Identity(s.WithOpName("outputs"), conv);
  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyFoldMultipleIntoConv(&optimizer);
  OptimizeTwiceAndPrune(&optimizer, &item, &output);
  NodeMap node_map(&output);
  // `conv` is now a folded convolution with scaled weights.
  const NodeDef* folded_conv = node_map.GetNode(conv.node()->name());
  ASSERT_NE(folded_conv, nullptr);
  const NodeDef* folded_conv_weights = node_map.GetNode(folded_conv->input(1));
  ASSERT_NE(folded_conv_weights, nullptr);
  EXPECT_EQ("Mul", folded_conv_weights->op());
  // Its input should be a transpose of `inputs`.
  const NodeDef* transpose = node_map.GetNode(NodeName(folded_conv->input(0)));
  ASSERT_NE(transpose, nullptr);
  EXPECT_EQ("inputs", transpose->input(0));
}
// The Multiply must NOT be folded into the Conv2D weights when the Transpose
// between them is preserved: its output is supplied directly via item.feed,
// so rewriting across it would change the fed subgraph.
TEST_F(ArithmeticOptimizerTest, NotFoldMulAcrossPreservedTranspose) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output inputs = ops::Placeholder(s.WithOpName("inputs"), DT_FLOAT,
                                   ops::Placeholder::Shape({8, 28, 28, 3}));
  Output scale = ops::Const(s.WithOpName("scale"), 1.0f / 255.0f, {});
  Output scaled_inputs =
      ops::Multiply(s.WithOpName("scaled_inputs"), inputs, scale);
  Output perm_nhwc_to_nchw =
      ops::Const(s.WithOpName("perm_nhwc_to_nchw"), {0, 3, 1, 2}, {4});
  Output inputs_nchw = ops::Transpose(s.WithOpName("inputs_nchw"),
                                      scaled_inputs, perm_nhwc_to_nchw);
  Output weights = ops::Const(s.WithOpName("weights"),
                              Input::Initializer(127.0f, {5, 5, 3, 16}));
  Output conv =
      ops::Conv2D(s.WithOpName("conv"), inputs_nchw, weights, {1, 1, 1, 1},
                  "VALID", ops::Conv2D::DataFormat("NCHW"));
  Output outputs = ops::Identity(s.WithOpName("outputs"), conv);
  // Zero-filled feed tensor for the transpose output. Use the typed flat()
  // accessor instead of a memset through const_cast of tensor_data(), which
  // wrote through a const buffer pointer.
  Tensor inputs_nchw_tensor(DT_FLOAT, {8, 3, 28, 28});
  inputs_nchw_tensor.flat<float>().setZero();
  GrapplerItem item;
  item.fetch = {"outputs"};
  item.feed = {{"inputs_nchw", inputs_nchw_tensor}};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  GraphDef output;
  TF_EXPECT_OK(ArithmeticOptimizer().Optimize(nullptr, item, &output));
  item.graph.Swap(&output);
  TF_EXPECT_OK(ModelPruner().Optimize(nullptr, item, &output));
  NodeMap node_map(&output);
  // The preserved Transpose must still read from the (unfolded) Multiply.
  const NodeDef* inputs_nchw_node_def =
      node_map.GetNode(inputs_nchw.node()->name());
  EXPECT_EQ(NodeName(inputs_nchw_node_def->input(0)),
            scaled_inputs.node()->name());
}
// Mul(inputs, scale) feeding a Conv3D is folded into the convolution by
// scaling the weights: the conv reads `inputs` directly and its second input
// becomes a Mul node.
TEST_F(ArithmeticOptimizerTest, FoldMulToConv) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output inputs = ops::Placeholder(s.WithOpName("inputs"), DT_FLOAT,
                                   ops::Placeholder::Shape({8, 28, 28, 28, 3}));
  Output scale = ops::Const(s.WithOpName("scale"), 1.0f / 255.0f, {});
  Output scaled_inputs =
      ops::Multiply(s.WithOpName("scaled_inputs"), inputs, scale);
  Output weights = ops::Const(s.WithOpName("weights"),
                              Input::Initializer(127.0f, {5, 5, 5, 3, 16}));
  Output conv = ops::Conv3D(s.WithOpName("conv"), scaled_inputs, weights,
                            {1, 1, 1, 1, 1}, "VALID");
  Output outputs = ops::Identity(s.WithOpName("outputs"), conv);
  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  GraphDef output;
  TF_EXPECT_OK(ArithmeticOptimizer().Optimize(nullptr, item, &output));
  item.graph.Swap(&output);
  TF_EXPECT_OK(ModelPruner().Optimize(nullptr, item, &output));
  NodeMap node_map(&output);
  // `conv` is now a folded convolution on `inputs` and scaled weights.
  // Use gtest ASSERT/EXPECT (consistent with the sibling tests) instead of
  // CHECK_EQ, which would abort the whole test binary on failure, and guard
  // against a null lookup before dereferencing.
  const NodeDef* folded_conv = node_map.GetNode(conv.node()->name());
  ASSERT_NE(folded_conv, nullptr);
  EXPECT_EQ(inputs.node()->name(), NodeName(folded_conv->input(0)));
  EXPECT_EQ("Mul", node_map.GetNode(NodeName(folded_conv->input(1)))->op());
}
TEST_F(ArithmeticOptimizerTest, OptimizeCastMulTransposeConv) {
  // This unit test exercises two optimizations, folding mul into conv, and
  // reordering cast and transpose.
  //
  //   Conv2D(Transpose(Mul(Cast(I), S)), W)
  //     =>
  //   Conv2D(Transpose(Cast(I)), W*S)
  //     =>
  //   Conv2D(Cast(Transpose(I)), W*S)
  tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/gpu:0");
  Output inputs =
      ops::Placeholder(s, DT_UINT8, ops::Placeholder::Shape({8, 28, 28, 3}));
  Output cast = ops::Cast(s, inputs, DT_FLOAT);
  Output mul = ops::Mul(s, cast, ops::Const(s, 1.0f / 255.0f));
  Output transpose =
      ops::Transpose(s, mul, ops::Const(s.WithOpName("perm"), {0, 3, 1, 2}));
  Output weights = ops::Const(s.WithOpName("weights"),
                              Input::Initializer(127.0f, {5, 5, 3, 16}));
  Output conv = ops::Conv2D(s, transpose, weights, {1, 1, 1, 1}, "VALID",
                            ops::Conv2D::DataFormat("NCHW"));
  Output outputs = ops::Identity(s.WithOpName("outputs"), conv);
  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  GraphDef output;
  ArithmeticOptimizer optimizer;  // all optimization stages are on
  OptimizeTwiceAndPrune(&optimizer, &item, &output, /*const_folding=*/true);
  NodeMap node_map(&output);
  // Expected names for reordered cast and transpose.
  const string p = "ArithmeticOptimizer/ReorderCastAndTranspose_";
  const string optimized_cast_name = strings::StrCat(p, "float_Cast");
  const string optimized_transpose_name = strings::StrCat(p, "uint8_Transpose");
  // Expected names for folded multiply and conv.
  const string optimized_weights =
      "ArithmeticOptimizer/FoldMultiplyIntoConv_scaled_Conv2D_weights";
  const NodeDef* inputs_node = node_map.GetNode("Placeholder");
  const NodeDef* transpose_node = node_map.GetNode(optimized_transpose_name);
  const NodeDef* cast_node = node_map.GetNode(optimized_cast_name);
  const NodeDef* weights_node = node_map.GetNode(optimized_weights);
  const NodeDef* conv_node = node_map.GetNode("Conv2D");
  ASSERT_NE(inputs_node, nullptr);
  ASSERT_NE(transpose_node, nullptr);
  ASSERT_NE(cast_node, nullptr);
  ASSERT_NE(weights_node, nullptr);
  ASSERT_NE(conv_node, nullptr);
  EXPECT_EQ(output.node_size(), 7);
  // Final chain: Placeholder -> Transpose -> Cast -> Conv2D(. , W*S).
  EXPECT_EQ(transpose_node->input(0), inputs_node->name());
  EXPECT_EQ(cast_node->input(0), transpose_node->name());
  EXPECT_EQ(conv_node->input(0), cast_node->name());
  EXPECT_EQ(conv_node->input(1), weights_node->name());
}
TEST_F(ArithmeticOptimizerTest, OptimizeMultipleMulTransposeConv) {
  // This unit test exercises optimization of folding mul into conv for
  // multiple nodes in the graph.
  tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/gpu:0");
  GrapplerItem item;
  Output conv[2];
  // Build two independent Mul -> Conv2D chains; both should be folded.
  for (int i = 0; i < 2; ++i) {
    Output inputs =
        ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({8, 3, 28, 28}));
    Output mul = ops::Mul(s, inputs, ops::Const(s, 1.0f / 255.0f));
    Output weights = ops::Const(s.WithOpName("weights"),
                                Input::Initializer(127.0f, {5, 5, 3, 16}));
    conv[i] = ops::Conv2D(s, mul, weights, {1, 1, 1, 1}, "VALID",
                          ops::Conv2D::DataFormat("NCHW"));
  }
  Output outputs = ops::Add(s.WithOpName("outputs"), conv[0], conv[1]);
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyFoldMultipleIntoConv(&optimizer);
  OptimizeTwiceAndPrune(&optimizer, &item, &output, /*const_folding=*/true);
  NodeMap node_map(&output);
  using strings::StrCat;
  // Both convolutions now take optimizer-generated scaled-weight nodes.
  const string p = "ArithmeticOptimizer/FoldMultiplyIntoConv_";
  const string optimized_weights = StrCat(p, "scaled_Conv2D_weights");
  const string optimized_weights_1 = StrCat(p, "scaled_Conv2D_1_weights_1");
  const NodeDef* weights_node = node_map.GetNode(optimized_weights);
  const NodeDef* weights_node_1 = node_map.GetNode(optimized_weights_1);
  const NodeDef* conv_node = node_map.GetNode("Conv2D");
  const NodeDef* conv_node_1 = node_map.GetNode("Conv2D_1");
  ASSERT_NE(weights_node, nullptr);
  ASSERT_NE(weights_node_1, nullptr);
  ASSERT_NE(conv_node, nullptr);
  ASSERT_NE(conv_node_1, nullptr);
  EXPECT_EQ(conv_node->input(1), weights_node->name());
  EXPECT_EQ(conv_node_1->input(1), weights_node_1->name());
}
// Two chained Bitcasts (uint8 -> qint8 -> int8) with distinct source and
// destination types are merged into a single Bitcast.
TEST_F(ArithmeticOptimizerTest, CombineBitcasts) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output inputs = ops::Placeholder(s.WithOpName("inputs"), DT_UINT8,
                                   ops::Placeholder::Shape({2, 3}));
  Output bc1 = ops::Bitcast(s.WithOpName("bc1"), inputs, DT_QINT8);
  Output bc2 = ops::Bitcast(s.WithOpName("bc2"), bc1, DT_INT8);
  Output outputs = ops::Identity(s.WithOpName("outputs"), bc2);
  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto x_t = GenerateRandomTensor<DT_UINT8>(TensorShape({2, 3}));
  item.feed = {{"inputs", x_t}};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
  EXPECT_EQ(1, tensors_expected.size());
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyRemoveRedundantBitcast(&optimizer);
  OptimizeAndPrune(&optimizer, &item, &output);
  NodeMap node_map(&output);
  // Bitcasts combined into a single op and inputs redirected to updated Bitcast
  EXPECT_EQ(3, output.node_size());
  EXPECT_EQ(1, CountOpNodes(output, "Bitcast"));
  EXPECT_TRUE(IsNodesDirectlyConnected(node_map, "inputs", "bc2"));
  auto tensors = EvaluateNodes(output, item.fetch, item.feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorEqual<int8>(tensors_expected[0], tensors[0]);
}
// Chained Bitcasts (int8 -> qint8 -> int8) whose combined effect is a no-op
// (source type == final type) are removed entirely.
TEST_F(ArithmeticOptimizerTest, CombineAndRemoveBitcasts) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output inputs = ops::Placeholder(s.WithOpName("inputs"), DT_INT8,
                                   ops::Placeholder::Shape({2, 3}));
  Output bc1 = ops::Bitcast(s, inputs, DT_QINT8);
  Output bc2 = ops::Bitcast(s, bc1, DT_INT8);
  Output outputs = ops::Identity(s.WithOpName("outputs"), bc2);
  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto x_t = GenerateRandomTensor<DT_INT8>(TensorShape({2, 3}));
  item.feed = {{"inputs", x_t}};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
  EXPECT_EQ(1, tensors_expected.size());
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyRemoveRedundantBitcast(&optimizer);
  OptimizeAndPrune(&optimizer, &item, &output);
  NodeMap node_map(&output);
  // Bitcasts removed and inputs redirected to outputs
  EXPECT_EQ(2, output.node_size());
  EXPECT_EQ(0, CountOpNodes(output, "Bitcast"));
  EXPECT_TRUE(IsNodesDirectlyConnected(node_map, "inputs", "outputs"));
  auto tensors = EvaluateNodes(output, item.fetch, item.feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorEqual<int8>(tensors_expected[0], tensors[0]);
}
// A Cast whose source and destination dtypes are identical (int8 -> int8) is
// a no-op and must be removed.
TEST_F(ArithmeticOptimizerTest, RemoveRedundantCast) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output inputs = ops::Placeholder(s.WithOpName("inputs"), DT_INT8,
                                   ops::Placeholder::Shape({2, 3}));
  Output cast = ops::Cast(s, inputs, DT_INT8);
  Output outputs = ops::Identity(s.WithOpName("outputs"), cast);
  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto x_t = GenerateRandomTensor<DT_INT8>(TensorShape({2, 3}));
  item.feed = {{"inputs", x_t}};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
  EXPECT_EQ(1, tensors_expected.size());
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyRemoveRedundantCast(&optimizer);
  OptimizeAndPrune(&optimizer, &item, &output);
  NodeMap node_map(&output);
  // Cast removed and inputs redirected to outputs
  EXPECT_EQ(2, output.node_size());
  EXPECT_EQ(0, CountOpNodes(output, "Cast"));
  EXPECT_TRUE(IsNodesDirectlyConnected(node_map, "inputs", "outputs"));
  auto tensors = EvaluateNodes(output, item.fetch, item.feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorEqual<int8>(tensors_expected[0], tensors[0]);
}
// A tree of binary Adds over same-shape operands collapses into one AddN.
// The rewritten node inherits the sub-scope ("y/") of the tree root.
TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_AddOpsOfIdenticalShape) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  tensorflow::Scope sx = s.NewSubScope("x");
  tensorflow::Scope sy = s.NewSubScope("y");
  auto a = ops::Variable(s.WithOpName("a"), {2, 2}, DT_FLOAT);
  auto b = ops::Variable(s.WithOpName("b"), {2, 2}, DT_FLOAT);
  auto c = ops::Variable(s.WithOpName("c"), {2, 2}, DT_FLOAT);
  auto add_ab = ops::Add(sx.WithOpName("Add_ab"), a, b);
  auto add_abc = ops::Add(sy.WithOpName("Add_abc"), add_ab, c);
  auto outputs = ops::Identity(s.WithOpName("outputs"), add_abc);
  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  auto b_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  auto c_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  std::vector<std::pair<string, Tensor>> feed = {
      {"a", a_t}, {"b", b_t}, {"c", c_t}};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
  EXPECT_EQ(1, tensors_expected.size());
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyAddToAddNCombining(&optimizer);
  OptimizeAndPrune(&optimizer, &item, &output);
  // We expect the following rewrite(s) to occur:
  //
  //      +
  //     / \
  //    +   c   -->   AddN(a, b, c)
  //   / \
  //  a   b
  EXPECT_EQ(5, output.node_size());
  NodeMap node_map(&output);
  // check add tree was replaced with AddN
  const NodeDef* collapsed_add =
      node_map.GetNode("y/ArithmeticOptimizer/AddOpsRewrite_Add_abc");
  ASSERT_NE(collapsed_add, nullptr);
  EXPECT_EQ("AddN", collapsed_add->op());
  EXPECT_EQ(3, collapsed_add->input_size());
  EXPECT_EQ("a", collapsed_add->input(0));
  EXPECT_EQ("b", collapsed_add->input(1));
  EXPECT_EQ("c", collapsed_add->input(2));
  // check output was re-wired to new node
  const NodeDef* updated_outputs = node_map.GetNode("outputs");
  ASSERT_NE(updated_outputs, nullptr);
  EXPECT_EQ(collapsed_add->name(), updated_outputs->input(0));
  auto tensors = EvaluateNodes(output, item.fetch, feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}
// Two independent Add trees feeding a Multiply are each collapsed into their
// own AddN node, and the Multiply is rewired to the new nodes.
TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_MultiplePasses) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  auto a = ops::Variable(s.WithOpName("a"), {2, 2}, DT_FLOAT);
  auto b = ops::Variable(s.WithOpName("b"), {2, 2}, DT_FLOAT);
  auto c = ops::Variable(s.WithOpName("c"), {2, 2}, DT_FLOAT);
  auto add_ab = ops::Add(s.WithOpName("Add_ab"), a, b);
  auto add_abc = ops::Add(s.WithOpName("Add_abc"), add_ab, c);
  auto x = ops::Variable(s.WithOpName("x"), {2, 2}, DT_FLOAT);
  auto y = ops::Variable(s.WithOpName("y"), {2, 2}, DT_FLOAT);
  auto z = ops::Variable(s.WithOpName("z"), {2, 2}, DT_FLOAT);
  auto add_xy = ops::Add(s.WithOpName("Add_xy"), x, y);
  auto add_xyz = ops::Add(s.WithOpName("Add_xyz"), add_xy, z);
  auto mul = ops::Multiply(s.WithOpName("Mul"), add_abc, add_xyz);
  auto outputs = ops::Identity(s.WithOpName("outputs"), mul);
  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  auto b_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  auto c_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  auto y_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  auto z_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  std::vector<std::pair<string, Tensor>> feed = {
      {"a", a_t}, {"b", b_t}, {"c", c_t}, {"x", x_t}, {"y", y_t}, {"z", z_t}};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
  EXPECT_EQ(1, tensors_expected.size());
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyAddToAddNCombining(&optimizer);
  OptimizeAndPrune(&optimizer, &item, &output);
  // We expect the following rewrite(s) to occur:
  //
  //         *
  //       /   \
  //      +     +                 *
  //     / \   / \               / \
  //    +   c x   +   -->  AddN(a, b, c)  AddN(x, y, z)
  //   / \       / \
  //  a   b     y   z
  EXPECT_EQ(10, output.node_size());
  NodeMap node_map(&output);
  // check left Add subtree replaced with AddN
  const NodeDef* collapsed_left =
      node_map.GetNode("ArithmeticOptimizer/AddOpsRewrite_Add_abc");
  ASSERT_NE(collapsed_left, nullptr);
  EXPECT_EQ("AddN", collapsed_left->op());
  EXPECT_EQ(3, collapsed_left->input_size());
  EXPECT_EQ("a", collapsed_left->input(0));
  EXPECT_EQ("b", collapsed_left->input(1));
  EXPECT_EQ("c", collapsed_left->input(2));
  // check right Add subtree replaced with AddN
  const NodeDef* collapsed_right =
      node_map.GetNode("ArithmeticOptimizer/AddOpsRewrite_Add_xyz");
  ASSERT_NE(collapsed_right, nullptr);
  EXPECT_EQ("AddN", collapsed_right->op());
  EXPECT_EQ(3, collapsed_right->input_size());
  EXPECT_EQ("x", collapsed_right->input(0));
  EXPECT_EQ("y", collapsed_right->input(1));
  EXPECT_EQ("z", collapsed_right->input(2));
  // check that Mul inputs re-wired to new Nodes
  const NodeDef* updated_mul = node_map.GetNode("Mul");
  ASSERT_NE(updated_mul, nullptr);
  EXPECT_EQ("Mul", updated_mul->op());
  EXPECT_EQ(2, updated_mul->input_size());
  EXPECT_EQ(collapsed_left->name(), updated_mul->input(0));
  EXPECT_EQ(collapsed_right->name(), updated_mul->input(1));
  auto tensors = EvaluateNodes(output, item.fetch, feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}
// An operand feeding the Add tree through two different branches must appear
// in the collapsed AddN once per occurrence ("b" shows up twice).
TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_AddInputMultipleTimes) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  auto a = ops::Variable(s.WithOpName("a"), {2, 2}, DT_FLOAT);
  auto b = ops::Variable(s.WithOpName("b"), {2, 2}, DT_FLOAT);
  auto c = ops::Variable(s.WithOpName("c"), {2, 2}, DT_FLOAT);
  auto add_ab = ops::Add(s.WithOpName("Add_ab"), a, b);
  auto add_bc = ops::Add(s.WithOpName("Add_bc"), b, c);
  auto add_all = ops::Add(s.WithOpName("Add_all"), add_ab, add_bc);
  auto outputs = ops::Identity(s.WithOpName("outputs"), add_all);
  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  auto b_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  auto c_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  std::vector<std::pair<string, Tensor>> feed = {
      {"a", a_t}, {"b", b_t}, {"c", c_t}};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
  EXPECT_EQ(1, tensors_expected.size());
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyAddToAddNCombining(&optimizer);
  OptimizeAndPrune(&optimizer, &item, &output);
  // We expect the following rewrite(s) to occur:
  //
  //      +
  //     / \
  //    +   +      -->  AddN(a, b, b, c)
  //   / \ / \                 ^
  //  a   b   c          b added twice!
  EXPECT_EQ(5, output.node_size());
  NodeMap node_map(&output);
  // check Add tree replaced with AddN
  const NodeDef* collapsed_add =
      node_map.GetNode("ArithmeticOptimizer/AddOpsRewrite_Add_all");
  ASSERT_NE(collapsed_add, nullptr);
  EXPECT_EQ("AddN", collapsed_add->op());
  EXPECT_EQ(4, collapsed_add->input_size());
  EXPECT_EQ("a", collapsed_add->input(0));
  EXPECT_EQ("b", collapsed_add->input(1));
  EXPECT_EQ("b", collapsed_add->input(2));
  EXPECT_EQ("c", collapsed_add->input(3));
  auto tensors = EvaluateNodes(output, item.fetch, feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}
// The rewrite must also fire when operand shapes are only symbolically equal:
// all three operands derive elementwise from the same {-1, 2} variable, so
// their (unknown) shapes match by construction.
TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_AddOpsOfSymbolicallyEqualShape) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  // unknown input shape propagated symbolically through the graph
  auto input = ops::Variable(s.WithOpName("input"), {-1, 2}, DT_FLOAT);
  // [a, b, c] have symbolically equal shapes
  auto a = ops::Sqrt(s.WithOpName("a"), input);
  auto b = ops::Square(s.WithOpName("b"), input);
  auto c = ops::Round(s.WithOpName("c"), input);
  // [add_ab, add_abc] shape must be inferred from inputs
  auto add_ab = ops::Add(s.WithOpName("Add_ab"), a, b);
  auto add_abc = ops::Add(s.WithOpName("Add_abc"), add_ab, c);
  auto outputs = ops::Identity(s.WithOpName("outputs"), add_abc);
  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  std::vector<std::pair<string, Tensor>> feed = {{"input", x_t}};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
  EXPECT_EQ(1, tensors_expected.size());
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyAddToAddNCombining(&optimizer);
  OptimizeAndPrune(&optimizer, &item, &output);
  // We expect the following rewrite(s) to occur:
  //
  //      +
  //     / \
  //    +   c   -->   AddN(a, b, c)
  //   / \
  //  a   b
  EXPECT_EQ(6, output.node_size());
  NodeMap node_map(&output);
  // check add tree was replaced with AddN
  const NodeDef* collapsed_add =
      node_map.GetNode("ArithmeticOptimizer/AddOpsRewrite_Add_abc");
  ASSERT_NE(collapsed_add, nullptr);
  EXPECT_EQ("AddN", collapsed_add->op());
  EXPECT_EQ(3, collapsed_add->input_size());
  EXPECT_EQ("a", collapsed_add->input(0));
  EXPECT_EQ("b", collapsed_add->input(1));
  EXPECT_EQ("c", collapsed_add->input(2));
  // check output was re-wired to new node
  const NodeDef* updated_outputs = node_map.GetNode("outputs");
  ASSERT_NE(updated_outputs, nullptr);
  EXPECT_EQ(collapsed_add->name(), updated_outputs->input(0));
  auto tensors = EvaluateNodes(output, item.fetch, feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}
// Verifies that AddOpsRewrite first aggregates same-shaped summands into AddN
// leaves ([a,x], [b,y], [c,z]) and then combines those leaves smallest shape
// first, so the expensive rank-3 broadcast is performed only once, at the
// root of the rewritten tree.
TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_MinimizeBCast) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  // First Add chain over tensors of rank 1, 2 and 3.
  auto a = ops::Variable(s.WithOpName("a"), {32}, DT_FLOAT);
  auto b = ops::Variable(s.WithOpName("b"), {32, 32}, DT_FLOAT);
  auto c = ops::Variable(s.WithOpName("c"), {32, 32, 32}, DT_FLOAT);
  auto add_ab = ops::Add(s.WithOpName("Add_ab"), a, b);
  auto add_abc = ops::Add(s.WithOpName("Add_abc"), add_ab, c);
  // Second Add chain with shapes matching the first one element-wise.
  auto x = ops::Variable(s.WithOpName("x"), {32}, DT_FLOAT);
  auto y = ops::Variable(s.WithOpName("y"), {32, 32}, DT_FLOAT);
  auto z = ops::Variable(s.WithOpName("z"), {32, 32, 32}, DT_FLOAT);
  auto add_xy = ops::Add(s.WithOpName("Add_xy"), x, y);
  auto add_xyz = ops::Add(s.WithOpName("Add_xyz"), add_xy, z);
  auto add_all = ops::Add(s.WithOpName("AddAll"), add_abc, add_xyz);
  auto outputs = ops::Identity(s.WithOpName("outputs"), add_all);
  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32}));
  auto b_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32, 32}));
  auto c_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32, 32, 32}));
  auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32}));
  auto y_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32, 32}));
  auto z_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32, 32, 32}));
  std::vector<std::pair<string, Tensor>> feed = {
      {"a", a_t}, {"b", b_t}, {"c", c_t}, {"x", x_t}, {"y", y_t}, {"z", z_t}};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
  EXPECT_EQ(1, tensors_expected.size());
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyAddToAddNCombining(&optimizer);
  OptimizeAndPrune(&optimizer, &item, &output);
  // We expect the following rewrite(s) to occur:
  // 1) [a, x], [b, y], [c, z] - aggregate same shapes first
  // 2) Build an aggregation tree minimizing cost of broadcast
  //
  //         +                              +
  //      /     \                       /       \
  //     +       +                     +       AddN(c, z)
  //    / \     / \                 /     \
  //   +   c   x   +        -->  AddN(a, x) AddN(b, y)
  //  / \         / \
  // a   b       y   z
  EXPECT_EQ(12, output.node_size());
  NodeMap node_map(&output);
  // expected names of outer and inner nodes
  string outer_add_name = "ArithmeticOptimizer/AddOpsRewrite_AddAll";
  string outer_0_add_name =
      "ArithmeticOptimizer/AddOpsRewrite_Internal_0_AddAll";
  string inner_0_add_name = "ArithmeticOptimizer/AddOpsRewrite_Leaf_0_AddAll";
  string inner_1_add_name = "ArithmeticOptimizer/AddOpsRewrite_Leaf_1_AddAll";
  string inner_2_add_name = "ArithmeticOptimizer/AddOpsRewrite_Leaf_2_AddAll";
  // Add [a, x] first
  const NodeDef* add_ax_node = node_map.GetNode(inner_0_add_name);
  ASSERT_NE(add_ax_node, nullptr);
  EXPECT_EQ("AddN", add_ax_node->op());
  EXPECT_EQ(2, add_ax_node->input_size());
  EXPECT_EQ("a", add_ax_node->input(0));
  EXPECT_EQ("x", add_ax_node->input(1));
  // Then add [b, y]
  const NodeDef* add_by_node = node_map.GetNode(inner_1_add_name);
  ASSERT_NE(add_by_node, nullptr);
  EXPECT_EQ("AddN", add_by_node->op());
  EXPECT_EQ(2, add_by_node->input_size());
  EXPECT_EQ("b", add_by_node->input(0));
  EXPECT_EQ("y", add_by_node->input(1));
  // Then add [c, z]
  const NodeDef* add_cz_node = node_map.GetNode(inner_2_add_name);
  ASSERT_NE(add_cz_node, nullptr);
  EXPECT_EQ("AddN", add_cz_node->op());
  EXPECT_EQ(2, add_cz_node->input_size());
  EXPECT_EQ("c", add_cz_node->input(0));
  EXPECT_EQ("z", add_cz_node->input(1));
  // Then add results together starting from smaller shapes [a, x] + [b, y]
  const NodeDef* outer_0_node = node_map.GetNode(outer_0_add_name);
  ASSERT_NE(outer_0_node, nullptr);
  EXPECT_EQ("Add", outer_0_node->op());
  EXPECT_EQ(2, outer_0_node->input_size());
  EXPECT_EQ(inner_0_add_name, outer_0_node->input(0));
  EXPECT_EQ(inner_1_add_name, outer_0_node->input(1));
  // And finally top level Add node
  const NodeDef* outer_node = node_map.GetNode(outer_add_name);
  ASSERT_NE(outer_node, nullptr);
  EXPECT_EQ("Add", outer_node->op());
  EXPECT_EQ(2, outer_node->input_size());
  EXPECT_EQ(outer_0_add_name, outer_node->input(0));
  EXPECT_EQ(inner_2_add_name, outer_node->input(1));
  // And outputs reading new top level Add node
  const NodeDef* updated_outputs = node_map.GetNode("outputs");
  ASSERT_NE(updated_outputs, nullptr);
  EXPECT_EQ(outer_add_name, updated_outputs->input(0));
  // The rewritten graph must be numerically equivalent to the original.
  auto tensors = EvaluateNodes(output, item.fetch, feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}
// Verifies that AddOpsRewrite handles symbolic (unknown, -1) dimensions: the
// two small {?, 1, 1} inputs [a, c] are aggregated first by an inner AddN,
// and the broadcast against the large {?, 32, 32} input [b] happens exactly
// once, at the outer Add.
TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_MinimizeBCastWithSymbolicShapes) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  // We have a small input with one unknown dimension
  auto small = ops::Variable(s.WithOpName("small"), {-1, 1, 1}, DT_DOUBLE);
  // And second input which is larger, but has the same unknown dimension
  // device spec prevents this node from rewriting
  auto d = "/device:CPU:0";
  auto v = ops::Variable(s.WithOpName("v"), {1, 32, 32}, DT_DOUBLE);
  auto large = ops::Add(s.WithOpName("large").WithDevice(d), small, v);
  // [a, c] have {?, 1, 1} shape, [b] has {?, 32, 32}
  auto a = ops::Sqrt(s.WithOpName("a"), small);
  auto b = ops::Square(s.WithOpName("b"), large);
  auto c = ops::Round(s.WithOpName("c"), small);
  // [add_ab, add_abc] shape must be inferred from inputs
  auto add_ab = ops::Add(s.WithOpName("Add_ab"), a, b);
  auto add_abc = ops::Add(s.WithOpName("Add_abc"), add_ab, c);
  auto outputs = ops::Identity(s.WithOpName("outputs"), add_abc);
  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  // Feed concrete tensors; the unknown dimension is 8 at run time.
  auto s_t = GenerateRandomTensor<DT_DOUBLE>(TensorShape({8, 1, 1}));
  auto v_t = GenerateRandomTensor<DT_DOUBLE>(TensorShape({1, 32, 32}));
  std::vector<std::pair<string, Tensor>> feed = {{"small", s_t}, {"v", v_t}};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
  EXPECT_EQ(1, tensors_expected.size());
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyAddToAddNCombining(&optimizer);
  OptimizeAndPrune(&optimizer, &item, &output);
  // We expect the following rewrite(s) to occur: it's much cheaper to add small
  // tensors, and do the broadcast just once
  //
  //      +                  +
  //     / \                / \
  //    +   c      -->     +   b
  //   / \                / \
  //  a   b              a   c
  EXPECT_EQ(9, output.node_size());
  NodeMap node_map(&output);
  // expected names of outer and inner nodes
  string outer_add_name = "ArithmeticOptimizer/AddOpsRewrite_Add_abc";
  string inner_add_name = "ArithmeticOptimizer/AddOpsRewrite_Leaf_0_Add_abc";
  // outer Add node
  const NodeDef* outer_add = node_map.GetNode(outer_add_name);
  ASSERT_NE(outer_add, nullptr);
  EXPECT_EQ("Add", outer_add->op());
  // Check the input count before reading input(0)/input(1), consistent with
  // the inner-node assertions below (previously unchecked).
  EXPECT_EQ(2, outer_add->input_size());
  EXPECT_EQ(inner_add_name, outer_add->input(0));
  EXPECT_EQ("b", outer_add->input(1));
  // inner AddN node
  const NodeDef* inner_add = node_map.GetNode(inner_add_name);
  ASSERT_NE(inner_add, nullptr);
  // Verify the op type as well; the sibling MinimizeBCast test checks it,
  // but it was missing here.
  EXPECT_EQ("AddN", inner_add->op());
  EXPECT_EQ(2, inner_add->input_size());
  EXPECT_EQ("a", inner_add->input(0));
  EXPECT_EQ("c", inner_add->input(1));
  // check output was re-wired to new node
  const NodeDef* updated_outputs = node_map.GetNode("outputs");
  ASSERT_NE(updated_outputs, nullptr);
  EXPECT_EQ(outer_add_name, updated_outputs->input(0));
  // The rewritten graph must be numerically equivalent to the original.
  auto tensors = EvaluateNodes(output, item.fetch, feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<double>(tensors_expected[0], tensors[0], 1e-6);
}
// Verifies the RemoveNegation stage, which folds Neg inputs into the
// consuming Add/Sub:
//   (-x) + y => y - x        x + (-y) => x - y
//   (-x) + (-y) => (-x) - y  x - (-y) => x + y
//   (-x) - (-y) => y - x
// A Neg that carries control dependencies must have them transferred to the
// rewritten consumer (Add_negx_with_dep_y below). Plain x + y and x - y, and
// (-x) - y, are left untouched.
TEST_F(ArithmeticOptimizerTest, RemoveNegation) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  auto x = ops::Variable(s.WithOpName("x"), {2, 2}, DT_FLOAT);
  auto y = ops::Variable(s.WithOpName("y"), {2, 2}, DT_FLOAT);
  Output neg_x = ops::Neg(s.WithOpName("Neg_x"), x);
  Output neg_y = ops::Neg(s.WithOpName("Neg_y"), y);
  Output add_x_y = ops::Add(s.WithOpName("Add_x_y"), x, y);
  Output add_negx_y = ops::Add(s.WithOpName("Add_negx_y"), neg_x, y);
  Output add_x_negy = ops::Add(s.WithOpName("Add_x_negy"), x, neg_y);
  Output add_negx_negy = ops::Add(s.WithOpName("Add_negx_negy"), neg_x, neg_y);
  Output sub_x_y = ops::Sub(s.WithOpName("Sub_x_y"), x, y);
  Output sub_negx_y = ops::Sub(s.WithOpName("Sub_negx_y"), neg_x, y);
  Output sub_x_negy = ops::Sub(s.WithOpName("Sub_x_negy"), x, neg_y);
  Output sub_negx_negy = ops::Sub(s.WithOpName("Sub_negx_negy"), neg_x, neg_y);
  Output neg_x_with_dep = ops::Neg(
      s.WithOpName("Neg_x_with_dep").WithControlDependencies({add_x_y}), x);
  Output add_negx_with_dep_y =
      ops::Add(s.WithOpName("Add_negx_with_dep_y"), neg_x_with_dep, y);
  // Sum everything so a single fetch covers all rewrite candidates.
  auto add_all =
      ops::AddN(s.WithOpName("add_all"),
                {add_x_y, add_negx_y, add_x_negy, add_negx_negy, sub_x_y,
                 sub_negx_y, sub_x_negy, sub_negx_negy, add_negx_with_dep_y});
  GrapplerItem item;
  item.fetch = {"add_all"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  auto y_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
  std::vector<std::pair<string, Tensor>> feed = {{"x", x_t}, {"y", y_t}};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
  EXPECT_EQ(1, tensors_expected.size());
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyRemoveNegation(&optimizer);
  OptimizeTwice(&optimizer, &item, &output);
  // The rewrite changes ops in place; no nodes are added or removed.
  EXPECT_EQ(item.graph.node_size(), output.node_size());
  int found = 0;
  for (int i = 0; i < output.node_size(); ++i) {
    const NodeDef& node = output.node(i);
    if (node.name() == "Add_negx_y") {
      // (-x) + y => y - x
      ++found;
      EXPECT_EQ("Sub", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("y", node.input(0));
      EXPECT_EQ("x", node.input(1));
    } else if (node.name() == "Add_x_negy") {
      // x + (-y) => x - y
      ++found;
      EXPECT_EQ("Sub", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("x", node.input(0));
      EXPECT_EQ("y", node.input(1));
    } else if (node.name() == "Add_negx_negy") {
      // (-x) + (-y) => (-x) - y; the remaining Neg_x is kept.
      ++found;
      EXPECT_EQ("Sub", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("Neg_x", node.input(0));
      EXPECT_EQ("y", node.input(1));
    } else if (node.name() == "Sub_x_negy") {
      // x - (-y) => x + y
      ++found;
      EXPECT_EQ("Add", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("x", node.input(0));
      EXPECT_EQ("y", node.input(1));
    } else if (node.name() == "Sub_negx_negy") {
      // (-x) - (-y) => y - x
      ++found;
      EXPECT_EQ("Sub", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("y", node.input(0));
      EXPECT_EQ("x", node.input(1));
    } else if (node.name() == "Add_negx_with_dep_y") {
      // The control dependency of the folded Neg follows the rewrite.
      ++found;
      EXPECT_EQ("Sub", node.op());
      EXPECT_EQ(3, node.input_size());
      EXPECT_EQ("y", node.input(0));
      EXPECT_EQ("x", node.input(1));
      EXPECT_EQ("^Add_x_y", node.input(2));
    }
  }
  EXPECT_EQ(6, found);
  // The rewritten graph must be numerically equivalent to the original.
  auto tensors = EvaluateNodes(output, item.fetch, feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}
// Verifies that x / Sqrt(y) is rewritten to x * Rsqrt(y): the Div becomes a
// Mul and the Sqrt node is converted to Rsqrt in place.
TEST_F(ArithmeticOptimizerTest, ConvertSqrtDivToRsqrtMul) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  auto x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
  auto y = ops::Const(s.WithOpName("y"), {3.0f, 4.0f}, {1, 2});
  Output sqrt_y = ops::Sqrt(s.WithOpName("sqrt_y"), y);
  Output div_x_sqrt_y = ops::Div(s.WithOpName("output"), x, sqrt_y);
  GrapplerItem item;
  item.fetch = {"output"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
  EXPECT_EQ(1, tensors_expected.size());
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlySqrtDivToRsqrtMul(&optimizer);
  OptimizeAndPrune(&optimizer, &item, &output);
  // The rewritten graph must be numerically equivalent to the original.
  auto tensors = EvaluateNodes(output, item.fetch);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
  EXPECT_EQ(item.graph.node_size(), output.node_size());
  // Count matches so the test fails loudly if the expected nodes are missing
  // or renamed; previously the per-node checks were silently skipped in that
  // case (every sibling test in this file uses the same `found` pattern).
  int found = 0;
  for (int i = 0; i < output.node_size(); ++i) {
    const NodeDef& node = output.node(i);
    if (node.name() == "output") {
      ++found;
      EXPECT_EQ("Mul", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("x", node.input(0));
      EXPECT_EQ("sqrt_y", node.input(1));
    } else if (node.name() == "sqrt_y") {
      ++found;
      EXPECT_EQ("Rsqrt", node.op());
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("y", node.input(0));
    }
  }
  EXPECT_EQ(2, found);
}
// Verifies that Pow with a special constant exponent is rewritten to the
// cheaper dedicated op:
//   y == 2  -> Square    y == 1   -> Identity   y == 0.5 -> Sqrt
//   y == 0  -> Const(1)  y == -0.5 -> Rsqrt     y == -1  -> Reciprocal
// The replaced exponent constant is kept as a control dependency. Pow with a
// non-special exponent ("out"), or where dropping the exponent would change
// the broadcast result shape ("out_bcast1"/"out_bcast2": scalar base with a
// {1, 3} exponent), must be left untouched.
TEST_F(ArithmeticOptimizerTest, ConvertPow) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  auto x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
  auto y2 = ops::Const(s.WithOpName("y2"), {2.0f, 2.0f}, {1, 2});
  auto y1 = ops::Const(s.WithOpName("y1"), {1.0f, 1.0f}, {1, 2});
  auto yPoint5 = ops::Const(s.WithOpName("y.5"), {0.5f, 0.5f}, {1, 2});
  auto y0 = ops::Const(s.WithOpName("y0"), {0.0f, 0.0f}, {1, 2});
  auto y_Point5 = ops::Const(s.WithOpName("y_.5"), {-0.5f, -0.5f}, {1, 2});
  auto y_1 = ops::Const(s.WithOpName("y_1"), {-1.0f, -1.0f}, {1, 2});
  auto y = ops::Const(s.WithOpName("y"), {3.0f, 4.0f}, {1, 2});
  auto z = ops::Const(s.WithOpName("z"), {42.0f}, {});
  auto ones = ops::Const(s.WithOpName("ones"), {1.0f, 1.0f, 1.0f}, {1, 3});
  auto zeros = ops::Const(s.WithOpName("zeros"), {0.0f, 0.0f, 0.0f}, {1, 3});
  Output out2 = ops::Pow(s.WithOpName("out2"), x, y2);
  Output out1 = ops::Pow(s.WithOpName("out1"), x, y1);
  Output outPoint5 = ops::Pow(s.WithOpName("out.5"), x, yPoint5);
  Output out0 = ops::Pow(s.WithOpName("out0"), x, y0);
  Output out_Point5 = ops::Pow(s.WithOpName("out_.5"), x, y_Point5);
  Output out_1 = ops::Pow(s.WithOpName("out_1"), x, y_1);
  Output out = ops::Pow(s.WithOpName("out"), x, y);
  Output out_bcast1 = ops::Pow(s.WithOpName("out_bcast1"), z, ones);
  Output out_bcast2 = ops::Pow(s.WithOpName("out_bcast2"), z, zeros);
  GrapplerItem item;
  item.fetch = {"out2", "out1", "out.5", "out0", "out_.5",
                "out_1", "out",  "out_bcast1", "out_bcast2"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
  EXPECT_EQ(9, tensors_expected.size());
  GraphDef got;
  ArithmeticOptimizer optimizer;
  EnableOnlyConvertPow(&optimizer);
  OptimizeAndPrune(&optimizer, &item, &got);
  // The rewritten graph must be numerically equivalent for every fetch.
  auto tensors = EvaluateNodes(got, item.fetch);
  EXPECT_EQ(9, tensors.size());
  for (int i = 0; i < tensors.size(); ++i) {
    EXPECT_EQ(tensors[i].NumElements(), tensors_expected[i].NumElements());
    test::ExpectTensorNear<float>(tensors[i], tensors_expected[i], 1e-6);
  }
  // Build the exact expected graph and compare node by node.
  GraphDef want;
  AddNode("x", "Const", {}, {}, &want);
  AddNode("y2", "Const", {}, {}, &want);
  AddNode("y1", "Const", {}, {}, &want);
  AddNode("y.5", "Const", {}, {}, &want);
  AddNode("y0", "Const", {}, {}, &want);
  AddNode("y_.5", "Const", {}, {}, &want);
  AddNode("y_1", "Const", {}, {}, &want);
  AddNode("y", "Const", {}, {}, &want);
  AddNode("z", "Const", {}, {}, &want);
  AddNode("ones", "Const", {}, {}, &want);
  AddNode("zeros", "Const", {}, {}, &want);
  AddNode("out2", "Square", {"x", AsControlDependency("y2")}, {}, &want);
  AddNode("out1", "Identity", {"x", AsControlDependency("y1")}, {}, &want);
  AddNode("out.5", "Sqrt", {"x", AsControlDependency("y.5")}, {}, &want);
  AddNode("out0", "Const",
          {AsControlDependency("x"), AsControlDependency("y0")}, {}, &want);
  AddNode("out_.5", "Rsqrt", {"x", AsControlDependency("y_.5")}, {}, &want);
  AddNode("out_1", "Reciprocal", {"x", AsControlDependency("y_1")}, {}, &want);
  AddNode("out", "Pow", {"x", "y"}, {}, &want);
  AddNode("out_bcast1", "Pow", {"z", "ones"}, {}, &want);
  AddNode("out_bcast2", "Pow", {"z", "zeros"}, {}, &want);
  CompareGraphs(want, got);
}
// Verifies Log(1 + x) => Log1p(x). out1 = Log(x1 + x2) qualifies because x1
// is all ones, so it becomes Log1p(x2); the folded nodes' dependencies (x1
// and the control dep on x3) are carried over as control inputs. out2 =
// Log(x2 + x3) has no all-ones operand and must stay a plain Log.
TEST_F(ArithmeticOptimizerTest, Log1p) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  auto x1 = ops::Const(s.WithOpName("x1"), {1.0f, 1.0f}, {1, 2});
  auto x2 = ops::Const(s.WithOpName("x2"), {2.0f, 2.0f}, {1, 2});
  auto x3 = ops::Const(s.WithOpName("x3"), {3.0f, 3.0f}, {1, 2});
  auto a12 = ops::Add(s.WithOpName("a12").WithControlDependencies(x3), x1, x2);
  auto a23 = ops::Add(s.WithOpName("a23"), x2, x3);
  Output out1 = ops::Log(s.WithOpName("out1"), a12);
  Output out2 = ops::Log(s.WithOpName("out2"), a23);
  GrapplerItem item;
  item.fetch = {"out1", "out2"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
  EXPECT_EQ(2, tensors_expected.size());
  GraphDef got;
  ArithmeticOptimizer optimizer;
  EnableOnlyLog1p(&optimizer);
  OptimizeAndPrune(&optimizer, &item, &got);
  // The rewritten graph must be numerically equivalent for both fetches.
  auto tensors = EvaluateNodes(got, item.fetch);
  EXPECT_EQ(2, tensors.size());
  for (int i = 0; i < 2; ++i) {
    EXPECT_EQ(tensors[i].NumElements(), tensors_expected[i].NumElements());
    test::ExpectTensorNear<float>(tensors[i], tensors_expected[i], 1e-6);
  }
  // Build the exact expected graph and compare node by node.
  GraphDef want;
  AddNode("x1", "Const", {}, {}, &want);
  AddNode("x2", "Const", {}, {}, &want);
  AddNode("x3", "Const", {}, {}, &want);
  AddNode("a23", "Add", {"x2", "x3"}, {}, &want);
  AddNode("out1", "Log1p",
          {"x2", AsControlDependency("x1"), AsControlDependency("x3")}, {},
          &want);
  AddNode("out2", "Log", {"a23"}, {}, &want);
  CompareGraphs(want, got);
}
// Verifies Exp(x) - 1 => Expm1(x). out1 = exp1 - x2 qualifies because x2 is
// all ones, so it becomes Expm1(x1); the folded nodes' dependencies (x2 and
// the control dep on x3) become control inputs. out2 = exp1 - x3 subtracts a
// non-ones constant and must stay a plain Sub.
TEST_F(ArithmeticOptimizerTest, Expm1) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  auto x1 = ops::Const(s.WithOpName("x1"), {2.0f, 2.0f}, {1, 2});
  auto x2 = ops::Const(s.WithOpName("x2"), {1.0f, 1.0f}, {1, 2});
  auto x3 = ops::Const(s.WithOpName("x3"), {3.0f, 3.0f}, {1, 2});
  auto exp1 = ops::Exp(s.WithOpName("exp1").WithControlDependencies(x3), x1);
  Output out1 = ops::Sub(s.WithOpName("out1"), exp1, x2);
  Output out2 = ops::Sub(s.WithOpName("out2"), exp1, x3);
  GrapplerItem item;
  item.fetch = {"out1", "out2"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
  EXPECT_EQ(2, tensors_expected.size());
  GraphDef got;
  ArithmeticOptimizer optimizer;
  EnableOnlyExpm1(&optimizer);
  OptimizeAndPrune(&optimizer, &item, &got);
  // The rewritten graph must be numerically equivalent for both fetches.
  auto tensors = EvaluateNodes(got, item.fetch);
  EXPECT_EQ(2, tensors.size());
  for (int i = 0; i < 2; ++i) {
    EXPECT_EQ(tensors[i].NumElements(), tensors_expected[i].NumElements());
    test::ExpectTensorNear<float>(tensors[i], tensors_expected[i], 1e-6);
  }
  // Build the exact expected graph and compare node by node. exp1 survives
  // because out2 still consumes it.
  GraphDef want;
  AddNode("x1", "Const", {}, {}, &want);
  AddNode("x2", "Const", {}, {}, &want);
  AddNode("x3", "Const", {}, {}, &want);
  AddNode("exp1", "Exp", {"x1", AsControlDependency("x3")}, {}, &want);
  AddNode("out1", "Expm1",
          {"x1", AsControlDependency("x2"), AsControlDependency("x3")}, {},
          &want);
  AddNode("out2", "Sub", {"exp1", "x3"}, {}, &want);
  CompareGraphs(want, got);
}
// Verifies the simplest MinimizeBroadcasts rewrite: in (a * b) * c with
// small a, c and large b, the small operands are swapped together so the
// broadcast against b happens once, at the top multiply.
TEST_F(ArithmeticOptimizerTest, MinimizeBroadcasts_SimpleSwap) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  auto a = ops::Variable(s.WithOpName("a"), {32}, DT_FLOAT);
  auto b = ops::Variable(s.WithOpName("b"), {32, 32}, DT_FLOAT);
  auto c = ops::Variable(s.WithOpName("c"), {32}, DT_FLOAT);
  auto mul1 = ops::Mul(s.WithOpName("mul1"), a, b);
  auto mul2 = ops::Mul(s.WithOpName("mul2"), mul1, c);
  auto outputs = ops::Identity(s.WithOpName("outputs"), mul2);
  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32}));
  auto b_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32, 32}));
  auto c_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32}));
  std::vector<std::pair<string, Tensor>> feed = {
      {"a", a_t}, {"b", b_t}, {"c", c_t}};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
  EXPECT_EQ(1, tensors_expected.size());
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyMinimizeBroadcasts(&optimizer);
  OptimizeAndPrune(&optimizer, &item, &output);
  // We expect the following rewrite(s) to occur:
  //
  //      *                  *
  //     / \                / \
  //    *   c      -->     *   b
  //   / \                / \
  //  a   b              a   c
  NodeMap node_map(&output);
  const NodeDef* mul1_node = node_map.GetNode("mul1");
  ASSERT_NE(mul1_node, nullptr);
  // Assert the input count before indexing into the inputs (previously
  // input(0)/input(1) were read without this guard).
  EXPECT_EQ(2, mul1_node->input_size());
  EXPECT_EQ("a", mul1_node->input(0));
  EXPECT_EQ("c", mul1_node->input(1));
  const NodeDef* mul2_node = node_map.GetNode("mul2");
  ASSERT_NE(mul2_node, nullptr);
  EXPECT_EQ(2, mul2_node->input_size());
  EXPECT_EQ("mul1", mul2_node->input(0));
  EXPECT_EQ("b", mul2_node->input(1));
  // The rewritten graph must be numerically equivalent to the original.
  auto tensors = EvaluateNodes(output, item.fetch, feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}
// Verifies that MinimizeBroadcasts flattens a tall left-leaning Mul chain:
// the four small rank-1 operands are combined pairwise first, and the single
// large rank-2 operand (b) is multiplied in last, at the root.
TEST_F(ArithmeticOptimizerTest, MinimizeBroadcasts_FlattenTallGraph) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  auto a = ops::Variable(s.WithOpName("a"), {32}, DT_DOUBLE);
  auto b = ops::Variable(s.WithOpName("b"), {32, 32}, DT_DOUBLE);
  auto c = ops::Variable(s.WithOpName("c"), {32}, DT_DOUBLE);
  auto d = ops::Variable(s.WithOpName("d"), {32}, DT_DOUBLE);
  auto e = ops::Variable(s.WithOpName("e"), {32}, DT_DOUBLE);
  auto mul1 = ops::Mul(s.WithOpName("mul1"), a, b);
  auto mul2 = ops::Mul(s.WithOpName("mul2"), mul1, c);
  auto mul3 = ops::Mul(s.WithOpName("mul3"), mul2, d);
  auto mul4 = ops::Mul(s.WithOpName("mul4"), mul3, e);
  auto outputs = ops::Identity(s.WithOpName("outputs"), mul4);
  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto a_t = GenerateRandomTensor<DT_DOUBLE>(TensorShape({32}));
  auto b_t = GenerateRandomTensor<DT_DOUBLE>(TensorShape({32, 32}));
  auto c_t = GenerateRandomTensor<DT_DOUBLE>(TensorShape({32}));
  auto d_t = GenerateRandomTensor<DT_DOUBLE>(TensorShape({32}));
  auto e_t = GenerateRandomTensor<DT_DOUBLE>(TensorShape({32}));
  std::vector<std::pair<string, Tensor>> feed = {
      {"a", a_t}, {"b", b_t}, {"c", c_t}, {"d", d_t}, {"e", e_t}};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
  EXPECT_EQ(1, tensors_expected.size());
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyMinimizeBroadcasts(&optimizer);
  OptimizeAndPrune(&optimizer, &item, &output);
  // We expect the following rewrite(s) to occur: Graph is "flattened" and
  // largest shape pushed to the top.
  //
  //          *
  //        /   \
  //       *     e                *
  //      /  \                  /   \
  //     *    d               *      b
  //    / \                 /   \
  //   *   c      -->      *     *
  //  / \                 / \   / \
  // a   b               a   c d   e
  NodeMap node_map(&output);
  // The node names are reused; only their inputs are rewired.
  const NodeDef* mul1_node = node_map.GetNode("mul1");
  ASSERT_NE(mul1_node, nullptr);
  EXPECT_EQ("a", mul1_node->input(0));
  EXPECT_EQ("c", mul1_node->input(1));
  const NodeDef* mul2_node = node_map.GetNode("mul2");
  ASSERT_NE(mul2_node, nullptr);
  EXPECT_EQ("d", mul2_node->input(0));
  EXPECT_EQ("e", mul2_node->input(1));
  const NodeDef* mul3_node = node_map.GetNode("mul3");
  ASSERT_NE(mul3_node, nullptr);
  EXPECT_EQ("mul1", mul3_node->input(0));
  EXPECT_EQ("mul2", mul3_node->input(1));
  const NodeDef* mul4_node = node_map.GetNode("mul4");
  ASSERT_NE(mul4_node, nullptr);
  EXPECT_EQ("mul3", mul4_node->input(0));
  EXPECT_EQ("b", mul4_node->input(1));
  // The rewritten graph must be numerically equivalent to the original.
  auto tensors = EvaluateNodes(output, item.fetch, feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<double>(tensors_expected[0], tensors[0], 1e-6);
}
// Verifies that MinimizeBroadcasts rebuilds the Mul tree bottom-up: the
// three scalar-shaped operands [a, b, c] are multiplied together first and
// the matrix D participates only in the final (root) multiply.
TEST_F(ArithmeticOptimizerTest, MinimizeBroadcasts_BuildTreeUp) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  // [a, b, c] - scalars, [d] - matrix
  auto a = ops::Variable(s.WithOpName("a"), {32}, DT_FLOAT);
  auto b = ops::Variable(s.WithOpName("b"), {32}, DT_FLOAT);
  auto c = ops::Variable(s.WithOpName("c"), {32}, DT_FLOAT);
  auto d = ops::Variable(s.WithOpName("D"), {32, 32}, DT_FLOAT);
  auto mul1 = ops::Mul(s.WithOpName("mul1"), a, b);
  auto mul2 = ops::Mul(s.WithOpName("mul2"), c, d);
  auto mul3 = ops::Mul(s.WithOpName("mul3"), mul1, mul2);
  auto outputs = ops::Identity(s.WithOpName("outputs"), mul3);
  GrapplerItem item;
  item.fetch = {"outputs"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32}));
  auto b_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32}));
  auto c_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32}));
  auto d_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32, 32}));
  std::vector<std::pair<string, Tensor>> feed = {
      {"a", a_t}, {"b", b_t}, {"c", c_t}, {"D", d_t}};
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
  EXPECT_EQ(1, tensors_expected.size());
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyMinimizeBroadcasts(&optimizer);
  OptimizeAndPrune(&optimizer, &item, &output);
  // We expect the following rewrite(s) to occur:
  //
  //                              *
  //                            /  \
  //       *                   *    D
  //     /   \                / \
  //    *     *      ->      *   c
  //   / \   / \            / \
  //  a   b c   D          a   b
  NodeMap node_map(&output);
  // After the rewrite the node named "mul2" multiplies [a, b] and "mul1"
  // combines that result with c. Name each local after the node it actually
  // fetches (the original code confusingly held node "mul2" in `mul1_node`
  // and node "mul1" in `mul2_node`).
  const NodeDef* mul2_node = node_map.GetNode("mul2");
  ASSERT_NE(mul2_node, nullptr);
  EXPECT_EQ("a", mul2_node->input(0));
  EXPECT_EQ("b", mul2_node->input(1));
  const NodeDef* mul1_node = node_map.GetNode("mul1");
  ASSERT_NE(mul1_node, nullptr);
  EXPECT_EQ("mul2", mul1_node->input(0));
  EXPECT_EQ("c", mul1_node->input(1));
  const NodeDef* mul3_node = node_map.GetNode("mul3");
  ASSERT_NE(mul3_node, nullptr);
  EXPECT_EQ("D", mul3_node->input(0));
  EXPECT_EQ("mul1", mul3_node->input(1));
  // The rewritten graph must be numerically equivalent to the original.
  auto tensors = EvaluateNodes(output, item.fetch, feed);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}
// Verifies that identical element-wise unary chains applied to every Concat
// input are hoisted above the Concat: Concat({Exp(a), Exp(b), Exp(c)})
// becomes Exp(Concat({a, b, c})), and a length-2 chain Cos(Exp(.)) is hoisted
// as a whole. Control dependencies on the hoisted ops are accumulated on the
// Concat node.
TEST_F(ArithmeticOptimizerTest, HoistCWiseUnaryFromConcat) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output a = ops::Const(s.WithOpName("a"), 3.14f, {32});
  Output b = ops::Const(s.WithOpName("b"), 1.0f, {32});
  Output c = ops::Const(s.WithOpName("c"), 42.0f, {32});
  Output axis = ops::Const(s.WithOpName("axis"), 0, {});
  Output ctrl1 = ops::Const(s.WithOpName("ctrl1"), 1, {});
  Output ctrl2 = ops::Const(s.WithOpName("ctrl2"), 2, {});
  Output ctrl3 = ops::Const(s.WithOpName("ctrl3"), 3, {});
  // Test case with chains of length 1.
  // Rewrites
  //       Concat({Exp(a), Exp(b), Exp(c)})
  // into
  //       Exp(Concat({a, b, c})).
  Output sin_a =
      ops::Sin(s.WithOpName("sin_a").WithControlDependencies(ctrl3), a);
  Output exp_a =
      ops::Exp(s.WithOpName("exp_a").WithControlDependencies(ctrl1), sin_a);
  Output exp_b = ops::Exp(s.WithOpName("exp_b"), b);
  Output exp_c =
      ops::Exp(s.WithOpName("exp_c").WithControlDependencies(ctrl2), c);
  Output concat =
      ops::Concat(s.WithOpName("concat"), {exp_a, exp_b, exp_c}, axis);
  Output id = ops::Identity(s.WithOpName("id"), concat);
  // Test case with chains of length 2.
  // Rewrites
  //       Concat({Cos(Exp(a)), Cos(Exp(b)), Cos(Exp(c))})
  // into
  //       Cos(Exp(Concat({a, b, c}))).
  Output exp_a2 =
      ops::Exp(s.WithOpName("exp_a2").WithControlDependencies(ctrl1), sin_a);
  Output exp_b2 = ops::Exp(s.WithOpName("exp_b2"), b);
  Output exp_c2 =
      ops::Exp(s.WithOpName("exp_c2").WithControlDependencies(ctrl2), c);
  Output cos_exp_a2 = ops::Cos(
      s.WithOpName("cos_exp_a2").WithControlDependencies(ctrl1), exp_a2);
  Output cos_exp_b2 = ops::Cos(
      s.WithOpName("cos_exp_b2").WithControlDependencies(ctrl3), exp_b2);
  Output cos_exp_c2 = ops::Cos(s.WithOpName("cos_exp_c2"), exp_c2);
  Output concat2 = ops::Concat(s.WithOpName("concat2"),
                               {cos_exp_a2, cos_exp_b2, cos_exp_c2}, axis);
  Output id2 = ops::Identity(s.WithOpName("id2"), concat2);
  GrapplerItem item;
  item.fetch = {"id", "id2"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyHoistCWiseUnaryChains(&optimizer);
  OptimizeTwiceAndPrune(&optimizer, &item, &output);
  int found = 0;
  for (const NodeDef& node : output.node()) {
    if (node.name() == "concat") {
      // Concat now reads the raw inputs; the control deps of the hoisted
      // Exp nodes were moved onto it.
      EXPECT_EQ(6, node.input_size());
      EXPECT_EQ("sin_a", node.input(0));
      EXPECT_EQ("b", node.input(1));
      EXPECT_EQ("c", node.input(2));
      EXPECT_EQ("axis", node.input(3));
      EXPECT_EQ("^ctrl1", node.input(4));
      EXPECT_EQ("^ctrl2", node.input(5));
      found++;
    }
    if (node.name() == "exp_a") {
      // The surviving Exp is applied once, to the Concat output.
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("concat", node.input(0));
      EXPECT_EQ("^ctrl1", node.input(1));
      found++;
    }
    if (node.name() == "id") {
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("exp_a", node.input(0));
      found++;
    }
    if (node.name() == "concat2") {
      EXPECT_EQ(7, node.input_size());
      EXPECT_EQ("sin_a", node.input(0));
      EXPECT_EQ("b", node.input(1));
      EXPECT_EQ("c", node.input(2));
      EXPECT_EQ("axis", node.input(3));
      EXPECT_EQ("^ctrl1", node.input(4));
      EXPECT_EQ("^ctrl2", node.input(5));
      EXPECT_EQ("^ctrl3", node.input(6));
      found++;
    }
    if (node.name() == "exp_a2") {
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("concat2", node.input(0));
      EXPECT_EQ("^ctrl1", node.input(1));
      found++;
    }
    if (node.name() == "cos_exp_a2") {
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("exp_a2", node.input(0));
      EXPECT_EQ("^ctrl1", node.input(1));
      found++;
    }
    if (node.name() == "id2") {
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("cos_exp_a2", node.input(0));
      found++;
    }
  }
  EXPECT_EQ(7, found);
  // The rewritten graph must be numerically equivalent for every fetch.
  auto tensors = EvaluateNodes(output, item.fetch);
  EXPECT_EQ(tensors.size(), tensors_expected.size());
  EXPECT_EQ(tensors.size(), item.fetch.size());
  for (int i = 0; i < item.fetch.size(); ++i) {
    test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
  }
}
// Verifies that an element-wise unary chain applied to every output of a
// Split/SplitV is pushed below the split input: [Sin(y) for y in Split(x)]
// becomes [y for y in Split(Sin(x))]. The hoisted ops get new
// "ArithmeticOptimizer/..." names, the original per-branch unary nodes are
// pruned, and their control dependencies accumulate on the hoisted node.
TEST_F(ArithmeticOptimizerTest, HoistCWiseUnaryIntoSplit) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output x = ops::Const(s.WithOpName("x"), 3.1415f, {32});
  Output axis = ops::Const(s.WithOpName("axis"), 0, {});
  Output ctrl1 = ops::Const(s.WithOpName("ctrl1"), 1, {});
  Output ctrl2 = ops::Const(s.WithOpName("ctrl2"), 2, {});
  Output ctrl3 = ops::Const(s.WithOpName("ctrl3"), 3, {});
  // Test case with chains of length 1.
  // Rewrites
  //          [Sin(y) for y in Split(x)]
  // into
  //          [y for y in Split(Sin(x))].
  ops::Split split1(s.WithOpName("split1"), axis, x, 2);
  Output sin_a =
      ops::Sin(s.WithOpName("sin_a").WithControlDependencies(ctrl1), split1[0]);
  Output id_a = ops::Identity(s.WithOpName("id_a"), sin_a);
  Output sin_b = ops::Sin(s.WithOpName("sin_b"), split1[1]);
  Output exp_b = ops::Exp(s.WithOpName("exp_b"), sin_b);
  Output id_b = ops::Identity(s.WithOpName("id_b"), exp_b);
  // Test case with SplitV and chains of length 2.
  // Rewrites
  //          [Cos(Exp(y)) for y in Split(x)]
  // into
  //          [y for y in Split(Cos(Exp(x)))].
  Output size_splits2 = ops::Const(s.WithOpName("size_splits2"), {20, 12}, {2});
  ops::SplitV split2(s.WithOpName("split2"), x, size_splits2, axis, 2);
  Output exp_a2 = ops::Exp(
      s.WithOpName("exp_a2").WithControlDependencies(ctrl1), split2[0]);
  Output exp_b2 = ops::Exp(s.WithOpName("exp_b2"), split2[1]);
  Output cos_exp_a2 = ops::Cos(
      s.WithOpName("cos_exp_a2").WithControlDependencies(ctrl2), exp_a2);
  Output cos_exp_b2 = ops::Cos(
      s.WithOpName("cos_exp_b2").WithControlDependencies(ctrl3), exp_b2);
  Output id_a2 = ops::Identity(s.WithOpName("id_a2"), cos_exp_a2);
  Output id_b2 = ops::Identity(s.WithOpName("id_b2"), cos_exp_b2);
  GrapplerItem item;
  item.fetch = {"id_a", "id_b", "id_a2", "id_b2"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyHoistCWiseUnaryChains(&optimizer);
  OptimizeTwiceAndPrune(&optimizer, &item, &output);
  int found = 0;
  for (const NodeDef& node : output.node()) {
    // The following 6 nodes should be pruned.
    EXPECT_NE(node.name(), "sin_a");
    EXPECT_NE(node.name(), "sin_b");
    EXPECT_NE(node.name(), "exp_a2");
    EXPECT_NE(node.name(), "exp_b2");
    EXPECT_NE(node.name(), "cos_exp_a2");
    EXPECT_NE(node.name(), "cos_exp_b2");
    if (node.name() == "split1") {
      // split1 now reads x through the hoisted Sin node.
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("axis", node.input(0));
      EXPECT_EQ("ArithmeticOptimizer/_sin_a_split1", node.input(1));
      found++;
    }
    if (node.name() == "ArithmeticOptimizer/_sin_a_split1") {
      EXPECT_EQ("Sin", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("x", node.input(0));
      EXPECT_EQ("^ctrl1", node.input(1));
      found++;
    }
    if (node.name() == "id_a") {
      // First split output slot is referenced without a ":0" suffix.
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("split1", node.input(0))
      found++;
    }
    if (node.name() == "exp_b") {
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("split1:1", node.input(0));
      found++;
    }
    if (node.name() == "id_b") {
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("exp_b", node.input(0));
      found++;
    }
    if (node.name() == "ArithmeticOptimizer/_exp_a2_split2") {
      // Control deps from every hoisted op in both branches accumulate here.
      EXPECT_EQ("Exp", node.op());
      EXPECT_EQ(4, node.input_size());
      EXPECT_EQ("x", node.input(0));
      EXPECT_EQ("^ctrl1", node.input(1));
      EXPECT_EQ("^ctrl2", node.input(2));
      EXPECT_EQ("^ctrl3", node.input(3));
      found++;
    }
    if (node.name() == "ArithmeticOptimizer/_cos_exp_a2_split2") {
      EXPECT_EQ("Cos", node.op());
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("ArithmeticOptimizer/_exp_a2_split2", node.input(0));
      found++;
    }
    if (node.name() == "split2") {
      EXPECT_EQ(3, node.input_size());
      EXPECT_EQ("ArithmeticOptimizer/_cos_exp_a2_split2", node.input(0));
      EXPECT_EQ("size_splits2", node.input(1));
      EXPECT_EQ("axis", node.input(2));
      found++;
    }
    if (node.name() == "id_a2") {
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("split2", node.input(0));
      found++;
    }
    if (node.name() == "id_b2") {
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("split2:1", node.input(0));
      found++;
    }
  }
  EXPECT_EQ(10, found);
  // The rewritten graph must be numerically equivalent for every fetch.
  auto tensors = EvaluateNodes(output, item.fetch);
  EXPECT_EQ(tensors.size(), tensors_expected.size());
  EXPECT_EQ(tensors.size(), item.fetch.size());
  for (int i = 0; i < item.fetch.size(); ++i) {
    test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
  }
}
// Verifies that RemoveIdempotent collapses chains of idempotent ops:
// Snapshot(Snapshot(x)) becomes a single Snapshot, and
// Identity(Identity(x)) becomes a single Identity, with the fetch nodes
// rewired to the surviving op of each chain.
TEST_F(ArithmeticOptimizerTest, RemoveIdempotent) {
  tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
  Output const_a = ops::Const(scope.WithOpName("a"), 3.14f, {32});
  Output snap1 = ops::Snapshot(scope.WithOpName("sn1"), const_a);
  Output snap2 = ops::Snapshot(scope.WithOpName("sn2"), snap1);
  Output fetch1 = ops::Identity(scope.WithOpName("out1"), snap2);
  Output ident1 = ops::Identity(scope.WithOpName("id1"), const_a);
  Output ident2 = ops::Identity(scope.WithOpName("id2"), ident1);
  Output fetch2 = ops::Identity(scope.WithOpName("out2"), ident2);
  GrapplerItem item;
  item.fetch = {"out1", "out2"};
  TF_CHECK_OK(scope.ToGraphDef(&item.graph));
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyRemoveIdempotent(&optimizer);
  OptimizeTwice(&optimizer, &item, &output);
  // One node from each of the two chains is gone: 9 original nodes -> 7.
  EXPECT_EQ(7, output.node_size());
  int num_matches = 0;
  for (const NodeDef& node : output.node()) {
    const string& name = node.name();
    if (name == "out1") {
      // out1 now reads the surviving Snapshot directly.
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("sn1", node.input(0));
      ++num_matches;
    } else if (name == "out2") {
      // out2 now reads the surviving Identity directly.
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("id1", node.input(0));
      ++num_matches;
    } else if (name == "sn1") {
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("a", node.input(0));
      ++num_matches;
    }
  }
  EXPECT_EQ(3, num_matches);
  // The rewritten graph must be numerically equivalent for every fetch.
  auto tensors = EvaluateNodes(output, item.fetch);
  EXPECT_EQ(tensors.size(), tensors_expected.size());
  EXPECT_EQ(tensors.size(), item.fetch.size());
  for (int i = 0; i < item.fetch.size(); ++i) {
    test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
  }
}
// Verifies the RemoveLogicalNot stage: a LogicalNot applied to a comparison
// is folded into the negated comparison op (Equal <-> NotEqual,
// Less <-> GreaterEqual, LessEqual <-> Greater), and consumers of the Not
// are rewired to read the (negated) comparison directly.
TEST_F(ArithmeticOptimizerTest, RemoveLogicalNot) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output a = ops::Const(s.WithOpName("a"), 3.14f, {32});
  Output b = ops::Const(s.WithOpName("b"), -3.14f, {32});
  Output eq = ops::Equal(s.WithOpName("eq"), a, b);
  Output neq = ops::NotEqual(s.WithOpName("neq"), a, b);
  Output lt = ops::Less(s.WithOpName("lt"), a, b);
  Output le = ops::LessEqual(s.WithOpName("le"), a, b);
  Output gt = ops::Greater(s.WithOpName("gt"), a, b);
  Output ge = ops::GreaterEqual(s.WithOpName("ge"), a, b);
  // not_eq is reserved
  Output not_eq1 = ops::LogicalNot(s.WithOpName("not_eq1"), eq);
  Output not_neq = ops::LogicalNot(s.WithOpName("not_neq"), neq);
  Output not_lt = ops::LogicalNot(s.WithOpName("not_lt"), lt);
  Output not_le = ops::LogicalNot(s.WithOpName("not_le"), le);
  Output not_gt = ops::LogicalNot(s.WithOpName("not_gt"), gt);
  Output not_ge = ops::LogicalNot(s.WithOpName("not_ge"), ge);
  Output id_not_eq = ops::Identity(s.WithOpName("id_not_eq"), not_eq1);
  Output id_not_neq = ops::Identity(s.WithOpName("id_not_neq"), not_neq);
  Output id_not_lt = ops::Identity(s.WithOpName("id_not_lt"), not_lt);
  Output id_not_le = ops::Identity(s.WithOpName("id_not_le"), not_le);
  Output id_not_gt = ops::Identity(s.WithOpName("id_not_gt"), not_gt);
  Output id_not_ge = ops::Identity(s.WithOpName("id_not_ge"), not_ge);
  GrapplerItem item;
  item.fetch = {"id_not_eq", "id_not_neq", "id_not_lt",
                "id_not_le", "id_not_gt", "id_not_ge"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyRemoveLogicalNot(&optimizer);
  OptimizeTwice(&optimizer, &item, &output);
  // Each Identity must now read the comparison directly (the LogicalNot is
  // gone), and each comparison op must have been replaced by its negation.
  int found = 0;
  for (const NodeDef& node : output.node()) {
    if (node.name() == "id_not_eq") {
      EXPECT_EQ("eq", node.input(0));
      ++found;
    }
    if (node.name() == "id_not_neq") {
      EXPECT_EQ("neq", node.input(0));
      ++found;
    }
    if (node.name() == "id_not_lt") {
      EXPECT_EQ("lt", node.input(0));
      ++found;
    }
    if (node.name() == "id_not_le") {
      EXPECT_EQ("le", node.input(0));
      ++found;
    }
    if (node.name() == "id_not_gt") {
      EXPECT_EQ("gt", node.input(0));
      ++found;
    }
    if (node.name() == "id_not_ge") {
      EXPECT_EQ("ge", node.input(0));
      ++found;
    }
    if (node.name() == "eq") {
      EXPECT_EQ("NotEqual", node.op());
      ++found;
    }
    if (node.name() == "neq") {
      EXPECT_EQ("Equal", node.op());
      ++found;
    }
    if (node.name() == "lt") {
      EXPECT_EQ("GreaterEqual", node.op());
      ++found;
    }
    if (node.name() == "le") {
      EXPECT_EQ("Greater", node.op());
      ++found;
    }
    if (node.name() == "gt") {
      EXPECT_EQ("LessEqual", node.op());
      ++found;
    }
    if (node.name() == "ge") {
      EXPECT_EQ("Less", node.op());
      ++found;
    }
  }
  EXPECT_EQ(12, found);
  // Boolean outputs must be bit-for-bit identical to the original graph.
  auto tensors = EvaluateNodes(output, item.fetch);
  EXPECT_EQ(tensors.size(), tensors_expected.size());
  EXPECT_EQ(tensors.size(), item.fetch.size());
  for (int i = 0; i < item.fetch.size(); ++i) {
    test::ExpectTensorEqual<bool>(tensors_expected[i], tensors[i]);
  }
}
// Verifies OptimizeMaxOrMinOfMonotonic: Max(Sqrt(x)) is rewritten as
// Sqrt(Max(x)) — the reduction is pushed below the monotonically increasing
// element-wise op, so Sqrt runs on the (smaller) reduced tensor.
TEST_F(ArithmeticOptimizerTest, OptimizeMaxOrMinOfMonotonicElementWise) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  auto x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
  Output sqrt = ops::Sqrt(s.WithOpName("sqrt"), x);
  Output reduce_max = ops::Max(s.WithOpName("reduce_max"), sqrt, {0});
  Output final_out = ops::Identity(s.WithOpName("final_out"), reduce_max);
  GrapplerItem item;
  item.fetch = {"final_out"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
  EXPECT_EQ(1, tensors_expected.size());
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyOptimizeMaxOrMinOfMonotonic(&optimizer);
  OptimizeAndPrune(&optimizer, &item, &output);
  auto tensors = EvaluateNodes(output, item.fetch);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
  EXPECT_EQ(item.graph.node_size(), output.node_size());
  // Check if the inputs are switched
  int required_node_count = 0;
  for (int i = 0; i < output.node_size(); ++i) {
    const NodeDef& node = output.node(i);
    if (node.name() == "sqrt") {
      EXPECT_EQ("Sqrt", node.op());
      EXPECT_EQ(1, node.input_size());
      // Sqrt now consumes the reduction result instead of feeding it.
      EXPECT_EQ("reduce_max", node.input(0));
      ++required_node_count;
    } else if (node.name() == "reduce_max") {
      EXPECT_EQ("Max", node.op());
      EXPECT_EQ(2, node.input_size());
      // Max now reads the raw input x directly.
      EXPECT_EQ("x", node.input(0));
      ++required_node_count;
    }
  }
  EXPECT_EQ(2, required_node_count);
}
// When the intermediate node ("sqrt") is itself fetched, the rewrite would
// change the value of a fetch output, so the pass must leave the whole
// graph untouched.
TEST_F(ArithmeticOptimizerTest,
       OptimizeMaxOrMinOfMonotonicElementWise_DoNotChangeFetchNode) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  auto x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
  Output sqrt = ops::Sqrt(s.WithOpName("sqrt"), x);
  Output reduce_max = ops::Max(s.WithOpName("reduce_max"), sqrt, {0});
  Output final_out = ops::Identity(s.WithOpName("final_out"), reduce_max);
  GrapplerItem item;
  item.fetch = {"sqrt", "final_out"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
  EXPECT_EQ(2, tensors_expected.size());
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyOptimizeMaxOrMinOfMonotonic(&optimizer);
  OptimizeTwice(&optimizer, &item, &output);
  // Should be a NoOp since we are not allowed to change the output of fetch
  // nodes.
  VerifyGraphsMatch(item.graph, output, __LINE__);
}
// For a monotonically non-increasing element-wise op (Neg), the reduction is
// both pushed down AND flipped: Max(Neg(x)) becomes Neg(Min(x)).
TEST_F(ArithmeticOptimizerTest,
       OptimizeMaxOrMinOfMonotonicElementWiseNonIncreasing) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  auto x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
  Output neg = ops::Neg(s.WithOpName("neg"), x);
  Output reduce_max = ops::Max(s.WithOpName("reduce_max"), neg, {0});
  Output final_out = ops::Identity(s.WithOpName("final_out"), reduce_max);
  GrapplerItem item;
  item.fetch = {"final_out"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
  EXPECT_EQ(1, tensors_expected.size());
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyOptimizeMaxOrMinOfMonotonic(&optimizer);
  OptimizeAndPrune(&optimizer, &item, &output);
  auto tensors = EvaluateNodes(output, item.fetch);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
  EXPECT_EQ(item.graph.node_size(), output.node_size());
  // Check if the inputs are switched
  int required_node_count = 0;
  for (int i = 0; i < output.node_size(); ++i) {
    const NodeDef& node = output.node(i);
    if (node.name() == "neg") {
      EXPECT_EQ("Neg", node.op());
      EXPECT_EQ(1, node.input_size());
      // Neg now consumes the reduction output.
      EXPECT_EQ("reduce_max", node.input(0));
      ++required_node_count;
    } else if (node.name() == "reduce_max") {
      // The Max was flipped to Min because Neg is non-increasing.
      EXPECT_EQ("Min", node.op());
      EXPECT_EQ(2, node.input_size());
      EXPECT_EQ("x", node.input(0));
      ++required_node_count;
    }
  }
  EXPECT_EQ(2, required_node_count);
}
// Verifies UnaryOpsComposition: a CPU-placed chain Sqrt -> Log -> Relu is
// fused into a single _UnaryOpsComposition node whose "op_names" attribute
// records the fused ops in order.
TEST_F(ArithmeticOptimizerTest, UnaryOpsComposition) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  auto x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
  Output sqrt = ops::Sqrt(s.WithOpName("sqrt"), x);
  Output log = ops::Log(s.WithOpName("log"), sqrt);
  Output relu = ops::Relu(s.WithOpName("relu"), log);
  Output final_out = ops::Identity(s.WithOpName("final_out"), relu);
  GrapplerItem item;
  item.fetch = {"final_out"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  // Place all nodes on CPU.
  for (int i = 0; i < item.graph.node_size(); ++i) {
    item.graph.mutable_node(i)->set_device("/device:CPU:0");
  }
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
  EXPECT_EQ(1, tensors_expected.size());
  GraphDef output;
  ArithmeticOptimizer optimizer;
  EnableOnlyUnaryOpsComposition(&optimizer);
  OptimizeAndPrune(&optimizer, &item, &output);
  // Only x, the fused op, and final_out should remain.
  EXPECT_EQ(3, output.node_size());
  // Check that Sqrt/Log/Relu were replaced with a single op.
  int required_node_count = 0;
  for (int i = 0; i < output.node_size(); ++i) {
    const NodeDef& node = output.node(i);
    if (node.name() == "final_out") {
      EXPECT_EQ("Identity", node.op());
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("relu/unary_ops_composition", node.input(0));
      ++required_node_count;
    } else if (node.name() == "relu/unary_ops_composition") {
      EXPECT_EQ("_UnaryOpsComposition", node.op());
      EXPECT_EQ(1, node.input_size());
      EXPECT_EQ("x", node.input(0));
      auto op_names = node.attr().at("op_names").list().s();
      EXPECT_EQ(3, op_names.size());
      EXPECT_EQ("Sqrt", op_names[0]);
      EXPECT_EQ("Log", op_names[1]);
      EXPECT_EQ("Relu", op_names[2]);
      ++required_node_count;
    }
  }
  EXPECT_EQ(2, required_node_count);
  auto tensors = EvaluateNodes(output, item.fetch);
  EXPECT_EQ(1, tensors.size());
  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
}
} // namespace grappler
} // namespace tensorflow
| snnn/tensorflow | tensorflow/core/grappler/optimizers/arithmetic_optimizer_test.cc | C++ | apache-2.0 | 128,921 |
require 'spec_helper'
# Unit tests for the openldap_access custom type.
#
# Covers parsing of composite namevar titles — optional "{N}" position
# prefix, the "to <what>" clause, one or more "by ..." access clauses, and
# an optional trailing "on <suffix>" scope — plus munging of the :access
# property from both array and string inputs.
#
# Fixes: the original descriptions misspelled "composite" and two examples
# shared the identical description 'handles componsite name with position'
# even though the second one also exercised the suffix clause.
describe Puppet::Type.type(:openldap_access) do
  describe 'namevar title patterns' do
    it 'handles composite name' do
      access = described_class.new(name: 'to attrs=userPassword,shadowLastChange by dn="cn=admin,dc=example,dc=com" write by anonymous auth')
      expect(access[:name]).to eq('to attrs=userPassword,shadowLastChange by dn="cn=admin,dc=example,dc=com" write by anonymous auth')
      expect(access[:what]).to eq('attrs=userPassword,shadowLastChange')
      expect(access[:access]).to eq([['by dn="cn=admin,dc=example,dc=com" write', 'by anonymous auth']])
    end
    it 'handles composite name with position' do
      access = described_class.new(name: '{0}to attrs=userPassword,shadowLastChange by dn="cn=admin,dc=example,dc=com" write by anonymous auth')
      expect(access[:name]).to eq('{0}to attrs=userPassword,shadowLastChange by dn="cn=admin,dc=example,dc=com" write by anonymous auth')
      expect(access[:position]).to eq('0')
      expect(access[:what]).to eq('attrs=userPassword,shadowLastChange')
      expect(access[:access]).to eq([['by dn="cn=admin,dc=example,dc=com" write', 'by anonymous auth']])
    end
    it 'handles composite name with position and suffix' do
      access = described_class.new(name: '{0}to attrs=userPassword,shadowLastChange by dn="cn=admin,dc=example,dc=com" write by anonymous auth on dc=example,dc=com')
      expect(access[:name]).to eq('{0}to attrs=userPassword,shadowLastChange by dn="cn=admin,dc=example,dc=com" write by anonymous auth on dc=example,dc=com')
      expect(access[:position]).to eq('0')
      expect(access[:what]).to eq('attrs=userPassword,shadowLastChange')
      expect(access[:access]).to eq([['by dn="cn=admin,dc=example,dc=com" write', 'by anonymous auth']])
      expect(access[:suffix]).to eq('dc=example,dc=com')
    end
    it 'handles specific value of attr' do
      access = described_class.new(name: 'to attrs=objectClass val=posixAccount by dn="cn=admin,dc=example,dc=com" write by anonymous auth')
      expect(access[:name]).to eq('to attrs=objectClass val=posixAccount by dn="cn=admin,dc=example,dc=com" write by anonymous auth')
      expect(access[:what]).to eq('attrs=objectClass val=posixAccount')
      expect(access[:access]).to eq([['by dn="cn=admin,dc=example,dc=com" write', 'by anonymous auth']])
    end
  end
  describe 'access' do
    it 'handles array of values' do
      access = described_class.new(name: 'foo', access: ['by dn="cn=admin,dc=example,dc=com" write', 'by anonymous auth'])
      expect(access[:access]).to eq([['by dn="cn=admin,dc=example,dc=com" write'], ['by anonymous auth']])
    end
    it 'handles string' do
      access = described_class.new(name: 'foo', access: 'by dn="cn=admin,dc=example,dc=com" write by anonymous auth')
      expect(access[:access]).to eq([['by dn="cn=admin,dc=example,dc=com" write', 'by anonymous auth']])
    end
  end
end
| nfrance-conseil/puppet-openldap | spec/unit/puppet/type/openldap_acess_spec.rb | Ruby | apache-2.0 | 2,911 |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/forecast/ForecastService_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace ForecastService
{
namespace Model
{
  /**
   * Result object for the CreateForecastExportJob operation. Carries a
   * single field: the Amazon Resource Name (ARN) of the newly created
   * forecast export job. (SDK-generated value-type: deserialized from the
   * service's JSON response via the JsonValue constructor/assignment.)
   */
  class AWS_FORECASTSERVICE_API CreateForecastExportJobResult
  {
  public:
    CreateForecastExportJobResult();
    CreateForecastExportJobResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
    CreateForecastExportJobResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);


    /**
     * <p>The Amazon Resource Name (ARN) of the export job.</p>
     */
    inline const Aws::String& GetForecastExportJobArn() const{ return m_forecastExportJobArn; }

    /**
     * <p>The Amazon Resource Name (ARN) of the export job.</p>
     */
    inline void SetForecastExportJobArn(const Aws::String& value) { m_forecastExportJobArn = value; }

    /**
     * <p>The Amazon Resource Name (ARN) of the export job.</p>
     */
    inline void SetForecastExportJobArn(Aws::String&& value) { m_forecastExportJobArn = std::move(value); }

    /**
     * <p>The Amazon Resource Name (ARN) of the export job.</p>
     */
    inline void SetForecastExportJobArn(const char* value) { m_forecastExportJobArn.assign(value); }

    /**
     * <p>The Amazon Resource Name (ARN) of the export job.</p>
     */
    inline CreateForecastExportJobResult& WithForecastExportJobArn(const Aws::String& value) { SetForecastExportJobArn(value); return *this;}

    /**
     * <p>The Amazon Resource Name (ARN) of the export job.</p>
     */
    inline CreateForecastExportJobResult& WithForecastExportJobArn(Aws::String&& value) { SetForecastExportJobArn(std::move(value)); return *this;}

    /**
     * <p>The Amazon Resource Name (ARN) of the export job.</p>
     */
    inline CreateForecastExportJobResult& WithForecastExportJobArn(const char* value) { SetForecastExportJobArn(value); return *this;}

  private:

    // Backing store for the export job ARN returned by the service.
    Aws::String m_forecastExportJobArn;
  };
} // namespace Model
} // namespace ForecastService
} // namespace Aws
| awslabs/aws-sdk-cpp | aws-cpp-sdk-forecast/include/aws/forecast/model/CreateForecastExportJobResult.h | C | apache-2.0 | 2,341 |
/*
Copyright 2020 The Knative Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package generators
import (
"io"
"k8s.io/gengo/generator"
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
"k8s.io/klog"
)
// reconcilerControllerStubGenerator produces a file of the stub of the
// controller for a custom impl with injection.
type reconcilerControllerStubGenerator struct {
	generator.DefaultGen
	outputPackage string              // package the stub is generated into
	imports namer.ImportTracker       // collects imports referenced by generated code
	typeToGenerate *types.Type        // the single API type this generator handles
	reconcilerPkg string              // package holding the generated reconciler (NewImpl, ClassAnnotationKey)
	informerPackagePath string        // injection informer package for the type
	reconcilerClass string            // configured class-annotation value, if any
	hasReconcilerClass bool           // whether class-annotation filtering is enabled
}

// Compile-time check that the stub generator satisfies generator.Generator.
var _ generator.Generator = (*reconcilerControllerStubGenerator)(nil)
// Filter restricts this generator to the one type it was constructed for.
func (g *reconcilerControllerStubGenerator) Filter(c *generator.Context, t *types.Type) bool {
	// Only process the type for this generator.
	return t == g.typeToGenerate
}
// Namers returns a "raw" namer that qualifies type names relative to the
// output package, recording every referenced import in g.imports.
func (g *reconcilerControllerStubGenerator) Namers(c *generator.Context) namer.NameSystems {
	return namer.NameSystems{
		"raw": namer.NewRawNamer(g.outputPackage, g.imports),
	}
}
// Imports emits the import lines accumulated by the raw namer while the
// template was being rendered.
func (g *reconcilerControllerStubGenerator) Imports(c *generator.Context) (imports []string) {
	imports = append(imports, g.imports.ImportLines()...)
	return
}
// GenerateType renders the controller-stub template for type t into w.
// Each map entry resolves a helper type/function through the raw namer, so
// the rendered code is correctly qualified and its imports are tracked.
func (g *reconcilerControllerStubGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
	sw := generator.NewSnippetWriter(w, c, "{{", "}}")

	klog.V(5).Info("processing type ", t)

	// Template data: the type itself, class-annotation settings, and the
	// fully qualified references the stub needs.
	m := map[string]interface{}{
		"type":  t,
		"class": g.reconcilerClass,
		"hasClass": g.hasReconcilerClass,
		"informerGet": c.Universe.Function(types.Name{
			Package: g.informerPackagePath,
			Name:    "Get",
		}),
		"controllerImpl": c.Universe.Type(types.Name{Package: "knative.dev/pkg/controller", Name: "Impl"}),
		"reconcilerNewImpl": c.Universe.Type(types.Name{
			Package: g.reconcilerPkg,
			Name:    "NewImpl",
		}),
		"loggingFromContext": c.Universe.Function(types.Name{
			Package: "knative.dev/pkg/logging",
			Name:    "FromContext",
		}),
		"contextContext": c.Universe.Type(types.Name{
			Package: "context",
			Name:    "Context",
		}),
		"configmapWatcher": c.Universe.Type(types.Name{
			Package: "knative.dev/pkg/configmap",
			Name:    "Watcher",
		}),
		"classAnnotationKey": c.Universe.Variable(types.Name{
			Package: g.reconcilerPkg,
			Name:    "ClassAnnotationKey",
		}),
		"annotationFilterFunc": c.Universe.Function(types.Name{
			Package: "knative.dev/pkg/reconciler",
			Name:    "AnnotationFilterFunc",
		}),
		"filterHandler": c.Universe.Type(types.Name{
			Package: "k8s.io/client-go/tools/cache",
			Name:    "FilteringResourceEventHandler",
		}),
	}

	sw.Do(reconcilerControllerStub, m)

	return sw.Error()
}
// reconcilerControllerStub is the raw template rendered by GenerateType;
// the {{...}} placeholders are filled from the map built there. The output
// is intended to be copied and customized by the user (see the TODOs).
var reconcilerControllerStub = `
// TODO: PLEASE COPY AND MODIFY THIS FILE AS A STARTING POINT
// NewController creates a Reconciler for {{.type|public}} and returns the result of NewImpl.
func NewController(
	ctx {{.contextContext|raw}},
	cmw {{.configmapWatcher|raw}},
) *{{.controllerImpl|raw}} {
	logger := {{.loggingFromContext|raw}}(ctx)

	{{.type|lowercaseSingular}}Informer := {{.informerGet|raw}}(ctx)

	{{if .hasClass}}
	classValue := "default" // TODO: update this to the appropriate value.
	classFilter := {{.annotationFilterFunc|raw}}({{.classAnnotationKey|raw}}, classValue, false /*allowUnset*/)
	{{end}}

	// TODO: setup additional informers here.
	{{if .hasClass}}// TODO: remember to use the classFilter from above to filter appropriately.{{end}}

	r := &Reconciler{}
	impl := {{.reconcilerNewImpl|raw}}(ctx, r{{if .hasClass}}, classValue{{end}})

	logger.Info("Setting up event handlers.")

	{{if .hasClass}}
	{{.type|lowercaseSingular}}Informer.Informer().AddEventHandler({{.filterHandler|raw}}{
		FilterFunc: classFilter,
		Handler:    controller.HandleAll(impl.Enqueue),
	})
	{{else}}
	{{.type|lowercaseSingular}}Informer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue))
	{{end}}

	// TODO: add additional informer event handlers here.

	return impl
}
`
| google/knative-gcp | vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_controller_stub.go | GO | apache-2.0 | 4,426 |
/*
* Copyright (c) 2015, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.wso2.carbon.mss.examples.petstore.util.fe.view;
import java.io.Serializable;
import javax.faces.bean.ManagedBean;
import javax.faces.bean.SessionScoped;
/**
 * Session-scoped JSF managed bean that centralizes the navigation outcomes
 * used by the pet store pages: the customer store welcome view, the admin
 * welcome view, the login view, and the listing view.
 */
@ManagedBean
@SessionScoped
public class NavigationBean implements Serializable {

    private static final long serialVersionUID = -8628674465932953415L;

    /** Redirect outcome for the customer-facing pet list. */
    private static final String STORE_WELCOME_OUTCOME = "pet/list.xhtml?faces-redirect=true";

    /** Redirect outcome for the administrator landing page. */
    private static final String ADMIN_WELCOME_OUTCOME = "pet/index.xhtml?faces-redirect=true";

    /** Navigation outcome pointing at the login view. */
    private static final String LOGIN_VIEW = "/login.xhtml";

    /** Navigation outcome returning to the listing view. */
    private static final String LIST_VIEW = "list";

    /** @return redirect outcome for the store welcome (pet list) page. */
    public String redirectToStoreWelcome() {
        return STORE_WELCOME_OUTCOME;
    }

    /** @return redirect outcome for the admin welcome page. */
    public String redirectToAdminWelcome() {
        return ADMIN_WELCOME_OUTCOME;
    }

    /** @return outcome for the login view. */
    public String toLogin() {
        return LOGIN_VIEW;
    }

    /** @return outcome that navigates back to the list view. */
    public String backtoList() {
        return LIST_VIEW;
    }
}
| dakshika/product-mss | samples/petstore/frontend-util/src/main/java/org/wso2/carbon/mss/examples/petstore/util/fe/view/NavigationBean.java | Java | apache-2.0 | 1,401 |
/*
* Copyright (c) 2017, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.wso2.carbon.identity.oauth2.validators;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import org.wso2.carbon.identity.oauth2.dto.OAuth2TokenValidationRequestDTO;
import org.wso2.carbon.identity.oauth2.dto.OAuth2TokenValidationResponseDTO;
import static org.testng.Assert.assertEquals;
/**
 * Unit tests for {@code OAuth2TokenValidationMessageContext}: verifies that
 * the request/response DTOs handed to the constructor are returned
 * unchanged, and that arbitrary key/value properties can be stored on and
 * read back from the context.
 */
public class OAuth2TokenValidationMessageContextTest {

    private OAuth2TokenValidationMessageContext oAuth2TokenValidationMessageContext;
    private OAuth2TokenValidationRequestDTO requestDTO;
    private OAuth2TokenValidationResponseDTO responseDTO;

    // Builds a fresh context around new DTOs before every test method.
    @BeforeMethod
    public void setUp() throws Exception {
        requestDTO = new OAuth2TokenValidationRequestDTO();
        responseDTO = new OAuth2TokenValidationResponseDTO();
        oAuth2TokenValidationMessageContext = new OAuth2TokenValidationMessageContext(requestDTO, responseDTO);
    }

    @Test
    public void testGetRequestDTO() throws Exception {
        assertEquals(oAuth2TokenValidationMessageContext.getRequestDTO(), requestDTO);
    }

    @Test
    public void testGetResponseDTO() throws Exception {
        assertEquals(oAuth2TokenValidationMessageContext.getResponseDTO(), responseDTO);
    }

    @Test
    public void testAddProperty() throws Exception {
        oAuth2TokenValidationMessageContext.addProperty("testProperty", "testValue");
        assertEquals(oAuth2TokenValidationMessageContext.getProperty("testProperty"), "testValue");
    }

    @Test
    public void testGetProperty() throws Exception {
        oAuth2TokenValidationMessageContext.addProperty("testProperty", "testValue");
        assertEquals(oAuth2TokenValidationMessageContext.getProperty("testProperty"), "testValue");
    }
}
| darshanasbg/identity-inbound-auth-oauth | components/org.wso2.carbon.identity.oauth/src/test/java/org/wso2/carbon/identity/oauth2/validators/OAuth2TokenValidationMessageContextTest.java | Java | apache-2.0 | 2,398 |
/*
* XDD - a data movement and benchmarking toolkit
*
* Copyright (C) 1992-2013 I/O Performance, Inc.
* Copyright (C) 2009-2013 UT-Battelle, LLC
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License version 2, as published by the Free Software
* Foundation. See file COPYING.
*
*/
/* worker_thread.c
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <strings.h>
#include <string.h>
#include <errno.h>
#include <pthread.h>
#define THIS_IS_A_SUBROUTINE
#include "bx_data_structures.h"
#define DEBUG 0
/**************************************************************************
* The Worker Thread
**************************************************************************/
// There are one or more Worker Threads per target.
// A Worker Thread will indicate that it is WAITING for something to do and
// place itself on the Worker Thread Data Structure queue of its target.
// Eventually the target will wake up a Worker Thread which is indicated
// by a non-zero value in the bx_wd_released variable in the Worker Thread
// Data Structure for this Worker Thread.
// Upon waking up, the Worker Thread will perform an INPUT or OUTPUT
// operation depending on its target's designation. The Worker Thread
// will have a buffer header structure that contains the location in the
// file to perform the operation, the number of bytes to transfer, and the
// I/O memory buffer.
// Upon completing the requested I/O operation, the Worker Thread will
// stuff its Buffer Header on to the "next buffer queue". In other words,
// if this Worker Thread is an INPUT Worker Thread then it just read data
// into its buffer.
// Therefore, upon completion of the read, this Worker Thread will stuff
// its Buffer Header on the Buffer Header Queue of the OUTPUT target and
// wake up the OUTPUT target if it is waiting for a buffer.
// Likewise, if this Worker Thread is an OUTPUT Worker Thread then it just
// wrote data from its buffer. Therefore, upon completion of the write, this
// Worker Thread will stuff its Buffer Header on the Buffer Header Queue
// of the INPUT target and wake up the INPUT target if it is waiting
// for a buffer.
//
// After putting its Buffer Header on the appropriate queue this Worker
// Thread will stuff itself back onto its Worker Thread Data Structure
// queue and wait for something to do.
//
// At some point the Worker Thread will wake up and find the "TERMINATE"
// flag set in its Worker Thread Data Structure . At this point the Worker
// Thread will break the loop and terminate.
// Worker Thread entry point (one per Worker Thread; pin is this worker's
// own bx_wd structure). See the block comment above for the full hand-off
// protocol between the target thread and its workers.
void *
worker_thread_main(void *pin) {
	int status;
	struct bx_td *bx_tdp;
	struct bx_wd *bx_wdp;
	struct bx_buffer_queue *qp;
	struct bx_buffer_header *bufhdrp;
	nclk_t nclk;

	bx_wdp = (struct bx_wd *)pin;
	bx_tdp = bx_wdp->bx_wd_my_bx_tdp;
	nclk_now(&nclk);
	if (DEBUG) fprintf(stderr,"%llu: worker_thread_main: ENTER: bx_wdp=%p, bx_tdp=%p\n", (unsigned long long int)nclk,bx_wdp, bx_tdp);

	status = 0;
	nclk_now(&nclk);
	if (DEBUG) fprintf(stderr,"%llu worker_thread_main: my_worker_thread_number=%s - ENTER %d\n", (unsigned long long int)nclk,(bx_tdp->bx_td_flags & BX_TD_INPUT)?"INPUT":"OUTPUT", bx_wdp->bx_wd_my_worker_thread_number);
	while (1) {
		// Set flag to indicate that this worker_thread is WAITING
		bx_wdp->bx_wd_flags |= BX_WD_WAITING;
		// Enqueue this Worker Thread Data Structure on the bx_wd_queue for this target
		bx_wd_enqueue(bx_wdp, bx_wdp->bx_wd_my_queue);
		// Wait for the target thread to release me
		pthread_mutex_lock(&bx_wdp->bx_wd_mutex);
		nclk_now(&nclk);
		if (DEBUG) fprintf(stderr,"%llu worker_thread_main: my_worker_thread_number=%s - got the bx_wd_mutex lock - waiting for something to do - time %d\n", (unsigned long long int)nclk,(bx_tdp->bx_td_flags & BX_TD_INPUT)?"INPUT":"OUTPUT", bx_wdp->bx_wd_my_worker_thread_number);
		// NOTE(review): BX_WD_WAITING was already set before the enqueue
		// above — this second set looks redundant; confirm intent.
		bx_wdp->bx_wd_flags |= BX_WD_WAITING;
		// Standard condition-variable wait loop; bx_wd_released is the predicate.
		while (1 != bx_wdp->bx_wd_released) {
			pthread_cond_wait(&bx_wdp->bx_wd_conditional, &bx_wdp->bx_wd_mutex);
		}
		bx_wdp->bx_wd_flags &= ~BX_WD_WAITING;
		bx_wdp->bx_wd_released = 0;
		// NOTE(review): this break exits with bx_wd_mutex still held (locked
		// above, unlocked only at the bottom of the loop) — the terminate
		// path never unlocks it; confirm this is intended.
		if (bx_wdp->bx_wd_flags & BX_WD_TERMINATE)
			break;
		nclk_now(&nclk);
		if (DEBUG) fprintf(stderr,"%llu worker_thread_main: my_worker_thread_number=%s - got the bx_wd_mutex lock - GOT something to do - time %d\n", (unsigned long long int)nclk,(bx_tdp->bx_td_flags & BX_TD_INPUT)?"INPUT":"OUTPUT", bx_wdp->bx_wd_my_worker_thread_number);
		bx_wd_show(bx_wdp);
		// The buffer header tells us where in the file to do the I/O, how
		// many bytes to move, and where the I/O memory buffer lives.
		bufhdrp = bx_wdp->bx_wd_bufhdrp;
		if (bx_tdp->bx_td_flags & BX_TD_INPUT) { // Read the input file
			status = pread(bx_wdp->bx_wd_fd, bufhdrp->bh_startp, bufhdrp->bh_transfer_size, bufhdrp->bh_file_offset);
			if (status < 0)  {
				perror("Read error");
				bufhdrp->bh_valid_size = 0;
			} else {
				bufhdrp->bh_valid_size = status;
			}
			nclk_now(&nclk);
			// NOTE(review): the %d/%zd conversion specifiers in this DEBUG
			// format string do not obviously match the argument types/order
			// (bh_transfer_size/bh_file_offset) — harmless while DEBUG==0,
			// but confirm before enabling.
			if (DEBUG) fprintf(stderr,"%llu worker_thread_main: my_worker_thread_number=%s - read %d of %d bytes starting at offset %d - time %zd\n", (unsigned long long int)nclk,(bx_tdp->bx_td_flags & BX_TD_INPUT)?"INPUT":"OUTPUT", bx_wdp->bx_wd_my_worker_thread_number,status,bufhdrp->bh_transfer_size,bufhdrp->bh_file_offset);
			// Put this buffer on the output target queue
			qp = &bx_td[bx_wdp->bx_wd_next_buffer_queue].bx_td_buffer_queue;
			bh_enqueue(bx_wdp->bx_wd_bufhdrp, qp);
		} else { // Must be output
			status = pwrite(bx_wdp->bx_wd_fd, bufhdrp->bh_startp, bufhdrp->bh_transfer_size, bufhdrp->bh_file_offset);
			if (status < 0)  {
				perror("Write error");
				bufhdrp->bh_valid_size = 0;
			} else {
				bufhdrp->bh_valid_size = status;
			}
			nclk_now(&nclk);
			// NOTE(review): same specifier/argument mismatch concern as the
			// read path ("%zd - time %p" vs. offset/pointer args); confirm.
			if (DEBUG) fprintf(stderr,"%llu worker_thread_main: my_worker_thread_number=%s - wrote %d of %d bytes starting at offset %d - requeuing buffer %zd - time %p\n", (unsigned long long int)nclk,(bx_tdp->bx_td_flags & BX_TD_INPUT)?"INPUT":"OUTPUT", bx_wdp->bx_wd_my_worker_thread_number,status,bufhdrp->bh_transfer_size,bufhdrp->bh_file_offset, bufhdrp);
			// Put this buffer on the input target queue
			qp = &bx_td[bx_wdp->bx_wd_next_buffer_queue].bx_td_buffer_queue;
			bh_enqueue(bufhdrp, qp);
			bufqueue_show(qp);
		}
		nclk_now(&nclk);
		if (DEBUG) fprintf(stderr,"%llu worker_thread_main: my_worker_thread_number=%s - transferred %d bytes - time %d\n", (unsigned long long int)nclk,(bx_tdp->bx_td_flags & BX_TD_INPUT)?"INPUT":"OUTPUT", bx_wdp->bx_wd_my_worker_thread_number,status);
		if (DEBUG) fprintf(stderr,"%llu worker_thread_main: my_worker_thread_number=%s - releasing the bx_wd_mutex lock %d\n", (unsigned long long int)nclk,(bx_tdp->bx_td_flags & BX_TD_INPUT)?"INPUT":"OUTPUT", bx_wdp->bx_wd_my_worker_thread_number);
		pthread_mutex_unlock(&bx_wdp->bx_wd_mutex);
	}
	if (DEBUG) fprintf(stderr,"%llu worker_thread_main: my_worker_thread_number=%s %d - Exit \n", (unsigned long long int)nclk,(bx_tdp->bx_td_flags & BX_TD_INPUT)?"INPUT":"OUTPUT", bx_wdp->bx_wd_my_worker_thread_number);
	return 0;
} // End of worker_thread_main()
| eunsungc/gt6-RAMSES_8_5 | xdd-7.0.0.rc-ramses3/src/bx/bx_worker_thread.c | C | apache-2.0 | 7,048 |
/*
Copyright 2011-2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.google.security.zynamics.binnavi.disassembly;
import com.google.security.zynamics.zylib.gui.zygraph.edges.IViewEdgeListener;
/**
 * Interface for objects that want to be notified about changes in edges.
 *
 * <p>In addition to the generic view-edge events inherited from
 * {@link IViewEdgeListener}, implementers are notified when an edge's
 * global or local comment changes.</p>
 */
public interface INaviEdgeListener extends IViewEdgeListener {
  /**
   * Invoked after the global comment of an edge changed.
   *
   * @param naviEdge The edge whose global comment changed.
   */
  void changedGlobalComment(CNaviViewEdge naviEdge);

  /**
   * Invoked after the local comment of an edge changed.
   *
   * @param naviEdge The edge whose local comment changed.
   */
  void changedLocalComment(CNaviViewEdge naviEdge);
}
| chubbymaggie/binnavi | src/main/java/com/google/security/zynamics/binnavi/disassembly/INaviEdgeListener.java | Java | apache-2.0 | 1,248 |
package com.bazaarvoice.emodb.common.dropwizard.leader;
import com.bazaarvoice.curator.recipes.leader.LeaderService;
import com.bazaarvoice.emodb.common.dropwizard.task.TaskRegistry;
import com.bazaarvoice.emodb.common.zookeeper.leader.PartitionedLeaderService;
import com.google.common.collect.ImmutableMultimap;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.Service;
import com.google.inject.Inject;
import io.dropwizard.servlets.tasks.Task;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.PrintWriter;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentMap;
/**
 * Shows the current status of leadership processes managed by {@link LeaderService}. Allows terminating
 * individual leadership processes, but such that they can be restarted only by restarting the entire server.
 *
 * <p>Exposed as the Dropwizard admin task named "leader". Supports two query
 * parameters: {@code release} (give up leadership temporarily so the cluster
 * re-elects) and {@code terminate} (stop the leadership process until the
 * server restarts). After processing parameters it prints the status of all
 * registered leadership processes.</p>
 */
public class LeaderServiceTask extends Task {
    private static final Logger _log = LoggerFactory.getLogger(LeaderServiceTask.class);

    // Registered leadership processes, keyed by name.
    private final ConcurrentMap<String, LeaderService> _selectorMap = Maps.newConcurrentMap();

    @Inject
    public LeaderServiceTask(TaskRegistry tasks) {
        super("leader");
        tasks.addTask(this);
    }

    /**
     * Registers a leadership process under the given name; it is
     * automatically unregistered when the service terminates or fails.
     */
    public void register(final String name, final LeaderService leaderService) {
        _selectorMap.put(name, leaderService);

        // Unregister automatically to avoid memory leaks.
        leaderService.addListener(new AbstractServiceListener() {
            @Override
            public void terminated(Service.State from) {
                unregister(name, leaderService);
            }

            @Override
            public void failed(Service.State from, Throwable failure) {
                unregister(name, leaderService);
            }
        }, MoreExecutors.sameThreadExecutor());
    }

    /**
     * Registers each partition of a partitioned leadership process as
     * "{name}-{partition}".
     */
    public void register(final String name, final PartitionedLeaderService partitionedLeaderService) {
        int partition = 0;
        for (LeaderService leaderService : partitionedLeaderService.getPartitionLeaderServices()) {
            register(String.format("%s-%d", name, partition++), leaderService);
        }
    }

    /** Removes the mapping only if it still points at the given service. */
    public void unregister(String name, LeaderService leaderService) {
        _selectorMap.remove(name, leaderService);
    }

    /**
     * Task entry point: processes any "release"/"terminate" parameters,
     * then prints the status of every registered leadership process.
     */
    @Override
    public void execute(ImmutableMultimap<String, String> parameters, PrintWriter out) throws Exception {
        // The 'release' argument tells a server to give up leadership and let a new leader be elected, possibly
        // re-electing the current server.  This is useful for rebalancing leader-controlled activities.
        for (String name : parameters.get("release")) {
            LeaderService leaderService = _selectorMap.get(name);
            if (leaderService == null) {
                out.printf("Unknown leader process: %s%n", name);
                continue;
            }

            Service actualService = leaderService.getCurrentDelegateService().orNull();
            if (actualService == null || !actualService.isRunning()) {
                out.printf("Process is not currently elected leader: %s%n", name);
                continue;
            }

            _log.warn("Temporarily releasing leadership for process: {}", name);
            out.printf("Temporarily releasing leadership for process: %s, cluster will elect a new leader.%n", name);
            // Stop only the delegate; the wrapping LeaderService stays running
            // and re-enters the election.
            actualService.stopAndWait();
        }

        // The 'terminate' argument tells a server to give up leadership permanently (or until the server restarts).
        for (String name : parameters.get("terminate")) {
            LeaderService leaderService = _selectorMap.get(name);
            if (leaderService == null) {
                out.printf("Unknown leader process: %s%n", name);
                continue;
            }

            _log.warn("Terminating leader process for: {}", name);
            out.printf("Terminating leader process for: %s.  Restart the server to restart the leader process.%n", name);
            leaderService.stopAndWait();
        }

        // Print current status.
        for (Map.Entry<String, LeaderService> entry : new TreeMap<>(_selectorMap).entrySet()) {
            String name = entry.getKey();
            LeaderService leaderService = entry.getValue();

            out.printf("%s: %s (leader=%s)%n", name,
                    describeState(leaderService.state(), leaderService.hasLeadership()),
                    getLeaderId(leaderService));
        }
    }

    /** Maps RUNNING-without-leadership to a human-readable waiting message. */
    private String describeState(Service.State state, boolean hasLeadership) {
        if (state == Service.State.RUNNING && !hasLeadership) {
            return "waiting to win leadership election";
        } else {
            return state.name();
        }
    }

    /**
     * Best-effort lookup of the current leader's id; any failure is
     * swallowed and reported as "&lt;unknown&gt;".
     */
    private String getLeaderId(LeaderService leaderService) {
        try {
            return leaderService.getLeader().getId();
        } catch (Exception e) {
            return "<unknown>";
        }
    }
}
| billkalter/emodb | common/dropwizard/src/main/java/com/bazaarvoice/emodb/common/dropwizard/leader/LeaderServiceTask.java | Java | apache-2.0 | 5,047 |
<!DOCTYPE html>
<html devsite>
<head>
<meta name="project_path" value="/web/tools/workbox/_project.yaml" />
<meta name="book_path" value="/web/tools/workbox/_book.yaml" />
<meta name="gtm_var" data-key="docType" data-value="reference">
<title>Source: workbox-webpack-plugin/src/inject-manifest.js</title>
<link href="jsdoc.css" rel="stylesheet">
</head>
<body>
<div id="jsdoc-body-container">
<div id="jsdoc-content">
<div id="jsdoc-content-container">
<div id="jsdoc-banner" role="banner">
</div>
<div id="jsdoc-main" role="main">
<header class="page-header">
<h1>Source: workbox-webpack-plugin/src/inject-manifest.js</h1>
</header>
<article>
<pre class="prettyprint linenums"><code>/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
const assert = require('assert');
const path = require('path');
const {getManifest} = require('workbox-build');
const convertStringToAsset = require('./lib/convert-string-to-asset');
const getAssetHash = require('./lib/get-asset-hash');
const getManifestEntriesFromCompilation =
require('./lib/get-manifest-entries-from-compilation');
const getWorkboxSWImports = require('./lib/get-workbox-sw-imports');
const readFileWrapper = require('./lib/read-file-wrapper');
const sanitizeConfig = require('./lib/sanitize-config');
const stringifyManifest = require('./lib/stringify-manifest');
/**
* This class supports taking an existing service worker file which already
* uses Workbox, and injecting a reference to a [precache manifest]() into it,
* allowing it to efficiently precache the assets created by a webpack build.
*
* Use an instance of `InjectManifest` in the
* [`plugins` array](https://webpack.js.org/concepts/plugins/#usage) of a
* webpack config.
*
* @module workbox-webpack-plugin
*/
class InjectManifest {
/**
* Creates an instance of InjectManifest.
*
* @param {Object} [config] See the
* [configuration guide](/web/tools/workbox/modules/workbox-webpack-plugin#configuration)
* for all supported options and defaults.
*/
constructor(config = {}) {
assert(typeof config.swSrc === 'string', `swSrc must be set to the path ` +
`to an existing service worker file.`);
this.config = Object.assign({}, {
chunks: [],
exclude: [
// Exclude source maps.
/\.map$/,
// Exclude anything starting with manifest and ending .js or .json.
/^manifest.*\.js(?:on)?$/,
],
excludeChunks: [],
importScripts: [],
importWorkboxFrom: 'cdn',
swDest: path.basename(config.swSrc),
}, config);
}
/**
* @param {Object} compilation The webpack compilation.
* @param {Function} readFile The function to use when reading files,
* derived from compiler.inputFileSystem.
* @private
*/
async handleEmit(compilation, readFile) {
if (this.config.importWorkboxFrom === 'local') {
throw new Error(`importWorkboxFrom can not be set to 'local' when using` +
` InjectManifest. Please use 'cdn' or a chunk name instead.`);
}
const workboxSWImports = await getWorkboxSWImports(
compilation, this.config);
let entries = getManifestEntriesFromCompilation(compilation, this.config);
const sanitizedConfig = sanitizeConfig.forGetManifest(this.config);
// If there are any "extra" config options remaining after we remove the
// ones that are used natively by the plugin, then assume that they should
// be passed on to workbox-build.getManifest() to generate extra entries.
if (Object.keys(sanitizedConfig).length > 0) {
// If globPatterns isn't explicitly set, then default to [], instead of
// the workbox-build.getManifest() default.
sanitizedConfig.globPatterns = sanitizedConfig.globPatterns || [];
const {manifestEntries} = await getManifest(sanitizedConfig);
entries = entries.concat(manifestEntries);
}
const manifestString = stringifyManifest(entries);
const manifestAsset = convertStringToAsset(manifestString);
const manifestHash = getAssetHash(manifestAsset);
const manifestFilename = `precache-manifest.${manifestHash}.js`;
compilation.assets[manifestFilename] = manifestAsset;
this.config.importScripts.push(
(compilation.options.output.publicPath || '') + manifestFilename);
// workboxSWImports might be null if importWorkboxFrom is 'disabled'.
if (workboxSWImports) {
// workboxSWImport is an array, so use concat() rather than push().
this.config.importScripts = this.config.importScripts.concat(
workboxSWImports);
}
const originalSWString = await readFileWrapper(readFile, this.config.swSrc);
const importScriptsString = this.config.importScripts
.map(JSON.stringify)
.join(', ');
const postInjectionSWString = `importScripts(${importScriptsString});
${originalSWString}
`;
compilation.assets[this.config.swDest] =
convertStringToAsset(postInjectionSWString);
}
/**
* @param {Object} [compiler] default compiler object passed from webpack
*
* @private
*/
apply(compiler) {
compiler.plugin('emit', (compilation, callback) => {
this.handleEmit(compilation, compiler.inputFileSystem._readFile)
.then(callback)
.catch(callback);
});
}
}
module.exports = InjectManifest;
</code></pre>
</article>
</div>
</div>
<nav id="jsdoc-toc-nav" role="navigation"></nav>
</div>
</div>
</body>
</html> | ebidel/WebFundamentals | src/content/en/tools/workbox/reference-docs/v3.0.0-beta.0/workbox-webpack-plugin_src_inject-manifest.js.html | HTML | apache-2.0 | 6,558 |
/**
* Copyright 2014 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
var should = require("should");
var request = require('supertest');
var express = require('express');
var when = require('when');
var app = express();
var RED = require("../../../red/red.js");
var storage = require("../../../red/storage");
var library = require("../../../red/api/library");
describe("library api", function() {
    // Installs an in-memory stub of the storage module so the library HTTP API
    // can be exercised without touching the filesystem.
    //   _flows: map backing the flow library (path -> flow content, plus the
    //           directory-listing structure returned by getAllFlows).
    //   _libraryEntries: map of entry type -> { path -> entry body }.
    function initStorage(_flows,_libraryEntries) {
        var flows = _flows;
        var libraryEntries = _libraryEntries;
        storage.init({
            storageModule: {
                init: function() {
                    return when.resolve();
                },
                getAllFlows: function() {
                    return when.resolve(flows);
                },
                getFlow: function(fn) {
                    if (flows[fn]) {
                        return when.resolve(flows[fn]);
                    } else {
                        return when.reject();
                    }
                },
                saveFlow: function(fn,data) {
                    flows[fn] = data;
                    return when.resolve();
                },
                getLibraryEntry: function(type,path) {
                    if (libraryEntries[type] && libraryEntries[type][path]) {
                        return when.resolve(libraryEntries[type][path]);
                    } else {
                        return when.reject();
                    }
                },
                saveLibraryEntry: function(type,path,meta,body) {
                    libraryEntries[type][path] = body;
                    return when.resolve();
                }
            }
        });
    }
    // Tests for the built-in flow library endpoints (/library/flows/...).
    describe("flows", function() {
        var app;
        before(function() {
            app = express();
            app.use(express.json());
            app.get("/library/flows",library.getAll);
            app.post(new RegExp("/library/flows\/(.*)"),library.post);
            app.get(new RegExp("/library/flows\/(.*)"),library.get);
        });
        it('returns empty result', function(done) {
            initStorage({});
            request(app)
                .get('/library/flows')
                .expect(200)
                .end(function(err,res) {
                    if (err) {
                        throw err;
                    }
                    // 'f' = files, 'd' = directories in the listing response.
                    res.body.should.not.have.property('f');
                    res.body.should.not.have.property('d');
                    done();
                });
        });
        it('returns 404 for non-existent entry', function(done) {
            initStorage({});
            request(app)
                .get('/library/flows/foo')
                .expect(404)
                .end(done);
        });
        // Round-trip: POST a flow then GET it back byte-for-byte.
        it('can store and retrieve item', function(done) {
            initStorage({});
            var flow = '[]';
            request(app)
                .post('/library/flows/foo')
                .set('Content-Type', 'application/json')
                .send(flow)
                .expect(204).end(function (err, res) {
                    if (err) {
                        throw err;
                    }
                    request(app)
                        .get('/library/flows/foo')
                        .expect(200)
                        .end(function(err,res) {
                            if (err) {
                                throw err;
                            }
                            res.text.should.equal(flow);
                            done();
                        });
                });
        });
        it('lists a stored item', function(done) {
            initStorage({f:["bar"]});
            request(app)
                .get('/library/flows')
                .expect(200)
                .end(function(err,res) {
                    if (err) {
                        throw err;
                    }
                    res.body.should.have.property('f');
                    should.deepEqual(res.body.f,['bar']);
                    done();
                });
        });
        // Path-traversal attempts must be rejected with 403.
        it('returns 403 for malicious get attempt', function(done) {
            initStorage({});
            // without the userDir override the malicious url would be
            // http://127.0.0.1:1880/library/flows/../../package to
            // obtain package.json from the node-red root.
            request(app)
                .get('/library/flows/../../../../../package')
                .expect(403)
                .end(done);
        });
        it('returns 403 for malicious post attempt', function(done) {
            initStorage({});
            // without the userDir override the malicious url would be
            // http://127.0.0.1:1880/library/flows/../../package to
            // obtain package.json from the node-red root.
            request(app)
                .post('/library/flows/../../../../../package')
                .expect(403)
                .end(done);
        });
    });
    // Tests for custom library types registered via RED.library.register().
    describe("type", function() {
        var app;
        before(function() {
            app = express();
            app.use(express.json());
            library.init(app);
            RED.library.register("test");
        });
        it('returns empty result', function(done) {
            initStorage({},{'test':{"":[]}});
            request(app)
                .get('/library/test')
                .expect(200)
                .end(function(err,res) {
                    if (err) {
                        throw err;
                    }
                    res.body.should.not.have.property('f');
                    done();
                });
        });
        it('returns 404 for non-existent entry', function(done) {
            initStorage({},{});
            request(app)
                .get('/library/test/foo')
                .expect(404)
                .end(done);
        });
        it('can store and retrieve item', function(done) {
            initStorage({},{'test':{}});
            var flow = '[]';
            request(app)
                .post('/library/test/foo')
                .set('Content-Type', 'text/plain')
                .send(flow)
                .expect(204).end(function (err, res) {
                    if (err) {
                        throw err;
                    }
                    request(app)
                        .get('/library/test/foo')
                        .expect(200)
                        .end(function(err,res) {
                            if (err) {
                                throw err;
                            }
                            res.text.should.equal(flow);
                            done();
                        });
                });
        });
        it('lists a stored item', function(done) {
            initStorage({},{'test':{'':['abc','def']}});
            request(app)
                .get('/library/test')
                .expect(200)
                .end(function(err,res) {
                    if (err) {
                        throw err;
                    }
                    // This response isn't strictly accurate - but it
                    // verifies the api returns what storage gave it
                    should.deepEqual(res.body,['abc','def']);
                    done();
                });
        });
        // Traversal attempts with forward slashes, backslashes, and on POST.
        it('returns 403 for malicious access attempt', function(done) {
            request(app)
                .get('/library/test/../../../../../../../../../../etc/passwd')
                .expect(403)
                .end(done);
        });
        it('returns 403 for malicious access attempt', function(done) {
            request(app)
                .get('/library/test/..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\etc\\passwd')
                .expect(403)
                .end(done);
        });
        it('returns 403 for malicious access attempt', function(done) {
            request(app)
                .post('/library/test/../../../../../../../../../../etc/passwd')
                .set('Content-Type', 'text/plain')
                .send('root:x:0:0:root:/root:/usr/bin/tclsh')
                .expect(403)
                .end(done);
        });
    });
});
| ty4tw/node-red | test/red/api/library_spec.js | JavaScript | apache-2.0 | 8,945 |
#!/bin/bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
source "${KUBE_ROOT}/cluster/gce/util.sh"
# detect-project populates PROJECT (and is chatty); silence its output.
detect-project &> /dev/null
export PROJECT
# Default attempt count for run-gcloud-compute-with-retries. Callers may
# override it per invocation (see execute-cmd-on-master-with-retries).
RETRIES=3
# Runs gcloud compute command with the given parameters. Up to $RETRIES
# attempts will be made to execute the command.
# arguments:
# $@: all stuff that goes after 'gcloud compute'
function run-gcloud-compute-with-retries {
  # Compute the per-command log file name once, before the loop. These were
  # previously declared 'local -r' inside the loop body; re-declaring a
  # readonly local on the second iteration is a bash error, so every retry
  # after the first attempt aborted the script instead of retrying.
  local -r gcloud_cmd_hash=$(echo "gcloud compute $@" | md5sum | cut -f1 -d" ")
  local -r gcloud_logfile="/tmp/gcloud_${gcloud_cmd_hash}.log"
  local attempt
  for attempt in $(seq 1 ${RETRIES}); do
    # Truncate the log so each attempt's output is inspected in isolation.
    echo "" > "${gcloud_logfile}"
    if ! gcloud compute "$@" |& tee "${gcloud_logfile}"; then
      if [[ $(grep -c "already exists" "${gcloud_logfile}") -gt 0 ]]; then
        # "already exists" on the first attempt means a previous run left the
        # resource behind; on a later attempt it means our earlier try actually
        # succeeded but the success response was lost.
        if [[ "${attempt}" == 1 ]]; then
          echo -e "${color_red} Failed to $1 $2 $3 as the resource hasn't been deleted from a previous run.${color_norm}" >& 2
          exit 1
        fi
        echo -e "${color_yellow}Succeeded to $1 $2 $3 in the previous attempt, but status response wasn't received.${color_norm}"
        return 0
      fi
      echo -e "${color_yellow}Attempt $attempt failed to $1 $2 $3. Retrying.${color_norm}" >& 2
      # Linear backoff: 5s, 10s, 15s, ...
      sleep $(($attempt * 5))
    else
      echo -e "${color_green}Succeeded to gcloud compute $1 $2 $3.${color_norm}"
      return 0
    fi
  done
  echo -e "${color_red} Failed to $1 $2 $3.${color_norm}" >& 2
  exit 1
}
# Creates the kubemark master VM together with its supporting GCE resources:
# persistent disk(s), a static external IP, the instance itself, and a
# firewall rule allowing HTTPS. Relies on PROJECT/ZONE/REGION and the
# MASTER_* variables being set by the caller.
function create-master-instance-with-resources {
  GCLOUD_COMMON_ARGS="--project ${PROJECT} --zone ${ZONE}"
  # Boot-data persistent disk for the master.
  run-gcloud-compute-with-retries disks create "${MASTER_NAME}-pd" \
    ${GCLOUD_COMMON_ARGS} \
    --type "${MASTER_DISK_TYPE}" \
    --size "${MASTER_DISK_SIZE}"
  # Optional separate disk for etcd events (enabled via EVENT_PD=true).
  if [ "${EVENT_PD:-false}" == "true" ]; then
    run-gcloud-compute-with-retries disks create "${MASTER_NAME}-event-pd" \
      ${GCLOUD_COMMON_ARGS} \
      --type "${MASTER_DISK_TYPE}" \
      --size "${MASTER_DISK_SIZE}"
  fi
  # Reserve a static IP, then read it back so it can be bound to the instance.
  run-gcloud-compute-with-retries addresses create "${MASTER_NAME}-ip" \
    --project "${PROJECT}" \
    --region "${REGION}" -q
  MASTER_IP=$(gcloud compute addresses describe "${MASTER_NAME}-ip" \
    --project "${PROJECT}" --region "${REGION}" -q --format='value(address)')
  # The master instance itself, with the data disk attached (not auto-deleted
  # so it survives instance deletion).
  run-gcloud-compute-with-retries instances create "${MASTER_NAME}" \
    ${GCLOUD_COMMON_ARGS} \
    --address "${MASTER_IP}" \
    --machine-type "${MASTER_SIZE}" \
    --image-project="${MASTER_IMAGE_PROJECT}" \
    --image "${MASTER_IMAGE}" \
    --tags "${MASTER_TAG}" \
    --network "${NETWORK}" \
    --scopes "storage-ro,compute-rw,logging-write" \
    --boot-disk-size "${MASTER_ROOT_DISK_SIZE}" \
    --disk "name=${MASTER_NAME}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no"
  if [ "${EVENT_PD:-false}" == "true" ]; then
    echo "Attaching ${MASTER_NAME}-event-pd to ${MASTER_NAME}"
    run-gcloud-compute-with-retries instances attach-disk "${MASTER_NAME}" \
      ${GCLOUD_COMMON_ARGS} \
      --disk "${MASTER_NAME}-event-pd" \
      --device-name="master-event-pd"
  fi
  # Allow HTTPS access to the apiserver from anywhere.
  run-gcloud-compute-with-retries firewall-rules create "${MASTER_NAME}-https" \
    --project "${PROJECT}" \
    --network "${NETWORK}" \
    --source-ranges "0.0.0.0/0" \
    --target-tags "${MASTER_TAG}" \
    --allow "tcp:443"
}
# Command to be executed is '$1'.
# No. of retries is '$2' (if provided) or 1 (default).
function execute-cmd-on-master-with-retries() {
  # RETRIES is set only in the environment of this single invocation; it does
  # not clobber the script-wide default.
  RETRIES="${2:-1}" run-gcloud-compute-with-retries ssh "${MASTER_NAME}" --zone="${ZONE}" --project="${PROJECT}" --command="$1"
}
# Copies files to/from the master via 'gcloud compute copy-files'.
# arguments:
# $@: source path(s) followed by the destination, passed through verbatim.
function copy-files() {
  run-gcloud-compute-with-retries copy-files --zone="${ZONE}" --project="${PROJECT}" $@
}
# Tears down everything created by create-master-instance-with-resources.
# Each deletion is best-effort ('|| true') so cleanup continues even when a
# resource is already gone or was never created.
function delete-master-instance-and-resources {
  GCLOUD_COMMON_ARGS="--project ${PROJECT} --zone ${ZONE} --quiet"
  gcloud compute instances delete "${MASTER_NAME}" \
    ${GCLOUD_COMMON_ARGS} || true
  gcloud compute disks delete "${MASTER_NAME}-pd" \
    ${GCLOUD_COMMON_ARGS} || true
  # Event disk may not exist; suppress the error output entirely.
  gcloud compute disks delete "${MASTER_NAME}-event-pd" \
    ${GCLOUD_COMMON_ARGS} &> /dev/null || true
  gcloud compute addresses delete "${MASTER_NAME}-ip" \
    --project "${PROJECT}" \
    --region "${REGION}" \
    --quiet || true
  gcloud compute firewall-rules delete "${MASTER_NAME}-https" \
    --project "${PROJECT}" \
    --quiet || true
  # Optional separate event-store machine (only created in some configurations).
  if [ "${SEPARATE_EVENT_MACHINE:-false}" == "true" ]; then
    gcloud compute instances delete "${EVENT_STORE_NAME}" \
      ${GCLOUD_COMMON_ARGS} || true
    gcloud compute disks delete "${EVENT_STORE_NAME}-pd" \
      ${GCLOUD_COMMON_ARGS} || true
  fi
}
| jinlmsft/kubernetes | test/kubemark/gce/util.sh | Shell | apache-2.0 | 5,070 |
"""Support for OpenWRT (ubus) routers."""
import functools
import logging
import re

from openwrt.ubus import Ubus
import voluptuous as vol

from homeassistant.components.device_tracker import (
    DOMAIN,
    PLATFORM_SCHEMA as PARENT_PLATFORM_SCHEMA,
    DeviceScanner,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_DHCP_SOFTWARE = "dhcp_software"
DEFAULT_DHCP_SOFTWARE = "dnsmasq"
DHCP_SOFTWARES = ["dnsmasq", "odhcpd", "none"]
PLATFORM_SCHEMA = PARENT_PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_DHCP_SOFTWARE, default=DEFAULT_DHCP_SOFTWARE): vol.In(
DHCP_SOFTWARES
),
}
)
def get_scanner(hass, config):
    """Validate the configuration and return an ubus scanner."""
    conf = config[DOMAIN]
    # Map each supported DHCP server to its scanner class; anything else
    # (i.e. "none") falls back to the plain hostapd-based scanner.
    scanner_cls = {
        "dnsmasq": DnsmasqUbusDeviceScanner,
        "odhcpd": OdhcpdUbusDeviceScanner,
    }.get(conf[CONF_DHCP_SOFTWARE], UbusDeviceScanner)
    scanner = scanner_cls(conf)
    return scanner if scanner.success_init else None
def _refresh_on_access_denied(func):
"""If remove rebooted, it lost our session so rebuild one and try again."""
def decorator(self, *args, **kwargs):
"""Wrap the function to refresh session_id on PermissionError."""
try:
return func(self, *args, **kwargs)
except PermissionError:
_LOGGER.warning(
"Invalid session detected."
" Trying to refresh session_id and re-run RPC"
)
self.ubus.connect()
return func(self, *args, **kwargs)
return decorator
class UbusDeviceScanner(DeviceScanner):
    """
    This class queries a wireless router running OpenWrt firmware.
    Adapted from Tomato scanner.
    """
    def __init__(self, config):
        """Initialize the scanner."""
        host = config[CONF_HOST]
        self.username = config[CONF_USERNAME]
        self.password = config[CONF_PASSWORD]
        self.parse_api_pattern = re.compile(r"(?P<param>\w*) = (?P<value>.*);")
        # NOTE(review): initialized as a dict here but _update_info() replaces
        # it with a list of MAC addresses — confirm nothing relies on the dict.
        self.last_results = {}
        self.url = f"http://{host}/ubus"
        self.ubus = Ubus(self.url, self.username, self.password)
        # Names of hostapd interfaces, populated lazily on first update.
        self.hostapd = []
        # MAC -> hostname map; None until generated (subclasses fill it from
        # their DHCP server's lease data).
        self.mac2name = None
        # connect() performs the initial login; None indicates failure.
        self.success_init = self.ubus.connect() is not None
    def scan_devices(self):
        """Scan for new devices and return a list with found device IDs."""
        self._update_info()
        return self.last_results
    def _generate_mac2name(self):
        """Return empty MAC to name dict. Overridden if DHCP server is set."""
        self.mac2name = {}
    @_refresh_on_access_denied
    def get_device_name(self, device):
        """Return the name of the given device or None if we don't know."""
        if self.mac2name is None:
            self._generate_mac2name()
        if self.mac2name is None:
            # Generation of mac2name dictionary failed
            return None
        name = self.mac2name.get(device.upper(), None)
        return name
    @_refresh_on_access_denied
    def _update_info(self):
        """Ensure the information from the router is up to date.
        Returns boolean if scanning successful.
        """
        if not self.success_init:
            return False
        _LOGGER.info("Checking hostapd")
        # Discover the hostapd interfaces once and cache them.
        if not self.hostapd:
            hostapd = self.ubus.get_hostapd()
            self.hostapd.extend(hostapd.keys())
        self.last_results = []
        results = 0
        # for each access point
        for hostapd in self.hostapd:
            if result := self.ubus.get_hostapd_clients(hostapd):
                results = results + 1
                # Check for each device is authorized (valid wpa key)
                for key in result["clients"].keys():
                    device = result["clients"][key]
                    if device["authorized"]:
                        self.last_results.append(key)
        return bool(results)
class DnsmasqUbusDeviceScanner(UbusDeviceScanner):
    """Implement the Ubus device scanning for the dnsmasq DHCP server."""
    def __init__(self, config):
        """Initialize the scanner."""
        super().__init__(config)
        # Path to the dnsmasq lease file; resolved lazily from UCI config.
        self.leasefile = None
    def _generate_mac2name(self):
        """Build the MAC -> hostname map from the dnsmasq lease file."""
        if self.leasefile is None:
            if result := self.ubus.get_uci_config("dhcp", "dnsmasq"):
                values = result["values"].values()
                # Use the first dnsmasq section's configured lease file.
                self.leasefile = next(iter(values))["leasefile"]
            else:
                # UCI lookup failed; leave mac2name as None so callers know.
                return
        result = self.ubus.file_read(self.leasefile)
        if result:
            self.mac2name = {}
            for line in result["data"].splitlines():
                # Assumes the dnsmasq lease line layout where field 1 is the
                # MAC address and field 3 is the hostname — TODO confirm
                # against the dnsmasq version in use.
                hosts = line.split(" ")
                self.mac2name[hosts[1].upper()] = hosts[3]
        else:
            # Error, handled in the ubus.file_read()
            return
class OdhcpdUbusDeviceScanner(UbusDeviceScanner):
    """Implement the Ubus device scanning for the odhcp DHCP server."""
    def _generate_mac2name(self):
        """Build the MAC -> hostname map from odhcpd's IPv4 lease data."""
        if result := self.ubus.get_dhcp_method("ipv4leases"):
            self.mac2name = {}
            for device in result["device"].values():
                for lease in device["leases"]:
                    mac = lease["mac"]  # mac = aabbccddeeff
                    # Convert it to expected format with colon
                    mac = ":".join(mac[i : i + 2] for i in range(0, len(mac), 2))
                    self.mac2name[mac.upper()] = lease["hostname"]
        else:
            # Error, handled in the ubus.get_dhcp_method()
            return
| jawilson/home-assistant | homeassistant/components/ubus/device_tracker.py | Python | apache-2.0 | 5,843 |
/* IBM_PROLOG_BEGIN_TAG */
/* This is an automatically generated prolog. */
/* */
/* $Source: src/usr/hwpf/hwp/core_activate/proc_post_winkle/proc_post_winkle.C $ */
/* */
/* OpenPOWER HostBoot Project */
/* */
/* Contributors Listed Below - COPYRIGHT 2013,2014 */
/* [+] International Business Machines Corp. */
/* */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); */
/* you may not use this file except in compliance with the License. */
/* You may obtain a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
/* implied. See the License for the specific language governing */
/* permissions and limitations under the License. */
/* */
/* IBM_PROLOG_END_TAG */
// $Id: proc_post_winkle.C,v 1.2 2013/07/18 00:45:00 stillgs Exp $
// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ipl/fapi/proc_post_winkle.C,v $
//------------------------------------------------------------------------------
// *! (C) Copyright International Business Machines Corp. 2011
// *! All Rights Reserved -- Property of IBM
// *! *** ***
//------------------------------------------------------------------------------
// *! OWNER NAME : Greg Still Email: stillgs@us.ibm.com
// *! BACKUP NAME : Michael Olsen Email: cmolsen@us.ibm.com
/// \file proc_post_winkle.C
/// \brief Re-enables the standard product idle mode configuration after
/// an IPL Winkle action
///
/// \verbatim
///
/// For the passed EX target,
/// - Remove disable of DISABLE_FORCE_DEEP_TO_FAST_WINKLE that was
/// set on the master core. Removing on the non_master cores
/// is not harmful
///
/// Procedure Prereq:
/// - System clocks are running
/// \endverbatim
///
//------------------------------------------------------------------------------
// ----------------------------------------------------------------------
// Includes
// ----------------------------------------------------------------------
#include "proc_post_winkle.H"
extern "C" {
using namespace fapi;
// ----------------------------------------------------------------------
// Constant definitions
// ----------------------------------------------------------------------
// ----------------------------------------------------------------------
// Global variables
// ----------------------------------------------------------------------
// ----------------------------------------------------------------------
// Function prototypes
// ----------------------------------------------------------------------
// ----------------------------------------------------------------------
// Function definitions
// ----------------------------------------------------------------------
/**
* proc_post_winkle
*
* @param[in] i_target EX target
*
* @retval ECMD_SUCCESS
* @retval ERROR only those from called functions or MACROs
*/
fapi::ReturnCode
proc_post_winkle(const Target& i_ex_target)
{
    fapi::ReturnCode l_rc;
    uint32_t rc = 0;
    // 64-bit working buffer for the SCOM register contents.
    ecmdDataBufferBase data(64);
    uint64_t address = 0;
    uint64_t ex_offset = 0;
    uint8_t l_ex_number = 0;
    fapi::Target l_parentTarget;
    // do/while(0) allows 'break' to act as a structured early-exit on error.
    do
    {
        FAPI_INF("Beginnning proc_post_winkle...");
        // Get the parent chip to target the PCBS registers
        l_rc = fapiGetParentChip(i_ex_target, l_parentTarget);
        if (l_rc)
        {
            FAPI_ERR("fapiGetParentChip access");
            break;
        }
        // Get the core number
        l_rc = FAPI_ATTR_GET(ATTR_CHIP_UNIT_POS, &i_ex_target, l_ex_number);
        if (l_rc)
        {
            FAPI_ERR("fapiGetAttribute of ATTR_CHIP_UNIT_POS with rc = 0x%x", (uint32_t)l_rc);
            break;
        }
        FAPI_INF("Processing core %d on %s", l_ex_number, l_parentTarget.toEcmdString());
        // Per-EX SCOM address stride; registers for core N live at
        // base + N * 0x01000000 — TODO confirm against the P8 SCOM map.
        ex_offset = l_ex_number * 0x01000000;
        // Debug
        address = EX_PMGP1_0x100F0103 + ex_offset;
        l_rc = fapiGetScom(l_parentTarget, address, data);
        if(!l_rc.ok())
        {
            FAPI_ERR("Scom error reading PMGP1\n");
            break;
        }
        FAPI_DBG("\tBefore PMGP1: 0x%016llX", data.getDoubleWord(0));
        // Enable movement to Fast Winkle if errors are present.  This is
        // turned off in the during the IPL process
        // Buffer is all-ones with only bit 20 cleared; writing it to the
        // PMGP1 AND register clears just that bit (per the file header this
        // is the DISABLE_FORCE_DEEP_TO_FAST_WINKLE control).
        rc |= data.flushTo1();
        rc |= data.clearBit(20);
        if(rc)
        {
            FAPI_ERR("Error (0x%x) setting up ecmdDataBufferBase", rc);
            l_rc.setEcmdError(rc);
            break;
        }
        address = EX_PMGP1_AND_0x100F0104 + ex_offset;
        l_rc = fapiPutScom(l_parentTarget, address, data);
        if(!l_rc.ok())
        {
            FAPI_ERR("Scom error updating PMGP1\n");
            break;
        }
        FAPI_INF("Enabled the conversion of Deep Winkle operations to Fast Winkle if errors are present upon Winkle entry");
        // Debug
        address = EX_PMGP1_0x100F0103 + ex_offset;
        l_rc = fapiGetScom(l_parentTarget, address, data);
        if(!l_rc.ok())
        {
            FAPI_ERR("Scom error reading PMGP1\n");
            break;
        }
        FAPI_DBG("\tAfter PMGP1: 0x%016llX", data.getDoubleWord(0));
    } while(0);
    FAPI_INF("Exiting proc_post_winkle...");
    return l_rc;
}
} //end extern C
| csmart/hostboot | src/usr/hwpf/hwp/core_activate/proc_post_winkle/proc_post_winkle.C | C++ | apache-2.0 | 6,536 |
package org.elasticsearch.action.get;
import com.bazaarvoice.elasticsearch.client.core.spi.RestExecutor;
import com.bazaarvoice.elasticsearch.client.core.spi.RestResponse;
import com.bazaarvoice.elasticsearch.client.core.util.UrlBuilder;
import org.elasticsearch.action.AbstractRestClientAction;
import org.elasticsearch.common.base.Function;
import org.elasticsearch.common.util.concurrent.Futures;
import org.elasticsearch.common.util.concurrent.ListenableFuture;
import static com.bazaarvoice.elasticsearch.client.core.util.StringFunctions.booleanToString;
import static com.bazaarvoice.elasticsearch.client.core.util.StringFunctions.commaDelimitedToString;
import static com.bazaarvoice.elasticsearch.client.core.util.UrlBuilder.urlEncode;
import static com.bazaarvoice.elasticsearch.client.core.util.Validation.notNull;
import static org.elasticsearch.common.base.Optional.fromNullable;
/**
* The inverse of {@link org.elasticsearch.rest.action.get.RestGetAction}
*
* @param <ResponseType>
*/
public class GetRest<ResponseType> extends AbstractRestClientAction<GetRequest, ResponseType> {
    public GetRest(final String protocol, final String host, final int port, final RestExecutor executor, final Function<RestResponse, ResponseType> responseTransform) {
        super(protocol, host, port, executor, responseTransform);
    }

    @Override public ListenableFuture<ResponseType> act(GetRequest request) {
        // Assemble the document-GET URL in grouped steps (endpoint, path,
        // query parameters) rather than as a single fluent chain.
        UrlBuilder builder = UrlBuilder.create();
        builder = builder.protocol(protocol).host(host).port(port);

        // Required path segments: /{index}/{type}/{id}
        builder = builder
            .path(urlEncode(notNull(request.index())))
            .seg(urlEncode(notNull(request.type())))
            .seg(urlEncode(notNull(request.id())));

        // Optional query parameters, included only when set on the request.
        builder = builder.paramIfPresent("refresh", fromNullable(request.refresh()).transform(booleanToString));
        builder = builder.paramIfPresent("routing", fromNullable(request.routing()));
        // note parent(string) seems just to set the routing, so we don't need to provide it here
        builder = builder.paramIfPresent("preference", fromNullable(request.preference()));
        builder = builder.paramIfPresent("realtime", fromNullable(request.realtime()).transform(booleanToString));
        builder = builder.paramIfPresent("fields", fromNullable(request.fields()).transform(commaDelimitedToString));

        final ListenableFuture<RestResponse> rawResponse = executor.get(builder.url());
        return Futures.transform(rawResponse, responseTransform);
    }
}
| bazaarvoice/es-client-java | es-rest-client-1.3/core/src/main/java/org/elasticsearch/action/get/GetRest.java | Java | apache-2.0 | 2,328 |
package internalversion
import (
"github.com/openshift/origin/pkg/template/api"
"k8s.io/apimachinery/pkg/api/errors"
)
// TemplateListerExpansion allows custom methods to be added to
// TemplateLister.
type TemplateListerExpansion interface {
	// GetByUID returns the Template whose UID matches the given string,
	// looked up via the api.TemplateUIDIndex index on the backing store.
	GetByUID(uid string) (*api.Template, error)
}
// TemplateNamespaceListerExpansion allows custom methods to be added to
// TemplateNamespaceLister.
type TemplateNamespaceListerExpansion interface{}
// GetByUID looks up a template by UID through the TemplateUIDIndex index,
// returning a NotFound error when no template carries that UID.
func (s templateLister) GetByUID(uid string) (*api.Template, error) {
	items, err := s.indexer.ByIndex(api.TemplateUIDIndex, uid)
	if err != nil {
		return nil, err
	}
	if len(items) > 0 {
		// UIDs are unique, so the first (and only) match is the answer.
		return items[0].(*api.Template), nil
	}
	return nil, errors.NewNotFound(api.Resource("template"), uid)
}
| louyihua/origin | pkg/template/generated/listers/template/internalversion/template_expansion.go | GO | apache-2.0 | 756 |
# Make the project's lib directory loadable from the specs.
dir = File.expand_path(File.dirname(__FILE__))
$LOAD_PATH.unshift File.join(dir, "../lib")
# Maybe puppetlabs_spec_helper is in a directory next to puppetdb. If not, we
# don't fail any worse than we already would.
$LOAD_PATH.push File.join(dir, "../../../puppetlabs_spec_helper")
require 'rspec'
require 'puppetlabs_spec_helper/puppet_spec_helper'
require 'tmpdir'
require 'fileutils'
require 'puppet'
require 'puppet/util/log'
require 'puppet/util/puppetdb/command'
RSpec.configure do |config|
  # Before every example, capture Puppet log messages into @logs so specs can
  # make assertions about what was logged.
  config.before :each do
    @logs = []
    Puppet::Util::Log.level = :info
    Puppet::Util::Log.newdestination(Puppet::Test::LogCollector.new(@logs))
    # Helper returning just the message text of each captured log entry.
    # NOTE(review): defined with 'def' inside the hook, so it is added as a
    # globally visible method, not scoped to the example group — confirm this
    # is intentional.
    def test_logs
      @logs.map(&:message)
    end
  end
end
| melissa/puppetdb | puppet/spec/spec_helper.rb | Ruby | apache-2.0 | 718 |
/************************************************************************************************************
DHTML Suite for Applications
(C) www.dhtmlgoodies.com, Janury 2007
CSS for the DHTMLSuite.imageSelection class.
www.dhtmlgoodies.com
Alf Magne Kalleland
************************************************************************************************************/
/* Main selection div - the draggable rectangle */
#DHTMLSuite_imageSelectionSel{
position:absolute;
overflow:hidden;
border:1px solid #222; /* Blue border */
z-index:5000000;
}
/* Transparent div inside the image selection div */
#DHTMLSuite_imageSelection_transparentDiv{ /* This is the transparent div placed inside #DHTMLSuite_imageSelection */
filter:alpha(opacity=50);
opacity:0.5;
-moz-opacity:0.5;
background-color:#666; /* Blue background color */
position:absolute;
left:-5px;
top:-5px;
width:200%;
height:200%;
}
/* Div for the drag process - dragging images */
#DHTMLSuite_imageSelectionDrag{
position:absolute;
border:1px solid #222;
z-index:5000000;
overflow:hidden;
width:107px;
}
#DHTMLSuite_imageSelectionDrag #DHTMLSuite_imageSelectionDragContent{
padding:0px;
z-index:5;
position:relative;
}
/* Div for the drag process - small boxes for each image. These divs are placed inside #DHTMLSuite_imageSelectionDragContent */
#DHTMLSuite_imageSelectionDrag .DHTMLSuite_imageSelectionDragBox{ /* Individual box for each image */
width:35px;
height:35px;
float:left;
background-repeat:no-repeat;
background-position:center center;
}
/* Div inside the div with id #DHTMLSuite_imageSelectionDrag - it could be transparent or not, that's your choice */
#DHTMLSuite_imageSelectionDrag .DHTMLSuite_imageSelectionDrag_transparentDiv{
background-color:#666; /* Blue background color */
position:absolute;
left:-5px;
top:-5px;
width:200%;
height:200%;
z-index:4;
} | afamorim/Vortice | apps/vortice-webapp/target/vortice-webapp/nucleo/themes/zune/css/image-selection.css | CSS | apache-2.0 | 1,890 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.pdfbox.tools;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.util.HashMap;
import java.util.Map;
import org.apache.pdfbox.pdmodel.PDDocument;
import org.apache.pdfbox.pdmodel.PDPage;
import org.apache.pdfbox.pdmodel.PDPageContentStream;
import org.apache.pdfbox.pdmodel.font.PDFont;
import org.apache.pdfbox.pdmodel.font.PDType0Font;
import org.apache.pdfbox.pdmodel.font.PDType1Font;
/**
* This will take a text file and ouput a pdf with that text.
*
* @author Ben Litchfield
*/
/**
 * This will take a text file and output a pdf with that text, wrapping lines
 * at the page margin and appending new pages as the text overflows.
 *
 * @author Ben Litchfield
 */
public class TextToPDF
{
    /**
     * The scaling factor for font units to PDF units
     */
    private static final int FONTSCALE = 1000;

    /**
     * The default font
     */
    private static final PDType1Font DEFAULT_FONT = PDType1Font.HELVETICA;

    /**
     * The default font size
     */
    private static final int DEFAULT_FONT_SIZE = 10;

    /**
     * The line height as a factor of the font size
     */
    private static final float LINE_HEIGHT_FACTOR = 1.05f;

    private int fontSize = DEFAULT_FONT_SIZE;
    private PDFont font = DEFAULT_FONT;

    // Lookup table of the standard 14 type 1 fonts, keyed by base font name.
    // Used to resolve the -standardFont command line option.
    private static final Map<String, PDType1Font> STANDARD_14 = new HashMap<String, PDType1Font>();
    static
    {
        STANDARD_14.put(PDType1Font.TIMES_ROMAN.getBaseFont(), PDType1Font.TIMES_ROMAN);
        STANDARD_14.put(PDType1Font.TIMES_BOLD.getBaseFont(), PDType1Font.TIMES_BOLD);
        STANDARD_14.put(PDType1Font.TIMES_ITALIC.getBaseFont(), PDType1Font.TIMES_ITALIC);
        STANDARD_14.put(PDType1Font.TIMES_BOLD_ITALIC.getBaseFont(), PDType1Font.TIMES_BOLD_ITALIC);
        STANDARD_14.put(PDType1Font.HELVETICA.getBaseFont(), PDType1Font.HELVETICA);
        STANDARD_14.put(PDType1Font.HELVETICA_BOLD.getBaseFont(), PDType1Font.HELVETICA_BOLD);
        STANDARD_14.put(PDType1Font.HELVETICA_OBLIQUE.getBaseFont(), PDType1Font.HELVETICA_OBLIQUE);
        STANDARD_14.put(PDType1Font.HELVETICA_BOLD_OBLIQUE.getBaseFont(), PDType1Font.HELVETICA_BOLD_OBLIQUE);
        STANDARD_14.put(PDType1Font.COURIER.getBaseFont(), PDType1Font.COURIER);
        STANDARD_14.put(PDType1Font.COURIER_BOLD.getBaseFont(), PDType1Font.COURIER_BOLD);
        STANDARD_14.put(PDType1Font.COURIER_OBLIQUE.getBaseFont(), PDType1Font.COURIER_OBLIQUE);
        STANDARD_14.put(PDType1Font.COURIER_BOLD_OBLIQUE.getBaseFont(), PDType1Font.COURIER_BOLD_OBLIQUE);
        STANDARD_14.put(PDType1Font.SYMBOL.getBaseFont(), PDType1Font.SYMBOL);
        STANDARD_14.put(PDType1Font.ZAPF_DINGBATS.getBaseFont(), PDType1Font.ZAPF_DINGBATS);
    }

    /**
     * Create a PDF document with some text.
     *
     * @param text The stream of text data.
     *
     * @return The document with the text in it.
     *
     * @throws IOException If there is an error writing the data.
     */
    public PDDocument createPDFFromText( Reader text ) throws IOException
    {
        PDDocument doc = new PDDocument();
        createPDFFromText(doc, text);
        return doc;
    }

    /**
     * Create a PDF document with some text.
     *
     * @param doc The document to write the text into. New pages are appended
     *            as needed; the caller remains responsible for saving it.
     * @param text The stream of text data.
     *
     * @throws IOException If there is an error writing the data.
     */
    public void createPDFFromText( PDDocument doc, Reader text ) throws IOException
    {
        try
        {
            final int margin = 40;
            float height = font.getBoundingBox().getHeight() / FONTSCALE;

            //calculate font height and increase by a factor.
            height = height*fontSize*LINE_HEIGHT_FACTOR;
            BufferedReader data = new BufferedReader( text );
            String nextLine = null;
            PDPage page = new PDPage();
            PDPageContentStream contentStream = null;
            // y starts below the margin so the first iteration always creates a page.
            float y = -1;
            float maxStringLength = page.getMediaBox().getWidth() - 2*margin;

            // There is a special case of creating a PDF document from an empty string.
            boolean textIsEmpty = true;

            while( (nextLine = data.readLine()) != null )
            {

                // The input text is nonEmpty. New pages will be created and added
                // to the PDF document as they are needed, depending on the length of
                // the text.
                textIsEmpty = false;

                String[] lineWords = nextLine.trim().split( " " );
                int lineIndex = 0;
                while( lineIndex < lineWords.length )
                {
                    // Greedily accumulate words until the next word would overflow
                    // the printable width.
                    StringBuilder nextLineToDraw = new StringBuilder();
                    float lengthIfUsingNextWord = 0;
                    do
                    {
                        nextLineToDraw.append( lineWords[lineIndex] );
                        nextLineToDraw.append( " " );
                        lineIndex++;
                        if( lineIndex < lineWords.length )
                        {
                            String lineWithNextWord = nextLineToDraw.toString() + lineWords[lineIndex];
                            lengthIfUsingNextWord =
                                (font.getStringWidth( lineWithNextWord )/FONTSCALE) * fontSize;
                        }
                    }
                    while( lineIndex < lineWords.length &&
                           lengthIfUsingNextWord < maxStringLength );
                    if( y < margin )
                    {
                        // We have crossed the end-of-page boundary and need to extend the
                        // document by another page.
                        page = new PDPage();
                        doc.addPage( page );
                        if( contentStream != null )
                        {
                            contentStream.endText();
                            contentStream.close();
                        }
                        contentStream = new PDPageContentStream(doc, page);
                        contentStream.setFont( font, fontSize );
                        contentStream.beginText();
                        y = page.getMediaBox().getHeight() - margin + height;
                        contentStream.newLineAtOffset(
                                margin, y);
                    }

                    if( contentStream == null )
                    {
                        throw new IOException( "Error:Expected non-null content stream." );
                    }
                    contentStream.newLineAtOffset(0, -height);
                    y -= height;
                    contentStream.showText(nextLineToDraw.toString());
                }
            }

            // If the input text was the empty string, then the above while loop will have short-circuited
            // and we will not have added any PDPages to the document.
            // So in order to make the resultant PDF document readable by Adobe Reader etc, we'll add an empty page.
            if (textIsEmpty)
            {
                doc.addPage(page);
            }

            if( contentStream != null )
            {
                contentStream.endText();
                contentStream.close();
            }
        }
        catch( IOException io )
        {
            // NOTE(review): this also closes a caller-supplied document on failure,
            // which the caller may not expect — kept for backward compatibility.
            if( doc != null )
            {
                doc.close();
            }
            throw io;
        }
    }

    /**
     * This will create a PDF document with some text in it.
     * <br />
     * see usage() for commandline
     *
     * @param args Command line arguments.
     *
     * @throws IOException If there is an error with the PDF.
     */
    public static void main(String[] args) throws IOException
    {
        // suppress the Dock icon on OS X
        System.setProperty("apple.awt.UIElement", "true");

        TextToPDF app = new TextToPDF();
        PDDocument doc = new PDDocument();
        try
        {
            if( args.length < 2 )
            {
                app.usage();
            }
            else
            {
                // All leading arguments are option pairs; the last two are
                // <outputfile> <textfile>.
                for( int i=0; i<args.length-2; i++ )
                {
                    if( args[i].equals( "-standardFont" ))
                    {
                        i++;
                        app.setFont( getStandardFont( args[i] ));
                    }
                    else if( args[i].equals( "-ttf" ))
                    {
                        i++;
                        PDFont font = PDType0Font.load( doc, new File( args[i]) );
                        app.setFont( font );
                    }
                    else if( args[i].equals( "-fontSize" ))
                    {
                        i++;
                        app.setFontSize( Integer.parseInt( args[i] ) );
                    }
                    else
                    {
                        throw new IOException( "Unknown argument:" + args[i] );
                    }
                }
                app.createPDFFromText( doc, new FileReader( args[args.length-1] ) );
                doc.save( args[args.length-2] );
            }
        }
        finally
        {
            doc.close();
        }
    }

    /**
     * This will print out a message telling how to use this example.
     */
    private void usage()
    {
        String[] std14 = getStandard14Names();

        StringBuilder message = new StringBuilder();
        // Fixed: the command is "java -jar", not "jar -jar".
        message.append("Usage: java -jar pdfbox-app-x.y.z.jar TextToPDF [options] <outputfile> <textfile>\n");
        message.append("\nOptions:\n");
        message.append("  -standardFont <name> : " + DEFAULT_FONT.getBaseFont() + " (default)\n");
        for (String std14String : std14)
        {
            message.append("                         " + std14String + "\n");
        }
        message.append("  -ttf <ttf file>      : The TTF font to use.\n");
        message.append("  -fontSize <fontSize> : default: " + DEFAULT_FONT_SIZE );

        System.err.println(message.toString());
        System.exit(1);
    }

    /**
     * A convenience method to get one of the standard 14 font from name.
     *
     * @param name The name of the font to get.
     *
     * @return The font that matches the name or null if it does not exist.
     */
    private static PDType1Font getStandardFont(String name)
    {
        return STANDARD_14.get(name);
    }

    /**
     * This will get the names of the standard 14 fonts.
     *
     * @return An array of the names of the standard 14 fonts.
     */
    private static String[] getStandard14Names()
    {
        // Size the array from the map instead of the magic number 14.
        return STANDARD_14.keySet().toArray(new String[STANDARD_14.size()]);
    }

    /**
     * @return Returns the font.
     */
    public PDFont getFont()
    {
        return font;
    }

    /**
     * @param aFont The font to set.
     */
    public void setFont(PDFont aFont)
    {
        this.font = aFont;
    }

    /**
     * @return Returns the fontSize.
     */
    public int getFontSize()
    {
        return fontSize;
    }

    /**
     * @param aFontSize The fontSize to set.
     */
    public void setFontSize(int aFontSize)
    {
        this.fontSize = aFontSize;
    }
}
| benmccann/pdfbox | tools/src/main/java/org/apache/pdfbox/tools/TextToPDF.java | Java | apache-2.0 | 11,926 |
/*
* Copyright 2012 International Business Machines Corp.
*
* See the NOTICE file distributed with this work for additional information
* regarding copyright ownership. Licensed under the Apache License,
* Version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package javax.batch.api.chunk.listener;
import java.util.List;
/**
 * The AbstractItemWriteListener provides default
 * implementations of less commonly implemented methods.
 * Subclasses override only the callbacks they need; all default
 * implementations are no-ops.
 */
public abstract class AbstractItemWriteListener implements
		ItemWriteListener {
	/**
	 * Override this method if the ItemWriteListener
	 * will do something before the items are written.
	 * The default implementation does nothing.
	 * 
	 * @param items specifies the items about to be
	 * written.
	 * @throws Exception (or subclass) if an error occurs.
	 */
	@Override
	public void beforeWrite(List<Object> items) throws Exception {}
	/**
	 * Override this method if the ItemWriteListener
	 * will do something after the items are written.
	 * The default implementation does nothing.
	 * 
	 * @param items specifies the items that were
	 * written.
	 * @throws Exception (or subclass) if an error occurs.
	 */
	@Override
	public void afterWrite(List<Object> items) throws Exception {}
	/**
	 * Override this method if the ItemWriteListener
	 * will do something when the ItemWriter writeItems
	 * method throws an exception.
	 * The default implementation does nothing.
	 * 
	 * @param items specifies the items whose write
	 * attempt failed.
	 * @param ex specifies the exception thrown by the item 
	 * writer.
	 * @throws Exception (or subclass) if an error occurs.
	 */
	@Override
	public void onWriteError(List<Object> items, Exception ex) throws Exception {}
}
| sidgoyal/standards.jsr352.jbatch | javax.batch/src/main/java/javax/batch/api/chunk/listener/AbstractItemWriteListener.java | Java | apache-2.0 | 2,264 |
/*
* #%L
* SparkCommerce Framework Web
* %%
* Copyright (C) 2009 - 2013 Spark Commerce
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
package org.sparkcommerce.core.payment.service;
import org.sparkcommerce.common.payment.PaymentGatewayType;
import org.springframework.stereotype.Service;
/**
* In order to use load this demo service, you will need to component scan
* the package "com.mycompany.sample".
*
* This should NOT be used in production, and is meant solely for demonstration
* purposes only.
*
* @author Elbert Bautista (elbertbautista)
*/
@Service("blNullPaymentGatewayConfiguration")
public class NullPaymentGatewayConfigurationImpl implements NullPaymentGatewayConfiguration {

    // Number of failures tolerated before the gateway is considered down.
    protected int failureReportingThreshold = 1;

    // Whether transactions should be submitted as a combined
    // authorize-and-capture rather than authorize-only.
    protected boolean performAuthorizeAndCapture = true;

    @Override
    public String getTransparentRedirectUrl() {
        return "/null-checkout/process";
    }

    @Override
    public String getTransparentRedirectReturnUrl() {
        return "/null-checkout/return";
    }

    @Override
    public boolean isPerformAuthorizeAndCapture() {
        // Bug fix: previously returned a hard-coded "true", silently ignoring
        // any value supplied through setPerformAuthorizeAndCapture(...).
        return performAuthorizeAndCapture;
    }

    @Override
    public void setPerformAuthorizeAndCapture(boolean performAuthorizeAndCapture) {
        this.performAuthorizeAndCapture = performAuthorizeAndCapture;
    }

    @Override
    public int getFailureReportingThreshold() {
        return failureReportingThreshold;
    }

    @Override
    public void setFailureReportingThreshold(int failureReportingThreshold) {
        this.failureReportingThreshold = failureReportingThreshold;
    }

    @Override
    public boolean handlesAuthorize() {
        return true;
    }

    @Override
    public boolean handlesCapture() {
        return false;
    }

    @Override
    public boolean handlesAuthorizeAndCapture() {
        return true;
    }

    @Override
    public boolean handlesReverseAuthorize() {
        return false;
    }

    @Override
    public boolean handlesVoid() {
        return false;
    }

    @Override
    public boolean handlesRefund() {
        return false;
    }

    @Override
    public boolean handlesPartialCapture() {
        return false;
    }

    @Override
    public boolean handlesMultipleShipment() {
        return false;
    }

    @Override
    public boolean handlesRecurringPayment() {
        return false;
    }

    @Override
    public boolean handlesSavedCustomerPayment() {
        return false;
    }

    @Override
    public boolean handlesMultiplePayments() {
        return false;
    }

    @Override
    public PaymentGatewayType getGatewayType() {
        return NullPaymentGatewayType.NULL_GATEWAY;
    }
}
| akdasari/SparkIntegration | src/test/java/org/sparkcommerce/core/payment/service/NullPaymentGatewayConfigurationImpl.java | Java | apache-2.0 | 3,194 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
(function () {
  'use strict';

  // AngularJS controller for the Sparta drivers list view: loads drivers,
  // and opens create/delete confirmation modals.
  angular
    .module('webApp')
    .controller('DriversListCtrl', DriversListCtrl);

  // Explicit DI annotation so minification does not break injection.
  DriversListCtrl.$inject = ['$scope', 'EntityFactory', 'ModalService', 'UtilsService', '$state'];

  function DriversListCtrl($scope, EntityFactory, ModalService, UtilsService, $state) {
    /*jshint validthis: true*/
    var vm = this;

    vm.deleteDriver = deleteDriver;
    vm.getAllDrivers = getAllDrivers;
    vm.createDriver = createDriver;
    vm.sortDrivers = sortDrivers;
    // Table sort state: column name and direction.
    vm.tableReverse = false;
    vm.sortField = 'fileName';
    // Banner messages bound by the view; text keys are i18n identifiers.
    vm.errorMessage = {
      type: 'error',
      text: '',
      internalTrace: ''
    };
    vm.successMessage = {
      type: 'success',
      text: '',
      internalTrace: ''
    };

    init();

    /////////////////////////////////

    function init() {
      getAllDrivers();
    }

    // Refreshes the full driver list from the backend.
    function getAllDrivers() {
      EntityFactory.getAllDrivers().then(function (drivers) {
        vm.driversData = drivers;
      });
    }

    // Opens the entity-creation modal preconfigured for drivers; on success,
    // reloads the list and shows a success banner.
    function createDriver() {
      var controller = 'CreateEntityModalCtrl';
      var templateUrl = "templates/modal/entity-creation-modal.tpl.html";
      var resolve = {
        type: function () {
          return "DRIVER";
        },
        title: function () {
          return "_ENTITY_._CREATE_DRIVER_TITLE_";
        },
        info: function () {
          return "_DRIVER_INFO_";
        },
        text: function () {
          return "_DRIVER_TEXT_";
        },
      };
      var modalInstance = ModalService.openModal(controller, templateUrl, resolve, '', 'lg');

      return modalInstance.result.then(function () {
        getAllDrivers();
        vm.successMessage.text = '_DRIVER_CREATE_OK_';
      });
    }

    function deleteDriver(fileName) {
      return deleteDriverConfirm('lg', fileName);
    }

    // Opens the delete-confirmation modal; on confirm, removes the driver
    // from the local list (no full reload) and shows a success banner.
    function deleteDriverConfirm(size, fileName) {
      var controller = 'DeleteEntityModalCtrl';
      var templateUrl = "templates/modal/entity-delete-modal.tpl.html";
      var resolve = {
        item: function () {
          return fileName;
        },
        type: function () {
          return "DRIVER";
        },
        title: function () {
          return "_ENTITY_._DELETE_DRIVER_TITLE_";
        }
      };
      var modalInstance = ModalService.openModal(controller, templateUrl, resolve, '', size);

      return modalInstance.result.then(function (fileName) {
        var index = UtilsService.getArrayElementPosition(vm.driversData, 'fileName', fileName);
        vm.driversData.splice(index, 1);
        vm.successMessage.text = '_DRIVER_DELETE_OK_';
      });
    }

    // Clicking the active sort column toggles direction; clicking a new
    // column sorts ascending by that column.
    function sortDrivers(fieldName) {
      if (fieldName == vm.sortField) {
        vm.tableReverse = !vm.tableReverse;
      } else {
        vm.tableReverse = false;
        vm.sortField = fieldName;
      }
    }
  }
})();
| Stratio/Sparta | web/src/scripts/controllers/drivers-list.js | JavaScript | apache-2.0 | 3,460 |
"""Support for Switchbot devices."""
from asyncio import Lock
import switchbot # pylint: disable=import-error
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_SENSOR_TYPE, Platform
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from .const import (
ATTR_BOT,
ATTR_CURTAIN,
BTLE_LOCK,
COMMON_OPTIONS,
CONF_RETRY_COUNT,
CONF_RETRY_TIMEOUT,
CONF_SCAN_TIMEOUT,
CONF_TIME_BETWEEN_UPDATE_COMMAND,
DATA_COORDINATOR,
DEFAULT_RETRY_COUNT,
DEFAULT_RETRY_TIMEOUT,
DEFAULT_SCAN_TIMEOUT,
DEFAULT_TIME_BETWEEN_UPDATE_COMMAND,
DOMAIN,
)
from .coordinator import SwitchbotDataUpdateCoordinator
# Maps the configured Switchbot device type to the entity platforms that
# should be set up for it.
PLATFORMS_BY_TYPE = {
    ATTR_BOT: [Platform.SWITCH, Platform.SENSOR],
    ATTR_CURTAIN: [Platform.COVER, Platform.BINARY_SENSOR, Platform.SENSOR],
}
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up Switchbot from a config entry.

    All config entries share one BTLE coordinator (and one lock), created
    lazily by the first entry to be set up.
    """
    hass.data.setdefault(DOMAIN, {})
    # Seed default options the first time an entry is set up without any.
    if not entry.options:
        options = {
            CONF_TIME_BETWEEN_UPDATE_COMMAND: DEFAULT_TIME_BETWEEN_UPDATE_COMMAND,
            CONF_RETRY_COUNT: DEFAULT_RETRY_COUNT,
            CONF_RETRY_TIMEOUT: DEFAULT_RETRY_TIMEOUT,
            CONF_SCAN_TIMEOUT: DEFAULT_SCAN_TIMEOUT,
        }
        hass.config_entries.async_update_entry(entry, options=options)
    # Use same coordinator instance for all entities.
    # Uses BTLE advertisement data, all Switchbot devices in range is stored here.
    if DATA_COORDINATOR not in hass.data[DOMAIN]:
        # Check if asyncio.lock is stored in hass data.
        # BTLE has issues with multiple connections,
        # so we use a lock to ensure that only one API request is reaching it at a time:
        if BTLE_LOCK not in hass.data[DOMAIN]:
            hass.data[DOMAIN][BTLE_LOCK] = Lock()
        # Cache the first entry's options as the domain-wide common options.
        if COMMON_OPTIONS not in hass.data[DOMAIN]:
            hass.data[DOMAIN][COMMON_OPTIONS] = {**entry.options}
        # NOTE(review): mutates a module-level switchbot library attribute,
        # so the retry timeout applies process-wide.
        switchbot.DEFAULT_RETRY_TIMEOUT = hass.data[DOMAIN][COMMON_OPTIONS][
            CONF_RETRY_TIMEOUT
        ]
        # Store api in coordinator.
        coordinator = SwitchbotDataUpdateCoordinator(
            hass,
            update_interval=hass.data[DOMAIN][COMMON_OPTIONS][
                CONF_TIME_BETWEEN_UPDATE_COMMAND
            ],
            api=switchbot,
            retry_count=hass.data[DOMAIN][COMMON_OPTIONS][CONF_RETRY_COUNT],
            scan_timeout=hass.data[DOMAIN][COMMON_OPTIONS][CONF_SCAN_TIMEOUT],
            api_lock=hass.data[DOMAIN][BTLE_LOCK],
        )
        hass.data[DOMAIN][DATA_COORDINATOR] = coordinator
    else:
        coordinator = hass.data[DOMAIN][DATA_COORDINATOR]
    await coordinator.async_config_entry_first_refresh()
    if not coordinator.last_update_success:
        raise ConfigEntryNotReady
    entry.async_on_unload(entry.add_update_listener(_async_update_listener))
    hass.data[DOMAIN][entry.entry_id] = {DATA_COORDINATOR: coordinator}
    sensor_type = entry.data[CONF_SENSOR_TYPE]
    # Forward setup to the platforms appropriate for this device type.
    hass.config_entries.async_setup_platforms(entry, PLATFORMS_BY_TYPE[sensor_type])
    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry, dropping its per-entry state.

    When the last entry for the domain is unloaded, the domain-wide state
    (coordinator, lock, cached options) is removed as well.
    """
    platforms = PLATFORMS_BY_TYPE[entry.data[CONF_SENSOR_TYPE]]
    unloaded = await hass.config_entries.async_unload_platforms(entry, platforms)
    if unloaded:
        hass.data[DOMAIN].pop(entry.entry_id)
        if not hass.config_entries.async_entries(DOMAIN):
            hass.data.pop(DOMAIN)
    return unloaded
async def _async_update_listener(hass: HomeAssistant, entry: ConfigEntry) -> None:
    """Handle an options update by reloading the entry when options changed."""
    domain_data = hass.data[DOMAIN]
    new_options = dict(entry.options)
    if new_options == domain_data[COMMON_OPTIONS]:
        return
    # Cache the updated options, drop the shared coordinator so it is rebuilt
    # with the new settings, then reload the entry.
    domain_data[COMMON_OPTIONS] = new_options
    domain_data.pop(DATA_COORDINATOR)
    await hass.config_entries.async_reload(entry.entry_id)
| home-assistant/home-assistant | homeassistant/components/switchbot/__init__.py | Python | apache-2.0 | 4,051 |
package org.zstack.sdk.zwatch.thirdparty.api;
/**
 * Result payload for a third-party alert query: one page of matching
 * inventories plus the total match count.
 */
public class QueryThirdpartyAlertResult {

    // Page of matching third-party alert inventories.
    public java.util.List inventories;

    // Total number of records matching the query.
    public java.lang.Long total;

    public java.util.List getInventories() {
        return this.inventories;
    }

    public void setInventories(java.util.List inventories) {
        this.inventories = inventories;
    }

    public java.lang.Long getTotal() {
        return this.total;
    }

    public void setTotal(java.lang.Long total) {
        this.total = total;
    }
}
| zstackorg/zstack | sdk/src/main/java/org/zstack/sdk/zwatch/thirdparty/api/QueryThirdpartyAlertResult.java | Java | apache-2.0 | 513 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import React from 'react';
import { Link } from 'dva/router';
import Exception from '../../components/Exception';
export default () => (
<Exception type="500" style={{ minHeight: 500, height: '80%' }} linkElement={Link} />
);
| ascrutae/sky-walking-ui | src/routes/Exception/500.js | JavaScript | apache-2.0 | 1,035 |
+++
Talk_date = ""
Talk_start_time = ""
Talk_end_time = ""
Title = "Postmortem da estrela da morte"
Type = "talk"
Speakers = ["rafael-barbosa"]
youtube = ""
slideshare = ""
slides = ""
+++
Blameless postmortem, um dos rituais mais importantes da cultura DevOps, sendo aplicado aos erros do projeto da estrela da morte e com o Darth Vader comandando. Uma maneira divertida de aprender. Se o pior Sith de todos conseguiu aplicar, você também consegue, sem culpa. | gomex/devopsdays-web | content/events/2019-goiania/program/rafael-barbosa.md | Markdown | apache-2.0 | 461 |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.discovery.zen.publish;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.DiscoveryNodesProvider;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.node.Node;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.BytesTransportRequest;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportConnectionListener;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponseOptions;
import org.elasticsearch.transport.TransportService;
import org.junit.After;
import org.junit.Before;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.emptyIterable;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasToString;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
@TestLogging("discovery.zen.publish:TRACE")
public class PublishClusterStateActionTests extends ESTestCase {
private static final ClusterName CLUSTER_NAME = ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY);
protected ThreadPool threadPool;
protected Map<String, MockNode> nodes = new HashMap<>();
public static class MockNode implements PublishClusterStateAction.NewPendingClusterStateListener, DiscoveryNodesProvider {
public final DiscoveryNode discoveryNode;
public final MockTransportService service;
public MockPublishAction action;
public final ClusterStateListener listener;
public volatile ClusterState clusterState;
private final ESLogger logger;
public MockNode(DiscoveryNode discoveryNode, MockTransportService service, @Nullable ClusterStateListener listener, ESLogger logger) {
this.discoveryNode = discoveryNode;
this.service = service;
this.listener = listener;
this.logger = logger;
this.clusterState = ClusterState.builder(CLUSTER_NAME).nodes(DiscoveryNodes.builder().put(discoveryNode).localNodeId(discoveryNode.getId()).build()).build();
}
public MockNode setAsMaster() {
this.clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).masterNodeId(discoveryNode.getId())).build();
return this;
}
public MockNode resetMasterId() {
this.clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).masterNodeId(null)).build();
return this;
}
public void connectTo(DiscoveryNode node) {
service.connectToNode(node);
}
@Override
public void onNewClusterState(String reason) {
ClusterState newClusterState = action.pendingStatesQueue().getNextClusterStateToProcess();
logger.debug("[{}] received version [{}], uuid [{}]", discoveryNode.getName(), newClusterState.version(), newClusterState.stateUUID());
if (listener != null) {
ClusterChangedEvent event = new ClusterChangedEvent("", newClusterState, clusterState);
listener.clusterChanged(event);
}
if (clusterState.nodes().getMasterNode() == null || newClusterState.supersedes(clusterState)) {
clusterState = newClusterState;
}
action.pendingStatesQueue().markAsProcessed(newClusterState);
}
@Override
public DiscoveryNodes nodes() {
return clusterState.nodes();
}
}
public MockNode createMockNode(final String name) throws Exception {
return createMockNode(name, Settings.EMPTY);
}
public MockNode createMockNode(String name, Settings settings) throws Exception {
return createMockNode(name, settings, null);
}
public MockNode createMockNode(String name, final Settings basSettings, @Nullable ClusterStateListener listener) throws Exception {
final Settings settings = Settings.builder()
.put("name", name)
.put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "", TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING")
.put(basSettings)
.build();
MockTransportService service = buildTransportService(settings);
DiscoveryNode discoveryNode = DiscoveryNode.createLocal(settings, service.boundAddress().publishAddress(),
NodeEnvironment.generateNodeId(settings));
MockNode node = new MockNode(discoveryNode, service, listener, logger);
node.action = buildPublishClusterStateAction(settings, service, () -> node.clusterState, node);
final CountDownLatch latch = new CountDownLatch(nodes.size() * 2 + 1);
TransportConnectionListener waitForConnection = new TransportConnectionListener() {
@Override
public void onNodeConnected(DiscoveryNode node) {
latch.countDown();
}
@Override
public void onNodeDisconnected(DiscoveryNode node) {
fail("disconnect should not be called " + node);
}
};
node.service.addConnectionListener(waitForConnection);
for (MockNode curNode : nodes.values()) {
curNode.service.addConnectionListener(waitForConnection);
curNode.connectTo(node.discoveryNode);
node.connectTo(curNode.discoveryNode);
}
node.connectTo(node.discoveryNode);
assertThat("failed to wait for all nodes to connect", latch.await(5, TimeUnit.SECONDS), equalTo(true));
for (MockNode curNode : nodes.values()) {
curNode.service.removeConnectionListener(waitForConnection);
}
node.service.removeConnectionListener(waitForConnection);
if (nodes.put(name, node) != null) {
fail("Node with the name " + name + " already exist");
}
return node;
}
/**
 * Looks up the transport service of the node registered under {@code name},
 * or returns {@code null} if no such node exists.
 */
public MockTransportService service(String name) {
    final MockNode found = nodes.get(name);
    return found == null ? null : found.service;
}
/**
 * Looks up the publish action of the node registered under {@code name},
 * or returns {@code null} if no such node exists.
 */
public PublishClusterStateAction action(String name) {
    final MockNode found = nodes.get(name);
    return found == null ? null : found.action;
}
@Override
@Before
// Creates a fresh thread pool for each test; torn down (with all nodes) in tearDown().
public void setUp() throws Exception {
super.setUp();
threadPool = new TestThreadPool(getClass().getName());
}
@Override
@After
// Closes every mock node's publish action and transport service, then terminates the pool.
// NOTE(review): if one close() throws, later nodes are not closed - acceptable in tests.
public void tearDown() throws Exception {
super.tearDown();
for (MockNode curNode : nodes.values()) {
curNode.action.close();
curNode.service.close();
}
terminate(threadPool);
}
// Builds and starts a local (in-JVM) transport service for a mock node.
// NOTE(review): the settings parameter is ignored here - MockTransportService.local is
// always given Settings.EMPTY; confirm this is intentional before relying on per-node
// transport settings.
protected MockTransportService buildTransportService(Settings settings) {
MockTransportService transportService = MockTransportService.local(Settings.EMPTY, Version.CURRENT, threadPool);
transportService.start();
transportService.acceptIncomingRequests();
return transportService;
}
/**
 * Builds the {@link MockPublishAction} used by a mock node, wiring in discovery settings
 * derived from the node's settings and the built-in cluster settings.
 */
protected MockPublishAction buildPublishClusterStateAction(
    Settings settings,
    MockTransportService transportService,
    Supplier<ClusterState> clusterStateSupplier,
    PublishClusterStateAction.NewPendingClusterStateListener listener
) {
    return new MockPublishAction(
        settings,
        transportService,
        clusterStateSupplier,
        listener,
        new DiscoverySettings(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
        CLUSTER_NAME);
}
/**
 * End-to-end publishing scenario: a freshly joined node receives a full cluster state,
 * subsequent updates arrive as diffs, skipping a version forces a full state again, and a
 * master change causes full-state publishing from the new master.
 */
public void testSimpleClusterStatePublishing() throws Exception {
MockNode nodeA = createMockNode("nodeA", Settings.EMPTY).setAsMaster();
MockNode nodeB = createMockNode("nodeB", Settings.EMPTY);
// Initial cluster state
ClusterState clusterState = nodeA.clusterState;
// cluster state update - add nodeB
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(clusterState.nodes()).put(nodeB.discoveryNode).build();
ClusterState previousClusterState = clusterState;
clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build();
publishStateAndWait(nodeA.action, clusterState, previousClusterState);
// first state a node ever sees must be a full state
assertSameStateFromFull(nodeB.clusterState, clusterState);
// cluster state update - add block
previousClusterState = clusterState;
clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build();
publishStateAndWait(nodeA.action, clusterState, previousClusterState);
assertSameStateFromDiff(nodeB.clusterState, clusterState);
assertThat(nodeB.clusterState.blocks().global().size(), equalTo(1));
// cluster state update - remove block
previousClusterState = clusterState;
clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).incrementVersion().build();
publishStateAndWait(nodeA.action, clusterState, previousClusterState);
assertSameStateFromDiff(nodeB.clusterState, clusterState);
assertTrue(nodeB.clusterState.wasReadFromDiff());
// Adding new node - this node should get full cluster state while nodeB should still be getting diffs
MockNode nodeC = createMockNode("nodeC", Settings.EMPTY);
// cluster state update 3 - register node C
previousClusterState = clusterState;
discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(nodeC.discoveryNode).build();
clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build();
publishStateAndWait(nodeA.action, clusterState, previousClusterState);
assertSameStateFromDiff(nodeB.clusterState, clusterState);
// First state
assertSameStateFromFull(nodeC.clusterState, clusterState);
// cluster state update 4 - update settings
previousClusterState = clusterState;
MetaData metaData = MetaData.builder(clusterState.metaData()).transientSettings(Settings.builder().put("foo", "bar").build()).build();
clusterState = ClusterState.builder(clusterState).metaData(metaData).incrementVersion().build();
publishStateAndWait(nodeA.action, clusterState, previousClusterState);
assertSameStateFromDiff(nodeB.clusterState, clusterState);
assertThat(nodeB.clusterState.blocks().global().size(), equalTo(0));
assertSameStateFromDiff(nodeC.clusterState, clusterState);
assertThat(nodeC.clusterState.blocks().global().size(), equalTo(0));
// cluster state update - skipping one version change - should request full cluster state
previousClusterState = ClusterState.builder(clusterState).incrementVersion().build();
clusterState = ClusterState.builder(clusterState).incrementVersion().build();
publishStateAndWait(nodeA.action, clusterState, previousClusterState);
assertSameStateFromFull(nodeB.clusterState, clusterState);
assertSameStateFromFull(nodeC.clusterState, clusterState);
assertFalse(nodeC.clusterState.wasReadFromDiff());
// node A steps down from being master
nodeA.resetMasterId();
nodeB.resetMasterId();
nodeC.resetMasterId();
// node B becomes the master and sends a version of the cluster state that goes back
discoveryNodes = DiscoveryNodes.builder(discoveryNodes)
.put(nodeA.discoveryNode)
.put(nodeB.discoveryNode)
.put(nodeC.discoveryNode)
.masterNodeId(nodeB.discoveryNode.getId())
.localNodeId(nodeB.discoveryNode.getId())
.build();
previousClusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build();
clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build();
publishStateAndWait(nodeB.action, clusterState, previousClusterState);
// a new master always publishes full states
assertSameStateFromFull(nodeA.clusterState, clusterState);
assertSameStateFromFull(nodeC.clusterState, clusterState);
}
/**
 * A node present in the previous cluster state but which never received any state from this
 * master must still get a full state first; only afterwards may diffs be sent.
 */
public void testUnexpectedDiffPublishing() throws Exception {
MockNode nodeA = createMockNode("nodeA", Settings.EMPTY, event -> {
fail("Shouldn't send cluster state to myself");
}).setAsMaster();
MockNode nodeB = createMockNode("nodeB", Settings.EMPTY);
// Initial cluster state with both states - the second node still shouldn't get diff even though it's present in the previous cluster state
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(nodeA.nodes()).put(nodeB.discoveryNode).build();
ClusterState previousClusterState = ClusterState.builder(CLUSTER_NAME).nodes(discoveryNodes).build();
ClusterState clusterState = ClusterState.builder(previousClusterState).incrementVersion().build();
publishStateAndWait(nodeA.action, clusterState, previousClusterState);
assertSameStateFromFull(nodeB.clusterState, clusterState);
// cluster state update - add block
previousClusterState = clusterState;
clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build();
publishStateAndWait(nodeA.action, clusterState, previousClusterState);
assertSameStateFromDiff(nodeB.clusterState, clusterState);
}
/**
 * With {@code discovery.zen.publish_diff.enable} set to {@code false}, every publish must send
 * a full cluster state - a receiving node must never observe a state read from a diff.
 */
public void testDisablingDiffPublishing() throws Exception {
    Settings noDiffPublishingSettings = Settings.builder().put(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), false).build();
    // the master never receives its own published state
    MockNode nodeA = createMockNode("nodeA", noDiffPublishingSettings, event -> fail("Shouldn't send cluster state to myself"));
    // every state arriving at nodeB must have been deserialized from a full state
    MockNode nodeB = createMockNode("nodeB", noDiffPublishingSettings, event -> assertFalse(event.state().wasReadFromDiff()));
    // Initial cluster state
    DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).localNodeId(nodeA.discoveryNode.getId()).masterNodeId(nodeA.discoveryNode.getId()).build();
    ClusterState clusterState = ClusterState.builder(CLUSTER_NAME).nodes(discoveryNodes).build();
    // cluster state update - add nodeB
    discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(nodeB.discoveryNode).build();
    ClusterState previousClusterState = clusterState;
    clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build();
    publishStateAndWait(nodeA.action, clusterState, previousClusterState);
    // cluster state update - add block; would normally go out as a diff, but diffs are disabled
    previousClusterState = clusterState;
    clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build();
    publishStateAndWait(nodeA.action, clusterState, previousClusterState);
}
/**
 * Test not waiting on publishing works correctly (i.e., publishing times out).
 * Publishes many states back-to-back without awaiting each ack and verifies that in the end
 * every node converged on the final state and sees itself as the local node.
 */
public void testSimultaneousClusterStatePublishing() throws Exception {
    int numberOfNodes = randomIntBetween(2, 10);
    int numberOfIterations = scaledRandomIntBetween(5, 50);
    Settings settings = Settings.builder().put(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), randomBoolean()).build();
    // each applied state must carry exactly the metadata generated for its version
    MockNode master = createMockNode("node0", settings,
        event -> assertProperMetaDataForVersion(event.state().metaData(), event.state().version())).setAsMaster();
    DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder(master.nodes());
    for (int i = 1; i < numberOfNodes; i++) {
        final String name = "node" + i;
        final MockNode node = createMockNode(name, settings,
            event -> assertProperMetaDataForVersion(event.state().metaData(), event.state().version()));
        discoveryNodesBuilder.put(node.discoveryNode);
    }
    AssertingAckListener[] listeners = new AssertingAckListener[numberOfIterations];
    DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build();
    MetaData metaData = MetaData.EMPTY_META_DATA;
    ClusterState clusterState = ClusterState.builder(CLUSTER_NAME).metaData(metaData).build();
    ClusterState previousState;
    // fire off all publishes without waiting in between
    for (int i = 0; i < numberOfIterations; i++) {
        previousState = clusterState;
        metaData = buildMetaDataForVersion(metaData, i + 1);
        clusterState = ClusterState.builder(clusterState).incrementVersion().metaData(metaData).nodes(discoveryNodes).build();
        listeners[i] = publishState(master.action, clusterState, previousState);
    }
    // then await every ack
    for (int i = 0; i < numberOfIterations; i++) {
        listeners[i].await(1, TimeUnit.SECONDS);
    }
    // set the master cs
    master.clusterState = clusterState;
    for (MockNode node : nodes.values()) {
        assertSameState(node.clusterState, clusterState);
        assertThat(node.clusterState.nodes().getLocalNode(), equalTo(node.discoveryNode));
    }
}
/**
 * If serializing the diff of a cluster state throws, publishing must fail with a
 * {@link Discovery.FailedToCommitClusterStateException} whose cause reports the
 * serialization failure - the broken diff must never be applied.
 */
public void testSerializationFailureDuringDiffPublishing() throws Exception {
    MockNode nodeA = createMockNode("nodeA", Settings.EMPTY, event -> fail("Shouldn't send cluster state to myself")).setAsMaster();
    MockNode nodeB = createMockNode("nodeB", Settings.EMPTY);
    // Initial cluster state with both states - the second node still shouldn't get diff even though it's present in the previous cluster state
    DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(nodeA.nodes()).put(nodeB.discoveryNode).build();
    ClusterState previousClusterState = ClusterState.builder(CLUSTER_NAME).nodes(discoveryNodes).build();
    ClusterState clusterState = ClusterState.builder(previousClusterState).incrementVersion().build();
    publishStateAndWait(nodeA.action, clusterState, previousClusterState);
    assertSameStateFromFull(nodeB.clusterState, clusterState);
    // cluster state update - add block
    previousClusterState = clusterState;
    clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build();
    // wrap the state so that producing a diff yields one that fails to serialize
    ClusterState unserializableClusterState = new ClusterState(clusterState.version(), clusterState.stateUUID(), clusterState) {
        @Override
        public Diff<ClusterState> diff(ClusterState previousState) {
            return new Diff<ClusterState>() {
                @Override
                public ClusterState apply(ClusterState part) {
                    fail("this diff shouldn't be applied");
                    return part;
                }

                @Override
                public void writeTo(StreamOutput out) throws IOException {
                    throw new IOException("Simulated failure of diff serialization");
                }
            };
        }
    };
    try {
        publishStateAndWait(nodeA.action, unserializableClusterState, previousClusterState);
        fail("cluster state published despite of diff errors");
    } catch (Discovery.FailedToCommitClusterStateException e) {
        assertThat(e.getCause(), notNullValue());
        assertThat(e.getCause().getMessage(), containsString("failed to serialize"));
    }
}
/**
 * Publishing must fail with {@link Discovery.FailedToCommitClusterStateException} when the
 * required minimum number of master nodes exceeds the number actually available; data nodes
 * must not count towards the commit quorum.
 */
public void testFailToPublishWithLessThanMinMasterNodes() throws Exception {
final int masterNodes = randomIntBetween(1, 10);
MockNode master = createMockNode("master");
DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder().put(master.discoveryNode);
for (int i = 1; i < masterNodes; i++) {
discoveryNodesBuilder.put(createMockNode("node" + i).discoveryNode);
}
final int dataNodes = randomIntBetween(0, 5);
final Settings dataSettings = Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build();
for (int i = 0; i < dataNodes; i++) {
discoveryNodesBuilder.put(createMockNode("data_" + i, dataSettings).discoveryNode);
}
discoveryNodesBuilder.localNodeId(master.discoveryNode.getId()).masterNodeId(master.discoveryNode.getId());
DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build();
MetaData metaData = MetaData.EMPTY_META_DATA;
ClusterState clusterState = ClusterState.builder(CLUSTER_NAME).metaData(metaData).nodes(discoveryNodes).build();
ClusterState previousState = master.clusterState;
try {
// demand strictly more master nodes than exist
publishState(master.action, clusterState, previousState, masterNodes + randomIntBetween(1, 5));
fail("cluster state publishing didn't fail despite of not having enough nodes");
} catch (Discovery.FailedToCommitClusterStateException expected) {
logger.debug("failed to publish as expected", expected);
}
}
/**
 * Randomized commit test: a mix of well-behaved nodes, nodes that error on the send phase and
 * nodes that time out. Depending on min_master_nodes relative to the number of good nodes,
 * the publish must either commit or fail/time out - and the failure message must say which.
 */
public void testPublishingWithSendingErrors() throws Exception {
int goodNodes = randomIntBetween(2, 5);
int errorNodes = randomIntBetween(1, 5);
int timeOutNodes = randomBoolean() ? 0 : randomIntBetween(1, 5); // adding timeout nodes will force timeout errors
final int numberOfMasterNodes = goodNodes + errorNodes + timeOutNodes + 1; // master
final boolean expectingToCommit = randomBoolean();
Settings.Builder settings = Settings.builder();
// make sure we have a reasonable timeout if we expect to timeout, o.w. one that will make the test "hang"
settings.put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), expectingToCommit == false && timeOutNodes > 0 ? "100ms" : "1h")
.put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "5ms"); // test is about committing
MockNode master = createMockNode("master", settings.build());
// randomize things a bit
// node type codes: 0 = good, 1 = error on send, 2 = timeout on send
int[] nodeTypes = new int[goodNodes + errorNodes + timeOutNodes];
for (int i = 0; i < goodNodes; i++) {
nodeTypes[i] = 0;
}
for (int i = goodNodes; i < goodNodes + errorNodes; i++) {
nodeTypes[i] = 1;
}
for (int i = goodNodes + errorNodes; i < nodeTypes.length; i++) {
nodeTypes[i] = 2;
}
Collections.shuffle(Arrays.asList(nodeTypes), random());
DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder().put(master.discoveryNode);
for (int i = 0; i < nodeTypes.length; i++) {
final MockNode mockNode = createMockNode("node" + i);
discoveryNodesBuilder.put(mockNode.discoveryNode);
switch (nodeTypes[i]) {
case 1:
mockNode.action.errorOnSend.set(true);
break;
case 2:
mockNode.action.timeoutOnSend.set(true);
break;
}
}
final int dataNodes = randomIntBetween(0, 3); // data nodes don't matter
for (int i = 0; i < dataNodes; i++) {
final MockNode mockNode = createMockNode("data_" + i, Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build());
discoveryNodesBuilder.put(mockNode.discoveryNode);
if (randomBoolean()) {
// we really don't care - just chaos monkey
mockNode.action.errorOnCommit.set(randomBoolean());
mockNode.action.errorOnSend.set(randomBoolean());
mockNode.action.timeoutOnCommit.set(randomBoolean());
mockNode.action.timeoutOnSend.set(randomBoolean());
}
}
final int minMasterNodes;
final String expectedBehavior;
if (expectingToCommit) {
minMasterNodes = randomIntBetween(0, goodNodes + 1); // count master
expectedBehavior = "succeed";
} else {
minMasterNodes = randomIntBetween(goodNodes + 2, numberOfMasterNodes); // +2 because of master
expectedBehavior = timeOutNodes > 0 ? "timeout" : "fail";
}
logger.info("--> expecting commit to {}. good nodes [{}], errors [{}], timeouts [{}]. min_master_nodes [{}]",
expectedBehavior, goodNodes + 1, errorNodes, timeOutNodes, minMasterNodes);
discoveryNodesBuilder.localNodeId(master.discoveryNode.getId()).masterNodeId(master.discoveryNode.getId());
DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build();
MetaData metaData = MetaData.EMPTY_META_DATA;
ClusterState clusterState = ClusterState.builder(CLUSTER_NAME).metaData(metaData).nodes(discoveryNodes).build();
ClusterState previousState = master.clusterState;
try {
publishState(master.action, clusterState, previousState, minMasterNodes);
if (expectingToCommit == false) {
fail("cluster state publishing didn't fail despite of not have enough nodes");
}
} catch (Discovery.FailedToCommitClusterStateException exception) {
logger.debug("failed to publish as expected", exception);
if (expectingToCommit) {
throw exception;
}
assertThat(exception.getMessage(), containsString(timeOutNodes > 0 ? "timed out" : "failed"));
}
}
/**
 * Exercises {@code validateIncomingState}: states are accepted when there is no current
 * master or when they come from the current/new master, and rejected for a different master,
 * a foreign cluster name, a mismatched local node, or an older version from the same master.
 */
public void testIncomingClusterStateValidation() throws Exception {
MockNode node = createMockNode("node");
logger.info("--> testing acceptances of any master when having no master");
ClusterState state = ClusterState.builder(node.clusterState)
.nodes(DiscoveryNodes.builder(node.nodes()).masterNodeId(randomAsciiOfLength(10))).incrementVersion().build();
node.action.validateIncomingState(state, null);
// now set a master node
node.clusterState = ClusterState.builder(node.clusterState).nodes(DiscoveryNodes.builder(node.nodes()).masterNodeId("master")).build();
logger.info("--> testing rejection of another master");
try {
node.action.validateIncomingState(state, node.clusterState);
fail("node accepted state from another master");
} catch (IllegalStateException OK) {
assertThat(OK.toString(), containsString("cluster state from a different master than the current one, rejecting"));
}
logger.info("--> test state from the current master is accepted");
node.action.validateIncomingState(ClusterState.builder(node.clusterState)
.nodes(DiscoveryNodes.builder(node.nodes()).masterNodeId("master")).incrementVersion().build(), node.clusterState);
logger.info("--> testing rejection of another cluster name");
try {
node.action.validateIncomingState(ClusterState.builder(new ClusterName(randomAsciiOfLength(10))).nodes(node.nodes()).build(), node.clusterState);
fail("node accepted state with another cluster name");
} catch (IllegalStateException OK) {
assertThat(OK.toString(), containsString("received state from a node that is not part of the cluster"));
}
logger.info("--> testing rejection of a cluster state with wrong local node");
try {
state = ClusterState.builder(node.clusterState)
.nodes(DiscoveryNodes.builder(node.nodes()).localNodeId("_non_existing_").build())
.incrementVersion().build();
node.action.validateIncomingState(state, node.clusterState);
fail("node accepted state with non-existence local node");
} catch (IllegalStateException OK) {
assertThat(OK.toString(), containsString("received state with a local node that does not match the current local node"));
}
try {
// an existing node id that is nevertheless not this node's id must also be rejected
MockNode otherNode = createMockNode("otherNode");
state = ClusterState.builder(node.clusterState).nodes(
DiscoveryNodes.builder(node.nodes()).put(otherNode.discoveryNode).localNodeId(otherNode.discoveryNode.getId()).build()
).incrementVersion().build();
node.action.validateIncomingState(state, node.clusterState);
fail("node accepted state with existent but wrong local node");
} catch (IllegalStateException OK) {
assertThat(OK.toString(), containsString("received state with a local node that does not match the current local node"));
}
logger.info("--> testing acceptance of an old cluster state");
final ClusterState incomingState = node.clusterState;
node.clusterState = ClusterState.builder(node.clusterState).incrementVersion().build();
final IllegalStateException e =
expectThrows(IllegalStateException.class, () -> node.action.validateIncomingState(incomingState, node.clusterState));
final String message = String.format(
Locale.ROOT,
"rejecting cluster state version [%d] uuid [%s] received from [%s]",
incomingState.version(),
incomingState.stateUUID(),
incomingState.nodes().getMasterNodeId()
);
assertThat(e, hasToString("java.lang.IllegalStateException: " + message));
// an older version from a *new* master is also OK!
ClusterState previousState = ClusterState.builder(node.clusterState).incrementVersion().build();
state = ClusterState.builder(node.clusterState)
.nodes(DiscoveryNodes.builder(node.clusterState.nodes()).masterNodeId("_new_master_").build())
.build();
// remove the master of the node (but still have a previous cluster state with it)!
node.resetMasterId();
node.action.validateIncomingState(state, previousState);
}
/**
 * Sends several pending states, then commits them in random order: commits for versions newer
 * than anything committed so far must succeed, commits for older versions must be rejected
 * with {@link IllegalStateException}, and the node must end up holding the highest version.
 */
public void testOutOfOrderCommitMessages() throws Throwable {
MockNode node = createMockNode("node").setAsMaster();
final CapturingTransportChannel channel = new CapturingTransportChannel();
List<ClusterState> states = new ArrayList<>();
final int numOfStates = scaledRandomIntBetween(3, 25);
for (int i = 1; i <= numOfStates; i++) {
states.add(ClusterState.builder(node.clusterState).version(i).stateUUID(ClusterState.UNKNOWN_UUID).build());
}
final ClusterState finalState = states.get(numOfStates - 1);
logger.info("--> publishing states");
for (ClusterState state : states) {
node.action.handleIncomingClusterStateRequest(
new BytesTransportRequest(PublishClusterStateAction.serializeFullClusterState(state, Version.CURRENT), Version.CURRENT),
channel);
assertThat(channel.response.get(), equalTo((TransportResponse) TransportResponse.Empty.INSTANCE));
assertThat(channel.error.get(), nullValue());
channel.clear();
}
logger.info("--> committing states");
long largestVersionSeen = Long.MIN_VALUE;
Randomness.shuffle(states);
for (ClusterState state : states) {
node.action.handleCommitRequest(new PublishClusterStateAction.CommitClusterStateRequest(state.stateUUID()), channel);
if (largestVersionSeen < state.getVersion()) {
assertThat(channel.response.get(), equalTo((TransportResponse) TransportResponse.Empty.INSTANCE));
if (channel.error.get() != null) {
throw channel.error.get();
}
largestVersionSeen = state.getVersion();
} else {
// older cluster states will be rejected
assertNotNull(channel.error.get());
assertThat(channel.error.get(), instanceOf(IllegalStateException.class));
}
channel.clear();
}
//now check the last state held
assertSameState(node.clusterState, finalState);
}
/**
 * Tests that cluster is committed or times out. It should never be the case that we fail
 * an update due to a commit timeout, but it ends up being committed anyway
 */
public void testTimeoutOrCommit() throws Exception {
Settings settings = Settings.builder()
.put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "1ms").build(); // short but so we will sometime commit sometime timeout
MockNode master = createMockNode("master", settings);
MockNode node = createMockNode("node", settings);
ClusterState state = ClusterState.builder(master.clusterState)
.nodes(DiscoveryNodes.builder(master.clusterState.nodes()).put(node.discoveryNode).masterNodeId(master.discoveryNode.getId())).build();
// repeat to exercise both the committed and the timed-out outcome
for (int i = 0; i < 10; i++) {
state = ClusterState.builder(state).incrementVersion().build();
logger.debug("--> publishing version [{}], UUID [{}]", state.version(), state.stateUUID());
boolean success;
try {
publishState(master.action, state, master.clusterState, 2).await(1, TimeUnit.HOURS);
success = true;
} catch (Discovery.FailedToCommitClusterStateException OK) {
success = false;
}
logger.debug("--> publishing [{}], verifying...", success ? "succeeded" : "failed");
// on success the node must hold exactly this state; on failure it must NOT hold it
if (success) {
assertSameState(node.clusterState, state);
} else {
assertThat(node.clusterState.stateUUID(), not(equalTo(state.stateUUID())));
}
}
}
// Produces metadata that encodes `version` in three checkable ways: an index named
// "test<version>" with <version> shards, and a transient setting "test" -> version.
// Verified later by assertProperMetaDataForVersion.
private MetaData buildMetaDataForVersion(MetaData metaData, long version) {
ImmutableOpenMap.Builder<String, IndexMetaData> indices = ImmutableOpenMap.builder(metaData.indices());
indices.put("test" + version, IndexMetaData.builder("test" + version).settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
.numberOfShards((int) version).numberOfReplicas(0).build());
return MetaData.builder(metaData)
.transientSettings(Settings.builder().put("test", version).build())
.indices(indices.build())
.build();
}
// Counterpart of buildMetaDataForVersion: asserts that indices test1..test<version> exist
// with matching shard counts, that no index beyond <version> exists, and that the
// transient "test" setting equals the version.
private void assertProperMetaDataForVersion(MetaData metaData, long version) {
for (long i = 1; i <= version; i++) {
assertThat(metaData.index("test" + i), notNullValue());
assertThat(metaData.index("test" + i).getNumberOfShards(), equalTo((int) i));
}
assertThat(metaData.index("test" + (version + 1)), nullValue());
assertThat(metaData.transientSettings().get("test"), equalTo(Long.toString(version)));
}
// Publishes the state and blocks up to one second for all nodes to ack without errors.
public void publishStateAndWait(PublishClusterStateAction action, ClusterState state, ClusterState previousState) throws InterruptedException {
publishState(action, state, previousState).await(1, TimeUnit.SECONDS);
}
// Publishes with a randomized min_master_nodes between -1 (disabled) and the number of
// master-eligible nodes, delegating to the explicit overload below.
public AssertingAckListener publishState(PublishClusterStateAction action, ClusterState state, ClusterState previousState) throws InterruptedException {
final int minimumMasterNodes = randomIntBetween(-1, state.nodes().getMasterNodes().size());
return publishState(action, state, previousState, minimumMasterNodes);
}
// Publishes a change event and returns an ack listener expecting one ack per node
// except the publishing node itself (hence size - 1).
public AssertingAckListener publishState(PublishClusterStateAction action, ClusterState state, ClusterState previousState, int minMasterNodes) throws InterruptedException {
AssertingAckListener assertingAckListener = new AssertingAckListener(state.nodes().getSize() - 1);
ClusterChangedEvent changedEvent = new ClusterChangedEvent("test update", state, previousState);
action.publish(changedEvent, minMasterNodes, assertingAckListener);
return assertingAckListener;
}
/**
 * Ack listener that counts down once per expected node ack, collects any ack errors, and
 * records whether the publish timed out. {@link #await} fails the test on errors or timeout.
 */
public static class AssertingAckListener implements Discovery.AckListener {
private final List<Tuple<DiscoveryNode, Throwable>> errors = new CopyOnWriteArrayList<>();
private final AtomicBoolean timeoutOccurred = new AtomicBoolean();
private final CountDownLatch countDown;
public AssertingAckListener(int nodeCount) {
countDown = new CountDownLatch(nodeCount);
}
@Override
public void onNodeAck(DiscoveryNode node, @Nullable Exception e) {
// a non-null exception means this node's ack reported a failure
if (e != null) {
errors.add(new Tuple<>(node, e));
}
countDown.countDown();
}
@Override
public void onTimeout() {
timeoutOccurred.set(true);
// Fast forward the counter - no reason to wait here
long currentCount = countDown.getCount();
for (long i = 0; i < currentCount; i++) {
countDown.countDown();
}
}
// Waits for all acks and asserts there were no per-node errors.
public void await(long timeout, TimeUnit unit) throws InterruptedException {
assertThat(awaitErrors(timeout, unit), emptyIterable());
}
// Waits for all acks, asserts no timeout occurred, and returns collected errors.
public List<Tuple<DiscoveryNode, Throwable>> awaitErrors(long timeout, TimeUnit unit) throws InterruptedException {
countDown.await(timeout, unit);
assertFalse(timeoutOccurred.get());
return errors;
}
}
// Asserts two cluster states are "the same" by UUID and version (not full deep equality);
// failure messages include both states pretty-printed for debugging.
void assertSameState(ClusterState actual, ClusterState expected) {
assertThat(actual, notNullValue());
final String reason = "\n--> actual ClusterState: " + actual.prettyPrint() + "\n--> expected ClusterState:" + expected.prettyPrint();
assertThat("unequal UUIDs" + reason, actual.stateUUID(), equalTo(expected.stateUUID()));
assertThat("unequal versions" + reason, actual.version(), equalTo(expected.version()));
}
// Same-state assertion that additionally requires the state to have arrived as a diff.
void assertSameStateFromDiff(ClusterState actual, ClusterState expected) {
assertSameState(actual, expected);
assertTrue(actual.wasReadFromDiff());
}
// Same-state assertion that additionally requires the state to have arrived as a full state.
void assertSameStateFromFull(ClusterState actual, ClusterState expected) {
assertSameState(actual, expected);
assertFalse(actual.wasReadFromDiff());
}
/**
 * PublishClusterStateAction with injectable fault behavior: per-phase flags make the node
 * throw (error*) or silently drop the request (timeout*) for the send and commit phases.
 */
static class MockPublishAction extends PublishClusterStateAction {
AtomicBoolean timeoutOnSend = new AtomicBoolean();
AtomicBoolean errorOnSend = new AtomicBoolean();
AtomicBoolean timeoutOnCommit = new AtomicBoolean();
AtomicBoolean errorOnCommit = new AtomicBoolean();
public MockPublishAction(Settings settings, TransportService transportService, Supplier<ClusterState> clusterStateSupplier, NewPendingClusterStateListener listener, DiscoverySettings discoverySettings, ClusterName clusterName) {
super(settings, transportService, clusterStateSupplier, listener, discoverySettings, clusterName);
}
@Override
protected void handleIncomingClusterStateRequest(BytesTransportRequest request, TransportChannel channel) throws IOException {
if (errorOnSend.get()) {
throw new ElasticsearchException("forced error on incoming cluster state");
}
if (timeoutOnSend.get()) {
// drop the request without responding, simulating a timeout on the sender side
return;
}
super.handleIncomingClusterStateRequest(request, channel);
}
@Override
protected void handleCommitRequest(PublishClusterStateAction.CommitClusterStateRequest request, TransportChannel channel) {
if (errorOnCommit.get()) {
throw new ElasticsearchException("forced error on incoming commit");
}
if (timeoutOnCommit.get()) {
// drop the commit without responding, simulating a timeout on the sender side
return;
}
super.handleCommitRequest(request, channel);
}
}
/**
 * TransportChannel stub that captures the single response OR error sent through it
 * (asserting the other slot is still empty) so tests can inspect handler outcomes.
 * Call {@link #clear()} between uses.
 */
static class CapturingTransportChannel implements TransportChannel {
AtomicReference<TransportResponse> response = new AtomicReference<>();
AtomicReference<Throwable> error = new AtomicReference<>();
public void clear() {
response.set(null);
error.set(null);
}
@Override
public String action() {
return "_noop_";
}
@Override
public String getProfileName() {
return "_noop_";
}
@Override
public void sendResponse(TransportResponse response) throws IOException {
this.response.set(response);
// a channel must not deliver both a response and an error
assertThat(error.get(), nullValue());
}
@Override
public void sendResponse(TransportResponse response, TransportResponseOptions options) throws IOException {
this.response.set(response);
assertThat(error.get(), nullValue());
}
@Override
public void sendResponse(Exception exception) throws IOException {
this.error.set(exception);
assertThat(response.get(), nullValue());
}
@Override
public long getRequestId() {
return 0;
}
@Override
public String getChannelType() {
return "capturing";
}
}
}
| dpursehouse/elasticsearch | core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java | Java | apache-2.0 | 44,836 |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/kinesisanalytics/model/LambdaOutputUpdate.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <utility>
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
namespace Aws
{
namespace KinesisAnalytics
{
namespace Model
{
// Default constructor: both optional fields start out unset (not serialized by Jsonize).
LambdaOutputUpdate::LambdaOutputUpdate() :
m_resourceARNUpdateHasBeenSet(false),
m_roleARNUpdateHasBeenSet(false)
{
}
// JSON constructor: initializes to the unset state, then delegates to operator= for parsing.
LambdaOutputUpdate::LambdaOutputUpdate(JsonView jsonValue) :
m_resourceARNUpdateHasBeenSet(false),
m_roleARNUpdateHasBeenSet(false)
{
*this = jsonValue;
}
// Deserializes from JSON: each field is read only if present, and its HasBeenSet flag is
// raised so a later Jsonize() round-trips exactly the fields that were in the input.
LambdaOutputUpdate& LambdaOutputUpdate::operator =(JsonView jsonValue)
{
if(jsonValue.ValueExists("ResourceARNUpdate"))
{
m_resourceARNUpdate = jsonValue.GetString("ResourceARNUpdate");
m_resourceARNUpdateHasBeenSet = true;
}
if(jsonValue.ValueExists("RoleARNUpdate"))
{
m_roleARNUpdate = jsonValue.GetString("RoleARNUpdate");
m_roleARNUpdateHasBeenSet = true;
}
return *this;
}
// Serializes to JSON, emitting only the fields that have been explicitly set.
JsonValue LambdaOutputUpdate::Jsonize() const
{
JsonValue payload;
if(m_resourceARNUpdateHasBeenSet)
{
payload.WithString("ResourceARNUpdate", m_resourceARNUpdate);
}
if(m_roleARNUpdateHasBeenSet)
{
payload.WithString("RoleARNUpdate", m_roleARNUpdate);
}
return payload;
}
} // namespace Model
} // namespace KinesisAnalytics
} // namespace Aws
| jt70471/aws-sdk-cpp | aws-cpp-sdk-kinesisanalytics/source/model/LambdaOutputUpdate.cpp | C++ | apache-2.0 | 1,459 |
package com.orientechnologies.orient.core.index;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import com.orientechnologies.common.collection.OCompositeKey;
import com.orientechnologies.orient.core.db.document.ODatabaseDocumentTx;
import com.orientechnologies.orient.core.metadata.schema.OType;
import com.orientechnologies.orient.core.record.impl.ODocument;
@Test
@SuppressWarnings("unchecked")
public class OSimpleKeyIndexDefinitionTest {
  private OSimpleKeyIndexDefinition simpleKeyIndexDefinition;

  @BeforeMethod
  public void beforeMethod() {
    // Fresh composite-key definition (INTEGER, STRING) for every test.
    simpleKeyIndexDefinition = new OSimpleKeyIndexDefinition(OType.INTEGER, OType.STRING);
  }

  @Test
  public void testGetFields() {
    Assert.assertTrue(simpleKeyIndexDefinition.getFields().isEmpty());
  }

  @Test
  public void testGetClassName() {
    Assert.assertNull(simpleKeyIndexDefinition.getClassName());
  }

  // --- key creation: happy paths ---

  @Test
  public void testCreateValueSimpleKey() {
    final OSimpleKeyIndexDefinition keyIndexDefinition = new OSimpleKeyIndexDefinition(OType.INTEGER);
    final Object result = keyIndexDefinition.createValue("2");
    Assert.assertEquals(result, 2);
  }

  @Test
  public void testCreateValueCompositeKeyListParam() {
    final Object result = simpleKeyIndexDefinition.createValue(Arrays.asList("2", "3"));
    final OCompositeKey compositeKey = new OCompositeKey(Arrays.asList(2, "3"));
    Assert.assertEquals(result, compositeKey);
  }

  @Test
  public void testCreateValueCompositeKey() {
    final Object result = simpleKeyIndexDefinition.createValue("2", "3");
    final OCompositeKey compositeKey = new OCompositeKey(Arrays.asList(2, "3"));
    Assert.assertEquals(result, compositeKey);
  }

  // --- key creation: null / malformed inputs must yield null ---

  @Test
  public void testCreateValueCompositeKeyNullListParam() {
    final Object result = simpleKeyIndexDefinition.createValue(Arrays.asList((Object) null));
    Assert.assertNull(result);
  }

  @Test
  public void testNullParamListItem() {
    final Object result = simpleKeyIndexDefinition.createValue(Arrays.asList("2", null));
    Assert.assertNull(result);
  }

  @Test
  public void testWrongParamTypeListItem() {
    final Object result = simpleKeyIndexDefinition.createValue(Arrays.asList("a", "3"));
    Assert.assertNull(result);
  }

  @Test
  public void testCreateValueCompositeKeyNullParamList() {
    final Object result = simpleKeyIndexDefinition.createValue((List<?>) null);
    Assert.assertNull(result);
  }

  @Test
  public void testCreateValueCompositeKeyNullParam() {
    final Object result = simpleKeyIndexDefinition.createValue((Object) null);
    Assert.assertNull(result);
  }

  @Test
  public void testCreateValueCompositeKeyEmptyList() {
    final Object result = simpleKeyIndexDefinition.createValue(Collections.<Object> emptyList());
    Assert.assertNull(result);
  }

  @Test
  public void testNullParamItem() {
    final Object result = simpleKeyIndexDefinition.createValue("2", null);
    Assert.assertNull(result);
  }

  @Test
  public void testWrongParamType() {
    final Object result = simpleKeyIndexDefinition.createValue("a", "3");
    Assert.assertNull(result);
  }

  // --- metadata accessors ---

  @Test
  public void testParamCount() {
    Assert.assertEquals(simpleKeyIndexDefinition.getParamCount(), 2);
  }

  @Test
  public void testParamCountOneItem() {
    final OSimpleKeyIndexDefinition keyIndexDefinition = new OSimpleKeyIndexDefinition(OType.INTEGER);
    Assert.assertEquals(keyIndexDefinition.getParamCount(), 1);
  }

  @Test
  public void testGetKeyTypes() {
    Assert.assertEquals(simpleKeyIndexDefinition.getTypes(), new OType[] { OType.INTEGER, OType.STRING });
  }

  @Test
  public void testGetKeyTypesOneType() {
    final OSimpleKeyIndexDefinition keyIndexDefinition = new OSimpleKeyIndexDefinition(OType.BOOLEAN);
    Assert.assertEquals(keyIndexDefinition.getTypes(), new OType[] { OType.BOOLEAN });
  }

  /**
   * Round-trips the definition through its document form. The in-memory
   * database is dropped in a finally block so a failing assertion or an
   * exception mid-test cannot leak the database into subsequent tests.
   */
  @Test
  public void testReload() {
    final ODatabaseDocumentTx databaseDocumentTx = new ODatabaseDocumentTx("memory:osimplekeyindexdefinitiontest");
    databaseDocumentTx.create();
    try {
      final ODocument storeDocument = simpleKeyIndexDefinition.toStream();
      storeDocument.save();
      final ODocument loadDocument = databaseDocumentTx.load(storeDocument.getIdentity());
      final OSimpleKeyIndexDefinition loadedKeyIndexDefinition = new OSimpleKeyIndexDefinition();
      loadedKeyIndexDefinition.fromStream(loadDocument);
      Assert.assertEquals(loadedKeyIndexDefinition, simpleKeyIndexDefinition);
    } finally {
      databaseDocumentTx.drop();
    }
  }

  @Test(expectedExceptions = OIndexException.class)
  public void testGetDocumentValueToIndex() {
    simpleKeyIndexDefinition.getDocumentValueToIndex(new ODocument());
  }
}
| nengxu/OrientDB | core/src/test/java/com/orientechnologies/orient/core/index/OSimpleKeyIndexDefinitionTest.java | Java | apache-2.0 | 4,965 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class systemglobal_authenticationpolicy_binding(base_resource) :
	""" Binding class showing the authenticationpolicy that can be bound to systemglobal.
	"""
	def __init__(self) :
		# Backing fields for the properties below; ___count is populated by
		# the NITRO API when a count is requested (see count()/count_filtered()).
		self._policyname = ""
		self._priority = 0
		self._builtin = []
		self.___count = 0
	@property
	def priority(self) :
		ur"""The priority of the command policy.
		"""
		try :
			return self._priority
		except Exception as e:
			raise e
	@priority.setter
	def priority(self, priority) :
		ur"""The priority of the command policy.
		"""
		try :
			self._priority = priority
		except Exception as e:
			raise e
	@property
	def builtin(self) :
		ur"""Indicates that a variable is a built-in (SYSTEM INTERNAL) type.<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE.
		"""
		try :
			return self._builtin
		except Exception as e:
			raise e
	@builtin.setter
	def builtin(self, builtin) :
		ur"""Indicates that a variable is a built-in (SYSTEM INTERNAL) type.<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE
		"""
		try :
			self._builtin = builtin
		except Exception as e:
			raise e
	@property
	def policyname(self) :
		ur"""The name of the command policy.
		"""
		try :
			return self._policyname
		except Exception as e:
			raise e
	@policyname.setter
	def policyname(self, policyname) :
		ur"""The name of the command policy.
		"""
		try :
			self._policyname = policyname
		except Exception as e:
			raise e
	def _get_nitro_response(self, service, response) :
		ur""" converts nitro response into object and returns the object array in case of get request.
		"""
		try :
			result = service.payload_formatter.string_to_resource(systemglobal_authenticationpolicy_binding_response, response, self.__class__.__name__)
			if(result.errorcode != 0) :
				# Error code 444 indicates an expired session: clear it so the
				# next request re-authenticates.
				if (result.errorcode == 444) :
					service.clear_session(self)
				if result.severity :
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.systemglobal_authenticationpolicy_binding
		except Exception as e :
			raise e
	def _get_object_name(self) :
		ur""" Returns the value of object identifier argument
		"""
		try :
			return 0
		except Exception as e :
			raise e
	@classmethod
	def add(cls, client, resource) :
		ur""" Use this API to bind one authenticationpolicy (or a list of them) to systemglobal.
		"""
		try :
			if resource and type(resource) is not list :
				updateresource = systemglobal_authenticationpolicy_binding()
				updateresource.policyname = resource.policyname
				updateresource.priority = resource.priority
				return updateresource.update_resource(client)
			else :
				if resource and len(resource) > 0 :
					updateresources = [systemglobal_authenticationpolicy_binding() for _ in range(len(resource))]
					for i in range(len(resource)) :
						updateresources[i].policyname = resource[i].policyname
						updateresources[i].priority = resource[i].priority
					return cls.update_bulk_request(client, updateresources)
		except Exception as e :
			raise e
	@classmethod
	def delete(cls, client, resource) :
		ur""" Use this API to unbind one authenticationpolicy (or a list of them) from systemglobal.
		"""
		try :
			if resource and type(resource) is not list :
				deleteresource = systemglobal_authenticationpolicy_binding()
				deleteresource.policyname = resource.policyname
				return deleteresource.delete_resource(client)
			else :
				if resource and len(resource) > 0 :
					deleteresources = [systemglobal_authenticationpolicy_binding() for _ in range(len(resource))]
					for i in range(len(resource)) :
						deleteresources[i].policyname = resource[i].policyname
					return cls.delete_bulk_request(client, deleteresources)
		except Exception as e :
			raise e
	@classmethod
	def get(cls, service) :
		ur""" Use this API to fetch systemglobal_authenticationpolicy_binding resources.
		"""
		try :
			obj = systemglobal_authenticationpolicy_binding()
			response = obj.get_resources(service)
			return response
		except Exception as e:
			raise e
	@classmethod
	def get_filtered(cls, service, filter_) :
		ur""" Use this API to fetch a filtered set of systemglobal_authenticationpolicy_binding resources.
		Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
		"""
		try :
			obj = systemglobal_authenticationpolicy_binding()
			option_ = options()
			option_.filter = filter_
			response = obj.getfiltered(service, option_)
			return response
		except Exception as e:
			raise e
	@classmethod
	def count(cls, service) :
		ur""" Use this API to count systemglobal_authenticationpolicy_binding resources configured on NetScaler.
		"""
		try :
			obj = systemglobal_authenticationpolicy_binding()
			option_ = options()
			option_.count = True
			response = obj.get_resources(service, option_)
			if response :
				return response[0].__dict__['___count']
			return 0
		except Exception as e:
			raise e
	@classmethod
	def count_filtered(cls, service, filter_) :
		ur""" Use this API to count the filtered set of systemglobal_authenticationpolicy_binding resources.
		Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
		"""
		try :
			obj = systemglobal_authenticationpolicy_binding()
			option_ = options()
			option_.count = True
			option_.filter = filter_
			response = obj.getfiltered(service, option_)
			if response :
				return response[0].__dict__['___count']
			return 0
		except Exception as e:
			raise e
	class Builtin:
		# Allowed values for the 'builtin' attribute.
		MODIFIABLE = "MODIFIABLE"
		DELETABLE = "DELETABLE"
		IMMUTABLE = "IMMUTABLE"
class systemglobal_authenticationpolicy_binding_response(base_response) :
	""" NITRO response envelope for systemglobal_authenticationpolicy_binding requests. """
	def __init__(self, length=1) :
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		# Pre-allocate one empty binding object per expected result row.
		# (The original assigned this attribute twice; the first empty-list
		# assignment was dead code and has been removed.)
		self.systemglobal_authenticationpolicy_binding = [systemglobal_authenticationpolicy_binding() for _ in range(length)]
/*
* Copyright 2000-2010 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.lang.ant.refactoring;
import com.intellij.codeInsight.TargetElementUtilBase;
import com.intellij.lang.ant.dom.AntDomFileDescription;
import com.intellij.openapi.actionSystem.CommonDataKeys;
import com.intellij.openapi.actionSystem.DataContext;
import com.intellij.openapi.actionSystem.LangDataKeys;
import com.intellij.openapi.editor.Editor;
import com.intellij.openapi.project.IndexNotReadyException;
import com.intellij.openapi.project.Project;
import com.intellij.psi.PsiElement;
import com.intellij.psi.PsiFile;
import com.intellij.psi.PsiReference;
import com.intellij.psi.xml.XmlFile;
import com.intellij.refactoring.rename.PsiElementRenameHandler;
import com.intellij.util.containers.ContainerUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.util.Collection;
/**
* @author Eugene Zhuravlev
* Date: Mar 19, 2007
*/
public final class AntRenameHandler extends PsiElementRenameHandler {
public boolean isAvailableOnDataContext(final DataContext dataContext) {
final PsiElement[] elements = getElements(dataContext);
return elements != null && elements.length > 1;
}
public void invoke(@NotNull final Project project, final Editor editor, final PsiFile file, final DataContext dataContext) {
final PsiElement[] elements = getElements(dataContext);
if (elements != null && elements.length > 0) {
invoke(project, new PsiElement[]{elements[0]}, dataContext);
}
}
public void invoke(@NotNull final Project project, @NotNull final PsiElement[] elements, final DataContext dataContext) {
super.invoke(project, elements, dataContext);
}
@Nullable
private static PsiElement[] getElements(DataContext dataContext) {
final PsiFile psiFile = CommonDataKeys.PSI_FILE.getData(dataContext);
if (!(psiFile instanceof XmlFile && AntDomFileDescription.isAntFile((XmlFile)psiFile))) {
return null;
}
final Editor editor = LangDataKeys.EDITOR.getData(dataContext);
if (editor == null) {
return null;
}
return getPsiElementsIn(editor, psiFile);
}
@Nullable
private static PsiElement[] getPsiElementsIn(final Editor editor, final PsiFile psiFile) {
try {
final PsiReference reference = TargetElementUtilBase.findReference(editor, editor.getCaretModel().getOffset());
if (reference == null) {
return null;
}
final Collection<PsiElement> candidates = TargetElementUtilBase.getInstance().getTargetCandidates(reference);
return ContainerUtil.toArray(candidates, new PsiElement[candidates.size()]);
}
catch (IndexNotReadyException e) {
return null;
}
}
}
| IllusionRom-deprecated/android_platform_tools_idea | plugins/ant/src/com/intellij/lang/ant/refactoring/AntRenameHandler.java | Java | apache-2.0 | 3,289 |
"""
Drone.io badge generator.
Currently set up to work on Mac.
Requires Pillow.
"""
import os
from PIL import Image, ImageDraw, ImageFont
SIZE = (95, 18)  # badge dimensions in pixels (width, height)
def hex_colour(hex):
    """Convert a '#RRGGBB' or 'RRGGBB' colour string to an (r, g, b) int tuple.

    Raises ValueError if the string is not a 6-digit hex colour. (The original
    crashed with IndexError on an empty string and parsed short strings into
    a confusing ValueError from int().)
    """
    if hex.startswith('#'):
        hex = hex[1:]
    if len(hex) != 6:
        raise ValueError('expected a 6-digit hex colour, got %r' % hex)
    return (
        int(hex[:2], 16),
        int(hex[2:4], 16),
        int(hex[4:6], 16),
    )
BACKGROUND = hex_colour('#4A4A4A')  # dark grey badge background
SUCCESS = hex_colour('#94B944')  # green band
WARNING = hex_colour('#E4A83C')  # amber band
ERROR = hex_colour('#B10610')  # red band
# Coverage-percentage thresholds used to pick the band colour.
SUCCESS_CUTOFF = 85
WARNING_CUTOFF = 45
# Mac-only font path (see module docstring).
FONT = ImageFont.truetype(size=10, filename="/Library/Fonts/Arial.ttf")
FONT_SHADOW = hex_colour('#525252')
PADDING_TOP = 3
def build_image(percentage, colour):
    """Render a coverage badge: grey 'coverage' label plus a coloured percentage band."""
    badge = Image.new('RGB', SIZE, color=BACKGROUND)
    draw = ImageDraw.Draw(badge)
    # Coloured band on the right-hand side of the badge.
    draw.rectangle([(55, 0), SIZE], colour, colour)
    label = 'coverage'
    value = '%s%%' % percentage
    # Each string is drawn twice, offset by one pixel, to fake a drop shadow.
    draw.text((8, PADDING_TOP + 1), label, font=FONT, fill=FONT_SHADOW)
    draw.text((7, PADDING_TOP), label, font=FONT)
    draw.text((63, PADDING_TOP + 1), value, font=FONT, fill=FONT_SHADOW)
    draw.text((62, PADDING_TOP), value, font=FONT)
    return badge
os.chdir('_build')
# Generate one badge per whole percentage point, 0..100 inclusive.
for i in range(101):
    filename = '%i.png' % i
    # Pick the colour band for this coverage level.
    if i < WARNING_CUTOFF:
        colour = ERROR
    elif i < SUCCESS_CUTOFF:
        colour = WARNING
    else:
        colour = SUCCESS
    # Context manager closes each file promptly; the original opened 101
    # handles and never closed any of them.
    with open(filename, 'wb') as badge_file:
        build_image(i, colour).save(badge_file)
| 21strun/django-coverage | assets/badges/drone.io/badge_maker.py | Python | apache-2.0 | 1,424 |
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ThreadedComponent import threadedcomponent, threadedadaptivecommscomponent
import heapq
import time
class SchedulingComponentMixin(object):
    """
    SchedulingComponent() -> new SchedulingComponent
    Base class for a threadedcomponent with an inbuilt scheduler, allowing a
    component to block until a scheduled event is ready or a message is received
    on an inbox.
    """
    Inboxes = {"inbox" : "Standard inbox for receiving data from other components",
               "control" : "Standard inbox for receiving control messages from other components",
               "event" : "Scheduled events which are ready to be processed"}
    def __init__(self, **argd):
        super(SchedulingComponentMixin, self).__init__(**argd)
        # Min-heap of (eventTime, priority, message) tuples; heapq keeps the
        # soonest event at index 0.
        self.eventQueue = []
    def scheduleRel(self, message, delay, priority=1):
        """
        Schedule an event to wake the component and send a message to the
        "event" inbox after a delay.
        Returns the event tuple, which can be passed to cancelEvent().
        """
        return self.scheduleAbs(message, time.time() + delay, priority)
    def scheduleAbs(self, message, eventTime, priority=1):
        """
        Schedule an event to wake the component and send a message to the
        "event" inbox after at a specified time.
        Returns the event tuple, which can be passed to cancelEvent().
        """
        event = eventTime, priority, message
        heapq.heappush(self.eventQueue, event)
        return event
    def cancelEvent(self, event):
        """ Remove a scheduled event from the scheduler """
        # list.remove() breaks the heap invariant, so re-heapify afterwards.
        self.eventQueue.remove(event)
        heapq.heapify(self.eventQueue)
    def eventReady(self):
        """ Returns true if there is an event ready to be processed """
        if self.eventQueue:
            # The earliest scheduled time is always at the front of the heap.
            eventTime = self.eventQueue[0][0]
            if time.time() >= eventTime:
                return True
        return False
    def pause(self):
        """
        Sleep until there is either an event ready or a message is received on
        an inbox
        """
        if self.eventReady():
            self.signalEvent()
        else:
            if self.eventQueue:
                # Sleep no longer than the time until the next scheduled event.
                eventTime = self.eventQueue[0][0]
                super(SchedulingComponentMixin, self).pause(eventTime - time.time())
                if self.eventReady():
                    self.signalEvent()
            else:
                super(SchedulingComponentMixin, self).pause()
    def signalEvent(self):
        """
        Put the event message of the earliest scheduled event onto the
        component's "event" inbox and remove it from the scheduler.
        """
        eventTime, priority, message = heapq.heappop(self.eventQueue)
        #print "Signalling, late by:", (time.time() - eventTime)
        # NOTE(review): if the "event" inqueue is full, the popped message is
        # silently discarded — confirm this overflow behaviour is intended.
        if not self.inqueues["event"].full():
            self.inqueues["event"].put(message)
class SchedulingComponent(SchedulingComponentMixin, threadedcomponent):
    # threadedcomponent with the scheduling mixin applied.
    def __init__(self, **argd):
        super(SchedulingComponent, self).__init__(**argd)
class SchedulingAdaptiveCommsComponent(SchedulingComponentMixin,
                                       threadedadaptivecommscomponent):
    # threadedadaptivecommscomponent with the scheduling mixin applied.
    def __init__(self, **argd):
        super(SchedulingAdaptiveCommsComponent, self).__init__(**argd)
| sparkslabs/kamaelia_ | Sketches/JT/Jam/library/trunk/Axon/SchedulingComponent.py | Python | apache-2.0 | 3,988 |
// scalac: -Ystop-after:parser
//
object foo {
  val n =
    <a xmlns=""/>
  // Parser regression check: an element with an explicitly empty xmlns
  // attribute is compared against a null namespace.
  n.namespace == null
}
<?php
namespace PHPExcel\Reader\Excel5;
/**
* PHPExcel_Reader_Excel5_RC4
*
* Copyright (c) 2006 - 2015 PHPExcel
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* @category PHPExcel
* @package PHPExcel_Reader_Excel5
* @copyright Copyright (c) 2006 - 2015 PHPExcel (http://www.codeplex.com/PHPExcel)
* @license http://www.gnu.org/licenses/old-licenses/lgpl-2.1.txt LGPL
* @version ##VERSION##, ##DATE##
*/
class RC4
{
    // Key-scheduled permutation (S-box) plus the two PRGA stream indices.
    protected $s = array();
    protected $i = 0;
    protected $j = 0;

    /**
     * RC4 stream decryption/encryption constructor.
     *
     * Runs the key-scheduling algorithm (KSA) over the supplied passphrase.
     *
     * @param string $key Encryption key/passphrase
     */
    public function __construct($key)
    {
        $len = strlen($key);

        // Start from the identity permutation...
        for ($k = 0; $k < 256; ++$k) {
            $this->s[$k] = $k;
        }

        // ...then shuffle it with the key bytes (key repeats as needed).
        $swap = 0;
        for ($k = 0; $k < 256; ++$k) {
            $swap = ($swap + $this->s[$k] + ord($key[$k % $len])) % 256;
            $tmp = $this->s[$k];
            $this->s[$k] = $this->s[$swap];
            $this->s[$swap] = $tmp;
        }

        // Reset the stream indices ready for the first call to RC4().
        $this->i = 0;
        $this->j = 0;
    }

    /**
     * Symmetric decryption/encryption function.
     *
     * @param string $data Data to encrypt/decrypt
     *
     * @return string
     */
    public function RC4($data)
    {
        $len = strlen($data);
        for ($pos = 0; $pos < $len; ++$pos) {
            // Advance the pseudo-random generation algorithm (PRGA) state.
            $this->i = ($this->i + 1) % 256;
            $this->j = ($this->j + $this->s[$this->i]) % 256;

            $tmp = $this->s[$this->i];
            $this->s[$this->i] = $this->s[$this->j];
            $this->s[$this->j] = $tmp;

            // XOR the next keystream byte into the data, in place.
            $keyByte = $this->s[($this->s[$this->i] + $this->s[$this->j]) % 256];
            $data[$pos] = chr(ord($data[$pos]) ^ $keyByte);
        }
        return $data;
    }
}
| winerQin/yesnophp | library/PHPExcel/Reader/Excel5/RC4.php | PHP | apache-2.0 | 2,548 |
/*
* Copyright 2011 Christopher Pheby
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jadira.bindings.core.binder;
import java.lang.annotation.Annotation;
import java.net.URL;
import org.jadira.bindings.core.api.Binding;
import org.jadira.bindings.core.api.Converter;
import org.jadira.bindings.core.api.FromUnmarshaller;
import org.jadira.bindings.core.api.ToMarshaller;
/**
 * A binder that accepts registration of bindings, marshallers, unmarshallers and
 * converters — programmatically, from a configuration file, or from annotated classes.
 */
public interface RegisterableBinder {

	/**
	 * Register the configuration file (bindings.xml) at the given URL
	 * @param nextLocation The URL to register
	 */
	void registerConfiguration(URL nextLocation);

	/**
	 * Register a Binding with the given source and target class.
	 * A binding unifies a marshaller and an unmarshaller and both must be available to resolve a binding.
	 *
	 * The source class is considered the owning class of the binding. The source can be marshalled
	 * into the target class. Similarly, the target can be unmarshalled to produce an instance of the source type.
	 * @param key The converter key
	 * @param converter The binding to be registered
	 * @param <S> Source type
	 * @param <T> Target type
	 */
	<S, T> void registerBinding(ConverterKey<S,T> key, Binding<S, T> converter);

	/**
	 * Register an UnMarshaller with the given source and target class.
	 * The unmarshaller is used as follows: Instances of the source can be marshalled into the target class.
	 * @param key The converter key
	 * @param converter The FromUnmarshaller to be registered
	 * @param <S> Source type
	 * @param <T> Target type
	 */
	<S, T> void registerUnmarshaller(ConverterKey<S,T> key, FromUnmarshaller<S, T> converter);

	/**
	 * Register a Marshaller with the given source and target class.
	 * The marshaller is used as follows: Instances of the source can be marshalled into the target class.
	 * @param key The converter key
	 * @param converter The ToMarshaller to be registered
	 * @param <S> Source type
	 * @param <T> Target type
	 */
	<S, T> void registerMarshaller(ConverterKey<S,T> key, ToMarshaller<S, T> converter);

	/**
	 * Register a Converter with the given input and output classes. Instances of the input class can be converted into
	 * instances of the output class
	 * @param key The converter key
	 * @param converter The Converter to be registered
	 * @param <S> Source type
	 * @param <T> Target type
	 */
	<S, T> void registerConverter(ConverterKey<S,T> key, Converter<S, T> converter);

	/**
	 * Register a Binding with the given source and target class.
	 * A binding unifies a marshaller and an unmarshaller and both must be available to resolve a binding.
	 *
	 * The source class is considered the owning class of the binding. The source can be marshalled
	 * into the target class. Similarly, the target can be unmarshalled to produce an instance of the source type.
	 * @param sourceClass The source (owning) class
	 * @param targetClass The target (foreign) class
	 * @param converter The binding to be registered
	 * @param <S> Source type
	 * @param <T> Target type
	 */
	<S, T> void registerBinding(final Class<S> sourceClass, Class<T> targetClass, Binding<S, T> converter);

	/**
	 * Register an UnMarshaller with the given source and target class.
	 * The unmarshaller is used as follows: Instances of the source can be marshalled into the target class.
	 * @param sourceClass The source (input) class
	 * @param targetClass The target (output) class
	 * @param converter The FromUnmarshaller to be registered
	 * @param <S> Source type
	 * @param <T> Target type
	 */
	<S, T> void registerUnmarshaller(Class<S> sourceClass, Class<T> targetClass, FromUnmarshaller<S, T> converter);

	/**
	 * Register a Marshaller with the given source and target class.
	 * The marshaller is used as follows: Instances of the source can be marshalled into the target class.
	 * @param sourceClass The source (input) class
	 * @param targetClass The target (output) class
	 * @param converter The ToMarshaller to be registered
	 * @param <S> Source type
	 * @param <T> Target type
	 */
	<S, T> void registerMarshaller(Class<S> sourceClass, Class<T> targetClass, ToMarshaller<S, T> converter);

	/**
	 * Register a Converter with the given input and output classes. Instances of the input class can be converted into
	 * instances of the output class
	 * @param sourceClass The source (input) class
	 * @param targetClass The target (output) class
	 * @param converter The Converter to be registered
	 * @param <S> Source type
	 * @param <T> Target type
	 */
	<S, T> void registerConverter(final Class<S> sourceClass, Class<T> targetClass, Converter<S, T> converter);

	/**
	 * Register a Binding with the given source and target class.
	 * A binding unifies a marshaller and an unmarshaller and both must be available to resolve a binding.
	 *
	 * The source class is considered the owning class of the binding. The source can be marshalled
	 * into the target class. Similarly, the target can be unmarshalled to produce an instance of the source type.
	 * @param sourceClass The source (owning) class
	 * @param targetClass The target (foreign) class
	 * @param converter The binding to be registered
	 * @param qualifier The qualifier for which the binding must be registered
	 * @param <S> Source type
	 * @param <T> Target type
	 */
	<S, T> void registerBinding(final Class<S> sourceClass, Class<T> targetClass, Binding<S, T> converter, Class<? extends Annotation> qualifier);

	/**
	 * Register an UnMarshaller with the given source and target class.
	 * The unmarshaller is used as follows: Instances of the source can be marshalled into the target class.
	 * @param sourceClass The source (input) class
	 * @param targetClass The target (output) class
	 * @param converter The FromUnmarshaller to be registered
	 * @param qualifier The qualifier for which the unmarshaller must be registered
	 * @param <S> Source type
	 * @param <T> Target type
	 */
	<S, T> void registerUnmarshaller(Class<S> sourceClass, Class<T> targetClass, FromUnmarshaller<S, T> converter, Class<? extends Annotation> qualifier);

	/**
	 * Register a Marshaller with the given source and target class.
	 * The marshaller is used as follows: Instances of the source can be marshalled into the target class.
	 * @param sourceClass The source (input) class
	 * @param targetClass The target (output) class
	 * @param converter The ToMarshaller to be registered
	 * @param qualifier The qualifier for which the marshaller must be registered
	 * @param <S> Source type
	 * @param <T> Target type
	 */
	<S, T> void registerMarshaller(Class<S> sourceClass, Class<T> targetClass, ToMarshaller<S, T> converter, Class<? extends Annotation> qualifier);

	/**
	 * Register a Converter with the given input and output classes. Instances of the input class can be converted into
	 * instances of the output class
	 * @param sourceClass The source (input) class
	 * @param targetClass The target (output) class
	 * @param converter The Converter to be registered
	 * @param qualifier The qualifier for which the converter must be registered
	 * @param <S> Source type
	 * @param <T> Target type
	 */
	<S, T> void registerConverter(final Class<S> sourceClass, Class<T> targetClass, Converter<S, T> converter, Class<? extends Annotation> qualifier);

	/**
	 * Inspect each of the supplied classes, processing any of the annotated methods found
	 * @param classesToInspect The classes whose annotated methods should be registered
	 */
	void registerAnnotatedClasses(Class<?>... classesToInspect);

	/**
	 * Return an iterable collection of ConverterKeys, one for each currently registered conversion
	 * @return the registered ConverterKey instances
	 */
	Iterable<ConverterKey<?, ?>> getConverterEntries();
}
| JadiraOrg/jadira | bindings/src/main/java/org/jadira/bindings/core/binder/RegisterableBinder.java | Java | apache-2.0 | 8,392 |
/**
* @license
* Copyright 2013 Palantir Technologies, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { IOptions, RuleSeverity } from "./language/rule/rule";
export interface IConfigurationFile {
    /**
     * @deprecated property is never set
     *
     * The severity that is applied to rules in this config file as well as rules
     * in any inherited config files which have their severity set to "default".
     * Not inherited.
     */
    defaultSeverity?: RuleSeverity;
    /**
     * An array of config files whose rules are inherited by this config file.
     */
    extends: string[];
    /**
     * Rules that are used to lint JavaScript files.
     */
    jsRules: Map<string, Partial<IOptions>>;
    /**
     * A subset of the CLI options.
     */
    linterOptions?: Partial<{
        exclude: string[];
        format: string;
    }>;
    /**
     * Directories containing custom rules. Resolved using node module semantics.
     */
    rulesDirectory: string[];
    /**
     * Rules that are used to lint TypeScript files.
     */
    rules: Map<string, Partial<IOptions>>;
}
export interface IConfigurationLoadResult {
    /** Absolute path of the configuration file that was found, if any. */
    path?: string;
    /** The parsed configuration, if a configuration file was found and loaded. */
    results?: IConfigurationFile;
}
export declare const JSON_CONFIG_FILENAME = "tslint.json";
/** @deprecated use `JSON_CONFIG_FILENAME` or `CONFIG_FILENAMES` instead. */
export declare const CONFIG_FILENAME = "tslint.json";
/** Recognised configuration file names (tslint.json / tslint.yml / tslint.yaml). */
export declare const CONFIG_FILENAMES: string[];
export declare const DEFAULT_CONFIG: IConfigurationFile;
export declare const EMPTY_CONFIG: IConfigurationFile;
/**
 * Searches for a TSLint configuration and returns the data from the config.
 * @param configFile A path to a config file; this can be null if the location of a config is not known
 * @param inputFilePath A path containing the current file being linted. This is the starting location
 * of the search for a configuration.
 * @returns Load status for a TSLint configuration object
 */
export declare function findConfiguration(configFile: string | null, inputFilePath: string): IConfigurationLoadResult;
export declare function findConfiguration(configFile: string, inputFilePath?: string): IConfigurationLoadResult;
/**
 * Searches for a TSLint configuration and returns the path to it.
 * May return undefined if no configuration is found.
 * @param suppliedConfigFilePath A path to a known config file supplied by a user. Pass null here if
 * the location of the config file is not known and you want to search for one.
 * @param inputFilePath A path to the current file being linted. This is the starting location
 * of the search for a configuration.
 * @returns An absolute path to a tslint.json or tslint.yml or tslint.yaml file
 * or undefined if neither can be found.
 */
export declare function findConfigurationPath(suppliedConfigFilePath: string | null, inputFilePath: string): string | undefined;
export declare function findConfigurationPath(suppliedConfigFilePath: string, inputFilePath?: string): string | undefined;
/**
 * Uses Node semantics to load a configuration file given configFilePath.
 * For example:
 * '/path/to/config' will be treated as an absolute path
 * './path/to/config' will be treated as a relative path
 * 'path/to/config' will attempt to load a to/config file inside a node module named path
 * @param configFilePath The configuration to load
 * @param originalFilePath (deprecated) The entry point configuration file
 * @returns a configuration object for TSLint loaded from the file at configFilePath
 */
export declare function loadConfigurationFromPath(configFilePath?: string, _originalFilePath?: string): IConfigurationFile;
/** Reads the configuration file from disk and parses it as raw JSON, YAML or JS depending on the extension. */
export declare function readConfigurationFile(filepath: string): RawConfigFile;
/**
 * Combines two configurations into one. Presumably `nextConfigSource` takes precedence over
 * `targetConfig` (as with `extends` chains) — verify against the implementation.
 */
export declare function extendConfigurationFile(targetConfig: IConfigurationFile, nextConfigSource: IConfigurationFile): IConfigurationFile;
/**
 * returns the absolute path (contrary to what the name implies)
 *
 * @deprecated use `path.resolve` instead
 */
export declare function getRelativePath(directory?: string | null, relativeTo?: string): string | undefined;
/**
 * Whether `directory` should be treated as a filesystem path rather than a node module
 * name — presumably mirrors the path-vs-module distinction described on
 * `loadConfigurationFromPath`; verify against the implementation.
 */
export declare function useAsPath(directory: string): boolean;
/**
 * @param directories A path(s) to a directory of custom rules
 * @param relativeTo A path that directories provided are relative to.
 * For example, if the directories come from a tslint.json file, this path
 * should be the path to the tslint.json file.
 * @return An array of absolute paths to directories potentially containing rules
 */
export declare function getRulesDirectories(directories?: string | string[], relativeTo?: string): string[];
/** Shape of a configuration file as read from disk, before normalization by `parseConfigFile`. */
export interface RawConfigFile {
    /** Path(s) or module name(s) of base configuration(s) to extend. */
    extends?: string | string[];
    /** A subset of the CLI options (same shape as on `IConfigurationFile`). */
    linterOptions?: IConfigurationFile["linterOptions"];
    /** Directories containing custom rules, relative to the config file. */
    rulesDirectory?: string | string[];
    /** Severity applied to rules that do not specify one (see `RawRuleConfig.severity`). */
    defaultSeverity?: string;
    /** Rules that are used to lint TypeScript files. */
    rules?: RawRulesConfig;
    /** Rules that are used to lint JavaScript files; the boolean form's meaning is not visible here — verify. */
    jsRules?: RawRulesConfig | boolean;
}
/** Map from rule name to its raw (un-normalized) configuration. */
export interface RawRulesConfig {
    [key: string]: RawRuleConfig;
}
/** A single rule's raw configuration: shorthand boolean/array forms or an explicit options object. */
export declare type RawRuleConfig = null | undefined | boolean | any[] | {
    severity?: RuleSeverity | "warn" | "none" | "default";
    options?: any;
};
/**
 * Parses a config file and normalizes legacy config settings.
 * If `configFileDir` and `readConfig` are provided, this function will load all base configs and reduce them to the final configuration.
 *
 * @param configFile The raw object read from the JSON of a config file
 * @param configFileDir The directory of the config file
 * @param readConfig Will be used to load all base configurations while parsing. The function is called with the resolved path.
 */
export declare function parseConfigFile(configFile: RawConfigFile, configFileDir?: string, readConfig?: (path: string) => RawConfigFile): IConfigurationFile;
/**
 * Fills in default values for `IOption` properties and outputs an array of `IOption`
 */
export declare function convertRuleOptions(ruleConfiguration: Map<string, Partial<IOptions>>): IOptions[];
/** Whether `filepath` is excluded by the config's `linterOptions.exclude` — exact matching semantics not visible here; verify. */
export declare function isFileExcluded(filepath: string, configFile?: IConfigurationFile): boolean;
/** Serializes a configuration object to a string representation. */
export declare function stringifyConfiguration(configFile: IConfigurationFile): string;
| cloudfoundry-community/asp.net5-buildpack | fixtures/node_apps/angular_dotnet/ClientApp/node_modules/tslint/lib/configuration.d.ts | TypeScript | apache-2.0 | 6,738 |
/*
* Copyright 2014 Guidewire Software, Inc.
*/
package gw.plugin.ij.intentions;
import com.intellij.codeInsight.CodeInsightUtilBase;
import com.intellij.codeInspection.LocalQuickFixAndIntentionActionOnPsiElement;
import com.intellij.openapi.editor.Editor;
import com.intellij.openapi.project.Project;
import com.intellij.psi.PsiElement;
import com.intellij.psi.PsiFile;
import com.intellij.psi.PsiWhiteSpace;
import com.intellij.psi.util.PsiMatcherImpl;
import gw.internal.gosu.parser.Expression;
import gw.internal.gosu.parser.expressions.NumericLiteral;
import gw.lang.parser.IStatement;
import gw.lang.parser.statements.IAssignmentStatement;
import gw.lang.parser.statements.IStatementList;
import gw.lang.parser.statements.IWhileStatement;
import gw.plugin.ij.lang.psi.api.statements.IGosuVariable;
import gw.plugin.ij.lang.psi.impl.statements.GosuForEachStatementImpl;
import gw.plugin.ij.lang.psi.impl.statements.GosuWhileStatementImpl;
import gw.plugin.ij.lang.psi.util.GosuPsiParseUtil;
import gw.plugin.ij.util.GosuBundle;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import static com.intellij.psi.util.PsiMatchers.hasClass;
/**
 * Quick fix that rewrites a counting {@code while} loop into the equivalent Gosu
 * {@code for (ident in 0..bound)} statement.
 *
 * <p>The fix is created with the loop counter name, the loop-bound expression, the
 * {@code var ident = 0} declaration preceding the loop, and the increment statement inside the
 * loop body. {@link #invoke} builds the {@code for} statement text, re-parses it into PSI,
 * deletes the now-redundant declaration and replaces the {@code while} statement in place.
 */
public class WhileToForFix extends LocalQuickFixAndIntentionActionOnPsiElement {
  /** Name of the loop counter variable. */
  String ident;
  /** Expression the counter is compared against (the loop bound). */
  Expression rhs;
  /** The {@code var ident = 0} declaration that the {@code for} statement makes redundant. */
  private IGosuVariable declarationEqualToZero;
  /** The increment statement inside the loop body; skipped when copying the body. */
  private IAssignmentStatement increment;

  public WhileToForFix(PsiElement whileStmt, String ident, Expression rhs, IGosuVariable declarationEqualToZero, IAssignmentStatement increment) {
    super(whileStmt);
    this.ident = ident;
    this.rhs = rhs;
    this.declarationEqualToZero = declarationEqualToZero;
    this.increment = increment;
  }

  @Override
  public void invoke(@NotNull Project project, @NotNull PsiFile file, @Nullable("is null when called from inspection") Editor editor, @NotNull PsiElement startElement, @NotNull PsiElement endElement) {
    if (!CodeInsightUtilBase.prepareFileForWrite(startElement.getContainingFile())) {
      return;
    }
    IWhileStatement parsedElement = ((GosuWhileStatementImpl) startElement).getParsedElement();
    if (parsedElement == null) {
      return;
    }
    IStatement statement = parsedElement.getStatement();
    IStatement[] statements = ((IStatementList) statement).getStatements();
    // Build the replacement "for (<ident> in 0..<bound>) { ... }" source text.
    StringBuilder forStmt = new StringBuilder();
    forStmt.append("for (");
    forStmt.append(ident);
    forStmt.append(" in 0..");
    if (rhs instanceof NumericLiteral) {
      // Constant bound: append bound - 1, turning the exclusive while-condition
      // into Gosu's inclusive ".." range.
      Object res = rhs.evaluate();
      if (res instanceof Integer) {
        forStmt.append(((Integer) res) - 1);
      }
    } else {
      // Non-constant bound: emit "0..|<bound>" — presumably Gosu's
      // exclusive-upper-end range operator; verify against the Gosu grammar.
      forStmt.append("|" + rhs);
    }
    forStmt.append(") {\n");
    String indent = getIndent(parsedElement, statements);
    // Copy the loop body, dropping the explicit increment statement which the
    // for-loop makes implicit.
    for (IStatement statement1 : statements) {
      if (statement1 != increment) {
        forStmt.append(indent);
        forStmt.append(statement1.getLocation().getTextFromTokens());
        forStmt.append("\n");
      }
    }
    forStmt.append("}");
    PsiElement stub = GosuPsiParseUtil.parseProgramm(forStmt.toString(), startElement, file.getManager(), null);
    PsiElement newForStmt = new PsiMatcherImpl(stub)
        .descendant(hasClass(GosuForEachStatementImpl.class))
        .getElement();
    if (newForStmt != null) {
      declarationEqualToZero.delete();
      startElement.replace(newForStmt);
    }
  }

  /**
   * Reconstructs the body indentation (relative to the {@code while} keyword) for the new loop.
   * NOTE(review): assumes the body has at least two statements ({@code statements[1]}).
   */
  private String getIndent(IWhileStatement parsedElement, IStatement[] statements) {
    int whileColumn = parsedElement.getLocation().getColumn();
    int column = statements[1].getLocation().getColumn() - whileColumn;
    if (column < 0) {
      return " ";
    }
    StringBuilder out = new StringBuilder();
    // Appends column + 1 spaces (inclusive loop bound) — preserved from the original behavior.
    for (int i = 0; i <= column; i++) {
      out.append(" ");
    }
    return out.toString();
  }

  /**
   * Removes the counter declaration immediately preceding {@code whileStmt} if it declares
   * {@code ident}. NOTE(review): currently unused — {@link #invoke} deletes
   * {@code declarationEqualToZero} directly instead; consider removing.
   */
  private void removeVarDecl(PsiElement whileStmt, String ident) {
    PsiElement prev = whileStmt.getPrevSibling();
    while (prev instanceof PsiWhiteSpace) {
      prev = prev.getPrevSibling();
    }
    if (prev instanceof IGosuVariable && ((IGosuVariable) prev).getName().equals(ident)) {
      prev.delete();
    }
  }

  @Override
  public boolean isAvailable(@NotNull Project project,
                             @NotNull PsiFile file,
                             @NotNull PsiElement startElement,
                             @NotNull PsiElement endElement) {
    // Only offered on Gosu while-statements.
    return startElement instanceof GosuWhileStatementImpl;
  }

  @NotNull
  @Override
  public String getText() {
    return GosuBundle.message("inspection.while.to.for");
  }

  @NotNull
  @Override
  public String getFamilyName() {
    return GosuBundle.message("inspection.group.name.statement.issues");
  }
}
| pdalbora/gosu-lang | idea-gosu-plugin/src/main/java/gw/plugin/ij/intentions/WhileToForFix.java | Java | apache-2.0 | 4,712 |
/*
* Author: Markus Stenberg <markus stenberg@iki.fi>
* Author: Steven Barth <steven@midlink.org>
* Author: Pierre Pfister
*
* Copyright (c) 2014-2015 cisco Systems, Inc.
*/
#pragma once

/* Anything up to INFO is compiled in by default; syslog can be used
 * to filter them out. DEBUG can be quite spammy and isn't enabled by
 * default. */
#define HNETD_DEFAULT_L_LEVEL 6

/* The build may override the compile-time log level per translation unit. */
#ifndef L_LEVEL
#define L_LEVEL HNETD_DEFAULT_L_LEVEL
#endif /* !L_LEVEL */

/* Per-file prefix prepended to every log message (empty by default). */
#ifndef L_PREFIX
#define L_PREFIX ""
#endif /* !L_PREFIX */

#ifdef __APPLE__
/* Haha. Got to love advanced IPv6 socket API being disabled by
 * default. */
#define __APPLE_USE_RFC_3542
/* macOS spells the IPv6 multicast membership socket options differently. */
#define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP
#define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP
/* LIST_HEAD macro in sys/queue.h, argh.. */
#include <sys/queue.h>
#ifdef LIST_HEAD
#undef LIST_HEAD
#endif /* LIST_HEAD */
#endif /* __APPLE__ */

#include <stddef.h>
#include <stdint.h>
#include <time.h>
#include <syslog.h>
#include <sys/types.h>
#include <libubox/utils.h>
#include <inttypes.h>

/* Two-step expansion so macro arguments are expanded before stringification. */
#define STR_EXPAND(tok) #tok
#define STR(tok) STR_EXPAND(tok)

/* printf conversion specifier for hnetd timestamps (int64_t). */
#define PRItime PRId64

#include "hnetd_time.h"

/* Runtime log level; messages above it are suppressed even when compiled in. */
extern int log_level;

// Logging macros
/* Installable logger callback; logging is a no-op when it is NULL. */
extern void (*hnetd_log)(int priority, const char *format, ...);

/* Emits a message iff a logger is installed and the runtime log_level allows
 * it. Compile-time filtering happens in the per-level macros below. */
#define L_INTERNAL(level, ...) \
do { \
 if (hnetd_log && log_level >= level) \
 hnetd_log(level, L_PREFIX __VA_ARGS__); \
 } while(0)

/* Per-level wrappers: each compiles to nothing when L_LEVEL excludes it. */
#if L_LEVEL >= LOG_ERR
#define L_ERR(...) L_INTERNAL(LOG_ERR, __VA_ARGS__)
#else
#define L_ERR(...) do {} while(0)
#endif
#if L_LEVEL >= LOG_WARNING
#define L_WARN(...) L_INTERNAL(LOG_WARNING, __VA_ARGS__)
#else
#define L_WARN(...) do {} while(0)
#endif
#if L_LEVEL >= LOG_NOTICE
#define L_NOTICE(...) L_INTERNAL(LOG_NOTICE, __VA_ARGS__)
#else
#define L_NOTICE(...) do {} while(0)
#endif
#if L_LEVEL >= LOG_INFO
#define L_INFO(...) L_INTERNAL(LOG_INFO, __VA_ARGS__)
#else
#define L_INFO(...) do {} while(0)
#endif
#if L_LEVEL >= LOG_DEBUG
#define L_DEBUG(...) L_INTERNAL(LOG_DEBUG, __VA_ARGS__)
#else
#define L_DEBUG(...) do {} while(0)
#endif

// Some C99 compatibility
/* Map `typeof` to GCC's `__typeof` when not already provided. */
#ifndef typeof
#define typeof __typeof
#endif
/* Recover a pointer to the enclosing struct from a pointer to one of its members. */
#ifndef container_of
#define container_of(ptr, type, member) ( \
 (type *)( (char *)ptr - offsetof(type,member) ))
#endif
/* Marks intentionally-unused parameters/variables to silence warnings. */
#ifndef __unused
#define __unused __attribute__((unused))
#endif
| sbyx/hnetd | src/hnetd.h | C | apache-2.0 | 2,413 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to work with checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import six
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import py_checkpoint_reader
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util.tf_export import tf_export
# Public symbols exported from this module (surfaced under `tf.train`).
__all__ = [
    "load_checkpoint", "load_variable", "list_variables",
    "checkpoints_iterator", "init_from_checkpoint"
]
@tf_export("train.load_checkpoint")
def load_checkpoint(ckpt_dir_or_file):
  """Returns a `CheckpointReader` for the checkpoint at `ckpt_dir_or_file`.

  If `ckpt_dir_or_file` resolves to a directory containing multiple
  checkpoints, a reader for the latest one is returned.

  Args:
    ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint
      file.

  Returns:
    `CheckpointReader` object.

  Raises:
    ValueError: If `ckpt_dir_or_file` resolves to a directory with no
      checkpoints.
  """
  resolved = _get_checkpoint_filename(ckpt_dir_or_file)
  if resolved is not None:
    return py_checkpoint_reader.NewCheckpointReader(resolved)
  raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
                   "given directory %s" % ckpt_dir_or_file)
@tf_export("train.load_variable")
def load_variable(ckpt_dir_or_file, name):
  """Returns the tensor value of the given variable in the checkpoint.

  Args:
    ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
    name: Name of the variable to return.

  Returns:
    A numpy `ndarray` with a copy of the value of this variable.
  """
  # TODO(b/29227106): Fix this in the right place and remove this.
  tensor_name = name[:-2] if name.endswith(":0") else name
  return load_checkpoint(ckpt_dir_or_file).get_tensor(tensor_name)
@tf_export("train.list_variables")
def list_variables(ckpt_dir_or_file):
  """Returns list of all variables in the checkpoint.

  Args:
    ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.

  Returns:
    List of tuples `(name, shape)`, sorted by variable name.
  """
  reader = load_checkpoint(ckpt_dir_or_file)
  variable_map = reader.get_variable_to_shape_map()
  # Build (name, shape) pairs in deterministic, name-sorted order.
  return [(name, variable_map[name]) for name in sorted(variable_map)]
def wait_for_new_checkpoint(checkpoint_dir,
                            last_checkpoint=None,
                            seconds_to_sleep=1,
                            timeout=None):
  """Blocks until a checkpoint newer than `last_checkpoint` appears.

  Polls `checkpoint_dir` every `seconds_to_sleep` seconds until the latest
  checkpoint path differs from `last_checkpoint`.

  Args:
    checkpoint_dir: The directory in which checkpoints are saved.
    last_checkpoint: The last checkpoint path used, or `None` if we're
      expecting a checkpoint for the first time.
    seconds_to_sleep: Seconds to sleep between polls of the directory.
    timeout: Maximum number of seconds to wait; `None` waits indefinitely.

  Returns:
    A new checkpoint path, or None if the timeout was reached.
  """
  logging.info("Waiting for new checkpoint at %s", checkpoint_dir)
  deadline = time.time() + timeout if timeout is not None else None
  while True:
    latest = checkpoint_management.latest_checkpoint(checkpoint_dir)
    if latest is not None and latest != last_checkpoint:
      logging.info("Found new checkpoint at %s", latest)
      return latest
    # Nothing new yet: give up if sleeping again would overshoot the deadline.
    if deadline is not None and time.time() + seconds_to_sleep > deadline:
      return None
    time.sleep(seconds_to_sleep)
@tf_export("train.checkpoints_iterator")
def checkpoints_iterator(checkpoint_dir,
                         min_interval_secs=0,
                         timeout=None,
                         timeout_fn=None):
  """Continuously yield new checkpoint files as they appear.

  The iterator only checks for new checkpoints when control flow has been
  reverted to it. This means it can miss checkpoints if your code takes longer
  to run between iterations than `min_interval_secs` or the interval at which
  new checkpoints are written.

  The `timeout` argument is the maximum number of seconds to block waiting for
  a new checkpoint. It is used in combination with the `timeout_fn` as
  follows:

  * If the timeout expires and no `timeout_fn` was specified, the iterator
    stops yielding.
  * If a `timeout_fn` was specified, that function is called and if it returns
    a true boolean value the iterator stops yielding.
  * If the function returns a false boolean value then the iterator resumes the
    wait for new checkpoints. At this point the timeout logic applies again.

  This behavior gives control to callers on what to do if checkpoints do not
  come fast enough or stop being generated. For example, if callers have a way
  to detect that the training has stopped and know that no new checkpoints
  will be generated, they can provide a `timeout_fn` that returns `True` when
  the training has stopped. If they know that the training is still going on
  they return `False` instead.

  Args:
    checkpoint_dir: The directory in which checkpoints are saved.
    min_interval_secs: The minimum number of seconds between yielding
      checkpoints.
    timeout: The maximum number of seconds to wait between checkpoints. If left
      as `None`, then the process will wait indefinitely.
    timeout_fn: Optional function to call after a timeout. If the function
      returns True, then it means that no new checkpoints will be generated and
      the iterator will exit. The function is called with no arguments.

  Yields:
    String paths to latest checkpoint files as they arrive.
  """
  checkpoint_path = None
  while True:
    # Block (up to `timeout` seconds) until a checkpoint newer than the one
    # yielded last time appears.
    new_checkpoint_path = wait_for_new_checkpoint(
        checkpoint_dir, checkpoint_path, timeout=timeout)
    if new_checkpoint_path is None:
      if not timeout_fn:
        # timed out
        logging.info("Timed-out waiting for a checkpoint.")
        return
      if timeout_fn():
        # The timeout_fn indicated that we are truly done.
        return
      else:
        # The timeout_fn indicated that more checkpoints may come.
        continue
    # Capture the time *before* yielding so the min_interval_secs throttle
    # below accounts for time the caller spends between yields.
    start = time.time()
    checkpoint_path = new_checkpoint_path
    yield checkpoint_path
    time_to_next_eval = start + min_interval_secs - time.time()
    if time_to_next_eval > 0:
      time.sleep(time_to_next_eval)
@tf_export(v1=["train.init_from_checkpoint"])
def init_from_checkpoint(ckpt_dir_or_file, assignment_map):
  """Replaces `tf.Variable` initializers so they load from a checkpoint file.

  Values are not loaded immediately, but when the initializer is run
  (typically by running a `tf.compat.v1.global_variables_initializer` op).

  Note: This overrides default initialization ops of specified variables and
  redefines dtype.

  Assignment map supports following syntax:

  * `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in
    current `scope_name` from `checkpoint_scope_name` with matching tensor
    names.
  * `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` -
    will initialize `scope_name/variable_name` variable
    from `checkpoint_scope_name/some_other_variable`.
  * `'scope_variable_name': variable` - will initialize given `tf.Variable`
    object with tensor 'scope_variable_name' from the checkpoint.
  * `'scope_variable_name': list(variable)` - will initialize list of
    partitioned variables with tensor 'scope_variable_name' from the checkpoint.
  * `'/': 'scope_name/'` - will load all variables in current `scope_name` from
    checkpoint's root (e.g. no scope).

  Supports loading into partitioned variables, which are represented as
  `'<variable>/part_<part #>'`.

  Example:

  ```python
  # Say, '/tmp/model.ckpt' has the following tensors:
  #  -- name='old_scope_1/var1', shape=[20, 2]
  #  -- name='old_scope_1/var2', shape=[50, 4]
  #  -- name='old_scope_2/var3', shape=[100, 100]

  # Create new model's variables
  with tf.compat.v1.variable_scope('new_scope_1'):
    var1 = tf.compat.v1.get_variable('var1', shape=[20, 2],
                           initializer=tf.compat.v1.zeros_initializer())
  with tf.compat.v1.variable_scope('new_scope_2'):
    var2 = tf.compat.v1.get_variable('var2', shape=[50, 4],
                           initializer=tf.compat.v1.zeros_initializer())
    # Partition into 5 variables along the first axis.
    var3 = tf.compat.v1.get_variable(name='var3', shape=[100, 100],
                           initializer=tf.compat.v1.zeros_initializer(),
                           partitioner=lambda shape, dtype: [5, 1])

  # Initialize all variables in `new_scope_1` from `old_scope_1`.
  init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/': 'new_scope_1'})

  # Use names to specify which variables to initialize from checkpoint.
  init_from_checkpoint('/tmp/model.ckpt',
                       {'old_scope_1/var1': 'new_scope_1/var1',
                        'old_scope_1/var2': 'new_scope_2/var2'})

  # Or use tf.Variable objects to identify what to initialize.
  init_from_checkpoint('/tmp/model.ckpt',
                       {'old_scope_1/var1': var1,
                        'old_scope_1/var2': var2})

  # Initialize partitioned variables using variable's name
  init_from_checkpoint('/tmp/model.ckpt',
                       {'old_scope_2/var3': 'new_scope_2/var3'})

  # Or specify the list of tf.Variable objects.
  init_from_checkpoint('/tmp/model.ckpt',
                       {'old_scope_2/var3': var3._get_variable_list()})
  ```

  Args:
    ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
    assignment_map: Dict, where keys are names of the variables in the
      checkpoint and values are current variables or names of current variables
      (in default graph).

  Raises:
    ValueError: If missing variables in current graph, or if missing
      checkpoints or tensors in checkpoints.
  """
  # The lambda's argument is the (ignored) strategy object passed by
  # merge_call; `None` is passed when invoked directly below.
  init_from_checkpoint_fn = lambda _: _init_from_checkpoint(
      ckpt_dir_or_file, assignment_map)
  if distribution_strategy_context.get_cross_replica_context():
    init_from_checkpoint_fn(None)
  else:
    # Inside a replica context: merge_call runs the function once in
    # cross-replica context instead of once per replica.
    distribution_strategy_context.get_replica_context().merge_call(
        init_from_checkpoint_fn)
def _init_from_checkpoint(ckpt_dir_or_file, assignment_map):
  """See `init_from_checkpoint` for documentation.

  Rewrites the initializer of every variable matched by `assignment_map` so it
  restores the mapped tensor from the checkpoint at `ckpt_dir_or_file`.
  """
  ckpt_file = _get_checkpoint_filename(ckpt_dir_or_file)
  reader = load_checkpoint(ckpt_dir_or_file)
  variable_map = reader.get_variable_to_shape_map()
  # Sorted iteration makes the processing (and any raised error) deterministic.
  for tensor_name_in_ckpt, current_var_or_name in sorted(
      six.iteritems(assignment_map)):
    var = None
    # Check if this is Variable object or list of Variable objects (in case of
    # partitioned variables).
    if _is_variable(current_var_or_name) or (
        isinstance(current_var_or_name, list)
        and all(_is_variable(v) for v in current_var_or_name)):
      var = current_var_or_name
    else:
      store_vars = vs._get_default_variable_store()._vars  # pylint:disable=protected-access
      # Check if this variable is in var_store.
      var = store_vars.get(current_var_or_name, None)
      # Also check if variable is partitioned as list.
      if var is None:
        var = _collect_partitioned_variable(current_var_or_name, store_vars)
    if var is not None:
      # If 1 to 1 mapping was provided, find variable in the checkpoint.
      if tensor_name_in_ckpt not in variable_map:
        raise ValueError("Tensor %s is not found in %s checkpoint %s" % (
            tensor_name_in_ckpt, ckpt_dir_or_file, variable_map
        ))
      if _is_variable(var):
        # Additional at-call-time checks.
        if not var.get_shape().is_compatible_with(
            variable_map[tensor_name_in_ckpt]):
          raise ValueError(
              "Shape of variable %s (%s) doesn't match with shape of "
              "tensor %s (%s) from checkpoint reader." % (
                  var.name, str(var.get_shape()),
                  tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])
              ))
        var_name = var.name
      else:
        # Partitioned variable: log all part names.
        var_name = ",".join([v.name for v in var])
      _set_variable_or_list_initializer(var, ckpt_file, tensor_name_in_ckpt)
      logging.debug("Initialize variable %s from checkpoint %s with %s",
                    var_name, ckpt_dir_or_file, tensor_name_in_ckpt)
    else:
      # Scope-to-scope mapping: `current_var_or_name` names a scope (or "/").
      scopes = ""
      # TODO(vihanjain): Support list of 'current_var_or_name' here.
      if "/" in current_var_or_name:
        scopes = current_var_or_name[:current_var_or_name.rindex("/")]
      if not tensor_name_in_ckpt.endswith("/"):
        raise ValueError(
            "Assignment map with scope only name {} should map to scope only "
            "{}. Should be 'scope/': 'other_scope/'.".format(
                scopes, tensor_name_in_ckpt))
      # If scope to scope mapping was provided, find all variables in the scope
      # and create variable to variable mapping.
      scope_variables = set()
      for var_name in store_vars:
        if not scopes or var_name.startswith(scopes + "/"):
          # Consume /part_ if partitioned variable.
          if "/part_" in var_name:
            var_name = var_name[:var_name.index("/part_")]
          scope_variables.add(var_name)
      for var_name in sorted(scope_variables):
        # Lookup name with specified prefix and suffix from current variable.
        # If tensor_name given is '/' (root), don't use it for full name.
        full_tensor_name = var_name[len(scopes):]
        if current_var_or_name != "/":
          full_tensor_name = full_tensor_name[1:]
        if tensor_name_in_ckpt != "/":
          full_tensor_name = tensor_name_in_ckpt + full_tensor_name
        # Remove trailing '/', if any, in the full_tensor_name
        if full_tensor_name.endswith("/"):
          full_tensor_name = full_tensor_name[:-1]
        if full_tensor_name not in variable_map:
          raise ValueError(
              "Tensor %s (%s in %s) is not found in %s checkpoint" % (
                  full_tensor_name, var_name[len(scopes) + 1:],
                  tensor_name_in_ckpt, ckpt_dir_or_file
              ))
        var = store_vars.get(var_name, None)
        if var is None:
          # The name may refer to a partitioned variable's parts.
          var = _collect_partitioned_variable(var_name, store_vars)
        _set_variable_or_list_initializer(var, ckpt_file, full_tensor_name)
        logging.debug("Initialize variable %s from checkpoint %s with %s",
                      var_name, ckpt_dir_or_file, full_tensor_name)
def _get_checkpoint_filename(ckpt_dir_or_file):
  """Resolves a directory or explicit path to a checkpoint filename.

  A directory is resolved to its latest checkpoint (which may be `None`);
  any other path is assumed to already name a checkpoint and is returned
  unchanged.
  """
  if not gfile.IsDirectory(ckpt_dir_or_file):
    return ckpt_dir_or_file
  return checkpoint_management.latest_checkpoint(ckpt_dir_or_file)
def _set_checkpoint_initializer(variable,
                                ckpt_file,
                                tensor_name,
                                slice_spec,
                                name="checkpoint_initializer"):
  """Overrides given variable's initialization op.

  Sets variable initializer to assign op that initializes variable from tensor's
  value in the checkpoint.

  Args:
    variable: `tf.Variable` object.
    ckpt_file: string, full path of the checkpoint.
    tensor_name: Name of the tensor to load from the checkpoint.
    slice_spec: Slice specification for loading partitioned tensors.
    name: Name of the operation.
  """
  base_type = variable.dtype.base_dtype
  # Do not colocate with variable since RestoreV2 op only runs on CPU and
  # colocation will force variable (and other ops that colocate with variable)
  # to be on CPU as well. It is okay to place the variable's initializer op on
  # CPU since it will only be run once at the start.
  with ops.device(variable.device), ops.device("/cpu:0"):
    restore_op = io_ops.restore_v2(
        ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]

    # Route the restored tensor through the variable's SaveableObject so the
    # assignment matches how a Saver would restore it.
    names_to_saveables = saveable_object_util.op_list_to_dict([variable])
    saveable_objects = []
    for name, op in names_to_saveables.items():
      for s in saveable_object_util.saveable_objects_for_op(op, name):
        saveable_objects.append(s)

    assert len(saveable_objects) == 1  # Should be only one variable.
    init_op = saveable_objects[0].restore([restore_op], restored_shapes=None)

    # pylint:disable=protected-access
    variable._initializer_op = init_op
    # Pin the restored tensor's static shape to the variable's shape —
    # presumably so downstream shape inference sees a fully-defined shape.
    restore_op.set_shape(variable.shape)
    variable._initial_value = restore_op
    # pylint:enable=protected-access
def _set_variable_or_list_initializer(variable_or_list, ckpt_file,
                                      tensor_name):
  """Overrides initialization op of given variable or list of variables.

  Calls `_set_checkpoint_initializer` for each variable in the given list of
  variables.

  Args:
    variable_or_list: `tf.Variable` object or a list of `tf.Variable` objects.
    ckpt_file: string, full path of the checkpoint.
    tensor_name: Name of the tensor to load from the checkpoint.

  Raises:
    ValueError: if all objects in `variable_or_list` are not partitions of the
      same large variable.
  """
  if isinstance(variable_or_list, (list, tuple)):
    # A set of slices: each part restores its own slice of the checkpoint
    # tensor, and all parts must belong to the same full variable.
    slice_name = None
    for v in variable_or_list:
      slice_info = v._save_slice_info  # pylint:disable=protected-access
      if slice_name is None:
        slice_name = slice_info.full_name
      elif slice_name != slice_info.full_name:
        raise ValueError("Slices must all be from the same tensor: %s != %s" %
                         (slice_name, slice_info.full_name))
      _set_checkpoint_initializer(v, ckpt_file, tensor_name, slice_info.spec)
  else:
    # Single (unpartitioned) variable: restore the whole tensor.
    _set_checkpoint_initializer(variable_or_list, ckpt_file, tensor_name, "")
def _is_variable(x):
  """Returns True if `x` is a `tf.Variable` or a resource variable."""
  if isinstance(x, variables.Variable):
    return True
  return resource_variable_ops.is_resource_variable(x)
def _collect_partitioned_variable(name, all_vars):
"""Returns list of `tf.Variable` that comprise the partitioned variable."""
if name + "/part_0" in all_vars:
var = []
i = 0
while name + "/part_%d" % i in all_vars:
var.append(all_vars[name + "/part_%d" % i])
i += 1
return var
return None
| ppwwyyxx/tensorflow | tensorflow/python/training/checkpoint_utils.py | Python | apache-2.0 | 19,434 |
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
using System.Reflection;
using System.Runtime.InteropServices;

// Assembly-wide metadata: publisher, trademark and copyright.
[assembly: AssemblyCompany("The Apache Software Foundation.")]
[assembly: AssemblyTrademark("The Apache Software Foundation")]
[assembly: AssemblyCopyright("Copyright © 2017 The Apache Software Foundation")]
// Empty culture: this is a main (non-satellite, non-localized) assembly.
[assembly: AssemblyCulture("")]
[assembly: AssemblyConfiguration("")]
// Setting ComVisible to false makes the types in this assembly not visible
// to COM components. If you need to access a type in this assembly from
// COM, set the ComVisible attribute to true on that type.
[assembly: ComVisible(false)]
// Assembly and file versions, kept identical.
[assembly: AssemblyVersion("0.16.0.0")]
[assembly: AssemblyFileVersion("0.16.0.0")]
| tcNickolas/reef | lang/cs/SharedAssemblyInfo.cs | C# | apache-2.0 | 1,475 |
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_DATE_H_
#define V8_DATE_H_
#include "src/base/timezone-cache.h"
#include "src/globals.h"
#include "src/objects/smi.h"
namespace v8 {
namespace internal {
// Caches timezone information (offsets, DST, timezone names) used by the
// JavaScript Date implementation, together with the time-unit constants and
// helper conversions the cache needs.
class DateCache {
 public:
  // Milliseconds per minute.
  static const int kMsPerMin = 60 * 1000;
  // Seconds per day.
  static const int kSecPerDay = 24 * 60 * 60;
  static const int64_t kMsPerDay = kSecPerDay * 1000;
  // 30-day approximation; only used for the conservative bound below.
  static const int64_t kMsPerMonth = kMsPerDay * 30;

  // The largest time that can be passed to OS date-time library functions.
  static const int kMaxEpochTimeInSec = kMaxInt;
  static const int64_t kMaxEpochTimeInMs =
      static_cast<int64_t>(kMaxInt) * 1000;

  // The largest time that can be stored in JSDate.
  static const int64_t kMaxTimeInMs =
      static_cast<int64_t>(864000000) * 10000000;

  // Conservative upper bound on time that can be stored in JSDate
  // before UTC conversion.
  static const int64_t kMaxTimeBeforeUTCInMs = kMaxTimeInMs + kMsPerMonth;

  // Sentinel that denotes an invalid local offset.
  static const int kInvalidLocalOffsetInMs = kMaxInt;
  // Sentinel that denotes an invalid cache stamp.
  // It is an invariant of DateCache that cache stamp is non-negative.
  static const int kInvalidStamp = -1;

  DateCache();

  // Releases the owned timezone cache.
  virtual ~DateCache() {
    delete tz_cache_;
    tz_cache_ = nullptr;
  }

  // Clears cached timezone information and increments the cache stamp.
  void ResetDateCache(
      base::TimezoneCache::TimeZoneDetection time_zone_detection);
static int DaysFromTime(int64_t time_ms) {
if (time_ms < 0) time_ms -= (kMsPerDay - 1);
return static_cast<int>(time_ms / kMsPerDay);
}
// Computes modulo(time_ms, kMsPerDay) given that
// days = floor(time_ms / kMsPerDay).
static int TimeInDay(int64_t time_ms, int days) {
return static_cast<int>(time_ms - days * kMsPerDay);
}
// ECMA 262 - ES#sec-timeclip TimeClip (time)
static double TimeClip(double time);
// Given the number of days since the epoch, computes the weekday.
// ECMA 262 - 15.9.1.6.
int Weekday(int days) {
int result = (days + 4) % 7;
return result >= 0 ? result : result + 7;
}
bool IsLeap(int year) {
return year % 4 == 0 && (year % 100 != 0 || year % 400 == 0);
}
  // ECMA 262 - ES#sec-local-time-zone-adjustment
  // Delegates to the OS; `is_utc` selects the direction of the conversion
  // (see ToLocal/ToUTC below).
  int LocalOffsetInMs(int64_t time, bool is_utc) {
    return GetLocalOffsetFromOS(time, is_utc);
  }

  // Returns the (cached) name of the local timezone at `time_ms`.
  // Out-of-range times are first mapped to an equivalent in-range time.
  // The name is cached separately for daylight-saving and standard time.
  const char* LocalTimezone(int64_t time_ms) {
    if (time_ms < 0 || time_ms > kMaxEpochTimeInMs) {
      time_ms = EquivalentTime(time_ms);
    }
    bool is_dst = DaylightSavingsOffsetInMs(time_ms) != 0;
    const char** name = is_dst ? &dst_tz_name_ : &tz_name_;
    if (*name == nullptr) {
      *name = tz_cache_->LocalTimezone(static_cast<double>(time_ms));
    }
    return *name;
  }

  // ECMA 262 - 15.9.5.26
  // Offset of local time from UTC in minutes (positive west of UTC).
  int TimezoneOffset(int64_t time_ms) {
    int64_t local_ms = ToLocal(time_ms);
    return static_cast<int>((time_ms - local_ms) / kMsPerMin);
  }

  // ECMA 262 - ES#sec-localtime-t
  // LocalTime(t) = t + LocalTZA(t, true)
  int64_t ToLocal(int64_t time_ms) {
    return time_ms + LocalOffsetInMs(time_ms, true);
  }

  // ECMA 262 - ES#sec-utc-t
  // UTC(t) = t - LocalTZA(t, false)
  int64_t ToUTC(int64_t time_ms) {
    return time_ms - LocalOffsetInMs(time_ms, false);
  }
  // Computes a time equivalent to the given time according
  // to ECMA 262 - 15.9.1.9.
  // The issue here is that some library calls don't work right for dates
  // that cannot be represented using a non-negative signed 32 bit integer
  // (measured in whole seconds based on the 1970 epoch).
  // We solve this by mapping the time to a year with same leap-year-ness
  // and same starting day for the year. The ECMAscript specification says
  // we must do this, but for compatibility with other browsers, we use
  // the actual year if it is in the range 1970..2037
  int64_t EquivalentTime(int64_t time_ms) {
    int days = DaysFromTime(time_ms);
    int time_within_day_ms = static_cast<int>(time_ms - days * kMsPerDay);
    int year, month, day;
    YearMonthDayFromDays(days, &year, &month, &day);
    // Rebuild the timestamp in the equivalent year, keeping month, day and
    // time-of-day unchanged.
    int new_days = DaysFromYearMonth(EquivalentYear(year), month) + day - 1;
    return static_cast<int64_t>(new_days) * kMsPerDay + time_within_day_ms;
  }
  // Returns an equivalent year in the range [2008-2035] matching
  // - leap year,
  // - week day of first day.
  // ECMA 262 - 15.9.1.9.
  int EquivalentYear(int year) {
    int week_day = Weekday(DaysFromYearMonth(year, 0));
    // Pick a reference year with matching leap-year-ness and first-day
    // weekday, then shift it into the modern range via the 28-year cycle.
    int recent_year = (IsLeap(year) ? 1956 : 1967) + (week_day * 12) % 28;
    // Find the year in the range 2008..2037 that is equivalent mod 28.
    // Add 3*28 to give a positive argument to the modulus operator.
    return 2008 + (recent_year + 3 * 28 - 2008) % 28;
  }
// Given the number of days since the epoch, computes
// the corresponding year, month, and day.
void YearMonthDayFromDays(int days, int* year, int* month, int* day);
// Computes the number of days since the epoch for
// the first day of the given month in the given year.
int DaysFromYearMonth(int year, int month);
// Breaks down the time value.
void BreakDownTime(int64_t time_ms, int* year, int* month, int* day,
int* weekday, int* hour, int* min, int* sec, int* ms);
  // Cache stamp is used for invalidating caches in JSDate.
  // We increment the stamp each time when the timezone information changes.
  // JSDate objects perform stamp check and invalidate their caches if
  // their saved stamp is not equal to the current stamp.
  Smi stamp() { return stamp_; }
  // Raw address of the stamp field.
  void* stamp_address() { return &stamp_; }
  // These functions are virtual so that we can override them when testing.
  virtual int GetDaylightSavingsOffsetFromOS(int64_t time_sec) {
    // The OS timezone cache expects milliseconds.
    double time_ms = static_cast<double>(time_sec * 1000);
    return static_cast<int>(tz_cache_->DaylightSavingsOffset(time_ms));
  }
virtual int GetLocalOffsetFromOS(int64_t time_ms, bool is_utc);
private:
// The implementation relies on the fact that no time zones have
// more than one daylight savings offset change per 19 days.
// In Egypt in 2010 they decided to suspend DST during Ramadan. This
// led to a short interval where DST is in effect from September 10 to
// September 30.
static const int kDefaultDSTDeltaInSec = 19 * kSecPerDay;
// Size of the Daylight Savings Time cache.
static const int kDSTSize = 32;
// Daylight Savings Time segment stores a segment of time where
// daylight savings offset does not change.
struct DST {
int start_sec;
int end_sec;
int offset_ms;
int last_used;
};
// Computes the daylight savings offset for the given time.
// ECMA 262 - 15.9.1.8
int DaylightSavingsOffsetInMs(int64_t time_ms);
// Sets the before_ and the after_ segments from the DST cache such that
// the before_ segment starts earlier than the given time and
// the after_ segment start later than the given time.
// Both segments might be invalid.
// The last_used counters of the before_ and after_ are updated.
void ProbeDST(int time_sec);
// Finds the least recently used segment from the DST cache that is not
// equal to the given 'skip' segment.
DST* LeastRecentlyUsedDST(DST* skip);
// Extends the after_ segment with the given point or resets it
// if it starts later than the given time + kDefaultDSTDeltaInSec.
inline void ExtendTheAfterSegment(int time_sec, int offset_ms);
// Makes the given segment invalid.
inline void ClearSegment(DST* segment);
  // A segment is invalid when its start is after its end.
  bool InvalidSegment(DST* segment) {
    return segment->start_sec > segment->end_sec;
  }
Smi stamp_;
// Daylight Saving Time cache.
DST dst_[kDSTSize];
int dst_usage_counter_;
DST* before_;
DST* after_;
int local_offset_ms_;
// Year/Month/Day cache.
bool ymd_valid_;
int ymd_days_;
int ymd_year_;
int ymd_month_;
int ymd_day_;
// Timezone name cache
const char* tz_name_;
const char* dst_tz_name_;
base::TimezoneCache* tz_cache_;
};
} // namespace internal
} // namespace v8
#endif // V8_DATE_H_
| weolar/miniblink49 | v8_7_5/src/date.h | C | apache-2.0 | 8,226 |
//
// SZUserSettingsViewControllerIOS6.h
// Socialize
//
// Created by David Jedeikin on 1/6/14.
// Copyright (c) 2014 ShareThis. All rights reserved.
//
#import <Socialize/Socialize.h>
// iOS 6-specific variant of the user-settings view controller.
// Declares no additions of its own; behavior is inherited from
// SZUserSettingsViewController.
@interface SZUserSettingsViewControllerIOS6 : SZUserSettingsViewController
@end
| ngrebenshikov/SisyphusHill | src/pyramid-xcode/Pyramid/Pods/Socialize/Socialize/SZUserSettingsViewControllerIOS6.h | C | apache-2.0 | 273 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This file implements logic for lowering MHLO dialect to Standard dialect.
#include "llvm/ADT/StringSwitch.h"
#include "mlir-hlo/Dialect/mhlo/IR/hlo_ops.h"
#include "mlir-hlo/Dialect/mhlo/transforms/passes.h"
#include "mlir-hlo/Dialect/mhlo/transforms/rewriters.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/Function.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
namespace mlir {
namespace {
#include "generated_legalize_to_standard.inc"
} // end anonymous namespace
namespace mhlo {
namespace {
// Lowers mhlo.compare over same-shape signless-integer tensors to the
// standard dialect cmpi op with an equivalent (signed) predicate.
class CompareIConvert : public OpRewritePattern<mhlo::CompareOp> {
 public:
  using OpRewritePattern::OpRewritePattern;
  LogicalResult matchAndRewrite(mhlo::CompareOp op,
                                PatternRewriter &rewriter) const override {
    auto lhs = op.lhs();
    auto rhs = op.rhs();
    auto lhs_type = lhs.getType().cast<TensorType>();
    auto rhs_type = rhs.getType().cast<TensorType>();
    // Broadcasting not supported by this rewrite.
    if (lhs_type.getShape() != rhs_type.getShape()) return failure();
    if (!lhs_type.getElementType().isSignlessInteger() ||
        !rhs_type.getElementType().isSignlessInteger())
      return failure();
    // Unknown comparison directions map to None and the pattern fails.
    auto comparison_direction = op.comparison_direction();
    auto compare_predicate =
        llvm::StringSwitch<Optional<CmpIPredicate>>(comparison_direction)
            .Case("EQ", CmpIPredicate::eq)
            .Case("NE", CmpIPredicate::ne)
            .Case("LT", CmpIPredicate::slt)
            .Case("LE", CmpIPredicate::sle)
            .Case("GT", CmpIPredicate::sgt)
            .Case("GE", CmpIPredicate::sge)
            .Default(llvm::None);
    if (!compare_predicate.hasValue()) return failure();
    rewriter.replaceOpWithNewOp<CmpIOp>(op, compare_predicate.getValue(), lhs,
                                        rhs);
    return success();
  }
};
// Lowers mhlo.compare over same-shape floating-point tensors to the
// standard dialect cmpf op. NE maps to the unordered UNE predicate; all
// other directions use ordered predicates.
class CompareFConvert : public OpRewritePattern<mhlo::CompareOp> {
 public:
  using OpRewritePattern::OpRewritePattern;
  LogicalResult matchAndRewrite(mhlo::CompareOp op,
                                PatternRewriter &rewriter) const override {
    auto lhs = op.lhs();
    auto rhs = op.rhs();
    auto lhs_type = lhs.getType().cast<TensorType>();
    auto rhs_type = rhs.getType().cast<TensorType>();
    // Broadcasting not supported by this rewrite.
    if (lhs_type.getShape() != rhs_type.getShape()) return failure();
    if (!lhs_type.getElementType().isa<FloatType>() ||
        !rhs_type.getElementType().isa<FloatType>())
      return failure();
    auto comparison_direction = op.comparison_direction();
    auto compare_predicate =
        llvm::StringSwitch<Optional<CmpFPredicate>>(comparison_direction)
            .Case("EQ", CmpFPredicate::OEQ)
            .Case("NE", CmpFPredicate::UNE)
            .Case("LT", CmpFPredicate::OLT)
            .Case("LE", CmpFPredicate::OLE)
            .Case("GT", CmpFPredicate::OGT)
            .Case("GE", CmpFPredicate::OGE)
            .Default(llvm::None);
    if (!compare_predicate.hasValue()) return failure();
    rewriter.replaceOpWithNewOp<CmpFOp>(op, compare_predicate.getValue(), lhs,
                                        rhs);
    return success();
  }
};
// Replace IotaOp with an integer constant. A ConvertOp is added to
// convert the integer constant to iota result type. For complex types, the real
// part is replaced with the generated constant and the imaginary part is
// replaced with zero tensor.
class ConvertIotaOp : public OpRewritePattern<mhlo::IotaOp> {
 public:
  using OpRewritePattern::OpRewritePattern;
  LogicalResult matchAndRewrite(mhlo::IotaOp op,
                                PatternRewriter &rewriter) const override {
    auto output_type = op.getType().cast<ShapedType>();
    auto output_size = output_type.getNumElements();
    auto dimension = op.iota_dimension();
    auto max_dim_size = output_type.getDimSize(dimension);
    auto element_type = output_type.getElementType();
    int bitwidth;
    // For complex results, generate over the underlying int/float type.
    auto complex_ty = element_type.dyn_cast<ComplexType>();
    Type int_or_float_ty = element_type;
    if (complex_ty) int_or_float_ty = complex_ty.getElementType();
    bitwidth = int_or_float_ty.getIntOrFloatBitWidth();
    llvm::SmallVector<APInt, 10> values;
    values.reserve(output_size);
    // Number of consecutive linear-index elements that share one value
    // along the iota dimension.
    int64_t increase_stride = output_size;
    for (int i = 0; i <= dimension; i++) {
      increase_stride /= output_type.getDimSize(i);
    }
    int64_t current_value = 0;
    for (int i = 0; i < output_size; i++) {
      int64_t value = (current_value / increase_stride) % max_dim_size;
      values.push_back(APInt(bitwidth, value));
      ++current_value;
    }
    auto int_shape_type = RankedTensorType::get(
        output_type.getShape(),
        IntegerType::get(bitwidth, rewriter.getContext()));
    auto loc = op.getLoc();
    auto integer_const = rewriter.create<mlir::ConstantOp>(
        loc, DenseIntElementsAttr::get(int_shape_type, values));
    auto int_or_float_shape_ty =
        RankedTensorType::get(output_type.getShape(), int_or_float_ty);
    auto iota_const =
        rewriter.create<ConvertOp>(loc, int_or_float_shape_ty, integer_const);
    // For int/float types we are done, replace op and return.
    if (!complex_ty) {
      rewriter.replaceOp(op, iota_const.getResult());
      return success();
    }
    // For complex types, generate a constant tensor of zeroes for the imaginary
    // part and use iota_const for real part.
    auto zeroes = rewriter.create<mlir::ConstantOp>(
        loc, DenseIntElementsAttr::get(int_shape_type, APInt(bitwidth, 0)));
    auto imag_zeroes =
        rewriter.create<ConvertOp>(loc, int_or_float_shape_ty, zeroes);
    rewriter.replaceOpWithNewOp<mhlo::ComplexOp>(op, iota_const, imag_zeroes);
    return success();
  }
};
} // end anonymous namespace
namespace {
// Function pass that greedily applies the MHLO->Standard rewrite patterns.
struct LegalizeToStandardPass
    : public PassWrapper<LegalizeToStandardPass, FunctionPass> {
  void getDependentDialects(DialectRegistry &registry) const override {
    registry.insert<StandardOpsDialect>();
  }
  /// Perform the lowering to Standard dialect.
  void runOnFunction() override;
};
}  // end anonymous namespace
std::unique_ptr<mlir::OperationPass<mlir::FuncOp>> createLegalizeToStdPass() {
  return std::make_unique<LegalizeToStandardPass>();
}
// Collects both the TableGen-generated patterns and the hand-written
// patterns defined above.
void PopulateMhloToStdPatterns(OwningRewritePatternList *patterns,
                               mlir::MLIRContext *ctx) {
  mlir::populateWithGenerated(ctx, patterns);
  patterns->insert<CompareFConvert, CompareIConvert, ConvertIotaOp>(ctx);
}
/// Perform the lowering to standard dialect.
void LegalizeToStandardPass::runOnFunction() {
  OwningRewritePatternList patterns;
  mlir::mhlo::PopulateMhloToStdPatterns(&patterns, &getContext());
  applyPatternsAndFoldGreedily(getFunction(), patterns);
}
} // end namespace mhlo
} // end namespace mlir
| karllessard/tensorflow | tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/legalize_to_standard.cc | C++ | apache-2.0 | 7,534 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.uima.annotator.regex;
import java.util.regex.Pattern;
/**
 * RegexVariables interface. Holds named variables that can be referenced
 * inside regular expression patterns via the <code>\v{name}</code> syntax.
 */
public interface RegexVariables {
   // Prefix that introduces a variable reference in a pattern.
   public static final String VARIABLE_START = "\\v";
   public static final String VARIABLE_REGEX_BEGIN = "\\\\v\\{";
   public static final String VARIABLE_REGEX_END = "\\}";
   // Matches \v{name}; group(1) captures the variable name.
   public static final Pattern VARIABLE_REGEX_PATTERN = Pattern
         .compile(VARIABLE_REGEX_BEGIN + "(\\w+)" + VARIABLE_REGEX_END);
   /**
    * Adds a variable to the Variables object.
    *
    * @param varName
    *          variable name
    *
    * @param varValue
    *          variable value
    */
   public void addVariable(String varName, String varValue);
   /**
    * Returns the value of the specified variable or <code>null</code> if the
    * variable does not exist.
    *
    * @param varName
    *          variable name
    *
    * @return the variable value, or <code>null</code> if the variable
    *         does not exist
    */
   public String getVariableValue(String varName);
} | jgrivolla/uima-addons | RegularExpressionAnnotator/src/main/java/org/apache/uima/annotator/regex/RegexVariables.java | Java | apache-2.0 | 1,870 |
/*
* The Alluxio Open Foundation licenses this work under the Apache License, version 2.0
* (the "License"). You may not use this work except in compliance with the License, which is
* available at www.apache.org/licenses/LICENSE-2.0
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied, as more fully set forth in the License.
*
* See the NOTICE file distributed with this work for information regarding copyright ownership.
*/
package alluxio.client.block.stream;
import alluxio.conf.AlluxioConfiguration;
import alluxio.conf.PropertyKey;
import alluxio.exception.status.AlluxioStatusException;
import alluxio.exception.status.UnauthenticatedException;
import alluxio.grpc.BlockWorkerGrpc;
import alluxio.grpc.CacheRequest;
import alluxio.grpc.ClearMetricsRequest;
import alluxio.grpc.ClearMetricsResponse;
import alluxio.grpc.CreateLocalBlockRequest;
import alluxio.grpc.CreateLocalBlockResponse;
import alluxio.grpc.DataMessageMarshaller;
import alluxio.grpc.DataMessageMarshallerProvider;
import alluxio.grpc.GrpcChannel;
import alluxio.grpc.GrpcChannelBuilder;
import alluxio.grpc.GrpcNetworkGroup;
import alluxio.grpc.GrpcSerializationUtils;
import alluxio.grpc.GrpcServerAddress;
import alluxio.grpc.MoveBlockRequest;
import alluxio.grpc.MoveBlockResponse;
import alluxio.grpc.OpenLocalBlockRequest;
import alluxio.grpc.OpenLocalBlockResponse;
import alluxio.grpc.ReadRequest;
import alluxio.grpc.ReadResponse;
import alluxio.grpc.RemoveBlockRequest;
import alluxio.grpc.RemoveBlockResponse;
import alluxio.grpc.WriteRequest;
import alluxio.grpc.WriteResponse;
import alluxio.resource.AlluxioResourceLeakDetectorFactory;
import alluxio.retry.RetryPolicy;
import alluxio.retry.RetryUtils;
import alluxio.security.user.UserState;
import com.google.common.base.Preconditions;
import com.google.common.io.Closer;
import io.grpc.StatusRuntimeException;
import io.grpc.stub.StreamObserver;
import io.netty.util.ResourceLeakDetector;
import io.netty.util.ResourceLeakTracker;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nullable;
/**
 * Default implementation of {@link BlockWorkerClient}.
 *
 * <p>Maintains two gRPC channels to the same worker: a non-pooled streaming
 * channel for block data transfer, and a pooled channel for short RPC calls.
 */
public class DefaultBlockWorkerClient implements BlockWorkerClient {
  private static final Logger LOG =
      LoggerFactory.getLogger(DefaultBlockWorkerClient.class.getName());
  // Reports instances that get garbage-collected without being closed.
  private static final ResourceLeakDetector<DefaultBlockWorkerClient> DETECTOR =
      AlluxioResourceLeakDetectorFactory.instance()
          .newResourceLeakDetector(DefaultBlockWorkerClient.class);
  private GrpcChannel mStreamingChannel;
  private GrpcChannel mRpcChannel;
  private GrpcServerAddress mAddress;
  private final long mRpcTimeoutMs;
  private BlockWorkerGrpc.BlockWorkerStub mStreamingAsyncStub;
  private BlockWorkerGrpc.BlockWorkerBlockingStub mRpcBlockingStub;
  private BlockWorkerGrpc.BlockWorkerStub mRpcAsyncStub;
  @Nullable
  private final ResourceLeakTracker<DefaultBlockWorkerClient> mTracker;
  /**
   * Creates a client instance for communicating with block worker.
   *
   * @param userState the user state
   * @param address the address of the worker
   * @param alluxioConf Alluxio configuration
   */
  public DefaultBlockWorkerClient(UserState userState, GrpcServerAddress address,
      AlluxioConfiguration alluxioConf) throws IOException {
    RetryPolicy retryPolicy = RetryUtils.defaultClientRetry(
        alluxioConf.getDuration(PropertyKey.USER_RPC_RETRY_MAX_DURATION),
        alluxioConf.getDuration(PropertyKey.USER_RPC_RETRY_BASE_SLEEP_MS),
        alluxioConf.getDuration(PropertyKey.USER_RPC_RETRY_MAX_SLEEP_MS));
    UnauthenticatedException lastException = null;
    // TODO(feng): unify worker client with AbstractClient
    while (retryPolicy.attempt()) {
      try {
        // Disables channel pooling for data streaming to achieve better throughput.
        // Channel is still reused due to client pooling.
        mStreamingChannel = GrpcChannelBuilder.newBuilder(address, alluxioConf)
            .setSubject(userState.getSubject())
            .setNetworkGroup(GrpcNetworkGroup.STREAMING)
            .setClientType("DefaultBlockWorkerClient-Stream")
            .build();
        mStreamingChannel.intercept(new StreamSerializationClientInterceptor());
        // Uses default pooling strategy for RPC calls for better scalability.
        mRpcChannel = GrpcChannelBuilder.newBuilder(address, alluxioConf)
            .setSubject(userState.getSubject())
            .setNetworkGroup(GrpcNetworkGroup.RPC)
            .setClientType("DefaultBlockWorkerClient-Rpc")
            .build();
        lastException = null;
        break;
      } catch (StatusRuntimeException e) {
        // Non-auth failures are not retried here; clean up and surface them.
        close();
        throw AlluxioStatusException.fromStatusRuntimeException(e);
      } catch (UnauthenticatedException e) {
        // Credentials may have expired: re-login, then let the retry policy
        // decide whether to attempt the connection again.
        close();
        userState.relogin();
        lastException = e;
      }
    }
    if (lastException != null) {
      throw lastException;
    }
    mStreamingAsyncStub = BlockWorkerGrpc.newStub(mStreamingChannel);
    mRpcBlockingStub = BlockWorkerGrpc.newBlockingStub(mRpcChannel);
    mRpcAsyncStub = BlockWorkerGrpc.newStub(mRpcChannel);
    mAddress = address;
    mRpcTimeoutMs = alluxioConf.getMs(PropertyKey.USER_RPC_RETRY_MAX_DURATION);
    mTracker = DETECTOR.track(this);
  }
  @Override
  public boolean isShutdown() {
    return mStreamingChannel.isShutdown() || mRpcChannel.isShutdown();
  }
  @Override
  public boolean isHealthy() {
    // Healthy only when neither channel is shut down and both report healthy.
    return !isShutdown() && mStreamingChannel.isHealthy() && mRpcChannel.isHealthy();
  }
  @Override
  public void close() throws IOException {
    // Closer ensures every registered shutdown step runs, even if an
    // earlier one throws.
    try (Closer closer = Closer.create()) {
      closer.register(() -> {
        if (mStreamingChannel != null) {
          mStreamingChannel.shutdown();
        }
      });
      closer.register(() -> {
        if (mRpcChannel != null) {
          mRpcChannel.shutdown();
        }
      });
      closer.register(() -> {
        if (mTracker != null) {
          mTracker.close(this);
        }
      });
    }
  }
  @Override
  public StreamObserver<WriteRequest> writeBlock(StreamObserver<WriteResponse> responseObserver) {
    if (responseObserver instanceof DataMessageMarshallerProvider) {
      // A custom marshaller is supplied: override the method descriptor so
      // the request payload uses it instead of the default serialization.
      DataMessageMarshaller<WriteRequest> marshaller =
          ((DataMessageMarshallerProvider<WriteRequest, WriteResponse>) responseObserver)
              .getRequestMarshaller();
      Preconditions.checkNotNull(marshaller, "marshaller");
      return mStreamingAsyncStub
          .withOption(GrpcSerializationUtils.OVERRIDDEN_METHOD_DESCRIPTOR,
              BlockWorkerGrpc.getWriteBlockMethod().toBuilder()
                  .setRequestMarshaller(marshaller)
                  .build())
          .writeBlock(responseObserver);
    } else {
      return mStreamingAsyncStub.writeBlock(responseObserver);
    }
  }
  @Override
  public StreamObserver<ReadRequest> readBlock(StreamObserver<ReadResponse> responseObserver) {
    if (responseObserver instanceof DataMessageMarshallerProvider) {
      // Mirror of writeBlock: override the response marshaller when provided.
      DataMessageMarshaller<ReadResponse> marshaller =
          ((DataMessageMarshallerProvider<ReadRequest, ReadResponse>) responseObserver)
              .getResponseMarshaller();
      Preconditions.checkNotNull(marshaller);
      return mStreamingAsyncStub
          .withOption(GrpcSerializationUtils.OVERRIDDEN_METHOD_DESCRIPTOR,
              BlockWorkerGrpc.getReadBlockMethod().toBuilder()
                  .setResponseMarshaller(marshaller)
                  .build())
          .readBlock(responseObserver);
    } else {
      return mStreamingAsyncStub.readBlock(responseObserver);
    }
  }
  @Override
  public StreamObserver<CreateLocalBlockRequest> createLocalBlock(
      StreamObserver<CreateLocalBlockResponse> responseObserver) {
    return mStreamingAsyncStub.createLocalBlock(responseObserver);
  }
  @Override
  public StreamObserver<OpenLocalBlockRequest> openLocalBlock(
      StreamObserver<OpenLocalBlockResponse> responseObserver) {
    return mStreamingAsyncStub.openLocalBlock(responseObserver);
  }
  @Override
  public RemoveBlockResponse removeBlock(final RemoveBlockRequest request) {
    return mRpcBlockingStub.withDeadlineAfter(mRpcTimeoutMs, TimeUnit.MILLISECONDS)
        .removeBlock(request);
  }
  @Override
  public MoveBlockResponse moveBlock(MoveBlockRequest request) {
    return mRpcBlockingStub.withDeadlineAfter(mRpcTimeoutMs, TimeUnit.MILLISECONDS)
        .moveBlock(request);
  }
  @Override
  public ClearMetricsResponse clearMetrics(ClearMetricsRequest request) {
    return mRpcBlockingStub.withDeadlineAfter(mRpcTimeoutMs, TimeUnit.MILLISECONDS)
        .clearMetrics(request);
  }
  @Override
  public void cache(CacheRequest request) {
    boolean async = request.getAsync();
    try {
      mRpcBlockingStub.withDeadlineAfter(mRpcTimeoutMs, TimeUnit.MILLISECONDS).cache(request);
    } catch (Exception e) {
      // Synchronous cache requests propagate the failure; async requests
      // are best-effort, so the error is only logged.
      if (!async) {
        throw e;
      }
      LOG.warn("Error sending async cache request {} to worker {}.", request, mAddress, e);
    }
  }
}
| wwjiang007/alluxio | core/client/fs/src/main/java/alluxio/client/block/stream/DefaultBlockWorkerClient.java | Java | apache-2.0 | 9,174 |
/*
* Copyright (c) 2005-2010 Grameen Foundation USA
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*
* See also http://www.apache.org/licenses/LICENSE-2.0.html for an
* explanation of the license and how it is applied.
*/
package org.mifos.test.acceptance.framework.loan;
/**
 * Parameter object holding the client search string and the loan product
 * name used when creating a loan account.
 */
public class CreateLoanAccountSearchParameters {
    private String searchString;
    private String loanProduct;

    public String getLoanProduct() {
        return loanProduct;
    }

    public void setLoanProduct(final String loanProduct) {
        this.loanProduct = loanProduct;
    }

    public String getSearchString() {
        return searchString;
    }

    public void setSearchString(final String searchString) {
        this.searchString = searchString;
    }
}
| vorburger/mifos-head | acceptanceTests/src/test/java/org/mifos/test/acceptance/framework/loan/CreateLoanAccountSearchParameters.java | Java | apache-2.0 | 1,290 |
/*
* Copyright 2014 Realm Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.realm.internal.test;
import io.realm.internal.DefineTable;
/**
 * A helper class containing model(s) for simple code generation tests.
 */
class CodeGenTest {
    @DefineTable // this is enabled only for occasional local tests
    class someModel {
        // Fields the annotation processor's code generation is exercised on.
        String name;
        int age;
    }
}
| ShikaSD/realm-java | realm/realm-library/src/androidTest/java/io/realm/internal/test/CodeGenTest.java | Java | apache-2.0 | 901 |
/*
mustache.js — Logic-less templates in JavaScript
See http://mustache.github.com/ for more info.
*/
var Mustache = function() {
  // Renderer holds all template parsing/rendering state; the object
  // returned at the bottom of this file is a thin public facade over it.
  var Renderer = function() {};
  Renderer.prototype = {
    otag: "{{",   // opening tag delimiter (changeable via {{=... ...=}})
    ctag: "}}",   // closing tag delimiter
    pragmas: {},  // pragmas seen in the current template
    buffer: [],   // accumulated output lines
    pragmas_implemented: {
      "IMPLICIT-ITERATOR": true,
      "TRANSLATION-HINT": true
    },
    context: {},  // top-level view object for the current render
    /*
      Main entry point: renders `template` against `context` and `partials`.
      When in_recursion is true the rendered string is returned; otherwise
      output is emitted line-by-line through send().
    */
    render: function(template, context, partials, in_recursion) {
      // reset buffer & set context
      if(!in_recursion) {
        this.context = context;
        this.buffer = []; // TODO: make this non-lazy
      }
      // fail fast: no opening tag at all means nothing to substitute
      if(!this.includes("", template)) {
        if(in_recursion) {
          return template;
        } else {
          this.send(template);
          return;
        }
      }
      // Branching or moving down the partial stack, save any translation mode info.
      if (this.pragmas['TRANSLATION-HINT']) {
        context['_mode'] = this.pragmas['TRANSLATION-HINT']['mode'];
      }
      template = this.render_pragmas(template);
      template = this.render_i18n(template, context, partials);
      var html = this.render_section(template, context, partials);
      // If render_section changed nothing, there were no sections: only
      // simple tags remain to expand.
      if (html === template) {
        if (in_recursion) {
          return this.render_tags(html, context, partials, true);
        }
        this.render_tags(html, context, partials, false);
      } else {
        if(in_recursion) {
          return html;
        } else {
          var lines = html.split("\n");
          for (var i = 0; i < lines.length; i++) {
            this.send(lines[i]);
          }
          return;
        }
      }
    },
    /*
      Sends parsed lines to the output buffer; empty lines are dropped.
    */
    send: function(line) {
      if(line != "") {
        this.buffer.push(line);
      }
    },
    /*
      Looks for %PRAGMAS, records them in this.pragmas and strips them
      from the template.
    */
    render_pragmas: function(template) {
      // no pragmas
      if(!this.includes("%", template)) {
        return template;
      }
      var that = this;
      var regex = new RegExp(this.otag + "%([\\w-]+) ?([\\w]+=[\\w]+)?" +
            this.ctag);
      return template.replace(regex, function(match, pragma, options) {
        if(!that.pragmas_implemented[pragma]) {
          throw({message:
            "This implementation of mustache doesn't understand the '" +
            pragma + "' pragma"});
        }
        that.pragmas[pragma] = {};
        // Optional "key=value" payload, e.g. iterator=rows.
        if(options) {
          var opts = options.split("=");
          that.pragmas[pragma][opts[0]] = opts[1];
        }
        return "";
        // ignore unknown pragmas silently
      });
    },
    /*
      Tries to find a partial in the current scope and render it
    */
    render_partial: function(name, context, partials) {
      name = this.trim(name);
      if(!partials || partials[name] === undefined) {
        throw({message: "unknown_partial '" + name + "'"});
      }
      // When the context holds an object under the partial's name, that
      // object becomes the partial's context.
      if(typeof(context[name]) != "object") {
        return this.render(partials[name], context, partials, true);
      }
      return this.render(partials[name], context[name], partials, true);
    },
    /*
      Expands {{_i}}...{{/i}} translation sections. NOTE(review): relies on
      a global translation function `_` supplied by the host environment —
      it is not defined in this file.
    */
    render_i18n: function(html, context, partials) {
      if (html.indexOf(this.otag + "_i") == -1) {
        return html;
      }
      var that = this;
      var regex = new RegExp(this.otag + "\\_i" + this.ctag +
        "\\s*([\\s\\S]+?)" + this.otag + "\\/i" + this.ctag, "mg");
      // for each {{_i}}{{/i}} section do...
      return html.replace(regex, function(match, content) {
        var translation_mode = undefined;
        if (that.pragmas && that.pragmas["TRANSLATION-HINT"] && that.pragmas["TRANSLATION-HINT"]['mode']) {
          translation_mode = { _mode: that.pragmas["TRANSLATION-HINT"]['mode'] };
        } else if (context['_mode']) {
          translation_mode = { _mode: context['_mode'] };
        }
        return that.render(_(content, translation_mode), context, partials, true);
      });
    },
    /*
      Renders inverted (^) and normal (#) sections
    */
    render_section: function(template, context, partials) {
      if(!this.includes("#", template) && !this.includes("^", template)) {
        return template;
      }
      var that = this;
      // This regex matches _the first_ section ({{#foo}}{{/foo}}), and captures the remainder
      var regex = new RegExp(
        "^([\\s\\S]*?)" +         // all the crap at the beginning that is not {{*}} ($1)
        this.otag +               // {{
        "(\\^|\\#)\\s*(.+)\\s*" + //  #foo (# == $2, foo == $3)
        this.ctag +               // }}
        "\n*([\\s\\S]*?)" +       // between the tag ($2). leading newlines are dropped
        this.otag +               // {{
        "\\/\\s*\\3\\s*" +        // /foo (backreference to the opening tag).
        this.ctag +               // }}
        "\\s*([\\s\\S]*)$",       // everything else in the string ($4). leading whitespace is dropped.
      "g");
      // for each {{#foo}}{{/foo}} section do...
      return template.replace(regex, function(match, before, type, name, content, after) {
        // before contains only tags, no sections
        var renderedBefore = before ? that.render_tags(before, context, partials, true) : "",
            // after may contain both sections and tags, so use full rendering function
            renderedAfter = after ? that.render(after, context, partials, true) : "";
        var value = that.find(name, context);
        if(type == "^") { // inverted section
          if(!value || that.is_array(value) && value.length === 0) {
            // false or empty list, render it
            return renderedBefore + that.render(content, context, partials, true) + renderedAfter;
          } else {
            return renderedBefore + "" + renderedAfter;
          }
        } else if(type == "#") { // normal section
          if(that.is_array(value)) { // Enumerable, Let's loop!
            return renderedBefore + that.map(value, function(row) {
              return that.render(content, that.create_context(row), partials, true);
            }).join("") + renderedAfter;
          } else if(that.is_object(value)) { // Object, Use it as subcontext!
            return renderedBefore + that.render(content, that.create_context(value),
              partials, true) + renderedAfter;
          } else if(typeof value === "function") {
            // higher order section
            return renderedBefore + value.call(context, content, function(text) {
              return that.render(text, context, partials, true);
            }) + renderedAfter;
          } else if(value) { // boolean section
            return renderedBefore + that.render(content, context, partials, true) + renderedAfter;
          } else {
            return renderedBefore + "" + renderedAfter;
          }
        }
      });
    },
    /*
      Replace {{foo}} and friends with values from our view
    */
    render_tags: function(template, context, partials, in_recursion) {
      // tit for tat
      var that = this;
      // Regex is rebuilt through this factory whenever {{=...=}} swaps
      // the delimiters mid-template.
      var new_regex = function() {
        return new RegExp(that.otag + "(=|!|>|\\{|%)?([^\\/#\\^]+?)\\1?" +
          that.ctag + "+", "g");
      };
      var regex = new_regex();
      var tag_replace_callback = function(match, operator, name) {
        switch(operator) {
        case "!": // ignore comments
          return "";
        case "=": // set new delimiters, rebuild the replace regexp
          that.set_delimiters(name);
          regex = new_regex();
          return "";
        case ">": // render partial
          return that.render_partial(name, context, partials);
        case "{": // the triple mustache is unescaped
          return that.find(name, context);
        default: // escape the value
          return that.escape(that.find(name, context));
        }
      };
      var lines = template.split("\n");
      for(var i = 0; i < lines.length; i++) {
        lines[i] = lines[i].replace(regex, tag_replace_callback, this);
        if(!in_recursion) {
          this.send(lines[i]);
        }
      }
      if(in_recursion) {
        return lines.join("\n");
      }
    },
    // Installs new tag delimiters from an "{{=<% %>=}}" style directive.
    set_delimiters: function(delimiters) {
      var dels = delimiters.split(" ");
      this.otag = this.escape_regex(dels[0]);
      this.ctag = this.escape_regex(dels[1]);
    },
    // Escapes regex metacharacters so delimiters can be used in patterns.
    escape_regex: function(text) {
      // thank you Simon Willison
      if(!arguments.callee.sRE) {
        var specials = [
          '/', '.', '*', '+', '?', '|',
          '(', ')', '[', ']', '{', '}', '\\'
        ];
        arguments.callee.sRE = new RegExp(
          '(\\' + specials.join('|\\') + ')', 'g'
        );
      }
      return text.replace(arguments.callee.sRE, '\\$1');
    },
    /*
      find `name` in current `context`. That is find me a value
      from the view object
    */
    find: function(name, context) {
      name = this.trim(name);
      // Checks whether a value is thruthy or false or 0
      function is_kinda_truthy(bool) {
        return bool === false || bool === 0 || bool;
      }
      var value;
      // Local context first, then fall back to the top-level context.
      if(is_kinda_truthy(context[name])) {
        value = context[name];
      } else if(is_kinda_truthy(this.context[name])) {
        value = this.context[name];
      }
      // Lambdas are invoked with the local context as `this`.
      if(typeof value === "function") {
        return value.apply(context);
      }
      if(value !== undefined) {
        return value;
      }
      // silently ignore unkown variables
      return "";
    },
    // Utility methods
    /* true when the template contains the opening tag followed by `needle` */
    includes: function(needle, haystack) {
      return haystack.indexOf(this.otag + needle) != -1;
    },
/*
Does away with nasty characters
*/
escape: function(s) {
s = String(s === null ? "" : s);
return s.replace(/&(?!\w+;)|["'<>\\]/g, function(s) {
switch(s) {
case "&": return "&";
case "\\": return "\\\\";
case '"': return '"';
case "'": return ''';
case "<": return "<";
case ">": return ">";
default: return s;
}
});
},
// by @langalex, support for arrays of strings
create_context: function(_context) {
if(this.is_object(_context)) {
return _context;
} else {
var iterator = ".";
if(this.pragmas["IMPLICIT-ITERATOR"]) {
iterator = this.pragmas["IMPLICIT-ITERATOR"].iterator;
}
var ctx = {};
ctx[iterator] = _context;
return ctx;
}
},
is_object: function(a) {
return a && typeof a == "object";
},
is_array: function(a) {
return Object.prototype.toString.call(a) === '[object Array]';
},
/*
Gets rid of leading and trailing whitespace
*/
trim: function(s) {
return s.replace(/^\s*|\s*$/g, "");
},
/*
Why, why, why? Because IE. Cry, cry cry.
*/
map: function(array, fn) {
if (typeof array.map == "function") {
return array.map(fn);
} else {
var r = [];
var l = array.length;
for(var i = 0; i < l; i++) {
r.push(fn(array[i]));
}
return r;
}
}
};
return({
name: "mustache.js",
version: "0.3.1-dev-twitter",
/*
Turns a template and view into HTML
*/
    to_html: function(template, view, partials, send_fun) {
      var renderer = new Renderer();
      // Streaming mode: each rendered line is pushed through send_fun.
      if(send_fun) {
        renderer.send = send_fun;
      }
      renderer.render(template, view || {}, partials);
      // Buffered mode: the renderer collected lines; join and return them.
      if(!send_fun) {
        return renderer.buffer.join("\n");
      }
    }
});
}();
| EHJ-52n/js-sensorweb-client | src/main/js/libs/mustache.js | JavaScript | apache-2.0 | 11,529 |
/*
* $Id: WrapperClassBean.java 799110 2009-07-29 22:44:26Z musachy $
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.struts2.json;
import java.util.List;
import java.util.Map;
/**
 * Test fixture bean exposing one property per common field kind (wrapper
 * types, primitives, generic collections, arrays of maps) so JSON
 * serialization and deserialization can be exercised for each of them.
 */
public class WrapperClassBean {
    private String stringField;
    private Integer intField;
    // primitive counterpart used to check how null JSON values are handled
    private int nullIntField;
    private Boolean booleanField;
    private boolean primitiveBooleanField1;
    private boolean primitiveBooleanField2;
    private boolean primitiveBooleanField3;
    private Character charField;
    private Long longField;
    private Float floatField;
    private Double doubleField;
    private Object objectField;
    private Byte byteField;
    // generic collection shapes: list of beans, nested list/map combinations,
    // and an array of maps
    private List<SimpleValue> listField;
    private List<Map<String, Long>> listMapField;
    private Map<String, List<Long>> mapListField;
    private Map<String, Long>[] arrayMapField;
    public List<SimpleValue> getListField() {
        return listField;
    }
    public void setListField(List<SimpleValue> listField) {
        this.listField = listField;
    }
    public List<Map<String, Long>> getListMapField() {
        return listMapField;
    }
    public void setListMapField(List<Map<String, Long>> listMapField) {
        this.listMapField = listMapField;
    }
    public Map<String, List<Long>> getMapListField() {
        return mapListField;
    }
    public void setMapListField(Map<String, List<Long>> mapListField) {
        this.mapListField = mapListField;
    }
    public Map<String, Long>[] getArrayMapField() {
        return arrayMapField;
    }
    public void setArrayMapField(Map<String, Long>[] arrayMapField) {
        this.arrayMapField = arrayMapField;
    }
    public Boolean getBooleanField() {
        return booleanField;
    }
    public void setBooleanField(Boolean booleanField) {
        this.booleanField = booleanField;
    }
    public boolean isPrimitiveBooleanField1() {
        return primitiveBooleanField1;
    }
    public void setPrimitiveBooleanField1(boolean primitiveBooleanField1) {
        this.primitiveBooleanField1 = primitiveBooleanField1;
    }
    public boolean isPrimitiveBooleanField2() {
        return primitiveBooleanField2;
    }
    public void setPrimitiveBooleanField2(boolean primitiveBooleanField2) {
        this.primitiveBooleanField2 = primitiveBooleanField2;
    }
    public boolean isPrimitiveBooleanField3() {
        return primitiveBooleanField3;
    }
    public void setPrimitiveBooleanField3(boolean primitiveBooleanField3) {
        this.primitiveBooleanField3 = primitiveBooleanField3;
    }
    public Byte getByteField() {
        return byteField;
    }
    public void setByteField(Byte byteField) {
        this.byteField = byteField;
    }
    public Character getCharField() {
        return charField;
    }
    public void setCharField(Character charField) {
        this.charField = charField;
    }
    public Double getDoubleField() {
        return doubleField;
    }
    public void setDoubleField(Double doubleField) {
        this.doubleField = doubleField;
    }
    public Float getFloatField() {
        return floatField;
    }
    public void setFloatField(Float floatField) {
        this.floatField = floatField;
    }
    public Integer getIntField() {
        return intField;
    }
    public void setIntField(Integer intField) {
        this.intField = intField;
    }
    public int getNullIntField() {
        return nullIntField;
    }
    public void setNullIntField(int nullIntField) {
        this.nullIntField = nullIntField;
    }
    public Long getLongField() {
        return longField;
    }
    public void setLongField(Long longField) {
        this.longField = longField;
    }
    public Object getObjectField() {
        return objectField;
    }
    public void setObjectField(Object objectField) {
        this.objectField = objectField;
    }
    public String getStringField() {
        return stringField;
    }
    public void setStringField(String stringField) {
        this.stringField = stringField;
    }
}
| WillJiang/WillJiang | src/plugins/json/src/test/java/org/apache/struts2/json/WrapperClassBean.java | Java | apache-2.0 | 4,821 |
package alien4cloud.tosca.parser.mapping.generator;
import java.io.IOException;
import java.util.AbstractMap;
import java.util.Map;
import javax.annotation.PostConstruct;
import javax.annotation.Resource;
import lombok.extern.slf4j.Slf4j;
import org.springframework.context.ApplicationContext;
import org.springframework.stereotype.Component;
import org.yaml.snakeyaml.nodes.MappingNode;
import org.yaml.snakeyaml.nodes.Node;
import org.yaml.snakeyaml.nodes.NodeTuple;
import org.yaml.snakeyaml.nodes.ScalarNode;
import org.yaml.snakeyaml.nodes.SequenceNode;
import alien4cloud.tosca.parser.IChecker;
import alien4cloud.tosca.parser.INodeParser;
import alien4cloud.tosca.parser.KeyValueMappingTarget;
import alien4cloud.tosca.parser.MappingTarget;
import alien4cloud.tosca.parser.ParserUtils;
import alien4cloud.tosca.parser.ParsingContextExecution;
import alien4cloud.tosca.parser.ParsingError;
import alien4cloud.tosca.parser.ParsingException;
import alien4cloud.tosca.parser.ParsingResult;
import alien4cloud.tosca.parser.YamlSimpleParser;
import alien4cloud.tosca.parser.impl.ErrorCode;
import alien4cloud.tosca.parser.impl.base.CheckedTypeNodeParser;
import alien4cloud.tosca.parser.impl.base.ScalarParser;
import alien4cloud.tosca.parser.impl.base.TypeNodeParser;
import alien4cloud.tosca.parser.mapping.DefaultParser;
import com.google.common.collect.Maps;
/**
* Load type mapping definition from yaml and add it to the type mapping registry.
*/
@Slf4j
@Component
public class MappingGenerator extends DefaultParser<Map<String, INodeParser>> {
    @Resource
    private ApplicationContext applicationContext;
    // Registries populated from the Spring context at startup:
    // parser beans keyed by class name, mapping builders keyed by their key,
    // and checkers keyed by their declared name.
    private Map<String, INodeParser> parsers = Maps.newHashMap();
    private Map<String, IMappingBuilder> mappingBuilders = Maps.newHashMap();
    private Map<String, IChecker> checkers = Maps.newHashMap();
    /**
     * Collects all INodeParser, IMappingBuilder and IChecker beans from the
     * application context into the lookup maps above.
     */
    @PostConstruct
    public void initialize() {
        Map<String, INodeParser> contextParsers = applicationContext.getBeansOfType(INodeParser.class);
        // register parsers based on their class name.
        for (INodeParser parser : contextParsers.values()) {
            parsers.put(parser.getClass().getName(), parser);
        }
        Map<String, IMappingBuilder> contextMappingBuilders = applicationContext.getBeansOfType(IMappingBuilder.class);
        for (IMappingBuilder mappingBuilder : contextMappingBuilders.values()) {
            mappingBuilders.put(mappingBuilder.getKey(), mappingBuilder);
        }
        Map<String, IChecker> contextCheckers = applicationContext.getBeansOfType(IChecker.class);
        for (IChecker checker : contextCheckers.values()) {
            checkers.put(checker.getName(), checker);
        }
    }
    /**
     * Loads a YAML mapping definition from the given Spring resource location
     * and returns the resulting parsers keyed by YAML type name.
     *
     * @throws ParsingException if the file cannot be read or contains errors
     */
    public Map<String, INodeParser> process(String resourceLocation) throws ParsingException {
        org.springframework.core.io.Resource resource = applicationContext.getResource(resourceLocation);
        YamlSimpleParser<Map<String, INodeParser>> nodeParser = new YamlSimpleParser<>(this);
        try {
            ParsingResult<Map<String, INodeParser>> result = nodeParser.parseFile(resource.getURI().toString(), resource.getFilename(),
                    resource.getInputStream(), null);
            if (result.getContext().getParsingErrors().isEmpty()) {
                return result.getResult();
            }
            throw new ParsingException(resource.getFilename(), result.getContext().getParsingErrors());
        } catch (IOException e) {
            log.error("Failed to open stream", e);
            throw new ParsingException(resource.getFilename(), new ParsingError(ErrorCode.MISSING_FILE, "Unable to load file.", null, e.getMessage(), null,
                    resourceLocation));
        }
    }
    /**
     * Root of the mapping document: expects a sequence of type mappings and
     * converts each entry into a (yaml type name -> parser) pair. Errors are
     * accumulated on the context rather than thrown.
     */
    public Map<String, INodeParser> parse(Node node, ParsingContextExecution context) {
        Map<String, INodeParser> parsers = Maps.newHashMap();
        if (node instanceof SequenceNode) {
            SequenceNode types = (SequenceNode) node;
            for (Node mapping : types.getValue()) {
                Map.Entry<String, INodeParser<?>> entry = processTypeMapping(mapping, context);
                if (entry != null) {
                    parsers.put(entry.getKey(), entry.getValue());
                }
            }
        } else {
            context.getParsingErrors().add(
                    new ParsingError(ErrorCode.SYNTAX_ERROR, "Mapping should be a sequence of type mappings", node.getStartMark(), "Actually was "
                            + node.getClass().getSimpleName(), node.getEndMark(), ""));
        }
        return parsers;
    }
    // Wraps doProcessTypeMapping, converting reflection failures into
    // parsing errors instead of letting them propagate.
    private Map.Entry<String, INodeParser<?>> processTypeMapping(Node node, ParsingContextExecution context) {
        try {
            return doProcessTypeMapping(node, context);
        } catch (ClassNotFoundException | IllegalAccessException | InstantiationException e) {
            log.error("Failed to load class while parsing mapping", e);
            context.getParsingErrors().add(
                    new ParsingError(ErrorCode.SYNTAX_ERROR, "Unable to load class", node.getStartMark(), e.getMessage(), node.getEndMark(), ""));
            return null;
        }
    }
    /**
     * Processes one type-mapping node. The first tuple determines the YAML
     * type name and the target: a "__"-prefixed value delegates to a wrapper
     * mapping builder, a registered parser class name reuses that parser,
     * and anything else is treated as a Java class to instantiate a
     * TypeNodeParser for. Subsequent tuples are field mappings applied to
     * that parser in order — hence the stateful loop.
     */
    private Map.Entry<String, INodeParser<?>> doProcessTypeMapping(Node node, ParsingContextExecution context) throws ClassNotFoundException,
            IllegalAccessException, InstantiationException {
        if (node instanceof MappingNode) {
            MappingNode mapping = (MappingNode) node;
            String yamlType = null;
            INodeParser<?> parser = null;
            for (NodeTuple tuple : mapping.getValue()) {
                if (yamlType == null) {
                    yamlType = ParserUtils.getScalar(tuple.getKeyNode(), context);
                    String type = ParserUtils.getScalar(tuple.getValueNode(), context);
                    if (type.startsWith("__")) {
                        parser = getWrapperParser(type, mapping, context);
                        return new AbstractMap.SimpleEntry<String, INodeParser<?>>(yamlType, parser);
                    }
                    parser = this.parsers.get(type);
                    if (parser != null) {
                        log.debug("Mapping yaml type <" + yamlType + "> using parser <" + type + ">");
                        return new AbstractMap.SimpleEntry<String, INodeParser<?>>(yamlType, parser);
                    }
                    parser = buildTypeNodeParser(yamlType, type);
                    // log.debug("Mapping yaml type <" + yamlType + "> to class <" + type + ">");
                    // Class<?> javaClass = Class.forName(type);
                    // parser = new TypeNodeParser<>(javaClass, yamlType);
                } else {
                    // process a mapping
                    map(tuple, (TypeNodeParser) parser, context);
                }
            }
            return new AbstractMap.SimpleEntry<String, INodeParser<?>>(yamlType, parser);
        } else {
            context.getParsingErrors().add(
                    new ParsingError(ErrorCode.SYNTAX_ERROR, "Unable to process type mapping.", node.getStartMark(),
                            "Mapping must be defined using a mapping node.", node.getEndMark(), ""));
        }
        return null;
    }
    /**
     * Builds a TypeNodeParser for the Java type. A "javaType|checkerName"
     * suffix selects a registered IChecker and yields a CheckedTypeNodeParser;
     * an unknown checker name falls back to a plain TypeNodeParser.
     */
    private TypeNodeParser<?> buildTypeNodeParser(String yamlType, String javaType) throws ClassNotFoundException {
        String realJavaType = javaType;
        IChecker checker = null;
        if (javaType.contains("|")) {
            realJavaType = javaType.substring(0, javaType.indexOf("|"));
            String checkerName = javaType.substring(javaType.indexOf("|") + 1);
            log.debug(String.format("After parsing <%s>, realJavaType is <%s>, checkerName is <%s>", javaType, realJavaType, checkerName));
            checker = checkers.get(checkerName);
            if (checker == null) {
                log.warn(String.format("Can not find checker <%s>, using a standard TypeNodeParser", checkerName));
            }
        }
        Class<?> javaClass = Class.forName(realJavaType);
        if (checker == null) {
            log.debug("Mapping yaml type <" + yamlType + "> to class <" + realJavaType + ">");
            return new TypeNodeParser<>(javaClass, yamlType);
        } else {
            // TODO check that the type are compatible
            log.debug("Mapping yaml type <" + yamlType + "> to class <" + realJavaType + "> using checker " + checker.toString());
            return new CheckedTypeNodeParser<>(javaClass, yamlType, checker);
        }
    }
    // Delegates a "__key"-style target to the registered mapping builder.
    // NOTE(review): assumes a builder is registered for the key; a missing
    // builder would NPE here — confirm registration covers all keys used.
    private INodeParser<?> getWrapperParser(String wrapperKey, MappingNode mapping, ParsingContextExecution context) {
        IMappingBuilder builder = this.mappingBuilders.get(wrapperKey.substring(2));
        return builder.buildMapping(mapping, context).getParser();
    }
    // Applies one field-mapping tuple to the parser: "__<n>" keys are
    // position (ordered) mappings, everything else maps by YAML key.
    private void map(NodeTuple tuple, TypeNodeParser<?> parser, ParsingContextExecution context) {
        String key = ParserUtils.getScalar(tuple.getKeyNode(), context);
        int positionMappingIndex = positionMappingIndex(key);
        if (positionMappingIndex > -1) {
            mapPositionMapping(positionMappingIndex, tuple.getValueNode(), parser, context);
        } else {
            MappingTarget mappingTarget = getMappingTarget(tuple.getValueNode(), context);
            if (mappingTarget != null) {
                parser.getYamlToObjectMapping().put(key, mappingTarget);
            }
        }
    }
    // Scalar targets become scalar mappings; mapping nodes delegate to a
    // builder; anything else yields null (caller skips the entry).
    private MappingTarget getMappingTarget(Node mappingNode, ParsingContextExecution context) {
        if (mappingNode instanceof ScalarNode) {
            // create a scalar mapping
            String value = ParserUtils.getScalar(mappingNode, context);
            return new MappingTarget(value, parsers.get(ScalarParser.class.getName()));
        } else if (mappingNode instanceof MappingNode) {
            return mapMappingNode((MappingNode) mappingNode, context);
        }
        return null;
    }
    // Returns the integer N for keys of the form "__N", or -1 when the key
    // is not a position mapping.
    private int positionMappingIndex(String key) {
        if (key.startsWith("__")) {
            try {
                int position = Integer.valueOf(key.substring(2));
                return position;
            } catch (NumberFormatException e) {
                // not a position mapping
                return -1;
            }
        }
        return -1;
    }
    /**
     * Registers an ordered (position-based) mapping. The node must be a
     * mapping with an optional "key" and a required "value"; with a key the
     * value is wrapped in a KeyValueMappingTarget, otherwise the raw target
     * is stored at the given index.
     */
    private void mapPositionMapping(Integer index, Node positionMapping, TypeNodeParser<?> parser, ParsingContextExecution context) {
        if (positionMapping instanceof MappingNode) {
            MappingNode mappingNode = (MappingNode) positionMapping;
            String key = null;
            MappingTarget valueMappingTarget = null;
            for (NodeTuple tuple : mappingNode.getValue()) {
                String tupleKey = ParserUtils.getScalar(tuple.getKeyNode(), context);
                if (tupleKey.equals("key")) {
                    key = ParserUtils.getScalar(tuple.getValueNode(), context);
                } else if (tupleKey.equals("value")) {
                    valueMappingTarget = getMappingTarget(tuple.getValueNode(), context);
                } else {
                    context.getParsingErrors().add(
                            new ParsingError(ErrorCode.SYNTAX_ERROR, "Unknown key for position mapping.", tuple.getKeyNode().getStartMark(), tupleKey, tuple
                                    .getKeyNode().getEndMark(), ""));
                }
            }
            if (valueMappingTarget == null) {
                return;
            }
            if (key == null) {
                parser.getYamlOrderedToObjectMapping().put(index, valueMappingTarget);
            } else {
                parser.getYamlOrderedToObjectMapping().put(index, new KeyValueMappingTarget(key, valueMappingTarget.getPath(), valueMappingTarget.getParser()));
            }
        } else {
            context.getParsingErrors().add(
                    new ParsingError(ErrorCode.SYNTAX_ERROR, "Position mapping must be a mapping node with key and value fields.", positionMapping
                            .getStartMark(), "", positionMapping.getEndMark(), ""));
        }
    }
    // Looks up the mapping builder registered for the node's first key and
    // delegates; records a parsing error when no builder matches.
    private MappingTarget mapMappingNode(MappingNode mappingNode, ParsingContextExecution context) {
        String key = ParserUtils.getScalar(mappingNode.getValue().get(0).getKeyNode(), context);
        IMappingBuilder mappingBuilder = mappingBuilders.get(key);
        if (mappingBuilder != null) {
            log.debug("Mapping yaml key <" + key + "> using mapping builder " + mappingBuilder.getClass().getName());
            return mappingBuilder.buildMapping(mappingNode, context);
        }
        context.getParsingErrors().add(
                new ParsingError(ErrorCode.SYNTAX_ERROR, "No mapping target found for key", mappingNode.getValue().get(0).getKeyNode().getStartMark(), key,
                        mappingNode.getValue().get(0).getKeyNode().getEndMark(), ""));
        return null;
    }
}
---
id: io-kinesis-sink
title: Kinesis sink connector
sidebar_label: "Kinesis sink connector"
original_id: io-kinesis-sink
---
The Kinesis sink connector pulls data from Pulsar and persists data into Amazon Kinesis.
## Configuration
The configuration of the Kinesis sink connector has the following property.
### Property
| Name | Type|Required | Default | Description
|------|----------|----------|---------|-------------|
`messageFormat`|MessageFormat|true|ONLY_RAW_PAYLOAD|Message format in which Kinesis sink converts Pulsar messages and publishes to Kinesis streams.<br /><br />Below are the available options:<br /><br /><li>`ONLY_RAW_PAYLOAD`: Kinesis sink directly publishes Pulsar message payload as a message into the configured Kinesis stream. <br /><br /></li><li>`FULL_MESSAGE_IN_JSON`: Kinesis sink creates a JSON payload with Pulsar message payload, properties and encryptionCtx, and publishes JSON payload into the configured Kinesis stream.<br /><br /></li><li>`FULL_MESSAGE_IN_FB`: Kinesis sink creates a flatbuffer serialized payload with Pulsar message payload, properties and encryptionCtx, and publishes flatbuffer payload into the configured Kinesis stream.</li>
`retainOrdering`|boolean|false|false|Whether the Pulsar connector retains ordering when moving messages from Pulsar to Kinesis.
`awsEndpoint`|String|false|" " (empty string)|The Kinesis end-point URL, which can be found [here](https://docs.aws.amazon.com/general/latest/gr/rande.html).
`awsRegion`|String|false|" " (empty string)|The AWS region. <br /><br />**Example**<br /> us-west-1, us-west-2
`awsKinesisStreamName`|String|true|" " (empty string)|The Kinesis stream name.
`awsCredentialPluginName`|String|false|" " (empty string)|The fully-qualified class name of implementation of {@inject: github:AwsCredentialProviderPlugin:/pulsar-io/kinesis/src/main/java/org/apache/pulsar/io/kinesis/AwsCredentialProviderPlugin.java}. <br /><br />It is a factory class which creates an AWSCredentialsProvider that is used by Kinesis sink. <br /><br />If it is empty, the Kinesis sink creates a default AWSCredentialsProvider which accepts json-map of credentials in `awsCredentialPluginParam`.
`awsCredentialPluginParam`|String |false|" " (empty string)|The JSON parameter to initialize `awsCredentialsProviderPlugin`.
### Built-in plugins
The following are built-in `AwsCredentialProviderPlugin` plugins:
* `org.apache.pulsar.io.kinesis.AwsDefaultProviderChainPlugin`
This plugin takes no configuration; it uses the default AWS provider chain.
For more information, see [AWS documentation](https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html#credentials-default).
* `org.apache.pulsar.io.kinesis.STSAssumeRoleProviderPlugin`
This plugin takes a configuration (via the `awsCredentialPluginParam`) that describes a role to assume when running the KCL.
This configuration takes the form of a small JSON document like:
```json
{"roleArn": "arn...", "roleSessionName": "name"}
```
### Example
Before using the Kinesis sink connector, you need to create a configuration file through one of the following methods.
* JSON
```json
{
"awsEndpoint": "some.endpoint.aws",
"awsRegion": "us-east-1",
"awsKinesisStreamName": "my-stream",
"awsCredentialPluginParam": "{\"accessKey\":\"myKey\",\"secretKey\":\"my-Secret\"}",
"messageFormat": "ONLY_RAW_PAYLOAD",
"retainOrdering": "true"
}
```
* YAML
```yaml
configs:
awsEndpoint: "some.endpoint.aws"
awsRegion: "us-east-1"
awsKinesisStreamName: "my-stream"
awsCredentialPluginParam: "{\"accessKey\":\"myKey\",\"secretKey\":\"my-Secret\"}"
messageFormat: "ONLY_RAW_PAYLOAD"
retainOrdering: "true"
```
| massakam/pulsar | site2/website-next/versioned_docs/version-2.5.0/io-kinesis-sink.md | Markdown | apache-2.0 | 3,793 |
'use strict';
import { module } from 'angular';
import _ from 'lodash';
import { AccountService, ExpectedArtifactService } from '@spinnaker/core';
import { KubernetesProviderSettings } from '../../../kubernetes.settings';
export const KUBERNETES_V1_CLUSTER_CONFIGURE_COMMANDBUILDER = 'spinnaker.kubernetes.clusterCommandBuilder.service';
export const name = KUBERNETES_V1_CLUSTER_CONFIGURE_COMMANDBUILDER; // for backwards compatibility
module(KUBERNETES_V1_CLUSTER_CONFIGURE_COMMANDBUILDER, []).factory('kubernetesClusterCommandBuilder', function() {
function attemptToSetValidAccount(application, defaultAccount, command) {
return AccountService.listAccounts('kubernetes', 'v1').then(function(kubernetesAccounts) {
const kubernetesAccountNames = _.map(kubernetesAccounts, 'name');
let firstKubernetesAccount = null;
if (application.accounts.length) {
firstKubernetesAccount = _.find(application.accounts, function(applicationAccount) {
return kubernetesAccountNames.includes(applicationAccount);
});
} else if (kubernetesAccountNames.length) {
firstKubernetesAccount = kubernetesAccountNames[0];
}
const defaultAccountIsValid = defaultAccount && kubernetesAccountNames.includes(defaultAccount);
command.account = defaultAccountIsValid
? defaultAccount
: firstKubernetesAccount
? firstKubernetesAccount
: 'my-account-name';
});
}
function applyHealthProviders(application, command) {
command.interestingHealthProviderNames = ['KubernetesContainer', 'KubernetesPod'];
}
function buildNewClusterCommand(application, defaults = {}) {
const defaultAccount = defaults.account || KubernetesProviderSettings.defaults.account;
const command = {
account: defaultAccount,
application: application.name,
strategy: '',
targetSize: 1,
cloudProvider: 'kubernetes',
selectedProvider: 'kubernetes',
namespace: 'default',
containers: [],
initContainers: [],
volumeSources: [],
buildImageId: buildImageId,
groupByRegistry: groupByRegistry,
terminationGracePeriodSeconds: 30,
viewState: {
mode: defaults.mode || 'create',
disableStrategySelection: true,
useAutoscaler: false,
},
capacity: {
min: 1,
desired: 1,
max: 1,
},
scalingPolicy: {
cpuUtilization: {
target: 40,
},
},
useSourceCapacity: false,
deployment: {
enabled: false,
minReadySeconds: 0,
deploymentStrategy: {
type: 'RollingUpdate',
rollingUpdate: {
maxUnavailable: 1,
maxSurge: 1,
},
},
},
};
applyHealthProviders(application, command);
attemptToSetValidAccount(application, defaultAccount, command);
return command;
}
function buildClusterCommandFromExisting(application, existing, mode) {
mode = mode || 'clone';
const command = _.cloneDeep(existing.deployDescription);
command.groupByRegistry = groupByRegistry;
command.cloudProvider = 'kubernetes';
command.selectedProvider = 'kubernetes';
command.account = existing.account;
command.buildImageId = buildImageId;
command.strategy = '';
command.containers.forEach(container => {
container.imageDescription.imageId = buildImageId(container.imageDescription);
});
command.initContainers.forEach(container => {
container.imageDescription.imageId = buildImageId(container.imageDescription);
});
command.viewState = {
mode: mode,
useAutoscaler: !!command.scalingPolicy,
};
if (!command.capacity) {
command.capacity = {
min: command.targetSize,
max: command.targetSize,
desired: command.targetSize,
};
}
if (!_.has(command, 'scalingPolicy.cpuUtilization.target')) {
command.scalingPolicy = { cpuUtilization: { target: 40 } };
}
applyHealthProviders(application, command);
return command;
}
function groupByRegistry(container) {
if (container.imageDescription) {
if (container.imageDescription.fromContext) {
return 'Find Image Result(s)';
} else if (container.imageDescription.fromTrigger) {
return 'Images from Trigger(s)';
} else if (container.imageDescription.fromArtifact) {
return 'Images from Artifact(s)';
} else {
return container.imageDescription.registry;
}
}
}
function buildImageId(image) {
if (image.fromFindImage) {
return `${image.cluster} ${image.pattern}`;
} else if (image.fromBake) {
return `${image.repository} (Baked during execution)`;
} else if (image.fromTrigger && !image.tag) {
return `${image.registry}/${image.repository} (Tag resolved at runtime)`;
} else if (image.fromArtifact) {
return `${image.name} (Artifact resolved at runtime)`;
} else {
if (image.registry) {
return `${image.registry}/${image.repository}:${image.tag}`;
} else {
return `${image.repository}:${image.tag}`;
}
}
}
function reconcileUpstreamImages(containers, upstreamImages) {
const getConfig = image => {
if (image.fromContext) {
return {
match: other => other.fromContext && other.stageId === image.stageId,
fieldsToCopy: matchImage => {
const { cluster, pattern, repository } = matchImage;
return { cluster, pattern, repository };
},
};
} else if (image.fromTrigger) {
return {
match: other =>
other.fromTrigger &&
other.registry === image.registry &&
other.repository === image.repository &&
other.tag === image.tag,
fieldsToCopy: () => ({}),
};
} else if (image.fromArtifact) {
return {
match: other => other.fromArtifact && other.stageId === image.stageId,
fieldsToCopy: matchImage => {
const { name } = matchImage;
return { name };
},
};
} else {
return {
skipProcessing: true,
};
}
};
const result = [];
containers.forEach(container => {
const imageDescription = container.imageDescription;
const imageConfig = getConfig(imageDescription);
if (imageConfig.skipProcessing) {
result.push(container);
} else {
const matchingImage = upstreamImages.find(imageConfig.match);
if (matchingImage) {
Object.assign(imageDescription, imageConfig.fieldsToCopy(matchingImage));
result.push(container);
}
}
});
return result;
}
function findContextImages(current, all, visited = {}) {
// This actually indicates a loop in the stage dependencies.
if (visited[current.refId]) {
return [];
} else {
visited[current.refId] = true;
}
let result = [];
if (current.type === 'findImage') {
result.push({
fromContext: true,
fromFindImage: true,
cluster: current.cluster,
pattern: current.imageNamePattern,
repository: current.name,
stageId: current.refId,
});
} else if (current.type === 'bake') {
result.push({
fromContext: true,
fromBake: true,
repository: current.ami_name,
organization: current.organization,
stageId: current.refId,
});
}
current.requisiteStageRefIds.forEach(function(id) {
const next = all.find(stage => stage.refId === id);
if (next) {
result = result.concat(findContextImages(next, all, visited));
}
});
return result;
}
function findTriggerImages(triggers) {
return triggers
.filter(trigger => {
return trigger.type === 'docker';
})
.map(trigger => {
return {
fromTrigger: true,
repository: trigger.repository,
account: trigger.account,
organization: trigger.organization,
registry: trigger.registry,
tag: trigger.tag,
};
});
}
function findArtifactImages(currentStage, pipeline) {
const artifactImages = ExpectedArtifactService.getExpectedArtifactsAvailableToStage(currentStage, pipeline)
.filter(artifact => artifact.matchArtifact.type === 'docker/image')
.map(artifact => ({
fromArtifact: true,
artifactId: artifact.id,
name: artifact.matchArtifact.name,
}));
return artifactImages;
}
function buildNewClusterCommandForPipeline(current, pipeline) {
let contextImages = findContextImages(current, pipeline.stages) || [];
contextImages = contextImages.concat(findTriggerImages(pipeline.triggers));
contextImages = contextImages.concat(findArtifactImages(current, pipeline));
return {
strategy: '',
viewState: {
contextImages: contextImages,
mode: 'editPipeline',
submitButtonLabel: 'Done',
requiresTemplateSelection: true,
useAutoscaler: false,
},
};
}
function buildClusterCommandFromPipeline(app, originalCommand, current, pipeline) {
const command = _.cloneDeep(originalCommand);
let contextImages = findContextImages(current, pipeline.stages) || [];
contextImages = contextImages.concat(findTriggerImages(pipeline.triggers));
contextImages = contextImages.concat(findArtifactImages(current, pipeline));
command.containers = reconcileUpstreamImages(command.containers, contextImages);
command.containers.map(container => {
container.imageDescription.imageId = buildImageId(container.imageDescription);
});
command.groupByRegistry = groupByRegistry;
command.buildImageId = buildImageId;
command.strategy = command.strategy || '';
command.selectedProvider = 'kubernetes';
command.viewState = {
mode: 'editPipeline',
contextImages: contextImages,
submitButtonLabel: 'Done',
useAutoscaler: !!command.scalingPolicy,
};
if (!_.has(command, 'scalingPolicy.cpuUtilization.target')) {
command.scalingPolicy = { cpuUtilization: { target: 40 } };
}
return command;
}
return {
buildNewClusterCommand: buildNewClusterCommand,
buildClusterCommandFromExisting: buildClusterCommandFromExisting,
buildNewClusterCommandForPipeline: buildNewClusterCommandForPipeline,
buildClusterCommandFromPipeline: buildClusterCommandFromPipeline,
groupByRegistry: groupByRegistry,
buildImageId: buildImageId,
};
});
| sgarlick987/deck | app/scripts/modules/kubernetes/src/v1/cluster/configure/CommandBuilder.js | JavaScript | apache-2.0 | 10,658 |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a VM with the provided name, metadata, and auth scopes."""
COMPUTE_URL_BASE = 'https://www.googleapis.com/compute/v1/'


def GlobalComputeUrl(project, collection, name):
  """Returns the URL of a global (non-zonal) Compute Engine resource."""
  path = ['projects/', project, '/global/', collection, '/', name]
  return COMPUTE_URL_BASE + ''.join(path)
def ZonalComputeUrl(project, zone, collection, name):
  """Returns the URL of a zonal Compute Engine resource."""
  path = ['projects/', project, '/zones/', zone, '/', collection, '/', name]
  return COMPUTE_URL_BASE + ''.join(path)
def GenerateConfig(context):
  """Generate configuration.

  Builds a single f1-micro Compute Engine instance resource from the
  deployment context:
    - context.properties['instanceName']: base name for the VM and its disk.
    - context.properties['zone']: zone to create the VM in.
    - context.properties['metadata']: dict copied into instance metadata.
    - context.properties['scopes']: auth scopes for the default service
      account.
    - context.env['project']: project the resource URLs are built for.

  Returns:
    A dict with a 'resources' list containing one compute.v1.instance.
  """
  base_name = context.properties['instanceName']
  # Convert the metadata dict into the items list the Compute API expects.
  # Uses dict.items() instead of the Python-2-only iteritems() so the
  # template also runs under Python 3.
  items = []
  for key, value in context.properties['metadata'].items():
    items.append({
        'key': key,
        'value': value
    })
  metadata = {'items': items}
  # Properties for the container-based instance.
  instance = {
      'zone': context.properties['zone'],
      'machineType': ZonalComputeUrl(
          context.env['project'], context.properties['zone'], 'machineTypes',
          'f1-micro'),
      'metadata': metadata,
      'serviceAccounts': [{
          'email': 'default',
          'scopes': context.properties['scopes']
      }],
      'disks': [{
          'deviceName': 'boot',
          'type': 'PERSISTENT',
          'autoDelete': True,
          'boot': True,
          'initializeParams': {
              'diskName': base_name + '-disk',
              'sourceImage': GlobalComputeUrl(
                  'debian-cloud', 'images',
                  ''.join(['backports-debian', '-7-wheezy-v20151104']))
          },
      }],
      'networkInterfaces': [{
          'accessConfigs': [{
              'name': 'external-nat',
              'type': 'ONE_TO_ONE_NAT'
          }],
          'network': GlobalComputeUrl(
              context.env['project'], 'networks', 'default')
      }]
  }
  # Resources and output to return.
  return {
      'resources': [{
          'name': base_name,
          'type': 'compute.v1.instance',
          'properties': instance
      }]
  }
| aljim/deploymentmanager-samples | examples/v2/waiter/instance.py | Python | apache-2.0 | 2,634 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.carbondata.core.writer.sortindex;
import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.UUID;
import org.apache.carbondata.core.carbon.CarbonTableIdentifier;
import org.apache.carbondata.core.carbon.ColumnIdentifier;
import org.apache.carbondata.core.datastorage.store.filesystem.CarbonFile;
import org.apache.carbondata.core.datastorage.store.impl.FileFactory;
import org.apache.carbondata.core.reader.sortindex.CarbonDictionarySortIndexReader;
import org.apache.carbondata.core.reader.sortindex.CarbonDictionarySortIndexReaderImpl;
import org.apache.carbondata.core.util.CarbonUtil;
import org.apache.carbondata.core.writer.CarbonDictionaryWriter;
import org.apache.carbondata.core.writer.CarbonDictionaryWriterImpl;
import org.apache.commons.lang.ArrayUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
/**
* class contains the unit test cases of the dictionary sort index & sort index inverted writing
*/
public class CarbonDictionarySortIndexWriterImplTest {
private String hdfsStorePath;
@Before public void setUp() throws Exception {
hdfsStorePath = "target/carbonStore";
}
@After public void tearDown() throws Exception {
//deleteStorePath();
}
/**
* s
* Method to test the write of sortIndex file.
*
* @throws Exception
*/
@Test public void write() throws Exception {
String storePath = hdfsStorePath;
CarbonTableIdentifier carbonTableIdentifier = new CarbonTableIdentifier("testSchema", "carbon", UUID.randomUUID().toString());
ColumnIdentifier columnIdentifier = new ColumnIdentifier("Name", null, null);
String metaFolderPath =hdfsStorePath+File.separator+carbonTableIdentifier.getDatabaseName()+File.separator+carbonTableIdentifier.getTableName()+File.separator+"Metadata";
CarbonUtil.checkAndCreateFolder(metaFolderPath);
CarbonDictionaryWriter dictionaryWriter = new CarbonDictionaryWriterImpl(hdfsStorePath,
carbonTableIdentifier, columnIdentifier);
CarbonDictionarySortIndexWriter dictionarySortIndexWriter =
new CarbonDictionarySortIndexWriterImpl(carbonTableIdentifier, columnIdentifier, storePath);
List<int[]> indexList = prepareExpectedData();
int[] data = indexList.get(0);
for(int i=0;i<data.length;i++) {
dictionaryWriter.write(String.valueOf(data[i]));
}
dictionaryWriter.close();
dictionaryWriter.commit();
List<Integer> sortIndex = Arrays.asList(ArrayUtils.toObject(indexList.get(0)));
List<Integer> invertedSortIndex = Arrays.asList(ArrayUtils.toObject(indexList.get(1)));
dictionarySortIndexWriter.writeSortIndex(sortIndex);
dictionarySortIndexWriter.writeInvertedSortIndex(invertedSortIndex);
dictionarySortIndexWriter.close();
CarbonDictionarySortIndexReader carbonDictionarySortIndexReader =
new CarbonDictionarySortIndexReaderImpl(carbonTableIdentifier, columnIdentifier, storePath);
List<Integer> actualSortIndex = carbonDictionarySortIndexReader.readSortIndex();
List<Integer> actualInvertedSortIndex = carbonDictionarySortIndexReader.readInvertedSortIndex();
for (int i = 0; i < actualSortIndex.size(); i++) {
Assert.assertEquals(sortIndex.get(i), actualSortIndex.get(i));
Assert.assertEquals(invertedSortIndex.get(i), actualInvertedSortIndex.get(i));
}
}
/**
* @throws Exception
*/
@Test public void writingEmptyValue() throws Exception {
String storePath = hdfsStorePath;
CarbonTableIdentifier carbonTableIdentifier = new CarbonTableIdentifier("testSchema", "carbon", UUID.randomUUID().toString());
ColumnIdentifier columnIdentifier = new ColumnIdentifier("Name", null, null);
CarbonDictionarySortIndexWriter dictionarySortIndexWriter =
new CarbonDictionarySortIndexWriterImpl(carbonTableIdentifier, columnIdentifier, storePath);
List<Integer> sortIndex = new ArrayList<>();
List<Integer> invertedSortIndex = new ArrayList<>();
dictionarySortIndexWriter.writeSortIndex(sortIndex);
dictionarySortIndexWriter.writeInvertedSortIndex(invertedSortIndex);
dictionarySortIndexWriter.close();
CarbonDictionarySortIndexReader carbonDictionarySortIndexReader =
new CarbonDictionarySortIndexReaderImpl(carbonTableIdentifier, columnIdentifier, storePath);
List<Integer> actualSortIndex = carbonDictionarySortIndexReader.readSortIndex();
List<Integer> actualInvertedSortIndex = carbonDictionarySortIndexReader.readInvertedSortIndex();
for (int i = 0; i < actualSortIndex.size(); i++) {
Assert.assertEquals(sortIndex.get(i), actualSortIndex.get(i));
Assert.assertEquals(invertedSortIndex.get(i), actualInvertedSortIndex.get(i));
}
}
private List<int[]> prepareExpectedData() {
List<int[]> indexList = new ArrayList<>(2);
int[] sortIndex = { 0, 3, 2, 4, 1 };
int[] sortIndexInverted = { 0, 2, 4, 1, 2 };
indexList.add(0, sortIndex);
indexList.add(1, sortIndexInverted);
return indexList;
}
/**
* this method will delete the store path
*/
private void deleteStorePath() {
FileFactory.FileType fileType = FileFactory.getFileType(this.hdfsStorePath);
CarbonFile carbonFile = FileFactory.getCarbonFile(this.hdfsStorePath, fileType);
deleteRecursiveSilent(carbonFile);
}
/**
* this method will delete the folders recursively
*/
private static void deleteRecursiveSilent(CarbonFile f) {
if (f.isDirectory()) {
if (f.listFiles() != null) {
for (CarbonFile c : f.listFiles()) {
deleteRecursiveSilent(c);
}
}
}
if (f.exists() && !f.delete()) {
return;
}
}
}
| foryou2030/incubator-carbondata | core/src/test/java/org/apache/carbondata/core/writer/sortindex/CarbonDictionarySortIndexWriterImplTest.java | Java | apache-2.0 | 6,533 |
import { getGlobal } from '../src/prebidGlobal.js';
import { createBid } from '../src/bidfactory.js';
import { STATUS } from '../src/constants.json';
import { ajax } from '../src/ajax.js';
import * as utils from '../src/utils.js';
import { config } from '../src/config.js';
import { getHook } from '../src/hook.js';
// Default location of the Prebid-hosted conversion file; the $$TODAY$$ macro
// is resolved to the current date in setConfig for day-level browser caching.
const DEFAULT_CURRENCY_RATE_URL = 'https://cdn.jsdelivr.net/gh/prebid/currency-file@1/latest.json?date=$$TODAY$$';
// Decimal places kept when deriving reciprocal/intermediate conversion rates.
const CURRENCY_RATE_PRECISION = 4;
// Bid-response callbacks held back until the rates file has loaded.
var bidResponseQueue = [];
// Memoized conversion rates, keyed by '<from>-><to>'.
var conversionCache = {};
// True once rates are available (inline config, defaultRates, or fetched file).
var currencyRatesLoaded = false;
// Guards against fetching the conversion file more than once per page.
var needToCallForCurrencyFile = true;
// Target currency all bids are converted into.
var adServerCurrency = 'USD';
export var currencySupportEnabled = false;
export var currencyRates = {};
// Per-bidder fallback currency, from config.bidderCurrencyDefault.
var bidderCurrencyDefault = {};
// Fallback rates used if the conversion file fails to load (config.defaultRates).
var defaultRates;
/**
* Configuration function for currency
* @param {string} [config.adServerCurrency = 'USD']
* ISO 4217 3-letter currency code that represents the target currency. (e.g. 'EUR'). If this value is present,
* the currency conversion feature is activated.
* @param {number} [config.granularityMultiplier = 1]
 * A decimal value representing how much to scale the price granularity calculations.
* @param {object} config.bidderCurrencyDefault
* An optional argument to specify bid currencies for bid adapters. This option is provided for the transitional phase
* before every bid adapter will specify its own bid currency. If the adapter specifies a bid currency, this value is
* ignored for that bidder.
*
* example:
* {
* rubicon: 'USD'
* }
* @param {string} [config.conversionRateFile = 'URL pointing to conversion file']
* Optional path to a file containing currency conversion data. Prebid.org hosts a file that is used as the default,
* if not specified.
* @param {object} [config.rates]
* This optional argument allows you to specify the rates with a JSON object, subverting the need for a external
* config.conversionRateFile parameter. If this argument is specified, the conversion rate file will not be loaded.
*
* example:
* {
* 'GBP': { 'CNY': 8.8282, 'JPY': 141.7, 'USD': 1.2824 },
* 'USD': { 'CNY': 6.8842, 'GBP': 0.7798, 'JPY': 110.49 }
* }
* @param {object} [config.defaultRates]
* This optional currency rates definition follows the same format as config.rates, however it is only utilized if
* there is an error loading the config.conversionRateFile.
*/
/**
 * Apply the `currency` configuration object: store any inline/default rates,
 * enable or disable currency support, and kick off the rate-file fetch.
 * @param {object} config the `currency` config (see jsdoc above for fields)
 */
export function setConfig(config) {
  let rateFileUrl = DEFAULT_CURRENCY_RATE_URL;
  if (typeof config.rates === 'object') {
    // rates supplied inline — no need to fetch the conversion file
    currencyRates.conversions = config.rates;
    currencyRatesLoaded = true;
    needToCallForCurrencyFile = false; // don't call if rates are already specified
  }
  if (typeof config.defaultRates === 'object') {
    // fallback rates, used until (or unless) the rate file loads
    defaultRates = config.defaultRates;
    currencyRates.conversions = defaultRates;
    currencyRatesLoaded = true;
  }
  if (typeof config.adServerCurrency !== 'string') {
    // currency support is disabled, setting defaults
    utils.logInfo('disabling currency support');
    resetCurrency();
  } else {
    utils.logInfo('enabling currency support', arguments);
    adServerCurrency = config.adServerCurrency;
    if (config.conversionRateFile) {
      utils.logInfo('currency using override conversionRateFile:', config.conversionRateFile);
      rateFileUrl = config.conversionRateFile;
    }
    // jsdelivr can't set a 24-hour HTTP cache header, so a $$TODAY$$ macro in
    // the URL is resolved to today's date, letting the browser cache one copy
    // per day. Remove once the CDN supports day-level cache settings.
    if (rateFileUrl.indexOf('$$TODAY$$') !== -1) {
      const now = new Date();
      let month = `${now.getMonth() + 1}`;
      let day = `${now.getDate()}`;
      if (month.length === 1) month = `0${month}`;
      if (day.length === 1) day = `0${day}`;
      rateFileUrl = rateFileUrl.replace('$$TODAY$$', `${now.getFullYear()}${month}${day}`);
    }
    initCurrency(rateFileUrl);
  }
  if (typeof config.bidderCurrencyDefault === 'object') {
    bidderCurrencyDefault = config.bidderCurrencyDefault;
  }
}
config.getConfig('currency', config => setConfig(config.currency));
/**
 * Report a failure to load or parse the currency rates file.
 * Downgraded to warnings when `currency.defaultRates` is configured, since
 * conversion can still proceed using the fallback rates.
 * @param {string} msg description of the failure
 */
function errorSettingsRates(msg) {
  if (!defaultRates) {
    utils.logError(msg);
    return;
  }
  utils.logWarn(msg);
  utils.logWarn('Currency failed loading rates, falling back to currency.defaultRates');
}
/**
 * Enable currency conversion: expose `convertCurrency` on the prebid global,
 * install the addBidResponse decorator (priority 100), and fetch the
 * conversion-rate file once per page unless rates were supplied inline.
 * @param {string} url resolved URL of the currency rates JSON file
 */
function initCurrency(url) {
  // previously cached conversions may be stale for a new configuration
  conversionCache = {};
  currencySupportEnabled = true;
  utils.logInfo('Installing addBidResponse decorator for currency module', arguments);
  // Adding conversion function to prebid global for external module and on page use
  getGlobal().convertCurrency = (cpm, fromCurrency, toCurrency) => parseFloat(cpm) * getCurrencyConversion(fromCurrency, toCurrency);
  getHook('addBidResponse').before(addBidResponseHook, 100);
  // call for the file if we haven't already
  if (needToCallForCurrencyFile) {
    // flag is cleared before the async call so repeated inits don't re-fetch
    needToCallForCurrencyFile = false;
    ajax(url,
      {
        success: function (response) {
          try {
            currencyRates = JSON.parse(response);
            utils.logInfo('currencyRates set to ' + JSON.stringify(currencyRates));
            currencyRatesLoaded = true;
            // release any bids queued while the rates were loading
            processBidResponseQueue();
          } catch (e) {
            errorSettingsRates('Failed to parse currencyRates response: ' + response);
          }
        },
        error: errorSettingsRates
      }
    );
  }
}
/**
 * Disable currency support and restore every piece of module state to its
 * default: removes the addBidResponse decorator and the global
 * `convertCurrency` helper, and clears all cached rates and settings.
 */
function resetCurrency() {
  utils.logInfo('Uninstalling addBidResponse decorator for currency module', arguments);
  getHook('addBidResponse').getHooks({hook: addBidResponseHook}).remove();
  delete getGlobal().convertCurrency;
  // reset all module-level state back to its initial values
  currencySupportEnabled = false;
  currencyRatesLoaded = false;
  needToCallForCurrencyFile = true;
  adServerCurrency = 'USD';
  currencyRates = {};
  conversionCache = {};
  bidderCurrencyDefault = {};
}
/**
 * Decorator installed before `addBidResponse`: normalizes the bid's currency
 * and either forwards the bid immediately (already in the ad server currency)
 * or queues it until conversion rates are available.
 * @param {function} fn the next hook / original addBidResponse
 * @param {string} adUnitCode ad unit the bid belongs to
 * @param {object} bid the bid response; mutated in place (currency, cpm)
 */
export function addBidResponseHook(fn, adUnitCode, bid) {
  if (!bid) {
    return fn.call(this, adUnitCode); // if no bid, call original and let it display warnings
  }
  let bidder = bid.bidderCode || bid.bidder;
  // apply the configured per-bidder default currency, unless the adapter set its own
  if (bidderCurrencyDefault[bidder]) {
    let currencyDefault = bidderCurrencyDefault[bidder];
    if (bid.currency && currencyDefault !== bid.currency) {
      utils.logWarn(`Currency default '${bidder}: ${currencyDefault}' ignored. adapter specified '${bid.currency}'`);
    } else {
      bid.currency = currencyDefault;
    }
  }
  // default to USD if currency not set
  if (!bid.currency) {
    utils.logWarn('Currency not specified on bid.  Defaulted to "USD"');
    bid.currency = 'USD';
  }
  // used for analytics
  bid.getCpmInNewCurrency = function(toCurrency) {
    return (parseFloat(this.cpm) * getCurrencyConversion(this.currency, toCurrency)).toFixed(3);
  };
  // execute immediately if the bid is already in the desired currency
  if (bid.currency === adServerCurrency) {
    return fn.call(this, adUnitCode, bid);
  }
  // otherwise queue; the queue is flushed now if rates are ready (or support is off)
  bidResponseQueue.push(wrapFunction(fn, this, [adUnitCode, bid]));
  if (!currencySupportEnabled || currencyRatesLoaded) {
    processBidResponseQueue();
  }
}
/**
 * Flush the queue of deferred bid-response callbacks, invoking each in FIFO
 * order until the queue is empty.
 */
function processBidResponseQueue() {
  let wrappedCall = bidResponseQueue.shift();
  while (wrappedCall !== undefined) {
    wrappedCall();
    wrappedCall = bidResponseQueue.shift();
  }
}
/**
 * Wrap a deferred addBidResponse call so that, when invoked later (once rates
 * are loaded), the bid's cpm is converted into the ad server currency first.
 * If conversion fails, the bid is replaced in-place with a NO_BID.
 * @param {function} fn the original function to invoke
 * @param {object} context the `this` to apply `fn` with
 * @param {Array} params [adUnitCode, bid] — bid may be mutated or replaced
 * @returns {function} zero-arg callback suitable for the response queue
 */
function wrapFunction(fn, context, params) {
  return function() {
    let bid = params[1];
    if (bid !== undefined && 'currency' in bid && 'cpm' in bid) {
      let fromCurrency = bid.currency;
      try {
        let conversion = getCurrencyConversion(fromCurrency);
        if (conversion !== 1) {
          // convert cpm into the ad server currency (4 decimal places)
          bid.cpm = (parseFloat(bid.cpm) * conversion).toFixed(4);
          bid.currency = adServerCurrency;
        }
      } catch (e) {
        utils.logWarn('Returning NO_BID, getCurrencyConversion threw error: ', e);
        // replace the bid (not just annotate it) so downstream sees a NO_BID
        params[1] = createBid(STATUS.NO_BID, {
          bidder: bid.bidderCode || bid.bidder,
          bidId: bid.requestId
        });
      }
    }
    return fn.apply(context, params);
  };
}
/**
 * Resolve the multiplier that converts `fromCurrency` into `toCurrency`.
 * Resolution order: memoized cache; identity (same currency, or USD while
 * support is disabled); direct rate; reciprocal of the reverse rate; finally
 * a two-hop conversion through the first base currency in the rates file.
 * Results are cached per currency pair.
 * @param {string} fromCurrency ISO 4217 code of the bid's currency
 * @param {string} [toCurrency=adServerCurrency] ISO 4217 target code
 * @returns {number} conversion rate multiplier
 * @throws {Error} when support is disabled for a non-USD source, or either
 *   currency is absent from the rates file
 */
function getCurrencyConversion(fromCurrency, toCurrency = adServerCurrency) {
  var conversionRate = null;
  var rates;
  let cacheKey = `${fromCurrency}->${toCurrency}`;
  if (cacheKey in conversionCache) {
    conversionRate = conversionCache[cacheKey];
    utils.logMessage('Using conversionCache value ' + conversionRate + ' for ' + cacheKey);
  } else if (currencySupportEnabled === false) {
    if (fromCurrency === 'USD') {
      conversionRate = 1;
    } else {
      throw new Error('Prebid currency support has not been enabled and fromCurrency is not USD');
    }
  } else if (fromCurrency === toCurrency) {
    conversionRate = 1;
  } else {
    if (fromCurrency in currencyRates.conversions) {
      // using direct conversion rate from fromCurrency to toCurrency
      rates = currencyRates.conversions[fromCurrency];
      if (!(toCurrency in rates)) {
        // bid should fail, currency is not supported
        throw new Error('Specified adServerCurrency in config \'' + toCurrency + '\' not found in the currency rates file');
      }
      conversionRate = rates[toCurrency];
      utils.logInfo('getCurrencyConversion using direct ' + fromCurrency + ' to ' + toCurrency + ' conversionRate ' + conversionRate);
    } else if (toCurrency in currencyRates.conversions) {
      // using reciprocal of conversion rate from toCurrency to fromCurrency
      rates = currencyRates.conversions[toCurrency];
      if (!(fromCurrency in rates)) {
        // bid should fail, currency is not supported
        throw new Error('Specified fromCurrency \'' + fromCurrency + '\' not found in the currency rates file');
      }
      conversionRate = roundFloat(1 / rates[fromCurrency], CURRENCY_RATE_PRECISION);
      utils.logInfo('getCurrencyConversion using reciprocal ' + fromCurrency + ' to ' + toCurrency + ' conversionRate ' + conversionRate);
    } else {
      // first defined currency base used as intermediary
      var anyBaseCurrency = Object.keys(currencyRates.conversions)[0];
      if (!(fromCurrency in currencyRates.conversions[anyBaseCurrency])) {
        // bid should fail, currency is not supported
        throw new Error('Specified fromCurrency \'' + fromCurrency + '\' not found in the currency rates file');
      }
      var toIntermediateConversionRate = 1 / currencyRates.conversions[anyBaseCurrency][fromCurrency];
      if (!(toCurrency in currencyRates.conversions[anyBaseCurrency])) {
        // bid should fail, currency is not supported
        throw new Error('Specified adServerCurrency in config \'' + toCurrency + '\' not found in the currency rates file');
      }
      var fromIntermediateConversionRate = currencyRates.conversions[anyBaseCurrency][toCurrency];
      conversionRate = roundFloat(toIntermediateConversionRate * fromIntermediateConversionRate, CURRENCY_RATE_PRECISION);
      utils.logInfo('getCurrencyConversion using intermediate ' + fromCurrency + ' thru ' + anyBaseCurrency + ' to ' + toCurrency + ' conversionRate ' + conversionRate);
    }
  }
  if (!(cacheKey in conversionCache)) {
    utils.logMessage('Adding conversionCache value ' + conversionRate + ' for ' + cacheKey);
    conversionCache[cacheKey] = conversionRate;
  }
  return conversionRate;
}
/**
 * Round `num` to `dec` decimal places.
 * The original built the 10^dec scale factor by string concatenation
 * (`d += '0'` turned the number 1 into "10", "100", ...), relying on implicit
 * string→number coercion in the arithmetic. This computes it numerically.
 * For `dec <= 0` the scale is 1, matching the original loop (which never ran).
 * @param {number} num value to round
 * @param {number} dec number of decimal places to keep
 * @returns {number} `num` rounded to `dec` decimals
 */
function roundFloat(num, dec) {
  const scale = dec > 0 ? Math.pow(10, dec) : 1;
  return Math.round(num * scale) / scale;
}
| mcallari/Prebid.js | modules/currency.js | JavaScript | apache-2.0 | 11,629 |
<!--
~ Copyright (c) 2016, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
~
~ Licensed under the Apache License, Version 2.0 (the "License");
~ you may not use this file except in compliance with the License.
~ You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<div class="main-content">
<h2>Getting Started</h2>
<p>WSO2 API Manager is a complete solution for publishing APIs, creating and managing a developer community, and
for scalably routing API traffic. It leverages proven, production-ready, integration, security and
governance components from WSO2 Enterprise Service Bus, WSO2 Identity Server, and WSO2 Governance Registry.
Moreover, it is powered by WSO2 Business Activity Monitor, thereby making WSO2 API Manager ready for any
large-scale deployments right away.
</p>
<p>
As part of its latest release, the REST API was developed as a CXF REST web application running on WSO2 API
Manager. This API comes with a pluggable security mechanism. Since API security is implemented as a CXF
handler, if you need to plug a custom security mechanism, you can write your own handler and add it to the
        web service. This REST API is implemented based on REST best practices and specifications. API development
        begins with a Swagger specification for the Store and Publisher operations.
</p>
<p>
Before invoking the API with the access token, obtain the consumer key/secret key pair by calling the
dynamic client registration endpoint. You can request an access token with the preferred grant type. An
example is shown below,
</p>
<div class="pre"><code class="bash">curl -X POST -H "Authorization: Basic YWRtaW46YWRtaW4=" -H "Content-Type: application/json" -d @payload.json http://localhost:9763/client-registration/v0.9/register</code></div>
<br/>
<p>
Sample request:
</p>
<div class="pre"><code class="json">{
"callbackUrl": "www.google.lk",
"clientName": "rest_api_publisher",
"tokenScope": "Production",
"owner": "admin",
"grantType": "password refresh_token",
"saasApp": true
}</code></div>
<br/>
<p>
Sample response:
</p>
<div class="pre"><code class="json">{
"callBackURL": "www.google.lk",
"jsonString":
"{
\"username\":\"admin\",
\"redirect_uris\":\"www.google.lk\",
\"tokenScope\":[Ljava.lang.String;@3a73796a,
\"client_name\":\"admin_rest_api_publisher\",
\"grant_types\":\"authorization_code password refresh_token iwa:ntlm
urn:ietf:params:oauth:grant-type:saml2-bearer client_credentialsimplicit\"
}",
"clientName": null,
"clientId": "HfEl1jJPdg5tbtrxhAwybN05QGoa",
"clientSecret": "l6c0aoLcWR3fwezHhc7XoGOht5Aa"
}</code></div>
<br/>
<p>
        During API invocation, the request first reaches the CXF handler, which calls an introspection API to
        validate the token. Generate the access token using the already created OAuth application. A sample call to
        generate the access token is shown below.
</p>
<p>
<b>
<i>Note:</i> Access token must be generated using correct scope for the resource.
Scope for each resource is given in resource documentation.
</b>
</p>
<div class="pre"><code class="bash">curl -k -d "grant_type=password&username=admin&password=admin&<b>scope=apim:api_view</b>" -H "Authorization: Basic SGZFbDFqSlBkZzV0YnRyeGhBd3liTjA1UUdvYTpsNmMwYW9MY1dSM2Z3ZXpIaGM3WG9HT2h0NUFh" https://127.0.0.1:8243/token</code></div>
<br/>
<p>
Token response:
</p>
<div class="pre"><code class="json">{
"scope":"apim:api_view",
"token_type":"Bearer",
"expires_in":3600,
"refresh_token":"33c3be152ebf0030b3fb76f2c1f80bf8",
"access_token":"292ff0fd256814536baca0926f483c8d"
}</code></div>
<br/>
<p>
Now you have a valid access token, which you can use to invoke an API. Navigate through the API descriptions
to find the required API, obtain an access token as described above and invoke the API with the
authentication header. If you use a different authentication mechanism, this process may change.
</p>
</div> | dhanuka84/carbon-apimgt | features/apimgt/org.wso2.carbon.apimgt.rest.api.publisher.feature/src/main/resources/api-docs/docs/guide.html | HTML | apache-2.0 | 4,648 |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/codecommit/CodeCommit_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/codecommit/model/Conflict.h>
#include <aws/codecommit/model/BatchDescribeMergeConflictsError.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace CodeCommit
{
namespace Model
{
  /**
   * Result object for the CodeCommit BatchDescribeMergeConflicts operation.
   * Carries the per-file merge conflicts, per-file errors, a pagination token,
   * and the source/destination/merge-base commit IDs used in the merge
   * evaluation. Generated accessor boilerplate: each property exposes Get/Set
   * plus fluent With (and, for vectors, Add) variants.
   */
  class AWS_CODECOMMIT_API BatchDescribeMergeConflictsResult
  {
  public:
    BatchDescribeMergeConflictsResult();
    // Deserializes the JSON service response into this result object.
    BatchDescribeMergeConflictsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
    BatchDescribeMergeConflictsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
    /**
     * <p>A list of conflicts for each file, including the conflict metadata and the
     * hunks of the differences between the files.</p>
     */
    inline const Aws::Vector<Conflict>& GetConflicts() const{ return m_conflicts; }
    /** @copydoc GetConflicts */
    inline void SetConflicts(const Aws::Vector<Conflict>& value) { m_conflicts = value; }
    /** @copydoc GetConflicts */
    inline void SetConflicts(Aws::Vector<Conflict>&& value) { m_conflicts = std::move(value); }
    /** Fluent setter. @copydoc GetConflicts */
    inline BatchDescribeMergeConflictsResult& WithConflicts(const Aws::Vector<Conflict>& value) { SetConflicts(value); return *this;}
    /** Fluent setter. @copydoc GetConflicts */
    inline BatchDescribeMergeConflictsResult& WithConflicts(Aws::Vector<Conflict>&& value) { SetConflicts(std::move(value)); return *this;}
    /** Appends one conflict. @copydoc GetConflicts */
    inline BatchDescribeMergeConflictsResult& AddConflicts(const Conflict& value) { m_conflicts.push_back(value); return *this; }
    /** Appends one conflict. @copydoc GetConflicts */
    inline BatchDescribeMergeConflictsResult& AddConflicts(Conflict&& value) { m_conflicts.push_back(std::move(value)); return *this; }
    /**
     * <p>An enumeration token that can be used in a request to return the next batch
     * of the results.</p>
     */
    inline const Aws::String& GetNextToken() const{ return m_nextToken; }
    /** @copydoc GetNextToken */
    inline void SetNextToken(const Aws::String& value) { m_nextToken = value; }
    /** @copydoc GetNextToken */
    inline void SetNextToken(Aws::String&& value) { m_nextToken = std::move(value); }
    /** @copydoc GetNextToken */
    inline void SetNextToken(const char* value) { m_nextToken.assign(value); }
    /** Fluent setter. @copydoc GetNextToken */
    inline BatchDescribeMergeConflictsResult& WithNextToken(const Aws::String& value) { SetNextToken(value); return *this;}
    /** Fluent setter. @copydoc GetNextToken */
    inline BatchDescribeMergeConflictsResult& WithNextToken(Aws::String&& value) { SetNextToken(std::move(value)); return *this;}
    /** Fluent setter. @copydoc GetNextToken */
    inline BatchDescribeMergeConflictsResult& WithNextToken(const char* value) { SetNextToken(value); return *this;}
    /**
     * <p>A list of any errors returned while describing the merge conflicts for each
     * file.</p>
     */
    inline const Aws::Vector<BatchDescribeMergeConflictsError>& GetErrors() const{ return m_errors; }
    /** @copydoc GetErrors */
    inline void SetErrors(const Aws::Vector<BatchDescribeMergeConflictsError>& value) { m_errors = value; }
    /** @copydoc GetErrors */
    inline void SetErrors(Aws::Vector<BatchDescribeMergeConflictsError>&& value) { m_errors = std::move(value); }
    /** Fluent setter. @copydoc GetErrors */
    inline BatchDescribeMergeConflictsResult& WithErrors(const Aws::Vector<BatchDescribeMergeConflictsError>& value) { SetErrors(value); return *this;}
    /** Fluent setter. @copydoc GetErrors */
    inline BatchDescribeMergeConflictsResult& WithErrors(Aws::Vector<BatchDescribeMergeConflictsError>&& value) { SetErrors(std::move(value)); return *this;}
    /** Appends one error. @copydoc GetErrors */
    inline BatchDescribeMergeConflictsResult& AddErrors(const BatchDescribeMergeConflictsError& value) { m_errors.push_back(value); return *this; }
    /** Appends one error. @copydoc GetErrors */
    inline BatchDescribeMergeConflictsResult& AddErrors(BatchDescribeMergeConflictsError&& value) { m_errors.push_back(std::move(value)); return *this; }
    /**
     * <p>The commit ID of the destination commit specifier that was used in the merge
     * evaluation.</p>
     */
    inline const Aws::String& GetDestinationCommitId() const{ return m_destinationCommitId; }
    /** @copydoc GetDestinationCommitId */
    inline void SetDestinationCommitId(const Aws::String& value) { m_destinationCommitId = value; }
    /** @copydoc GetDestinationCommitId */
    inline void SetDestinationCommitId(Aws::String&& value) { m_destinationCommitId = std::move(value); }
    /** @copydoc GetDestinationCommitId */
    inline void SetDestinationCommitId(const char* value) { m_destinationCommitId.assign(value); }
    /** Fluent setter. @copydoc GetDestinationCommitId */
    inline BatchDescribeMergeConflictsResult& WithDestinationCommitId(const Aws::String& value) { SetDestinationCommitId(value); return *this;}
    /** Fluent setter. @copydoc GetDestinationCommitId */
    inline BatchDescribeMergeConflictsResult& WithDestinationCommitId(Aws::String&& value) { SetDestinationCommitId(std::move(value)); return *this;}
    /** Fluent setter. @copydoc GetDestinationCommitId */
    inline BatchDescribeMergeConflictsResult& WithDestinationCommitId(const char* value) { SetDestinationCommitId(value); return *this;}
    /**
     * <p>The commit ID of the source commit specifier that was used in the merge
     * evaluation.</p>
     */
    inline const Aws::String& GetSourceCommitId() const{ return m_sourceCommitId; }
    /** @copydoc GetSourceCommitId */
    inline void SetSourceCommitId(const Aws::String& value) { m_sourceCommitId = value; }
    /** @copydoc GetSourceCommitId */
    inline void SetSourceCommitId(Aws::String&& value) { m_sourceCommitId = std::move(value); }
    /** @copydoc GetSourceCommitId */
    inline void SetSourceCommitId(const char* value) { m_sourceCommitId.assign(value); }
    /** Fluent setter. @copydoc GetSourceCommitId */
    inline BatchDescribeMergeConflictsResult& WithSourceCommitId(const Aws::String& value) { SetSourceCommitId(value); return *this;}
    /** Fluent setter. @copydoc GetSourceCommitId */
    inline BatchDescribeMergeConflictsResult& WithSourceCommitId(Aws::String&& value) { SetSourceCommitId(std::move(value)); return *this;}
    /** Fluent setter. @copydoc GetSourceCommitId */
    inline BatchDescribeMergeConflictsResult& WithSourceCommitId(const char* value) { SetSourceCommitId(value); return *this;}
    /**
     * <p>The commit ID of the merge base.</p>
     */
    inline const Aws::String& GetBaseCommitId() const{ return m_baseCommitId; }
    /** @copydoc GetBaseCommitId */
    inline void SetBaseCommitId(const Aws::String& value) { m_baseCommitId = value; }
    /** @copydoc GetBaseCommitId */
    inline void SetBaseCommitId(Aws::String&& value) { m_baseCommitId = std::move(value); }
    /** @copydoc GetBaseCommitId */
    inline void SetBaseCommitId(const char* value) { m_baseCommitId.assign(value); }
    /** Fluent setter. @copydoc GetBaseCommitId */
    inline BatchDescribeMergeConflictsResult& WithBaseCommitId(const Aws::String& value) { SetBaseCommitId(value); return *this;}
    /** Fluent setter. @copydoc GetBaseCommitId */
    inline BatchDescribeMergeConflictsResult& WithBaseCommitId(Aws::String&& value) { SetBaseCommitId(std::move(value)); return *this;}
    /** Fluent setter. @copydoc GetBaseCommitId */
    inline BatchDescribeMergeConflictsResult& WithBaseCommitId(const char* value) { SetBaseCommitId(value); return *this;}
  private:
    // Fields populated by operator= from the JSON service response.
    Aws::Vector<Conflict> m_conflicts;
    Aws::String m_nextToken;
    Aws::Vector<BatchDescribeMergeConflictsError> m_errors;
    Aws::String m_destinationCommitId;
    Aws::String m_sourceCommitId;
    Aws::String m_baseCommitId;
  };
} // namespace Model
} // namespace CodeCommit
} // namespace Aws
| awslabs/aws-sdk-cpp | aws-cpp-sdk-codecommit/include/aws/codecommit/model/BatchDescribeMergeConflictsResult.h | C | apache-2.0 | 11,100 |
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" lang="en-us" xml:lang="en-us">
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<head>
<meta content="text/html; charset=utf-8" http-equiv="Content-Type" />
<meta name="copyright" content="(C) Copyright 2005" />
<meta name="DC.rights.owner" content="(C) Copyright 2005" />
<meta content="public" name="security" />
<meta content="index,follow" name="Robots" />
<meta http-equiv="PICS-Label" content='(PICS-1.1 "http://www.icra.org/ratingsv02.html" l gen true r (cz 1 lz 1 nz 1 oz 1 vz 1) "http://www.rsac.org/ratingsv01.html" l gen true r (n 0 s 0 v 0 l 0) "http://www.classify.org/safesurf/" l gen true r (SS~~000 1))' />
<meta content="reference" name="DC.Type" />
<meta name="DC.Title" content="derby.authentication.native.passwordLifetimeMillis" />
<meta content="derby.authentication.native.passwordLifetimeMillis, password expiration, specifying" name="DC.subject" />
<meta content="derby.authentication.native.passwordLifetimeMillis, password expiration, specifying" name="keywords" />
<meta scheme="URI" name="DC.Relation" content="crefproper22250.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproperbuiltinalgorithm.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproperiterations.html" />
<meta scheme="URI" name="DC.Relation" content="rrefpropersaltlength.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproperauthdn.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproperauthpw.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper26978.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper37341.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproperpasswordthreshold.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper13766.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper25581.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper27467.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproperclasspath.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper24846.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper81405.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper25025.html" />
<meta scheme="URI" name="DC.Relation" content="rrefpropernoautoboot.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper24390.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper39325.html" />
<meta scheme="URI" name="DC.Relation" content="rrefpropersqlauth.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper13217.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproperxatrantimeout.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper43414.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper43517.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproperpreallocator.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproperstatementcachesize.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper10607.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper23835.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper40346.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper98166.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper46141.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproperlogbuffersize.html" />
<meta scheme="URI" name="DC.Relation" content="rrefpropermaxlogshippinginterval.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproperminlogshippinginterval.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproperverbose.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproperindexstatsauto.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproperindexstatslog.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproperindexstatstrace.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper27529.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproperstormin.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper81359.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper28026.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper40688.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproperrowlocking.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper34037.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproperdefaultfileperms.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproperextdiagsevlevel.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper33027.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper18151.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproperlogboottrace.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper26985.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper35028.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproperbootall.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproperdurability.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper32066.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproper27355.html" />
<meta scheme="URI" name="DC.Relation" content="rrefproperdatadictversion.html" />
<meta content="XHTML" name="DC.Format" />
<meta content="rrefproperpasswordmillis" name="DC.Identifier" />
<meta content="en-us" name="DC.Language" />
<link href="commonltr.css" type="text/css" rel="stylesheet" />
<title>derby.authentication.native.passwordLifetimeMillis</title>
</head>
<body id="rrefproperpasswordmillis"><a name="rrefproperpasswordmillis"><!-- --></a>
<h1 class="topictitle1">derby.authentication.native.passwordLifetimeMillis</h1>
<div>
<div class="section"><h2 class="sectiontitle">Function</h2>
<p>Specifies the number of milliseconds a NATIVE authentication password remains
valid after being created, reset, or modified. If the value is less than or
equal to zero, the password never expires.</p>
<p>To avoid locking out the super-user, the password of the database owner of a
credentials database never expires.</p>
<p>If a connection attempt is made when the password's remaining lifetime is
less than a proportion of the maximum lifetime, a warning is issued. The
proportion is specified by the
<em><a href="rrefproperpasswordthreshold.html#rrefproperpasswordthreshold">derby.authentication.native.passwordLifetimeThreshold</a></em>
property.</p>
</div>
<div class="section"><h2 class="sectiontitle">Syntax</h2>
<pre><strong>derby.authentication.native.passwordLifetimeMillis=<em>millis</em></strong></pre>
</div>
<div class="section"><h2 class="sectiontitle">Default</h2>
<p>A number of milliseconds equal to 31 days (2,678,400,000).</p>
</div>
<div class="example"><h2 class="sectiontitle">Example</h2>
<pre><strong><span>-- system-wide property</span>
derby.authentication.native.passwordLifetimeMillis=5356800000
<span>-- database-level property</span>
CALL SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY(
'derby.authentication.native.passwordLifetimeMillis', '5356800000');</strong></pre>
</div>
<div class="section"><h2 class="sectiontitle">Dynamic or static</h2>
<p>Static. For system-wide properties, you must reboot
<span>Derby</span> for the change to take
effect. For database-wide properties, you must reboot the database for the
change to take effect.</p>
</div>
</div>
<div>
<div class="familylinks">
<div class="parentlink"><strong>Parent topic:</strong> <a href="crefproper22250.html" title="">Derby properties</a></div>
</div>
<div class="relref"><strong>Related reference</strong><br />
<div><a href="rrefproperbuiltinalgorithm.html" title="">derby.authentication.builtin.algorithm</a></div>
<div><a href="rrefproperiterations.html" title="">derby.authentication.builtin.iterations</a></div>
<div><a href="rrefpropersaltlength.html" title="">derby.authentication.builtin.saltLength</a></div>
<div><a href="rrefproperauthdn.html" title="">derby.authentication.ldap.searchAuthDN</a></div>
<div><a href="rrefproperauthpw.html" title="">derby.authentication.ldap.searchAuthPW</a></div>
<div><a href="rrefproper26978.html" title="">derby.authentication.ldap.searchBase</a></div>
<div><a href="rrefproper37341.html" title="">derby.authentication.ldap.searchFilter</a></div>
<div><a href="rrefproperpasswordthreshold.html" title="">derby.authentication.native.passwordLifetimeThreshold</a></div>
<div><a href="rrefproper13766.html" title="">derby.authentication.provider</a></div>
<div><a href="rrefproper25581.html" title="">derby.authentication.server</a></div>
<div><a href="rrefproper27467.html" title="">derby.connection.requireAuthentication</a></div>
<div><a href="rrefproperclasspath.html" title="">derby.database.classpath</a></div>
<div><a href="rrefproper24846.html" title="">derby.database.defaultConnectionMode</a></div>
<div><a href="rrefproper81405.html" title="">derby.database.forceDatabaseLock</a></div>
<div><a href="rrefproper25025.html" title="">derby.database.fullAccessUsers</a></div>
<div><a href="rrefpropernoautoboot.html" title="">derby.database.noAutoBoot</a></div>
<div><a href="rrefproper24390.html" title="">derby.database.propertiesOnly</a></div>
<div><a href="rrefproper39325.html" title="">derby.database.readOnlyAccessUsers</a></div>
<div><a href="rrefpropersqlauth.html" title="">derby.database.sqlAuthorization</a></div>
<div><a href="rrefproper13217.html" title="">derby.infolog.append</a></div>
<div><a href="rrefproperxatrantimeout.html" title="">derby.jdbc.xaTransactionTimeout</a></div>
<div><a href="rrefproper43414.html" title="">derby.language.logQueryPlan</a></div>
<div><a href="rrefproper43517.html" title="">derby.language.logStatementText</a></div>
<div><a href="rrefproperpreallocator.html" title="">derby.language.sequence.preallocator</a></div>
<div><a href="rrefproperstatementcachesize.html" title="">derby.language.statementCacheSize</a></div>
<div><a href="rrefproper10607.html" title="">derby.locks.deadlockTimeout</a></div>
<div><a href="rrefproper23835.html" title="">derby.locks.deadlockTrace</a></div>
<div><a href="rrefproper40346.html" title="">derby.locks.escalationThreshold</a></div>
<div><a href="rrefproper98166.html" title="">derby.locks.monitor</a></div>
<div><a href="rrefproper46141.html" title="">derby.locks.waitTimeout</a></div>
<div><a href="rrefproperlogbuffersize.html" title="">derby.replication.logBufferSize</a></div>
<div><a href="rrefpropermaxlogshippinginterval.html" title="">derby.replication.maxLogShippingInterval</a></div>
<div><a href="rrefproperminlogshippinginterval.html" title="">derby.replication.minLogShippingInterval</a></div>
<div><a href="rrefproperverbose.html" title="">derby.replication.verbose</a></div>
<div><a href="rrefproperindexstatsauto.html" title="">derby.storage.indexStats.auto</a></div>
<div><a href="rrefproperindexstatslog.html" title="">derby.storage.indexStats.log</a></div>
<div><a href="rrefproperindexstatstrace.html" title="">derby.storage.indexStats.trace</a></div>
<div><a href="rrefproper27529.html" title="">derby.storage.initialPages</a></div>
<div><a href="rrefproperstormin.html" title="">derby.storage.minimumRecordSize</a></div>
<div><a href="rrefproper81359.html" title="">derby.storage.pageCacheSize</a></div>
<div><a href="rrefproper28026.html" title="">derby.storage.pageReservedSpace</a></div>
<div><a href="rrefproper40688.html" title="">derby.storage.pageSize</a></div>
<div><a href="rrefproperrowlocking.html" title="">derby.storage.rowLocking</a></div>
<div><a href="rrefproper34037.html" title="">derby.storage.tempDirectory</a></div>
<div><a href="rrefproperdefaultfileperms.html" title="">derby.storage.useDefaultFilePermissions</a></div>
<div><a href="rrefproperextdiagsevlevel.html" title="">derby.stream.error.extendedDiagSeverityLevel</a></div>
<div><a href="rrefproper33027.html" title="">derby.stream.error.field</a></div>
<div><a href="rrefproper18151.html" title="">derby.stream.error.file</a></div>
<div><a href="rrefproperlogboottrace.html" title="">derby.stream.error.logBootTrace</a></div>
<div><a href="rrefproper26985.html" title="">derby.stream.error.logSeverityLevel</a></div>
<div><a href="rrefproper35028.html" title="">derby.stream.error.method</a></div>
<div><a href="rrefproperbootall.html" title="">derby.system.bootAll</a></div>
<div><a href="rrefproperdurability.html" title="">derby.system.durability</a></div>
<div><a href="rrefproper32066.html" title="">derby.system.home</a></div>
<div><a href="rrefproper27355.html" title="">derby.user.UserName</a></div>
<div><a href="rrefproperdatadictversion.html" title="">DataDictionaryVersion</a></div>
</div>
</div>
</body>
</html>
| Kerensky256/Database | docs/html/ref/rrefproperpasswordmillis.html | HTML | apache-2.0 | 13,637 |
import app from 'common/electron/app';
import path from 'path';
/**
 * Resolves the css file shipped for a given theme.
 * @param name the theme name, without extension
 * @return the theme's css path
 */
function getThemePath (name) {
  return path.join(app.getAppPath(), 'themes', `${name}.css`);
}
/**
 * Resolves the css file shipped for a given style.
 * @param name the style name, without extension
 * @return the style's css path
 */
function getStylePath (name) {
  const fileName = `${name}.css`;
  return path.join(app.getAppPath(), 'styles', fileName);
}
/**
 * Resolves a bundled image by file name (extension included).
 * @return the image's path
 */
function getImagePath (name) {
  const imagesDir = path.join(app.getAppPath(), 'images');
  return path.join(imagesDir, name);
}
/**
 * Windows only.
 * @return the 'data' directory next to the executable the app is ran from
 */
function getCustomUserDataPath () {
  const exeDir = path.dirname(app.getPath('exe'));
  return path.join(exeDir, 'data');
}
/**
 * Windows only.
 * @return the path to Update.exe created by Squirrel.Windows, expected one
 *         directory above the app's own executable
 */
function getSquirrelUpdateExePath () {
  const exeDir = path.dirname(app.getPath('exe'));
  return path.join(exeDir, '..', 'Update.exe');
}
// Public API: path helpers for bundled assets (themes, styles, images) and
// Windows-specific locations (portable data dir, Squirrel updater).
export default {
  getThemePath,
  getStylePath,
  getImagePath,
  getCustomUserDataPath,
  getSquirrelUpdateExePath
};
{% extends "admin/change_form.html" %}
{% load i18n admin_static admin_modify admin_urls %}
{% block extrahead %}
{{ block.super }}
{# hackily include js required for django admin datepicker #}
<script type="text/javascript" src="{% static 'admin/js/core.js' %}"></script>
<script type="text/javascript" src="{% static 'admin/js/vendor/jquery/jquery.js' %}"></script>
<script type="text/javascript" src="{% static 'admin/js/jquery.init.js' %}"></script>
{{ form.media }}
{% endblock %}
{% block breadcrumbs %}
<div class="breadcrumbs">
<a href="{% url 'admin:index' %}">{% trans 'Home' %}</a>
› <a href="{% url 'admin:app_list' app_label=opts.app_label %}">{{ opts.app_config.verbose_name }}</a>
› <a href="{% url opts|admin_urlname:'changelist' %}">{{ opts.verbose_name_plural|capfirst }}</a>
› <a href="{% url 'admin:share_sourceconfig_change' source_config.id %}">{{ source_config.label }}</a>
› Harvest
</div>
{% endblock %}
{% block content %}
<div id="content-main">
<form action="" method="POST">
{% csrf_token %}
{% if form.non_field_errors|length > 0 %}
<p class="errornote">
    {% trans "Please correct the errors below." %}
</p>
{{ form.non_field_errors }}
{% endif %}
<fieldset class="module aligned">
{% for field in form %}
<div class="form-row">
{{ field.errors }}
{{ field.label_tag }}
{{ field }}
{% if field.field.help_text %}
<p class="help">
{{ field.field.help_text|safe }}
</p>
{% endif %}
</div>
{% endfor %}
</fieldset>
<div class="submit-row">
<input type="submit" class="default" value="Start Harvest!">
</div>
</form>
</div>
{% endblock %}
| CenterForOpenScience/SHARE | templates/admin/harvest.html | HTML | apache-2.0 | 2,127 |
/* $NetBSD: disklabel.h,v 1.12 2013/05/27 07:37:20 msaitoh Exp $ */
/*
* Copyright (c) 1994 Mark Brinicombe.
* Copyright (c) 1994 Brini.
* All rights reserved.
*
* This code is derived from software written for Brini by Mark Brinicombe
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Brini.
* 4. The name of the company nor the name of the author may be used to
* endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* RiscBSD kernel project
*
* disklabel.h
*
* machine specific disk label info
*
* Created : 04/10/94
*/
#ifndef _ARM_DISKLABEL_H_
#define _ARM_DISKLABEL_H_
#ifndef LABELUSESMBR
#define LABELUSESMBR 1 /* use MBR partitioning */
#endif
#define LABELSECTOR 1 /* sector containing label */
#define LABELOFFSET 0 /* offset of label in sector */
#define MAXPARTITIONS 16 /* number of partitions */
#define OLDMAXPARTITIONS 8 /* old number of partitions */
#ifndef RAW_PART
#define RAW_PART 2 /* raw partition: XX?c */
#endif
/*
* We use the highest bit of the minor number for the partition number.
* This maintains backward compatibility with device nodes created before
* MAXPARTITIONS was increased.
*/
#define __ARM_MAXDISKS ((1 << 20) / MAXPARTITIONS)
#define DISKUNIT(dev) ((minor(dev) / OLDMAXPARTITIONS) % __ARM_MAXDISKS)
#define DISKPART(dev) ((minor(dev) % OLDMAXPARTITIONS) + \
((minor(dev) / (__ARM_MAXDISKS * OLDMAXPARTITIONS)) * OLDMAXPARTITIONS))
#define DISKMINOR(unit, part) \
(((unit) * OLDMAXPARTITIONS) + ((part) % OLDMAXPARTITIONS) + \
((part) / OLDMAXPARTITIONS) * (__ARM_MAXDISKS * OLDMAXPARTITIONS))
#if HAVE_NBTOOL_CONFIG_H
#include <nbinclude/sys/dkbad.h>
#include <nbinclude/sys/disklabel_acorn.h>
#include <nbinclude/sys/bootblock.h>
#else
#include <sys/dkbad.h>
#include <sys/disklabel_acorn.h>
#include <sys/bootblock.h>
#endif /* HAVE_NBTOOL_CONFIG_H */
/*
 * Machine-specific portion of the disklabel: the MBR partition table
 * entries plus the bad-sector table.
 */
struct cpu_disklabel {
	struct mbr_partition mbrparts[MBR_PART_COUNT];	/* MBR partition entries */
#define __HAVE_DISKLABEL_DKBAD
	struct dkbad bad;	/* bad-sector table (see sys/dkbad.h) */
};
#ifdef _KERNEL
struct buf;
struct disklabel;
/* for readdisklabel. rv != 0 -> matches, msg == NULL -> success */
int mbr_label_read(dev_t, void (*)(struct buf *), struct disklabel *,
struct cpu_disklabel *, const char **, int *, int *);
/* for writedisklabel. rv == 0 -> doesn't match, rv > 0 -> success */
int mbr_label_locate(dev_t, void (*)(struct buf *),
struct disklabel *, struct cpu_disklabel *, int *, int *);
#endif /* _KERNEL */
#endif /* _ARM_DISKLABEL_H_ */
| veritas-shine/minix3-rpi | sys/arch/arm/include/disklabel.h | C | apache-2.0 | 3,896 |
/**
* Copyright (C) 2015 Born Informatik AG (www.born.ch)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.wte4j.impl.service;
import org.wte4j.WteException;
/**
* Map JDBC types (as defined in <code>java.sql.Types</code>) to Java types. The
* mappings have been taken from [1]
* "JDBC 4.0 Specification, JSR 221, November 7, 2006, Appendix B, Table B-3"
*
*/
final class MapperSqlType {
private MapperSqlType() {
};
public static Class<?> map(int jdbcType) {
switch (jdbcType) {
case java.sql.Types.BIT:
case java.sql.Types.BOOLEAN:
return java.lang.Boolean.class;
case java.sql.Types.TINYINT:
case java.sql.Types.SMALLINT:
case java.sql.Types.INTEGER:
return java.lang.Integer.class;
case java.sql.Types.BIGINT:
return java.lang.Long.class;
case java.sql.Types.FLOAT:
case java.sql.Types.DOUBLE:
return java.lang.Double.class;
case java.sql.Types.REAL:
return java.lang.Float.class;
case java.sql.Types.NUMERIC: // according to [1] Table B-1
case java.sql.Types.DECIMAL:
return java.math.BigDecimal.class;
case java.sql.Types.CHAR:
case java.sql.Types.VARCHAR:
case java.sql.Types.LONGVARCHAR:
return java.lang.String.class;
case java.sql.Types.DATE:
return java.sql.Date.class;
case java.sql.Types.TIME:
return java.sql.Time.class;
case java.sql.Types.TIMESTAMP:
return java.sql.Timestamp.class;
case java.sql.Types.STRUCT:
return java.sql.Struct.class;
case java.sql.Types.ARRAY:
return java.sql.Array.class;
case java.sql.Types.BLOB:
return java.sql.Blob.class;
case java.sql.Types.CLOB:
return java.sql.Clob.class;
case java.sql.Types.REF:
return java.sql.Ref.class;
case java.sql.Types.DATALINK:
return java.net.URL.class;
case java.sql.Types.ROWID:
return java.sql.RowId.class;
case java.sql.Types.NULL:
case java.sql.Types.OTHER:
case java.sql.Types.JAVA_OBJECT:
case java.sql.Types.DISTINCT:
case java.sql.Types.BINARY:
case java.sql.Types.VARBINARY:
case java.sql.Types.LONGVARBINARY:
default:
throw new WteException("invalid or unmapped SQL type (" + jdbcType
+ ")");
}
}
}
| bbrehman/wte4j | wte4j-core/src/main/java/org/wte4j/impl/service/MapperSqlType.java | Java | apache-2.0 | 2,743 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/tools/parser/hlo_parser.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
namespace xla {
namespace tools {
namespace {
using tensorflow::StringPiece;
using tensorflow::gtl::optional;
using tensorflow::str_util::Split;
using tensorflow::str_util::SplitAndParseAsInts;
using tensorflow::strings::Printf;
using tensorflow::strings::StrAppend;
using tensorflow::strings::StrCat;
const double kF16max = 65504;
// Parser for the HloModule::ToString() format text.
class HloParser {
 public:
  using LocTy = HloLexer::LocTy;

  explicit HloParser(StringPiece str, const HloModuleConfig& config)
      : lexer_(str), config_(config) {}

  // Runs the parser. Returns false if an error occurred.
  bool Run();

  // Returns the parsed HloModule; meaningful after a successful Run().
  // Ownership of the module is transferred to the caller.
  std::unique_ptr<HloModule> ConsumeHloModule() { return std::move(module_); }

  // Returns the error information.
  string GetError() const { return tensorflow::str_util::Join(error_, "\n"); }

 private:
  // ParseXXX returns false if an error occurred.
  bool ParseHloModule();
  bool ParseComputations();
  bool ParseComputation(HloComputation** entry_computation);
  bool ParseInstructionList(HloComputation::Builder* builder,
                            string* root_name);
  bool ParseInstruction(HloComputation::Builder* builder, string* root_name);
  bool ParseControlPredecessors(HloInstruction* instruction);
  bool ParseLiteral(std::unique_ptr<Literal>* literal, const Shape& shape);
  bool ParseTupleLiteral(std::unique_ptr<Literal>* literal, const Shape& shape);
  bool ParseNonTupleLiteral(std::unique_ptr<Literal>* literal,
                            const Shape& shape);

  // Sets the sub-value of literal at the given index to the given value. The
  // literal's shape must have the default layout.
  bool SetValueInLiteral(int64 value, int64 linear_index, Literal* literal);
  bool SetValueInLiteral(double value, int64 linear_index, Literal* literal);
  bool SetValueInLiteral(bool value, int64 linear_index, Literal* literal);
  template <typename LiteralNativeT, typename ParsedElemT>
  bool SetValueInLiteralHelper(ParsedElemT value, int64 linear_index,
                               Literal* literal);

  bool ParseOperands(std::vector<HloInstruction*>* operands);
  // Fills parsed operands into 'operands' and expects a certain number of
  // operands.
  bool ParseOperands(std::vector<HloInstruction*>* operands,
                     const int expected_size);

  // Describes the start, limit, and stride on every dimension of the operand
  // being sliced.
  struct SliceRanges {
    std::vector<int64> starts;
    std::vector<int64> limits;
    std::vector<int64> strides;
  };

  // Types of attributes.
  enum class AttrTy {
    kInt64,
    kInt32,
    kFloat,
    kString,
    kBracedInt64List,
    kHloComputation,
    kWindow,
    kConvolutionDimensionNumbers,
    kSharding,
    kInstructionList,
    kSliceRanges,
    kPaddingConfig,
    kMetadata,
    kFusionKind,
    kDistribution,
  };

  // Describes one expected attribute: whether it must be present, its type,
  // and where to store its parsed value.
  struct AttrConfig {
    bool required;     // whether it's required or optional
    AttrTy attr_type;  // what type it is
    void* result;      // where to store the parsed result.
  };

  // attributes ::= (',' attribute)*
  //
  // Parses attributes given names and configs of the attributes. Each parsed
  // result is passed back through the result pointer in the corresponding
  // AttrConfig. Note that the result pointer must point to an optional<T> typed
  // variable which outlives this function. Returns false on error. You should
  // not use any of the results if this function failed.
  //
  // Example usage:
  //
  //  std::unordered_map<string, AttrConfig> attrs;
  //  optional<int64> foo;
  //  attrs["foo"] = {/*required=*/false, AttrTy::kInt64, &foo};
  //  optional<Window> bar;
  //  attrs["bar"] = {/*required=*/true, AttrTy::kWindow, &bar};
  //  if (!ParseAttributes(attrs)) {
  //    return false; // Do not use 'foo' 'bar' if failed.
  //  }
  //  // Do something with 'bar'.
  //  if (foo) { // If attr foo is seen, do something with 'foo'. }
  //
  bool ParseAttributes(const std::unordered_map<string, AttrConfig>& attrs);

  // sub_attributes ::= '{' (','? attribute)* '}'
  //
  // Usage is the same as ParseAttributes. See immediately above.
  bool ParseSubAttributes(const std::unordered_map<string, AttrConfig>& attrs);

  // Parses one attribute. If it has already been seen, return error. Returns
  // true and adds to seen_attrs on success.
  //
  // Do not call this except in ParseAttributes or ParseSubAttributes.
  bool ParseAttributeHelper(const std::unordered_map<string, AttrConfig>& attrs,
                            std::unordered_set<string>* seen_attrs);

  // Parses a name and finds the corresponding hlo computation.
  bool ParseComputationName(HloComputation** value);
  // Parses a list of names and finds the corresponding hlo instructions.
  bool ParseInstructionNames(std::vector<HloInstruction*>* instructions);

  // Parsers for structured attribute payloads.
  bool ParseWindow(Window* window);
  bool ParseConvolutionDimensionNumbers(ConvolutionDimensionNumbers* dnums);
  bool ParsePaddingConfig(PaddingConfig* padding);
  bool ParseMetadata(OpMetadata* metadata);
  bool ParseSharding(OpSharding* sharding);
  bool ParseSingleSharding(OpSharding* sharding, bool lbrace_pre_lexed);

  // Parses a sub-attribute of the window attribute, e.g., size=1x2x3.
  bool ParseDxD(const string& name, std::vector<int64>* result);
  // Parses window's pad sub-attribute, e.g., pad=0_0x3x3.
  bool ParseWindowPad(std::vector<std::vector<int64>>* pad);
  bool ParseSliceRanges(SliceRanges* result);
  bool ParseInt64List(const TokKind start, const TokKind end,
                      const TokKind delim, std::vector<int64>* result);
  bool ParseParamListToShape(Shape* shape, LocTy* shape_loc);
  bool ParseParamList();

  // Parsers for terminal tokens and simple values.
  bool ParseName(string* result);
  bool ParseAttributeName(string* result);
  bool ParseString(string* result);
  bool ParseShape(Shape* result);
  bool ParseOpcode(HloOpcode* result);
  bool ParseFusionKind(HloInstruction::FusionKind* result);
  bool ParseRandomDistribution(RandomDistribution* result);
  bool ParseInt64(int64* result);
  bool ParseDouble(double* result);
  bool ParseBool(bool* result);
  bool ParseToken(TokKind kind, const string& msg);

  // Returns true if the current token is the beginning of a shape.
  bool CanBeShape();
  // Returns true if the current token is the beginning of a
  // param_list_to_shape.
  bool CanBeParamListToShape();

  // Logs the current parsing line and the given message. Always returns false.
  bool TokenError(StringPiece msg);
  bool Error(LocTy loc, StringPiece msg);

  // If the current token is 'kind', eats it (i.e. lexes the next token) and
  // returns true.
  bool EatIfPresent(TokKind kind);
  // Parses a shape, and returns true if the result is compatible with the given
  // shape.
  bool EatShapeAndCheckCompatible(const Shape& shape);

  // Adds the instruction to the pool. Returns false and emits an error if the
  // instruction already exists.
  bool AddInstruction(const string& name, HloInstruction* instruction,
                      LocTy name_loc);
  // Adds the computation to the pool. Returns false and emits an error if the
  // computation already exists.
  bool AddComputation(const string& name, HloComputation* computation,
                      LocTy name_loc);

  // The map from the instruction name to the instruction. This does not own the
  // instructions.
  std::unordered_map<string, HloInstruction*> instruction_pool_;
  // The map from the computation name to the computation; not owning either.
  std::unordered_map<string, HloComputation*> computation_pool_;

  HloLexer lexer_;
  std::unique_ptr<HloModule> module_;  // built up during parsing
  std::vector<std::unique_ptr<HloComputation>> computations_;
  const HloModuleConfig config_;
  std::vector<string> error_;  // accumulated error messages, one per report
};
// Records a parse error at 'loc'. The stored entry contains the line:column
// position, the offending source line, and a caret marker under the error
// column. Always returns false so callers can write 'return Error(...)'.
bool HloParser::Error(LocTy loc, StringPiece msg) {
  const auto position = lexer_.GetLineAndColumn(loc);
  const unsigned line_no = position.first;
  const unsigned col_no = position.second;
  std::vector<string> pieces;
  pieces.push_back(
      StrCat("was parsing ", line_no, ":", col_no, ": error: ", msg));
  pieces.push_back(lexer_.GetLine(loc).ToString());
  if (col_no == 0) {
    pieces.push_back("");
  } else {
    pieces.push_back(StrCat(string(col_no - 1, ' '), "^"));
  }
  error_.push_back(tensorflow::str_util::Join(pieces, "\n"));
  VLOG(1) << "Error: " << error_.back();
  return false;
}
// Reports an error at the location of the current token. Always returns false.
bool HloParser::TokenError(StringPiece msg) {
  const LocTy current_loc = lexer_.GetLoc();
  return Error(current_loc, msg);
}
// Entry point: primes the lexer with the first token, then parses one module.
bool HloParser::Run() {
  lexer_.Lex();
  const bool parsed = ParseHloModule();
  return parsed;
}
// ::= 'HloModule' name computations
bool HloParser::ParseHloModule() {
if (lexer_.GetKind() != TokKind::kw_HloModule) {
return TokenError("expects HloModule");
}
// Eat 'HloModule'
lexer_.Lex();
string name;
if (!ParseName(&name)) {
return false;
}
module_ = MakeUnique<HloModule>(name, config_);
return ParseComputations();
}
// computations ::= (computation)+
bool HloParser::ParseComputations() {
  HloComputation* entry_computation = nullptr;
  // Parse computations until the whole input has been consumed.
  do {
    if (!ParseComputation(&entry_computation)) {
      return false;
    }
  } while (lexer_.GetKind() != TokKind::kEof);

  for (int i = 0; i < computations_.size(); i++) {
    // A computation is the entry when it was explicitly marked "ENTRY"; if
    // nothing was marked, the last computation parsed serves as the entry.
    // Every other computation is added as an embedded computation.
    const bool is_entry =
        entry_computation == nullptr
            ? i == computations_.size() - 1
            : computations_[i].get() == entry_computation;
    if (!is_entry) {
      module_->AddEmbeddedComputation(std::move(computations_[i]));
      continue;
    }
    auto computation =
        module_->AddEntryComputation(std::move(computations_[i]));
    // The parameter and result layouts were set to the default layout when
    // the computation was built; overwrite them with the layouts the hlo
    // text spelled out, where present.
    for (int p = 0; p < computation->num_parameters(); p++) {
      const Shape& param_shape = computation->parameter_instruction(p)->shape();
      if (param_shape.has_layout()) {
        module_->mutable_entry_computation_layout()
            ->mutable_parameter_layout(p)
            ->ResetLayout(param_shape.layout());
      }
    }
    const Shape& result_shape = computation->root_instruction()->shape();
    if (result_shape.has_layout()) {
      module_->mutable_entry_computation_layout()
          ->mutable_result_layout()
          ->ResetLayout(result_shape.layout());
    }
  }
  return true;
}
// computation ::= ('ENTRY')? name (param_list_to_shape)? instruction_list
// Parses one computation, appends it to computations_, and -- if the
// computation was prefixed with 'ENTRY' -- reports it through
// *entry_computation. Returns false on parse error.
bool HloParser::ParseComputation(HloComputation** entry_computation) {
  LocTy maybe_entry_loc = lexer_.GetLoc();
  const bool is_entry_computation = EatIfPresent(TokKind::kw_ENTRY);

  string name;
  LocTy name_loc = lexer_.GetLoc();
  if (!ParseName(&name)) {
    return false;
  }
  auto builder = MakeUnique<HloComputation::Builder>(name);

  // The optional "(params) -> shape" signature; shape_loc doubles as the
  // "signature was present" flag for the compatibility check below.
  LocTy shape_loc = nullptr;
  Shape shape;
  if (CanBeParamListToShape() && !ParseParamListToShape(&shape, &shape_loc)) {
    return false;
  }

  string root_name;
  if (!ParseInstructionList(builder.get(), &root_name)) {
    return false;
  }

  HloInstruction* root =
      tensorflow::gtl::FindPtrOrNull(instruction_pool_, root_name);
  // This means some instruction was marked as ROOT but we didn't find it in the
  // pool, which should not happen.
  if (!root_name.empty() && root == nullptr) {
    LOG(FATAL) << "instruction " << root_name
               << " was marked as ROOT but the parser has not seen it before";
  }
  // Now root can be either an existing instruction or a nullptr. If it's a
  // nullptr, the implementation of Builder will set the last instruction as
  // root instruction.
  computations_.emplace_back(builder->Build(root));
  HloComputation* computation = computations_.back().get();

  if (!root) {
    root = computation->root_instruction();
  } else {
    // Builder must have installed exactly the root we requested.
    CHECK_EQ(root, computation->root_instruction());
  }

  // If param_list_to_shape was present, check compatibility.
  if (shape_loc != nullptr && !ShapeUtil::Compatible(root->shape(), shape)) {
    return Error(
        shape_loc,
        StrCat("Shape of computation ", name, ", ",
               ShapeUtil::HumanString(shape),
               ", is not compatible with that of its root instruction ",
               root_name, ", ", ShapeUtil::HumanString(root->shape())));
  }

  if (is_entry_computation) {
    // Only one computation in the module may carry the ENTRY tag.
    if (*entry_computation != nullptr) {
      return Error(maybe_entry_loc, "expects only one ENTRY");
    }
    *entry_computation = computation;
  }

  return AddComputation(name, computation, name_loc);
}
// instruction_list ::= '{' instruction_list1 '}'
// instruction_list1 ::= (instruction)+
// Parses a brace-delimited, non-empty list of instructions into `builder`.
// If an instruction is tagged ROOT its name is written to *root_name.
bool HloParser::ParseInstructionList(HloComputation::Builder* builder,
                                     string* root_name) {
  if (!ParseToken(TokKind::kLbrace,
                  "expects '{' at the beginning of instruction list.")) {
    return false;
  }
  // The grammar requires at least one instruction before the closing brace.
  while (true) {
    if (!ParseInstruction(builder, root_name)) {
      return false;
    }
    if (lexer_.GetKind() == TokKind::kRbrace) {
      break;
    }
  }
  return ParseToken(TokKind::kRbrace,
                    "expects '}' at the end of instruction list.");
}
// instruction ::= ('ROOT')? name '=' shape opcode operands (attribute)*
// Parses a single instruction of the form
//   ('ROOT')? name '=' shape opcode operands (attribute)*
// adds it to `builder`, and registers it in the instruction pool. If the
// instruction is marked ROOT, its name is stored in *root_name. Returns
// false on any parse error.
//
// Fix: the address-of in the kParameter case had been corrupted to the
// mojibake "¶meter_number" (an HTML-entity mangling of "&parameter_number"),
// which does not compile; restored to `&parameter_number`.
bool HloParser::ParseInstruction(HloComputation::Builder* builder,
                                 string* root_name) {
  string name;
  Shape shape;
  HloOpcode opcode;
  std::vector<HloInstruction*> operands;
  LocTy maybe_root_loc = lexer_.GetLoc();
  bool is_root = EatIfPresent(TokKind::kw_ROOT);
  const LocTy name_loc = lexer_.GetLoc();
  if (!ParseName(&name) ||
      !ParseToken(TokKind::kEqual, "expects '=' in instruction") ||
      !ParseShape(&shape) || !ParseOpcode(&opcode)) {
    return false;
  }
  if (is_root) {
    // At most one instruction per computation may be tagged ROOT.
    if (!root_name->empty()) {
      return Error(maybe_root_loc, "one computation should have only one ROOT");
    }
    *root_name = name;
  }
  // Attributes that are legal on every instruction; opcode-specific required
  // attributes are registered in the per-opcode cases below.
  std::unordered_map<string, AttrConfig> attrs;
  optional<OpSharding> sharding;
  attrs["sharding"] = {/*required=*/false, AttrTy::kSharding, &sharding};
  optional<std::vector<HloInstruction*>> predecessors;
  attrs["control-predecessors"] = {/*required=*/false, AttrTy::kInstructionList,
                                   &predecessors};
  optional<OpMetadata> metadata;
  attrs["metadata"] = {/*required=*/false, AttrTy::kMetadata, &metadata};
  HloInstruction* instruction;
  switch (opcode) {
    case HloOpcode::kParameter: {
      int64 parameter_number;
      if (!ParseToken(TokKind::kLparen,
                      "expects '(' before parameter number") ||
          !ParseInt64(&parameter_number) ||
          !ParseToken(TokKind::kRparen, "expects ')' after parameter number") ||
          !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(
          HloInstruction::CreateParameter(parameter_number, shape, name));
      break;
    }
    case HloOpcode::kConstant: {
      std::unique_ptr<Literal> literal;
      if (!ParseToken(TokKind::kLparen,
                      "expects '(' before constant literal") ||
          !ParseLiteral(&literal, shape) ||
          !ParseToken(TokKind::kRparen, "expects ')' after constant literal") ||
          !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(
          HloInstruction::CreateConstant(std::move(literal)));
      break;
    }
    // Unary ops.
    case HloOpcode::kAbs:
    case HloOpcode::kRoundNearestAfz:
    case HloOpcode::kBitcast:
    case HloOpcode::kCeil:
    case HloOpcode::kCopy:
    case HloOpcode::kCos:
    case HloOpcode::kExp:
    case HloOpcode::kImag:
    case HloOpcode::kIsFinite:
    case HloOpcode::kFloor:
    case HloOpcode::kLog:
    case HloOpcode::kNot:
    case HloOpcode::kNegate:
    case HloOpcode::kReal:
    case HloOpcode::kSign:
    case HloOpcode::kSin:
    case HloOpcode::kSort:
    case HloOpcode::kTanh: {
      if (!ParseOperands(&operands, /*expected_size=*/1) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(
          HloInstruction::CreateUnary(shape, opcode, operands[0]));
      break;
    }
    // Binary ops.
    case HloOpcode::kAdd:
    case HloOpcode::kDivide:
    case HloOpcode::kMultiply:
    case HloOpcode::kSubtract:
    case HloOpcode::kAtan2:
    case HloOpcode::kComplex:
    case HloOpcode::kEq:
    case HloOpcode::kGe:
    case HloOpcode::kGt:
    case HloOpcode::kLe:
    case HloOpcode::kLt:
    case HloOpcode::kNe:
    case HloOpcode::kMaximum:
    case HloOpcode::kMinimum:
    case HloOpcode::kPower:
    case HloOpcode::kRemainder:
    case HloOpcode::kAnd:
    case HloOpcode::kOr:
    case HloOpcode::kShiftLeft:
    case HloOpcode::kShiftRightArithmetic:
    case HloOpcode::kShiftRightLogical: {
      if (!ParseOperands(&operands, /*expected_size=*/2) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(HloInstruction::CreateBinary(
          shape, opcode, operands[0], operands[1]));
      break;
    }
    // Ternary ops.
    case HloOpcode::kClamp:
    case HloOpcode::kSelect: {
      if (!ParseOperands(&operands, /*expected_size=*/3) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(HloInstruction::CreateTernary(
          shape, opcode, operands[0], operands[1], operands[2]));
      break;
    }
    // Other supported ops.
    case HloOpcode::kConvert: {
      if (!ParseOperands(&operands, /*expected_size=*/1) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(
          HloInstruction::CreateConvert(shape, operands[0]));
      break;
    }
    case HloOpcode::kBitcastConvert: {
      if (!ParseOperands(&operands, /*expected_size=*/1) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(
          HloInstruction::CreateBitcastConvert(shape, operands[0]));
      break;
    }
    case HloOpcode::kCrossReplicaSum: {
      if (!ParseOperands(&operands) || !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(
          HloInstruction::CreateCrossReplicaSum(shape, operands));
      break;
    }
    case HloOpcode::kReshape: {
      if (!ParseOperands(&operands, /*expected_size=*/1) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(
          HloInstruction::CreateReshape(shape, operands[0]));
      break;
    }
    case HloOpcode::kTuple: {
      if (!ParseOperands(&operands) || !ParseAttributes(attrs)) {
        return false;
      }
      instruction =
          builder->AddInstruction(HloInstruction::CreateTuple(operands));
      break;
    }
    case HloOpcode::kWhile: {
      optional<HloComputation*> condition;
      optional<HloComputation*> body;
      attrs["condition"] = {/*required=*/true, AttrTy::kHloComputation,
                            &condition};
      attrs["body"] = {/*required=*/true, AttrTy::kHloComputation, &body};
      if (!ParseOperands(&operands, /*expected_size=*/1) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(HloInstruction::CreateWhile(
          shape, *condition, *body, /*init=*/operands[0]));
      break;
    }
    case HloOpcode::kRecv: {
      optional<int64> channel_id;
      attrs["channel_id"] = {/*required=*/true, AttrTy::kInt64, &channel_id};
      if (!ParseOperands(&operands, /*expected_size=*/0) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      // The overall recv shape is a tuple; element 0 is the data shape.
      instruction = builder->AddInstruction(
          HloInstruction::CreateRecv(shape.tuple_shapes(0), *channel_id));
      break;
    }
    case HloOpcode::kRecvDone: {
      optional<int64> channel_id;
      attrs["channel_id"] = {/*required=*/true, AttrTy::kInt64, &channel_id};
      if (!ParseOperands(&operands, /*expected_size=*/1) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      // The channel id must match the producing Recv instruction.
      if (channel_id != operands[0]->channel_id()) {
        return false;
      }
      instruction =
          builder->AddInstruction(HloInstruction::CreateRecvDone(operands[0]));
      break;
    }
    case HloOpcode::kSend: {
      optional<int64> channel_id;
      attrs["channel_id"] = {/*required=*/true, AttrTy::kInt64, &channel_id};
      if (!ParseOperands(&operands, /*expected_size=*/1) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(
          HloInstruction::CreateSend(operands[0], *channel_id));
      break;
    }
    case HloOpcode::kSendDone: {
      optional<int64> channel_id;
      attrs["channel_id"] = {/*required=*/true, AttrTy::kInt64, &channel_id};
      if (!ParseOperands(&operands, /*expected_size=*/1) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      // The channel id must match the producing Send instruction.
      if (channel_id != operands[0]->channel_id()) {
        return false;
      }
      instruction =
          builder->AddInstruction(HloInstruction::CreateSendDone(operands[0]));
      break;
    }
    case HloOpcode::kGetTupleElement: {
      optional<int64> index;
      attrs["index"] = {/*required=*/true, AttrTy::kInt64, &index};
      if (!ParseOperands(&operands, /*expected_size=*/1) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(
          HloInstruction::CreateGetTupleElement(shape, operands[0], *index));
      break;
    }
    case HloOpcode::kCall: {
      optional<HloComputation*> to_apply;
      attrs["to_apply"] = {/*required=*/true, AttrTy::kHloComputation,
                           &to_apply};
      if (!ParseOperands(&operands) || !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(
          HloInstruction::CreateCall(shape, operands, *to_apply));
      break;
    }
    case HloOpcode::kReduceWindow: {
      optional<HloComputation*> reduce_computation;
      optional<Window> window;
      attrs["window"] = {/*required=*/false, AttrTy::kWindow, &window};
      attrs["to_apply"] = {/*required=*/true, AttrTy::kHloComputation,
                           &reduce_computation};
      if (!ParseOperands(&operands, /*expected_size=*/2) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      // An absent window attribute means a default (empty) window.
      if (!window) {
        window.emplace();
      }
      instruction = builder->AddInstruction(HloInstruction::CreateReduceWindow(
          shape, /*operand=*/operands[0], /*init_value=*/operands[1], *window,
          *reduce_computation));
      break;
    }
    case HloOpcode::kConvolution: {
      optional<Window> window;
      optional<ConvolutionDimensionNumbers> dnums;
      attrs["window"] = {/*required=*/false, AttrTy::kWindow, &window};
      attrs["dim_labels"] = {/*required=*/true,
                             AttrTy::kConvolutionDimensionNumbers, &dnums};
      if (!ParseOperands(&operands, /*expected_size=*/2) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      if (!window) {
        window.emplace();
      }
      instruction = builder->AddInstruction(HloInstruction::CreateConvolve(
          shape, /*lhs=*/operands[0], /*rhs=*/operands[1], *window, *dnums));
      break;
    }
    case HloOpcode::kBroadcast: {
      optional<std::vector<int64>> broadcast_dimensions;
      attrs["dimensions"] = {/*required=*/true, AttrTy::kBracedInt64List,
                             &broadcast_dimensions};
      if (!ParseOperands(&operands, /*expected_size=*/1) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(HloInstruction::CreateBroadcast(
          shape, operands[0], *broadcast_dimensions));
      break;
    }
    case HloOpcode::kConcatenate: {
      optional<std::vector<int64>> dimensions;
      attrs["dimensions"] = {/*required=*/true, AttrTy::kBracedInt64List,
                             &dimensions};
      // Concatenate takes exactly one dimension.
      if (!ParseOperands(&operands) || !ParseAttributes(attrs) ||
          dimensions->size() != 1) {
        return false;
      }
      instruction = builder->AddInstruction(HloInstruction::CreateConcatenate(
          shape, operands, dimensions->at(0)));
      break;
    }
    case HloOpcode::kMap: {
      optional<HloComputation*> to_apply;
      attrs["to_apply"] = {/*required=*/true, AttrTy::kHloComputation,
                           &to_apply};
      if (!ParseOperands(&operands) || !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(
          HloInstruction::CreateMap(shape, operands, *to_apply));
      break;
    }
    case HloOpcode::kReduce: {
      optional<HloComputation*> reduce_computation;
      attrs["to_apply"] = {/*required=*/true, AttrTy::kHloComputation,
                           &reduce_computation};
      optional<std::vector<int64>> dimensions_to_reduce;
      attrs["dimensions"] = {/*required=*/true, AttrTy::kBracedInt64List,
                             &dimensions_to_reduce};
      if (!ParseOperands(&operands, /*expected_size=*/2) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(HloInstruction::CreateReduce(
          shape, /*operand=*/operands[0], /*init_value=*/operands[1],
          *dimensions_to_reduce, *reduce_computation));
      break;
    }
    case HloOpcode::kReverse: {
      optional<std::vector<int64>> dimensions;
      attrs["dimensions"] = {/*required=*/true, AttrTy::kBracedInt64List,
                             &dimensions};
      if (!ParseOperands(&operands, /*expected_size=*/1) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(
          HloInstruction::CreateReverse(shape, operands[0], *dimensions));
      break;
    }
    case HloOpcode::kSelectAndScatter: {
      optional<HloComputation*> select;
      attrs["select"] = {/*required=*/true, AttrTy::kHloComputation, &select};
      optional<HloComputation*> scatter;
      attrs["scatter"] = {/*required=*/true, AttrTy::kHloComputation, &scatter};
      optional<Window> window;
      attrs["window"] = {/*required=*/false, AttrTy::kWindow, &window};
      if (!ParseOperands(&operands, /*expected_size=*/3) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      if (!window) {
        window.emplace();
      }
      instruction =
          builder->AddInstruction(HloInstruction::CreateSelectAndScatter(
              shape, /*operand=*/operands[0], *select, *window,
              /*source=*/operands[1], /*init_value=*/operands[2], *scatter));
      break;
    }
    case HloOpcode::kSlice: {
      optional<SliceRanges> slice_ranges;
      attrs["slice"] = {/*required=*/true, AttrTy::kSliceRanges, &slice_ranges};
      if (!ParseOperands(&operands, /*expected_size=*/1) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(HloInstruction::CreateSlice(
          shape, operands[0], slice_ranges->starts, slice_ranges->limits,
          slice_ranges->strides));
      break;
    }
    case HloOpcode::kDynamicSlice: {
      optional<std::vector<int64>> dynamic_slice_sizes;
      attrs["dynamic_slice_sizes"] = {
          /*required=*/true, AttrTy::kBracedInt64List, &dynamic_slice_sizes};
      if (!ParseOperands(&operands, /*expected_size=*/2) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(HloInstruction::CreateDynamicSlice(
          shape, /*operand=*/operands[0], /*start_indices=*/operands[1],
          *dynamic_slice_sizes));
      break;
    }
    case HloOpcode::kDynamicUpdateSlice: {
      if (!ParseOperands(&operands, /*expected_size=*/3) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      instruction =
          builder->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
              shape, /*operand=*/operands[0], /*update=*/operands[1],
              /*start_indices=*/operands[2]));
      break;
    }
    case HloOpcode::kTranspose: {
      optional<std::vector<int64>> dimensions;
      attrs["dimensions"] = {/*required=*/true, AttrTy::kBracedInt64List,
                             &dimensions};
      if (!ParseOperands(&operands, /*expected_size=*/1) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(
          HloInstruction::CreateTranspose(shape, operands[0], *dimensions));
      break;
    }
    case HloOpcode::kBatchNormTraining: {
      optional<float> epsilon;
      attrs["epsilon"] = {/*required=*/true, AttrTy::kFloat, &epsilon};
      optional<int64> feature_index;
      attrs["feature_index"] = {/*required=*/true, AttrTy::kInt64,
                                &feature_index};
      if (!ParseOperands(&operands, /*expected_size=*/3) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      instruction =
          builder->AddInstruction(HloInstruction::CreateBatchNormTraining(
              shape, /*operand=*/operands[0], /*scale=*/operands[1],
              /*offset=*/operands[2], *epsilon, *feature_index));
      break;
    }
    case HloOpcode::kBatchNormInference: {
      optional<float> epsilon;
      attrs["epsilon"] = {/*required=*/true, AttrTy::kFloat, &epsilon};
      optional<int64> feature_index;
      attrs["feature_index"] = {/*required=*/true, AttrTy::kInt64,
                                &feature_index};
      if (!ParseOperands(&operands, /*expected_size=*/5) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      instruction =
          builder->AddInstruction(HloInstruction::CreateBatchNormInference(
              shape, /*operand=*/operands[0], /*scale=*/operands[1],
              /*offset=*/operands[2], /*mean=*/operands[3],
              /*variance=*/operands[4], *epsilon, *feature_index));
      break;
    }
    case HloOpcode::kBatchNormGrad: {
      optional<float> epsilon;
      attrs["epsilon"] = {/*required=*/true, AttrTy::kFloat, &epsilon};
      optional<int64> feature_index;
      attrs["feature_index"] = {/*required=*/true, AttrTy::kInt64,
                                &feature_index};
      if (!ParseOperands(&operands, /*expected_size=*/5) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(HloInstruction::CreateBatchNormGrad(
          shape, /*operand=*/operands[0], /*scale=*/operands[1],
          /*mean=*/operands[2], /*variance=*/operands[3],
          /*grad_output=*/operands[4], *epsilon, *feature_index));
      break;
    }
    case HloOpcode::kPad: {
      optional<PaddingConfig> padding;
      attrs["padding"] = {/*required=*/true, AttrTy::kPaddingConfig, &padding};
      if (!ParseOperands(&operands, /*expected_size=*/2) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(HloInstruction::CreatePad(
          shape, operands[0], /*padding_value=*/operands[1], *padding));
      break;
    }
    case HloOpcode::kFusion: {
      optional<HloComputation*> fusion_computation;
      attrs["calls"] = {/*required=*/true, AttrTy::kHloComputation,
                        &fusion_computation};
      optional<HloInstruction::FusionKind> fusion_kind;
      attrs["kind"] = {/*required=*/true, AttrTy::kFusionKind, &fusion_kind};
      if (!ParseOperands(&operands) || !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(HloInstruction::CreateFusion(
          shape, *fusion_kind, operands, *fusion_computation));
      break;
    }
    case HloOpcode::kInfeed: {
      optional<string> config;
      attrs["infeed_config"] = {/*required=*/false, AttrTy::kString, &config};
      if (!ParseOperands(&operands, /*expected_size=*/0) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(
          HloInstruction::CreateInfeed(shape, config ? *config : ""));
      break;
    }
    case HloOpcode::kOutfeed: {
      optional<string> config;
      attrs["outfeed_config"] = {/*required=*/false, AttrTy::kString, &config};
      if (!ParseOperands(&operands, /*expected_size=*/1) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(HloInstruction::CreateOutfeed(
          shape, operands[0], config ? *config : ""));
      break;
    }
    case HloOpcode::kRng: {
      optional<RandomDistribution> distribution;
      attrs["distribution"] = {/*required=*/true, AttrTy::kDistribution,
                               &distribution};
      if (!ParseOperands(&operands) || !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(
          HloInstruction::CreateRng(shape, *distribution, operands));
      break;
    }
    case HloOpcode::kReducePrecision: {
      optional<int64> exponent_bits;
      optional<int64> mantissa_bits;
      attrs["exponent_bits"] = {/*required=*/true, AttrTy::kInt64,
                                &exponent_bits};
      attrs["mantissa_bits"] = {/*required=*/true, AttrTy::kInt64,
                                &mantissa_bits};
      if (!ParseOperands(&operands, /*expected_size=*/1) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      instruction =
          builder->AddInstruction(HloInstruction::CreateReducePrecision(
              shape, operands[0], static_cast<int>(*exponent_bits),
              static_cast<int>(*mantissa_bits)));
      break;
    }
    case HloOpcode::kConditional: {
      optional<HloComputation*> true_computation;
      optional<HloComputation*> false_computation;
      attrs["true_computation"] = {/*required=*/true, AttrTy::kHloComputation,
                                   &true_computation};
      attrs["false_computation"] = {/*required=*/true, AttrTy::kHloComputation,
                                    &false_computation};
      if (!ParseOperands(&operands, /*expected_size=*/3) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(HloInstruction::CreateConditional(
          shape, /*pred=*/operands[0],
          /*true_computation_arg=*/operands[1], *true_computation,
          /*false_computation_arg=*/operands[2], *false_computation));
      break;
    }
    case HloOpcode::kCustomCall: {
      optional<string> custom_call_target;
      attrs["custom_call_target"] = {/*required=*/true, AttrTy::kString,
                                     &custom_call_target};
      if (!ParseOperands(&operands) || !ParseAttributes(attrs)) {
        return false;
      }
      instruction = builder->AddInstruction(HloInstruction::CreateCustomCall(
          shape, operands, *custom_call_target));
      break;
    }
    case HloOpcode::kDot: {
      optional<std::vector<int64>> lhs_contracting_dims;
      attrs["lhs_contracting_dims"] = {
          /*required=*/false, AttrTy::kBracedInt64List, &lhs_contracting_dims};
      optional<std::vector<int64>> rhs_contracting_dims;
      attrs["rhs_contracting_dims"] = {
          /*required=*/false, AttrTy::kBracedInt64List, &rhs_contracting_dims};
      optional<std::vector<int64>> lhs_batch_dims;
      attrs["lhs_batch_dims"] = {/*required=*/false, AttrTy::kBracedInt64List,
                                 &lhs_batch_dims};
      optional<std::vector<int64>> rhs_batch_dims;
      attrs["rhs_batch_dims"] = {/*required=*/false, AttrTy::kBracedInt64List,
                                 &rhs_batch_dims};
      if (!ParseOperands(&operands, /*expected_size=*/2) ||
          !ParseAttributes(attrs)) {
        return false;
      }
      // Assemble the dimension-numbers proto from whichever of the four
      // optional dim lists were present.
      DotDimensionNumbers dnum;
      if (lhs_contracting_dims) {
        *dnum.mutable_lhs_contracting_dimensions() = {
            lhs_contracting_dims->begin(), lhs_contracting_dims->end()};
      }
      if (rhs_contracting_dims) {
        *dnum.mutable_rhs_contracting_dimensions() = {
            rhs_contracting_dims->begin(), rhs_contracting_dims->end()};
      }
      if (lhs_batch_dims) {
        *dnum.mutable_lhs_batch_dimensions() = {lhs_batch_dims->begin(),
                                                lhs_batch_dims->end()};
      }
      if (rhs_batch_dims) {
        *dnum.mutable_rhs_batch_dimensions() = {rhs_batch_dims->begin(),
                                                rhs_batch_dims->end()};
      }
      instruction = builder->AddInstruction(
          HloInstruction::CreateDot(shape, operands[0], operands[1], dnum));
      break;
    }
    case HloOpcode::kTrace:
      return TokenError(StrCat("parsing not yet implemented for op: ",
                               HloOpcodeString(opcode)));
  }
  instruction->set_name(name);
  // Add common attrs (sharding, control predecessors) to the instruction, if
  // they were seen.
  if (sharding) {
    instruction->set_sharding(
        HloSharding::FromProto(sharding.value()).ValueOrDie());
  }
  if (predecessors) {
    for (auto* pre : *predecessors) {
      Status status = pre->AddControlDependencyTo(instruction);
      if (!status.ok()) {
        return Error(name_loc, StrCat("error adding control dependency for: ",
                                      name, " status: ", status.ToString()));
      }
    }
  }
  if (metadata) {
    instruction->set_metadata(*metadata);
  }
  return AddInstruction(name, instruction, name_loc);
}  // NOLINT(readability/fn_size)
// ::= '{' (single_sharding | tuple_sharding) '}'
//
// tuple_sharding ::= single_sharding* (',' single_sharding)*
// Parses a sharding attribute. After the opening '{', a nested '{' means a
// tuple sharding, an immediate '}' means an empty tuple sharding, and
// anything else is a single sharding.
bool HloParser::ParseSharding(OpSharding* sharding) {
  if (!ParseToken(TokKind::kLbrace,
                  "expected '{' to start sharding attribute")) {
    return false;
  }

  const TokKind lookahead = lexer_.GetKind();
  if (lookahead != TokKind::kLbrace && lookahead != TokKind::kRbrace) {
    // Single sharding; the leading '{' has already been consumed above.
    return ParseSingleSharding(sharding, /*lbrace_pre_lexed=*/true);
  }

  // Tuple sharding; an empty tuple ('{' '}') is permitted.
  if (lookahead != TokKind::kRbrace) {
    do {
      if (!ParseSingleSharding(sharding->add_tuple_shardings(),
                               /*lbrace_pre_lexed=*/false)) {
        return false;
      }
    } while (EatIfPresent(TokKind::kComma));
  }
  sharding->set_type(OpSharding::Type::OpSharding_Type_TUPLE);

  return ParseToken(TokKind::kRbrace, "expected '}' to end sharding attribute");
}
// ::= '{' 'replicated'? 'maximal'? ('device=' int)? shape?
// ('devices=' ('[' dims ']')* device_list)? '}'
// dims ::= int_list device_list ::= int_list
// Parses one (non-tuple) sharding into *sharding. If lbrace_pre_lexed is
// true, the caller has already consumed the opening '{'. Accumulates the
// keywords/attributes seen in the body, then validates them as one of the
// three sharding forms (replicated, maximal, tiled) when '}' is reached.
bool HloParser::ParseSingleSharding(OpSharding* sharding,
                                    bool lbrace_pre_lexed) {
  if (!lbrace_pre_lexed &&
      !ParseToken(TokKind::kLbrace,
                  "expected '{' to start sharding attribute")) {
    return false;
  }
  LocTy loc = lexer_.GetLoc();
  bool maximal = false;
  bool replicated = false;
  std::vector<int64> devices;
  std::vector<int64> tile_assignment_dimensions;
  Shape tile_shape;
  // Collect body tokens until the closing '}'.
  while (lexer_.GetKind() != TokKind::kRbrace) {
    switch (lexer_.GetKind()) {
      case TokKind::kw_maximal:
        maximal = true;
        lexer_.Lex();
        break;
      case TokKind::kw_replicated:
        replicated = true;
        lexer_.Lex();
        break;
      case TokKind::kAttributeName: {
        if (lexer_.GetStrVal() == "device") {
          // device=<int>: a single-device (maximal) assignment.
          if (lexer_.Lex() != TokKind::kInt) {
            return TokenError("device= attribute must be an integer");
          }
          devices = {lexer_.GetInt64Val()};
          lexer_.Lex();
        } else if (lexer_.GetStrVal() == "devices") {
          // devices=[dims]d0,d1,...: tile assignment dims then device list.
          lexer_.Lex();
          if (!ParseToken(TokKind::kLsquare,
                          "expected '[' to start sharding devices shape")) {
            return false;
          }
          do {
            int64 dim;
            if (!ParseInt64(&dim)) {
              return false;
            }
            tile_assignment_dimensions.push_back(dim);
          } while (EatIfPresent(TokKind::kComma));
          if (!ParseToken(TokKind::kRsquare,
                          "expected ']' to start sharding devices shape")) {
            return false;
          }
          do {
            int64 device;
            if (!ParseInt64(&device)) {
              return false;
            }
            devices.push_back(device);
          } while (EatIfPresent(TokKind::kComma));
        } else {
          return TokenError(
              "unknown attribute in sharding: expected device= or devices=");
        }
        break;
      }
      case TokKind::kShape:
        tile_shape = lexer_.GetShapeVal();
        lexer_.Lex();
        break;
      case TokKind::kRbrace:
        break;
      default:
        return TokenError("unexpected token");
    }
  }
  // Validate the collected tokens against the three legal sharding forms.
  if (replicated) {
    if (!devices.empty()) {
      return Error(loc,
                   "replicated shardings should not have any devices assigned");
    }
    if (!ShapeUtil::Equal(tile_shape, Shape())) {
      return Error(loc,
                   "replicated shardings should not have any tile shape set");
    }
    sharding->set_type(OpSharding::Type::OpSharding_Type_REPLICATED);
  } else if (maximal) {
    if (devices.size() != 1) {
      return Error(loc,
                   "maximal shardings should have exactly one device assigned");
    }
    if (!ShapeUtil::Equal(tile_shape, Shape())) {
      return Error(loc, "maximal shardings should not have any tile shape set");
    }
    sharding->set_type(OpSharding::Type::OpSharding_Type_MAXIMAL);
    sharding->add_tile_assignment_devices(devices[0]);
  } else {
    // Tiled sharding: needs >1 device, a tile shape, and assignment dims.
    if (devices.size() <= 1) {
      return Error(
          loc, "non-maximal shardings must have more than one device assigned");
    }
    if (ShapeUtil::Equal(tile_shape, Shape())) {
      return Error(loc, "non-maximal shardings should have a tile shape set");
    }
    if (tile_assignment_dimensions.empty()) {
      return Error(
          loc,
          "non-maximal shardings must have a tile assignment list including "
          "dimensions");
    }
    sharding->set_type(OpSharding::Type::OpSharding_Type_OTHER);
    *sharding->mutable_tile_shape() = tile_shape;
    for (int64 dim : tile_assignment_dimensions) {
      sharding->add_tile_assignment_dimensions(dim);
    }
    for (int64 device : devices) {
      sharding->add_tile_assignment_devices(device);
    }
  }
  // Consume the closing '}'.
  lexer_.Lex();
  return true;
}
// '{' name+ '}'
// Parses a '{' name (',' name)* '}' list, resolves each name against the
// instruction pool, and appends the resolved pointers to *instructions.
// Returns false (with an error reported) on a malformed list or an unknown
// instruction name.
//
// Fixes: the error message read "expects a instruction name" (grammar), and
// the name location was captured once before the loop, so errors for later
// names pointed at the first name; it is now captured per iteration.
bool HloParser::ParseInstructionNames(
    std::vector<HloInstruction*>* instructions) {
  if (!ParseToken(TokKind::kLbrace,
                  "expects '{' at the beginning of instruction name list")) {
    return false;
  }
  do {
    // Capture the location of this particular name for error reporting.
    LocTy loc = lexer_.GetLoc();
    string name;
    if (!ParseName(&name)) {
      return Error(loc, "expects an instruction name");
    }
    HloInstruction* instr =
        tensorflow::gtl::FindPtrOrNull(instruction_pool_, name);
    if (!instr) {
      return TokenError(
          Printf("instruction '%s' is not defined", name.c_str()));
    }
    instructions->push_back(instr);
  } while (EatIfPresent(TokKind::kComma));

  return ParseToken(TokKind::kRbrace,
                    "expects '}' at the end of instruction name list");
}
// Writes an integral `value` into `literal` at `linear_index`, dispatching to
// the helper instantiated with the native type matching the literal's element
// type. Dies on non-integral element types.
bool HloParser::SetValueInLiteral(int64 value, int64 linear_index,
                                  Literal* literal) {
  const Shape& shape = literal->shape();
  switch (shape.element_type()) {
    case S8:
      return SetValueInLiteralHelper<int8>(value, linear_index, literal);
    case S16:
      return SetValueInLiteralHelper<int16>(value, linear_index, literal);
    case S32:
      return SetValueInLiteralHelper<int32>(value, linear_index, literal);
    case S64:
      return SetValueInLiteralHelper<int64>(value, linear_index, literal);
    case U8:
      return SetValueInLiteralHelper<uint8>(value, linear_index, literal);
    case U16:
      // Fixed: this case previously dispatched with uint8, silently
      // truncating U16 literal values to 8 bits.
      return SetValueInLiteralHelper<uint16>(value, linear_index, literal);
    case U32:
      return SetValueInLiteralHelper<uint32>(value, linear_index, literal);
    case U64:
      return SetValueInLiteralHelper<uint64>(value, linear_index, literal);
    default:
      LOG(FATAL) << "unknown integral primitive type "
                 << PrimitiveType_Name(shape.element_type());
  }
}
bool HloParser::SetValueInLiteral(double value, int64 linear_index,
Literal* literal) {
const Shape& shape = literal->shape();
switch (shape.element_type()) {
case F16:
return SetValueInLiteralHelper<half>(value, linear_index, literal);
case BF16:
return SetValueInLiteralHelper<bfloat16>(value, linear_index, literal);
case F32:
return SetValueInLiteralHelper<float>(value, linear_index, literal);
case F64:
return SetValueInLiteralHelper<double>(value, linear_index, literal);
default:
LOG(FATAL) << "unknown floating point primitive type "
<< PrimitiveType_Name(shape.element_type());
}
}
bool HloParser::SetValueInLiteral(bool value, int64 linear_index,
Literal* literal) {
const Shape& shape = literal->shape();
switch (shape.element_type()) {
case PRED:
return SetValueInLiteralHelper<bool>(value, linear_index, literal);
default:
LOG(FATAL) << PrimitiveType_Name(shape.element_type())
<< " is not PRED type";
}
}
// Writes `value` (as parsed, type ParsedElemT) into `literal` at
// `linear_index` after validating that the index is in bounds and the value
// is representable in the literal's native element type LiteralNativeT.
// Non-finite values (NaN / +-inf) skip the range check.
//
// Fix: the out-of-range error message contained the typo "trys" (now
// "tries").
template <typename LiteralNativeT, typename ParsedElemT>
bool HloParser::SetValueInLiteralHelper(ParsedElemT value, int64 linear_index,
                                        Literal* literal) {
  // Check that linear_index is in range.
  if (linear_index >= ShapeUtil::ElementsIn(literal->shape())) {
    return TokenError(
        StrCat("tries to set value ", value, " to a literal in shape ",
               ShapeUtil::HumanString(literal->shape()), " at linear index ",
               linear_index, ", but the index is out of range"));
  }
  if (std::isnan(value) ||
      (std::numeric_limits<ParsedElemT>::has_infinity &&
       (std::numeric_limits<ParsedElemT>::infinity() == value ||
        -std::numeric_limits<ParsedElemT>::infinity() == value))) {
    // Skip range checking for non-finite value.
  } else if (literal->shape().element_type() == F16 ||
             literal->shape().element_type() == BF16) {
    // half/bfloat16 are narrower than the parsed type; compare against the
    // fp16 maximum explicitly.
    if (value > kF16max || value < -kF16max) {
      return TokenError(StrCat(
          "value ", value, " is out of range for literal's primitive type ",
          PrimitiveType_Name(literal->shape().element_type())));
    }
  } else if (value > static_cast<ParsedElemT>(
                         std::numeric_limits<LiteralNativeT>::max()) ||
             value < static_cast<ParsedElemT>(
                         std::numeric_limits<LiteralNativeT>::lowest())) {
    // Value is out of range for LiteralNativeT.
    return TokenError(StrCat(
        "value ", value, " is out of range for literal's primitive type ",
        PrimitiveType_Name(literal->shape().element_type())));
  }
  literal->GetMutableArraySlice<LiteralNativeT>().at(linear_index) =
      static_cast<LiteralNativeT>(value);
  return true;
}
bool HloParser::EatShapeAndCheckCompatible(const Shape& shape) {
Shape new_shape;
if (!ParseShape(&new_shape)) {
return TokenError(StrCat("expects shape ", ShapeUtil::HumanString(shape)));
}
if (!ShapeUtil::Compatible(shape, new_shape)) {
return TokenError(StrCat(
"expects shape ", ShapeUtil::HumanString(shape),
", but sees a different shape: ", ShapeUtil::HumanString(new_shape)));
}
return true;
}
// literal
// ::= tuple
// ::= non_tuple
// Parses a literal of the given shape, dispatching on whether the shape is a
// tuple or a (possibly multi-dimensional) array.
bool HloParser::ParseLiteral(std::unique_ptr<Literal>* literal,
                             const Shape& shape) {
  if (ShapeUtil::IsTuple(shape)) {
    return ParseTupleLiteral(literal, shape);
  }
  return ParseNonTupleLiteral(literal, shape);
}
// tuple
// ::= shape '(' literal_list ')'
// literal_list
// ::= /*empty*/
// ::= literal (',' literal)*
// Parses a tuple literal: shape '(' (literal (',' literal)*)? ')'.
//
// Fixes: the ParseToken call for the separating comma ignored its return
// value, so a missing comma reported an error yet parsing continued; the
// comma error message contained the typo "exepcts"; and the closing-paren
// message lacked a space before "elements".
bool HloParser::ParseTupleLiteral(std::unique_ptr<Literal>* literal,
                                  const Shape& shape) {
  if (!EatShapeAndCheckCompatible(shape)) {
    return TokenError(StrCat("expects tuple constant in shape ",
                             ShapeUtil::HumanString(shape)));
  }
  if (!ParseToken(TokKind::kLparen, "expects '(' in front of tuple elements")) {
    return false;
  }
  std::vector<std::unique_ptr<Literal>> elements(
      ShapeUtil::TupleElementCount(shape));
  if (lexer_.GetKind() == TokKind::kRparen) {
    // empty tuple
  } else {
    // literal, (',' literal)*
    for (int i = 0; i < elements.size(); i++) {
      if (i > 0 &&
          !ParseToken(TokKind::kComma,
                      "expects ',' to separate tuple elements")) {
        return false;
      }
      if (!ParseLiteral(&elements[i],
                        ShapeUtil::GetTupleElementShape(shape, i))) {
        return TokenError(StrCat("expects the ", i, "th element"));
      }
    }
  }
  *literal = Literal::MakeTupleOwned(std::move(elements));
  return ParseToken(TokKind::kRparen,
                    StrCat("expects ')' at the end of the tuple with ",
                           ShapeUtil::TupleElementCount(shape), " elements"));
}
// non_tuple
// ::= rank01
// ::= rank2345
// rank2345 ::= shape nested_array
// Parses a non-tuple literal of any rank. Rank >= 2 literals are prefixed by
// their shape; the nested-brace structure is validated against the shape's
// dimensions while values are written into *literal in linear (row-major
// default layout) order, then relaid out to the shape's layout.
bool HloParser::ParseNonTupleLiteral(std::unique_ptr<Literal>* literal,
                                     const Shape& shape) {
  const int64 rank = ShapeUtil::Rank(shape);
  if (rank > 1 && !EatShapeAndCheckCompatible(shape)) {
    return false;
  }
  // Create a literal with the given shape in default layout.
  *literal = Literal::CreateFromDimensions(shape.element_type(),
                                           AsInt64Slice(shape.dimensions()));
  int64 nest_level = 0;
  int64 linear_index = 0;
  // elems_seen_per_dim[i] is how many elements or sub-arrays we have seen for
  // the dimension i. For example, to parse f32[2,3] {{1, 2, 3}, {4, 5, 6}},
  // when we are parsing the 2nd '{' (right before '1'), we are seeing a
  // sub-array of the dimension 0, so elems_seen_per_dim[0]++. When we are at
  // the first '}' (right after '3'), it means the sub-array ends, and the
  // sub-array is supposed to contain exactly 3 elements, so check if
  // elems_seen_per_dim[1] is 3.
  std::vector<int64> elems_seen_per_dim(rank);
  // Renders the multi-dimensional index of the element currently being read,
  // for error messages.
  auto get_index_str = [&elems_seen_per_dim](int dim) -> string {
    std::vector<int64> elems_seen_until_dim(elems_seen_per_dim.begin(),
                                            elems_seen_per_dim.begin() + dim);
    return StrCat("[",
                  tensorflow::str_util::Join(
                      elems_seen_until_dim, ",",
                      [](string* out, const int64& num_elems) {
                        tensorflow::strings::StrAppend(out, num_elems - 1);
                      }),
                  "]");
  };
  do {
    switch (lexer_.GetKind()) {
      default:
        return TokenError("unexpected token type in a literal");
      case TokKind::kLbrace: {
        nest_level++;
        if (nest_level > rank) {
          return TokenError(Printf(
              "expects nested array in rank %lld, but sees larger", rank));
        }
        if (nest_level > 1) {
          // Opening a new sub-array counts as one element of the parent dim.
          elems_seen_per_dim[nest_level - 2]++;
          if (elems_seen_per_dim[nest_level - 2] >
              shape.dimensions(nest_level - 2)) {
            return TokenError(Printf(
                "expects %lld elements in the %sth element, but sees more",
                shape.dimensions(nest_level - 2),
                get_index_str(nest_level - 2).c_str()));
          }
        }
        lexer_.Lex();
        break;
      }
      case TokKind::kRbrace: {
        nest_level--;
        // The closing sub-array must be exactly full.
        if (elems_seen_per_dim[nest_level] != shape.dimensions(nest_level)) {
          return TokenError(Printf(
              "expects %lld elements in the %sth element, but sees %lld",
              shape.dimensions(nest_level), get_index_str(nest_level).c_str(),
              elems_seen_per_dim[nest_level]));
        }
        elems_seen_per_dim[nest_level] = 0;
        lexer_.Lex();
        break;
      }
      case TokKind::kComma:
      case TokKind::kComment:
        // Skip.
        lexer_.Lex();
        break;
      case TokKind::kw_true:
      case TokKind::kw_false:
      case TokKind::kInt:
      case TokKind::kDecimal:
      case TokKind::kw_nan:
      case TokKind::kw_inf:
      case TokKind::kNegInf: {
        if (rank > 0) {
          if (nest_level != rank) {
            return TokenError(
                Printf("expects nested array in rank %lld, but sees %lld", rank,
                       nest_level));
          }
          elems_seen_per_dim[rank - 1]++;
          if (elems_seen_per_dim[rank - 1] > shape.dimensions(rank - 1)) {
            return TokenError(
                Printf("expects %lld elements on the minor-most dimension, but "
                       "sees more",
                       shape.dimensions(rank - 1)));
          }
        }
        if (lexer_.GetKind() == TokKind::kw_true ||
            lexer_.GetKind() == TokKind::kw_false) {
          // TODO(congliu): bool type literals with rank >= 1 are actually
          // printed in a compact form instead of "true" or "false". Fix that.
          if (!SetValueInLiteral(lexer_.GetKind() == TokKind::kw_true,
                                 linear_index++, literal->get())) {
            return false;
          }
          lexer_.Lex();
        } else if (primitive_util::IsIntegralType(shape.element_type())) {
          LocTy loc = lexer_.GetLoc();
          int64 value;
          if (!ParseInt64(&value)) {
            return Error(loc, StrCat("expects integer for primitive type: ",
                                     PrimitiveType_Name(shape.element_type())));
          }
          if (!SetValueInLiteral(value, linear_index++, literal->get())) {
            return false;
          }
        } else if (primitive_util::IsFloatingPointType(shape.element_type())) {
          LocTy loc = lexer_.GetLoc();
          double value;
          if (!ParseDouble(&value)) {
            return Error(
                loc, StrCat("expect floating point value for primitive type: ",
                            PrimitiveType_Name(shape.element_type())));
          }
          if (!SetValueInLiteral(value, linear_index++, literal->get())) {
            return false;
          }
        } else {
          // Bug fix: "premitive" -> "primitive" in the error message.
          return TokenError(StrCat("unsupported primitive type ",
                                   PrimitiveType_Name(shape.element_type())));
        }
        break;
      }
    }  // end of switch
  } while (nest_level > 0);
  *literal = (*literal)->Relayout(shape.layout());
  return true;
}
// operands ::= '(' operands1 ')'
// operands1
// ::= /*empty*/
// ::= operand (, operand)*
// operand ::= (shape)? name
// Parses a parenthesized, comma-separated operand list. Each operand may be
// prefixed by a shape; the shape is only checked for syntax and then
// discarded — operands are resolved purely by name against the instructions
// parsed so far (instruction_pool_).
bool HloParser::ParseOperands(std::vector<HloInstruction*>* operands) {
  if (!ParseToken(TokKind::kLparen,
                  "expects '(' at the beginning of operands")) {
    return false;
  }
  if (lexer_.GetKind() == TokKind::kRparen) {
    // empty
  } else {
    do {
      LocTy loc = lexer_.GetLoc();
      string name;
      if (CanBeShape()) {
        // Optional shape prefix: parsed for validity, value intentionally
        // unused.
        Shape shape;
        if (!ParseShape(&shape)) {
          return false;
        }
      }
      if (!ParseName(&name)) {
        return false;
      }
      // Operands must refer to instructions that were already parsed.
      HloInstruction* instruction =
          tensorflow::gtl::FindPtrOrNull(instruction_pool_, name);
      if (!instruction) {
        return Error(loc, StrCat("instruction does not exist: ", name));
      }
      operands->push_back(instruction);
    } while (EatIfPresent(TokKind::kComma));
  }
  return ParseToken(TokKind::kRparen, "expects ')' at the end of operands");
}
// Like ParseOperands above, but additionally enforces that exactly
// 'expected_size' operands were parsed; errors point at the start of the
// operand list.
bool HloParser::ParseOperands(std::vector<HloInstruction*>* operands,
                              const int expected_size) {
  LocTy operands_loc = lexer_.GetLoc();
  if (!ParseOperands(operands)) {
    return false;
  }
  if (operands->size() == expected_size) {
    return true;
  }
  return Error(operands_loc,
               StrCat("expects ", expected_size, " operands, but has ",
                      operands->size(), " operands"));
}
// sub_attributes ::= '{' (','? attribute)* '}'
// Parses a brace-enclosed attribute list ('{' (','? attribute)* '}').
// 'attrs' maps attribute name -> AttrConfig {required, attr_type, result};
// parsed values are written through the AttrConfig::result pointers by
// ParseAttributeHelper. Fails if a required attribute is missing.
bool HloParser::ParseSubAttributes(
    const std::unordered_map<string, AttrConfig>& attrs) {
  LocTy loc = lexer_.GetLoc();
  if (!ParseToken(TokKind::kLbrace, "expects '{' to start sub attributes")) {
    return false;
  }
  std::unordered_set<string> seen_attrs;
  if (lexer_.GetKind() == TokKind::kRbrace) {
    // empty
  } else {
    do {
      // The comma between sub-attributes is optional.
      EatIfPresent(TokKind::kComma);
      if (!ParseAttributeHelper(attrs, &seen_attrs)) {
        return false;
      }
    } while (lexer_.GetKind() != TokKind::kRbrace);
  }
  // Check that all required attrs were seen.
  for (const auto& attr_it : attrs) {
    if (attr_it.second.required &&
        seen_attrs.find(attr_it.first) == seen_attrs.end()) {
      return Error(loc, Printf("sub-attribute %s is expected but not seen",
                               attr_it.first.c_str()));
    }
  }
  return ParseToken(TokKind::kRbrace, "expects '}' to end sub attributes");
}
// attributes ::= (',' attribute)*
bool HloParser::ParseAttributes(
const std::unordered_map<string, AttrConfig>& attrs) {
LocTy loc = lexer_.GetLoc();
std::unordered_set<string> seen_attrs;
while (EatIfPresent(TokKind::kComma)) {
if (!ParseAttributeHelper(attrs, &seen_attrs)) {
return false;
}
}
// Check that all required attrs were seen.
for (const auto& attr_it : attrs) {
if (attr_it.second.required &&
seen_attrs.find(attr_it.first) == seen_attrs.end()) {
return Error(loc, Printf("attribute %s is expected but not seen",
attr_it.first.c_str()));
}
}
return true;
}
// Parses one "name=value" attribute:
//  - rejects names not declared in 'attrs' and duplicates (via *seen_attrs),
//  - dispatches on the declared AttrTy to parse the value,
//  - stores the value through the type-erased AttrConfig::result pointer,
//    which must point at an optional<T> whose T matches the AttrTy.
bool HloParser::ParseAttributeHelper(
    const std::unordered_map<string, AttrConfig>& attrs,
    std::unordered_set<string>* seen_attrs) {
  LocTy loc = lexer_.GetLoc();
  string name;
  if (!ParseAttributeName(&name)) {
    return Error(loc, "error parsing attributes");
  }
  VLOG(1) << "Parsing attribute " << name;
  if (!seen_attrs->insert(name).second) {
    return Error(loc, Printf("attribute %s already exists", name.c_str()));
  }
  auto attr_it = attrs.find(name);
  if (attr_it == attrs.end()) {
    return Error(loc, Printf("unexpected attribute %s", name.c_str()));
  }
  AttrTy attr_type = attr_it->second.attr_type;
  void* attr_out_ptr = attr_it->second.result;
  bool success = [&] {
    LocTy attr_loc = lexer_.GetLoc();
    switch (attr_type) {
      case AttrTy::kInt64: {
        int64 result;
        if (!ParseInt64(&result)) {
          return false;
        }
        static_cast<optional<int64>*>(attr_out_ptr)->emplace(result);
        return true;
      }
      case AttrTy::kInt32: {
        // Parsed as int64, then range-checked by round-trip cast.
        int64 result;
        if (!ParseInt64(&result)) {
          return false;
        }
        if (result != static_cast<int32>(result)) {
          return Error(attr_loc, "value out of range for int32");
        }
        static_cast<optional<int32>*>(attr_out_ptr)
            ->emplace(static_cast<int32>(result));
        return true;
      }
      case AttrTy::kFloat: {
        // Parsed as double, then range-checked against float limits.
        double result;
        if (!ParseDouble(&result)) {
          return false;
        }
        if (result > std::numeric_limits<float>::max() ||
            result < std::numeric_limits<float>::lowest()) {
          return Error(attr_loc, "value out of range for float");
        }
        static_cast<optional<float>*>(attr_out_ptr)
            ->emplace(static_cast<float>(result));
        return true;
      }
      case AttrTy::kHloComputation: {
        HloComputation* result;
        if (!ParseComputationName(&result)) {
          return false;
        }
        static_cast<optional<HloComputation*>*>(attr_out_ptr)->emplace(result);
        return true;
      }
      case AttrTy::kWindow: {
        Window result;
        if (!ParseWindow(&result)) {
          return false;
        }
        static_cast<optional<Window>*>(attr_out_ptr)->emplace(result);
        return true;
      }
      case AttrTy::kConvolutionDimensionNumbers: {
        ConvolutionDimensionNumbers result;
        if (!ParseConvolutionDimensionNumbers(&result)) {
          return false;
        }
        static_cast<optional<ConvolutionDimensionNumbers>*>(attr_out_ptr)
            ->emplace(result);
        return true;
      }
      case AttrTy::kSharding: {
        OpSharding sharding;
        if (!ParseSharding(&sharding)) {
          return false;
        }
        static_cast<optional<OpSharding>*>(attr_out_ptr)->emplace(sharding);
        return true;
      }
      case AttrTy::kInstructionList: {
        std::vector<HloInstruction*> result;
        if (!ParseInstructionNames(&result)) {
          return false;
        }
        static_cast<optional<std::vector<HloInstruction*>>*>(attr_out_ptr)
            ->emplace(result);
        return true;
      }
      case AttrTy::kFusionKind: {
        HloInstruction::FusionKind result;
        if (!ParseFusionKind(&result)) {
          return false;
        }
        static_cast<optional<HloInstruction::FusionKind>*>(attr_out_ptr)
            ->emplace(result);
        return true;
      }
      case AttrTy::kBracedInt64List: {
        std::vector<int64> result;
        if (!ParseInt64List(TokKind::kLbrace, TokKind::kRbrace, TokKind::kComma,
                            &result)) {
          return false;
        }
        static_cast<optional<std::vector<int64>>*>(attr_out_ptr)
            ->emplace(result);
        return true;
      }
      case AttrTy::kSliceRanges: {
        SliceRanges result;
        if (!ParseSliceRanges(&result)) {
          return false;
        }
        static_cast<optional<SliceRanges>*>(attr_out_ptr)->emplace(result);
        return true;
      }
      case AttrTy::kPaddingConfig: {
        PaddingConfig result;
        if (!ParsePaddingConfig(&result)) {
          return false;
        }
        static_cast<optional<PaddingConfig>*>(attr_out_ptr)->emplace(result);
        return true;
      }
      case AttrTy::kString: {
        string result;
        if (!ParseString(&result)) {
          return false;
        }
        static_cast<optional<string>*>(attr_out_ptr)->emplace(result);
        return true;
      }
      case AttrTy::kMetadata: {
        OpMetadata result;
        if (!ParseMetadata(&result)) {
          return false;
        }
        static_cast<optional<OpMetadata>*>(attr_out_ptr)->emplace(result);
        return true;
      }
      case AttrTy::kDistribution: {
        RandomDistribution result;
        if (!ParseRandomDistribution(&result)) {
          return false;
        }
        static_cast<optional<RandomDistribution>*>(attr_out_ptr)
            ->emplace(result);
        return true;
      }
    }
    // NOTE(review): the switch has no default and relies on covering every
    // AttrTy value; if a new AttrTy is added without a case here, control
    // falls off the end of this lambda (undefined behavior). Consider adding
    // a default that returns false.
  }();
  if (!success) {
    return Error(loc, Printf("error parsing attribute %s", name.c_str()));
  }
  return true;
}
// Parses a computation name and resolves it against the computations parsed
// so far (computation_pool_); unknown names are an error.
bool HloParser::ParseComputationName(HloComputation** value) {
  LocTy loc = lexer_.GetLoc();
  string name;
  if (!ParseName(&name)) {
    return Error(loc, "expects computation name");
  }
  HloComputation* computation =
      tensorflow::gtl::FindPtrOrNull(computation_pool_, name);
  if (computation == nullptr) {
    return Error(loc, StrCat("computation does not exist: ", name));
  }
  *value = computation;
  return true;
}
// ::= '{' size stride? pad? lhs_dilate? rhs_dilate? '}'
// The subattributes can appear in any order. 'size=' is required, others are
// optional.
// Parses a window attribute: '{' plus any order of size/stride/pad/
// lhs_dilate/rhs_dilate/rhs_reversal sub-attributes, then '}'. 'size=' is
// required; all other fields default per dimension (stride/dilation 1,
// padding 0, reversal false). All present fields must match size's rank.
bool HloParser::ParseWindow(Window* window) {
  LocTy loc = lexer_.GetLoc();
  if (!ParseToken(TokKind::kLbrace, "expected '{' to start window attribute")) {
    return false;
  }
  std::vector<int64> size;
  std::vector<int64> stride;
  std::vector<std::vector<int64>> pad;
  std::vector<int64> lhs_dilate;
  std::vector<int64> rhs_dilate;
  std::vector<int64> rhs_reversal;
  while (lexer_.GetKind() != TokKind::kRbrace) {
    LocTy attr_loc = lexer_.GetLoc();
    string field_name;
    if (!ParseAttributeName(&field_name)) {
      return Error(attr_loc, "expects sub-attributes in window");
    }
    bool ok = [&] {
      if (field_name == "size") {
        return ParseDxD("size", &size);
      }
      if (field_name == "stride") {
        return ParseDxD("stride", &stride);
      }
      if (field_name == "lhs_dilate") {
        return ParseDxD("lhs_dilate", &lhs_dilate);
      }
      if (field_name == "rhs_dilate") {
        // Bug fix: the display name passed here was misspelled "rls_dilate",
        // which leaked into error messages for malformed rhs_dilate values.
        return ParseDxD("rhs_dilate", &rhs_dilate);
      }
      if (field_name == "pad") {
        return ParseWindowPad(&pad);
      }
      if (field_name == "rhs_reversal") {
        return ParseDxD("rhs_reversal", &rhs_reversal);
      }
      // Bug fix: point the error at the offending sub-attribute (attr_loc),
      // not at the opening '{' of the window (loc).
      return Error(attr_loc, StrCat("unexpected attribute name: ", field_name));
    }();
    if (!ok) {
      return false;
    }
  }
  if (size.empty()) {
    return Error(loc,
                 "sub-attribute 'size=' is required in the window attribute");
  }
  if (!stride.empty() && stride.size() != size.size()) {
    return Error(loc, "expects 'stride=' has the same size as 'size='");
  }
  if (!lhs_dilate.empty() && lhs_dilate.size() != size.size()) {
    return Error(loc, "expects 'lhs_dilate=' has the same size as 'size='");
  }
  if (!rhs_dilate.empty() && rhs_dilate.size() != size.size()) {
    return Error(loc, "expects 'rhs_dilate=' has the same size as 'size='");
  }
  if (!pad.empty() && pad.size() != size.size()) {
    return Error(loc, "expects 'pad=' has the same size as 'size='");
  }
  for (int i = 0; i < size.size(); i++) {
    window->add_dimensions()->set_size(size[i]);
    if (!pad.empty()) {
      window->mutable_dimensions(i)->set_padding_low(pad[i][0]);
      window->mutable_dimensions(i)->set_padding_high(pad[i][1]);
    }
    // If some field is not present, it has the default value.
    window->mutable_dimensions(i)->set_stride(stride.empty() ? 1 : stride[i]);
    window->mutable_dimensions(i)->set_base_dilation(
        lhs_dilate.empty() ? 1 : lhs_dilate[i]);
    window->mutable_dimensions(i)->set_window_dilation(
        rhs_dilate.empty() ? 1 : rhs_dilate[i]);
    window->mutable_dimensions(i)->set_window_reversal(
        rhs_reversal.empty() ? false : (rhs_reversal[i] == 1));
  }
  return ParseToken(TokKind::kRbrace, "expected '}' to end window attribute");
}
// This is the inverse of HloInstruction::ConvolutionDimensionNumbersToString.
// The string looks like "dim_labels=0bf_0io->0bf".
// Parses convolution dimension numbers from a dim-labels token such as
// "0bf_0io->0bf" (lhs_rhs->out). Each part must have the same rank (>= 2)
// and unique characters; digits map spatial dimensions, 'b'/'f' mark input
// batch/feature, 'i'/'o' mark kernel input/output feature.
bool HloParser::ParseConvolutionDimensionNumbers(
    ConvolutionDimensionNumbers* dnums) {
  if (lexer_.GetKind() != TokKind::kDimLabels) {
    return TokenError("expects dim labels pattern, e.g., 'bf0_0io->0bf'");
  }
  string str = lexer_.GetStrVal();
  // The str is expected to have 3 items, lhs, rhs, out, and it must look like
  // lhs_rhs->out, that is, the first separator is "_" and the second is "->".
  // So we replace the "->" with "_" and then split on "_".
  str = tensorflow::str_util::StringReplace(str, /*oldsub=*/"->",
                                            /*newsub=*/"_",
                                            /*replace_all=*/false);
  std::vector<string> lhs_rhs_out = Split(str, "_");
  if (lhs_rhs_out.size() != 3) {
    // Bug fix: this used to be LOG(FATAL), which aborts the whole process on
    // malformed input; a parser should report the error to the caller.
    return TokenError(StrCat(
        "expects 3 items: lhs, rhs, and output dims, but sees ", str));
  }
  const int64 rank = lhs_rhs_out[0].length();
  if (rank != lhs_rhs_out[1].length() || rank != lhs_rhs_out[2].length()) {
    return TokenError(
        "convolution lhs, rhs, and output must have the same rank");
  }
  if (rank < 2) {
    return TokenError("convolution rank must >=2");
  }
  // A label string is valid only if no character repeats.
  auto is_unique = [](string str) -> bool {
    std::sort(str.begin(), str.end());
    return std::unique(str.begin(), str.end()) == str.end();
  };
  // lhs
  {
    const string& lhs = lhs_rhs_out[0];
    if (!is_unique(lhs)) {
      return TokenError(
          StrCat("expects unique lhs dimension numbers, but sees ", lhs));
    }
    // Pre-size the spatial dimension list; real values are filled below.
    for (int i = 0; i < rank - 2; i++) {
      dnums->add_input_spatial_dimensions(-1);
    }
    for (int i = 0; i < rank; i++) {
      char c = lhs[i];
      if (c == 'b') {
        dnums->set_input_batch_dimension(i);
      } else if (c == 'f') {
        dnums->set_input_feature_dimension(i);
      } else if (c < '0' + rank && c >= '0') {
        dnums->set_input_spatial_dimensions(c - '0', i);
      } else {
        return TokenError(
            Printf("expects [0-%lldbf] in lhs dimension numbers", rank - 1));
      }
    }
  }
  // rhs
  {
    const string& rhs = lhs_rhs_out[1];
    if (!is_unique(rhs)) {
      return TokenError(
          StrCat("expects unique rhs dimension numbers, but sees ", rhs));
    }
    for (int i = 0; i < rank - 2; i++) {
      dnums->add_kernel_spatial_dimensions(-1);
    }
    for (int i = 0; i < rank; i++) {
      char c = rhs[i];
      if (c == 'i') {
        dnums->set_kernel_input_feature_dimension(i);
      } else if (c == 'o') {
        dnums->set_kernel_output_feature_dimension(i);
      } else if (c < '0' + rank && c >= '0') {
        dnums->set_kernel_spatial_dimensions(c - '0', i);
      } else {
        return TokenError(
            Printf("expects [0-%lldio] in rhs dimension numbers", rank - 1));
      }
    }
  }
  // output
  {
    const string& out = lhs_rhs_out[2];
    if (!is_unique(out)) {
      return TokenError(
          StrCat("expects unique output dimension numbers, but sees ", out));
    }
    for (int i = 0; i < rank - 2; i++) {
      dnums->add_output_spatial_dimensions(-1);
    }
    for (int i = 0; i < rank; i++) {
      char c = out[i];
      if (c == 'b') {
        dnums->set_output_batch_dimension(i);
      } else if (c == 'f') {
        dnums->set_output_feature_dimension(i);
      } else if (c < '0' + rank && c >= '0') {
        dnums->set_output_spatial_dimensions(c - '0', i);
      } else {
        return TokenError(
            Printf("expects [0-%lldbf] in output dimension numbers", rank - 1));
      }
    }
  }
  lexer_.Lex();
  return true;
}
// ::= '{' ranges '}'
// ::= /*empty*/
// ::= range (',' range)*
// range ::= '[' start ':' limit (':' stride)? ']'
//
// The slice ranges are printed as:
//
// {[dim0_start:dim0_limit:dim0_stride], [dim1_start:dim1_limit], ...}
//
// This function extracts the starts, limits, and strides as 3 vectors to the
// result. If stride is not present, stride is 1. For example, if the slice
// ranges is printed as:
//
// {[2:3:4], [5:6:7], [8:9]}
//
// Then the parsed result will be:
//
// {/*starts=*/{2, 5, 8}, /*limits=*/{3, 6, 9}, /*strides=*/{4, 7, 1}}
//
// Parses slice ranges like "{[2:3:4], [5:6]}" into parallel starts/limits/
// strides vectors; a missing stride defaults to 1.
bool HloParser::ParseSliceRanges(SliceRanges* result) {
  if (!ParseToken(TokKind::kLbrace, "expects '{' to start ranges")) {
    return false;
  }
  std::vector<std::vector<int64>> ranges;
  if (lexer_.GetKind() == TokKind::kRbrace) {
    // empty
    return ParseToken(TokKind::kRbrace, "expects '}' to end ranges");
  }
  do {
    LocTy loc = lexer_.GetLoc();
    ranges.emplace_back();
    if (!ParseInt64List(TokKind::kLsquare, TokKind::kRsquare, TokKind::kColon,
                        &ranges.back())) {
      return false;
    }
    const auto& range = ranges.back();
    if (range.size() != 2 && range.size() != 3) {
      // Bug fix: range.size() is a size_t, so "%ld" is the wrong conversion
      // on LLP64 platforms (e.g. 64-bit Windows). Cast to int64 and use
      // %lld, matching the rest of this file.
      return Error(loc, Printf("expects [start:limit:step] or [start:limit], "
                               "but sees %lld elements.",
                               static_cast<int64>(range.size())));
    }
  } while (EatIfPresent(TokKind::kComma));
  for (const auto& range : ranges) {
    result->starts.push_back(range[0]);
    result->limits.push_back(range[1]);
    result->strides.push_back(range.size() == 3 ? range[2] : 1);
  }
  return ParseToken(TokKind::kRbrace, "expects '}' to end ranges");
}
// int64list ::= start int64_elements end
// int64_elements
// ::= /*empty*/
// ::= int64_val (delim int64_val)*
bool HloParser::ParseInt64List(const TokKind start, const TokKind end,
const TokKind delim,
std::vector<int64>* result) {
if (!ParseToken(start, StrCat("expects an int64 list starting with ",
TokKindToString(start)))) {
return false;
}
if (lexer_.GetKind() == end) {
// empty
} else {
do {
int64 i;
if (!ParseInt64(&i)) {
return false;
}
result->push_back(i);
} while (EatIfPresent(delim));
}
return ParseToken(
end, StrCat("expects an int64 list to end with ", TokKindToString(end)));
}
// param_list_to_shape ::= param_list '->' shape
// Parses "(params) -> shape". The parameter list itself is only validated
// for syntax (see ParseParamList); *shape_loc records where the result shape
// begins so callers can report errors at that position.
bool HloParser::ParseParamListToShape(Shape* shape, LocTy* shape_loc) {
  if (!ParseParamList() || !ParseToken(TokKind::kArrow, "expects '->'")) {
    return false;
  }
  *shape_loc = lexer_.GetLoc();
  return ParseShape(shape);
}
bool HloParser::CanBeParamListToShape() {
  // A "(params) -> shape" signature always starts with '('. Pure lookahead:
  // no tokens are consumed.
  return lexer_.GetKind() == TokKind::kLparen;
}
// param_list ::= '(' param_list1 ')'
// param_list1
// ::= /*empty*/
// ::= param (',' param)*
// param ::= name shape
// Parses "(name shape, name shape, ...)". The parameter names and shapes are
// validated for syntax only; the parsed values are intentionally discarded.
bool HloParser::ParseParamList() {
  if (!ParseToken(TokKind::kLparen,
                  "expects '(' at the beginning of param list")) {
    return false;
  }
  if (lexer_.GetKind() == TokKind::kRparen) {
    // empty
  } else {
    do {
      Shape shape;
      string name;
      if (!ParseName(&name) || !ParseShape(&shape)) {
        return false;
      }
    } while (EatIfPresent(TokKind::kComma));
  }
  return ParseToken(TokKind::kRparen, "expects ')' at the end of param list");
}
// shape ::= shape_val_
// shape ::= '(' tuple_elements ')'
// tuple_elements
// ::= /*empty*/
// ::= shape (',' shape)*
// Parses a shape: either a single kShape token or a parenthesized,
// comma-separated tuple of shapes (recursively).
bool HloParser::ParseShape(Shape* result) {
  if (EatIfPresent(TokKind::kLparen)) {  // Tuple
    std::vector<Shape> tuple_shapes;
    if (lexer_.GetKind() != TokKind::kRparen) {
      // shape (',' shape)*
      do {
        tuple_shapes.emplace_back();
        if (!ParseShape(&tuple_shapes.back())) {
          return false;
        }
      } while (EatIfPresent(TokKind::kComma));
    }
    *result = ShapeUtil::MakeTupleShape(tuple_shapes);
    return ParseToken(TokKind::kRparen, "expects ')' at the end of tuple.");
  }
  if (lexer_.GetKind() != TokKind::kShape) {
    return TokenError("expects shape");
  }
  *result = lexer_.GetShapeVal();
  lexer_.Lex();
  return true;
}
bool HloParser::CanBeShape() {
  // A non-tuple shape starts with a kShape token; a tuple shape starts with
  // '('. Pure lookahead: no tokens are consumed.
  return lexer_.GetKind() == TokKind::kShape ||
         lexer_.GetKind() == TokKind::kLparen;
}
// Parses a name; both bare identifiers (kIdent) and name tokens (kName) are
// accepted, and the token's string value is returned via *result.
bool HloParser::ParseName(string* result) {
  VLOG(1) << "ParseName";
  const TokKind kind = lexer_.GetKind();
  if (kind != TokKind::kIdent && kind != TokKind::kName) {
    return TokenError("expects name");
  }
  *result = lexer_.GetStrVal();
  lexer_.Lex();
  return true;
}
// Consumes an attribute-name token and returns its string value.
// NOTE(review): presumably the lexer has already stripped the trailing '='
// from "name=" — confirm against the lexer implementation.
bool HloParser::ParseAttributeName(string* result) {
  if (lexer_.GetKind() != TokKind::kAttributeName) {
    return TokenError("expects attribute name");
  }
  *result = lexer_.GetStrVal();
  lexer_.Lex();
  return true;
}
// Parses a string token into *result.
bool HloParser::ParseString(string* result) {
  VLOG(1) << "ParseString";
  if (lexer_.GetKind() == TokKind::kString) {
    *result = lexer_.GetStrVal();
    lexer_.Lex();
    return true;
  }
  return TokenError("expects string");
}
// Parses a window sub-attribute value: either a single integer ("3") or an
// 'x'-separated list ("3x4x5"). 'name' is used only in error messages;
// parsed numbers are appended to *result.
bool HloParser::ParseDxD(const string& name, std::vector<int64>* result) {
  LocTy loc = lexer_.GetLoc();
  // A non-empty result means this sub-attribute already appeared.
  if (!result->empty()) {
    return Error(loc,
                 Printf("sub-attribute '%s=' already exists", name.c_str()));
  }
  // 1D
  if (lexer_.GetKind() == TokKind::kInt) {
    int64 number;
    if (!ParseInt64(&number)) {
      return Error(loc, Printf("expects sub-attribute '%s=i'", name.c_str()));
    }
    result->push_back(number);
    return true;
  }
  // 2D or higher.
  if (lexer_.GetKind() == TokKind::kDxD) {
    string str = lexer_.GetStrVal();
    if (!SplitAndParseAsInts(str, 'x', result)) {
      return Error(loc,
                   Printf("expects sub-attribute '%s=ixj...'", name.c_str()));
    }
    lexer_.Lex();
    return true;
  }
  return TokenError("expects token type kInt or kDxD");
}
// Parses a window padding pattern like "0_0x3_3": dimensions are separated
// by 'x', and each dimension is "low_high". Appends one {low, high} pair per
// dimension to *pad.
bool HloParser::ParseWindowPad(std::vector<std::vector<int64>>* pad) {
  LocTy loc = lexer_.GetLoc();
  // A non-empty result means 'pad=' already appeared in this window.
  if (!pad->empty()) {
    return Error(loc, "sub-attribute 'pad=' already exists");
  }
  if (lexer_.GetKind() != TokKind::kPad) {
    return TokenError("expects window pad pattern, e.g., '0_0x3_3'");
  }
  string str = lexer_.GetStrVal();
  std::vector<string> padding_str = Split(str, 'x');
  for (int i = 0; i < padding_str.size(); i++) {
    std::vector<int64> low_high;
    if (!SplitAndParseAsInts(padding_str[i], '_', &low_high) ||
        low_high.size() != 2) {
      return Error(loc,
                   "expects padding_low and padding_high separated by '_'");
    }
    pad->push_back(low_high);
  }
  lexer_.Lex();
  return true;
}
// This is the inverse xla::ToString(PaddingConfig). The padding config string
// looks like "0_0_0x3_3_1". The string is first separated by 'x', each
// substring represents one PaddingConfigDimension. The substring is 3 (or 2)
// numbers joined by '_'.
// Parses a padding config such as "0_0_0x3_3_1": dimensions separated by
// 'x'; each dimension is "low_high_interior" or "low_high" (interior
// defaults to 0). One PaddingConfigDimension is appended per dimension.
bool HloParser::ParsePaddingConfig(PaddingConfig* padding) {
  if (lexer_.GetKind() != TokKind::kPad) {
    return TokenError("expects padding config, e.g., '0_0_0x3_3_1'");
  }
  LocTy loc = lexer_.GetLoc();
  string str = lexer_.GetStrVal();
  std::vector<string> padding_str = Split(str, 'x');
  for (const auto& padding_dim_str : padding_str) {
    std::vector<int64> padding_dim;
    if (!SplitAndParseAsInts(padding_dim_str, '_', &padding_dim) ||
        (padding_dim.size() != 2 && padding_dim.size() != 3)) {
      return Error(loc,
                   "expects padding config pattern like 'low_high_interior' or "
                   "'low_high'");
    }
    auto* dim = padding->add_dimensions();
    dim->set_edge_padding_low(padding_dim[0]);
    dim->set_edge_padding_high(padding_dim[1]);
    // Interior padding is optional; default to 0 when absent.
    dim->set_interior_padding(padding_dim.size() == 3 ? padding_dim[2] : 0);
  }
  lexer_.Lex();
  return true;
}
// '{' metadata_string '}'
// Parses op metadata as a brace-enclosed sub-attribute list with the four
// optional fields op_type, op_name, source_file and source_line; only the
// fields that appear are set on *metadata.
bool HloParser::ParseMetadata(OpMetadata* metadata) {
  std::unordered_map<string, AttrConfig> attrs;
  optional<string> op_type;
  optional<string> op_name;
  optional<string> source_file;
  optional<int32> source_line;
  attrs["op_type"] = {/*required=*/false, AttrTy::kString, &op_type};
  attrs["op_name"] = {/*required=*/false, AttrTy::kString, &op_name};
  attrs["source_file"] = {/*required=*/false, AttrTy::kString, &source_file};
  attrs["source_line"] = {/*required=*/false, AttrTy::kInt32, &source_line};
  if (!ParseSubAttributes(attrs)) {
    return false;
  }
  if (op_type) {
    metadata->set_op_type(*op_type);
  }
  if (op_name) {
    metadata->set_op_name(*op_name);
  }
  if (source_file) {
    metadata->set_source_file(*source_file);
  }
  if (source_line) {
    metadata->set_source_line(*source_line);
  }
  return true;
}
// Parses an identifier token and converts it to an HloOpcode; unknown opcode
// names produce a parse error carrying the conversion failure.
bool HloParser::ParseOpcode(HloOpcode* result) {
  VLOG(1) << "ParseOpcode";
  if (lexer_.GetKind() != TokKind::kIdent) {
    return TokenError("expects opcode");
  }
  const string opcode_string = lexer_.GetStrVal();
  auto opcode_or = StringToHloOpcode(opcode_string);
  if (!opcode_or.ok()) {
    return TokenError(Printf("expects opcode but sees: %s, error: %s",
                             opcode_string.c_str(),
                             opcode_or.status().error_message().c_str()));
  }
  lexer_.Lex();
  *result = opcode_or.ValueOrDie();
  return true;
}
// Parses an identifier token and converts it to a FusionKind; unknown names
// produce a parse error carrying the conversion failure.
bool HloParser::ParseFusionKind(HloInstruction::FusionKind* result) {
  VLOG(1) << "ParseFusionKind";
  if (lexer_.GetKind() != TokKind::kIdent) {
    return TokenError("expects fusion kind");
  }
  string val = lexer_.GetStrVal();
  auto status_or_result = StringToFusionKind(val);
  if (!status_or_result.ok()) {
    return TokenError(
        Printf("expects fusion kind but sees: %s, error: %s", val.c_str(),
               status_or_result.status().error_message().c_str()));
  }
  *result = status_or_result.ValueOrDie();
  lexer_.Lex();
  return true;
}
// Parses an identifier token and converts it to a RandomDistribution;
// unknown names produce a parse error carrying the conversion failure.
bool HloParser::ParseRandomDistribution(RandomDistribution* result) {
  VLOG(1) << "ParseRandomDistribution";
  if (lexer_.GetKind() != TokKind::kIdent) {
    return TokenError("expects random distribution");
  }
  const string distribution_name = lexer_.GetStrVal();
  auto distribution_or = StringToRandomDistribution(distribution_name);
  if (!distribution_or.ok()) {
    return TokenError(Printf(
        "expects random distribution but sees: %s, error: %s",
        distribution_name.c_str(),
        distribution_or.status().error_message().c_str()));
  }
  lexer_.Lex();
  *result = distribution_or.ValueOrDie();
  return true;
}
// Parses an integer token into *result.
bool HloParser::ParseInt64(int64* result) {
  VLOG(1) << "ParseInt64";
  if (lexer_.GetKind() == TokKind::kInt) {
    *result = lexer_.GetInt64Val();
    lexer_.Lex();
    return true;
  }
  return TokenError("expects integer");
}
// Parses a floating point value. Accepts decimal and integer tokens as well
// as the special tokens nan, inf and -inf; any other token is an error.
bool HloParser::ParseDouble(double* result) {
  switch (lexer_.GetKind()) {
    case TokKind::kDecimal:
      *result = lexer_.GetDecimalVal();
      break;
    case TokKind::kInt:
      // Integer literals are accepted and widened to double.
      *result = static_cast<double>(lexer_.GetInt64Val());
      break;
    case TokKind::kw_nan:
      *result = std::numeric_limits<double>::quiet_NaN();
      break;
    case TokKind::kw_inf:
      *result = std::numeric_limits<double>::infinity();
      break;
    case TokKind::kNegInf:
      *result = -std::numeric_limits<double>::infinity();
      break;
    default:
      return TokenError("expects decimal or integer");
  }
  lexer_.Lex();
  return true;
}
bool HloParser::ParseBool(bool* result) {
if (lexer_.GetKind() != TokKind::kw_true &&
lexer_.GetKind() != TokKind::kw_false) {
return TokenError("expects true or false");
}
*result = lexer_.GetKind() == TokKind::kw_true;
lexer_.Lex();
return true;
}
// Consumes one token of the given kind; otherwise reports 'msg' as a parse
// error without consuming anything.
bool HloParser::ParseToken(TokKind kind, const string& msg) {
  VLOG(1) << "ParseToken " << TokKindToString(kind) << " " << msg;
  if (lexer_.GetKind() == kind) {
    lexer_.Lex();
    return true;
  }
  return TokenError(msg);
}
// Consumes the current token if it has the given kind; returns whether a
// token was consumed. Never reports an error.
bool HloParser::EatIfPresent(TokKind kind) {
  if (lexer_.GetKind() == kind) {
    lexer_.Lex();
    return true;
  }
  return false;
}
// Registers an instruction under 'name' in instruction_pool_; duplicate
// names are reported as an error at 'name_loc'.
bool HloParser::AddInstruction(const string& name, HloInstruction* instruction,
                               LocTy name_loc) {
  const bool inserted = instruction_pool_.insert({name, instruction}).second;
  if (inserted) {
    return true;
  }
  return Error(name_loc, StrCat("instruction already exists: ", name));
}
// Registers a computation under 'name' in computation_pool_; duplicate names
// are reported as an error at 'name_loc'.
bool HloParser::AddComputation(const string& name, HloComputation* computation,
                               LocTy name_loc) {
  // insert() returns {iterator, inserted?}; a false second means a
  // computation with this name already exists.
  auto result = computation_pool_.insert({name, computation});
  if (!result.second) {
    return Error(name_loc, StrCat("computation already exists: ", name));
  }
  return true;
}
} // namespace
// Parses 'str' into an HloModule built with 'config'. On failure, the
// parser's accumulated error text (including location markers) is returned
// as an InvalidArgument status.
StatusOr<std::unique_ptr<HloModule>> Parse(StringPiece str,
                                           const HloModuleConfig& config) {
  HloParser parser(str, config);
  if (!parser.Run()) {
    return InvalidArgument("Syntax error:\n%s", parser.GetError().c_str());
  }
  return parser.ConsumeHloModule();
}
// Convenience overload: parses with a default-constructed HloModuleConfig.
StatusOr<std::unique_ptr<HloModule>> Parse(StringPiece str) {
  HloModuleConfig config;
  return Parse(str, config);
}
} // namespace tools
} // namespace xla
| JingJunYin/tensorflow | tensorflow/compiler/xla/tools/parser/hlo_parser.cc | C++ | apache-2.0 | 84,539 |
using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using Xunit;
// General Information about an assembly is controlled through the following
// set of attributes. Change these attribute values to modify the information
// associated with an assembly.
[assembly: AssemblyTitle("Commands.ServiceBus.Test")]
[assembly: AssemblyDescription("")]
[assembly: AssemblyConfiguration("")]
[assembly: AssemblyCompany("Microsoft")]
[assembly: AssemblyProduct("Commands.ServiceBus.Test")]
[assembly: AssemblyCopyright("Copyright © Microsoft 2014")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
// Setting ComVisible to false makes the types in this assembly not visible
// to COM components. If you need to access a type in this assembly from
// COM, set the ComVisible attribute to true on that type.
[assembly: ComVisible(false)]
// The following GUID is for the ID of the typelib if this project is exposed to COM
[assembly: Guid("a893f297-3311-4224-8086-a4bb3c5e478a")]
// Version information for an assembly consists of the following four values:
//
//      Major Version
//      Minor Version
//      Build Number
//      Revision
//
// You can specify all the values or you can default the Build and Revision Numbers
// by using the '*' as shown below:
[assembly: AssemblyVersion("0.1.0")]
[assembly: AssemblyFileVersion("0.1.0")]
// Run xUnit test collections sequentially rather than in parallel.
[assembly: CollectionBehavior(DisableTestParallelization = true)]
| AzureRT/azure-powershell | src/ResourceManager/ServiceBus/Commands.ServiceBus.Test/Properties/AssemblyInfo.cs | C# | apache-2.0 | 1,478 |
// WARNING: Please don't edit this file. It was generated by C++/WinRT v2.0.210930.14
#pragma once
#ifndef WINRT_Windows_Devices_Portable_1_H
#define WINRT_Windows_Devices_Portable_1_H
#include "winrt/impl/Windows.Devices.Portable.0.h"
WINRT_EXPORT namespace winrt::Windows::Devices::Portable
{
    // Statics interface projection for Windows.Devices.Portable.ServiceDevice.
    // NOTE: this header is generated by C++/WinRT (see file header); hand
    // edits will be lost on regeneration.
    struct __declspec(empty_bases) IServiceDeviceStatics :
        winrt::Windows::Foundation::IInspectable,
        impl::consume_t<IServiceDeviceStatics>
    {
        IServiceDeviceStatics(std::nullptr_t = nullptr) noexcept {}
        IServiceDeviceStatics(void* ptr, take_ownership_from_abi_t) noexcept : winrt::Windows::Foundation::IInspectable(ptr, take_ownership_from_abi) {}
    };
    // Statics interface projection for Windows.Devices.Portable.StorageDevice.
    struct __declspec(empty_bases) IStorageDeviceStatics :
        winrt::Windows::Foundation::IInspectable,
        impl::consume_t<IStorageDeviceStatics>
    {
        IStorageDeviceStatics(std::nullptr_t = nullptr) noexcept {}
        IStorageDeviceStatics(void* ptr, take_ownership_from_abi_t) noexcept : winrt::Windows::Foundation::IInspectable(ptr, take_ownership_from_abi) {}
    };
}
#endif
| google/nearby | internal/platform/implementation/windows/generated/winrt/impl/Windows.Devices.Portable.1.h | C | apache-2.0 | 1,085 |
//
// HWTextPart.h
// 黑马微博2期
//
// Created by apple on 14/11/15.
// Copyright (c) 2014年 heima. All rights reserved.
//  A single segment (part) of a piece of text
#import <Foundation/Foundation.h>
@interface HWTextPart : NSObject
/** The content of this text segment. */
@property (nonatomic, copy) NSString *text;
/** The range of this segment within the full string. */
@property (nonatomic, assign) NSRange range;
/** Whether this segment is special text. */
// NOTE(review): the getter name "isSpecical" is misspelled ("isSpecial");
// renaming it would break existing callers, so it is left as-is.
@property (nonatomic, assign, getter = isSpecical) BOOL special;
/** Whether this segment is an emoticon. */
@property (nonatomic, assign, getter = isEmotion) BOOL emotion;
@end
| yujiemin/qwert123 | 黑马微博2期52-离线缓存/黑马微博2期/Classes/Home(首页)/Model/HWTextPart.h | C | apache-2.0 | 557 |
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=UTF-8" />
<title>ZKTableArchiveClient xref</title>
<link type="text/css" rel="stylesheet" href="../../../../../../stylesheet.css" />
</head>
<body>
<div id="overview"><a href="../../../../../../../apidocs/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.html">View Javadoc</a></div><pre>
<a class="jxr_linenumber" name="1" href="#1">1</a> <em class="jxr_javadoccomment">/**</em>
<a class="jxr_linenumber" name="2" href="#2">2</a> <em class="jxr_javadoccomment"> * Licensed to the Apache Software Foundation (ASF) under one</em>
<a class="jxr_linenumber" name="3" href="#3">3</a> <em class="jxr_javadoccomment"> * or more contributor license agreements. See the NOTICE file</em>
<a class="jxr_linenumber" name="4" href="#4">4</a> <em class="jxr_javadoccomment"> * distributed with this work for additional information</em>
<a class="jxr_linenumber" name="5" href="#5">5</a> <em class="jxr_javadoccomment"> * regarding copyright ownership. The ASF licenses this file</em>
<a class="jxr_linenumber" name="6" href="#6">6</a> <em class="jxr_javadoccomment"> * to you under the Apache License, Version 2.0 (the</em>
<a class="jxr_linenumber" name="7" href="#7">7</a> <em class="jxr_javadoccomment"> * "License"); you may not use this file except in compliance</em>
<a class="jxr_linenumber" name="8" href="#8">8</a> <em class="jxr_javadoccomment"> * with the License. You may obtain a copy of the License at</em>
<a class="jxr_linenumber" name="9" href="#9">9</a> <em class="jxr_javadoccomment"> *</em>
<a class="jxr_linenumber" name="10" href="#10">10</a> <em class="jxr_javadoccomment"> * <a href="http://www.apache.org/licenses/LICENSE-2.0" target="alexandria_uri">http://www.apache.org/licenses/LICENSE-2.0</a></em>
<a class="jxr_linenumber" name="11" href="#11">11</a> <em class="jxr_javadoccomment"> *</em>
<a class="jxr_linenumber" name="12" href="#12">12</a> <em class="jxr_javadoccomment"> * Unless required by applicable law or agreed to in writing, software</em>
<a class="jxr_linenumber" name="13" href="#13">13</a> <em class="jxr_javadoccomment"> * distributed under the License is distributed on an "AS IS" BASIS,</em>
<a class="jxr_linenumber" name="14" href="#14">14</a> <em class="jxr_javadoccomment"> * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.</em>
<a class="jxr_linenumber" name="15" href="#15">15</a> <em class="jxr_javadoccomment"> * See the License for the specific language governing permissions and</em>
<a class="jxr_linenumber" name="16" href="#16">16</a> <em class="jxr_javadoccomment"> * limitations under the License.</em>
<a class="jxr_linenumber" name="17" href="#17">17</a> <em class="jxr_javadoccomment"> */</em>
<a class="jxr_linenumber" name="18" href="#18">18</a> <strong class="jxr_keyword">package</strong> org.apache.hadoop.hbase.backup.example;
<a class="jxr_linenumber" name="19" href="#19">19</a>
<a class="jxr_linenumber" name="20" href="#20">20</a> <strong class="jxr_keyword">import</strong> java.io.IOException;
<a class="jxr_linenumber" name="21" href="#21">21</a>
<a class="jxr_linenumber" name="22" href="#22">22</a> <strong class="jxr_keyword">import</strong> org.apache.hadoop.hbase.classification.InterfaceAudience;
<a class="jxr_linenumber" name="23" href="#23">23</a> <strong class="jxr_keyword">import</strong> org.apache.hadoop.conf.Configuration;
<a class="jxr_linenumber" name="24" href="#24">24</a> <strong class="jxr_keyword">import</strong> org.apache.hadoop.conf.Configured;
<a class="jxr_linenumber" name="25" href="#25">25</a> <strong class="jxr_keyword">import</strong> org.apache.hadoop.hbase.client.ClusterConnection;
<a class="jxr_linenumber" name="26" href="#26">26</a> <strong class="jxr_keyword">import</strong> org.apache.hadoop.hbase.util.Bytes;
<a class="jxr_linenumber" name="27" href="#27">27</a> <strong class="jxr_keyword">import</strong> org.apache.hadoop.hbase.zookeeper.ZKUtil;
<a class="jxr_linenumber" name="28" href="#28">28</a> <strong class="jxr_keyword">import</strong> org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
<a class="jxr_linenumber" name="29" href="#29">29</a> <strong class="jxr_keyword">import</strong> org.apache.zookeeper.KeeperException;
<a class="jxr_linenumber" name="30" href="#30">30</a>
<a class="jxr_linenumber" name="31" href="#31">31</a> <em class="jxr_javadoccomment">/**</em>
<a class="jxr_linenumber" name="32" href="#32">32</a> <em class="jxr_javadoccomment"> * Example class for how to use the table archiving coordinated via zookeeper</em>
<a class="jxr_linenumber" name="33" href="#33">33</a> <em class="jxr_javadoccomment"> */</em>
<a class="jxr_linenumber" name="34" href="#34">34</a> @InterfaceAudience.Private
<a class="jxr_linenumber" name="35" href="#35">35</a> <strong class="jxr_keyword">public</strong> <strong class="jxr_keyword">class</strong> <a href="../../../../../../org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.html">ZKTableArchiveClient</a> <strong class="jxr_keyword">extends</strong> Configured {
<a class="jxr_linenumber" name="36" href="#36">36</a>
<a class="jxr_linenumber" name="37" href="#37">37</a> <em class="jxr_javadoccomment">/**</em><em class="jxr_javadoccomment"> Configuration key for the archive node. */</em>
<a class="jxr_linenumber" name="38" href="#38">38</a> <strong class="jxr_keyword">private</strong> <strong class="jxr_keyword">static</strong> <strong class="jxr_keyword">final</strong> String ZOOKEEPER_ZNODE_HFILE_ARCHIVE_KEY = <span class="jxr_string">"zookeeper.znode.hfile.archive"</span>;
<a class="jxr_linenumber" name="39" href="#39">39</a> <strong class="jxr_keyword">private</strong> <a href="../../../../../../org/apache/hadoop/hbase/client/ClusterConnection.html">ClusterConnection</a> connection;
<a class="jxr_linenumber" name="40" href="#40">40</a>
<a class="jxr_linenumber" name="41" href="#41">41</a> <strong class="jxr_keyword">public</strong> <a href="../../../../../../org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.html">ZKTableArchiveClient</a>(Configuration conf, <a href="../../../../../../org/apache/hadoop/hbase/client/ClusterConnection.html">ClusterConnection</a> connection) {
<a class="jxr_linenumber" name="42" href="#42">42</a> <strong class="jxr_keyword">super</strong>(conf);
<a class="jxr_linenumber" name="43" href="#43">43</a> <strong class="jxr_keyword">this</strong>.connection = connection;
<a class="jxr_linenumber" name="44" href="#44">44</a> }
<a class="jxr_linenumber" name="45" href="#45">45</a>
<a class="jxr_linenumber" name="46" href="#46">46</a> <em class="jxr_javadoccomment">/**</em>
<a class="jxr_linenumber" name="47" href="#47">47</a> <em class="jxr_javadoccomment"> * Turn on backups for all HFiles for the given table.</em>
<a class="jxr_linenumber" name="48" href="#48">48</a> <em class="jxr_javadoccomment"> * <p></em>
<a class="jxr_linenumber" name="49" href="#49">49</a> <em class="jxr_javadoccomment"> * All deleted hfiles are moved to the archive directory under the table directory, rather than</em>
<a class="jxr_linenumber" name="50" href="#50">50</a> <em class="jxr_javadoccomment"> * being deleted.</em>
<a class="jxr_linenumber" name="51" href="#51">51</a> <em class="jxr_javadoccomment"> * <p></em>
<a class="jxr_linenumber" name="52" href="#52">52</a> <em class="jxr_javadoccomment"> * If backups are already enabled for this table, does nothing.</em>
<a class="jxr_linenumber" name="53" href="#53">53</a> <em class="jxr_javadoccomment"> * <p></em>
<a class="jxr_linenumber" name="54" href="#54">54</a> <em class="jxr_javadoccomment"> * If the table does not exist, the archiving the table's hfiles is still enabled as a future</em>
<a class="jxr_linenumber" name="55" href="#55">55</a> <em class="jxr_javadoccomment"> * table with that name may be created shortly.</em>
<a class="jxr_linenumber" name="56" href="#56">56</a> <em class="jxr_javadoccomment"> * @param table name of the table to start backing up</em>
<a class="jxr_linenumber" name="57" href="#57">57</a> <em class="jxr_javadoccomment"> * @throws IOException if an unexpected exception occurs</em>
<a class="jxr_linenumber" name="58" href="#58">58</a> <em class="jxr_javadoccomment"> * @throws KeeperException if zookeeper can't be reached</em>
<a class="jxr_linenumber" name="59" href="#59">59</a> <em class="jxr_javadoccomment"> */</em>
<a class="jxr_linenumber" name="60" href="#60">60</a> <strong class="jxr_keyword">public</strong> <strong class="jxr_keyword">void</strong> enableHFileBackupAsync(<strong class="jxr_keyword">final</strong> byte[] table) <strong class="jxr_keyword">throws</strong> IOException, KeeperException {
<a class="jxr_linenumber" name="61" href="#61">61</a> createHFileArchiveManager().enableHFileBackup(table).stop();
<a class="jxr_linenumber" name="62" href="#62">62</a> }
<a class="jxr_linenumber" name="63" href="#63">63</a>
<a class="jxr_linenumber" name="64" href="#64">64</a> <em class="jxr_javadoccomment">/**</em>
<a class="jxr_linenumber" name="65" href="#65">65</a> <em class="jxr_javadoccomment"> * Disable hfile backups for the given table.</em>
<a class="jxr_linenumber" name="66" href="#66">66</a> <em class="jxr_javadoccomment"> * <p></em>
<a class="jxr_linenumber" name="67" href="#67">67</a> <em class="jxr_javadoccomment"> * Previously backed up files are still retained (if present).</em>
<a class="jxr_linenumber" name="68" href="#68">68</a> <em class="jxr_javadoccomment"> * <p></em>
<a class="jxr_linenumber" name="69" href="#69">69</a> <em class="jxr_javadoccomment"> * Asynchronous operation - some extra HFiles may be retained, in the archive directory after</em>
<a class="jxr_linenumber" name="70" href="#70">70</a> <em class="jxr_javadoccomment"> * disable is called, dependent on the latency in zookeeper to the servers.</em>
<a class="jxr_linenumber" name="71" href="#71">71</a> <em class="jxr_javadoccomment"> * @param table name of the table stop backing up</em>
<a class="jxr_linenumber" name="72" href="#72">72</a> <em class="jxr_javadoccomment"> * @throws IOException if an unexpected exception occurs</em>
<a class="jxr_linenumber" name="73" href="#73">73</a> <em class="jxr_javadoccomment"> * @throws KeeperException if zookeeper can't be reached</em>
<a class="jxr_linenumber" name="74" href="#74">74</a> <em class="jxr_javadoccomment"> */</em>
<a class="jxr_linenumber" name="75" href="#75">75</a> <strong class="jxr_keyword">public</strong> <strong class="jxr_keyword">void</strong> disableHFileBackup(String table) <strong class="jxr_keyword">throws</strong> IOException, KeeperException {
<a class="jxr_linenumber" name="76" href="#76">76</a> disableHFileBackup(Bytes.toBytes(table));
<a class="jxr_linenumber" name="77" href="#77">77</a> }
<a class="jxr_linenumber" name="78" href="#78">78</a>
<a class="jxr_linenumber" name="79" href="#79">79</a> <em class="jxr_javadoccomment">/**</em>
<a class="jxr_linenumber" name="80" href="#80">80</a> <em class="jxr_javadoccomment"> * Disable hfile backups for the given table.</em>
<a class="jxr_linenumber" name="81" href="#81">81</a> <em class="jxr_javadoccomment"> * <p></em>
<a class="jxr_linenumber" name="82" href="#82">82</a> <em class="jxr_javadoccomment"> * Previously backed up files are still retained (if present).</em>
<a class="jxr_linenumber" name="83" href="#83">83</a> <em class="jxr_javadoccomment"> * <p></em>
<a class="jxr_linenumber" name="84" href="#84">84</a> <em class="jxr_javadoccomment"> * Asynchronous operation - some extra HFiles may be retained, in the archive directory after</em>
<a class="jxr_linenumber" name="85" href="#85">85</a> <em class="jxr_javadoccomment"> * disable is called, dependent on the latency in zookeeper to the servers.</em>
<a class="jxr_linenumber" name="86" href="#86">86</a> <em class="jxr_javadoccomment"> * @param table name of the table stop backing up</em>
<a class="jxr_linenumber" name="87" href="#87">87</a> <em class="jxr_javadoccomment"> * @throws IOException if an unexpected exception occurs</em>
<a class="jxr_linenumber" name="88" href="#88">88</a> <em class="jxr_javadoccomment"> * @throws KeeperException if zookeeper can't be reached</em>
<a class="jxr_linenumber" name="89" href="#89">89</a> <em class="jxr_javadoccomment"> */</em>
<a class="jxr_linenumber" name="90" href="#90">90</a> <strong class="jxr_keyword">public</strong> <strong class="jxr_keyword">void</strong> disableHFileBackup(<strong class="jxr_keyword">final</strong> byte[] table) <strong class="jxr_keyword">throws</strong> IOException, KeeperException {
<a class="jxr_linenumber" name="91" href="#91">91</a> createHFileArchiveManager().disableHFileBackup(table).stop();
<a class="jxr_linenumber" name="92" href="#92">92</a> }
<a class="jxr_linenumber" name="93" href="#93">93</a>
<a class="jxr_linenumber" name="94" href="#94">94</a> <em class="jxr_javadoccomment">/**</em>
<a class="jxr_linenumber" name="95" href="#95">95</a> <em class="jxr_javadoccomment"> * Disable hfile backups for all tables.</em>
<a class="jxr_linenumber" name="96" href="#96">96</a> <em class="jxr_javadoccomment"> * <p></em>
<a class="jxr_linenumber" name="97" href="#97">97</a> <em class="jxr_javadoccomment"> * Previously backed up files are still retained (if present).</em>
<a class="jxr_linenumber" name="98" href="#98">98</a> <em class="jxr_javadoccomment"> * <p></em>
<a class="jxr_linenumber" name="99" href="#99">99</a> <em class="jxr_javadoccomment"> * Asynchronous operation - some extra HFiles may be retained, in the archive directory after</em>
<a class="jxr_linenumber" name="100" href="#100">100</a> <em class="jxr_javadoccomment"> * disable is called, dependent on the latency in zookeeper to the servers.</em>
<a class="jxr_linenumber" name="101" href="#101">101</a> <em class="jxr_javadoccomment"> * @throws IOException if an unexpected exception occurs</em>
<a class="jxr_linenumber" name="102" href="#102">102</a> <em class="jxr_javadoccomment"> * @throws KeeperException if zookeeper can't be reached</em>
<a class="jxr_linenumber" name="103" href="#103">103</a> <em class="jxr_javadoccomment"> */</em>
<a class="jxr_linenumber" name="104" href="#104">104</a> <strong class="jxr_keyword">public</strong> <strong class="jxr_keyword">void</strong> disableHFileBackup() <strong class="jxr_keyword">throws</strong> IOException, KeeperException {
<a class="jxr_linenumber" name="105" href="#105">105</a> createHFileArchiveManager().disableHFileBackup().stop();
<a class="jxr_linenumber" name="106" href="#106">106</a> }
<a class="jxr_linenumber" name="107" href="#107">107</a>
<a class="jxr_linenumber" name="108" href="#108">108</a> <em class="jxr_javadoccomment">/**</em>
<a class="jxr_linenumber" name="109" href="#109">109</a> <em class="jxr_javadoccomment"> * Determine if archiving is enabled (but not necessarily fully propagated) for a table</em>
<a class="jxr_linenumber" name="110" href="#110">110</a> <em class="jxr_javadoccomment"> * @param table name of the table to check</em>
<a class="jxr_linenumber" name="111" href="#111">111</a> <em class="jxr_javadoccomment"> * @return <tt>true</tt> if it is, <tt>false</tt> otherwise</em>
<a class="jxr_linenumber" name="112" href="#112">112</a> <em class="jxr_javadoccomment"> * @throws IOException if a connection to ZooKeeper cannot be established</em>
<a class="jxr_linenumber" name="113" href="#113">113</a> <em class="jxr_javadoccomment"> * @throws KeeperException</em>
<a class="jxr_linenumber" name="114" href="#114">114</a> <em class="jxr_javadoccomment"> */</em>
<a class="jxr_linenumber" name="115" href="#115">115</a> <strong class="jxr_keyword">public</strong> <strong class="jxr_keyword">boolean</strong> getArchivingEnabled(byte[] table) <strong class="jxr_keyword">throws</strong> IOException, KeeperException {
<a class="jxr_linenumber" name="116" href="#116">116</a> <a href="../../../../../../org/apache/hadoop/hbase/backup/example/HFileArchiveManager.html">HFileArchiveManager</a> manager = createHFileArchiveManager();
<a class="jxr_linenumber" name="117" href="#117">117</a> <strong class="jxr_keyword">try</strong> {
<a class="jxr_linenumber" name="118" href="#118">118</a> <strong class="jxr_keyword">return</strong> manager.isArchivingEnabled(table);
<a class="jxr_linenumber" name="119" href="#119">119</a> } <strong class="jxr_keyword">finally</strong> {
<a class="jxr_linenumber" name="120" href="#120">120</a> manager.stop();
<a class="jxr_linenumber" name="121" href="#121">121</a> }
<a class="jxr_linenumber" name="122" href="#122">122</a> }
<a class="jxr_linenumber" name="123" href="#123">123</a>
<a class="jxr_linenumber" name="124" href="#124">124</a> <em class="jxr_javadoccomment">/**</em>
<a class="jxr_linenumber" name="125" href="#125">125</a> <em class="jxr_javadoccomment"> * Determine if archiving is enabled (but not necessarily fully propagated) for a table</em>
<a class="jxr_linenumber" name="126" href="#126">126</a> <em class="jxr_javadoccomment"> * @param table name of the table to check</em>
<a class="jxr_linenumber" name="127" href="#127">127</a> <em class="jxr_javadoccomment"> * @return <tt>true</tt> if it is, <tt>false</tt> otherwise</em>
<a class="jxr_linenumber" name="128" href="#128">128</a> <em class="jxr_javadoccomment"> * @throws IOException if an unexpected network issue occurs</em>
<a class="jxr_linenumber" name="129" href="#129">129</a> <em class="jxr_javadoccomment"> * @throws KeeperException if zookeeper can't be reached</em>
<a class="jxr_linenumber" name="130" href="#130">130</a> <em class="jxr_javadoccomment"> */</em>
<a class="jxr_linenumber" name="131" href="#131">131</a> <strong class="jxr_keyword">public</strong> <strong class="jxr_keyword">boolean</strong> getArchivingEnabled(String table) <strong class="jxr_keyword">throws</strong> IOException, KeeperException {
<a class="jxr_linenumber" name="132" href="#132">132</a> <strong class="jxr_keyword">return</strong> getArchivingEnabled(Bytes.toBytes(table));
<a class="jxr_linenumber" name="133" href="#133">133</a> }
<a class="jxr_linenumber" name="134" href="#134">134</a>
<a class="jxr_linenumber" name="135" href="#135">135</a> <em class="jxr_javadoccomment">/**</em>
<a class="jxr_linenumber" name="136" href="#136">136</a> <em class="jxr_javadoccomment"> * @return A new {@link HFileArchiveManager} to manage which tables' hfiles should be archived</em>
<a class="jxr_linenumber" name="137" href="#137">137</a> <em class="jxr_javadoccomment"> * rather than deleted.</em>
<a class="jxr_linenumber" name="138" href="#138">138</a> <em class="jxr_javadoccomment"> * @throws KeeperException if we can't reach zookeeper</em>
<a class="jxr_linenumber" name="139" href="#139">139</a> <em class="jxr_javadoccomment"> * @throws IOException if an unexpected network issue occurs</em>
<a class="jxr_linenumber" name="140" href="#140">140</a> <em class="jxr_javadoccomment"> */</em>
<a class="jxr_linenumber" name="141" href="#141">141</a> <strong class="jxr_keyword">private</strong> <strong class="jxr_keyword">synchronized</strong> <a href="../../../../../../org/apache/hadoop/hbase/backup/example/HFileArchiveManager.html">HFileArchiveManager</a> createHFileArchiveManager() <strong class="jxr_keyword">throws</strong> KeeperException,
<a class="jxr_linenumber" name="142" href="#142">142</a> IOException {
<a class="jxr_linenumber" name="143" href="#143">143</a> <strong class="jxr_keyword">return</strong> <strong class="jxr_keyword">new</strong> <a href="../../../../../../org/apache/hadoop/hbase/backup/example/HFileArchiveManager.html">HFileArchiveManager</a>(<strong class="jxr_keyword">this</strong>.connection, <strong class="jxr_keyword">this</strong>.getConf());
<a class="jxr_linenumber" name="144" href="#144">144</a> }
<a class="jxr_linenumber" name="145" href="#145">145</a>
<a class="jxr_linenumber" name="146" href="#146">146</a> <em class="jxr_javadoccomment">/**</em>
<a class="jxr_linenumber" name="147" href="#147">147</a> <em class="jxr_javadoccomment"> * @param conf conf to read for the base archive node</em>
<a class="jxr_linenumber" name="148" href="#148">148</a> <em class="jxr_javadoccomment"> * @param zooKeeper zookeeper to used for building the full path</em>
<a class="jxr_linenumber" name="150" href="#150">150</a>   <em class="jxr_javadoccomment"> * @return the znode used for long-term archival of a table</em>
<a class="jxr_linenumber" name="150" href="#150">150</a> <em class="jxr_javadoccomment"> */</em>
<a class="jxr_linenumber" name="151" href="#151">151</a> <strong class="jxr_keyword">public</strong> <strong class="jxr_keyword">static</strong> String getArchiveZNode(Configuration conf, <a href="../../../../../../org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.html">ZooKeeperWatcher</a> zooKeeper) {
<a class="jxr_linenumber" name="152" href="#152">152</a> <strong class="jxr_keyword">return</strong> ZKUtil.joinZNode(zooKeeper.baseZNode, conf.get(ZOOKEEPER_ZNODE_HFILE_ARCHIVE_KEY,
<a class="jxr_linenumber" name="153" href="#153">153</a> TableHFileArchiveTracker.HFILE_ARCHIVE_ZNODE_PARENT));
<a class="jxr_linenumber" name="154" href="#154">154</a> }
<a class="jxr_linenumber" name="155" href="#155">155</a> }
</pre>
<hr/><div id="footer">This page was automatically generated by <a href="http://maven.apache.org/">Maven</a></div></body>
</html>
| scalingdata/Impala | thirdparty/hbase-1.1.1.2.3.0.0-2557/docs/xref/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.html | HTML | apache-2.0 | 21,962 |
/*
* Copyright 2014 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.ssl;
import io.netty.buffer.ByteBuf;
import io.netty.util.internal.NativeLibraryLoader;
import io.netty.util.internal.SystemPropertyUtil;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;
import org.apache.tomcat.jni.Buffer;
import org.apache.tomcat.jni.Library;
import org.apache.tomcat.jni.Pool;
import org.apache.tomcat.jni.SSL;
import org.apache.tomcat.jni.SSLContext;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.Locale;
import java.util.Set;
/**
* Tells if <a href="http://netty.io/wiki/forked-tomcat-native.html">{@code netty-tcnative}</a> and its OpenSSL support
* are available.
*/
public final class OpenSsl {
    private static final InternalLogger logger = InternalLoggerFactory.getInstance(OpenSsl.class);
    private static final String LINUX = "linux";
    private static final String UNKNOWN = "unknown";
    /** Why netty-tcnative is unavailable, or {@code null} if it loaded and initialized fine. */
    private static final Throwable UNAVAILABILITY_CAUSE;
    /** OpenSSL-style names of all cipher suites the loaded library offers; empty when unavailable. */
    private static final Set<String> AVAILABLE_CIPHER_SUITES;
    // Availability is probed once, in three ordered steps:
    //   1) is netty-tcnative on the classpath at all?
    //   2) can its native (JNI) library be loaded?
    //   3) can the library be initialized?
    // The first failure is recorded as the unavailability cause; on success the
    // set of supported cipher suites is snapshotted for later queries.
    static {
        Throwable cause = null;
        // Test if netty-tcnative is in the classpath first.
        try {
            Class.forName("org.apache.tomcat.jni.SSL", false, OpenSsl.class.getClassLoader());
        } catch (ClassNotFoundException t) {
            cause = t;
            logger.debug(
                    "netty-tcnative not in the classpath; " +
                    OpenSslEngine.class.getSimpleName() + " will be unavailable.");
        }
        // If in the classpath, try to load the native library and initialize netty-tcnative.
        if (cause == null) {
            try {
                // The JNI library was not already loaded. Load it now.
                loadTcNative();
            } catch (Throwable t) {
                cause = t;
                logger.debug(
                    "Failed to load netty-tcnative; " +
                        OpenSslEngine.class.getSimpleName() + " will be unavailable, unless the " +
                        "application has already loaded the symbols by some other means. " +
                        "See http://netty.io/wiki/forked-tomcat-native.html for more information.", t);
            }
            try {
                initializeTcNative();
                // The library was initialized successfully. If loading the library failed above,
                // reset the cause now since it appears that the library was loaded by some other
                // means.
                cause = null;
            } catch (Throwable t) {
                if (cause == null) {
                    cause = t;
                }
                logger.debug(
                    "Failed to initialize netty-tcnative; " +
                        OpenSslEngine.class.getSimpleName() + " will be unavailable. " +
                        "See http://netty.io/wiki/forked-tomcat-native.html for more information.", t);
            }
        }
        UNAVAILABILITY_CAUSE = cause;
        // Only enumerate cipher suites when the native library is fully usable.
        if (cause == null) {
            final Set<String> availableCipherSuites = new LinkedHashSet<String>(128);
            // Temporary APR memory pool backing the throwaway SSL context below.
            final long aprPool = Pool.create(0);
            try {
                // Create a server-mode context with every cipher enabled ("ALL")
                // purely to ask OpenSSL which cipher suites it supports.
                final long sslCtx = SSLContext.make(aprPool, SSL.SSL_PROTOCOL_ALL, SSL.SSL_MODE_SERVER);
                try {
                    SSLContext.setOptions(sslCtx, SSL.SSL_OP_ALL);
                    SSLContext.setCipherSuite(sslCtx, "ALL");
                    final long ssl = SSL.newSSL(sslCtx, true);
                    try {
                        for (String c: SSL.getCiphers(ssl)) {
                            // Filter out bad input.
                            if (c == null || c.length() == 0 || availableCipherSuites.contains(c)) {
                                continue;
                            }
                            availableCipherSuites.add(c);
                        }
                    } finally {
                        SSL.freeSSL(ssl);
                    }
                } finally {
                    SSLContext.free(sslCtx);
                }
            } catch (Exception e) {
                // Non-fatal: OpenSSL itself is still usable, only the suite list is unknown.
                logger.warn("Failed to get the list of available OpenSSL cipher suites.", e);
            } finally {
                Pool.destroy(aprPool);
            }
            AVAILABLE_CIPHER_SUITES = Collections.unmodifiableSet(availableCipherSuites);
        } else {
            AVAILABLE_CIPHER_SUITES = Collections.emptySet();
        }
    }
    /**
     * Returns {@code true} if and only if
     * <a href="http://netty.io/wiki/forked-tomcat-native.html">{@code netty-tcnative}</a> and its OpenSSL support
     * are available.
     */
    public static boolean isAvailable() {
        return UNAVAILABILITY_CAUSE == null;
    }
    /**
     * Returns {@code true} if the used version of openssl supports
     * <a href="https://tools.ietf.org/html/rfc7301">ALPN</a>.
     */
    public static boolean isAlpnSupported() {
        // OpenSSL encodes its version as 0xMNNFFPPS (OPENSSL_VERSION_NUMBER);
        // 0x10002000L corresponds to 1.0.2, the first release with ALPN support.
        return version() >= 0x10002000L;
    }
    /**
     * Returns the version of the used available OpenSSL library or {@code -1} if {@link #isAvailable()}
     * returns {@code false}.
     */
    public static int version() {
        if (isAvailable()) {
            return SSL.version();
        }
        return -1;
    }
    /**
     * Returns the version string of the used available OpenSSL library or {@code null} if {@link #isAvailable()}
     * returns {@code false}.
     */
    public static String versionString() {
        if (isAvailable()) {
            return SSL.versionString();
        }
        return null;
    }
    /**
     * Ensure that <a href="http://netty.io/wiki/forked-tomcat-native.html">{@code netty-tcnative}</a> and
     * its OpenSSL support are available.
     *
     * @throws UnsatisfiedLinkError if unavailable
     */
    public static void ensureAvailability() {
        if (UNAVAILABILITY_CAUSE != null) {
            throw (Error) new UnsatisfiedLinkError(
                    "failed to load the required native library").initCause(UNAVAILABILITY_CAUSE);
        }
    }
    /**
     * Returns the cause of unavailability of
     * <a href="http://netty.io/wiki/forked-tomcat-native.html">{@code netty-tcnative}</a> and its OpenSSL support.
     *
     * @return the cause if unavailable. {@code null} if available.
     */
    public static Throwable unavailabilityCause() {
        return UNAVAILABILITY_CAUSE;
    }
    /**
     * Returns all the available OpenSSL cipher suites.
     * Please note that the returned array may include the cipher suites that are insecure or non-functional.
     */
    public static Set<String> availableCipherSuites() {
        return AVAILABLE_CIPHER_SUITES;
    }
    /**
     * Returns {@code true} if and only if the specified cipher suite is available in OpenSSL.
     * Both Java-style cipher suite and OpenSSL-style cipher suite are accepted.
     */
    public static boolean isCipherSuiteAvailable(String cipherSuite) {
        // Java-style names (e.g. "TLS_...") are translated to the OpenSSL
        // naming scheme first; names already in OpenSSL form pass through.
        String converted = CipherSuiteConverter.toOpenSsl(cipherSuite);
        if (converted != null) {
            cipherSuite = converted;
        }
        return AVAILABLE_CIPHER_SUITES.contains(cipherSuite);
    }
    // True when the tcnative error code denotes an actual error (anything other than SSL_ERROR_NONE).
    static boolean isError(long errorCode) {
        return errorCode != SSL.SSL_ERROR_NONE;
    }
    // Returns the native memory address of a direct buffer: either via Netty's
    // own accessor or, failing that, via tcnative's Buffer helper on the NIO view.
    static long memoryAddress(ByteBuf buf) {
        assert buf.isDirect();
        return buf.hasMemoryAddress() ? buf.memoryAddress() : Buffer.address(buf.nioBuffer());
    }
    // Static utility holder; never instantiated.
    private OpenSsl() { }
    /**
     * Loads the tcnative JNI library, trying platform-specific artifact names
     * (e.g. "netty-tcnative-linux-x86_64", plus a "-fedora" variant on Linux for
     * its differing libssl so-naming) before falling back to the generic
     * "netty-tcnative" name.
     */
    private static void loadTcNative() throws Exception {
        String os = normalizeOs(SystemPropertyUtil.get("os.name", ""));
        String arch = normalizeArch(SystemPropertyUtil.get("os.arch", ""));
        Set<String> libNames = new LinkedHashSet<String>(3);
        // First, try loading the platform-specific library. Platform-specific
        // libraries will be available if using a tcnative uber jar.
        libNames.add("netty-tcnative-" + os + '-' + arch);
        if (LINUX.equalsIgnoreCase(os)) {
            // Fedora SSL lib so naming (libssl.so.10 vs libssl.so.1.0.0)..
            libNames.add("netty-tcnative-" + os + '-' + arch + "-fedora");
        }
        // finally the default library.
        libNames.add("netty-tcnative");
        NativeLibraryLoader.loadFirstAvailable(SSL.class.getClassLoader(),
            libNames.toArray(new String[libNames.size()]));
    }
    /** Initializes the tcnative library and its SSL layer. */
    private static void initializeTcNative() throws Exception {
        Library.initialize("provided");
        SSL.initialize(null);
    }
    /**
     * Maps an {@code os.name} value onto the canonical OS token used in
     * tcnative artifact names, or {@code "unknown"} when unrecognized.
     */
    private static String normalizeOs(String value) {
        value = normalize(value);
        if (value.startsWith("aix")) {
            return "aix";
        }
        if (value.startsWith("hpux")) {
            return "hpux";
        }
        if (value.startsWith("os400")) {
            // Avoid the names such as os4000
            if (value.length() <= 5 || !Character.isDigit(value.charAt(5))) {
                return "os400";
            }
        }
        if (value.startsWith(LINUX)) {
            return LINUX;
        }
        if (value.startsWith("macosx") || value.startsWith("osx")) {
            return "osx";
        }
        if (value.startsWith("freebsd")) {
            return "freebsd";
        }
        if (value.startsWith("openbsd")) {
            return "openbsd";
        }
        if (value.startsWith("netbsd")) {
            return "netbsd";
        }
        if (value.startsWith("solaris") || value.startsWith("sunos")) {
            return "sunos";
        }
        if (value.startsWith("windows")) {
            return "windows";
        }
        return UNKNOWN;
    }
    /**
     * Maps an {@code os.arch} value onto the canonical architecture token used
     * in tcnative artifact names, or {@code "unknown"} when unrecognized.
     */
    private static String normalizeArch(String value) {
        value = normalize(value);
        if (value.matches("^(x8664|amd64|ia32e|em64t|x64)$")) {
            return "x86_64";
        }
        if (value.matches("^(x8632|x86|i[3-6]86|ia32|x32)$")) {
            return "x86_32";
        }
        if (value.matches("^(ia64|itanium64)$")) {
            return "itanium_64";
        }
        if (value.matches("^(sparc|sparc32)$")) {
            return "sparc_32";
        }
        if (value.matches("^(sparcv9|sparc64)$")) {
            return "sparc_64";
        }
        if (value.matches("^(arm|arm32)$")) {
            return "arm_32";
        }
        if ("aarch64".equals(value)) {
            return "aarch_64";
        }
        if (value.matches("^(ppc|ppc32)$")) {
            return "ppc_32";
        }
        if ("ppc64".equals(value)) {
            return "ppc_64";
        }
        if ("ppc64le".equals(value)) {
            return "ppcle_64";
        }
        if ("s390".equals(value)) {
            return "s390_32";
        }
        if ("s390x".equals(value)) {
            return "s390_64";
        }
        return UNKNOWN;
    }
    /** Lower-cases the value and strips all non-alphanumeric characters, for tolerant matching. */
    private static String normalize(String value) {
        return value.toLowerCase(Locale.US).replaceAll("[^a-z0-9]+", "");
    }
}
| yrcourage/netty | handler/src/main/java/io/netty/handler/ssl/OpenSsl.java | Java | apache-2.0 | 11,625 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!--NewPage-->
<HTML>
<HEAD>
<!-- Generated by javadoc (build 1.6.0_51) on Fri Jun 28 10:55:02 CEST 2013 -->
<TITLE>
UpdateInfoListener
</TITLE>
<META NAME="date" CONTENT="2013-06-28">
<LINK REL ="stylesheet" TYPE="text/css" HREF="../../../doclava-developer-docs.css" TITLE="Style">
<SCRIPT type="text/javascript">
function windowTitle()
{
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="UpdateInfoListener";
}
}
</SCRIPT>
<NOSCRIPT>
</NOSCRIPT>
</HEAD>
<BODY BGCOLOR="white" onload="windowTitle();">
<HR>
<!-- ========= START OF TOP NAVBAR ======= -->
<A NAME="navbar_top"><!-- --></A>
<A HREF="#skip-navbar_top" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_top_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Class</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="class-use/UpdateInfoListener.html"><FONT CLASS="NavBarFont1"><B>Use</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../net/hockeyapp/android/UpdateFragment.html" title="class in net.hockeyapp.android"><B>PREV CLASS</B></A>
<A HREF="../../../net/hockeyapp/android/UpdateManager.html" title="class in net.hockeyapp.android"><B>NEXT CLASS</B></A></FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../index.html?net/hockeyapp/android/UpdateInfoListener.html" target="_top"><B>FRAMES</B></A>
<A HREF="UpdateInfoListener.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
<TR>
<TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2">
SUMMARY: NESTED | FIELD | CONSTR | <A HREF="#method_summary">METHOD</A></FONT></TD>
<TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2">
DETAIL: FIELD | CONSTR | <A HREF="#method_detail">METHOD</A></FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_top"></A>
<!-- ========= END OF TOP NAVBAR ========= -->
<HR>
<!-- ======== START OF CLASS DATA ======== -->
<H2>
<FONT SIZE="-1">
net.hockeyapp.android</FONT>
<BR>
Interface UpdateInfoListener</H2>
<DL>
<DT><B>All Known Implementing Classes:</B> <DD><A HREF="../../../net/hockeyapp/android/UpdateActivity.html" title="class in net.hockeyapp.android">UpdateActivity</A>, <A HREF="../../../net/hockeyapp/android/UpdateFragment.html" title="class in net.hockeyapp.android">UpdateFragment</A></DD>
</DL>
<HR>
<DL>
<DT><PRE>public interface <B>UpdateInfoListener</B></DL>
</PRE>
<P>
<h4>Description</h4>
Abstract class for callbacks to be invoked from UpdateActivity
and UpdateFragment.
<h4>License</h4>
<pre>
Copyright (c) 2011-2013 Bit Stadium GmbH
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
</pre>
<P>
<P>
<DL>
<DT><B>Author:</B></DT>
<DD>Thomas Dohmke</DD>
</DL>
<HR>
<P>
<!-- ========== METHOD SUMMARY =========== -->
<A NAME="method_summary"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2">
<B>Method Summary</B></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE> int</CODE></FONT></TD>
<TD><CODE><B><A HREF="../../../net/hockeyapp/android/UpdateInfoListener.html#getCurrentVersionCode()">getCurrentVersionCode</A></B>()</CODE>
<BR>
Implement to return the app's current version code.</TD>
</TR>
</TABLE>
<P>
<!-- ============ METHOD DETAIL ========== -->
<A NAME="method_detail"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="1"><FONT SIZE="+2">
<B>Method Detail</B></FONT></TH>
</TR>
</TABLE>
<A NAME="getCurrentVersionCode()"><!-- --></A><H3>
getCurrentVersionCode</H3>
<PRE>
int <B>getCurrentVersionCode</B>()</PRE>
<DL>
<DD>Implement to return the app's current version code.
<P>
<DD><DL>
</DL>
</DD>
</DL>
<!-- ========= END OF CLASS DATA ========= -->
<HR>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<A NAME="navbar_bottom"><!-- --></A>
<A HREF="#skip-navbar_bottom" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_bottom_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Class</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="class-use/UpdateInfoListener.html"><FONT CLASS="NavBarFont1"><B>Use</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../net/hockeyapp/android/UpdateFragment.html" title="class in net.hockeyapp.android"><B>PREV CLASS</B></A>
<A HREF="../../../net/hockeyapp/android/UpdateManager.html" title="class in net.hockeyapp.android"><B>NEXT CLASS</B></A></FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../index.html?net/hockeyapp/android/UpdateInfoListener.html" target="_top"><B>FRAMES</B></A>
<A HREF="UpdateInfoListener.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
<TR>
<TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2">
SUMMARY: NESTED | FIELD | CONSTR | <A HREF="#method_summary">METHOD</A></FONT></TD>
<TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2">
DETAIL: FIELD | CONSTR | <A HREF="#method_detail">METHOD</A></FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_bottom"></A>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<HR>
</BODY>
</HTML>
| david-byng/cordova-plugin-hockeyapp | src/android/HockeySDK-Android/docs/net/hockeyapp/android/UpdateInfoListener.html | HTML | apache-2.0 | 9,725 |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/workmail/WorkMail_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/workmail/model/ResourceType.h>
#include <aws/workmail/model/EntityState.h>
#include <aws/core/utils/DateTime.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace WorkMail
{
namespace Model
{
/**
* <p>The representation of a resource.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/Resource">AWS
* API Reference</a></p>
*/
class AWS_WORKMAIL_API Resource
{
public:
Resource();
Resource(Aws::Utils::Json::JsonView jsonValue);
Resource& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>The identifier of the resource.</p>
*/
inline const Aws::String& GetId() const{ return m_id; }
/**
* <p>The identifier of the resource.</p>
*/
inline bool IdHasBeenSet() const { return m_idHasBeenSet; }
/**
* <p>The identifier of the resource.</p>
*/
inline void SetId(const Aws::String& value) { m_idHasBeenSet = true; m_id = value; }
/**
* <p>The identifier of the resource.</p>
*/
inline void SetId(Aws::String&& value) { m_idHasBeenSet = true; m_id = std::move(value); }
/**
* <p>The identifier of the resource.</p>
*/
inline void SetId(const char* value) { m_idHasBeenSet = true; m_id.assign(value); }
/**
* <p>The identifier of the resource.</p>
*/
inline Resource& WithId(const Aws::String& value) { SetId(value); return *this;}
/**
* <p>The identifier of the resource.</p>
*/
inline Resource& WithId(Aws::String&& value) { SetId(std::move(value)); return *this;}
/**
* <p>The identifier of the resource.</p>
*/
inline Resource& WithId(const char* value) { SetId(value); return *this;}
/**
* <p>The email of the resource.</p>
*/
inline const Aws::String& GetEmail() const{ return m_email; }
/**
* <p>The email of the resource.</p>
*/
inline bool EmailHasBeenSet() const { return m_emailHasBeenSet; }
/**
* <p>The email of the resource.</p>
*/
inline void SetEmail(const Aws::String& value) { m_emailHasBeenSet = true; m_email = value; }
/**
* <p>The email of the resource.</p>
*/
inline void SetEmail(Aws::String&& value) { m_emailHasBeenSet = true; m_email = std::move(value); }
/**
* <p>The email of the resource.</p>
*/
inline void SetEmail(const char* value) { m_emailHasBeenSet = true; m_email.assign(value); }
/**
* <p>The email of the resource.</p>
*/
inline Resource& WithEmail(const Aws::String& value) { SetEmail(value); return *this;}
/**
* <p>The email of the resource.</p>
*/
inline Resource& WithEmail(Aws::String&& value) { SetEmail(std::move(value)); return *this;}
/**
* <p>The email of the resource.</p>
*/
inline Resource& WithEmail(const char* value) { SetEmail(value); return *this;}
/**
* <p>The name of the resource.</p>
*/
inline const Aws::String& GetName() const{ return m_name; }
/**
* <p>The name of the resource.</p>
*/
inline bool NameHasBeenSet() const { return m_nameHasBeenSet; }
/**
* <p>The name of the resource.</p>
*/
inline void SetName(const Aws::String& value) { m_nameHasBeenSet = true; m_name = value; }
/**
* <p>The name of the resource.</p>
*/
inline void SetName(Aws::String&& value) { m_nameHasBeenSet = true; m_name = std::move(value); }
/**
* <p>The name of the resource.</p>
*/
inline void SetName(const char* value) { m_nameHasBeenSet = true; m_name.assign(value); }
/**
* <p>The name of the resource.</p>
*/
inline Resource& WithName(const Aws::String& value) { SetName(value); return *this;}
/**
* <p>The name of the resource.</p>
*/
inline Resource& WithName(Aws::String&& value) { SetName(std::move(value)); return *this;}
/**
* <p>The name of the resource.</p>
*/
inline Resource& WithName(const char* value) { SetName(value); return *this;}
/**
* <p>The type of the resource: equipment or room.</p>
*/
inline const ResourceType& GetType() const{ return m_type; }
/**
* <p>The type of the resource: equipment or room.</p>
*/
inline bool TypeHasBeenSet() const { return m_typeHasBeenSet; }
/**
* <p>The type of the resource: equipment or room.</p>
*/
inline void SetType(const ResourceType& value) { m_typeHasBeenSet = true; m_type = value; }
/**
* <p>The type of the resource: equipment or room.</p>
*/
inline void SetType(ResourceType&& value) { m_typeHasBeenSet = true; m_type = std::move(value); }
/**
* <p>The type of the resource: equipment or room.</p>
*/
inline Resource& WithType(const ResourceType& value) { SetType(value); return *this;}
/**
* <p>The type of the resource: equipment or room.</p>
*/
inline Resource& WithType(ResourceType&& value) { SetType(std::move(value)); return *this;}
/**
* <p>The state of the resource, which can be ENABLED, DISABLED, or DELETED.</p>
*/
inline const EntityState& GetState() const{ return m_state; }
/**
* <p>The state of the resource, which can be ENABLED, DISABLED, or DELETED.</p>
*/
inline bool StateHasBeenSet() const { return m_stateHasBeenSet; }
/**
* <p>The state of the resource, which can be ENABLED, DISABLED, or DELETED.</p>
*/
inline void SetState(const EntityState& value) { m_stateHasBeenSet = true; m_state = value; }
/**
* <p>The state of the resource, which can be ENABLED, DISABLED, or DELETED.</p>
*/
inline void SetState(EntityState&& value) { m_stateHasBeenSet = true; m_state = std::move(value); }
/**
* <p>The state of the resource, which can be ENABLED, DISABLED, or DELETED.</p>
*/
inline Resource& WithState(const EntityState& value) { SetState(value); return *this;}
/**
* <p>The state of the resource, which can be ENABLED, DISABLED, or DELETED.</p>
*/
inline Resource& WithState(EntityState&& value) { SetState(std::move(value)); return *this;}
/**
* <p>The date indicating when the resource was enabled for Amazon WorkMail
* use.</p>
*/
inline const Aws::Utils::DateTime& GetEnabledDate() const{ return m_enabledDate; }
/**
* <p>The date indicating when the resource was enabled for Amazon WorkMail
* use.</p>
*/
inline bool EnabledDateHasBeenSet() const { return m_enabledDateHasBeenSet; }
/**
* <p>The date indicating when the resource was enabled for Amazon WorkMail
* use.</p>
*/
inline void SetEnabledDate(const Aws::Utils::DateTime& value) { m_enabledDateHasBeenSet = true; m_enabledDate = value; }
/**
* <p>The date indicating when the resource was enabled for Amazon WorkMail
* use.</p>
*/
inline void SetEnabledDate(Aws::Utils::DateTime&& value) { m_enabledDateHasBeenSet = true; m_enabledDate = std::move(value); }
/**
* <p>The date indicating when the resource was enabled for Amazon WorkMail
* use.</p>
*/
inline Resource& WithEnabledDate(const Aws::Utils::DateTime& value) { SetEnabledDate(value); return *this;}
/**
* <p>The date indicating when the resource was enabled for Amazon WorkMail
* use.</p>
*/
inline Resource& WithEnabledDate(Aws::Utils::DateTime&& value) { SetEnabledDate(std::move(value)); return *this;}
/**
* <p>The date indicating when the resource was disabled from Amazon WorkMail
* use.</p>
*/
inline const Aws::Utils::DateTime& GetDisabledDate() const{ return m_disabledDate; }
/**
* <p>The date indicating when the resource was disabled from Amazon WorkMail
* use.</p>
*/
inline bool DisabledDateHasBeenSet() const { return m_disabledDateHasBeenSet; }
/**
* <p>The date indicating when the resource was disabled from Amazon WorkMail
* use.</p>
*/
inline void SetDisabledDate(const Aws::Utils::DateTime& value) { m_disabledDateHasBeenSet = true; m_disabledDate = value; }
/**
* <p>The date indicating when the resource was disabled from Amazon WorkMail
* use.</p>
*/
inline void SetDisabledDate(Aws::Utils::DateTime&& value) { m_disabledDateHasBeenSet = true; m_disabledDate = std::move(value); }
/**
* <p>The date indicating when the resource was disabled from Amazon WorkMail
* use.</p>
*/
inline Resource& WithDisabledDate(const Aws::Utils::DateTime& value) { SetDisabledDate(value); return *this;}
/**
* <p>The date indicating when the resource was disabled from Amazon WorkMail
* use.</p>
*/
inline Resource& WithDisabledDate(Aws::Utils::DateTime&& value) { SetDisabledDate(std::move(value)); return *this;}
private:
Aws::String m_id;
bool m_idHasBeenSet;
Aws::String m_email;
bool m_emailHasBeenSet;
Aws::String m_name;
bool m_nameHasBeenSet;
ResourceType m_type;
bool m_typeHasBeenSet;
EntityState m_state;
bool m_stateHasBeenSet;
Aws::Utils::DateTime m_enabledDate;
bool m_enabledDateHasBeenSet;
Aws::Utils::DateTime m_disabledDate;
bool m_disabledDateHasBeenSet;
};
} // namespace Model
} // namespace WorkMail
} // namespace Aws
| awslabs/aws-sdk-cpp | aws-cpp-sdk-workmail/include/aws/workmail/model/Resource.h | C | apache-2.0 | 9,739 |
/*
* Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
* Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
* Created By:
* Maintained By:
*/
//= require can.jquery-all
//= require models/cacheable
(function(ns, can) {

// Cacheable CanJS model for GGRC "Document" resources served at /api/documents.
can.Model.Cacheable("CMS.Models.Document", {
  root_object : "document"
  , root_collection : "documents"
  , findAll : "GET /api/documents"
  // Custom create: whitelists title/description/link so no other client-side
  // attributes leak into the POST payload.
  , create : function(params) {
    var _params = {
      document : {
        title : params.document.title
        , description : params.document.description
        , link : params.document.link
      }
    };
    return $.ajax({
      type : "POST"
      , "url" : "/api/documents"
      , dataType : "json"
      , data : _params
    });
  }
  // jQuery UI autocomplete-style search adapter: maps API results to
  // {label, value} pairs. NOTE(review): create() sends document.link, but
  // results are read via item.document.link_url — confirm the API echoes
  // the link back under link_url.
  , search : function(request, response) {
    return $.ajax({
      type : "get"
      , url : "/api/documents"
      , dataType : "json"
      , data : {s : request.term}
      , success : function(data) {
        response($.map( data, function( item ) {
          return can.extend({}, item.document, {
            label: item.document.title
            ? item.document.title
              + (item.document.link_url
                 ? " (" + item.document.link_url + ")"
                 : "")
            : item.document.link_url
            , value: item.document.id
          });
        }));
      }
    });
  }
}, {
  init : function () {
    this._super && this._super();
    // this.bind("change", function(ev, attr, how, newVal, oldVal) {
    //   var obj;
    //   if(obj = CMS.Models.ObjectDocument.findInCacheById(this.id) && attr !== "id") {
    //     obj.attr(attr, newVal);
    //   }
    // });
    // Replace null attributes with undefined so bindings treat them as unset.
    var that = this;
    this.each(function(value, name) {
      if (value === null)
        that.attr(name, undefined);
    });
  }
});

// Join model linking a Document to an arbitrary "documentable" object.
can.Model.Cacheable("CMS.Models.ObjectDocument", {
  root_object : "object_document"
  , root_collection : "object_documents"
  , findAll: "GET /api/object_documents"
  , create: "POST /api/object_documents"
  , destroy : "DELETE /api/object_documents/{id}"
}, {
  init : function() {
    var _super = this._super;
    // Re-resolves the associated document/documentable model instances from the
    // cache; runs both at construction time and again after server "created".
    function reinit() {
      var that = this;
      typeof _super === "function" && _super.call(this);
      this.attr("document", CMS.Models.get_instance(
        "Document", this.document_id || (this.document && this.document.id)));
      this.attr("documentable", CMS.Models.get_instance(
        this.documentable_type || (this.documentable && this.documentable.type),
        this.documentable_id || (this.documentable && this.documentable.id)));
      /*this.attr(
          "document"
        , CMS.Models.Document.findInCacheById(this.document_id)
          || new CMS.Models.Document(this.document && this.document.serialize ? this.document.serialize() : this.document));
      */
      // Drop null attributes entirely (contrast with Document.init, which maps
      // them to undefined instead).
      this.each(function(value, name) {
        if (value === null)
          that.removeAttr(name);
      });
    }
    this.bind("created", can.proxy(reinit, this));
    reinit.call(this);
  }
});

})(this, can);
| hamyuan/ggrc-self-test | src/ggrc/assets/javascripts/pbc/document.js | JavaScript | apache-2.0 | 3,489 |
/**
*
* Copyright 2017 Paul Schaub, 2020 Florian Schmaus
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jivesoftware.smackx.omemo;
import static org.jivesoftware.smackx.omemo.util.OmemoConstants.OMEMO_NAMESPACE_V_AXOLOTL;
import java.io.IOException;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Random;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.WeakHashMap;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.jivesoftware.smack.ConnectionListener;
import org.jivesoftware.smack.Manager;
import org.jivesoftware.smack.SmackException;
import org.jivesoftware.smack.SmackException.NotConnectedException;
import org.jivesoftware.smack.XMPPConnection;
import org.jivesoftware.smack.XMPPException;
import org.jivesoftware.smack.packet.Message;
import org.jivesoftware.smack.packet.MessageBuilder;
import org.jivesoftware.smack.packet.Stanza;
import org.jivesoftware.smack.util.Async;
import org.jivesoftware.smackx.carbons.CarbonManager;
import org.jivesoftware.smackx.carbons.packet.CarbonExtension;
import org.jivesoftware.smackx.disco.ServiceDiscoveryManager;
import org.jivesoftware.smackx.hints.element.StoreHint;
import org.jivesoftware.smackx.mam.MamManager;
import org.jivesoftware.smackx.muc.MultiUserChat;
import org.jivesoftware.smackx.muc.MultiUserChatManager;
import org.jivesoftware.smackx.muc.RoomInfo;
import org.jivesoftware.smackx.omemo.element.OmemoBundleElement;
import org.jivesoftware.smackx.omemo.element.OmemoDeviceListElement;
import org.jivesoftware.smackx.omemo.element.OmemoDeviceListElement_VAxolotl;
import org.jivesoftware.smackx.omemo.element.OmemoElement;
import org.jivesoftware.smackx.omemo.exceptions.CannotEstablishOmemoSessionException;
import org.jivesoftware.smackx.omemo.exceptions.CorruptedOmemoKeyException;
import org.jivesoftware.smackx.omemo.exceptions.CryptoFailedException;
import org.jivesoftware.smackx.omemo.exceptions.NoOmemoSupportException;
import org.jivesoftware.smackx.omemo.exceptions.NoRawSessionException;
import org.jivesoftware.smackx.omemo.exceptions.UndecidedOmemoIdentityException;
import org.jivesoftware.smackx.omemo.internal.OmemoCachedDeviceList;
import org.jivesoftware.smackx.omemo.internal.OmemoDevice;
import org.jivesoftware.smackx.omemo.listener.OmemoMessageListener;
import org.jivesoftware.smackx.omemo.listener.OmemoMucMessageListener;
import org.jivesoftware.smackx.omemo.trust.OmemoFingerprint;
import org.jivesoftware.smackx.omemo.trust.OmemoTrustCallback;
import org.jivesoftware.smackx.omemo.trust.TrustState;
import org.jivesoftware.smackx.omemo.util.MessageOrOmemoMessage;
import org.jivesoftware.smackx.omemo.util.OmemoConstants;
import org.jivesoftware.smackx.pep.PepEventListener;
import org.jivesoftware.smackx.pep.PepManager;
import org.jivesoftware.smackx.pubsub.PubSubException;
import org.jivesoftware.smackx.pubsub.PubSubManager;
import org.jivesoftware.smackx.pubsub.packet.PubSub;
import org.jxmpp.jid.BareJid;
import org.jxmpp.jid.DomainBareJid;
import org.jxmpp.jid.EntityBareJid;
import org.jxmpp.jid.EntityFullJid;
/**
* Manager that allows sending messages encrypted with OMEMO.
* This class also provides some methods useful for a client that implements OMEMO.
*
* @author Paul Schaub
*/
public final class OmemoManager extends Manager {
private static final Logger LOGGER = Logger.getLogger(OmemoManager.class.getName());
private static final Integer UNKNOWN_DEVICE_ID = -1;
private static final WeakHashMap<XMPPConnection, TreeMap<Integer, OmemoManager>> INSTANCES = new WeakHashMap<>();
private final OmemoService<?, ?, ?, ?, ?, ?, ?, ?, ?> service;
private final HashSet<OmemoMessageListener> omemoMessageListeners = new HashSet<>();
private final HashSet<OmemoMucMessageListener> omemoMucMessageListeners = new HashSet<>();
private final PepManager pepManager;
private OmemoTrustCallback trustCallback;
private BareJid ownJid;
private Integer deviceId;
/**
* Private constructor.
*
* @param connection connection
* @param deviceId deviceId
*/
    private OmemoManager(XMPPConnection connection, Integer deviceId) {
        super(connection);
        // Obtain the OmemoService singleton first: the ratchet registration and the
        // stanza listeners resumed below all delegate to it.
        service = OmemoService.getInstance();
        pepManager = PepManager.getInstanceFor(connection);
        this.deviceId = deviceId;
        if (connection.isAuthenticated()) {
            // Already logged in: our own bare JID can be resolved immediately.
            initBareJidAndDeviceId(this);
        } else {
            // Not yet logged in: defer JID/deviceId resolution until authentication,
            // when the local bare JID becomes known.
            connection.addConnectionListener(new ConnectionListener() {
                @Override
                public void authenticated(XMPPConnection connection, boolean resumed) {
                    initBareJidAndDeviceId(OmemoManager.this);
                }
            });
        }
        service.registerRatchetForManager(this);
        // StanzaListeners
        resumeStanzaAndPEPListeners();
    }
/**
* Return an OmemoManager instance for the given connection and deviceId.
* If there was an OmemoManager for the connection and id before, return it. Otherwise create a new OmemoManager
* instance and return it.
*
* @param connection XmppConnection.
* @param deviceId MUST NOT be null and MUST be greater than 0.
*
* @return OmemoManager instance for the given connection and deviceId.
*/
public static synchronized OmemoManager getInstanceFor(XMPPConnection connection, Integer deviceId) {
if (deviceId == null || deviceId < 1) {
throw new IllegalArgumentException("DeviceId MUST NOT be null and MUST be greater than 0.");
}
TreeMap<Integer, OmemoManager> managersOfConnection = INSTANCES.get(connection);
if (managersOfConnection == null) {
managersOfConnection = new TreeMap<>();
INSTANCES.put(connection, managersOfConnection);
}
OmemoManager manager = managersOfConnection.get(deviceId);
if (manager == null) {
manager = new OmemoManager(connection, deviceId);
managersOfConnection.put(deviceId, manager);
}
return manager;
}
/**
* Returns an OmemoManager instance for the given connection. If there was one manager for the connection before,
* return it. If there were multiple managers before, return the one with the lowest deviceId.
* If there was no manager before, return a new one. As soon as the connection gets authenticated, the manager
* will look for local deviceIDs and select the lowest one as its id. If there are not local deviceIds, the manager
* will assign itself a random id.
*
* @param connection XmppConnection.
*
* @return OmemoManager instance for the given connection and a determined deviceId.
*/
public static synchronized OmemoManager getInstanceFor(XMPPConnection connection) {
TreeMap<Integer, OmemoManager> managers = INSTANCES.get(connection);
if (managers == null) {
managers = new TreeMap<>();
INSTANCES.put(connection, managers);
}
OmemoManager manager;
if (managers.size() == 0) {
manager = new OmemoManager(connection, UNKNOWN_DEVICE_ID);
managers.put(UNKNOWN_DEVICE_ID, manager);
} else {
manager = managers.get(managers.firstKey());
}
return manager;
}
/**
* Set a TrustCallback for this particular OmemoManager.
* TrustCallbacks are used to query and modify trust decisions.
*
* @param callback trustCallback.
*/
public void setTrustCallback(OmemoTrustCallback callback) {
if (trustCallback != null) {
throw new IllegalStateException("TrustCallback can only be set once.");
}
trustCallback = callback;
}
/**
* Return the TrustCallback of this manager.
*
* @return callback that is used for trust decisions.
*/
OmemoTrustCallback getTrustCallback() {
return trustCallback;
}
/**
* Initializes the OmemoManager. This method must be called before the manager can be used.
*
* @throws CorruptedOmemoKeyException if the OMEMO key is corrupted.
* @throws InterruptedException if the calling thread was interrupted.
* @throws SmackException.NoResponseException if there was no response from the remote entity.
* @throws SmackException.NotConnectedException if the XMPP connection is not connected.
* @throws XMPPException.XMPPErrorException if there was an XMPP error returned.
* @throws SmackException.NotLoggedInException if the XMPP connection is not authenticated.
* @throws PubSubException.NotALeafNodeException if a PubSub leaf node operation was attempted on a non-leaf node.
* @throws IOException if an I/O error occurred.
*/
public synchronized void initialize()
throws SmackException.NotLoggedInException, CorruptedOmemoKeyException, InterruptedException,
SmackException.NoResponseException, SmackException.NotConnectedException, XMPPException.XMPPErrorException,
PubSubException.NotALeafNodeException, IOException {
if (!connection().isAuthenticated()) {
throw new SmackException.NotLoggedInException();
}
if (getTrustCallback() == null) {
throw new IllegalStateException("No TrustCallback set.");
}
getOmemoService().init(new LoggedInOmemoManager(this));
}
/**
* Initialize the manager without blocking. Once the manager is successfully initialized, the finishedCallback will
* be notified. It will also get notified, if an error occurs.
*
* @param finishedCallback callback that gets called once the manager is initialized.
*/
public void initializeAsync(final InitializationFinishedCallback finishedCallback) {
Async.go(new Runnable() {
@Override
public void run() {
try {
initialize();
finishedCallback.initializationFinished(OmemoManager.this);
} catch (Exception e) {
finishedCallback.initializationFailed(e);
}
}
});
}
/**
* Return a set of all OMEMO capable devices of a contact.
* Note, that this method does not explicitly refresh the device list of the contact, so it might be outdated.
*
* @see #requestDeviceListUpdateFor(BareJid)
*
* @param contact contact we want to get a set of device of.
* @return set of known devices of that contact.
*
* @throws IOException if an I/O error occurred.
*/
public Set<OmemoDevice> getDevicesOf(BareJid contact) throws IOException {
OmemoCachedDeviceList list = getOmemoService().getOmemoStoreBackend().loadCachedDeviceList(getOwnDevice(), contact);
HashSet<OmemoDevice> devices = new HashSet<>();
for (int deviceId : list.getActiveDevices()) {
devices.add(new OmemoDevice(contact, deviceId));
}
return devices;
}
/**
* OMEMO encrypt a cleartext message for a single recipient.
* Note that this method does NOT set the 'to' attribute of the message.
*
* @param recipient recipients bareJid
* @param message text to encrypt
* @return encrypted message
*
* @throws CryptoFailedException when something crypto related fails
* @throws UndecidedOmemoIdentityException When there are undecided devices
* @throws InterruptedException if the calling thread was interrupted.
* @throws SmackException.NotConnectedException if the XMPP connection is not connected.
* @throws SmackException.NoResponseException if there was no response from the remote entity.
* @throws SmackException.NotLoggedInException if the XMPP connection is not authenticated.
* @throws IOException if an I/O error occurred.
*/
public OmemoMessage.Sent encrypt(BareJid recipient, String message)
throws CryptoFailedException, UndecidedOmemoIdentityException,
InterruptedException, SmackException.NotConnectedException,
SmackException.NoResponseException, SmackException.NotLoggedInException, IOException {
Set<BareJid> recipients = new HashSet<>();
recipients.add(recipient);
return encrypt(recipients, message);
}
/**
* OMEMO encrypt a cleartext message for multiple recipients.
*
* @param recipients recipients barejids
* @param message text to encrypt
* @return encrypted message.
*
* @throws CryptoFailedException When something crypto related fails
* @throws UndecidedOmemoIdentityException When there are undecided devices.
* @throws InterruptedException if the calling thread was interrupted.
* @throws SmackException.NotConnectedException if the XMPP connection is not connected.
* @throws SmackException.NoResponseException if there was no response from the remote entity.
* @throws SmackException.NotLoggedInException if the XMPP connection is not authenticated.
* @throws IOException if an I/O error occurred.
*/
public synchronized OmemoMessage.Sent encrypt(Set<BareJid> recipients, String message)
throws CryptoFailedException, UndecidedOmemoIdentityException,
InterruptedException, SmackException.NotConnectedException,
SmackException.NoResponseException, SmackException.NotLoggedInException, IOException {
LoggedInOmemoManager guard = new LoggedInOmemoManager(this);
Set<OmemoDevice> devices = getDevicesOf(getOwnJid());
for (BareJid recipient : recipients) {
devices.addAll(getDevicesOf(recipient));
}
return service.createOmemoMessage(guard, devices, message);
}
/**
* Encrypt a message for all recipients in the MultiUserChat.
*
* @param muc multiUserChat
* @param message message to send
* @return encrypted message
*
* @throws UndecidedOmemoIdentityException when there are undecided devices.
* @throws CryptoFailedException if the OMEMO cryptography failed.
* @throws XMPPException.XMPPErrorException if there was an XMPP error returned.
* @throws SmackException.NotConnectedException if the XMPP connection is not connected.
* @throws InterruptedException if the calling thread was interrupted.
* @throws SmackException.NoResponseException if there was no response from the remote entity.
* @throws NoOmemoSupportException When the muc doesn't support OMEMO.
* @throws SmackException.NotLoggedInException if the XMPP connection is not authenticated.
* @throws IOException if an I/O error occurred.
*/
public synchronized OmemoMessage.Sent encrypt(MultiUserChat muc, String message)
throws UndecidedOmemoIdentityException, CryptoFailedException,
XMPPException.XMPPErrorException, SmackException.NotConnectedException, InterruptedException,
SmackException.NoResponseException, NoOmemoSupportException,
SmackException.NotLoggedInException, IOException {
if (!multiUserChatSupportsOmemo(muc)) {
throw new NoOmemoSupportException();
}
Set<BareJid> recipients = new HashSet<>();
for (EntityFullJid e : muc.getOccupants()) {
recipients.add(muc.getOccupant(e).getJid().asBareJid());
}
return encrypt(recipients, message);
}
/**
* Manually decrypt an OmemoElement.
* This method should only be used for use-cases, where the internal listeners don't pick up on an incoming message.
* (for example MAM query results).
*
* @param sender bareJid of the message sender (must be the jid of the contact who sent the message)
* @param omemoElement omemoElement
* @return decrypted OmemoMessage
*
* @throws SmackException.NotLoggedInException if the Manager is not authenticated
* @throws CorruptedOmemoKeyException if our or their key is corrupted
* @throws NoRawSessionException if the message was not a preKeyMessage, but we had no session with the contact
* @throws CryptoFailedException if decryption fails
* @throws IOException if an I/O error occurred.
*/
public OmemoMessage.Received decrypt(BareJid sender, OmemoElement omemoElement)
throws SmackException.NotLoggedInException, CorruptedOmemoKeyException, NoRawSessionException,
CryptoFailedException, IOException {
LoggedInOmemoManager managerGuard = new LoggedInOmemoManager(this);
return getOmemoService().decryptMessage(managerGuard, sender, omemoElement);
}
    /**
     * Decrypt messages from a MAM query.
     *
     * @param mamQuery The MAM query
     * @return list of decrypted OmemoMessages
     *
     * @throws SmackException.NotLoggedInException if the Manager is not authenticated.
     * @throws IOException if an I/O error occurred.
     */
    public List<MessageOrOmemoMessage> decryptMamQueryResult(MamManager.MamQuery mamQuery)
            throws SmackException.NotLoggedInException, IOException {
        // Wrap the service's result in a fresh ArrayList so callers get their own mutable copy.
        return new ArrayList<>(getOmemoService().decryptMamQueryResult(new LoggedInOmemoManager(this), mamQuery));
    }
    /**
     * Trust that a fingerprint belongs to an OmemoDevice.
     * The fingerprint must be the lowercase, hexadecimal fingerprint of the identityKey of the device and must
     * be of length 64.
     *
     * @param device device
     * @param fingerprint fingerprint
     * @throws IllegalStateException if no TrustCallback is set on this manager.
     */
    public void trustOmemoIdentity(OmemoDevice device, OmemoFingerprint fingerprint) {
        if (trustCallback == null) {
            throw new IllegalStateException("No TrustCallback set.");
        }
        trustCallback.setTrust(device, fingerprint, TrustState.trusted);
    }
    /**
     * Distrust the fingerprint/OmemoDevice tuple.
     * The fingerprint must be the lowercase, hexadecimal fingerprint of the identityKey of the device and must
     * be of length 64.
     *
     * @param device device
     * @param fingerprint fingerprint
     * @throws IllegalStateException if no TrustCallback is set on this manager.
     */
    public void distrustOmemoIdentity(OmemoDevice device, OmemoFingerprint fingerprint) {
        if (trustCallback == null) {
            throw new IllegalStateException("No TrustCallback set.");
        }
        trustCallback.setTrust(device, fingerprint, TrustState.untrusted);
    }
    /**
     * Returns true, if the fingerprint/OmemoDevice tuple is trusted, otherwise false.
     * The fingerprint must be the lowercase, hexadecimal fingerprint of the identityKey of the device and must
     * be of length 64.
     *
     * @param device device
     * @param fingerprint fingerprint
     * @return <code>true</code> if this is a trusted OMEMO identity.
     * @throws IllegalStateException if no TrustCallback is set on this manager.
     */
    public boolean isTrustedOmemoIdentity(OmemoDevice device, OmemoFingerprint fingerprint) {
        if (trustCallback == null) {
            throw new IllegalStateException("No TrustCallback set.");
        }
        return trustCallback.getTrust(device, fingerprint) == TrustState.trusted;
    }
    /**
     * Returns true, if the fingerprint/OmemoDevice tuple is decided by the user.
     * The fingerprint must be the lowercase, hexadecimal fingerprint of the identityKey of the device and must
     * be of length 64.
     *
     * @param device device
     * @param fingerprint fingerprint
     * @return <code>true</code> if the trust is decided for the identity.
     * @throws IllegalStateException if no TrustCallback is set on this manager.
     */
    public boolean isDecidedOmemoIdentity(OmemoDevice device, OmemoFingerprint fingerprint) {
        if (trustCallback == null) {
            throw new IllegalStateException("No TrustCallback set.");
        }
        // Both 'trusted' and 'untrusted' count as decided; only 'undecided' does not.
        return trustCallback.getTrust(device, fingerprint) != TrustState.undecided;
    }
    /**
     * Send a ratchet update message. This can be used to advance the ratchet of a session in order to maintain forward
     * secrecy.
     *
     * @param recipient recipient
     *
     * @throws CorruptedOmemoKeyException When the used identityKeys are corrupted
     * @throws CryptoFailedException When something fails with the crypto
     * @throws CannotEstablishOmemoSessionException When we can't establish a session with the recipient
     * @throws SmackException.NotLoggedInException if the XMPP connection is not authenticated.
     * @throws InterruptedException if the calling thread was interrupted.
     * @throws SmackException.NoResponseException if there was no response from the remote entity.
     * @throws NoSuchAlgorithmException if no such algorithm is available.
     * @throws SmackException.NotConnectedException if the XMPP connection is not connected.
     * @throws IOException if an I/O error occurred.
     */
    public synchronized void sendRatchetUpdateMessage(OmemoDevice recipient)
            throws SmackException.NotLoggedInException, CorruptedOmemoKeyException, InterruptedException,
            SmackException.NoResponseException, NoSuchAlgorithmException, SmackException.NotConnectedException,
            CryptoFailedException, CannotEstablishOmemoSessionException, IOException {
        XMPPConnection connection = connection();
        // Address the message to the recipient's bare JID.
        MessageBuilder message = connection.getStanzaFactory()
                .buildMessageStanza()
                .to(recipient.getJid());
        // The ratchet update is an OMEMO element without a message payload.
        OmemoElement element = getOmemoService().createRatchetUpdateElement(new LoggedInOmemoManager(this), recipient);
        message.addExtension(element);
        // Set MAM Storage hint
        StoreHint.set(message);
        connection.sendStanza(message.build());
    }
/**
* Returns true, if the contact has any active devices published in a deviceList.
*
* @param contact contact
* @return true if contact has at least one OMEMO capable device.
*
* @throws SmackException.NotConnectedException if the XMPP connection is not connected.
* @throws InterruptedException if the calling thread was interrupted.
* @throws SmackException.NoResponseException if there was no response from the remote entity.
* @throws PubSubException.NotALeafNodeException if a PubSub leaf node operation was attempted on a non-leaf node.
* @throws XMPPException.XMPPErrorException if there was an XMPP error returned.
* @throws IOException if an I/O error occurred.
*/
public synchronized boolean contactSupportsOmemo(BareJid contact)
throws InterruptedException, PubSubException.NotALeafNodeException, XMPPException.XMPPErrorException,
SmackException.NotConnectedException, SmackException.NoResponseException, IOException {
OmemoCachedDeviceList deviceList = getOmemoService().refreshDeviceList(connection(), getOwnDevice(), contact);
return !deviceList.getActiveDevices().isEmpty();
}
/**
* Returns true, if the MUC with the EntityBareJid multiUserChat is non-anonymous and members only (prerequisite
* for OMEMO encryption in MUC).
*
* @param multiUserChat MUC
* @return true if chat supports OMEMO
*
* @throws XMPPException.XMPPErrorException if there was an XMPP protocol level error
* @throws SmackException.NotConnectedException if the connection is not connected
* @throws InterruptedException if the thread is interrupted
* @throws SmackException.NoResponseException if the server does not respond
*/
public boolean multiUserChatSupportsOmemo(MultiUserChat multiUserChat)
throws XMPPException.XMPPErrorException, SmackException.NotConnectedException, InterruptedException,
SmackException.NoResponseException {
EntityBareJid jid = multiUserChat.getRoom();
RoomInfo roomInfo = MultiUserChatManager.getInstanceFor(connection()).getRoomInfo(jid);
return roomInfo.isNonanonymous() && roomInfo.isMembersOnly();
}
    /**
     * Returns true, if the Server supports PEP.
     *
     * @param connection XMPPConnection
     * @param server domainBareJid of the server to test
     * @return true if server supports pep
     *
     * @throws XMPPException.XMPPErrorException if there was an XMPP error returned.
     * @throws SmackException.NotConnectedException if the XMPP connection is not connected.
     * @throws InterruptedException if the calling thread was interrupted.
     * @throws SmackException.NoResponseException if there was no response from the remote entity.
     */
    public static boolean serverSupportsOmemo(XMPPConnection connection, DomainBareJid server)
            throws XMPPException.XMPPErrorException, SmackException.NotConnectedException, InterruptedException,
            SmackException.NoResponseException {
        // Note: this only checks for the generic PubSub feature namespace, not an OMEMO-specific feature.
        return ServiceDiscoveryManager.getInstanceFor(connection)
                .discoverInfo(server).containsFeature(PubSub.NAMESPACE);
    }
    /**
     * Return the fingerprint of our identity key.
     *
     * @return our own OMEMO fingerprint
     *
     * @throws SmackException.NotLoggedInException if we don't know our bareJid yet.
     * @throws CorruptedOmemoKeyException if our identityKey is corrupted.
     * @throws IOException if an I/O error occurred.
     */
    public synchronized OmemoFingerprint getOwnFingerprint()
            throws SmackException.NotLoggedInException, CorruptedOmemoKeyException, IOException {
        // Without our own JID we cannot address our own device in the store.
        if (getOwnJid() == null) {
            throw new SmackException.NotLoggedInException();
        }
        return getOmemoService().getOmemoStoreBackend().getFingerprint(getOwnDevice());
    }
    /**
     * Get the fingerprint of a contacts device.
     *
     * @param device contacts OmemoDevice
     * @return fingerprint of the given OMEMO device.
     *
     * @throws CannotEstablishOmemoSessionException if we have no session yet, and are unable to create one.
     * @throws SmackException.NotLoggedInException if the XMPP connection is not authenticated.
     * @throws CorruptedOmemoKeyException if the copy of the fingerprint we have is corrupted.
     * @throws SmackException.NotConnectedException if the XMPP connection is not connected.
     * @throws InterruptedException if the calling thread was interrupted.
     * @throws SmackException.NoResponseException if there was no response from the remote entity.
     * @throws IOException if an I/O error occurred.
     */
    public synchronized OmemoFingerprint getFingerprint(OmemoDevice device)
            throws CannotEstablishOmemoSessionException, SmackException.NotLoggedInException,
            CorruptedOmemoKeyException, SmackException.NotConnectedException, InterruptedException,
            SmackException.NoResponseException, IOException {
        if (getOwnJid() == null) {
            throw new SmackException.NotLoggedInException();
        }
        // Our own device is served locally; no session is required for it.
        if (device.equals(getOwnDevice())) {
            return getOwnFingerprint();
        }
        // For other devices a session may be built on demand to obtain the fingerprint.
        return getOmemoService().getOmemoStoreBackend()
                .getFingerprintAndMaybeBuildSession(new LoggedInOmemoManager(this), device);
    }
/**
* Return all OmemoFingerprints of active devices of a contact.
* TODO: Make more fail-safe
*
* @param contact contact
* @return Map of all active devices of the contact and their fingerprints.
*
* @throws SmackException.NotLoggedInException if the XMPP connection is not authenticated.
* @throws CorruptedOmemoKeyException if the OMEMO key is corrupted.
* @throws CannotEstablishOmemoSessionException if no OMEMO session could be established.
* @throws SmackException.NotConnectedException if the XMPP connection is not connected.
* @throws InterruptedException if the calling thread was interrupted.
* @throws SmackException.NoResponseException if there was no response from the remote entity.
* @throws IOException if an I/O error occurred.
*/
public synchronized HashMap<OmemoDevice, OmemoFingerprint> getActiveFingerprints(BareJid contact)
throws SmackException.NotLoggedInException, CorruptedOmemoKeyException,
CannotEstablishOmemoSessionException, SmackException.NotConnectedException, InterruptedException,
SmackException.NoResponseException, IOException {
if (getOwnJid() == null) {
throw new SmackException.NotLoggedInException();
}
HashMap<OmemoDevice, OmemoFingerprint> fingerprints = new HashMap<>();
OmemoCachedDeviceList deviceList = getOmemoService().getOmemoStoreBackend().loadCachedDeviceList(getOwnDevice(),
contact);
for (int id : deviceList.getActiveDevices()) {
OmemoDevice device = new OmemoDevice(contact, id);
OmemoFingerprint fingerprint = getFingerprint(device);
if (fingerprint != null) {
fingerprints.put(device, fingerprint);
}
}
return fingerprints;
}
    /**
     * Add an OmemoMessageListener. This listener will be informed about incoming OMEMO messages
     * (as well as KeyTransportMessages) and OMEMO encrypted message carbons.
     *
     * @param listener OmemoMessageListener
     * @see #removeOmemoMessageListener(OmemoMessageListener)
     */
    public void addOmemoMessageListener(OmemoMessageListener listener) {
        omemoMessageListeners.add(listener);
    }
    /**
     * Remove an OmemoMessageListener.
     *
     * @param listener OmemoMessageListener
     * @see #addOmemoMessageListener(OmemoMessageListener)
     */
    public void removeOmemoMessageListener(OmemoMessageListener listener) {
        omemoMessageListeners.remove(listener);
    }
    /**
     * Add an OmemoMucMessageListener. This listener will be informed about incoming OMEMO encrypted MUC messages.
     *
     * @param listener OmemoMessageListener.
     * @see #removeOmemoMucMessageListener(OmemoMucMessageListener)
     */
    public void addOmemoMucMessageListener(OmemoMucMessageListener listener) {
        omemoMucMessageListeners.add(listener);
    }
    /**
     * Remove an OmemoMucMessageListener.
     *
     * @param listener OmemoMucMessageListener
     * @see #addOmemoMucMessageListener(OmemoMucMessageListener)
     */
    public void removeOmemoMucMessageListener(OmemoMucMessageListener listener) {
        omemoMucMessageListeners.remove(listener);
    }
    /**
     * Request a deviceList update from contact contact.
     *
     * @param contact contact we want to obtain the deviceList from.
     *
     * @throws InterruptedException if the calling thread was interrupted.
     * @throws PubSubException.NotALeafNodeException if a PubSub leaf node operation was attempted on a non-leaf node.
     * @throws XMPPException.XMPPErrorException if there was an XMPP error returned.
     * @throws SmackException.NotConnectedException if the XMPP connection is not connected.
     * @throws SmackException.NoResponseException if there was no response from the remote entity.
     * @throws IOException if an I/O error occurred.
     */
    public synchronized void requestDeviceListUpdateFor(BareJid contact)
            throws InterruptedException, PubSubException.NotALeafNodeException, XMPPException.XMPPErrorException,
            SmackException.NotConnectedException, SmackException.NoResponseException, IOException {
        // Return value is deliberately ignored; this call is made for the refresh side effect only.
        getOmemoService().refreshDeviceList(connection(), getOwnDevice(), contact);
    }
    /**
     * Publish a new device list with just our own deviceId in it.
     *
     * @throws SmackException.NotLoggedInException if the XMPP connection is not authenticated.
     * @throws InterruptedException if the calling thread was interrupted.
     * @throws XMPPException.XMPPErrorException if there was an XMPP error returned.
     * @throws SmackException.NotConnectedException if the XMPP connection is not connected.
     * @throws SmackException.NoResponseException if there was no response from the remote entity.
     * @throws IOException if an I/O error occurred.
     * @throws PubSubException.NotALeafNodeException if a PubSub leaf node operation was attempted on a non-leaf node.
     */
    public void purgeDeviceList()
            throws SmackException.NotLoggedInException, InterruptedException, XMPPException.XMPPErrorException,
            SmackException.NotConnectedException, SmackException.NoResponseException, IOException, PubSubException.NotALeafNodeException {
        // The guard ensures we are authenticated and know our own BareJid before purging.
        getOmemoService().purgeDeviceList(new LoggedInOmemoManager(this));
    }
    /**
     * Remove all OMEMO related PubSub data of our own account from the server: the bundle node of every device id
     * found in our cached device list, as well as the device list node itself. The purge is best-effort — errors
     * encountered along the way are collected and returned instead of aborting the remaining steps.
     *
     * @return list of exceptions that occurred during the purge (empty if everything succeeded).
     *
     * @throws NotConnectedException if the XMPP connection is not connected.
     * @throws InterruptedException if the calling thread was interrupted.
     * @throws IOException if an I/O error occurred.
     */
    public List<Exception> purgeEverything() throws NotConnectedException, InterruptedException, IOException {
        List<Exception> exceptions = new ArrayList<>(5);
        PubSubManager pm = PubSubManager.getInstanceFor(getConnection(), getOwnJid());
        // Refresh our own device list first, so the cached list below is as complete as possible.
        try {
            requestDeviceListUpdateFor(getOwnJid());
        } catch (SmackException.NoResponseException | PubSubException.NotALeafNodeException
                | XMPPException.XMPPErrorException e) {
            exceptions.add(e);
        }
        OmemoCachedDeviceList deviceList = OmemoService.getInstance().getOmemoStoreBackend()
                .loadCachedDeviceList(getOwnDevice(), getOwnJid());
        // Delete the bundle node contents and the node itself for every known device id (active and inactive).
        for (int id : deviceList.getAllDevices()) {
            try {
                pm.getLeafNode(OmemoConstants.PEP_NODE_BUNDLE_FROM_DEVICE_ID(id)).deleteAllItems();
            } catch (SmackException.NoResponseException | PubSubException.NotALeafNodeException
                    | XMPPException.XMPPErrorException | PubSubException.NotAPubSubNodeException e) {
                exceptions.add(e);
            }
            try {
                pm.deleteNode(OmemoConstants.PEP_NODE_BUNDLE_FROM_DEVICE_ID(id));
            } catch (SmackException.NoResponseException | XMPPException.XMPPErrorException e) {
                exceptions.add(e);
            }
        }
        // Finally clear and delete the device list node.
        try {
            pm.getLeafNode(OmemoConstants.PEP_NODE_DEVICE_LIST).deleteAllItems();
        } catch (SmackException.NoResponseException | PubSubException.NotALeafNodeException
                | XMPPException.XMPPErrorException | PubSubException.NotAPubSubNodeException e) {
            exceptions.add(e);
        }
        try {
            pm.deleteNode(OmemoConstants.PEP_NODE_DEVICE_LIST);
        } catch (SmackException.NoResponseException | XMPPException.XMPPErrorException e) {
            exceptions.add(e);
        }
        return exceptions;
    }
    /**
     * Rotate the signedPreKey published in our OmemoBundle and republish it. This should be done every now and
     * then (7-14 days). The old signedPreKey should be kept for some more time (a month or so) to enable decryption
     * of messages that have been sent since the key was changed.
     *
     * @throws CorruptedOmemoKeyException When the IdentityKeyPair is damaged.
     * @throws InterruptedException if the calling thread was interrupted.
     * @throws XMPPException.XMPPErrorException if there was an XMPP error returned.
     * @throws SmackException.NotConnectedException if the XMPP connection is not connected.
     * @throws SmackException.NoResponseException if there was no response from the remote entity.
     * @throws SmackException.NotLoggedInException if the XMPP connection is not authenticated.
     * @throws IOException if an I/O error occurred.
     * @throws PubSubException.NotALeafNodeException if a PubSub leaf node operation was attempted on a non-leaf node.
     */
    public synchronized void rotateSignedPreKey()
            throws CorruptedOmemoKeyException, SmackException.NotLoggedInException, XMPPException.XMPPErrorException,
            SmackException.NotConnectedException, InterruptedException, SmackException.NoResponseException,
            IOException, PubSubException.NotALeafNodeException {
        if (!connection().isAuthenticated()) {
            throw new SmackException.NotLoggedInException();
        }
        // generate key
        getOmemoService().getOmemoStoreBackend().changeSignedPreKey(getOwnDevice());
        // publish the updated bundle so contacts pick up the new signedPreKey
        OmemoBundleElement bundle = getOmemoService().getOmemoStoreBackend().packOmemoBundle(getOwnDevice());
        OmemoService.publishBundle(connection(), getOwnDevice(), bundle);
    }
    /**
     * Return true, if the given Stanza contains an OMEMO element 'encrypted'.
     *
     * @param stanza stanza
     * @return true if stanza has extension 'encrypted'
     */
    static boolean stanzaContainsOmemoElement(Stanza stanza) {
        // Checks for the 'encrypted' element in the legacy OMEMO namespace (eu.siacs.conversations.axolotl).
        return stanza.hasExtension(OmemoElement.NAME_ENCRYPTED, OMEMO_NAMESPACE_V_AXOLOTL);
    }
    /**
     * Throw an IllegalStateException if no OmemoService is set.
     *
     * @throws IllegalStateException if the service field is null.
     */
    private void throwIfNoServiceSet() {
        if (service == null) {
            throw new IllegalStateException("No OmemoService set in OmemoManager.");
        }
    }
/**
* Returns a pseudo random number from the interval [1, Integer.MAX_VALUE].
*
* @return a random deviceId.
*/
public static int randomDeviceId() {
return new Random().nextInt(Integer.MAX_VALUE - 1) + 1;
}
    /**
     * Return the BareJid of the user.
     *
     * @return our own bare JID, or null if the connection was never authenticated.
     */
    public BareJid getOwnJid() {
        // Lazily cache our bare JID once the connection is authenticated.
        if (ownJid == null && connection().isAuthenticated()) {
            ownJid = connection().getUser().asBareJid();
        }
        return ownJid;
    }
    /**
     * Return the deviceId of this OmemoManager.
     *
     * @return this OmemoManagers deviceId.
     */
    public synchronized Integer getDeviceId() {
        // deviceId is mutated by setDeviceId(int), hence the synchronized accessor.
        return deviceId;
    }
/**
* Return the OmemoDevice of the user.
*
* @return our own OmemoDevice
*/
public synchronized OmemoDevice getOwnDevice() {
BareJid jid = getOwnJid();
if (jid == null) {
return null;
}
return new OmemoDevice(jid, getDeviceId());
}
    /**
     * Set the deviceId of the manager to nDeviceId.
     *
     * @param nDeviceId new deviceId
     */
    synchronized void setDeviceId(int nDeviceId) {
        // Move this instance inside the HashMaps
        // Note the order: remove under the old id first, then register under the new id,
        // then update the field (getDeviceId() must still return the old id for the remove).
        INSTANCES.get(connection()).remove(getDeviceId());
        INSTANCES.get(connection()).put(nDeviceId, this);
        this.deviceId = nDeviceId;
    }
/**
* Notify all registered OmemoMessageListeners about a received OmemoMessage.
*
* @param stanza original stanza
* @param decryptedMessage decrypted OmemoMessage.
*/
void notifyOmemoMessageReceived(Stanza stanza, OmemoMessage.Received decryptedMessage) {
for (OmemoMessageListener l : omemoMessageListeners) {
l.onOmemoMessageReceived(stanza, decryptedMessage);
}
}
/**
* Notify all registered OmemoMucMessageListeners of an incoming OmemoMessageElement in a MUC.
*
* @param muc MultiUserChat the message was received in.
* @param stanza Original Stanza.
* @param decryptedMessage Decrypted OmemoMessage.
*/
void notifyOmemoMucMessageReceived(MultiUserChat muc,
Stanza stanza,
OmemoMessage.Received decryptedMessage) {
for (OmemoMucMessageListener l : omemoMucMessageListeners) {
l.onOmemoMucMessageReceived(muc, stanza, decryptedMessage);
}
}
/**
* Notify all registered OmemoMessageListeners of an incoming OMEMO encrypted Carbon Copy.
* Remember: If you want to receive OMEMO encrypted carbon copies, you have to enable carbons using
* {@link CarbonManager#enableCarbons()}.
*
* @param direction direction of the carbon copy
* @param carbonCopy carbon copy itself
* @param wrappingMessage wrapping message
* @param decryptedCarbonCopy decrypted carbon copy OMEMO element
*/
void notifyOmemoCarbonCopyReceived(CarbonExtension.Direction direction,
Message carbonCopy,
Message wrappingMessage,
OmemoMessage.Received decryptedCarbonCopy) {
for (OmemoMessageListener l : omemoMessageListeners) {
l.onOmemoCarbonCopyReceived(direction, carbonCopy, wrappingMessage, decryptedCarbonCopy);
}
}
    /**
     * Register stanza listeners needed for OMEMO.
     * This method is called automatically in the constructor and should only be used to restore the previous state
     * after {@link #stopStanzaAndPEPListeners()} was called.
     */
    public void resumeStanzaAndPEPListeners() {
        CarbonManager carbonManager = CarbonManager.getInstanceFor(connection());
        // Remove listeners to avoid them getting added twice
        // NOTE(review): each evaluation of 'this::method' produces a NEW object, so these remove calls
        // only deduplicate if the listener managers match by equals/identity semantics that cover
        // method references — verify this actually removes the previously registered listeners.
        connection().removeAsyncStanzaListener(this::internalOmemoMessageStanzaListener);
        carbonManager.removeCarbonCopyReceivedListener(this::internalOmemoCarbonCopyListener);
        // Add listeners
        // NOTE(review): the PEP listener is added without a prior remove — confirm repeated resume calls
        // cannot register pepOmemoDeviceListEventListener twice.
        pepManager.addPepEventListener(OmemoConstants.PEP_NODE_DEVICE_LIST, OmemoDeviceListElement.class, pepOmemoDeviceListEventListener);
        connection().addAsyncStanzaListener(this::internalOmemoMessageStanzaListener, OmemoManager::isOmemoMessage);
        carbonManager.addCarbonCopyReceivedListener(this::internalOmemoCarbonCopyListener);
    }
    /**
     * Remove active stanza listeners needed for OMEMO.
     */
    public void stopStanzaAndPEPListeners() {
        pepManager.removePepEventListener(pepOmemoDeviceListEventListener)
        // NOTE(review): 'this::method' evaluates to a fresh object each time, so the two remove calls below
        // may not match the listener instances registered in resumeStanzaAndPEPListeners() — verify.
        connection().removeAsyncStanzaListener(this::internalOmemoMessageStanzaListener);
        CarbonManager.getInstanceFor(connection()).removeCarbonCopyReceivedListener(this::internalOmemoCarbonCopyListener);
    }
    /**
     * Build a fresh session with a contacts device.
     * This might come in handy if a session is broken.
     *
     * @param contactsDevice OmemoDevice of a contact.
     *
     * @throws InterruptedException if the calling thread was interrupted.
     * @throws SmackException.NoResponseException if there was no response from the remote entity.
     * @throws CorruptedOmemoKeyException if our or their identityKey is corrupted.
     * @throws SmackException.NotConnectedException if the XMPP connection is not connected.
     * @throws CannotEstablishOmemoSessionException if no new session can be established.
     * @throws SmackException.NotLoggedInException if the connection is not authenticated.
     */
    public void rebuildSessionWith(OmemoDevice contactsDevice)
            throws InterruptedException, SmackException.NoResponseException, CorruptedOmemoKeyException,
            SmackException.NotConnectedException, CannotEstablishOmemoSessionException,
            SmackException.NotLoggedInException {
        // Building a session requires fetching the contact's bundle, so we must be authenticated.
        if (!connection().isAuthenticated()) {
            throw new SmackException.NotLoggedInException();
        }
        getOmemoService().buildFreshSessionWithDevice(connection(), getOwnDevice(), contactsDevice);
    }
    /**
     * Get our connection.
     * Package-private accessor, used for example by {@link LoggedInOmemoManager}.
     *
     * @return the connection of this manager
     */
    XMPPConnection getConnection() {
        return connection();
    }
    /**
     * Return the OMEMO service object.
     *
     * @return the OmemoService object related to this OmemoManager.
     * @throws IllegalStateException if no OmemoService is set.
     */
    OmemoService<?, ?, ?, ?, ?, ?, ?, ?, ?> getOmemoService() {
        throwIfNoServiceSet();
        return service;
    }
/**
* StanzaListener that listens for incoming Stanzas which contain OMEMO elements.
*/
private void internalOmemoMessageStanzaListener(final Stanza packet) {
Async.go(new Runnable() {
@Override
public void run() {
try {
getOmemoService().onOmemoMessageStanzaReceived(packet,
new LoggedInOmemoManager(OmemoManager.this));
} catch (SmackException.NotLoggedInException | IOException e) {
LOGGER.log(Level.SEVERE, "Exception while processing OMEMO stanza", e);
}
}
});
}
/**
* CarbonCopyListener that listens for incoming carbon copies which contain OMEMO elements.
*/
private void internalOmemoCarbonCopyListener(final CarbonExtension.Direction direction,
final Message carbonCopy,
final Message wrappingMessage) {
Async.go(new Runnable() {
@Override
public void run() {
if (isOmemoMessage(carbonCopy)) {
try {
getOmemoService().onOmemoCarbonCopyReceived(direction, carbonCopy, wrappingMessage,
new LoggedInOmemoManager(OmemoManager.this));
} catch (SmackException.NotLoggedInException | IOException e) {
LOGGER.log(Level.SEVERE, "Exception while processing OMEMO stanza", e);
}
}
}
});
}
    // PEP listener reacting to incoming OMEMO device list updates. It merges the received list into the
    // local cache; for updates to OUR OWN list it additionally cleans the list up and republishes it if the
    // cleaned list differs from what was received.
    @SuppressWarnings("UnnecessaryLambda")
    private final PepEventListener<OmemoDeviceListElement> pepOmemoDeviceListEventListener =
            (from, receivedDeviceList, id, message) -> {
                // Device List <list>
                OmemoCachedDeviceList deviceList;
                try {
                    // Merge the received list into our cache for this publisher.
                    getOmemoService().getOmemoStoreBackend().mergeCachedDeviceList(getOwnDevice(), from,
                            receivedDeviceList);
                    // Only our own device list is subject to clean-up and republishing below.
                    if (!from.asBareJid().equals(getOwnJid())) {
                        return;
                    }
                    deviceList = getOmemoService().cleanUpDeviceList(getOwnDevice());
                } catch (IOException e) {
                    LOGGER.log(Level.SEVERE,
                            "IOException while processing OMEMO PEP device updates. Message: " + message,
                                    e);
                    return;
                }
                final OmemoDeviceListElement_VAxolotl newDeviceList = new OmemoDeviceListElement_VAxolotl(deviceList);
                // Republish only when the cleaned-up list actually differs from the received one.
                if (!newDeviceList.copyDeviceIds().equals(receivedDeviceList.copyDeviceIds())) {
                    LOGGER.log(Level.FINE, "Republish deviceList due to changes:" +
                            " Received: " + Arrays.toString(receivedDeviceList.copyDeviceIds().toArray()) +
                            " Published: " + Arrays.toString(newDeviceList.copyDeviceIds().toArray()));
                    // Publish asynchronously to avoid blocking the PEP notification thread.
                    Async.go(new Runnable() {
                        @Override
                        public void run() {
                            try {
                                OmemoService.publishDeviceList(connection(), newDeviceList);
                            } catch (InterruptedException | XMPPException.XMPPErrorException |
                                    SmackException.NotConnectedException | SmackException.NoResponseException | PubSubException.NotALeafNodeException e) {
                                LOGGER.log(Level.WARNING, "Could not publish our deviceList upon an received update.", e);
                            }
                        }
                    });
                }
            };
    /**
     * StanzaFilter that filters messages containing a OMEMO element.
     *
     * @param stanza the stanza to inspect
     * @return true if the stanza is a Message carrying an OMEMO 'encrypted' extension.
     */
    private static boolean isOmemoMessage(Stanza stanza) {
        return stanza instanceof Message && OmemoManager.stanzaContainsOmemoElement(stanza);
    }
    /**
     * Guard class which ensures that the wrapped OmemoManager knows its BareJid.
     */
    public static class LoggedInOmemoManager {
        // The guarded manager instance; guaranteed non-null and with a known own BareJid.
        private final OmemoManager manager;
        /**
         * Wrap the given manager, resolving its own BareJid from the connection if necessary.
         *
         * @param manager the manager to guard; must not be null.
         * @throws SmackException.NotLoggedInException if the manager's JID is unknown and the
         *         connection is not authenticated.
         * @throws IllegalArgumentException if manager is null.
         */
        public LoggedInOmemoManager(OmemoManager manager)
                throws SmackException.NotLoggedInException {
            if (manager == null) {
                throw new IllegalArgumentException("OmemoManager cannot be null.");
            }
            if (manager.getOwnJid() == null) {
                // Try to resolve the JID from an authenticated connection; otherwise fail fast.
                if (manager.getConnection().isAuthenticated()) {
                    manager.ownJid = manager.getConnection().getUser().asBareJid();
                } else {
                    throw new SmackException.NotLoggedInException();
                }
            }
            this.manager = manager;
        }
        /**
         * Return the guarded OmemoManager.
         *
         * @return the wrapped manager, which is guaranteed to know its own BareJid.
         */
        public OmemoManager get() {
            return manager;
        }
    }
    /**
     * Callback which can be used to get notified, when the OmemoManager finished initializing.
     */
    public interface InitializationFinishedCallback {
        /**
         * Called when initialization finished successfully.
         *
         * @param manager the manager that finished initializing.
         */
        void initializationFinished(OmemoManager manager);
        /**
         * Called when initialization failed.
         *
         * @param cause the exception that caused the failure.
         */
        void initializationFailed(Exception cause);
    }
/**
* Get the bareJid of the user from the authenticated XMPP connection.
* If our deviceId is unknown, use the bareJid to look up deviceIds available in the omemoStore.
* If there are ids available, choose the smallest one. Otherwise generate a random deviceId.
*
* @param manager OmemoManager
*/
private static void initBareJidAndDeviceId(OmemoManager manager) {
if (!manager.getConnection().isAuthenticated()) {
throw new IllegalStateException("Connection MUST be authenticated.");
}
if (manager.ownJid == null) {
manager.ownJid = manager.getConnection().getUser().asBareJid();
}
if (UNKNOWN_DEVICE_ID.equals(manager.deviceId)) {
SortedSet<Integer> storedDeviceIds = manager.getOmemoService().getOmemoStoreBackend().localDeviceIdsOf(manager.ownJid);
if (storedDeviceIds.size() > 0) {
manager.setDeviceId(storedDeviceIds.first());
} else {
manager.setDeviceId(randomDeviceId());
}
}
}
}
| igniterealtime/Smack | smack-omemo/src/main/java/org/jivesoftware/smackx/omemo/OmemoManager.java | Java | apache-2.0 | 49,782 |
/*
* Copyright 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.example.android.support.vectordrawable.app;
import android.os.Bundle;
import android.widget.Button;
import android.widget.ImageView;
import android.widget.SeekBar;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.appcompat.app.AppCompatActivity;
import androidx.vectordrawable.graphics.drawable.SeekableAnimatedVectorDrawable;
import com.example.android.support.vectordrawable.R;
/**
 * Demonstrates usage of {@link SeekableAnimatedVectorDrawable}: a seek bar scrubs through the
 * animation, while start/stop buttons control playback.
 */
public class SeekableDemo extends AppCompatActivity {
    @Override
    protected void onCreate(@Nullable Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.seekable_demo);
        // Bail out early if the drawable resource cannot be loaded.
        final SeekableAnimatedVectorDrawable drawable =
                SeekableAnimatedVectorDrawable.create(this, R.drawable.ic_hourglass_animation);
        if (drawable == null) {
            finish();
            return;
        }
        final ImageView imageView = findViewById(R.id.image);
        final Button startButton = findViewById(R.id.start);
        final Button stopButton = findViewById(R.id.stop);
        final SeekBar seekBar = findViewById(R.id.seek);
        // Keep the buttons and the seek bar in sync with the animation state.
        drawable.registerAnimationCallback(new SeekableAnimatedVectorDrawable.AnimationCallback() {
            @Override
            public void onAnimationStart(@NonNull SeekableAnimatedVectorDrawable d) {
                showRunningState();
            }

            @Override
            public void onAnimationEnd(@NonNull SeekableAnimatedVectorDrawable d) {
                startButton.setEnabled(true);
                startButton.setText(R.string.start);
                stopButton.setEnabled(false);
                seekBar.setProgress(0);
            }

            @Override
            public void onAnimationPause(@NonNull SeekableAnimatedVectorDrawable d) {
                startButton.setEnabled(true);
                startButton.setText(R.string.resume);
                stopButton.setEnabled(true);
            }

            @Override
            public void onAnimationResume(@NonNull SeekableAnimatedVectorDrawable d) {
                showRunningState();
            }

            private void showRunningState() {
                startButton.setEnabled(true);
                startButton.setText(R.string.pause);
                stopButton.setEnabled(true);
            }

            @Override
            public void onAnimationUpdate(@NonNull SeekableAnimatedVectorDrawable d) {
                // Mirror the animation's play time on the seek bar.
                seekBar.setProgress((int) d.getCurrentPlayTime());
            }
        });
        imageView.setImageDrawable(drawable);
        seekBar.setMax((int) drawable.getTotalDuration());
        // The start button cycles through start -> pause -> resume.
        startButton.setOnClickListener(v -> {
            if (!drawable.isRunning()) {
                drawable.start();
            } else if (drawable.isPaused()) {
                drawable.resume();
            } else {
                drawable.pause();
            }
        });
        stopButton.setOnClickListener(v -> drawable.stop());
        // Scrubbing the seek bar seeks the animation; programmatic updates are ignored.
        seekBar.setOnSeekBarChangeListener(new SeekBar.OnSeekBarChangeListener() {
            @Override
            public void onProgressChanged(SeekBar bar, int progress, boolean fromUser) {
                if (fromUser) {
                    drawable.setCurrentPlayTime(progress);
                }
            }

            @Override
            public void onStartTrackingTouch(SeekBar bar) {
            }

            @Override
            public void onStopTrackingTouch(SeekBar bar) {
            }
        });
    }
}
| AndroidX/androidx | vectordrawable/integration-tests/testapp/src/main/java/com/example/android/support/vectordrawable/app/SeekableDemo.java | Java | apache-2.0 | 4,134 |
// (C) Copyright 2015 Moodle Pty Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import { NgModule } from '@angular/core';
import { IonicPageModule } from 'ionic-angular';
import { TranslateModule } from '@ngx-translate/core';
import { CoreComponentsModule } from '@components/components.module';
import { CoreDirectivesModule } from '@directives/directives.module';
import { CorePipesModule } from '@pipes/pipes.module';
import { AddonModChatComponentsModule } from '../../components/components.module';
import { AddonModChatSessionsPage } from './sessions';
// Page module for the chat "sessions" screen: declares the page component and
// pulls in the shared core modules, the chat feature components, Ionic lazy
// page loading and lazily-loaded translations.
@NgModule({
    declarations: [
        AddonModChatSessionsPage,
    ],
    imports: [
        CoreComponentsModule,
        CoreDirectivesModule,
        CorePipesModule,
        AddonModChatComponentsModule,
        IonicPageModule.forChild(AddonModChatSessionsPage),
        TranslateModule.forChild()
    ],
})
export class AddonModChatSessionsPageModule {}
| FMCorz/moodlemobile2 | src/addon/mod/chat/pages/sessions/sessions.module.ts | TypeScript | apache-2.0 | 1,440 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.harmony.sql.tests.javax.sql.rowset.serial;
import javax.sql.rowset.serial.SerialException;
import junit.framework.TestCase;
import org.apache.harmony.testframework.serialization.SerializationTest;
public class SerialExceptionTest extends TestCase {
    /**
     * @tests serialization/deserialization compatibility.
     */
    // Round-trips a default SerialException through Java serialization and
    // checks the deserialized copy against the original instance.
    public void testSerializationSelf() throws Exception {
        SerializationTest.verifySelf(new SerialException());
    }
    /**
     * @tests serialization/deserialization compatibility with RI.
     */
    // Compares the serialized form against a pre-recorded "golden" resource
    // produced by the reference implementation, guarding the wire format.
    public void testSerializationCompatibility() throws Exception {
        SerializationTest.verifyGolden(this, new SerialException());
    }
}
| freeVM/freeVM | enhanced/java/classlib/modules/sql/src/test/java/org/apache/harmony/sql/tests/javax/sql/rowset/serial/SerialExceptionTest.java | Java | apache-2.0 | 1,516 |
/*
* Copyright 2020 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thoughtworks.go.domain.materials.scm;
import com.thoughtworks.go.config.materials.PluggableSCMMaterial;
import com.thoughtworks.go.domain.MaterialRevision;
import com.thoughtworks.go.domain.config.Configuration;
import com.thoughtworks.go.domain.config.ConfigurationProperty;
import com.thoughtworks.go.domain.materials.MaterialAgent;
import com.thoughtworks.go.domain.materials.Modification;
import com.thoughtworks.go.domain.scm.SCM;
import com.thoughtworks.go.plugin.access.scm.SCMExtension;
import com.thoughtworks.go.plugin.access.scm.SCMProperty;
import com.thoughtworks.go.plugin.access.scm.SCMPropertyConfiguration;
import com.thoughtworks.go.plugin.access.scm.revision.SCMRevision;
import com.thoughtworks.go.plugin.api.response.Result;
import com.thoughtworks.go.util.command.ConsoleOutputStreamConsumer;
import org.apache.commons.lang3.StringUtils;
import java.io.File;
import static com.thoughtworks.go.util.command.TaggedStreamConsumer.PREP_ERR;
public class PluggableSCMMaterialAgent implements MaterialAgent {
    // Single source of truth for the checkout-failure message. The original code
    // repeated this literal in three places, which risked the copies drifting.
    private static final String CHECKOUT_FAILED_MSG = "Material %s checkout failed: %s";
    // All collaborators are fixed at construction time, so the fields are final.
    private final SCMExtension scmExtension;
    private final MaterialRevision revision;
    private final File workingDirectory;
    private final ConsoleOutputStreamConsumer consumer;
    public PluggableSCMMaterialAgent(SCMExtension scmExtension,
                                     MaterialRevision revision,
                                     File workingDirectory,
                                     ConsoleOutputStreamConsumer consumer) {
        this.scmExtension = scmExtension;
        this.revision = revision;
        this.workingDirectory = workingDirectory;
        this.consumer = consumer;
    }
    /**
     * Checks out the revision's pluggable SCM material into the material's
     * working directory by delegating to the SCM plugin identified by the
     * material's SCM configuration.
     *
     * @throws RuntimeException when the plugin reports a failed checkout; the
     *         failure is also written to the build console with the PREP_ERR tag.
     */
    @Override
    public void prepare() {
        try {
            PluggableSCMMaterial material = (PluggableSCMMaterial) revision.getMaterial();
            Modification latestModification = revision.getLatestModification();
            SCMRevision scmRevision = new SCMRevision(latestModification.getRevision(), latestModification.getModifiedTime(), null, null, latestModification.getAdditionalDataMap(), null);
            File destinationFolder = material.workingDirectory(workingDirectory);
            Result result = scmExtension.checkout(material.getScmConfig().getPluginConfiguration().getId(), buildSCMPropertyConfigurations(material.getScmConfig()), destinationFolder.getAbsolutePath(), scmRevision);
            handleCheckoutResult(material, result);
        } catch (Exception e) {
            // NOTE(review): failures surfaced via handleCheckoutResult have already
            // been written to the console there, so they appear twice — confirm
            // the duplicate console line is intended before changing it.
            consumer.taggedErrOutput(PREP_ERR, String.format(CHECKOUT_FAILED_MSG, revision.getMaterial().getDisplayName(), e.getMessage()));
            throw e;
        }
    }
    // Logs the plugin's messages on success; on failure logs with the PREP_ERR tag
    // and raises, which aborts the material preparation.
    private void handleCheckoutResult(PluggableSCMMaterial material, Result result) {
        if (result.isSuccessful()) {
            if (StringUtils.isNotBlank(result.getMessagesForDisplay())) {
                consumer.stdOutput(result.getMessagesForDisplay());
            }
        } else {
            consumer.taggedErrOutput(PREP_ERR, String.format(CHECKOUT_FAILED_MSG, material.getDisplayName(), result.getMessagesForDisplay()));
            throw new RuntimeException(String.format(CHECKOUT_FAILED_MSG, material.getDisplayName(), result.getMessagesForDisplay()));
        }
    }
    // Translates the persisted SCM configuration into the plugin-API property
    // configuration expected by the SCM extension.
    private SCMPropertyConfiguration buildSCMPropertyConfigurations(SCM scmConfig) {
        SCMPropertyConfiguration scmPropertyConfiguration = new SCMPropertyConfiguration();
        populateConfiguration(scmConfig.getConfiguration(), scmPropertyConfiguration);
        return scmPropertyConfiguration;
    }
    private void populateConfiguration(Configuration configuration,
                                       com.thoughtworks.go.plugin.api.config.Configuration pluginConfiguration) {
        for (ConfigurationProperty configurationProperty : configuration) {
            pluginConfiguration.add(new SCMProperty(configurationProperty.getConfigurationKey().getName(), configurationProperty.getValue()));
        }
    }
}
| arvindsv/gocd | common/src/main/java/com/thoughtworks/go/domain/materials/scm/PluggableSCMMaterialAgent.java | Java | apache-2.0 | 4,585 |
/**
* Copyright (c) 2010 Yahoo! Inc. All rights reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.oozie.service;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.oozie.CoordinatorActionBean;
import org.apache.oozie.ErrorCode;
import org.apache.oozie.WorkflowActionBean;
import org.apache.oozie.command.CommandException;
import org.apache.oozie.command.coord.CoordActionCheckCommand;
import org.apache.oozie.command.coord.CoordActionCheckXCommand;
import org.apache.oozie.command.wf.ActionCheckCommand;
import org.apache.oozie.command.wf.ActionCheckXCommand;
import org.apache.oozie.executor.jpa.CoordActionsRunningGetJPAExecutor;
import org.apache.oozie.executor.jpa.JPAExecutorException;
import org.apache.oozie.executor.jpa.WorkflowActionsRunningGetJPAExecutor;
import org.apache.oozie.util.XCallable;
import org.apache.oozie.util.XLog;
/**
* The Action Checker Service queue ActionCheckCommands to check the status of
* running actions and CoordActionCheckCommands to check the status of
* coordinator actions. The delay between checks on the same action can be
* configured.
*/
public class ActionCheckerService implements Service {
    public static final String CONF_PREFIX = Service.CONF_PREFIX + "ActionCheckerService.";
    /**
     * The frequency at which the ActionCheckService will run.
     */
    public static final String CONF_ACTION_CHECK_INTERVAL = CONF_PREFIX + "action.check.interval";
    /**
     * The time, in seconds, between an ActionCheck for the same action.
     */
    public static final String CONF_ACTION_CHECK_DELAY = CONF_PREFIX + "action.check.delay";
    /**
     * The number of callables to be queued in a batch.
     */
    public static final String CONF_CALLABLE_BATCH_SIZE = CONF_PREFIX + "callable.batch.size";
    protected static final String INSTRUMENTATION_GROUP = "actionchecker";
    protected static final String INSTR_CHECK_ACTIONS_COUNTER = "checks_wf_actions";
    protected static final String INSTR_CHECK_COORD_ACTIONS_COUNTER = "checks_coord_actions";
    // Selects the newer XCommand implementations; flipped to false in init()
    // when the USE_XCOMMAND configuration setting is false.
    private static boolean useXCommand = true;
    /**
     * {@link ActionCheckRunnable} is the runnable which is scheduled to run and
     * queue Action checks.
     */
    static class ActionCheckRunnable implements Runnable {
        // Minimum age (seconds) since the last check before an action is re-checked.
        private int actionCheckDelay;
        // Batch of commands accumulated by queueCallable(); flushed when it reaches
        // the configured batch size, and any remainder is flushed at the end of run().
        private List<XCallable<Void>> callables;
        // Human-readable summary of how many actions were queued, for the debug log.
        private StringBuilder msg = null;
        public ActionCheckRunnable(int actionCheckDelay) {
            this.actionCheckDelay = actionCheckDelay;
        }
        public void run() {
            XLog.Info.get().clear();
            XLog LOG = XLog.getLog(getClass());
            msg = new StringBuilder();
            try {
                runWFActionCheck();
                runCoordActionCheck();
            }
            catch (CommandException ce) {
                LOG.error("Unable to run action checks, ", ce);
            }
            LOG.debug("QUEUING [{0}] for potential checking", msg.toString());
            // Flush whatever partial batch is left over from queueCallable().
            if (null != callables) {
                // queueSerial() returns false when the command queue rejected the
                // batch (most likely because it is full); the batch is then dropped.
                boolean ret = Services.get().get(CallableQueueService.class).queueSerial(callables);
                if (ret == false) {
                    LOG.warn("Unable to queue the callables commands for CheckerService. "
                            + "Most possibly command queue is full. Queue size is :"
                            + Services.get().get(CallableQueueService.class).queueSize());
                }
                callables = null;
            }
        }
        /**
         * check workflow actions
         *
         * @throws CommandException
         */
        private void runWFActionCheck() throws CommandException {
            JPAService jpaService = Services.get().get(JPAService.class);
            if (jpaService == null) {
                throw new CommandException(ErrorCode.E0610);
            }
            List<WorkflowActionBean> actions;
            try {
                // Fetch running workflow actions not checked within actionCheckDelay.
                actions = jpaService
                        .execute(new WorkflowActionsRunningGetJPAExecutor(actionCheckDelay));
            }
            catch (JPAExecutorException je) {
                throw new CommandException(je);
            }
            if (actions == null || actions.size() == 0) {
                return;
            }
            msg.append(" WF_ACTIONS : " + actions.size());
            for (WorkflowActionBean action : actions) {
                Services.get().get(InstrumentationService.class).get().incr(INSTRUMENTATION_GROUP,
                        INSTR_CHECK_ACTIONS_COUNTER, 1);
                if (useXCommand) {
                    queueCallable(new ActionCheckXCommand(action.getId()));
                }
                else {
                    queueCallable(new ActionCheckCommand(action.getId()));
                }
            }
        }
        /**
         * check coordinator actions
         *
         * @throws CommandException
         */
        private void runCoordActionCheck() throws CommandException {
            JPAService jpaService = Services.get().get(JPAService.class);
            if (jpaService == null) {
                throw new CommandException(ErrorCode.E0610);
            }
            List<CoordinatorActionBean> cactions;
            try {
                // Fetch running coordinator actions not checked within actionCheckDelay.
                cactions = jpaService.execute(new CoordActionsRunningGetJPAExecutor(
                        actionCheckDelay));
            }
            catch (JPAExecutorException je) {
                throw new CommandException(je);
            }
            if (cactions == null || cactions.size() == 0) {
                return;
            }
            msg.append(" COORD_ACTIONS : " + cactions.size());
            for (CoordinatorActionBean caction : cactions) {
                Services.get().get(InstrumentationService.class).get().incr(INSTRUMENTATION_GROUP,
                        INSTR_CHECK_COORD_ACTIONS_COUNTER, 1);
                if (useXCommand) {
                    queueCallable(new CoordActionCheckXCommand(caction.getId(), actionCheckDelay));
                }
                else {
                    queueCallable(new CoordActionCheckCommand(caction.getId(), actionCheckDelay));
                }
            }
        }
        /**
         * Adds callables to a list. If the number of callables in the list
         * reaches {@link ActionCheckerService#CONF_CALLABLE_BATCH_SIZE}, the
         * entire batch is queued and the callables list is reset.
         *
         * @param callable the callable to queue.
         */
        private void queueCallable(XCallable<Void> callable) {
            if (callables == null) {
                callables = new ArrayList<XCallable<Void>>();
            }
            callables.add(callable);
            // Default batch size is 10 when CONF_CALLABLE_BATCH_SIZE is unset.
            if (callables.size() == Services.get().getConf().getInt(CONF_CALLABLE_BATCH_SIZE, 10)) {
                boolean ret = Services.get().get(CallableQueueService.class).queueSerial(callables);
                if (ret == false) {
                    XLog.getLog(getClass()).warn(
                            "Unable to queue the callables commands for CheckerService. "
                                    + "Most possibly command queue is full. Queue size is :"
                                    + Services.get().get(CallableQueueService.class).queueSize());
                }
                callables = new ArrayList<XCallable<Void>>();
            }
        }
    }
    /**
     * Initializes the Action Check service.
     *
     * @param services services instance.
     */
    @Override
    public void init(Services services) {
        Configuration conf = services.getConf();
        // Defaults: 600s minimum delay between checks of the same action,
        // first run after 10s, then every 60s unless configured otherwise.
        Runnable actionCheckRunnable = new ActionCheckRunnable(conf.getInt(CONF_ACTION_CHECK_DELAY, 600));
        services.get(SchedulerService.class).schedule(actionCheckRunnable, 10,
                                                      conf.getInt(CONF_ACTION_CHECK_INTERVAL, 60), SchedulerService.Unit.SEC);
        if (Services.get().getConf().getBoolean(USE_XCOMMAND, true) == false) {
            useXCommand = false;
        }
    }
    /**
     * Destroy the Action Checker Services.
     */
    @Override
    public void destroy() {
    }
    /**
     * Return the public interface for the action checker service.
     *
     * @return {@link ActionCheckerService}.
     */
    @Override
    public Class<? extends Service> getInterface() {
        return ActionCheckerService.class;
    }
}
| sunmeng007/oozie | core/src/main/java/org/apache/oozie/service/ActionCheckerService.java | Java | apache-2.0 | 9,013 |
package com.google.api.ads.dfp.jaxws.v201508;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlSchemaType;
import javax.xml.bind.annotation.XmlType;
/**
*
* The content partner related validation errors.
*
*
* <p>Java class for ContentPartnerError complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType name="ContentPartnerError">
* <complexContent>
* <extension base="{https://www.google.com/apis/ads/publisher/v201508}ApiError">
* <sequence>
* <element name="reason" type="{https://www.google.com/apis/ads/publisher/v201508}ContentPartnerError.Reason" minOccurs="0"/>
* </sequence>
* </extension>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "ContentPartnerError", propOrder = {
    "reason"
})
// NOTE(review): JAXB binding class in generated style — if it is regenerated
// from the service schema, manual edits here may be overwritten.
public class ContentPartnerError
    extends ApiError
{
    // Optional error reason; null when the service response omitted it.
    @XmlSchemaType(name = "string")
    protected ContentPartnerErrorReason reason;
    /**
     * Gets the value of the reason property.
     *
     * @return
     *     possible object is
     *     {@link ContentPartnerErrorReason }
     *
     */
    public ContentPartnerErrorReason getReason() {
        return reason;
    }
    /**
     * Sets the value of the reason property.
     *
     * @param value
     *     allowed object is
     *     {@link ContentPartnerErrorReason }
     *
     */
    public void setReason(ContentPartnerErrorReason value) {
        this.reason = value;
    }
}
| shyTNT/googleads-java-lib | modules/dfp_appengine/src/main/java/com/google/api/ads/dfp/jaxws/v201508/ContentPartnerError.java | Java | apache-2.0 | 1,711 |
# Copyright 2015, Google, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from getting_started import main
def test_main(cloud_config, capsys):
    """Smoke-test the getting-started sample.

    Runs ``main`` against the configured GCP project and asserts that the
    captured stdout contains a "Query Results:" header eventually followed by
    the expected corpus name.

    Args:
        cloud_config: pytest fixture carrying the GCP project id.
        capsys: pytest fixture capturing stdout/stderr.
    """
    main(cloud_config.project)
    out, _ = capsys.readouterr()
    # The original pattern used a single '.', which tolerated exactly one
    # character between the header and 'hamlet'; '.*' (with DOTALL so it spans
    # newlines) accepts any amount of intervening output.
    assert re.search(r'Query Results:.*hamlet', out, re.DOTALL)
| clarko1/Cramd | bigquery/api/getting_started_test.py | Python | apache-2.0 | 808 |
<?php namespace Neomerx\JsonApi\Parameters\Headers;
/**
* Copyright 2015 info@neomerx.com (www.neomerx.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use \InvalidArgumentException;
use \Neomerx\JsonApi\Contracts\Parameters\Headers\MediaTypeInterface;
/**
* @package Neomerx\JsonApi
*/
class MediaType implements MediaTypeInterface
{
    /**
     * Primary type, e.g. 'application' in 'application/vnd.api+json'.
     *
     * @var string
     */
    private $type;
    /**
     * Sub-type, e.g. 'vnd.api+json' in 'application/vnd.api+json'.
     *
     * @var string
     */
    private $subType;
    /**
     * Full 'type/subType' string, pre-computed in the constructor.
     *
     * @var string
     */
    private $mediaType;
    /**
     * Media type parameters (e.g. charset), or null when there are none.
     *
     * @var array<string,string>|null
     */
    private $parameters;
    /**
     * Both type and subType are trimmed; empty values and a non-array
     * $parameters raise InvalidArgumentException.
     *
     * @param string                    $type
     * @param string                    $subType
     * @param array<string,string>|null $parameters
     */
    public function __construct($type, $subType, $parameters = null)
    {
        $type = trim($type);
        if (empty($type) === true) {
            throw new InvalidArgumentException('type');
        }
        $subType = trim($subType);
        if (empty($subType) === true) {
            throw new InvalidArgumentException('subType');
        }
        if ($parameters !== null && is_array($parameters) === false) {
            throw new InvalidArgumentException('parameters');
        }
        $this->type       = $type;
        $this->subType    = $subType;
        $this->mediaType  = $type . '/' . $subType;
        $this->parameters = $parameters;
    }
    /**
     * @inheritdoc
     */
    public function getType()
    {
        return $this->type;
    }
    /**
     * @inheritdoc
     */
    public function getSubType()
    {
        return $this->subType;
    }
    /**
     * @inheritdoc
     */
    public function getMediaType()
    {
        return $this->mediaType;
    }
    /**
     * @inheritdoc
     */
    public function getParameters()
    {
        return $this->parameters;
    }
    /**
     * @inheritdoc
     */
    public function matchesTo(MediaTypeInterface $mediaType)
    {
        // Unlike equalsTo(), matching treats '*' in the other side's type or
        // sub-type as a wildcard; parameters must still be equal.
        return
            $this->isTypeMatches($mediaType) &&
            $this->isSubTypeMatches($mediaType) &&
            $this->isMediaParametersEqual($mediaType);
    }
    /**
     * @inheritdoc
     */
    public function equalsTo(MediaTypeInterface $mediaType)
    {
        return
            $this->isTypeEquals($mediaType) &&
            $this->isSubTypeEquals($mediaType) &&
            $this->isMediaParametersEqual($mediaType);
    }
    /**
     * Parse media type.
     *
     * Accepts strings such as 'application/json; charset=utf-8'; parameter
     * values are trimmed of surrounding spaces and double quotes.
     *
     * @param int    $position
     * @param string $mediaType
     *
     * @return MediaType
     */
    public static function parse($position, $mediaType)
    {
        // Appears to be a no-op that only marks $position as intentionally
        // unused (it is not referenced below) — TODO confirm.
        $position ?: null;
        $fields = explode(';', $mediaType);
        if (strpos($fields[0], '/') === false) {
            throw new InvalidArgumentException('mediaType');
        }
        list($type, $subType) = explode('/', $fields[0], 2);
        $parameters = null;
        $count      = count($fields);
        for ($idx = 1; $idx < $count; ++$idx) {
            if (strpos($fields[$idx], '=') === false) {
                throw new InvalidArgumentException('mediaType');
            }
            list($key, $value) = explode('=', $fields[$idx], 2);
            $parameters[trim($key)] = trim($value, ' "');
        }
        return new MediaType($type, $subType, $parameters);
    }
    /**
     * @param MediaTypeInterface $mediaType
     *
     * @return bool
     */
    private function isTypeMatches(MediaTypeInterface $mediaType)
    {
        return $this->getType() === $mediaType->getType() || $mediaType->getType() === '*';
    }
    /**
     * @param MediaTypeInterface $mediaType
     *
     * @return bool
     */
    private function isTypeEquals(MediaTypeInterface $mediaType)
    {
        return $this->getType() === $mediaType->getType();
    }
    /**
     * @param MediaTypeInterface $mediaType
     *
     * @return bool
     */
    private function isSubTypeMatches(MediaTypeInterface $mediaType)
    {
        return $this->getSubType() === $mediaType->getSubType() || $mediaType->getSubType() === '*';
    }
    /**
     * @param MediaTypeInterface $mediaType
     *
     * @return bool
     */
    private function isSubTypeEquals(MediaTypeInterface $mediaType)
    {
        return $this->getSubType() === $mediaType->getSubType();
    }
    /**
     * @param MediaTypeInterface $mediaType
     *
     * @return bool
     */
    private function isMediaParametersEqual(MediaTypeInterface $mediaType)
    {
        if ($this->getParameters() === null && $mediaType->getParameters() === null) {
            return true;
        } elseif ($this->getParameters() !== null && $mediaType->getParameters() !== null) {
            // Equal when the intersection has as many entries as this side's
            // parameter list (order-insensitive comparison of key/value pairs).
            $intersect = array_intersect($this->getParameters(), $mediaType->getParameters());
            return (count($this->getParameters()) === count($intersect));
        }
        return false;
    }
}
| creocoder/json-api | src/Parameters/Headers/MediaType.php | PHP | apache-2.0 | 5,426 |
---
layout: default
description: Sequential Access and Cursors
---
Sequential Access and Cursors
=============================
If a query returns a cursor (for example by calling `db._query(...)`), then you can use *hasNext* and *next* to
iterate over the result set or *toArray* to convert it to an array.
If the number of query results is expected to be big, it is possible to
limit the amount of documents transferred between the server and the client
to a specific value. This value is called *batchSize*. The *batchSize*
can optionally be set before or when a simple query is executed.
If the server has more documents than should be returned in a single batch,
the server will set the *hasMore* attribute in the result. It will also
return the id of the server-side cursor in the *id* attribute in the result.
This id can be used with the cursor API to fetch any outstanding results from
the server and dispose the server-side cursor afterwards.
The initial *batchSize* value can be set using the *setBatchSize*
method that is available for each type of simple query, or when the simple
query is executed using its *execute* method. If no *batchSize* value
is specified, the server will pick a reasonable default value.
Has Next
--------
<!-- js/common/modules/@arangodb/simple-query-common.js -->
checks if the cursor is exhausted
`cursor.hasNext()`
The *hasNext* operator returns *true*, then the cursor still has
documents. In this case the next document can be accessed using the
*next* operator, which will advance the cursor.
**Examples**
{% arangoshexample examplevar="examplevar" script="script" result="result" %}
@startDocuBlockInline cursorHasNext
@EXAMPLE_ARANGOSH_OUTPUT{cursorHasNext}
~ db._create("five");
~ db.five.save({ name : "one" });
~ db.five.save({ name : "two" });
~ db.five.save({ name : "three" });
~ db.five.save({ name : "four" });
~ db.five.save({ name : "five" });
var a = db._query("FOR x IN five RETURN x");
while (a.hasNext()) print(a.next());
~ db._drop("five")
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock cursorHasNext
{% endarangoshexample %}
{% include arangoshexample.html id=examplevar script=script result=result %}
Next
----
<!-- js/common/modules/@arangodb/simple-query-common.js -->
returns the next result document
`cursor.next()`
If the *hasNext* operator returns *true*, then the underlying
cursor of the simple query still has documents. In this case the
next document can be accessed using the *next* operator, which
will advance the underlying cursor. If you use *next* on an
exhausted cursor, then *undefined* is returned.
**Examples**
{% arangoshexample examplevar="examplevar" script="script" result="result" %}
@startDocuBlockInline cursorNext
@EXAMPLE_ARANGOSH_OUTPUT{cursorNext}
~ db._create("five");
~ db.five.save({ name : "one" });
~ db.five.save({ name : "two" });
~ db.five.save({ name : "three" });
~ db.five.save({ name : "four" });
~ db.five.save({ name : "five" });
db._query("FOR x IN five RETURN x").next();
~ db._drop("five")
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock cursorNext
{% endarangoshexample %}
{% include arangoshexample.html id=examplevar script=script result=result %}
Set Batch size
--------------
<!-- js/common/modules/@arangodb/simple-query-common.js -->
sets the batch size for any following requests
`cursor.setBatchSize(number)`
Sets the batch size for queries. The batch size determines how many results
are at most transferred from the server to the client in one chunk.
Get Batch size
--------------
<!-- js/common/modules/@arangodb/simple-query-common.js -->
returns the batch size
`cursor.getBatchSize()`
Returns the batch size for queries. If the returned value is undefined, the
server will determine a sensible batch size for any following requests.
Execute Query
-------------
<!-- js/common/modules/@arangodb/simple-query-common.js -->
executes a query
`query.execute(batchSize)`
Executes a simple query. If the optional batchSize value is specified,
the server will return at most batchSize values in one roundtrip.
The batchSize cannot be adjusted after the query is first executed.
**Note**: There is no need to explicitly call the execute method if another
means of fetching the query results is chosen. The following two approaches
lead to the same result:
{% arangoshexample examplevar="examplevar" script="script" result="result" %}
@startDocuBlockInline executeQueryNoBatchSize
@EXAMPLE_ARANGOSH_OUTPUT{executeQueryNoBatchSize}
~ db._create("users");
~ db.users.save({ name: "Gerhard" });
~ db.users.save({ name: "Helmut" });
~ db.users.save({ name: "Angela" });
result = db.users.all().toArray();
| var q = db._query("FOR x IN users RETURN x");
| result = [ ];
| while (q.hasNext()) {
| result.push(q.next());
}
~ db._drop("users")
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock executeQueryNoBatchSize
{% endarangoshexample %}
{% include arangoshexample.html id=examplevar script=script result=result %}
The following two alternatives both use a batchSize and return the same
result:
{% arangoshexample examplevar="examplevar" script="script" result="result" %}
@startDocuBlockInline executeQueryBatchSize
@EXAMPLE_ARANGOSH_OUTPUT{executeQueryBatchSize}
~ db._create("users");
~ db.users.save({ name: "Gerhard" });
~ db.users.save({ name: "Helmut" });
~ db.users.save({ name: "Angela" });
q = db.users.all(); q.setBatchSize(20); q.execute(); while (q.hasNext()) { print(q.next()); }
q = db.users.all(); q.execute(20); while (q.hasNext()) { print(q.next()); }
~ db._drop("users")
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock executeQueryBatchSize
{% endarangoshexample %}
{% include arangoshexample.html id=examplevar script=script result=result %}
Dispose
-------
<!-- js/common/modules/@arangodb/simple-query-common.js -->
disposes the result
`cursor.dispose()`
If you are no longer interested in any further results, you should call
*dispose* in order to free any resources associated with the cursor.
After calling *dispose* you can no longer access the cursor.
Count
-----
<!-- js/common/modules/@arangodb/simple-query-common.js -->
counts the number of documents
`cursor.count()`
The *count* operator counts the number of document in the result set and
returns that number. The *count* operator ignores any limits and returns
the total number of documents found.
**Note**: Not all simple queries support counting. In this case *null* is
returned (Simple queries are deprecated).
`cursor.count(true)`
If the result set was limited by the *limit* operator or documents were
skipped using the *skip* operator, the *count* operator with argument
*true* will use the number of elements in the final result set - after
applying *limit* and *skip*.
**Note**: Not all simple queries support counting. In this case *null* is
returned (Simple queries are deprecated).
| arangodb/docs | 3.7/appendix-references-cursor-object.md | Markdown | apache-2.0 | 7,046 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"Example code to perform int8 GEMM"
import logging
import sys
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm.topi.cuda.tensor_intrin import dp4a
DO_TUNING = True
PRETUNED_INDEX = 75333
intrin_dp4a = dp4a("local", "local", "local")
@autotvm.template
def gemm_int8(n, m, l):
    """Build an AutoTVM-tunable CUDA schedule for int8 GEMM.

    Computes C[i, j] = sum_k A[i, k] * B[j, k] where A is (n, l) int8 and
    B is (m, l) int8, accumulating in int32. The innermost reduction is
    tensorized with the dp4a intrinsic (4-way int8 dot product).

    Returns:
        (schedule, [A, B, C]) suitable for autotvm.task.create / tvm.build.
    """
    A = te.placeholder((n, l), name="A", dtype="int8")
    B = te.placeholder((m, l), name="B", dtype="int8")
    k = te.reduce_axis((0, l), name="k")
    C = te.compute(
        (n, m),
        lambda i, j: te.sum(A[i, k].astype("int32") * B[j, k].astype("int32"), axis=k),
        name="C",
    )
    cfg = autotvm.get_config()
    s = te.create_schedule(C.op)
    y, x = C.op.axis
    # Stage the operands through shared memory and then registers ("local").
    AA = s.cache_read(A, "shared", [C])
    BB = s.cache_read(B, "shared", [C])
    AL = s.cache_read(AA, "local", [C])
    BL = s.cache_read(BB, "local", [C])
    CC = s.cache_write(C, "local")
    k = CC.op.reduce_axis[0]
    # Innermost k factor is pinned to 4 so it can be tensorized with dp4a.
    cfg.define_split(
        "tile_k",
        cfg.axis(k),
        num_outputs=3,
        filter=lambda entity: entity.size[2] == 4 and entity.size[0] * 2 >= entity.size[1],
    )
    ko, kt, ki = cfg["tile_k"].apply(s, CC, k)
    s[CC].tensorize(ki, intrin_dp4a)
    block_x = te.thread_axis("blockIdx.x")
    block_y = te.thread_axis("blockIdx.y")
    thread_x = te.thread_axis("threadIdx.x")
    thread_y = te.thread_axis("threadIdx.y")
    def block_size_filter(entity):
        # NOTE(review): both sides of the first clause are scaled by 2, so it
        # reduces to size[0] >= size[1] — confirm the intended constraint.
        return (
            entity.size[0] * 2 >= entity.size[1] * 2
            and entity.size[1] <= 16
            and entity.size[3] <= 4
        )
    cfg.define_split("tile_y", cfg.axis(y), num_outputs=4, filter=block_size_filter)
    cfg.define_split("tile_x", cfg.axis(x), num_outputs=4, filter=block_size_filter)
    by, tyz, ty, yi = cfg["tile_y"].apply(s, C, y)
    bx, txz, tx, xi = cfg["tile_x"].apply(s, C, x)
    # Map the output tiles onto the CUDA block/vthread/thread hierarchy.
    s[C].bind(by, block_y)
    s[C].bind(bx, block_x)
    s[C].bind(tyz, te.thread_axis("vthread"))
    s[C].bind(txz, te.thread_axis("vthread"))
    s[C].bind(ty, thread_y)
    s[C].bind(tx, thread_x)
    s[C].reorder(by, bx, tyz, txz, ty, tx, yi, xi)
    s[CC].compute_at(s[C], tx)
    yo, xo = CC.op.axis
    s[CC].reorder(ko, kt, yo, xo, ki)
    s[CC].unroll(kt)
    # Register-level operand tiles: vectorized 4-wide loads, double-buffered.
    for stage in [AL, BL]:
        s[stage].compute_at(s[CC], kt)
        _, xi = s[stage].split(stage.op.axis[1], factor=4)
        s[stage].vectorize(xi)
        s[stage].double_buffer()
    cfg.define_knob("storage_align", [16, 48])
    # Shared-memory tiles: cooperative copy spread across the thread block,
    # with a tunable row alignment to reduce bank conflicts.
    for stage in [AA, BB]:
        s[stage].storage_align(s[stage].op.axis[0], cfg["storage_align"].val, 0)
        s[stage].compute_at(s[CC], ko)
        fused = s[stage].fuse(*s[stage].op.axis)
        ty, tx = s[stage].split(fused, nparts=cfg["tile_y"].size[2])
        tx, xi = s[stage].split(tx, nparts=cfg["tile_x"].size[2])
        _, xi = s[stage].split(xi, factor=16)
        s[stage].bind(ty, thread_y)
        s[stage].bind(tx, thread_x)
        s[stage].vectorize(xi)
    cfg.define_knob("auto_unroll_max_step", [512, 1500])
    s[C].pragma(by, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    s[C].pragma(by, "unroll_explicit", False)
    # 2 flops (multiply + add) per element of the reduction.
    cfg.add_flop(n * m * l * 2)
    return s, [A, B, C]
if __name__ == "__main__":
    # Square GEMM problem size.
    N = 2048
    n = m = l = N
    # Stream tuning progress to stdout.
    logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
    task = autotvm.task.create(gemm_int8, args=(n, m, l), target="cuda")
    print(task.config_space)
    measure_option = autotvm.measure_option(
        builder=autotvm.LocalBuilder(),
        runner=autotvm.LocalRunner(repeat=3, min_repeat_ms=100, timeout=4),
    )
    log_name = "gemm_int8.log"
    if DO_TUNING:
        # Search the schedule space and append measured results to the log file,
        # then pick the best configuration found.
        tuner = autotvm.tuner.XGBTuner(task)
        tuner.tune(
            n_trial=1000,
            measure_option=measure_option,
            callbacks=[autotvm.callback.log_to_file(log_name)],
        )
        dispatch_context = autotvm.apply_history_best(log_name)
        best_config = dispatch_context.query(task.target, task.workload)
        print("\nBest config:")
        print(best_config)
    else:
        # Reuse a previously discovered configuration instead of tuning.
        config = task.config_space.get(PRETUNED_INDEX)
        dispatch_context = autotvm.task.ApplyConfig(config)
        print("Using pretuned config:")
        print(config)
    # Build the CUDA kernel with the selected configuration applied.
    with dispatch_context:
        with tvm.target.Target("cuda"):
            s, arg_bufs = gemm_int8(n, m, l)
            f = tvm.build(s, arg_bufs, "cuda", name="gemm_int8")
    dev = tvm.device("cuda", 0)
    # NOTE(review): np.random.randint's `high` bound is exclusive, so these
    # values lie in [-128, 126]; use high=128 if the full int8 range is intended.
    a_np = np.random.randint(size=(n, l), low=-128, high=127, dtype="int8")
    b_np = np.random.randint(size=(m, l), low=-128, high=127, dtype="int8")
    a = tvm.nd.array(a_np, dev)
    b = tvm.nd.array(b_np, dev)
    c = tvm.nd.array(np.zeros((n, m), dtype="int32"), dev)
    f(a, b, c)
    # Verify against a NumPy reference (A * B^T in int32).
    tvm.testing.assert_allclose(
        c.numpy(), np.dot(a_np.astype("int32"), b_np.T.astype("int32")), rtol=1e-5
    )
    # Benchmark: num_ops / (t_seconds * 1e9) expressed as GOPS.
    num_ops = 2 * l * m * n
    num_runs = 1000
    timer_f = f.time_evaluator(f.entry_name, dev, number=num_runs)
    t = timer_f(a, b, c).mean
    GOPS = num_ops / (t * 1e3) / 1e6
    print("average time cost of %d runs = %g ms, %g GOPS." % (num_runs, t * 1e3, GOPS))
| dmlc/tvm | apps/topi_recipe/gemm/gemm_int8.py | Python | apache-2.0 | 5,879 |
/**
* Copyright 2017 LinkedIn Corp. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
package com.github.ambry.store;
import com.github.ambry.utils.Pair;
import java.util.HashMap;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;
/**
 * Holds the data structures needed by {@link BlobStoreStats} to serve requests. The class also exposes helper methods
 * used to modify and access the stored data structures.
 */
class ScanResults {
  // A NavigableMap that stores buckets for container valid data size. The key of the map is the end time of each
  // bucket and the value is the corresponding valid data size map. For example, there are two buckets with end time
  // t1 and t2. Bucket with end time t2 includes all events whose operation time is greater than or equal to t1 but
  // strictly less than t2.
  // Each bucket except for the very first one contains the delta in valid data size that occurred prior to the bucket
  // end time. The very first bucket's end time is the forecast start time for containers and it contains the valid
  // data size map at the forecast start time. The very first bucket is used as a base value; a requested valid data
  // size is computed by applying the deltas from the appropriate buckets on the base value.
  private final NavigableMap<Long, Map<String, Map<String, Long>>> containerBuckets = new TreeMap<>();
  // A NavigableMap that stores buckets for log segment valid data size. The rest of the structure is similar
  // to containerBuckets.
  private final NavigableMap<Long, NavigableMap<String, Long>> logSegmentBuckets = new TreeMap<>();
  final long containerForecastStartTimeMs;
  final long containerLastBucketTimeMs;
  final long containerForecastEndTimeMs;
  final long logSegmentForecastStartTimeMs;
  final long logSegmentLastBucketTimeMs;
  final long logSegmentForecastEndTimeMs;
  // End offset of the portion of the log covered by these results; null until set by the scanner.
  Offset scannedEndOffset = null;

  /**
   * Create the bucket data structures in advance based on the given scanStartTime and segmentScanTimeOffset.
   *
   * @param startTimeInMs start time of the scan; used as the first (base) container bucket end time
   * @param logSegmentForecastOffsetMs how far behind {@code startTimeInMs} the log segment forecast begins
   * @param bucketCount number of buckets to pre-create in each bucket map
   * @param bucketSpanInMs time span covered by each bucket
   */
  ScanResults(long startTimeInMs, long logSegmentForecastOffsetMs, int bucketCount, long bucketSpanInMs) {
    long containerBucketTimeMs = startTimeInMs;
    long logSegmentBucketTimeMs = startTimeInMs - logSegmentForecastOffsetMs;
    for (int i = 0; i < bucketCount; i++) {
      containerBuckets.put(containerBucketTimeMs, new HashMap<>());
      logSegmentBuckets.put(logSegmentBucketTimeMs, new TreeMap<>(LogSegmentNameHelper.COMPARATOR));
      containerBucketTimeMs += bucketSpanInMs;
      logSegmentBucketTimeMs += bucketSpanInMs;
    }
    containerForecastStartTimeMs = containerBuckets.firstKey();
    containerLastBucketTimeMs = containerBuckets.lastKey();
    containerForecastEndTimeMs = containerLastBucketTimeMs + bucketSpanInMs;
    logSegmentForecastStartTimeMs = logSegmentBuckets.firstKey();
    logSegmentLastBucketTimeMs = logSegmentBuckets.lastKey();
    logSegmentForecastEndTimeMs = logSegmentLastBucketTimeMs + bucketSpanInMs;
  }

  /**
   * Given a reference time, return the key of the appropriate container bucket whose end time is strictly greater than
   * the reference time.
   * @param referenceTimeInMs the reference time or operation time of an event.
   * @return the appropriate bucket key (bucket end time) to indicate which bucket will an event with
   *         the given reference time as operation time belong to, or {@code null} if it is past the last bucket.
   */
  Long getContainerBucketKey(long referenceTimeInMs) {
    return containerBuckets.higherKey(referenceTimeInMs);
  }

  /**
   * Given a reference time, return the key of the appropriate log segment bucket whose end time is strictly greater
   * than the reference time.
   * @param referenceTimeInMs the reference time or operation time of an event.
   * @return the appropriate bucket key (bucket end time) to indicate which bucket will an event with
   *         the given reference time as operation time belong to, or {@code null} if it is past the last bucket.
   */
  Long getLogSegmentBucketKey(long referenceTimeInMs) {
    return logSegmentBuckets.higherKey(referenceTimeInMs);
  }

  /**
   * Helper function to update the container base value bucket with the given value.
   * @param serviceId the serviceId of the map entry to be updated
   * @param containerId the containerId of the map entry to be updated
   * @param value the value to be added
   */
  void updateContainerBaseBucket(String serviceId, String containerId, long value) {
    updateContainerBucket(containerBuckets.firstKey(), serviceId, containerId, value);
  }

  /**
   * Helper function to update the log segment base value bucket with the given value.
   * @param logSegmentName the log segment name of the map entry to be updated
   * @param value the value to be added
   */
  void updateLogSegmentBaseBucket(String logSegmentName, long value) {
    updateLogSegmentBucket(logSegmentBuckets.firstKey(), logSegmentName, value);
  }

  /**
   * Helper function to update a container bucket with the given value. A {@code null} or unknown
   * {@code bucketKey} is ignored.
   * @param bucketKey the bucket key to specify which bucket will be updated
   * @param serviceId the serviceId of the map entry to be updated
   * @param containerId the containerId of the map entry to be updated
   * @param value the value to be added
   */
  void updateContainerBucket(Long bucketKey, String serviceId, String containerId, long value) {
    if (bucketKey != null && containerBuckets.containsKey(bucketKey)) {
      Map<String, Map<String, Long>> existingBucketEntry = containerBuckets.get(bucketKey);
      updateNestedMapHelper(existingBucketEntry, serviceId, containerId, value);
    }
  }

  /**
   * Helper function to update a log segment bucket with a given value. A {@code null} or unknown
   * {@code bucketKey} is ignored.
   * @param bucketKey the bucket key to specify which bucket will be updated
   * @param logSegmentName the log segment name of the map entry to be updated
   * @param value the value to be added
   */
  void updateLogSegmentBucket(Long bucketKey, String logSegmentName, long value) {
    if (bucketKey != null && logSegmentBuckets.containsKey(bucketKey)) {
      Map<String, Long> existingBucketEntry = logSegmentBuckets.get(bucketKey);
      updateMapHelper(existingBucketEntry, logSegmentName, value);
    }
  }

  /**
   * Given a reference time in milliseconds return the corresponding valid data size per log segment map by aggregating
   * all buckets whose end time is less than or equal to the reference time.
   * @param referenceTimeInMS the reference time in ms until which deletes and expiration are relevant
   * @return a {@link Pair} whose first element is the end time of the last bucket that was aggregated and whose second
   *         element is the requested valid data size per log segment {@link NavigableMap}.
   */
  Pair<Long, NavigableMap<String, Long>> getValidSizePerLogSegment(Long referenceTimeInMS) {
    // Start from a copy of the base bucket, then apply the deltas of every later bucket up to the reference time.
    NavigableMap<String, Long> validSizePerLogSegment = new TreeMap<>(logSegmentBuckets.firstEntry().getValue());
    NavigableMap<Long, NavigableMap<String, Long>> subMap =
        logSegmentBuckets.subMap(logSegmentBuckets.firstKey(), false, referenceTimeInMS, true);
    for (Map.Entry<Long, NavigableMap<String, Long>> bucket : subMap.entrySet()) {
      for (Map.Entry<String, Long> bucketEntry : bucket.getValue().entrySet()) {
        updateMapHelper(validSizePerLogSegment, bucketEntry.getKey(), bucketEntry.getValue());
      }
    }
    Long lastReferenceBucketTimeInMs = subMap.isEmpty() ? logSegmentBuckets.firstKey() : subMap.lastKey();
    return new Pair<>(lastReferenceBucketTimeInMs, validSizePerLogSegment);
  }

  /**
   * Given a reference time in ms return the corresponding valid data size per container map by aggregating all buckets
   * whose end time is less than or equal to the reference time.
   * @param referenceTimeInMs the reference time in ms until which deletes and expiration are relevant.
   * @return the requested valid data size per container as a nested {@link Map} keyed by service (account) id and then
   *         container id. (Unlike {@link #getValidSizePerLogSegment(Long)}, no aggregation end time is returned.)
   */
  Map<String, Map<String, Long>> getValidSizePerContainer(Long referenceTimeInMs) {
    // Deep-copy the base bucket, then apply the deltas of every later bucket up to the reference time.
    Map<String, Map<String, Long>> validSizePerContainer = new HashMap<>();
    for (Map.Entry<String, Map<String, Long>> accountEntry : containerBuckets.firstEntry().getValue().entrySet()) {
      validSizePerContainer.put(accountEntry.getKey(), new HashMap<>(accountEntry.getValue()));
    }
    NavigableMap<Long, Map<String, Map<String, Long>>> subMap =
        containerBuckets.subMap(containerBuckets.firstKey(), false, referenceTimeInMs, true);
    for (Map.Entry<Long, Map<String, Map<String, Long>>> bucket : subMap.entrySet()) {
      for (Map.Entry<String, Map<String, Long>> accountEntry : bucket.getValue().entrySet()) {
        for (Map.Entry<String, Long> containerEntry : accountEntry.getValue().entrySet()) {
          updateNestedMapHelper(validSizePerContainer, accountEntry.getKey(), containerEntry.getKey(),
              containerEntry.getValue());
        }
      }
    }
    return validSizePerContainer;
  }

  /**
   * Helper function to update nested map data structure.
   * @param nestedMap nested {@link Map} to be updated
   * @param firstKey of the nested map
   * @param secondKey of the nested map
   * @param value the value to be added at the corresponding entry
   */
  private void updateNestedMapHelper(Map<String, Map<String, Long>> nestedMap, String firstKey, String secondKey,
      Long value) {
    updateMapHelper(nestedMap.computeIfAbsent(firstKey, k -> new HashMap<>()), secondKey, value);
  }

  /**
   * Helper function to update map data structure.
   * @param map {@link Map} to be updated
   * @param key of the map
   * @param value the value to be added at the corresponding entry
   */
  private void updateMapHelper(Map<String, Long> map, String key, Long value) {
    // Equivalent to "put(key, get(key) + value)" with a 0 default for absent keys.
    map.merge(key, value, Long::sum);
  }
}
| xiahome/ambry | ambry-store/src/main/java/com.github.ambry.store/ScanResults.java | Java | apache-2.0 | 10,385 |
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mungeopts
import (
"time"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/test-infra/mungegithub/options"
)
var (
	// Server holds the values of options used by mungers that serve web services.
	Server struct {
		// Address is the host:port the HTTP status server listens on.
		Address string
		// WWWRoot is the path to the static web files served by the webserver.
		WWWRoot string
	}
	// GCS holds the values of GCS options.
	GCS struct {
		// BucketName is the name of the GCS bucket.
		BucketName string
		// LogDir is the directory containing test logs.
		LogDir string
		// PullLogDir is the directory of the pr builder jenkins
		PullLogDir string
		// PullKey is a string to look for in a job name to figure out if it's
		// a pull (presubmit) job.
		PullKey string
	}
	// RequiredContexts holds options that specify which status contexts are required for various
	// actions.
	RequiredContexts struct {
		// Merge lists the status contexts required for a PR to be considered ok to merge.
		Merge []string
		// Retest lists the status contexts that will be retested and must pass again.
		Retest []string
	}
	// PRMaxWaitTime is the maximum time to wait for tests in a PR to start or finish.
	// This should be >2x as long as it normally takes for a PR
	// to complete, to avoid congestion collapse in the queue.
	PRMaxWaitTime time.Duration
)
// RegisterOptions registers options that may be used by any munger, feature, or report. It returns
// any options that require a restart when changed. The registered values are written into the
// package-level Server, GCS, RequiredContexts, and PRMaxWaitTime variables above.
func RegisterOptions(opts *options.Options) sets.String {
	// Options for mungers that run web servers.
	opts.RegisterString(&Server.Address, "address", ":8080", "The address to listen on for HTTP Status")
	opts.RegisterString(&Server.WWWRoot, "www", "www", "Path to static web files to serve from the webserver")
	// GCS options:
	opts.RegisterString(&GCS.BucketName, "gcs-bucket", "", "Name of GCS bucket.")
	opts.RegisterString(&GCS.LogDir, "gcs-logs-dir", "", "Directory containing test logs.")
	opts.RegisterString(&GCS.PullLogDir, "pull-logs-dir", "", "Directory of the PR builder.")
	opts.RegisterString(&GCS.PullKey, "pull-key", "", "String to look for in job name for it to be a pull (presubmit) job.")
	// Status context options:
	opts.RegisterStringSlice(&RequiredContexts.Retest, "required-retest-contexts", []string{}, "Comma separate list of statuses which will be retested and which must come back green after the `retest-body` comment is posted to a PR")
	opts.RegisterStringSlice(&RequiredContexts.Merge, "required-contexts", []string{}, "Comma separate list of status contexts required for a PR to be considered ok to merge")
	opts.RegisterDuration(&PRMaxWaitTime, "pr-max-wait-time", 2*time.Hour, "Maximum time to wait for tests in a PR to start or finish")
	// The option names returned here must match names registered above; changing
	// these options requires a restart.
	return sets.NewString("address", "www")
}
| nlandolfi/test-infra-1 | mungegithub/mungeopts/mungeopts.go | GO | apache-2.0 | 2,994 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.wicket.application;
import java.io.IOException;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;
import org.apache.wicket.util.collections.UrlExternalFormComparator;
import org.apache.wicket.util.file.File;
import org.apache.wicket.util.listener.IChangeListener;
import org.apache.wicket.util.time.Duration;
import org.apache.wicket.util.watch.IModificationWatcher;
import org.apache.wicket.util.watch.ModificationWatcher;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Custom ClassLoader that reverses the classloader lookups, and that is able to notify a listener
 * when a class file is changed.
 *
 * @author <a href="mailto:jbq@apache.org">Jean-Baptiste Quenot</a>
 */
public class ReloadingClassLoader extends URLClassLoader
{
	private static final Logger log = LoggerFactory.getLogger(ReloadingClassLoader.class);

	/** Locations (directory URLs) to load and watch class files from. */
	private static final Set<URL> urls = new TreeSet<URL>(new UrlExternalFormComparator());

	/** Patterns, each prefixed with '+' (include) or '-' (exclude). */
	private static final List<String> patterns = new ArrayList<String>();

	/** Listener notified when a watched class file changes. */
	private IChangeListener listener;

	private final Duration pollFrequency = Duration.seconds(3);

	private final IModificationWatcher watcher;

	static
	{
		addClassLoaderUrls(ReloadingClassLoader.class.getClassLoader());
		excludePattern("org.apache.wicket.*");
		includePattern("org.apache.wicket.examples.*");
	}

	/**
	 * Decides whether this classloader should attempt to load the named class itself (instead of
	 * delegating to the parent first).
	 *
	 * @param name
	 *            fully qualified class name
	 * @return true if the class should be tried here, false otherwise
	 */
	protected boolean tryClassHere(String name)
	{
		// don't include classes in the java or javax.servlet package
		if (name != null && (name.startsWith("java.") || name.startsWith("javax.servlet")))
		{
			return false;
		}
		// Scan includes, then excludes
		boolean tryHere;
		// If no patterns are configured at all, try here. (patterns is a static final
		// field, so a null check is unnecessary.)
		if (patterns.isEmpty())
		{
			tryHere = true;
		}
		else
		{
			// See if it matches include/exclude patterns; the LAST matching pattern wins:
			// a '+' pattern turns the class on, a '-' pattern turns it off.
			tryHere = false;
			for (String rawpattern : patterns)
			{
				if (rawpattern.length() <= 1)
				{
					continue;
				}
				boolean isInclude = rawpattern.charAt(0) == '+';
				String pattern = rawpattern.substring(1);
				if (WildcardMatcherHelper.match(pattern, name) != null)
				{
					tryHere = isInclude;
				}
			}
		}
		return tryHere;
	}

	/**
	 * Include a pattern
	 *
	 * @param pattern
	 *            the pattern to include
	 */
	public static void includePattern(String pattern)
	{
		patterns.add("+" + pattern);
	}

	/**
	 * Exclude a pattern
	 *
	 * @param pattern
	 *            the pattern to exclude
	 */
	public static void excludePattern(String pattern)
	{
		patterns.add("-" + pattern);
	}

	/**
	 * Returns the list of all configured inclusion or exclusion patterns
	 *
	 * @return list of patterns as String
	 */
	public static List<String> getPatterns()
	{
		return patterns;
	}

	/**
	 * Add the location of a directory containing class files
	 *
	 * @param url
	 *            the URL for the directory
	 */
	public static void addLocation(URL url)
	{
		urls.add(url);
	}

	/**
	 * Returns the list of all configured locations of directories containing class files
	 *
	 * @return list of locations as URL
	 */
	public static Set<URL> getLocations()
	{
		return urls;
	}

	/**
	 * Add all the url locations we can find for the provided class loader
	 *
	 * @param loader
	 *            class loader
	 */
	private static void addClassLoaderUrls(ClassLoader loader)
	{
		if (loader != null)
		{
			final Enumeration<URL> resources;
			try
			{
				resources = loader.getResources("");
			}
			catch (IOException e)
			{
				throw new RuntimeException(e);
			}
			while (resources.hasMoreElements())
			{
				URL location = resources.nextElement();
				ReloadingClassLoader.addLocation(location);
			}
		}
	}

	/**
	 * Create a new reloading ClassLoader from a list of URLs, and initialize the
	 * ModificationWatcher to detect class file modifications
	 *
	 * @param parent
	 *            the parent classloader in case the class file cannot be loaded from the above
	 *            locations
	 */
	public ReloadingClassLoader(ClassLoader parent)
	{
		super(new URL[] { }, parent);
		// probably doubles from this class, but just in case
		addClassLoaderUrls(parent);
		for (URL url : urls)
		{
			addURL(url);
		}
		watcher = new ModificationWatcher(pollFrequency);
	}

	/**
	 * Gets a resource from this <code>ClassLoader</code>. If the
	 * resource does not exist in this one, we check the parent.
	 * Please note that this is the exact opposite of the
	 * <code>ClassLoader</code> spec. We use it to work around inconsistent class loaders from third
	 * party vendors.
	 *
	 * @param name
	 *            of resource
	 */
	@Override
	public final URL getResource(final String name)
	{
		URL resource = findResource(name);
		ClassLoader parent = getParent();
		if (resource == null && parent != null)
		{
			resource = parent.getResource(name);
		}
		return resource;
	}

	/**
	 * Loads the class from this <code>ClassLoader</code>. If the
	 * class does not exist in this one, we check the parent. Please
	 * note that this is the exact opposite of the
	 * <code>ClassLoader</code> spec. We use it to load the class from the same classloader as
	 * WicketFilter or WicketServlet. When found, the class file is watched for modifications.
	 *
	 * @param name
	 *            the name of the class
	 * @param resolve
	 *            if <code>true</code> then resolve the class
	 * @return the resulting <code>Class</code> object
	 * @exception ClassNotFoundException
	 *                if the class could not be found
	 */
	@Override
	public final Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException
	{
		// First check if it's already loaded
		Class<?> clazz = findLoadedClass(name);
		if (clazz == null)
		{
			final ClassLoader parent = getParent();
			if (tryClassHere(name))
			{
				try
				{
					clazz = findClass(name);
					watchForModifications(clazz);
				}
				catch (ClassNotFoundException cnfe)
				{
					if (parent == null)
					{
						// Propagate exception
						throw cnfe;
					}
				}
			}
			if (clazz == null)
			{
				if (parent == null)
				{
					throw new ClassNotFoundException(name);
				}
				else
				{
					// Will throw a CFNE if not found in parent
					// see http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6500212
					// clazz = parent.loadClass(name);
					clazz = Class.forName(name, false, parent);
				}
			}
		}
		if (resolve)
		{
			resolveClass(clazz);
		}
		return clazz;
	}

	/**
	 * Sets the listener that will be notified when a class changes
	 *
	 * @param listener
	 *            the listener to notify upon class change
	 */
	public void setListener(IChangeListener listener)
	{
		this.listener = listener;
	}

	/**
	 * Watch changes of a class file by locating it in the list of location URLs and adding the
	 * corresponding file to the ModificationWatcher
	 *
	 * @param clz
	 *            the class to watch
	 */
	private void watchForModifications(Class<?> clz)
	{
		// Watch class in the future
		Iterator<URL> locationsIterator = urls.iterator();
		File clzFile = null;
		while (locationsIterator.hasNext())
		{
			// FIXME only works for directories, but JARs etc could be checked
			// as well
			URL location = locationsIterator.next();
			// Map the class name to a path below the location; a plain character
			// replacement is sufficient (no regex needed) for the '.' separator.
			String clzLocation = location.getFile() + clz.getName().replace('.', '/') + ".class";
			log.debug("clzLocation={}", clzLocation);
			clzFile = new File(clzLocation);
			final File finalClzFile = clzFile;
			if (clzFile.exists())
			{
				log.info("Watching changes of class {}", clzFile);
				watcher.add(clzFile, new IChangeListener()
				{
					@Override
					public void onChange()
					{
						log.info("Class file {} has changed, reloading", finalClzFile);
						try
						{
							listener.onChange();
						}
						catch (Exception e)
						{
							log.error("Could not notify listener", e);
							// If an error occurs when the listener is notified,
							// remove the watched object to avoid rethrowing the
							// exception at next check
							// FIXME check if class file has been deleted
							watcher.remove(finalClzFile);
						}
					}
				});
				break;
			}
			else
			{
				log.debug("Class file does not exist: {}", clzFile);
			}
		}
		if (clzFile != null && !clzFile.exists())
		{
			log.debug("Could not locate class {}", clz.getName());
		}
	}

	/**
	 * Remove the ModificationWatcher from the current reloading class loader
	 */
	public void destroy()
	{
		watcher.destroy();
	}
}
| mafulafunk/wicket | wicket-core/src/main/java/org/apache/wicket/application/ReloadingClassLoader.java | Java | apache-2.0 | 9,526 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.