text
stringlengths 7
1.01M
|
|---|
/*
* WeatherAPILib
*
* This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
*/
package com.weatherapi.api.models;
import java.util.*;
import com.fasterxml.jackson.annotation.JsonGetter;
import com.fasterxml.jackson.annotation.JsonSetter;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonInclude.Include;
@JsonInclude(Include.ALWAYS)
public class Forecastday1
        extends java.util.Observable
        implements java.io.Serializable {

    private static final long serialVersionUID = -2843791909295019823L;

    // Forecast date (serialized as the "date" JSON field).
    private String date;
    // Forecast date as unix time (serialized as "date_epoch").
    private Integer dateEpoch;
    // Aggregated day forecast ("day" JSON object).
    private Day day;
    // Astronomy data ("astro" JSON object).
    private Astro astro;
    // Per-hour forecast entries ("hour" JSON array).
    private List<Hour> hour;

    /** GETTER
     * Forecast date
     */
    @JsonGetter("date")
    public String getDate() {
        return this.date;
    }

    /** SETTER
     * Forecast date
     */
    @JsonSetter("date")
    public void setDate(String value) {
        this.date = value;
        // Observable.notifyObservers() is a no-op unless setChanged() was
        // called first, so mark the change before notifying.
        setChanged();
        notifyObservers(this.date);
    }

    /** GETTER
     * Forecast date as unix time.
     */
    @JsonGetter("date_epoch")
    public Integer getDateEpoch() {
        return this.dateEpoch;
    }

    /** SETTER
     * Forecast date as unix time.
     */
    @JsonSetter("date_epoch")
    public void setDateEpoch(Integer value) {
        this.dateEpoch = value;
        setChanged();
        notifyObservers(this.dateEpoch);
    }

    /** GETTER
     * See day element
     */
    @JsonGetter("day")
    public Day getDay() {
        return this.day;
    }

    /** SETTER
     * See day element
     */
    @JsonSetter("day")
    public void setDay(Day value) {
        this.day = value;
        setChanged();
        notifyObservers(this.day);
    }

    /** GETTER
     * Astronomy data for this forecast day.
     */
    @JsonGetter("astro")
    public Astro getAstro() {
        return this.astro;
    }

    /** SETTER
     * Astronomy data for this forecast day.
     */
    @JsonSetter("astro")
    public void setAstro(Astro value) {
        this.astro = value;
        setChanged();
        notifyObservers(this.astro);
    }

    /** GETTER
     * Per-hour forecast entries for this day.
     */
    @JsonGetter("hour")
    public List<Hour> getHour() {
        return this.hour;
    }

    /** SETTER
     * Per-hour forecast entries for this day.
     */
    @JsonSetter("hour")
    public void setHour(List<Hour> value) {
        this.hour = value;
        setChanged();
        notifyObservers(this.hour);
    }
}
|
/*
* Java.java
*
* Copyright (C) 2002-2006 Peter Graves, Andras Simon
* $Id$
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* As a special exception, the copyright holders of this library give you
* permission to link this library with independent modules to produce an
* executable, regardless of the license terms of these independent
* modules, and to copy and distribute the resulting executable under
* terms of your choice, provided that you also meet, for each linked
* independent module, the terms and conditions of the license of that
* module. An independent module is a module which is not derived from
* or based on this library. If you modify this library, you may extend
* this exception to your version of the library, but you are not
* obligated to do so. If you do not wish to do so, delete this
* exception statement from your version.
*/
package org.armedbear.lisp;
import static org.armedbear.lisp.Lisp.error;
import java.beans.BeanInfo;
import java.beans.IntrospectionException;
import java.beans.Introspector;
import java.beans.PropertyDescriptor;
/**
 * Lisp primitives for reading and writing JavaBeans properties on wrapped
 * Java objects, using {@code java.beans} introspection.
 */
public final class JavaBeans {

    private static final Primitive JGET_PROPERTY_VALUE = new pf__jget_property_value();

    // Note: name fixed from "%jget-propety-value" to match the primitive name
    // registered in the constructor below.
    @DocString(name="%jget-property-value", args="java-object property-name",
    doc="Gets a JavaBeans property on JAVA-OBJECT.\n" +
    "SYSTEM-INTERNAL: Use jproperty-value instead.")
    private static final class pf__jget_property_value extends Primitive
    {
        pf__jget_property_value()
        {
            super("%jget-property-value", PACKAGE_JAVA, false,
                  "java-object property-name");
        }

        /**
         * Reads the named bean property from the wrapped Java object.
         * LispObject values stored in properties are returned as-is; other
         * non-null values are wrapped as JavaObject; Java null maps to NIL.
         * Any reflection/introspection failure is signalled as a Lisp error.
         */
        @Override
        public LispObject execute(LispObject javaObject, LispObject propertyName) {
            try {
                Object obj = javaObject.javaInstance();
                PropertyDescriptor pd = getPropertyDescriptor(obj, propertyName);
                Object value = pd.getReadMethod().invoke(obj);
                if(value instanceof LispObject) {
                    return (LispObject) value;
                } else if(value != null) {
                    return JavaObject.getInstance(value, true);
                } else {
                    return NIL;
                }
            } catch (Exception e) {
                return error(new JavaException(e));
            }
        }
    };

    private static final Primitive JSET_PROPERTY_VALUE = new pf__jset_property_value();

    // Note: name fixed from "%jset-propety-value" to match the primitive name
    // registered in the constructor below.
    @DocString(name="%jset-property-value", args="java-object property-name value",
    doc="Sets a JavaBean property on JAVA-OBJECT.\n" +
    "SYSTEM-INTERNAL: Use (setf jproperty-value) instead.")
    private static final class pf__jset_property_value extends Primitive
    {
        pf__jset_property_value()
        {
            super("%jset-property-value", PACKAGE_JAVA, false,
                  "java-object property-name value");
        }

        /**
         * Writes the named bean property on the wrapped Java object.
         * JavaObject values are unwrapped; for boolean-typed properties any
         * non-NIL Lisp value maps to true; otherwise NIL maps to Java null.
         */
        @Override
        public LispObject execute(LispObject javaObject, LispObject propertyName, LispObject value) {
            Object obj = null;
            try {
                obj = javaObject.javaInstance();
                PropertyDescriptor pd = getPropertyDescriptor(obj, propertyName);
                Object jValue;
                //TODO maybe we should do this in javaInstance(Class)
                if(value instanceof JavaObject) {
                    jValue = value.javaInstance();
                } else {
                    if(Boolean.TYPE.equals(pd.getPropertyType()) ||
                       Boolean.class.equals(pd.getPropertyType())) {
                        jValue = value != NIL;
                    } else {
                        jValue = value != NIL ? value.javaInstance() : null;
                    }
                }
                pd.getWriteMethod().invoke(obj, jValue);
                return value;
            } catch (Exception e) {
                return error(new JavaException(e));
            }
        }
    };

    /**
     * Looks up the {@link PropertyDescriptor} for the given property name on
     * the runtime class of {@code obj}. Signals a Lisp error (and therefore
     * does not return normally) when the property is not found.
     */
    static PropertyDescriptor getPropertyDescriptor(Object obj, LispObject propertyName) throws IntrospectionException {
        String prop = ((AbstractString) propertyName).getStringValue();
        BeanInfo beanInfo = Introspector.getBeanInfo(obj.getClass());
        for(PropertyDescriptor pd : beanInfo.getPropertyDescriptors()) {
            if(pd.getName().equals(prop)) {
                return pd;
            }
        }
        error(new LispError("Property " + prop + " not found in " + obj));
        return null; // not reached
    }
}
|
package org.summerframework.core.text.parsing.token;
import org.springframework.core.convert.ConversionService;
import java.util.Iterator;
/**
 * A {@link Tokenizer} whose tokens are converted from their raw String form
 * to a target type via a Spring {@link ConversionService}.
 */
public class GenericTokenizer<T> extends Tokenizer<T, String> {

    // Type every token value is converted to.
    private final Class<T> targetType;
    // Performs the String -> targetType conversions.
    private final ConversionService service;

    public GenericTokenizer(ConversionService service, Class<T> type, String text, String... separators) {
        this(service, type, text.toCharArray(), separators);
    }

    public GenericTokenizer(ConversionService service, Class<T> type, char[] text, String... separators) {
        super(text, separators);
        this.service = service;
        this.targetType = type;
    }

    @Override
    public Iterator<Token<T, String>> iterator() {
        // Each iteration is backed by a fresh tokenizer over the same input.
        return new GenericTokenizer<>(service, targetType, text, separators);
    }

    @SuppressWarnings("unchecked")
    @Override
    public Token<T, String> next() {
        Token<String, String> raw = nextToken();
        if (targetType == String.class) {
            // No conversion needed; the raw token already has the right type.
            return (Token<T, String>) raw;
        }
        T converted = service.convert(raw.getValue(), targetType);
        return raw.copy(converted);
    }
}
|
package seedu.siasa.storage;
import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeParseException;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import seedu.siasa.commons.exceptions.IllegalValueException;
import seedu.siasa.model.person.Person;
import seedu.siasa.model.policy.Commission;
import seedu.siasa.model.policy.ExpiryDate;
import seedu.siasa.model.policy.Policy;
import seedu.siasa.model.policy.Price;
import seedu.siasa.model.policy.Title;
/**
* Jackson-friendly version of {@link Policy}.
*/
public class JsonAdaptedPolicy {
public static final String MISSING_FIELD_MESSAGE_FORMAT = "Policy's %s field is missing!";
private final String title;
private final String price;
private final String expiryDate;
private final String commission;
private final JsonAdaptedPerson owner;
/**
* Constructs a {@code JsonAdaptedPolicy} with the given policy details.
*/
@JsonCreator
public JsonAdaptedPolicy(@JsonProperty("title") String title, @JsonProperty("price") String price,
@JsonProperty("expiryDate") String expiryDate,
@JsonProperty("commission") String commission,
@JsonProperty("owner") JsonAdaptedPerson owner) {
this.title = title;
this.price = price;
this.expiryDate = expiryDate;
this.commission = commission;
this.owner = owner;
}
/**
* Converts a given {@code Policy} into this class for Jackson use.
*/
public JsonAdaptedPolicy(Policy source) {
title = source.getTitle().toString();
price = Integer.toString(source.getPrice().priceInCents);
expiryDate = source.getExpiryDate().toString();
commission = Integer.toString(source.getCommission().commissionPercentage);
owner = new JsonAdaptedPerson(source.getOwner());
}
public JsonAdaptedPerson getOwner() {
return owner;
}
/**
* Converts this Jackson-friendly adapted person object into the model's {@code Policy} object.
*
* @throws IllegalValueException if there were any data constraints violated in the adapted policy.
*/
public Policy toModelType(Person policyOwner) throws IllegalValueException {
if (title == null) {
throw new IllegalValueException(String.format(MISSING_FIELD_MESSAGE_FORMAT, Title.class.getSimpleName()));
}
if (!Title.isValidTitle(title)) {
throw new IllegalValueException(Title.MESSAGE_CONSTRAINTS);
}
final Title modelTitle = new Title(title);
try {
if (price == null) {
throw new IllegalValueException(
String.format(MISSING_FIELD_MESSAGE_FORMAT, Price.class.getSimpleName()));
}
if (!Price.isValidPrice(Integer.parseInt(price))) {
throw new IllegalValueException(Price.MESSAGE_CONSTRAINTS);
}
} catch (IllegalValueException | NumberFormatException e) {
throw new IllegalValueException(Price.MESSAGE_CONSTRAINTS);
}
final Price modelPrice = new Price(Integer.parseInt(price));
DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd");
try {
LocalDate date = LocalDate.parse(expiryDate, formatter);
if (expiryDate == null) {
throw new IllegalValueException(
String.format(MISSING_FIELD_MESSAGE_FORMAT, ExpiryDate.class.getSimpleName()));
}
if (!ExpiryDate.isValidExpiryDate(date)) {
throw new IllegalValueException(ExpiryDate.MESSAGE_CONSTRAINTS);
}
} catch (IllegalValueException | DateTimeParseException e) {
throw new IllegalValueException(ExpiryDate.MESSAGE_CONSTRAINTS);
}
LocalDate date = LocalDate.parse(expiryDate, formatter);
final ExpiryDate modelExpiryDate = new ExpiryDate(date);
try {
if (commission == null) {
throw new IllegalValueException(
String.format(MISSING_FIELD_MESSAGE_FORMAT, Commission.class.getSimpleName()));
}
if (!Commission.isValidCommission(Integer.parseInt(commission))) {
throw new IllegalValueException(Commission.MESSAGE_CONSTRAINTS);
}
} catch (IllegalValueException | NumberFormatException e) {
throw new IllegalValueException(Commission.MESSAGE_CONSTRAINTS);
}
final Commission modelCommission = new Commission(Integer.parseInt(commission));
if (policyOwner == null) {
throw new IllegalValueException(
String.format(MISSING_FIELD_MESSAGE_FORMAT, Person.class.getSimpleName()));
}
return new Policy(modelTitle, modelPrice, modelExpiryDate, modelCommission, policyOwner);
}
}
|
package com.scylladb.cdc.replicator.operations.update;
import com.datastax.driver.core.ColumnMetadata;
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.TableMetadata;
import com.datastax.driver.core.querybuilder.Assignment;
import com.datastax.driver.core.querybuilder.ListSetIdxTimeUUIDAssignment;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.datastax.driver.core.querybuilder.UdtSetFieldAssignment;
import com.datastax.driver.core.querybuilder.Update;
import com.scylladb.cdc.cql.driver3.Driver3FromLibraryTranslator;
import com.scylladb.cdc.model.worker.RawChange;
import com.scylladb.cdc.model.worker.cql.AbstractField;
import com.scylladb.cdc.model.worker.cql.Field;
import com.scylladb.cdc.replicator.operations.ExecutingStatementHandler;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.UUID;
import java.util.stream.Collectors;
import static com.datastax.driver.core.DataType.Name.UDT;
import static com.datastax.driver.core.querybuilder.QueryBuilder.*;
/**
 * Builds and executes unprepared CQL UPDATE statements that replicate a CDC
 * row-update change onto a destination table, including non-frozen
 * collection and UDT cell-level mutations.
 */
public class UnpreparedUpdateOperationHandler extends ExecutingStatementHandler {
// Metadata of the destination table the UPDATE is built against.
private final TableMetadata table;
// Translates values from the CDC library representation into driver types.
private final Driver3FromLibraryTranslator driver3FromLibraryTranslator;
public UnpreparedUpdateOperationHandler(Session session, TableMetadata t, Driver3FromLibraryTranslator d3t) {
super(session);
table = t;
driver3FromLibraryTranslator = d3t;
}
/**
 * Translates a single {@link RawChange} into an UPDATE statement: primary
 * key columns become WHERE clauses, all other columns become SET
 * assignments. Applies the change's timestamp (and TTL, when present) and
 * the requested consistency level.
 */
@Override
public Statement getStatement(RawChange change, ConsistencyLevel cl) {
Update builder = QueryBuilder.update(table);
Set<ColumnMetadata> primaryColumns = new HashSet<>(table.getPrimaryKey());
table.getColumns().stream().forEach(c -> {
if (primaryColumns.contains(c)) {
// Primary key columns restrict the row being updated.
builder.where(eq(c.getName(), driver3FromLibraryTranslator.translate(change.getCell(c.getName()))));
} else {
Assignment op = null;
boolean isNonFrozenCollection = c.getType().isCollection() && !c.getType().isFrozen();
boolean isNonFrozenUDT = c.getType().getName() == UDT && !c.getType().isFrozen();
// CDC companion column listing elements deleted from this cell.
String deletedElementsColumnName = "cdc$deleted_elements_" + c.getName();
if (isNonFrozenCollection && !change.getIsDeleted(c.getName())) {
if (change.getAsObject(deletedElementsColumnName) != null) {
// Element removals: translate the deleted-elements set into the
// matching collection-removal assignment.
if (c.getType().getName() == DataType.Name.SET) {
op = removeAll(c.getName(), (Set) driver3FromLibraryTranslator.translate(change.getCell(deletedElementsColumnName)));
} else if (c.getType().getName() == DataType.Name.MAP) {
// Map removals arrive as a set of deleted keys, hence removeAll.
op = removeAll(c.getName(), (Set) driver3FromLibraryTranslator.translate(change.getCell(deletedElementsColumnName)));
} else if (c.getType().getName() == DataType.Name.LIST) {
// Lists are keyed by timeuuid; null out each deleted index.
Set<UUID> cSet = (Set<UUID>) driver3FromLibraryTranslator.translate(change.getCell(deletedElementsColumnName));
for (UUID key : cSet) {
builder.with(new ListSetIdxTimeUUIDAssignment(c.getName(), key, null));
}
return;
} else {
throw new IllegalStateException();
}
} else {
// Element additions/overwrites for the non-frozen collection.
if (c.getType().getName() == DataType.Name.SET) {
op = addAll(c.getName(), (Set) driver3FromLibraryTranslator.translate(change.getCell(c.getName())));
} else if (c.getType().getName() == DataType.Name.MAP) {
op = putAll(c.getName(), (Map) driver3FromLibraryTranslator.translate(change.getCell(c.getName())));
} else if (c.getType().getName() == DataType.Name.LIST) {
// List cells arrive as timeuuid->value entries; set each index.
Map<UUID, Object> cMap = (Map<UUID, Object>) driver3FromLibraryTranslator.translate(change.getCell(c.getName()));
for (Map.Entry<UUID, Object> e : cMap.entrySet()) {
builder.with(new ListSetIdxTimeUUIDAssignment(c.getName(), e.getKey(), e.getValue()));
}
return;
} else {
throw new IllegalStateException();
}
}
} else if (isNonFrozenUDT && !change.getIsDeleted(c.getName())) {
// Non-frozen UDT: set individual fields, skipping fields that are
// both null and not explicitly marked deleted.
Map<String, Field> libraryUdt = change.getCell(c.getName()).getUDT();
Set<Field> deletedElements = change.getCell(deletedElementsColumnName).getSet();
if (deletedElements == null) {
deletedElements = Collections.emptySet();
}
// Deleted UDT fields are identified by their (short) field index.
Set<Short> deletedIdx = deletedElements.stream().map(AbstractField::getShort).collect(Collectors.toCollection(HashSet<Short>::new));
Set<Map.Entry<String, Field>> udtFields = libraryUdt.entrySet();
Iterator<Map.Entry<String, Field>> udtFieldsIterator = udtFields.iterator();
for (int idx = 0; udtFieldsIterator.hasNext(); idx++) {
Map.Entry<String, Field> field = udtFieldsIterator.next();
Object fieldValue = driver3FromLibraryTranslator.translate(field.getValue().getAsObject(),
field.getValue().getDataType());
if (fieldValue == null && !deletedIdx.contains((short)idx)) {
// Untouched field: neither a new value nor a deletion.
continue;
}
builder.with(new UdtSetFieldAssignment(c.getName(), field.getKey(), fieldValue));
}
return;
}
if (op == null) {
// Fallback: whole-cell overwrite (frozen values, scalars, or a
// deleted cell).
if (c.getType().getName() == DataType.Name.LIST && !c.getType().isFrozen()) {
// Rebuild the full list ordered by its timeuuid keys.
Map<UUID, Object> cMap = (Map<UUID, Object>) driver3FromLibraryTranslator.translate(change.getCell(c.getName()));
if (cMap == null) {
op = set(c.getName(), null);
} else {
TreeMap<UUID, Object> sorted = new TreeMap<>();
for (Map.Entry<UUID, Object> e : cMap.entrySet()) {
sorted.put(e.getKey(), e.getValue());
}
List<Object> list = new ArrayList<>();
for (Map.Entry<UUID, Object> e : sorted.entrySet()) {
list.add(e.getValue());
}
op = set(c.getName(), list);
}
} else {
op = set(c.getName(), driver3FromLibraryTranslator.translate(change.getCell(c.getName())));
}
}
builder.with(op);
}
});
// Preserve the original write time; attach TTL only when the change has one.
Long ttl = change.getTTL();
if (ttl != null) {
builder.using(timestamp(change.getId().getChangeTime().getTimestamp())).and(ttl((int) ((long) ttl)));
} else {
builder.using(timestamp(change.getId().getChangeTime().getTimestamp()));
}
builder.setConsistencyLevel(cl);
return builder;
}
}
|
package edu.umd.cs.findbugs.detect;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.collection.IsEmptyIterable.emptyIterable;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
import java.nio.file.Paths;
import org.junit.Rule;
import org.junit.Test;
import edu.umd.cs.findbugs.BugCollection;
import edu.umd.cs.findbugs.SortedBugCollection;
import edu.umd.cs.findbugs.test.SpotBugsRule;
public class ResolveMethodReferencesTest {

    @Rule
    public SpotBugsRule spotbugs = new SpotBugsRule();

    /**
     * Analyses a compiled class containing method references and verifies
     * that no bugs are reported and no classes are recorded as missing.
     *
     * @see <a href="https://github.com/spotbugs/spotbugs/issues/338">GitHub
     *      issue</a>
     */
    @Test
    public void testIssue338() {
        BugCollection result = spotbugs.performAnalysis(
                Paths.get("../spotbugsTestCases/build/classes/java/main/lambdas/Issue338.class"));
        assertThat(result, is(emptyIterable()));
        assertThat(result, instanceOf(SortedBugCollection.class));
        SortedBugCollection sorted = (SortedBugCollection) result;
        assertThat(sorted.missingClassIterator().hasNext(), is(false));
    }
}
|
package me.saket.dank.ui.preferences;
import android.content.res.Resources;
import android.graphics.Typeface;
import android.os.Build;
import androidx.annotation.FontRes;
import androidx.annotation.NonNull;
import androidx.annotation.RequiresApi;
import com.f2prateek.rx.preferences2.Preference;
import com.google.auto.value.AutoValue;
import com.squareup.moshi.JsonAdapter;
import com.squareup.moshi.Moshi;
import java.io.IOException;
import io.reactivex.exceptions.Exceptions;
@AutoValue
public abstract class TypefaceResource {

    // Fallback typeface used when no other resource is selected.
    public static final TypefaceResource DEFAULT = TypefaceResource.create(
            "Roboto regular",
            -1,
            "roboto_regular.ttf");

    public abstract String name();

    @FontRes
    @RequiresApi(Build.VERSION_CODES.O)
    public abstract int id();

    /**
     * Used below Oreo.
     */
    public abstract String compatFileName();

    public Typeface get(Resources resources) {
        // Resource-based loading (resources.getFont(id()) on O+) is currently
        // disabled; the typeface is always created from the asset file.
        return Typeface.createFromAsset(resources.getAssets(), compatFileName());
    }

    public static TypefaceResource create(String name, @FontRes int typefaceRes, String compatFileName) {
        return new AutoValue_TypefaceResource(name, typefaceRes, compatFileName);
    }

    public static TypefaceResource create(String name, String compatFileName) {
        return new AutoValue_TypefaceResource(name, -1, compatFileName);
    }

    public static JsonAdapter<TypefaceResource> jsonAdapter(Moshi moshi) {
        return new AutoValue_TypefaceResource.MoshiJsonAdapter(moshi);
    }

    /** (De)serializes {@link TypefaceResource} values as JSON for rx-preferences. */
    public static class Converter implements Preference.Converter<TypefaceResource> {
        private final Moshi moshi;
        private JsonAdapter<TypefaceResource> adapter;

        public Converter(Moshi moshi) {
            this.moshi = moshi;
        }

        @NonNull
        @Override
        public TypefaceResource deserialize(@NonNull String serialized) {
            try {
                //noinspection ConstantConditions
                return adapter().fromJson(serialized);
            } catch (IOException e) {
                throw Exceptions.propagate(e);
            }
        }

        @NonNull
        @Override
        public String serialize(@NonNull TypefaceResource value) {
            //noinspection ConstantConditions
            return adapter().toJson(value);
        }

        // Lazily creates and caches the Moshi adapter on first use.
        private JsonAdapter<TypefaceResource> adapter() {
            JsonAdapter<TypefaceResource> cached = adapter;
            if (cached == null) {
                cached = moshi.adapter(TypefaceResource.class);
                adapter = cached;
            }
            return cached;
        }
    }
}
|
/*
* Copyright 2015-present Open Networking Laboratory
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.net.intent.impl.compiler;
import com.google.common.collect.ImmutableSet;
import org.hamcrest.core.Is;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.onlab.packet.MacAddress;
import org.onlab.packet.VlanId;
import org.onosproject.cfg.ComponentConfigAdapter;
import org.onosproject.core.CoreService;
import org.onosproject.net.ConnectPoint;
import org.onosproject.net.DefaultLink;
import org.onosproject.net.FilteredConnectPoint;
import org.onosproject.net.Link;
import org.onosproject.net.PortNumber;
import org.onosproject.net.flow.DefaultTrafficSelector;
import org.onosproject.net.flow.DefaultTrafficTreatment;
import org.onosproject.net.flow.FlowRule;
import org.onosproject.net.flow.TrafficSelector;
import org.onosproject.net.flow.TrafficTreatment;
import org.onosproject.net.flow.criteria.MplsCriterion;
import org.onosproject.net.flow.criteria.PortCriterion;
import org.onosproject.net.flow.criteria.VlanIdCriterion;
import org.onosproject.net.intent.FlowRuleIntent;
import org.onosproject.net.intent.Intent;
import org.onosproject.net.intent.IntentExtensionService;
import org.onosproject.net.intent.LinkCollectionIntent;
import org.onosproject.net.resource.MockResourceService;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.everyItem;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is;
import static org.onlab.packet.EthType.EtherType.IPV4;
import static org.onosproject.net.Link.Type.DIRECT;
import static org.onosproject.net.NetTestTools.*;
import static org.onosproject.net.flow.criteria.Criterion.Type.IN_PORT;
import static org.onosproject.net.flow.criteria.Criterion.Type.MPLS_LABEL;
import static org.onosproject.net.flow.criteria.Criterion.Type.VLAN_VID;
import static org.onosproject.net.flow.instructions.L2ModificationInstruction.*;
/**
* This set of tests are meant to test the LinkCollectionIntent
* compiler.
*/
public class LinkCollectionIntentCompilerTest extends AbstractLinkCollectionTest {
@Before
public void setUp() {
// System under test: a fresh compiler instance per test.
sut = new LinkCollectionIntentCompiler();
coreService = createMock(CoreService.class);
expect(coreService.registerApplication("org.onosproject.net.intent"))
.andReturn(appId);
sut.coreService = coreService;
// Unbind first in case a previous test left a generator bound, then bind ours.
Intent.unbindIdGenerator(idGenerator);
Intent.bindIdGenerator(idGenerator);
// Default intent fixture: simple link collection with one ingress and one egress.
intent = LinkCollectionIntent.builder()
.appId(APP_ID)
.selector(selector)
.treatment(treatment)
.links(links)
.ingressPoints(ImmutableSet.of(d1p1))
.egressPoints(ImmutableSet.of(d3p1))
.build();
intentExtensionService = createMock(IntentExtensionService.class);
intentExtensionService.registerCompiler(LinkCollectionIntent.class, sut);
intentExtensionService.unregisterCompiler(LinkCollectionIntent.class);
registrator = new IntentConfigurableRegistrator();
registrator.extensionService = intentExtensionService;
registrator.cfgService = new ComponentConfigAdapter();
registrator.activate();
sut.registrator = registrator;
sut.resourceService = new MockResourceService();
// Disable instruction optimization and copy-TTL so outputs are deterministic
// for the assertions in these tests.
LinkCollectionCompiler.optimizeInstructions = false;
LinkCollectionCompiler.copyTtl = false;
replay(coreService, intentExtensionService);
}
@After
public void tearDown() {
// Release the ID generator bound in setUp() so later tests can bind their own.
Intent.unbindIdGenerator(idGenerator);
}
/**
 * We test the proper compilation of a simple link collection intent
 * with connect points, trivial treatment and trivial selector.
 */
@Test
public void testCompile() {
sut.activate();
List<Intent> compiled = sut.compile(intent, Collections.emptyList());
// Compilation yields exactly one FlowRuleIntent carrying the parent's key.
assertThat(compiled, hasSize(1));
assertThat("key is inherited",
compiled.stream().map(Intent::key).collect(Collectors.toList()),
everyItem(is(intent.key())));
Collection<FlowRule> rules = ((FlowRuleIntent) compiled.get(0)).flowRules();
// One flow rule per link in the intent.
assertThat(rules, hasSize(links.size()));
// if not found, get() raises an exception
FlowRule rule1 = rules.stream()
.filter(rule -> rule.deviceId().equals(d1p10.deviceId()))
.findFirst()
.get();
// Ingress device: selector matches the ingress port, treatment forwards out.
assertThat(rule1.selector(), is(
DefaultTrafficSelector.builder(intent.selector()).matchInPort(d1p1.port()).build()
));
assertThat(rule1.treatment(), is(
DefaultTrafficTreatment.builder(intent.treatment()).setOutput(d1p1.port()).build()
));
assertThat(rule1.priority(), is(intent.priority()));
FlowRule rule2 = rules.stream()
.filter(rule -> rule.deviceId().equals(d2p0.deviceId()))
.findFirst()
.get();
// Transit device: plain forward, no intent treatment applied.
assertThat(rule2.selector(), is(
DefaultTrafficSelector.builder(intent.selector()).matchInPort(d2p0.port()).build()
));
assertThat(rule2.treatment(), is(
DefaultTrafficTreatment.builder().setOutput(d2p1.port()).build()
));
assertThat(rule2.priority(), is(intent.priority()));
FlowRule rule3 = rules.stream()
.filter(rule -> rule.deviceId().equals(d3p0.deviceId()))
.findFirst()
.get();
// Egress device rule.
assertThat(rule3.selector(), is(
DefaultTrafficSelector.builder(intent.selector()).matchInPort(d3p1.port()).build()
));
assertThat(rule3.treatment(), is(
DefaultTrafficTreatment.builder().setOutput(d3p1.port()).build()
));
assertThat(rule3.priority(), is(intent.priority()));
sut.deactivate();
}
/**
 * Single point to multi point case. Scenario is the follow:
 *
 * -1 of1 2-1 of2 2--1 of3 2-
 * 3
 * `-1 of4 2-
 *
 * We test the proper compilation of sp2mp with trivial selector,
 * trivial treatment and different filtered points
 */
@Test
public void testFilteredConnectPointForSp() {
sut.activate();
// Topology links: of1 -> of2, then of2 fans out to of3 and of4.
Set<Link> testLinks = ImmutableSet.of(
DefaultLink.builder().providerId(PID).src(of1p2).dst(of2p1).type(DIRECT).build(),
DefaultLink.builder().providerId(PID).src(of2p2).dst(of3p1).type(DIRECT).build(),
DefaultLink.builder().providerId(PID).src(of2p3).dst(of4p1).type(DIRECT).build()
);
// Expected per-device selector/treatment pairs built from the vlan100
// ingress filter and the devices' in/out ports.
TrafficSelector expectOf1Selector = DefaultTrafficSelector.builder(vlan100Selector)
.matchInPort(PortNumber.portNumber(1))
.build();
TrafficTreatment expectOf1Treatment = DefaultTrafficTreatment.builder()
.setOutput(PortNumber.portNumber(2))
.build();
TrafficSelector expectOf2Selector = DefaultTrafficSelector.builder(vlan100Selector)
.matchInPort(PortNumber.portNumber(1))
.build();
// of2 is the fan-out point: one rule with two outputs.
TrafficTreatment expectOf2Treatment = DefaultTrafficTreatment.builder()
.setOutput(PortNumber.portNumber(2))
.setOutput(PortNumber.portNumber(3))
.build();
TrafficSelector expectOf3Selector = DefaultTrafficSelector.builder(vlan100Selector)
.matchInPort(PortNumber.portNumber(1))
.build();
TrafficTreatment expectOf3Treatment = DefaultTrafficTreatment.builder()
.setOutput(PortNumber.portNumber(2))
.build();
TrafficSelector expectOf4Selector = DefaultTrafficSelector.builder(vlan100Selector)
.matchInPort(PortNumber.portNumber(1))
.build();
// of4's egress is filtered on vlan 200, so the VLAN is rewritten there.
TrafficTreatment expectOf4Treatment = DefaultTrafficTreatment.builder()
.setVlanId(VlanId.vlanId("200"))
.setOutput(PortNumber.portNumber(2))
.build();
Set<FilteredConnectPoint> ingress = ImmutableSet.of(
new FilteredConnectPoint(of1p1, vlan100Selector)
);
Set<FilteredConnectPoint> egress = ImmutableSet.of(
new FilteredConnectPoint(of3p2, vlan100Selector),
new FilteredConnectPoint(of4p2, vlan200Selector)
);
intent = LinkCollectionIntent.builder()
.appId(APP_ID)
.filteredIngressPoints(ingress)
.filteredEgressPoints(egress)
.treatment(treatment)
.applyTreatmentOnEgress(true)
.links(testLinks)
.build();
assertThat(sut, is(notNullValue()));
List<Intent> result = sut.compile(intent, Collections.emptyList());
assertThat(result, is(notNullValue()));
assertThat(result, hasSize(1));
Intent resultIntent = result.get(0);
assertThat(resultIntent, instanceOf(FlowRuleIntent.class));
if (resultIntent instanceof FlowRuleIntent) {
FlowRuleIntent frIntent = (FlowRuleIntent) resultIntent;
// One rule per device, four devices in the scenario.
assertThat(frIntent.flowRules(), hasSize(4));
List<FlowRule> deviceFlowRules;
FlowRule flowRule;
// Of1
deviceFlowRules = getFlowRulesByDevice(of1Id, frIntent.flowRules());
assertThat(deviceFlowRules, hasSize(1));
flowRule = deviceFlowRules.get(0);
assertThat(flowRule.selector(), is(expectOf1Selector));
assertThat(flowRule.treatment(), is(expectOf1Treatment));
// Of2
deviceFlowRules = getFlowRulesByDevice(of2Id, frIntent.flowRules());
assertThat(deviceFlowRules, hasSize(1));
flowRule = deviceFlowRules.get(0);
assertThat(flowRule.selector(), is(expectOf2Selector));
assertThat(flowRule.treatment(), is(expectOf2Treatment));
// Of3
deviceFlowRules = getFlowRulesByDevice(of3Id, frIntent.flowRules());
assertThat(deviceFlowRules, hasSize(1));
flowRule = deviceFlowRules.get(0);
assertThat(flowRule.selector(), is(expectOf3Selector));
assertThat(flowRule.treatment(), is(expectOf3Treatment));
// Of4
deviceFlowRules = getFlowRulesByDevice(of4Id, frIntent.flowRules());
assertThat(deviceFlowRules, hasSize(1));
flowRule = deviceFlowRules.get(0);
assertThat(flowRule.selector(), is(expectOf4Selector));
assertThat(flowRule.treatment(), is(expectOf4Treatment));
}
sut.deactivate();
}
/**
 * Multi point to single point intent with filtered connect point.
 * Scenario is the follow:
 *
 * -1 of1 2-1 of2 2-1 of4 2-
 * 3
 * -1 of3 2---/
 *
 * We test the proper compilation of mp2sp intents with trivial selector,
 * trivial treatment and different filtered point.
 */
@Test
public void testFilteredConnectPointForMp() {
sut.activate();
// Topology links: of1 and of3 both feed of2, which forwards to of4.
Set<Link> testlinks = ImmutableSet.of(
DefaultLink.builder().providerId(PID).src(of1p2).dst(of2p1).type(DIRECT).build(),
DefaultLink.builder().providerId(PID).src(of3p2).dst(of2p3).type(DIRECT).build(),
DefaultLink.builder().providerId(PID).src(of2p2).dst(of4p1).type(DIRECT).build()
);
// Two vlan100-filtered ingresses converge on a single vlan200 egress.
Set<FilteredConnectPoint> ingress = ImmutableSet.of(
new FilteredConnectPoint(of1p1, vlan100Selector),
new FilteredConnectPoint(of3p1, vlan100Selector)
);
Set<FilteredConnectPoint> egress = ImmutableSet.of(
new FilteredConnectPoint(of4p2, vlan200Selector)
);
// Ingress devices rewrite vlan 100 -> 200 before forwarding.
TrafficSelector expectOf1Selector = DefaultTrafficSelector.builder(vlan100Selector)
.matchInPort(PortNumber.portNumber(1))
.build();
TrafficTreatment expectOf1Treatment = DefaultTrafficTreatment.builder()
.setVlanId(VlanId.vlanId("200"))
.setOutput(PortNumber.portNumber(2))
.build();
// of2 receives the already-rewritten vlan 200 traffic on two ports,
// hence two selectors sharing one forwarding treatment.
TrafficSelector expectOf2Selector1 = DefaultTrafficSelector.builder(vlan200Selector)
.matchInPort(PortNumber.portNumber(1))
.build();
TrafficSelector expectOf2Selector2 = DefaultTrafficSelector.builder(vlan200Selector)
.matchInPort(PortNumber.portNumber(3))
.build();
TrafficTreatment expectOf2Treatment = DefaultTrafficTreatment.builder()
.setOutput(PortNumber.portNumber(2))
.build();
TrafficSelector expectOf3Selector = DefaultTrafficSelector.builder(vlan100Selector)
.matchInPort(PortNumber.portNumber(1))
.build();
TrafficTreatment expectOf3Treatment = DefaultTrafficTreatment.builder()
.setVlanId(VlanId.vlanId("200"))
.setOutput(PortNumber.portNumber(2))
.build();
TrafficSelector expectOf4Selector = DefaultTrafficSelector.builder(vlan100Selector)
.matchInPort(PortNumber.portNumber(1))
.matchVlanId(VlanId.vlanId("200"))
.build();
TrafficTreatment expectOf4Treatment = DefaultTrafficTreatment.builder()
.setOutput(PortNumber.portNumber(2))
.build();
intent = LinkCollectionIntent.builder()
.appId(APP_ID)
.filteredIngressPoints(ingress)
.filteredEgressPoints(egress)
.treatment(treatment)
.links(testlinks)
.build();
List<Intent> result = sut.compile(intent, Collections.emptyList());
assertThat(result, is(notNullValue()));
assertThat(result, hasSize(1));
Intent resultIntent = result.get(0);
assertThat(resultIntent, instanceOf(FlowRuleIntent.class));
if (resultIntent instanceof FlowRuleIntent) {
FlowRuleIntent frIntent = (FlowRuleIntent) resultIntent;
// Five rules total: of2 carries two (one per incoming port).
assertThat(frIntent.flowRules(), hasSize(5));
List<FlowRule> deviceFlowRules;
FlowRule flowRule;
// Of1
deviceFlowRules = getFlowRulesByDevice(of1Id, frIntent.flowRules());
assertThat(deviceFlowRules, hasSize(1));
flowRule = deviceFlowRules.get(0);
assertThat(flowRule.selector(), is(expectOf1Selector));
assertThat(flowRule.treatment(), is(expectOf1Treatment));
// Of2 (has 2 flows)
deviceFlowRules = getFlowRulesByDevice(of2Id, frIntent.flowRules());
assertThat(deviceFlowRules, hasSize(2));
flowRule = deviceFlowRules.get(0);
assertThat(flowRule.selector(), is(expectOf2Selector1));
assertThat(flowRule.treatment(), is(expectOf2Treatment));
flowRule = deviceFlowRules.get(1);
assertThat(flowRule.selector(), is(expectOf2Selector2));
assertThat(flowRule.treatment(), is(expectOf2Treatment));
// Of3
deviceFlowRules = getFlowRulesByDevice(of3Id, frIntent.flowRules());
assertThat(deviceFlowRules, hasSize(1));
flowRule = deviceFlowRules.get(0);
assertThat(flowRule.selector(), is(expectOf3Selector));
assertThat(flowRule.treatment(), is(expectOf3Treatment));
// Of4
deviceFlowRules = getFlowRulesByDevice(of4Id, frIntent.flowRules());
assertThat(deviceFlowRules, hasSize(1));
flowRule = deviceFlowRules.get(0);
assertThat(flowRule.selector(), is(expectOf4Selector));
assertThat(flowRule.treatment(), is(expectOf4Treatment));
}
sut.deactivate();
}
/**
 * Single point to multi point without filtered connect point case.
 * Scenario is the follow:
 *
 * -1 of1 2-1 of2 2--1 of3 2-
 *                 3
 *                 `-1 of4 2-
 *
 * We test the proper compilation of sp2mp with non trivial selector,
 * non trivial treatment and simple connect points.
 */
@Test
public void nonTrivialTranslationForSp() {
    sut.activate();
    // Tree topology: of1 feeds of2, which fans out to of3 (via port 2)
    // and of4 (via port 3).
    Set<Link> testLinks = ImmutableSet.of(
            DefaultLink.builder().providerId(PID).src(of1p2).dst(of2p1).type(DIRECT).build(),
            DefaultLink.builder().providerId(PID).src(of2p2).dst(of3p1).type(DIRECT).build(),
            DefaultLink.builder().providerId(PID).src(of2p3).dst(of4p1).type(DIRECT).build()
    );
    // The intent selector (ipPrefixSelector) is matched at every hop; the
    // in-port match pins each rule to the incoming link.
    TrafficSelector expectOf1Selector = DefaultTrafficSelector.builder(ipPrefixSelector)
            .matchInPort(PortNumber.portNumber(1))
            .build();
    TrafficTreatment expectOf1Treatment = DefaultTrafficTreatment.builder()
            .setOutput(PortNumber.portNumber(2))
            .build();
    TrafficSelector expectOf2Selector = DefaultTrafficSelector.builder(ipPrefixSelector)
            .matchInPort(PortNumber.portNumber(1))
            .build();
    // of2 is the branching point: a single rule duplicates traffic to both
    // subtrees (instruction order is significant for treatment equality).
    TrafficTreatment expectOf2Treatment = DefaultTrafficTreatment.builder()
            .setOutput(PortNumber.portNumber(2))
            .setOutput(PortNumber.portNumber(3))
            .build();
    TrafficSelector expectOf3Selector = DefaultTrafficSelector.builder(ipPrefixSelector)
            .matchInPort(PortNumber.portNumber(1))
            .build();
    // applyTreatmentOnEgress(true): the intent treatment (eth-dst rewrite)
    // is applied only at the egress devices of3 and of4.
    TrafficTreatment expectOf3Treatment = DefaultTrafficTreatment.builder(ethDstTreatment)
            .setOutput(PortNumber.portNumber(2))
            .build();
    TrafficSelector expectOf4Selector = DefaultTrafficSelector.builder(ipPrefixSelector)
            .matchInPort(PortNumber.portNumber(1))
            .build();
    TrafficTreatment expectOf4Treatment = DefaultTrafficTreatment.builder(ethDstTreatment)
            .setOutput(PortNumber.portNumber(2))
            .build();
    Set<ConnectPoint> ingress = ImmutableSet.of(
            of1p1
    );
    Set<ConnectPoint> egress = ImmutableSet.of(
            of3p2,
            of4p2
    );
    intent = LinkCollectionIntent.builder()
            .appId(APP_ID)
            .selector(ipPrefixSelector)
            .treatment(ethDstTreatment)
            .ingressPoints(ingress)
            .egressPoints(egress)
            .applyTreatmentOnEgress(true)
            .links(testLinks)
            .build();
    assertThat(sut, is(notNullValue()));
    List<Intent> result = sut.compile(intent, Collections.emptyList());
    assertThat(result, is(notNullValue()));
    assertThat(result, hasSize(1));
    Intent resultIntent = result.get(0);
    assertThat(resultIntent, instanceOf(FlowRuleIntent.class));
    if (resultIntent instanceof FlowRuleIntent) {
        FlowRuleIntent frIntent = (FlowRuleIntent) resultIntent;
        // One flow rule per device; of2 branches within a single rule.
        assertThat(frIntent.flowRules(), hasSize(4));
        List<FlowRule> deviceFlowRules;
        FlowRule flowRule;
        // Of1
        deviceFlowRules = getFlowRulesByDevice(of1Id, frIntent.flowRules());
        assertThat(deviceFlowRules, hasSize(1));
        flowRule = deviceFlowRules.get(0);
        assertThat(flowRule.selector(), is(expectOf1Selector));
        assertThat(flowRule.treatment(), is(expectOf1Treatment));
        // Of2
        deviceFlowRules = getFlowRulesByDevice(of2Id, frIntent.flowRules());
        assertThat(deviceFlowRules, hasSize(1));
        flowRule = deviceFlowRules.get(0);
        assertThat(flowRule.selector(), is(expectOf2Selector));
        assertThat(flowRule.treatment(), is(expectOf2Treatment));
        // Of3
        deviceFlowRules = getFlowRulesByDevice(of3Id, frIntent.flowRules());
        assertThat(deviceFlowRules, hasSize(1));
        flowRule = deviceFlowRules.get(0);
        assertThat(flowRule.selector(), is(expectOf3Selector));
        assertThat(flowRule.treatment(), is(expectOf3Treatment));
        // Of4
        deviceFlowRules = getFlowRulesByDevice(of4Id, frIntent.flowRules());
        assertThat(deviceFlowRules, hasSize(1));
        flowRule = deviceFlowRules.get(0);
        assertThat(flowRule.selector(), is(expectOf4Selector));
        assertThat(flowRule.treatment(), is(expectOf4Treatment));
    }
    sut.deactivate();
}
/**
 * Multi point to single point intent without filtered connect point.
 * Scenario is the follow:
 *
 * -1 of1 2-1 of2 2-1 of4 2-
 *                3
 * -1 of3 2---/
 *
 * We test the proper compilation of mp2sp intent with non trivial selector,
 * non trivial treatment and simple connect points.
 */
@Test
public void nonTrivialTranslationForMp() {
    sut.activate();
    // Tree topology: of1 and of3 both feed of2, which forwards to of4.
    Set<Link> testlinks = ImmutableSet.of(
            DefaultLink.builder().providerId(PID).src(of1p2).dst(of2p1).type(DIRECT).build(),
            DefaultLink.builder().providerId(PID).src(of3p2).dst(of2p3).type(DIRECT).build(),
            DefaultLink.builder().providerId(PID).src(of2p2).dst(of4p1).type(DIRECT).build()
    );
    Set<ConnectPoint> ingress = ImmutableSet.of(
            of1p1,
            of3p1
    );
    Set<ConnectPoint> egress = ImmutableSet.of(
            of4p2
    );
    // Without applyTreatmentOnEgress, the eth-dst rewrite is applied at the
    // ingress devices (of1, of3); downstream selectors therefore already
    // match the rewritten destination MAC.
    TrafficSelector expectOf1Selector = DefaultTrafficSelector.builder(ipPrefixSelector)
            .matchInPort(PortNumber.portNumber(1))
            .build();
    TrafficTreatment expectOf1Treatment = DefaultTrafficTreatment.builder(ethDstTreatment)
            .setOutput(PortNumber.portNumber(2))
            .build();
    // of2 has one rule per incoming branch (ports 1 and 3).
    TrafficSelector expectOf2Selector1 = DefaultTrafficSelector.builder(ipPrefixSelector)
            .matchInPort(PortNumber.portNumber(1))
            .matchEthDst(MacAddress.valueOf("C0:FF:EE:C0:FF:EE"))
            .build();
    TrafficSelector expectOf2Selector2 = DefaultTrafficSelector.builder(ipPrefixSelector)
            .matchEthDst(MacAddress.valueOf("C0:FF:EE:C0:FF:EE"))
            .matchInPort(PortNumber.portNumber(3))
            .build();
    TrafficTreatment expectOf2Treatment = DefaultTrafficTreatment.builder()
            .setOutput(PortNumber.portNumber(2))
            .build();
    TrafficSelector expectOf3Selector = DefaultTrafficSelector.builder(ipPrefixSelector)
            .matchInPort(PortNumber.portNumber(1))
            .build();
    TrafficTreatment expectOf3Treatment = DefaultTrafficTreatment.builder(ethDstTreatment)
            .setOutput(PortNumber.portNumber(2))
            .build();
    TrafficSelector expectOf4Selector = DefaultTrafficSelector.builder(ipPrefixSelector)
            .matchEthDst(MacAddress.valueOf("C0:FF:EE:C0:FF:EE"))
            .matchInPort(PortNumber.portNumber(1))
            .build();
    TrafficTreatment expectOf4Treatment = DefaultTrafficTreatment.builder()
            .setOutput(PortNumber.portNumber(2))
            .build();
    intent = LinkCollectionIntent.builder()
            .appId(APP_ID)
            .selector(ipPrefixSelector)
            .ingressPoints(ingress)
            .egressPoints(egress)
            .treatment(ethDstTreatment)
            .links(testlinks)
            .build();
    List<Intent> result = sut.compile(intent, Collections.emptyList());
    assertThat(result, is(notNullValue()));
    assertThat(result, hasSize(1));
    Intent resultIntent = result.get(0);
    assertThat(resultIntent, instanceOf(FlowRuleIntent.class));
    if (resultIntent instanceof FlowRuleIntent) {
        FlowRuleIntent frIntent = (FlowRuleIntent) resultIntent;
        // 5 rules total: one each on of1/of3/of4 and two on of2.
        assertThat(frIntent.flowRules(), hasSize(5));
        List<FlowRule> deviceFlowRules;
        FlowRule flowRule;
        // Of1
        deviceFlowRules = getFlowRulesByDevice(of1Id, frIntent.flowRules());
        assertThat(deviceFlowRules, hasSize(1));
        flowRule = deviceFlowRules.get(0);
        assertThat(flowRule.selector(), is(expectOf1Selector));
        assertThat(flowRule.treatment(), is(expectOf1Treatment));
        // Of2 (has 2 flows)
        deviceFlowRules = getFlowRulesByDevice(of2Id, frIntent.flowRules());
        assertThat(deviceFlowRules, hasSize(2));
        flowRule = deviceFlowRules.get(0);
        assertThat(flowRule.selector(), is(expectOf2Selector1));
        assertThat(flowRule.treatment(), is(expectOf2Treatment));
        flowRule = deviceFlowRules.get(1);
        assertThat(flowRule.selector(), is(expectOf2Selector2));
        assertThat(flowRule.treatment(), is(expectOf2Treatment));
        // Of3
        deviceFlowRules = getFlowRulesByDevice(of3Id, frIntent.flowRules());
        assertThat(deviceFlowRules, hasSize(1));
        flowRule = deviceFlowRules.get(0);
        assertThat(flowRule.selector(), is(expectOf3Selector));
        assertThat(flowRule.treatment(), is(expectOf3Treatment));
        // Of4
        deviceFlowRules = getFlowRulesByDevice(of4Id, frIntent.flowRules());
        assertThat(deviceFlowRules, hasSize(1));
        flowRule = deviceFlowRules.get(0);
        assertThat(flowRule.selector(), is(expectOf4Selector));
        assertThat(flowRule.treatment(), is(expectOf4Treatment));
    }
    sut.deactivate();
}
/**
 * We test the proper compilation of mp2sp with
 * trivial selector, trivial treatment and 1 hop.
 */
@Test
public void singleHopTestForMp() {
    // Two unfiltered ingress points and one egress point, all on device d1:
    // the compiler must emit one flow rule per ingress port.
    intent = LinkCollectionIntent.builder()
            .appId(APP_ID)
            .selector(selector)
            .treatment(treatment)
            .links(ImmutableSet.of())
            .filteredIngressPoints(ImmutableSet.of(
                    new FilteredConnectPoint(d1p10),
                    new FilteredConnectPoint(d1p11)
            ))
            .filteredEgressPoints(ImmutableSet.of(new FilteredConnectPoint(d1p0)))
            .build();
    sut.activate();
    List<Intent> compiledIntents = sut.compile(intent, Collections.emptyList());
    assertThat(compiledIntents, hasSize(1));
    Collection<FlowRule> allRules = ((FlowRuleIntent) compiledIntents.get(0)).flowRules();
    assertThat(allRules, hasSize(2));
    Collection<FlowRule> deviceOneRules = allRules.stream()
            .filter(rule -> rule.deviceId().equals(d1p0.deviceId()))
            .collect(Collectors.toSet());
    assertThat(deviceOneRules, hasSize(2));
    // Each ingress port must have a rule matching that port and
    // forwarding straight to the single egress port.
    for (PortNumber ingressPort : ImmutableSet.of(d1p10.port(), d1p11.port())) {
        FlowRule matchedRule = deviceOneRules.stream()
                .filter(rule -> {
                    PortCriterion inPort = (PortCriterion) rule.selector().getCriterion(IN_PORT);
                    return inPort.port().equals(ingressPort);
                })
                .findFirst()
                .get();
        assertThat(matchedRule.selector(), Is.is(
                DefaultTrafficSelector
                        .builder()
                        .matchInPort(ingressPort)
                        .build()
        ));
        assertThat(matchedRule.treatment(), Is.is(
                DefaultTrafficTreatment
                        .builder()
                        .setOutput(d1p0.port())
                        .build()
        ));
    }
    sut.deactivate();
}
/**
 * We test the proper compilation of sp2mp with
 * trivial selector, trivial treatment and 1 hop.
 */
@Test
public void singleHopTestForSp() {
    // One ingress and two egress points on the same device: a single rule
    // must match the ingress port and duplicate traffic to both egress ports.
    intent = LinkCollectionIntent.builder()
            .appId(APP_ID)
            .selector(selector)
            .treatment(treatment)
            .applyTreatmentOnEgress(true)
            .links(ImmutableSet.of())
            .filteredEgressPoints(ImmutableSet.of(
                    new FilteredConnectPoint(d1p10),
                    new FilteredConnectPoint(d1p11)
            ))
            .filteredIngressPoints(ImmutableSet.of(new FilteredConnectPoint(d1p0)))
            .build();
    sut.activate();
    List<Intent> compiledIntents = sut.compile(intent, Collections.emptyList());
    assertThat(compiledIntents, hasSize(1));
    Collection<FlowRule> allRules = ((FlowRuleIntent) compiledIntents.get(0)).flowRules();
    assertThat(allRules, hasSize(1));
    Collection<FlowRule> deviceOneRules = allRules.stream()
            .filter(rule -> rule.deviceId().equals(d1p0.deviceId()))
            .collect(Collectors.toSet());
    assertThat(deviceOneRules, hasSize(1));
    FlowRule ingressRule = deviceOneRules.stream()
            .filter(rule -> {
                PortCriterion inPort = (PortCriterion) rule.selector().getCriterion(IN_PORT);
                return inPort.port().equals(d1p0.port());
            })
            .findFirst()
            .get();
    TrafficSelector expectedSelector = DefaultTrafficSelector
            .builder()
            .matchInPort(d1p0.port())
            .build();
    // Output order (d1p10 then d1p11) matters for treatment equality.
    TrafficTreatment expectedTreatment = DefaultTrafficTreatment
            .builder()
            .setOutput(d1p10.port())
            .setOutput(d1p11.port())
            .build();
    assertThat(ingressRule.selector(), Is.is(expectedSelector));
    assertThat(ingressRule.treatment(), Is.is(expectedTreatment));
    sut.deactivate();
}
/**
 * We test the proper compilation of mp2sp with
 * trivial selector, trivial treatment, filtered
 * points and 1 hop.
 */
@Test
public void singleHopTestFilteredForMp() {
    // Ingress points carry different encapsulations (VLAN 100 and MPLS 69);
    // the single egress point expects VLAN 200.
    intent = LinkCollectionIntent.builder()
            .appId(APP_ID)
            .selector(selector)
            .treatment(treatment)
            .links(ImmutableSet.of())
            .filteredIngressPoints(ImmutableSet.of(
                    new FilteredConnectPoint(d1p10, vlan100Selector),
                    new FilteredConnectPoint(d1p11, mpls69Selector)
            ))
            .filteredEgressPoints(ImmutableSet.of(new FilteredConnectPoint(d1p0, vlan200Selector)))
            .build();
    sut.activate();
    List<Intent> compiled = sut.compile(intent, Collections.emptyList());
    assertThat(compiled, hasSize(1));
    Collection<FlowRule> rules = ((FlowRuleIntent) compiled.get(0)).flowRules();
    assertThat(rules, hasSize(2));
    Collection<FlowRule> rulesS1 = rules.stream()
            .filter(rule -> rule.deviceId().equals(d1p0.deviceId()))
            .collect(Collectors.toSet());
    assertThat(rulesS1, hasSize(2));
    // Rule for the VLAN-filtered ingress (d1p10).
    FlowRule ruleS1 = rulesS1.stream()
            .filter(rule -> {
                PortCriterion inPort = (PortCriterion) rule.selector().getCriterion(IN_PORT);
                return inPort.port().equals(d1p10.port());
            })
            .findFirst()
            .get();
    assertThat(ruleS1.selector(), Is.is(
            DefaultTrafficSelector
                    .builder(vlan100Selector)
                    .matchInPort(d1p10.port())
                    .build()
    ));
    // VLAN 100 -> VLAN 200 translation towards the egress point.
    assertThat(ruleS1.treatment(), Is.is(
            DefaultTrafficTreatment
                    .builder()
                    .setVlanId(((VlanIdCriterion) vlan200Selector.getCriterion(VLAN_VID)).vlanId())
                    .setOutput(d1p0.port())
                    .build()
    ));
    // Rule for the MPLS-filtered ingress (d1p11).
    ruleS1 = rulesS1.stream()
            .filter(rule -> {
                PortCriterion inPort = (PortCriterion) rule.selector().getCriterion(IN_PORT);
                return inPort.port().equals(d1p11.port());
            })
            .findFirst()
            .get();
    assertThat(ruleS1.selector(), Is.is(
            DefaultTrafficSelector
                    .builder(mpls69Selector)
                    .matchInPort(d1p11.port())
                    .build()
    ));
    // MPLS label is popped and replaced by the egress VLAN tag; the
    // pop/push/set instruction order is significant for treatment equality.
    assertThat(ruleS1.treatment(), Is.is(
            DefaultTrafficTreatment
                    .builder()
                    .popMpls(IPV4.ethType())
                    .pushVlan()
                    .setVlanId(((VlanIdCriterion) vlan200Selector.getCriterion(VLAN_VID)).vlanId())
                    .setOutput(d1p0.port())
                    .build()
    ));
    sut.deactivate();
}
/**
 * We test the proper compilation of sp2mp with
 * trivial selector, trivial treatment, filtered
 * points and 1 hop.
 */
@Test
public void singleHopTestFilteredForSp() {
    // Single VLAN-200 ingress; the two egress points expect VLAN 100 and
    // MPLS 80 respectively, so the one rule must re-encapsulate per output.
    intent = LinkCollectionIntent.builder()
            .appId(APP_ID)
            .selector(selector)
            .treatment(treatment)
            .applyTreatmentOnEgress(true)
            .links(ImmutableSet.of())
            .filteredEgressPoints(ImmutableSet.of(
                    new FilteredConnectPoint(d1p10, vlan100Selector),
                    new FilteredConnectPoint(d1p11, mpls80Selector)
            ))
            .filteredIngressPoints(ImmutableSet.of(new FilteredConnectPoint(d1p0, vlan200Selector)))
            .build();
    sut.activate();
    List<Intent> compiled = sut.compile(intent, Collections.emptyList());
    assertThat(compiled, hasSize(1));
    Collection<FlowRule> rules = ((FlowRuleIntent) compiled.get(0)).flowRules();
    assertThat(rules, hasSize(1));
    Collection<FlowRule> rulesS1 = rules.stream()
            .filter(rule -> rule.deviceId().equals(d1p0.deviceId()))
            .collect(Collectors.toSet());
    assertThat(rulesS1, hasSize(1));
    FlowRule ruleS1 = rulesS1.stream()
            .filter(rule -> {
                PortCriterion inPort = (PortCriterion) rule.selector().getCriterion(IN_PORT);
                return inPort.port().equals(d1p0.port());
            })
            .findFirst()
            .get();
    assertThat(ruleS1.selector(), Is.is(
            DefaultTrafficSelector
                    .builder(vlan200Selector)
                    .matchInPort(d1p0.port())
                    .build()
    ));
    // First output keeps a VLAN tag (rewritten to 100); the second swaps
    // the VLAN for MPLS label 80. Instruction order is equality-significant.
    assertThat(ruleS1.treatment(), Is.is(
            DefaultTrafficTreatment
                    .builder()
                    .setVlanId(((VlanIdCriterion) vlan100Selector.getCriterion(VLAN_VID)).vlanId())
                    .setOutput(d1p10.port())
                    .popVlan()
                    .pushMpls()
                    .setMpls(((MplsCriterion) mpls80Selector.getCriterion(MPLS_LABEL)).label())
                    .setOutput(d1p11.port())
                    .build()
    ));
    sut.deactivate();
}
/**
 * We test the proper compilation of mp2sp with
 * selector, treatment, filtered
 * points and 1 hop.
 */
@Test
public void singleHopNonTrivialForMp() {
    // Non-trivial intent selector (IP prefix) and treatment (eth-dst
    // rewrite), combined with per-point encapsulation filters.
    intent = LinkCollectionIntent.builder()
            .appId(APP_ID)
            .selector(ipPrefixSelector)
            .treatment(ethDstTreatment)
            .links(ImmutableSet.of())
            .filteredIngressPoints(ImmutableSet.of(
                    new FilteredConnectPoint(d1p10, vlan100Selector),
                    new FilteredConnectPoint(d1p11, mpls100Selector)
            ))
            .filteredEgressPoints(ImmutableSet.of(new FilteredConnectPoint(d1p0, vlan200Selector)))
            .build();
    sut.activate();
    List<Intent> compiled = sut.compile(intent, Collections.emptyList());
    assertThat(compiled, hasSize(1));
    Collection<FlowRule> rules = ((FlowRuleIntent) compiled.get(0)).flowRules();
    assertThat(rules, hasSize(2));
    Collection<FlowRule> rulesS1 = rules.stream()
            .filter(rule -> rule.deviceId().equals(d1p0.deviceId()))
            .collect(Collectors.toSet());
    assertThat(rulesS1, hasSize(2));
    // Rule for the VLAN-filtered ingress (d1p10): selector is the intent
    // selector augmented with the ingress VLAN and in-port.
    FlowRule ruleS1 = rulesS1.stream()
            .filter(rule -> {
                PortCriterion inPort = (PortCriterion) rule.selector().getCriterion(IN_PORT);
                return inPort.port().equals(d1p10.port());
            })
            .findFirst()
            .get();
    assertThat(ruleS1.selector(), Is.is(
            DefaultTrafficSelector
                    .builder(ipPrefixSelector)
                    .matchVlanId(((VlanIdCriterion) vlan100Selector.getCriterion(VLAN_VID)).vlanId())
                    .matchInPort(d1p10.port())
                    .build()
    ));
    // Treatment: intent eth-dst rewrite, then VLAN 100 -> 200 translation.
    assertThat(ruleS1.treatment(), Is.is(
            DefaultTrafficTreatment
                    .builder()
                    .setEthDst(((ModEtherInstruction) ethDstTreatment
                            .allInstructions()
                            .stream()
                            .filter(instruction -> instruction instanceof ModEtherInstruction)
                            .findFirst().get()).mac())
                    .setVlanId(((VlanIdCriterion) vlan200Selector.getCriterion(VLAN_VID)).vlanId())
                    .setOutput(d1p0.port())
                    .build()
    ));
    // Rule for the MPLS-filtered ingress (d1p11).
    ruleS1 = rulesS1.stream()
            .filter(rule -> {
                PortCriterion inPort = (PortCriterion) rule.selector().getCriterion(IN_PORT);
                return inPort.port().equals(d1p11.port());
            })
            .findFirst()
            .get();
    assertThat(ruleS1.selector(), Is.is(
            DefaultTrafficSelector
                    .builder(ipPrefixSelector)
                    .matchMplsLabel(((MplsCriterion) mpls100Selector.getCriterion(MPLS_LABEL)).label())
                    .matchInPort(d1p11.port())
                    .build()
    ));
    // Treatment: eth-dst rewrite, then MPLS -> VLAN re-encapsulation.
    assertThat(ruleS1.treatment(), Is.is(
            DefaultTrafficTreatment
                    .builder()
                    .setEthDst(((ModEtherInstruction) ethDstTreatment
                            .allInstructions()
                            .stream()
                            .filter(instruction -> instruction instanceof ModEtherInstruction)
                            .findFirst().get()).mac())
                    .popMpls(IPV4.ethType())
                    .pushVlan()
                    .setVlanId(((VlanIdCriterion) vlan200Selector.getCriterion(VLAN_VID)).vlanId())
                    .setOutput(d1p0.port())
                    .build()
    ));
    sut.deactivate();
}
/**
 * We test the proper compilation of sp2mp with
 * selector, treatment and 1 hop.
 */
@Test
public void singleHopNonTrivialForSp() {
    // Non-trivial selector/treatment with filtered points; treatment is
    // applied per egress because applyTreatmentOnEgress is set.
    intent = LinkCollectionIntent.builder()
            .appId(APP_ID)
            .selector(ipPrefixSelector)
            .treatment(ethDstTreatment)
            .applyTreatmentOnEgress(true)
            .links(ImmutableSet.of())
            .filteredEgressPoints(ImmutableSet.of(
                    new FilteredConnectPoint(d1p10, vlan100Selector),
                    new FilteredConnectPoint(d1p11, mpls200Selector)
            ))
            .filteredIngressPoints(ImmutableSet.of(new FilteredConnectPoint(d1p0, vlan200Selector)))
            .build();
    sut.activate();
    List<Intent> compiled = sut.compile(intent, Collections.emptyList());
    assertThat(compiled, hasSize(1));
    Collection<FlowRule> rules = ((FlowRuleIntent) compiled.get(0)).flowRules();
    assertThat(rules, hasSize(1));
    Collection<FlowRule> rulesS1 = rules.stream()
            .filter(rule -> rule.deviceId().equals(d1p0.deviceId()))
            .collect(Collectors.toSet());
    assertThat(rulesS1, hasSize(1));
    FlowRule ruleS1 = rulesS1.stream()
            .filter(rule -> {
                PortCriterion inPort = (PortCriterion) rule.selector().getCriterion(IN_PORT);
                return inPort.port().equals(d1p0.port());
            })
            .findFirst()
            .get();
    assertThat(ruleS1.selector(), Is.is(
            DefaultTrafficSelector
                    .builder(ipPrefixSelector)
                    .matchVlanId(((VlanIdCriterion) vlan200Selector.getCriterion(VLAN_VID)).vlanId())
                    .matchInPort(d1p0.port())
                    .build()
    ));
    // The eth-dst rewrite is repeated once per egress point, each followed
    // by the egress-specific re-encapsulation and output. Instruction order
    // is significant for treatment equality.
    assertThat(ruleS1.treatment(), Is.is(
            DefaultTrafficTreatment
                    .builder()
                    .setEthDst(((ModEtherInstruction) ethDstTreatment
                            .allInstructions()
                            .stream()
                            .filter(instruction -> instruction instanceof ModEtherInstruction)
                            .findFirst().get()).mac())
                    .setVlanId(((VlanIdCriterion) vlan100Selector.getCriterion(VLAN_VID)).vlanId())
                    .setOutput(d1p10.port())
                    .setEthDst(((ModEtherInstruction) ethDstTreatment
                            .allInstructions()
                            .stream()
                            .filter(instruction -> instruction instanceof ModEtherInstruction)
                            .findFirst().get()).mac())
                    .popVlan()
                    .pushMpls()
                    .setMpls(((MplsCriterion) mpls200Selector.getCriterion(MPLS_LABEL)).label())
                    .setOutput(d1p11.port())
                    .build()
    ));
    sut.deactivate();
}
}
|
package com.top.Ertebat.fragment;
import java.lang.ref.WeakReference;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.Future;
import android.app.Activity;
import android.app.Fragment;
import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.Intent;
import android.content.IntentFilter;
import android.content.SharedPreferences.Editor;
import android.database.Cursor;
import android.graphics.Color;
import android.graphics.Rect;
import android.graphics.Typeface;
import android.graphics.drawable.Drawable;
import android.net.Uri;
import android.os.Bundle;
import android.os.Handler;
import android.os.Parcelable;
import android.app.DialogFragment;
import android.app.LoaderManager;
import android.app.LoaderManager.LoaderCallbacks;
import android.content.CursorLoader;
import android.content.Loader;
import android.support.v4.content.LocalBroadcastManager;
import android.widget.CursorAdapter;
import android.text.Spannable;
import android.text.SpannableStringBuilder;
import android.text.format.DateUtils;
import android.text.style.AbsoluteSizeSpan;
import android.text.style.ForegroundColorSpan;
import android.util.Log;
import android.util.TypedValue;
import android.view.ActionMode;
import android.view.ContextMenu;
import android.view.ContextMenu.ContextMenuInfo;
import android.view.LayoutInflater;
import android.view.Menu;
import android.view.MenuInflater;
import android.view.MenuItem;
import android.view.MotionEvent;
import android.view.View;
import android.view.ViewGroup;
import android.view.Window;
import android.widget.AdapterView;
import android.widget.AdapterView.AdapterContextMenuInfo;
import android.widget.AdapterView.OnItemClickListener;
import android.widget.CheckBox;
import android.widget.ListView;
import android.widget.QuickContactBadge;
import android.widget.TextView;
import android.widget.Toast;
import com.top.Ertebat.Account;
import com.top.Ertebat.Account.SortType;
import com.top.Ertebat.FontSizes;
import com.top.Ertebat.Ertebat;
import com.top.Ertebat.Preferences;
import com.top.Ertebat.R;
import com.top.Ertebat.activity.ActivityListener;
import com.top.Ertebat.activity.ChooseFolder;
import com.top.Ertebat.activity.FolderInfoHolder;
import com.top.Ertebat.activity.MessageReference;
import com.top.Ertebat.activity.misc.ContactPictureLoader;
import com.top.Ertebat.cache.EmailProviderCache;
import com.top.Ertebat.controller.MessagingController;
import com.top.Ertebat.fragment.ConfirmationDialogFragment.ConfirmationDialogFragmentListener;
import com.top.Ertebat.helper.ContactPicture;
import com.top.Ertebat.helper.MergeCursorWithUniqueId;
import com.top.Ertebat.helper.MessageHelper;
import com.top.Ertebat.helper.StringUtils;
import com.top.Ertebat.helper.Utility;
import com.top.Ertebat.mail.Address;
import com.top.Ertebat.mail.Flag;
import com.top.Ertebat.mail.Folder;
import com.top.Ertebat.mail.Message;
import com.top.Ertebat.mail.MessagingException;
import com.top.Ertebat.mail.store.LocalStore;
import com.top.Ertebat.mail.store.LocalStore.LocalFolder;
import com.top.Ertebat.provider.EmailProvider;
import com.top.Ertebat.provider.EmailProvider.MessageColumns;
import com.top.Ertebat.provider.EmailProvider.SpecialColumns;
import com.top.Ertebat.provider.EmailProvider.ThreadColumns;
import com.top.Ertebat.search.ConditionsTreeNode;
import com.top.Ertebat.search.LocalSearch;
import com.top.Ertebat.search.SearchSpecification;
import com.top.Ertebat.search.SearchSpecification.SearchCondition;
import com.top.Ertebat.search.SearchSpecification.Searchfield;
import com.top.Ertebat.search.SqlQueryBuilder;
import com.handmark.pulltorefresh.library.ILoadingLayout;
import com.handmark.pulltorefresh.library.PullToRefreshBase;
import com.handmark.pulltorefresh.library.PullToRefreshListView;
public class MessageListFragment extends Fragment implements OnItemClickListener,
ConfirmationDialogFragmentListener, LoaderCallbacks<Cursor> {
// Projection used when querying the message list from EmailProvider in
// threaded mode. The *_COLUMN constants below are positional indexes into
// this array and MUST be kept in sync with its order.
private static final String[] THREADED_PROJECTION = {
    MessageColumns.ID,
    MessageColumns.UID,
    MessageColumns.INTERNAL_DATE,
    MessageColumns.SUBJECT,
    MessageColumns.DATE,
    MessageColumns.SENDER_LIST,
    MessageColumns.TO_LIST,
    MessageColumns.CC_LIST,
    MessageColumns.READ,
    MessageColumns.FLAGGED,
    MessageColumns.ANSWERED,
    MessageColumns.FORWARDED,
    MessageColumns.ATTACHMENT_COUNT,
    MessageColumns.FOLDER_ID,
    MessageColumns.PREVIEW,
    ThreadColumns.ROOT,
    SpecialColumns.ACCOUNT_UUID,
    SpecialColumns.FOLDER_NAME,
    SpecialColumns.THREAD_COUNT,
};
// Cursor column indexes matching THREADED_PROJECTION above.
private static final int ID_COLUMN = 0;
private static final int UID_COLUMN = 1;
private static final int INTERNAL_DATE_COLUMN = 2;
private static final int SUBJECT_COLUMN = 3;
private static final int DATE_COLUMN = 4;
private static final int SENDER_LIST_COLUMN = 5;
private static final int TO_LIST_COLUMN = 6;
private static final int CC_LIST_COLUMN = 7;
private static final int READ_COLUMN = 8;
private static final int FLAGGED_COLUMN = 9;
private static final int ANSWERED_COLUMN = 10;
private static final int FORWARDED_COLUMN = 11;
private static final int ATTACHMENT_COUNT_COLUMN = 12;
private static final int FOLDER_ID_COLUMN = 13;
private static final int PREVIEW_COLUMN = 14;
private static final int THREAD_ROOT_COLUMN = 15;
private static final int ACCOUNT_UUID_COLUMN = 16;
private static final int FOLDER_NAME_COLUMN = 17;
private static final int THREAD_COUNT_COLUMN = 18;
// Non-threaded projection: identical to THREADED_PROJECTION minus the
// trailing THREAD_COUNT column (copyOf length == THREAD_COUNT_COLUMN, i.e.
// the first 18 entries), so all shared column indexes stay valid.
private static final String[] PROJECTION = Arrays.copyOf(THREADED_PROJECTION,
    THREAD_COUNT_COLUMN);
/**
 * Builds a {@code MessageListFragment} configured through its arguments
 * bundle (the standard fragment-recreation-safe way to pass parameters).
 *
 * @param search          search describing the messages to list
 * @param isThreadDisplay whether the fragment shows a single thread
 * @param threadedList    whether messages are aggregated into threads
 */
public static MessageListFragment newInstance(LocalSearch search, boolean isThreadDisplay, boolean threadedList) {
    Bundle arguments = new Bundle();
    arguments.putParcelable(ARG_SEARCH, search);
    arguments.putBoolean(ARG_IS_THREAD_DISPLAY, isThreadDisplay);
    arguments.putBoolean(ARG_THREADED_LIST, threadedList);
    MessageListFragment instance = new MessageListFragment();
    instance.setArguments(arguments);
    return instance;
}
/**
 * Reverses the result of a {@link Comparator}.
 *
 * @param <T> type compared by the delegate
 */
public static class ReverseComparator<T> implements Comparator<T> {
    // Comparator whose ordering is inverted. Immutable after construction.
    private final Comparator<T> mDelegate;

    /**
     * @param delegate
     *         Never {@code null}.
     * @throws NullPointerException if {@code delegate} is {@code null}
     *         (fail fast instead of NPE-ing on the first compare() call)
     */
    public ReverseComparator(final Comparator<T> delegate) {
        if (delegate == null) {
            throw new NullPointerException("delegate must not be null");
        }
        mDelegate = delegate;
    }

    @Override
    public int compare(final T object1, final T object2) {
        // arg1 & 2 are mixed up, this is done on purpose: delegating with
        // swapped arguments inverts the sort order.
        return mDelegate.compare(object2, object1);
    }
}
/**
 * Chains comparators to find the first non-0 result.
 *
 * @param <T> type compared by every comparator in the chain
 */
public static class ComparatorChain<T> implements Comparator<T> {
    // Ordered list of comparators; earlier entries take precedence.
    // Immutable reference after construction.
    private final List<Comparator<T>> mChain;

    /**
     * @param chain
     *         Comparator chain. Never {@code null}.
     */
    public ComparatorChain(final List<Comparator<T>> chain) {
        mChain = chain;
    }

    @Override
    public int compare(T object1, T object2) {
        // Return the first decisive (non-zero) comparison; an empty chain
        // or all-equal results yield 0, as before.
        for (final Comparator<T> comparator : mChain) {
            final int result = comparator.compare(object1, object2);
            if (result != 0) {
                return result;
            }
        }
        return 0;
    }
}
/**
 * Orders cursor rows by descending {@code _id}.
 */
public static class ReverseIdComparator implements Comparator<Cursor> {
    // Lazily resolved "_id" column index; both cursors are assumed to share
    // the same projection — TODO confirm against callers.
    private int mIdColumn = -1;

    @Override
    public int compare(Cursor cursor1, Cursor cursor2) {
        if (mIdColumn == -1) {
            mIdColumn = cursor1.getColumnIndex("_id");
        }
        long o1Id = cursor1.getLong(mIdColumn);
        long o2Id = cursor2.getLong(mIdColumn);
        // Descending order. The previous "(o1Id > o2Id) ? -1 : 1" returned 1
        // for equal ids, violating the Comparator contract (compare(a, a)
        // must be 0 and sgn(compare(a, b)) == -sgn(compare(b, a))).
        // Long.compare with reversed arguments fixes that.
        return Long.compare(o2Id, o1Id);
    }
}
/**
 * Orders messages with attachments before messages without.
 */
public static class AttachmentComparator implements Comparator<Cursor> {
    @Override
    public int compare(Cursor cursor1, Cursor cursor2) {
        final boolean firstHasAttachment = cursor1.getInt(ATTACHMENT_COUNT_COLUMN) > 0;
        final boolean secondHasAttachment = cursor2.getInt(ATTACHMENT_COUNT_COLUMN) > 0;
        // true (has attachment) sorts first, hence the reversed arguments.
        return Boolean.compare(secondHasAttachment, firstHasAttachment);
    }
}
/**
 * Orders flagged (starred) messages before unflagged ones.
 */
public static class FlaggedComparator implements Comparator<Cursor> {
    @Override
    public int compare(Cursor cursor1, Cursor cursor2) {
        final boolean firstFlagged = cursor1.getInt(FLAGGED_COLUMN) == 1;
        final boolean secondFlagged = cursor2.getInt(FLAGGED_COLUMN) == 1;
        // true (flagged) sorts first, hence the reversed arguments.
        return Boolean.compare(secondFlagged, firstFlagged);
    }
}
/**
 * Orders unread messages (READ == 0) before read ones.
 */
public static class UnreadComparator implements Comparator<Cursor> {
    @Override
    public int compare(Cursor cursor1, Cursor cursor2) {
        // READ column holds 0 (unread) or 1 (read), so ascending order on
        // the raw value puts unread messages first.
        return Integer.compare(cursor1.getInt(READ_COLUMN), cursor2.getInt(READ_COLUMN));
    }
}
/**
 * Orders messages by their sent date, ascending.
 */
public static class DateComparator implements Comparator<Cursor> {
    @Override
    public int compare(Cursor cursor1, Cursor cursor2) {
        final long firstDate = cursor1.getLong(DATE_COLUMN);
        final long secondDate = cursor2.getLong(DATE_COLUMN);
        // Long.compare yields the same -1/0/1 as the hand-rolled branches.
        return Long.compare(firstDate, secondDate);
    }
}
/**
 * Orders messages by their arrival (internal) date, ascending.
 */
public static class ArrivalComparator implements Comparator<Cursor> {
    @Override
    public int compare(Cursor cursor1, Cursor cursor2) {
        final long firstArrival = cursor1.getLong(INTERNAL_DATE_COLUMN);
        final long secondArrival = cursor2.getLong(INTERNAL_DATE_COLUMN);
        // Long.compare yields the same -1/0/1 as the hand-rolled branches.
        return Long.compare(firstArrival, secondArrival);
    }
}
/**
 * Orders messages by subject, case-insensitively; a {@code null} subject
 * sorts before any non-null subject.
 */
public static class SubjectComparator implements Comparator<Cursor> {
    @Override
    public int compare(Cursor cursor1, Cursor cursor2) {
        final String subject1 = cursor1.getString(SUBJECT_COLUMN);
        final String subject2 = cursor2.getString(SUBJECT_COLUMN);
        if (subject1 == null && subject2 == null) {
            return 0;
        }
        if (subject1 == null) {
            return -1;
        }
        if (subject2 == null) {
            return 1;
        }
        return subject1.compareToIgnoreCase(subject2);
    }
}
/**
 * Orders messages by sender address, case-insensitively. Note the null
 * ordering is the opposite of {@link SubjectComparator}: a {@code null}
 * sender sorts AFTER non-null senders.
 */
public static class SenderComparator implements Comparator<Cursor> {
    @Override
    public int compare(Cursor cursor1, Cursor cursor2) {
        final String sender1 = getSenderAddressFromCursor(cursor1);
        final String sender2 = getSenderAddressFromCursor(cursor2);
        if (sender1 == null) {
            return (sender2 == null) ? 0 : 1;
        }
        if (sender2 == null) {
            return -1;
        }
        return sender1.compareToIgnoreCase(sender2);
    }
}
// Request codes for ChooseFolder activity results (move/copy targets).
private static final int ACTIVITY_CHOOSE_FOLDER_MOVE = 1;
private static final int ACTIVITY_CHOOSE_FOLDER_COPY = 2;
// Keys for the fragment arguments bundle (see newInstance()).
private static final String ARG_SEARCH = "searchObject";
private static final String ARG_THREADED_LIST = "threadedList";
private static final String ARG_IS_THREAD_DISPLAY = "isThreadedDisplay";
// Keys used when saving and restoring instance state.
private static final String STATE_SELECTED_MESSAGES = "selectedMessages";
private static final String STATE_ACTIVE_MESSAGE = "activeMessage";
private static final String STATE_REMOTE_SEARCH_PERFORMED = "remoteSearchPerformed";
private static final String STATE_MESSAGE_LIST = "listState";
/**
 * Maps a {@link SortType} to a {@link Comparator} implementation.
 * All comparators registered here sort in "ascending" order; reversal is
 * done by wrapping them in {@link ReverseComparator} at lookup time.
 */
private static final Map<SortType, Comparator<Cursor>> SORT_COMPARATORS;
static {
    // fill the mapping at class time loading
    final Map<SortType, Comparator<Cursor>> map =
            new EnumMap<SortType, Comparator<Cursor>>(SortType.class);
    map.put(SortType.SORT_ATTACHMENT, new AttachmentComparator());
    map.put(SortType.SORT_DATE, new DateComparator());
    map.put(SortType.SORT_ARRIVAL, new ArrivalComparator());
    map.put(SortType.SORT_FLAGGED, new FlaggedComparator());
    map.put(SortType.SORT_SUBJECT, new SubjectComparator());
    map.put(SortType.SORT_SENDER, new SenderComparator());
    map.put(SortType.SORT_UNREAD, new UnreadComparator());
    // make it immutable to prevent accidental alteration (content is immutable already)
    SORT_COMPARATORS = Collections.unmodifiableMap(map);
}
// --- View hierarchy -----------------------------------------------------
private ListView mListView;
private PullToRefreshListView mPullToRefreshView;
// List scroll position saved across configuration changes.
private Parcelable mSavedListState;
// Number of preview lines shown per message row.
private int mPreviewLines = 0;
private MessageListAdapter mAdapter;
private View mFooterView;
private FolderInfoHolder mCurrentFolder;
private LayoutInflater mInflater;
private MessagingController mController;
// --- Account / data state -----------------------------------------------
private Account mAccount;
private String[] mAccountUuids;
private int mUnreadMessageCount = 0;
// One cursor (and validity flag) per queried account; merged for display.
private Cursor[] mCursors;
private boolean[] mCursorValid;
private int mUniqueIdColumn;
/**
 * Stores the name of the folder that we want to open as soon as possible after load.
 */
private String mFolderName;
// --- Remote search ------------------------------------------------------
private boolean mRemoteSearchPerformed = false;
private Future<?> mRemoteSearchFuture = null;
public List<Message> mExtraSearchResults;
private String mTitle;
private LocalSearch mSearch = null;
private boolean mSingleAccountMode;
private boolean mSingleFolderMode;
private boolean mAllAccounts;
private MessageListHandler mHandler = new MessageListHandler(this);
// --- Sorting and display preferences ------------------------------------
private SortType mSortType = SortType.SORT_DATE;
private boolean mSortAscending = true;
private boolean mSortDateAscending = false;
private boolean mSenderAboveSubject = false;
private boolean mCheckboxes = true;
private boolean mStars = true;
// --- Multi-select state -------------------------------------------------
// mSelected holds the unique (per-cursor) ids of the selected rows.
private int mSelectedCount = 0;
private Set<Long> mSelected = new HashSet<Long>();
private FontSizes mFontSizes = Ertebat.getFontSizes();
private ActionMode mActionMode;
// null until connectivity has been determined — TODO confirm semantics.
private Boolean mHasConnectivity;
/**
 * Relevant messages for the current context when we have to remember the chosen messages
 * between user interactions (e.g. selecting a folder for move operation).
 */
private List<Message> mActiveMessages;
/* package visibility for faster inner class access */
MessageHelper mMessageHelper;
private ActionModeCallback mActionModeCallback = new ActionModeCallback();
private MessageListFragmentListener mFragmentListener;
private boolean mThreadedList;
private boolean mIsThreadDisplay;
private Context mContext;
private final ActivityListener mListener = new MessageListActivityListener();
private Preferences mPreferences;
private boolean mLoaderJustInitialized;
private MessageReference mActiveMessage;
/**
 * {@code true} after {@link #onCreate(Bundle)} was executed. Used in {@link #updateTitle()} to
 * make sure we don't access member variables before initialization is complete.
 */
private boolean mInitialized = false;
private ContactPictureLoader mContactsPictureLoader;
// --- Cache invalidation broadcasts --------------------------------------
private LocalBroadcastManager mLocalBroadcastManager;
private BroadcastReceiver mCacheBroadcastReceiver;
private IntentFilter mCacheIntentFilter;
/**
* Stores the unique ID of the message the context menu was opened for.
*
* We have to save this because the message list might change between the time the menu was
* opened and when the user clicks on a menu item. When this happens the 'adapter position' that
* is accessible via the {@code ContextMenu} object might correspond to another list item and we
* would end up using/modifying the wrong message.
*
* The value of this field is {@code 0} when no context menu is currently open.
*/
private long mContextMenuUniqueId = 0;
/**
 * This class is used to run operations that modify UI elements in the UI thread.
 *
 * <p>We are using convenience methods that add a {@link android.os.Message} instance or a
 * {@link Runnable} to the message queue.</p>
 *
 * <p><strong>Note:</strong> If you add a method to this class make sure you don't accidentally
 * perform the operation in the calling thread.</p>
 */
static class MessageListHandler extends Handler {
    private static final int ACTION_FOLDER_LOADING = 1;
    private static final int ACTION_REFRESH_TITLE = 2;
    private static final int ACTION_PROGRESS = 3;
    private static final int ACTION_REMOTE_SEARCH_FINISHED = 4;
    private static final int ACTION_GO_BACK = 5;
    private static final int ACTION_RESTORE_LIST_POSITION = 6;
    private static final int ACTION_OPEN_MESSAGE = 7;
    // Weak reference so queued messages don't keep a destroyed fragment alive.
    private WeakReference<MessageListFragment> mFragment;
    public MessageListHandler(MessageListFragment fragment) {
        mFragment = new WeakReference<MessageListFragment>(fragment);
    }
    // Notify the fragment (on the UI thread) that a folder started/stopped loading.
    public void folderLoading(String folder, boolean loading) {
        android.os.Message msg = android.os.Message.obtain(this, ACTION_FOLDER_LOADING,
                (loading) ? 1 : 0, 0, folder);
        sendMessage(msg);
    }
    // Request a window title refresh on the UI thread.
    public void refreshTitle() {
        android.os.Message msg = android.os.Message.obtain(this, ACTION_REFRESH_TITLE);
        sendMessage(msg);
    }
    // Show or hide the progress indicator; the flag travels in arg1.
    public void progress(final boolean progress) {
        android.os.Message msg = android.os.Message.obtain(this, ACTION_PROGRESS,
                (progress) ? 1 : 0, 0);
        sendMessage(msg);
    }
    public void remoteSearchFinished() {
        android.os.Message msg = android.os.Message.obtain(this, ACTION_REMOTE_SEARCH_FINISHED);
        sendMessage(msg);
    }
    // Update the footer text; posted as a Runnable rather than a Message.
    public void updateFooter(final String message) {
        post(new Runnable() {
            @Override
            public void run() {
                MessageListFragment fragment = mFragment.get();
                if (fragment != null) {
                    fragment.updateFooter(message);
                }
            }
        });
    }
    public void goBack() {
        android.os.Message msg = android.os.Message.obtain(this, ACTION_GO_BACK);
        sendMessage(msg);
    }
    // Restore the saved list scroll position, clearing it so it is used only once.
    public void restoreListPosition() {
        MessageListFragment fragment = mFragment.get();
        if (fragment != null) {
            android.os.Message msg = android.os.Message.obtain(this, ACTION_RESTORE_LIST_POSITION,
                    fragment.mSavedListState);
            fragment.mSavedListState = null;
            sendMessage(msg);
        }
    }
    public void openMessage(MessageReference messageReference) {
        android.os.Message msg = android.os.Message.obtain(this, ACTION_OPEN_MESSAGE,
                messageReference);
        sendMessage(msg);
    }
    @Override
    public void handleMessage(android.os.Message msg) {
        MessageListFragment fragment = mFragment.get();
        if (fragment == null) {
            return;
        }
        // The following messages don't need an attached activity.
        switch (msg.what) {
            case ACTION_REMOTE_SEARCH_FINISHED: {
                fragment.remoteSearchFinished();
                return;
            }
        }
        // Discard messages if the fragment isn't attached to an activity anymore.
        Activity activity = fragment.getActivity();
        if (activity == null) {
            return;
        }
        switch (msg.what) {
            case ACTION_FOLDER_LOADING: {
                String folder = (String) msg.obj;
                boolean loading = (msg.arg1 == 1);
                fragment.folderLoading(folder, loading);
                break;
            }
            case ACTION_REFRESH_TITLE: {
                fragment.updateTitle();
                break;
            }
            case ACTION_PROGRESS: {
                boolean progress = (msg.arg1 == 1);
                fragment.progress(progress);
                break;
            }
            case ACTION_GO_BACK: {
                fragment.mFragmentListener.goBack();
                break;
            }
            case ACTION_RESTORE_LIST_POSITION: {
                fragment.mListView.onRestoreInstanceState((Parcelable) msg.obj);
                break;
            }
            case ACTION_OPEN_MESSAGE: {
                MessageReference messageReference = (MessageReference) msg.obj;
                fragment.mFragmentListener.openMessage(messageReference);
                break;
            }
        }
    }
}
/**
 * @return The comparator to use to display messages in an ordered
 *         fashion. Never {@code null}.
 */
protected Comparator<Cursor> getComparator() {
    // At most three comparators end up in the chain:
    // the primary sort, an optional date tie-breaker, and an id tie-breaker.
    final List<Comparator<Cursor>> comparators = new ArrayList<Comparator<Cursor>>(3);
    // Primary sort, possibly reversed.
    final Comparator<Cursor> primary = SORT_COMPARATORS.get(mSortType);
    comparators.add(mSortAscending ? primary : new ReverseComparator<Cursor>(primary));
    // Tie-break by date unless a date-based sort is already the primary one.
    final boolean dateIsPrimary =
            (mSortType == SortType.SORT_DATE || mSortType == SortType.SORT_ARRIVAL);
    if (!dateIsPrimary) {
        final Comparator<Cursor> byDate = SORT_COMPARATORS.get(SortType.SORT_DATE);
        comparators.add(mSortDateAscending ? byDate : new ReverseComparator<Cursor>(byDate));
    }
    // Final tie-breaker: reverse id order.
    comparators.add(new ReverseIdComparator());
    return new ComparatorChain<Cursor>(comparators);
}
/**
 * Record the loading state of a folder and refresh the footer.
 * Only updates state when the named folder is the one currently displayed.
 */
private void folderLoading(String folder, boolean loading) {
    FolderInfoHolder current = mCurrentFolder;
    if (current != null && current.name.equals(folder)) {
        current.loading = loading;
    }
    updateFooterView();
}
/**
 * Refresh the window title and, for non-manual searches, the progress bar.
 * No-op until {@link #onCreate(Bundle)} has completed (mInitialized).
 */
public void updateTitle() {
    if (!mInitialized) {
        return;
    }
    setWindowTitle();
    final boolean manualSearch = mSearch.isManualSearch();
    if (!manualSearch) {
        setWindowProgress();
    }
}
/**
 * Report the current folder's sync progress to the hosting activity.
 *
 * <p>Defaults to {@link Window#PROGRESS_END} ("done") unless the current
 * folder is actively loading and a total is known.</p>
 */
private void setWindowProgress() {
    int level = Window.PROGRESS_END;
    // Read the total exactly once so the guard and the division below cannot
    // diverge if the listener's value changes between calls (the original
    // called getFolderTotal() twice and then re-checked it for zero).
    final int total = (mCurrentFolder != null && mCurrentFolder.loading)
            ? mListener.getFolderTotal() : 0;
    if (total > 0) {
        // Multiply before dividing (in long, to avoid overflow) instead of
        // (PROGRESS_END / total) * completed, which truncates per message and
        // underestimates the progress level.
        level = (int) ((long) Window.PROGRESS_END * mListener.getFolderCompleted() / total);
        if (level > Window.PROGRESS_END) {
            level = Window.PROGRESS_END;
        }
    }
    mFragmentListener.setMessageListProgress(level);
}
/**
 * Update the action bar title, subtitle and unread count depending on the
 * current display mode (single-folder view vs. search/query results).
 */
private void setWindowTitle() {
    // regular folder content display
    if (!isManualSearch() && mSingleFolderMode) {
        Activity activity = getActivity();
        String displayName = FolderInfoHolder.getDisplayName(activity, mAccount,
                mFolderName);
        mFragmentListener.setMessageListTitle(displayName);
        // Subtitle shows the running operation if there is one, else the
        // account's email address.
        String operation = mListener.getOperation(activity);
        if (operation.length() < 1) {
            mFragmentListener.setMessageListSubTitle(mAccount.getEmail());
        } else {
            mFragmentListener.setMessageListSubTitle(operation);
        }
    } else {
        // query result display. This may be for a search folder as opposed to a user-initiated search.
        if (mTitle != null) {
            // This was a search folder; the search folder has overridden our title.
            mFragmentListener.setMessageListTitle(mTitle);
        } else {
            // This is a search result; set it to the default search result line.
            mFragmentListener.setMessageListTitle(getString(R.string.search_results));
        }
        mFragmentListener.setMessageListSubTitle(null);
    }
    // set unread count
    if (mUnreadMessageCount <= 0) {
        mFragmentListener.setUnreadCount(0);
    } else {
        if (!mSingleFolderMode && mTitle == null) {
            // The unread message count is easily confused
            // with total number of messages in the search result, so let's hide it.
            mFragmentListener.setUnreadCount(0);
        } else {
            mFragmentListener.setUnreadCount(mUnreadMessageCount);
        }
    }
}
/**
 * Toggle the action bar progress indicator; when progress ends, also finish
 * any running pull-to-refresh animation.
 */
private void progress(final boolean progress) {
    mFragmentListener.enableActionBarProgress(progress);
    if (!progress && mPullToRefreshView != null) {
        mPullToRefreshView.onRefreshComplete();
    }
}
@Override
public void onItemClick(AdapterView<?> parent, View view, int position, long id) {
    // Footer taps either load more messages from the folder or fetch the next
    // batch of remote-search results; they never open or select a message.
    if (view == mFooterView) {
        if (mCurrentFolder != null && !mSearch.isManualSearch()) {
            mController.loadMoreMessages(mAccount, mFolderName, null);
        } else if (mCurrentFolder != null && isRemoteSearch() &&
                mExtraSearchResults != null && mExtraSearchResults.size() > 0) {
            // Load more remote-search results, respecting the account's
            // configured batch limit.
            int numResults = mExtraSearchResults.size();
            int limit = mAccount.getRemoteSearchNumResults();
            List<Message> toProcess = mExtraSearchResults;
            if (limit > 0 && numResults > limit) {
                // Process one batch; keep the remainder for the next tap.
                toProcess = toProcess.subList(0, limit);
                mExtraSearchResults = mExtraSearchResults.subList(limit,
                        mExtraSearchResults.size());
            } else {
                // Everything fits in one batch; clear the pending results.
                mExtraSearchResults = null;
                updateFooter("");
            }
            mController.loadSearchResults(mAccount, mCurrentFolder.name, toProcess, mListener);
        }
        return;
    }
    Cursor cursor = (Cursor) parent.getItemAtPosition(position);
    if (cursor == null) {
        return;
    }
    // While a selection is active, taps toggle selection instead of opening.
    if (mSelectedCount > 0) {
        toggleMessageSelect(position);
    } else {
        if (mThreadedList && cursor.getInt(THREAD_COUNT_COLUMN) > 1) {
            Account account = getAccountFromCursor(cursor);
            long folderId = cursor.getLong(FOLDER_ID_COLUMN);
            String folderName = getFolderNameById(account, folderId);
            // If threading is enabled and this item represents a thread, display the thread contents.
            long rootId = cursor.getLong(THREAD_ROOT_COLUMN);
            mFragmentListener.showThread(account, folderName, rootId);
        } else {
            // This item represents a message; just display the message.
            openMessageAtPosition(listViewToAdapterPosition(position));
        }
    }
}
/**
 * Capture the application context and the hosting activity's callback
 * interface. Fails fast when the activity doesn't implement
 * {@link MessageListFragmentListener}.
 */
@Override
public void onAttach(Activity activity) {
    super.onAttach(activity);
    mContext = activity.getApplicationContext();
    if (activity instanceof MessageListFragmentListener) {
        mFragmentListener = (MessageListFragmentListener) activity;
    } else {
        throw new ClassCastException(activity.getClass() +
                " must implement MessageListFragmentListener");
    }
}
@Override
public void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    Context appContext = getActivity().getApplicationContext();
    mPreferences = Preferences.getPreferences(appContext);
    mController = MessagingController.getInstance(getActivity().getApplication());
    // Cache the display preferences that affect how list rows are rendered.
    mPreviewLines = Ertebat.messageListPreviewLines();
    mCheckboxes = Ertebat.messageListCheckboxes();
    mStars = Ertebat.messageListStars();
    if (Ertebat.showContactPicture()) {
        mContactsPictureLoader = ContactPicture.getContactPictureLoader(getActivity());
    }
    restoreInstanceState(savedInstanceState);
    decodeArguments();
    createCacheBroadcastReceiver(appContext);
    // From here on updateTitle() may safely access member state.
    mInitialized = true;
}
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container,
        Bundle savedInstanceState) {
    mInflater = inflater;
    View view = inflater.inflate(R.layout.message_list_fragment, container, false);
    // The pull-to-refresh wrapper must be set up before initializeLayout(),
    // which obtains the inner ListView from it.
    initializePullToRefresh(inflater, view);
    initializeLayout();
    mListView.setVerticalFadingEdgeEnabled(false);
    return view;
}
@Override
public void onDestroyView() {
    // Remember the list's scroll state so it can be restored after the view
    // is recreated (see restoreListPosition()).
    mSavedListState = mListView.onSaveInstanceState();
    super.onDestroyView();
}
@Override
public void onActivityCreated(Bundle savedInstanceState) {
    super.onActivityCreated(savedInstanceState);
    mMessageHelper = MessageHelper.getInstance(getActivity());
    initializeMessageList();
    // This needs to be done before initializing the cursor loader below
    initializeSortSettings();
    mLoaderJustInitialized = true;
    // One loader per account; each account gets its own cursor slot.
    LoaderManager loaderManager = getLoaderManager();
    int len = mAccountUuids.length;
    mCursors = new Cursor[len];
    mCursorValid = new boolean[len];
    for (int i = 0; i < len; i++) {
        loaderManager.initLoader(i, null, this);
        mCursorValid[i] = false;
    }
}
/**
 * Persist transient UI state; restored by {@link #restoreInstanceState(Bundle)}.
 */
@Override
public void onSaveInstanceState(Bundle outState) {
    super.onSaveInstanceState(outState);
    saveSelectedMessages(outState);
    saveListState(outState);
    outState.putBoolean(STATE_REMOTE_SEARCH_PERFORMED, mRemoteSearchPerformed);
    outState.putParcelable(STATE_ACTIVE_MESSAGE, mActiveMessage);
}
/**
 * Restore the state of a previous {@link MessageListFragment} instance.
 *
 * @param savedInstanceState
 *         the bundle written by {@link #onSaveInstanceState(Bundle)};
 *         may be {@code null} on a fresh start.
 *
 * @see #onSaveInstanceState(Bundle)
 */
private void restoreInstanceState(Bundle savedInstanceState) {
    if (savedInstanceState == null) {
        return;
    }
    restoreSelectedMessages(savedInstanceState);
    mRemoteSearchPerformed = savedInstanceState.getBoolean(STATE_REMOTE_SEARCH_PERFORMED);
    mSavedListState = savedInstanceState.getParcelable(STATE_MESSAGE_LIST);
    mActiveMessage = savedInstanceState.getParcelable(STATE_ACTIVE_MESSAGE);
}
/**
 * Write the unique IDs of selected messages to a {@link Bundle}.
 */
private void saveSelectedMessages(Bundle outState) {
    final long[] ids = new long[mSelected.size()];
    int index = 0;
    for (long id : mSelected) {
        ids[index] = id;
        index++;
    }
    outState.putLongArray(STATE_SELECTED_MESSAGES, ids);
}
/**
 * Restore selected messages from a {@link Bundle}.
 *
 * @param savedInstanceState
 *         the bundle previously populated by {@link #saveSelectedMessages(Bundle)}
 */
private void restoreSelectedMessages(Bundle savedInstanceState) {
    long[] selected = savedInstanceState.getLongArray(STATE_SELECTED_MESSAGES);
    // Bundle.getLongArray() returns null when the key is absent; guard
    // against the NPE the unconditional for-each would otherwise throw.
    if (selected == null) {
        return;
    }
    for (long id : selected) {
        mSelected.add(Long.valueOf(id));
    }
}
/**
 * Save the list's scroll state into {@code outState}. A state saved earlier
 * (and never consumed) takes precedence over the live view's state.
 */
private void saveListState(Bundle outState) {
    if (mSavedListState != null) {
        // The previously saved state was never restored, so just use that.
        outState.putParcelable(STATE_MESSAGE_LIST, mSavedListState);
        return;
    }
    if (mListView != null) {
        outState.putParcelable(STATE_MESSAGE_LIST, mListView.onSaveInstanceState());
    }
}
/**
 * Load the sort settings: per-account in single-account mode, global otherwise.
 */
private void initializeSortSettings() {
    if (mSingleAccountMode) {
        mSortType = mAccount.getSortType();
        mSortAscending = mAccount.isSortAscending(mSortType);
        mSortDateAscending = mAccount.isSortAscending(SortType.SORT_DATE);
    } else {
        mSortType = Ertebat.getSortType();
        mSortAscending = Ertebat.isSortAscending(mSortType);
        mSortDateAscending = Ertebat.isSortAscending(SortType.SORT_DATE);
    }
}
/**
 * Populate member variables from the fragment's argument {@link Bundle}:
 * the search specification, the account/folder scope flags, and the list of
 * account UUIDs this fragment displays.
 */
private void decodeArguments() {
    Bundle args = getArguments();
    mThreadedList = args.getBoolean(ARG_THREADED_LIST, false);
    mIsThreadDisplay = args.getBoolean(ARG_IS_THREAD_DISPLAY, false);
    mSearch = args.getParcelable(ARG_SEARCH);
    mTitle = mSearch.getName();
    String[] accountUuids = mSearch.getAccountUuids();
    // Single-account mode: exactly one account and not an all-accounts search.
    mSingleAccountMode = false;
    if (accountUuids.length == 1 && !mSearch.searchAllAccounts()) {
        mSingleAccountMode = true;
        mAccount = mPreferences.getAccount(accountUuids[0]);
    }
    // Single-folder mode additionally requires exactly one folder.
    mSingleFolderMode = false;
    if (mSingleAccountMode && (mSearch.getFolderNames().size() == 1)) {
        mSingleFolderMode = true;
        mFolderName = mSearch.getFolderNames().get(0);
        mCurrentFolder = getFolder(mFolderName, mAccount);
    }
    mAllAccounts = false;
    if (mSingleAccountMode) {
        mAccountUuids = new String[] { mAccount.getUuid() };
    } else {
        if (accountUuids.length == 1 &&
                accountUuids[0].equals(SearchSpecification.ALL_ACCOUNTS)) {
            // Expand the ALL_ACCOUNTS marker into the concrete account list.
            mAllAccounts = true;
            Account[] accounts = mPreferences.getAccounts();
            mAccountUuids = new String[accounts.length];
            for (int i = 0, len = accounts.length; i < len; i++) {
                mAccountUuids[i] = accounts[i].getUuid();
            }
            // With only one configured account this degenerates to single-account mode.
            if (mAccountUuids.length == 1) {
                mSingleAccountMode = true;
                mAccount = accounts[0];
            }
        } else {
            mAccountUuids = accountUuids;
        }
    }
}
/**
 * Create the adapter, resolve the current folder, and wire the adapter (and,
 * in single-folder mode, the "load more" footer) to the list view.
 */
private void initializeMessageList() {
    mAdapter = new MessageListAdapter();
    if (mFolderName != null) {
        mCurrentFolder = getFolder(mFolderName, mAccount);
    }
    // The footer only makes sense when showing a single folder.
    if (mSingleFolderMode) {
        mListView.addFooterView(getFooterView(mListView));
        updateFooterView();
    }
    mListView.setAdapter(mAdapter);
}
/**
 * Create (but do not yet register) the receiver that redraws the list when
 * the {@link EmailProviderCache} broadcasts an update. Registration happens
 * in {@link #onResume()}, unregistration in {@link #onPause()}.
 */
private void createCacheBroadcastReceiver(Context appContext) {
    mLocalBroadcastManager = LocalBroadcastManager.getInstance(appContext);
    mCacheBroadcastReceiver = new BroadcastReceiver() {
        @Override
        public void onReceive(Context context, Intent intent) {
            mAdapter.notifyDataSetChanged();
        }
    };
    mCacheIntentFilter = new IntentFilter(EmailProviderCache.ACTION_CACHE_UPDATED);
}
/**
 * Look up a local folder by name and wrap it in a {@link FolderInfoHolder}.
 *
 * @return the holder, or {@code null} if the folder could not be opened.
 */
private FolderInfoHolder getFolder(String folder, Account account) {
    LocalFolder localFolder = null;
    try {
        LocalStore localStore = account.getLocalStore();
        localFolder = localStore.getFolder(folder);
        return new FolderInfoHolder(mContext, localFolder, account);
    } catch (Exception e) {
        Log.e(Ertebat.LOG_TAG, "getFolder(" + folder + ") goes boom: ", e);
        return null;
    } finally {
        // Always release the folder handle, even on failure.
        if (localFolder != null) {
            localFolder.close();
        }
    }
}
/**
 * Resolve a local folder's display name from its database ID.
 *
 * @return the folder name, or {@code null} when lookup fails.
 */
private String getFolderNameById(Account account, long folderId) {
    try {
        Folder folder = getFolderById(account, folderId);
        return (folder == null) ? null : folder.getName();
    } catch (Exception e) {
        Log.e(Ertebat.LOG_TAG, "getFolderNameById() failed.", e);
        return null;
    }
}
/**
 * Open a local folder (read-only) by its database ID.
 *
 * @return the opened folder, or {@code null} when lookup or open fails.
 */
private Folder getFolderById(Account account, long folderId) {
    try {
        LocalStore localStore = account.getLocalStore();
        LocalFolder localFolder = localStore.getFolderById(folderId);
        localFolder.open(Folder.OPEN_MODE_RO);
        return localFolder;
    } catch (Exception e) {
        // Fixed copy-paste error: previously logged "getFolderNameById() failed."
        Log.e(Ertebat.LOG_TAG, "getFolderById() failed.", e);
        return null;
    }
}
@Override
public void onPause() {
    super.onPause();
    // Stop receiving cache updates and controller callbacks while paused;
    // mirrors the registration done in onResume().
    mLocalBroadcastManager.unregisterReceiver(mCacheBroadcastReceiver);
    mListener.onPause(getActivity());
    mController.removeListener(mListener);
}
/**
 * On resume we refresh messages for the folder that is currently open.
 * This guarantees that things like unread message count and read status
 * are updated.
 */
@Override
public void onResume() {
    super.onResume();
    Context appContext = getActivity().getApplicationContext();
    mSenderAboveSubject = Ertebat.messageListSenderAboveSubject();
    // Skip the restart right after onActivityCreated() already initialized
    // the loaders; restart on every later resume.
    if (!mLoaderJustInitialized) {
        restartLoader();
    } else {
        mLoaderJustInitialized = false;
    }
    // Check if we have connectivity. Cache the value.
    if (mHasConnectivity == null) {
        mHasConnectivity = Utility.hasConnectivity(getActivity().getApplication());
    }
    mLocalBroadcastManager.registerReceiver(mCacheBroadcastReceiver, mCacheIntentFilter);
    mListener.onResume(getActivity());
    mController.addListener(mListener);
    //Cancel pending new mail notifications when we open an account
    Account[] accountsWithNotification;
    Account account = mAccount;
    if (account != null) {
        accountsWithNotification = new Account[] { account };
    } else {
        accountsWithNotification = mPreferences.getAccounts();
    }
    for (Account accountWithNotification : accountsWithNotification) {
        mController.notifyAccountCancel(appContext, accountWithNotification);
    }
    // Refresh the unread count for the open folder (not for manual searches).
    if (mAccount != null && mFolderName != null && !mSearch.isManualSearch()) {
        mController.getFolderUnreadMessageCount(mAccount, mFolderName, mListener);
    }
    updateTitle();
}
/**
 * Restart one cursor loader per account to refresh the message list.
 * No-op if the loaders were never initialized.
 */
private void restartLoader() {
    if (mCursorValid == null) {
        return;
    }
    LoaderManager loaderManager = getLoaderManager();
    for (int i = 0, count = mAccountUuids.length; i < count; i++) {
        loaderManager.restartLoader(i, null, this);
        mCursorValid[i] = false;
    }
}
/**
 * Configure the pull-to-refresh wrapper: pull triggers either a remote
 * search (when allowed) or a mail check (when supported), and an empty/loading
 * view is installed. Pull-to-refresh starts disabled until data is loaded.
 */
private void initializePullToRefresh(LayoutInflater inflater, View layout) {
    mPullToRefreshView = (PullToRefreshListView) layout.findViewById(R.id.message_list);
    // Set empty view
    View loadingView = inflater.inflate(R.layout.message_list_loading, null);
    mPullToRefreshView.setEmptyView(loadingView);
    if (isRemoteSearchAllowed()) {
        // "Pull to search server"
        mPullToRefreshView.setOnRefreshListener(
                new PullToRefreshBase.OnRefreshListener<ListView>() {
                    @Override
                    public void onRefresh(PullToRefreshBase<ListView> refreshView) {
                        mPullToRefreshView.onRefreshComplete();
                        onRemoteSearchRequested();
                    }
                });
        ILoadingLayout proxy = mPullToRefreshView.getLoadingLayoutProxy();
        proxy.setPullLabel(getString(
                R.string.pull_to_refresh_remote_search_from_local_search_pull));
        proxy.setReleaseLabel(getString(
                R.string.pull_to_refresh_remote_search_from_local_search_release));
    } else if (isCheckMailSupported()) {
        // "Pull to refresh"
        mPullToRefreshView.setOnRefreshListener(
                new PullToRefreshBase.OnRefreshListener<ListView>() {
                    @Override
                    public void onRefresh(PullToRefreshBase<ListView> refreshView) {
                        checkMail();
                    }
                });
    }
    // Disable pull-to-refresh until the message list has been loaded
    setPullToRefreshEnabled(false);
}
/**
 * Enable or disable pull-to-refresh.
 *
 * @param enable
 *         {@code true} to enable. {@code false} to disable.
 */
private void setPullToRefreshEnabled(boolean enable) {
    if (enable) {
        mPullToRefreshView.setMode(PullToRefreshBase.Mode.PULL_FROM_START);
    } else {
        mPullToRefreshView.setMode(PullToRefreshBase.Mode.DISABLED);
    }
}
/**
 * Obtain the inner ListView from the pull-to-refresh wrapper and apply the
 * list's scrolling/click configuration. Requires initializePullToRefresh()
 * to have run first.
 */
private void initializeLayout() {
    mListView = mPullToRefreshView.getRefreshableView();
    mListView.setScrollBarStyle(View.SCROLLBARS_INSIDE_OVERLAY);
    mListView.setLongClickable(true);
    mListView.setFastScrollEnabled(true);
    mListView.setScrollingCacheEnabled(false);
    mListView.setOnItemClickListener(this);
    registerForContextMenu(mListView);
}
/**
 * Start composing a message. In single-account mode the compose screen is
 * given the current account; otherwise {@code null} is passed so compose
 * falls back to its default behavior.
 */
public void onCompose() {
    final Account composeAccount = mSingleAccountMode ? mAccount : null;
    mFragmentListener.onCompose(composeAccount);
}
// Simple delegates that forward per-message user actions to the hosting
// activity via the fragment listener.
public void onReply(Message message) {
    mFragmentListener.onReply(message);
}
public void onReplyAll(Message message) {
    mFragmentListener.onReplyAll(message);
}
public void onForward(Message message) {
    mFragmentListener.onForward(message);
}
public void onResendMessage(Message message) {
    mFragmentListener.onResendMessage(message);
}
/**
 * Change the sort field. Selecting the already-active sort type toggles the
 * sort direction; otherwise the type's default direction is used (null).
 */
public void changeSort(SortType sortType) {
    Boolean sortAscending = (mSortType == sortType) ? !mSortAscending : null;
    changeSort(sortType, sortAscending);
}
/**
 * User has requested a remote search. Setup the bundle and start the intent.
 */
public void onRemoteSearchRequested() {
    String searchAccount;
    String searchFolder;
    searchAccount = mAccount.getUuid();
    searchFolder = mCurrentFolder.name;
    String queryString = mSearch.getRemoteSearchArguments();
    mRemoteSearchPerformed = true;
    // Keep the Future so the in-flight search can be referenced later
    // (presumably for cancellation — TODO confirm against callers).
    mRemoteSearchFuture = mController.searchRemoteMessages(searchAccount, searchFolder,
            queryString, null, null, mListener);
    // No pull-to-refresh while a remote search is in flight.
    setPullToRefreshEnabled(false);
    mFragmentListener.remoteSearchStarted();
}
/**
 * Change the sort type and sort order used for the message list.
 *
 * @param sortType
 *         Specifies which field to use for sorting the message list.
 * @param sortAscending
 *         Specifies the sort order. If this argument is {@code null} the default search order
 *         for the sort type is used.
 */
// FIXME: Don't save the changes in the UI thread
private void changeSort(SortType sortType, Boolean sortAscending) {
    mSortType = sortType;
    Account account = mAccount;
    if (account != null) {
        // Account-scoped sort settings.
        account.setSortType(mSortType);
        if (sortAscending == null) {
            mSortAscending = account.isSortAscending(mSortType);
        } else {
            mSortAscending = sortAscending;
        }
        account.setSortAscending(mSortType, mSortAscending);
        mSortDateAscending = account.isSortAscending(SortType.SORT_DATE);
        account.save(mPreferences);
    } else {
        // Global sort settings.
        Ertebat.setSortType(mSortType);
        if (sortAscending == null) {
            mSortAscending = Ertebat.isSortAscending(mSortType);
        } else {
            mSortAscending = sortAscending;
        }
        Ertebat.setSortAscending(mSortType, mSortAscending);
        mSortDateAscending = Ertebat.isSortAscending(SortType.SORT_DATE);
        Editor editor = mPreferences.getPreferences().edit();
        Ertebat.save(editor);
        // apply() persists asynchronously; the previous commit() performed a
        // synchronous disk write on the UI thread (see FIXME above).
        editor.apply();
    }
    reSort();
}
/**
 * Announce the new sort order with a toast, then restart every account's
 * loader so the new ordering takes effect.
 */
private void reSort() {
    Toast.makeText(getActivity(), mSortType.getToast(mSortAscending), Toast.LENGTH_SHORT)
            .show();
    LoaderManager loaderManager = getLoaderManager();
    for (int i = 0, count = mAccountUuids.length; i < count; i++) {
        loaderManager.restartLoader(i, null, this);
    }
}
/**
 * Advance to the next sort type in declaration order, wrapping around after
 * the last one.
 */
public void onCycleSort() {
    final SortType[] sorts = SortType.values();
    // Locate the current sort type; stays 0 if it is not found.
    int current = 0;
    for (int i = 0; i < sorts.length; i++) {
        if (sorts[i] == mSortType) {
            current = i;
            break;
        }
    }
    changeSort(sorts[(current + 1) % sorts.length]);
}
// Delete a single message (delegates to the list variant).
private void onDelete(Message message) {
    onDelete(Collections.singletonList(message));
}
// Delete messages, asking for confirmation first when the preference is set.
private void onDelete(List<Message> messages) {
    if (Ertebat.confirmDelete()) {
        // remember the message selection for #onCreateDialog(int)
        mActiveMessages = messages;
        showDialog(R.id.dialog_confirm_delete);
    } else {
        onDeleteConfirmed(messages);
    }
}
// Perform the actual deletion once (optionally) confirmed by the user.
private void onDeleteConfirmed(List<Message> messages) {
    if (mThreadedList) {
        mController.deleteThreads(messages);
    } else {
        mController.deleteMessages(messages, null);
    }
}
/**
 * Handle the result of the folder chooser launched for move/copy. The
 * affected messages were stashed in {@link #mActiveMessages} beforehand.
 */
@Override
public void onActivityResult(int requestCode, int resultCode, Intent data) {
    if (resultCode != Activity.RESULT_OK) {
        return;
    }
    switch (requestCode) {
        case ACTIVITY_CHOOSE_FOLDER_MOVE:
        case ACTIVITY_CHOOSE_FOLDER_COPY: {
            if (data == null) {
                return;
            }
            final String destFolderName = data.getStringExtra(ChooseFolder.EXTRA_NEW_FOLDER);
            final List<Message> messages = mActiveMessages;
            if (destFolderName != null) {
                mActiveMessages = null; // don't need it any more
                // Remember the chosen folder as the account's last selection.
                if (messages.size() > 0) {
                    Account account = messages.get(0).getFolder().getAccount();
                    account.setLastSelectedFolderName(destFolderName);
                }
                switch (requestCode) {
                    case ACTIVITY_CHOOSE_FOLDER_MOVE:
                        move(messages, destFolderName);
                        break;
                    case ACTIVITY_CHOOSE_FOLDER_COPY:
                        copy(messages, destFolderName);
                        break;
                }
            }
            break;
        }
    }
}
// Expunge the currently displayed folder (no-op when there is none).
public void onExpunge() {
    if (mCurrentFolder != null) {
        onExpunge(mAccount, mCurrentFolder.name);
    }
}
// Ask the controller to expunge the given folder.
private void onExpunge(final Account account, String folderName) {
    mController.expunge(account, folderName, null);
}
/**
 * Build and show a confirmation dialog for the given dialog ID. The affected
 * messages must already be stashed in {@link #mActiveMessages}.
 *
 * @throws RuntimeException when the dialog ID is unknown.
 */
private void showDialog(int dialogId) {
    DialogFragment fragment;
    switch (dialogId) {
        case R.id.dialog_confirm_spam: {
            String title = getString(R.string.dialog_confirm_spam_title);
            int selectionSize = mActiveMessages.size();
            // Quantity string so singular/plural wording matches the count.
            String message = getResources().getQuantityString(
                    R.plurals.dialog_confirm_spam_message, selectionSize,
                    Integer.valueOf(selectionSize));
            String confirmText = getString(R.string.dialog_confirm_spam_confirm_button);
            String cancelText = getString(R.string.dialog_confirm_spam_cancel_button);
            fragment = ConfirmationDialogFragment.newInstance(dialogId, title, message,
                    confirmText, cancelText);
            break;
        }
        case R.id.dialog_confirm_delete: {
            String title = getString(R.string.dialog_confirm_delete_title);
            int selectionSize = mActiveMessages.size();
            String message = getResources().getQuantityString(
                    R.plurals.dialog_confirm_delete_messages, selectionSize,
                    Integer.valueOf(selectionSize));
            String confirmText = getString(R.string.dialog_confirm_delete_confirm_button);
            String cancelText = getString(R.string.dialog_confirm_delete_cancel_button);
            fragment = ConfirmationDialogFragment.newInstance(dialogId, title, message,
                    confirmText, cancelText);
            break;
        }
        default: {
            throw new RuntimeException("Called showDialog(int) with unknown dialog id.");
        }
    }
    // Route the dialog's result back to this fragment.
    fragment.setTargetFragment(this, dialogId);
    fragment.show(getFragmentManager(), getDialogTag(dialogId));
}
/**
 * Build the fragment-manager tag used for a dialog with the given ID
 * (format: {@code "dialog-<id>"}).
 */
private String getDialogTag(int dialogId) {
    return new StringBuilder("dialog-").append(dialogId).toString();
}
/**
 * Handle options-menu selections. Sort and select-all are available in every
 * mode; the remaining items are only offered in single-account mode.
 */
@Override
public boolean onOptionsItemSelected(MenuItem item) {
    int itemId = item.getItemId();
    switch (itemId) {
        case R.id.set_sort_date: {
            changeSort(SortType.SORT_DATE);
            return true;
        }
        case R.id.set_sort_arrival: {
            changeSort(SortType.SORT_ARRIVAL);
            return true;
        }
        case R.id.set_sort_subject: {
            changeSort(SortType.SORT_SUBJECT);
            return true;
        }
        case R.id.set_sort_sender: {
            changeSort(SortType.SORT_SENDER);
            return true;
        }
        case R.id.set_sort_flag: {
            changeSort(SortType.SORT_FLAGGED);
            return true;
        }
        case R.id.set_sort_unread: {
            changeSort(SortType.SORT_UNREAD);
            return true;
        }
        case R.id.set_sort_attach: {
            changeSort(SortType.SORT_ATTACHMENT);
            return true;
        }
        case R.id.select_all: {
            selectAll();
            return true;
        }
    }
    if (!mSingleAccountMode) {
        // None of the options after this point are "safe" for search results
        //TODO: This is not true for "unread" and "starred" searches in regular folders
        return false;
    }
    switch (itemId) {
        case R.id.send_messages: {
            onSendPendingMessages();
            return true;
        }
        case R.id.expunge: {
            if (mCurrentFolder != null) {
                onExpunge(mAccount, mCurrentFolder.name);
            }
            return true;
        }
        default: {
            return super.onOptionsItemSelected(item);
        }
    }
}
// Ask the controller to send any pending messages for the current account.
public void onSendPendingMessages() {
    mController.sendPendingMessages(mAccount, null);
}
/**
 * Handle a context-menu action for the message the menu was opened on.
 *
 * <p>The target message is looked up via {@link #mContextMenuUniqueId}
 * rather than an adapter position, because the list may have changed while
 * the menu was open (see the field's documentation).</p>
 */
@Override
public boolean onContextItemSelected(android.view.MenuItem item) {
    if (mContextMenuUniqueId == 0) {
        return false;
    }
    int adapterPosition = getPositionForUniqueId(mContextMenuUniqueId);
    if (adapterPosition == AdapterView.INVALID_POSITION) {
        // The message is no longer in the list.
        return false;
    }
    switch (item.getItemId()) {
        case R.id.deselect:
        case R.id.select: {
            toggleMessageSelectWithAdapterPosition(adapterPosition);
            break;
        }
        case R.id.reply: {
            Message message = getMessageAtPosition(adapterPosition);
            onReply(message);
            break;
        }
        case R.id.reply_all: {
            Message message = getMessageAtPosition(adapterPosition);
            onReplyAll(message);
            break;
        }
        case R.id.forward: {
            Message message = getMessageAtPosition(adapterPosition);
            onForward(message);
            break;
        }
        case R.id.send_again: {
            Message message = getMessageAtPosition(adapterPosition);
            onResendMessage(message);
            mSelectedCount = 0;
            break;
        }
        case R.id.same_sender: {
            Cursor cursor = (Cursor) mAdapter.getItem(adapterPosition);
            String senderAddress = getSenderAddressFromCursor(cursor);
            if (senderAddress != null) {
                mFragmentListener.showMoreFromSameSender(senderAddress);
            }
            break;
        }
        case R.id.delete: {
            Message message = getMessageAtPosition(adapterPosition);
            onDelete(message);
            break;
        }
        case R.id.mark_as_read: {
            setFlag(adapterPosition, Flag.SEEN, true);
            break;
        }
        case R.id.mark_as_unread: {
            setFlag(adapterPosition, Flag.SEEN, false);
            break;
        }
        case R.id.flag: {
            setFlag(adapterPosition, Flag.FLAGGED, true);
            break;
        }
        case R.id.unflag: {
            setFlag(adapterPosition, Flag.FLAGGED, false);
            break;
        }
        // only if the account supports this
        case R.id.archive: {
            Message message = getMessageAtPosition(adapterPosition);
            onArchive(message);
            break;
        }
        case R.id.spam: {
            Message message = getMessageAtPosition(adapterPosition);
            onSpam(message);
            break;
        }
        case R.id.move: {
            Message message = getMessageAtPosition(adapterPosition);
            onMove(message);
            break;
        }
        case R.id.copy: {
            Message message = getMessageAtPosition(adapterPosition);
            onCopy(message);
            break;
        }
    }
    // Menu handled; forget the message the menu was opened for.
    mContextMenuUniqueId = 0;
    return true;
}
/**
 * Extract the first sender address from the packed address list stored in the
 * cursor's sender column.
 *
 * @return the first address, or {@code null} when the list is empty.
 */
private static String getSenderAddressFromCursor(Cursor cursor) {
    Address[] senders = Address.unpack(cursor.getString(SENDER_LIST_COLUMN));
    if (senders.length == 0) {
        return null;
    }
    return senders[0].getAddress();
}
/**
 * Build the per-message context menu, hiding menu items that don't apply to
 * the message's state (read/flagged/selected) or to the account's
 * capabilities (copy/move/archive/spam).
 */
@Override
public void onCreateContextMenu(ContextMenu menu, View v, ContextMenuInfo menuInfo) {
    super.onCreateContextMenu(menu, v, menuInfo);
    AdapterContextMenuInfo info = (AdapterContextMenuInfo) menuInfo;
    Cursor cursor = (Cursor) mListView.getItemAtPosition(info.position);
    if (cursor == null) {
        return;
    }
    getActivity().getMenuInflater().inflate(R.menu.message_list_item_context, menu);
    // Remember the message's unique ID (not its position) — see the field docs.
    mContextMenuUniqueId = cursor.getLong(mUniqueIdColumn);
    Account account = getAccountFromCursor(cursor);
    String subject = cursor.getString(SUBJECT_COLUMN);
    boolean read = (cursor.getInt(READ_COLUMN) == 1);
    boolean flagged = (cursor.getInt(FLAGGED_COLUMN) == 1);
    menu.setHeaderTitle(subject);
    // Only one of select/deselect is shown, depending on selection state.
    if( mSelected.contains(mContextMenuUniqueId)) {
        menu.findItem(R.id.select).setVisible(false);
    } else {
        menu.findItem(R.id.deselect).setVisible(false);
    }
    if (read) {
        menu.findItem(R.id.mark_as_read).setVisible(false);
    } else {
        menu.findItem(R.id.mark_as_unread).setVisible(false);
    }
    if (flagged) {
        menu.findItem(R.id.flag).setVisible(false);
    } else {
        menu.findItem(R.id.unflag).setVisible(false);
    }
    // Hide actions the account's backend doesn't support.
    if (!mController.isCopyCapable(account)) {
        menu.findItem(R.id.copy).setVisible(false);
    }
    if (!mController.isMoveCapable(account)) {
        menu.findItem(R.id.move).setVisible(false);
        menu.findItem(R.id.archive).setVisible(false);
        menu.findItem(R.id.spam).setVisible(false);
    }
    if (!account.hasArchiveFolder()) {
        menu.findItem(R.id.archive).setVisible(false);
    }
    if (!account.hasSpamFolder()) {
        menu.findItem(R.id.spam).setVisible(false);
    }
}
/**
 * Gesture callback for a right-to-left swipe over the message list.
 *
 * @param e1 Motion event at the start of the swipe (used to locate the row).
 * @param e2 Motion event at the end of the swipe (unused).
 */
public void onSwipeRightToLeft(final MotionEvent e1, final MotionEvent e2) {
    // Handle right-to-left as an un-select
    handleSwipe(e1, false);
}
/**
 * Gesture callback for a left-to-right swipe over the message list.
 *
 * @param e1 Motion event at the start of the swipe (used to locate the row).
 * @param e2 Motion event at the end of the swipe (unused).
 */
public void onSwipeLeftToRight(final MotionEvent e1, final MotionEvent e2) {
    // Handle left-to-right as a select.
    handleSwipe(e1, true);
}
/**
 * Handle a select or unselect swipe event by toggling the selection of the
 * list row under the point where the swipe started.
 *
 * @param downMotion
 *         Event that started the swipe.
 * @param selected
 *         {@code true} if this was an attempt to select (i.e. left to right).
 *         NOTE(review): this flag is currently unused — both swipe directions
 *         end up in {@link #toggleMessageSelect(int)}, i.e. both toggle;
 *         confirm whether that is intentional.
 */
private void handleSwipe(final MotionEvent downMotion, final boolean selected) {
    // Raw screen coordinates of the touch that started the gesture.
    int x = (int) downMotion.getRawX();
    int y = (int) downMotion.getRawY();
    Rect headerRect = new Rect();
    mListView.getGlobalVisibleRect(headerRect);
    // Only handle swipes in the visible area of the message list
    if (headerRect.contains(x, y)) {
        // Convert screen coordinates to list-local coordinates to find the row.
        int[] listPosition = new int[2];
        mListView.getLocationOnScreen(listPosition);
        int listX = x - listPosition[0];
        int listY = y - listPosition[1];
        int listViewPosition = mListView.pointToPosition(listX, listY);
        toggleMessageSelect(listViewPosition);
    }
}
/**
 * Convert a raw ListView position to the corresponding adapter position
 * (the list has one extra leading view, hence the offset of 1).
 *
 * @return The adapter position, or {@link AdapterView#INVALID_POSITION} if
 *         the ListView position doesn't map to an adapter row.
 */
private int listViewToAdapterPosition(int position) {
    boolean inRange = (position > 0 && position <= mAdapter.getCount());
    return inRange ? position - 1 : AdapterView.INVALID_POSITION;
}
/**
 * Convert an adapter position to the corresponding raw ListView position
 * (inverse of {@code listViewToAdapterPosition}).
 *
 * @return The ListView position, or {@link AdapterView#INVALID_POSITION} if
 *         the adapter position is out of range.
 */
private int adapterToListViewPosition(int position) {
    boolean inRange = (position >= 0 && position < mAdapter.getCount());
    return inRange ? position + 1 : AdapterView.INVALID_POSITION;
}
/**
 * Controller listener that keeps this fragment's UI — progress indicator,
 * footer text, and title — in sync with background mail operations such as
 * remote search and folder synchronization.
 */
class MessageListActivityListener extends ActivityListener {
    @Override
    public void remoteSearchFailed(Account acct, String folder, final String err) {
        // Posted via mHandler so the toast is shown on the UI thread.
        mHandler.post(new Runnable() {
            @Override
            public void run() {
                Activity activity = getActivity();
                // The fragment may already be detached from its activity.
                if (activity != null) {
                    Toast.makeText(activity, R.string.remote_search_error,
                            Toast.LENGTH_LONG).show();
                }
            }
        });
    }

    @Override
    public void remoteSearchStarted(Account acct, String folder) {
        mHandler.progress(true);
        mHandler.updateFooter(mContext.getString(R.string.remote_search_sending_query));
    }

    @Override
    public void enableProgressIndicator(boolean enable) {
        mHandler.progress(enable);
    }

    @Override
    public void remoteSearchFinished(Account acct, String folder, int numResults, List<Message> extraResults) {
        mHandler.progress(false);
        mHandler.remoteSearchFinished();
        // Keep the server hits that weren't downloaded; the footer offers to
        // load them later.
        mExtraSearchResults = extraResults;
        if (extraResults != null && extraResults.size() > 0) {
            mHandler.updateFooter(String.format(mContext.getString(R.string.load_more_messages_fmt), acct.getRemoteSearchNumResults()));
        } else {
            mHandler.updateFooter("");
        }
        mFragmentListener.setMessageListProgress(Window.PROGRESS_END);
    }

    @Override
    public void remoteSearchServerQueryComplete(Account account, String folderName, int numResults) {
        mHandler.progress(true);
        // If the server found more hits than the account's configured result
        // limit, tell the user the download will be truncated.
        if (account != null && account.getRemoteSearchNumResults() != 0 && numResults > account.getRemoteSearchNumResults()) {
            mHandler.updateFooter(mContext.getString(R.string.remote_search_downloading_limited,
                    account.getRemoteSearchNumResults(), numResults));
        } else {
            mHandler.updateFooter(mContext.getString(R.string.remote_search_downloading, numResults));
        }
        mFragmentListener.setMessageListProgress(Window.PROGRESS_START);
    }

    @Override
    public void informUserOfStatus() {
        mHandler.refreshTitle();
    }

    @Override
    public void synchronizeMailboxStarted(Account account, String folder) {
        if (updateForMe(account, folder)) {
            mHandler.progress(true);
            mHandler.folderLoading(folder, true);
        }
        super.synchronizeMailboxStarted(account, folder);
    }

    @Override
    public void synchronizeMailboxFinished(Account account, String folder,
            int totalMessagesInMailbox, int numNewMessages) {
        if (updateForMe(account, folder)) {
            mHandler.progress(false);
            mHandler.folderLoading(folder, false);
        }
        super.synchronizeMailboxFinished(account, folder, totalMessagesInMailbox, numNewMessages);
    }

    @Override
    public void synchronizeMailboxFailed(Account account, String folder, String message) {
        if (updateForMe(account, folder)) {
            mHandler.progress(false);
            mHandler.folderLoading(folder, false);
        }
        super.synchronizeMailboxFailed(account, folder, message);
    }

    @Override
    public void folderStatusChanged(Account account, String folder, int unreadMessageCount) {
        // Track the unread count only when exactly this account/folder is shown.
        if (isSingleAccountMode() && isSingleFolderMode() && mAccount.equals(account) &&
                mFolderName.equals(folder)) {
            mUnreadMessageCount = unreadMessageCount;
        }
        super.folderStatusChanged(account, folder, unreadMessageCount);
    }

    /**
     * Whether an event for the given account/folder concerns the messages
     * currently displayed by this fragment.
     */
    private boolean updateForMe(Account account, String folder) {
        if (account == null || folder == null) {
            return false;
        }
        if (!Utility.arrayContains(mAccountUuids, account.getUuid())) {
            return false;
        }
        // An empty folder-name list means the search covers all folders.
        List<String> folderNames = mSearch.getFolderNames();
        return (folderNames.size() == 0 || folderNames.contains(folder));
    }
}
/**
 * Cursor-backed adapter that renders one message per row, including sender,
 * subject, preview, date, status icons, selection/flag checkboxes, contact
 * badge, and thread count.
 */
class MessageListAdapter extends CursorAdapter {
    // Status icons shown next to the sender or subject.
    private Drawable mAttachmentIcon;
    private Drawable mForwardedIcon;
    private Drawable mAnsweredIcon;
    private Drawable mForwardedAnsweredIcon;

    MessageListAdapter() {
        // Cursor is supplied later via swapCursor(); no auto-requery flags.
        super(getActivity(), null, 0);
        mAttachmentIcon = getResources().getDrawable(R.drawable.ic_email_attachment_small);
        mAnsweredIcon = getResources().getDrawable(R.drawable.ic_email_answered_small);
        mForwardedIcon = getResources().getDrawable(R.drawable.ic_email_forwarded_small);
        mForwardedAnsweredIcon = getResources().getDrawable(R.drawable.ic_email_forwarded_answered_small);
    }

    /**
     * Symbol prefixed to the sender/subject line when the message was
     * addressed to me directly or via CC; empty string otherwise.
     */
    private String recipientSigil(boolean toMe, boolean ccMe) {
        if (toMe) {
            return getString(R.string.messagelist_sent_to_me_sigil);
        } else if (ccMe) {
            return getString(R.string.messagelist_sent_cc_me_sigil);
        } else {
            return "";
        }
    }

    /**
     * Inflate a list row and cache its child views in a MessageViewHolder.
     * Layout variants depend on preview-line count, contact pictures,
     * checkbox/star preferences, and sender-above-subject mode.
     */
    @Override
    public View newView(Context context, Cursor cursor, ViewGroup parent) {
        View view = mInflater.inflate(R.layout.message_list_item, parent, false);
        view.setId(R.layout.message_list_item);
        MessageViewHolder holder = new MessageViewHolder();
        holder.date = (TextView) view.findViewById(R.id.date);
        holder.chip = view.findViewById(R.id.chip);
        // Compact mode: no preview text and no contact pictures — reuse the
        // compact sender view and the center-right star.
        if (mPreviewLines == 0 && mContactsPictureLoader == null) {
            view.findViewById(R.id.preview).setVisibility(View.GONE);
            holder.preview = (TextView) view.findViewById(R.id.sender_compact);
            holder.flagged = (CheckBox) view.findViewById(R.id.flagged_center_right);
            view.findViewById(R.id.flagged_bottom_right).setVisibility(View.GONE);
        } else {
            view.findViewById(R.id.sender_compact).setVisibility(View.GONE);
            holder.preview = (TextView) view.findViewById(R.id.preview);
            holder.flagged = (CheckBox) view.findViewById(R.id.flagged_bottom_right);
            view.findViewById(R.id.flagged_center_right).setVisibility(View.GONE);
        }
        QuickContactBadge contactBadge =
                (QuickContactBadge) view.findViewById(R.id.contact_badge);
        if (mContactsPictureLoader != null) {
            holder.contactBadge = contactBadge;
        } else {
            contactBadge.setVisibility(View.GONE);
        }
        // The same text view (R.id.subject) hosts either the sender or the
        // subject depending on the sender-above-subject preference.
        if (mSenderAboveSubject) {
            holder.from = (TextView) view.findViewById(R.id.subject);
            mFontSizes.setViewTextSize(holder.from, mFontSizes.getMessageListSender());
        } else {
            holder.subject = (TextView) view.findViewById(R.id.subject);
            mFontSizes.setViewTextSize(holder.subject, mFontSizes.getMessageListSubject());
        }
        mFontSizes.setViewTextSize(holder.date, mFontSizes.getMessageListDate());
        // 1 preview line is needed even if it is set to 0, because subject is part of the same text view
        holder.preview.setLines(Math.max(mPreviewLines,1));
        mFontSizes.setViewTextSize(holder.preview, mFontSizes.getMessageListPreview());
        holder.threadCount = (TextView) view.findViewById(R.id.thread_count);
        mFontSizes.setViewTextSize(holder.threadCount, mFontSizes.getMessageListSubject()); // thread count is next to subject
        view.findViewById(R.id.selected_checkbox_wrapper).setVisibility((mCheckboxes) ? View.VISIBLE : View.GONE);
        holder.flagged.setVisibility(mStars ? View.VISIBLE : View.GONE);
        holder.flagged.setOnClickListener(holder);
        holder.selected = (CheckBox) view.findViewById(R.id.selected_checkbox);
        holder.selected.setOnClickListener(holder);
        view.setTag(holder);
        return view;
    }

    /**
     * Populate a recycled row with the data of the cursor's current message.
     */
    @Override
    public void bindView(View view, Context context, Cursor cursor) {
        Account account = getAccountFromCursor(cursor);
        // Unpack the packed address lists stored in the cursor row.
        String fromList = cursor.getString(SENDER_LIST_COLUMN);
        String toList = cursor.getString(TO_LIST_COLUMN);
        String ccList = cursor.getString(CC_LIST_COLUMN);
        Address[] fromAddrs = Address.unpack(fromList);
        Address[] toAddrs = Address.unpack(toList);
        Address[] ccAddrs = Address.unpack(ccList);
        boolean fromMe = mMessageHelper.toMe(account, fromAddrs);
        boolean toMe = mMessageHelper.toMe(account, toAddrs);
        boolean ccMe = mMessageHelper.toMe(account, ccAddrs);
        CharSequence displayName = mMessageHelper.getDisplayName(account, fromAddrs, toAddrs);
        CharSequence displayDate = DateUtils.getRelativeTimeSpanString(context, cursor.getLong(DATE_COLUMN));
        // The "other party" of the message, used for the contact badge:
        // first recipient for mail I sent, otherwise the first sender.
        Address counterpartyAddress = null;
        if (fromMe) {
            if (toAddrs.length > 0) {
                counterpartyAddress = toAddrs[0];
            } else if (ccAddrs.length > 0) {
                counterpartyAddress = ccAddrs[0];
            }
        } else if (fromAddrs.length > 0) {
            counterpartyAddress = fromAddrs[0];
        }
        int threadCount = (mThreadedList) ? cursor.getInt(THREAD_COUNT_COLUMN) : 0;
        String subject = cursor.getString(SUBJECT_COLUMN);
        if (StringUtils.isNullOrEmpty(subject)) {
            subject = getString(R.string.general_no_subject);
        } else if (threadCount > 1) {
            // If this is a thread, strip the RE/FW from the subject. "Be like Outlook."
            subject = Utility.stripSubject(subject);
        }
        boolean read = (cursor.getInt(READ_COLUMN) == 1);
        boolean flagged = (cursor.getInt(FLAGGED_COLUMN) == 1);
        boolean answered = (cursor.getInt(ANSWERED_COLUMN) == 1);
        boolean forwarded = (cursor.getInt(FORWARDED_COLUMN) == 1);
        boolean hasAttachments = (cursor.getInt(ATTACHMENT_COUNT_COLUMN) > 0);
        MessageViewHolder holder = (MessageViewHolder) view.getTag();
        // Unread messages are rendered in bold.
        int maybeBoldTypeface = (read) ? Typeface.NORMAL : Typeface.BOLD;
        long uniqueId = cursor.getLong(mUniqueIdColumn);
        boolean selected = mSelected.contains(uniqueId);
        holder.chip.setBackgroundColor(account.getChipColor());
        if (mCheckboxes) {
            holder.selected.setChecked(selected);
        }
        if (mStars) {
            holder.flagged.setChecked(flagged);
        }
        // Remember the cursor position so the holder's click handler can act
        // on this row.
        holder.position = cursor.getPosition();
        if (holder.contactBadge != null) {
            if (counterpartyAddress != null) {
                holder.contactBadge.assignContactFromEmail(counterpartyAddress.getAddress(), true);
                /*
                 * At least in Android 2.2 a different background + padding is used when no
                 * email address is available. ListView reuses the views but QuickContactBadge
                 * doesn't reset the padding, so we do it ourselves.
                 */
                holder.contactBadge.setPadding(0, 0, 0, 0);
                mContactsPictureLoader.loadContactPicture(counterpartyAddress, holder.contactBadge);
            } else {
                holder.contactBadge.assignContactUri(null);
                holder.contactBadge.setImageResource(R.drawable.ic_contact_picture);
            }
        }
        // Background color
        if (selected || Ertebat.useBackgroundAsUnreadIndicator()) {
            int res;
            if (selected) {
                res = R.attr.messageListSelectedBackgroundColor;
            } else if (read) {
                res = R.attr.messageListReadItemBackgroundColor;
            } else {
                res = R.attr.messageListUnreadItemBackgroundColor;
            }
            TypedValue outValue = new TypedValue();
            getActivity().getTheme().resolveAttribute(res, outValue, true);
            view.setBackgroundColor(outValue.data);
        } else {
            view.setBackgroundColor(Color.TRANSPARENT);
        }
        // Highlight the message currently open in the message view, if any.
        if (mActiveMessage != null) {
            String uid = cursor.getString(UID_COLUMN);
            String folderName = cursor.getString(FOLDER_NAME_COLUMN);
            if (account.getUuid().equals(mActiveMessage.accountUuid) &&
                    folderName.equals(mActiveMessage.folderName) &&
                    uid.equals(mActiveMessage.uid)) {
                int res = R.attr.messageListActiveItemBackgroundColor;
                TypedValue outValue = new TypedValue();
                getActivity().getTheme().resolveAttribute(res, outValue, true);
                view.setBackgroundColor(outValue.data);
            }
        }
        // Thread count
        if (threadCount > 1) {
            holder.threadCount.setText(Integer.toString(threadCount));
            holder.threadCount.setVisibility(View.VISIBLE);
        } else {
            holder.threadCount.setVisibility(View.GONE);
        }
        // First part of the preview text view: sigil + (subject or sender),
        // optionally followed by the message preview text.
        CharSequence beforePreviewText = (mSenderAboveSubject) ? subject : displayName;
        String sigil = recipientSigil(toMe, ccMe);
        SpannableStringBuilder messageStringBuilder = new SpannableStringBuilder(sigil)
                .append(beforePreviewText);
        if (mPreviewLines > 0) {
            String preview = cursor.getString(PREVIEW_COLUMN);
            if (preview != null) {
                messageStringBuilder.append(" ").append(preview);
            }
        }
        holder.preview.setText(messageStringBuilder, TextView.BufferType.SPANNABLE);
        Spannable str = (Spannable)holder.preview.getText();
        // Create a span section for the sender, and assign the correct font size and weight
        int fontSize = (mSenderAboveSubject) ?
                mFontSizes.getMessageListSubject():
                mFontSizes.getMessageListSender();
        AbsoluteSizeSpan span = new AbsoluteSizeSpan(fontSize, true);
        str.setSpan(span, 0, beforePreviewText.length() + sigil.length(),
                Spannable.SPAN_EXCLUSIVE_EXCLUSIVE);
        //TODO: make this part of the theme
        int color = (Ertebat.getErtebatTheme() == Ertebat.Theme.LIGHT) ?
                Color.rgb(105, 105, 105) :
                Color.rgb(160, 160, 160);
        // Set span (color) for preview message
        str.setSpan(new ForegroundColorSpan(color), beforePreviewText.length() + sigil.length(),
                str.length(), Spannable.SPAN_EXCLUSIVE_EXCLUSIVE);
        // Pick the answered/forwarded status icon, if any.
        Drawable statusHolder = null;
        if (forwarded && answered) {
            statusHolder = mForwardedAnsweredIcon;
        } else if (answered) {
            statusHolder = mAnsweredIcon;
        } else if (forwarded) {
            statusHolder = mForwardedIcon;
        }
        if (holder.from != null ) {
            holder.from.setTypeface(null, maybeBoldTypeface);
            if (mSenderAboveSubject) {
                holder.from.setCompoundDrawablesWithIntrinsicBounds(
                        statusHolder, // left
                        null, // top
                        hasAttachments ? mAttachmentIcon : null, // right
                        null); // bottom
                holder.from.setText(displayName);
            } else {
                holder.from.setText(new SpannableStringBuilder(sigil).append(displayName));
            }
        }
        if (holder.subject != null ) {
            if (!mSenderAboveSubject) {
                holder.subject.setCompoundDrawablesWithIntrinsicBounds(
                        statusHolder, // left
                        null, // top
                        hasAttachments ? mAttachmentIcon : null, // right
                        null); // bottom
            }
            holder.subject.setTypeface(null, maybeBoldTypeface);
            holder.subject.setText(subject);
        }
        holder.date.setText(displayDate);
    }
}
class MessageViewHolder implements View.OnClickListener {
public TextView subject;
public TextView preview;
public TextView from;
public TextView time;
public TextView date;
public View chip;
public TextView threadCount;
public CheckBox flagged;
public CheckBox selected;
public int position = -1;
public QuickContactBadge contactBadge;
@Override
public void onClick(View view) {
if (position != -1) {
switch (view.getId()) {
case R.id.selected_checkbox:
toggleMessageSelectWithAdapterPosition(position);
break;
case R.id.flagged_bottom_right:
case R.id.flagged_center_right:
toggleMessageFlagWithAdapterPosition(position);
break;
}
}
}
}
/**
 * Lazily inflate and cache the list footer view.
 *
 * @param parent Parent used for layout-param resolution during inflation.
 * @return The (cached) footer view.
 */
private View getFooterView(ViewGroup parent) {
    if (mFooterView != null) {
        return mFooterView;
    }
    mFooterView = mInflater.inflate(R.layout.message_list_item_footer, parent, false);
    mFooterView.setId(R.layout.message_list_item_footer);
    FooterViewHolder holder = new FooterViewHolder();
    holder.main = (TextView) mFooterView.findViewById(R.id.main_text);
    mFooterView.setTag(holder);
    return mFooterView;
}
/**
 * Refresh the footer text based on the current folder's loading state and
 * the account's display count. Hidden (null text) for manual searches or
 * when no folder/account is active.
 */
private void updateFooterView() {
    if (mSearch.isManualSearch() || mCurrentFolder == null || mAccount == null) {
        updateFooter(null);
        return;
    }
    if (mCurrentFolder.loading) {
        updateFooter(mContext.getString(R.string.status_loading_more));
        return;
    }
    String message;
    if (mCurrentFolder.lastCheckFailed) {
        message = mContext.getString(R.string.status_loading_more_failed);
    } else if (mAccount.getDisplayCount() == 0) {
        message = mContext.getString(R.string.message_list_load_more_messages_action);
    } else {
        message = String.format(mContext.getString(R.string.load_more_messages_fmt), mAccount.getDisplayCount());
    }
    updateFooter(message);
}
/**
 * Set the footer text and show/hide the footer accordingly.
 *
 * @param text New footer text; {@code null} keeps the current text and only
 *             re-evaluates visibility.
 */
public void updateFooter(final String text) {
    if (mFooterView == null) {
        return;
    }
    FooterViewHolder holder = (FooterViewHolder) mFooterView.getTag();
    if (text != null) {
        holder.main.setText(text);
    }
    boolean hasText = holder.main.getText().length() > 0;
    holder.main.setVisibility(hasText ? View.VISIBLE : View.GONE);
}
/** View holder caching the footer's single text view. */
static class FooterViewHolder {
    public TextView main;
}
/**
 * Set selection state for all messages.
 *
 * @param selected
 *         If {@code true} all messages get selected. Otherwise, all messages
 *         get deselected and action mode is finished.
 */
private void setSelectionState(boolean selected) {
    if (!selected) {
        // Deselect everything and leave action mode.
        mSelected.clear();
        mSelectedCount = 0;
        if (mActionMode != null) {
            mActionMode.finish();
            mActionMode = null;
        }
        mAdapter.notifyDataSetChanged();
        return;
    }

    int count = mAdapter.getCount();
    if (count == 0) {
        // Nothing to do if there are no messages.
        return;
    }

    mSelectedCount = 0;
    for (int i = 0; i < count; i++) {
        Cursor cursor = (Cursor) mAdapter.getItem(i);
        mSelected.add(cursor.getLong(mUniqueIdColumn));
        if (mThreadedList) {
            // A thread row counts as all of its messages.
            int threadCount = cursor.getInt(THREAD_COUNT_COLUMN);
            mSelectedCount += (threadCount > 1) ? threadCount : 1;
        } else {
            mSelectedCount++;
        }
    }

    if (mActionMode == null) {
        mActionMode = getActivity().startActionMode(mActionModeCallback);
    }
    computeBatchDirection();
    updateActionModeTitle();
    computeSelectAllVisibility();

    mAdapter.notifyDataSetChanged();
}
/**
 * Toggle selection for the row at a raw ListView position, converting it to
 * an adapter position first.
 */
private void toggleMessageSelect(int listViewPosition) {
    int adapterPosition = listViewToAdapterPosition(listViewPosition);
    if (adapterPosition != AdapterView.INVALID_POSITION) {
        toggleMessageSelectWithAdapterPosition(adapterPosition);
    }
}
/** Invert the FLAGGED state of the message at the given adapter position. */
private void toggleMessageFlagWithAdapterPosition(int adapterPosition) {
    Cursor cursor = (Cursor) mAdapter.getItem(adapterPosition);
    boolean currentlyFlagged = (cursor.getInt(FLAGGED_COLUMN) == 1);
    setFlag(adapterPosition, Flag.FLAGGED, !currentlyFlagged);
}
/**
 * Toggle the selection state of the message at the given adapter position,
 * keeping the selection count and the action mode (title, menu) in sync.
 */
private void toggleMessageSelectWithAdapterPosition(int adapterPosition) {
    Cursor cursor = (Cursor) mAdapter.getItem(adapterPosition);
    long uniqueId = cursor.getLong(mUniqueIdColumn);
    boolean selected = mSelected.contains(uniqueId);
    if (!selected) {
        mSelected.add(uniqueId);
    } else {
        mSelected.remove(uniqueId);
    }
    // In threaded mode a thread row contributes all of its messages to the count.
    int selectedCountDelta = 1;
    if (mThreadedList) {
        int threadCount = cursor.getInt(THREAD_COUNT_COLUMN);
        if (threadCount > 1) {
            selectedCountDelta = threadCount;
        }
    }
    if (mActionMode != null) {
        // Deselecting the last selected message ends the action mode; counters
        // are reset by onDestroyActionMode -> setSelectionState(false).
        if (mSelectedCount == selectedCountDelta && selected) {
            mActionMode.finish();
            mActionMode = null;
            return;
        }
    } else {
        // First selection: enter action mode.
        mActionMode = getActivity().startActionMode(mActionModeCallback);
    }
    if (selected) {
        mSelectedCount -= selectedCountDelta;
    } else {
        mSelectedCount += selectedCountDelta;
    }
    computeBatchDirection();
    updateActionModeTitle();
    // make sure the onPrepareActionMode is called
    mActionMode.invalidate();
    computeSelectAllVisibility();
    mAdapter.notifyDataSetChanged();
}
/** Show the current selection count in the action mode's title bar. */
private void updateActionModeTitle() {
    mActionMode.setTitle(String.format(getString(R.string.actionbar_selected), mSelectedCount));
}
/** Hide the "select all" action once every list row is already selected. */
private void computeSelectAllVisibility() {
    boolean everythingSelected = (mSelected.size() == mAdapter.getCount());
    mActionModeCallback.showSelectAll(!everythingSelected);
}
/**
 * Decide which direction the batch read/flag actions should go: if any
 * selected message is unread, offer "mark as read"; if any is unflagged,
 * offer "flag" (otherwise the inverse actions are shown).
 */
private void computeBatchDirection() {
    boolean anyUnflagged = false;
    boolean anyUnread = false;

    for (int i = 0, end = mAdapter.getCount(); i < end; i++) {
        Cursor cursor = (Cursor) mAdapter.getItem(i);
        if (!mSelected.contains(cursor.getLong(mUniqueIdColumn))) {
            continue;
        }
        if (cursor.getInt(FLAGGED_COLUMN) != 1) {
            anyUnflagged = true;
        }
        if (cursor.getInt(READ_COLUMN) != 1) {
            anyUnread = true;
        }
        if (anyUnflagged && anyUnread) {
            break; // both directions settled; no need to scan further
        }
    }

    mActionModeCallback.showMarkAsRead(anyUnread);
    mActionModeCallback.showFlag(anyUnflagged);
}
/**
 * Apply a flag change to the message (or, in threaded mode, the whole
 * thread) at the given adapter position, then refresh the batch actions.
 */
private void setFlag(int adapterPosition, final Flag flag, final boolean newState) {
    if (adapterPosition == AdapterView.INVALID_POSITION) {
        return;
    }

    Cursor cursor = (Cursor) mAdapter.getItem(adapterPosition);
    Account account = mPreferences.getAccount(cursor.getString(ACCOUNT_UUID_COLUMN));

    boolean isThread = mThreadedList && cursor.getInt(THREAD_COUNT_COLUMN) > 1;
    if (isThread) {
        long threadRootId = cursor.getLong(THREAD_ROOT_COLUMN);
        mController.setFlagForThreads(account,
                Collections.singletonList(Long.valueOf(threadRootId)), flag, newState);
    } else {
        long messageId = cursor.getLong(ID_COLUMN);
        mController.setFlag(account,
                Collections.singletonList(Long.valueOf(messageId)), flag, newState);
    }

    computeBatchDirection();
}
/**
 * Apply a flag change to every selected message, grouping the controller
 * calls per account and splitting thread rows from plain messages.
 */
private void setFlagForSelected(final Flag flag, final boolean newState) {
    if (mSelected.isEmpty()) {
        return;
    }

    Map<Account, List<Long>> messageMap = new HashMap<Account, List<Long>>();
    Map<Account, List<Long>> threadMap = new HashMap<Account, List<Long>>();
    Set<Account> accounts = new HashSet<Account>();

    for (int position = 0, end = mAdapter.getCount(); position < end; position++) {
        Cursor cursor = (Cursor) mAdapter.getItem(position);
        if (!mSelected.contains(cursor.getLong(mUniqueIdColumn))) {
            continue;
        }

        Account account = mPreferences.getAccount(cursor.getString(ACCOUNT_UUID_COLUMN));
        accounts.add(account);

        // Thread rows are flagged via their thread root; plain rows via message id.
        boolean isThread = mThreadedList && cursor.getInt(THREAD_COUNT_COLUMN) > 1;
        Map<Account, List<Long>> targetMap = isThread ? threadMap : messageMap;
        long id = cursor.getLong(isThread ? THREAD_ROOT_COLUMN : ID_COLUMN);

        List<Long> idList = targetMap.get(account);
        if (idList == null) {
            idList = new ArrayList<Long>();
            targetMap.put(account, idList);
        }
        idList.add(id);
    }

    for (Account account : accounts) {
        List<Long> messageIds = messageMap.get(account);
        if (messageIds != null) {
            mController.setFlag(account, messageIds, flag, newState);
        }
        List<Long> threadRootIds = threadMap.get(account);
        if (threadRootIds != null) {
            mController.setFlagForThreads(account, threadRootIds, flag, newState);
        }
    }

    computeBatchDirection();
}
/** Convenience wrapper: move a single message (see {@code onMove(List)}). */
private void onMove(Message message) {
    onMove(Collections.singletonList(message));
}
/**
 * Display the message move activity for the given messages.
 *
 * @param messages
 *         Messages to move. Never {@code null}.
 */
private void onMove(List<Message> messages) {
    if (!checkCopyOrMovePossible(messages, FolderOperation.MOVE)) {
        return;
    }

    // Source folder shown as "current": the messages' folder in thread view,
    // the displayed folder in single-folder mode, otherwise unknown.
    Folder folder = null;
    if (mIsThreadDisplay) {
        folder = messages.get(0).getFolder();
    } else if (mSingleFolderMode) {
        folder = mCurrentFolder.folder;
    }

    Account account = messages.get(0).getFolder().getAccount();
    displayFolderChoice(ACTIVITY_CHOOSE_FOLDER_MOVE, account, folder, messages);
}
/** Convenience wrapper: copy a single message (see {@code onCopy(List)}). */
private void onCopy(Message message) {
    onCopy(Collections.singletonList(message));
}
/**
 * Display the message copy activity for the given messages.
 *
 * @param messages
 *         Messages to copy. Never {@code null}.
 */
private void onCopy(List<Message> messages) {
    if (!checkCopyOrMovePossible(messages, FolderOperation.COPY)) {
        return;
    }

    // Source folder shown as "current": the messages' folder in thread view,
    // the displayed folder in single-folder mode, otherwise unknown.
    final Folder folder;
    if (mIsThreadDisplay) {
        folder = messages.get(0).getFolder();
    } else if (mSingleFolderMode) {
        folder = mCurrentFolder.folder;
    } else {
        folder = null;
    }

    // Derive the account from the messages themselves, mirroring onMove():
    // in aggregate views (e.g. unified inbox) mAccount is null, which would
    // crash displayFolderChoice() on account.getUuid(). The copy capability of
    // this account was already verified by checkCopyOrMovePossible() above.
    Account account = messages.get(0).getFolder().getAccount();
    displayFolderChoice(ACTIVITY_CHOOSE_FOLDER_COPY, account, folder, messages);
}
/**
 * Launch the {@link ChooseFolder} activity for a copy/move operation while
 * remembering the affected messages for {@code onActivityResult()}.
 *
 * @param requestCode
 *         If {@code >= 0}, this code is returned in {@code onActivityResult()}
 *         when the activity exits.
 * @param account
 *         Account the folder list is taken from. Never {@code null}.
 * @param folder
 *         The source folder, or {@code null} if unknown (the chooser then
 *         shows the current folder entry instead of excluding it).
 * @param messages
 *         Messages to be affected by the folder operation. Never {@code null}.
 *
 * @see #startActivityForResult(Intent, int)
 */
private void displayFolderChoice(int requestCode, Account account, Folder folder,
        List<Message> messages) {
    Intent intent = new Intent(getActivity(), ChooseFolder.class);
    intent.putExtra(ChooseFolder.EXTRA_ACCOUNT, account.getUuid());
    intent.putExtra(ChooseFolder.EXTRA_SEL_FOLDER, account.getLastSelectedFolderName());
    if (folder != null) {
        intent.putExtra(ChooseFolder.EXTRA_CUR_FOLDER, folder.getName());
    } else {
        intent.putExtra(ChooseFolder.EXTRA_SHOW_CURRENT, "yes");
    }
    // remember the selected messages for #onActivityResult
    mActiveMessages = messages;
    startActivityForResult(intent, requestCode);
}
/** Convenience wrapper: archive a single message (see {@code onArchive(List)}). */
private void onArchive(final Message message) {
    onArchive(Collections.singletonList(message));
}
/**
 * Move the given messages to their account's archive folder, skipping
 * accounts that have no archive folder configured.
 */
private void onArchive(final List<Message> messages) {
    for (Entry<Account, List<Message>> entry : groupMessagesByAccount(messages).entrySet()) {
        String archiveFolder = entry.getKey().getArchiveFolderName();
        if (!Ertebat.FOLDER_NONE.equals(archiveFolder)) {
            move(entry.getValue(), archiveFolder);
        }
    }
}
/**
 * Partition messages by the account they belong to.
 *
 * @param messages Messages to group. Never {@code null}.
 * @return Map from account to the subset of {@code messages} in that account.
 */
private Map<Account, List<Message>> groupMessagesByAccount(final List<Message> messages) {
    Map<Account, List<Message>> grouped = new HashMap<Account, List<Message>>();
    for (Message message : messages) {
        Account account = message.getFolder().getAccount();
        List<Message> group = grouped.get(account);
        if (group == null) {
            group = new ArrayList<Message>();
            grouped.put(account, group);
        }
        group.add(message);
    }
    return grouped;
}
/** Convenience wrapper: mark a single message as spam (see {@code onSpam(List)}). */
private void onSpam(Message message) {
    onSpam(Collections.singletonList(message));
}
/**
 * Move messages to the spam folder, optionally after a confirmation dialog.
 *
 * @param messages
 *         The messages to move to the spam folder. Never {@code null}.
 */
private void onSpam(List<Message> messages) {
    if (!Ertebat.confirmSpam()) {
        onSpamConfirmed(messages);
        return;
    }
    // remember the message selection for #onCreateDialog(int)
    mActiveMessages = messages;
    showDialog(R.id.dialog_confirm_spam);
}
/**
 * Actually move the given messages to their account's spam folder, skipping
 * accounts without a configured spam folder.
 */
private void onSpamConfirmed(List<Message> messages) {
    for (Entry<Account, List<Message>> entry : groupMessagesByAccount(messages).entrySet()) {
        String spamFolder = entry.getKey().getSpamFolderName();
        if (!Ertebat.FOLDER_NONE.equals(spamFolder)) {
            move(entry.getValue(), spamFolder);
        }
    }
}
/** Which folder operation a batch of messages is subjected to. */
private static enum FolderOperation {
    COPY, MOVE
}
/**
 * Check whether a copy/move operation can be performed, showing a toast if
 * any message hasn't been synchronized yet.
 *
 * @param messages
 *         The messages to copy or move. Never {@code null}.
 * @param operation
 *         The type of operation to perform. Never {@code null}.
 *
 * @return {@code true}, if operation is possible.
 */
private boolean checkCopyOrMovePossible(final List<Message> messages,
        final FolderOperation operation) {
    if (messages.isEmpty()) {
        return false;
    }

    // Account-level capability is checked against the first message's account.
    final Account account = messages.get(0).getFolder().getAccount();
    if ((operation == FolderOperation.MOVE && !mController.isMoveCapable(account)) ||
            (operation == FolderOperation.COPY && !mController.isCopyCapable(account))) {
        return false;
    }

    // Every message must be synchronized for the operation to work.
    for (final Message message : messages) {
        if ((operation == FolderOperation.MOVE && !mController.isMoveCapable(message)) ||
                (operation == FolderOperation.COPY && !mController.isCopyCapable(message))) {
            Toast.makeText(getActivity(), R.string.move_copy_cannot_copy_unsynced_message,
                    Toast.LENGTH_LONG).show();
            return false;
        }
    }
    return true;
}
/**
 * Copy the specified messages to the specified folder.
 *
 * <p>Thin wrapper around {@code copyOrMove}.</p>
 *
 * @param messages
 *         List of messages to copy. Never {@code null}.
 * @param destination
 *         The name of the destination folder. Never {@code null}.
 */
private void copy(List<Message> messages, final String destination) {
    copyOrMove(messages, destination, FolderOperation.COPY);
}
/**
 * Move the specified messages to the specified folder.
 *
 * <p>Thin wrapper around {@code copyOrMove}.</p>
 *
 * @param messages
 *         The list of messages to move. Never {@code null}.
 * @param destination
 *         The name of the destination folder. Never {@code null}.
 */
private void move(List<Message> messages, final String destination) {
    copyOrMove(messages, destination, FolderOperation.MOVE);
}
/**
 * The underlying implementation for {@link #copy(List, String)} and
 * {@link #move(List, String)}. This method was added mainly because those 2
 * methods share common behavior.
 *
 * <p>Messages are grouped by their source folder and dispatched to the
 * controller one folder at a time; messages already in the destination
 * folder are skipped.</p>
 *
 * <p>NOTE(review): the grouping map is keyed by folder <em>name</em> only,
 * and each group's account is taken from its first message — a selection
 * spanning accounts with identically named folders would be merged into one
 * group. Confirm callers only pass same-account batches.</p>
 *
 * @param messages
 *         The list of messages to copy or move. Never {@code null}.
 * @param destination
 *         The name of the destination folder. Never {@code null} or {@link Ertebat#FOLDER_NONE}.
 * @param operation
 *         Specifies what operation to perform. Never {@code null}.
 */
private void copyOrMove(List<Message> messages, final String destination,
        final FolderOperation operation) {
    Map<String, List<Message>> folderMap = new HashMap<String, List<Message>>();
    for (Message message : messages) {
        // Abort the whole batch if any message hasn't been synchronized yet.
        if ((operation == FolderOperation.MOVE && !mController.isMoveCapable(message)) ||
                (operation == FolderOperation.COPY && !mController.isCopyCapable(message))) {
            Toast.makeText(getActivity(), R.string.move_copy_cannot_copy_unsynced_message,
                    Toast.LENGTH_LONG).show();
            // XXX return meaningful error value?
            // message isn't synchronized
            return;
        }
        String folderName = message.getFolder().getName();
        if (folderName.equals(destination)) {
            // Skip messages already in the destination folder
            continue;
        }
        List<Message> outMessages = folderMap.get(folderName);
        if (outMessages == null) {
            outMessages = new ArrayList<Message>();
            folderMap.put(folderName, outMessages);
        }
        outMessages.add(message);
    }
    // One controller call per source folder.
    for (Map.Entry<String, List<Message>> entry : folderMap.entrySet()) {
        String folderName = entry.getKey();
        List<Message> outMessages = entry.getValue();
        Account account = outMessages.get(0).getFolder().getAccount();
        if (operation == FolderOperation.MOVE) {
            if (mThreadedList) {
                mController.moveMessagesInThread(account, folderName, outMessages, destination);
            } else {
                mController.moveMessages(account, folderName, outMessages, destination, null);
            }
        } else {
            if (mThreadedList) {
                mController.copyMessagesInThread(account, folderName, outMessages, destination);
            } else {
                mController.copyMessages(account, folderName, outMessages, destination, null);
            }
        }
    }
}
class ActionModeCallback implements ActionMode.Callback {
private MenuItem mSelectAll;
private MenuItem mMarkAsRead;
private MenuItem mMarkAsUnread;
private MenuItem mFlag;
private MenuItem mUnflag;
/**
 * Cache the frequently toggled menu items and, for cross-account selections,
 * narrow down the menu to the capabilities common to all selected accounts.
 */
@Override
public boolean onPrepareActionMode(ActionMode mode, Menu menu) {
    // Cache items so the show*() helpers can flip them cheaply.
    mSelectAll = menu.findItem(R.id.select_all);
    mMarkAsRead = menu.findItem(R.id.mark_as_read);
    mMarkAsUnread = menu.findItem(R.id.mark_as_unread);
    mFlag = menu.findItem(R.id.flag);
    mUnflag = menu.findItem(R.id.unflag);

    // we don't support cross account actions atm
    if (mSingleAccountMode) {
        return true;
    }

    // Start with everything visible, then let each selected account's
    // capabilities hide what it doesn't support.
    menu.findItem(R.id.move).setVisible(true);
    menu.findItem(R.id.archive).setVisible(true);
    menu.findItem(R.id.spam).setVisible(true);
    menu.findItem(R.id.copy).setVisible(true);
    for (String accountUuid : getAccountUuidsForSelected()) {
        Account account = mPreferences.getAccount(accountUuid);
        if (account != null) {
            setContextCapabilities(account, menu);
        }
    }
    return true;
}
/**
 * Collect the set of account UUIDs the currently selected messages belong to.
 * Stops scanning early once every known account is represented.
 */
private Set<String> getAccountUuidsForSelected() {
    int maxAccounts = mAccountUuids.length;
    Set<String> accountUuids = new HashSet<String>(maxAccounts);

    for (int position = 0, end = mAdapter.getCount(); position < end; position++) {
        Cursor cursor = (Cursor) mAdapter.getItem(position);
        if (!mSelected.contains(cursor.getLong(mUniqueIdColumn))) {
            continue;
        }
        accountUuids.add(cursor.getString(ACCOUNT_UUID_COLUMN));
        if (accountUuids.size() == maxAccounts) {
            break; // all accounts already represented
        }
    }
    return accountUuids;
}
/** Clean up when the action mode ends (back press, finish(), etc.). */
@Override
public void onDestroyActionMode(ActionMode mode) {
    // Null out mActionMode first so setSelectionState(false) below doesn't
    // call finish() on the action mode a second time.
    mActionMode = null;
    mSelectAll = null;
    mMarkAsRead = null;
    mMarkAsUnread = null;
    mFlag = null;
    mUnflag = null;
    setSelectionState(false);
}
/**
 * Inflate the batch-action menu and hide the entries the current account
 * doesn't support.
 */
@Override
public boolean onCreateActionMode(ActionMode mode, Menu menu) {
    MenuInflater inflater = mode.getMenuInflater();
    inflater.inflate(R.menu.message_list_context, menu);
    // check capabilities
    setContextCapabilities(mAccount, menu);
    return true;
}
/**
 * Disables menu options not supported by the account type or current
 * "search view".
 *
 * @param account
 *         The account to query for its capabilities.
 * @param menu
 *         The menu to adapt.
 */
private void setContextCapabilities(Account account, Menu menu) {
    if (!mSingleAccountMode) {
        // We don't support cross-account copy/move operations right now.
        //TODO: we could support the archive and spam operations if all
        // selected messages belong to non-POP3 accounts
        menu.findItem(R.id.move).setVisible(false);
        menu.findItem(R.id.copy).setVisible(false);
        menu.findItem(R.id.archive).setVisible(false);
        menu.findItem(R.id.spam).setVisible(false);
        return;
    }

    // Hide what this account's backend can't do.
    if (!mController.isCopyCapable(account)) {
        menu.findItem(R.id.copy).setVisible(false);
    }
    if (!mController.isMoveCapable(account)) {
        // Archive and spam are implemented as moves.
        menu.findItem(R.id.move).setVisible(false);
        menu.findItem(R.id.archive).setVisible(false);
        menu.findItem(R.id.spam).setVisible(false);
    }
    if (!account.hasArchiveFolder()) {
        menu.findItem(R.id.archive).setVisible(false);
    }
    if (!account.hasSpamFolder()) {
        menu.findItem(R.id.spam).setVisible(false);
    }
}
/** Shows or hides the "select all" item while the action mode is active. */
public void showSelectAll(boolean show) {
    if (mActionMode != null) {
        mSelectAll.setVisible(show);
    }
}
/** Shows "mark as read" (and hides "mark as unread") or vice versa. */
public void showMarkAsRead(boolean show) {
    if (mActionMode != null) {
        mMarkAsRead.setVisible(show);
        mMarkAsUnread.setVisible(!show);
    }
}
/** Shows "flag" (and hides "unflag") or vice versa. */
public void showFlag(boolean show) {
    if (mActionMode != null) {
        mFlag.setVisible(show);
        mUnflag.setVisible(!show);
    }
}
@Override
public boolean onActionItemClicked(ActionMode mode, MenuItem item) {
    /*
     * In the following we assume that we can't move or copy
     * mails to the same folder. Also that spam isn't available if we are
     * in the spam folder,same for archive.
     *
     * This is the case currently so safe assumption.
     */
    int itemId = item.getItemId();
    if (itemId == R.id.delete) {
        onDelete(getCheckedMessages());
        mSelectedCount = 0;
    } else if (itemId == R.id.mark_as_read) {
        setFlagForSelected(Flag.SEEN, true);
    } else if (itemId == R.id.mark_as_unread) {
        setFlagForSelected(Flag.SEEN, false);
    } else if (itemId == R.id.flag) {
        setFlagForSelected(Flag.FLAGGED, true);
    } else if (itemId == R.id.unflag) {
        setFlagForSelected(Flag.FLAGGED, false);
    } else if (itemId == R.id.select_all) {
        selectAll();
    } else if (itemId == R.id.archive) {
        // Only present in the menu if the account supports archiving.
        onArchive(getCheckedMessages());
        mSelectedCount = 0;
    } else if (itemId == R.id.spam) {
        onSpam(getCheckedMessages());
        mSelectedCount = 0;
    } else if (itemId == R.id.move) {
        onMove(getCheckedMessages());
        mSelectedCount = 0;
    } else if (itemId == R.id.copy) {
        onCopy(getCheckedMessages());
        mSelectedCount = 0;
    }
    if (mSelectedCount == 0) {
        // The action consumed the whole selection; leave the action mode.
        mActionMode.finish();
    }
    return true;
}
}
/**
 * Handles the positive button of the confirmation dialogs.
 *
 * @param dialogId
 *         Identifies which confirmation dialog was accepted.
 */
@Override
public void doPositiveClick(int dialogId) {
    switch (dialogId) {
        case R.id.dialog_confirm_spam: {
            onSpamConfirmed(mActiveMessages);
            // No further need for this reference
            mActiveMessages = null;
            break;
        }
        case R.id.dialog_confirm_delete: {
            onDeleteConfirmed(mActiveMessages);
            // BUG FIX: this branch previously cleared mActiveMessage (the
            // message shown in the split view) instead of mActiveMessages
            // (the messages awaiting the confirmed delete), leaking the list
            // and clobbering the unrelated active-message reference. Clear
            // the correct field, matching the spam case and doNegativeClick().
            mActiveMessages = null;
            break;
        }
    }
}
@Override
public void doNegativeClick(int dialogId) {
    if (dialogId == R.id.dialog_confirm_spam || dialogId == R.id.dialog_confirm_delete) {
        // The pending operation was declined; drop the message references.
        mActiveMessages = null;
    }
}
@Override
public void dialogCancelled(int dialogId) {
    // Treat dismissing the dialog exactly like pressing its negative button.
    doNegativeClick(dialogId);
}
/**
 * Triggers a mail check appropriate for the current view: a single folder
 * sync, an all-accounts poll, or a poll of the selected accounts.
 */
public void checkMail() {
    if (isSingleAccountMode() && isSingleFolderMode()) {
        // Exactly one folder of one account: sync it and flush its outbox.
        mController.synchronizeMailbox(mAccount, mFolderName, mListener, null);
        mController.sendPendingMessages(mAccount, mListener);
        return;
    }
    if (mAllAccounts) {
        // Unified view: a null account means "check everything".
        mController.checkMail(mContext, null, true, true, mListener);
        return;
    }
    // A specific subset of accounts.
    for (String accountUuid : mAccountUuids) {
        mController.checkMail(mContext, mPreferences.getAccount(accountUuid),
                true, true, mListener);
    }
}
/**
 * We need to do some special clean up when leaving a remote search result screen. If no
 * remote search is in progress, this method does nothing special.
 */
@Override
public void onStop() {
    // If we represent a remote search, then kill that before going back.
    if (isRemoteSearch() && mRemoteSearchFuture != null) {
        try {
            Log.i(Ertebat.LOG_TAG, "Remote search in progress, attempting to abort...");
            // Canceling the future stops any message fetches in progress.
            final boolean cancelSuccess = mRemoteSearchFuture.cancel(true); // mayInterruptIfRunning = true
            if (!cancelSuccess) {
                Log.e(Ertebat.LOG_TAG, "Could not cancel remote search future.");
            }
            // Closing the folder will kill off the connection if we're mid-search.
            final Account searchAccount = mAccount;
            final Folder remoteFolder = mCurrentFolder.folder;
            remoteFolder.close();
            // Send a remoteSearchFinished() message for good measure.
            mListener.remoteSearchFinished(searchAccount, mCurrentFolder.name, 0, null);
        } catch (Exception e) {
            // Since the user is going back, log and squash any exceptions.
            Log.e(Ertebat.LOG_TAG, "Could not abort remote search before going back", e);
        }
    }
    super.onStop();
}
/**
 * Builds a reference for every message currently in the adapter.
 *
 * @return one {@link MessageReference} per adapter row, in list order.
 */
public ArrayList<MessageReference> getMessageReferences() {
    int count = mAdapter.getCount();
    // Presize to avoid intermediate re-allocations.
    ArrayList<MessageReference> messageRefs = new ArrayList<MessageReference>(count);
    for (int i = 0; i < count; i++) {
        // Reuse the one place that knows how to turn a cursor row into a
        // reference instead of duplicating the column logic here.
        messageRefs.add(getReferenceForPosition(i));
    }
    return messageRefs;
}
/** Selects every message currently shown in the list. */
public void selectAll() {
    setSelectionState(true);
}
/** Moves the keyboard selection one row up, if possible. */
public void onMoveUp() {
    int position = mListView.getSelectedItemPosition();
    if (position == AdapterView.INVALID_POSITION || mListView.isInTouchMode()) {
        // In touch mode there is no selection; fall back to the first visible row.
        position = mListView.getFirstVisiblePosition();
    }
    if (position > 0) {
        mListView.setSelection(position - 1);
    }
}
/** Moves the keyboard selection one row down, if a next row exists. */
public void onMoveDown() {
    int currentPosition = mListView.getSelectedItemPosition();
    if (currentPosition == AdapterView.INVALID_POSITION || mListView.isInTouchMode()) {
        // No keyboard selection in touch mode; start from the first visible row.
        currentPosition = mListView.getFirstVisiblePosition();
    }
    // BUG FIX: the previous bound (currentPosition < mListView.getCount())
    // permitted setSelection(getCount()) — one past the last valid position —
    // when the last row was already selected. Only move if a next row exists.
    if (currentPosition < mListView.getCount() - 1) {
        mListView.setSelection(currentPosition + 1);
    }
}
/**
 * Opens the message just before the given one.
 *
 * @return {@code true} if a previous message existed and was opened.
 */
public boolean openPrevious(MessageReference messageReference) {
    int position = getPosition(messageReference);
    if (position > 0) {
        openMessageAtPosition(position - 1);
        return true;
    }
    // Not found, or already at the first message.
    return false;
}
/**
 * Opens the message just after the given one.
 *
 * @return {@code true} if a next message existed and was opened.
 */
public boolean openNext(MessageReference messageReference) {
    int position = getPosition(messageReference);
    if (position >= 0 && position != mAdapter.getCount() - 1) {
        openMessageAtPosition(position + 1);
        return true;
    }
    // Not found, or already at the last message.
    return false;
}
/** @return {@code true} if the list is empty or the reference is the first row. */
public boolean isFirst(MessageReference messageReference) {
    if (mAdapter.isEmpty()) {
        return true;
    }
    return messageReference.equals(getReferenceForPosition(0));
}
/** @return {@code true} if the list is empty or the reference is the last row. */
public boolean isLast(MessageReference messageReference) {
    if (mAdapter.isEmpty()) {
        return true;
    }
    int lastPosition = mAdapter.getCount() - 1;
    return messageReference.equals(getReferenceForPosition(lastPosition));
}
/** Builds a {@link MessageReference} from the adapter row at {@code position}. */
private MessageReference getReferenceForPosition(int position) {
    Cursor row = (Cursor) mAdapter.getItem(position);
    MessageReference reference = new MessageReference();
    reference.accountUuid = row.getString(ACCOUNT_UUID_COLUMN);
    reference.folderName = row.getString(FOLDER_NAME_COLUMN);
    reference.uid = row.getString(UID_COLUMN);
    return reference;
}
/** Scrolls the row into view if needed, then asks the handler to open it. */
private void openMessageAtPosition(int position) {
    // Scroll message into view if necessary
    int listViewPosition = adapterToListViewPosition(position);
    if (listViewPosition != AdapterView.INVALID_POSITION &&
            (listViewPosition < mListView.getFirstVisiblePosition() ||
            listViewPosition > mListView.getLastVisiblePosition())) {
        mListView.setSelection(listViewPosition);
    }
    MessageReference ref = getReferenceForPosition(position);
    // For some reason the mListView.setSelection() above won't do anything when we call
    // onOpenMessage() (and consequently mAdapter.notifyDataSetChanged()) right away. So we
    // defer the call using MessageListHandler.
    mHandler.openMessage(ref);
}
/**
 * Finds the adapter position of the given message.
 *
 * @return the position, or -1 if the message is not in the current list.
 */
private int getPosition(MessageReference messageReference) {
    int count = mAdapter.getCount();
    for (int position = 0; position < count; position++) {
        Cursor row = (Cursor) mAdapter.getItem(position);
        String accountUuid = row.getString(ACCOUNT_UUID_COLUMN);
        String folderName = row.getString(FOLDER_NAME_COLUMN);
        String uid = row.getString(UID_COLUMN);
        boolean matches = accountUuid.equals(messageReference.accountUuid)
                && folderName.equals(messageReference.folderName)
                && uid.equals(messageReference.uid);
        if (matches) {
            return position;
        }
    }
    return -1;
}
/**
 * Callbacks this fragment invokes on its host (activity) for navigation,
 * title/progress updates, and message actions.
 */
public interface MessageListFragmentListener {
    void enableActionBarProgress(boolean enable);
    void setMessageListProgress(int level);
    void showThread(Account account, String folderName, long rootId);
    void showMoreFromSameSender(String senderAddress);
    void onResendMessage(Message message);
    void onForward(Message message);
    void onReply(Message message);
    void onReplyAll(Message message);
    void openMessage(MessageReference messageReference);
    void setMessageListTitle(String title);
    void setMessageListSubTitle(String subTitle);
    void setUnreadCount(int unread);
    void onCompose(Account account);
    // Returns whether a search UI could be started for the given scope.
    boolean startSearch(Account account, String folderName);
    void remoteSearchStarted();
    void goBack();
    void updateMenu();
}
/**
 * Reverses the current sort order.
 * NOTE(review): relies on changeSort() toggling direction when invoked with
 * the already-active sort type — confirm in changeSort's implementation.
 */
public void onReverseSort() {
    changeSort(mSortType);
}
/** @return the message backing the currently selected list row, or {@code null}. */
private Message getSelectedMessage() {
    return getMessageAtPosition(getAdapterPositionForSelectedMessage());
}
/** @return the adapter position of the selected list row (may be invalid). */
private int getAdapterPositionForSelectedMessage() {
    return listViewToAdapterPosition(mListView.getSelectedItemPosition());
}
/**
 * Linearly scans the adapter for the row with the given unique id.
 *
 * @return its position, or {@code AdapterView.INVALID_POSITION} if absent.
 */
private int getPositionForUniqueId(long uniqueId) {
    int count = mAdapter.getCount();
    for (int position = 0; position < count; position++) {
        Cursor row = (Cursor) mAdapter.getItem(position);
        if (row.getLong(mUniqueIdColumn) == uniqueId) {
            return position;
        }
    }
    return AdapterView.INVALID_POSITION;
}
/**
 * Loads the {@link Message} backing the given adapter row.
 *
 * @return the message, or {@code null} if the position is invalid or the
 *         folder lookup fails with a MessagingException (which is logged).
 */
private Message getMessageAtPosition(int adapterPosition) {
    if (adapterPosition == AdapterView.INVALID_POSITION) {
        return null;
    }
    Cursor cursor = (Cursor) mAdapter.getItem(adapterPosition);
    String uid = cursor.getString(UID_COLUMN);
    Account account = getAccountFromCursor(cursor);
    long folderId = cursor.getLong(FOLDER_ID_COLUMN);
    Folder folder = getFolderById(account, folderId);
    try {
        return folder.getMessage(uid);
    } catch (MessagingException e) {
        Log.e(Ertebat.LOG_TAG, "Something went wrong while fetching a message", e);
    }
    return null;
}
/** Resolves all currently selected rows to their backing {@link Message}s. */
private List<Message> getCheckedMessages() {
    List<Message> messages = new ArrayList<Message>(mSelected.size());
    int count = mAdapter.getCount();
    for (int position = 0; position < count; position++) {
        Cursor row = (Cursor) mAdapter.getItem(position);
        if (!mSelected.contains(row.getLong(mUniqueIdColumn))) {
            continue;
        }
        Message message = getMessageAtPosition(position);
        if (message != null) {
            messages.add(message);
        }
    }
    return messages;
}
/** Deletes the currently selected message, if any. */
public void onDelete() {
    Message message = getSelectedMessage();
    if (message != null) {
        onDelete(Collections.singletonList(message));
    }
}
/** Toggles the checked state of the currently highlighted list row. */
public void toggleMessageSelect() {
    toggleMessageSelect(mListView.getSelectedItemPosition());
}
/** Flips the flagged state of the selected message. */
public void onToggleFlagged() {
    onToggleFlag(Flag.FLAGGED, FLAGGED_COLUMN);
}
/** Flips the read/unread state of the selected message. */
public void onToggleRead() {
    onToggleFlag(Flag.SEEN, READ_COLUMN);
}
/**
 * Inverts the given flag on the selected message, reading the current state
 * from the cursor column {@code flagColumn} (1 means set).
 */
private void onToggleFlag(Flag flag, int flagColumn) {
    int adapterPosition = getAdapterPositionForSelectedMessage();
    if (adapterPosition != ListView.INVALID_POSITION) {
        Cursor row = (Cursor) mAdapter.getItem(adapterPosition);
        boolean currentState = (row.getInt(flagColumn) == 1);
        setFlag(adapterPosition, flag, !currentState);
    }
}
/** Moves the currently selected message, if any. */
public void onMove() {
    Message message = getSelectedMessage();
    if (message == null) {
        return;
    }
    onMove(message);
}
/** Archives the currently selected message, if any. */
public void onArchive() {
    Message message = getSelectedMessage();
    if (message == null) {
        return;
    }
    onArchive(message);
}
/** Copies the currently selected message, if any. */
public void onCopy() {
    Message message = getSelectedMessage();
    if (message == null) {
        return;
    }
    onCopy(message);
}
/** @return {@code true} if the displayed folder is the account's outbox. */
public boolean isOutbox() {
    return (mFolderName != null && mFolderName.equals(mAccount.getOutboxFolderName()));
}
/** @return {@code true} if the displayed folder is the special local error folder. */
public boolean isErrorFolder() {
    return Ertebat.ERROR_FOLDER_NAME.equals(mFolderName);
}
/** @return {@code true} if the displayed folder is backed by the server. */
public boolean isRemoteFolder() {
    if (mSearch.isManualSearch() || isOutbox() || isErrorFolder()) {
        return false;
    }
    if (mController.isMoveCapable(mAccount)) {
        // Move-capable (non-POP3) account: every folder lives on the server.
        return true;
    }
    // For POP3 accounts only the Inbox is a remote folder.
    return (mFolderName != null && mFolderName.equals(mAccount.getInboxFolderName()));
}
/** @return {@code true} if this list displays a user-initiated search. */
public boolean isManualSearch() {
    return mSearch.isManualSearch();
}
/**
 * @return {@code true} if the account's remote store supports expunge.
 *         Any failure while obtaining the remote store is deliberately
 *         treated as "not capable".
 */
public boolean isAccountExpungeCapable() {
    try {
        return (mAccount != null && mAccount.getRemoteStore().isExpungeCapable());
    } catch (Exception e) {
        return false;
    }
}
/** Starts a remote search, or tells the user it needs connectivity. */
public void onRemoteSearch() {
    if (!mHasConnectivity) {
        // Remote search is useless without the network.
        Toast.makeText(getActivity(), getText(R.string.remote_search_unavailable_no_network),
                Toast.LENGTH_SHORT).show();
        return;
    }
    onRemoteSearchRequested();
}
/** @return {@code true} if this list shows remote search results. */
public boolean isRemoteSearch() {
    return mRemoteSearchPerformed;
}
/**
 * A remote search may be offered only for a manual, single-folder search
 * that has not already been performed, on an account that permits it.
 */
public boolean isRemoteSearchAllowed() {
    boolean eligibleSearch =
            mSearch.isManualSearch() && !mRemoteSearchPerformed && mSingleFolderMode;
    if (!eligibleSearch) {
        return false;
    }
    final Account searchAccount = mAccount;
    return (searchAccount != null) && searchAccount.allowRemoteSearch();
}
/** Forwards a search request to the host, scoped to the current folder if any. */
public boolean onSearchRequested() {
    String folderName = (mCurrentFolder != null) ? mCurrentFolder.name : null;
    return mFragmentListener.startSearch(mAccount, folderName);
}
/**
 * Creates the cursor loader for one account; the loader id indexes into
 * {@code mAccountUuids}.
 */
@Override
public Loader<Cursor> onCreateLoader(int id, Bundle args) {
    String accountUuid = mAccountUuids[id];
    Account account = mPreferences.getAccount(accountUuid);
    String threadId = getThreadId(mSearch);
    Uri uri;
    String[] projection;
    boolean needConditions;
    if (threadId != null) {
        // Single-thread display: the URI itself pins the message set.
        uri = Uri.withAppendedPath(EmailProvider.CONTENT_URI, "account/" + accountUuid + "/thread/" + threadId);
        projection = PROJECTION;
        needConditions = false;
    } else if (mThreadedList) {
        // Threaded overview of the folder(s).
        uri = Uri.withAppendedPath(EmailProvider.CONTENT_URI, "account/" + accountUuid + "/messages/threaded");
        projection = THREADED_PROJECTION;
        needConditions = true;
    } else {
        // Flat message list.
        uri = Uri.withAppendedPath(EmailProvider.CONTENT_URI, "account/" + accountUuid + "/messages");
        projection = PROJECTION;
        needConditions = true;
    }
    StringBuilder query = new StringBuilder();
    List<String> queryArgs = new ArrayList<String>();
    if (needConditions) {
        // Keep the currently displayed ("active") message in the result set
        // even if it no longer matches the search conditions.
        boolean selectActive = mActiveMessage != null && mActiveMessage.accountUuid.equals(accountUuid);
        if (selectActive) {
            query.append("(" + MessageColumns.UID + " = ? AND " + SpecialColumns.FOLDER_NAME + " = ?) OR (");
            queryArgs.add(mActiveMessage.uid);
            queryArgs.add(mActiveMessage.folderName);
        }
        SqlQueryBuilder.buildWhereClause(account, mSearch.getConditions(), query, queryArgs);
        if (selectActive) {
            query.append(')');
        }
    }
    String selection = query.toString();
    String[] selectionArgs = queryArgs.toArray(new String[0]);
    String sortOrder = buildSortOrder();
    return new CursorLoader(getActivity(), uri, projection, selection, selectionArgs,
            sortOrder);
}
/**
 * Extracts the thread id from the search conditions.
 *
 * @return the thread id value, or {@code null} if none of the leaf
 *         conditions targets {@code Searchfield.THREAD_ID}.
 */
private String getThreadId(LocalSearch search) {
    for (ConditionsTreeNode node : search.getLeafSet()) {
        SearchCondition condition = node.mCondition;
        if (condition.field == Searchfield.THREAD_ID) {
            return condition.value;
        }
    }
    return null;
}
/**
 * Builds the SQL ORDER BY clause from the current sort type/direction, with
 * date as secondary sort (where applicable) and message id as tie-breaker.
 */
private String buildSortOrder() {
    String sortColumn = MessageColumns.ID;
    switch (mSortType) {
        case SORT_ARRIVAL: {
            sortColumn = MessageColumns.INTERNAL_DATE;
            break;
        }
        case SORT_ATTACHMENT: {
            // Boolean SQL expression so rows with attachments group together.
            sortColumn = "(" + MessageColumns.ATTACHMENT_COUNT + " < 1)";
            break;
        }
        case SORT_FLAGGED: {
            sortColumn = "(" + MessageColumns.FLAGGED + " != 1)";
            break;
        }
        case SORT_SENDER: {
            //FIXME
            sortColumn = MessageColumns.SENDER_LIST;
            break;
        }
        case SORT_SUBJECT: {
            sortColumn = MessageColumns.SUBJECT + " COLLATE NOCASE";
            break;
        }
        case SORT_UNREAD: {
            sortColumn = MessageColumns.READ;
            break;
        }
        case SORT_DATE:
        default: {
            sortColumn = MessageColumns.DATE;
        }
    }
    String sortDirection = (mSortAscending) ? " ASC" : " DESC";
    String secondarySort;
    if (mSortType == SortType.SORT_DATE || mSortType == SortType.SORT_ARRIVAL) {
        // The primary sort is already date-based; no secondary date sort needed.
        secondarySort = "";
    } else {
        secondarySort = MessageColumns.DATE + ((mSortDateAscending) ? " ASC, " : " DESC, ");
    }
    // Message id last so the ordering is always stable.
    String sortOrder = sortColumn + sortDirection + ", " + secondarySort +
            MessageColumns.ID + " DESC";
    return sortOrder;
}
/**
 * Installs the freshly loaded cursor: merges per-account cursors, refreshes
 * the title for thread display, prunes stale selection/context-menu state,
 * and swaps the adapter's cursor.
 */
@Override
public void onLoadFinished(Loader<Cursor> loader, Cursor data) {
    if (mIsThreadDisplay && data.getCount() == 0) {
        // The displayed thread no longer exists; leave the thread view.
        mHandler.goBack();
        return;
    }
    // Remove the "Loading..." view
    mPullToRefreshView.setEmptyView(null);
    setPullToRefreshEnabled(isPullToRefreshAllowed());
    final int loaderId = loader.getId();
    mCursors[loaderId] = data;
    mCursorValid[loaderId] = true;
    Cursor cursor;
    if (mCursors.length > 1) {
        // Multiple accounts: merge the per-account cursors into one list.
        cursor = new MergeCursorWithUniqueId(mCursors, getComparator());
        mUniqueIdColumn = cursor.getColumnIndex("_id");
    } else {
        cursor = data;
        mUniqueIdColumn = ID_COLUMN;
    }
    if (mIsThreadDisplay) {
        if (cursor.moveToFirst()) {
            // Use the (stripped) thread subject as the screen title.
            mTitle = cursor.getString(SUBJECT_COLUMN);
            if (!StringUtils.isNullOrEmpty(mTitle)) {
                mTitle = Utility.stripSubject(mTitle);
            }
            if (StringUtils.isNullOrEmpty(mTitle)) {
                mTitle = getString(R.string.general_no_subject);
            }
            updateTitle();
        } else {
            //TODO: empty thread view -> return to full message list
        }
    }
    cleanupSelected(cursor);
    updateContextMenu(cursor);
    mAdapter.swapCursor(cursor);
    resetActionMode();
    computeBatchDirection();
    if (isLoadFinished()) {
        if (mSavedListState != null) {
            mHandler.restoreListPosition();
        }
        mFragmentListener.updateMenu();
    }
}
/** @return {@code true} once every account's loader has delivered a cursor. */
public boolean isLoadFinished() {
    if (mCursorValid == null) {
        return false;
    }
    for (boolean cursorValid : mCursorValid) {
        if (!cursorValid) {
            // At least one loader hasn't finished yet.
            return false;
        }
    }
    return true;
}
/**
 * Close the context menu when the message it was opened for is no longer in the message list.
 */
private void updateContextMenu(Cursor cursor) {
    if (mContextMenuUniqueId == 0) {
        return;
    }
    // If the message is still present there is nothing to do.
    cursor.moveToPosition(-1);
    while (cursor.moveToNext()) {
        if (cursor.getLong(mUniqueIdColumn) == mContextMenuUniqueId) {
            return;
        }
    }
    // The message disappeared; dismiss the now-stale context menu.
    mContextMenuUniqueId = 0;
    Activity activity = getActivity();
    if (activity != null) {
        activity.closeContextMenu();
    }
}
/** Drops selected ids that no longer exist in the freshly loaded cursor. */
private void cleanupSelected(Cursor cursor) {
    if (mSelected.size() == 0) {
        return;
    }
    Set<Long> stillPresent = new HashSet<Long>();
    cursor.moveToPosition(-1);
    while (cursor.moveToNext()) {
        long uniqueId = cursor.getLong(mUniqueIdColumn);
        if (mSelected.contains(uniqueId)) {
            stillPresent.add(uniqueId);
        }
    }
    mSelected = stillPresent;
}
/**
 * Starts or finishes the action mode when necessary.
 */
private void resetActionMode() {
    if (mSelected.size() == 0) {
        // Nothing selected any more: leave selection mode if it was active.
        if (mActionMode != null) {
            mActionMode.finish();
        }
        return;
    }
    if (mActionMode == null) {
        mActionMode = getActivity().startActionMode(mActionModeCallback);
    }
    // The cursor may have changed, so the count and title must be refreshed.
    recalculateSelectionCount();
    updateActionModeTitle();
}
/**
 * Recalculates the selection count.
 *
 * <p>
 * For non-threaded lists this is simply the number of visibly selected messages. If threaded
 * view is enabled this method counts the number of messages in the selected threads.
 * </p>
 */
private void recalculateSelectionCount() {
    if (!mThreadedList) {
        mSelectedCount = mSelected.size();
        return;
    }
    int total = 0;
    int count = mAdapter.getCount();
    for (int i = 0; i < count; i++) {
        Cursor row = (Cursor) mAdapter.getItem(i);
        if (mSelected.contains(row.getLong(mUniqueIdColumn))) {
            // A "thread" row with 0 or 1 messages still counts as one message.
            int threadCount = row.getInt(THREAD_COUNT_COLUMN);
            total += Math.max(threadCount, 1);
        }
    }
    mSelectedCount = total;
}
@Override
public void onLoaderReset(Loader<Cursor> loader) {
    // The cursor is about to become invalid: forget selection and detach it.
    mSelected.clear();
    mAdapter.swapCursor(null);
}
/** Resolves the {@link Account} the given cursor row belongs to. */
private Account getAccountFromCursor(Cursor cursor) {
    return mPreferences.getAccount(cursor.getString(ACCOUNT_UUID_COLUMN));
}
/** Clears the handle to the (now finished) remote search task. */
private void remoteSearchFinished() {
    mRemoteSearchFuture = null;
}
/**
 * Mark a message as 'active'.
 *
 * <p>
 * The active message is the one currently displayed in the message view portion of the split
 * view.
 * </p>
 *
 * @param messageReference
 *          {@code null} to not mark any message as being 'active'.
 */
public void setActiveMessage(MessageReference messageReference) {
    mActiveMessage = messageReference;
    // Reload message list with modified query that always includes the active message
    if (isAdded()) {
        restartLoader();
    }
    // Redraw list immediately
    if (mAdapter != null) {
        mAdapter.notifyDataSetChanged();
    }
}
/** @return {@code true} if the list shows exactly one account. */
public boolean isSingleAccountMode() {
    return mSingleAccountMode;
}
/** @return {@code true} if the list shows exactly one folder. */
public boolean isSingleFolderMode() {
    return mSingleFolderMode;
}
/** @return {@code true} once the fragment has finished its initialization. */
public boolean isInitialized() {
    return mInitialized;
}
/** "Mark all as read" only makes sense for one folder of one account. */
public boolean isMarkAllAsReadSupported() {
    return (isSingleAccountMode() && isSingleFolderMode());
}
/** Marks every message of the displayed folder as read, when supported. */
public void markAllAsRead() {
    if (!isMarkAllAsReadSupported()) {
        return;
    }
    mController.markAllMessagesRead(mAccount, mFolderName);
}
/** Checking mail is possible unless we show a single, purely local folder. */
public boolean isCheckMailSupported() {
    return (mAllAccounts || !isSingleAccountMode() || !isSingleFolderMode() ||
            isRemoteFolder());
}
/** Manual-search views never poll for new mail. */
private boolean isCheckMailAllowed() {
    return (!isManualSearch() && isCheckMailSupported());
}
/** Pull-to-refresh triggers either a remote search or a mail check. */
private boolean isPullToRefreshAllowed() {
    return (isRemoteSearchAllowed() || isCheckMailAllowed());
}
}
|
package amazon.robot.test;
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Scanner;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class Solution {
    /** Robot x coordinate relative to origin. East = +1, West = -1. */
    static private int x = 0;
    /** Robot y coordinate relative to origin. North = +1, South = -1. */
    static private int y = 0;
    /**
     * Validates the whole input: a sequence of commands, each made of an
     * optional magnitude (digits), one direction letter (N, S, E or W) and
     * zero or more trailing 'X' undo markers.
     */
    static Pattern INPUT_VALIDATOR = Pattern.compile("^(\\d*[NSEW]X*)*$");
    /** Splits a validated input string into its individual commands. */
    static Pattern SPLIT_GROUP = Pattern.compile("(\\d*[NSWE]X*)");
    /**
     * Splits one command into magnitude, direction and undo markers.
     * Replaces the four duplicated per-direction patterns below, which are
     * kept only for backward compatibility with existing callers.
     */
    private static final Pattern SPLIT_COMMAND = Pattern.compile("(\\d*)([NSEW])(X*)");
    static Pattern SPLIT_N_DIRECTION = Pattern.compile("(\\d*)N(X*)");
    static Pattern SPLIT_S_DIRECTION = Pattern.compile("(\\d*)S(X*)");
    static Pattern SPLIT_W_DIRECTION = Pattern.compile("(\\d*)W(X*)");
    static Pattern SPLIT_E_DIRECTION = Pattern.compile("(\\d*)E(X*)");
    /**
     * One decoded command (Command pattern): the (x, y) displacement it
     * produces and how many commands its 'X' markers cancel.
     */
    private static class CoordinatesRobot {
        public int _x = 0;
        public int _y = 0;
        /** Number of commands (this one included) cancelled by 'X' markers. */
        public int undo = 0;
        @Override
        public String toString() {
            return "(" + _x + "," + _y + ") " + undo;
        }
    }
    /** Moves the robot back to the origin (0, 0). */
    private static void toOrigin() {
        x = 0;
        y = 0;
    }
    /**
     * Executes a command string and reports the robot's final position.
     *
     * <p>Runtime complexity is O(n): each command is visited at most twice
     * (once when parsed, once when removed). Space complexity is O(n) for
     * the command list.</p>
     *
     * @param s the command string, e.g. {@code "3E2NX"}
     * @return the destination formatted as {@code "(x, y)"}; any invalid
     *         input yields the sentinel {@code "(999, 999)"}
     */
    static String moveRobot(String s) {
        toOrigin();
        try {
            // Validate empty command
            if (s == null || s.isEmpty()) {
                throw new IllegalArgumentException("Invalid Command ["
                        + s + "].");
            }
            // Validate the Arguments to Operate
            validateCommandInput(s);
            calculateDestination(s);
        } catch (Exception e) {
            // Every failure mode maps to the sentinel position.
            x = 999;
            y = 999;
        }
        return getPoints();
    }
    /**
     * Processes the commands from last to first so that 'X' markers can
     * cancel the commands that precede them.
     */
    private static void calculateDestination(String position) {
        Matcher m = SPLIT_GROUP.matcher(position);
        List<String> commands = new ArrayList<String>();
        while (m.find()) {
            commands.add(m.group());
        }
        while (!commands.isEmpty()) {
            String lastCmd = commands.get(commands.size() - 1);
            CoordinatesRobot c = extractPoint(lastCmd);
            if (c.undo == 0) {
                // Plain movement: apply its displacement.
                commands.remove(commands.size() - 1);
                x += c._x;
                y += c._y;
            } else {
                // Undo markers cancel this command and the ones before it.
                while (!commands.isEmpty() && c.undo > 0) {
                    commands.remove(commands.size() - 1);
                    c.undo--;
                }
            }
        }
    }
    /**
     * Decodes a single command into its displacement and undo count.
     *
     * <p>All four directions now share one pattern instead of the previous
     * four copy-pasted branches; behavior is unchanged.</p>
     *
     * @param cmd a single command such as {@code "7NXX"}
     * @return the decoded command
     * @throws IllegalArgumentException if an explicit magnitude is 0
     */
    private static CoordinatesRobot extractPoint(String cmd) {
        CoordinatesRobot c = new CoordinatesRobot();
        Matcher m = SPLIT_COMMAND.matcher(cmd);
        if (m.find()) {
            int magnitude = 1;
            String magnitudeValue = m.group(1);
            if (!magnitudeValue.isEmpty()) {
                magnitude = Integer.parseInt(magnitudeValue);
                if (magnitude == 0) {
                    throw new IllegalArgumentException("Magnitude can't be 0");
                }
            }
            switch (m.group(2)) {
                case "N": c._y = magnitude; break;
                case "S": c._y = -magnitude; break;
                case "E": c._x = magnitude; break;
                case "W": c._x = -magnitude; break;
                default: break;
            }
            c.undo = m.group(3).length();
        }
        return c;
    }
    /** @return the current position formatted as {@code "(x, y)"} */
    private static String getPoints() {
        return "(" + x + ", " + y + ")";
    }
    /**
     * Rejects any input that does not match the command grammar.
     *
     * @throws IllegalArgumentException on malformed input
     */
    private static void validateCommandInput(String position) {
        Matcher m = INPUT_VALIDATOR.matcher(position);
        if (!m.find()) {
            throw new IllegalArgumentException("Invalid Command [" + position
                    + "].");
        }
    }
    /** Ad-hoc smoke test used during development; prints to stdout. */
    public static final void runTests() {
        System.out.println(moveRobot("N0W"));
    }
    public static void main(String[] args) throws IOException {
        runTests();
    }
}
|
/**
* Copyright (C) 2015 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
/**
* Provide the ability to extract data using textual expressions.
*/
package com.opengamma.strata.report.framework.expression;
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.contrib.streaming.state;
import org.apache.flink.api.common.state.ListState;
import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.api.common.state.State;
import org.apache.flink.api.common.state.StateDescriptor;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.core.memory.DataInputDeserializer;
import org.apache.flink.core.memory.DataOutputSerializer;
import org.apache.flink.runtime.state.RegisteredKeyValueStateBackendMetaInfo;
import org.apache.flink.runtime.state.StateSnapshotTransformer;
import org.apache.flink.runtime.state.internal.InternalListState;
import org.apache.flink.util.FlinkRuntimeException;
import org.apache.flink.util.Preconditions;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.RocksDBException;
import javax.annotation.Nullable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import static org.apache.flink.runtime.state.StateSnapshotTransformer.CollectionStateSnapshotTransformer.TransformStrategy.STOP_ON_FIRST_INCLUDED;
/**
* {@link ListState} implementation that stores state in RocksDB.
*
* <p>{@link RocksDBStateBackend} must ensure that we set the
* {@link org.rocksdb.StringAppendOperator} on the column family that we use for our state since
* we use the {@code merge()} call.
*
* @param <K> The type of the key.
* @param <N> The type of the namespace.
* @param <V> The type of the values in the list state.
*/
class RocksDBListState<K, N, V>
extends AbstractRocksDBState<K, N, List<V>, ListState<V>>
implements InternalListState<K, N, V> {
/** Serializer for the individual elements of the stored list. */
private final TypeSerializer<V> elementSerializer;
/**
 * Separator of StringAppendTestOperator in RocksDB.
 */
private static final byte DELIMITER = ',';
/**
 * Creates a new {@code RocksDBListState}.
 *
 * @param columnFamily The RocksDB column family that this state is associated to.
 * @param namespaceSerializer The serializer for the namespace.
 * @param valueSerializer The serializer for the state (the whole list).
 * @param defaultValue The default value for the state.
 * @param elementSerializer The serializer for elements of the list state.
 * @param backend The backend for which this state is bound to.
 */
private RocksDBListState(
        ColumnFamilyHandle columnFamily,
        TypeSerializer<N> namespaceSerializer,
        TypeSerializer<List<V>> valueSerializer,
        List<V> defaultValue,
        TypeSerializer<V> elementSerializer,
        RocksDBKeyedStateBackend<K> backend) {
    super(columnFamily, namespaceSerializer, valueSerializer, defaultValue, backend);
    this.elementSerializer = elementSerializer;
}
/** The key serializer is owned by the keyed backend. */
@Override
public TypeSerializer<K> getKeySerializer() {
    return backend.getKeySerializer();
}
@Override
public TypeSerializer<N> getNamespaceSerializer() {
    return namespaceSerializer;
}
@Override
public TypeSerializer<List<V>> getValueSerializer() {
    return valueSerializer;
}
/** Reading the list simply delegates to {@link #getInternal()}. */
@Override
public Iterable<V> get() {
    return getInternal();
}
/**
 * Reads the serialized list for the current key/namespace from RocksDB and
 * deserializes it; {@code null} when no entry exists.
 */
@Override
public List<V> getInternal() {
    try {
        // The composite key (key group + key + namespace) addresses the list.
        writeCurrentKeyWithGroupAndNamespace();
        byte[] serializedKey = dataOutputView.getCopyOfBuffer();
        byte[] serializedValue = backend.db.get(columnFamily, serializedKey);
        return deserializeList(serializedValue);
    } catch (IOException | RocksDBException e) {
        throw new FlinkRuntimeException("Error while retrieving data from RocksDB", e);
    }
}
/**
 * Turns the raw RocksDB value bytes back into a list of elements.
 *
 * @return the elements, or {@code null} if no bytes were stored.
 */
private List<V> deserializeList(
        byte[] valueBytes) {
    if (valueBytes == null) {
        return null;
    }
    dataInputView.setBuffer(valueBytes);
    List<V> elements = new ArrayList<>();
    for (V element = deserializeNextElement(dataInputView, elementSerializer);
            element != null;
            element = deserializeNextElement(dataInputView, elementSerializer)) {
        elements.add(element);
    }
    return elements;
}
/**
 * Deserializes the next element from the buffer, consuming the delimiter
 * byte that RocksDB's string-append merge operator inserts between entries.
 *
 * @return the next element, or {@code null} when the buffer is exhausted.
 */
private static <V> V deserializeNextElement(DataInputDeserializer in, TypeSerializer<V> elementSerializer) {
    try {
        if (in.available() > 0) {
            V element = elementSerializer.deserialize(in);
            if (in.available() > 0) {
                in.readByte();
            }
            return element;
        }
    } catch (IOException e) {
        // BUG FIX: propagate the IOException as the cause instead of
        // discarding it, so the underlying deserialization failure is
        // visible in the stack trace.
        throw new FlinkRuntimeException("Unexpected list element deserialization failure", e);
    }
    return null;
}
/**
 * Appends one element by issuing a RocksDB merge(); the configured
 * string-append operator concatenates it to the stored list.
 */
@Override
public void add(V value) {
    Preconditions.checkNotNull(value, "You cannot add null to a ListState.");
    try {
        writeCurrentKeyWithGroupAndNamespace();
        byte[] key = dataOutputView.getCopyOfBuffer();
        // The key bytes were copied above, so the view can be reused for the value.
        dataOutputView.clear();
        elementSerializer.serialize(value, dataOutputView);
        backend.db.merge(columnFamily, writeOptions, key, dataOutputView.getCopyOfBuffer());
    } catch (Exception e) {
        throw new FlinkRuntimeException("Error while adding data to RocksDB", e);
    }
}
/**
 * Moves the lists stored under each source namespace into the target
 * namespace (for the current key) by deleting each source entry and
 * re-adding its raw bytes to the target via the merge operator.
 *
 * @param target  namespace that receives the merged values
 * @param sources namespaces whose values are consumed; may be {@code null}/empty
 */
@Override
public void mergeNamespaces(N target, Collection<N> sources) {
if (sources == null || sources.isEmpty()) {
return;
}
// cache key and namespace
final K key = backend.getCurrentKey();
final int keyGroup = backend.getCurrentKeyGroupIndex();
try {
// create the target full-binary-key
writeKeyWithGroupAndNamespace(keyGroup, key, target, dataOutputView);
final byte[] targetKey = dataOutputView.getCopyOfBuffer();
// merge the sources to the target
for (N source : sources) {
if (source != null) {
writeKeyWithGroupAndNamespace(keyGroup, key, source, dataOutputView);
byte[] sourceKey = dataOutputView.getCopyOfBuffer();
// Read the source value, then delete the source entry before
// merging its bytes into the target.
byte[] valueBytes = backend.db.get(columnFamily, sourceKey);
backend.db.delete(columnFamily, writeOptions, sourceKey);
if (valueBytes != null) {
backend.db.merge(columnFamily, writeOptions, targetKey, valueBytes);
}
}
}
}
catch (Exception e) {
throw new FlinkRuntimeException("Error while merging state in RocksDB", e);
}
}
/** Replaces the whole list with {@code valueToStore}; delegates to {@link #updateInternal}. */
@Override
public void update(List<V> valueToStore) {
updateInternal(valueToStore);
}
/**
 * Replaces the stored list with {@code values}. The existing entry is
 * cleared first, so an empty input list effectively clears the state.
 *
 * @param values replacement elements; the list itself must not be {@code null}
 */
@Override
public void updateInternal(List<V> values) {
Preconditions.checkNotNull(values, "List of values to add cannot be null.");
clear();
if (!values.isEmpty()) {
try {
writeCurrentKeyWithGroupAndNamespace();
byte[] key = dataOutputView.getCopyOfBuffer();
// Pre-merge all elements into one delimiter-separated byte array.
byte[] premerge = getPreMergedValue(values, elementSerializer, dataOutputView);
backend.db.put(columnFamily, writeOptions, key, premerge);
} catch (IOException | RocksDBException e) {
throw new FlinkRuntimeException("Error while updating data to RocksDB", e);
}
}
}
/**
 * Appends all {@code values} to the existing list. Unlike
 * {@link #updateInternal}, the current entry is kept and the pre-merged
 * bytes are appended via the merge operator.
 *
 * @param values elements to append; the list itself must not be {@code null}
 */
@Override
public void addAll(List<V> values) {
Preconditions.checkNotNull(values, "List of values to add cannot be null.");
if (!values.isEmpty()) {
try {
writeCurrentKeyWithGroupAndNamespace();
byte[] key = dataOutputView.getCopyOfBuffer();
byte[] premerge = getPreMergedValue(values, elementSerializer, dataOutputView);
backend.db.merge(columnFamily, writeOptions, key, premerge);
} catch (IOException | RocksDBException e) {
throw new FlinkRuntimeException("Error while updating data to RocksDB", e);
}
}
}
/**
 * Serializes {@code values} into a single byte array in the same layout the
 * RocksDB merge operator produces: elements separated by {@code DELIMITER}.
 *
 * @param values                 elements to serialize; no element may be {@code null}
 * @param elementSerializer      serializer for one element
 * @param keySerializationStream reusable output buffer (cleared first)
 * @return the delimiter-separated serialized bytes
 * @throws IOException on serialization failure
 */
private static <V> byte[] getPreMergedValue(
List<V> values,
TypeSerializer<V> elementSerializer,
DataOutputSerializer keySerializationStream) throws IOException {
keySerializationStream.clear();
for (int i = 0; i < values.size(); i++) {
V value = values.get(i);
Preconditions.checkNotNull(value, "You cannot add null to a ListState.");
// Write the delimiter before every element except the first.
if (i > 0) {
keySerializationStream.write(DELIMITER);
}
elementSerializer.serialize(value, keySerializationStream);
}
return keySerializationStream.getCopyOfBuffer();
}
/**
 * Factory used by the keyed state backend to instantiate a list state from a
 * {@link ListStateDescriptor} and its registered meta info. The unchecked
 * casts are safe because the backend registers list states with matching
 * element/state serializers.
 */
@SuppressWarnings("unchecked")
static <E, K, N, SV, S extends State, IS extends S> IS create(
StateDescriptor<S, SV> stateDesc,
Tuple2<ColumnFamilyHandle, RegisteredKeyValueStateBackendMetaInfo<N, SV>> registerResult,
RocksDBKeyedStateBackend<K> backend) {
return (IS) new RocksDBListState<>(
registerResult.f0,
registerResult.f1.getNamespaceSerializer(),
(TypeSerializer<List<E>>) registerResult.f1.getStateSerializer(),
(List<E>) stateDesc.getDefaultValue(),
((ListStateDescriptor<E>) stateDesc).getElementSerializer(),
backend);
}
/**
 * Adapts an element-level {@link StateSnapshotTransformer} to the raw byte[]
 * list value stored in RocksDB: deserializes elements, filters/transforms each,
 * and re-serializes the surviving elements.
 */
static class StateSnapshotTransformerWrapper<T> implements StateSnapshotTransformer<byte[]> {
private final StateSnapshotTransformer<T> elementTransformer;
private final TypeSerializer<T> elementSerializer;
private final DataOutputSerializer out = new DataOutputSerializer(128);
private final CollectionStateSnapshotTransformer.TransformStrategy transformStrategy;
StateSnapshotTransformerWrapper(StateSnapshotTransformer<T> elementTransformer, TypeSerializer<T> elementSerializer) {
this.elementTransformer = elementTransformer;
this.elementSerializer = elementSerializer;
// Collection-aware transformers may declare a filter strategy; default is
// to transform every element.
this.transformStrategy = elementTransformer instanceof CollectionStateSnapshotTransformer ?
((CollectionStateSnapshotTransformer) elementTransformer).getFilterStrategy() :
CollectionStateSnapshotTransformer.TransformStrategy.TRANSFORM_ALL;
}
/**
 * Transforms the serialized list value; returns {@code null} to drop the
 * entry entirely when no element survives.
 */
@Override
@Nullable
public byte[] filterOrTransform(@Nullable byte[] value) {
if (value == null) {
return null;
}
List<T> result = new ArrayList<>();
DataInputDeserializer in = new DataInputDeserializer(value);
T next;
// prevPosition tracks the byte offset of the element about to be read, so
// the STOP_ON_FIRST_INCLUDED shortcut can return the raw tail unchanged.
int prevPosition = 0;
while ((next = deserializeNextElement(in, elementSerializer)) != null) {
T transformedElement = elementTransformer.filterOrTransform(next);
if (transformedElement != null) {
if (transformStrategy == STOP_ON_FIRST_INCLUDED) {
// Keep everything from the first included element onward, byte-for-byte.
return Arrays.copyOfRange(value, prevPosition, value.length);
} else {
result.add(transformedElement);
}
}
prevPosition = in.getPosition();
}
try {
return result.isEmpty() ? null : getPreMergedValue(result, elementSerializer, out);
} catch (IOException e) {
throw new FlinkRuntimeException("Failed to serialize transformed list", e);
}
}
}
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package javax.el;
import java.beans.BeanInfo;
import java.beans.FeatureDescriptor;
import java.beans.IntrospectionException;
import java.beans.Introspector;
import java.beans.PropertyDescriptor;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.WeakHashMap;
import java.util.concurrent.ConcurrentHashMap;
/**
 * ELResolver that resolves JavaBean properties via Introspector-discovered
 * getters/setters and (since EL 2.2) invokes bean methods. Introspection
 * results are cached per class name in a two-level concurrent cache.
 */
public class BeanELResolver extends ELResolver {
// Maximum number of classes whose BeanProperties are cached.
private static final int CACHE_SIZE;
private static final String CACHE_SIZE_PROP =
"org.apache.el.BeanELResolver.CACHE_SIZE";
static {
// Reading a system property requires a privileged action when a
// SecurityManager is installed.
if (System.getSecurityManager() == null) {
CACHE_SIZE = Integer.parseInt(
System.getProperty(CACHE_SIZE_PROP, "1000"));
} else {
CACHE_SIZE = AccessController.doPrivileged(
new PrivilegedAction<Integer>() {
@Override
public Integer run() {
return Integer.valueOf(
System.getProperty(CACHE_SIZE_PROP, "1000"));
}
}).intValue();
}
}
private final boolean readOnly;
private final ConcurrentCache<String, BeanProperties> cache =
new ConcurrentCache<String, BeanProperties>(CACHE_SIZE);
/** Creates a read-write resolver. */
public BeanELResolver() {
this.readOnly = false;
}
/** @param readOnly if {@code true}, {@link #setValue} always throws */
public BeanELResolver(boolean readOnly) {
this.readOnly = readOnly;
}
/**
 * Returns the declared type of the named bean property, or {@code null}
 * when base/property is null (resolution not attempted).
 */
@Override
public Class<?> getType(ELContext context, Object base, Object property)
throws NullPointerException, PropertyNotFoundException, ELException {
if (context == null) {
throw new NullPointerException();
}
if (base == null || property == null) {
return null;
}
context.setPropertyResolved(true);
return this.property(context, base, property).getPropertyType();
}
/**
 * Reads the named property by invoking its getter, wrapping reflective
 * failures in ELException (the original cause is preserved).
 */
@Override
public Object getValue(ELContext context, Object base, Object property)
throws NullPointerException, PropertyNotFoundException, ELException {
if (context == null) {
throw new NullPointerException();
}
if (base == null || property == null) {
return null;
}
context.setPropertyResolved(true);
Method m = this.property(context, base, property).read(context);
try {
return m.invoke(base, (Object[]) null);
} catch (IllegalAccessException e) {
throw new ELException(e);
} catch (InvocationTargetException e) {
Throwable cause = e.getCause();
// ThreadDeath and VM errors must never be swallowed.
if (cause instanceof ThreadDeath) {
throw (ThreadDeath) cause;
}
if (cause instanceof VirtualMachineError) {
throw (VirtualMachineError) cause;
}
throw new ELException(Util.message(context, "propertyReadError",
base.getClass().getName(), property.toString()), cause);
} catch (Exception e) {
throw new ELException(e);
}
}
/**
 * Writes the named property via its setter; throws
 * PropertyNotWritableException when this resolver is read-only.
 */
@Override
public void setValue(ELContext context, Object base, Object property,
Object value) throws NullPointerException,
PropertyNotFoundException, PropertyNotWritableException,
ELException {
if (context == null) {
throw new NullPointerException();
}
if (base == null || property == null) {
return;
}
context.setPropertyResolved(true);
if (this.readOnly) {
throw new PropertyNotWritableException(Util.message(context,
"resolverNotWriteable", base.getClass().getName()));
}
Method m = this.property(context, base, property).write(context);
try {
m.invoke(base, value);
} catch (IllegalAccessException e) {
throw new ELException(e);
} catch (InvocationTargetException e) {
Throwable cause = e.getCause();
if (cause instanceof ThreadDeath) {
throw (ThreadDeath) cause;
}
if (cause instanceof VirtualMachineError) {
throw (VirtualMachineError) cause;
}
throw new ELException(Util.message(context, "propertyWriteError",
base.getClass().getName(), property.toString()), cause);
} catch (Exception e) {
throw new ELException(e);
}
}
/**
 * Invokes the named bean method, coercing the method name to String and
 * matching by parameter types/values.
 *
 * @since EL 2.2
 */
@Override
public Object invoke(ELContext context, Object base, Object method,
Class<?>[] paramTypes, Object[] params) {
if (context == null) {
throw new NullPointerException();
}
if (base == null || method == null) {
return null;
}
ExpressionFactory factory = Util.getExpressionFactory();
String methodName = (String) factory.coerceToType(method, String.class);
// Find the matching method
Method matchingMethod =
Util.findMethod(base.getClass(), methodName, paramTypes, params);
Object[] parameters = Util.buildParameters(
matchingMethod.getParameterTypes(), matchingMethod.isVarArgs(),
params);
Object result = null;
try {
result = matchingMethod.invoke(base, parameters);
} catch (IllegalArgumentException e) {
throw new ELException(e);
} catch (IllegalAccessException e) {
throw new ELException(e);
} catch (InvocationTargetException e) {
Throwable cause = e.getCause();
Util.handleThrowable(cause);
throw new ELException(cause);
}
context.setPropertyResolved(true);
return result;
}
/**
 * A property is read-only if the resolver itself is read-only or the
 * property exposes no usable write method.
 */
@Override
public boolean isReadOnly(ELContext context, Object base, Object property)
throws NullPointerException, PropertyNotFoundException, ELException {
if (context == null) {
throw new NullPointerException();
}
if (base == null || property == null) {
return false;
}
context.setPropertyResolved(true);
return this.readOnly
|| this.property(context, base, property).isReadOnly();
}
/**
 * Design-time metadata: all introspected properties of the base's class.
 * Introspection failures are deliberately ignored (best-effort API).
 */
@Override
public Iterator<FeatureDescriptor> getFeatureDescriptors(ELContext context, Object base) {
if (base == null) {
return null;
}
try {
BeanInfo info = Introspector.getBeanInfo(base.getClass());
PropertyDescriptor[] pds = info.getPropertyDescriptors();
for (int i = 0; i < pds.length; i++) {
pds[i].setValue(RESOLVABLE_AT_DESIGN_TIME, Boolean.TRUE);
pds[i].setValue(TYPE, pds[i].getPropertyType());
}
return Arrays.asList((FeatureDescriptor[]) pds).iterator();
} catch (IntrospectionException e) {
//
}
return null;
}
@Override
public Class<?> getCommonPropertyType(ELContext context, Object base) {
if (base != null) {
return Object.class;
}
return null;
}
/**
 * Introspected property map for one bean class; populated once and cached.
 */
protected static final class BeanProperties {
private final Map<String, BeanProperty> properties;
private final Class<?> type;
public BeanProperties(Class<?> type) throws ELException {
this.type = type;
this.properties = new HashMap<String, BeanProperty>();
try {
BeanInfo info = Introspector.getBeanInfo(this.type);
PropertyDescriptor[] pds = info.getPropertyDescriptors();
for (PropertyDescriptor pd: pds) {
this.properties.put(pd.getName(), new BeanProperty(type, pd));
}
if (System.getSecurityManager() != null) {
// When running with SecurityManager, some classes may be
// not accessible, but have accessible interfaces.
populateFromInterfaces(type);
}
} catch (IntrospectionException ie) {
throw new ELException(ie);
}
}
// Recursively adds properties declared on interfaces of the class and its
// superclasses, without overwriting already-discovered properties.
private void populateFromInterfaces(Class<?> aClass) throws IntrospectionException {
Class<?> interfaces[] = aClass.getInterfaces();
if (interfaces.length > 0) {
for (Class<?> ifs : interfaces) {
BeanInfo info = Introspector.getBeanInfo(ifs);
PropertyDescriptor[] pds = info.getPropertyDescriptors();
for (PropertyDescriptor pd : pds) {
if (!this.properties.containsKey(pd.getName())) {
this.properties.put(pd.getName(), new BeanProperty(
this.type, pd));
}
}
}
}
Class<?> superclass = aClass.getSuperclass();
if (superclass != null) {
populateFromInterfaces(superclass);
}
}
private BeanProperty get(ELContext ctx, String name) {
BeanProperty property = this.properties.get(name);
if (property == null) {
throw new PropertyNotFoundException(Util.message(ctx,
"propertyNotFound", type.getName(), name));
}
return property;
}
public BeanProperty getBeanProperty(String name) {
return get(null, name);
}
private Class<?> getType() {
return type;
}
}
/**
 * One bean property: caches the resolved, accessible read/write Methods
 * lazily on first use.
 */
protected static final class BeanProperty {
private final Class<?> type;
private final Class<?> owner;
private final PropertyDescriptor descriptor;
private Method read;
private Method write;
public BeanProperty(Class<?> owner, PropertyDescriptor descriptor) {
this.owner = owner;
this.descriptor = descriptor;
this.type = descriptor.getPropertyType();
}
// Can't use Class<?> because API needs to match specification
@SuppressWarnings("rawtypes")
public Class getPropertyType() {
return this.type;
}
public boolean isReadOnly() {
// Side effect: also caches the write method when one is found.
return this.write == null
&& (null == (this.write = Util.getMethod(this.owner, descriptor.getWriteMethod())));
}
public Method getWriteMethod() {
return write(null);
}
public Method getReadMethod() {
return this.read(null);
}
private Method write(ELContext ctx) {
if (this.write == null) {
this.write = Util.getMethod(this.owner, descriptor.getWriteMethod());
if (this.write == null) {
throw new PropertyNotFoundException(Util.message(ctx,
"propertyNotWritable", new Object[] {
owner.getName(), descriptor.getName() }));
}
}
return this.write;
}
private Method read(ELContext ctx) {
if (this.read == null) {
this.read = Util.getMethod(this.owner, descriptor.getReadMethod());
if (this.read == null) {
throw new PropertyNotFoundException(Util.message(ctx,
"propertyNotReadable", new Object[] {
owner.getName(), descriptor.getName() }));
}
}
return this.read;
}
}
// Looks up (or introspects and caches) the BeanProperty for base/property.
// Cache is keyed by class *name*; the type check guards against two classes
// with the same name from different classloaders.
private final BeanProperty property(ELContext ctx, Object base,
Object property) {
Class<?> type = base.getClass();
String prop = property.toString();
BeanProperties props = this.cache.get(type.getName());
if (props == null || type != props.getType()) {
props = new BeanProperties(type);
this.cache.put(type.getName(), props);
}
return props.get(ctx, prop);
}
/**
 * Two-level cache: a bounded concurrent "eden" for fast access and a
 * weakly-referenced "longterm" map that survives eden flushes.
 */
private static final class ConcurrentCache<K,V> {
private final int size;
private final Map<K,V> eden;
private final Map<K,V> longterm;
public ConcurrentCache(int size) {
this.size = size;
this.eden = new ConcurrentHashMap<K,V>(size);
this.longterm = new WeakHashMap<K,V>(size);
}
public V get(K key) {
V value = this.eden.get(key);
if (value == null) {
// WeakHashMap is not thread-safe; guard all access with its monitor.
synchronized (longterm) {
value = this.longterm.get(key);
}
if (value != null) {
// Promote back into eden for fast subsequent lookups.
this.eden.put(key, value);
}
}
return value;
}
public void put(K key, V value) {
if (this.eden.size() >= this.size) {
// Eden full: spill everything to the weak longterm map and restart.
synchronized (longterm) {
this.longterm.putAll(this.eden);
}
this.eden.clear();
}
this.eden.put(key, value);
}
}
}
|
/*
* Copyright 2012-2018 MarkLogic Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.marklogic.client.test;
import static org.custommonkey.xmlunit.XMLAssert.assertXMLEqual;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.io.StringReader;
import java.util.HashMap;
import java.util.Iterator;
import javax.xml.XMLConstants;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.stream.XMLEventReader;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamReader;
import javax.xml.stream.events.Attribute;
import javax.xml.stream.events.StartElement;
import javax.xml.stream.events.XMLEvent;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerConfigurationException;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.TransformerFactoryConfigurationError;
import javax.xml.transform.stream.StreamSource;
import javax.xml.validation.Schema;
import javax.xml.validation.SchemaFactory;
import com.marklogic.client.admin.ExtensionLibrariesManager;
import com.marklogic.client.io.*;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.xml.sax.Attributes;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import com.marklogic.client.document.DocumentDescriptor;
import com.marklogic.client.document.DocumentManager.Metadata;
import com.marklogic.client.document.DocumentMetadataPatchBuilder.Cardinality;
import com.marklogic.client.document.DocumentPatchBuilder;
import com.marklogic.client.document.XMLDocumentManager;
import com.marklogic.client.document.DocumentPatchBuilder.Position;
import com.marklogic.client.document.XMLDocumentManager.DocumentRepair;
import com.marklogic.client.io.marker.DocumentPatchHandle;
import com.marklogic.client.util.EditableNamespaceContext;
import java.util.Map;
/**
 * Integration tests for XMLDocumentManager against a live MarkLogic server:
 * round-tripping XML through the various handle types, schema validation on
 * write, StAX writes, and document patching.
 */
public class XMLDocumentTest {
private static ExtensionLibrariesManager libsMgr = null;
@BeforeClass
public static void beforeClass() {
Common.connectAdmin();
// get a manager
libsMgr = Common.adminClient
.newServerConfigManager().newExtensionLibrariesManager();
// write XQuery file to the modules database
libsMgr.write("/ext/my-lib.xqy", new FileHandle(
new File("src/test/resources/my-lib.xqy")).withFormat(Format.TEXT));
Common.connect();
}
@AfterClass
public static void afterClass() {
libsMgr.delete("/ext/my-lib.xqy");
}
// Round-trips one document through DOM, String, Source, SAX, StAX stream and
// StAX event handles, then verifies repair behavior on malformed XML.
@SuppressWarnings("unchecked")
@Test
public void testReadWrite()
throws ParserConfigurationException, SAXException, IOException, TransformerConfigurationException,
TransformerFactoryConfigurationError, XMLStreamException
{
String docId = "/test/testWrite1.xml";
Document domDocument = DocumentBuilderFactory.newInstance().newDocumentBuilder().newDocument();
Element root = domDocument.createElement("root");
root.setAttribute("xml:lang", "en");
root.setAttribute("foo", "bar");
root.appendChild(domDocument.createElement("child"));
root.appendChild(domDocument.createTextNode("mixed"));
domDocument.appendChild(root);
String domString = Common.testDocumentToString(domDocument);
XMLDocumentManager docMgr = Common.client.newXMLDocumentManager();
docMgr.write(docId, new DOMHandle().with(domDocument));
String docText = docMgr.read(docId, new StringHandle()).get();
assertNotNull("Read null string for XML content",docText);
assertXMLEqual("Failed to read XML document as String",domString,docText);
Document readDoc = docMgr.read(docId, new DOMHandle()).get();
assertNotNull("Read null document for XML content",readDoc);
assertXMLEqual("Failed to read XML document as DOM",Common.testDocumentToString(readDoc),domString);
String docId2 = "/test/testWrite2.xml";
Transformer transformer = TransformerFactory.newInstance().newTransformer();
SourceHandle sourceHandle = new SourceHandle();
sourceHandle.setTransformer(transformer);
docMgr.write(docId2, docMgr.read(docId, sourceHandle));
docText = docMgr.read(docId2, new StringHandle()).get();
assertNotNull("Read null document for transform result",docText);
assertXMLEqual("Transform result not equivalent to source",domString,docText);
InputSourceHandle saxHandle = new InputSourceHandle();
saxHandle.set(new InputSource(new StringReader(domString)));
docMgr.write(docId, saxHandle);
// NOTE(review): the SAX write above targets docId but this read checks
// docId2 — likely meant to read docId; verify intent before changing.
docText = docMgr.read(docId2, new StringHandle()).get();
assertNotNull("Read null document for SAX writer",docText);
assertXMLEqual("Failed to read XML document as DOM",domString,docText);
final Map<String,Integer> counter = new HashMap<>();
counter.put("elementCount",0);
counter.put("attributeCount",0);
DefaultHandler handler = new DefaultHandler() {
@Override
public void startElement(String uri, String localName, String qName, Attributes attributes) {
counter.put("elementCount",counter.get("elementCount") + 1);
if (attributes != null) {
int elementAttributeCount = attributes.getLength();
if (elementAttributeCount > 0)
counter.put("attributeCount",counter.get("attributeCount") + elementAttributeCount);
}
}
};
docMgr.read(docId, saxHandle).process(handler);
assertTrue("Failed to process XML document with SAX",
counter.get("elementCount") == 2 && counter.get("attributeCount") == 2);
XMLStreamReaderHandle streamReaderHandle = docMgr.read(docId, new XMLStreamReaderHandle());
XMLStreamReader streamReader = streamReaderHandle.get();
int elementCount = 0;
int attributeCount = 0;
while (streamReader.hasNext()) {
if (streamReader.next() != XMLStreamReader.START_ELEMENT)
continue;
elementCount++;
int elementAttributeCount = streamReader.getAttributeCount();
if (elementAttributeCount > 0)
attributeCount += elementAttributeCount;
}
streamReaderHandle.close();
assertTrue("Failed to process XML document with StAX stream reader",
elementCount == 2 && attributeCount == 2);
XMLEventReader eventReader = docMgr.read(docId, new XMLEventReaderHandle()).get();
elementCount = 0;
attributeCount = 0;
while (eventReader.hasNext()) {
XMLEvent event = eventReader.nextEvent();
if (!event.isStartElement())
continue;
StartElement element = event.asStartElement();
elementCount++;
Iterator<Attribute> attributes = element.getAttributes();
while (attributes.hasNext()) {
attributes.next();
attributeCount++;
}
}
eventReader.close();
assertTrue("Failed to process XML document with StAX event reader",
elementCount == 2 && attributeCount == 2);
// Malformed XML should succeed with FULL repair and fail with NONE.
String truncatedDoc ="<root><poorlyFormed></root>";
docMgr.setDocumentRepair(DocumentRepair.FULL);
docMgr.write(docId, new StringHandle().with(truncatedDoc));
docMgr.setDocumentRepair(DocumentRepair.NONE);
boolean threwException = false;
try {
docMgr.write(docId, new StringHandle().with(truncatedDoc));
} catch(RuntimeException ex) {
threwException = true;
}
assertTrue("Expected failure on truncated XML document with no repair", threwException);
}
// Verifies client-side XSD validation on write: a conforming schema allows
// the write, a non-conforming one makes the SAX handle throw.
@Test
public void testValidate()
throws ParserConfigurationException, SAXException, IOException, TransformerConfigurationException,
TransformerFactoryConfigurationError, XMLStreamException
{
String docId = "/test/testWrite1.xml";
SchemaFactory factory = SchemaFactory.newInstance(XMLConstants.W3C_XML_SCHEMA_NS_URI);
XMLDocumentManager docMgr = Common.client.newXMLDocumentManager();
docMgr.setDocumentRepair(DocumentRepair.NONE);
String doc = "<?xml version='1.0' encoding='UTF-8'?>" +
"<root foo='bar'><child/>mixed</root>";
InputSourceHandle saxHandle = new InputSourceHandle();
// throw exceptions for parse errors
saxHandle.setErrorHandler(new InputSourceHandle.DraconianErrorHandler());
String validSchema =
"<?xml version='1.0' encoding='UTF-8'?>" +
"<xs:schema xmlns:xs='http://www.w3.org/2001/XMLSchema' " +
"xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance' " +
"xsi:schemaLocation='http://www.w3.org/2001/XMLSchema XMLSchema.xsd'>" +
"<xs:element name='root'>" +
"<xs:complexType mixed='true'>" +
"<xs:choice minOccurs='0' maxOccurs='unbounded'>" +
"<xs:element name='child'/>" +
"</xs:choice>" +
"<xs:attribute name='foo' type='xs:string' use='optional'/>" +
"</xs:complexType>" +
"</xs:element>" +
"</xs:schema>";
Schema schema = factory.newSchema(new StreamSource(new StringReader(validSchema)));
saxHandle.setDefaultWriteSchema(schema);
if (docMgr.exists(docId) != null) {
docMgr.delete(docId);
}
docMgr.write(docId, saxHandle.with(new InputSource(new StringReader(doc))));
DocumentDescriptor docDesc = docMgr.exists(docId);
assertTrue("Write failed with valid SAX", docDesc != null);
docMgr.delete(docId);
String invalidSchema =
"<?xml version='1.0' encoding='UTF-8'?>" +
"<xs:schema xmlns:xs='http://www.w3.org/2001/XMLSchema' " +
"xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance' " +
"xsi:schemaLocation='http://www.w3.org/2001/XMLSchema XMLSchema.xsd'>" +
"<xs:element name='root'>" +
"<xs:complexType>" +
"<xs:attribute name='foo' type='xs:string' use='optional'/>" +
"</xs:complexType>" +
"</xs:element>" +
"</xs:schema>";
schema = factory.newSchema(new StreamSource(new StringReader(invalidSchema)));
saxHandle.setDefaultWriteSchema(schema);
boolean threwException = false;
try {
docMgr.write(docId, saxHandle.with(new InputSource(new StringReader(doc))));
} catch(RuntimeException ex) {
threwException = true;
}
assertTrue("Expected failure for invalid SAX", threwException);
// if the error occurs in the root element, the server writes an empty document
docDesc = docMgr.exists(docId);
if (docDesc != null) {
docMgr.delete(docId);
}
}
// Writes documents from StAX stream/event readers and verifies round-trips,
// including namespace declarations.
@Test
public void testStAXWrite() throws XMLStreamException, SAXException, IOException {
String docId = "/test/testWrite1.xml";
String docIn =
"<?xml version='1.0'?>" +
"<def:default" +
" xmlns:def='http://marklogic.com/example/ns/default'" +
" xmlns:sp='http://marklogic.com/example/ns/specified'" +
" xmlns:un='http://marklogic.com/example/ns/unspecified'" +
">" +
"<sp:specified>first value</sp:specified>" +
"<un:unspecified>second value</un:unspecified>" +
"</def:default>";
XMLDocumentManager docMgr = Common.client.newXMLDocumentManager();
XMLStreamReaderHandle streamHandle = new XMLStreamReaderHandle();
streamHandle.set(
streamHandle.getFactory().createXMLStreamReader(
new StringReader(docIn)
)
);
docMgr.write(docId, streamHandle);
String docOut = docMgr.read(docId, new StringHandle()).get();
assertNotNull("Wrote null document for StAX stream", docOut);
assertXMLEqual("Failed to write StAX stream", docIn, docOut);
XMLEventReaderHandle eventHandle = new XMLEventReaderHandle();
eventHandle.set(
eventHandle.getFactory().createXMLEventReader(
new StringReader(docIn)
)
);
docMgr.write(docId, eventHandle);
docOut = docMgr.read(docId, new StringHandle()).get();
assertNotNull("Wrote null document for StAX events", docOut);
assertXMLEqual("Failed to write StAX events", docIn, docOut);
}
// Applies built and raw patches (insert/replace/delete/apply, plus a server
// library function) and checks results against an equivalently mutated DOM.
@Test
public void testPatch() throws Exception {
String docId = "/test/testWrite1.xml";
XMLDocumentManager docMgr = Common.client.newXMLDocumentManager();
DocumentPatchBuilder patchBldr = docMgr.newPatchBuilder();
patchBldr.insertFragment(
"/root/firstChild/firstChildOfFirstChild", Position.BEFORE, Cardinality.ONE_OR_MORE,
"<newFirstChildOfFirstChild/>"
);
patchBldr.insertFragment(
"/root/firstChild", Position.LAST_CHILD, "<lastChildOfFirstChild/>"
);
patchBldr.replaceFragment("/root/secondChild", "<replacedSecondChild/>");
patchBldr.replaceValue("/root/thirdChild", "new value");
patchBldr.delete("fourthChild");
patchBldr.replaceApply("fifthChild", Cardinality.ONE, patchBldr.call().multiply(3));
patchBldr.library("http://marklogic.com/java-unit-test/my-lib",
"/ext/my-lib.xqy");
patchBldr.replaceApply("/root/sixthChild",
patchBldr.call().applyLibraryValues("getMin", 18, 21));
DocumentPatchHandle patchHandle = patchBldr.build();
// Run twice: once with the built patch handle, once with its raw string form.
for (int i=0; i < 2; i++) {
Document domDocument = DocumentBuilderFactory.newInstance().newDocumentBuilder().newDocument();
Element root = domDocument.createElement("root");
Element firstChild = domDocument.createElement("firstChild");
Element firstChildOfFirstChild = domDocument.createElement("firstChildOfFirstChild");
firstChild.appendChild(firstChildOfFirstChild);
root.appendChild(firstChild);
Element secondChild = domDocument.createElement("secondChild");
root.appendChild(secondChild);
Element thirdChild = domDocument.createElement("thirdChild");
thirdChild.setTextContent("old value");
root.appendChild(thirdChild);
Element fourthChild = domDocument.createElement("fourthChild");
root.appendChild(fourthChild);
Element fifthChild = domDocument.createElement("fifthChild");
fifthChild.setTextContent("5");
root.appendChild(fifthChild);
Element sixthChild = domDocument.createElement("sixthChild");
sixthChild.setTextContent("31");
root.appendChild(sixthChild);
domDocument.appendChild(root);
docMgr.write(docId, new DOMHandle().with(domDocument));
String patchType = null;
switch (i) {
case 0:
patchType = "built";
docMgr.patch(docId, patchHandle);
break;
case 1:
patchType = "raw";
docMgr.clearMetadataCategories();
docMgr.patch(docId, new StringHandle(patchHandle.toString()));
docMgr.setMetadataCategories(Metadata.ALL);
break;
default:
throw new Exception("unknown patch loop value");
}
// Mutate the local DOM the same way the patch should have mutated the server copy.
firstChild.insertBefore(
domDocument.createElement("newFirstChildOfFirstChild"), firstChildOfFirstChild
);
firstChild.appendChild(domDocument.createElement("lastChildOfFirstChild"));
root.replaceChild(domDocument.createElement("replacedSecondChild"), secondChild);
thirdChild.setTextContent("new value");
fifthChild.setTextContent("15");
sixthChild.setTextContent("18");
root.removeChild(fourthChild);
String expected = Common.testDocumentToString(domDocument);
String actual = docMgr.read(docId, new StringHandle()).get();
assertNotNull("Read null string for "+patchType+" patched XML content",actual);
assertXMLEqual("Unexpected result for "+patchType+" patched XML document",expected,actual);
}
// Namespaced patching: insert under a prefixed element using an explicit
// namespace context.
String before =
"<r:root xmlns:r=\"root.org\">" +
"<t:target xmlns:t=\"target.org\">";
String after =
"</t:target>" +
"</r:root>";
EditableNamespaceContext namespaces = new EditableNamespaceContext();
namespaces.put("r", "root.org");
namespaces.put("t", "target.org");
namespaces.put("n", "new.org");
patchBldr = docMgr.newPatchBuilder();
patchBldr.setNamespaces(namespaces);
String inserted = "<n:new xmlns:n=\"new.org\"/>";
patchBldr.insertFragment(
"/r:root/t:target", Position.LAST_CHILD, inserted
);
patchHandle = patchBldr.build();
String expected = before+inserted+after;
for (int i=0; i < 2; i++) {
docMgr.write(docId, new StringHandle(before+after));
String patchType = null;
switch (i) {
case 0:
patchType = "built";
docMgr.patch(docId, patchHandle);
break;
case 1:
patchType = "raw";
docMgr.patch(docId, new StringHandle(patchHandle.toString()));
break;
default:
throw new Exception("unknown namespaced patch loop value");
}
String actual = docMgr.read(docId, new StringHandle()).get();
assertXMLEqual("Unexpected result for "+patchType+" patched namespaced XML document",expected,actual);
}
docMgr.delete(docId);
}
}
|
package ru.job4j.start;
import ru.job4j.models.Item;
import ru.job4j.models.ItemGetString;
/**
 * Output implementation that writes request data into a string array.
 * @author Vladimir Prilepskiy
 * @version 1
 * @since 22.12.2016
 */
public class StubOutput implements Output {
/**
 * Captured answers; one slot per input item. Slots for {@code null}
 * items are left as {@code null} so positions stay aligned.
 */
private String[] asks = null;
/**
 * Renders each request item into the answers array.
 * @param items the request items to render
 */
public void answer(Item[] items) {
asks = new String[items.length];
for (int index = 0; index < items.length; index++) {
Item current = items[index];
if (current != null) {
asks[index] = "№ " + index + "; " + new ItemGetString(current).getLine();
}
}
}
/**
 * Returns the captured answers.
 * @return the answers array
 */
public String[] getAsks() {
return asks;
}
}
|
package io.github.strikerrocker;
import net.fabricmc.fabric.api.event.lifecycle.v1.ServerTickEvents;
import net.minecraft.block.BlockState;
import net.minecraft.block.Blocks;
import net.minecraft.block.SnowBlock;
import net.minecraft.server.world.ChunkHolder;
import net.minecraft.server.world.ServerWorld;
import net.minecraft.util.math.BlockPos;
import net.minecraft.world.Heightmap;
import net.minecraft.world.LightType;
import net.minecraft.world.chunk.Chunk;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;
public class TickHandler implements ServerTickEvents.StartWorldTick {
    // Reflective accessor for the chunk-holder collection inside
    // ThreadedAnvilChunkStorage (the field is not exposed publicly).
    Method getChunkHolder;

    public TickHandler(Method getChunkHolder) {
        this.getChunkHolder = getChunkHolder;
    }

    /**
     * Called at the start of every server world tick. While it is raining, each
     * loaded chunk has a 1-in-{@code chanceToAccumulateSnow} chance of attempting
     * to add a snow layer at a random surface position, provided the position
     * already holds a snow layer, has low block light, the biome allows snow,
     * and the surrounding terrain is at least as high.
     *
     * @param world the server world being ticked
     */
    @Override
    public void onStartTick(ServerWorld world) {
        if (world != null) {
            if (world.isRaining()) {
                try {
                    getChunkHolder.setAccessible(true);
                    @SuppressWarnings("unchecked")
                    Iterable<ChunkHolder> chunkSet = (Iterable<ChunkHolder>) getChunkHolder.invoke(world.getChunkManager().threadedAnvilChunkStorage);
                    for (ChunkHolder holder : chunkSet) {
                        Chunk chunk = holder.getCurrentChunk();
                        if (chunk == null || !world.getChunkManager().isChunkLoaded(chunk.getPos().x, chunk.getPos().z)) {
                            continue;
                        }
                        // If it can rain here, there is a configurable 1/N chance of trying to add snow.
                        if (world.random.nextInt(StitchedSnow.config.chanceToAccumulateSnow) == 0) {
                            // Get rain height at a random position within this chunk.
                            BlockPos pos1 = world.getTopPosition(Heightmap.Type.MOTION_BLOCKING, world.getRandomPosInChunk(chunk.getPos().getStartX(), 0, chunk.getPos().getStartZ(), 15));
                            // Only grow positions that already hold a snow layer block.
                            if (world.getBlockState(pos1).getBlock() instanceof SnowBlock) {
                                // Check for valid Y, low block light, and a biome where snow can form.
                                if (pos1.getY() >= 0 && pos1.getY() < 256 && world.getLightLevel(LightType.BLOCK, pos1) < 10 && !world.getBiome(pos1).doesNotSnow(pos1)) {
                                    int height = world.getBlockState(pos1).get(SnowBlock.LAYERS);
                                    // BUG FIX: this used to be `return`, which aborted snow
                                    // processing for every remaining chunk in this tick as soon
                                    // as one full (8-layer) stack was found. A full stack should
                                    // only skip this chunk.
                                    if (height == 8) {
                                        continue;
                                    }
                                    // Mean height of the four horizontal neighbours; solid blocks
                                    // count as the configured accumulation limit.
                                    float surroundings = 0;
                                    List<BlockPos> posList = new ArrayList<>();
                                    posList.add(pos1.north());
                                    posList.add(pos1.east());
                                    posList.add(pos1.south());
                                    posList.add(pos1.west());
                                    for (BlockPos blockPos : posList) {
                                        BlockState state = world.getBlockState(blockPos);
                                        if (state.getBlock() instanceof SnowBlock) {
                                            surroundings += state.get(SnowBlock.LAYERS);
                                        } else if (state.isSolidBlock(chunk, blockPos)) {
                                            surroundings += StitchedSnow.config.snowAccumulationLimit;
                                        }
                                    }
                                    surroundings /= 4;
                                    // Grow this position only when it is not above its neighbours,
                                    // so snow accumulates evenly ("stitched") instead of in spikes.
                                    if (surroundings >= height) {
                                        float weight = (surroundings - height) / 2 + 0.05f;
                                        if (world.random.nextFloat() <= weight) {
                                            // Add a snow layer!
                                            world.setBlockState(pos1, Blocks.SNOW.getDefaultState().with(SnowBlock.LAYERS, height + 1));
                                        }
                                    }
                                }
                            }
                        }
                    }
                } catch (Exception ex) {
                    System.out.println("COULD NOT ACCESS LOADED CHUNKS!");
                    System.out.println(ex.getMessage());
                    ex.printStackTrace();
                }
            }
        }
    }
}
|
/*
* Copyright © 2016 Cask Data, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package io.cdap.cdap.internal.app.deploy;
import io.cdap.cdap.api.app.Application;
import io.cdap.cdap.api.app.ApplicationSpecification;
import io.cdap.cdap.app.DefaultAppConfigurer;
import io.cdap.cdap.app.DefaultApplicationContext;
import io.cdap.cdap.common.id.Id;
import io.cdap.cdap.internal.DefaultId;
/**
 * Util for building app spec for tests.
 */
public final class Specifications {
    private Specifications() {
        // static utility; never instantiated
    }
    /**
     * Builds the {@link ApplicationSpecification} for the given application by
     * running its configure phase against the default test namespace and artifact.
     *
     * @param app the application to configure
     * @return the specification produced by the application's configurer
     */
    public static ApplicationSpecification from(Application app) {
        Id.Namespace namespace = Id.Namespace.fromEntityId(DefaultId.NAMESPACE);
        Id.Artifact artifact = Id.Artifact.fromEntityId(DefaultId.ARTIFACT);
        DefaultAppConfigurer configurer = new DefaultAppConfigurer(namespace, artifact, app);
        app.configure(configurer, new DefaultApplicationContext());
        return configurer.createSpecification(null);
    }
}
|
package org.ovirt.mobile.movirt.util;
import java.util.Comparator;
/**
 * Comparator that orders {@code null} before any non-null value; two non-null
 * values are compared by their natural ordering.
 */
public class NullPriorityComparator implements Comparator<Comparable> {
    @Override
    public int compare(Comparable o1, Comparable o2) {
        if (o1 == null && o2 == null) {
            return 0;
        }
        if (o1 == null) {
            return -1;
        }
        if (o2 == null) {
            return 1;
        }
        return o1.compareTo(o2);
    }
}
|
/*
MIT License
Copyright (c) 2021 Kumar Chandra
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package kchandra423.kImages;
import processing.core.PApplet;
import processing.core.PImage;
/**
* Represents an image that can be moved around and resized. Supports Gif, jpeg, and png images.
* Gifs will be properly animated. Uses processing to draw.
*
* @author Kumar Chandra
* @see PImage
*/
public interface KImage extends Cloneable {
/**
 * Resizes the textures image to the given width and height
 *
 * @param w new width of texture in pixels
 * @param h new height of texture in pixels
 */
void resize(int w, int h);
/**
 * Scales the image by the given horizontal and vertical factors.
 * (NOTE(review): presumably multiplicative stretch factors relative to the
 * current size — confirm against implementations.)
 *
 * @param stretchX horizontal scale factor
 * @param stretchY vertical scale factor
 */
void scale(float stretchX, float stretchY);
/**
 * Returns the current width of the image
 *
 * @return Current width of the image
 */
int getWidth();
/**
 * Returns the current height of the image
 *
 * @return Current height of the image
 */
int getHeight();
/**
 * Moves the image by the given offsets relative to its current position.
 *
 * @param delx offset added to the x position
 * @param dely offset added to the y position
 */
void translate(float delx, float dely);
/** @return the current x position of the image */
float getX();
/** @return the current y position of the image */
float getY();
/**
 * Moves the image so that its position becomes (x, y).
 *
 * @param x new x position
 * @param y new y position
 */
void moveTo(float x, float y);
/**
 * Sets the image's rotation to the given angle.
 * (NOTE(review): Processing conventionally uses radians — confirm the unit
 * against implementations.)
 *
 * @param theta the new rotation angle
 */
void setAngle(float theta);
/** @return the current rotation angle of the image */
float getAngle();
/** @return whether the image is currently reflected (see {@link #reflect(boolean)}) */
boolean isReflected();
/** @return whether the image is currently reversed (see {@link #reverse(boolean)}) */
boolean isReversed();
/**
 * Rotates the image by the given angle relative to its current rotation.
 *
 * @param theta the angle to rotate by
 */
void rotate(float theta);
/**
 * Sets the image's reflected state.
 * (NOTE(review): the flip axis for "reflect" vs "reverse" is not evident from
 * this interface — confirm against implementations.)
 *
 * @param flag whether the image should be reflected
 */
void reflect(boolean flag);
/**
 * Sets the image's reversed state.
 *
 * @param flag whether the image should be reversed
 */
void reverse(boolean flag);
/**
 * Draws the texture onto the given PApplet
 *
 * @param p The given PApplet to be drawn to
 */
void draw(PApplet p);
/** @return the underlying {@link PImage} backing this KImage */
PImage getImage();
/** @return a copy of this KImage */
Object clone();
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.producer.internals;
import org.apache.kafka.clients.ApiVersions;
import org.apache.kafka.clients.ClientResponse;
import org.apache.kafka.clients.NodeApiVersions;
import org.apache.kafka.clients.RequestCompletionHandler;
import org.apache.kafka.clients.consumer.CommitFailedException;
import org.apache.kafka.clients.consumer.ConsumerGroupMetadata;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.errors.InvalidPidMappingException;
import org.apache.kafka.common.errors.InvalidProducerEpochException;
import org.apache.kafka.common.errors.RetriableException;
import org.apache.kafka.common.errors.UnknownProducerIdException;
import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion;
import org.apache.kafka.common.message.FindCoordinatorResponseData.Coordinator;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.utils.ProducerIdAndEpoch;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.AuthenticationException;
import org.apache.kafka.common.errors.ClusterAuthorizationException;
import org.apache.kafka.common.errors.GroupAuthorizationException;
import org.apache.kafka.common.errors.OutOfOrderSequenceException;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.common.errors.TopicAuthorizationException;
import org.apache.kafka.common.errors.TransactionalIdAuthorizationException;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.AddOffsetsToTxnRequestData;
import org.apache.kafka.common.message.EndTxnRequestData;
import org.apache.kafka.common.message.FindCoordinatorRequestData;
import org.apache.kafka.common.message.InitProducerIdRequestData;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.record.DefaultRecordBatch;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.requests.AbstractRequest;
import org.apache.kafka.common.requests.AbstractResponse;
import org.apache.kafka.common.requests.AddOffsetsToTxnRequest;
import org.apache.kafka.common.requests.AddOffsetsToTxnResponse;
import org.apache.kafka.common.requests.AddPartitionsToTxnRequest;
import org.apache.kafka.common.requests.AddPartitionsToTxnResponse;
import org.apache.kafka.common.requests.EndTxnRequest;
import org.apache.kafka.common.requests.EndTxnResponse;
import org.apache.kafka.common.requests.FindCoordinatorRequest;
import org.apache.kafka.common.requests.FindCoordinatorRequest.CoordinatorType;
import org.apache.kafka.common.requests.FindCoordinatorResponse;
import org.apache.kafka.common.requests.InitProducerIdRequest;
import org.apache.kafka.common.requests.InitProducerIdResponse;
import org.apache.kafka.common.requests.ProduceResponse;
import org.apache.kafka.common.requests.TransactionResult;
import org.apache.kafka.common.requests.TxnOffsetCommitRequest;
import org.apache.kafka.common.requests.TxnOffsetCommitRequest.CommittedOffset;
import org.apache.kafka.common.requests.TxnOffsetCommitResponse;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.PrimitiveRef;
import org.slf4j.Logger;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.OptionalInt;
import java.util.OptionalLong;
import java.util.PriorityQueue;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.function.Consumer;
import java.util.function.Supplier;
/**
* A class which maintains state for transactions. Also keeps the state necessary to ensure idempotent production.
*/
public class TransactionManager {
// Sentinel correlation id used while no transactional request is in flight.
private static final int NO_INFLIGHT_REQUEST_CORRELATION_ID = -1;
// Sentinel meaning no sequence number has been acknowledged for a partition yet.
private static final int NO_LAST_ACKED_SEQUENCE_NUMBER = -1;
private final Logger log;
// Null when the producer is only idempotent, not transactional (see isTransactional()).
private final String transactionalId;
// Transaction timeout sent with InitProducerId requests.
private final int transactionTimeoutMs;
private final ApiVersions apiVersions;
/**
 * Tracks per-partition idempotent/transactional state (producer id/epoch, sequence
 * numbers, last-acked offsets and in-flight batches), keyed by topic-partition.
 */
private static class TopicPartitionBookkeeper {
private final Map<TopicPartition, TopicPartitionEntry> topicPartitions = new HashMap<>();
// Returns the entry for the partition, failing fast if sequences were never initialized for it.
private TopicPartitionEntry getPartition(TopicPartition topicPartition) {
TopicPartitionEntry ent = topicPartitions.get(topicPartition);
if (ent == null)
throw new IllegalStateException("Trying to get the sequence number for " + topicPartition +
", but the sequence number was never set for this partition.");
return ent;
}
// Returns the entry for the partition, creating a fresh one on first use.
private TopicPartitionEntry getOrCreatePartition(TopicPartition topicPartition) {
return topicPartitions.computeIfAbsent(topicPartition, tp -> new TopicPartitionEntry());
}
private boolean contains(TopicPartition topicPartition) {
return topicPartitions.containsKey(topicPartition);
}
// Drops all per-partition state.
private void reset() {
topicPartitions.clear();
}
// Last acked offset for the partition, or empty when unknown / nothing acked yet.
private OptionalLong lastAckedOffset(TopicPartition topicPartition) {
TopicPartitionEntry entry = topicPartitions.get(topicPartition);
if (entry != null && entry.lastAckedOffset != ProduceResponse.INVALID_OFFSET)
return OptionalLong.of(entry.lastAckedOffset);
else
return OptionalLong.empty();
}
// Last acked sequence for the partition, or empty when nothing has been acked yet.
private OptionalInt lastAckedSequence(TopicPartition topicPartition) {
TopicPartitionEntry entry = topicPartitions.get(topicPartition);
if (entry != null && entry.lastAckedSequence != NO_LAST_ACKED_SEQUENCE_NUMBER)
return OptionalInt.of(entry.lastAckedSequence);
else
return OptionalInt.empty();
}
// Rewrites the sequences of all in-flight batches for the partition to start at 0 under the
// new producer id/epoch, then resets the partition's next sequence and last-acked sequence.
private void startSequencesAtBeginning(TopicPartition topicPartition, ProducerIdAndEpoch newProducerIdAndEpoch) {
final PrimitiveRef.IntRef sequence = PrimitiveRef.ofInt(0);
TopicPartitionEntry topicPartitionEntry = getPartition(topicPartition);
topicPartitionEntry.resetSequenceNumbers(inFlightBatch -> {
inFlightBatch.resetProducerState(newProducerIdAndEpoch, sequence.value, inFlightBatch.isTransactional());
// Each batch consumes as many sequence numbers as it contains records.
sequence.value += inFlightBatch.recordCount;
});
topicPartitionEntry.producerIdAndEpoch = newProducerIdAndEpoch;
topicPartitionEntry.nextSequence = sequence.value;
topicPartitionEntry.lastAckedSequence = NO_LAST_ACKED_SEQUENCE_NUMBER;
}
}
/** Per-partition bookkeeping: producer id/epoch, sequence numbers, and in-flight batches. */
private static class TopicPartitionEntry {
// The producer id/epoch being used for a given partition.
private ProducerIdAndEpoch producerIdAndEpoch;
// The base sequence of the next batch bound for a given partition.
private int nextSequence;
// The sequence number of the last record of the last ack'd batch from the given partition. When there are no
// in flight requests for a partition, the lastAckedSequence(topicPartition) == nextSequence(topicPartition) - 1.
private int lastAckedSequence;
// Keep track of the in flight batches bound for a partition, ordered by sequence. This helps us to ensure that
// we continue to order batches by the sequence numbers even when the responses come back out of order during
// leader failover. We add a batch to the queue when it is drained, and remove it when the batch completes
// (either successfully or through a fatal failure).
private SortedSet<ProducerBatch> inflightBatchesBySequence;
// We keep track of the last acknowledged offset on a per partition basis in order to disambiguate UnknownProducer
// responses which are due to the retention period elapsing, and those which are due to actual lost data.
private long lastAckedOffset;
// `inflightBatchesBySequence` should only have batches with the same producer id and producer
// epoch, but there is an edge case where we may remove the wrong batch if the comparator
// only takes `baseSequence` into account.
// See https://github.com/apache/kafka/pull/12096#pullrequestreview-955554191 for details.
private static final Comparator<ProducerBatch> PRODUCER_BATCH_COMPARATOR =
Comparator.comparingLong(ProducerBatch::producerId)
.thenComparing(ProducerBatch::producerEpoch)
.thenComparingInt(ProducerBatch::baseSequence);
TopicPartitionEntry() {
this.producerIdAndEpoch = ProducerIdAndEpoch.NONE;
this.nextSequence = 0;
this.lastAckedSequence = NO_LAST_ACKED_SEQUENCE_NUMBER;
this.lastAckedOffset = ProduceResponse.INVALID_OFFSET;
this.inflightBatchesBySequence = new TreeSet<>(PRODUCER_BATCH_COMPARATOR);
}
// Applies the reset to every in-flight batch and rebuilds the sorted set, because the
// comparator's inputs (producer id, epoch, base sequence) change under the reset and
// mutating keys inside a TreeSet would corrupt its ordering.
void resetSequenceNumbers(Consumer<ProducerBatch> resetSequence) {
TreeSet<ProducerBatch> newInflights = new TreeSet<>(PRODUCER_BATCH_COMPARATOR);
for (ProducerBatch inflightBatch : inflightBatchesBySequence) {
resetSequence.accept(inflightBatch);
newInflights.add(inflightBatch);
}
inflightBatchesBySequence = newInflights;
}
}
private final TopicPartitionBookkeeper topicPartitionBookkeeper;
// Offsets handed to sendOffsetsToTransaction that have not yet been committed on the broker.
private final Map<TopicPartition, CommittedOffset> pendingTxnOffsetCommits;
// If a batch bound for a partition expired locally after being sent at least once, the partition is considered
// to have an unresolved state. We keep track of such partitions here, and cannot assign any more sequence numbers
// for this partition until the unresolved state gets cleared. This may happen if other inflight batches returned
// successfully (indicating that the expired batch actually made it to the broker). If we don't get any successful
// responses for the partition once the inflight request count falls to zero, we reset the producer id and
// consequently clear this data structure as well.
// The value of the map is the sequence number of the batch following the expired one, computed by adding its
// record count to its sequence number. This is used to tell if a subsequent batch is the one immediately following
// the expired one.
private final Map<TopicPartition, Integer> partitionsWithUnresolvedSequences;
// The partitions that have received an error that triggers an epoch bump. When the epoch is bumped, these
// partitions will have the sequences of their in-flight batches rewritten
private final Set<TopicPartition> partitionsToRewriteSequences;
// Transactional requests awaiting dispatch, ordered by Priority (see the Priority enum below).
private final PriorityQueue<TxnRequestHandler> pendingRequests;
// Partitions added by the user (maybeAddPartition) but not yet sent to the coordinator.
private final Set<TopicPartition> newPartitionsInTransaction;
// NOTE(review): presumably partitions with an AddPartitionsToTxn request currently in
// flight — confirm against the request handlers outside this chunk.
private final Set<TopicPartition> pendingPartitionsInTransaction;
// Partitions confirmed as part of the current transaction.
private final Set<TopicPartition> partitionsInTransaction;
private PendingStateTransition pendingTransition;
// This is used by the TxnRequestHandlers to control how long to back off before a given request is retried.
// For instance, this value is lowered by the AddPartitionsToTxnHandler when it receives a CONCURRENT_TRANSACTIONS
// error for the first AddPartitionsRequest in a transaction.
private final long retryBackoffMs;
// The retryBackoff is overridden to the following value if the first AddPartitions receives a
// CONCURRENT_TRANSACTIONS error.
private static final long ADD_PARTITIONS_RETRY_BACKOFF_MS = 20L;
private int inFlightRequestCorrelationId = NO_INFLIGHT_REQUEST_CORRELATION_ID;
private Node transactionCoordinator;
private Node consumerGroupCoordinator;
// NOTE(review): presumably whether the broker-side coordinator supports epoch bumping
// (version dependent) — confirm against the ApiVersions handling outside this chunk.
private boolean coordinatorSupportsBumpingEpoch;
private volatile State currentState = State.UNINITIALIZED;
private volatile RuntimeException lastError = null;
private volatile ProducerIdAndEpoch producerIdAndEpoch;
private volatile boolean transactionStarted = false;
// Set when an error requires bumping the producer epoch before continuing (see
// requestEpochBumpForPartition / bumpIdempotentProducerEpoch).
private volatile boolean epochBumpRequired = false;
/**
 * Lifecycle states of the transaction manager. Legal moves between states are
 * validated by {@link #isTransitionValid(State, State)}.
 */
private enum State {
UNINITIALIZED,
INITIALIZING,
READY,
IN_TRANSACTION,
COMMITTING_TRANSACTION,
ABORTING_TRANSACTION,
// An error state that can be recovered from by aborting the transaction.
ABORTABLE_ERROR,
// An unrecoverable error state; reachable from any state.
FATAL_ERROR;
// Returns whether moving from `source` to `target` is a legal state transition.
private boolean isTransitionValid(State source, State target) {
switch (target) {
case UNINITIALIZED:
return source == READY;
case INITIALIZING:
return source == UNINITIALIZED || source == ABORTING_TRANSACTION;
case READY:
return source == INITIALIZING || source == COMMITTING_TRANSACTION || source == ABORTING_TRANSACTION;
case IN_TRANSACTION:
return source == READY;
case COMMITTING_TRANSACTION:
return source == IN_TRANSACTION;
case ABORTING_TRANSACTION:
return source == IN_TRANSACTION || source == ABORTABLE_ERROR;
case ABORTABLE_ERROR:
return source == IN_TRANSACTION || source == COMMITTING_TRANSACTION || source == ABORTABLE_ERROR;
case FATAL_ERROR:
default:
// We can transition to FATAL_ERROR unconditionally.
// FATAL_ERROR is never a valid starting state for any transition. So the only option is to close the
// producer or do purely non transactional requests.
return true;
}
}
}
// We use the priority to determine the order in which requests need to be sent out. For instance, if we have
// a pending FindCoordinator request, that must always go first. Next, If we need a producer id, that must go second.
// The endTxn request must always go last, unless we are bumping the epoch (a special case of InitProducerId) as
// part of ending the transaction.
private enum Priority {
FIND_COORDINATOR(0),
INIT_PRODUCER_ID(1),
ADD_PARTITIONS_OR_OFFSETS(2),
END_TXN(3),
EPOCH_BUMP(4);
// Lower values are dispatched first (see the pendingRequests comparator in the constructor).
final int priority;
Priority(int priority) {
this.priority = priority;
}
}
/**
 * Creates a transaction manager.
 *
 * @param logContext           context used to create the logger
 * @param transactionalId      the configured transactional id, or null for an idempotent-only producer
 * @param transactionTimeoutMs transaction timeout sent with InitProducerId requests
 * @param retryBackoffMs       default backoff between retries of transactional requests
 * @param apiVersions          broker API version information
 */
public TransactionManager(final LogContext logContext,
final String transactionalId,
final int transactionTimeoutMs,
final long retryBackoffMs,
final ApiVersions apiVersions) {
this.producerIdAndEpoch = ProducerIdAndEpoch.NONE;
this.transactionalId = transactionalId;
this.log = logContext.logger(TransactionManager.class);
this.transactionTimeoutMs = transactionTimeoutMs;
this.transactionCoordinator = null;
this.consumerGroupCoordinator = null;
this.newPartitionsInTransaction = new HashSet<>();
this.pendingPartitionsInTransaction = new HashSet<>();
this.partitionsInTransaction = new HashSet<>();
// Requests are dispatched in Priority order (lowest priority value first).
this.pendingRequests = new PriorityQueue<>(10, Comparator.comparingInt(o -> o.priority().priority));
this.pendingTxnOffsetCommits = new HashMap<>();
this.partitionsWithUnresolvedSequences = new HashMap<>();
this.partitionsToRewriteSequences = new HashSet<>();
this.retryBackoffMs = retryBackoffMs;
this.topicPartitionBookkeeper = new TopicPartitionBookkeeper();
this.apiVersions = apiVersions;
}
/** Begins initialization with a fresh producer id (no epoch bump). */
public synchronized TransactionalRequestResult initializeTransactions() {
return initializeTransactions(ProducerIdAndEpoch.NONE);
}
/**
 * Enqueues an InitProducerId request. Passing a real producer id/epoch (anything other
 * than {@code ProducerIdAndEpoch.NONE}) requests an epoch bump for the existing id
 * instead of acquiring a new one.
 *
 * @param producerIdAndEpoch the current id/epoch to bump, or NONE for first-time initialization
 * @return the result handle for the enqueued request
 */
synchronized TransactionalRequestResult initializeTransactions(ProducerIdAndEpoch producerIdAndEpoch) {
maybeFailWithError();
boolean isEpochBump = producerIdAndEpoch != ProducerIdAndEpoch.NONE;
return handleCachedTransactionRequestResult(() -> {
// If this is an epoch bump, we will transition the state as part of handling the EndTxnRequest
if (!isEpochBump) {
transitionTo(State.INITIALIZING);
log.info("Invoking InitProducerId for the first time in order to acquire a producer ID");
} else {
log.info("Invoking InitProducerId with current producer ID and epoch {} in order to bump the epoch", producerIdAndEpoch);
}
InitProducerIdRequestData requestData = new InitProducerIdRequestData()
.setTransactionalId(transactionalId)
.setTransactionTimeoutMs(transactionTimeoutMs)
.setProducerId(producerIdAndEpoch.producerId)
.setProducerEpoch(producerIdAndEpoch.epoch);
InitProducerIdHandler handler = new InitProducerIdHandler(new InitProducerIdRequest.Builder(requestData),
isEpochBump);
enqueueRequest(handler);
return handler.result;
}, State.INITIALIZING, "initTransactions");
}
/**
 * Transitions the manager to IN_TRANSACTION after validating that the producer is
 * transactional, no state transition is pending, and no prior error is outstanding.
 */
public synchronized void beginTransaction() {
ensureTransactional();
throwIfPendingState("beginTransaction");
maybeFailWithError();
transitionTo(State.IN_TRANSACTION);
}
/** Begins committing the current transaction; returns the result handle for the EndTxn round trip. */
public synchronized TransactionalRequestResult beginCommit() {
return handleCachedTransactionRequestResult(() -> {
maybeFailWithError();
transitionTo(State.COMMITTING_TRANSACTION);
return beginCompletingTransaction(TransactionResult.COMMIT);
}, State.COMMITTING_TRANSACTION, "commitTransaction");
}
/** Begins aborting the current transaction; also valid as the recovery path from ABORTABLE_ERROR. */
public synchronized TransactionalRequestResult beginAbort() {
return handleCachedTransactionRequestResult(() -> {
if (currentState != State.ABORTABLE_ERROR)
maybeFailWithError();
transitionTo(State.ABORTING_TRANSACTION);
// We're aborting the transaction, so there should be no need to add new partitions
newPartitionsInTransaction.clear();
return beginCompletingTransaction(TransactionResult.ABORT);
}, State.ABORTING_TRANSACTION, "abortTransaction");
}
/**
 * Enqueues the requests that finish a transaction: any outstanding AddPartitions,
 * then EndTxn, then (when an epoch bump is required or the PID mapping is invalid)
 * an InitProducerId to bump the epoch.
 */
private TransactionalRequestResult beginCompletingTransaction(TransactionResult transactionResult) {
if (!newPartitionsInTransaction.isEmpty())
enqueueRequest(addPartitionsToTransactionHandler());
// If the error is an INVALID_PRODUCER_ID_MAPPING error, the server will not accept an EndTxnRequest, so skip
// directly to InitProducerId. Otherwise, we must first abort the transaction, because the producer will be
// fenced if we directly call InitProducerId.
if (!(lastError instanceof InvalidPidMappingException)) {
EndTxnRequest.Builder builder = new EndTxnRequest.Builder(
new EndTxnRequestData()
.setTransactionalId(transactionalId)
.setProducerId(producerIdAndEpoch.producerId)
.setProducerEpoch(producerIdAndEpoch.epoch)
.setCommitted(transactionResult.id));
EndTxnHandler handler = new EndTxnHandler(builder);
enqueueRequest(handler);
if (!epochBumpRequired) {
return handler.result;
}
}
return initializeTransactions(this.producerIdAndEpoch);
}
/**
 * Adds the given consumed offsets to the current transaction via AddOffsetsToTxn.
 * Must be called while a transaction is in progress.
 *
 * @param offsets       offsets to commit as part of the transaction
 * @param groupMetadata metadata of the consumer group the offsets belong to
 * @return the result handle for the enqueued request
 * @throws IllegalStateException if no transaction is in progress
 */
public synchronized TransactionalRequestResult sendOffsetsToTransaction(final Map<TopicPartition, OffsetAndMetadata> offsets,
final ConsumerGroupMetadata groupMetadata) {
ensureTransactional();
throwIfPendingState("sendOffsetsToTransaction");
maybeFailWithError();
if (currentState != State.IN_TRANSACTION) {
throw new IllegalStateException("Cannot send offsets if a transaction is not in progress " +
"(currentState= " + currentState + ")");
}
log.debug("Begin adding offsets {} for consumer group {} to transaction", offsets, groupMetadata);
AddOffsetsToTxnRequest.Builder builder = new AddOffsetsToTxnRequest.Builder(
new AddOffsetsToTxnRequestData()
.setTransactionalId(transactionalId)
.setProducerId(producerIdAndEpoch.producerId)
.setProducerEpoch(producerIdAndEpoch.epoch)
.setGroupId(groupMetadata.groupId())
);
AddOffsetsToTxnHandler handler = new AddOffsetsToTxnHandler(builder, offsets, groupMetadata);
enqueueRequest(handler);
return handler.result;
}
/**
 * Marks a partition for inclusion in the current transaction (no-op for non-transactional
 * producers, or when the partition was already added or is pending).
 *
 * @param topicPartition the partition to add
 * @throws IllegalStateException if transactional but uninitialized, or not in a transaction
 */
public synchronized void maybeAddPartition(TopicPartition topicPartition) {
maybeFailWithError();
throwIfPendingState("send");
if (isTransactional()) {
if (!hasProducerId()) {
throw new IllegalStateException("Cannot add partition " + topicPartition +
" to transaction before completing a call to initTransactions");
} else if (currentState != State.IN_TRANSACTION) {
throw new IllegalStateException("Cannot add partition " + topicPartition +
" to transaction while in state " + currentState);
} else if (isPartitionAdded(topicPartition) || isPartitionPendingAdd(topicPartition)) {
return;
} else {
log.debug("Begin adding new partition {} to transaction", topicPartition);
topicPartitionBookkeeper.getOrCreatePartition(topicPartition);
newPartitionsInTransaction.add(topicPartition);
}
}
}
/** Returns the last error recorded by a state transition, or null if none. */
RuntimeException lastError() {
return lastError;
}
// Sends are blocked on fatal error; for transactional producers the partition must
// already have been added to the transaction.
synchronized boolean isSendToPartitionAllowed(TopicPartition tp) {
if (hasFatalError())
return false;
return !isTransactional() || partitionsInTransaction.contains(tp);
}
public String transactionalId() {
return transactionalId;
}
/** Whether a valid producer id/epoch has been acquired. */
public boolean hasProducerId() {
return producerIdAndEpoch.isValid();
}
/** Whether this producer was configured with a transactional id. */
public boolean isTransactional() {
return transactionalId != null;
}
/** Whether any partitions still need to be registered with the transaction coordinator. */
synchronized boolean hasPartitionsToAdd() {
return !newPartitionsInTransaction.isEmpty() || !pendingPartitionsInTransaction.isEmpty();
}
/** Whether a commit or abort is currently in progress. */
synchronized boolean isCompleting() {
return currentState == State.COMMITTING_TRANSACTION || currentState == State.ABORTING_TRANSACTION;
}
/** Whether the manager is in either error state (abortable or fatal). */
synchronized boolean hasError() {
return currentState == State.ABORTABLE_ERROR || currentState == State.FATAL_ERROR;
}
synchronized boolean isAborting() {
return currentState == State.ABORTING_TRANSACTION;
}
/** Records an abortable error, unless an abort is already in progress. */
synchronized void transitionToAbortableError(RuntimeException exception) {
if (currentState == State.ABORTING_TRANSACTION) {
log.debug("Skipping transition to abortable error state since the transaction is already being " +
"aborted. Underlying exception: ", exception);
return;
}
log.info("Transiting to abortable error state due to {}", exception.toString()).
transitionTo(State.ABORTABLE_ERROR, exception);
}
/** Records a fatal error and fails any pending state transition with it. */
synchronized void transitionToFatalError(RuntimeException exception) {
log.info("Transiting to fatal error state due to {}", exception.toString());
transitionTo(State.FATAL_ERROR, exception);
if (pendingTransition != null) {
pendingTransition.result.fail(exception);
}
}
// visible for testing
synchronized boolean isPartitionAdded(TopicPartition partition) {
return partitionsInTransaction.contains(partition);
}
// visible for testing
synchronized boolean isPartitionPendingAdd(TopicPartition partition) {
return newPartitionsInTransaction.contains(partition) || pendingPartitionsInTransaction.contains(partition);
}
/**
 * Get the current producer id and epoch without blocking. Callers must use {@link ProducerIdAndEpoch#isValid()} to
 * verify that the result is valid.
 *
 * @return the current ProducerIdAndEpoch.
 */
ProducerIdAndEpoch producerIdAndEpoch() {
return producerIdAndEpoch;
}
/**
 * After an epoch bump, once all of a partition's in-flight batches complete, restarts
 * its sequence numbers from 0 under the current producer id/epoch.
 */
synchronized public void maybeUpdateProducerIdAndEpoch(TopicPartition topicPartition) {
if (hasStaleProducerIdAndEpoch(topicPartition) && !hasInflightBatches(topicPartition)) {
// If the batch was on a different ID and/or epoch (due to an epoch bump) and all its in-flight batches
// have completed, reset the partition sequence so that the next batch (with the new epoch) starts from 0
topicPartitionBookkeeper.startSequencesAtBeginning(topicPartition, this.producerIdAndEpoch);
log.debug("ProducerId of partition {} set to {} with epoch {}. Reinitialize sequence at beginning.",
topicPartition, producerIdAndEpoch.producerId, producerIdAndEpoch.epoch);
}
}
/**
 * Set the producer id and epoch atomically.
 */
private void setProducerIdAndEpoch(ProducerIdAndEpoch producerIdAndEpoch) {
log.info("ProducerId set to {} with epoch {}", producerIdAndEpoch.producerId, producerIdAndEpoch.epoch);
this.producerIdAndEpoch = producerIdAndEpoch;
}
/**
 * This method resets the producer ID and epoch and sets the state to UNINITIALIZED, which will trigger a new
 * InitProducerId request. This method is only called when the producer epoch is exhausted; we will bump the epoch
 * instead.
 */
private void resetIdempotentProducerId() {
if (isTransactional())
throw new IllegalStateException("Cannot reset producer state for a transactional producer. " +
"You must either abort the ongoing transaction or reinitialize the transactional producer instead");
log.debug("Resetting idempotent producer ID. ID and epoch before reset are {}", this.producerIdAndEpoch);
setProducerIdAndEpoch(ProducerIdAndEpoch.NONE);
transitionTo(State.UNINITIALIZED);
}
// Drops all sequence bookkeeping (including any unresolved state) for a single partition.
private void resetSequenceForPartition(TopicPartition topicPartition) {
topicPartitionBookkeeper.topicPartitions.remove(topicPartition);
this.partitionsWithUnresolvedSequences.remove(topicPartition);
}
// Drops all sequence bookkeeping and unresolved state for every partition.
private void resetSequenceNumbers() {
topicPartitionBookkeeper.reset();
this.partitionsWithUnresolvedSequences.clear();
}
// Flags that an epoch bump is needed and marks the partition for sequence rewriting
// once the bump happens (see bumpIdempotentProducerEpoch).
synchronized void requestEpochBumpForPartition(TopicPartition tp) {
epochBumpRequired = true;
this.partitionsToRewriteSequences.add(tp);
}
// Bumps the idempotent producer epoch locally, or resets the producer id entirely if the
// epoch space is exhausted; then rewrites sequences for the partitions that requested the bump.
private void bumpIdempotentProducerEpoch() {
if (this.producerIdAndEpoch.epoch == Short.MAX_VALUE) {
resetIdempotentProducerId();
} else {
setProducerIdAndEpoch(new ProducerIdAndEpoch(this.producerIdAndEpoch.producerId, (short) (this.producerIdAndEpoch.epoch + 1)));
log.debug("Incremented producer epoch, current producer ID and epoch are now {}", this.producerIdAndEpoch);
}
// When the epoch is bumped, rewrite all in-flight sequences for the partition(s) that triggered the epoch bump
for (TopicPartition topicPartition : this.partitionsToRewriteSequences) {
this.topicPartitionBookkeeper.startSequencesAtBeginning(topicPartition, this.producerIdAndEpoch);
this.partitionsWithUnresolvedSequences.remove(topicPartition);
}
this.partitionsToRewriteSequences.clear();
epochBumpRequired = false;
}
// For idempotent-only producers: performs a requested epoch bump and, if no producer id is
// held, enqueues an InitProducerId request to acquire one.
synchronized void bumpIdempotentEpochAndResetIdIfNeeded() {
if (!isTransactional()) {
if (epochBumpRequired) {
bumpIdempotentProducerEpoch();
}
if (currentState != State.INITIALIZING && !hasProducerId()) {
transitionTo(State.INITIALIZING);
InitProducerIdRequestData requestData = new InitProducerIdRequestData()
.setTransactionalId(null)
.setTransactionTimeoutMs(Integer.MAX_VALUE);
InitProducerIdHandler handler = new InitProducerIdHandler(new InitProducerIdRequest.Builder(requestData), false);
enqueueRequest(handler);
}
}
}
/**
 * Returns the next sequence number to be written to the given TopicPartition.
 */
synchronized Integer sequenceNumber(TopicPartition topicPartition) {
return topicPartitionBookkeeper.getOrCreatePartition(topicPartition).nextSequence;
}
/**
 * Returns the current producer id/epoch of the given TopicPartition.
 */
synchronized ProducerIdAndEpoch producerIdAndEpoch(TopicPartition topicPartition) {
return topicPartitionBookkeeper.getOrCreatePartition(topicPartition).producerIdAndEpoch;
}
// Advances the partition's next sequence, delegating wrap-around handling to
// DefaultRecordBatch.incrementSequence.
synchronized void incrementSequenceNumber(TopicPartition topicPartition, int increment) {
Integer currentSequence = sequenceNumber(topicPartition);
currentSequence = DefaultRecordBatch.incrementSequence(currentSequence, increment);
topicPartitionBookkeeper.getPartition(topicPartition).nextSequence = currentSequence;
}
// Registers a drained batch in the partition's in-flight set; the batch must already
// have its sequence assigned.
synchronized void addInFlightBatch(ProducerBatch batch) {
if (!batch.hasSequence())
throw new IllegalStateException("Can't track batch for partition " + batch.topicPartition + " when sequence is not set.");
topicPartitionBookkeeper.getPartition(batch.topicPartition).inflightBatchesBySequence.add(batch);
}
/**
* Returns the first inflight sequence for a given partition. This is the base sequence of an inflight batch with
* the lowest sequence number.
* @return the lowest inflight sequence if the transaction manager is tracking inflight requests for this partition.
* If there are no inflight requests being tracked for this partition, this method will return
* RecordBatch.NO_SEQUENCE.
*/
synchronized int firstInFlightSequence(TopicPartition topicPartition) {
if (!hasInflightBatches(topicPartition))
return RecordBatch.NO_SEQUENCE;
SortedSet<ProducerBatch> inflightBatches = topicPartitionBookkeeper.getPartition(topicPartition).inflightBatchesBySequence;
if (inflightBatches.isEmpty())
return RecordBatch.NO_SEQUENCE;
else
return inflightBatches.first().baseSequence();
}
synchronized ProducerBatch nextBatchBySequence(TopicPartition topicPartition) {
SortedSet<ProducerBatch> queue = topicPartitionBookkeeper.getPartition(topicPartition).inflightBatchesBySequence;
return queue.isEmpty() ? null : queue.first();
}
synchronized void removeInFlightBatch(ProducerBatch batch) {
if (hasInflightBatches(batch.topicPartition)) {
topicPartitionBookkeeper.getPartition(batch.topicPartition).inflightBatchesBySequence.remove(batch);
}
}
/**
 * Records {@code sequence} as the last acknowledged sequence for the partition if it is
 * higher than the currently recorded value.
 *
 * @return the (possibly updated) last acknowledged sequence for the partition
 */
private int maybeUpdateLastAckedSequence(TopicPartition topicPartition, int sequence) {
    int lastAckedSequence = lastAckedSequence(topicPartition).orElse(NO_LAST_ACKED_SEQUENCE_NUMBER);
    if (sequence <= lastAckedSequence)
        return lastAckedSequence;
    topicPartitionBookkeeper.getPartition(topicPartition).lastAckedSequence = sequence;
    return sequence;
}
/** Last acknowledged sequence for the partition, empty if nothing has been acked yet. */
synchronized OptionalInt lastAckedSequence(TopicPartition topicPartition) {
    return topicPartitionBookkeeper.lastAckedSequence(topicPartition);
}
/** Last acknowledged offset for the partition, empty if nothing has been acked yet. */
synchronized OptionalLong lastAckedOffset(TopicPartition topicPartition) {
    return topicPartitionBookkeeper.lastAckedOffset(topicPartition);
}
/**
 * Updates the last-acked offset bookkeeping for the batch's partition from a successful
 * produce response. Ignores responses that carry no valid base offset.
 */
private void updateLastAckedOffset(ProduceResponse.PartitionResponse response, ProducerBatch batch) {
    if (response.baseOffset == ProduceResponse.INVALID_OFFSET)
        return;
    // The last record's offset is base offset plus the record count minus one.
    long lastOffset = response.baseOffset + batch.recordCount - 1;
    OptionalLong lastAckedOffset = lastAckedOffset(batch.topicPartition);
    // It might happen that the TransactionManager has been reset while a request was reenqueued and got a valid
    // response for this. This can happen only if the producer is only idempotent (not transactional) and in
    // this case there will be no tracked bookkeeper entry about it, so we have to insert one.
    if (!lastAckedOffset.isPresent() && !isTransactional()) {
        topicPartitionBookkeeper.getOrCreatePartition(batch.topicPartition);
    }
    // Only move the last acked offset forward; stale/duplicate responses are logged and dropped.
    if (lastOffset > lastAckedOffset.orElse(ProduceResponse.INVALID_OFFSET)) {
        topicPartitionBookkeeper.getPartition(batch.topicPartition).lastAckedOffset = lastOffset;
    } else {
        log.trace("Partition {} keeps lastOffset at {}", batch.topicPartition, lastOffset);
    }
}
/**
 * Handles a successfully produced batch: advances the last acked sequence and offset
 * for its partition and removes the batch from the in-flight set.
 */
public synchronized void handleCompletedBatch(ProducerBatch batch, ProduceResponse.PartitionResponse response) {
    int lastAckedSequence = maybeUpdateLastAckedSequence(batch.topicPartition, batch.lastSequence());
    log.debug("ProducerId: {}; Set last ack'd sequence number for topic-partition {} to {}",
        batch.producerId(),
        batch.topicPartition,
        lastAckedSequence);
    updateLastAckedOffset(response, batch);
    removeInFlightBatch(batch);
}
/**
 * Transitions to an error state depending on the exception type: authorization,
 * fencing and version errors are fatal; for a transactional producer, other errors
 * are abortable (requesting an epoch bump first when the coordinator supports it).
 */
public synchronized void maybeTransitionToErrorState(RuntimeException exception) {
    if (exception instanceof ClusterAuthorizationException
            || exception instanceof TransactionalIdAuthorizationException
            || exception instanceof ProducerFencedException
            || exception instanceof UnsupportedVersionException) {
        transitionToFatalError(exception);
    } else if (isTransactional()) {
        // Don't request a bump while a commit/abort is already completing.
        if (canBumpEpoch() && !isCompleting()) {
            epochBumpRequired = true;
        }
        transitionToAbortableError(exception);
    }
}
/**
 * Handles a batch that failed with the given exception: updates the error state,
 * stops tracking the batch, and adjusts sequence/epoch bookkeeping depending on
 * the failure type and whether the producer is transactional.
 *
 * @param adjustSequenceNumbers whether future sequence numbers should be adjusted
 *                              when the batch failed fatally for other reasons
 */
synchronized void handleFailedBatch(ProducerBatch batch, RuntimeException exception, boolean adjustSequenceNumbers) {
    maybeTransitionToErrorState(exception);
    removeInFlightBatch(batch);
    if (hasFatalError()) {
        log.debug("Ignoring batch {} with producer id {}, epoch {}, and sequence number {} " +
                "since the producer is already in fatal error state", batch, batch.producerId(),
            batch.producerEpoch(), batch.baseSequence(), exception);
        return;
    }
    if (exception instanceof OutOfOrderSequenceException && !isTransactional()) {
        log.error("The broker returned {} for topic-partition {} with producerId {}, epoch {}, and sequence number {}",
            exception, batch.topicPartition, batch.producerId(), batch.producerEpoch(), batch.baseSequence());
        // If we fail with an OutOfOrderSequenceException, we have a gap in the log. Bump the epoch for this
        // partition, which will reset the sequence number to 0 and allow us to continue
        requestEpochBumpForPartition(batch.topicPartition);
    } else if (exception instanceof UnknownProducerIdException) {
        // If we get an UnknownProducerId for a partition, then the broker has no state for that producer. It will
        // therefore accept a write with sequence number 0. We reset the sequence number for the partition here so
        // that the producer can continue after aborting the transaction. All inflight-requests to this partition
        // will also fail with an UnknownProducerId error, so the sequence will remain at 0. Note that if the
        // broker supports bumping the epoch, we will later reset all sequence numbers after calling InitProducerId
        resetSequenceForPartition(batch.topicPartition);
    } else {
        if (adjustSequenceNumbers) {
            if (!isTransactional()) {
                // Idempotent producer: bump the epoch rather than shifting sequences.
                requestEpochBumpForPartition(batch.topicPartition);
            } else {
                adjustSequencesDueToFailedBatch(batch);
            }
        }
    }
}
// If a batch is failed fatally, the sequence numbers for future batches bound for the partition must be adjusted
// so that they don't fail with the OutOfOrderSequenceException.
//
// This method must only be called when we know that the batch is question has been unequivocally failed by the broker,
// ie. it has received a confirmed fatal status code like 'Message Too Large' or something similar.
/**
 * Shifts the next sequence number and the sequences of all in-flight batches at or
 * beyond the failed batch down by the failed batch's record count, so subsequent
 * batches don't hit OutOfOrderSequenceException after a fatal per-batch failure.
 */
private void adjustSequencesDueToFailedBatch(ProducerBatch batch) {
    if (!topicPartitionBookkeeper.contains(batch.topicPartition))
        // Sequence numbers are not being tracked for this partition. This could happen if the producer id was just
        // reset due to a previous OutOfOrderSequenceException.
        return;
    log.debug("producerId: {}, send to partition {} failed fatally. Reducing future sequence numbers by {}",
        batch.producerId(), batch.topicPartition, batch.recordCount);
    int currentSequence = sequenceNumber(batch.topicPartition);
    currentSequence -= batch.recordCount;
    if (currentSequence < 0)
        throw new IllegalStateException("Sequence number for partition " + batch.topicPartition + " is going to become negative: " + currentSequence);
    setNextSequence(batch.topicPartition, currentSequence);
    topicPartitionBookkeeper.getPartition(batch.topicPartition).resetSequenceNumbers(inFlightBatch -> {
        // Batches before the failed one keep their sequences.
        if (inFlightBatch.baseSequence() < batch.baseSequence())
            return;
        int newSequence = inFlightBatch.baseSequence() - batch.recordCount;
        if (newSequence < 0)
            throw new IllegalStateException("Sequence number for batch with sequence " + inFlightBatch.baseSequence()
                + " for partition " + batch.topicPartition + " is going to become negative: " + newSequence);
        inFlightBatch.resetProducerState(new ProducerIdAndEpoch(inFlightBatch.producerId(), inFlightBatch.producerEpoch()), newSequence, inFlightBatch.isTransactional());
    });
}
/**
 * Whether any batches are currently tracked as in-flight for the partition.
 * Note: creates bookkeeping state for the partition as a side effect if absent.
 */
synchronized boolean hasInflightBatches(TopicPartition topicPartition) {
    return !topicPartitionBookkeeper.getOrCreatePartition(topicPartition).inflightBatchesBySequence.isEmpty();
}
/**
 * Whether the partition's tracked producer id/epoch differs from the manager's
 * current producer id/epoch (i.e. the partition still references a stale one).
 */
synchronized boolean hasStaleProducerIdAndEpoch(TopicPartition topicPartition) {
    ProducerIdAndEpoch partitionIdAndEpoch = topicPartitionBookkeeper.getOrCreatePartition(topicPartition).producerIdAndEpoch;
    return !producerIdAndEpoch.equals(partitionIdAndEpoch);
}
/** Whether any partition currently has an unresolved sequence. */
synchronized boolean hasUnresolvedSequences() {
    return !partitionsWithUnresolvedSequences.isEmpty();
}
/** Whether the given partition currently has an unresolved sequence. */
synchronized boolean hasUnresolvedSequence(TopicPartition topicPartition) {
    return partitionsWithUnresolvedSequences.containsKey(topicPartition);
}
/**
 * Marks the batch's partition as having an unresolved sequence, recording the highest
 * "next expected" sequence (last sequence of the batch plus one) seen so far.
 */
synchronized void markSequenceUnresolved(ProducerBatch batch) {
    TopicPartition partition = batch.topicPartition;
    int nextSequence = batch.lastSequence() + 1;
    // compute() returns the stored value, which is the max of any existing entry and nextSequence.
    Integer unresolvedSequence = partitionsWithUnresolvedSequences.compute(partition,
        (ignored, existing) -> existing == null ? nextSequence : Math.max(existing, nextSequence));
    log.debug("Marking partition {} unresolved with next sequence number {}", partition,
        unresolvedSequence);
}
// Attempts to resolve unresolved sequences. If all in-flight requests are complete and some partitions are still
// unresolved, either bump the epoch if possible, or transition to a fatal error
/**
 * Attempts to resolve partitions with unresolved sequences. For each fully-drained
 * partition: if the last acked sequence lines up with the next sequence, the partition
 * is resolved; otherwise the epoch is bumped (idempotent, or transactional with
 * coordinator support) or the producer transitions to a fatal error.
 */
synchronized void maybeResolveSequences() {
    for (Iterator<TopicPartition> iter = partitionsWithUnresolvedSequences.keySet().iterator(); iter.hasNext(); ) {
        TopicPartition topicPartition = iter.next();
        if (!hasInflightBatches(topicPartition)) {
            // The partition has been fully drained. At this point, the last ack'd sequence should be one less than
            // next sequence destined for the partition. If so, the partition is fully resolved. If not, we should
            // reset the sequence number if necessary.
            if (isNextSequence(topicPartition, sequenceNumber(topicPartition))) {
                // This would happen when a batch was expired, but subsequent batches succeeded.
                iter.remove();
            } else {
                // We would enter this branch if all in flight batches were ultimately expired in the producer.
                if (isTransactional()) {
                    // For the transactional producer, we bump the epoch if possible, otherwise we transition to a fatal error
                    String unackedMessagesErr = "The client hasn't received acknowledgment for some previously " +
                            "sent messages and can no longer retry them. ";
                    if (canBumpEpoch()) {
                        epochBumpRequired = true;
                        KafkaException exception = new KafkaException(unackedMessagesErr + "It is safe to abort " +
                                "the transaction and continue.");
                        transitionToAbortableError(exception);
                    } else {
                        KafkaException exception = new KafkaException(unackedMessagesErr + "It isn't safe to continue.");
                        transitionToFatalError(exception);
                    }
                } else {
                    // For the idempotent producer, bump the epoch
                    log.info("No inflight batches remaining for {}, last ack'd sequence for partition is {}, next sequence is {}. " +
                            "Going to bump epoch and reset sequence numbers.", topicPartition,
                        lastAckedSequence(topicPartition).orElse(NO_LAST_ACKED_SEQUENCE_NUMBER), sequenceNumber(topicPartition));
                    requestEpochBumpForPartition(topicPartition);
                }
                iter.remove();
            }
        }
    }
}
/** Whether {@code sequence} is exactly one greater than the partition's last acked sequence. */
private boolean isNextSequence(TopicPartition topicPartition, int sequence) {
    return sequence - lastAckedSequence(topicPartition).orElse(NO_LAST_ACKED_SEQUENCE_NUMBER) == 1;
}
/** Overwrites the next sequence number recorded for the partition. */
private void setNextSequence(TopicPartition topicPartition, int sequence) {
    topicPartitionBookkeeper.getPartition(topicPartition).nextSequence = sequence;
}
/**
 * Whether the partition has an unresolved sequence and {@code sequence} matches the
 * recorded "next expected" sequence for it.
 */
private boolean isNextSequenceForUnresolvedPartition(TopicPartition topicPartition, int sequence) {
    return this.hasUnresolvedSequence(topicPartition) &&
            sequence == this.partitionsWithUnresolvedSequences.get(topicPartition);
}
/**
 * Dequeues the next transactional request to send, or {@code null} when nothing
 * can be sent now. Enqueues a pending AddPartitionsToTxn first, defers EndTxn
 * while batches are incomplete, drops requests when in an error state, and skips
 * EndTxn entirely for transactions that never actually started.
 */
synchronized TxnRequestHandler nextRequest(boolean hasIncompleteBatches) {
    if (!newPartitionsInTransaction.isEmpty())
        enqueueRequest(addPartitionsToTransactionHandler());
    TxnRequestHandler nextRequestHandler = pendingRequests.peek();
    if (nextRequestHandler == null)
        return null;
    // Do not send the EndTxn until all batches have been flushed
    if (nextRequestHandler.isEndTxn() && hasIncompleteBatches)
        return null;
    pendingRequests.poll();
    if (maybeTerminateRequestWithError(nextRequestHandler)) {
        log.trace("Not sending transactional request {} because we are in an error state",
            nextRequestHandler.requestBuilder());
        return null;
    }
    if (nextRequestHandler.isEndTxn() && !transactionStarted) {
        // No partitions/offsets were added, so there is nothing for the broker to end.
        nextRequestHandler.result.done();
        if (currentState != State.FATAL_ERROR) {
            log.debug("Not sending EndTxn for completed transaction since no partitions " +
                "or offsets were successfully added");
            completeTransaction();
        }
        nextRequestHandler = pendingRequests.poll();
    }
    if (nextRequestHandler != null)
        log.trace("Request {} dequeued for sending", nextRequestHandler.requestBuilder());
    return nextRequestHandler;
}
/** Marks the request as a retry and re-enqueues it for sending. */
synchronized void retry(TxnRequestHandler request) {
    request.setRetry();
    enqueueRequest(request);
}
/** Fails all pending transactional requests fatally with the authentication exception. */
synchronized void authenticationFailed(AuthenticationException e) {
    pendingRequests.forEach(request -> request.fatalError(e));
}
/**
 * Fails all pending requests and any pending state transition with a forced-close
 * exception when the producer is shut down.
 */
synchronized void close() {
    KafkaException shutdownException = new KafkaException("The producer closed forcefully");
    for (TxnRequestHandler handler : pendingRequests)
        handler.fatalError(shutdownException);
    if (pendingTransition != null)
        pendingTransition.result.fail(shutdownException);
}
/**
 * Returns the cached coordinator node for the given coordinator type.
 *
 * @throws IllegalStateException for any type other than GROUP or TRANSACTION
 */
Node coordinator(FindCoordinatorRequest.CoordinatorType type) {
    if (type == FindCoordinatorRequest.CoordinatorType.GROUP)
        return consumerGroupCoordinator;
    if (type == FindCoordinatorRequest.CoordinatorType.TRANSACTION)
        return transactionCoordinator;
    throw new IllegalStateException("Received an invalid coordinator type: " + type);
}
/** Initiates a coordinator lookup using the request's own coordinator type and key. */
void lookupCoordinator(TxnRequestHandler request) {
    lookupCoordinator(request.coordinatorType(), request.coordinatorKey());
}
/** Records the correlation id of the single in-flight transactional request. */
void setInFlightCorrelationId(int correlationId) {
    inFlightRequestCorrelationId = correlationId;
}
/** Clears the in-flight correlation id back to the "no request in flight" sentinel. */
private void clearInFlightCorrelationId() {
    inFlightRequestCorrelationId = NO_INFLIGHT_REQUEST_CORRELATION_ID;
}
/** Whether a transactional request is currently in flight. */
boolean hasInFlightRequest() {
    return inFlightRequestCorrelationId != NO_INFLIGHT_REQUEST_CORRELATION_ID;
}
// visible for testing.
/** Whether the manager is in the FATAL_ERROR state. */
boolean hasFatalError() {
    return currentState == State.FATAL_ERROR;
}
// visible for testing.
/** Whether the manager is in the ABORTABLE_ERROR state. */
boolean hasAbortableError() {
    return currentState == State.ABORTABLE_ERROR;
}
// visible for testing
/** Whether the partition has been successfully added to the current transaction. */
synchronized boolean transactionContainsPartition(TopicPartition topicPartition) {
    return partitionsInTransaction.contains(topicPartition);
}
// visible for testing
/** Whether any transactional offset commits are still awaiting acknowledgment. */
synchronized boolean hasPendingOffsetCommits() {
    return !pendingTxnOffsetCommits.isEmpty();
}
/** Whether any transactional requests are queued for sending. */
synchronized boolean hasPendingRequests() {
    return !pendingRequests.isEmpty();
}
// visible for testing
/** Whether a transaction is in progress (started, completing, or abortable-errored). */
synchronized boolean hasOngoingTransaction() {
    // transactions are considered ongoing once started until completion or a fatal error
    return currentState == State.IN_TRANSACTION || isCompleting() || hasAbortableError();
}
/**
 * Decides whether a failed produce batch may be retried, based on the response error.
 * Handles the UNKNOWN_PRODUCER_ID and OUT_OF_ORDER_SEQUENCE_NUMBER cases specially
 * (possibly resetting sequences or requesting an epoch bump); otherwise retries only
 * retriable errors.
 */
synchronized boolean canRetry(ProduceResponse.PartitionResponse response, ProducerBatch batch) {
    Errors error = response.error;
    // An UNKNOWN_PRODUCER_ID means that we have lost the producer state on the broker. Depending on the log start
    // offset, we may want to retry these, as described for each case below. If none of those apply, then for the
    // idempotent producer, we will locally bump the epoch and reset the sequence numbers of in-flight batches from
    // sequence 0, then retry the failed batch, which should now succeed. For the transactional producer, allow the
    // batch to fail. When processing the failed batch, we will transition to an abortable error and set a flag
    // indicating that we need to bump the epoch (if supported by the broker).
    if (error == Errors.UNKNOWN_PRODUCER_ID) {
        if (response.logStartOffset == -1) {
            // We don't know the log start offset with this response. We should just retry the request until we get it.
            // The UNKNOWN_PRODUCER_ID error code was added along with the new ProduceResponse which includes the
            // logStartOffset. So the '-1' sentinel is not for backward compatibility. Instead, it is possible for
            // a broker to not know the logStartOffset at when it is returning the response because the partition
            // may have moved away from the broker from the time the error was initially raised to the time the
            // response was being constructed. In these cases, we should just retry the request: we are guaranteed
            // to eventually get a logStartOffset once things settle down.
            return true;
        }
        if (batch.sequenceHasBeenReset()) {
            // When the first inflight batch fails due to the truncation case, then the sequences of all the other
            // in flight batches would have been restarted from the beginning. However, when those responses
            // come back from the broker, they would also come with an UNKNOWN_PRODUCER_ID error. In this case, we should not
            // reset the sequence numbers to the beginning.
            return true;
        } else if (lastAckedOffset(batch.topicPartition).orElse(NO_LAST_ACKED_SEQUENCE_NUMBER) < response.logStartOffset) {
            // The head of the log has been removed, probably due to the retention time elapsing. In this case,
            // we expect to lose the producer state. For the transactional producer, reset the sequences of all
            // inflight batches to be from the beginning and retry them, so that the transaction does not need to
            // be aborted. For the idempotent producer, bump the epoch to avoid reusing (sequence, epoch) pairs
            if (isTransactional()) {
                topicPartitionBookkeeper.startSequencesAtBeginning(batch.topicPartition, this.producerIdAndEpoch);
            } else {
                requestEpochBumpForPartition(batch.topicPartition);
            }
            return true;
        }
        if (!isTransactional()) {
            // For the idempotent producer, always retry UNKNOWN_PRODUCER_ID errors. If the batch has the current
            // producer ID and epoch, request a bump of the epoch. Otherwise just retry the produce.
            requestEpochBumpForPartition(batch.topicPartition);
            return true;
        }
    } else if (error == Errors.OUT_OF_ORDER_SEQUENCE_NUMBER) {
        if (!hasUnresolvedSequence(batch.topicPartition) &&
                (batch.sequenceHasBeenReset() || !isNextSequence(batch.topicPartition, batch.baseSequence()))) {
            // We should retry the OutOfOrderSequenceException if the batch is _not_ the next batch, ie. its base
            // sequence isn't the lastAckedSequence + 1.
            return true;
        } else if (!isTransactional()) {
            // For the idempotent producer, retry all OUT_OF_ORDER_SEQUENCE_NUMBER errors. If there are no
            // unresolved sequences, or this batch is the one immediately following an unresolved sequence, we know
            // there is actually a gap in the sequences, and we bump the epoch. Otherwise, retry without bumping
            // and wait to see if the sequence resolves
            if (!hasUnresolvedSequence(batch.topicPartition) ||
                    isNextSequenceForUnresolvedPartition(batch.topicPartition, batch.baseSequence())) {
                requestEpochBumpForPartition(batch.topicPartition);
            }
            return true;
        }
    }
    // If neither of the above cases are true, retry if the exception is retriable
    return error.exception() instanceof RetriableException;
}
// visible for testing
/** Whether the producer is transactional and currently in the READY state. */
synchronized boolean isReady() {
    if (!isTransactional())
        return false;
    return currentState == State.READY;
}
/**
 * Caches whether the transaction coordinator supports epoch bumping, determined by
 * the coordinator advertising InitProducerId version 3 or higher.
 */
void handleCoordinatorReady() {
    NodeApiVersions nodeApiVersions = transactionCoordinator != null ?
            apiVersions.get(transactionCoordinator.idString()) :
            null;
    ApiVersion initProducerIdVersion = nodeApiVersions != null ?
            nodeApiVersions.apiVersion(ApiKeys.INIT_PRODUCER_ID) :
            null;
    this.coordinatorSupportsBumpingEpoch = initProducerIdVersion != null &&
            initProducerIdVersion.maxVersion() >= 3;
}
/** Transitions to {@code target} with no associated error. */
private void transitionTo(State target) {
    transitionTo(target, null);
}
/**
 * Transitions the state machine to {@code target}, validating the transition and
 * recording {@code error} as the last error for the error states.
 *
 * @throws IllegalStateException    if the transition is not valid from the current state
 * @throws IllegalArgumentException if an error state is entered without an exception
 */
private void transitionTo(State target, RuntimeException error) {
    if (!currentState.isTransitionValid(currentState, target)) {
        String idString = transactionalId == null ? "" : "TransactionalId " + transactionalId + ": ";
        throw new IllegalStateException(idString + "Invalid transition attempted from state "
            + currentState.name() + " to state " + target.name());
    }
    if (target == State.FATAL_ERROR || target == State.ABORTABLE_ERROR) {
        if (error == null)
            throw new IllegalArgumentException("Cannot transition to " + target + " with a null exception");
        lastError = error;
    } else {
        // Non-error states clear any previously recorded error.
        lastError = null;
    }
    if (lastError != null)
        log.debug("Transition from state {} to error state {}", currentState, target, lastError);
    else
        log.debug("Transition from state {} to {}", currentState, target);
    currentState = target;
}
/**
 * Guard for transactional-only API methods.
 *
 * @throws IllegalStateException when the producer is not transactional
 */
private void ensureTransactional() {
    if (!isTransactional())
        throw new IllegalStateException("Transactional method invoked on a non-transactional producer.");
}
/**
 * Throws when the manager is in an error state: fencing and epoch errors are rethrown
 * as fresh instances of their own type (without the stale call trace); anything else
 * is wrapped in a KafkaException with the last error as its cause.
 */
private void maybeFailWithError() {
    if (hasError()) {
        // for ProducerFencedException, do not wrap it as a KafkaException
        // but create a new instance without the call trace since it was not thrown because of the current call
        if (lastError instanceof ProducerFencedException) {
            throw new ProducerFencedException("Producer with transactionalId '" + transactionalId
                + "' and " + producerIdAndEpoch + " has been fenced by another producer " +
                "with the same transactionalId");
        } else if (lastError instanceof InvalidProducerEpochException) {
            throw new InvalidProducerEpochException("Producer with transactionalId '" + transactionalId
                + "' and " + producerIdAndEpoch + " attempted to produce with an old epoch");
        } else {
            throw new KafkaException("Cannot execute transactional method because we are in an error state", lastError);
        }
    }
}
/**
 * Fails the request with the last recorded error when the manager is in an error
 * state, except that FindCoordinator is still allowed through under an abortable
 * error (the abort itself needs a coordinator).
 *
 * @return true if the request was terminated and must not be sent
 */
private boolean maybeTerminateRequestWithError(TxnRequestHandler requestHandler) {
    if (!hasError())
        return false;
    // No harm letting the FindCoordinator request go through if we're expecting to abort
    if (hasAbortableError() && requestHandler instanceof FindCoordinatorHandler)
        return false;
    requestHandler.fail(lastError);
    return true;
}
/** Adds the request to the pending queue for sending. */
private void enqueueRequest(TxnRequestHandler requestHandler) {
    log.debug("Enqueuing transactional request {}", requestHandler.requestBuilder());
    pendingRequests.add(requestHandler);
}
/**
 * Invalidates the cached coordinator of the given type and enqueues a
 * FindCoordinator request to rediscover it.
 *
 * @throws IllegalStateException for an unknown coordinator type
 */
private void lookupCoordinator(FindCoordinatorRequest.CoordinatorType type, String coordinatorKey) {
    switch (type) {
        case GROUP:
            consumerGroupCoordinator = null;
            break;
        case TRANSACTION:
            transactionCoordinator = null;
            break;
        default:
            throw new IllegalStateException("Invalid coordinator type: " + type);
    }
    FindCoordinatorRequestData data = new FindCoordinatorRequestData()
            .setKeyType(type.id())
            .setKey(coordinatorKey);
    FindCoordinatorRequest.Builder builder = new FindCoordinatorRequest.Builder(data);
    enqueueRequest(new FindCoordinatorHandler(builder));
}
/**
 * Moves all newly requested partitions into the pending set and builds an
 * AddPartitionsToTxn request handler covering the pending set.
 */
private TxnRequestHandler addPartitionsToTransactionHandler() {
    pendingPartitionsInTransaction.addAll(newPartitionsInTransaction);
    newPartitionsInTransaction.clear();
    AddPartitionsToTxnRequest.Builder builder =
        new AddPartitionsToTxnRequest.Builder(transactionalId,
            producerIdAndEpoch.producerId,
            producerIdAndEpoch.epoch,
            new ArrayList<>(pendingPartitionsInTransaction));
    return new AddPartitionsToTxnHandler(builder);
}
/**
 * Registers the given offsets as pending transactional offset commits and builds a
 * TxnOffsetCommit request handler for the consumer group.
 */
private TxnOffsetCommitHandler txnOffsetCommitHandler(TransactionalRequestResult result,
                                                      Map<TopicPartition, OffsetAndMetadata> offsets,
                                                      ConsumerGroupMetadata groupMetadata) {
    for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) {
        OffsetAndMetadata offsetAndMetadata = entry.getValue();
        CommittedOffset committedOffset = new CommittedOffset(offsetAndMetadata.offset(),
            offsetAndMetadata.metadata(), offsetAndMetadata.leaderEpoch());
        pendingTxnOffsetCommits.put(entry.getKey(), committedOffset);
    }
    final TxnOffsetCommitRequest.Builder builder =
        new TxnOffsetCommitRequest.Builder(transactionalId,
            groupMetadata.groupId(),
            producerIdAndEpoch.producerId,
            producerIdAndEpoch.epoch,
            pendingTxnOffsetCommits,
            groupMetadata.memberId(),
            groupMetadata.generationId(),
            groupMetadata.groupInstanceId()
        );
    return new TxnOffsetCommitHandler(result, builder);
}
/**
 * Fails fast when a previous state transition timed out and has not been retried.
 * An acked pending transition is cleared; an un-acked one raises IllegalStateException.
 */
private void throwIfPendingState(String operation) {
    if (pendingTransition == null)
        return;
    if (pendingTransition.result.isAcked()) {
        pendingTransition = null;
        return;
    }
    throw new IllegalStateException("Cannot attempt operation `" + operation + "` "
        + "because the previous call to `" + pendingTransition.operation + "` "
        + "timed out and must be retried");
}
/**
 * Returns the cached result for a retried state transition, or creates a new one via
 * the supplier. A timed-out transition to a different state is an error; a timed-out
 * transition to the same state returns the existing result so the caller can retry.
 */
private TransactionalRequestResult handleCachedTransactionRequestResult(
        Supplier<TransactionalRequestResult> transactionalRequestResultSupplier,
        State nextState,
        String operation
) {
    ensureTransactional();
    if (pendingTransition != null) {
        if (pendingTransition.result.isAcked()) {
            pendingTransition = null;
        } else if (nextState != pendingTransition.state) {
            throw new IllegalStateException("Cannot attempt operation `" + operation + "` "
                + "because the previous call to `" + pendingTransition.operation + "` "
                + "timed out and must be retried");
        } else {
            // Same transition being retried: hand back the in-progress result.
            return pendingTransition.result;
        }
    }
    TransactionalRequestResult result = transactionalRequestResultSupplier.get();
    pendingTransition = new PendingStateTransition(result, nextState, operation);
    return result;
}
// package-private for testing
boolean canBumpEpoch() {
if (!isTransactional()) {
return true;
}
return coordinatorSupportsBumpingEpoch;
}
/**
 * Finishes the current transaction: transitions to INITIALIZING when an epoch bump
 * is required (otherwise READY) and clears all per-transaction state.
 */
private void completeTransaction() {
    transitionTo(epochBumpRequired ? State.INITIALIZING : State.READY);
    lastError = null;
    epochBumpRequired = false;
    transactionStarted = false;
    newPartitionsInTransaction.clear();
    pendingPartitionsInTransaction.clear();
    partitionsInTransaction.clear();
}
/**
 * Base class for transactional request handlers. Wraps a TransactionalRequestResult,
 * tracks retry status, and implements the shared onComplete logic (correlation id
 * check, disconnect retry, version mismatch, and dispatch to handleResponse).
 */
abstract class TxnRequestHandler implements RequestCompletionHandler {
    protected final TransactionalRequestResult result;
    private boolean isRetry = false;
    TxnRequestHandler(TransactionalRequestResult result) {
        this.result = result;
    }
    TxnRequestHandler(String operation) {
        this(new TransactionalRequestResult(operation));
    }
    /** Fails the result and transitions the manager to FATAL_ERROR. */
    void fatalError(RuntimeException e) {
        result.fail(e);
        transitionToFatalError(e);
    }
    /** Fails the result and transitions the manager to ABORTABLE_ERROR. */
    void abortableError(RuntimeException e) {
        result.fail(e);
        transitionToAbortableError(e);
    }
    /** Abortable error when an epoch bump is possible; otherwise escalates to fatal. */
    void abortableErrorIfPossible(RuntimeException e) {
        if (canBumpEpoch()) {
            epochBumpRequired = true;
            abortableError(e);
        } else {
            fatalError(e);
        }
    }
    /** Fails only the result, without changing the manager state. */
    void fail(RuntimeException e) {
        result.fail(e);
    }
    /** Marks this handler as a retry and re-adds it to the pending queue. */
    void reenqueue() {
        synchronized (TransactionManager.this) {
            this.isRetry = true;
            enqueueRequest(this);
        }
    }
    long retryBackoffMs() {
        return retryBackoffMs;
    }
    @Override
    public void onComplete(ClientResponse response) {
        // Only one transactional request may be in flight at a time.
        if (response.requestHeader().correlationId() != inFlightRequestCorrelationId) {
            fatalError(new RuntimeException("Detected more than one in-flight transactional request."));
        } else {
            clearInFlightCorrelationId();
            if (response.wasDisconnected()) {
                log.debug("Disconnected from {}. Will retry.", response.destination());
                if (this.needsCoordinator())
                    lookupCoordinator(this.coordinatorType(), this.coordinatorKey());
                reenqueue();
            } else if (response.versionMismatch() != null) {
                fatalError(response.versionMismatch());
            } else if (response.hasResponse()) {
                log.trace("Received transactional response {} for request {}", response.responseBody(),
                    requestBuilder());
                synchronized (TransactionManager.this) {
                    handleResponse(response.responseBody());
                }
            } else {
                fatalError(new KafkaException("Could not execute transactional request for unknown reasons"));
            }
        }
    }
    /** Whether this request requires a coordinator (a null coordinator type means no). */
    boolean needsCoordinator() {
        return coordinatorType() != null;
    }
    FindCoordinatorRequest.CoordinatorType coordinatorType() {
        return FindCoordinatorRequest.CoordinatorType.TRANSACTION;
    }
    String coordinatorKey() {
        return transactionalId;
    }
    void setRetry() {
        this.isRetry = true;
    }
    boolean isRetry() {
        return isRetry;
    }
    /** Overridden by the EndTxn handler; used to defer EndTxn until batches flush. */
    boolean isEndTxn() {
        return false;
    }
    abstract AbstractRequest.Builder<?> requestBuilder();
    abstract void handleResponse(AbstractResponse responseBody);
    abstract Priority priority();
}
/**
 * Handles InitProducerId responses: on success installs the new producer id/epoch,
 * moves to READY, and (for an epoch bump) resets sequence numbers; coordinator
 * errors trigger rediscovery/retry, authorization and fencing errors are fatal.
 */
private class InitProducerIdHandler extends TxnRequestHandler {
    private final InitProducerIdRequest.Builder builder;
    // true when this request was issued to bump the epoch rather than initialize.
    private final boolean isEpochBump;
    private InitProducerIdHandler(InitProducerIdRequest.Builder builder, boolean isEpochBump) {
        super("InitProducerId");
        this.builder = builder;
        this.isEpochBump = isEpochBump;
    }
    @Override
    InitProducerIdRequest.Builder requestBuilder() {
        return builder;
    }
    @Override
    Priority priority() {
        return this.isEpochBump ? Priority.EPOCH_BUMP : Priority.INIT_PRODUCER_ID;
    }
    @Override
    FindCoordinatorRequest.CoordinatorType coordinatorType() {
        // The idempotent (non-transactional) producer sends InitProducerId to any broker.
        if (isTransactional()) {
            return FindCoordinatorRequest.CoordinatorType.TRANSACTION;
        } else {
            return null;
        }
    }
    @Override
    public void handleResponse(AbstractResponse response) {
        InitProducerIdResponse initProducerIdResponse = (InitProducerIdResponse) response;
        Errors error = initProducerIdResponse.error();
        if (error == Errors.NONE) {
            ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(initProducerIdResponse.data().producerId(),
                initProducerIdResponse.data().producerEpoch());
            setProducerIdAndEpoch(producerIdAndEpoch);
            transitionTo(State.READY);
            lastError = null;
            if (this.isEpochBump) {
                resetSequenceNumbers();
            }
            result.done();
        } else if (error == Errors.NOT_COORDINATOR || error == Errors.COORDINATOR_NOT_AVAILABLE) {
            lookupCoordinator(FindCoordinatorRequest.CoordinatorType.TRANSACTION, transactionalId);
            reenqueue();
        } else if (error == Errors.COORDINATOR_LOAD_IN_PROGRESS || error == Errors.CONCURRENT_TRANSACTIONS) {
            reenqueue();
        } else if (error == Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED ||
                error == Errors.CLUSTER_AUTHORIZATION_FAILED) {
            fatalError(error.exception());
        } else if (error == Errors.INVALID_PRODUCER_EPOCH || error == Errors.PRODUCER_FENCED) {
            // We could still receive INVALID_PRODUCER_EPOCH from old versioned transaction coordinator,
            // just treat it the same as PRODUCE_FENCED.
            fatalError(Errors.PRODUCER_FENCED.exception());
        } else {
            fatalError(new KafkaException("Unexpected error in InitProducerIdResponse; " + error.message()));
        }
    }
}
/**
 * Handles AddPartitionsToTxn responses. Per-partition errors are classified into
 * retry (coordinator/load errors), fatal (fencing/authorization), abortable
 * (topic authorization, partition errors, unknown producer id), or success, in
 * which case the partitions are moved into the transaction.
 */
private class AddPartitionsToTxnHandler extends TxnRequestHandler {
    private final AddPartitionsToTxnRequest.Builder builder;
    // May be shortened for the first CONCURRENT_TRANSACTIONS retry; see maybeOverrideRetryBackoffMs.
    private long retryBackoffMs;
    private AddPartitionsToTxnHandler(AddPartitionsToTxnRequest.Builder builder) {
        super("AddPartitionsToTxn");
        this.builder = builder;
        this.retryBackoffMs = TransactionManager.this.retryBackoffMs;
    }
    @Override
    AddPartitionsToTxnRequest.Builder requestBuilder() {
        return builder;
    }
    @Override
    Priority priority() {
        return Priority.ADD_PARTITIONS_OR_OFFSETS;
    }
    @Override
    public void handleResponse(AbstractResponse response) {
        AddPartitionsToTxnResponse addPartitionsToTxnResponse = (AddPartitionsToTxnResponse) response;
        Map<TopicPartition, Errors> errors = addPartitionsToTxnResponse.errors();
        boolean hasPartitionErrors = false;
        Set<String> unauthorizedTopics = new HashSet<>();
        // Restore the default backoff; a previous CONCURRENT_TRANSACTIONS retry may have shortened it.
        retryBackoffMs = TransactionManager.this.retryBackoffMs;
        for (Map.Entry<TopicPartition, Errors> topicPartitionErrorEntry : errors.entrySet()) {
            TopicPartition topicPartition = topicPartitionErrorEntry.getKey();
            Errors error = topicPartitionErrorEntry.getValue();
            if (error == Errors.NONE) {
                continue;
            } else if (error == Errors.COORDINATOR_NOT_AVAILABLE || error == Errors.NOT_COORDINATOR) {
                lookupCoordinator(FindCoordinatorRequest.CoordinatorType.TRANSACTION, transactionalId);
                reenqueue();
                return;
            } else if (error == Errors.CONCURRENT_TRANSACTIONS) {
                maybeOverrideRetryBackoffMs();
                reenqueue();
                return;
            } else if (error == Errors.COORDINATOR_LOAD_IN_PROGRESS || error == Errors.UNKNOWN_TOPIC_OR_PARTITION) {
                reenqueue();
                return;
            } else if (error == Errors.INVALID_PRODUCER_EPOCH || error == Errors.PRODUCER_FENCED) {
                // We could still receive INVALID_PRODUCER_EPOCH from old versioned transaction coordinator,
                // just treat it the same as PRODUCE_FENCED.
                fatalError(Errors.PRODUCER_FENCED.exception());
                return;
            } else if (error == Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED) {
                fatalError(error.exception());
                return;
            } else if (error == Errors.INVALID_TXN_STATE) {
                fatalError(new KafkaException(error.exception()));
                return;
            } else if (error == Errors.TOPIC_AUTHORIZATION_FAILED) {
                unauthorizedTopics.add(topicPartition.topic());
            } else if (error == Errors.OPERATION_NOT_ATTEMPTED) {
                log.debug("Did not attempt to add partition {} to transaction because other partitions in the " +
                    "batch had errors.", topicPartition);
                hasPartitionErrors = true;
            } else if (error == Errors.UNKNOWN_PRODUCER_ID || error == Errors.INVALID_PRODUCER_ID_MAPPING) {
                abortableErrorIfPossible(error.exception());
                return;
            } else {
                log.error("Could not add partition {} due to unexpected error {}", topicPartition, error);
                hasPartitionErrors = true;
            }
        }
        Set<TopicPartition> partitions = errors.keySet();
        // Remove the partitions from the pending set regardless of the result. We use the presence
        // of partitions in the pending set to know when it is not safe to send batches. However, if
        // the partitions failed to be added and we enter an error state, we expect the batches to be
        // aborted anyway. In this case, we must be able to continue sending the batches which are in
        // retry for partitions that were successfully added.
        pendingPartitionsInTransaction.removeAll(partitions);
        if (!unauthorizedTopics.isEmpty()) {
            abortableError(new TopicAuthorizationException(unauthorizedTopics));
        } else if (hasPartitionErrors) {
            abortableError(new KafkaException("Could not add partitions to transaction due to errors: " + errors));
        } else {
            log.debug("Successfully added partitions {} to transaction", partitions);
            partitionsInTransaction.addAll(partitions);
            transactionStarted = true;
            result.done();
        }
    }
    @Override
    public long retryBackoffMs() {
        return Math.min(TransactionManager.this.retryBackoffMs, this.retryBackoffMs);
    }
    private void maybeOverrideRetryBackoffMs() {
        // We only want to reduce the backoff when retrying the first AddPartition which errored out due to a
        // CONCURRENT_TRANSACTIONS error since this means that the previous transaction is still completing and
        // we don't want to wait too long before trying to start the new one.
        //
        // This is only a temporary fix, the long term solution is being tracked in
        // https://issues.apache.org/jira/browse/KAFKA-5482
        if (partitionsInTransaction.isEmpty())
            this.retryBackoffMs = ADD_PARTITIONS_RETRY_BACKOFF_MS;
    }
}
/**
 * Handles FindCoordinator requests/responses. A successful lookup stores the
 * discovered node as either the consumer group coordinator or the transaction
 * coordinator, depending on the requested coordinator type.
 */
private class FindCoordinatorHandler extends TxnRequestHandler {
    private final FindCoordinatorRequest.Builder builder;

    private FindCoordinatorHandler(FindCoordinatorRequest.Builder builder) {
        super("FindCoordinator");
        this.builder = builder;
    }

    @Override
    FindCoordinatorRequest.Builder requestBuilder() {
        return builder;
    }

    @Override
    Priority priority() {
        return Priority.FIND_COORDINATOR;
    }

    @Override
    FindCoordinatorRequest.CoordinatorType coordinatorType() {
        // A FindCoordinator request can be sent to any broker; it does not
        // itself target a coordinator, so no type/key applies.
        return null;
    }

    @Override
    String coordinatorKey() {
        return null;
    }

    @Override
    public void handleResponse(AbstractResponse response) {
        CoordinatorType coordinatorType = CoordinatorType.forId(builder.data().keyType());
        List<Coordinator> coordinators = ((FindCoordinatorResponse) response).coordinators();
        if (coordinators.size() != 1) {
            log.error("Group coordinator lookup failed: Invalid response containing more than a single coordinator");
            fatalError(new IllegalStateException("Group coordinator lookup failed: Invalid response containing more than a single coordinator"));
            // FIX: must not fall through — coordinators.get(0) below would throw
            // IndexOutOfBoundsException when the list is empty.
            return;
        }
        Coordinator coordinatorData = coordinators.get(0);
        // For older versions without batching, obtain key from request data since it is not included in response
        String key = coordinatorData.key() == null ? builder.data().key() : coordinatorData.key();
        Errors error = Errors.forCode(coordinatorData.errorCode());
        if (error == Errors.NONE) {
            Node node = new Node(coordinatorData.nodeId(), coordinatorData.host(), coordinatorData.port());
            switch (coordinatorType) {
                case GROUP:
                    consumerGroupCoordinator = node;
                    break;
                case TRANSACTION:
                    transactionCoordinator = node;
            }
            result.done();
            log.info("Discovered {} coordinator {}", coordinatorType.toString().toLowerCase(Locale.ROOT), node);
        } else if (error == Errors.COORDINATOR_NOT_AVAILABLE) {
            // Retriable: try the lookup again later.
            reenqueue();
        } else if (error == Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED) {
            fatalError(error.exception());
        } else if (error == Errors.GROUP_AUTHORIZATION_FAILED) {
            abortableError(GroupAuthorizationException.forGroupId(key));
        } else {
            fatalError(new KafkaException(String.format("Could not find a coordinator with type %s with key %s due to " +
                    "unexpected error: %s", coordinatorType, key,
                    coordinatorData.errorMessage())));
        }
    }
}
/**
 * Handles EndTxn requests/responses, which commit or abort the transaction.
 */
private class EndTxnHandler extends TxnRequestHandler {
    private final EndTxnRequest.Builder builder;

    private EndTxnHandler(EndTxnRequest.Builder builder) {
        super("EndTxn(" + builder.data.committed() + ")");
        this.builder = builder;
    }

    @Override
    EndTxnRequest.Builder requestBuilder() {
        return builder;
    }

    @Override
    Priority priority() {
        return Priority.END_TXN;
    }

    @Override
    boolean isEndTxn() {
        return true;
    }

    @Override
    public void handleResponse(AbstractResponse response) {
        EndTxnResponse endTxnResponse = (EndTxnResponse) response;
        Errors error = endTxnResponse.error();
        switch (error) {
            case NONE:
                completeTransaction();
                result.done();
                break;
            case COORDINATOR_NOT_AVAILABLE:
            case NOT_COORDINATOR:
                // Coordinator moved: rediscover it, then retry this request.
                lookupCoordinator(FindCoordinatorRequest.CoordinatorType.TRANSACTION, transactionalId);
                reenqueue();
                break;
            case COORDINATOR_LOAD_IN_PROGRESS:
            case CONCURRENT_TRANSACTIONS:
                // Retriable with the current coordinator.
                reenqueue();
                break;
            case INVALID_PRODUCER_EPOCH:
            case PRODUCER_FENCED:
                // We could still receive INVALID_PRODUCER_EPOCH from an old
                // versioned transaction coordinator; treat it the same as
                // PRODUCER_FENCED.
                fatalError(Errors.PRODUCER_FENCED.exception());
                break;
            case TRANSACTIONAL_ID_AUTHORIZATION_FAILED:
            case INVALID_TXN_STATE:
                fatalError(error.exception());
                break;
            case UNKNOWN_PRODUCER_ID:
            case INVALID_PRODUCER_ID_MAPPING:
                abortableErrorIfPossible(error.exception());
                break;
            default:
                fatalError(new KafkaException("Unhandled error in EndTxnResponse: " + error.message()));
        }
    }
}
private class AddOffsetsToTxnHandler extends TxnRequestHandler {
private final AddOffsetsToTxnRequest.Builder builder;
private final Map<TopicPartition, OffsetAndMetadata> offsets;
private final ConsumerGroupMetadata groupMetadata;
private AddOffsetsToTxnHandler(AddOffsetsToTxnRequest.Builder builder,
Map<TopicPartition, OffsetAndMetadata> offsets,
ConsumerGroupMetadata groupMetadata) {
super("AddOffsetsToTxn");
this.builder = builder;
this.offsets = offsets;
this.groupMetadata = groupMetadata;
}
@Override
AddOffsetsToTxnRequest.Builder requestBuilder() {
return builder;
}
@Override
Priority priority() {
return Priority.ADD_PARTITIONS_OR_OFFSETS;
}
@Override
public void handleResponse(AbstractResponse response) {
AddOffsetsToTxnResponse addOffsetsToTxnResponse = (AddOffsetsToTxnResponse) response;
Errors error = Errors.forCode(addOffsetsToTxnResponse.data().errorCode());
if (error == Errors.NONE) {
log.debug("Successfully added partition for consumer group {} to transaction", builder.data.groupId());
// note the result is not completed until the TxnOffsetCommit returns
pendingRequests.add(txnOffsetCommitHandler(result, offsets, groupMetadata));
transactionStarted = true;
} else if (error == Errors.COORDINATOR_NOT_AVAILABLE || error == Errors.NOT_COORDINATOR) {
lookupCoordinator(FindCoordinatorRequest.CoordinatorType.TRANSACTION, transactionalId);
reenqueue();
} else if (error == Errors.COORDINATOR_LOAD_IN_PROGRESS || error == Errors.CONCURRENT_TRANSACTIONS) {
reenqueue();
} else if (error == Errors.UNKNOWN_PRODUCER_ID || error == Errors.INVALID_PRODUCER_ID_MAPPING) {
abortableErrorIfPossible(error.exception());
} else if (error == Errors.INVALID_PRODUCER_EPOCH || error == Errors.PRODUCER_FENCED) {
// We could still receive INVALID_PRODUCER_EPOCH from old versioned transaction coordinator,
// just treat it the same as PRODUCE_FENCED.
fatalError(Errors.PRODUCER_FENCED.exception());
} else if (error == Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED) {
fatalError(error.exception());
} else if (error == Errors.GROUP_AUTHORIZATION_FAILED) {
abortableError(GroupAuthorizationException.forGroupId(builder.data.groupId()));
} else {
fatalError(new KafkaException("Unexpected error in AddOffsetsToTxnResponse: " + error.message()));
}
}
}
/**
 * Handles TxnOffsetCommit responses. Unlike the other transactional request
 * handlers here, this one targets the consumer GROUP coordinator (see
 * {@link #coordinatorType()}) rather than the transaction coordinator.
 */
private class TxnOffsetCommitHandler extends TxnRequestHandler {
    private final TxnOffsetCommitRequest.Builder builder;

    private TxnOffsetCommitHandler(TransactionalRequestResult result,
                                   TxnOffsetCommitRequest.Builder builder) {
        super(result);
        this.builder = builder;
    }

    @Override
    TxnOffsetCommitRequest.Builder requestBuilder() {
        return builder;
    }

    @Override
    Priority priority() {
        return Priority.ADD_PARTITIONS_OR_OFFSETS;
    }

    @Override
    FindCoordinatorRequest.CoordinatorType coordinatorType() {
        return FindCoordinatorRequest.CoordinatorType.GROUP;
    }

    @Override
    String coordinatorKey() {
        return builder.data.groupId();
    }

    @Override
    public void handleResponse(AbstractResponse response) {
        TxnOffsetCommitResponse txnOffsetCommitResponse = (TxnOffsetCommitResponse) response;
        // Trigger the group-coordinator re-lookup at most once per response,
        // even if several partitions report coordinator-related errors.
        boolean coordinatorReloaded = false;
        Map<TopicPartition, Errors> errors = txnOffsetCommitResponse.errors();
        log.debug("Received TxnOffsetCommit response for consumer group {}: {}", builder.data.groupId(),
                errors);
        // Per-partition error handling; abortable/fatal errors stop the loop.
        for (Map.Entry<TopicPartition, Errors> entry : errors.entrySet()) {
            TopicPartition topicPartition = entry.getKey();
            Errors error = entry.getValue();
            if (error == Errors.NONE) {
                // Committed: remove from the pending set.
                pendingTxnOffsetCommits.remove(topicPartition);
            } else if (error == Errors.COORDINATOR_NOT_AVAILABLE
                    || error == Errors.NOT_COORDINATOR
                    || error == Errors.REQUEST_TIMED_OUT) {
                if (!coordinatorReloaded) {
                    coordinatorReloaded = true;
                    lookupCoordinator(FindCoordinatorRequest.CoordinatorType.GROUP, builder.data.groupId());
                }
            } else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION
                    || error == Errors.COORDINATOR_LOAD_IN_PROGRESS) {
                // If the topic is unknown or the coordinator is loading, retry with the current coordinator
                continue;
            } else if (error == Errors.GROUP_AUTHORIZATION_FAILED) {
                abortableError(GroupAuthorizationException.forGroupId(builder.data.groupId()));
                break;
            } else if (error == Errors.FENCED_INSTANCE_ID) {
                abortableError(error.exception());
                break;
            } else if (error == Errors.UNKNOWN_MEMBER_ID
                    || error == Errors.ILLEGAL_GENERATION) {
                abortableError(new CommitFailedException("Transaction offset Commit failed " +
                        "due to consumer group metadata mismatch: " + error.exception().getMessage()));
                break;
            } else if (isFatalException(error)) {
                fatalError(error.exception());
                break;
            } else {
                fatalError(new KafkaException("Unexpected error in TxnOffsetCommitResponse: " + error.message()));
                break;
            }
        }
        // If an error above already completed the result, drop the remaining
        // pending commits; otherwise finish on full success, or re-enqueue to
        // retry the commits that failed with a retriable error.
        if (result.isCompleted()) {
            pendingTxnOffsetCommits.clear();
        } else if (pendingTxnOffsetCommits.isEmpty()) {
            result.done();
        } else {
            // Retry the commits which failed with a retriable error
            reenqueue();
        }
    }
}
/**
 * Returns true when the given error is one of the error codes that this
 * manager treats as fatal (non-abortable) for offset commits.
 */
private boolean isFatalException(Errors error) {
    switch (error) {
        case TRANSACTIONAL_ID_AUTHORIZATION_FAILED:
        case INVALID_PRODUCER_EPOCH:
        case PRODUCER_FENCED:
        case UNSUPPORTED_FOR_MESSAGE_FORMAT:
            return true;
        default:
            return false;
    }
}
/**
 * Immutable holder for a requested-but-not-yet-applied state transition:
 * the result to complete, the target state, and the name of the operation
 * that requested it (presumably for diagnostics — usage is outside this view).
 */
private static final class PendingStateTransition {
    private final TransactionalRequestResult result;
    private final State state;
    private final String operation;
    private PendingStateTransition(
        TransactionalRequestResult result,
        State state,
        String operation
    ) {
        this.result = result;
        this.state = state;
        this.operation = operation;
    }
}
}
|
/*
* Barebones implementation of displaying camera preview.
*
* Created by lisah0 on 2012-02-24
*/
package com.example.lb.lbrocker.zbar.camera;
import android.content.Context;
import android.hardware.Camera;
import android.hardware.Camera.AutoFocusCallback;
import android.hardware.Camera.PreviewCallback;
import android.util.Log;
import android.view.SurfaceHolder;
import android.view.SurfaceView;
import java.io.IOException;
/**
* A basic Camera preview class
*/
public class CameraPreview extends SurfaceView implements SurfaceHolder.Callback {
    private SurfaceHolder mHolder;
    private Camera mCamera;
    private PreviewCallback previewCallback;
    private AutoFocusCallback autoFocusCallback;

    /**
     * Creates the preview surface and configures the camera's focus mode.
     *
     * @param context         Android context for the SurfaceView
     * @param camera          opened camera whose preview is displayed
     * @param previewCb       receives preview frames once the preview starts
     * @param autoFocusCb     software auto-focus callback; ignored (nulled)
     *                        when continuous focus is hardware-supported
     */
    @SuppressWarnings("deprecation")
    public CameraPreview(Context context, Camera camera, PreviewCallback previewCb, AutoFocusCallback autoFocusCb) {
        super(context);
        mCamera = camera;
        previewCallback = previewCb;
        autoFocusCallback = autoFocusCb;

        /*
         * Set camera to continuous focus if supported, otherwise use software
         * auto-focus. Only works for API level >=9.
         */
        Camera.Parameters parameters = camera.getParameters();
        // FIX: the original compared focus-mode Strings with '==' (reference
        // equality), which is not guaranteed to match values returned by the
        // camera driver; use equals-based List.contains() instead.
        if (parameters.getSupportedFocusModes().contains(Camera.Parameters.FOCUS_MODE_CONTINUOUS_PICTURE)) {
            parameters.setFocusMode(Camera.Parameters.FOCUS_MODE_CONTINUOUS_PICTURE);
            // Continuous focus makes the software auto-focus callback unnecessary.
            autoFocusCallback = null;
            // FIX: the modified parameters were never handed back to the camera,
            // so the focus-mode change had no effect.
            camera.setParameters(parameters);
        }

        // Install a SurfaceHolder.Callback so we get notified when the
        // underlying surface is created and destroyed.
        mHolder = getHolder();
        mHolder.addCallback(this);
        // deprecated setting, but required on Android versions prior to 3.0
        mHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);
    }

    /** Points the camera's preview at the newly created surface. */
    public void surfaceCreated(SurfaceHolder holder) {
        try {
            mCamera.setPreviewDisplay(holder);
        } catch (IOException e) {
            Log.d("DBG", "Error setting camera preview: " + e.getMessage());
        }
    }

    public void surfaceDestroyed(SurfaceHolder holder) {
        // Camera preview released in activity
    }

    /**
     * Restarts the preview whenever the surface changes (resize/rotation).
     * The preview must be stopped before it is reconfigured.
     */
    public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
        if (mHolder.getSurface() == null) {
            // preview surface does not exist
            return;
        }
        // stop preview before making changes
        try {
            mCamera.stopPreview();
        } catch (Exception e) {
            // ignore: tried to stop a non-existent preview
        }
        try {
            // Hard code camera surface rotation 90 degs to match Activity view
            // in portrait
            mCamera.setDisplayOrientation(90);
            mCamera.setPreviewDisplay(mHolder);
            mCamera.setPreviewCallback(previewCallback);
            mCamera.startPreview();
            mCamera.autoFocus(autoFocusCallback);
        } catch (Exception e) {
            Log.d("DBG", "Error starting camera preview: " + e.getMessage());
        }
    }
}
|
/******************************************************************************
* Compilation: javac InteractivePercolationVisualizer.java
* Execution: java InteractivePercolationVisualizer n
* Dependencies: PercolationVisualizer.java Percolation.java
* StdDraw.java StdOut.java
*
* This program takes the grid size n as a command-line argument.
* Then, the user repeatedly clicks sites to open with the mouse.
* After each site is opened, it draws full sites in light blue,
* open sites (that aren't full) in white, and blocked sites in black.
*
******************************************************************************/
package week1.percolation;
import edu.princeton.cs.algs4.StdDraw;
import edu.princeton.cs.algs4.StdOut;
public class InteractivePercolationVisualizer {
    public static void main(String[] args) {
        // Grid size n (first command-line argument; defaults to 10).
        int n = (args.length == 1) ? Integer.parseInt(args[0]) : 10;

        // Repeatedly open the site specified by a mouse click and redraw the system.
        StdOut.println(n);
        StdDraw.enableDoubleBuffering();
        Percolation perc = new Percolation(n);
        PercolationVisualizer.draw(perc, n);
        StdDraw.show();

        while (true) {
            if (StdDraw.isMousePressed()) {
                // Screen coordinates of the click.
                double x = StdDraw.mouseX();
                double y = StdDraw.mouseY();

                // Convert screen coordinates to 1-based (row, col).
                int row = (int) (n - Math.floor(y));
                int col = (int) (1 + Math.floor(x));

                // Open the clicked site, provided it lies inside the grid.
                if (row >= 1 && row <= n && col >= 1 && col <= n) {
                    if (!perc.isOpen(row, col)) {
                        StdOut.println(row + " " + col);
                    }
                    perc.open(row, col);
                }

                // Redraw the n-by-n percolation system.
                PercolationVisualizer.draw(perc, n);
                StdDraw.show();
            }
            StdDraw.pause(20);
        }
    }
}
|
package external_module_0870.a;
import java.nio.file.*;
import java.sql.*;
import java.util.logging.*;
/**
 * Generated abstract container holding a single element of type {@code U},
 * exposed through the {@code get()}/{@code set(Object)} accessors, together
 * with a statically shared singleton reachable via {@code getInstance()}.
*
* @see javax.annotation.processing.Completion
* @see javax.lang.model.AnnotatedConstruct
* @see javax.management.Attribute
*/
@SuppressWarnings("all")
public abstract class Foo0<U> implements external_module_0870.a.IFoo0<U> {
    // Generated placeholder fields; never referenced elsewhere in this class.
    javax.naming.directory.DirContext f0 = null;
    javax.net.ssl.ExtendedSSLSession f1 = null;
    javax.rmi.ssl.SslRMIClientSocketFactory f2 = null;

    // The wrapped element exposed via get()/set().
    public U element;

    // Shared singleton; never assigned in this class, so it must be set
    // externally before getInstance()/call() are useful.
    public static Foo0 instance;

    public static Foo0 getInstance() {
        return instance;
    }

    // Generated stub: always returns null regardless of input.
    public static <T> T create(java.util.List<T> input) {
        return null;
    }

    // NOTE(review): throws NullPointerException when 'element' is null.
    public String getName() {
        return element.toString();
    }

    // Generated no-op: the argument is ignored.
    public void setName(String string) {
        return;
    }

    public U get() {
        return element;
    }

    // Unchecked cast: heap pollution is possible if the argument is not a U.
    public void set(Object element) {
        this.element = (U)element;
    }

    // NOTE(review): delegates to the singleton's call(); if 'instance' is an
    // object whose call() does the same, this recurses without terminating.
    public U call() throws Exception {
        return (U)getInstance().call();
    }
}
|
package com.varitoooo.criteria.project.domain.criteria;
public abstract class ProjectStringCriteria {
    /**
     * Returns whether {@code projectValue} contains {@code value} as a substring.
     *
     * @param value        the substring to look for; {@code null} never matches
     * @param projectValue the project's value to inspect; {@code null} never matches
     * @return {@code true} iff both arguments are non-null and
     *         {@code projectValue} contains {@code value}
     */
    public boolean isSatisfiedBy(String value, String projectValue) {
        // FIX: guard the search term as well — previously a null 'value' caused
        // a NullPointerException inside String.contains(null).
        return value != null && projectValue != null && projectValue.contains(value);
    }
}
|
package com4j;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Virtual table index of the method.
*
* <p>
* Java doesn't let us obtain the ordinal of a method,
* so we need to annotate that information explicitly.
*
* @author Kohsuke Kawaguchi (kk@kohsuke.org)
*/
// Retained at runtime so the vtable index is visible to reflection
// (presumably how com4j resolves the COM slot when dispatching calls).
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.METHOD})
public @interface VTID {
    /**
     * Ordinal of this COM method among methods on the same interface.
     *
     * <p>
     * 0 is always QueryInterface, 1 is always AddRef, and
     * 2 is always Release.
     */
    int value();
}
|
/*
* Copyright 2016 higherfrequencytrading.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.openhft.lang.io;
import org.junit.Test;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertSame;
/**
* Created by peter.lawrey on 06/08/2015.
*/
public class BytesTest {
    /**
     * Writes two distinct strings, reads them back twice (once sequentially,
     * once after resetting the position), and pins the identity semantics of
     * {@code readEnum}: different values come back as different instances,
     * while re-reading the same data yields the SAME instances — i.e. the
     * test asserts that readEnum caches/interns previously read strings.
     */
    @Test
    public void testReadEnum() {
        Bytes b = DirectStore.allocate(128).bytes();
        b.writeEnum("Hello");
        b.writeEnum("World");
        // Switch the buffer from writing to reading.
        b.flip();
        String x = b.readEnum(String.class);
        String y = b.readEnum(String.class);
        // Distinct values must be distinct instances.
        assertNotSame(x, y);
        // Rewind and read the same two values again.
        b.position(0);
        String x2 = b.readEnum(String.class);
        String y2 = b.readEnum(String.class);
        assertNotSame(x2, y2);
        // Re-reads return the identical cached instances.
        assertSame(x, x2);
        assertSame(y, y2);
    }
}
|
package org.radargun;
import org.radargun.state.MasterState;
/**
 * A master stage is a stage that is executed only on the master. E.g. report generation is suitable for being a
 * master stage, as only the master has access to all the reports generated by the nodes.
*
* @author Mircea Markus <Mircea.Markus@jboss.com>
*/
public interface MasterStage extends Stage {
    /** Supplies the master's state to the stage before {@link #execute()} is called. */
    void init(MasterState masterState);

    /**
     * Runs the stage on the master.
     *
     * @return the outcome of the stage execution
     * @throws Exception if the stage fails
     */
    StageResult execute() throws Exception;
}
|
/**
* Copyright 2015 Hello NMS. All rights reserved.
*/
package com.hellonms.platforms.emp_orange.server.invoker.snmp;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.TreeMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;
import org.snmp4j.PDU;
import org.snmp4j.smi.Null;
import org.snmp4j.smi.OID;
import org.snmp4j.smi.Variable;
import org.snmp4j.smi.VariableBinding;
import com.hellonms.platforms.emp_core.server.invoker.InvokerIf;
import com.hellonms.platforms.emp_core.server.transaction.EmpContext;
import com.hellonms.platforms.emp_core.share.error.ERROR_CODE_CORE;
import com.hellonms.platforms.emp_core.share.error.EmpException;
import com.hellonms.platforms.emp_plug.snmp.Plug4SNMPServer;
import com.hellonms.platforms.emp_plug.snmp.Plug4SNMPServer.Plug4SNMPServerRequestHandlerIf;
import com.hellonms.platforms.emp_plug.snmp.Plug4SNMPServer.Plug4SNMPServerResponseHandlerIf;
import com.hellonms.platforms.emp_plug.snmp.PlugRequestSNMPGet;
import com.hellonms.platforms.emp_plug.snmp.PlugRequestSNMPGetNext;
import com.hellonms.platforms.emp_plug.snmp.PlugRequestSNMPIf;
import com.hellonms.platforms.emp_plug.snmp.PlugRequestSNMPSet;
import com.hellonms.platforms.emp_plug.snmp.PlugResponseSNMP;
import com.hellonms.platforms.emp_plug.snmp.PlugResponseSNMPIf;
import com.hellonms.platforms.emp_plug.snmp.UtilSNMP;
import com.hellonms.platforms.emp_util.string.UtilString;
/**
* <p>
* Invoker SNMP Simulator
* </p>
*
* @since 1.6
* @create 2015. 7. 22.
* @modified 2015. 7. 22.
* @author cchyun
*
*/
public class Invoker4OrangeSNMPSimulator implements Invoker4OrangeSNMPIf, Plug4SNMPServerRequestHandlerIf {
    private int port = 161;
    private String read_community = "public";
    private String write_community = "private";
    // Path of the properties file that holds the simulated MIB (OID=value pairs).
    private String properties = null;
    private boolean ready = false;
    @SuppressWarnings("unused")
    private Plug4SNMPServer server;

    // Simulated MIB: OIDs ordered by sub-identifier sequence so that GET-NEXT
    // can walk the tree with higherEntry().
    private TreeMap<OID, Variable> variable_map = new TreeMap<OID, Variable>(new Comparator<OID>() {
        @Override
        public int compare(OID o1, OID o2) {
            int[] value1 = o1.getValue();
            int[] value2 = o2.getValue();
            int length = Math.min(value1.length, value2.length);
            int compare = 0;
            for (int i = 0; i < length; i++) {
                // NOTE(review): int subtraction can overflow when sub-identifier
                // values differ by more than 2^31 — confirm whether such OIDs
                // can occur before changing this ordering.
                compare = value1[i] - value2[i];
                if (compare != 0) {
                    return compare;
                }
            }
            // Shared prefix: the longer OID sorts after the shorter one.
            if (length < value1.length) {
                compare = 1;
            } else if (length < value2.length) {
                compare = -1;
            }
            return compare;
        }
    });

    // Sentinel OID reported by GET-NEXT once the walk runs off the end of the MIB.
    private OID END_OF_MIB = new OID("1.3.6.1.4.2");
    private int thread_count = 4;
    private ThreadPoolExecutor threadPoolExecutor;

    @Override
    public Class<? extends InvokerIf> getDefine_class() {
        return Invoker4OrangeSNMPIf.class;
    }

    public void setPort(int port) {
        this.port = port;
    }

    public void setRead_community(String read_community) {
        this.read_community = read_community;
    }

    public void setWrite_community(String write_community) {
        this.write_community = write_community;
    }

    public void setProperties(String properties) {
        this.properties = properties;
    }

    public void setThread_count(int thread_count) {
        this.thread_count = thread_count;
    }

    /**
     * Loads the MIB from the configured properties file, starts the SNMP
     * server and the worker pool, then marks this invoker ready.
     *
     * @throws EmpException if no properties file was configured or loading fails
     */
    @Override
    public void initialize(EmpContext context) throws EmpException {
        if (this.properties == null) {
            throw new EmpException(ERROR_CODE_CORE.FILE_IO, "snmp mib properties file not setted !!!");
        }
        load();
        server = new Plug4SNMPServer(port, read_community, write_community, this);
        threadPoolExecutor = (ThreadPoolExecutor) Executors.newFixedThreadPool(thread_count);
        ready = true;
    }

    @Override
    public boolean isReady(EmpContext context) throws EmpException {
        return ready;
    }

    @Override
    public void dispose(EmpContext context) throws EmpException {
        ready = false;
        // FIX: release the worker threads — previously the executor was left
        // running after dispose, leaking its threads.
        if (threadPoolExecutor != null) {
            threadPoolExecutor.shutdown();
        }
    }

    /**
     * Dispatches an incoming SNMP request to a worker thread; GET, GET-NEXT
     * and SET are simulated from {@code variable_map}, anything else is
     * answered with a resourceUnavailable error.
     */
    @Override
    public void handleRequest(final InetAddress address, final int port, final PlugRequestSNMPIf request, final Plug4SNMPServerResponseHandlerIf responseHandler) {
        threadPoolExecutor.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    if (request instanceof PlugRequestSNMPGet) {
                        responseHandler.handleResponse(snmp_get((PlugRequestSNMPGet) request));
                    } else if (request instanceof PlugRequestSNMPGetNext) {
                        responseHandler.handleResponse(snmp_get_next((PlugRequestSNMPGetNext) request));
                    } else if (request instanceof PlugRequestSNMPSet) {
                        responseHandler.handleResponse(snmp_set((PlugRequestSNMPSet) request));
                    } else {
                        responseHandler.handleResponseException(PDU.resourceUnavailable, PDU.resourceUnavailable);
                    }
                } catch (EmpException e) {
                    e.printStackTrace();
                    responseHandler.handleResponseException(PDU.resourceUnavailable, PDU.resourceUnavailable);
                }
            }
        });
    }

    /** Reads the properties file into {@code variable_map}. */
    private void load() throws EmpException {
        try {
            Properties props = new Properties();
            File file = new File(this.properties);
            // FIX: try-with-resources — the original leaked the stream when
            // Properties.load() threw before close() was reached.
            try (FileInputStream in = new FileInputStream(file)) {
                props.load(in);
            }
            VariableBinding[] vbs = UtilSNMP.toVariableBinding(props);
            for (VariableBinding vb : vbs) {
                variable_map.put(vb.getOid(), vb.getVariable());
            }
        } catch (Exception e) {
            throw new EmpException(e, ERROR_CODE_CORE.FILE_IO);
        }
    }

    /** Writes the current {@code variable_map} back to the properties file. */
    private void store() throws EmpException {
        try {
            List<VariableBinding> vb_list = new ArrayList<VariableBinding>();
            for (Map.Entry<OID, Variable> entry : variable_map.entrySet()) {
                vb_list.add(new VariableBinding(entry.getKey(), entry.getValue()));
            }
            Properties props = UtilSNMP.toProperties(vb_list.toArray(new VariableBinding[0]));
            File file = new File(this.properties);
            // FIX: try-with-resources — the original leaked the stream when
            // Properties.store() threw before close() was reached.
            try (FileOutputStream out = new FileOutputStream(file)) {
                props.store(out, null);
            }
        } catch (Exception e) {
            throw new EmpException(e, ERROR_CODE_CORE.FILE_IO);
        }
    }

    /**
     * Simulates SNMP GET: resolves every requested OID from the MIB.
     *
     * @throws EmpException if any requested OID is not present
     */
    private PlugResponseSNMPIf snmp_get(PlugRequestSNMPGet request) throws EmpException {
        PlugResponseSNMP response = new PlugResponseSNMP();
        for (OID oid : request.getOids()) {
            Variable variable = variable_map.get(oid);
            if (variable == null) {
                throw new EmpException(ERROR_CODE_CORE.FILE_IO, UtilString.format("no such snmp_oid {}", oid));
            }
            response.add(new VariableBinding(oid, variable));
        }
        return response;
    }

    /**
     * Simulates SNMP GET-NEXT: for each OID, returns the next entry in MIB
     * order, or an END_OF_MIB/Null binding when the walk is exhausted.
     */
    private PlugResponseSNMPIf snmp_get_next(PlugRequestSNMPGetNext request) throws EmpException {
        PlugResponseSNMP response = new PlugResponseSNMP();
        for (OID oid : request.getOids()) {
            Map.Entry<OID, Variable> entry = variable_map.higherEntry(oid);
            if (entry == null) {
                response.add(new VariableBinding(END_OF_MIB, new Null()));
            } else {
                response.add(new VariableBinding(entry.getKey(), entry.getValue()));
            }
        }
        return response;
    }

    /**
     * Simulates SNMP SET: updates existing OIDs only, then persists the whole
     * MIB back to the properties file.
     *
     * @throws EmpException if a target OID does not already exist
     */
    private PlugResponseSNMPIf snmp_set(PlugRequestSNMPSet request) throws EmpException {
        PlugResponseSNMP response = new PlugResponseSNMP();
        for (VariableBinding vb : request.getVbs()) {
            Variable variable = variable_map.get(vb.getOid());
            if (variable == null) {
                throw new EmpException(ERROR_CODE_CORE.FILE_IO, UtilString.format("no such snmp_oid {}", vb.getOid()));
            }
            variable_map.put(vb.getOid(), vb.getVariable());
            response.add(new VariableBinding(vb.getOid(), vb.getVariable()));
        }
        store();
        return response;
    }
}
|
/**
* Generated with Acceleo
*/
package org.wso2.integrationstudio.gmf.esb.parts.impl;
// Start of user code for imports
import java.util.ArrayList;
import java.util.List;
import org.eclipse.emf.ecore.EObject;
import org.eclipse.emf.eef.runtime.api.component.IPropertiesEditionComponent;
import org.eclipse.emf.eef.runtime.api.notify.IPropertiesEditionEvent;
import org.eclipse.emf.eef.runtime.api.parts.ISWTPropertiesEditionPart;
import org.eclipse.emf.eef.runtime.impl.notify.PropertiesEditionEvent;
import org.eclipse.emf.eef.runtime.impl.parts.CompositePropertiesEditionPart;
import org.eclipse.emf.eef.runtime.ui.parts.PartComposer;
import org.eclipse.emf.eef.runtime.ui.parts.sequence.BindingCompositionSequence;
import org.eclipse.emf.eef.runtime.ui.parts.sequence.CompositionSequence;
import org.eclipse.emf.eef.runtime.ui.widgets.ReferencesTable;
import org.eclipse.emf.eef.runtime.ui.widgets.ReferencesTable.ReferencesTableListener;
import org.eclipse.emf.eef.runtime.ui.widgets.referencestable.ReferencesTableContentProvider;
import org.eclipse.emf.eef.runtime.ui.widgets.referencestable.ReferencesTableSettings;
import org.eclipse.jface.viewers.ViewerFilter;
import org.eclipse.swt.SWT;
import org.eclipse.swt.events.SelectionAdapter;
import org.eclipse.swt.events.SelectionEvent;
import org.eclipse.swt.layout.GridData;
import org.eclipse.swt.layout.GridLayout;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.swt.widgets.Group;
import org.wso2.integrationstudio.gmf.esb.parts.BuilderMediatorOutputConectorPropertiesEditionPart;
import org.wso2.integrationstudio.gmf.esb.parts.EsbViewsRepository;
import org.wso2.integrationstudio.gmf.esb.providers.EsbMessages;
// End of user code
/**
*
*
*/
public class BuilderMediatorOutputConectorPropertiesEditionPartImpl extends CompositePropertiesEditionPart implements ISWTPropertiesEditionPart, BuilderMediatorOutputConectorPropertiesEditionPart {
protected ReferencesTable commentMediators;
protected List<ViewerFilter> commentMediatorsBusinessFilters = new ArrayList<ViewerFilter>();
protected List<ViewerFilter> commentMediatorsFilters = new ArrayList<ViewerFilter>();
/**
* Default constructor
* @param editionComponent the {@link IPropertiesEditionComponent} that manage this part
*
*/
public BuilderMediatorOutputConectorPropertiesEditionPartImpl(IPropertiesEditionComponent editionComponent) {
    // Delegates to CompositePropertiesEditionPart, which stores the managing component.
    super(editionComponent);
}
/**
* {@inheritDoc}
*
* @see org.eclipse.emf.eef.runtime.api.parts.ISWTPropertiesEditionPart#
* createFigure(org.eclipse.swt.widgets.Composite)
*
*/
public Composite createFigure(final Composite parent) {
    // Root composite of this part: a three-column grid populated by createControls.
    view = new Composite(parent, SWT.NONE);
    GridLayout rootLayout = new GridLayout();
    rootLayout.numColumns = 3;
    view.setLayout(rootLayout);
    createControls(view);
    return view;
}
/**
* {@inheritDoc}
*
* @see org.eclipse.emf.eef.runtime.api.parts.ISWTPropertiesEditionPart#
* createControls(org.eclipse.swt.widgets.Composite)
*
*/
public void createControls(Composite view) {
    // Declare the binding steps of this part: the Properties group containing
    // the commentMediators table.
    CompositionSequence builderMediatorOutputConectorStep = new BindingCompositionSequence(propertiesEditionComponent);
    builderMediatorOutputConectorStep
        .addStep(EsbViewsRepository.BuilderMediatorOutputConector.Properties.class)
        .addStep(EsbViewsRepository.BuilderMediatorOutputConector.Properties.commentMediators);
    // The composer maps each declared step key to the widget created below.
    composer = new PartComposer(builderMediatorOutputConectorStep) {
        @Override
        public Composite addToPart(Composite parent, Object key) {
            if (key == EsbViewsRepository.BuilderMediatorOutputConector.Properties.class) {
                return createPropertiesGroup(parent);
            }
            if (key == EsbViewsRepository.BuilderMediatorOutputConector.Properties.commentMediators) {
                return createCommentMediatorsAdvancedTableComposition(parent);
            }
            return parent;
        }
    };
    composer.compose(view);
}
/**
*
*/
protected Composite createPropertiesGroup(Composite parent) {
    // Group box titled from the localized message bundle; it spans all three
    // columns of the parent grid and is itself laid out in three columns.
    Group group = new Group(parent, SWT.NONE);
    group.setText(EsbMessages.BuilderMediatorOutputConectorPropertiesEditionPart_PropertiesGroupLabel);
    GridData groupData = new GridData(GridData.FILL_HORIZONTAL);
    groupData.horizontalSpan = 3;
    group.setLayoutData(groupData);
    GridLayout groupLayout = new GridLayout();
    groupLayout.numColumns = 3;
    group.setLayout(groupLayout);
    return group;
}
/**
* @param container
*
*/
protected Composite createCommentMediatorsAdvancedTableComposition(Composite parent) {
    // References table for comment mediators; each user action is forwarded to
    // the edition component as a PropertiesEditionEvent, then the table refreshes.
    this.commentMediators = new ReferencesTable(getDescription(EsbViewsRepository.BuilderMediatorOutputConector.Properties.commentMediators, EsbMessages.BuilderMediatorOutputConectorPropertiesEditionPart_CommentMediatorsLabel), new ReferencesTableListener() {
        public void handleAdd() {
            propertiesEditionComponent.firePropertiesChanged(new PropertiesEditionEvent(BuilderMediatorOutputConectorPropertiesEditionPartImpl.this, EsbViewsRepository.BuilderMediatorOutputConector.Properties.commentMediators, PropertiesEditionEvent.COMMIT, PropertiesEditionEvent.ADD, null, null));
            commentMediators.refresh();
        }
        public void handleEdit(EObject element) {
            propertiesEditionComponent.firePropertiesChanged(new PropertiesEditionEvent(BuilderMediatorOutputConectorPropertiesEditionPartImpl.this, EsbViewsRepository.BuilderMediatorOutputConector.Properties.commentMediators, PropertiesEditionEvent.COMMIT, PropertiesEditionEvent.EDIT, null, element));
            commentMediators.refresh();
        }
        public void handleMove(EObject element, int oldIndex, int newIndex) {
            propertiesEditionComponent.firePropertiesChanged(new PropertiesEditionEvent(BuilderMediatorOutputConectorPropertiesEditionPartImpl.this, EsbViewsRepository.BuilderMediatorOutputConector.Properties.commentMediators, PropertiesEditionEvent.COMMIT, PropertiesEditionEvent.MOVE, element, newIndex));
            commentMediators.refresh();
        }
        public void handleRemove(EObject element) {
            propertiesEditionComponent.firePropertiesChanged(new PropertiesEditionEvent(BuilderMediatorOutputConectorPropertiesEditionPartImpl.this, EsbViewsRepository.BuilderMediatorOutputConector.Properties.commentMediators, PropertiesEditionEvent.COMMIT, PropertiesEditionEvent.REMOVE, null, element));
            commentMediators.refresh();
        }
        public void navigateTo(EObject element) { }
    });
    // Apply any filters registered before the widget was created.
    for (ViewerFilter filter : this.commentMediatorsFilters) {
        this.commentMediators.addFilter(filter);
    }
    this.commentMediators.setHelpText(propertiesEditionComponent.getHelpContent(EsbViewsRepository.BuilderMediatorOutputConector.Properties.commentMediators, EsbViewsRepository.SWT_KIND));
    this.commentMediators.createControls(parent);
    // Selection changes are reported (not committed) to the edition component.
    this.commentMediators.addSelectionListener(new SelectionAdapter() {
        public void widgetSelected(SelectionEvent e) {
            if (e.item != null && e.item.getData() instanceof EObject) {
                propertiesEditionComponent.firePropertiesChanged(new PropertiesEditionEvent(BuilderMediatorOutputConectorPropertiesEditionPartImpl.this, EsbViewsRepository.BuilderMediatorOutputConector.Properties.commentMediators, PropertiesEditionEvent.CHANGE, PropertiesEditionEvent.SELECTION_CHANGED, null, e.item.getData()));
            }
        }
    });
    // Table spans all three columns; cardinality 0..* (unbounded upper bound).
    GridData commentMediatorsData = new GridData(GridData.FILL_HORIZONTAL);
    commentMediatorsData.horizontalSpan = 3;
    this.commentMediators.setLayoutData(commentMediatorsData);
    this.commentMediators.setLowerBound(0);
    this.commentMediators.setUpperBound(-1);
    commentMediators.setID(EsbViewsRepository.BuilderMediatorOutputConector.Properties.commentMediators);
    commentMediators.setEEFType("eef::AdvancedTableComposition"); //$NON-NLS-1$
    // Start of user code for createCommentMediatorsAdvancedTableComposition
    // End of user code
    return parent;
}
/**
 * {@inheritDoc}
 *
 * @see org.eclipse.emf.eef.runtime.api.notify.IPropertiesEditionListener#firePropertiesChanged(org.eclipse.emf.eef.runtime.api.notify.IPropertiesEditionEvent)
 *
 */
public void firePropertiesChanged(IPropertiesEditionEvent event) {
// Intentionally empty in the generated code: any tab-synchronization logic
// must live inside the protected user-code region below so the EEF
// generator preserves it on regeneration.
// Start of user code for tab synchronization
// End of user code
}
/**
 * {@inheritDoc}
 *
 * Wires the comment-mediators reference table to the given settings and
 * mirrors the feature's read-only state onto the widget.
 *
 * @see org.wso2.integrationstudio.gmf.esb.parts.BuilderMediatorOutputConectorPropertiesEditionPart#initCommentMediators(EObject current, EReference containingFeature, EReference feature)
 */
public void initCommentMediators(ReferencesTableSettings settings) {
// Cache the resource set of the edited element so later reference lookups
// resolve against the same resource set.
if (current.eResource() != null && current.eResource().getResourceSet() != null)
this.resourceSet = current.eResource().getResourceSet();
ReferencesTableContentProvider contentProvider = new ReferencesTableContentProvider();
commentMediators.setContentProvider(contentProvider);
commentMediators.setInput(settings);
// Enable/disable the table only when its state actually differs from the
// feature's read-only state, to avoid redundant widget updates.
boolean eefElementEditorReadOnlyState = isReadOnly(EsbViewsRepository.BuilderMediatorOutputConector.Properties.commentMediators);
if (eefElementEditorReadOnlyState && commentMediators.isEnabled()) {
commentMediators.setEnabled(false);
commentMediators.setToolTipText(EsbMessages.BuilderMediatorOutputConector_ReadOnly);
} else if (!eefElementEditorReadOnlyState && !commentMediators.isEnabled()) {
commentMediators.setEnabled(true);
}
}
/**
 * {@inheritDoc}
 *
 * Refreshes the comment-mediators table viewer so it re-reads its input.
 *
 * @see org.wso2.integrationstudio.gmf.esb.parts.BuilderMediatorOutputConectorPropertiesEditionPart#updateCommentMediators()
 *
 */
public void updateCommentMediators() {
commentMediators.refresh();
}
/**
 * {@inheritDoc}
 *
 * Registers a viewer filter for the comment-mediators table. The filter is
 * always recorded so it can be re-applied if the table is (re)created; it is
 * applied immediately only when the table widget already exists.
 *
 * @see org.wso2.integrationstudio.gmf.esb.parts.BuilderMediatorOutputConectorPropertiesEditionPart#addFilterCommentMediators(ViewerFilter filter)
 *
 */
public void addFilterToCommentMediators(ViewerFilter filter) {
commentMediatorsFilters.add(filter);
if (this.commentMediators != null) {
this.commentMediators.addFilter(filter);
}
}
/**
 * {@inheritDoc}
 *
 * Records a business filter for the comment-mediators table. Unlike
 * {@link #addFilterToCommentMediators(ViewerFilter)}, the filter is only
 * stored here; it is not pushed to the widget by this method.
 *
 * @see org.wso2.integrationstudio.gmf.esb.parts.BuilderMediatorOutputConectorPropertiesEditionPart#addBusinessFilterCommentMediators(ViewerFilter filter)
 *
 */
public void addBusinessFilterToCommentMediators(ViewerFilter filter) {
commentMediatorsBusinessFilters.add(filter);
}
/**
 * {@inheritDoc}
 *
 * @return whether the given element is part of the table's current input
 *         settings (not merely displayed after filtering).
 *
 * @see org.wso2.integrationstudio.gmf.esb.parts.BuilderMediatorOutputConectorPropertiesEditionPart#isContainedInCommentMediatorsTable(EObject element)
 *
 */
public boolean isContainedInCommentMediatorsTable(EObject element) {
// The table input is always a ReferencesTableSettings (see initCommentMediators).
return ((ReferencesTableSettings)commentMediators.getInput()).contains(element);
}
/**
 * {@inheritDoc}
 *
 * @return the localized title for this properties-edition part.
 *
 * @see org.eclipse.emf.eef.runtime.api.parts.IPropertiesEditionPart#getTitle()
 *
 */
public String getTitle() {
return EsbMessages.BuilderMediatorOutputConector_Part_Title;
}
// Start of user code additional methods
// End of user code
}
|
/*
* Copyright 2015-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binding;
import java.lang.reflect.Field;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import org.junit.Ignore;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.springframework.beans.factory.NoSuchBeanDefinitionException;
import org.springframework.beans.factory.config.ConfigurableListableBeanFactory;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.boot.context.properties.source.MapConfigurationPropertySource;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.Input;
import org.springframework.cloud.stream.annotation.Output;
import org.springframework.cloud.stream.binder.Binder;
import org.springframework.cloud.stream.binder.BinderConfiguration;
import org.springframework.cloud.stream.binder.BinderFactory;
import org.springframework.cloud.stream.binder.BinderType;
import org.springframework.cloud.stream.binder.BinderTypeRegistry;
import org.springframework.cloud.stream.binder.Binding;
import org.springframework.cloud.stream.binder.ConsumerProperties;
import org.springframework.cloud.stream.binder.DefaultBinderFactory;
import org.springframework.cloud.stream.binder.DefaultBinderTypeRegistry;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.ExtendedPropertiesBinder;
import org.springframework.cloud.stream.binder.ProducerProperties;
import org.springframework.cloud.stream.binder.test.TestChannelBinderConfiguration;
import org.springframework.cloud.stream.config.BindingProperties;
import org.springframework.cloud.stream.config.BindingServiceConfiguration;
import org.springframework.cloud.stream.config.BindingServiceProperties;
import org.springframework.cloud.stream.converter.CompositeMessageConverterFactory;
import org.springframework.cloud.stream.messaging.Processor;
import org.springframework.cloud.stream.messaging.Sink;
import org.springframework.cloud.stream.reflection.GenericsUtils;
import org.springframework.cloud.stream.utils.MockBinderConfiguration;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Import;
import org.springframework.integration.annotation.ServiceActivator;
import org.springframework.integration.channel.DirectChannel;
import org.springframework.integration.test.util.TestUtils;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageChannel;
import org.springframework.messaging.SubscribableChannel;
import org.springframework.messaging.core.DestinationResolutionException;
import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler;
import org.springframework.util.ReflectionUtils;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.fail;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.ArgumentMatchers.isNull;
import static org.mockito.ArgumentMatchers.matches;
import static org.mockito.ArgumentMatchers.same;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
* @author Gary Russell
* @author Mark Fisher
* @author Marius Bogoevici
* @author Ilayaperumal Gopinathan
* @author Janne Valkealahti
* @author Soby Chacko
*/
public class BindingServiceTests {
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test
public void testDefaultGroup() throws Exception {
BindingServiceProperties properties = new BindingServiceProperties();
Map<String, BindingProperties> bindingProperties = new HashMap<>();
BindingProperties props = new BindingProperties();
props.setDestination("foo");
final String inputChannelName = "input";
bindingProperties.put(inputChannelName, props);
properties.setBindings(bindingProperties);
DefaultBinderFactory binderFactory = createMockBinderFactory();
Binder binder = binderFactory.getBinder("mock", MessageChannel.class);
BindingService service = new BindingService(properties, binderFactory);
MessageChannel inputChannel = new DirectChannel();
Binding<MessageChannel> mockBinding = Mockito.mock(Binding.class);
when(binder.bindConsumer(eq("foo"), isNull(), same(inputChannel),
any(ConsumerProperties.class))).thenReturn(mockBinding);
Collection<Binding<MessageChannel>> bindings = service.bindConsumer(inputChannel,
inputChannelName);
assertThat(bindings).hasSize(1);
Binding<MessageChannel> binding = bindings.iterator().next();
assertThat(binding).isSameAs(mockBinding);
service.unbindConsumers(inputChannelName);
verify(binder).bindConsumer(eq("foo"), isNull(), same(inputChannel),
any(ConsumerProperties.class));
verify(binding).unbind();
binderFactory.destroy();
}
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test
public void testMultipleConsumerBindings() throws Exception {
BindingServiceProperties properties = new BindingServiceProperties();
Map<String, BindingProperties> bindingProperties = new HashMap<>();
BindingProperties props = new BindingProperties();
props.setDestination("foo,bar");
final String inputChannelName = "input";
bindingProperties.put(inputChannelName, props);
properties.setBindings(bindingProperties);
DefaultBinderFactory binderFactory = createMockBinderFactory();
Binder binder = binderFactory.getBinder("mock", MessageChannel.class);
BindingService service = new BindingService(properties, binderFactory);
MessageChannel inputChannel = new DirectChannel();
Binding<MessageChannel> mockBinding1 = Mockito.mock(Binding.class);
Binding<MessageChannel> mockBinding2 = Mockito.mock(Binding.class);
when(binder.bindConsumer(eq("foo"), isNull(), same(inputChannel),
any(ConsumerProperties.class))).thenReturn(mockBinding1);
when(binder.bindConsumer(eq("bar"), isNull(), same(inputChannel),
any(ConsumerProperties.class))).thenReturn(mockBinding2);
Collection<Binding<MessageChannel>> bindings = service.bindConsumer(inputChannel,
"input");
assertThat(bindings).hasSize(2);
Iterator<Binding<MessageChannel>> iterator = bindings.iterator();
Binding<MessageChannel> binding1 = iterator.next();
Binding<MessageChannel> binding2 = iterator.next();
assertThat(binding1).isSameAs(mockBinding1);
assertThat(binding2).isSameAs(mockBinding2);
service.unbindConsumers("input");
verify(binder).bindConsumer(eq("foo"), isNull(), same(inputChannel),
any(ConsumerProperties.class));
verify(binder).bindConsumer(eq("bar"), isNull(), same(inputChannel),
any(ConsumerProperties.class));
verify(binding1).unbind();
verify(binding2).unbind();
binderFactory.destroy();
}
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test
public void testConsumerBindingWhenMultiplexingIsEnabled() throws Exception {
BindingServiceProperties properties = new BindingServiceProperties();
Map<String, BindingProperties> bindingProperties = new HashMap<>();
BindingProperties props = new BindingProperties();
props.setDestination("foo,bar");
ConsumerProperties consumer = properties.getConsumerProperties("input");
consumer.setMultiplex(true);
props.setConsumer(consumer);
final String inputChannelName = "input";
bindingProperties.put(inputChannelName, props);
properties.setBindings(bindingProperties);
DefaultBinderFactory binderFactory = createMockBinderFactory();
Binder binder = binderFactory.getBinder("mock", MessageChannel.class);
BindingService service = new BindingService(properties, binderFactory);
MessageChannel inputChannel = new DirectChannel();
Binding<MessageChannel> mockBinding1 = Mockito.mock(Binding.class);
when(binder.bindConsumer(eq("foo,bar"), isNull(), same(inputChannel),
any(ConsumerProperties.class))).thenReturn(mockBinding1);
Collection<Binding<MessageChannel>> bindings = service.bindConsumer(inputChannel,
"input");
assertThat(bindings).hasSize(1);
Iterator<Binding<MessageChannel>> iterator = bindings.iterator();
Binding<MessageChannel> binding1 = iterator.next();
assertThat(binding1).isSameAs(mockBinding1);
service.unbindConsumers("input");
verify(binder).bindConsumer(eq("foo,bar"), isNull(), same(inputChannel),
any(ConsumerProperties.class));
verify(binding1).unbind();
binderFactory.destroy();
}
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test
public void testExplicitGroup() throws Exception {
BindingServiceProperties properties = new BindingServiceProperties();
Map<String, BindingProperties> bindingProperties = new HashMap<>();
BindingProperties props = new BindingProperties();
props.setDestination("foo");
props.setGroup("fooGroup");
final String inputChannelName = "input";
bindingProperties.put(inputChannelName, props);
properties.setBindings(bindingProperties);
DefaultBinderFactory binderFactory = createMockBinderFactory();
Binder binder = binderFactory.getBinder("mock", MessageChannel.class);
BindingService service = new BindingService(properties, binderFactory);
MessageChannel inputChannel = new DirectChannel();
Binding<MessageChannel> mockBinding = Mockito.mock(Binding.class);
when(binder.bindConsumer(eq("foo"), eq("fooGroup"), same(inputChannel),
any(ConsumerProperties.class))).thenReturn(mockBinding);
Collection<Binding<MessageChannel>> bindings = service.bindConsumer(inputChannel,
inputChannelName);
assertThat(bindings).hasSize(1);
Binding<MessageChannel> binding = bindings.iterator().next();
assertThat(binding).isSameAs(mockBinding);
service.unbindConsumers(inputChannelName);
verify(binder).bindConsumer(eq("foo"), eq(props.getGroup()), same(inputChannel),
any(ConsumerProperties.class));
verify(binding).unbind();
binderFactory.destroy();
}
@SuppressWarnings({ "rawtypes", "unchecked" })
@Test
public void checkDynamicBinding() {
BindingServiceProperties properties = new BindingServiceProperties();
BindingProperties bindingProperties = new BindingProperties();
bindingProperties.setProducer(new ProducerProperties());
properties.setBindings(Collections.singletonMap("foo", bindingProperties));
DefaultBinderFactory binderFactory = createMockBinderFactory();
final ExtendedPropertiesBinder binder = mock(ExtendedPropertiesBinder.class);
Properties extendedProps = new Properties();
when(binder.getExtendedProducerProperties(anyString())).thenReturn(extendedProps);
Binding<MessageChannel> mockBinding = Mockito.mock(Binding.class);
final AtomicReference<MessageChannel> dynamic = new AtomicReference<>();
when(binder.bindProducer(matches("foo"), any(DirectChannel.class),
any(ProducerProperties.class))).thenReturn(mockBinding);
BindingService bindingService = new BindingService(properties, binderFactory) {
@Override
protected <T> Binder<T, ?, ?> getBinder(String channelName,
Class<T> bindableType) {
return binder;
}
};
SubscribableChannelBindingTargetFactory bindableSubscribableChannelFactory;
bindableSubscribableChannelFactory = new SubscribableChannelBindingTargetFactory(
new MessageConverterConfigurer(properties,
new CompositeMessageConverterFactory().getMessageConverterForAllRegistered()));
final AtomicBoolean callbackInvoked = new AtomicBoolean();
BinderAwareChannelResolver resolver = new BinderAwareChannelResolver(
bindingService, bindableSubscribableChannelFactory,
new DynamicDestinationsBindable(), (name, channel, props, extended) -> {
callbackInvoked.set(true);
assertThat(name).isEqualTo("foo");
assertThat(channel).isNotNull();
assertThat(props).isNotNull();
assertThat(extended).isSameAs(extendedProps);
props.setUseNativeEncoding(true);
extendedProps.setProperty("bar", "baz");
});
ConfigurableListableBeanFactory beanFactory = mock(
ConfigurableListableBeanFactory.class);
when(beanFactory.getBean("foo", MessageChannel.class))
.thenThrow(new NoSuchBeanDefinitionException(MessageChannel.class));
when(beanFactory.getBean("bar", MessageChannel.class))
.thenThrow(new NoSuchBeanDefinitionException(MessageChannel.class));
doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
dynamic.set(invocation.getArgument(1));
return null;
}
}).when(beanFactory).registerSingleton(eq("foo"), any(MessageChannel.class));
doAnswer(new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
return dynamic.get();
}
}).when(beanFactory).initializeBean(any(MessageChannel.class), eq("foo"));
resolver.setBeanFactory(beanFactory);
MessageChannel resolved = resolver.resolveDestination("foo");
assertThat(resolved).isSameAs(dynamic.get());
ArgumentCaptor<ProducerProperties> captor = ArgumentCaptor
.forClass(ProducerProperties.class);
verify(binder).bindProducer(eq("foo"), eq(dynamic.get()), captor.capture());
assertThat(captor.getValue().isUseNativeEncoding()).isTrue();
assertThat(captor.getValue()).isInstanceOf(ExtendedProducerProperties.class);
assertThat(((ExtendedProducerProperties) captor.getValue()).getExtension())
.isSameAs(extendedProps);
doReturn(dynamic.get()).when(beanFactory).getBean("foo", MessageChannel.class);
properties.setDynamicDestinations(new String[] { "foo" });
resolved = resolver.resolveDestination("foo");
assertThat(resolved).isSameAs(dynamic.get());
properties.setDynamicDestinations(new String[] { "test" });
try {
resolver.resolveDestination("bar");
fail("Should throw an exception");
}
catch (DestinationResolutionException e) {
assertThat(e).hasMessageContaining(
"Failed to find MessageChannel bean with name 'bar'");
}
}
@Test
public void testProducerPropertiesValidation() {
BindingServiceProperties serviceProperties = new BindingServiceProperties();
Map<String, BindingProperties> bindingProperties = new HashMap<>();
BindingProperties props = new BindingProperties();
ProducerProperties producerProperties = new ProducerProperties();
producerProperties.setPartitionCount(0);
props.setDestination("foo");
props.setProducer(producerProperties);
final String outputChannelName = "output";
bindingProperties.put(outputChannelName, props);
serviceProperties.setBindings(bindingProperties);
DefaultBinderFactory binderFactory = createMockBinderFactory();
BindingService service = new BindingService(serviceProperties, binderFactory);
MessageChannel outputChannel = new DirectChannel();
try {
service.bindProducer(outputChannel, outputChannelName);
fail("Producer properties should be validated.");
}
catch (IllegalStateException e) {
assertThat(e)
.hasMessageContaining("Partition count should be greater than zero.");
}
}
@Test
public void testDefaultPropertyBehavior() {
ConfigurableApplicationContext run = SpringApplication.run(
DefaultConsumerPropertiesTestSink.class,
"--server.port=0",
"--spring.cloud.stream.default.contentType=text/plain",
"--spring.cloud.stream.bindings.input1.contentType=application/json",
"--spring.cloud.stream.default.group=foo",
"--spring.cloud.stream.bindings.input2.group=bar",
"--spring.cloud.stream.default.consumer.concurrency=5",
"--spring.cloud.stream.bindings.input2.consumer.concurrency=1",
"--spring.cloud.stream.bindings.input1.consumer.partitioned=true",
"--spring.cloud.stream.default.producer.partitionCount=10",
"--spring.cloud.stream.bindings.output2.producer.partitionCount=1",
"--spring.cloud.stream.bindings.inputXyz.contentType=application/json",
"--spring.cloud.stream.bindings.inputFooBar.contentType=application/avro",
"--spring.cloud.stream.bindings.input_snake_case.contentType=application/avro");
BindingServiceProperties bindingServiceProperties = run.getBeanFactory()
.getBean(BindingServiceProperties.class);
Map<String, BindingProperties> bindings = bindingServiceProperties.getBindings();
assertThat(bindings.get("input1").getContentType()).isEqualTo("application/json");
assertThat(bindings.get("input2").getContentType()).isEqualTo("text/plain");
assertThat(bindings.get("input1").getGroup()).isEqualTo("foo");
assertThat(bindings.get("input2").getGroup()).isEqualTo("bar");
assertThat(bindings.get("input1").getConsumer().getConcurrency()).isEqualTo(5);
assertThat(bindings.get("input2").getConsumer().getConcurrency()).isEqualTo(1);
assertThat(bindings.get("input1").getConsumer().isPartitioned()).isEqualTo(true);
assertThat(bindings.get("input2").getConsumer().isPartitioned()).isEqualTo(false);
assertThat(bindings.get("output1").getProducer().getPartitionCount())
.isEqualTo(10);
assertThat(bindings.get("output2").getProducer().getPartitionCount())
.isEqualTo(1);
assertThat(bindings.get("inputXyz").getContentType())
.isEqualTo("application/json");
assertThat(bindings.get("inputFooBar").getContentType())
.isEqualTo("application/avro");
assertThat(bindings.get("inputFooBarBuzz").getContentType())
.isEqualTo("text/plain");
assertThat(bindings.get("input_snake_case").getContentType())
.isEqualTo("application/avro");
run.close();
}
@Test
public void testConsumerPropertiesValidation() {
BindingServiceProperties serviceProperties = new BindingServiceProperties();
Map<String, BindingProperties> bindingProperties = new HashMap<>();
BindingProperties props = new BindingProperties();
ConsumerProperties consumerProperties = new ConsumerProperties();
consumerProperties.setConcurrency(0);
props.setDestination("foo");
props.setConsumer(consumerProperties);
final String inputChannelName = "input";
bindingProperties.put(inputChannelName, props);
serviceProperties.setBindings(bindingProperties);
DefaultBinderFactory binderFactory = createMockBinderFactory();
BindingService service = new BindingService(serviceProperties, binderFactory);
MessageChannel inputChannel = new DirectChannel();
try {
service.bindConsumer(inputChannel, inputChannelName);
fail("Consumer properties should be validated.");
}
catch (IllegalStateException e) {
assertThat(e)
.hasMessageContaining("Concurrency should be greater than zero.");
}
}
@Test
public void testUnknownBinderOnBindingFailure() {
HashMap<String, String> properties = new HashMap<>();
properties.put("spring.cloud.stream.bindings.input.destination", "fooInput");
properties.put("spring.cloud.stream.bindings.input.binder", "mock");
properties.put("spring.cloud.stream.bindings.output.destination", "fooOutput");
properties.put("spring.cloud.stream.bindings.output.binder", "mockError");
BindingServiceProperties bindingServiceProperties = createBindingServiceProperties(
properties);
BindingService bindingService = new BindingService(bindingServiceProperties,
createMockBinderFactory());
bindingService.bindConsumer(new DirectChannel(), "input");
try {
bindingService.bindProducer(new DirectChannel(), "output");
fail("Expected 'Unknown binder configuration'");
}
catch (IllegalStateException e) {
assertThat(e).hasMessageContaining("Unknown binder configuration: mockError");
}
}
@Test
public void testUnrecognizedBinderAllowedIfNotUsed() {
HashMap<String, String> properties = new HashMap<>();
properties.put("spring.cloud.stream.bindings.input.destination", "fooInput");
properties.put("spring.cloud.stream.bindings.output.destination", "fooOutput");
properties.put("spring.cloud.stream.defaultBinder", "mock1");
properties.put("spring.cloud.stream.binders.mock1.type", "mock");
properties.put("spring.cloud.stream.binders.kafka1.type", "kafka");
BindingServiceProperties bindingServiceProperties = createBindingServiceProperties(
properties);
BinderFactory binderFactory = new BindingServiceConfiguration()
.binderFactory(createMockBinderTypeRegistry(), bindingServiceProperties);
BindingService bindingService = new BindingService(bindingServiceProperties,
binderFactory);
bindingService.bindConsumer(new DirectChannel(), "input");
bindingService.bindProducer(new DirectChannel(), "output");
}
@Test
public void testUnrecognizedBinderDisallowedIfUsed() {
HashMap<String, String> properties = new HashMap<>();
properties.put("spring.cloud.stream.bindings.input.destination", "fooInput");
properties.put("spring.cloud.stream.bindings.input.binder", "mock1");
properties.put("spring.cloud.stream.bindings.output.destination", "fooOutput");
properties.put("spring.cloud.stream.bindings.output.type", "kafka1");
properties.put("spring.cloud.stream.binders.mock1.type", "mock");
properties.put("spring.cloud.stream.binders.kafka1.type", "kafka");
BindingServiceProperties bindingServiceProperties = createBindingServiceProperties(
properties);
BinderFactory binderFactory = new BindingServiceConfiguration()
.binderFactory(createMockBinderTypeRegistry(), bindingServiceProperties);
BindingService bindingService = new BindingService(bindingServiceProperties,
binderFactory);
bindingService.bindConsumer(new DirectChannel(), "input");
try {
bindingService.bindProducer(new DirectChannel(), "output");
fail("Expected 'Unknown binder configuration'");
}
catch (IllegalArgumentException e) {
assertThat(e).hasMessageContaining("Binder type kafka is not defined");
}
}
@Test
public void testResolveBindableType() {
Class<?> bindableType = GenericsUtils.getParameterType(FooBinder.class,
Binder.class, 0);
assertThat(bindableType).isSameAs(SomeBindableType.class);
}
@SuppressWarnings({ "rawtypes", "unchecked" })
@Test
@Ignore
public void testLateBindingConsumer() throws Exception {
BindingServiceProperties properties = new BindingServiceProperties();
properties.setBindingRetryInterval(1);
Map<String, BindingProperties> bindingProperties = new HashMap<>();
BindingProperties props = new BindingProperties();
props.setDestination("foo");
final String inputChannelName = "input";
bindingProperties.put(inputChannelName, props);
properties.setBindings(bindingProperties);
DefaultBinderFactory binderFactory = createMockBinderFactory();
Binder binder = binderFactory.getBinder("mock", MessageChannel.class);
ThreadPoolTaskScheduler scheduler = new ThreadPoolTaskScheduler();
scheduler.initialize();
BindingService service = new BindingService(properties, binderFactory, scheduler);
MessageChannel inputChannel = new DirectChannel();
final Binding<MessageChannel> mockBinding = Mockito.mock(Binding.class);
final CountDownLatch fail = new CountDownLatch(2);
doAnswer(i -> {
fail.countDown();
if (fail.getCount() == 1) {
throw new RuntimeException("fail");
}
return mockBinding;
}).when(binder).bindConsumer(eq("foo"), isNull(), same(inputChannel),
any(ConsumerProperties.class));
Collection<Binding<MessageChannel>> bindings = service.bindConsumer(inputChannel,
inputChannelName);
assertThat(fail.await(10, TimeUnit.SECONDS)).isTrue();
assertThat(bindings).hasSize(1);
Binding<MessageChannel> delegate = TestUtils
.getPropertyValue(bindings.iterator().next(), "delegate", Binding.class);
int n = 0;
while (n++ < 300 && delegate == null) {
Thread.sleep(400);
}
assertThat(delegate).isSameAs(mockBinding);
service.unbindConsumers(inputChannelName);
verify(binder, times(2)).bindConsumer(eq("foo"), isNull(), same(inputChannel),
any(ConsumerProperties.class));
verify(delegate).unbind();
binderFactory.destroy();
}
@SuppressWarnings({ "rawtypes", "unchecked" })
@Test
public void testLateBindingProducer() throws Exception {
BindingServiceProperties properties = new BindingServiceProperties();
properties.setBindingRetryInterval(1);
Map<String, BindingProperties> bindingProperties = new HashMap<>();
BindingProperties props = new BindingProperties();
props.setDestination("foo");
final String outputChannelName = "output";
bindingProperties.put(outputChannelName, props);
properties.setBindings(bindingProperties);
DefaultBinderFactory binderFactory = createMockBinderFactory();
Binder binder = binderFactory.getBinder("mock", MessageChannel.class);
ThreadPoolTaskScheduler scheduler = new ThreadPoolTaskScheduler();
scheduler.initialize();
BindingService service = new BindingService(properties, binderFactory, scheduler);
MessageChannel outputChannel = new DirectChannel();
final Binding<MessageChannel> mockBinding = Mockito.mock(Binding.class);
final CountDownLatch fail = new CountDownLatch(2);
doAnswer(i -> {
fail.countDown();
if (fail.getCount() == 1) {
throw new RuntimeException("fail");
}
return mockBinding;
}).when(binder).bindProducer(eq("foo"), same(outputChannel),
any(ProducerProperties.class));
Binding<MessageChannel> binding = service.bindProducer(outputChannel,
outputChannelName);
assertThat(fail.await(10, TimeUnit.SECONDS)).isTrue();
assertThat(binding).isNotNull();
Binding delegate = TestUtils.getPropertyValue(binding, "delegate", Binding.class);
int n = 0;
while (n++ < 300 && delegate == null) {
Thread.sleep(100);
delegate = TestUtils.getPropertyValue(binding, "delegate", Binding.class);
}
assertThat(delegate).isSameAs(mockBinding);
service.unbindProducers(outputChannelName);
verify(binder, times(2)).bindProducer(eq("foo"), same(outputChannel),
any(ProducerProperties.class));
verify(delegate).unbind();
binderFactory.destroy();
scheduler.destroy();
}
@SuppressWarnings("unchecked")
@Test
public void testBindingAutostartup() throws Exception {
ApplicationContext context = new SpringApplicationBuilder(FooConfiguration.class)
.web(WebApplicationType.NONE).run("--spring.jmx.enabled=false",
"--spring.cloud.stream.bindings.input.consumer.auto-startup=false");
BindingService bindingService = context.getBean(BindingService.class);
Field cbField = ReflectionUtils.findField(BindingService.class,
"consumerBindings");
cbField.setAccessible(true);
Map<String, Object> cbMap = (Map<String, Object>) cbField.get(bindingService);
Binding<?> inputBinding = ((List<Binding<?>>) cbMap.get("input")).get(0);
assertThat(inputBinding.isRunning()).isFalse();
}
private DefaultBinderFactory createMockBinderFactory() {
BinderTypeRegistry binderTypeRegistry = createMockBinderTypeRegistry();
return new DefaultBinderFactory(
Collections.singletonMap("mock",
new BinderConfiguration("mock", new HashMap<>(), true, true)),
binderTypeRegistry);
}
private DefaultBinderTypeRegistry createMockBinderTypeRegistry() {
return new DefaultBinderTypeRegistry(Collections.singletonMap("mock",
new BinderType("mock", new Class[] { MockBinderConfiguration.class })));
}
private BindingServiceProperties createBindingServiceProperties(
HashMap<String, String> properties) {
BindingServiceProperties bindingServiceProperties = new BindingServiceProperties();
org.springframework.boot.context.properties.bind.Binder propertiesBinder;
propertiesBinder = new org.springframework.boot.context.properties.bind.Binder(
new MapConfigurationPropertySource(properties));
propertiesBinder.bind("spring.cloud.stream",
org.springframework.boot.context.properties.bind.Bindable
.ofInstance(bindingServiceProperties));
return bindingServiceProperties;
}
public interface FooBinding {
@Input("input1")
SubscribableChannel in1();
@Input("input2")
SubscribableChannel in2();
@Output("output1")
MessageChannel out1();
@Output("output2")
MessageChannel out2();
@Input("inputXyz")
SubscribableChannel inXyz();
@Input("inputFooBar")
SubscribableChannel inFooBar();
@Input("inputFooBarBuzz")
SubscribableChannel inFooBarBuzz();
@Input("input_snake_case")
SubscribableChannel inWithSnakeCase();
}
@EnableBinding(FooBinding.class)
@EnableAutoConfiguration
public static class DefaultConsumerPropertiesTestSink {
@Bean
public Binder<?, ?, ?> binder() {
return Mockito.mock(Binder.class,
Mockito.withSettings().defaultAnswer(Mockito.RETURNS_MOCKS));
}
}
@EnableBinding(Sink.class)
@Import(TestChannelBinderConfiguration.class)
@EnableAutoConfiguration
public static class FooConfiguration {
@ServiceActivator(inputChannel = Processor.INPUT)
public void echo(Message<?> value) throws Exception {
}
}
public static class FooBinder
implements Binder<SomeBindableType, ConsumerProperties, ProducerProperties> {
@Override
public Binding<SomeBindableType> bindConsumer(String name, String group,
SomeBindableType inboundBindTarget,
ConsumerProperties consumerProperties) {
throw new UnsupportedOperationException();
}
@Override
public Binding<SomeBindableType> bindProducer(String name,
SomeBindableType outboundBindTarget,
ProducerProperties producerProperties) {
throw new UnsupportedOperationException();
}
}
// Marker bind-target type for FooBinder; carries no state or behavior.
public static class SomeBindableType {
}
}
|
package com.rjhister.app.service.dto;
import com.rjhister.app.config.Constants;
import com.rjhister.app.domain.Authority;
import com.rjhister.app.domain.User;
import javax.validation.constraints.*;
import java.time.Instant;
import java.util.Set;
import java.util.stream.Collectors;
/**
 * A DTO representing a user, with his authorities.
 */
public class UserDTO {

    private Long id;

    @NotBlank
    @Pattern(regexp = Constants.LOGIN_REGEX)
    @Size(min = 1, max = 50)
    private String login;

    @Size(max = 50)
    private String firstName;

    @Size(max = 50)
    private String lastName;

    @Email
    @Size(min = 5, max = 254)
    private String email;

    @Size(max = 256)
    private String imageUrl;

    private boolean activated = false;

    @Size(min = 2, max = 10)
    private String langKey;

    // Audit metadata copied from the entity; not validated here because it is
    // populated by the persistence layer, not by user input.
    private String createdBy;

    private Instant createdDate;

    private String lastModifiedBy;

    private Instant lastModifiedDate;

    // Authority names (not Authority entities).
    private Set<String> authorities;

    public UserDTO() {
        // Empty constructor needed for Jackson.
    }

    /**
     * Builds a DTO from the given entity, flattening its authorities to their names.
     *
     * @param user the source entity; its authorities collection must be non-null
     */
    public UserDTO(User user) {
        this.id = user.getId();
        this.login = user.getLogin();
        this.firstName = user.getFirstName();
        this.lastName = user.getLastName();
        this.email = user.getEmail();
        this.activated = user.getActivated();
        this.imageUrl = user.getImageUrl();
        this.langKey = user.getLangKey();
        this.createdBy = user.getCreatedBy();
        this.createdDate = user.getCreatedDate();
        this.lastModifiedBy = user.getLastModifiedBy();
        this.lastModifiedDate = user.getLastModifiedDate();
        this.authorities = user.getAuthorities().stream()
            .map(Authority::getName)
            .collect(Collectors.toSet());
    }

    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    public String getLogin() {
        return login;
    }

    public void setLogin(String login) {
        this.login = login;
    }

    public String getFirstName() {
        return firstName;
    }

    public void setFirstName(String firstName) {
        this.firstName = firstName;
    }

    public String getLastName() {
        return lastName;
    }

    public void setLastName(String lastName) {
        this.lastName = lastName;
    }

    public String getEmail() {
        return email;
    }

    public void setEmail(String email) {
        this.email = email;
    }

    public String getImageUrl() {
        return imageUrl;
    }

    public void setImageUrl(String imageUrl) {
        this.imageUrl = imageUrl;
    }

    public boolean isActivated() {
        return activated;
    }

    public void setActivated(boolean activated) {
        this.activated = activated;
    }

    public String getLangKey() {
        return langKey;
    }

    public void setLangKey(String langKey) {
        this.langKey = langKey;
    }

    public String getCreatedBy() {
        return createdBy;
    }

    public void setCreatedBy(String createdBy) {
        this.createdBy = createdBy;
    }

    public Instant getCreatedDate() {
        return createdDate;
    }

    public void setCreatedDate(Instant createdDate) {
        this.createdDate = createdDate;
    }

    public String getLastModifiedBy() {
        return lastModifiedBy;
    }

    public void setLastModifiedBy(String lastModifiedBy) {
        this.lastModifiedBy = lastModifiedBy;
    }

    public Instant getLastModifiedDate() {
        return lastModifiedDate;
    }

    public void setLastModifiedDate(Instant lastModifiedDate) {
        this.lastModifiedDate = lastModifiedDate;
    }

    public Set<String> getAuthorities() {
        return authorities;
    }

    public void setAuthorities(Set<String> authorities) {
        this.authorities = authorities;
    }

    @Override
    public String toString() {
        // Fix: createdBy is a String and is now quoted like the other String
        // fields (it was previously printed unquoted, inconsistently with
        // lastModifiedBy).
        return "UserDTO{" +
            "login='" + login + '\'' +
            ", firstName='" + firstName + '\'' +
            ", lastName='" + lastName + '\'' +
            ", email='" + email + '\'' +
            ", imageUrl='" + imageUrl + '\'' +
            ", activated=" + activated +
            ", langKey='" + langKey + '\'' +
            ", createdBy='" + createdBy + '\'' +
            ", createdDate=" + createdDate +
            ", lastModifiedBy='" + lastModifiedBy + '\'' +
            ", lastModifiedDate=" + lastModifiedDate +
            ", authorities=" + authorities +
            "}";
    }
}
|
/**
* Automatically generated file. DO NOT MODIFY
*/
package gr.scify.icsee.test;
// Generated by the Android build for the "debug" build type of the test APK.
public final class BuildConfig {
public static final boolean DEBUG = Boolean.parseBoolean("true");
public static final String APPLICATION_ID = "gr.scify.icsee.test";
public static final String BUILD_TYPE = "debug";
public static final String FLAVOR = "";
// -1 / "" — the test APK presumably carries no version of its own.
public static final int VERSION_CODE = -1;
public static final String VERSION_NAME = "";
}
|
/*******************************************************************************
* Copyright (C) 2016 Black Duck Software, Inc.
* http://www.blackducksoftware.com/
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*******************************************************************************/
package com.blackducksoftware.integration.email.model;
import java.io.File;
import java.io.FileOutputStream;
import javax.mail.Message;
import javax.mail.MessagingException;
import javax.mail.Session;
// this mail wrapper is for testing purposes and can be swapped in the EmailEngine with the base class.
public class FileMailWrapper extends JavaMailWrapper {

    // Monotonically increasing suffix so each written message gets a unique file name.
    private int index = 0;

    /**
     * Instead of sending the message over the wire, serializes it to a file in a
     * "Test_Messages" directory created next to the configured e-mail template
     * directory. Failures are printed and swallowed on purpose: this is a
     * best-effort test helper.
     */
    @Override
    public void sendMessage(final ExtensionProperties customerProperties, final Session session, final Message message)
            throws MessagingException {
        final File parent = new File(customerProperties.getEmailTemplateDirectory());
        final File messagesDir = new File(parent.getParentFile(), "Test_Messages");
        messagesDir.mkdirs();
        final File file = new File(messagesDir, createFileName());
        // FileOutputStream creates the file on open, so the previous explicit
        // file.createNewFile() call inside the try block was dead code and has
        // been removed.
        try (FileOutputStream fileOutput = new FileOutputStream(file)) {
            message.writeTo(fileOutput);
        } catch (final Exception ex) {
            ex.printStackTrace();
        }
    }

    /** Returns the next unique file name: "Test_Message_1", "Test_Message_2", ... */
    private String createFileName() {
        return "Test_Message_" + ++index;
    }
}
|
package org.odk.collect.android.formmanagement;
import android.content.Context;
import androidx.test.core.app.ApplicationProvider;
import androidx.test.ext.junit.runners.AndroidJUnit4;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.odk.collect.android.R;
import org.odk.collect.android.openrosa.api.FormApiException;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.is;
import static org.odk.collect.android.openrosa.api.FormApiException.Type.FETCH_ERROR;
import static org.odk.collect.android.openrosa.api.FormApiException.Type.SECURITY_ERROR;
import static org.odk.collect.android.openrosa.api.FormApiException.Type.UNREACHABLE;
@RunWith(AndroidJUnit4.class)
public class FormApiExceptionMapperTest {

    private Context context;
    private FormApiExceptionMapper mapper;

    @Before
    public void setup() {
        context = ApplicationProvider.getApplicationContext();
        mapper = new FormApiExceptionMapper(context);
    }

    @Test
    public void fetchErrorType_returnsGenericMessage() {
        String actual = mapper.getMessage(new FormApiException(FETCH_ERROR));
        assertThat(actual, is(context.getString(R.string.report_to_project_lead)));
    }

    @Test
    public void unknownHostType_returnsUnknownHostMessage() {
        String actual = mapper.getMessage(new FormApiException(UNREACHABLE, "http://unknown.com"));
        assertThat(actual, is(withReportSuffix(context.getString(R.string.unreachable_error, "http://unknown.com"))));
    }

    @Test
    public void securityErrorType_returnsSecurityMessage() {
        String actual = mapper.getMessage(new FormApiException(SECURITY_ERROR, "http://unknown.com"));
        assertThat(actual, is(withReportSuffix(context.getString(R.string.security_error, "http://unknown.com"))));
    }

    /** Appends the generic "report to project lead" suffix the mapper adds to server errors. */
    private String withReportSuffix(String message) {
        return message + " " + context.getString(R.string.report_to_project_lead);
    }
}
|
/*
* FactSet Private Markets API
* FactSet Private Markets API encompasses Private Company Financials data to start. Private Company firmographics can be found in the FactSet Entity API. The future plans of this service will include additional Private Markets data, such as Private Equity/Venture Capital.
*
* The version of the OpenAPI document: 1.0.0
* Contact: api@factset.com
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
package com.factset.sdk.FactSetPrivateMarkets.auth;
import com.factset.sdk.FactSetPrivateMarkets.Pair;
import com.factset.sdk.FactSetPrivateMarkets.ApiException;
import java.net.URI;
import java.util.Map;
import java.util.List;
@javax.annotation.Generated(value = "org.openapitools.codegen.languages.JavaClientCodegen")
public class HttpBearerAuth implements Authentication {
    private final String scheme;
    private String bearerToken;

    public HttpBearerAuth(String scheme) {
        this.scheme = scheme;
    }

    /**
     * Returns the token that, combined with the scheme, forms the value of the
     * Authorization header.
     *
     * @return the bearer token, or {@code null} if none has been set
     */
    public String getBearerToken() {
        return bearerToken;
    }

    /**
     * Sets the token that, combined with the scheme, forms the value of the
     * Authorization header.
     *
     * @param bearerToken the bearer token to send in the Authorization header
     */
    public void setBearerToken(String bearerToken) {
        this.bearerToken = bearerToken;
    }

    /**
     * Adds the Authorization header ("&lt;scheme&gt; &lt;token&gt;") when a token
     * is set; does nothing otherwise. Query and cookie parameters are untouched.
     */
    @Override
    public void applyToParams(List<Pair> queryParams, Map<String, String> headerParams, Map<String, String> cookieParams, String payload, String method, URI uri) throws ApiException {
        if (bearerToken == null) {
            return;
        }
        String prefix = (scheme == null) ? "" : upperCaseBearer(scheme) + " ";
        headerParams.put("Authorization", prefix + bearerToken);
    }

    /** Normalizes any casing of "bearer" to "Bearer"; other schemes pass through unchanged. */
    private static String upperCaseBearer(String scheme) {
        if ("bearer".equalsIgnoreCase(scheme)) {
            return "Bearer";
        }
        return scheme;
    }
}
|
/*
* Copyright 2007-2017 Michele Mostarda ( michele.mostarda@gmail.com ).
* All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the 'License');
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an 'AS IS' BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asemantics.rdfcoder.model;
/**
 * Represents a {@link com.asemantics.rdfcoder.model.CodeModel} exposing a <i>SPARQL</i> endpoint.
 */
public abstract class SPARQLQuerableCodeModel extends CodeModelBase {
/**
 * Executes the given SPARQL query against this code model.
 *
 * @param sparqlQry the SPARQL query string to run
 * @return the result of evaluating the query
 * @throws SPARQLException if the query cannot be executed
 */
public abstract QueryResult performQuery(String sparqlQry) throws SPARQLException;
}
|
package com.pcz.neo4j;
import cn.hutool.json.JSONUtil;
import com.pcz.neo4j.model.Lesson;
import com.pcz.neo4j.model.Student;
import com.pcz.neo4j.service.NeoService;
import lombok.extern.slf4j.Slf4j;
import org.junit.Test;
import org.springframework.beans.factory.annotation.Autowired;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
@Slf4j
public class Neo4jTest extends SpringBootNeo4jApplicationTest {

    @Autowired
    private NeoService neoService;

    /** Seeds the graph with the demo data set. */
    @Test
    public void testSave() {
        neoService.initData();
    }

    /** Removes the demo data from the graph. */
    @Test
    public void testDelete() {
        neoService.delete();
    }

    /** Finds lessons for a student (depth 2) and logs each as JSON. */
    @Test
    public void testFindLessonsByStudent() {
        List<Lesson> lessons = neoService.findLessonsFromStudent("漩涡鸣人", 2);
        for (Lesson lesson : lessons) {
            log.info("[lesson] = {}", JSONUtil.toJsonStr(lesson));
        }
    }

    /** Counts students school-wide (null filter) and for one specific class. */
    @Test
    public void testCountStudent() {
        Long all = neoService.studentCount(null);
        log.info("[全校人数] = {}", all);
        Long seven = neoService.studentCount("第七班");
        log.info("[第七班人数] = {}", seven);
    }

    /** Logs, per lesson, the names of the students taking it together. */
    @Test
    public void testFindClassmates() {
        Map<String, List<Student>> classmates = neoService.findClassmatesGroupByLesson();
        for (Map.Entry<String, List<Student>> entry : classmates.entrySet()) {
            List<String> names = entry.getValue()
                    .stream()
                    .map(Student::getName)
                    .collect(Collectors.toList());
            log.info("{}: {}", entry.getKey(), JSONUtil.toJsonStr(names));
        }
    }

    /** Logs, per teacher, the names of that teacher's students. */
    @Test
    public void testFindTeacherStudent() {
        Map<String, Set<Student>> teacherStudent = neoService.findTeacherStudent();
        for (Map.Entry<String, Set<Student>> entry : teacherStudent.entrySet()) {
            List<String> names = entry.getValue()
                    .stream()
                    .map(Student::getName)
                    .collect(Collectors.toList());
            log.info("{}: {}", entry.getKey(), JSONUtil.toJsonStr(names));
        }
    }
}
|
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.javaagent.instrumentation.jms;
import static io.opentelemetry.api.trace.Span.Kind.CONSUMER;
import static io.opentelemetry.api.trace.Span.Kind.PRODUCER;
import static io.opentelemetry.instrumentation.api.decorator.BaseDecorator.extract;
import static io.opentelemetry.javaagent.instrumentation.jms.MessageExtractAdapter.GETTER;
import static io.opentelemetry.javaagent.instrumentation.jms.MessageInjectAdapter.SETTER;
import io.opentelemetry.api.OpenTelemetry;
import io.opentelemetry.api.trace.Span;
import io.opentelemetry.api.trace.SpanBuilder;
import io.opentelemetry.api.trace.SpanContext;
import io.opentelemetry.api.trace.attributes.SemanticAttributes;
import io.opentelemetry.context.Context;
import io.opentelemetry.context.Scope;
import io.opentelemetry.instrumentation.api.tracer.BaseTracer;
import java.util.concurrent.TimeUnit;
import javax.jms.Destination;
import javax.jms.JMSException;
import javax.jms.Message;
import javax.jms.Queue;
import javax.jms.TemporaryQueue;
import javax.jms.TemporaryTopic;
import javax.jms.Topic;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Tracer for JMS producer and consumer spans: extracts destination information
 * from messages, propagates trace context on send, and records messaging.*
 * semantic attributes on every span it starts.
 */
public class JmsTracer extends BaseTracer {
private static final Logger log = LoggerFactory.getLogger(JmsTracer.class);
// From the spec: placeholder name recorded for temporary queues/topics.
public static final String TEMP_DESTINATION_NAME = "(temporary)";
private static final JmsTracer TRACER = new JmsTracer();
/** Returns the process-wide singleton instance. */
public static JmsTracer tracer() {
return TRACER;
}
/**
 * Starts a CONSUMER span for the given destination/operation with the given
 * start timestamp (epoch milliseconds). For "process" operations the span is
 * parented on the context extracted from the message headers — but only when
 * the extracted span context is valid; otherwise no explicit parent is set.
 */
public Span startConsumerSpan(
MessageDestination destination, String operation, Message message, long startTime) {
// NOTE(review): `tracer` is inherited from BaseTracer — presumably the
// underlying OpenTelemetry Tracer; confirm in the base class.
SpanBuilder spanBuilder =
tracer
.spanBuilder(spanName(destination, operation))
.setSpanKind(CONSUMER)
.setStartTimestamp(startTime, TimeUnit.MILLISECONDS)
.setAttribute(SemanticAttributes.MESSAGING_OPERATION, operation);
if (message != null && "process".equals(operation)) {
Context context = extract(message, GETTER);
SpanContext spanContext = Span.fromContext(context).getSpanContext();
if (spanContext.isValid()) {
spanBuilder.setParent(context);
}
}
Span span = spanBuilder.startSpan();
afterStart(span, destination, message);
return span;
}
/** Starts a PRODUCER span named "&lt;destination&gt; send" with messaging attributes applied. */
public Span startProducerSpan(MessageDestination destination, Message message) {
Span span = tracer.spanBuilder(spanName(destination, "send")).setSpanKind(PRODUCER).startSpan();
afterStart(span, destination, message);
return span;
}
/**
 * Makes the producer span current and injects its context into the message
 * headers (via the global propagators) so the consumer side can continue the trace.
 * Callers are responsible for closing the returned scope.
 */
public Scope startProducerScope(Span span, Message message) {
Context context = Context.current().with(span);
OpenTelemetry.getGlobalPropagators().getTextMapPropagator().inject(context, message, SETTER);
return context.makeCurrent();
}
/** Span name: "&lt;destination name&gt; &lt;operation&gt;", using the temporary placeholder when applicable. */
public String spanName(MessageDestination destination, String operation) {
if (destination.temporary) {
return TEMP_DESTINATION_NAME + " " + operation;
} else {
return destination.destinationName + " " + operation;
}
}
// TIBCO marks its temporary destinations with this name prefix rather than
// (only) the TemporaryQueue/TemporaryTopic interfaces.
private static final String TIBCO_TMP_PREFIX = "$TMP$";
/**
 * Resolves the message's JMS destination, falling back to the supplied
 * destination when the message has none or reading it throws.
 */
public MessageDestination extractDestination(Message message, Destination fallbackDestination) {
Destination jmsDestination = null;
try {
jmsDestination = message.getJMSDestination();
} catch (Exception ignored) {
// Best effort: some providers throw here; the fallback covers this case.
}
if (jmsDestination == null) {
jmsDestination = fallbackDestination;
}
return extractMessageDestination(jmsDestination);
}
/**
 * Maps a JMS {@link Destination} to a {@link MessageDestination}, classifying it
 * as "queue" or "topic" and flagging temporary destinations (including TIBCO's
 * "$TMP$"-prefixed names). A name that cannot be read becomes "unknown"; a
 * destination that is neither Queue nor Topic yields MessageDestination.UNKNOWN.
 */
public static MessageDestination extractMessageDestination(Destination destination) {
if (destination instanceof Queue) {
String queueName;
try {
queueName = ((Queue) destination).getQueueName();
} catch (JMSException e) {
queueName = "unknown";
}
boolean temporary =
destination instanceof TemporaryQueue || queueName.startsWith(TIBCO_TMP_PREFIX);
return new MessageDestination(queueName, "queue", temporary);
}
if (destination instanceof Topic) {
String topicName;
try {
topicName = ((Topic) destination).getTopicName();
} catch (JMSException e) {
topicName = "unknown";
}
boolean temporary =
destination instanceof TemporaryTopic || topicName.startsWith(TIBCO_TMP_PREFIX);
return new MessageDestination(topicName, "topic", temporary);
}
return MessageDestination.UNKNOWN;
}
/**
 * Records the messaging.* attributes shared by producer and consumer spans.
 * Message/correlation id lookups are optional: provider failures there are
 * logged at debug level and otherwise ignored so tracing never breaks the app.
 */
private void afterStart(Span span, MessageDestination destination, Message message) {
span.setAttribute(SemanticAttributes.MESSAGING_SYSTEM, "jms");
span.setAttribute(SemanticAttributes.MESSAGING_DESTINATION_KIND, destination.destinationKind);
if (destination.temporary) {
span.setAttribute(SemanticAttributes.MESSAGING_TEMP_DESTINATION, true);
span.setAttribute(SemanticAttributes.MESSAGING_DESTINATION, TEMP_DESTINATION_NAME);
} else {
span.setAttribute(SemanticAttributes.MESSAGING_DESTINATION, destination.destinationName);
}
if (message != null) {
try {
String messageId = message.getJMSMessageID();
if (messageId != null) {
span.setAttribute(SemanticAttributes.MESSAGING_MESSAGE_ID, messageId);
}
} catch (Exception e) {
log.debug("Failure getting JMS message id", e);
}
try {
String correlationId = message.getJMSCorrelationID();
if (correlationId != null) {
span.setAttribute(SemanticAttributes.MESSAGING_CONVERSATION_ID, correlationId);
}
} catch (Exception e) {
log.debug("Failure getting JMS correlation id", e);
}
}
}
@Override
protected String getInstrumentationName() {
return "io.opentelemetry.javaagent.jms";
}
}
|
package com.diguage.truman.netty.tcp;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import java.util.UUID;
import static java.nio.charset.StandardCharsets.UTF_8;
/**
* @author D瓜哥, https://www.diguage.com/
* @since 2020-06-29 10:42
*/
public class TcpServerHandler extends SimpleChannelInboundHandler<ByteBuf> {

    // Number of messages this handler instance has received so far.
    private int count;

    /**
     * Decodes the inbound bytes as UTF-8, prints the payload and the running
     * message count, then replies with a random UUID followed by a space.
     */
    @Override
    protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) throws Exception {
        byte[] payload = new byte[msg.readableBytes()];
        msg.readBytes(payload);
        String text = new String(payload, UTF_8);
        System.out.println("服务器接收到数据=" + text);
        count++;
        System.out.println("服务器接收到消息量=" + count);
        ByteBuf reply = Unpooled.copiedBuffer(UUID.randomUUID() + " ", UTF_8);
        ctx.writeAndFlush(reply);
    }

    /** On any pipeline error: print the stack trace and close the connection. */
    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
        cause.printStackTrace();
        ctx.channel().close();
    }
}
|
/**
* Copyright (c) 2013, impossibl.com
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of impossibl.com nor the names of its contributors may
* be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.impossibl.postgres.jdbc;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Properties;
import static java.lang.Boolean.FALSE;
import io.netty.util.ResourceLeakDetector;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
/**
 * Verifies that the driver's ThreadedHousekeeper cleans up JDBC resources
 * (result sets, statements, connections) once their last strong reference is
 * dropped. The tests are inherently GC/timing dependent: they null out local
 * references, force a GC, and poll the housekeeper via identity hash codes.
 */
@RunWith(JUnit4.class)
public class LeakTest {
Connection conn;
ResourceLeakDetector.Level savedLevel;
// Disable Netty's leak detector and the housekeeper's leak logging for the
// duration of each test — the leaks created here are intentional.
@Before
public void before() throws Exception {
conn = TestUtil.openDB();
savedLevel = ResourceLeakDetector.getLevel();
ResourceLeakDetector.setLevel(ResourceLeakDetector.Level.DISABLED);
getHousekeeper().setLogLeakedReferences(false);
}
// Restore the leak-detector level and clear any housekeeper state.
@After
public void after() throws Exception {
if (conn != null && getHousekeeper() != null) {
getHousekeeper().testClear();
}
ResourceLeakDetector.setLevel(savedLevel);
}
// Returns the connection's housekeeper, or null when housekeeping is disabled.
ThreadedHousekeeper getHousekeeper() throws SQLException {
if ((conn.unwrap(PGDirectConnection.class)).housekeeper != null)
return (ThreadedHousekeeper)(conn.unwrap(PGDirectConnection.class)).housekeeper.get();
else
return null;
}
// A leaked (never closed) ResultSet is cleaned by the housekeeper after GC,
// while the still-referenced Statement and Connection are not.
@Test
public void testResultSetLeak() throws SQLException {
ThreadedHousekeeper housekeeper = getHousekeeper();
assertNotNull(housekeeper);
int connId = System.identityHashCode(conn);
Statement stmt = conn.createStatement();
int stmtId = System.identityHashCode(stmt);
ResultSet rs = stmt.executeQuery("SELECT 1");
int rsId = System.identityHashCode(rs);
// Drop the only strong reference so GC can reclaim the ResultSet.
rs = null;
sleep();
assertTrue(housekeeper.testCheckCleaned(rsId));
sleep();
assertFalse(housekeeper.testCheckCleaned(stmtId));
sleep();
assertFalse(housekeeper.testCheckCleaned(connId));
}
// An explicitly closed ResultSet is also reported as cleaned; parents are not.
@Test
public void testResultSetNoLeak() throws SQLException {
ThreadedHousekeeper housekeeper = getHousekeeper();
assertNotNull(housekeeper);
int connId = System.identityHashCode(conn);
Statement stmt = conn.createStatement();
int stmtId = System.identityHashCode(stmt);
ResultSet rs = stmt.executeQuery("SELECT 1");
int rsId = System.identityHashCode(rs);
rs.close();
sleep();
assertTrue(housekeeper.testCheckCleaned(rsId));
sleep();
assertFalse(housekeeper.testCheckCleaned(stmtId));
sleep();
assertFalse(housekeeper.testCheckCleaned(connId));
}
// Leaking both ResultSet and Statement cleans both; the Connection survives.
@Test
public void testStatementLeak() throws SQLException {
ThreadedHousekeeper housekeeper = getHousekeeper();
assertNotNull(housekeeper);
int connId = System.identityHashCode(conn);
Statement stmt = conn.createStatement();
int stmtId = System.identityHashCode(stmt);
ResultSet rs = stmt.executeQuery("SELECT 1");
int rsId = System.identityHashCode(rs);
// Drop both references so GC can reclaim ResultSet and Statement.
rs = null;
stmt = null;
sleep();
assertTrue(housekeeper.testCheckCleaned(rsId));
sleep();
assertTrue(housekeeper.testCheckCleaned(stmtId));
sleep();
assertFalse(housekeeper.testCheckCleaned(connId));
}
// Explicitly closing ResultSet and Statement yields the same "cleaned" state.
@Test
public void testStatementNoLeak() throws SQLException {
ThreadedHousekeeper housekeeper = getHousekeeper();
assertNotNull(housekeeper);
int connId = System.identityHashCode(conn);
Statement stmt = conn.createStatement();
int stmtId = System.identityHashCode(stmt);
ResultSet rs = stmt.executeQuery("SELECT 1");
int rsId = System.identityHashCode(rs);
rs.close();
stmt.close();
sleep();
assertTrue(housekeeper.testCheckCleaned(rsId));
sleep();
assertTrue(housekeeper.testCheckCleaned(stmtId));
sleep();
assertFalse(housekeeper.testCheckCleaned(connId));
}
// Leaking everything — including the Connection — cleans all three.
@Test
public void testConnectionLeak() throws SQLException {
ThreadedHousekeeper housekeeper = getHousekeeper();
assertNotNull(housekeeper);
int connId = System.identityHashCode(conn);
Statement stmt = conn.createStatement();
int stmtId = System.identityHashCode(stmt);
ResultSet rs = stmt.executeQuery("SELECT 1");
int rsId = System.identityHashCode(rs);
// Drop all references (conn is the instance field) so GC can reclaim them.
rs = null;
stmt = null;
conn = null;
sleep();
assertTrue(housekeeper.testCheckCleaned(rsId));
sleep();
assertTrue(housekeeper.testCheckCleaned(stmtId));
sleep();
assertTrue(housekeeper.testCheckCleaned(connId));
}
// Explicitly closing everything also leaves all three reported as cleaned.
@Test
public void testConnectionNoLeak() throws SQLException {
int connId = System.identityHashCode(conn);
Statement stmt = conn.createStatement();
int stmtId = System.identityHashCode(stmt);
ResultSet rs = stmt.executeQuery("SELECT 1");
int rsId = System.identityHashCode(rs);
rs.close();
stmt.close();
conn.close();
ThreadedHousekeeper housekeeper = getHousekeeper();
assertNotNull(housekeeper);
sleep();
assertTrue(housekeeper.testCheckCleaned(rsId));
sleep();
assertTrue(housekeeper.testCheckCleaned(stmtId));
sleep();
assertTrue(housekeeper.testCheckCleaned(connId));
}
// With housekeeping disabled via connection properties, no housekeeper exists.
@Test
public void testNoHousekeeper() throws Exception {
Properties settings = new Properties();
settings.setProperty("housekeeper", FALSE.toString());
try (Connection conn = TestUtil.openDB(settings)) {
try (Statement stmt = conn.createStatement()) {
try (ResultSet rs = stmt.executeQuery("SELECT 1")) {
Housekeeper.Ref housekeeper = (conn.unwrap(PGDirectConnection.class)).housekeeper;
assertNull(housekeeper);
}
}
}
}
// Force a GC and give the housekeeper thread ~100ms to process cleared references.
private void sleep() {
System.gc();
try {
Thread.sleep(100);
}
catch (InterruptedException e) {
// Ignore...
}
}
}
|
/*
* Copyright 2017-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package example.springdata.jdbc.basics.aggregate;
import example.springdata.jdbc.basics.Output;
import java.time.Period;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.autoconfigure.jdbc.AutoConfigureJdbc;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;
/**
 * Demonstrates various possibilities to customize the behavior of a repository.
 *
 * @author Jens Schauder
 */
@RunWith(SpringRunner.class)
@SpringBootTest(classes = AggregateConfiguration.class)
@AutoConfigureJdbc
public class AggregateTests {

    @Autowired LegoSetRepository repository;

    /**
     * Saves a LegoSet aggregate, then mutates its manual and models and saves it
     * twice more, listing the repository contents after each round trip.
     */
    @Test
    public void exerciseSomewhatComplexEntity() {
        LegoSet legoSet = createLegoSet();

        legoSet.setManual(new Manual("Just put all the pieces together in the right order", "Jens Schauder"));
        legoSet.addModel("suv", "SUV with sliding doors.");
        legoSet.addModel("roadster", "Slick red roadster.");
        repository.save(legoSet);
        Output.list(repository.findAll(), "Original LegoSet");

        legoSet.getManual().setText("Just make it so it looks like a car.");
        legoSet.addModel("pickup", "A pickup truck with some tools in the back.");
        repository.save(legoSet);
        Output.list(repository.findAll(), "Updated");

        legoSet.setManual(new Manual("One last attempt: Just build a car! Ok?", "Jens Schauder"));
        repository.save(legoSet);
        Output.list(repository.findAll(), "Manual replaced");
    }

    /** Builds a fresh set suitable for ages 5 through 12. */
    private LegoSet createLegoSet() {
        LegoSet result = new LegoSet();
        result.setName("Small Car 01");
        result.setMinimumAge(Period.ofYears(5));
        result.setMaximumAge(Period.ofYears(12));
        return result;
    }
}
|
/**
* Copyright (C) 2014-2018 Philip Helger (www.helger.com)
* philip[at]helger[dot]com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.helger.genericode.excel;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.poi.ss.usermodel.Sheet;
import org.apache.poi.ss.usermodel.Workbook;
import org.junit.Test;
import org.w3c.dom.Document;
import com.helger.commons.io.resource.ClassPathResource;
import com.helger.commons.io.resource.IReadableResource;
import com.helger.genericode.Genericode10CodeListMarshaller;
import com.helger.genericode.v10.CodeListDocument;
import com.helger.genericode.v10.UseType;
import com.helger.poi.excel.EExcelVersion;
/**
 * Test class for class {@link ExcelSheetToCodeList10}.
 *
 * @author Philip Helger
 */
public final class ExcelSheetToCodeList10Test
{
/**
 * Reads a two-column XLS sheet, converts it into a Genericode 1.0 simple code
 * list, marshals it to XML and reads it back to verify round-tripping.
 */
@Test
public void testReadExcel () throws URISyntaxException
{
// Where is the Excel?
final IReadableResource aXls = new ClassPathResource ("excel/Simple1.xls");
assertTrue (aXls.exists ());
// Interpret as Excel
final Workbook aWB = EExcelVersion.XLS.readWorkbook (aXls.getInputStream ());
assertNotNull (aWB);
// Check whether all required sheets are present
final Sheet aSheet = aWB.getSheetAt (0);
assertNotNull (aSheet);
// Skip the header row; column 0 supplies the short name of each entry
final ExcelReadOptions <UseType> aReadOptions = new ExcelReadOptions <UseType> ().setLinesToSkip (1)
.setLineIndexShortName (0);
aReadOptions.addColumn (0, "id", UseType.REQUIRED, "string", true);
aReadOptions.addColumn (1, "name", UseType.REQUIRED, "string", false);
final CodeListDocument aCodeList = ExcelSheetToCodeList10.convertToSimpleCodeList (aSheet,
aReadOptions,
"ExampleList",
"1.0",
new URI ("urn:www.helger.com:names:example"),
new URI ("urn:www.helger.com:names:example-1.0"),
null);
// Marshal to DOM and re-read to ensure the generated code list is valid
final Document aDoc = new Genericode10CodeListMarshaller ().getAsDocument (aCodeList);
assertNotNull (aDoc);
final CodeListDocument aCLDoc = new Genericode10CodeListMarshaller ().read (aDoc);
assertNotNull (aCLDoc);
}
}
|
package com.amaljoyc.kafka4hack.producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import java.util.HashMap;
import java.util.Map;
/**
 * Spring configuration wiring a String/String Kafka producer: config map,
 * producer factory, template and the application's Sender bean.
 */
@Configuration
public class SenderConfig {
// Comma-separated broker addresses, injected from configuration.
@Value("${kafka.servers}")
private String kafkaServers;
/** Producer settings: bootstrap servers plus String serializers for key and value. */
@Bean
public Map<String, Object> producerConfigs() {
Map<String, Object> props = new HashMap<>();
// list of host:port pairs used for establishing the initial connections to the Kafka cluster
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
return props;
}
/** Factory building producers from {@link #producerConfigs()}. */
@Bean
public ProducerFactory<String, String> producerFactory() {
return new DefaultKafkaProducerFactory<>(producerConfigs());
}
/** High-level send API backed by {@link #producerFactory()}. */
@Bean
public KafkaTemplate<String, String> kafkaTemplate() {
return new KafkaTemplate<>(producerFactory());
}
/** Application-level sender component. */
@Bean
public Sender sender() {
return new Sender();
}
}
|
package mx.rmm.simpleconcise.roo.web.util;
import java.util.Iterator;
import java.util.Map;
import javax.faces.FacesException;
import javax.faces.application.NavigationHandler;
import javax.faces.application.ViewExpiredException;
import javax.faces.context.ExceptionHandler;
import javax.faces.context.ExceptionHandlerWrapper;
import javax.faces.context.FacesContext;
import javax.faces.event.ExceptionQueuedEvent;
import javax.faces.event.ExceptionQueuedEventContext;
/**
 * JSF exception handler that intercepts {@link ViewExpiredException}s,
 * navigates the user to the {@code /viewExpired} page, and delegates every
 * other queued exception to the wrapped handler.
 */
public class ViewExpiredExceptionExceptionHandler extends ExceptionHandlerWrapper {

    /** Handler that receives whatever this wrapper does not consume. */
    private final ExceptionHandler wrapped;

    public ViewExpiredExceptionExceptionHandler(ExceptionHandler wrapped) {
        this.wrapped = wrapped;
    }

    @Override
    public ExceptionHandler getWrapped() {
        return this.wrapped;
    }

    @Override
    public void handle() throws FacesException {
        Iterator<ExceptionQueuedEvent> events = getUnhandledExceptionQueuedEvents().iterator();
        while (events.hasNext()) {
            ExceptionQueuedEventContext queuedContext =
                    (ExceptionQueuedEventContext) events.next().getSource();
            Throwable thrown = queuedContext.getException();
            if (!(thrown instanceof ViewExpiredException)) {
                continue;
            }
            ViewExpiredException expired = (ViewExpiredException) thrown;
            FacesContext facesContext = FacesContext.getCurrentInstance();
            NavigationHandler navigation = facesContext.getApplication().getNavigationHandler();
            try {
                // Expose the expired view id to the target page via request scope.
                facesContext.getExternalContext().getRequestMap()
                        .put("currentViewId", expired.getViewId());
                navigation.handleNavigation(facesContext, null, "/viewExpired");
                facesContext.renderResponse();
            } finally {
                // Always mark the event as handled, even if navigation throws.
                events.remove();
            }
        }
        // The queue no longer contains ViewExpiredExceptions; let the parent handle the rest.
        getWrapped().handle();
    }
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.servicecomb.common.rest.codec;
import javax.servlet.http.Part;
import io.vertx.core.buffer.Buffer;
/**
 * Wrapper around Vert.x's HttpClientRequest, which has no getHeader
 * capability; when writing cookie parameters there is no way to append
 * cookies across multiple calls, so this interface wraps the request to
 * make that possible.
 */
public interface RestClientRequest {
  /** Writes the given buffer as (part of) the request body. */
  void write(Buffer bodyBuffer);
  /** Finishes the request. */
  void end();
  /** Appends a cookie name/value pair to the outgoing request. */
  void addCookie(String name, String value);
  /** Sets a header name/value pair on the outgoing request. */
  void putHeader(String name, String value);
  /** Adds a form field (name/value) to the request payload. */
  void addForm(String name, Object value);
  /** Returns the accumulated body buffer; may fail while assembling it. */
  Buffer getBodyBuffer() throws Exception;
  /** Attaches a multipart file part under the given field name. */
  void attach(String name, Part part);
}
|
package net.minecraft.entity.ai;
import net.minecraft.entity.EntityLiving;
import net.minecraft.entity.EntityLivingBase;
import net.minecraft.entity.ai.EntityAIBase;
import net.minecraft.world.World;
/**
 * AI task that makes the owning entity chase and melee-attack its current
 * attack target, with a range-dependent movement speed and a 20-tick swing
 * cooldown.
 */
public class EntityAIOcelotAttack extends EntityAIBase {
   World theWorld;
   EntityLiving theEntity;
   // Current attack target; assigned in shouldExecute(), cleared in resetTask().
   EntityLivingBase b;
   // Ticks remaining until the next melee swing is allowed.
   int attackCountdown;

   public EntityAIOcelotAttack(EntityLiving var1) {
      this.theEntity = var1;
      this.theWorld = var1.worldObj;
      // Mutex bits 3: this task is exclusive with both movement and look tasks.
      this.setMutexBits(3);
   }

   /**
    * The task may start only while the owner has an attack target.
    * Bug fix: the previous code fetched the target, discarded it, and
    * unconditionally returned false — so the task could never run and
    * {@code b} was never assigned, making continueExecuting()/updateTask()
    * NPE-prone if ever invoked.
    */
   public boolean shouldExecute() {
      EntityLivingBase var1 = this.theEntity.getAttackTarget();
      if (var1 == null) {
         return false;
      }

      this.b = var1;
      return true;
   }

   /** Keep running while the target is alive, within 15 blocks (225 = 15^2), and a path remains (or a fresh target exists). */
   public boolean continueExecuting() {
      return this.b.isEntityAlive() && this.theEntity.getDistanceSqToEntity(this.b) <= 225.0D && (!this.theEntity.getNavigator().noPath() || this.shouldExecute());
   }

   /** Drops the target and cancels any in-progress path. */
   public void resetTask() {
      this.b = null;
      this.theEntity.getNavigator().clearPathEntity();
   }

   /** Chases the target at a distance-dependent speed and attacks every 20 ticks when in reach. */
   public void updateTask() {
      this.theEntity.getLookHelper().setLookPositionWithEntity(this.b, 30.0F, 30.0F);
      // Squared melee reach: (width * 2)^2.
      double var1 = (double)(this.theEntity.width * 2.0F * this.theEntity.width * 2.0F);
      double var3 = this.theEntity.getDistanceSq(this.b.posX, this.b.getEntityBoundingBox().minY, this.b.posZ);
      double var5 = 0.8D;
      if(var3 > var1 && var3 < 16.0D) {
         // Just outside reach but within 4 blocks: sprint to close the gap.
         var5 = 1.33D;
      } else if(var3 < 225.0D) {
         // Stalking range (< 15 blocks): creep toward the target.
         var5 = 0.6D;
      }

      this.theEntity.getNavigator().tryMoveToEntityLiving(this.b, var5);
      this.attackCountdown = Math.max(this.attackCountdown - 1, 0);
      if(var3 <= var1 && this.attackCountdown <= 0) {
         this.attackCountdown = 20;
         this.theEntity.attackEntityAsMob(this.b);
      }
   }
}
|
package com.zql.app_ji.Adapter.FavoriteAdapter;
import android.content.Context;
import android.content.Intent;
import android.support.v7.widget.CardView;
import android.support.v7.widget.RecyclerView;
import android.view.ContextMenu;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.TextView;
import com.zql.app_ji.Bean.Entity.MovieEntity;
import com.zql.app_ji.Bean.InterfaceState;
import com.zql.app_ji.Prestener.PrestenerFavoriteActivityImp;
import com.zql.app_ji.R;
import com.zql.app_ji.View.Activitys.DoubanMovieActivity;
import java.util.List;
/**
 * RecyclerView adapter for the favorite-movie list. Each card shows a movie
 * title themed with the presenter-supplied day/night colors; a tap opens the
 * movie detail activity, and a long press records the pressed position and
 * triggers the row's context menu.
 */
public class FavoriteMovieRecyclerAdapter extends RecyclerView.Adapter<FavoriteMovieRecyclerAdapter.Viewholder> {
    private Context context;
    private List<MovieEntity> favoriteEntyList;
    private PrestenerFavoriteActivityImp prestenerFavoriteActivityImp;
    // Adapter position of the most recently long-pressed item (context-menu target).
    // NOTE(review): this can go stale if the data set changes between the long
    // press and the menu action — verify against the calling activity.
    private int index;
    private int getIndex(){return this.index;}
    private void setIndex(int position){this.index=position;}
    public FavoriteMovieRecyclerAdapter(Context mcontext, List<MovieEntity> movieEntities, PrestenerFavoriteActivityImp prestenerFavoriteActivityImp){
        this.context=mcontext;
        this.favoriteEntyList=movieEntities;
        this.prestenerFavoriteActivityImp=prestenerFavoriteActivityImp;
    }
    @Override
    public void onBindViewHolder(Viewholder holder, int position) {
        // Apply the current theme (night-mode) colors provided by the presenter.
        InterfaceState interfaceState=prestenerFavoriteActivityImp.getTheNightstatefromUserseting();
        holder.cardView.setBackgroundColor(interfaceState.getItemcolor());
        holder.titletext.setText(favoriteEntyList.get(position).getTitle());
        holder.titletext.setTextColor(interfaceState.getTextcolor());
        // Listeners are (re)attached on every bind and capture this row's data.
        startMoviedetailActivity(holder.cardView,favoriteEntyList.get(position).getMovie_id());
        startContextMenu(holder.cardView,position);
    }
    @Override
    public Viewholder onCreateViewHolder(ViewGroup parent, int viewType) {
        View view=LayoutInflater.from(context).inflate(R.layout.item_favorite,parent,false);
        Viewholder viewholder=new Viewholder(view);
        return viewholder;
    }
    @Override
    public int getItemCount() {
        // Treat a null backing list as empty.
        return favoriteEntyList==null ? 0 : favoriteEntyList.size();
    }
    /** Row holder; also builds the per-row context menu ("delete"/"share" entries). */
    public class Viewholder extends RecyclerView.ViewHolder implements View.OnCreateContextMenuListener {
        private TextView titletext;
        private CardView cardView;
        public Viewholder(View view){
            super(view);
            titletext=(TextView)view.findViewById(R.id.favorite_title_text);
            cardView=(CardView)view.findViewById(R.id.favorite_card);
            view.setOnCreateContextMenuListener(this);
        }
        @Override
        public void onCreateContextMenu(ContextMenu menu, View v, ContextMenu.ContextMenuInfo menuInfo) {
            // Group id 201; item ids 0 = delete ("删除"), 1 = share ("分享").
            menu.add(201,0,ContextMenu.NONE,"删除");
            menu.add(201,1,ContextMenu.NONE,"分享");
        }
    }
    /**
     * Open the movie detail screen when the card is tapped.
     * @param view the row's card view
     * @param id the Douban movie id passed to the detail activity
     */
    private void startMoviedetailActivity(View view,final String id){
        view.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                Intent mintent=new Intent(context,DoubanMovieActivity.class);
                mintent.putExtra("id",id);
                context.startActivity(mintent);
            }
        });
    }
    /**
     * Arm the context menu: remember which position was long-pressed.
     * @param view the row's card view
     * @param position the adapter position to record
     */
    private void startContextMenu(View view,final int position){
        view.setOnLongClickListener(new View.OnLongClickListener() {
            @Override
            public boolean onLongClick(View view) {
                setIndex(position);
                // Return false so the default long-press handling (the context
                // menu registered in Viewholder) still fires.
                return false;
            }
        });
    }
    /**
     * Return the entity of the most recently long-pressed item.
     * @return the movie entity at the recorded long-press position
     */
    public MovieEntity getLongClickResult(){
        return favoriteEntyList.get(getIndex());
    }
}
|
package loon.font;
import loon.canvas.LColor;
import loon.geom.PointI;
import loon.opengl.GLEx;
import loon.opengl.LSTRFont;
/**
 * IFont implementation backed by an LSTRFont that can render a "shadow"
 * pass: when enabled, the text is first drawn in shadowColor (its alpha
 * scaled by shadowAlpha times the text color's alpha) and then drawn again
 * in the requested color. Note that both passes use identical coordinates
 * (x + _offset.x, y + _offset.y) — the shadow is an underlay at the same
 * position, not a displaced drop shadow.
 */
public class ShadowFont implements IFont {
	private boolean withShadow = false;
	// Shadow tint; defaults to opaque black. Its alpha is overwritten per draw.
	private LColor shadowColor = new LColor(0f, 0f, 0f, 1f);
	private float shadowAlpha = 1f;
	// Underlying glyph renderer; all drawing and metrics delegate to it.
	private LSTRFont strfont;
	// -1 means "not overridden": fall back to strfont's value.
	private int _size = -1;
	private float _ascent = -1;
	// Pixel offset applied to every draw position.
	private PointI _offset = new PointI();
	public LSTRFont getStrFont() {
		return strfont;
	}
	public LFont getFont() {
		return strfont.getFont();
	}
	/**
	 * Builds the font from a set of messages; when append is non-null it is
	 * added as one extra entry after the copied messages.
	 */
	public ShadowFont(LFont font, String[] messages, String append,
			boolean shadow) {
		if (append != null) {
			int size = messages.length + 1;
			String[] dest = new String[size];
			dest[size - 1] = append;
			System.arraycopy(messages, 0, dest, 0, messages.length);
			this.strfont = new LSTRFont(font, dest, true);
		} else {
			this.strfont = new LSTRFont(font, messages, true);
		}
		this.withShadow = shadow;
	}
	/** Builds the font from the concatenation of message and append. */
	public ShadowFont(LFont font, String message, String append, boolean shadow) {
		this.strfont = new LSTRFont(font, message + append, true);
		this.withShadow = shadow;
	}
	/** Draws text (shadow pass first when enabled) without an explicit GLEx context. */
	public void drawString(String text, float x, float y, LColor color) {
		if (this.withShadow) {
			// Shadow alpha tracks the text color's alpha.
			this.shadowColor.a = (this.shadowAlpha * color.a);
			strfont.drawString(text, x + _offset.x, y + _offset.y, shadowColor);
		}
		strfont.drawString(text, x + _offset.x, y + _offset.y, color);
	}
	@Override
	public void drawString(GLEx g, String text, float x, float y) {
		drawString(g, text, x, y, LColor.white);
	}
	@Override
	public void drawString(GLEx g, String text, float x, float y, LColor color) {
		if (this.withShadow) {
			this.shadowColor.a = (this.shadowAlpha * color.a);
			strfont.drawString(g, text, x + _offset.x, y + _offset.y,
					shadowColor);
		}
		strfont.drawString(g, text, x + _offset.x, y + _offset.y, color);
	}
	/** Rotated variant; same two-pass scheme. */
	@Override
	public void drawString(GLEx g, String string, float x, float y,
			float rotation, LColor c) {
		if (this.withShadow) {
			this.shadowColor.a = (this.shadowAlpha * c.a);
			strfont.drawString(g, string, x + _offset.x, y + _offset.y,
					rotation, shadowColor);
		}
		strfont.drawString(g, string, x + _offset.x, y + _offset.y, rotation, c);
	}
	/**
	 * Scaled/anchored/rotated variant. Note LSTRFont's transform overload
	 * takes the string AFTER the transform parameters — argument order here
	 * is intentional.
	 */
	public void drawString(GLEx g, String string, float x, float y, float sx,
			float sy, float ax, float ay, float rotation, LColor c) {
		if (this.withShadow) {
			this.shadowColor.a = (this.shadowAlpha * c.a);
			strfont.drawString(g, x + _offset.x, y + _offset.y, sx, sy, ax, ay,
					rotation, string, shadowColor);
		}
		strfont.drawString(g, x + _offset.x, y + _offset.y, sx, sy, ax, ay,
				rotation, string, c);
	}
	public void setShadowColor(LColor color) {
		this.shadowColor = color;
	}
	public void setShadowAlpha(float alpha) {
		this.shadowAlpha = alpha;
	}
	public void setShadow(boolean shadow) {
		this.withShadow = shadow;
	}
	public boolean isShadowEffect() {
		return this.withShadow;
	}
	public LColor getShadowColor() {
		return this.shadowColor;
	}
	public boolean isWithShadow() {
		return withShadow;
	}
	public void setWithShadow(boolean withShadow) {
		this.withShadow = withShadow;
	}
	public float getShadowAlpha() {
		return shadowAlpha;
	}
	@Override
	public int stringWidth(String width) {
		return strfont.getWidth(width);
	}
	@Override
	public int stringHeight(String height) {
		return strfont.getHeight(height);
	}
	@Override
	public int getHeight() {
		return strfont.getHeight();
	}
	@Override
	public float getAscent() {
		// -1 means no override was set via setAssent().
		return this._ascent == -1 ? strfont.getAscent() : this._ascent;
	}
	/**
	 * Truncates s with a trailing "..." so its rendered width stays under
	 * the given pixel width; returns s unchanged if it already fits.
	 */
	@Override
	public String confineLength(String s, int width) {
		int length = 0;
		for (int i = 0; i < s.length(); i++) {
			length += stringWidth(String.valueOf(s.charAt(i)));
			if (length >= width) {
				int pLength = stringWidth("...");
				// Back off characters until the text plus ellipsis fits.
				while (length + pLength >= width && i >= 0) {
					length -= stringWidth(String.valueOf(s.charAt(i)));
					i--;
				}
				s = s.substring(0, ++i) + "...";
				break;
			}
		}
		return s;
	}
	@Override
	public int getSize() {
		// -1 means no override was set via setSize().
		return this._size == -1 ? strfont.getSize() : this._size;
	}
	@Override
	public PointI getOffset() {
		return _offset;
	}
	@Override
	public void setOffset(PointI val) {
		_offset.set(val);
	}
	@Override
	public void setOffsetX(int x) {
		_offset.x = x;
	}
	@Override
	public void setOffsetY(int y) {
		_offset.y = y;
	}
	// Interface-mandated name (sic: "Assent"); overrides the reported ascent.
	@Override
	public void setAssent(float assent) {
		this._ascent = assent;
	}
	@Override
	public void setSize(int size) {
		this._size = size;
	}
	@Override
	public int charWidth(char c) {
		return strfont.charWidth(c);
	}
	@Override
	public void close() {
		// No owned resources released here; strfont's lifecycle appears to be
		// managed elsewhere — TODO confirm.
	}
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.ConsumerGroupState;
import org.apache.kafka.common.ElectionType;
import org.apache.kafka.common.IsolationLevel;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AccessControlEntryFilter;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclBindingFilter;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.errors.NotCoordinatorException;
import org.apache.kafka.common.errors.NotEnoughReplicasException;
import org.apache.kafka.common.errors.SecurityDisabledException;
import org.apache.kafka.common.errors.UnknownServerException;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.AddOffsetsToTxnRequestData;
import org.apache.kafka.common.message.AddOffsetsToTxnResponseData;
import org.apache.kafka.common.message.AlterClientQuotasResponseData;
import org.apache.kafka.common.message.AlterConfigsResponseData;
import org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData;
import org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData;
import org.apache.kafka.common.message.AlterReplicaLogDirsRequestData;
import org.apache.kafka.common.message.AlterReplicaLogDirsRequestData.AlterReplicaLogDirTopic;
import org.apache.kafka.common.message.AlterReplicaLogDirsRequestData.AlterReplicaLogDirTopicCollection;
import org.apache.kafka.common.message.AlterReplicaLogDirsResponseData;
import org.apache.kafka.common.message.ApiVersionsRequestData;
import org.apache.kafka.common.message.ApiVersionsResponseData;
import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersionsResponseKey;
import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersionsResponseKeyCollection;
import org.apache.kafka.common.message.ControlledShutdownRequestData;
import org.apache.kafka.common.message.ControlledShutdownResponseData;
import org.apache.kafka.common.message.ControlledShutdownResponseData.RemainingPartition;
import org.apache.kafka.common.message.ControlledShutdownResponseData.RemainingPartitionCollection;
import org.apache.kafka.common.message.CreateAclsRequestData;
import org.apache.kafka.common.message.CreateAclsResponseData;
import org.apache.kafka.common.message.CreateDelegationTokenRequestData;
import org.apache.kafka.common.message.CreateDelegationTokenRequestData.CreatableRenewers;
import org.apache.kafka.common.message.CreateDelegationTokenResponseData;
import org.apache.kafka.common.message.CreatePartitionsRequestData;
import org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsAssignment;
import org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsTopic;
import org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsTopicCollection;
import org.apache.kafka.common.message.CreatePartitionsResponseData;
import org.apache.kafka.common.message.CreatePartitionsResponseData.CreatePartitionsTopicResult;
import org.apache.kafka.common.message.CreateTopicsRequestData;
import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableReplicaAssignment;
import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic;
import org.apache.kafka.common.message.CreateTopicsRequestData.CreateableTopicConfig;
import org.apache.kafka.common.message.CreateTopicsResponseData;
import org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicConfigs;
import org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult;
import org.apache.kafka.common.message.DeleteAclsRequestData;
import org.apache.kafka.common.message.DeleteAclsResponseData;
import org.apache.kafka.common.message.DeleteGroupsRequestData;
import org.apache.kafka.common.message.DeleteGroupsResponseData;
import org.apache.kafka.common.message.DeleteGroupsResponseData.DeletableGroupResult;
import org.apache.kafka.common.message.DeleteGroupsResponseData.DeletableGroupResultCollection;
import org.apache.kafka.common.message.DeleteTopicsRequestData;
import org.apache.kafka.common.message.DeleteTopicsResponseData;
import org.apache.kafka.common.message.DeleteTopicsResponseData.DeletableTopicResult;
import org.apache.kafka.common.message.DescribeAclsResponseData;
import org.apache.kafka.common.message.DescribeAclsResponseData.AclDescription;
import org.apache.kafka.common.message.DescribeAclsResponseData.DescribeAclsResource;
import org.apache.kafka.common.message.DescribeClientQuotasResponseData;
import org.apache.kafka.common.message.DescribeConfigsRequestData;
import org.apache.kafka.common.message.DescribeConfigsResponseData;
import org.apache.kafka.common.message.DescribeConfigsResponseData.DescribeConfigsResourceResult;
import org.apache.kafka.common.message.DescribeConfigsResponseData.DescribeConfigsResult;
import org.apache.kafka.common.message.DescribeGroupsRequestData;
import org.apache.kafka.common.message.DescribeGroupsResponseData;
import org.apache.kafka.common.message.DescribeGroupsResponseData.DescribedGroup;
import org.apache.kafka.common.message.ElectLeadersResponseData.PartitionResult;
import org.apache.kafka.common.message.ElectLeadersResponseData.ReplicaElectionResult;
import org.apache.kafka.common.message.EndTxnRequestData;
import org.apache.kafka.common.message.EndTxnResponseData;
import org.apache.kafka.common.message.ExpireDelegationTokenRequestData;
import org.apache.kafka.common.message.ExpireDelegationTokenResponseData;
import org.apache.kafka.common.message.FetchRequestData;
import org.apache.kafka.common.message.FetchResponseData;
import org.apache.kafka.common.message.FindCoordinatorRequestData;
import org.apache.kafka.common.message.HeartbeatRequestData;
import org.apache.kafka.common.message.HeartbeatResponseData;
import org.apache.kafka.common.message.IncrementalAlterConfigsRequestData;
import org.apache.kafka.common.message.IncrementalAlterConfigsRequestData.AlterConfigsResource;
import org.apache.kafka.common.message.IncrementalAlterConfigsRequestData.AlterableConfig;
import org.apache.kafka.common.message.IncrementalAlterConfigsResponseData;
import org.apache.kafka.common.message.IncrementalAlterConfigsResponseData.AlterConfigsResourceResponse;
import org.apache.kafka.common.message.InitProducerIdRequestData;
import org.apache.kafka.common.message.InitProducerIdResponseData;
import org.apache.kafka.common.message.JoinGroupRequestData;
import org.apache.kafka.common.message.JoinGroupResponseData;
import org.apache.kafka.common.message.JoinGroupResponseData.JoinGroupResponseMember;
import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState;
import org.apache.kafka.common.message.LeaderAndIsrResponseData;
import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity;
import org.apache.kafka.common.message.LeaveGroupResponseData;
import org.apache.kafka.common.message.ListGroupsRequestData;
import org.apache.kafka.common.message.ListGroupsResponseData;
import org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsPartition;
import org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsTopic;
import org.apache.kafka.common.message.ListOffsetsResponseData;
import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse;
import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse;
import org.apache.kafka.common.message.ListPartitionReassignmentsRequestData;
import org.apache.kafka.common.message.ListPartitionReassignmentsResponseData;
import org.apache.kafka.common.message.OffsetCommitRequestData;
import org.apache.kafka.common.message.OffsetCommitResponseData;
import org.apache.kafka.common.message.OffsetDeleteRequestData;
import org.apache.kafka.common.message.OffsetDeleteRequestData.OffsetDeleteRequestPartition;
import org.apache.kafka.common.message.OffsetDeleteRequestData.OffsetDeleteRequestTopic;
import org.apache.kafka.common.message.OffsetDeleteRequestData.OffsetDeleteRequestTopicCollection;
import org.apache.kafka.common.message.OffsetDeleteResponseData;
import org.apache.kafka.common.message.OffsetDeleteResponseData.OffsetDeleteResponsePartition;
import org.apache.kafka.common.message.OffsetDeleteResponseData.OffsetDeleteResponsePartitionCollection;
import org.apache.kafka.common.message.OffsetDeleteResponseData.OffsetDeleteResponseTopic;
import org.apache.kafka.common.message.OffsetDeleteResponseData.OffsetDeleteResponseTopicCollection;
import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset;
import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderPartition;
import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderTopic;
import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderTopicCollection;
import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData;
import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.OffsetForLeaderTopicResult;
import org.apache.kafka.common.message.ProduceRequestData;
import org.apache.kafka.common.message.RenewDelegationTokenRequestData;
import org.apache.kafka.common.message.RenewDelegationTokenResponseData;
import org.apache.kafka.common.message.SaslAuthenticateRequestData;
import org.apache.kafka.common.message.SaslAuthenticateResponseData;
import org.apache.kafka.common.message.SaslHandshakeRequestData;
import org.apache.kafka.common.message.SaslHandshakeResponseData;
import org.apache.kafka.common.message.StopReplicaRequestData.StopReplicaPartitionState;
import org.apache.kafka.common.message.StopReplicaRequestData.StopReplicaTopicState;
import org.apache.kafka.common.message.StopReplicaResponseData;
import org.apache.kafka.common.message.SyncGroupRequestData;
import org.apache.kafka.common.message.SyncGroupRequestData.SyncGroupRequestAssignment;
import org.apache.kafka.common.message.SyncGroupResponseData;
import org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataBroker;
import org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataEndpoint;
import org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState;
import org.apache.kafka.common.message.UpdateMetadataResponseData;
import org.apache.kafka.common.network.ListenerName;
import org.apache.kafka.common.network.Send;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.quota.ClientQuotaAlteration;
import org.apache.kafka.common.quota.ClientQuotaEntity;
import org.apache.kafka.common.quota.ClientQuotaFilter;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.SimpleRecord;
import org.apache.kafka.common.requests.CreateTopicsRequest.Builder;
import org.apache.kafka.common.requests.DescribeConfigsResponse.ConfigType;
import org.apache.kafka.common.requests.FindCoordinatorRequest.CoordinatorType;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourcePatternFilter;
import org.apache.kafka.common.resource.ResourceType;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
import org.apache.kafka.common.security.auth.SecurityProtocol;
import org.apache.kafka.common.security.token.delegation.DelegationToken;
import org.apache.kafka.common.security.token.delegation.TokenInformation;
import org.apache.kafka.common.utils.SecurityUtils;
import org.apache.kafka.common.utils.Utils;
import org.junit.Test;
import java.nio.BufferUnderflowException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import static java.util.Arrays.asList;
import static java.util.Collections.emptyList;
import static java.util.Collections.singletonList;
import static org.apache.kafka.common.protocol.ApiKeys.DESCRIBE_CONFIGS;
import static org.apache.kafka.common.protocol.ApiKeys.FETCH;
import static org.apache.kafka.common.protocol.ApiKeys.JOIN_GROUP;
import static org.apache.kafka.common.protocol.ApiKeys.LIST_GROUPS;
import static org.apache.kafka.common.protocol.ApiKeys.LIST_OFFSETS;
import static org.apache.kafka.common.protocol.ApiKeys.SYNC_GROUP;
import static org.apache.kafka.common.requests.FetchMetadata.INVALID_SESSION_ID;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
public class RequestResponseTest {
// Exception includes a message that we verify is not included in error responses
private final UnknownServerException unknownServerException = new UnknownServerException("secret");
/**
 * Serialization round-trip battery: for each API, builds representative requests/responses
 * via the create*() helpers, then uses checkRequest/checkResponse/checkErrorResponse to
 * serialize, parse back, and re-serialize them. The boolean argument controls whether the
 * two serialized forms must be byte-identical (disabled for payloads backed by hash maps,
 * whose element order may vary between passes).
 */
@Test
public void testSerialization() throws Exception {
    // FindCoordinator v0/v1
    checkRequest(createFindCoordinatorRequest(0), true);
    checkRequest(createFindCoordinatorRequest(1), true);
    checkErrorResponse(createFindCoordinatorRequest(0), unknownServerException, true);
    checkErrorResponse(createFindCoordinatorRequest(1), unknownServerException, true);
    checkResponse(createFindCoordinatorResponse(), 0, true);
    checkResponse(createFindCoordinatorResponse(), 1, true);
    // ControlledShutdown (latest-version request plus an explicit v0 error response)
    checkRequest(createControlledShutdownRequest(), true);
    checkResponse(createControlledShutdownResponse(), 1, true);
    checkErrorResponse(createControlledShutdownRequest(), unknownServerException, true);
    checkErrorResponse(createControlledShutdownRequest(0), unknownServerException, true);
    // Fetch, including v7 session metadata and a forget-topics list
    checkRequest(createFetchRequest(4), true);
    checkResponse(createFetchResponse(true), 4, true);
    List<TopicPartition> toForgetTopics = new ArrayList<>();
    toForgetTopics.add(new TopicPartition("foo", 0));
    toForgetTopics.add(new TopicPartition("foo", 2));
    toForgetTopics.add(new TopicPartition("bar", 0));
    checkRequest(createFetchRequest(7, new FetchMetadata(123, 456), toForgetTopics), true);
    checkResponse(createFetchResponse(123), 7, true);
    checkResponse(createFetchResponse(Errors.FETCH_SESSION_ID_NOT_FOUND, 123), 7, true);
    checkErrorResponse(createFetchRequest(7), unknownServerException, true);
    // Heartbeat
    checkRequest(createHeartBeatRequest(), true);
    checkErrorResponse(createHeartBeatRequest(), unknownServerException, true);
    checkResponse(createHeartBeatResponse(), 0, true);
    // JoinGroup / SyncGroup across every supported version
    for (int v = ApiKeys.JOIN_GROUP.oldestVersion(); v <= ApiKeys.JOIN_GROUP.latestVersion(); v++) {
        checkRequest(createJoinGroupRequest(v), true);
        checkErrorResponse(createJoinGroupRequest(v), unknownServerException, true);
        checkResponse(createJoinGroupResponse(v), v, true);
    }
    for (int v = ApiKeys.SYNC_GROUP.oldestVersion(); v <= ApiKeys.SYNC_GROUP.latestVersion(); v++) {
        checkRequest(createSyncGroupRequest(v), true);
        checkErrorResponse(createSyncGroupRequest(v), unknownServerException, true);
        checkResponse(createSyncGroupResponse(v), v, true);
    }
    // LeaveGroup
    checkRequest(createLeaveGroupRequest(), true);
    checkErrorResponse(createLeaveGroupRequest(), unknownServerException, true);
    checkResponse(createLeaveGroupResponse(), 0, true);
    // ListGroups (request equality not checked — map-backed fields may reorder)
    for (short v = ApiKeys.LIST_GROUPS.oldestVersion(); v <= ApiKeys.LIST_GROUPS.latestVersion(); v++) {
        checkRequest(createListGroupsRequest(v), false);
        checkErrorResponse(createListGroupsRequest(v), unknownServerException, true);
        checkResponse(createListGroupsResponse(v), v, true);
    }
    // DescribeGroups / DeleteGroups
    checkRequest(createDescribeGroupRequest(), true);
    checkErrorResponse(createDescribeGroupRequest(), unknownServerException, true);
    checkResponse(createDescribeGroupResponse(), 0, true);
    checkRequest(createDeleteGroupsRequest(), true);
    checkErrorResponse(createDeleteGroupsRequest(), unknownServerException, true);
    checkResponse(createDeleteGroupsResponse(), 0, true);
    // NOTE(review): unlike the other per-version loops in this method, this one starts at 0
    // instead of oldestVersion() and uses `<` so the latest LIST_OFFSETS version is skipped —
    // confirm whether the latest version should also be covered here.
    for (int i = 0; i < ApiKeys.LIST_OFFSETS.latestVersion(); i++) {
        checkRequest(createListOffsetRequest(i), true);
        checkErrorResponse(createListOffsetRequest(i), unknownServerException, true);
        checkResponse(createListOffsetResponse(i), i, true);
    }
    // Metadata v1-v4, including the "all topics" request form
    checkRequest(MetadataRequest.Builder.allTopics().build((short) 2), true);
    checkRequest(createMetadataRequest(1, Collections.singletonList("topic1")), true);
    checkErrorResponse(createMetadataRequest(1, Collections.singletonList("topic1")), unknownServerException, true);
    checkResponse(createMetadataResponse(), 2, true);
    checkErrorResponse(createMetadataRequest(2, Collections.singletonList("topic1")), unknownServerException, true);
    checkResponse(createMetadataResponse(), 3, true);
    checkErrorResponse(createMetadataRequest(3, Collections.singletonList("topic1")), unknownServerException, true);
    checkResponse(createMetadataResponse(), 4, true);
    checkErrorResponse(createMetadataRequest(4, Collections.singletonList("topic1")), unknownServerException, true);
    // OffsetFetch, with and without the require-stable flag
    checkRequest(createOffsetFetchRequestForAllPartition("group1", false), true);
    checkRequest(createOffsetFetchRequestForAllPartition("group1", true), true);
    checkErrorResponse(createOffsetFetchRequestForAllPartition("group1", false), new NotCoordinatorException("Not Coordinator"), true);
    checkErrorResponse(createOffsetFetchRequestForAllPartition("group1", true), new NotCoordinatorException("Not Coordinator"), true);
    checkRequest(createOffsetFetchRequest(0, false), true);
    checkRequest(createOffsetFetchRequest(1, false), true);
    checkRequest(createOffsetFetchRequest(2, false), true);
    checkRequest(createOffsetFetchRequest(7, true), true);
    // NOTE(review): the next two lines duplicate the all-partition checks a few lines above.
    checkRequest(createOffsetFetchRequestForAllPartition("group1", false), true);
    checkRequest(createOffsetFetchRequestForAllPartition("group1", true), true);
    checkErrorResponse(createOffsetFetchRequest(0, false), unknownServerException, true);
    checkErrorResponse(createOffsetFetchRequest(1, false), unknownServerException, true);
    checkErrorResponse(createOffsetFetchRequest(2, false), unknownServerException, true);
    checkErrorResponse(createOffsetFetchRequest(7, true), unknownServerException, true);
    checkResponse(createOffsetFetchResponse(), 0, true);
    // Produce v2/v3 plus a v8 response carrying an error message
    checkRequest(createProduceRequest(2), true);
    checkErrorResponse(createProduceRequest(2), unknownServerException, true);
    checkRequest(createProduceRequest(3), true);
    checkErrorResponse(createProduceRequest(3), unknownServerException, true);
    checkResponse(createProduceResponse(), 2, true);
    checkResponse(createProduceResponseWithErrorMessage(), 8, true);
    // StopReplica across every supported version, with and without delete-partitions
    for (int v = ApiKeys.STOP_REPLICA.oldestVersion(); v <= ApiKeys.STOP_REPLICA.latestVersion(); v++) {
        checkRequest(createStopReplicaRequest(v, true), true);
        checkRequest(createStopReplicaRequest(v, false), true);
        checkErrorResponse(createStopReplicaRequest(v, true), unknownServerException, true);
        checkErrorResponse(createStopReplicaRequest(v, false), unknownServerException, true);
        checkResponse(createStopReplicaResponse(), v, true);
    }
    // LeaderAndIsr v0-v2 (error-response equality not byte-checked)
    checkRequest(createLeaderAndIsrRequest(0), true);
    checkErrorResponse(createLeaderAndIsrRequest(0), unknownServerException, false);
    checkRequest(createLeaderAndIsrRequest(1), true);
    checkErrorResponse(createLeaderAndIsrRequest(1), unknownServerException, false);
    checkRequest(createLeaderAndIsrRequest(2), true);
    checkErrorResponse(createLeaderAndIsrRequest(2), unknownServerException, false);
    checkResponse(createLeaderAndIsrResponse(), 0, true);
    // SASL handshake and authenticate
    checkRequest(createSaslHandshakeRequest(), true);
    checkErrorResponse(createSaslHandshakeRequest(), unknownServerException, true);
    checkResponse(createSaslHandshakeResponse(), 0, true);
    checkRequest(createSaslAuthenticateRequest(), true);
    checkErrorResponse(createSaslAuthenticateRequest(), unknownServerException, true);
    checkResponse(createSaslAuthenticateResponse(), 0, true);
    checkResponse(createSaslAuthenticateResponse(), 1, true);
    // ApiVersions v0-v3, including the static default response
    checkRequest(createApiVersionRequest(), true);
    checkErrorResponse(createApiVersionRequest(), unknownServerException, true);
    checkErrorResponse(createApiVersionRequest(), new UnsupportedVersionException("Not Supported"), true);
    checkResponse(createApiVersionResponse(), 0, true);
    checkResponse(createApiVersionResponse(), 1, true);
    checkResponse(createApiVersionResponse(), 2, true);
    checkResponse(createApiVersionResponse(), 3, true);
    checkResponse(ApiVersionsResponse.DEFAULT_API_VERSIONS_RESPONSE, 0, true);
    checkResponse(ApiVersionsResponse.DEFAULT_API_VERSIONS_RESPONSE, 1, true);
    checkResponse(ApiVersionsResponse.DEFAULT_API_VERSIONS_RESPONSE, 2, true);
    checkResponse(ApiVersionsResponse.DEFAULT_API_VERSIONS_RESPONSE, 3, true);
    // CreateTopics / DeleteTopics / CreatePartitions across all versions
    for (int v = ApiKeys.CREATE_TOPICS.oldestVersion(); v <= ApiKeys.CREATE_TOPICS.latestVersion(); v++) {
        checkRequest(createCreateTopicRequest(v), true);
        checkErrorResponse(createCreateTopicRequest(v), unknownServerException, true);
        checkResponse(createCreateTopicResponse(), v, true);
    }
    for (int v = ApiKeys.DELETE_TOPICS.oldestVersion(); v <= ApiKeys.DELETE_TOPICS.latestVersion(); v++) {
        checkRequest(createDeleteTopicsRequest(v), true);
        checkErrorResponse(createDeleteTopicsRequest(v), unknownServerException, true);
        checkResponse(createDeleteTopicsResponse(), v, true);
    }
    for (int v = ApiKeys.CREATE_PARTITIONS.oldestVersion(); v <= ApiKeys.CREATE_PARTITIONS.latestVersion(); v++) {
        checkRequest(createCreatePartitionsRequest(v), true);
        checkRequest(createCreatePartitionsRequestWithAssignments(v), false);
        checkErrorResponse(createCreatePartitionsRequest(v), unknownServerException, true);
        checkResponse(createCreatePartitionsResponse(), v, true);
    }
    // Transactional APIs: InitProducerId, AddPartitions/AddOffsetsToTxn, EndTxn, WriteTxnMarkers
    checkRequest(createInitPidRequest(), true);
    checkErrorResponse(createInitPidRequest(), unknownServerException, true);
    checkResponse(createInitPidResponse(), 0, true);
    checkRequest(createAddPartitionsToTxnRequest(), true);
    checkResponse(createAddPartitionsToTxnResponse(), 0, true);
    checkErrorResponse(createAddPartitionsToTxnRequest(), unknownServerException, true);
    checkRequest(createAddOffsetsToTxnRequest(), true);
    checkResponse(createAddOffsetsToTxnResponse(), 0, true);
    checkErrorResponse(createAddOffsetsToTxnRequest(), unknownServerException, true);
    checkRequest(createEndTxnRequest(), true);
    checkResponse(createEndTxnResponse(), 0, true);
    checkErrorResponse(createEndTxnRequest(), unknownServerException, true);
    checkRequest(createWriteTxnMarkersRequest(), true);
    checkResponse(createWriteTxnMarkersResponse(), 0, true);
    checkErrorResponse(createWriteTxnMarkersRequest(), unknownServerException, true);
    // Fetch versions below the latest
    checkOlderFetchVersions();
    // Older metadata responses
    checkResponse(createMetadataResponse(), 0, true);
    checkResponse(createMetadataResponse(), 1, true);
    checkErrorResponse(createMetadataRequest(1, Collections.singletonList("topic1")), unknownServerException, true);
    // OffsetCommit v0-v5
    checkRequest(createOffsetCommitRequest(0), true);
    checkErrorResponse(createOffsetCommitRequest(0), unknownServerException, true);
    checkRequest(createOffsetCommitRequest(1), true);
    checkErrorResponse(createOffsetCommitRequest(1), unknownServerException, true);
    checkRequest(createOffsetCommitRequest(2), true);
    checkErrorResponse(createOffsetCommitRequest(2), unknownServerException, true);
    checkRequest(createOffsetCommitRequest(3), true);
    checkErrorResponse(createOffsetCommitRequest(3), unknownServerException, true);
    checkRequest(createOffsetCommitRequest(4), true);
    checkErrorResponse(createOffsetCommitRequest(4), unknownServerException, true);
    checkResponse(createOffsetCommitResponse(), 4, true);
    checkRequest(createOffsetCommitRequest(5), true);
    checkErrorResponse(createOffsetCommitRequest(5), unknownServerException, true);
    checkResponse(createOffsetCommitResponse(), 5, true);
    checkRequest(createJoinGroupRequest(0), true);
    // UpdateMetadata v0-v5, with and without rack information (map-backed, so no byte equality)
    checkRequest(createUpdateMetadataRequest(0, null), false);
    checkErrorResponse(createUpdateMetadataRequest(0, null), unknownServerException, true);
    checkRequest(createUpdateMetadataRequest(1, null), false);
    checkRequest(createUpdateMetadataRequest(1, "rack1"), false);
    checkErrorResponse(createUpdateMetadataRequest(1, null), unknownServerException, true);
    checkRequest(createUpdateMetadataRequest(2, "rack1"), false);
    checkRequest(createUpdateMetadataRequest(2, null), false);
    checkErrorResponse(createUpdateMetadataRequest(2, "rack1"), unknownServerException, true);
    checkRequest(createUpdateMetadataRequest(3, "rack1"), false);
    checkRequest(createUpdateMetadataRequest(3, null), false);
    checkErrorResponse(createUpdateMetadataRequest(3, "rack1"), unknownServerException, true);
    checkRequest(createUpdateMetadataRequest(4, "rack1"), false);
    checkRequest(createUpdateMetadataRequest(4, null), false);
    checkErrorResponse(createUpdateMetadataRequest(4, "rack1"), unknownServerException, true);
    checkRequest(createUpdateMetadataRequest(5, "rack1"), false);
    checkRequest(createUpdateMetadataRequest(5, null), false);
    checkErrorResponse(createUpdateMetadataRequest(5, "rack1"), unknownServerException, true);
    checkResponse(createUpdateMetadataResponse(), 0, true);
    // ListOffsets v0
    checkRequest(createListOffsetRequest(0), true);
    checkErrorResponse(createListOffsetRequest(0), unknownServerException, true);
    checkResponse(createListOffsetResponse(0), 0, true);
    // OffsetsForLeaderEpoch, both replica- and consumer-initiated
    checkRequest(createLeaderEpochRequestForReplica(0, 1), true);
    checkRequest(createLeaderEpochRequestForConsumer(), true);
    checkResponse(createLeaderEpochResponse(), 0, true);
    checkErrorResponse(createLeaderEpochRequestForConsumer(), unknownServerException, true);
    // NOTE(review): the following transactional-API checks repeat the block above verbatim.
    checkRequest(createAddPartitionsToTxnRequest(), true);
    checkErrorResponse(createAddPartitionsToTxnRequest(), unknownServerException, true);
    checkResponse(createAddPartitionsToTxnResponse(), 0, true);
    checkRequest(createAddOffsetsToTxnRequest(), true);
    checkErrorResponse(createAddOffsetsToTxnRequest(), unknownServerException, true);
    checkResponse(createAddOffsetsToTxnResponse(), 0, true);
    checkRequest(createEndTxnRequest(), true);
    checkErrorResponse(createEndTxnRequest(), unknownServerException, true);
    checkResponse(createEndTxnResponse(), 0, true);
    checkRequest(createWriteTxnMarkersRequest(), true);
    checkErrorResponse(createWriteTxnMarkersRequest(), unknownServerException, true);
    checkResponse(createWriteTxnMarkersResponse(), 0, true);
    // TxnOffsetCommit, including the auto-downgrade request variant
    checkRequest(createTxnOffsetCommitRequest(0), true);
    checkRequest(createTxnOffsetCommitRequest(3), true);
    checkRequest(createTxnOffsetCommitRequestWithAutoDowngrade(2), true);
    checkErrorResponse(createTxnOffsetCommitRequest(0), unknownServerException, true);
    checkErrorResponse(createTxnOffsetCommitRequest(3), unknownServerException, true);
    checkErrorResponse(createTxnOffsetCommitRequestWithAutoDowngrade(2), unknownServerException, true);
    checkResponse(createTxnOffsetCommitResponse(), 0, true);
    // ACL APIs (error paths use SecurityDisabledException rather than the unknown-server marker)
    checkRequest(createDescribeAclsRequest(), true);
    checkErrorResponse(createDescribeAclsRequest(), new SecurityDisabledException("Security is not enabled."), true);
    checkResponse(createDescribeAclsResponse(), ApiKeys.DESCRIBE_ACLS.latestVersion(), true);
    checkRequest(createCreateAclsRequest(), true);
    checkErrorResponse(createCreateAclsRequest(), new SecurityDisabledException("Security is not enabled."), true);
    checkResponse(createCreateAclsResponse(), ApiKeys.CREATE_ACLS.latestVersion(), true);
    checkRequest(createDeleteAclsRequest(), true);
    checkErrorResponse(createDeleteAclsRequest(), new SecurityDisabledException("Security is not enabled."), true);
    checkResponse(createDeleteAclsResponse(ApiKeys.DELETE_ACLS.latestVersion()), ApiKeys.DELETE_ACLS.latestVersion(), true);
    // AlterConfigs / DescribeConfigs (map-backed payloads, hence relaxed equality)
    checkRequest(createAlterConfigsRequest(), false);
    checkErrorResponse(createAlterConfigsRequest(), unknownServerException, true);
    checkResponse(createAlterConfigsResponse(), 0, false);
    checkRequest(createDescribeConfigsRequest(0), true);
    checkRequest(createDescribeConfigsRequestWithConfigEntries(0), false);
    checkErrorResponse(createDescribeConfigsRequest(0), unknownServerException, true);
    checkResponse(createDescribeConfigsResponse((short) 0), 0, false);
    checkRequest(createDescribeConfigsRequest(1), true);
    checkRequest(createDescribeConfigsRequestWithConfigEntries(1), false);
    checkRequest(createDescribeConfigsRequestWithDocumentation(1), false);
    checkRequest(createDescribeConfigsRequestWithDocumentation(2), false);
    checkRequest(createDescribeConfigsRequestWithDocumentation(3), false);
    checkErrorResponse(createDescribeConfigsRequest(1), unknownServerException, true);
    checkResponse(createDescribeConfigsResponse((short) 1), 1, false);
    checkDescribeConfigsResponseVersions();
    // Delegation token APIs
    checkRequest(createCreateTokenRequest(), true);
    checkErrorResponse(createCreateTokenRequest(), unknownServerException, true);
    checkResponse(createCreateTokenResponse(), 0, true);
    checkRequest(createDescribeTokenRequest(), true);
    checkErrorResponse(createDescribeTokenRequest(), unknownServerException, true);
    checkResponse(createDescribeTokenResponse(), 0, true);
    checkRequest(createExpireTokenRequest(), true);
    checkErrorResponse(createExpireTokenRequest(), unknownServerException, true);
    checkResponse(createExpireTokenResponse(), 0, true);
    checkRequest(createRenewTokenRequest(), true);
    checkErrorResponse(createRenewTokenRequest(), unknownServerException, true);
    checkResponse(createRenewTokenResponse(), 0, true);
    // ElectLeaders (null partitions means "all")
    checkRequest(createElectLeadersRequest(), true);
    checkRequest(createElectLeadersRequestNullPartitions(), true);
    checkErrorResponse(createElectLeadersRequest(), unknownServerException, true);
    checkResponse(createElectLeadersResponse(), 1, true);
    // IncrementalAlterConfigs, partition reassignment, offset delete, log dirs, client quotas
    checkRequest(createIncrementalAlterConfigsRequest(), true);
    checkErrorResponse(createIncrementalAlterConfigsRequest(), unknownServerException, true);
    checkResponse(createIncrementalAlterConfigsResponse(), 0, true);
    checkRequest(createAlterPartitionReassignmentsRequest(), true);
    checkErrorResponse(createAlterPartitionReassignmentsRequest(), unknownServerException, true);
    checkResponse(createAlterPartitionReassignmentsResponse(), 0, true);
    checkRequest(createListPartitionReassignmentsRequest(), true);
    checkErrorResponse(createListPartitionReassignmentsRequest(), unknownServerException, true);
    checkResponse(createListPartitionReassignmentsResponse(), 0, true);
    checkRequest(createOffsetDeleteRequest(), true);
    checkErrorResponse(createOffsetDeleteRequest(), unknownServerException, true);
    checkResponse(createOffsetDeleteResponse(), 0, true);
    checkRequest(createAlterReplicaLogDirsRequest(), true);
    checkErrorResponse(createAlterReplicaLogDirsRequest(), unknownServerException, true);
    checkResponse(createAlterReplicaLogDirsResponse(), 0, true);
    checkRequest(createDescribeClientQuotasRequest(), true);
    checkErrorResponse(createDescribeClientQuotasRequest(), unknownServerException, true);
    checkResponse(createDescribeClientQuotasResponse(), 0, true);
    checkRequest(createAlterClientQuotasRequest(), true);
    checkErrorResponse(createAlterClientQuotasRequest(), unknownServerException, true);
    checkResponse(createAlterClientQuotasResponse(), 0, true);
}
/**
 * Writes a response header to its wire form and parses it back, verifying that the
 * correlation id survives the round trip.
 */
@Test
public void testResponseHeader() {
    ResponseHeader original = createResponseHeader((short) 1);
    ObjectSerializationCache cache = new ObjectSerializationCache();
    ByteBuffer wireForm = ByteBuffer.allocate(original.size(cache));
    original.write(wireForm, cache);
    wireForm.flip();
    ResponseHeader roundTripped = ResponseHeader.parse(wireForm, original.headerVersion());
    assertEquals(original.correlationId(), roundTripped.correlationId());
}
/**
 * Runs the serialization round-trip checks for every fetch version strictly below the
 * latest one.
 */
private void checkOlderFetchVersions() {
    for (int version = 0; version < FETCH.latestVersion(); ++version) {
        checkRequest(createFetchRequest(version), true);
        // Error responses are only round-tripped above version 7 — presumably the error
        // path is not byte-stable for earlier versions; TODO confirm the original intent.
        if (version > 7)
            checkErrorResponse(createFetchRequest(version), unknownServerException, true);
        // Fetch responses gain extra fields at v4, so the fixture switches shape there.
        checkResponse(createFetchResponse(version >= 4), version, true);
    }
}
/**
 * Compares {@code actual} against {@code expected} config entry by config entry, accounting
 * for version-dependent fields: before v3 the configType is expected to come back as
 * UNKNOWN, and at v0 the configSource is expected to collapse to STATIC_BROKER_CONFIG.
 *
 * Fixes: the v0 configSource message read "Non matching" while every other message in this
 * method says "Non-matching"; also iterates keySet() since the map values were unused.
 */
private void verifyDescribeConfigsResponse(DescribeConfigsResponse expected, DescribeConfigsResponse actual,
    int version) {
    for (ConfigResource resource : expected.resultMap().keySet()) {
        List<DescribeConfigsResourceResult> actualEntries = actual.resultMap().get(resource).configs();
        List<DescribeConfigsResourceResult> expectedEntries = expected.resultMap().get(resource).configs();
        assertEquals(expectedEntries.size(), actualEntries.size());
        for (int i = 0; i < actualEntries.size(); ++i) {
            DescribeConfigsResourceResult actualEntry = actualEntries.get(i);
            DescribeConfigsResourceResult expectedEntry = expectedEntries.get(i);
            assertEquals(expectedEntry.name(), actualEntry.name());
            assertEquals("Non-matching values for " + actualEntry.name() + " in version " + version,
                expectedEntry.value(), actualEntry.value());
            assertEquals("Non-matching readonly for " + actualEntry.name() + " in version " + version,
                expectedEntry.readOnly(), actualEntry.readOnly());
            assertEquals("Non-matching isSensitive for " + actualEntry.name() + " in version " + version,
                expectedEntry.isSensitive(), actualEntry.isSensitive());
            if (version < 3) {
                // Pre-v3 responses are expected to carry UNKNOWN as the config type.
                assertEquals("Non-matching configType for " + actualEntry.name() + " in version " + version,
                    ConfigType.UNKNOWN.id(), actualEntry.configType());
            } else {
                assertEquals("Non-matching configType for " + actualEntry.name() + " in version " + version,
                    expectedEntry.configType(), actualEntry.configType());
            }
            if (version == 0) {
                // v0 responses are expected to report STATIC_BROKER_CONFIG as the source.
                assertEquals("Non-matching configSource for " + actualEntry.name() + " in version " + version,
                    DescribeConfigsResponse.ConfigSource.STATIC_BROKER_CONFIG.id(), actualEntry.configSource());
            } else {
                assertEquals("Non-matching configSource for " + actualEntry.name() + " in version " + version,
                    expectedEntry.configSource(), actualEntry.configSource());
            }
        }
    }
}
/**
 * Round-trips a DescribeConfigs response through serialization at every supported version
 * and verifies the deserialized form field by field.
 *
 * Fixes an off-by-one: the loop bound was {@code version < latestVersion()}, which silently
 * skipped the latest DESCRIBE_CONFIGS version, unlike the other per-version loops in this
 * class which use {@code <=}.
 */
private void checkDescribeConfigsResponseVersions() {
    for (int version = ApiKeys.DESCRIBE_CONFIGS.oldestVersion(); version <= ApiKeys.DESCRIBE_CONFIGS.latestVersion(); ++version) {
        short apiVersion = (short) version;
        DescribeConfigsResponse response = createDescribeConfigsResponse(apiVersion);
        DescribeConfigsResponse deserialized0 = (DescribeConfigsResponse) AbstractResponse.parseResponse(ApiKeys.DESCRIBE_CONFIGS,
            response.serialize(apiVersion), apiVersion);
        verifyDescribeConfigsResponse(response, deserialized0, apiVersion);
    }
}
/**
 * Builds an error response for {@code req} from {@code e} and round-trips it. When the
 * error is an UnknownServerException, additionally verifies that the exception's message
 * text does not leak into the response's string form.
 */
private void checkErrorResponse(AbstractRequest req, Throwable e, boolean checkEqualityAndHashCode) {
    AbstractResponse errorResponse = req.getErrorResponse(e);
    checkResponse(errorResponse, req.version(), checkEqualityAndHashCode);
    if (!(e instanceof UnknownServerException))
        return;
    String rendered = errorResponse.toString();
    assertFalse(String.format("Unknown message included in response for %s: %s ", req.apiKey(), rendered),
        rendered.contains(e.getMessage()));
}
/**
 * Serializes {@code req}, parses it back, and serializes the result again, failing the test
 * if the round trip throws. When {@code checkEquality} is set, the two serialized forms must
 * also be byte-for-byte identical; callers disable this for requests containing hash-map
 * backed fields, whose element ordering may legitimately differ between passes.
 *
 * Fixes the assertion message, which previously read "...failed" with no separating space.
 */
private void checkRequest(AbstractRequest req, boolean checkEquality) {
    try {
        ByteBuffer serializedBytes = req.serialize();
        AbstractRequest deserialized = AbstractRequest.parseRequest(req.apiKey(), req.version(), serializedBytes).request;
        ByteBuffer serializedBytes2 = deserialized.serialize();
        // parseRequest consumed the buffer, so rewind before comparing.
        serializedBytes.rewind();
        if (checkEquality)
            assertEquals("Request " + req + " failed equality test", serializedBytes, serializedBytes2);
    } catch (Exception e) {
        throw new RuntimeException("Failed to deserialize request " + req + " with type " + req.getClass(), e);
    }
}
/**
 * Serializes {@code response} at {@code version}, parses it back, and serializes the result
 * again, failing the test if the round trip throws. When {@code checkEquality} is set, the
 * two serialized forms must also be byte-for-byte identical; callers disable this for
 * responses containing hash-map backed fields, whose element ordering may vary.
 *
 * Fixes the assertion message, which previously read "...failed" with no separating space.
 */
private void checkResponse(AbstractResponse response, int version, boolean checkEquality) {
    try {
        ByteBuffer serializedBytes = response.serialize((short) version);
        AbstractResponse deserialized = AbstractResponse.parseResponse(response.apiKey(), serializedBytes, (short) version);
        ByteBuffer serializedBytes2 = deserialized.serialize((short) version);
        // parseResponse consumed the buffer, so rewind before comparing.
        serializedBytes.rewind();
        if (checkEquality)
            assertEquals("Response " + response + " failed equality test", serializedBytes, serializedBytes2);
    } catch (Exception e) {
        throw new RuntimeException("Failed to deserialize response " + response + " with type " + response.getClass(), e);
    }
}
/**
 * Building a FindCoordinator request with the TRANSACTION key type at version 0 must be
 * rejected with UnsupportedVersionException.
 */
@Test(expected = UnsupportedVersionException.class)
public void cannotUseFindCoordinatorV0ToFindTransactionCoordinator() {
    FindCoordinatorRequestData data = new FindCoordinatorRequestData()
        .setKeyType(CoordinatorType.TRANSACTION.id)
        .setKey("foobar");
    new FindCoordinatorRequest.Builder(data).build((short) 0);
}
/**
 * Builds a v3 produce request over two partitions with record batches of different sizes
 * and verifies that partitionSizes() reports each batch's serialized size.
 */
@Test
public void testPartitionSize() {
    TopicPartition firstPartition = new TopicPartition("test", 0);
    TopicPartition secondPartition = new TopicPartition("test", 1);
    MemoryRecords firstBatch = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2,
        CompressionType.NONE, new SimpleRecord("woot".getBytes()));
    MemoryRecords secondBatch = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2,
        CompressionType.NONE, new SimpleRecord("woot".getBytes()), new SimpleRecord("woot".getBytes()));
    ProduceRequestData.TopicProduceData firstTopicData = new ProduceRequestData.TopicProduceData()
        .setName(firstPartition.topic())
        .setPartitionData(Collections.singletonList(
            new ProduceRequestData.PartitionProduceData().setIndex(firstPartition.partition()).setRecords(firstBatch)));
    ProduceRequestData.TopicProduceData secondTopicData = new ProduceRequestData.TopicProduceData()
        .setName(secondPartition.topic())
        .setPartitionData(Collections.singletonList(
            new ProduceRequestData.PartitionProduceData().setIndex(secondPartition.partition()).setRecords(secondBatch)));
    ProduceRequest request = ProduceRequest.forMagic(RecordBatch.MAGIC_VALUE_V2,
        new ProduceRequestData()
            .setTopicData(new ProduceRequestData.TopicProduceDataCollection(
                Arrays.asList(firstTopicData, secondTopicData).iterator()))
            .setAcks((short) 1)
            .setTimeoutMs(5000)
            .setTransactionalId("transactionalId"))
        .build((short) 3);
    assertEquals(2, request.partitionSizes().size());
    assertEquals(firstBatch.sizeInBytes(), (int) request.partitionSizes().get(firstPartition));
    assertEquals(secondBatch.sizeInBytes(), (int) request.partitionSizes().get(secondPartition));
}
/**
 * Verifies the two toString flavours of a produce request: the verbose form includes the
 * per-partition sizes while the terse form only reports the partition count — both before
 * and after the records are cleared.
 *
 * Fixes: replaces the manual try/fail/catch idiom with assertThrows (already used elsewhere
 * in this class); the old fail message also referenced a non-existent "dataOrException".
 */
@Test
public void produceRequestToStringTest() {
    ProduceRequest request = createProduceRequest(ApiKeys.PRODUCE.latestVersion());
    assertEquals(1, request.data().topicData().size());
    assertFalse(request.toString(false).contains("partitionSizes"));
    assertTrue(request.toString(false).contains("numPartitions=1"));
    assertTrue(request.toString(true).contains("partitionSizes"));
    assertFalse(request.toString(true).contains("numPartitions"));
    request.clearPartitionRecords();
    // Once the records are cleared, accessing the request data must fail.
    assertThrows(IllegalStateException.class, request::data);
    // `toString` should behave the same after `clearPartitionRecords`
    assertFalse(request.toString(false).contains("partitionSizes"));
    assertTrue(request.toString(false).contains("numPartitions=1"));
    assertTrue(request.toString(true).contains("partitionSizes"));
    assertFalse(request.toString(true).contains("numPartitions"));
}
/**
 * getErrorResponse on a produce request must produce a NOT_ENOUGH_REPLICAS partition
 * response for every partition in the request, with invalid offset and no timestamp —
 * both before and after clearPartitionRecords().
 */
@SuppressWarnings("deprecation")
@Test
public void produceRequestGetErrorResponseTest() {
    ProduceRequest request = createProduceRequest(ApiKeys.PRODUCE.latestVersion());
    Set<TopicPartition> partitions = new HashSet<>(request.partitionSizes().keySet());
    // First pass with records intact, second after clearing them: same outcome expected.
    for (boolean cleared : new boolean[] {false, true}) {
        if (cleared)
            request.clearPartitionRecords();
        ProduceResponse errorResponse = (ProduceResponse) request.getErrorResponse(new NotEnoughReplicasException());
        assertEquals(partitions, errorResponse.responses().keySet());
        ProduceResponse.PartitionResponse partitionResponse = errorResponse.responses().values().iterator().next();
        assertEquals(Errors.NOT_ENOUGH_REPLICAS, partitionResponse.error);
        assertEquals(ProduceResponse.INVALID_OFFSET, partitionResponse.baseOffset);
        assertEquals(RecordBatch.NO_TIMESTAMP, partitionResponse.logAppendTime);
    }
}
/**
 * The throttle time passed to the FetchResponse constructor must be reported verbatim,
 * and the partition data must be preserved regardless of the throttle value.
 */
@Test
public void fetchResponseVersionTest() {
    LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> partitions = new LinkedHashMap<>();
    MemoryRecords emptyRecords = MemoryRecords.readableRecords(ByteBuffer.allocate(10));
    partitions.put(new TopicPartition("test", 0), new FetchResponse.PartitionData<>(
        Errors.NONE, 1000000, FetchResponse.INVALID_LAST_STABLE_OFFSET,
        0L, Optional.empty(), Collections.emptyList(), emptyRecords));
    FetchResponse<MemoryRecords> unthrottled = new FetchResponse<>(Errors.NONE, partitions, 0, INVALID_SESSION_ID);
    FetchResponse<MemoryRecords> throttled = new FetchResponse<>(Errors.NONE, partitions, 10, INVALID_SESSION_ID);
    assertEquals("Throttle time must be zero", 0, unthrottled.throttleTimeMs());
    assertEquals("Throttle time must be 10", 10, throttled.throttleTimeMs());
    assertEquals("Response data does not match", partitions, unthrottled.responseData());
    assertEquals("Response data does not match", partitions, throttled.responseData());
}
/**
 * Round-trips a fetch response at version 4, covering aborted-transaction lists that are
 * populated, null, and empty.
 */
@Test
public void testFetchResponseV4() {
    MemoryRecords emptyRecords = MemoryRecords.readableRecords(ByteBuffer.allocate(10));
    List<FetchResponse.AbortedTransaction> aborted = asList(
        new FetchResponse.AbortedTransaction(10, 100),
        new FetchResponse.AbortedTransaction(15, 50));
    LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> partitions = new LinkedHashMap<>();
    partitions.put(new TopicPartition("bar", 0), new FetchResponse.PartitionData<>(Errors.NONE, 100000,
        FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET, Optional.empty(), aborted, emptyRecords));
    partitions.put(new TopicPartition("bar", 1), new FetchResponse.PartitionData<>(Errors.NONE, 900000,
        5, FetchResponse.INVALID_LOG_START_OFFSET, Optional.empty(), null, emptyRecords));
    partitions.put(new TopicPartition("foo", 0), new FetchResponse.PartitionData<>(Errors.NONE, 70000,
        6, FetchResponse.INVALID_LOG_START_OFFSET, Optional.empty(), emptyList(), emptyRecords));
    FetchResponse<MemoryRecords> original = new FetchResponse<>(Errors.NONE, partitions, 10, INVALID_SESSION_ID);
    FetchResponse<MemoryRecords> parsed = FetchResponse.parse(original.serialize((short) 4), (short) 4);
    assertEquals(partitions, parsed.responseData());
}
/**
 * Drives verifyFetchResponseFullWrite over session-aware responses at the latest fetch
 * version and over plain responses at every supported version.
 */
@Test
public void verifyFetchResponseFullWrites() throws Exception {
    short latest = FETCH.latestVersion();
    verifyFetchResponseFullWrite(latest, createFetchResponse(123));
    verifyFetchResponseFullWrite(latest, createFetchResponse(Errors.FETCH_SESSION_ID_NOT_FOUND, 123));
    for (short version = 0; version <= latest; version++)
        verifyFetchResponseFullWrite(version, createFetchResponse(version >= 4));
}
/**
 * Serializes {@code fetchResponse} through its Send path into a channel and verifies the
 * on-the-wire framing: the 4-byte size prefix, the response header, the payload bytes, and
 * that the size prefix covers exactly header + body.
 */
private void verifyFetchResponseFullWrite(short apiVersion, FetchResponse<MemoryRecords> fetchResponse) throws Exception {
    int correlationId = 15;
    short responseHeaderVersion = FETCH.responseHeaderVersion(apiVersion);
    Send send = fetchResponse.toSend(new ResponseHeader(correlationId, responseHeaderVersion), apiVersion);
    ByteBufferChannel channel = new ByteBufferChannel(send.size());
    send.writeTo(channel);
    channel.close();
    ByteBuffer buf = channel.buffer();
    // read the size prefix (advances the buffer position past the first 4 bytes)
    int size = buf.getInt();
    assertTrue(size > 0);
    // read the header — NOTE(review): assumes channel.buffer() returns the same buffer
    // instance as `buf`, so parsing continues from the position left by getInt(); confirm.
    ResponseHeader responseHeader = ResponseHeader.parse(channel.buffer(), responseHeaderVersion);
    assertEquals(correlationId, responseHeader.correlationId());
    // the remaining bytes must equal the directly-serialized response body
    assertEquals(fetchResponse.serialize(apiVersion), buf);
    FetchResponseData deserialized = new FetchResponseData(new ByteBufferAccessor(buf), apiVersion);
    ObjectSerializationCache serializationCache = new ObjectSerializationCache();
    // the size prefix must account for exactly the header plus the body
    assertEquals(size, responseHeader.size(serializationCache) + deserialized.size(serializationCache, apiVersion));
}
/**
 * Round-trips a controlled-shutdown response at the latest version and checks that the
 * error code and remaining-partitions payload survive.
 */
@Test
public void testControlledShutdownResponse() {
    ControlledShutdownResponse original = createControlledShutdownResponse();
    short latestVersion = ApiKeys.CONTROLLED_SHUTDOWN.latestVersion();
    ControlledShutdownResponse parsed = ControlledShutdownResponse.parse(original.serialize(latestVersion), latestVersion);
    assertEquals(original.error(), parsed.error());
    assertEquals(original.data().remainingPartitions(), parsed.data().remainingPartitions());
}
// Building a CreateTopics request at version 0 with validateOnly set must be rejected
// with UnsupportedVersionException (per the test name, the flag is not representable at v0).
@Test(expected = UnsupportedVersionException.class)
public void testCreateTopicRequestV0FailsIfValidateOnly() {
    createCreateTopicRequest(0, true);
}
/**
 * A v3 CreateTopics request must be rejected when topics use the sentinel "no partitions" /
 * "no replication factor" values, and the error message must name the offending topics.
 */
@Test
public void testCreateTopicRequestV3FailsIfNoPartitionsOrReplicas() {
    CreateTopicsRequestData data = new CreateTopicsRequestData()
        .setTimeoutMs(123)
        .setValidateOnly(false);
    data.topics().add(new CreatableTopic()
        .setName("foo")
        .setNumPartitions(CreateTopicsRequest.NO_NUM_PARTITIONS)
        .setReplicationFactor((short) 1));
    data.topics().add(new CreatableTopic()
        .setName("bar")
        .setNumPartitions(1)
        .setReplicationFactor(CreateTopicsRequest.NO_REPLICATION_FACTOR));
    UnsupportedVersionException exception = assertThrows(
        UnsupportedVersionException.class, () -> new Builder(data).build((short) 3));
    assertTrue(exception.getMessage().contains("supported in CreateTopicRequest version 4+"));
    assertTrue(exception.getMessage().contains("[foo, bar]"));
}
@Test
public void testFetchRequestMaxBytesOldVersions() {
final short version = 1;
FetchRequest fr = createFetchRequest(version);
FetchRequest fr2 = FetchRequest.parse(fr.serialize(), version);
assertEquals(fr2.maxBytes(), fr.maxBytes());
}
@Test
public void testFetchRequestIsolationLevel() throws Exception {
FetchRequest request = createFetchRequest(4, IsolationLevel.READ_COMMITTED);
FetchRequest deserialized = (FetchRequest) AbstractRequest.parseRequest(request.apiKey(), request.version(),
request.serialize()).request;
assertEquals(request.isolationLevel(), deserialized.isolationLevel());
request = createFetchRequest(4, IsolationLevel.READ_UNCOMMITTED);
deserialized = (FetchRequest) AbstractRequest.parseRequest(request.apiKey(), request.version(),
request.serialize()).request;
assertEquals(request.isolationLevel(), deserialized.isolationLevel());
}
@Test
public void testFetchRequestWithMetadata() throws Exception {
FetchRequest request = createFetchRequest(4, IsolationLevel.READ_COMMITTED);
FetchRequest deserialized = (FetchRequest) AbstractRequest.parseRequest(ApiKeys.FETCH, request.version(),
request.serialize()).request;
assertEquals(request.isolationLevel(), deserialized.isolationLevel());
request = createFetchRequest(4, IsolationLevel.READ_UNCOMMITTED);
deserialized = (FetchRequest) AbstractRequest.parseRequest(ApiKeys.FETCH, request.version(),
request.serialize()).request;
assertEquals(request.isolationLevel(), deserialized.isolationLevel());
}
@Test
public void testFetchRequestCompat() {
Map<TopicPartition, FetchRequest.PartitionData> fetchData = new HashMap<>();
fetchData.put(new TopicPartition("test", 0), new FetchRequest.PartitionData(100, 2, 100, Optional.of(42)));
FetchRequest req = FetchRequest.Builder
.forConsumer(100, 100, fetchData)
.metadata(new FetchMetadata(10, 20))
.isolationLevel(IsolationLevel.READ_COMMITTED)
.build((short) 2);
FetchRequestData data = req.data();
ObjectSerializationCache cache = new ObjectSerializationCache();
int size = data.size(cache, (short) 2);
ByteBufferAccessor writer = new ByteBufferAccessor(ByteBuffer.allocate(size));
data.write(writer, cache, (short) 2);
}
@Test
public void testJoinGroupRequestVersion0RebalanceTimeout() {
final short version = 0;
JoinGroupRequest jgr = createJoinGroupRequest(version);
JoinGroupRequest jgr2 = JoinGroupRequest.parse(jgr.serialize(), version);
assertEquals(jgr2.data().rebalanceTimeoutMs(), jgr.data().rebalanceTimeoutMs());
}
@Test
public void testOffsetFetchRequestBuilderToString() {
List<Boolean> stableFlags = Arrays.asList(true, false);
for (Boolean requireStable : stableFlags) {
String allTopicPartitionsString = new OffsetFetchRequest.Builder("someGroup", requireStable, null, false).toString();
assertTrue(allTopicPartitionsString.contains("groupId='someGroup', topics=null, requireStable="
+ requireStable.toString()));
String string = new OffsetFetchRequest.Builder("group1",
requireStable, Collections.singletonList(new TopicPartition("test11", 1)), false).toString();
assertTrue(string.contains("test11"));
assertTrue(string.contains("group1"));
assertTrue(string.contains("requireStable=" + requireStable.toString()));
}
}
@Test
public void testApiVersionsRequestBeforeV3Validation() {
for (short version = 0; version < 3; version++) {
ApiVersionsRequest request = new ApiVersionsRequest(new ApiVersionsRequestData(), version);
assertTrue(request.isValid());
}
}
@Test
public void testValidApiVersionsRequest() {
ApiVersionsRequest request;
request = new ApiVersionsRequest.Builder().build();
assertTrue(request.isValid());
request = new ApiVersionsRequest(new ApiVersionsRequestData()
.setClientSoftwareName("apache-kafka.java")
.setClientSoftwareVersion("0.0.0-SNAPSHOT"),
ApiKeys.API_VERSIONS.latestVersion()
);
assertTrue(request.isValid());
}
@Test(expected = UnsupportedVersionException.class)
public void testListGroupRequestV3FailsWithStates() {
ListGroupsRequestData data = new ListGroupsRequestData()
.setStatesFilter(asList(ConsumerGroupState.STABLE.name()));
new ListGroupsRequest.Builder(data).build((short) 3);
}
    /**
     * Requests whose client software name or version contain a character such
     * as '@', or begin/end with a separator, must be reported as invalid.
     */
    @Test
    public void testInvalidApiVersionsRequest() {
        testInvalidCase("java@apache_kafka", "0.0.0-SNAPSHOT"); // '@' in name
        testInvalidCase("apache-kafka-java", "0.0.0@java"); // '@' in version
        testInvalidCase("-apache-kafka-java", "0.0.0"); // leading separator
        testInvalidCase("apache-kafka-java.", "0.0.0"); // trailing separator
    }
private void testInvalidCase(String name, String version) {
ApiVersionsRequest request = new ApiVersionsRequest(new ApiVersionsRequestData()
.setClientSoftwareName(name)
.setClientSoftwareVersion(version),
ApiKeys.API_VERSIONS.latestVersion()
);
assertFalse(request.isValid());
}
@Test
public void testApiVersionResponseWithUnsupportedError() {
ApiVersionsRequest request = new ApiVersionsRequest.Builder().build();
ApiVersionsResponse response = request.getErrorResponse(0, Errors.UNSUPPORTED_VERSION.exception());
assertEquals(Errors.UNSUPPORTED_VERSION.code(), response.data().errorCode());
ApiVersionsResponseKey apiVersion = response.data().apiKeys().find(ApiKeys.API_VERSIONS.id);
assertNotNull(apiVersion);
assertEquals(ApiKeys.API_VERSIONS.id, apiVersion.apiKey());
assertEquals(ApiKeys.API_VERSIONS.oldestVersion(), apiVersion.minVersion());
assertEquals(ApiKeys.API_VERSIONS.latestVersion(), apiVersion.maxVersion());
}
@Test
public void testApiVersionResponseWithNotUnsupportedError() {
ApiVersionsRequest request = new ApiVersionsRequest.Builder().build();
ApiVersionsResponse response = request.getErrorResponse(0, Errors.INVALID_REQUEST.exception());
assertEquals(response.data().errorCode(), Errors.INVALID_REQUEST.code());
assertTrue(response.data().apiKeys().isEmpty());
}
@Test
public void testApiVersionResponseParsingFallback() {
ByteBuffer buffer = ApiVersionsResponse.DEFAULT_API_VERSIONS_RESPONSE.serialize((short) 0);
ApiVersionsResponse response = ApiVersionsResponse.parse(buffer, ApiKeys.API_VERSIONS.latestVersion());
assertEquals(Errors.NONE.code(), response.data().errorCode());
}
@Test
public void testApiVersionResponseParsingFallbackException() {
short version = 0;
assertThrows(BufferUnderflowException.class, () -> ApiVersionsResponse.parse(ByteBuffer.allocate(0), version));
}
@Test
public void testApiVersionResponseParsing() {
ByteBuffer buffer = ApiVersionsResponse.DEFAULT_API_VERSIONS_RESPONSE.serialize(ApiKeys.API_VERSIONS.latestVersion());
ApiVersionsResponse response = ApiVersionsResponse.parse(buffer, ApiKeys.API_VERSIONS.latestVersion());
assertEquals(Errors.NONE.code(), response.data().errorCode());
}
@Test
public void testInitProducerIdRequestVersions() {
InitProducerIdRequest.Builder bld = new InitProducerIdRequest.Builder(
new InitProducerIdRequestData().setTransactionTimeoutMs(1000).
setTransactionalId("abracadabra").
setProducerId(123));
final UnsupportedVersionException exception = assertThrows(
UnsupportedVersionException.class, () -> bld.build((short) 2).serialize());
assertTrue(exception.getMessage().contains("Attempted to write a non-default producerId at version 2"));
bld.build((short) 3);
}
@Test
public void testDeletableTopicResultErrorMessageIsNullByDefault() {
DeletableTopicResult result = new DeletableTopicResult()
.setName("topic")
.setErrorCode(Errors.THROTTLING_QUOTA_EXCEEDED.code());
assertEquals("topic", result.name());
assertEquals(Errors.THROTTLING_QUOTA_EXCEEDED.code(), result.errorCode());
assertNull(result.errorMessage());
}
    /** Builds a ResponseHeader with a fixed correlation id of 10 at the given header version. */
    private ResponseHeader createResponseHeader(short headerVersion) {
        return new ResponseHeader(10, headerVersion);
    }
private FindCoordinatorRequest createFindCoordinatorRequest(int version) {
return new FindCoordinatorRequest.Builder(
new FindCoordinatorRequestData()
.setKeyType(CoordinatorType.GROUP.id())
.setKey("test-group"))
.build((short) version);
}
private FindCoordinatorResponse createFindCoordinatorResponse() {
Node node = new Node(10, "host1", 2014);
return FindCoordinatorResponse.prepareResponse(Errors.NONE, node);
}
private FetchRequest createFetchRequest(int version, FetchMetadata metadata, List<TopicPartition> toForget) {
LinkedHashMap<TopicPartition, FetchRequest.PartitionData> fetchData = new LinkedHashMap<>();
fetchData.put(new TopicPartition("test1", 0), new FetchRequest.PartitionData(100, -1L,
1000000, Optional.empty()));
fetchData.put(new TopicPartition("test2", 0), new FetchRequest.PartitionData(200, -1L,
1000000, Optional.empty()));
return FetchRequest.Builder.forConsumer(100, 100000, fetchData).
metadata(metadata).setMaxBytes(1000).toForget(toForget).build((short) version);
}
private FetchRequest createFetchRequest(int version, IsolationLevel isolationLevel) {
LinkedHashMap<TopicPartition, FetchRequest.PartitionData> fetchData = new LinkedHashMap<>();
fetchData.put(new TopicPartition("test1", 0), new FetchRequest.PartitionData(100, -1L,
1000000, Optional.empty()));
fetchData.put(new TopicPartition("test2", 0), new FetchRequest.PartitionData(200, -1L,
1000000, Optional.empty()));
return FetchRequest.Builder.forConsumer(100, 100000, fetchData).
isolationLevel(isolationLevel).setMaxBytes(1000).build((short) version);
}
private FetchRequest createFetchRequest(int version) {
LinkedHashMap<TopicPartition, FetchRequest.PartitionData> fetchData = new LinkedHashMap<>();
fetchData.put(new TopicPartition("test1", 0), new FetchRequest.PartitionData(100, -1L,
1000000, Optional.empty()));
fetchData.put(new TopicPartition("test2", 0), new FetchRequest.PartitionData(200, -1L,
1000000, Optional.empty()));
return FetchRequest.Builder.forConsumer(100, 100000, fetchData).setMaxBytes(1000).build((short) version);
}
private FetchResponse<MemoryRecords> createFetchResponse(Errors error, int sessionId) {
return new FetchResponse<>(error, new LinkedHashMap<>(), 25, sessionId);
}
private FetchResponse<MemoryRecords> createFetchResponse(int sessionId) {
LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> responseData = new LinkedHashMap<>();
MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("blah".getBytes()));
responseData.put(new TopicPartition("test", 0), new FetchResponse.PartitionData<>(Errors.NONE,
1000000, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, Optional.empty(), Collections.emptyList(), records));
List<FetchResponse.AbortedTransaction> abortedTransactions = Collections.singletonList(
new FetchResponse.AbortedTransaction(234L, 999L));
responseData.put(new TopicPartition("test", 1), new FetchResponse.PartitionData<>(Errors.NONE,
1000000, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, Optional.empty(), abortedTransactions, MemoryRecords.EMPTY));
return new FetchResponse<>(Errors.NONE, responseData, 25, sessionId);
}
private FetchResponse<MemoryRecords> createFetchResponse(boolean includeAborted) {
LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> responseData = new LinkedHashMap<>();
MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("blah".getBytes()));
responseData.put(new TopicPartition("test", 0), new FetchResponse.PartitionData<>(Errors.NONE,
1000000, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, Optional.empty(), Collections.emptyList(), records));
List<FetchResponse.AbortedTransaction> abortedTransactions = Collections.emptyList();
if (includeAborted) {
abortedTransactions = Collections.singletonList(
new FetchResponse.AbortedTransaction(234L, 999L));
}
responseData.put(new TopicPartition("test", 1), new FetchResponse.PartitionData<>(Errors.NONE,
1000000, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, Optional.empty(), abortedTransactions, MemoryRecords.EMPTY));
return new FetchResponse<>(Errors.NONE, responseData, 25, INVALID_SESSION_ID);
}
private HeartbeatRequest createHeartBeatRequest() {
return new HeartbeatRequest.Builder(new HeartbeatRequestData()
.setGroupId("group1")
.setGenerationId(1)
.setMemberId("consumer1")).build();
}
private HeartbeatResponse createHeartBeatResponse() {
return new HeartbeatResponse(new HeartbeatResponseData().setErrorCode(Errors.NONE.code()));
}
private JoinGroupRequest createJoinGroupRequest(int version) {
JoinGroupRequestData.JoinGroupRequestProtocolCollection protocols =
new JoinGroupRequestData.JoinGroupRequestProtocolCollection(
Collections.singleton(
new JoinGroupRequestData.JoinGroupRequestProtocol()
.setName("consumer-range")
.setMetadata(new byte[0])).iterator()
);
JoinGroupRequestData data = new JoinGroupRequestData()
.setGroupId("group1")
.setSessionTimeoutMs(30000)
.setMemberId("consumer1")
.setProtocolType("consumer")
.setProtocols(protocols);
// v1 and above contains rebalance timeout
if (version >= 1)
data.setRebalanceTimeoutMs(60000);
// v5 and above could set group instance id
if (version >= 5)
data.setGroupInstanceId("groupInstanceId");
return new JoinGroupRequest.Builder(data).build((short) version);
}
private JoinGroupResponse createJoinGroupResponse(int version) {
List<JoinGroupResponseData.JoinGroupResponseMember> members = new ArrayList<>();
for (int i = 0; i < 2; i++) {
JoinGroupResponseMember member = new JoinGroupResponseData.JoinGroupResponseMember()
.setMemberId("consumer" + i)
.setMetadata(new byte[0]);
if (version >= 5)
member.setGroupInstanceId("instance" + i);
members.add(member);
}
JoinGroupResponseData data = new JoinGroupResponseData()
.setErrorCode(Errors.NONE.code())
.setGenerationId(1)
.setProtocolType("consumer") // Added in v7 but ignorable
.setProtocolName("range")
.setLeader("leader")
.setMemberId("consumer1")
.setMembers(members);
// v1 and above could set throttle time
if (version >= 1)
data.setThrottleTimeMs(1000);
return new JoinGroupResponse(data);
}
private SyncGroupRequest createSyncGroupRequest(int version) {
List<SyncGroupRequestAssignment> assignments = Collections.singletonList(
new SyncGroupRequestAssignment()
.setMemberId("member")
.setAssignment(new byte[0])
);
SyncGroupRequestData data = new SyncGroupRequestData()
.setGroupId("group1")
.setGenerationId(1)
.setMemberId("member")
.setProtocolType("consumer") // Added in v5 but ignorable
.setProtocolName("range") // Added in v5 but ignorable
.setAssignments(assignments);
// v3 and above could set group instance id
if (version >= 3)
data.setGroupInstanceId("groupInstanceId");
return new SyncGroupRequest.Builder(data).build((short) version);
}
private SyncGroupResponse createSyncGroupResponse(int version) {
SyncGroupResponseData data = new SyncGroupResponseData()
.setErrorCode(Errors.NONE.code())
.setProtocolType("consumer") // Added in v5 but ignorable
.setProtocolName("range") // Added in v5 but ignorable
.setAssignment(new byte[0]);
// v1 and above could set throttle time
if (version >= 1)
data.setThrottleTimeMs(1000);
return new SyncGroupResponse(data);
}
private ListGroupsRequest createListGroupsRequest(short version) {
ListGroupsRequestData data = new ListGroupsRequestData();
if (version >= 4)
data.setStatesFilter(Arrays.asList("Stable"));
return new ListGroupsRequest.Builder(data).build(version);
}
private ListGroupsResponse createListGroupsResponse(int version) {
ListGroupsResponseData.ListedGroup group = new ListGroupsResponseData.ListedGroup()
.setGroupId("test-group")
.setProtocolType("consumer");
if (version >= 4)
group.setGroupState("Stable");
ListGroupsResponseData data = new ListGroupsResponseData()
.setErrorCode(Errors.NONE.code())
.setGroups(Collections.singletonList(group));
return new ListGroupsResponse(data);
}
private DescribeGroupsRequest createDescribeGroupRequest() {
return new DescribeGroupsRequest.Builder(
new DescribeGroupsRequestData().
setGroups(Collections.singletonList("test-group"))).build();
}
private DescribeGroupsResponse createDescribeGroupResponse() {
String clientId = "consumer-1";
String clientHost = "localhost";
DescribeGroupsResponseData describeGroupsResponseData = new DescribeGroupsResponseData();
DescribeGroupsResponseData.DescribedGroupMember member = DescribeGroupsResponse.groupMember("memberId", null,
clientId, clientHost, new byte[0], new byte[0]);
DescribedGroup metadata = DescribeGroupsResponse.groupMetadata("test-group",
Errors.NONE,
"STABLE",
"consumer",
"roundrobin",
Collections.singletonList(member),
DescribeGroupsResponse.AUTHORIZED_OPERATIONS_OMITTED);
describeGroupsResponseData.groups().add(metadata);
return new DescribeGroupsResponse(describeGroupsResponseData);
}
private LeaveGroupRequest createLeaveGroupRequest() {
return new LeaveGroupRequest.Builder(
"group1", Collections.singletonList(new MemberIdentity()
.setMemberId("consumer1"))
).build();
}
private LeaveGroupResponse createLeaveGroupResponse() {
return new LeaveGroupResponse(new LeaveGroupResponseData().setErrorCode(Errors.NONE.code()));
}
private DeleteGroupsRequest createDeleteGroupsRequest() {
return new DeleteGroupsRequest.Builder(
new DeleteGroupsRequestData()
.setGroupsNames(Collections.singletonList("test-group"))
).build();
}
private DeleteGroupsResponse createDeleteGroupsResponse() {
DeletableGroupResultCollection result = new DeletableGroupResultCollection();
result.add(new DeletableGroupResult()
.setGroupId("test-group")
.setErrorCode(Errors.NONE.code()));
return new DeleteGroupsResponse(
new DeleteGroupsResponseData()
.setResults(result)
);
}
private ListOffsetsRequest createListOffsetRequest(int version) {
if (version == 0) {
ListOffsetsTopic topic = new ListOffsetsTopic()
.setName("test")
.setPartitions(Arrays.asList(new ListOffsetsPartition()
.setPartitionIndex(0)
.setTimestamp(1000000L)
.setMaxNumOffsets(10)
.setCurrentLeaderEpoch(5)));
return ListOffsetsRequest.Builder
.forConsumer(false, IsolationLevel.READ_UNCOMMITTED)
.setTargetTimes(Collections.singletonList(topic))
.build((short) version);
} else if (version == 1) {
ListOffsetsTopic topic = new ListOffsetsTopic()
.setName("test")
.setPartitions(Arrays.asList(new ListOffsetsPartition()
.setPartitionIndex(0)
.setTimestamp(1000000L)
.setCurrentLeaderEpoch(5)));
return ListOffsetsRequest.Builder
.forConsumer(true, IsolationLevel.READ_UNCOMMITTED)
.setTargetTimes(Collections.singletonList(topic))
.build((short) version);
} else if (version >= 2 && version <= LIST_OFFSETS.latestVersion()) {
ListOffsetsPartition partition = new ListOffsetsPartition()
.setPartitionIndex(0)
.setTimestamp(1000000L)
.setCurrentLeaderEpoch(5);
ListOffsetsTopic topic = new ListOffsetsTopic()
.setName("test")
.setPartitions(Arrays.asList(partition));
return ListOffsetsRequest.Builder
.forConsumer(true, IsolationLevel.READ_COMMITTED)
.setTargetTimes(Collections.singletonList(topic))
.build((short) version);
} else {
throw new IllegalArgumentException("Illegal ListOffsetRequest version " + version);
}
}
private ListOffsetsResponse createListOffsetResponse(int version) {
if (version == 0) {
ListOffsetsResponseData data = new ListOffsetsResponseData()
.setTopics(Collections.singletonList(new ListOffsetsTopicResponse()
.setName("test")
.setPartitions(Collections.singletonList(new ListOffsetsPartitionResponse()
.setPartitionIndex(0)
.setErrorCode(Errors.NONE.code())
.setOldStyleOffsets(asList(100L))))));
return new ListOffsetsResponse(data);
} else if (version >= 1 && version <= LIST_OFFSETS.latestVersion()) {
ListOffsetsPartitionResponse partition = new ListOffsetsPartitionResponse()
.setPartitionIndex(0)
.setErrorCode(Errors.NONE.code())
.setTimestamp(10000L)
.setOffset(100L);
if (version >= 4) {
partition.setLeaderEpoch(27);
}
ListOffsetsResponseData data = new ListOffsetsResponseData()
.setTopics(Collections.singletonList(new ListOffsetsTopicResponse()
.setName("test")
.setPartitions(Collections.singletonList(partition))));
return new ListOffsetsResponse(data);
} else {
throw new IllegalArgumentException("Illegal ListOffsetResponse version " + version);
}
}
private MetadataRequest createMetadataRequest(int version, List<String> topics) {
return new MetadataRequest.Builder(topics, true).build((short) version);
}
private MetadataResponse createMetadataResponse() {
Node node = new Node(1, "host1", 1001);
List<Integer> replicas = singletonList(node.id());
List<Integer> isr = singletonList(node.id());
List<Integer> offlineReplicas = emptyList();
List<MetadataResponse.TopicMetadata> allTopicMetadata = new ArrayList<>();
allTopicMetadata.add(new MetadataResponse.TopicMetadata(Errors.NONE, "__consumer_offsets", true,
asList(new MetadataResponse.PartitionMetadata(Errors.NONE,
new TopicPartition("__consumer_offsets", 1),
Optional.of(node.id()), Optional.of(5), replicas, isr, offlineReplicas))));
allTopicMetadata.add(new MetadataResponse.TopicMetadata(Errors.LEADER_NOT_AVAILABLE, "topic2", false,
emptyList()));
allTopicMetadata.add(new MetadataResponse.TopicMetadata(Errors.NONE, "topic3", false,
asList(new MetadataResponse.PartitionMetadata(Errors.LEADER_NOT_AVAILABLE,
new TopicPartition("topic3", 0), Optional.empty(),
Optional.empty(), replicas, isr, offlineReplicas))));
return RequestTestUtils.metadataResponse(asList(node), null, MetadataResponse.NO_CONTROLLER_ID, allTopicMetadata);
}
private OffsetCommitRequest createOffsetCommitRequest(int version) {
return new OffsetCommitRequest.Builder(new OffsetCommitRequestData()
.setGroupId("group1")
.setMemberId("consumer1")
.setGroupInstanceId(null)
.setGenerationId(100)
.setTopics(Collections.singletonList(
new OffsetCommitRequestData.OffsetCommitRequestTopic()
.setName("test")
.setPartitions(Arrays.asList(
new OffsetCommitRequestData.OffsetCommitRequestPartition()
.setPartitionIndex(0)
.setCommittedOffset(100)
.setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH)
.setCommittedMetadata(""),
new OffsetCommitRequestData.OffsetCommitRequestPartition()
.setPartitionIndex(1)
.setCommittedOffset(200)
.setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH)
.setCommittedMetadata(null)
))
))
).build((short) version);
}
private OffsetCommitResponse createOffsetCommitResponse() {
return new OffsetCommitResponse(new OffsetCommitResponseData()
.setTopics(Collections.singletonList(
new OffsetCommitResponseData.OffsetCommitResponseTopic()
.setName("test")
.setPartitions(Collections.singletonList(
new OffsetCommitResponseData.OffsetCommitResponsePartition()
.setPartitionIndex(0)
.setErrorCode(Errors.NONE.code())
))
))
);
}
private OffsetFetchRequest createOffsetFetchRequest(int version, boolean requireStable) {
return new OffsetFetchRequest.Builder("group1", requireStable, Collections.singletonList(new TopicPartition("test11", 1)), false)
.build((short) version);
}
private OffsetFetchRequest createOffsetFetchRequestForAllPartition(String groupId, boolean requireStable) {
return new OffsetFetchRequest.Builder(groupId, requireStable, null, false).build();
}
private OffsetFetchResponse createOffsetFetchResponse() {
Map<TopicPartition, OffsetFetchResponse.PartitionData> responseData = new HashMap<>();
responseData.put(new TopicPartition("test", 0), new OffsetFetchResponse.PartitionData(
100L, Optional.empty(), "", Errors.NONE));
responseData.put(new TopicPartition("test", 1), new OffsetFetchResponse.PartitionData(
100L, Optional.of(10), null, Errors.NONE));
return new OffsetFetchResponse(Errors.NONE, responseData);
}
@SuppressWarnings("deprecation")
private ProduceRequest createProduceRequest(int version) {
if (version < 2)
throw new IllegalArgumentException("Produce request version 2 is not supported");
byte magic = version == 2 ? RecordBatch.MAGIC_VALUE_V1 : RecordBatch.MAGIC_VALUE_V2;
MemoryRecords records = MemoryRecords.withRecords(magic, CompressionType.NONE, new SimpleRecord("woot".getBytes()));
return ProduceRequest.forMagic(magic,
new ProduceRequestData()
.setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList(
new ProduceRequestData.TopicProduceData()
.setName("test")
.setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData()
.setIndex(0)
.setRecords(records)))).iterator()))
.setAcks((short) 1)
.setTimeoutMs(5000)
.setTransactionalId(version >= 3 ? "transactionalId" : null))
.build((short) version);
}
@SuppressWarnings("deprecation")
private ProduceResponse createProduceResponse() {
Map<TopicPartition, ProduceResponse.PartitionResponse> responseData = new HashMap<>();
responseData.put(new TopicPartition("test", 0), new ProduceResponse.PartitionResponse(Errors.NONE,
10000, RecordBatch.NO_TIMESTAMP, 100));
return new ProduceResponse(responseData, 0);
}
@SuppressWarnings("deprecation")
private ProduceResponse createProduceResponseWithErrorMessage() {
Map<TopicPartition, ProduceResponse.PartitionResponse> responseData = new HashMap<>();
responseData.put(new TopicPartition("test", 0), new ProduceResponse.PartitionResponse(Errors.NONE,
10000, RecordBatch.NO_TIMESTAMP, 100, Collections.singletonList(new ProduceResponse.RecordError(0, "error message")),
"global error message"));
return new ProduceResponse(responseData, 0);
}
private StopReplicaRequest createStopReplicaRequest(int version, boolean deletePartitions) {
List<StopReplicaTopicState> topicStates = new ArrayList<>();
StopReplicaTopicState topic1 = new StopReplicaTopicState()
.setTopicName("topic1")
.setPartitionStates(Collections.singletonList(new StopReplicaPartitionState()
.setPartitionIndex(0)
.setLeaderEpoch(1)
.setDeletePartition(deletePartitions)));
topicStates.add(topic1);
StopReplicaTopicState topic2 = new StopReplicaTopicState()
.setTopicName("topic2")
.setPartitionStates(Collections.singletonList(new StopReplicaPartitionState()
.setPartitionIndex(1)
.setLeaderEpoch(2)
.setDeletePartition(deletePartitions)));
topicStates.add(topic2);
return new StopReplicaRequest.Builder((short) version, 0, 1, 0,
deletePartitions, topicStates).build((short) version);
}
private StopReplicaResponse createStopReplicaResponse() {
List<StopReplicaResponseData.StopReplicaPartitionError> partitions = new ArrayList<>();
partitions.add(new StopReplicaResponseData.StopReplicaPartitionError()
.setTopicName("test")
.setPartitionIndex(0)
.setErrorCode(Errors.NONE.code()));
return new StopReplicaResponse(new StopReplicaResponseData()
.setErrorCode(Errors.NONE.code())
.setPartitionErrors(partitions));
}
private ControlledShutdownRequest createControlledShutdownRequest() {
ControlledShutdownRequestData data = new ControlledShutdownRequestData()
.setBrokerId(10)
.setBrokerEpoch(0L);
return new ControlledShutdownRequest.Builder(
data,
ApiKeys.CONTROLLED_SHUTDOWN.latestVersion()).build();
}
private ControlledShutdownRequest createControlledShutdownRequest(int version) {
ControlledShutdownRequestData data = new ControlledShutdownRequestData()
.setBrokerId(10)
.setBrokerEpoch(0L);
return new ControlledShutdownRequest.Builder(
data,
ApiKeys.CONTROLLED_SHUTDOWN.latestVersion()).build((short) version);
}
private ControlledShutdownResponse createControlledShutdownResponse() {
RemainingPartition p1 = new RemainingPartition()
.setTopicName("test2")
.setPartitionIndex(5);
RemainingPartition p2 = new RemainingPartition()
.setTopicName("test1")
.setPartitionIndex(10);
RemainingPartitionCollection pSet = new RemainingPartitionCollection();
pSet.add(p1);
pSet.add(p2);
ControlledShutdownResponseData data = new ControlledShutdownResponseData()
.setErrorCode(Errors.NONE.code())
.setRemainingPartitions(pSet);
return new ControlledShutdownResponse(data);
}
private LeaderAndIsrRequest createLeaderAndIsrRequest(int version) {
List<LeaderAndIsrPartitionState> partitionStates = new ArrayList<>();
List<Integer> isr = asList(1, 2);
List<Integer> replicas = asList(1, 2, 3, 4);
partitionStates.add(new LeaderAndIsrPartitionState()
.setTopicName("topic5")
.setPartitionIndex(105)
.setControllerEpoch(0)
.setLeader(2)
.setLeaderEpoch(1)
.setIsr(isr)
.setZkVersion(2)
.setReplicas(replicas)
.setIsNew(false));
partitionStates.add(new LeaderAndIsrPartitionState()
.setTopicName("topic5")
.setPartitionIndex(1)
.setControllerEpoch(1)
.setLeader(1)
.setLeaderEpoch(1)
.setIsr(isr)
.setZkVersion(2)
.setReplicas(replicas)
.setIsNew(false));
partitionStates.add(new LeaderAndIsrPartitionState()
.setTopicName("topic20")
.setPartitionIndex(1)
.setControllerEpoch(1)
.setLeader(0)
.setLeaderEpoch(1)
.setIsr(isr)
.setZkVersion(2)
.setReplicas(replicas)
.setIsNew(false));
Set<Node> leaders = Utils.mkSet(
new Node(0, "test0", 1223),
new Node(1, "test1", 1223)
);
return new LeaderAndIsrRequest.Builder((short) version, 1, 10, 0, partitionStates, leaders).build();
}
private LeaderAndIsrResponse createLeaderAndIsrResponse() {
List<LeaderAndIsrResponseData.LeaderAndIsrPartitionError> partitions = new ArrayList<>();
partitions.add(new LeaderAndIsrResponseData.LeaderAndIsrPartitionError()
.setTopicName("test")
.setPartitionIndex(0)
.setErrorCode(Errors.NONE.code()));
return new LeaderAndIsrResponse(new LeaderAndIsrResponseData()
.setErrorCode(Errors.NONE.code())
.setPartitionErrors(partitions));
}
    /**
     * Builds an UpdateMetadata request with three partition states and two live
     * brokers, adding version-gated endpoint fields as the version allows.
     * NOTE: the version >= 3 branch patches endpoints2.get(1) in place, which
     * relies on the exact insertion order of the endpoint list below.
     */
    private UpdateMetadataRequest createUpdateMetadataRequest(int version, String rack) {
        List<UpdateMetadataPartitionState> partitionStates = new ArrayList<>();
        List<Integer> isr = asList(1, 2);
        List<Integer> replicas = asList(1, 2, 3, 4);
        List<Integer> offlineReplicas = asList();
        partitionStates.add(new UpdateMetadataPartitionState()
            .setTopicName("topic5")
            .setPartitionIndex(105)
            .setControllerEpoch(0)
            .setLeader(2)
            .setLeaderEpoch(1)
            .setIsr(isr)
            .setZkVersion(2)
            .setReplicas(replicas)
            .setOfflineReplicas(offlineReplicas));
        partitionStates.add(new UpdateMetadataPartitionState()
            .setTopicName("topic5")
            .setPartitionIndex(1)
            .setControllerEpoch(1)
            .setLeader(1)
            .setLeaderEpoch(1)
            .setIsr(isr)
            .setZkVersion(2)
            .setReplicas(replicas)
            .setOfflineReplicas(offlineReplicas));
        partitionStates.add(new UpdateMetadataPartitionState()
            .setTopicName("topic20")
            .setPartitionIndex(1)
            .setControllerEpoch(1)
            .setLeader(0)
            .setLeaderEpoch(1)
            .setIsr(isr)
            .setZkVersion(2)
            .setReplicas(replicas)
            .setOfflineReplicas(offlineReplicas));
        SecurityProtocol plaintext = SecurityProtocol.PLAINTEXT;
        // Broker 0 exposes a single PLAINTEXT endpoint.
        List<UpdateMetadataEndpoint> endpoints1 = new ArrayList<>();
        endpoints1.add(new UpdateMetadataEndpoint()
            .setHost("host1")
            .setPort(1223)
            .setSecurityProtocol(plaintext.id)
            .setListener(ListenerName.forSecurityProtocol(plaintext).value()));
        // Broker 1 starts with a PLAINTEXT endpoint; SSL endpoints are only
        // representable from v1 onward.
        List<UpdateMetadataEndpoint> endpoints2 = new ArrayList<>();
        endpoints2.add(new UpdateMetadataEndpoint()
            .setHost("host1")
            .setPort(1244)
            .setSecurityProtocol(plaintext.id)
            .setListener(ListenerName.forSecurityProtocol(plaintext).value()));
        if (version > 0) {
            SecurityProtocol ssl = SecurityProtocol.SSL;
            endpoints2.add(new UpdateMetadataEndpoint()
                .setHost("host2")
                .setPort(1234)
                .setSecurityProtocol(ssl.id)
                .setListener(ListenerName.forSecurityProtocol(ssl).value()));
            // Second SSL endpoint deliberately has no listener name set here.
            endpoints2.add(new UpdateMetadataEndpoint()
                .setHost("host2")
                .setPort(1334)
                .setSecurityProtocol(ssl.id));
            // From v3, custom listener names are supported: patch the endpoint at
            // index 1 (the host2:1234 SSL endpoint added above).
            if (version >= 3)
                endpoints2.get(1).setListener("CLIENT");
        }
        List<UpdateMetadataBroker> liveBrokers = Arrays.asList(
            new UpdateMetadataBroker()
                .setId(0)
                .setEndpoints(endpoints1)
                .setRack(rack),
            new UpdateMetadataBroker()
                .setId(1)
                .setEndpoints(endpoints2)
                .setRack(rack)
        );
        return new UpdateMetadataRequest.Builder((short) version, 1, 10, 0, partitionStates,
            liveBrokers).build();
    }
private UpdateMetadataResponse createUpdateMetadataResponse() {
return new UpdateMetadataResponse(new UpdateMetadataResponseData().setErrorCode(Errors.NONE.code()));
}
private SaslHandshakeRequest createSaslHandshakeRequest() {
return new SaslHandshakeRequest.Builder(
new SaslHandshakeRequestData().setMechanism("PLAIN")).build();
}
private SaslHandshakeResponse createSaslHandshakeResponse() {
return new SaslHandshakeResponse(
new SaslHandshakeResponseData()
.setErrorCode(Errors.NONE.code()).setMechanisms(Collections.singletonList("GSSAPI")));
}
private SaslAuthenticateRequest createSaslAuthenticateRequest() {
SaslAuthenticateRequestData data = new SaslAuthenticateRequestData().setAuthBytes(new byte[0]);
return new SaslAuthenticateRequest(data, ApiKeys.SASL_AUTHENTICATE.latestVersion());
}
private SaslAuthenticateResponse createSaslAuthenticateResponse() {
SaslAuthenticateResponseData data = new SaslAuthenticateResponseData()
.setErrorCode(Errors.NONE.code())
.setAuthBytes(new byte[0])
.setSessionLifetimeMs(Long.MAX_VALUE);
return new SaslAuthenticateResponse(data);
}
    /** Builds an ApiVersions request at the builder's default (latest) version. */
    private ApiVersionsRequest createApiVersionRequest() {
        return new ApiVersionsRequest.Builder().build();
    }
private ApiVersionsResponse createApiVersionResponse() {
ApiVersionsResponseKeyCollection apiVersions = new ApiVersionsResponseKeyCollection();
apiVersions.add(new ApiVersionsResponseKey()
.setApiKey((short) 0)
.setMinVersion((short) 0)
.setMaxVersion((short) 2));
return new ApiVersionsResponse(new ApiVersionsResponseData()
.setErrorCode(Errors.NONE.code())
.setThrottleTimeMs(0)
.setApiKeys(apiVersions));
}
private CreateTopicsRequest createCreateTopicRequest(int version) {
return createCreateTopicRequest(version, version >= 1);
}
    /**
     * Builds a CreateTopics request fixture with two topics: one specified by partition
     * count/replication factor, the other by explicit replica assignments plus one config.
     * Note: neither topic has a name set here — TODO confirm the defaults are intended.
     */
    private CreateTopicsRequest createCreateTopicRequest(int version, boolean validateOnly) {
        CreateTopicsRequestData data = new CreateTopicsRequestData()
                .setTimeoutMs(123)
                .setValidateOnly(validateOnly);
        // Topic 1: sized via numPartitions/replicationFactor.
        data.topics().add(new CreatableTopic()
                .setNumPartitions(3)
                .setReplicationFactor((short) 5));

        // Topic 2: sized via explicit replica assignments; added to the collection
        // before its assignments/configs are populated.
        CreatableTopic topic2 = new CreatableTopic();
        data.topics().add(topic2);
        topic2.assignments().add(new CreatableReplicaAssignment()
                .setPartitionIndex(0)
                .setBrokerIds(Arrays.asList(1, 2, 3)));
        topic2.assignments().add(new CreatableReplicaAssignment()
                .setPartitionIndex(1)
                .setBrokerIds(Arrays.asList(2, 3, 4)));
        topic2.configs().add(new CreateableTopicConfig()
                .setName("config1").setValue("value1"));
        return new CreateTopicsRequest.Builder(data).build((short) version);
    }
    /**
     * Builds a CreateTopics response fixture covering three outcomes: an error with a null
     * message, an error with a message, and a success carrying topic configs.
     */
    private CreateTopicsResponse createCreateTopicResponse() {
        CreateTopicsResponseData data = new CreateTopicsResponseData();
        data.topics().add(new CreatableTopicResult()
            .setName("t1")
            .setErrorCode(Errors.INVALID_TOPIC_EXCEPTION.code())
            .setErrorMessage(null));
        data.topics().add(new CreatableTopicResult()
            .setName("t2")
            .setErrorCode(Errors.LEADER_NOT_AVAILABLE.code())
            .setErrorMessage("Leader with id 5 is not available."));
        data.topics().add(new CreatableTopicResult()
            .setName("t3")
            .setErrorCode(Errors.NONE.code())
            .setNumPartitions(1)
            .setReplicationFactor((short) 2)
            .setConfigs(Collections.singletonList(new CreatableTopicConfigs()
                .setName("min.insync.replicas")
                .setValue("2"))));
        return new CreateTopicsResponse(data);
    }
private DeleteTopicsRequest createDeleteTopicsRequest(int version) {
return new DeleteTopicsRequest.Builder(new DeleteTopicsRequestData()
.setTopicNames(Arrays.asList("my_t1", "my_t2"))
.setTimeoutMs(1000)
).build((short) version);
}
    /**
     * Builds a DeleteTopics response fixture covering errors with and without messages
     * plus a success result.
     */
    private DeleteTopicsResponse createDeleteTopicsResponse() {
        DeleteTopicsResponseData data = new DeleteTopicsResponseData();
        data.responses().add(new DeletableTopicResult()
            .setName("t1")
            .setErrorCode(Errors.INVALID_TOPIC_EXCEPTION.code())
            .setErrorMessage("Error Message"));
        data.responses().add(new DeletableTopicResult()
            .setName("t2")
            .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code())
            .setErrorMessage("Error Message"));
        data.responses().add(new DeletableTopicResult()
            .setName("t3")
            .setErrorCode(Errors.NOT_CONTROLLER.code()));
        data.responses().add(new DeletableTopicResult()
            .setName("t4")
            .setErrorCode(Errors.NONE.code()));
        return new DeleteTopicsResponse(data);
    }
private InitProducerIdRequest createInitPidRequest() {
InitProducerIdRequestData requestData = new InitProducerIdRequestData()
.setTransactionalId(null)
.setTransactionTimeoutMs(100);
return new InitProducerIdRequest.Builder(requestData).build();
}
private InitProducerIdResponse createInitPidResponse() {
InitProducerIdResponseData responseData = new InitProducerIdResponseData()
.setErrorCode(Errors.NONE.code())
.setProducerEpoch((short) 3)
.setProducerId(3332)
.setThrottleTimeMs(0);
return new InitProducerIdResponse(responseData);
}
    /**
     * Builds the shared topic collection used by OffsetsForLeaderEpoch request fixtures:
     * topic1 with two partitions at known leader epochs, topic2 with one partition whose
     * current leader epoch is unknown (NO_PARTITION_LEADER_EPOCH sentinel).
     */
    private OffsetForLeaderTopicCollection createOffsetForLeaderTopicCollection() {
        OffsetForLeaderTopicCollection topics = new OffsetForLeaderTopicCollection();
        topics.add(new OffsetForLeaderTopic()
            .setTopic("topic1")
            .setPartitions(Arrays.asList(
                new OffsetForLeaderPartition()
                    .setPartition(0)
                    .setLeaderEpoch(1)
                    .setCurrentLeaderEpoch(0),
                new OffsetForLeaderPartition()
                    .setPartition(1)
                    .setLeaderEpoch(1)
                    .setCurrentLeaderEpoch(0))));
        topics.add(new OffsetForLeaderTopic()
            .setTopic("topic2")
            .setPartitions(Arrays.asList(
                new OffsetForLeaderPartition()
                    .setPartition(2)
                    .setLeaderEpoch(3)
                    // Unknown current epoch on the fetcher side.
                    .setCurrentLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH))));
        return topics;
    }
private OffsetsForLeaderEpochRequest createLeaderEpochRequestForConsumer() {
OffsetForLeaderTopicCollection epochs = createOffsetForLeaderTopicCollection();
return OffsetsForLeaderEpochRequest.Builder.forConsumer(epochs).build();
}
private OffsetsForLeaderEpochRequest createLeaderEpochRequestForReplica(int version, int replicaId) {
OffsetForLeaderTopicCollection epochs = createOffsetForLeaderTopicCollection();
return OffsetsForLeaderEpochRequest.Builder.forFollower((short) version, epochs, replicaId).build();
}
    /**
     * Builds an OffsetsForLeaderEpoch response fixture mirroring the request collection:
     * end offsets for topic1 partitions 0/1 and topic2 partition 2, all successful.
     */
    private OffsetsForLeaderEpochResponse createLeaderEpochResponse() {
        OffsetForLeaderEpochResponseData data = new OffsetForLeaderEpochResponseData();
        data.topics().add(new OffsetForLeaderTopicResult()
            .setTopic("topic1")
            .setPartitions(Arrays.asList(
                new EpochEndOffset()
                    .setPartition(0)
                    .setErrorCode(Errors.NONE.code())
                    .setLeaderEpoch(1)
                    .setEndOffset(0),
                new EpochEndOffset()
                    .setPartition(1)
                    .setErrorCode(Errors.NONE.code())
                    .setLeaderEpoch(1)
                    .setEndOffset(1))));
        data.topics().add(new OffsetForLeaderTopicResult()
            .setTopic("topic2")
            .setPartitions(Arrays.asList(
                new EpochEndOffset()
                    .setPartition(2)
                    .setErrorCode(Errors.NONE.code())
                    .setLeaderEpoch(1)
                    .setEndOffset(1))));

        return new OffsetsForLeaderEpochResponse(data);
    }
private AddPartitionsToTxnRequest createAddPartitionsToTxnRequest() {
return new AddPartitionsToTxnRequest.Builder("tid", 21L, (short) 42,
Collections.singletonList(new TopicPartition("topic", 73))).build();
}
private AddPartitionsToTxnResponse createAddPartitionsToTxnResponse() {
return new AddPartitionsToTxnResponse(0, Collections.singletonMap(new TopicPartition("t", 0), Errors.NONE));
}
private AddOffsetsToTxnRequest createAddOffsetsToTxnRequest() {
return new AddOffsetsToTxnRequest.Builder(
new AddOffsetsToTxnRequestData()
.setTransactionalId("tid")
.setProducerId(21L)
.setProducerEpoch((short) 42)
.setGroupId("gid")
).build();
}
private AddOffsetsToTxnResponse createAddOffsetsToTxnResponse() {
return new AddOffsetsToTxnResponse(new AddOffsetsToTxnResponseData()
.setErrorCode(Errors.NONE.code())
.setThrottleTimeMs(0));
}
private EndTxnRequest createEndTxnRequest() {
return new EndTxnRequest.Builder(
new EndTxnRequestData()
.setTransactionalId("tid")
.setProducerId(21L)
.setProducerEpoch((short) 42)
.setCommitted(TransactionResult.COMMIT.id)
).build();
}
private EndTxnResponse createEndTxnResponse() {
return new EndTxnResponse(
new EndTxnResponseData()
.setErrorCode(Errors.NONE.code())
.setThrottleTimeMs(0)
);
}
private WriteTxnMarkersRequest createWriteTxnMarkersRequest() {
List<TopicPartition> partitions = Collections.singletonList(new TopicPartition("topic", 73));
WriteTxnMarkersRequest.TxnMarkerEntry txnMarkerEntry = new WriteTxnMarkersRequest.TxnMarkerEntry(21L, (short) 42, 73, TransactionResult.ABORT, partitions);
return new WriteTxnMarkersRequest.Builder(ApiKeys.WRITE_TXN_MARKERS.latestVersion(), Collections.singletonList(txnMarkerEntry)).build();
}
private WriteTxnMarkersResponse createWriteTxnMarkersResponse() {
final Map<TopicPartition, Errors> errorPerPartitions = new HashMap<>();
errorPerPartitions.put(new TopicPartition("topic", 73), Errors.NONE);
final Map<Long, Map<TopicPartition, Errors>> response = new HashMap<>();
response.put(21L, errorPerPartitions);
return new WriteTxnMarkersResponse(response);
}
    /**
     * Builds a TxnOffsetCommit request fixture with two committed offsets (one without
     * metadata/epoch, one with both). Versions below 3 use the builder overload without
     * consumer group metadata; version 3+ also carries member id, generation and instance id.
     */
    private TxnOffsetCommitRequest createTxnOffsetCommitRequest(int version) {
        final Map<TopicPartition, TxnOffsetCommitRequest.CommittedOffset> offsets = new HashMap<>();
        offsets.put(new TopicPartition("topic", 73),
                    new TxnOffsetCommitRequest.CommittedOffset(100, null, Optional.empty()));
        offsets.put(new TopicPartition("topic", 74),
                    new TxnOffsetCommitRequest.CommittedOffset(100, "blah", Optional.of(27)));

        if (version < 3) {
            return new TxnOffsetCommitRequest.Builder("transactionalId",
                "groupId",
                21L,
                (short) 42,
                offsets,
                false).build();
        } else {
            return new TxnOffsetCommitRequest.Builder("transactionalId",
                "groupId",
                21L,
                (short) 42,
                offsets,
                "member",
                2,
                Optional.of("instance"),
                false).build();
        }
    }
    /**
     * Builds a TxnOffsetCommit request fixture like {@code createTxnOffsetCommitRequest},
     * but with the auto-downgrade flag enabled (last builder argument {@code true}).
     * Note: the {@code version} parameter is unused here — TODO confirm that is intended.
     */
    private TxnOffsetCommitRequest createTxnOffsetCommitRequestWithAutoDowngrade(int version) {
        final Map<TopicPartition, TxnOffsetCommitRequest.CommittedOffset> offsets = new HashMap<>();
        offsets.put(new TopicPartition("topic", 73),
                    new TxnOffsetCommitRequest.CommittedOffset(100, null, Optional.empty()));
        offsets.put(new TopicPartition("topic", 74),
                    new TxnOffsetCommitRequest.CommittedOffset(100, "blah", Optional.of(27)));

        return new TxnOffsetCommitRequest.Builder("transactionalId",
            "groupId",
            21L,
            (short) 42,
            offsets,
            "member",
            2,
            Optional.of("instance"),
            true).build();
    }
private TxnOffsetCommitResponse createTxnOffsetCommitResponse() {
final Map<TopicPartition, Errors> errorPerPartitions = new HashMap<>();
errorPerPartitions.put(new TopicPartition("topic", 73), Errors.NONE);
return new TxnOffsetCommitResponse(0, errorPerPartitions);
}
private DescribeAclsRequest createDescribeAclsRequest() {
return new DescribeAclsRequest.Builder(new AclBindingFilter(
new ResourcePatternFilter(ResourceType.TOPIC, "mytopic", PatternType.LITERAL),
new AccessControlEntryFilter(null, null, AclOperation.ANY, AclPermissionType.ANY))).build();
}
    /**
     * Builds a DescribeAcls response fixture with one resource ("mytopic", literal) carrying
     * a single ALLOW-WRITE ACL for the anonymous principal from any host.
     */
    private DescribeAclsResponse createDescribeAclsResponse() {
        DescribeAclsResponseData data = new DescribeAclsResponseData()
                .setErrorCode(Errors.NONE.code())
                .setErrorMessage(Errors.NONE.message())
                .setThrottleTimeMs(0)
                .setResources(Collections.singletonList(new DescribeAclsResource()
                        .setResourceType(ResourceType.TOPIC.code())
                        .setResourceName("mytopic")
                        .setPatternType(PatternType.LITERAL.code())
                        .setAcls(Collections.singletonList(new AclDescription()
                                .setHost("*")
                                .setOperation(AclOperation.WRITE.code())
                                .setPermissionType(AclPermissionType.ALLOW.code())
                                .setPrincipal("User:ANONYMOUS")))));
        return new DescribeAclsResponse(data);
    }
private CreateAclsRequest createCreateAclsRequest() {
List<CreateAclsRequestData.AclCreation> creations = new ArrayList<>();
creations.add(CreateAclsRequest.aclCreation(new AclBinding(
new ResourcePattern(ResourceType.TOPIC, "mytopic", PatternType.LITERAL),
new AccessControlEntry("User:ANONYMOUS", "127.0.0.1", AclOperation.READ, AclPermissionType.ALLOW))));
creations.add(CreateAclsRequest.aclCreation(new AclBinding(
new ResourcePattern(ResourceType.GROUP, "mygroup", PatternType.LITERAL),
new AccessControlEntry("User:ANONYMOUS", "*", AclOperation.WRITE, AclPermissionType.DENY))));
CreateAclsRequestData data = new CreateAclsRequestData().setCreations(creations);
return new CreateAclsRequest.Builder(data).build();
}
    /**
     * Builds a CreateAcls response fixture with two results: one all-default, and one that
     * sets NONE alongside a non-null error message.
     * NOTE(review): NONE paired with message "Foo bar" looks deliberate (exercising the
     * message field independently of the code) — confirm before "fixing".
     */
    private CreateAclsResponse createCreateAclsResponse() {
        return new CreateAclsResponse(new CreateAclsResponseData().setResults(asList(
            new CreateAclsResponseData.AclCreationResult(),
            new CreateAclsResponseData.AclCreationResult()
                .setErrorCode(Errors.NONE.code())
                .setErrorMessage("Foo bar"))));
    }
    /**
     * Builds a DeleteAcls request fixture with two wildcard-style filters that differ only
     * in principal ("User:ANONYMOUS" vs "User:bob"); name and host filters are null (match any).
     */
    private DeleteAclsRequest createDeleteAclsRequest() {
        DeleteAclsRequestData data = new DeleteAclsRequestData().setFilters(asList(
            new DeleteAclsRequestData.DeleteAclsFilter()
                .setResourceTypeFilter(ResourceType.ANY.code())
                .setResourceNameFilter(null)
                .setPatternTypeFilter(PatternType.LITERAL.code())
                .setPrincipalFilter("User:ANONYMOUS")
                .setHostFilter(null)
                .setOperation(AclOperation.ANY.code())
                .setPermissionType(AclPermissionType.ANY.code()),
            new DeleteAclsRequestData.DeleteAclsFilter()
                .setResourceTypeFilter(ResourceType.ANY.code())
                .setResourceNameFilter(null)
                .setPatternTypeFilter(PatternType.LITERAL.code())
                .setPrincipalFilter("User:bob")
                .setHostFilter(null)
                .setOperation(AclOperation.ANY.code())
                .setPermissionType(AclPermissionType.ANY.code())
        ));
        return new DeleteAclsRequest.Builder(data).build();
    }
    /**
     * Builds a DeleteAcls response fixture: one filter result matching two topic ACLs
     * (an ALLOW and a DENY), and a second filter result that failed with SECURITY_DISABLED.
     */
    private DeleteAclsResponse createDeleteAclsResponse(int version) {
        List<DeleteAclsResponseData.DeleteAclsFilterResult> filterResults = new ArrayList<>();
        filterResults.add(new DeleteAclsResponseData.DeleteAclsFilterResult().setMatchingAcls(asList(
                new DeleteAclsResponseData.DeleteAclsMatchingAcl()
                    .setResourceType(ResourceType.TOPIC.code())
                    .setResourceName("mytopic3")
                    .setPatternType(PatternType.LITERAL.code())
                    .setPrincipal("User:ANONYMOUS")
                    .setHost("*")
                    .setOperation(AclOperation.DESCRIBE.code())
                    .setPermissionType(AclPermissionType.ALLOW.code()),
                new DeleteAclsResponseData.DeleteAclsMatchingAcl()
                    .setResourceType(ResourceType.TOPIC.code())
                    .setResourceName("mytopic4")
                    .setPatternType(PatternType.LITERAL.code())
                    .setPrincipal("User:ANONYMOUS")
                    .setHost("*")
                    .setOperation(AclOperation.DESCRIBE.code())
                    .setPermissionType(AclPermissionType.DENY.code()))));
        filterResults.add(new DeleteAclsResponseData.DeleteAclsFilterResult()
            .setErrorCode(Errors.SECURITY_DISABLED.code())
            .setErrorMessage("No security"));
        return new DeleteAclsResponse(new DeleteAclsResponseData()
            .setThrottleTimeMs(0)
            .setFilterResults(filterResults), (short) version);
    }
private DescribeConfigsRequest createDescribeConfigsRequest(int version) {
return new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData()
.setResources(asList(
new DescribeConfigsRequestData.DescribeConfigsResource()
.setResourceType(ConfigResource.Type.BROKER.id())
.setResourceName("0"),
new DescribeConfigsRequestData.DescribeConfigsResource()
.setResourceType(ConfigResource.Type.TOPIC.id())
.setResourceName("topic"))))
.build((short) version);
}
    /**
     * Builds a DescribeConfigs request fixture exercising the three configurationKeys
     * shapes: a populated list, null (meaning all keys), and an explicitly empty list.
     */
    private DescribeConfigsRequest createDescribeConfigsRequestWithConfigEntries(int version) {
        return new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData()
                .setResources(asList(
                        new DescribeConfigsRequestData.DescribeConfigsResource()
                                .setResourceType(ConfigResource.Type.BROKER.id())
                                .setResourceName("0")
                                .setConfigurationKeys(asList("foo", "bar")),
                        new DescribeConfigsRequestData.DescribeConfigsResource()
                                .setResourceType(ConfigResource.Type.TOPIC.id())
                                .setResourceName("topic")
                                .setConfigurationKeys(null),
                        new DescribeConfigsRequestData.DescribeConfigsResource()
                                .setResourceType(ConfigResource.Type.TOPIC.id())
                                .setResourceName("topic a")
                                .setConfigurationKeys(emptyList())))).build((short) version);
    }
private DescribeConfigsRequest createDescribeConfigsRequestWithDocumentation(int version) {
DescribeConfigsRequestData data = new DescribeConfigsRequestData()
.setResources(asList(
new DescribeConfigsRequestData.DescribeConfigsResource()
.setResourceType(ConfigResource.Type.BROKER.id())
.setResourceName("0")
.setConfigurationKeys(asList("foo", "bar"))));
if (version == 3) {
data.setIncludeDocumentation(true);
}
return new DescribeConfigsRequest.Builder(data).build((short) version);
}
private DescribeConfigsResponse createDescribeConfigsResponse(short version) {
return new DescribeConfigsResponse(new DescribeConfigsResponseData().setResults(asList(
new DescribeConfigsResult()
.setErrorCode(Errors.NONE.code())
.setResourceType(ConfigResource.Type.BROKER.id())
.setResourceName("0")
.setConfigs(asList(
new DescribeConfigsResourceResult()
.setName("config_name")
.setValue("config_value")
// Note: the v0 default for this field that should be exposed to callers is
// context-dependent. For example, if the resource is a broker, this should default to 4.
// -1 is just a placeholder value.
.setConfigSource(version == 0 ? DescribeConfigsResponse.ConfigSource.STATIC_BROKER_CONFIG.id() : DescribeConfigsResponse.ConfigSource.DYNAMIC_BROKER_CONFIG.id)
.setIsSensitive(true).setReadOnly(false)
.setSynonyms(emptyList()),
new DescribeConfigsResourceResult()
.setName("yet_another_name")
.setValue("yet another value")
.setConfigSource(version == 0 ? DescribeConfigsResponse.ConfigSource.STATIC_BROKER_CONFIG.id() : DescribeConfigsResponse.ConfigSource.DEFAULT_CONFIG.id)
.setIsSensitive(false).setReadOnly(true)
.setSynonyms(emptyList())
.setConfigType(ConfigType.BOOLEAN.id())
.setDocumentation("some description"),
new DescribeConfigsResourceResult()
.setName("another_name")
.setValue("another value")
.setConfigSource(version == 0 ? DescribeConfigsResponse.ConfigSource.STATIC_BROKER_CONFIG.id() : DescribeConfigsResponse.ConfigSource.DEFAULT_CONFIG.id)
.setIsSensitive(false).setReadOnly(true)
.setSynonyms(emptyList())
)),
new DescribeConfigsResult()
.setErrorCode(Errors.NONE.code())
.setResourceType(ConfigResource.Type.TOPIC.id())
.setResourceName("topic")
.setConfigs(emptyList())
)));
}
private AlterConfigsRequest createAlterConfigsRequest() {
Map<ConfigResource, AlterConfigsRequest.Config> configs = new HashMap<>();
List<AlterConfigsRequest.ConfigEntry> configEntries = asList(
new AlterConfigsRequest.ConfigEntry("config_name", "config_value"),
new AlterConfigsRequest.ConfigEntry("another_name", "another value")
);
configs.put(new ConfigResource(ConfigResource.Type.BROKER, "0"), new AlterConfigsRequest.Config(configEntries));
configs.put(new ConfigResource(ConfigResource.Type.TOPIC, "topic"),
new AlterConfigsRequest.Config(Collections.<AlterConfigsRequest.ConfigEntry>emptyList()));
return new AlterConfigsRequest.Builder(configs, false).build((short) 0);
}
private AlterConfigsResponse createAlterConfigsResponse() {
AlterConfigsResponseData data = new AlterConfigsResponseData()
.setThrottleTimeMs(20);
data.responses().add(new AlterConfigsResponseData.AlterConfigsResourceResponse()
.setErrorCode(Errors.NONE.code())
.setErrorMessage(null)
.setResourceName("0")
.setResourceType(ConfigResource.Type.BROKER.id()));
data.responses().add(new AlterConfigsResponseData.AlterConfigsResourceResponse()
.setErrorCode(Errors.INVALID_REQUEST.code())
.setErrorMessage("This request is invalid")
.setResourceName("topic")
.setResourceType(ConfigResource.Type.TOPIC.id()));
return new AlterConfigsResponse(data);
}
private CreatePartitionsRequest createCreatePartitionsRequest(int version) {
CreatePartitionsTopicCollection topics = new CreatePartitionsTopicCollection();
topics.add(new CreatePartitionsTopic()
.setName("my_topic")
.setCount(3)
);
topics.add(new CreatePartitionsTopic()
.setName("my_other_topic")
.setCount(3)
);
CreatePartitionsRequestData data = new CreatePartitionsRequestData()
.setTimeoutMs(0)
.setValidateOnly(false)
.setTopics(topics);
return new CreatePartitionsRequest(data, (short) version);
}
    /**
     * Builds a CreatePartitions request fixture like {@code createCreatePartitionsRequest},
     * but with explicit broker assignments for the new partitions of both topics.
     */
    private CreatePartitionsRequest createCreatePartitionsRequestWithAssignments(int version) {
        CreatePartitionsTopicCollection topics = new CreatePartitionsTopicCollection();
        // Single new partition assigned to broker 2.
        CreatePartitionsAssignment myTopicAssignment = new CreatePartitionsAssignment()
            .setBrokerIds(Collections.singletonList(2));
        topics.add(new CreatePartitionsTopic()
            .setName("my_topic")
            .setCount(3)
            .setAssignments(Collections.singletonList(myTopicAssignment))
        );

        // Two new partitions with distinct replica sets.
        topics.add(new CreatePartitionsTopic()
            .setName("my_other_topic")
            .setCount(3)
            .setAssignments(asList(
                new CreatePartitionsAssignment().setBrokerIds(asList(2, 3)),
                new CreatePartitionsAssignment().setBrokerIds(asList(3, 1))
            ))
        );

        CreatePartitionsRequestData data = new CreatePartitionsRequestData()
            .setTimeoutMs(0)
            .setValidateOnly(false)
            .setTopics(topics);

        return new CreatePartitionsRequest(data, (short) version);
    }
private CreatePartitionsResponse createCreatePartitionsResponse() {
List<CreatePartitionsTopicResult> results = new LinkedList<>();
results.add(new CreatePartitionsTopicResult()
.setName("my_topic")
.setErrorCode(Errors.INVALID_REPLICA_ASSIGNMENT.code()));
results.add(new CreatePartitionsTopicResult()
.setName("my_topic")
.setErrorCode(Errors.NONE.code()));
CreatePartitionsResponseData data = new CreatePartitionsResponseData()
.setThrottleTimeMs(42)
.setResults(results);
return new CreatePartitionsResponse(data);
}
private CreateDelegationTokenRequest createCreateTokenRequest() {
List<CreatableRenewers> renewers = new ArrayList<>();
renewers.add(new CreatableRenewers()
.setPrincipalType("User")
.setPrincipalName("user1"));
renewers.add(new CreatableRenewers()
.setPrincipalType("User")
.setPrincipalName("user2"));
return new CreateDelegationTokenRequest.Builder(new CreateDelegationTokenRequestData()
.setRenewers(renewers)
.setMaxLifetimeMs(System.currentTimeMillis())).build();
}
    /**
     * Builds a successful CreateDelegationToken response fixture for principal User:user1.
     * All timestamps are set to "now", so serialized output is not byte-stable across calls.
     */
    private CreateDelegationTokenResponse createCreateTokenResponse() {
        CreateDelegationTokenResponseData data = new CreateDelegationTokenResponseData()
            .setThrottleTimeMs(20)
            .setErrorCode(Errors.NONE.code())
            .setPrincipalType("User")
            .setPrincipalName("user1")
            .setIssueTimestampMs(System.currentTimeMillis())
            .setExpiryTimestampMs(System.currentTimeMillis())
            .setMaxTimestampMs(System.currentTimeMillis())
            .setTokenId("token1")
            .setHmac("test".getBytes());
        return new CreateDelegationTokenResponse(data);
    }
private RenewDelegationTokenRequest createRenewTokenRequest() {
RenewDelegationTokenRequestData data = new RenewDelegationTokenRequestData()
.setHmac("test".getBytes())
.setRenewPeriodMs(System.currentTimeMillis());
return new RenewDelegationTokenRequest.Builder(data).build();
}
private RenewDelegationTokenResponse createRenewTokenResponse() {
RenewDelegationTokenResponseData data = new RenewDelegationTokenResponseData()
.setThrottleTimeMs(20)
.setErrorCode(Errors.NONE.code())
.setExpiryTimestampMs(System.currentTimeMillis());
return new RenewDelegationTokenResponse(data);
}
private ExpireDelegationTokenRequest createExpireTokenRequest() {
ExpireDelegationTokenRequestData data = new ExpireDelegationTokenRequestData()
.setHmac("test".getBytes())
.setExpiryTimePeriodMs(System.currentTimeMillis());
return new ExpireDelegationTokenRequest.Builder(data).build();
}
private ExpireDelegationTokenResponse createExpireTokenResponse() {
ExpireDelegationTokenResponseData data = new ExpireDelegationTokenResponseData()
.setThrottleTimeMs(20)
.setErrorCode(Errors.NONE.code())
.setExpiryTimestampMs(System.currentTimeMillis());
return new ExpireDelegationTokenResponse(data);
}
private DescribeDelegationTokenRequest createDescribeTokenRequest() {
List<KafkaPrincipal> owners = new ArrayList<>();
owners.add(SecurityUtils.parseKafkaPrincipal("User:user1"));
owners.add(SecurityUtils.parseKafkaPrincipal("User:user2"));
return new DescribeDelegationTokenRequest.Builder(owners).build();
}
    /**
     * Builds a DescribeDelegationToken response fixture listing two tokens (different ids
     * and owners, identical renewer list and HMAC). Timestamps are "now", so the payload
     * is not byte-stable across calls.
     */
    private DescribeDelegationTokenResponse createDescribeTokenResponse() {
        List<KafkaPrincipal> renewers = new ArrayList<>();
        renewers.add(SecurityUtils.parseKafkaPrincipal("User:user1"));
        renewers.add(SecurityUtils.parseKafkaPrincipal("User:user2"));

        List<DelegationToken> tokenList = new LinkedList<>();

        TokenInformation tokenInfo1 = new TokenInformation("1", SecurityUtils.parseKafkaPrincipal("User:owner"), renewers,
            System.currentTimeMillis(), System.currentTimeMillis(), System.currentTimeMillis());

        TokenInformation tokenInfo2 = new TokenInformation("2", SecurityUtils.parseKafkaPrincipal("User:owner1"), renewers,
            System.currentTimeMillis(), System.currentTimeMillis(), System.currentTimeMillis());

        tokenList.add(new DelegationToken(tokenInfo1, "test".getBytes()));
        tokenList.add(new DelegationToken(tokenInfo2, "test".getBytes()));

        return new DescribeDelegationTokenResponse(20, Errors.NONE, tokenList);
    }
private ElectLeadersRequest createElectLeadersRequestNullPartitions() {
return new ElectLeadersRequest.Builder(ElectionType.PREFERRED, null, 100).build((short) 1);
}
private ElectLeadersRequest createElectLeadersRequest() {
List<TopicPartition> partitions = asList(new TopicPartition("data", 1), new TopicPartition("data", 2));
return new ElectLeadersRequest.Builder(ElectionType.PREFERRED, partitions, 100).build((short) 1);
}
    /**
     * Builds an ElectLeaders response fixture for one topic with two partition results:
     * partition 0 succeeded, partition 1 failed with UNKNOWN_TOPIC_OR_PARTITION.
     */
    private ElectLeadersResponse createElectLeadersResponse() {
        String topic = "myTopic";
        List<ReplicaElectionResult> electionResults = new ArrayList<>();
        ReplicaElectionResult electionResult = new ReplicaElectionResult();
        electionResults.add(electionResult);
        electionResult.setTopic(topic);
        // Add partition 1 result
        PartitionResult partitionResult = new PartitionResult();
        partitionResult.setPartitionId(0);
        partitionResult.setErrorCode(ApiError.NONE.error().code());
        partitionResult.setErrorMessage(ApiError.NONE.message());
        electionResult.partitionResult().add(partitionResult);

        // Add partition 2 result
        partitionResult = new PartitionResult();
        partitionResult.setPartitionId(1);
        partitionResult.setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code());
        partitionResult.setErrorMessage(Errors.UNKNOWN_TOPIC_OR_PARTITION.message());
        electionResult.partitionResult().add(partitionResult);

        return new ElectLeadersResponse(200, Errors.NONE.code(), electionResults, ApiKeys.ELECT_LEADERS.latestVersion());
    }
private IncrementalAlterConfigsRequest createIncrementalAlterConfigsRequest() {
IncrementalAlterConfigsRequestData data = new IncrementalAlterConfigsRequestData();
AlterableConfig alterableConfig = new AlterableConfig()
.setName("retention.ms")
.setConfigOperation((byte) 0)
.setValue("100");
IncrementalAlterConfigsRequestData.AlterableConfigCollection alterableConfigs = new IncrementalAlterConfigsRequestData.AlterableConfigCollection();
alterableConfigs.add(alterableConfig);
data.resources().add(new AlterConfigsResource()
.setResourceName("testtopic")
.setResourceType(ResourceType.TOPIC.code())
.setConfigs(alterableConfigs));
return new IncrementalAlterConfigsRequest.Builder(data).build((short) 0);
}
    /**
     * Builds an IncrementalAlterConfigs response fixture for one topic resource.
     * NOTE(review): NONE is paired with error message "Duplicate Keys" — likely deliberate
     * test data exercising the message field independently of the code; confirm before changing.
     */
    private IncrementalAlterConfigsResponse createIncrementalAlterConfigsResponse() {
        IncrementalAlterConfigsResponseData data = new IncrementalAlterConfigsResponseData();

        data.responses().add(new AlterConfigsResourceResponse()
                .setResourceName("testtopic")
                .setResourceType(ResourceType.TOPIC.code())
                .setErrorCode(Errors.NONE.code())
                .setErrorMessage("Duplicate Keys"));
        return new IncrementalAlterConfigsResponse(data);
    }
private AlterPartitionReassignmentsRequest createAlterPartitionReassignmentsRequest() {
AlterPartitionReassignmentsRequestData data = new AlterPartitionReassignmentsRequestData();
data.topics().add(
new AlterPartitionReassignmentsRequestData.ReassignableTopic().setName("topic").setPartitions(
Collections.singletonList(
new AlterPartitionReassignmentsRequestData.ReassignablePartition().setPartitionIndex(0).setReplicas(null)
)
)
);
return new AlterPartitionReassignmentsRequest.Builder(data).build((short) 0);
}
    /**
     * Builds an AlterPartitionReassignments response fixture: one topic, one partition,
     * NONE error code with an explanatory message.
     */
    private AlterPartitionReassignmentsResponse createAlterPartitionReassignmentsResponse() {
        AlterPartitionReassignmentsResponseData data = new AlterPartitionReassignmentsResponseData();
        data.responses().add(
                new AlterPartitionReassignmentsResponseData.ReassignableTopicResponse()
                        .setName("topic")
                        .setPartitions(Collections.singletonList(
                                new AlterPartitionReassignmentsResponseData.ReassignablePartitionResponse()
                                        .setPartitionIndex(0)
                                        .setErrorCode(Errors.NONE.code())
                                        .setErrorMessage("No reassignment is in progress for topic topic partition 0")
                                )
                        )
        );
        return new AlterPartitionReassignmentsResponse(data);
    }
private ListPartitionReassignmentsRequest createListPartitionReassignmentsRequest() {
ListPartitionReassignmentsRequestData data = new ListPartitionReassignmentsRequestData();
data.setTopics(
Collections.singletonList(
new ListPartitionReassignmentsRequestData.ListPartitionReassignmentsTopics()
.setName("topic")
.setPartitionIndexes(Collections.singletonList(1))
)
);
return new ListPartitionReassignmentsRequest.Builder(data).build((short) 0);
}
    /**
     * Builds a ListPartitionReassignments response fixture: one ongoing reassignment for
     * "topic" partition 0 (replicas [1,2], adding 2, removing 1).
     */
    private ListPartitionReassignmentsResponse createListPartitionReassignmentsResponse() {
        ListPartitionReassignmentsResponseData data = new ListPartitionReassignmentsResponseData();
        data.setTopics(Collections.singletonList(
                new ListPartitionReassignmentsResponseData.OngoingTopicReassignment()
                        .setName("topic")
                        .setPartitions(Collections.singletonList(
                                new ListPartitionReassignmentsResponseData.OngoingPartitionReassignment()
                                        .setPartitionIndex(0)
                                        .setReplicas(Arrays.asList(1, 2))
                                        .setAddingReplicas(Collections.singletonList(2))
                                        .setRemovingReplicas(Collections.singletonList(1))
                                )
                        )
        ));
        return new ListPartitionReassignmentsResponse(data);
    }
private OffsetDeleteRequest createOffsetDeleteRequest() {
OffsetDeleteRequestTopicCollection topics = new OffsetDeleteRequestTopicCollection();
topics.add(new OffsetDeleteRequestTopic()
.setName("topic1")
.setPartitions(Collections.singletonList(
new OffsetDeleteRequestPartition()
.setPartitionIndex(0)
)
)
);
OffsetDeleteRequestData data = new OffsetDeleteRequestData();
data.setGroupId("group1");
data.setTopics(topics);
return new OffsetDeleteRequest.Builder(data).build((short) 0);
}
private OffsetDeleteResponse createOffsetDeleteResponse() {
OffsetDeleteResponsePartitionCollection partitions = new OffsetDeleteResponsePartitionCollection();
partitions.add(new OffsetDeleteResponsePartition()
.setPartitionIndex(0)
.setErrorCode(Errors.NONE.code())
);
OffsetDeleteResponseTopicCollection topics = new OffsetDeleteResponseTopicCollection();
topics.add(new OffsetDeleteResponseTopic()
.setName("topic1")
.setPartitions(partitions)
);
OffsetDeleteResponseData data = new OffsetDeleteResponseData();
data.setErrorCode(Errors.NONE.code());
data.setTopics(topics);
return new OffsetDeleteResponse(data);
}
    /**
     * Builds an AlterReplicaLogDirs request fixture moving "topic" partition 0 to /data0.
     * The topic collection is constructed from a singleton list's iterator.
     */
    private AlterReplicaLogDirsRequest createAlterReplicaLogDirsRequest() {
        AlterReplicaLogDirsRequestData data = new AlterReplicaLogDirsRequestData();
        data.dirs().add(
                new AlterReplicaLogDirsRequestData.AlterReplicaLogDir()
                        .setPath("/data0")
                        .setTopics(new AlterReplicaLogDirTopicCollection(Collections.singletonList(
                                new AlterReplicaLogDirTopic()
                                        .setPartitions(singletonList(0))
                                        .setName("topic")
                        ).iterator())
                        )
        );
        return new AlterReplicaLogDirsRequest.Builder(data).build((short) 0);
    }
    /**
     * Builds a successful AlterReplicaLogDirs response fixture for "topic" partition 0.
     */
    private AlterReplicaLogDirsResponse createAlterReplicaLogDirsResponse() {
        AlterReplicaLogDirsResponseData data = new AlterReplicaLogDirsResponseData();
        data.results().add(
                new AlterReplicaLogDirsResponseData.AlterReplicaLogDirTopicResult()
                        .setTopicName("topic")
                        .setPartitions(Collections.singletonList(
                                new AlterReplicaLogDirsResponseData.AlterReplicaLogDirPartitionResult()
                                        .setPartitionIndex(0)
                                        .setErrorCode(Errors.NONE.code())
                        )
                )
        );
        return new AlterReplicaLogDirsResponse(data);
    }
private DescribeClientQuotasRequest createDescribeClientQuotasRequest() {
ClientQuotaFilter filter = ClientQuotaFilter.all();
return new DescribeClientQuotasRequest.Builder(filter).build((short) 0);
}
    /**
     * Builds a DescribeClientQuotas response fixture: one user entity ("user") with a
     * single request_percentage=1.0 quota value.
     */
    private DescribeClientQuotasResponse createDescribeClientQuotasResponse() {
        DescribeClientQuotasResponseData data = new DescribeClientQuotasResponseData().setEntries(asList(
            new DescribeClientQuotasResponseData.EntryData()
                .setEntity(asList(new DescribeClientQuotasResponseData.EntityData()
                    .setEntityType(ClientQuotaEntity.USER)
                    .setEntityName("user")))
                .setValues(asList(new DescribeClientQuotasResponseData.ValueData()
                    .setKey("request_percentage")
                    .setValue(1.0)))));
        return new DescribeClientQuotasResponse(data);
    }
private AlterClientQuotasRequest createAlterClientQuotasRequest() {
ClientQuotaEntity entity = new ClientQuotaEntity(Collections.singletonMap(ClientQuotaEntity.USER, "user"));
ClientQuotaAlteration.Op op = new ClientQuotaAlteration.Op("request_percentage", 2.0);
ClientQuotaAlteration alteration = new ClientQuotaAlteration(entity, Collections.singleton(op));
return new AlterClientQuotasRequest.Builder(Collections.singleton(alteration), false).build((short) 0);
}
    /**
     * Builds an AlterClientQuotas response fixture with one entry for the user entity "user"
     * (no error fields set, so defaults apply).
     */
    private AlterClientQuotasResponse createAlterClientQuotasResponse() {
        AlterClientQuotasResponseData data = new AlterClientQuotasResponseData()
            .setEntries(asList(new AlterClientQuotasResponseData.EntryData()
                .setEntity(asList(new AlterClientQuotasResponseData.EntityData()
                    .setEntityType(ClientQuotaEntity.USER)
                    .setEntityName("user")))));
        return new AlterClientQuotasResponse(data);
    }
    /**
     * Check that all error codes in the response get included in {@link AbstractResponse#errorCounts()}.
     */
    @Test
    public void testErrorCountsIncludesNone() {
        // Each expected count equals the number of NONE-coded sub-results the
        // corresponding create*Response() helper builds (e.g. 2 for responses
        // with a top-level and a per-partition error, 3 for multi-entry ones).
        assertEquals(Integer.valueOf(1), createAddOffsetsToTxnResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createAddPartitionsToTxnResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createAlterClientQuotasResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createAlterConfigsResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(2), createAlterPartitionReassignmentsResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createAlterReplicaLogDirsResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createApiVersionResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createControlledShutdownResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(2), createCreateAclsResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createCreatePartitionsResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createCreateTokenResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createCreateTopicResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createDeleteAclsResponse(ApiKeys.DELETE_ACLS.latestVersion()).errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createDeleteGroupsResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createDeleteTopicsResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createDescribeAclsResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createDescribeClientQuotasResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(2), createDescribeConfigsResponse(DESCRIBE_CONFIGS.latestVersion()).errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createDescribeGroupResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createDescribeTokenResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(2), createElectLeadersResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createEndTxnResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createExpireTokenResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(3), createFetchResponse(123).errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createFindCoordinatorResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createHeartBeatResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createIncrementalAlterConfigsResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createJoinGroupResponse(JOIN_GROUP.latestVersion()).errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(2), createLeaderAndIsrResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(3), createLeaderEpochResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createLeaveGroupResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createListGroupsResponse(LIST_GROUPS.latestVersion()).errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createListOffsetResponse(LIST_OFFSETS.latestVersion()).errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createListPartitionReassignmentsResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(3), createMetadataResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createOffsetCommitResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(2), createOffsetDeleteResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(3), createOffsetFetchResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createProduceResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createRenewTokenResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createSaslAuthenticateResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createSaslHandshakeResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(2), createStopReplicaResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createSyncGroupResponse(SYNC_GROUP.latestVersion()).errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createTxnOffsetCommitResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createUpdateMetadataResponse().errorCounts().get(Errors.NONE));
        assertEquals(Integer.valueOf(1), createWriteTxnMarkersResponse().errorCounts().get(Errors.NONE));
    }
}
|
package com.track.trackxtreme;
import android.location.Location;
import com.track.trackxtreme.data.TrackStatus;
import com.track.trackxtreme.data.track.Track;
import com.track.trackxtreme.data.track.TrackPoint;
import com.track.trackxtreme.data.track.TrackRecord;
import java.util.ArrayList;
import java.util.Calendar;
/**
* Created by marko on 30/04/2017.
*/
/**
 * Base {@link TrackUpdater} implementation that accumulates recorded
 * {@link TrackPoint}s, keeps a running travelled distance, and mirrors
 * recording state changes onto the owning {@link MainActivity} dashboard.
 */
public abstract class AbstractTrackListener implements TrackUpdater {
    protected MainActivity mainActivity;
    // Last location already folded into the running distance total.
    private Location previousLocation;
    // Total travelled distance in meters; each segment is truncated to cm precision.
    private double distance = 0;
    protected ArrayList<TrackPoint> trackpoints;
    protected TrackStatus trackingStatus = TrackStatus.NONE;
    protected TrackRecord trackRecord;

    public AbstractTrackListener(MainActivity mainActivity, Track track) {
        this.mainActivity = mainActivity;
        trackpoints = new ArrayList<TrackPoint>();
        trackRecord = new TrackRecord(track);
    }

    public void setPreviousLocation(Location previousLocation) {
        this.previousLocation = previousLocation;
    }

    public Location getPreviousLocation() {
        return previousLocation;
    }

    /**
     * Adds the segment from the previous location to {@code location} to the
     * running distance and pushes the new total to the dashboard.
     */
    public void updateDashboard(Location location) {
        if (previousLocation != null) {
            // Truncate the segment to centimeter precision before accumulating.
            // BUG FIX: the original divided by the int literal 100, so integer
            // division silently discarded the sub-meter part of every segment.
            distance += ((int) (previousLocation.distanceTo(location) * 100)) / 100.0;
        }
        mainActivity.setDashboardDistance(distance);
        setPreviousLocation(location);
    }

    public double getDistance() {
        return distance;
    }

    /** @return milliseconds elapsed since the first recorded point, or 0 if none. */
    @Override
    public long timeElapsed() {
        long time = 0;
        long timeInMillis = Calendar.getInstance().getTimeInMillis();
        if (trackpoints.size() > 0) {
            TrackPoint trackPoint = trackpoints.get(0);
            time = timeInMillis - trackPoint.getLocation().getTime();
        }
        return time;
    }

    @Override
    public TrackStatus getTrackStatus() {
        return trackingStatus;
    }

    /**
     * Updates the recording state. Any state beyond WAITING resets the distance
     * and starts the dashboard timer; otherwise the timer is stopped.
     */
    @Override
    public void setTrackStatus(TrackStatus status) {
        trackingStatus = status;
        if (trackingStatus.ordinal() > TrackStatus.WAITING.ordinal()) {
            distance = 0;
            mainActivity.startTimer();
        } else {
            mainActivity.stopTimer();
        }
    }

    @Override
    public TrackRecord getTrackRecord() {
        return trackRecord;
    }

    @Override
    public Track getTrack() {
        return trackRecord.getTrack();
    }

    @Override
    public ArrayList<TrackPoint> getTrackpoints() {
        return trackpoints;
    }

    /** Rebinds the listener to a new track, clearing all recorded state. */
    public void setTrack(Track track) {
        trackpoints = new ArrayList<TrackPoint>();
        trackRecord = new TrackRecord(track);
        setTrackStatus(TrackStatus.NONE);
    }
}
|
package io.ceph.rgw.client.model;
import io.ceph.rgw.client.ObjectClient;
import io.ceph.rgw.client.action.ActionFuture;
import io.ceph.rgw.client.action.ActionListener;
import java.nio.charset.Charset;
/**
* Created by zhuangshuo on 2020/3/1.
*/
/**
 * Put-object request whose payload is a {@link String} encoded with an
 * explicit {@link Charset}.
 * <p>
 * Created by zhuangshuo on 2020/3/1.
 */
public class PutStringRequest extends GenericPutObjectRequest<String> {
    private final Charset charset;

    public PutStringRequest(String bucketName, String key, String value, Metadata metadata, Tagging tagging, ACL acl, CannedACL cannedACL, Charset charset) {
        super(bucketName, key, value, metadata, tagging, acl, cannedACL);
        this.charset = charset;
    }

    /** @return the charset used to encode the string payload */
    public Charset getCharset() {
        return this.charset;
    }

    @Override
    public String toString() {
        return "PutStringRequest{} " + super.toString();
    }

    /** Fluent builder for {@link PutStringRequest}. */
    public static final class Builder extends GenericPutObjectRequest.Builder<Builder, String> {
        private Charset charset;

        public Builder(ObjectClient client) {
            super(client);
        }

        /** Sets the payload charset and returns this builder for chaining. */
        public Builder withCharset(Charset charset) {
            this.charset = charset;
            return self();
        }

        @Override
        public PutStringRequest build() {
            return new PutStringRequest(bucketName, key, value, metadata, tagging, acl, cannedACL, charset);
        }

        @Override
        public PutObjectResponse run() {
            return client.putString(build());
        }

        @Override
        public ActionFuture<PutObjectResponse> execute() {
            return client.putStringAsync(build());
        }

        @Override
        public void execute(ActionListener<PutObjectResponse> listener) {
            client.putStringAsync(build(), listener);
        }
    }
}
|
package mage.cards.e;
import java.util.UUID;
import mage.MageInt;
import mage.abilities.Ability;
import mage.abilities.common.SimpleActivatedAbility;
import mage.abilities.costs.mana.GenericManaCost;
import mage.abilities.effects.OneShotEffect;
import mage.cards.Card;
import mage.cards.CardImpl;
import mage.cards.CardSetInfo;
import mage.constants.CardType;
import mage.constants.Outcome;
import mage.constants.SubType;
import mage.constants.Zone;
import mage.filter.StaticFilters;
import mage.game.Game;
import mage.game.permanent.Permanent;
import mage.target.common.TargetCardInGraveyard;
/**
*
* @author fireshoes
*/
/**
 * Eater of the Dead — {4}{B} 3/4 Horror.
 * {0}: If Eater of the Dead is tapped, exile target creature card from a
 * graveyard and untap Eater of the Dead.
 *
 * @author fireshoes
 */
public final class EaterOfTheDead extends CardImpl {

    public EaterOfTheDead(UUID ownerId, CardSetInfo setInfo) {
        super(ownerId, setInfo, new CardType[]{CardType.CREATURE}, "{4}{B}");
        this.subtype.add(SubType.HORROR);
        this.power = new MageInt(3);
        this.toughness = new MageInt(4);

        // {0}: If Eater of the Dead is tapped, exile target creature card from a graveyard and untap Eater of the Dead.
        Ability untapAbility = new SimpleActivatedAbility(Zone.BATTLEFIELD, new EaterOfTheDeadEffect(), new GenericManaCost(0));
        untapAbility.addTarget(new TargetCardInGraveyard(StaticFilters.FILTER_CARD_CREATURE));
        this.addAbility(untapAbility);
    }

    public EaterOfTheDead(final EaterOfTheDead card) {
        super(card);
    }

    @Override
    public EaterOfTheDead copy() {
        return new EaterOfTheDead(this);
    }
}
/** One-shot effect: if the source is tapped, exile the targeted graveyard card and untap the source. */
class EaterOfTheDeadEffect extends OneShotEffect {

    EaterOfTheDeadEffect() {
        super(Outcome.DestroyPermanent);
        staticText = "If {this} is tapped, exile target creature card from a graveyard and untap {this}";
    }

    EaterOfTheDeadEffect(final EaterOfTheDeadEffect effect) {
        super(effect);
    }

    @Override
    public boolean apply(Game game, Ability source) {
        Permanent eater = game.getPermanent(source.getSourceId());
        Card targetCard = game.getCard(source.getFirstTarget());
        // Guard clauses: source must still be on the battlefield and tapped,
        // and the targeted card must still exist.
        if (eater == null || !eater.isTapped() || targetCard == null) {
            return false;
        }
        targetCard.moveToExile(null, "Eater of the Dead", source.getSourceId(), game);
        eater.untap(game);
        // NOTE(review): the original returned false even after applying the
        // effect; preserved as-is — confirm against the OneShotEffect contract.
        return false;
    }

    @Override
    public EaterOfTheDeadEffect copy() {
        return new EaterOfTheDeadEffect(this);
    }
}
|
package org.bian.dto;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonCreator;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import org.bian.dto.BQCorporateActionsExecuteInputModelCorporateActionsInstanceRecord;
import org.bian.dto.BQDepositsandWithdrawalsExecuteInputModelExecuteRecordType;
import javax.validation.Valid;
/**
* BQCorporateActionsExecuteInputModel
*/
/**
 * BQCorporateActionsExecuteInputModel
 *
 * Generated BIAN DTO: a plain mutable holder with getter/setter pairs and no
 * validation logic of its own.
 */
public class BQCorporateActionsExecuteInputModel {
  // Reference to the parent Investment Account Fulfillment Arrangement instance.
  private String investmentAccountFulfillmentArrangementInstanceReference = null;
  // Reference to the Corporate Actions instance.
  private String corporateActionsInstanceReference = null;
  private BQCorporateActionsExecuteInputModelCorporateActionsInstanceRecord corporateActionsInstanceRecord = null;
  // Opaque (UNCEFACT Binary) consolidated processing record for the execute call.
  private Object corporateActionsExecuteActionTaskRecord = null;
  private BQDepositsandWithdrawalsExecuteInputModelExecuteRecordType executeRecordType = null;
  /**
   * `status: Not Mapped` core-data-type-reference: BIAN::DataTypesLibrary::CoreDataTypes::ISO20022andUNCEFACT::Identifier general-info: Reference to the parent Investment Account Fulfillment Arrangement instance
   * @return investmentAccountFulfillmentArrangementInstanceReference
  **/
  public String getInvestmentAccountFulfillmentArrangementInstanceReference() {
    return investmentAccountFulfillmentArrangementInstanceReference;
  }
  public void setInvestmentAccountFulfillmentArrangementInstanceReference(String investmentAccountFulfillmentArrangementInstanceReference) {
    this.investmentAccountFulfillmentArrangementInstanceReference = investmentAccountFulfillmentArrangementInstanceReference;
  }
  /**
   * `status: Not Mapped` core-data-type-reference: BIAN::DataTypesLibrary::CoreDataTypes::ISO20022andUNCEFACT::Identifier general-info: Reference to the Corporate Actions instance
   * @return corporateActionsInstanceReference
  **/
  public String getCorporateActionsInstanceReference() {
    return corporateActionsInstanceReference;
  }
  public void setCorporateActionsInstanceReference(String corporateActionsInstanceReference) {
    this.corporateActionsInstanceReference = corporateActionsInstanceReference;
  }
  /**
   * Get corporateActionsInstanceRecord
   * @return corporateActionsInstanceRecord
  **/
  public BQCorporateActionsExecuteInputModelCorporateActionsInstanceRecord getCorporateActionsInstanceRecord() {
    return corporateActionsInstanceRecord;
  }
  public void setCorporateActionsInstanceRecord(BQCorporateActionsExecuteInputModelCorporateActionsInstanceRecord corporateActionsInstanceRecord) {
    this.corporateActionsInstanceRecord = corporateActionsInstanceRecord;
  }
  /**
   * `status: Not Mapped` core-data-type-reference: BIAN::DataTypesLibrary::CoreDataTypes::UNCEFACT::Binary general-info: The execute service call consolidated processing record
   * @return corporateActionsExecuteActionTaskRecord
  **/
  public Object getCorporateActionsExecuteActionTaskRecord() {
    return corporateActionsExecuteActionTaskRecord;
  }
  public void setCorporateActionsExecuteActionTaskRecord(Object corporateActionsExecuteActionTaskRecord) {
    this.corporateActionsExecuteActionTaskRecord = corporateActionsExecuteActionTaskRecord;
  }
  /**
   * Get executeRecordType
   * @return executeRecordType
  **/
  public BQDepositsandWithdrawalsExecuteInputModelExecuteRecordType getExecuteRecordType() {
    return executeRecordType;
  }
  public void setExecuteRecordType(BQDepositsandWithdrawalsExecuteInputModelExecuteRecordType executeRecordType) {
    this.executeRecordType = executeRecordType;
  }
}
|
/* */ package com.a.a.g;
/* */
/* */ import java.io.UnsupportedEncodingException;
/* */
/* */
/* */ public class a
/* */ {
// Decompiled (JD-Core) heuristic charset detector, apparently tuned for
// Japanese encodings. Identifiers are obfuscated; the `/* NN */` prefixes are
// the decompiler's original line numbers. Several char-array literals below
// contain non-ASCII/control characters that are part of the detection tables
// and must not be altered.
/* */ public static final String a = "iso-2022-jp";
/* */ public static final String b = "ASCII";
/* */ public static final String c = "Windows-31J";
/* */ public static final String d = "euc-jp";
/* */ public static final String e = "utf-8";
/* */ public static final String f = "utf-16";
/* */ public static final String g = "utf-32";
// h..o: byte-pattern tables. Range tables (see b(byte[],int,int,char[])) are
// runs of [length, lo1, hi1, lo2, hi2, ...]; literal/special tables (see
// a(...) and c(...)) are runs of [length, byte1, byte2, ...].
/* 15 */ static final char[] h = new char[] {
/* 16 */ '\001', ''
/* */ };
/* */
// ISO-2022-JP escape-sequence literals (ESC $ ( D, ESC $ @, ESC $ B, ...).
/* 19 */ static final char[] i = new char[] { '\004', '\033', '$', '(', 'D', '\003', '\033', '$', '@', '\003', '\033', '$', 'B', '\006', '\033', '&', '@', '\033', '$', 'B', '\003', '\033', '(', 'B', '\003', '\033', '(', 'I', '\003', '\033', '(', 'J' };
/* */
/* */
/* */
/* */
/* */
/* */
/* */
/* */
/* */
/* 29 */ static final char[] j = new char[] {
/* 30 */ '\001', '',
/* 31 */ '\001', '¡', 'ß',
/* 32 */ '\002', '', '', '@', '~',
/* 33 */ '\002', '', '', '', 'ü',
/* 34 */ '\002', 'à', 'ü', '@', '~',
/* 35 */ '\002', 'à', 'ü', '', 'ü' };
/* */
/* */
/* 38 */ static final char[] k = new char[] {
/* 39 */ '\001', '',
/* 40 */ '\002', '¡', 'þ', '¡', 'þ',
/* 41 */ '\002', '', '', '¡', 'ß',
/* 42 */ '\003', '', '', '¡', 'þ', '¡', 'þ' };
/* */
/* */
/* 45 */ static final char[] l = new char[] {
/* 46 */ '\001', '',
/* 47 */ '\002', 'À', 'ß', '', '¿',
/* 48 */ '\003', 'à', 'ï', '', '¿', '', '¿',
/* 49 */ '\004', 'ð', '÷', '', '¿', '', '¿', '', '¿' };
/* */
/* */
/* */
// m: UTF-8 BOM bytes (EF BB BF) as a "special" marker sequence.
/* 52 */ static final char[] m = new char[] { '\003', 'ï', '»', '¿' };
/* */
/* */
/* */
// n: UTF-16 BOMs (FF FE / FE FF); o: presumably the UTF-32 BOM variants.
/* 56 */ static final char[] n = new char[] { '\002', 'ÿ', 'þ', '\002', 'þ', 'ÿ' };
/* */
/* */
/* */
/* */
/* 61 */ static final char[] o = new char[] {
/* 62 */ '\004', 'þ', 'ÿ',
/* 63 */ '\004', 'ÿ', 'þ'
/* */ };
/* */
// Per-candidate detection state: charset name (a), range table (b), literal
// table (c), special/BOM table (d), still-alive flag (e), and a skip counter
// (g) of bytes already consumed by a matched multi-byte sequence.
/* */ static class a
/* */ {
/* */ String a;
/* */ char[] b;
/* */ char[] c;
/* */ char[] d;
/* */ boolean e;
/* */ boolean f = false;
/* 74 */ int g = 0;
/* */
/* */
/* */
/* */
/* */
/* */
/* */
/* */ a(String charsets, char[] range, char[] literals, char[] specials) {
/* 83 */ this.a = charsets;
/* 84 */ this.b = range;
/* 85 */ this.c = literals;
/* 86 */ this.d = specials;
// Candidates without a range table start dead and match on specials only.
/* 87 */ this.e = (range != null);
/* */ }
/* */
/* */
/* */
/* */
/* */ public String toString() {
/* 94 */ return String.valueOf(this.a) + "=" + this.e;
/* */ }
/* */ }
/* */
// Decode the whole buffer using the detected charset name.
/* */ public static String a(byte[] b) throws UnsupportedEncodingException {
/* 99 */ return new String(b, b(b));
/* */ }
/* */
/* */ public static String a(byte[] b, int off, int len) throws UnsupportedEncodingException {
/* 103 */ return new String(b, off, len, b(b, off, len));
/* */ }
/* */
// Detect the charset name of the whole buffer.
/* */ public static String b(byte[] b) {
/* 107 */ return b(b, 0, b.length);
/* */ }
/* */
// Core detection: walk the bytes eliminating candidates, then prefer a
// special/BOM match, then the first surviving candidate, else Latin-1.
/* */ public static String b(byte[] b, int off, int len) {
/* 111 */ a[] work = {
/* 112 */ new a("utf-8", l, null, m),
/* 113 */ new a("euc-jp", k, null, null),
/* 114 */ new a("Windows-31J", j, null, null),
/* 115 */ new a("iso-2022-jp", h, i, null),
/* 116 */ new a("ASCII", h, null, null),
/* 117 */ new a("utf-32", null, null, o),
/* 118 */ new a("utf-16", null, null, n)
/* */ };
/* */
/* 121 */ int save_off = off;
/* 122 */ int save_len = len;
/* 123 */ while (len > 0) {
/* 124 */ int cnt = 0; byte b2; int j; a[] arrayOfA;
/* 125 */ for (j = (arrayOfA = work).length, b2 = 0; b2 < j; ) { a f = arrayOfA[b2];
/* 126 */ if (f.e) {
// Try the literal table first, then the byte-range table; a negative
// result from either kills the candidate. f.g counts bytes the match
// already consumed, so those iterations only decrement it.
/* 127 */ if (f.g == 0 && (f.g = a(b, off, len, f.c)) < 0) {
/* 128 */ f.e = false;
/* 129 */ } else if (f.g == 0 && (f.g = b(b, off, len, f.b)) < 0) {
/* 130 */ f.e = false;
/* */ }
/* */
/* */
/* 134 */ if (f.g > 0) f.g--;
/* */
/* */
/* 137 */ if (f.e) cnt++;
/* */
/* */ }
/* */ b2++; }
/* */
/* 142 */ off++;
/* 143 */ len--;
/* */ } byte b1;
/* */ int i;
/* */ a[] arrayOfA1;
// First pass: a candidate with a special/BOM table that is still alive (or
// has no range table at all) wins if the buffer starts with its marker.
/* 147 */ for (i = (arrayOfA1 = work).length, b1 = 0; b1 < i; ) { a f = arrayOfA1[b1];
/* 148 */ if (f.d != null && (f.e || f.b == null) &&
/* 149 */ c(b, save_off, save_len, f.d)) {
/* 150 */ return f.a;
/* */ }
/* */
/* */ b1++; }
/* */
// Second pass: first candidate that survived the byte walk, in declaration
// order (utf-8 before euc-jp before Windows-31J, ...).
/* 155 */ for (i = (arrayOfA1 = work).length, b1 = 0; b1 < i; ) { a f = arrayOfA1[b1];
/* 156 */ if (f.e)
/* 157 */ return f.a;
/* */ b1++; }
/* */
/* 160 */ return "ISO-8859-1";
/* */ }
/* */
// Literal matcher: returns the matched sequence length, 0 if no table/start
// mismatch, or -1 if a sequence matched partially (first byte hit, rest not).
/* */ private static int a(byte[] b, int off, int len, char[] literals) {
/* 164 */ if (literals == null) return 0;
/* 165 */ int pos = 0;
/* 166 */ boolean flg = false;
/* 167 */ while (pos < literals.length) {
/* 168 */ int l = literals[pos++];
/* 169 */ if (len >= l) {
/* 170 */ boolean same = true;
/* 171 */ for (int i = 0, p = off; i < l; i++, p++) {
/* 172 */ char c = (char)(b[p] & 0xFF);
/* 173 */ if (c != literals[pos + i]) {
/* 174 */ if (i > 0) flg = true;
/* 175 */ same = false;
/* */ break;
/* */ }
/* */ }
/* 179 */ if (same) {
/* 180 */ return l;
/* */ }
/* */ }
/* 183 */ pos += l;
/* */ }
/* 185 */ if (flg) {
/* 186 */ return -1;
/* */ }
/* 188 */ return 0;
/* */ }
/* */
// Range matcher: each entry is [count, lo1, hi1, ...]; returns the entry
// length whose every byte falls inside its [lo, hi] pair, or -1 if none fit.
/* */ private static int b(byte[] b, int off, int len, char[] ranges) {
/* 192 */ if (ranges == null) return 0;
/* 193 */ int pos = 0;
/* 194 */ while (pos < ranges.length) {
/* 195 */ int l = ranges[pos++];
/* 196 */ if (len >= l) {
/* 197 */ boolean ok = true;
/* 198 */ for (int i = 0, p = off, n = pos; i < l; i++) {
/* 199 */ char s = ranges[n++];
/* 200 */ char e = ranges[n++];
/* 201 */ char c = (char)(b[p++] & 0xFF);
/* 202 */ if (c < s || c > e) {
/* 203 */ ok = false;
/* */ break;
/* */ }
/* */ }
/* 207 */ if (ok) {
/* 208 */ return l;
/* */ }
/* */ }
/* 211 */ pos += l * 2;
/* */ }
/* 213 */ return -1;
/* */ }
/* */
// Special/BOM matcher: true if the buffer begins with any listed sequence.
/* */ private static boolean c(byte[] b, int off, int len, char[] specials) {
/* 217 */ if (specials == null) return false;
/* 218 */ int pos = 0;
/* */
/* 220 */ while (pos < specials.length) {
/* 221 */ int l = specials[pos++];
/* 222 */ if (len >= l) {
/* 223 */ boolean ok = true;
/* 224 */ for (int i = 0; i < l; i++) {
/* 225 */ char c = (char)(b[off + i] & 0xFF);
/* 226 */ if (c != specials[pos + i]) {
/* 227 */ ok = false;
/* */ break;
/* */ }
/* */ }
/* 231 */ if (ok) {
/* 232 */ return true;
/* */ }
/* */ }
/* 235 */ pos += l;
/* */ }
/* */
/* 238 */ return false;
/* */ }
/* */ }
/* Location: /mnt/r/ConTenDoViewer.jar!/com/a/a/g/a.class
* Java compiler version: 6 (50.0)
* JD-Core Version: 1.1.3
*/
|
package es.urjc.code.daw;
import java.util.ArrayList;
import java.util.List;

import org.springframework.stereotype.Controller;
import org.springframework.ui.Model;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestParam;
@Controller
public class AnuncioController {
private List<Anuncio>
@PostMapping("/guardaranuncio")
public String guardarAnuncio(
Model model,
@RequestParam String nombre,
@RequestParam String asunto,
@RequestParam String comentario) {
model.addAttribute("nombre", nombre);
model.addAttribute("asunto", asunto);
model.addAttribute("comentario", comentario);
return "anuncio";
}
}
|
package tconstruct.library.tools;
import ic2.api.item.IBoxable;
import ic2.api.item.ICustomElectricItem;
import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Random;
import mods.battlegear2.api.weapons.IBattlegearWeapon;
import mods.battlegear2.api.weapons.OffhandAttackEvent;
import net.minecraft.block.Block;
import net.minecraft.client.renderer.texture.IconRegister;
import net.minecraft.creativetab.CreativeTabs;
import net.minecraft.entity.Entity;
import net.minecraft.entity.EntityLivingBase;
import net.minecraft.entity.player.EntityPlayer;
import net.minecraft.item.Item;
import net.minecraft.item.ItemBlock;
import net.minecraft.item.ItemPotion;
import net.minecraft.item.ItemStack;
import net.minecraft.nbt.NBTTagCompound;
import net.minecraft.util.Icon;
import net.minecraft.world.World;
import net.minecraftforge.event.entity.player.PlayerInteractEvent;
import tconstruct.library.ActiveToolMod;
import tconstruct.library.TConstructRegistry;
import tconstruct.library.crafting.ToolBuilder;
import tconstruct.library.util.MathUtils;
import cofh.api.energy.IEnergyContainerItem;
import cpw.mods.fml.relauncher.Side;
import cpw.mods.fml.relauncher.SideOnly;
/** NBTTags
* Main tag - InfiTool
* @see ToolBuilder
*
* Required:
* Head: Base and render tag, above the handle
* Handle: Base and render tag, bottom layer
*
* Damage: Replacement for metadata
* MaxDamage: ItemStacks only read setMaxDamage()
* Broken: Represents whether the tool is broken (boolean)
* Attack: How much damage a mob will take
* MiningSpeed: The speed at which a tool mines
*
* Others:
* Accessory: Base and tag, above head. Sword guards, binding, etc
* Effects: Render tag, top layer. Fancy effects like moss or diamond edge.
* Render order: Handle > Head > Accessory > Effect1 > Effect2 > Effect3 > etc
* Unbreaking: Reinforced in-game, 10% chance to not use durability per level
* Stonebound: Mines faster as the tool takes damage, but has less attack
* Spiny: Opposite of stonebound
*
* Modifiers have their own tags.
* @see ToolMod
*/
public abstract class ToolCore extends Item implements IEnergyContainerItem, ICustomElectricItem, IBoxable, IBattlegearWeapon
{
    // Thermal Expansion (RF) power constants -- TODO: source these from the TE API instead of hard-coding.
    protected int capacity = 400000;
    protected int maxReceive = 75;
    protected int maxExtract = 75;
    protected Random random = new Random();
    // Base attack damage set by the constructor's baseDamage argument.
    protected int damageVsEntity;
    // Shared placeholder icons; emptyIcon is assigned in registerIcons().
    public static Icon blankSprite;
    public static Icon emptyIcon;
    /**
     * Creates a tool item and registers it with the TConstruct tool registry.
     *
     * @param id item id passed through to {@link Item}
     * @param baseDamage base attack damage for this tool type
     */
    public ToolCore(int id, int baseDamage)
    {
        super(id);
        this.maxStackSize = 1;
        // Durability is NBT-driven; this max is a placeholder for the damage bar.
        this.setMaxDamage(100);
        this.setUnlocalizedName("InfiTool");
        this.setCreativeTab(TConstructRegistry.toolTab);
        damageVsEntity = baseDamage;
        TConstructRegistry.addToolMapping(this);
        // Disable vanilla anvil/crafting-grid repair; TConstruct handles repair itself.
        setNoRepair();
        canRepair = false;
    }
    /** Determines crafting behavior with regards to durability
     * 0: None
     * 1: Adds handle modifier
     * 2: Averages part with the rest of the tool (head)
     * @return type
     */
    public int durabilityTypeHandle ()
    {
        // Handles contribute a durability multiplier by default.
        return 1;
    }
    /** Accessory durability behavior; see {@link #durabilityTypeHandle()} for the code meanings. */
    public int durabilityTypeAccessory ()
    {
        return 0;
    }
    /** Extra-part durability behavior; see {@link #durabilityTypeHandle()} for the code meanings. */
    public int durabilityTypeExtra ()
    {
        return 0;
    }
    /** Number of free modifier slots a freshly built tool of this type has. */
    public int getModifierAmount ()
    {
        return 3;
    }
public String getToolName ()
{
return this.getClass().getSimpleName();
}
    /* Rendering */
    // Registered icons per material/effect id, filled in registerIcons().
    public HashMap<Integer, Icon> headIcons = new HashMap<Integer, Icon>();
    public HashMap<Integer, Icon> brokenIcons = new HashMap<Integer, Icon>();
    public HashMap<Integer, Icon> handleIcons = new HashMap<Integer, Icon>();
    public HashMap<Integer, Icon> accessoryIcons = new HashMap<Integer, Icon>();
    public HashMap<Integer, Icon> effectIcons = new HashMap<Integer, Icon>();
    public HashMap<Integer, Icon> extraIcons = new HashMap<Integer, Icon>();
    // Texture resource paths per material/effect id, registered via
    // registerPartPaths()/registerEffectPath() before icon stitching.
    //Not liking this
    public HashMap<Integer, String> headStrings = new HashMap<Integer, String>();
    public HashMap<Integer, String> brokenPartStrings = new HashMap<Integer, String>();
    public HashMap<Integer, String> handleStrings = new HashMap<Integer, String>();
    public HashMap<Integer, String> accessoryStrings = new HashMap<Integer, String>();
    public HashMap<Integer, String> effectStrings = new HashMap<Integer, String>();
    public HashMap<Integer, String> extraStrings = new HashMap<Integer, String>();
    /** Tools are drawn in layers (handle/head/accessory/effects), so multiple passes are required. */
    @SideOnly(Side.CLIENT)
    @Override
    public boolean requiresMultipleRenderPasses ()
    {
        return true;
    }
    /** Up to 9 passes: handle, head, accessory, extra, plus up to six effect overlays (see getIcon). */
    @SideOnly(Side.CLIENT)
    @Override
    public int getRenderPasses (int metadata)
    {
        return 9;
    }
    /** No enchantment glint on tools, regardless of modifiers. */
    @SideOnly(Side.CLIENT)
    public boolean hasEffect (ItemStack par1ItemStack)
    {
        return false;
    }
    //Override me please!
    /** Number of crafted parts this tool is built from (2-4); subclasses should override. */
    public int getPartAmount ()
    {
        return 3;
    }
    /** Texture filename suffix for the given part type (head/handle/...). */
    public abstract String getIconSuffix (int partType);

    /** Texture filename suffix used for this tool's effect overlays. */
    public abstract String getEffectSuffix ();

    /** Default texture folder for this tool's part icons. */
    public abstract String getDefaultFolder ();
    /**
     * Registers texture paths for one material id.
     *
     * @param index    material id the textures belong to
     * @param location ordered paths: [head, broken, handle, accessory?, extra?];
     *                 trailing entries are optional for tools with fewer parts
     */
    public void registerPartPaths (int index, String[] location)
    {
        headStrings.put(index, location[0]);
        brokenPartStrings.put(index, location[1]);
        handleStrings.put(index, location[2]);
        if (location.length > 3)
            accessoryStrings.put(index, location[3]);
        if (location.length > 4)
            extraStrings.put(index, location[4]);
    }
    /** Intentional no-op hook; subclasses with alternate texture sets may override. */
    public void registerAlternatePartPaths (int index, String[] location)
    {
    }
    /** Registers the texture path for one effect overlay id. */
    public void registerEffectPath (int index, String location)
    {
        effectStrings.put(index, location);
    }
@Override
public void registerIcons (IconRegister iconRegister)
{
headIcons.clear();
brokenIcons.clear();
handleIcons.clear();
accessoryIcons.clear();
extraIcons.clear();
effectIcons.clear();
Iterator iter = headStrings.entrySet().iterator();
while (iter.hasNext())
{
Map.Entry pairs = (Map.Entry) iter.next();
headIcons.put((Integer) pairs.getKey(), iconRegister.registerIcon((String) pairs.getValue()));
}
iter = brokenPartStrings.entrySet().iterator();
while (iter.hasNext())
{
Map.Entry pairs = (Map.Entry) iter.next();
brokenIcons.put((Integer) pairs.getKey(), iconRegister.registerIcon((String) pairs.getValue()));
}
iter = handleStrings.entrySet().iterator();
while (iter.hasNext())
{
Map.Entry pairs = (Map.Entry) iter.next();
handleIcons.put((Integer) pairs.getKey(), iconRegister.registerIcon((String) pairs.getValue()));
}
if (getPartAmount() > 2)
{
iter = accessoryStrings.entrySet().iterator();
while (iter.hasNext())
{
Map.Entry pairs = (Map.Entry) iter.next();
accessoryIcons.put((Integer) pairs.getKey(), iconRegister.registerIcon((String) pairs.getValue()));
}
}
if (getPartAmount() > 3)
{
iter = extraStrings.entrySet().iterator();
while (iter.hasNext())
{
Map.Entry pairs = (Map.Entry) iter.next();
extraIcons.put((Integer) pairs.getKey(), iconRegister.registerIcon((String) pairs.getValue()));
}
}
iter = effectStrings.entrySet().iterator();
while (iter.hasNext())
{
Map.Entry pairs = (Map.Entry) iter.next();
effectIcons.put((Integer) pairs.getKey(), iconRegister.registerIcon((String) pairs.getValue()));
}
emptyIcon = iconRegister.registerIcon("tinker:blankface");
}
    /** Metadata-based lookup is meaningless for NBT-driven tools; always the blank sprite. */
    @Override
    @SideOnly(Side.CLIENT)
    public Icon getIconFromDamage (int meta)
    {
        return blankSprite;
    }
@Override
@SideOnly(Side.CLIENT)
public Icon getIcon (ItemStack stack, int renderPass)
{
NBTTagCompound tags = stack.getTagCompound();
if (tags != null)
{
tags = stack.getTagCompound().getCompoundTag("InfiTool");
if (renderPass < getPartAmount())
{
if (renderPass == 0) // Handle
{
return handleIcons.get(tags.getInteger("RenderHandle"));
}
else if (renderPass == 1) // Head
{
if (tags.getBoolean("Broken"))
return (brokenIcons.get(tags.getInteger("RenderHead")));
else
return (headIcons.get(tags.getInteger("RenderHead")));
}
else if (renderPass == 2) // Accessory
{
return (accessoryIcons.get(tags.getInteger("RenderAccessory")));
}
else if (renderPass == 3) // Extra
{
return (extraIcons.get(tags.getInteger("RenderExtra")));
}
}
else
{
if (renderPass == getPartAmount())
{
if (tags.hasKey("Effect1"))
return (effectIcons.get(tags.getInteger("Effect1")));
}
else if (renderPass == getPartAmount() + 1)
{
if (tags.hasKey("Effect2"))
return (effectIcons.get(tags.getInteger("Effect2")));
}
else if (renderPass == getPartAmount() + 2)
{
if (tags.hasKey("Effect3"))
return (effectIcons.get(tags.getInteger("Effect3")));
}
else if (renderPass == getPartAmount() + 3)
{
if (tags.hasKey("Effect4"))
return (effectIcons.get(tags.getInteger("Effect4")));
}
else if (renderPass == getPartAmount() + 4)
{
if (tags.hasKey("Effect5"))
return (effectIcons.get(tags.getInteger("Effect5")));
}
else if (renderPass == getPartAmount() + 5)
{
if (tags.hasKey("Effect6"))
return (effectIcons.get(tags.getInteger("Effect6")));
}
}
return blankSprite;
}
return emptyIcon;
}
/* Tags and information about the tool */
    /**
     * Builds the tooltip: optional IC2 charge line ("x/y EU"), optional RF
     * energy line ("x/y RF"), then material ability names, the reinforced
     * level, and any custom "TooltipN" lines from the InfiTool NBT.
     */
    @Override
    @SideOnly(Side.CLIENT)
    public void addInformation (ItemStack stack, EntityPlayer player, List list, boolean par4)
    {
        if (!stack.hasTagCompound())
            return;
        NBTTagCompound tags = stack.getTagCompound();
        if (tags.hasKey("charge"))
        {
            String color = "";
            //double joules = this.getJoules(stack);
            int power = tags.getInteger("charge");
            if (power != 0)
            {
                // Color the EU readout red/green/gold by charge thirds.
                if (power <= this.getMaxCharge(stack) / 3)
                    color = "\u00a74";
                else if (power > this.getMaxCharge(stack) * 2 / 3)
                    color = "\u00a72";
                else
                    color = "\u00a76";
            }
            String charge = new StringBuilder().append(color).append(tags.getInteger("charge")).append("/").append(getMaxCharge(stack)).append(" EU").toString();
            list.add(charge);
        }
        if (tags.hasKey("Energy"))
        {
            String color = "";
            int RF = tags.getInteger("Energy");
            if (RF != 0)
            {
                // Same thirds-based coloring for the RF readout.
                if (RF <= this.getMaxEnergyStored(stack) / 3)
                    color = "\u00a74";
                else if (RF > this.getMaxEnergyStored(stack) * 2 / 3)
                    color = "\u00a72";
                else
                    color = "\u00a76";
            }
            String energy = new StringBuilder().append(color).append(tags.getInteger("Energy")).append("/").append(getMaxEnergyStored(stack)).append(" RF").toString();
            list.add(energy);
        }
        if (tags.hasKey("InfiTool"))
        {
            boolean broken = tags.getCompoundTag("InfiTool").getBoolean("Broken");
            if (broken)
                list.add("\u00A7oBroken");
            else
            {
                int head = tags.getCompoundTag("InfiTool").getInteger("Head");
                int handle = tags.getCompoundTag("InfiTool").getInteger("Handle");
                int binding = tags.getCompoundTag("InfiTool").getInteger("Accessory");
                int extra = tags.getCompoundTag("InfiTool").getInteger("Extra");
                // Show each material's ability name once: skip a part whose
                // material duplicates an already-listed part.
                String headName = getAbilityNameForType(head);
                if (!headName.equals(""))
                    list.add(getStyleForType(head) + headName);
                String handleName = getAbilityNameForType(handle);
                if (!handleName.equals("") && handle != head)
                    list.add(getStyleForType(handle) + handleName);
                if (getPartAmount() >= 3)
                {
                    String bindingName = getAbilityNameForType(binding);
                    if (!bindingName.equals("") && binding != head && binding != handle)
                        list.add(getStyleForType(binding) + bindingName);
                }
                if (getPartAmount() >= 4)
                {
                    String extraName = getAbilityNameForType(extra);
                    if (!extraName.equals("") && extra != head && extra != handle && extra != binding)
                        list.add(getStyleForType(extra) + extraName);
                }
                int unbreaking = tags.getCompoundTag("InfiTool").getInteger("Unbreaking");
                String reinforced = getReinforcedName(head, handle, binding, extra, unbreaking);
                if (!reinforced.equals(""))
                    list.add(reinforced);
                // Append custom Tooltip1..TooltipN lines until the first gap.
                boolean displayToolTips = true;
                int tipNum = 0;
                while (displayToolTips)
                {
                    tipNum++;
                    String tooltip = "Tooltip" + tipNum;
                    if (tags.getCompoundTag("InfiTool").hasKey(tooltip))
                    {
                        String tipName = tags.getCompoundTag("InfiTool").getString(tooltip);
                        if (!tipName.equals(""))
                            list.add(tipName);
                    }
                    else
                        displayToolTips = false;
                }
            }
        }
    }
/**
 * Looks up the chat-formatting style code for the given material id.
 * Assumes the id is registered in TConstructRegistry — TODO confirm callers never pass unknown ids.
 */
public static String getStyleForType (int type)
{
    ToolMaterial material = TConstructRegistry.getMaterial(type);
    return material.style();
}
/**
 * Looks up the display name of the ability granted by the given material id.
 * Returns whatever the registry's material reports (may be "" for no ability).
 */
public String getAbilityNameForType (int type)
{
    ToolMaterial material = TConstructRegistry.getMaterial(type);
    return material.ability();
}
/**
 * Builds the "Reinforced …" tooltip line for a tool.
 * The colour/style comes from the visible part whose material has the highest
 * reinforced level; the displayed level itself comes from the Unbreaking value.
 * Returns "" when the final level is zero or negative.
 */
public String getReinforcedName (int head, int handle, int accessory, int extra, int unbreaking)
{
    // All four materials are resolved up front, exactly like the original,
    // even when the tool has fewer than four visible parts.
    ToolMaterial[] materials = { TConstructRegistry.getMaterial(head), TConstructRegistry.getMaterial(handle),
            TConstructRegistry.getMaterial(accessory), TConstructRegistry.getMaterial(extra) };
    int parts = getPartAmount();
    int visibleParts = parts >= 4 ? 4 : (parts >= 3 ? 3 : 2);
    String style = "";
    int best = 0;
    for (int i = 0; i < visibleParts; i++)
    {
        int level = materials[i].reinforced();
        if (level > 0 && level > best)
        {
            style = materials[i].style();
            best = level;
        }
    }
    // NOTE(review): the original computed `reinforced += unbreaking - reinforced`,
    // which is just `reinforced = unbreaking` — the material-derived level only
    // influences the style, never the number. Behaviour preserved; confirm intent.
    best = unbreaking;
    if (best > 0)
    {
        return style + getReinforcedString(best);
    }
    return "";
}
/**
 * Converts a reinforced level into display text.
 * Levels 1-9 map to Roman numerals, anything above 9 becomes "Unbreakable",
 * and any other value falls back to "Reinforced X" (same as the old switch default).
 */
String getReinforcedString (int reinforced)
{
    if (reinforced > 9)
        return "Unbreakable";
    final String[] numerals = { "I", "II", "III", "IV", "V", "VI", "VII", "VIII", "IX" };
    if (reinforced >= 1 && reinforced <= 9)
        return "Reinforced " + numerals[reinforced - 1];
    return "Reinforced X";
}
/**
 * Hook fired after this tool damages an entity — used for sounds and similar
 * effects. Default implementation does nothing; subclasses override as needed.
 */
public void onEntityDamaged (World world, EntityLivingBase player, Entity entity)
{
}
/* Creative mode tools */
/**
 * Populates the creative tab: builds one tool per registered tool material.
 */
public void getSubItems (int id, CreativeTabs tab, List list)
{
    for (Object entryObject : TConstructRegistry.toolMaterials.entrySet())
    {
        Map.Entry pairs = (Map.Entry) entryObject;
        ToolMaterial material = (ToolMaterial) pairs.getValue();
        buildTool((Integer) pairs.getKey(), material.displayName, list);
    }
}
/**
 * Builds a complete creative-mode tool for the given material id and adds it
 * to the list. A failed build (usually an item id conflict) is logged unless
 * TContent.supressMissingToolLogs is set.
 */
public void buildTool (int id, String name, List list)
{
    Item accessory = getAccessoryItem();
    ItemStack accessoryStack = accessory != null ? new ItemStack(accessory, 1, id) : null;
    Item extra = getExtraItem();
    ItemStack extraStack = extra != null ? new ItemStack(extra, 1, id) : null;
    ItemStack tool = ToolBuilder.instance.buildTool(new ItemStack(getHeadItem(), 1, id), new ItemStack(getHandleItem(), 1, id), accessoryStack, extraStack, name + getToolName());
    if (tool == null)
    {
        boolean supress = false;
        try
        {
            // Reflection mirrors the original lookup of the suppression flag on TContent.
            Class clazz = Class.forName(tconstruct.common.TContent.class.getName());
            Field fld = clazz.getField("supressMissingToolLogs");
            supress = fld.getBoolean(fld);
        }
        catch (Exception e)
        {
            TConstructRegistry.logger.severe("TConstruct Library could not find parts of TContent");
            e.printStackTrace();
        }
        if (!supress)
        {
            // Bug fix: the "item ID conflicts" hint previously escaped the
            // suppression check (missing braces); both lines are now guarded.
            TConstructRegistry.logger.severe("Creative builder failed tool for " + name + this.getToolName());
            TConstructRegistry.logger.severe("Make sure you do not have item ID conflicts");
        }
    }
    else
    {
        tool.getTagCompound().getCompoundTag("InfiTool").setBoolean("Built", true);
        list.add(tool);
    }
}
/** Item used for this tool's head part. */
public abstract Item getHeadItem ();
/** Item used for this tool's accessory (third) part. */
public abstract Item getAccessoryItem ();
/** Item used for the fourth part; null means this tool type has no extra part. */
public Item getExtraItem ()
{
return null;
}
/** Default handle part (the tool rod); subclasses may override. */
public Item getHandleItem ()
{
return TConstructRegistry.getItem("toolRod");//TContent.toolRod;
}
/* Updating */
/**
 * Inventory tick: lets every registered active tool modifier update this stack.
 */
@Override
public void onUpdate (ItemStack stack, World world, Entity entity, int par4, boolean par5)
{
    Iterator<ActiveToolMod> modifiers = TConstructRegistry.activeModifiers.iterator();
    while (modifiers.hasNext())
    {
        modifiers.next().updateTool(this, stack, world, entity);
    }
}
/* Tool uses */
//Types
/** Category tags this tool belongs to (e.g. harvest, weapon). */
public abstract String[] toolCategories ();
//Mining
/**
 * Gives every active modifier a chance to handle/veto the block break.
 * Every modifier is always consulted (their side effects matter); the harvest
 * is cancelled when any of them returns true.
 */
@Override
public boolean onBlockStartBreak (ItemStack stack, int x, int y, int z, EntityPlayer player)
{
    boolean cancelHarvest = false;
    for (ActiveToolMod mod : TConstructRegistry.activeModifiers)
    {
        cancelHarvest |= mod.beforeBlockBreak(this, stack, x, y, z, player);
    }
    return cancelHarvest;
}
/**
 * Called after a block is broken; routes tool-damage handling through
 * AbilityHelper. Zero-hardness blocks (and unregistered ids) never damage the tool.
 */
@Override
public boolean onBlockDestroyed (ItemStack itemstack, World world, int blockID, int x, int y, int z, EntityLivingBase player)
{
    Block block = Block.blocksList[blockID];
    if (block == null || (double) block.getBlockHardness(world, x, y, z) == 0.0D)
        return true;
    return AbilityHelper.onBlockChanged(itemstack, world, blockID, x, y, z, player, random);
}
/**
 * Base mining speed against a block: heavily penalised (0.1) when broken.
 * Fix: tolerate stacks without NBT instead of throwing a NullPointerException,
 * matching the null guards used by the other NBT readers in this class.
 */
@Override
public float getStrVsBlock (ItemStack stack, Block block, int meta)
{
    NBTTagCompound tags = stack.getTagCompound();
    if (tags != null && tags.getCompoundTag("InfiTool").getBoolean("Broken"))
        return 0.1f;
    return 1f;
}
// Attacking
/** Routes the hit through AbilityHelper (applies modifiers); returning false lets vanilla attack logic continue. */
@Override
public boolean onLeftClickEntity (ItemStack stack, EntityPlayer player, Entity entity)
{
AbilityHelper.onLeftClickEntity(stack, player, entity, this, 0);
return false;
}
/** Always reports a successful hit; durability cost is handled elsewhere. */
@Override
public boolean hitEntity (ItemStack stack, EntityLivingBase mob, EntityLivingBase player)
{
return true;
}
/** Whether this tool ignores armor; default no. */
public boolean pierceArmor ()
{
return false;
}
/** Charge-attack damage multiplier; default none (1x). */
public float chargeAttack ()
{
return 1f;
}
/** Base damage against entities (field set elsewhere in this class). */
public int getDamageVsEntity (Entity par1Entity)
{
return this.damageVsEntity;
}
//Changes how much durability the base tool has
public float getDurabilityModifier ()
{
return 1f;
}
/** Repair cost scales with the durability modifier by default. */
public float getRepairCost ()
{
return getDurabilityModifier();
}
/** Damage multiplier applied to this tool type; default 1x. */
public float getDamageModifier ()
{
return 1.0f;
}
//Right-click
/**
 * Right-click on a block: instead of using the tool itself, tries to place the
 * block from the hotbar slot "next to" the tool, if that slot holds an ItemBlock.
 * Refuses to place into the player's own position.
 */
public boolean onItemUse (ItemStack stack, EntityPlayer player, World world, int x, int y, int z, int side, float clickX, float clickY, float clickZ)
{
/*if (world.isRemote)
return true;*/
boolean used = false;
int hotbarSlot = player.inventory.currentItem;
// NOTE(review): slot 0 maps to slot 8 rather than slot 1, while every other
// slot maps to its right-hand neighbour — confirm the wrap direction is intended.
int itemSlot = hotbarSlot == 0 ? 8 : hotbarSlot + 1;
ItemStack nearbyStack = null;
if (hotbarSlot < 8)
{
nearbyStack = player.inventory.getStackInSlot(itemSlot);
if (nearbyStack != null)
{
Item item = nearbyStack.getItem();
if (item instanceof ItemBlock)
{
// Work out the target position the block would occupy, based on the clicked side.
int posX = x;
int posY = y;
int posZ = z;
int playerPosX = (int) Math.floor(player.posX);
int playerPosY = (int) Math.floor(player.posY);
int playerPosZ = (int) Math.floor(player.posZ);
if (side == 0)
{
--posY;
}
if (side == 1)
{
++posY;
}
if (side == 2)
{
--posZ;
}
if (side == 3)
{
++posZ;
}
if (side == 4)
{
--posX;
}
if (side == 5)
{
++posX;
}
// Never place a block inside the player's own (feet/head/below) column cell.
if (posX == playerPosX && (posY == playerPosY || posY == playerPosY + 1 || posY == playerPosY - 1) && posZ == playerPosZ)
{
return false;
}
used = item.onItemUse(nearbyStack, player, world, x, y, z, side, clickX, clickY, clickZ);
// Clear the slot once the placed stack is used up.
if (nearbyStack.stackSize < 1)
{
nearbyStack = null;
player.inventory.setInventorySlotContents(itemSlot, null);
}
}
}
}
/*if (used) //Update client
{
Packet103SetSlot packet = new Packet103SetSlot(player.openContainer.windowId, itemSlot, nearbyStack);
((EntityPlayerMP)player).playerNetServerHandler.sendPacketToPlayer(packet);
}*/
return used;
}
/**
 * Right-click in the air: throws a splash potion held in the hotbar slot next
 * to the tool (same slot rule as onItemUse). The tool stack itself is returned
 * unchanged in every case.
 */
public ItemStack onItemRightClick (ItemStack stack, World world, EntityPlayer player)
{
boolean used = false;
int hotbarSlot = player.inventory.currentItem;
// Same slot mapping as onItemUse — see the note there about slot 0.
int itemSlot = hotbarSlot == 0 ? 8 : hotbarSlot + 1;
ItemStack nearbyStack = null;
if (hotbarSlot < 8)
{
nearbyStack = player.inventory.getStackInSlot(itemSlot);
if (nearbyStack != null)
{
Item item = nearbyStack.getItem();
// Only splash potions are usable this way; drinkable potions are ignored.
if (item instanceof ItemPotion && ((ItemPotion) item).isSplash(nearbyStack.getItemDamage()))
{
nearbyStack = item.onItemRightClick(nearbyStack, world, player);
if (nearbyStack.stackSize < 1)
{
nearbyStack = null;
player.inventory.setInventorySlotContents(itemSlot, null);
}
}
}
}
return stack;
}
/* Vanilla overrides */
/** Not a vanilla "tool" for beacon/anvil purposes. */
public boolean isItemTool (ItemStack par1ItemStack)
{
return false;
}
/** Anvil repair is disabled; repair goes through the tool station instead. */
@Override
public boolean getIsRepairable (ItemStack par1ItemStack, ItemStack par2ItemStack)
{
return false;
}
public boolean isRepairable ()
{
return false;
}
/** Vanilla enchanting table is disabled for these tools. */
public int getItemEnchantability ()
{
return 0;
}
/** Render held items in full 3D. */
public boolean isFull3D ()
{
return true;
}
/** Never draw the vanilla enchanted glint. */
@SideOnly(Side.CLIENT)
public boolean hasEffect (ItemStack par1ItemStack, int pass)
{
return false;
}
/* Proper stack damage */
/**
 * Maximum value for the durability bar: charge or energy capacity while the
 * tool is powered, otherwise the tool's total durability from NBT.
 * Returns 0 for stacks without NBT.
 */
public int getItemMaxDamageFromStack (ItemStack stack)
{
    NBTTagCompound tags = stack.getTagCompound();
    if (tags == null)
        return 0;
    if (tags.hasKey("charge") && tags.getInteger("charge") > 0)
        return this.getMaxCharge(stack);
    if (tags.hasKey("Energy") && tags.getInteger("Energy") > 0)
        return this.getMaxEnergyStored(stack);
    return tags.getCompoundTag("InfiTool").getInteger("TotalDurability");
}
/**
 * Current value for the durability bar: missing charge/energy while powered,
 * otherwise accumulated tool damage from NBT. Returns 0 for stacks without NBT.
 */
public int getItemDamageFromStackForDisplay (ItemStack stack)
{
    NBTTagCompound tags = stack.getTagCompound();
    if (tags == null)
        return 0;
    int charge = tags.hasKey("charge") ? tags.getInteger("charge") : 0;
    if (charge > 0)
        return getMaxCharge(stack) - charge;
    int energy = tags.hasKey("Energy") ? tags.getInteger("Energy") : 0;
    if (energy > 0)
        return getMaxEnergyStored(stack) - energy;
    return tags.getCompoundTag("InfiTool").getInteger("Damage");
}
/* IC2 Support
* Every tool can be an electric tool if you modify it right
*/
@Override
public boolean canBeStoredInToolbox (ItemStack stack)
{
return true;
}
/** These tools only consume EU; they never act as a power source. */
@Override
public boolean canProvideEnergy (ItemStack stack)
{
return false;
/*NBTTagCompound tags = stack.getTagCompound();
if (!tags.hasKey("charge"))
return false;
return true;*/
}
/** Charging does not swap the item id. */
@Override
public int getChargedItemId (ItemStack stack)
{
return this.itemID;
}
@Override
public int getEmptyItemId (ItemStack stack)
{
return this.itemID;
}
/** Capacity in EU; 0 unless the stack has been given a "charge" tag. */
@Override
public int getMaxCharge (ItemStack stack)
{
NBTTagCompound tags = stack.getTagCompound();
if (!tags.hasKey("charge"))
return 0;
return 10000;
}
@Override
public int getTier (ItemStack itemStack)
{
return 0;
}
/** EU transfer rate per operation; 0 for uncharged tools. */
@Override
public int getTransferLimit (ItemStack stack)
{
NBTTagCompound tags = stack.getTagCompound();
if (!tags.hasKey("charge"))
return 0;
return 32;
}
/**
 * IC2 charge: adds EU up to the transfer limit and capacity, updating the
 * stack's "charge" tag and display damage unless simulating.
 * Returns the amount actually accepted.
 */
@Override
public int charge (ItemStack stack, int amount, int tier, boolean ignoreTransferLimit, boolean simulate)
{
NBTTagCompound tags = stack.getTagCompound();
if (!tags.hasKey("charge"))
return 0;
if (amount > 0)
{
// Clamp to the per-operation transfer limit…
if (amount > getTransferLimit(stack) && !ignoreTransferLimit)
{
amount = getTransferLimit(stack);
}
int charge = tags.getInteger("charge");
// …and to the remaining capacity.
if (amount > getMaxCharge(stack) - charge)
{
amount = getMaxCharge(stack) - charge;
}
charge += amount;
if (!simulate)
{
tags.setInteger("charge", charge);
// NOTE(review): uses (getMaxDamage() - 2) while discharge() uses
// (getMaxDamage() - 1) — the display math looks inconsistent; confirm.
stack.setItemDamage(1 + (getMaxCharge(stack) - charge) * (stack.getMaxDamage() - 2) / getMaxCharge(stack));
}
return amount;
}
else
return 0;
}
/**
 * IC2 discharge: removes EU up to the transfer limit and stored charge,
 * updating the "charge" tag and display damage unless simulating.
 * Returns the amount actually drained.
 */
@Override
public int discharge (ItemStack stack, int amount, int tier, boolean ignoreTransferLimit, boolean simulate)
{
NBTTagCompound tags = stack.getTagCompound();
if (tags == null || !tags.hasKey("charge"))
return 0;
if (amount > 0)
{
if (amount > getTransferLimit(stack) && !ignoreTransferLimit)
{
amount = getTransferLimit(stack);
}
int charge = tags.getInteger("charge");
// Cannot drain more than is stored.
if (amount > charge)
{
amount = charge;
}
charge -= amount;
if (!simulate)
{
tags.setInteger("charge", charge);
// NOTE(review): (getMaxDamage() - 1) here vs (getMaxDamage() - 2) in charge() — confirm.
stack.setItemDamage(1 + (getMaxCharge(stack) - charge) * (stack.getMaxDamage() - 1) / getMaxCharge(stack));
}
return amount;
}
else
return 0;
}
/** The energy tooltip is rendered by this class itself, not by IC2. */
@Override
public boolean canShowChargeToolTip (ItemStack itemStack)
{
return false;
}
@Override
public boolean canUse (ItemStack itemStack, int amount)
{
return false;
}
/* Battlegear support, IBattlegearWeapon */
/** Tools may be dual-wielded with an offhand weapon. */
@Override
public boolean willAllowOffhandWeapon ()
{
return true;
}
/** Tools may be paired with a shield. */
@Override
public boolean willAllowShield ()
{
return true;
}
@Override
public boolean isOffhandHandDualWeapon ()
{
return true;
}
@Override
public boolean sheatheOnBack ()
{
return false;
}
/** Returning true lets Battlegear run its default offhand attack handling. */
@Override
public boolean offhandAttackEntity (OffhandAttackEvent event, ItemStack mainhandItem, ItemStack offhandItem)
{
return true;
}
@Override
public boolean offhandClickAir (PlayerInteractEvent event, ItemStack mainhandItem, ItemStack offhandItem)
{
return true;
}
@Override
public boolean offhandClickBlock (PlayerInteractEvent event, ItemStack mainhandItem, ItemStack offhandItem)
{
return true;
}
/** No passive offhand effects by default. */
@Override
public void performPassiveEffects (Side effectiveSide, ItemStack mainhandItem, ItemStack offhandItem)
{
}
//TE support section -- from COFH core API reference section
/** Convenience: sets both RF receive and extract rates to the same value. */
public void setMaxTransfer (int maxTransfer)
{
setMaxReceive(maxTransfer);
setMaxExtract(maxTransfer);
}
/** Max RF accepted per receiveEnergy call. */
public void setMaxReceive (int maxReceive)
{
this.maxReceive = maxReceive;
}
/** Max RF drained per extractEnergy call. */
public void setMaxExtract (int maxExtract)
{
this.maxExtract = maxExtract;
}
/* IEnergyContainerItem */
/**
 * RF input: accepts up to min(remaining capacity, maxReceive rate, requested),
 * updating the "Energy" tag and display damage unless simulating.
 * Stacks without an Energy tag accept nothing.
 */
@Override
public int receiveEnergy (ItemStack container, int maxReceive, boolean simulate)
{
NBTTagCompound tags = container.getTagCompound();
if (tags == null || !tags.hasKey("Energy"))
return 0;
int energy = tags.getInteger("Energy");
int energyReceived = MathUtils.minInt(capacity - energy, MathUtils.minInt(this.maxReceive, maxReceive));
if (!simulate)
{
energy += energyReceived;
tags.setInteger("Energy", energy);
// NOTE(review): (getMaxDamage() - 2) here vs (getMaxDamage() - 1) in extractEnergy — confirm.
container.setItemDamage(1 + (getMaxEnergyStored(container) - energy) * (container.getMaxDamage() - 2) / getMaxEnergyStored(container));
}
return energyReceived;
}
/**
 * RF output: drains up to min(stored energy, maxExtract rate, requested),
 * updating the "Energy" tag and display damage unless simulating.
 */
@Override
public int extractEnergy (ItemStack container, int maxExtract, boolean simulate)
{
NBTTagCompound tags = container.getTagCompound();
if (tags == null || !tags.hasKey("Energy"))
{
return 0;
}
int energy = tags.getInteger("Energy");
int energyExtracted = MathUtils.minInt(energy, MathUtils.minInt(this.maxExtract, maxExtract));
if (!simulate)
{
energy -= energyExtracted;
tags.setInteger("Energy", energy);
container.setItemDamage(1 + (getMaxEnergyStored(container) - energy) * (container.getMaxDamage() - 1) / getMaxEnergyStored(container));
}
return energyExtracted;
}
/**
 * Stored RF, read straight from the stack's "Energy" tag; 0 when absent.
 */
@Override
public int getEnergyStored (ItemStack container)
{
    NBTTagCompound tags = container.getTagCompound();
    return (tags != null && tags.hasKey("Energy")) ? tags.getInteger("Energy") : 0;
}
/**
 * RF capacity; 0 for stacks that never received an "Energy" tag (not flux tools).
 */
@Override
public int getMaxEnergyStored (ItemStack container)
{
    NBTTagCompound tags = container.getTagCompound();
    return (tags != null && tags.hasKey("Energy")) ? capacity : 0;
}
//end of TE support section
}
|
// Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
// Copyright (c) 2011, 2012 Open Networking Foundation
// Copyright (c) 2012, 2013 Big Switch Networks, Inc.
// This library was generated by the LoxiGen Compiler.
// See the file LICENSE.txt which should have been included in the source distribution
// Automatically generated by LOXI from template of_class.java
// Do not modify
package org.projectfloodlight.openflow.protocol.ver11;
import org.projectfloodlight.openflow.protocol.*;
import org.projectfloodlight.openflow.protocol.action.*;
import org.projectfloodlight.openflow.protocol.actionid.*;
import org.projectfloodlight.openflow.protocol.bsntlv.*;
import org.projectfloodlight.openflow.protocol.errormsg.*;
import org.projectfloodlight.openflow.protocol.meterband.*;
import org.projectfloodlight.openflow.protocol.instruction.*;
import org.projectfloodlight.openflow.protocol.instructionid.*;
import org.projectfloodlight.openflow.protocol.match.*;
import org.projectfloodlight.openflow.protocol.oxm.*;
import org.projectfloodlight.openflow.protocol.queueprop.*;
import org.projectfloodlight.openflow.types.*;
import org.projectfloodlight.openflow.util.*;
import org.projectfloodlight.openflow.exceptions.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Set;
import org.jboss.netty.buffer.ChannelBuffer;
import com.google.common.hash.PrimitiveSink;
import com.google.common.hash.Funnel;
class OFQueueOpFailedErrorMsgVer11 implements OFQueueOpFailedErrorMsg {
private static final Logger logger = LoggerFactory.getLogger(OFQueueOpFailedErrorMsgVer11.class);
// version: 1.1
final static byte WIRE_VERSION = 2;
final static int MINIMUM_LENGTH = 12;
private final static long DEFAULT_XID = 0x0L;
private final static OFErrorCauseData DEFAULT_DATA = OFErrorCauseData.NONE;
// OF message fields (immutable; validated non-null in the constructor)
private final long xid;
private final OFQueueOpFailedCode code;
private final OFErrorCauseData data;
//
// package private constructor - used by readers, builders, and factory
OFQueueOpFailedErrorMsgVer11(long xid, OFQueueOpFailedCode code, OFErrorCauseData data) {
if(code == null) {
throw new NullPointerException("OFQueueOpFailedErrorMsgVer11: property code cannot be null");
}
if(data == null) {
throw new NullPointerException("OFQueueOpFailedErrorMsgVer11: property data cannot be null");
}
this.xid = xid;
this.code = code;
this.data = data;
}
// Accessors for OF message fields
@Override
public OFVersion getVersion() {
return OFVersion.OF_11;
}
@Override
public OFType getType() {
return OFType.ERROR;
}
@Override
public long getXid() {
return xid;
}
@Override
public OFErrorType getErrType() {
return OFErrorType.QUEUE_OP_FAILED;
}
@Override
public OFQueueOpFailedCode getCode() {
return code;
}
@Override
public OFErrorCauseData getData() {
return data;
}
/** Builder pre-populated with this message's field values. */
public OFQueueOpFailedErrorMsg.Builder createBuilder() {
return new BuilderWithParent(this);
}
/**
 * Builder backed by an existing message: unset fields fall back to the
 * parent message's values at build() time.
 */
static class BuilderWithParent implements OFQueueOpFailedErrorMsg.Builder {
final OFQueueOpFailedErrorMsgVer11 parentMessage;
// OF message fields
private boolean xidSet;
private long xid;
private boolean codeSet;
private OFQueueOpFailedCode code;
private boolean dataSet;
private OFErrorCauseData data;
BuilderWithParent(OFQueueOpFailedErrorMsgVer11 parentMessage) {
this.parentMessage = parentMessage;
}
@Override
public OFVersion getVersion() {
return OFVersion.OF_11;
}
@Override
public OFType getType() {
return OFType.ERROR;
}
@Override
public long getXid() {
return xid;
}
@Override
public OFQueueOpFailedErrorMsg.Builder setXid(long xid) {
this.xid = xid;
this.xidSet = true;
return this;
}
@Override
public OFErrorType getErrType() {
return OFErrorType.QUEUE_OP_FAILED;
}
@Override
public OFQueueOpFailedCode getCode() {
return code;
}
@Override
public OFQueueOpFailedErrorMsg.Builder setCode(OFQueueOpFailedCode code) {
this.code = code;
this.codeSet = true;
return this;
}
@Override
public OFErrorCauseData getData() {
return data;
}
@Override
public OFQueueOpFailedErrorMsg.Builder setData(OFErrorCauseData data) {
this.data = data;
this.dataSet = true;
return this;
}
@Override
public OFQueueOpFailedErrorMsg build() {
// Each field uses the explicitly-set value, else the parent's.
long xid = this.xidSet ? this.xid : parentMessage.xid;
OFQueueOpFailedCode code = this.codeSet ? this.code : parentMessage.code;
if(code == null)
throw new NullPointerException("Property code must not be null");
OFErrorCauseData data = this.dataSet ? this.data : parentMessage.data;
if(data == null)
throw new NullPointerException("Property data must not be null");
//
return new OFQueueOpFailedErrorMsgVer11(
xid,
code,
data
);
}
}
/**
 * Stand-alone builder: unset fields fall back to class defaults; code has
 * no default and must be set before build().
 */
static class Builder implements OFQueueOpFailedErrorMsg.Builder {
// OF message fields
private boolean xidSet;
private long xid;
private boolean codeSet;
private OFQueueOpFailedCode code;
private boolean dataSet;
private OFErrorCauseData data;
@Override
public OFVersion getVersion() {
return OFVersion.OF_11;
}
@Override
public OFType getType() {
return OFType.ERROR;
}
@Override
public long getXid() {
return xid;
}
@Override
public OFQueueOpFailedErrorMsg.Builder setXid(long xid) {
this.xid = xid;
this.xidSet = true;
return this;
}
@Override
public OFErrorType getErrType() {
return OFErrorType.QUEUE_OP_FAILED;
}
@Override
public OFQueueOpFailedCode getCode() {
return code;
}
@Override
public OFQueueOpFailedErrorMsg.Builder setCode(OFQueueOpFailedCode code) {
this.code = code;
this.codeSet = true;
return this;
}
@Override
public OFErrorCauseData getData() {
return data;
}
@Override
public OFQueueOpFailedErrorMsg.Builder setData(OFErrorCauseData data) {
this.data = data;
this.dataSet = true;
return this;
}
//
@Override
public OFQueueOpFailedErrorMsg build() {
long xid = this.xidSet ? this.xid : DEFAULT_XID;
if(!this.codeSet)
throw new IllegalStateException("Property code doesn't have default value -- must be set");
if(code == null)
throw new NullPointerException("Property code must not be null");
OFErrorCauseData data = this.dataSet ? this.data : DEFAULT_DATA;
if(data == null)
throw new NullPointerException("Property data must not be null");
return new OFQueueOpFailedErrorMsgVer11(
xid,
code,
data
);
}
}
final static Reader READER = new Reader();
/**
 * Wire decoder: validates the fixed header fields (version 2, type ERROR,
 * errType QUEUE_OP_FAILED), then reads xid, code and trailing cause data.
 * Returns null (with the reader index reset) when the buffer is incomplete.
 */
static class Reader implements OFMessageReader<OFQueueOpFailedErrorMsg> {
@Override
public OFQueueOpFailedErrorMsg readFrom(ChannelBuffer bb) throws OFParseError {
int start = bb.readerIndex();
// fixed value property version == 2
byte version = bb.readByte();
if(version != (byte) 0x2)
throw new OFParseError("Wrong version: Expected=OFVersion.OF_11(2), got="+version);
// fixed value property type == 1
byte type = bb.readByte();
if(type != (byte) 0x1)
throw new OFParseError("Wrong type: Expected=OFType.ERROR(1), got="+type);
int length = U16.f(bb.readShort());
if(length < MINIMUM_LENGTH)
throw new OFParseError("Wrong length: Expected to be >= " + MINIMUM_LENGTH + ", was: " + length);
if(bb.readableBytes() + (bb.readerIndex() - start) < length) {
// Buffer does not have all data yet
bb.readerIndex(start);
return null;
}
if(logger.isTraceEnabled())
logger.trace("readFrom - length={}", length);
long xid = U32.f(bb.readInt());
// fixed value property errType == 9
short errType = bb.readShort();
if(errType != (short) 0x9)
throw new OFParseError("Wrong errType: Expected=OFErrorType.QUEUE_OP_FAILED(9), got="+errType);
OFQueueOpFailedCode code = OFQueueOpFailedCodeSerializerVer11.readFrom(bb);
// Cause data is whatever remains of the declared message length.
OFErrorCauseData data = OFErrorCauseData.read(bb, length - (bb.readerIndex() - start), OFVersion.OF_11);
OFQueueOpFailedErrorMsgVer11 queueOpFailedErrorMsgVer11 = new OFQueueOpFailedErrorMsgVer11(
xid,
code,
data
);
if(logger.isTraceEnabled())
logger.trace("readFrom - read={}", queueOpFailedErrorMsgVer11);
return queueOpFailedErrorMsgVer11;
}
}
/** Feeds this message's canonical bytes into a Guava hash sink. */
public void putTo(PrimitiveSink sink) {
FUNNEL.funnel(this, sink);
}
final static OFQueueOpFailedErrorMsgVer11Funnel FUNNEL = new OFQueueOpFailedErrorMsgVer11Funnel();
static class OFQueueOpFailedErrorMsgVer11Funnel implements Funnel<OFQueueOpFailedErrorMsgVer11> {
private static final long serialVersionUID = 1L;
@Override
public void funnel(OFQueueOpFailedErrorMsgVer11 message, PrimitiveSink sink) {
// fixed value property version = 2
sink.putByte((byte) 0x2);
// fixed value property type = 1
sink.putByte((byte) 0x1);
// FIXME: skip funnel of length
sink.putLong(message.xid);
// fixed value property errType = 9
sink.putShort((short) 0x9);
OFQueueOpFailedCodeSerializerVer11.putTo(message.code, sink);
message.data.putTo(sink);
}
}
/** Serialises this message onto the wire via the shared WRITER. */
public void writeTo(ChannelBuffer bb) {
WRITER.write(bb, this);
}
final static Writer WRITER = new Writer();
/**
 * Wire encoder: writes the fixed header with a placeholder length, then the
 * payload, then patches the real length back into the header.
 */
static class Writer implements OFMessageWriter<OFQueueOpFailedErrorMsgVer11> {
@Override
public void write(ChannelBuffer bb, OFQueueOpFailedErrorMsgVer11 message) {
int startIndex = bb.writerIndex();
// fixed value property version = 2
bb.writeByte((byte) 0x2);
// fixed value property type = 1
bb.writeByte((byte) 0x1);
// length is length of variable message, will be updated at the end
int lengthIndex = bb.writerIndex();
bb.writeShort(U16.t(0));
bb.writeInt(U32.t(message.xid));
// fixed value property errType = 9
bb.writeShort((short) 0x9);
OFQueueOpFailedCodeSerializerVer11.writeTo(bb, message.code);
message.data.writeTo(bb);
// update length field
int length = bb.writerIndex() - startIndex;
bb.setShort(lengthIndex, length);
}
}
/** Human-readable dump of all message fields; output format unchanged. */
@Override
public String toString() {
    StringBuilder b = new StringBuilder("OFQueueOpFailedErrorMsgVer11(");
    b.append("xid=").append(xid)
     .append(", ")
     .append("code=").append(code)
     .append(", ")
     .append("data=").append(data)
     .append(")");
    return b.toString();
}
/**
 * Field-by-field equality; uses getClass() (not instanceof) so different
 * generated subclasses never compare equal.
 */
@Override
public boolean equals(Object obj) {
    if (this == obj)
        return true;
    if (obj == null || getClass() != obj.getClass())
        return false;
    OFQueueOpFailedErrorMsgVer11 other = (OFQueueOpFailedErrorMsgVer11) obj;
    if (xid != other.xid)
        return false;
    if (code == null ? other.code != null : !code.equals(other.code))
        return false;
    if (data == null ? other.data != null : !data.equals(other.data))
        return false;
    return true;
}
/**
 * Hash over all fields, matching equals(). Fix: the xid term previously read
 * {@code result = prime * (int)(xid ^ ...)} and dropped {@code result +},
 * deviating from the standard accumulating recipe used by the other terms.
 */
@Override
public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + (int) (xid ^ (xid >>> 32));
    result = prime * result + ((code == null) ? 0 : code.hashCode());
    result = prime * result + ((data == null) ? 0 : data.hashCode());
    return result;
}
}
|
package com.dianping.cat.report.page.cdn.graph;
import java.util.Date;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Map.Entry;
import com.dianping.cat.Constants;
import com.dianping.cat.consumer.metric.model.entity.MetricItem;
import com.dianping.cat.consumer.metric.model.entity.MetricReport;
import com.dianping.cat.consumer.metric.model.entity.Segment;
import com.dianping.cat.helper.TimeHelper;
import com.dianping.cat.report.graph.metric.AbstractGraphCreator;
import com.dianping.cat.report.page.LineChart;
/**
 * Builds CDN metric line charts from hourly MetricReports, filtered by
 * cdn/province/city.
 */
public class CdnGraphCreator extends AbstractGraphCreator {

    private final static String CDN = "cdn";

    /**
     * Wraps each per-key series into a LineChart keyed by metric name.
     * "datas" keeps minute resolution; "dataWithOutFutures" is the stepped
     * series with future points removed.
     */
    private Map<String, LineChart> buildInfoChartData(final Map<String, double[]> datas, Date startDate, Date endDate,
          final Map<String, double[]> dataWithOutFutures) {
        Map<String, LineChart> charts = new LinkedHashMap<String, LineChart>();
        int step = m_dataExtractor.getStep();

        for (Entry<String, double[]> entry : dataWithOutFutures.entrySet()) {
            String key = entry.getKey();
            double[] value = entry.getValue();
            LineChart lineChart = new LineChart();

            lineChart.setId(key);
            lineChart.setTitle(key);
            lineChart.setStart(startDate);
            lineChart.setSize(value.length);
            lineChart.setStep(step * TimeHelper.ONE_MINUTE);

            Map<Long, Double> all = convertToMap(datas.get(key), startDate, 1);
            Map<Long, Double> current = convertToMap(dataWithOutFutures.get(key), startDate, step);

            // Patch the most recent minute from the fine-grained series.
            addLastMinuteData(current, all, m_lastMinute, endDate);
            lineChart.add(entry.getKey(), current);
            charts.put(key, lineChart);
        }
        return charts;
    }

    /**
     * Aggregates one hourly report into per-key arrays of 60 per-minute sums.
     */
    private Map<String, double[]> fetchData(MetricReport report) {
        Map<String, double[]> data = new LinkedHashMap<String, double[]>();
        Map<String, MetricItem> items = report.getMetricItems();

        for (Entry<String, MetricItem> item : items.entrySet()) {
            String key = item.getKey();
            if (!data.containsKey(key)) {
                // Java zero-initialises array elements; the old explicit
                // fill loop was redundant and has been removed.
                data.put(key, new double[60]);
            }
            Map<Integer, Segment> segments = item.getValue().getSegments();
            for (Segment segment : segments.values()) {
                int id = segment.getId();
                data.get(key)[id] += segment.getSum();
            }
        }
        return data;
    }

    /**
     * Loads and merges all hourly CDN reports in [startDate, endDate) into one
     * minute-resolution series per metric key.
     */
    public Map<String, double[]> prepareAllData(Date startDate, Date endDate, String cdn, String province, String city) {
        long start = startDate.getTime(), end = endDate.getTime();
        int totalSize = (int) ((end - start) / TimeHelper.ONE_MINUTE);
        Map<String, String> properties = new HashMap<String, String>();

        properties.put("metricType", Constants.METRIC_CDN);
        properties.put("cdn", cdn);
        properties.put("province", province);
        properties.put("city", city);

        Map<String, double[]> sourceValue = new LinkedHashMap<String, double[]>();
        int index = 0;

        // One report per hour; index tracks the hour offset for mergeMap.
        for (; start < end; start += TimeHelper.ONE_HOUR) {
            MetricReport report = m_metricReportService.queryCdnReport(CDN, properties, new Date(start));
            Map<String, double[]> currentValues = fetchData(report);

            mergeMap(sourceValue, currentValues, totalSize, index);
            index++;
        }
        return sourceValue;
    }

    /**
     * Entry point: raw data -> extracted/stepped series -> future points
     * stripped -> charts.
     */
    public Map<String, LineChart> queryBaseInfo(Date startDate, Date endDate, String cdn, String province, String city) {
        Map<String, double[]> oldCurrentValues = prepareAllData(startDate, endDate, cdn, province, city);
        Map<String, double[]> allCurrentValues = m_dataExtractor.extract(oldCurrentValues);
        Map<String, double[]> dataWithOutFutures = removeFutureData(endDate, allCurrentValues);

        return buildInfoChartData(oldCurrentValues, startDate, endDate, dataWithOutFutures);
    }
}
|
package com.example.studentappmessenger;
import android.content.Intent;
import android.content.SharedPreferences;
import android.os.Bundle;
import android.view.Gravity;
import android.view.Menu;
import android.view.MenuItem;
import android.widget.PopupMenu;

import androidx.annotation.NonNull;
import androidx.appcompat.app.ActionBar;
import androidx.appcompat.app.AppCompatActivity;
import androidx.fragment.app.Fragment;
import androidx.fragment.app.FragmentTransaction;

import com.example.studentappmessenger.fragments.ChatListFragment;
import com.example.studentappmessenger.fragments.ForumFragment;
import com.example.studentappmessenger.fragments.GroupChatsFragment;
import com.example.studentappmessenger.fragments.HomeFragment;
import com.example.studentappmessenger.fragments.NotificationsFragment;
import com.example.studentappmessenger.fragments.ProfileFragment;
import com.example.studentappmessenger.fragments.UsersFragment;
import com.example.studentappmessenger.notifications.Token;
import com.google.android.material.bottomnavigation.BottomNavigationView;
import com.google.firebase.auth.FirebaseAuth;
import com.google.firebase.auth.FirebaseUser;
import com.google.firebase.database.DatabaseReference;
import com.google.firebase.database.FirebaseDatabase;
import com.google.firebase.iid.FirebaseInstanceId;
public class DashboardActivity extends AppCompatActivity {
FirebaseAuth firebaseAuth;
ActionBar actionBar;
private BottomNavigationView navigationView;
String mUID;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_dashboard);
actionBar=getSupportActionBar();
firebaseAuth = FirebaseAuth.getInstance();
navigationView= findViewById(R.id.navigation_btm );
// mProfileTv = findViewById(R.id.profileTv);
navigationView.setOnNavigationItemSelectedListener(selectedListener);
actionBar.setTitle("Home");
HomeFragment fragment1=new HomeFragment();
FragmentTransaction ft1= getSupportFragmentManager().beginTransaction();
ft1.replace(R.id.content,fragment1,"");
ft1.commit();
checkUserStatus();
}
@Override
protected void onResume() {
checkUserStatus();
super.onResume();
}
public void updateToken(String token){
DatabaseReference ref = FirebaseDatabase.getInstance().getReference("Tokens");
Token mToken = new Token(token);
ref.child(mUID).setValue(mToken);
}
private BottomNavigationView.OnNavigationItemSelectedListener selectedListener =
new BottomNavigationView.OnNavigationItemSelectedListener() {
@Override
public boolean onNavigationItemSelected(@NonNull MenuItem menuItem) {
switch (menuItem.getItemId()){
case R.id.nav_home:
//Fragment Transaction Process
actionBar.setTitle("Home");
HomeFragment fragment1=new HomeFragment();
FragmentTransaction ft1= getSupportFragmentManager().beginTransaction();
ft1.replace(R.id.content,fragment1,"");
ft1.commit();
return true;
case R.id.nav_profile:
//Fragment Transaction Process
actionBar.setTitle("Profile");
ProfileFragment fragment2= new ProfileFragment();
FragmentTransaction ft2= getSupportFragmentManager().beginTransaction();
ft2.replace(R.id.content,fragment2,"");
ft2.commit();
return true;
case R.id.nav_users:
//Fragment Transaction Process
actionBar.setTitle("Users");
UsersFragment fragment3 = new UsersFragment();
FragmentTransaction ft3= getSupportFragmentManager().beginTransaction();
ft3.replace(R.id.content,fragment3,"");
ft3.commit();
return true;
case R.id.nav_chat:
//Fragment Transaction Process
actionBar.setTitle("Chats");
ChatListFragment fragment4 = new ChatListFragment();
FragmentTransaction ft4= getSupportFragmentManager().beginTransaction();
ft4.replace(R.id.content,fragment4,"");
ft4.commit();
return true;
case R.id.nav_more:
//Fragment Transaction Process
showMoreOptions();
}
return false;
}
} ;
private void showMoreOptions() {
    // Overflow ("More") menu anchored to the bottom navigation view.
    PopupMenu popupMenu = new PopupMenu(this, navigationView, Gravity.END);
    popupMenu.getMenu().add(Menu.NONE, 0, 0, "Notifications");
    popupMenu.getMenu().add(Menu.NONE, 1, 0, "Group Chats");
    popupMenu.getMenu().add(Menu.NONE, 2, 0, "Forum");
    popupMenu.getMenu().add(Menu.NONE, 3, 0, "Sos");
    popupMenu.setOnMenuItemClickListener(new PopupMenu.OnMenuItemClickListener() {
        @Override
        public boolean onMenuItemClick(MenuItem item) {
            switch (item.getItemId()) {
                case 0: {
                    actionBar.setTitle("Notifications");
                    FragmentTransaction tx = getSupportFragmentManager().beginTransaction();
                    tx.replace(R.id.content, new NotificationsFragment(), "");
                    tx.commit();
                    break;
                }
                case 1: {
                    actionBar.setTitle("Group Chats");
                    FragmentTransaction tx = getSupportFragmentManager().beginTransaction();
                    tx.replace(R.id.content, new GroupChatsFragment(), "");
                    tx.commit();
                    break;
                }
                case 2: {
                    actionBar.setTitle("Forum");
                    FragmentTransaction tx = getSupportFragmentManager().beginTransaction();
                    tx.replace(R.id.content, new ForumFragment(), "");
                    tx.commit();
                    break;
                }
                case 3: {
                    // SOS opens a dedicated activity rather than a fragment.
                    openSos();
                    break;
                }
                default:
                    break;
            }
            return false;
        }
    });
    popupMenu.show();
}
// Launches the SOS screen (MainActivity2) in response to the "Sos" popup item.
private void openSos() {
Intent intent = new Intent(this,MainActivity2.class);
startActivity(intent);
}
// Gates the dashboard behind authentication. If a Firebase user is signed in,
// caches their uid (field mUID and SharedPreferences "SP_USER"/"Current_USERID")
// and re-publishes the FCM token; otherwise redirects to MainActivity (login)
// and finishes this activity.
// NOTE(review): FirebaseInstanceId.getInstance().getToken() is a deprecated
// synchronous accessor that may return null before a token is generated —
// confirm against the Firebase SDK version in use (newer SDKs use
// FirebaseMessaging.getInstance().getToken()).
private void checkUserStatus(){
FirebaseUser user = firebaseAuth.getCurrentUser();
if(user!=null){
// mProfileTv.setText(user.getEmail());
mUID = user.getUid();
SharedPreferences sp = getSharedPreferences("SP_USER", MODE_PRIVATE);
SharedPreferences.Editor editor = sp.edit();
editor.putString("Current_USERID", mUID);
editor.apply();
updateToken(FirebaseInstanceId.getInstance().getToken());
}else{
// Not signed in: bounce to the login screen and drop this activity from the
// back stack.
startActivity(new Intent(DashboardActivity.this,MainActivity.class));
finish();
}
}
@Override
public void onBackPressed() {
super.onBackPressed();
// Explicitly finish so pressing back always leaves the dashboard rather than
// resuming it from the back stack.
finish();
}
@Override
protected void onStart(){
// Re-check authentication every time the activity becomes visible; an
// unauthenticated user is redirected to login before the UI is shown.
checkUserStatus();
super.onStart();
}
}
|
package com.codepath.apps.restclienttemplate.models;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import org.parceler.Parcel;
@Parcel
public class Tweet {
    /** Tweet text ("text" field from the API). */
    public String body;
    /** Tweet id from the API ("id"); used as the database id. */
    public long uid; //database ID for tweet
    /** Author of the tweet. */
    public User user;
    /** Raw "created_at" timestamp string from the API. */
    public String createdAt;
    /** URL of the first attached media item, or "" when there is none. */
    public String media;

    /**
     * Builds a Tweet from a Twitter API status JSON object.
     *
     * @param object status payload containing "text", "id", "created_at",
     *               "user" and "entities"
     * @return a populated Tweet
     * @throws JSONException if a required field is missing or has the wrong type
     */
    public static Tweet fromJSON(JSONObject object) throws JSONException {
        Tweet tweet = new Tweet();
        tweet.body = object.getString("text");
        tweet.uid = object.getLong("id");
        tweet.createdAt = object.getString("created_at");
        tweet.user = User.fromJSON(object.getJSONObject("user"));
        JSONObject entities = object.getJSONObject("entities");
        if (entities.has("media")) {
            // getJSONObject(0) validates the element type instead of relying on
            // an unchecked (JSONObject) cast of get(0).
            tweet.media = entities.getJSONArray("media").getJSONObject(0).getString("media_url");
        } else {
            tweet.media = "";
        }
        return tweet;
    }

    /** No-arg constructor required by the Parceler library. */
    public Tweet () {
    }
}
|
package org.jeecg.modules.system.controller;
import javax.servlet.http.HttpServletRequest;
import org.apache.commons.lang.StringUtils;
import org.jeecg.common.api.vo.Result;
import org.jeecg.modules.system.mapper.SysDictMapper;
import org.jeecg.modules.system.model.DuplicateCheckVo;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RestController;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import lombok.extern.slf4j.Slf4j;
/**
* @Title: DuplicateCheckAction
* @Description: 重复校验工具
* @Author
* @Date 2019-03-25
* @Version V1.0
*/
@Slf4j
@RestController
@RequestMapping("/sys/duplicate")
@Api(tags="重复校验")
public class DuplicateCheckController {
    @Autowired
    SysDictMapper sysDictMapper;

    /**
     * Checks whether the submitted value already exists in the system.
     *
     * @param duplicateCheckVo table/field/value descriptor, plus an optional
     *                         dataId identifying the record being edited
     * @param request          current HTTP request (unused, kept for signature
     *                         compatibility)
     * @return ok when the value is free, error when it is already taken
     */
    @RequestMapping(value = "/check", method = RequestMethod.GET)
    @ApiOperation("重复校验接口")
    public Result<Object> doDuplicateCheck(DuplicateCheckVo duplicateCheckVo, HttpServletRequest request) {
        log.info("----duplicate check------:" + duplicateCheckVo.toString());
        // With a dataId we are validating an edit (the record itself is excluded
        // from the count); without one we are validating a brand-new record.
        Long num = StringUtils.isNotBlank(duplicateCheckVo.getDataId())
                ? sysDictMapper.duplicateCheckCountSql(duplicateCheckVo)
                : sysDictMapper.duplicateCheckCountSqlNoDataId(duplicateCheckVo);
        if (num != null && num != 0) {
            // Value is already taken.
            log.info("该值不可用,系统中已存在!");
            return Result.error("该值不可用,系统中已存在!");
        }
        // Value is available.
        return Result.ok("该值可用!");
    }
}
|
package com.test.maps;
import java.sql.Date;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.UUID;
import com.test.maps.model.Maps;
import com.test.maps.repository.MapsRepository;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
@RestController
@RequestMapping("/maps") // actual API endpoint structure could be better laid out, will research this later.
public class MainRest {
    @Autowired
    MapsRepository mapsRepository;

    /**
     * Returns every map in the database, optionally filtered to rows whose URL
     * contains the given substring.
     *
     * @param url optional substring to match against the URL column
     * @return 200 with the matching rows, 204 when none match, 500 on failure
     */
    @GetMapping("/all")
    public ResponseEntity<List<Maps>> getAllMaps(@RequestParam(required = false) String url) {
        try {
            List<Maps> maps = new ArrayList<>();
            if (url == null)
                mapsRepository.findAll().forEach(maps::add);
            else
                mapsRepository.findByUrlContaining(url).forEach(maps::add);
            if (maps.isEmpty()) {
                return new ResponseEntity<>(HttpStatus.NO_CONTENT);
            }
            return new ResponseEntity<>(maps, HttpStatus.OK);
        } catch (Exception e) {
            return new ResponseEntity<>(null, HttpStatus.INTERNAL_SERVER_ERROR);
        }
    }

    /**
     * Returns a single map by its id.
     *
     * @param id primary key of the row
     * @return 200 with the row, or 404 when it does not exist
     */
    @GetMapping("/id/{id}")
    public ResponseEntity<Maps> getMapsById(@PathVariable("id") UUID id) {
        Optional<Maps> mapsData = mapsRepository.findById(id);
        if (mapsData.isPresent()) {
            return new ResponseEntity<>(mapsData.get(), HttpStatus.OK);
        } else {
            return new ResponseEntity<>(HttpStatus.NOT_FOUND);
        }
    }

    /**
     * Returns all maps whose date is strictly after the given date.
     *
     * @param mydate date in the format accepted by {@link Date#valueOf(String)}
     *               (yyyy-[m]m-[d]d); an unparseable date yields 500
     * @return 200 with the matching rows, 500 on parse or query failure
     */
    @GetMapping("/date/{date}")
    public ResponseEntity<List<Maps>> getMapsByDate(@PathVariable("date") String mydate) {
        try {
            List<Maps> mapsData = new ArrayList<>();
            mapsRepository.findByDateGreaterThan(Date.valueOf(mydate)).forEach(mapsData::add);
            return new ResponseEntity<>(mapsData, HttpStatus.OK);
        } catch (Exception e) {
            return new ResponseEntity<>(null, HttpStatus.INTERNAL_SERVER_ERROR);
        }
    }

    /**
     * Returns all maps whose meta content contains the given keyword.
     *
     * @param keyWord substring to search for in the meta column
     * @return 200 with the matching rows (possibly empty)
     */
    @GetMapping("/meta/{keyWord}")
    public ResponseEntity<List<Maps>> getMapsByMetaKeyword(@PathVariable String keyWord) {
        List<Maps> mapsData = new ArrayList<>();
        mapsRepository.findByMetaContains(keyWord).forEach(mapsData::add);
        return new ResponseEntity<>(mapsData, HttpStatus.OK);
    }

    /**
     * Persists a new map entry.
     *
     * @param maps entity deserialized from the request body
     */
    @PostMapping("/new")
    public void addEntry(@RequestBody Maps maps) {
        mapsRepository.save(maps);
    }
}
|
package org.powertac.rachma.broker;
import org.powertac.rachma.powertac.broker.exception.BrokerNotFoundException;
import java.util.Map;
/**
 * Read-only lookup of configured {@link BrokerType}s by name.
 */
public interface BrokerTypeRepository {
/**
 * Returns the broker type registered under the given name.
 *
 * @param name broker type name
 * @return the matching broker type
 * @throws BrokerNotFoundException if no type with that name exists
 */
BrokerType findByName(String name) throws BrokerNotFoundException;
/** Returns all known broker types, keyed by name. */
Map<String, BrokerType> findAll();
/** Returns whether a broker type with the given name is registered. */
boolean has(String name);
}
|
package com.huaweicloud.sdk.iot.device.demo;
import com.huaweicloud.sdk.iot.device.IoTDevice;
import com.huaweicloud.sdk.iot.device.client.requests.DeviceMessage;
import com.huaweicloud.sdk.iot.device.transport.ActionListener;
import com.huaweicloud.sdk.iot.device.transport.RawMessage;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.io.File;
import java.net.URL;
/**
 * Demonstrates raw message pass-through using the DeviceClient directly.
 */
public class MessageSample {
    private static final Logger log = LogManager.getLogger(MessageSample.class);

    public static void main(String[] args) throws InterruptedException {
        // Load the IoT platform CA certificate used to verify the server.
        URL resource = MessageSample.class.getClassLoader().getResource("ca.jks");
        if (resource == null) {
            // Guard: getResource returns null when ca.jks is missing from the
            // classpath; dereferencing it previously threw a NullPointerException.
            log.error("ca.jks not found on classpath");
            return;
        }
        File file = new File(resource.getPath());
        // Create the device.
        IoTDevice device = new IoTDevice("ssl://iot-mqtts.cn-north-4.myhuaweicloud.com:8883",
            "5e06bfee334dd4f33759f5b3_demo", "secret", file);
        if (device.init() != 0) {
            return;
        }
        // Receive downlink messages from the platform.
        device.getClient().setDeviceMessageListener(
            deviceMessage -> log.info("onDeviceMessage:" + deviceMessage.toString()));
        // Report forever, once every 5 seconds.
        while (true) {
            device.getClient().reportDeviceMessage(new DeviceMessage("hello"), new ActionListener() {
                @Override
                public void onSuccess(Object context) {
                    log.info("reportDeviceMessage ok");
                }

                @Override
                public void onFailure(Object context, Throwable var2) {
                    log.error("reportDeviceMessage fail: " + var2);
                }
            });
            // Publish to a custom topic; the topic must be configured on the
            // platform beforehand.
            String topic = "$oc/devices/" + device.getDeviceId() + "/user/wpy";
            device.getClient().publishRawMessage(new RawMessage(topic, "hello raw message "),
                new ActionListener() {
                    @Override
                    public void onSuccess(Object context) {
                        log.info("publishRawMessage ok: ");
                    }

                    @Override
                    public void onFailure(Object context, Throwable var2) {
                        log.error("publishRawMessage fail: " + var2);
                    }
                });
            Thread.sleep(5000);
        }
    }
}
|
package com.ixyf;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
/**
 * Spring Boot entry point for the chan service.
 */
@SpringBootApplication
public class ChanServiceApplication {
public static void main(String[] args) {
SpringApplication.run(ChanServiceApplication.class, args);
}
}
|
/*******************************************************************************
* Copyright 2015 Technology Blueprint Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package uk.co.techblue.docusign.client.dto;
import java.util.List;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonInclude.Include;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
/**
* The Class EnvelopeEmailSetting.
*/
/**
 * The Class EnvelopeEmailSetting.
 *
 * <p>Per-envelope email overrides serialized for the DocuSign REST API: an
 * optional reply-to address/name and a list of BCC recipients. Null fields are
 * omitted from the JSON payload ({@code Include.NON_NULL}).
 */
@JsonSerialize
@JsonInclude(value = Include.NON_NULL)
public class EnvelopeEmailSetting extends BaseDto {
/** The Constant serialVersionUID. */
private static final long serialVersionUID = 4018511245527085865L;
/** The reply email address override. */
@JsonProperty(value = "replyEmailAddressOverride")
private String replyEmailAddressOverride;
/** The reply email name override. */
@JsonProperty(value = "replyEmailNameOverride")
private String replyEmailNameOverride;
/** The bcc email addresses. */
@JsonProperty(value = "bccEmailAddresses")
private List<BlindCopyEmailAddress> bccEmailAddresses;
/**
 * Gets the reply email address override.
 *
 * @return the reply email address override, or null when not set
 */
public String getReplyEmailAddressOverride() {
return replyEmailAddressOverride;
}
/**
 * Sets the reply email address override.
 *
 * @param replyEmailAddressOverride the new reply email address override
 */
public void setReplyEmailAddressOverride(String replyEmailAddressOverride) {
this.replyEmailAddressOverride = replyEmailAddressOverride;
}
/**
 * Gets the reply email name override.
 *
 * @return the reply email name override, or null when not set
 */
public String getReplyEmailNameOverride() {
return replyEmailNameOverride;
}
/**
 * Sets the reply email name override.
 *
 * @param replyEmailNameOverride the new reply email name override
 */
public void setReplyEmailNameOverride(String replyEmailNameOverride) {
this.replyEmailNameOverride = replyEmailNameOverride;
}
/**
 * Gets the bcc email addresses.
 *
 * @return the bcc email addresses, or null when not set
 */
public List<BlindCopyEmailAddress> getBccEmailAddresses() {
return bccEmailAddresses;
}
/**
 * Sets the bcc email addresses.
 *
 * @param bccEmailAddresses the new bcc email addresses
 */
public void setBccEmailAddresses(List<BlindCopyEmailAddress> bccEmailAddresses) {
this.bccEmailAddresses = bccEmailAddresses;
}
}
|
/*
* Copyright (c) 2020-2021 CertifAI Sdn. Bhd.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package ai.classifai.database.annotation.bndbox;
import ai.classifai.database.DbConfig;
import ai.classifai.database.annotation.AnnotationQuery;
import ai.classifai.database.annotation.AnnotationVerticle;
import ai.classifai.util.ParamConfig;
import ai.classifai.util.message.ErrorCodes;
import ai.classifai.util.type.AnnotationHandler;
import ai.classifai.util.type.AnnotationType;
import ai.classifai.util.type.database.H2;
import ai.classifai.util.type.database.RelationalDb;
import io.vertx.core.Promise;
import io.vertx.core.Vertx;
import io.vertx.core.eventbus.Message;
import io.vertx.core.json.JsonObject;
import io.vertx.jdbcclient.JDBCPool;
import lombok.extern.slf4j.Slf4j;
/**
 * Bounding Box Verticle
 *
 * Handles persistence of bounding-box annotations: owns the H2-backed JDBC
 * pool for the bounding-box table and dispatches event-bus messages to the
 * matching AnnotationVerticle operation.
 *
 * @author codenamewei
 */
@Slf4j
public class BoundingBoxVerticle extends AnnotationVerticle
{
    /**
     * Event-bus entry point: routes a message to the handler selected by its
     * action header. Messages without an action header are failed immediately.
     */
    public void onMessage(Message<JsonObject> message)
    {
        if (!message.headers().contains(ParamConfig.getActionKeyword()))
        {
            log.error("No action header specified for message with headers {} and body {}",
                    message.headers(), message.body().encodePrettily());
            message.fail(ErrorCodes.NO_ACTION_SPECIFIED.ordinal(), "No keyword " + ParamConfig.getActionKeyword() + " specified");
            return;
        }
        String action = message.headers().get(ParamConfig.getActionKeyword());
        if (action.equals(AnnotationQuery.getQueryData()))
        {
            this.queryData(message, ParamConfig.getBoundingBoxParam());
        }
        else if (action.equals(AnnotationQuery.getUpdateData()))
        {
            this.updateData(message, ParamConfig.getBoundingBoxParam());
        }
        else if (action.equals(AnnotationQuery.getRetrieveDataPath()))
        {
            this.retrieveDataPath(message);
        }
        else if (action.equals(AnnotationQuery.getLoadValidProjectUuid()))
        {
            this.loadValidProjectUuid(message);
        }
        else if (action.equals(AnnotationQuery.getDeleteProject()))
        {
            this.deleteProject(message);
        }
        else if (action.equals(AnnotationQuery.getDeleteSelectionUuidList()))
        {
            this.deleteSelectionUuidList(message);
        }
        else
        {
            log.error("BoundingBox Verticle query error. Action did not have an assigned function for handling.");
        }
    }

    /** Builds the JDBC pool pointing at the bounding-box database file. */
    private JDBCPool createJDBCPool(Vertx vertx, RelationalDb db)
    {
        return JDBCPool.pool(vertx, new JsonObject()
                .put("url", db.getUrlHeader() + DbConfig.getTableAbsPathDict().get(DbConfig.getBndBoxKey()))
                .put("driver_class", db.getDriver())
                .put("user", db.getUser())
                .put("password", db.getPassword())
                .put("max_pool_size", 30));
    }

    @Override
    public void stop(Promise<Void> promise)
    {
        jdbcPool.close();
        log.info("Bounding Box Verticle stopping...");
        // Bug fix: the promise was never completed, so Vert.x considered the
        // verticle's asynchronous stop still in progress and undeployment
        // would hang until it timed out.
        promise.complete();
    }

    //obtain a JDBC pool connection,
    //Performs a SQL query to create the pages table unless it already existed
    @Override
    public void start(Promise<Void> promise) throws Exception
    {
        H2 h2 = DbConfig.getH2();
        jdbcPool = createJDBCPool(vertx, h2);
        jdbcPool.getConnection(ar -> {
            if (ar.failed())
            {
                log.error("Could not open a database connection for Bounding Box Verticle", ar.cause());
                promise.fail(ar.cause());
            }
            else
            {
                jdbcPool.query(BoundingBoxDbQuery.getCreateProject())
                        .execute()
                        .onComplete(create -> {
                            if (create.failed())
                            {
                                log.error("BoundingBoxVerticle database preparation error", create.cause());
                                promise.fail(create.cause());
                            }
                            else
                            {
                                AnnotationHandler.addJDBCPool(AnnotationType.BOUNDINGBOX, jdbcPool);
                                //the consumer methods registers an event bus destination handler
                                vertx.eventBus().consumer(BoundingBoxDbQuery.getQueue(), this::onMessage);
                                promise.complete();
                            }
                        });
            }
        });
    }
}
|
/**
* Licensed to Apereo under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright ownership. Apereo
* licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use
* this file except in compliance with the License. You may obtain a copy of the License at the
* following location:
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apereo.portal.spring.security.preauth;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.IOException;
import javax.servlet.ServletException;
import org.apereo.portal.layout.profile.ProfileSelectionEvent;
import org.junit.Test;
import org.springframework.security.core.context.SecurityContextHolder;
/**
 * Identity-swap scenarios for the pre-authenticated processing filter: verifies
 * that the original user is recorded, the target username is applied to the
 * person, and a profile-selection event is published when a swap login occurs.
 */
public class PortalPreAuthenticatedProcessingFilterIdentitySwapTest
extends PortalPreAuthenticatedProcessingFilterTestBase {
private String targetProfileKey;
private String targetUsername;
@Override
public void additionalSetup() {
this.targetProfileKey = "targetProfileKey";
this.targetUsername = "targetUsername";
// Seed a security context carrying the base-class auth so the filter can
// read the pre-swap authentication.
SecurityContextHolder.createEmptyContext();
SecurityContextHolder.getContext().setAuthentication(this.auth);
}
@Test
public void testThatOriginalUserIsSetWhenSecurityContextIsStillAvailable() throws Exception {
// given
this.filter.setClearSecurityContextPriorToPortalAuthentication(false);
this.requestIsForIdentitySwapLogin();
this.requestedSessionIdIsValid();
// when
this.filter.doFilter(this.request, this.response, this.filterChain);
// then
verify(this.identitySwapperManager)
.setOriginalUser(this.session, this.username, this.targetUsername, this.auth);
}
@Test
public void testThatOriginalUserIsSetWhenSecurityContextIsNoLongerAvailable() throws Exception {
// given
this.filter.setClearSecurityContextPriorToPortalAuthentication(true);
this.requestIsForIdentitySwapLogin();
this.requestedSessionIdIsValid();
// when
this.filter.doFilter(this.request, this.response, this.filterChain);
// then
verify(this.identitySwapperManager)
.setOriginalUser(this.session, this.username, this.targetUsername, this.auth);
}
@Test
public void testThatTargetUsernameIsSetAsPersonName() throws Exception {
// given
this.requestIsForIdentitySwapLogin();
this.requestedSessionIdIsValid();
// when
this.filter.doFilter(this.request, this.response, this.filterChain);
// then
verify(this.person).setUserName(this.targetUsername);
}
/**
 * Test that when swapping to another identity while specifying a target profile, fires event
 * for that profile.
 */
@Test
public void testThatProfileSelectedEventIsSent() throws IOException, ServletException {
// given
this.requestIsForIdentitySwapLogin();
this.requestedSessionIdIsValid();
// when
this.filter.doFilter(this.request, this.response, this.filterChain);
// then
final ProfileSelectionEvent expectedEvent =
new ProfileSelectionEvent(
this.filter, this.targetProfileKey, this.person, this.request);
verify(this.eventPublisher).publishEvent(expectedEvent);
}
// Stubs the swap manager so the session looks like an in-progress identity
// swap targeting /Login (no original username recorded yet).
private void requestIsForIdentitySwapLogin() {
when(this.identitySwapperManager.getTargetProfile(this.session))
.thenReturn(this.targetProfileKey);
when(this.identitySwapperManager.getOriginalUsername(this.session)).thenReturn(null);
when(this.identitySwapperManager.getTargetUsername(this.session))
.thenReturn(this.targetUsername);
when(this.request.getServletPath()).thenReturn("/Login");
}
// Marks the request's session id as valid so the filter trusts the session.
private void requestedSessionIdIsValid() {
when(this.request.isRequestedSessionIdValid()).thenReturn(true);
}
}
|
package cn.springcloud.book.feign;
import com.nepxion.discovery.common.constant.DiscoveryConstant;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RestController;
/**
 * Feign endpoint for service B in the discovery example chain: processes the
 * incoming value, forwards it to service C, and logs the accumulated call path.
 * Only active when this instance runs as "discovery-springcloud-example-b".
 */
@RestController
@ConditionalOnProperty(name = DiscoveryConstant.SPRING_APPLICATION_NAME, havingValue = "discovery-springcloud-example-b")
public class BFeignImpl extends AbstractFeignImpl implements BFeign {
private static final Logger LOG = LoggerFactory.getLogger(BFeignImpl.class);
@Autowired
private CFeign cFeign;
@Override
public String invoke(@RequestBody String value) {
// Append this node's info (base class), then continue the chain via C.
value = doInvoke(value);
value = cFeign.invoke(value);
LOG.info("调用路径:{}", value);
return value;
}
}
|
package com.permissionx.app;
import android.Manifest;
import android.os.Bundle;
import android.view.View;
import android.widget.Toast;
import androidx.appcompat.app.AppCompatActivity;
import com.permissionx.app.databinding.ActivityMainJavaBinding;
import com.permissionx.guolindev.PermissionX;
import com.permissionx.guolindev.callback.ExplainReasonCallbackWithBeforeParam;
import com.permissionx.guolindev.callback.ForwardToSettingsCallback;
import com.permissionx.guolindev.callback.RequestCallback;
import com.permissionx.guolindev.request.ExplainScope;
import com.permissionx.guolindev.request.ForwardScope;
import java.util.List;
/**
 * Java demo for the PermissionX library: on button tap, requests external
 * storage permission with an explain-first dialog, a forward-to-settings
 * fallback, and a toast reporting the final grant result.
 */
public class MainJavaActivity extends AppCompatActivity {
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
ActivityMainJavaBinding binding = ActivityMainJavaBinding.inflate(getLayoutInflater());
setContentView(binding.getRoot());
binding.makeRequestBtn.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
PermissionX.init(MainJavaActivity.this)
.permissions(Manifest.permission.WRITE_EXTERNAL_STORAGE)
// Show the rationale dialog before the system permission prompt.
.explainReasonBeforeRequest()
.onExplainRequestReason(new ExplainReasonCallbackWithBeforeParam() {
@Override
public void onExplainReason(ExplainScope scope, List<String> deniedList, boolean beforeRequest) {
// CustomDialog customDialog = new CustomDialog(MainJavaActivity.this, "PermissionX needs following permissions to continue", deniedList);
// scope.showRequestReasonDialog(customDialog);
scope.showRequestReasonDialog(deniedList, "PermissionX needs following permissions to continue", "Allow");
}
})
.onForwardToSettings(new ForwardToSettingsCallback() {
@Override
public void onForwardToSettings(ForwardScope scope, List<String> deniedList) {
// Shown when the user permanently denied; offers a jump to app settings.
scope.showForwardToSettingsDialog(deniedList, "Please allow following permissions in settings", "Allow");
}
})
.request(new RequestCallback() {
@Override
public void onResult(boolean allGranted, List<String> grantedList, List<String> deniedList) {
if (allGranted) {
Toast.makeText(MainJavaActivity.this, "All permissions are granted", Toast.LENGTH_SHORT).show();
} else {
Toast.makeText(MainJavaActivity.this, "The following permissions are denied:" + deniedList, Toast.LENGTH_SHORT).show();
}
}
});
}
});
}
}
|
/*
* MIT License
*
* Copyright (c) 2021 MASES s.r.l.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/**************************************************************************************
* <auto-generated>
* This code was generated from a template using JCOReflector
*
* Manual changes to this file may cause unexpected behavior in your application.
* Manual changes to this file will be overwritten if the code is regenerated.
* </auto-generated>
*************************************************************************************/
package system.data;
import org.mases.jcobridge.*;
import org.mases.jcobridge.netreflection.*;
// Import section
import system.collections.IList;
import system.collections.IListImplementation;
import system.collections.ICollection;
import system.collections.ICollectionImplementation;
import system.data.ITableMapping;
import system.data.ITableMappingImplementation;
import system.Array;
/**
* The base .NET class managing System.Data.ITableMappingCollection, System.Data.Common, Version=4.2.2.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a. Implements {@link IJCOBridgeReflected}.
* <p>
*
* See: <a href="https://docs.microsoft.com/en-us/dotnet/api/System.Data.ITableMappingCollection" target="_top">https://docs.microsoft.com/en-us/dotnet/api/System.Data.ITableMappingCollection</a>
*/
// NOTE(review): auto-generated by JCOReflector (see file header) — manual edits
// here will be overwritten on regeneration; prefer changing the template.
public interface ITableMappingCollection extends IJCOBridgeReflected, IList, ICollection, IEnumerable {
/**
 * Fully assembly qualified name: System.Data.Common, Version=4.2.2.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a
 */
public static final String assemblyFullName = "System.Data.Common, Version=4.2.2.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a";
/**
 * Assembly name: System.Data.Common
 */
public static final String assemblyShortName = "System.Data.Common";
/**
 * Qualified class name: System.Data.ITableMappingCollection
 */
public static final String className = "System.Data.ITableMappingCollection";
/**
 * Try to cast the {@link IJCOBridgeReflected} instance into {@link ITableMappingCollection}, a cast assert is made to check if types are compatible.
 * @param from {@link IJCOBridgeReflected} instance to be casted
 * @return {@link ITableMappingCollection} instance
 * @throws java.lang.Throwable in case of error during cast operation
 */
public static ITableMappingCollection ToITableMappingCollection(IJCOBridgeReflected from) throws Throwable {
JCOBridge bridge = JCOBridgeInstance.getInstance("System.Data.Common, Version=4.2.2.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a");
JCType classType = bridge.GetType(className + ", " + (JCOReflector.getUseFullAssemblyName() ? assemblyFullName : assemblyShortName));
NetType.AssertCast(classType, from);
return new ITableMappingCollectionImplementation(from.getJCOInstance());
}
/**
 * Returns the reflected Assembly name
 *
 * @return A {@link String} representing the Fullname of reflected Assembly
 */
public String getJCOAssemblyName();
/**
 * Returns the reflected Class name
 *
 * @return A {@link String} representing the Fullname of reflected Class
 */
public String getJCOClassName();
/**
 * Returns the reflected Class name used to build the object
 *
 * @return A {@link String} representing the name used to allocated the object
 * in CLR context
 */
public String getJCOObjectName();
/**
 * Returns the instantiated class
 *
 * @return An {@link Object} representing the instance of the instantiated Class
 */
public Object getJCOInstance();
/**
 * Returns the instantiated class Type
 *
 * @return A {@link JCType} representing the Type of the instantiated Class
 */
public JCType getJCOType();
// Methods section
/** Mirrors ITableMappingCollection.Contains(string) on the CLR side. */
public boolean Contains(java.lang.String sourceTableName) throws Throwable;
/** Mirrors ITableMappingCollection.IndexOf(string) on the CLR side. */
public int IndexOf(java.lang.String sourceTableName) throws Throwable;
/** Mirrors ITableMappingCollection.Add(string, string) on the CLR side. */
public ITableMapping Add(java.lang.String sourceTableName, java.lang.String dataSetTableName) throws Throwable;
/** Mirrors ITableMappingCollection.GetByDataSetTable(string) on the CLR side. */
public ITableMapping GetByDataSetTable(java.lang.String dataSetTableName) throws Throwable;
/** Mirrors ITableMappingCollection.RemoveAt(string) on the CLR side. */
public void RemoveAt(java.lang.String sourceTableName) throws Throwable;
// Properties section
// Instance Events section
}
|
package com.ivelum.exception;
/**
 * Base checked exception for Cub client errors.
 */
public class CubException extends Exception {
    // Explicit id so serialized forms stay stable across compiler versions
    // (Exception is Serializable; without this the id is compiler-generated).
    private static final long serialVersionUID = 1L;

    /**
     * Creates an exception with a descriptive message.
     *
     * @param message human-readable error description
     */
    public CubException(String message) {
        super(message);
    }

    /**
     * Creates an exception with a message and the underlying cause.
     *
     * @param message human-readable error description
     * @param cause   the exception that triggered this one
     */
    public CubException(String message, Throwable cause) {
        super(message, cause);
    }
}
|
/*******************************************************************************
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package io.fintechlabs.testframework.condition.client;
import static org.assertj.core.api.Assertions.assertThat;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.Spy;
import org.mockito.runners.MockitoJUnitRunner;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import static org.mockito.Mockito.atLeastOnce;
import static org.mockito.Mockito.verify;
import io.fintechlabs.testframework.condition.Condition.ConditionResult;
import io.fintechlabs.testframework.logging.TestInstanceEventLog;
import io.fintechlabs.testframework.testmodule.Environment;
@RunWith(MockitoJUnitRunner.class)
public class AddBasicAuthClientSecretAuthenticationParameters_UnitTest {
// Spied so we can both use the real environment and verify reads on it.
@Spy
private Environment env = new Environment();
@Mock
private TestInstanceEventLog eventLog;
// Client configuration placed into the environment before evaluation.
private JsonObject client;
// Expected Authorization header value built from the client credentials.
private String expectedAuth;
private AddBasicAuthClientSecretAuthenticationParameters cond;
/**
 * Sets up the condition under test and seeds the environment with the
 * RFC 6749 example client credentials.
 *
 * @throws java.lang.Exception
 */
@Before
public void setUp() throws Exception {
cond = new AddBasicAuthClientSecretAuthenticationParameters("UNIT-TEST", eventLog, ConditionResult.INFO);
// Example values from RFC 6749
client = new JsonParser().parse("{"
+ "\"client_id\":\"s6BhdRkqt3\","
+ "\"client_secret\":\"7Fjfp0ZBr1KtDRbnfVdmIw\""
+ "}").getAsJsonObject();
// Base64("s6BhdRkqt3:7Fjfp0ZBr1KtDRbnfVdmIw") per HTTP Basic auth.
expectedAuth = "Basic czZCaGRSa3F0Mzo3RmpmcDBaQnIxS3REUmJuZlZkbUl3";
env.putObject("client", client);
}
/**
 * Test method for {@link io.fintechlabs.testframework.condition.client.AddBasicAuthClientSecretAuthenticationParameters#evaluate(io.fintechlabs.testframework.testmodule.Environment)}.
 * Verifies the condition reads both credentials and writes the expected
 * Basic Authorization header into the token endpoint request headers.
 */
@Test
public void testEvaluate_noError() {
cond.evaluate(env);
verify(env, atLeastOnce()).getString("client", "client_id");
verify(env, atLeastOnce()).getString("client", "client_secret");
assertThat(env.getString("token_endpoint_request_headers", "Authorization")).isEqualTo(expectedAuth);
}
}
|
package com.github.sgdc3.telegramnotificationbot.notification;
import lombok.AllArgsConstructor;
import lombok.Getter;
import me.philippheuer.twitch4j.model.Stream;
/**
 * Immutable snapshot of a live Twitch stream used for notifications.
 */
@Getter
@AllArgsConstructor
public class TwitchStream {
// Base URL used to build a channel link from the channel name.
private final static String CHANNEL_BASE_URL = "https://www.twitch.tv/";
// Twitch stream id.
private long id;
// Stream title (the channel's status text).
private String title;
// Name of the game being streamed.
private String game;
// Channel name, used to build the channel URL.
private String channel;
// NOTE(review): currently populated with the same value as 'channel' (see
// fromStream below) — presumably this was meant to hold the display name;
// confirm against the twitch4j Channel API.
private String channelName;
/** Returns the public URL of the stream's channel page. */
public String getUrl() {
return CHANNEL_BASE_URL + getChannel();
}
/**
 * Maps a twitch4j {@code Stream} onto this notification model.
 */
public static TwitchStream fromStream(Stream stream) {
long id = stream.getId();
String title = stream.getChannel().getStatus();
String game = stream.getGame();
String channel = stream.getChannel().getName();
// NOTE(review): identical to 'channel' above — likely intended
// stream.getChannel().getDisplayName(); verify before changing.
String channelName = stream.getChannel().getName();
return new TwitchStream(id, title, game, channel, channelName);
}
}
|
package br.com.pokemon.exceptions;
import org.springframework.http.HttpStatus;
import org.springframework.web.bind.annotation.ResponseStatus;
import lombok.NoArgsConstructor;
/**
 * Thrown when the requested city cannot be resolved by the weather API;
 * mapped to an HTTP 404 response via {@link ResponseStatus}.
 */
@NoArgsConstructor
@ResponseStatus(value = HttpStatus.NOT_FOUND, reason = "The city entered was not found in the Weather API")
public class CityNotFoundException extends OpenWeatherIntegrationException {

	private static final long serialVersionUID = -5650137028798444098L;

	/**
	 * Creates the exception with a detail message describing the lookup failure.
	 *
	 * @param message detail message passed to the superclass
	 */
	public CityNotFoundException(String message) {
		super(message);
	}
}
|
package com.dahafa.hais.model;
import java.awt.geom.Path2D;
import java.io.Serializable;
import java.util.List;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.JoinColumn;
import javax.persistence.JoinTable;
import javax.persistence.ManyToMany;
import javax.persistence.NamedQuery;
import javax.persistence.OneToMany;
import javax.persistence.SequenceGenerator;
import com.dahafa.hais.Identifiable;
@Entity
@NamedQuery(name="Room.findAll", query="SELECT r FROM Room r")
public class Room implements Serializable, Identifiable<Long> {

	private static final long serialVersionUID = 1L;

	/** Sequence-generated primary key. */
	@Id
	@GeneratedValue(strategy=GenerationType.SEQUENCE,
		generator="RoomGenerator")
	@SequenceGenerator(name="RoomGenerator",
		sequenceName="RoomSequence", allocationSize=1)
	private long roomID;

	/** Ordered polygon vertices describing the room's outline. */
	@OneToMany(fetch=FetchType.EAGER)
	@JoinColumn(name="ROOM")
	private List<GeoLocation> coordinates;

	/** Equipment associated with this room via the ROOMEQUIPMENT join table. */
	@ManyToMany
	@JoinTable(name="ROOMEQUIPMENT", joinColumns={@JoinColumn(name="ROOM")},
		inverseJoinColumns={@JoinColumn(name="MATERIAL")})
	private List<Material> materials;

	private String name;
	private String roomType;

	/**
	 * Tests whether the given point lies inside the polygon spanned by this
	 * room's coordinates. The polygon is closed implicitly from the last
	 * vertex back to the first.
	 *
	 * @param latitude  the point's latitude (used as the polygon's x axis)
	 * @param longitude the point's longitude (used as the polygon's y axis)
	 * @return {@code true} if the point is inside the room outline;
	 *         {@code false} when there are fewer than three vertices
	 *         (including a null or empty coordinate list)
	 */
	public boolean contains(final double latitude, final double longitude) {
		// Fewer than three vertices cannot enclose any area. This also guards
		// against a null list (previously an NPE); for 1-2 vertices Path2D
		// would return false for the degenerate path anyway, so this is a
		// fast path rather than a behavior change.
		if (this.coordinates == null || this.coordinates.size() < 3) {
			return false;
		}
		final GeoLocation start = this.coordinates.get(0);
		final Path2D polygon = new Path2D.Double();
		polygon.moveTo(start.getLatitude(), start.getLongitude());
		for (int i = 1; i < this.coordinates.size(); i++) {
			final GeoLocation geoLocation = this.coordinates.get(i);
			polygon.lineTo(geoLocation.getLatitude(), geoLocation.getLongitude());
		}
		polygon.closePath();
		return polygon.contains(latitude, longitude);
	}

	@Override
	public Long getID() {
		return this.roomID;
	}

	public void setID(final long roomID) {
		this.roomID = roomID;
	}

	public List<GeoLocation> getCoordinates() {
		return this.coordinates;
	}

	public void setCoordinates(final List<GeoLocation> coordinates) {
		this.coordinates = coordinates;
	}

	/**
	 * Appends a vertex to the room outline and returns it.
	 * NOTE(review): despite the name, this adds a coordinate, not a room.
	 */
	public GeoLocation addRoom(final GeoLocation geoLocation) {
		this.getCoordinates().add(geoLocation);
		return geoLocation;
	}

	/**
	 * Removes a vertex from the room outline and returns it.
	 * NOTE(review): despite the name, this removes a coordinate, not a room.
	 */
	public GeoLocation removeRoom(final GeoLocation geoLocation) {
		this.getCoordinates().remove(geoLocation);
		return geoLocation;
	}

	public String getName() {
		return this.name;
	}

	public void setName(final String name) {
		this.name = name;
	}

	public String getRoomType() {
		return this.roomType;
	}

	public void setRoomType(final String roomType) {
		this.roomType = roomType;
	}

	public List<Material> getMaterials() {
		return this.materials;
	}

	public void setMaterials(final List<Material> materials) {
		this.materials = materials;
	}
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.servicecomb.transport.rest.vertx.accesslog.element.impl;
import static org.junit.Assert.assertEquals;
import org.apache.servicecomb.transport.rest.vertx.accesslog.AccessLogParam;
import org.junit.Test;
import org.mockito.Mockito;
import io.vertx.core.http.HttpServerRequest;
import io.vertx.core.net.SocketAddress;
import io.vertx.ext.web.RoutingContext;
public class LocalHostItemTest {

	public static final LocalHostItem ELEMENT = new LocalHostItem();

	/** Builds an AccessLogParam whose mocked RoutingContext yields the given request. */
	private static AccessLogParam<RoutingContext> paramWithRequest(HttpServerRequest request) {
		AccessLogParam<RoutingContext> param = new AccessLogParam<>();
		RoutingContext context = Mockito.mock(RoutingContext.class);
		param.setContextData(context);
		Mockito.when(context.request()).thenReturn(request);
		return param;
	}

	/** Builds a param whose mocked request resolves to the given local address. */
	private static AccessLogParam<RoutingContext> paramWithLocalAddress(SocketAddress localAddress) {
		HttpServerRequest request = Mockito.mock(HttpServerRequest.class);
		Mockito.when(request.localAddress()).thenReturn(localAddress);
		return paramWithRequest(request);
	}

	/** Builds a param whose mocked local address reports the given host string. */
	private static AccessLogParam<RoutingContext> paramWithHost(String host) {
		SocketAddress localAddress = Mockito.mock(SocketAddress.class);
		Mockito.when(localAddress.host()).thenReturn(host);
		return paramWithLocalAddress(localAddress);
	}

	@Test
	public void getFormattedElement() {
		// A non-empty host is echoed verbatim.
		assertEquals("testHost", ELEMENT.getFormattedItem(paramWithHost("testHost")));
	}

	@Test
	public void getFormattedElementOnRequestIsNull() {
		// Missing request falls back to the "-" placeholder.
		assertEquals("-", ELEMENT.getFormattedItem(paramWithRequest(null)));
	}

	@Test
	public void getFormattedElementOnLocalAddressIsNull() {
		// Missing local address falls back to the "-" placeholder.
		assertEquals("-", ELEMENT.getFormattedItem(paramWithLocalAddress(null)));
	}

	@Test
	public void getFormattedElementOnHostIsNull() {
		// Null host falls back to the "-" placeholder.
		assertEquals("-", ELEMENT.getFormattedItem(paramWithHost(null)));
	}

	@Test
	public void getFormattedElementIsEmpty() {
		// Empty host falls back to the "-" placeholder.
		assertEquals("-", ELEMENT.getFormattedItem(paramWithHost("")));
	}
}
|
package com.coyotesong.coursera.cloud.domain;
import javax.persistence.Entity;
import javax.persistence.Id;
/**
* Static lookup information on unique carriers, from L_UNIQUE_CARRIERS_ID.csv
*
* @author bgiles
*/
@Entity
public class LookupUniqueCarrier {

	private String code;
	private String description;

	/** Carrier code; serves as the primary key. */
	@Id
	public String getCode() {
		return code;
	}

	public void setCode(String code) {
		this.code = code;
	}

	/** Human-readable carrier name. */
	public String getDescription() {
		return description;
	}

	public void setDescription(String description) {
		this.description = description;
	}

	// Hash derives only from the key field; equal objects (which require equal
	// codes) therefore always hash equally, satisfying the equals/hashCode contract.
	@Override
	public int hashCode() {
		return 31 + ((code == null) ? 0 : code.hashCode());
	}

	@Override
	public boolean equals(Object obj) {
		if (this == obj) {
			return true;
		}
		if (obj == null || getClass() != obj.getClass()) {
			return false;
		}
		final LookupUniqueCarrier that = (LookupUniqueCarrier) obj;
		return sameValue(code, that.code) && sameValue(description, that.description);
	}

	// Null-safe string comparison used by equals().
	private static boolean sameValue(String a, String b) {
		return (a == null) ? b == null : a.equals(b);
	}

	@Override
	public String toString() {
		return "LookupUniqueCarrier [code=" + code + ", description=" + description + "]";
	}
}
|
/*
* The MIT License
*
* Copyright 2018 Honestleaf<Cheng.Ye.HL@hotmail.com>.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package parser;
import java.util.ArrayList;
import org.apache.log4j.Logger;
import org.apache.poi.ss.usermodel.Cell;
import org.apache.poi.ss.usermodel.Row;
import org.apache.poi.xssf.usermodel.XSSFSheet;
/**
*
* @author Honestleaf<Cheng.Ye.HL@hotmail.com>
*/
public class SpecialMissionMapParser extends GenericParser<XSSFSheet, ArrayList<String[]>> {

    private final static Logger LOG = Logger.getLogger(SpecialMissionMapParser.class.getName());

    /** Columns read per row: 0=mission, 1=planet, 2=mtype, 3=node, 4=firstOnly, 5=skip. */
    private static final int COLUMN_COUNT = 6;

    /**
     * Parses the special-mission map sheet into one String[6] per data row.
     * The first (header) row is skipped; columns beyond index 5 are ignored.
     * Cells whose value cannot be read as a string are logged and left null,
     * as are cells missing from the row.
     *
     * Note: the original implementation also constructed an unused
     * StringParser; that dead local has been removed.
     *
     * @return a list of String arrays, each {mission, planet, mtype, node, firstOnly, skip}
     */
    @Override
    public ArrayList<String[]> parse() {
        ArrayList<String[]> list = new ArrayList<>();
        boolean headerRow = true;
        for (Row row : this.table) {
            if (headerRow) {
                // Skip the column-title row.
                headerRow = false;
                continue;
            }
            String[] fields = new String[COLUMN_COUNT];
            for (Cell cell : row) {
                try {
                    int column = cell.getColumnIndex();
                    if (column < COLUMN_COUNT) {
                        fields[column] = cell.getStringCellValue();
                    }
                } catch (Exception ex) {
                    // Best-effort: a bad cell leaves its slot null instead of aborting the row.
                    LOG.warn("Invalid MissionMap cell value.", ex);
                }
            }
            list.add(fields);
        }
        return list;
    }
}
|
// PART OF THE MACHINE SIMULATION. DO NOT CHANGE.
package nachos.machine;
import nachos.security.*;
import nachos.threads.KThread;
import java.util.Vector;
/**
* A TCB simulates the low-level details necessary to create, context-switch,
* and destroy Nachos threads. Each TCB controls an underlying JVM Thread
* object.
*
* <p>
* Do not use any methods in <tt>java.lang.Thread</tt>, as they are not
* compatible with the TCB API. Most <tt>Thread</tt> methods will either crash
* Nachos or have no useful effect.
*
* <p>
* Do not use the <i>synchronized</i> keyword <b>anywhere</b> in your code. It's
* against the rules, <i>and</i> it can easily deadlock nachos.
*/
public final class TCB {
	/**
	 * Allocate a new TCB.
	 */
	public TCB() {
	}
	/**
	 * Give the TCB class the necessary privilege to create threads. This is
	 * necessary, because unlike other machine classes that need privilege, we
	 * want the kernel to be able to create TCB objects on its own.
	 *
	 * @param privilege
	 *            encapsulates privileged access to the Nachos machine.
	 */
	public static void givePrivilege(Privilege privilege) {
		TCB.privilege = privilege;
		privilege.tcb = new TCBPrivilege();
	}
	/**
	 * Causes the thread represented by this TCB to begin execution. The
	 * specified target is run in the thread.
	 *
	 * @param target closure to run in the new (or stolen) Java thread.
	 */
	public void start(Runnable target) {
		/*
		 * We will not use synchronization here, because we're assuming that
		 * either this is the first call to start(), or we're being called in
		 * the context of another TCB. Since we only allow one TCB to run at a
		 * time, no synchronization is necessary.
		 *
		 * The only way this assumption could be broken is if one of our
		 * non-Nachos threads used the TCB code.
		 */
		/*
		 * Make sure this TCB has not already been started. If done is false,
		 * then destroy() has not yet set javaThread back to null, so we can use
		 * javaThread as a reliable indicator of whether or not start() has
		 * already been invoked.
		 */
		Lib.assertTrue(javaThread == null && !done);
		/*
		 * Make sure there aren't too many running TCBs already. This limitation
		 * exists in an effort to prevent wild thread usage.
		 */
		Lib.assertTrue(runningThreads.size() < maxThreads);
		isFirstTCB = (currentTCB == null);
		/*
		 * Probably unnecessary sanity check: if this is not the first TCB, we
		 * make sure that the current thread is bound to the current TCB. This
		 * check can only fail if non-Nachos threads invoke start().
		 */
		if (!isFirstTCB)
			Lib.assertTrue(currentTCB.javaThread == Thread.currentThread());
		/*
		 * At this point all checks are complete, so we go ahead and start the
		 * TCB. Whether or not this is the first TCB, it gets added to
		 * runningThreads, and we save the target closure.
		 */
		runningThreads.add(this);
		this.target = target;
		if (!isFirstTCB) {
			/*
			 * If this is not the first TCB, we have to make a new Java thread
			 * to run it. Creating Java threads is a privileged operation.
			 */
			tcbTarget = new Runnable() {
				public void run() {
					threadroot();
				}
			};
			privilege.doPrivileged(new Runnable() {
				public void run() {
					javaThread = new Thread(tcbTarget);
				}
			});
			/*
			 * The Java thread hasn't yet started, but we need to get it
			 * blocking in yield(). We do this by temporarily turning off the
			 * current TCB, starting the new Java thread, and waiting for it to
			 * wake us up from threadroot(). Once the new TCB wakes us up, it's
			 * safe to context switch to the new TCB.
			 */
			currentTCB.running = false;
			this.javaThread.start();
			currentTCB.waitForInterrupt();
		} else {
			/*
			 * This is the first TCB, so we don't need to make a new Java thread
			 * to run it; we just steal the current Java thread.
			 */
			javaThread = Thread.currentThread();
			/* All we have to do now is invoke threadroot() directly. */
			threadroot();
		}
	}
	/**
	 * Return the TCB of the currently running thread.
	 */
	public static TCB currentTCB() {
		return currentTCB;
	}
	/**
	 * Context switch between the current TCB and this TCB. This TCB will become
	 * the new current TCB. It is acceptable for this TCB to be the current TCB.
	 */
	public void contextSwitch() {
		/*
		 * Probably unnecessary sanity check: we make sure that the current
		 * thread is bound to the current TCB. This check can only fail if
		 * non-Nachos threads invoke start().
		 */
		Lib.assertTrue(currentTCB.javaThread == Thread.currentThread());
		// make sure AutoGrader.runningThread() called associateThread()
		Lib.assertTrue(currentTCB.associated);
		currentTCB.associated = false;
		// can't switch from a TCB to itself
		if (this == currentTCB)
			return;
		/*
		 * There are some synchronization concerns here. As soon as we wake up
		 * the next thread, we cannot assume anything about static variables, or
		 * about any TCB's state. Therefore, before waking up the next thread,
		 * we must latch the value of currentTCB, and set its running flag to
		 * false (so that, in case we get interrupted before we call yield(),
		 * the interrupt will set the running flag and yield() won't block).
		 */
		TCB previous = currentTCB;
		previous.running = false;
		this.interrupt();
		previous.yield();
	}
	/**
	 * Destroy this TCB. This TCB must not be in use by the current thread. This
	 * TCB must also have been authorized to be destroyed by the autograder.
	 */
	public void destroy() {
		// make sure the current TCB is correct
		Lib.assertTrue(currentTCB != null
				&& currentTCB.javaThread == Thread.currentThread());
		// can't destroy current thread
		Lib.assertTrue(this != currentTCB);
		// thread must have started but not be destroyed yet
		Lib.assertTrue(javaThread != null && !done);
		// ensure AutoGrader.finishingCurrentThread() called authorizeDestroy()
		Lib.assertTrue(nachosThread == toBeDestroyed);
		toBeDestroyed = null;
		// Wake the doomed TCB; its yield() sees done==true, throws ThreadDeath,
		// and interrupts us back (the ping-pong handshake) before we proceed.
		this.done = true;
		currentTCB.running = false;
		this.interrupt();
		currentTCB.waitForInterrupt();
		this.javaThread = null;
	}
	/**
	 * Destroy all TCBs and exit Nachos. Same as <tt>Machine.terminate()</tt>.
	 */
	public static void die() {
		privilege.exit(0);
	}
	/**
	 * Test if the current JVM thread belongs to a Nachos TCB. The AWT event
	 * dispatcher is an example of a non-Nachos thread.
	 *
	 * @return <tt>true</tt> if the current JVM thread is a Nachos thread.
	 */
	public static boolean isNachosThread() {
		return (currentTCB != null && Thread.currentThread() == currentTCB.javaThread);
	}
	/**
	 * Entry point of every TCB's Java thread: completes the start() handshake
	 * (or initializes state for the very first TCB), runs the target closure,
	 * and handles thread termination / abnormal exit.
	 */
	private void threadroot() {
		// this should be running the current thread
		Lib.assertTrue(javaThread == Thread.currentThread());
		if (!isFirstTCB) {
			/*
			 * start() is waiting for us to wake it up, signalling that it's OK
			 * to context switch to us. We leave the running flag false so that
			 * we'll still run if a context switch happens before we go to
			 * sleep. All we have to do is wake up the current TCB and then wait
			 * to get woken up by contextSwitch() or destroy().
			 */
			currentTCB.interrupt();
			this.yield();
		} else {
			/*
			 * start() called us directly, so we just need to initialize a
			 * couple things.
			 */
			currentTCB = this;
			running = true;
		}
		try {
			target.run();
			// no way out of here without going throw one of the catch blocks
			Lib.assertNotReached();
		} catch (ThreadDeath e) {
			// make sure this TCB is being destroyed properly
			if (!done) {
				System.out.print("\nTCB terminated improperly!\n");
				privilege.exit(1);
			}
			runningThreads.removeElement(this);
			if (runningThreads.isEmpty())
				privilege.exit(0);
		} catch (Throwable e) {
			System.out.print("\n");
			e.printStackTrace();
			runningThreads.removeElement(this);
			if (runningThreads.isEmpty())
				privilege.exit(1);
			else
				die();
		}
	}
	/**
	 * Invoked by threadroot() and by contextSwitch() when it is necessary to
	 * wait for another TCB to context switch to this TCB. Since this TCB might
	 * get destroyed instead, we check the <tt>done</tt> flag after waking up.
	 * If it is set, the TCB that woke us up is waiting for an acknowledgement
	 * in destroy(). Otherwise, we just set the current TCB to this TCB and
	 * return.
	 */
	private void yield() {
		waitForInterrupt();
		if (done) {
			currentTCB.interrupt();
			throw new ThreadDeath();
		}
		currentTCB = this;
	}
	/**
	 * Waits on the monitor bound to this TCB until its <tt>running</tt> flag is
	 * set to <tt>true</tt>. <tt>waitForInterrupt()</tt> is used whenever a TCB
	 * needs to go to wait for its turn to run. This includes the ping-pong
	 * process of starting and destroying TCBs, as well as in context switching
	 * from this TCB to another. We don't rely on <tt>currentTCB</tt>, since it
	 * is updated by <tt>contextSwitch()</tt> before we get called.
	 */
	private synchronized void waitForInterrupt() {
		while (!running) {
			try {
				wait();
			} catch (InterruptedException e) {
			}
		}
	}
	/**
	 * Wake up this TCB by setting its <tt>running</tt> flag to <tt>true</tt>
	 * and signalling the monitor bound to it. Used in the ping-pong process of
	 * starting and destroying TCBs, as well as in context switching to this
	 * TCB.
	 */
	private synchronized void interrupt() {
		running = true;
		notify();
	}
	/**
	 * Binds this TCB to the given Nachos thread (once); invoked via the
	 * autograder privilege interface on each context switch.
	 */
	private void associateThread(KThread thread) {
		// make sure AutoGrader.runningThread() gets called only once per
		// context switch
		Lib.assertTrue(!associated);
		associated = true;
		Lib.assertTrue(thread != null);
		if (nachosThread != null)
			Lib.assertTrue(thread == nachosThread);
		else
			nachosThread = thread;
	}
	/**
	 * Records that the given Nachos thread may be destroyed; checked by
	 * destroy() before tearing a TCB down.
	 */
	private static void authorizeDestroy(KThread thread) {
		// make sure AutoGrader.finishingThread() gets called only once per
		// destroy
		Lib.assertTrue(toBeDestroyed == null);
		toBeDestroyed = thread;
	}
	/**
	 * The maximum number of started, non-destroyed TCB's that can be in
	 * existence.
	 */
	public static final int maxThreads = 250;
	/**
	 * A reference to the currently running TCB. It is initialized to
	 * <tt>null</tt> when the <tt>TCB</tt> class is loaded, and then the first
	 * invocation of <tt>start(Runnable)</tt> assigns <tt>currentTCB</tt> a
	 * reference to the first TCB. After that, only <tt>yield()</tt> can change
	 * <tt>currentTCB</tt> to the current TCB, and only after
	 * <tt>waitForInterrupt()</tt> returns.
	 *
	 * <p>
	 * Note that <tt>currentTCB.javaThread</tt> will not be the current thread
	 * if the current thread is not bound to a TCB (this includes the threads
	 * created for the hardware simulation).
	 */
	private static TCB currentTCB = null;
	/**
	 * A vector containing all <i>running</i> TCB objects. It is initialized to
	 * an empty vector when the <tt>TCB</tt> class is loaded. TCB objects are
	 * added only in <tt>start(Runnable)</tt>, which can only be invoked once on
	 * each TCB object. TCB objects are removed only in each of the
	 * <tt>catch</tt> clauses of <tt>threadroot()</tt>, one of which is always
	 * invoked on thread termination. The maximum number of threads in
	 * <tt>runningThreads</tt> is limited to <tt>maxThreads</tt> by
	 * <tt>start(Runnable)</tt>. If <tt>threadroot()</tt> drops the number of
	 * TCB objects in <tt>runningThreads</tt> to zero, Nachos exits, so once the
	 * first TCB is created, this vector is basically never empty.
	 */
	private static Vector<TCB> runningThreads = new Vector<TCB>();
	/** Privileged access to the machine; set once by givePrivilege(). */
	private static Privilege privilege;
	/** Thread authorized for destruction; see authorizeDestroy()/destroy(). */
	private static KThread toBeDestroyed = null;
	/**
	 * <tt>true</tt> if and only if this TCB is the first TCB to start, the one
	 * started in <tt>Machine.main(String[])</tt>. Initialized by
	 * <tt>start(Runnable)</tt>, on the basis of whether <tt>currentTCB</tt> has
	 * been initialized.
	 */
	private boolean isFirstTCB;
	/**
	 * A reference to the Java thread bound to this TCB. It is initially
	 * <tt>null</tt>, assigned to a Java thread in <tt>start(Runnable)</tt>, and
	 * set to <tt>null</tt> again in <tt>destroy()</tt>.
	 */
	private Thread javaThread = null;
	/**
	 * <tt>true</tt> if and only if the Java thread bound to this TCB ought to
	 * be running. This is an entirely different condition from membership in
	 * <tt>runningThreads</tt>, which contains all TCB objects that have started
	 * and have not terminated. <tt>running</tt> is only <tt>true</tt> when the
	 * associated Java thread ought to run ASAP. When starting or destroying a
	 * TCB, this is temporarily true for a thread other than that of the current
	 * TCB.
	 */
	private boolean running = false;
	/**
	 * Set to <tt>true</tt> by <tt>destroy()</tt>, so that when
	 * <tt>waitForInterrupt()</tt> returns in the doomed TCB, <tt>yield()</tt>
	 * will know that the current TCB is doomed.
	 */
	private boolean done = false;
	/** The Nachos thread bound to this TCB by associateThread(). */
	private KThread nachosThread = null;
	/** Guards against associateThread() being called twice per context switch. */
	private boolean associated = false;
	/** The closure run by threadroot(); saved by start(Runnable). */
	private Runnable target;
	/** Wrapper closure that invokes threadroot() in the new Java thread. */
	private Runnable tcbTarget;
	/** Exposes the TCB association/destroy hooks through the privilege interface. */
	private static class TCBPrivilege implements Privilege.TCBPrivilege {
		public void associateThread(KThread thread) {
			Lib.assertTrue(currentTCB != null);
			currentTCB.associateThread(thread);
		}
		public void authorizeDestroy(KThread thread) {
			TCB.authorizeDestroy(thread);
		}
	}
}
|
/*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.costexplorer.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;
/**
* <p>
* Details on termination recommendation.
* </p>
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/ce-2017-10-25/TerminateRecommendationDetail" target="_top">AWS
* API Documentation</a>
*/
// NOTE: code-generated class (see @Generated); keep manual edits out — they
// will be overwritten on regeneration.
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class TerminateRecommendationDetail implements Serializable, Cloneable, StructuredPojo {
    /**
     * <p>
     * Estimated savings resulting from modification, on a monthly basis.
     * </p>
     */
    private String estimatedMonthlySavings;
    /**
     * <p>
     * The currency code that Amazon Web Services used to calculate the costs for this instance.
     * </p>
     */
    private String currencyCode;
    /**
     * <p>
     * Estimated savings resulting from modification, on a monthly basis.
     * </p>
     *
     * @param estimatedMonthlySavings
     *        Estimated savings resulting from modification, on a monthly basis.
     */
    public void setEstimatedMonthlySavings(String estimatedMonthlySavings) {
        this.estimatedMonthlySavings = estimatedMonthlySavings;
    }
    /**
     * <p>
     * Estimated savings resulting from modification, on a monthly basis.
     * </p>
     *
     * @return Estimated savings resulting from modification, on a monthly basis.
     */
    public String getEstimatedMonthlySavings() {
        return this.estimatedMonthlySavings;
    }
    /**
     * <p>
     * Estimated savings resulting from modification, on a monthly basis.
     * </p>
     *
     * @param estimatedMonthlySavings
     *        Estimated savings resulting from modification, on a monthly basis.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public TerminateRecommendationDetail withEstimatedMonthlySavings(String estimatedMonthlySavings) {
        setEstimatedMonthlySavings(estimatedMonthlySavings);
        return this;
    }
    /**
     * <p>
     * The currency code that Amazon Web Services used to calculate the costs for this instance.
     * </p>
     *
     * @param currencyCode
     *        The currency code that Amazon Web Services used to calculate the costs for this instance.
     */
    public void setCurrencyCode(String currencyCode) {
        this.currencyCode = currencyCode;
    }
    /**
     * <p>
     * The currency code that Amazon Web Services used to calculate the costs for this instance.
     * </p>
     *
     * @return The currency code that Amazon Web Services used to calculate the costs for this instance.
     */
    public String getCurrencyCode() {
        return this.currencyCode;
    }
    /**
     * <p>
     * The currency code that Amazon Web Services used to calculate the costs for this instance.
     * </p>
     *
     * @param currencyCode
     *        The currency code that Amazon Web Services used to calculate the costs for this instance.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public TerminateRecommendationDetail withCurrencyCode(String currencyCode) {
        setCurrencyCode(currencyCode);
        return this;
    }
    /**
     * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
     * redacted from this string using a placeholder value.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        if (getEstimatedMonthlySavings() != null)
            sb.append("EstimatedMonthlySavings: ").append(getEstimatedMonthlySavings()).append(",");
        if (getCurrencyCode() != null)
            sb.append("CurrencyCode: ").append(getCurrencyCode());
        sb.append("}");
        return sb.toString();
    }
    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (obj instanceof TerminateRecommendationDetail == false)
            return false;
        TerminateRecommendationDetail other = (TerminateRecommendationDetail) obj;
        // Generator's null-safety idiom: XOR detects "exactly one side null",
        // then equals() is compared only when both sides are non-null.
        if (other.getEstimatedMonthlySavings() == null ^ this.getEstimatedMonthlySavings() == null)
            return false;
        if (other.getEstimatedMonthlySavings() != null && other.getEstimatedMonthlySavings().equals(this.getEstimatedMonthlySavings()) == false)
            return false;
        if (other.getCurrencyCode() == null ^ this.getCurrencyCode() == null)
            return false;
        if (other.getCurrencyCode() != null && other.getCurrencyCode().equals(this.getCurrencyCode()) == false)
            return false;
        return true;
    }
    @Override
    public int hashCode() {
        final int prime = 31;
        int hashCode = 1;
        hashCode = prime * hashCode + ((getEstimatedMonthlySavings() == null) ? 0 : getEstimatedMonthlySavings().hashCode());
        hashCode = prime * hashCode + ((getCurrencyCode() == null) ? 0 : getCurrencyCode().hashCode());
        return hashCode;
    }
    @Override
    public TerminateRecommendationDetail clone() {
        try {
            return (TerminateRecommendationDetail) super.clone();
        } catch (CloneNotSupportedException e) {
            // Unreachable in practice: the class implements Cloneable.
            throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
        }
    }
    @com.amazonaws.annotation.SdkInternalApi
    @Override
    public void marshall(ProtocolMarshaller protocolMarshaller) {
        com.amazonaws.services.costexplorer.model.transform.TerminateRecommendationDetailMarshaller.getInstance().marshall(this, protocolMarshaller);
    }
}
|
/*
* Copyright © 2018-2019 Cask Data, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package io.cdap.cdap.internal.app.runtime.monitor.proxy;
import io.netty.channel.ChannelHandler;
/**
 * {@link ChannelHandler} that performs the SOCKS handshake for the
 * {@link MonitorSocksProxy}; both SOCKS versions connect through the same
 * port-forwarding-aware handler.
 */
final class MonitorSocksServerHandler extends AbstractSocksServerHandler {

    private final PortForwardingProvider portForwardingProvider;

    MonitorSocksServerHandler(PortForwardingProvider portForwardingProvider) {
        this.portForwardingProvider = portForwardingProvider;
    }

    @Override
    protected ChannelHandler createSocks4ConnectHandler() {
        return newConnectHandler();
    }

    @Override
    protected ChannelHandler createSocks5ConnectHandler() {
        return newConnectHandler();
    }

    // SOCKS4 and SOCKS5 connects are served by identical handler instances.
    private ChannelHandler newConnectHandler() {
        return new MonitorSocksServerConnectHandler(portForwardingProvider);
    }
}
|
package com.subra.aem.rjs.mailer.utils;
/**
* @author Raghava Joijode
*
*/
/**
 * Sender identities available to the mailer. Constant order is part of the
 * contract (ordinals are preserved).
 */
public enum EmailSenderType {
    NOREPLY,
    HELP,
    GREETINGS,
    CONNECT,
    REACHUS,
    OFFICES,
    REQUEST,
    JENKINS;
}
|
package fuzzy4j.sets;
/**
* Wraps a function inside the unit interval [0, 1]
*
* Defined for the function f(x)
* <code>U_W(x) = min(1, max(0, f(x)))</code>
*
* @author Soren A. Davidsen <sorend@gmail.com>
*/
class UnitIntervalWrapperFunction implements FuzzyFunction {

    // The function being clamped into [0, 1].
    private FuzzyFunction inner;

    UnitIntervalWrapperFunction(FuzzyFunction inner) {
        this.inner = inner;
    }

    /**
     * Evaluates the wrapped function and clamps the result to the unit
     * interval: values below 0 become 0, values above 1 become 1, everything
     * else (including NaN, which fails both comparisons) passes through.
     */
    @Override
    public double apply(double x) {
        final double raw = inner.apply(x);
        return raw < 0.0 ? 0.0 : (raw > 1.0 ? 1.0 : raw);
    }

    public String toString() {
        return "min(1, max(0, " + inner.toString() + "))";
    }
}
|
package jsky.app.ot.tpe;
/**
* Image feature categories
*/
public enum TpeImageFeatureCategory {
    target("Target"),
    fieldOfView("Field Of View");

    // Human-readable label shown in the UI.
    private final String display;

    TpeImageFeatureCategory(String display) {
        this.display = display;
    }

    /** Returns the human-readable label for this category. */
    public String displayName() {
        return display;
    }
}
|
package itx.elastic.service.dto;
import java.util.Objects;
/**
 * Immutable value object wrapping an Elasticsearch index name.
 * Equality and hash are based solely on the name, which is never null.
 */
public class IndexName {

    private final String name;

    /**
     * Creates an index-name wrapper.
     *
     * @param name the index name; must not be null
     * @throws NullPointerException if {@code name} is null
     */
    public IndexName(String name) {
        // Message added so a failing caller is identifiable from the NPE alone.
        Objects.requireNonNull(name, "index name must not be null");
        this.name = name;
    }

    /** Returns the wrapped index name (never null). */
    public String getName() {
        return name;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        IndexName indexName = (IndexName) o;
        return Objects.equals(name, indexName.name);
    }

    @Override
    public int hashCode() {
        return Objects.hash(name);
    }
}
|
package xeta.modules;
import org.junit.Test;
import xeta.library.Config;
import static org.junit.Assert.*;
/**
 * Integration-style test for {@link Address#read}; talks to live mainnet and
 * testnet endpoints, so the hard-coded balance assertions are brittle by nature.
 */
public class AddressTest {
	@Test
	public void testRead() {
		final String read = Address.read("5d1KYTwseBGzwuroT3wm5793pZ1fAXRmY4e9tBNErwYk");
		System.out.println(read);
		assertEquals("{\"balance\":{\"address\":\"5d1KYTwseBGzwuroT3wm5793pZ1fAXRmY4e9tBNErwYk\",\"xetaBalance\":\"1000\"}}", read);
		// below is on testnet
		Config.dev = true;
		try {
			final String read1 = Address.read("3evjhDg7u6tAVnUL6GK6q288GpNdNdPv9te446upsfdd");
			System.out.println(read1);
			assertEquals("{\"balance\":{\"address\":\"3evjhDg7u6tAVnUL6GK6q288GpNdNdPv9te446upsfdd\",\"xetaBalance\":\"1000\"}}", read1);
		} finally {
			// Restore the global flag even if the call or assertion fails,
			// so later tests are not silently run against testnet.
			Config.dev = false;
		}
	}
}
|
/*
* #%L
* SCIFIO fork of the Java Advanced Imaging Image I/O Tools API Core.
* %%
* Copyright (C) 2008 - 2016 Open Microscopy Environment:
* - Board of Regents of the University of Wisconsin-Madison
* - Glencoe Software, Inc.
* - University of Dundee
* %%
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* #L%
*/
/*
* $RCSfile: TIFFLSBCompressor.java,v $
*
*
* Copyright (c) 2005 Sun Microsystems, Inc. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistribution of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* - Redistribution in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* Neither the name of Sun Microsystems, Inc. or the names of
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* This software is provided "AS IS," without a warranty of any
* kind. ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND
* WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY
* EXCLUDED. SUN MIDROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL
* NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF
* USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS
* DERIVATIVES. IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR
* ANY LOST REVENUE, PROFIT OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL,
* CONSEQUENTIAL, INCIDENTAL OR PUNITIVE DAMAGES, HOWEVER CAUSED AND
* REGARDLESS OF THE THEORY OF LIABILITY, ARISING OUT OF THE USE OF OR
* INABILITY TO USE THIS SOFTWARE, EVEN IF SUN HAS BEEN ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* You acknowledge that this software is not designed or intended for
* use in the design, construction, operation or maintenance of any
* nuclear facility.
*
* $Revision: 1.1 $
* $Date: 2005/02/11 05:01:47 $
* $State: Exp $
*/
package io.scif.media.imageioimpl.plugins.tiff;
import io.scif.media.imageio.plugins.tiff.BaselineTIFFTagSet;
import io.scif.media.imageio.plugins.tiff.TIFFCompressor;
import java.io.IOException;
/**
 * Uncompressed data with LSB-to-MSB fill order.
 */
public class TIFFLSBCompressor extends TIFFCompressor {

    /** Registers as COMPRESSION_NONE; fill order handled in encode(). */
    public TIFFLSBCompressor() {
        super("", BaselineTIFFTagSet.COMPRESSION_NONE, true);
    }

    /**
     * Writes {@code height} rows of uncompressed pixel data to the
     * output stream, flipping each byte via the fax decompressor's
     * lookup table to produce LSB-to-MSB fill order.
     *
     * @return the number of bytes written
     */
    public int encode(byte[] b, int off,
                      int width, int height,
                      int[] bitsPerSample,
                      int scanlineStride) throws IOException {
        // Total bits per pixel across all samples.
        int bitsPerPixel = 0;
        for (int bits : bitsPerSample) {
            bitsPerPixel += bits;
        }
        final int bytesPerRow = (bitsPerPixel * width + 7) / 8;
        final byte[] rowBuffer = new byte[bytesPerRow];
        final byte[] bitReverseTable = TIFFFaxDecompressor.flipTable;
        for (int row = 0; row < height; row++) {
            // Copy one row, reverse the bit order of every byte, emit it.
            System.arraycopy(b, off, rowBuffer, 0, bytesPerRow);
            for (int j = 0; j < bytesPerRow; j++) {
                rowBuffer[j] = bitReverseTable[rowBuffer[j] & 0xff];
            }
            stream.write(rowBuffer, 0, bytesPerRow);
            off += scanlineStride;
        }
        return height * bytesPerRow;
    }
}
|
package si.bismuth.mixins;
import com.mojang.authlib.GameProfile;
import net.minecraft.block.Block;
import net.minecraft.entity.Entity;
import net.minecraft.entity.player.EntityPlayer;
import net.minecraft.entity.player.EntityPlayerMP;
import net.minecraft.init.Blocks;
import net.minecraft.server.management.PlayerList;
import net.minecraft.tileentity.TileEntity;
import net.minecraft.tileentity.TileEntityEndGateway;
import net.minecraft.util.math.BlockPos;
import net.minecraft.util.text.ITextComponent;
import net.minecraft.world.World;
import org.spongepowered.asm.mixin.Mixin;
import org.spongepowered.asm.mixin.Shadow;
import org.spongepowered.asm.mixin.injection.At;
import org.spongepowered.asm.mixin.injection.Inject;
import org.spongepowered.asm.mixin.injection.Redirect;
import org.spongepowered.asm.mixin.injection.callback.CallbackInfo;
import si.bismuth.MCServer;
import si.bismuth.utils.IRecipeBookItemDuper;
import javax.annotation.Nullable;
@Mixin(EntityPlayerMP.class)
public abstract class MixinEntityPlayerMP extends EntityPlayer implements IRecipeBookItemDuper {

    // Shadowed members implemented by the vanilla EntityPlayerMP target.
    @Shadow
    public abstract boolean isSpectator();

    @Shadow
    public abstract BlockPos getPosition();

    @Shadow
    public abstract Entity getSpectatingEntity();

    public MixinEntityPlayerMP(World world, GameProfile profile) {
        super(world, profile);
    }

    // Slot recorded for the recipe-book item-duping hook; Integer.MIN_VALUE
    // is the "no slot recorded" sentinel (set by clearDupeItem()).
    private int dupe;
    // While true, dupeItem() records the slot; otherwise calls are ignored.
    private boolean scanForDuping;

    @Shadow
    @Nullable
    public abstract Entity changeDimension(int dim);

    /** Resets the recorded slot to the "none" sentinel. */
    @Override
    public void clearDupeItem() {
        this.dupe = Integer.MIN_VALUE;
    }

    /** Records {@code slot}, but only while scanning is enabled. */
    @Override
    public void dupeItem(int slot) {
        if (this.scanForDuping) {
            this.dupe = slot;
        }
    }

    /** @return the recorded slot, or Integer.MIN_VALUE if none */
    @Override
    public int getDupeItem() {
        return this.dupe;
    }

    /** Enables or disables slot recording for the dupe scan. */
    @Override
    public void dupeItemScan(boolean s) {
        this.scanForDuping = s;
    }

    /**
     * Runs after each player tick. Clears the recorded dupe slot, then —
     * for a spectator whose camera is on itself (not spectating another
     * entity) — triggers portal travel for the block the player occupies:
     * nether portal, end portal, or end gateway. Presumably this restores
     * portal usage that vanilla disables for spectators — TODO confirm.
     */
    @Inject(method = "onUpdate", at = @At("RETURN"))
    private void postOnUpdate(CallbackInfo ci) {
        this.clearDupeItem();
        if (this.isSpectator() && this.getSpectatingEntity() == this) {
            final BlockPos pos = this.getPosition();
            final Block block = this.world.getBlockState(pos).getBlock();
            if (block == Blocks.PORTAL) {
                this.setPortal(pos);
            } else if (block == Blocks.END_PORTAL) {
                // Dimension 1 is the End.
                this.changeDimension(1);
            } else if (block == Blocks.END_GATEWAY) {
                final TileEntity te = this.world.getTileEntity(pos);
                if (te instanceof TileEntityEndGateway) {
                    ((TileEntityEndGateway) te).teleportEntity(this);
                }
            }
        }
    }

    /**
     * Redirects the vanilla death-message broadcast: still sends the chat
     * message to all players, then additionally forwards it to the bridge
     * bot and logs the player's death coordinates and dimension.
     */
    @Redirect(method = "onDeath", at = @At(value = "INVOKE", target = "Lnet/minecraft/server/management/PlayerList;sendMessage(Lnet/minecraft/util/text/ITextComponent;)V"))
    private void sendMessage(PlayerList list, ITextComponent component) {
        list.sendMessage(component);
        MCServer.bot.sendDeathMessage(component);
        MCServer.log.info("Player {} died at {} {} {} in {}", this.getName(), this.posX, this.posY, this.posZ, this.world.provider.getDimensionType().getName());
    }
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.hadoop.ozone.om.request.s3.tenant;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.audit.OMAction;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OMMetrics;
import org.apache.hadoop.ozone.om.OMMultiTenantManager;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.OmDBTenantState;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.multitenant.OzoneTenant;
import org.apache.hadoop.ozone.om.multitenant.Tenant;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
import org.apache.hadoop.ozone.om.request.volume.OMVolumeRequest;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.om.response.s3.tenant.OMTenantDeleteResponse;
import org.apache.hadoop.ozone.om.upgrade.DisallowedUntilLayoutVersion;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteTenantRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteTenantResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TENANT_NOT_EMPTY;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TENANT_NOT_FOUND;
import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK;
import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA;
/**
 * Handles OMTenantDelete request.
 */
public class OMTenantDeleteRequest extends OMVolumeRequest {

  public static final Logger LOG =
      LoggerFactory.getLogger(OMTenantDeleteRequest.class);

  public OMTenantDeleteRequest(OMRequest omRequest) {
    super(omRequest);
  }

  /**
   * Pre-Ratis step: checks Ozone admin privilege, resolves the tenant by
   * id, and deletes the tenant's policies/roles from the authorizer
   * (Ranger) while holding the authorizer write lock.
   *
   * Lock protocol: on success the authorizer write lock is deliberately
   * left held here and is released in validateAndUpdateCache's finally
   * block; on failure it is released before the exception propagates.
   *
   * @throws IOException if admin check, tenant lookup, or the authorizer
   *         delete fails
   */
  @Override
  @DisallowedUntilLayoutVersion(MULTITENANCY_SCHEMA)
  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
    final OMRequest omRequest = super.preExecute(ozoneManager);
    final OMMultiTenantManager multiTenantManager =
        ozoneManager.getMultiTenantManager();
    // Check Ozone cluster admin privilege
    multiTenantManager.checkAdmin();
    // First get tenant name
    final String tenantId = omRequest.getDeleteTenantRequest().getTenantId();
    Preconditions.checkNotNull(tenantId);
    // Get tenant object by tenant name
    final Tenant tenantObj = multiTenantManager.getTenantFromDBById(tenantId);
    // Acquire write lock to authorizer (Ranger)
    multiTenantManager.getAuthorizerLock().tryWriteLockInOMRequest();
    try {
      // Remove policies and roles from Ranger
      // TODO: Deactivate (disable) policies instead of delete?
      multiTenantManager.getAuthorizerOp().deleteTenant(tenantObj);
    } catch (Exception e) {
      // Release before rethrowing; on the success path the lock stays
      // held until validateAndUpdateCache runs.
      multiTenantManager.getAuthorizerLock().unlockWriteInOMRequest();
      throw e;
    }
    return omRequest;
  }

  /**
   * Ratis-apply step: validates that the tenant exists and is empty,
   * invalidates its tenantStateTable cache entry, decrements the backing
   * volume's reference count (when the tenant has a volume), updates the
   * in-memory tenant cache, and composes the response. Always releases
   * the volume lock (if taken) and the authorizer write lock acquired in
   * preExecute, then emits audit log entries and updates metrics.
   */
  @Override
  @SuppressWarnings("methodlength")
  public OMClientResponse validateAndUpdateCache(
      OzoneManager ozoneManager, long transactionLogIndex,
      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
    final OMMultiTenantManager multiTenantManager =
        ozoneManager.getMultiTenantManager();
    final OMMetrics omMetrics = ozoneManager.getMetrics();
    omMetrics.incNumTenantDeletes();
    OMClientResponse omClientResponse = null;
    final OMResponse.Builder omResponse =
        OmResponseUtil.getOMResponseBuilder(getOmRequest());
    boolean acquiredVolumeLock = false;
    final Map<String, String> auditMap = new HashMap<>();
    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
    final DeleteTenantRequest request = getOmRequest().getDeleteTenantRequest();
    final String tenantId = request.getTenantId();
    String volumeName = null;
    boolean decVolumeRefCount = true;
    IOException exception = null;
    OmVolumeArgs omVolumeArgs = null;
    try {
      // Check tenant existence in tenantStateTable
      if (!omMetadataManager.getTenantStateTable().isExist(tenantId)) {
        LOG.debug("tenant: {} does not exist", tenantId);
        throw new OMException("Tenant '" + tenantId + "' does not exist",
            TENANT_NOT_FOUND);
      }
      // Reading the TenantStateTable without lock as we don't have or need
      // a TENANT_LOCK. The assumption is that OmDBTenantState is read-only
      // once it is set during tenant creation.
      final OmDBTenantState dbTenantState =
          omMetadataManager.getTenantStateTable().get(tenantId);
      volumeName = dbTenantState.getBucketNamespaceName();
      Preconditions.checkNotNull(volumeName);
      LOG.debug("Tenant '{}' has volume '{}'", tenantId, volumeName);
      // decVolumeRefCount is true if volumeName is not empty string
      decVolumeRefCount = volumeName.length() > 0;
      // Acquire the volume lock
      acquiredVolumeLock = omMetadataManager.getLock().acquireWriteLock(
          VOLUME_LOCK, volumeName);
      // Check if there are any accessIds in the tenant
      if (!ozoneManager.getMultiTenantManager().isTenantEmpty(tenantId)) {
        LOG.warn("tenant: '{}' is not empty. Unable to delete the tenant",
            tenantId);
        throw new OMException("Tenant '" + tenantId + "' is not empty. " +
            "All accessIds associated to this tenant must be revoked before " +
            "the tenant can be deleted. See `ozone tenant user revoke`",
            TENANT_NOT_EMPTY);
      }
      // Invalidate cache entry (absent value marks the tenant deleted at
      // this transaction index)
      omMetadataManager.getTenantStateTable().addCacheEntry(
          new CacheKey<>(tenantId),
          new CacheValue<>(Optional.absent(), transactionLogIndex));
      // Decrement volume refCount
      if (decVolumeRefCount) {
        // Check Acl
        if (ozoneManager.getAclsEnabled()) {
          checkAcls(ozoneManager, OzoneObj.ResourceType.VOLUME,
              OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE_ACL,
              volumeName, null, null);
        }
        omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName);
        // Decrement volume ref count
        omVolumeArgs.decRefCount();
        // Update omVolumeArgs
        final String dbVolumeKey = omMetadataManager.getVolumeKey(volumeName);
        omMetadataManager.getVolumeTable().addCacheEntry(
            new CacheKey<>(dbVolumeKey),
            new CacheValue<>(Optional.of(omVolumeArgs), transactionLogIndex));
        // TODO: Set response dbVolumeKey?
      }
      // Update tenant cache
      multiTenantManager.getCacheOp().deleteTenant(new OzoneTenant(tenantId));
      // Compose response
      //
      // If decVolumeRefCount is false, return -1 to the client, otherwise
      // return the actual volume refCount. Note if the actual volume refCount
      // becomes negative somehow, omVolumeArgs.decRefCount() would have thrown
      // earlier.
      final DeleteTenantResponse.Builder deleteTenantResponse =
          DeleteTenantResponse.newBuilder()
              .setVolumeName(volumeName)
              .setVolRefCount(omVolumeArgs == null ? -1 :
                  omVolumeArgs.getRefCount());
      omClientResponse = new OMTenantDeleteResponse(
          omResponse.setDeleteTenantResponse(deleteTenantResponse).build(),
          volumeName, omVolumeArgs, tenantId);
    } catch (IOException ex) {
      exception = ex;
      omClientResponse = new OMTenantDeleteResponse(
          createErrorOMResponse(omResponse, exception));
    } finally {
      // Both success and error responses go through the double buffer.
      addResponseToDoubleBuffer(transactionLogIndex, omClientResponse,
          ozoneManagerDoubleBufferHelper);
      if (acquiredVolumeLock) {
        omMetadataManager.getLock().releaseWriteLock(VOLUME_LOCK, volumeName);
      }
      // Release authorizer write lock (acquired in preExecute)
      multiTenantManager.getAuthorizerLock().unlockWriteInOMRequest();
    }
    // Perform audit logging
    auditMap.put(OzoneConsts.TENANT, tenantId);
    // Audit volume ref count update
    if (decVolumeRefCount) {
      auditLog(ozoneManager.getAuditLogger(),
          buildAuditMessage(OMAction.UPDATE_VOLUME,
              buildVolumeAuditMap(volumeName),
              exception, getOmRequest().getUserInfo()));
    }
    // Audit tenant deletion
    auditLog(ozoneManager.getAuditLogger(),
        buildAuditMessage(OMAction.DELETE_TENANT,
            auditMap, exception, getOmRequest().getUserInfo()));
    if (exception == null) {
      LOG.info("Deleted tenant '{}' and volume '{}'", tenantId, volumeName);
      omMetrics.decNumTenants();
    } else {
      LOG.error("Failed to delete tenant '{}'", tenantId, exception);
      omMetrics.incNumTenantDeleteFails();
    }
    return omClientResponse;
  }
}
|
/*
* Copyright (c) 2002, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.debugger.windbg;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.win32.coff.*;
import sun.jvm.hotspot.debugger.cdbg.*;
import sun.jvm.hotspot.debugger.cdbg.basic.*;
import sun.jvm.hotspot.utilities.Assert;
class WindbgCDebugInfoBuilder
implements DebugVC50SubsectionTypes, DebugVC50TypeLeafIndices, DebugVC50TypeEnums, DebugVC50SymbolTypes, DebugVC50MemberAttributes, CVAttributes, AccessControl {
// Debugger this builder is bound to.
private WindbgDebugger dbg;
// Base address of the DLL whose debug info is being parsed.
private Address base;
// Parsed CodeView (VC50) debug information of the DLL.
private DebugVC50 vc50;
// Database under construction; returned by buildDataBase().
private BasicCDebugInfoDataBase db;
// Iterators over the global type table and symbol subsections.
private DebugVC50TypeIterator iter;
private DebugVC50SymbolIterator symIter;
// Logical->physical segment mapping
private COFFFile file;
private DebugVC50SSSegMap segMap;
// Canonicalization of primitive types
private Map<Integer, BasicType> primIndexToTypeMap;
// Global unnamed enumeration
// (FIXME: must figure out how to handle nested type descriptions)
private BasicEnumType unnamedEnum;
// Stack of currently-open lexical blocks while walking symbol records.
private Stack<BlockSym> blockStack;
// Number of S_END records to swallow (used when skipping THUNK32 scopes).
private int endsToSkip;
// NOTE(review): hard-codes a 32-bit pointer size — confirm for 64-bit targets.
private static final int POINTER_SIZE = 4;

/**
 * Creates a builder bound to the given debugger; call
 * buildDataBase(dllName, base) to parse a DLL's debug information.
 */
WindbgCDebugInfoBuilder(WindbgDebugger dbg) {
  this.dbg = dbg;
}
CDebugInfoDataBase buildDataBase(String dllName, Address base) {
this.base = base;
file = COFFFileParser.getParser().parse(dllName);
vc50 = getDebugVC50(file);
if (vc50 == null) return null;
segMap = getSegMap();
primIndexToTypeMap = new HashMap<>();
blockStack = new Stack<>();
endsToSkip = 0;
db = new BasicCDebugInfoDataBase();
db.beginConstruction();
// Get global types and add them to the database
DebugVC50SSGlobalTypes types = getGlobalTypes();
for (iter = types.getTypeIterator(); !iter.done(); iter.next()) {
while (!iter.typeStringDone()) {
switch (iter.typeStringLeaf()) {
case LF_MODIFIER: {
int idx = iter.getModifierIndex();
BasicType target = getTypeByIndex(idx);
short windowsMods = iter.getModifierAttribute();
short mods = 0;
if ((windowsMods & MODIFIER_CONST_MASK) != 0) mods |= CONST;
if ((windowsMods & MODIFIER_VOLATILE_MASK) != 0) mods |= VOLATILE;
putType(target.getCVVariant(mods));
break;
}
case LF_POINTER: {
int idx = iter.getPointerType();
BasicType target = getTypeByIndex(idx);
short windowsMods = iter.getModifierAttribute();
short mods = 0;
if ((windowsMods & POINTER_CONST_MASK) != 0) mods |= CONST;
if ((windowsMods & POINTER_VOLATILE_MASK) != 0) mods |= VOLATILE;
BasicPointerType ptrType = new BasicPointerType(POINTER_SIZE, target);
if (mods != 0) {
ptrType = (BasicPointerType) ptrType.getCVVariant(mods);
}
putType(ptrType);
break;
}
case LF_ARRAY: {
BasicType elemType = getTypeByIndex(iter.getArrayElementType());
putType(new BasicArrayType(iter.getArrayName(), elemType, iter.getArrayLength()));
break;
}
case LF_CLASS:
case LF_STRUCTURE: {
CompoundTypeKind kind = ((iter.typeStringLeaf() == LF_CLASS) ? CompoundTypeKind.CLASS
: CompoundTypeKind.STRUCT);
BasicCompoundType type = new BasicCompoundType(iter.getClassName(),
iter.getClassSize(),
kind);
// Skip parsing of forward references to types
// FIXME: do we have to resolve these later?
if ((iter.getClassProperty() & PROPERTY_FWDREF) == 0) {
DebugVC50TypeIterator fieldIter = iter.getClassFieldListIterator();
if (Assert.ASSERTS_ENABLED) {
Assert.that(fieldIter.typeStringLeaf() == LF_FIELDLIST, "Expected field list");
}
boolean advance = false;
while (!fieldIter.typeStringDone()) {
advance = true;
switch (fieldIter.typeStringLeaf()) {
case LF_FIELDLIST: break;
case LF_BCLASS: {
int accessControl = memberAttributeToAccessControl(fieldIter.getBClassAttribute());
Type baseType = getTypeByIndex(fieldIter.getBClassType());
// FIXME: take offset into account
type.addBaseClass(new BasicBaseClass(accessControl, false, baseType));
break;
}
case LF_VBCLASS: {
int accessControl = memberAttributeToAccessControl(fieldIter.getVBClassAttribute());
Type baseType = getTypeByIndex(fieldIter.getVBClassBaseClassType());
// FIXME: take offset and virtual base offset into account
type.addBaseClass(new BasicBaseClass(accessControl, true, baseType));
break;
}
// I don't think we need to handle indirect virtual base
// classes since they should be handled indirectly through
// the modeling of the type hierarchy
case LF_IVBCLASS: break;
case LF_INDEX: {
fieldIter = fieldIter.getIndexIterator();
advance = false;
break;
}
case LF_MEMBER: {
BasicField field = new BasicField(fieldIter.getMemberName(),
getTypeByIndex(fieldIter.getMemberType()),
memberAttributeToAccessControl(fieldIter.getMemberAttribute()),
false);
field.setOffset(fieldIter.getMemberOffset());
type.addField(field);
break;
}
case LF_STMEMBER: {
BasicField field = new BasicField(fieldIter.getStaticName(),
getTypeByIndex(fieldIter.getStaticType()),
memberAttributeToAccessControl(fieldIter.getStaticAttribute()),
true);
// The field's address will be found during resolution
// of the debug info database
type.addField(field);
break;
}
// FIXME: handle methods
case LF_METHOD: break;
case LF_ONEMETHOD: break;
// FIXME: handle nested types
case LF_NESTTYPE: break;
case LF_NESTTYPEEX: break;
// NOTE: virtual functions not needed/handled yet for
// this debugging system (because we are not planning to
// handle calling methods in the target process at
// runtime)
case LF_VFUNCTAB: break;
case LF_FRIENDCLS: break;
case LF_VFUNCOFF: break;
case LF_MEMBERMODIFY: break;
case LF_PAD0: case LF_PAD1: case LF_PAD2: case LF_PAD3:
case LF_PAD4: case LF_PAD5: case LF_PAD6: case LF_PAD7:
case LF_PAD8: case LF_PAD9: case LF_PAD10: case LF_PAD11:
case LF_PAD12: case LF_PAD13: case LF_PAD14: case LF_PAD15: break;
default: System.err.println("WARNING: unexpected leaf index " +
fieldIter.typeStringLeaf() +
" in field list for type " + iter.getTypeIndex());
}
if (advance) {
fieldIter.typeStringNext();
}
}
}
putType(type);
break;
}
case LF_UNION: {
BasicCompoundType type = new BasicCompoundType(iter.getUnionName(),
iter.getUnionSize(),
CompoundTypeKind.UNION);
// Skip parsing of forward references to types
// FIXME: do we have to resolve these later?
if ((iter.getClassProperty() & PROPERTY_FWDREF) == 0) {
DebugVC50TypeIterator fieldIter = iter.getUnionFieldListIterator();
if (Assert.ASSERTS_ENABLED) {
Assert.that(fieldIter.typeStringLeaf() == LF_FIELDLIST, "Expected field list");
}
boolean advance = false;
while (!fieldIter.typeStringDone()) {
advance = true;
switch (fieldIter.typeStringLeaf()) {
case LF_FIELDLIST: break;
case LF_BCLASS: break;
case LF_VBCLASS: break;
case LF_IVBCLASS: break;
case LF_INDEX: {
fieldIter = fieldIter.getIndexIterator();
advance = false;
break;
}
case LF_MEMBER: {
BasicField field = new BasicField(fieldIter.getMemberName(),
getTypeByIndex(fieldIter.getMemberType()),
memberAttributeToAccessControl(fieldIter.getMemberAttribute()),
false);
field.setOffset(fieldIter.getMemberOffset());
type.addField(field);
break;
}
case LF_STMEMBER: {
System.err.println("WARNING: I didn't think unions could contain static fields...");
BasicField field = new BasicField(fieldIter.getStaticName(),
getTypeByIndex(fieldIter.getStaticType()),
memberAttributeToAccessControl(fieldIter.getStaticAttribute()),
true);
// The field's address will be found during resolution
// of the debug info database
type.addField(field);
break;
}
case LF_METHOD: break;
case LF_ONEMETHOD: break;
// FIXME: handle nested types
case LF_NESTTYPE: break;
case LF_NESTTYPEEX: break;
case LF_VFUNCTAB: break;
case LF_FRIENDCLS: break;
case LF_VFUNCOFF: break;
case LF_MEMBERMODIFY: break;
case LF_PAD0: case LF_PAD1: case LF_PAD2: case LF_PAD3:
case LF_PAD4: case LF_PAD5: case LF_PAD6: case LF_PAD7:
case LF_PAD8: case LF_PAD9: case LF_PAD10: case LF_PAD11:
case LF_PAD12: case LF_PAD13: case LF_PAD14: case LF_PAD15: break;
default: System.err.println("WARNING: unexpected leaf index " +
fieldIter.typeStringLeaf() +
" in field list for union of type " + iter.getTypeIndex());
}
if (advance) {
fieldIter.typeStringNext();
}
}
}
putType(type);
break;
}
case LF_ENUM: {
String name = iter.getEnumName();
BasicEnumType enumType = null;
if ((name == null) || (name.equals(""))) {
if (unnamedEnum == null) {
unnamedEnum = new BasicEnumType(null, getTypeByIndex(iter.getEnumType()));
}
enumType = unnamedEnum;
} else {
enumType = new BasicEnumType(name, getTypeByIndex(iter.getEnumType()));
}
DebugVC50TypeIterator fieldIter = iter.getEnumFieldListIterator();
if (Assert.ASSERTS_ENABLED) {
Assert.that(fieldIter.typeStringLeaf() == LF_FIELDLIST, "Expected field list");
}
boolean advance = false;
while (!fieldIter.typeStringDone()) {
advance = true;
switch (fieldIter.typeStringLeaf()) {
case LF_FIELDLIST: break;
case LF_ENUMERATE: {
String enumName = fieldIter.getEnumerateName();
long enumVal = fieldIter.getEnumerateValue();
enumType.addEnum(enumName, enumVal);
break;
}
case LF_INDEX: {
fieldIter = fieldIter.getIndexIterator();
advance = false;
break;
}
case LF_PAD0: case LF_PAD1: case LF_PAD2: case LF_PAD3:
case LF_PAD4: case LF_PAD5: case LF_PAD6: case LF_PAD7:
case LF_PAD8: case LF_PAD9: case LF_PAD10: case LF_PAD11:
case LF_PAD12: case LF_PAD13: case LF_PAD14: case LF_PAD15: break;
default: System.err.println("WARNING: unexpected leaf index " +
fieldIter.typeStringLeaf() +
" in field list for enum of type " + iter.getTypeIndex());
}
if (advance) {
fieldIter.typeStringNext();
}
}
putType(enumType);
break;
}
case LF_PROCEDURE: {
Type retType = getTypeByIndex(iter.getProcedureReturnType());
BasicFunctionType func = new BasicFunctionType(null, POINTER_SIZE, retType);
DebugVC50TypeIterator argIter = iter.getProcedureArgumentListIterator();
if (Assert.ASSERTS_ENABLED) {
Assert.that(argIter.typeStringLeaf() == LF_ARGLIST, "Expected argument list");
}
for (int i = 0; i < argIter.getArgListCount(); i++) {
func.addArgumentType(getTypeByIndex(argIter.getArgListType(i)));
}
putType(func);
break;
}
case LF_MFUNCTION: {
Type retType = getTypeByIndex(iter.getMFunctionReturnType());
Type container = getTypeByIndex(iter.getMFunctionContainingClass());
Type thisType = getTypeByIndex(iter.getMFunctionThis());
long thisAdjust = iter.getMFunctionThisAdjust();
BasicMemberFunctionType func = new BasicMemberFunctionType(null,
POINTER_SIZE,
retType,
container,
thisType,
thisAdjust);
DebugVC50TypeIterator argIter = iter.getMFunctionArgumentListIterator();
for (int i = 0; i < argIter.getArgListCount(); i++) {
func.addArgumentType(getTypeByIndex(argIter.getArgListType(i)));
}
putType(func);
break;
}
// FIXME: handle virtual function table shape description
case LF_VTSHAPE: break;
case LF_BARRAY: System.err.println("FIXME: don't know what to do with LF_BARRAY leaves (convert to pointers?"); break;
case LF_LABEL: break;
case LF_NULL: break; // FIXME: do we need to handle this? With what?
case LF_DIMARRAY: System.err.println("FIXME: don't know what to do with LF_DIMARRAY leaves yet"); break;
case LF_VFTPATH: break;
case LF_PRECOMP: break;
case LF_ENDPRECOMP: break;
case LF_OEM: break;
case LF_TYPESERVER: break;
// Type records referenced from other type records
case LF_SKIP: break;
case LF_ARGLIST: skipTypeRecord(); break;
case LF_DEFARG: System.err.println("FIXME: handle default arguments (dereference the type)"); break;
case LF_FIELDLIST: skipTypeRecord(); break;
case LF_DERIVED: break;
case LF_BITFIELD: {
Type underlyingType = getTypeByIndex(iter.getBitfieldFieldType());
BasicBitType bit = new BasicBitType(underlyingType,
(iter.getBitfieldLength() & 0xFF),
(iter.getBitfieldPosition() & 0xFF));
putType(bit);
break;
}
case LF_METHODLIST: break;
case LF_DIMCONU:
case LF_DIMCONLU:
case LF_DIMVARU:
case LF_DIMVARLU: break;
case LF_REFSYM: break;
case LF_PAD0: case LF_PAD1: case LF_PAD2: case LF_PAD3:
case LF_PAD4: case LF_PAD5: case LF_PAD6: case LF_PAD7:
case LF_PAD8: case LF_PAD9: case LF_PAD10: case LF_PAD11:
case LF_PAD12: case LF_PAD13: case LF_PAD14: case LF_PAD15: break;
default: {
System.err.println("Unexpected leaf index " +
iter.typeStringLeaf() + " at offset 0x" +
Integer.toHexString(iter.typeStringOffset()));
break;
}
}
if (!iter.typeStringDone()) {
iter.typeStringNext();
}
}
}
// Add all symbol directories to debug info
// (FIXME: must figure out how to handle module-by-module
// arrangement of at least the static symbols to have proper
// lookup -- should probably also take advantage of the PROCREF
// and UDT references to understand how to build the global
// database vs. the module-by-module one)
DebugVC50SubsectionDirectory dir = vc50.getSubsectionDirectory();
int moduleNumber = 0; // Debugging
for (int i = 0; i < dir.getNumEntries(); i++) {
DebugVC50Subsection ss = dir.getSubsection(i);
int ssType = ss.getSubsectionType();
boolean process = false;
if ((ssType == SST_GLOBAL_SYM) ||
(ssType == SST_GLOBAL_PUB) ||
(ssType == SST_STATIC_SYM)) {
DebugVC50SSSymbolBase syms = (DebugVC50SSSymbolBase) ss;
symIter = syms.getSymbolIterator();
process = true;
}
if (ssType == SST_ALIGN_SYM) {
DebugVC50SSAlignSym syms = (DebugVC50SSAlignSym) ss;
symIter = syms.getSymbolIterator();
process = true;
}
if (process) {
for (; !symIter.done(); symIter.next()) {
switch (symIter.getType()) {
case S_COMPILE: break;
case S_SSEARCH: break; // FIXME: may need this later
case S_END: {
try {
// FIXME: workaround for warnings until we figure out
// what to do with THUNK32 symbols
if (endsToSkip == 0) {
blockStack.pop();
} else {
--endsToSkip;
}
} catch (EmptyStackException e) {
System.err.println("WARNING: mismatched block begins/ends in debug information");
}
break;
}
case S_SKIP: break;
case S_CVRESERVE: break;
case S_OBJNAME: break; // FIXME: may need this later
case S_ENDARG: break;
case S_COBOLUDT: break;
case S_MANYREG: break; // FIXME: may need to add support for this
case S_RETURN: break; // NOTE: would need this if adding support for calling functions
case S_ENTRYTHIS: break; // FIXME: may need to add support for this
case S_REGISTER: break; // FIXME: may need to add support for this
case S_CONSTANT: break; // FIXME: will need to add support for this
case S_UDT: break; // FIXME: need to see how these are used; are
// they redundant, or are they used to describe
// global variables as opposed to types?
case S_COBOLUDT2: break;
case S_MANYREG2: break;
case S_BPREL32: {
LocalSym sym = new BasicLocalSym(symIter.getBPRelName(),
getTypeByIndex(symIter.getBPRelType()),
symIter.getBPRelOffset());
addLocalToCurBlock(sym);
break;
}
case S_LDATA32:
case S_GDATA32: {
// FIXME: must handle these separately from global data (have
// module scoping and only add these at the module level)
boolean isModuleLocal = (symIter.getType() == S_LDATA32);
GlobalSym sym = new BasicGlobalSym(symIter.getLGDataName(),
getTypeByIndex(symIter.getLGDataType()),
newAddress(symIter.getLGDataOffset(), symIter.getLGDataSegment()),
isModuleLocal);
// FIXME: must handle module-local symbols differently
addGlobalSym(sym);
break;
}
case S_PUB32: break; // FIXME: figure out how these differ from
// above and how they are used
case S_LPROC32:
case S_GPROC32: {
BasicFunctionSym sym = new BasicFunctionSym(newLazyBlockSym(symIter.getLGProcParentOffset()),
symIter.getLGProcLength(),
newAddress(symIter.getLGProcOffset(), symIter.getLGProcSegment()),
symIter.getLGProcName(),
getTypeByIndex(symIter.getLGProcType()),
(symIter.getType() == S_LPROC32));
// FIXME: have to handle local procedures differently (have
// notion of modules and only add those procedures to the
// module they are defined in)
addBlock(sym);
break;
}
case S_THUNK32: {
// FIXME: see whether we need to handle these
skipEnd();
break;
}
case S_BLOCK32: {
BasicBlockSym sym = new BasicBlockSym(newLazyBlockSym(symIter.getBlockParentOffset()),
symIter.getBlockLength(),
newAddress(symIter.getBlockOffset(), symIter.getBlockSegment()),
symIter.getBlockName());
addBlock(sym);
break;
}
case S_WITH32: break;
case S_LABEL32: break;
case S_CEXMODEL32: break;
case S_VFTTABLE32: break; // FIXME: may need to handle this
// (most likely for run-time type determination)
case S_REGREL32: break; // FIXME: may need to add support for this
case S_LTHREAD32: break;
case S_GTHREAD32: break; // FIXME: may need to add support for these
case S_PROCREF: break;
case S_DATAREF: break;
case S_ALIGN: break;
default:
// These two unknown symbol types show up very frequently.
// Symbol type 0 appears to always be a no-op symbol of
// length 2 (i.e., length just covers the symbol type.)
// Symbol type 4115 appears to be a copyright notice for
// the Microsoft linker.
if ((symIter.getType() != 0) && (symIter.getType() != 4115)) {
System.err.println(" NOTE: Unexpected symbol of type " +
symIter.getType() + " at offset 0x" +
Integer.toHexString(symIter.getOffset()));
}
break;
}
}
}
}
// Add line number information for all modules
for (int i = 0; i < dir.getNumEntries(); i++) {
DebugVC50Subsection ss = dir.getSubsection(i);
if (ss.getSubsectionType() == SST_SRC_MODULE) {
DebugVC50SSSrcModule srcMod = (DebugVC50SSSrcModule) ss;
for (int sf = 0; sf < srcMod.getNumSourceFiles(); sf++) {
DebugVC50SrcModFileDesc desc = srcMod.getSourceFileDesc(sf);
// Uniquify these to save space
String name = desc.getSourceFileName().intern();
for (int cs = 0; cs < desc.getNumCodeSegments(); cs++) {
DebugVC50SrcModLineNumberMap map = desc.getLineNumberMap(cs);
SectionHeader seg = file.getHeader().getSectionHeader(map.getSegment());
for (int lp = 0; lp < map.getNumSourceLinePairs(); lp++) {
Address startPC = base.addOffsetTo(seg.getVirtualAddress() + map.getCodeOffset(lp));
// Fake address for endPC -- will be filled in by BasicLineNumberMapping
Address endPC = base.addOffsetTo(seg.getSize());
db.addLineNumberInfo(new BasicLineNumberInfo(name, map.getLineNumber(lp), startPC, endPC));
}
}
}
}
}
// Finish assembly of database
db.resolve(new ResolveListener() {
public void resolveFailed(Type containingType, LazyType failedResolve, String detail) {
System.err.println("WARNING: failed to resolve type of index " +
((Integer) failedResolve.getKey()).intValue() +
" in type " + containingType.getName() + " (class " +
containingType.getClass().getName() + ") while " + detail);
}
public void resolveFailed(Type containingType, String staticFieldName) {
System.err.println("WARNING: failed to resolve address of static field \"" +
staticFieldName + "\" in type " + containingType.getName());
}
public void resolveFailed(Sym containingSymbol, LazyType failedResolve, String detail) {
System.err.println("WARNING: failed to resolve type of index " +
((Integer) failedResolve.getKey()).intValue() +
" in symbol of type " + containingSymbol.getClass().getName() +
" while " + detail);
}
public void resolveFailed(Sym containingSymbol, LazyBlockSym failedResolve, String detail) {
System.err.println("WARNING: failed to resolve block at offset 0x" +
Integer.toHexString(((Integer) failedResolve.getKey()).intValue()) +
" in symbol of type " + containingSymbol.getClass().getName() +
" while " + detail);
}
});
db.endConstruction();
return db;
}
//----------------------------------------------------------------------
// Internals only below this point
//
/**
 * Locates the CodeView (VC50) debug information inside a COFF file by walking
 * optional header -> data directories -> debug directory and scanning the
 * debug directory entries for one of type IMAGE_DEBUG_TYPE_CODEVIEW.
 *
 * @param file the COFF executable/DLL to inspect
 * @return the CodeView debug information, or null if any link in the chain
 *         (optional header, data directories, debug directory, CodeView entry)
 *         is absent
 */
private static DebugVC50 getDebugVC50(COFFFile file) {
  COFFHeader header = file.getHeader();
  OptionalHeader optHeader = header.getOptionalHeader();
  if (optHeader == null) {
    return null; // no optional header present
  }
  OptionalHeaderDataDirectories dataDirs = optHeader.getDataDirectories();
  if (dataDirs == null) {
    return null; // no data directories present
  }
  DebugDirectory debugDir = dataDirs.getDebugDirectory();
  if (debugDir == null) {
    return null; // no debug directory present
  }
  int numEntries = debugDir.getNumEntries();
  for (int idx = 0; idx < numEntries; idx++) {
    DebugDirectoryEntry entry = debugDir.getEntry(idx);
    if (entry.getType() == DebugTypes.IMAGE_DEBUG_TYPE_CODEVIEW) {
      return entry.getDebugVC50();
    }
  }
  // Debug directory present but contains no CodeView entry
  return null;
}
/** Returns the segment-map subsection (SST_SEG_MAP); throws DebuggerException if absent. */
private DebugVC50SSSegMap getSegMap() {
return (DebugVC50SSSegMap) findSubsection(SST_SEG_MAP);
}
/** Returns the global types subsection (SST_GLOBAL_TYPES); throws DebuggerException if absent. */
private DebugVC50SSGlobalTypes getGlobalTypes() {
return (DebugVC50SSGlobalTypes) findSubsection(SST_GLOBAL_TYPES);
}
/** Returns the global symbols subsection (SST_GLOBAL_SYM); throws DebuggerException if absent. */
private DebugVC50SSGlobalSym getGlobalSymbols() {
return (DebugVC50SSGlobalSym) findSubsection(SST_GLOBAL_SYM);
}
/**
 * Performs a linear scan of the VC50 subsection directory for the first
 * subsection of the requested type.
 *
 * @param ssType one of the SST_* subsection type constants
 * @return the matching subsection (never null)
 * @throws DebuggerException if no subsection of that type exists
 */
private DebugVC50Subsection findSubsection(short ssType) {
  DebugVC50SubsectionDirectory directory = vc50.getSubsectionDirectory();
  int count = directory.getNumEntries();
  for (int idx = 0; idx < count; idx++) {
    DebugVC50Subsection candidate = directory.getSubsection(idx);
    if (candidate.getSubsectionType() == ssType) {
      return candidate;
    }
  }
  throw new DebuggerException("Unable to find subsection of type " + ssType);
}
/** Registers the given type in the database under the type iterator's current index. */
private void putType(Type t) {
db.addType(iter.getTypeIndex(), t);
}
/**
 * Converts a CodeView (segment, offset) pair into an absolute Address
 * relative to the image base, using the section header corresponding to
 * the segment number.
 */
private Address newAddress(int offset, short segment) {
int seg = segment & 0xFFFF; // widen segment number as unsigned
// NOTE: it isn't clear how to use the segMap to map from logical
// to physical segments. It seems it would make more sense if the
// SegDescs contained a physical segment number in addition to the
// offset within the physical segment of the logical one.
// Get the section header corresponding to this segment
SectionHeader section = file.getHeader().getSectionHeader(seg);
// Result is relative to image base
return base.addOffsetTo(section.getVirtualAddress() + offset);
}
/**
 * Returns the Type for a CodeView type index. Indices at or below 0x0FFF
 * are reserved primitive types and are decoded (and cached in
 * primIndexToTypeMap) here; all higher indices yield a LazyType that is
 * resolved later during database construction.
 *
 * @throws DebuggerException if the reserved-type bit patterns are not
 *         among the supported combinations
 */
private BasicType getTypeByIndex(int intIndex) {
Integer index = intIndex;
// Handle primitive types here.
if (intIndex <= 0x0FFF) {
// Reserved/primitive range: consult the cache first.
BasicType type = (BasicType) primIndexToTypeMap.get(index);
if (type != null) {
return type;
}
// Construct appropriate new primitive type
int primMode = intIndex & RESERVED_MODE_MASK;
if (primMode == RESERVED_MODE_DIRECT) {
// Direct (non-pointer) mode: decode the type and size bit fields.
int primType = intIndex & RESERVED_TYPE_MASK;
switch (primType) {
case RESERVED_TYPE_SIGNED_INT:
case RESERVED_TYPE_UNSIGNED_INT: {
boolean unsigned = (primType == RESERVED_TYPE_UNSIGNED_INT);
int size = 0;
String name = null;
switch (intIndex & RESERVED_SIZE_MASK) {
case RESERVED_SIZE_INT_1_BYTE: size = 1; name = "char"; break;
case RESERVED_SIZE_INT_2_BYTE: size = 2; name = "short"; break;
case RESERVED_SIZE_INT_4_BYTE: size = 4; name = "int"; break;
case RESERVED_SIZE_INT_8_BYTE: size = 8; name = "__int64"; break;
default: throw new DebuggerException("Illegal size of integer type " + intIndex);
}
type = new BasicIntType(name, size, unsigned);
break;
}
case RESERVED_TYPE_BOOLEAN: {
int size = 0;
switch (intIndex & RESERVED_SIZE_MASK) {
case RESERVED_SIZE_INT_1_BYTE: size = 1; break;
case RESERVED_SIZE_INT_2_BYTE: size = 2; break;
case RESERVED_SIZE_INT_4_BYTE: size = 4; break;
case RESERVED_SIZE_INT_8_BYTE: size = 8; break;
default: throw new DebuggerException("Illegal size of boolean type " + intIndex);
}
// Booleans are modeled as integer types named "bool".
type = new BasicIntType("bool", size, false);
break;
}
case RESERVED_TYPE_REAL: {
switch (intIndex & RESERVED_SIZE_MASK) {
case RESERVED_SIZE_REAL_32_BIT:
type = new BasicFloatType("float", 4);
break;
case RESERVED_SIZE_REAL_64_BIT:
type = new BasicDoubleType("double", 8);
break;
default:
throw new DebuggerException("Unsupported floating-point size in type " + intIndex);
}
break;
}
case RESERVED_TYPE_REALLY_INT: {
switch (intIndex & RESERVED_SIZE_MASK) {
case RESERVED_SIZE_REALLY_INT_CHAR: type = new BasicIntType("char", 1, false); break;
case RESERVED_SIZE_REALLY_INT_WCHAR: type = new BasicIntType("wchar", 2, false); break;
case RESERVED_SIZE_REALLY_INT_2_BYTE: type = new BasicIntType("short", 2, false); break;
case RESERVED_SIZE_REALLY_INT_2_BYTE_U: type = new BasicIntType("short", 2, true); break;
case RESERVED_SIZE_REALLY_INT_4_BYTE: type = new BasicIntType("int", 4, false); break;
case RESERVED_SIZE_REALLY_INT_4_BYTE_U: type = new BasicIntType("int", 4, true); break;
case RESERVED_SIZE_REALLY_INT_8_BYTE: type = new BasicIntType("__int64", 8, false); break;
case RESERVED_SIZE_REALLY_INT_8_BYTE_U: type = new BasicIntType("__int64", 8, true); break;
default: throw new DebuggerException("Illegal REALLY_INT size in type " + intIndex);
}
break;
}
case RESERVED_TYPE_SPECIAL: {
switch (intIndex & RESERVED_SIZE_MASK) {
case RESERVED_SIZE_SPECIAL_NO_TYPE:
case RESERVED_SIZE_SPECIAL_VOID: type = new BasicVoidType(); break;
default: throw new DebuggerException("Don't know how to handle reserved special type " + intIndex);
}
break;
}
default:
throw new DebuggerException("Don't know how to handle reserved type " + intIndex);
}
} else {
// Fold all pointer types together since we only support
// flat-mode addressing anyway
Type targetType = getTypeByIndex(intIndex & (~RESERVED_MODE_MASK));
type = new BasicPointerType(POINTER_SIZE, targetType);
}
if (Assert.ASSERTS_ENABLED) {
Assert.that(type != null, "Got null Type for primitive type " + intIndex);
}
// Cache so each primitive index is decoded at most once.
primIndexToTypeMap.put(index, type);
return type;
}
// Not primitive type. Construct lazy reference to target type.
// (Is it worth canonicalizing these as well to save space?)
return new LazyType(index);
}
/**
 * Registers a scope-introducing symbol (procedure or lexical block) with the
 * database at the current symbol offset and makes it the innermost open scope.
 */
private void addBlock(BlockSym block) {
db.addBlock(symIter.getOffset(), block);
blockStack.push(block);
}
/** Arranges for the next S_END symbol to be ignored (used when skipping THUNK32 records). */
private void skipEnd() {
++endsToSkip;
}
/**
 * Wraps a symbol-section offset in a lazily-resolved block reference.
 * An offset of 0 denotes "no parent scope" and maps to null.
 */
private BlockSym newLazyBlockSym(int offset) {
  return (offset != 0) ? new LazyBlockSym(offset) : null;
}
/**
 * Maps the access bits of a CodeView member-attribute bitfield to one of the
 * platform-independent access-control constants.
 *
 * @throws RuntimeException if the masked value is not one of the four defined
 *         access levels (should not happen for well-formed debug info)
 */
private int memberAttributeToAccessControl(short memberAttribute) {
int acc = memberAttribute & MEMATTR_ACCESS_MASK;
switch (acc) {
case MEMATTR_ACCESS_NO_PROTECTION: return NO_PROTECTION;
case MEMATTR_ACCESS_PRIVATE: return PRIVATE;
case MEMATTR_ACCESS_PROTECTED: return PROTECTED;
case MEMATTR_ACCESS_PUBLIC: return PUBLIC;
default: throw new RuntimeException("Should not reach here");
}
}
/**
 * Adds a local variable to the innermost open block on the block stack.
 * Throws EmptyStackException if no block is currently open.
 */
private void addLocalToCurBlock(LocalSym local) {
((BasicBlockSym) blockStack.peek()).addLocal(local);
}
/** Registers a global data symbol with the debug-info database. */
private void addGlobalSym(GlobalSym sym) {
db.addGlobalSym(sym);
}
/** Advances the type iterator past the remainder of the current type record. */
private void skipTypeRecord() {
while (!iter.typeStringDone()) {
iter.typeStringNext();
}
}
}
|
package project1.service;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Optional;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import project1.models.Product;
import project1.repository.ProductRepository;
@Service
public class ProductService {
    @Autowired
    ProductRepository productRepository;

    /** Base directory for uploaded product images (configured via file.upload-dir). */
    @Value("${file.upload-dir}")
    private String filePath;

    /** Returns all products. */
    public List<Product> getAllProduct() {
        return productRepository.findAll();
    }

    /**
     * Saves a new product, writing its uploaded image to disk as "&lt;name&gt;.jpg".
     *
     * @throws IOException if the image cannot be written
     * @throws IllegalArgumentException if the product name would escape the upload directory
     */
    public void addProduct(Product product) throws IOException {
        byte[] imageBytes = product.getImageFile().getBytes();
        Files.write(imagePathFor(product.getName()), imageBytes);
        product.setFileName(product.getName() + ".jpg");
        productRepository.save(product);
    }

    /** Deletes the product with the given id (no-op semantics per repository contract). */
    public void deleteById(Long id) {
        productRepository.deleteById(id);
    }

    /** Looks up a product by id. */
    public Optional<Product> findById(Long id) {
        return productRepository.findById(id);
    }

    /**
     * Updates an existing product's fields; replaces the stored image only when
     * a non-empty file was uploaded.
     *
     * @throws IOException if the replacement image cannot be written
     * @throws NoSuchElementException if no product with the given id exists
     */
    public void updateProduct(Product product) throws IOException {
        // Was Optional.get() without a presence check; keep the same exception
        // type but fail with a diagnosable message.
        Product productEntity = productRepository.findById(product.getId())
                .orElseThrow(() -> new NoSuchElementException(
                        "No product found with id " + product.getId()));
        productEntity.setName(product.getName());
        productEntity.setDescription(product.getDescription());
        productEntity.setPrice(product.getPrice());
        if (product.getImageFile().getSize() != 0) {
            byte[] imageBytes = product.getImageFile().getBytes();
            Files.write(imagePathFor(product.getName()), imageBytes);
            productEntity.setFileName(product.getName() + ".jpg");
        }
        productRepository.save(productEntity);
    }

    /**
     * Builds the on-disk path for a product image and rejects names that would
     * escape the configured upload directory (path traversal such as "../..").
     * Preserves the original "filePath + name + .jpg" concatenation, so filePath
     * is assumed to end with a path separator -- TODO confirm against config.
     */
    private Path imagePathFor(String productName) {
        Path base = Paths.get(filePath).normalize();
        Path target = Paths.get(filePath + productName + ".jpg").normalize();
        if (!target.startsWith(base)) {
            throw new IllegalArgumentException("Invalid product name: " + productName);
        }
        return target;
    }
}
|
package com.checkpoint.andela.notekeeper.adapter;
import android.content.Context;
import android.view.LayoutInflater;
import android.view.MenuItem;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ArrayAdapter;
import android.widget.PopupMenu;
import android.widget.TextView;
import com.checkpoint.andela.notekeeper.R;
import com.checkpoint.andela.notekeeper.model.NoteModel;
import java.util.ArrayList;
/**
 * Adapter that renders {@link NoteModel} rows (title, content, date) in a list,
 * using the view-holder pattern so child views are looked up only once per
 * inflated row.
 *
 * Created by suadahaji.
 */
public class ListNoteAdapter extends ArrayAdapter<NoteModel> implements PopupMenu.OnMenuItemClickListener {

    public ListNoteAdapter(Context context, ArrayList<NoteModel> noteModels) {
        super(context, 0, noteModels);
    }

    /** View holder caching a row's TextViews; stored as the row view's tag. */
    static class LayoutHandler {
        TextView noteTitle;
        TextView noteContent;
        TextView noteDate;
    }

    @Override
    public View getView(int position, View convertView, ViewGroup parent) {
        NoteModel noteModel = getItem(position);
        View row = convertView;
        LayoutHandler layoutHandler;
        if (row == null) {
            // First use of this slot: inflate the row and cache its child views.
            LayoutInflater layoutInflater =
                    (LayoutInflater) this.getContext().getSystemService(Context.LAYOUT_INFLATER_SERVICE);
            row = layoutInflater.inflate(R.layout.notes_row_layout, parent, false);
            layoutHandler = new LayoutHandler();
            layoutHandler.noteTitle = (TextView) row.findViewById(R.id.row_note_title);
            layoutHandler.noteContent = (TextView) row.findViewById(R.id.row_note_content);
            layoutHandler.noteDate = (TextView) row.findViewById(R.id.row_note_date);
            row.setTag(layoutHandler);
        } else {
            // Recycled row: reuse the cached holder instead of findViewById.
            layoutHandler = (LayoutHandler) row.getTag();
        }
        layoutHandler.noteTitle.setText(noteModel.getNote_title());
        layoutHandler.noteContent.setText(noteModel.getNote_content());
        layoutHandler.noteDate.setText(noteModel.getNote_date());
        return row;
    }

    @Override
    public boolean onMenuItemClick(MenuItem item) {
        // No popup-menu actions handled yet; false lets other listeners run.
        return false;
    }
}
|
package Programs;
import java.util.Random;
import java.util.Scanner;
/**
 * Console number-guessing game: the player gets 10 chances to guess a number
 * the program picks in the advertised range 0..100 (inclusive).
 */
public class NumberGame {
    public static void main(String[] args) {
        // try-with-resources closes the Scanner when the game ends.
        try (Scanner s = new Scanner(System.in)) {
            Random r = new Random();
            // nextInt's bound is exclusive: nextInt(101) yields 0..100,
            // matching the "between 0 to 100" prompt below. The previous
            // nextInt(100) could never produce 100.
            int random = r.nextInt(101);
            boolean hasWon = false;
            System.out.println("You need to guess a number between 0 to 100 in 10 chances");
            System.out.println("Try to guess It");
            for (int i = 10; i > 0; i--) {
                System.out.println("You have " + i + " chances left try again");
                int guess = s.nextInt();
                if (random > guess) {
                    System.out.println("Its greater than " + guess);
                } else if (random < guess) {
                    System.out.println("Its smaller than " + guess);
                } else {
                    // Exact match: stop consuming chances immediately.
                    hasWon = true;
                    break;
                }
            }
            if (hasWon) {
                System.out.println("COrrect YOU WIN");
            } else {
                System.out.println("GAME OVER: The random number was " + random);
            }
        }
    }
}
|
/*******************************************************************************
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package org.apache.drill.exec.store.parquet.columnreaders;
import java.io.IOException;
import org.apache.drill.common.exceptions.ExecutionSetupException;
import org.apache.drill.exec.vector.BaseDataValueVector;
import org.apache.drill.exec.vector.UInt4Vector;
import org.apache.drill.exec.vector.complex.RepeatedValueVector;
import org.apache.parquet.column.ColumnDescriptor;
import org.apache.parquet.format.SchemaElement;
import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
/**
 * Column reader for repeated (list) columns whose elements are fixed-width.
 * Wraps a fixed-width element reader (dataReader) and drives it with the
 * repetition/definition level streams, accumulating per-list lengths so the
 * element data can be bulk-copied. Lists may span page boundaries; the
 * notFishedReadingList flag (NOTE(review): apparently a typo for
 * "notFinishedReadingList" -- left unrenamed in case it is referenced
 * elsewhere) suppresses state resets until the full list length is known.
 */
public class FixedWidthRepeatedReader extends VarLengthColumn<RepeatedValueVector> {
ColumnReader<?> dataReader;
int dataTypeLengthInBytes;
// we can do a vector copy of the data once we figure out how much we need to copy
// this tracks the number of values to transfer (the dataReader will translate this to a number
// of bytes to transfer and re-use the code from the non-repeated types)
int valuesToRead;
int repeatedGroupsReadInCurrentPass;
int repeatedValuesInCurrentList;
// empty lists are notated by definition levels, to stop reading at the correct time, we must keep
// track of the number of empty lists as well as the length of all of the defined lists together
int definitionLevelsRead;
// parquet currently does not restrict lists reaching across pages for repeated values, this necessitates
// tracking when this happens to stop some of the state updates until we know the full length of the repeated
// value for the current record
boolean notFishedReadingList;
byte[] leftOverBytes;
FixedWidthRepeatedReader(ParquetRecordReader parentReader, ColumnReader<?> dataReader, int dataTypeLengthInBytes, int allocateSize, ColumnDescriptor descriptor, ColumnChunkMetaData columnChunkMetaData, boolean fixedLength, RepeatedValueVector valueVector, SchemaElement schemaElement) throws ExecutionSetupException {
super(parentReader, allocateSize, descriptor, columnChunkMetaData, fixedLength, valueVector, schemaElement);
this.dataTypeLengthInBytes = dataTypeLengthInBytes;
this.dataReader = dataReader;
// The element reader shares this reader's page reader; release its own first.
this.dataReader.pageReader.clear();
this.dataReader.pageReader = this.pageReader;
// this is not in the reset method because it needs to be initialized only for the very first page read
// in all other cases if a read ends at a page boundary we will need to keep track of this flag and not
// clear it at the start of the next read loop
notFishedReadingList = false;
}
/** Resets per-batch counters and repoints the element reader at the (possibly reallocated) data buffer. */
@Override
public void reset() {
bytesReadInCurrentPass = 0;
valuesReadInCurrentPass = 0;
pageReader.valuesReadyToRead = 0;
dataReader.vectorData = BaseDataValueVector.class.cast(valueVec.getDataVector()).getBuffer();
dataReader.valuesReadInCurrentPass = 0;
repeatedGroupsReadInCurrentPass = 0;
}
/** For repeated columns a "record" is a list, so report groups (lists), not elements. */
@Override
public int getRecordsReadInCurrentPass() {
return repeatedGroupsReadInCurrentPass;
}
/** Intentionally empty: element transfer is delegated to dataReader in readRecords. */
@Override
protected void readField(long recordsToRead) {
//To change body of implemented methods use File | Settings | File Templates.
}
@Override
public boolean skipReadyToReadPositionUpdate() {
return false;
}
/** Commits the just-measured list into the "ready to read" totals and starts a new list. */
@Override
public void updateReadyToReadPosition() {
valuesToRead += repeatedValuesInCurrentList;
pageReader.valuesReadyToRead += repeatedValuesInCurrentList;
repeatedGroupsReadInCurrentPass++;
currDictVal = null;
// -1 marks "list length not yet known"; keep the length while a list straddles pages.
if ( ! notFishedReadingList) {
repeatedValuesInCurrentList = -1;
}
}
/** Advances page/pass positions by the current list's byte length (dataTypeLengthInBits holds bytes here). */
@Override
public void updatePosition() {
pageReader.readPosInBytes += dataTypeLengthInBits;
bytesReadInCurrentPass += dataTypeLengthInBits;
valuesReadInCurrentPass++;
}
@Override
public void hitRowGroupEnd() {
pageReader.valuesReadyToRead = 0;
definitionLevelsRead = 0;
}
@Override
public void postPageRead() {
super.postPageRead();
// this is no longer correct as we figured out that lists can reach across pages
if ( ! notFishedReadingList) {
repeatedValuesInCurrentList = -1;
}
definitionLevelsRead = 0;
}
@Override
protected int totalValuesReadAndReadyToReadInPage() {
// we need to prevent the page reader from getting rid of the current page in the case where we have a repeated
// value split across a page boundary
if (notFishedReadingList) {
return definitionLevelsRead - repeatedValuesInCurrentList;
}
return definitionLevelsRead;
}
/** Stops the batch when either the superclass limit or the value vector's element capacity is hit. */
@Override
protected boolean checkVectorCapacityReached() {
boolean doneReading = super.checkVectorCapacityReached();
if (doneReading) {
return true;
}
if (valuesReadInCurrentPass + pageReader.valuesReadyToRead + repeatedValuesInCurrentList >= valueVec.getValueCapacity()) {
return true;
} else {
return false;
}
}
/**
 * Measures the next list's length by walking definition/repetition levels.
 * Returns true when the batch must be cut off (list spills into the next
 * page), false otherwise; may also return false to signal end of row group.
 */
@Override
protected boolean readAndStoreValueSizeInformation() {
int numLeftoverVals = 0;
// Finish a list left incomplete by the previous page before measuring new ones.
if (notFishedReadingList) {
numLeftoverVals = repeatedValuesInCurrentList;
readRecords(numLeftoverVals);
notFishedReadingList = false;
pageReader.valuesReadyToRead = 0;
try {
boolean stopReading = readPage();
if (stopReading) {
// hit the end of a row group
return false;
}
} catch (IOException e) {
throw new RuntimeException("Unexpected error reading parquet repeated column.", e);
}
}
if ( currDefLevel == -1 ) {
currDefLevel = pageReader.definitionLevels.readInteger();
definitionLevelsRead++;
}
int repLevel;
// Max definition level means the list is defined (non-empty at this nesting).
if ( columnDescriptor.getMaxDefinitionLevel() == currDefLevel) {
if (repeatedValuesInCurrentList == -1 || notFishedReadingList) {
repeatedValuesInCurrentList = 1;
// Count elements until a repetition level of 0 marks the next record.
do {
repLevel = pageReader.repetitionLevels.readInteger();
if (repLevel > 0) {
repeatedValuesInCurrentList++;
currDefLevel = pageReader.definitionLevels.readInteger();
definitionLevelsRead++;
// we hit the end of this page, without confirmation that we reached the end of the current record
if (definitionLevelsRead == pageReader.currentPageCount) {
// check that we have not hit the end of the row group (in which case we will not find the repetition level indicating
// the end of this record as there is no next page to check, we have read all the values in this repetition so it is okay
// to add it to the read )
if (totalValuesRead + pageReader.valuesReadyToRead + repeatedValuesInCurrentList != columnChunkMetaData.getValueCount()) {
notFishedReadingList = true;
// if we hit this case, we cut off the current batch at the previous value, these extra values as well
// as those that spill into the next page will be added to the next batch
return true;
}
}
}
} while (repLevel != 0);
}
} else {
// Definition level below max: an empty list.
repeatedValuesInCurrentList = 0;
}
// this should not fail
final UInt4Vector offsets = valueVec.getOffsetVector();
offsets.getMutator().setSafe(repeatedGroupsReadInCurrentPass + 1, offsets.getAccessor().get(repeatedGroupsReadInCurrentPass));
// This field is being referenced in the superclass determineSize method, so we need to set it here
// again going to make this the length in BYTES to avoid repetitive multiplication/division
dataTypeLengthInBits = repeatedValuesInCurrentList * dataTypeLengthInBytes;
return false;
}
/** Bulk-transfers the measured elements via the wrapped reader and updates value counts. */
@Override
protected void readRecords(int valuesToRead) {
if (valuesToRead == 0) {
return;
}
// TODO - validate that this works in all cases, it fixes a bug when reading from multiple pages into
// a single vector
dataReader.valuesReadInCurrentPass = 0;
dataReader.readValues(valuesToRead);
valuesReadInCurrentPass += valuesToRead;
valueVec.getMutator().setValueCount(repeatedGroupsReadInCurrentPass);
valueVec.getDataVector().getMutator().setValueCount(valuesReadInCurrentPass);
}
@Override
public int capacity() {
return BaseDataValueVector.class.cast(valueVec.getDataVector()).getBuffer().capacity();
}
@Override
public void clear() {
super.clear();
dataReader.clear();
}
}
|
package <%=packageName%>;
import <%=packageName%>.config.Constants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.actuate.autoconfigure.MetricFilterAutoConfiguration;
import org.springframework.boot.actuate.autoconfigure.MetricRepositoryAutoConfiguration;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.core.env.Environment;
import org.springframework.core.env.SimpleCommandLinePropertySource;
import javax.annotation.PostConstruct;
import javax.inject.Inject;
import java.io.IOException;
import java.util.Arrays;
@ComponentScan
@EnableAutoConfiguration(exclude = {MetricFilterAutoConfiguration.class, MetricRepositoryAutoConfiguration.class})
public class Application {
private final Logger log = LoggerFactory.getLogger(Application.class);
@Inject
private Environment env;
/**
 * Initializes <%= baseName %>.
 * <p/>
 * Spring profiles can be configured with a program arguments --spring.profiles.active=your-active-profile
 * <p/>
 */
@PostConstruct
public void initApplication() throws IOException {
// Log the active profiles so misconfiguration is visible at startup.
if (env.getActiveProfiles().length == 0) {
log.warn("No Spring profile configured, running with default configuration")<
} else {
log.info("Running with Spring profile(s) : {}", Arrays.toString(env.getActiveProfiles()));
}
}
/**
 * Main method, used to run the application.
 */
public static void main(String[] args) {
SpringApplication app = new SpringApplication(Application.class);
// Suppress the Spring Boot startup banner.
app.setShowBanner(false);
SimpleCommandLinePropertySource source = new SimpleCommandLinePropertySource(args);
// Check if the selected profile has been set as argument.
// if not the development profile will be added
addDefaultProfile(app, source);
app.run(args);
}
/**
 * Set a default profile if it has not been set
 */
private static void addDefaultProfile(SpringApplication app, SimpleCommandLinePropertySource source) {
if (!source.containsProperty("spring.profiles.active")) {
app.setAdditionalProfiles(Constants.SPRING_PROFILE_DEVELOPMENT);
}
}
}
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.orc.metadata.statistics;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.util.Optional;
import static com.facebook.presto.orc.metadata.statistics.DecimalStatistics.DECIMAL_VALUE_BYTES_OVERHEAD;
public class ShortDecimalStatisticsBuilder
implements LongValueStatisticsBuilder
{
public static final long SHORT_DECIMAL_VALUE_BYTES = 8L;
private final int scale;
private long nonNullValueCount;
private long minimum = Long.MAX_VALUE;
private long maximum = Long.MIN_VALUE;
public ShortDecimalStatisticsBuilder(int scale)
{
this.scale = scale;
}
@Override
public void addValue(long value)
{
nonNullValueCount++;
minimum = Math.min(value, minimum);
maximum = Math.max(value, maximum);
}
private Optional<DecimalStatistics> buildDecimalStatistics()
{
if (nonNullValueCount == 0) {
return Optional.empty();
}
return Optional.of(new DecimalStatistics(
new BigDecimal(BigInteger.valueOf(minimum), scale),
new BigDecimal(BigInteger.valueOf(maximum), scale),
SHORT_DECIMAL_VALUE_BYTES));
}
@Override
public ColumnStatistics buildColumnStatistics()
{
Optional<DecimalStatistics> decimalStatistics = buildDecimalStatistics();
return new ColumnStatistics(
nonNullValueCount,
decimalStatistics.map(s -> DECIMAL_VALUE_BYTES_OVERHEAD + SHORT_DECIMAL_VALUE_BYTES).orElse(0L),
null,
null,
null,
null,
null,
decimalStatistics.orElse(null),
null,
null);
}
}
|
package seedu.address.model.util.predicate;
import java.util.List;
import seedu.address.commons.util.StringUtil;
import seedu.address.model.patient.Patient;
/**
 * Tests that a {@code Patient}'s attached {@code NextOfKin} {@code Name} matches any of the keywords given.
 */
public class KinAddressContainsKeywordsPredicate extends ContainsKeywordsPredicate<Patient> {

    public KinAddressContainsKeywordsPredicate(List<String> keywords) {
        super(keywords);
    }

    public KinAddressContainsKeywordsPredicate(List<String> keywords, boolean isIgnoreCase, boolean isAnd) {
        super(keywords, isIgnoreCase, isAnd);
    }

    @Override
    public boolean test(Patient patient) {
        String kinAddress = patient.getNextOfKin().getAddress().toString();
        // isAnd selects all-vs-any semantics; case handling lives in matches().
        if (isAnd) {
            return keywords.stream().allMatch(keyword -> matches(kinAddress, keyword));
        }
        return keywords.stream().anyMatch(keyword -> matches(kinAddress, keyword));
    }

    /** Checks a single keyword against the address with the configured case sensitivity. */
    private boolean matches(String kinAddress, String keyword) {
        return isIgnoreCase
                ? StringUtil.containsWordIgnoreCase(kinAddress, keyword)
                : StringUtil.containsWordCaseSensitive(kinAddress, keyword);
    }

    @Override
    public boolean equals(Object other) {
        return other == this // short circuit if same object
            || (other instanceof KinAddressContainsKeywordsPredicate // instanceof handles nulls
            && keywords.equals(((KinAddressContainsKeywordsPredicate) other).keywords)); // state check
    }
}
|
package fitperson.ritesh.com.thefitperson;
import android.content.Intent;
import android.media.MediaPlayer;
import android.net.Uri;
import android.support.annotation.NonNull;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.util.Log;
import android.view.View;
import android.widget.Toast;
import android.widget.VideoView;
import com.facebook.AccessToken;
import com.facebook.CallbackManager;
import com.facebook.FacebookCallback;
import com.facebook.FacebookException;
import com.facebook.login.LoginResult;
import com.facebook.login.widget.LoginButton;
import com.google.android.gms.auth.api.signin.GoogleSignIn;
import com.google.android.gms.auth.api.signin.GoogleSignInAccount;
import com.google.android.gms.auth.api.signin.GoogleSignInClient;
import com.google.android.gms.auth.api.signin.GoogleSignInOptions;
import com.google.android.gms.common.SignInButton;
import com.google.android.gms.common.api.ApiException;
import com.google.android.gms.tasks.OnCompleteListener;
import com.google.android.gms.tasks.Task;
import com.google.firebase.auth.AuthCredential;
import com.google.firebase.auth.AuthResult;
import com.google.firebase.auth.FacebookAuthProvider;
import com.google.firebase.auth.FirebaseAuth;
import com.google.firebase.auth.FirebaseUser;
import com.google.firebase.auth.GoogleAuthProvider;
import com.google.firebase.database.DataSnapshot;
import com.google.firebase.database.DatabaseError;
import com.google.firebase.database.DatabaseReference;
import com.google.firebase.database.FirebaseDatabase;
import com.google.firebase.database.ValueEventListener;
import java.util.Arrays;
import java.util.Collections;
public class LaunchActivity extends AppCompatActivity {
private static final int RC_SIGN_IN = 1;
private static final String TAG = "TAG";
GoogleSignInOptions gso;
GoogleSignInClient mGoogleSignInClient;
SignInButton signInButton;
private FirebaseAuth mAuth;
CallbackManager callbackManager;
private static final String EMAIL = "email";
LoginButton loginButton;
DatabaseReference databaseReference;
VideoView videoview;
@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_launch);
    // Bind the VideoView. This assignment was commented out, leaving
    // videoview null and making setOnCompletionListener below throw a
    // NullPointerException on every launch.
    videoview = findViewById(R.id.videoView);
    // Uri uri = Uri.parse("android.resource://"+getPackageName()+"/"+R.raw.video);
    // videoview.setVideoURI(uri);
    // videoview.start();
    //Add video to raw folder and uncomment the above code. video unable to upload to github
    // Loop the background video: restart it whenever it finishes.
    videoview.setOnCompletionListener(new MediaPlayer.OnCompletionListener() {
        @Override
        public void onCompletion(MediaPlayer mp) {
            videoview.start();
        }
    });
    databaseReference = FirebaseDatabase.getInstance().getReference("users");
    mAuth = FirebaseAuth.getInstance();
    // Google auth: request an ID token (for Firebase credential exchange) and email.
    gso = new GoogleSignInOptions.Builder(GoogleSignInOptions.DEFAULT_SIGN_IN)
            .requestIdToken(getString(R.string.default_web_client_id))
            .requestEmail()
            .build();
    mGoogleSignInClient = GoogleSignIn.getClient(this, gso);
    signInButton = findViewById(R.id.sign_in_button);
    signInButton.setOnClickListener((View v) -> {
        switch (v.getId()) {
            case R.id.sign_in_button:
                signIn();
                break;
        }
    });
    // Facebook auth: request the email permission and forward successful
    // logins to Firebase via handleFacebookAccessToken.
    callbackManager = CallbackManager.Factory.create();
    loginButton = findViewById(R.id.login_button);
    loginButton.setReadPermissions(Collections.singletonList(EMAIL));
    // Callback registration
    loginButton.registerCallback(callbackManager, new FacebookCallback<LoginResult>() {
        @Override
        public void onSuccess(LoginResult loginResult) {
            handleFacebookAccessToken(loginResult.getAccessToken());
        }

        @Override
        public void onCancel() {
            // User cancelled the Facebook login; nothing to do.
        }

        @Override
        public void onError(FacebookException exception) {
            // FIXME: surface the failure to the user instead of silently ignoring it.
        }
    });
}
@Override
public void onStart() {
super.onStart();
// Check if user is signed in (non-null) and update UI accordingly.
FirebaseUser currentUser = mAuth.getCurrentUser();
if (currentUser != null) {
if (isPresentInDatabase(currentUser)) {
// do what is to be done when user already is in the database
} else {
Intent intent = new Intent(LaunchActivity.this, DataEntry.class);
startActivity(intent);
}
}
}
@Override
protected void onResume() {
super.onResume();
// to restart the video after coming from other activity like Sing up
videoview.start();
}
private boolean isPresentInDatabase(FirebaseUser user) {
final boolean[] answer = new boolean[1];
databaseReference.child("user").orderByChild("uid").equalTo(user.getUid()).addListenerForSingleValueEvent(new ValueEventListener() {
@Override
public void onDataChange(@NonNull DataSnapshot dataSnapshot) {
answer[0] = dataSnapshot.exists();
}
@Override
public void onCancelled(@NonNull DatabaseError databaseError) {
}
});
return answer[0];
}
private void signIn() {
Intent signInIntent = mGoogleSignInClient.getSignInIntent();
startActivityForResult(signInIntent, RC_SIGN_IN);
}
@Override
public void onActivityResult(int requestCode, int resultCode, Intent data) {
super.onActivityResult(requestCode, resultCode, data);
// Result returned from launching the Intent from GoogleSignInApi.getSignInIntent(...);
if (requestCode == RC_SIGN_IN) {
Task<GoogleSignInAccount> task = GoogleSignIn.getSignedInAccountFromIntent(data);
try {
// Google Sign In was successful, authenticate with Firebase
GoogleSignInAccount account = task.getResult(ApiException.class);
firebaseAuthWithGoogle(account);
} catch (ApiException e) {
e.printStackTrace();
}
} else {
callbackManager.onActivityResult(requestCode, resultCode, data);
}
}
private void handleFacebookAccessToken(AccessToken token) {
Log.d(TAG, "handleFacebookAccessToken:" + token);
AuthCredential credential = FacebookAuthProvider.getCredential(token.getToken());
mAuth.signInWithCredential(credential)
.addOnCompleteListener(this, (@NonNull Task<AuthResult> task) -> {
if (task.isSuccessful()) {
// Sign in success, update UI with the signed-in user's information
Log.d(TAG, "signInWithCredential:success");
FirebaseUser user = mAuth.getCurrentUser();
} else {
// If sign in fails, display a message to the user.
Log.w(TAG, "signInWithCredential:failure", task.getException());
Toast.makeText(getApplicationContext(), "Authentication failed.",
Toast.LENGTH_SHORT).show();
}
});
}
private void firebaseAuthWithGoogle(GoogleSignInAccount acct) {
Log.d(TAG, "firebaseAuthWithGoogle:" + acct.getId());
AuthCredential credential = GoogleAuthProvider.getCredential(acct.getIdToken(), null);
mAuth.signInWithCredential(credential)
.addOnCompleteListener(this, (@NonNull Task<AuthResult> task) -> {
if (task.isSuccessful()) {
// Sign in success, update UI with the signed-in user's information
Log.d(TAG, "signInWithCredential:success");
FirebaseUser user = mAuth.getCurrentUser();
} else {
// If sign in fails, display a message to the user.
Log.w(TAG, "signInWithCredential:failure", task.getException());
Toast.makeText(getApplicationContext(), "Authentication Failed", Toast.LENGTH_SHORT).show();
}
});
}
}
|
/*
* Copyright (C) 2015 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package OptimizationTests.ShortLeafMethodsInlining.InvokeVirtual_long_to_int_001;
/**
 * Compiler-optimization test fixture for short-leaf-method inlining
 * (InvokeVirtual with a long-to-int narrowing conversion).
 *
 * NOTE(review): do not refactor or "simplify" this method — the exact
 * three-statement shape of the leaf body is presumably what the inlining
 * test exercises; collapsing it to {@code return (int) jj;} could change
 * the generated bytecode under test.
 */
class Test {
    /**
     * Narrows a {@code long} to an {@code int} via an explicit cast and
     * returns it (truncates to the low 32 bits per JLS narrowing rules).
     */
    public int simple_method(long jj) {
        int ii;
        ii = (int)jj;
        return ii;
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.